From 5ffe261a85e8a408e81f6833d60a883553120604 Mon Sep 17 00:00:00 2001 From: ndickson Date: Fri, 12 Apr 2024 20:36:55 -0400 Subject: [PATCH 001/175] Added and integrated C++ graphium_cpp library, a Python module implemented in C++ for featurization and preprocessing optimizations, along with a few other optimizations, significantly reducing memory usage, disk usage, and processing time for large datasets. --- LICENSE | 1 + env.yml | 2 +- expts/configs/config_mpnn_10M_b3lyp.yaml | 1 - expts/configs/config_mpnn_pcqm4m.yaml | 1 - .../hydra-configs/architecture/largemix.yaml | 1 - expts/hydra-configs/architecture/toymix.yaml | 2 +- .../finetuning/admet_baseline.yaml | 1 - .../base_config/large.yaml | 1 - .../base_config/large_pcba.yaml | 1 - .../base_config/large_pcqm_g25.yaml | 1 - .../base_config/large_pcqm_n4.yaml | 1 - graphium/config/_loader.py | 6 - graphium/data/collate.py | 88 +- graphium/data/datamodule.py | 765 ++------ graphium/data/dataset.py | 461 +---- graphium/data/multilevel_utils.py | 148 +- graphium/data/normalization.py | 6 + graphium/features/featurizer.py | 181 +- graphium/graphium_cpp/commute.cpp | 67 + graphium/graphium_cpp/commute.h | 38 + graphium/graphium_cpp/electrostatic.cpp | 106 ++ graphium/graphium_cpp/electrostatic.h | 60 + graphium/graphium_cpp/features.cpp | 1395 +++++++++++++++ graphium/graphium_cpp/features.h | 275 +++ graphium/graphium_cpp/float_features.cpp | 526 ++++++ graphium/graphium_cpp/float_features.h | 58 + graphium/graphium_cpp/graphium_cpp.cpp | 92 + graphium/graphium_cpp/graphormer.cpp | 70 + graphium/graphium_cpp/graphormer.h | 31 + graphium/graphium_cpp/labels.cpp | 1584 +++++++++++++++++ graphium/graphium_cpp/labels.h | 69 + graphium/graphium_cpp/one_hot.cpp | 358 ++++ graphium/graphium_cpp/one_hot.h | 29 + graphium/graphium_cpp/random_walk.cpp | 141 ++ graphium/graphium_cpp/random_walk.h | 44 + graphium/graphium_cpp/setup.py | 82 + graphium/graphium_cpp/spectral.cpp | 296 +++ graphium/graphium_cpp/spectral.h | 35 + 
tests/test_collate.py | 35 +- tests/test_datamodule.py | 266 +-- tests/test_dataset.py | 6 +- tests/test_multitask_datamodule.py | 3 - tests/test_training.py | 1 - 43 files changed, 6037 insertions(+), 1298 deletions(-) create mode 100644 graphium/graphium_cpp/commute.cpp create mode 100644 graphium/graphium_cpp/commute.h create mode 100644 graphium/graphium_cpp/electrostatic.cpp create mode 100644 graphium/graphium_cpp/electrostatic.h create mode 100644 graphium/graphium_cpp/features.cpp create mode 100644 graphium/graphium_cpp/features.h create mode 100644 graphium/graphium_cpp/float_features.cpp create mode 100644 graphium/graphium_cpp/float_features.h create mode 100644 graphium/graphium_cpp/graphium_cpp.cpp create mode 100644 graphium/graphium_cpp/graphormer.cpp create mode 100644 graphium/graphium_cpp/graphormer.h create mode 100644 graphium/graphium_cpp/labels.cpp create mode 100644 graphium/graphium_cpp/labels.h create mode 100644 graphium/graphium_cpp/one_hot.cpp create mode 100644 graphium/graphium_cpp/one_hot.h create mode 100644 graphium/graphium_cpp/random_walk.cpp create mode 100644 graphium/graphium_cpp/random_walk.h create mode 100755 graphium/graphium_cpp/setup.py create mode 100644 graphium/graphium_cpp/spectral.cpp create mode 100644 graphium/graphium_cpp/spectral.h diff --git a/LICENSE b/LICENSE index 4cef7c9e1..cbca6ebfd 100644 --- a/LICENSE +++ b/LICENSE @@ -189,6 +189,7 @@ Copyright 2023 Valence Labs Copyright 2023 Recursion Pharmaceuticals Copyright 2023 Graphcore Limited + Copyright 2024 NVIDIA CORPORATION & AFFILIATES Various Academic groups have also contributed to this software under the given license. These include, but are not limited, to the following diff --git a/env.yml b/env.yml index fa4e89136..e9999d2d6 100644 --- a/env.yml +++ b/env.yml @@ -28,7 +28,7 @@ dependencies: - gcsfs >=2021.6 # ML packages - - cuda-version # works also with CPU-only system. + - cuda-version == 11.2 # works also with CPU-only system. 
- pytorch >=1.12 - lightning >=2.0 - torchmetrics >=0.7.0,<0.11 diff --git a/expts/configs/config_mpnn_10M_b3lyp.yaml b/expts/configs/config_mpnn_10M_b3lyp.yaml index c385d7689..d54bc6667 100644 --- a/expts/configs/config_mpnn_10M_b3lyp.yaml +++ b/expts/configs/config_mpnn_10M_b3lyp.yaml @@ -93,7 +93,6 @@ datamodule: featurization_progress: True featurization_backend: "loky" processed_graph_data_path: "../datacache/b3lyp/" - dataloading_from: ram featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), # 'possible_number_radical_e', 'possible_is_aromatic', 'possible_is_in_ring', diff --git a/expts/configs/config_mpnn_pcqm4m.yaml b/expts/configs/config_mpnn_pcqm4m.yaml index 9735f9555..34a9ca6b9 100644 --- a/expts/configs/config_mpnn_pcqm4m.yaml +++ b/expts/configs/config_mpnn_pcqm4m.yaml @@ -31,7 +31,6 @@ datamodule: featurization_progress: True featurization_backend: "loky" processed_graph_data_path: "graphium/data/PCQM4Mv2/" - dataloading_from: ram featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), # 'possible_number_radical_e', 'possible_is_aromatic', 'possible_is_in_ring', diff --git a/expts/hydra-configs/architecture/largemix.yaml b/expts/hydra-configs/architecture/largemix.yaml index 32efef778..e6969f3f7 100644 --- a/expts/hydra-configs/architecture/largemix.yaml +++ b/expts/hydra-configs/architecture/largemix.yaml @@ -88,7 +88,6 @@ datamodule: featurization_progress: True featurization_backend: "loky" processed_graph_data_path: ${constants.datacache_path} - dataloading_from: "disk" num_workers: 20 # -1 to use all persistent_workers: True featurization: diff --git a/expts/hydra-configs/architecture/toymix.yaml b/expts/hydra-configs/architecture/toymix.yaml index a62b839cd..65999f304 100644 --- a/expts/hydra-configs/architecture/toymix.yaml +++ b/expts/hydra-configs/architecture/toymix.yaml @@ -79,10 +79,10 @@ datamodule: featurization_progress: True 
featurization_backend: "loky" processed_graph_data_path: ${constants.datacache_path} - dataloading_from: ram num_workers: 30 # -1 to use all persistent_workers: False featurization: + use_graphium_cpp: True atom_property_list_onehot: [atomic-number, group, period, total-valence] atom_property_list_float: [degree, formal-charge, radical-electron, aromatic, in-ring] edge_property_list: [bond-type-onehot, stereo, in-ring] diff --git a/expts/hydra-configs/finetuning/admet_baseline.yaml b/expts/hydra-configs/finetuning/admet_baseline.yaml index 410d0dd64..6f9fc1c93 100644 --- a/expts/hydra-configs/finetuning/admet_baseline.yaml +++ b/expts/hydra-configs/finetuning/admet_baseline.yaml @@ -20,7 +20,6 @@ constants: datamodule: args: batch_size_training: 32 - dataloading_from: ram persistent_workers: true num_workers: 4 diff --git a/expts/neurips2023_configs/base_config/large.yaml b/expts/neurips2023_configs/base_config/large.yaml index 8a836f368..222663d47 100644 --- a/expts/neurips2023_configs/base_config/large.yaml +++ b/expts/neurips2023_configs/base_config/large.yaml @@ -137,7 +137,6 @@ datamodule: featurization_n_jobs: 30 featurization_progress: True featurization_backend: "loky" - dataloading_from: disk processed_graph_data_path: ${constants.datacache_path} featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), diff --git a/expts/neurips2023_configs/base_config/large_pcba.yaml b/expts/neurips2023_configs/base_config/large_pcba.yaml index f90675e73..2bb9b4d93 100644 --- a/expts/neurips2023_configs/base_config/large_pcba.yaml +++ b/expts/neurips2023_configs/base_config/large_pcba.yaml @@ -136,7 +136,6 @@ datamodule: featurization_n_jobs: 30 featurization_progress: True featurization_backend: "loky" - dataloading_from: disk processed_graph_data_path: ${constants.datacache_path} featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), diff --git 
a/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml b/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml index 1fac9176b..7041a4c9c 100644 --- a/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml +++ b/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml @@ -136,7 +136,6 @@ datamodule: featurization_n_jobs: 30 featurization_progress: True featurization_backend: "loky" - dataloading_from: disk processed_graph_data_path: ${constants.datacache_path} featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), diff --git a/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml b/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml index f9a9e58b8..b0f4a56f9 100644 --- a/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml +++ b/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml @@ -136,7 +136,6 @@ datamodule: featurization_n_jobs: 30 featurization_progress: True featurization_backend: "loky" - dataloading_from: disk processed_graph_data_path: ${constants.datacache_path} featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), diff --git a/graphium/config/_loader.py b/graphium/config/_loader.py index 1e542592d..b27aa9b4e 100644 --- a/graphium/config/_loader.py +++ b/graphium/config/_loader.py @@ -203,8 +203,6 @@ def load_architecture( architecture: The datamodule used to process and load the data """ - if isinstance(config, dict) and "finetuning" not in config: - config = omegaconf.OmegaConf.create(config) cfg_arch = config["architecture"] # Select the architecture @@ -262,10 +260,6 @@ def load_architecture( else: gnn_kwargs.setdefault("in_dim", edge_in_dim) - # Set the parameters for the full network - if "finetuning" not in config: - task_heads_kwargs = omegaconf.OmegaConf.to_object(task_heads_kwargs) - # Set all the input arguments for the model model_kwargs = dict( gnn_kwargs=gnn_kwargs, diff --git a/graphium/data/collate.py 
b/graphium/data/collate.py index 22486b034..211be06d5 100644 --- a/graphium/data/collate.py +++ b/graphium/data/collate.py @@ -1,12 +1,12 @@ """ -------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore. +Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals, Graphcore, and NVIDIA Corporation & Affiliates. Use of this software is subject to the terms and conditions outlined in the LICENSE file. Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without warranties of any kind. -Valence Labs, Recursion Pharmaceuticals and Graphcore are not liable for any damages arising from its use. +Valence Labs, Recursion Pharmaceuticals, Graphcore, and NVIDIA Corporation & Affiliates are not liable for any damages arising from its use. Refer to the LICENSE file for the full terms and conditions. -------------------------------------------------------------------------------- """ @@ -26,11 +26,11 @@ from graphium.utils.packing import fast_packing, get_pack_sizes, node_to_pack_indices_mask from loguru import logger from graphium.data.utils import get_keys - +from graphium.data.dataset import torch_enum_to_dtype def graphium_collate_fn( elements: Union[List[Any], Dict[str, List[Any]]], - labels_size_dict: Optional[Dict[str, Any]] = None, + labels_num_cols_dict: Optional[Dict[str, Any]] = None, labels_dtype_dict: Optional[Dict[str, Any]] = None, mask_nan: Union[str, float, Type[None]] = "raise", do_not_collate_keys: List[str] = [], @@ -52,7 +52,7 @@ def graphium_collate_fn( elements: The elements to batch. See `torch.utils.data.dataloader.default_collate`. - labels_size_dict: + labels_num_cols_dict: (Note): This is an attribute of the `MultitaskDataset`. A dictionary of the form Dict[tasks, sizes] which has task names as keys and the size of the label tensor as value. 
The size of the tensor corresponds to how many @@ -86,6 +86,10 @@ def graphium_collate_fn( The batched elements. See `torch.utils.data.dataloader.default_collate`. """ + # Skip any elements that failed + if None in elements: + elements = [e for e in elements if e is not None] + elem = elements[0] if isinstance(elem, Mapping): batch = {} @@ -93,7 +97,15 @@ def graphium_collate_fn( # Multitask setting: We have to pad the missing labels if key == "labels": labels = [d[key] for d in elements] - batch[key] = collate_labels(labels, labels_size_dict, labels_dtype_dict) + if "features" in elem: + num_nodes = [d["features"].num_nodes for d in elements] + num_edges = [d["features"].num_edges for d in elements] + else: + num_nodes = [d["num_nodes"] for d in elements] + num_edges = [d["num_edges"] for d in elements] + batch[key] = collate_labels(labels, labels_num_cols_dict, labels_dtype_dict, num_nodes, num_edges) + elif key == "num_nodes" or key == "num_edges": + continue # If the features are a dictionary containing GraphDict elements, # Convert to pyg graphs and use the pyg batching. @@ -182,23 +194,21 @@ def collage_pyg_graph(pyg_graphs: Iterable[Union[Data, Dict]], batch_size_per_pa return Batch.from_data_list(pyg_batch) -def pad_to_expected_label_size(labels: torch.Tensor, label_size: List[int]): +def pad_to_expected_label_size(labels: torch.Tensor, label_rows: int, label_cols: int): """Determine difference of ``labels`` shape to expected shape `label_size` and pad with ``torch.nan`` accordingly. 
""" - if label_size == list(labels.shape): + if len(labels.shape) == 2 and label_rows == labels.shape[0] and label_cols == labels.shape[1]: return labels - missing_dims = len(label_size) - len(labels.shape) + missing_dims = 2 - len(labels.shape) for _ in range(missing_dims): labels.unsqueeze(-1) - pad_sizes = [(0, expected - actual) for expected, actual in zip(label_size, labels.shape)] - pad_sizes = [item for before_after in pad_sizes for item in before_after] - pad_sizes.reverse() + pad_sizes = [label_cols - labels.shape[1], 0, label_rows - labels.shape[0], 0] if any([s < 0 for s in pad_sizes]): - logger.warning(f"More labels available than expected. Will remove data to fit expected size.") + logger.warning(f"More labels available than expected. Will remove data to fit expected size. cols: {labels.shape[1]}->{label_cols}, rows: {labels.shape[0]}->{label_rows}") return torch.nn.functional.pad(labels, pad_sizes, value=torch.nan) @@ -226,31 +236,41 @@ def collate_pyg_graph_labels(pyg_labels: List[Data]): return Batch.from_data_list(pyg_batch) -def get_expected_label_size(label_data: Data, task: str, label_size: List[int]): +def get_expected_label_rows( + label_data: Data, + task: str, + num_nodes: int, + num_edges: int +): """Determines expected label size based on the specfic graph properties and the number of targets in the task-dataset. 
""" if task.startswith("graph_"): num_labels = 1 elif task.startswith("node_"): - num_labels = label_data.x.size(0) + num_labels = num_nodes elif task.startswith("edge_"): - num_labels = label_data.edge_index.size(1) + num_labels = num_edges elif task.startswith("nodepair_"): raise NotImplementedError() - return [num_labels] + label_size + else: + print("Task name "+task+" in get_expected_label_rows") + raise NotImplementedError() + return num_labels def collate_labels( labels: List[Data], - labels_size_dict: Optional[Dict[str, Any]] = None, + labels_num_cols_dict: Optional[Dict[str, Any]] = None, labels_dtype_dict: Optional[Dict[str, Any]] = None, + num_nodes: List[int] = None, + num_edges: List[int] = None ): """Collate labels for multitask learning. Parameters: labels: List of labels - labels_size_dict: Dict of the form Dict[tasks, sizes] which has task names as keys + labels_num_cols_dict: Dict of the form Dict[tasks, sizes] which has task names as keys and the size of the label tensor as value. The size of the tensor corresponds to how many labels/values there are to predict for that task. labels_dtype_dict: @@ -260,25 +280,19 @@ def collate_labels( Returns: A dictionary of the form Dict[tasks, labels] where tasks is the name of the task and labels - is a tensor of shape (batch_size, *labels_size_dict[task]). + is a tensor of shape (batch_size, *labels_num_cols_dict[task]). 
""" - if labels_size_dict is not None: - for this_label in labels: - for task in labels_size_dict.keys(): - labels_size_dict[task] = list(labels_size_dict[task]) - if len(labels_size_dict[task]) >= 2: - labels_size_dict[task] = labels_size_dict[task][1:] - elif not task.startswith("graph_"): - labels_size_dict[task] = [1] + if labels_num_cols_dict is not None: + for index, this_label in enumerate(labels): label_keys_set = set(get_keys(this_label)) - empty_task_labels = set(labels_size_dict.keys()) - label_keys_set + empty_task_labels = set(labels_num_cols_dict.keys()) - label_keys_set for task in empty_task_labels: - labels_size_dict[task] = get_expected_label_size(this_label, task, labels_size_dict[task]) - dtype = labels_dtype_dict[task] - this_label[task] = torch.full([*labels_size_dict[task]], torch.nan, dtype=dtype) + label_rows = get_expected_label_rows(this_label, task, num_nodes[index], num_edges[index]) + dtype = torch_enum_to_dtype(labels_dtype_dict[task]) + this_label[task] = torch.full((label_rows, labels_num_cols_dict[task]), fill_value=torch.nan, dtype=dtype) for task in label_keys_set - set(["x", "edge_index"]) - empty_task_labels: - labels_size_dict[task] = get_expected_label_size(this_label, task, labels_size_dict[task]) + label_rows = get_expected_label_rows(this_label, task, num_nodes[index], num_edges[index]) if not isinstance(this_label[task], (torch.Tensor)): this_label[task] = torch.as_tensor(this_label[task]) @@ -286,19 +300,19 @@ def collate_labels( # Ensure explicit task dimension also for single task labels if len(this_label[task].shape) == 1: # Distinguish whether target dim or entity dim is missing - if labels_size_dict[task][0] == this_label[task].shape[0]: + if label_rows == this_label[task].shape[0]: # num graphs/nodes/edges/nodepairs already matching this_label[task] = this_label[task].unsqueeze(1) else: # data lost unless entity dim is supposed to be 1 - if labels_size_dict[task][0] == 1: + if label_rows == 1: this_label[task] = 
this_label[task].unsqueeze(0) else: raise ValueError( - f"Labels for {labels_size_dict[task][0]} nodes/edges/nodepairs expected, got 1." + f"Labels for {label_rows} nodes/edges/nodepairs expected, got 1." ) - this_label[task] = pad_to_expected_label_size(this_label[task], labels_size_dict[task]) + this_label[task] = pad_to_expected_label_size(this_label[task], label_rows, labels_num_cols_dict[task]) return collate_pyg_graph_labels(labels) diff --git a/graphium/data/datamodule.py b/graphium/data/datamodule.py index 4e89f6728..f27218176 100644 --- a/graphium/data/datamodule.py +++ b/graphium/data/datamodule.py @@ -1,12 +1,12 @@ """ -------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. +Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates. Use of this software is subject to the terms and conditions outlined in the LICENSE file. Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without warranties of any kind. -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. +Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates are not liable for any damages arising from its use. Refer to the LICENSE file for the full terms and conditions. 
-------------------------------------------------------------------------------- """ @@ -51,6 +51,8 @@ from torch.utils.data.dataloader import DataLoader, Dataset from torch.utils.data import Subset +from rdkit import RDLogger + from graphium.utils import fs from graphium.features import ( mol_to_graph_dict, @@ -62,16 +64,13 @@ from graphium.data.utils import graphium_package_path, found_size_mismatch from graphium.utils.arg_checker import check_arg_iterator from graphium.utils.hashing import get_md5_hash -from graphium.data.smiles_transform import ( - did_featurization_fail, - BatchingSmilesTransform, - smiles_to_unique_mol_ids, -) from graphium.data.collate import graphium_collate_fn import graphium.data.dataset as Datasets from graphium.data.normalization import LabelNormalization from graphium.data.multilevel_utils import extract_labels +import graphium_cpp + torch.multiprocessing.set_sharing_strategy("file_system") @@ -153,7 +152,6 @@ def __init__( self._predict_ds = None self._data_is_prepared = False - self._data_is_cached = False def prepare_data(self): raise NotImplementedError() @@ -790,8 +788,7 @@ class MultitaskFromSmilesDataModule(BaseDataModule, IPUDataModuleModifier): def __init__( self, task_specific_args: Union[Dict[str, DatasetProcessingParams], Dict[str, Any]], - processed_graph_data_path: Optional[Union[str, os.PathLike]] = None, - dataloading_from: str = "ram", + processed_graph_data_path: Union[str, os.PathLike], featurization: Optional[Union[Dict[str, Any], omegaconf.DictConfig]] = None, batch_size_training: int = 16, batch_size_inference: int = 16, @@ -800,10 +797,6 @@ def __init__( pin_memory: bool = True, persistent_workers: bool = False, multiprocessing_context: Optional[str] = None, - featurization_n_jobs: int = -1, - featurization_progress: bool = False, - featurization_backend: str = "loky", - featurization_batch_size: int = 1000, collate_fn: Optional[Callable] = None, prepare_dict_or_graph: str = "pyg:graph", **kwargs, @@ -821,23 
+814,16 @@ def __init__( - `df_path` - `smiles_col` - `label_cols` - dataloading_from: Whether to load the data from RAM or from disk. If set to "disk", the data - must have been previously cached with `processed_graph_data_path` set. If set to "ram", the data - will be loaded in RAM and the `processed_graph_data_path` will be ignored. featurization: args to apply to the SMILES to Graph featurizer. batch_size_training: batch size for training and val dataset. batch_size_inference: batch size for test dataset. num_workers: Number of workers for the dataloader. Use -1 to use all available cores. pin_memory: Whether to pin on paginated CPU memory for the dataloader. - featurization_n_jobs: Number of cores to use for the featurization. - featurization_progress: whether to show a progress bar during featurization. - featurization_backend: The backend to use for the molecular featurization. - "multiprocessing": Found to cause less memory issues. - "loky": joblib's Default. Found to cause memory leaks. - "threading": Found to be slow. - featurization_batch_size: Batch size to use for the featurization. collate_fn: A custom torch collate function. Default is to `graphium.data.graphium_collate_fn` prepare_dict_or_graph: Whether to preprocess all molecules as Graph dict or PyG graphs. 
@@ -878,11 +864,7 @@ def __init__( task: self.task_dataset_processing_params[task].epoch_sampling_fraction for task in self.task_dataset_processing_params.keys() } - - self.featurization_n_jobs = featurization_n_jobs - self.featurization_progress = featurization_progress - self.featurization_backend = featurization_backend - self.featurization_batch_size = featurization_batch_size + self.task_names = [task for task in self.task_dataset_processing_params.keys()] self.task_train_indices = None self.task_val_indices = None @@ -897,7 +879,7 @@ def __init__( self.val_ds = None self.test_ds = None - self._parse_caching_args(processed_graph_data_path, dataloading_from) + self._parse_caching_args(processed_graph_data_path) self.task_norms = {} @@ -905,12 +887,65 @@ def __init__( featurization = {} self.featurization = featurization + self.encoded_featurization = featurization + + use_graphium_cpp = "use_graphium_cpp" not in featurization or featurization["use_graphium_cpp"] + if use_graphium_cpp: + # Copy featurization for the representation used by graphium_cpp + encoded_featurization = deepcopy(featurization) + self.encoded_featurization = encoded_featurization + encoded_featurization["use_graphium_cpp"] = True + if "atom_property_list_onehot" not in featurization: + featurization["atom_property_list_onehot"] = None + if "atom_property_list_float" not in featurization: + featurization["atom_property_list_float"] = None + if "edge_property_list" not in featurization: + featurization["edge_property_list"] = None + if "pos_encoding_as_features" not in featurization: + featurization["pos_encoding_as_features"] = None + encoded_featurization["original_featurization"] = { + "atom_property_list_onehot": featurization["atom_property_list_onehot"], + "atom_property_list_float": featurization["atom_property_list_float"], + "edge_property_list": featurization["edge_property_list"], + "pos_encoding_as_features": featurization["pos_encoding_as_features"] + } + if 
featurization["atom_property_list_onehot"] is not None: + self.atom_onehot_property_tensor = graphium_cpp.atom_onehot_feature_names_to_tensor(featurization["atom_property_list_onehot"]) + else: + self.atom_onehot_property_tensor = torch.tensor(data=[], dtype=torch.int64) + encoded_featurization["atom_property_list_onehot"] = self.atom_onehot_property_tensor + + if featurization["atom_property_list_float"] is not None: + self.atom_float_property_tensor = graphium_cpp.atom_float_feature_names_to_tensor(featurization["atom_property_list_float"]) + else: + self.atom_float_property_tensor = torch.tensor(data=[], dtype=torch.int64) + encoded_featurization["atom_property_list_float"] = self.atom_float_property_tensor + + if featurization["edge_property_list"] is not None: + self.edge_property_tensor = graphium_cpp.bond_feature_names_to_tensor(featurization["edge_property_list"]) + else: + self.edge_property_tensor = torch.tensor(data=[], dtype=torch.int64) + encoded_featurization["edge_property_list"] = self.edge_property_tensor + + if featurization["pos_encoding_as_features"] is not None and featurization["pos_encoding_as_features"]["pos_types"] is not None: + (self.pos_encoding_names, self.pos_encoding_tensor) = graphium_cpp.positional_feature_options_to_tensor(featurization["pos_encoding_as_features"]["pos_types"]) + else: + self.pos_encoding_names = [] + self.pos_encoding_tensor = torch.tensor(data=[], dtype=torch.int64) + encoded_featurization["pos_encoding_as_features"] = (self.pos_encoding_names, self.pos_encoding_tensor) + + explicit_H = featurization["explicit_H"] if "explicit_H" in featurization else False + add_self_loop = featurization["add_self_loop"] if "add_self_loop" in featurization else False + + # Save these for calling graphium_cpp.prepare_and_save_data later + self.add_self_loop = add_self_loop + self.explicit_H = explicit_H # Whether to transform the smiles into a pyg `Data` graph or a dictionary compatible with pyg if prepare_dict_or_graph == 
"pyg:dict": - self.smiles_transformer = partial(mol_to_graph_dict, **featurization) + self.smiles_transformer = partial(mol_to_graph_dict, **encoded_featurization) elif prepare_dict_or_graph == "pyg:graph": - self.smiles_transformer = partial(mol_to_pyggraph, **featurization) + self.smiles_transformer = partial(mol_to_pyggraph, **encoded_featurization) else: raise ValueError( f"`prepare_dict_or_graph` should be either 'pyg:dict' or 'pyg:graph', Provided: `{prepare_dict_or_graph}`" @@ -920,28 +955,19 @@ def __init__( if self.processed_graph_data_path is not None: if self._ready_to_load_all_from_file(): self._data_is_prepared = True - self._data_is_cached = True - def _parse_caching_args(self, processed_graph_data_path, dataloading_from): + def _parse_caching_args(self, processed_graph_data_path): """ Parse the caching arguments, and raise errors if the arguments are invalid. """ - # Whether to load the data from RAM or from disk - dataloading_from = dataloading_from.lower() - if dataloading_from not in ["disk", "ram"]: - raise ValueError( - f"`dataloading_from` should be either 'disk' or 'ram', Provided: `{dataloading_from}`" - ) - # If loading from disk, the path to the cached data must be provided - if dataloading_from == "disk" and processed_graph_data_path is None: + if processed_graph_data_path is None: raise ValueError( - "When `dataloading_from` is 'disk', `processed_graph_data_path` must be provided." + "`processed_graph_data_path` must be provided." ) self.processed_graph_data_path = processed_graph_data_path - self.dataloading_from = dataloading_from def _get_task_key(self, task_level: str, task: str): task_prefix = f"{task_level}_" @@ -959,7 +985,7 @@ def get_task_levels(self): return task_level_map - def prepare_data(self, save_smiles_and_ids: bool = False): + def prepare_data(self): """Called only from a single process in distributed settings. Steps: - If each cache is set and exists, reload from cache and return. 
Otherwise, @@ -970,30 +996,51 @@ def prepare_data(self, save_smiles_and_ids: bool = False): - In the previous step, we were also able to get the unique smiles, which we use to compute the features - For each single-task dataframe and associated data (smiles, labels, etc.): - Filter out the data corresponding to molecules which failed featurization. - - Create a corresponding SingletaskDataset - - Split the SingletaskDataset according to the task-specific splits for train, val and test + - Split the dataset according to the task-specific splits for train, val and test """ - def has_atoms_after_h_removal(smiles): - # Remove all 'H' characters from the SMILES - smiles_without_h = re.sub("H", "", smiles) - # Check if any letters are remaining in the modified string - has_atoms = bool(re.search("[a-zA-Z]", smiles_without_h)) - if has_atoms == False: - logger.info(f"Removed Hydrogen molecule: {smiles}") - return has_atoms + # Don't log error messages from SMILES parsing in RDKit. + # Common error messages were: + # WARNING: not removing hydrogen atom without neighbors + # SMILES Parse Error: syntax error while parsing: restricted + # SMILES Parse Error: Failed parsing SMILES 'restricted' for input: 'restricted' + RDLogger.DisableLog('rdApp.*') + + self.data_offsets_tensor_index = 0 + self.concat_smiles_tensor_index = 1 + self.smiles_offsets_tensor_index = 2 + self.num_nodes_tensor_index = 3 + self.num_edges_tensor_index = 4 + + for task, args in self.task_dataset_processing_params.items(): + if args.label_normalization is None: + args.label_normalization = {} + label_normalization = LabelNormalization(**args.label_normalization) + self.task_norms[task] = label_normalization if self._data_is_prepared: logger.info("Data is already prepared.") - self.get_label_statistics(self.processed_graph_data_path, self.data_hash, dataset=None) + self.label_num_cols, self.label_dtypes = graphium_cpp.load_num_cols_and_dtypes(self.processed_graph_data_path, self.data_hash) + 
self.stage_data = { + "train": graphium_cpp.load_metadata_tensors(self.processed_graph_data_path, "train", self.data_hash), + "val": graphium_cpp.load_metadata_tensors(self.processed_graph_data_path, "val", self.data_hash), + "test": graphium_cpp.load_metadata_tensors(self.processed_graph_data_path, "test", self.data_hash), + } + for task in self.task_dataset_processing_params.keys(): + stats = graphium_cpp.load_stats(self.processed_graph_data_path, self.data_hash, task) + if len(stats) < 4: + raise RuntimeError(f"Error loading cached stats for task \"{task}\"") + + self.task_norms[task].set_statistics(stats[0], stats[1], stats[2], stats[3]) return + task_dataset_args = {} + self.task_train_indices = {} + self.task_val_indices = {} + self.task_test_indices = {} + """Load all single-task dataframes.""" - task_df = {} for task, args in self.task_dataset_processing_params.items(): - if args.label_normalization is None: - args.label_normalization = {} - label_normalization = LabelNormalization(**args.label_normalization) logger.info(f"Reading data for task '{task}'") if args.df is None: # Only load the useful columns, as some datasets can be very large when loading all columns. 
@@ -1007,24 +1054,18 @@ def has_atoms_after_h_removal(smiles): + check_arg_iterator(args.weights_col, enforce_type=list) ) label_dtype = {col: np.float32 for col in label_cols} - task_df[task] = self._read_table(args.df_path, usecols=usecols, dtype=label_dtype) + df = self._read_table(args.df_path, usecols=usecols, dtype=label_dtype) else: label_cols = self._parse_label_cols( df=args.df, df_path=None, label_cols=args.label_cols, smiles_col=args.smiles_col ) - task_df[task] = args.df - task_df[task] = task_df[task] + df = args.df + args.label_cols = label_cols - self.task_norms[task] = label_normalization - logger.info("Done reading datasets") - """Subsample the data frames and extract the necessary data to create SingleTaskDatasets for each task (smiles, labels, extras).""" - task_dataset_args = {} - for task in task_df.keys(): - task_dataset_args[task] = {} + """Subsample the data frames and extract the necessary data for each task (smiles, labels, extras).""" - for task, df in task_df.items(): # Subsample all the dataframes sample_size = self.task_dataset_processing_params[task].sample_size df = self._sub_sample_df(df, sample_size, self.task_dataset_processing_params[task].seed) @@ -1036,7 +1077,7 @@ def has_atoms_after_h_removal(smiles): logger.info("Filtering done") # Extract smiles, labels, extras args = self.task_dataset_processing_params[task] - smiles, labels, sample_idx, extras = self._extract_smiles_labels( + smiles, labels, label_offsets, sample_idx, extras = self._extract_smiles_labels( df, task_level=args.task_level, smiles_col=args.smiles_col, @@ -1045,126 +1086,66 @@ def has_atoms_after_h_removal(smiles): weights_col=args.weights_col, weights_type=args.weights_type, ) + + num_molecules = len(smiles) + + # Clear the reference to the DataFrame, so that Python can free up the memory. 
+ df = None # Store the relevant information for each task's dataset - task_dataset_args[task]["smiles"] = smiles - task_dataset_args[task]["labels"] = labels - task_dataset_args[task]["sample_idx"] = sample_idx - task_dataset_args[task]["extras"] = extras - - """Convert SMILES to features (graphs, fingerprints, etc.) for the unique molecules found.""" - all_smiles = [] - all_tasks = [] - idx_per_task = {} - total_len = 0 - for task, dataset_args in task_dataset_args.items(): - all_smiles.extend(dataset_args["smiles"]) - num_smiles = len(dataset_args["smiles"]) - idx_per_task[task] = (total_len, total_len + num_smiles) - total_len += num_smiles - for count in range(len(dataset_args["smiles"])): - all_tasks.append(task) - # Get all unique mol ids - all_unique_mol_ids = smiles_to_unique_mol_ids( - all_smiles, - n_jobs=self.featurization_n_jobs, - featurization_batch_size=self.featurization_batch_size, - backend=self.featurization_backend, - ) - _, unique_ids_idx, unique_ids_inv = np.unique( - all_unique_mol_ids, return_index=True, return_inverse=True - ) - - smiles_to_featurize = [all_smiles[ii] for ii in unique_ids_idx] - - # Convert SMILES to features - features, _ = self._featurize_molecules(smiles_to_featurize) - - # Store the features (including Nones, which will be filtered in the next step) - for task in task_dataset_args.keys(): - task_dataset_args[task]["features"] = [] - task_dataset_args[task]["idx_none"] = [] - # Create a list of features matching up with the original smiles - all_features = [features[unique_idx] for unique_idx in unique_ids_inv] - - # Add the features to the task-specific data - for all_idx, task in enumerate(all_tasks): - task_dataset_args[task]["features"].append(all_features[all_idx]) - - """Filter data based on molecules which failed featurization. 
Create single task datasets as well.""" - self.single_task_datasets = {} - for task, args in task_dataset_args.items(): - # Find out which molecule failed featurization, and filter them out - idx_none = [] - for idx, (feat, labels, smiles) in enumerate( - zip(args["features"], args["labels"], args["smiles"]) - ): - if did_featurization_fail(feat) or found_size_mismatch(task, feat, labels, smiles): - idx_none.append(idx) - this_unique_ids = all_unique_mol_ids[idx_per_task[task][0] : idx_per_task[task][1]] - df, features, smiles, labels, sample_idx, extras, this_unique_ids = self._filter_none_molecules( - idx_none, - task_df[task], - args["features"], - args["smiles"], - args["labels"], - args["sample_idx"], - args["extras"], - this_unique_ids, - ) - task_dataset_args[task]["smiles"] = smiles - task_dataset_args[task]["labels"] = labels - task_dataset_args[task]["features"] = features - task_dataset_args[task]["sample_idx"] = sample_idx - task_dataset_args[task]["extras"] = extras + task_dataset_args[task] = { + "smiles": smiles, + "labels": labels, + "label_offsets": label_offsets, + # sample_idx is not needed here anymore + #"sample_idx": sample_idx, + "extras": extras, + } - # We have the necessary components to create single-task datasets. 
- self.single_task_datasets[task] = Datasets.SingleTaskDataset( - features=task_dataset_args[task]["features"], - labels=task_dataset_args[task]["labels"], - smiles=task_dataset_args[task]["smiles"], - unique_ids=this_unique_ids, - indices=task_dataset_args[task]["sample_idx"], - **task_dataset_args[task]["extras"], - ) + """We split the data up to create train, val and test datasets""" - """We split the data up to create train, val and test datasets""" - self.task_train_indices = {} - self.task_val_indices = {} - self.task_test_indices = {} - - for task, df in task_df.items(): train_indices, val_indices, test_indices = self._get_split_indices( - len(df), + num_molecules, split_val=self.task_dataset_processing_params[task].split_val, split_test=self.task_dataset_processing_params[task].split_test, split_seed=self.task_dataset_processing_params[task].seed, splits_path=self.task_dataset_processing_params[task].splits_path, split_names=self.task_dataset_processing_params[task].split_names, - sample_idx=task_dataset_args[task]["sample_idx"], + # smiles and labels are already sub-sampled, so the split indices need to be + # relative to the sample, not the original. 
+ #sample_idx=task_dataset_args[task]["sample_idx"], ) self.task_train_indices[task] = train_indices self.task_val_indices[task] = val_indices self.task_test_indices[task] = test_indices - ( - self.train_singletask_datasets, - self.val_singletask_datasets, - self.test_singletask_datasets, - ) = self.get_subsets_of_datasets( - self.single_task_datasets, self.task_train_indices, self.task_val_indices, self.task_test_indices - ) + logger.info("Done reading datasets") - if self.processed_graph_data_path is not None: - self._save_data_to_files(save_smiles_and_ids) - self._data_is_cached = True + # The rest of the data preparation and caching is done in graphium_cpp.prepare_and_save_data + normalizations = {task: self.task_dataset_processing_params[task].label_normalization for task in self.task_dataset_processing_params.keys()} + self.stage_data, all_stats, self.label_num_cols, self.label_dtypes = graphium_cpp.prepare_and_save_data( + self.task_names, + task_dataset_args, + normalizations, + self.processed_graph_data_path, + self.data_hash, + self.task_train_indices, + self.task_val_indices, + self.task_test_indices, + self.add_self_loop, + self.explicit_H) + + for task, stats in all_stats.items(): + if len(stats) < 4: + raise RuntimeError(f"Error loading cached stats for task \"{task}\"") + + self.task_norms[task].set_statistics(stats[0], stats[1], stats[2], stats[3]) self._data_is_prepared = True def setup( self, stage: str = None, - save_smiles_and_ids: bool = False, ): """ Prepare the torch dataset. Called on every GPUs. Setting state here is ok. 
@@ -1174,54 +1155,46 @@ def setup( # Can possibly get rid of setup because a single dataset will have molecules exclusively in train, val or test # Produce the label sizes to update the collate function - labels_size = {} - labels_dtype = {} + label_num_cols = {} + label_dtypes = {} if stage == "fit" or stage is None: if self.train_ds is None: - self.train_ds = self._make_multitask_dataset( - self.dataloading_from, "train", save_smiles_and_ids=save_smiles_and_ids - ) + self.train_ds = self._make_multitask_dataset("train") if self.val_ds is None: - self.val_ds = self._make_multitask_dataset( - self.dataloading_from, "val", save_smiles_and_ids=save_smiles_and_ids - ) + self.val_ds = self._make_multitask_dataset("val") logger.info(self.train_ds) logger.info(self.val_ds) - labels_size.update( - self.train_ds.labels_size + label_num_cols.update( + dict(zip(self.train_ds.task_names, self.train_ds.label_num_cols)) ) # Make sure that all task label sizes are contained in here. Maybe do the update outside these if statements. 
- labels_size.update(self.val_ds.labels_size) - labels_dtype.update(self.train_ds.labels_dtype) - labels_dtype.update(self.val_ds.labels_dtype) + label_num_cols.update(dict(zip(self.val_ds.task_names, self.val_ds.label_num_cols))) + label_dtypes.update(dict(zip(self.train_ds.task_names, self.train_ds.label_dtypes))) + label_dtypes.update(dict(zip(self.val_ds.task_names, self.val_ds.label_dtypes))) if stage == "test" or stage is None: if self.test_ds is None: - self.test_ds = self._make_multitask_dataset( - self.dataloading_from, "test", save_smiles_and_ids=save_smiles_and_ids - ) + self.test_ds = self._make_multitask_dataset("test") logger.info(self.test_ds) - labels_size.update(self.test_ds.labels_size) - labels_dtype.update(self.test_ds.labels_dtype) + label_num_cols.update(dict(zip(self.test_ds.task_names, self.test_ds.label_num_cols))) + label_dtypes.update(dict(zip(self.test_ds.task_names, self.test_ds.label_dtypes))) - default_labels_size_dict = self.collate_fn.keywords.get("labels_size_dict", None) + default_labels_num_cols_dict = self.collate_fn.keywords.get("labels_num_cols_dict", None) - if default_labels_size_dict is None: - self.collate_fn.keywords["labels_size_dict"] = labels_size + if default_labels_num_cols_dict is None: + self.collate_fn.keywords["labels_num_cols_dict"] = label_num_cols default_labels_dtype_dict = self.collate_fn.keywords.get("labels_dtype_dict", None) if default_labels_dtype_dict is None: - self.collate_fn.keywords["labels_dtype_dict"] = labels_dtype + self.collate_fn.keywords["labels_dtype_dict"] = label_dtypes def _make_multitask_dataset( self, - dataloading_from: Literal["disk", "ram"], stage: Literal["train", "val", "test"], - save_smiles_and_ids: bool, ) -> Datasets.MultitaskDataset: """ Create a MultitaskDataset for the given stage using single task datasets @@ -1229,7 +1202,6 @@ def _make_multitask_dataset( Parameters: stage: Stage to create multitask dataset for - save_smiles_and_ids: Whether to save SMILES strings and 
unique IDs processed_graph_data_path: path to save and load processed graph data from """ @@ -1237,13 +1209,10 @@ def _make_multitask_dataset( assert stage in allowed_stages, f"Multitask dataset stage `{stage}` not in {allowed_stages}" if stage == "train": - singletask_datasets = self.train_singletask_datasets about = "training set" elif stage == "val": - singletask_datasets = self.val_singletask_datasets about = "validation set" elif stage == "test": - singletask_datasets = self.test_singletask_datasets about = "test set" else: raise ValueError(f"Unknown stage {stage}") @@ -1251,27 +1220,19 @@ def _make_multitask_dataset( processed_graph_data_path = self.processed_graph_data_path multitask_dataset = Datasets.MultitaskDataset( - singletask_datasets, - n_jobs=self.featurization_n_jobs, - backend=self.featurization_backend, - featurization_batch_size=self.featurization_batch_size, - progress=self.featurization_progress, about=about, - save_smiles_and_ids=save_smiles_and_ids, data_path=self._path_to_load_from_file(stage) if processed_graph_data_path else None, - dataloading_from=dataloading_from, - data_is_cached=self._data_is_cached, + featurize_smiles=self.smiles_transformer, + task_names=self.task_names, + label_num_cols=self.label_num_cols, + label_dtypes=self.label_dtypes, + mol_file_data_offsets=self.stage_data[stage][self.data_offsets_tensor_index], + concat_smiles_tensor=self.stage_data[stage][self.concat_smiles_tensor_index], + smiles_offsets_tensor=self.stage_data[stage][self.smiles_offsets_tensor_index], + num_nodes_tensor=self.stage_data[stage][self.num_nodes_tensor_index], + num_edges_tensor=self.stage_data[stage][self.num_edges_tensor_index], ) # type: ignore - # calculate statistics for the train split and used for all splits normalization - if stage == "train": - self.get_label_statistics( - self.processed_graph_data_path, self.data_hash, multitask_dataset, train=True - ) - # Normalization has already been applied in cached data - if not 
self._data_is_prepared: - self.normalize_label(multitask_dataset, stage) - return multitask_dataset def _ready_to_load_all_from_file(self) -> bool: @@ -1300,139 +1261,10 @@ def _data_ready_at_path(self, path: str) -> bool: return can_load_from_file - def _save_data_to_files(self, save_smiles_and_ids: bool = False) -> None: - """ - Save data to files so that they can be loaded from file during training/validation/test - """ - - stages = ["train", "val", "test"] - - # At the moment, we need to merge the `SingleTaskDataset`'s into `MultitaskDataset`s in order to save to file - # This is because the combined labels need to be stored together. We can investigate not doing this if this is a problem - temp_datasets = { - stage: self._make_multitask_dataset( - dataloading_from="ram", stage=stage, save_smiles_and_ids=save_smiles_and_ids - ) - for stage in stages - } - for stage in stages: - self.save_featurized_data(temp_datasets[stage], self._path_to_load_from_file(stage)) - temp_datasets[stage].save_metadata(self._path_to_load_from_file(stage)) - # self.train_ds, self.val_ds, self.test_ds will be created during `setup()` - - if self.dataloading_from == "disk": - del temp_datasets - else: - self.train_ds = temp_datasets["train"] - self.val_ds = temp_datasets["val"] - self.test_ds = temp_datasets["test"] - def get_folder_size(self, path): # check if the data items are actually saved into the folders return sum(os.path.getsize(osp.join(path, f)) for f in os.listdir(path)) - def calculate_statistics(self, dataset: Datasets.MultitaskDataset, train: bool = False): - """ - Calculate the statistics of the labels for each task, and overwrites the `self.task_norms` attribute. 
- - Parameters: - dataset: the dataset to calculate the statistics from - train: whether the dataset is the training set - - """ - - if self.task_norms and train: - for task in dataset.labels_size.keys(): - # if the label type is graph_*, we need to stack them as the tensor shape is (num_labels, ) - if task.startswith("graph"): - labels = np.stack( - np.array([datum["labels"][task] for datum in dataset if task in datum["labels"]]), - axis=0, - ) - # for other tasks with node_ and edge_, the label shape is [num_nodes/num_edges, num_labels] - # we can concatenate them directly - else: - labels = np.concatenate( - [datum["labels"][task] for datum in dataset if task in datum["labels"]], axis=0 - ) - - self.task_norms[task].calculate_statistics(labels) - - def get_label_statistics( - self, - data_path: Union[str, os.PathLike], - data_hash: str, - dataset: Datasets.MultitaskDataset, - train: bool = False, - ): - """ - Get the label statistics from the dataset, and save them to file, if needed. - `self.task_norms` will be modified in-place with the label statistics. - - Parameters: - data_path: the path to save and load the label statistics to. If None, no saving and loading will be done. - data_hash: the hash of the dataset generated by `get_data_hash()` - dataset: the dataset to calculate the statistics from - train: whether the dataset is the training set - - """ - if data_path is None: - self.calculate_statistics(dataset, train=train) - else: - path_with_hash = os.path.join(data_path, data_hash) - os.makedirs(path_with_hash, exist_ok=True) - filename = os.path.join(path_with_hash, "task_norms.pkl") - if self.task_norms and train and not os.path.isfile(filename): - self.calculate_statistics(dataset, train=train) - torch.save(self.task_norms, filename, pickle_protocol=4) - # if any of the above three condition does not satisfy, we load from file. 
- else: - self.task_norms = torch.load(filename) - - def normalize_label(self, dataset: Datasets.MultitaskDataset, stage) -> Datasets.MultitaskDataset: - """ - Normalize the labels in the dataset using the statistics in `self.task_norms`. - - Parameters: - dataset: the dataset to normalize the labels from - - Returns: - the dataset with normalized labels - """ - for task in dataset.labels_size.keys(): - # we normalize the dataset if (it is train split) or (it is val/test splits and normalize_val_test is set to true) - if (stage == "train") or (stage in ["val", "test"] and self.task_norms[task].normalize_val_test): - for i in range(len(dataset)): - if task in dataset[i]["labels"]: - dataset[i]["labels"][task] = self.task_norms[task].normalize( - dataset[i]["labels"][task] - ) - return dataset - - def save_featurized_data(self, dataset: Datasets.MultitaskDataset, processed_data_path): - os.makedirs(processed_data_path) # In case the len(dataset) is 0 - for i in range(0, len(dataset), 1000): - os.makedirs(os.path.join(processed_data_path, format(i // 1000, "04d")), exist_ok=True) - process_params = [(index, datum, processed_data_path) for index, datum in enumerate(dataset)] - - # Check if "about" is in the Dataset object - about = "" - if hasattr(dataset, "about"): - about = dataset.about - for param in tqdm(process_params, desc=f"Saving featurized data {about}"): - self.process_func(param) - return - - def process_func(self, param): - index, datum, folder = param - filename = os.path.join(folder, format(index // 1000, "04d"), format(index, "07d") + ".pkl") - torch.save( - {"graph_with_features": datum["features"], "labels": datum["labels"]}, - filename, - pickle_protocol=4, - ) - return - def get_dataloader_kwargs(self, stage: RunningStage, shuffle: bool, **kwargs) -> Dict[str, Any]: """ Get the options for the dataloader depending on the current stage. 
@@ -1514,110 +1346,6 @@ def get_collate_fn(self, collate_fn): collate_fn.__name__ = graphium_collate_fn.__name__ return collate_fn - # Cannot be used as is for the multitask version, because sample_idx does not apply. - def _featurize_molecules(self, smiles: Iterable[str]) -> Tuple[List, List]: - """ - Precompute the features (graphs, fingerprints, etc.) from the SMILES. - Features are computed from `self.smiles_transformer`. - A warning is issued to mention which molecules failed featurization. - - Note: - (hadim): in case of very large dataset we could: - - or cache the data and read from it during `next(iter(dataloader))` - - or compute the features on-the-fly during `next(iter(dataloader))` - For now we compute in advance and hold everything in memory. - - Parameters: - smiles: A list of all the molecular SMILES to featurize - sample_idx: The indexes corresponding to the sampled SMILES. - If not provided, computed from `numpy.arange`. - - Returns: - features: A list of all the featurized molecules - idx_none: A list of the indexes that failed featurization - """ - - batch_size = BatchingSmilesTransform.parse_batch_size( - numel=len(smiles), - desired_batch_size=self.featurization_batch_size, - n_jobs=self.featurization_n_jobs, - ) - - # Loop all the smiles and compute the features - features = dm.parallelized_with_batches( - BatchingSmilesTransform(self.smiles_transformer), - smiles, - batch_size=batch_size, - progress=True, - n_jobs=self.featurization_n_jobs, - backend=self.featurization_backend, - tqdm_kwargs={"desc": f"featurizing_smiles, batch={batch_size}"}, - ) - - # Warn about None molecules - idx_none = [ii for ii, feat in enumerate(features) if did_featurization_fail(feat)] - if len(idx_none) > 0: - mols_to_msg = [ - f"idx={idx} - smiles={smiles[idx]} - Error_msg[:-200]=\n{str(features[idx])[:-200]}" - for idx in idx_none - ] - msg = "\n".join(mols_to_msg) - logger.warning( - (f"{len(idx_none)} molecules will be removed since they failed 
featurization:\n" + msg) - ) - - return features, idx_none - - @staticmethod - def _filter_none_molecules( - idx_none: Iterable, - *args: Union[pd.DataFrame, pd.Series, np.ndarray, torch.Tensor, list, tuple, Dict[Any, Iterable]], - ) -> List[Union[pd.DataFrame, pd.Series, np.ndarray, torch.Tensor, list, tuple, Dict[Any, Iterable]]]: - """ - Filter the molecules, labels, etc. for the molecules that failed featurization. - - Parameters: - idx_none: A list of the indexes that failed featurization - args: Any argument from which to filter the failed SMILES. - Can be a `list`, `tuple`, `Tensor`, `np.array`, `Dict`, `pd.DataFrame`, `pd.Series`. - Otherwise, it is not filtered. - WARNING: If a `pd.DataFrame` or `pd.Series` is passed, it filters by the row indexes, - NOT by the `DataFrame.index` or `Series.index`! Be careful! - - Returns: - out: All the `args` with the indexes from `idx_none` removed. - """ - if len(idx_none) == 0: - return args - idx_none = np.asarray(idx_none) - - out = [] - for arg in args: - if isinstance(arg, pd.DataFrame): - new = arg.drop(arg.index[idx_none], axis=0) - elif isinstance(arg, pd.Series): - new = arg.drop(arg.index[idx_none], axis=0) - elif isinstance(arg, np.ndarray): - new = np.delete(arg, idx_none, axis=0) - elif isinstance(arg, torch.Tensor): - not_none = torch.ones(arg.shape[0], dtype=bool) - not_none[idx_none] = False - new = arg[not_none] - elif isinstance(arg, (list, tuple)): - arg = list(arg) - new = [elem for ii, elem in enumerate(arg) if ii not in idx_none] - elif isinstance(arg, dict): - new = {} - for key, val in arg.items(): - new[key] = MultitaskFromSmilesDataModule._filter_none_molecules(idx_none, val) # Careful - else: - new = arg - out.append(new) - - out = tuple(out) if len(out) > 1 else out[0] - - return out - def _parse_label_cols( self, df: pd.DataFrame, @@ -1735,11 +1463,6 @@ def get_fake_graph(self): return graph ########################## Private methods ###################################### - def 
_save_to_cache(self): - raise NotImplementedError() - - def _load_from_cache(self): - raise NotImplementedError() def _extract_smiles_labels( self, @@ -1752,7 +1475,7 @@ def _extract_smiles_labels( weights_col: Optional[str] = None, weights_type: Optional[str] = None, ) -> Tuple[ - np.ndarray, np.ndarray, Union[Type[None], np.ndarray], Dict[str, Union[Type[None], np.ndarray]] + np.ndarray, np.ndarray, np.ndarray, Union[Type[None], np.ndarray], Dict[str, Union[Type[None], np.ndarray]] ]: """ For a given dataframe extract the SMILES and labels columns. Smiles is returned as a list @@ -1788,17 +1511,18 @@ def _extract_smiles_labels( smiles = df[smiles_col].values if len(label_cols) > 0: if task_level == "graph": - labels = extract_labels(df, "graph", label_cols) + labels, label_offsets = extract_labels(df, "graph", label_cols) elif task_level == "node": - labels = extract_labels(df, "node", label_cols) + labels, label_offsets = extract_labels(df, "node", label_cols) elif task_level == "edge": - labels = extract_labels(df, "edge", label_cols) + labels, label_offsets = extract_labels(df, "edge", label_cols) elif task_level == "nodepair": - labels = extract_labels(df, "nodepair", label_cols) + labels, label_offsets = extract_labels(df, "nodepair", label_cols) else: raise ValueError(f"Unknown task level: {task_level}") else: labels = float("nan") + np.zeros([len(smiles), 0]) + label_offsets = None # Get the indices, used for sub-sampling and splitting the dataset if idx_col is not None: @@ -1837,7 +1561,7 @@ def _extract_smiles_labels( weights /= np.max(weights) # Put the max weight to 1 extras = {"weights": weights, "mol_ids": mol_ids} - return smiles, labels, sample_idx, extras + return smiles, labels, label_offsets, sample_idx, extras def _get_split_indices( self, @@ -1919,8 +1643,8 @@ def _get_split_indices( # Filter train, val and test indices _, train_idx, _ = np.intersect1d(sample_idx, train_indices, return_indices=True) train_indices = train_idx.tolist() - _, 
valid_idx, _ = np.intersect1d(sample_idx, val_indices, return_indices=True) - val_indices = valid_idx.tolist() + _, val_idx, _ = np.intersect1d(sample_idx, val_indices, return_indices=True) + val_indices = val_idx.tolist() _, test_idx, _ = np.intersect1d(sample_idx, test_indices, return_indices=True) test_indices = test_idx.tolist() @@ -1954,7 +1678,7 @@ def _sub_sample_df( def get_data_hash(self): """ - Get a hash specific to a dataset and smiles_transformer. + Get a hash specific to a dataset. Useful to cache the pre-processed data. """ args = {} @@ -1975,114 +1699,11 @@ def get_data_hash(self): args[task_key] = task_args hash_dict = { - "smiles_transformer": self.smiles_transformer, "task_specific_args": args, } data_hash = get_md5_hash(hash_dict) return data_hash - def get_data_cache_fullname(self, compress: bool = False) -> str: - """ - Create a hash for the dataset, and use it to generate a file name - - Parameters: - compress: Whether to compress the data - Returns: - full path to the data cache file - """ - if self.processed_graph_data_path is None: - return - ext = ".datacache" - if compress: - ext += ".gz" - data_cache_fullname = fs.join(self.processed_graph_data_path, self.data_hash + ext) - return data_cache_fullname - - def load_data_from_cache(self, verbose: bool = True, compress: bool = False) -> bool: - """ - Load the datasets from cache. First create a hash for the dataset, and verify if that - hash is available at the path given by `self.processed_graph_data_path`. - - Parameters: - verbose: Whether to print the progress - compress: Whether to compress the data - - Returns: - cache_data_exists: Whether the cache exists (if the hash matches) and the loading succeeded - """ - full_cache_data_path = self.get_data_cache_fullname(compress=compress) - - if full_cache_data_path is None: - logger.info("No cache data path specified. 
Skipping loading the data from cache.") - return False - - cache_data_exists = fs.exists(full_cache_data_path) - - if cache_data_exists: - try: - logger.info(f"Loading the data from cache at path `{full_cache_data_path}`") - now = time.time() - with fsspec.open(full_cache_data_path, mode="rb", compression="infer") as file: - load_params = torch.load(file) - self.__dict__.update(load_params) - ( - self.train_singletask_datasets, - self.val_singletask_datasets, - self.test_singletask_datasets, - ) = self.get_subsets_of_datasets( - self.single_task_datasets, - self.task_train_indices, - self.task_val_indices, - self.task_test_indices, - ) - elapsed = round(time.time() - now) - logger.info( - f"Successfully loaded the data from cache in {elapsed}s at path: `{full_cache_data_path}`" - ) - return True - except Exception as e: - if verbose: - logger.warning( - f"Data cache failed to load path: `{full_cache_data_path}`.\nThe data will be prepared and cache will be created for future runs." - ) - logger.warning(e.__str__()) - return False - else: - if verbose: - logger.info( - f"Data cache not found at path: `{full_cache_data_path}`.\nThe data will be prepared and cache will be created for future runs." 
- ) - return False - - def get_subsets_of_datasets( - self, - single_task_datasets: Dict[str, Datasets.SingleTaskDataset], - task_train_indices: Dict[str, Iterable], - task_val_indices: Dict[str, Iterable], - task_test_indices: Dict[str, Iterable], - ) -> Tuple[Subset, Subset, Subset]: - """ - From a dictionary of datasets and their associated indices, subset the train/val/test sets - - Parameters: - single_task_datasets: Dictionary of datasets - task_train_indices: Dictionary of train indices - task_val_indices: Dictionary of val indices - task_test_indices: Dictionary of test indices - Returns: - train_singletask_datasets: Dictionary of train subsets - val_singletask_datasets: Dictionary of val subsets - test_singletask_datasets: Dictionary of test subsets - """ - train_singletask_datasets = {} - val_singletask_datasets = {} - test_singletask_datasets = {} - for task in task_train_indices.keys(): - train_singletask_datasets[task] = Subset(single_task_datasets[task], task_train_indices[task]) - val_singletask_datasets[task] = Subset(single_task_datasets[task], task_val_indices[task]) - test_singletask_datasets[task] = Subset(single_task_datasets[task], task_test_indices[task]) - return train_singletask_datasets, val_singletask_datasets, test_singletask_datasets - def __len__(self) -> int: r""" Returns the number of elements of the current DataModule, which is the combined size of all single-task datasets given. 
@@ -2138,7 +1759,6 @@ def __init__( self, task_specific_args: Dict[str, Union[DatasetProcessingParams, Dict[str, Any]]], processed_graph_data_path: Optional[Union[str, os.PathLike]] = None, - dataloading_from: str = "ram", featurization: Optional[Union[Dict[str, Any], omegaconf.DictConfig]] = None, batch_size_training: int = 16, batch_size_inference: int = 16, @@ -2147,9 +1767,6 @@ def __init__( pin_memory: bool = True, persistent_workers: bool = False, multiprocessing_context: Optional[str] = None, - featurization_n_jobs: int = -1, - featurization_progress: bool = False, - featurization_backend: str = "loky", collate_fn: Optional[Callable] = None, prepare_dict_or_graph: str = "pyg:graph", **kwargs, @@ -2168,16 +1785,12 @@ def __init__( meaning that all molecules will be considered. processed_graph_data_path: Path to the processed graph data. If None, the data will be downloaded from the OGB website. - dataloading_from: Whether to load the data from RAM or disk. Default is "ram". featurization: args to apply to the SMILES to Graph featurizer. batch_size_training: batch size for training and val dataset. batch_size_inference: batch size for test dataset. num_workers: Number of workers for the dataloader. Use -1 to use all available cores. pin_memory: Whether to pin on paginated CPU memory for the dataloader. - featurization_n_jobs: Number of cores to use for the featurization. - featurization_progress: whether to show a progress bar during featurization. - featurization_backend: The backend to use for the molecular featurization. - "multiprocessing": Found to cause less memory issues. - "loky": joblib's Default. Found to cause memory leaks. 
@@ -2214,17 +1827,12 @@ def __init__( dm_args = {} dm_args["task_specific_args"] = new_task_specific_args dm_args["processed_graph_data_path"] = processed_graph_data_path - dm_args["dataloading_from"] = dataloading_from - dm_args["dataloader_from"] = dataloading_from dm_args["featurization"] = featurization dm_args["batch_size_training"] = batch_size_training dm_args["batch_size_inference"] = batch_size_inference dm_args["batch_size_per_pack"] = batch_size_per_pack dm_args["num_workers"] = num_workers dm_args["pin_memory"] = pin_memory - dm_args["featurization_n_jobs"] = featurization_n_jobs - dm_args["featurization_progress"] = featurization_progress - dm_args["featurization_backend"] = featurization_backend dm_args["persistent_workers"] = persistent_workers dm_args["multiprocessing_context"] = multiprocessing_context dm_args["collate_fn"] = collate_fn @@ -2400,7 +2008,6 @@ def __init__( tdc_train_val_seed: int = 0, # Inherited arguments from superclass processed_graph_data_path: Optional[Union[str, Path]] = None, - dataloading_from: str = "ram", featurization: Optional[Union[Dict[str, Any], omegaconf.DictConfig]] = None, batch_size_training: int = 16, batch_size_inference: int = 16, @@ -2409,9 +2016,6 @@ def __init__( pin_memory: bool = True, persistent_workers: bool = False, multiprocessing_context: Optional[str] = None, - featurization_n_jobs: int = -1, - featurization_progress: bool = False, - featurization_backend: str = "loky", collate_fn: Optional[Callable] = None, prepare_dict_or_graph: str = "pyg:graph", **kwargs, @@ -2460,7 +2064,6 @@ def __init__( task_specific_args=task_specific_args, featurization=featurization, processed_graph_data_path=processed_graph_data_path, - dataloading_from=dataloading_from, batch_size_training=batch_size_training, batch_size_inference=batch_size_inference, batch_size_per_pack=batch_size_per_pack, @@ -2468,9 +2071,6 @@ def __init__( pin_memory=pin_memory, persistent_workers=persistent_workers, 
multiprocessing_context=multiprocessing_context, - featurization_n_jobs=featurization_n_jobs, - featurization_progress=featurization_progress, - featurization_backend=featurization_backend, collate_fn=collate_fn, prepare_dict_or_graph=prepare_dict_or_graph, **kwargs, @@ -2676,6 +2276,7 @@ def prepare_data(self): ) # Convert SMILES to features features, _ = self._featurize_molecules(all_smiles) + # FIXME: What is task supposed to be here? task_dataset_args[task]["features"] = features """Filter data based on molecules which failed featurization. Create single task datasets as well.""" self.single_task_datasets = {} diff --git a/graphium/data/dataset.py b/graphium/data/dataset.py index 34c1b30aa..064f3b5ad 100644 --- a/graphium/data/dataset.py +++ b/graphium/data/dataset.py @@ -1,12 +1,12 @@ """ -------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. +Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates. Use of this software is subject to the terms and conditions outlined in the LICENSE file. Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without warranties of any kind. -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. +Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates are not liable for any damages arising from its use. Refer to the LICENSE file for the full terms and conditions. 
-------------------------------------------------------------------------------- """ @@ -17,6 +17,7 @@ from functools import lru_cache from multiprocessing import Manager from typing import Any, Dict, List, Optional, Tuple, Union +from collections.abc import Callable import fsspec import numpy as np @@ -26,9 +27,10 @@ from torch.utils.data.dataloader import Dataset from torch_geometric.data import Batch, Data -from graphium.data.smiles_transform import smiles_to_unique_mol_ids from graphium.features import GraphDict +import graphium_cpp + class SingleTaskDataset(Dataset): def __init__( @@ -152,178 +154,48 @@ class MultitaskDataset(Dataset): def __init__( self, - datasets: Dict[str, SingleTaskDataset], - n_jobs=-1, - backend: str = "loky", - featurization_batch_size=1000, - progress: bool = True, - save_smiles_and_ids: bool = False, + featurize_smiles: Callable[[str],dict], + task_names: List[str], + label_num_cols: List[int], + label_dtypes: List[int], + mol_file_data_offsets, + concat_smiles_tensor, + smiles_offsets_tensor, + num_nodes_tensor, + num_edges_tensor, about: str = "", data_path: Optional[Union[str, os.PathLike]] = None, - dataloading_from: str = "ram", - data_is_cached: bool = False, ): r""" This class holds the information for the multitask dataset. - Several single-task datasets can be merged to create a multi-task dataset. After merging the dictionary of single-task datasets. we will have a multitask dataset of the following form: - - self.mol_ids will be a list to contain the unique molecular IDs to identify the molecules - - self.smiles will be a list to contain the corresponding smiles for that molecular ID across all single-task datasets - - self.labels will be a list of dictionaries where the key is the task name and the value is the label(s) for that task. - At this point, any particular molecule will only have entries for tasks for which it has a label. Later, in the collate - function, we fill up the missing task labels with NaNs. 
- - self.features will be a list of featurized graphs corresponding to that particular unique molecule. - However, for testing purposes we may not require features so that we can make sure that this merge function works. + - self.mol_file_data_offsets will be a Tensor representing where to find + label data about each molecule in the corresponding file + - self.smiles_tensor will be a Tensor containing all smiles strings concatenated, with null terminators + - self.smiles_offsets_tensor will be a Tensor indicating where smiles strings start in smiles_tensor + - self.num_nodes_tensor will be a Tensor of the number of nodes in each graph + - self.num_edges_tensor will be a Tensor of the number of edges in each graph Parameters: - datasets: A dictionary of single-task datasets - n_jobs: Number of jobs to run in parallel - backend: Parallelization backend - featurization_batch_size: The batch size to use for the parallelization of the featurization - progress: Whether to display the progress bar - save_smiles_and_ids: Whether to save the smiles and ids for the dataset. 
If `False`, `mol_ids` and `smiles` are set to `None` about: A description of the dataset data_path: The location of the data if saved on disk - dataloading_from: Whether to load the data from `"disk"` or `"ram"` - data_is_cached: Whether the data is already cached on `"disk"` """ super().__init__() - self.n_jobs = n_jobs - self.backend = backend - self.featurization_batch_size = featurization_batch_size - self.progress = progress + self.about = about - self.save_smiles_and_ids = save_smiles_and_ids self.data_path = data_path - self.dataloading_from = dataloading_from - - logger.info(f"Dataloading from {dataloading_from.upper()}") - - if data_is_cached: - self._load_metadata() - - if dataloading_from == "disk": - self.features = None - self.labels = None - elif dataloading_from == "ram": - logger.info(f"Transferring {about} from DISK to RAM...") - self.transfer_from_disk_to_ram() - - else: - task = next(iter(datasets)) - self.features = None - if (len(datasets[task]) > 0) and ("features" in datasets[task][0]): - self.mol_ids, self.smiles, self.labels, self.features = self.merge(datasets) - else: - self.mol_ids, self.smiles, self.labels = self.merge(datasets) - # Set mol_ids and smiles to None to save memory as they are not needed. 
- if not save_smiles_and_ids: - self.mol_ids = None - self.smiles = None - self.labels_size = self.set_label_size_dict(datasets) - self.labels_dtype = self.set_label_dtype_dict(datasets) - self.dataset_length = len(self.labels) - self._num_nodes_list = None - self._num_edges_list = None - if self.features is not None: - self._num_nodes_list = get_num_nodes_per_graph(self.features) - self._num_edges_list = get_num_edges_per_graph(self.features) - - def transfer_from_disk_to_ram(self, parallel_with_batches: bool = False): - """ - Function parallelizing transfer from DISK to RAM - """ - - def transfer_mol_from_disk_to_ram(idx): - """ - Function transferring single mol from DISK to RAM - """ - data_dict = self.load_graph_from_index(idx) - mol_in_ram = { - "features": data_dict["graph_with_features"], - "labels": data_dict["labels"], - } - - return mol_in_ram - - if parallel_with_batches and self.featurization_batch_size: - data_in_ram = parallelized_with_batches( - transfer_mol_from_disk_to_ram, - range(self.dataset_length), - batch_size=self.featurization_batch_size, - n_jobs=0, - backend=self.backend, - progress=self.progress, - tqdm_kwargs={"desc": "Transfer from DISK to RAM"}, - ) - else: - data_in_ram = parallelized( - transfer_mol_from_disk_to_ram, - range(self.dataset_length), - n_jobs=0, - backend=self.backend, - progress=self.progress, - tqdm_kwargs={"desc": "Transfer from DISK to RAM"}, - ) - - self.features = [sample["features"] for sample in data_in_ram] - self.labels = [sample["labels"] for sample in data_in_ram] - - def save_metadata(self, directory: str): - """ - Save everything other than features/labels - """ - attrs_to_save = [ - "mol_ids", - "smiles", - "labels_size", - "labels_dtype", - "dataset_length", - "_num_nodes_list", - "_num_edges_list", - ] - attrs = {attr: getattr(self, attr) for attr in attrs_to_save} - - path = os.path.join(directory, "multitask_metadata.pkl") - - torch.save(attrs, path, pickle_protocol=4) - - def _load_metadata(self): - 
""" - Load everything other than features/labels - """ - attrs_to_load = [ - "mol_ids", - "smiles", - "labels_size", - "labels_dtype", - "dataset_length", - "_num_nodes_list", - "_num_edges_list", - ] - path = os.path.join(self.data_path, "multitask_metadata.pkl") - with fsspec.open(path, "rb") as f: - attrs = torch.load(path) - - if not set(attrs_to_load).issubset(set(attrs.keys())): - raise ValueError( - f"The metadata in the cache at {self.data_path} does not contain the right information. " - f"This may be because the cache was prepared using an earlier version of Graphium. " - f"You can try deleting the cache and running the data preparation again. " - f"\nMetadata keys found: {attrs.keys()}" - f"\nMetadata keys required: {attrs_to_load}" - ) - - for attr, value in attrs.items(): - setattr(self, attr, value) - - if self.save_smiles_and_ids: - if self.smiles is None or self.mol_ids is None: - logger.warning( - f"Argument `save_smiles_and_ids` is set to {self.save_smiles_and_ids} but metadata in the cache at {self.data_path} does not contain smiles and mol_ids. " - f"This may be because `Datamodule.prepare_data(save_smiles_and_ids=False)` was run followed by `Datamodule.setup(save_smiles_and_ids=True)`. " - f"When loading from cached files, the `save_smiles_and_ids` argument of `Datamodule.setup()` is superseeded by the `Datamodule.prepare_data()`. 
" - ) + self.featurize_smiles = featurize_smiles + self.task_names = task_names + self.label_num_cols = label_num_cols + self.label_dtypes = label_dtypes + self.mol_file_data_offsets = mol_file_data_offsets + self.smiles_tensor = concat_smiles_tensor + self.smiles_offsets_tensor = smiles_offsets_tensor + self.num_nodes_tensor = num_nodes_tensor + self.num_edges_tensor = num_edges_tensor + self.dataset_length = num_nodes_tensor.size(dim=0) + + logger.info(f"Dataloading from DISK") def __len__(self): r""" @@ -336,24 +208,14 @@ def num_nodes_list(self): """ The number of nodes per graph """ - if self._num_nodes_list is None: - if len(self) == 0: - self._num_nodes_list = [] - else: - self._num_nodes_list = get_num_nodes_per_graph(self.features) - return self._num_nodes_list + return self.num_nodes_tensor @property def num_edges_list(self): """ The number of edges per graph """ - if self._num_edges_list is None: - if len(self) == 0: - self._num_edges_list = [] - else: - self._num_edges_list = get_num_edges_per_graph(self.features) - return self._num_edges_list + return self.num_edges_tensor @property def num_graphs_total(self): @@ -367,28 +229,30 @@ def num_nodes_total(self): """Total number of nodes for all graphs""" if len(self) == 0: return - return sum(self.num_nodes_list) + return torch.sum(self.num_nodes_list, dtype=torch.int64).item() @property def max_num_nodes_per_graph(self): """Maximum number of nodes per graph""" if len(self) == 0: return - return max(self.num_nodes_list) + return torch.max(self.num_nodes_list).item() @property def std_num_nodes_per_graph(self): """Standard deviation of number of nodes per graph""" if len(self) == 0: return - return np.std(self.num_nodes_list) + # correction is zero to match previous default behaviour of numpy.std + # Consider changing it to 1 (the torch.std default) + return torch.std(self.num_nodes_list.to(torch.float64), correction=0).item() @property def min_num_nodes_per_graph(self): """Minimum number of nodes per 
graph""" if len(self) == 0: return - return min(self.num_nodes_list) + return torch.min(self.num_nodes_list).item() @property def mean_num_nodes_per_graph(self): @@ -402,28 +266,30 @@ def num_edges_total(self): """Total number of edges for all graphs""" if len(self) == 0: return - return sum(self.num_edges_list) + return torch.sum(self.num_edges_list, dtype=torch.int64).item() @property def max_num_edges_per_graph(self): """Maximum number of edges per graph""" if len(self) == 0: return - return max(self.num_edges_list) + return torch.max(self.num_edges_list).item() @property def min_num_edges_per_graph(self): """Minimum number of edges per graph""" if len(self) == 0: return - return min(self.num_edges_list) + return torch.min(self.num_edges_list).item() @property def std_num_edges_per_graph(self): """Standard deviation of number of nodes per graph""" if len(self) == 0: return - return np.std(self.num_edges_list) + # correction is zero to match previous default behaviour of numpy.std + # Consider changing it to 1 (the torch.std default) + return torch.std(self.num_edges_list.to(torch.float64), correction=0).item() @property def mean_num_edges_per_graph(self): @@ -438,27 +304,23 @@ def __getitem__(self, idx): Parameters: idx: The index of the data to retrieve Returns: - A dictionary containing the data for the specified index with keys "mol_ids", "smiles", "labels", and "features" + A dictionary containing the data for the specified index with keys "labels", "num_nodes", "num_edges", and "features" """ - datum = {} - if self.dataloading_from == "disk": - data_dict = self.load_graph_from_index(idx) - datum["features"] = data_dict["graph_with_features"] - datum["labels"] = data_dict["labels"] - if "smiles" in data_dict.keys(): - datum["smiles"] = data_dict["smiles"] - else: - if self.mol_ids is not None: - datum["mol_ids"] = self.mol_ids[idx] + if self.smiles_tensor is None or self.smiles_offsets_tensor is None: + raise ValueError("Missing smiles in 
MultitaskDataset.__getitem__") - if self.smiles is not None: - datum["smiles"] = self.smiles[idx] + smiles_str = graphium_cpp.extract_string(self.smiles_tensor, self.smiles_offsets_tensor, idx) - if self.labels is not None: - datum["labels"] = self.labels[idx] + datum = { + "labels": self.load_graph_from_index(idx), + "features": self.featurize_smiles(smiles_str), + } - if self.features is not None: - datum["features"] = self.features[idx] + # One of the featurization error handling options returns a string on error, + # instead of throwing an exception, so assume that the intention is to just skip, + # instead of crashing. + if isinstance(datum["features"], str): + datum = None return datum @@ -468,165 +330,15 @@ def load_graph_from_index(self, data_idx): Parameters: data_idx: The index of the data to retrieve Returns: - A dictionary containing the data for the specified index with keys "graph_with_features", "labels" and "smiles" (optional). - """ - filename = os.path.join( - self.data_path, format(data_idx // 1000, "04d"), format(data_idx, "07d") + ".pkl" - ) - with fsspec.open(filename, "rb") as f: - data_dict = torch.load(f) - return data_dict - - def merge( - self, datasets: Dict[str, SingleTaskDataset] - ) -> Tuple[List[str], List[str], List[Dict[str, Any]], List[Any]]: - r"""This function merges several single task datasets into a multitask dataset. - - The idea: for each of the smiles, labels, features and tasks, we create a corresponding list that concatenates these items across all tasks. - In particular, for any index, the elements in the smiles, labels, features and task lists at that index will correspond to each other (i.e. match up). - Over this list of all smiles (which we created by concatenating the smiles across all tasks), we compute their molecular ID using functions from Datamol. - Once again, we will have a list of molecular IDs which is the same size as the list of smiles, labels, features and tasks. 
- We then use numpy's `unique` function to find the exact list of unique molecular IDs as these will identify the molecules in our dataset. We also get the - inverse from numpy's `unique`, which will allow us to index in addition to the list of all molecular IDs, the list of all smiles, labels, features and tasks. - Finally, we use this inverse to construct the list of list of smiles, list of label dictionaries (indexed by task) and the list of features such that - the indices match up. This is what is needed for the `get_item` function to work. - - Parameters: - datasets: A dictionary of single-task datasets - Returns: - A tuple of (list of molecular IDs, list of smiles, list of label dictionaries, list of features) - """ - - # Get all the smiles, labels, features and tasks. - all_lists = self._get_all_lists_ids(datasets=datasets) - mol_ids, inv = self._get_inv_of_mol_ids(all_mol_ids=all_lists["mol_ids"]) - - # Store the smiles. - smiles = [[] for _ in range(len(mol_ids))] - for all_idx, unique_idx in enumerate(inv): - smiles[unique_idx].append(all_lists["smiles"][all_idx]) - - # Store the labels. 
- labels = [Data() for _ in range(len(mol_ids))] - for all_idx, unique_idx in enumerate(inv): - task: str = all_lists["tasks"][all_idx] - label = all_lists["labels"][all_idx] - labels[unique_idx][task] = label - - if all_idx < len(all_lists["features"]): - features = all_lists["features"][all_idx] - labels[unique_idx]["x"] = torch.empty( - (features.num_nodes, 1) - ) # IPU is not happy with zero-sized tensors, so use shape (features.num_nodes, 1) here - labels[unique_idx]["edge_index"] = torch.empty((2, features.num_edges)) - - # Store the features - if len(all_lists["features"]) > 0: - features = [-1 for i in range(len(mol_ids))] - for all_idx, unique_idx in enumerate(inv): - features[unique_idx] = all_lists["features"][all_idx] - return mol_ids, smiles, labels, features - else: - return mol_ids, smiles, labels - - def _get_all_lists_ids(self, datasets: Dict[str, SingleTaskDataset]) -> Dict[str, Any]: - all_smiles = [] - all_features = [] - all_labels = [] - all_mol_ids = [] - all_tasks = [] - - for task, ds in datasets.items(): - if len(ds) == 0: - continue - # Get data from single task dataset - ds_smiles = [ds[i]["smiles"] for i in range(len(ds))] - ds_labels = [ds[i]["labels"] for i in range(len(ds))] - if "unique_ids" in ds[0].keys(): - ds_mol_ids = [ds[i]["unique_ids"] for i in range(len(ds))] - else: - ds_mol_ids = smiles_to_unique_mol_ids( - ds_smiles, - n_jobs=self.n_jobs, - featurization_batch_size=self.featurization_batch_size, - backend=self.backend, - progress=self.progress, - progress_desc=f"{task}: mol to ids", - ) - if "features" in ds[0]: - ds_features = [ds[i]["features"] for i in range(len(ds))] - else: - ds_features = None - all_smiles.extend(ds_smiles) - all_labels.extend(ds_labels) - all_mol_ids.extend(ds_mol_ids) - if ds_features is not None: - all_features.extend(ds_features) - - task_list = [task] * ds.__len__() - all_tasks.extend(task_list) - - all_lists = { - "smiles": all_smiles, - "features": all_features, - "labels": all_labels, - 
"mol_ids": all_mol_ids, - "tasks": all_tasks, - } - - return all_lists - - def _get_inv_of_mol_ids(self, all_mol_ids): - mol_ids, inv = np.unique(all_mol_ids, return_inverse=True) - return mol_ids, inv - - def _find_valid_label(self, task, ds): - r""" - For a given dataset, find a genuine label for that dataset + A Data object containing the data for the specified index with keys corresponding to the tasks. """ - valid_label = None - for i in range(len(ds)): - if ds[i] is not None: - valid_label = ds[i]["labels"] - break - - if valid_label is None: - raise ValueError(f"Dataset for task {task} has no valid labels.") + labels = {} + graphium_cpp.load_labels_from_index(self.data_path, data_idx, self.mol_file_data_offsets, self.task_names, self.label_num_cols, self.label_dtypes, labels) + data_dict = Data() + for task, values in labels.items(): + data_dict[task] = values - return valid_label - - def set_label_size_dict(self, datasets: Dict[str, SingleTaskDataset]): - r""" - This gives the number of labels to predict for a given task. - """ - task_labels_size = {} - for task, ds in datasets.items(): - if len(ds) == 0: - continue - - valid_label = self._find_valid_label(task, ds) - - # Assume for a fixed task, the label dimension is the same across data points - torch_label = torch.as_tensor(valid_label) - - # First dimension is graph-specific - task_labels_size[task] = torch_label.size() - return task_labels_size - - def set_label_dtype_dict(self, datasets: Dict[str, SingleTaskDataset]): - r""" - Gets correct dtype for a given label - """ - task_labels_dtype = {} - for task, ds in datasets.items(): - if len(ds) == 0: - continue - - valid_label = self._find_valid_label(task, ds) - - torch_label = torch.as_tensor(valid_label) - task_labels_dtype[task] = torch_label.dtype - return task_labels_dtype + return data_dict def __repr__(self) -> str: """ @@ -643,11 +355,6 @@ def __repr__(self) -> str: ) return out_str - # Faster to compute the statistics if we unbatch first. 
- features = self.features - if isinstance(self.features, Batch): - self.features = self.features.to_data_list() - out_str = ( f"-------------------\n{self.__class__.__name__}\n" + f"\tabout = {self.about}\n" @@ -665,9 +372,6 @@ def __repr__(self) -> str: + f"-------------------\n" ) - # Restore the original features. - self.features = features - return out_str @@ -696,6 +400,8 @@ def __init__( self.about = "FakeDatasets" task = next(iter(datasets)) + self.features = None + self.labels = None if "features" in datasets[task][0]: self.mol_ids, self.smiles, self.labels, self.features = self.merge(datasets) if self.indexing_same_elem is False: @@ -709,9 +415,8 @@ def __init__( self.mol_ids, self.smiles, self.labels ) - self.labels_size = self.set_label_size_dict(datasets) - self.labels_dtype = self.set_label_dtype_dict(datasets) - self.features = self.features + self.label_num_cols = self.set_label_num_cols(datasets) + self.label_dtypes = self.set_label_dtype_dict(datasets) def _get_inv_of_mol_ids(self, all_mol_ids): # The generated data is a single molecule duplicated @@ -771,6 +476,30 @@ def __getitem__(self, idx): return datum +def torch_enum_to_dtype(v: Union[int, torch.dtype]): + if isinstance(v, torch.dtype): + return v + + mapping = [ + torch.uint8, + torch.int8, + torch.int16, + torch.int32, + torch.int64, + torch.float16, + torch.float32, + torch.float64, + torch.complex32, + torch.complex64, + torch.complex128, + torch.bool, + torch.qint8, + torch.quint8, + torch.qint32, + torch.bfloat16, + torch.quint4x2 + ] + return mapping[v] if (v >= 0 and v < len(mapping)) else None def get_num_nodes_per_graph(graphs): r""" diff --git a/graphium/data/multilevel_utils.py b/graphium/data/multilevel_utils.py index 7f9ed5813..3cfa4e49d 100644 --- a/graphium/data/multilevel_utils.py +++ b/graphium/data/multilevel_utils.py @@ -1,12 +1,12 @@ """ -------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion 
Pharmaceuticals and Graphcore Limited. +Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates. Use of this software is subject to the terms and conditions outlined in the LICENSE file. Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without warranties of any kind. -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. +Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates are not liable for any damages arising from its use. Refer to the LICENSE file for the full terms and conditions. -------------------------------------------------------------------------------- """ @@ -21,45 +21,113 @@ def extract_labels(df: pd.DataFrame, task_level: str, label_cols: List[str]): - """Extracts labels in label_cols from dataframe df for a given task_level. - Returns a list of numpy arrays converted to the correct shape. Multiple - targets are concatenated for each graph. + """Extracts the labels specified by label_cols from dataframe df. + If task_level is "graph", each entry in df must be a single numeric value, + and this function returns a single, 2D numpy array containing the data. + If task_level is something else, each entry in df must be a numpy array, + python list, or single numeric value, and this function returns both a 2D + numpy array of data and a 1D numpy array of integers indicating the row + number in the first array where each molecule's data starts, with an extra + integer at the end that should equal the total number of rows in the first + array. The first array can have type float16, float32, or float64, + depending on the largest precision of input data, and arrays of varying + sizes across columns are padded with nan values, so that a single molecule + occupies a fixed number of rows and len(label_cols) columns. 
""" - def unpack(graph_data): - graph_data = pd.to_numeric(graph_data, errors="coerce") - if isinstance(graph_data, str): - graph_data_list = ast.literal_eval(graph_data) - return np.array(graph_data_list) - elif isinstance(graph_data, (int, float)): - return np.array([graph_data]) - elif isinstance(graph_data, list): - return np.array(graph_data) - elif isinstance(graph_data, np.ndarray): - if len(graph_data.shape) == 0: - graph_data = np.expand_dims(graph_data, 0) - if graph_data.shape[0] == 0: - graph_data = np.array([np.nan]) - # TODO: Warning - return graph_data - else: - raise ValueError( - f"Graph data should be one of str, float, int, list, np.ndarray, got {type(graph_data)}" - ) - - def unpack_column(data: pd.Series): - return data.apply(unpack) - - def merge_columns(data: pd.Series): - data = data.to_list() - data = [np.array([np.nan]) if not isinstance(d, np.ndarray) and math.isnan(d) else d for d in data] - padded_data = itertools.zip_longest(*data, fillvalue=np.nan) - data = np.stack(list(padded_data), 1).T - return data - - unpacked_df: pd.DataFrame = df[label_cols].apply(unpack_column) - output = unpacked_df.apply(merge_columns, axis="columns").to_list() + num_rows = df.shape[0] + num_cols = len(label_cols) if task_level == "graph": - return np.concatenate(output) - return output + output = np.empty((num_rows,num_cols), dtype=np.float64) + + for col_index, col in enumerate(label_cols): + for i, v in enumerate(df[col]): + if isinstance(v, float): + output[i, col_index] = v + continue + + v = pd.to_numeric(v, errors="coerce") + + if isinstance(v, (int, float)): + output[i, col_index] = v + + else: + raise ValueError( + f"Graph data should be one of float or int, got {type(v)}" + ) + + return output, None + + # First, find the max length of each row (likely the number of nodes or edges) + # +1 is for the cumulative sum below + begin_offsets = np.zeros((num_rows+1,), dtype=np.int64) + max_type = np.float16 + for col in label_cols: + for i, v in 
enumerate(df[col]): + if not isinstance(v, np.ndarray) and not isinstance(v, (int, float, list)): + v = pd.to_numeric(v, errors="coerce") + length = 0 + if isinstance(v, np.ndarray): + length = v.shape[0] if len(v.shape) == 1 else 0 + dtype = v.dtype + if dtype == np.float64: + max_type = np.float64 + elif dtype == np.float32 and max_type == np.float16: + max_type = np.float32 + elif isinstance(v, (int, float)): + length = 1 + max_type = np.float64 + elif isinstance(v, list): + length = len(v) + max_type = np.float64 + else: + raise ValueError( + f"Graph data should be one of float, int, list, np.ndarray, got {type(v)}" + ) + # The +1 is so that the cumulative sum below gives the beginning offsets + begin_offsets[i+1] = max(begin_offsets[i+1], length) + + begin_offsets = np.cumsum(begin_offsets) + full_num_rows = begin_offsets[-1] + + output = np.empty((full_num_rows,num_cols), dtype=max_type) + + # Now, fill in the values + for col_index, col in enumerate(label_cols): + for i, v in enumerate(df[col]): + full_row = begin_offsets[i] + + if not isinstance(v, np.ndarray): + v = pd.to_numeric(v, errors="coerce") + + if isinstance(v, np.ndarray): + length = v.shape[0] if len(v.shape) == 1 else 0 + for j in range(length): + output[full_row + j, col_index] = v[j] + if full_row + length != begin_offsets[i+1]: + for j in range(full_row, begin_offsets[i+1]): + output[j, col_index] = np.nan + + elif isinstance(v, (int, float)): + output[full_row, col_index] = v + # Fill the rest of the rows in the column with nan + end_row = begin_offsets[i+1] + if end_row != full_row+1: + for row in range(full_row+1, end_row): + output[row, col_index] = np.nan + + elif isinstance(v, list): + length = len(v) + for j in range(length): + output[full_row + j, col_index] = v[j] + if full_row + length != begin_offsets[i+1]: + for j in range(full_row, begin_offsets[i+1]): + output[j, col_index] = np.nan + + else: + raise ValueError( + f"Graph data should be one of float, int, list, np.ndarray, got 
{type(v)}" + ) + + return output, begin_offsets diff --git a/graphium/data/normalization.py b/graphium/data/normalization.py index 994e8939b..e57a9bcc8 100644 --- a/graphium/data/normalization.py +++ b/graphium/data/normalization.py @@ -57,6 +57,12 @@ def __init__( self.data_mean = None self.data_std = None + def set_statistics(self, data_min, data_max, data_mean, data_std): + self.data_min = data_min + self.data_max = data_max + self.data_mean = data_mean + self.data_std = data_std + def calculate_statistics(self, array): """ Saves the normalization parameters (e.g. mean and variance) to the object. diff --git a/graphium/features/featurizer.py b/graphium/features/featurizer.py index 8d8e18159..7f7c4b27b 100644 --- a/graphium/features/featurizer.py +++ b/graphium/features/featurizer.py @@ -1,12 +1,12 @@ """ -------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. +Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates. Use of this software is subject to the terms and conditions outlined in the LICENSE file. Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without warranties of any kind. -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. +Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates are not liable for any damages arising from its use. Refer to the LICENSE file for the full terms and conditions. 
-------------------------------------------------------------------------------- """ @@ -30,6 +30,7 @@ from graphium.utils.tensor import one_of_k_encoding from graphium.features.positional_encoding import get_all_positional_encodings +import graphium_cpp def to_dense_array(array: np.ndarray, dtype: str = None) -> np.ndarray: r""" @@ -644,6 +645,7 @@ def mol_to_adj_and_features( pos_encoding_as_features: Dict[str, Any] = None, dtype: np.dtype = np.float16, mask_nan: Union[str, float, type(None)] = "raise", + use_graphium_cpp: bool = False, ) -> Union[ coo_matrix, Union[Tensor, None], @@ -831,7 +833,7 @@ def mol_to_adjacency_matrix( adj_idx.append([bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()]) adj_idx.append([bond.GetEndAtomIdx(), bond.GetBeginAtomIdx()]) if use_bonds_weights: - val = nmp.BOND_TYPES[bond.GetBondType()] + val = bond.GetBondTypeAsDouble() else: val = 1 adj_val.extend([val, val]) @@ -967,21 +969,26 @@ def num_edges(self) -> int: else: return np.count_nonzero(self.adj) # No division by 2 because edges are counted twice +# These are the integers that correspond with the torch data types in C++ +NP_DTYPE_TO_TORCH_INT = {np.float16: 5, np.float32: 6, np.float64: 7} def mol_to_graph_dict( - mol: dm.Mol, - atom_property_list_onehot: List[str] = [], - atom_property_list_float: List[Union[str, Callable]] = [], + mol: Union[str, dm.Mol], + atom_property_list_onehot: Union[List[str],torch.Tensor] = [], + atom_property_list_float: Union[List[Union[str, Callable]],torch.Tensor] = [], conformer_property_list: List[str] = [], - edge_property_list: List[str] = [], + edge_property_list: Union[List[str],torch.Tensor] = [], add_self_loop: bool = False, explicit_H: bool = False, use_bonds_weights: bool = False, - pos_encoding_as_features: Dict[str, Any] = None, + pos_encoding_as_features: Union[Dict[str, Any], Tuple[List[str],torch.Tensor]] = None, dtype: np.dtype = np.float16, on_error: str = "ignore", mask_nan: Union[str, float, type(None)] = "raise", max_num_atoms: 
Optional[int] = None, + use_graphium_cpp: bool = False, + original_featurization: Optional[Dict[str, Any]] = None, + output_pyg_graph = False ) -> Union[GraphDict, str]: r""" Transforms a molecule into an adjacency matrix representing the molecular graph @@ -1070,33 +1077,114 @@ def mol_to_graph_dict( input_mol = mol try: - if isinstance(mol, str): - mol = dm.to_mol(mol, ordered=True) - if explicit_H: - mol = Chem.AddHs(mol) + if use_graphium_cpp: + if not isinstance(mol, str): + raise ValueError(f"use_graphium_cpp option requires that molecule be received as a string in mol_to_graph_dict, not type "+str(type(mol))) + has_conformer = ('positions_3d' in conformer_property_list) + pe_index = 4 + if has_conformer: + pe_index = 5; + mask_nan_value = 0.0 + if mask_nan is None: + mask_nan_style_int = 0 + elif mask_nan == "raise" or mask_nan == "warn": + mask_nan_style_int = 1 + else: + mask_nan_style_int = 2 + mask_nan_value = float(mask_nan) + tensors, num_nans, nan_tensor_index = graphium_cpp.featurize_smiles( + mol, + atom_property_list_onehot, + atom_property_list_float, + 'positions_3d' in conformer_property_list, + edge_property_list, + pos_encoding_as_features[1], + True, # duplicate_edges, so that we don't have to duplicate below + add_self_loop, + explicit_H, + use_bonds_weights, + True, #offset_carbon + NP_DTYPE_TO_TORCH_INT[dtype], + mask_nan_style_int, + mask_nan_value + ) + + if num_nans > 0: + if nan_tensor_index == 2: + array_name = "atom featurization" + elif nan_tensor_index == 3: + array_name = "edge property" + elif nan_tensor_index == 4 and has_conformer: + array_name = 'positions_3d' + else: + array_name = pos_encoding_as_features[0][nan_tensor_index - pe_index] + msg = f"There are {num_nans} NaNs in `{array_name}`" + if mask_nan == "raise": + raise ValueError(msg) + elif mask_nan == "warn": + logger.warning(msg) + + num_atoms = tensors[2].size(0) + if not output_pyg_graph: + adj = coo_matrix( + (tensors[1], tensors[0]), + shape=(num_atoms, 
num_atoms), + dtype=dtype, + ) + else: + data_dict = { + "feat": tensors[2], + "edge_feat": tensors[3] + } + if has_conformer: + data_dict['positions_3d'] = tensors[4] + for i in range(len(tensors)-pe_index): + data_dict[pos_encoding_as_features[0][i]] = tensors[i+pe_index] + # Create the PyG graph object `Data` + data = Data(edge_index=tensors[0], edge_weight=tensors[1], num_nodes=num_atoms, **data_dict) + return data + + ndata = tensors[2] + edata = tensors[3] + if has_conformer: + conf_dict = {'positions_3d': tensors[4]} + else: + conf_dict = {} + pe_tensors = tensors[pe_index:] + pe_dict = {pos_encoding_as_features[0][i]: pe_tensors[i] for i in range(len(pe_tensors))} + else: - mol = Chem.RemoveHs(mol) - num_atoms = mol.GetNumAtoms() - if (max_num_atoms is not None) and (num_atoms > max_num_atoms): - raise ValueError(f"Maximum number of atoms greater than permitted {num_atoms}>{max_num_atoms}") - ( - adj, - ndata, - edata, - pe_dict, - conf_dict, - ) = mol_to_adj_and_features( - mol=mol, - atom_property_list_onehot=atom_property_list_onehot, - atom_property_list_float=atom_property_list_float, - conformer_property_list=conformer_property_list, - edge_property_list=edge_property_list, - add_self_loop=add_self_loop, - explicit_H=explicit_H, - use_bonds_weights=use_bonds_weights, - pos_encoding_as_features=pos_encoding_as_features, - mask_nan=mask_nan, - ) + if isinstance(mol, str): + mol = dm.to_mol(mol, ordered=True) + if explicit_H: + mol = Chem.AddHs(mol) + else: + mol = Chem.RemoveHs(mol) + num_atoms = mol.GetNumAtoms() + if (max_num_atoms is not None) and (num_atoms > max_num_atoms): + raise ValueError(f"Maximum number of atoms greater than permitted {num_atoms}>{max_num_atoms}") + ( + adj, + ndata, + edata, + pe_dict, + conf_dict, + ) = mol_to_adj_and_features( + mol=mol, + atom_property_list_onehot=atom_property_list_onehot, + atom_property_list_float=atom_property_list_float, + conformer_property_list=conformer_property_list, + 
edge_property_list=edge_property_list, + add_self_loop=add_self_loop, + explicit_H=explicit_H, + use_bonds_weights=use_bonds_weights, + pos_encoding_as_features=pos_encoding_as_features, + mask_nan=mask_nan, + ) + if edata is not None: + if issparse(edata): + edata = to_dense_array(edata, dtype=dtype) + edata = edata.repeat(2, axis=0) except Exception as e: if on_error.lower() == "raise": raise e @@ -1118,10 +1206,7 @@ def mol_to_graph_dict( # Assign the edge data if edata is not None: - if issparse(edata): - edata = to_dense_array(edata, dtype=dtype) - hetero_edata = edata.repeat(2, axis=0) - graph_dict["data"]["edge_feat"] = hetero_edata + graph_dict["data"]["edge_feat"] = edata # Put the positional encodings as node features # TODO: add support for PE on edges @@ -1137,19 +1222,21 @@ def mol_to_graph_dict( def mol_to_pyggraph( - mol: dm.Mol, - atom_property_list_onehot: List[str] = [], - atom_property_list_float: List[Union[str, Callable]] = [], + mol: Union[str, dm.Mol], + atom_property_list_onehot: Union[List[str],torch.Tensor] = [], + atom_property_list_float: Union[List[Union[str, Callable]],torch.Tensor] = [], conformer_property_list: List[str] = [], - edge_property_list: List[str] = [], + edge_property_list: Union[List[str],torch.Tensor] = [], add_self_loop: bool = False, explicit_H: bool = False, use_bonds_weights: bool = False, - pos_encoding_as_features: Dict[str, Any] = None, + pos_encoding_as_features: Union[Dict[str, Any], Tuple[List[str],torch.Tensor]] = None, dtype: np.dtype = np.float16, on_error: str = "ignore", mask_nan: Union[str, float, type(None)] = "raise", max_num_atoms: Optional[int] = None, + use_graphium_cpp: bool = False, + original_featurization: Optional[Dict[str, Any]] = None, ) -> Union[Data, str]: r""" Transforms a molecule into an adjacency matrix representing the molecular graph @@ -1242,12 +1329,12 @@ def mol_to_pyggraph( on_error=on_error, mask_nan=mask_nan, max_num_atoms=max_num_atoms, + use_graphium_cpp=use_graphium_cpp, + 
original_featurization=original_featurization, + output_pyg_graph=True ) - if (graph_dict is not None) and not isinstance(graph_dict, str): - return graph_dict.make_pyg_graph() - else: - return graph_dict + return graph_dict def mol_to_graph_signature(featurizer_args: Dict[str, Any] = None) -> Dict[str, Any]: diff --git a/graphium/graphium_cpp/commute.cpp b/graphium/graphium_cpp/commute.cpp new file mode 100644 index 000000000..a81156815 --- /dev/null +++ b/graphium/graphium_cpp/commute.cpp @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +#include "commute.h" + +#include "electrostatic.h" +#include "spectral.h" + +#include +#include + +template +void compute_commute_distances( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& laplacian_pseudoinverse, + std::vector& matrix, + const T* weights) { + + if (laplacian_pseudoinverse.size() == 0) { + compute_laplacian_pseudoinverse(n, row_starts, neighbors, data, laplacian_pseudoinverse, weights); + } + + T full_sum = T(0); + if (weights != nullptr) { + for (size_t i = 0, weights_size = row_starts[n]; i < weights_size; ++i) { + full_sum += weights[i]; + } + } + else { + // Unweighted, so just twice the unique edge count + // (each edge appears twice in neighbors) + full_sum = T(row_starts[n]); + } + + matrix.resize(n * n); + + for (size_t row = 0, row_diag_index = 0, i = 0; row < n; ++row, row_diag_index += (n + 1)) { + for (size_t col = 0, col_diag_index = 0; col < n; ++col, ++i, col_diag_index += (n + 1)) { + matrix[i] = full_sum * ( + laplacian_pseudoinverse[row_diag_index] + + laplacian_pseudoinverse[col_diag_index] + - 2 * laplacian_pseudoinverse[row*n + col]); + } + } +} + +template +void compute_commute_distances( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& 
laplacian_pseudoinverse, + std::vector& matrix, + const float* weights); +template +void compute_commute_distances( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& laplacian_pseudoinverse, + std::vector& matrix, + const double* weights); diff --git a/graphium/graphium_cpp/commute.h b/graphium/graphium_cpp/commute.h new file mode 100644 index 000000000..a8611d74c --- /dev/null +++ b/graphium/graphium_cpp/commute.h @@ -0,0 +1,38 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "spectral.h" + +#include +#include + +template +void compute_commute_distances( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& laplacian_pseudoinverse, + std::vector& matrix, + const T* weights = nullptr); + +extern template +void compute_commute_distances( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& laplacian_pseudoinverse, + std::vector& matrix, + const float* weights); +extern template +void compute_commute_distances( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& laplacian_pseudoinverse, + std::vector& matrix, + const double* weights); diff --git a/graphium/graphium_cpp/electrostatic.cpp b/graphium/graphium_cpp/electrostatic.cpp new file mode 100644 index 000000000..29ff430c2 --- /dev/null +++ b/graphium/graphium_cpp/electrostatic.cpp @@ -0,0 +1,106 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +#include "electrostatic.h" + +#include "spectral.h" + +#include +#include +#include + +template +void compute_laplacian_pseudoinverse( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& matrix, + const T* weights) { + + // If we've already computed the eigendecomposition with the correct normalization, + // skip recomputing it. + if (data.eigenvalues.size() != n || data.normalization != Normalization::NONE) { + compute_laplacian_eigendecomp(n, row_starts, neighbors, Normalization::NONE, data, false, weights); + } + + matrix.clear(); + matrix.resize(size_t(n) * n, T(0)); + const T maxEigenvalue = data.eigenvalues.back(); + // zero_threshold is an estimate of how accurately the diagonalization + // algorithm determines eigenvalues close to zero. Anything smaller + // should be considered zero for the pseudoinverse. + const T eigendecomp_relative_threshold = T(1e-6); + const T zero_threshold = n * eigendecomp_relative_threshold * maxEigenvalue; + for (size_t eigenIndex = 0; eigenIndex < n; ++eigenIndex) { + // This is a positive semi-definite matrix, so we don't need to take the absolute value + // when checking the threshold. 
+ if (data.eigenvalues[eigenIndex] < zero_threshold) { + continue; + } + const T eigenvalueInverse = T(1) / data.eigenvalues[eigenIndex]; + const T* const eigenvector = data.vectors.data() + eigenIndex * n; + for (size_t row = 0, i = 0; row < n; ++row) { + for (size_t col = 0; col < n; ++col, ++i) { + const T value = eigenvalueInverse * eigenvector[row] * eigenvector[col]; + matrix[i] += value; + } + } + } +} + +template void compute_laplacian_pseudoinverse( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& matrix, + const float* weights); +template void compute_laplacian_pseudoinverse( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& matrix, + const double* weights); + +template +void compute_electrostatic_interactions( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& laplacian_pseudoinverse, + std::vector& matrix, + const T* weights) { + + if (laplacian_pseudoinverse.size() == 0) { + compute_laplacian_pseudoinverse(n, row_starts, neighbors, data, laplacian_pseudoinverse, weights); + } + + matrix.resize(n * n); + + // Subtract the diagonal value from each column + for (size_t row = 0, i = 0; row < n; ++row) { + for (size_t col = 0, diag_index = 0; col < n; ++col, ++i, diag_index += (n+1)) { + matrix[i] = laplacian_pseudoinverse[i] - laplacian_pseudoinverse[diag_index]; + } + } +} + +template void compute_electrostatic_interactions( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& laplacian_pseudoinverse, + std::vector& matrix, + const float* weights); +template void compute_electrostatic_interactions( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& laplacian_pseudoinverse, + std::vector& matrix, + const double* weights); diff 
--git a/graphium/graphium_cpp/electrostatic.h b/graphium/graphium_cpp/electrostatic.h new file mode 100644 index 000000000..575dc3f83 --- /dev/null +++ b/graphium/graphium_cpp/electrostatic.h @@ -0,0 +1,60 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "spectral.h" + +#include +#include + +template +void compute_laplacian_pseudoinverse( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& matrix, + const T* weights = nullptr); + +extern template void compute_laplacian_pseudoinverse( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& matrix, + const float* weights); +extern template void compute_laplacian_pseudoinverse( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& matrix, + const double* weights); + +template +void compute_electrostatic_interactions( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& laplacian_pseudoinverse, + std::vector& matrix, + const T* weights = nullptr); + +extern template void compute_electrostatic_interactions( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& laplacian_pseudoinverse, + std::vector& matrix, + const float* weights); +extern template void compute_electrostatic_interactions( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + LaplacianData& data, + std::vector& laplacian_pseudoinverse, + std::vector& matrix, + const double* weights); diff --git a/graphium/graphium_cpp/features.cpp b/graphium/graphium_cpp/features.cpp new file mode 100644 index 000000000..98f9aff85 --- /dev/null +++ b/graphium/graphium_cpp/features.cpp @@ -0,0 +1,1395 @@ +// 
SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +#define DEBUG_LOGGING 0 + +#include "features.h" + +#include "commute.h" +#include "electrostatic.h" +#include "float_features.h" +#include "graphormer.h" +#include "one_hot.h" +#include "random_walk.h" +#include "spectral.h" + +#include + +static GraphData read_graph(const std::string& smiles_string, bool explicit_H) { + std::unique_ptr mol{ parse_mol(smiles_string, explicit_H) }; + + if (!mol) { + return GraphData{ 0, std::unique_ptr(), 0, std::unique_ptr(), std::move(mol) }; + } + + const size_t num_atoms = mol->getNumAtoms(); + const size_t num_bonds = mol->getNumBonds(); +#if DEBUG_LOGGING + printf("# atoms = %zu\n# bonds = %zu\n", num_atoms, num_bonds); +#endif +#if REPORT_STATS + ++statsMolAtomCounts[(num_atoms >= STATS_NUM_MOL_ATOM_COUNTS) ? (STATS_NUM_MOL_ATOM_COUNTS - 1) : num_atoms]; + ++statsMolBondCounts[(num_bonds >= STATS_NUM_MOL_BOND_COUNTS) ? (STATS_NUM_MOL_BOND_COUNTS - 1) : num_bonds]; + statsTotalNumAtoms += num_atoms; + statsTotalNumBonds += num_bonds; +#endif + +#if ORDER_ATOMS + // Determine a canonical ordering of the atoms, if desired. + std::vector atomOrder; + atomOrder.reserve(num_atoms); + RDKit::Canon::rankMolAtoms(*mol, atomOrder); + assert(atomOrder.size() == num_atoms); +#endif + + // Allocate an array of atom data, and fill it from the RDKit atom data. 
+ std::unique_ptr atoms(new CompactAtom[num_atoms]); + for (size_t atomIdx = 0; atomIdx < num_atoms; ++atomIdx) { + const RDKit::Atom* const atom = mol->getAtomWithIdx(atomIdx); + auto atomicNum = atom->getAtomicNum(); + auto totalDegree = atom->getTotalDegree(); + auto formalCharge = atom->getFormalCharge(); + const RDKit::Atom::ChiralType chiralType = atom->getChiralTag(); + auto totalNumHs = atom->getTotalNumHs(); + const RDKit::Atom::HybridizationType hybridization = atom->getHybridization(); + + const bool isAromatic = atom->getIsAromatic(); +#if REPORT_STATS + ++statsElementCounts[(atomicNum < 0 || atomicNum >= STATS_NUM_ELEMENTS) ? (STATS_NUM_ELEMENTS - 1) : atomicNum]; + ++statsDegreeCounts[(totalDegree < 0 || totalDegree >= STATS_NUM_DEGREES) ? (STATS_NUM_DEGREES - 1) : totalDegree]; + size_t formalChargeIndex = formalCharge + int(STATS_CHARGE_OFFSET); + if (formalCharge < -int(STATS_CHARGE_OFFSET)) { + formalChargeIndex = 0; + } + else if (formalCharge > int(STATS_CHARGE_OFFSET)) { + formalChargeIndex = STATS_NUM_CHARGES - 1; + } + + ++statsChargeCounts[formalChargeIndex]; + ++statsChiralityCounts[(size_t(chiralType) >= STATS_NUM_CHIRALITIES) ? (STATS_NUM_CHIRALITIES - 1) : size_t(chiralType)]; + ++statsHCounts[(totalNumHs < 0 || totalNumHs >= STATS_NUM_HS) ? (STATS_NUM_HS - 1) : totalNumHs]; + ++statsHybridizationCounts[(size_t(hybridization) >= STATS_NUM_HYBRIDIZATIONS) ? (STATS_NUM_HYBRIDIZATIONS - 1) : size_t(hybridization)]; + statsAromaticAtomCount += (isAromatic ? 
1 : 0); +#endif + const double mass = atom->getMass(); + +#if ORDER_ATOMS + const size_t destAtomIdx = atomOrder[atomIdx]; +#else + const size_t destAtomIdx = atomIdx; +#endif + atoms[destAtomIdx] = CompactAtom{ + uint8_t(atomicNum), + uint8_t(totalDegree), + int8_t(formalCharge), + uint8_t(chiralType), + uint8_t(totalNumHs), + uint8_t(hybridization), + isAromatic, + float(mass) + }; +#if DEBUG_LOGGING + printf( + "atom[%zu] = {%zu, %u, %d, %u, %u, %u, %s, %f}\n", + destAtomIdx, + int(atomicNum), + int(totalDegree), + int(formalCharge), + int(chiralType), + int(totalNumHs), + int(hybridization), + isAromatic ? "true" : "false", + mass + ); +#endif + } + + // Allocate an array of bond data, and fill it from the RDKit bond data. + std::unique_ptr bonds(new CompactBond[num_bonds]); + const RDKit::RingInfo* const ringInfo = mol->getRingInfo(); + for (size_t bondIdx = 0; bondIdx < num_bonds; ++bondIdx) { + const RDKit::Bond* const bond = mol->getBondWithIdx(bondIdx); + const RDKit::Bond::BondType bondType = bond->getBondType(); + const bool isConjugated = bond->getIsConjugated(); + // TODO: Verify that it's the same index as bond->getIdx() + const bool isInRing = (ringInfo->numBondRings(bondIdx) != 0); + const RDKit::Bond::BondStereo stereo = bond->getStereo(); + +#if REPORT_STATS + ++statsBondTypeCounts[(size_t(bondType) >= STATS_NUM_BOND_TYPES) ? (STATS_NUM_BOND_TYPES - 1) : size_t(bondType)]; + ++statsBondStereoCounts[(size_t(stereo) >= STATS_NUM_BOND_STEREOS) ? (STATS_NUM_BOND_STEREOS - 1) : size_t(stereo)]; + statsConjugatedBondCount += (isConjugated ? 1 : 0); + statsBondInRingCount += (isInRing ? 
1 : 0); +#endif + + auto beginAtomIdx = bond->getBeginAtomIdx(); + auto endAtomIdx = bond->getEndAtomIdx(); +#if ORDER_ATOMS + beginAtomIdx = atomOrder[beginAtomIdx]; + endAtomIdx = atomOrder[endAtomIdx]; +#endif + bonds[bondIdx] = CompactBond{ + uint8_t(bondType), + isConjugated, + isInRing, + uint8_t(stereo), + beginAtomIdx, + endAtomIdx + }; +#if DEBUG_LOGGING + printf( + "bond[%zu] = {%u, %s, %s, %u, {%u, %u}}\n", + bondIdx, + int(bondType), + isConjugated ? "true" : "false", + isInRing ? "true" : "false", + int(stereo), + beginAtomIdx, + endAtomIdx + ); +#endif + } + + // Return a GraphData structure, taking ownership of the atom and bond data arrays. + return GraphData{ num_atoms, std::move(atoms), num_bonds, std::move(bonds), std::move(mol) }; +} + +// This is a structure for managing the adjacency data (CSR format) for use by randomSubgraph. +struct NeighbourData { + // This owns the data of all 3 arrays, which are actually a single, contiguous allocation. + std::unique_ptr deleter; + + // This is an array of indices into the other two arrays, indicating where + // each atom's neighbours start, including the first entry being 0 for the start of + // atom 0, and the num_atoms entry being 2*num_bonds (2x because each bond is on 2 atoms), + // so there are num_atoms+1 entries. The number of neighbours of an atom i is + // neighbour_starts[i+1]-neighbour_starts[i] + const uint32_t* neighbour_starts; + + // The neighbour atom for each bond, with each atom having an entry for each of + // its neighbours, so each bond occurs twice. + const uint32_t* neighbours; + + // This is in the same order as neighbours, but indicates the index of the bond. + // Each bond occurs twice, so each number occurs twice. + const uint32_t* bond_indices; +}; + +// Construct a NeighbourData structure representing the molecule's graph in CSR format. 
+static NeighbourData construct_neighbours(const GraphData& graph) { + const uint32_t num_atoms = graph.num_atoms; + const uint32_t num_bonds = graph.num_bonds; + // Do a single allocation for all 3 arrays. + std::unique_ptr deleter(new uint32_t[num_atoms + 1 + 4 * num_bonds]); + + uint32_t* neighbour_starts = deleter.get(); + for (uint32_t i = 0; i <= num_atoms; ++i) { + neighbour_starts[i] = 0; + } + + // First, get atom neighbour counts + const CompactBond* const bonds = graph.bonds.get(); + for (uint32_t i = 0; i < num_bonds; ++i) { + uint32_t a = bonds[i].beginAtomIdx; + uint32_t b = bonds[i].endAtomIdx; + // NOTE: +1 is because first entry will stay zero. + ++neighbour_starts[a + 1]; + ++neighbour_starts[b + 1]; + } + + // Find the starts by partial-summing the neighbour counts. + // NOTE: +1 is because first entry will stay zero. + std::partial_sum(neighbour_starts + 1, neighbour_starts + 1 + num_atoms, neighbour_starts + 1); + + // Fill in the neighbours and bond_indices arrays. + uint32_t* neighbours = neighbour_starts + num_atoms + 1; + uint32_t* bond_indices = neighbours + 2 * num_bonds; + for (uint32_t i = 0; i < num_bonds; ++i) { + uint32_t a = bonds[i].beginAtomIdx; + uint32_t b = bonds[i].endAtomIdx; + + uint32_t ai = neighbour_starts[a]; + neighbours[ai] = b; + bond_indices[ai] = i; + ++neighbour_starts[a]; + + uint32_t bi = neighbour_starts[b]; + neighbours[bi] = a; + bond_indices[bi] = i; + ++neighbour_starts[b]; + } + + // Shift neighbour_starts forward one after incrementing it. + uint32_t previous = 0; + for (uint32_t i = 0; i < num_atoms; ++i) { + uint32_t next = neighbour_starts[i]; + neighbour_starts[i] = previous; + previous = next; + } + + // NeighbourData takes ownership of the memory. 
+ return NeighbourData{ std::move(deleter), neighbour_starts, neighbours, bond_indices }; +} + +// This fills in 3 values for each atom +template +at::Tensor get_conformer_features( + const RDKit::ROMol &mol, + c10::ScalarType dtype, + MaskNaNStyle mask_nan_style, + T mask_nan_value, + int64_t &num_nans) { + + const size_t n = mol.getNumAtoms(); + std::unique_ptr conformer_data(new T[3 * n]); + T* data = conformer_data.get(); + + if (mol.beginConformers() == mol.endConformers()) { + // No conformers: treat as NaN + if (mask_nan_style == MaskNaNStyle::REPORT) { + num_nans += 3*n; + } + else { + for (size_t i = 0; i < 3 * n; ++i) { + data[i] = mask_nan_value; + } + } + } + else { + const RDKit::Conformer& conformer = mol.getConformer(); + const auto& positions = conformer.getPositions(); + assert(positions.size() == n); + for (size_t i = 0; i < n; ++i, data += 3) { + const auto& position = positions[i]; + data[0] = FeatureValues::convertToFeatureType(position.x); + data[1] = FeatureValues::convertToFeatureType(position.y); + data[2] = FeatureValues::convertToFeatureType(position.z); + } + + num_nans += mask_nans(data, 3 * n, mask_nan_style, mask_nan_value); + } + + const int64_t dims[1] = { int64_t(3 * n) }; + return torch_tensor_from_array(std::move(conformer_data), dims, 1, dtype); +} + +static const std::unordered_map atom_float_name_to_enum{ + {std::string("atomic-number"), int64_t(AtomFloatFeature::ATOMIC_NUMBER)}, + {std::string("mass"), int64_t(AtomFloatFeature::MASS)}, + {std::string("weight"), int64_t(AtomFloatFeature::MASS)}, + {std::string("valence"), int64_t(AtomFloatFeature::VALENCE)}, + {std::string("total-valence"), int64_t(AtomFloatFeature::VALENCE)}, + {std::string("implicit-valence"), int64_t(AtomFloatFeature::IMPLICIT_VALENCE)}, + {std::string("hybridization"), int64_t(AtomFloatFeature::HYBRIDIZATION)}, + {std::string("chirality"), int64_t(AtomFloatFeature::CHIRALITY)}, + {std::string("aromatic"), int64_t(AtomFloatFeature::AROMATIC)}, + 
{std::string("ring"), int64_t(AtomFloatFeature::IN_RING)}, + {std::string("in-ring"), int64_t(AtomFloatFeature::IN_RING)}, + {std::string("min-ring"), int64_t(AtomFloatFeature::MIN_RING)}, + {std::string("max-ring"), int64_t(AtomFloatFeature::MAX_RING)}, + {std::string("num-ring"), int64_t(AtomFloatFeature::NUM_RING)}, + {std::string("degree"), int64_t(AtomFloatFeature::DEGREE)}, + {std::string("radical-electron"), int64_t(AtomFloatFeature::RADICAL_ELECTRON)}, + {std::string("formal-charge"), int64_t(AtomFloatFeature::FORMAL_CHARGE)}, + {std::string("vdw-radius"), int64_t(AtomFloatFeature::VDW_RADIUS)}, + {std::string("covalent-radius"), int64_t(AtomFloatFeature::COVALENT_RADIUS)}, + {std::string("electronegativity"), int64_t(AtomFloatFeature::ELECTRONEGATIVITY)}, + {std::string("ionization"), int64_t(AtomFloatFeature::IONIZATION)}, + {std::string("first-ionization"), int64_t(AtomFloatFeature::IONIZATION)}, + {std::string("melting-point"), int64_t(AtomFloatFeature::MELTING_POINT)}, + {std::string("metal"), int64_t(AtomFloatFeature::METAL)}, + {std::string("group"), int64_t(AtomFloatFeature::GROUP)}, + {std::string("period"), int64_t(AtomFloatFeature::PERIOD)}, + {std::string("single-bond"), int64_t(AtomFloatFeature::SINGLE_BOND)}, + {std::string("aromatic-bond"), int64_t(AtomFloatFeature::AROMATIC_BOND)}, + {std::string("double-bond"), int64_t(AtomFloatFeature::DOUBLE_BOND)}, + {std::string("triple-bond"), int64_t(AtomFloatFeature::TRIPLE_BOND)}, + {std::string("is-carbon"), int64_t(AtomFloatFeature::IS_CARBON)}, +}; + +at::Tensor atom_float_feature_names_to_tensor(const std::vector& features) { + const size_t num_features = features.size(); + std::unique_ptr feature_enum_values(new int64_t[num_features]); + for (size_t i = 0; i < num_features; ++i) { + auto it = atom_float_name_to_enum.find(features[i]); + if (it != atom_float_name_to_enum.end()) { + feature_enum_values[i] = it->second; + } + else { + feature_enum_values[i] = int64_t(AtomFloatFeature::UNKNOWN); + 
} + } + const int64_t dims[1] = { int64_t(num_features) }; + return torch_tensor_from_array(std::move(feature_enum_values), dims, 1, c10::ScalarType::Long); +} + +static const std::unordered_map atom_onehot_name_to_enum{ + {std::string("atomic-number"), int64_t(AtomOneHotFeature::ATOMIC_NUM)}, + {std::string("degree"), int64_t(AtomOneHotFeature::DEGREE)}, + {std::string("valence"), int64_t(AtomOneHotFeature::VALENCE)}, + {std::string("total-valence"), int64_t(AtomOneHotFeature::VALENCE)}, + {std::string("implicit-valence"), int64_t(AtomOneHotFeature::IMPLICIT_VALENCE)}, + {std::string("hybridization"), int64_t(AtomOneHotFeature::HYBRIDIZATION)}, + {std::string("chirality"), int64_t(AtomOneHotFeature::CHIRALITY)}, + {std::string("phase"), int64_t(AtomOneHotFeature::PHASE)}, + {std::string("type"), int64_t(AtomOneHotFeature::TYPE)}, + {std::string("group"), int64_t(AtomOneHotFeature::GROUP)}, + {std::string("period"), int64_t(AtomOneHotFeature::PERIOD)}, +}; + +at::Tensor atom_onehot_feature_names_to_tensor(const std::vector& features) { + const size_t num_features = features.size(); + std::unique_ptr feature_enum_values(new int64_t[num_features]); + for (size_t i = 0; i < num_features; ++i) { + auto it = atom_onehot_name_to_enum.find(features[i]); + if (it != atom_onehot_name_to_enum.end()) { + feature_enum_values[i] = it->second; + } + else { + feature_enum_values[i] = int64_t(AtomFloatFeature::UNKNOWN); + } + } + const int64_t dims[1] = { int64_t(num_features) }; + return torch_tensor_from_array(std::move(feature_enum_values), dims, 1, c10::ScalarType::Long); +} + +static const std::unordered_map bond_name_to_enum{ + {std::string("bond-type-onehot"), int64_t(BondFeature::TYPE_ONE_HOT)}, + {std::string("bond-type-float"), int64_t(BondFeature::TYPE_FLOAT)}, + {std::string("stereo"), int64_t(BondFeature::STEREO_ONE_HOT)}, + {std::string("in-ring"), int64_t(BondFeature::IN_RING)}, + {std::string("conjugated"), int64_t(BondFeature::CONJUGATED)}, + 
{std::string("conformer-bond-length"), int64_t(BondFeature::CONFORMER_BOND_LENGTH)}, + {std::string("estimated-bond-length"), int64_t(BondFeature::ESTIMATED_BOND_LENGTH)}, +}; + +at::Tensor bond_feature_names_to_tensor(const std::vector& features) { + const size_t num_features = features.size(); + std::unique_ptr feature_enum_values(new int64_t[num_features]); + for (size_t i = 0; i < num_features; ++i) { + auto it = bond_name_to_enum.find(features[i]); + if (it != bond_name_to_enum.end()) { + feature_enum_values[i] = it->second; + } + else { + feature_enum_values[i] = int64_t(AtomFloatFeature::UNKNOWN); + } + } + const int64_t dims[1] = { int64_t(num_features) }; + return torch_tensor_from_array(std::move(feature_enum_values), dims, 1, c10::ScalarType::Long); +} + +static const std::unordered_map positional_name_to_enum{ + {std::string("laplacian_eigvec"), int64_t(PositionalFeature::LAPLACIAN_EIGENVEC)}, + {std::string("laplacian_eigval"), int64_t(PositionalFeature::LAPLACIAN_EIGENVAL)}, + {std::string("rw_return_probs"), int64_t(PositionalFeature::RW_RETURN_PROBS)}, + {std::string("rw_transition_probs"), int64_t(PositionalFeature::RW_TRANSITION_PROBS)}, + {std::string("electrostatic"), int64_t(PositionalFeature::ELECTROSTATIC)}, + {std::string("commute"), int64_t(PositionalFeature::COMMUTE)}, + {std::string("graphormer"), int64_t(PositionalFeature::GRAPHORMER)}, +}; + +static const std::unordered_map feature_level_to_enum{ + {std::string("node"), int64_t(FeatureLevel::NODE)}, + {std::string("edge"), int64_t(FeatureLevel::EDGE)}, + {std::string("nodepair"), int64_t(FeatureLevel::NODEPAIR)}, + {std::string("graph"), int64_t(FeatureLevel::GRAPH)}, +}; + +static const std::unordered_map normalization_to_enum{ + {std::string("none"), int64_t(Normalization::NONE)}, + {std::string("inv"), int64_t(Normalization::INVERSE)}, + {std::string("sym"), int64_t(Normalization::SYMMETRIC)}, +}; + +std::pair,at::Tensor> positional_feature_options_to_tensor( + const pybind11::dict& 
dict) { + size_t num_features = 0; + size_t num_values = 0; + for (const auto& pair : dict) { + // The string keys (pair.first) of the outer dictionary aren't needed for this + if (!pybind11::isinstance(pair.second)) { + continue; + } + pybind11::dict feature_dict = pair.second.cast(); + pybind11::handle feature_name_handle = pybind11::handle(PyDict_GetItemString(feature_dict.ptr(), "pos_type")); + pybind11::handle feature_level_handle = pybind11::handle(PyDict_GetItemString(feature_dict.ptr(), "pos_level")); + if (!feature_name_handle || !feature_level_handle) { + continue; + } + std::string feature_name{ pybind11::str(feature_name_handle) }; + std::string feature_level{ pybind11::str(feature_level_handle) }; + + auto feature_it = positional_name_to_enum.find(feature_name); + auto level_it = feature_level_to_enum.find(feature_level); + if (feature_it == positional_name_to_enum.end() || level_it == feature_level_to_enum.end()) { + continue; + } + + PositionalFeature feature = PositionalFeature(feature_it->second); + switch (feature) { + case PositionalFeature::LAPLACIAN_EIGENVEC: + case PositionalFeature::LAPLACIAN_EIGENVAL: { + // Required int num_pos + pybind11::handle num_pos_handle = pybind11::handle(PyDict_GetItemString(feature_dict.ptr(), "num_pos")); + if (!num_pos_handle || !pybind11::isinstance(num_pos_handle)) { + break; + } + // Optional string normalization + pybind11::handle normalization_handle = pybind11::handle(PyDict_GetItemString(feature_dict.ptr(), "normalization")); + if (normalization_handle) { + if (!pybind11::isinstance(normalization_handle)) { + break; + } + std::string normalization_name{ pybind11::str(normalization_handle) }; + if (!normalization_to_enum.contains(normalization_name)) { + break; + } + } + // Optional bool disconnected_comp + pybind11::handle disconnected_comp_handle = pybind11::handle(PyDict_GetItemString(feature_dict.ptr(), "disconnected_comp")); + if (disconnected_comp_handle && 
!pybind11::isinstance(disconnected_comp_handle)) { + break; + } + num_values += 3 + 3; + ++num_features; + break; + } + case PositionalFeature::RW_RETURN_PROBS: + case PositionalFeature::RW_TRANSITION_PROBS: { + pybind11::handle ksteps_handle = pybind11::handle(PyDict_GetItemString(feature_dict.ptr(), "ksteps")); + if (!ksteps_handle) { + break; + } + int64_t power_count = 0; + if (pybind11::isinstance(ksteps_handle)) { + power_count = int64_t(ksteps_handle.cast()); + } + else if (pybind11::isinstance(ksteps_handle)) { + power_count = ksteps_handle.cast().size(); + } + if (power_count < 1) { + break; + } + pybind11::handle space_dim_handle = pybind11::handle(PyDict_GetItemString(feature_dict.ptr(), "space_dim")); + if (space_dim_handle && !pybind11::isinstance(space_dim_handle)) { + break; + } + num_values += 3 + 1 + power_count; + ++num_features; + break; + } + case PositionalFeature::ELECTROSTATIC: + case PositionalFeature::COMMUTE: + case PositionalFeature::GRAPHORMER: + num_values += 3; + ++num_features; + break; + } + } + + std::unique_ptr values(new int64_t[num_values]); + std::vector names(num_features); + + size_t prev_feature_index = 0; + size_t feature_index = 0; + size_t value_index = 0; + for (const auto& pair : dict) { + // The string keys (pair.first) of the outer dictionary aren't needed for this + if (!pybind11::isinstance(pair.second)) { + continue; + } + pybind11::dict feature_dict = pair.second.cast(); + pybind11::handle feature_name_handle = pybind11::handle(PyDict_GetItemString(feature_dict.ptr(), "pos_type")); + pybind11::handle feature_level_handle = pybind11::handle(PyDict_GetItemString(feature_dict.ptr(), "pos_level")); + if (!feature_name_handle || !feature_level_handle) { + continue; + } + std::string feature_name{ pybind11::str(feature_name_handle) }; + std::string feature_level{ pybind11::str(feature_level_handle) }; + + auto feature_it = positional_name_to_enum.find(feature_name); + auto level_it = 
feature_level_to_enum.find(feature_level); + if (feature_it == positional_name_to_enum.end() || level_it == feature_level_to_enum.end()) { + continue; + } + + PositionalFeature feature = PositionalFeature(feature_it->second); + switch (feature) { + case PositionalFeature::LAPLACIAN_EIGENVEC: + case PositionalFeature::LAPLACIAN_EIGENVAL: { + // Required int num_pos + pybind11::handle num_pos_handle = pybind11::handle(PyDict_GetItemString(feature_dict.ptr(), "num_pos")); + if (!num_pos_handle || !pybind11::isinstance(num_pos_handle)) { + continue; + } + // Optional string normalization + pybind11::handle normalization_handle = pybind11::handle(PyDict_GetItemString(feature_dict.ptr(), "normalization")); + Normalization normalization = Normalization::NONE; + if (normalization_handle) { + if (!pybind11::isinstance(normalization_handle)) { + continue; + } + std::string normalization_name{ pybind11::str(normalization_handle) }; + auto it = normalization_to_enum.find(normalization_name); + if (it == normalization_to_enum.end()) { + continue; + } + normalization = Normalization(it->second); + } + // Optional bool disconnected_comp + pybind11::handle disconnected_comp_handle = pybind11::handle(PyDict_GetItemString(feature_dict.ptr(), "disconnected_comp")); + if (disconnected_comp_handle && !pybind11::isinstance(disconnected_comp_handle)) { + continue; + } + values[value_index] = feature_it->second; + values[value_index + 1] = 3; + values[value_index + 2] = level_it->second; + values[value_index + 3] = int64_t(num_pos_handle.cast()); + values[value_index + 4] = int64_t(normalization); + values[value_index + 5] = disconnected_comp_handle ? 
bool(disconnected_comp_handle.cast()) : true; + value_index += 3 + 3; + ++feature_index; + break; + } + case PositionalFeature::RW_RETURN_PROBS: + case PositionalFeature::RW_TRANSITION_PROBS: { + // Required int or list[int] ksteps + pybind11::handle ksteps_handle = pybind11::handle(PyDict_GetItemString(feature_dict.ptr(), "ksteps")); + if (!ksteps_handle) { + continue; + } + int64_t power_count = 0; + if (pybind11::isinstance(ksteps_handle)) { + // Integer means use all powers from 1 up to this value, inclusive. + power_count = int64_t(ksteps_handle.cast()); + } + else if (pybind11::isinstance(ksteps_handle)) { + power_count = ksteps_handle.cast().size(); + } + if (power_count < 1) { + break; + } + // Optional int space_dim + pybind11::handle space_dim_handle = pybind11::handle(PyDict_GetItemString(feature_dict.ptr(), "space_dim")); + if (space_dim_handle && !pybind11::isinstance(space_dim_handle)) { + break; + } + values[value_index] = feature_it->second; + values[value_index + 1] = 1 + power_count; + values[value_index + 2] = level_it->second; + + int64_t space_dim = space_dim_handle ? int64_t(space_dim_handle.cast()) : 0; + values[value_index + 3] = space_dim; + if (pybind11::isinstance(ksteps_handle)) { + for (int64_t power = 1; power <= power_count; ++power) { + values[value_index + 3 + power] = power; + } + } + else if (pybind11::isinstance(ksteps_handle)) { + size_t power_index = 0; + int64_t prev_power = 0; + for(const auto item : ksteps_handle.cast()) { + int64_t next_power = pybind11::isinstance(item) ? 
int64_t(item.cast()) : prev_power; + if (next_power < prev_power) { + // Force the integers to be ascending + next_power = prev_power; + } + values[value_index + 3 + 1 + power_index] = next_power; + prev_power = next_power; + ++power_index; + } + } + value_index += 3 + 1 + power_count; + ++feature_index; + break; + } + case PositionalFeature::ELECTROSTATIC: + case PositionalFeature::COMMUTE: + case PositionalFeature::GRAPHORMER: + values[value_index] = feature_it->second; + values[value_index + 1] = 0; + values[value_index + 2] = level_it->second; + value_index += 3; + ++feature_index; + break; + } + if (feature_index != prev_feature_index) { + names[prev_feature_index] = (level_it->second == int64_t(FeatureLevel::NODE)) ? feature_name : (feature_level + std::string("_") + feature_name); + ++prev_feature_index; + } + } + assert(feature_index == num_features && prev_feature_index == num_features && value_index == num_values); + + const int64_t dims[1] = { int64_t(num_values) }; + return std::make_pair( + std::move(names), + torch_tensor_from_array(std::move(values), dims, 1, c10::ScalarType::Long)); +} + +template +at::Tensor create_edge_weights( + const GraphData& graph, + bool duplicate_edges, + bool add_self_loop, + bool use_bonds_weights, + c10::ScalarType dtype) { + + const size_t edge_coo_count = (duplicate_edges ? 2*graph.num_bonds : graph.num_bonds) + + (add_self_loop ? 
graph.num_atoms : 0); + std::unique_ptr edge_weights(new T[edge_coo_count]); + + // TODO: Use use_bonds_weights to optionally give weights + // in same order as other edge features + for (size_t i = 0; i < edge_coo_count; ++i) { + edge_weights[i] = FeatureValues::one; + } + + const int64_t dims[1] = { int64_t(edge_coo_count) }; + return torch_tensor_from_array(std::move(edge_weights), dims, 1, dtype); +} + +template +at::Tensor create_atom_features( + const GraphData& graph, + const at::Tensor& atom_property_list_onehot, + const at::Tensor& atom_property_list_float, + bool offset_carbon, + c10::ScalarType dtype, + MaskNaNStyle mask_nan_style, + T mask_nan_value, + int64_t &num_nans) { + + const size_t num_onehot_properties = (atom_property_list_onehot.scalar_type() == c10::ScalarType::Long && atom_property_list_onehot.ndimension() == 1) ? atom_property_list_onehot.size(0) : 0; + // NOTE: If TensorBase::data_ptr is ever removed, change it to TensorBase::const_data_ptr. + // Some torch version being used doesn't have const_data_ptr yet. + const int64_t* const property_list_onehot = (num_onehot_properties != 0) ? atom_property_list_onehot.data_ptr() : nullptr; + const size_t num_float_properties = (atom_property_list_float.scalar_type() == c10::ScalarType::Long && atom_property_list_float.ndimension() == 1) ? atom_property_list_float.size(0) : 0; + const int64_t* const property_list_float = (num_float_properties != 0) ? 
atom_property_list_float.data_ptr() : nullptr; + + size_t single_atom_float_count = num_float_properties; + for (size_t i = 0; i < num_onehot_properties; ++i) { + const int64_t property = property_list_onehot[i]; + single_atom_float_count += get_one_hot_atom_feature_size(AtomOneHotFeature(property)); + } + const size_t atom_float_count = single_atom_float_count * graph.num_atoms; + + std::unique_ptr atom_data(new T[atom_float_count]); + + T* current_atom_data = atom_data.get(); + + for (size_t i = 0; i < num_float_properties; ++i) { + const int64_t property = property_list_float[i]; + get_atom_float_feature(graph, current_atom_data, AtomFloatFeature(property), single_atom_float_count, offset_carbon); + ++current_atom_data; + } + for (size_t i = 0; i < num_onehot_properties; ++i) { + const int64_t property = property_list_onehot[i]; + current_atom_data += get_one_hot_atom_feature(graph, current_atom_data, AtomOneHotFeature(property), single_atom_float_count); + } + + num_nans += mask_nans(atom_data.get(), atom_float_count, mask_nan_style, mask_nan_value); + + const int64_t dims[2] = { int64_t(graph.num_atoms), int64_t(single_atom_float_count) }; + return torch_tensor_from_array(std::move(atom_data), dims, 2, dtype); +} + +template +at::Tensor create_bond_features( + const GraphData& graph, + const at::Tensor& bond_property_list, + const bool duplicate_edges, + bool add_self_loop, + c10::ScalarType dtype, + MaskNaNStyle mask_nan_style, + T mask_nan_value, + int64_t& num_nans) { + + const size_t num_properties = (bond_property_list.scalar_type() == c10::ScalarType::Long && bond_property_list.ndimension() == 1) ? bond_property_list.size(0) : 0; + const int64_t* const property_list = (num_properties != 0) ? 
bond_property_list.data_ptr() : nullptr; + + size_t single_bond_float_count = 0; + for (size_t i = 0; i < num_properties; ++i) { + const int64_t property = property_list[i]; + if (BondFeature(property) == BondFeature::TYPE_ONE_HOT || BondFeature(property) == BondFeature::STEREO_ONE_HOT) { + single_bond_float_count += get_one_hot_bond_feature_size(BondFeature(property)); + } + else { + ++single_bond_float_count; + } + } + // add_self_loop is only supported if duplicating edges + add_self_loop = add_self_loop && duplicate_edges; + size_t total_edge_count = graph.num_bonds; + if (duplicate_edges) { + total_edge_count = 2*total_edge_count + size_t(add_self_loop); + } + const size_t bond_float_count = single_bond_float_count * total_edge_count; + + std::unique_ptr bond_data(new T[bond_float_count]); + + T* current_bond_data = bond_data.get(); + + // This is the stride length (in floats) for each unique bond + const size_t duplicated_bond_float_count = duplicate_edges ? (2*single_bond_float_count) : single_bond_float_count; + + for (size_t i = 0; i < num_properties; ++i) { + const int64_t property = property_list[i]; + if (BondFeature(property) == BondFeature::TYPE_ONE_HOT || BondFeature(property) == BondFeature::STEREO_ONE_HOT) { + current_bond_data += get_one_hot_bond_feature(graph, current_bond_data, BondFeature(property), duplicated_bond_float_count); + } + else { + get_bond_float_feature(graph, current_bond_data, BondFeature(property), duplicated_bond_float_count); + ++current_bond_data; + } + } + + if (duplicate_edges) { + current_bond_data = bond_data.get(); + // Duplicate the data for each bond + for (size_t i = 0; i < graph.num_bonds; ++i) { + for (size_t j = 0; j < single_bond_float_count; ++j) { + current_bond_data[j+single_bond_float_count] = current_bond_data[j]; + } + current_bond_data += duplicated_bond_float_count; + } + if (add_self_loop) { + // Self loops don't have valid bond data, but don't treat them as NaNs. + // Fill with zeros, instead. 
+ memset(current_bond_data, 0, graph.num_atoms * graph.num_atoms); + } + } + + num_nans += mask_nans(bond_data.get(), bond_float_count, mask_nan_style, mask_nan_value); + + int64_t dims[2] = { int64_t(total_edge_count), int64_t(single_bond_float_count) }; + return torch_tensor_from_array(std::move(bond_data), dims, 2, dtype); +} + +template +void node_to_edge( + std::unique_ptr& output_ptr, + size_t& floats_per_half_edge, + const IN_T* input, + const size_t n, + const size_t floats_per_node, + const GraphData& graph) { + + // Edge order must be consistent with the edges in the graph, + // which is not necessarily lexicographic order. + const size_t num_half_edges = 2*graph.num_bonds; + floats_per_half_edge = 2 * floats_per_node; + output_ptr.reset(new OUT_T[num_half_edges * 2 * floats_per_node]); + OUT_T* output = output_ptr.get(); + for (size_t bond = 0; bond < graph.num_bonds; ++bond, output += 2*floats_per_half_edge) { + const size_t atomi = graph.bonds[bond].beginAtomIdx; + const size_t atomj = graph.bonds[bond].endAtomIdx; + const IN_T* input_i = input + atomi * floats_per_node; + const IN_T* input_j = input + atomj * floats_per_node; + // For each edge, record all of the sums followed by all of the absolute differences + OUT_T* output_sum = output; + OUT_T* output_absdiff = output + floats_per_node; + for (size_t float_index = 0; float_index < floats_per_node; ++float_index) { + const IN_T sum = input_i[float_index] + input_j[float_index]; + const IN_T diff = input_i[float_index] - input_j[float_index]; + const IN_T absdiff = std::abs(diff); + const OUT_T sum_out = FeatureValues::convertToFeatureType(sum); + const OUT_T absdiff_out = FeatureValues::convertToFeatureType(absdiff); + output_sum[float_index] = sum_out; + output_absdiff[float_index] = absdiff_out; + // Same values for opposite direction + output_sum[floats_per_half_edge + float_index] = sum_out; + output_absdiff[floats_per_half_edge + float_index] = absdiff_out; + } + } +} + +template +void 
node_to_node_pair( + std::unique_ptr& output_ptr, + size_t& floats_per_pair, + const IN_T* input, + const size_t n, + const size_t floats_per_node) { + + floats_per_pair = 2 * floats_per_node; + output_ptr.reset(new OUT_T[n * n * floats_per_pair]); + OUT_T* output = output_ptr.get(); + const IN_T* input_i = input; + for (size_t i = 0; i < n; ++i, input_i += floats_per_node) { + const IN_T* input_j = input; + for (size_t j = 0; j < n; ++j, input_j += floats_per_node, output += floats_per_pair) { + // For each pair, record all of the sums followed by all of the absolute differences + OUT_T* output_sum = output; + OUT_T* output_absdiff = output + floats_per_node; + for (size_t float_index = 0; float_index < floats_per_node; ++float_index) { + const IN_T sum = input_i[float_index] + input_j[float_index]; + const IN_T diff = input_i[float_index] - input_j[float_index]; + const IN_T absdiff = std::abs(diff); + output_sum[float_index] = FeatureValues::convertToFeatureType(sum); + output_absdiff[float_index] = FeatureValues::convertToFeatureType(absdiff); + } + } + } +} + +enum class StatOperation { + MINIMUM, + MEAN +}; + +template +T stat_init_accum(T v) { + return v; +} + +template +void stat_accum(T& accum, T v) { + switch (op) { + case StatOperation::MINIMUM: + accum = (v < accum) ? 
v : accum; + break; + case StatOperation::MEAN: + accum += v; + break; + } +} + +template +T stat_accum_finish(T accum, size_t n) { + switch (op) { + case StatOperation::MINIMUM: + return accum; + case StatOperation::MEAN: + return accum / n; + } +} + +template +void node_pair_to_node_helper( + OUT_T* output, + const IN_T* input, + const size_t n, + const size_t floats_per_pair, + const size_t node_index) { + + // for each float per pair + for (size_t float_index = 0; float_index < floats_per_pair; ++float_index, output += 2) { + // across all rows (axis 0) of column node_index, then across all columns (axis 1) of row node_index + IN_T accum = stat_init_accum(input[node_index * floats_per_pair + float_index]); + for (size_t row = 1; row < n; ++row) { + stat_accum(accum, input[(row * n + node_index) * floats_per_pair + float_index]); + } + output[0] = FeatureValues::convertToFeatureType(stat_accum_finish(accum, n)); + accum = stat_init_accum(input[node_index * n * floats_per_pair + float_index]); + for (size_t col = 1; col < n; ++col) { + stat_accum(accum, input[(node_index * n + col) * floats_per_pair + float_index]); + } + output[1] = FeatureValues::convertToFeatureType(stat_accum_finish(accum, n)); + } +} + +template +void node_pair_to_node_helper_stdev( + OUT_T* output, + const IN_T* input, + const size_t n, + const size_t floats_per_pair, + const size_t node_index) { + + // for each float per pair + for (size_t float_index = 0; float_index < floats_per_pair; ++float_index, output += 2) { + // across all rows (axis 0) of column node_index, then across all columns (axis 1) of row node_index + IN_T v = input[node_index * floats_per_pair + float_index]; + IN_T accum = v; + IN_T accum2 = v * v; + for (size_t row = 1; row < n; ++row) { + v = input[(row * n + node_index) * floats_per_pair + float_index]; + accum += v; + accum2 += v * v; + } + // NOTE: Using divisor n, the default in numpy.std, not n-1, the default elsewhere + accum /= n; + accum2 /= n; + output[0] = 
FeatureValues::convertToFeatureType(std::sqrt(accum2 - accum*accum)); + + v = input[node_index * n * floats_per_pair + float_index]; + accum = v; + accum2 = v * v; + for (size_t col = 1; col < n; ++col) { + v = input[(node_index * n + col) * floats_per_pair + float_index]; + accum += v; + accum2 += v * v; + } + // NOTE: Using divisor n, the default in numpy.std, not n-1, the default elsewhere + accum /= n; + accum2 /= n; + output[1] = FeatureValues::convertToFeatureType(std::sqrt(accum2 - accum*accum)); + } +} + +template +void node_pair_to_node( + std::unique_ptr& output_ptr, + size_t& floats_per_node, + const IN_T* input, + const size_t n, + const size_t floats_per_pair) { + + const size_t num_ops = 3; + floats_per_node = num_ops * 2 * floats_per_pair; + output_ptr.reset(new OUT_T[n * floats_per_node]); + OUT_T* output = output_ptr.get(); + for (size_t node_index = 0; node_index < n; ++node_index) { + // min, mean, stdev (using divisor N, the default in numpy.std, not N-1, the default elsewhere) + node_pair_to_node_helper(output, input, n, floats_per_pair, node_index); + output += 2 * floats_per_pair; + node_pair_to_node_helper(output, input, n, floats_per_pair, node_index); + output += 2 * floats_per_pair; + node_pair_to_node_helper_stdev(output, input, n, floats_per_pair, node_index); + output += 2 * floats_per_pair; + } +} + +template +void node_pair_to_edge( + std::unique_ptr& output_ptr, + size_t& floats_per_edge, + const IN_T* input, + const size_t n, + const size_t floats_per_pair, + const GraphData& graph) { + + // Edge order must be consistent with the edges in the graph, + // which is not necessarily lexicographic order. 
+ const size_t num_half_edges = 2*graph.num_bonds; + floats_per_edge = floats_per_pair; + output_ptr.reset(new OUT_T[num_half_edges * floats_per_pair]); + OUT_T* output = output_ptr.get(); + for (size_t bond = 0; bond < graph.num_bonds; ++bond) { + const size_t atomi = graph.bonds[bond].beginAtomIdx; + const size_t atomj = graph.bonds[bond].endAtomIdx; + const IN_T* input_ij = input + ((atomi * n) + atomj) * floats_per_pair; + for (size_t float_index = 0; float_index < floats_per_pair; ++float_index, ++output) { + *output = FeatureValues::convertToFeatureType(input_ij[float_index]); + } + + const IN_T* input_ji = input + ((atomj * n) + atomi) * floats_per_pair; + for (size_t float_index = 0; float_index < floats_per_pair; ++float_index, ++output) { + *output = FeatureValues::convertToFeatureType(input_ji[float_index]); + } + } +} + +template +void create_positional_features( + const GraphData& graph, + const at::Tensor& positional_property_list, + c10::ScalarType dtype, + MaskNaNStyle mask_nan_style, + T mask_nan_value, + int64_t& num_nans, + int64_t& nan_tensor_index, + std::vector& tensors) { + + const size_t size = (positional_property_list.scalar_type() == c10::ScalarType::Long && positional_property_list.ndimension() == 1) ? positional_property_list.size(0) : 0; + const int64_t* const property_list = (size >= 3) ? 
positional_property_list.data_ptr() : nullptr; + + if (property_list == nullptr) { + return; + } + NeighbourData neighbours = construct_neighbours(graph); + + LaplacianData laplacian_data; + LaplacianData laplacian_data_comp; + std::vector laplacian_pseudoinverse; + std::vector matrix; + size_t i = 0; + while (size >= i + 3) { + int64_t property = property_list[i]; + int64_t current_size = property_list[i + 1]; + FeatureLevel feature_level = FeatureLevel(property_list[i + 2]); + i += 3; + if (i + current_size > size || i + current_size < i) { + break; + } + FeatureLevel base_level; + std::unique_ptr base_data; + int64_t base_dims[3] = { 1,1,1 }; + size_t base_dim_count; + if ((property == int64_t(PositionalFeature::LAPLACIAN_EIGENVEC) || property == int64_t(PositionalFeature::LAPLACIAN_EIGENVAL)) && current_size == 3) { + size_t num_pos = (property_list[i] >= 0) ? size_t(property_list[i]) : 0; + Normalization normalization = Normalization(property_list[i + 1]); + bool disconnected_comp = (property_list[i + 2] != 0); + i += 3; + LaplacianData& current_data = disconnected_comp ? laplacian_data : laplacian_data_comp; + if (current_data.eigenvalues.size() == 0 || current_data.normalization != normalization) { + compute_laplacian_eigendecomp(graph.num_atoms, neighbours.neighbour_starts, neighbours.neighbours, normalization, laplacian_data, disconnected_comp); + } + + const bool isVec = (property == int64_t(PositionalFeature::LAPLACIAN_EIGENVEC)); + base_level = FeatureLevel::NODE; + base_dims[0] = graph.num_atoms; + base_dims[1] = num_pos; + base_dim_count = 2; + base_data.reset(new double[graph.num_atoms * num_pos]); + + // Ensure exactly the tensor dimensions of num_atoms x num_pos before changing the level. 
+ if (isVec) { + double* data = base_data.get(); + for (size_t atom_index = 0; atom_index < graph.num_atoms; ++atom_index, data += num_pos) { + for (size_t i = 0; i < num_pos && i < graph.num_atoms; ++i) { + // Row eigenvectors to column eigenvectors + data[i] = current_data.vectors[atom_index + i * graph.num_atoms]; + // There's no plausible way the eigenvectors should end up with NaNs, + // so just assert in debug builds. + assert(std::isfinite(data[i])); + } + // NOTE: Do not treat extra values as NaN. The original code filled them with zeros. + for (size_t i = graph.num_atoms; i < num_pos; ++i) { + data[i] = 0; + } + } + } + else { + double* data = base_data.get(); + const bool is_multi_component = (current_data.eigenvalues.size() == size_t(graph.num_atoms)*graph.num_atoms); + assert(is_multi_component || (current_data.eigenvalues.size() == graph.num_atoms)); + size_t source_row_start = 0; + for (size_t atom_index = 0; atom_index < graph.num_atoms; ++atom_index, data += num_pos) { + for (size_t i = 0; i < num_pos && i < graph.num_atoms; ++i) { + // Duplicate the eigenvalue for each atom + data[i] = current_data.eigenvalues[source_row_start + i]; + // There's no plausible way the eigenvalues should end up with NaNs, + // so just assert in debug builds. + assert(std::isfinite(data[i])); + } + // NOTE: Do not treat extra values as NaN. The original code filled them with zeros. 
+ for (size_t i = graph.num_atoms; i < num_pos; ++i) { + data[i] = 0; + } + if (is_multi_component) { + source_row_start += graph.num_atoms; + } + } + } + } + else if ((property == int64_t(PositionalFeature::RW_RETURN_PROBS) || property == int64_t(PositionalFeature::RW_TRANSITION_PROBS)) && current_size >= 1) { + int space_dim = property_list[i]; + ++i; + uint32_t num_powers = current_size - 1; + const uint64_t* powers = reinterpret_cast(property_list + i); + i += num_powers; + const bool isProbs = (property == int64_t(PositionalFeature::RW_RETURN_PROBS)); + RandomWalkDataOption option = isProbs ? RandomWalkDataOption::PROBABILITIES : RandomWalkDataOption::MATRIX; + + std::vector output; + compute_rwse(num_powers, powers, graph.num_atoms, neighbours.neighbour_starts, neighbours.neighbours, option, output, space_dim); + + base_level = isProbs ? FeatureLevel::NODE : FeatureLevel::NODEPAIR; + + base_dims[0] = graph.num_atoms; + base_dims[1] = isProbs ? num_powers : graph.num_atoms; + base_dims[2] = isProbs ? 1 : num_powers; + base_dim_count = isProbs ? 
2 : 3; + base_data.reset(new double[output.size()]); + std::copy(output.begin(), output.end(), base_data.get()); + } + else if (property == int64_t(PositionalFeature::ELECTROSTATIC) && current_size == 0) { + const double* weights = nullptr; + compute_electrostatic_interactions(graph.num_atoms, neighbours.neighbour_starts, neighbours.neighbours, laplacian_data, laplacian_pseudoinverse, matrix, weights); + + base_level = FeatureLevel::NODEPAIR; + base_dims[0] = graph.num_atoms; + base_dims[1] = graph.num_atoms; + base_dim_count = 2; + assert(matrix.size() == graph.num_atoms * size_t(graph.num_atoms)); + base_data.reset(new double[matrix.size()]); + std::copy(matrix.begin(), matrix.end(), base_data.get()); + } + else if (property == int64_t(PositionalFeature::COMMUTE) && current_size == 0) { + const double* weights = nullptr; + compute_commute_distances(graph.num_atoms, neighbours.neighbour_starts, neighbours.neighbours, laplacian_data, laplacian_pseudoinverse, matrix, weights); + + base_level = FeatureLevel::NODEPAIR; + base_dims[0] = graph.num_atoms; + base_dims[1] = graph.num_atoms; + base_dim_count = 2; + assert(matrix.size() == graph.num_atoms * size_t(graph.num_atoms)); + base_data.reset(new double[matrix.size()]); + std::copy(matrix.begin(), matrix.end(), base_data.get()); + } + else if (property == int64_t(PositionalFeature::GRAPHORMER) && current_size == 0) { + std::vector> queue; + std::vector all_pairs_distances; + compute_graphormer_distances(graph.num_atoms, neighbours.neighbour_starts, neighbours.neighbours, queue, all_pairs_distances); + + base_level = FeatureLevel::NODEPAIR; + base_dims[0] = graph.num_atoms; + base_dims[1] = graph.num_atoms; + base_dim_count = 2; + assert(all_pairs_distances.size() == graph.num_atoms * size_t(graph.num_atoms)); + base_data.reset(new double[all_pairs_distances.size()]); + std::copy(all_pairs_distances.begin(), all_pairs_distances.end(), base_data.get()); + } + + if (base_data.get() == nullptr) { + continue; + } + + // 
Change the level and convert to the correct type if needed. + std::unique_ptr final_data; + int64_t final_dims[3]; + std::copy(base_dims, base_dims + 3, final_dims); + size_t final_num_dims = base_dim_count; + if (feature_level != base_level) { + if (base_level == FeatureLevel::NODE) { + if (feature_level == FeatureLevel::EDGE) { + size_t floats_per_half_edge; + node_to_edge(final_data, floats_per_half_edge, base_data.get(), base_dims[0], base_dims[1], graph); + final_dims[0] = 2 * graph.num_bonds; + final_dims[1] = floats_per_half_edge; + final_dims[2] = 1; + } + else if (feature_level == FeatureLevel::NODEPAIR) { + size_t floats_per_pair; + node_to_node_pair(final_data, floats_per_pair, base_data.get(), base_dims[0], base_dims[1]); + final_num_dims = 3; + final_dims[1] = base_dims[0]; + final_dims[2] = floats_per_pair; + } + else { + // Not implemented + } + } + else if (base_level == FeatureLevel::NODEPAIR) { + if (feature_level == FeatureLevel::NODE) { + size_t floats_per_node; + node_pair_to_node(final_data, floats_per_node, base_data.get(), base_dims[0], base_dims[2]); + final_num_dims = 2; + final_dims[1] = floats_per_node; + final_dims[2] = 1; + } + else if (feature_level == FeatureLevel::EDGE) { + size_t floats_per_edge; + node_pair_to_edge(final_data, floats_per_edge, base_data.get(), base_dims[0], base_dims[2], graph); + final_num_dims = 2; + final_dims[0] = 2 * graph.num_bonds; + final_dims[1] = floats_per_edge; + final_dims[2] = 1; + } + else { + // Not implemented + } + } + else { + // Not implemented + } + } + else if (dtype != c10::ScalarType::Double) { + // Just convert + const size_t total_num_floats = final_dims[0] * final_dims[1] * final_dims[2]; + final_data.reset(new T[total_num_floats]); + for (size_t i = 0; i < total_num_floats; ++i) { + final_data[i] = FeatureValues::convertToFeatureType(base_data[i]); + } + } + else { + // Perfect match out of the box + // This will only be hit if T is double, but it still needs to compile + // for other 
cases, which is why the reinterpret_cast is needed. + final_data.reset(reinterpret_cast(base_data.release())); + } + + if (final_data.get() == nullptr) { + continue; + } + + tensors.push_back(torch_tensor_from_array(std::move(final_data), final_dims, final_num_dims, dtype)); + } +} + +template +void create_all_features( + const GraphData& graph, + const at::Tensor& atom_property_list_onehot, + const at::Tensor& atom_property_list_float, + bool create_conformer_feature, + const at::Tensor& bond_property_list, + const at::Tensor& positional_property_list, + bool duplicate_edges, + bool add_self_loop, + bool use_bonds_weights, + bool offset_carbon, + c10::ScalarType dtype, + MaskNaNStyle mask_nan_style, + T mask_nan_value, + int64_t& num_nans, + int64_t& nan_tensor_index, + std::vector& tensors) { + + if (mask_nan_style == MaskNaNStyle::NONE) { + // In some cases, the NONE and REPLACE styles can be combined. + mask_nan_value = FeatureValues::nan_value; + } + at::Tensor edge_weights_tensor = create_edge_weights( + graph, + duplicate_edges, + add_self_loop, + use_bonds_weights, + dtype); + at::Tensor atom_features_tensor = create_atom_features( + graph, + atom_property_list_onehot, + atom_property_list_float, + offset_carbon, + dtype, + mask_nan_style, + mask_nan_value, + num_nans); + if (num_nans != 0) { + nan_tensor_index = tensors.size()+1; + return; + } + at::Tensor bond_features_tensor = create_bond_features( + graph, + bond_property_list, + duplicate_edges, + add_self_loop, + dtype, + mask_nan_style, + mask_nan_value, + num_nans); + if (num_nans != 0) { + nan_tensor_index = tensors.size()+2; + return; + } + tensors.push_back(std::move(edge_weights_tensor)); + tensors.push_back(std::move(atom_features_tensor)); + tensors.push_back(std::move(bond_features_tensor)); + if (create_conformer_feature) { + at::Tensor conformer_features_tensor = get_conformer_features( + *graph.mol, + dtype, + mask_nan_style, + mask_nan_value, + num_nans); + if (num_nans != 0) { + 
nan_tensor_index = tensors.size(); + return; + } + tensors.push_back(std::move(conformer_features_tensor)); + } + create_positional_features( + graph, + positional_property_list, + dtype, + mask_nan_style, + mask_nan_value, + num_nans, + nan_tensor_index, + tensors); +} + +std::tuple, int64_t, int64_t> featurize_smiles( + const std::string& smiles_string, + const at::Tensor& atom_property_list_onehot, + const at::Tensor& atom_property_list_float, + bool create_conformer_feature, + const at::Tensor& bond_property_list, + const at::Tensor& positional_property_list, + bool duplicate_edges, + bool add_self_loop, + bool explicit_H, + bool use_bonds_weights, + bool offset_carbon, + int dtype_int, + int mask_nan_style_int, + double mask_nan_value) { + + GraphData graph = read_graph(smiles_string, explicit_H); + + const size_t edge_coo_count = 2*graph.num_bonds + (add_self_loop ? graph.num_atoms : 0); + std::unique_ptr edge_index(new int64_t[2*edge_coo_count]); + for (size_t i = 0; i < graph.num_bonds; ++i) { + // PyG has all directed edge begin indices followed by all end indices. 
+ edge_index[2*i] = graph.bonds[i].beginAtomIdx; + edge_index[2*i+1] = graph.bonds[i].endAtomIdx; + edge_index[2*i + edge_coo_count] = graph.bonds[i].endAtomIdx; + edge_index[2*i+1 + edge_coo_count] = graph.bonds[i].beginAtomIdx; + } + if (add_self_loop) { + for (size_t i = 0; i < graph.num_atoms; ++i) { + edge_index[2*graph.num_bonds + i] = i; + edge_index[2*graph.num_bonds + i + edge_coo_count] = i; + } + } + int64_t edge_coo_dims[2] = { int64_t(2), int64_t(edge_coo_count) }; + at::Tensor edge_coo_tensor = torch_tensor_from_array(std::move(edge_index), edge_coo_dims, 2, c10::ScalarType::Long); + + std::vector tensors; + tensors.push_back(std::move(edge_coo_tensor)); + c10::ScalarType dtype = c10::ScalarType(dtype_int); + MaskNaNStyle mask_nan_style = MaskNaNStyle(mask_nan_style_int); + int64_t num_nans = 0; + int64_t nan_tensor_index = -1; + if (dtype == c10::ScalarType::Half) { + create_all_features( + graph, + atom_property_list_onehot, + atom_property_list_float, + create_conformer_feature, + bond_property_list, + positional_property_list, + duplicate_edges, + add_self_loop, + use_bonds_weights, + offset_carbon, + dtype, + mask_nan_style, + FeatureValues::convertToFeatureType(mask_nan_value), + num_nans, + nan_tensor_index, + tensors); + } + else if (dtype == c10::ScalarType::Float) { + create_all_features( + graph, + atom_property_list_onehot, + atom_property_list_float, + create_conformer_feature, + bond_property_list, + positional_property_list, + duplicate_edges, + add_self_loop, + use_bonds_weights, + offset_carbon, + dtype, + mask_nan_style, + FeatureValues::convertToFeatureType(mask_nan_value), + num_nans, + nan_tensor_index, + tensors); + } + else if (dtype == c10::ScalarType::Double) { + create_all_features( + graph, + atom_property_list_onehot, + atom_property_list_float, + create_conformer_feature, + bond_property_list, + positional_property_list, + duplicate_edges, + add_self_loop, + use_bonds_weights, + offset_carbon, + dtype, + mask_nan_style, + 
FeatureValues::convertToFeatureType(mask_nan_value), + num_nans, + nan_tensor_index, + tensors); + } + + return std::make_tuple(tensors, num_nans, nan_tensor_index); +} diff --git a/graphium/graphium_cpp/features.h b/graphium/graphium_cpp/features.h new file mode 100644 index 000000000..b98622ee0 --- /dev/null +++ b/graphium/graphium_cpp/features.h @@ -0,0 +1,275 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include +#include +#include +#include + +// Torch tensor headers +#include +#include + +#include +#include + +// PyBind and Torch headers +#include +#include +#include + +enum class FeatureLevel { + NODE, + EDGE, + NODEPAIR, + GRAPH +}; + +enum class AtomFloatFeature { + ATOMIC_NUMBER, + MASS, + VALENCE, + IMPLICIT_VALENCE, + HYBRIDIZATION, + CHIRALITY, + AROMATIC, + IN_RING, + MIN_RING, + MAX_RING, + NUM_RING, + DEGREE, + RADICAL_ELECTRON, + FORMAL_CHARGE, + VDW_RADIUS, + COVALENT_RADIUS, + ELECTRONEGATIVITY, + IONIZATION, + MELTING_POINT, + METAL, + GROUP, + PERIOD, + SINGLE_BOND, + AROMATIC_BOND, + DOUBLE_BOND, + TRIPLE_BOND, + IS_CARBON, + UNKNOWN +}; + +enum class AtomOneHotFeature { + ATOMIC_NUM, + DEGREE, + VALENCE, + IMPLICIT_VALENCE, + HYBRIDIZATION, + CHIRALITY, + PHASE, + TYPE, + GROUP, + PERIOD +}; + +enum class BondFeature { + TYPE_FLOAT, + TYPE_ONE_HOT, + IN_RING, + CONJUGATED, + STEREO_ONE_HOT, + CONFORMER_BOND_LENGTH, + ESTIMATED_BOND_LENGTH +}; + +enum class PositionalFeature { + LAPLACIAN_EIGENVEC, + LAPLACIAN_EIGENVAL, + RW_RETURN_PROBS, + RW_TRANSITION_PROBS, + ELECTROSTATIC, + COMMUTE, + GRAPHORMER +}; + +enum class Normalization { + NONE, + SYMMETRIC, + INVERSE +}; + +enum class MaskNaNStyle { + NONE, + REPORT, + REPLACE +}; + +struct PositionalOptions { + PositionalFeature feature; + FeatureLevel level; + + std::vector rw_powers; + int rw_space_dim = 0; + + uint32_t laplacian_num_pos = 8; + Normalization 
laplacian_normalization = Normalization::NONE;
+    bool laplacian_disconnected_comp = true;
+};
+
+// Per-dtype traits used throughout featurization: the canonical zero/one/NaN
+// constants, conversion from a C++ floating-point value to the storage type,
+// and a finiteness test.  The int16_t specialization stores IEEE 754
+// half-precision (torch Half) bit patterns.
+template<typename T>
+struct FeatureValues {};
+
+template<> struct FeatureValues<int16_t> {
+    static constexpr int16_t zero = 0x0000;
+    static constexpr int16_t one = 0x3C00;
+    static constexpr int16_t nan_value = 0x7C01;
+
+    template<typename T>
+    static int16_t convertToFeatureType(T inputType) {
+        static_assert(std::is_floating_point_v<T>);
+        return c10::detail::fp16_ieee_from_fp32_value(float(inputType));
+    }
+
+    static constexpr bool is_finite(int16_t v) {
+        // If the exponent bits are the maximum value, v is infinite or NaN
+        return (v & 0x7C00) != 0x7C00;
+    }
+
+    using MathType = float;
+};
+template<> struct FeatureValues<float> {
+    static constexpr float zero = 0.0f;
+    static constexpr float one = 1.0f;
+    static constexpr float nan_value = std::numeric_limits<float>::quiet_NaN();
+
+    template<typename T>
+    static float convertToFeatureType(T inputType) {
+        static_assert(std::is_floating_point_v<T>);
+        return float(inputType);
+    }
+
+    static bool is_finite(float v) {
+        return std::isfinite(v);
+    }
+
+    using MathType = float;
+};
+template<> struct FeatureValues<double> {
+    static constexpr double zero = 0.0;
+    static constexpr double one = 1.0;
+    static constexpr double nan_value = std::numeric_limits<double>::quiet_NaN();
+
+    template<typename T>
+    static double convertToFeatureType(T inputType) {
+        static_assert(std::is_floating_point_v<T>);
+        return double(inputType);
+    }
+
+    static constexpr bool is_finite(double v) {
+        return std::isfinite(v);
+    }
+
+    using MathType = double;
+};
+
+// Applies the NaN-masking policy to the n values in data:
+// NONE ignores NaNs, REPLACE overwrites them with `value`, and
+// REPORT returns how many non-finite values are present (data unmodified).
+template<typename T>
+constexpr int64_t mask_nans(T* data, size_t n, MaskNaNStyle style, T value) {
+    if (style == MaskNaNStyle::NONE) {
+        return 0;
+    }
+    // BUG FIX: the REPLACE and REPORT branches were swapped (the condition
+    // previously read `style != MaskNaNStyle::REPLACE`), so REPORT silently
+    // overwrote NaNs with `value` and REPLACE counted them without replacing.
+    if (style == MaskNaNStyle::REPLACE) {
+        for (size_t i = 0; i < n; ++i) {
+            if (!FeatureValues<T>::is_finite(data[i])) {
+                data[i] = value;
+            }
+        }
+        return 0;
+    }
+
+    // BUG FIX: the assert previously referenced an undeclared name
+    // `mask_nan_style`; the parameter is `style`.
+    assert(style == MaskNaNStyle::REPORT);
+    int64_t num_nans = 0;
+    for (size_t i = 0; i < n; ++i) {
+        num_nans += (!FeatureValues<T>::is_finite(data[i]));
+    }
+    return num_nans;
+}
+
+
+// This is just a function to provide to torch, so that we don't have to copy
+// the tensor data to put it in a torch tensor, and torch can delete the data
+// when it's no longer needed.
+template<typename T>
+void deleter(void* p) {
+    delete[](T*)p;
+}
+
+// Wraps a heap-allocated array in a torch tensor without copying; the tensor
+// takes ownership and frees the array via deleter<T> on destruction.
+template<typename T>
+at::Tensor torch_tensor_from_array(std::unique_ptr<T[]>&& source, const int64_t* dims, size_t num_dims, c10::ScalarType type) {
+    return at::from_blob(
+        source.release(),
+        at::IntArrayRef(dims, num_dims),
+        deleter<T>, c10::TensorOptions(type));
+}
+
+// Most of the data needed about an atom
+struct CompactAtom {
+    uint8_t atomicNum;
+    uint8_t totalDegree;
+    int8_t formalCharge;
+    uint8_t chiralTag;
+    uint8_t totalNumHs;
+    uint8_t hybridization;
+    bool isAromatic;
+    float mass;
+};
+
+// Most of the data needed about a bond
+struct CompactBond {
+    uint8_t bondType;
+    bool isConjugated;
+    bool isInRing;
+    uint8_t stereo;
+    uint32_t beginAtomIdx;
+    uint32_t endAtomIdx;
+};
+
+// Data representing a molecule before featurization
+// NOTE(review): the element types of these members were stripped in transit;
+// reconstructed as CompactAtom[]/CompactBond[]/RDKit::RWMol from how
+// featurize_smiles and the float-feature code access them -- confirm upstream.
+struct GraphData {
+    const size_t num_atoms;
+    std::unique_ptr<const CompactAtom[]> atoms;
+    const size_t num_bonds;
+    std::unique_ptr<const CompactBond[]> bonds;
+
+    std::unique_ptr<RDKit::RWMol> mol;
+};
+
+
+// These functions are in features.cpp, and declared here so that
+// graphium_cpp.cpp can expose them to Python via pybind.
+at::Tensor atom_float_feature_names_to_tensor(const std::vector& features); +at::Tensor atom_onehot_feature_names_to_tensor(const std::vector& features); +at::Tensor bond_feature_names_to_tensor(const std::vector& features); +std::pair,at::Tensor> positional_feature_options_to_tensor(const pybind11::dict& dict); +std::tuple, int64_t, int64_t> featurize_smiles( + const std::string& smiles_string, + const at::Tensor& atom_property_list_onehot, + const at::Tensor& atom_property_list_float, + bool create_conformer_feature, + const at::Tensor& bond_property_list, + const at::Tensor& positional_property_list, + bool duplicate_edges = true, + bool add_self_loop = false, + bool explicit_H = false, + bool use_bonds_weights = false, + bool offset_carbon = true, + int dtype_int = int(c10::ScalarType::Half), + int mask_nan_style_int = int(MaskNaNStyle::REPORT), + double mask_nan_value = 0.0); + + +// parse_mol is in graphium_cpp.cpp, but is declared in this header so +// that both labels.cpp and features.cpp can call it. +std::unique_ptr parse_mol( + const std::string& smiles_string, + bool explicit_H, + bool ordered = true); diff --git a/graphium/graphium_cpp/float_features.cpp b/graphium/graphium_cpp/float_features.cpp new file mode 100644 index 000000000..315a99f64 --- /dev/null +++ b/graphium/graphium_cpp/float_features.cpp @@ -0,0 +1,526 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +#include "float_features.h" + +#include "features.h" + +#include +#include +#include +#include +#include +#include + +#include +#include + +static constexpr double qNaN = std::numeric_limits::quiet_NaN(); + +// This table is from the Electronegativity column of graphium/features/periodic_table.csv +const double electronegativityTable[] = { + 2.20, qNaN, 0.98, 1.57, 2.04, 2.55, 3.04, 3.44, 3.98, + qNaN, 0.93, 1.31, 1.61, 1.90, 2.19, 2.58, 3.16, qNaN, 0.82, + 1.00, 1.36, 1.54, 1.63, 1.66, 1.55, 1.83, 1.88, 1.91, 1.90, + 1.65, 1.81, 2.01, 2.18, 2.55, 2.96, qNaN, 0.82, 0.95, 1.22, + 1.33, 1.60, 2.16, 1.90, 2.20, 2.28, 2.20, 1.93, 1.69, 1.78, + 1.96, 2.05, 2.10, 2.66, qNaN, 0.79, 0.89, 1.10, 1.12, 1.13, + 1.14, 1.13, 1.17, 1.20, 1.20, 1.20, 1.22, 1.23, 1.24, 1.25, + 1.10, 1.27, 1.30, 1.50, 2.36, 1.90, 2.20, 2.20, 2.28, 2.54, + 2.00, 2.04, 2.33, 2.02, 2.00, 2.20, qNaN, 0.70, 0.90, 1.10, + 1.30, 1.50, 1.38, 1.36, 1.28, 1.30, 1.30, 1.30, 1.30, 1.30, + 1.30, 1.30, 1.30, qNaN, qNaN, qNaN, qNaN, qNaN, qNaN, qNaN, + qNaN, qNaN, qNaN, qNaN, qNaN, qNaN, qNaN, qNaN, qNaN, +}; + +// This table is from the FirstIonization column of graphium/features/periodic_table.csv +const double firstIonizationTable[] = { + 13.5984, 24.5874, 5.3917, 9.3227, 8.2980, 11.2603, 14.5341, 13.6181, 17.4228, + 21.5645, 5.1391, 7.6462, 5.9858, 8.1517, 10.4867, 10.3600, 12.9676, 15.7596, 4.3407, + 6.1132, 6.5615, 6.8281, 6.7462, 6.7665, 7.4340, 7.9024, 7.8810, 7.6398, 7.7264, + 9.3942, 5.9993, 7.8994, 9.7886, 9.7524, 11.8138, 13.9996, 4.1771, 5.6949, 6.2173, + 6.6339, 6.7589, 7.0924, 7.2800, 7.3605, 7.4589, 8.3369, 7.5762, 8.9938, 5.7864, + 7.3439, 8.6084, 9.0096, 10.4513, 12.1298, 3.8939, 5.2117, 5.5769, 5.5387, 5.4730, + 5.5250, 5.5820, 5.6437, 5.6704, 6.1501, 5.8638, 5.9389, 6.0215, 6.1077, 6.1843, + 6.2542, 5.4259, 6.8251, 7.5496, 7.8640, 7.8335, 8.4382, 8.9670, 8.9587, 9.2255, + 10.4375, 6.1082, 7.4167, 7.2856, 8.4170, 9.3000, 10.7485, 4.0727, 5.2784, 
5.1700, + 6.3067, 5.8900, 6.1941, 6.2657, 6.0262, 5.9738, 5.9915, 6.1979, 6.2817, 6.4200, + 6.5000, 6.5800, 6.6500, qNaN , qNaN , qNaN , qNaN , qNaN , qNaN , qNaN , + qNaN , qNaN , qNaN , qNaN , qNaN , qNaN , qNaN , qNaN , qNaN , +}; + +// This table is from the MeltingPoint column of graphium/features/periodic_table.csv +const double meltingPointTable[] = { + 14.175, qNaN , 453.85, 1560.15, 2573.15, 3948.15, 63.29, 50.50, 53.63, + 24.703, 371.15, 923.15, 933.40, 1683.15, 317.25, 388.51, 172.31, 83.96, 336.50, + 1112.15, 1812.15, 1933.15, 2175.15, 2130.15, 1519.15, 1808.15, 1768.15, 1726.15, 1357.75, + 692.88, 302.91, 1211.45, 1090.15, 494.15, 266.05, 115.93, 312.79, 1042.15, 1799.15, + 2125.15, 2741.15, 2890.15, 2473.15, 2523.15, 2239.15, 1825.15, 1234.15, 594.33, 429.91, + 505.21, 904.05, 722.80, 386.65, 161.45, 301.70, 1002.15, 1193.15, 1071.15, 1204.15, + 1289.15, 1204.15, 1345.15, 1095.15, 1585.15, 1630.15, 1680.15, 1743.15, 1795.15, 1818.15, + 1097.15, 1936.15, 2500.15, 3269.15, 3680.15, 3453.15, 3300.15, 2716.15, 2045.15, 1337.73, + 234.43, 577.15, 600.75, 544.67, 527.15, 575.15, 202.15, 300.15, 973.15, 1323.15, + 2028.15, 1873.15, 1405.15, 913.15, 913.15, 1267.15, 1340.15, 1259.15, 1925.15, 1133.15, + qNaN , qNaN , qNaN , qNaN , qNaN , qNaN , qNaN , qNaN , qNaN , qNaN , + qNaN , qNaN , qNaN , qNaN , qNaN , qNaN , qNaN , qNaN , qNaN , +}; + +// This table is 2x the Metal column plus the Metalloid column of graphium/features/periodic_table.csv +const uint8_t metalTable[] = { + 0, 0, 2, 2, 1, 0, 0, 0, 0, + 0, 2, 2, 2, 1, 0, 0, 0, 0, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 1, 1, 0, 0, 0, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 1, 1, 0, 0, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 1, 0, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 0, 0, +}; + +template +void get_atom_float_feature(const GraphData& graph, T* data, AtomFloatFeature feature, size_t 
stride, bool offset_carbon) { + const uint32_t num_atoms = graph.num_atoms; + constexpr uint32_t carbon_atomic_num = 6; + using MT = typename FeatureValues::MathType; + switch (feature) { + case AtomFloatFeature::ATOMIC_NUMBER: { + const MT offset = offset_carbon ? carbon_atomic_num : 0; + for (uint32_t i = 0; i < num_atoms; ++i) { + *data = FeatureValues::convertToFeatureType((MT(graph.atoms[i].atomicNum) - offset) / MT(5)); + data += stride; + } + return; + } + case AtomFloatFeature::MASS: { + const RDKit::ROMol& mol = *graph.mol.get(); + constexpr MT carbon_mass = MT(12.011); + const MT offset = offset_carbon ? carbon_mass : 0; + for (uint32_t i = 0; i < num_atoms; ++i) { + *data = FeatureValues::convertToFeatureType((MT(mol.getAtomWithIdx(i)->getMass()) - offset) / MT(10)); + data += stride; + } + return; + } + case AtomFloatFeature::VALENCE: { + const RDKit::ROMol& mol = *graph.mol.get(); + const MT offset = offset_carbon ? 4 : 0; + for (uint32_t i = 0; i < num_atoms; ++i) { + *data = FeatureValues::convertToFeatureType(MT(mol.getAtomWithIdx(i)->getTotalValence()) - offset); + data += stride; + } + return; + } + case AtomFloatFeature::IMPLICIT_VALENCE: { + const RDKit::ROMol& mol = *graph.mol.get(); + for (uint32_t i = 0; i < num_atoms; ++i) { + *data = FeatureValues::convertToFeatureType(MT(mol.getAtomWithIdx(i)->getImplicitValence())); + data += stride; + } + return; + } + case AtomFloatFeature::HYBRIDIZATION: { + const RDKit::ROMol& mol = *graph.mol.get(); + for (uint32_t i = 0; i < num_atoms; ++i) { + *data = FeatureValues::convertToFeatureType(MT(mol.getAtomWithIdx(i)->getHybridization())); + data += stride; + } + return; + } + case AtomFloatFeature::CHIRALITY: { + const RDKit::ROMol& mol = *graph.mol.get(); + for (uint32_t i = 0; i < num_atoms; ++i) { + const RDKit::Atom* atom = mol.getAtomWithIdx(i); + std::string prop; + bool has_prop = atom->getPropIfPresent(RDKit::common_properties::_CIPCode, prop); + *data = 
FeatureValues::convertToFeatureType(has_prop ? MT(prop.length() == 1 && prop[0] == 'R') : MT(2)); + data += stride; + } + return; + } + case AtomFloatFeature::AROMATIC: { + const RDKit::ROMol& mol = *graph.mol.get(); + for (uint32_t i = 0; i < num_atoms; ++i) { + *data = FeatureValues::convertToFeatureType(MT(mol.getAtomWithIdx(i)->getIsAromatic())); + data += stride; + } + return; + } + case AtomFloatFeature::IN_RING: { + const RDKit::ROMol& mol = *graph.mol.get(); + const RDKit::RingInfo* ring_info = mol.getRingInfo(); + for (uint32_t i = 0; i < num_atoms; ++i) { + *data = FeatureValues::convertToFeatureType(MT(ring_info->numAtomRings(i) != 0)); + data += stride; + } + return; + } + case AtomFloatFeature::MIN_RING: { + const RDKit::ROMol& mol = *graph.mol.get(); + const RDKit::RingInfo* ring_info = mol.getRingInfo(); + for (uint32_t i = 0; i < num_atoms; ++i) { + *data = FeatureValues::convertToFeatureType(MT(ring_info->minAtomRingSize(i))); + data += stride; + } + return; + } + case AtomFloatFeature::MAX_RING: { + const RDKit::ROMol& mol = *graph.mol.get(); + for (uint32_t i = 0; i < num_atoms; ++i) { + data[i * stride] = FeatureValues::zero; + } + const RDKit::RingInfo* ring_info = mol.getRingInfo(); + const auto& rings = ring_info->atomRings(); + for (const auto& ring : rings) { + const T size = FeatureValues::convertToFeatureType(MT(ring.size())); + for (const auto atom_index : ring) { + if (size > data[atom_index * stride]) { + data[atom_index * stride] = size; + } + } + } + return; + } + case AtomFloatFeature::NUM_RING: { + const RDKit::ROMol& mol = *graph.mol.get(); + const RDKit::RingInfo* ring_info = mol.getRingInfo(); + for (uint32_t i = 0; i < num_atoms; ++i) { + *data = FeatureValues::convertToFeatureType(MT(ring_info->numAtomRings(i))); + data += stride; + } + return; + } + case AtomFloatFeature::DEGREE: { + const RDKit::ROMol& mol = *graph.mol.get(); + const MT offset = offset_carbon ? 
2 : 0; + for (uint32_t i = 0; i < num_atoms; ++i) { + *data = FeatureValues::convertToFeatureType(MT(mol.getAtomWithIdx(i)->getTotalDegree()) - offset); + data += stride; + } + return; + } + case AtomFloatFeature::RADICAL_ELECTRON: { + const RDKit::ROMol& mol = *graph.mol.get(); + for (uint32_t i = 0; i < num_atoms; ++i) { + *data = FeatureValues::convertToFeatureType(MT(mol.getAtomWithIdx(i)->getNumRadicalElectrons())); + data += stride; + } + return; + } + case AtomFloatFeature::FORMAL_CHARGE: { + for (uint32_t i = 0; i < num_atoms; ++i) { + *data = FeatureValues::convertToFeatureType(MT(graph.atoms[i].formalCharge)); + data += stride; + } + return; + } + case AtomFloatFeature::VDW_RADIUS: { + const RDKit::PeriodicTable* table = RDKit::PeriodicTable::getTable(); + const MT offset = offset_carbon ? MT(table->getRvdw(carbon_atomic_num)) : MT(0); + for (uint32_t i = 0; i < num_atoms; ++i) { + *data = FeatureValues::convertToFeatureType(MT(table->getRvdw(graph.atoms[i].atomicNum)) - offset); + data += stride; + } + return; + } + case AtomFloatFeature::COVALENT_RADIUS: { + const RDKit::PeriodicTable* table = RDKit::PeriodicTable::getTable(); + const MT offset = offset_carbon ? MT(table->getRcovalent(carbon_atomic_num)) : MT(0); + for (uint32_t i = 0; i < num_atoms; ++i) { + *data = FeatureValues::convertToFeatureType(MT(table->getRcovalent(graph.atoms[i].atomicNum)) - offset); + data += stride; + } + return; + } + case AtomFloatFeature::ELECTRONEGATIVITY: { + const MT offset = offset_carbon ? 
MT(electronegativityTable[carbon_atomic_num-1]) : MT(0); + for (uint32_t i = 0; i < num_atoms; ++i, data += stride) { + const uint32_t atomic_num = graph.atoms[i].atomicNum; + if (atomic_num <= 0 || atomic_num > 118 || electronegativityTable[atomic_num - 1] == 0) { + *data = FeatureValues::nan_value; + continue; + } + *data = FeatureValues::convertToFeatureType(MT(electronegativityTable[atomic_num - 1]) - offset); + } + return; + } + case AtomFloatFeature::IONIZATION: { + const T offset = offset_carbon ? T(firstIonizationTable[carbon_atomic_num-1]) : T(0); + for (uint32_t i = 0; i < num_atoms; ++i, data += stride) { + const uint32_t atomic_num = graph.atoms[i].atomicNum; + if (atomic_num <= 0 || atomic_num > 118 || firstIonizationTable[atomic_num - 1] == 0) { + *data = FeatureValues::nan_value; + continue; + } + *data = FeatureValues::convertToFeatureType((MT(firstIonizationTable[atomic_num - 1]) - offset) / MT(5)); + } + return; + } + case AtomFloatFeature::MELTING_POINT: { + const MT offset = offset_carbon ? MT(meltingPointTable[carbon_atomic_num-1]) : MT(0); + for (uint32_t i = 0; i < num_atoms; ++i, data += stride) { + const uint32_t atomic_num = graph.atoms[i].atomicNum; + if (atomic_num <= 0 || atomic_num > 118 || meltingPointTable[atomic_num - 1] == 0) { + *data = FeatureValues::nan_value; + continue; + } + *data = FeatureValues::convertToFeatureType((MT(meltingPointTable[atomic_num - 1]) - offset) / MT(200)); + } + return; + } + case AtomFloatFeature::METAL: { + for (uint32_t i = 0; i < num_atoms; ++i) { + const uint32_t atomic_num = graph.atoms[i].atomicNum; + *data = (atomic_num <= 0 || atomic_num > 118) ? FeatureValues::nan_value : FeatureValues::convertToFeatureType(MT(metalTable[atomic_num - 1])); + data += stride; + } + return; + } + case AtomFloatFeature::GROUP: { + const MT offset = offset_carbon ? 
MT(atomicNumToGroupTable[carbon_atomic_num - 1]) : MT(0); + for (uint32_t i = 0; i < num_atoms; ++i) { + const uint32_t atomic_num = graph.atoms[i].atomicNum; + *data = (atomic_num <= 0 || atomic_num > 118) ? FeatureValues::nan_value : FeatureValues::convertToFeatureType(MT(atomicNumToGroupTable[atomic_num - 1]) - offset); + data += stride; + } + return; + } + case AtomFloatFeature::PERIOD: { + const MT offset = offset_carbon ? MT(atomicNumToPeriodTable[carbon_atomic_num - 1]) : MT(0); + for (uint32_t i = 0; i < num_atoms; ++i) { + const uint32_t atomic_num = graph.atoms[i].atomicNum; + *data = (atomic_num <= 0 || atomic_num > 118) ? FeatureValues::nan_value : FeatureValues::convertToFeatureType(MT(atomicNumToPeriodTable[atomic_num - 1]) - offset); + data += stride; + } + return; + } + case AtomFloatFeature::SINGLE_BOND: + case AtomFloatFeature::AROMATIC_BOND: + case AtomFloatFeature::DOUBLE_BOND: + case AtomFloatFeature::TRIPLE_BOND: + { + const RDKit::ROMol& mol = *graph.mol.get(); + const RDKit::Bond::BondType type = + (feature == AtomFloatFeature::SINGLE_BOND) ? RDKit::Bond::SINGLE : ( + (feature == AtomFloatFeature::AROMATIC_BOND) ? RDKit::Bond::AROMATIC : ( + (feature == AtomFloatFeature::DOUBLE_BOND) ? RDKit::Bond::DOUBLE : ( + RDKit::Bond::TRIPLE))); + for (uint32_t i = 0; i < num_atoms; ++i) { + auto [begin, end] = mol.getAtomBonds(mol.getAtomWithIdx(i)); + uint32_t count = 0; + for (; begin != end; ++begin) { + count += (mol[*begin]->getBondType() == type); + } + *data = FeatureValues::convertToFeatureType(MT(count)); + data += stride; + } + return; + } + case AtomFloatFeature::IS_CARBON: { + const MT offset = offset_carbon ? 
MT(1) : MT(0); + for (uint32_t i = 0; i < num_atoms; ++i) { + *data = FeatureValues::convertToFeatureType(MT(graph.atoms[i].atomicNum == carbon_atomic_num) - offset); + data += stride; + } + return; + } + default: + break; + } + + // Missing implementation + assert(0); + for (uint32_t i = 0; i < num_atoms; ++i) { + *data = FeatureValues::nan_value; + data += stride; + } +} + +template void get_atom_float_feature(const GraphData& graph, int16_t* data, AtomFloatFeature feature, size_t stride, bool offset_carbon); +template void get_atom_float_feature(const GraphData& graph, float* data, AtomFloatFeature feature, size_t stride, bool offset_carbon); +template void get_atom_float_feature(const GraphData& graph, double* data, AtomFloatFeature feature, size_t stride, bool offset_carbon); + +// This table is from the SingleBondRadius column of graphium/features/periodic_table.csv +const double single_bond_lengths[] = { + 0.32, 0.46, 1.33, 1.02, 0.85, 0.75, 0.71, 0.63, 0.64, + 0.67, 1.55, 1.39, 1.26, 1.16, 1.11, 1.03, 0.99, 0.96, 1.96, + 1.71, 1.48, 1.36, 1.34, 1.22, 1.19, 1.16, 1.11, 1.10, 1.12, + 1.18, 1.24, 1.21, 1.21, 1.16, 1.14, 1.17, 2.10, 1.85, 1.63, + 1.54, 1.47, 1.38, 1.28, 1.25, 1.25, 1.20, 1.28, 1.36, 1.42, + 1.40, 1.40, 1.36, 1.33, 1.31, 2.32, 1.96, 1.80, 1.63, 1.76, + 1.74, 1.73, 1.72, 1.68, 1.69, 1.68, 1.67, 1.66, 1.65, 1.64, + 1.70, 1.62, 1.52, 1.46, 1.37, 1.31, 1.29, 1.22, 1.23, 1.24, + 1.33, 1.44, 1.44, 1.51, 1.45, 1.47, 1.42, 2.23, 2.01, 1.86, + 1.75, 1.69, 1.70, 1.71, 1.72, 1.66, 1.66, 1.68, 1.68, 1.65, + 1.67, 1.73, 1.76, 1.61, 1.57, 1.49, 1.43, 1.41, 1.34, 1.29, + 1.28, 1.21, 1.22, 1.36, 1.43, 1.62, 1.75, 1.65, 1.57, +}; +// This table is from the DoubleBondRadius column of graphium/features/periodic_table.csv +const double double_bond_lengths[] = { + qNaN, qNaN, 1.24, 0.90, 0.78, 0.67, 0.60, 0.57, 0.59, + 0.96, 1.60, 1.32, 1.13, 1.07, 1.02, 0.94, 0.95, 1.07, 1.93, + 1.47, 1.16, 1.17, 1.12, 1.11, 1.05, 1.09, 1.03, 1.01, 1.15, + 1.20, 1.17, 1.11, 1.14, 
1.07, 1.09, 1.21, 2.02, 1.57, 1.30, + 1.27, 1.25, 1.21, 1.20, 1.14, 1.10, 1.17, 1.39, 1.44, 1.36, + 1.30, 1.33, 1.28, 1.29, 1.35, 2.09, 1.61, 1.39, 1.37, 1.38, + 1.37, 1.35, 1.34, 1.34, 1.35, 1.35, 1.33, 1.33, 1.33, 1.31, + 1.29, 1.31, 1.28, 1.26, 1.20, 1.19, 1.16, 1.15, 1.12, 1.21, + 1.42, 1.42, 1.35, 1.41, 1.35, 1.38, 1.45, 2.18, 1.73, 1.53, + 1.43, 1.38, 1.34, 1.36, 1.35, 1.35, 1.36, 1.39, 1.40, 1.40, + qNaN, 1.39, qNaN, 1.41, 1.40, 1.36, 1.28, 1.28, 1.25, 1.25, + 1.16, 1.16, 1.37, qNaN, qNaN, qNaN, qNaN, qNaN, qNaN, +}; +// This table is from the TripleBondRadius column of graphium/features/periodic_table.csv +const double triple_bond_lengths[] = { + qNaN, qNaN, qNaN, 0.85, 0.73, 0.60, 0.54, 0.53, 0.53, + qNaN, qNaN, 1.27, 1.11, 1.02, 0.94, 0.95, 0.93, 0.96, qNaN, + 1.33, 1.14, 1.08, 1.06, 1.03, 1.03, 1.02, 0.96, 1.01, 1.20, + qNaN, 1.21, 1.14, 1.06, 1.07, 1.10, 1.08, qNaN, 1.39, 1.24, + 1.21, 1.16, 1.13, 1.10, 1.03, 1.06, 1.12, 1.37, qNaN, 1.46, + 1.32, 1.27, 1.21, 1.25, 1.22, qNaN, 1.49, 1.39, 1.31, 1.28, + qNaN, qNaN, qNaN, qNaN, 1.32, qNaN, qNaN, qNaN, qNaN, qNaN, + qNaN, 1.31, 1.22, 1.19, 1.15, 1.10, 1.09, 1.07, 1.10, 1.23, + qNaN, 1.50, 1.37, 1.35, 1.29, 1.38, 1.33, qNaN, 1.59, 1.40, + 1.36, 1.29, 1.18, 1.16, qNaN, qNaN, qNaN, qNaN, qNaN, qNaN, + qNaN, qNaN, qNaN, qNaN, 1.31, 1.26, 1.21, 1.19, 1.18, 1.13, + 1.12, 1.18, 1.30, qNaN, qNaN, qNaN, qNaN, qNaN, qNaN, +}; + +template +void get_bond_float_feature(const GraphData& graph, T* data, BondFeature feature, size_t stride) { + const uint32_t num_bonds = graph.num_bonds; + switch (feature) { + case BondFeature::TYPE_FLOAT: { + const RDKit::ROMol& mol = *graph.mol.get(); + for (size_t i = 0; i < num_bonds; ++i, data += stride) { + auto type = graph.bonds[i].bondType; + double value = 0; + switch (type) { + case RDKit::Bond::BondType::SINGLE: value = 1.0; break; + case RDKit::Bond::BondType::DOUBLE: value = 2.0; break; + case RDKit::Bond::BondType::TRIPLE: value = 3.0; break; + case 
RDKit::Bond::BondType::AROMATIC: value = 1.5; break; + default: value = mol.getBondWithIdx(i)->getBondTypeAsDouble(); + } + *data = FeatureValues::convertToFeatureType(value); + } + return; + } + case BondFeature::IN_RING: { + const RDKit::ROMol& mol = *graph.mol.get(); + for (size_t i = 0; i < num_bonds; ++i, data += stride) { + bool is_in_ring = mol.getRingInfo()->numBondRings(i) != 0; + *data = is_in_ring ? FeatureValues::one : FeatureValues::zero; + } + return; + } + case BondFeature::CONJUGATED: { + for (size_t i = 0; i < num_bonds; ++i, data += stride) { + bool is_conjugated = graph.bonds[i].isConjugated; + *data = is_conjugated ? FeatureValues::one : FeatureValues::zero; + } + return; + } + case BondFeature::CONFORMER_BOND_LENGTH: { + RDKit::ROMol& mol = *graph.mol.get(); + if (mol.beginConformers() == mol.endConformers()) { + // Try to generate a conformer + RDKit::DGeomHelpers::EmbedParameters params; + params.enforceChirality = false; + params.ignoreSmoothingFailures = true; + params.useBasicKnowledge = true; + params.useExpTorsionAnglePrefs = true; + params.optimizerForceTol = 0.1; + int id = RDKit::DGeomHelpers::EmbedMolecule(mol, params); + if (id == -1) { + // Failed to generate a conformer + const uint32_t num_bonds = graph.num_bonds; + for (uint32_t i = 0; i < num_bonds; ++i, data += stride) { + *data = FeatureValues::nan_value; + } + return; + } + assert(mol.beginConformers() != mol.endConformers()); + } + const RDKit::Conformer& conformer = mol.getConformer(); + const auto& positions = conformer.getPositions(); + for (uint32_t i = 0; i < num_bonds; ++i, data += stride) { + const uint32_t begin_atom = graph.bonds[i].beginAtomIdx; + const uint32_t end_atom = graph.bonds[i].endAtomIdx; + const RDGeom::Point3D diff = (positions[end_atom] - positions[begin_atom]); + // Unfortunately, the length() function on Point3D is virtual, so compute it manually. 
+ const double length = std::sqrt(diff.x * diff.x + diff.y * diff.y + diff.z * diff.z); + *data = FeatureValues::convertToFeatureType(length); + } + return; + } + case BondFeature::ESTIMATED_BOND_LENGTH: { + for (uint32_t i = 0; i < num_bonds; ++i, data += stride) { + const uint32_t begin_atom = graph.bonds[i].beginAtomIdx; + const uint32_t end_atom = graph.bonds[i].endAtomIdx; + const int atomic_num1 = graph.atoms[begin_atom].atomicNum; + const bool atom1_valid = (atomic_num1 >= 1 && atomic_num1 <= 118); + const int atomic_num2 = graph.atoms[end_atom].atomicNum; + const bool atom2_valid = (atomic_num2 >= 1 && atomic_num2 <= 118); + assert(atom1_valid && atom2_valid); + if (!atom1_valid || !atom2_valid) { + *data = FeatureValues::nan_value; + continue; + } + + const auto type = graph.bonds[i].bondType; + if (type == RDKit::Bond::BondType::SINGLE) { + // All atoms have a single bond length + *data = FeatureValues::convertToFeatureType( + single_bond_lengths[atomic_num1 - 1] + single_bond_lengths[atomic_num2 - 1]); + continue; + } + if (type == RDKit::Bond::BondType::DOUBLE) { + const double length1 = (double_bond_lengths[atomic_num1 - 1] >= 0) ? + double_bond_lengths[atomic_num1 - 1] : single_bond_lengths[atomic_num1 - 1]; + const double length2 = (double_bond_lengths[atomic_num2 - 1] >= 0) ? + double_bond_lengths[atomic_num2 - 1] : single_bond_lengths[atomic_num2 - 1]; + *data = FeatureValues::convertToFeatureType(length1 + length2); + continue; + } + if (type == RDKit::Bond::BondType::TRIPLE) { + const double length1 = (triple_bond_lengths[atomic_num1 - 1] >= 0) ? + triple_bond_lengths[atomic_num1 - 1] : single_bond_lengths[atomic_num1 - 1]; + const double length2 = (triple_bond_lengths[atomic_num2 - 1] >= 0) ? 
+ triple_bond_lengths[atomic_num2 - 1] : single_bond_lengths[atomic_num2 - 1]; + *data = FeatureValues::convertToFeatureType(length1 + length2); + continue; + } + if (type != RDKit::Bond::BondType::AROMATIC) { + *data = FeatureValues::nan_value; + } + + // Aromatic case + double length1 = single_bond_lengths[atomic_num1 - 1]; + double length2 = single_bond_lengths[atomic_num2 - 1]; + if (double_bond_lengths[atomic_num1] >= 0) { + length1 = 0.5 * (length1 + double_bond_lengths[atomic_num1 - 1]); + } + if (double_bond_lengths[atomic_num2] >= 0) { + length2 = 0.5 * (length2 + double_bond_lengths[atomic_num2 - 1]); + } + *data = FeatureValues::convertToFeatureType(length1 + length2); + } + return; + } + default: + // Missing implementation + assert(0); + for (uint32_t i = 0; i < num_bonds; ++i, data += stride) { + *data = FeatureValues::nan_value; + } + return; + } +} + +template void get_bond_float_feature(const GraphData& graph, int16_t* data, BondFeature feature, size_t stride); +template void get_bond_float_feature(const GraphData& graph, float* data, BondFeature feature, size_t stride); +template void get_bond_float_feature(const GraphData& graph, double* data, BondFeature feature, size_t stride); diff --git a/graphium/graphium_cpp/float_features.h b/graphium/graphium_cpp/float_features.h new file mode 100644 index 000000000..b839416c3 --- /dev/null +++ b/graphium/graphium_cpp/float_features.h @@ -0,0 +1,58 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "features.h" + +#include + +#include + +template +void get_atom_float_feature(const GraphData& graph, T* data, AtomFloatFeature feature, size_t stride, bool offset_carbon = true); + +extern template void get_atom_float_feature(const GraphData& graph, int16_t* data, AtomFloatFeature feature, size_t stride, bool offset_carbon); +extern template void get_atom_float_feature(const GraphData& graph, float* data, AtomFloatFeature feature, size_t stride, bool offset_carbon); +extern template void get_atom_float_feature(const GraphData& graph, double* data, AtomFloatFeature feature, size_t stride, bool offset_carbon); + +template +void get_bond_float_feature(const GraphData& graph, T* data, BondFeature feature, size_t stride); + +extern template void get_bond_float_feature(const GraphData& graph, int16_t* data, BondFeature feature, size_t stride); +extern template void get_bond_float_feature(const GraphData& graph, float* data, BondFeature feature, size_t stride); +extern template void get_bond_float_feature(const GraphData& graph, double* data, BondFeature feature, size_t stride); + +// This table is from the Group column of graphium/features/periodic_table.csv +constexpr uint8_t atomicNumToGroupTable[] = { + 1, 18, 1, 2, 13, 14, 15, 16, 17, + 18, 1, 2, 13, 14, 15, 16, 17, 18, 1, + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 1, 2, 3, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 1, 2, 3, 19, 19, + 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, + 19, 19, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 1, 2, 3, + 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, + 19, 19, 19, 19, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, +}; +constexpr size_t groupCount = 19; + +// This table is from the Period column of graphium/features/periodic_table.csv +constexpr uint8_t atomicNumToPeriodTable[] = { + 1, 1, 2, 2, 2, 2, 2, 2, 2, + 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 
4, 4, + 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7 +}; +constexpr size_t periodCount = 7; diff --git a/graphium/graphium_cpp/graphium_cpp.cpp b/graphium/graphium_cpp/graphium_cpp.cpp new file mode 100644 index 000000000..373c18f14 --- /dev/null +++ b/graphium/graphium_cpp/graphium_cpp.cpp @@ -0,0 +1,92 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +#include "features.h" +#include "labels.h" + +// C++ standard library headers +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// RDKit headers +#include +#include +#include +#include +#include +#include + +// PyBind and Torch headers for use by library to be imported by Python +#include +#include +#include +#include + +// RDKit::SmilesToMol uses std::string, so until we replace it, lets use std::string here. +// ("const char*" could avoid an extra allocation, if we do eventually replace use of SmilesToMol.) 
+// Parses `smiles_string` into an RDKit molecule.
+//
+// explicit_H: if true, hydrogens are added as explicit atoms after parsing.
+// ordered:    if true, the atoms are renumbered into RDKit's canonical rank
+//             order, so equivalent SMILES inputs yield identically-ordered
+//             graphs.
+//
+// Returns null if the SMILES string fails to parse.
+// NOTE(review): template arguments in this function were stripped from the
+// original patch text (angle-bracket content lost) and have been restored
+// from context.
+std::unique_ptr<RDKit::RWMol> parse_mol(
+    const std::string& smiles_string,
+    bool explicit_H,
+    bool ordered) {
+
+    // Parse SMILES string with default options
+    RDKit::SmilesParserParams params;
+    std::unique_ptr<RDKit::RWMol> mol{ RDKit::SmilesToMol(smiles_string, params) };
+    if (!mol) {
+        return mol;
+    }
+
+    if (ordered) {
+        // Determine a canonical ordering of the atoms
+        const unsigned int num_atoms = mol->getNumAtoms();
+        std::vector<unsigned int> atom_order;
+        RDKit::Canon::rankMolAtoms(*mol, atom_order);
+        assert(atom_order.size() == num_atoms);
+
+        // Invert the order: atom_order[i] is atom i's rank, and
+        // renumberAtoms wants the atom index for each output position.
+        std::vector<unsigned int> inverse_order(num_atoms);
+        for (unsigned int i = 0; i < num_atoms; ++i) {
+            inverse_order[atom_order[i]] = i;
+        }
+
+        // Reorder the atoms to the canonical order
+        mol.reset(static_cast<RDKit::RWMol*>(RDKit::MolOps::renumberAtoms(*mol, inverse_order)));
+    }
+    if (explicit_H) {
+        RDKit::MolOps::addHs(*mol);
+    }
+    else {
+        // Default params for SmilesToMol already calls removeHs,
+        // and calling it again shouldn't have any net effect.
+        //RDKit::MolOps::removeHs(*mol);
+    }
+    return mol;
+}
+
+// This is necessary to export Python functions in a Python module named graphium_cpp.
+PYBIND11_MODULE(graphium_cpp, m) { + m.doc() = "graphium C++ plugin"; // Python module docstring + + // Functions in labels.cpp + m.def("load_num_cols_and_dtypes", &load_num_cols_and_dtypes, "Loads from a cache file, a list of integers representing the number of columns in each task, and a list of integers representing the torch ScalarType of the task's data."); + m.def("load_metadata_tensors", &load_metadata_tensors, "Loads from cache files for a specific stage, a torch tensor representing the offsets of molecules in files, another containing all SMILES strings contatenated, another with the offsets of all SMILES strings, and two for the nubmer of nodes and edges in each molecule."); + m.def("load_stats", &load_stats, "Loads from a cache file of a specific task, the stats for each column, for use in denormalization."); + m.def("prepare_and_save_data", &prepare_and_save_data, "Accepts a dict mapping dataset (task) names to dicts with \"smiles\", \"labels\", and \"label_offsets\" data, and returns the data that would be returned by load_metadata_tensors, load_stats, and load_num_cols_and_dtypes."); + m.def("load_labels_from_index", &load_labels_from_index, "Loads label data from disk, for a specific stage and molecule."); + m.def("extract_string", &extract_string, "Extracts a single string from a Tensor of contatenated strings."); + + // Functions in features.cpp + m.def("atom_float_feature_names_to_tensor", &atom_float_feature_names_to_tensor, "Accepts feature names and returns a tensor representing them as integers"); + m.def("atom_onehot_feature_names_to_tensor", &atom_onehot_feature_names_to_tensor, "Accepts feature names and returns a tensor representing them as integers"); + m.def("bond_feature_names_to_tensor", &bond_feature_names_to_tensor, "Accepts feature names and returns a tensor representing them as integers"); + m.def("positional_feature_options_to_tensor", &positional_feature_options_to_tensor, "Accepts feature names, levels, and options, and returns 
a tensor representing them as integers"); + m.def("featurize_smiles", &featurize_smiles, "Accepts a SMILES string and returns tensors representing the features"); +} diff --git a/graphium/graphium_cpp/graphormer.cpp b/graphium/graphium_cpp/graphormer.cpp new file mode 100644 index 000000000..65badd030 --- /dev/null +++ b/graphium/graphium_cpp/graphormer.cpp @@ -0,0 +1,70 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +#include "graphormer.h" + +#include +#include +#include +#include + +template +void compute_graphormer_distances( + const uint32_t n, + const uint32_t* neighbor_starts, + const uint32_t* neighbors, + std::vector>& queue, + std::vector& all_pairs_distances) { + + // Compute all pairs shortest paths. + // Because this is a sparse graph treated as having unweighted edges, + // BFS on each node is faster than Dijkstra's or Floyd-Warshall's. + + if (queue.capacity() == 0) { + queue.reserve(n); + } + + all_pairs_distances.resize(size_t(n) * n); + std::fill(all_pairs_distances.begin(), all_pairs_distances.end(), T(-1)); + + for (uint32_t start_index = 0; start_index < n; ++start_index) { + queue.resize(0); + size_t queue_head = 0; + queue.push_back({ start_index,0 }); + T* const distances = all_pairs_distances.data() + start_index * n; + while (queue.size() != queue_head) { + auto [current_node, current_distance] = queue[queue_head]; + ++queue_head; + + if (distances[current_node] != T(-1)) { + continue; + } + + distances[current_node] = T(current_distance); + + ++current_distance; + + const uint32_t* neighbor_start = neighbors + neighbor_starts[current_node]; + const uint32_t* neighbor_end = neighbors + neighbor_starts[current_node+1]; + for (; neighbor_start != neighbor_end; ++neighbor_start) { + queue.push_back({ *neighbor_start,current_distance }); + } + } + } +} + +// Explicit instantiations for float and double +template +void 
compute_graphormer_distances( + const uint32_t n, + const uint32_t* neighbor_starts, + const uint32_t* neighbors, + std::vector>& queue, + std::vector& all_pairs_distances); +template +void compute_graphormer_distances( + const uint32_t n, + const uint32_t* neighbor_starts, + const uint32_t* neighbors, + std::vector>& queue, + std::vector& all_pairs_distances); diff --git a/graphium/graphium_cpp/graphormer.h b/graphium/graphium_cpp/graphormer.h new file mode 100644 index 000000000..4a82c67be --- /dev/null +++ b/graphium/graphium_cpp/graphormer.h @@ -0,0 +1,31 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include +#include + +template +void compute_graphormer_distances( + const uint32_t n, + const uint32_t* neighbor_starts, + const uint32_t* neighbors, + std::vector>& queue, + std::vector& all_pairs_distances); + +extern template +void compute_graphormer_distances( + const uint32_t n, + const uint32_t* neighbor_starts, + const uint32_t* neighbors, + std::vector>& queue, + std::vector& all_pairs_distances); +extern template +void compute_graphormer_distances( + const uint32_t n, + const uint32_t* neighbor_starts, + const uint32_t* neighbors, + std::vector>& queue, + std::vector& all_pairs_distances); diff --git a/graphium/graphium_cpp/labels.cpp b/graphium/graphium_cpp/labels.cpp new file mode 100644 index 000000000..b950463f0 --- /dev/null +++ b/graphium/graphium_cpp/labels.cpp @@ -0,0 +1,1584 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+#include "labels.h"
+
+#include "features.h"
+
+// C++ standard library headers
+// NOTE(review): the original three include targets were stripped from the
+// patch text; restored best-guess from usage -- verify against upstream.
+#include <algorithm>
+#include <filesystem>
+#include <thread>
+
+// RDKit headers
+// NOTE(review): targets stripped; restored best-guess from usage of
+// MolOps::addHs and MolToInchiKey -- verify against upstream.
+#include <GraphMol/ROMol.h>
+#include <GraphMol/RWMol.h>
+#include <GraphMol/MolOps.h>
+#include <INCHI-API/inchi.h>
+
+// Numpy array headers
+// NOTE(review): targets stripped; restored from the standard numpy C-API
+// include pattern -- verify against upstream.
+#include <Python.h>
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#include <numpy/arrayobject.h>
+
+#ifdef _WIN32
+// Windows file handling wrappers.
+// These present the same minimal interface as the POSIX branch below:
+// open-for-read/write, read/write a byte count, seek to an absolute offset,
+// and close.
+#define WIN32_LEAN_AND_MEAN
+#include <Windows.h>
+
+using FileType = HANDLE;
+const auto INVALID_FILE = INVALID_HANDLE_VALUE;
+
+static FileType fopen_read_wrapper(const std::filesystem::path& file_path) {
+    return CreateFileW(
+        file_path.wstring().c_str(),
+        GENERIC_READ,
+        FILE_SHARE_READ,
+        nullptr,
+        OPEN_EXISTING,
+        FILE_ATTRIBUTE_NORMAL,
+        nullptr);
+}
+
+static FileType fopen_write_wrapper(const std::filesystem::path& file_path) {
+    return CreateFileW(
+        file_path.wstring().c_str(),
+        GENERIC_WRITE,
+        0,
+        nullptr,
+        CREATE_ALWAYS,
+        FILE_ATTRIBUTE_NORMAL,
+        nullptr);
+}
+
+// Reads `bytes` bytes into `buffer`, in chunks. Returns the number of bytes
+// actually read; a short count indicates failure or EOF.
+static size_t fread_wrapper(void* buffer, size_t bytes, FileType file) {
+    size_t total_bytes_read = 0;
+    while (bytes > 0) {
+        // NOTE: ReadFile should support reads up to (2^32 - 1) bytes,
+        // but might as well limit it to 1GB (2^30 bytes) at a time,
+        // just in case there are issues at or above 2GB.
+        const DWORD max_read_size = 1024 * 1024 * 1024;
+        const DWORD bytes_to_read = (bytes > max_read_size) ? max_read_size : (DWORD)bytes;
+        DWORD bytes_read;
+        BOOL success = ReadFile(file, buffer, bytes_to_read, &bytes_read, nullptr);
+        total_bytes_read += (success ? bytes_read : 0);
+        if (!success || bytes_read != bytes_to_read) {
+            return total_bytes_read;
+        }
+        bytes -= bytes_read;
+    }
+    return total_bytes_read;
+}
+
+// Writes `bytes` bytes from `buffer`, in chunks. Returns the number of bytes
+// actually written; a short count indicates failure.
+static size_t fwrite_wrapper(const void* buffer, size_t bytes, FileType file) {
+    size_t total_bytes_written = 0;
+    while (bytes > 0) {
+        // NOTE: WriteFile should support writes up to (2^32 - 1) bytes,
+        // but might as well limit it to 1GB (2^30 bytes) at a time,
+        // just in case there are issues at or above 2GB.
+        const DWORD max_write_size = 1024 * 1024 * 1024;
+        const DWORD bytes_to_write = (bytes > max_write_size) ? max_write_size : (DWORD)bytes;
+        DWORD bytes_written;
+        BOOL success = WriteFile(file, buffer, bytes_to_write, &bytes_written, nullptr);
+        total_bytes_written += (success ? bytes_written : 0);
+        if (!success || bytes_written != bytes_to_write) {
+            return total_bytes_written;
+        }
+        bytes -= bytes_written;
+    }
+    return total_bytes_written;
+}
+
+// Seeks to an absolute byte offset. Returns 0 on success (fseek convention).
+static int fseek_wrapper(FileType file, int64_t file_pointer) {
+    LARGE_INTEGER file_pointer_union;
+    file_pointer_union.QuadPart = (LONGLONG)file_pointer;
+    BOOL success = SetFilePointerEx(file, file_pointer_union, nullptr, FILE_BEGIN);
+    return (success == 0);
+}
+
+static void fclose_wrapper(FileType file) {
+    CloseHandle(file);
+}
+
+#else
+// Linux file handling wrappers
+#include <stdio.h>
+
+using FileType = FILE*;
+const auto INVALID_FILE = (FILE*)nullptr;
+
+static FileType fopen_read_wrapper(const std::filesystem::path& file_path) {
+    return fopen(file_path.string().c_str(), "rb");
+}
+
+static FileType fopen_write_wrapper(const std::filesystem::path& file_path) {
+    return fopen(file_path.string().c_str(), "wb");
+}
+
+static size_t fread_wrapper(void* buffer, size_t bytes, FileType file) {
+    return fread(buffer, 1, bytes, file);
+}
+
+static size_t fwrite_wrapper(const void* buffer, size_t bytes, FileType file) {
+    return fwrite(buffer, 1, bytes, file);
+}
+
+static int fseek_wrapper(FileType file, int64_t file_pointer) {
+    // NOTE: If these files could ever be larger than 2GB each, fseek won't
+    // work on platforms where "long" is a 32-bit type (e.g. 32-bit Linux)
+    return fseek(file, (long)file_pointer, SEEK_SET);
+}
+
+static void fclose_wrapper(FileType file) {
+    fclose(file);
+}
+
+#endif // End of file handling wrappers
+
+struct InitNumpyArrayModule {
+    InitNumpyArrayModule() {
+        // This imports the numpy array module, and it must be
+        // called exactly once before numpy array functions are used.
+        if (_import_array() < 0) {
+            printf("ERROR: Failed to import numpy.core.multiarray from C++ in graphium_cpp module\n");
+        }
+    }
+};
+static void ensure_numpy_array_module_initialized() {
+    // Function scope static variables will be initialized upon the first call,
+    // and only once, in a threadsafe manner.
+    static InitNumpyArrayModule numpy_initializer;
+}
+
+// Minimal per-molecule summary: a 128-bit ID derived from the InChI key,
+// plus node (atom) and edge (directed bond) counts.
+struct MolBriefData {
+    uint64_t unique_id[2];
+    uint32_t num_nodes;
+    uint32_t num_edges;
+};
+
+// Parses a SMILES string and returns its MolBriefData. An unparseable
+// SMILES string yields the all-zero sentinel value.
+static MolBriefData smiles_to_brief_data(
+    const std::string& smiles_string,
+    bool add_self_loop = false,
+    bool explicit_H = false) {
+
+    // Don't add explicit_H here, in case it affects MolToInchiKey (though it really shouldn't)
+    // NOTE(review): template argument restored; the patch text lost
+    // angle-bracket content.
+    std::unique_ptr<RDKit::RWMol> mol{ parse_mol(smiles_string, false) };
+    if (!mol) {
+        return MolBriefData{ {0,0}, 0, 0 };
+    }
+
+    const std::string inchiKeyString = MolToInchiKey(*mol, "/FixedH /SUU /RecMet /KET /15T");
+    size_t n = inchiKeyString.size();
+    // Format: AAAAAAAAAAAAAA-BBBBBBBBFV-P
+    // According to https://www.inchi-trust.org/technical-faq/
+    assert(n == 27 && inchiKeyString[14] == '-' && inchiKeyString[25] == '-');
+    // Convert from capital letter characters to 64-bit integers:
+    // 13 characters for first integer, 12 characters for 2nd integer.
+    // Neither should overflow a 64-bit unsigned integer.
+    uint64_t id0 = (n > 0) ? (inchiKeyString[0] - 'A') : 0;
+    for (size_t i = 1; i < 13 && i < n; ++i) {
+        id0 = 26*id0 + (inchiKeyString[i] - 'A');
+    }
+    uint64_t id1 = (13 < n) ? (inchiKeyString[13] - 'A') : 0;
+    for (size_t i = 15; i < 25 && i < n; ++i) {
+        id1 = 26*id1 + (inchiKeyString[i] - 'A');
+    }
+    if (26 < n) {
+        id1 = 26*id1 + (inchiKeyString[26] - 'A');
+    }
+
+    // Now handle explicit_H
+    if (explicit_H) {
+        RDKit::MolOps::addHs(*mol);
+    }
+    else {
+        // Default params for SmilesToMol already calls removeHs,
+        // and calling it again shouldn't have any net effect.
+        //RDKit::MolOps::removeHs(*mol);
+    }
+
+    return MolBriefData{
+        {id0, id1},
+        mol->getNumAtoms(),
+        2*mol->getNumBonds() + (add_self_loop ? mol->getNumAtoms() : 0)
+    };
+}
+
+// Label normalization applied before writing stats/labels to the cache.
+enum class NormalizationMethod {
+    NONE,
+    NORMAL,
+    UNIT
+};
+struct NormalizationOptions {
+    NormalizationMethod method = NormalizationMethod::NONE;
+    double min_clipping = -std::numeric_limits<double>::infinity();
+    double max_clipping = std::numeric_limits<double>::infinity();
+};
+
+// Molecule label data is sharded into files of this many molecules each.
+constexpr size_t num_mols_per_file = 1024;
+
+// Writes the cache filename for file `file_num` into `filename`:
+// the decimal number zero-padded to at least 7 digits, followed by ".tmp".
+// The 25-byte buffer fits any uint64_t (20 digits) plus ".tmp" and the
+// terminating null.
+static void get_mol_label_filename(
+    char filename[25],
+    uint64_t file_num) {
+
+    // Emit digits least-significant first, then reverse them below.
+    size_t filename_index = 0;
+    while (file_num != 0) {
+        filename[filename_index] = '0' + (file_num % 10);
+        ++filename_index;
+        file_num /= 10;
+    }
+    // Zero-pad to at least 7 digits.
+    while (filename_index < 7) {
+        filename[filename_index] = '0';
+        ++filename_index;
+    }
+    std::reverse(filename, filename + filename_index);
+    filename[filename_index] = '.';
+    filename[filename_index+1] = 't';
+    filename[filename_index+2] = 'm';
+    filename[filename_index+3] = 'p';
+    filename[filename_index+4] = 0;
+}
+
+// Mapping between the supported label element types: byte size,
+// numpy type enum, and torch ScalarType.
+struct Types {
+    size_t size;
+    int numpy_type;
+    c10::ScalarType torch_type;
+};
+constexpr size_t num_supported_types = 3;
+constexpr Types supported_types[num_supported_types] = {
+    {2, NPY_FLOAT16, c10::ScalarType::Half},
+    {4, NPY_FLOAT32, c10::ScalarType::Float},
+    {8, NPY_FLOAT64, c10::ScalarType::Double}
+};
+static bool is_supported_numpy_type(int type) {
+    return (type == supported_types[0].numpy_type) ||
+        (type == supported_types[1].numpy_type) ||
+        (type == supported_types[2].numpy_type);
+};
+// Returns the index into supported_types, or num_supported_types if not found.
+static size_t numpy_type_index(int type) {
+    if (type == supported_types[0].numpy_type) {
+        return 0;
+    }
+    if (type == supported_types[1].numpy_type) {
+        return 1;
+    }
+    if (type == supported_types[2].numpy_type) {
+        return 2;
+    }
+    return num_supported_types;
+};
+// Returns the index into supported_types, or num_supported_types if not found.
+static size_t torch_type_index(c10::ScalarType type) {
+    if (type == supported_types[0].torch_type) {
+        return 0;
+    }
+    if (type == supported_types[1].torch_type) {
+        return 1;
+    }
+    if (type == supported_types[2].torch_type) {
+        return 2;
+    }
+    return num_supported_types;
+};
+
+
+constexpr const char*const label_metadata_filename = "label_metadata.tmp";
+constexpr const char*const file_data_offsets_filename = "file_data_offsets.tmp";
+constexpr const char*const concat_smiles_filename = "concat_smiles.tmp";
+constexpr const char*const smiles_offsets_filename = "smiles_offsets.tmp";
+constexpr const char*const num_nodes_filename = "num_nodes.tmp";
+constexpr const char*const num_edges_filename = "num_edges.tmp";
+
+// Saves the per-task column counts and torch dtypes to the shared cache file.
+// Returns false on any inconsistency or I/O failure.
+// NOTE(review): the element types of the two vectors were stripped from the
+// patch text; int64_t/int32_t restored as a best guess consistent with
+// load_num_cols_and_dtypes -- verify against labels.h upstream.
+static bool save_num_cols_and_dtypes(
+    const std::filesystem::path& common_path,
+    const std::vector<int64_t>& label_num_cols,
+    const std::vector<int32_t>& label_data_types) {
+
+    const uint64_t num_labels = label_num_cols.size();
+    if (num_labels != label_data_types.size()) {
+        return false;
+    }
+    std::filesystem::path file_path(common_path / label_metadata_filename);
+    FileType file = fopen_write_wrapper(file_path);
+    if (file == INVALID_FILE) {
+        return false;
+    }
+    // Layout: count, then all column counts, then all dtypes.
+    size_t num_bytes_written = fwrite_wrapper(&num_labels, sizeof(num_labels), file);
+    num_bytes_written += fwrite_wrapper(label_num_cols.data(), sizeof(label_num_cols[0])*num_labels, file);
+    num_bytes_written += fwrite_wrapper(label_data_types.data(), sizeof(label_data_types[0])*num_labels, file);
+    fclose_wrapper(file);
+    if (num_bytes_written != sizeof(num_labels) + (sizeof(label_num_cols[0]) + sizeof(label_data_types[0]))*num_labels) {
+        return false;
+    }
+    return true;
+}
+
+// Loads what save_num_cols_and_dtypes wrote. On any failure both returned
+// vectors are empty.
+std::tuple<
+    std::vector<int64_t>,
+    std::vector<int32_t>
+> load_num_cols_and_dtypes(
+    const std::string& processed_graph_data_path,
+    const std::string& data_hash) {
+
+    std::vector<int64_t> label_num_cols;
+    std::vector<int32_t> label_data_types;
+    std::filesystem::path file_path(
+        std::filesystem::path(processed_graph_data_path) / data_hash / label_metadata_filename
+    );
+    FileType file = fopen_read_wrapper(file_path);
+    if (file == INVALID_FILE) {
+        return std::make_tuple(std::move(label_num_cols), std::move(label_data_types));
+    }
+    uint64_t num_labels = 0;
+    size_t num_bytes_read = fread_wrapper(&num_labels, sizeof(num_labels), file);
+    // Trying to allocate 2^60 would fail, unless it overflows and then crashes
+    if (num_bytes_read != sizeof(num_labels) || num_labels == 0 || num_labels >= (uint64_t(1) << (64-4))) {
+        fclose_wrapper(file);
+        return std::make_tuple(std::move(label_num_cols), std::move(label_data_types));
+    }
+    label_num_cols.resize(num_labels, 0);
+    num_bytes_read = fread_wrapper(label_num_cols.data(), sizeof(label_num_cols[0])*num_labels, file);
+    if (num_bytes_read != sizeof(label_num_cols[0])*num_labels) {
+        fclose_wrapper(file);
+        label_num_cols.resize(0);
+        return std::make_tuple(std::move(label_num_cols), std::move(label_data_types));
+    }
+    label_data_types.resize(num_labels, -1);
+    num_bytes_read = fread_wrapper(label_data_types.data(), sizeof(label_data_types[0])*num_labels, file);
+    fclose_wrapper(file);
+    if (num_bytes_read != sizeof(label_data_types[0])*num_labels) {
+        label_num_cols.resize(0);
+        label_data_types.resize(0);
+    }
+    return std::make_tuple(std::move(label_num_cols), std::move(label_data_types));
+}
+
+// Saves a length-prefixed flat array of T to `directory / filename`.
+// Returns false on I/O failure.
+template <typename T>
+bool save_array_to_file(
+    const std::filesystem::path& directory,
+    const char*const filename,
+    const T* data,
+    const uint64_t n) {
+
+    std::filesystem::path file_path(directory / filename);
+    FileType file = fopen_write_wrapper(file_path);
+    if (file == INVALID_FILE) {
+        return false;
+    }
+    size_t num_bytes_written = fwrite_wrapper(&n, sizeof(n), file);
+    num_bytes_written += fwrite_wrapper(data, sizeof(T)*n, file);
+    fclose_wrapper(file);
+    if (num_bytes_written != sizeof(n) + sizeof(T)*n) {
+        return false;
+    }
+    return true;
+}
+
+
+// Loads a length-prefixed flat array of T written by save_array_to_file.
+// On success, `data` owns the array and the element count is returned;
+// on failure, `data` is null and 0 is returned.
+template <typename T>
+[[nodiscard]] uint64_t load_array_from_file(
+    const std::filesystem::path& directory,
+    const char*const filename,
+    std::unique_ptr<T[]>& data) {
+
+    data.reset(nullptr);
+
+    std::filesystem::path file_path(directory / filename);
+    FileType file = fopen_read_wrapper(file_path);
+    if (file == INVALID_FILE) {
+        return 0;
+    }
+    uint64_t n;
+    size_t num_bytes_read = fread_wrapper(&n, sizeof(n), file);
+    // Trying to allocate 2^60 would fail, unless it overflows and then crashes
+    if (num_bytes_read != sizeof(n) || n == 0 || n >= (uint64_t(1) << (64-4))) {
+        fclose_wrapper(file);
+        return 0;
+    }
+    data.reset(new T[n]);
+    num_bytes_read = fread_wrapper(data.get(), sizeof(T)*n, file);
+    fclose_wrapper(file);
+    if (num_bytes_read != sizeof(T)*n) {
+        data.reset(nullptr);
+        return 0;
+    }
+    return n;
+}
+
+// Loads the five per-stage metadata tensors written by prepare_and_save_data:
+// molecule data offsets, concatenated SMILES bytes, SMILES offsets,
+// num_nodes, and num_edges. Returns an empty vector if any file is missing
+// or the files are mutually inconsistent.
+std::vector<at::Tensor> load_metadata_tensors(
+    const std::string processed_graph_data_path,
+    const std::string stage,
+    const std::string data_hash) {
+
+    std::filesystem::path base_path{processed_graph_data_path};
+    std::filesystem::path directory = base_path / (stage + "_" + data_hash);
+
+    // NOTE(review): element types restored from the torch ScalarTypes used
+    // when wrapping the arrays below (Long/Char/Long/Int/Int).
+    std::unique_ptr<int64_t[]> mol_data_offsets;
+    uint64_t num_mol_data_offsets =
+        load_array_from_file(directory, file_data_offsets_filename, mol_data_offsets);
+
+    std::unique_ptr<char[]> concatenated_smiles;
+    uint64_t concatenated_smiles_size =
+        load_array_from_file(directory, concat_smiles_filename, concatenated_smiles);
+
+    std::unique_ptr<int64_t[]> smiles_offsets;
+    uint64_t num_smiles_offsets =
+        load_array_from_file(directory, smiles_offsets_filename, smiles_offsets);
+
+    std::unique_ptr<int32_t[]> num_nodes;
+    uint64_t num_num_nodes =
+        load_array_from_file(directory, num_nodes_filename, num_nodes);
+
+    std::unique_ptr<int32_t[]> num_edges;
+    uint64_t num_num_edges =
+        load_array_from_file(directory, num_edges_filename, num_edges);
+
+    // Consistency checks: every per-molecule array has the same length, the
+    // offsets array has one extra entry for the end, the final SMILES offset
+    // matches the concatenated size (num_num_edges == num_num_nodes at that
+    // point, so indexing with it reaches the final offset), and the data
+    // offsets cover all molecules plus one end offset per shard file.
+    if (num_num_nodes == 0 || num_num_edges != num_num_nodes || num_smiles_offsets != (num_num_nodes+1) ||
+        concatenated_smiles_size == 0 || concatenated_smiles_size != uint64_t(smiles_offsets[num_num_edges]) ||
+        num_mol_data_offsets != num_num_nodes + (num_num_nodes + num_mols_per_file-1)/num_mols_per_file) {
+        printf("ERROR: graphium_cpp.load_metadata_tensors failed to load valid metadata files\n");
+        printf("    len(file_data_offsets) is %zu\n", size_t(num_mol_data_offsets));
+        printf("    len(concat_smiles) is %zu\n", size_t(concatenated_smiles_size));
+        printf("    len(smiles_offsets) is %zu\n", size_t(num_smiles_offsets));
+        printf("    len(num_nodes) is %zu\n", size_t(num_num_nodes));
+        printf("    len(num_edges) is %zu\n", size_t(num_num_edges));
+        return std::vector<at::Tensor>();
+    }
+
+    // The above conditions should ensure that none of the arrays are empty,
+    // but assert in debug builds just in case.
+    assert(mol_data_offsets && concatenated_smiles && smiles_offsets && num_nodes && num_edges);
+
+    const int64_t data_offsets_dims[1] = { int64_t(num_mol_data_offsets) };
+    at::Tensor data_offsets_tensor = torch_tensor_from_array(std::move(mol_data_offsets), data_offsets_dims, 1, c10::ScalarType::Long);
+    const int64_t concatenated_smiles_dims[1] = { int64_t(concatenated_smiles_size) };
+    at::Tensor smiles_tensor = torch_tensor_from_array(std::move(concatenated_smiles), concatenated_smiles_dims, 1, c10::ScalarType::Char);
+    const int64_t smiles_offsets_dims[1] = { int64_t(num_num_nodes+1) };
+    at::Tensor smiles_offsets_tensor = torch_tensor_from_array(std::move(smiles_offsets), smiles_offsets_dims, 1, c10::ScalarType::Long);
+    const int64_t num_nodes_dims[1] = { int64_t(num_num_nodes) };
+    at::Tensor num_nodes_tensor = torch_tensor_from_array(std::move(num_nodes), num_nodes_dims, 1, c10::ScalarType::Int);
+    const int64_t num_edges_dims[1] = { int64_t(num_num_nodes) };
+    at::Tensor num_edges_tensor = torch_tensor_from_array(std::move(num_edges), num_edges_dims, 1, c10::ScalarType::Int);
+
+    std::vector<at::Tensor> stage_return_data({
+        std::move(data_offsets_tensor),
+        std::move(smiles_tensor),
+        std::move(smiles_offsets_tensor),
+        std::move(num_nodes_tensor),
+        std::move(num_edges_tensor)
+    });
+    return stage_return_data;
+}
+
+// Loads the per-column normalization stats for one task, for use in
+// denormalization; see the continuation below for details.
+std::vector<at::Tensor> load_stats(
+    const std::string processed_graph_data_path,
+    const std::string data_hash,
+    const std::string task_name) {
+
+    std::filesystem::path base_path{processed_graph_data_path};
+    
std::filesystem::path directory = base_path / data_hash; + const std::string filename(task_name + "_stats.tmp"); + + std::unique_ptr task_stats; + uint64_t num_stat_floats = + load_array_from_file(directory, filename.c_str(), task_stats); + + if (num_stat_floats == 0 || num_stat_floats % 4 != 0) { + return std::vector(); + } + + const uint64_t num_cols = num_stat_floats / 4; + std::vector return_stats(4); + for (size_t stat_index = 0; stat_index < 4; ++stat_index) { + std::unique_ptr single_stat(new double[num_cols]); + for (size_t i = 0; i < num_cols; ++i) { + single_stat[i] = task_stats[4*i + stat_index]; + } + const int64_t stat_dims[1] = { int64_t(num_cols) }; + at::Tensor stat_tensor = torch_tensor_from_array(std::move(single_stat), stat_dims, 1, c10::ScalarType::Double); + return_stats.push_back(std::move(stat_tensor)); + } + + return return_stats; +} + +// Returns: +// stage -> [ +// mol_file_data_offsets, +// unique mol smiles strings all concatenated, +// unique mol smiles string offsets (including one extra for the end), +// unique mol num_nodes, +// unique mol num_edges +// ] +// task -> 4 stats tensors each +// task index -> label num columns +// task index -> label torch data type enum +std::tuple< + std::unordered_map>, + std::unordered_map>, + std::vector, + std::vector +> prepare_and_save_data( + const pybind11::list& task_names, + pybind11::dict& task_dataset_args, + const pybind11::dict& task_label_normalization, + const std::string processed_graph_data_path, + const std::string data_hash, + const pybind11::dict& task_train_indices, + const pybind11::dict& task_val_indices, + const pybind11::dict& task_test_indices, + bool add_self_loop, + bool explicit_H) { + + ensure_numpy_array_module_initialized(); + + std::filesystem::path base_path{processed_graph_data_path}; + std::filesystem::create_directories(base_path); + std::filesystem::path common_path(base_path / data_hash); + std::filesystem::create_directories(common_path); + constexpr size_t 
num_stages = 3; + // NOTE: Computing stats below depends on that "train" is stage 0. + std::string stages[num_stages] = { + std::string("train"), + std::string("val"), + std::string("test") + }; + std::filesystem::path stage_paths[num_stages] = { + base_path / (stages[0] + "_" + data_hash), + base_path / (stages[1] + "_" + data_hash), + base_path / (stages[2] + "_" + data_hash) + }; + std::filesystem::create_directories(stage_paths[0]); + std::filesystem::create_directories(stage_paths[1]); + std::filesystem::create_directories(stage_paths[2]); + const pybind11::dict* stage_task_indices[num_stages] = { + &task_train_indices, + &task_val_indices, + &task_test_indices + }; + + const size_t num_tasks = task_names.size(); + std::vector return_label_num_cols(num_tasks, 0); + std::vector return_label_data_types(num_tasks, -1); + size_t total_num_cols = 0; + std::unique_ptr task_col_starts(new size_t[num_tasks+1]); + std::unique_ptr task_bytes_per_float(new size_t[num_tasks]); + std::unique_ptr task_normalization_options(new NormalizationOptions[num_tasks]); + std::unique_ptr smiles_numpy_arrays(new PyArrayObject*[num_tasks]); + std::unique_ptr labels_numpy_arrays(new PyArrayObject*[num_tasks]); + std::unique_ptr label_offsets_numpy_arrays(new PyArrayObject*[num_tasks]); + // Figure out the task bounds first, so that everything can be parallelized perfectly. 
+ size_t task_index = 0; + for (const auto& task : task_names) { + const size_t current_task_index = task_index; + task_col_starts[current_task_index] = total_num_cols; + task_bytes_per_float[current_task_index] = 0; + smiles_numpy_arrays[current_task_index] = nullptr; + labels_numpy_arrays[current_task_index] = nullptr; + label_offsets_numpy_arrays[current_task_index] = nullptr; + ++task_index; + if (!pybind11::isinstance(task)) { + continue; + } + const std::string task_name{ pybind11::str(task) }; + pybind11::handle task_dataset_handle = pybind11::handle(PyDict_GetItemString(task_dataset_args.ptr(), task_name.c_str())); + if (!task_dataset_handle || !pybind11::isinstance(task_dataset_handle)) { + continue; + } + pybind11::dict dataset_dict = task_dataset_handle.cast(); + pybind11::handle smiles_handle = pybind11::handle(PyDict_GetItemString(dataset_dict.ptr(), "smiles")); + pybind11::handle labels_handle = pybind11::handle(PyDict_GetItemString(dataset_dict.ptr(), "labels")); + pybind11::handle label_offsets_handle = pybind11::handle(PyDict_GetItemString(dataset_dict.ptr(), "label_offsets")); + if (!smiles_handle || !labels_handle) { + continue; + } + PyObject* smiles_obj_ptr = smiles_handle.ptr(); + PyObject* labels_obj_ptr = labels_handle.ptr(); + PyObject* label_offsets_obj_ptr = label_offsets_handle.ptr(); + const bool is_labels_numpy = PyArray_Check(labels_obj_ptr); + const bool is_labels_multi_row = label_offsets_obj_ptr && PyArray_Check(label_offsets_obj_ptr); + if (!PyArray_Check(smiles_obj_ptr) || !is_labels_numpy) { + continue; + } + PyArrayObject* smiles_numpy_array = reinterpret_cast(smiles_obj_ptr); + int smiles_type_num = PyArray_TYPE(smiles_numpy_array); + int smiles_ndims = PyArray_NDIM(smiles_numpy_array); + if (smiles_type_num != NPY_OBJECT || smiles_ndims != 1) { + continue; + } + intptr_t num_smiles = PyArray_DIM(smiles_numpy_array, 0); + if (num_smiles <= 0) { + continue; + } + + PyArrayObject* labels_numpy_array = 
reinterpret_cast(labels_obj_ptr); + PyArrayObject* label_offsets_numpy_array = is_labels_multi_row ? reinterpret_cast(label_offsets_obj_ptr) : nullptr; + int labels_type_num = PyArray_TYPE(labels_numpy_array); + int labels_ndims = PyArray_NDIM(labels_numpy_array); +#if GRAPHIUM_CPP_DEBUGGING + printf("\"%s\" labels numpy type %d, %d dims\n", task_name.c_str(), labels_type_num, labels_ndims); +#endif + if (!is_supported_numpy_type(labels_type_num) || labels_ndims != 2) { + continue; + } + if (is_labels_multi_row) { + int label_offsets_type_num = PyArray_TYPE(label_offsets_numpy_array); + int label_offsets_ndims = PyArray_NDIM(label_offsets_numpy_array); + // Only int64 is supported, for simplicity + if (label_offsets_type_num != NPY_INT64 || label_offsets_ndims != 1) { + continue; + } + } + intptr_t num_label_rows = PyArray_DIM(labels_numpy_array, 0); + intptr_t num_molecules = num_label_rows; + if (is_labels_multi_row) { + intptr_t num_offsets_rows = PyArray_DIM(label_offsets_numpy_array, 0); + if (num_offsets_rows == 0) { + continue; + } + // -1 is because last offset is the end offset + num_molecules = num_offsets_rows - 1; + + // Verify that the first offset is zero + if (*(const int64_t*)PyArray_GETPTR1(label_offsets_numpy_array, 0) != 0) { + continue; + } + // Verify that the last offset is the end offset + if (*(const int64_t*)PyArray_GETPTR1(label_offsets_numpy_array, num_molecules) != num_label_rows) { + continue; + } + } + intptr_t num_label_cols = PyArray_DIM(labels_numpy_array, 1); +#if GRAPHIUM_CPP_DEBUGGING + printf("\"%s\" labels[%zd][%zd] (%zd molecules)\n", task_name.c_str(), num_label_rows, num_label_cols, num_molecules); +#endif + if (num_smiles != num_molecules || num_label_cols < 0) { + continue; + } + + const size_t supported_type_index = numpy_type_index(labels_type_num); + const size_t bytes_per_float = supported_types[supported_type_index].size; + labels_numpy_arrays[current_task_index] = labels_numpy_array; + 
label_offsets_numpy_arrays[current_task_index] = is_labels_multi_row ? label_offsets_numpy_array : nullptr; + return_label_num_cols[current_task_index] = num_label_cols; + return_label_data_types[current_task_index] = int(supported_types[supported_type_index].torch_type); + total_num_cols += size_t(num_label_cols); + task_bytes_per_float[current_task_index] = bytes_per_float; + + smiles_numpy_arrays[current_task_index] = smiles_numpy_array; + + pybind11::handle task_normalization_handle = pybind11::handle(PyDict_GetItemString(task_label_normalization.ptr(), task_name.c_str())); + if (!task_normalization_handle || !pybind11::isinstance(task_normalization_handle)) { + continue; + } + pybind11::dict normalization_dict = task_normalization_handle.cast(); + pybind11::handle method_handle = pybind11::handle(PyDict_GetItemString(normalization_dict.ptr(), "method")); + pybind11::handle min_handle = pybind11::handle(PyDict_GetItemString(normalization_dict.ptr(), "min_clipping")); + pybind11::handle max_handle = pybind11::handle(PyDict_GetItemString(normalization_dict.ptr(), "max_clipping")); + if (method_handle && pybind11::isinstance(method_handle)) { + std::string method{pybind11::str(method_handle)}; + if (strcmp(method.c_str(), "normal") == 0) { + task_normalization_options[current_task_index].method = NormalizationMethod::NORMAL; + } + else if (strcmp(method.c_str(), "unit") == 0) { + task_normalization_options[current_task_index].method = NormalizationMethod::UNIT; + } + } + if (min_handle && pybind11::isinstance(min_handle)) { + task_normalization_options[current_task_index].min_clipping = double(int64_t(min_handle.cast())); + } + else if (min_handle && pybind11::isinstance(min_handle)) { + task_normalization_options[current_task_index].min_clipping = double(min_handle.cast()); + } + if (max_handle && pybind11::isinstance(max_handle)) { + task_normalization_options[current_task_index].max_clipping = double(int64_t(max_handle.cast())); + } + else if (max_handle && 
pybind11::isinstance(max_handle)) { + task_normalization_options[current_task_index].max_clipping = double(max_handle.cast()); + } + } + task_col_starts[num_tasks] = total_num_cols; + + save_num_cols_and_dtypes(common_path, return_label_num_cols, return_label_data_types); + + // Get the total number of molecules, by stage and task + size_t total_num_mols = 0; + for (size_t stage_index = 0; stage_index < num_stages; ++stage_index) { + const pybind11::dict& task_indices_dict = *stage_task_indices[stage_index]; + + for (size_t task_index = 0; task_index < num_tasks; ++task_index) { + pybind11::handle task = task_names[task_index]; + if (!smiles_numpy_arrays[task_index]) { + continue; + } + const std::string task_name{ pybind11::str(task) }; + pybind11::handle task_indices_handle = pybind11::handle(PyDict_GetItemString(task_indices_dict.ptr(), task_name.c_str())); + if (!task_indices_handle || !pybind11::isinstance(task_indices_handle)) { + printf("Error: Task %s indices list isn't valid.\n", task_name.c_str()); + continue; + } + const pybind11::list task_indices_list = task_indices_handle.cast(); + const size_t current_num_mols = task_indices_list.size(); + if (current_num_mols == 0) { + printf("Error: Task %s indices list is empty.\n", task_name.c_str()); + } + total_num_mols += current_num_mols; + } + } + + // Get the mol indices for all stages and tasks + std::vector task_mol_indices; + task_mol_indices.reserve(total_num_mols); + std::vector task_mol_start(num_stages*num_tasks + 1); + // Unfortunately, reading strings from a numpy array isn't threadsafe, + // so we have to do that single-threaded first, too. 
+ std::vector smiles_strings; + smiles_strings.reserve(total_num_mols); + for (size_t stage_index = 0; stage_index < num_stages; ++stage_index) { + const pybind11::dict& task_indices_dict = *stage_task_indices[stage_index]; + + for (size_t task_index = 0; task_index < num_tasks; ++task_index) { + // Update task_mol_start here, in case any indices aren't integers + // or any SMILES strings aren't strings below. + task_mol_start[stage_index*num_tasks + task_index] = task_mol_indices.size(); + + pybind11::handle task = task_names[task_index]; + if (!smiles_numpy_arrays[task_index]) { + continue; + } + const std::string task_name{ pybind11::str(task) }; + pybind11::handle task_indices_handle = pybind11::handle(PyDict_GetItemString(task_indices_dict.ptr(), task_name.c_str())); + if (!task_indices_handle || !pybind11::isinstance(task_indices_handle)) { + continue; + } + + const pybind11::list task_indices_list = task_indices_handle.cast(); + const size_t current_num_mols = task_indices_list.size(); + + PyArrayObject*const smiles_numpy_array = smiles_numpy_arrays[task_index]; + const size_t smiles_array_size = PyArray_DIM(smiles_numpy_array, 0); + + for (size_t indices_index = 0; indices_index < current_num_mols; ++indices_index) { + const auto list_item = task_indices_list[indices_index]; + if (!pybind11::isinstance(list_item)) { + continue; + } + + size_t task_mol_index = size_t(list_item.cast()); + if (task_mol_index >= smiles_array_size) { + continue; + } + + pybind11::handle single_smiles_handle(*(PyObject**)PyArray_GETPTR1(smiles_numpy_array, task_mol_index)); + if (!pybind11::isinstance(single_smiles_handle)) { + continue; + } + + task_mol_indices.push_back(task_mol_index); + smiles_strings.push_back(std::string(pybind11::str(single_smiles_handle))); + } + + } + } + total_num_mols = task_mol_indices.size(); + task_mol_start[num_stages*num_tasks] = total_num_mols; + + struct MolKey { + uint64_t id0; + uint64_t id1; + uint32_t num_nodes; + uint32_t num_edges; + 
uint64_t task_index; + uint64_t task_mol_index; + uint64_t mol_index; + + bool operator<(const MolKey& other) const { + if (id0 != other.id0) { + return (id0 < other.id0); + } + if (id1 != other.id1) { + return (id1 < other.id1); + } + if (num_nodes != other.num_nodes) { + return (num_nodes < other.num_nodes); + } + if (num_edges != other.num_edges) { + return (num_edges < other.num_edges); + } + if (task_index != other.task_index) { + return (task_index < other.task_index); + } + return (task_mol_index < other.task_mol_index); + } + + // This is used for identifying keys of molecules with invalid SMILES strings. + // They show up as having no nodes, no edges, and ID 0. + bool isInvalid() const { + return id0 == 0 && id1 == 0 && num_nodes == 0 && num_edges == 0; + } + }; + + // Compute all InChI keys for all molecules, in parallel if applicable. + std::unique_ptr keys(new MolKey[total_num_mols]); + const size_t num_mols_per_block = 512; + const size_t num_blocks = (total_num_mols + num_mols_per_block-1) / num_mols_per_block; + const size_t num_processors = std::thread::hardware_concurrency(); + const size_t num_threads = (num_processors == 1 || num_blocks <= 4) ? 1 : std::min(num_processors, num_blocks/2); + auto&& get_single_mol_key = [&task_mol_start,add_self_loop,explicit_H,&task_mol_indices,&smiles_strings,num_tasks](size_t mol_index) -> MolKey { + // Find which task this mol is in. If there could be many tasks, + // this could be a binary search, but for small numbers of tasks, + // a linear search is fine. 
+ size_t task_index = 0; + while (task_mol_start[task_index+1] <= mol_index) { + ++task_index; + } + const size_t task_mol_index = task_mol_indices[mol_index]; + + const std::string& smiles_str = smiles_strings[mol_index]; + MolBriefData mol_data = smiles_to_brief_data(smiles_str, add_self_loop, explicit_H); + + return MolKey{mol_data.unique_id[0], mol_data.unique_id[1], mol_data.num_nodes, mol_data.num_edges, task_index % num_tasks, task_mol_index, mol_index}; + }; + if (num_threads == 1) { + for (size_t mol_index = 0; mol_index < total_num_mols; ++mol_index) { + keys[mol_index] = get_single_mol_key(mol_index); + } + } + else { + std::atomic next_block_index(0); + auto&& thread_functor = [&keys,&next_block_index,num_blocks,num_mols_per_block,total_num_mols,&get_single_mol_key]() { + while (true) { + const size_t block_index = next_block_index.fetch_add(1); + if (block_index >= num_blocks) { + return; + } + const size_t begin_index = block_index * num_mols_per_block; + const size_t end_index = std::min((block_index+1) * num_mols_per_block, total_num_mols); + for (size_t mol_index = begin_index; mol_index < end_index; ++mol_index) { + keys[mol_index] = get_single_mol_key(mol_index); + } + } + }; + std::vector threads; + for (size_t thread_index = 0; thread_index < num_threads; ++thread_index) { + threads.push_back(std::thread(thread_functor)); + } + for (size_t thread_index = 0; thread_index < num_threads; ++thread_index) { + threads[thread_index].join(); + } + } + + // Compute stats on the train stage only (stage 0), like how the python code did it. + // Normalization will be applied to all stages later. + // TODO: Does it matter that stats calculations will include all copies of molecules + // that occur multiple times in the same dataset? 
+ constexpr size_t stat_min_offset = 0; + constexpr size_t stat_max_offset = 1; + constexpr size_t stat_mean_offset = 2; + constexpr size_t stat_std_offset = 3; + constexpr size_t num_stats = 4; + size_t stats_floats = num_stats*total_num_cols; + std::unique_ptr all_task_stats(new double[stats_floats]); + std::unique_ptr all_task_num_non_nan(new intptr_t[total_num_cols]); + for (size_t task_index = 0; task_index < num_tasks; ++task_index) { + const size_t task_num_mols = task_mol_start[task_index+1] - task_mol_start[task_index]; + const size_t task_first_col = task_col_starts[task_index]; + const size_t task_num_cols = task_col_starts[task_index+1] - task_first_col; + if (task_num_mols == 0 || task_num_cols == 0) { + continue; + } + // Initialize stats for accumulation + double*const task_stats = all_task_stats.get() + num_stats*task_first_col; + intptr_t*const task_num_non_nan = all_task_num_non_nan.get() + task_first_col; + for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index) { + task_stats[num_stats*task_col_index + stat_min_offset] = std::numeric_limits::infinity(); + task_stats[num_stats*task_col_index + stat_max_offset] = -std::numeric_limits::infinity(); + task_stats[num_stats*task_col_index + stat_mean_offset] = 0.0; + task_stats[num_stats*task_col_index + stat_std_offset] = 0.0; + task_num_non_nan[task_col_index] = 0; + } + + const size_t bytes_per_float = task_bytes_per_float[task_index]; + + auto&& update_stats_single_row = [task_stats, task_num_non_nan](const char* col_data, const size_t task_num_cols, const size_t bytes_per_float, const intptr_t col_stride) { + double* stats = task_stats; + intptr_t* num_non_nan = task_num_non_nan; + for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index, col_data += col_stride, stats += num_stats, ++num_non_nan) { + // TODO: Move the type check outside the loop if it's a bottleneck + double value; + if (bytes_per_float == sizeof(double)) { + value = *(const 
double*)(col_data); + } + else if (bytes_per_float == sizeof(float)) { + value = *(const float*)(col_data); + } + else { + assert(bytes_per_float == sizeof(uint16_t)); + value = c10::detail::fp16_ieee_to_fp32_value(*(const uint16_t*)(col_data)); + } + if (value != value) { + // NaN value, so skip it + continue; + } + stats[stat_min_offset] = std::min(stats[stat_min_offset], value); + stats[stat_max_offset] = std::max(stats[stat_max_offset], value); + stats[stat_mean_offset] += value; + // TODO: If summing the squares isn't accurate enough for computing the variance, + // consider other approaches. + stats[stat_std_offset] += value*value; + ++(*num_non_nan); + } + }; + + PyArrayObject*const labels_numpy_array = labels_numpy_arrays[task_index]; + if (labels_numpy_array != nullptr) { + const char* raw_data = (const char*)PyArray_DATA(labels_numpy_array); + const intptr_t* strides = PyArray_STRIDES(labels_numpy_array); + const intptr_t num_label_rows = PyArray_DIM(labels_numpy_array, 0); + PyArrayObject*const label_offsets_numpy_array = label_offsets_numpy_arrays[task_index]; + const char* offsets_raw_data = label_offsets_numpy_array ? (const char*)PyArray_DATA(label_offsets_numpy_array) : nullptr; + const intptr_t offsets_stride = label_offsets_numpy_array ? PyArray_STRIDES(label_offsets_numpy_array)[0] : 0; + // The -1 is because there's an extra entry at the end for the end offset. + const intptr_t num_mols = label_offsets_numpy_array ? 
PyArray_DIM(label_offsets_numpy_array, 0) - 1 : num_label_rows; + // The normalization is computed on the subsample being kept + for (size_t task_key_index = 0; task_key_index < task_num_mols; ++task_key_index) { + const size_t task_mol_index = keys[task_mol_start[task_index] + task_key_index].task_mol_index; + if (task_mol_index >= size_t(num_mols)) { + printf("Error: In task %zu, mol index %zu is past limit of %zu\n", size_t(task_index), task_mol_index, size_t(num_mols)); + continue; + } + if (offsets_raw_data == nullptr) { + const char* row_data = raw_data + strides[0]*task_mol_index; + update_stats_single_row(row_data, task_num_cols, bytes_per_float, strides[1]); + } + else { + size_t begin_offset = *reinterpret_cast(offsets_raw_data + offsets_stride*task_mol_index); + size_t end_offset = *reinterpret_cast(offsets_raw_data + offsets_stride*(task_mol_index+1)); + const char* row_data = raw_data + strides[0]*begin_offset; + for (size_t row = begin_offset; row < end_offset; ++row, row_data += strides[0]) { + update_stats_single_row(row_data, task_num_cols, bytes_per_float, strides[1]); + } + } + } + } + +#if GRAPHIUM_CPP_DEBUGGING + printf("Task %zu normalization method %zu\n", size_t(task_index), size_t(task_normalization_options[task_index].method)); + for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index) { + printf("Task %zu col %zu, num non-nan = %zu, min = %e, max = %e\n", + size_t(task_index), task_col_index, + size_t(task_num_non_nan[task_col_index]), + task_stats[num_stats*task_col_index + stat_min_offset], + task_stats[num_stats*task_col_index + stat_max_offset]); + } +#endif + } + + std::unordered_map> all_stats_return_data; + + for (size_t task_index = 0; task_index < num_tasks; ++task_index) { + const size_t task_first_col = task_col_starts[task_index]; + const size_t task_num_cols = task_col_starts[task_index+1] - task_first_col; + if (task_num_cols == 0) { + continue; + } + + // Finish accumulation + double*const 
task_stats = all_task_stats.get() + num_stats*task_first_col; + intptr_t*const task_num_non_nan = all_task_num_non_nan.get() + task_first_col; + for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index) { + if (task_num_non_nan[task_col_index] == 0) { + task_stats[num_stats*task_col_index + stat_min_offset] = std::numeric_limits::quiet_NaN(); + task_stats[num_stats*task_col_index + stat_max_offset] = std::numeric_limits::quiet_NaN(); + task_stats[num_stats*task_col_index + stat_mean_offset] = std::numeric_limits::quiet_NaN(); + task_stats[num_stats*task_col_index + stat_std_offset] = std::numeric_limits::quiet_NaN(); + } + else { + if (task_normalization_options[task_index].min_clipping > task_stats[num_stats*task_col_index + stat_min_offset]) { + task_stats[num_stats*task_col_index + stat_min_offset] = task_normalization_options[task_index].min_clipping; + } + if (task_normalization_options[task_index].max_clipping < task_stats[num_stats*task_col_index + stat_max_offset]) { + task_stats[num_stats*task_col_index + stat_max_offset] = task_normalization_options[task_index].max_clipping; + } + const double n = double(task_num_non_nan[task_col_index]); + const double mean = task_stats[num_stats*task_col_index + stat_mean_offset] / n; + task_stats[num_stats*task_col_index + stat_mean_offset] = mean; + // sum((x[i] - m)^2)/(n-1) + // = sum(x[i]^2 -2mx[i] + m^2)/(n-1) + // = (sum(x[i]^2) - 2nm^2 + nm^2)/(n-1) + // = (sum(x[i]^2) - nm^2)/(n-1) + // except, for compatibility with numpy.nanstd, use n instead of n-1 + const double sum_sqaures = task_stats[num_stats*task_col_index + stat_std_offset]; + const double stdev = std::sqrt((sum_sqaures - n*mean*mean)/n); + task_stats[num_stats*task_col_index + stat_std_offset] = stdev; + } + } + + const std::string task_name{ pybind11::str(task_names[task_index]) }; +#if GRAPHIUM_CPP_DEBUGGING + for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index) { + printf("%s %zu %lld %e %e %e 
%e\n", + task_name.c_str(), task_col_index, (long long)task_num_non_nan[task_col_index], + task_stats[num_stats*task_col_index + stat_min_offset], + task_stats[num_stats*task_col_index + stat_max_offset], + task_stats[num_stats*task_col_index + stat_mean_offset], + task_stats[num_stats*task_col_index + stat_std_offset]); + } +#endif + const std::string stats_filename = task_name + "_stats.tmp"; + save_array_to_file(common_path, stats_filename.c_str(), task_stats, num_stats*task_num_cols); + + // Make copies for returning in a format similar to the load_stats function. + std::vector task_stats_out; + for (size_t stat_index = 0; stat_index < num_stats; ++stat_index) { + const int64_t task_stats_dims[1] = { int64_t(task_num_cols) }; + std::unique_ptr task_stats_copy(new double[task_num_cols]); + for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index) { + task_stats_copy[task_col_index] = task_stats[num_stats*task_col_index + stat_index]; + } + at::Tensor task_stats_tensor = torch_tensor_from_array(std::move(task_stats_copy), task_stats_dims, 1, c10::ScalarType::Double); + task_stats_out.push_back(std::move(task_stats_tensor)); + } + all_stats_return_data.insert(std::make_pair(std::move(task_name), std::move(task_stats_out))); + } + + // Sort train, val, and test separately, since they need to be stored separately. + // Don't sort until after accumulating stats, because the code above currently assumes that the tasks + // aren't interleaved. + std::sort(keys.get(), keys.get() + task_mol_start[num_tasks]); + std::sort(keys.get() + task_mol_start[num_tasks], keys.get() + task_mol_start[2*num_tasks]); + std::sort(keys.get() + task_mol_start[2*num_tasks], keys.get() + total_num_mols); + + // mol_data_offsets will only need one entry for each unique molecule, + // but we can preallocate an upper bound. 
+ std::vector mol_data_offsets; + mol_data_offsets.reserve(task_mol_start[num_tasks]); + // temp_data is used for normalization + std::vector temp_data; + temp_data.reserve(total_num_cols*sizeof(double)); + std::vector data; + data.reserve(num_mols_per_file*(total_num_cols*sizeof(double) + (1+2*num_tasks)*sizeof(uint64_t))); + + std::unordered_map> per_stage_return_data; + + for (size_t stage_index = 0; stage_index < num_stages; ++stage_index) { + mol_data_offsets.resize(0); + assert(data.size() == 0); + size_t concatenated_smiles_size = 0; + uint64_t num_unique_mols = 0; + const size_t stage_begin_index = task_mol_start[stage_index*num_tasks]; + const size_t stage_end_index = task_mol_start[(stage_index+1)*num_tasks]; + for (size_t sorted_index = stage_begin_index; sorted_index < stage_end_index; ) { + if (keys[sorted_index].isInvalid()) { + ++sorted_index; + continue; + } + size_t data_offset = data.size(); + mol_data_offsets.push_back(data_offset); + const size_t first_sorted_index = sorted_index; + const uint64_t id0 = keys[sorted_index].id0; + const uint64_t id1 = keys[sorted_index].id1; + + // Add the length of the smiles string to the total length, + // and include the terminating zero + const size_t smiles_length = smiles_strings[keys[sorted_index].mol_index].size(); + concatenated_smiles_size += (smiles_length+1); + + uint64_t prev_task_index = keys[sorted_index].task_index; + uint64_t mol_num_tasks = 1; + ++sorted_index; + while (sorted_index < stage_end_index && keys[sorted_index].id0 == id0 && keys[sorted_index].id1 == id1) { + // The same molecule can occur multiple times in a single dataset, + // but we only want to keep one copy for each task. 
+ if (keys[sorted_index].task_index != prev_task_index) { + ++mol_num_tasks; + prev_task_index = keys[sorted_index].task_index; + } + ++sorted_index; + } + assert(mol_num_tasks <= num_tasks); + + // TODO: Double data capacity as needed if resizing is slow + assert(data.size() == data_offset); + data.resize(data_offset + sizeof(uint64_t)*(1+2*mol_num_tasks)); + + // Copy in the number of tasks for this molecule, followed by a list of the task indices and their end offsets. + memcpy(data.data() + data_offset, &mol_num_tasks, sizeof(uint64_t)); + data_offset += sizeof(uint64_t); + uint64_t task_offset = 0; + // Start with an invalid prev_task_index to pick up the first task + prev_task_index = uint64_t(int64_t(-1)); + for (size_t i = first_sorted_index; i < sorted_index; ++i) { + const uint64_t task_index = keys[i].task_index; + // The same molecule can occur multiple times in a single dataset, + // but we only want to keep one copy for each task. + if (task_index == prev_task_index) { + continue; + } + prev_task_index = task_index; + size_t num_cols = task_col_starts[task_index+1] - task_col_starts[task_index]; + PyArrayObject*const label_offsets_numpy_array = label_offsets_numpy_arrays[task_index]; + if (label_offsets_numpy_array != nullptr) { + const size_t task_mol_index = keys[i].task_mol_index; + const char* offsets_raw_data = (const char*)PyArray_DATA(label_offsets_numpy_array); + const intptr_t offsets_stride = PyArray_STRIDES(label_offsets_numpy_array)[0]; + const int64_t begin_offset = *reinterpret_cast(offsets_raw_data + offsets_stride*task_mol_index); + const int64_t end_offset = *reinterpret_cast(offsets_raw_data + offsets_stride*(task_mol_index+1)); + const size_t current_rows = size_t(end_offset - begin_offset); + num_cols *= current_rows; + } + task_offset += task_bytes_per_float[task_index]*num_cols; + memcpy(data.data() + data_offset, &task_index, sizeof(uint64_t)); + data_offset += sizeof(uint64_t); + memcpy(data.data() + data_offset, &task_offset, 
sizeof(uint64_t)); + data_offset += sizeof(uint64_t); + } + + // TODO: Double data capacity as needed if resizing is slow + assert(data.size() == data_offset); + data.resize(data_offset + task_offset); + + auto&& store_single_row = [&data_offset, &data, &temp_data]( + const char* col_data, + const size_t task_num_cols, + const intptr_t col_stride, + const size_t in_bytes_per_float, + const size_t out_bytes_per_float, + const NormalizationMethod normalization_method, + const double* task_stats) { + + if (size_t(col_stride) == in_bytes_per_float) { + memcpy(temp_data.data(), col_data, in_bytes_per_float*task_num_cols); + } + else { + for (size_t col = 0; col < task_num_cols; ++col) { + memcpy(temp_data.data() + col*in_bytes_per_float, col_data, in_bytes_per_float); + col_data += col_stride; + } + } + for (size_t col = 0; col < task_num_cols; ++col) { + double value; + if (in_bytes_per_float == sizeof(double)) { + value = ((const double*)(temp_data.data()))[col]; + } + else if (in_bytes_per_float == sizeof(float)) { + value = ((const float*)(temp_data.data()))[col]; + } + else { + assert(in_bytes_per_float == sizeof(uint16_t)); + value = c10::detail::fp16_ieee_to_fp32_value(((const uint16_t*)(temp_data.data()))[col]); + } + value = std::max(value, task_stats[stat_min_offset]); + value = std::min(value, task_stats[stat_max_offset]); + if (normalization_method == NormalizationMethod::NORMAL) { + if (task_stats[stat_std_offset] != 0) { + value = (value - task_stats[stat_mean_offset])/task_stats[stat_std_offset]; + } + else { + value = 0; + } + } + else if (normalization_method == NormalizationMethod::UNIT) { + // TODO: Cache 1/(max-min) or 0 to avoid check + if (task_stats[stat_max_offset] - task_stats[stat_min_offset] != 0) { + value = (value - task_stats[stat_min_offset])/(task_stats[stat_max_offset] - task_stats[stat_min_offset]); + } + else { + value = 0; + } + } + + // NOTE: The code below writes to temp_data, which is still being read from above, + // so this 
relies on that we're not writing to a larger data type than we're reading, + // else we'll overwrite data. + assert(out_bytes_per_float <= in_bytes_per_float); + if (out_bytes_per_float == sizeof(double)) { + ((double*)(temp_data.data()))[col] = value; + } + else if (out_bytes_per_float == sizeof(float)) { + ((float*)(temp_data.data()))[col] = float(value); + } + else { + assert(out_bytes_per_float == sizeof(uint16_t)); + ((uint16_t*)(temp_data.data()))[col] = c10::detail::fp16_ieee_from_fp32_value(value); + } + task_stats += num_stats; + } + + memcpy(data.data() + data_offset, temp_data.data(), out_bytes_per_float*task_num_cols); + data_offset += out_bytes_per_float*task_num_cols; + }; + + // Copy in the task data, with optional normalization + // Start with an invalid prev_task_index to pick up the first task + prev_task_index = uint64_t(int64_t(-1)); + for (size_t i = first_sorted_index; i < sorted_index; ++i) { + const uint64_t task_index = keys[i].task_index; + // The same molecule can occur multiple times in a single dataset, + // but we only want to keep one copy for each task. 
+ if (task_index == prev_task_index) { + continue; + } + prev_task_index = task_index; + + const uint64_t task_mol_index = keys[i].task_mol_index; + + const size_t task_first_col = task_col_starts[task_index]; + const size_t task_num_cols = task_col_starts[task_index+1] - task_first_col; + const NormalizationOptions& normalization = task_normalization_options[task_index]; + const double* task_stats = all_task_stats.get() + num_stats*task_first_col; + + const size_t bytes_per_float = task_bytes_per_float[task_index]; + + PyArrayObject*const labels_numpy_array = labels_numpy_arrays[task_index]; + if (labels_numpy_array != nullptr) { + const char* raw_data = (const char*)PyArray_DATA(labels_numpy_array); + const intptr_t* strides = PyArray_STRIDES(labels_numpy_array); + PyArrayObject*const label_offsets_numpy_array = label_offsets_numpy_arrays[task_index]; + const char* offsets_raw_data = label_offsets_numpy_array ? (const char*)PyArray_DATA(label_offsets_numpy_array) : nullptr; + const intptr_t offsets_stride = label_offsets_numpy_array ? 
PyArray_STRIDES(label_offsets_numpy_array)[0] : 0; + if (offsets_raw_data == nullptr) { + const char* row_data = raw_data + strides[0]*task_mol_index; + store_single_row(row_data, task_num_cols, strides[1], bytes_per_float, bytes_per_float, normalization.method, task_stats); + } + else { + size_t begin_offset = *reinterpret_cast(offsets_raw_data + offsets_stride*task_mol_index); + size_t end_offset = *reinterpret_cast(offsets_raw_data + offsets_stride*(task_mol_index+1)); + const char* row_data = raw_data + strides[0]*begin_offset; + for (size_t row = begin_offset; row < end_offset; ++row, row_data += strides[0]) { + store_single_row(row_data, task_num_cols, strides[1], bytes_per_float, bytes_per_float, normalization.method, task_stats); + } + } + } + } + + ++num_unique_mols; + if (num_unique_mols % num_mols_per_file == 0 || sorted_index == stage_end_index) { + // Write out the data to a file + + // First, construct the filename + char filename[20+4+1]; + size_t file_num = ((num_unique_mols-1) / num_mols_per_file); + get_mol_label_filename(filename, file_num); + + std::filesystem::path file_path(stage_paths[stage_index] / filename); + FileType file = fopen_write_wrapper(file_path); + if (file == INVALID_FILE) { + return std::make_tuple( + std::move(per_stage_return_data), + std::move(all_stats_return_data), + std::move(return_label_num_cols), + std::move(return_label_data_types)); + } +#if GRAPHIUM_CPP_DEBUGGING + printf("Writing file %s\n", file_path.string().c_str()); +#endif + size_t num_bytes_written = fwrite_wrapper(data.data(), data_offset, file); + fclose_wrapper(file); + if (num_bytes_written != data_offset) { + return std::make_tuple( + std::move(per_stage_return_data), + std::move(all_stats_return_data), + std::move(return_label_num_cols), + std::move(return_label_data_types)); + } + data.resize(0); + + // One extra data offset to mark the end of each file. 
+ // data_offset is automatically reset to 0 on the next iteration + // due to data.size() being 0 now. + mol_data_offsets.push_back(data_offset); + } + } + + // Write out the molecule data offsets to a separate file, + // so that only one file read is needed per molecule when data loading + // if the offsets are all loaded once and kept in memory. + // Note the one extra entry per file. +#if GRAPHIUM_CPP_DEBUGGING + printf("Stage %s has %zu unique mols from %zu original\n", stages[stage_index].c_str(), size_t(num_unique_mols), size_t(stage_end_index - stage_begin_index)); +#endif + assert(mol_data_offsets.size() == num_unique_mols + (num_unique_mols + num_mols_per_file-1)/num_mols_per_file); + std::filesystem::path file_path(stage_paths[stage_index] / "mol_offsets.tmp"); + FileType file = fopen_write_wrapper(file_path); + if (file == INVALID_FILE) { + return std::make_tuple( + std::move(per_stage_return_data), + std::move(all_stats_return_data), + std::move(return_label_num_cols), + std::move(return_label_data_types)); + } + size_t num_bytes_written = fwrite_wrapper(&num_unique_mols, sizeof(num_unique_mols), file); + if (num_bytes_written != sizeof(num_unique_mols)) { + fclose_wrapper(file); + return std::make_tuple( + std::move(per_stage_return_data), + std::move(all_stats_return_data), + std::move(return_label_num_cols), + std::move(return_label_data_types)); + } + size_t num_offsets = mol_data_offsets.size(); + size_t data_offsets_size = num_offsets*sizeof(mol_data_offsets[0]); + num_bytes_written = fwrite_wrapper(mol_data_offsets.data(), data_offsets_size, file); + fclose_wrapper(file); + if (num_bytes_written != data_offsets_size) { + return std::make_tuple( + std::move(per_stage_return_data), + std::move(all_stats_return_data), + std::move(return_label_num_cols), + std::move(return_label_data_types)); + } + + static_assert(sizeof(int64_t) == sizeof(mol_data_offsets[0])); + save_array_to_file(stage_paths[stage_index], file_data_offsets_filename, 
mol_data_offsets.data(), num_offsets); + std::unique_ptr temp_data_offsets(new int64_t[num_offsets]); + memcpy(temp_data_offsets.get(), mol_data_offsets.data(), data_offsets_size); + const int64_t data_offsets_dims[1] = { int64_t(num_offsets) }; + at::Tensor data_offsets_tensor = torch_tensor_from_array(std::move(temp_data_offsets), data_offsets_dims, 1, c10::ScalarType::Long); + + std::unique_ptr concatenated_smiles(new char[concatenated_smiles_size]); + std::unique_ptr smiles_offsets(new int64_t[num_unique_mols+1]); + std::unique_ptr num_nodes(new int32_t[num_unique_mols]); + std::unique_ptr num_edges(new int32_t[num_unique_mols]); + size_t unique_index = 0; + int64_t smiles_offset = 0; + for (size_t sorted_index = stage_begin_index; sorted_index < stage_end_index; ) { + if (keys[sorted_index].isInvalid()) { + ++sorted_index; + continue; + } + smiles_offsets[unique_index] = smiles_offset; + + const uint64_t id0 = keys[sorted_index].id0; + const uint64_t id1 = keys[sorted_index].id1; + num_nodes[unique_index] = keys[sorted_index].num_nodes; + num_edges[unique_index] = keys[sorted_index].num_edges; + + // Copy the string + const std::string& smiles_string = smiles_strings[keys[sorted_index].mol_index]; + const size_t smiles_length = smiles_string.size(); + memcpy(concatenated_smiles.get() + smiles_offset, smiles_string.c_str(), smiles_length); + smiles_offset += smiles_length; + // Don't forget the terminating zero + concatenated_smiles[smiles_offset] = 0; + ++smiles_offset; + + ++unique_index; + ++sorted_index; + while (sorted_index < stage_end_index && keys[sorted_index].id0 == id0 && keys[sorted_index].id1 == id1) { + ++sorted_index; + } + } + smiles_offsets[unique_index] = smiles_offset; + + save_array_to_file(stage_paths[stage_index], concat_smiles_filename, concatenated_smiles.get(), concatenated_smiles_size); + save_array_to_file(stage_paths[stage_index], smiles_offsets_filename, smiles_offsets.get(), num_unique_mols+1); + 
save_array_to_file(stage_paths[stage_index], num_nodes_filename, num_nodes.get(), num_unique_mols); + save_array_to_file(stage_paths[stage_index], num_edges_filename, num_edges.get(), num_unique_mols); + + const int64_t concatenated_smiles_dims[1] = { int64_t(concatenated_smiles_size) }; + at::Tensor smiles_tensor = torch_tensor_from_array(std::move(concatenated_smiles), concatenated_smiles_dims, 1, c10::ScalarType::Char); + const int64_t smiles_offsets_dims[1] = { int64_t(num_unique_mols+1) }; + at::Tensor smiles_offsets_tensor = torch_tensor_from_array(std::move(smiles_offsets), smiles_offsets_dims, 1, c10::ScalarType::Long); + const int64_t num_nodes_dims[1] = { int64_t(num_unique_mols) }; + at::Tensor num_nodes_tensor = torch_tensor_from_array(std::move(num_nodes), num_nodes_dims, 1, c10::ScalarType::Int); + const int64_t num_edges_dims[1] = { int64_t(num_unique_mols) }; + at::Tensor num_edges_tensor = torch_tensor_from_array(std::move(num_edges), num_edges_dims, 1, c10::ScalarType::Int); + + std::vector stage_return_data({ + std::move(data_offsets_tensor), + std::move(smiles_tensor), + std::move(smiles_offsets_tensor), + std::move(num_nodes_tensor), + std::move(num_edges_tensor) + }); + per_stage_return_data.insert(std::make_pair(stages[stage_index], std::move(stage_return_data))); + mol_data_offsets.resize(0); + } + + return std::make_tuple( + std::move(per_stage_return_data), + std::move(all_stats_return_data), + std::move(return_label_num_cols), + std::move(return_label_data_types)); +} + +void load_labels_from_index( + const std::string stage_directory, + const int64_t mol_index, + const at::Tensor& mol_file_data_offsets, + const pybind11::list& label_names, + const pybind11::list& label_num_cols, + const pybind11::list& label_data_types, + pybind11::dict& labels +) { + const std::filesystem::path stage_path{stage_directory}; + if (mol_index < 0) { + printf("Error: In load_labels_from_index, mol_index = %lld\n", (long long)mol_index); + return; + } + const 
uint64_t file_num = uint64_t(mol_index) / num_mols_per_file; + const size_t index_into_offsets = file_num*(num_mols_per_file+1) + (uint64_t(mol_index) % num_mols_per_file); + + const size_t num_data_offsets = (mol_file_data_offsets.scalar_type() == c10::ScalarType::Long && mol_file_data_offsets.ndimension() == 1) ? mol_file_data_offsets.size(0) : 0; + if (index_into_offsets+1 >= num_data_offsets) { + printf("Error: In load_labels_from_index, mol_index = %zu, index_into_offsets = %zu, num_data_offsets = %zu\n", + size_t(mol_index), size_t(index_into_offsets), size_t(num_data_offsets)); + return; + } + // NOTE: If TensorBase::data_ptr is ever removed, change it to TensorBase::const_data_ptr. + // Some torch version being used doesn't have const_data_ptr yet. + const int64_t* const data_offsets = mol_file_data_offsets.data_ptr(); + const int64_t file_begin_offset = data_offsets[index_into_offsets]; + const int64_t file_end_offset = data_offsets[index_into_offsets+1]; + if (file_end_offset < 0 || file_end_offset-file_begin_offset < 8) { + printf("Error: In load_labels_from_index, mol_index = %zu, file_begin_offset = %lld, file_end_offset = %lld\n", + size_t(mol_index), (long long)(index_into_offsets), (long long)(num_data_offsets)); + return; + } + const size_t file_read_size = size_t(file_end_offset - file_begin_offset); + + std::unique_ptr data(new char[file_read_size]); + + { + char filename[25]; + get_mol_label_filename(filename, file_num); + + const std::filesystem::path file_path{stage_path / filename}; + FileType file = fopen_read_wrapper(file_path); + if (file == INVALID_FILE) { + printf("Error: In load_labels_from_index, failed to open \"%s\" for molecule %zu\n", + file_path.string().c_str(), size_t(mol_index)); + return; + } + int seek_failed = fseek_wrapper(file, file_begin_offset); + if (seek_failed) { + printf("Error: In load_labels_from_index, failed to seek to offset %zu in \"%s\" for molecule %zu\n", + size_t(file_begin_offset), 
file_path.string().c_str(), size_t(mol_index)); + fclose_wrapper(file); + return; + } + size_t num_bytes_read = fread_wrapper(data.get(), file_read_size, file); + fclose_wrapper(file); + if (num_bytes_read != file_read_size) { + printf("Error: In load_labels_from_index, read only %zu/%zu bytes from \"%s\" for molecule %zu\n", + size_t(num_bytes_read), size_t(file_read_size), file_path.string().c_str(), size_t(mol_index)); + return; + } + } + + uint64_t mol_num_tasks = 0; + memcpy(&mol_num_tasks, data.get(), sizeof(uint64_t)); + size_t data_offset = sizeof(uint64_t); + if (mol_num_tasks == 0 || mol_num_tasks > label_names.size() || file_read_size < (1+2*mol_num_tasks)*sizeof(uint64_t)) { + printf("Error: In load_labels_from_index, mol_index = %zu, mol_num_tasks = %zu, file_read_size = %zu\n", + size_t(mol_index), size_t(mol_num_tasks), size_t(file_read_size)); + return; + } + const size_t base_offset = (1+2*mol_num_tasks)*sizeof(uint64_t); + const char* base_task_data = data.get() + base_offset; + uint64_t task_offset = 0; + for (size_t data_task_index = 0; data_task_index < mol_num_tasks; ++data_task_index) { + uint64_t task_index = 0; + memcpy(&task_index, data.get() + data_offset, sizeof(uint64_t)); + data_offset += sizeof(uint64_t); + if (task_index >= label_names.size() || task_index >= label_data_types.size() || task_index >= label_num_cols.size()) { + printf("Error: In load_labels_from_index, mol_index = %zu, task_index = %zu\n", + size_t(mol_index), size_t(task_index)); + return; + } + + uint64_t task_end_offset = 0; + memcpy(&task_end_offset, data.get() + data_offset, sizeof(uint64_t)); + data_offset += sizeof(uint64_t); + if (task_end_offset < task_offset || task_end_offset > file_read_size-base_offset) { + printf("Error: In load_labels_from_index, mol_index = %zu, task_offset = %zu, task_end_offset = %zu, file_read_size = %zu, base_offset = %zu\n", + size_t(mol_index), size_t(task_offset), size_t(task_end_offset), size_t(file_read_size), 
size_t(base_offset)); + return; + } + + const size_t task_num_bytes = task_end_offset - task_offset; + if (!pybind11::isinstance(label_data_types[task_index]) || + !pybind11::isinstance(label_num_cols[task_index])) { + printf("Error: In load_labels_from_index, mol_index = %zu, task_index = %zu, label_data_type = \"%s\", label_num_cols = \"%s\"\n", + size_t(mol_index), size_t(task_index), + std::string(pybind11::str(label_data_types[task_index])).c_str(), + std::string(pybind11::str(label_num_cols[task_index])).c_str()); + return; + } + const c10::ScalarType torch_type = c10::ScalarType(size_t(label_data_types[task_index].cast())); + const size_t num_cols = size_t(label_num_cols[task_index].cast()); + if (num_cols == 0) { + printf("Error: In load_labels_from_index, mol_index = %zu, task_index = %zu, label_data_type = %zu, label_num_cols = %zu\n", + size_t(mol_index), size_t(task_index), + size_t(torch_type), num_cols); + return; + } + const size_t supported_type_index = torch_type_index(torch_type); + if (supported_type_index >= num_supported_types) { + printf("Error: In load_labels_from_index, mol_index = %zu, task_index = %zu, label_data_type = %zu, label_num_cols = %zu\n", + size_t(mol_index), size_t(task_index), + size_t(torch_type), num_cols); + } + const size_t bytes_per_float = supported_types[supported_type_index].size; + const size_t num_floats = task_num_bytes / bytes_per_float; + const size_t num_rows = num_floats / num_cols; + + if (num_floats != num_rows*num_cols) { + printf("Error: In load_labels_from_index, mol_index = %zu, task data bytes = %zu (not a multiple of %zu*%zu)\n", + size_t(mol_index), size_t(task_num_bytes), bytes_per_float, num_cols); + return; + } + + const std::string label_name{pybind11::str(label_names[task_index])}; + const bool is_graph_level = (std::strncmp(label_name.c_str(), "graph", 5) == 0); + if (is_graph_level && num_rows != 1) { + printf("Error: In load_labels_from_index, mol_index = %zu, num_rows = %zu for task \"%s\"\n", 
+ size_t(mol_index), num_rows, label_name.c_str()); + return; + } + size_t num_label_dims = is_graph_level ? 1 : 2; + const int64_t label_dims[2] = { (is_graph_level ? int64_t(num_floats) : int64_t(num_rows)), int64_t(num_cols) }; + at::Tensor label_tensor; + + if (bytes_per_float == 2) { + std::unique_ptr label_data(new uint16_t[num_floats]); + memcpy(label_data.get(), base_task_data + task_offset, task_num_bytes); + label_tensor = torch_tensor_from_array(std::move(label_data), label_dims, num_label_dims, torch_type); + } + else if (bytes_per_float == 4) { + std::unique_ptr label_data(new float[num_floats]); + memcpy(label_data.get(), base_task_data + task_offset, task_num_bytes); + label_tensor = torch_tensor_from_array(std::move(label_data), label_dims, num_label_dims, torch_type); + } + else if (bytes_per_float == 8) { + std::unique_ptr label_data(new double[num_floats]); + memcpy(label_data.get(), base_task_data + task_offset, task_num_bytes); + label_tensor = torch_tensor_from_array(std::move(label_data), label_dims, num_label_dims, torch_type); + } + + PyDict_SetItem(labels.ptr(), label_names[task_index].ptr(), THPVariable_Wrap(std::move(label_tensor))); + + task_offset = task_end_offset; + } +} + +std::string extract_string( + const at::Tensor& concat_strings, + const at::Tensor& string_offsets, + const int64_t index) { + + const size_t data_size = (concat_strings.scalar_type() == c10::ScalarType::Char && concat_strings.ndimension() == 1) ? concat_strings.size(0) : 0; + const size_t num_data_offsets = (string_offsets.scalar_type() == c10::ScalarType::Long && string_offsets.ndimension() == 1) ? 
string_offsets.size(0) : 0; + if (index < 0 || size_t(index) >= num_data_offsets) { + return std::string(); + } + const char* const data = reinterpret_cast(concat_strings.data_ptr()); + const int64_t* const data_offsets = string_offsets.data_ptr(); + int64_t offset = data_offsets[index]; + int64_t end_offset = data_offsets[index+1]; + int64_t size = (end_offset - offset) - 1; + if (offset < 0 || size < 0 || end_offset > int64_t(data_size)) { + return std::string(); + } + return std::string(data + offset, size_t(size)); +} diff --git a/graphium/graphium_cpp/labels.h b/graphium/graphium_cpp/labels.h new file mode 100644 index 000000000..6bcfe87e0 --- /dev/null +++ b/graphium/graphium_cpp/labels.h @@ -0,0 +1,69 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include +#include +#include + +// Torch tensor headers +#include +#include +#include + +// PyBind and Torch headers +#include +#include +#include + +// The following functions are in labels.cpp, and declared here so that +// graphium_cpp.cpp can expose them to Python via pybind. 
+std::tuple< + std::vector, + std::vector +> load_num_cols_and_dtypes( + const std::string& processed_graph_data_path, + const std::string& data_hash); + +std::vector load_metadata_tensors( + const std::string processed_graph_data_path, + const std::string stage, + const std::string data_hash); + +std::vector load_stats( + const std::string processed_graph_data_path, + const std::string data_hash, + const std::string task_name); + +std::tuple< + std::unordered_map>, + std::unordered_map>, + std::vector, + std::vector +> prepare_and_save_data( + const pybind11::list& task_names, + pybind11::dict& task_dataset_args, + const pybind11::dict& task_label_normalization, + const std::string processed_graph_data_path, + const std::string data_hash, + const pybind11::dict& task_train_indices, + const pybind11::dict& task_val_indices, + const pybind11::dict& task_test_indices, + bool add_self_loop = false, + bool explicit_H = false); + +void load_labels_from_index( + const std::string stage_directory, + const int64_t mol_index, + const at::Tensor& mol_file_data_offsets, + const pybind11::list& label_names, + const pybind11::list& label_num_cols, + const pybind11::list& label_data_types, + pybind11::dict& labels); + +std::string extract_string( + const at::Tensor& concat_strings, + const at::Tensor& string_offsets, + const int64_t index); diff --git a/graphium/graphium_cpp/one_hot.cpp b/graphium/graphium_cpp/one_hot.cpp new file mode 100644 index 000000000..1f4ec09fe --- /dev/null +++ b/graphium/graphium_cpp/one_hot.cpp @@ -0,0 +1,358 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +#include "one_hot.h" +#include "features.h" +#include "float_features.h" + +#include +#include + +#include +#include +#include +#include + +template +class OneHotLookup { + size_t indices[NUM_IN]; +public: + constexpr OneHotLookup(const size_t list[MAX_OUT]) : indices() { + std::fill(indices, indices + NUM_IN, MAX_OUT); + for (size_t i = 0; i < MAX_OUT; ++i) { + indices[list[i]] = i; + } + } + constexpr size_t operator[](size_t i) const { + return (i < NUM_IN) ? indices[i] : MAX_OUT; + } +}; + +// This list of elements matches ATOM_LIST in graphium/features/nmp.py +constexpr size_t atomicNumList[] = { + 6 -1, // C + 7 -1, // N + 8 -1, // O + 16-1,// S + 9 -1, // F + 14-1,// Si + 15-1,// P + 17-1,// Cl + 35-1,// Br + 12-1,// Mg + 11-1,// Na + 20-1,// Ca + 26-1,// Fe + 33-1,// As + 13-1,// Al + 53-1,// I + 5 -1,// B + 23-1,// V + 19-1,// K + 81-1,// Tl + 70-1,// Yb + 51-1,// Sb + 50-1,// Sn + 47-1,// Ag + 46-1,// Pd + 27-1,// Co + 34-1,// Se + 22-1,// Ti + 30-1,// Zn + 1 -1,// H + 3 -1,// Li + 32-1,// Ge + 29-1,// Cu + 79-1,// Au + 28-1,// Ni + 48-1,// Cd + 49-1,// In + 25-1,// Mn + 40-1,// Zr + 24-1,// Cr + 78-1,// Pt + 80-1,// Hg + 82-1,// Pb +}; +constexpr size_t atomicNumCount = std::extent::value; +constexpr OneHotLookup<118, atomicNumCount> atomicNumLookup(atomicNumList); + +constexpr size_t degreeCount = 5; +constexpr size_t valenceCount = 7; + +// Reverse alphabetical order, excluding "OTHER", +// matching HYBRIDIZATION_LIST in graphium/features/nmp.py +constexpr size_t hybridizationList[] = { + RDKit::Atom::HybridizationType::UNSPECIFIED, + RDKit::Atom::HybridizationType::SP3D2, + RDKit::Atom::HybridizationType::SP3D, + RDKit::Atom::HybridizationType::SP3, + RDKit::Atom::HybridizationType::SP2D, + RDKit::Atom::HybridizationType::SP2, + RDKit::Atom::HybridizationType::SP, + RDKit::Atom::HybridizationType::S, +}; +constexpr size_t hybridizationCount = std::extent::value; +constexpr OneHotLookup<8, hybridizationCount> 
hybridizationLookup(hybridizationList); + +static const std::string chiralityRString("R"); + +enum ElementPhase { + GAS, + ARTIFICIAL, + LIQ, + SOLID +}; +// This table is from the Phase column of graphium/features/periodic_table.csv +constexpr ElementPhase atomicNumToPhase[] = { + GAS, GAS, + SOLID, SOLID, SOLID, SOLID, GAS, GAS, GAS, GAS, + SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, GAS, GAS, + SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, LIQ, GAS, + SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, ARTIFICIAL, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, GAS, + SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, ARTIFICIAL, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, LIQ, SOLID, SOLID, SOLID, SOLID, SOLID, GAS, + SOLID, SOLID, SOLID, SOLID, SOLID, SOLID, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, ARTIFICIAL, +}; +constexpr size_t phaseCount = 4; + +enum ElementType { + NOBLE_GAS, + ALKALI_METAL, + METAL, HALOGEN, + LANTHANIDE, + ALKALINE_EARTH_METAL, + TRANSITION_METAL, + ACTINIDE, + METALLOID, + NONE, + TRANSACTINIDE, + NONMETAL, + + NUM_ELEMENT_TYPES +}; +// This table is from the Type column of graphium/features/periodic_table.csv +constexpr ElementType atomicNumToType[] = { + NONMETAL, NOBLE_GAS, + ALKALI_METAL, ALKALINE_EARTH_METAL, METALLOID, NONMETAL, NONMETAL, NONMETAL, HALOGEN, NOBLE_GAS, + ALKALI_METAL, ALKALINE_EARTH_METAL, METAL, METALLOID, NONMETAL, NONMETAL, HALOGEN, NOBLE_GAS, + ALKALI_METAL, ALKALINE_EARTH_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, 
TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, METAL, METALLOID, METALLOID, NONMETAL, HALOGEN, NOBLE_GAS, + ALKALI_METAL, ALKALINE_EARTH_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, METAL, METAL, METALLOID, METALLOID, HALOGEN, NOBLE_GAS, + ALKALI_METAL, ALKALINE_EARTH_METAL, LANTHANIDE, LANTHANIDE, LANTHANIDE, LANTHANIDE, LANTHANIDE, LANTHANIDE, LANTHANIDE, LANTHANIDE, LANTHANIDE, LANTHANIDE, LANTHANIDE, LANTHANIDE, LANTHANIDE, LANTHANIDE, LANTHANIDE, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, TRANSITION_METAL, METAL, METAL, METAL, METALLOID, NOBLE_GAS, + ALKALI_METAL, ALKALINE_EARTH_METAL, ACTINIDE, ACTINIDE, ACTINIDE, ACTINIDE, ACTINIDE, ACTINIDE, ACTINIDE, ACTINIDE, ACTINIDE, ACTINIDE, ACTINIDE, ACTINIDE, ACTINIDE, ACTINIDE, ACTINIDE, ACTINIDE, TRANSACTINIDE, TRANSACTINIDE, TRANSACTINIDE, TRANSACTINIDE, TRANSACTINIDE, TRANSACTINIDE, TRANSACTINIDE, TRANSACTINIDE, TRANSACTINIDE, NONE, TRANSACTINIDE, NONE, TRANSACTINIDE, NONE, NOBLE_GAS +}; +constexpr size_t typeCount = ElementType::NUM_ELEMENT_TYPES; + +// This matches BOND_TYPES in graphium/features/nmp.py +constexpr size_t bondTypeList[] = { + RDKit::Bond::BondType::SINGLE, + RDKit::Bond::BondType::DOUBLE, + RDKit::Bond::BondType::TRIPLE, + RDKit::Bond::BondType::AROMATIC, +}; +constexpr size_t bondTypeCount = std::extent::value; +constexpr OneHotLookup<22, bondTypeCount> bondTypeLookup(bondTypeList); + +// This matches BOND_STEREO in graphium/features/nmp.py +constexpr size_t bondStereoList[] = { + RDKit::Bond::BondStereo::STEREONONE, + RDKit::Bond::BondStereo::STEREOANY, + RDKit::Bond::BondStereo::STEREOZ, + RDKit::Bond::BondStereo::STEREOE, + RDKit::Bond::BondStereo::STEREOCIS, + RDKit::Bond::BondStereo::STEREOTRANS, +}; +constexpr size_t bondStereoCount = 
std::extent::value; +constexpr OneHotLookup<6, bondStereoCount> bondStereoLookup(bondStereoList); + +size_t get_one_hot_atom_feature_size(AtomOneHotFeature feature) { + switch (feature) { + case AtomOneHotFeature::ATOMIC_NUM: return atomicNumCount + 1; + case AtomOneHotFeature::DEGREE: return degreeCount + 1; + case AtomOneHotFeature::VALENCE: return valenceCount + 1; + case AtomOneHotFeature::IMPLICIT_VALENCE: return valenceCount + 1; + case AtomOneHotFeature::HYBRIDIZATION: return hybridizationCount + 1; + // "R", anything else ("S" or no value), bool for if other property present + case AtomOneHotFeature::CHIRALITY: return 3; + case AtomOneHotFeature::PHASE: return phaseCount + 1; + case AtomOneHotFeature::TYPE: return typeCount + 1; + case AtomOneHotFeature::GROUP: return groupCount + 1; + case AtomOneHotFeature::PERIOD: return periodCount + 1; + } + return 0; +} + +template +size_t get_one_hot_atom_feature(const GraphData& graph, T* data, AtomOneHotFeature feature, size_t stride) { + const size_t num_atoms = graph.num_atoms; + const RDKit::ROMol& mol = *graph.mol.get(); + const size_t feature_size = get_one_hot_atom_feature_size(feature); + const size_t total_feature_size = feature_size * num_atoms; + if (total_feature_size == 0) { + return feature_size; + } + { + T* current_data = data; + for (size_t i = 0; i < num_atoms; ++i) { + memset(current_data, 0, sizeof(data[0]) * feature_size); + current_data += stride; + } + } + switch (feature) { + case AtomOneHotFeature::ATOMIC_NUM: + for (size_t atomIndex = 0; atomIndex < num_atoms; ++atomIndex, data += stride) { + size_t atomicNum = graph.atoms[atomIndex].atomicNum; + data[atomicNumLookup[atomicNum-1]] = FeatureValues::one; + } + return feature_size; + case AtomOneHotFeature::DEGREE: + for (size_t atomIndex = 0; atomIndex < num_atoms; ++atomIndex, data += stride) { + auto degree = mol.getAtomWithIdx(atomIndex)->getDegree(); + size_t dataIndex = (degree < degreeCount) ? 
degree : degreeCount; + data[dataIndex] = FeatureValues::one; + } + return feature_size; + case AtomOneHotFeature::VALENCE: + for (size_t atomIndex = 0; atomIndex < num_atoms; ++atomIndex, data += stride) { + auto valence = mol.getAtomWithIdx(atomIndex)->getTotalValence(); + size_t dataIndex = (size_t(valence) < valenceCount) ? size_t(valence) : valenceCount; + data[dataIndex] = FeatureValues::one; + } + return feature_size; + case AtomOneHotFeature::IMPLICIT_VALENCE: + for (size_t atomIndex = 0; atomIndex < num_atoms; ++atomIndex, data += stride) { + auto valence = mol.getAtomWithIdx(atomIndex)->getImplicitValence(); + size_t dataIndex = (size_t(valence) < valenceCount) ? size_t(valence) : valenceCount; + data[dataIndex] = FeatureValues::one; + } + return feature_size; + case AtomOneHotFeature::HYBRIDIZATION: + for (size_t atomIndex = 0; atomIndex < num_atoms; ++atomIndex, data += stride) { + auto hybridization = mol.getAtomWithIdx(atomIndex)->getHybridization(); + data[hybridizationLookup[hybridization]] = FeatureValues::one; + } + return feature_size; + case AtomOneHotFeature::CHIRALITY: + for (size_t atomIndex = 0; atomIndex < num_atoms; ++atomIndex, data += stride) { + std::string chirality; + const RDKit::Atom* atom = mol.getAtomWithIdx(atomIndex); + bool isPresent = atom->getPropIfPresent(RDKit::common_properties::_CIPCode, chirality); + data[(isPresent && chirality == chiralityRString) ? 0 : 1] = FeatureValues::one; + if (atom->hasProp(RDKit::common_properties::_ChiralityPossible)) { + data[2] = FeatureValues::one; + } + } + return feature_size; + case AtomOneHotFeature::PHASE: + for (size_t atomIndex = 0; atomIndex < num_atoms; ++atomIndex, data += stride) { + size_t atomicNum = graph.atoms[atomIndex].atomicNum; + size_t dataIndex = phaseCount; + if (atomicNum - 1 < std::extent::value) { + ElementPhase phase = atomicNumToPhase[atomicNum - 1]; + // Group numbers are 1-based, but the array indices aren't. 
+ dataIndex = phase - 1; + } + data[dataIndex] = FeatureValues::one; + } + return feature_size; + case AtomOneHotFeature::TYPE: + for (size_t atomIndex = 0; atomIndex < num_atoms; ++atomIndex, data += stride) { + size_t atomicNum = graph.atoms[atomIndex].atomicNum; + size_t dataIndex = typeCount; + if (atomicNum - 1 < std::extent::value) { + ElementType type = atomicNumToType[atomicNum - 1]; + // Group numbers are 1-based, but the array indices aren't. + dataIndex = type - 1; + } + data[dataIndex] = FeatureValues::one; + } + return feature_size; + case AtomOneHotFeature::GROUP: + for (size_t atomIndex = 0; atomIndex < num_atoms; ++atomIndex, data += stride) { + size_t atomicNum = graph.atoms[atomIndex].atomicNum; + size_t dataIndex = groupCount; + if (atomicNum - 1 < std::extent::value) { + uint8_t group = atomicNumToGroupTable[atomicNum - 1]; + // Group numbers are 1-based, but the array indices aren't. + dataIndex = group - 1; + } + data[dataIndex] = FeatureValues::one; + } + return feature_size; + case AtomOneHotFeature::PERIOD: + for (size_t atomIndex = 0; atomIndex < num_atoms; ++atomIndex, data += stride) { + size_t atomicNum = graph.atoms[atomIndex].atomicNum; + size_t dataIndex = periodCount; + if (atomicNum - 1 < std::extent::value) { + uint8_t period = atomicNumToPeriodTable[atomicNum - 1]; + // Period numbers are 1-based, but the array indices aren't. + dataIndex = period - 1; + } + data[dataIndex] = FeatureValues::one; + } + return feature_size; + } + + // Missing implementation + assert(0); + return feature_size; +} + +// Explicit instantiations, so that the function can be templated +// but still be used from other cpp files. 
+template size_t get_one_hot_atom_feature(const GraphData& graph, int16_t* data, AtomOneHotFeature feature, size_t stride); +template size_t get_one_hot_atom_feature(const GraphData& graph, float* data, AtomOneHotFeature feature, size_t stride); +template size_t get_one_hot_atom_feature(const GraphData& graph, double* data, AtomOneHotFeature feature, size_t stride); + + +size_t get_one_hot_bond_feature_size(BondFeature feature) { + switch (feature) { + case BondFeature::TYPE_ONE_HOT: return bondTypeCount + 1; + case BondFeature::STEREO_ONE_HOT: return bondStereoCount + 1; + default: + break; + } + // Missing implementation + assert(0); + return 0; +} + +template +size_t get_one_hot_bond_feature(const GraphData& graph, T* data, BondFeature feature, size_t stride) { + const size_t num_bonds = graph.num_bonds; + const size_t feature_size = get_one_hot_bond_feature_size(feature); + const size_t total_feature_size = feature_size * num_bonds; + if (total_feature_size == 0) { + return 0; + } + { + T* current_data = data; + for (size_t i = 0; i < num_bonds; ++i) { + memset(current_data, 0, sizeof(data[0]) * feature_size); + current_data += stride; + } + } + switch (feature) { + case BondFeature::TYPE_ONE_HOT: + for (size_t i = 0; i < num_bonds; ++i, data += stride) { + auto type = graph.bonds[i].bondType; + data[bondTypeLookup[type]] = FeatureValues::one; + } + return feature_size; + case BondFeature::STEREO_ONE_HOT: + for (size_t i = 0; i < num_bonds; ++i, data += stride) { + auto stereo = graph.bonds[i].stereo; + data[bondStereoLookup[stereo]] = FeatureValues::one; + } + return feature_size; + default: + // Missing implementation + assert(0); + return feature_size; + } +} + +// Explicit instantiations, so that the function can be templated +// but still be used from other cpp files. 
+template size_t get_one_hot_bond_feature(const GraphData& graph, int16_t* data, BondFeature feature, size_t stride); +template size_t get_one_hot_bond_feature(const GraphData& graph, float* data, BondFeature feature, size_t stride); +template size_t get_one_hot_bond_feature(const GraphData& graph, double* data, BondFeature feature, size_t stride); diff --git a/graphium/graphium_cpp/one_hot.h b/graphium/graphium_cpp/one_hot.h new file mode 100644 index 000000000..475b87a8e --- /dev/null +++ b/graphium/graphium_cpp/one_hot.h @@ -0,0 +1,29 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "features.h" + +#include + +#include + +size_t get_one_hot_atom_feature_size(AtomOneHotFeature feature); + +template +size_t get_one_hot_atom_feature(const GraphData& graph, T* data, AtomOneHotFeature feature, size_t stride); + +extern template size_t get_one_hot_atom_feature(const GraphData& graph, int16_t* data, AtomOneHotFeature feature, size_t stride); +extern template size_t get_one_hot_atom_feature(const GraphData& graph, float* data, AtomOneHotFeature feature, size_t stride); +extern template size_t get_one_hot_atom_feature(const GraphData& graph, double* data, AtomOneHotFeature feature, size_t stride); + +size_t get_one_hot_bond_feature_size(BondFeature feature); + +template +size_t get_one_hot_bond_feature(const GraphData& graph, T* data, BondFeature feature, size_t stride); + +extern template size_t get_one_hot_bond_feature(const GraphData& graph, int16_t* data, BondFeature feature, size_t stride); +extern template size_t get_one_hot_bond_feature(const GraphData& graph, float* data, BondFeature feature, size_t stride); +extern template size_t get_one_hot_bond_feature(const GraphData& graph, double* data, BondFeature feature, size_t stride); + diff --git a/graphium/graphium_cpp/random_walk.cpp b/graphium/graphium_cpp/random_walk.cpp new file mode 100644 
index 000000000..e4dc3116b --- /dev/null +++ b/graphium/graphium_cpp/random_walk.cpp @@ -0,0 +1,141 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +#include "random_walk.h" + +#include +#include +#include +#include +#include + +template +void multiply_dense_by_sparse(uint32_t n, T* out_matrix, const T* in_matrix, const uint32_t* neighbor_starts, const uint32_t* neighbors, const T* col_major_weights) { + for (uint32_t row = 0; row < n; ++row) { + T* out_row_start = out_matrix + row * n; + const T* in_row_start = in_matrix + row * n; + for (uint32_t col = 0; col < n; ++col) { + T sum = T(0); + // The adjacency is symmetric, so rows and cols are swappable there, + // but the weights might not be, so for fast access, we want column major weights. + const uint32_t* neighbors_start = neighbors + neighbor_starts[col]; + const uint32_t* neighbors_end = neighbors + neighbor_starts[col+1]; + const T* weights_start = col_major_weights + neighbor_starts[col]; + for (; neighbors_start != neighbors_end; ++neighbors_start, ++weights_start) { + sum += *weights_start * in_row_start[*neighbors_start]; + } + out_row_start[col] = sum; + } + } +} + +// The adjacency (neighbor_starts and neighbors) must be symmetric. +// powers must be in increasing sorted order. 
+template +void compute_rwse( + const uint32_t num_powers, + const uint64_t* powers, + const uint32_t n, + const uint32_t* neighbor_starts, + const uint32_t* neighbors, + RandomWalkDataOption option, + std::vector& output, + int space_dim) { + + // Cast one n to size_t to avoid integer overflow if n >= 65536 + if (option == RandomWalkDataOption::PROBABILITIES) { + output.resize(num_powers * size_t(n)); + } + else { + output.resize(num_powers * size_t(n) * n); + } + + if (num_powers == 0) { + return; + } + if (n == 1) { + // Special case: All ones for single node, matching original code + for (uint32_t i = 0; i < output.size(); ++i) { + output[i] = T(1); + } + return; + } + + // Initialize this to represent column major D^-1 * adj + std::vector col_major_weights; + col_major_weights.resize(neighbor_starts[n]); + for (uint32_t col = 0, i = 0; col < n; ++col) { + const uint32_t* neighbor_start = neighbors + neighbor_starts[col]; + const uint32_t* neighbor_end = neighbors + neighbor_starts[col+1]; + for (; neighbor_start != neighbor_end; ++neighbor_start, ++i) { + const uint32_t neighbor = *neighbor_start; + uint32_t neighbor_degree = neighbor_starts[neighbor + 1] - neighbor_starts[neighbor]; + T degree_inv = (neighbor_degree == 0) ? 
T(0) : T(1) / T(neighbor_degree); + col_major_weights[i] = degree_inv; + } + } + + // Space for 2 matrices, to alternate between them + std::vector matrix; + matrix.resize(2 * size_t(n) * n, T(0)); + T* matrix0 = matrix.data(); + T* matrix1 = matrix.data() + size_t(n) * n; + uint64_t current_power = 0; + // Initialize current matrix to identity matrix + for (size_t i = 0, diag_index = 0; i < n; ++i, diag_index += (n+1)) { + matrix0[diag_index] = T(1); + } + + for (uint32_t power_index = 0; power_index < num_powers; ++power_index) { + const uint64_t target_power = powers[power_index]; + assert(target_power >= current_power); + while (target_power > current_power) { + std::swap(matrix0, matrix1); + multiply_dense_by_sparse(n, matrix0, matrix1, neighbor_starts, neighbors, col_major_weights.data()); + ++current_power; + } + + // Copy results to output + if (option == RandomWalkDataOption::PROBABILITIES) { + const T scale_factor = (space_dim == 0) ? T(1) : T(std::pow(T(target_power), T(0.5) * T(space_dim))); + // Just copy the diagonal values + for (size_t i = 0, diag_index = 0; i < n; ++i, diag_index += (n + 1)) { + output[i * num_powers + power_index] = scale_factor * matrix0[diag_index]; + } + } + else { + // Copy transition probabilities, making sure the dimensions are correct, because matrix0 isn't symmetric. 
+ // Least significant dimension is num_powers + // Middle dimension is the columns across a single row of matrix0 + // Most significant dimension is the rows of the matrix0 + const size_t row_stride = num_powers * size_t(n); + for (size_t row = 0, i = 0; row < n; ++row) { + for (size_t col = 0; col < n; ++col, ++i) { + output[row * row_stride + col * num_powers + power_index] = matrix0[i]; + } + } + } + } +} + +// Explicit instantiations for float and double +template +void compute_rwse( + const uint32_t num_powers, + const uint64_t* powers, + const uint32_t n, + const uint32_t* neighbor_starts, + const uint32_t* neighbors, + RandomWalkDataOption option, + std::vector& output, + int space_dim); +template +void compute_rwse( + const uint32_t num_powers, + const uint64_t* powers, + const uint32_t n, + const uint32_t* neighbor_starts, + const uint32_t* neighbors, + RandomWalkDataOption option, + std::vector& output, + int space_dim); diff --git a/graphium/graphium_cpp/random_walk.h b/graphium/graphium_cpp/random_walk.h new file mode 100644 index 000000000..1617a7be2 --- /dev/null +++ b/graphium/graphium_cpp/random_walk.h @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include + +enum class RandomWalkDataOption { + PROBABILITIES, + MATRIX +}; + +template +void compute_rwse( + const uint32_t num_powers, + const uint64_t* powers, + const uint32_t n, + const uint32_t* neighbor_starts, + const uint32_t* neighbors, + RandomWalkDataOption option, + std::vector& output, + int space_dim = 0); + +extern template +void compute_rwse( + const uint32_t num_powers, + const uint64_t* powers, + const uint32_t n, + const uint32_t* neighbor_starts, + const uint32_t* neighbors, + RandomWalkDataOption option, + std::vector& output, + int space_dim); +extern template +void compute_rwse( + const uint32_t num_powers, + const uint64_t* powers, + const uint32_t n, + const uint32_t* neighbor_starts, + const uint32_t* neighbors, + RandomWalkDataOption option, + std::vector& output, + int space_dim); diff --git a/graphium/graphium_cpp/setup.py b/graphium/graphium_cpp/setup.py new file mode 100755 index 000000000..e1f737db0 --- /dev/null +++ b/graphium/graphium_cpp/setup.py @@ -0,0 +1,82 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Setup script that builds graphium_cpp. +At time of writing, this has only been tested with GCC 10.5.0. 
# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

"""
Setup script that builds graphium_cpp.
At time of writing, this has only been tested with GCC 10.5.0.

To build, git clone pybind11 into this directory, then run:
rm -r build/*
export PYTHONPATH=$PYTHONPATH:./pybind11
python ./setup.py build
cp build/lib.linux-x86_64-cpython-311/graphium_cpp.cpython-311-x86_64-linux-gnu.so ~/mambaforge/envs/graphium/bin
"""

import os

import numpy
import rdkit
import torch
from pybind11.setup_helpers import Pybind11Extension, build_ext

# FIX: distutils was deprecated by PEP 632 and removed in Python 3.12;
# setuptools.setup is the drop-in replacement and is what pybind11's
# setup_helpers documentation recommends.
from setuptools import setup

torch_dir = torch.__path__[0]

# Locate the environment prefix containing RDKit's headers/libraries by finding
# the "lib" component of rdkit's install path (conda-style layout).
# FIX: raise a clear error instead of an opaque ValueError when the layout is
# unexpected, and split on os.sep rather than a hard-coded "/".
_rdkit_parts = rdkit.__path__[0].split(os.sep)
try:
    rdkit_lib_index = _rdkit_parts.index("lib")
except ValueError as err:
    raise RuntimeError(
        f"Could not find 'lib' in the rdkit install path {rdkit.__path__[0]!r}; "
        "graphium_cpp expects a conda-style layout with RDKit headers and libraries."
    ) from err
rdkit_prefix = os.sep.join(_rdkit_parts[:rdkit_lib_index])

ext_modules = [
    Pybind11Extension(
        "graphium_cpp",
        sources=[
            "graphium_cpp.cpp",
            "features.cpp",
            "labels.cpp",
            "commute.cpp",
            "electrostatic.cpp",
            "float_features.cpp",
            "graphormer.cpp",
            "one_hot.cpp",
            "random_walk.cpp",
            "spectral.cpp",
        ],
        language="c++",
        cxx_std=20,
        include_dirs=[
            os.path.join(torch_dir, "include"),
            os.path.join(torch_dir, "include/torch/csrc/api/include"),
            os.path.join(rdkit_prefix, "include/rdkit"),
            numpy.get_include(),
        ],
        libraries=[
            "RDKitAlignment",
            "RDKitDataStructs",
            "RDKitDistGeometry",
            "RDKitDistGeomHelpers",
            "RDKitEigenSolvers",
            "RDKitForceField",
            "RDKitForceFieldHelpers",
            "RDKitGenericGroups",
            "RDKitGraphMol",
            "RDKitInchi",
            "RDKitRDInchiLib",
            "RDKitRDBoost",
            "RDKitRDGeneral",
            "RDKitRDGeometryLib",
            "RDKitRingDecomposerLib",
            "RDKitSmilesParse",
            "RDKitSubstructMatch",
            "torch_cpu",
            "torch_python",
        ],
        library_dirs=[
            os.path.join(rdkit_prefix, "lib"),
            os.path.join(torch_dir, "lib"),
        ],
        extra_compile_args=[
            "-O3",
            "-Wall",
            "-Wmissing-field-initializers",
            "-Wmaybe-uninitialized",
            "-Wuninitialized",
        ],
    )
]

setup(
    name="graphium_cpp",
    version="0.1",
    author="N. Dickson",
    author_email="ndickson@nvidia.com",
    license="NVIDIA Proprietary",
    description="C++ extension for graphium",
    ext_modules=ext_modules,
    cmdclass={"build_ext": build_ext},
)
Dickson", + author_email="ndickson@nvidia.com", + license="NVIDIA Proprietary", + description = "C++ extension for graphium", + ext_modules=ext_modules, + cmdclass={"build_ext": build_ext}) diff --git a/graphium/graphium_cpp/spectral.cpp b/graphium/graphium_cpp/spectral.cpp new file mode 100644 index 000000000..3f45586ff --- /dev/null +++ b/graphium/graphium_cpp/spectral.cpp @@ -0,0 +1,296 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +#include "spectral.h" + +#include +#include +#include +#include + +#include "features.h" +#include + +template +void compute_laplacian_eigendecomp_single(const uint32_t n, LaplacianData& data, bool symmetric) { + T* matrix = data.matrix_temp.data(); + std::unique_ptr matrix_alloc(new T[n * n]); + std::copy(matrix, matrix + n * n, matrix_alloc.get()); + + int64_t dims[2] = { n, n }; + at::Tensor torch_matrix = torch_tensor_from_array(std::move(matrix_alloc), dims, 2, c10::ScalarType::Double); + + at::Tensor eigenvalue_tensor; + at::Tensor eigenvector_tensor; + if (symmetric) { + // Using linalg_eigh should ensure we get all real eigenvalues and eigenvectors. 
+ // Arbitrarily choose lower-triangular portion (L) + auto tuple = at::linalg_eigh(torch_matrix, c10::string_view("L",1)); + eigenvalue_tensor = std::move(std::get<0>(tuple)); + eigenvector_tensor = std::move(std::get<1>(tuple)); + } + else { + auto tuple = at::linalg_eig(torch_matrix); + eigenvalue_tensor = std::move(std::get<0>(tuple)); + eigenvector_tensor = std::move(std::get<1>(tuple)); + } + assert(eigenvalue_tensor.ndimension() == 1); + assert(eigenvector_tensor.ndimension() == 2); + assert(eigenvalue_tensor.size(0) == n); + assert(eigenvector_tensor.size(0) == n); + assert(eigenvector_tensor.size(1) == n); + + // Copy eigenvalues + data.eigenvalues_temp.resize(n); + if (eigenvalue_tensor.scalar_type() == c10::ScalarType::Double) { + const double* const eigenvalue_data = eigenvalue_tensor.data_ptr(); + for (size_t i = 0; i < n; ++i) { + data.eigenvalues_temp[i] = T(eigenvalue_data[i]); + } + } + else if (eigenvalue_tensor.scalar_type() == c10::ScalarType::ComplexDouble) { + // TODO: Decide what to do about legitimately complex eigenvalues. + // This should only occur with Normalization::INVERSE, because real, symmetric + // matrices have real eigenvalues. + // For now, just assume that they're supposed to be real and were only complex + // due to roundoff. 
+ const c10::complex* const eigenvalue_data = eigenvalue_tensor.data_ptr>(); + for (size_t i = 0; i < n; ++i) { + data.eigenvalues_temp[i] = T(eigenvalue_data[i].real()); + } + } + else { + assert(0); + } + + // Copy eigenvectors + data.vectors.clear(); + data.vectors.resize(size_t(n) * n, 0); + T* vectors = data.vectors.data(); + if (eigenvector_tensor.scalar_type() == c10::ScalarType::Double) { + const double* const eigenvector_data = eigenvector_tensor.data_ptr(); + for (size_t i = 0; i < size_t(n) * n; ++i) { + vectors[i] = T(eigenvector_data[i]); + } + } + else if (eigenvector_tensor.scalar_type() == c10::ScalarType::ComplexDouble) { + // TODO: Decide what to do about legitimately complex eigenvectors. + // This should only occur with Normalization::INVERSE, because real, symmetric + // matrices have real eigenvectors. + // For now, just assume that they're supposed to be real and were only complex + // due to roundoff. + const c10::complex* const eigenvector_data = eigenvector_tensor.data_ptr>(); + for (size_t i = 0; i < size_t(n) * n; ++i) { + vectors[i] = T(eigenvector_data[i].real()); + } + } + else { + assert(0); + } + + // Find the sorted order of the eigenvalues + data.order_temp.resize(n); + std::iota(data.order_temp.begin(), data.order_temp.end(), 0); + std::stable_sort(data.order_temp.begin(), data.order_temp.end(), + [&data](uint32_t i, uint32_t j) -> bool { + return data.eigenvalues_temp[i] < data.eigenvalues_temp[j]; + } + ); + + // Copy the eigenvalues into the sorted order + data.eigenvalues.resize(n); + for (size_t i = 0; i < n; ++i) { + data.eigenvalues[i] = data.eigenvalues_temp[data.order_temp[i]]; + } + + // Copy the eigenvectors into the sorted order + std::swap(data.matrix_temp, data.vectors); + for (size_t row = 0, i = 0; row < n; ++row) { + const size_t source_row = data.order_temp[row]; + const size_t source_row_start = source_row * n; + for (size_t col = 0; col < n; ++col, ++i) { + data.vectors[i] = data.matrix_temp[source_row_start + 
col]; + } + } +} + +template +void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, const uint32_t* neighbors, Normalization normalization, LaplacianData& data, bool disconnected_comp, const T* weights) { + // Compute the weight row sums, if applicable, for the diagonal of the laplacian + if (weights != nullptr) { + data.eigenvalues_temp.clear(); + data.eigenvalues_temp.resize(n, 0); + for (uint32_t i = 0; i < n; ++i) { + const T* weights_begin = weights + row_starts[i]; + const T* weights_end = weights + row_starts[i + 1]; + T sum = T(0); + for (; weights_begin != weights_end; ++weights_begin) { + sum += *weights_begin; + } + data.eigenvalues_temp[i] = sum; + } + } + data.normalization = normalization; + + // Prepare the laplacian matrix of the graph + data.matrix_temp.clear(); + data.matrix_temp.resize(size_t(n) * n, 0); + T* matrix = data.matrix_temp.data(); + if (normalization == Normalization::NONE) { + for (uint32_t i = 0, outi = 0; i < n; ++i, outi += n) { + const uint32_t* neighbor_begin = neighbors + row_starts[i]; + const uint32_t* neighbor_end = neighbors + row_starts[i + 1]; + if (weights == nullptr) { + const uint32_t degree = row_starts[i + 1] - row_starts[i]; + matrix[outi + i] = T(degree); + for (; neighbor_begin < neighbor_end; ++neighbor_begin) { + uint32_t neighbor = *neighbor_begin; + matrix[outi + neighbor] = T(-1); + } + } + else { + matrix[outi + i] = data.eigenvalues_temp[i]; + const T* weights_begin = weights + row_starts[i]; + for (; neighbor_begin < neighbor_end; ++neighbor_begin, ++weights_begin) { + uint32_t neighbor = *neighbor_begin; + matrix[outi + neighbor] = -(*weights_begin); + } + } + } + } + else { + for (uint32_t i = 0, outi = 0; i < n; ++i, outi += n) { + const uint32_t rowDegree = row_starts[i + 1] - row_starts[i]; + if (rowDegree == 0) { + continue; + } + matrix[outi + i] = T(1); + + const T rowDenominator = (weights == nullptr) ? 
T(rowDegree) : data.eigenvalues_temp[i]; + const T inverseRowDegree = (normalization == Normalization::INVERSE) ? T(1) / rowDenominator : 0; + + const uint32_t* neighbor_begin = neighbors + row_starts[i]; + const uint32_t* neighbor_end = neighbors + row_starts[i + 1]; + for (; neighbor_begin < neighbor_end; ++neighbor_begin) { + uint32_t neighbor = *neighbor_begin; + if (normalization == Normalization::SYMMETRIC) { + const uint32_t colDegree = row_starts[neighbor + 1] - row_starts[neighbor]; + if (colDegree == 0) { + continue; + } + const T colDenominator = (weights == nullptr) ? T(colDegree) : data.eigenvalues_temp[neighbor]; + matrix[outi + neighbor] = T(-1) / std::sqrt(rowDenominator * colDenominator); + } + else { + assert(normalization == Normalization::INVERSE); + matrix[outi + neighbor] = -inverseRowDegree; + } + } + } + } + + std::vector components; + int32_t num_components = 0; + std::vector queue; + if (disconnected_comp && n > 1) { + // First, find which nodes are in which component. + components.resize(n, -1); + queue.reserve(n); + for (uint32_t starti = 0; starti < n; ++starti) { + if (components[starti] >= 0) { + continue; + } + const int32_t component = num_components; + ++num_components; + queue.push_back(starti); + components[starti] = component; + while (queue.size() != 0) { + uint32_t current = queue[queue.size()-1]; + queue.resize(queue.size()-1); + const uint32_t* neighbor_begin = neighbors + row_starts[current]; + const uint32_t* neighbor_end = neighbors + row_starts[current+1]; + for ( ; neighbor_begin != neighbor_end; ++neighbor_begin) { + uint32_t neighbor = *neighbor_begin; + if (neighbor > starti && components[neighbor] < 0) { + components[neighbor] = component; + queue.push_back(neighbor); + } + } + } + } + } + if (num_components == 1) { + compute_laplacian_eigendecomp_single(n, data, normalization != Normalization::INVERSE); + return; + } + + // There are multiple components. 
+ // To match the original code, handle them separately and + // pack them into the output. + + // data.eigenvalues is length n for the single component case, + // but to be able to handle this, it needs to be larger, so go with n by n + data.eigenvalues.clear(); + data.eigenvalues.resize(size_t(n) * n, 0); + data.vectors.clear(); + data.vectors.resize(size_t(n) * n, 0); + + LaplacianData sub_data; + for (int32_t component = 0; component < num_components; ++component) { + // Reuse queue for the indices + queue.resize(0); + for (uint32_t i = 0; i < n; ++i) { + if (components[i] == component) { + queue.push_back(i); + } + } + + // Extract the sub-matrix + const uint32_t sub_n = queue.size(); + sub_data.matrix_temp.resize(size_t(sub_n) * sub_n); + T* sub_matrix = sub_data.matrix_temp.data(); + for (uint32_t row_index = 0; row_index < sub_n; ++row_index) { + const uint32_t row = queue[row_index]; + const T*const source_row = matrix + row*size_t(n); + for (uint32_t col_index = 0; col_index < sub_n; ++col_index) { + const uint32_t col = queue[col_index]; + *sub_matrix = source_row[col]; + ++sub_matrix; + } + } + + // Find its eigenvalues and eigenvectors + compute_laplacian_eigendecomp_single(sub_n, sub_data, normalization != Normalization::INVERSE); + + // Copy the eigenvalues to the output. The excess is already zeroed out. + // Unlike the eigenvectors, below, might as well switch to using columns + // for the eigenvalues, because the caller can handle this case more + // easily with the single component case this way. + for (uint32_t row_index = 0; row_index < sub_n; ++row_index) { + const uint32_t row = queue[row_index]; + T*const dest_row = data.eigenvalues.data() + row*size_t(n); + for (uint32_t col_index = 0; col_index < sub_n; ++col_index) { + // Destination data within the row is left justified, + // NOT distributed based on the component. + dest_row[col_index] = sub_data.eigenvalues[col_index]; + } + } + + // Copy the (row) eigenvectors to the output. 
The excess is already zeroed out. + // The caller changes them to column eigenvectors. + for (uint32_t row_index = 0; row_index < sub_n; ++row_index) { + // Destination data is top-aligned, NOT distributed + // based on the component. + T*const dest_row = data.vectors.data() + row_index*size_t(n); + const T*const source_row = sub_data.vectors.data() + row_index*size_t(sub_n); + for (uint32_t col_index = 0; col_index < sub_n; ++col_index) { + // Columns ARE distributed based on the component. + const uint32_t col = queue[col_index]; + dest_row[col] = source_row[col_index]; + } + } + } +} + +template void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, const uint32_t* neighbors, Normalization normalization, LaplacianData& data, bool disconnected_comp, const float* weights); +template void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, const uint32_t* neighbors, Normalization normalization, LaplacianData& data, bool disconnected_comp, const double* weights); diff --git a/graphium/graphium_cpp/spectral.h b/graphium/graphium_cpp/spectral.h new file mode 100644 index 000000000..5ecad8bbe --- /dev/null +++ b/graphium/graphium_cpp/spectral.h @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "features.h" + +#include +#include + +template +struct LaplacianData { + Normalization normalization; + + std::vector vectors; + std::vector eigenvalues; + + std::vector matrix_temp; + std::vector eigenvalues_temp; + std::vector order_temp; +}; + +// This outputs the eigenvalues in data.eigenvalues and the eigenvectors in data.vectors +template +void compute_laplacian_eigendecomp( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + Normalization normalization, + LaplacianData& data, + bool disconnected_comp = false, + const T* weights = nullptr); + +extern template void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, const uint32_t* neighbors, Normalization normalization, LaplacianData& data, bool disconnected_comp, const float* weights); +extern template void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, const uint32_t* neighbors, Normalization normalization, LaplacianData& data, bool disconnected_comp, const double* weights); diff --git a/tests/test_collate.py b/tests/test_collate.py index 3cb453b32..77dee64f9 100644 --- a/tests/test_collate.py +++ b/tests/test_collate.py @@ -28,12 +28,12 @@ class test_Collate(ut.TestCase): def test_collate_labels(self): # Create fake labels - labels_size_dict = { - "graph_label1": [1], - "graph_label2": [3], - "node_label2": [5], - "edge_label3": [5, 2], - "node_label4": [5, 1], + labels_num_cols_dict = { + "graph_label1": 1, + "graph_label2": 3, + "node_label2": 1, + "edge_label3": 2, + "node_label4": 1, } labels_dtype_dict = { "graph_label1": torch.float32, @@ -57,9 +57,12 @@ def test_collate_labels(self): pyg_labels[key] = val + 17 * 2 fake_labels.append(pyg_labels) + num_nodes = [g.num_nodes for g in fake_labels] + num_edges = [g.num_edges for g in fake_labels] + # Collate labels and check for the right shapes and dtypes collated_labels = collate_labels( - 
deepcopy(fake_labels), deepcopy(labels_size_dict), deepcopy(labels_dtype_dict) + deepcopy(fake_labels), deepcopy(labels_num_cols_dict), deepcopy(labels_dtype_dict), num_nodes, num_edges ) self.assertEqual(collated_labels["graph_label1"].shape, torch.Size([num_labels, 1])) # , 1 self.assertEqual(collated_labels["graph_label2"].shape, torch.Size([num_labels, 3])) # , 1 @@ -108,15 +111,15 @@ def test_collate_labels(self): label4_true[missing_labels["node_label4"]] = float("nan") # Collate labels and check for the right shapes - labels_size_dict = { - "graph_label1": [1], - "graph_label2": [3], - "node_label2": [5], - "edge_label3": [5, 2], - "node_label4": [5, 1], + labels_num_cols_dict = { + "graph_label1": 1, + "graph_label2": 3, + "node_label2": 1, + "edge_label3": 2, + "node_label4": 1, } collated_labels = collate_labels( - deepcopy(fake_labels), deepcopy(labels_size_dict), deepcopy(labels_dtype_dict) + deepcopy(fake_labels), deepcopy(labels_num_cols_dict), deepcopy(labels_dtype_dict), num_nodes, num_edges ) self.assertEqual(collated_labels["graph_label1"].shape, torch.Size([num_labels, 1])) # , 1 self.assertEqual(collated_labels["graph_label2"].shape, torch.Size([num_labels, 3])) # , 1 @@ -138,9 +141,9 @@ def test_collate_labels(self): collated_labels["node_label4"].numpy(), label4_true.flatten(0, 1).numpy() ) # Now test the `graphium_collate_fn` function when only labels are given - fake_labels2 = [{"labels": this_label} for this_label in fake_labels] + fake_labels2 = [{"labels": this_label, "num_nodes": this_label.num_nodes, "num_edges": this_label.num_edges} for this_label in fake_labels] collated_labels = graphium_collate_fn( - deepcopy(fake_labels2), labels_size_dict=labels_size_dict, labels_dtype_dict=labels_dtype_dict + deepcopy(fake_labels2), labels_num_cols_dict=labels_num_cols_dict, labels_dtype_dict=labels_dtype_dict )["labels"] self.assertEqual(collated_labels["graph_label1"].shape, torch.Size([num_labels, 1])) 
self.assertEqual(collated_labels["graph_label2"].shape, torch.Size([num_labels, 3])) diff --git a/tests/test_datamodule.py b/tests/test_datamodule.py index 824b80d50..2848b3031 100644 --- a/tests/test_datamodule.py +++ b/tests/test_datamodule.py @@ -22,6 +22,8 @@ from graphium.utils.fs import rm, exists, get_size from graphium.data import GraphOGBDataModule, MultitaskFromSmilesDataModule +import graphium_cpp + TEMP_CACHE_DATA_PATH = "tests/temp_cache_0000" @@ -45,23 +47,22 @@ def test_ogb_datamodule(self): task_specific_args = {} task_specific_args["task_1"] = {"task_level": "graph", "dataset_name": dataset_name} dm_args = {} - dm_args["processed_graph_data_path"] = None dm_args["featurization"] = featurization_args dm_args["batch_size_training"] = 16 dm_args["batch_size_inference"] = 16 dm_args["num_workers"] = 0 dm_args["pin_memory"] = True - dm_args["featurization_n_jobs"] = 0 - dm_args["featurization_progress"] = True - dm_args["featurization_backend"] = "loky" - dm_args["featurization_batch_size"] = 50 - ds = GraphOGBDataModule(task_specific_args, **dm_args) + # Delete the cache if already exist + if exists(TEMP_CACHE_DATA_PATH): + rm(TEMP_CACHE_DATA_PATH, recursive=True) + + ds = GraphOGBDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH, **dm_args) - ds.prepare_data(save_smiles_and_ids=False) + ds.prepare_data() # Check the keys in the dataset - ds.setup(save_smiles_and_ids=False) + ds.setup() assert set(ds.train_ds[0].keys()) == {"features", "labels"} # Delete the cache if already exist @@ -69,13 +70,13 @@ def test_ogb_datamodule(self): rm(TEMP_CACHE_DATA_PATH, recursive=True) # Reset the datamodule - ds = GraphOGBDataModule(task_specific_args, **dm_args) + ds = GraphOGBDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH, **dm_args) - ds.prepare_data(save_smiles_and_ids=True) + ds.prepare_data() # Check the keys in the dataset - ds.setup(save_smiles_and_ids=True) - assert set(ds.train_ds[0].keys()) == 
{"smiles", "mol_ids", "features", "labels"} + ds.setup() + assert set(ds.train_ds[0].keys()) == {"features", "labels"} # test module assert ds.num_edge_feats == 5 @@ -84,100 +85,7 @@ def test_ogb_datamodule(self): # test batch loader batch = next(iter(ds.train_dataloader())) - assert len(batch["smiles"]) == 16 assert len(batch["labels"]["graph_task_1"]) == 16 - assert len(batch["mol_ids"]) == 16 - - def test_none_filtering(self): - # Create the objects to filter - list_of_num = [ii for ii in range(100)] - list_of_str = [str(ii) for ii in list_of_num] - tuple_of_num = tuple(list_of_num) - array_of_num = np.asarray(list_of_num) - array_of_str = np.asarray(list_of_str) - tensor_of_num = torch.as_tensor(array_of_num) - arrays_of_num = np.stack([list_of_num, list_of_num, list_of_num], axis=1) - arrays_of_str = np.stack([list_of_str, list_of_str, list_of_str], axis=1) - tensors_of_num = torch.as_tensor(arrays_of_num) - dic = {"str": list_of_str, "num": list_of_num} - df = pd.DataFrame(dic) - df_shuffled = df.sample(frac=1) - series_num = df["num"] - series_num_shuffled = df_shuffled["num"] - - # Create different indexes to use for filtering - all_idx_none = [[3, 17, 88], [22, 33, 44, 55, 66, 77, 88], [], np.arange(len(list_of_num))] - - # Loop all the indexes and filter the objects. 
- for ii, idx_none in enumerate(all_idx_none): - msg = f"Failed for ii={ii}" - - # Create the true filtered sequences - filtered_num = [ii for ii in range(100) if ii not in idx_none] - filtered_str = [str(ii) for ii in filtered_num] - assert len(filtered_num) == len(list_of_num) - len(idx_none) - assert len(filtered_str) == len(list_of_str) - len(idx_none) - - # Filter the sequences from the Datamodule function - ( - list_of_num_2, - list_of_str_2, - tuple_of_num_2, - array_of_num_2, - array_of_str_2, - tensor_of_num_2, - df_2, - df_shuffled_2, - dic_2, - arrays_of_num_2, - arrays_of_str_2, - tensors_of_num_2, - series_num_2, - series_num_shuffled_2, - ) = graphium.data.MultitaskFromSmilesDataModule._filter_none_molecules( - idx_none, - list_of_num, - list_of_str, - tuple_of_num, - array_of_num, - array_of_str, - tensor_of_num, - df, - df_shuffled, - dic, - arrays_of_num, - arrays_of_str, - tensors_of_num, - series_num, - series_num_shuffled, - ) - - df_shuffled_2 = df_shuffled_2.sort_values(by="num", axis=0) - series_num_shuffled_2 = series_num_shuffled_2.sort_values(axis=0) - - # Assert the filtering is done correctly - self.assertListEqual(list_of_num_2, filtered_num, msg=msg) - self.assertListEqual(list_of_str_2, filtered_str, msg=msg) - self.assertListEqual(list(tuple_of_num_2), filtered_num, msg=msg) - self.assertListEqual(array_of_num_2.tolist(), filtered_num, msg=msg) - self.assertListEqual(array_of_str_2.tolist(), filtered_str, msg=msg) - self.assertListEqual(tensor_of_num_2.tolist(), filtered_num, msg=msg) - for jj in range(arrays_of_num.shape[1]): - self.assertListEqual(arrays_of_num_2[:, jj].tolist(), filtered_num, msg=msg) - self.assertListEqual(arrays_of_str_2[:, jj].tolist(), filtered_str, msg=msg) - self.assertListEqual(tensors_of_num_2[:, jj].tolist(), filtered_num, msg=msg) - self.assertListEqual(dic_2["num"], filtered_num, msg=msg) - self.assertListEqual(dic_2["str"], filtered_str, msg=msg) - self.assertListEqual(df_2["num"].tolist(), 
filtered_num, msg=msg) - self.assertListEqual(df_2["str"].tolist(), filtered_str, msg=msg) - self.assertListEqual(series_num_2.tolist(), filtered_num, msg=msg) - - # When the dataframe is shuffled, the lists are different because the filtering - # is done on the row indexes, not the dataframe indexes. - bool_to_check = (len(idx_none) == 0) or (len(idx_none) == len(df_shuffled)) - self.assertIs(df_shuffled_2["num"].tolist() == filtered_num, bool_to_check, msg=msg) - self.assertIs(df_shuffled_2["str"].tolist() == filtered_str, bool_to_check, msg=msg) - self.assertIs(series_num_shuffled_2.tolist() == filtered_num, bool_to_check, msg=msg) def test_caching(self): # other datasets are too large to be tested @@ -201,10 +109,6 @@ def test_caching(self): dm_args["batch_size_inference"] = 16 dm_args["num_workers"] = 0 dm_args["pin_memory"] = True - dm_args["featurization_n_jobs"] = 0 - dm_args["featurization_progress"] = True - dm_args["featurization_backend"] = "loky" - dm_args["featurization_batch_size"] = 50 # Delete the cache if already exist if exists(TEMP_CACHE_DATA_PATH): @@ -214,10 +118,10 @@ def test_caching(self): assert not exists(TEMP_CACHE_DATA_PATH) ds = GraphOGBDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH, **dm_args) # assert not ds.load_data_from_cache(verbose=False) - ds.prepare_data(save_smiles_and_ids=False) + ds.prepare_data() # Check the keys in the dataset - ds.setup(save_smiles_and_ids=False) + ds.setup() assert set(ds.train_ds[0].keys()) == {"features", "labels"} # ds_batch = next(iter(ds.train_dataloader())) @@ -227,23 +131,9 @@ def test_caching(self): # Test loading cached data assert exists(TEMP_CACHE_DATA_PATH) - cached_ds_from_ram = GraphOGBDataModule( - task_specific_args, - processed_graph_data_path=TEMP_CACHE_DATA_PATH, - dataloading_from="ram", - **dm_args, - ) - cached_ds_from_ram.prepare_data() - cached_ds_from_ram.setup() - cached_train_loader_from_ram = cached_ds_from_ram.get_dataloader( - 
cached_ds_from_ram.train_ds, shuffle=False, stage="train" - ) - batch_from_ram = next(iter(cached_train_loader_from_ram)) - cached_ds_from_disk = GraphOGBDataModule( task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH, - dataloading_from="disk", **dm_args, ) cached_ds_from_disk.prepare_data() @@ -254,59 +144,31 @@ def test_caching(self): batch_from_disk = next(iter(cached_train_loader_from_disk)) # Features are the same - np.testing.assert_array_almost_equal( - batch["features"].edge_index, batch_from_ram["features"].edge_index - ) np.testing.assert_array_almost_equal( batch["features"].edge_index, batch_from_disk["features"].edge_index ) - assert batch["features"].num_nodes == batch_from_ram["features"].num_nodes assert batch["features"].num_nodes == batch_from_disk["features"].num_nodes - np.testing.assert_array_almost_equal( - batch["features"].edge_weight, batch_from_ram["features"].edge_weight - ) np.testing.assert_array_almost_equal( batch["features"].edge_weight, batch_from_disk["features"].edge_weight ) - np.testing.assert_array_almost_equal(batch["features"].feat, batch_from_ram["features"].feat) np.testing.assert_array_almost_equal(batch["features"].feat, batch_from_disk["features"].feat) - np.testing.assert_array_almost_equal( - batch["features"].edge_feat, batch_from_ram["features"].edge_feat - ) np.testing.assert_array_almost_equal( batch["features"].edge_feat, batch_from_disk["features"].edge_feat ) - np.testing.assert_array_almost_equal(batch["features"].batch, batch_from_ram["features"].batch) np.testing.assert_array_almost_equal(batch["features"].batch, batch_from_disk["features"].batch) - np.testing.assert_array_almost_equal(batch["features"].ptr, batch_from_ram["features"].ptr) np.testing.assert_array_almost_equal(batch["features"].ptr, batch_from_disk["features"].ptr) # Labels are the same - np.testing.assert_array_almost_equal( - batch["labels"].graph_task_1, batch_from_ram["labels"].graph_task_1 - ) 
np.testing.assert_array_almost_equal( batch["labels"].graph_task_1, batch_from_disk["labels"].graph_task_1 ) - np.testing.assert_array_almost_equal(batch["labels"].x, batch_from_ram["labels"].x) - np.testing.assert_array_almost_equal(batch["labels"].x, batch_from_disk["labels"].x) - - np.testing.assert_array_almost_equal(batch["labels"].edge_index, batch_from_ram["labels"].edge_index) - np.testing.assert_array_almost_equal(batch["labels"].edge_index, batch_from_disk["labels"].edge_index) - - np.testing.assert_array_almost_equal(batch["labels"].batch, batch_from_ram["labels"].batch) - np.testing.assert_array_almost_equal(batch["labels"].batch, batch_from_disk["labels"].batch) - - np.testing.assert_array_almost_equal(batch["labels"].ptr, batch_from_ram["labels"].ptr) - np.testing.assert_array_almost_equal(batch["labels"].ptr, batch_from_disk["labels"].ptr) - # Delete the cache if already exist if exists(TEMP_CACHE_DATA_PATH): rm(TEMP_CACHE_DATA_PATH, recursive=True) @@ -314,10 +176,10 @@ def test_caching(self): # Reset the datamodule ds = GraphOGBDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH, **dm_args) - ds.prepare_data(save_smiles_and_ids=True) + ds.prepare_data() - ds.setup(save_smiles_and_ids=True) - assert set(ds.train_ds[0].keys()) == {"smiles", "mol_ids", "features", "labels"} + ds.setup() + assert set(ds.train_ds[0].keys()) == {"features", "labels"} # test module assert ds.num_edge_feats == 5 @@ -326,9 +188,7 @@ def test_caching(self): # test batch loader batch = next(iter(ds.train_dataloader())) - assert len(batch["smiles"]) == 16 assert len(batch["labels"]["graph_task_1"]) == 16 - assert len(batch["mol_ids"]) == 16 # Delete the cache if already exist if exists(TEMP_CACHE_DATA_PATH): @@ -369,15 +229,18 @@ def test_datamodule_with_none_molecules(self): bad_smiles = (df["SMILES1"] == "XXX") & (df["SMILES2"] == "XXX") & (df["SMILES3"] == "XXX") num_bad_smiles = sum(bad_smiles) + # Delete the cache if already exist + if 
exists(TEMP_CACHE_DATA_PATH): + rm(TEMP_CACHE_DATA_PATH, recursive=True) + # Test the datamodule datamodule = MultitaskFromSmilesDataModule( task_specific_args=task_specific_args, + processed_graph_data_path=TEMP_CACHE_DATA_PATH, featurization_args=featurization_args, - featurization_n_jobs=0, - featurization_batch_size=1, ) datamodule.prepare_data() - datamodule.setup(save_smiles_and_ids=True) + datamodule.setup() # Check that the number of molecules is correct smiles = df["SMILES1"].tolist() + df["SMILES2"].tolist() + df["SMILES3"].tolist() @@ -400,33 +263,35 @@ def test_datamodule_with_none_molecules(self): df = df.set_index("idx_smiles") # Convert the smilies from the train_ds to a list, and check the content - train_smiles = [d["smiles"] for d in datamodule.train_ds] + train_smiles = [ + graphium_cpp.extract_string( + datamodule.train_ds.smiles_tensor, + datamodule.train_ds.smiles_offsets_tensor, + idx) for idx in range(len(datamodule.train_ds))] # Check that the set of smiles are the same - train_smiles_flat = list(set([item for sublist in train_smiles for item in sublist])) + train_smiles_flat = list(set(train_smiles)) train_smiles_flat.sort() index_smiles_filt = list(set([smiles for smiles in index_smiles if smiles != "XXX"])) index_smiles_filt.sort() self.assertListEqual(train_smiles_flat, index_smiles_filt) - # Check that the smiles are correct for each datapoint in the dataset + # Check that the smiles is correct for each datapoint in the dataset for smiles in train_smiles: - self.assertEqual(len(set(smiles)), 1) # Check that all smiles are the same - this_smiles = smiles[0] - true_smiles = df.loc[this_smiles][["SMILES1", "SMILES2", "SMILES3"]] - num_true_smiles = sum(true_smiles != "XXX") - self.assertEqual(len(smiles), num_true_smiles) # Check that the number of smiles is correct + assert isinstance(smiles, str) + true_smiles = df.loc[smiles][["SMILES1", "SMILES2", "SMILES3"]] self.assertEqual( - this_smiles, true_smiles[true_smiles != "XXX"].values[0] 
- ) # Check that the smiles are correct + smiles, true_smiles[true_smiles != "XXX"].values[0] + ) # Check that the smiles is correct # Convert the labels from the train_ds to a dataframe - train_labels = [{task: val[0] for task, val in d["labels"].items()} for d in datamodule.train_ds] + train_labels = [datamodule.train_ds[idx]["labels"] for idx in range(len(datamodule.train_ds))] + train_labels = [{k: v[0].item() for k, v in label} for label in train_labels] train_labels_df = pd.DataFrame(train_labels) train_labels_df = train_labels_df.rename( columns={"graph_task_1": "graph_SA", "graph_task_2": "graph_logp", "graph_task_3": "graph_score"} ) - train_labels_df["smiles"] = [s[0] for s in datamodule.train_ds.smiles] + train_labels_df["smiles"] = train_smiles train_labels_df = train_labels_df.set_index("smiles") train_labels_df = train_labels_df.sort_index() @@ -450,7 +315,11 @@ def test_datamodule_multiple_data_files(self): "task": {"task_level": "graph", "label_cols": ["score"], "smiles_col": "SMILES", **task_kwargs} } - ds = MultitaskFromSmilesDataModule(task_specific_args, featurization_n_jobs=0) + # Delete the cache if already exist + if exists(TEMP_CACHE_DATA_PATH): + rm(TEMP_CACHE_DATA_PATH, recursive=True) + + ds = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH) ds.prepare_data() ds.setup() @@ -463,7 +332,11 @@ def test_datamodule_multiple_data_files(self): "task": {"task_level": "graph", "label_cols": ["score"], "smiles_col": "SMILES", **task_kwargs} } - ds = MultitaskFromSmilesDataModule(task_specific_args, featurization_n_jobs=0) + # Delete the cache if already exist + if exists(TEMP_CACHE_DATA_PATH): + rm(TEMP_CACHE_DATA_PATH, recursive=True) + + ds = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH) ds.prepare_data() ds.setup() @@ -476,7 +349,11 @@ def test_datamodule_multiple_data_files(self): "task": {"task_level": "graph", "label_cols": ["score"], 
"smiles_col": "SMILES", **task_kwargs} } - ds = MultitaskFromSmilesDataModule(task_specific_args, featurization_n_jobs=0) + # Delete the cache if already exist + if exists(TEMP_CACHE_DATA_PATH): + rm(TEMP_CACHE_DATA_PATH, recursive=True) + + ds = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH) ds.prepare_data() ds.setup() @@ -489,7 +366,11 @@ def test_datamodule_multiple_data_files(self): "task": {"task_level": "graph", "label_cols": ["score"], "smiles_col": "SMILES", **task_kwargs} } - ds = MultitaskFromSmilesDataModule(task_specific_args, featurization_n_jobs=0) + # Delete the cache if already exist + if exists(TEMP_CACHE_DATA_PATH): + rm(TEMP_CACHE_DATA_PATH, recursive=True) + + ds = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH) ds.prepare_data() ds.setup() @@ -526,9 +407,13 @@ def test_splits_file(self): } } - ds = MultitaskFromSmilesDataModule(task_specific_args, featurization_n_jobs=0) - ds.prepare_data(save_smiles_and_ids=True) - ds.setup(save_smiles_and_ids=True) + # Delete the cache if already exist + if exists(TEMP_CACHE_DATA_PATH): + rm(TEMP_CACHE_DATA_PATH, recursive=True) + + ds = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH) + ds.prepare_data() + ds.setup() self.assertEqual(len(ds.train_ds), len(split_train)) self.assertEqual(len(ds.val_ds), len(split_val)) @@ -555,19 +440,26 @@ def test_splits_file(self): } } - ds2 = MultitaskFromSmilesDataModule(task_specific_args, featurization_n_jobs=0) - ds2.prepare_data(save_smiles_and_ids=True) - ds2.setup(save_smiles_and_ids=True) + # Delete the cache if already exist + if exists(TEMP_CACHE_DATA_PATH): + rm(TEMP_CACHE_DATA_PATH, recursive=True) + + ds2 = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH) + ds2.prepare_data() + ds2.setup() self.assertEqual(len(ds2.train_ds), len(split_train)) 
self.assertEqual(len(ds2.val_ds), len(split_val)) self.assertEqual(len(ds2.test_ds), len(split_test)) # Check that the splits are the same - self.assertEqual(len(ds.train_ds.smiles), len(split_train)) - np.testing.assert_array_equal(ds.train_ds.smiles, ds2.train_ds.smiles) - np.testing.assert_array_equal(ds.val_ds.smiles, ds2.val_ds.smiles) - np.testing.assert_array_equal(ds.test_ds.smiles, ds2.test_ds.smiles) + self.assertEqual(len(ds.train_ds.smiles_offsets_tensor), len(split_train)+1) + np.testing.assert_array_equal(ds.train_ds.smiles_tensor, ds2.train_ds.smiles_tensor) + np.testing.assert_array_equal(ds.val_ds.smiles_tensor, ds2.val_ds.smiles_tensor) + np.testing.assert_array_equal(ds.test_ds.smiles_tensor, ds2.test_ds.smiles_tensor) + np.testing.assert_array_equal(ds.train_ds.smiles_offsets_tensor, ds2.train_ds.smiles_offsets_tensor) + np.testing.assert_array_equal(ds.val_ds.smiles_offsets_tensor, ds2.val_ds.smiles_offsets_tensor) + np.testing.assert_array_equal(ds.test_ds.smiles_offsets_tensor, ds2.test_ds.smiles_offsets_tensor) if __name__ == "__main__": diff --git a/tests/test_dataset.py b/tests/test_dataset.py index 4a7173244..29cf8f2a5 100644 --- a/tests/test_dataset.py +++ b/tests/test_dataset.py @@ -60,7 +60,7 @@ def test_multitask_dataset_case_1(self): # Create the multitask dataset datasets_dict = {"SA": ds_micro_zinc_SA, "logp": ds_micro_zinc_logp, "score": ds_micro_zinc_score} multitask_microzinc = MultitaskDataset( - datasets_dict, save_smiles_and_ids=True + datasets_dict ) # Can optionally have features # Check: The number of unique molecules equals the number of datapoints in the multitask dataset. 
@@ -122,7 +122,7 @@ def test_multitask_dataset_case_2(self): # Create the multitask dataset datasets_dict = {"SA": ds_micro_zinc_SA, "logp": ds_micro_zinc_logp, "score": ds_micro_zinc_score} multitask_microzinc = MultitaskDataset( - datasets_dict, save_smiles_and_ids=True + datasets_dict ) # Can optionally have features # The total dataset has as many molecules as there are smiles in all tasks put together @@ -202,7 +202,7 @@ def test_multitask_dataset_case_3(self): # Create the multitask dataset datasets_dict = {"SA": ds_micro_zinc_SA, "logp": ds_micro_zinc_logp, "score": ds_micro_zinc_score} multitask_microzinc = MultitaskDataset( - datasets_dict, save_smiles_and_ids=True + datasets_dict ) # Can optionally have features # The multitask dataset has as many molecules as there are unique smiles across the single task datasets. diff --git a/tests/test_multitask_datamodule.py b/tests/test_multitask_datamodule.py index b8d2119e1..5623eb6df 100644 --- a/tests/test_multitask_datamodule.py +++ b/tests/test_multitask_datamodule.py @@ -109,9 +109,6 @@ def test_multitask_fromsmiles_dm( # Task-independent arguments dm_args["featurization"] = featurization_args - dm_args["featurization_n_jobs"] = 16 - dm_args["featurization_progress"] = True - dm_args["featurization_backend"] = "loky" dm_args["num_workers"] = 0 dm_args["pin_memory"] = True dm_args["processed_graph_data_path"] = None diff --git a/tests/test_training.py b/tests/test_training.py index 3ac31fc35..8ba0715e8 100644 --- a/tests/test_training.py +++ b/tests/test_training.py @@ -76,7 +76,6 @@ def call_cli_with_overrides(self, acc_type: str, acc_prec: str, load_type: str) "+datamodule.args.task_specific_args.zinc.sample_size=1000", "trainer.trainer.check_val_every_n_epoch=1", f"trainer.trainer.precision={acc_prec}", - f"datamodule.args.dataloading_from={load_type}", ] if acc_type == "ipu": overrides.append("accelerator.ipu_config=['useIpuModel(True)']") From 828638336662b2c2d3a6ec8548de52f6d5e3cf84 Mon Sep 17 00:00:00 
2001 From: ndickson Date: Wed, 17 Apr 2024 11:17:00 -0400 Subject: [PATCH 002/175] Small changes to support not needing label data during data loading --- graphium/data/dataset.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/graphium/data/dataset.py b/graphium/data/dataset.py index 064f3b5ad..8fa29e7d5 100644 --- a/graphium/data/dataset.py +++ b/graphium/data/dataset.py @@ -155,10 +155,10 @@ class MultitaskDataset(Dataset): def __init__( self, featurize_smiles: Callable[[str],dict], - task_names: List[str], - label_num_cols: List[int], - label_dtypes: List[int], - mol_file_data_offsets, + task_names: List[str] = None, + label_num_cols: List[int] = None, + label_dtypes: List[int] = None, + mol_file_data_offsets = None, concat_smiles_tensor, smiles_offsets_tensor, num_nodes_tensor, @@ -311,10 +311,13 @@ def __getitem__(self, idx): smiles_str = graphium_cpp.extract_string(self.smiles_tensor, self.smiles_offsets_tensor, idx) - datum = { - "labels": self.load_graph_from_index(idx), - "features": self.featurize_smiles(smiles_str), - } + if self.mol_file_data_offsets is None: + datum = { "features": self.featurize_smiles(smiles_str) } + else: + datum = { + "labels": self.load_graph_from_index(idx), + "features": self.featurize_smiles(smiles_str), + } # One of the featurization error handling options returns a string on error, # instead of throwing an exception, so assume that the intention is to just skip, From dca9b2b46014de64c35b48205cf3c5fda5eda7e1 Mon Sep 17 00:00:00 2001 From: ndickson Date: Wed, 17 Apr 2024 12:11:31 -0400 Subject: [PATCH 003/175] Removed FakeDataset, FakeDataModule, and SingleTaskDataset. SingleTaskDataset is still used in test_dataset.py, but won't after it's changed in a later commit. 
--- graphium/data/__init__.py | 3 - graphium/data/datamodule.py | 237 +----------------------------------- graphium/data/dataset.py | 218 --------------------------------- graphium/utils/spaces.py | 1 - 4 files changed, 1 insertion(+), 458 deletions(-) diff --git a/graphium/data/__init__.py b/graphium/data/__init__.py index 0a8fcd24d..b18cda421 100644 --- a/graphium/data/__init__.py +++ b/graphium/data/__init__.py @@ -6,8 +6,5 @@ from .datamodule import GraphOGBDataModule from .datamodule import MultitaskFromSmilesDataModule from .datamodule import ADMETBenchmarkDataModule -from .datamodule import FakeDataModule -from .dataset import SingleTaskDataset from .dataset import MultitaskDataset -from .dataset import FakeDataset diff --git a/graphium/data/datamodule.py b/graphium/data/datamodule.py index f27218176..a79fceff1 100644 --- a/graphium/data/datamodule.py +++ b/graphium/data/datamodule.py @@ -1490,7 +1490,7 @@ def _extract_smiles_labels( weights_col: Name of the column containing the weights weights_type: Type of weights to use. Returns: - smiles, labels, sample_idx, extras + smiles, labels, label_offsets, sample_idx, extras """ if smiles_col is None: # Should we specify which dataset has caused the potential issue? @@ -2132,238 +2132,3 @@ def _get_task_specific_arguments(self, name: str, seed: int, cache_dir: str) -> split_names=["train", "val", "test"], task_level="graph", ) - - -class FakeDataModule(MultitaskFromSmilesDataModule): - """ - A fake datamodule that generates artificial data by mimicking the true data coming - from the provided dataset. - It is useful to test the speed and performance of the model on a dataset without - having to featurize it and wait for the workers to load it. 
- """ - - def __init__( - self, - task_specific_args: Dict[str, Dict[str, Any]], # TODO: Replace this with DatasetParams - featurization: Optional[Union[Dict[str, Any], omegaconf.DictConfig]] = None, - batch_size_training: int = 16, - batch_size_inference: int = 16, - num_workers: int = 0, - pin_memory: bool = True, - persistent_workers: bool = False, - multiprocessing_context: Optional[str] = None, - collate_fn: Optional[Callable] = None, - prepare_dict_or_graph: str = "pyg:graph", - num_mols_to_generate: int = 1000000, - indexing_single_elem: bool = True, - **kwargs, - ): - super().__init__( - task_specific_args=task_specific_args, - featurization=featurization, - batch_size_training=batch_size_training, - batch_size_inference=batch_size_inference, - num_workers=num_workers, - pin_memory=pin_memory, - persistent_workers=persistent_workers, - multiprocessing_context=multiprocessing_context, - collate_fn=collate_fn, - prepare_dict_or_graph=prepare_dict_or_graph, - **kwargs, - ) - self.num_mols_to_generate = num_mols_to_generate - self.indexing_single_elem = indexing_single_elem - - def generate_data(self, label_cols: List[str], smiles_col: str): - """ - Parameters: - labels_cols - smiles_col - Returns: - pd.DataFrame - """ - num_generated_mols = int(1) - # Create a dummy generated dataset - singel smiles string, duplicated N times - example_molecules = dict( - smiles="C1N2C3C4C5OC13C2C45", - cxsmiles="[H]C1C2=C(NC(=O)[C@@]1([H])C1=C([H])C([H])=C(C([H])([H])[H])C([H])=C1[H])C([H])=C([H])N=C2[H] 
|(6.4528,-1.5789,-1.2859;5.789,-0.835,-0.8455;4.8499,-0.2104,-1.5946;3.9134,0.7241,-0.934;3.9796,1.1019,0.3172;5.0405,0.6404,1.1008;5.2985,1.1457,2.1772;5.9121,-0.5519,0.613;6.9467,-0.2303,0.8014;5.677,-1.7955,1.4745;4.7751,-2.7953,1.0929;4.2336,-2.7113,0.154;4.5521,-3.9001,1.914;3.8445,-4.6636,1.5979;5.215,-4.0391,3.1392;4.9919,-5.2514,4.0126;5.1819,-5.0262,5.0671;5.6619,-6.0746,3.7296;3.966,-5.6247,3.925;6.1051,-3.0257,3.52;6.6247,-3.101,4.4725;6.3372,-1.9217,2.7029;7.0168,-1.1395,3.0281;2.8586,1.2252,-1.7853;2.1303,1.9004,-1.3493;2.8118,0.8707,-3.0956;2.0282,1.2549,-3.7434;3.716,0.0207,-3.7371;4.6658,-0.476,-3.0127;5.3755,-1.1468,-3.5021)|", - ) - example_df_entry = {smiles_col: example_molecules[smiles_col]} - for label in label_cols: - example_df_entry[label] = np.random.random() - df = pd.DataFrame([example_df_entry]) - logger.info(f"Generating fake dataset on host... \n Generating {num_generated_mols} rows in the df.") - df = pd.concat([df] * num_generated_mols, ignore_index=True) - return df - - def prepare_data(self): - """Called only from a single process in distributed settings. Steps: - - - If each cache is set and exists, reload from cache and return. Otherwise, - - For each single-task dataset: - - Load its dataframe from a path (if provided) - - Subsample the dataframe - - Extract the smiles, labels from the dataframe - - In the previous step, we were also able to get the unique smiles, which we use to compute the features - - For each single-task dataframe and associated data (smiles, labels, etc.): - - Filter out the data corresponding to molecules which failed featurization. 
- - Create a corresponding SingletaskDataset - - Split the SingletaskDataset according to the task-specific splits for train, val and test - """ - - """Load all single-task dataframes.""" - if self.num_mols_to_generate is None: - num_mols = 0 - - task_df = {} - for task, args in self.task_dataset_processing_params.items(): - logger.info(f"Reading data for task '{task}'") - if args.df is None: - # Only load the useful columns, as some datasets can be very large when loading all columns. - label_cols = self._parse_label_cols( - df=None, df_path=args.df_path, label_cols=args.label_cols, smiles_col=args.smiles_col - ) - task_df[task] = self.generate_data(label_cols=args.label_cols, smiles_col=args.smiles_col) - if self.num_mols_to_generate is None: - num_mols = max(num_mols, len(task_df[task])) - task_df[task] = task_df[task].iloc[0:1] - - args.label_cols = label_cols - if self.num_mols_to_generate is None: - self.num_mols_to_generate = num_mols - logger.info("Done reading datasets") - - """Subsample the data frames and extract the necessary data to create SingleTaskDatasets for each task (smiles, labels, extras).""" - task_dataset_args = {} - for task in task_df.keys(): - task_dataset_args[task] = {} - - for task, df in task_df.items(): - logger.info(f"Prepare single-task dataset for task '{task}' with {len(df)} data points.") - # Extract smiles, labels, extras - args = self.task_dataset_processing_params[task] - smiles, labels, sample_idx, extras = self._extract_smiles_labels( - df, - task_level=args.task_level, - smiles_col=args.smiles_col, - label_cols=args.label_cols, - idx_col=args.idx_col, - weights_col=args.weights_col, - weights_type=args.weights_type, - ) - - # Store the relevant information for each task's dataset - task_dataset_args[task]["smiles"] = smiles - task_dataset_args[task]["labels"] = labels - task_dataset_args[task]["sample_idx"] = sample_idx - task_dataset_args[task]["extras"] = extras - - """Convert SMILES to features (graphs, fingerprints, 
etc.) for the unique molecules found.""" - all_smiles = [] - idx_per_task = {} - total_len = 0 - for task, dataset_args in task_dataset_args.items(): - all_smiles.extend(dataset_args["smiles"]) - num_smiles = len(dataset_args["smiles"]) - idx_per_task[task] = (total_len, total_len + num_smiles) - total_len += num_smiles - # Get all unique mol ids - all_unique_mol_ids = smiles_to_unique_mol_ids( - all_smiles, - n_jobs=self.featurization_n_jobs, - featurization_batch_size=self.featurization_batch_size, - backend=self.featurization_backend, - ) - # Convert SMILES to features - features, _ = self._featurize_molecules(all_smiles) - # FIXME: What is task supposed to be here? - task_dataset_args[task]["features"] = features - """Filter data based on molecules which failed featurization. Create single task datasets as well.""" - self.single_task_datasets = {} - for task, args in task_dataset_args.items(): - self.single_task_datasets[task] = Datasets.SingleTaskDataset( - features=task_dataset_args[task]["features"], - labels=task_dataset_args[task]["labels"], - smiles=task_dataset_args[task]["smiles"], - indices=task_dataset_args[task]["sample_idx"], - unique_ids=all_unique_mol_ids[idx_per_task[task][0] : idx_per_task[task][1]], - **task_dataset_args[task]["extras"], - ) - - """We split the data up to create train, val and test datasets""" - self.train_singletask_datasets = {} - self.val_singletask_datasets = {} - self.test_singletask_datasets = {} - for task, df in task_df.items(): - self.train_singletask_datasets[task] = Subset(self.single_task_datasets[task], [0]) - self.val_singletask_datasets[task] = Subset(self.single_task_datasets[task], [0]) - self.test_singletask_datasets[task] = Subset(self.single_task_datasets[task], [0]) - - def setup(self, stage=None): - # TODO - """ - Prepare the torch dataset. Called on every GPUs. Setting state here is ok. - Parameters: - stage (str): Either 'fit', 'test', or None. 
- """ - labels_size = {} - - if stage == "fit" or stage is None: - self.train_ds = Datasets.FakeDataset(self.train_singletask_datasets, num_mols=self.num_mols_to_generate, indexing_same_elem=self.indexing_single_elem) # type: ignore - self.val_ds = Datasets.FakeDataset(self.val_singletask_datasets, num_mols=self.num_mols_to_generate, indexing_same_elem=self.indexing_single_elem) # type: ignore - print(self.train_ds) - print(self.val_ds) - - labels_size.update( - self.train_ds.labels_size - ) # Make sure that all task label sizes are contained in here. Maybe do the update outside these if statements. - labels_size.update(self.val_ds.labels_size) - - if stage == "test" or stage is None: - self.test_ds = Datasets.FakeDataset(self.test_singletask_datasets, num_mols=self.num_mols_to_generate, indexing_same_elem=self.indexing_single_elem) # type: ignore - print(self.test_ds) - labels_size.update(self.test_ds.labels_size) - - default_labels_size_dict = self.collate_fn.keywords.get("labels_size_dict", None) - - if default_labels_size_dict is None: - self.collate_fn.keywords["labels_size_dict"] = labels_size - - def get_fake_graph(self): - """ - Low memory footprint method to get the first datapoint DGL graph. - The first 10 rows of the data are read in case the first one has a featurization - error. If all 20 first element, then `None` is returned, otherwise the first - graph to not fail is returned. 
- """ - keys = list(self.task_dataset_processing_params.keys()) - task = keys[0] - args = self.task_dataset_processing_params[task] - if args.df is None: - df = self._read_csv(args.df_path, nrows=20) - else: - df = args.df.iloc[0:20, :] - - df = df.iloc[0:20, :] - label_cols = self._parse_label_cols( - df, df_path=None, label_cols=args.label_cols, smiles_col=args.smiles_col - ) - - smiles, labels, sample_idx, extras = self._extract_smiles_labels( - df, - task_level=args.task_level, - smiles_col=args.smiles_col, - label_cols=label_cols, - idx_col=args.idx_col, - weights_col=args.weights_col, - weights_type=args.weights_type, - ) - - graph = None - for s in smiles: - graph = self.smiles_transformer(s, mask_nan=0.0) - num_nodes = graph.num_nodes - num_edges = graph.num_edges - if (graph is not None) and (num_edges > 0) and (num_nodes > 0): - break - return graph diff --git a/graphium/data/dataset.py b/graphium/data/dataset.py index 8fa29e7d5..54275c030 100644 --- a/graphium/data/dataset.py +++ b/graphium/data/dataset.py @@ -32,123 +32,6 @@ import graphium_cpp -class SingleTaskDataset(Dataset): - def __init__( - self, - labels: List[Union[torch.Tensor, np.ndarray]], - features: Optional[List[Union[Data, "GraphDict"]]] = None, - smiles: Optional[List[str]] = None, - indices: Optional[List[int]] = None, - weights: Optional[Union[torch.Tensor, np.ndarray]] = None, - unique_ids: Optional[List[str]] = None, - mol_ids: Optional[List[str]] = None, - ): - r""" - dataset for a single task - Parameters: - labels: A list of labels for the given task (one per graph) - features: A list of graphs - smiles: A list of smiles - indices: A list of indices - weights: A list of weights - unique_ids: A list of unique ids for each molecule generated from `datamol.unique_id` - mol_ids: A list of ids coming from the original dataset. Useful to identify the molecule in the original dataset. 
- """ - - # Verify that all lists are the same length - numel = len(labels) - - def _check_if_same_length(to_check, label): - """Simple utility method to throw an error if the length is not as expected.""" - if to_check is not None and len(to_check) != numel: - raise ValueError( - f"{label} must be the same length as `labels`, got {len(to_check)} and {numel}" - ) - - _check_if_same_length(features, "features") - _check_if_same_length(indices, "indices") - _check_if_same_length(weights, "weights") - _check_if_same_length(unique_ids, "unique_ids") - _check_if_same_length(mol_ids, "mol_ids") - - self.labels = labels - if smiles is not None: - manager = Manager() # Avoid memory leaks with `num_workers > 0` by using the Manager - self.smiles = manager.list(smiles) - else: - self.smiles = None - self.features = features - self.indices = indices - if self.indices is not None: - self.indices = np.array( - self.indices - ) # Avoid memory leaks with `num_workers > 0` by using numpy array - self.weights = weights - self.unique_ids = unique_ids - self.mol_ids = mol_ids - - def __len__(self): - r""" - return the size of the dataset - Returns: - size: the size of the dataset - """ - return len(self.labels) - - def __getitem__(self, idx): - """ - get the data at the given index - Parameters: - idx: the index to get the data at - Returns: - datum: a dictionary containing the data at the given index, with keys "features", "labels", "smiles", "indices", "weights", "unique_ids" - """ - datum = {} - - if self.features is not None: - datum["features"] = self.features[idx] - - if self.labels is not None: - datum["labels"] = self.labels[idx] - - if self.smiles is not None: - datum["smiles"] = self.smiles[idx] - - if self.indices is not None: - datum["indices"] = self.indices[idx] - - if self.weights is not None: - datum["weights"] = self.weights[idx] - - if self.unique_ids is not None: - datum["unique_ids"] = self.unique_ids[idx] - - if self.mol_ids is not None: - datum["mol_ids"] = 
self.mol_ids[idx] - - return datum - - def __getstate__(self): - """Serialize the class for pickling.""" - state = {} - state["labels"] = self.labels - state["smiles"] = list(self.smiles) if self.smiles is not None else None - state["features"] = self.features - state["indices"] = self.indices - state["weights"] = self.weights - state["unique_ids"] = self.unique_ids - state["mol_ids"] = self.mol_ids - return state - - def __setstate__(self, state: dict): - """Reload the class from pickling.""" - if state["smiles"] is not None: - manager = Manager() - state["smiles"] = manager.list(state["smiles"]) - - self.__dict__.update(state) - - class MultitaskDataset(Dataset): pass @@ -378,107 +261,6 @@ def __repr__(self) -> str: return out_str -class FakeDataset(MultitaskDataset): - """ - A dataset to hold the fake data. - """ - - def __init__( - self, datasets: Dict[str, SingleTaskDataset], num_mols: int = 1234, indexing_same_elem: bool = False - ): - """ - Parameters: - datasets: - A dictionary of datasets. The keys are the task names and the values are the datasets. - num_mols: - The number of molecules to generate. In reality, it is the same molecule, - but `num_mols` will change the length of the dataset. - indexing_same_elem: - If True, the same molecule is used for all samples. - Otherwise, a deepcopied molecule is used for each sample. 
- """ - self.indexing_same_elem = indexing_same_elem - self.num_mols = num_mols - self.num_datasets = len(datasets) - - self.about = "FakeDatasets" - task = next(iter(datasets)) - self.features = None - self.labels = None - if "features" in datasets[task][0]: - self.mol_ids, self.smiles, self.labels, self.features = self.merge(datasets) - if self.indexing_same_elem is False: - self.mol_ids, self.smiles, self.labels, self.features = self.deepcopy_mol( - self.mol_ids, self.smiles, self.labels, self.features - ) - else: - self.mol_ids, self.smiles, self.labels = self.merge(datasets) - if self.indexing_same_elem is False: - self.mol_ids, self.smiles, self.labels, _ = self.deepcopy_mol( - self.mol_ids, self.smiles, self.labels - ) - - self.label_num_cols = self.set_label_num_cols(datasets) - self.label_dtypes = self.set_label_dtype_dict(datasets) - - def _get_inv_of_mol_ids(self, all_mol_ids): - # The generated data is a single molecule duplicated - mol_ids = np.array(all_mol_ids) - inv = [_ for _ in range(len(mol_ids) // self.num_datasets)] * self.num_datasets - mol_ids = np.unique(inv) - return mol_ids, inv - - def deepcopy_mol(self, mol_ids, labels, smiles, features=None): - """ - Create a deepcopy of the single molecule num_mols times - - Args: - mol_ids (array): The single value for the mol ID - labels (List[Dict]): List containing one dict with the label name-value pairs - smiles (List[List[str]]): List of list containing SMILE sting - features (List[Data], optional): list containing Data object. Defaults to None. 
- - Returns: - The deep copy of the inputs - """ - logger.info("Duplicating the single dataset element...") - mol_ids = [deepcopy(mol_ids[0]) for _ in range(self.num_mols)] - logger.info("Finished `mol_ids`") - labels = [deepcopy(labels[0]) for _ in range(self.num_mols)] - logger.info("Finished `labels`") - smiles = [deepcopy(smiles[0]) for _ in range(self.num_mols)] - logger.info("Finished `smiles`") - if features is not None: - features = [deepcopy(features[0]) for _ in range(self.num_mols)] - logger.info("Finished `features`") - return mol_ids, labels, smiles, features - - def __len__(self): - r""" - Returns the number of molecules - """ - return self.num_mols - - def __getitem__(self, idx): - r""" - get the data for at the specified index - Parameters: - idx: The index of the data to retrieve - Returns: - A dictionary containing the data for the specified index with keys "mol_ids", "smiles", "labels", and "features" - """ - datum = {} - if self.indexing_same_elem is True: - # If using a single memory location override the idx value passed - idx = 0 - if self.labels is not None: - datum["labels"] = self.labels[idx] - - if self.features is not None: - datum["features"] = self.features[idx] - - return datum - def torch_enum_to_dtype(v: Union[int, torch.dtype]): if isinstance(v, torch.dtype): return v diff --git a/graphium/utils/spaces.py b/graphium/utils/spaces.py index 88812c0be..12f07a1ed 100644 --- a/graphium/utils/spaces.py +++ b/graphium/utils/spaces.py @@ -145,7 +145,6 @@ "GraphOGBDataModule": Datamodules.GraphOGBDataModule, "MultitaskFromSmilesDataModule": Datamodules.MultitaskFromSmilesDataModule, "ADMETBenchmarkDataModule": Datamodules.ADMETBenchmarkDataModule, - "FakeDataModule": Datamodules.FakeDataModule, } GRAPHIUM_PRETRAINED_MODELS_DICT = { From 830421045422fa830e4be2cf41e9e71c2218f40d Mon Sep 17 00:00:00 2001 From: ndickson Date: Wed, 17 Apr 2024 17:10:34 -0400 Subject: [PATCH 004/175] Removed option to featurize using Python, (but didn't delete 
everything from the Python featurization yet), removed option to featurize to GraphData class instead of PyG Data class, added deprecation warnings to datamodule.py for parameters that are now unused, some cleanup in MultitaskFromSmilesDataModule::__init__, changed tensor index variables to properties, added preprocessing_n_jobs (not yet used), etc. --- graphium/data/collate.py | 8 +- graphium/data/datamodule.py | 198 ++--- graphium/data/dataset.py | 2 - graphium/data/utils.py | 3 +- graphium/features/__init__.py | 6 - graphium/features/featurizer.py | 1141 ++--------------------------- profiling/profile_mol_to_graph.py | 7 +- 7 files changed, 174 insertions(+), 1191 deletions(-) diff --git a/graphium/data/collate.py b/graphium/data/collate.py index 211be06d5..f685cd009 100644 --- a/graphium/data/collate.py +++ b/graphium/data/collate.py @@ -22,7 +22,7 @@ from typing import Union, List, Optional, Dict, Type, Any, Iterable from torch_geometric.data import Data, Batch -from graphium.features import GraphDict, to_dense_array +from graphium.features import to_dense_array from graphium.utils.packing import fast_packing, get_pack_sizes, node_to_pack_indices_mask from loguru import logger from graphium.data.utils import get_keys @@ -107,12 +107,6 @@ def graphium_collate_fn( elif key == "num_nodes" or key == "num_edges": continue - # If the features are a dictionary containing GraphDict elements, - # Convert to pyg graphs and use the pyg batching. 
- elif isinstance(elem[key], GraphDict): - pyg_graphs = [d[key].make_pyg_graph(mask_nan=mask_nan) for d in elements] - batch[key] = collage_pyg_graph(pyg_graphs) - # If a PyG Graph is provided, use the PyG batching elif isinstance(elem[key], Data): pyg_graphs = [d[key] for d in elements] diff --git a/graphium/data/datamodule.py b/graphium/data/datamodule.py index a79fceff1..b2e748d2e 100644 --- a/graphium/data/datamodule.py +++ b/graphium/data/datamodule.py @@ -54,11 +54,7 @@ from rdkit import RDLogger from graphium.utils import fs -from graphium.features import ( - mol_to_graph_dict, - GraphDict, - mol_to_pyggraph, -) +from graphium.features import mol_to_pyggraph from graphium.data.sampler import DatasetSubSampler from graphium.data.utils import graphium_package_path, found_size_mismatch @@ -100,6 +96,9 @@ } ) +def warn_deprecated(value, name, function_name): + if value is not None: + logger.warn("In "+function_name+", "+name+" is deprecated") class BaseDataModule(lightning.LightningDataModule): def __init__( @@ -789,6 +788,7 @@ def __init__( self, task_specific_args: Union[Dict[str, DatasetProcessingParams], Dict[str, Any]], processed_graph_data_path: Union[str, os.PathLike], + dataloading_from = None, featurization: Optional[Union[Dict[str, Any], omegaconf.DictConfig]] = None, batch_size_training: int = 16, batch_size_inference: int = 16, @@ -797,8 +797,13 @@ def __init__( pin_memory: bool = True, persistent_workers: bool = False, multiprocessing_context: Optional[str] = None, + featurization_n_jobs = None, + featurization_progress = None, + featurization_backend = None, + featurization_batch_size = None, collate_fn: Optional[Callable] = None, - prepare_dict_or_graph: str = "pyg:graph", + prepare_dict_or_graph = None, + preprocessing_n_jobs: int = -1, **kwargs, ): """ @@ -814,6 +819,7 @@ def __init__( - `df_path` - `smiles_col` - `label_cols` + dataloading_from: Deprecated. Behaviour now always matches previous "disk" option. 
featurization: args to apply to the SMILES to Graph featurizer. batch_size_training: batch size for training and val dataset. batch_size_inference: batch size for test dataset. @@ -825,15 +831,14 @@ def __init__( - "loky": joblib's Default. Found to cause memory leaks. - "threading": Found to be slow. + featurization_n_jobs: Deprecated. + featurization_progress: Deprecated. + featurization_backend: Deprecated. + featurization_batch_size: Deprecated. collate_fn: A custom torch collate function. Default is to `graphium.data.graphium_collate_fn` - prepare_dict_or_graph: Whether to preprocess all molecules as Graph dict or PyG graphs. - Possible options: - - - "pyg:dict": Process molecules as a `dict`. It's faster and requires less RAM during - pre-processing. It is slower during training with with `num_workers=0` since - pyg `Data` will be created during data-loading, but faster with large - `num_workers`, and less likely to cause memory issues with the parallelization. - - "pyg:graph": Process molecules as `pyg.data.Data`. + prepare_dict_or_graph: Deprecated. Behaviour now always matches previous "pyg:graph" option. + preprocessing_n_jobs: Number of threads to use during preprocessing. + Use -1 to use all available cores. 
""" BaseDataModule.__init__( self, @@ -848,6 +853,13 @@ def __init__( ) IPUDataModuleModifier.__init__(self, **kwargs) + warn_deprecated(dataloading_from, "dataloading_from", "MultitaskFromSmilesDataModule::__init__") + warn_deprecated(featurization_n_jobs, "featurization_n_jobs", "MultitaskFromSmilesDataModule::__init__") + warn_deprecated(featurization_progress, "featurization_progress", "MultitaskFromSmilesDataModule::__init__") + warn_deprecated(featurization_backend, "featurization_backend", "MultitaskFromSmilesDataModule::__init__") + warn_deprecated(featurization_batch_size, "featurization_batch_size", "MultitaskFromSmilesDataModule::__init__") + warn_deprecated(prepare_dict_or_graph, "prepare_dict_or_graph", "MultitaskFromSmilesDataModule::__init__") + self.task_specific_args = task_specific_args self.task_dataset_processing_params = {} @@ -870,11 +882,6 @@ def __init__( self.task_val_indices = None self.task_test_indices = None - self.single_task_datasets = None - self.train_singletask_datasets = None - self.val_singletask_datasets = None - self.test_singletask_datasets = None - self.train_ds = None self.val_ds = None self.test_ds = None @@ -887,52 +894,29 @@ def __init__( featurization = {} self.featurization = featurization - self.encoded_featurization = featurization - - use_graphium_cpp = "use_graphium_cpp" not in featurization or featurization["use_graphium_cpp"] - if use_graphium_cpp: - # Copy featurization for the representation used by graphium_cpp - encoded_featurization = deepcopy(featurization) - self.encoded_featurization = encoded_featurization - encoded_featurization["use_graphium_cpp"] = True - if "atom_property_list_onehot" not in featurization: - featurization["atom_property_list_onehot"] = None - if "atom_property_list_float" not in featurization: - featurization["atom_property_list_float"] = None - if "edge_property_list" not in featurization: - featurization["edge_property_list"] = None - if "pos_encoding_as_features" not in 
featurization: - featurization["pos_encoding_as_features"] = None - encoded_featurization["original_featurization"] = { - "atom_property_list_onehot": featurization["atom_property_list_onehot"], - "atom_property_list_float": featurization["atom_property_list_float"], - "edge_property_list": featurization["edge_property_list"], - "pos_encoding_as_features": featurization["pos_encoding_as_features"] - } - if featurization["atom_property_list_onehot"] is not None: - self.atom_onehot_property_tensor = graphium_cpp.atom_onehot_feature_names_to_tensor(featurization["atom_property_list_onehot"]) - else: - self.atom_onehot_property_tensor = torch.tensor(data=[], dtype=torch.int64) - encoded_featurization["atom_property_list_onehot"] = self.atom_onehot_property_tensor - - if featurization["atom_property_list_float"] is not None: - self.atom_float_property_tensor = graphium_cpp.atom_float_feature_names_to_tensor(featurization["atom_property_list_float"]) - else: - self.atom_float_property_tensor = torch.tensor(data=[], dtype=torch.int64) - encoded_featurization["atom_property_list_float"] = self.atom_float_property_tensor - if featurization["edge_property_list"] is not None: - self.edge_property_tensor = graphium_cpp.bond_feature_names_to_tensor(featurization["edge_property_list"]) + # Copy featurization for the representation used by graphium_cpp + encoded_featurization = deepcopy(featurization) + self.encoded_featurization = encoded_featurization + + def encode_feature_options(options, name, encoding_function): + if name not in options or options[name] is None: + options[name] = torch.tensor(data=[], dtype=torch.int64) else: - self.edge_property_tensor = torch.tensor(data=[], dtype=torch.int64) - encoded_featurization["edge_property_list"] = self.edge_property_tensor - - if featurization["pos_encoding_as_features"] is not None and featurization["pos_encoding_as_features"]["pos_types"] is not None: - (self.pos_encoding_names, self.pos_encoding_tensor) = 
graphium_cpp.positional_feature_options_to_tensor(featurization["pos_encoding_as_features"]["pos_types"]) - else: - self.pos_encoding_names = [] - self.pos_encoding_tensor = torch.tensor(data=[], dtype=torch.int64) - encoded_featurization["pos_encoding_as_features"] = (self.pos_encoding_names, self.pos_encoding_tensor) + options[name] = encoding_function(options[name]) + encode_feature_options(encoded_featurization, "atom_property_list_onehot", graphium_cpp.atom_onehot_feature_names_to_tensor) + encode_feature_options(encoded_featurization, "atom_property_list_float", graphium_cpp.atom_float_feature_names_to_tensor) + encode_feature_options(encoded_featurization, "edge_property_list", graphium_cpp.bond_feature_names_to_tensor) + + if "pos_encoding_as_features" in featurization and + featurization["pos_encoding_as_features"] is not None and + featurization["pos_encoding_as_features"]["pos_types"] is not None: + (pos_encoding_names, pos_encoding_tensor) = + graphium_cpp.positional_feature_options_to_tensor(featurization["pos_encoding_as_features"]["pos_types"]) + else: + pos_encoding_names = [] + pos_encoding_tensor = torch.tensor(data=[], dtype=torch.int64) + encoded_featurization["pos_encoding_as_features"] = (pos_encoding_names, pos_encoding_tensor) explicit_H = featurization["explicit_H"] if "explicit_H" in featurization else False add_self_loop = featurization["add_self_loop"] if "add_self_loop" in featurization else False @@ -941,20 +925,13 @@ def __init__( self.add_self_loop = add_self_loop self.explicit_H = explicit_H - # Whether to transform the smiles into a pyg `Data` graph or a dictionary compatible with pyg - if prepare_dict_or_graph == "pyg:dict": - self.smiles_transformer = partial(mol_to_graph_dict, **encoded_featurization) - elif prepare_dict_or_graph == "pyg:graph": - self.smiles_transformer = partial(mol_to_pyggraph, **encoded_featurization) - else: - raise ValueError( - f"`prepare_dict_or_graph` should be either 'pyg:dict' or 'pyg:graph', 
Provided: `{prepare_dict_or_graph}`" - ) + self.preprocessing_n_jobs = preprocessing_n_jobs + + self.smiles_transformer = partial(mol_to_pyggraph, **encoded_featurization) self.data_hash = self.get_data_hash() - if self.processed_graph_data_path is not None: - if self._ready_to_load_all_from_file(): - self._data_is_prepared = True + if self._ready_to_load_all_from_file(): + self._data_is_prepared = True def _parse_caching_args(self, processed_graph_data_path): """ @@ -985,6 +962,22 @@ def get_task_levels(self): return task_level_map + @property + def data_offsets_tensor_index(self): + return 0 + @property + def concat_smiles_tensor_index(self): + return 1 + @property + def smiles_offsets_tensor_index(self): + return 2 + @property + def num_nodes_tensor_index(self): + return 3 + @property + def num_edges_tensor_index(self): + return 4 + def prepare_data(self): """Called only from a single process in distributed settings. Steps: @@ -1006,12 +999,6 @@ def prepare_data(self): # SMILES Parse Error: Failed parsing SMILES 'restricted' for input: 'restricted' RDLogger.DisableLog('rdApp.*') - self.data_offsets_tensor_index = 0 - self.concat_smiles_tensor_index = 1 - self.smiles_offsets_tensor_index = 2 - self.num_nodes_tensor_index = 3 - self.num_edges_tensor_index = 4 - for task, args in self.task_dataset_processing_params.items(): if args.label_normalization is None: args.label_normalization = {} @@ -1423,8 +1410,6 @@ def in_dims(self): """ graph = self.get_fake_graph() - if isinstance(graph, (GraphDict)): - graph = graph.data # get list of all keys corresponding to positional encoding pe_dim_dict = {} @@ -1698,10 +1683,7 @@ def get_data_hash(self): task_args.pop("epoch_sampling_fraction", None) args[task_key] = task_args - hash_dict = { - "task_specific_args": args, - } - data_hash = get_md5_hash(hash_dict) + data_hash = get_md5_hash(args) return data_hash def __len__(self) -> int: @@ -1759,6 +1741,7 @@ def __init__( self, task_specific_args: Dict[str, 
Union[DatasetProcessingParams, Dict[str, Any]]], processed_graph_data_path: Optional[Union[str, os.PathLike]] = None, + dataloading_from = None, featurization: Optional[Union[Dict[str, Any], omegaconf.DictConfig]] = None, batch_size_training: int = 16, batch_size_inference: int = 16, @@ -1767,8 +1750,12 @@ def __init__( pin_memory: bool = True, persistent_workers: bool = False, multiprocessing_context: Optional[str] = None, + featurization_n_jobs = None, + featurization_progress = None, + featurization_backend = None, collate_fn: Optional[Callable] = None, - prepare_dict_or_graph: str = "pyg:graph", + prepare_dict_or_graph = None, + preprocessing_n_jobs: int = -1, **kwargs, ): r""" @@ -1785,25 +1772,33 @@ def __init__( meaning that all molecules will be considered. processed_graph_data_path: Path to the processed graph data. If None, the data will be downloaded from the OGB website. + dataloading_from: Deprecated. Behaviour now always matches previous "disk" option. featurization: args to apply to the SMILES to Graph featurizer. batch_size_training: batch size for training and val dataset. batch_size_inference: batch size for test dataset. num_workers: Number of workers for the dataloader. Use -1 to use all available cores. pin_memory: Whether to pin on paginated CPU memory for the dataloader. - - - "multiprocessing": Found to cause less memory issues. - - "loky": joblib's Default. Found to cause memory leaks. - - "threading": Found to be slow. - + featurization_n_jobs: Deprecated. + featurization_progress: Deprecated. + featurization_backend: Deprecated. collate_fn: A custom torch collate function. Default is to `graphium.data.graphium_collate_fn` sample_size: - `int`: The maximum number of elements to take from the dataset. - `float`: Value between 0 and 1 representing the fraction of the dataset to consider - `None`: all elements are considered. + prepare_dict_or_graph: Deprecated. Behaviour now always matches previous "pyg:graph" option. 
+ preprocessing_n_jobs: Number of threads to use during preprocessing. + Use -1 to use all available cores. """ + warn_deprecated(dataloading_from, "dataloading_from", "GraphOGBDataModule::__init__") + warn_deprecated(featurization_n_jobs, "featurization_n_jobs", "GraphOGBDataModule::__init__") + warn_deprecated(featurization_progress, "featurization_progress", "GraphOGBDataModule::__init__") + warn_deprecated(featurization_backend, "featurization_backend", "GraphOGBDataModule::__init__") + warn_deprecated(prepare_dict_or_graph, "prepare_dict_or_graph", "GraphOGBDataModule::__init__") + new_task_specific_args = {} self.metadata = {} for task_name, task_args in task_specific_args.items(): @@ -1836,7 +1831,7 @@ def __init__( dm_args["persistent_workers"] = persistent_workers dm_args["multiprocessing_context"] = multiprocessing_context dm_args["collate_fn"] = collate_fn - dm_args["prepare_dict_or_graph"] = prepare_dict_or_graph + dm_args["preprocessing_n_jobs"] = preprocessing_n_jobs super().__init__(**dm_args, **kwargs) @@ -2008,6 +2003,7 @@ def __init__( tdc_train_val_seed: int = 0, # Inherited arguments from superclass processed_graph_data_path: Optional[Union[str, Path]] = None, + dataloading_from = None, featurization: Optional[Union[Dict[str, Any], omegaconf.DictConfig]] = None, batch_size_training: int = 16, batch_size_inference: int = 16, @@ -2016,10 +2012,20 @@ def __init__( pin_memory: bool = True, persistent_workers: bool = False, multiprocessing_context: Optional[str] = None, + featurization_n_jobs = None, + featurization_progress = None, + featurization_backend = None, collate_fn: Optional[Callable] = None, - prepare_dict_or_graph: str = "pyg:graph", + prepare_dict_or_graph = None, + preprocessing_n_jobs: int = -1, **kwargs, ): + warn_deprecated(dataloading_from, "dataloading_from", "ADMETBenchmarkDataModule::__init__") + warn_deprecated(featurization_n_jobs, "featurization_n_jobs", "ADMETBenchmarkDataModule::__init__") + 
warn_deprecated(featurization_progress, "featurization_progress", "ADMETBenchmarkDataModule::__init__") + warn_deprecated(featurization_backend, "featurization_backend", "ADMETBenchmarkDataModule::__init__") + warn_deprecated(prepare_dict_or_graph, "prepare_dict_or_graph", "ADMETBenchmarkDataModule::__init__") + try: from tdc.benchmark_group import admet_group from tdc.utils import retrieve_benchmark_names @@ -2072,7 +2078,7 @@ def __init__( persistent_workers=persistent_workers, multiprocessing_context=multiprocessing_context, collate_fn=collate_fn, - prepare_dict_or_graph=prepare_dict_or_graph, + preprocessing_n_jobs=preprocessing_n_jobs, **kwargs, ) diff --git a/graphium/data/dataset.py b/graphium/data/dataset.py index 54275c030..c37c4fdc8 100644 --- a/graphium/data/dataset.py +++ b/graphium/data/dataset.py @@ -27,8 +27,6 @@ from torch.utils.data.dataloader import Dataset from torch_geometric.data import Batch, Data -from graphium.features import GraphDict - import graphium_cpp diff --git a/graphium/data/utils.py b/graphium/data/utils.py index aa5151a90..5136ce60e 100644 --- a/graphium/data/utils.py +++ b/graphium/data/utils.py @@ -25,7 +25,6 @@ import graphium from torch_geometric.data import Data -from graphium.features.featurizer import GraphDict GRAPHIUM_DATASETS_BASE_URL = "gs://graphium-public/datasets" GRAPHIUM_DATASETS = { @@ -129,7 +128,7 @@ def get_keys(pyg_data): return pyg_data.keys() -def found_size_mismatch(task: str, features: Union[Data, GraphDict], labels: np.ndarray, smiles: str) -> bool: +def found_size_mismatch(task: str, features: Data, labels: np.ndarray, smiles: str) -> bool: """Check if a size mismatch exists between features and labels with respect to node/edge/nodepair. 
Args: diff --git a/graphium/features/__init__.py b/graphium/features/__init__.py index 40984a2a4..a4ebafc8c 100644 --- a/graphium/features/__init__.py +++ b/graphium/features/__init__.py @@ -1,9 +1,3 @@ -from .featurizer import get_mol_atomic_features_onehot -from .featurizer import get_mol_atomic_features_float -from .featurizer import get_mol_edge_features -from .featurizer import mol_to_adj_and_features -from .featurizer import mol_to_graph_dict from .featurizer import mol_to_graph_signature -from .featurizer import GraphDict from .featurizer import mol_to_pyggraph from .featurizer import to_dense_array diff --git a/graphium/features/featurizer.py b/graphium/features/featurizer.py index 7f7c4b27b..70fda1f3c 100644 --- a/graphium/features/featurizer.py +++ b/graphium/features/featurizer.py @@ -26,10 +26,6 @@ from rdkit import Chem import datamol as dm -from graphium.features import nmp -from graphium.utils.tensor import one_of_k_encoding -from graphium.features.positional_encoding import get_all_positional_encodings - import graphium_cpp def to_dense_array(array: np.ndarray, dtype: str = None) -> np.ndarray: @@ -69,130 +65,6 @@ def to_dense_tensor(tensor: Tensor, dtype: str = None) -> Tensor: return tensor -def _mask_nans_inf(mask_nan: Optional[str], array: np.ndarray, array_name: str) -> np.ndarray: - r""" - mask the NaNs in the array - Parameters: - mask_nan: How to mask the NaNs - array: The array to mask - array_name: The name of the array - Returns: - The masked array - """ - if (mask_nan is None) or (array is None): - return array - - new_array = array - if issparse(new_array): - new_array = new_array.data - nans = ~np.isfinite(new_array) - - # Mask the NaNs - if nans.any(): - msg = f"There are {np.sum(nans)} NaNs in `{array_name}`" - if mask_nan == "raise": - raise ValueError(msg) - elif mask_nan == "warn": - logger.warning(msg) - else: - new_array[nans] = mask_nan - if issparse(array): - array.data = new_array - new_array = array - return new_array - - 
-def get_mol_atomic_features_onehot(mol: dm.Mol, property_list: List[str]) -> Dict[str, Tensor]: - r""" - Get the following set of features for any given atom - - * One-hot representation of the atom - * One-hot representation of the atom degree - * One-hot representation of the atom implicit valence - * One-hot representation of the the atom hybridization - * Whether the atom is aromatic - * The atom's formal charge - * The atom's number of radical electrons - - Additionally, the following features can be set, depending on the value of input Parameters - - * One-hot representation of the number of hydrogen atom in the the current atom neighborhood if `explicit_H` is false - * One-hot encoding of the atom chirality, and whether such configuration is even possible - - Parameters: - - mol: - molecule from which to extract the properties - - property_list: - A list of integer atomic properties to get from the molecule. - The integer values are converted to a one-hot vector. - Callables are not supported by this function. - - Accepted properties are: - - - "atomic-number" - - "degree" - - "valence", "total-valence" - - "implicit-valence" - - "hybridization" - - "chirality" - - "phase" - - "type" - - "group" - - "period" - - Returns: - prop_dict: - A dictionnary where the element of ``property_list`` are the keys - and the values are np.ndarray of shape (N, OH). N is the number of atoms - in ``mol`` and OH the lenght of the one-hot encoding. 
- - """ - - prop_dict = {} - - for prop in property_list: - prop = prop.lower() - prop_name = prop - - property_array = [] - for ii, atom in enumerate(mol.GetAtoms()): - if prop in ["atomic-number"]: - one_hot = one_of_k_encoding(atom.GetSymbol(), nmp.ATOM_LIST) - elif prop in ["degree"]: - one_hot = one_of_k_encoding(atom.GetDegree(), nmp.ATOM_DEGREE_LIST) - elif prop in ["valence", "total-valence"]: - prop_name = "valence" - one_hot = one_of_k_encoding(atom.GetTotalValence(), nmp.VALENCE) - elif prop in ["implicit-valence"]: - one_hot = one_of_k_encoding(atom.GetImplicitValence(), nmp.VALENCE) - elif prop in ["hybridization"]: - one_hot = one_of_k_encoding(atom.GetHybridization(), nmp.HYBRIDIZATION_LIST) - elif prop in ["chirality"]: - try: - one_hot = one_of_k_encoding(atom.GetProp("_CIPCode"), nmp.CHIRALITY_LIST) - one_hot.append(int(atom.HasProp("_ChiralityPossible"))) - except: - one_hot = [0, 0, int(atom.HasProp("_ChiralityPossible"))] - elif prop in "phase": - one_hot = one_of_k_encoding(nmp.PHASE[atom.GetAtomicNum() - 1], nmp.PHASE_SET) - elif prop in "type": - one_hot = one_of_k_encoding(nmp.TYPE[atom.GetAtomicNum() - 1], nmp.TYPE_SET) - elif prop in "group": - one_hot = one_of_k_encoding(nmp.GROUP[atom.GetAtomicNum() - 1], nmp.GROUP_SET) - elif prop in "period": - one_hot = one_of_k_encoding(nmp.PERIOD[atom.GetAtomicNum() - 1], nmp.PERIOD_SET) - else: - raise ValueError(f"Unsupported property `{prop}`") - - property_array.append(np.asarray(one_hot, dtype=np.float16)) - - prop_dict[prop_name] = np.stack(property_array, axis=0) - - return prop_dict - - def get_mol_conformer_features( mol: dm.Mol, property_list: Union[List[str], List[Callable]], @@ -246,202 +118,6 @@ def get_mol_conformer_features( return prop_dict -def get_mol_atomic_features_float( - mol: dm.Mol, - property_list: Union[List[str], List[Callable]], - offset_carbon: bool = True, - mask_nan: Union[str, float, type(None)] = "raise", -) -> Dict[str, np.ndarray]: - r""" - Get a dictionary of 
floating-point arrays of atomic properties. - To ensure all properties are at a similar scale, some of the properties - are divided by a constant. - - There is also the possibility of offseting by the carbon value using - the `offset_carbon` parameter. - - Parameters: - - mol: - molecule from which to extract the properties - - property_list: - A list of atomic properties to get from the molecule, such as 'atomic-number', - 'mass', 'valence', 'degree', 'electronegativity'. - Some elements are divided by a factor to avoid feature explosion. - - Accepted properties are: - - - "atomic-number" - - "mass", "weight" - - "valence", "total-valence" - - "implicit-valence" - - "hybridization" - - "chirality" - - "hybridization" - - "aromatic" - - "ring", "in-ring" - - "min-ring" - - "max-ring" - - "num-ring" - - "degree" - - "radical-electron" - - "formal-charge" - - "vdw-radius" - - "covalent-radius" - - "electronegativity" - - "ionization", "first-ionization" - - "melting-point" - - "metal" - - "single-bond" - - "aromatic-bond" - - "double-bond" - - "triple-bond" - - "is-carbon" - - "group" - - "period" - - offset_carbon: - Whether to subract the Carbon property from the desired atomic property. - For example, if we want the mass of the Lithium (6.941), the mass of the - Carbon (12.0107) will be subracted, resulting in a value of -5.0697 - - mask_nan: - Deal with molecules that fail a part of the featurization. - NaNs can happen when taking the of a noble gas, - or other properties that are not measured for specific atoms. - - - "raise": Raise an error when there is a nan or inf in the featurization - - "warn": Raise a warning when there is a nan or inf in the featurization - - "None": DEFAULT. Don't do anything - - "Floating value": Replace nans or inf by the specified value - - Returns: - - prop_dict: - A dictionnary where the element of ``property_list`` are the keys - and the values are np.ndarray of shape (N,). N is the number of atoms - in ``mol``. 
- - """ - - periodic_table = Chem.GetPeriodicTable() - prop_dict = {} - C = Chem.Atom("C") - C_num = C.GetAtomicNum() - offC = bool(offset_carbon) - atom_list = list(mol.GetAtoms()) - - for prop in property_list: - prop_name = None - - property_array = np.zeros(mol.GetNumAtoms(), dtype=np.float16) - for ii, atom in enumerate(atom_list): - val = None - atomic_num = atom.GetAtomicNum() - - if isinstance(prop, str): - prop = prop.lower() - prop_name = prop - - if prop in ["atomic-number"]: - val = (atomic_num - (offC * C_num)) / 5 - elif prop in ["mass", "weight"]: - prop_name = "mass" - val = (atom.GetMass() - (offC * C.GetMass())) / 10 - elif prop in ["valence", "total-valence"]: - prop_name = "valence" - val = atom.GetTotalValence() - (offC * 4) - elif prop in ["implicit-valence"]: - val = atom.GetImplicitValence() - elif prop in ["hybridization"]: - val = atom.GetHybridization() - elif prop in ["chirality"]: - val = (atom.GetProp("_CIPCode") == "R") if atom.HasProp("_CIPCode") else 2 - elif prop in ["hybridization"]: - val = atom.GetHybridization() - elif prop in ["aromatic"]: - val = atom.GetIsAromatic() - elif prop in ["ring", "in-ring"]: - prop_name = "in-ring" - val = atom.IsInRing() - elif prop in ["min-ring"]: - ring_info = mol.GetRingInfo() - val = ring_info.MinAtomRingSize(atom.GetIdx()) - elif prop in ["max-ring"]: - rings = mol.GetRingInfo().AtomRings() - val = 0 - for ring in rings: - if atom.GetIdx() in ring: - if len(ring) > val: - val = len(ring) - elif prop in ["num-ring"]: - ring_info = mol.GetRingInfo() - val = ring_info.NumAtomRings(atom.GetIdx()) - elif prop in ["degree"]: - val = atom.GetTotalDegree() - (offC * 2) - elif prop in ["radical-electron"]: - val = atom.GetNumRadicalElectrons() - elif prop in ["formal-charge"]: - val = atom.GetFormalCharge() - elif prop in ["vdw-radius"]: - val = periodic_table.GetRvdw(atom.GetAtomicNum()) - offC * periodic_table.GetRvdw(C_num) - elif prop in ["covalent-radius"]: - val = 
periodic_table.GetRcovalent(atomic_num) - offC * periodic_table.GetRcovalent(C_num) - elif prop in ["electronegativity"]: - val = ( - nmp.ELECTRONEGATIVITY[atom.GetAtomicNum() - 1] - - offC * nmp.ELECTRONEGATIVITY[C_num - 1] - ) - elif prop in ["ionization", "first-ionization"]: - prop_name = "ionization" - val = (nmp.FIRST_IONIZATION[atomic_num - 1] - offC * nmp.FIRST_IONIZATION[C_num - 1]) / 5 - elif prop in ["melting-point"]: - val = (nmp.MELTING_POINT[atomic_num - 1] - offC * nmp.MELTING_POINT[C_num - 1]) / 200 - elif prop in ["metal"]: - val = nmp.METAL[atomic_num - 1] - elif prop in "group": - val = float(nmp.GROUP[atomic_num - 1]) - offC * float(nmp.GROUP[C_num - 1]) - elif prop in "period": - val = float(nmp.PERIOD[atomic_num - 1]) - offC * float(nmp.PERIOD[C_num - 1]) - elif "-bond" in prop: - bonds = [bond.GetBondTypeAsDouble() for bond in atom.GetBonds()] - if prop in ["single-bond"]: - val = len([bond == 1 for bond in bonds]) - elif prop in ["aromatic-bond"]: - val = len([bond == 1.5 for bond in bonds]) - elif prop in ["double-bond"]: - val = len([bond == 2 for bond in bonds]) - elif prop in ["triple-bond"]: - val = len([bond == 3 for bond in bonds]) - else: - raise ValueError(f"{prop} is not a correct bond.") - val -= offC * 1 - elif prop in ["is-carbon"]: - val = atom.GetAtomicNum() == 6 - val -= offC * 1 - else: - raise ValueError(f"Unsupported property `{prop}`") - - elif callable(prop): - prop_name = str(prop) - val = prop(atom) - else: - ValueError(f"Elements in `property_list` must be str or callable, provided `{type(prop)}`") - - if val is None: - raise ValueError("val is undefined.") - - property_array[ii] = val - - if prop_name is None: - raise ValueError("prop_name is undefined.") - - # Mask the NaNs - prop_dict[prop_name] = _mask_nans_inf(mask_nan, property_array, "atom featurization") - - return prop_dict - def get_simple_mol_conformer(mol: dm.Mol) -> Union[Chem.rdchem.Conformer, None]: r""" @@ -480,500 +156,11 @@ def 
get_simple_mol_conformer(mol: dm.Mol) -> Union[Chem.rdchem.Conformer, None]: return conf -def get_estimated_bond_length(bond: Chem.rdchem.Bond, mol: dm.Mol) -> float: - r""" - Estimate the bond length between atoms by looking at the estimated atomic radius - that depends both on the atom type and the bond type. The resulting bond-length is - then the sum of the radius. - - Keep in mind that this function only provides an estimate of the bond length and not - the true one based on a conformer. The vast majority od estimated bond lengths will - have an error below 5% while some bonds can have an error up to 20%. This function - is mostly useful when conformer generation fails for some molecules, or for - increased computation speed. - - Parameters: - bond: The bond to measure its lenght - mol: The molecule containing the bond (used to get neighbouring atoms) - - Returns: - bond_length: The bond length in Angstrom, typically a value around 1-2. - - """ - - # Get the atoms connected by the bond - idx1 = bond.GetBeginAtomIdx() - idx2 = bond.GetEndAtomIdx() - atom1 = mol.GetAtomWithIdx(idx1).GetAtomicNum() - atom2 = mol.GetAtomWithIdx(idx2).GetAtomicNum() - bond_type = bond.GetBondType() - - # Get single bond atomic radius - if bond_type == Chem.rdchem.BondType.SINGLE: - rad1 = [nmp.BOND_RADIUS_SINGLE[atom1 - 1]] - rad2 = [nmp.BOND_RADIUS_SINGLE[atom2 - 1]] - # Get double bond atomic radius - elif bond_type == Chem.rdchem.BondType.DOUBLE: - rad1 = [nmp.BOND_RADIUS_DOUBLE[atom1 - 1]] - rad2 = [nmp.BOND_RADIUS_DOUBLE[atom2 - 1]] - # Get triple bond atomic radius - elif bond_type == Chem.rdchem.BondType.TRIPLE: - rad1 = [nmp.BOND_RADIUS_TRIPLE[atom1 - 1]] - rad2 = [nmp.BOND_RADIUS_TRIPLE[atom2 - 1]] - # Get average of single bond and double bond atomic radius - elif bond_type == Chem.rdchem.BondType.AROMATIC: - rad1 = [nmp.BOND_RADIUS_SINGLE[atom1 - 1], nmp.BOND_RADIUS_DOUBLE[atom1 - 1]] - rad2 = [nmp.BOND_RADIUS_SINGLE[atom2 - 1], nmp.BOND_RADIUS_DOUBLE[atom2 - 1]] - - # 
Average the bond lengths, while ignoring nans in case some missing value - rad1_float = [elem for elem in rad1 if elem is not None] - rad2_float = [elem for elem in rad2 if elem is not None] - - if len(rad1_float) > 0: - rad1_float = sum(rad1_float) / len(rad1_float) - else: - rad1_float = float(nmp.BOND_RADIUS_SINGLE[atom1 - 1]) - - if len(rad2_float) > 0: - rad2_float = sum(rad2_float) / len(rad2_float) - else: - rad2_float = float(nmp.BOND_RADIUS_SINGLE[atom2 - 1]) - - bond_length = rad1_float + rad2_float - return bond_length - - -def get_mol_edge_features( - mol: dm.Mol, property_list: List[str], mask_nan: Union[str, float, type(None)] = "raise" -) -> Dict[str, np.ndarray]: - r""" - Get the following set of features for any given bond - See `graphium.features.nmp` for allowed values in one hot encoding - - * One-hot representation of the bond type. Note that you should not kekulize your - molecules, if you expect this to take aromatic bond into account. - * Bond stereo type, following CIP classification - * Whether the bond is conjugated - * Whether the bond is in a ring - - Parameters: - mol: rdkit.Chem.Molecule - the molecule of interest - - property_list: - A list of edge properties to return for the given molecule. - Accepted properties are: - - - "bond-type-onehot" - - "bond-type-float" - - "stereo" - - "in-ring" - - "conjugated" - - "conformer-bond-length" (might cause problems with complex molecules) - - "estimated-bond-length" - - Returns: - prop_dict: - A dictionnary where the element of ``property_list`` are the keys - and the values are np.ndarray of shape (N,). N is the number of atoms - in ``mol``. 
- - """ - - prop_dict = {} - - # Compute features for each bond - num_bonds = mol.GetNumBonds() - for prop in property_list: - property_array = [] - for ii in range(num_bonds): - prop = prop.lower() - bond = mol.GetBondWithIdx(ii) - - if prop in ["bond-type-onehot"]: - encoding = one_of_k_encoding(bond.GetBondType(), nmp.BOND_TYPES) - elif prop in ["bond-type-float"]: - encoding = [bond.GetBondTypeAsDouble()] - elif prop in ["stereo"]: - encoding = one_of_k_encoding(bond.GetStereo(), nmp.BOND_STEREO) - elif prop in ["in-ring"]: - encoding = [bond.IsInRing()] - elif prop in ["conjugated"]: - encoding = [bond.GetIsConjugated()] - elif prop in ["conformer-bond-length"]: - conf = get_simple_mol_conformer(mol) - if conf is not None: - idx1 = bond.GetBeginAtomIdx() - idx2 = bond.GetEndAtomIdx() - encoding = [Chem.rdMolTransforms.GetBondLength(conf, idx1, idx2)] - else: - encoding = [0] - elif prop in ["estimated-bond-length"]: - encoding = [get_estimated_bond_length(bond, mol)] - - else: - raise ValueError(f"Unsupported property `{prop}`") - - property_array.append(np.asarray(encoding, dtype=np.float16)) - - if num_bonds > 0: - property_array = np.stack(property_array, axis=0) - # Mask the NaNs - prop_dict[prop] = _mask_nans_inf(mask_nan, property_array, "edge property") - else: - # Add an empty vector with the right shape - arr_len = 1 - if prop in ["bond-type-onehot"]: - arr_len = len(nmp.BOND_TYPES) + 1 - elif prop in ["stereo"]: - arr_len = len(nmp.BOND_STEREO) + 1 - - prop_dict[prop] = np.zeros((0, arr_len)) - - return prop_dict - - -def mol_to_adj_and_features( - mol: Union[str, dm.Mol], - atom_property_list_onehot: List[str] = [], - atom_property_list_float: List[Union[str, Callable]] = [], - conformer_property_list: List[str] = [], - edge_property_list: List[str] = [], - add_self_loop: bool = False, - explicit_H: bool = False, - use_bonds_weights: bool = False, - pos_encoding_as_features: Dict[str, Any] = None, - dtype: np.dtype = np.float16, - mask_nan: 
Union[str, float, type(None)] = "raise", - use_graphium_cpp: bool = False, -) -> Union[ - coo_matrix, - Union[Tensor, None], - Union[Tensor, None], - Dict[str, Tensor], - Union[Tensor, None], - Dict[str, Tensor], -]: - r""" - Transforms a molecule into an adjacency matrix representing the molecular graph - and a set of atom and bond features. - - It also returns the positional encodings associated to the graph. - - Parameters: - - mol: - The molecule to be converted - - atom_property_list_onehot: - List of the properties used to get one-hot encoding of the atom type, - such as the atom index represented as a one-hot vector. - See function `get_mol_atomic_features_onehot` - - atom_property_list_float: - List of the properties used to get floating-point encoding of the atom type, - such as the atomic mass or electronegativity. - See function `get_mol_atomic_features_float` - - conformer_property_list: - list of properties used to encode the conformer information, outside of atom properties, currently support "positions_3d" - - edge_property_list: - List of the properties used to encode the edges, such as the edge type - and the stereo type. - - add_self_loop: - Whether to add a value of `1` on the diagonal of the adjacency matrix. - - explicit_H: - Whether to consider the Hydrogens explicitely. If `False`, the hydrogens - are implicit. - - use_bonds_weights: - Whether to use the floating-point value of the bonds in the adjacency matrix, - such that single bonds are represented by 1, double bonds 2, triple 3, aromatic 1.5 - - pos_encoding_as_features: keyword arguments for function `graph_positional_encoder` - to generate positional encoding for node features. - - dtype: - The torch data type used to build the graph - - mask_nan: - Deal with molecules that fail a part of the featurization. - NaNs can happen when taking the of a noble gas, - or other properties that are not measured for specific atoms. 
- - - "raise": Raise an error when there is a nan or inf in the featurization - - "warn": Raise a warning when there is a nan or inf in the featurization - - "None": DEFAULT. Don't do anything - - "Floating value": Replace nans or inf by the specified value - Returns: - - adj: - torch coo sparse adjacency matrix of the molecule - - ndata: - Concatenated node data of the atoms, based on the properties from - `atom_property_list_onehot` and `atom_property_list_float`. - If no properties are given, it returns `None` - - edata: - Concatenated node edge of the molecule, based on the properties from - `edge_property_list`. - If no properties are given, it returns `None` - - pe_dict: - Dictionary of all positional encodings. Current supported keys: - - - "pos_enc_feats_sign_flip": - Node positional encoding that requires augmentation via sign-flip. - For example, eigenvectors of the Laplacian are ambiguous to the - sign and are returned here. - - - "pos_enc_feats_no_flip": - Node positional encoding that requires does not use sign-flip. - For example, distance from centroid are returned here. 
- - - "rwse": - Node structural encoding corresponding to the diagonal of the random - walk matrix - - conf_dict: - contains the 3d positions of a conformer of the molecule or 0s if none is found - - """ - - if isinstance(mol, str): - mol = dm.to_mol(mol, ordered=True) - - # Add or remove explicit hydrogens - if explicit_H: - mol = Chem.AddHs(mol) - else: - mol = Chem.RemoveHs(mol) - - num_nodes = mol.GetNumAtoms() - - adj = mol_to_adjacency_matrix( - mol, use_bonds_weights=use_bonds_weights, add_self_loop=add_self_loop, dtype=dtype - ) - - # Get the node features - atom_features_onehot = get_mol_atomic_features_onehot(mol, atom_property_list_onehot) - atom_features_float = get_mol_atomic_features_float(mol, atom_property_list_float, mask_nan=mask_nan) - conf_dict = get_mol_conformer_features(mol, conformer_property_list, mask_nan=mask_nan) - ndata = list(atom_features_float.values()) + list(atom_features_onehot.values()) - ndata = [d[:, np.newaxis] if d.ndim == 1 else d for d in ndata] - - if len(ndata) > 0: - ndata = np.concatenate(ndata, axis=1).astype(dtype=dtype) - else: - ndata = None - - # Get the edge features - edge_features = get_mol_edge_features(mol, edge_property_list, mask_nan=mask_nan) - edata = list(edge_features.values()) - edata = [np.expand_dims(d, axis=1) if d.ndim == 1 else d for d in edata] - if len(edata) > 0: - edata = np.concatenate(edata, axis=1).astype(dtype=dtype) - else: - edata = None - - # Get all positional encodings - pe_dict = get_all_positional_encodings(adj, num_nodes, pos_encoding_as_features) - - # Mask the NaNs - for pe_key, pe_val in pe_dict.items(): - pe_val = np.asarray(pe_val, dtype=dtype) - pe_dict[pe_key] = _mask_nans_inf(mask_nan, pe_val, pe_key) - - return adj, ndata, edata, pe_dict, conf_dict - - -def mol_to_adjacency_matrix( - mol: dm.Mol, - use_bonds_weights: bool = False, - add_self_loop: bool = False, - dtype: np.dtype = np.float32, -) -> coo_matrix: - r""" - Convert a molecule to a sparse adjacency matrix, as a 
torch Tensor. - Instead of using the Rdkit `GetAdjacencyMatrix()` method, this method - uses the bond ordering from the molecule object, which is the same as - the bond ordering in the bond features. - - Warning: - Do not use `Tensor.coalesce()` on the returned adjacency matrix, as it - will change the ordering of the bonds. - - Args: - mol: A molecule in the form of a SMILES string or an RDKit molecule object. - - use_bonds_weights: - If `True`, the adjacency matrix will contain the bond type as the - value of the edge. If `False`, the adjacency matrix will contain - `1` as the value of the edge. - - add_self_loop: - If `True`, the adjacency matrix will contain a self-loop for each - node. - - dtype: - The data type used to build the graph - - Returns: - adj: - coo sparse adjacency matrix of the molecule - """ - - # Get the indices for the adjacency matrix, and the bond value - adj_idx, adj_val = [], [] - for bond in mol.GetBonds(): - adj_idx.append([bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()]) - adj_idx.append([bond.GetEndAtomIdx(), bond.GetBeginAtomIdx()]) - if use_bonds_weights: - val = bond.GetBondTypeAsDouble() - else: - val = 1 - adj_val.extend([val, val]) - - # Convert to torch coo sparse tensor - if len(adj_val) > 0: # ensure tensor is not empty - adj = coo_matrix( - (torch.as_tensor(adj_val), torch.as_tensor(adj_idx).T.reshape(2, -1)), - shape=(mol.GetNumAtoms(), mol.GetNumAtoms()), - dtype=dtype, - ) - else: - # Special case for molecules with one atom - adj = coo_matrix(([], np.array([[], []])), shape=(mol.GetNumAtoms(), mol.GetNumAtoms()), dtype=dtype) - - # Add self loops - if add_self_loop: - arange = np.arange(adj.shape[0], dtype=int) - adj[arange, arange] = 1 - return adj - - -class GraphDict(dict): - def __init__( - self, - dic: Dict, - ): - r""" - Store the parameters required to initialize a `pyg.data.Data`, but - as a dictionary to reduce memory consumption. 
- - Possible keys for the dictionary: - - - adj: A sparse Tensor containing the adjacency matrix - - - ndata: A dictionnary containing different keys and Tensors - associated to the node features. - - - edata: A dictionnary containing different keys and Tensors - associated to the edge features. - - - dtype: The dtype for the floating data. - - - mask_nan: - Deal with molecules that fail a part of the featurization. - NaNs can happen when taking the of a noble gas, - or other properties that are not measured for specific atoms. - - - "raise": Raise an error when there is a nan or inf in the featurization - - "warn": Raise a warning when there is a nan or inf in the featurization - - "None": DEFAULT. Don't do anything - - "Floating value": Replace nans or inf by the specified value - """ - default_dic = { - "dtype": np.float16, - "mask_nan": "raise", - } - data = dic.pop("data", {}) - # ndata = dic.pop("ndata", {}) - # edata = dic.pop("edata", {}) - # for key in edata.keys(): - # assert key.startswith("edge_"), f"Edge features must start with 'edge_' but got {key}" - default_dic.update(dic) - default_dic.update(data) - # default_dic.update(ndata) - # default_dic.update(edata) - super().__init__(default_dic) - - @property - def keys(self): - return list(super().keys()) - - @property - def values(self): - return list(super().self.values()) - - def make_pyg_graph(self, **kwargs) -> Data: - """ - Convert the current dictionary of parameters, containing an adjacency matrix with node/edge data - into a `pyg.data.Data` of torch Tensors. - - `**kwargs` can be used to overwrite any parameter from the current dictionary. 
See `GraphDict.__init__` - for a list of parameters - """ - - num_nodes = self.adj.shape[0] - data_dict = {} - - # Convert the numpy and numpy sparse data to torch - for key, val in self.items(): - if key in ["adj", "dtype", "mask_nan"]: # Skip the parameters - continue - elif isinstance(val, np.ndarray): - # Convert the data to the specified dtype in torch format - val = val.astype(self.dtype) - data_dict[key] = torch.as_tensor(val) - elif issparse(val): - data_dict[key] = torch.as_tensor(val.astype(np.float32).todense()) - # `torch.sparse_coo_tensor` is too slow. Slows down the multiprocessing of features by >3x on 32 cores. - # indices = torch.from_numpy(np.vstack((val.row, val.col)).astype(np.int64)) - # data_dict[key] = torch.sparse_coo_tensor(indices=indices, values=val.data, size=val.shape) - elif isinstance(val, torch.Tensor): - data_dict[key] = val - else: - pass # Skip the other parameters - - # Create the PyG graph object `Data` - edge_index = torch.as_tensor(np.vstack((self.adj.row, self.adj.col))) - edge_weight = torch.as_tensor(self.adj.data) - data = Data(edge_index=edge_index, edge_weight=edge_weight, num_nodes=num_nodes, **data_dict) - return data - - @property - def adj(self): - return self["adj"] - - @property - def dtype(self): - return self["dtype"] - - @property - def mask_nan(self): - return self["mask_nan"] - - @property - def num_nodes(self) -> int: - return self.adj.shape[0] - - @property - def num_edges(self) -> int: - if issparse(self.adj): - return self.adj.nnz - else: - return np.count_nonzero(self.adj) # No division by 2 because edges are counted twice - # These are the integers that correspond with the torch data types in C++ NP_DTYPE_TO_TORCH_INT = {np.float16: 5, np.float32: 6, np.float64: 7} -def mol_to_graph_dict( - mol: Union[str, dm.Mol], +def mol_to_pyggraph( + mol: str, atom_property_list_onehot: Union[List[str],torch.Tensor] = [], atom_property_list_float: Union[List[Union[str, Callable]],torch.Tensor] = [], 
conformer_property_list: List[str] = [], @@ -986,10 +173,7 @@ def mol_to_graph_dict( on_error: str = "ignore", mask_nan: Union[str, float, type(None)] = "raise", max_num_atoms: Optional[int] = None, - use_graphium_cpp: bool = False, - original_featurization: Optional[Dict[str, Any]] = None, - output_pyg_graph = False -) -> Union[GraphDict, str]: +) -> Union[Data, str]: r""" Transforms a molecule into an adjacency matrix representing the molecular graph and a set of atom and bond features, and re-organizes them into a dictionary @@ -1006,12 +190,10 @@ def mol_to_graph_dict( atom_property_list_onehot: List of the properties used to get one-hot encoding of the atom type, such as the atom index represented as a one-hot vector. - See function `get_mol_atomic_features_onehot` atom_property_list_float: List of the properties used to get floating-point encoding of the atom type, such as the atomic mass or electronegativity. - See function `get_mol_atomic_features_float` conformer_property_list: list of properties used to encode the conformer information, outside of atom properties, currently support "positions_3d" @@ -1075,271 +257,84 @@ def mol_to_graph_dict( - "dtype": The numpy dtype for the floating data. 
""" - input_mol = mol - try: - if use_graphium_cpp: - if not isinstance(mol, str): - raise ValueError(f"use_graphium_cpp option requires that molecule be received as a string in mol_to_graph_dict, not type "+str(type(mol))) - has_conformer = ('positions_3d' in conformer_property_list) - pe_index = 4 - if has_conformer: - pe_index = 5; - mask_nan_value = 0.0 - if mask_nan is None: - mask_nan_style_int = 0 - elif mask_nan == "raise" or mask_nan == "warn": - mask_nan_style_int = 1 - else: - mask_nan_style_int = 2 - mask_nan_value = float(mask_nan) - tensors, num_nans, nan_tensor_index = graphium_cpp.featurize_smiles( - mol, - atom_property_list_onehot, - atom_property_list_float, - 'positions_3d' in conformer_property_list, - edge_property_list, - pos_encoding_as_features[1], - True, # duplicate_edges, so that we don't have to duplicate below - add_self_loop, - explicit_H, - use_bonds_weights, - True, #offset_carbon - NP_DTYPE_TO_TORCH_INT[dtype], - mask_nan_style_int, - mask_nan_value - ) - - if num_nans > 0: - if nan_tensor_index == 2: - array_name = "atom featurization" - elif nan_tensor_index == 3: - array_name = "edge property" - elif nan_tensor_index == 4 and has_conformer: - array_name = 'positions_3d' - else: - array_name = pos_encoding_as_features[0][nan_tensor_index - pe_index] - msg = f"There are {num_nans} NaNs in `{array_name}`" - if mask_nan == "raise": - raise ValueError(msg) - elif mask_nan == "warn": - logger.warning(msg) - - num_atoms = tensors[2].size(0) - if not output_pyg_graph: - adj = coo_matrix( - (tensors[1], tensors[0]), - shape=(num_atoms, num_atoms), - dtype=dtype, - ) - else: - data_dict = { - "feat": tensors[2], - "edge_feat": tensors[3] - } - if has_conformer: - data_dict['positions_3d'] = tensors[4] - for i in range(len(tensors)-pe_index): - data_dict[pos_encoding_as_features[0][i]] = tensors[i+pe_index] - # Create the PyG graph object `Data` - data = Data(edge_index=tensors[0], edge_weight=tensors[1], num_nodes=num_atoms, **data_dict) 
- return data - - ndata = tensors[2] - edata = tensors[3] - if has_conformer: - conf_dict = {'positions_3d': tensors[4]} - else: - conf_dict = {} - pe_tensors = tensors[pe_index:] - pe_dict = {pos_encoding_as_features[0][i]: pe_tensors[i] for i in range(len(pe_tensors))} + if not isinstance(mol, str): + raise ValueError(f"mol_to_pyggraph requires that molecule be received as a string, not type "+str(type(mol))) + try: + has_conformer = ('positions_3d' in conformer_property_list) + pe_index = 4 + if has_conformer: + pe_index = 5; + mask_nan_value = 0.0 + if mask_nan is None: + mask_nan_style_int = 0 + elif mask_nan == "raise" or mask_nan == "warn": + mask_nan_style_int = 1 else: - if isinstance(mol, str): - mol = dm.to_mol(mol, ordered=True) - if explicit_H: - mol = Chem.AddHs(mol) + mask_nan_style_int = 2 + mask_nan_value = float(mask_nan) + tensors, num_nans, nan_tensor_index = graphium_cpp.featurize_smiles( + mol, + atom_property_list_onehot, + atom_property_list_float, + 'positions_3d' in conformer_property_list, + edge_property_list, + pos_encoding_as_features[1], + True, # duplicate_edges, so that we don't have to duplicate below + add_self_loop, + explicit_H, + use_bonds_weights, + True, #offset_carbon + NP_DTYPE_TO_TORCH_INT[dtype], + mask_nan_style_int, + mask_nan_value + ) + + if num_nans > 0: + if nan_tensor_index == 2: + array_name = "atom featurization" + elif nan_tensor_index == 3: + array_name = "edge property" + elif nan_tensor_index == 4 and has_conformer: + array_name = 'positions_3d' else: - mol = Chem.RemoveHs(mol) - num_atoms = mol.GetNumAtoms() - if (max_num_atoms is not None) and (num_atoms > max_num_atoms): - raise ValueError(f"Maximum number of atoms greater than permitted {num_atoms}>{max_num_atoms}") - ( - adj, - ndata, - edata, - pe_dict, - conf_dict, - ) = mol_to_adj_and_features( - mol=mol, - atom_property_list_onehot=atom_property_list_onehot, - atom_property_list_float=atom_property_list_float, - 
conformer_property_list=conformer_property_list, - edge_property_list=edge_property_list, - add_self_loop=add_self_loop, - explicit_H=explicit_H, - use_bonds_weights=use_bonds_weights, - pos_encoding_as_features=pos_encoding_as_features, - mask_nan=mask_nan, - ) - if edata is not None: - if issparse(edata): - edata = to_dense_array(edata, dtype=dtype) - edata = edata.repeat(2, axis=0) + array_name = pos_encoding_as_features[0][nan_tensor_index - pe_index] + msg = f"There are {num_nans} NaNs in `{array_name}`" + if mask_nan == "raise": + raise ValueError(msg) + elif mask_nan == "warn": + logger.warning(msg) + + num_atoms = tensors[2].size(0) + data_dict = { + "feat": tensors[2], + "edge_feat": tensors[3] + } + if has_conformer: + data_dict['positions_3d'] = tensors[4] + for i in range(len(tensors)-pe_index): + data_dict[pos_encoding_as_features[0][i]] = tensors[i+pe_index] + # Create the PyG graph object `Data` + data = Data(edge_index=tensors[0], edge_weight=tensors[1], num_nodes=num_atoms, **data_dict) + return data + except Exception as e: if on_error.lower() == "raise": raise e elif on_error.lower() == "warn": - smiles = input_mol - if isinstance(smiles, dm.Mol): - smiles = Chem.MolToSmiles(input_mol) - msg = str(e) + "\nIgnoring following molecule:" + smiles + msg = str(e) + "\nIgnoring following molecule:" + mol logger.warning(msg) return str(e) elif on_error.lower() == "ignore": return str(e) - - graph_dict = {"adj": adj, "data": {}, "dtype": dtype} - - # Assign the node data - if ndata is not None: - graph_dict["data"]["feat"] = ndata - - # Assign the edge data - if edata is not None: - graph_dict["data"]["edge_feat"] = edata - - # Put the positional encodings as node features - # TODO: add support for PE on edges - for key, pe in pe_dict.items(): - graph_dict["data"][key] = pe - - # put the conformer positions here - for key, val in conf_dict.items(): - graph_dict["data"][key] = val - - graph_dict = GraphDict(graph_dict) - return graph_dict - - -def 
mol_to_pyggraph( - mol: Union[str, dm.Mol], - atom_property_list_onehot: Union[List[str],torch.Tensor] = [], - atom_property_list_float: Union[List[Union[str, Callable]],torch.Tensor] = [], - conformer_property_list: List[str] = [], - edge_property_list: Union[List[str],torch.Tensor] = [], - add_self_loop: bool = False, - explicit_H: bool = False, - use_bonds_weights: bool = False, - pos_encoding_as_features: Union[Dict[str, Any], Tuple[List[str],torch.Tensor]] = None, - dtype: np.dtype = np.float16, - on_error: str = "ignore", - mask_nan: Union[str, float, type(None)] = "raise", - max_num_atoms: Optional[int] = None, - use_graphium_cpp: bool = False, - original_featurization: Optional[Dict[str, Any]] = None, -) -> Union[Data, str]: - r""" - Transforms a molecule into an adjacency matrix representing the molecular graph - and a set of atom and bond features. - - Then, the adjacency matrix and node/edge features are used to build a - `pyg.data.Data` with pytorch Tensors. - - Parameters: - - mol: - The molecule to be converted - - atom_property_list_onehot: - List of the properties used to get one-hot encoding of the atom type, - such as the atom index represented as a one-hot vector. - See function `get_mol_atomic_features_onehot` - - atom_property_list_float: - List of the properties used to get floating-point encoding of the atom type, - such as the atomic mass or electronegativity. - See function `get_mol_atomic_features_float` - - conformer_property_list: - list of properties used to encode the conformer information, outside of atom properties, currently support "positions_3d" - - edge_property_list: - List of the properties used to encode the edges, such as the edge type - and the stereo type. - - add_self_loop: - Whether to add a value of `1` on the diagonal of the adjacency matrix. - - explicit_H: - Whether to consider the Hydrogens explicitely. If `False`, the hydrogens - are implicit. 
- - use_bonds_weights: - Whether to use the floating-point value of the bonds in the adjacency matrix, - such that single bonds are represented by 1, double bonds 2, triple 3, aromatic 1.5 - - pos_encoding_as_features: keyword arguments for function `graph_positional_encoder` - to generate positional encoding for node features. - - dtype: - The numpy data type used to build the graph - - on_error: - What to do when the featurization fails. This can change the - behavior of `mask_nan`. - - - "raise": Raise an error - - "warn": Raise a warning and return a string of the error - - "ignore": Ignore the error and return a string of the error - - mask_nan: - Deal with molecules that fail a part of the featurization. - NaNs can happen when taking the of a noble gas, - or other properties that are not measured for specific atoms. - - - "raise": Raise an error when there is a nan in the featurization - - "warn": Raise a warning when there is a nan in the featurization - - "None": DEFAULT. Don't do anything - - "Floating value": Replace nans by the specified value - - max_num_atoms: - Maximum number of atoms for a given molecule. If a molecule with more atoms - is give, an error is raised, but catpured according to the rules of - `on_error`. - Returns: - - graph: - Pyg graph, with `graph['feat']` corresponding to the concatenated - node data from `atom_property_list_onehot` and `atom_property_list_float`, - `graph['edge_feat']` corresponding to the concatenated edge data from `edge_property_list`. - There are also additional entries for the positional encodings. 
- - """ - graph_dict = mol_to_graph_dict( - mol=mol, - atom_property_list_onehot=atom_property_list_onehot, - atom_property_list_float=atom_property_list_float, - conformer_property_list=conformer_property_list, - edge_property_list=edge_property_list, - add_self_loop=add_self_loop, - explicit_H=explicit_H, - use_bonds_weights=use_bonds_weights, - pos_encoding_as_features=pos_encoding_as_features, - dtype=dtype, - on_error=on_error, - mask_nan=mask_nan, - max_num_atoms=max_num_atoms, - use_graphium_cpp=use_graphium_cpp, - original_featurization=original_featurization, - output_pyg_graph=True - ) - - return graph_dict + else: + # Invalid on_error value, so default to raising an exception. + raise e def mol_to_graph_signature(featurizer_args: Dict[str, Any] = None) -> Dict[str, Any]: """ - Get the default arguments of `mol_to_graph_dict` and update it + Get the default arguments of `mol_to_pyggraph` and update it with a provided dict of arguments in order to get a fulle signature of the featurizer args actually used for the features computation. 
@@ -1349,8 +344,8 @@ def mol_to_graph_signature(featurizer_args: Dict[str, Any] = None) -> Dict[str, A dictionary of featurizer arguments """ - # Get the signature of `mol_to_graph_dict` - signature = inspect.signature(mol_to_graph_dict) + # Get the signature of `mol_to_pyggraph` + signature = inspect.signature(mol_to_pyggraph) # Filter out empty arguments (without default value) parameters = list(filter(lambda param: param.default is not param.empty, signature.parameters.values())) diff --git a/profiling/profile_mol_to_graph.py b/profiling/profile_mol_to_graph.py index 423f487cf..e8bf19315 100644 --- a/profiling/profile_mol_to_graph.py +++ b/profiling/profile_mol_to_graph.py @@ -16,7 +16,7 @@ import pickle from graphium.data.utils import load_micro_zinc -from graphium.features.featurizer import mol_to_pyggraph, mol_to_adj_and_features, mol_to_graph_dict +from graphium.features.featurizer import mol_to_pyggraph # Check out this profiling tool: https://kirillstrelkov.medium.com/python-profiling-with-vscode-3a17c0407833 @@ -67,10 +67,7 @@ def main(): graphs = [] for s in tqdm(smiles): - mol = dm.to_mol( - s - ) # Doesn't need `ordered=True` because this is just to test the speed of the featurizer - graphs.append(mol_to_graph_dict(mol, **featurizer)) + graphs.append(mol_to_pyggraph(s, **featurizer)) print(graphs[0]) From 4ee35d40a2adb4c4badaf7b09568f5585d1101a3 Mon Sep 17 00:00:00 2001 From: ndickson Date: Thu, 18 Apr 2024 12:08:18 -0400 Subject: [PATCH 005/175] Removed newly deprecated options from yaml files --- expts/configs/config_gps_10M_pcqm4m.yaml | 6 ------ expts/configs/config_gps_10M_pcqm4m_mod.yaml | 6 ------ expts/configs/config_mpnn_10M_b3lyp.yaml | 6 ------ expts/configs/config_mpnn_pcqm4m.yaml | 6 ------ expts/hydra-configs/architecture/largemix.yaml | 4 ---- expts/hydra-configs/architecture/pcqm4m.yaml | 5 ----- expts/hydra-configs/architecture/toymix.yaml | 5 ----- .../hydra-configs/tasks/loss_metrics_datamodule/pcqm4m.yaml | 1 - 
expts/hydra-configs/training/accelerator/largemix_cpu.yaml | 1 - expts/hydra-configs/training/accelerator/largemix_gpu.yaml | 1 - expts/hydra-configs/training/accelerator/toymix_cpu.yaml | 1 - expts/hydra-configs/training/accelerator/toymix_gpu.yaml | 1 - expts/neurips2023_configs/base_config/large.yaml | 5 ----- expts/neurips2023_configs/base_config/large_pcba.yaml | 5 ----- expts/neurips2023_configs/base_config/large_pcqm_g25.yaml | 5 ----- expts/neurips2023_configs/base_config/large_pcqm_n4.yaml | 5 ----- expts/neurips2023_configs/base_config/small.yaml | 5 ----- .../baseline/config_small_gcn_baseline.yaml | 6 ------ .../neurips2023_configs/config_classifigression_l1000.yaml | 6 ------ expts/neurips2023_configs/config_large_gcn_gpu.yaml | 1 - expts/neurips2023_configs/config_luis_jama.yaml | 6 ------ expts/neurips2023_configs/config_small_gcn_gpu.yaml | 1 - expts/neurips2023_configs/debug/config_debug.yaml | 5 ----- expts/neurips2023_configs/debug/config_large_gcn_debug.yaml | 5 ----- expts/neurips2023_configs/debug/config_small_gcn_debug.yaml | 5 ----- .../single_task_gcn/config_large_gcn_mcf7.yaml | 6 ------ .../single_task_gcn/config_large_gcn_pcba.yaml | 6 ------ .../single_task_gcn/config_large_gcn_vcap.yaml | 6 ------ .../single_task_gin/config_large_gin_g25.yaml | 6 ------ .../single_task_gin/config_large_gin_mcf7.yaml | 6 ------ .../single_task_gin/config_large_gin_n4.yaml | 6 ------ .../single_task_gin/config_large_gin_pcba.yaml | 6 ------ .../single_task_gin/config_large_gin_pcq.yaml | 6 ------ .../single_task_gin/config_large_gin_vcap.yaml | 6 ------ .../single_task_gine/config_large_gine_g25.yaml | 6 ------ .../single_task_gine/config_large_gine_mcf7.yaml | 6 ------ .../single_task_gine/config_large_gine_n4.yaml | 6 ------ .../single_task_gine/config_large_gine_pcba.yaml | 6 ------ .../single_task_gine/config_large_gine_pcq.yaml | 6 ------ .../single_task_gine/config_large_gine_vcap.yaml | 6 ------ graphium/config/dummy_finetuning_from_gnn.yaml | 4 
---- graphium/config/dummy_finetuning_from_task_head.yaml | 4 ---- .../config/fake_and_missing_multilevel_multitask_pyg.yaml | 2 -- graphium/config/fake_multilevel_multitask_pyg.yaml | 2 -- graphium/config/zinc_default_multitask_pyg.yaml | 2 -- profiling/configs_profiling.yaml | 2 -- tests/config_test_ipu_dataloader.yaml | 3 --- tests/config_test_ipu_dataloader_multitask.yaml | 6 ------ tests/data/config_micro_ZINC.yaml | 2 -- 49 files changed, 220 deletions(-) diff --git a/expts/configs/config_gps_10M_pcqm4m.yaml b/expts/configs/config_gps_10M_pcqm4m.yaml index 10faa3b1e..0487a8d04 100644 --- a/expts/configs/config_gps_10M_pcqm4m.yaml +++ b/expts/configs/config_gps_10M_pcqm4m.yaml @@ -59,7 +59,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. task_specific_args: # To be replaced by a new class "DatasetParams" homolumo: @@ -76,10 +75,6 @@ datamodule: split_test: 0.1 # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), # 'possible_number_radical_e', 'possible_is_aromatic', 'possible_is_in_ring', @@ -115,7 +110,6 @@ datamodule: num_workers: 0 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. 
- featurization_backend: "loky" architecture: diff --git a/expts/configs/config_gps_10M_pcqm4m_mod.yaml b/expts/configs/config_gps_10M_pcqm4m_mod.yaml index e2cdb44c2..19543302b 100644 --- a/expts/configs/config_gps_10M_pcqm4m_mod.yaml +++ b/expts/configs/config_gps_10M_pcqm4m_mod.yaml @@ -8,7 +8,6 @@ constants: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. task_specific_args: # To be replaced by a new class "DatasetParams" homolumo: @@ -25,10 +24,6 @@ datamodule: split_test: 0.1 # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), # 'possible_number_radical_e', 'possible_is_aromatic', 'possible_is_in_ring', @@ -84,7 +79,6 @@ datamodule: num_workers: 0 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" # ipu_dataloader_training_opts: # mode: async diff --git a/expts/configs/config_mpnn_10M_b3lyp.yaml b/expts/configs/config_mpnn_10M_b3lyp.yaml index d54bc6667..424dbcd71 100644 --- a/expts/configs/config_mpnn_10M_b3lyp.yaml +++ b/expts/configs/config_mpnn_10M_b3lyp.yaml @@ -60,7 +60,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" betagap: @@ -88,10 +87,6 @@ datamodule: split_test: 0.1 # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/b3lyp/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -126,7 +121,6 @@ datamodule: num_workers: 0 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" architecture: diff --git a/expts/configs/config_mpnn_pcqm4m.yaml b/expts/configs/config_mpnn_pcqm4m.yaml index 34a9ca6b9..70972d370 100644 --- a/expts/configs/config_mpnn_pcqm4m.yaml +++ b/expts/configs/config_mpnn_pcqm4m.yaml @@ -8,7 +8,6 @@ constants: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. task_specific_args: # To be replaced by a new class "DatasetParams" homolumo: @@ -26,10 +25,6 @@ datamodule: split_names: ["train", "valid", "test-dev"] # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 20 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "graphium/data/PCQM4Mv2/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -60,7 +55,6 @@ datamodule: num_workers: 40 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. 
- featurization_backend: "loky" # ipu_dataloader_training_opts: # mode: async diff --git a/expts/hydra-configs/architecture/largemix.yaml b/expts/hydra-configs/architecture/largemix.yaml index e6969f3f7..f1f494157 100644 --- a/expts/hydra-configs/architecture/largemix.yaml +++ b/expts/hydra-configs/architecture/largemix.yaml @@ -83,10 +83,6 @@ architecture: datamodule: module_type: "MultitaskFromSmilesDataModule" args: - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 20 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: ${constants.datacache_path} num_workers: 20 # -1 to use all persistent_workers: True diff --git a/expts/hydra-configs/architecture/pcqm4m.yaml b/expts/hydra-configs/architecture/pcqm4m.yaml index 494875765..f3fc04b63 100644 --- a/expts/hydra-configs/architecture/pcqm4m.yaml +++ b/expts/hydra-configs/architecture/pcqm4m.yaml @@ -81,13 +81,8 @@ architecture: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: ${constants.datacache_path} num_workers: 40 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. 
diff --git a/expts/hydra-configs/architecture/toymix.yaml b/expts/hydra-configs/architecture/toymix.yaml index 65999f304..f4ae5a5db 100644 --- a/expts/hydra-configs/architecture/toymix.yaml +++ b/expts/hydra-configs/architecture/toymix.yaml @@ -74,15 +74,10 @@ architecture: datamodule: module_type: "MultitaskFromSmilesDataModule" args: - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: ${constants.datacache_path} num_workers: 30 # -1 to use all persistent_workers: False featurization: - use_graphium_cpp: True atom_property_list_onehot: [atomic-number, group, period, total-valence] atom_property_list_float: [degree, formal-charge, radical-electron, aromatic, in-ring] edge_property_list: [bond-type-onehot, stereo, in-ring] diff --git a/expts/hydra-configs/tasks/loss_metrics_datamodule/pcqm4m.yaml b/expts/hydra-configs/tasks/loss_metrics_datamodule/pcqm4m.yaml index d5b302dd1..8dcf2c0c4 100644 --- a/expts/hydra-configs/tasks/loss_metrics_datamodule/pcqm4m.yaml +++ b/expts/hydra-configs/tasks/loss_metrics_datamodule/pcqm4m.yaml @@ -25,7 +25,6 @@ metrics: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" homolumo: diff --git a/expts/hydra-configs/training/accelerator/largemix_cpu.yaml b/expts/hydra-configs/training/accelerator/largemix_cpu.yaml index 6f5e0606a..ea83fdf58 100644 --- a/expts/hydra-configs/training/accelerator/largemix_cpu.yaml +++ b/expts/hydra-configs/training/accelerator/largemix_cpu.yaml @@ -4,7 +4,6 @@ datamodule: args: batch_size_training: 200 batch_size_inference: 200 - featurization_n_jobs: 20 num_workers: 20 predictor: diff --git a/expts/hydra-configs/training/accelerator/largemix_gpu.yaml b/expts/hydra-configs/training/accelerator/largemix_gpu.yaml index ac728c982..17ac12ad8 100644 --- a/expts/hydra-configs/training/accelerator/largemix_gpu.yaml +++ b/expts/hydra-configs/training/accelerator/largemix_gpu.yaml @@ -7,7 +7,6 @@ datamodule: args: batch_size_training: 2048 batch_size_inference: 2048 - featurization_n_jobs: 6 num_workers: 6 predictor: diff --git a/expts/hydra-configs/training/accelerator/toymix_cpu.yaml b/expts/hydra-configs/training/accelerator/toymix_cpu.yaml index 9022eeb84..f81662285 100644 --- a/expts/hydra-configs/training/accelerator/toymix_cpu.yaml +++ b/expts/hydra-configs/training/accelerator/toymix_cpu.yaml @@ -4,7 +4,6 @@ datamodule: args: batch_size_training: 200 batch_size_inference: 200 - featurization_n_jobs: 4 num_workers: 4 predictor: diff --git a/expts/hydra-configs/training/accelerator/toymix_gpu.yaml b/expts/hydra-configs/training/accelerator/toymix_gpu.yaml index c2c8e4066..ac4e48c26 100644 --- a/expts/hydra-configs/training/accelerator/toymix_gpu.yaml +++ b/expts/hydra-configs/training/accelerator/toymix_gpu.yaml @@ -7,7 +7,6 @@ datamodule: args: batch_size_training: 200 batch_size_inference: 200 - featurization_n_jobs: 4 num_workers: 4 predictor: diff --git a/expts/neurips2023_configs/base_config/large.yaml b/expts/neurips2023_configs/base_config/large.yaml index 222663d47..18503527c 100644 --- 
a/expts/neurips2023_configs/base_config/large.yaml +++ b/expts/neurips2023_configs/base_config/large.yaml @@ -62,7 +62,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. task_specific_args: # To be replaced by a new class "DatasetParams" l1000_vcap: @@ -133,10 +132,6 @@ datamodule: epoch_sampling_fraction: 1.0 # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: ${constants.datacache_path} featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), diff --git a/expts/neurips2023_configs/base_config/large_pcba.yaml b/expts/neurips2023_configs/base_config/large_pcba.yaml index 2bb9b4d93..a1e3d108f 100644 --- a/expts/neurips2023_configs/base_config/large_pcba.yaml +++ b/expts/neurips2023_configs/base_config/large_pcba.yaml @@ -62,7 +62,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" @@ -132,10 +131,6 @@ datamodule: #epoch_sampling_fraction: 1.0 # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: ${constants.datacache_path} featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), diff --git a/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml b/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml index 7041a4c9c..b71c43cf2 100644 --- a/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml +++ b/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml @@ -62,7 +62,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. task_specific_args: # To be replaced by a new class "DatasetParams" @@ -132,10 +131,6 @@ datamodule: # epoch_sampling_fraction: 1.0 # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: ${constants.datacache_path} featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), diff --git a/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml b/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml index b0f4a56f9..464e49581 100644 --- a/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml +++ b/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml @@ -62,7 +62,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" @@ -132,10 +131,6 @@ datamodule: epoch_sampling_fraction: 1.0 # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: ${constants.datacache_path} featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), diff --git a/expts/neurips2023_configs/base_config/small.yaml b/expts/neurips2023_configs/base_config/small.yaml index fd7ce3fbe..4914fdda3 100644 --- a/expts/neurips2023_configs/base_config/small.yaml +++ b/expts/neurips2023_configs/base_config/small.yaml @@ -51,7 +51,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. task_specific_args: # To be replaced by a new class "DatasetParams" qm9: @@ -97,10 +96,6 @@ datamodule: method: "normal" # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-small/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), diff --git a/expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml b/expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml index 7b2d2cbdf..e107fa386 100644 --- a/expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml +++ b/expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" qm9: @@ -96,10 +95,6 @@ datamodule: method: "normal" # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-small/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -134,7 +129,6 @@ datamodule: num_workers: 30 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/config_classifigression_l1000.yaml b/expts/neurips2023_configs/config_classifigression_l1000.yaml index 48f06d9d1..fb77ad457 100644 --- a/expts/neurips2023_configs/config_classifigression_l1000.yaml +++ b/expts/neurips2023_configs/config_classifigression_l1000.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. task_specific_args: # To be replaced by a new class "DatasetParams" l1000_vcap: @@ -76,10 +75,6 @@ datamodule: splits_path: graphium/data/neurips2023/small-dataset/l1000_mcf7_random_splits.pt # Download with `wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Large-dataset/l1000_mcf7_random_splits.pt` # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 1 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-small/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -114,7 +109,6 @@ datamodule: num_workers: 5 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. 
# Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/config_large_gcn_gpu.yaml b/expts/neurips2023_configs/config_large_gcn_gpu.yaml index 2830530aa..31a02e22c 100644 --- a/expts/neurips2023_configs/config_large_gcn_gpu.yaml +++ b/expts/neurips2023_configs/config_large_gcn_gpu.yaml @@ -49,7 +49,6 @@ datamodule: df_path: expts/data/neurips2023/large-dataset/PCQM4M_G25_N4.parquet # wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Large-dataset/PCQM4M_G25_N4.parquet splits_path: expts/data/neurips2023/large-dataset/pcqm4m_g25_n4_random_splits.pt # Download with `wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Large-dataset/pcqm4m_g25_n4_random_splits.pt` - featurization_n_jobs: 4 # 30 processed_graph_data_path: "../datacache/neurips2023-small/" num_workers: 4 # 30 diff --git a/expts/neurips2023_configs/config_luis_jama.yaml b/expts/neurips2023_configs/config_luis_jama.yaml index 5135c5cae..e0549e0f0 100644 --- a/expts/neurips2023_configs/config_luis_jama.yaml +++ b/expts/neurips2023_configs/config_luis_jama.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. task_specific_args: # To be replaced by a new class "DatasetParams" @@ -84,10 +83,6 @@ datamodule: method: "normal" # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-small/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -122,7 +117,6 @@ datamodule: num_workers: 4 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. 
# Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/config_small_gcn_gpu.yaml b/expts/neurips2023_configs/config_small_gcn_gpu.yaml index 8b5a46e26..ccad70af6 100644 --- a/expts/neurips2023_configs/config_small_gcn_gpu.yaml +++ b/expts/neurips2023_configs/config_small_gcn_gpu.yaml @@ -41,7 +41,6 @@ datamodule: zinc: df_path: expts/data/neurips2023/small-dataset/ZINC12k.csv.gz splits_path: expts/data/neurips2023/small-dataset/ZINC12k_random_splits.pt # Download with `wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Small-dataset/ZINC12k_random_splits.pt` - featurization_n_jobs: 4 # 30 processed_graph_data_path: "../datacache/neurips2023-small/" num_workers: 4 # 30 diff --git a/expts/neurips2023_configs/debug/config_debug.yaml b/expts/neurips2023_configs/debug/config_debug.yaml index 3d31e5e8c..21a8c30b2 100644 --- a/expts/neurips2023_configs/debug/config_debug.yaml +++ b/expts/neurips2023_configs/debug/config_debug.yaml @@ -51,7 +51,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" @@ -70,10 +69,6 @@ datamodule: method: "normal" # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 0 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-small/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), diff --git a/expts/neurips2023_configs/debug/config_large_gcn_debug.yaml b/expts/neurips2023_configs/debug/config_large_gcn_debug.yaml index ec05bf6eb..236673699 100644 --- a/expts/neurips2023_configs/debug/config_large_gcn_debug.yaml +++ b/expts/neurips2023_configs/debug/config_large_gcn_debug.yaml @@ -60,7 +60,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. task_specific_args: # To be replaced by a new class "DatasetParams" l1000_vcap: @@ -131,10 +130,6 @@ datamodule: epoch_sampling_fraction: 1.0 # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-large/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), diff --git a/expts/neurips2023_configs/debug/config_small_gcn_debug.yaml b/expts/neurips2023_configs/debug/config_small_gcn_debug.yaml index 26b50756f..773ca8814 100644 --- a/expts/neurips2023_configs/debug/config_small_gcn_debug.yaml +++ b/expts/neurips2023_configs/debug/config_small_gcn_debug.yaml @@ -40,7 +40,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" qm9: @@ -84,10 +83,6 @@ datamodule: method: "normal" # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-small/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), diff --git a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_mcf7.yaml b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_mcf7.yaml index e05d1be8d..1aba37eb4 100644 --- a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_mcf7.yaml +++ b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_mcf7.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. task_specific_args: # To be replaced by a new class "DatasetParams" l1000_mcf7: @@ -65,10 +64,6 @@ datamodule: splits_path: graphium/data/neurips2023/large-dataset/l1000_mcf7_random_splits.pt # Download with `wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Large-dataset/l1000_mcf7_random_splits.pt` # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-large/mcf7/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -103,7 +98,6 @@ datamodule: num_workers: 30 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. 
- featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_pcba.yaml b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_pcba.yaml index cf924850e..77837d750 100644 --- a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_pcba.yaml +++ b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_pcba.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. task_specific_args: # To be replaced by a new class "DatasetParams" pcba_1328: @@ -65,10 +64,6 @@ datamodule: splits_path: graphium/data/neurips2023/large-dataset/pcba_1328_random_splits.pt # Download with `wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Large-dataset/pcba_1328_random_splits.pt` # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-large/pcba/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -103,7 +98,6 @@ datamodule: num_workers: 30 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. 
- featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_vcap.yaml b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_vcap.yaml index f1c9bcfd4..1c021a559 100644 --- a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_vcap.yaml +++ b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_vcap.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. task_specific_args: # To be replaced by a new class "DatasetParams" l1000_vcap: @@ -65,10 +64,6 @@ datamodule: splits_path: graphium/data/neurips2023/large-dataset/l1000_vcap_random_splits.pt # Download with `wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Large-dataset/l1000_vcap_random_splits.pt` # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-large/vcap/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -103,7 +98,6 @@ datamodule: num_workers: 30 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. 
- featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_g25.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_g25.yaml index 01988e527..bd09385f5 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_g25.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_g25.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. task_specific_args: # To be replaced by a new class "DatasetParams" pcqm4m_g25: @@ -68,10 +67,6 @@ datamodule: method: "normal" # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-large/g25/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -106,7 +101,6 @@ datamodule: num_workers: 30 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_mcf7.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_mcf7.yaml index fdeb4b399..5abf9790d 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_mcf7.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_mcf7.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" l1000_mcf7: @@ -65,10 +64,6 @@ datamodule: splits_path: graphium/data/neurips2023/large-dataset/l1000_mcf7_random_splits.pt # Download with `wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Large-dataset/l1000_mcf7_random_splits.pt` # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-large/mcf7/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -103,7 +98,6 @@ datamodule: num_workers: 30 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_n4.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_n4.yaml index 5920a80f6..834967498 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_n4.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_n4.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" pcqm4m_n4: @@ -69,10 +68,6 @@ datamodule: method: "normal" # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-large/n4/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -107,7 +102,6 @@ datamodule: num_workers: 30 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_pcba.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_pcba.yaml index de2f7fbc4..f390a7a2b 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_pcba.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_pcba.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" pcba_1328: @@ -65,10 +64,6 @@ datamodule: splits_path: graphium/data/neurips2023/large-dataset/pcba_1328_random_splits.pt # Download with `wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Large-dataset/pcba_1328_random_splits.pt` # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-large/pcba/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -103,7 +98,6 @@ datamodule: num_workers: 30 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_pcq.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_pcq.yaml index ca820e86b..d13a757f3 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_pcq.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_pcq.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" pcqm4m_g25: @@ -83,10 +82,6 @@ datamodule: method: "normal" # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-large/pcq/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -121,7 +116,6 @@ datamodule: num_workers: 30 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_vcap.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_vcap.yaml index c21b765b3..75f802926 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_vcap.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_vcap.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" l1000_vcap: @@ -65,10 +64,6 @@ datamodule: splits_path: graphium/data/neurips2023/large-dataset/l1000_vcap_random_splits.pt # Download with `wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Large-dataset/l1000_vcap_random_splits.pt` # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-large/vcap/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -103,7 +98,6 @@ datamodule: num_workers: 30 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_g25.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_g25.yaml index b88314797..02679153c 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_g25.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_g25.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" pcqm4m_g25: @@ -68,10 +67,6 @@ datamodule: method: "normal" # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-large/g25/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -106,7 +101,6 @@ datamodule: num_workers: 30 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_mcf7.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_mcf7.yaml index b96fc8daf..0506dbfea 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_mcf7.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_mcf7.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" l1000_mcf7: @@ -65,10 +64,6 @@ datamodule: splits_path: graphium/data/neurips2023/large-dataset/l1000_mcf7_random_splits.pt # Download with `wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Large-dataset/l1000_mcf7_random_splits.pt` # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-large/mcf7/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -103,7 +98,6 @@ datamodule: num_workers: 30 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_n4.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_n4.yaml index e98ae03da..58bad3bbc 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_n4.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_n4.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" pcqm4m_n4: @@ -69,10 +68,6 @@ datamodule: method: "normal" # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-large/n4/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -107,7 +102,6 @@ datamodule: num_workers: 30 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_pcba.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_pcba.yaml index 427f7ca0f..3ce9ffde2 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_pcba.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_pcba.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" pcba_1328: @@ -65,10 +64,6 @@ datamodule: splits_path: graphium/data/neurips2023/large-dataset/pcba_1328_random_splits.pt # Download with `wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Large-dataset/pcba_1328_random_splits.pt` # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-large/pcba/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -103,7 +98,6 @@ datamodule: num_workers: 30 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_pcq.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_pcq.yaml index 07fc6d009..d541b9b04 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_pcq.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_pcq.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" pcqm4m_g25: @@ -83,10 +82,6 @@ datamodule: method: "normal" # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-large/pcq/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -121,7 +116,6 @@ datamodule: num_workers: 30 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" architecture: diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_vcap.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_vcap.yaml index b63263b3d..121d74ddb 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_vcap.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_vcap.yaml @@ -50,7 +50,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. 
task_specific_args: # To be replaced by a new class "DatasetParams" l1000_vcap: @@ -65,10 +64,6 @@ datamodule: splits_path: graphium/data/neurips2023/large-dataset/l1000_vcap_random_splits.pt # Download with `wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Large-dataset/l1000_vcap_random_splits.pt` # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" processed_graph_data_path: "../datacache/neurips2023-large/vcap/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -103,7 +98,6 @@ datamodule: num_workers: 30 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - featurization_backend: "loky" architecture: diff --git a/graphium/config/dummy_finetuning_from_gnn.yaml b/graphium/config/dummy_finetuning_from_gnn.yaml index ca9493d30..4de1e79bc 100644 --- a/graphium/config/dummy_finetuning_from_gnn.yaml +++ b/graphium/config/dummy_finetuning_from_gnn.yaml @@ -128,10 +128,6 @@ datamodule: batch_size_training: 200 batch_size_inference: 200 - featurization_n_jobs: 0 num_workers: 0 - prepare_dict_or_graph: pyg:graph - featurization_progress: True - featurization_backend: "loky" persistent_workers: False \ No newline at end of file diff --git a/graphium/config/dummy_finetuning_from_task_head.yaml b/graphium/config/dummy_finetuning_from_task_head.yaml index 2682ccee3..90b0d5341 100644 --- a/graphium/config/dummy_finetuning_from_task_head.yaml +++ b/graphium/config/dummy_finetuning_from_task_head.yaml @@ -134,12 +134,8 @@ datamodule: batch_size_training: 200 batch_size_inference: 200 - featurization_n_jobs: 0 num_workers: 0 - prepare_dict_or_graph: pyg:graph - featurization_progress: True - featurization_backend: "loky" persistent_workers: False diff --git 
a/graphium/config/fake_and_missing_multilevel_multitask_pyg.yaml b/graphium/config/fake_and_missing_multilevel_multitask_pyg.yaml index 044a0129c..a34399dd1 100644 --- a/graphium/config/fake_and_missing_multilevel_multitask_pyg.yaml +++ b/graphium/config/fake_and_missing_multilevel_multitask_pyg.yaml @@ -45,8 +45,6 @@ datamodule: weights_col: null # This may not always be provided # Featurization - featurization_n_jobs: 16 - featurization_progress: True featurization: atom_property_list_onehot: ["atomic-number", "degree"] atom_property_list_float: [] diff --git a/graphium/config/fake_multilevel_multitask_pyg.yaml b/graphium/config/fake_multilevel_multitask_pyg.yaml index 918807cb4..3cce7b5e2 100644 --- a/graphium/config/fake_multilevel_multitask_pyg.yaml +++ b/graphium/config/fake_multilevel_multitask_pyg.yaml @@ -45,8 +45,6 @@ datamodule: weights_col: null # This may not always be provided # Featurization - featurization_n_jobs: 16 - featurization_progress: True featurization: atom_property_list_onehot: ["atomic-number", "degree"] atom_property_list_float: [] diff --git a/graphium/config/zinc_default_multitask_pyg.yaml b/graphium/config/zinc_default_multitask_pyg.yaml index b9435ec7e..01d20bc53 100644 --- a/graphium/config/zinc_default_multitask_pyg.yaml +++ b/graphium/config/zinc_default_multitask_pyg.yaml @@ -45,8 +45,6 @@ datamodule: weights_type: null # Featurization - featurization_n_jobs: 16 - featurization_progress: True featurization: atom_property_list_onehot: ["atomic-number", "degree"] atom_property_list_float: [] diff --git a/profiling/configs_profiling.yaml b/profiling/configs_profiling.yaml index 0ff4f6c94..bde4bdb5f 100644 --- a/profiling/configs_profiling.yaml +++ b/profiling/configs_profiling.yaml @@ -11,8 +11,6 @@ datamodule: smiles_col: SMILES # Featurization - featurization_n_jobs: -1 - featurization_progress: True featurization: atom_property_list_onehot: [atomic-number, valence] atom_property_list_float: [mass, electronegativity] diff --git 
a/tests/config_test_ipu_dataloader.yaml b/tests/config_test_ipu_dataloader.yaml index f0f55d197..3f63bfd3d 100644 --- a/tests/config_test_ipu_dataloader.yaml +++ b/tests/config_test_ipu_dataloader.yaml @@ -61,9 +61,6 @@ datamodule: weights_type: null # This may not always be provided task_level: graph # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 0 - featurization_progress: True featurization: atom_property_list_onehot: [atomic-number, valence] atom_property_list_float: [mass, electronegativity, in-ring] diff --git a/tests/config_test_ipu_dataloader_multitask.yaml b/tests/config_test_ipu_dataloader_multitask.yaml index 8b8fbf417..563222d8d 100644 --- a/tests/config_test_ipu_dataloader_multitask.yaml +++ b/tests/config_test_ipu_dataloader_multitask.yaml @@ -51,7 +51,6 @@ accelerator: datamodule: module_type: "MultitaskFromSmilesDataModule" - # module_type: "FakeDataModule" # Option to use generated data args: # Matches that in the test_multitask_datamodule.py case. task_specific_args: # To be replaced by a new class "DatasetParams" qm9: @@ -95,10 +94,6 @@ datamodule: method: "normal" # Featurization - prepare_dict_or_graph: pyg:graph - featurization_n_jobs: 30 - featurization_progress: True - featurization_backend: "loky" # processed_graph_data_path: "../datacache/neurips2023-small/" featurization: # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), @@ -133,7 +128,6 @@ datamodule: num_workers: -1 # -1 to use all persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. 
- featurization_backend: "loky" architecture: diff --git a/tests/data/config_micro_ZINC.yaml b/tests/data/config_micro_ZINC.yaml index 88fc4a841..d2e94318f 100644 --- a/tests/data/config_micro_ZINC.yaml +++ b/tests/data/config_micro_ZINC.yaml @@ -11,8 +11,6 @@ datamodule: smiles_col: SMILES # Featurization - featurization_n_jobs: -1 - featurization_progress: True featurization: atom_property_list_onehot: [atomic-number, valence] atom_property_list_float: [mass, electronegativity, in-ring] From cf23e375adfbe330d447c03ab1a13d0570ede232 Mon Sep 17 00:00:00 2001 From: ndickson Date: Thu, 18 Apr 2024 14:29:49 -0400 Subject: [PATCH 006/175] Added support for limiting the number of threads used by prepare_and_save_data --- graphium/data/datamodule.py | 13 +++++++------ graphium/graphium_cpp/labels.cpp | 20 ++++++++++++++++++-- graphium/graphium_cpp/labels.h | 3 ++- 3 files changed, 27 insertions(+), 9 deletions(-) diff --git a/graphium/data/datamodule.py b/graphium/data/datamodule.py index b2e748d2e..1f59cc52c 100644 --- a/graphium/data/datamodule.py +++ b/graphium/data/datamodule.py @@ -803,7 +803,7 @@ def __init__( featurization_batch_size = None, collate_fn: Optional[Callable] = None, prepare_dict_or_graph = None, - preprocessing_n_jobs: int = -1, + preprocessing_n_jobs: int = 0, **kwargs, ): """ @@ -838,7 +838,7 @@ def __init__( collate_fn: A custom torch collate function. Default is to `graphium.data.graphium_collate_fn` prepare_dict_or_graph: Deprecated. Behaviour now always matches previous "pyg:graph" option. preprocessing_n_jobs: Number of threads to use during preprocessing. - Use -1 to use all available cores. + Use 0 to use all available cores, or -1 to use all but one core. 
""" BaseDataModule.__init__( self, @@ -1120,7 +1120,8 @@ def prepare_data(self): self.task_val_indices, self.task_test_indices, self.add_self_loop, - self.explicit_H) + self.explicit_H, + self.preprocessing_n_jobs) for task, stats in all_stats.items(): if len(stats) < 4: @@ -1755,7 +1756,7 @@ def __init__( featurization_backend = None, collate_fn: Optional[Callable] = None, prepare_dict_or_graph = None, - preprocessing_n_jobs: int = -1, + preprocessing_n_jobs: int = 0, **kwargs, ): r""" @@ -1790,7 +1791,7 @@ def __init__( - `None`: all elements are considered. prepare_dict_or_graph: Deprecated. Behaviour now always matches previous "pyg:graph" option. preprocessing_n_jobs: Number of threads to use during preprocessing. - Use -1 to use all available cores. + Use 0 to use all available cores, or -1 to use all but one core. """ warn_deprecated(dataloading_from, "dataloading_from", "GraphOGBDataModule::__init__") @@ -2017,7 +2018,7 @@ def __init__( featurization_backend = None, collate_fn: Optional[Callable] = None, prepare_dict_or_graph = None, - preprocessing_n_jobs: int = -1, + preprocessing_n_jobs: int = 0, **kwargs, ): warn_deprecated(dataloading_from, "dataloading_from", "ADMETBenchmarkDataModule::__init__") diff --git a/graphium/graphium_cpp/labels.cpp b/graphium/graphium_cpp/labels.cpp index b950463f0..0fe678b33 100644 --- a/graphium/graphium_cpp/labels.cpp +++ b/graphium/graphium_cpp/labels.cpp @@ -527,7 +527,8 @@ std::tuple< const pybind11::dict& task_val_indices, const pybind11::dict& task_test_indices, bool add_self_loop, - bool explicit_H) { + bool explicit_H, + int max_threads) { ensure_numpy_array_module_initialized(); @@ -819,10 +820,25 @@ std::tuple< // Compute all InChI keys for all molecules, in parallel if applicable. 
std::unique_ptr keys(new MolKey[total_num_mols]); + + // Determine the number of threads to use for computing MolKey values const size_t num_mols_per_block = 512; const size_t num_blocks = (total_num_mols + num_mols_per_block-1) / num_mols_per_block; const size_t num_processors = std::thread::hardware_concurrency(); - const size_t num_threads = (num_processors == 1 || num_blocks <= 4) ? 1 : std::min(num_processors, num_blocks/2); + size_t num_threads = (num_processors == 1 || num_blocks <= 4) ? 1 : std::min(num_processors, num_blocks/2); + // max_threads of -1 means n-1 threads, to avoid starving other processes + if (max_threads < 0) { + max_threads += num_processors; + // Don't hit zero or remain negative, because that would skip applying the limit + if (max_threads < 1) { + max_threads = 1; + } + } + // max_threads of 0 means to not limit the number of threads + if (max_threads > 0 && num_threads > max_threads) { + num_threads = max_threads; + } + auto&& get_single_mol_key = [&task_mol_start,add_self_loop,explicit_H,&task_mol_indices,&smiles_strings,num_tasks](size_t mol_index) -> MolKey { // Find which task this mol is in. If there could be many tasks, // this could be a binary search, but for small numbers of tasks, diff --git a/graphium/graphium_cpp/labels.h b/graphium/graphium_cpp/labels.h index 6bcfe87e0..73966450e 100644 --- a/graphium/graphium_cpp/labels.h +++ b/graphium/graphium_cpp/labels.h @@ -52,7 +52,8 @@ std::tuple< const pybind11::dict& task_val_indices, const pybind11::dict& task_test_indices, bool add_self_loop = false, - bool explicit_H = false); + bool explicit_H = false, + int max_threads = 0); void load_labels_from_index( const std::string stage_directory, From 5db0e2abba038f1786264ac75dbd54f5951c68b2 Mon Sep 17 00:00:00 2001 From: ndickson Date: Thu, 18 Apr 2024 15:48:35 -0400 Subject: [PATCH 007/175] Fixed compiler warning about signed vs. 
unsigned comparison --- graphium/graphium_cpp/labels.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/graphium/graphium_cpp/labels.cpp b/graphium/graphium_cpp/labels.cpp index 0fe678b33..1a3318bd7 100644 --- a/graphium/graphium_cpp/labels.cpp +++ b/graphium/graphium_cpp/labels.cpp @@ -835,8 +835,8 @@ std::tuple< } } // max_threads of 0 means to not limit the number of threads - if (max_threads > 0 && num_threads > max_threads) { - num_threads = max_threads; + if (max_threads > 0 && num_threads > size_t(max_threads)) { + num_threads = size_t(max_threads); } auto&& get_single_mol_key = [&task_mol_start,add_self_loop,explicit_H,&task_mol_indices,&smiles_strings,num_tasks](size_t mol_index) -> MolKey { From c75a452e7789a174dcd53c4cf603e6681c9cfc22 Mon Sep 17 00:00:00 2001 From: ndickson Date: Thu, 18 Apr 2024 16:47:57 -0400 Subject: [PATCH 008/175] Fixed Python syntax issues --- graphium/data/datamodule.py | 6 +++--- graphium/data/dataset.py | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/graphium/data/datamodule.py b/graphium/data/datamodule.py index 1f59cc52c..fbb8819df 100644 --- a/graphium/data/datamodule.py +++ b/graphium/data/datamodule.py @@ -908,10 +908,10 @@ def encode_feature_options(options, name, encoding_function): encode_feature_options(encoded_featurization, "atom_property_list_float", graphium_cpp.atom_float_feature_names_to_tensor) encode_feature_options(encoded_featurization, "edge_property_list", graphium_cpp.bond_feature_names_to_tensor) - if "pos_encoding_as_features" in featurization and + if ("pos_encoding_as_features" in featurization and featurization["pos_encoding_as_features"] is not None and - featurization["pos_encoding_as_features"]["pos_types"] is not None: - (pos_encoding_names, pos_encoding_tensor) = + featurization["pos_encoding_as_features"]["pos_types"] is not None): + (pos_encoding_names, pos_encoding_tensor) = \ 
graphium_cpp.positional_feature_options_to_tensor(featurization["pos_encoding_as_features"]["pos_types"]) else: pos_encoding_names = [] diff --git a/graphium/data/dataset.py b/graphium/data/dataset.py index c37c4fdc8..f90d9922a 100644 --- a/graphium/data/dataset.py +++ b/graphium/data/dataset.py @@ -40,10 +40,10 @@ def __init__( label_num_cols: List[int] = None, label_dtypes: List[int] = None, mol_file_data_offsets = None, - concat_smiles_tensor, - smiles_offsets_tensor, - num_nodes_tensor, - num_edges_tensor, + concat_smiles_tensor = None, + smiles_offsets_tensor = None, + num_nodes_tensor = None, + num_edges_tensor = None, about: str = "", data_path: Optional[Union[str, os.PathLike]] = None, ): From 4aa1f85141da11b240f4cbb9159f8baca7587d25 Mon Sep 17 00:00:00 2001 From: ndickson Date: Thu, 18 Apr 2024 18:44:43 -0400 Subject: [PATCH 009/175] Changed asymmetric inverse normalization type to be implemented using symmetric diagonalization, avoiding the need to handle complex eigenvectors and eigenvalues --- graphium/graphium_cpp/spectral.cpp | 139 +++++++++++++++-------------- 1 file changed, 71 insertions(+), 68 deletions(-) diff --git a/graphium/graphium_cpp/spectral.cpp b/graphium/graphium_cpp/spectral.cpp index 3f45586ff..a5c693564 100644 --- a/graphium/graphium_cpp/spectral.cpp +++ b/graphium/graphium_cpp/spectral.cpp @@ -12,7 +12,7 @@ #include template -void compute_laplacian_eigendecomp_single(const uint32_t n, LaplacianData& data, bool symmetric) { +void compute_laplacian_eigendecomp_single(const uint32_t n, LaplacianData& data, Normalization normalization) { T* matrix = data.matrix_temp.data(); std::unique_ptr matrix_alloc(new T[n * n]); std::copy(matrix, matrix + n * n, matrix_alloc.get()); @@ -20,50 +20,18 @@ void compute_laplacian_eigendecomp_single(const uint32_t n, LaplacianData& da int64_t dims[2] = { n, n }; at::Tensor torch_matrix = torch_tensor_from_array(std::move(matrix_alloc), dims, 2, c10::ScalarType::Double); - at::Tensor eigenvalue_tensor; - 
at::Tensor eigenvector_tensor; - if (symmetric) { - // Using linalg_eigh should ensure we get all real eigenvalues and eigenvectors. - // Arbitrarily choose lower-triangular portion (L) - auto tuple = at::linalg_eigh(torch_matrix, c10::string_view("L",1)); - eigenvalue_tensor = std::move(std::get<0>(tuple)); - eigenvector_tensor = std::move(std::get<1>(tuple)); - } - else { - auto tuple = at::linalg_eig(torch_matrix); - eigenvalue_tensor = std::move(std::get<0>(tuple)); - eigenvector_tensor = std::move(std::get<1>(tuple)); - } + // Using linalg_eigh should ensure we get all real eigenvalues and eigenvectors. + // Arbitrarily choose lower-triangular portion (L) + auto tuple = at::linalg_eigh(torch_matrix, c10::string_view("L",1)); + at::Tensor eigenvalue_tensor = std::move(std::get<0>(tuple)); + at::Tensor eigenvector_tensor = std::move(std::get<1>(tuple)); assert(eigenvalue_tensor.ndimension() == 1); assert(eigenvector_tensor.ndimension() == 2); assert(eigenvalue_tensor.size(0) == n); assert(eigenvector_tensor.size(0) == n); assert(eigenvector_tensor.size(1) == n); - // Copy eigenvalues - data.eigenvalues_temp.resize(n); - if (eigenvalue_tensor.scalar_type() == c10::ScalarType::Double) { - const double* const eigenvalue_data = eigenvalue_tensor.data_ptr(); - for (size_t i = 0; i < n; ++i) { - data.eigenvalues_temp[i] = T(eigenvalue_data[i]); - } - } - else if (eigenvalue_tensor.scalar_type() == c10::ScalarType::ComplexDouble) { - // TODO: Decide what to do about legitimately complex eigenvalues. - // This should only occur with Normalization::INVERSE, because real, symmetric - // matrices have real eigenvalues. - // For now, just assume that they're supposed to be real and were only complex - // due to roundoff. 
- const c10::complex* const eigenvalue_data = eigenvalue_tensor.data_ptr>(); - for (size_t i = 0; i < n; ++i) { - data.eigenvalues_temp[i] = T(eigenvalue_data[i].real()); - } - } - else { - assert(0); - } - - // Copy eigenvectors + // Copy eigenvectors first, because normalization values are in eigenvalues_temp data.vectors.clear(); data.vectors.resize(size_t(n) * n, 0); T* vectors = data.vectors.data(); @@ -72,16 +40,51 @@ void compute_laplacian_eigendecomp_single(const uint32_t n, LaplacianData& da for (size_t i = 0; i < size_t(n) * n; ++i) { vectors[i] = T(eigenvector_data[i]); } + + if (normalization == Normalization::INVERSE) { + // Convert symmetric case eigenvectors to asymmetric case eigenvectors + + // Scale each row by the factor in eigenvalues_temp + for (size_t row = 0, i = 0; row < n; ++row) { + cosnt T factor = data.eigenvalues_temp[row]; + for (size_t col = 0; col < n; ++col, ++i) { + vectors[i] *= factor; + } + + // Clear to zero for the summing below + data.eigenvalues_temp[row] = 0; + } + + // Find each column length + for (size_t row = 0, i = 0; row < n; ++row) { + for (size_t col = 0; col < n; ++col, ++i) { + const T v = vectors[i]; + data.eigenvalues_temp[col] += v*v; + } + } + for (size_t col = 0; col < n; ++col) { + data.eigenvalues_temp[col] = T(1)/std::sqrt(data.eigenvalues_temp[col]); + } + + // Normalize each column + for (size_t row = 0, i = 0; row < n; ++row) { + for (size_t col = 0; col < n; ++col, ++i) { + vectors[i] *= data.eigenvalues_temp[col]; + } + } + } } - else if (eigenvector_tensor.scalar_type() == c10::ScalarType::ComplexDouble) { - // TODO: Decide what to do about legitimately complex eigenvectors. - // This should only occur with Normalization::INVERSE, because real, symmetric - // matrices have real eigenvectors. - // For now, just assume that they're supposed to be real and were only complex - // due to roundoff. 
- const c10::complex* const eigenvector_data = eigenvector_tensor.data_ptr>(); - for (size_t i = 0; i < size_t(n) * n; ++i) { - vectors[i] = T(eigenvector_data[i].real()); + else { + assert(0); + } + + // Copy eigenvalues + data.eigenvalues_temp.resize(n); + if (eigenvalue_tensor.scalar_type() == c10::ScalarType::Double) { + const double* const eigenvalue_data = eigenvalue_tensor.data_ptr(); + for (size_t i = 0; i < n; ++i) { + // No adjustment needed to eigenvalues between symmetric and asymmetric + data.eigenvalues_temp[i] = T(eigenvalue_data[i]); } } else { @@ -159,32 +162,32 @@ void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, } } else { + // The diagonalization of the asymmetric normalization can be computed from the + // diagonalization of the symmetric normalization, which is faster, so always use symmetric. + + // Find the normalization factor for each node (row or col) + // These values in eigenvalues_temp are also used inside compute_laplacian_eigendecomp_single + for (uint32_t node = 0; node < n; ++node) { + const uint32_t row_degree = row_starts[i + 1] - row_starts[i]; + const T denominator = (weights == nullptr) ? T(row_degree) : data.eigenvalues_temp[i]; + data.eigenvalues_temp[i] = T(1) / std::sqrt(denominator); + } + for (uint32_t i = 0, outi = 0; i < n; ++i, outi += n) { - const uint32_t rowDegree = row_starts[i + 1] - row_starts[i]; - if (rowDegree == 0) { + const uint32_t* neighbor_begin = neighbors + row_starts[i]; + const uint32_t* neighbor_end = neighbors + row_starts[i + 1]; + if (neighbor_begin == neighbor_end) { continue; } - matrix[outi + i] = T(1); - const T rowDenominator = (weights == nullptr) ? T(rowDegree) : data.eigenvalues_temp[i]; - const T inverseRowDegree = (normalization == Normalization::INVERSE) ? 
T(1) / rowDenominator : 0; + // Diagonal is always exactly 1 when normalized (after skipping zero-degree nodes) + matrix[outi + i] = T(1); - const uint32_t* neighbor_begin = neighbors + row_starts[i]; - const uint32_t* neighbor_end = neighbors + row_starts[i + 1]; + const T row_factor = data.eigenvalues_temp[i]; for (; neighbor_begin < neighbor_end; ++neighbor_begin) { uint32_t neighbor = *neighbor_begin; - if (normalization == Normalization::SYMMETRIC) { - const uint32_t colDegree = row_starts[neighbor + 1] - row_starts[neighbor]; - if (colDegree == 0) { - continue; - } - const T colDenominator = (weights == nullptr) ? T(colDegree) : data.eigenvalues_temp[neighbor]; - matrix[outi + neighbor] = T(-1) / std::sqrt(rowDenominator * colDenominator); - } - else { - assert(normalization == Normalization::INVERSE); - matrix[outi + neighbor] = -inverseRowDegree; - } + const T col_factor = data.eigenvalues_temp[neighbor]; + matrix[outi + neighbor] = -row_factor * col_factor; } } } @@ -220,7 +223,7 @@ void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, } } if (num_components == 1) { - compute_laplacian_eigendecomp_single(n, data, normalization != Normalization::INVERSE); + compute_laplacian_eigendecomp_single(n, data, normalization); return; } From c53451ae0a56c4acd9bb273238e7897a354a8345 Mon Sep 17 00:00:00 2001 From: ndickson Date: Thu, 18 Apr 2024 18:58:28 -0400 Subject: [PATCH 010/175] Fixed compile errors --- graphium/graphium_cpp/spectral.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/graphium/graphium_cpp/spectral.cpp b/graphium/graphium_cpp/spectral.cpp index a5c693564..6011f028b 100644 --- a/graphium/graphium_cpp/spectral.cpp +++ b/graphium/graphium_cpp/spectral.cpp @@ -46,7 +46,7 @@ void compute_laplacian_eigendecomp_single(const uint32_t n, LaplacianData& da // Scale each row by the factor in eigenvalues_temp for (size_t row = 0, i = 0; row < n; ++row) { - cosnt T factor = data.eigenvalues_temp[row]; + 
const T factor = data.eigenvalues_temp[row]; for (size_t col = 0; col < n; ++col, ++i) { vectors[i] *= factor; } @@ -168,9 +168,9 @@ void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, // Find the normalization factor for each node (row or col) // These values in eigenvalues_temp are also used inside compute_laplacian_eigendecomp_single for (uint32_t node = 0; node < n; ++node) { - const uint32_t row_degree = row_starts[i + 1] - row_starts[i]; - const T denominator = (weights == nullptr) ? T(row_degree) : data.eigenvalues_temp[i]; - data.eigenvalues_temp[i] = T(1) / std::sqrt(denominator); + const uint32_t row_degree = row_starts[node + 1] - row_starts[node]; + const T denominator = (weights == nullptr) ? T(row_degree) : data.eigenvalues_temp[node]; + data.eigenvalues_temp[node] = T(1) / std::sqrt(denominator); } for (uint32_t i = 0, outi = 0; i < n; ++i, outi += n) { @@ -263,7 +263,7 @@ void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, } // Find its eigenvalues and eigenvectors - compute_laplacian_eigendecomp_single(sub_n, sub_data, normalization != Normalization::INVERSE); + compute_laplacian_eigendecomp_single(sub_n, sub_data, normalization); // Copy the eigenvalues to the output. The excess is already zeroed out. 
// Unlike the eigenvectors, below, might as well switch to using columns From 268e24589922024b632b3a20c0b0c9116837bea1 Mon Sep 17 00:00:00 2001 From: ndickson Date: Fri, 19 Apr 2024 19:19:59 -0400 Subject: [PATCH 011/175] Some simplification in collate.py --- graphium/data/collate.py | 67 +++++++++------------------------------- 1 file changed, 15 insertions(+), 52 deletions(-) diff --git a/graphium/data/collate.py b/graphium/data/collate.py index f685cd009..1965c729b 100644 --- a/graphium/data/collate.py +++ b/graphium/data/collate.py @@ -22,7 +22,6 @@ from typing import Union, List, Optional, Dict, Type, Any, Iterable from torch_geometric.data import Data, Batch -from graphium.features import to_dense_array from graphium.utils.packing import fast_packing, get_pack_sizes, node_to_pack_indices_mask from loguru import logger from graphium.data.utils import get_keys @@ -92,17 +91,18 @@ def graphium_collate_fn( elem = elements[0] if isinstance(elem, Mapping): + if "features" in elem: + num_nodes = [d["features"].num_nodes for d in elements] + num_edges = [d["features"].num_edges for d in elements] + else: + num_nodes = [d["num_nodes"] for d in elements] + num_edges = [d["num_edges"] for d in elements] + batch = {} for key in elem: # Multitask setting: We have to pad the missing labels if key == "labels": labels = [d[key] for d in elements] - if "features" in elem: - num_nodes = [d["features"].num_nodes for d in elements] - num_edges = [d["features"].num_edges for d in elements] - else: - num_nodes = [d["num_nodes"] for d in elements] - num_edges = [d["num_edges"] for d in elements] batch[key] = collate_labels(labels, labels_num_cols_dict, labels_dtype_dict, num_nodes, num_edges) elif key == "num_nodes" or key == "num_edges": continue @@ -110,7 +110,7 @@ def graphium_collate_fn( # If a PyG Graph is provided, use the PyG batching elif isinstance(elem[key], Data): pyg_graphs = [d[key] for d in elements] - batch[key] = collage_pyg_graph(pyg_graphs, 
batch_size_per_pack=batch_size_per_pack) + batch[key] = collage_pyg_graph(pyg_graphs, num_nodes, batch_size_per_pack=batch_size_per_pack) # Ignore the collate for specific keys elif key in do_not_collate_keys: @@ -131,42 +131,29 @@ def graphium_collate_fn( return default_collate(elements) -def collage_pyg_graph(pyg_graphs: Iterable[Union[Data, Dict]], batch_size_per_pack: Optional[int] = None): +def collage_pyg_graph(pyg_graphs: List[Data], num_nodes: List[int], batch_size_per_pack: Optional[int] = None): """ Function to collate pytorch geometric graphs. Convert all numpy types to torch Convert edge indices to int64 Parameters: - pyg_graphs: Iterable of PyG graphs + pyg_graphs: List of PyG graphs batch_size_per_pack: The number of graphs to pack together. This is useful for using packing with the Transformer, """ # Calculate maximum number of nodes per graph in current batch - num_nodes_list = [] - for pyg_graph in pyg_graphs: - num_nodes_list.append(pyg_graph["num_nodes"]) - max_num_nodes_per_graph = max(num_nodes_list) + max_num_nodes_per_graph = max(num_nodes) - pyg_batch = [] for pyg_graph in pyg_graphs: for pyg_key in get_keys(pyg_graph): - tensor = pyg_graph[pyg_key] - - # Convert numpy/scipy to Pytorch - if isinstance(tensor, (ndarray, spmatrix)): - tensor = torch.as_tensor(to_dense_array(tensor, tensor.dtype)) - # pad nodepair-level positional encodings if pyg_key.startswith("nodepair_"): - pyg_graph[pyg_key] = pad_nodepairs(tensor, pyg_graph["num_nodes"], max_num_nodes_per_graph) - else: - pyg_graph[pyg_key] = tensor + pyg_graph[pyg_key] = pad_nodepairs(pyg_graph[pyg_key], pyg_graph.num_nodes, max_num_nodes_per_graph) # Convert edge index to int64 pyg_graph.edge_index = pyg_graph.edge_index.to(torch.int64) - pyg_batch.append(pyg_graph) # Apply the packing at the mini-batch level. This is useful for using packing with the Transformer, # especially in the case of the large graphs being much larger than the small graphs. 
@@ -176,16 +163,15 @@ def collage_pyg_graph(pyg_graphs: Iterable[Union[Data, Dict]], batch_size_per_pa raise NotImplementedError( "Packing is not yet functional, as it changes the order of the graphs in the batch without changing the label order" ) - num_nodes = [g.num_nodes for g in pyg_batch] packed_graph_idx = fast_packing(num_nodes, batch_size_per_pack) # Get the node to pack indices and the mask pack_from_node_idx, pack_attn_mask = node_to_pack_indices_mask(packed_graph_idx, num_nodes) - for pyg_graph in pyg_batch: + for pyg_graph in pyg_graphs: pyg_graph.pack_from_node_idx = pack_from_node_idx pyg_graph.pack_attn_mask = pack_attn_mask - return Batch.from_data_list(pyg_batch) + return Batch.from_data_list(pyg_graphs) def pad_to_expected_label_size(labels: torch.Tensor, label_rows: int, label_cols: int): @@ -207,29 +193,6 @@ def pad_to_expected_label_size(labels: torch.Tensor, label_rows: int, label_cols return torch.nn.functional.pad(labels, pad_sizes, value=torch.nan) -def collate_pyg_graph_labels(pyg_labels: List[Data]): - """ - Function to collate pytorch geometric labels. 
- Convert all numpy types to torch - - Parameters: - pyg_labels: Iterable of PyG label Data objects - """ - pyg_batch = [] - for pyg_label in pyg_labels: - for pyg_key in set(get_keys(pyg_label)) - set(["x", "edge_index"]): - tensor = pyg_label[pyg_key] - # Convert numpy/scipy to Pytorch - if isinstance(tensor, (ndarray, spmatrix)): - tensor = torch.as_tensor(to_dense_array(tensor, tensor.dtype)) - - pyg_label[pyg_key] = tensor - - pyg_batch.append(pyg_label) - - return Batch.from_data_list(pyg_batch) - - def get_expected_label_rows( label_data: Data, task: str, @@ -308,7 +271,7 @@ def collate_labels( this_label[task] = pad_to_expected_label_size(this_label[task], label_rows, labels_num_cols_dict[task]) - return collate_pyg_graph_labels(labels) + return Batch.from_data_list(labels) def pad_nodepairs(pe: torch.Tensor, num_nodes: int, max_num_nodes_per_graph: int): From e032e8ed99b70c40c69f54d22a5b87c3eeae1ea9 Mon Sep 17 00:00:00 2001 From: ndickson Date: Fri, 19 Apr 2024 19:20:29 -0400 Subject: [PATCH 012/175] Deleting most of the Python featurization code --- graphium/features/__init__.py | 1 - graphium/features/commute.py | 69 ----- graphium/features/electrostatic.py | 58 ---- graphium/features/featurizer.py | 99 +----- graphium/features/graphormer.py | 55 ---- graphium/features/positional_encoding.py | 181 ----------- graphium/features/rw.py | 169 ---------- graphium/features/spectral.py | 218 ------------- graphium/features/transfer_pos_level.py | 376 ----------------------- 9 files changed, 4 insertions(+), 1222 deletions(-) delete mode 100644 graphium/features/commute.py delete mode 100644 graphium/features/electrostatic.py delete mode 100644 graphium/features/graphormer.py delete mode 100644 graphium/features/positional_encoding.py delete mode 100644 graphium/features/rw.py delete mode 100644 graphium/features/spectral.py delete mode 100644 graphium/features/transfer_pos_level.py diff --git a/graphium/features/__init__.py b/graphium/features/__init__.py index 
a4ebafc8c..e9cb41d1f 100644 --- a/graphium/features/__init__.py +++ b/graphium/features/__init__.py @@ -1,3 +1,2 @@ from .featurizer import mol_to_graph_signature from .featurizer import mol_to_pyggraph -from .featurizer import to_dense_array diff --git a/graphium/features/commute.py b/graphium/features/commute.py deleted file mode 100644 index a7cea768c..000000000 --- a/graphium/features/commute.py +++ /dev/null @@ -1,69 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. --------------------------------------------------------------------------------- -""" - - -from typing import Tuple, Union, Dict, Any - -import numpy as np - -from scipy.sparse import spmatrix, issparse -from scipy.linalg import pinv - - -def compute_commute_distances( - adj: Union[np.ndarray, spmatrix], num_nodes: int, cache: Dict[str, Any] -) -> Tuple[np.ndarray, str, Dict[str, Any]]: - """ - Compute avg. commute time/distance between nodepairs. This is the avg. number of steps a random walker, starting - at node i, will take before reaching a given node j for the first time, and then return to node i. - - Reference: Saerens et al. "The principal components analysis of a graph, and its relationships to spectral clustering." ECML. 2004. - - Parameters: - adj [num_nodes, num_nodes]: Adjacency matrix - num_nodes: Number of nodes in the graph - cache: Dictionary of cached objects - Returns: - dist [num_nodes, num_nodes]: 2D array with avg. 
commute distances between nodepairs - base_level: Indicator of the output pos_level (node, edge, nodepair, graph) -> here nodepair - cache: Updated dictionary of cached objects - """ - - base_level = "nodepair" - - if "commute" in cache: - dist = cache["commute"] - - else: - if issparse(adj): - adj = adj.toarray() - - volG = adj.sum() - - if "pinvL" in cache: - pinvL = cache["pinvL"] - - else: - L = np.diagflat(np.sum(adj, axis=1)) - adj - pinvL = pinv(L) - cache["pinvL"] = pinvL - - dist = volG * np.asarray( - [ - [pinvL[i, i] + pinvL[j, j] - 2 * pinvL[i, j] for j in range(num_nodes)] - for i in range(num_nodes) - ] - ) - cache["commute"] = dist - - return dist, base_level, cache diff --git a/graphium/features/electrostatic.py b/graphium/features/electrostatic.py deleted file mode 100644 index 58dc115f7..000000000 --- a/graphium/features/electrostatic.py +++ /dev/null @@ -1,58 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. --------------------------------------------------------------------------------- -""" - - -from typing import Tuple, Union, Dict, Any - -import numpy as np - -from scipy.linalg import pinv -from scipy.sparse import spmatrix, issparse - - -def compute_electrostatic_interactions( - adj: Union[np.ndarray, spmatrix], cache: Dict[str, Any] -) -> Tuple[np.ndarray, str, Dict[str, Any]]: - """ - Compute electrostatic interaction of nodepairs. 
- - Parameters: - adj [num_nodes, num_nodes]: Adjacency matrix - cache: Dictionary of cached objects - Returns: - electrostatic [num_nodes, num_nodes]: 2D array with electrostatic interactions of node nodepairs - base_level: Indicator of the output pos_level (node, edge, nodepair, graph) -> here nodepair - cache: Updated dictionary of cached objects - """ - - base_level = "nodepair" - - if "electrostatic" in cache: - electrostatic = cache["electrostatic"] - - else: - if "pinvL" in cache: - pinvL = cache["pinvL"] - - else: - if issparse(adj): - adj = adj.toarray() - - L = np.diagflat(np.sum(adj, axis=1)) - adj - pinvL = pinv(L) - cache["pinvL"] = pinvL - - electrostatic = pinvL - np.diag(pinvL) # This means that the "ground" is set to any given atom - cache["electrostatic"] = electrostatic - - return electrostatic, base_level, cache diff --git a/graphium/features/featurizer.py b/graphium/features/featurizer.py index 70fda1f3c..996662db2 100644 --- a/graphium/features/featurizer.py +++ b/graphium/features/featurizer.py @@ -28,97 +28,6 @@ import graphium_cpp -def to_dense_array(array: np.ndarray, dtype: str = None) -> np.ndarray: - r""" - Assign the node data - Parameters: - array: The array to convert to dense - dtype: The dtype of the array - Returns: - The dense array - """ - if array is not None: - if issparse(array): - if array.dtype == np.float16: # float16 doesn't support `todense` - array = array.astype(np.float32) - array = array.todense() - - if dtype is not None: - array = array.astype(dtype) - return array - - -def to_dense_tensor(tensor: Tensor, dtype: str = None) -> Tensor: - r""" - Assign the node data - Parameters: - array: The array to convert to dense - dtype: The dtype of the array - Returns: - The dense array - """ - if tensor is not None: - if tensor.is_sparse: - tensor = tensor.todense() - if dtype is not None: - tensor = tensor.to(dtype) - return tensor - - -def get_mol_conformer_features( - mol: dm.Mol, - property_list: Union[List[str], 
List[Callable]], - mask_nan: Optional[Union[float, str]] = None, -) -> Dict[str, np.ndarray]: - r"""obtain the conformer features of a molecule - Parameters: - - mol: - molecule from which to extract the properties - - property_list: - A list of conformer property to get from the molecule - Accepted properties are: - - "positions_3d" - - Returns: - prop_dict: a dictionary where the element of ``property_list`` are the keys - """ - prop_dict = {} - has_conf = True - - try: - mol.GetConformer() - except: - has_conf = False - # * currently only accepts "positions_3d", raise errors otherwise - for prop in property_list: - if isinstance(prop, str): - if prop in ["positions_3d"]: # locating 3d conformer coordinates - if not has_conf: - positions = np.full((mol.GetNumAtoms(), 3), float("nan"), dtype=np.float16) - else: - positions = [[], [], []] - for i in range(mol.GetNumAtoms()): - pos = mol.GetConformer().GetAtomPosition(i) - positions[0].append(pos.x) - positions[1].append(pos.y) - positions[2].append(pos.z) - positions = np.asarray(positions, dtype=np.float16).T - prop_dict[prop] = positions - else: - raise ValueError( - str(prop) + " is not currently supported as a conformer property in `property_list`" - ) - else: - raise ValueError(f"Elements in `property_list` must be str or callable, provided `{type(prop)}`") - - prop_dict[prop] = _mask_nans_inf(mask_nan, prop_dict[prop], prop) - - return prop_dict - - - def get_simple_mol_conformer(mol: dm.Mol) -> Union[Chem.rdchem.Conformer, None]: r""" If the molecule has a conformer, then it will return the conformer at idx `0`. 
@@ -161,14 +70,14 @@ def get_simple_mol_conformer(mol: dm.Mol) -> Union[Chem.rdchem.Conformer, None]: def mol_to_pyggraph( mol: str, - atom_property_list_onehot: Union[List[str],torch.Tensor] = [], - atom_property_list_float: Union[List[Union[str, Callable]],torch.Tensor] = [], + atom_property_list_onehot: torch.Tensor = None, + atom_property_list_float: torch.Tensor = None, conformer_property_list: List[str] = [], - edge_property_list: Union[List[str],torch.Tensor] = [], + edge_property_list: torch.Tensor = None, add_self_loop: bool = False, explicit_H: bool = False, use_bonds_weights: bool = False, - pos_encoding_as_features: Union[Dict[str, Any], Tuple[List[str],torch.Tensor]] = None, + pos_encoding_as_features: Tuple[List[str],torch.Tensor] = None, dtype: np.dtype = np.float16, on_error: str = "ignore", mask_nan: Union[str, float, type(None)] = "raise", diff --git a/graphium/features/graphormer.py b/graphium/features/graphormer.py deleted file mode 100644 index d62010801..000000000 --- a/graphium/features/graphormer.py +++ /dev/null @@ -1,55 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. 
--------------------------------------------------------------------------------- -""" - - -from typing import Tuple, Union, Dict, Any - -import numpy as np -import networkx as nx - -from scipy.sparse import spmatrix, issparse - - -def compute_graphormer_distances( - adj: Union[np.ndarray, spmatrix], num_nodes: int, cache: Dict[str, Any] -) -> Tuple[np.ndarray, str, Dict[str, Any]]: - """ - Compute Graphormer distance between nodepairs. - - Parameters: - adj [num_nodes, num_nodes]: Adjacency matrix - num_nodes: Number of nodes in the graph - cache: Dictionary of cached objects - Returns: - dist [num_nodes, num_nodes]: 2D array with Graphormer distances between nodepairs - base_level: Indicator of the output pos_level (node, edge, nodepair, graph) -> here nodepair - cache: Updated dictionary of cached objects - """ - - base_level = "nodepair" - - if "graphormer" in cache: - dist = cache["graphormer"] - - else: - if issparse(adj): - adj = adj.toarray() - - G = nx.from_numpy_array(adj) - paths = nx.all_pairs_shortest_path(G) - - dist_dict = {i: {j: len(path) - 1 for j, path in paths_from_i.items()} for i, paths_from_i in paths} - dist = np.asarray([[dist_dict[i][j] for j in range(num_nodes)] for i in range(num_nodes)]) - cache["graphormer"] = dist - - return dist, base_level, cache diff --git a/graphium/features/positional_encoding.py b/graphium/features/positional_encoding.py deleted file mode 100644 index 8acc231d8..000000000 --- a/graphium/features/positional_encoding.py +++ /dev/null @@ -1,181 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. 
- -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. --------------------------------------------------------------------------------- -""" - - -from typing import Tuple, Union, Optional, Dict, Any, OrderedDict -from copy import deepcopy -import numpy as np -import torch -from scipy.sparse import spmatrix -from collections import OrderedDict as OderedDictClass - -from graphium.features.spectral import compute_laplacian_pe -from graphium.features.rw import compute_rwse -from graphium.features.electrostatic import compute_electrostatic_interactions -from graphium.features.commute import compute_commute_distances -from graphium.features.graphormer import compute_graphormer_distances -from graphium.features.transfer_pos_level import transfer_pos_level - - -def get_all_positional_encodings( - adj: Union[np.ndarray, spmatrix], - num_nodes: int, - pos_kwargs: Optional[Dict] = None, -) -> Tuple["OrderedDict[str, np.ndarray]"]: - r""" - Get features positional encoding. - - Parameters: - adj [num_nodes, num_nodes]: Adjacency matrix of the graph - num_nodes: Number of nodes in the graph - pos_encoding_as_features: keyword arguments for function `graph_positional_encoder` - to generate positional encoding for node features. 
- - Returns: - pe_dict: Dictionary of positional and structural encodings - """ - - pos_kwargs = {} if pos_kwargs is None else pos_kwargs - - pe_dict = OderedDictClass() - - # Initialize cache - cache = {} - - # Get the positional encoding for the features - if len(pos_kwargs) > 0: - for pos_name, this_pos_kwargs in pos_kwargs["pos_types"].items(): - this_pos_kwargs = deepcopy(this_pos_kwargs) - pos_type = this_pos_kwargs.pop("pos_type", None) - pos_level = this_pos_kwargs.pop("pos_level", None) - this_pe, cache = graph_positional_encoder( - deepcopy(adj), - num_nodes, - pos_type=pos_type, - pos_level=pos_level, - pos_kwargs=this_pos_kwargs, - cache=cache, - ) - if pos_level == "node": - pe_dict.update({f"{pos_type}": this_pe}) - else: - pe_dict.update({f"{pos_level}_{pos_type}": this_pe}) - - return pe_dict - - -def graph_positional_encoder( - adj: Union[np.ndarray, spmatrix], - num_nodes: int, - pos_type: Optional[str] = None, - pos_level: Optional[str] = None, - pos_kwargs: Optional[Dict[str, Any]] = None, - cache: Optional[Dict[str, Any]] = None, -) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]: - r""" - Get a positional encoding that depends on the parameters. - - Parameters: - adj [num_nodes, num_nodes]: Adjacency matrix of the graph - num_nodes: Number of nodes in the graph - pos_type: The type of positional encoding to use. If None, it must be provided by `pos_kwargs["pos_type"]`. Supported types are: - - laplacian_eigvec \ - - laplacian_eigval \ -> cache connected comps. & eigendecomp. - - rwse - - electrostatic \ - - commute \ -> cache pinvL - - graphormer - pos_level: Positional level to output. If None, it must be provided by `pos_kwargs["pos_level"]`. - - node - - edge - - nodepair - - graph - pos_kwargs: Extra keyword arguments for the positional encoding. Can include the keys pos_type and pos_level. 
- cache: Dictionary of cached objects - - Returns: - pe: Positional or structural encoding - cache: Updated dictionary of cached objects - """ - - pos_kwargs = deepcopy(pos_kwargs) - if pos_kwargs is None: - pos_kwargs = {} - if cache is None: - cache = {} - - # Get the positional type - pos_type2 = pos_kwargs.pop("pos_type", None) - if pos_type is None: - pos_type = pos_type2 - if pos_type2 is not None: - assert ( - pos_type == pos_type2 - ), f"The positional type must be the same in `pos_type` and `pos_kwargs['pos_type']`. Provided: {pos_type} and {pos_type2}" - assert pos_type is not None, "Either `pos_type` or `pos_kwargs['pos_type']` must be provided." - - # Get the positional level - pos_level2 = pos_kwargs.pop("pos_level", None) - if pos_level is None: - pos_level = pos_level2 - if pos_level2 is not None: - assert ( - pos_level == pos_level2 - ), f"The positional level must be the same in `pos_level` and `pos_kwargs['pos_level']`. Provided: {pos_level} and {pos_level2}" - assert pos_level is not None, "Either `pos_level` or `pos_kwargs['pos_level']` must be provided." 
- - # Convert to numpy array - if isinstance(adj, torch.sparse.Tensor): - adj = adj.to_dense().numpy() - elif isinstance(adj, torch.Tensor): - adj = adj.numpy() - adj = adj.astype(np.float64) - - # Calculate positional encoding - if pos_type == "laplacian_eigvec": - _, pe, base_level, cache = compute_laplacian_pe(adj, cache=cache, **pos_kwargs) - - elif pos_type == "laplacian_eigval": - pe, _, base_level, cache = compute_laplacian_pe(adj, cache=cache, **pos_kwargs) - - elif pos_type == "rw_return_probs": - pe, base_level, cache = compute_rwse( - adj.astype(np.float32), num_nodes=num_nodes, cache=cache, pos_type=pos_type, **pos_kwargs - ) - - elif pos_type == "rw_transition_probs": - pe, base_level, cache = compute_rwse( - adj.astype(np.float32), num_nodes=num_nodes, cache=cache, pos_type=pos_type, **pos_kwargs - ) - - elif pos_type == "electrostatic": - pe, base_level, cache = compute_electrostatic_interactions(adj, cache, **pos_kwargs) - - elif pos_type == "commute": - pe, base_level, cache = compute_commute_distances(adj, num_nodes, cache, **pos_kwargs) - - elif pos_type == "graphormer": - pe, base_level, cache = compute_graphormer_distances(adj, num_nodes, cache, **pos_kwargs) - - else: - raise ValueError(f"Unknown `pos_type`: {pos_type}") - - # Convert to float32 and Convert between different pos levels - if isinstance(pe, (list, tuple)): - pe = [this_pe.astype(np.float32) for this_pe in pe] - pe = [transfer_pos_level(this_pe, base_level, pos_level, adj, num_nodes, cache) for this_pe in pe] - else: - pe = np.real(pe).astype(np.float32) - pe = transfer_pos_level(pe, base_level, pos_level, adj, num_nodes, cache) - - return pe, cache diff --git a/graphium/features/rw.py b/graphium/features/rw.py deleted file mode 100644 index c7eada2ba..000000000 --- a/graphium/features/rw.py +++ /dev/null @@ -1,169 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and 
Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. --------------------------------------------------------------------------------- -""" - - -from typing import Tuple, Union, Optional, List, Dict, Any, Iterable - -from scipy.sparse import issparse, spmatrix, coo_matrix -import numpy as np -import torch - -from torch_geometric.utils import to_dense_adj, from_scipy_sparse_matrix -from torch_scatter import scatter_add -from torch_geometric.utils.num_nodes import maybe_num_nodes - - -def compute_rwse( - adj: Union[np.ndarray, spmatrix], - ksteps: Union[int, List[int]], - num_nodes: int, - cache: Dict[str, Any], - pos_type: str = "rw_return_probs" or "rw_transition_probs", - space_dim: int = 0, -) -> Tuple[np.ndarray, str, Dict[str, Any]]: - """ - Compute Random Walk Spectral Embedding (RWSE) for given list of K steps. - - Parameters: - adj [num_nodes, num_nodes]: Adjacency matrix - ksteps: List of numbers of steps for the random walks. If int, a list is generated from 1 to ksteps. - num_nodes: Number of nodes in the graph - cache: Dictionary of cached objects - pos_type: Desired output - space_dim: Estimated dimensionality of the space. Used to - correct the random-walk diagonal by a factor `k^(space_dim/2)`. - In euclidean space, this correction means that the height of - the gaussian distribution stays almost constant across the number of - steps, if `space_dim` is the dimension of the euclidean space. 
- Returns: - Two possible outputs: - rw_return_probs [num_nodes, len(ksteps)]: Random-Walk k-step landing probabilities - rw_transition_probs [num_nodes, num_nodes, len(ksteps)]: Random-Walk k-step transition probabilities - base_level: Indicator of the output pos_level (node, edge, nodepair, graph) -> here either node or nodepair - cache: Updated dictionary of cached objects - """ - - base_level = "node" if pos_type == "rw_return_probs" else "nodepair" - - # Manually handles edge case of 1 atom molecules here - if not isinstance(ksteps, Iterable): - ksteps = list(range(1, ksteps + 1)) - if num_nodes == 1: - if pos_type == "rw_return_probs": - return np.ones((1, len(ksteps))), base_level, cache - else: - return np.ones((1, 1, len(ksteps))), base_level, cache - - # Get the edge indices from the adjacency matrix - if not issparse(adj): - if "coo_adj" in cache: - adj = cache["coo_adj"] - elif "csr_adj" in cache: - adj = cache["csr_adj"] - else: - adj = coo_matrix(adj, dtype=np.float64) - cache["coo_adj"] = adj - - edge_index, edge_weight = from_scipy_sparse_matrix(adj) - - # Compute the random-walk transition probabilities - if "ksteps" in cache: - cached_k = cache["ksteps"] - missing_k = [k for k in ksteps if k not in cached_k] - if missing_k == []: - pass - elif min(missing_k) < min(cached_k): - Pk_dict = get_Pks(missing_k, edge_index=edge_index, edge_weight=edge_weight, num_nodes=num_nodes) - cache["ksteps"] = sorted(missing_k + cache["ksteps"]) - for k in missing_k: - cache["Pk"][k] = Pk_dict[k] - else: - start_k = min([max(cached_k), min(missing_k)]) - start_Pk = cache["Pk"][start_k] - Pk_dict = get_Pks( - missing_k, - edge_index=edge_index, - edge_weight=edge_weight, - num_nodes=num_nodes, - start_Pk=start_Pk, - start_k=start_k, - ) - cache["ksteps"] = sorted(cache["ksteps"] + missing_k) - for k in missing_k: - cache["Pk"][k] = Pk_dict[k] - else: - Pk_dict = get_Pks(ksteps, edge_index=edge_index, edge_weight=edge_weight, num_nodes=num_nodes) - - cache["ksteps"] 
= list(Pk_dict.keys()) - cache["Pk"] = Pk_dict - - pe_list = [] - if pos_type == "rw_return_probs": - for k in ksteps: - pe_list.append(torch.diagonal(cache["Pk"][k], dim1=-2, dim2=-1) * (k ** (space_dim / 2))) - else: - for k in ksteps: - pe_list.append(cache["Pk"][k]) - - pe = torch.stack(pe_list, dim=-1).numpy() - - return pe, base_level, cache - - -def get_Pks( - ksteps: List[int], - edge_index: Tuple[torch.Tensor, torch.Tensor], - edge_weight: Optional[torch.Tensor] = None, - num_nodes: Optional[int] = None, - start_Pk: Optional[torch.Tensor] = None, - start_k: Optional[int] = None, -) -> Dict[int, np.ndarray]: - """ - Compute Random Walk landing probabilities for given list of K steps. - - Parameters: - ksteps: List of numbers of k-steps for which to compute the RW landings - edge_index: PyG sparse representation of the graph - edge_weight: Edge weights - num_nodes: Number of nodes in the graph - - Returns: - 2D Tensor with shape (num_nodes, len(ksteps)) with RW landing probs - """ - if edge_weight is None: - edge_weight = torch.ones(edge_index.size(1), device=edge_index.device) - num_nodes = maybe_num_nodes(edge_index, num_nodes) - src = edge_index[0] - deg = scatter_add(edge_weight, src, dim=0, dim_size=num_nodes) # Out degrees. 
- deg_inv = deg.pow(-1.0) - deg_inv.masked_fill_(deg_inv == float("inf"), 0) - - if edge_index.numel() == 0: - P = edge_index.new_zeros((1, num_nodes, num_nodes)) - else: - # P = D^-1 * A - P = torch.diag(deg_inv).float() @ to_dense_adj( - edge_index, max_num_nodes=num_nodes - ) # 1 x (Num nodes) x (Num nodes) - - if start_Pk is not None: - Pk = start_Pk @ P.clone().detach().matrix_power(min(ksteps) - start_k) - else: - Pk = P.clone().detach().matrix_power(min(ksteps)) - - Pk_dict = {} - for k in range(min(ksteps), max(ksteps) + 1): - Pk_dict[k] = Pk.squeeze(0) - Pk = Pk @ P - - return Pk_dict diff --git a/graphium/features/spectral.py b/graphium/features/spectral.py deleted file mode 100644 index 55d8527a4..000000000 --- a/graphium/features/spectral.py +++ /dev/null @@ -1,218 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. 
--------------------------------------------------------------------------------- -""" - - -from typing import Tuple, Union, Dict, Any -from scipy.linalg import eig -from scipy.sparse import csr_matrix, diags, issparse, spmatrix -import numpy as np -import torch -import networkx as nx - -from graphium.utils.tensor import is_dtype_torch_tensor, is_dtype_numpy_array - - -def compute_laplacian_pe( - adj: Union[np.ndarray, spmatrix], - num_pos: int, - cache: Dict[str, Any], - disconnected_comp: bool = True, - normalization: str = "none", -) -> Tuple[np.ndarray, str, Dict[str, Any]]: - r""" - Compute the Laplacian eigenvalues and eigenvectors of the Laplacian of the graph. - - Parameters: - adj [num_nodes, num_nodes]: Adjacency matrix of the graph - num_pos: Number of Laplacian eigenvectors to compute - cache: Dictionary of cached objects - disconnected_comp: Whether to compute the eigenvectors for each connected component - normalization: Normalization to apply to the Laplacian - - Returns: - Two possible outputs: - eigvals [num_nodes, num_pos]: Eigenvalues of the Laplacian repeated for each node. - This repetition is necessary in case of disconnected components, where - the eigenvalues of the Laplacian are not the same for each node. 
- eigvecs [num_nodes, num_pos]: Eigenvectors of the Laplacian - base_level: Indicator of the output pos_level (node, edge, nodepair, graph) -> here node - cache: Updated dictionary of cached objects - """ - - base_level = "node" - - # Sparsify the adjacency patrix - if not issparse(adj): - if "csr_adj" not in cache: - adj = csr_matrix(adj, dtype=np.float64) - cache["csr_adj"] = adj - else: - adj = cache["csr_adj"] - - # Compute the Laplacian, and normalize it - if f"L_{normalization}_sp" not in cache: - D = np.array(np.sum(adj, axis=1)).flatten() - D_mat = diags(D) - L = -adj + D_mat - L_norm = normalize_matrix(L, degree_vector=D, normalization=normalization) - cache[f"L_{normalization}_sp"] = L_norm - else: - L_norm = cache[f"L_{normalization}_sp"] - - components = [] - - if disconnected_comp: - if "components" not in cache: - # Get the list of connected components - components = list(nx.connected_components(nx.from_scipy_sparse_array(adj))) - cache["components"] = components - - else: - components = cache["components"] - - # Compute the eigenvectors for each connected component, and stack them together - if len(components) > 1: - if "lap_eig_comp" not in cache: - eigvals = np.zeros((adj.shape[0], num_pos), dtype=np.complex64) - eigvecs = np.zeros((adj.shape[0], num_pos), dtype=np.complex64) - for component in components: - comp = list(component) - this_L = L_norm[comp][:, comp] - this_eigvals, this_eigvecs = _get_positional_eigvecs(this_L, num_pos=num_pos) - - # Eigenvalues previously set to infinity are now set to 0 - # Any NaN in the eigvals or eigvecs will be set to 0 - this_eigvecs[~np.isfinite(this_eigvecs)] = 0.0 - this_eigvals[~np.isfinite(this_eigvals)] = 0.0 - - eigvals[comp, :] = np.expand_dims(this_eigvals, axis=0) - eigvecs[comp, :] = this_eigvecs - cache["lap_eig_comp"] = (eigvals, eigvecs) - - else: - eigvals, eigvecs = cache["lap_eig_comp"] - - else: - if "lap_eig" not in cache: - eigvals, eigvecs = _get_positional_eigvecs(L, num_pos=num_pos) - - # 
Eigenvalues previously set to infinity are now set to 0 - # Any NaN in the eigvals or eigvecs will be set to 0 - eigvecs[~np.isfinite(eigvecs)] = 0.0 - eigvals[~np.isfinite(eigvals)] = 0.0 - eigvals = np.repeat(np.expand_dims(eigvals, axis=0), adj.shape[0], axis=0) - - cache["lap_eig"] = (eigvals, eigvecs) - - else: - eigvals, eigvecs = cache["lap_eig"] - - return eigvals, eigvecs, base_level, cache - - -def _get_positional_eigvecs( - matrix: Union[np.ndarray, spmatrix], - num_pos: int, -) -> Tuple[np.ndarray, np.ndarray]: - r""" - compute the eigenvalues and eigenvectors of a matrix - Parameters: - matrix: Matrix to compute the eigenvalues and eigenvectors of - num_pos: Number of eigenvalues and eigenvectors to compute - Returns: - eigvals: Eigenvalues of the matrix - eigvecs: Eigenvectors of the matrix - """ - mat_len = matrix.shape[0] - eigvals, eigvecs = eig(matrix.todense()) - - # Pad with non-sense eigenvectors if required - if num_pos > mat_len: - temp_EigVal = np.ones(num_pos - mat_len, dtype=np.float64) + float("inf") - temp_EigVec = np.zeros((mat_len, num_pos - mat_len), dtype=np.float64) - eigvals = np.concatenate([eigvals, temp_EigVal], axis=0) - eigvecs = np.concatenate([eigvecs, temp_EigVec], axis=1) - - # Sort and keep only the first `num_pos` elements - sort_idx = eigvals.argsort() - eigvals = eigvals[sort_idx] - eigvals = eigvals[:num_pos] - eigvecs = eigvecs[:, sort_idx] - eigvecs = eigvecs[:, :num_pos] - - # Normalize the eigvecs - eigvecs = eigvecs / np.maximum(np.sqrt(np.sum(eigvecs**2, axis=0, keepdims=True)), 1e-4) - - return eigvals, eigvecs - - -def normalize_matrix( - matrix: Union[np.ndarray, spmatrix], - degree_vector=None, - normalization: str = None, -) -> Union[np.ndarray, spmatrix]: - r""" - Normalize a given matrix using its degree vector - - Parameters - --------------- - - matrix: torch.tensor(N, N) or scipy.sparse.spmatrix(N, N) - A square matrix representing either an Adjacency matrix or a Laplacian. 
- - degree_vector: torch.tensor(N) or np.ndarray(N) or None - A vector representing the degree of ``matrix``. - ``None`` is only accepted if ``normalization==None`` - - normalization: str or None, Default='none' - Normalization to use on the eig_matrix - - - 'none' or ``None``: no normalization - - - 'sym': Symmetric normalization ``D^-0.5 L D^-0.5`` - - - 'inv': Inverse normalization ``D^-1 L`` - - Returns - ----------- - matrix: torch.tensor(N, N) or scipy.sparse.spmatrix(N, N) - The normalized matrix - - """ - - # Transform the degree vector into a matrix - if degree_vector is None: - if not ((normalization is None) or (normalization.lower() == "none")): - raise ValueError("`degree_vector` cannot be `None` if `normalization` is not `None`") - else: - if is_dtype_numpy_array(matrix.dtype): - with np.errstate(divide="ignore", invalid="ignore"): - degree_inv = np.expand_dims(degree_vector**-0.5, axis=1) - degree_inv[np.isinf(degree_inv)] = 0 - elif is_dtype_torch_tensor(matrix.dtype): - degree_inv = torch.unsqueeze(degree_vector**-0.5, dim=1) - degree_inv[torch.isinf(degree_inv)] = 0 - - # Compute the normalized matrix - if (normalization is None) or (normalization.lower() == "none"): - pass - elif normalization.lower() == "sym": - matrix = degree_inv * matrix * degree_inv.T - elif normalization.lower() == "inv": - matrix = (degree_inv**2) * matrix - else: - raise ValueError( - f'`normalization` should be `None`, `"None"`, `"sym"` or `"inv"`, but `{normalization}` was provided' - ) - - return matrix diff --git a/graphium/features/transfer_pos_level.py b/graphium/features/transfer_pos_level.py deleted file mode 100644 index 4bb70e160..000000000 --- a/graphium/features/transfer_pos_level.py +++ /dev/null @@ -1,376 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. 
- -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. --------------------------------------------------------------------------------- -""" - - -from typing import Tuple, Union, List, Dict, Any, Optional - -import numpy as np - -from scipy.sparse import spmatrix, issparse, coo_matrix - -from torch_geometric.utils import from_scipy_sparse_matrix - - -def transfer_pos_level( - pe: np.ndarray, - in_level: str, - out_level: str, - adj: Union[np.ndarray, spmatrix], - num_nodes: int, - cache: Optional[Dict[str, Any]] = None, -) -> np.ndarray: - r""" - Transfer positional encoding between different positional levels (node, edge, nodepair, graph) - - Parameters: - pe: Input pe with pos_level defined by in_level - in_level: pos_level of input pe - out_level: desired pos_level of output pe - adj [num_nodes, num_nodes]: Adjacency matrix of the graph - num_nodes: Number of nodes in the graph - cache: Dictionary of cached objects - - Returns: - pe: Output pe with pos_level defined by out_level - """ - - if cache is None: - cache = {} - - if in_level == "node": - if out_level == "node": - pass - - elif out_level == "edge": - pe, cache = node_to_edge(pe, adj, cache) - - elif out_level == "nodepair": - pe = node_to_nodepair(pe, num_nodes) - - elif out_level == "graph": - raise NotImplementedError("Transfer function (node -> graph) not yet implemented.") - - else: - raise ValueError(f"Unknown `pos_level`: {out_level}") - - elif in_level == "edge": - raise NotImplementedError("Transfer function (edge -> *) not yet implemented.") - - elif in_level == "nodepair": - if len(pe.shape) == 2: - pe = np.expand_dims(pe, -1) - - if out_level == "node": - pe = 
nodepair_to_node(pe) - - elif out_level == "edge": - pe, cache = nodepair_to_edge(pe, adj, cache) - - elif out_level == "nodepair": - pass - - elif out_level == "graph": - raise NotImplementedError("Transfer function (nodepair -> graph) not yet implemented.") - - else: - raise ValueError(f"Unknown `pos_level`: {out_level}") - - elif in_level == "graph": - if out_level == "node": - pe = graph_to_node(pe, num_nodes, cache) - - elif out_level in ["edge", "nodepair"]: - raise NotImplementedError("Transfer function (graph -> edge/nodepair) not yet implemented.") - - else: - raise ValueError(f"Unknown `pos_level`: {out_level}") - - else: - raise ValueError(f"Unknown `pos_level`: {in_level}") - - return pe - - -# Transfer functions between different levels, i.e., node, edge, nodepair and graph level. - -# TODO: -# - Implement missing transfer functions below -# - Are transfer functions graph -> edge/nodepair and edge -> graph needed? - - -def node_to_edge( - pe: np.ndarray, adj: Union[np.ndarray, spmatrix], cache: Optional[Dict[str, Any]] = None -) -> Tuple[np.ndarray, Dict[str, Any]]: - r""" - Get an edge-level positional encoding from a node-level positional encoding. - -> For each edge, concatenate the sum and absolute difference of pe of source and destination node. 
- - Parameters: - pe [num_nodes, num_feat]: Node-level positional encoding - adj [num_nodes, num_nodes]: Adjacency matrix of the graph - cache: Dictionary of cached objects - - Returns: - edge_pe [2 * num_edges, 2 * num_feat]: Edge-level positional encoding - cache: Updated dictionary of cached objects - """ - - if cache is None: - cache = {} - - if not issparse(adj): - if "coo_adj" in cache: - adj = cache["coo_adj"] - elif "csr_adj" in cache: - adj = cache["csr_adj"] - else: - adj = coo_matrix(adj, dtype=np.float64) - cache["coo_adj"] = adj - - edge_index, _ = from_scipy_sparse_matrix(adj) - src, dst = edge_index[0], edge_index[1] - - pe_sum = pe[src] + pe[dst] - pe_abs_diff = np.abs(pe[src] - pe[dst]) - - edge_pe = np.concatenate((pe_sum, pe_abs_diff), axis=-1) - - return edge_pe, cache - - -def node_to_nodepair(pe: np.ndarray, num_nodes: int) -> np.ndarray: - r""" - Get a nodepair-level positional encoding from a node-level positional encoding. - -> For each nodepair (i,j) concatenate the sum and absolute difference of pe at node i and j. - - Parameters: - pe [num_nodes, num_feat]: Node-level positional encoding - num_nodes: Number of nodes in the graph - - Returns: - nodepair_pe [num_nodes, num_nodes, 2 * num_feat]: Nodepair-level positional encoding - """ - - expanded_pe = np.expand_dims(pe, axis=1) - expanded_pe = np.repeat(expanded_pe, repeats=num_nodes, axis=1) - - pe_sum = expanded_pe + expanded_pe.transpose([1, 0, 2]) - pe_abs_diff = np.abs(expanded_pe - expanded_pe.transpose([1, 0, 2])) - - nodepair_pe = np.concatenate((pe_sum, pe_abs_diff), axis=-1) - - return nodepair_pe - - -def node_to_graph(pe: np.ndarray, num_nodes: int) -> np.ndarray: - r""" - Get a graph-level positional encoding from a node-level positional encoding. - -> E.g., min/max/mean-pooling of node features. 
- - Parameters: - pe [num_nodes, num_feat]: Node-level positional encoding - num_nodes: Number of nodes in the graph - - Returns: - graph_pe [1, num_feat]: Graph-level positional encoding - """ - - raise NotImplementedError("Transfer function (node -> graph) not yet implemented.") - - -def edge_to_node(pe: np.ndarray, adj: Union[np.ndarray, spmatrix]) -> np.ndarray: - r""" - Get a node-level positional encoding from an edge-level positional encoding. - -> E.g., min/max/mean-pooling of information from edges (i,j) that contain node i - - Parameters: - pe [num_edges, num_feat]: Edge-level positional encoding - adj [num_nodes, num_nodes]: Adjacency matrix of the graph - - Returns: - node_pe [num_edges, num_feat]: Node-level positional encoding - """ - - raise NotImplementedError("Transfer function (edge -> node) not yet implemented.") - - -def edge_to_nodepair( - pe: np.ndarray, adj: Union[np.ndarray, spmatrix], num_nodes: int, cache: Optional[Dict[str, Any]] = None -) -> np.ndarray: - r""" - Get a nodepair-level positional encoding from an edge-level positional encoding. - -> Zero-padding of non-existing edges. - - Parameters: - pe [num_edges, num_feat]: Edge-level positional encoding - adj [num_nodes, num_nodes]: Adjacency matrix of the graph - num_nodes: Number of nodes in the graph - cache: Dictionary of cached objects - - Returns: - nodepair_pe [num_edges, num_edges, num_feat]: Nodepair-level positional encoding - cache: Updated dictionary of cached objects - """ - - if cache is None: - cache = {} - - num_feat = pe.shape[-1] - - if not isinstance(adj, coo_matrix): - if "coo_adj" in cache: - adj = cache["coo_adj"] - else: - adj = coo_matrix(adj, dtype=np.float64) - cache["coo_adj"] = adj - - dst, src = adj.row, adj.col - - nodepair_pe = np.zeros((num_nodes, num_nodes, num_feat)) - - for i in range(len(dst)): - nodepair_pe[dst[i], src[i], ...] = pe[i, ...] 
- - return nodepair_pe, cache - - -def edge_to_graph(pe: np.ndarray) -> np.ndarray: - r""" - Get a graph-level positional encoding from an edge-level positional encoding. - - Parameters: - pe [num_edges, num_feat]: Edge-level positional encoding - - Returns: - graph_pe [1, num_feat]: Graph-level positional encoding - """ - - raise NotImplementedError("Transfer function (edge -> graph) not yet implemented.") - - -def nodepair_to_node(pe: np.ndarray, stats_list: List = [np.min, np.mean, np.std]) -> np.ndarray: - r""" - Get a node-level positional encoding from a graph-level positional encoding. - -> Calculate statistics over rows & cols of input positional encoding - - Parameters: - pe [num_nodes, num_nodes, num_feat]: Nodepair-level positional encoding - stats_list: List of statistics to calculate per row/col of nodepair-level pe - - Returns: - node_pe [num_nodes, 2 * len(stats_list) * num_feat]: Node-level positional encoding - """ - - num_feat = pe.shape[-1] - - node_pe_list = [] - - for stat in stats_list: - for i in range(num_feat): - node_pe_list.append(stat(pe[..., i], axis=0)) - node_pe_list.append(stat(pe[..., i], axis=1)) - node_pe = np.stack(node_pe_list, axis=-1) - - return node_pe - - -def nodepair_to_edge( - pe: np.ndarray, adj: Union[np.ndarray, spmatrix], cache: Optional[Dict[str, Any]] = None -) -> np.ndarray: - r""" - Get a edge-level positional encoding from a nodepair-level positional encoding. 
- -> Mask and sparsify nodepair-level positional encoding - - Parameters: - pe [num_nodes, num_nodes, num_feat]: Nodepair-level positional encoding - adj [num_nodes, num_nodes]: Adjacency matrix of the graph - cache: Dictionary of cached objects - - Returns: - edge_pe [num_edges, num_feat]: Edge-level positional encoding - cache: Updated dictionary of cached objects - """ - - if cache is None: - cache = {} - - num_feat = pe.shape[-1] - - if not isinstance(adj, coo_matrix): - if "coo_adj" in cache: - adj = cache["coo_adj"] - else: - adj = coo_matrix(adj, dtype=np.float64) - cache["coo_adj"] = adj - - dst, src = adj.row, adj.col - - edge_pe = np.zeros((len(dst), num_feat)) - - for i in range(len(src)): - edge_pe[i, ...] = pe[dst[i], src[i]] - - return edge_pe, cache - - -def nodepair_to_graph(pe: np.ndarray, num_nodes: int) -> np.ndarray: - r""" - Get a graph-level positional encoding from a nodepair-level positional encoding. - -> E.g., min/max/mean-pooling of entries of input pe - - Parameters: - pe [num_nodes, num_nodes, num_feat]: Nodepair-level positional encoding - num_nodes: Number of nodes in the graph - - Returns: - graph_pe [1, num_feat]: Graph-level positional encoding - """ - - raise NotImplementedError("Transfer function (nodepair -> graph) not yet implemented.") - - -def graph_to_node( - pe: Union[np.ndarray, List], num_nodes: int, cache: Optional[Dict[str, Any]] = None -) -> np.ndarray: - r""" - Get a node-level positional encoding from a nodepair-level positional encoding. 
- -> E.g., expand dimension of graph-level pe - - Parameters: - pe [num_feat]: Nodepair-level positional encoding (or list of them if graph disconnected) - num_nodes: Number of nodes in the graph - cache: Dictionary of cached objects - - Returns: - node_pe [num_nodes, num_feat]: Node-level positional encoding - """ - - if cache is None: - cache = {} - - node_pe = None - - # The key 'components' is only in cache if disconnected_comp == True when computing base pe - if "components" in cache: - if len(cache["components"]) > 1: - node_pe = np.zeros((num_nodes, len(pe))) - components = cache["components"] - - for i, component in enumerate(components): - comp = list(component) - node_pe[comp, :] = np.real(pe[i]) - - if node_pe is None: - node_pe = np.tile(pe, (num_nodes, 1)) - - return node_pe From bdefe89e3ea96223129eb2779d2d7f96344e8438 Mon Sep 17 00:00:00 2001 From: ndickson Date: Tue, 23 Apr 2024 16:04:23 -0400 Subject: [PATCH 013/175] Implemented conformer generation in get_conformer_features, trying to match behaviour from get_simple_mol_conformer Python code, but adding Hs, as recommended for conformer generation. --- graphium/features/featurizer.py | 40 ------------ graphium/graphium_cpp/features.cpp | 101 +++++++++++++++++++++++++---- 2 files changed, 90 insertions(+), 51 deletions(-) diff --git a/graphium/features/featurizer.py b/graphium/features/featurizer.py index 996662db2..bdff649ac 100644 --- a/graphium/features/featurizer.py +++ b/graphium/features/featurizer.py @@ -23,48 +23,8 @@ from torch_geometric.data import Data -from rdkit import Chem -import datamol as dm - import graphium_cpp -def get_simple_mol_conformer(mol: dm.Mol) -> Union[Chem.rdchem.Conformer, None]: - r""" - If the molecule has a conformer, then it will return the conformer at idx `0`. - Otherwise, it generates a simple molecule conformer using `rdkit.Chem.rdDistGeom.EmbedMolecule` - and returns it. 
This is meant to be used in simple functions like `GetBondLength`, - not in functions requiring complex 3D structure. - - Parameters: - - mol: Rdkit Molecule - - Returns: - conf: A conformer of the molecule, or `None` if it fails - """ - - val = 0 - if mol.GetNumConformers() == 0: - val = Chem.rdDistGeom.EmbedMolecule(mol) - if val == -1: - val = Chem.rdDistGeom.EmbedMolecule( - mol, - enforceChirality=False, - ignoreSmoothingFailures=True, - useBasicKnowledge=True, - useExpTorsionAnglePrefs=True, - forceTol=0.1, - ) - - if val == -1: - conf = None - logger.warn("Couldn't compute conformer for molecule `{}`".format(Chem.MolToSmiles(mol))) - else: - conf = mol.GetConformer(0) - - return conf - - # These are the integers that correspond with the torch data types in C++ NP_DTYPE_TO_TORCH_INT = {np.float16: 5, np.float32: 6, np.float64: 7} diff --git a/graphium/graphium_cpp/features.cpp b/graphium/graphium_cpp/features.cpp index 98f9aff85..ded0409da 100644 --- a/graphium/graphium_cpp/features.cpp +++ b/graphium/graphium_cpp/features.cpp @@ -13,6 +13,9 @@ #include "random_walk.h" #include "spectral.h" +#include // For RDKit's addHs +#include // For RDKit's EmbedMolecule + #include static GraphData read_graph(const std::string& smiles_string, bool explicit_H) { @@ -234,31 +237,97 @@ static NeighbourData construct_neighbours(const GraphData& graph) { // This fills in 3 values for each atom template at::Tensor get_conformer_features( - const RDKit::ROMol &mol, + RDKit::ROMol &mol, + bool already_has_Hs, c10::ScalarType dtype, MaskNaNStyle mask_nan_style, T mask_nan_value, - int64_t &num_nans) { + int64_t &num_nans, + const std::string& smiles_string) { const size_t n = mol.getNumAtoms(); std::unique_ptr conformer_data(new T[3 * n]); T* data = conformer_data.get(); + std::unique_ptr mol_with_Hs_added; + RDKit::ROMol* mol_with_Hs = &mol; if (mol.beginConformers() == mol.endConformers()) { - // No conformers: treat as NaN + // No conformers. 
+ // Before generating conformers, it's recommended to add Hs explicitly. + if (!already_has_Hs) { + // Add Hs. They're added at the end, so the original atoms + // will have the same indices as before. + mol_with_Hs_added.reset(new RDKit::RWMol(mol)); + RDKit::MolOps::addHs(*mol_with_Hs_added); + mol_with_Hs = mol_with_Hs_added.get(); + } + + // Default Python arguments to EmbedMolecule + int conformer_id = RDKit::DGeomHelpers::EmbedMolecule( + *mol_with_Hs, + 0, // maxIterations + -1, // seed + true, // clearConfs + false, // useRandomCoords + 2.0, // boxSizeMult + true, // randNedEig + 1, // numZeroFail + nullptr,// coordMap + 1e-3, // optimizerForceTol + false, // ignoreSmoothingFailures + true, // enforceChirality + true, // useExpTorsionAnglePrefs (default in Python; non-default in C++) + true, // useBasicKnowledge (default in Python; non-default in C++) + false, // verbose + 5.0, // basinThresh + false, // onlyHeavyAtomsForRMS + 1, // ETversion + false, // useSmallRingTorsions + false, // useMacrocycleTorsions + false // useMacrocycle14config + ); + + if (conformer_id == -1) { + // Custom arguments as fallback + RDKit::DGeomHelpers::EmbedMolecule( + *mol_with_Hs, + 0, // maxIterations + -1, // seed + true, // clearConfs + false, // useRandomCoords (TODO: consider using true) + 2.0, // boxSizeMult + true, // randNedEig + 1, // numZeroFail + nullptr,// coordMap + 0.1, // optimizerForceTol (changed) + true, // ignoreSmoothingFailures (changed) + false, // enforceChirality (changed) + true, // useExpTorsionAnglePrefs (default in Python; non-default in C++) + true, // useBasicKnowledge (default in Python; non-default in C++) + false, // verbose + 5.0, // basinThresh + false, // onlyHeavyAtomsForRMS + 1, // ETversion + false, // useSmallRingTorsions + false, // useMacrocycleTorsions + false // useMacrocycle14config + ); + } + } + if (mol_with_Hs->beginConformers() == mol_with_Hs->endConformers()) { + // Still no conformers: treat as NaN + for (size_t i = 0; i < 3 
* n; ++i) { + data[i] = mask_nan_value; + } if (mask_nan_style == MaskNaNStyle::REPORT) { num_nans += 3*n; } - else { - for (size_t i = 0; i < 3 * n; ++i) { - data[i] = mask_nan_value; - } - } + printf("Warning: Couldn't compute conformer for molecule \"%s\"\n", smiles_string.c_str()); } else { - const RDKit::Conformer& conformer = mol.getConformer(); + const RDKit::Conformer& conformer = mol_with_Hs->getConformer(); const auto& positions = conformer.getPositions(); - assert(positions.size() == n); + assert(positions.size() >= n); for (size_t i = 0; i < n; ++i, data += 3) { const auto& position = positions[i]; data[0] = FeatureValues::convertToFeatureType(position.x); @@ -1219,6 +1288,7 @@ void create_all_features( const at::Tensor& positional_property_list, bool duplicate_edges, bool add_self_loop, + bool already_has_Hs, bool use_bonds_weights, bool offset_carbon, c10::ScalarType dtype, @@ -1226,6 +1296,7 @@ void create_all_features( T mask_nan_value, int64_t& num_nans, int64_t& nan_tensor_index, + const std::string& smiles_string, std::vector& tensors) { if (mask_nan_style == MaskNaNStyle::NONE) { @@ -1270,10 +1341,12 @@ void create_all_features( if (create_conformer_feature) { at::Tensor conformer_features_tensor = get_conformer_features( *graph.mol, + already_has_Hs, dtype, mask_nan_style, mask_nan_value, - num_nans); + num_nans, + smiles_string); if (num_nans != 0) { nan_tensor_index = tensors.size(); return; @@ -1343,6 +1416,7 @@ std::tuple, int64_t, int64_t> featurize_smiles( positional_property_list, duplicate_edges, add_self_loop, + explicit_H, use_bonds_weights, offset_carbon, dtype, @@ -1350,6 +1424,7 @@ std::tuple, int64_t, int64_t> featurize_smiles( FeatureValues::convertToFeatureType(mask_nan_value), num_nans, nan_tensor_index, + smiles_string, tensors); } else if (dtype == c10::ScalarType::Float) { @@ -1362,6 +1437,7 @@ std::tuple, int64_t, int64_t> featurize_smiles( positional_property_list, duplicate_edges, add_self_loop, + explicit_H, 
use_bonds_weights, offset_carbon, dtype, @@ -1369,6 +1445,7 @@ std::tuple, int64_t, int64_t> featurize_smiles( FeatureValues::convertToFeatureType(mask_nan_value), num_nans, nan_tensor_index, + smiles_string, tensors); } else if (dtype == c10::ScalarType::Double) { @@ -1381,6 +1458,7 @@ std::tuple, int64_t, int64_t> featurize_smiles( positional_property_list, duplicate_edges, add_self_loop, + explicit_H, use_bonds_weights, offset_carbon, dtype, @@ -1388,6 +1466,7 @@ std::tuple, int64_t, int64_t> featurize_smiles( FeatureValues::convertToFeatureType(mask_nan_value), num_nans, nan_tensor_index, + smiles_string, tensors); } From 5298444952e00df5566467069aa349f062c24feb Mon Sep 17 00:00:00 2001 From: ndickson Date: Tue, 23 Apr 2024 16:27:58 -0400 Subject: [PATCH 014/175] Deleted deprecated properties.py --- graphium/features/properties.py | 127 -------------------------------- 1 file changed, 127 deletions(-) delete mode 100644 graphium/features/properties.py diff --git a/graphium/features/properties.py b/graphium/features/properties.py deleted file mode 100644 index 89a90ffee..000000000 --- a/graphium/features/properties.py +++ /dev/null @@ -1,127 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. 
--------------------------------------------------------------------------------- -""" - - -from typing import Union, List, Callable - -import numpy as np -import datamol as dm - -from rdkit.Chem import rdMolDescriptors as rdMD -from loguru import logger - - -def get_prop_or_none( - prop: Callable, n: int, *args: Union[dm.Mol, str], **kwargs: Union[dm.Mol, str] -) -> Union[List[float], List[None]]: - r""" - return properties. If error, return list of `None` with lenght `n`. - Parameters: - prop: The property to compute. - n: The number of elements in the property. - *args: The arguments to pass to the property. - **kwargs: The keyword arguments to pass to the property. - Returns: - The property or a list of `None` with lenght `n`. - """ - logger.warning("get_prop_or_none is deprecated. Use `datamol.to_fp` instead.") - try: - return prop(*args, **kwargs) - except RuntimeError: - return [None] * n - - -def get_props_from_mol( - mol: Union[dm.Mol, str], - properties: Union[List[str], str] = "autocorr3d", -) -> np.ndarray: - r""" - Function to get a given set of desired properties from a molecule, - and output a property list. - - Parameters: - mol: The molecule from which to compute the properties. - properties: - The list of properties to compute for each molecule. It can be the following: - - - 'descriptors' - - 'autocorr3d' - - 'rdf' - - 'morse' - - 'whim' - - 'all' - - Returns: - props: np.array(float) - The array of properties for the desired molecule - classes_start_idx: list(int) - The list of index specifying the start of each new class of - descriptor or property. For example, if props has 20 elements, - the first 5 are rotatable bonds, the next 8 are morse, and - the rest are whim, then ``classes_start_idx = [0, 5, 13]``. - This will mainly be useful to normalize the features of - each class. - classes_names: list(str) - The name of the classes associated to each starting index. - Will be usefull to understand what property is the network learning. 
- - """ - - logger.warning("get_props_from_mol is deprecated. Use `datamol.to_fp` instead.") - - if isinstance(mol, str): - mol = dm.to_mol( - mol - ) # Doesn't need `ordered=True` because the fingerprints don't depend on the atom order - - if isinstance(properties, str): - properties = [properties] - - properties = [p.lower() for p in properties] - - # Initialize arrays - props = [] # Property vector for the features - classes_start_idx = [] # The starting index for each property class - classes_names = [] - - # Generate a 3D structure for the molecule - mol = dm.add_hs(mol) - - if ("autocorr3d" in properties) or ("all" in properties): - # Some kind of 3D description of the molecule - classes_names.append("autocorr3d") - classes_start_idx.append(len(props)) - props.extend(get_prop_or_none(rdMD.CalcAUTOCORR3D, 80, mol)) - - if ("rdf" in properties) or ("all" in properties): - # The radial distribution function (better than the inertia) - # https://en.wikipedia.org/wiki/Radial_distribution_function - classes_names.append("rdf") - classes_start_idx.append(len(props)) - props.extend(get_prop_or_none(rdMD.CalcRDF, 210, mol)) - - if ("morse" in properties) or ("all" in properties): - # Molecule Representation of Structures based on Electron diffraction descriptors - classes_names.append("morse") - classes_start_idx.append(len(props)) - props.extend(get_prop_or_none(rdMD.CalcMORSE, 224, mol)) - - if ("whim" in properties) or ("all" in properties): - # WHIM descriptors are 3D structural descriptors obtained from the - # (x,y,z)‐atomic coordinates of a molecular conformation of a chemical, - # and are used successfully in QSAR modelling. 
- classes_names.append("whim") - classes_start_idx.append(len(props)) - props.extend(get_prop_or_none(rdMD.CalcWHIM, 114, mol)) - - return np.array(props), classes_start_idx, classes_names From c38aa065f843df0e48869cbd3c1178637231d64c Mon Sep 17 00:00:00 2001 From: ndickson Date: Thu, 25 Apr 2024 18:12:38 -0400 Subject: [PATCH 015/175] Handle case of no label data in prepare_and_save_data. Also added concatenate_strings function, though it's not used yet. --- graphium/data/datamodule.py | 25 +- graphium/graphium_cpp/graphium_cpp.cpp | 3 +- graphium/graphium_cpp/labels.cpp | 649 +++++++++++++++---------- graphium/graphium_cpp/labels.h | 2 + 4 files changed, 420 insertions(+), 259 deletions(-) diff --git a/graphium/data/datamodule.py b/graphium/data/datamodule.py index fbb8819df..2dbb550d8 100644 --- a/graphium/data/datamodule.py +++ b/graphium/data/datamodule.py @@ -963,19 +963,19 @@ def get_task_levels(self): return task_level_map @property - def data_offsets_tensor_index(self): + def concat_smiles_tensor_index(self): return 0 @property - def concat_smiles_tensor_index(self): + def smiles_offsets_tensor_index(self): return 1 @property - def smiles_offsets_tensor_index(self): + def num_nodes_tensor_index(self): return 2 @property - def num_nodes_tensor_index(self): + def num_edges_tensor_index(self): return 3 @property - def num_edges_tensor_index(self): + def data_offsets_tensor_index(self): return 4 def prepare_data(self): @@ -1207,6 +1207,11 @@ def _make_multitask_dataset( processed_graph_data_path = self.processed_graph_data_path + stage_data = self.stage_data[stage] + data_offsets = None + if self.data_offsets_tensor_index < len(stage_data): + data_offsets = stage_data[self.data_offsets_tensor_index] + multitask_dataset = Datasets.MultitaskDataset( about=about, data_path=self._path_to_load_from_file(stage) if processed_graph_data_path else None, @@ -1214,11 +1219,11 @@ def _make_multitask_dataset( task_names=self.task_names, label_num_cols=self.label_num_cols, 
label_dtypes=self.label_dtypes, - mol_file_data_offsets=self.stage_data[stage][self.data_offsets_tensor_index], - concat_smiles_tensor=self.stage_data[stage][self.concat_smiles_tensor_index], - smiles_offsets_tensor=self.stage_data[stage][self.smiles_offsets_tensor_index], - num_nodes_tensor=self.stage_data[stage][self.num_nodes_tensor_index], - num_edges_tensor=self.stage_data[stage][self.num_edges_tensor_index], + mol_file_data_offsets=data_offsets, + concat_smiles_tensor=stage_data[self.concat_smiles_tensor_index], + smiles_offsets_tensor=stage_data[self.smiles_offsets_tensor_index], + num_nodes_tensor=stage_data[self.num_nodes_tensor_index], + num_edges_tensor=stage_data[self.num_edges_tensor_index], ) # type: ignore return multitask_dataset diff --git a/graphium/graphium_cpp/graphium_cpp.cpp b/graphium/graphium_cpp/graphium_cpp.cpp index 373c18f14..970c72d0a 100644 --- a/graphium/graphium_cpp/graphium_cpp.cpp +++ b/graphium/graphium_cpp/graphium_cpp.cpp @@ -77,8 +77,9 @@ PYBIND11_MODULE(graphium_cpp, m) { // Functions in labels.cpp m.def("load_num_cols_and_dtypes", &load_num_cols_and_dtypes, "Loads from a cache file, a list of integers representing the number of columns in each task, and a list of integers representing the torch ScalarType of the task's data."); - m.def("load_metadata_tensors", &load_metadata_tensors, "Loads from cache files for a specific stage, a torch tensor representing the offsets of molecules in files, another containing all SMILES strings contatenated, another with the offsets of all SMILES strings, and two for the nubmer of nodes and edges in each molecule."); + m.def("load_metadata_tensors", &load_metadata_tensors, "Loads from cache files for a specific stage, a torch tensor containing all SMILES strings contatenated, another with the offsets of all SMILES strings, two for the nubmer of nodes and edges in each molecule, and optionally another representing the offsets of molecules in files."); m.def("load_stats", &load_stats, "Loads 
from a cache file of a specific task, the stats for each column, for use in denormalization."); + m.def("concatenate_strings", &concatenate_strings, "Accepts a Numpy array of strings or Python list of strings and returns a PyTorch tensor of all of the characters and another tensor containing indices into the other tensor indicating where each string begins."); m.def("prepare_and_save_data", &prepare_and_save_data, "Accepts a dict mapping dataset (task) names to dicts with \"smiles\", \"labels\", and \"label_offsets\" data, and returns the data that would be returned by load_metadata_tensors, load_stats, and load_num_cols_and_dtypes."); m.def("load_labels_from_index", &load_labels_from_index, "Loads label data from disk, for a specific stage and molecule."); m.def("extract_string", &extract_string, "Extracts a single string from a Tensor of contatenated strings."); diff --git a/graphium/graphium_cpp/labels.cpp b/graphium/graphium_cpp/labels.cpp index 1a3318bd7..731f8e25b 100644 --- a/graphium/graphium_cpp/labels.cpp +++ b/graphium/graphium_cpp/labels.cpp @@ -412,10 +412,6 @@ std::vector load_metadata_tensors( std::filesystem::path base_path{processed_graph_data_path}; std::filesystem::path directory = base_path / (stage + "_" + data_hash); - std::unique_ptr mol_data_offsets; - uint64_t num_mol_data_offsets = - load_array_from_file(directory, file_data_offsets_filename, mol_data_offsets); - std::unique_ptr concatenated_smiles; uint64_t concatenated_smiles_size = load_array_from_file(directory, concat_smiles_filename, concatenated_smiles); @@ -432,24 +428,26 @@ std::vector load_metadata_tensors( uint64_t num_num_edges = load_array_from_file(directory, num_edges_filename, num_edges); + std::unique_ptr mol_data_offsets; + uint64_t num_mol_data_offsets = + load_array_from_file(directory, file_data_offsets_filename, mol_data_offsets); + if (num_num_nodes == 0 || num_num_edges != num_num_nodes || num_smiles_offsets != (num_num_nodes+1) || concatenated_smiles_size == 0 || 
concatenated_smiles_size != uint64_t(smiles_offsets[num_num_edges]) || - num_mol_data_offsets != num_num_nodes + (num_num_nodes + num_mols_per_file-1)/num_mols_per_file) { + (num_mol_data_offsets != num_num_nodes + (num_num_nodes + num_mols_per_file-1)/num_mols_per_file && num_mol_data_offsets != 0)) { printf("ERROR: graphium_cpp.load_metadata_tensors failed to load valid metadata files\n"); - printf(" len(file_data_offsets) is %zu\n", size_t(num_mol_data_offsets)); printf(" len(concat_smiles) is %zu\n", size_t(concatenated_smiles_size)); printf(" len(smiles_offsets) is %zu\n", size_t(num_smiles_offsets)); printf(" len(num_nodes) is %zu\n", size_t(num_num_nodes)); printf(" len(num_edges) is %zu\n", size_t(num_num_edges)); + printf(" len(file_data_offsets) is %zu\n", size_t(num_mol_data_offsets)); return std::vector(); } - // The above conditions should ensure that none of the arrays are empty, + // The above conditions should ensure that none of these arrays are empty, // but assert in debug builds just in case. 
- assert(mol_data_offsets && concatenated_smiles && smiles_offsets && num_nodes && num_edges); + assert(concatenated_smiles && smiles_offsets && num_nodes && num_edges); - const int64_t data_offsets_dims[1] = { int64_t(num_mol_data_offsets) }; - at::Tensor data_offsets_tensor = torch_tensor_from_array(std::move(mol_data_offsets), data_offsets_dims, 1, c10::ScalarType::Long); const int64_t concatenated_smiles_dims[1] = { int64_t(concatenated_smiles_size) }; at::Tensor smiles_tensor = torch_tensor_from_array(std::move(concatenated_smiles), concatenated_smiles_dims, 1, c10::ScalarType::Char); const int64_t smiles_offsets_dims[1] = { int64_t(num_num_nodes+1) }; @@ -459,13 +457,21 @@ std::vector load_metadata_tensors( const int64_t num_edges_dims[1] = { int64_t(num_num_nodes) }; at::Tensor num_edges_tensor = torch_tensor_from_array(std::move(num_edges), num_edges_dims, 1, c10::ScalarType::Int); - std::vector stage_return_data({ - std::move(data_offsets_tensor), - std::move(smiles_tensor), - std::move(smiles_offsets_tensor), - std::move(num_nodes_tensor), - std::move(num_edges_tensor) - }); + std::vector stage_return_data; + stage_return_data.reserve((num_mol_data_offsets > 0) ? 
5 : 4); + + stage_return_data.push_back(std::move(smiles_tensor)); + stage_return_data.push_back(std::move(smiles_offsets_tensor)); + stage_return_data.push_back(std::move(num_nodes_tensor)); + stage_return_data.push_back(std::move(num_edges_tensor)); + + if (num_mol_data_offsets > 0) { + const int64_t data_offsets_dims[1] = { int64_t(num_mol_data_offsets) }; + at::Tensor data_offsets_tensor = torch_tensor_from_array(std::move(mol_data_offsets), data_offsets_dims, 1, c10::ScalarType::Long); + + stage_return_data.push_back(std::move(data_offsets_tensor)); + } + return stage_return_data; } @@ -501,13 +507,118 @@ std::vector load_stats( return return_stats; } +std::pair concatenate_strings(pybind11::handle handle) { + using return_type = std::pair; + + ensure_numpy_array_module_initialized(); + + at::Tensor concatenated_strings; + at::Tensor offsets; + + PyObject* obj_ptr = handle.ptr(); + if (PyArray_Check(obj_ptr)) { + PyArrayObject* numpy_array = reinterpret_cast(obj_ptr); + int type_num = PyArray_TYPE(numpy_array); + int ndims = PyArray_NDIM(numpy_array); + if (type_num != NPY_OBJECT || ndims != 1) { + return return_type(std::move(concatenated_strings), std::move(offsets)); + } + intptr_t n = PyArray_DIM(numpy_array, 0); + if (n <= 0) { + return return_type(std::move(concatenated_strings), std::move(offsets)); + } + + size_t total_characters = 0; + for (intptr_t i = 0; i < n; ++i) { + pybind11::handle string_handle(*(PyObject**)PyArray_GETPTR1(numpy_array, i)); + if (!pybind11::isinstance(string_handle)) { + continue; + } + // TODO: Consider trying to avoid constructing std::string here + std::string string{pybind11::str{string_handle}}; + // +1 is for null terminator + total_characters += string.size() + 1; + } + std::unique_ptr concatenated_chars(new char[total_characters]); + std::unique_ptr offsets_buffer(new int64_t[n+1]); + int64_t offset = 0; + for (intptr_t i = 0; i < n; ++i) { + offsets_buffer[i] = offset; + pybind11::handle 
string_handle(*(PyObject**)PyArray_GETPTR1(numpy_array, i)); + if (!pybind11::isinstance(string_handle)) { + continue; + } + // TODO: Consider trying to avoid constructing std::string here + std::string string{pybind11::str{string_handle}}; + memcpy(concatenated_chars.get(), string.c_str(), string.size()); + offset += string.size(); + concatenated_chars[offset] = 0; + ++offset; + } + offsets_buffer[n] = offset; + + const int64_t concatenated_strings_dims[1] = { int64_t(total_characters) }; + concatenated_strings = torch_tensor_from_array(std::move(concatenated_chars), concatenated_strings_dims, 1, c10::ScalarType::Char); + const int64_t offsets_dims[1] = { int64_t(n+1) }; + offsets = torch_tensor_from_array(std::move(offsets_buffer), offsets_dims, 1, c10::ScalarType::Long); + } + if (pybind11::isinstance(handle)) { + pybind11::list list = handle.cast(); + size_t n = list.size(); + + size_t total_characters = 0; + for (size_t i = 0; i < n; ++i) { + pybind11::handle string_handle(list[i]); + if (!pybind11::isinstance(string_handle)) { + continue; + } + // TODO: Consider trying to avoid constructing std::string here + std::string string{pybind11::str{string_handle}}; + // +1 is for null terminator + total_characters += string.size() + 1; + } + std::unique_ptr concatenated_chars(new char[total_characters]); + std::unique_ptr offsets_buffer(new int64_t[n+1]); + int64_t offset = 0; + for (size_t i = 0; i < n; ++i) { + offsets_buffer[i] = offset; + pybind11::handle string_handle(list[i]); + if (!pybind11::isinstance(string_handle)) { + continue; + } + // TODO: Consider trying to avoid constructing std::string here + std::string string{pybind11::str{string_handle}}; + memcpy(concatenated_chars.get(), string.c_str(), string.size()); + offset += string.size(); + concatenated_chars[offset] = 0; + ++offset; + } + offsets_buffer[n] = offset; + + const int64_t concatenated_strings_dims[1] = { int64_t(total_characters) }; + concatenated_strings = 
torch_tensor_from_array(std::move(concatenated_chars), concatenated_strings_dims, 1, c10::ScalarType::Char); + const int64_t offsets_dims[1] = { int64_t(n+1) }; + offsets = torch_tensor_from_array(std::move(offsets_buffer), offsets_dims, 1, c10::ScalarType::Long); + } + return return_type(std::move(concatenated_strings), std::move(offsets)); +} + +constexpr size_t num_stages = 3; +// NOTE: Computing stats below depends on that "train" is stage 0. +const std::string stages[num_stages] = { + std::string("train"), + std::string("val"), + std::string("test") +}; + + // Returns: // stage -> [ -// mol_file_data_offsets, // unique mol smiles strings all concatenated, // unique mol smiles string offsets (including one extra for the end), // unique mol num_nodes, -// unique mol num_edges +// unique mol num_edges, +// mol_file_data_offsets // ] // task -> 4 stats tensors each // task index -> label num columns @@ -536,13 +647,6 @@ std::tuple< std::filesystem::create_directories(base_path); std::filesystem::path common_path(base_path / data_hash); std::filesystem::create_directories(common_path); - constexpr size_t num_stages = 3; - // NOTE: Computing stats below depends on that "train" is stage 0. 
- std::string stages[num_stages] = { - std::string("train"), - std::string("val"), - std::string("test") - }; std::filesystem::path stage_paths[num_stages] = { base_path / (stages[0] + "_" + data_hash), base_path / (stages[1] + "_" + data_hash), @@ -587,17 +691,11 @@ std::tuple< } pybind11::dict dataset_dict = task_dataset_handle.cast(); pybind11::handle smiles_handle = pybind11::handle(PyDict_GetItemString(dataset_dict.ptr(), "smiles")); - pybind11::handle labels_handle = pybind11::handle(PyDict_GetItemString(dataset_dict.ptr(), "labels")); - pybind11::handle label_offsets_handle = pybind11::handle(PyDict_GetItemString(dataset_dict.ptr(), "label_offsets")); - if (!smiles_handle || !labels_handle) { + if (!smiles_handle) { continue; } PyObject* smiles_obj_ptr = smiles_handle.ptr(); - PyObject* labels_obj_ptr = labels_handle.ptr(); - PyObject* label_offsets_obj_ptr = label_offsets_handle.ptr(); - const bool is_labels_numpy = PyArray_Check(labels_obj_ptr); - const bool is_labels_multi_row = label_offsets_obj_ptr && PyArray_Check(label_offsets_obj_ptr); - if (!PyArray_Check(smiles_obj_ptr) || !is_labels_numpy) { + if (!PyArray_Check(smiles_obj_ptr)) { continue; } PyArrayObject* smiles_numpy_array = reinterpret_cast(smiles_obj_ptr); @@ -611,6 +709,22 @@ std::tuple< continue; } + // smiles array is okay + smiles_numpy_arrays[current_task_index] = smiles_numpy_array; + + // Check for labels. There might not be labels in inference case. 
+ pybind11::handle labels_handle = pybind11::handle(PyDict_GetItemString(dataset_dict.ptr(), "labels")); + if (!labels_handle) { + continue; + } + pybind11::handle label_offsets_handle = pybind11::handle(PyDict_GetItemString(dataset_dict.ptr(), "label_offsets")); + PyObject* labels_obj_ptr = labels_handle.ptr(); + PyObject* label_offsets_obj_ptr = label_offsets_handle.ptr(); + const bool is_labels_numpy = PyArray_Check(labels_obj_ptr); + const bool is_labels_multi_row = label_offsets_obj_ptr && PyArray_Check(label_offsets_obj_ptr); + if (!is_labels_numpy) { + continue; + } PyArrayObject* labels_numpy_array = reinterpret_cast(labels_obj_ptr); PyArrayObject* label_offsets_numpy_array = is_labels_multi_row ? reinterpret_cast(label_offsets_obj_ptr) : nullptr; int labels_type_num = PyArray_TYPE(labels_numpy_array); @@ -652,7 +766,7 @@ std::tuple< #if GRAPHIUM_CPP_DEBUGGING printf("\"%s\" labels[%zd][%zd] (%zd molecules)\n", task_name.c_str(), num_label_rows, num_label_cols, num_molecules); #endif - if (num_smiles != num_molecules || num_label_cols < 0) { + if (num_smiles != num_molecules || num_label_cols <= 0) { continue; } @@ -665,8 +779,6 @@ std::tuple< total_num_cols += size_t(num_label_cols); task_bytes_per_float[current_task_index] = bytes_per_float; - smiles_numpy_arrays[current_task_index] = smiles_numpy_array; - pybind11::handle task_normalization_handle = pybind11::handle(PyDict_GetItemString(task_label_normalization.ptr(), task_name.c_str())); if (!task_normalization_handle || !pybind11::isinstance(task_normalization_handle)) { continue; @@ -699,7 +811,9 @@ std::tuple< } task_col_starts[num_tasks] = total_num_cols; - save_num_cols_and_dtypes(common_path, return_label_num_cols, return_label_data_types); + if (total_num_cols > 0) { + save_num_cols_and_dtypes(common_path, return_label_num_cols, return_label_data_types); + } // Get the total number of molecules, by stage and task size_t total_num_mols = 0; @@ -893,168 +1007,170 @@ std::tuple< constexpr size_t 
stat_std_offset = 3; constexpr size_t num_stats = 4; size_t stats_floats = num_stats*total_num_cols; - std::unique_ptr all_task_stats(new double[stats_floats]); - std::unique_ptr all_task_num_non_nan(new intptr_t[total_num_cols]); - for (size_t task_index = 0; task_index < num_tasks; ++task_index) { - const size_t task_num_mols = task_mol_start[task_index+1] - task_mol_start[task_index]; - const size_t task_first_col = task_col_starts[task_index]; - const size_t task_num_cols = task_col_starts[task_index+1] - task_first_col; - if (task_num_mols == 0 || task_num_cols == 0) { - continue; - } - // Initialize stats for accumulation - double*const task_stats = all_task_stats.get() + num_stats*task_first_col; - intptr_t*const task_num_non_nan = all_task_num_non_nan.get() + task_first_col; - for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index) { - task_stats[num_stats*task_col_index + stat_min_offset] = std::numeric_limits::infinity(); - task_stats[num_stats*task_col_index + stat_max_offset] = -std::numeric_limits::infinity(); - task_stats[num_stats*task_col_index + stat_mean_offset] = 0.0; - task_stats[num_stats*task_col_index + stat_std_offset] = 0.0; - task_num_non_nan[task_col_index] = 0; - } - - const size_t bytes_per_float = task_bytes_per_float[task_index]; - - auto&& update_stats_single_row = [task_stats, task_num_non_nan](const char* col_data, const size_t task_num_cols, const size_t bytes_per_float, const intptr_t col_stride) { - double* stats = task_stats; - intptr_t* num_non_nan = task_num_non_nan; - for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index, col_data += col_stride, stats += num_stats, ++num_non_nan) { - // TODO: Move the type check outside the loop if it's a bottleneck - double value; - if (bytes_per_float == sizeof(double)) { - value = *(const double*)(col_data); - } - else if (bytes_per_float == sizeof(float)) { - value = *(const float*)(col_data); - } - else { - assert(bytes_per_float == 
sizeof(uint16_t)); - value = c10::detail::fp16_ieee_to_fp32_value(*(const uint16_t*)(col_data)); - } - if (value != value) { - // NaN value, so skip it - continue; - } - stats[stat_min_offset] = std::min(stats[stat_min_offset], value); - stats[stat_max_offset] = std::max(stats[stat_max_offset], value); - stats[stat_mean_offset] += value; - // TODO: If summing the squares isn't accurate enough for computing the variance, - // consider other approaches. - stats[stat_std_offset] += value*value; - ++(*num_non_nan); + std::unique_ptr all_task_stats((stats_floats > 0) ? new double[stats_floats] : nullptr); + std::unordered_map> all_stats_return_data; + + if (total_num_cols > 0) { + std::unique_ptr all_task_num_non_nan(new intptr_t[total_num_cols]); + for (size_t task_index = 0; task_index < num_tasks; ++task_index) { + const size_t task_num_mols = task_mol_start[task_index+1] - task_mol_start[task_index]; + const size_t task_first_col = task_col_starts[task_index]; + const size_t task_num_cols = task_col_starts[task_index+1] - task_first_col; + if (task_num_mols == 0 || task_num_cols == 0) { + continue; } - }; + // Initialize stats for accumulation + double*const task_stats = all_task_stats.get() + num_stats*task_first_col; + intptr_t*const task_num_non_nan = all_task_num_non_nan.get() + task_first_col; + for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index) { + task_stats[num_stats*task_col_index + stat_min_offset] = std::numeric_limits::infinity(); + task_stats[num_stats*task_col_index + stat_max_offset] = -std::numeric_limits::infinity(); + task_stats[num_stats*task_col_index + stat_mean_offset] = 0.0; + task_stats[num_stats*task_col_index + stat_std_offset] = 0.0; + task_num_non_nan[task_col_index] = 0; + } + + const size_t bytes_per_float = task_bytes_per_float[task_index]; - PyArrayObject*const labels_numpy_array = labels_numpy_arrays[task_index]; - if (labels_numpy_array != nullptr) { - const char* raw_data = (const 
char*)PyArray_DATA(labels_numpy_array); - const intptr_t* strides = PyArray_STRIDES(labels_numpy_array); - const intptr_t num_label_rows = PyArray_DIM(labels_numpy_array, 0); - PyArrayObject*const label_offsets_numpy_array = label_offsets_numpy_arrays[task_index]; - const char* offsets_raw_data = label_offsets_numpy_array ? (const char*)PyArray_DATA(label_offsets_numpy_array) : nullptr; - const intptr_t offsets_stride = label_offsets_numpy_array ? PyArray_STRIDES(label_offsets_numpy_array)[0] : 0; - // The -1 is because there's an extra entry at the end for the end offset. - const intptr_t num_mols = label_offsets_numpy_array ? PyArray_DIM(label_offsets_numpy_array, 0) - 1 : num_label_rows; - // The normalization is computed on the subsample being kept - for (size_t task_key_index = 0; task_key_index < task_num_mols; ++task_key_index) { - const size_t task_mol_index = keys[task_mol_start[task_index] + task_key_index].task_mol_index; - if (task_mol_index >= size_t(num_mols)) { - printf("Error: In task %zu, mol index %zu is past limit of %zu\n", size_t(task_index), task_mol_index, size_t(num_mols)); - continue; - } - if (offsets_raw_data == nullptr) { - const char* row_data = raw_data + strides[0]*task_mol_index; - update_stats_single_row(row_data, task_num_cols, bytes_per_float, strides[1]); + auto&& update_stats_single_row = [task_stats, task_num_non_nan](const char* col_data, const size_t task_num_cols, const size_t bytes_per_float, const intptr_t col_stride) { + double* stats = task_stats; + intptr_t* num_non_nan = task_num_non_nan; + for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index, col_data += col_stride, stats += num_stats, ++num_non_nan) { + // TODO: Move the type check outside the loop if it's a bottleneck + double value; + if (bytes_per_float == sizeof(double)) { + value = *(const double*)(col_data); + } + else if (bytes_per_float == sizeof(float)) { + value = *(const float*)(col_data); + } + else { + assert(bytes_per_float 
== sizeof(uint16_t)); + value = c10::detail::fp16_ieee_to_fp32_value(*(const uint16_t*)(col_data)); + } + if (value != value) { + // NaN value, so skip it + continue; + } + stats[stat_min_offset] = std::min(stats[stat_min_offset], value); + stats[stat_max_offset] = std::max(stats[stat_max_offset], value); + stats[stat_mean_offset] += value; + // TODO: If summing the squares isn't accurate enough for computing the variance, + // consider other approaches. + stats[stat_std_offset] += value*value; + ++(*num_non_nan); } - else { - size_t begin_offset = *reinterpret_cast(offsets_raw_data + offsets_stride*task_mol_index); - size_t end_offset = *reinterpret_cast(offsets_raw_data + offsets_stride*(task_mol_index+1)); - const char* row_data = raw_data + strides[0]*begin_offset; - for (size_t row = begin_offset; row < end_offset; ++row, row_data += strides[0]) { + }; + + PyArrayObject*const labels_numpy_array = labels_numpy_arrays[task_index]; + if (labels_numpy_array != nullptr) { + const char* raw_data = (const char*)PyArray_DATA(labels_numpy_array); + const intptr_t* strides = PyArray_STRIDES(labels_numpy_array); + const intptr_t num_label_rows = PyArray_DIM(labels_numpy_array, 0); + PyArrayObject*const label_offsets_numpy_array = label_offsets_numpy_arrays[task_index]; + const char* offsets_raw_data = label_offsets_numpy_array ? (const char*)PyArray_DATA(label_offsets_numpy_array) : nullptr; + const intptr_t offsets_stride = label_offsets_numpy_array ? PyArray_STRIDES(label_offsets_numpy_array)[0] : 0; + // The -1 is because there's an extra entry at the end for the end offset. + const intptr_t num_mols = label_offsets_numpy_array ? 
PyArray_DIM(label_offsets_numpy_array, 0) - 1 : num_label_rows; + // The normalization is computed on the subsample being kept + for (size_t task_key_index = 0; task_key_index < task_num_mols; ++task_key_index) { + const size_t task_mol_index = keys[task_mol_start[task_index] + task_key_index].task_mol_index; + if (task_mol_index >= size_t(num_mols)) { + printf("Error: In task %zu, mol index %zu is past limit of %zu\n", size_t(task_index), task_mol_index, size_t(num_mols)); + continue; + } + if (offsets_raw_data == nullptr) { + const char* row_data = raw_data + strides[0]*task_mol_index; update_stats_single_row(row_data, task_num_cols, bytes_per_float, strides[1]); } + else { + size_t begin_offset = *reinterpret_cast(offsets_raw_data + offsets_stride*task_mol_index); + size_t end_offset = *reinterpret_cast(offsets_raw_data + offsets_stride*(task_mol_index+1)); + const char* row_data = raw_data + strides[0]*begin_offset; + for (size_t row = begin_offset; row < end_offset; ++row, row_data += strides[0]) { + update_stats_single_row(row_data, task_num_cols, bytes_per_float, strides[1]); + } + } } } - } - + #if GRAPHIUM_CPP_DEBUGGING - printf("Task %zu normalization method %zu\n", size_t(task_index), size_t(task_normalization_options[task_index].method)); - for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index) { - printf("Task %zu col %zu, num non-nan = %zu, min = %e, max = %e\n", - size_t(task_index), task_col_index, - size_t(task_num_non_nan[task_col_index]), - task_stats[num_stats*task_col_index + stat_min_offset], - task_stats[num_stats*task_col_index + stat_max_offset]); - } + printf("Task %zu normalization method %zu\n", size_t(task_index), size_t(task_normalization_options[task_index].method)); + for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index) { + printf("Task %zu col %zu, num non-nan = %zu, min = %e, max = %e\n", + size_t(task_index), task_col_index, + size_t(task_num_non_nan[task_col_index]), + 
task_stats[num_stats*task_col_index + stat_min_offset], + task_stats[num_stats*task_col_index + stat_max_offset]); + } #endif - } - - std::unordered_map> all_stats_return_data; - - for (size_t task_index = 0; task_index < num_tasks; ++task_index) { - const size_t task_first_col = task_col_starts[task_index]; - const size_t task_num_cols = task_col_starts[task_index+1] - task_first_col; - if (task_num_cols == 0) { - continue; } - // Finish accumulation - double*const task_stats = all_task_stats.get() + num_stats*task_first_col; - intptr_t*const task_num_non_nan = all_task_num_non_nan.get() + task_first_col; - for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index) { - if (task_num_non_nan[task_col_index] == 0) { - task_stats[num_stats*task_col_index + stat_min_offset] = std::numeric_limits::quiet_NaN(); - task_stats[num_stats*task_col_index + stat_max_offset] = std::numeric_limits::quiet_NaN(); - task_stats[num_stats*task_col_index + stat_mean_offset] = std::numeric_limits::quiet_NaN(); - task_stats[num_stats*task_col_index + stat_std_offset] = std::numeric_limits::quiet_NaN(); + for (size_t task_index = 0; task_index < num_tasks; ++task_index) { + const size_t task_first_col = task_col_starts[task_index]; + const size_t task_num_cols = task_col_starts[task_index+1] - task_first_col; + if (task_num_cols == 0) { + continue; } - else { - if (task_normalization_options[task_index].min_clipping > task_stats[num_stats*task_col_index + stat_min_offset]) { - task_stats[num_stats*task_col_index + stat_min_offset] = task_normalization_options[task_index].min_clipping; + + // Finish accumulation + double*const task_stats = all_task_stats.get() + num_stats*task_first_col; + intptr_t*const task_num_non_nan = all_task_num_non_nan.get() + task_first_col; + for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index) { + if (task_num_non_nan[task_col_index] == 0) { + task_stats[num_stats*task_col_index + stat_min_offset] = 
std::numeric_limits::quiet_NaN(); + task_stats[num_stats*task_col_index + stat_max_offset] = std::numeric_limits::quiet_NaN(); + task_stats[num_stats*task_col_index + stat_mean_offset] = std::numeric_limits::quiet_NaN(); + task_stats[num_stats*task_col_index + stat_std_offset] = std::numeric_limits::quiet_NaN(); } - if (task_normalization_options[task_index].max_clipping < task_stats[num_stats*task_col_index + stat_max_offset]) { - task_stats[num_stats*task_col_index + stat_max_offset] = task_normalization_options[task_index].max_clipping; + else { + if (task_normalization_options[task_index].min_clipping > task_stats[num_stats*task_col_index + stat_min_offset]) { + task_stats[num_stats*task_col_index + stat_min_offset] = task_normalization_options[task_index].min_clipping; + } + if (task_normalization_options[task_index].max_clipping < task_stats[num_stats*task_col_index + stat_max_offset]) { + task_stats[num_stats*task_col_index + stat_max_offset] = task_normalization_options[task_index].max_clipping; + } + const double n = double(task_num_non_nan[task_col_index]); + const double mean = task_stats[num_stats*task_col_index + stat_mean_offset] / n; + task_stats[num_stats*task_col_index + stat_mean_offset] = mean; + // sum((x[i] - m)^2)/(n-1) + // = sum(x[i]^2 -2mx[i] + m^2)/(n-1) + // = (sum(x[i]^2) - 2nm^2 + nm^2)/(n-1) + // = (sum(x[i]^2) - nm^2)/(n-1) + // except, for compatibility with numpy.nanstd, use n instead of n-1 + const double sum_sqaures = task_stats[num_stats*task_col_index + stat_std_offset]; + const double stdev = std::sqrt((sum_sqaures - n*mean*mean)/n); + task_stats[num_stats*task_col_index + stat_std_offset] = stdev; } - const double n = double(task_num_non_nan[task_col_index]); - const double mean = task_stats[num_stats*task_col_index + stat_mean_offset] / n; - task_stats[num_stats*task_col_index + stat_mean_offset] = mean; - // sum((x[i] - m)^2)/(n-1) - // = sum(x[i]^2 -2mx[i] + m^2)/(n-1) - // = (sum(x[i]^2) - 2nm^2 + nm^2)/(n-1) - // = 
(sum(x[i]^2) - nm^2)/(n-1) - // except, for compatibility with numpy.nanstd, use n instead of n-1 - const double sum_sqaures = task_stats[num_stats*task_col_index + stat_std_offset]; - const double stdev = std::sqrt((sum_sqaures - n*mean*mean)/n); - task_stats[num_stats*task_col_index + stat_std_offset] = stdev; } - } - - const std::string task_name{ pybind11::str(task_names[task_index]) }; + + const std::string task_name{ pybind11::str(task_names[task_index]) }; #if GRAPHIUM_CPP_DEBUGGING - for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index) { - printf("%s %zu %lld %e %e %e %e\n", - task_name.c_str(), task_col_index, (long long)task_num_non_nan[task_col_index], - task_stats[num_stats*task_col_index + stat_min_offset], - task_stats[num_stats*task_col_index + stat_max_offset], - task_stats[num_stats*task_col_index + stat_mean_offset], - task_stats[num_stats*task_col_index + stat_std_offset]); - } -#endif - const std::string stats_filename = task_name + "_stats.tmp"; - save_array_to_file(common_path, stats_filename.c_str(), task_stats, num_stats*task_num_cols); - - // Make copies for returning in a format similar to the load_stats function. 
- std::vector task_stats_out; - for (size_t stat_index = 0; stat_index < num_stats; ++stat_index) { - const int64_t task_stats_dims[1] = { int64_t(task_num_cols) }; - std::unique_ptr task_stats_copy(new double[task_num_cols]); for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index) { - task_stats_copy[task_col_index] = task_stats[num_stats*task_col_index + stat_index]; + printf("%s %zu %lld %e %e %e %e\n", + task_name.c_str(), task_col_index, (long long)task_num_non_nan[task_col_index], + task_stats[num_stats*task_col_index + stat_min_offset], + task_stats[num_stats*task_col_index + stat_max_offset], + task_stats[num_stats*task_col_index + stat_mean_offset], + task_stats[num_stats*task_col_index + stat_std_offset]); } - at::Tensor task_stats_tensor = torch_tensor_from_array(std::move(task_stats_copy), task_stats_dims, 1, c10::ScalarType::Double); - task_stats_out.push_back(std::move(task_stats_tensor)); +#endif + const std::string stats_filename = task_name + "_stats.tmp"; + save_array_to_file(common_path, stats_filename.c_str(), task_stats, num_stats*task_num_cols); + + // Make copies for returning in a format similar to the load_stats function. 
+ std::vector task_stats_out; + for (size_t stat_index = 0; stat_index < num_stats; ++stat_index) { + const int64_t task_stats_dims[1] = { int64_t(task_num_cols) }; + std::unique_ptr task_stats_copy(new double[task_num_cols]); + for (size_t task_col_index = 0; task_col_index < task_num_cols; ++task_col_index) { + task_stats_copy[task_col_index] = task_stats[num_stats*task_col_index + stat_index]; + } + at::Tensor task_stats_tensor = torch_tensor_from_array(std::move(task_stats_copy), task_stats_dims, 1, c10::ScalarType::Double); + task_stats_out.push_back(std::move(task_stats_tensor)); + } + all_stats_return_data.insert(std::make_pair(std::move(task_name), std::move(task_stats_out))); } - all_stats_return_data.insert(std::make_pair(std::move(task_name), std::move(task_stats_out))); } // Sort train, val, and test separately, since they need to be stored separately. @@ -1064,22 +1180,119 @@ std::tuple< std::sort(keys.get() + task_mol_start[num_tasks], keys.get() + task_mol_start[2*num_tasks]); std::sort(keys.get() + task_mol_start[2*num_tasks], keys.get() + total_num_mols); + std::unordered_map> per_stage_return_data; + + // Deal with non-label data first + for (size_t stage_index = 0; stage_index < num_stages; ++stage_index) { + size_t concatenated_smiles_size = 0; + uint64_t num_unique_mols = 0; + const size_t stage_begin_index = task_mol_start[stage_index*num_tasks]; + const size_t stage_end_index = task_mol_start[(stage_index+1)*num_tasks]; + for (size_t sorted_index = stage_begin_index; sorted_index < stage_end_index; ) { + if (keys[sorted_index].isInvalid()) { + ++sorted_index; + continue; + } + ++num_unique_mols; + + // Add the length of the smiles string to the total length, + // and include the terminating zero + const size_t smiles_length = smiles_strings[keys[sorted_index].mol_index].size(); + concatenated_smiles_size += (smiles_length+1); + + const uint64_t id0 = keys[sorted_index].id0; + const uint64_t id1 = keys[sorted_index].id1; + ++sorted_index; + 
while (sorted_index < stage_end_index && keys[sorted_index].id0 == id0 && keys[sorted_index].id1 == id1) { + ++sorted_index; + } + } + + std::unique_ptr concatenated_smiles(new char[concatenated_smiles_size]); + std::unique_ptr smiles_offsets(new int64_t[num_unique_mols+1]); + std::unique_ptr num_nodes(new int32_t[num_unique_mols]); + std::unique_ptr num_edges(new int32_t[num_unique_mols]); + size_t unique_index = 0; + int64_t smiles_offset = 0; + for (size_t sorted_index = stage_begin_index; sorted_index < stage_end_index; ) { + if (keys[sorted_index].isInvalid()) { + ++sorted_index; + continue; + } + smiles_offsets[unique_index] = smiles_offset; + + const uint64_t id0 = keys[sorted_index].id0; + const uint64_t id1 = keys[sorted_index].id1; + num_nodes[unique_index] = keys[sorted_index].num_nodes; + num_edges[unique_index] = keys[sorted_index].num_edges; + + // Copy the string + const std::string& smiles_string = smiles_strings[keys[sorted_index].mol_index]; + const size_t smiles_length = smiles_string.size(); + memcpy(concatenated_smiles.get() + smiles_offset, smiles_string.c_str(), smiles_length); + smiles_offset += smiles_length; + // Don't forget the terminating zero + concatenated_smiles[smiles_offset] = 0; + ++smiles_offset; + + ++unique_index; + ++sorted_index; + while (sorted_index < stage_end_index && keys[sorted_index].id0 == id0 && keys[sorted_index].id1 == id1) { + ++sorted_index; + } + } + smiles_offsets[unique_index] = smiles_offset; + + save_array_to_file(stage_paths[stage_index], concat_smiles_filename, concatenated_smiles.get(), concatenated_smiles_size); + save_array_to_file(stage_paths[stage_index], smiles_offsets_filename, smiles_offsets.get(), num_unique_mols+1); + save_array_to_file(stage_paths[stage_index], num_nodes_filename, num_nodes.get(), num_unique_mols); + save_array_to_file(stage_paths[stage_index], num_edges_filename, num_edges.get(), num_unique_mols); + + const int64_t concatenated_smiles_dims[1] = { 
int64_t(concatenated_smiles_size) }; + at::Tensor smiles_tensor = torch_tensor_from_array(std::move(concatenated_smiles), concatenated_smiles_dims, 1, c10::ScalarType::Char); + const int64_t smiles_offsets_dims[1] = { int64_t(num_unique_mols+1) }; + at::Tensor smiles_offsets_tensor = torch_tensor_from_array(std::move(smiles_offsets), smiles_offsets_dims, 1, c10::ScalarType::Long); + const int64_t num_nodes_dims[1] = { int64_t(num_unique_mols) }; + at::Tensor num_nodes_tensor = torch_tensor_from_array(std::move(num_nodes), num_nodes_dims, 1, c10::ScalarType::Int); + const int64_t num_edges_dims[1] = { int64_t(num_unique_mols) }; + at::Tensor num_edges_tensor = torch_tensor_from_array(std::move(num_edges), num_edges_dims, 1, c10::ScalarType::Int); + + std::vector stage_return_data; + // Reserve space for one extra, for the data offsets tensor later + stage_return_data.reserve((total_num_cols > 0) ? 5 : 4); + stage_return_data.push_back(std::move(smiles_tensor)); + stage_return_data.push_back(std::move(smiles_offsets_tensor)); + stage_return_data.push_back(std::move(num_nodes_tensor)); + stage_return_data.push_back(std::move(num_edges_tensor)); + per_stage_return_data.insert(std::make_pair(stages[stage_index], std::move(stage_return_data))); + } + + if (total_num_cols == 0) { + // No label data, so all done + return std::make_tuple( + std::move(per_stage_return_data), + std::move(all_stats_return_data), + std::move(return_label_num_cols), + std::move(return_label_data_types)); + } + // mol_data_offsets will only need one entry for each unique molecule, - // but we can preallocate an upper bound. + // plus one per file, but we can preallocate an upper bound. 
std::vector mol_data_offsets; - mol_data_offsets.reserve(task_mol_start[num_tasks]); + size_t upper_bound_num_files = (task_mol_start[num_tasks] + num_mols_per_file-1) / num_mols_per_file; + mol_data_offsets.reserve(task_mol_start[num_tasks] + upper_bound_num_files); + // temp_data is used for normalization std::vector temp_data; temp_data.reserve(total_num_cols*sizeof(double)); + std::vector data; data.reserve(num_mols_per_file*(total_num_cols*sizeof(double) + (1+2*num_tasks)*sizeof(uint64_t))); - std::unordered_map> per_stage_return_data; - + // Now, deal with label data for (size_t stage_index = 0; stage_index < num_stages; ++stage_index) { mol_data_offsets.resize(0); assert(data.size() == 0); - size_t concatenated_smiles_size = 0; uint64_t num_unique_mols = 0; const size_t stage_begin_index = task_mol_start[stage_index*num_tasks]; const size_t stage_end_index = task_mol_start[(stage_index+1)*num_tasks]; @@ -1090,15 +1303,11 @@ std::tuple< } size_t data_offset = data.size(); mol_data_offsets.push_back(data_offset); + const size_t first_sorted_index = sorted_index; const uint64_t id0 = keys[sorted_index].id0; const uint64_t id1 = keys[sorted_index].id1; - // Add the length of the smiles string to the total length, - // and include the terminating zero - const size_t smiles_length = smiles_strings[keys[sorted_index].mol_index].size(); - concatenated_smiles_size += (smiles_length+1); - uint64_t prev_task_index = keys[sorted_index].task_index; uint64_t mol_num_tasks = 1; ++sorted_index; @@ -1351,63 +1560,7 @@ std::tuple< const int64_t data_offsets_dims[1] = { int64_t(num_offsets) }; at::Tensor data_offsets_tensor = torch_tensor_from_array(std::move(temp_data_offsets), data_offsets_dims, 1, c10::ScalarType::Long); - std::unique_ptr concatenated_smiles(new char[concatenated_smiles_size]); - std::unique_ptr smiles_offsets(new int64_t[num_unique_mols+1]); - std::unique_ptr num_nodes(new int32_t[num_unique_mols]); - std::unique_ptr num_edges(new 
int32_t[num_unique_mols]); - size_t unique_index = 0; - int64_t smiles_offset = 0; - for (size_t sorted_index = stage_begin_index; sorted_index < stage_end_index; ) { - if (keys[sorted_index].isInvalid()) { - ++sorted_index; - continue; - } - smiles_offsets[unique_index] = smiles_offset; - - const uint64_t id0 = keys[sorted_index].id0; - const uint64_t id1 = keys[sorted_index].id1; - num_nodes[unique_index] = keys[sorted_index].num_nodes; - num_edges[unique_index] = keys[sorted_index].num_edges; - - // Copy the string - const std::string& smiles_string = smiles_strings[keys[sorted_index].mol_index]; - const size_t smiles_length = smiles_string.size(); - memcpy(concatenated_smiles.get() + smiles_offset, smiles_string.c_str(), smiles_length); - smiles_offset += smiles_length; - // Don't forget the terminating zero - concatenated_smiles[smiles_offset] = 0; - ++smiles_offset; - - ++unique_index; - ++sorted_index; - while (sorted_index < stage_end_index && keys[sorted_index].id0 == id0 && keys[sorted_index].id1 == id1) { - ++sorted_index; - } - } - smiles_offsets[unique_index] = smiles_offset; - - save_array_to_file(stage_paths[stage_index], concat_smiles_filename, concatenated_smiles.get(), concatenated_smiles_size); - save_array_to_file(stage_paths[stage_index], smiles_offsets_filename, smiles_offsets.get(), num_unique_mols+1); - save_array_to_file(stage_paths[stage_index], num_nodes_filename, num_nodes.get(), num_unique_mols); - save_array_to_file(stage_paths[stage_index], num_edges_filename, num_edges.get(), num_unique_mols); - - const int64_t concatenated_smiles_dims[1] = { int64_t(concatenated_smiles_size) }; - at::Tensor smiles_tensor = torch_tensor_from_array(std::move(concatenated_smiles), concatenated_smiles_dims, 1, c10::ScalarType::Char); - const int64_t smiles_offsets_dims[1] = { int64_t(num_unique_mols+1) }; - at::Tensor smiles_offsets_tensor = torch_tensor_from_array(std::move(smiles_offsets), smiles_offsets_dims, 1, c10::ScalarType::Long); - const 
int64_t num_nodes_dims[1] = { int64_t(num_unique_mols) }; - at::Tensor num_nodes_tensor = torch_tensor_from_array(std::move(num_nodes), num_nodes_dims, 1, c10::ScalarType::Int); - const int64_t num_edges_dims[1] = { int64_t(num_unique_mols) }; - at::Tensor num_edges_tensor = torch_tensor_from_array(std::move(num_edges), num_edges_dims, 1, c10::ScalarType::Int); - - std::vector stage_return_data({ - std::move(data_offsets_tensor), - std::move(smiles_tensor), - std::move(smiles_offsets_tensor), - std::move(num_nodes_tensor), - std::move(num_edges_tensor) - }); - per_stage_return_data.insert(std::make_pair(stages[stage_index], std::move(stage_return_data))); + per_stage_return_data[stages[stage_index]].push_back(std::move(data_offsets_tensor)); mol_data_offsets.resize(0); } diff --git a/graphium/graphium_cpp/labels.h b/graphium/graphium_cpp/labels.h index 73966450e..30498750d 100644 --- a/graphium/graphium_cpp/labels.h +++ b/graphium/graphium_cpp/labels.h @@ -37,6 +37,8 @@ std::vector load_stats( const std::string data_hash, const std::string task_name); +std::pair concatenate_strings(pybind11::handle handle); + std::tuple< std::unordered_map>, std::unordered_map>, From 86abf21df6f4c5b811c6231b2adc781cce419bc9 Mon Sep 17 00:00:00 2001 From: ndickson Date: Thu, 25 Apr 2024 18:55:28 -0400 Subject: [PATCH 016/175] Changed prepare_data to support having no label data --- graphium/data/datamodule.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/graphium/data/datamodule.py b/graphium/data/datamodule.py index 2dbb550d8..a44f0f33d 100644 --- a/graphium/data/datamodule.py +++ b/graphium/data/datamodule.py @@ -1013,12 +1013,13 @@ def prepare_data(self): "val": graphium_cpp.load_metadata_tensors(self.processed_graph_data_path, "val", self.data_hash), "test": graphium_cpp.load_metadata_tensors(self.processed_graph_data_path, "test", self.data_hash), } - for task in self.task_dataset_processing_params.keys(): - stats = 
graphium_cpp.load_stats(self.processed_graph_data_path, self.data_hash, task) - if len(stats) < 4: - raise RuntimeError(f"Error loading cached stats for task \"{task}\"") + if len(self.label_num_cols) > 0: + for task in self.task_dataset_processing_params.keys(): + stats = graphium_cpp.load_stats(self.processed_graph_data_path, self.data_hash, task) + if len(stats) < 4: + raise RuntimeError(f"Error loading cached stats for task \"{task}\"") - self.task_norms[task].set_statistics(stats[0], stats[1], stats[2], stats[3]) + self.task_norms[task].set_statistics(stats[0], stats[1], stats[2], stats[3]) return task_dataset_args = {} @@ -1082,12 +1083,11 @@ def prepare_data(self): # Store the relevant information for each task's dataset task_dataset_args[task] = { "smiles": smiles, - "labels": labels, - "label_offsets": label_offsets, - # sample_idx is not needed here anymore - #"sample_idx": sample_idx, "extras": extras, } + if args.label_cols != 0: + task_dataset_args[task]["labels"] = labels + task_dataset_args[task]["label_offsets"] = label_offsets """We split the data up to create train, val and test datasets""" From bd590981f547affd224502aabb3825cd3629dcce Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 25 Apr 2024 21:37:45 -0400 Subject: [PATCH 017/175] Removed ipu metrics, since not compatible with latest torchmetrics --- graphium/ipu/ipu_metrics.py | 907 ------------------------------------ graphium/utils/spaces.py | 50 +- tests/test_ipu_metrics.py | 774 ------------------------------ 3 files changed, 19 insertions(+), 1712 deletions(-) delete mode 100644 graphium/ipu/ipu_metrics.py delete mode 100644 tests/test_ipu_metrics.py diff --git a/graphium/ipu/ipu_metrics.py b/graphium/ipu/ipu_metrics.py deleted file mode 100644 index 9029d3e00..000000000 --- a/graphium/ipu/ipu_metrics.py +++ /dev/null @@ -1,907 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and 
Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. --------------------------------------------------------------------------------- -""" - - -from typing import Optional, Tuple, Sequence, Literal - -import torch -from torch import BoolTensor, IntTensor, Tensor -from torchmetrics.functional import auroc, average_precision, pearson_corrcoef, r2_score -from torchmetrics.utilities.checks import _input_squeeze -from torchmetrics.functional.classification.accuracy import ( - _mode, - _check_subset_validity, - _accuracy_compute, - _accuracy_update, -) -from torchmetrics.functional.classification.precision_recall import _precision_compute, _recall_compute -from torchmetrics.functional.classification.f_beta import _fbeta_compute -from torchmetrics.functional import mean_squared_error, mean_absolute_error -from torchmetrics.utilities.checks import _input_squeeze -from torchmetrics.utilities.enums import AverageMethod - -from graphium.utils.tensor import nan_mean -from graphium.ipu.ipu_utils import import_poptorch - - -def auroc_ipu( - preds: Tensor, - target: Tensor, - num_classes: Optional[int] = None, - task: Optional[Literal["binary", "multiclass", "multilabel"]] = None, - pos_label: Optional[int] = None, - average: Optional[str] = "macro", - max_fpr: Optional[float] = None, - sample_weights: Optional[Sequence] = None, -): - """ - A modified version of the `torchmetrics.functional.auroc` that can ignore NaNs - by giving them the same value for both `preds` and `target`. - This allows it to work with compilation - and IPUs since it doesn't modify the tensor's shape. 
- """ - - target = target.clone() - preds = preds.clone() - - # Replace the nan-targets in the preds/target tensors by 0 - nan_targets = target.isnan() - preds[nan_targets] = 0.0 - target[nan_targets] = 0.0 - - # Get the original weight matrix. If None, set all weights = 1 - if sample_weights is None: - sample_weights = torch.ones(target.shape[0], dtype=preds.dtype, device=preds.device) - sample_weights[nan_targets] = 0.0 - - # Compute the loss, and rescale by the number of nan elements - score = auroc( - preds=preds, - target=target.to(int), - num_classes=num_classes, - task=task, - pos_label=pos_label, - average=average, - max_fpr=max_fpr, - sample_weights=sample_weights, - ) - - return score - - -def average_precision_ipu( - preds: Tensor, - target: Tensor, - num_classes: Optional[int] = None, - task: Optional[Literal["binary", "multiclass", "multilabel"]] = None, - ignore_index: Optional[int] = None, - pos_label: Optional[int] = None, - average: Optional[str] = "macro", - sample_weights: Optional[Sequence] = None, -): - """ - A modified version of the `torchmetrics.functional.average_precision` that can ignore NaNs - by giving them the same value for both `preds` and `target`. - This allows it to work with compilation - and IPUs since it doesn't modify the tensor's shape. - """ - - target = target.clone() - preds = preds.clone() - - # Replace the nan-targets in the preds/target tensors by 0 - # Average precision is not sensitive to true negatives - nan_targets = target.isnan() - preds[nan_targets] = 0.0 - target[nan_targets] = 0.0 - - # No need to use sample weights (which is no longer supported in torchmetrics >=0.10) - # # Get the original weight matrix. 
If None, set all weights = 1 - # if sample_weights is None: - # sample_weights = torch.ones(target.shape[0], dtype=preds.dtype, device=preds.device) - # sample_weights[nan_targets] = 0.0 - - # Compute the loss, and rescale by the number of nan elements - score = average_precision( - preds=preds, - target=target, - num_classes=num_classes, - task=task, - ignore_index=ignore_index, - pos_label=pos_label, - average=average, - # sample_weights=sample_weights, - ) - - return score - - -def precision_ipu( - preds: Tensor, - target: Tensor, - average: Optional[str] = "micro", - mdmc_average: Optional[str] = None, - ignore_index: Optional[int] = None, - num_classes: Optional[int] = None, - threshold: float = 0.5, - top_k: Optional[int] = None, - multiclass: Optional[bool] = None, -): - """ - A modified version of the `torchmetrics.functional.precision` that can ignore NaNs - by giving them the same value for both `preds` and `target`. - This allows it to work with compilation - and IPUs since it doesn't modify the tensor's shape. - """ - - (tp, fp, tn, fn), mode = get_confusion_matrix( - preds=preds, - target=target, - average=average, - mdmc_average=mdmc_average, - threshold=threshold, - top_k=top_k, - subset_accuracy=False, - num_classes=num_classes, - multiclass=multiclass, - ignore_index=ignore_index, - ) - - return _precision_compute(tp, fp, fn, average, mdmc_average) - - -def recall_ipu( - preds: Tensor, - target: Tensor, - average: Optional[str] = "micro", - mdmc_average: Optional[str] = None, - ignore_index: Optional[int] = None, - num_classes: Optional[int] = None, - threshold: float = 0.5, - top_k: Optional[int] = None, - multiclass: Optional[bool] = None, -): - """ - A modified version of the `torchmetrics.functional.recall` that can ignore NaNs - by giving them the same value for both `preds` and `target`. - This allows it to work with compilation - and IPUs since it doesn't modify the tensor's shape. 
- """ - - (tp, fp, tn, fn), mode = get_confusion_matrix( - preds=preds, - target=target, - average=average, - mdmc_average=mdmc_average, - threshold=threshold, - top_k=top_k, - num_classes=num_classes, - multiclass=multiclass, - ignore_index=ignore_index, - ) - - return _recall_compute(tp, fp, fn, average, mdmc_average) - - -def accuracy_ipu( - preds: Tensor, - target: Tensor, - average: Optional[str] = "micro", - mdmc_average: Optional[str] = "global", - threshold: float = 0.5, - top_k: Optional[int] = None, - subset_accuracy: bool = False, - num_classes: Optional[int] = None, - multiclass: Optional[bool] = None, - ignore_index: Optional[int] = None, -) -> Tensor: - """ - A modified version of the `torchmetrics.functional.accuracy` that can ignore NaNs - by giving them the same value for both `preds` and `target`. - This allows it to work with compilation - and IPUs since it doesn't modify the tensor's shape. - - Args: - preds: Predictions from model (probabilities, logits or labels) - target: Ground truth labels - average: - Defines the reduction that is applied. Should be one of the following: - - - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. - - ``'macro'``: Calculate the metric for each class separately, and average the - metrics across classes (with equal weights for each class). - - ``'weighted'``: Calculate the metric for each class separately, and average the - metrics across classes, weighting each class by its support (``tp + fn``). - - ``'none'`` or ``None``: Calculate the metric for each class separately, and return - the metric for every class. - - ``'samples'``: Calculate the metric for each sample, and average the metrics - across samples (with equal weights for each sample). - - .. note:: What is considered a sample in the multi-dimensional multi-class case - depends on the value of ``mdmc_average``. - - .. 
note:: If ``'none'`` and a given class doesn't occur in the ``preds`` or ``target``, - the value for the class will be ``nan``. - - mdmc_average: - Defines how averaging is done for multi-dimensional multi-class inputs (on top of the - ``average`` parameter). Should be one of the following: - - - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional multi-class. - - - ``'samplewise'``: In this case, the statistics are computed separately for each - sample on the ``N`` axis, and then averaged over samples. - The computation for each sample is done by treating the flattened extra axes ``...`` - (see :ref:`pages/classification:input types`) as the ``N`` dimension within the sample, - and computing the metric for the sample based on that. - - - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs - (see :ref:`pages/classification:input types`) - are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they - were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. - - num_classes: - Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. - - threshold: - Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - top_k: - Number of the highest probability or logit score predictions considered finding the correct label, - relevant only for (multi-dimensional) multi-class inputs. The - default value (``None``) will be interpreted as 1 for these inputs. - - Should be left at default (``None``) for all other types of inputs. - multiclass: - Used only in certain special cases, where you want to treat inputs as a different type - than what they appear to be. See the parameter's - :ref:`documentation section ` - for a more detailed explanation and examples. 
- ignore_index: - Integer specifying a target class to ignore. If given, this class index does not contribute - to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` - or ``'none'``, the score for the ignored class will be returned as ``nan``. - subset_accuracy: - Whether to compute subset accuracy for multi-label and multi-dimensional - multi-class inputs (has no effect for other input types). - - - For multi-label inputs, if the parameter is set to ``True``, then all labels for - each sample must be correctly predicted for the sample to count as correct. If it - is set to ``False``, then all labels are counted separately - this is equivalent to - flattening inputs beforehand (i.e. ``preds = preds.flatten()`` and same for ``target``). - - - For multi-dimensional multi-class inputs, if the parameter is set to ``True``, then all - sub-sample (on the extra axis) must be correct for the sample to be counted as correct. - If it is set to ``False``, then all sub-samples are counter separately - this is equivalent, - in the case of label predictions, to flattening the inputs beforehand (i.e. - ``preds = preds.flatten()`` and same for ``target``). Note that the ``top_k`` parameter - still applies in both cases, if set. - - Raises: - ValueError: - If ``top_k`` parameter is set for ``multi-label`` inputs. - ValueError: - If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``. - ValueError: - If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``. - ValueError: - If ``average`` is set but ``num_classes`` is not provided. - ValueError: - If ``num_classes`` is set - and ``ignore_index`` is not in the range ``[0, num_classes)``. - ValueError: - If ``top_k`` is not an ``integer`` larger than ``0``. 
- """ - - (tp, fp, tn, fn), mode = get_confusion_matrix( - preds=preds, - target=target, - average=average, - mdmc_average=mdmc_average, - threshold=threshold, - top_k=top_k, - subset_accuracy=subset_accuracy, - num_classes=num_classes, - multiclass=multiclass, - ignore_index=ignore_index, - ) - - return _accuracy_compute(tp, fp, tn, fn, average, mdmc_average, mode) - - -def get_confusion_matrix( - preds: Tensor, - target: Tensor, - average: Optional[str] = "micro", - mdmc_average: Optional[str] = "global", - threshold: float = 0.5, - top_k: Optional[int] = None, - subset_accuracy: bool = False, - num_classes: Optional[int] = None, - multiclass: Optional[bool] = None, - ignore_index: Optional[int] = None, -) -> Tuple[Tuple[Tensor], Tensor]: - """ - Calculates the confusion matrix according to the specified average method. - - Args: - preds: Predictions from model (probabilities, logits or labels) - target: Ground truth labels - average: - Defines the reduction that is applied. Should be one of the following: - - - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. - - ``'macro'``: Calculate the metric for each class separately, and average the - metrics across classes (with equal weights for each class). - - ``'weighted'``: Calculate the metric for each class separately, and average the - metrics across classes, weighting each class by its support (``tp + fn``). - - ``'none'`` or ``None``: Calculate the metric for each class separately, and return - the metric for every class. - - ``'samples'``: Calculate the metric for each sample, and average the metrics - across samples (with equal weights for each sample). - - .. note:: What is considered a sample in the multi-dimensional multi-class case - depends on the value of ``mdmc_average``. - - .. note:: If ``'none'`` and a given class doesn't occur in the ``preds`` or ``target``, - the value for the class will be ``nan``. 
- - mdmc_average: - Defines how averaging is done for multi-dimensional multi-class inputs (on top of the - ``average`` parameter). Should be one of the following: - - - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional multi-class. - - - ``'samplewise'``: In this case, the statistics are computed separately for each - sample on the ``N`` axis, and then averaged over samples. - The computation for each sample is done by treating the flattened extra axes ``...`` - (see :ref:`pages/classification:input types`) as the ``N`` dimension within the sample, - and computing the metric for the sample based on that. - - - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs - (see :ref:`pages/classification:input types`) - are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they - were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. - - num_classes: - Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. - - threshold: - Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - top_k: - Number of the highest probability or logit score predictions considered finding the correct label, - relevant only for (multi-dimensional) multi-class inputs. The - default value (``None``) will be interpreted as 1 for these inputs. - - Should be left at default (``None``) for all other types of inputs. - multiclass: - Used only in certain special cases, where you want to treat inputs as a different type - than what they appear to be. See the parameter's - :ref:`documentation section ` - for a more detailed explanation and examples. - ignore_index: - Integer specifying a target class to ignore. If given, this class index does not contribute - to the returned score, regardless of reduction method. 
If an index is ignored, and ``average=None`` - """ - allowed_average = ["micro", "macro", "weighted", "samples", "none", None] - if average not in allowed_average: - raise ValueError(f"The `average` has to be one of {allowed_average}, got {average}.") - - if average in ["macro", "weighted", "none", None] and (not num_classes or num_classes < 1): - raise ValueError(f"When you set `average` as {average}, you have to provide the number of classes.") - - allowed_mdmc_average = [None, "samplewise", "global"] - if mdmc_average not in allowed_mdmc_average: - raise ValueError(f"The `mdmc_average` has to be one of {allowed_mdmc_average}, got {mdmc_average}.") - - if num_classes and ignore_index is not None and (not ignore_index < num_classes or num_classes == 1): - raise ValueError( - f"The `ignore_index` {ignore_index} is not valid for inputs with {num_classes} classes" - ) - - if top_k is not None and (not isinstance(top_k, int) or top_k <= 0): - raise ValueError(f"The `top_k` should be an integer larger than 0, got {top_k}") - - #### ADDED #### - # Put all the NaNs as the 0-class - nans = torch.isnan(target) - target[nans] = 0 - preds[nans] = 0 - if (preds.ndim > 1) and (preds.shape[1] > 1): - preds[nans, 0] = 1 - target = target.to(int) - #### END ADDED #### - - preds, target = _input_squeeze(preds, target) - mode = _mode(preds, target, threshold, top_k, num_classes, multiclass, ignore_index) - reduce = "macro" if average in ["weighted", "none", None] else average - - if subset_accuracy and _check_subset_validity(mode): - # correct, total = _subset_accuracy_update(preds, target, threshold, top_k, ignore_index) - # return _subset_accuracy_compute(correct, total) - raise NotImplementedError("subset_accuracy not implemented") - tp, fp, tn, fn = _accuracy_update( - preds, target, reduce, mdmc_average, threshold, num_classes, top_k, multiclass, ignore_index, mode - ) - - #### ADDED #### - num_nans = nans.sum(0) - if tp.numel() > 1: - tp[0] = tp[0] - num_nans - tn[1:] = 
tn[1:] - num_nans - else: - tn = tn - num_nans - if (preds.ndim > 1) and (preds.shape[1] > 1): - tp = tp - num_nans - #### END ADDED #### - - return (tp, fp, tn, fn), mode - - -class NaNTensor(Tensor): - """ - Class to create and manage a NaN tensor along it's properties - - The goal of the class is to override the regular tensor such that the basic - operations (sum, mean, max, etc) ignore the NaNs in the input. - It also supports NaNs in integer tensors (as the lowest integer possible). - """ - - @property - def get_nans(self) -> BoolTensor: - """ - Gets the boolean Tensor containing the location of NaNs. - In the case of an integer tensor, this returns where the tensor is equal to its minimal value - In the case of a boolean tensor, this returns a Tensor filled with `False` - """ - if self.is_floating_point(): - return self.isnan() - elif self.is_signed(): - return self == torch.iinfo(self.dtype).min - else: - return torch.zeros(self.shape, device=self.device, dtype=bool) - - def sum(self, *args, **kwargs) -> Tensor: - """ - Overloads the traditional sum to ignore the NaNs - """ - tensor = self.to(float) - tensor[self.get_nans] = float("nan") - if self.is_floating_point(): - dtype = self.dtype - else: - dtype = torch.int64 - return tensor.nansum(*args, **kwargs).to(dtype) - - def mean(self, *args, **kwargs) -> Tensor: - """ - Overloads the traditional mean to ignore the NaNs - """ - tensor = self.to(float) - tensor[self.get_nans] = float("nan") - return nan_mean(tensor, *args, **kwargs).to(self.dtype) - - def numel(self) -> int: - """ - Returns the number of non-NaN elements. 
- """ - return super(NaNTensor, ~self.get_nans).sum() - - def min(self, *args, **kwargs) -> Tensor: - """ - Returns the min vale of a tensor whitout NaNs - """ - tensor = self - tensor = tensor[~self.get_nans] - return super(NaNTensor, tensor).min(*args, **kwargs) - - def max(self, *args, **kwargs) -> Tensor: - """ - Returns the max vale of a tensor whitout NaNs - """ - tensor = self - tensor = tensor[~self.get_nans] - return super(NaNTensor, tensor).max(*args, **kwargs) - - def argsort(self, dim=-1, descending=False) -> IntTensor: - """ - Return the indices that sort the tensor, while putting all the NaNs to the end of the sorting. - """ - tensor = self - if descending: - tensor[tensor.get_nans] = float("-inf") - else: - tensor[tensor.get_nans] = float("inf") - return super(NaNTensor, tensor).argsort(dim=dim, descending=descending) - - def size(self, dim) -> Tensor: - """ - Instead of returning the size, return the number of non-NaN elements in - a specific dimension. Useful for the `r2_score` metric. - """ - return (~self.get_nans).sum(dim=dim) - - def __lt__(self, other) -> Tensor: - """ - Stupid fix that allows the code to work with `r2_score`, - since it requires the size to be > 2. But since `self.size` now returns - a Tensor instead of a value, we check that all elements are > 2. - """ - if (not isinstance(other, Tensor)) and (other == 2): - return super().__lt__(other).all() - else: - return super().__lt__(other) - - @classmethod - def __torch_function__(cls, func, types, args=(), kwargs=None): - """ - This __torch_function__ implementation wraps subclasses such that - methods called on subclasses return a subclass instance instead of - a ``torch.Tensor`` instance. - - One corollary to this is that you need coverage for torch.Tensor - methods if implementing __torch_function__ for subclasses. 
- - Affects the call torch.sum() as to behave the same way as NaNTensor.sum() - - We recommend always calling ``super().__torch_function__`` as the base - case when doing the above. - - While not mandatory, we recommend making `__torch_function__` a classmethod. - """ - if func.__name__ == "sum": - kwargs = {} if kwargs is None else kwargs - return args[0].sum(*args[1:], **kwargs) - else: - return super().__torch_function__(func, types, args=args, kwargs=kwargs) - - -def pearson_ipu(preds, target): - """Computes pearson correlation coefficient. - - Handles NaNs in the target without reshaping tensors in order to work on IPU. - - Args: - preds: estimated scores - target: ground truth scores - """ - preds = NaNTensor(preds) - target = NaNTensor(target) - preds[target.get_nans] = float("nan") - pearson = pearson_corrcoef(preds, target.to(preds.dtype)) - return Tensor(pearson) - - -def spearman_ipu(preds, target): - """Computes spearman rank correlation coefficient. - - Handles NaNs in the target without reshaping tensors in order to work on IPU. - - Args: - preds: estimated scores - target: ground truth scores - """ - nans = target.isnan() - dtype = preds.dtype - preds[nans] = float("inf") - target[nans] = float("inf") - preds_sort = _rank_data(preds).to(dtype=dtype) - target_sort = _rank_data(target).to(dtype=dtype) - target_sort[nans] = float("nan") - spearman = pearson_ipu(preds_sort, target_sort) - return Tensor(spearman) - - -def _rank_data(data: Tensor) -> Tensor: - """Calculate the rank for each element of a tensor. - - The rank refers to the indices of an element in the corresponding sorted tensor (starting from 1). - Duplicates of the same value will be assigned the mean of their rank. 
- - Adopted from `Rank of element tensor`_ - """ - n = data.numel() - rank = torch.empty_like(data) - idx = data.argsort() - rank[idx] = torch.arange(1, n + 1, dtype=data.dtype, device=data.device) - - # TODO: Repeats not yet supported - # repeats = _find_repeats(data) - # for r in repeats: - # condition = data == r - # rank[condition] = rank[condition].mean() - return rank - - -def r2_score_ipu(preds, target, *args, **kwargs) -> Tensor: - """ - Computes r2 score also known as `R2 Score_Coefficient Determination`_: - - .. math:: R^2 = 1 - \frac{SS_{res}}{SS_{tot}} - - where :math:`SS_{res}=\sum_i (y_i - f(x_i))^2` is the sum of residual squares, and - :math:`SS_{tot}=\sum_i (y_i - \bar{y})^2` is total sum of squares. Can also calculate - adjusted r2 score given by - - .. math:: R^2_{adj} = 1 - \frac{(1-R^2)(n-1)}{n-k-1} - - where the parameter :math:`k` (the number of independent regressors) should - be provided as the ``adjusted`` argument. - Handles NaNs without reshaping tensors in order to work on IPU. - - Args: - preds: estimated labels - target: ground truth labels - adjusted: number of independent regressors for calculating adjusted r2 score. - multioutput: Defines aggregation in the case of multiple output scores. 
Can be one of the following strings: - - * ``'raw_values'`` returns full set of scores - * ``'uniform_average'`` scores are uniformly averaged - * ``'variance_weighted'`` scores are weighted by their individual variances - """ - preds = NaNTensor(preds) - target = NaNTensor(target) - preds[target.get_nans] = float("nan") - score = r2_score(preds, target, *args, **kwargs) - return Tensor(score) - - -def fbeta_score_ipu( - preds: Tensor, - target: Tensor, - beta: float = 1.0, - average: Optional[str] = "micro", - mdmc_average: Optional[str] = None, - ignore_index: Optional[int] = None, - num_classes: Optional[int] = None, - threshold: float = 0.5, - top_k: Optional[int] = None, - multiclass: Optional[bool] = None, -): - """ - A modified version of the `torchmetrics.functional.classification.f_beta._fbeta_compute` - that can ignore NaNs by giving them the same value for both `preds` and `target`. - This allows it to work with compilation - and IPUs since it doesn't modify the tensor's shape. - - Args: - preds: Predictions from model (probabilities, logits or labels) - target: Ground truth labels - average: - Defines the reduction that is applied. Should be one of the following: - - - ``'micro'`` [default]: Calculate the metric globally, across all samples and classes. - - ``'macro'``: Calculate the metric for each class separately, and average the - metrics across classes (with equal weights for each class). - - ``'weighted'``: Calculate the metric for each class separately, and average the - metrics across classes, weighting each class by its support (``tp + fn``). - - ``'none'`` or ``None``: Calculate the metric for each class separately, and return - the metric for every class. - - ``'samples'``: Calculate the metric for each sample, and average the metrics - across samples (with equal weights for each sample). - - .. note:: What is considered a sample in the multi-dimensional multi-class case - depends on the value of ``mdmc_average``. - - .. 
note:: If ``'none'`` and a given class doesn't occur in the ``preds`` or ``target``, - the value for the class will be ``nan``. - - mdmc_average: - Defines how averaging is done for multi-dimensional multi-class inputs (on top of the - ``average`` parameter). Should be one of the following: - - - ``None`` [default]: Should be left unchanged if your data is not multi-dimensional multi-class. - - - ``'samplewise'``: In this case, the statistics are computed separately for each - sample on the ``N`` axis, and then averaged over samples. - The computation for each sample is done by treating the flattened extra axes ``...`` - (see :ref:`pages/classification:input types`) as the ``N`` dimension within the sample, - and computing the metric for the sample based on that. - - - ``'global'``: In this case the ``N`` and ``...`` dimensions of the inputs - (see :ref:`pages/classification:input types`) - are flattened into a new ``N_X`` sample axis, i.e. the inputs are treated as if they - were ``(N_X, C)``. From here on the ``average`` parameter applies as usual. - - num_classes: - Number of classes. Necessary for ``'macro'``, ``'weighted'`` and ``None`` average methods. - - threshold: - Threshold for transforming probability or logit predictions to binary (0,1) predictions, in the case - of binary or multi-label inputs. Default value of 0.5 corresponds to input being probabilities. - top_k: - Number of the highest probability or logit score predictions considered finding the correct label, - relevant only for (multi-dimensional) multi-class inputs. The - default value (``None``) will be interpreted as 1 for these inputs. - - Should be left at default (``None``) for all other types of inputs. - multiclass: - Used only in certain special cases, where you want to treat inputs as a different type - than what they appear to be. See the parameter's - :ref:`documentation section ` - for a more detailed explanation and examples. 
- ignore_index: - Integer specifying a target class to ignore. If given, this class index does not contribute - to the returned score, regardless of reduction method. If an index is ignored, and ``average=None`` - or ``'none'``, the score for the ignored class will be returned as ``nan``. - subset_accuracy: - Whether to compute subset accuracy for multi-label and multi-dimensional - multi-class inputs (has no effect for other input types). - - - For multi-label inputs, if the parameter is set to ``True``, then all labels for - each sample must be correctly predicted for the sample to count as correct. If it - is set to ``False``, then all labels are counted separately - this is equivalent to - flattening inputs beforehand (i.e. ``preds = preds.flatten()`` and same for ``target``). - - - For multi-dimensional multi-class inputs, if the parameter is set to ``True``, then all - sub-sample (on the extra axis) must be correct for the sample to be counted as correct. - If it is set to ``False``, then all sub-samples are counter separately - this is equivalent, - in the case of label predictions, to flattening the inputs beforehand (i.e. - ``preds = preds.flatten()`` and same for ``target``). Note that the ``top_k`` parameter - still applies in both cases, if set. - - Raises: - ValueError: - If ``top_k`` parameter is set for ``multi-label`` inputs. - ValueError: - If ``average`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"samples"``, ``"none"``, ``None``. - ValueError: - If ``mdmc_average`` is not one of ``None``, ``"samplewise"``, ``"global"``. - ValueError: - If ``average`` is set but ``num_classes`` is not provided. - ValueError: - If ``num_classes`` is set - and ``ignore_index`` is not in the range ``[0, num_classes)``. - ValueError: - If ``top_k`` is not an ``integer`` larger than ``0``. 
- """ - - (tp, fp, tn, fn), mode = get_confusion_matrix( - preds=preds, - target=target, - average=average, - mdmc_average=mdmc_average, - ignore_index=ignore_index, - num_classes=num_classes, - threshold=threshold, - top_k=top_k, - multiclass=multiclass, - ) - - b2 = beta**2 - fbeta = ((1 + b2) * tp) / ((1 + b2) * tp + b2 * fn + fp) - - if average in (None, "none", AverageMethod.NONE): - pass - elif average == AverageMethod.MICRO: - pass - elif average == AverageMethod.MACRO: - fbeta = fbeta.mean() - elif average == AverageMethod.WEIGHTED: - weights = tp + fn - fbeta = (weights * fbeta).sum() / weights.sum() - else: - raise ValueError( - f"`average={average}` not yet supported. Chose between None, Micro, Macro, or Weighted" - ) - - return fbeta - - -def f1_score_ipu( - preds: Tensor, - target: Tensor, - beta: float = 1.0, - average: Optional[str] = "micro", - mdmc_average: Optional[str] = None, - ignore_index: Optional[int] = None, - num_classes: Optional[int] = None, - threshold: float = 0.5, - top_k: Optional[int] = None, - multiclass: Optional[bool] = None, -): - """ - A modified version of the `torchmetrics.functional.classification.f_beta._fbeta_compute` - that can ignore NaNs by giving them the same value for both `preds` and `target`. - Used to calculate the f1_score on IPU with beta parameter equal to 1.0 - This allows it to work with compilation and IPUs since it doesn't modify the tensor's shape. - - Computes f_beta metric from stat scores: true positives, false positives, true negatives, false negatives. - - Args: - tp: True positives - fp: False positives - tn: True negatives - fn: False negatives - beta: The parameter `beta` (which determines the weight of recall in the combined score) - ignore_index: Integer specifying a target class to ignore. 
If given, this class index does not contribute - to the returned score, regardless of reduction method - average: Defines the reduction that is applied - mdmc_average: Defines how averaging is done for multi-dimensional multi-class inputs (on top of the - ``average`` parameter) - """ - - return fbeta_score_ipu( - preds, - target, - beta=beta, - average=average, - mdmc_average=mdmc_average, - ignore_index=ignore_index, - num_classes=num_classes, - threshold=threshold, - top_k=top_k, - multiclass=multiclass, - ) - - -def mean_squared_error_ipu(preds: Tensor, target: Tensor, squared: bool) -> Tensor: - """Computes mean squared error. - - Handles NaNs without reshaping tensors in order to work on IPU. - - Args: - preds: estimated labels - target: ground truth labels - squared: returns RMSE value if set to False - - Return: - Tensor with MSE - """ - target = target.clone() - preds = preds.clone() - - # Replace the nan-targets in the preds/target tensors by 0 - nan_targets = target.isnan() - preds[nan_targets] = 0.0 - target[nan_targets] = 0.0 - - # Compute the loss, and rescale by the number of nan elements - loss = mean_squared_error(preds, target, squared) - - if squared: - factor = nan_targets.numel() / ((~nan_targets).sum()) - else: - factor = (nan_targets.numel() / ((~nan_targets).sum())).sqrt() - - loss = loss * factor - - return loss - - -def mean_absolute_error_ipu(preds: Tensor, target: Tensor) -> Tensor: - """Computes mean absolute error. - - Handles NaNs without reshaping tensors in order to work on IPU. 
- - Args: - preds: estimated labels - target: ground truth labels - - Return: - Tensor with MAE - """ - target = target.clone() - preds = preds.clone() - - # Replace the nan-targets in the preds/target tensors by 0 - nan_targets = target.isnan() - preds[nan_targets] = 0.0 - target[nan_targets] = 0.0 - - # Compute the loss, and rescale by the number of nan elements - loss = mean_absolute_error(preds, target) - loss = loss * nan_targets.numel() / ((~nan_targets).sum()) - - return loss diff --git a/graphium/utils/spaces.py b/graphium/utils/spaces.py index 88812c0be..97b376ffa 100644 --- a/graphium/utils/spaces.py +++ b/graphium/utils/spaces.py @@ -15,7 +15,7 @@ from copy import deepcopy import torch import torch.optim.lr_scheduler as sc -import torchmetrics.functional as TorchMetrics +import torchmetrics as TorchMetrics import graphium.nn.base_layers as BaseLayers import graphium.nn.ensemble_layers as EnsembleLayers @@ -102,39 +102,27 @@ } METRICS_CLASSIFICATION = { - "accuracy": TorchMetrics.accuracy, - "averageprecision": TorchMetrics.average_precision, - "auroc": TorchMetrics.auroc, - "confusionmatrix": TorchMetrics.confusion_matrix, - "f1": TorchMetrics.f1_score, - "fbeta": TorchMetrics.fbeta_score, - "precisionrecallcurve": TorchMetrics.precision_recall_curve, - "precision": TorchMetrics.precision, - "recall": TorchMetrics.recall, - "mcc": TorchMetrics.matthews_corrcoef, - "auroc_ipu": Metrics.auroc_ipu, - "accuracy_ipu": Metrics.accuracy_ipu, - "average_precision_ipu": Metrics.average_precision_ipu, - "f1_ipu": Metrics.f1_score_ipu, - "fbeta_ipu": Metrics.fbeta_score_ipu, - "precision_ipu": Metrics.precision_ipu, - "recall_ipu": Metrics.recall_ipu, + "accuracy": TorchMetrics.Accuracy, + "averageprecision": TorchMetrics.AveragePrecision, + "auroc": TorchMetrics.AUROC, + "confusionmatrix": TorchMetrics.ConfusionMatrix, + "f1": TorchMetrics.F1Score, + "fbeta": TorchMetrics.FBetaScore, + "precisionrecallcurve": TorchMetrics.PrecisionRecallCurve, + "precision": 
TorchMetrics.Precision, + "recall": TorchMetrics.Recall, + "mcc": TorchMetrics.MatthewsCorrCoef, } METRICS_REGRESSION = { - "mae": TorchMetrics.mean_absolute_error, - "mape": TorchMetrics.mean_absolute_percentage_error, - "mse": TorchMetrics.mean_squared_error, - "msle": TorchMetrics.mean_squared_log_error, - "pearsonr": TorchMetrics.pearson_corrcoef, - "spearmanr": TorchMetrics.spearman_corrcoef, - "r2_score": TorchMetrics.r2_score, - "cosine": TorchMetrics.cosine_similarity, - "pearsonr_ipu": Metrics.pearson_ipu, - "spearmanr_ipu": Metrics.spearman_ipu, - "r2_score_ipu": Metrics.r2_score_ipu, - "mae_ipu": Metrics.mean_absolute_error_ipu, - "mse_ipu": Metrics.mean_squared_error_ipu, + "mae": TorchMetrics.MeanAbsoluteError, + "mape": TorchMetrics.MeanAbsolutePercentageError, + "mse": TorchMetrics.MeanSquaredError, + "msle": TorchMetrics.MeanSquaredLogError, + "pearsonr": TorchMetrics.PearsonCorrCoef, + "spearmanr": TorchMetrics.SpearmanCorrCoef, + "r2_score": TorchMetrics.R2Score, + "cosine": TorchMetrics.CosineSimilarity, } METRICS_DICT = deepcopy(METRICS_CLASSIFICATION) diff --git a/tests/test_ipu_metrics.py b/tests/test_ipu_metrics.py deleted file mode 100644 index ee4801e7b..000000000 --- a/tests/test_ipu_metrics.py +++ /dev/null @@ -1,774 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. 
--------------------------------------------------------------------------------- -""" - - -import unittest as ut -import torch -from torchmetrics.functional import ( - auroc, - average_precision, - precision, - accuracy, - recall, - pearson_corrcoef, - spearman_corrcoef, - r2_score, - f1_score, - fbeta_score, - mean_squared_error, - mean_absolute_error, -) -from copy import deepcopy -import pytest - -from graphium.ipu.ipu_metrics import ( - auroc_ipu, - average_precision_ipu, - precision_ipu, - accuracy_ipu, - recall_ipu, - pearson_ipu, - spearman_ipu, - r2_score_ipu, - f1_score_ipu, - fbeta_score_ipu, - mean_squared_error_ipu, - mean_absolute_error_ipu, -) - - -@pytest.mark.ipu -class test_Metrics(ut.TestCase): - torch.manual_seed(42) - preds = torch.rand((100, 10), dtype=torch.float32) - target = torch.rand((100, 10), dtype=torch.float32) - - th = 0.7 - nan_th = 0.2 - preds_greater = preds > th - target_greater = (target > th).to(torch.float32) - target_greater_nan = deepcopy(target_greater) - is_nan = target < nan_th - target_greater_nan[target < nan_th] = torch.nan - target_nan = deepcopy(target) - target_nan[target < nan_th] = torch.nan - - def test_auroc(self): - preds = deepcopy(self.preds)[:, 0] - target = deepcopy(self.target)[:, 0] - target_nan = deepcopy(self.target_nan)[:, 0] - - target[target < 0.5] = 0 - target[target >= 0.5] = 1 - - target_nan[target_nan < 0.5] = 0 - target_nan[target_nan >= 0.5] = 1 - - # Regular loss - score_true = auroc(preds, target.to(int)) - score_ipu = auroc_ipu(preds, target) - self.assertFalse(score_true.isnan(), "Regular AUROC score is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Regular AUROC score is different" - ) - - # Weighted loss (As in BCE) - sample_weights = torch.rand(preds.shape[0], dtype=torch.float32) - score_true = auroc(preds, target.to(int), sample_weights=sample_weights) - score_ipu = auroc_ipu(preds, target, sample_weights=sample_weights) - 
self.assertFalse(score_true.isnan(), "Regular AUROC score is NaN") - self.assertAlmostEqual(score_true.item(), score_ipu.item(), msg="Weighted AUROC score is different") - - # Regular loss with NaNs in target - not_nan = ~target_nan.isnan() - score_true = auroc(preds[not_nan], target[not_nan].to(int)) - score_ipu = auroc_ipu(preds, target_nan) - self.assertFalse(score_true.isnan(), "Regular AUROC score with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Regular AUROCIPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Regular AUROC score with NaN is different" - ) - - # Weighted loss with NaNs in target (As in BCE) - not_nan = ~target_nan.isnan() - sample_weights = torch.rand(preds.shape, dtype=torch.float32) - loss_true = auroc(preds[not_nan], target_nan[not_nan].to(int), sample_weights=sample_weights[not_nan]) - loss_ipu = auroc_ipu(preds, target_nan, sample_weights=sample_weights) - self.assertFalse(loss_true.isnan(), "Weighted AUROC score with target_nan is NaN") - self.assertFalse(loss_ipu.isnan(), "Weighted AUROC IPU score with target_nan is NaN") - self.assertAlmostEqual( - # AssertionError: 0.6603766679763794 != 0.6234951615333557 within 2 places - loss_true.item(), - loss_ipu.item(), - places=6, - msg="Weighted AUROC with NaN is different", - ) - - def test_average_precision(self): # TODO: Make work with multi-class - preds = deepcopy(self.preds)[:, 0] - target = deepcopy(self.target)[:, 0] - target_nan = deepcopy(self.target_nan)[:, 0] - - target[target < 0.5] = 0 - target[target >= 0.5] = 1 - - target_nan[target_nan < 0.5] = 0 - target_nan[target_nan >= 0.5] = 1 - - # Regular loss - score_true = average_precision(preds, target.to(int), task="binary") - score_ipu = average_precision_ipu(preds, target.to(int), task="binary") - self.assertFalse(score_true.isnan(), "Regular Average Precision is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, 
msg="Regular Average Precision is different" - ) - - # Regular average precision with NaNs in target - not_nan = ~target_nan.isnan() - score_true = average_precision(preds[not_nan], target[not_nan].to(int), task="binary") - score_ipu = average_precision_ipu(preds, target_nan, task="binary") - self.assertFalse(score_true.isnan(), "Regular Average Precision with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Regular Average Precision IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), - score_ipu.item(), - places=6, - msg="Regular Average Precision with NaN is different", - ) - - def test_precision(self): - preds = deepcopy(self.preds)[:, :4] - target = deepcopy(self.target)[:, 0] - t = deepcopy(target) - - target[t < 0.4] = 0 - target[(t >= 0.4) & (t < 0.6)] = 1 - target[(t >= 0.6) & (t < 0.8)] = 2 - target[(t >= 0.8)] = 3 - - target_nan = deepcopy(target) - target_nan[self.is_nan[:, 0]] = float("nan") - target_nan_bin = deepcopy(target_nan) - target_nan_bin[target_nan > 0] = 1 - - # Micro precision binary - score_true = precision(preds[:, 0], target.to(int) > 0, average="micro") - score_ipu = precision_ipu(preds[:, 0], target > 0, average="micro") - self.assertFalse(score_true.isnan(), "Micro Precision binary is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Micro Precision binary is different" - ) - - # Micro precision binary with NaNs in target - not_nan = ~target_nan.isnan() - score_true = precision(preds[:, 0][not_nan], target_nan_bin[not_nan].to(int), average="micro") - score_ipu = precision_ipu(preds[:, 0], target_nan_bin, average="micro") - self.assertFalse(score_true.isnan(), "Micro Precision binary with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Micro Precision binary IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Micro Precision with NaN is different" - ) - - # Micro precision - score_true = 
precision(preds, target.to(int), average="micro") - score_ipu = precision_ipu(preds, target, average="micro") - self.assertFalse(score_true.isnan(), "Micro Precision is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Micro Precision is different" - ) - - # Micro precision with NaNs in target - not_nan = ~target_nan.isnan() - score_true = precision(preds[not_nan], target[not_nan].to(int), average="micro") - score_ipu = precision_ipu(preds, target_nan, average="micro") - self.assertFalse(score_true.isnan(), "Micro Precision with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Micro Precision IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Micro Precision with NaN is different" - ) - - # Macro precision - score_true = precision(preds, target.to(int), average="macro", num_classes=4) - score_ipu = precision_ipu(preds, target, average="macro", num_classes=4) - self.assertFalse(score_true.isnan(), "Macro Precision is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Macro Precision is different" - ) - - # Macro precision multiclass with NaNs in target - not_nan = ~target_nan.isnan() - score_true = precision(preds[not_nan], target[not_nan].to(int), average="macro", num_classes=4) - score_ipu = precision_ipu(preds, target_nan, average="macro", num_classes=4) - self.assertFalse(score_true.isnan(), "Macro Precision multiclass with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Macro Precision multiclass IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), - score_ipu.item(), - places=6, - msg="Macro Precision multiclass with NaN is different", - ) - - # Macro precision multiclass with NaNs in target - not_nan = ~target_nan.isnan() - score_true = precision(preds[not_nan], target[not_nan].to(int), average="macro", num_classes=4) - score_ipu = precision_ipu(preds, target_nan, 
average="macro", num_classes=4) - self.assertFalse(score_true.isnan(), "Macro Precision multiclass with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Macro Precision multiclass IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), - score_ipu.item(), - places=6, - msg="Macro Precision multiclass with NaN is different", - ) - - # Weighted precision multiclass - score_true = precision(preds, target.to(int), average="weighted", num_classes=4) - score_ipu = precision_ipu(preds, target, average="weighted", num_classes=4) - self.assertFalse(score_true.isnan(), "Weighted Precision multiclass is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Weighted Precision multiclass is different" - ) - - # Weighted precision multiclass with NaNs in target - not_nan = ~target_nan.isnan() - score_true = precision(preds[not_nan], target[not_nan].to(int), average="weighted", num_classes=4) - score_ipu = precision_ipu(preds, target_nan, average="weighted", num_classes=4) - self.assertFalse(score_true.isnan(), "Weighted Precision multiclass with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Weighted Precision multiclass IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), - score_ipu.item(), - places=6, - msg="Regular Average Precision multiclass with NaN is different", - ) - - def test_accuracy(self): - preds = deepcopy(self.preds)[:, :4] - target = deepcopy(self.target)[:, 0] - t = deepcopy(target) - - target[t < 0.4] = 0 - target[(t >= 0.4) & (t < 0.6)] = 1 - target[(t >= 0.6) & (t < 0.8)] = 2 - target[(t >= 0.8)] = 3 - - target_nan = deepcopy(target) - target_nan[self.is_nan[:, 0]] = float("nan") - target_nan_bin = deepcopy(target_nan) - target_nan_bin[target_nan > 0] = 1 - - # Micro accuracy binary - score_true = accuracy(preds[:, 0], target.to(int) > 0, average="micro") - score_ipu = accuracy_ipu(preds[:, 0], target > 0, average="micro") - 
self.assertFalse(score_true.isnan(), "Micro Accuracy binary is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Micro Accuracy binary is different" - ) - - # Micro accuracy binary with NaNs in target - not_nan = ~target_nan.isnan() - score_true = accuracy(preds[:, 0][not_nan], target_nan_bin[not_nan].to(int), average="micro") - score_ipu = accuracy_ipu(preds[:, 0], target_nan_bin, average="micro") - self.assertFalse(score_true.isnan(), "Micro Accuracy binary with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Micro Accuracy binary IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Micro Accuracy with NaN is different" - ) - - # Micro accuracy - score_true = accuracy(preds, target.to(int), average="micro") - score_ipu = accuracy_ipu(preds, target, average="micro") - self.assertFalse(score_true.isnan(), "Micro Accuracy is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Micro Accuracy is different" - ) - - # Micro accuracy with NaNs in target - not_nan = ~target_nan.isnan() - score_true = accuracy(preds[not_nan], target[not_nan].to(int), average="micro") - score_ipu = accuracy_ipu(preds, target_nan, average="micro") - self.assertFalse(score_true.isnan(), "Micro Accuracy with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Micro Accuracy IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Micro Accuracy with NaN is different" - ) - - # Macro accuracy - score_true = accuracy(preds, target.to(int), average="macro", num_classes=4) - score_ipu = accuracy_ipu(preds, target, average="macro", num_classes=4) - self.assertFalse(score_true.isnan(), "Macro Accuracy is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Macro Accuracy is different" - ) - - # Macro accuracy with NaNs in target - not_nan = ~target_nan.isnan() - 
score_true = accuracy(preds[not_nan], target[not_nan].to(int), average="macro", num_classes=4) - score_ipu = accuracy_ipu(preds, target_nan, average="macro", num_classes=4) - self.assertFalse(score_true.isnan(), "Macro Accuracy with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Macro Accuracy IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Macro Accuracy with NaN is different" - ) - - # Weighted accuracy - score_true = accuracy(preds, target.to(int), average="weighted", num_classes=4) - score_ipu = accuracy_ipu(preds, target, average="weighted", num_classes=4) - self.assertFalse(score_true.isnan(), "Weighted Accuracy is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Weighted Accuracy is different" - ) - - # Weighted accuracy with NaNs in target - not_nan = ~target_nan.isnan() - score_true = accuracy(preds[not_nan], target[not_nan].to(int), average="weighted", num_classes=4) - score_ipu = accuracy_ipu(preds, target_nan, average="weighted", num_classes=4) - self.assertFalse(score_true.isnan(), "Weighted Accuracy with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Weighted Accuracy IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Regular Accuracy with NaN is different" - ) - - def test_recall(self): - preds = deepcopy(self.preds)[:, :4] - target = deepcopy(self.target)[:, 0] - t = deepcopy(target) - - target[t < 0.4] = 0 - target[(t >= 0.4) & (t < 0.6)] = 1 - target[(t >= 0.6) & (t < 0.8)] = 2 - target[(t >= 0.8)] = 3 - - target_nan = deepcopy(target) - target_nan[self.is_nan[:, 0]] = float("nan") - target_nan_bin = deepcopy(target_nan) - target_nan_bin[target_nan > 0] = 1 - - # Micro recall binary - score_true = recall(preds[:, 0], target.to(int) > 0, average="micro") - score_ipu = recall_ipu(preds[:, 0], target > 0, average="micro") - self.assertFalse(score_true.isnan(), 
"Micro Recall binary is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Micro Recall binary is different" - ) - - # Micro recall binary with NaNs in target - not_nan = ~target_nan.isnan() - score_true = recall(preds[:, 0][not_nan], target_nan_bin[not_nan].to(int), average="micro") - score_ipu = recall_ipu(preds[:, 0], target_nan_bin, average="micro") - self.assertFalse(score_true.isnan(), "Micro Recall binary with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Micro Recall binary IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Micro Recall binary with NaN is different" - ) - - # Micro recall - score_true = recall(preds, target.to(int), average="micro") - score_ipu = recall_ipu(preds, target, average="micro") - self.assertFalse(score_true.isnan(), "Micro Recall is NaN") - self.assertAlmostEqual(score_true.item(), score_ipu.item(), places=6, msg="Micro Recall is different") - - # Micro recall with NaNs in target - not_nan = ~target_nan.isnan() - score_true = recall(preds[not_nan], target[not_nan].to(int), average="micro") - score_ipu = recall_ipu(preds, target_nan, average="micro") - self.assertFalse(score_true.isnan(), "Micro Recall with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Micro Recall IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Micro Recall with NaN is different" - ) - - # Macro recall multiclass - score_true = recall(preds, target.to(int), average="macro", num_classes=4) - score_ipu = recall_ipu(preds, target, average="macro", num_classes=4) - self.assertFalse(score_true.isnan(), "Macro Recall is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Macro Recall multiclass is different" - ) - - # Macro recall multiclass with NaNs in target - not_nan = ~target_nan.isnan() - score_true = recall(preds[not_nan], 
target[not_nan].to(int), average="macro", num_classes=4) - score_ipu = recall_ipu(preds, target_nan, average="macro", num_classes=4) - self.assertFalse(score_true.isnan(), "Macro Recall multiclass with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Macro Recall multiclass IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Macro Recall multiclass with NaN is different" - ) - - # Weighted recallmulticlass - score_true = recall(preds, target.to(int), average="weighted", num_classes=4) - score_ipu = recall_ipu(preds, target, average="weighted", num_classes=4) - self.assertFalse(score_true.isnan(), "Weighted Recall is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Weighted Recall is different" - ) - - # Weighted recall multiclass with NaNs in target - not_nan = ~target_nan.isnan() - score_true = recall(preds[not_nan], target[not_nan].to(int), average="weighted", num_classes=4) - score_ipu = recall_ipu(preds, target_nan, average="weighted", num_classes=4) - self.assertFalse(score_true.isnan(), "Weighted Recall multiclass with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Weighted Recall multiclass IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), - score_ipu.item(), - places=6, - msg="Regular Recall multiclass with NaN is different", - ) - - def test_pearsonr(self): - preds = deepcopy(self.preds)[:, 0] - target = deepcopy(self.target)[:, 0] + preds - target_nan = deepcopy(target) - target_nan[self.is_nan[:, 0]] = float("nan") - - # Regular loss - score_true = pearson_corrcoef(preds, target) - score_ipu = pearson_ipu(preds, target) - self.assertFalse(score_true.isnan(), "Pearson is NaN") - self.assertAlmostEqual(score_true.item(), score_ipu.item(), places=4, msg="Pearson is different") - - # Regular loss with NaNs in target - not_nan = ~target_nan.isnan() - score_true = pearson_corrcoef(preds[not_nan], 
target[not_nan]) - score_ipu = pearson_ipu(preds, target_nan) - self.assertFalse(score_true.isnan(), "Regular PearsonR with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "IPU PearsonR score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=4, msg="Pearson with NaN is different" - ) - - def test_spearmanr(self): - preds = deepcopy(self.preds)[:, 0] - target = deepcopy(self.target)[:, 0] + preds - target_nan = deepcopy(target) - target_nan[self.is_nan[:, 0]] = float("nan") - - # Regular loss - score_true = spearman_corrcoef(preds, target) - score_ipu = spearman_ipu(preds, target) - self.assertFalse(score_true.isnan(), "Spearman is NaN") - self.assertAlmostEqual(score_true.item(), score_ipu.item(), places=4, msg="Spearman is different") - - # Regular loss with NaNs in target - not_nan = ~target_nan.isnan() - score_true = spearman_corrcoef(preds[not_nan], target[not_nan]) - score_ipu = spearman_ipu(preds, target_nan) - self.assertFalse(score_true.isnan(), "Regular Spearman with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "IPU Spearman score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=4, msg="Spearman with NaN is different" - ) - - def test_r2_score(self): - preds = deepcopy(self.preds) - target = deepcopy(self.target) + preds - target_nan = deepcopy(target) - target_nan[self.is_nan] = float("nan") - - # Regular loss - score_true = r2_score(preds, target) - score_ipu = r2_score_ipu(preds, target) - self.assertFalse(score_true.isnan(), "r2_score is NaN") - self.assertAlmostEqual(score_true.item(), score_ipu.item(), places=4, msg="r2_score is different") - - # Regular loss with NaNs in target - not_nan = ~target_nan.isnan() - score_ipu = r2_score_ipu(preds, target_nan, multioutput="raw_values") - for ii in range(preds.shape[1]): - score_true = r2_score( - preds[:, ii][not_nan[:, ii]], target_nan[:, ii][not_nan[:, ii]], multioutput="raw_values" - ) 
- self.assertFalse(score_true.isnan().any(), f"{ii}: r2_score with target_nan is NaN") - self.assertFalse(score_ipu[ii].isnan().any(), f"{ii}: IPU r2_score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu[ii].item(), places=4, msg=f"{ii}: r2_score with NaN is different" - ) - - def test_fbeta_score(self): - preds = deepcopy(self.preds)[:, :4] - target = deepcopy(self.target)[:, 0] - t = deepcopy(target) - - target[t < 0.4] = 0 - target[(t >= 0.4) & (t < 0.6)] = 1 - target[(t >= 0.6) & (t < 0.8)] = 2 - target[(t >= 0.8)] = 3 - - target_nan = deepcopy(target) - target_nan[self.is_nan[:, 0]] = float("nan") - target_nan_bin = deepcopy(target_nan) - target_nan_bin[target_nan > 0] = 1 - - # Micro fbeta_score binary - score_true = fbeta_score(preds[:, 0], target.to(int) > 0, average="micro", beta=0.5) - score_ipu = fbeta_score_ipu(preds[:, 0], target > 0, average="micro", beta=0.5) - self.assertFalse(score_true.isnan(), "Micro FBETA_score binary is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Micro FBETA_score binary is different" - ) - - # Micro fbeta_score binary with NaNs in target - not_nan = ~target_nan.isnan() - score_true = fbeta_score( - preds[:, 0][not_nan], target_nan_bin[not_nan].to(int), average="micro", beta=0.5 - ) - score_ipu = fbeta_score_ipu(preds[:, 0], target_nan_bin, average="micro", beta=0.5) - self.assertFalse(score_true.isnan(), "Micro FBETA_score binary with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Micro FBETA_score binary IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), - score_ipu.item(), - places=6, - msg="Micro FBETA_score binary with NaN is different", - ) - - # Micro fbeta_score - score_true = fbeta_score(preds, target.to(int), average="micro", beta=0.5) - score_ipu = fbeta_score_ipu(preds, target, average="micro", beta=0.5) - self.assertFalse(score_true.isnan(), "Micro FBETA_score is NaN") - self.assertAlmostEqual( - 
score_true.item(), score_ipu.item(), places=6, msg="Micro FBETA_score is different" - ) - - # Micro fbeta_score with NaNs in target - not_nan = ~target_nan.isnan() - score_true = fbeta_score(preds[not_nan], target[not_nan].to(int), average="micro", beta=0.5) - score_ipu = fbeta_score_ipu(preds, target_nan, average="micro", beta=0.5) - self.assertFalse(score_true.isnan(), "Micro FBETA_score with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Micro FBETA_score IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Micro FBETA_score with NaN is different" - ) - - # Macro fbeta_score multiclass - score_true = fbeta_score(preds, target.to(int), average="macro", num_classes=4, beta=0.5) - score_ipu = fbeta_score_ipu(preds, target, average="macro", num_classes=4, beta=0.5) - self.assertFalse(score_true.isnan(), "Macro FBETA_score is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Macro FBETA_score multiclass is different" - ) - - # Macro fbeta_score multiclass with NaNs in target - not_nan = ~target_nan.isnan() - score_true = fbeta_score( - preds[not_nan], target[not_nan].to(int), average="macro", num_classes=4, beta=0.5 - ) - score_ipu = fbeta_score_ipu(preds, target_nan, average="macro", num_classes=4, beta=0.5) - self.assertFalse(score_true.isnan(), "Macro FBETA_score multiclass with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Macro FBETA_score multiclass IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), - score_ipu.item(), - places=6, - msg="Macro FBETA_score multiclass with NaN is different", - ) - - # Weighted fbeta_scoremulticlass - score_true = fbeta_score(preds, target.to(int), average="weighted", num_classes=4, beta=0.5) - score_ipu = fbeta_score_ipu(preds, target, average="weighted", num_classes=4, beta=0.5) - self.assertFalse(score_true.isnan(), "Weighted FBETA_score is NaN") - self.assertAlmostEqual( - 
score_true.item(), score_ipu.item(), places=6, msg="Weighted FBETA_score is different" - ) - - # Weighted fbeta_score multiclass with NaNs in target - not_nan = ~target_nan.isnan() - score_true = fbeta_score( - preds[not_nan], target[not_nan].to(int), average="weighted", num_classes=4, beta=0.5 - ) - score_ipu = fbeta_score_ipu(preds, target_nan, average="weighted", num_classes=4, beta=0.5) - self.assertFalse(score_true.isnan(), "Weighted FBETA_score multiclass with target_nan is NaN") - self.assertFalse( - score_ipu.isnan(), "Weighted FBETA_score multiclass IPU score with target_nan is NaN" - ) - self.assertAlmostEqual( - score_true.item(), - score_ipu.item(), - places=6, - msg="Regular FBETA_score multiclass with NaN is different", - ) - - def test_f1_score(self): - preds = deepcopy(self.preds)[:, :4] - target = deepcopy(self.target)[:, 0] - t = deepcopy(target) - - target[t < 0.4] = 0 - target[(t >= 0.4) & (t < 0.6)] = 1 - target[(t >= 0.6) & (t < 0.8)] = 2 - target[(t >= 0.8)] = 3 - - target_nan = deepcopy(target) - target_nan[self.is_nan[:, 0]] = float("nan") - target_nan_bin = deepcopy(target_nan) - target_nan_bin[target_nan > 0] = 1 - - # Micro f1_score binary - score_true = f1_score(preds[:, 0], target.to(int) > 0, average="micro") - score_ipu = f1_score_ipu(preds[:, 0], target > 0, average="micro") - self.assertFalse(score_true.isnan(), "Micro F1_score binary is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Micro F1_score binary is different" - ) - - # Micro f1_score binary with NaNs in target - not_nan = ~target_nan.isnan() - score_true = f1_score(preds[:, 0][not_nan], target_nan_bin[not_nan].to(int), average="micro") - score_ipu = f1_score_ipu(preds[:, 0], target_nan_bin, average="micro") - self.assertFalse(score_true.isnan(), "Micro F1_score binary with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Micro F1_score binary IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), 
score_ipu.item(), places=6, msg="Micro F1_score binary with NaN is different" - ) - - # Micro f1_score - score_true = f1_score(preds, target.to(int), average="micro") - score_ipu = f1_score_ipu(preds, target, average="micro") - self.assertFalse(score_true.isnan(), "Micro F1_score is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Micro F1_score is different" - ) - - # Micro f1_score with NaNs in target - not_nan = ~target_nan.isnan() - score_true = f1_score(preds[not_nan], target[not_nan].to(int), average="micro") - score_ipu = f1_score_ipu(preds, target_nan, average="micro") - self.assertFalse(score_true.isnan(), "Micro F1_score with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Micro F1_score IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Micro F1_score with NaN is different" - ) - - # Macro f1_score multiclass - score_true = f1_score(preds, target.to(int), average="macro", num_classes=4) - score_ipu = f1_score_ipu(preds, target, average="macro", num_classes=4) - self.assertFalse(score_true.isnan(), "Macro F1_score is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Macro F1_score multiclass is different" - ) - - # Macro f1_score multiclass with NaNs in target - not_nan = ~target_nan.isnan() - score_true = f1_score(preds[not_nan], target[not_nan].to(int), average="macro", num_classes=4) - score_ipu = f1_score_ipu(preds, target_nan, average="macro", num_classes=4) - self.assertFalse(score_true.isnan(), "Macro F1_score multiclass with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Macro F1_score multiclass IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), - score_ipu.item(), - places=6, - msg="Macro F1_score multiclass with NaN is different", - ) - - # Weighted f1_scoremulticlass - score_true = f1_score(preds, target.to(int), average="weighted", num_classes=4) - 
score_ipu = f1_score_ipu(preds, target, average="weighted", num_classes=4) - self.assertFalse(score_true.isnan(), "Weighted F1_score is NaN") - self.assertAlmostEqual( - score_true.item(), score_ipu.item(), places=6, msg="Weighted F1_score is different" - ) - - # Weighted f1_score multiclass with NaNs in target - not_nan = ~target_nan.isnan() - score_true = f1_score(preds[not_nan], target[not_nan].to(int), average="weighted", num_classes=4) - score_ipu = f1_score_ipu(preds, target_nan, average="weighted", num_classes=4) - self.assertFalse(score_true.isnan(), "Weighted F1_score multiclass with target_nan is NaN") - self.assertFalse(score_ipu.isnan(), "Weighted F1_score multiclass IPU score with target_nan is NaN") - self.assertAlmostEqual( - score_true.item(), - score_ipu.item(), - places=6, - msg="Regular F1_score multiclass with NaN is different", - ) - - def test_mse(self): - preds = deepcopy(self.preds) - target = deepcopy(self.target) - target_nan = deepcopy(self.target_nan) - squared = True - - # Regular loss - loss_true = mean_squared_error(preds, target, squared) - loss_ipu = mean_squared_error_ipu(preds=preds, target=target, squared=squared) - self.assertFalse(loss_true.isnan(), "Regular Mean Squared Error is NaN") - self.assertAlmostEqual( - loss_true.item(), loss_ipu.item(), places=6, msg="Regular Mean Squared Error is different" - ) - - # Regular loss with NaNs in target - not_nan = ~target_nan.isnan() - loss_true = mean_squared_error(preds[not_nan], target[not_nan], squared) - loss_ipu = mean_squared_error_ipu(preds=preds, target=target_nan, squared=squared) - self.assertFalse(loss_true.isnan(), "Regular Mean Squared Error with target_nan is NaN") - self.assertFalse(loss_ipu.isnan(), "Regular Mean Squared Error IPU with target_nan is NaN") - self.assertAlmostEqual( - loss_true.item(), - loss_ipu.item(), - places=6, - msg="Regular Mean Squared Error with NaN is different", - ) - - squared = False - - # Regular loss - loss_true = mean_squared_error(preds, 
target, squared) - loss_ipu = mean_squared_error_ipu(preds=preds, target=target, squared=squared) - self.assertFalse(loss_true.isnan(), "Regular Mean Squared Error is NaN") - self.assertAlmostEqual( - loss_true.item(), loss_ipu.item(), places=6, msg="Regular Mean Squared Error is different" - ) - - # Regular loss with NaNs in target - not_nan = ~target_nan.isnan() - loss_true = mean_squared_error(preds[not_nan], target[not_nan], squared) - loss_ipu = mean_squared_error_ipu(preds=preds, target=target_nan, squared=squared) - self.assertFalse(loss_true.isnan(), "Regular Mean Squared Error with target_nan is NaN") - self.assertFalse(loss_ipu.isnan(), "Regular Mean Squared Error IPU with target_nan is NaN") - self.assertAlmostEqual( - loss_true.item(), - loss_ipu.item(), - places=6, - msg="Regular Mean Squared Error with NaN is different", - ) - - def test_mae(self): - preds = deepcopy(self.preds) - target = deepcopy(self.target) - target_nan = deepcopy(self.target_nan) - - # Regular loss - loss_true = mean_absolute_error(preds, target) - loss_ipu = mean_absolute_error_ipu(preds=preds, target=target) - self.assertFalse(loss_true.isnan(), "Regular Mean Absolute Error is NaN") - self.assertAlmostEqual( - loss_true.item(), loss_ipu.item(), places=6, msg="Regular Mean Absolute Error is different" - ) - - # Regular loss with NaNs in target - not_nan = ~target_nan.isnan() - loss_true = mean_absolute_error(preds[not_nan], target[not_nan]) - loss_ipu = mean_absolute_error_ipu(preds=preds, target=target_nan) - self.assertFalse(loss_true.isnan(), "Regular Mean Absolute Error with target_nan is NaN") - self.assertFalse(loss_ipu.isnan(), "Regular Mean Absolute Error IPU with target_nan is NaN") - self.assertAlmostEqual( - loss_true.item(), - loss_ipu.item(), - places=6, - msg="Regular Mean Absolute Error with NaN is different", - ) - - -if __name__ == "__main__": - ut.main() From 734ba55f183cb27b52a27e0e821173db252eee07 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 25 Apr 
2024 21:38:18 -0400 Subject: [PATCH 018/175] Updated `MetricWrapper` to work with `update` and `compute`, compatible with `torchmetrics` --- graphium/trainer/metrics.py | 60 +++++++++++++++++++++++++++++-------- 1 file changed, 47 insertions(+), 13 deletions(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index 22361faa6..7b4ecea77 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -19,6 +19,7 @@ import torch from torch import Tensor import operator as op +from copy import deepcopy from torchmetrics.utilities.distributed import reduce import torchmetrics.functional.regression.mae @@ -137,7 +138,7 @@ class MetricWrapper: def __init__( self, - metric: Union[str, Callable], + metric: Union[str, torchmetrics.Metric], threshold_kwargs: Optional[Dict[str, Any]] = None, target_nan_mask: Optional[Union[str, int]] = None, multitask_handling: Optional[str] = None, @@ -187,7 +188,7 @@ def __init__( Other arguments to call with the metric """ - self.metric, self.metric_name = self._get_metric(metric) + metric_class, self.metric_name = self._get_metric_class(metric) self.thresholder = None if threshold_kwargs is not None: self.thresholder = Thresholder(**threshold_kwargs) @@ -198,6 +199,26 @@ def __init__( self.target_to_int = target_to_int self.kwargs = kwargs + self.metric, self.kwargs = self._initialize_metric(metric_class, self.kwargs) + + @staticmethod + def _initialize_metric(metric, kwargs): + r""" + Initialize the metric with the provided kwargs + """ + + if not isinstance(metric, type): + if not isinstance(metric, torchmetrics.Metric): + raise ValueError(f"metric must be a torchmetrics.Metric, provided: {type(metric)}" + f"Use `METRICS_DICT` to get the metric class") + else: + return metric, kwargs + + metric = metric(**kwargs) + + return metric, kwargs + + @staticmethod def _parse_target_nan_mask(target_nan_mask): """ @@ -254,7 +275,7 @@ def _parse_multitask_handling(multitask_handling, target_nan_mask): return 
multitask_handling @staticmethod - def _get_metric(metric): + def _get_metric_class(metric): from graphium.utils.spaces import METRICS_DICT if isinstance(metric, str): @@ -265,9 +286,10 @@ def _get_metric(metric): metric = metric return metric, metric_name - def compute(self, preds: Tensor, target: Tensor) -> Tensor: + def update(self, preds: Tensor, target: Tensor) -> Tensor: r""" - Compute the metric, apply the thresholder if provided, and manage the NaNs + Update the parameters of the metric, apply the thresholder if provided, and manage the NaNs. + See `torchmetrics.Metric.update` for more details. """ if preds.ndim == 1: preds = preds.unsqueeze(-1) @@ -300,7 +322,8 @@ def compute(self, preds: Tensor, target: Tensor) -> Tensor: target = target.squeeze() if self.target_to_int: target = target.to(int) - metric_val = self.metric(preds, target, **self.kwargs) + self.metric.update(preds, target) + elif self.multitask_handling == "flatten": # Flatten the tensors, apply the nan filtering, then compute the metrics if classifigression: @@ -313,7 +336,8 @@ def compute(self, preds: Tensor, target: Tensor) -> Tensor: target = target.squeeze() if self.target_to_int: target = target.to(int) - metric_val = self.metric(preds, target, **self.kwargs) + self.metric.update(preds, target) + elif self.multitask_handling == "mean-per-label": # Loop the columns (last dim) of the tensors, apply the nan filtering, compute the metrics per column, then average the metrics target_list = [target[..., ii][~target_nans[..., ii]] for ii in range(target.shape[-1])] @@ -322,7 +346,9 @@ def compute(self, preds: Tensor, target: Tensor) -> Tensor: preds_list = [preds[..., i, :][~target_nans[..., i]] for i in range(preds.shape[1])] else: preds_list = [preds[..., ii][~target_nans[..., ii]] for ii in range(preds.shape[-1])] - metric_val = [] + + if not isinstance(self.metric, list): + self.metric = [deepcopy(self.metric) for _ in range(len(target_list))] for ii in range(len(target_list)): try: 
this_preds, this_target = self._filter_nans(preds_list[ii], target_list[ii]) @@ -330,16 +356,23 @@ def compute(self, preds: Tensor, target: Tensor) -> Tensor: this_target = this_target.squeeze() if self.target_to_int: this_target = this_target.to(int) - metric_val.append(self.metric(this_preds, this_target, **self.kwargs)) + self.metric[ii].update(this_preds, this_target) except: pass - # Average the metric - metric_val = nan_mean(torch.stack(metric_val)) else: # Wrong option raise ValueError(f"Invalid option `self.multitask_handling={self.multitask_handling}`") - return metric_val + def compute(self) -> Tensor: + r""" + Compute the metric with the method `self.compute` + """ + if (self.multitask_handling is None) or (self.multitask_handling == "flatten"): + return self.metric.compute() + elif self.multitask_handling == "mean-per-label": + metrics = [metric.compute() for metric in self.metric] + return nan_mean(torch.stack(metrics)) + def _filter_nans(self, preds: Tensor, target: Tensor): """Handle the NaNs according to the chosen options""" @@ -405,10 +438,11 @@ def __getstate__(self): def __setstate__(self, state: dict): """Reload the class from pickling.""" - state["metric"], state["metric_name"] = self._get_metric(state["metric"]) + state["metric"], state["metric_name"] = self._get_metric_class(state["metric"]) thresholder = state.pop("threshold_kwargs", None) if thresholder is not None: thresholder = Thresholder(**thresholder) state["thresholder"] = thresholder + state["metric"], state["at_compute_kwargs"] = self._initialize_metric(state["metric"], state["kwargs"]) self.__dict__.update(state) From b6c578fa5310ab56f1eefedfd7a92bc74dbe55b0 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 25 Apr 2024 21:47:01 -0400 Subject: [PATCH 019/175] Changed requirements for torchmetrics --- env.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/env.yml b/env.yml index fa4e89136..64169f3a8 100644 --- a/env.yml +++ b/env.yml @@ -31,7 +31,7 @@ 
dependencies: - cuda-version # works also with CPU-only system. - pytorch >=1.12 - lightning >=2.0 - - torchmetrics >=0.7.0,<0.11 + - torchmetrics - ogb - pytorch_geometric >=2.0 # Use `pyg` for Windows instead of `pytorch_geometric` - wandb From 4f6e81693c61bd96d17e5924b73a22cabec9cdaf Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 25 Apr 2024 22:25:49 -0400 Subject: [PATCH 020/175] fixed the loss by adding `MetricToTorchMetrics`, and added a few comments on the `Predictor` for Todos --- graphium/trainer/metrics.py | 53 +++++++++++++++++++++++++++++++--- graphium/trainer/predictor.py | 54 +++++++++++++++++++++++++++++------ 2 files changed, 94 insertions(+), 13 deletions(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index 7b4ecea77..90afa5c17 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -21,6 +21,8 @@ import operator as op from copy import deepcopy +from torch.nn.modules.loss import _Loss +from torchmetrics import Metric from torchmetrics.utilities.distributed import reduce import torchmetrics.functional.regression.mae @@ -138,7 +140,7 @@ class MetricWrapper: def __init__( self, - metric: Union[str, torchmetrics.Metric], + metric: Union[str, torchmetrics.Metric, torch.nn.modules.loss._Loss], threshold_kwargs: Optional[Dict[str, Any]] = None, target_nan_mask: Optional[Union[str, int]] = None, multitask_handling: Optional[str] = None, @@ -367,12 +369,31 @@ def compute(self) -> Tensor: r""" Compute the metric with the method `self.compute` """ - if (self.multitask_handling is None) or (self.multitask_handling == "flatten"): - return self.metric.compute() - elif self.multitask_handling == "mean-per-label": + if self.multitask_handling == "mean-per-label": metrics = [metric.compute() for metric in self.metric] return nan_mean(torch.stack(metrics)) + return self.metric.compute() + + def update_compute(self, preds: Tensor, target: Tensor) -> Tensor: + r""" + Update the parameters of the metric, apply the 
thresholder if provided, and manage the NaNs. + Then compute the metric with the method `self.compute` + """ + + self.update(preds, target) + return self.compute() + + def reset(self): + r""" + Reset the metric with the method `self.metric.reset` + """ + if self.multitask_handling == "mean-per-label": + for metric in self.metric: + metric.reset() + else: + self.metric.reset() + def _filter_nans(self, preds: Tensor, target: Tensor): """Handle the NaNs according to the chosen options""" @@ -446,3 +467,27 @@ def __setstate__(self, state: dict): state["metric"], state["at_compute_kwargs"] = self._initialize_metric(state["metric"], state["kwargs"]) self.__dict__.update(state) + +class MetricToTorchMetrics(): + r""" + A simple wrapper to convert any metric or loss to an equivalent of `torchmetrics.Metric` + by adding the `update`, `compute`, and `reset` methods to make it compatible with `MetricWrapper`. + However, it is simply limited to computing the average of the metric over all the updates. 
+ """ + + def __init__(self, metric): + self.metric = metric + self.scores = [] + + def update(self, preds: Tensor, target: Tensor): + self.scores.append(self.metric(preds, target)) + + def compute(self): + if len(self.scores) == 0: + raise ValueError("No scores to compute") + elif len(self.scores) == 1: + return self.scores[0] + return nan_mean(torch.stack(self.scores)) + + def reset(self): + self.scores = [] diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index 8cfb1ad28..da9423863 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -26,7 +26,7 @@ from graphium.config.config_convert import recursive_config_reformating from graphium.data.datamodule import BaseDataModule -from graphium.trainer.metrics import MetricWrapper +from graphium.trainer.metrics import MetricWrapper, MetricToTorchMetrics from graphium.trainer.predictor_options import ( EvalOptions, FlagOptions, @@ -306,7 +306,7 @@ def compute_loss( wrapped_loss_fun_dict = { task: MetricWrapper( - metric=loss, + metric=MetricToTorchMetrics(loss), threshold_kwargs=None, target_nan_mask=target_nan_mask, multitask_handling=multitask_handling, @@ -316,10 +316,12 @@ def compute_loss( if weights is not None: raise NotImplementedError("Weights are no longer supported in the loss") + all_task_losses = { - task: wrapped(preds=preds[task], target=targets[task]) + task: wrapped.update_and_compute(preds=preds[task], target=targets[task]) for task, wrapped in wrapped_loss_fun_dict.items() } + total_loss = torch.sum(torch.stack(list(all_task_losses.values())), dim=0) num_tasks = len(all_task_losses.keys()) weighted_loss = total_loss / num_tasks @@ -384,11 +386,7 @@ def _general_step(self, batch: Dict[str, Tensor], step_name: str, to_cpu: bool) if weights is not None: weights = weights.detach().to(device=device) - step_dict = {"preds": preds, "targets": targets_dict, "weights": weights} - # step_dict[f"{self.loss_fun._get_name()}/{step_name}"] = loss.detach().cpu() 
original - - # step_dict[f"weighted_loss/{step_name}"] = loss.detach().cpu() - # step_dict[f"loss/{step_name}"] = loss.detach().cpu() + step_dict = {} for task in self.tasks: step_dict[ self.task_epoch_summary.metric_log_name(task, self.loss_fun[task]._get_name(), step_name) @@ -400,6 +398,44 @@ def _general_step(self, batch: Dict[str, Tensor], step_name: str, to_cpu: bool) step_dict["gradient_norm"] = self.get_gradient_norm() return step_dict + def update_metrics(self, + preds: Dict[str, Tensor], + targets: Dict[str, Tensor], + step_name: str, + weights: Optional[Tensor]=None,) -> None: + r""" + Compute the loss using the specified loss function, and dealing with + the nans in the `targets`. + + Parameters: + preds: + Predicted values + + targets: + Target values + + step_name: + The name of the step ("train", "val", "test") + + weights: + No longer supported, will raise an error. + + """ + + if weights is not None: + raise NotImplementedError("Weights are no longer supported in the metrics") + + + # TODO!! + # Lost of changes from the `predictor_summaries.py` file, with `Summary.get_metrics_logs` computing the metrics at the end of an epoch. + + # See torchmetrics `MeanMetric` and `SumMetric`, and use them to compute STD as well + + # DON'T FORGET TO RESET ALL METRICS!! + + + + def flag_step(self, batch: Dict[str, Tensor], step_name: str, to_cpu: bool) -> Dict[str, Any]: r""" Perform adversarial data agumentation during one training step using FLAG. 
@@ -463,7 +499,7 @@ def flag_step(self, batch: Dict[str, Tensor], step_name: str, to_cpu: bool) -> D if weights is not None: weights = weights.detach().to(device=device) - step_dict = {"preds": preds, "targets": targets, "weights": weights} + step_dict = {} step_dict[f"loss/{step_name}"] = loss.detach().cpu() step_dict["loss"] = loss step_dict["task_losses"] = task_losses From 80276da6bc740a09c629f66460ec8f7b0cfd55c0 Mon Sep 17 00:00:00 2001 From: ndickson Date: Thu, 2 May 2024 18:51:35 -0400 Subject: [PATCH 021/175] Updated license passed to setup call in setup.py --- graphium/graphium_cpp/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphium/graphium_cpp/setup.py b/graphium/graphium_cpp/setup.py index e1f737db0..abe07cc55 100755 --- a/graphium/graphium_cpp/setup.py +++ b/graphium/graphium_cpp/setup.py @@ -76,7 +76,7 @@ version = "0.1", author = "N. Dickson", author_email="ndickson@nvidia.com", - license="NVIDIA Proprietary", + license="Apache 2.0", description = "C++ extension for graphium", ext_modules=ext_modules, cmdclass={"build_ext": build_ext}) From 7933ae55b6b1d75d5649c96a6cdf028cd2c3af04 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 3 May 2024 00:11:10 -0400 Subject: [PATCH 022/175] Major updates to `predictor_summaries.py` --- graphium/trainer/metrics.py | 31 ++ graphium/trainer/predictor.py | 60 ++- graphium/trainer/predictor_summaries.py | 494 ++++++------------------ 3 files changed, 179 insertions(+), 406 deletions(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index 90afa5c17..4e597a4a8 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -491,3 +491,34 @@ def compute(self): def reset(self): self.scores = [] + + + +class STDMetric(Metric): + def __init__(self, dist_sync_on_step=False): + super().__init__(dist_sync_on_step=dist_sync_on_step) + self.add_state("sum", default=torch.tensor(0.0), dist_reduce_fx="sum") + self.add_state("sum_of_squares", 
default=torch.tensor(0.0), dist_reduce_fx="sum") + self.add_state("total_weight", default=torch.tensor(0.0), dist_reduce_fx="sum") + + def update(self, value: Union[float, Tensor], weight: Union[float, Tensor] = 1.0) -> None: + if not isinstance(value, Tensor): + value = torch.as_tensor(value, dtype=torch.float32) + if not isinstance(weight, Tensor): + weight = torch.as_tensor(weight, dtype=torch.float32) + + weight = torch.broadcast_to(weight, value.shape) + value, weight = self._cast_and_nan_check_input(value, weight) + + if value.numel() == 0: + return + + self.sum += (value * weight).sum() + self.sum_of_squares += (value * value * weight).sum() + self.total_weight += weight.sum() + + def compute(self) -> Tensor: + mean = self.sum / self.total_weight + mean_of_squares = self.sum_of_squares / self.total_weight + variance = mean_of_squares - mean ** 2 + return torch.sqrt(variance) diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index da9423863..769f76681 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -33,7 +33,7 @@ ModelOptions, OptimOptions, ) -from graphium.trainer.predictor_summaries import TaskSummaries +from graphium.trainer.predictor_summaries import MultiTaskSummary from graphium.utils import fs from graphium.utils.moving_average_tracker import MovingAverageTracker from graphium.utils.spaces import GRAPHIUM_PRETRAINED_MODELS_DICT @@ -168,7 +168,7 @@ def __init__( monitor = self.optim_options.scheduler_kwargs["monitor"].split("/")[0] mode = self.optim_options.scheduler_kwargs["mode"] - self.task_epoch_summary = TaskSummaries( + self.task_epoch_summary = MultiTaskSummary( task_loss_fun=self.loss_fun, task_metrics=self.metrics, task_metrics_on_training_set=self.metrics_on_training_set, @@ -516,31 +516,31 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: train_batch_time = time.time() - self.train_batch_start_time # To be used for throughput calculation # Get the metrics 
that are logged at every step (loss, grad_norm, batch_time, batch_tput) - concatenated_metrics_logs = {} - concatenated_metrics_logs["train/loss"] = outputs["loss"] - concatenated_metrics_logs["epoch_count"] = self.current_epoch + aggregated_metrics_logs = {} + aggregated_metrics_logs["train/loss"] = outputs["loss"] + aggregated_metrics_logs["epoch_count"] = self.current_epoch # Incriment by the batch size self.samples_seen += self.global_bs - concatenated_metrics_logs["samples_seen"] = self.samples_seen + aggregated_metrics_logs["samples_seen"] = self.samples_seen # report the training loss for each individual tasks for task in self.tasks: - concatenated_metrics_logs[f"train/loss/{task}"] = outputs["task_losses"][task] + aggregated_metrics_logs[f"train/loss/{task}"] = outputs["task_losses"][task] # get the mean loss value for individual tasks as they are a tensor of size --> gradient accumulation * replication * device_iter # filter zeros out for the individual losses - for key in concatenated_metrics_logs: - if isinstance(concatenated_metrics_logs[key], torch.Tensor): - if concatenated_metrics_logs[key].numel() > 1: - concatenated_metrics_logs[key] = concatenated_metrics_logs[key][ - concatenated_metrics_logs[key] != 0 + for key in aggregated_metrics_logs: + if isinstance(aggregated_metrics_logs[key], torch.Tensor): + if aggregated_metrics_logs[key].numel() > 1: + aggregated_metrics_logs[key] = aggregated_metrics_logs[key][ + aggregated_metrics_logs[key] != 0 ].mean() # If logging is skipped for this step, then log the important metrics anyway and return if self.skip_log_train_metrics: if self.logger is not None: self.logger.log_metrics( - concatenated_metrics_logs, step=self.global_step + aggregated_metrics_logs, step=self.global_step ) # This is a pytorch lightning function call return @@ -549,11 +549,11 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: # Get the throughput of the batch num_graphs = 
self.get_num_graphs(batch["features"]) tput = num_graphs / train_batch_time - concatenated_metrics_logs["train/batch_time"] = train_batch_time - concatenated_metrics_logs["train/batch_tput"] = tput + aggregated_metrics_logs["train/batch_time"] = train_batch_time + aggregated_metrics_logs["train/batch_tput"] = tput # Compute all the metrics for the training set - self.task_epoch_summary.update_predictor_state( + self.task_epoch_summary.update( step_name="train", targets=outputs["targets"], preds=outputs["preds"], @@ -563,12 +563,12 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: ) metrics_logs = self.task_epoch_summary.get_metrics_logs() # Dict[task, metric_logs] metrics_logs["_global"]["grad_norm"] = self.get_gradient_norm() - concatenated_metrics_logs.update(metrics_logs) + aggregated_metrics_logs.update(metrics_logs) # Log the metrics if self.logger is not None: self.logger.log_metrics( - concatenated_metrics_logs, step=self.global_step + aggregated_metrics_logs, step=self.global_step ) # This is a pytorch lightning function call def training_step(self, batch: Dict[str, Tensor], to_cpu: bool = True) -> Dict[str, Any]: @@ -628,7 +628,7 @@ def _general_epoch_end(self, outputs: Dict[str, Any], step_name: str, device: st loss_fun=self.loss_fun, ) - self.task_epoch_summary.update_predictor_state( + self.task_epoch_summary.update( step_name=step_name, preds=preds, targets=targets, @@ -637,7 +637,6 @@ def _general_epoch_end(self, outputs: Dict[str, Any], step_name: str, device: st n_epochs=self.current_epoch, ) metrics_logs = self.task_epoch_summary.get_metrics_logs() - self.task_epoch_summary.set_results(task_metrics=metrics_logs) return metrics_logs # Consider returning concatenated dict for logging @@ -676,14 +675,11 @@ def on_validation_epoch_end(self) -> None: outputs=self.validation_step_outputs, step_name="val", device="cpu" ) self.validation_step_outputs.clear() - concatenated_metrics_logs = 
self.task_epoch_summary.concatenate_metrics_logs(metrics_logs) - concatenated_metrics_logs["val/mean_time"] = torch.tensor(self.mean_val_time_tracker.mean_value) - concatenated_metrics_logs["val/mean_tput"] = self.mean_val_tput_tracker.mean_value - self.log_dict(concatenated_metrics_logs, sync_dist=True) - - # Save yaml file with the per-task metrics summaries - full_dict = {} - full_dict.update(self.task_epoch_summary.get_dict_summary()) + # TODO: Use the update and compute, rather than the old logic! Make sure to reset the metrics + aggregated_metrics_logs = self.task_epoch_summary.aggregate_metrics_logs(metrics_logs) + aggregated_metrics_logs["val/mean_time"] = torch.tensor(self.mean_val_time_tracker.mean_value) + aggregated_metrics_logs["val/mean_tput"] = self.mean_val_tput_tracker.mean_value + self.log_dict(aggregated_metrics_logs, sync_dist=True) def on_test_batch_end(self, outputs: Any, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> None: self.test_step_outputs.append(outputs) @@ -691,13 +687,9 @@ def on_test_batch_end(self, outputs: Any, batch: Any, batch_idx: int, dataloader def on_test_epoch_end(self) -> None: metrics_logs = self._general_epoch_end(outputs=self.test_step_outputs, step_name="test", device="cpu") self.test_step_outputs.clear() - concatenated_metrics_logs = self.task_epoch_summary.concatenate_metrics_logs(metrics_logs) - - self.log_dict(concatenated_metrics_logs, sync_dist=True) + aggregated_metrics_logs = self.task_epoch_summary.aggregate_metrics_logs(metrics_logs) - # Save yaml file with the per-task metrics summaries - full_dict = {} - full_dict.update(self.task_epoch_summary.get_dict_summary()) + self.log_dict(aggregated_metrics_logs, sync_dist=True) def on_train_start(self): hparams_log = deepcopy(self.hparams) diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 4cec79377..8eb808cdc 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py 
@@ -16,42 +16,37 @@ from typing import Any, Callable, Dict, List, Optional, Union from loguru import logger +from copy import deepcopy import numpy as np import torch from torch import Tensor +from torchmetrics import MeanMetric from graphium.utils.tensor import nan_mean, nan_std, nan_median, tensor_fp16_to_fp32 - +from graphium.trainer.metrics import STDMetric class SummaryInterface(object): r""" An interface to define the functions implemented by summary classes that implement SummaryInterface. """ - def set_results(self, **kwargs): - raise NotImplementedError() - - def get_dict_summary(self): - raise NotImplementedError() - - def update_predictor_state(self, **kwargs): + def update(self, targets: Tensor, preds: Tensor) -> None: raise NotImplementedError() - def get_metrics_logs(self, **kwargs): + def compute(self, **kwargs) -> Tensor: raise NotImplementedError() -class Summary(SummaryInterface): +class SingleTaskSummary(SummaryInterface): # TODO (Gabriela): Default argument cannot be [] def __init__( self, loss_fun: Union[str, Callable], metrics: Dict[str, Callable], + step_name: str, metrics_on_training_set: List[str] = [], metrics_on_progress_bar: List[str] = [], - monitor: str = "loss", - mode: str = "min", task_name: Optional[str] = None, ): r""" @@ -67,8 +62,7 @@ def __init__( metrics_on_training_set: The metrics names from `metrics` to be computed on the training set for each iteration. - If `None`, all the metrics are computed. Using less metrics can significantly improve - performance, depending on the number of readouts. + If `None`, no metrics are computed. 
metrics_on_progress_bar: The metrics names from `metrics` to display also on the progress bar of the training @@ -81,217 +75,123 @@ def __init__( """ self.loss_fun = loss_fun - self.metrics = metrics - self.metrics_on_training_set = metrics_on_training_set - self.metrics_on_progress_bar = metrics_on_progress_bar - self.monitor = monitor - self.mode = mode - - self.summaries = {} - self.best_summaries = {} + self.step_name = step_name + self.metrics = deepcopy(metrics) # Current predictor state # self.predictor_outputs = None - self.step_name: str = None - self.targets: Tensor = None - self.preds: Tensor = None - self.loss = None # What type? - self.n_epochs: int = None - + self.loss = None self.task_name = task_name self.logged_metrics_exceptions = [] # Track which metric exceptions have been logged - def update_predictor_state( - self, step_name: str, targets: Tensor, preds: Tensor, loss: Tensor, n_epochs: int - ): - r""" - update the state of the predictor - Parameters: - step_name: which stage you are in, e.g. 
"train" - targets: the targets tensor - predictions: the predictions tensor - loss: the loss tensor - n_epochs: the number of epochs - """ - self.step_name = step_name - self.targets = targets - self.preds = preds - self.loss = loss - self.n_epochs = n_epochs + # Add default metrics + if "mean_pred" not in self.metrics: + self.metrics["mean_pred"] = MeanMetric(nan_strategy="ignore") + if "mean_target" not in self.metrics: + self.metrics["mean_target"] = MeanMetric(nan_strategy="ignore") + if "std_pred" not in self.metrics: + self.metrics["std_pred"] = STDMetric(nan_strategy="ignore") + if "std_target" not in self.metrics: + self.metrics["std_target"] = STDMetric(nan_strategy="ignore") + + # Parse the metrics filters + metrics_on_training_set = self._parse_metrics_filter(metrics_on_training_set) + metrics_on_progress_bar = self._parse_metrics_filter(metrics_on_progress_bar) + + def _parse_metrics_filter(self, filter: Optional[Union[List[str], Dict[str, Any]]]) -> List[str]: + if filter is None: + filter = [] + elif isinstance(filter, dict): + filter = list(filter.keys()) + elif isinstance(filter, list): + filter = filter + else: + raise ValueError(f"metrics_to_use must be a list or a dictionary. Got {type(filter)}") - def set_results( - self, - metrics: Dict[str, Tensor], - ): + # Ensure that the filter is a subset of the metrics + all_metrics = set(self.metrics.keys()) + filter = set(filter) + if not filter.issubset(all_metrics): + raise ValueError(f"metrics_to_use must be a subset of the metrics. Got {filter - all_metrics}") + + return filter + + @property + def metrics_to_use(self) -> Dict[str, Callable]: r""" - set the reults from the metrics - [!] This function requires that self.update_predictor_state() be called before it. - Parameters: - metrics: a dictionary of metrics + return the metrics to use by filtering the metrics dictionary if it is the training step. Otherwise, return all metrics. 
""" - # Include the task_name in the loss for logging, and similarly for other metrics - metrics[self.metric_log_name(self.task_name, "loss", self.step_name)] = self.loss - self.summaries[self.step_name] = Summary.Results( - targets=self.targets, - preds=self.preds, - loss=self.loss, - metrics=metrics, # Should include task name from get_metrics_logs() - monitored_metric=f"{self.monitor}/{self.step_name}", # Include task name? - n_epochs=self.n_epochs, - ) - if self.is_best_epoch(self.step_name, self.loss, metrics): - self.best_summaries[self.step_name] = self.summaries[self.step_name] - - def is_best_epoch(self, step_name: str, loss: Tensor, metrics: Dict[str, Tensor]) -> bool: + if self.step_name == "train": + metrics_to_use = { + key: metric for key, metric in self.metrics.items() if key in self.metrics_on_training_set + } + return metrics_to_use + return self.metrics + + def update(self, targets: Tensor, preds: Tensor) -> None: + r""" - check if the current epoch is the best epoch based on self.mode criteria + update the state of the predictor Parameters: - step_name: which stage you are in, e.g. "train" - loss: the loss tensor - metrics: a dictionary of metrics + targets: the targets tensor + predictions: the predictions tensor """ + for metric_key, metric_obj in self.metrics_to_use.items(): + try: + metric_obj.update(preds, targets) + except: + pass - # TODO (Gabriela): Check for bugs related to monitor_name - if not (step_name in self.best_summaries.keys()): - return True - - # Include the task_name in the loss for logging, and similarly for other metrics - metrics[self.metric_log_name(self.task_name, "loss", self.step_name)] = loss - monitor_name = f"{self.monitor}/{step_name}" # Include task_name? - if ( - not monitor_name in self.best_summaries.keys() - ): # Feels like there's a bug here. What is this trying to do??? 
-            return True
-
-        if self.mode == "max":
-            return metrics[monitor_name] > self.best_summaries[step_name].monitored
-        elif self.mode == "min":
-            return metrics[monitor_name] < self.best_summaries[step_name].monitored
+    def _compute(self, metrics_to_use: Optional[Union[List[str], Dict[str, Any]]] = None) -> Dict[str, Tensor]:
+
+        # Parse the metrics to use; normalize everything to a list of metric names
+        if metrics_to_use is None:
+            metrics_to_use = list(self.metrics.keys())
+        elif isinstance(metrics_to_use, dict):
+            metrics_to_use = list(metrics_to_use.keys())
+        elif isinstance(metrics_to_use, (list, set, tuple)):
+            metrics_to_use = list(metrics_to_use)
         else:
-            ValueError(f"Mode must be 'min' or 'max', provided `{self.mode}`")
+            raise ValueError(f"metrics_to_use must be a list or a dictionary. Got {type(metrics_to_use)}")
-    def get_results(
-        self,
-        step_name: str,
-    ):
-        r"""
-        retrieve the results for a given step
-        Parameters:
-            step_name: which stage you are in, e.g. "train"
-        Returns:
-            the results for the given step
-        """
-        return self.summaries[step_name]
+        # Compute the metrics; look the metric object up by name since
+        # `metrics_to_use` now holds keys, not (key, object) pairs
+        computed_metrics = {}
+        for metric_key in metrics_to_use:
+            metric_obj = self.metrics[metric_key]
+            metric_name = self.metric_log_name(
+                self.task_name, metric_key, self.step_name
+            )
+            try:
+                computed_metrics[metric_name] = metric_obj.compute()
+            except Exception as e:
+                # If the metric computation fails, return NaN and log a warning only once
+                computed_metrics[metric_name] = torch.as_tensor(float("nan"))
+                # Warn only if it's the first warning for that metric
+                if metric_name not in self.logged_metrics_exceptions:
+                    self.logged_metrics_exceptions.append(metric_name)
+                    logger.warning(f"Error for metric {metric_name}. NaN is returned. Exception: {e}")
-    def get_best_results(
-        self,
-        step_name: str,
-    ):
+        return computed_metrics
+
+    def compute(self) -> Dict[str, Tensor]:
         r"""
-        retrieve the best results for a given step
-        Parameters:
-            step_name: which stage you are in, e.g.
"train" + compute the metrics Returns: - the best results for the given step + the computed metrics """ - return self.best_summaries[step_name] + computed_metrics = self._compute(metrics_to_use=self.metrics_to_use) + return computed_metrics def get_results_on_progress_bar( self, - step_name: str, ) -> Dict[str, Tensor]: r""" retrieve the results to be displayed on the progress bar for a given step - Parameters: - step_name: which stage you are in, e.g. "train" + Returns: the results to be displayed on the progress bar for the given step """ - results = self.summaries[step_name] - results_prog = { - # f"{kk}/{step_name}": results.metrics[f"{kk}/{step_name}"] for kk in self.metrics_on_progress_bar - self.metric_log_name(self.task_name, kk, step_name): results.metrics[ - self.metric_log_name(self.task_name, kk, step_name) - ] - for kk in self.metrics_on_progress_bar - } - return results_prog - - def get_dict_summary(self) -> Dict[str, Any]: - r""" - retrieve the full summary in a dictionary - Returns: - the full summary in a dictionary - """ - full_dict = {} - # Get metric summaries - full_dict["metric_summaries"] = {} - for key, val in self.summaries.items(): - full_dict["metric_summaries"][key] = {k: v for k, v in val.metrics.items()} - full_dict["metric_summaries"][key]["n_epochs"] = val.n_epochs - - # Get metric summaries at best epoch - full_dict["best_epoch_metric_summaries"] = {} - for key, val in self.best_summaries.items(): - full_dict["best_epoch_metric_summaries"][key] = val.metrics - full_dict["best_epoch_metric_summaries"][key]["n_epochs"] = val.n_epochs - - return full_dict - - def get_metrics_logs(self) -> Dict[str, Any]: - r""" - Get the data about metrics to log. - Note: This function requires that self.update_predictor_state() be called before it. - Returns: - A dictionary of metrics to log. 
- """ - - targets = tensor_fp16_to_fp32(self.targets) - preds = tensor_fp16_to_fp32(self.preds) + computed_metrics = self._compute(metrics_to_use=self.metrics_on_progress_bar) - targets = targets.to(dtype=preds.dtype, device=preds.device) - - # Compute the metrics always used in regression tasks - metric_logs = {} - metric_logs[self.metric_log_name(self.task_name, "mean_pred", self.step_name)] = nan_mean(preds) - metric_logs[self.metric_log_name(self.task_name, "std_pred", self.step_name)] = nan_std(preds) - metric_logs[self.metric_log_name(self.task_name, "median_pred", self.step_name)] = nan_median(preds) - metric_logs[self.metric_log_name(self.task_name, "mean_target", self.step_name)] = nan_mean(targets) - metric_logs[self.metric_log_name(self.task_name, "std_target", self.step_name)] = nan_std(targets) - metric_logs[self.metric_log_name(self.task_name, "median_target", self.step_name)] = nan_median( - targets - ) - - # Specify which metrics to use - metrics_to_use = self.metrics - if self.step_name == "train": - metrics_to_use = { - key: metric for key, metric in metrics_to_use.items() if key in self.metrics_on_training_set - } - # Compute the additional metrics - for key, metric in metrics_to_use.items(): - metric_name = self.metric_log_name( - self.task_name, key, self.step_name - ) # f"{key}/{self.step_name}" - try: - metric_logs[metric_name] = metric(preds, targets) - except Exception as e: - metric_logs[metric_name] = torch.as_tensor(float("nan")) - # Warn only if it's the first warning for that metric - if metric_name not in self.logged_metrics_exceptions: - self.logged_metrics_exceptions.append(metric_name) - logger.warning(f"Error for metric {metric_name}. NaN is returned. 
Exception: {e}") - - # Convert all metrics to CPU, except for the loss - # metric_logs[f"{self.loss_fun._get_name()}/{self.step_name}"] = self.loss.detach().cpu() - metric_logs[ - self.metric_log_name(self.task_name, self.loss_fun._get_name(), self.step_name) - ] = self.loss.detach().cpu() - # print("Metrics logs keys: ", metric_logs.keys()) - metric_logs = {key: metric.detach().cpu() for key, metric in metric_logs.items()} - - return metric_logs + return computed_metrics def metric_log_name(self, task_name, metric_name, step_name): if task_name is None: @@ -299,48 +199,15 @@ def metric_log_name(self, task_name, metric_name, step_name): else: return f"{task_name}/{metric_name}/{step_name}" - class Results: - def __init__( - self, - targets: Tensor = None, - preds: Tensor = None, - loss: float = None, # Is this supposed to be a Tensor or float? - metrics: dict = None, - monitored_metric: str = None, - n_epochs: int = None, - ): - r""" - This inner class is used as a container for storing the results of the summary. 
- Parameters: - targets: the targets - preds: the prediction tensor - loss: the loss, float or tensor - metrics: the metrics - monitored_metric: the monitored metric - n_epochs: the number of epochs - """ - self.targets = targets.detach().cpu() - self.preds = preds.detach().cpu() - self.loss = loss.item() if isinstance(loss, Tensor) else loss - self.monitored_metric = monitored_metric - if monitored_metric in metrics.keys(): - self.monitored = metrics[monitored_metric].detach().cpu() - self.metrics = { - key: value.tolist() if isinstance(value, (Tensor, np.ndarray)) else value - for key, value in metrics.items() - } - self.n_epochs = n_epochs - -class TaskSummaries(SummaryInterface): +class MultiTaskSummary(SummaryInterface): def __init__( self, - task_loss_fun: Callable, - task_metrics: Dict[str, Callable], - task_metrics_on_training_set: List[str], - task_metrics_on_progress_bar: List[str], - monitor: str = "loss", - mode: str = "min", + task_loss_fun: Dict[str, Callable], + task_metrics: Dict[str, Dict[str, Callable]], + step_name: str, + task_metrics_on_training_set: Dict[str, List[str]], + task_metrics_on_progress_bar: Dict[str, List[str]], ): r""" class to store the summaries of the tasks @@ -349,112 +216,39 @@ class to store the summaries of the tasks task_metrics: the metrics for each task task_metrics_on_training_set: the metrics to use on the training set task_metrics_on_progress_bar: the metrics to use on the progress bar - monitor: the metric to monitor - mode: the mode of the metric to monitor """ self.task_loss_fun = task_loss_fun self.task_metrics = task_metrics self.task_metrics_on_progress_bar = task_metrics_on_progress_bar self.task_metrics_on_training_set = task_metrics_on_training_set - self.monitor = monitor - self.mode = mode - self.task_summaries: Dict[str, Summary] = {} - self.task_best_summaries: Dict[str, Summary] = {} + self.task_summaries: Dict[str, SingleTaskSummary] = {} self.tasks = list(task_loss_fun.keys()) for task in self.tasks: - 
self.task_summaries[task] = Summary( - self.task_loss_fun[task], - self.task_metrics[task], - self.task_metrics_on_training_set[task], - self.task_metrics_on_progress_bar[task], - self.monitor, - self.mode, - task_name=task, + self.task_summaries[task] = SingleTaskSummary( + loss_fun = self.task_loss_fun[task], + metrics = self.task_metrics[task], + step_name = step_name, + metrics_on_training_set = self.task_metrics_on_training_set[task], + metrics_on_progress_bar = self.task_metrics_on_progress_bar[task], + task_name = task, ) - # Current predictor state - self.weighted_loss = None - self.step_name = None + def update(self, targets: Tensor, preds: Tensor) -> None: - def update_predictor_state( - self, - step_name: str, - targets: Dict[str, Tensor], - preds: Dict[str, Tensor], - loss: Tensor, - task_losses: Dict[str, Tensor], - n_epochs: int, - ): r""" update the state for all predictors Parameters: - step_name: the name of the step targets: the target tensors preds: the prediction tensors - loss: the loss tensor - task_losses: the task losses - n_epochs: the number of epochs """ - self.weighted_loss = loss - self.step_name = step_name for task in self.tasks: - self.task_summaries[task].update_predictor_state( - step_name, + self.task_summaries[task].update( targets[task], preds[task].detach(), - task_losses[task].detach(), - n_epochs, ) - def set_results(self, task_metrics: Dict[str, Dict[str, Tensor]]): - """ - set the results for all tasks - Parameters: - task_metrics: the metrics for each task - """ - for task in self.tasks: - self.task_summaries[task].set_results(task_metrics[task]) - step_name = self.task_summaries[task].step_name - loss = self.task_summaries[task].loss - if self.task_summaries[task].is_best_epoch(step_name, loss, task_metrics[task]): - self.task_summaries[task].best_summaries[step_name] = self.task_summaries[task].summaries[ - step_name - ] - - def get_results( - self, - step_name: str, - ) -> Dict[str, Dict[str, Any]]: - """ - retrieve the 
results - Parameters: - step_name: the name of the step, i.e. "train" - Returns: - the results - """ - results = {} - for task in self.tasks: - results[task] = self.task_summaries[task].get_results(step_name) - return results - - def get_best_results( - self, - step_name: str, - ) -> Dict[str, Dict[str, Any]]: - """ - retrieve the best results - Parameters: - step_name: the name of the step, i.e. "train" - Returns: - the best results - """ - results = {} - for task in self.tasks: - results[task] = self.task_summaries[task].get_best_results(step_name) - return results - def get_results_on_progress_bar( self, step_name: str, @@ -469,49 +263,21 @@ def get_results_on_progress_bar( """ task_results_prog = {} for task in self.tasks: - # task_results_prog[task] = self.task_summaries[task].get_results_on_progress_bar(step_name) task_results_prog.update(self.task_summaries[task].get_results_on_progress_bar(step_name)) return task_results_prog - def get_dict_summary( - self, - ) -> Dict[str, Dict[str, Any]]: + def compute(self) -> Dict[str, Tensor]: r""" - get task summaries in a dictionary + compute the metrics for all tasks Returns: - the task summaries + the computed metrics for all tasks """ - task_full_dict = {} + computed_metrics = {} for task in self.tasks: - task_full_dict[task] = self.task_summaries[task].get_dict_summary() - return task_full_dict + computed_metrics.update(self.task_summaries[task].compute()) + return computed_metrics - def get_metrics_logs( - self, - ) -> Dict[str, Dict[str, Tensor]]: - r""" - get the logs for the metrics - Returns: - the task logs for the metrics - """ - task_metrics_logs = {} - for task in self.tasks: - task_metrics_logs[task] = self.task_summaries[task].get_metrics_logs() - # average metrics - for key in task_metrics_logs[task]: - if isinstance(task_metrics_logs[task][key], torch.Tensor): - if task_metrics_logs[task][key].numel() > 1: - task_metrics_logs[task][key] = task_metrics_logs[task][key][ - task_metrics_logs[task][key] 
!= 0 - ].mean() - - # Include global (weighted loss) - task_metrics_logs["_global"] = {} - task_metrics_logs["_global"][f"loss/{self.step_name}"] = self.weighted_loss.detach().cpu() - return task_metrics_logs - - # TODO (Gabriela): This works to fix the logging on TB, but make it more efficient - def concatenate_metrics_logs( + def aggregate_metrics_logs( self, metrics_logs: Dict[str, Dict[str, Tensor]], ) -> Dict[str, Tensor]: @@ -522,24 +288,8 @@ def concatenate_metrics_logs( Returns: the concatenated metrics logs """ - concatenated_metrics_logs = {} + aggregated_metrics_logs = {} for task in list(self.tasks) + ["_global"]: - concatenated_metrics_logs.update(metrics_logs[task]) - concatenated_metrics_logs[f"loss/{self.step_name}"] = self.weighted_loss.detach().cpu() - return concatenated_metrics_logs - - def metric_log_name( - self, - task_name: str, - metric_name: str, - step_name: str, - ) -> str: - r""" - print the metric name, task name and step name - Returns: - the metric name, task name and step name - """ - if task_name is None: - return f"{metric_name}/{step_name}" - else: - return f"{task_name}/{metric_name}/{step_name}" + aggregated_metrics_logs.update(metrics_logs[task]) + aggregated_metrics_logs[f"loss/{self.step_name}"] = self.weighted_loss.detach().cpu() + return aggregated_metrics_logs From 58499270d9e7800f494b2e1f278ded16be654458 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 3 May 2024 14:41:30 -0400 Subject: [PATCH 023/175] Improved the predictor summaries. 
Added GradientNormMetric --- graphium/trainer/metrics.py | 31 ------ graphium/trainer/predictor.py | 11 -- graphium/trainer/predictor_summaries.py | 130 +++++++++++++++++++----- 3 files changed, 104 insertions(+), 68 deletions(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index 4e597a4a8..dedcca999 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -22,7 +22,6 @@ from copy import deepcopy from torch.nn.modules.loss import _Loss -from torchmetrics import Metric from torchmetrics.utilities.distributed import reduce import torchmetrics.functional.regression.mae @@ -492,33 +491,3 @@ def compute(self): def reset(self): self.scores = [] - - -class STDMetric(Metric): - def __init__(self, dist_sync_on_step=False): - super().__init__(dist_sync_on_step=dist_sync_on_step) - self.add_state("sum", default=torch.tensor(0.0), dist_reduce_fx="sum") - self.add_state("sum_of_squares", default=torch.tensor(0.0), dist_reduce_fx="sum") - self.add_state("total_weight", default=torch.tensor(0.0), dist_reduce_fx="sum") - - def update(self, value: Union[float, Tensor], weight: Union[float, Tensor] = 1.0) -> None: - if not isinstance(value, Tensor): - value = torch.as_tensor(value, dtype=torch.float32) - if not isinstance(weight, Tensor): - weight = torch.as_tensor(weight, dtype=torch.float32) - - weight = torch.broadcast_to(weight, value.shape) - value, weight = self._cast_and_nan_check_input(value, weight) - - if value.numel() == 0: - return - - self.sum += (value * weight).sum() - self.sum_of_squares += (value * value * weight).sum() - self.total_weight += weight.sum() - - def compute(self) -> Tensor: - mean = self.sum / self.total_weight - mean_of_squares = self.sum_of_squares / self.total_weight - variance = mean_of_squares - mean ** 2 - return torch.sqrt(variance) diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index 769f76681..a56fb0eee 100644 --- a/graphium/trainer/predictor.py +++ 
b/graphium/trainer/predictor.py @@ -395,7 +395,6 @@ def _general_step(self, batch: Dict[str, Tensor], step_name: str, to_cpu: bool) step_dict["loss"] = loss # print("loss ", self.global_step, self.current_epoch, loss) step_dict["task_losses"] = task_losses - step_dict["gradient_norm"] = self.get_gradient_norm() return step_dict def update_metrics(self, @@ -562,7 +561,6 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: n_epochs=self.current_epoch, ) metrics_logs = self.task_epoch_summary.get_metrics_logs() # Dict[task, metric_logs] - metrics_logs["_global"]["grad_norm"] = self.get_gradient_norm() aggregated_metrics_logs.update(metrics_logs) # Log the metrics @@ -588,15 +586,6 @@ def training_step(self, batch: Dict[str, Tensor], to_cpu: bool = True) -> Dict[s step_dict.pop("targets") return step_dict # Returning the metrics_logs with the loss - def get_gradient_norm(self): - # compute the norm - total_norm = torch.tensor(0.0) - for p in self.parameters(): - if p.grad is not None: - param_norm = p.grad.detach().data.norm(2) - total_norm += param_norm.detach().cpu() ** 2 - total_norm = total_norm**0.5 - return total_norm def validation_step(self, batch: Dict[str, Tensor], to_cpu: bool = True) -> Dict[str, Any]: return self._general_step(batch=batch, step_name="val", to_cpu=to_cpu) diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 8eb808cdc..871bde725 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -21,10 +21,9 @@ import numpy as np import torch from torch import Tensor -from torchmetrics import MeanMetric +from torchmetrics import MeanMetric, Metric from graphium.utils.tensor import nan_mean, nan_std, nan_median, tensor_fp16_to_fp32 -from graphium.trainer.metrics import STDMetric class SummaryInterface(object): r""" @@ -39,14 +38,14 @@ def compute(self, **kwargs) -> Tensor: class SingleTaskSummary(SummaryInterface): - # TODO (Gabriela): 
Default argument cannot be [] def __init__( self, - loss_fun: Union[str, Callable], + loss: Tensor, metrics: Dict[str, Callable], step_name: str, - metrics_on_training_set: List[str] = [], - metrics_on_progress_bar: List[str] = [], + n_epochs: int, + metrics_on_training_set: Optional[List[str]] = None, + metrics_on_progress_bar: Optional[List[str]] = None, task_name: Optional[str] = None, ): r""" @@ -65,7 +64,8 @@ def __init__( If `None`, no metrics are computed. metrics_on_progress_bar: - The metrics names from `metrics` to display also on the progress bar of the training + The metrics names from `metrics` to display also on the progress bar of the training. + If `None`, no metrics are displayed. monitor: `str` metric to track (Default=`"loss/val"`) @@ -74,13 +74,13 @@ def __init__( name of the task (Default=`None`) """ - self.loss_fun = loss_fun + self.loss = loss.detach().cpu() + self.n_epochs = n_epochs self.step_name = step_name self.metrics = deepcopy(metrics) # Current predictor state # self.predictor_outputs = None - self.loss = None self.task_name = task_name self.logged_metrics_exceptions = [] # Track which metric exceptions have been logged @@ -93,11 +93,19 @@ def __init__( self.metrics["std_pred"] = STDMetric(nan_strategy="ignore") if "std_target" not in self.metrics: self.metrics["std_target"] = STDMetric(nan_strategy="ignore") + if ("grad_norm" not in self.metrics) and (step_name == "train"): + self.metrics["grad_norm"] = GradientNormMetric() # Parse the metrics filters metrics_on_training_set = self._parse_metrics_filter(metrics_on_training_set) metrics_on_progress_bar = self._parse_metrics_filter(metrics_on_progress_bar) + self._cached_metrics: Dict[str, Tensor] = {} + + @property + def get_cached_metrics(self) -> Dict[str, Tensor]: + return deepcopy(self._cached_metrics) + def _parse_metrics_filter(self, filter: Optional[Union[List[str], Dict[str, Any]]]) -> List[str]: if filter is None: filter = [] @@ -178,6 +186,10 @@ def compute(self) -> 
Dict[str, Tensor]: the computed metrics """ computed_metrics = self._compute(metrics_to_use=self.metrics_to_use) + self._cached_metrics = computed_metrics + self._cached_metrics[f"{self.step_name}/loss"] = self.loss + self._cached_metrics[f"{self.step_name}/n_epochs"] = self.n_epochs + return computed_metrics def get_results_on_progress_bar( @@ -189,9 +201,18 @@ def get_results_on_progress_bar( Returns: the results to be displayed on the progress bar for the given step """ - computed_metrics = self._compute(metrics_to_use=self.metrics_on_progress_bar) + cached_metrics = self.get_cached_metrics + if cached_metrics is None: + results_prog = self._compute(metrics_to_use=self.metrics_on_progress_bar) + else: + results_prog = {} + for metric_key in self.metrics_on_progress_bar: + metric_name = self.metric_log_name( + self.task_name, metric_key, self.step_name + ) + results_prog[metric_name] = cached_metrics[metric_name] - return computed_metrics + return results_prog def metric_log_name(self, task_name, metric_name, step_name): if task_name is None: @@ -203,35 +224,35 @@ def metric_log_name(self, task_name, metric_name, step_name): class MultiTaskSummary(SummaryInterface): def __init__( self, - task_loss_fun: Dict[str, Callable], + global_loss: Tensor, + task_loss: Dict[str, Tensor], task_metrics: Dict[str, Dict[str, Callable]], step_name: str, - task_metrics_on_training_set: Dict[str, List[str]], - task_metrics_on_progress_bar: Dict[str, List[str]], + n_epochs: int, + task_metrics_on_training_set: Optional[Dict[str, List[str]]], + task_metrics_on_progress_bar: Optional[Dict[str, List[str]]], ): r""" class to store the summaries of the tasks Parameters: - task_loss_fun: the loss function for each task - task_metrics: the metrics for each task - task_metrics_on_training_set: the metrics to use on the training set - task_metrics_on_progress_bar: the metrics to use on the progress bar + """ - self.task_loss_fun = task_loss_fun + self.global_loss = 
global_loss.detach().cpu()
         self.task_metrics = task_metrics
         self.task_metrics_on_progress_bar = task_metrics_on_progress_bar
         self.task_metrics_on_training_set = task_metrics_on_training_set
 
+        # Initialize all the single-task summaries
+        self.tasks = list(task_loss.keys())
         self.task_summaries: Dict[str, SingleTaskSummary] = {}
-        self.tasks = list(task_loss_fun.keys())
-
         for task in self.tasks:
             self.task_summaries[task] = SingleTaskSummary(
-                loss_fun = self.task_loss_fun[task],
+                loss = task_loss[task],
                 metrics = self.task_metrics[task],
                 step_name = step_name,
-                metrics_on_training_set = self.task_metrics_on_training_set[task],
-                metrics_on_progress_bar = self.task_metrics_on_progress_bar[task],
+                n_epochs = n_epochs,
+                metrics_on_training_set = self.task_metrics_on_training_set[task] if task in self.task_metrics_on_training_set else None,
+                metrics_on_progress_bar = self.task_metrics_on_progress_bar[task] if task in self.task_metrics_on_progress_bar else None,
                 task_name = task,
             )
 
@@ -290,6 +311,63 @@ def aggregate_metrics_logs(
         """
         aggregated_metrics_logs = {}
         for task in list(self.tasks) + ["_global"]:
-            aggregated_metrics_logs.update(metrics_logs[task])
-        aggregated_metrics_logs[f"loss/{self.step_name}"] = self.weighted_loss.detach().cpu()
+            if task in metrics_logs.keys():
+                aggregated_metrics_logs.update(metrics_logs[task])
+        aggregated_metrics_logs[f"loss/{self.step_name}"] = self.global_loss.detach().cpu()
         return aggregated_metrics_logs
+
+
+
+
+class STDMetric(Metric):
+    """
+    A metric to compute the standard deviation of the predictions or targets.
+    Based on `torchmetrics.Metric`, with a similar implementation to `torchmetrics.MeanMetric`.
+ """ + def __init__(self, dist_sync_on_step=False): + super().__init__(dist_sync_on_step=dist_sync_on_step) + self.add_state("sum", default=torch.tensor(0.0), dist_reduce_fx="sum") + self.add_state("sum_of_squares", default=torch.tensor(0.0), dist_reduce_fx="sum") + self.add_state("total_weight", default=torch.tensor(0.0), dist_reduce_fx="sum") + + def update(self, value: Union[float, Tensor], weight: Union[float, Tensor] = 1.0) -> None: + if not isinstance(value, Tensor): + value = torch.as_tensor(value, dtype=torch.float32) + if not isinstance(weight, Tensor): + weight = torch.as_tensor(weight, dtype=torch.float32) + + weight = torch.broadcast_to(weight, value.shape) + value, weight = self._cast_and_nan_check_input(value, weight) + + if value.numel() == 0: + return + + self.sum += (value * weight).sum() + self.sum_of_squares += (value * value * weight).sum() + self.total_weight += weight.sum() + + def compute(self) -> Tensor: + mean = self.sum / self.total_weight + mean_of_squares = self.sum_of_squares / self.total_weight + variance = mean_of_squares - mean ** 2 + return torch.sqrt(variance) + +class GradientNormMetric(Metric): + """ + A metric to compute the norm of the gradient. + Based on `torchmetrics.Metric`. 
+ """ + def __init__(self, dist_sync_on_step=False): + super().__init__(dist_sync_on_step=dist_sync_on_step) + self.add_state("gradient_norm", default=torch.tensor(0.0), dist_reduce_fx="sum") + + def update(self, model: torch.nn.Module) -> None: + grad_norm = torch.tensor(0.0) + for p in model.parameters(): + if p.grad is not None: + param_norm = p.grad.detach().data.norm(2) + total_norm += param_norm.detach().cpu() ** 2 + self.gradient_norm_sq += grad_norm + + def compute(self) -> Tensor: + return self.gradient_norm_sq.sqrt() From 9492e62b12a98ba3abe8a3ecf6cbc703cafd49e1 Mon Sep 17 00:00:00 2001 From: ndickson Date: Mon, 6 May 2024 16:43:36 -0400 Subject: [PATCH 024/175] Changes to get test_dataset.py and test_multitask_datamodule.py passing --- graphium/data/datamodule.py | 63 ++++---- graphium/features/featurizer.py | 10 +- tests/test_dataset.py | 223 ++++++++++++++++------------- tests/test_multitask_datamodule.py | 59 +++++--- 4 files changed, 208 insertions(+), 147 deletions(-) diff --git a/graphium/data/datamodule.py b/graphium/data/datamodule.py index a44f0f33d..0b562b847 100644 --- a/graphium/data/datamodule.py +++ b/graphium/data/datamodule.py @@ -962,20 +962,20 @@ def get_task_levels(self): return task_level_map - @property - def concat_smiles_tensor_index(self): + @staticmethod + def concat_smiles_tensor_index(): return 0 - @property - def smiles_offsets_tensor_index(self): + @staticmethod + def smiles_offsets_tensor_index(): return 1 - @property - def num_nodes_tensor_index(self): + @staticmethod + def num_nodes_tensor_index(): return 2 - @property - def num_edges_tensor_index(self): + @staticmethod + def num_edges_tensor_index(): return 3 - @property - def data_offsets_tensor_index(self): + @staticmethod + def data_offsets_tensor_index(): return 4 def prepare_data(self): @@ -1149,26 +1149,30 @@ def setup( if self.train_ds is None: self.train_ds = self._make_multitask_dataset("train") - if self.val_ds is None: + if self.val_ds is None and 
len(self.stage_data["val"]) >= self.num_edges_tensor_index(): self.val_ds = self._make_multitask_dataset("val") logger.info(self.train_ds) - logger.info(self.val_ds) label_num_cols.update( dict(zip(self.train_ds.task_names, self.train_ds.label_num_cols)) ) # Make sure that all task label sizes are contained in here. Maybe do the update outside these if statements. - label_num_cols.update(dict(zip(self.val_ds.task_names, self.val_ds.label_num_cols))) label_dtypes.update(dict(zip(self.train_ds.task_names, self.train_ds.label_dtypes))) - label_dtypes.update(dict(zip(self.val_ds.task_names, self.val_ds.label_dtypes))) + + if self.val_ds is not None: + logger.info(self.val_ds) + label_num_cols.update(dict(zip(self.val_ds.task_names, self.val_ds.label_num_cols))) + label_dtypes.update(dict(zip(self.val_ds.task_names, self.val_ds.label_dtypes))) + if stage == "test" or stage is None: - if self.test_ds is None: + if self.test_ds is None and len(self.stage_data["test"]) >= self.num_edges_tensor_index(): self.test_ds = self._make_multitask_dataset("test") - logger.info(self.test_ds) + if self.test_ds is not None: + logger.info(self.test_ds) - label_num_cols.update(dict(zip(self.test_ds.task_names, self.test_ds.label_num_cols))) - label_dtypes.update(dict(zip(self.test_ds.task_names, self.test_ds.label_dtypes))) + label_num_cols.update(dict(zip(self.test_ds.task_names, self.test_ds.label_num_cols))) + label_dtypes.update(dict(zip(self.test_ds.task_names, self.test_ds.label_dtypes))) default_labels_num_cols_dict = self.collate_fn.keywords.get("labels_num_cols_dict", None) @@ -1209,8 +1213,8 @@ def _make_multitask_dataset( stage_data = self.stage_data[stage] data_offsets = None - if self.data_offsets_tensor_index < len(stage_data): - data_offsets = stage_data[self.data_offsets_tensor_index] + if self.data_offsets_tensor_index() < len(stage_data): + data_offsets = stage_data[self.data_offsets_tensor_index()] multitask_dataset = Datasets.MultitaskDataset( about=about, @@ -1220,10 
+1224,10 @@ def _make_multitask_dataset( label_num_cols=self.label_num_cols, label_dtypes=self.label_dtypes, mol_file_data_offsets=data_offsets, - concat_smiles_tensor=stage_data[self.concat_smiles_tensor_index], - smiles_offsets_tensor=stage_data[self.smiles_offsets_tensor_index], - num_nodes_tensor=stage_data[self.num_nodes_tensor_index], - num_edges_tensor=stage_data[self.num_edges_tensor_index], + concat_smiles_tensor=stage_data[self.concat_smiles_tensor_index()], + smiles_offsets_tensor=stage_data[self.smiles_offsets_tensor_index()], + num_nodes_tensor=stage_data[self.num_nodes_tensor_index()], + num_edges_tensor=stage_data[self.num_edges_tensor_index()], ) # type: ignore return multitask_dataset @@ -1455,8 +1459,8 @@ def get_fake_graph(self): ########################## Private methods ###################################### + @staticmethod def _extract_smiles_labels( - self, df: pd.DataFrame, task_level: str, smiles_col: Optional[str] = None, @@ -1554,8 +1558,8 @@ def _extract_smiles_labels( extras = {"weights": weights, "mol_ids": mol_ids} return smiles, labels, label_offsets, sample_idx, extras + @staticmethod def _get_split_indices( - self, dataset_size: int, split_val: float, split_test: float, @@ -1611,7 +1615,7 @@ def _get_split_indices( splits = splits_path else: # Split from an indices file - file_type = self._get_data_file_type(splits_path) + file_type = BaseDataModule._get_data_file_type(splits_path) train, val, test = split_names @@ -1619,7 +1623,7 @@ def _get_split_indices( splits = torch.load(splits_path) elif file_type in ["csv", "tsv"]: with fsspec.open(str(splits_path)) as f: - splits = self._read_csv(splits_path) + splits = BaseDataModule._read_csv(splits_path) else: raise ValueError( f"file type `{file_type}` for `{splits_path}` not recognised, please use .pt, .csv or .tsv" @@ -1641,8 +1645,9 @@ def _get_split_indices( return train_indices, val_indices, test_indices + @staticmethod def _sub_sample_df( - self, df: pd.DataFrame, sample_size: 
Union[int, float, None], seed: Optional[int] = None + df: pd.DataFrame, sample_size: Union[int, float, None], seed: Optional[int] = None ) -> pd.DataFrame: r""" subsample from a pandas dataframe diff --git a/graphium/features/featurizer.py b/graphium/features/featurizer.py index bdff649ac..d97e70f02 100644 --- a/graphium/features/featurizer.py +++ b/graphium/features/featurizer.py @@ -30,14 +30,14 @@ def mol_to_pyggraph( mol: str, - atom_property_list_onehot: torch.Tensor = None, - atom_property_list_float: torch.Tensor = None, + atom_property_list_onehot: torch.Tensor = torch.tensor(data=[], dtype=torch.int64), + atom_property_list_float: torch.Tensor = torch.tensor(data=[], dtype=torch.int64), conformer_property_list: List[str] = [], - edge_property_list: torch.Tensor = None, + edge_property_list: torch.Tensor = torch.tensor(data=[], dtype=torch.int64), add_self_loop: bool = False, explicit_H: bool = False, use_bonds_weights: bool = False, - pos_encoding_as_features: Tuple[List[str],torch.Tensor] = None, + pos_encoding_as_features: Tuple[List[str],torch.Tensor] = ([], torch.tensor(data=[], dtype=torch.int64)), dtype: np.dtype = np.float16, on_error: str = "ignore", mask_nan: Union[str, float, type(None)] = "raise", @@ -146,7 +146,7 @@ def mol_to_pyggraph( mol, atom_property_list_onehot, atom_property_list_float, - 'positions_3d' in conformer_property_list, + has_conformer, edge_property_list, pos_encoding_as_features[1], True, # duplicate_edges, so that we don't have to duplicate below diff --git a/tests/test_dataset.py b/tests/test_dataset.py index 29cf8f2a5..56e2cbc95 100644 --- a/tests/test_dataset.py +++ b/tests/test_dataset.py @@ -15,10 +15,86 @@ import unittest as ut from graphium.data import load_micro_zinc -from graphium.data.dataset import SingleTaskDataset, MultitaskDataset +from graphium.data.datamodule import MultitaskFromSmilesDataModule +from graphium.data.dataset import MultitaskDataset +from graphium.features import mol_to_pyggraph from 
graphium.data.smiles_transform import smiles_to_unique_mol_ids from graphium.data.utils import get_keys +import graphium_cpp + +import numpy as np +import os.path as osp + +TEMP_CACHE_DATA_PATH = "tests/temp_cache_0000" + +def dataframes_to_dataset(dataframes_dict, case_num): + task_names = [key for key in dataframes_dict.keys()] + + task_dataset_args = {} + task_train_indices = {} + task_val_indices = {} + task_test_indices = {} + for task in task_names: + smiles, labels, label_offsets, sample_idx, extras = MultitaskFromSmilesDataModule._extract_smiles_labels( + df=dataframes_dict[task], + task_level="graph", + smiles_col="SMILES", + label_cols=task, + idx_col=None, + weights_col=None, + weights_type=None, + ) + num_molecules = len(smiles) + task_dataset_args[task] = { + "smiles": smiles, + "labels": labels, + "label_offsets": label_offsets, + "extras": extras, + } + + task_train_indices[task] = np.arange(num_molecules).tolist() + task_val_indices[task] = [] + task_test_indices[task] = [] + + fake_data_hash = "a1b2c3testdataset"+str(case_num) + + # The rest of the data preparation and caching is done in graphium_cpp.prepare_and_save_data + normalizations = {task: {} for task in task_names} # No normalization + stage_data, all_stats, label_num_cols, label_dtypes = graphium_cpp.prepare_and_save_data( + task_names, + task_dataset_args, + normalizations, + TEMP_CACHE_DATA_PATH, + fake_data_hash, + task_train_indices, + task_val_indices, + task_test_indices, + False, #add_self_loop + False, #explicit_H + 0) #preprocessing_n_jobs + + stage_data = stage_data["train"] + + data_offsets = None + if MultitaskFromSmilesDataModule.data_offsets_tensor_index() < len(stage_data): + data_offsets = stage_data[MultitaskFromSmilesDataModule.data_offsets_tensor_index()] + + multitask_dataset = MultitaskDataset( + about="test_dataset case"+str(case_num), + data_path=osp.join(TEMP_CACHE_DATA_PATH, "train_"+fake_data_hash), + featurize_smiles=mol_to_pyggraph, + task_names=task_names, + 
label_num_cols=label_num_cols, + label_dtypes=label_dtypes, + mol_file_data_offsets=data_offsets, + concat_smiles_tensor=stage_data[MultitaskFromSmilesDataModule.concat_smiles_tensor_index()], + smiles_offsets_tensor=stage_data[MultitaskFromSmilesDataModule.smiles_offsets_tensor_index()], + num_nodes_tensor=stage_data[MultitaskFromSmilesDataModule.num_nodes_tensor_index()], + num_edges_tensor=stage_data[MultitaskFromSmilesDataModule.num_edges_tensor_index()], + ) + + return multitask_dataset class Test_Multitask_Dataset(ut.TestCase): # Then we can choose different rows and columns for the tests as we see fit. @@ -41,51 +117,40 @@ def test_multitask_dataset_case_1(self): df_micro_zinc_SA = df[["SMILES", "SA"]] df_micro_zinc_logp = df[["SMILES", "logp"]] df_micro_zinc_score = df[["SMILES", "score"]] - - # We need to turn these dataframes into single-task datasets. + + # We need to prepare the data for these dataframes. # We don't need to do featurization yet. - ds_micro_zinc_SA = SingleTaskDataset( - smiles=df_micro_zinc_SA.loc[:, "SMILES"].tolist(), labels=df_micro_zinc_SA.loc[:, "SA"].tolist() - ) - - ds_micro_zinc_logp = SingleTaskDataset( - smiles=df_micro_zinc_logp.loc[:, "SMILES"].tolist(), - labels=df_micro_zinc_logp.loc[:, "logp"].tolist(), - ) - ds_micro_zinc_score = SingleTaskDataset( - smiles=df_micro_zinc_score.loc[:, "SMILES"].tolist(), - labels=df_micro_zinc_score.loc[:, "score"].tolist(), - ) - - # Create the multitask dataset - datasets_dict = {"SA": ds_micro_zinc_SA, "logp": ds_micro_zinc_logp, "score": ds_micro_zinc_score} - multitask_microzinc = MultitaskDataset( - datasets_dict - ) # Can optionally have features + dataframes = { + "SA": df_micro_zinc_SA, + "logp": df_micro_zinc_logp, + "score": df_micro_zinc_score, + } + multitask_dataset = dataframes_to_dataset(dataframes, 1) # Check: The number of unique molecules equals the number of datapoints in the multitask dataset. 
- self.assertEqual(num_unique_mols, multitask_microzinc.__len__()) + self.assertEqual(num_unique_mols, multitask_dataset.__len__()) # Check that for each task, you have the same label values as the initial DF. - for idx in range(multitask_microzinc.__len__()): + for idx in range(multitask_dataset.__len__()): smiles = df[["SMILES"]].iloc[idx].values[0] - # label = df[['SA']].iloc[idx] - label_SA = ds_micro_zinc_SA.labels[idx] - label_logp = ds_micro_zinc_logp.labels[idx] - label_score = ds_micro_zinc_score.labels[idx] - - # Search for the mol id in the multitask dataset - mol_ids = smiles_to_unique_mol_ids([smiles]) - mol_id = mol_ids[0] + + label_SA = df_micro_zinc_SA["SA"][idx] + label_logp = df_micro_zinc_logp["logp"][idx] + label_score = df_micro_zinc_score["score"][idx] + + # Search for the smiles string in the multitask dataset found_idx = -1 - for i, id in enumerate(multitask_microzinc.mol_ids): - if mol_id == id: + for i in range(multitask_dataset.__len__()): + if graphium_cpp.extract_string(multitask_dataset.smiles_tensor, multitask_dataset.smiles_offsets_tensor, i) == smiles: found_idx = i + break + item = multitask_dataset[found_idx]["labels"] + # Compare labels - self.assertEqual(label_SA, multitask_microzinc.labels[found_idx]["SA"]) - self.assertEqual(label_logp, multitask_microzinc.labels[found_idx]["logp"]) - self.assertEqual(label_score, multitask_microzinc.labels[found_idx]["score"]) + self.assertEqual(label_SA, item["SA"]) + self.assertEqual(label_logp, item["logp"]) + self.assertEqual(label_score, item["score"]) def test_multitask_dataset_case_2(self): """Case: Different tasks, but with no intersection in the smiles (each task has a unique set of smiles) @@ -100,36 +165,18 @@ def test_multitask_dataset_case_2(self): df_rows_score = df.iloc[400:750] # 350 data points total_data_points = 750 - # Here we split the data according to the task we care about. 
- df_micro_zinc_SA = df_rows_SA[["SMILES", "SA"]] - df_micro_zinc_logp = df_rows_logp[["SMILES", "logp"]] - df_micro_zinc_score = df_rows_score[["SMILES", "score"]] - - # We need to turn these dataframes into single-task datasets. - # We don't need to do featurization yet. - ds_micro_zinc_SA = SingleTaskDataset( - smiles=df_micro_zinc_SA.loc[:, "SMILES"].tolist(), labels=df_micro_zinc_SA.loc[:, "SA"].tolist() - ) - ds_micro_zinc_logp = SingleTaskDataset( - smiles=df_micro_zinc_logp.loc[:, "SMILES"].tolist(), - labels=df_micro_zinc_logp.loc[:, "logp"].tolist(), - ) - ds_micro_zinc_score = SingleTaskDataset( - smiles=df_micro_zinc_score.loc[:, "SMILES"].tolist(), - labels=df_micro_zinc_score.loc[:, "score"].tolist(), - ) - - # Create the multitask dataset - datasets_dict = {"SA": ds_micro_zinc_SA, "logp": ds_micro_zinc_logp, "score": ds_micro_zinc_score} - multitask_microzinc = MultitaskDataset( - datasets_dict - ) # Can optionally have features + dataframes = { + "SA": df_rows_SA, + "logp": df_rows_logp, + "score": df_rows_score, + } + multitask_microzinc = dataframes_to_dataset(dataframes, 2) # The total dataset has as many molecules as there are smiles in all tasks put together self.assertEqual(total_data_points, multitask_microzinc.__len__()) # For each task, only the smiles related to that task have values, and the value is what's expected from the initial DF. 
- for idx in range(len(ds_micro_zinc_SA)): + for idx in range(len(multitask_microzinc)): smiles = df[["SMILES"]].iloc[idx].values[0] task = "task" @@ -141,28 +188,28 @@ def test_multitask_dataset_case_2(self): task = "score" # Labels of that molecule - label_SA = df[["SA"]].iloc[idx].values[0] - label_logp = df[["logp"]].iloc[idx].values[0] - label_score = df[["score"]].iloc[idx].values[0] + label_df = df[[task]].iloc[idx].values[0] - # Search for that molecule in the multitask dataset - mol_ids = smiles_to_unique_mol_ids([smiles]) - mol_id = mol_ids[0] + # Search for the smiles string in the multitask dataset found_idx = -1 - for i, id in enumerate(multitask_microzinc.mol_ids): - if mol_id == id: + for i in range(multitask_microzinc.__len__()): + if graphium_cpp.extract_string(multitask_microzinc.smiles_tensor, multitask_microzinc.smiles_offsets_tensor, i) == smiles: found_idx = i - multitask_microzinc_labels = get_keys(multitask_microzinc.labels[found_idx]) + break + + item = multitask_microzinc[found_idx]["labels"] + multitask_microzinc_labels = item.keys() + + assert task in multitask_microzinc_labels + self.assertEqual(label_df, item[task]) + if task == "SA": - self.assertEqual(label_SA, multitask_microzinc.labels[found_idx]["SA"]) self.assertFalse("score" in multitask_microzinc_labels) self.assertFalse("logp" in multitask_microzinc_labels) elif task == "logp": - self.assertEqual(label_logp, multitask_microzinc.labels[found_idx]["logp"]) self.assertFalse("score" in multitask_microzinc_labels) self.assertFalse("SA" in multitask_microzinc_labels) elif task == "score": - self.assertEqual(label_score, multitask_microzinc.labels[found_idx]["score"]) self.assertFalse("SA" in multitask_microzinc_labels) self.assertFalse("logp" in multitask_microzinc_labels) @@ -180,30 +227,12 @@ def test_multitask_dataset_case_3(self): df_rows_score = df.iloc[3:5] total_data_points = 5 - # Here we split the data according to the task we care about. 
- df_micro_zinc_SA = df_rows_SA[["SMILES", "SA"]] - df_micro_zinc_logp = df_rows_logp[["SMILES", "logp"]] - df_micro_zinc_score = df_rows_score[["SMILES", "score"]] - - # We need to turn these dataframes into single-task datasets. - # We don't need to do featurization yet. - ds_micro_zinc_SA = SingleTaskDataset( - smiles=df_micro_zinc_SA.loc[:, "SMILES"].tolist(), labels=df_micro_zinc_SA.loc[:, "SA"].tolist() - ) - ds_micro_zinc_logp = SingleTaskDataset( - smiles=df_micro_zinc_logp.loc[:, "SMILES"].tolist(), - labels=df_micro_zinc_logp.loc[:, "logp"].tolist(), - ) - ds_micro_zinc_score = SingleTaskDataset( - smiles=df_micro_zinc_score.loc[:, "SMILES"].tolist(), - labels=df_micro_zinc_score.loc[:, "score"].tolist(), - ) - - # Create the multitask dataset - datasets_dict = {"SA": ds_micro_zinc_SA, "logp": ds_micro_zinc_logp, "score": ds_micro_zinc_score} - multitask_microzinc = MultitaskDataset( - datasets_dict - ) # Can optionally have features + dataframes = { + "SA": df_rows_SA, + "logp": df_rows_logp, + "score": df_rows_score, + } + multitask_microzinc = dataframes_to_dataset(dataframes, 3) # The multitask dataset has as many molecules as there are unique smiles across the single task datasets. 
self.assertEqual(total_data_points, multitask_microzinc.__len__()) diff --git a/tests/test_multitask_datamodule.py b/tests/test_multitask_datamodule.py index 5623eb6df..fdc7c8818 100644 --- a/tests/test_multitask_datamodule.py +++ b/tests/test_multitask_datamodule.py @@ -22,6 +22,7 @@ import numpy as np import graphium +TEMP_CACHE_DATA_PATH = "tests/temp_cache_0000" class Test_Multitask_DataModule(ut.TestCase): def setUp(self): @@ -111,7 +112,7 @@ def test_multitask_fromsmiles_dm( dm_args["featurization"] = featurization_args dm_args["num_workers"] = 0 dm_args["pin_memory"] = True - dm_args["processed_graph_data_path"] = None + dm_args["processed_graph_data_path"] = TEMP_CACHE_DATA_PATH dm_args["batch_size_training"] = 16 dm_args["batch_size_inference"] = 16 @@ -172,6 +173,8 @@ def test_multitask_fromsmiles_from_config(self): dm_args["task_specific_args"]["logp"]["df_path"] = None dm_args["task_specific_args"]["score"]["df_path"] = None + dm_args["processed_graph_data_path"] = TEMP_CACHE_DATA_PATH + dm = graphium.data.MultitaskFromSmilesDataModule(**dm_args) # assert dm.num_node_feats == 50 @@ -202,6 +205,7 @@ def test_multitask_fromsmiles_from_config_csv(self): config = graphium.load_config(name="zinc_default_multitask_pyg") dm_args = OmegaConf.to_container(config.datamodule.args, resolve=True) + dm_args["processed_graph_data_path"] = TEMP_CACHE_DATA_PATH dm = graphium.data.MultitaskFromSmilesDataModule(**dm_args) dm.prepare_data() @@ -229,6 +233,7 @@ def test_multitask_fromsmiles_from_config_parquet(self): config = graphium.load_config(name="fake_multilevel_multitask_pyg") dm_args = OmegaConf.to_container(config.datamodule.args, resolve=True) + dm_args["processed_graph_data_path"] = TEMP_CACHE_DATA_PATH dm = graphium.data.MultitaskFromSmilesDataModule(**dm_args) dm.prepare_data() @@ -257,6 +262,7 @@ def test_multitask_with_missing_fromsmiles_from_config_parquet(self): config = graphium.load_config(name="fake_and_missing_multilevel_multitask_pyg") dm_args = 
OmegaConf.to_container(config.datamodule.args, resolve=True) + dm_args["processed_graph_data_path"] = TEMP_CACHE_DATA_PATH dm = graphium.data.MultitaskFromSmilesDataModule(**dm_args) dm.prepare_data() @@ -285,23 +291,25 @@ def test_extract_graph_level_singletask(self): df = pd.read_parquet(f"tests/converted_fake_multilevel_data.parquet") num_graphs = len(df) label_cols = ["graph_label"] - output = graphium.data.datamodule.extract_labels(df, "graph", label_cols) + output, output_offsets = graphium.data.datamodule.extract_labels(df, "graph", label_cols) assert isinstance(output, np.ndarray) assert len(output.shape) == 2 assert output.shape[0] == num_graphs assert output.shape[1] == 1 + assert output_offsets is None def test_extract_graph_level_multitask(self): df = pd.read_parquet(f"tests/converted_fake_multilevel_data.parquet") num_graphs = len(df) label_cols = ["graph_label", "graph_label"] - output = graphium.data.datamodule.extract_labels(df, "graph", label_cols) + output, output_offsets = graphium.data.datamodule.extract_labels(df, "graph", label_cols) assert isinstance(output, np.ndarray) assert len(output.shape) == 2 assert output.shape[0] == num_graphs assert output.shape[1] == len(label_cols) + assert output_offsets is None def test_extract_graph_level_multitask_missing_cols(self): df = pd.read_parquet(f"tests/converted_fake_multilevel_data.parquet") @@ -313,7 +321,7 @@ def test_extract_graph_level_multitask_missing_cols(self): for missing_col in label_cols[:replace]: df[missing_col].iloc[drop_index] = None - output = graphium.data.datamodule.extract_labels(df, "graph", label_cols) + output, output_offsets = graphium.data.datamodule.extract_labels(df, "graph", label_cols) assert isinstance(output, np.ndarray) assert len(output.shape) == 2 @@ -322,17 +330,24 @@ def test_extract_graph_level_multitask_missing_cols(self): def test_non_graph_level_extract_labels(self): df = pd.read_parquet(f"tests/converted_fake_multilevel_data.parquet") + num_graphs = len(df) 
for level in ["node", "edge", "nodepair"]: label_cols = [f"{level}_label_{suffix}" for suffix in ["list", "np"]] - output = graphium.data.datamodule.extract_labels(df, level, label_cols) + output, output_offsets = graphium.data.datamodule.extract_labels(df, level, label_cols) - assert isinstance(output, list) - assert len(output[0].shape) == 2 - assert output[0].shape[1] == len(label_cols) + assert isinstance(output, np.ndarray) + assert len(output.shape) == 2 + assert output.shape[1] == len(label_cols) + assert output_offsets is not None + assert isinstance(output_offsets, np.ndarray) + assert len(output_offsets.shape) == 1 + assert output_offsets.shape[0] == (num_graphs+1) + assert output.shape[0] == output_offsets[-1] def test_non_graph_level_extract_labels_missing_cols(self): df = pd.read_parquet(f"tests/converted_fake_multilevel_data.parquet") + num_graphs = len(df) for level in ["node", "edge", "nodepair"]: label_cols = [f"{level}_label_{suffix}" for suffix in ["list", "np"]] @@ -341,16 +356,28 @@ def test_non_graph_level_extract_labels_missing_cols(self): for missing_col in label_cols[:replace]: df.loc[drop_index, missing_col] = None - output = graphium.data.datamodule.extract_labels(df, level, label_cols) + output, output_offsets = graphium.data.datamodule.extract_labels(df, level, label_cols) - for idx in drop_index: - assert len(output[idx].shape) == 2 - assert output[idx].shape[1] == len(label_cols) + assert isinstance(output, np.ndarray) + assert len(output.shape) == 2 + assert output.shape[1] == len(label_cols) + assert output_offsets is not None + assert isinstance(output_offsets, np.ndarray) + assert len(output_offsets.shape) == 1 + assert output_offsets.shape[0] == (num_graphs+1) + assert output.shape[0] == output_offsets[-1] - # Check that number of labels is adjusted correctly - if replace == 1: - non_missing_col = label_cols[1] - assert output[idx].shape[0] == len(df[non_missing_col][idx]) + for idx in drop_index: + begin_idx = 
output_offsets[idx] + end_idx = output_offsets[idx+1] + values = output[begin_idx:end_idx] + assert len(values.shape) == 2 + assert values.shape[1] == len(label_cols) + + # All removed entries must be nan + assert np.all(np.isnan(values[:,:replace])) + # All kept entries should be non-nan in this case + assert not np.any(np.isnan(values[:,replace:])) def test_tdc_admet_benchmark_data_module(self): """ From d94097c933fc67581697ba63bb951849a3708c95 Mon Sep 17 00:00:00 2001 From: ndickson Date: Mon, 6 May 2024 16:44:07 -0400 Subject: [PATCH 025/175] Removed load_type option from test_training.py, because it's no longer used --- tests/test_training.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/tests/test_training.py b/tests/test_training.py index 8ba0715e8..1e007b6b7 100644 --- a/tests/test_training.py +++ b/tests/test_training.py @@ -49,7 +49,7 @@ def setup_class(cls): print("Data has been successfully downloaded.") - def call_cli_with_overrides(self, acc_type: str, acc_prec: str, load_type: str) -> None: + def call_cli_with_overrides(self, acc_type: str, acc_prec: str) -> None: overrides = [ f"accelerator={acc_type}", "tasks=toymix", @@ -92,14 +92,12 @@ def call_cli_with_overrides(self, acc_type: str, acc_prec: str, load_type: str) # Restore the original sys.argv sys.argv = original_argv - @pytest.mark.parametrize("load_type", ["RAM", "disk"]) - def test_cpu_cli_training(self, load_type): - self.call_cli_with_overrides("cpu", "32", load_type) + def test_cpu_cli_training(self): + self.call_cli_with_overrides("cpu", "32") @pytest.mark.ipu @pytest.mark.skip - @pytest.mark.parametrize("load_type", ["RAM", "disk"]) - def test_ipu_cli_training(self, load_type): + def test_ipu_cli_training(self): with patch("poptorch.ipuHardwareIsAvailable", return_value=True): with patch("lightning_graphcore.accelerator._IPU_AVAILABLE", new=True): import poptorch @@ -108,4 +106,4 @@ def test_ipu_cli_training(self, load_type): from 
lightning_graphcore.accelerator import _IPU_AVAILABLE assert _IPU_AVAILABLE is True - self.call_cli_with_overrides("ipu", "16-true", load_type) + self.call_cli_with_overrides("ipu", "16-true") From 11e69357906ef4b7578861b585b80b4d557b754a Mon Sep 17 00:00:00 2001 From: ndickson Date: Tue, 14 May 2024 18:27:34 -0400 Subject: [PATCH 026/175] Updated comment in setup.py about how to build graphium_cpp package --- graphium/graphium_cpp/setup.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/graphium/graphium_cpp/setup.py b/graphium/graphium_cpp/setup.py index abe07cc55..b412e044c 100755 --- a/graphium/graphium_cpp/setup.py +++ b/graphium/graphium_cpp/setup.py @@ -7,8 +7,7 @@ To build, git clone pybind11 into this directory, then run: rm -r build/* export PYTHONPATH=$PYTHONPATH:./pybind11 -python ./setup.py build -cp build/lib.linux-x86_64-cpython-311/graphium_cpp.cpython-311-x86_64-linux-gnu.so ~/mambaforge/envs/graphium/bin +pip install . """ from distutils.core import setup From ff93c2dec0c8ee572064e19120681bedcfff82df Mon Sep 17 00:00:00 2001 From: ndickson Date: Tue, 14 May 2024 18:31:40 -0400 Subject: [PATCH 027/175] Rewrote test_featurizer.py. Fixed bug in mask_nans C++ function, and changed create_all_features to create all tensors even if there are nans, so that the number of atoms can still be determined from the shape of the atom features tensor. Changed parse_mol to default to not reordering atoms, to match test order. 
--- graphium/graphium_cpp/features.cpp | 23 ++-- graphium/graphium_cpp/features.h | 10 +- tests/test_featurizer.py | 200 ++++++++++++----------------- 3 files changed, 96 insertions(+), 137 deletions(-) diff --git a/graphium/graphium_cpp/features.cpp b/graphium/graphium_cpp/features.cpp index ded0409da..5c1eb4f8f 100644 --- a/graphium/graphium_cpp/features.cpp +++ b/graphium/graphium_cpp/features.cpp @@ -415,7 +415,7 @@ at::Tensor atom_onehot_feature_names_to_tensor(const std::vector& f feature_enum_values[i] = it->second; } else { - feature_enum_values[i] = int64_t(AtomFloatFeature::UNKNOWN); + feature_enum_values[i] = int64_t(AtomOneHotFeature::UNKNOWN); } } const int64_t dims[1] = { int64_t(num_features) }; @@ -441,7 +441,7 @@ at::Tensor bond_feature_names_to_tensor(const std::vector& features feature_enum_values[i] = it->second; } else { - feature_enum_values[i] = int64_t(AtomFloatFeature::UNKNOWN); + feature_enum_values[i] = int64_t(BondFeature::UNKNOWN); } } const int64_t dims[1] = { int64_t(num_features) }; @@ -1309,6 +1309,7 @@ void create_all_features( add_self_loop, use_bonds_weights, dtype); + tensors.push_back(std::move(edge_weights_tensor)); at::Tensor atom_features_tensor = create_atom_features( graph, atom_property_list_onehot, @@ -1318,9 +1319,9 @@ void create_all_features( mask_nan_style, mask_nan_value, num_nans); + tensors.push_back(std::move(atom_features_tensor)); if (num_nans != 0) { - nan_tensor_index = tensors.size()+1; - return; + nan_tensor_index = tensors.size()-1; } at::Tensor bond_features_tensor = create_bond_features( graph, @@ -1331,13 +1332,10 @@ void create_all_features( mask_nan_style, mask_nan_value, num_nans); - if (num_nans != 0) { - nan_tensor_index = tensors.size()+2; - return; - } - tensors.push_back(std::move(edge_weights_tensor)); - tensors.push_back(std::move(atom_features_tensor)); tensors.push_back(std::move(bond_features_tensor)); + if (nan_tensor_index < 0 && num_nans != 0) { + nan_tensor_index = tensors.size()-1; + } 
if (create_conformer_feature) { at::Tensor conformer_features_tensor = get_conformer_features( *graph.mol, @@ -1347,11 +1345,10 @@ void create_all_features( mask_nan_value, num_nans, smiles_string); - if (num_nans != 0) { + tensors.push_back(std::move(conformer_features_tensor)); + if (nan_tensor_index < 0 && num_nans != 0) { nan_tensor_index = tensors.size(); - return; } - tensors.push_back(std::move(conformer_features_tensor)); } create_positional_features( graph, diff --git a/graphium/graphium_cpp/features.h b/graphium/graphium_cpp/features.h index b98622ee0..4bbcde001 100644 --- a/graphium/graphium_cpp/features.h +++ b/graphium/graphium_cpp/features.h @@ -69,7 +69,8 @@ enum class AtomOneHotFeature { PHASE, TYPE, GROUP, - PERIOD + PERIOD, + UNKNOWN }; enum class BondFeature { @@ -79,7 +80,8 @@ enum class BondFeature { CONJUGATED, STEREO_ONE_HOT, CONFORMER_BOND_LENGTH, - ESTIMATED_BOND_LENGTH + ESTIMATED_BOND_LENGTH, + UNKNOWN }; enum class PositionalFeature { @@ -177,7 +179,7 @@ constexpr int64_t mask_nans(T* data, size_t n, MaskNaNStyle style, T value) { if (style == MaskNaNStyle::NONE) { return 0; } - if (style != MaskNaNStyle::REPLACE) { + if (style == MaskNaNStyle::REPLACE) { for (size_t i = 0; i < n; ++i) { if (!FeatureValues::is_finite(data[i])) { data[i] = value; @@ -272,4 +274,4 @@ std::tuple, int64_t, int64_t> featurize_smiles( std::unique_ptr parse_mol( const std::string& smiles_string, bool explicit_H, - bool ordered = true); + bool ordered = false); diff --git a/tests/test_featurizer.py b/tests/test_featurizer.py index e8f666365..ca0c8aabc 100644 --- a/tests/test_featurizer.py +++ b/tests/test_featurizer.py @@ -22,14 +22,9 @@ from rdkit import Chem import datamol as dm -from graphium.features.featurizer import ( - get_mol_atomic_features_onehot, - get_mol_atomic_features_float, - get_mol_edge_features, - mol_to_adj_and_features, - mol_to_pyggraph, -) +from graphium.features.featurizer import mol_to_pyggraph +import graphium_cpp class 
test_featurizer(ut.TestCase): smiles = [ @@ -99,155 +94,112 @@ class test_featurizer(ut.TestCase): def test_get_mol_atomic_features_onehot(self): props = deepcopy(self.atomic_onehot_props) - bad_props = ["bob"] + #bad_props = ["bob"] all_smiles = self.smiles + self.smiles_noble - for s in all_smiles: - err_msg = f"\n\tError for params:\n\t\tSMILES: {s}" - mol = dm.to_mol(s) + for mol in all_smiles: + err_msg = f"\n\tError for params:\n\t\tSMILES: {mol}" + + rdmol = dm.to_mol(mol) for ii in range(len(props)): this_props = props[:ii] err_msg2 = err_msg + f"\n\t\tprops: {this_props}" - prop_dict = get_mol_atomic_features_onehot(mol, property_list=this_props) - self.assertListEqual(list(prop_dict.keys()), this_props, msg=err_msg) - for key, val in prop_dict.items(): - err_msg3 = err_msg2 + f"\n\t\tkey: {key}" - self.assertEqual(val.shape[0], mol.GetNumAtoms(), msg=err_msg3) - self.assertGreater(val.shape[1], 1, msg=err_msg3) - self.assertTrue(np.all((val == 0) | (val == 1)), msg=err_msg3) + this_props_encoded = graphium_cpp.atom_onehot_feature_names_to_tensor(this_props) + features = mol_to_pyggraph(mol, atom_property_list_onehot=this_props_encoded, mask_nan=None) + val = features["feat"] + self.assertEqual(val.size(0), rdmol.GetNumAtoms(), msg=err_msg2) + self.assertGreaterEqual(val.size(1), 2*len(this_props), msg=err_msg2) + self.assertTrue(((val == 0) | (val == 1)).numpy().all(), msg=err_msg2) - with self.assertRaises(ValueError, msg=err_msg): - get_mol_atomic_features_onehot(mol, property_list=bad_props) + #with self.assertRaises(ValueError, msg=err_msg): + # get_mol_atomic_features_onehot(mol, property_list=bad_props) def test_get_mol_atomic_features_float(self): props = deepcopy(self.atomic_float_props) - bad_props = ["bob"] + #bad_props = ["bob"] all_smiles = self.smiles + self.smiles_noble - for s in all_smiles: - err_msg = f"\n\tError for params:\n\t\tSMILES: {s}" - mol = dm.to_mol(s) + for mol in all_smiles: + err_msg = f"\n\tError for params:\n\t\tSMILES: 
{mol}" + rdmol = dm.to_mol(mol) for ii in range(len(props)): this_props = props[:ii] err_msg2 = err_msg + f"\n\t\tprops: {this_props}" - prop_dict = get_mol_atomic_features_float(mol, property_list=this_props, mask_nan=None) - self.assertListEqual(list(prop_dict.keys()), this_props, msg=err_msg) - for key, val in prop_dict.items(): - err_msg3 = err_msg2 + f"\n\t\tkey: {key}" - self.assertListEqual(list(val.shape), [mol.GetNumAtoms()], msg=err_msg3) + this_props_encoded = graphium_cpp.atom_float_feature_names_to_tensor(this_props) + features = mol_to_pyggraph(mol, atom_property_list_float=this_props_encoded, mask_nan=None) + val = features["feat"] + self.assertEqual(val.size(0), rdmol.GetNumAtoms(), msg=err_msg2) + self.assertEqual(val.size(1), len(this_props), msg=err_msg2) - with self.assertRaises(ValueError, msg=err_msg): - get_mol_atomic_features_float(mol, property_list=bad_props) + #with self.assertRaises(ValueError, msg=err_msg): + # get_mol_atomic_features_float(mol, property_list=bad_props) def test_get_mol_atomic_features_float_nan_mask(self): - for s in self.smiles_noble: - mol = dm.to_mol(s) - + props_encoded = graphium_cpp.atom_float_feature_names_to_tensor(self.atomic_float_props) + for mol in self.smiles_noble: # Nothing happens when `mask_nan = None`, nans are still in the property array - prop_dict = get_mol_atomic_features_float( - mol, property_list=self.atomic_float_props, mask_nan=None - ) - prop_array = np.concatenate(list(prop_dict.values()), axis=0) + features = mol_to_pyggraph(mol, atom_property_list_float=props_encoded, mask_nan=None, on_error="raise") + prop_array = features["feat"] nans = np.isnan(prop_array) # Capture a raised error when `mask_nan = "raise"` with self.assertRaises(ValueError): - prop_dict = get_mol_atomic_features_float( - mol, property_list=self.atomic_float_props, mask_nan="raise" - ) + features = mol_to_pyggraph(mol, atom_property_list_float=props_encoded, mask_nan="raise", on_error="raise") + print(f"Failed to raise 
error for nans on {mol}") # Not sure how to Capture a logged warning when `mask_nan = "warn"` # Here, I'm testing a behaviour similar to `mask_nan = None` - prop_dict = get_mol_atomic_features_float( - mol, property_list=self.atomic_float_props, mask_nan="warn" - ) - prop_array = np.concatenate(list(prop_dict.values()), axis=0) - self.assertEqual(len(self.atomic_float_props), len(prop_dict)) - self.assertTrue(any(np.isnan(prop_array))) + features = mol_to_pyggraph(mol, atom_property_list_float=props_encoded, mask_nan="warn", on_error="raise") + prop_array = features["feat"] + self.assertEqual(len(self.atomic_float_props), prop_array.size(1)) + self.assertTrue(np.isnan(prop_array.numpy()).any()) # NaNs are replaced by `42` when `mask_nan=42` - prop_dict = get_mol_atomic_features_float(mol, property_list=self.atomic_float_props, mask_nan=42) - prop_array = np.concatenate(list(prop_dict.values()), axis=0) - self.assertEqual(len(self.atomic_float_props), len(prop_dict)) - self.assertFalse(any(np.isnan(prop_array))) - self.assertTrue(all(prop_array[nans] == 42)) + features = mol_to_pyggraph(mol, atom_property_list_float=props_encoded, mask_nan=42, on_error="raise") + prop_array = features["feat"] + self.assertEqual(len(self.atomic_float_props), prop_array.size(1)) + self.assertFalse(np.isnan(prop_array.numpy()).any()) + self.assertTrue((prop_array[nans] == 42).all()) def test_get_mol_edge_features(self): props = deepcopy(self.edge_props) - bad_props = ["bob"] + #bad_props = ["bob"] all_smiles = self.smiles + self.smiles_noble - for s in all_smiles: - err_msg = f"\n\tError for params:\n\t\tSMILES: {s}" - mol = dm.to_mol(s) + for mol in all_smiles: + err_msg = f"\n\tError for params:\n\t\tSMILES: {mol}" + rdmol = dm.to_mol(mol) for ii in range(len(props)): this_props = props[: ii + 1] err_msg2 = err_msg + f"\n\t\tprops: {this_props}" - prop_dict = get_mol_edge_features(mol, property_list=this_props) - self.assertListEqual(list(prop_dict.keys()), this_props, msg=err_msg) - 
for key, val in prop_dict.items(): - err_msg3 = err_msg2 + f"\n\t\tkey: {key}" - self.assertEqual(val.shape[0], mol.GetNumBonds(), msg=err_msg3) - - if mol.GetNumBonds() > 0: - with self.assertRaises(ValueError, msg=err_msg): - get_mol_edge_features(mol, property_list=bad_props) - - def test_mol_to_adj_and_features(self): - np.random.seed(42) - - for s in self.smiles: - err_msg = f"\n\tError for params:\n\t\tSMILES: {s}" - mol = dm.to_mol(s) - mol_Hs = Chem.AddHs(mol) # type: ignore - mol_No_Hs = Chem.RemoveHs(mol) # type: ignore - - for explicit_H in [True, False]: - this_mol = mol_Hs if explicit_H else mol_No_Hs - for ii in np.arange(0, 5, 0.2): - num_props = int(round(ii)) - err_msg2 = err_msg + f"\n\t\texplicit_H: {explicit_H}\n\t\tii: {ii}" - - adj, ndata, edata, _, _ = mol_to_adj_and_features( - mol=mol, - atom_property_list_onehot=np.random.choice( - self.atomic_onehot_props, size=num_props, replace=False - ), - atom_property_list_float=np.random.choice( - self.atomic_float_props, size=num_props, replace=False - ), - edge_property_list=np.random.choice(self.edge_props, size=num_props, replace=False), - add_self_loop=False, - explicit_H=explicit_H, - use_bonds_weights=False, - ) + this_props_encoded = graphium_cpp.bond_feature_names_to_tensor(this_props) + features = mol_to_pyggraph(mol, edge_property_list=this_props_encoded, mask_nan=None) + val = features["edge_feat"] + self.assertEqual(val.shape[0], 2 * rdmol.GetNumBonds(), msg=err_msg2) + if rdmol.GetNumBonds() > 0: + self.assertGreaterEqual(val.shape[1], len(this_props), msg=err_msg2) - self.assertEqual(adj.shape[0], this_mol.GetNumAtoms(), msg=err_msg2) - if num_props > 0: - self.assertEqual(ndata.shape[0], this_mol.GetNumAtoms(), msg=err_msg2) - if this_mol.GetNumBonds() > 0: - self.assertEqual(edata.shape[0], this_mol.GetNumBonds(), msg=err_msg2) - self.assertGreaterEqual(edata.shape[1], num_props, msg=err_msg2) - self.assertGreaterEqual(ndata.shape[1], num_props, msg=err_msg2) + #if mol.GetNumBonds() 
> 0: + # with self.assertRaises(ValueError, msg=err_msg): + # get_mol_edge_features(mol, property_list=bad_props) def test_mol_to_pyggraph(self): np.random.seed(42) + single_atom_prop_encoded = graphium_cpp.atom_float_feature_names_to_tensor(["atomic-number"]) + single_bond_prop_encoded = graphium_cpp.bond_feature_names_to_tensor(["bond-type-float"]) - for s in self.smiles: - err_msg = f"\n\tError for params:\n\t\tSMILES: {s}" - mol = dm.to_mol(s) - mol_Hs = Chem.AddHs(mol) # type: ignore - mol_No_Hs = Chem.RemoveHs(mol) # type: ignore + for mol in self.smiles: + err_msg = f"\n\tError for params:\n\t\tSMILES: {mol}" + rdmol = dm.to_mol(mol) graph = mol_to_pyggraph( mol=mol, - atom_property_list_onehot=[], - atom_property_list_float=["atomic-number"], - edge_property_list=["bond-type-float"], + atom_property_list_float=single_atom_prop_encoded, + edge_property_list=single_bond_prop_encoded, add_self_loop=False, explicit_H=False, use_bonds_weights=False, @@ -255,29 +207,32 @@ def test_mol_to_pyggraph(self): ) # Check the number of nodes and edges - self.assertListEqual(list(graph["feat"].shape), [mol.GetNumAtoms(), 1], msg=err_msg) - self.assertListEqual(list(graph["edge_feat"].shape), [2 * mol.GetNumBonds(), 1], msg=err_msg) + self.assertListEqual(list(graph["feat"].shape), [rdmol.GetNumAtoms(), 1], msg=err_msg) + self.assertListEqual(list(graph["edge_feat"].shape), [2 * rdmol.GetNumBonds(), 1], msg=err_msg) # Check the node features feat = graph["feat"].to_dense().numpy() * 5 + 6 # Undo the scaling - atom_nums = np.asarray([atom.GetAtomicNum() for atom in mol.GetAtoms()]) + atom_nums = np.asarray([atom.GetAtomicNum() for atom in rdmol.GetAtoms()]) np.testing.assert_array_almost_equal(feat[:, 0], atom_nums, decimal=5, err_msg=err_msg) # Check the edge features edge_feat = graph["edge_feat"].to_dense().numpy() - bond_types = np.asarray([bond.GetBondTypeAsDouble() for bond in mol.GetBonds()]).repeat(2) + bond_types = np.asarray([bond.GetBondTypeAsDouble() for bond in 
rdmol.GetBonds()]).repeat(2) np.testing.assert_array_almost_equal(edge_feat[:, 0], bond_types, decimal=5, err_msg=err_msg) # Check the edge indices - if mol.GetNumBonds() > 0: + if rdmol.GetNumBonds() > 0: edge_index = graph["edge_index"].to_dense().numpy() true_edge_index = [] - for bond in mol.GetBonds(): + for bond in rdmol.GetBonds(): true_edge_index.append([bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()]) true_edge_index.append([bond.GetEndAtomIdx(), bond.GetBeginAtomIdx()]) true_edge_index = np.asarray(true_edge_index).T np.testing.assert_array_equal(edge_index, true_edge_index, err_msg=err_msg) + mol_Hs = Chem.AddHs(rdmol) # type: ignore + mol_No_Hs = Chem.RemoveHs(rdmol) # type: ignore + # Loop over many possible combinations of properties for explicit_H in [True, False]: this_mol = mol_Hs if explicit_H else mol_No_Hs @@ -287,13 +242,18 @@ def test_mol_to_pyggraph(self): graph = mol_to_pyggraph( mol=mol, - atom_property_list_onehot=np.random.choice( - self.atomic_onehot_props, size=num_props, replace=False + atom_property_list_onehot=graphium_cpp.atom_onehot_feature_names_to_tensor( + np.random.choice( + self.atomic_onehot_props, size=num_props, replace=False + ) ), - atom_property_list_float=np.random.choice( - self.atomic_float_props, size=num_props, replace=False + atom_property_list_float=graphium_cpp.atom_float_feature_names_to_tensor( + np.random.choice( + self.atomic_float_props, size=num_props, replace=False + ) ), - edge_property_list=np.random.choice(self.edge_props, size=num_props, replace=False), + edge_property_list=graphium_cpp.bond_feature_names_to_tensor( + np.random.choice(self.edge_props, size=num_props, replace=False)), add_self_loop=False, explicit_H=explicit_H, use_bonds_weights=False, From a892068af39fdea10a377a50253b75f77d0510f4 Mon Sep 17 00:00:00 2001 From: ndickson Date: Thu, 23 May 2024 10:56:09 -0400 Subject: [PATCH 028/175] Removed deprecation warnings and deprecated parameters from datamodule.py, keeping the notes in the function 
comments --- graphium/data/datamodule.py | 59 +++++++------------------------------ 1 file changed, 11 insertions(+), 48 deletions(-) diff --git a/graphium/data/datamodule.py b/graphium/data/datamodule.py index 0b562b847..1d2ed8405 100644 --- a/graphium/data/datamodule.py +++ b/graphium/data/datamodule.py @@ -96,10 +96,6 @@ } ) -def warn_deprecated(value, name, function_name): - if value is not None: - logger.warn("In "+function_name+", "+name+" is deprecated") - class BaseDataModule(lightning.LightningDataModule): def __init__( self, @@ -788,7 +784,6 @@ def __init__( self, task_specific_args: Union[Dict[str, DatasetProcessingParams], Dict[str, Any]], processed_graph_data_path: Union[str, os.PathLike], - dataloading_from = None, featurization: Optional[Union[Dict[str, Any], omegaconf.DictConfig]] = None, batch_size_training: int = 16, batch_size_inference: int = 16, @@ -797,12 +792,7 @@ def __init__( pin_memory: bool = True, persistent_workers: bool = False, multiprocessing_context: Optional[str] = None, - featurization_n_jobs = None, - featurization_progress = None, - featurization_backend = None, - featurization_batch_size = None, collate_fn: Optional[Callable] = None, - prepare_dict_or_graph = None, preprocessing_n_jobs: int = 0, **kwargs, ): @@ -819,7 +809,6 @@ def __init__( - `df_path` - `smiles_col` - `label_cols` - dataloading_from: Deprecated. Behaviour now always matches previous "disk" option. featurization: args to apply to the SMILES to Graph featurizer. batch_size_training: batch size for training and val dataset. batch_size_inference: batch size for test dataset. @@ -831,14 +820,16 @@ def __init__( - "loky": joblib's Default. Found to cause memory leaks. - "threading": Found to be slow. + collate_fn: A custom torch collate function. Default is to `graphium.data.graphium_collate_fn` + preprocessing_n_jobs: Number of threads to use during preprocessing. + Use 0 to use all available cores, or -1 to use all but one core. + + dataloading_from: Deprecated. 
Behaviour now always matches previous "disk" option. featurization_n_jobs: Deprecated. featurization_progress: Deprecated. featurization_backend: Deprecated. featurization_batch_size: Deprecated. - collate_fn: A custom torch collate function. Default is to `graphium.data.graphium_collate_fn` prepare_dict_or_graph: Deprecated. Behaviour now always matches previous "pyg:graph" option. - preprocessing_n_jobs: Number of threads to use during preprocessing. - Use 0 to use all available cores, or -1 to use all but one core. """ BaseDataModule.__init__( self, @@ -853,13 +844,6 @@ def __init__( ) IPUDataModuleModifier.__init__(self, **kwargs) - warn_deprecated(dataloading_from, "dataloading_from", "MultitaskFromSmilesDataModule::__init__") - warn_deprecated(featurization_n_jobs, "featurization_n_jobs", "MultitaskFromSmilesDataModule::__init__") - warn_deprecated(featurization_progress, "featurization_progress", "MultitaskFromSmilesDataModule::__init__") - warn_deprecated(featurization_backend, "featurization_backend", "MultitaskFromSmilesDataModule::__init__") - warn_deprecated(featurization_batch_size, "featurization_batch_size", "MultitaskFromSmilesDataModule::__init__") - warn_deprecated(prepare_dict_or_graph, "prepare_dict_or_graph", "MultitaskFromSmilesDataModule::__init__") - self.task_specific_args = task_specific_args self.task_dataset_processing_params = {} @@ -1752,7 +1736,6 @@ def __init__( self, task_specific_args: Dict[str, Union[DatasetProcessingParams, Dict[str, Any]]], processed_graph_data_path: Optional[Union[str, os.PathLike]] = None, - dataloading_from = None, featurization: Optional[Union[Dict[str, Any], omegaconf.DictConfig]] = None, batch_size_training: int = 16, batch_size_inference: int = 16, @@ -1761,11 +1744,7 @@ def __init__( pin_memory: bool = True, persistent_workers: bool = False, multiprocessing_context: Optional[str] = None, - featurization_n_jobs = None, - featurization_progress = None, - featurization_backend = None, collate_fn: 
Optional[Callable] = None, - prepare_dict_or_graph = None, preprocessing_n_jobs: int = 0, **kwargs, ): @@ -1783,32 +1762,27 @@ def __init__( meaning that all molecules will be considered. processed_graph_data_path: Path to the processed graph data. If None, the data will be downloaded from the OGB website. - dataloading_from: Deprecated. Behaviour now always matches previous "disk" option. featurization: args to apply to the SMILES to Graph featurizer. batch_size_training: batch size for training and val dataset. batch_size_inference: batch size for test dataset. num_workers: Number of workers for the dataloader. Use -1 to use all available cores. pin_memory: Whether to pin on paginated CPU memory for the dataloader. - featurization_n_jobs: Deprecated. - featurization_progress: Deprecated. - featurization_backend: Deprecated. collate_fn: A custom torch collate function. Default is to `graphium.data.graphium_collate_fn` sample_size: - `int`: The maximum number of elements to take from the dataset. - `float`: Value between 0 and 1 representing the fraction of the dataset to consider - `None`: all elements are considered. - prepare_dict_or_graph: Deprecated. Behaviour now always matches previous "pyg:graph" option. preprocessing_n_jobs: Number of threads to use during preprocessing. Use 0 to use all available cores, or -1 to use all but one core. - """ - warn_deprecated(dataloading_from, "dataloading_from", "GraphOGBDataModule::__init__") - warn_deprecated(featurization_n_jobs, "featurization_n_jobs", "GraphOGBDataModule::__init__") - warn_deprecated(featurization_progress, "featurization_progress", "GraphOGBDataModule::__init__") - warn_deprecated(featurization_backend, "featurization_backend", "GraphOGBDataModule::__init__") - warn_deprecated(prepare_dict_or_graph, "prepare_dict_or_graph", "GraphOGBDataModule::__init__") + dataloading_from: Deprecated. Behaviour now always matches previous "disk" option. + featurization_n_jobs: Deprecated. 
+ featurization_progress: Deprecated. + featurization_backend: Deprecated. + prepare_dict_or_graph: Deprecated. Behaviour now always matches previous "pyg:graph" option. + """ new_task_specific_args = {} self.metadata = {} @@ -2014,7 +1988,6 @@ def __init__( tdc_train_val_seed: int = 0, # Inherited arguments from superclass processed_graph_data_path: Optional[Union[str, Path]] = None, - dataloading_from = None, featurization: Optional[Union[Dict[str, Any], omegaconf.DictConfig]] = None, batch_size_training: int = 16, batch_size_inference: int = 16, @@ -2023,20 +1996,10 @@ def __init__( pin_memory: bool = True, persistent_workers: bool = False, multiprocessing_context: Optional[str] = None, - featurization_n_jobs = None, - featurization_progress = None, - featurization_backend = None, collate_fn: Optional[Callable] = None, - prepare_dict_or_graph = None, preprocessing_n_jobs: int = 0, **kwargs, ): - warn_deprecated(dataloading_from, "dataloading_from", "ADMETBenchmarkDataModule::__init__") - warn_deprecated(featurization_n_jobs, "featurization_n_jobs", "ADMETBenchmarkDataModule::__init__") - warn_deprecated(featurization_progress, "featurization_progress", "ADMETBenchmarkDataModule::__init__") - warn_deprecated(featurization_backend, "featurization_backend", "ADMETBenchmarkDataModule::__init__") - warn_deprecated(prepare_dict_or_graph, "prepare_dict_or_graph", "ADMETBenchmarkDataModule::__init__") - try: from tdc.benchmark_group import admet_group from tdc.utils import retrieve_benchmark_names From 38a5510118895f40c6ac52f370420c37bcc79d89 Mon Sep 17 00:00:00 2001 From: ndickson Date: Thu, 23 May 2024 16:24:42 -0400 Subject: [PATCH 029/175] Recommended tweaks to extract_labels in multilevel_utils.py --- graphium/data/multilevel_utils.py | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/graphium/data/multilevel_utils.py b/graphium/data/multilevel_utils.py index 3cfa4e49d..238588a0d 100644 --- 
a/graphium/data/multilevel_utils.py +++ b/graphium/data/multilevel_utils.py @@ -69,7 +69,14 @@ def extract_labels(df: pd.DataFrame, task_level: str, label_cols: List[str]): v = pd.to_numeric(v, errors="coerce") length = 0 if isinstance(v, np.ndarray): - length = v.shape[0] if len(v.shape) == 1 else 0 + if len(v.shape) == 1: + length = v.shape[0] + else if len(v.shape) == 0: + length = 0 + else: + raise ValueError( + f"Graph data should be 1D np.ndarray, got ndarray with {len(v.shape)} dimensions" + ) dtype = v.dtype if dtype == np.float64: max_type = np.float64 @@ -97,6 +104,7 @@ def extract_labels(df: pd.DataFrame, task_level: str, label_cols: List[str]): for col_index, col in enumerate(label_cols): for i, v in enumerate(df[col]): full_row = begin_offsets[i] + end_row = begin_offsets[i+1] if not isinstance(v, np.ndarray): v = pd.to_numeric(v, errors="coerce") @@ -105,29 +113,24 @@ def extract_labels(df: pd.DataFrame, task_level: str, label_cols: List[str]): length = v.shape[0] if len(v.shape) == 1 else 0 for j in range(length): output[full_row + j, col_index] = v[j] - if full_row + length != begin_offsets[i+1]: - for j in range(full_row, begin_offsets[i+1]): - output[j, col_index] = np.nan elif isinstance(v, (int, float)): + length = 1 output[full_row, col_index] = v - # Fill the rest of the rows in the column with nan - end_row = begin_offsets[i+1] - if end_row != full_row+1: - for row in range(full_row+1, end_row): - output[row, col_index] = np.nan elif isinstance(v, list): length = len(v) for j in range(length): output[full_row + j, col_index] = v[j] - if full_row + length != begin_offsets[i+1]: - for j in range(full_row, begin_offsets[i+1]): - output[j, col_index] = np.nan else: raise ValueError( f"Graph data should be one of float, int, list, np.ndarray, got {type(v)}" ) + # Fill the rest of the rows in the column with nan + if full_row + length != end_row: + for row in range(full_row + length, end_row): + output[row, col_index] = np.nan + return output, 
begin_offsets From f7771b371c56201cb70d23562d58c970f9f04644 Mon Sep 17 00:00:00 2001 From: ndickson Date: Thu, 23 May 2024 18:34:17 -0400 Subject: [PATCH 030/175] Fixed "else if"->"elif" --- graphium/data/multilevel_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphium/data/multilevel_utils.py b/graphium/data/multilevel_utils.py index 238588a0d..e2e29343b 100644 --- a/graphium/data/multilevel_utils.py +++ b/graphium/data/multilevel_utils.py @@ -71,7 +71,7 @@ def extract_labels(df: pd.DataFrame, task_level: str, label_cols: List[str]): if isinstance(v, np.ndarray): if len(v.shape) == 1: length = v.shape[0] - else if len(v.shape) == 0: + elif len(v.shape) == 0: length = 0 else: raise ValueError( From 42568397aa14bd093f55de71f84230a58e31043d Mon Sep 17 00:00:00 2001 From: ndickson Date: Fri, 24 May 2024 13:04:13 -0400 Subject: [PATCH 031/175] Rewrote test_pe_nodepair.py to use graphium_cpp --- tests/test_pe_nodepair.py | 103 +++++++++++++++++++++++--------------- 1 file changed, 64 insertions(+), 39 deletions(-) diff --git a/tests/test_pe_nodepair.py b/tests/test_pe_nodepair.py index f90ce728b..a3ebe817e 100644 --- a/tests/test_pe_nodepair.py +++ b/tests/test_pe_nodepair.py @@ -1,12 +1,12 @@ """ -------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. +Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates. Use of this software is subject to the terms and conditions outlined in the LICENSE file. Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without warranties of any kind. -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. +Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates are not liable for any damages arising from its use. 
Refer to the LICENSE file for the full terms and conditions. -------------------------------------------------------------------------------- """ @@ -17,72 +17,97 @@ """ import numpy as np -import networkx as nx +import torch import unittest as ut -from graphium.features.electrostatic import compute_electrostatic_interactions -from graphium.features.commute import compute_commute_distances -from graphium.features.graphormer import compute_graphormer_distances - +import graphium +import graphium_cpp class test_positional_encodings(ut.TestCase): # Test graphs - adj_dict = {} + smiles_dict = {} + shape_dict = {} max_dict = {} # 6-ring - adj = np.asarray( - [ - [0, 1, 0, 0, 0, 1], - [1, 0, 1, 0, 0, 0], - [0, 1, 0, 1, 0, 0], - [0, 0, 1, 0, 1, 0], - [0, 0, 0, 1, 0, 1], - [1, 0, 0, 0, 1, 0], - ] - ) - adj_dict["6-ring"] = adj + smiles = "C1CCCCC1" + smiles_dict["6-ring"] = smiles + shape_dict["6-ring"] = [6, 6] max_dict["6-ring"] = 3 # 5-path - G = nx.path_graph(5) - adj = nx.to_numpy_array(G) - adj_dict["5-path"] = adj + smiles = "CCCCC" + smiles_dict["5-path"] = smiles + shape_dict["5-path"] = [5, 5] max_dict["5-path"] = 4 # 4-clique - adj = 1 - np.eye(4) - adj_dict["4-clique"] = adj + smiles = "C12C3C1C23" + smiles_dict["4-clique"] = smiles + shape_dict["4-clique"] = [4, 4] max_dict["4-clique"] = 1 # 4-barbell - H = nx.barbell_graph(4, 0) - adj = nx.to_numpy_array(H) - adj_dict["4-barbell"] = adj + smiles = "C12C3C1C23C12C3C1C23" + smiles_dict["4-barbell"] = smiles + shape_dict["4-barbell"] = [8, 8] max_dict["4-barbell"] = 3 + features = { + "electrostatic": {"pos_level": "nodepair", "pos_type": "electrostatic", "normalization": "none"}, + "graphormer": {"pos_level": "nodepair", "pos_type": "graphormer", "normalization": "none"}, + "commute": {"pos_level": "nodepair", "pos_type": "commute", "normalization": "none"}, + } + (pos_encoding_names, pos_encoding_tensor) = \ + graphium_cpp.positional_feature_options_to_tensor(features) + + def get_tensors(self, smiles): + 
tensors, _, _ = graphium_cpp.featurize_smiles( + smiles, + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_onehot + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_float + False, # has_conformer + torch.tensor(data=[], dtype=torch.int64), # edge_property_list + self.pos_encoding_tensor, + True, # duplicate_edges + False, # add_self_loop + False, # explicit_H=False + False, # use_bonds_weights + True, #offset_carbon + 7, # torch float64 + 0, # mask_nan_style_int + 0 # mask_nan_value + ) + return tensors + def test_dimensions(self): - for _, adj in self.adj_dict.items(): - pe, _, _ = compute_electrostatic_interactions(adj, cache={}) - self.assertEqual(pe.shape, adj.shape) + for key, smiles in self.smiles_dict.items(): + tensors = self.get_tensors(smiles) + + pe = tensors[4] # electrostatic + self.assertEqual(list(pe.shape), self.shape_dict[key]) - pe, _, _ = compute_graphormer_distances(adj, adj.shape[0], cache={}) - self.assertEqual(pe.shape, adj.shape) + pe = tensors[5] # graphormer + self.assertEqual(list(pe.shape), self.shape_dict[key]) - pe, _, _ = compute_commute_distances(adj, adj.shape[0], cache={}) - self.assertEqual(pe.shape, adj.shape) + pe = tensors[6] # commute + self.assertEqual(list(pe.shape), self.shape_dict[key]) def test_symmetry(self): - for _, adj in self.adj_dict.items(): - pe, _, _ = compute_graphormer_distances(adj, adj.shape[0], cache={}) + for _, smiles in self.smiles_dict.items(): + tensors = self.get_tensors(smiles) + + pe = tensors[5] # graphormer np.testing.assert_array_almost_equal(pe, pe.T) - pe, _, _ = compute_commute_distances(adj, adj.shape[0], cache={}) + pe = tensors[6] # commute np.testing.assert_array_almost_equal(pe, pe.T) def test_max_dist(self): - for key, adj in self.adj_dict.items(): - pe, _, _ = compute_graphormer_distances(adj, adj.shape[0], cache={}) + for key, smiles in self.smiles_dict.items(): + tensors = self.get_tensors(smiles) + + pe = tensors[5] # graphormer 
np.testing.assert_array_almost_equal(pe.max(), self.max_dict[key]) From 91c37a3164950f2e965ed4ac8b5fbd8a51e30958 Mon Sep 17 00:00:00 2001 From: ndickson Date: Fri, 24 May 2024 16:13:34 -0400 Subject: [PATCH 032/175] Rewrote test_pe_rw.py to use graphium_cpp. Comment update in test_pe_nodepair.py --- tests/test_pe_nodepair.py | 2 +- tests/test_pe_rw.py | 58 +++++++++++++++++++++++++-------------- 2 files changed, 38 insertions(+), 22 deletions(-) diff --git a/tests/test_pe_nodepair.py b/tests/test_pe_nodepair.py index a3ebe817e..65fb17363 100644 --- a/tests/test_pe_nodepair.py +++ b/tests/test_pe_nodepair.py @@ -13,7 +13,7 @@ """ -Unit tests for the positional encodings in graphium/features/* +Unit tests for the positional encodings in graphium/graphium_cpp/*.cpp """ import numpy as np diff --git a/tests/test_pe_rw.py b/tests/test_pe_rw.py index 938df28da..93b492e43 100644 --- a/tests/test_pe_rw.py +++ b/tests/test_pe_rw.py @@ -1,53 +1,69 @@ """ -------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. +Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates. Use of this software is subject to the terms and conditions outlined in the LICENSE file. Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without warranties of any kind. -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. +Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates are not liable for any damages arising from its use. Refer to the LICENSE file for the full terms and conditions. 
-------------------------------------------------------------------------------- """ """ -Unit tests for the positional encodings in graphium/features/* +Unit tests for the positional encodings in graphium/features/random_walk.cpp """ import numpy as np -import networkx as nx +import torch import unittest as ut -from graphium.features.rw import compute_rwse - +import graphium +import graphium_cpp class test_pe_spectral(ut.TestCase): - def test_caching_and_outputs(self): + def test_outputs(self): # 4-barbell - G = nx.barbell_graph(4, 0) - adj = nx.to_numpy_array(G) - num_nodes = adj.shape[0] - cache = {} + smiles = "C12C3C1C23C12C3C1C23" + num_nodes = 8 ksteps1 = [4, 6] ksteps2 = [2] ksteps3 = [6, 7] - pe1, _, cache = compute_rwse( - adj.astype(np.float32), ksteps1, num_nodes, cache, pos_type="rw_transition_probs" - ) - - pe2, _, cache = compute_rwse( - adj.astype(np.float32), ksteps2, num_nodes, cache, pos_type="rw_return_probs" + # The feature names only depend on pos_type and pos_level, so the two + # rw_return_probs features can't have the same pos_level. 
+ features = { + "rw_transition_probs": {"pos_level": "nodepair", "pos_type": "rw_transition_probs", "normalization": "none", "ksteps": ksteps1}, + "rw_return_probs_0": {"pos_level": "node", "pos_type": "rw_return_probs", "normalization": "none", "ksteps": ksteps2}, + "rw_return_probs_1": {"pos_level": "nodepair", "pos_type": "rw_return_probs", "normalization": "none", "ksteps": ksteps3}, + } + (pos_encoding_names, pos_encoding_tensor) = \ + graphium_cpp.positional_feature_options_to_tensor(features) + + tensors, _, _ = graphium_cpp.featurize_smiles( + smiles, + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_onehot + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_float + False, # has_conformer + torch.tensor(data=[], dtype=torch.int64), # edge_property_list + pos_encoding_tensor, + True, # duplicate_edges + False, # add_self_loop + False, # explicit_H=False + False, # use_bonds_weights + True, #offset_carbon + 7, # torch float64 + 0, # mask_nan_style_int + 0 # mask_nan_value ) - pe3, _, cache = compute_rwse( - adj.astype(np.float32), ksteps3, num_nodes, cache, pos_type="rw_return_probs" - ) + pe1 = tensors[4] + pe2 = tensors[5] + pe3 = tensors[6] - self.assertTrue(all([k in cache["ksteps"] for k in ksteps1 + ksteps2 + ksteps3])) self.assertTrue(pe1.shape, np.zeros((num_nodes, num_nodes, len(ksteps1)))) self.assertTrue(pe2.shape, np.zeros((num_nodes, len(ksteps2)))) self.assertTrue(pe3.shape, np.zeros((num_nodes, len(ksteps3)))) From f347a0dbcab0a493b8184380ebbb8f428168294d Mon Sep 17 00:00:00 2001 From: ndickson Date: Fri, 24 May 2024 17:12:44 -0400 Subject: [PATCH 033/175] Rewrote test_pe_spectral.py to use graphium_cpp --- tests/test_pe_spectral.py | 70 +++++++++++++++++++++++++-------------- 1 file changed, 45 insertions(+), 25 deletions(-) diff --git a/tests/test_pe_spectral.py b/tests/test_pe_spectral.py index 400eb9630..052f13f04 100644 --- a/tests/test_pe_spectral.py +++ b/tests/test_pe_spectral.py @@ -1,12 +1,12 @@ """ 
-------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. +Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates. Use of this software is subject to the terms and conditions outlined in the LICENSE file. Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without warranties of any kind. -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. +Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates are not liable for any damages arising from its use. Refer to the LICENSE file for the full terms and conditions. -------------------------------------------------------------------------------- """ @@ -17,40 +17,60 @@ """ import numpy as np -import networkx as nx +import torch import unittest as ut -from graphium.features.spectral import compute_laplacian_pe - +import graphium +import graphium_cpp + +def get_pe_tensors(smiles, pos_encoding_tensor): + tensors, _, _ = graphium_cpp.featurize_smiles( + smiles, + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_onehot + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_float + False, # has_conformer + torch.tensor(data=[], dtype=torch.int64), # edge_property_list + pos_encoding_tensor, + True, # duplicate_edges + False, # add_self_loop + False, # explicit_H=False + False, # use_bonds_weights + True, #offset_carbon + 7, # torch float64 + 0, # mask_nan_style_int + 0 # mask_nan_value + ) + return tensors class test_pe_spectral(ut.TestCase): - # 2 disconnected 3 cliques - adj1 = np.zeros((6, 6)) - adj_3clq = 1 - np.eye(3) - adj1[:3, :3] = adj_3clq - adj1[3:, 3:] = adj_3clq + def test_for_connected_vs_disconnected_graph(self): + # 2 disconnected 3 cliques + smiles1 = "C1CC1.C1CC1" - # 3-clique - adj2 = 1 - 
np.eye(6) + # 6-clique (have to use S instead of C, because RDKit doesn't accept a carbon having 6 explicit bonds) + smiles2 = "S1234S567S189S251S368S4791" - def test_for_connected_vs_disconnected_graph(self): + num_atoms = 6 num_pos = 3 - # test if pe works identically on connected vs disconnected graphs - eigvals_pe1, _, _, cache = compute_laplacian_pe(self.adj1, num_pos, cache={}) - eigvals_pe1 = np.real(eigvals_pe1).astype(np.float32) - _, eigvecs_pe1, _, _ = compute_laplacian_pe(self.adj1, num_pos, cache=cache) - - # We expect to cache 4 objects in when running the functon for the first time - self.assertEqual(len(cache.keys()), 4) + features = { + "laplacian_eigval": {"pos_level": "node", "pos_type": "laplacian_eigval", "normalization": "none", "num_pos": num_pos, "disconnected_comp": True}, + "laplacian_eigvec": {"pos_level": "node", "pos_type": "laplacian_eigvec", "normalization": "none", "num_pos": num_pos, "disconnected_comp": True}, + } + (pos_encoding_names, pos_encoding_tensor) = \ + graphium_cpp.positional_feature_options_to_tensor(features) - eigvals_pe2, _, _, _ = compute_laplacian_pe(self.adj2, num_pos, cache={}) - eigvals_pe2 = np.real(eigvals_pe2).astype(np.float32) - _, eigvecs_pe2, _, _ = compute_laplacian_pe(self.adj2, num_pos, cache={}) + # test if pe works identically on connected vs disconnected graphs + tensors1 = get_pe_tensors(smiles1, pos_encoding_tensor) + eigvals_pe1 = tensors1[4] + eigvecs_pe1 = tensors1[5] + tensors2 = get_pe_tensors(smiles2, pos_encoding_tensor) + eigvals_pe2 = tensors2[4] + eigvecs_pe2 = tensors2[5] np.testing.assert_array_almost_equal(2 * eigvals_pe1, eigvals_pe2) - self.assertListEqual(list(eigvals_pe2.shape), [self.adj2.shape[0], num_pos]) - self.assertListEqual(list(eigvecs_pe2.shape), [self.adj2.shape[0], num_pos]) + self.assertListEqual(list(eigvals_pe2.shape), [num_atoms, num_pos]) + self.assertListEqual(list(eigvecs_pe2.shape), [num_atoms, num_pos]) if __name__ == "__main__": From 
26b553150144686e2e15550d267a75a17be7dd43 Mon Sep 17 00:00:00 2001 From: ndickson Date: Fri, 24 May 2024 17:40:11 -0400 Subject: [PATCH 034/175] Removed tests/test_positional_encodings.py, because it's a duplicate of test_pe_nodepair.py, apart from a couple comments --- tests/test_positional_encodings.py | 92 ------------------------------ 1 file changed, 92 deletions(-) delete mode 100644 tests/test_positional_encodings.py diff --git a/tests/test_positional_encodings.py b/tests/test_positional_encodings.py deleted file mode 100644 index 89bf355a4..000000000 --- a/tests/test_positional_encodings.py +++ /dev/null @@ -1,92 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. 
--------------------------------------------------------------------------------- -""" - - -""" -Unit tests for the positional encodings in graphium/features/* -""" - -import numpy as np -import networkx as nx -import unittest as ut - -# from graphium.features.spectral import compute_laplacian_positional_eigvecs # TODO: add tests -# from graphium.features.rw import compute_rwse # TODO: add tests -from graphium.features.electrostatic import compute_electrostatic_interactions -from graphium.features.commute import compute_commute_distances -from graphium.features.graphormer import compute_graphormer_distances - - -class test_positional_encodings(ut.TestCase): - # Test graphs - adj_dict = {} - max_dict = {} - - # 6-ring - adj = np.asarray( - [ - [0, 1, 0, 0, 0, 1], - [1, 0, 1, 0, 0, 0], - [0, 1, 0, 1, 0, 0], - [0, 0, 1, 0, 1, 0], - [0, 0, 0, 1, 0, 1], - [1, 0, 0, 0, 1, 0], - ] - ) - adj_dict["6-ring"] = adj - max_dict["6-ring"] = 3 - - # 5-path - G = nx.path_graph(5) - adj = nx.to_numpy_array(G) - adj_dict["5-path"] = adj - max_dict["5-path"] = 4 - - # 4-clique - adj = 1 - np.eye(4) - adj_dict["4-clique"] = adj - max_dict["4-clique"] = 1 - - # 4-barbell - H = nx.barbell_graph(4, 0) - adj = nx.to_numpy_array(H) - adj_dict["4-barbell"] = adj - max_dict["4-barbell"] = 3 - - def test_dimensions(self): - for _, adj in self.adj_dict.items(): - pe, _, _ = compute_electrostatic_interactions(adj, cache={}) - self.assertEqual(pe.shape, adj.shape) - - pe, _, _ = compute_graphormer_distances(adj, adj.shape[0], cache={}) - self.assertEqual(pe.shape, adj.shape) - - pe, _, _ = compute_commute_distances(adj, adj.shape[0], cache={}) - self.assertEqual(pe.shape, adj.shape) - - def test_symmetry(self): - for _, adj in self.adj_dict.items(): - pe, _, _ = compute_graphormer_distances(adj, adj.shape[0], cache={}) - np.testing.assert_array_almost_equal(pe, pe.T) - - pe, _, _ = compute_commute_distances(adj, adj.shape[0], cache={}) - np.testing.assert_array_almost_equal(pe, pe.T) - - def 
test_max_dist(self): - for key, adj in self.adj_dict.items(): - pe, _, _ = compute_graphormer_distances(adj, adj.shape[0], cache={}) - np.testing.assert_array_almost_equal(pe.max(), self.max_dict[key]) - - -if __name__ == "__main__": - ut.main() From 1ded38b1a85e16658ebd2f6d689e6ca8db2a6dcd Mon Sep 17 00:00:00 2001 From: ndickson Date: Tue, 28 May 2024 13:56:58 -0400 Subject: [PATCH 035/175] Fixed handling of disconnected components vs. single component for laplacian eigenvector/eigenvalue features, especially edge case with a single atom that was being treated as having multiple components, and caching the results --- graphium/graphium_cpp/electrostatic.cpp | 2 +- graphium/graphium_cpp/features.cpp | 21 +++++- graphium/graphium_cpp/spectral.cpp | 86 +++++++++++++++---------- graphium/graphium_cpp/spectral.h | 13 +++- 4 files changed, 82 insertions(+), 40 deletions(-) diff --git a/graphium/graphium_cpp/electrostatic.cpp b/graphium/graphium_cpp/electrostatic.cpp index 29ff430c2..56efd2f5c 100644 --- a/graphium/graphium_cpp/electrostatic.cpp +++ b/graphium/graphium_cpp/electrostatic.cpp @@ -21,7 +21,7 @@ void compute_laplacian_pseudoinverse( // If we've already computed the eigendecomposition with the correct normalization, // skip recomputing it. 
if (data.eigenvalues.size() != n || data.normalization != Normalization::NONE) { - compute_laplacian_eigendecomp(n, row_starts, neighbors, Normalization::NONE, data, false, weights); + compute_laplacian_eigendecomp(n, row_starts, neighbors, Normalization::NONE, data, 1, nullptr, weights); } matrix.clear(); diff --git a/graphium/graphium_cpp/features.cpp b/graphium/graphium_cpp/features.cpp index 5c1eb4f8f..f9357eaad 100644 --- a/graphium/graphium_cpp/features.cpp +++ b/graphium/graphium_cpp/features.cpp @@ -1071,6 +1071,8 @@ void create_positional_features( LaplacianData laplacian_data; LaplacianData laplacian_data_comp; + size_t num_components = 0; // 0 indicates that the components haven't been computed yet + std::vector components; std::vector laplacian_pseudoinverse; std::vector matrix; size_t i = 0; @@ -1091,9 +1093,24 @@ void create_positional_features( Normalization normalization = Normalization(property_list[i + 1]); bool disconnected_comp = (property_list[i + 2] != 0); i += 3; - LaplacianData& current_data = disconnected_comp ? laplacian_data : laplacian_data_comp; + + // The common case is that there's only 1 component, even if disconnected_comp is true, + // so find the number of components, first. + if (disconnected_comp && num_components == 0) { + num_components = find_components(graph.num_atoms, neighbours.neighbour_starts, neighbours.neighbours, components); + } + const bool multiple_components = disconnected_comp && (num_components > 1); + + LaplacianData& current_data = multiple_components ? laplacian_data_comp : laplacian_data; if (current_data.eigenvalues.size() == 0 || current_data.normalization != normalization) { - compute_laplacian_eigendecomp(graph.num_atoms, neighbours.neighbour_starts, neighbours.neighbours, normalization, laplacian_data, disconnected_comp); + compute_laplacian_eigendecomp( + graph.num_atoms, + neighbours.neighbour_starts, + neighbours.neighbours, + normalization, + current_data, + multiple_components ? 
num_components : 1, + &components); } const bool isVec = (property == int64_t(PositionalFeature::LAPLACIAN_EIGENVEC)); diff --git a/graphium/graphium_cpp/spectral.cpp b/graphium/graphium_cpp/spectral.cpp index 6011f028b..9ce9e6ed1 100644 --- a/graphium/graphium_cpp/spectral.cpp +++ b/graphium/graphium_cpp/spectral.cpp @@ -11,6 +11,44 @@ #include "features.h" #include +size_t find_components( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + std::vector& components) { + + int32_t num_components = (n <= 1) ? 1 : 0; + std::vector queue; + if (n > 1) { + // First, find which nodes are in which component. + components.resize(n, -1); + queue.reserve(n); + for (uint32_t starti = 0; starti < n; ++starti) { + if (components[starti] >= 0) { + continue; + } + const int32_t component = num_components; + ++num_components; + queue.push_back(starti); + components[starti] = component; + while (queue.size() != 0) { + uint32_t current = queue[queue.size()-1]; + queue.resize(queue.size()-1); + const uint32_t* neighbor_begin = neighbors + row_starts[current]; + const uint32_t* neighbor_end = neighbors + row_starts[current+1]; + for ( ; neighbor_begin != neighbor_end; ++neighbor_begin) { + uint32_t neighbor = *neighbor_begin; + if (neighbor > starti && components[neighbor] < 0) { + components[neighbor] = component; + queue.push_back(neighbor); + } + } + } + } + } + return size_t(num_components); +} + template void compute_laplacian_eigendecomp_single(const uint32_t n, LaplacianData& data, Normalization normalization) { T* matrix = data.matrix_temp.data(); @@ -118,7 +156,16 @@ void compute_laplacian_eigendecomp_single(const uint32_t n, LaplacianData& da } template -void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, const uint32_t* neighbors, Normalization normalization, LaplacianData& data, bool disconnected_comp, const T* weights) { +void compute_laplacian_eigendecomp( + const uint32_t n, + const uint32_t* row_starts, + const 
uint32_t* neighbors, + Normalization normalization, + LaplacianData& data, + size_t num_components, + const std::vector* components, + const T* weights) { + // Compute the weight row sums, if applicable, for the diagonal of the laplacian if (weights != nullptr) { data.eigenvalues_temp.clear(); @@ -192,36 +239,6 @@ void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, } } - std::vector components; - int32_t num_components = 0; - std::vector queue; - if (disconnected_comp && n > 1) { - // First, find which nodes are in which component. - components.resize(n, -1); - queue.reserve(n); - for (uint32_t starti = 0; starti < n; ++starti) { - if (components[starti] >= 0) { - continue; - } - const int32_t component = num_components; - ++num_components; - queue.push_back(starti); - components[starti] = component; - while (queue.size() != 0) { - uint32_t current = queue[queue.size()-1]; - queue.resize(queue.size()-1); - const uint32_t* neighbor_begin = neighbors + row_starts[current]; - const uint32_t* neighbor_end = neighbors + row_starts[current+1]; - for ( ; neighbor_begin != neighbor_end; ++neighbor_begin) { - uint32_t neighbor = *neighbor_begin; - if (neighbor > starti && components[neighbor] < 0) { - components[neighbor] = component; - queue.push_back(neighbor); - } - } - } - } - } if (num_components == 1) { compute_laplacian_eigendecomp_single(n, data, normalization); return; @@ -239,11 +256,12 @@ void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, data.vectors.resize(size_t(n) * n, 0); LaplacianData sub_data; + std::vector queue; for (int32_t component = 0; component < num_components; ++component) { // Reuse queue for the indices queue.resize(0); for (uint32_t i = 0; i < n; ++i) { - if (components[i] == component) { + if ((*components)[i] == component) { queue.push_back(i); } } @@ -295,5 +313,5 @@ void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, } } -template void 
compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, const uint32_t* neighbors, Normalization normalization, LaplacianData& data, bool disconnected_comp, const float* weights); -template void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, const uint32_t* neighbors, Normalization normalization, LaplacianData& data, bool disconnected_comp, const double* weights); +template void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, const uint32_t* neighbors, Normalization normalization, LaplacianData& data, size_t num_components, const std::vector* components, const float* weights); +template void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, const uint32_t* neighbors, Normalization normalization, LaplacianData& data, size_t num_components, const std::vector* components, const double* weights); diff --git a/graphium/graphium_cpp/spectral.h b/graphium/graphium_cpp/spectral.h index 5ecad8bbe..3f3b6e41b 100644 --- a/graphium/graphium_cpp/spectral.h +++ b/graphium/graphium_cpp/spectral.h @@ -20,6 +20,12 @@ struct LaplacianData { std::vector order_temp; }; +size_t find_components( + const uint32_t n, + const uint32_t* row_starts, + const uint32_t* neighbors, + std::vector& components); + // This outputs the eigenvalues in data.eigenvalues and the eigenvectors in data.vectors template void compute_laplacian_eigendecomp( @@ -28,8 +34,9 @@ void compute_laplacian_eigendecomp( const uint32_t* neighbors, Normalization normalization, LaplacianData& data, - bool disconnected_comp = false, + size_t num_components, + const std::vector* components, const T* weights = nullptr); -extern template void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, const uint32_t* neighbors, Normalization normalization, LaplacianData& data, bool disconnected_comp, const float* weights); -extern template void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* 
row_starts, const uint32_t* neighbors, Normalization normalization, LaplacianData& data, bool disconnected_comp, const double* weights); +extern template void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, const uint32_t* neighbors, Normalization normalization, LaplacianData& data, size_t num_components, const std::vector* components, const float* weights); +extern template void compute_laplacian_eigendecomp(const uint32_t n, const uint32_t* row_starts, const uint32_t* neighbors, Normalization normalization, LaplacianData& data, size_t num_components, const std::vector* components, const double* weights); From 314d636637a513bbee6eadfc655dcee46571e1df Mon Sep 17 00:00:00 2001 From: ndickson Date: Tue, 28 May 2024 13:58:23 -0400 Subject: [PATCH 036/175] Fixed compile warnings in one_hot.cpp --- graphium/graphium_cpp/one_hot.cpp | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/graphium/graphium_cpp/one_hot.cpp b/graphium/graphium_cpp/one_hot.cpp index 1f4ec09fe..b79d93f23 100644 --- a/graphium/graphium_cpp/one_hot.cpp +++ b/graphium/graphium_cpp/one_hot.cpp @@ -177,8 +177,11 @@ size_t get_one_hot_atom_feature_size(AtomOneHotFeature feature) { case AtomOneHotFeature::TYPE: return typeCount + 1; case AtomOneHotFeature::GROUP: return groupCount + 1; case AtomOneHotFeature::PERIOD: return periodCount + 1; + default: + // Missing implementation + assert(0); + return 0; } - return 0; } template @@ -290,11 +293,11 @@ size_t get_one_hot_atom_feature(const GraphData& graph, T* data, AtomOneHotFeatu data[dataIndex] = FeatureValues::one; } return feature_size; + default: + // Missing implementation + assert(0); + return feature_size; } - - // Missing implementation - assert(0); - return feature_size; } // Explicit instantiations, so that the function can be templated From e49b4daf38ea7e0c0b743a066075f1d4a03abf39 Mon Sep 17 00:00:00 2001 From: ndickson Date: Tue, 28 May 2024 14:01:11 -0400 Subject: [PATCH 037/175] 
Rewrote test_positional_encoders.py, though it's still failing the test_laplacian_eigvec_with_encoder case --- tests/test_positional_encoders.py | 126 ++++++++++++++++++++---------- 1 file changed, 85 insertions(+), 41 deletions(-) diff --git a/tests/test_positional_encoders.py b/tests/test_positional_encoders.py index 166929ba2..fd5709636 100644 --- a/tests/test_positional_encoders.py +++ b/tests/test_positional_encoders.py @@ -1,12 +1,12 @@ """ -------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. +Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates. Use of this software is subject to the terms and conditions outlined in the LICENSE file. Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without warranties of any kind. -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. +Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates are not liable for any damages arising from its use. Refer to the LICENSE file for the full terms and conditions. 
-------------------------------------------------------------------------------- """ @@ -18,18 +18,37 @@ import numpy as np import unittest as ut -from copy import deepcopy from rdkit import Chem import datamol as dm import torch -from scipy.sparse import coo_matrix +from torch_geometric.data import Data + +import graphium +import graphium_cpp -from graphium.features.featurizer import GraphDict -from graphium.features.positional_encoding import graph_positional_encoder from graphium.nn.encoders import laplace_pos_encoder, mlp_encoder, signnet_pos_encoder + # TODO: Test the MLP_encoder and signnet_pos_encoder +def get_pe_tensors(smiles, pos_encoding_tensor): + tensors, _, _ = graphium_cpp.featurize_smiles( + smiles, + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_onehot + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_float + False, # has_conformer + torch.tensor(data=[], dtype=torch.int64), # edge_property_list + pos_encoding_tensor, + True, # duplicate_edges + False, # add_self_loop + False, # explicit_H=False + False, # use_bonds_weights + True, #offset_carbon + 7, # torch float64 + 0, # mask_nan_style_int + 0 # mask_nan_value + ) + return tensors class test_positional_encoder(ut.TestCase): smiles = [ @@ -44,22 +63,32 @@ class test_positional_encoder(ut.TestCase): adjs = [Chem.rdmolops.GetAdjacencyMatrix(mol) for mol in mols] def test_laplacian_eigvec_eigval(self): - for ii, adj in enumerate(deepcopy(self.adjs)): + for ii, mol in enumerate(self.smiles): + adj = self.adjs[ii] for num_pos in [1, 2, 4]: # Can't test too much eigs because of multiplicities for disconnected_comp in [True, False]: err_msg = f"adj_id={ii}, num_pos={num_pos}, disconnected_comp={disconnected_comp}" - # returns a dictionary of computed pe - pos_kwargs = { - "pos_type": "laplacian_eigvec", - "num_pos": num_pos, - "disconnected_comp": disconnected_comp, - "pos_level": "node", + features = { + "laplacian_eigval": { + "pos_type": "laplacian_eigval", + 
"num_pos": num_pos, + "disconnected_comp": disconnected_comp, + "pos_level": "node", + }, + "laplacian_eigvec": { + "pos_type": "laplacian_eigvec", + "num_pos": num_pos, + "disconnected_comp": disconnected_comp, + "pos_level": "node", + }, } - num_nodes = adj.shape[0] - eigvecs, cache = graph_positional_encoder(adj, num_nodes, pos_kwargs=pos_kwargs) - pos_kwargs["pos_type"] = "laplacian_eigval" - eigvals, cache = graph_positional_encoder(adj, num_nodes, pos_kwargs=pos_kwargs) + (pos_encoding_names, pos_encoding_tensor) = \ + graphium_cpp.positional_feature_options_to_tensor(features) + + tensors = get_pe_tensors(mol, pos_encoding_tensor) + eigvals = tensors[4] + eigvecs = tensors[5] self.assertEqual(list(eigvecs.shape), [adj.shape[0], num_pos], msg=err_msg) self.assertEqual(list(eigvals.shape), [adj.shape[0], num_pos], msg=err_msg) @@ -74,7 +103,8 @@ def test_laplacian_eigvec_eigval(self): true_num_pos = min(num_pos, len(true_eigvals)) true_eigvals, true_eigvecs = true_eigvals[:true_num_pos], true_eigvecs[:, :true_num_pos] - if not ("." in self.smiles[ii]): + if not ("." 
in mol): + print(f"About to test eigvecs for smiles {mol}, num_pos {num_pos}, disconnected_comp {disconnected_comp}") np.testing.assert_array_almost_equal( np.abs(true_eigvecs), np.abs(eigvecs[:, :true_num_pos]), @@ -88,13 +118,21 @@ def test_laplacian_eigvec_eigval(self): # didn't actually check the exact computation result because the code was adapted def test_rwse(self): - for ii, adj in enumerate(deepcopy(self.adjs)): + for ii, mol in enumerate(self.smiles): + adj = self.adjs[ii] for ksteps in [1, 2, 4]: err_msg = f"adj_id={ii}, ksteps={ksteps}" num_nodes = adj.shape[0] pos_kwargs = {"pos_type": "rw_return_probs", "ksteps": ksteps, "pos_level": "node"} - rwse_embed, cache = graph_positional_encoder(adj, num_nodes, pos_kwargs=pos_kwargs) + features = { + "rw_return_probs": pos_kwargs, + } + (pos_encoding_names, pos_encoding_tensor) = \ + graphium_cpp.positional_feature_options_to_tensor(features) + tensors = get_pe_tensors(mol, pos_encoding_tensor) + rwse_embed = tensors[4] + self.assertEqual(list(rwse_embed.shape), [num_nodes, ksteps], msg=err_msg) # TODO: work in progress @@ -105,23 +143,30 @@ def test_rwse(self): """ def test_laplacian_eigvec_with_encoder(self): - for ii, adj in enumerate(deepcopy(self.adjs)): + for ii, mol in enumerate(self.smiles): for num_pos in [2, 4, 8]: # Can't test too much eigs because of multiplicities for disconnected_comp in [True, False]: for model_type in ["Transformer", "DeepSet", "MLP"]: err_msg = f"adj_id={ii}, num_pos={num_pos}, disconnected_comp={disconnected_comp}" - # returns a dictionary of computed pe - pos_kwargs = { - "pos_type": "laplacian_eigvec", - "num_pos": num_pos, - "disconnected_comp": disconnected_comp, - "pos_level": "node", + features = { + "laplacian_eigval": { + "pos_type": "laplacian_eigval", + "num_pos": num_pos, + "disconnected_comp": disconnected_comp, + "pos_level": "node", + }, + "laplacian_eigvec": { + "pos_type": "laplacian_eigvec", + "num_pos": num_pos, + "disconnected_comp": disconnected_comp, + 
"pos_level": "node", + }, } - num_nodes = adj.shape[0] - eigvecs, cache = graph_positional_encoder(adj, num_nodes, pos_kwargs=pos_kwargs) - pos_kwargs["pos_type"] = "laplacian_eigval" - eigvals, cache = graph_positional_encoder(adj, num_nodes, pos_kwargs=pos_kwargs) + (pos_encoding_names, pos_encoding_tensor) = \ + graphium_cpp.positional_feature_options_to_tensor(features) + + tensors = get_pe_tensors(mol, pos_encoding_tensor) input_keys = ["laplacian_eigvec", "laplacian_eigval"] in_dim = num_pos @@ -129,16 +174,15 @@ def test_laplacian_eigvec_with_encoder(self): out_dim = 64 num_layers = 1 - eigvecs = torch.from_numpy(eigvecs) - eigvals = torch.from_numpy(eigvals) - - g = GraphDict( - { - "adj": coo_matrix(adj), - "data": {"laplacian_eigval": eigvals, "laplacian_eigvec": eigvecs}, + num_nodes = tensors[2].size(0) + data_dict = { + #"feat": tensors[2], + #"edge_feat": tensors[3], + "laplacian_eigval": tensors[4], + "laplacian_eigvec": tensors[5], } - ) - batch = g.make_pyg_graph() + # Create the PyG graph object `Data` + data = Data(edge_index=tensors[0], edge_weight=tensors[1], num_nodes=num_nodes, **data_dict) encoder = laplace_pos_encoder.LapPENodeEncoder( input_keys=input_keys, @@ -153,7 +197,7 @@ def test_laplacian_eigvec_with_encoder(self): first_normalization=None, ) - hidden_embed = encoder(batch, key_prefix=None) + hidden_embed = encoder(data, key_prefix=None) assert "node" in hidden_embed.keys() self.assertEqual(list(hidden_embed["node"].shape), [num_nodes, out_dim], msg=err_msg) From f001464ee43be14a53e3467d163b90e8c922cc6a Mon Sep 17 00:00:00 2001 From: ndickson Date: Tue, 4 Jun 2024 17:25:52 -0400 Subject: [PATCH 038/175] Removed commented out lines from setup.py --- graphium/graphium_cpp/setup.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/graphium/graphium_cpp/setup.py b/graphium/graphium_cpp/setup.py index b412e044c..0755397b8 100755 --- a/graphium/graphium_cpp/setup.py +++ b/graphium/graphium_cpp/setup.py @@ -19,9 +19,6 @@ rdkit_lib_index 
= rdkit.__path__[0].split('/').index('lib') rdkit_prefix = '/'.join(rdkit.__path__[0].split('/')[:rdkit_lib_index]) -#print(rdkit_lib_index) -#print(rdkit_prefix) - ext_modules = [ Pybind11Extension( 'graphium_cpp', @@ -42,7 +39,6 @@ include_dirs = [os.path.join(torch_dir,"include"), os.path.join(torch_dir,"include/torch/csrc/api/include"), os.path.join(rdkit_prefix, "include/rdkit"), - #"/opt/nvidia/nsight-systems/2023.2.3/target-linux-x64/nvtx/include", numpy.get_include()], libraries = [ "RDKitAlignment", From 2782fbcb96211ea8da73532552e6ca2c599d2f2e Mon Sep 17 00:00:00 2001 From: ndickson Date: Tue, 4 Jun 2024 17:45:55 -0400 Subject: [PATCH 039/175] Ran linting on Python files --- graphium/data/collate.py | 36 +++++---- graphium/data/datamodule.py | 91 ++++++++++++++++------- graphium/data/dataset.py | 27 ++++--- graphium/data/multilevel_utils.py | 24 +++--- graphium/features/featurizer.py | 30 ++++---- graphium/graphium_cpp/setup.py | 113 ++++++++++++++++------------- tests/test_collate.py | 21 +++++- tests/test_datamodule.py | 17 +++-- tests/test_dataset.py | 43 ++++++++--- tests/test_featurizer.py | 46 +++++++----- tests/test_multitask_datamodule.py | 13 ++-- tests/test_pe_nodepair.py | 42 +++++------ tests/test_pe_rw.py | 53 +++++++++----- tests/test_pe_spectral.py | 49 ++++++++----- tests/test_positional_encoders.py | 61 +++++++++------- 15 files changed, 404 insertions(+), 262 deletions(-) diff --git a/graphium/data/collate.py b/graphium/data/collate.py index 1965c729b..cab3151de 100644 --- a/graphium/data/collate.py +++ b/graphium/data/collate.py @@ -27,6 +27,7 @@ from graphium.data.utils import get_keys from graphium.data.dataset import torch_enum_to_dtype + def graphium_collate_fn( elements: Union[List[Any], Dict[str, List[Any]]], labels_num_cols_dict: Optional[Dict[str, Any]] = None, @@ -103,7 +104,9 @@ def graphium_collate_fn( # Multitask setting: We have to pad the missing labels if key == "labels": labels = [d[key] for d in elements] - batch[key] = 
collate_labels(labels, labels_num_cols_dict, labels_dtype_dict, num_nodes, num_edges) + batch[key] = collate_labels( + labels, labels_num_cols_dict, labels_dtype_dict, num_nodes, num_edges + ) elif key == "num_nodes" or key == "num_edges": continue @@ -131,7 +134,9 @@ def graphium_collate_fn( return default_collate(elements) -def collage_pyg_graph(pyg_graphs: List[Data], num_nodes: List[int], batch_size_per_pack: Optional[int] = None): +def collage_pyg_graph( + pyg_graphs: List[Data], num_nodes: List[int], batch_size_per_pack: Optional[int] = None +): """ Function to collate pytorch geometric graphs. Convert all numpy types to torch @@ -150,7 +155,9 @@ def collage_pyg_graph(pyg_graphs: List[Data], num_nodes: List[int], batch_size_p for pyg_key in get_keys(pyg_graph): # pad nodepair-level positional encodings if pyg_key.startswith("nodepair_"): - pyg_graph[pyg_key] = pad_nodepairs(pyg_graph[pyg_key], pyg_graph.num_nodes, max_num_nodes_per_graph) + pyg_graph[pyg_key] = pad_nodepairs( + pyg_graph[pyg_key], pyg_graph.num_nodes, max_num_nodes_per_graph + ) # Convert edge index to int64 pyg_graph.edge_index = pyg_graph.edge_index.to(torch.int64) @@ -188,17 +195,14 @@ def pad_to_expected_label_size(labels: torch.Tensor, label_rows: int, label_cols pad_sizes = [label_cols - labels.shape[1], 0, label_rows - labels.shape[0], 0] if any([s < 0 for s in pad_sizes]): - logger.warning(f"More labels available than expected. Will remove data to fit expected size. cols: {labels.shape[1]}->{label_cols}, rows: {labels.shape[0]}->{label_rows}") + logger.warning( + f"More labels available than expected. Will remove data to fit expected size. 
cols: {labels.shape[1]}->{label_cols}, rows: {labels.shape[0]}->{label_rows}" + ) return torch.nn.functional.pad(labels, pad_sizes, value=torch.nan) -def get_expected_label_rows( - label_data: Data, - task: str, - num_nodes: int, - num_edges: int -): +def get_expected_label_rows(label_data: Data, task: str, num_nodes: int, num_edges: int): """Determines expected label size based on the specfic graph properties and the number of targets in the task-dataset. """ @@ -211,7 +215,7 @@ def get_expected_label_rows( elif task.startswith("nodepair_"): raise NotImplementedError() else: - print("Task name "+task+" in get_expected_label_rows") + print("Task name " + task + " in get_expected_label_rows") raise NotImplementedError() return num_labels @@ -221,7 +225,7 @@ def collate_labels( labels_num_cols_dict: Optional[Dict[str, Any]] = None, labels_dtype_dict: Optional[Dict[str, Any]] = None, num_nodes: List[int] = None, - num_edges: List[int] = None + num_edges: List[int] = None, ): """Collate labels for multitask learning. @@ -246,7 +250,9 @@ def collate_labels( for task in empty_task_labels: label_rows = get_expected_label_rows(this_label, task, num_nodes[index], num_edges[index]) dtype = torch_enum_to_dtype(labels_dtype_dict[task]) - this_label[task] = torch.full((label_rows, labels_num_cols_dict[task]), fill_value=torch.nan, dtype=dtype) + this_label[task] = torch.full( + (label_rows, labels_num_cols_dict[task]), fill_value=torch.nan, dtype=dtype + ) for task in label_keys_set - set(["x", "edge_index"]) - empty_task_labels: label_rows = get_expected_label_rows(this_label, task, num_nodes[index], num_edges[index]) @@ -269,7 +275,9 @@ def collate_labels( f"Labels for {label_rows} nodes/edges/nodepairs expected, got 1." 
) - this_label[task] = pad_to_expected_label_size(this_label[task], label_rows, labels_num_cols_dict[task]) + this_label[task] = pad_to_expected_label_size( + this_label[task], label_rows, labels_num_cols_dict[task] + ) return Batch.from_data_list(labels) diff --git a/graphium/data/datamodule.py b/graphium/data/datamodule.py index 1d2ed8405..e2368d3b3 100644 --- a/graphium/data/datamodule.py +++ b/graphium/data/datamodule.py @@ -96,6 +96,7 @@ } ) + class BaseDataModule(lightning.LightningDataModule): def __init__( self, @@ -882,21 +883,33 @@ def __init__( # Copy featurization for the representation used by graphium_cpp encoded_featurization = deepcopy(featurization) self.encoded_featurization = encoded_featurization - + def encode_feature_options(options, name, encoding_function): if name not in options or options[name] is None: options[name] = torch.tensor(data=[], dtype=torch.int64) else: options[name] = encoding_function(options[name]) - encode_feature_options(encoded_featurization, "atom_property_list_onehot", graphium_cpp.atom_onehot_feature_names_to_tensor) - encode_feature_options(encoded_featurization, "atom_property_list_float", graphium_cpp.atom_float_feature_names_to_tensor) - encode_feature_options(encoded_featurization, "edge_property_list", graphium_cpp.bond_feature_names_to_tensor) - - if ("pos_encoding_as_features" in featurization and - featurization["pos_encoding_as_features"] is not None and - featurization["pos_encoding_as_features"]["pos_types"] is not None): - (pos_encoding_names, pos_encoding_tensor) = \ - graphium_cpp.positional_feature_options_to_tensor(featurization["pos_encoding_as_features"]["pos_types"]) + + encode_feature_options( + encoded_featurization, + "atom_property_list_onehot", + graphium_cpp.atom_onehot_feature_names_to_tensor, + ) + encode_feature_options( + encoded_featurization, "atom_property_list_float", graphium_cpp.atom_float_feature_names_to_tensor + ) + encode_feature_options( + encoded_featurization, 
"edge_property_list", graphium_cpp.bond_feature_names_to_tensor + ) + + if ( + "pos_encoding_as_features" in featurization + and featurization["pos_encoding_as_features"] is not None + and featurization["pos_encoding_as_features"]["pos_types"] is not None + ): + (pos_encoding_names, pos_encoding_tensor) = graphium_cpp.positional_feature_options_to_tensor( + featurization["pos_encoding_as_features"]["pos_types"] + ) else: pos_encoding_names = [] pos_encoding_tensor = torch.tensor(data=[], dtype=torch.int64) @@ -924,9 +937,7 @@ def _parse_caching_args(self, processed_graph_data_path): # If loading from disk, the path to the cached data must be provided if processed_graph_data_path is None: - raise ValueError( - "`processed_graph_data_path` must be provided." - ) + raise ValueError("`processed_graph_data_path` must be provided.") self.processed_graph_data_path = processed_graph_data_path @@ -949,15 +960,19 @@ def get_task_levels(self): @staticmethod def concat_smiles_tensor_index(): return 0 + @staticmethod def smiles_offsets_tensor_index(): return 1 + @staticmethod def num_nodes_tensor_index(): return 2 + @staticmethod def num_edges_tensor_index(): return 3 + @staticmethod def data_offsets_tensor_index(): return 4 @@ -981,7 +996,7 @@ def prepare_data(self): # WARNING: not removing hydrogen atom without neighbors # SMILES Parse Error: syntax error while parsing: restricted # SMILES Parse Error: Failed parsing SMILES 'restricted' for input: 'restricted' - RDLogger.DisableLog('rdApp.*') + RDLogger.DisableLog("rdApp.*") for task, args in self.task_dataset_processing_params.items(): if args.label_normalization is None: @@ -991,17 +1006,25 @@ def prepare_data(self): if self._data_is_prepared: logger.info("Data is already prepared.") - self.label_num_cols, self.label_dtypes = graphium_cpp.load_num_cols_and_dtypes(self.processed_graph_data_path, self.data_hash) + self.label_num_cols, self.label_dtypes = graphium_cpp.load_num_cols_and_dtypes( + self.processed_graph_data_path, 
self.data_hash + ) self.stage_data = { - "train": graphium_cpp.load_metadata_tensors(self.processed_graph_data_path, "train", self.data_hash), - "val": graphium_cpp.load_metadata_tensors(self.processed_graph_data_path, "val", self.data_hash), - "test": graphium_cpp.load_metadata_tensors(self.processed_graph_data_path, "test", self.data_hash), + "train": graphium_cpp.load_metadata_tensors( + self.processed_graph_data_path, "train", self.data_hash + ), + "val": graphium_cpp.load_metadata_tensors( + self.processed_graph_data_path, "val", self.data_hash + ), + "test": graphium_cpp.load_metadata_tensors( + self.processed_graph_data_path, "test", self.data_hash + ), } if len(self.label_num_cols) > 0: for task in self.task_dataset_processing_params.keys(): stats = graphium_cpp.load_stats(self.processed_graph_data_path, self.data_hash, task) if len(stats) < 4: - raise RuntimeError(f"Error loading cached stats for task \"{task}\"") + raise RuntimeError(f'Error loading cached stats for task "{task}"') self.task_norms[task].set_statistics(stats[0], stats[1], stats[2], stats[3]) return @@ -1058,9 +1081,9 @@ def prepare_data(self): weights_col=args.weights_col, weights_type=args.weights_type, ) - + num_molecules = len(smiles) - + # Clear the reference to the DataFrame, so that Python can free up the memory. df = None @@ -1084,7 +1107,7 @@ def prepare_data(self): split_names=self.task_dataset_processing_params[task].split_names, # smiles and labels are already sub-sampled, so the split indices need to be # relative to the sample, not the original. 
- #sample_idx=task_dataset_args[task]["sample_idx"], + # sample_idx=task_dataset_args[task]["sample_idx"], ) self.task_train_indices[task] = train_indices self.task_val_indices[task] = val_indices @@ -1093,8 +1116,16 @@ def prepare_data(self): logger.info("Done reading datasets") # The rest of the data preparation and caching is done in graphium_cpp.prepare_and_save_data - normalizations = {task: self.task_dataset_processing_params[task].label_normalization for task in self.task_dataset_processing_params.keys()} - self.stage_data, all_stats, self.label_num_cols, self.label_dtypes = graphium_cpp.prepare_and_save_data( + normalizations = { + task: self.task_dataset_processing_params[task].label_normalization + for task in self.task_dataset_processing_params.keys() + } + ( + self.stage_data, + all_stats, + self.label_num_cols, + self.label_dtypes, + ) = graphium_cpp.prepare_and_save_data( self.task_names, task_dataset_args, normalizations, @@ -1105,11 +1136,12 @@ def prepare_data(self): self.task_test_indices, self.add_self_loop, self.explicit_H, - self.preprocessing_n_jobs) + self.preprocessing_n_jobs, + ) for task, stats in all_stats.items(): if len(stats) < 4: - raise RuntimeError(f"Error loading cached stats for task \"{task}\"") + raise RuntimeError(f'Error loading cached stats for task "{task}"') self.task_norms[task].set_statistics(stats[0], stats[1], stats[2], stats[3]) @@ -1146,7 +1178,6 @@ def setup( logger.info(self.val_ds) label_num_cols.update(dict(zip(self.val_ds.task_names, self.val_ds.label_num_cols))) label_dtypes.update(dict(zip(self.val_ds.task_names, self.val_ds.label_dtypes))) - if stage == "test" or stage is None: if self.test_ds is None and len(self.stage_data["test"]) >= self.num_edges_tensor_index(): @@ -1454,7 +1485,11 @@ def _extract_smiles_labels( weights_col: Optional[str] = None, weights_type: Optional[str] = None, ) -> Tuple[ - np.ndarray, np.ndarray, np.ndarray, Union[Type[None], np.ndarray], Dict[str, Union[Type[None], np.ndarray]] + 
np.ndarray, + np.ndarray, + np.ndarray, + Union[Type[None], np.ndarray], + Dict[str, Union[Type[None], np.ndarray]], ]: """ For a given dataframe extract the SMILES and labels columns. Smiles is returned as a list diff --git a/graphium/data/dataset.py b/graphium/data/dataset.py index f90d9922a..a96d6bc9b 100644 --- a/graphium/data/dataset.py +++ b/graphium/data/dataset.py @@ -35,15 +35,15 @@ class MultitaskDataset(Dataset): def __init__( self, - featurize_smiles: Callable[[str],dict], + featurize_smiles: Callable[[str], dict], task_names: List[str] = None, label_num_cols: List[int] = None, label_dtypes: List[int] = None, - mol_file_data_offsets = None, - concat_smiles_tensor = None, - smiles_offsets_tensor = None, - num_nodes_tensor = None, - num_edges_tensor = None, + mol_file_data_offsets=None, + concat_smiles_tensor=None, + smiles_offsets_tensor=None, + num_nodes_tensor=None, + num_edges_tensor=None, about: str = "", data_path: Optional[Union[str, os.PathLike]] = None, ): @@ -193,7 +193,7 @@ def __getitem__(self, idx): smiles_str = graphium_cpp.extract_string(self.smiles_tensor, self.smiles_offsets_tensor, idx) if self.mol_file_data_offsets is None: - datum = { "features": self.featurize_smiles(smiles_str) } + datum = {"features": self.featurize_smiles(smiles_str)} else: datum = { "labels": self.load_graph_from_index(idx), @@ -217,7 +217,15 @@ def load_graph_from_index(self, data_idx): A Data object containing the data for the specified index with keys corresponding to the tasks. 
""" labels = {} - graphium_cpp.load_labels_from_index(self.data_path, data_idx, self.mol_file_data_offsets, self.task_names, self.label_num_cols, self.label_dtypes, labels) + graphium_cpp.load_labels_from_index( + self.data_path, + data_idx, + self.mol_file_data_offsets, + self.task_names, + self.label_num_cols, + self.label_dtypes, + labels, + ) data_dict = Data() for task, values in labels.items(): data_dict[task] = values @@ -280,10 +288,11 @@ def torch_enum_to_dtype(v: Union[int, torch.dtype]): torch.quint8, torch.qint32, torch.bfloat16, - torch.quint4x2 + torch.quint4x2, ] return mapping[v] if (v >= 0 and v < len(mapping)) else None + def get_num_nodes_per_graph(graphs): r""" number of nodes per graph diff --git a/graphium/data/multilevel_utils.py b/graphium/data/multilevel_utils.py index e2e29343b..a096979dd 100644 --- a/graphium/data/multilevel_utils.py +++ b/graphium/data/multilevel_utils.py @@ -39,7 +39,7 @@ def extract_labels(df: pd.DataFrame, task_level: str, label_cols: List[str]): num_cols = len(label_cols) if task_level == "graph": - output = np.empty((num_rows,num_cols), dtype=np.float64) + output = np.empty((num_rows, num_cols), dtype=np.float64) for col_index, col in enumerate(label_cols): for i, v in enumerate(df[col]): @@ -53,15 +53,13 @@ def extract_labels(df: pd.DataFrame, task_level: str, label_cols: List[str]): output[i, col_index] = v else: - raise ValueError( - f"Graph data should be one of float or int, got {type(v)}" - ) + raise ValueError(f"Graph data should be one of float or int, got {type(v)}") return output, None # First, find the max length of each row (likely the number of nodes or edges) # +1 is for the cumulative sum below - begin_offsets = np.zeros((num_rows+1,), dtype=np.int64) + begin_offsets = np.zeros((num_rows + 1,), dtype=np.int64) max_type = np.float16 for col in label_cols: for i, v in enumerate(df[col]): @@ -89,26 +87,24 @@ def extract_labels(df: pd.DataFrame, task_level: str, label_cols: List[str]): length = len(v) 
max_type = np.float64 else: - raise ValueError( - f"Graph data should be one of float, int, list, np.ndarray, got {type(v)}" - ) + raise ValueError(f"Graph data should be one of float, int, list, np.ndarray, got {type(v)}") # The +1 is so that the cumulative sum below gives the beginning offsets - begin_offsets[i+1] = max(begin_offsets[i+1], length) + begin_offsets[i + 1] = max(begin_offsets[i + 1], length) begin_offsets = np.cumsum(begin_offsets) full_num_rows = begin_offsets[-1] - output = np.empty((full_num_rows,num_cols), dtype=max_type) + output = np.empty((full_num_rows, num_cols), dtype=max_type) # Now, fill in the values for col_index, col in enumerate(label_cols): for i, v in enumerate(df[col]): full_row = begin_offsets[i] - end_row = begin_offsets[i+1] + end_row = begin_offsets[i + 1] if not isinstance(v, np.ndarray): v = pd.to_numeric(v, errors="coerce") - + if isinstance(v, np.ndarray): length = v.shape[0] if len(v.shape) == 1 else 0 for j in range(length): @@ -124,9 +120,7 @@ def extract_labels(df: pd.DataFrame, task_level: str, label_cols: List[str]): output[full_row + j, col_index] = v[j] else: - raise ValueError( - f"Graph data should be one of float, int, list, np.ndarray, got {type(v)}" - ) + raise ValueError(f"Graph data should be one of float, int, list, np.ndarray, got {type(v)}") # Fill the rest of the rows in the column with nan if full_row + length != end_row: diff --git a/graphium/features/featurizer.py b/graphium/features/featurizer.py index d97e70f02..21d874de1 100644 --- a/graphium/features/featurizer.py +++ b/graphium/features/featurizer.py @@ -28,6 +28,7 @@ # These are the integers that correspond with the torch data types in C++ NP_DTYPE_TO_TORCH_INT = {np.float16: 5, np.float32: 6, np.float64: 7} + def mol_to_pyggraph( mol: str, atom_property_list_onehot: torch.Tensor = torch.tensor(data=[], dtype=torch.int64), @@ -37,7 +38,7 @@ def mol_to_pyggraph( add_self_loop: bool = False, explicit_H: bool = False, use_bonds_weights: bool = 
False, - pos_encoding_as_features: Tuple[List[str],torch.Tensor] = ([], torch.tensor(data=[], dtype=torch.int64)), + pos_encoding_as_features: Tuple[List[str], torch.Tensor] = ([], torch.tensor(data=[], dtype=torch.int64)), dtype: np.dtype = np.float16, on_error: str = "ignore", mask_nan: Union[str, float, type(None)] = "raise", @@ -127,13 +128,15 @@ def mol_to_pyggraph( """ if not isinstance(mol, str): - raise ValueError(f"mol_to_pyggraph requires that molecule be received as a string, not type "+str(type(mol))) + raise ValueError( + f"mol_to_pyggraph requires that molecule be received as a string, not type " + str(type(mol)) + ) try: - has_conformer = ('positions_3d' in conformer_property_list) + has_conformer = "positions_3d" in conformer_property_list pe_index = 4 if has_conformer: - pe_index = 5; + pe_index = 5 mask_nan_value = 0.0 if mask_nan is None: mask_nan_style_int = 0 @@ -149,14 +152,14 @@ def mol_to_pyggraph( has_conformer, edge_property_list, pos_encoding_as_features[1], - True, # duplicate_edges, so that we don't have to duplicate below + True, # duplicate_edges, so that we don't have to duplicate below add_self_loop, explicit_H, use_bonds_weights, - True, #offset_carbon + True, # offset_carbon NP_DTYPE_TO_TORCH_INT[dtype], mask_nan_style_int, - mask_nan_value + mask_nan_value, ) if num_nans > 0: @@ -165,7 +168,7 @@ def mol_to_pyggraph( elif nan_tensor_index == 3: array_name = "edge property" elif nan_tensor_index == 4 and has_conformer: - array_name = 'positions_3d' + array_name = "positions_3d" else: array_name = pos_encoding_as_features[0][nan_tensor_index - pe_index] msg = f"There are {num_nans} NaNs in `{array_name}`" @@ -175,14 +178,11 @@ def mol_to_pyggraph( logger.warning(msg) num_atoms = tensors[2].size(0) - data_dict = { - "feat": tensors[2], - "edge_feat": tensors[3] - } + data_dict = {"feat": tensors[2], "edge_feat": tensors[3]} if has_conformer: - data_dict['positions_3d'] = tensors[4] - for i in range(len(tensors)-pe_index): - 
data_dict[pos_encoding_as_features[0][i]] = tensors[i+pe_index] + data_dict["positions_3d"] = tensors[4] + for i in range(len(tensors) - pe_index): + data_dict[pos_encoding_as_features[0][i]] = tensors[i + pe_index] # Create the PyG graph object `Data` data = Data(edge_index=tensors[0], edge_weight=tensors[1], num_nodes=num_atoms, **data_dict) return data diff --git a/graphium/graphium_cpp/setup.py b/graphium/graphium_cpp/setup.py index 0755397b8..2ed13b000 100755 --- a/graphium/graphium_cpp/setup.py +++ b/graphium/graphium_cpp/setup.py @@ -16,62 +16,71 @@ import numpy torch_dir = torch.__path__[0] -rdkit_lib_index = rdkit.__path__[0].split('/').index('lib') -rdkit_prefix = '/'.join(rdkit.__path__[0].split('/')[:rdkit_lib_index]) +rdkit_lib_index = rdkit.__path__[0].split("/").index("lib") +rdkit_prefix = "/".join(rdkit.__path__[0].split("/")[:rdkit_lib_index]) ext_modules = [ - Pybind11Extension( - 'graphium_cpp', - sources=[ - "graphium_cpp.cpp", - "features.cpp", - "labels.cpp", - "commute.cpp", - "electrostatic.cpp", - "float_features.cpp", - "graphormer.cpp", - "one_hot.cpp", - "random_walk.cpp", - "spectral.cpp" - ], - language="c++", - cxx_std=20, - include_dirs = [os.path.join(torch_dir,"include"), - os.path.join(torch_dir,"include/torch/csrc/api/include"), - os.path.join(rdkit_prefix, "include/rdkit"), - numpy.get_include()], - libraries = [ - "RDKitAlignment", - "RDKitDataStructs", - "RDKitDistGeometry", - "RDKitDistGeomHelpers", - "RDKitEigenSolvers", - "RDKitForceField", - "RDKitForceFieldHelpers", - "RDKitGenericGroups", - "RDKitGraphMol", - "RDKitInchi", - "RDKitRDInchiLib", - "RDKitRDBoost", - "RDKitRDGeneral", - "RDKitRDGeometryLib", - "RDKitRingDecomposerLib", - "RDKitSmilesParse", - "RDKitSubstructMatch", - "torch_cpu", - "torch_python" - ], - library_dirs = [os.path.join(rdkit_prefix,"lib"), - os.path.join(torch_dir,"lib")], - extra_compile_args=["-O3","-Wall", "-Wmissing-field-initializers", "-Wmaybe-uninitialized", "-Wuninitialized"] - ) + 
Pybind11Extension( + "graphium_cpp", + sources=[ + "graphium_cpp.cpp", + "features.cpp", + "labels.cpp", + "commute.cpp", + "electrostatic.cpp", + "float_features.cpp", + "graphormer.cpp", + "one_hot.cpp", + "random_walk.cpp", + "spectral.cpp", + ], + language="c++", + cxx_std=20, + include_dirs=[ + os.path.join(torch_dir, "include"), + os.path.join(torch_dir, "include/torch/csrc/api/include"), + os.path.join(rdkit_prefix, "include/rdkit"), + numpy.get_include(), + ], + libraries=[ + "RDKitAlignment", + "RDKitDataStructs", + "RDKitDistGeometry", + "RDKitDistGeomHelpers", + "RDKitEigenSolvers", + "RDKitForceField", + "RDKitForceFieldHelpers", + "RDKitGenericGroups", + "RDKitGraphMol", + "RDKitInchi", + "RDKitRDInchiLib", + "RDKitRDBoost", + "RDKitRDGeneral", + "RDKitRDGeometryLib", + "RDKitRingDecomposerLib", + "RDKitSmilesParse", + "RDKitSubstructMatch", + "torch_cpu", + "torch_python", + ], + library_dirs=[os.path.join(rdkit_prefix, "lib"), os.path.join(torch_dir, "lib")], + extra_compile_args=[ + "-O3", + "-Wall", + "-Wmissing-field-initializers", + "-Wmaybe-uninitialized", + "-Wuninitialized", + ], + ) ] -setup(name = "graphium_cpp", - version = "0.1", - author = "N. Dickson", +setup( + name="graphium_cpp", + version="0.1", + author="N. 
Dickson", author_email="ndickson@nvidia.com", license="Apache 2.0", - description = "C++ extension for graphium", + description="C++ extension for graphium", ext_modules=ext_modules, - cmdclass={"build_ext": build_ext}) + cmdclass={"build_ext": build_ext}, +) diff --git a/tests/test_collate.py b/tests/test_collate.py index 77dee64f9..6524596d6 100644 --- a/tests/test_collate.py +++ b/tests/test_collate.py @@ -62,7 +62,11 @@ def test_collate_labels(self): # Collate labels and check for the right shapes and dtypes collated_labels = collate_labels( - deepcopy(fake_labels), deepcopy(labels_num_cols_dict), deepcopy(labels_dtype_dict), num_nodes, num_edges + deepcopy(fake_labels), + deepcopy(labels_num_cols_dict), + deepcopy(labels_dtype_dict), + num_nodes, + num_edges, ) self.assertEqual(collated_labels["graph_label1"].shape, torch.Size([num_labels, 1])) # , 1 self.assertEqual(collated_labels["graph_label2"].shape, torch.Size([num_labels, 3])) # , 1 @@ -119,7 +123,11 @@ def test_collate_labels(self): "node_label4": 1, } collated_labels = collate_labels( - deepcopy(fake_labels), deepcopy(labels_num_cols_dict), deepcopy(labels_dtype_dict), num_nodes, num_edges + deepcopy(fake_labels), + deepcopy(labels_num_cols_dict), + deepcopy(labels_dtype_dict), + num_nodes, + num_edges, ) self.assertEqual(collated_labels["graph_label1"].shape, torch.Size([num_labels, 1])) # , 1 self.assertEqual(collated_labels["graph_label2"].shape, torch.Size([num_labels, 3])) # , 1 @@ -141,9 +149,14 @@ def test_collate_labels(self): collated_labels["node_label4"].numpy(), label4_true.flatten(0, 1).numpy() ) # Now test the `graphium_collate_fn` function when only labels are given - fake_labels2 = [{"labels": this_label, "num_nodes": this_label.num_nodes, "num_edges": this_label.num_edges} for this_label in fake_labels] + fake_labels2 = [ + {"labels": this_label, "num_nodes": this_label.num_nodes, "num_edges": this_label.num_edges} + for this_label in fake_labels + ] collated_labels = 
graphium_collate_fn( - deepcopy(fake_labels2), labels_num_cols_dict=labels_num_cols_dict, labels_dtype_dict=labels_dtype_dict + deepcopy(fake_labels2), + labels_num_cols_dict=labels_num_cols_dict, + labels_dtype_dict=labels_dtype_dict, )["labels"] self.assertEqual(collated_labels["graph_label1"].shape, torch.Size([num_labels, 1])) self.assertEqual(collated_labels["graph_label2"].shape, torch.Size([num_labels, 3])) diff --git a/tests/test_datamodule.py b/tests/test_datamodule.py index 2848b3031..2cc59b8c4 100644 --- a/tests/test_datamodule.py +++ b/tests/test_datamodule.py @@ -265,9 +265,10 @@ def test_datamodule_with_none_molecules(self): # Convert the smilies from the train_ds to a list, and check the content train_smiles = [ graphium_cpp.extract_string( - datamodule.train_ds.smiles_tensor, - datamodule.train_ds.smiles_offsets_tensor, - idx) for idx in range(len(datamodule.train_ds))] + datamodule.train_ds.smiles_tensor, datamodule.train_ds.smiles_offsets_tensor, idx + ) + for idx in range(len(datamodule.train_ds)) + ] # Check that the set of smiles are the same train_smiles_flat = list(set(train_smiles)) @@ -444,7 +445,9 @@ def test_splits_file(self): if exists(TEMP_CACHE_DATA_PATH): rm(TEMP_CACHE_DATA_PATH, recursive=True) - ds2 = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH) + ds2 = MultitaskFromSmilesDataModule( + task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH + ) ds2.prepare_data() ds2.setup() @@ -453,11 +456,13 @@ def test_splits_file(self): self.assertEqual(len(ds2.test_ds), len(split_test)) # Check that the splits are the same - self.assertEqual(len(ds.train_ds.smiles_offsets_tensor), len(split_train)+1) + self.assertEqual(len(ds.train_ds.smiles_offsets_tensor), len(split_train) + 1) np.testing.assert_array_equal(ds.train_ds.smiles_tensor, ds2.train_ds.smiles_tensor) np.testing.assert_array_equal(ds.val_ds.smiles_tensor, ds2.val_ds.smiles_tensor) 
np.testing.assert_array_equal(ds.test_ds.smiles_tensor, ds2.test_ds.smiles_tensor) - np.testing.assert_array_equal(ds.train_ds.smiles_offsets_tensor, ds2.train_ds.smiles_offsets_tensor) + np.testing.assert_array_equal( + ds.train_ds.smiles_offsets_tensor, ds2.train_ds.smiles_offsets_tensor + ) np.testing.assert_array_equal(ds.val_ds.smiles_offsets_tensor, ds2.val_ds.smiles_offsets_tensor) np.testing.assert_array_equal(ds.test_ds.smiles_offsets_tensor, ds2.test_ds.smiles_offsets_tensor) diff --git a/tests/test_dataset.py b/tests/test_dataset.py index 56e2cbc95..3624e36ab 100644 --- a/tests/test_dataset.py +++ b/tests/test_dataset.py @@ -28,6 +28,7 @@ TEMP_CACHE_DATA_PATH = "tests/temp_cache_0000" + def dataframes_to_dataset(dataframes_dict, case_num): task_names = [key for key in dataframes_dict.keys()] @@ -36,7 +37,13 @@ def dataframes_to_dataset(dataframes_dict, case_num): task_val_indices = {} task_test_indices = {} for task in task_names: - smiles, labels, label_offsets, sample_idx, extras = MultitaskFromSmilesDataModule._extract_smiles_labels( + ( + smiles, + labels, + label_offsets, + sample_idx, + extras, + ) = MultitaskFromSmilesDataModule._extract_smiles_labels( df=dataframes_dict[task], task_level="graph", smiles_col="SMILES", @@ -57,10 +64,10 @@ def dataframes_to_dataset(dataframes_dict, case_num): task_val_indices[task] = [] task_test_indices[task] = [] - fake_data_hash = "a1b2c3testdataset"+str(case_num) + fake_data_hash = "a1b2c3testdataset" + str(case_num) # The rest of the data preparation and caching is done in graphium_cpp.prepare_and_save_data - normalizations = {task: {} for task in task_names} # No normalization + normalizations = {task: {} for task in task_names} # No normalization stage_data, all_stats, label_num_cols, label_dtypes = graphium_cpp.prepare_and_save_data( task_names, task_dataset_args, @@ -70,9 +77,10 @@ def dataframes_to_dataset(dataframes_dict, case_num): task_train_indices, task_val_indices, task_test_indices, - False, 
#add_self_loop - False, #explicit_H - 0) #preprocessing_n_jobs + False, # add_self_loop + False, # explicit_H + 0, # preprocessing_n_jobs + ) stage_data = stage_data["train"] @@ -81,8 +89,8 @@ def dataframes_to_dataset(dataframes_dict, case_num): data_offsets = stage_data[MultitaskFromSmilesDataModule.data_offsets_tensor_index()] multitask_dataset = MultitaskDataset( - about="test_dataset case"+str(case_num), - data_path=osp.join(TEMP_CACHE_DATA_PATH, "train_"+fake_data_hash), + about="test_dataset case" + str(case_num), + data_path=osp.join(TEMP_CACHE_DATA_PATH, "train_" + fake_data_hash), featurize_smiles=mol_to_pyggraph, task_names=task_names, label_num_cols=label_num_cols, @@ -96,6 +104,7 @@ def dataframes_to_dataset(dataframes_dict, case_num): return multitask_dataset + class Test_Multitask_Dataset(ut.TestCase): # Then we can choose different rows and columns for the tests as we see fit. # Remember tests are supposed to be FAST, and reading from the file system multiple times slows things down. @@ -117,7 +126,7 @@ def test_multitask_dataset_case_1(self): df_micro_zinc_SA = df[["SMILES", "SA"]] df_micro_zinc_logp = df[["SMILES", "logp"]] df_micro_zinc_score = df[["SMILES", "score"]] - + # We need to prepare the data for these dataframes. # We don't need to do featurization yet. 
dataframes = { @@ -141,12 +150,17 @@ def test_multitask_dataset_case_1(self): # Search for the smiles string in the multitask dataset found_idx = -1 for i in range(multitask_dataset.__len__()): - if graphium_cpp.extract_string(multitask_dataset.smiles_tensor, multitask_dataset.smiles_offsets_tensor, i) == smiles: + if ( + graphium_cpp.extract_string( + multitask_dataset.smiles_tensor, multitask_dataset.smiles_offsets_tensor, i + ) + == smiles + ): found_idx = i break item = multitask_dataset[found_idx]["labels"] - + # Compare labels self.assertEqual(label_SA, item["SA"]) self.assertEqual(label_logp, item["logp"]) @@ -193,7 +207,12 @@ def test_multitask_dataset_case_2(self): # Search for the smiles string in the multitask dataset found_idx = -1 for i in range(multitask_microzinc.__len__()): - if graphium_cpp.extract_string(multitask_microzinc.smiles_tensor, multitask_microzinc.smiles_offsets_tensor, i) == smiles: + if ( + graphium_cpp.extract_string( + multitask_microzinc.smiles_tensor, multitask_microzinc.smiles_offsets_tensor, i + ) + == smiles + ): found_idx = i break diff --git a/tests/test_featurizer.py b/tests/test_featurizer.py index ca0c8aabc..3336feae3 100644 --- a/tests/test_featurizer.py +++ b/tests/test_featurizer.py @@ -26,6 +26,7 @@ import graphium_cpp + class test_featurizer(ut.TestCase): smiles = [ "C", @@ -94,13 +95,13 @@ class test_featurizer(ut.TestCase): def test_get_mol_atomic_features_onehot(self): props = deepcopy(self.atomic_onehot_props) - #bad_props = ["bob"] + # bad_props = ["bob"] all_smiles = self.smiles + self.smiles_noble for mol in all_smiles: err_msg = f"\n\tError for params:\n\t\tSMILES: {mol}" - + rdmol = dm.to_mol(mol) for ii in range(len(props)): @@ -110,16 +111,16 @@ def test_get_mol_atomic_features_onehot(self): features = mol_to_pyggraph(mol, atom_property_list_onehot=this_props_encoded, mask_nan=None) val = features["feat"] self.assertEqual(val.size(0), rdmol.GetNumAtoms(), msg=err_msg2) - self.assertGreaterEqual(val.size(1), 
2*len(this_props), msg=err_msg2) + self.assertGreaterEqual(val.size(1), 2 * len(this_props), msg=err_msg2) self.assertTrue(((val == 0) | (val == 1)).numpy().all(), msg=err_msg2) - #with self.assertRaises(ValueError, msg=err_msg): + # with self.assertRaises(ValueError, msg=err_msg): # get_mol_atomic_features_onehot(mol, property_list=bad_props) def test_get_mol_atomic_features_float(self): props = deepcopy(self.atomic_float_props) - #bad_props = ["bob"] + # bad_props = ["bob"] all_smiles = self.smiles + self.smiles_noble for mol in all_smiles: @@ -135,31 +136,39 @@ def test_get_mol_atomic_features_float(self): self.assertEqual(val.size(0), rdmol.GetNumAtoms(), msg=err_msg2) self.assertEqual(val.size(1), len(this_props), msg=err_msg2) - #with self.assertRaises(ValueError, msg=err_msg): + # with self.assertRaises(ValueError, msg=err_msg): # get_mol_atomic_features_float(mol, property_list=bad_props) def test_get_mol_atomic_features_float_nan_mask(self): props_encoded = graphium_cpp.atom_float_feature_names_to_tensor(self.atomic_float_props) for mol in self.smiles_noble: # Nothing happens when `mask_nan = None`, nans are still in the property array - features = mol_to_pyggraph(mol, atom_property_list_float=props_encoded, mask_nan=None, on_error="raise") + features = mol_to_pyggraph( + mol, atom_property_list_float=props_encoded, mask_nan=None, on_error="raise" + ) prop_array = features["feat"] nans = np.isnan(prop_array) # Capture a raised error when `mask_nan = "raise"` with self.assertRaises(ValueError): - features = mol_to_pyggraph(mol, atom_property_list_float=props_encoded, mask_nan="raise", on_error="raise") + features = mol_to_pyggraph( + mol, atom_property_list_float=props_encoded, mask_nan="raise", on_error="raise" + ) print(f"Failed to raise error for nans on {mol}") # Not sure how to Capture a logged warning when `mask_nan = "warn"` # Here, I'm testing a behaviour similar to `mask_nan = None` - features = mol_to_pyggraph(mol, 
atom_property_list_float=props_encoded, mask_nan="warn", on_error="raise") + features = mol_to_pyggraph( + mol, atom_property_list_float=props_encoded, mask_nan="warn", on_error="raise" + ) prop_array = features["feat"] self.assertEqual(len(self.atomic_float_props), prop_array.size(1)) self.assertTrue(np.isnan(prop_array.numpy()).any()) # NaNs are replaced by `42` when `mask_nan=42` - features = mol_to_pyggraph(mol, atom_property_list_float=props_encoded, mask_nan=42, on_error="raise") + features = mol_to_pyggraph( + mol, atom_property_list_float=props_encoded, mask_nan=42, on_error="raise" + ) prop_array = features["feat"] self.assertEqual(len(self.atomic_float_props), prop_array.size(1)) self.assertFalse(np.isnan(prop_array.numpy()).any()) @@ -167,7 +176,7 @@ def test_get_mol_atomic_features_float_nan_mask(self): def test_get_mol_edge_features(self): props = deepcopy(self.edge_props) - #bad_props = ["bob"] + # bad_props = ["bob"] all_smiles = self.smiles + self.smiles_noble for mol in all_smiles: @@ -183,7 +192,7 @@ def test_get_mol_edge_features(self): if rdmol.GetNumBonds() > 0: self.assertGreaterEqual(val.shape[1], len(this_props), msg=err_msg2) - #if mol.GetNumBonds() > 0: + # if mol.GetNumBonds() > 0: # with self.assertRaises(ValueError, msg=err_msg): # get_mol_edge_features(mol, property_list=bad_props) @@ -232,7 +241,7 @@ def test_mol_to_pyggraph(self): mol_Hs = Chem.AddHs(rdmol) # type: ignore mol_No_Hs = Chem.RemoveHs(rdmol) # type: ignore - + # Loop over many possible combinations of properties for explicit_H in [True, False]: this_mol = mol_Hs if explicit_H else mol_No_Hs @@ -243,17 +252,14 @@ def test_mol_to_pyggraph(self): graph = mol_to_pyggraph( mol=mol, atom_property_list_onehot=graphium_cpp.atom_onehot_feature_names_to_tensor( - np.random.choice( - self.atomic_onehot_props, size=num_props, replace=False - ) + np.random.choice(self.atomic_onehot_props, size=num_props, replace=False) ), 
atom_property_list_float=graphium_cpp.atom_float_feature_names_to_tensor( - np.random.choice( - self.atomic_float_props, size=num_props, replace=False - ) + np.random.choice(self.atomic_float_props, size=num_props, replace=False) ), edge_property_list=graphium_cpp.bond_feature_names_to_tensor( - np.random.choice(self.edge_props, size=num_props, replace=False)), + np.random.choice(self.edge_props, size=num_props, replace=False) + ), add_self_loop=False, explicit_H=explicit_H, use_bonds_weights=False, diff --git a/tests/test_multitask_datamodule.py b/tests/test_multitask_datamodule.py index fdc7c8818..3f7b09456 100644 --- a/tests/test_multitask_datamodule.py +++ b/tests/test_multitask_datamodule.py @@ -24,6 +24,7 @@ TEMP_CACHE_DATA_PATH = "tests/temp_cache_0000" + class Test_Multitask_DataModule(ut.TestCase): def setUp(self): # Create a temporary directory @@ -342,7 +343,7 @@ def test_non_graph_level_extract_labels(self): assert output_offsets is not None assert isinstance(output_offsets, np.ndarray) assert len(output_offsets.shape) == 1 - assert output_offsets.shape[0] == (num_graphs+1) + assert output_offsets.shape[0] == (num_graphs + 1) assert output.shape[0] == output_offsets[-1] def test_non_graph_level_extract_labels_missing_cols(self): @@ -364,20 +365,20 @@ def test_non_graph_level_extract_labels_missing_cols(self): assert output_offsets is not None assert isinstance(output_offsets, np.ndarray) assert len(output_offsets.shape) == 1 - assert output_offsets.shape[0] == (num_graphs+1) + assert output_offsets.shape[0] == (num_graphs + 1) assert output.shape[0] == output_offsets[-1] for idx in drop_index: begin_idx = output_offsets[idx] - end_idx = output_offsets[idx+1] + end_idx = output_offsets[idx + 1] values = output[begin_idx:end_idx] assert len(values.shape) == 2 assert values.shape[1] == len(label_cols) - + # All removed entries must be nan - assert np.all(np.isnan(values[:,:replace])) + assert np.all(np.isnan(values[:, :replace])) # All kept entries should 
be non-nan in this case - assert not np.any(np.isnan(values[:,replace:])) + assert not np.any(np.isnan(values[:, replace:])) def test_tdc_admet_benchmark_data_module(self): """ diff --git a/tests/test_pe_nodepair.py b/tests/test_pe_nodepair.py index 65fb17363..b849b28b3 100644 --- a/tests/test_pe_nodepair.py +++ b/tests/test_pe_nodepair.py @@ -23,6 +23,7 @@ import graphium import graphium_cpp + class test_positional_encodings(ut.TestCase): # Test graphs smiles_dict = {} @@ -57,26 +58,25 @@ class test_positional_encodings(ut.TestCase): "electrostatic": {"pos_level": "nodepair", "pos_type": "electrostatic", "normalization": "none"}, "graphormer": {"pos_level": "nodepair", "pos_type": "graphormer", "normalization": "none"}, "commute": {"pos_level": "nodepair", "pos_type": "commute", "normalization": "none"}, - } - (pos_encoding_names, pos_encoding_tensor) = \ - graphium_cpp.positional_feature_options_to_tensor(features) + } + (pos_encoding_names, pos_encoding_tensor) = graphium_cpp.positional_feature_options_to_tensor(features) def get_tensors(self, smiles): tensors, _, _ = graphium_cpp.featurize_smiles( smiles, - torch.tensor(data=[], dtype=torch.int64), # atom_property_list_onehot - torch.tensor(data=[], dtype=torch.int64), # atom_property_list_float - False, # has_conformer - torch.tensor(data=[], dtype=torch.int64), # edge_property_list + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_onehot + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_float + False, # has_conformer + torch.tensor(data=[], dtype=torch.int64), # edge_property_list self.pos_encoding_tensor, - True, # duplicate_edges - False, # add_self_loop - False, # explicit_H=False - False, # use_bonds_weights - True, #offset_carbon - 7, # torch float64 - 0, # mask_nan_style_int - 0 # mask_nan_value + True, # duplicate_edges + False, # add_self_loop + False, # explicit_H=False + False, # use_bonds_weights + True, # offset_carbon + 7, # torch float64 + 0, # mask_nan_style_int + 
0, # mask_nan_value ) return tensors @@ -84,30 +84,30 @@ def test_dimensions(self): for key, smiles in self.smiles_dict.items(): tensors = self.get_tensors(smiles) - pe = tensors[4] # electrostatic + pe = tensors[4] # electrostatic self.assertEqual(list(pe.shape), self.shape_dict[key]) - pe = tensors[5] # graphormer + pe = tensors[5] # graphormer self.assertEqual(list(pe.shape), self.shape_dict[key]) - pe = tensors[6] # commute + pe = tensors[6] # commute self.assertEqual(list(pe.shape), self.shape_dict[key]) def test_symmetry(self): for _, smiles in self.smiles_dict.items(): tensors = self.get_tensors(smiles) - pe = tensors[5] # graphormer + pe = tensors[5] # graphormer np.testing.assert_array_almost_equal(pe, pe.T) - pe = tensors[6] # commute + pe = tensors[6] # commute np.testing.assert_array_almost_equal(pe, pe.T) def test_max_dist(self): for key, smiles in self.smiles_dict.items(): tensors = self.get_tensors(smiles) - pe = tensors[5] # graphormer + pe = tensors[5] # graphormer np.testing.assert_array_almost_equal(pe.max(), self.max_dict[key]) diff --git a/tests/test_pe_rw.py b/tests/test_pe_rw.py index 93b492e43..aebd6a577 100644 --- a/tests/test_pe_rw.py +++ b/tests/test_pe_rw.py @@ -23,6 +23,7 @@ import graphium import graphium_cpp + class test_pe_spectral(ut.TestCase): def test_outputs(self): # 4-barbell @@ -36,28 +37,44 @@ def test_outputs(self): # The feature names only depend on pos_type and pos_level, so the two # rw_return_probs features can't have the same pos_level. 
features = { - "rw_transition_probs": {"pos_level": "nodepair", "pos_type": "rw_transition_probs", "normalization": "none", "ksteps": ksteps1}, - "rw_return_probs_0": {"pos_level": "node", "pos_type": "rw_return_probs", "normalization": "none", "ksteps": ksteps2}, - "rw_return_probs_1": {"pos_level": "nodepair", "pos_type": "rw_return_probs", "normalization": "none", "ksteps": ksteps3}, - } - (pos_encoding_names, pos_encoding_tensor) = \ - graphium_cpp.positional_feature_options_to_tensor(features) + "rw_transition_probs": { + "pos_level": "nodepair", + "pos_type": "rw_transition_probs", + "normalization": "none", + "ksteps": ksteps1, + }, + "rw_return_probs_0": { + "pos_level": "node", + "pos_type": "rw_return_probs", + "normalization": "none", + "ksteps": ksteps2, + }, + "rw_return_probs_1": { + "pos_level": "nodepair", + "pos_type": "rw_return_probs", + "normalization": "none", + "ksteps": ksteps3, + }, + } + (pos_encoding_names, pos_encoding_tensor) = graphium_cpp.positional_feature_options_to_tensor( + features + ) tensors, _, _ = graphium_cpp.featurize_smiles( smiles, - torch.tensor(data=[], dtype=torch.int64), # atom_property_list_onehot - torch.tensor(data=[], dtype=torch.int64), # atom_property_list_float - False, # has_conformer - torch.tensor(data=[], dtype=torch.int64), # edge_property_list + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_onehot + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_float + False, # has_conformer + torch.tensor(data=[], dtype=torch.int64), # edge_property_list pos_encoding_tensor, - True, # duplicate_edges - False, # add_self_loop - False, # explicit_H=False - False, # use_bonds_weights - True, #offset_carbon - 7, # torch float64 - 0, # mask_nan_style_int - 0 # mask_nan_value + True, # duplicate_edges + False, # add_self_loop + False, # explicit_H=False + False, # use_bonds_weights + True, # offset_carbon + 7, # torch float64 + 0, # mask_nan_style_int + 0, # mask_nan_value ) pe1 = tensors[4] 
diff --git a/tests/test_pe_spectral.py b/tests/test_pe_spectral.py index 052f13f04..5c66e6f8b 100644 --- a/tests/test_pe_spectral.py +++ b/tests/test_pe_spectral.py @@ -23,25 +23,27 @@ import graphium import graphium_cpp + def get_pe_tensors(smiles, pos_encoding_tensor): tensors, _, _ = graphium_cpp.featurize_smiles( smiles, - torch.tensor(data=[], dtype=torch.int64), # atom_property_list_onehot - torch.tensor(data=[], dtype=torch.int64), # atom_property_list_float - False, # has_conformer - torch.tensor(data=[], dtype=torch.int64), # edge_property_list + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_onehot + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_float + False, # has_conformer + torch.tensor(data=[], dtype=torch.int64), # edge_property_list pos_encoding_tensor, - True, # duplicate_edges - False, # add_self_loop - False, # explicit_H=False - False, # use_bonds_weights - True, #offset_carbon - 7, # torch float64 - 0, # mask_nan_style_int - 0 # mask_nan_value + True, # duplicate_edges + False, # add_self_loop + False, # explicit_H=False + False, # use_bonds_weights + True, # offset_carbon + 7, # torch float64 + 0, # mask_nan_style_int + 0, # mask_nan_value ) return tensors + class test_pe_spectral(ut.TestCase): def test_for_connected_vs_disconnected_graph(self): # 2 disconnected 3 cliques @@ -54,11 +56,24 @@ def test_for_connected_vs_disconnected_graph(self): num_pos = 3 features = { - "laplacian_eigval": {"pos_level": "node", "pos_type": "laplacian_eigval", "normalization": "none", "num_pos": num_pos, "disconnected_comp": True}, - "laplacian_eigvec": {"pos_level": "node", "pos_type": "laplacian_eigvec", "normalization": "none", "num_pos": num_pos, "disconnected_comp": True}, - } - (pos_encoding_names, pos_encoding_tensor) = \ - graphium_cpp.positional_feature_options_to_tensor(features) + "laplacian_eigval": { + "pos_level": "node", + "pos_type": "laplacian_eigval", + "normalization": "none", + "num_pos": num_pos, + 
"disconnected_comp": True, + }, + "laplacian_eigvec": { + "pos_level": "node", + "pos_type": "laplacian_eigvec", + "normalization": "none", + "num_pos": num_pos, + "disconnected_comp": True, + }, + } + (pos_encoding_names, pos_encoding_tensor) = graphium_cpp.positional_feature_options_to_tensor( + features + ) # test if pe works identically on connected vs disconnected graphs tensors1 = get_pe_tensors(smiles1, pos_encoding_tensor) diff --git a/tests/test_positional_encoders.py b/tests/test_positional_encoders.py index fd5709636..2cb6aacff 100644 --- a/tests/test_positional_encoders.py +++ b/tests/test_positional_encoders.py @@ -31,25 +31,27 @@ # TODO: Test the MLP_encoder and signnet_pos_encoder + def get_pe_tensors(smiles, pos_encoding_tensor): tensors, _, _ = graphium_cpp.featurize_smiles( smiles, - torch.tensor(data=[], dtype=torch.int64), # atom_property_list_onehot - torch.tensor(data=[], dtype=torch.int64), # atom_property_list_float - False, # has_conformer - torch.tensor(data=[], dtype=torch.int64), # edge_property_list + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_onehot + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_float + False, # has_conformer + torch.tensor(data=[], dtype=torch.int64), # edge_property_list pos_encoding_tensor, - True, # duplicate_edges - False, # add_self_loop - False, # explicit_H=False - False, # use_bonds_weights - True, #offset_carbon - 7, # torch float64 - 0, # mask_nan_style_int - 0 # mask_nan_value + True, # duplicate_edges + False, # add_self_loop + False, # explicit_H=False + False, # use_bonds_weights + True, # offset_carbon + 7, # torch float64 + 0, # mask_nan_style_int + 0, # mask_nan_value ) return tensors + class test_positional_encoder(ut.TestCase): smiles = [ "C", @@ -83,8 +85,10 @@ def test_laplacian_eigvec_eigval(self): "pos_level": "node", }, } - (pos_encoding_names, pos_encoding_tensor) = \ - graphium_cpp.positional_feature_options_to_tensor(features) + ( + pos_encoding_names, + 
pos_encoding_tensor, + ) = graphium_cpp.positional_feature_options_to_tensor(features) tensors = get_pe_tensors(mol, pos_encoding_tensor) eigvals = tensors[4] @@ -104,7 +108,9 @@ def test_laplacian_eigvec_eigval(self): true_eigvals, true_eigvecs = true_eigvals[:true_num_pos], true_eigvecs[:, :true_num_pos] if not ("." in mol): - print(f"About to test eigvecs for smiles {mol}, num_pos {num_pos}, disconnected_comp {disconnected_comp}") + print( + f"About to test eigvecs for smiles {mol}, num_pos {num_pos}, disconnected_comp {disconnected_comp}" + ) np.testing.assert_array_almost_equal( np.abs(true_eigvecs), np.abs(eigvecs[:, :true_num_pos]), @@ -127,12 +133,13 @@ def test_rwse(self): pos_kwargs = {"pos_type": "rw_return_probs", "ksteps": ksteps, "pos_level": "node"} features = { "rw_return_probs": pos_kwargs, - } - (pos_encoding_names, pos_encoding_tensor) = \ - graphium_cpp.positional_feature_options_to_tensor(features) + } + (pos_encoding_names, pos_encoding_tensor) = graphium_cpp.positional_feature_options_to_tensor( + features + ) tensors = get_pe_tensors(mol, pos_encoding_tensor) rwse_embed = tensors[4] - + self.assertEqual(list(rwse_embed.shape), [num_nodes, ksteps], msg=err_msg) # TODO: work in progress @@ -163,8 +170,10 @@ def test_laplacian_eigvec_with_encoder(self): "pos_level": "node", }, } - (pos_encoding_names, pos_encoding_tensor) = \ - graphium_cpp.positional_feature_options_to_tensor(features) + ( + pos_encoding_names, + pos_encoding_tensor, + ) = graphium_cpp.positional_feature_options_to_tensor(features) tensors = get_pe_tensors(mol, pos_encoding_tensor) @@ -176,13 +185,15 @@ def test_laplacian_eigvec_with_encoder(self): num_nodes = tensors[2].size(0) data_dict = { - #"feat": tensors[2], - #"edge_feat": tensors[3], + # "feat": tensors[2], + # "edge_feat": tensors[3], "laplacian_eigval": tensors[4], "laplacian_eigvec": tensors[5], - } + } # Create the PyG graph object `Data` - data = Data(edge_index=tensors[0], edge_weight=tensors[1], 
num_nodes=num_nodes, **data_dict) + data = Data( + edge_index=tensors[0], edge_weight=tensors[1], num_nodes=num_nodes, **data_dict + ) encoder = laplace_pos_encoder.LapPENodeEncoder( input_keys=input_keys, From 77d27b5d302132211779a2d4b2a86e2259b69ff1 Mon Sep 17 00:00:00 2001 From: ndickson Date: Wed, 5 Jun 2024 12:22:18 -0400 Subject: [PATCH 040/175] Hopefully explicitly installing graphium_cpp fixes the automated testing for now --- .github/workflows/test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 64846bd91..e428aff72 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -48,6 +48,7 @@ jobs: - name: Install library run: python -m pip install --no-deps -e . # `-e` required for correct `coverage` run. + run: cd graphium/graphium_cpp; python pip install .; cd ../.. - name: Run tests run: pytest -m 'not ipu' From cb1df1998aed544637363e95c74fd525b5ec4b36 Mon Sep 17 00:00:00 2001 From: ndickson Date: Wed, 5 Jun 2024 14:08:15 -0400 Subject: [PATCH 041/175] Test fix --- .github/workflows/test.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e428aff72..3292be07e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -48,6 +48,8 @@ jobs: - name: Install library run: python -m pip install --no-deps -e . # `-e` required for correct `coverage` run. + + - name: Install C++ library run: cd graphium/graphium_cpp; python pip install .; cd ../.. 
- name: Run tests From f3f6a0d3efdf397fd79347abf5bc4d0cc66dd546 Mon Sep 17 00:00:00 2001 From: ndickson Date: Wed, 5 Jun 2024 16:52:56 -0400 Subject: [PATCH 042/175] Another test fix --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3292be07e..d9f2e2fe3 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -50,7 +50,7 @@ jobs: run: python -m pip install --no-deps -e . # `-e` required for correct `coverage` run. - name: Install C++ library - run: cd graphium/graphium_cpp; python pip install .; cd ../.. + run: cd graphium/graphium_cpp; python -m pip install .; cd ../.. - name: Run tests run: pytest -m 'not ipu' From c5c0085310a5013f1b1bdf924470b2581760aa96 Mon Sep 17 00:00:00 2001 From: ndickson Date: Wed, 5 Jun 2024 17:08:26 -0400 Subject: [PATCH 043/175] Another test fix --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d9f2e2fe3..5d9eb2ce1 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -50,7 +50,7 @@ jobs: run: python -m pip install --no-deps -e . # `-e` required for correct `coverage` run. - name: Install C++ library - run: cd graphium/graphium_cpp; python -m pip install .; cd ../.. + run: cd graphium/graphium_cpp && git clone https://github.com/pybind/pybind11.git && export PYTHONPATH=$PYTHONPATH:./pybind11 && python -m pip install . && cd ../.. 
- name: Run tests run: pytest -m 'not ipu' From 6dd827fd49f9d17247a80b3e895afee29e699caa Mon Sep 17 00:00:00 2001 From: ndickson Date: Wed, 5 Jun 2024 18:28:38 -0400 Subject: [PATCH 044/175] Make sure RDKit can find Boost headers --- env.yml | 1 + graphium/graphium_cpp/setup.py | 1 + 2 files changed, 2 insertions(+) diff --git a/env.yml b/env.yml index e9999d2d6..25a6af0cf 100644 --- a/env.yml +++ b/env.yml @@ -43,6 +43,7 @@ dependencies: # chemistry - rdkit - datamol >=0.10 + - boost # needed by rdkit # Optional deps - sympy diff --git a/graphium/graphium_cpp/setup.py b/graphium/graphium_cpp/setup.py index 2ed13b000..c1fb1e3fb 100755 --- a/graphium/graphium_cpp/setup.py +++ b/graphium/graphium_cpp/setup.py @@ -40,6 +40,7 @@ os.path.join(torch_dir, "include"), os.path.join(torch_dir, "include/torch/csrc/api/include"), os.path.join(rdkit_prefix, "include/rdkit"), + os.path.join(rdkit_prefix, "include/boost"), numpy.get_include(), ], libraries=[ From 59c84a2a41263d8385b409d6d96e4e1912736b05 Mon Sep 17 00:00:00 2001 From: ndickson Date: Tue, 11 Jun 2024 20:16:54 -0400 Subject: [PATCH 045/175] Reimplemented test_pos_transfer_funcs.py to test all supported conversions --- tests/test_pos_transfer_funcs.py | 166 ++++++++++++++++++++++++++----- 1 file changed, 140 insertions(+), 26 deletions(-) diff --git a/tests/test_pos_transfer_funcs.py b/tests/test_pos_transfer_funcs.py index 5062cbe46..45f43fe5b 100644 --- a/tests/test_pos_transfer_funcs.py +++ b/tests/test_pos_transfer_funcs.py @@ -1,12 +1,12 @@ """ -------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. +Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates. Use of this software is subject to the terms and conditions outlined in the LICENSE file. Unauthorized modification, distribution, or use is prohibited. 
Provided 'as is' without warranties of any kind. -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. +Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates are not liable for any damages arising from its use. Refer to the LICENSE file for the full terms and conditions. -------------------------------------------------------------------------------- """ @@ -17,36 +17,150 @@ """ import numpy as np -import networkx as nx +import torch import unittest as ut +import math -from graphium.features.spectral import compute_laplacian_pe -from graphium.features.transfer_pos_level import ( - node_to_edge, - node_to_nodepair, - edge_to_nodepair, - nodepair_to_node, - nodepair_to_edge, - graph_to_node, -) +import graphium +import graphium_cpp +def get_tensors(smiles, pos_encoding_tensor): + tensors, _, _ = graphium_cpp.featurize_smiles( + smiles, + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_onehot + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_float + False, # has_conformer + torch.tensor(data=[], dtype=torch.int64), # edge_property_list + pos_encoding_tensor, + True, # duplicate_edges + False, # add_self_loop + False, # explicit_H=False + False, # use_bonds_weights + True, # offset_carbon + 7, # torch float64 + 0, # mask_nan_style_int + 0, # mask_nan_value + ) + return tensors + class test_pos_transfer_funcs(ut.TestCase): - # 4-barbell - G = nx.barbell_graph(4, 0) - adj = nx.to_numpy_array(G) - num_nodes, num_feat = 8, 5 - node_pe = np.random.rand(num_nodes, num_feat) - - def test_different_pathways_from_node_to_edge(self): - edge_pe1, _ = node_to_edge(self.node_pe, self.adj, {}) - nodepair_pe1 = node_to_nodepair(self.node_pe, self.num_nodes) - edge_pe2, _ = nodepair_to_edge(nodepair_pe1, self.adj, {}) - nodepair_pe2, _ = edge_to_nodepair(edge_pe1, self.adj, self.num_nodes, {}) - edge_pe3, _ = nodepair_to_edge(nodepair_pe2, self.adj, {}) 
- np.testing.assert_array_almost_equal(edge_pe1, edge_pe2) - np.testing.assert_array_almost_equal(edge_pe1, edge_pe3) + def test_different_transfers(self): + smiles = "CCCC" + num_nodes = 4 + + ksteps = [2, 4] + features = { + "a": { + "pos_level": "node", + "pos_type": "rw_return_probs", + "normalization": "none", + "ksteps": ksteps, + }, + "b": { + "pos_level": "edge", + "pos_type": "rw_return_probs", + "normalization": "none", + "ksteps": ksteps, + }, + "c": { + "pos_level": "nodepair", + "pos_type": "rw_return_probs", + "normalization": "none", + "ksteps": ksteps, + }, + "e": {"pos_level": "node", "pos_type": "graphormer", "normalization": "none"}, + "f": {"pos_level": "edge", "pos_type": "graphormer", "normalization": "none"}, + "d": {"pos_level": "nodepair", "pos_type": "graphormer", "normalization": "none"}, + } + + (pos_encoding_names, pos_encoding_tensor) = graphium_cpp.positional_feature_options_to_tensor(features) + + tensors = get_tensors(smiles, pos_encoding_tensor) + node_probs = tensors[4] + edge_probs = tensors[5] + nodepair_probs = tensors[6] + node_dists = tensors[7] + edge_dists = tensors[8] + nodepair_dists = tensors[9] + + print(f"node_probs =\n{node_probs}\n") + print(f"edge_probs =\n{edge_probs}\n") + print(f"nodepair_probs =\n{nodepair_probs}\n") + print(f"node_dists =\n{node_dists}\n") + print(f"edge_dists =\n{edge_dists}\n") + print(f"nodepair_dists =\n{nodepair_dists}\n") + + expected_node_probs = [ + [0.5, 0.375], + [0.75, 0.6875], + [0.75, 0.6875], + [0.5, 0.375], + ] + # sum for each node value and absolute difference for each node value, for each half-edge + expected_edge_probs = [ + [1.25, 1.0625, 0.25, 0.3125], + [1.25, 1.0625, 0.25, 0.3125], + [1.5, 1.375, 0.0, 0.0], + [1.5, 1.375, 0.0, 0.0], + [1.25, 1.0625, 0.25, 0.3125], + [1.25, 1.0625, 0.25, 0.3125], + ] + # sum for each node value and absolute difference for each node value, for each node pair + expected_nodepair_probs = [ + [ + [1.0000, 0.7500, 0.0000, 0.0000], + [1.2500, 
1.0625, 0.2500, 0.3125], + [1.2500, 1.0625, 0.2500, 0.3125], + [1.0000, 0.7500, 0.0000, 0.0000], + ], + [ + [1.2500, 1.0625, 0.2500, 0.3125], + [1.5000, 1.3750, 0.0000, 0.0000], + [1.5000, 1.3750, 0.0000, 0.0000], + [1.2500, 1.0625, 0.2500, 0.3125], + ], + + [ + [1.2500, 1.0625, 0.2500, 0.3125], + [1.5000, 1.3750, 0.0000, 0.0000], + [1.5000, 1.3750, 0.0000, 0.0000], + [1.2500, 1.0625, 0.2500, 0.3125], + ], + [ + [1.0000, 0.7500, 0.0000, 0.0000], + [1.2500, 1.0625, 0.2500, 0.3125], + [1.2500, 1.0625, 0.2500, 0.3125], + [1.0000, 0.7500, 0.0000, 0.0000], + ], + ] + self.assertEqual(node_probs.tolist(), expected_node_probs) + self.assertEqual(edge_probs.tolist(), expected_edge_probs) + self.assertEqual(nodepair_probs.tolist(), expected_nodepair_probs) + + expected_nodepair_dists = [ + [0.0, 1.0, 2.0, 3.0], + [1.0, 0.0, 1.0, 2.0], + [2.0, 1.0, 0.0, 1.0], + [3.0, 2.0, 1.0, 0.0], + ] + # Select half-edge node pairs + expected_edge_dists = [[1.0], [1.0], [1.0], [1.0], [1.0], [1.0]] + # Minimum of column, minimum of row, mean of column, mean of row, + # stdev of column, stdev of row, for each node + # stdev here uses n for normalization instead of n-1 + stdev_a = math.sqrt((1.5*1.5 + 0.5*0.5 + 0.5*0.5 + 1.5*1.5)/4) + stdev_b = math.sqrt((1.0*1.0 + 1.0*1.0)/4) + expected_node_dists = [ + [0.0, 0.0, 1.5, 1.5, stdev_a, stdev_a], + [0.0, 0.0, 1.0, 1.0, stdev_b, stdev_b], + [0.0, 0.0, 1.0, 1.0, stdev_b, stdev_b], + [0.0, 0.0, 1.5, 1.5, stdev_a, stdev_a], + ] + np.testing.assert_array_almost_equal(node_dists.tolist(), expected_node_dists) + self.assertEqual(edge_dists.tolist(), expected_edge_dists) + self.assertEqual(nodepair_dists.tolist(), expected_nodepair_dists) if __name__ == "__main__": ut.main() From 7bc8ade2c5991a8ea04ceff5efd81ed05cbe7c2e Mon Sep 17 00:00:00 2001 From: ndickson Date: Tue, 11 Jun 2024 20:26:11 -0400 Subject: [PATCH 046/175] Linting fixes --- tests/test_pos_transfer_funcs.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git 
a/tests/test_pos_transfer_funcs.py b/tests/test_pos_transfer_funcs.py index 45f43fe5b..188c6b0e3 100644 --- a/tests/test_pos_transfer_funcs.py +++ b/tests/test_pos_transfer_funcs.py @@ -11,7 +11,6 @@ -------------------------------------------------------------------------------- """ - """ Unit tests for the positional encodings in graphium/features/* """ @@ -44,11 +43,11 @@ def get_tensors(smiles, pos_encoding_tensor): ) return tensors + class test_pos_transfer_funcs(ut.TestCase): def test_different_transfers(self): smiles = "CCCC" - num_nodes = 4 ksteps = [2, 4] features = { @@ -75,7 +74,9 @@ def test_different_transfers(self): "d": {"pos_level": "nodepair", "pos_type": "graphormer", "normalization": "none"}, } - (pos_encoding_names, pos_encoding_tensor) = graphium_cpp.positional_feature_options_to_tensor(features) + (pos_encoding_names, pos_encoding_tensor) = graphium_cpp.positional_feature_options_to_tensor( + features + ) tensors = get_tensors(smiles, pos_encoding_tensor) node_probs = tensors[4] @@ -121,7 +122,6 @@ def test_different_transfers(self): [1.5000, 1.3750, 0.0000, 0.0000], [1.2500, 1.0625, 0.2500, 0.3125], ], - [ [1.2500, 1.0625, 0.2500, 0.3125], [1.5000, 1.3750, 0.0000, 0.0000], @@ -150,8 +150,8 @@ def test_different_transfers(self): # Minimum of column, minimum of row, mean of column, mean of row, # stdev of column, stdev of row, for each node # stdev here uses n for normalization instead of n-1 - stdev_a = math.sqrt((1.5*1.5 + 0.5*0.5 + 0.5*0.5 + 1.5*1.5)/4) - stdev_b = math.sqrt((1.0*1.0 + 1.0*1.0)/4) + stdev_a = math.sqrt((1.5 * 1.5 + 0.5 * 0.5 + 0.5 * 0.5 + 1.5 * 1.5) / 4) + stdev_b = math.sqrt((1.0 * 1.0 + 1.0 * 1.0) / 4) expected_node_dists = [ [0.0, 0.0, 1.5, 1.5, stdev_a, stdev_a], [0.0, 0.0, 1.0, 1.0, stdev_b, stdev_b], @@ -162,5 +162,6 @@ def test_different_transfers(self): self.assertEqual(edge_dists.tolist(), expected_edge_dists) self.assertEqual(nodepair_dists.tolist(), expected_nodepair_dists) + if __name__ == "__main__": ut.main() 
From 69032431bbe5c5eb0abfe09372acc06c7d73e892 Mon Sep 17 00:00:00 2001 From: ndickson Date: Wed, 12 Jun 2024 18:20:59 -0400 Subject: [PATCH 047/175] Fixed collections.abs.Callable to typing.Callable for type hint --- graphium/data/dataset.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/graphium/data/dataset.py b/graphium/data/dataset.py index a96d6bc9b..498515fc3 100644 --- a/graphium/data/dataset.py +++ b/graphium/data/dataset.py @@ -16,8 +16,7 @@ from copy import deepcopy from functools import lru_cache from multiprocessing import Manager -from typing import Any, Dict, List, Optional, Tuple, Union -from collections.abc import Callable +from typing import Any, Callable, Dict, List, Optional, Tuple, Union import fsspec import numpy as np From f355eed406ac415708f8e4aaa1e0208552ca3f49 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 13 Jun 2024 14:41:23 -0400 Subject: [PATCH 048/175] Improved the task summaries and started to fix the training logging. --- graphium/trainer/predictor.py | 82 +++++++------------------ graphium/trainer/predictor_summaries.py | 59 +++++++----------- 2 files changed, 45 insertions(+), 96 deletions(-) diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index a56fb0eee..af2e15cbe 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -168,14 +168,7 @@ def __init__( monitor = self.optim_options.scheduler_kwargs["monitor"].split("/")[0] mode = self.optim_options.scheduler_kwargs["mode"] - self.task_epoch_summary = MultiTaskSummary( - task_loss_fun=self.loss_fun, - task_metrics=self.metrics, - task_metrics_on_training_set=self.metrics_on_training_set, - task_metrics_on_progress_bar=self.metrics_on_progress_bar, - monitor=monitor, - mode=mode, - ) + self.task_epoch_summary = {} # This helps avoid a bug when saving hparams to yaml with different dict or str formats self._set_hparams(recursive_config_reformating(self.hparams)) @@ -386,14 +379,10 @@ def _general_step(self, 
batch: Dict[str, Tensor], step_name: str, to_cpu: bool) if weights is not None: weights = weights.detach().to(device=device) - step_dict = {} - for task in self.tasks: - step_dict[ - self.task_epoch_summary.metric_log_name(task, self.loss_fun[task]._get_name(), step_name) - ] = loss.detach() + self.task_epoch_summary.update(targets_dict, preds) + step_dict = {} step_dict["loss"] = loss - # print("loss ", self.global_step, self.current_epoch, loss) step_dict["task_losses"] = task_losses return step_dict @@ -428,8 +417,6 @@ def update_metrics(self, # TODO!! # Lost of changes from the `predictor_summaries.py` file, with `Summary.get_metrics_logs` computing the metrics at the end of an epoch. - # See torchmetrics `MeanMetric` and `SumMetric`, and use them to compute STD as well - # DON'T FORGET TO RESET ALL METRICS!! @@ -505,6 +492,8 @@ def flag_step(self, batch: Dict[str, Tensor], step_name: str, to_cpu: bool) -> D return step_dict def on_train_batch_start(self, batch: Any, batch_idx: int) -> Optional[int]: + + # TODO: Initialize the `task_epoch_summary` for training self.train_batch_start_time = time.time() self.skip_log_train_metrics = (self.metrics_every_n_train_steps is None) or ( (batch_idx % self.metrics_every_n_train_steps) != 0 @@ -515,31 +504,30 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: train_batch_time = time.time() - self.train_batch_start_time # To be used for throughput calculation # Get the metrics that are logged at every step (loss, grad_norm, batch_time, batch_tput) - aggregated_metrics_logs = {} - aggregated_metrics_logs["train/loss"] = outputs["loss"] - aggregated_metrics_logs["epoch_count"] = self.current_epoch + metrics_logs = {} # Incriment by the batch size self.samples_seen += self.global_bs - aggregated_metrics_logs["samples_seen"] = self.samples_seen + metrics_logs["samples_seen"] = self.samples_seen # report the training loss for each individual tasks - for task in self.tasks: - 
aggregated_metrics_logs[f"train/loss/{task}"] = outputs["task_losses"][task] - # get the mean loss value for individual tasks as they are a tensor of size --> gradient accumulation * replication * device_iter # filter zeros out for the individual losses - for key in aggregated_metrics_logs: - if isinstance(aggregated_metrics_logs[key], torch.Tensor): - if aggregated_metrics_logs[key].numel() > 1: - aggregated_metrics_logs[key] = aggregated_metrics_logs[key][ - aggregated_metrics_logs[key] != 0 - ].mean() + losses = {} + for task in self.tasks: + this_losses = outputs["task_losses"][task] + if isinstance(this_losses, torch.Tensor): + if this_losses.numel() > 1: + this_losses = this_losses[this_losses != 0].mean() + + losses[f"train/loss/{task}"] = this_losses + + metrics_logs.update(losses) # If logging is skipped for this step, then log the important metrics anyway and return if self.skip_log_train_metrics: if self.logger is not None: self.logger.log_metrics( - aggregated_metrics_logs, step=self.global_step + metrics_logs, step=self.global_step ) # This is a pytorch lightning function call return @@ -548,25 +536,15 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: # Get the throughput of the batch num_graphs = self.get_num_graphs(batch["features"]) tput = num_graphs / train_batch_time - aggregated_metrics_logs["train/batch_time"] = train_batch_time - aggregated_metrics_logs["train/batch_tput"] = tput + metrics_logs["train/batch_time"] = train_batch_time + metrics_logs["train/batch_tput"] = tput - # Compute all the metrics for the training set - self.task_epoch_summary.update( - step_name="train", - targets=outputs["targets"], - preds=outputs["preds"], - loss=outputs["loss"], # This is the weighted loss for now, but change to task-specific loss - task_losses=outputs["task_losses"], - n_epochs=self.current_epoch, - ) - metrics_logs = self.task_epoch_summary.get_metrics_logs() # Dict[task, metric_logs] - 
aggregated_metrics_logs.update(metrics_logs) + metrics_logs.update(self.task_epoch_summary["train"].compute()) # Log the metrics if self.logger is not None: self.logger.log_metrics( - aggregated_metrics_logs, step=self.global_step + metrics_logs, step=self.global_step ) # This is a pytorch lightning function call def training_step(self, batch: Dict[str, Tensor], to_cpu: bool = True) -> Dict[str, Any]: @@ -601,21 +579,6 @@ def _general_epoch_end(self, outputs: Dict[str, Any], step_name: str, device: st for task in self.tasks: preds[task] = torch.cat([out["preds"][task].to(device) for out in outputs], dim=0) targets[task] = torch.cat([out["targets"][task].to(device) for out in outputs], dim=0) - if ("weights" in outputs[0].keys()) and (outputs[0]["weights"] is not None): - weights = torch.cat([out["weights"].to(device) for out in outputs], dim=0) - else: - weights = None - - # NOTE: Computing the loss over the entire split may cause - # overflow issues when using fp16 - loss, task_losses = self.compute_loss( - preds=dict_tensor_fp16_to_fp32(preds), - targets=dict_tensor_fp16_to_fp32(targets), - weights=weights, - target_nan_mask=self.target_nan_mask, - multitask_handling=self.multitask_handling, - loss_fun=self.loss_fun, - ) self.task_epoch_summary.update( step_name=step_name, @@ -688,6 +651,7 @@ def on_train_start(self): def get_progress_bar_dict(self) -> Dict[str, float]: prog_dict = {} + prog_dict["loss"] = self.task_epoch_summary.weighted_loss.detach().cpu() results_on_progress_bar = self.task_epoch_summary.get_results_on_progress_bar("val") for task in self.tasks: diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 871bde725..083bcaf54 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -164,14 +164,12 @@ def _compute(self, metrics_to_use: Optional[Union[List[str], Dict[str, Any]]] = # Compute the metrics computed_metrics = {} for metric_key, metric_obj in 
metrics_to_use: - metric_name = self.metric_log_name( - self.task_name, metric_key, self.step_name - ) + metric_name = self.metric_log_name(metric_key) try: - computed_metrics[metric_name] = metric_obj.compute() + computed_metrics[f"{self.step_name}/{metric_name}"] = metric_obj.compute() except Exception as e: # If the metric computation fails, return NaN and log a warning only once - computed_metrics[metric_name] = torch.as_tensor(float("nan")) + computed_metrics[f"{self.step_name}/{metric_name}"] = torch.as_tensor(float("nan")) # Warn only if it's the first warning for that metric if metric_name not in self.logged_metrics_exceptions: self.logged_metrics_exceptions.append(metric_name) @@ -187,8 +185,8 @@ def compute(self) -> Dict[str, Tensor]: """ computed_metrics = self._compute(metrics_to_use=self.metrics_to_use) self._cached_metrics = computed_metrics - self._cached_metrics[f"{self.step_name}/loss"] = self.loss - self._cached_metrics[f"{self.step_name}/n_epochs"] = self.n_epochs + self._cached_metrics[self.metric_log_name("loss")] = self.loss + self._cached_metrics[self.metric_log_name("n_epochs")] = self.n_epochs return computed_metrics @@ -207,24 +205,21 @@ def get_results_on_progress_bar( else: results_prog = {} for metric_key in self.metrics_on_progress_bar: - metric_name = self.metric_log_name( - self.task_name, metric_key, self.step_name - ) + metric_name = self.metric_log_name(metric_key) results_prog[metric_name] = cached_metrics[metric_name] return results_prog - def metric_log_name(self, task_name, metric_name, step_name): - if task_name is None: - return f"{metric_name}/{step_name}" + def metric_log_name(self, metric_name): + if self.task_name is None: + return f"{metric_name}/{self.step_name}" else: - return f"{task_name}/{metric_name}/{step_name}" + return f"{self.task_name}/{metric_name}/{self.step_name}" class MultiTaskSummary(SummaryInterface): def __init__( self, - global_loss: Tensor, task_loss: Dict[str, Tensor], task_metrics: Dict[str, 
Dict[str, Callable]], step_name: str, @@ -237,7 +232,7 @@ class to store the summaries of the tasks Parameters: """ - self.global_loss = global_loss.detach().cpu() + self.global_loss = None self.task_metrics = task_metrics self.task_metrics_on_progress_bar = task_metrics_on_progress_bar self.task_metrics_on_training_set = task_metrics_on_training_set @@ -256,7 +251,7 @@ class to store the summaries of the tasks task_name = task, ) - def update(self, targets: Tensor, preds: Tensor) -> None: + def update(self, targets: Dict[str, Tensor], preds: Dict[str, Tensor]) -> None: r""" update the state for all predictors @@ -286,6 +281,14 @@ def get_results_on_progress_bar( for task in self.tasks: task_results_prog.update(self.task_summaries[task].get_results_on_progress_bar(step_name)) return task_results_prog + + def add_global_loss(self, loss: Tensor) -> None: + r""" + Add the global loss to be logged with the metrics + Parameters: + loss: the global loss + """ + self.global_loss = loss.detach().cpu() def compute(self) -> Dict[str, Tensor]: r""" @@ -296,28 +299,10 @@ def compute(self) -> Dict[str, Tensor]: computed_metrics = {} for task in self.tasks: computed_metrics.update(self.task_summaries[task].compute()) + if self.global_loss is not None: + computed_metrics[f"{self.step_name}/loss"] = self.global_loss return computed_metrics - def aggregate_metrics_logs( - self, - metrics_logs: Dict[str, Dict[str, Tensor]], - ) -> Dict[str, Tensor]: - r""" - concatenate the metrics logs - Parameters: - metrics_logs: the metrics logs - Returns: - the concatenated metrics logs - """ - aggregated_metrics_logs = {} - for task in list(self.tasks) + ["_global"]: - if task in metrics_logs.keys(): - aggregated_metrics_logs.update(metrics_logs[task]) - aggregated_metrics_logs[f"loss/{self.step_name}"] = self.global_loss.detach().cpu() - return aggregated_metrics_logs - - - class STDMetric(Metric): """ From 9f38afb81df6485a09086e5b056c67f8e2b49b3b Mon Sep 17 00:00:00 2001 From: ndickson Date: 
Mon, 17 Jun 2024 14:27:10 -0400 Subject: [PATCH 049/175] Removed file_opener and its test --- graphium/utils/read_file.py | 173 ------------------------------------ tests/test_utils.py | 27 ------ 2 files changed, 200 deletions(-) delete mode 100644 graphium/utils/read_file.py diff --git a/graphium/utils/read_file.py b/graphium/utils/read_file.py deleted file mode 100644 index 27d2fb216..000000000 --- a/graphium/utils/read_file.py +++ /dev/null @@ -1,173 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. --------------------------------------------------------------------------------- -""" - - -""" Utiles for data parsing""" -import os -import warnings -import numpy as np -import pandas as pd -import datamol as dm -from functools import partial -from copy import copy -import fsspec - -from loguru import logger -from rdkit import Chem -from rdkit.Chem.Descriptors import ExactMolWt - -from graphium.utils.tensor import parse_valid_args, arg_in_func - - -def read_file(filepath, as_ext=None, **kwargs): - r""" - Allow to read different file format and parse them into a MolecularDataFrame. - Supported formats are: - * csv (.csv, .smile, .smiles, .tsv) - * txt (.txt) - * xls (.xls, .xlsx, .xlsm, .xls*) - * sdf (.sdf) - * pkl (.pkl) - - Arguments - ----------- - - filepath: str - The full path and name of the file to read. - It also supports the s3 url path. - as_ext: str, Optional - The file extension used to read the file. If None, the extension is deduced - from the extension of the file. 
Otherwise, no matter the file extension, - the file will be read according to the specified ``as_ext``. - (Default=None) - **kwargs: All the optional parameters required for the desired file reader. - - TODO: unit test to make sure it works well with all extensions - - Returns - --------- - df: pandas.DataFrame - The ``pandas.DataFrame`` containing the parsed data - - """ - - # Get the file extension - if as_ext is None: - file_ext = os.path.splitext(filepath)[-1].lower()[1:] - else: - file_ext = as_ext - if not isinstance(file_ext, str): - raise TypeError("`file_type` must be a `str`. Provided: {}".format(file_ext)) - - open_mode = "r" - - # Read the file according to the right extension - if file_ext in ["csv", "smile", "smiles", "smi", "tsv"]: - file_reader = pd.read_csv - elif file_ext == "txt": - file_reader = pd.read_table - elif file_ext[0:3] == "xls": - open_mode = "rb" - file_reader = partial(pd.read_excel, engine="openpyxl") - elif file_ext == "sdf": - file_reader = parse_sdf_to_dataframe - elif file_ext == "pkl": - open_mode = "rb" - file_reader = pd.read_pickle - else: - raise 'File extension "{}" not supported'.format(file_ext) - - kwargs = parse_valid_args(fn=file_reader, param_dict=kwargs) - - if file_ext[0:3] not in ["sdf", "xls"]: - with file_opener(filepath, open_mode) as file_in: - data = file_reader(file_in, **kwargs) - else: - data = file_reader(filepath, **kwargs) - return data - - -def parse_sdf_to_dataframe(sdf_path, as_cxsmiles=True, skiprows=None): - r""" - Allows to read an SDF file containing molecular informations, convert - it to a pandas DataFrame and convert the molecules to SMILES. It also - lists a warning of all the molecules that couldn't be read. - - Arguments - ----------- - - sdf_path: str - The full path and name of the sdf file to read - as_cxsmiles: bool, optional - Whether to use the CXSMILES notation, which preserves atomic coordinates, - stereocenters, and much more. 
- See `https://dl.chemaxon.com/marvin-archive/latest/help/formats/cxsmiles-doc.html` - (Default = True) - skiprows: int, list - The rows to skip from dataset. The enumerate index starts from 1 insted of 0. - (Default = None) - - """ - - # read the SDF file - # locally or from s3 - data = dm.read_sdf(sdf_path) - - # For each molecule in the SDF file, read all the properties and add it to a list of dict. - # Also count the number of molecules that cannot be read. - data_list = [] - count_none = 0 - if skiprows is not None: - if isinstance(skiprows, int): - skiprows = range(0, skiprows - 1) - skiprows = np.array(skiprows) - 1 - - for idx, mol in enumerate(data): - if (skiprows is not None) and (idx in skiprows): - continue - - if (mol is not None) and (ExactMolWt(mol) > 0): - mol_dict = mol.GetPropsAsDict() - data_list.append(mol_dict) - if as_cxsmiles: - smiles = Chem.rdmolfiles.MolToCXSmiles(mol, canonical=True) - else: - smiles = dm.to_smiles(mol, canonical=True) - data_list[-1]["SMILES"] = smiles - else: - count_none += 1 - logger.info(f"Could not read molecule # {idx}") - - # Display a message or warning after the SDF is done parsing - if count_none == 0: - logger.info("Successfully read the SDF file without error: {}".format(sdf_path)) - else: - warnings.warn( - ( - 'Error reading {} molecules from the "{}" file.\ - {} molecules read successfully.' 
- ).format(count_none, sdf_path, len(data_list)) - ) - return pd.DataFrame(data_list) - - -def file_opener(filename, mode="r"): - """File reader stream""" - filename = str(filename) - if "w" in mode: - filename = "simplecache::" + filename - if filename.endswith(".gz"): - instream = fsspec.open(filename, mode=mode, compression="gzip") - else: - instream = fsspec.open(filename, mode=mode) - return instream diff --git a/tests/test_utils.py b/tests/test_utils.py index b6a7b171c..d14584798 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -22,7 +22,6 @@ import unittest as ut import gzip -from graphium.utils.read_file import file_opener from graphium.utils.tensor import ( nan_mad, nan_mean, @@ -150,32 +149,6 @@ def test_nan_mad(self): np.testing.assert_almost_equal(torch_mad.numpy(), numpy_mad, decimal=4, err_msg=err_msg) -def test_file_opener(tmp_path): - # Create a temporary file - txt_file = tmp_path / "test.txt" - txt_file.write_text("Hello, World!") - - # Test opening file in read mode - with file_opener(txt_file, "r") as f: - assert f.read() == "Hello, World!" - - # Test opening file in write mode - with file_opener(txt_file, "w") as f: - f.write("New text") - - with file_opener(txt_file, "r") as f: - assert f.read() == "New text" - - # Create a temporary gzip file - gzip_file = tmp_path / "test.txt.gz" - with gzip.open(gzip_file, "wt") as f: - f.write("Hello, Gzip!") - - # Test opening gzip file in read mode - with file_opener(gzip_file, "r") as f: - assert f.read() == "Hello, Gzip!" 
- - class test_SafeRun(ut.TestCase): def test_safe_run(self): # Error is caught From 5ab9ca901b8a7bdf0f97326d503173dc2408f7f5 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Tue, 9 Jul 2024 14:10:11 -0400 Subject: [PATCH 050/175] Fixed the issue with boolean masking, introduced by `F._canonical_mask` in `TransformerEncoder` --- graphium/nn/base_layers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphium/nn/base_layers.py b/graphium/nn/base_layers.py index dbc89f19f..24cb053cc 100644 --- a/graphium/nn/base_layers.py +++ b/graphium/nn/base_layers.py @@ -190,7 +190,7 @@ def forward( # key_padding_mask: [batch, 1, 1, nodes] if key_padding_mask is not None: masked_attn_weights = attn_weights.masked_fill( - key_padding_mask.unsqueeze(1).unsqueeze(2), + key_padding_mask.unsqueeze(1).unsqueeze(2).bool(), key_padding_mask_value, ) else: From 9c7504f52ab38e93a0783c7c9c65d3562cf595e7 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Tue, 9 Jul 2024 14:54:04 -0400 Subject: [PATCH 051/175] Fixed the float vs double issue in laplacian pos encoding --- tests/test_positional_encoders.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_positional_encoders.py b/tests/test_positional_encoders.py index 2cb6aacff..66148487f 100644 --- a/tests/test_positional_encoders.py +++ b/tests/test_positional_encoders.py @@ -187,8 +187,8 @@ def test_laplacian_eigvec_with_encoder(self): data_dict = { # "feat": tensors[2], # "edge_feat": tensors[3], - "laplacian_eigval": tensors[4], - "laplacian_eigvec": tensors[5], + "laplacian_eigval": tensors[4].float(), + "laplacian_eigvec": tensors[5].float(), } # Create the PyG graph object `Data` data = Data( From f8358f350229c82353c7996f2bd4415a33b1c9c1 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Tue, 9 Jul 2024 15:08:38 -0400 Subject: [PATCH 052/175] Added comment --- graphium/nn/base_layers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphium/nn/base_layers.py 
b/graphium/nn/base_layers.py index 24cb053cc..a88e6ad9b 100644 --- a/graphium/nn/base_layers.py +++ b/graphium/nn/base_layers.py @@ -190,7 +190,7 @@ def forward( # key_padding_mask: [batch, 1, 1, nodes] if key_padding_mask is not None: masked_attn_weights = attn_weights.masked_fill( - key_padding_mask.unsqueeze(1).unsqueeze(2).bool(), + key_padding_mask.unsqueeze(1).unsqueeze(2).bool(), # The mask is cast to float somewhere in TransformerEncoder key_padding_mask_value, ) else: From 692decca13cf2d427f1ffd7cb2d0926b3ffbb413 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Tue, 9 Jul 2024 15:27:06 -0400 Subject: [PATCH 053/175] Fixed the ipu tests by making sure that `IPUStrategy` is not imported if not using IPUs --- tests/test_ipu_dataloader.py | 3 ++- tests/test_ipu_to_dense_batch.py | 13 ------------- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/tests/test_ipu_dataloader.py b/tests/test_ipu_dataloader.py index 436d609d4..39f754e20 100644 --- a/tests/test_ipu_dataloader.py +++ b/tests/test_ipu_dataloader.py @@ -26,7 +26,6 @@ import torch from torch.utils.data.dataloader import default_collate -from lightning_graphcore import IPUStrategy def random_packing(num_nodes, batch_size): @@ -119,6 +118,8 @@ def test_poptorch_simple_deviceiterations_gradient_accumulation(self): Test a simple version of the device-iterations and gradient accumulation to make sure that the dataloader and models handle them correcly. 
""" + from lightning_graphcore import IPUStrategy + with patch("poptorch.ipuHardwareIsAvailable", return_value=True): with patch("lightning_graphcore.accelerator._IPU_AVAILABLE", new=True): diff --git a/tests/test_ipu_to_dense_batch.py b/tests/test_ipu_to_dense_batch.py index 55c6e3372..f2e29537b 100644 --- a/tests/test_ipu_to_dense_batch.py +++ b/tests/test_ipu_to_dense_batch.py @@ -16,21 +16,8 @@ import torch from torch_geometric.data import Data, Batch from graphium.ipu.to_dense_batch import to_dense_batch -from warnings import warn - - -# General imports -import yaml -import unittest as ut -import numpy as np -from copy import deepcopy -from warnings import warn -from lightning import Trainer, LightningModule -from lightning_graphcore import IPUStrategy -from functools import partial import torch -from torch.utils.data.dataloader import default_collate # Current library imports from graphium.config._loader import load_datamodule, load_metrics, load_architecture, load_accelerator From 8891e660757fd8307809a927f1da22d7c97ccb00 Mon Sep 17 00:00:00 2001 From: DomInvivo <47570400+DomInvivo@users.noreply.github.com> Date: Tue, 9 Jul 2024 15:29:36 -0400 Subject: [PATCH 054/175] Update test.yml to only test python 3.10 --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5d9eb2ce1..715119776 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -16,7 +16,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10"] + python-version: ["3.10"] pytorch-version: ["2.0"] runs-on: "ubuntu-latest" From c2d3c87d5781faad9f282598e3155d77517b9161 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Tue, 9 Jul 2024 15:39:30 -0400 Subject: [PATCH 055/175] Removed positional encodings from the docs --- docs/api/graphium.features.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/api/graphium.features.md 
b/docs/api/graphium.features.md index 758d14135..d135d8fb5 100644 --- a/docs/api/graphium.features.md +++ b/docs/api/graphium.features.md @@ -16,11 +16,6 @@ Feature extraction and manipulation ::: graphium.features.featurizer -## Positional Encoding ------------- -::: graphium.features.positional_encoding - - ## Properties ------------ ::: graphium.features.properties From 0a1696f22d81744639399684343733d4e93da575 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Tue, 9 Jul 2024 15:41:02 -0400 Subject: [PATCH 056/175] Upgraded python versions in the tests --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 715119776..774d75704 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -16,7 +16,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.10"] + python-version: ["3.10", "3.11", "3.12"] pytorch-version: ["2.0"] runs-on: "ubuntu-latest" From 50265dfcf6f155748e7b1bc427374ddf3f1123e3 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Tue, 9 Jul 2024 15:49:56 -0400 Subject: [PATCH 057/175] Removed reference to old files now in C++ --- docs/api/graphium.features.md | 24 ------------------------ graphium/features/README.md | 7 ++----- graphium/graphium_cpp/one_hot.cpp | 8 ++++---- 3 files changed, 6 insertions(+), 33 deletions(-) diff --git a/docs/api/graphium.features.md b/docs/api/graphium.features.md index d135d8fb5..fa9080700 100644 --- a/docs/api/graphium.features.md +++ b/docs/api/graphium.features.md @@ -5,32 +5,8 @@ Feature extraction and manipulation === "Contents" * [Featurizer](#featurizer) - * [Positional Encoding](#positional-encoding) - * [Properties](#properties) - * [Spectral PE](#spectral-pe) - * [Random Walk PE](#random-walk-pe) - * [NMP](#nmp) ## Featurizer ------------ ::: graphium.features.featurizer - -## Properties ------------- -::: graphium.features.properties - - -## Spectral PE ------------- -::: 
graphium.features.spectral - - -## Random Walk PE ------------- -::: graphium.features.rw - - -## NMP ------------- -::: graphium.features.nmp diff --git a/graphium/features/README.md b/graphium/features/README.md index 4188948fe..14b123106 100644 --- a/graphium/features/README.md +++ b/graphium/features/README.md @@ -7,8 +7,5 @@ ## What is in this folder? - ✅ `featurizer.py`: featurization code for the molecules, adding node, edge and graph features to the mol object -- `nmp.py`: check if a string can be converted to float, helper function for featurization -- `positional_encoding.py`: code for computing all raw positional and structural encoding of the graph, see `graph_positional_encoder` function -- `properties.py`: code for computing properties of the molecule -- `rw.py`: code for computing random walk positional encoding -- `spectral.py`: code for computing the spectral positional encoding such as the Laplacian eigenvalues and eigenvectors \ No newline at end of file + +Positional encodings, and atom/bond features (`nmp.py`) have been moved to the `/graphium_cpp` folder. 
\ No newline at end of file diff --git a/graphium/graphium_cpp/one_hot.cpp b/graphium/graphium_cpp/one_hot.cpp index b79d93f23..47485569e 100644 --- a/graphium/graphium_cpp/one_hot.cpp +++ b/graphium/graphium_cpp/one_hot.cpp @@ -28,7 +28,7 @@ class OneHotLookup { } }; -// This list of elements matches ATOM_LIST in graphium/features/nmp.py +// This list of elements matches ATOM_LIST in older file graphium/features/nmp.py constexpr size_t atomicNumList[] = { 6 -1, // C 7 -1, // N @@ -81,7 +81,7 @@ constexpr size_t degreeCount = 5; constexpr size_t valenceCount = 7; // Reverse alphabetical order, excluding "OTHER", -// matching HYBRIDIZATION_LIST in graphium/features/nmp.py +// matching HYBRIDIZATION_LIST in older file graphium/features/nmp.py constexpr size_t hybridizationList[] = { RDKit::Atom::HybridizationType::UNSPECIFIED, RDKit::Atom::HybridizationType::SP3D2, @@ -142,7 +142,7 @@ constexpr ElementType atomicNumToType[] = { }; constexpr size_t typeCount = ElementType::NUM_ELEMENT_TYPES; -// This matches BOND_TYPES in graphium/features/nmp.py +// This matches BOND_TYPES in older file graphium/features/nmp.py constexpr size_t bondTypeList[] = { RDKit::Bond::BondType::SINGLE, RDKit::Bond::BondType::DOUBLE, @@ -152,7 +152,7 @@ constexpr size_t bondTypeList[] = { constexpr size_t bondTypeCount = std::extent::value; constexpr OneHotLookup<22, bondTypeCount> bondTypeLookup(bondTypeList); -// This matches BOND_STEREO in graphium/features/nmp.py +// This matches BOND_STEREO in older file graphium/features/nmp.py constexpr size_t bondStereoList[] = { RDKit::Bond::BondStereo::STEREONONE, RDKit::Bond::BondStereo::STEREOANY, From 58fc2aa5ef85fba1f92ef48129391ed72469c4e6 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Tue, 9 Jul 2024 15:50:32 -0400 Subject: [PATCH 058/175] Downgraded python version --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 774d75704..5688c7c94 
100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -16,7 +16,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.10", "3.11", "3.12"] + python-version: ["3.09", "3.10", "3.11"] pytorch-version: ["2.0"] runs-on: "ubuntu-latest" From 5852467677f05d90527f95ef82679ec5bf50d9c2 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Tue, 9 Jul 2024 15:52:11 -0400 Subject: [PATCH 059/175] Fixed other docs broken references --- docs/api/graphium.utils.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docs/api/graphium.utils.md b/docs/api/graphium.utils.md index 5804a060e..632c6ea06 100644 --- a/docs/api/graphium.utils.md +++ b/docs/api/graphium.utils.md @@ -46,10 +46,6 @@ module for utility functions ::: graphium.utils.mup -## Read File ----------------- -::: graphium.utils.read_file - ## Safe Run ---------------- ::: graphium.utils.safe_run From 4372acefdaa5f8f73d49edd3d0caac9379be4589 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Wed, 10 Jul 2024 18:00:23 -0400 Subject: [PATCH 060/175] Fixed test_metrics. 
Moved lots of `spaces.py` imports to inner functions to avoid import loops --- graphium/config/_loader.py | 5 +- .../finetuning/finetuning_architecture.py | 3 +- graphium/trainer/metrics.py | 2 +- graphium/trainer/predictor.py | 7 +- graphium/trainer/predictor_options.py | 7 +- graphium/utils/spaces.py | 1 - tests/test_metrics.py | 130 +++++++++++------- 7 files changed, 97 insertions(+), 58 deletions(-) diff --git a/graphium/config/_loader.py b/graphium/config/_loader.py index 1e542592d..dc1da4998 100644 --- a/graphium/config/_loader.py +++ b/graphium/config/_loader.py @@ -43,7 +43,6 @@ # Graphium from graphium.utils.mup import set_base_shapes -from graphium.utils.spaces import DATAMODULE_DICT, GRAPHIUM_PRETRAINED_MODELS_DICT from graphium.utils import fs @@ -111,6 +110,8 @@ def load_datamodule( datamodule: The datamodule used to process and load the data """ + from graphium.utils.spaces import DATAMODULE_DICT # Avoid circular imports with `spaces.py` + cfg_data = config["datamodule"]["args"] # Instanciate the datamodule @@ -631,6 +632,8 @@ def get_checkpoint_path(config: Union[omegaconf.DictConfig, Dict[str, Any]]) -> Otherwise, assume it refers to a file in the checkpointing dir. 
""" + from graphium.utils.spaces import GRAPHIUM_PRETRAINED_MODELS_DICT # Avoid circular imports with `spaces.py` + cfg_trainer = config["trainer"] path = config.get("ckpt_name_for_testing", "last.ckpt") diff --git a/graphium/finetuning/finetuning_architecture.py b/graphium/finetuning/finetuning_architecture.py index 864016141..4b0de1607 100644 --- a/graphium/finetuning/finetuning_architecture.py +++ b/graphium/finetuning/finetuning_architecture.py @@ -21,7 +21,6 @@ from graphium.nn.utils import MupMixin from graphium.trainer.predictor import PredictorModule -from graphium.utils.spaces import FINETUNING_HEADS_DICT class FullGraphFinetuningNetwork(nn.Module, MupMixin): @@ -309,6 +308,8 @@ def __init__(self, finetuning_head_kwargs: Dict[str, Any]): """ + from graphium.utils.spaces import FINETUNING_HEADS_DICT # Avoiding circular imports with `spaces.py` + super().__init__() self.task = finetuning_head_kwargs.pop("task", None) self.previous_module = finetuning_head_kwargs.pop("previous_module", "task_heads") diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index dedcca999..77d1f024a 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -414,7 +414,7 @@ def __call__(self, preds: Tensor, target: Tensor) -> Tensor: r""" Compute the metric with the method `self.compute` """ - return self.compute(preds, target) + return self.update_compute(preds, target) def __repr__(self): r""" diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index af2e15cbe..f9886bac5 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -36,7 +36,6 @@ from graphium.trainer.predictor_summaries import MultiTaskSummary from graphium.utils import fs from graphium.utils.moving_average_tracker import MovingAverageTracker -from graphium.utils.spaces import GRAPHIUM_PRETRAINED_MODELS_DICT from graphium.utils.tensor import dict_tensor_fp16_to_fp32 @@ -673,7 +672,9 @@ def __repr__(self) -> str: @staticmethod def 
list_pretrained_models(): """List available pretrained models.""" - return GRAPHIUM_PRETRAINED_MODELS_DICT + from graphium.utils.spaces import GRAPHIUM_PRETRAINED_MODELS_DICT + + return GRAPHIUM_PRETRAINED_MODELS_DICT # Avoiding circular imports with `space.py` @staticmethod def load_pretrained_model(name_or_path: str, device: str = None): @@ -684,6 +685,8 @@ def load_pretrained_model(name_or_path: str, device: str = None): from `graphium.trainer.PredictorModule.list_pretrained_models()`. """ + from graphium.utils.spaces import GRAPHIUM_PRETRAINED_MODELS_DICT # Avoiding circular imports with `space.py` + name = GRAPHIUM_PRETRAINED_MODELS_DICT.get(name_or_path) if name is not None: diff --git a/graphium/trainer/predictor_options.py b/graphium/trainer/predictor_options.py index 04a62e84b..358329e70 100644 --- a/graphium/trainer/predictor_options.py +++ b/graphium/trainer/predictor_options.py @@ -30,10 +30,6 @@ from torch import nn -from graphium.utils.spaces import LOSS_DICT -from graphium.utils.spaces import SCHEDULER_DICT - - @dataclass class ModelOptions: r""" @@ -117,6 +113,7 @@ def set_kwargs(self): scheduler_class = torch_scheduler_kwargs.pop("module_type") if self.scheduler_class is None: if isinstance(scheduler_class, str): + from graphium.utils.spaces import SCHEDULER_DICT self.scheduler_class = SCHEDULER_DICT[scheduler_class] elif isclass(scheduler_class): self.scheduler_class = scheduler_class @@ -196,6 +193,8 @@ def parse_loss_fun(loss_fun: Union[str, Dict, Callable]) -> Callable: Function or callable to compute the loss, takes `preds` and `targets` as inputs. 
""" + from graphium.utils.spaces import LOSS_DICT # Avoiding circular imports with `spaces.py` + if isinstance(loss_fun, str): if loss_fun not in LOSS_DICT.keys(): raise ValueError( diff --git a/graphium/utils/spaces.py b/graphium/utils/spaces.py index 97b376ffa..44bf7e3df 100644 --- a/graphium/utils/spaces.py +++ b/graphium/utils/spaces.py @@ -23,7 +23,6 @@ import graphium.utils.custom_lr as CustomLR import graphium.data.datamodule as Datamodules import graphium.ipu.ipu_losses as IPULosses -import graphium.ipu.ipu_metrics as Metrics import graphium.nn.pyg_layers as PygLayers import graphium.nn.residual_connections as Residuals import graphium.nn.encoders as Encoders diff --git a/tests/test_metrics.py b/tests/test_metrics.py index dc5bc01b2..6c089c2bb 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -27,7 +27,8 @@ Thresholder, ) -from torchmetrics.functional import mean_squared_error +from torchmetrics.functional import mean_squared_error, pearson_corrcoef +from torchmetrics import MeanSquaredError class test_Metrics(ut.TestCase): @@ -142,12 +143,10 @@ def test_target_nan_mask(self): def test_pickling(self): pickle_file = os.path.join(tempfile.gettempdir(), "test_metric_pickled.pkl") - metrics = ["mae", "mse", mean_squared_error] + metrics = ["mae", "mse", MeanSquaredError] target_nan_masks = [None, 2, "ignore"] multitask_handlings = [None, "flatten", "mean-per-label"] - squeeze_targets = [True, False] target_to_ints = [True, False] - other_kwargs = [{}, {"squared": False}] thresholds = [ None, {"threshold": 0.2, "operator": "greater"}, @@ -159,50 +158,44 @@ def test_pickling(self): for metric in metrics: for target_nan_mask in target_nan_masks: - for kwargs in other_kwargs: - for threshold_kwargs in thresholds: - for multitask_handling in multitask_handlings: - for squeeze_target in squeeze_targets: - for target_to_int in target_to_ints: - err_msg = f"{metric} - {target_nan_mask} - {kwargs} - {threshold_kwargs}" - - if (multitask_handling is None) 
and (target_nan_mask == "ignore"): - # Raise with incompatible options - with self.assertRaises(ValueError): - MetricWrapper( - metric=metric, - threshold_kwargs=threshold_kwargs, - target_nan_mask=target_nan_mask, - multitask_handling=multitask_handling, - squeeze_target=squeeze_target, - target_to_int=target_to_int, - **kwargs, - ) - - else: - metric_wrapper = MetricWrapper( - metric=metric, - threshold_kwargs=threshold_kwargs, - target_nan_mask=target_nan_mask, - multitask_handling=multitask_handling, - squeeze_target=squeeze_target, - target_to_int=target_to_int, - **kwargs, - ) - - # Check that the metric can be saved and re-loaded without error - torch.save(metric_wrapper, pickle_file) - metric_wrapper2 = torch.load(pickle_file) - self.assertTrue(metric_wrapper == metric_wrapper2, msg=err_msg) - - # Check that the metric only contains primitive types - state = metric_wrapper.__getstate__() - if state["threshold_kwargs"] is not None: - self.assertIsInstance( - state["threshold_kwargs"], dict, msg=err_msg - ) - if isinstance(metric, str): - self.assertIsInstance(state["metric"], str, msg=err_msg) + for threshold_kwargs in thresholds: + for multitask_handling in multitask_handlings: + for target_to_int in target_to_ints: + err_msg = f"{metric} - {target_nan_mask} - {threshold_kwargs}" + + if (multitask_handling is None) and (target_nan_mask == "ignore"): + # Raise with incompatible options + with self.assertRaises(ValueError): + MetricWrapper( + metric=metric, + threshold_kwargs=threshold_kwargs, + target_nan_mask=target_nan_mask, + multitask_handling=multitask_handling, + target_to_int=target_to_int, + ) + + else: + metric_wrapper = MetricWrapper( + metric=metric, + threshold_kwargs=threshold_kwargs, + target_nan_mask=target_nan_mask, + multitask_handling=multitask_handling, + target_to_int=target_to_int, + ) + + # Check that the metric can be saved and re-loaded without error + torch.save(metric_wrapper, pickle_file) + metric_wrapper2 = torch.load(pickle_file) 
+ self.assertTrue(metric_wrapper == metric_wrapper2, msg=err_msg) + + # Check that the metric only contains primitive types + state = metric_wrapper.__getstate__() + if state["threshold_kwargs"] is not None: + self.assertIsInstance( + state["threshold_kwargs"], dict, msg=err_msg + ) + if isinstance(metric, str): + self.assertIsInstance(state["metric"], str, msg=err_msg) def test_classifigression_target_squeezing(self): preds = torch.Tensor([[0.1, 0.1, 0.3, 0.5, 0.0, 0.1, 0.0, 0.7, 0.2, 0.0]]) @@ -227,6 +220,47 @@ def test_classifigression_target_squeezing(self): assert score == expected_score + def test_update_compute(self): + torch.manual_seed(42) + preds = torch.rand(100, dtype=torch.float32) + target = torch.rand(100, dtype=torch.float32) + + th = 0.7 + + # Test the update and compute with accuracy + preds_greater = preds > th + target_greater = target > th + accuracy = (preds_greater == target_greater).float().mean() + + for batch_size in [1, 5, 25, 100]: + metric = MetricWrapper( + metric="accuracy", threshold_kwargs={"threshold": th, "operator": "greater"}, task="binary", + ) + metric.reset() + for ii in range(0, 100, batch_size): + preds_batch = preds[ii : ii + batch_size] + target_batch = target_greater[ii : ii + batch_size] + metric.update(preds_batch, target_batch) + + self.assertAlmostEqual(metric.compute(), accuracy, places=5) + + # Test the update and compute with pearsonr + pearson = pearson_corrcoef(preds, target) + + for batch_size in [1, 5, 25, 100]: + metric = MetricWrapper( + metric="pearsonr", + ) + metric.reset() + for ii in range(0, 100, batch_size): + preds_batch = preds[ii : ii + batch_size] + target_batch = target[ii : ii + batch_size] + metric.update(preds_batch, target_batch) + + self.assertAlmostEqual(metric.compute().numpy(), pearson.numpy(), places=5) + + + if __name__ == "__main__": ut.main() From 7b89998c28b8cdd1a8aae9d2245848ad6009c73a Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 11 Jul 2024 09:48:03 -0400 Subject: [PATCH 
061/175] duplicated some unit-test fixes from graphium_3.0 branch --- graphium/nn/base_layers.py | 2 +- tests/test_ipu_dataloader.py | 3 +- tests/test_ipu_to_dense_batch.py | 13 --- tests/test_positional_encoders.py | 137 +++++++++++++++++++++--------- 4 files changed, 99 insertions(+), 56 deletions(-) diff --git a/graphium/nn/base_layers.py b/graphium/nn/base_layers.py index dbc89f19f..a88e6ad9b 100644 --- a/graphium/nn/base_layers.py +++ b/graphium/nn/base_layers.py @@ -190,7 +190,7 @@ def forward( # key_padding_mask: [batch, 1, 1, nodes] if key_padding_mask is not None: masked_attn_weights = attn_weights.masked_fill( - key_padding_mask.unsqueeze(1).unsqueeze(2), + key_padding_mask.unsqueeze(1).unsqueeze(2).bool(), # The mask is cast to float somewhere in TransformerEncoder key_padding_mask_value, ) else: diff --git a/tests/test_ipu_dataloader.py b/tests/test_ipu_dataloader.py index 436d609d4..39f754e20 100644 --- a/tests/test_ipu_dataloader.py +++ b/tests/test_ipu_dataloader.py @@ -26,7 +26,6 @@ import torch from torch.utils.data.dataloader import default_collate -from lightning_graphcore import IPUStrategy def random_packing(num_nodes, batch_size): @@ -119,6 +118,8 @@ def test_poptorch_simple_deviceiterations_gradient_accumulation(self): Test a simple version of the device-iterations and gradient accumulation to make sure that the dataloader and models handle them correcly. 
""" + from lightning_graphcore import IPUStrategy + with patch("poptorch.ipuHardwareIsAvailable", return_value=True): with patch("lightning_graphcore.accelerator._IPU_AVAILABLE", new=True): diff --git a/tests/test_ipu_to_dense_batch.py b/tests/test_ipu_to_dense_batch.py index 55c6e3372..f2e29537b 100644 --- a/tests/test_ipu_to_dense_batch.py +++ b/tests/test_ipu_to_dense_batch.py @@ -16,21 +16,8 @@ import torch from torch_geometric.data import Data, Batch from graphium.ipu.to_dense_batch import to_dense_batch -from warnings import warn - - -# General imports -import yaml -import unittest as ut -import numpy as np -from copy import deepcopy -from warnings import warn -from lightning import Trainer, LightningModule -from lightning_graphcore import IPUStrategy -from functools import partial import torch -from torch.utils.data.dataloader import default_collate # Current library imports from graphium.config._loader import load_datamodule, load_metrics, load_architecture, load_accelerator diff --git a/tests/test_positional_encoders.py b/tests/test_positional_encoders.py index 166929ba2..66148487f 100644 --- a/tests/test_positional_encoders.py +++ b/tests/test_positional_encoders.py @@ -1,12 +1,12 @@ """ -------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. +Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates. Use of this software is subject to the terms and conditions outlined in the LICENSE file. Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without warranties of any kind. -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. +Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates are not liable for any damages arising from its use. 
Refer to the LICENSE file for the full terms and conditions. -------------------------------------------------------------------------------- """ @@ -18,19 +18,40 @@ import numpy as np import unittest as ut -from copy import deepcopy from rdkit import Chem import datamol as dm import torch -from scipy.sparse import coo_matrix +from torch_geometric.data import Data + +import graphium +import graphium_cpp -from graphium.features.featurizer import GraphDict -from graphium.features.positional_encoding import graph_positional_encoder from graphium.nn.encoders import laplace_pos_encoder, mlp_encoder, signnet_pos_encoder + # TODO: Test the MLP_encoder and signnet_pos_encoder +def get_pe_tensors(smiles, pos_encoding_tensor): + tensors, _, _ = graphium_cpp.featurize_smiles( + smiles, + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_onehot + torch.tensor(data=[], dtype=torch.int64), # atom_property_list_float + False, # has_conformer + torch.tensor(data=[], dtype=torch.int64), # edge_property_list + pos_encoding_tensor, + True, # duplicate_edges + False, # add_self_loop + False, # explicit_H=False + False, # use_bonds_weights + True, # offset_carbon + 7, # torch float64 + 0, # mask_nan_style_int + 0, # mask_nan_value + ) + return tensors + + class test_positional_encoder(ut.TestCase): smiles = [ "C", @@ -44,22 +65,34 @@ class test_positional_encoder(ut.TestCase): adjs = [Chem.rdmolops.GetAdjacencyMatrix(mol) for mol in mols] def test_laplacian_eigvec_eigval(self): - for ii, adj in enumerate(deepcopy(self.adjs)): + for ii, mol in enumerate(self.smiles): + adj = self.adjs[ii] for num_pos in [1, 2, 4]: # Can't test too much eigs because of multiplicities for disconnected_comp in [True, False]: err_msg = f"adj_id={ii}, num_pos={num_pos}, disconnected_comp={disconnected_comp}" - # returns a dictionary of computed pe - pos_kwargs = { - "pos_type": "laplacian_eigvec", - "num_pos": num_pos, - "disconnected_comp": disconnected_comp, - "pos_level": "node", + features = 
{ + "laplacian_eigval": { + "pos_type": "laplacian_eigval", + "num_pos": num_pos, + "disconnected_comp": disconnected_comp, + "pos_level": "node", + }, + "laplacian_eigvec": { + "pos_type": "laplacian_eigvec", + "num_pos": num_pos, + "disconnected_comp": disconnected_comp, + "pos_level": "node", + }, } - num_nodes = adj.shape[0] - eigvecs, cache = graph_positional_encoder(adj, num_nodes, pos_kwargs=pos_kwargs) - pos_kwargs["pos_type"] = "laplacian_eigval" - eigvals, cache = graph_positional_encoder(adj, num_nodes, pos_kwargs=pos_kwargs) + ( + pos_encoding_names, + pos_encoding_tensor, + ) = graphium_cpp.positional_feature_options_to_tensor(features) + + tensors = get_pe_tensors(mol, pos_encoding_tensor) + eigvals = tensors[4] + eigvecs = tensors[5] self.assertEqual(list(eigvecs.shape), [adj.shape[0], num_pos], msg=err_msg) self.assertEqual(list(eigvals.shape), [adj.shape[0], num_pos], msg=err_msg) @@ -74,7 +107,10 @@ def test_laplacian_eigvec_eigval(self): true_num_pos = min(num_pos, len(true_eigvals)) true_eigvals, true_eigvecs = true_eigvals[:true_num_pos], true_eigvecs[:, :true_num_pos] - if not ("." in self.smiles[ii]): + if not ("." 
in mol): + print( + f"About to test eigvecs for smiles {mol}, num_pos {num_pos}, disconnected_comp {disconnected_comp}" + ) np.testing.assert_array_almost_equal( np.abs(true_eigvecs), np.abs(eigvecs[:, :true_num_pos]), @@ -88,13 +124,22 @@ def test_laplacian_eigvec_eigval(self): # didn't actually check the exact computation result because the code was adapted def test_rwse(self): - for ii, adj in enumerate(deepcopy(self.adjs)): + for ii, mol in enumerate(self.smiles): + adj = self.adjs[ii] for ksteps in [1, 2, 4]: err_msg = f"adj_id={ii}, ksteps={ksteps}" num_nodes = adj.shape[0] pos_kwargs = {"pos_type": "rw_return_probs", "ksteps": ksteps, "pos_level": "node"} - rwse_embed, cache = graph_positional_encoder(adj, num_nodes, pos_kwargs=pos_kwargs) + features = { + "rw_return_probs": pos_kwargs, + } + (pos_encoding_names, pos_encoding_tensor) = graphium_cpp.positional_feature_options_to_tensor( + features + ) + tensors = get_pe_tensors(mol, pos_encoding_tensor) + rwse_embed = tensors[4] + self.assertEqual(list(rwse_embed.shape), [num_nodes, ksteps], msg=err_msg) # TODO: work in progress @@ -105,23 +150,32 @@ def test_rwse(self): """ def test_laplacian_eigvec_with_encoder(self): - for ii, adj in enumerate(deepcopy(self.adjs)): + for ii, mol in enumerate(self.smiles): for num_pos in [2, 4, 8]: # Can't test too much eigs because of multiplicities for disconnected_comp in [True, False]: for model_type in ["Transformer", "DeepSet", "MLP"]: err_msg = f"adj_id={ii}, num_pos={num_pos}, disconnected_comp={disconnected_comp}" - # returns a dictionary of computed pe - pos_kwargs = { - "pos_type": "laplacian_eigvec", - "num_pos": num_pos, - "disconnected_comp": disconnected_comp, - "pos_level": "node", + features = { + "laplacian_eigval": { + "pos_type": "laplacian_eigval", + "num_pos": num_pos, + "disconnected_comp": disconnected_comp, + "pos_level": "node", + }, + "laplacian_eigvec": { + "pos_type": "laplacian_eigvec", + "num_pos": num_pos, + "disconnected_comp": 
disconnected_comp, + "pos_level": "node", + }, } - num_nodes = adj.shape[0] - eigvecs, cache = graph_positional_encoder(adj, num_nodes, pos_kwargs=pos_kwargs) - pos_kwargs["pos_type"] = "laplacian_eigval" - eigvals, cache = graph_positional_encoder(adj, num_nodes, pos_kwargs=pos_kwargs) + ( + pos_encoding_names, + pos_encoding_tensor, + ) = graphium_cpp.positional_feature_options_to_tensor(features) + + tensors = get_pe_tensors(mol, pos_encoding_tensor) input_keys = ["laplacian_eigvec", "laplacian_eigval"] in_dim = num_pos @@ -129,16 +183,17 @@ def test_laplacian_eigvec_with_encoder(self): out_dim = 64 num_layers = 1 - eigvecs = torch.from_numpy(eigvecs) - eigvals = torch.from_numpy(eigvals) - - g = GraphDict( - { - "adj": coo_matrix(adj), - "data": {"laplacian_eigval": eigvals, "laplacian_eigvec": eigvecs}, - } + num_nodes = tensors[2].size(0) + data_dict = { + # "feat": tensors[2], + # "edge_feat": tensors[3], + "laplacian_eigval": tensors[4].float(), + "laplacian_eigvec": tensors[5].float(), + } + # Create the PyG graph object `Data` + data = Data( + edge_index=tensors[0], edge_weight=tensors[1], num_nodes=num_nodes, **data_dict ) - batch = g.make_pyg_graph() encoder = laplace_pos_encoder.LapPENodeEncoder( input_keys=input_keys, @@ -153,7 +208,7 @@ def test_laplacian_eigvec_with_encoder(self): first_normalization=None, ) - hidden_embed = encoder(batch, key_prefix=None) + hidden_embed = encoder(data, key_prefix=None) assert "node" in hidden_embed.keys() self.assertEqual(list(hidden_embed["node"].shape), [num_nodes, out_dim], msg=err_msg) From ab88952738b818ee447b7f987c0b899c4adf5174 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 11 Jul 2024 14:52:20 -0400 Subject: [PATCH 062/175] Fixed the loading of a previous dummy model using older metrics by removing the metrics from the model --- graphium/trainer/metrics.py | 18 +++++++++++++++--- graphium/trainer/predictor.py | 8 ++++---- tests/dummy-pretrained-model.ckpt | Bin 1663627 -> 1667754 bytes 3 files changed, 
19 insertions(+), 7 deletions(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index 77d1f024a..990a373a5 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -20,6 +20,7 @@ from torch import Tensor import operator as op from copy import deepcopy +from loguru import logger from torch.nn.modules.loss import _Loss from torchmetrics.utilities.distributed import reduce @@ -209,7 +210,7 @@ def _initialize_metric(metric, kwargs): """ if not isinstance(metric, type): - if not isinstance(metric, torchmetrics.Metric): + if not isinstance(metric, (torchmetrics.Metric, MetricToTorchMetrics, _Loss)): raise ValueError(f"metric must be a torchmetrics.Metric, provided: {type(metric)}" f"Use `METRICS_DICT` to get the metric class") else: @@ -280,12 +281,23 @@ def _get_metric_class(metric): from graphium.utils.spaces import METRICS_DICT if isinstance(metric, str): - metric_name = metric - metric = METRICS_DICT[metric] + metric_name = MetricWrapper._ipu_metrics_name_conversion(metric) + metric = METRICS_DICT[metric_name] else: metric_name = None metric = metric return metric, metric_name + + @staticmethod + def _ipu_metrics_name_conversion(metric, warning=True): + metric_name = metric + if metric_name.endswith("_ipu"): # For backward compatibility when loading models with metrics for ipu + metric_name = metric_name[:-4] + if metric_name == "average_precision": + metric_name = "averageprecision" + if warning: + logger.warning(f"Using the metric `{metric_name}` instead of `{metric}`") + return metric_name def update(self, preds: Tensor, target: Tensor) -> Tensor: r""" diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index f9886bac5..d973161d1 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -310,7 +310,7 @@ def compute_loss( raise NotImplementedError("Weights are no longer supported in the loss") all_task_losses = { - task: wrapped.update_and_compute(preds=preds[task], 
target=targets[task]) + task: wrapped.update_compute(preds=preds[task], target=targets[task]) for task, wrapped in wrapped_loss_fun_dict.items() } @@ -677,7 +677,7 @@ def list_pretrained_models(): return GRAPHIUM_PRETRAINED_MODELS_DICT # Avoiding circular imports with `space.py` @staticmethod - def load_pretrained_model(name_or_path: str, device: str = None): + def load_pretrained_model(name_or_path: str, device: str = None, strict: bool = True, **kwargs): """Load a pretrained model from its name. Args: @@ -691,7 +691,7 @@ def load_pretrained_model(name_or_path: str, device: str = None): if name is not None: return PredictorModule.load_from_checkpoint( - GRAPHIUM_PRETRAINED_MODELS_DICT[name_or_path], map_location=device + GRAPHIUM_PRETRAINED_MODELS_DICT[name_or_path], map_location=device, strict=strict, **kwargs ) if name is None and not (fs.exists(name_or_path) and fs.get_extension(name_or_path) == "ckpt"): @@ -700,7 +700,7 @@ def load_pretrained_model(name_or_path: str, device: str = None): "or pass a valid checkpoint (.ckpt) path." 
) - return PredictorModule.load_from_checkpoint(name_or_path, map_location=device) + return PredictorModule.load_from_checkpoint(name_or_path, map_location=device, strict=strict, **kwargs) def set_max_nodes_edges_per_graph(self, datamodule: BaseDataModule, stages: Optional[List[str]] = None): datamodule.setup() diff --git a/tests/dummy-pretrained-model.ckpt b/tests/dummy-pretrained-model.ckpt index b1312cffadb65d2c1eb9e7b3a3f3bcef8cbccd7b..e3df0fa4cf887aab0bc87ee7d06b81b2cb21479d 100644 GIT binary patch delta 17985 zcmai+3wRafna58;F62sx0TU}U7r}7M<=m4PzzJvwhQeTvnnq|yA%%nx@FYZ`phFRm z+Llueo7q~dRsjPPC`M%!o*U9~%XJf+r48+UcLx2KPEyZdyvBKv(a^PTga^L}56 z4~N6~y|?fGz4M-#$vi&tYTt>E`=ZxnRl6it?DL&73wuU=jq7?xdK03L}H^|$qT?Hi4R|k7r*7t0@uQ#%OIPwVjczIVot-W`6W6#j$ z$fn`QH_3-PBi|ygwO!=JwLa3jWkb)sn_42@j$M2zaN7a$;k;dusAcvY%Phw(al__y zk%Q!eF8Xg8tamUHqyH|Xynm=??O^|wEu{9yq1cKeGnf7YVavNp6UP19`+7!u2YW{P zhDRb}1U}rAOWJEd2c5@7284H+e5g(k_?-~CbZ>^hByhDXK_|;TDKk@-GN-uX+*Bba&OYPf6y3O(B`|BcTDoQ@ zoo<}WCd%`qYe}r?G&SLfPo%TNSB)QKbLFSo^LNGdk`Pt{XwPJm4d#^LCGuM#bY((h z!t@typ_iy{Wzm)AL@|i|RvJwgSup8^S%~QNT7E1FK~#6v@`A{OscvPda@D6dJdtA_ zX=t~uyeJ6aW1^3p%Ra%A)W=$W?7YB$X)ao-k0@>}RaFfB&ih!ytI`F51yfv@gb3~= zc}Wn0wYe(2EU;jT3zHDNoz{4T>dEPHvR13hwbulK`*5$hLfyjiZF3ge`F3O)om9jS1KA{#I)yYyU)pg~P zppF2OT~tSa4Hs3(%;6ph0oLfMGDTp)L>DGuvt=w>5P|jiSdPGeNiIym28*iX3PP|p zSCy#(3nsXggfmx2mDXI*@^kouILPGIm4Y017O#1Ntjo0$kp&Z6wALV-EZPiF2-f7f zQXw*7k_)r2(V{D}MKM^HYc(PZCb}>Sn=M)`)s)lZqzwBDRGG)$wop=Zw_smFOmtBh zq2y?(6UAU%t~H1(nCikTiP7Q_gTVC%7jJA=c@-5~L;^*+~{h z_9njUO-y!C9ZhVsXf1*=npmf6ts)C1x-bbFExNKm6oYlTuG}IrVX_O;u<@cT5`|!$ zuH7oKV4_<|IOByxa86s=bKI4@;nB=A*VYS?G_y|E`b8E@bkSOy*?7^FjiMN=({*K& z$b_jbOvC1jwnY?zb-ET2SuoLsN!WPNl@S_))9A#)Y4lNkqqi{8b>%)m9W6|CQ5`L8 zzG&M;Ay}ttJ46;tbYT)UUUcOFQ4H4Tx}u0onCikbY`$o_MIl(HYkQ~#zGNk8SeLB2 zu{Xypadw5CpWCN4((C-M7N3W0i=a-Qhs_^ddXrzM&%=%dUHX~GgzbwkO=1EWW)Al< z(dS{uf-b!+GGQAdOv5ISE?pGGU`K;)ydyGUJ7cB6Gl*XH*fYp@PoR6^dR3Pu1zG47 zt9=b;{F}&xDKA>DH!+2bk8|8}In&TbFInw0Qem#Zgefmf!={ib`Ey4}mK)M;kNCVq 
z_)2m$Sv3|A7!cVl17VKD5R!tqZr%{`5jCuK-)Dq!bt5#Dd&|O?xN6mqaFt!CwLNO5GVY^?BU0c3&rtLV3q|YWmrXQKcq95~jD6gfn8u zLYxuPEZFwMRjVr9%uhluTdhN1m0ATBR~s;a5<@FoDE`F&5E6IXzD5w0qpRRiJwE;#F!F1R-m&$CT8Or%E09 zZeAv`948Yo;MPKx;~a0d=ku#2>v1fWQ44$qK{n4igV2`qbI=3Uj-agI=b%TdW3e*d zy@s$>d>rEEooU#47fP2P9k6F7=>GOr$Lg_K(ZP; zu3+?-wX+z{6>@LM2bku%^t`|#A-eHGq1)hW3q59?wS29JpCuu>v9pMroZzjc2dx!D zm5vmR63ypYryj{){E#}U$%#jecC^UNJC5i9YZsv$6Id|GwQ+$#LUHAI5%(aVH=C>^ zT7EIViiF@wK{5CHHOK@XD=+RQLbRpbuMZNr7m$T6RF(4L6C9<2Ol?)E;2R{wR<0{1 zzr(^}2btJoog@P8Z&HHO-<`$e+8YO;cbS|WA1vnfH@(T^?B`H%h=iDvwoZDJsfw(# zS~rG@xy>75V(ZcsK@z66DrHP-AW71H`4J*wTVJn&uip?^sr~gUWlrNB$swk;ZY&a5 zFu6rJh}h0o{B9Zn%S}>Pa*w?-jkn?S-jdyPuktIP_m(UJK?U^Al2w70Q^MWOri9YU zT!95sT9kqbtt(3e5!e{fHV6!u(xMbZXKNLE_m_ZSxLhDj=2Db)! zC&{)jDrfR7nAU=2wh*OlC5^K)xnEu_th03~axLG2X)Vf;93uX5Zb!7R*4B-fz=EkQ z%8?u*e<@F&>EscX;r`7-j#jEPr=l!&*JOE?$5m-<$t?PKLj!%RrI9@7(%(DSzrA;a zIP~x3ZjFo_zSQ*nK1Ws2opM{Ua~B$4{+oQj$>Zaxio#$3XyQvBuPfG?O^aYJe9 zLP|~mzI|@RoHQPN7x3pXFMAL0|G_-EF~eGE0TZgx>rvb8shE|Pr{q)H&RS>WMpVZk z$bW=+$zr8$Bq09P&QQ<=+GU zrzlSq+WZ6HA9|3`@~42$_Tv1Iufp8C4E!JE@Xr8$2lMb1z-RhUe%Y0iKIj8#uNyaf zhHYmJz=JqW9C8zLGZXk1`%xX{6u_5Jo~m;=3-E9Fk<0W8=H^W{2NOP|UTVY*Uk~C`H={b@ zkgvhqYy|$_r#ww(FW~PtW1W3~SG1t~VL#w^V=f164jR76$DQy^Akf=5P#p5CzPkkY;FPm z4GWRX=DmP#T!`v4hXFs1d3Xfy|HND#wK-U43-5|Wz8}O{dkd-~4*3r-H@5@--*EU2 zz-LJ)et0L~_h2qR2>5p?Prb5~cLDxO30*mw7i^x?!N)(bdkw^q+fbcx$iI!b`4iy( zCWrqE;Q5PC{O}uqcVKS5W%HDE@J>uj5a$&fCk}b$VpNCxHt?4)H-7>61C*!sefT2a zFD}M?|1RMFi{r~5*gQF>eO~^F$!|cQt+%3ugeL)?z}(EqO^KMwgO z%nvsJ|C5-nY6Sdun8zW%u^kC5dx3vnI~F=S&srZ4QqcLiwxM+@J4q9zUSq}~pS=We zIS=@+!raUU{87xq1%SVUxmjp)(D^>@8A>sT6YfBD#vwn5x%@fcZ&IGx+2)Oa7cWJE z%XNTvF2(sF-;cRcZ~KD{BTr;-ZoWtjAW`=1I1%KFF*h55|1J*q0Dd0xuov*ln9DxE zTRTyGvLEnml&1d@10bJFvd*0>0@EtnYP&$@-2{|Fr$T z9`OI*@Y#UZd=dLs0=@=w`7Gd%VQx+U{x0US2KcpMoPV?^xxO&w@;1P~gSq)Q;6I~0 zwSNyk0eHckNZ*nt0bg<_sxSH!;16M5auo0jm`AgVlk58o^O9V^Tkpd8uL1mi%%w8G zpTk_90r;Oed?w(Hccc7fJK%$qr|N4i0sQ#gNMCsa;2-1o@&Mq~T`2$ikoRCN{|)#b z=J3ma{|fUs7!%-{xi2XlA 
zu+(brL7JC+6Y$+VsJBhX&r|=@WyrH;CYSLk=H@BDTh<`|@M*wDF*k>>w8fda@6*RX{@_uscFnlK$@pIss<6szp}pZeLXrMF z0({iLmdDmnc1dfbySp#iu3b$=^1!-lO;0qs zKHAV7(VC*&UE0q9Y_87%k{D^|?r3Of@7As%UA)` zOWO!!dC$f5VCL(LMzpSuc1_y^gRx}ro8f(5uXZi`Gb3h)oGqBzabHKwg}8IUpe-6{ zp94vl(;B%r+NxboOzcQwVq7$~p+&V{5Rwex#zX>j7!pFDrgjK75%cd4 z;g`e&hcG~flOgofr;!|WTpB9T24TEcyBS_*)c29$NRQUhsNF)$lW`fg$*+h>A~WzV zVP?nlYhwESG5v;^;Fx|3!}X&{D3Pw!+Lnwy_?2v&i(A?w+GX0UfV*hSwq0axr>6am zI7^0h8@%t;Zim-oXyZv}Yol7EDcaDfMH^eXS~}Y8Fz+DKzCX;qt;7b$c_$3>IJc4C zgfQ=d-+7p1gI!WmFt4Dvuw1*F_)CU*54`Wy?uFN6sHwK4ww>4{;_9=HgJfInAT~U% zP08)PlQ^r-0zd6NZ4HgBk*=<2mv$eF)~4DgE}04Kei*WA-qzBnb!ZR3uRV5Rd$k8` zAMgTarZ0JDv?2I?$^4@*2tW1YAapl3Kpqn(Pum4}ldYQ=hD5LG(stXUWc?j*AR^rz znzjdq>_D2dXrw!;X%E3b4;({sz*%{c$=XZmPqQ@)hP|2zufm>VGqi^R{6H}uAtpR$ zv_#gc34v3JP&mi9*N z2^j9x_S=(5Bc+S7?C}HkIB794S10VV1G8HpbiyZz1zx~GGMqffF|mU^1)x`Z8eS7A zi~Sq@>KTVdA8VwCL=mw^{oWRNmW%uw9qBoT2-7$$X!P{dPxSc)-rbVl_q?s}0#UH9 zMr+!twHM*Hi?lz$E4x6&;g^We$6m5cUM42Ii@jnSkq%?mWyH4mBe9{^xmSsWEuOqw z4TNqTxA-*}U2bo1C`J>#hP-Z%W`Z};uNvArw6-qoPcYc49f8*w_3+}eubB-b=r;y- z-&)r9CJe&0@h#izZQCq)ZEWsXrM&|Kb}u2$BCXm{@;fC)IcJ4r6@j@W~GZ5HzQgXTfJ1`gCCq(}_ zunOKMmivDL*Ka$0(l_92+3b{_p2Bc#`k2z1wCTHd_KQd(?Y_DHHO1i(CvQ&L0?iZ_!OF#|~KAV-pqMLbnB&%uARk zd-`_-i)A*TVcQIRabzCwd2Sz}#S3c^KC<`pzY)}BK1#XHaBGU(rtAlF%DK*lV=2SO zQiCHhsR}N&vHzt}X8%j0<%X3%pjXB9%>KL6f~hjK>ZB9%i+j^*QYA%z?9n0^{X5fS zRVblV@W^lp_@Eg!t^3k~DUwJ^pbU1I%;E>qYNQsl2wpN!04AdjdB{K&Y#F2YS^rq` zz^=5JQY~5qFD*zu)3Ww>Xh8+&?ljfC9!9hRL<#ag-?cpFur&^ZM{r{=HE*MQcp!pbwA`*vj1VpbpM-{CC`5;&A(avq)qh8_SYp=mwsVMO!qr6jZ=(>jzER3w=zrQ6>ax)M&wg4fe#O3i2mytKp?q<-iT zFg*bFsy+fENflsGtP1KU@}SFT6+EIT6WWlW9+RGcGI|2YDN(9TJpx2Y4R{#Q3V10A9mrJkJ)F=3 zV6SPihY_uVmy+0qtSOLW$pL_9=m9X)Y0_F&&^p-5K3QI@3R(q^U@9Dq=oAkp^aPkg zGyKwPbp*0i+ z0dr`cf1j)bm9zvzM$(DE0qaES0)MbT(qgJ;`t8g`!QWD#c+5t`^X8>&Y8CWAd z1J$$$$%Sk3g;&!uc!X0eHX^HAzNTt=3hafi^)RB9@KO?ck$Rbj6M72F;>+c`rkWPP zOGa!&RZeaGN4`S*%T2ugcTU?w9Q)y&)GOH_hzk(R$0nh}OVk-6dkDvX1(L({a}fmC*BH zhm)QzH=vpd8~sNyc_bLQ91XWLP1&4oy1Iku)G!4r@SanY{Y!PMNI7 
zDwpbzyJgyc468zJ^8@XtOl&`DRfg=dcc_eRdV9atco@+Ncx0E{FSAWz&CRHQ?7G`e znb3X=^)$K75B8rjNB@}z;Y#*BM9cTRDlY=NZ8nD%WbBi9fi+Pt8?8kdhP5a&C=r8Y zQ8QCLJyY5cVOh+^IwMmWG%SqUEFm`NaXr+NeDpX|Dnbr2bD-8rMaW_1Hp?=DEAfKJ zVP-bg{c=BIXR#iX+rY-+es;-i#7bFvGK0D#9-$79;4E44N*zU5EVmJhpe~i$h(%D( z%7S=M5nx%Q2uHGHpa00wW{Xx&$d(05jyAWMEVmJhp#F#4Ml6DQQnoC4>f5vRr+(fKH|6Mt<14AB)QK?~nt~4644E^c3LCMv+ zTqwgWf!q}n^L)p-Rt-hS^BKDWxsTwil?&N7tns-!BqomJPsj}xOKcp;UjlB>0e~pr z0H{lIg9Ug#c?w38VOq_(vI9UKf>9g8ise>-n@ZsW3fNV)#RQvrFWmu&dVD=~ltQv=ibHdl7;D}?Nu>ij&ZjgWr15agZ-sr|$W zNJ^N$N`;f6=^})&Xe{hUM>WArz_fMT>~Gvt{E%2kdr51G&3934Py;;!?++W05~HB*f1`JH$Qm* zMqNllArHT3D8rk^$+`)7`bB-1{U69}g#62OAo*0s>_783+3_KdzNpaf5xI$wez^|h zp6U#*4oGE$U2pb(BexM!4?5#=eX6q^R2%(;84HZ7t{*?o*nj=_Svd9Nu@_y9sopSN z77BUh#cZs}6Qnjm`lU-I&%Ef8P3wUPvMXV=u=h>1W1`eXNWWZ2Y&&Z4B-v&m554$` zbh(XC0J)IZcC0Taxh@OvT}AR0MJ{BhQ}P3)DXH+a9^V-9Z94ej-$L>E$k$t)KgqB2 z8uO>-=NsFn=4a=VuMqN|)Sd7hr^Hu&jiXcZr@CL<9Nj^lvMq3j<=K9 z&W)@;!SO#Oct6Ky&PKBX7RSHFI4J5_(0<7W#|Qk6p-Pbxyr_1|;cjB(&ujvod*#Z{8$7`Fen3PmNQrN&7``9E@% zPI;c$8TG{|h(Q703)rQiyv1fyl?s`P{wcFFmX;uuz-Ju)3S<3qjz0ig;7qgfCBw-e zj4x;BPj)R-Pw}&!v@wK3Nagr7!0v^PVmPT$7LqMg8O`jBb)`sUP{4-(yHxZvHk+!H z%T#nfvoo5?kV+t(V-v7TMH#~|RjHDxC>hMoIKLcJoM|5v@IB>tcW3e0|A5&ix%Ous z!^t4ZjTBrpCBV##TPn~>2L=3Bz%CIbpUq|>vaK8K}5sE>d#mtPo)hLF*Y>v}v z(7w^@U+u#I&0YMq(-H*rRZ~+o$(a7aj67O<2W!6sVMUqrYh2w zqAXx`#zWx7r7|es(esgtzKG4HD$mZQ#bW%L9u!{(3%YDf%TGFH-SAY-Tta zM3tv0L?kUm>0oBYE8xW?GAQ7w3z3MvhRtRolKrQ5 zF*D;JcyWmYx;Z{>5lXhQmgB}nxTUOPI2l8=tfeTIF*76ebQHs&fX@T$x)kY`v)NQc z+EVmhW@mhbRRULVyzmU9qVzLNRirINxr*5t?|~avJ_iLHT8yy%b3Xejf}MAg`UZ|) zUW{%P|+v%tCYs#*62$2XpZk{W-9ctpTYV7#T2&(2+joL^YR@lP=x5%2>9JCho& z;IqG7hLf6^!4|e`InoYib9^htBLe;u<8UsYecCz5c|4Ef8!)a@IDQM`_*jnTgpu>` zaUAzzJR;y1Fb)^+*^|#jNsZ6uxb0k&)Nl#MkCEBVojM}m?DG)r5AoR-V(csB_&$v5 z1pNOPhs*fvvww`V!{r=r!FWW#pI{uX;Irq}A?NW*j;|-!xl`+^IDWGZB{g2n@vNU< z=QSK(F2SdAd>G@pxg4MHpU8Q59>=RCSmpRpj7J2Vbv|-_;Q~JU0*v1j@J@{D7V_EO z5bR89_;ikI>v2-g;P^N7Xm)%F#~+c|F6T=#S@Wp=FNEV~a=b}`YdL<6VCNM$zKrAP 
zE6^2KIfvu!6-Zk-m*ayN>$^A}cLAEM@8LKi!FxF#l3!SU-#7 zQL9i=^;(Y4TZNJuU(NCDWVUOkwsHJ9#uv78JTHo8f6MU=670)k>wXjCx)hFQU4opC z2zWhU*W=MJd^01N)WqFSMdDe8YkgNSD`Qd<(x|(d<0gWg8#aEBy$IU2&K>=^YSeedee~Gcam}9jCIS(x1_(qKNCXV06SlPt!oEUOG zDBz!AtZW|3)_oXofit)IX6OIwP*L*9iYs~7GApBbH4Z_*Cg77?vp;6D69uYDK8kXh z>9y>8KgLg5kx1Zdj<*4JiRf=KoD@kuiE@f4Z2^{!g>6V=P{21~tbEI7AH`Vz#dzkt zs2!X;a~-&m<15;6j}Y+lWVSQ%aKQxT{DcmK<3k+BFs|Fh@gsm;XDGgAB6Xg~UUlNH z9Ig;Tlc<$(ekTfHM8J0gc4>qM*lem{KX!6z#Q(;ujMLX3jk>Qn9su0unmsL_Mp%^m z1Ae&H@1LFgtNDqe=CHq?A4|TY|GqXiF>v0=`Su%_NB+ODd~lhve@}tea4q}|1%8gf Me~A5gCyyrZUljlhHUIzs From 6c58733475536ed5bfb6dad2704ab34b2b68caff Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 11 Jul 2024 14:53:46 -0400 Subject: [PATCH 063/175] Minor documentation --- graphium/trainer/metrics.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index 990a373a5..3e4493da1 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -290,10 +290,13 @@ def _get_metric_class(metric): @staticmethod def _ipu_metrics_name_conversion(metric, warning=True): + r""" + Convert the metric name from the removed ipu metrics to the regular torchmetrics metrics + """ metric_name = metric if metric_name.endswith("_ipu"): # For backward compatibility when loading models with metrics for ipu metric_name = metric_name[:-4] - if metric_name == "average_precision": + if metric_name == "average_precision": # A previous typo in the `spaces.py` metric_name = "averageprecision" if warning: logger.warning(f"Using the metric `{metric_name}` instead of `{metric}`") From a9a88100757c7955d1e8287d7e1a11fc2abc65cc Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 11 Jul 2024 15:24:40 -0400 Subject: [PATCH 064/175] Removed the loss from `predictor_summaries` --- graphium/trainer/predictor_summaries.py | 24 +----------------------- 1 file changed, 1 insertion(+), 23 deletions(-) diff --git a/graphium/trainer/predictor_summaries.py 
b/graphium/trainer/predictor_summaries.py index 083bcaf54..6941f8ff1 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -40,7 +40,6 @@ def compute(self, **kwargs) -> Tensor: class SingleTaskSummary(SummaryInterface): def __init__( self, - loss: Tensor, metrics: Dict[str, Callable], step_name: str, n_epochs: int, @@ -51,9 +50,6 @@ def __init__( r""" A container to be used by the Predictor Module that stores the results for the given metrics on the predictions and targets provided. Parameters: - loss_fun: - Loss function used during training. Acceptable strings are 'mse', 'bce', 'mae', 'cosine'. - Otherwise, a callable object must be provided, with a method `loss_fun._get_name()`. metrics: A dictionnary of metrics to compute on the prediction, other than the loss function. @@ -67,14 +63,10 @@ def __init__( The metrics names from `metrics` to display also on the progress bar of the training. If `None`, no metrics are displayed. - monitor: - `str` metric to track (Default=`"loss/val"`) - task_name: name of the task (Default=`None`) """ - self.loss = loss.detach().cpu() self.n_epochs = n_epochs self.step_name = step_name self.metrics = deepcopy(metrics) @@ -185,7 +177,6 @@ def compute(self) -> Dict[str, Tensor]: """ computed_metrics = self._compute(metrics_to_use=self.metrics_to_use) self._cached_metrics = computed_metrics - self._cached_metrics[self.metric_log_name("loss")] = self.loss self._cached_metrics[self.metric_log_name("n_epochs")] = self.n_epochs return computed_metrics @@ -220,7 +211,6 @@ def metric_log_name(self, metric_name): class MultiTaskSummary(SummaryInterface): def __init__( self, - task_loss: Dict[str, Tensor], task_metrics: Dict[str, Dict[str, Callable]], step_name: str, n_epochs: int, @@ -232,17 +222,15 @@ class to store the summaries of the tasks Parameters: """ - self.global_loss = None self.task_metrics = task_metrics self.task_metrics_on_progress_bar = task_metrics_on_progress_bar 
self.task_metrics_on_training_set = task_metrics_on_training_set # Initialize all the single-task summaries - self.tasks = list(task_loss.keys()) + self.tasks = list(task_metrics.keys()) self.task_summaries: Dict[str, SingleTaskSummary] = {} for task in self.tasks: self.task_summaries[task] = SingleTaskSummary( - loss_fun = self.task_loss[task], metrics = self.task_metrics[task], step_name = step_name, n_epochs = n_epochs, @@ -281,14 +269,6 @@ def get_results_on_progress_bar( for task in self.tasks: task_results_prog.update(self.task_summaries[task].get_results_on_progress_bar(step_name)) return task_results_prog - - def add_global_loss(self, loss: Tensor) -> None: - r""" - Add the global loss to be logged with the metrics - Parameters: - loss: the global loss - """ - self.global_loss = loss.detach().cpu() def compute(self) -> Dict[str, Tensor]: r""" @@ -299,8 +279,6 @@ def compute(self) -> Dict[str, Tensor]: computed_metrics = {} for task in self.tasks: computed_metrics.update(self.task_summaries[task].compute()) - if self.global_loss is not None: - computed_metrics[f"{self.step_name}/loss"] = self.global_loss return computed_metrics From 2185697f169a79da8e83ae7945fa7ad237bbb4ef Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 11 Jul 2024 15:48:24 -0400 Subject: [PATCH 065/175] Removed epochs from task summaries --- graphium/trainer/predictor_summaries.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 6941f8ff1..a22a0bf45 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -42,7 +42,6 @@ def __init__( self, metrics: Dict[str, Callable], step_name: str, - n_epochs: int, metrics_on_training_set: Optional[List[str]] = None, metrics_on_progress_bar: Optional[List[str]] = None, task_name: Optional[str] = None, @@ -67,7 +66,6 @@ def __init__( name of the task (Default=`None`) """ - self.n_epochs = n_epochs self.step_name = 
step_name self.metrics = deepcopy(metrics) @@ -177,7 +175,6 @@ def compute(self) -> Dict[str, Tensor]: """ computed_metrics = self._compute(metrics_to_use=self.metrics_to_use) self._cached_metrics = computed_metrics - self._cached_metrics[self.metric_log_name("n_epochs")] = self.n_epochs return computed_metrics @@ -213,7 +210,6 @@ def __init__( self, task_metrics: Dict[str, Dict[str, Callable]], step_name: str, - n_epochs: int, task_metrics_on_training_set: Optional[Dict[str, List[str]]], task_metrics_on_progress_bar: Optional[Dict[str, List[str]]], ): @@ -233,7 +229,6 @@ class to store the summaries of the tasks self.task_summaries[task] = SingleTaskSummary( metrics = self.task_metrics[task], step_name = step_name, - n_epochs = n_epochs, metrics_on_training_set = self.task_metrics_on_training_set[task] if task in self.task_metrics_on_training_set else None, metrics_on_progress_bar = self.task_metrics_on_progress_bar[task] if task in self.task_metrics_on_progress_bar else None, task_name = task, From d37d8185e812fd6b614caad42086068dc77c7083 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 11 Jul 2024 17:11:14 -0400 Subject: [PATCH 066/175] Draft implementing the update/compute logic in the predictor. 
--- graphium/ipu/ipu_wrapper.py | 8 +- graphium/trainer/predictor.py | 159 +++++++++--------------- graphium/trainer/predictor_summaries.py | 29 ++++- 3 files changed, 84 insertions(+), 112 deletions(-) diff --git a/graphium/ipu/ipu_wrapper.py b/graphium/ipu/ipu_wrapper.py index 0ac04b883..305326015 100644 --- a/graphium/ipu/ipu_wrapper.py +++ b/graphium/ipu/ipu_wrapper.py @@ -122,7 +122,7 @@ def training_step(self, batch, batch_idx) -> Dict[str, Any]: features, labels = batch["features"], batch["labels"] features, labels = self.squeeze_input_dims(features, labels) dict_input = {"features": features, "labels": labels} - step_dict = super().training_step(dict_input, to_cpu=False) + step_dict = super().training_step(dict_input) loss = step_dict.pop("loss") step_dict["loss"] = self.poptorch.identity_loss(loss, reduction="mean") @@ -132,7 +132,7 @@ def validation_step(self, batch, batch_idx) -> Dict[str, Any]: features, labels = batch["features"], batch["labels"] features, labels = self.squeeze_input_dims(features, labels) dict_input = {"features": features, "labels": labels} - step_dict = super().validation_step(dict_input, to_cpu=False) + step_dict = super().validation_step(dict_input) return step_dict @@ -141,14 +141,14 @@ def test_step(self, batch, batch_idx) -> Dict[str, Any]: features, labels = batch["features"], batch["labels"] features, labels = self.squeeze_input_dims(features, labels) dict_input = {"features": features, "labels": labels} - step_dict = super().test_step(dict_input, to_cpu=False) + step_dict = super().test_step(dict_input) return step_dict def predict_step(self, **inputs) -> Dict[str, Any]: # Build a dictionary from the tuples dict_input = inputs - step_dict = super().predict_step(dict_input, to_cpu=False) + step_dict = super().predict_step(dict_input) return step_dict diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index d973161d1..fd36709f6 100644 --- a/graphium/trainer/predictor.py +++ 
b/graphium/trainer/predictor.py @@ -14,7 +14,7 @@ import time from copy import deepcopy -from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union +from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union, Literal import lightning import numpy as np @@ -164,10 +164,26 @@ def __init__( self.optim_options.set_kwargs() # Initialize the epoch summary - monitor = self.optim_options.scheduler_kwargs["monitor"].split("/")[0] - mode = self.optim_options.scheduler_kwargs["mode"] - - self.task_epoch_summary = {} + self.task_epoch_summary = { + "train": MultiTaskSummary( + task_metrics=self.metrics, + step_name="train", + task_metrics_on_progress_bar=None, + task_metrics_on_training_set=self.metrics_on_training_set, + ), + "val": MultiTaskSummary( + task_metrics=self.metrics, + step_name="val", + task_metrics_on_progress_bar=self.metrics_on_progress_bar, + task_metrics_on_training_set=None, + ), + "test": MultiTaskSummary( + task_metrics=self.metrics, + step_name="test", + task_metrics_on_progress_bar=None, + task_metrics_on_training_set=None, + ), + } # This helps avoid a bug when saving hparams to yaml with different dict or str formats self._set_hparams(recursive_config_reformating(self.hparams)) @@ -175,8 +191,6 @@ def __init__( # throughput estimation self.mean_val_time_tracker = MovingAverageTracker() self.mean_val_tput_tracker = MovingAverageTracker() - self.validation_step_outputs = [] - self.test_step_outputs = [] self.epoch_start_time = None # Decide whether to log every step or once at the end @@ -319,7 +333,7 @@ def compute_loss( weighted_loss = total_loss / num_tasks return weighted_loss, all_task_losses - def _general_step(self, batch: Dict[str, Tensor], step_name: str, to_cpu: bool) -> Dict[str, Any]: + def _general_step(self, batch: Dict[str, Tensor], step_name: Literal["train", "val", "test"]) -> Dict[str, Any]: r"""Common code for training_step, validation_step and testing_step""" preds = self.forward(batch) # The 
dictionary of predictions @@ -360,7 +374,6 @@ def _general_step(self, batch: Dict[str, Tensor], step_name: str, to_cpu: bool) multitask_handling=self.multitask_handling, ) - device = "cpu" if to_cpu else None for task in preds: task_specific_norm = self.task_norms[task] if self.task_norms is not None else None if hasattr(task_specific_norm, "normalize_val_test"): @@ -373,55 +386,18 @@ def _general_step(self, batch: Dict[str, Tensor], step_name: str, to_cpu: bool) # if normalize_val_test is true, no denormalization is applied, all losses and metrics are normalized version preds[task] = task_specific_norm.denormalize(preds[task]) targets_dict[task] = task_specific_norm.denormalize(targets_dict[task]) - preds[task] = preds[task].detach().to(device=device) - targets_dict[task] = targets_dict[task].detach().to(device=device) - if weights is not None: - weights = weights.detach().to(device=device) + preds[task] = preds[task].detach() + targets_dict[task] = targets_dict[task].detach() - self.task_epoch_summary.update(targets_dict, preds) + self.task_epoch_summary[step_name].update(targets_dict, preds) step_dict = {} step_dict["loss"] = loss step_dict["task_losses"] = task_losses return step_dict - def update_metrics(self, - preds: Dict[str, Tensor], - targets: Dict[str, Tensor], - step_name: str, - weights: Optional[Tensor]=None,) -> None: - r""" - Compute the loss using the specified loss function, and dealing with - the nans in the `targets`. - - Parameters: - preds: - Predicted values - - targets: - Target values - - step_name: - The name of the step ("train", "val", "test") - - weights: - No longer supported, will raise an error. - - """ - - if weights is not None: - raise NotImplementedError("Weights are no longer supported in the metrics") - - - # TODO!! - # Lost of changes from the `predictor_summaries.py` file, with `Summary.get_metrics_logs` computing the metrics at the end of an epoch. - - # DON'T FORGET TO RESET ALL METRICS!! 
- - - - def flag_step(self, batch: Dict[str, Tensor], step_name: str, to_cpu: bool) -> Dict[str, Any]: + def flag_step(self, batch: Dict[str, Tensor], step_name: Literal["train", "val", "test"]) -> Dict[str, Any]: r""" Perform adversarial data agumentation during one training step using FLAG. Paper: https://arxiv.org/abs/2010.09891 @@ -477,22 +453,22 @@ def flag_step(self, batch: Dict[str, Tensor], step_name: str, to_cpu: bool) -> D ) loss = loss / n_steps - device = "cpu" if to_cpu else None for key in preds.keys(): - preds[key] = preds[key].detach().to(device=device) - targets[key] = targets[key].detach().to(device=device) + preds[key] = preds[key].detach() + targets[key] = targets[key].detach() if weights is not None: - weights = weights.detach().to(device=device) + weights = weights.detach() step_dict = {} step_dict[f"loss/{step_name}"] = loss.detach().cpu() step_dict["loss"] = loss step_dict["task_losses"] = task_losses + self.task_epoch_summary[step_name].update(targets, preds) return step_dict def on_train_batch_start(self, batch: Any, batch_idx: int) -> Optional[int]: - # TODO: Initialize the `task_epoch_summary` for training + self.task_epoch_summary["train"].reset() self.train_batch_start_time = time.time() self.skip_log_train_metrics = (self.metrics_every_n_train_steps is None) or ( (batch_idx % self.metrics_every_n_train_steps) != 0 @@ -538,7 +514,9 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: metrics_logs["train/batch_time"] = train_batch_time metrics_logs["train/batch_tput"] = tput - metrics_logs.update(self.task_epoch_summary["train"].compute()) + metrics_computed = self.task_epoch_summary["train"].compute() + self.task_epoch_summary["train"].reset() + metrics_logs.update(metrics_computed) # Log the metrics if self.logger is not None: @@ -546,50 +524,33 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: metrics_logs, step=self.global_step ) # This is a pytorch lightning function call - def 
training_step(self, batch: Dict[str, Tensor], to_cpu: bool = True) -> Dict[str, Any]: + def training_step(self, batch: Dict[str, Tensor]) -> Dict[str, Any]: step_dict = None # Train using FLAG if self.flag_kwargs["n_steps"] > 0: - step_dict = self.flag_step(batch=batch, step_name="train", to_cpu=to_cpu) + step_dict = self.flag_step(batch=batch, step_name="train") # Train normally, without using FLAG elif self.flag_kwargs["n_steps"] == 0: - # step_dict = self._general_step(batch=batch, step_name="train", to_cpu=True) - step_dict = self._general_step(batch=batch, step_name="train", to_cpu=to_cpu) + # step_dict = self._general_step(batch=batch, step_name="train") + step_dict = self._general_step(batch=batch, step_name="train") - # Remove the preds and targets if no logging is required - if self.skip_log_train_metrics: - step_dict.pop("preds") - step_dict.pop("targets") return step_dict # Returning the metrics_logs with the loss - def validation_step(self, batch: Dict[str, Tensor], to_cpu: bool = True) -> Dict[str, Any]: - return self._general_step(batch=batch, step_name="val", to_cpu=to_cpu) + def validation_step(self, batch: Dict[str, Tensor]) -> Dict[str, Any]: + return self._general_step(batch=batch, step_name="val") - def test_step(self, batch: Dict[str, Tensor], to_cpu: bool = True) -> Dict[str, Any]: - return self._general_step(batch=batch, step_name="test", to_cpu=to_cpu) + def test_step(self, batch: Dict[str, Tensor]) -> Dict[str, Any]: + return self._general_step(batch=batch, step_name="test") - def _general_epoch_end(self, outputs: Dict[str, Any], step_name: str, device: str) -> None: + def _general_epoch_end(self, step_name: Literal["train", "val", "test"]) -> Dict[str, Tensor]: r"""Common code for training_epoch_end, validation_epoch_end and testing_epoch_end""" # Transform the list of dict of dict, into a dict of list of dict - preds = {} - targets = {} - for task in self.tasks: - preds[task] = torch.cat([out["preds"][task].to(device) for out in outputs], 
dim=0) - targets[task] = torch.cat([out["targets"][task].to(device) for out in outputs], dim=0) - - self.task_epoch_summary.update( - step_name=step_name, - preds=preds, - targets=targets, - loss=loss, - task_losses=task_losses, - n_epochs=self.current_epoch, - ) - metrics_logs = self.task_epoch_summary.get_metrics_logs() - - return metrics_logs # Consider returning concatenated dict for logging + + metric_logs = self.task_epoch_summary[step_name].compute() + self.task_epoch_summary[step_name].reset() + return metric_logs def on_train_epoch_start(self) -> None: self.epoch_start_time = time.time() @@ -605,6 +566,7 @@ def on_train_epoch_end(self) -> None: def on_validation_epoch_start(self) -> None: self.mean_val_time_tracker.reset() self.mean_val_tput_tracker.reset() + self.task_epoch_summary["val"].reset() return super().on_validation_epoch_start() def on_validation_batch_start(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> None: @@ -612,34 +574,27 @@ def on_validation_batch_start(self, batch: Any, batch_idx: int, dataloader_idx: return super().on_validation_batch_start(batch, batch_idx, dataloader_idx) def on_validation_batch_end( - self, outputs: Any, batch: Any, batch_idx: int, dataloader_idx: int = 0 + self, outputs, batch: Any, batch_idx: int, dataloader_idx: int = 0 ) -> None: val_batch_time = time.time() - self.validation_batch_start_time - self.validation_step_outputs.append(outputs) self.mean_val_time_tracker.update(val_batch_time) num_graphs = self.get_num_graphs(batch["features"]) self.mean_val_tput_tracker.update(num_graphs / val_batch_time) return super().on_validation_batch_end(outputs, batch, batch_idx, dataloader_idx) def on_validation_epoch_end(self) -> None: - metrics_logs = self._general_epoch_end( - outputs=self.validation_step_outputs, step_name="val", device="cpu" - ) - self.validation_step_outputs.clear() - # TODO: Use the update and compute, rather than the old logic! 
Make sure to reset the metrics - aggregated_metrics_logs = self.task_epoch_summary.aggregate_metrics_logs(metrics_logs) - aggregated_metrics_logs["val/mean_time"] = torch.tensor(self.mean_val_time_tracker.mean_value) - aggregated_metrics_logs["val/mean_tput"] = self.mean_val_tput_tracker.mean_value - self.log_dict(aggregated_metrics_logs, sync_dist=True) + metrics_logs = self._general_epoch_end(step_name="val") + metrics_logs["val/mean_time"] = torch.tensor(self.mean_val_time_tracker.mean_value) + metrics_logs["val/mean_tput"] = self.mean_val_tput_tracker.mean_value + self.log_dict(metrics_logs, sync_dist=True) - def on_test_batch_end(self, outputs: Any, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> None: - self.test_step_outputs.append(outputs) + def on_test_epoch_start(self) -> None: + self.task_epoch_summary["test"].reset() + return super().on_test_epoch_start() def on_test_epoch_end(self) -> None: - metrics_logs = self._general_epoch_end(outputs=self.test_step_outputs, step_name="test", device="cpu") - self.test_step_outputs.clear() + metrics_logs = self._general_epoch_end(step_name="test") aggregated_metrics_logs = self.task_epoch_summary.aggregate_metrics_logs(metrics_logs) - self.log_dict(aggregated_metrics_logs, sync_dist=True) def on_train_start(self): diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index a22a0bf45..89bf0c1dd 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -35,12 +35,15 @@ def update(self, targets: Tensor, preds: Tensor) -> None: def compute(self, **kwargs) -> Tensor: raise NotImplementedError() + + def reset(self) -> None: + raise NotImplementedError() class SingleTaskSummary(SummaryInterface): def __init__( self, - metrics: Dict[str, Callable], + metrics: Dict[str, Union[Metric, "MetricWrapper"]], step_name: str, metrics_on_training_set: Optional[List[str]] = None, metrics_on_progress_bar: Optional[List[str]] = None, @@ -177,6 
+180,13 @@ def compute(self) -> Dict[str, Tensor]: self._cached_metrics = computed_metrics return computed_metrics + + def reset(self) -> None: + r""" + reset the state of the metrics + """ + for metric in self.metrics.values(): + metric.reset() def get_results_on_progress_bar( self, @@ -208,10 +218,10 @@ def metric_log_name(self, metric_name): class MultiTaskSummary(SummaryInterface): def __init__( self, - task_metrics: Dict[str, Dict[str, Callable]], + task_metrics: Dict[str, Dict[str, Union[Metric, "MetricWrapper"]]], step_name: str, - task_metrics_on_training_set: Optional[Dict[str, List[str]]], - task_metrics_on_progress_bar: Optional[Dict[str, List[str]]], + task_metrics_on_training_set: Optional[Dict[str, List[str]]] = None, + task_metrics_on_progress_bar: Optional[Dict[str, List[str]]] = None, ): r""" class to store the summaries of the tasks @@ -219,8 +229,8 @@ class to store the summaries of the tasks """ self.task_metrics = task_metrics - self.task_metrics_on_progress_bar = task_metrics_on_progress_bar - self.task_metrics_on_training_set = task_metrics_on_training_set + self.task_metrics_on_progress_bar = task_metrics_on_progress_bar if task_metrics_on_progress_bar is not None else {} + self.task_metrics_on_training_set = task_metrics_on_training_set if task_metrics_on_training_set is not None else {} # Initialize all the single-task summaries self.tasks = list(task_metrics.keys()) @@ -275,6 +285,13 @@ def compute(self) -> Dict[str, Tensor]: for task in self.tasks: computed_metrics.update(self.task_summaries[task].compute()) return computed_metrics + + def reset(self) -> None: + r""" + reset the state of the metrics + """ + for task in self.tasks: + self.task_summaries[task].reset() class STDMetric(Metric): From b4524f99999676f1e5329b0b139931144085fbfd Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 11 Jul 2024 17:22:13 -0400 Subject: [PATCH 067/175] Fix the std metric. Still needs testing. 
--- graphium/trainer/predictor_summaries.py | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 89bf0c1dd..ab9cfdacb 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -14,7 +14,7 @@ r"""Classes to store information about resulting evaluation metrics when using a Predictor Module.""" -from typing import Any, Callable, Dict, List, Optional, Union +from typing import Any, Callable, Dict, List, Optional, Union, Literal from loguru import logger from copy import deepcopy @@ -22,6 +22,7 @@ import torch from torch import Tensor from torchmetrics import MeanMetric, Metric +from torchmetrics.aggregation import BaseAggregator from graphium.utils.tensor import nan_mean, nan_std, nan_median, tensor_fp16_to_fp32 @@ -294,13 +295,27 @@ def reset(self) -> None: self.task_summaries[task].reset() -class STDMetric(Metric): +class STDMetric(BaseAggregator): """ A metric to compute the standard deviation of the predictions or targets. Based on `torchmetrics.Metric`, with a similar implementation to `torchmetric.MeanMetric`. 
+ + Parameters: + nan_strategy: options: + - ``'error'``: if any `nan` values are encountered will give a RuntimeError + - ``'warn'``: if any `nan` values are encountered will give a warning and continue + - ``'ignore'``: all `nan` values are silently removed + - a float: if a float is provided will impute any `nan` values with this value + """ - def __init__(self, dist_sync_on_step=False): - super().__init__(dist_sync_on_step=dist_sync_on_step) + def __init__(self, nan_strategy: Union[Literal["error", "warn", "ignore"], float], **kwargs): + super().__init__( + "sum", + default_value=torch.tensor(0.0, dtype=torch.get_default_dtype()), + nan_strategy=nan_strategy, + state_name="mean_value", + **kwargs, + ) self.add_state("sum", default=torch.tensor(0.0), dist_reduce_fx="sum") self.add_state("sum_of_squares", default=torch.tensor(0.0), dist_reduce_fx="sum") self.add_state("total_weight", default=torch.tensor(0.0), dist_reduce_fx="sum") From 5040c473720379a45d82f96cdead031e2b619415 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 11 Jul 2024 17:40:49 -0400 Subject: [PATCH 068/175] fixed all errors arising in `test_finetuning.py` --- graphium/trainer/metrics.py | 6 +++--- graphium/trainer/predictor_summaries.py | 7 ++++--- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index 3e4493da1..f48d1f425 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -383,7 +383,7 @@ def compute(self) -> Tensor: r""" Compute the metric with the method `self.compute` """ - if self.multitask_handling == "mean-per-label": + if isinstance(self.metric, list): metrics = [metric.compute() for metric in self.metric] return nan_mean(torch.stack(metrics)) @@ -402,7 +402,7 @@ def reset(self): r""" Reset the metric with the method `self.metric.reset` """ - if self.multitask_handling == "mean-per-label": + if isinstance(self.metric, list): for metric in self.metric: metric.reset() else: @@ -435,7 
+435,7 @@ def __repr__(self): r""" Control how the class is printed """ - full_str = f"{self.metric.__name__}" + full_str = f"{self.metric.__repr__()}" if self.thresholder is not None: full_str += f"({self.thresholder})" diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index ab9cfdacb..336c529b0 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -91,8 +91,8 @@ def __init__( self.metrics["grad_norm"] = GradientNormMetric() # Parse the metrics filters - metrics_on_training_set = self._parse_metrics_filter(metrics_on_training_set) - metrics_on_progress_bar = self._parse_metrics_filter(metrics_on_progress_bar) + self.metrics_on_training_set = self._parse_metrics_filter(metrics_on_training_set) + self.metrics_on_progress_bar = self._parse_metrics_filter(metrics_on_progress_bar) self._cached_metrics: Dict[str, Tensor] = {} @@ -157,8 +157,9 @@ def _compute(self, metrics_to_use: Optional[Union[List[str], Dict[str, Any]]] = # Compute the metrics computed_metrics = {} - for metric_key, metric_obj in metrics_to_use: + for metric_key in metrics_to_use: metric_name = self.metric_log_name(metric_key) + metric_obj = self.metrics[metric_key] try: computed_metrics[f"{self.step_name}/{metric_name}"] = metric_obj.compute() except Exception as e: From e761e08399406a4e7bf22856cab870d50ee327b9 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 12 Jul 2024 11:04:15 -0400 Subject: [PATCH 069/175] Fixed the `test_training.py` unit test --- expts/hydra-configs/tasks/loss_metrics_datamodule/toymix.yaml | 2 ++ graphium/trainer/predictor.py | 3 +-- tests/test_training.py | 3 +-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/expts/hydra-configs/tasks/loss_metrics_datamodule/toymix.yaml b/expts/hydra-configs/tasks/loss_metrics_datamodule/toymix.yaml index 9ac744a52..1a979a2fa 100644 --- a/expts/hydra-configs/tasks/loss_metrics_datamodule/toymix.yaml +++ 
b/expts/hydra-configs/tasks/loss_metrics_datamodule/toymix.yaml @@ -44,6 +44,7 @@ metrics: target_to_int: True num_classes: 2 average: micro + task: binary threshold_kwargs: &threshold_05 operator: greater threshold: 0.5 @@ -53,6 +54,7 @@ metrics: metric: precision multitask_handling: mean-per-label average: micro + task: binary threshold_kwargs: *threshold_05 zinc: *qm9_metrics diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index fd36709f6..bdd99987a 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -594,8 +594,7 @@ def on_test_epoch_start(self) -> None: def on_test_epoch_end(self) -> None: metrics_logs = self._general_epoch_end(step_name="test") - aggregated_metrics_logs = self.task_epoch_summary.aggregate_metrics_logs(metrics_logs) - self.log_dict(aggregated_metrics_logs, sync_dist=True) + self.log_dict(metrics_logs, sync_dist=True) def on_train_start(self): hparams_log = deepcopy(self.hparams) diff --git a/tests/test_training.py b/tests/test_training.py index 3ac31fc35..aeec93689 100644 --- a/tests/test_training.py +++ b/tests/test_training.py @@ -19,8 +19,7 @@ import os from unittest.mock import patch - -class TestCLITraining: +class test_CLITraining(): @classmethod def setup_class(cls): print("Setting up the test class...") From 5d60fbf8ca590e9cdd1ec19d70574af82ca1f528 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 12 Jul 2024 11:05:32 -0400 Subject: [PATCH 070/175] Standardized the test names --- tests/test_data_utils.py | 2 +- tests/test_datamodule.py | 2 +- tests/test_dataset.py | 2 +- tests/test_finetuning.py | 6 +++--- tests/test_ipu_dataloader.py | 4 ++-- tests/test_ipu_to_dense_batch.py | 2 +- tests/test_loaders.py | 2 +- tests/test_multitask_datamodule.py | 2 +- tests/test_utils.py | 2 +- 9 files changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/test_data_utils.py b/tests/test_data_utils.py index 6b73110de..961cef5a7 100644 --- a/tests/test_data_utils.py +++ 
b/tests/test_data_utils.py @@ -18,7 +18,7 @@ import tempfile -class TestDataUtils(ut.TestCase): +class test_DataUtils(ut.TestCase): def test_list_datasets( self, ): diff --git a/tests/test_datamodule.py b/tests/test_datamodule.py index 824b80d50..a9ed6b045 100644 --- a/tests/test_datamodule.py +++ b/tests/test_datamodule.py @@ -25,7 +25,7 @@ TEMP_CACHE_DATA_PATH = "tests/temp_cache_0000" -class Test_DataModule(ut.TestCase): +class test_DataModule(ut.TestCase): def test_ogb_datamodule(self): # other datasets are too large to be tested dataset_names = ["ogbg-molhiv", "ogbg-molpcba", "ogbg-moltox21", "ogbg-molfreesolv"] diff --git a/tests/test_dataset.py b/tests/test_dataset.py index 4a7173244..0a377c35a 100644 --- a/tests/test_dataset.py +++ b/tests/test_dataset.py @@ -20,7 +20,7 @@ from graphium.data.utils import get_keys -class Test_Multitask_Dataset(ut.TestCase): +class test_Multitask_Dataset(ut.TestCase): # Then we can choose different rows and columns for the tests as we see fit. # Remember tests are supposed to be FAST, and reading from the file system multiple times slows things down. 
diff --git a/tests/test_finetuning.py b/tests/test_finetuning.py index 52484c4c9..8d80b8f34 100644 --- a/tests/test_finetuning.py +++ b/tests/test_finetuning.py @@ -40,7 +40,7 @@ os.chdir(MAIN_DIR) -class Test_Finetuning(ut.TestCase): +class test_Finetuning(ut.TestCase): def test_finetuning_from_task_head(self): # Skip test if PyTDC package not installed try: @@ -149,7 +149,7 @@ def test_finetuning_from_task_head(self): ################################################# # Define test callback that checks for correct (un)freezing - class TestCallback(Callback): + class test_Callback(Callback): def __init__(self, cfg): super().__init__() @@ -335,7 +335,7 @@ def test_finetuning_from_gnn(self): ################################################# # Define test callback that checks for correct (un)freezing - class TestCallback(Callback): + class test_Callback(Callback): def __init__(self, cfg): super().__init__() diff --git a/tests/test_ipu_dataloader.py b/tests/test_ipu_dataloader.py index 39f754e20..29a9c57c4 100644 --- a/tests/test_ipu_dataloader.py +++ b/tests/test_ipu_dataloader.py @@ -47,7 +47,7 @@ def global_batch_collator(batch_size, batches): @pytest.mark.ipu class test_DataLoading(ut.TestCase): - class TestSimpleLightning(LightningModule): + class test_SimpleLightning(LightningModule): # Create a basic Ligthning for testing the batch sizes def __init__(self, batch_size, node_feat_size, edge_feat_size, num_batch) -> None: super().__init__() @@ -98,7 +98,7 @@ def assert_shapes(self, batch, batch_idx, step): def configure_optimizers(self): return torch.optim.Adam(self.parameters(), lr=1e-3) - class TestDataset(torch.utils.data.Dataset): + class test_Dataset(torch.utils.data.Dataset): # Create a simple dataset for testing the Lightning integration def __init__(self, labels, node_features, edge_features): self.labels = labels diff --git a/tests/test_ipu_to_dense_batch.py b/tests/test_ipu_to_dense_batch.py index f2e29537b..157b3b647 100644 --- 
a/tests/test_ipu_to_dense_batch.py +++ b/tests/test_ipu_to_dense_batch.py @@ -24,7 +24,7 @@ @pytest.mark.ipu -class TestIPUBatch: +class test_IPUBatch: @pytest.fixture(autouse=True) def setup_class(self): self.in_dim = 12 diff --git a/tests/test_loaders.py b/tests/test_loaders.py index 22611f32f..d4bfbc993 100644 --- a/tests/test_loaders.py +++ b/tests/test_loaders.py @@ -17,7 +17,7 @@ import unittest as ut -class TestLoader(ut.TestCase): +class test_Loader(ut.TestCase): def test_merge_dicts(self): dict_a = {"a": {"b": {"c": 1, "d": 2}, "e": 3}, "f": 4} diff --git a/tests/test_multitask_datamodule.py b/tests/test_multitask_datamodule.py index b8d2119e1..81b5188df 100644 --- a/tests/test_multitask_datamodule.py +++ b/tests/test_multitask_datamodule.py @@ -23,7 +23,7 @@ import graphium -class Test_Multitask_DataModule(ut.TestCase): +class test_Multitask_DataModule(ut.TestCase): def setUp(self): # Create a temporary directory self.tmp_test_dir = tempfile.mkdtemp() diff --git a/tests/test_utils.py b/tests/test_utils.py index b6a7b171c..d0225a999 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -205,7 +205,7 @@ def test_safe_run(self): print("This is not an error") -class TestTensorFp16ToFp32(ut.TestCase): +class test_TensorFp16ToFp32(ut.TestCase): def test_tensor_fp16_to_fp32(self): # Create a tensor tensor = torch.randn(10, 10).half() From b59428a53979aaf95dc345927f37a5a41b6fd04e Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 12 Jul 2024 12:24:06 -0400 Subject: [PATCH 071/175] Fixed some unit-tests that were broken by previous changes --- tests/test_finetuning.py | 8 +- tests/test_ipu_dataloader.py | 4 +- tests/test_positional_encoders.py | 137 +++++++++--------------------- 3 files changed, 47 insertions(+), 102 deletions(-) diff --git a/tests/test_finetuning.py b/tests/test_finetuning.py index 8d80b8f34..0fd006b38 100644 --- a/tests/test_finetuning.py +++ b/tests/test_finetuning.py @@ -149,7 +149,7 @@ def test_finetuning_from_task_head(self): 
################################################# # Define test callback that checks for correct (un)freezing - class test_Callback(Callback): + class CallbackTesting(Callback): def __init__(self, cfg): super().__init__() @@ -223,7 +223,7 @@ def on_train_epoch_start(self, trainer, pl_module): trainer.callbacks.append(GraphFinetuning(**finetuning_training_kwargs)) # Add test callback to trainer - trainer.callbacks.append(TestCallback(cfg)) + trainer.callbacks.append(CallbackTesting(cfg)) predictor.set_max_nodes_edges_per_graph(datamodule, stages=["train", "val"]) @@ -335,7 +335,7 @@ def test_finetuning_from_gnn(self): ################################################# # Define test callback that checks for correct (un)freezing - class test_Callback(Callback): + class CallbackTesting(Callback): def __init__(self, cfg): super().__init__() @@ -398,7 +398,7 @@ def on_train_epoch_start(self, trainer, pl_module): trainer.callbacks.append(GraphFinetuning(**finetuning_training_kwargs)) # Add test callback to trainer - trainer.callbacks.append(TestCallback(cfg)) + trainer.callbacks.append(CallbackTesting(cfg)) predictor.set_max_nodes_edges_per_graph(datamodule, stages=["train", "val"]) diff --git a/tests/test_ipu_dataloader.py b/tests/test_ipu_dataloader.py index 29a9c57c4..2b018206e 100644 --- a/tests/test_ipu_dataloader.py +++ b/tests/test_ipu_dataloader.py @@ -47,7 +47,7 @@ def global_batch_collator(batch_size, batches): @pytest.mark.ipu class test_DataLoading(ut.TestCase): - class test_SimpleLightning(LightningModule): + class SimpleLightningTesting(LightningModule): # Create a basic Ligthning for testing the batch sizes def __init__(self, batch_size, node_feat_size, edge_feat_size, num_batch) -> None: super().__init__() @@ -98,7 +98,7 @@ def assert_shapes(self, batch, batch_idx, step): def configure_optimizers(self): return torch.optim.Adam(self.parameters(), lr=1e-3) - class test_Dataset(torch.utils.data.Dataset): + class DatasetTesting(torch.utils.data.Dataset): # 
Create a simple dataset for testing the Lightning integration def __init__(self, labels, node_features, edge_features): self.labels = labels diff --git a/tests/test_positional_encoders.py b/tests/test_positional_encoders.py index 66148487f..166929ba2 100644 --- a/tests/test_positional_encoders.py +++ b/tests/test_positional_encoders.py @@ -1,12 +1,12 @@ """ -------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates. +Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. Use of this software is subject to the terms and conditions outlined in the LICENSE file. Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without warranties of any kind. -Valence Labs, Recursion Pharmaceuticals, Graphcore Limited, and NVIDIA Corporation & Affiliates are not liable for any damages arising from its use. +Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. Refer to the LICENSE file for the full terms and conditions. 
-------------------------------------------------------------------------------- """ @@ -18,40 +18,19 @@ import numpy as np import unittest as ut +from copy import deepcopy from rdkit import Chem import datamol as dm import torch -from torch_geometric.data import Data - -import graphium -import graphium_cpp +from scipy.sparse import coo_matrix +from graphium.features.featurizer import GraphDict +from graphium.features.positional_encoding import graph_positional_encoder from graphium.nn.encoders import laplace_pos_encoder, mlp_encoder, signnet_pos_encoder - # TODO: Test the MLP_encoder and signnet_pos_encoder -def get_pe_tensors(smiles, pos_encoding_tensor): - tensors, _, _ = graphium_cpp.featurize_smiles( - smiles, - torch.tensor(data=[], dtype=torch.int64), # atom_property_list_onehot - torch.tensor(data=[], dtype=torch.int64), # atom_property_list_float - False, # has_conformer - torch.tensor(data=[], dtype=torch.int64), # edge_property_list - pos_encoding_tensor, - True, # duplicate_edges - False, # add_self_loop - False, # explicit_H=False - False, # use_bonds_weights - True, # offset_carbon - 7, # torch float64 - 0, # mask_nan_style_int - 0, # mask_nan_value - ) - return tensors - - class test_positional_encoder(ut.TestCase): smiles = [ "C", @@ -65,34 +44,22 @@ class test_positional_encoder(ut.TestCase): adjs = [Chem.rdmolops.GetAdjacencyMatrix(mol) for mol in mols] def test_laplacian_eigvec_eigval(self): - for ii, mol in enumerate(self.smiles): - adj = self.adjs[ii] + for ii, adj in enumerate(deepcopy(self.adjs)): for num_pos in [1, 2, 4]: # Can't test too much eigs because of multiplicities for disconnected_comp in [True, False]: err_msg = f"adj_id={ii}, num_pos={num_pos}, disconnected_comp={disconnected_comp}" - features = { - "laplacian_eigval": { - "pos_type": "laplacian_eigval", - "num_pos": num_pos, - "disconnected_comp": disconnected_comp, - "pos_level": "node", - }, - "laplacian_eigvec": { - "pos_type": "laplacian_eigvec", - "num_pos": num_pos, - 
"disconnected_comp": disconnected_comp, - "pos_level": "node", - }, + # returns a dictionary of computed pe + pos_kwargs = { + "pos_type": "laplacian_eigvec", + "num_pos": num_pos, + "disconnected_comp": disconnected_comp, + "pos_level": "node", } - ( - pos_encoding_names, - pos_encoding_tensor, - ) = graphium_cpp.positional_feature_options_to_tensor(features) - - tensors = get_pe_tensors(mol, pos_encoding_tensor) - eigvals = tensors[4] - eigvecs = tensors[5] + num_nodes = adj.shape[0] + eigvecs, cache = graph_positional_encoder(adj, num_nodes, pos_kwargs=pos_kwargs) + pos_kwargs["pos_type"] = "laplacian_eigval" + eigvals, cache = graph_positional_encoder(adj, num_nodes, pos_kwargs=pos_kwargs) self.assertEqual(list(eigvecs.shape), [adj.shape[0], num_pos], msg=err_msg) self.assertEqual(list(eigvals.shape), [adj.shape[0], num_pos], msg=err_msg) @@ -107,10 +74,7 @@ def test_laplacian_eigvec_eigval(self): true_num_pos = min(num_pos, len(true_eigvals)) true_eigvals, true_eigvecs = true_eigvals[:true_num_pos], true_eigvecs[:, :true_num_pos] - if not ("." in mol): - print( - f"About to test eigvecs for smiles {mol}, num_pos {num_pos}, disconnected_comp {disconnected_comp}" - ) + if not ("." 
in self.smiles[ii]): np.testing.assert_array_almost_equal( np.abs(true_eigvecs), np.abs(eigvecs[:, :true_num_pos]), @@ -124,22 +88,13 @@ def test_laplacian_eigvec_eigval(self): # didn't actually check the exact computation result because the code was adapted def test_rwse(self): - for ii, mol in enumerate(self.smiles): - adj = self.adjs[ii] + for ii, adj in enumerate(deepcopy(self.adjs)): for ksteps in [1, 2, 4]: err_msg = f"adj_id={ii}, ksteps={ksteps}" num_nodes = adj.shape[0] pos_kwargs = {"pos_type": "rw_return_probs", "ksteps": ksteps, "pos_level": "node"} - features = { - "rw_return_probs": pos_kwargs, - } - (pos_encoding_names, pos_encoding_tensor) = graphium_cpp.positional_feature_options_to_tensor( - features - ) - tensors = get_pe_tensors(mol, pos_encoding_tensor) - rwse_embed = tensors[4] - + rwse_embed, cache = graph_positional_encoder(adj, num_nodes, pos_kwargs=pos_kwargs) self.assertEqual(list(rwse_embed.shape), [num_nodes, ksteps], msg=err_msg) # TODO: work in progress @@ -150,32 +105,23 @@ def test_rwse(self): """ def test_laplacian_eigvec_with_encoder(self): - for ii, mol in enumerate(self.smiles): + for ii, adj in enumerate(deepcopy(self.adjs)): for num_pos in [2, 4, 8]: # Can't test too much eigs because of multiplicities for disconnected_comp in [True, False]: for model_type in ["Transformer", "DeepSet", "MLP"]: err_msg = f"adj_id={ii}, num_pos={num_pos}, disconnected_comp={disconnected_comp}" - features = { - "laplacian_eigval": { - "pos_type": "laplacian_eigval", - "num_pos": num_pos, - "disconnected_comp": disconnected_comp, - "pos_level": "node", - }, - "laplacian_eigvec": { - "pos_type": "laplacian_eigvec", - "num_pos": num_pos, - "disconnected_comp": disconnected_comp, - "pos_level": "node", - }, + # returns a dictionary of computed pe + pos_kwargs = { + "pos_type": "laplacian_eigvec", + "num_pos": num_pos, + "disconnected_comp": disconnected_comp, + "pos_level": "node", } - ( - pos_encoding_names, - pos_encoding_tensor, - ) = 
graphium_cpp.positional_feature_options_to_tensor(features) - - tensors = get_pe_tensors(mol, pos_encoding_tensor) + num_nodes = adj.shape[0] + eigvecs, cache = graph_positional_encoder(adj, num_nodes, pos_kwargs=pos_kwargs) + pos_kwargs["pos_type"] = "laplacian_eigval" + eigvals, cache = graph_positional_encoder(adj, num_nodes, pos_kwargs=pos_kwargs) input_keys = ["laplacian_eigvec", "laplacian_eigval"] in_dim = num_pos @@ -183,17 +129,16 @@ def test_laplacian_eigvec_with_encoder(self): out_dim = 64 num_layers = 1 - num_nodes = tensors[2].size(0) - data_dict = { - # "feat": tensors[2], - # "edge_feat": tensors[3], - "laplacian_eigval": tensors[4].float(), - "laplacian_eigvec": tensors[5].float(), - } - # Create the PyG graph object `Data` - data = Data( - edge_index=tensors[0], edge_weight=tensors[1], num_nodes=num_nodes, **data_dict + eigvecs = torch.from_numpy(eigvecs) + eigvals = torch.from_numpy(eigvals) + + g = GraphDict( + { + "adj": coo_matrix(adj), + "data": {"laplacian_eigval": eigvals, "laplacian_eigvec": eigvecs}, + } ) + batch = g.make_pyg_graph() encoder = laplace_pos_encoder.LapPENodeEncoder( input_keys=input_keys, @@ -208,7 +153,7 @@ def test_laplacian_eigvec_with_encoder(self): first_normalization=None, ) - hidden_embed = encoder(data, key_prefix=None) + hidden_embed = encoder(batch, key_prefix=None) assert "node" in hidden_embed.keys() self.assertEqual(list(hidden_embed["node"].shape), [num_nodes, out_dim], msg=err_msg) From 632d4dcbf03a2f6a5161b45b23ed317e305d3a7c Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 12 Jul 2024 14:56:52 -0400 Subject: [PATCH 072/175] Added `pytdc` to the tests --- .github/workflows/test.yml | 3 +++ tests/test_utils.py | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 64846bd91..8bcda85f6 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -49,6 +49,9 @@ jobs: - name: Install library run: python -m pip install 
--no-deps -e . # `-e` required for correct `coverage` run. + - name: Install test dependencies + run: mamba install -c conda-forge pytdc # Required to run the `test_finetuning.py` + - name: Run tests run: pytest -m 'not ipu' diff --git a/tests/test_utils.py b/tests/test_utils.py index d0225a999..24c80ab2e 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -22,7 +22,6 @@ import unittest as ut import gzip -from graphium.utils.read_file import file_opener from graphium.utils.tensor import ( nan_mad, nan_mean, From 0fa2d86e1e63f4b361dc88ea2cb6a11a15246cac Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 12 Jul 2024 15:00:34 -0400 Subject: [PATCH 073/175] Changed mamba install tdc to pip install, in the `test.yml` file --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 8bcda85f6..a51e5a8a4 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -50,7 +50,7 @@ jobs: run: python -m pip install --no-deps -e . # `-e` required for correct `coverage` run. - name: Install test dependencies - run: mamba install -c conda-forge pytdc # Required to run the `test_finetuning.py` + run: python -m pip install pytdc # Required to run the `test_finetuning.py` - name: Run tests run: pytest -m 'not ipu' From 2441f4326826f6bc296871e15938ff15fb7b8941 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 12 Jul 2024 15:01:22 -0400 Subject: [PATCH 074/175] Added '--no-deps' to TDC installation in `test.yml` --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a51e5a8a4..50d9886dc 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -50,7 +50,7 @@ jobs: run: python -m pip install --no-deps -e . # `-e` required for correct `coverage` run. 
- name: Install test dependencies - run: python -m pip install pytdc # Required to run the `test_finetuning.py` + run: python -m pip install pytdc --no-deps -e . # Required to run the `test_finetuning.py` - name: Run tests run: pytest -m 'not ipu' From 326b6e78b8202d88025fc20d74b0e286c588ed2e Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 12 Jul 2024 15:02:58 -0400 Subject: [PATCH 075/175] Woops --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 50d9886dc..9d9c56edb 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -50,7 +50,7 @@ jobs: run: python -m pip install --no-deps -e . # `-e` required for correct `coverage` run. - name: Install test dependencies - run: python -m pip install pytdc --no-deps -e . # Required to run the `test_finetuning.py` + run: python -m pip install pytdc --no-deps # Required to run the `test_finetuning.py` - name: Run tests run: pytest -m 'not ipu' From 641fa37dd974157f14ee8748c2c0fb4aaabf75e3 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 12 Jul 2024 15:06:49 -0400 Subject: [PATCH 076/175] Fixed issue with building docs --- docs/api/graphium.ipu.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/api/graphium.ipu.md b/docs/api/graphium.ipu.md index 2fdf82416..3943e78a4 100644 --- a/docs/api/graphium.ipu.md +++ b/docs/api/graphium.ipu.md @@ -22,11 +22,6 @@ Code for adapting to run on IPU ::: graphium.ipu.ipu_losses -## IPU Metrics ------------- -::: graphium.ipu.ipu_metrics - - ## IPU Simple Lightning ------------ ::: graphium.ipu.ipu_simple_lightning From 2b85dce3de0839705da207255fe0f2ff8989cb4a Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 12 Jul 2024 15:11:21 -0400 Subject: [PATCH 077/175] Removed old file from breaking docs building --- docs/api/graphium.utils.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docs/api/graphium.utils.md b/docs/api/graphium.utils.md index 
5804a060e..632c6ea06 100644 --- a/docs/api/graphium.utils.md +++ b/docs/api/graphium.utils.md @@ -46,10 +46,6 @@ module for utility functions ::: graphium.utils.mup -## Read File ----------------- -::: graphium.utils.read_file - ## Safe Run ---------------- ::: graphium.utils.safe_run From 0c93a0f22ad0ec9055cbd721feb2f423121bf75b Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 12 Jul 2024 15:26:40 -0400 Subject: [PATCH 078/175] Changed to micromamba to install pytdc --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9d9c56edb..39d51baff 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -50,7 +50,7 @@ jobs: run: python -m pip install --no-deps -e . # `-e` required for correct `coverage` run. - name: Install test dependencies - run: python -m pip install pytdc --no-deps # Required to run the `test_finetuning.py` + run: micromamba install -c conda-forge pytdc # Required to run the `test_finetuning.py` - name: Run tests run: pytest -m 'not ipu' From ec235fcccb90658917772c6cbf19de9a6122d01d Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 12 Jul 2024 17:06:35 -0400 Subject: [PATCH 079/175] Added tests for the `STDMetric` and `GradientNormMetric` and fixed their outputs --- graphium/trainer/predictor_summaries.py | 21 +++- tests/test_predictor_summaries.py | 156 ++++++++++++++++++++++++ 2 files changed, 171 insertions(+), 6 deletions(-) create mode 100644 tests/test_predictor_summaries.py diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 336c529b0..4e8ee4851 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -302,6 +302,10 @@ class STDMetric(BaseAggregator): Based on `torchmetrics.Metric`, with a similar implementation to `torchmetric.MeanMetric`. Parameters: + correction: + The correction to apply to the standard deviation. 
Instead of dividing by number of samples `N`, + we divide by `N-correction`. + nan_strategy: options: - ``'error'``: if any `nan` values are encountered will give a RuntimeError - ``'warn'``: if any `nan` values are encountered will give a warning and continue @@ -309,7 +313,7 @@ class STDMetric(BaseAggregator): - a float: if a float is provided will impute any `nan` values with this value """ - def __init__(self, nan_strategy: Union[Literal["error", "warn", "ignore"], float], **kwargs): + def __init__(self, nan_strategy: Union[Literal["error", "warn", "ignore"], float]="warn", correction:int=0, **kwargs): super().__init__( "sum", default_value=torch.tensor(0.0, dtype=torch.get_default_dtype()), @@ -320,6 +324,7 @@ def __init__(self, nan_strategy: Union[Literal["error", "warn", "ignore"], float self.add_state("sum", default=torch.tensor(0.0), dist_reduce_fx="sum") self.add_state("sum_of_squares", default=torch.tensor(0.0), dist_reduce_fx="sum") self.add_state("total_weight", default=torch.tensor(0.0), dist_reduce_fx="sum") + self.correction = correction def update(self, value: Union[float, Tensor], weight: Union[float, Tensor] = 1.0) -> None: if not isinstance(value, Tensor): @@ -338,10 +343,12 @@ def update(self, value: Union[float, Tensor], weight: Union[float, Tensor] = 1.0 self.total_weight += weight.sum() def compute(self) -> Tensor: + dividor = max(0, self.total_weight - self.correction) mean = self.sum / self.total_weight mean_of_squares = self.sum_of_squares / self.total_weight variance = mean_of_squares - mean ** 2 - return torch.sqrt(variance) + variance_corr = variance * (self.total_weight / dividor) + return torch.sqrt(variance_corr) class GradientNormMetric(Metric): """ @@ -350,15 +357,17 @@ class GradientNormMetric(Metric): """ def __init__(self, dist_sync_on_step=False): super().__init__(dist_sync_on_step=dist_sync_on_step) - self.add_state("gradient_norm", default=torch.tensor(0.0), dist_reduce_fx="sum") + self.add_state("gradient_norm_sq", 
default=torch.tensor(0.0), dist_reduce_fx="sum") + self.add_state("total_steps", default=torch.tensor(0.0), dist_reduce_fx="sum") def update(self, model: torch.nn.Module) -> None: - grad_norm = torch.tensor(0.0) + total_norm = torch.tensor(0.0) for p in model.parameters(): if p.grad is not None: param_norm = p.grad.detach().data.norm(2) total_norm += param_norm.detach().cpu() ** 2 - self.gradient_norm_sq += grad_norm + self.gradient_norm_sq += total_norm + self.total_steps += 1 def compute(self) -> Tensor: - return self.gradient_norm_sq.sqrt() + return (self.gradient_norm_sq / self.total_steps).sqrt() diff --git a/tests/test_predictor_summaries.py b/tests/test_predictor_summaries.py new file mode 100644 index 000000000..05cff113e --- /dev/null +++ b/tests/test_predictor_summaries.py @@ -0,0 +1,156 @@ +""" +-------------------------------------------------------------------------------- +Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. + +Use of this software is subject to the terms and conditions outlined in the LICENSE file. +Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without +warranties of any kind. + +Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. +Refer to the LICENSE file for the full terms and conditions. 
+-------------------------------------------------------------------------------- +""" + + +""" +Unit tests for the file graphium/trainer/predictor.py +""" + +import torch +from torch import nn +from torchmetrics import MeanAbsoluteError, PearsonCorrCoef +import unittest as ut + +from graphium.trainer.predictor_summaries import SingleTaskSummary, MultiTaskSummary, STDMetric, GradientNormMetric + + +class test_TaskSummary(ut.TestCase): + + def test_std_metric(self): + + # Generate random data + torch.random.manual_seed(42) + rand = torch.rand(100, 1) + + # Compute expected values for STD + expected_std = torch.std(rand, correction=0) + + # Compute std metric + std_metric = STDMetric() + std_metric.update(rand) + std_metric_val = std_metric.compute() + std_metric.reset() + + self.assertAlmostEqual(std_metric_val.item(), expected_std.item(), places=5) + + # Check multiple updates + std_metric.update(rand[:10]) + std_metric.update(rand[10:25]) + std_metric.update(rand[25:]) + std_metric_val = std_metric.compute() + std_metric.reset() + + self.assertAlmostEqual(std_metric_val.item(), expected_std.item(), places=5) + + # Add some correction + expected_std = torch.std(rand, correction=1) + std_metric = STDMetric(correction=1) + std_metric.update(rand) + std_metric_val = std_metric.compute() + std_metric.reset() + + self.assertAlmostEqual(std_metric_val.item(), expected_std.item(), places=5) + + # Add some nans + rand[[3, 5, 11, 23, 42, 56, 78, 99]] = float('nan') + expected_std = torch.std(rand[~rand.isnan()], correction=0) + + std_metric = STDMetric(nan_strategy='ignore', correction=0) + std_metric.update(rand) + std_metric_val = std_metric.compute() + std_metric.reset() + + self.assertAlmostEqual(std_metric_val.item(), expected_std.item(), places=5) + + def test_gradient_norm_metric(self): + # Define a simple neural network with 2 layers + class SimpleNN(nn.Module): + def __init__(self): + super(SimpleNN, self).__init__() + torch.random.manual_seed(42) + # Define the 
first layer with 10 input features and 5 output features + self.layer1 = nn.Linear(10, 5) + # Define the second layer with 5 input features and 1 output feature + self.layer2 = nn.Linear(5, 1) + + def forward(self, x): + # Pass the input through the first layer + x = torch.relu(self.layer1(x)) + # Pass the output of the first layer through the second layer + x = self.layer2(x) + return x + + # Create an instance of the neural network, optimizer and loss function + model = SimpleNN() + optimizer = torch.optim.SGD(model.parameters(), lr=0.01) + loss_fn = nn.MSELoss() + + # Generate random data + torch.random.manual_seed(42) + LEN = 10000 + inputs = torch.rand(LEN, 10) + targets = torch.rand(LEN, 1) + + # Compute expected values for gradient norm + optimizer.zero_grad() + outputs = model(inputs) + loss = loss_fn(outputs, targets) + loss.backward() + expected_grad_norm = torch.norm(torch.stack([torch.norm(param.grad) for param in model.parameters()])) + optimizer.zero_grad() + + # Compute gradient norm metric + grad_norm_metric = GradientNormMetric() + optimizer.zero_grad() + outputs = model(inputs) + loss = loss_fn(outputs, targets) + loss.backward() + grad_norm_metric.update(model) + grad_norm_metric_val = grad_norm_metric.compute() + grad_norm_metric.reset() + + self.assertAlmostEqual(grad_norm_metric_val.item(), expected_grad_norm.item(), places=5) + + # Compute gradient norm metric with many update steps + grad_norm_metric = GradientNormMetric() + optimizer.zero_grad() + outputs = model(inputs[:10]) + loss = loss_fn(outputs, targets[:10]) + loss.backward() + grad_norm_metric.update(model) + optimizer.zero_grad() + outputs = model(inputs[10:50]) + loss = loss_fn(outputs, targets[10:50]) + loss.backward() + grad_norm_metric.update(model) + optimizer.zero_grad() + outputs = model(inputs[50:300]) + loss = loss_fn(outputs, targets[50:300]) + loss.backward() + grad_norm_metric.update(model) + optimizer.zero_grad() + outputs = model(inputs[300:]) + loss = 
loss_fn(outputs, targets[300:]) + loss.backward() + grad_norm_metric.update(model) + + grad_norm_metric_val = grad_norm_metric.compute() + grad_norm_metric.reset() + + self.assertAlmostEqual(grad_norm_metric_val.item(), expected_grad_norm.item(), places=2) + + + + +if __name__ == "__main__": + ut.main() From 38d03e12d743813b260c6e8285ad5d188183c300 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 12 Jul 2024 18:01:03 -0400 Subject: [PATCH 080/175] Implemented test of MultiTaskSummaries. Only an error left for the mean and std of preds and targets --- graphium/trainer/predictor_summaries.py | 43 ++++++++--- tests/test_predictor_summaries.py | 95 +++++++++++++++++++++++++ 2 files changed, 129 insertions(+), 9 deletions(-) diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 4e8ee4851..9038c4d79 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -49,6 +49,8 @@ def __init__( metrics_on_training_set: Optional[List[str]] = None, metrics_on_progress_bar: Optional[List[str]] = None, task_name: Optional[str] = None, + compute_mean: bool = True, + compute_std: bool = True, ): r""" A container to be used by the Predictor Module that stores the results for the given metrics on the predictions and targets provided. @@ -69,8 +71,16 @@ def __init__( task_name: name of the task (Default=`None`) + compute_mean: + whether to compute the mean of the predictions and targets + + compute_std: + whether to compute the standard deviation of the predictions and targets + """ self.step_name = step_name + if not isinstance(metrics, dict): + raise ValueError(f"metrics must be a dictionary. 
Got {type(metrics)}") self.metrics = deepcopy(metrics) # Current predictor state @@ -79,16 +89,14 @@ def __init__( self.logged_metrics_exceptions = [] # Track which metric exceptions have been logged # Add default metrics - if "mean_pred" not in self.metrics: + if ("mean_pred" not in self.metrics) and compute_mean: self.metrics["mean_pred"] = MeanMetric(nan_strategy="ignore") - if "mean_target" not in self.metrics: + if ("mean_target" not in self.metrics) and compute_mean: self.metrics["mean_target"] = MeanMetric(nan_strategy="ignore") - if "std_pred" not in self.metrics: + if ("std_pred" not in self.metrics) and compute_std: self.metrics["std_pred"] = STDMetric(nan_strategy="ignore") - if "std_target" not in self.metrics: + if ("std_target" not in self.metrics) and compute_std: self.metrics["std_target"] = STDMetric(nan_strategy="ignore") - if ("grad_norm" not in self.metrics) and (step_name == "train"): - self.metrics["grad_norm"] = GradientNormMetric() # Parse the metrics filters self.metrics_on_training_set = self._parse_metrics_filter(metrics_on_training_set) @@ -107,6 +115,8 @@ def _parse_metrics_filter(self, filter: Optional[Union[List[str], Dict[str, Any] filter = list(filter.keys()) elif isinstance(filter, list): filter = filter + elif isinstance(filter, str): + filter = [filter] else: raise ValueError(f"metrics_to_use must be a list or a dictionary. Got {type(filter)}") @@ -114,7 +124,7 @@ def _parse_metrics_filter(self, filter: Optional[Union[List[str], Dict[str, Any] all_metrics = set(self.metrics.keys()) filter = set(filter) if not filter.issubset(all_metrics): - raise ValueError(f"metrics_to_use must be a subset of the metrics. Got {filter - all_metrics}") + raise ValueError(f"metrics_to_use must be a subset of the metrics. 
Got {filter - all_metrics}, available {all_metrics}") return filter @@ -161,10 +171,10 @@ def _compute(self, metrics_to_use: Optional[Union[List[str], Dict[str, Any]]] = metric_name = self.metric_log_name(metric_key) metric_obj = self.metrics[metric_key] try: - computed_metrics[f"{self.step_name}/{metric_name}"] = metric_obj.compute() + computed_metrics[f"{metric_name}"] = metric_obj.compute() except Exception as e: # If the metric computation fails, return NaN and log a warning only once - computed_metrics[f"{self.step_name}/{metric_name}"] = torch.as_tensor(float("nan")) + computed_metrics[f"{metric_name}"] = torch.as_tensor(float("nan")) # Warn only if it's the first warning for that metric if metric_name not in self.logged_metrics_exceptions: self.logged_metrics_exceptions.append(metric_name) @@ -224,11 +234,20 @@ def __init__( step_name: str, task_metrics_on_training_set: Optional[Dict[str, List[str]]] = None, task_metrics_on_progress_bar: Optional[Dict[str, List[str]]] = None, + compute_mean: bool = True, + compute_std: bool = True, ): r""" class to store the summaries of the tasks Parameters: + + compute_mean: + whether to compute the mean of the predictions and targets + + compute_std: + whether to compute the standard deviation of the predictions and targets + """ self.task_metrics = task_metrics self.task_metrics_on_progress_bar = task_metrics_on_progress_bar if task_metrics_on_progress_bar is not None else {} @@ -244,6 +263,8 @@ class to store the summaries of the tasks metrics_on_training_set = self.task_metrics_on_training_set[task] if task in self.task_metrics_on_training_set else None, metrics_on_progress_bar = self.task_metrics_on_progress_bar[task] if task in self.task_metrics_on_progress_bar else None, task_name = task, + compute_mean = compute_mean, + compute_std = compute_std, ) def update(self, targets: Dict[str, Tensor], preds: Dict[str, Tensor]) -> None: @@ -354,6 +375,10 @@ class GradientNormMetric(Metric): """ A metric to compute the norm 
of the gradient. Based on `torchmetrics.Metric`. + + Warning: + This metric is not compatible with other metrics since it doesn't take + the predictions and targets as input. It takes the model as input. """ def __init__(self, dist_sync_on_step=False): super().__init__(dist_sync_on_step=dist_sync_on_step) diff --git a/tests/test_predictor_summaries.py b/tests/test_predictor_summaries.py index 05cff113e..51267d78d 100644 --- a/tests/test_predictor_summaries.py +++ b/tests/test_predictor_summaries.py @@ -19,6 +19,7 @@ import torch from torch import nn from torchmetrics import MeanAbsoluteError, PearsonCorrCoef +from copy import deepcopy import unittest as ut from graphium.trainer.predictor_summaries import SingleTaskSummary, MultiTaskSummary, STDMetric, GradientNormMetric @@ -149,8 +150,102 @@ def forward(self, x): self.assertAlmostEqual(grad_norm_metric_val.item(), expected_grad_norm.item(), places=2) + def assertDictTensorAlmostEqual(self, dict1, dict2, places=7): + dict1 = deepcopy(dict1) + dict2 = deepcopy(dict2) + for key in dict1.keys(): + dict1[key] = round(dict1[key].item(), places) + for key in dict2.keys(): + dict2[key] = round(dict2[key].item(), places) + self.assertDictEqual(dict1, dict2) + def test_multi_task_summary(self): + + # Generate random data + torch.random.manual_seed(42) + targets = torch.rand(100, 3) + preds = torch.rand(100, 3) + 0.4 * targets + targets = {f"task{i+1}": targets[:, i] for i in range(targets.shape[1])} + preds = {f"task{i+1}": preds[:, i] for i in range(preds.shape[1])} + + task_metrics = { + "task1": {'mae': MeanAbsoluteError(), 'pearson': PearsonCorrCoef()}, + "task2": {'pearson': PearsonCorrCoef()}, + "task3": {'mae': MeanAbsoluteError()} + } + + expected_dict = {} + for task, metrics in task_metrics.items(): + for metric_name, metric in metrics.items(): + metric.update(preds[task], targets[task]) + expected_val = metric.compute() + metric.reset() + expected_dict[f"{task}/{metric_name}/val"] = expected_val + + + # Test the 
metrics on validation step + summary_val = MultiTaskSummary(task_metrics, step_name="val", compute_mean=False, compute_std=False) + summary_val.update(preds, targets) + summary_dict = summary_val.compute() + self.assertDictTensorAlmostEqual(summary_dict, expected_dict, places=5) + + # Test the metric reset + summary_val.reset() + summary_val.update(preds, targets) + summary_dict = summary_val.compute() + self.assertDictTensorAlmostEqual(summary_dict, expected_dict, places=5) + + # Test multiple batches + summary_val.reset() + preds1 = {key: preds[key][:10] for key in preds.keys()} + targets1 = {key: targets[key][:10] for key in targets.keys()} + preds2 = {key: preds[key][10:25] for key in preds.keys()} + targets2 = {key: targets[key][10:25] for key in targets.keys()} + preds3 = {key: preds[key][25:] for key in preds.keys()} + targets3 = {key: targets[key][25:] for key in targets.keys()} + + summary_val.update(preds1, targets1) + summary_val.update(preds2, targets2) + summary_val.update(preds3, targets3) + summary_dict = summary_val.compute() + self.assertDictTensorAlmostEqual(summary_dict, expected_dict, places=5) + + # Test the mean and std computation + summary_val = MultiTaskSummary(task_metrics, step_name="val", compute_mean=True, compute_std=True) + summary_val.update(preds, targets) + summary_dict = summary_val.compute() + expected_dict_mean_std = {} + for task in task_metrics.keys(): + expected_dict_mean_std[f"{task}/mean_pred/val"] = preds[task].mean() + expected_dict_mean_std[f"{task}/std_pred/val"] = preds[task].std(correction=0) + expected_dict_mean_std[f"{task}/mean_target/val"] = targets[task].mean() + expected_dict_mean_std[f"{task}/std_target/val"] = targets[task].std(correction=0) + expected_dict_mean_std.update(expected_dict) + self.assertDictTensorAlmostEqual(summary_dict, expected_dict_mean_std, places=5) + + # Test the training step doesn't return anything when no metrics on training set are selected + summary_train = 
MultiTaskSummary(task_metrics, step_name="train", task_metrics_on_training_set=None, compute_mean=False, compute_std=False) + summary_train.update(preds, targets) + summary_train = summary_train.compute() + self.assertDictEqual(summary_train, {}) + + # Test the training step returns only the mae + task_metrics_on_training_set = {"task1": ["mae"], "task2": None, "task3": "mae"} + summary_train = MultiTaskSummary(task_metrics, step_name="train", task_metrics_on_training_set=task_metrics_on_training_set, compute_mean=False, compute_std=False) + summary_train.update(preds, targets) + summary_dict = summary_train.compute() + expected_dict_mae = {key: value for key, value in expected_dict.items() if "mae" in key} + expected_dict_mae = {key.replace("/val", "/train"): value for key, value in expected_dict_mae.items()} + self.assertDictTensorAlmostEqual(summary_dict, expected_dict_mae, places=5) + + # Test the training step returns only the mae with multiple steps + summary_train = MultiTaskSummary(task_metrics, step_name="train", task_metrics_on_training_set=task_metrics_on_training_set, compute_mean=False, compute_std=False) + summary_train.update(preds1, targets1) + summary_train.update(preds2, targets2) + summary_train.update(preds3, targets3) + summary_dict = summary_train.compute() + self.assertDictTensorAlmostEqual(summary_dict, expected_dict_mae, places=5) if __name__ == "__main__": ut.main() From d6f62a4e4611bbcabaf8b113deb095d7b36569b4 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 12 Jul 2024 22:44:42 -0400 Subject: [PATCH 081/175] Fixed the `preds` and `targets` that were inverted in `TaskSummary` --- graphium/trainer/predictor_summaries.py | 41 +++++++++++++++++++------ tests/test_predictor_summaries.py | 6 ++-- 2 files changed, 36 insertions(+), 11 deletions(-) diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 9038c4d79..dded3ed27 100644 --- a/graphium/trainer/predictor_summaries.py +++ 
b/graphium/trainer/predictor_summaries.py @@ -17,6 +17,7 @@ from typing import Any, Callable, Dict, List, Optional, Union, Literal from loguru import logger from copy import deepcopy +import inspect import numpy as np import torch @@ -31,7 +32,7 @@ class SummaryInterface(object): An interface to define the functions implemented by summary classes that implement SummaryInterface. """ - def update(self, targets: Tensor, preds: Tensor) -> None: + def update(self, preds: Tensor, targets: Tensor) -> None: raise NotImplementedError() def compute(self, **kwargs) -> Tensor: @@ -89,12 +90,12 @@ def __init__( self.logged_metrics_exceptions = [] # Track which metric exceptions have been logged # Add default metrics - if ("mean_pred" not in self.metrics) and compute_mean: - self.metrics["mean_pred"] = MeanMetric(nan_strategy="ignore") + if ("mean_preds" not in self.metrics) and compute_mean: + self.metrics["mean_preds"] = MeanMetric(nan_strategy="ignore") if ("mean_target" not in self.metrics) and compute_mean: self.metrics["mean_target"] = MeanMetric(nan_strategy="ignore") - if ("std_pred" not in self.metrics) and compute_std: - self.metrics["std_pred"] = STDMetric(nan_strategy="ignore") + if ("std_preds" not in self.metrics) and compute_std: + self.metrics["std_preds"] = STDMetric(nan_strategy="ignore") if ("std_target" not in self.metrics) and compute_std: self.metrics["std_target"] = STDMetric(nan_strategy="ignore") @@ -141,17 +142,39 @@ def metrics_to_use(self) -> Dict[str, Callable]: return metrics_to_use return self.metrics - def update(self, targets: Tensor, preds: Tensor) -> None: + def update(self, preds: Tensor, targets: Tensor, model: Optional[torch.nn.Module] = None) -> None: r""" update the state of the predictor Parameters: targets: the targets tensor predictions: the predictions tensor + model: the model, optional for some metrics like `GradientNormMetric` """ for metric_key, metric_obj in self.metrics_to_use.items(): try: - metric_obj.update(preds, targets) + 
# Check the `metric_obj.update` signature to know if it takes `preds` and `targets` or only one of them + varnames = [val.name for val in inspect.signature(metric_obj.update).parameters.values()] + if ("preds" == varnames[0]) and ("target" == varnames[1]): + # The typical case of `torchmetrics` + metric_obj.update(preds, targets) + if ("preds" == varnames[1]) and ("target" == varnames[0]): + # Unusual case where the order of the arguments is reversed + metric_obj.update(targets, preds) + elif ("value" == varnames[0]) and ("preds" in metric_key): + # The case where the metric takes only one value, and it is the prediction + metric_obj.update(preds) + elif ("value" == varnames[0]) and ("target" in metric_key): + # The case where the metric takes only one value, and it is the target + metric_obj.update(targets) + elif ("model" == varnames[0]): + # The case where the metric takes the model as input + if model is None: + raise ValueError(f"Metric {metric_key} requires the model as input.") + metric_obj.update(model) + else: + raise ValueError(f"Metric {metric_key} update method signature `{varnames}` is not recognized.") + except: pass @@ -267,7 +290,7 @@ class to store the summaries of the tasks compute_std = compute_std, ) - def update(self, targets: Dict[str, Tensor], preds: Dict[str, Tensor]) -> None: + def update(self, preds: Dict[str, Tensor], targets: Dict[str, Tensor]) -> None: r""" update the state for all predictors @@ -277,8 +300,8 @@ def update(self, targets: Dict[str, Tensor], preds: Dict[str, Tensor]) -> None: """ for task in self.tasks: self.task_summaries[task].update( - targets[task], preds[task].detach(), + targets[task], ) def get_results_on_progress_bar( diff --git a/tests/test_predictor_summaries.py b/tests/test_predictor_summaries.py index 51267d78d..8536a6c4a 100644 --- a/tests/test_predictor_summaries.py +++ b/tests/test_predictor_summaries.py @@ -152,7 +152,9 @@ def forward(self, x): def assertDictTensorAlmostEqual(self, dict1, dict2, places=7): 
dict1 = deepcopy(dict1) + dict1 = {key: dict1[key] for key in sorted(dict1.keys())} dict2 = deepcopy(dict2) + dict2 = {key: dict2[key] for key in sorted(dict2.keys())} for key in dict1.keys(): dict1[key] = round(dict1[key].item(), places) for key in dict2.keys(): @@ -217,8 +219,8 @@ def test_multi_task_summary(self): summary_dict = summary_val.compute() expected_dict_mean_std = {} for task in task_metrics.keys(): - expected_dict_mean_std[f"{task}/mean_pred/val"] = preds[task].mean() - expected_dict_mean_std[f"{task}/std_pred/val"] = preds[task].std(correction=0) + expected_dict_mean_std[f"{task}/mean_preds/val"] = preds[task].mean() + expected_dict_mean_std[f"{task}/std_preds/val"] = preds[task].std(correction=0) expected_dict_mean_std[f"{task}/mean_target/val"] = targets[task].mean() expected_dict_mean_std[f"{task}/std_target/val"] = targets[task].std(correction=0) expected_dict_mean_std.update(expected_dict) From 36738847fda5a948b854ab52e11ace4525843e7a Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 12 Jul 2024 23:50:27 -0400 Subject: [PATCH 082/175] Tried to add grad_norm to the metrics, but won't work because it's not per task --- graphium/trainer/predictor.py | 4 +- graphium/trainer/predictor_summaries.py | 28 ++++- tests/test_predictor_summaries.py | 150 ++++++++++++++++-------- 3 files changed, 126 insertions(+), 56 deletions(-) diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index bdd99987a..36a57833a 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -389,7 +389,7 @@ def _general_step(self, batch: Dict[str, Tensor], step_name: Literal["train", "v preds[task] = preds[task].detach() targets_dict[task] = targets_dict[task].detach() - self.task_epoch_summary[step_name].update(targets_dict, preds) + self.task_epoch_summary[step_name].update(targets_dict, preds, self.model) step_dict = {} step_dict["loss"] = loss @@ -463,7 +463,7 @@ def flag_step(self, batch: Dict[str, Tensor], step_name: 
Literal["train", "val", step_dict[f"loss/{step_name}"] = loss.detach().cpu() step_dict["loss"] = loss step_dict["task_losses"] = task_losses - self.task_epoch_summary[step_name].update(targets, preds) + self.task_epoch_summary[step_name].update(targets, preds, self.model) return step_dict def on_train_batch_start(self, batch: Any, batch_idx: int) -> Optional[int]: diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index dded3ed27..22e2e0f98 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -52,6 +52,7 @@ def __init__( task_name: Optional[str] = None, compute_mean: bool = True, compute_std: bool = True, + compute_grad: bool = True, ): r""" A container to be used by the Predictor Module that stores the results for the given metrics on the predictions and targets provided. @@ -78,8 +79,15 @@ def __init__( compute_std: whether to compute the standard deviation of the predictions and targets + compute_grad: + whether to compute the gradient norm of the model, only if `step_name="train"` + """ self.step_name = step_name + self.compute_mean = compute_mean + self.compute_std = compute_std + self.compute_grad = compute_grad + if not isinstance(metrics, dict): raise ValueError(f"metrics must be a dictionary. 
Got {type(metrics)}") self.metrics = deepcopy(metrics) @@ -98,11 +106,21 @@ def __init__( self.metrics["std_preds"] = STDMetric(nan_strategy="ignore") if ("std_target" not in self.metrics) and compute_std: self.metrics["std_target"] = STDMetric(nan_strategy="ignore") + if ("grad_norm" not in self.metrics) and compute_grad and (self.step_name == "train"): + self.metrics["grad_norm"] = GradientNormMetric() # Parse the metrics filters self.metrics_on_training_set = self._parse_metrics_filter(metrics_on_training_set) self.metrics_on_progress_bar = self._parse_metrics_filter(metrics_on_progress_bar) + # Update the metrics to compute on the training set + if self.compute_mean: + self.metrics_on_training_set.update(["mean_preds", "mean_target"]) + if self.compute_std: + self.metrics_on_training_set.update(["std_preds", "std_target"]) + if self.compute_grad and (self.step_name == "train"): + self.metrics_on_training_set.update(["grad_norm"]) + self._cached_metrics: Dict[str, Tensor] = {} @property @@ -114,8 +132,8 @@ def _parse_metrics_filter(self, filter: Optional[Union[List[str], Dict[str, Any] filter = [] elif isinstance(filter, dict): filter = list(filter.keys()) - elif isinstance(filter, list): - filter = filter + elif isinstance(filter, (list, tuple, set)): + filter = list(filter) elif isinstance(filter, str): filter = [filter] else: @@ -139,6 +157,7 @@ def metrics_to_use(self) -> Dict[str, Callable]: metrics_to_use = { key: metric for key, metric in self.metrics.items() if key in self.metrics_on_training_set } + return metrics_to_use return self.metrics @@ -259,6 +278,7 @@ def __init__( task_metrics_on_progress_bar: Optional[Dict[str, List[str]]] = None, compute_mean: bool = True, compute_std: bool = True, + compute_grad: bool = True, ): r""" class to store the summaries of the tasks @@ -288,9 +308,10 @@ class to store the summaries of the tasks task_name = task, compute_mean = compute_mean, compute_std = compute_std, + compute_grad=compute_grad, ) - def 
update(self, preds: Dict[str, Tensor], targets: Dict[str, Tensor]) -> None: + def update(self, preds: Dict[str, Tensor], targets: Dict[str, Tensor], model: Optional[torch.nn.Module] = None) -> None: r""" update the state for all predictors @@ -302,6 +323,7 @@ def update(self, preds: Dict[str, Tensor], targets: Dict[str, Tensor]) -> None: self.task_summaries[task].update( preds[task].detach(), targets[task], + model=model, ) def get_results_on_progress_bar( diff --git a/tests/test_predictor_summaries.py b/tests/test_predictor_summaries.py index 8536a6c4a..4a066969b 100644 --- a/tests/test_predictor_summaries.py +++ b/tests/test_predictor_summaries.py @@ -24,6 +24,51 @@ from graphium.trainer.predictor_summaries import SingleTaskSummary, MultiTaskSummary, STDMetric, GradientNormMetric +class SimpleNN(nn.Module): +# Define a simple neural network with 2 layers + def __init__(self, in_dim=10, out_dim=1): + super(SimpleNN, self).__init__() + torch.random.manual_seed(42) + # Define the first layer with 10 input features and 5 output features + self.layer1 = nn.Linear(in_dim, 5) + # Define the second layer with 5 input features and 1 output feature + self.layer2 = nn.Linear(5, out_dim) + + def forward(self, x): + # Pass the input through the first layer + if x.ndim == 1: + x = x.unsqueeze(-1) + x = torch.relu(self.layer1(x)) + # Pass the output of the first layer through the second layer + x = self.layer2(x) + return x + + +class SimpleDictNN(nn.Module): + def __init__(self, task_list, in_dim=10, out_dim=1): + super(SimpleDictNN, self).__init__() + torch.random.manual_seed(42) + self.dict_nn = nn.ModuleDict({task: SimpleNN(in_dim, out_dim) for task in task_list}) + + def forward(self, x): + return {task: self.dict_nn[task](x[task]) for task in self.dict_nn.keys()} + + +def simple_nn_grad_step(model, inputs, targets): + # Initialize the optimizer and loss function + loss_fn = nn.MSELoss() + optimizer = torch.optim.SGD(model.parameters(), lr=0.01) + + # Perform a gradient 
step + optimizer.zero_grad() + outputs = model(inputs) + if isinstance(outputs, dict): + loss = sum([loss_fn(outputs[task], targets[task]) for task in outputs.keys()]) + else: + loss = loss_fn(outputs, targets) + loss.backward() + optimizer.step() + return model class test_TaskSummary(ut.TestCase): @@ -74,27 +119,6 @@ def test_std_metric(self): self.assertAlmostEqual(std_metric_val.item(), expected_std.item(), places=5) def test_gradient_norm_metric(self): - # Define a simple neural network with 2 layers - class SimpleNN(nn.Module): - def __init__(self): - super(SimpleNN, self).__init__() - torch.random.manual_seed(42) - # Define the first layer with 10 input features and 5 output features - self.layer1 = nn.Linear(10, 5) - # Define the second layer with 5 input features and 1 output feature - self.layer2 = nn.Linear(5, 1) - - def forward(self, x): - # Pass the input through the first layer - x = torch.relu(self.layer1(x)) - # Pass the output of the first layer through the second layer - x = self.layer2(x) - return x - - # Create an instance of the neural network, optimizer and loss function - model = SimpleNN() - optimizer = torch.optim.SGD(model.parameters(), lr=0.01) - loss_fn = nn.MSELoss() # Generate random data torch.random.manual_seed(42) @@ -103,19 +127,14 @@ def forward(self, x): targets = torch.rand(LEN, 1) # Compute expected values for gradient norm - optimizer.zero_grad() - outputs = model(inputs) - loss = loss_fn(outputs, targets) - loss.backward() + model = SimpleNN() + model = simple_nn_grad_step(model, inputs, targets) expected_grad_norm = torch.norm(torch.stack([torch.norm(param.grad) for param in model.parameters()])) - optimizer.zero_grad() # Compute gradient norm metric + model = SimpleNN() + model = simple_nn_grad_step(model, inputs, targets) grad_norm_metric = GradientNormMetric() - optimizer.zero_grad() - outputs = model(inputs) - loss = loss_fn(outputs, targets) - loss.backward() grad_norm_metric.update(model) grad_norm_metric_val = 
grad_norm_metric.compute() grad_norm_metric.reset() @@ -124,31 +143,20 @@ def forward(self, x): # Compute gradient norm metric with many update steps grad_norm_metric = GradientNormMetric() - optimizer.zero_grad() - outputs = model(inputs[:10]) - loss = loss_fn(outputs, targets[:10]) - loss.backward() - grad_norm_metric.update(model) - optimizer.zero_grad() - outputs = model(inputs[10:50]) - loss = loss_fn(outputs, targets[10:50]) - loss.backward() + model = SimpleNN() + model = simple_nn_grad_step(model, inputs[:50], targets[:50]) grad_norm_metric.update(model) - optimizer.zero_grad() - outputs = model(inputs[50:300]) - loss = loss_fn(outputs, targets[50:300]) - loss.backward() + model = SimpleNN() + model = simple_nn_grad_step(model, inputs[50:400], targets[50:400]) grad_norm_metric.update(model) - optimizer.zero_grad() - outputs = model(inputs[300:]) - loss = loss_fn(outputs, targets[300:]) - loss.backward() + model = SimpleNN() + model = simple_nn_grad_step(model, inputs[400:], targets[400:]) grad_norm_metric.update(model) grad_norm_metric_val = grad_norm_metric.compute() grad_norm_metric.reset() - self.assertAlmostEqual(grad_norm_metric_val.item(), expected_grad_norm.item(), places=2) + self.assertAlmostEqual(grad_norm_metric_val.item(), expected_grad_norm.item(), places=1) def assertDictTensorAlmostEqual(self, dict1, dict2, places=7): dict1 = deepcopy(dict1) @@ -226,15 +234,23 @@ def test_multi_task_summary(self): expected_dict_mean_std.update(expected_dict) self.assertDictTensorAlmostEqual(summary_dict, expected_dict_mean_std, places=5) + # Test the mean and std computation with multiple batches + summary_val.reset() + summary_val.update(preds1, targets1) + summary_val.update(preds2, targets2) + summary_val.update(preds3, targets3) + summary_dict = summary_val.compute() + self.assertDictTensorAlmostEqual(summary_dict, expected_dict_mean_std, places=5) + # Test the training step doesn't return anything when no metrics on training set are selected - 
summary_train = MultiTaskSummary(task_metrics, step_name="train", task_metrics_on_training_set=None, compute_mean=False, compute_std=False) + summary_train = MultiTaskSummary(task_metrics, step_name="train", task_metrics_on_training_set=None, compute_mean=False, compute_std=False, compute_grad=False) summary_train.update(preds, targets) summary_train = summary_train.compute() self.assertDictEqual(summary_train, {}) # Test the training step returns only the mae task_metrics_on_training_set = {"task1": ["mae"], "task2": None, "task3": "mae"} - summary_train = MultiTaskSummary(task_metrics, step_name="train", task_metrics_on_training_set=task_metrics_on_training_set, compute_mean=False, compute_std=False) + summary_train = MultiTaskSummary(task_metrics, step_name="train", task_metrics_on_training_set=task_metrics_on_training_set, compute_mean=False, compute_std=False, compute_grad=False) summary_train.update(preds, targets) summary_dict = summary_train.compute() expected_dict_mae = {key: value for key, value in expected_dict.items() if "mae" in key} @@ -242,12 +258,44 @@ def test_multi_task_summary(self): self.assertDictTensorAlmostEqual(summary_dict, expected_dict_mae, places=5) # Test the training step returns only the mae with multiple steps - summary_train = MultiTaskSummary(task_metrics, step_name="train", task_metrics_on_training_set=task_metrics_on_training_set, compute_mean=False, compute_std=False) + summary_train = MultiTaskSummary(task_metrics, step_name="train", task_metrics_on_training_set=task_metrics_on_training_set, compute_mean=False, compute_std=False, compute_grad=False) summary_train.update(preds1, targets1) summary_train.update(preds2, targets2) summary_train.update(preds3, targets3) summary_dict = summary_train.compute() self.assertDictTensorAlmostEqual(summary_dict, expected_dict_mae, places=5) + # Test grad_norm not available in "val" step + summary_val = MultiTaskSummary(task_metrics, step_name="val", compute_mean=False, compute_std=False, 
compute_grad=True) + summary_val.update(preds, targets) + summary_dict = summary_val.compute() + self.assertNotIn("grad_norm", summary_dict.keys()) + + # Test grad_norm available in "train" step + model = SimpleDictNN(task_list=task_metrics.keys(), in_dim=1, out_dim=1) + model = simple_nn_grad_step(model, preds, targets) + expected_norm = torch.norm(torch.stack([torch.norm(param.grad) for param in model.parameters()])) + + summary_train = MultiTaskSummary(task_metrics, step_name="train", compute_mean=False, compute_std=False, compute_grad=True) + summary_train.update(preds, targets, model) + summary_dict_grad1 = summary_train.compute() + self.assertIn("task1/grad_norm/train", summary_dict_grad1.keys()) + self.assertIn("task2/grad_norm/train", summary_dict_grad1.keys()) + self.assertIn("task3/grad_norm/train", summary_dict_grad1.keys()) + + # Test grad_norm available in "train" step with multiple steps + model = SimpleDictNN(task_list=task_metrics.keys(), in_dim=1, out_dim=1) + model1 = simple_nn_grad_step(model, preds1, targets1) + model2 = simple_nn_grad_step(model, preds2, targets2) + model3 = simple_nn_grad_step(model, preds3, targets3) + + summary_train = MultiTaskSummary(task_metrics, step_name="train", compute_mean=False, compute_std=False, compute_grad=True) + summary_train.update(preds1, targets1, model1) + summary_train.update(preds2, targets2, model2) + summary_train.update(preds3, targets3, model3) + summary_dict_grad2 = summary_train.compute() + self.assertDictTensorAlmostEqual(summary_dict_grad1, summary_dict_grad2, places=7) + + if __name__ == "__main__": ut.main() From 29598a2e4458dbc7a19771485b37ca321555f2bd Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 12 Jul 2024 23:58:42 -0400 Subject: [PATCH 083/175] Moved the gradient metric directly to the `Predictor` --- graphium/trainer/predictor.py | 8 +++++- graphium/trainer/predictor_summaries.py | 23 +++-------------- tests/test_predictor_summaries.py | 33 +++---------------------- 3 files changed, 
14 insertions(+), 50 deletions(-) diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index 36a57833a..5d15aa079 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -33,7 +33,7 @@ ModelOptions, OptimOptions, ) -from graphium.trainer.predictor_summaries import MultiTaskSummary +from graphium.trainer.predictor_summaries import MultiTaskSummary, GradientNormMetric from graphium.utils import fs from graphium.utils.moving_average_tracker import MovingAverageTracker from graphium.utils.tensor import dict_tensor_fp16_to_fp32 @@ -200,6 +200,7 @@ def __init__( self.samples_seen = 0 self.global_bs = global_bs + self.model_grad = GradientNormMetric() def forward( self, inputs: Dict @@ -468,6 +469,7 @@ def flag_step(self, batch: Dict[str, Tensor], step_name: Literal["train", "val", def on_train_batch_start(self, batch: Any, batch_idx: int) -> Optional[int]: + self.model_grad.reset() self.task_epoch_summary["train"].reset() self.train_batch_start_time = time.time() self.skip_log_train_metrics = (self.metrics_every_n_train_steps is None) or ( @@ -517,6 +519,7 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: metrics_computed = self.task_epoch_summary["train"].compute() self.task_epoch_summary["train"].reset() metrics_logs.update(metrics_computed) + metrics_logs["train/grad_norm"] = self.model_grad.compute() # Log the metrics if self.logger is not None: @@ -535,6 +538,9 @@ def training_step(self, batch: Dict[str, Tensor]) -> Dict[str, Any]: # step_dict = self._general_step(batch=batch, step_name="train") step_dict = self._general_step(batch=batch, step_name="train") + # Update the gradients + self.model_grad.update(self.model) + return step_dict # Returning the metrics_logs with the loss diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 22e2e0f98..893bbc739 100644 --- a/graphium/trainer/predictor_summaries.py +++ 
b/graphium/trainer/predictor_summaries.py @@ -52,7 +52,6 @@ def __init__( task_name: Optional[str] = None, compute_mean: bool = True, compute_std: bool = True, - compute_grad: bool = True, ): r""" A container to be used by the Predictor Module that stores the results for the given metrics on the predictions and targets provided. @@ -79,14 +78,10 @@ def __init__( compute_std: whether to compute the standard deviation of the predictions and targets - compute_grad: - whether to compute the gradient norm of the model, only if `step_name="train"` - """ self.step_name = step_name self.compute_mean = compute_mean self.compute_std = compute_std - self.compute_grad = compute_grad if not isinstance(metrics, dict): raise ValueError(f"metrics must be a dictionary. Got {type(metrics)}") @@ -106,8 +101,6 @@ def __init__( self.metrics["std_preds"] = STDMetric(nan_strategy="ignore") if ("std_target" not in self.metrics) and compute_std: self.metrics["std_target"] = STDMetric(nan_strategy="ignore") - if ("grad_norm" not in self.metrics) and compute_grad and (self.step_name == "train"): - self.metrics["grad_norm"] = GradientNormMetric() # Parse the metrics filters self.metrics_on_training_set = self._parse_metrics_filter(metrics_on_training_set) @@ -118,8 +111,6 @@ def __init__( self.metrics_on_training_set.update(["mean_preds", "mean_target"]) if self.compute_std: self.metrics_on_training_set.update(["std_preds", "std_target"]) - if self.compute_grad and (self.step_name == "train"): - self.metrics_on_training_set.update(["grad_norm"]) self._cached_metrics: Dict[str, Tensor] = {} @@ -161,14 +152,13 @@ def metrics_to_use(self) -> Dict[str, Callable]: return metrics_to_use return self.metrics - def update(self, preds: Tensor, targets: Tensor, model: Optional[torch.nn.Module] = None) -> None: + def update(self, preds: Tensor, targets: Tensor) -> None: r""" update the state of the predictor Parameters: targets: the targets tensor predictions: the predictions tensor - model: the model, 
optional for some metrics like `GradientNormMetric` """ for metric_key, metric_obj in self.metrics_to_use.items(): try: @@ -186,11 +176,6 @@ def update(self, preds: Tensor, targets: Tensor, model: Optional[torch.nn.Module elif ("value" == varnames[0]) and ("target" in metric_key): # The case where the metric takes only one value, and it is the target metric_obj.update(targets) - elif ("model" == varnames[0]): - # The case where the metric takes the model as input - if model is None: - raise ValueError(f"Metric {metric_key} requires the model as input.") - metric_obj.update(model) else: raise ValueError(f"Metric {metric_key} update method signature `{varnames}` is not recognized.") @@ -278,7 +263,6 @@ def __init__( task_metrics_on_progress_bar: Optional[Dict[str, List[str]]] = None, compute_mean: bool = True, compute_std: bool = True, - compute_grad: bool = True, ): r""" class to store the summaries of the tasks @@ -308,10 +292,9 @@ class to store the summaries of the tasks task_name = task, compute_mean = compute_mean, compute_std = compute_std, - compute_grad=compute_grad, ) - def update(self, preds: Dict[str, Tensor], targets: Dict[str, Tensor], model: Optional[torch.nn.Module] = None) -> None: + def update(self, preds: Dict[str, Tensor], targets: Dict[str, Tensor]) -> None: r""" update the state for all predictors @@ -323,7 +306,6 @@ def update(self, preds: Dict[str, Tensor], targets: Dict[str, Tensor], model: Op self.task_summaries[task].update( preds[task].detach(), targets[task], - model=model, ) def get_results_on_progress_bar( @@ -424,6 +406,7 @@ class GradientNormMetric(Metric): Warning: This metric is not compatible with other metrics since it doesn't take the predictions and targets as input. It takes the model as input. 
+ It also doesn't work per task, but for the full model """ def __init__(self, dist_sync_on_step=False): super().__init__(dist_sync_on_step=dist_sync_on_step) diff --git a/tests/test_predictor_summaries.py b/tests/test_predictor_summaries.py index 4a066969b..a37d6cabf 100644 --- a/tests/test_predictor_summaries.py +++ b/tests/test_predictor_summaries.py @@ -243,14 +243,14 @@ def test_multi_task_summary(self): self.assertDictTensorAlmostEqual(summary_dict, expected_dict_mean_std, places=5) # Test the training step doesn't return anything when no metrics on training set are selected - summary_train = MultiTaskSummary(task_metrics, step_name="train", task_metrics_on_training_set=None, compute_mean=False, compute_std=False, compute_grad=False) + summary_train = MultiTaskSummary(task_metrics, step_name="train", task_metrics_on_training_set=None, compute_mean=False, compute_std=False) summary_train.update(preds, targets) summary_train = summary_train.compute() self.assertDictEqual(summary_train, {}) # Test the training step returns only the mae task_metrics_on_training_set = {"task1": ["mae"], "task2": None, "task3": "mae"} - summary_train = MultiTaskSummary(task_metrics, step_name="train", task_metrics_on_training_set=task_metrics_on_training_set, compute_mean=False, compute_std=False, compute_grad=False) + summary_train = MultiTaskSummary(task_metrics, step_name="train", task_metrics_on_training_set=task_metrics_on_training_set, compute_mean=False, compute_std=False) summary_train.update(preds, targets) summary_dict = summary_train.compute() expected_dict_mae = {key: value for key, value in expected_dict.items() if "mae" in key} @@ -258,7 +258,7 @@ def test_multi_task_summary(self): self.assertDictTensorAlmostEqual(summary_dict, expected_dict_mae, places=5) # Test the training step returns only the mae with multiple steps - summary_train = MultiTaskSummary(task_metrics, step_name="train", task_metrics_on_training_set=task_metrics_on_training_set, compute_mean=False, 
compute_std=False, compute_grad=False) + summary_train = MultiTaskSummary(task_metrics, step_name="train", task_metrics_on_training_set=task_metrics_on_training_set, compute_mean=False, compute_std=False) summary_train.update(preds1, targets1) summary_train.update(preds2, targets2) summary_train.update(preds3, targets3) @@ -266,36 +266,11 @@ def test_multi_task_summary(self): self.assertDictTensorAlmostEqual(summary_dict, expected_dict_mae, places=5) # Test grad_norm not available in "val" step - summary_val = MultiTaskSummary(task_metrics, step_name="val", compute_mean=False, compute_std=False, compute_grad=True) + summary_val = MultiTaskSummary(task_metrics, step_name="val", compute_mean=False, compute_std=False) summary_val.update(preds, targets) summary_dict = summary_val.compute() self.assertNotIn("grad_norm", summary_dict.keys()) - # Test grad_norm available in "train" step - model = SimpleDictNN(task_list=task_metrics.keys(), in_dim=1, out_dim=1) - model = simple_nn_grad_step(model, preds, targets) - expected_norm = torch.norm(torch.stack([torch.norm(param.grad) for param in model.parameters()])) - - summary_train = MultiTaskSummary(task_metrics, step_name="train", compute_mean=False, compute_std=False, compute_grad=True) - summary_train.update(preds, targets, model) - summary_dict_grad1 = summary_train.compute() - self.assertIn("task1/grad_norm/train", summary_dict_grad1.keys()) - self.assertIn("task2/grad_norm/train", summary_dict_grad1.keys()) - self.assertIn("task3/grad_norm/train", summary_dict_grad1.keys()) - - # Test grad_norm available in "train" step with multiple steps - model = SimpleDictNN(task_list=task_metrics.keys(), in_dim=1, out_dim=1) - model1 = simple_nn_grad_step(model, preds1, targets1) - model2 = simple_nn_grad_step(model, preds2, targets2) - model3 = simple_nn_grad_step(model, preds3, targets3) - - summary_train = MultiTaskSummary(task_metrics, step_name="train", compute_mean=False, compute_std=False, compute_grad=True) - 
summary_train.update(preds1, targets1, model1) - summary_train.update(preds2, targets2, model2) - summary_train.update(preds3, targets3, model3) - summary_dict_grad2 = summary_train.compute() - self.assertDictTensorAlmostEqual(summary_dict_grad1, summary_dict_grad2, places=7) - if __name__ == "__main__": ut.main() From 6260fa1b1452c68d69f71adc7314626bb6310841 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Sat, 13 Jul 2024 00:07:00 -0400 Subject: [PATCH 084/175] Removed file_opener and read_file --- graphium/utils/read_file.py | 173 ------------------------------------ tests/test_utils.py | 26 ------ 2 files changed, 199 deletions(-) delete mode 100644 graphium/utils/read_file.py diff --git a/graphium/utils/read_file.py b/graphium/utils/read_file.py deleted file mode 100644 index 27d2fb216..000000000 --- a/graphium/utils/read_file.py +++ /dev/null @@ -1,173 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. --------------------------------------------------------------------------------- -""" - - -""" Utiles for data parsing""" -import os -import warnings -import numpy as np -import pandas as pd -import datamol as dm -from functools import partial -from copy import copy -import fsspec - -from loguru import logger -from rdkit import Chem -from rdkit.Chem.Descriptors import ExactMolWt - -from graphium.utils.tensor import parse_valid_args, arg_in_func - - -def read_file(filepath, as_ext=None, **kwargs): - r""" - Allow to read different file format and parse them into a MolecularDataFrame. 
- Supported formats are: - * csv (.csv, .smile, .smiles, .tsv) - * txt (.txt) - * xls (.xls, .xlsx, .xlsm, .xls*) - * sdf (.sdf) - * pkl (.pkl) - - Arguments - ----------- - - filepath: str - The full path and name of the file to read. - It also supports the s3 url path. - as_ext: str, Optional - The file extension used to read the file. If None, the extension is deduced - from the extension of the file. Otherwise, no matter the file extension, - the file will be read according to the specified ``as_ext``. - (Default=None) - **kwargs: All the optional parameters required for the desired file reader. - - TODO: unit test to make sure it works well with all extensions - - Returns - --------- - df: pandas.DataFrame - The ``pandas.DataFrame`` containing the parsed data - - """ - - # Get the file extension - if as_ext is None: - file_ext = os.path.splitext(filepath)[-1].lower()[1:] - else: - file_ext = as_ext - if not isinstance(file_ext, str): - raise TypeError("`file_type` must be a `str`. Provided: {}".format(file_ext)) - - open_mode = "r" - - # Read the file according to the right extension - if file_ext in ["csv", "smile", "smiles", "smi", "tsv"]: - file_reader = pd.read_csv - elif file_ext == "txt": - file_reader = pd.read_table - elif file_ext[0:3] == "xls": - open_mode = "rb" - file_reader = partial(pd.read_excel, engine="openpyxl") - elif file_ext == "sdf": - file_reader = parse_sdf_to_dataframe - elif file_ext == "pkl": - open_mode = "rb" - file_reader = pd.read_pickle - else: - raise 'File extension "{}" not supported'.format(file_ext) - - kwargs = parse_valid_args(fn=file_reader, param_dict=kwargs) - - if file_ext[0:3] not in ["sdf", "xls"]: - with file_opener(filepath, open_mode) as file_in: - data = file_reader(file_in, **kwargs) - else: - data = file_reader(filepath, **kwargs) - return data - - -def parse_sdf_to_dataframe(sdf_path, as_cxsmiles=True, skiprows=None): - r""" - Allows to read an SDF file containing molecular informations, convert - it to a 
pandas DataFrame and convert the molecules to SMILES. It also - lists a warning of all the molecules that couldn't be read. - - Arguments - ----------- - - sdf_path: str - The full path and name of the sdf file to read - as_cxsmiles: bool, optional - Whether to use the CXSMILES notation, which preserves atomic coordinates, - stereocenters, and much more. - See `https://dl.chemaxon.com/marvin-archive/latest/help/formats/cxsmiles-doc.html` - (Default = True) - skiprows: int, list - The rows to skip from dataset. The enumerate index starts from 1 insted of 0. - (Default = None) - - """ - - # read the SDF file - # locally or from s3 - data = dm.read_sdf(sdf_path) - - # For each molecule in the SDF file, read all the properties and add it to a list of dict. - # Also count the number of molecules that cannot be read. - data_list = [] - count_none = 0 - if skiprows is not None: - if isinstance(skiprows, int): - skiprows = range(0, skiprows - 1) - skiprows = np.array(skiprows) - 1 - - for idx, mol in enumerate(data): - if (skiprows is not None) and (idx in skiprows): - continue - - if (mol is not None) and (ExactMolWt(mol) > 0): - mol_dict = mol.GetPropsAsDict() - data_list.append(mol_dict) - if as_cxsmiles: - smiles = Chem.rdmolfiles.MolToCXSmiles(mol, canonical=True) - else: - smiles = dm.to_smiles(mol, canonical=True) - data_list[-1]["SMILES"] = smiles - else: - count_none += 1 - logger.info(f"Could not read molecule # {idx}") - - # Display a message or warning after the SDF is done parsing - if count_none == 0: - logger.info("Successfully read the SDF file without error: {}".format(sdf_path)) - else: - warnings.warn( - ( - 'Error reading {} molecules from the "{}" file.\ - {} molecules read successfully.' 
- ).format(count_none, sdf_path, len(data_list)) - ) - return pd.DataFrame(data_list) - - -def file_opener(filename, mode="r"): - """File reader stream""" - filename = str(filename) - if "w" in mode: - filename = "simplecache::" + filename - if filename.endswith(".gz"): - instream = fsspec.open(filename, mode=mode, compression="gzip") - else: - instream = fsspec.open(filename, mode=mode) - return instream diff --git a/tests/test_utils.py b/tests/test_utils.py index 24c80ab2e..537f35775 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -149,32 +149,6 @@ def test_nan_mad(self): np.testing.assert_almost_equal(torch_mad.numpy(), numpy_mad, decimal=4, err_msg=err_msg) -def test_file_opener(tmp_path): - # Create a temporary file - txt_file = tmp_path / "test.txt" - txt_file.write_text("Hello, World!") - - # Test opening file in read mode - with file_opener(txt_file, "r") as f: - assert f.read() == "Hello, World!" - - # Test opening file in write mode - with file_opener(txt_file, "w") as f: - f.write("New text") - - with file_opener(txt_file, "r") as f: - assert f.read() == "New text" - - # Create a temporary gzip file - gzip_file = tmp_path / "test.txt.gz" - with gzip.open(gzip_file, "wt") as f: - f.write("Hello, Gzip!") - - # Test opening gzip file in read mode - with file_opener(gzip_file, "r") as f: - assert f.read() == "Hello, Gzip!" 
- - class test_SafeRun(ut.TestCase): def test_safe_run(self): # Error is caught From 10a10179cd4e4e1b1d0d15ba45b9e232ebbdfbeb Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Sat, 13 Jul 2024 00:09:27 -0400 Subject: [PATCH 085/175] Fixed predictor grad_norm --- graphium/trainer/predictor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index 5d15aa079..e8fd13563 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -390,7 +390,7 @@ def _general_step(self, batch: Dict[str, Tensor], step_name: Literal["train", "v preds[task] = preds[task].detach() targets_dict[task] = targets_dict[task].detach() - self.task_epoch_summary[step_name].update(targets_dict, preds, self.model) + self.task_epoch_summary[step_name].update(targets_dict, preds) step_dict = {} step_dict["loss"] = loss @@ -464,7 +464,7 @@ def flag_step(self, batch: Dict[str, Tensor], step_name: Literal["train", "val", step_dict[f"loss/{step_name}"] = loss.detach().cpu() step_dict["loss"] = loss step_dict["task_losses"] = task_losses - self.task_epoch_summary[step_name].update(targets, preds, self.model) + self.task_epoch_summary[step_name].update(targets, preds) return step_dict def on_train_batch_start(self, batch: Any, batch_idx: int) -> Optional[int]: From 90c0ca487ab7de667d8409f5063d80143ff7aad8 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Mon, 15 Jul 2024 17:06:07 -0400 Subject: [PATCH 086/175] Fixed the progress bar logging to newest version. Fixed minor issues in the logging and imports. 
--- expts/run_validation_test.py | 2 +- graphium/cli/train_finetune_test.py | 3 +- graphium/config/_loader.py | 9 +++- .../config/dummy_finetuning_from_gnn.yaml | 6 +-- .../dummy_finetuning_from_task_head.yaml | 6 +-- graphium/finetuning/finetuning.py | 4 +- graphium/trainer/__init__.py | 3 ++ graphium/trainer/predictor.py | 53 ++++++++++--------- graphium/trainer/predictor_summaries.py | 7 ++- graphium/trainer/progress_bar.py | 27 ++++++++++ tests/test_finetuning.py | 6 ++- 11 files changed, 85 insertions(+), 41 deletions(-) create mode 100644 graphium/trainer/progress_bar.py diff --git a/expts/run_validation_test.py b/expts/run_validation_test.py index 06804301c..48cb0183e 100644 --- a/expts/run_validation_test.py +++ b/expts/run_validation_test.py @@ -8,7 +8,7 @@ import timeit from loguru import logger from datetime import datetime -from pytorch_lightning.utilities.model_summary import ModelSummary +from lightning.pytorch.utilities.model_summary import ModelSummary # Current project imports import graphium diff --git a/graphium/cli/train_finetune_test.py b/graphium/cli/train_finetune_test.py index 09183c69e..5a408919f 100644 --- a/graphium/cli/train_finetune_test.py +++ b/graphium/cli/train_finetune_test.py @@ -220,10 +220,11 @@ def run_training_finetuning_testing(cfg: DictConfig) -> None: logger.info(predictor.model) logger.info(ModelSummary(predictor, max_depth=4)) + metrics_on_progress_bar = predictor.get_metrics_on_progress_bar() ## Trainer date_time_suffix = datetime.now().strftime("%d.%m.%Y_%H.%M.%S") - trainer = load_trainer(cfg, accelerator_type, date_time_suffix) + trainer = load_trainer(cfg, accelerator_type, date_time_suffix, metrics_on_progress_bar=metrics_on_progress_bar) if not testing_only: # Add the fine-tuning callback to trainer diff --git a/graphium/config/_loader.py b/graphium/config/_loader.py index dc1da4998..8493c41c1 100644 --- a/graphium/config/_loader.py +++ b/graphium/config/_loader.py @@ -15,7 +15,7 @@ # Misc import os from copy 
import deepcopy -from typing import Any, Callable, Dict, Mapping, Optional, Tuple, Type, Union +from typing import Any, Callable, Dict, Mapping, Optional, Tuple, Type, Union, Iterable import joblib import mup @@ -40,6 +40,7 @@ from graphium.trainer.metrics import MetricWrapper from graphium.trainer.predictor import PredictorModule from graphium.utils.command_line_utils import get_anchors_and_aliases, update_config +from graphium.trainer.progress_bar import ProgressBarMetrics # Graphium from graphium.utils.mup import set_base_shapes @@ -391,6 +392,7 @@ def load_trainer( config: Union[omegaconf.DictConfig, Dict[str, Any]], accelerator_type: str, date_time_suffix: str = "", + metrics_on_progress_bar: Optional[Iterable[str]] = None, ) -> Trainer: """ Defining the pytorch-lightning Trainer module. @@ -456,12 +458,15 @@ def load_trainer( name += f"_{date_time_suffix}" trainer_kwargs["logger"] = WandbLogger(name=name, log_model=True, **wandb_cfg) - trainer_kwargs["callbacks"] = callbacks + progress_bar_callback = ProgressBarMetrics(metrics_on_progress_bar = metrics_on_progress_bar) + callbacks.append(progress_bar_callback) + trainer = Trainer( detect_anomaly=True, strategy=strategy, accelerator=accelerator_type, devices=devices, + callbacks=callbacks, **cfg_trainer["trainer"], **trainer_kwargs, ) diff --git a/graphium/config/dummy_finetuning_from_gnn.yaml b/graphium/config/dummy_finetuning_from_gnn.yaml index ca9493d30..dba403c0f 100644 --- a/graphium/config/dummy_finetuning_from_gnn.yaml +++ b/graphium/config/dummy_finetuning_from_gnn.yaml @@ -64,14 +64,14 @@ accelerator: predictor: random_seed: ${constants.seed} optim_kwargs: - lr: 4.e-5 + lr: 1.e-3 scheduler_kwargs: null target_nan_mask: null multitask_handling: flatten # flatten, mean-per-label torch_scheduler_kwargs: module_type: WarmUpLinearLR - max_num_epochs: 2 + max_num_epochs: 4 warmup_epochs: 1 verbose: False @@ -107,7 +107,7 @@ trainer: seed: ${constants.seed} trainer: precision: 32 - max_epochs: 2 + 
max_epochs: 4 min_epochs: 1 check_val_every_n_epoch: 1 accumulate_grad_batches: 1 diff --git a/graphium/config/dummy_finetuning_from_task_head.yaml b/graphium/config/dummy_finetuning_from_task_head.yaml index 2682ccee3..a296058ba 100644 --- a/graphium/config/dummy_finetuning_from_task_head.yaml +++ b/graphium/config/dummy_finetuning_from_task_head.yaml @@ -70,14 +70,14 @@ accelerator: predictor: random_seed: ${constants.seed} optim_kwargs: - lr: 4.e-5 + lr: 1.e-3 scheduler_kwargs: null target_nan_mask: null multitask_handling: flatten # flatten, mean-per-label torch_scheduler_kwargs: module_type: WarmUpLinearLR - max_num_epochs: 2 + max_num_epochs: 4 warmup_epochs: 1 verbose: False @@ -113,7 +113,7 @@ trainer: seed: ${constants.seed} trainer: precision: 32 - max_epochs: 2 + max_epochs: 4 min_epochs: 1 check_val_every_n_epoch: 1 accumulate_grad_batches: 1 diff --git a/graphium/finetuning/finetuning.py b/graphium/finetuning/finetuning.py index 97d6d7fc7..59902a921 100644 --- a/graphium/finetuning/finetuning.py +++ b/graphium/finetuning/finetuning.py @@ -17,10 +17,10 @@ from collections import OrderedDict import torch.nn as nn -import pytorch_lightning as pl +import lightning.pytorch as pl from torch.optim.optimizer import Optimizer -from pytorch_lightning.callbacks import BaseFinetuning +from lightning.pytorch.callbacks import BaseFinetuning class GraphFinetuning(BaseFinetuning): diff --git a/graphium/trainer/__init__.py b/graphium/trainer/__init__.py index ed2cbf2a4..1e1682e2f 100644 --- a/graphium/trainer/__init__.py +++ b/graphium/trainer/__init__.py @@ -2,3 +2,6 @@ from . 
import metrics from .predictor import PredictorModule +from .predictor_summaries import SingleTaskSummary +from .predictor_summaries import MultiTaskSummary +from .progress_bar import ProgressBarMetrics diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index e8fd13563..2720470a0 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -489,23 +489,25 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: # report the training loss for each individual tasks # get the mean loss value for individual tasks as they are a tensor of size --> gradient accumulation * replication * device_iter # filter zeros out for the individual losses - losses = {} + losses = {"_global/loss/train": outputs["loss"]} for task in self.tasks: this_losses = outputs["task_losses"][task] if isinstance(this_losses, torch.Tensor): if this_losses.numel() > 1: this_losses = this_losses[this_losses != 0].mean() - losses[f"train/loss/{task}"] = this_losses - + losses[f"{task}/loss/train"] = this_losses metrics_logs.update(losses) # If logging is skipped for this step, then log the important metrics anyway and return if self.skip_log_train_metrics: if self.logger is not None: - self.logger.log_metrics( - metrics_logs, step=self.global_step - ) # This is a pytorch lightning function call + self.log_dict( + dictionary=metrics_logs, + logger=self.logger, + on_step=True, + prog_bar=True, + ) return ### The code below is not executed if the logging is skipped for this step ### @@ -522,10 +524,12 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: metrics_logs["train/grad_norm"] = self.model_grad.compute() # Log the metrics - if self.logger is not None: - self.logger.log_metrics( - metrics_logs, step=self.global_step - ) # This is a pytorch lightning function call + self.log_dict( + dictionary=metrics_logs, + logger=True, + on_step=True, + prog_bar=True, + ) def training_step(self, batch: Dict[str, Tensor]) -> 
Dict[str, Any]: step_dict = None @@ -562,12 +566,11 @@ def on_train_epoch_start(self) -> None: self.epoch_start_time = time.time() def on_train_epoch_end(self) -> None: - if self.epoch_start_time is None: - logger.warning("epoch timer not initialized") - else: + if self.epoch_start_time is not None: epoch_time = time.time() - self.epoch_start_time + epoch_time = torch.tensor(epoch_time) self.epoch_start_time = None - self.log("epoch_time", torch.tensor(epoch_time), sync_dist=True) + self.log("train/epoch_time", epoch_time, prog_bar=True, sync_dist=True, on_epoch=True) def on_validation_epoch_start(self) -> None: self.mean_val_time_tracker.reset() @@ -592,7 +595,7 @@ def on_validation_epoch_end(self) -> None: metrics_logs = self._general_epoch_end(step_name="val") metrics_logs["val/mean_time"] = torch.tensor(self.mean_val_time_tracker.mean_value) metrics_logs["val/mean_tput"] = self.mean_val_tput_tracker.mean_value - self.log_dict(metrics_logs, sync_dist=True) + self.log_dict(metrics_logs, logger=True, prog_bar=True, sync_dist=True, on_epoch=True) def on_test_epoch_start(self) -> None: self.task_epoch_summary["test"].reset() @@ -600,7 +603,7 @@ def on_test_epoch_start(self) -> None: def on_test_epoch_end(self) -> None: metrics_logs = self._general_epoch_end(step_name="test") - self.log_dict(metrics_logs, sync_dist=True) + self.log_dict(metrics_logs, logger=True, prog_bar=True, sync_dist=True, on_epoch=True) def on_train_start(self): hparams_log = deepcopy(self.hparams) @@ -608,17 +611,15 @@ def on_train_start(self): if self.logger is not None: self.logger.log_hyperparams(hparams_log) - def get_progress_bar_dict(self) -> Dict[str, float]: - prog_dict = {} + @property + def get_metrics_on_progress_bar(self) -> List[str]: + prog_list = ["_global/loss/train"] + for task_name in self.tasks: + for metric in self.metrics_on_progress_bar[task_name]: + this_summary = self.task_epoch_summary["val"][task_name] + prog_list.append(this_summary.metric_log_name(metric)) - 
prog_dict["loss"] = self.task_epoch_summary.weighted_loss.detach().cpu() - results_on_progress_bar = self.task_epoch_summary.get_results_on_progress_bar("val") - for task in self.tasks: - prog_dict[self.task_epoch_summary.metric_log_name(task, "loss", "val")] = ( - self.task_epoch_summary.task_summaries[task].summaries["val"].loss - ) - prog_dict.update(results_on_progress_bar) - return prog_dict + return prog_list def __repr__(self) -> str: r""" diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 893bbc739..015cacca6 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -14,7 +14,7 @@ r"""Classes to store information about resulting evaluation metrics when using a Predictor Module.""" -from typing import Any, Callable, Dict, List, Optional, Union, Literal +from typing import Any, Callable, Dict, List, Optional, Union, Literal, Iterable from loguru import logger from copy import deepcopy import inspect @@ -25,6 +25,7 @@ from torchmetrics import MeanMetric, Metric from torchmetrics.aggregation import BaseAggregator + from graphium.utils.tensor import nan_mean, nan_std, nan_median, tensor_fp16_to_fp32 class SummaryInterface(object): @@ -294,6 +295,9 @@ class to store the summaries of the tasks compute_std = compute_std, ) + def __getitem__(self, task: str) -> SingleTaskSummary: + return self.task_summaries[task] + def update(self, preds: Dict[str, Tensor], targets: Dict[str, Tensor]) -> None: r""" @@ -424,3 +428,4 @@ def update(self, model: torch.nn.Module) -> None: def compute(self) -> Tensor: return (self.gradient_norm_sq / self.total_steps).sqrt() + diff --git a/graphium/trainer/progress_bar.py b/graphium/trainer/progress_bar.py new file mode 100644 index 000000000..a5dde1293 --- /dev/null +++ b/graphium/trainer/progress_bar.py @@ -0,0 +1,27 @@ +import sys +from typing import Any, Callable, Dict, List, Optional, Union, Literal, Iterable +from 
lightning.pytorch.callbacks import TQDMProgressBar + + + +class ProgressBarMetrics(TQDMProgressBar): + def __init__(self, metrics_on_progress_bar: Optional[Iterable[str]] = None, loss_alias:Optional[str]="_global/loss/train") -> None: + super().__init__() + if metrics_on_progress_bar is None: + metrics_on_progress_bar = {} + self.metrics_on_progress_bar = set(metrics_on_progress_bar) + self.loss_alias = loss_alias + + def get_metrics(self, trainer, pl_module) -> Dict[str, Union[int, str, float, Dict[str, float]]]: + + metrics = super().get_metrics(trainer, pl_module) + filtered_metrics = {} + for key, metric in metrics.items(): + if key in self.metrics_on_progress_bar: + if key == self.loss_alias: + filtered_metrics["loss"] = metric + else: + filtered_metrics[key] = metric + + return filtered_metrics + diff --git a/tests/test_finetuning.py b/tests/test_finetuning.py index 0fd006b38..a315194c5 100644 --- a/tests/test_finetuning.py +++ b/tests/test_finetuning.py @@ -217,7 +217,8 @@ def on_train_epoch_start(self, trainer, pl_module): assert not False in unfrozen_parameters - trainer = load_trainer(cfg, accelerator_type) + metrics_on_progress_bar = predictor.get_metrics_on_progress_bar + trainer = load_trainer(cfg, accelerator_type, metrics_on_progress_bar=metrics_on_progress_bar) finetuning_training_kwargs = cfg["finetuning"]["training_kwargs"] trainer.callbacks.append(GraphFinetuning(**finetuning_training_kwargs)) @@ -392,7 +393,8 @@ def on_train_epoch_start(self, trainer, pl_module): assert not False in unfrozen_parameters - trainer = load_trainer(cfg, accelerator_type) + metrics_on_progress_bar = predictor.get_metrics_on_progress_bar + trainer = load_trainer(cfg, accelerator_type, metrics_on_progress_bar=metrics_on_progress_bar) finetuning_training_kwargs = cfg["finetuning"]["training_kwargs"] trainer.callbacks.append(GraphFinetuning(**finetuning_training_kwargs)) From 44b66b5f08e7af25d9cd8bc03fbf2d40620dd6cb Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Mon, 15 
Jul 2024 18:20:13 -0400 Subject: [PATCH 087/175] fixed some issue with older version of torchmetrics --- graphium/trainer/predictor_summaries.py | 10 ++++++++-- tests/test_predictor_summaries.py | 2 +- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 015cacca6..ee5bca5d3 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -384,8 +384,14 @@ def update(self, value: Union[float, Tensor], weight: Union[float, Tensor] = 1.0 if not isinstance(weight, Tensor): weight = torch.as_tensor(weight, dtype=torch.float32) - weight = torch.broadcast_to(weight, value.shape) - value, weight = self._cast_and_nan_check_input(value, weight) + weight = torch.broadcast_to(weight, value.shape).clone() + # Check whether `_cast_and_nan_check_input` takes in `weight` + if "weight" in inspect.signature(self._cast_and_nan_check_input).parameters: + value, weight = self._cast_and_nan_check_input(value, weight) + else: + weight[value.isnan()] = torch.nan + value = self._cast_and_nan_check_input(value) + weight = self._cast_and_nan_check_input(weight) if value.numel() == 0: return diff --git a/tests/test_predictor_summaries.py b/tests/test_predictor_summaries.py index a37d6cabf..4e946d68b 100644 --- a/tests/test_predictor_summaries.py +++ b/tests/test_predictor_summaries.py @@ -22,7 +22,7 @@ from copy import deepcopy import unittest as ut -from graphium.trainer.predictor_summaries import SingleTaskSummary, MultiTaskSummary, STDMetric, GradientNormMetric +from graphium.trainer.predictor_summaries import MultiTaskSummary, STDMetric, GradientNormMetric class SimpleNN(nn.Module): # Define a simple neural network with 2 layers From 5c421a65fd482f241fc89e28df75ee0f5c2507bd Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Tue, 16 Jul 2024 11:57:49 -0400 Subject: [PATCH 088/175] Fixed reversed preds/targets. Fixed random sampling to take in the DF idx. 
Few other minor changes --- .../tasks/loss_metrics_datamodule/toymix.yaml | 22 +++++++++----- expts/hydra-configs/training/toymix.yaml | 2 +- graphium/cli/train_finetune_test.py | 2 +- .../config/dummy_finetuning_from_gnn.yaml | 20 ++++++------- .../dummy_finetuning_from_task_head.yaml | 10 +++---- graphium/data/datamodule.py | 1 + graphium/graphium_cpp/pybind11 | 1 + graphium/trainer/metrics.py | 29 ++++++++++--------- graphium/trainer/predictor.py | 17 +++++------ graphium/trainer/predictor_summaries.py | 1 - tests/test_finetuning.py | 15 ++++++++-- 11 files changed, 70 insertions(+), 50 deletions(-) create mode 160000 graphium/graphium_cpp/pybind11 diff --git a/expts/hydra-configs/tasks/loss_metrics_datamodule/toymix.yaml b/expts/hydra-configs/tasks/loss_metrics_datamodule/toymix.yaml index 1a979a2fa..ceb4e6d69 100644 --- a/expts/hydra-configs/tasks/loss_metrics_datamodule/toymix.yaml +++ b/expts/hydra-configs/tasks/loss_metrics_datamodule/toymix.yaml @@ -1,6 +1,8 @@ # @package _global_ predictor: + target_nan_mask: ignore + multitask_handling: flatten metrics_on_progress_bar: qm9: ["mae"] tox21: ["auroc"] @@ -13,29 +15,31 @@ predictor: metrics: qm9: &qm9_metrics - name: mae - metric: mae_ipu - target_nan_mask: null + metric: mae + target_nan_mask: ignore multitask_handling: flatten threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null - target_nan_mask: null + target_nan_mask: ignore multitask_handling: mean-per-label - name: r2_score - metric: r2_score_ipu - target_nan_mask: null + metric: r2_score + target_nan_mask: ignore multitask_handling: mean-per-label threshold_kwargs: null tox21: - name: auroc - metric: auroc_ipu + metric: auroc task: binary + target_nan_mask: ignore multitask_handling: mean-per-label threshold_kwargs: null - name: avpr - metric: average_precision_ipu + metric: averageprecision task: binary + target_nan_mask: ignore multitask_handling: mean-per-label threshold_kwargs: null - name: f1 > 
0.5 @@ -45,6 +49,7 @@ metrics: num_classes: 2 average: micro task: binary + target_nan_mask: ignore threshold_kwargs: &threshold_05 operator: greater threshold: 0.5 @@ -55,6 +60,7 @@ metrics: multitask_handling: mean-per-label average: micro task: binary + target_nan_mask: ignore threshold_kwargs: *threshold_05 zinc: *qm9_metrics diff --git a/expts/hydra-configs/training/toymix.yaml b/expts/hydra-configs/training/toymix.yaml index 4afcbd56a..dc6c174b9 100644 --- a/expts/hydra-configs/training/toymix.yaml +++ b/expts/hydra-configs/training/toymix.yaml @@ -23,4 +23,4 @@ trainer: precision: 16 max_epochs: ${constants.max_epochs} min_epochs: 1 - check_val_every_n_epoch: 20 \ No newline at end of file + check_val_every_n_epoch: 2 \ No newline at end of file diff --git a/graphium/cli/train_finetune_test.py b/graphium/cli/train_finetune_test.py index 5a408919f..f329b0edf 100644 --- a/graphium/cli/train_finetune_test.py +++ b/graphium/cli/train_finetune_test.py @@ -220,7 +220,7 @@ def run_training_finetuning_testing(cfg: DictConfig) -> None: logger.info(predictor.model) logger.info(ModelSummary(predictor, max_depth=4)) - metrics_on_progress_bar = predictor.get_metrics_on_progress_bar() + metrics_on_progress_bar = predictor.get_metrics_on_progress_bar ## Trainer date_time_suffix = datetime.now().strftime("%d.%m.%Y_%H.%M.%S") diff --git a/graphium/config/dummy_finetuning_from_gnn.yaml b/graphium/config/dummy_finetuning_from_gnn.yaml index 2ac81e42b..75848c40f 100644 --- a/graphium/config/dummy_finetuning_from_gnn.yaml +++ b/graphium/config/dummy_finetuning_from_gnn.yaml @@ -55,7 +55,7 @@ finetuning: constants: seed: 42 - max_epochs: 2 + max_epochs: 5 accelerator: float32_matmul_precision: medium @@ -66,7 +66,7 @@ predictor: optim_kwargs: lr: 1.e-3 scheduler_kwargs: null - target_nan_mask: null + target_nan_mask: ignore multitask_handling: flatten # flatten, mean-per-label torch_scheduler_kwargs: @@ -84,22 +84,22 @@ metrics: lipophilicity_astrazeneca: - name: mae metric: mae 
- target_nan_mask: null + target_nan_mask: ignore multitask_handling: flatten threshold_kwargs: null - name: spearman metric: spearmanr threshold_kwargs: null - target_nan_mask: null + target_nan_mask: ignore multitask_handling: mean-per-label - name: pearson metric: pearsonr threshold_kwargs: null - target_nan_mask: null + target_nan_mask: ignore multitask_handling: mean-per-label - name: r2_score metric: r2_score - target_nan_mask: null + target_nan_mask: ignore multitask_handling: mean-per-label threshold_kwargs: null @@ -107,7 +107,7 @@ trainer: seed: ${constants.seed} trainer: precision: 32 - max_epochs: 4 + max_epochs: 5 min_epochs: 1 check_val_every_n_epoch: 1 accumulate_grad_batches: 1 @@ -122,12 +122,12 @@ datamodule: module_type: "ADMETBenchmarkDataModule" args: + processed_graph_data_path: datacache/processed_graph_data/dummy_finetuning_from_gnn # TDC specific tdc_benchmark_names: [lipophilicity_astrazeneca] tdc_train_val_seed: ${constants.seed} - - batch_size_training: 200 - batch_size_inference: 200 + batch_size_training: 20 + batch_size_inference: 20 num_workers: 0 persistent_workers: False \ No newline at end of file diff --git a/graphium/config/dummy_finetuning_from_task_head.yaml b/graphium/config/dummy_finetuning_from_task_head.yaml index 8ae7dcb9c..373bc6e7e 100644 --- a/graphium/config/dummy_finetuning_from_task_head.yaml +++ b/graphium/config/dummy_finetuning_from_task_head.yaml @@ -61,7 +61,7 @@ finetuning: constants: seed: 42 - max_epochs: 2 + max_epochs: 5 accelerator: float32_matmul_precision: medium @@ -77,7 +77,7 @@ predictor: torch_scheduler_kwargs: module_type: WarmUpLinearLR - max_num_epochs: 4 + max_num_epochs: 5 warmup_epochs: 1 verbose: False @@ -128,12 +128,12 @@ datamodule: module_type: "ADMETBenchmarkDataModule" args: + processed_graph_data_path: datacache/processed_graph_data/dummy_finetuning_task_head # TDC specific tdc_benchmark_names: [lipophilicity_astrazeneca] tdc_train_val_seed: ${constants.seed} - - batch_size_training: 
200 - batch_size_inference: 200 + batch_size_training: 20 + batch_size_inference: 20 num_workers: 0 persistent_workers: False diff --git a/graphium/data/datamodule.py b/graphium/data/datamodule.py index e2368d3b3..d2e5b5c74 100644 --- a/graphium/data/datamodule.py +++ b/graphium/data/datamodule.py @@ -1102,6 +1102,7 @@ def prepare_data(self): num_molecules, split_val=self.task_dataset_processing_params[task].split_val, split_test=self.task_dataset_processing_params[task].split_test, + sample_idx=sample_idx, split_seed=self.task_dataset_processing_params[task].seed, splits_path=self.task_dataset_processing_params[task].splits_path, split_names=self.task_dataset_processing_params[task].split_names, diff --git a/graphium/graphium_cpp/pybind11 b/graphium/graphium_cpp/pybind11 new file mode 160000 index 000000000..ccefee4c3 --- /dev/null +++ b/graphium/graphium_cpp/pybind11 @@ -0,0 +1 @@ +Subproject commit ccefee4c3187c2892fcf4590b1bbc850134b84bb diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index f48d1f425..0011a9473 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -12,7 +12,7 @@ """ -from typing import Union, Callable, Optional, Dict, Any +from typing import Union, Callable, Optional, Dict, Any, Literal import sys @@ -41,7 +41,7 @@ class Thresholder: def __init__( self, threshold: float, - operator: Union[str, Callable] = "greater", + operator: Union[Literal["greater", "gt", ">", "lower", "lt", "<"], Callable] = "greater", th_on_preds: bool = True, th_on_target: bool = False, ): @@ -77,10 +77,10 @@ def _get_operator(operator): """Operator can either be a string, or a callable""" if isinstance(operator, str): op_name = operator.lower() - if op_name in ["greater", "gt"]: + if op_name in ["greater", "gt", ">"]: op_str = ">" operator = op.gt - elif op_name in ["lower", "lt"]: + elif op_name in ["lower", "lt", "<"]: op_str = "<" operator = op.lt else: @@ -142,8 +142,8 @@ def __init__( self, metric: Union[str, 
torchmetrics.Metric, torch.nn.modules.loss._Loss], threshold_kwargs: Optional[Dict[str, Any]] = None, - target_nan_mask: Optional[Union[str, int]] = None, - multitask_handling: Optional[str] = None, + target_nan_mask: Union[Literal[None, "none", "ignore"], int] = None, + multitask_handling: Literal[None, "none", "flatten", "mean-per-label"] = None, squeeze_targets: bool = False, target_to_int: bool = False, **kwargs, @@ -317,7 +317,6 @@ def update(self, preds: Tensor, target: Tensor) -> Tensor: if self.thresholder is not None: preds, target = self.thresholder(preds, target) - target_nans = torch.isnan(target) # for the classifigression task, cast predictions from # (batch_size, n_targets * n_brackets) to (batch_size, n_targets, n_brackets) @@ -356,6 +355,7 @@ def update(self, preds: Tensor, target: Tensor) -> Tensor: elif self.multitask_handling == "mean-per-label": # Loop the columns (last dim) of the tensors, apply the nan filtering, compute the metrics per column, then average the metrics + target_nans = torch.isnan(target) target_list = [target[..., ii][~target_nans[..., ii]] for ii in range(target.shape[-1])] # TODO: make this more flexible to the target shape in the future if classifigression: @@ -411,14 +411,17 @@ def reset(self): def _filter_nans(self, preds: Tensor, target: Tensor): """Handle the NaNs according to the chosen options""" - target_nans = torch.isnan(target) - if self.target_nan_mask is None: - pass - elif isinstance(self.target_nan_mask, (int, float)): + if self.target_nan_mask is None: # No NaN handling + return preds, target + + target_nans = torch.isnan(target) + if ~target_nans.any(): # No NaNs + return preds, target + elif isinstance(self.target_nan_mask, (int, float)): # Replace NaNs target = target.clone() - target[torch.isnan(target)] = self.target_nan_mask - elif self.target_nan_mask == "ignore": + target[target_nans] = self.target_nan_mask + elif self.target_nan_mask == "ignore": # Remove NaNs target = target[~target_nans] preds = 
preds[~target_nans] else: diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index 2720470a0..46046a88d 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -390,7 +390,7 @@ def _general_step(self, batch: Dict[str, Tensor], step_name: Literal["train", "v preds[task] = preds[task].detach() targets_dict[task] = targets_dict[task].detach() - self.task_epoch_summary[step_name].update(targets_dict, preds) + self.task_epoch_summary[step_name].update(preds, targets_dict) step_dict = {} step_dict["loss"] = loss @@ -464,7 +464,7 @@ def flag_step(self, batch: Dict[str, Tensor], step_name: Literal["train", "val", step_dict[f"loss/{step_name}"] = loss.detach().cpu() step_dict["loss"] = loss step_dict["task_losses"] = task_losses - self.task_epoch_summary[step_name].update(targets, preds) + self.task_epoch_summary[step_name].update(preds, targets) return step_dict def on_train_batch_start(self, batch: Any, batch_idx: int) -> Optional[int]: @@ -501,13 +501,12 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: # If logging is skipped for this step, then log the important metrics anyway and return if self.skip_log_train_metrics: - if self.logger is not None: - self.log_dict( - dictionary=metrics_logs, - logger=self.logger, - on_step=True, - prog_bar=True, - ) + self.log_dict( + dictionary=metrics_logs, + logger=True, + on_step=True, + prog_bar=True, + ) return ### The code below is not executed if the logging is skipped for this step ### diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index ee5bca5d3..bfadb6623 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -299,7 +299,6 @@ def __getitem__(self, task: str) -> SingleTaskSummary: return self.task_summaries[task] def update(self, preds: Dict[str, Tensor], targets: Dict[str, Tensor]) -> None: - r""" update the state for all predictors Parameters: diff 
--git a/tests/test_finetuning.py b/tests/test_finetuning.py index a315194c5..768f1fbf5 100644 --- a/tests/test_finetuning.py +++ b/tests/test_finetuning.py @@ -16,6 +16,7 @@ import unittest as ut from copy import deepcopy from os.path import abspath, dirname +import shutil import torch from lightning.pytorch.callbacks import Callback @@ -60,9 +61,14 @@ def test_finetuning_from_task_head(self): # Initialize the accelerator cfg, accelerator_type = load_accelerator(cfg) + # If the data_cache directory exists, delete it for the purpose of the test + data_cache = cfg["datamodule"]["args"]["processed_graph_data_path"] + if os.path.exists(data_cache): + shutil.rmtree(data_cache) + # Load and initialize the dataset datamodule = load_datamodule(cfg, accelerator_type) - datamodule.task_specific_args["lipophilicity_astrazeneca"].sample_size = 100 + datamodule.task_specific_args["lipophilicity_astrazeneca"].sample_size = 300 # Initialize the network model_class, model_kwargs = load_architecture( @@ -250,9 +256,14 @@ def test_finetuning_from_gnn(self): # Initialize the accelerator cfg, accelerator_type = load_accelerator(cfg) + # If the data_cache directory exists, delete it for the purpose of the test + data_cache = cfg["datamodule"]["args"]["processed_graph_data_path"] + if os.path.exists(data_cache): + shutil.rmtree(data_cache) + # Load and initialize the dataset datamodule = load_datamodule(cfg, accelerator_type) - datamodule.task_specific_args["lipophilicity_astrazeneca"].sample_size = 100 + datamodule.task_specific_args["lipophilicity_astrazeneca"].sample_size = 300 # Initialize the network model_class, model_kwargs = load_architecture( From f15cd9af56dfec130d47d1ca17217ef1c68a5a35 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Tue, 16 Jul 2024 15:17:14 -0400 Subject: [PATCH 089/175] fixed missing metrics computation on `on_train_batch_end` --- graphium/trainer/predictor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/graphium/trainer/predictor.py 
b/graphium/trainer/predictor.py index 46046a88d..0935e8667 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -498,6 +498,7 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: losses[f"{task}/loss/train"] = this_losses metrics_logs.update(losses) + metrics_logs.update(self.task_epoch_summary["train"].compute()) # If logging is skipped for this step, then log the important metrics anyway and return if self.skip_log_train_metrics: From 214231335c1bd0d93c1adbb87d9e7bb29316353f Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Tue, 16 Jul 2024 15:25:14 -0400 Subject: [PATCH 090/175] Added toymix training to the unit-tests. Also useful to run in debug mode. --- tests/test_finetuning.py | 3 +- tests/test_training.py | 108 +++++++++++++++++++++++++++++++++++++-- 2 files changed, 107 insertions(+), 4 deletions(-) diff --git a/tests/test_finetuning.py b/tests/test_finetuning.py index 768f1fbf5..db71ac81b 100644 --- a/tests/test_finetuning.py +++ b/tests/test_finetuning.py @@ -30,7 +30,6 @@ load_metrics, load_predictor, load_trainer, - save_params_to_wandb, ) from graphium.finetuning import GraphFinetuning, modify_cfg_for_finetuning from graphium.trainer import PredictorModule @@ -236,6 +235,7 @@ def on_train_epoch_start(self, trainer, pl_module): # Run the model training trainer.fit(model=predictor, datamodule=datamodule) + trainer.test(model=predictor, datamodule=datamodule) def test_finetuning_from_gnn(self): # Skip test if PyTDC package not installed @@ -417,6 +417,7 @@ def on_train_epoch_start(self, trainer, pl_module): # Run the model training trainer.fit(model=predictor, datamodule=datamodule) + trainer.test(model=predictor, datamodule=datamodule) if __name__ == "__main__": diff --git a/tests/test_training.py b/tests/test_training.py index b737cc478..789bbca3a 100644 --- a/tests/test_training.py +++ b/tests/test_training.py @@ -17,7 +17,23 @@ import sys import subprocess import os -from unittest.mock import patch 
+import shutil +import unittest as ut + + +import hydra +from hydra.core.global_hydra import GlobalHydra +from omegaconf import OmegaConf +import os + +from graphium.config._loader import ( + load_accelerator, + load_architecture, + load_datamodule, + load_metrics, + load_predictor, + load_trainer, +) class test_CLITraining(): @classmethod @@ -97,8 +113,8 @@ def test_cpu_cli_training(self): @pytest.mark.ipu @pytest.mark.skip def test_ipu_cli_training(self): - with patch("poptorch.ipuHardwareIsAvailable", return_value=True): - with patch("lightning_graphcore.accelerator._IPU_AVAILABLE", new=True): + with ut.patch("poptorch.ipuHardwareIsAvailable", return_value=True): + with ut.patch("lightning_graphcore.accelerator._IPU_AVAILABLE", new=True): import poptorch assert poptorch.ipuHardwareIsAvailable() @@ -106,3 +122,89 @@ def test_ipu_cli_training(self): assert _IPU_AVAILABLE is True self.call_cli_with_overrides("ipu", "16-true") + + + +def initialize_hydra(config_path, job_name="app"): + if GlobalHydra.instance().is_initialized(): + GlobalHydra.instance().clear() + hydra.initialize(config_path=config_path, job_name=job_name) + +def compose_main_config(config_dir): + initialize_hydra(config_dir) + # Compose the main configuration + main_config = hydra.compose(config_name="main") + return main_config + +def compose_task_config(config_dir, task_name): + task_config_dir = os.path.join(config_dir, "tasks") + initialize_hydra(task_config_dir, job_name="compose_task") + # Compose the specific task configuration + task_config = hydra.compose(config_name=task_name) + return task_config + +class test_TrainToymix(ut.TestCase): + def test_train_toymix(self): + # Load the main configuration for toymix + CONFIG_DIR = "../expts/hydra-configs/" + cfg = compose_main_config(CONFIG_DIR) + cfg = OmegaConf.to_container(cfg, resolve=True) + cfg.pop("tasks") + + # Adapt the configuration to reduce the time it takes to run the test, less samples, less epochs + cfg["constants"]["max_epochs"] 
= 4 + cfg["trainer"]["trainer"]["check_val_every_n_epoch"] = 1 + cfg["trainer"]["trainer"]["max_epochs"] = 4 + + cfg["datamodule"]["args"]["batch_size_training"] = 20 + cfg["datamodule"]["args"]["batch_size_inference"] = 20 + cfg["datamodule"]["args"]["task_specific_args"]["zinc"]["sample_size"] = 300 + cfg["datamodule"]["args"]["task_specific_args"]["qm9"]["sample_size"] = 300 + cfg["datamodule"]["args"]["task_specific_args"]["tox21"]["sample_size"] = 300 + + + # Initialize the accelerator + cfg, accelerator_type = load_accelerator(cfg) + + # If the data_cache directory exists, delete it for the purpose of the test + data_cache = cfg["datamodule"]["args"]["processed_graph_data_path"] + if os.path.exists(data_cache): + shutil.rmtree(data_cache) + + # Load and initialize the dataset + datamodule = load_datamodule(cfg, accelerator_type) + + # Initialize the network + model_class, model_kwargs = load_architecture( + cfg, + in_dims=datamodule.in_dims, + ) + + datamodule.prepare_data() + + metrics = load_metrics(cfg) + + predictor = load_predictor( + cfg, + model_class, + model_kwargs, + metrics, + datamodule.get_task_levels(), + accelerator_type, + datamodule.featurization, + datamodule.task_norms, + ) + + metrics_on_progress_bar = predictor.get_metrics_on_progress_bar + trainer = load_trainer(cfg, accelerator_type, metrics_on_progress_bar=metrics_on_progress_bar) + + predictor.set_max_nodes_edges_per_graph(datamodule, stages=["train", "val"]) + + # Run the model training + trainer.fit(model=predictor, datamodule=datamodule) + trainer.test(model=predictor, datamodule=datamodule) + +if __name__ == "__main__": + config_dir = "../expts/hydra-configs/" # Path to your config directory + + ut.main() From 99e0cd65c8b4f166b596f6461c57ba35f83dec9e Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Tue, 16 Jul 2024 15:55:05 -0400 Subject: [PATCH 091/175] Adding `_global/` to some metrics logging into wandb --- graphium/trainer/predictor.py | 8 ++++---- 1 file changed, 4 
insertions(+), 4 deletions(-) diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index 0935e8667..7d2fc84ef 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -515,13 +515,13 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: # Get the throughput of the batch num_graphs = self.get_num_graphs(batch["features"]) tput = num_graphs / train_batch_time - metrics_logs["train/batch_time"] = train_batch_time - metrics_logs["train/batch_tput"] = tput + metrics_logs["_global/train/batch_time"] = train_batch_time + metrics_logs["_global/train/batch_tput"] = tput metrics_computed = self.task_epoch_summary["train"].compute() self.task_epoch_summary["train"].reset() metrics_logs.update(metrics_computed) - metrics_logs["train/grad_norm"] = self.model_grad.compute() + metrics_logs["_global/train/grad_norm"] = self.model_grad.compute() # Log the metrics self.log_dict( @@ -570,7 +570,7 @@ def on_train_epoch_end(self) -> None: epoch_time = time.time() - self.epoch_start_time epoch_time = torch.tensor(epoch_time) self.epoch_start_time = None - self.log("train/epoch_time", epoch_time, prog_bar=True, sync_dist=True, on_epoch=True) + self.log("_global/train/epoch_time", epoch_time, prog_bar=True, sync_dist=True, on_epoch=True) def on_validation_epoch_start(self) -> None: self.mean_val_time_tracker.reset() From 045ea5311270d97b8f996ced88d30994a3c65941 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Tue, 16 Jul 2024 17:05:56 -0400 Subject: [PATCH 092/175] Added better handling of metrics failure with `logger.warn` --- graphium/trainer/predictor_summaries.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index bfadb6623..af23ae479 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -14,7 +14,7 @@ r"""Classes to store information about resulting 
evaluation metrics when using a Predictor Module.""" -from typing import Any, Callable, Dict, List, Optional, Union, Literal, Iterable +from typing import Any, Callable, Dict, List, Optional, Union, Literal, Iterable, Set from loguru import logger from copy import deepcopy import inspect @@ -26,8 +26,6 @@ from torchmetrics.aggregation import BaseAggregator -from graphium.utils.tensor import nan_mean, nan_std, nan_median, tensor_fp16_to_fp32 - class SummaryInterface(object): r""" An interface to define the functions implemented by summary classes that implement SummaryInterface. @@ -114,6 +112,7 @@ def __init__( self.metrics_on_training_set.update(["std_preds", "std_target"]) self._cached_metrics: Dict[str, Tensor] = {} + self._logged_warnings: Set[str] = set() # Set to track which metrics have been logged @property def get_cached_metrics(self) -> Dict[str, Tensor]: @@ -168,7 +167,7 @@ def update(self, preds: Tensor, targets: Tensor) -> None: if ("preds" == varnames[0]) and ("target" == varnames[1]): # The typical case of `torchmetrics` metric_obj.update(preds, targets) - if ("preds" == varnames[1]) and ("target" == varnames[0]): + elif ("preds" == varnames[1]) and ("target" == varnames[0]): # Unusual case where the order of the arguments is reversed metric_obj.update(targets, preds) elif ("value" == varnames[0]) and ("preds" in metric_key): @@ -180,8 +179,11 @@ def update(self, preds: Tensor, targets: Tensor) -> None: else: raise ValueError(f"Metric {metric_key} update method signature `{varnames}` is not recognized.") - except: - pass + except Exception as err: + err_msg = f"Error for metric {metric_key} on task {self.task_name} and step {self.step_name}. 
Exception: {err}" + if err_msg not in self._logged_warnings: + logger.warning(err_msg) + self._logged_warnings.add(err_msg) def _compute(self, metrics_to_use: Optional[Union[List[str], Dict[str, Any]]] = None) -> Dict[str, Tensor]: From d8ba606bf55f0c8cc8963611a9e9ad21138a4ffc Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Tue, 16 Jul 2024 17:32:35 -0400 Subject: [PATCH 093/175] Fixed metric issues on gpu by casting to the right device prior to `.update` --- graphium/trainer/metrics.py | 12 +++++++++++- graphium/trainer/predictor_summaries.py | 5 +++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index 0011a9473..7ec29aa69 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -398,7 +398,7 @@ def update_compute(self, preds: Tensor, target: Tensor) -> Tensor: self.update(preds, target) return self.compute() - def reset(self): + def reset(self) -> None: r""" Reset the metric with the method `self.metric.reset` """ @@ -408,6 +408,16 @@ def reset(self): else: self.metric.reset() + def to(self, device: Union[str, torch.device]) -> None: + r""" + Move the metric to the device with the method `self.metric.to` + """ + if isinstance(self.metric, list): + for metric in self.metric: + metric.to(device) + else: + self.metric.to(device) + def _filter_nans(self, preds: Tensor, target: Tensor): """Handle the NaNs according to the chosen options""" diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index af23ae479..10a437698 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -161,6 +161,7 @@ def update(self, preds: Tensor, targets: Tensor) -> None: predictions: the predictions tensor """ for metric_key, metric_obj in self.metrics_to_use.items(): + metric_obj.to(preds.device) # Not sure if good for DDP, but otherwise it crashes try: # Check the `metric_obj.update` signature to know if it 
takes `preds` and `targets` or only one of them varnames = [val.name for val in inspect.signature(metric_obj.update).parameters.values()] @@ -381,9 +382,9 @@ def __init__(self, nan_strategy: Union[Literal["error", "warn", "ignore"], float def update(self, value: Union[float, Tensor], weight: Union[float, Tensor] = 1.0) -> None: if not isinstance(value, Tensor): - value = torch.as_tensor(value, dtype=torch.float32) + value = torch.as_tensor(value, dtype=torch.float32, device=self.device) if not isinstance(weight, Tensor): - weight = torch.as_tensor(weight, dtype=torch.float32) + weight = torch.as_tensor(weight, dtype=torch.float32, device=self.device) weight = torch.broadcast_to(weight, value.shape).clone() # Check whether `_cast_and_nan_check_input` takes in `weight` From 1bf27349a0a097eabd3a70fbf6aff93255278270 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Wed, 17 Jul 2024 15:54:27 -0400 Subject: [PATCH 094/175] Added losses to the metrics, such that they are computed on val and test too --- graphium/trainer/metrics.py | 8 +++-- graphium/trainer/predictor.py | 47 +++++++++++++++++++-------- graphium/trainer/predictor_options.py | 7 ++-- 3 files changed, 45 insertions(+), 17 deletions(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index 7ec29aa69..ab0d8c757 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -12,7 +12,7 @@ """ -from typing import Union, Callable, Optional, Dict, Any, Literal +from typing import Union, Callable, Optional, Dict, Any, Literal, List import sys @@ -504,7 +504,7 @@ class MetricToTorchMetrics(): def __init__(self, metric): self.metric = metric - self.scores = [] + self.scores: List[Tensor] = [] def update(self, preds: Tensor, target: Tensor): self.scores.append(self.metric(preds, target)) @@ -515,6 +515,10 @@ def compute(self): elif len(self.scores) == 1: return self.scores[0] return nan_mean(torch.stack(self.scores)) + + def to(self, device: Union[str, torch.device]): + for ii in 
range(len(self.scores)): + self.scores[ii] = self.scores[ii].to(device) def reset(self): self.scores = [] diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index 7d2fc84ef..6262d039b 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -23,6 +23,7 @@ from mup.optim import MuAdam from torch import Tensor, nn from torch_geometric.data import Batch, Data +from torchmetrics import Metric from graphium.config.config_convert import recursive_config_reformating from graphium.data.datamodule import BaseDataModule @@ -53,7 +54,7 @@ def __init__( scheduler_kwargs: Optional[Dict[str, Any]] = None, target_nan_mask: Optional[Union[str, int]] = None, multitask_handling: Optional[str] = None, - metrics: Dict[str, Callable] = None, + metrics: Dict[str, Dict[str, Union[Metric, "MetricWrapper"]]] = None, metrics_on_progress_bar: Dict[str, List[str]] = [], metrics_on_training_set: Optional[Dict[str, List[str]]] = None, flag_kwargs: Dict[str, Any] = None, @@ -138,11 +139,12 @@ def __init__( # Task-specific evalutation attributes self.loss_fun = {} + loss_names = {} self.metrics = {} self.metrics_on_progress_bar = {} self.metrics_on_training_set = {} for task in self.tasks: - self.loss_fun[task] = EvalOptions.parse_loss_fun(loss_fun[task]) + loss_names[task], self.loss_fun[task] = EvalOptions.parse_loss_fun(loss_fun[task]) self.metrics[task] = ( self._eval_options_dict[task].metrics if self._eval_options_dict[task].metrics is not None @@ -163,22 +165,31 @@ def __init__( # Set the parameters for optimizer options self.optim_options.set_kwargs() + # Add the loss to the metrics + metrics_with_loss = deepcopy(self.metrics) + for task in self.tasks: + metrics_with_loss[task][f"loss_{loss_names[task]}"] = MetricWrapper( + metric=MetricToTorchMetrics(self.loss_fun[task]), + target_nan_mask=self.target_nan_mask, + multitask_handling=self.multitask_handling, + ) + # Initialize the epoch summary self.task_epoch_summary = { "train": 
MultiTaskSummary( - task_metrics=self.metrics, + task_metrics=metrics_with_loss, step_name="train", task_metrics_on_progress_bar=None, task_metrics_on_training_set=self.metrics_on_training_set, ), "val": MultiTaskSummary( - task_metrics=self.metrics, + task_metrics=metrics_with_loss, step_name="val", task_metrics_on_progress_bar=self.metrics_on_progress_bar, task_metrics_on_training_set=None, ), "test": MultiTaskSummary( - task_metrics=self.metrics, + task_metrics=metrics_with_loss, step_name="test", task_metrics_on_progress_bar=None, task_metrics_on_training_set=None, @@ -241,6 +252,22 @@ def _get_task_key(self, task_level: str, task: str): if not task.startswith(task_prefix): task = task_prefix + task return task + + def _get_average_loss_from_outputs(self, outputs: Dict[Literal["loss", "task_losses"], Tensor], step_name: Literal["train", "val", "test"]) -> Dict[str, Tensor]: + r""" + Averages the loss over the different tasks + """ + global_loss = torch.as_tensor(outputs["loss"]).detach() + if global_loss.numel() > 1: + global_loss = global_loss[global_loss != 0].mean() + average_losses = {f"_global/loss/{step_name}": global_loss} + for task in self.tasks: + this_losses = torch.as_tensor(outputs["task_losses"][task]).detach() + if this_losses.numel() > 1: + this_losses = this_losses[this_losses != 0].mean() + average_losses[f"{task}/loss/{step_name}"] = this_losses + return average_losses + def configure_optimizers(self, impl=None): if impl is None: @@ -489,14 +516,8 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: # report the training loss for each individual tasks # get the mean loss value for individual tasks as they are a tensor of size --> gradient accumulation * replication * device_iter # filter zeros out for the individual losses - losses = {"_global/loss/train": outputs["loss"]} - for task in self.tasks: - this_losses = outputs["task_losses"][task] - if isinstance(this_losses, torch.Tensor): - if this_losses.numel() > 1: - 
this_losses = this_losses[this_losses != 0].mean() - - losses[f"{task}/loss/train"] = this_losses + losses = self._get_average_loss_from_outputs(outputs, step_name="train") + metrics_logs.update(losses) metrics_logs.update(self.task_epoch_summary["train"].compute()) diff --git a/graphium/trainer/predictor_options.py b/graphium/trainer/predictor_options.py index 358329e70..5303976a9 100644 --- a/graphium/trainer/predictor_options.py +++ b/graphium/trainer/predictor_options.py @@ -201,6 +201,7 @@ def parse_loss_fun(loss_fun: Union[str, Dict, Callable]) -> Callable: f"`loss_fun` expected to be one of the strings in {LOSS_DICT.keys()}. " f"Provided: {loss_fun}." ) + loss_name = loss_fun loss_fun = LOSS_DICT[loss_fun]() elif isinstance(loss_fun, dict): if loss_fun.get("name") is None: @@ -213,10 +214,12 @@ def parse_loss_fun(loss_fun: Union[str, Dict, Callable]) -> Callable: loss_fun = deepcopy(loss_fun) loss_name = loss_fun.pop("name") loss_fun = LOSS_DICT[loss_name](**loss_fun) - elif not callable(loss_fun): + elif callable(loss_fun): + loss_name = str(loss_fun) + else: raise ValueError(f"`loss_fun` must be `str`, `dict` or `callable`. 
Provided: {type(loss_fun)}") - return loss_fun + return loss_name, loss_fun @dataclass From 68b93610e6ec64d79f0ed6f33a3a14010bb2e4a0 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Wed, 17 Jul 2024 15:55:00 -0400 Subject: [PATCH 095/175] Restricting the numpy version due to issues with wandb --- env.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/env.yml b/env.yml index aa8ff8eb5..411da228a 100644 --- a/env.yml +++ b/env.yml @@ -12,7 +12,7 @@ dependencies: - platformdirs # scientific - - numpy + - numpy < 2.0 # Issue with wandb - scipy >=1.4 - pandas >=1.0 - scikit-learn From 911dfe9544361abe527d3bc184c6175aa284f0c0 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Wed, 17 Jul 2024 15:57:15 -0400 Subject: [PATCH 096/175] detaching preds --- graphium/trainer/metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index ab0d8c757..d4d8ac4d6 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -507,7 +507,7 @@ def __init__(self, metric): self.scores: List[Tensor] = [] def update(self, preds: Tensor, target: Tensor): - self.scores.append(self.metric(preds, target)) + self.scores.append(self.metric(preds.detach(), target)) def compute(self): if len(self.scores) == 0: From d34ac601a46f1ed51d260c7a2f5b7f99b9ebad3b Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Wed, 17 Jul 2024 16:09:23 -0400 Subject: [PATCH 097/175] Removed cuda version restriction --- env.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/env.yml b/env.yml index 411da228a..318d68575 100644 --- a/env.yml +++ b/env.yml @@ -28,7 +28,7 @@ dependencies: - gcsfs >=2021.6 # ML packages - - cuda-version == 11.2 # works also with CPU-only system. + - cuda-version # works also with CPU-only system. 
- pytorch >=1.12 - lightning >=2.0 - torchmetrics From b1f2e8608ab69584b3941735d9a5ade1a8b9c8e1 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Wed, 17 Jul 2024 16:47:47 -0400 Subject: [PATCH 098/175] Removed unnecessary detach, that broke the loss --- graphium/trainer/metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index d4d8ac4d6..ab0d8c757 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -507,7 +507,7 @@ def __init__(self, metric): self.scores: List[Tensor] = [] def update(self, preds: Tensor, target: Tensor): - self.scores.append(self.metric(preds.detach(), target)) + self.scores.append(self.metric(preds, target)) def compute(self): if len(self.scores) == 0: From 62b385a72b17ee0fc3c5c18e49900e6b69331640 Mon Sep 17 00:00:00 2001 From: Andrew Quirke Date: Tue, 30 Jul 2024 09:22:46 -0600 Subject: [PATCH 099/175] Updating dep versions for bh2 install --- env.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/env.yml b/env.yml index 25a6af0cf..62bf95a3c 100644 --- a/env.yml +++ b/env.yml @@ -12,7 +12,7 @@ dependencies: - platformdirs # scientific - - numpy + - numpy == 1.26.4 - scipy >=1.4 - pandas >=1.0 - scikit-learn @@ -28,7 +28,7 @@ dependencies: - gcsfs >=2021.6 # ML packages - - cuda-version == 11.2 # works also with CPU-only system. + - cuda-version # works also with CPU-only system. 
- pytorch >=1.12 - lightning >=2.0 - torchmetrics >=0.7.0,<0.11 @@ -41,7 +41,7 @@ dependencies: - pytorch_scatter >=2.0 # chemistry - - rdkit + - rdkit == 2024.03.4 - datamol >=0.10 - boost # needed by rdkit From 7f9112a4c2daaf4efc61bd095551cb158babd231 Mon Sep 17 00:00:00 2001 From: wenkelf Date: Thu, 8 Aug 2024 13:55:50 -0600 Subject: [PATCH 100/175] Fix lightning backend issue; add predict_step for inference --- graphium/trainer/predictor.py | 18 ++++++++++++++---- graphium/trainer/predictor_summaries.py | 2 +- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index 8cfb1ad28..b0c144c2b 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -562,6 +562,13 @@ def get_gradient_norm(self): total_norm = total_norm**0.5 return total_norm + + def predict_step(self, batch: Dict[str, Tensor]) -> Dict[str, Any]: + preds = self.forward(batch) # The dictionary of predictions + targets_dict = batch.get("labels") + + return preds, targets_dict + def validation_step(self, batch: Dict[str, Tensor], to_cpu: bool = True) -> Dict[str, Any]: return self._general_step(batch=batch, step_name="val", to_cpu=to_cpu) @@ -601,7 +608,10 @@ def _general_epoch_end(self, outputs: Dict[str, Any], step_name: str, device: st n_epochs=self.current_epoch, ) metrics_logs = self.task_epoch_summary.get_metrics_logs() - self.task_epoch_summary.set_results(task_metrics=metrics_logs) + + for task in metrics_logs.keys(): + for key, val in metrics_logs[task].items(): + metrics_logs[task][key] = val.to(self.device) return metrics_logs # Consider returning concatenated dict for logging @@ -614,7 +624,7 @@ def on_train_epoch_end(self) -> None: else: epoch_time = time.time() - self.epoch_start_time self.epoch_start_time = None - self.log("epoch_time", torch.tensor(epoch_time), sync_dist=True) + self.log("epoch_time", torch.tensor(epoch_time).to(self.device), sync_dist=True) def 
on_validation_epoch_start(self) -> None: self.mean_val_time_tracker.reset() @@ -641,8 +651,8 @@ def on_validation_epoch_end(self) -> None: ) self.validation_step_outputs.clear() concatenated_metrics_logs = self.task_epoch_summary.concatenate_metrics_logs(metrics_logs) - concatenated_metrics_logs["val/mean_time"] = torch.tensor(self.mean_val_time_tracker.mean_value) - concatenated_metrics_logs["val/mean_tput"] = self.mean_val_tput_tracker.mean_value + concatenated_metrics_logs["val/mean_time"] = torch.tensor(self.mean_val_time_tracker.mean_value).to(self.device) + concatenated_metrics_logs["val/mean_tput"] = self.mean_val_tput_tracker.mean_value.to(self.device) self.log_dict(concatenated_metrics_logs, sync_dist=True) # Save yaml file with the per-task metrics summaries diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 4cec79377..4f8c3b032 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -525,7 +525,7 @@ def concatenate_metrics_logs( concatenated_metrics_logs = {} for task in list(self.tasks) + ["_global"]: concatenated_metrics_logs.update(metrics_logs[task]) - concatenated_metrics_logs[f"loss/{self.step_name}"] = self.weighted_loss.detach().cpu() + concatenated_metrics_logs[f"loss/{self.step_name}"] = self.weighted_loss.detach().to(self.device) return concatenated_metrics_logs def metric_log_name( From 47b7d1cdb573be16f50062f7674a05b8e85510a3 Mon Sep 17 00:00:00 2001 From: wenkelf Date: Thu, 8 Aug 2024 21:38:29 -0600 Subject: [PATCH 101/175] Fixing device issue in metrics calculation --- graphium/trainer/predictor.py | 4 ++-- graphium/trainer/predictor_summaries.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index b0c144c2b..95d2c3166 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -650,7 +650,7 @@ def on_validation_epoch_end(self) -> None: 
outputs=self.validation_step_outputs, step_name="val", device="cpu" ) self.validation_step_outputs.clear() - concatenated_metrics_logs = self.task_epoch_summary.concatenate_metrics_logs(metrics_logs) + concatenated_metrics_logs = self.task_epoch_summary.concatenate_metrics_logs(metrics_logs, device=self.device) concatenated_metrics_logs["val/mean_time"] = torch.tensor(self.mean_val_time_tracker.mean_value).to(self.device) concatenated_metrics_logs["val/mean_tput"] = self.mean_val_tput_tracker.mean_value.to(self.device) self.log_dict(concatenated_metrics_logs, sync_dist=True) @@ -665,7 +665,7 @@ def on_test_batch_end(self, outputs: Any, batch: Any, batch_idx: int, dataloader def on_test_epoch_end(self) -> None: metrics_logs = self._general_epoch_end(outputs=self.test_step_outputs, step_name="test", device="cpu") self.test_step_outputs.clear() - concatenated_metrics_logs = self.task_epoch_summary.concatenate_metrics_logs(metrics_logs) + concatenated_metrics_logs = self.task_epoch_summary.concatenate_metrics_logs(metrics_logs, device=self.device) self.log_dict(concatenated_metrics_logs, sync_dist=True) diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 4f8c3b032..7a4d9292f 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -514,6 +514,7 @@ def get_metrics_logs( def concatenate_metrics_logs( self, metrics_logs: Dict[str, Dict[str, Tensor]], + device: str, ) -> Dict[str, Tensor]: r""" concatenate the metrics logs @@ -525,7 +526,7 @@ def concatenate_metrics_logs( concatenated_metrics_logs = {} for task in list(self.tasks) + ["_global"]: concatenated_metrics_logs.update(metrics_logs[task]) - concatenated_metrics_logs[f"loss/{self.step_name}"] = self.weighted_loss.detach().to(self.device) + concatenated_metrics_logs[f"loss/{self.step_name}"] = self.weighted_loss.detach().to(device) return concatenated_metrics_logs def metric_log_name( From 
9dbd0212c481588d7edcb656f4c1273e0267d291 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 15 Aug 2024 16:27:13 -0400 Subject: [PATCH 102/175] Minor gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 289f10a4d..cfd47eabf 100644 --- a/.gitignore +++ b/.gitignore @@ -38,6 +38,7 @@ graphium/data/cache/ graphium/data/b3lyp/ graphium/data/PCQM4Mv2/ graphium/data/PCQM4M/ +graphium/data/largemix/ graphium/data/neurips2023/small-dataset/ graphium/data/neurips2023/large-dataset/ graphium/data/neurips2023/dummy-dataset/ From 5a77cbe6c4f178dab5824a352b93eb8f619e31b5 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 16 Aug 2024 15:56:28 -0400 Subject: [PATCH 103/175] Fixed the error due to time metrics on CPU `No backend type associated with device type cpu` --- graphium/trainer/predictor.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index 6262d039b..eaf8865bc 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -591,7 +591,7 @@ def on_train_epoch_end(self) -> None: epoch_time = time.time() - self.epoch_start_time epoch_time = torch.tensor(epoch_time) self.epoch_start_time = None - self.log("_global/train/epoch_time", epoch_time, prog_bar=True, sync_dist=True, on_epoch=True) + self.log("_global/train/epoch_time", epoch_time, prog_bar=True, sync_dist=False, on_epoch=True) def on_validation_epoch_start(self) -> None: self.mean_val_time_tracker.reset() @@ -614,10 +614,14 @@ def on_validation_batch_end( def on_validation_epoch_end(self) -> None: metrics_logs = self._general_epoch_end(step_name="val") - metrics_logs["val/mean_time"] = torch.tensor(self.mean_val_time_tracker.mean_value) - metrics_logs["val/mean_tput"] = self.mean_val_tput_tracker.mean_value self.log_dict(metrics_logs, logger=True, prog_bar=True, sync_dist=True, on_epoch=True) + # Time metrics are tracked always on CPU, so we log them 
separatly + time_metrics = {} + time_metrics["_global/val/mean_time"] = torch.tensor(self.mean_val_time_tracker.mean_value) + time_metrics["_global/val/mean_tput"] = self.mean_val_tput_tracker.mean_value + self.log_dict(time_metrics, logger=True, prog_bar=False, sync_dist=False, on_epoch=True) + def on_test_epoch_start(self) -> None: self.task_epoch_summary["test"].reset() return super().on_test_epoch_start() From 7fba29d46b7fb3cd15b81feac93165aed9b4db97 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 16 Aug 2024 16:12:06 -0400 Subject: [PATCH 104/175] Added val epoch time --- graphium/trainer/predictor.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index eaf8865bc..b87eba7bc 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -588,12 +588,12 @@ def on_train_epoch_start(self) -> None: def on_train_epoch_end(self) -> None: if self.epoch_start_time is not None: - epoch_time = time.time() - self.epoch_start_time - epoch_time = torch.tensor(epoch_time) + epoch_time = torch.tensor(time.time() - self.epoch_start_time) self.epoch_start_time = None self.log("_global/train/epoch_time", epoch_time, prog_bar=True, sync_dist=False, on_epoch=True) def on_validation_epoch_start(self) -> None: + self.epoch_start_time = time.time() self.mean_val_time_tracker.reset() self.mean_val_tput_tracker.reset() self.task_epoch_summary["val"].reset() @@ -618,8 +618,11 @@ def on_validation_epoch_end(self) -> None: # Time metrics are tracked always on CPU, so we log them separatly time_metrics = {} - time_metrics["_global/val/mean_time"] = torch.tensor(self.mean_val_time_tracker.mean_value) + time_metrics["_global/val/mean_batch_time"] = torch.tensor(self.mean_val_time_tracker.mean_value) time_metrics["_global/val/mean_tput"] = self.mean_val_tput_tracker.mean_value + if self.epoch_start_time is not None: + time_metrics["_global/val/epoch_time"] = 
torch.tensor(time.time() - self.epoch_start_time) + self.epoch_start_time = None self.log_dict(time_metrics, logger=True, prog_bar=False, sync_dist=False, on_epoch=True) def on_test_epoch_start(self) -> None: From b59dc361fb67c963fd10da3f54dd754e576929d6 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 16 Aug 2024 22:58:51 -0400 Subject: [PATCH 105/175] Added logic to avoid crashing when resetting unused metrics --- graphium/trainer/predictor_summaries.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 10a437698..19194341c 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -89,7 +89,8 @@ def __init__( # Current predictor state # self.predictor_outputs = None self.task_name = task_name - self.logged_metrics_exceptions = [] # Track which metric exceptions have been logged + self.logged_metrics_exceptions: List[str] = [] # Track which metric exceptions have been logged + self.last_metrics_exceptions: List[str] = [] # Track which metric exceptions have been logged # Add default metrics if ("mean_preds" not in self.metrics) and compute_mean: @@ -195,6 +196,8 @@ def _compute(self, metrics_to_use: Optional[Union[List[str], Dict[str, Any]]] = metrics_to_use = list(metrics_to_use.keys()) else: raise ValueError(f"metrics_to_use must be a list or a dictionary. 
Got {type(metrics_to_use)}") + + self.last_metrics_exceptions = [] # Reset the exceptions for this step # Compute the metrics computed_metrics = {} @@ -205,11 +208,12 @@ def _compute(self, metrics_to_use: Optional[Union[List[str], Dict[str, Any]]] = computed_metrics[f"{metric_name}"] = metric_obj.compute() except Exception as e: # If the metric computation fails, return NaN and log a warning only once - computed_metrics[f"{metric_name}"] = torch.as_tensor(float("nan")) + computed_metrics[f"{metric_name}"] = torch.tensor(torch.nan, device=metric_obj.metric.device) # Warn only if it's the first warning for that metric if metric_name not in self.logged_metrics_exceptions: self.logged_metrics_exceptions.append(metric_name) logger.warning(f"Error for metric {metric_name}. NaN is returned. Exception: {e}") + self.last_metrics_exceptions.append(metric_name) return computed_metrics @@ -228,8 +232,15 @@ def reset(self) -> None: r""" reset the state of the metrics """ - for metric in self.metrics.values(): - metric.reset() + for metric_key, metric in self.metrics.items(): + try: + metric.reset() + except AttributeError as e: + metric_name = self.metric_log_name(metric_key) + # Skip error if the message is `AttributeError: 'Tensor' object has no attribute 'clear'. Did you mean: 'char'?` + # This error happens when there's nothing to reset, usually because the metric failed. 
+ if (metric_name not in self.last_metrics_exceptions) or ("'Tensor' object has no attribute 'clear'" not in str(e)): + raise e def get_results_on_progress_bar( self, From da3e3a179e84bb187619f1e0a7b4a790ea455d94 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Mon, 19 Aug 2024 09:30:42 -0400 Subject: [PATCH 106/175] Added `MetricWrapper.device` --- graphium/cli/train_finetune_test.py | 2 +- graphium/trainer/metrics.py | 9 +++++++++ graphium/trainer/predictor_summaries.py | 2 +- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/graphium/cli/train_finetune_test.py b/graphium/cli/train_finetune_test.py index f329b0edf..ae25733bc 100644 --- a/graphium/cli/train_finetune_test.py +++ b/graphium/cli/train_finetune_test.py @@ -44,7 +44,7 @@ TESTING_ONLY_CONFIG_KEY = "testing_only" -@hydra.main(version_base=None, config_path="../../expts/hydra-configs", config_name="main") +@hydra.main(version_base=None, config_path="/home/domix/Gitx/graphium/graphium/config/", config_name="loc-config_largemix") def cli(cfg: DictConfig) -> None: """ The main CLI endpoint for training, fine-tuning and evaluating Graphium models. 
diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index ab0d8c757..ae075ae34 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -418,6 +418,15 @@ def to(self, device: Union[str, torch.device]) -> None: else: self.metric.to(device) + @property + def device(self) -> torch.device: + r""" + Return the device of the metric with the method `self.metric.device` or `self.metric[0].device` + """ + if isinstance(self.metric, list): + return self.metric[0].device + return self.metric.device + def _filter_nans(self, preds: Tensor, target: Tensor): """Handle the NaNs according to the chosen options""" diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 19194341c..013cdbbe2 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -208,7 +208,7 @@ def _compute(self, metrics_to_use: Optional[Union[List[str], Dict[str, Any]]] = computed_metrics[f"{metric_name}"] = metric_obj.compute() except Exception as e: # If the metric computation fails, return NaN and log a warning only once - computed_metrics[f"{metric_name}"] = torch.tensor(torch.nan, device=metric_obj.metric.device) + computed_metrics[f"{metric_name}"] = torch.tensor(torch.nan, device=metric_obj.device) # Warn only if it's the first warning for that metric if metric_name not in self.logged_metrics_exceptions: self.logged_metrics_exceptions.append(metric_name) From 8bf0d41244cb0cbda6dbff38ca2da4f7b38b0c90 Mon Sep 17 00:00:00 2001 From: sft-managed Date: Mon, 19 Aug 2024 11:29:43 -0600 Subject: [PATCH 107/175] Disable caching model checkpoint through WandbLogger --- graphium/config/_loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphium/config/_loader.py b/graphium/config/_loader.py index 826c81c79..1d82f7f90 100644 --- a/graphium/config/_loader.py +++ b/graphium/config/_loader.py @@ -450,7 +450,7 @@ def load_trainer( name = wandb_cfg.pop("name", "main") if 
len(date_time_suffix) > 0: name += f"_{date_time_suffix}" - trainer_kwargs["logger"] = WandbLogger(name=name, log_model=True, **wandb_cfg) + trainer_kwargs["logger"] = WandbLogger(name=name, **wandb_cfg) progress_bar_callback = ProgressBarMetrics(metrics_on_progress_bar = metrics_on_progress_bar) callbacks.append(progress_bar_callback) From 1ec4969f7865a98ee6abeef8bce0e28b0a423438 Mon Sep 17 00:00:00 2001 From: sft-managed Date: Mon, 19 Aug 2024 12:19:41 -0600 Subject: [PATCH 108/175] Disabled caching model checkpoint through WandbLogger --- graphium/config/_loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphium/config/_loader.py b/graphium/config/_loader.py index b27aa9b4e..2b9055a83 100644 --- a/graphium/config/_loader.py +++ b/graphium/config/_loader.py @@ -447,7 +447,7 @@ def load_trainer( name = wandb_cfg.pop("name", "main") if len(date_time_suffix) > 0: name += f"_{date_time_suffix}" - trainer_kwargs["logger"] = WandbLogger(name=name, log_model=True, **wandb_cfg) + trainer_kwargs["logger"] = WandbLogger(name=name, **wandb_cfg) trainer_kwargs["callbacks"] = callbacks trainer = Trainer( From 9ba5a16da047461162b4c770e429059193bcb0cc Mon Sep 17 00:00:00 2001 From: wenkelf Date: Mon, 19 Aug 2024 17:39:44 -0600 Subject: [PATCH 109/175] Drafting unit test for node ordering --- graphium/data/dataset.py | 1 + .../data/dummy_node_label_order_data.parquet | Bin 0 -> 38550 bytes tests/test_node_label_order.py | 103 ++++++++++++++++++ 3 files changed, 104 insertions(+) create mode 100644 tests/data/dummy_node_label_order_data.parquet create mode 100644 tests/test_node_label_order.py diff --git a/graphium/data/dataset.py b/graphium/data/dataset.py index 498515fc3..6c5f7a1a0 100644 --- a/graphium/data/dataset.py +++ b/graphium/data/dataset.py @@ -195,6 +195,7 @@ def __getitem__(self, idx): datum = {"features": self.featurize_smiles(smiles_str)} else: datum = { + "smiles": smiles_str, "labels": self.load_graph_from_index(idx), "features": 
self.featurize_smiles(smiles_str), } diff --git a/tests/data/dummy_node_label_order_data.parquet b/tests/data/dummy_node_label_order_data.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a9a165d8257a3168f91b95091329763bd3005d2a GIT binary patch literal 38550 zcmeHw34Bvk_HbTFA*CUN(3Y@D3q+Pu-oB(w+O(ycv_O<@P%&vkTj)kts4Nym*#$&l zK+q~6Dj;s41HGYCs)8)zii(Ulg5okVioz(ucW%-)X_``;v3366&-~;i_q}uPJ^MZ9 z-23VzlbH&X2QDcO{5)ntU_hWuBa_{y4>fm_$)?66(0Ve9@>R&GEZ-h7#;=ErgMR|i zW29cnGE9xpQ%u%K-(fO}7!#_e=&WH=Q}h&@HOenYMiQY4S}y`P5C2%d9v7F>S=7*Z zBgc|qG|34Z8#45+%nShT;T@g-5JNQ6htTajeAgp;Q=8GmhS*Kr=KY5C!R!M3VwM zktk9Wp$|v#BrAp!BeXn6k}MTU)GAQISG`&yf7o~w5Ln;=P54dYS(c*0=`fO)C=MQwB+pA6%@B{V97mHR2hV7Z=S1ReHAl11 zXP}xTfed1nngnF&;7TRjm3-gZATTt~bE85n<;8`QN{dVMWLRNwfeBj4mXef;LaVJb zjMP(cfieZ9r|}n~XYd!NXYrSy=kS-L=iy69F?t~>2!o3Fi`Gl{i`7#}AIQ`cj6=nR z$rK{AKaweU`1;uS^t4P#&s##+QU`%v4Fmn56Mrp~+ca`LXPF>N$j@aNJ(UGYf+Z-D z!ie>1k*7If#DM^k6@bO2-f9pI#RZ=T0J<4Y3ck)tJg|=rXMjx%DH4O!G{vwCF^Hr& zief2vAkczniVL6xMq((Fb~wqi3R2@oUwbC`mlQC|I6LJR|40@nJt&b4aciclqt|8Cy4MubQIZ>EcVuk%p-gYyANcS}!a; z6+nl9=%iq+l4425_DBHDu_DdY)CKU20GeI%i;ORv7XY-g0$sC2%QG}65ml2dV@K4f zC13(wGfTlxZ0)EB&^3Vp#Rl%OU?>tXa1cpStVGdL@MTOEFZk-!JPG_G?og5x#V_rl z6lszoMv$Z^2%^A)nu3Za`zs|uV2Mlap_(;xE+a6I168MG0V`>qbE^%1S z(Y1?$eQG{ZNE}6ICx9l?ELfq)fi(k05={%fht(X(vOE!@7Ab*bj@<2IepL=Kx!Xrh zkty?hWD>rx_#&pj6DH*qxp|))t7pnyxr{RJmCGp_?#L;|{EA#5%zM%jbm58;++4!s zFY>hta9@^NOyA106VCn$ml^~YOCY}Vnr*a@L@zZs!>KX0+hj3|Ps&EwhAOzMQN-Y( zAn2(R6Z8~c^Mx90nPSeGLFrg9<(i2j8K8g#GZuJ$)aOc?;+WuT!v%0~Sf4q5>Ca%$ zydatKNt$N`E}R8zrNIuC%YnHDkqiTWATShYDMPc&;;nLysW}d}1@Kc858wiD-qgL7SiVO#V}HMz2csm+N_pZT56ak%F2UQ{VXJO4TBNSxWsn^+>RKvHs)Pdu6oc zNwolK5~q63UpC6pAZNfkD7h?p6cHn%dc|1w$R=8LsGjl%6`?qaQH3j%B$yDhF#WI5k~YFM7&_Ed$Eg{8za zRRL=#F;PwPu>M-sDJ4>X#Y#zuJbz9KrD#TwSl}eMNlS>D1*a(yv(*d()@Ioagp#~U z^jESxO&)$)%Y&l<@ztVvfeG!wMXe>lmBAWad( zO7v5Le({SLEiBL=H<9Qwl%!zgq$QeKoI61jK#puU1u+35NpN2jq@W!CK@#|SQVIv! 
zhDsbPs3b4aOOLBL2~)CWI4{s50JEH=(kWCI{S=%?29t70IJ(H~5(e*I_=I%l`Sh z$|C9d&6Q&t4uv%8O&d(6|+W7ov4=z=PPKD(wy^Gh!U-u zk_yNR;ay1^i@OH#7p&AAby6H1Y)piyUG@$reVFHRex83s07NCA}1M5 zbx+VI;5%nBDBz*xVb$w?vED- z4Z$N&JS&=>03SfnL=Ko5tP`t22Ebfc78wXy#QHl&g4TjEg)xG_YeIUfN#JofG3&x& zB@3R%azaUREO~s53LNkoKIYq%V0mxA0&0HUC#AeQSR`Dyl*8CbANEm5^Q!CKQ!P|# zHYlH7BNJ<%Q6>9C65KvZ%mDR;)m8$dV8IOP`^yBC!B5rb7b;M&M2AZd z>@re)rIP0;L9@#*00L5m5^En-aJ)pk2J0fplBN}*V3H8uhEuSdFi)>j(;zL9Fa|Ij z2M<*7e)ZMC{h$j~`;LsPzcxtZ8Su3r zPpK&&n>T%+q#(q$>6H{s^291F39AaM>Xryt%vsG<4UA1{PW7>EIDQGQua|txi~E4! zF2Rz^n1A#!|Kwwy=?7NylTRi*;LLTtU?6Z2%m;jBT+ao{X9@qV_4WzvAZD>aRM)UbR>f~G+w zXZXavGES54AACgt0x|ul7C2y-=^Pk?z_Oq<3Sewd9LS9XrB;!>6|l%?Ug)dfK&%7Q zSeHqHfhZIF7Z1yvKm*-xtG4*bBvtobS_&Kv2ND?yauzk?6lz$YASgKq9wA5{AmdaA z6{_cyN}2(2b?>F1IqC2na)~k>g`PaaQMBr7rILk3P_DB17b7?T@t}93nWdcR%zrk?K@!*f?&+RN$e(D{Q!1=@tZIIMoEB6C{*X;($~n9hX$3Q84rVo_Lpx4hbG0V@xrcO^U2^mHz-m zf?iOqQK>W<)go1GkgRBYhAdk%T_ej@{TL*lT3j2g$qx*m^?VjRRCPHBqFITPELY_W zGvL==kj_z@=JNoxz*0P~YUrt8V4*u~B9#&#t~wi_fPhjn1JoSi0P(C9z6C+j@PQx$ zi4Dr`1@Qs5G#$HQQndth43=@#J3ZB)UJ|9s@E-)8M1ZI}T(z{nj1D^AO96>R%|3|9 zK=~*L#voFMOhaS^QZb-z!w4n|ti=MQ+3g<+*#*R~z-a~ywntwzEa$AG*{TSDrCZ_{ zVzC@n_hAE+5VX?Arpaj2juE3Fm;kk@Qz-!pvG8Rzcx#C|-dAP5%^#NO7+)D{PFJMt z^)VmpW8Moc3g04A_JPC0zc{#LYE*K5UZ5;+XyEWrsDG#fy9ah3#LGsm?LIX-0hd4U z7X|5Cg5D#E!?z%~9|Z}5UIaK?E@849U$pa!$6t^QLt+QD5?H4owZkB#x;m^kT!x(g)5IPY`J=brd6a0`HN@&}`{ko`G|5IFyHRBId= zpaV^u1LE2REEf(~XBz03m8T%V1Ub7Uf`X$@t6UJ0*f4;^^ElE$Tp@u{hohJSL_k>u z58zL14n=_$wgskW&`K}2p$g-LGGU(f?d}fE#a4ke7|g;= z>A^glY7Jj*^+6{j26ZnHI`5LsE~4|xVJ(5f^E33DIfOH&;aTqEOzf2f~K7kWg-HdlQO}ZEr#;vh7V=nDlZN z>XcsYdbJ-4s+isk2p2WmjsYc3FZKgA=!G;G*b8m+Jef?Uj_`qN03r8P(|1&EfZw{^ zcMb|v_UB~X)%~@7i%WBCrM8^u*>lPY^9yWc)0vz=pP+!C##Kbn_B(YeVs(l?m#jaR ztk&iHKbNf1us@e9Z(Ce{E?J)YUTr>m8LJQ%xBa-{w_K=kj6NdTCZrMHb)AqW>cPx}miCkHN-y${MJiQE)%3ICO8(?mWi|U3Yp)-S{i4Ydd@FN?I%$xI2Acl@*!NiHO84!CG1R;NnpOsk{9Ah;6}x zeIJlarkw*vN1ntQ{(v57aMi+Zfo3oc~#@V_{zr5xU9x2!R3u@wGa2w$}v3F 
z4p+;&_8r>jMw7-#ldZ^Bnmc!Td2zX=z)b;P`wO9acLS!&HK@lF9V_5UTV~{d1BsDe z><}V}K64`*?q3@D7i(nXLt`I}^f_4+`Cf5Rmskp>7ubq&%fWfLQFrN4LU-Szm^#-JZt@ z-NDB&b*`a3ChC~F=}#UrjvTwu_}=dG##zN%jA2Q*^CY3W`UIxVHMGY>9aGo$(dQ!_FPe!Vv~t`RPwHxUr*8WWH|heMnwbw4 zSDtT~UT7&RYo(Pto+5OwY{!(jhV_`FW6C~FnIAbU<$!VBWHK`4+r^Q)hkX|LVC7k3 zz@Q%@|NTvV4$_r%}LMz98@uaSncgj9`wgbxCHS+W`gzlTCF=eh{Jtpax zvSVKih_vU-GW?kOvhnJ8I`T@|PUGXVBxB5rRmN|Y*BCXMBaFstcY4uC46T*pzIaks z%R6POp6h@zcWqp~o6tSJ3sdGA)?<>6DJ%24o#*?mJ?^K?vH(gqd(?a6z%)l1&yekls(|7xDdf06M5_oj9Y-+$>a{>bM;`15Bz^+Ftm z(8_UNJgKYYow%zncTAkSPF{P7(5-q26XzP)W1fzQo3i*~^0hufcWgiBZ! z%4KJN%@)3s$*nmsl{@!X1h@3G!3%M?r&f;p;z?aC@5JqWwPWJk)$)~B2;Cd|FmbMt zJ?80{xFe%C88rVs5i#WS90T{*V8eISrTg!CEZwj+c%@8le${oiMx2fjkuw$)iOKJQkn~8`ofBWg8W&wqE`C({eB22_hTwuQ+Q0+F_nww zZEDEgGW*a63&{rSxfdEXrd2mgi_K^V*rPwRea+@WXNpfA8W6s+zAcsbu2zow;7MIA z?^GT*cymS}qX^4-_ns2t*5V+)F_a`Id2>-&v_?)+g)qiX_>={lzItEc-lth#sLp>ko@q3;_5 z8a|C3-w-qXgF_|K*7|pngATp+Ovs_HU-I{&ulTN3j^Xj7u9kNiA3b{0G`ef+mLr62 z_nVkT*90EZbxh;%o&6e4EFXAihJM$fo;MOF{0)~fbBu$gE;kn4a?VRQgL`P@7!FVBYI&z|=i6>HHWgCJO7d)_ z#o77gbKL^#-G3u==p?4fHKfM`9aDwkb0fFE{iSi#!l{wVvc5H*KtCJj)8it49lY82 z(xW{iSMg-zPXoNAe7L7pj^Xg6u9kPIPQB+wRa0PHQeY{vO}E<0%BRoBFL$Htqjw42 zSMOlTT*G=y;+3+5zzGuqHI5g#hOaoDe=G_rDpA2xUWj^U)rP1Q?7LCihKHi=`RmJ3 zXMcS^YW=P~xI7dRsB;~D(#r7@@GAfp_o@}p(B1!`8(D!) z$5Nh?Go!qz_SIZ`_>>6xlF;3MiqLKO0Mpg(H2))X?LF;A*Fbl=n(7a&N-Iw4_I?cO z-beo>b*aBDY}|DAi^G@M%ErL-7aRL6xUKQRl~s-Xk}4ZVidl`H40DuKJfwzuXyq6l zOU>14M{3{y-Hp1Y`h!zyZK^9y(!M`K=$4(qq`AiSn8~Z)b|?a0P)rD1=Ht-Gb@EGn zeH5QBLczWdpg79|XvcuX=t-4C>QS_RzzHamW&FJI%2{b{Jqm+H?kiOf! 
zXqzSl4SYNneazp7uKWHKZTm0+=??cW4jg^n@YcKcp&1juG8%_nK&nf7Q0ZYm`wQ=% zJaDJF4~p3>M{&A0P{~Ony6?RNWbU~UU7Po@QU6@9ad)qm4V%`68tz-P*1)W+G+q_D z*=@^4*`u@M_T>i^_P1Y~ihk-j6&=k{pxY~ZqMYZC8M4>EVT?Jq-e}%;uko+r`bGY0 zQ&QxSrH`OnY{TuFf4Yi3KmAX1PpI6kpL+p~n0p2#C!Rt6+0UZ8-s&E?b>QI0I%0;Q z|Ej;E#`<$;Px=q&yK4&jx;$U|j|ZlrCFBq^^MOcIn9iW{)AVTUh}+Ph@50baWj&3v zHZ3#WbJuZ0!uAH^s{?9`z2_hHmz4|NaNs@BcY^#c&pl`6L<9RrAq3o5Rt(7%Fmu z{gC0>ydTin1zP*%wdaxl=(T8L^}|SWT2aSr3q&)23`B{!_ZXsmZ$;O?I9YdQ2#cZ) z$s&ouuhD=3c66lZ3<5XwsZZeNKA~7soX0S=a?B`L8G*gn^|@N0(|ZVVAbj<>w$QF6 zx?R)w$}evAz4l80;d|}xYPj5j9n~_QZi-Ss^028n!W*nLH)G- z2MioEc*re7gF}W5AECQ-RdeQ6-#u^s zf`#|gEV}nTKt^}!TbId+fknkRw&|X!g1jKVth|S+GIs8t76|2fVdd)@# z)r)9Y-9_~M^Zw}C#!$4}tU(pV73dpHAX=`~A@pH)bn8c)k=VZCz~y7}BW^js*ZuTT zLtV(>PYqX#SE9l<522!~XOTVUA86BTJz{MFB5i7PvUgwfT-Ei64a`8}(V8I!{|yHt zURiys@v(|5n-3$|$WVHGFbW}fREc%%I3~6sYkI1@}ND-_?EB6jSBlh^AHTxpap^4K_-Wmh? zsrvGPo--yGF2wrOEf{>t@T@V-@cw|&D84ZXo!xK_tq=bPTD6+P zduxrx14l9=w(oloP5tT>v}g*4wyfWX4peVOLk?-trokEL$AApv^TWwHRG)_4nErF! zS6>W9dDBiAlI}F1uewQS`1haGd9^;wC@8LhXt1WaTA<7S2d(ejU)-#(Z!7C_rakT2 z-s&ra?!b50_FO#h;D}e-d%Q>BGd(5*en~i1zLVW9ogn6iOhoTIIT!7--eMoH$mGBkbv+kPu-9Ex8OJZYh++$0MDJQpf(L&Ag^PpiPhR*Y{BP?vAJ|km*gm){ z#2zLj7>_JFyMNNPe;OWY{4!!mnAW&pP#xN>EV4fqzTCd(!C-V>uUk;&zk1aTI`Mkl zuKIV3JGhmGfIFk>rf9!5_S-xt^7M}>k?XJTLH56_Mt@tQwEu8~u>1A<2%XwE);{^b z&*=UB{&w{@pQ6})%E)z0LS*fhX#1pw9>}1+fWDYqi)xOS+p~Kb?WZq3i@2=k>hgEx zq1)GOLZ){KyFM!jy>ubguC98|_|0Oo@%bnHB42v;q+!^Wmy8b_G@*sL4G7hbv9mv_ z>{|=Q+8-}?1`XDqLzn7%+W+~&7bvmzEyM1d+{oM-o$>hgCnKgWr0vf?`6jySNPzul z?*RMDpZVLLpZN;f^JNg4eSVz%lGYGO&DntV-*ph39yQ87aN!f^)+a6_pPIcWrZgI{ zKfR2yv>&2f>;;1+P;alL9@@X*ft_`~-k)zA8kdLqoVC|o;g;c5YpYM-Gq7rXMYOD1 z7!lAFU@MM~TrIGhwM0}u_-b0UuHx0|7sB`AJm->y!|8->-L76=OLXgT%RTqp6*W?; zT5_MYK2IF7dPz?7)9GOwR?KL)Fvs}#>b%&|e@WS~Zsy^cuim+5LqXiL9~QmxXwmCm z|9io~$4e4I2R!)3*3vf%^pBm~QJ%Cb^SM(yE02C#ZU1!7oC#r@PJH=N^|7M;f4}(3 zJQYl-TeiffW#3vH=r9duDA>{R^c4}**fZ$;p1OM$`{-W!6`P2QTo2-J)FDD#$5G!cZ=mN+OhX?( 
znTD?IwWGS*>(RN`d(p&)KCIITN9rb4EkGY^^*27Xs}@BqQP{6sRoHv4pMXAFmx#VA zzZLzubu`lMxv%bekCDdiVZ9@k4*xmg&B{BCkKFwiWB+qCXz?CBvX#DodXIVpCFd_j zyIBHx*RGYm{DHSYYiuI?|7-;eyBUXCVKtwLMR_}dp=?r9&q z<|*_*zgV!$CBR*A z488fe+TQP9fp%Gm+CJyS?I^$eLFE6P8C{-wH!42$h#@`0A5D2;r=e=I0nG^PY0n%7 zyod?1KXiLP`;S|K>~zIr=<&}^p?jBYMC+DLHNLv70&SJmqdvEPg{IU#kK)Dm(Cdrd zMlUWHh7z8B6xoPPX!m2NuE)j)kudOH6iY5Zi>|LU{P0;hdh4Ax(b{j8dN&cwDA>q^ zX|a~LTK}^qa&n!UiS+kkBHq`K76WNqg|sSg;4i&ziRy%|XVt0Os?wtHE2o~_Ul zXp{viNBDR>5I#aSVGuCS@!aDeQ1c@NbZv7eMHYx}d!FiOb+_eMg!8V)xdP`+FB|jD z2kq``-}tfh#*ed|kG*bAw*RPUP;b4O25fJC$vI{_Pn(8q=V{Z(9c~dgr?>%_v&P;b zn6pUj1nZ#6dow^y#NBwLCK}rm>YNjJt#mmbd9M~ZpWUe9;)H0oY9Zr%wBGZpK%cI)=c^;X?QO@lL9Xu4L+n|Umse_ZoqL*G z*Qv+4b)9;?TGyEeopqgk;#k);M}Ku)a|Tz}HHTq!oqsA-*ZIdob)A1sQ`aR2CUsqM za#7bMM+kLYdzMevwTJ3-U3n!BKK(m6cmc%b`g+eixU( zPs;h*=J-873m!NJobDLi)--PGVP@pp3Ua2mzURbidimTEJYPsfQGPZYP;sD8QCI>m z+ciBXw$8L=m*e||@VaDf}$ zRKz0pM**A@p1F}<)YW>D$dkD3K>U?C{66r1KQRZ$Z-4qf3x%epBvdQFMpzN(4JJ-ezrVLa_5~*ki&DP1zfKr*P2qf?0b) zZ4J>WNVI+*tUW~9LUjr<9&d5Ahm9AoPDID!<*OSbs-|{jc1$9l7(2eXUEplTN$I!a zF`DKokV>qD<^o%MYBioGjVa5w7RHrl-VS}^Im#MeRhAf&RA@;{Ez69PDl*cj0t{y@ zN-eX-z&J(GfEUaS|KA2%dbHzNVo_>^B`u{gGk!`w#sy%@vl9wt~=X zDac?ZJD;bek{M~{yqx&)m?q#yYDESu%>vqs9JCjtPBG_`Y+@1B8h00;TJTCNl;#32 z=VqpvXJjyW)z-9vvdr|98OgMBuFS%M0&Bc^MhlIAOE%D-U6?ux_|In3REZUMkzSoL z%faj4pFW4oW`GPpzFzTe#=$A;%#zHsIe=TU%z>Xw(yYw%qymsf6Me}JK4!L#l~Dc@3fQVTqT@t#cjy+vrJRVb0u)^yuPrI<5@*xn~79@v6cy>5$d3v83!rnu{Ip*3F0 z%ZW{%+hmuNH9eYh=f4L%zm30zU>B)Uv6ZGO+tY;>S$M*F)7wH1-nF$$4|=GAocK8K zRpz`Fn@=Fi5)<5fL`+gqc41tVB~Hr6x|2?~@aqP;kc7vbkx9o@wQFOYi)(?k5PSp! 
zJ}F)T-&5t#B`?0AnKrNR-t5go-d^m@Lk6vV-;MQ`iJl8Sai%35d~TdnmI?kb2iu|3 zw%h0Tjr(=EUS}sH!J3m&0{kmWr{@4~b4aW|9qITD`*pp(S7pPxok14>|K`kqHKQVv zF@v9T`r;et(hd7{xlYj#d!#3kmbADT*0i{4SjY1^k^|^qL3WY31bkC@MtVwhIz9dm z^5<=E?@))@;_h5W{s;Iz^SAT8{dK=n{g9i^ZvM)XHpmSzZGXJ(wV}-^x8I_Np0qW|?SD!q%dAWe z&Ph9tx7_@r8*e=OcX{09U6}7e@Ml7 zU<#Q|XO=Y86mFE4ltC>FVt}Lqs9(*1n3?>8a3?e4&4oE}Ql2#-x)|cxqKtG?M?7!0 zPuJ^;c~)k8Ch#0`5YT_9@7#PY&UDzf%l%YNJm6FW`hfF{!W>*rsf2n#Sx52l4f}PS zU#hUuX3%LjUz85D()fbX%rv$=o$9cUH=Tkpt8?PYTWU7xzhjNhS_n16RPZM)@l=za zch=>+*fao1Dt;P+fBlkMPJ4y^6jbU~#xANC_+66d7kvsDvjn(}e|+->iH2ESKrNs}rZ zW6VsfNX}>HBo>jd$6}g+FIK!46O#*jF|ozbuuW5jw|)|G6JY-Zc4Y7s+R3JP_$9&~ zkZB^`tC^GwdqFX5H0uoq;?#cc{Yn-Z&Hq5pW;{xOZi z+ds*s80hcV2AYi9ld1qThQ(d)z}rX@@upF<14%rFDJGU3H%^2fW zb5kl4H`~|ICOgGjPtnydo|HRg(ztQBWtxL~IyRy3{#5hFF+Ri%1y($VDK@4BJ`wlt z5Fab<)jmEhdov}@gz?5Dw~T*N_?Rlj%xJ=Y67KJYkCPv6{K1$2YvI$b|H_Rm!}BLk zDjGKqZ`Vyuc2Et1#|_67UmX02FCa719O5y}s%(aj&Thsh7Apo;rDk}isAl1I9DWqv z#wWV+S}dQqJGgP<#$+^)ca!qrTi}y%e+NHe9ZZP91U2ak{)H9U6y03IZuV!MwQLM& znpj!d#1GT>F_o?K#f?4(s6*b(`wq>%sAa8&SgyIYZ%QcToA5K4CpC}nBZXmVna3G%BL14aW~qD`?gg;w%*_YZi`Xck|Z z<=sJE1O8~jgy!)~GbT0T@01VTopzT`0#>DF`a8;Rfo+nDAEp-ko%ZhXcaAxno8&j< zhs%dRKIiB)sj|9RUlYf*_HX~`_|5ec$M|_I<3~^aoyKpjMLEW=ZW%u&!(;qn4B)0O z>A3BMKkl_VQ%oh(tbbGf`|%P@=9ckHlgBmtU#I;31N_FDa$ClWPVQhlSZKY{+qBo< zlz(N*crl6p-FRRJ@EOxGUhMe)Zan95lu`+%%ZJIRG13%o;ZM}o@J8u5Zl>@`w43I&S zJjz%0q-Qf`5Lm~6nc64L|xfVdM#$-ptO!_c5}Gf~meX2*D*_V0|hTxU{d+7g<^rNn9o zRvkk)`D-x9v_nfQrL!w+ Date: Wed, 21 Aug 2024 11:24:17 -0400 Subject: [PATCH 110/175] Improved the testing of the metrics reset, update, compute --- tests/test_metrics.py | 84 +++++++++++++++++++++++++++++++++---------- 1 file changed, 66 insertions(+), 18 deletions(-) diff --git a/tests/test_metrics.py b/tests/test_metrics.py index 6c089c2bb..9bfd936ee 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -27,7 +27,7 @@ Thresholder, ) -from torchmetrics.functional import mean_squared_error, pearson_corrcoef +from torchmetrics.functional 
import mean_squared_error, pearson_corrcoef, auroc from torchmetrics import MeanSquaredError @@ -220,46 +220,94 @@ def test_classifigression_target_squeezing(self): assert score == expected_score - def test_update_compute(self): + def test_update_compute_reset(self): torch.manual_seed(42) - preds = torch.rand(100, dtype=torch.float32) - target = torch.rand(100, dtype=torch.float32) - th = 0.7 - # Test the update and compute with accuracy - preds_greater = preds > th - target_greater = target > th - accuracy = (preds_greater == target_greater).float().mean() - - for batch_size in [1, 5, 25, 100]: - metric = MetricWrapper( + # ---------- ACCURACY ---------- + metric = MetricWrapper( metric="accuracy", threshold_kwargs={"threshold": th, "operator": "greater"}, task="binary", ) + for batch_size in [1, 5, 25, 100]: + # Generate random predictions and targets, and compute the true accuracy + preds = torch.rand(100, dtype=torch.float32) + target = torch.rand(100, dtype=torch.float32) + preds_greater = preds > th + target_greater = target > th + true_accuracy = (preds_greater == target_greater).float().mean() + + # Test the reset, update and compute metric.reset() for ii in range(0, 100, batch_size): preds_batch = preds[ii : ii + batch_size] target_batch = target_greater[ii : ii + batch_size] metric.update(preds_batch, target_batch) - self.assertAlmostEqual(metric.compute(), accuracy, places=5) + self.assertAlmostEqual(metric.compute(), true_accuracy, places=5, msg=f"Error for batch_size={batch_size}") - # Test the update and compute with pearsonr - pearson = pearson_corrcoef(preds, target) - + # ---------- PEARSONR ---------- + metric = MetricWrapper( + metric="pearsonr", + ) for batch_size in [1, 5, 25, 100]: - metric = MetricWrapper( + # Generate random predictions and targets, and compute the true pearsonr + preds = torch.rand(100, dtype=torch.float32) + target = torch.rand(100, dtype=torch.float32) + true_pearson = pearson_corrcoef(preds, target) + + # Test the reset, 
update and compute with pearsonr + metric.reset() + for ii in range(0, 100, batch_size): + preds_batch = preds[ii : ii + batch_size] + target_batch = target[ii : ii + batch_size] + metric.update(preds_batch, target_batch) + + self.assertAlmostEqual(metric.compute().numpy(), true_pearson.numpy(), places=5, msg=f"Error for batch_size={batch_size}") + + + # ---------- PEARSONR with mean-per-label ---------- + + metric = MetricWrapper( metric="pearsonr", + multitask_handling="mean-per-label", ) + for batch_size in [1, 5, 25, 100]: + # Generate random predictions and targets, and compute the true pearsonr + preds = torch.rand(100, 10, dtype=torch.float32) + target = torch.rand(100, 10, dtype=torch.float32) + true_pearson = pearson_corrcoef(preds, target).mean().numpy() + + # Test the pearson reset, update and compute with mean-per-label metric.reset() for ii in range(0, 100, batch_size): preds_batch = preds[ii : ii + batch_size] target_batch = target[ii : ii + batch_size] metric.update(preds_batch, target_batch) - self.assertAlmostEqual(metric.compute().numpy(), pearson.numpy(), places=5) + self.assertAlmostEqual(metric.compute().numpy(), true_pearson, places=5, msg=f"Error for batch_size={batch_size}") + + # ---------- AUROC with mean-per-label ---------- + metric = MetricWrapper( + metric="auroc", + target_to_int=True, + multitask_handling="mean-per-label", + task="binary", + ) + for batch_size in [1, 5, 25, 100]: + # Generate random predictions and targets, and compute the true auroc + preds = torch.rand(100, 10, dtype=torch.float32) + target = (0.5*preds + 0.5*torch.rand(100, 10, dtype=torch.float32)) > th + true_auroc = torch.stack([auroc(preds[:, ii], target[:, ii], task="binary") for ii in range(preds.shape[1])]).mean().numpy() + # Test the auroc reset, update and compute with mean-per-label + metric.reset() + for ii in range(0, 100, batch_size): + preds_batch = preds[ii : ii + batch_size] + target_batch = target[ii : ii + batch_size] + metric.update(preds_batch, 
target_batch) + self.assertAlmostEqual(metric.compute().numpy(), true_auroc, places=5, msg=f"Error for batch_size={batch_size}") + if __name__ == "__main__": From d2f84f2987775dacaf99772df6b4e5905baa83e1 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Wed, 21 Aug 2024 11:39:42 -0400 Subject: [PATCH 111/175] Reverted wrong change in `train_finetune_test.py --- graphium/cli/train_finetune_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphium/cli/train_finetune_test.py b/graphium/cli/train_finetune_test.py index ae25733bc..f329b0edf 100644 --- a/graphium/cli/train_finetune_test.py +++ b/graphium/cli/train_finetune_test.py @@ -44,7 +44,7 @@ TESTING_ONLY_CONFIG_KEY = "testing_only" -@hydra.main(version_base=None, config_path="/home/domix/Gitx/graphium/graphium/config/", config_name="loc-config_largemix") +@hydra.main(version_base=None, config_path="../../expts/hydra-configs", config_name="main") def cli(cfg: DictConfig) -> None: """ The main CLI endpoint for training, fine-tuning and evaluating Graphium models. 
From e9be441b217e4b2d2fa9dc165453aea62e99f8d0 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Wed, 21 Aug 2024 13:15:27 -0400 Subject: [PATCH 112/175] Improved __len__ in MultitaskDataModule --- graphium/data/datamodule.py | 40 +++++++++++++++++++------ graphium/trainer/predictor_summaries.py | 4 +-- 2 files changed, 33 insertions(+), 11 deletions(-) diff --git a/graphium/data/datamodule.py b/graphium/data/datamodule.py index d2e5b5c74..e7366d2c2 100644 --- a/graphium/data/datamodule.py +++ b/graphium/data/datamodule.py @@ -845,6 +845,7 @@ def __init__( ) IPUDataModuleModifier.__init__(self, **kwargs) + self._len = None self.task_specific_args = task_specific_args self.task_dataset_processing_params = {} @@ -929,6 +930,30 @@ def encode_feature_options(options, name, encoding_function): if self._ready_to_load_all_from_file(): self._data_is_prepared = True + self._len = self._get_len_from_cached_file() + + def _get_len_from_cached_file(self): + if self._ready_to_load_all_from_file(): + self._data_is_prepared = True + train_metadata = graphium_cpp.load_metadata_tensors( + self.processed_graph_data_path, "train", self.data_hash + ) + val_metadata = graphium_cpp.load_metadata_tensors( + self.processed_graph_data_path, "val", self.data_hash + ) + test_metadata = graphium_cpp.load_metadata_tensors( + self.processed_graph_data_path, "test", self.data_hash + ) + length = 0 + if len(train_metadata) > 0: + length += len(train_metadata[2]) + if len(val_metadata) > 0: + length += len(val_metadata[2]) + if len(test_metadata) > 0: + length += len(test_metadata[2]) + else: + raise ValueError("Data is not prepared. 
Please call prepare_data() first.") + return length def _parse_caching_args(self, processed_graph_data_path): """ @@ -1139,6 +1164,7 @@ def prepare_data(self): self.explicit_H, self.preprocessing_n_jobs, ) + self._len = self._get_len_from_cached_file() for task, stats in all_stats.items(): if len(stats) < 4: @@ -1719,18 +1745,14 @@ def get_data_hash(self): def __len__(self) -> int: r""" - Returns the number of elements of the current DataModule, which is the combined size of all single-task datasets given. + Returns the number of smiles of the current DataModule, which depends on all the smiles from all tasks. + If `prepare_data` is not called, the length is unknown and will raise an error. Returns: num_elements: Number of elements in the current DataModule """ - num_elements = 0 - for task, args in self.task_dataset_processing_params.items(): - if args.df is None: - df = self._read_table(args.df_path, usecols=[args.smiles_col]) - num_elements += len(df) - else: - num_elements += len(args.df) - return num_elements + if self._len is None: + raise ValueError("The length of the dataset is unknown. 
Please call `prepare_data` first.") + return self._len def to_dict(self) -> Dict[str, Any]: """ diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 013cdbbe2..56c49955d 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -437,11 +437,11 @@ def __init__(self, dist_sync_on_step=False): self.add_state("total_steps", default=torch.tensor(0.0), dist_reduce_fx="sum") def update(self, model: torch.nn.Module) -> None: - total_norm = torch.tensor(0.0) + total_norm = torch.tensor(0.0, device=self.device) for p in model.parameters(): if p.grad is not None: param_norm = p.grad.detach().data.norm(2) - total_norm += param_norm.detach().cpu() ** 2 + total_norm += param_norm.detach() ** 2 self.gradient_norm_sq += total_norm self.total_steps += 1 From eaf9077a4dfeb48431341f55cd1a021bdcb53b68 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Wed, 21 Aug 2024 23:26:00 -0400 Subject: [PATCH 113/175] Added a new logic to allow saving all preds and targets more efficiently for auroc and avpr --- graphium/trainer/metrics.py | 161 +++++++++++++++++++----- graphium/trainer/predictor.py | 8 +- graphium/trainer/predictor_summaries.py | 3 + graphium/utils/spaces.py | 4 +- 4 files changed, 142 insertions(+), 34 deletions(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index ae075ae34..f98061818 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -12,7 +12,7 @@ """ -from typing import Union, Callable, Optional, Dict, Any, Literal, List +from typing import Union, Callable, Optional, Dict, Any, Literal, List, Tuple import sys @@ -25,6 +25,7 @@ from torch.nn.modules.loss import _Loss from torchmetrics.utilities.distributed import reduce import torchmetrics.functional.regression.mae +from torchmetrics import Metric from graphium.utils.tensor import nan_mean @@ -132,6 +133,28 @@ def __eq__(self, obj) -> bool: return all(is_eq) +def _filter_nans(preds: 
Tensor, target: Tensor, target_nan_mask: Union[Literal[None, "none", "ignore"], int]) -> Tuple[Tensor, Tensor]: + """Handle the NaNs according to the chosen options""" + + if target_nan_mask is None: # No NaN handling + return preds, target + + if target.dtype in [torch.int, torch.int16, torch.int32, torch.int64, torch.int8]: + target_nans = (torch.iinfo(target.dtype).min == target) | (torch.iinfo(target.dtype).max == target) + else: + target_nans = torch.isnan(target) + if ~target_nans.any(): # No NaNs + return preds, target + elif isinstance(target_nan_mask, (int, float)): # Replace NaNs + target = target.clone() + target[target_nans] = target_nan_mask + elif target_nan_mask == "ignore": # Remove NaNs + target = target[~target_nans] + preds = preds[~target_nans] + else: + raise ValueError(f"Invalid option `{target_nan_mask}`") + return preds, target + class MetricWrapper: r""" Allows to initialize a metric from a name or Callable, and initialize the @@ -201,22 +224,29 @@ def __init__( self.target_to_int = target_to_int self.kwargs = kwargs - self.metric, self.kwargs = self._initialize_metric(metric_class, self.kwargs) + self.metric, self.kwargs = self._initialize_metric(metric_class, self.target_nan_mask, self.multitask_handling, **self.kwargs) @staticmethod - def _initialize_metric(metric, kwargs): + def _initialize_metric(metric, target_nan_mask, multitask_handling, **kwargs): r""" Initialize the metric with the provided kwargs """ - + if not isinstance(metric, type): - if not isinstance(metric, (torchmetrics.Metric, MetricToTorchMetrics, _Loss)): - raise ValueError(f"metric must be a torchmetrics.Metric, provided: {type(metric)}" - f"Use `METRICS_DICT` to get the metric class") - else: + if callable(metric): + return MetricToConcatenatedTorchMetrics( + metric_fn=metric, + target_nan_mask=target_nan_mask, + multitask_handling=multitask_handling, + **kwargs), kwargs + elif all(hasattr(metric, method) for method in ["update", "compute", "reset", "to"]): return 
metric, kwargs - + else: + raise ValueError(f"metric must be a callable, or a class with 'update', 'compute', 'reset', 'to', provided: `{type(metric)}`") + metric = metric(**kwargs) + if not all(hasattr(metric, method) for method in ["update", "compute", "reset", "to"]): + raise ValueError(f"metric must be a callable, or a class with 'update', 'compute', 'reset', 'to', provided: `{type(metric)}`") return metric, kwargs @@ -317,7 +347,6 @@ def update(self, preds: Tensor, target: Tensor) -> Tensor: if self.thresholder is not None: preds, target = self.thresholder(preds, target) - # for the classifigression task, cast predictions from # (batch_size, n_targets * n_brackets) to (batch_size, n_targets, n_brackets) # TODO: make this more flexible to the target shape in the future @@ -327,7 +356,7 @@ def update(self, preds: Tensor, target: Tensor) -> Tensor: else: classifigression = False - if self.multitask_handling is None: + if (self.multitask_handling is None): # In case of no multi-task handling, apply the nan filtering, then compute the metrics assert ( self.target_nan_mask != "ignore" @@ -339,6 +368,7 @@ def update(self, preds: Tensor, target: Tensor) -> Tensor: target = target.to(int) self.metric.update(preds, target) + elif self.multitask_handling == "flatten": # Flatten the tensors, apply the nan filtering, then compute the metrics if classifigression: @@ -353,6 +383,14 @@ def update(self, preds: Tensor, target: Tensor) -> Tensor: target = target.to(int) self.metric.update(preds, target) + elif isinstance(self.metric, MetricToConcatenatedTorchMetrics): + # NaN's and multitask handling are handled by the MetricToConcatenatedTorchMetrics + if self.squeeze_targets: + target = target.squeeze() + if self.target_to_int: + target = target.to(int) + self.metric.update(preds, target) + elif self.multitask_handling == "mean-per-label": # Loop the columns (last dim) of the tensors, apply the nan filtering, compute the metrics per column, then average the metrics target_nans 
= torch.isnan(target) @@ -428,24 +466,10 @@ def device(self) -> torch.device: return self.metric.device - def _filter_nans(self, preds: Tensor, target: Tensor): + def _filter_nans(self, preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]: """Handle the NaNs according to the chosen options""" - if self.target_nan_mask is None: # No NaN handling - return preds, target - - target_nans = torch.isnan(target) - if ~target_nans.any(): # No NaNs - return preds, target - elif isinstance(self.target_nan_mask, (int, float)): # Replace NaNs - target = target.clone() - target[target_nans] = self.target_nan_mask - elif self.target_nan_mask == "ignore": # Remove NaNs - target = target[~target_nans] - preds = preds[~target_nans] - else: - raise ValueError(f"Invalid option `{self.target_nan_mask}`") - return preds, target + return _filter_nans(preds, target, self.target_nan_mask) def __call__(self, preds: Tensor, target: Tensor) -> Tensor: r""" @@ -500,11 +524,11 @@ def __setstate__(self, state: dict): if thresholder is not None: thresholder = Thresholder(**thresholder) state["thresholder"] = thresholder - state["metric"], state["at_compute_kwargs"] = self._initialize_metric(state["metric"], state["kwargs"]) + state["metric"], state["at_compute_kwargs"] = self._initialize_metric(state["metric"], state["target_nan_mask"], state["multitask_handling"], **state["kwargs"]) self.__dict__.update(state) -class MetricToTorchMetrics(): +class LossWrapper(): r""" A simple wrapper to convert any metric or loss to an equivalent of `torchmetrics.Metric` by adding the `update`, `compute`, and `reset` methods to make it compatible with `MetricWrapper`. @@ -532,3 +556,82 @@ def to(self, device: Union[str, torch.device]): def reset(self): self.scores = [] + +class MetricToMeanTorchMetrics(Metric): + r""" + A simple wrapper to convert any metric or loss to an equivalent of `torchmetrics.Metric` + by adding the `update`, `compute`, and `reset` methods to make it compatible with `MetricWrapper`. 
+ + However, it is limited in functionality. At each `.update()`, it computes the metric and stores in a list. + Then at `.compute()` it returns the average of the computed metric, while ignoring NaNs. + """ + scores: List[Tensor] = [] + + def __init__(self, metric_fn): + super().__init__(dist_sync_on_step=False) + self.metric_fn = metric_fn + self.add_state("scores", default=[], dist_reduce_fx="cat") + + def update(self, preds: Tensor, target: Tensor): + self.scores.append(self.metric_fn(preds.detach(), target)) + + def compute(self): + if len(self.scores) == 0: + raise ValueError("No scores to compute") + elif len(self.scores) == 1: + return self.scores[0] + return nan_mean(torch.stack(self.scores)) + + +class MetricToConcatenatedTorchMetrics(Metric): + preds: List[Tensor] + target: List[Tensor] + + def __init__(self, + metric_fn: Callable, + target_nan_mask: Union[Literal[None, "none", "ignore"], int] = None, + multitask_handling: Literal[None, "none", "flatten", "mean-per-label"] = None, + **kwargs, + ): + + super().__init__(dist_sync_on_step=False) + self.metric_fn = metric_fn + self.target_nan_mask = target_nan_mask + self.multitask_handling = multitask_handling + self.kwargs = kwargs + self.add_state("preds", default=[], dist_reduce_fx="cat") + self.add_state("target", default=[], dist_reduce_fx="cat") + + def update(self, preds: Tensor, target: Tensor): + self.preds.append(preds.detach()) + self.target.append(target) + + def compute(self): + if len(self.preds) == 0: + raise ValueError("No scores to compute") + preds = torch.cat(self.preds, dim=0) + target = torch.cat(self.target, dim=0) + + + if (self.multitask_handling is None) or (self.multitask_handling in ["none", "flatten"]): + preds, target = _filter_nans(preds, target, self.target_nan_mask) + value = self.metric_fn(preds, target) + + elif self.multitask_handling == "mean-per-label": + value = [] + # Loop the columns (last dim) of the tensors, apply the nan filtering, compute the metrics per column, 
then average the metrics + target_list = [target[..., ii] for ii in range(target.shape[-1])] + preds_list = [preds[..., ii] for ii in range(preds.shape[-1])] + for ii in range(len(target_list)): + try: + this_preds, this_target = _filter_nans(preds_list[ii], target_list[ii], self.target_nan_mask) + value.append(self.metric_fn(this_preds, this_target, **self.kwargs)) + except: + pass + value = nan_mean(torch.stack(value)) + else: + # Wrong option + raise ValueError(f"Invalid option `self.multitask_handling={self.multitask_handling}`") + return value + + diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index b87eba7bc..d2758ffa6 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -27,7 +27,7 @@ from graphium.config.config_convert import recursive_config_reformating from graphium.data.datamodule import BaseDataModule -from graphium.trainer.metrics import MetricWrapper, MetricToTorchMetrics +from graphium.trainer.metrics import MetricWrapper, LossWrapper from graphium.trainer.predictor_options import ( EvalOptions, FlagOptions, @@ -169,7 +169,7 @@ def __init__( metrics_with_loss = deepcopy(self.metrics) for task in self.tasks: metrics_with_loss[task][f"loss_{loss_names[task]}"] = MetricWrapper( - metric=MetricToTorchMetrics(self.loss_fun[task]), + metric=LossWrapper(self.loss_fun[task]), target_nan_mask=self.target_nan_mask, multitask_handling=self.multitask_handling, ) @@ -340,7 +340,7 @@ def compute_loss( wrapped_loss_fun_dict = { task: MetricWrapper( - metric=MetricToTorchMetrics(loss), + metric=LossWrapper(loss), threshold_kwargs=None, target_nan_mask=target_nan_mask, multitask_handling=multitask_handling, @@ -596,6 +596,8 @@ def on_validation_epoch_start(self) -> None: self.epoch_start_time = time.time() self.mean_val_time_tracker.reset() self.mean_val_tput_tracker.reset() + + # If not in sanity check self.task_epoch_summary["val"].reset() return super().on_validation_epoch_start() diff --git 
a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index 56c49955d..a5ac9e5cc 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -311,6 +311,9 @@ class to store the summaries of the tasks def __getitem__(self, task: str) -> SingleTaskSummary: return self.task_summaries[task] + + def keys(self) -> List[str]: + return self.tasks def update(self, preds: Dict[str, Tensor], targets: Dict[str, Tensor]) -> None: r""" diff --git a/graphium/utils/spaces.py b/graphium/utils/spaces.py index 63ccb3fc6..c7b6a7ac9 100644 --- a/graphium/utils/spaces.py +++ b/graphium/utils/spaces.py @@ -102,8 +102,8 @@ METRICS_CLASSIFICATION = { "accuracy": TorchMetrics.Accuracy, - "averageprecision": TorchMetrics.AveragePrecision, - "auroc": TorchMetrics.AUROC, + "averageprecision": TorchMetrics.functional.average_precision, # Not using a class to better handle concatenation of preds and targets + "auroc": TorchMetrics.functional.auroc, # Not using a class to better handle concatenation of preds and targets "confusionmatrix": TorchMetrics.ConfusionMatrix, "f1": TorchMetrics.F1Score, "fbeta": TorchMetrics.FBetaScore, From 5432531e5c22bea48a35d1c8aecd4e1500bd4d87 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Wed, 21 Aug 2024 23:47:38 -0400 Subject: [PATCH 114/175] Fixed the concatenation to work with and without DDP. 
Moved to CPU for faster + less memory requirement --- graphium/trainer/metrics.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index f98061818..9c7a8a649 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -25,6 +25,7 @@ from torch.nn.modules.loss import _Loss from torchmetrics.utilities.distributed import reduce import torchmetrics.functional.regression.mae +from torchmetrics.utilities.data import dim_zero_cat from torchmetrics import Metric from graphium.utils.tensor import nan_mean @@ -603,14 +604,14 @@ def __init__(self, self.add_state("target", default=[], dist_reduce_fx="cat") def update(self, preds: Tensor, target: Tensor): - self.preds.append(preds.detach()) - self.target.append(target) + self.preds.append(preds.detach().cpu()) + self.target.append(target.cpu()) def compute(self): if len(self.preds) == 0: raise ValueError("No scores to compute") - preds = torch.cat(self.preds, dim=0) - target = torch.cat(self.target, dim=0) + preds = dim_zero_cat(self.preds) + target = dim_zero_cat(self.target) if (self.multitask_handling is None) or (self.multitask_handling in ["none", "flatten"]): From 8c75d77eb10afbdf4bbb0714ce50a53ad5c8c02d Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 22 Aug 2024 00:43:42 -0400 Subject: [PATCH 115/175] Fixed the issue with memory leaks and devices. 
--- graphium/trainer/metrics.py | 16 +++--- graphium/trainer/predictor_summaries.py | 65 ++++++++++++++++--------- 2 files changed, 53 insertions(+), 28 deletions(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index 9c7a8a649..6a4989a83 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -239,7 +239,7 @@ def _initialize_metric(metric, target_nan_mask, multitask_handling, **kwargs): metric_fn=metric, target_nan_mask=target_nan_mask, multitask_handling=multitask_handling, - **kwargs), kwargs + **kwargs).to("cpu"), kwargs elif all(hasattr(metric, method) for method in ["update", "compute", "reset", "to"]): return metric, kwargs else: @@ -536,12 +536,12 @@ class LossWrapper(): However, it is simply limited to computing the average of the metric over all the updates. """ - def __init__(self, metric): - self.metric = metric + def __init__(self, loss): + self.loss = loss self.scores: List[Tensor] = [] def update(self, preds: Tensor, target: Tensor): - self.scores.append(self.metric(preds, target)) + self.scores.append(self.loss(preds, target)) def compute(self): if len(self.scores) == 0: @@ -554,6 +554,10 @@ def to(self, device: Union[str, torch.device]): for ii in range(len(self.scores)): self.scores[ii] = self.scores[ii].to(device) + @property + def device(self) -> torch.device: + self.loss.device + def reset(self): self.scores = [] @@ -604,8 +608,8 @@ def __init__(self, self.add_state("target", default=[], dist_reduce_fx="cat") def update(self, preds: Tensor, target: Tensor): - self.preds.append(preds.detach().cpu()) - self.target.append(target.cpu()) + self.preds.append(preds.detach().clone().cpu()) + self.target.append(target.clone().cpu()) def compute(self): if len(self.preds) == 0: diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index a5ac9e5cc..f00743d31 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -25,6 +25,8 
@@ from torchmetrics import MeanMetric, Metric from torchmetrics.aggregation import BaseAggregator +from graphium.trainer.metrics import MetricToConcatenatedTorchMetrics + class SummaryInterface(object): r""" @@ -152,40 +154,59 @@ def metrics_to_use(self) -> Dict[str, Callable]: return metrics_to_use return self.metrics + + @staticmethod + def _update(metric_key:str, metric_obj, preds: Tensor, targets: Tensor) -> None: + r""" + update the state of the metrics + Parameters: + targets: the targets tensor + predictions: the predictions tensor + """ + # Check the `metric_obj.update` signature to know if it takes `preds` and `targets` or only one of them + varnames = [val.name for val in inspect.signature(metric_obj.update).parameters.values()] + if ("preds" == varnames[0]) and ("target" == varnames[1]): + # The typical case of `torchmetrics` + metric_obj.update(preds, targets) + elif ("preds" == varnames[1]) and ("target" == varnames[0]): + # Unusual case where the order of the arguments is reversed + metric_obj.update(targets, preds) + elif ("value" == varnames[0]) and ("preds" in metric_key): + # The case where the metric takes only one value, and it is the prediction + metric_obj.update(preds) + elif ("value" == varnames[0]) and ("target" in metric_key): + # The case where the metric takes only one value, and it is the target + metric_obj.update(targets) + else: + raise ValueError(f"Metric {metric_key} update method signature `{varnames}` is not recognized.") + def update(self, preds: Tensor, targets: Tensor) -> None: r""" - update the state of the predictor + update the state of the metrics Parameters: targets: the targets tensor predictions: the predictions tensor """ for metric_key, metric_obj in self.metrics_to_use.items(): - metric_obj.to(preds.device) # Not sure if good for DDP, but otherwise it crashes try: - # Check the `metric_obj.update` signature to know if it takes `preds` and `targets` or only one of them - varnames = [val.name for val in 
inspect.signature(metric_obj.update).parameters.values()] - if ("preds" == varnames[0]) and ("target" == varnames[1]): - # The typical case of `torchmetrics` - metric_obj.update(preds, targets) - elif ("preds" == varnames[1]) and ("target" == varnames[0]): - # Unusual case where the order of the arguments is reversed - metric_obj.update(targets, preds) - elif ("value" == varnames[0]) and ("preds" in metric_key): - # The case where the metric takes only one value, and it is the prediction - metric_obj.update(preds) - elif ("value" == varnames[0]) and ("target" in metric_key): - # The case where the metric takes only one value, and it is the target - metric_obj.update(targets) - else: - raise ValueError(f"Metric {metric_key} update method signature `{varnames}` is not recognized.") - + self._update(metric_key, metric_obj, preds, targets) except Exception as err: err_msg = f"Error for metric {metric_key} on task {self.task_name} and step {self.step_name}. Exception: {err}" - if err_msg not in self._logged_warnings: + # Check if the error is due to the device mismatch, cast to the device, and retry + if "device" in str(err): + metric_obj.to(preds.device) + try: + self._update(metric_key, metric_obj, preds, targets) + except Exception as err: + if err_msg not in self._logged_warnings: + logger.warning(err_msg) + self._logged_warnings.add(err_msg) + else: logger.warning(err_msg) self._logged_warnings.add(err_msg) + def _compute(self, metrics_to_use: Optional[Union[List[str], Dict[str, Any]]] = None) -> Dict[str, Tensor]: @@ -396,9 +417,9 @@ def __init__(self, nan_strategy: Union[Literal["error", "warn", "ignore"], float def update(self, value: Union[float, Tensor], weight: Union[float, Tensor] = 1.0) -> None: if not isinstance(value, Tensor): - value = torch.as_tensor(value, dtype=torch.float32, device=self.device) + value = torch.as_tensor(value, dtype=torch.float32, device=value.device) if not isinstance(weight, Tensor): - weight = torch.as_tensor(weight, 
dtype=torch.float32, device=self.device) + weight = torch.as_tensor(weight, dtype=torch.float32, device=value.device) weight = torch.broadcast_to(weight, value.shape).clone() # Check whether `_cast_and_nan_check_input` takes in `weight` From 5abd7696fec3485ace4745e00a8d85fce607d731 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 22 Aug 2024 15:23:27 -0400 Subject: [PATCH 116/175] Fixed the CPU syncing of `MetricToConcatenatedTorchMetrics` and GPU for other metrics --- graphium/trainer/metrics.py | 82 +++++++++++++++++++++---- graphium/trainer/predictor_summaries.py | 24 ++++---- 2 files changed, 84 insertions(+), 22 deletions(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index 6a4989a83..725493b91 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -18,6 +18,7 @@ import torch from torch import Tensor +import torch.distributed as dist import operator as op from copy import deepcopy from loguru import logger @@ -235,11 +236,12 @@ def _initialize_metric(metric, target_nan_mask, multitask_handling, **kwargs): if not isinstance(metric, type): if callable(metric): - return MetricToConcatenatedTorchMetrics( + metric = MetricToConcatenatedTorchMetrics( metric_fn=metric, target_nan_mask=target_nan_mask, multitask_handling=multitask_handling, - **kwargs).to("cpu"), kwargs + **kwargs) + return metric, kwargs elif all(hasattr(metric, method) for method in ["update", "compute", "reset", "to"]): return metric, kwargs else: @@ -589,8 +591,9 @@ def compute(self): class MetricToConcatenatedTorchMetrics(Metric): - preds: List[Tensor] - target: List[Tensor] + + preds: List[Tensor] # Always on CPU + target: List[Tensor] # Always on CPU def __init__(self, metric_fn: Callable, @@ -598,25 +601,71 @@ def __init__(self, multitask_handling: Literal[None, "none", "flatten", "mean-per-label"] = None, **kwargs, ): - - super().__init__(dist_sync_on_step=False) + r""" + A wrapper around the `torchmetrics.Metric` to handle the saving and 
syncing of `preds` and `target` tensors, + and moving them to the CPU. + This is useful for certain metrics that require to save all preds and targets, such as auroc and average_precision. + Otherwise, if using `MetricWrapper` with the option `mean-per-label`, the `preds` and `target` would be + duplicated for each label, causing major memory spikes. + On top of that, all preds and targets would be on the GPU, which would cause the memory to increase at every step, + and potentially lead to out-of-memory before the end of the epoch. + + Parameters + ---------- + + metric_fn: + The metric function to use. This function should take `preds` and `target` as input, and return a scalar value. + + target_nan_mask: + - None: Do not change behaviour if there are NaNs + + - int, float: Value used to replace NaNs. For example, if `target_nan_mask==0`, then + all NaNs will be replaced by zeros + + - 'ignore': The NaN values will be removed from the tensor before computing the metrics. + Must be coupled with the `multitask_handling='flatten'` or `multitask_handling='mean-per-label'`. + + multitask_handling: + - None: Do not process the tensor before passing it to the metric. + Cannot use the option `multitask_handling=None` when `target_nan_mask=ignore`. + Use either 'flatten' or 'mean-per-label'. 
+ + - 'flatten': Flatten the tensor to produce the equivalent of a single task + + - 'mean-per-label': Loop all the labels columns, process them as a single task, + and average the results over each task + *This option might slow down the computation if there are too many labels* + + """ + + super().__init__(compute_on_cpu=True, dist_sync_on_step=False, sync_on_compute=False) self.metric_fn = metric_fn self.target_nan_mask = target_nan_mask self.multitask_handling = multitask_handling self.kwargs = kwargs self.add_state("preds", default=[], dist_reduce_fx="cat") self.add_state("target", default=[], dist_reduce_fx="cat") + self._to_device_warned: bool = False + super().to("cpu") def update(self, preds: Tensor, target: Tensor): - self.preds.append(preds.detach().clone().cpu()) - self.target.append(target.clone().cpu()) + + # If distributed, gather the preds and target tensors + if self.dist_sync_fn is not None: + preds_list = [torch.zeros_like(preds) for _ in range(dist.get_world_size())] + target_list = [torch.zeros_like(target) for _ in range(dist.get_world_size())] + dist.all_gather(preds_list, preds) + dist.all_gather(target_list, target) + preds = dim_zero_cat(preds_list) + target = dim_zero_cat(target_list) + + # Move the tensors to the CPU after gathering them + self.preds.append(preds.detach().cpu()) + self.target.append(target.cpu()) def compute(self): - if len(self.preds) == 0: - raise ValueError("No scores to compute") preds = dim_zero_cat(self.preds) target = dim_zero_cat(self.target) - if (self.multitask_handling is None) or (self.multitask_handling in ["none", "flatten"]): preds, target = _filter_nans(preds, target, self.target_nan_mask) @@ -638,5 +687,16 @@ def compute(self): # Wrong option raise ValueError(f"Invalid option `self.multitask_handling={self.multitask_handling}`") return value + + def to(self, device: Union[str, torch.device]): + """ + Disables the moving of the metric to another device. Stays on CPU to avoid overflow. 
+ """ + device = torch.device(device) + if device == torch.device("cpu"): + return + if not self._to_device_warned: + self._to_device_warned = True + logger.info(f"MetricToConcatenatedTorchMetrics({self.metric_fn}) stays on `{self.device}`, won't move to `{device}`") diff --git a/graphium/trainer/predictor_summaries.py b/graphium/trainer/predictor_summaries.py index f00743d31..06e762dde 100644 --- a/graphium/trainer/predictor_summaries.py +++ b/graphium/trainer/predictor_summaries.py @@ -116,6 +116,7 @@ def __init__( self._cached_metrics: Dict[str, Tensor] = {} self._logged_warnings: Set[str] = set() # Set to track which metrics have been logged + self._device: torch.device = None @property def get_cached_metrics(self) -> Dict[str, Tensor]: @@ -163,6 +164,7 @@ def _update(metric_key:str, metric_obj, preds: Tensor, targets: Tensor) -> None: targets: the targets tensor predictions: the predictions tensor """ + # Check the `metric_obj.update` signature to know if it takes `preds` and `targets` or only one of them varnames = [val.name for val in inspect.signature(metric_obj.update).parameters.values()] if ("preds" == varnames[0]) and ("target" == varnames[1]): @@ -182,28 +184,24 @@ def _update(metric_key:str, metric_obj, preds: Tensor, targets: Tensor) -> None: def update(self, preds: Tensor, targets: Tensor) -> None: - r""" update the state of the metrics Parameters: targets: the targets tensor predictions: the predictions tensor """ + + self._device = preds.device + for metric_key, metric_obj in self.metrics_to_use.items(): + metric_obj.to(self.device) try: self._update(metric_key, metric_obj, preds, targets) except Exception as err: err_msg = f"Error for metric {metric_key} on task {self.task_name} and step {self.step_name}. 
Exception: {err}" # Check if the error is due to the device mismatch, cast to the device, and retry - if "device" in str(err): - metric_obj.to(preds.device) - try: - self._update(metric_key, metric_obj, preds, targets) - except Exception as err: - if err_msg not in self._logged_warnings: - logger.warning(err_msg) - self._logged_warnings.add(err_msg) - else: + + if err_msg not in self._logged_warnings: logger.warning(err_msg) self._logged_warnings.add(err_msg) @@ -229,7 +227,7 @@ def _compute(self, metrics_to_use: Optional[Union[List[str], Dict[str, Any]]] = computed_metrics[f"{metric_name}"] = metric_obj.compute() except Exception as e: # If the metric computation fails, return NaN and log a warning only once - computed_metrics[f"{metric_name}"] = torch.tensor(torch.nan, device=metric_obj.device) + computed_metrics[f"{metric_name}"] = torch.tensor(torch.nan, device=self.device) # Warn only if it's the first warning for that metric if metric_name not in self.logged_metrics_exceptions: self.logged_metrics_exceptions.append(metric_name) @@ -288,6 +286,10 @@ def metric_log_name(self, metric_name): return f"{metric_name}/{self.step_name}" else: return f"{self.task_name}/{metric_name}/{self.step_name}" + + @property + def device(self) -> Optional[torch.device]: + return self._device class MultiTaskSummary(SummaryInterface): From fac3052afb27b94a171a3bb07ec568fe21000286 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 22 Aug 2024 15:38:06 -0400 Subject: [PATCH 117/175] Fixed the training metrics, and grouped all epoch-time and tput metrics --- graphium/trainer/predictor.py | 86 +++++++++++++++++++++-------------- 1 file changed, 53 insertions(+), 33 deletions(-) diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index d2758ffa6..64521304a 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -14,7 +14,7 @@ import time from copy import deepcopy -from typing import Any, Callable, Dict, List, Optional, Tuple, Type, 
Union, Literal +from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union, Literal, Mapping import lightning import numpy as np @@ -200,8 +200,8 @@ def __init__( self._set_hparams(recursive_config_reformating(self.hparams)) # throughput estimation - self.mean_val_time_tracker = MovingAverageTracker() - self.mean_val_tput_tracker = MovingAverageTracker() + self.mean_time_tracker = MovingAverageTracker() + self.mean_tput_tracker = MovingAverageTracker() self.epoch_start_time = None # Decide whether to log every step or once at the end @@ -498,14 +498,14 @@ def on_train_batch_start(self, batch: Any, batch_idx: int) -> Optional[int]: self.model_grad.reset() self.task_epoch_summary["train"].reset() - self.train_batch_start_time = time.time() + self.batch_start_time = time.time() self.skip_log_train_metrics = (self.metrics_every_n_train_steps is None) or ( (batch_idx % self.metrics_every_n_train_steps) != 0 ) return super().on_train_batch_start(batch, batch_idx) def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: - train_batch_time = time.time() - self.train_batch_start_time # To be used for throughput calculation + train_batch_time = time.time() - self.batch_start_time # To be used for throughput calculation # Get the metrics that are logged at every step (loss, grad_norm, batch_time, batch_tput) metrics_logs = {} @@ -538,6 +538,8 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: tput = num_graphs / train_batch_time metrics_logs["_global/train/batch_time"] = train_batch_time metrics_logs["_global/train/batch_tput"] = tput + self.mean_time_tracker.update(train_batch_time) + self.mean_tput_tracker.update(tput) metrics_computed = self.task_epoch_summary["train"].compute() self.task_epoch_summary["train"].reset() @@ -574,6 +576,13 @@ def validation_step(self, batch: Dict[str, Tensor]) -> Dict[str, Any]: def test_step(self, batch: Dict[str, Tensor]) -> Dict[str, Any]: return self._general_step(batch=batch, 
step_name="test") + + def _general_epoch_start(self, step_name: Literal["train", "val", "test"]) -> None: + self.task_epoch_summary[step_name].reset() + self.epoch_start_time = time.time() + self.mean_time_tracker.reset() + self.mean_tput_tracker.reset() + def _general_epoch_end(self, step_name: Literal["train", "val", "test"]) -> Dict[str, Tensor]: r"""Common code for training_epoch_end, validation_epoch_end and testing_epoch_end""" @@ -581,59 +590,70 @@ def _general_epoch_end(self, step_name: Literal["train", "val", "test"]) -> Dict metric_logs = self.task_epoch_summary[step_name].compute() self.task_epoch_summary[step_name].reset() + metric_logs_cpu = {k: v for k, v in metric_logs.items() if v.device == torch.device("cpu")} + if len(metric_logs_cpu) > 0: + self.log_dict(metric_logs_cpu, logger=True, prog_bar=True, sync_dist=False, on_epoch=True) + + metric_logs_accelerator = {k: v for k, v in metric_logs.items() if v.device != torch.device("cpu")} + if len(metric_logs_accelerator) > 0: + self.log_dict(metric_logs_accelerator, logger=True, prog_bar=True, sync_dist=True, on_epoch=True) + + # Time metrics are tracked always on CPU, without progress bar, so we log them separatly + time_metrics = {} + time_metrics[f"_global/{step_name}/mean_batch_time"] = torch.tensor(self.mean_time_tracker.mean_value) + time_metrics[f"_global/{step_name}/mean_tput"] = self.mean_tput_tracker.mean_value + time_metrics[f"_global/{step_name}/epoch_time"] = torch.tensor(time.time() - self.epoch_start_time) + + self.log_dict(time_metrics, logger=True, prog_bar=False, sync_dist=False, on_epoch=True) + return metric_logs def on_train_epoch_start(self) -> None: - self.epoch_start_time = time.time() + self._general_epoch_start(step_name="train") def on_train_epoch_end(self) -> None: - if self.epoch_start_time is not None: - epoch_time = torch.tensor(time.time() - self.epoch_start_time) - self.epoch_start_time = None - self.log("_global/train/epoch_time", epoch_time, prog_bar=True, 
sync_dist=False, on_epoch=True) + self._general_epoch_end(step_name="train") def on_validation_epoch_start(self) -> None: - self.epoch_start_time = time.time() - self.mean_val_time_tracker.reset() - self.mean_val_tput_tracker.reset() - - # If not in sanity check - self.task_epoch_summary["val"].reset() + self._general_epoch_start(step_name="val") return super().on_validation_epoch_start() def on_validation_batch_start(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> None: - self.validation_batch_start_time = time.time() + self.batch_start_time = time.time() return super().on_validation_batch_start(batch, batch_idx, dataloader_idx) def on_validation_batch_end( self, outputs, batch: Any, batch_idx: int, dataloader_idx: int = 0 ) -> None: - val_batch_time = time.time() - self.validation_batch_start_time - self.mean_val_time_tracker.update(val_batch_time) + val_batch_time = time.time() - self.batch_start_time + self.mean_time_tracker.update(val_batch_time) num_graphs = self.get_num_graphs(batch["features"]) - self.mean_val_tput_tracker.update(num_graphs / val_batch_time) + self.mean_tput_tracker.update(num_graphs / val_batch_time) return super().on_validation_batch_end(outputs, batch, batch_idx, dataloader_idx) def on_validation_epoch_end(self) -> None: - metrics_logs = self._general_epoch_end(step_name="val") - self.log_dict(metrics_logs, logger=True, prog_bar=True, sync_dist=True, on_epoch=True) + self._general_epoch_end(step_name="val") + return super().on_validation_epoch_end() - # Time metrics are tracked always on CPU, so we log them separatly - time_metrics = {} - time_metrics["_global/val/mean_batch_time"] = torch.tensor(self.mean_val_time_tracker.mean_value) - time_metrics["_global/val/mean_tput"] = self.mean_val_tput_tracker.mean_value - if self.epoch_start_time is not None: - time_metrics["_global/val/epoch_time"] = torch.tensor(time.time() - self.epoch_start_time) - self.epoch_start_time = None - self.log_dict(time_metrics, logger=True, 
prog_bar=False, sync_dist=False, on_epoch=True) def on_test_epoch_start(self) -> None: - self.task_epoch_summary["test"].reset() + self._general_epoch_start(step_name="test") return super().on_test_epoch_start() def on_test_epoch_end(self) -> None: - metrics_logs = self._general_epoch_end(step_name="test") - self.log_dict(metrics_logs, logger=True, prog_bar=True, sync_dist=True, on_epoch=True) + self._general_epoch_end(step_name="test") + return super().on_test_epoch_end() + + def on_test_batch_start(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> None: + self.batch_start_time = time.time() + return super().on_test_batch_start(batch, batch_idx, dataloader_idx) + + def on_test_batch_end(self, outputs: Tensor | Mapping[str, Any] | None, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> None: + test_batch_time = time.time() - self.batch_start_time + self.mean_time_tracker.update(test_batch_time) + num_graphs = self.get_num_graphs(batch["features"]) + self.mean_tput_tracker.update(num_graphs / test_batch_time) + return super().on_test_batch_end(outputs, batch, batch_idx, dataloader_idx) def on_train_start(self): hparams_log = deepcopy(self.hparams) From 6603014a32b1418469d80e82dd286b5d4ff2b4ff Mon Sep 17 00:00:00 2001 From: wenkelf Date: Thu, 22 Aug 2024 13:44:26 -0600 Subject: [PATCH 118/175] Fixing unit tests --- graphium/data/dataset.py | 7 ++++++- tests/test_node_label_order.py | 24 ++++++++++++++---------- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/graphium/data/dataset.py b/graphium/data/dataset.py index 6c5f7a1a0..bf55e0418 100644 --- a/graphium/data/dataset.py +++ b/graphium/data/dataset.py @@ -45,6 +45,7 @@ def __init__( num_edges_tensor=None, about: str = "", data_path: Optional[Union[str, os.PathLike]] = None, + return_smiles: bool = False, ): r""" This class holds the information for the multitask dataset. 
@@ -75,6 +76,8 @@ def __init__( self.num_edges_tensor = num_edges_tensor self.dataset_length = num_nodes_tensor.size(dim=0) + self.return_smiles = return_smiles + logger.info(f"Dataloading from DISK") def __len__(self): @@ -195,11 +198,13 @@ def __getitem__(self, idx): datum = {"features": self.featurize_smiles(smiles_str)} else: datum = { - "smiles": smiles_str, "labels": self.load_graph_from_index(idx), "features": self.featurize_smiles(smiles_str), } + if self.return_smiles: + datum["smiles"] = smiles_str + # One of the featurization error handling options returns a string on error, # instead of throwing an exception, so assume that the intention is to just skip, # instead of crashing. diff --git a/tests/test_node_label_order.py b/tests/test_node_label_order.py index 974834405..1baede2b6 100644 --- a/tests/test_node_label_order.py +++ b/tests/test_node_label_order.py @@ -43,13 +43,15 @@ def test_node_label_ordering(self): "task": {"task_level": "node", "label_cols": ["node_charges_mulliken", "node_charges_lowdin"], "smiles_col": "ordered_smiles", "seed": 42, **task_kwargs}, } - ds = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH) - ds.prepare_data() - ds.setup() + dm = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH) + dm.prepare_data() + dm.setup() - self.assertEqual(len(ds.train_ds), 10) + dm.train_ds.return_smiles = True - dl = ds.train_dataloader() + self.assertEqual(len(dm.train_ds), 10) + + dl = dm.train_dataloader() batch = next(iter(dl)) @@ -73,13 +75,15 @@ def test_node_label_ordering(self): "task_2": {"task_level": "node", "label_cols": ["node_charges_lowdin"], "smiles_col": "ordered_smiles", "seed": 43, **task_kwargs}, } - ds = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH) - ds.prepare_data() - ds.setup() + dm = MultitaskFromSmilesDataModule(task_specific_args, 
processed_graph_data_path=TEMP_CACHE_DATA_PATH) + dm.prepare_data() + dm.setup() + + dm.train_ds.return_smiles = True - self.assertEqual(len(ds.train_ds), 10) + self.assertEqual(len(dm.train_ds), 10) - dl = ds.train_dataloader() + dl = dm.train_dataloader() batch = next(iter(dl)) From d0ed816824980d5c659ab316ad86c987d6402a80 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 22 Aug 2024 15:50:03 -0400 Subject: [PATCH 119/175] Fixed epoch_time tracking (because train ends after val) --- graphium/trainer/predictor.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index 64521304a..33755a533 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -202,7 +202,7 @@ def __init__( # throughput estimation self.mean_time_tracker = MovingAverageTracker() self.mean_tput_tracker = MovingAverageTracker() - self.epoch_start_time = None + self.epoch_start_time = {} # Decide whether to log every step or once at the end # of the epoch. 
@@ -579,7 +579,7 @@ def test_step(self, batch: Dict[str, Tensor]) -> Dict[str, Any]: def _general_epoch_start(self, step_name: Literal["train", "val", "test"]) -> None: self.task_epoch_summary[step_name].reset() - self.epoch_start_time = time.time() + self.epoch_start_time[step_name] = time.time() self.mean_time_tracker.reset() self.mean_tput_tracker.reset() @@ -602,7 +602,7 @@ def _general_epoch_end(self, step_name: Literal["train", "val", "test"]) -> Dict time_metrics = {} time_metrics[f"_global/{step_name}/mean_batch_time"] = torch.tensor(self.mean_time_tracker.mean_value) time_metrics[f"_global/{step_name}/mean_tput"] = self.mean_tput_tracker.mean_value - time_metrics[f"_global/{step_name}/epoch_time"] = torch.tensor(time.time() - self.epoch_start_time) + time_metrics[f"_global/{step_name}/epoch_time"] = torch.tensor(time.time() - self.epoch_start_time[step_name]) self.log_dict(time_metrics, logger=True, prog_bar=False, sync_dist=False, on_epoch=True) From 9b7063f2bb3c45d10721c027352aa07cd4167912 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 23 Aug 2024 10:36:58 -0400 Subject: [PATCH 120/175] Using the `torchmetrics.Metric.sync` instead of torch_distributed --- graphium/trainer/metrics.py | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index 725493b91..1c500cdb0 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -651,13 +651,7 @@ def __init__(self, def update(self, preds: Tensor, target: Tensor): # If distributed, gather the preds and target tensors - if self.dist_sync_fn is not None: - preds_list = [torch.zeros_like(preds) for _ in range(dist.get_world_size())] - target_list = [torch.zeros_like(target) for _ in range(dist.get_world_size())] - dist.all_gather(preds_list, preds) - dist.all_gather(target_list, target) - preds = dim_zero_cat(preds_list) - target = dim_zero_cat(target_list) + self.sync(self.dist_sync_fn, 
self.process_group) # Move the tensors to the CPU after gathering them self.preds.append(preds.detach().cpu()) @@ -697,6 +691,25 @@ def to(self, device: Union[str, torch.device]): return if not self._to_device_warned: self._to_device_warned = True - logger.info(f"MetricToConcatenatedTorchMetrics({self.metric_fn}) stays on `{self.device}`, won't move to `{device}`") + logger.warning(f"{self.get_obj_name(self)}({self.get_obj_name(self.metric_fn)}) stays on `{self.device}`, won't move to `{device}`") + @staticmethod + def get_obj_name(obj): + """ + Returns the name of a function, class, or instance of a class. + + Parameters: + - obj: The object to get the name of. + + Returns: + - The name of the object as a string. + """ + # If the object is a class or function, return its __name__ + if hasattr(obj, '__name__'): + return obj.__name__ + # If the object is an instance of a class, return its class's __name__ + elif hasattr(obj, '__class__'): + return obj.__class__.__name__ + else: + return str(obj) # Fallback to converting the object to string From 136b8b0a64c68b9ffcdacd816a712562f3ba7b08 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 29 Aug 2024 13:25:00 -0400 Subject: [PATCH 121/175] Fixed issue that NaNs are always removed with `mean-per-label` --- graphium/trainer/metrics.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index 1c500cdb0..8dd40fba2 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -396,13 +396,11 @@ def update(self, preds: Tensor, target: Tensor) -> Tensor: elif self.multitask_handling == "mean-per-label": # Loop the columns (last dim) of the tensors, apply the nan filtering, compute the metrics per column, then average the metrics - target_nans = torch.isnan(target) - target_list = [target[..., ii][~target_nans[..., ii]] for ii in range(target.shape[-1])] - # TODO: make this more flexible to the target shape in the future + 
target_list = [target[..., ii] for ii in range(target.shape[-1])] if classifigression: - preds_list = [preds[..., i, :][~target_nans[..., i]] for i in range(preds.shape[1])] + preds_list = [preds[..., ii, :] for ii in range(preds.shape[1])] else: - preds_list = [preds[..., ii][~target_nans[..., ii]] for ii in range(preds.shape[-1])] + preds_list = [preds[..., ii] for ii in range(preds.shape[-1])] if not isinstance(self.metric, list): self.metric = [deepcopy(self.metric) for _ in range(len(target_list))] From 2724b4c52c79dee2a952b71daab0c6003ecc1ccd Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 29 Aug 2024 13:45:01 -0400 Subject: [PATCH 122/175] Changed the name of logging variables --- graphium/trainer/predictor.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index 33755a533..22895a9c7 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -536,15 +536,15 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: # Get the throughput of the batch num_graphs = self.get_num_graphs(batch["features"]) tput = num_graphs / train_batch_time - metrics_logs["_global/train/batch_time"] = train_batch_time - metrics_logs["_global/train/batch_tput"] = tput + metrics_logs["_global/batch_time/train"] = train_batch_time + metrics_logs["_global/batch_tput/train"] = tput self.mean_time_tracker.update(train_batch_time) self.mean_tput_tracker.update(tput) metrics_computed = self.task_epoch_summary["train"].compute() self.task_epoch_summary["train"].reset() metrics_logs.update(metrics_computed) - metrics_logs["_global/train/grad_norm"] = self.model_grad.compute() + metrics_logs["_global/grad_norm/train"] = self.model_grad.compute() # Log the metrics self.log_dict( @@ -600,9 +600,9 @@ def _general_epoch_end(self, step_name: Literal["train", "val", "test"]) -> Dict # Time metrics are tracked always on CPU, without progress bar, so we log them 
separatly time_metrics = {} - time_metrics[f"_global/{step_name}/mean_batch_time"] = torch.tensor(self.mean_time_tracker.mean_value) - time_metrics[f"_global/{step_name}/mean_tput"] = self.mean_tput_tracker.mean_value - time_metrics[f"_global/{step_name}/epoch_time"] = torch.tensor(time.time() - self.epoch_start_time[step_name]) + time_metrics[f"_global/mean_batch_time/{step_name}"] = torch.tensor(self.mean_time_tracker.mean_value) + time_metrics[f"_global/mean_tput/{step_name}"] = self.mean_tput_tracker.mean_value + time_metrics[f"_global/epoch_time/{step_name}"] = torch.tensor(time.time() - self.epoch_start_time[step_name]) self.log_dict(time_metrics, logger=True, prog_bar=False, sync_dist=False, on_epoch=True) From 141f48bc2d91e92cf9f1b71bb9133762731853ef Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 29 Aug 2024 14:31:53 -0400 Subject: [PATCH 123/175] Removed some IPU logic --- graphium/cli/train_finetune_test.py | 46 ----------------------------- graphium/config/_loader.py | 9 ------ graphium/trainer/predictor.py | 9 +----- 3 files changed, 1 insertion(+), 63 deletions(-) diff --git a/graphium/cli/train_finetune_test.py b/graphium/cli/train_finetune_test.py index f329b0edf..4cea1ee8c 100644 --- a/graphium/cli/train_finetune_test.py +++ b/graphium/cli/train_finetune_test.py @@ -52,41 +52,6 @@ def cli(cfg: DictConfig) -> None: return run_training_finetuning_testing(cfg) -def get_replication_factor(cfg): - try: - ipu_config = cfg.get("accelerator", {}).get("ipu_config", []) - for item in ipu_config: - if "replicationFactor" in item: - # Extract the number between parentheses - start = item.find("(") + 1 - end = item.find(")") - if start != 0 and end != -1: - return int(item[start:end]) - except Exception as e: - print(f"An error occurred: {e}") - - # Return default value if replicationFactor is not found or an error occurred - return 1 - - -def get_gradient_accumulation_factor(cfg): - """ - WARNING: This MUST be called after accelerator overrides have been 
applied - (i.e. after `load_accelerator` has been called) - """ - try: - # Navigate through the nested dictionaries and get the gradient accumulation factor - grad_accumulation_factor = cfg.get("trainer", {}).get("trainer", {}).get("accumulate_grad_batches", 1) - - # Ensure that the extracted value is an integer - return int(grad_accumulation_factor) - except Exception as e: - print(f"An error occurred: {e}") - - # Return default value if an error occurred - return 1 - - def get_training_batch_size(cfg): """ WARNING: This MUST be called after accelerator overrides have been applied @@ -195,14 +160,6 @@ def run_training_finetuning_testing(cfg: DictConfig) -> None: ## Metrics metrics = load_metrics(cfg) - # Note: these MUST be called after `cfg, accelerator = load_accelerator(cfg)` - replicas = get_replication_factor(cfg) - gradient_acc = get_gradient_accumulation_factor(cfg) - micro_bs = get_training_batch_size(cfg) - device_iterations = get_training_device_iterations(cfg) - - global_bs = replicas * gradient_acc * micro_bs * device_iterations - ## Predictor predictor = load_predictor( config=cfg, @@ -213,9 +170,6 @@ def run_training_finetuning_testing(cfg: DictConfig) -> None: accelerator_type=accelerator_type, featurization=datamodule.featurization, task_norms=datamodule.task_norms, - replicas=replicas, - gradient_acc=gradient_acc, - global_bs=global_bs, ) logger.info(predictor.model) diff --git a/graphium/config/_loader.py b/graphium/config/_loader.py index 1d82f7f90..9596e2eac 100644 --- a/graphium/config/_loader.py +++ b/graphium/config/_loader.py @@ -300,9 +300,6 @@ def load_predictor( accelerator_type: str, featurization: Dict[str, str] = None, task_norms: Optional[Dict[Callable, Any]] = None, - replicas: int = 1, - gradient_acc: int = 1, - global_bs: int = 1, ) -> PredictorModule: """ Defining the predictor module, which handles the training logic from `lightning.LighningModule` @@ -328,9 +325,6 @@ def load_predictor( task_levels=task_levels, 
featurization=featurization, task_norms=task_norms, - replicas=replicas, - gradient_acc=gradient_acc, - global_bs=global_bs, **cfg_pred, ) @@ -347,9 +341,6 @@ def load_predictor( task_levels=task_levels, featurization=featurization, task_norms=task_norms, - replicas=replicas, - gradient_acc=gradient_acc, - global_bs=global_bs, **cfg_pred, ) diff --git a/graphium/trainer/predictor.py b/graphium/trainer/predictor.py index 22895a9c7..0a89a4b19 100644 --- a/graphium/trainer/predictor.py +++ b/graphium/trainer/predictor.py @@ -60,9 +60,6 @@ def __init__( flag_kwargs: Dict[str, Any] = None, task_norms: Optional[Dict[Callable, Any]] = None, metrics_every_n_train_steps: Optional[int] = None, - replicas: int = 1, - gradient_acc: int = 1, - global_bs: Optional[int] = 1, ): """ The Lightning module responsible for handling the predictions, losses, metrics, optimization, etc. @@ -209,8 +206,6 @@ def __init__( self.metrics_every_n_train_steps = metrics_every_n_train_steps # Wether save preds and targets for each training step. 
- self.samples_seen = 0 - self.global_bs = global_bs self.model_grad = GradientNormMetric() def forward( @@ -509,9 +504,6 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: # Get the metrics that are logged at every step (loss, grad_norm, batch_time, batch_tput) metrics_logs = {} - # Incriment by the batch size - self.samples_seen += self.global_bs - metrics_logs["samples_seen"] = self.samples_seen # report the training loss for each individual tasks # get the mean loss value for individual tasks as they are a tensor of size --> gradient accumulation * replication * device_iter @@ -545,6 +537,7 @@ def on_train_batch_end(self, outputs, batch: Any, batch_idx: int) -> None: self.task_epoch_summary["train"].reset() metrics_logs.update(metrics_computed) metrics_logs["_global/grad_norm/train"] = self.model_grad.compute() + self.model_grad.reset() # Log the metrics self.log_dict( From 62f22244598b99563cd873fc5ba2d1bcb520eb95 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Thu, 29 Aug 2024 15:33:26 -0400 Subject: [PATCH 124/175] Fixed the syncing of `MetricToConcatenatedTorchMetrics` --- graphium/trainer/metrics.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index 8dd40fba2..6bac93416 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -649,7 +649,11 @@ def __init__(self, def update(self, preds: Tensor, target: Tensor): # If distributed, gather the preds and target tensors - self.sync(self.dist_sync_fn, self.process_group) + if self.dist_sync_fn is not None: + preds_list = self.dist_sync_fn(preds, self.process_group) + target_list = self.dist_sync_fn(target, self.process_group) + preds = dim_zero_cat(preds_list) + target = dim_zero_cat(target_list) # Move the tensors to the CPU after gathering them self.preds.append(preds.detach().cpu()) From 2b58fedf841e864363c8e16c0b8178bff427ce86 Mon Sep 17 00:00:00 2001 From: sft-managed Date: Thu, 29 
Aug 2024 13:50:43 -0600 Subject: [PATCH 125/175] Fixed classification metric calculation when multitask_handling=flatten --- graphium/trainer/metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphium/trainer/metrics.py b/graphium/trainer/metrics.py index 6bac93416..7a20e0049 100644 --- a/graphium/trainer/metrics.py +++ b/graphium/trainer/metrics.py @@ -665,7 +665,7 @@ def compute(self): if (self.multitask_handling is None) or (self.multitask_handling in ["none", "flatten"]): preds, target = _filter_nans(preds, target, self.target_nan_mask) - value = self.metric_fn(preds, target) + value = self.metric_fn(preds, target, **self.kwargs) elif self.multitask_handling == "mean-per-label": value = [] From c23dc023343beaeb1ea94c7095cbb1b17596bcab Mon Sep 17 00:00:00 2001 From: wenkelf Date: Thu, 5 Sep 2024 09:12:39 -0600 Subject: [PATCH 126/175] Partial fix of node label ordering --- .gitignore | 1 + graphium/graphium_cpp/features.h | 2 +- tests/test_node_label_order.py | 262 +++++++++++++++++++++++++++---- 3 files changed, 233 insertions(+), 32 deletions(-) diff --git a/.gitignore b/.gitignore index 289f10a4d..41dbc0e45 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,7 @@ draft/ scripts-expts/ sweeps/ mup/ +loc-* # Data and predictions graphium/data/ZINC_bench_gnn/ diff --git a/graphium/graphium_cpp/features.h b/graphium/graphium_cpp/features.h index 4bbcde001..034e85630 100644 --- a/graphium/graphium_cpp/features.h +++ b/graphium/graphium_cpp/features.h @@ -274,4 +274,4 @@ std::tuple, int64_t, int64_t> featurize_smiles( std::unique_ptr parse_mol( const std::string& smiles_string, bool explicit_H, - bool ordered = false); + bool ordered = true); diff --git a/tests/test_node_label_order.py b/tests/test_node_label_order.py index 1baede2b6..35411cc33 100644 --- a/tests/test_node_label_order.py +++ b/tests/test_node_label_order.py @@ -28,75 +28,275 @@ class Test_NodeLabelOrdering(ut.TestCase): def test_node_label_ordering(self): - # Import 
node labels from parquet fole - parquet_file = "tests/data/dummy_node_label_order_data.parquet" - task_kwargs = {"df_path": parquet_file, "split_val": 0.0, "split_test": 0.0} - - # Look at raw data - raw_data = pd.read_parquet("tests/data/dummy_node_label_order_data.parquet") - raw_labels = { - smiles: torch.from_numpy(np.stack([label_1, label_2])).T for (smiles, label_1, label_2) in zip(raw_data["ordered_smiles"], raw_data["node_charges_mulliken"], raw_data["node_charges_lowdin"]) - } + + # Delete the cache if already exist + if exists(TEMP_CACHE_DATA_PATH): + rm(TEMP_CACHE_DATA_PATH, recursive=True) + + ################################################################################################################### + ### Test I: Test if atom labels are ordered correctly for a single dataset that contains only a single molecule ### + ################################################################################################################### + + # Import node labels from parquet file + df = pd.DataFrame( + { + "ordered_smiles": ["[C:0][C:1][O:2]"], + "node_labels": [[0., 0., 2.]], + } + ) + + task_kwargs = {"df": df, "split_val": 0.0, "split_test": 0.0} # Check datamodule with single task and two labels task_specific_args = { - "task": {"task_level": "node", "label_cols": ["node_charges_mulliken", "node_charges_lowdin"], "smiles_col": "ordered_smiles", "seed": 42, **task_kwargs}, + "task": {"task_level": "node", "label_cols": ["node_labels"], "smiles_col": "ordered_smiles", "seed": 42, **task_kwargs}, } - dm = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH) + dm = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH, featurization={"atom_property_list_onehot": ["atomic-number"]}) dm.prepare_data() dm.setup() dm.train_ds.return_smiles = True - self.assertEqual(len(dm.train_ds), 10) + dl = dm.train_dataloader() + + batch = next(iter(dl)) + + atom_types = 
batch["labels"].node_task.squeeze() + atom_types_from_features = batch["features"].feat.argmax(1) + + np.testing.assert_array_equal(atom_types, atom_types_from_features) + + # Delete the cache if already exist + if exists(TEMP_CACHE_DATA_PATH): + rm(TEMP_CACHE_DATA_PATH, recursive=True) + + ################################################################################### + ### Test II: Two ordered SMILES representing the same molecule in same dataset ### + ################################################################################### + + # Create input data + df = pd.DataFrame( + { + "ordered_smiles": ["[C:0][C:1][O:2]", "[O:0][C:1][C:2]"], + "node_labels": [[0., 0., 2.], [2., 0., 0.]], + } + ) + + task_kwargs = {"df": df, "split_val": 0.0, "split_test": 0.0} + + # Check datamodule with single task and two labels + task_specific_args = { + "task": {"task_level": "node", "label_cols": ["node_labels"], "smiles_col": "ordered_smiles", "seed": 42, **task_kwargs}, + } + + dm = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH, featurization={"atom_property_list_onehot": ["atomic-number"]}) + dm.prepare_data() + dm.setup() + + dm.train_ds.return_smiles = True dl = dm.train_dataloader() batch = next(iter(dl)) + + atom_types = batch["labels"].node_task.squeeze() + atom_types_from_features = batch["features"].feat.argmax(1) - smiles = batch["smiles"] - unbatched_node_labels = unbatch(batch["labels"].node_task, batch["labels"].batch) + np.testing.assert_array_equal(atom_types_from_features, atom_types) + + # Delete the cache if already exist + if exists(TEMP_CACHE_DATA_PATH): + rm(TEMP_CACHE_DATA_PATH, recursive=True) + + ############################################################################################# + ### Test III: Merging two node-level tasks each with different ordering of ordered SMILES ### + ### TODO: Will currently fail ### + 
############################################################################################# + + # Create input data + df1 = pd.DataFrame( + { + "ordered_smiles": ["[C:0][C:1][O:2]"], + "node_labels": [[0., 0., 2.]], + } + ) - processed_labels = { - smiles[idx]: unbatched_node_labels[idx] for idx in range(len(smiles)) + df2 = pd.DataFrame( + { + "ordered_smiles": ["[O:0][C:1][C:2]"], + "node_labels": [[2., 0., 0.]], + } + ) + + task1_kwargs = {"df": df1, "split_val": 0.0, "split_test": 0.0} + task2_kwargs = {"df": df2, "split_val": 0.0, "split_test": 0.0} + + # Check datamodule with single task and two labels + task_specific_args = { + "task1": {"task_level": "node", "label_cols": ["node_labels"], "smiles_col": "ordered_smiles", "seed": 42, **task1_kwargs}, + "task2": {"task_level": "node", "label_cols": ["node_labels"], "smiles_col": "ordered_smiles", "seed": 42, **task2_kwargs}, } - for key in raw_labels.keys(): - assert torch.abs(raw_labels[key] - processed_labels[key]).max() < 1e-3 + dm = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH, featurization={"atom_property_list_onehot": ["atomic-number"]}) + dm.prepare_data() + dm.setup() + + dm.train_ds.return_smiles = True + + dl = dm.train_dataloader() + + batch = next(iter(dl)) + + unbatched_node_labels1 = unbatch(batch["labels"].node_task1, batch["labels"].batch) + unbatched_node_labels2 = unbatch(batch["labels"].node_task2, batch["labels"].batch) + unbatched_node_features = unbatch(batch["features"].feat, batch["features"].batch) + + atom_types1 = unbatched_node_labels1[0].squeeze() + atom_types2 = unbatched_node_labels2[0].squeeze() + atom_types_from_features = unbatched_node_features[0].argmax(1) + + np.testing.assert_array_equal(atom_types_from_features, atom_types1) + np.testing.assert_array_equal(atom_types_from_features, atom_types2) # Delete the cache if already exist if exists(TEMP_CACHE_DATA_PATH): rm(TEMP_CACHE_DATA_PATH, recursive=True) - # Check 
datamodule with two tasks with each one label + ############################################################################### + ### Test IV: Merging node-level task on graph-level task with no node order ### + ### NOTE: Works as rdkit does not merge ordered_smiles vs. unordered smiles ### + ############################################################################### + + # Create input data + df1 = pd.DataFrame( + { + "ordered_smiles": ["CCO"], + "graph_labels": [1.], + } + ) + + df2 = pd.DataFrame( + { + "ordered_smiles": ["[O:0][C:1][C:2]"], + "node_labels": [[2., 0., 0.]], + } + ) + + task1_kwargs = {"df": df1, "split_val": 0.0, "split_test": 0.0} + task2_kwargs = {"df": df2, "split_val": 0.0, "split_test": 0.0} + + # Check datamodule with single task and two labels task_specific_args = { - "task_1": {"task_level": "node", "label_cols": ["node_charges_mulliken"], "smiles_col": "ordered_smiles", "seed": 41, **task_kwargs}, - "task_2": {"task_level": "node", "label_cols": ["node_charges_lowdin"], "smiles_col": "ordered_smiles", "seed": 43, **task_kwargs}, + "task1": {"task_level": "graph", "label_cols": ["graph_labels"], "smiles_col": "ordered_smiles", "seed": 42, **task1_kwargs}, + "task2": {"task_level": "node", "label_cols": ["node_labels"], "smiles_col": "ordered_smiles", "seed": 42, **task2_kwargs}, } - dm = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH) + dm = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH, featurization={"atom_property_list_onehot": ["atomic-number"]}) dm.prepare_data() dm.setup() dm.train_ds.return_smiles = True - self.assertEqual(len(dm.train_ds), 10) - dl = dm.train_dataloader() batch = next(iter(dl)) + + atom_types = batch["labels"].node_task2.squeeze() + atom_types_from_features = batch["features"].feat.argmax(1) + + # Ignore NaNs + nan_indices = atom_types.isnan() + atom_types_from_features[nan_indices] = 333 + 
atom_types[nan_indices] = 333 + + np.testing.assert_array_equal(atom_types, atom_types_from_features) + + # Delete the cache if already exist + if exists(TEMP_CACHE_DATA_PATH): + rm(TEMP_CACHE_DATA_PATH, recursive=True) + + ##################################################################################### + ### Test V: Merging node-level task on graph-level task with different node order ### + ### TODO: Will currently fail ### + ##################################################################################### + + # Create input data + df1 = pd.DataFrame( + { + "ordered_smiles": ["[C:0][C:1][O:2]"], + "graph_labels": [1.], + } + ) - smiles = batch["smiles"] - unbatched_node_labels_1 = unbatch(batch["labels"].node_task_1, batch["labels"].batch) - unbatched_node_labels_2 = unbatch(batch["labels"].node_task_2, batch["labels"].batch) + df2 = pd.DataFrame( + { + "ordered_smiles": ["[O:0][C:1][C:2]"], + "node_labels": [[2., 0., 0.]], + } + ) + + task1_kwargs = {"df": df1, "split_val": 0.0, "split_test": 0.0} + task2_kwargs = {"df": df2, "split_val": 0.0, "split_test": 0.0} + + # Check datamodule with single task and two labels + task_specific_args = { + "task1": {"task_level": "graph", "label_cols": ["graph_labels"], "smiles_col": "ordered_smiles", "seed": 42, **task1_kwargs}, + "task2": {"task_level": "node", "label_cols": ["node_labels"], "smiles_col": "ordered_smiles", "seed": 42, **task2_kwargs}, + } + + dm = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH, featurization={"atom_property_list_onehot": ["atomic-number"]}) + dm.prepare_data() + dm.setup() + + dm.train_ds.return_smiles = True + + dl = dm.train_dataloader() + + batch = next(iter(dl)) + + atom_types = batch["labels"].node_task2.squeeze() + atom_types_from_features = batch["features"].feat.argmax(1) - processed_labels = { - smiles[idx]: torch.cat([unbatched_node_labels_1[idx], unbatched_node_labels_2[idx]], dim=-1) for idx in range(len(smiles)) + 
np.testing.assert_array_equal(atom_types, atom_types_from_features) + + # Delete the cache if already exist + if exists(TEMP_CACHE_DATA_PATH): + rm(TEMP_CACHE_DATA_PATH, recursive=True) + + ############################ + ### Test VI: ... ### + ### TODO: To be finished ### + ############################ + + # Create input data + df = pd.DataFrame( + { + "smiles": ["CCO", "OCC", "COC", "[C:0][C:1][O:2]", "[O:0][C:1][C:2]", "[C:0][O:1][C:2]"], + "graph_labels": [0., 0., 1., 0., 0., 1.], + } + ) + + task_kwargs = {"df": df, "split_val": 0.0, "split_test": 0.0} + + # Check datamodule with single task and two labels + task_specific_args = { + "task": {"task_level": "graph", "label_cols": ["graph_labels"], "smiles_col": "smiles", "seed": 42, **task_kwargs}, } - for key in raw_labels.keys(): - assert torch.abs(raw_labels[key] - processed_labels[key]).max() < 1e-3 + dm = MultitaskFromSmilesDataModule(task_specific_args, processed_graph_data_path=TEMP_CACHE_DATA_PATH, featurization={"atom_property_list_onehot": ["atomic-number"]}) + dm.prepare_data() + dm.setup() + + dm.train_ds.return_smiles = True + + dl = dm.train_dataloader() + + batch = next(iter(dl)) + + # Delete the cache if already exist + if exists(TEMP_CACHE_DATA_PATH): + rm(TEMP_CACHE_DATA_PATH, recursive=True) if __name__ == "__main__": From 2fb7f4be3b8c3c90dae8b4375ccb490a050c66e2 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 6 Sep 2024 21:14:08 -0400 Subject: [PATCH 127/175] Fixed all unit-test, except those for IPU --- graphium/data/datamodule.py | 6 ++++++ tests/test_losses.py | 3 ++- tests/test_metrics.py | 22 ---------------------- tests/test_predictor.py | 2 +- 4 files changed, 9 insertions(+), 24 deletions(-) diff --git a/graphium/data/datamodule.py b/graphium/data/datamodule.py index e7366d2c2..edc5e207d 100644 --- a/graphium/data/datamodule.py +++ b/graphium/data/datamodule.py @@ -2098,6 +2098,12 @@ def __init__( for t in tdc_benchmark_names } + # Create a temporary `processed_graph_data_path` to 
store the processed graphs and labels + if processed_graph_data_path is None: + processed_graph_data_path = fs.join(tdc_cache_dir, "processed_graph_data") + if not fs.exists(processed_graph_data_path): + fs.mkdir(processed_graph_data_path) + super().__init__( task_specific_args=task_specific_args, featurization=featurization, diff --git a/tests/test_losses.py b/tests/test_losses.py index b2f343bf9..82e8090ea 100644 --- a/tests/test_losses.py +++ b/tests/test_losses.py @@ -26,7 +26,8 @@ def _parse(loss_fun): eval_options = EvalOptions(loss_fun=loss_fun, metrics_on_progress_bar=None) - return eval_options.parse_loss_fun(loss_fun) + loss_name, loss_fun = eval_options.parse_loss_fun(loss_fun) + return loss_fun class test_HybridCELoss(ut.TestCase): diff --git a/tests/test_metrics.py b/tests/test_metrics.py index 9bfd936ee..cdeac5f7d 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -197,28 +197,6 @@ def test_pickling(self): if isinstance(metric, str): self.assertIsInstance(state["metric"], str, msg=err_msg) - def test_classifigression_target_squeezing(self): - preds = torch.Tensor([[0.1, 0.1, 0.3, 0.5, 0.0, 0.1, 0.0, 0.7, 0.2, 0.0]]) - target = torch.Tensor([3, 0]) - expected_scores = [0.5, 0.75] - n_brackets = 5 - metrics = ["accuracy", "averageprecision"] - other_kwargs = [ - {"task": "multiclass", "num_classes": n_brackets, "top_k": 1}, - {"task": "multiclass", "num_classes": n_brackets}, - ] - - for metric, kwargs, expected_score in zip(metrics, other_kwargs, expected_scores): - metric_wrapper = MetricWrapper( - metric=metric, - multitask_handling="mean-per-label", - squeeze_targets=True, - target_to_int=True, - **kwargs, - ) - score = metric_wrapper(preds, target) - - assert score == expected_score def test_update_compute_reset(self): torch.manual_seed(42) diff --git a/tests/test_predictor.py b/tests/test_predictor.py index 1ef69775f..3d3b8648c 100644 --- a/tests/test_predictor.py +++ b/tests/test_predictor.py @@ -29,7 +29,7 @@ def 
test_parse_loss_fun(self): preds = torch.rand(10, 5) target = (torch.rand(10, 5) > 0.5).to(preds.dtype) for this_loss in losses: - loss_fun = EvalOptions.parse_loss_fun(this_loss) + loss_name, loss_fun = EvalOptions.parse_loss_fun(this_loss) loss = loss_fun(preds, target) From 607e71b3f0e90e290cc9101c6ca553cb30081df8 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 6 Sep 2024 21:22:03 -0400 Subject: [PATCH 128/175] First pass at removing IPU --- .github/workflows/test.yml | 3 - .github/workflows/test_ipu.yml | 69 --- .gitignore | 9 - README.md | 13 - codecov.yml | 5 - docs/api/graphium.ipu.md | 43 -- docs/contribute.md | 15 - docs/design.md | 3 +- docs/index.md | 11 - .../feature_processing/timing_parallel.ipynb | 2 +- .../simple-molecular-model.ipynb | 22 +- enable_ipu.sh | 29 -- env.yml | 1 - expts/configs/config_gps_10M_pcqm4m.yaml | 6 +- expts/configs/config_gps_10M_pcqm4m_mod.yaml | 6 +- expts/configs/config_mpnn_10M_b3lyp.yaml | 8 +- expts/configs/config_mpnn_pcqm4m.yaml | 6 +- expts/dataset_benchmark.py | 1 - expts/hydra-configs/accelerator/ipu.yaml | 18 - .../accelerator/ipu_pipeline.yaml | 22 - .../loss_metrics_datamodule/l1000_mcf7.yaml | 2 +- .../loss_metrics_datamodule/l1000_vcap.yaml | 2 +- .../loss_metrics_datamodule/largemix.yaml | 18 +- .../loss_metrics_datamodule/pcba_1328.yaml | 4 +- .../tasks/loss_metrics_datamodule/pcqm4m.yaml | 6 +- .../loss_metrics_datamodule/pcqm4m_g25.yaml | 8 +- .../loss_metrics_datamodule/pcqm4m_n4.yaml | 8 +- .../tasks/loss_metrics_datamodule/toymix.yaml | 6 +- .../base_config/large.yaml | 18 +- .../base_config/large_pcba.yaml | 18 +- .../base_config/large_pcqm_g25.yaml | 18 +- .../base_config/large_pcqm_n4.yaml | 18 +- .../base_config/small.yaml | 16 +- .../baseline/config_small_gcn_baseline.yaml | 16 +- .../config_large_gcn_gpu.yaml | 2 +- .../neurips2023_configs/config_luis_jama.yaml | 8 +- .../config_small_gcn_gpu.yaml | 2 +- .../config_large_gcn_mcf7.yaml | 6 +- .../config_large_gcn_pcba.yaml | 6 +- 
.../config_large_gcn_vcap.yaml | 6 +- .../single_task_gin/config_large_gin_g25.yaml | 8 +- .../config_large_gin_mcf7.yaml | 6 +- .../single_task_gin/config_large_gin_n4.yaml | 8 +- .../config_large_gin_pcba.yaml | 6 +- .../single_task_gin/config_large_gin_pcq.yaml | 10 +- .../config_large_gin_vcap.yaml | 6 +- .../config_large_gine_g25.yaml | 8 +- .../config_large_gine_mcf7.yaml | 6 +- .../config_large_gine_n4.yaml | 8 +- .../config_large_gine_pcba.yaml | 6 +- .../config_large_gine_pcq.yaml | 10 +- .../config_large_gine_vcap.yaml | 6 +- graphium/cli/train_finetune_test.py | 2 +- graphium/config/loc-config_largemix.yaml | 424 +++++++++++++++++ graphium/ipu/README.md | 15 - graphium/ipu/__init__.py | 0 graphium/ipu/ipu_dataloader.py | 434 ------------------ graphium/ipu/ipu_losses.py | 196 -------- graphium/ipu/ipu_simple_lightning.py | 169 ------- graphium/ipu/ipu_utils.py | 162 ------- graphium/ipu/ipu_wrapper.py | 235 ---------- graphium/ipu/to_dense_batch.py | 186 -------- install_ipu.sh | 112 ----- mkdocs.yml | 2 - pyproject.toml | 1 - tests/config_test_ipu_dataloader.yaml | 6 +- .../config_test_ipu_dataloader_multitask.yaml | 16 +- tests/test_ipu_dataloader.py | 256 ----------- tests/test_ipu_losses.py | 172 ------- tests/test_ipu_options.py | 149 ------ tests/test_ipu_poptorch.py | 29 -- 71 files changed, 600 insertions(+), 2534 deletions(-) delete mode 100644 .github/workflows/test_ipu.yml delete mode 100644 docs/api/graphium.ipu.md delete mode 100755 enable_ipu.sh delete mode 100644 expts/hydra-configs/accelerator/ipu.yaml delete mode 100644 expts/hydra-configs/accelerator/ipu_pipeline.yaml create mode 100644 graphium/config/loc-config_largemix.yaml delete mode 100644 graphium/ipu/README.md delete mode 100644 graphium/ipu/__init__.py delete mode 100644 graphium/ipu/ipu_dataloader.py delete mode 100644 graphium/ipu/ipu_losses.py delete mode 100644 graphium/ipu/ipu_simple_lightning.py delete mode 100644 graphium/ipu/ipu_utils.py delete mode 100644 
graphium/ipu/ipu_wrapper.py delete mode 100644 graphium/ipu/to_dense_batch.py delete mode 100755 install_ipu.sh delete mode 100644 tests/test_ipu_dataloader.py delete mode 100644 tests/test_ipu_losses.py delete mode 100644 tests/test_ipu_options.py delete mode 100644 tests/test_ipu_poptorch.py diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index af85c8616..601f30208 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -55,9 +55,6 @@ jobs: - name: Install C++ library run: cd graphium/graphium_cpp && git clone https://github.com/pybind/pybind11.git && export PYTHONPATH=$PYTHONPATH:./pybind11 && python -m pip install . && cd ../.. - - name: Run tests - run: pytest -m 'not ipu' - - name: Test CLI run: graphium --help diff --git a/.github/workflows/test_ipu.yml b/.github/workflows/test_ipu.yml deleted file mode 100644 index 886c4c2b7..000000000 --- a/.github/workflows/test_ipu.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: test-ipu - -on: - push: - branches: ["main"] - tags: ["*"] - pull_request: - branches: - - "*" - - "!gh-pages" - schedule: - - cron: "0 4 * * *" - -jobs: - test-ipu: - strategy: - fail-fast: false - matrix: - python-version: ["3.8"] - pytorch-version: ["2.0"] - - runs-on: "ubuntu-20.04" - timeout-minutes: 30 - - defaults: - run: - shell: bash -l {0} - - name: | - poptorch_env - - python=${{ matrix.python-version }} - - pytorch=${{ matrix.pytorch-version }} - - steps: - - name: Checkout the code - uses: actions/checkout@v3 - - - name: Activate SDK + Install Requirements - run: | - python3 -m pip install --upgrade pip - wget -q -O 'poplar_sdk-ubuntu_20_04-3.3.0-208993bbb7.tar.gz' 'https://downloads.graphcore.ai/direct?package=poplar-poplar_sdk_ubuntu_20_04_3.3.0_208993bbb7-3.3.0&file=poplar_sdk-ubuntu_20_04-3.3.0-208993bbb7.tar.gz' - tar -xzf poplar_sdk-ubuntu_20_04-3.3.0-208993bbb7.tar.gz - python3 -m pip install 
poplar_sdk-ubuntu_20_04-3.3.0+1403-208993bbb7/poptorch-3.3.0+113432_960e9c294b_ubuntu_20_04-cp38-cp38-linux_x86_64.whl - # Enable Poplar SDK (including Poplar and PopART) - source poplar_sdk-ubuntu_20_04-3.3.0+1403-208993bbb7/enable - - python -c "import poptorch" - - # Download the datafiles (Total ~ 10Mb - nothing compared to the libraries) - wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Small-dataset/ZINC12k.csv.gz - wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Small-dataset/Tox21-7k-12-labels.csv.gz - wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Small-dataset/qm9.csv.gz - - - # Install the IPU specific and graphium requirements - pip install -r requirements_ipu.txt - # Install Graphium in dev mode - python -m pip install --no-deps -e . - python3 -m pytest -m 'not skip_ipu' - - - name: Codecov Upload - uses: codecov/codecov-action@v3 - with: - files: ./coverage.xml - flags: unittests - name: codecov-umbrella - fail_ci_if_error: false - verbose: false - env_vars: ${{ matrix.python-version }},${{ matrix.pytorch-version }} diff --git a/.gitignore b/.gitignore index cfd47eabf..6e7be6e74 100644 --- a/.gitignore +++ b/.gitignore @@ -54,15 +54,6 @@ debug/ change_commits.sh graphium/features/test_new_pes.ipynb -# IPU related ignores and profiler outputs -*.a -*.cbor -*.capnp -*.pop -*.popart -*.pop_cache -*.popef -*.pvti* ############ END graphium Custom GitIgnore ############## diff --git a/README.md b/README.md index 53a7172bb..7556b0a65 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,6 @@ [![GitHub Repo stars](https://img.shields.io/github/stars/datamol-io/graphium)](https://github.com/datamol-io/graphium/stargazers) [![GitHub Repo stars](https://img.shields.io/github/forks/datamol-io/graphium)](https://github.com/datamol-io/graphium/network/members) 
[![test](https://github.com/datamol-io/graphium/actions/workflows/test.yml/badge.svg)](https://github.com/datamol-io/graphium/actions/workflows/test.yml) -[![test-ipu](https://github.com/datamol-io/graphium/actions/workflows/test_ipu.yml/badge.svg)](https://github.com/datamol-io/graphium/actions/workflows/test_ipu.yml) [![release](https://github.com/datamol-io/graphium/actions/workflows/release.yml/badge.svg)](https://github.com/datamol-io/graphium/actions/workflows/release.yml) [![code-check](https://github.com/datamol-io/graphium/actions/workflows/code-check.yml/badge.svg)](https://github.com/datamol-io/graphium/actions/workflows/code-check.yml) [![doc](https://github.com/datamol-io/graphium/actions/workflows/doc.yml/badge.svg)](https://github.com/datamol-io/graphium/actions/workflows/doc.yml) @@ -53,18 +52,6 @@ mamba activate graphium pip install --no-deps -e . ``` -### For IPU developers -```bash -# Install Graphcore's SDK and Graphium dependencies in a new environment called `.graphium_ipu` -./install_ipu.sh .graphium_ipu -``` - -The above step needs to be done once. After that, enable the SDK and the environment as follows: - -```bash -source enable_ipu.sh .graphium_ipu -``` - ## Training a model To learn how to train a model, we invite you to look at the documentation, or the jupyter notebooks available [here](https://github.com/datamol-io/graphium/tree/master/docs/tutorials/model_training). 
diff --git a/codecov.yml b/codecov.yml index 60a2c37d6..94e8bd149 100644 --- a/codecov.yml +++ b/codecov.yml @@ -16,8 +16,3 @@ component_management: target: auto branches: - "!main" - individual_components: - - component_id: ipu # this is an identifier that should not be changed - name: ipu # this is a display name, and can be changed freely - paths: - - graphium/ipu/** diff --git a/docs/api/graphium.ipu.md b/docs/api/graphium.ipu.md deleted file mode 100644 index 3943e78a4..000000000 --- a/docs/api/graphium.ipu.md +++ /dev/null @@ -1,43 +0,0 @@ -graphium.ipu -==================== -Code for adapting to run on IPU - -=== "Contents" - - * [IPU Dataloader](#ipu-dataloader) - * [IPU Losses](#ipu-losses) - * [IPU Metrics](#ipu-metrics) - * [IPU Simple Lightning](#ipu-simple-lightning) - * [IPU Utils](#ipu-utils) - * [IPU Wrapper](#ipu-wrapper) - * [To Dense Batch](#to-dense-batch) - -## IPU Dataloader ------------- -::: graphium.ipu.ipu_dataloader - - -## IPU Losses ------------- -::: graphium.ipu.ipu_losses - - -## IPU Simple Lightning ------------- -::: graphium.ipu.ipu_simple_lightning - - -## IPU Utils ------------- -::: graphium.ipu.ipu_utils - - -## IPU Wrapper ------------- -::: graphium.ipu.ipu_wrapper - - -## To Dense Batch ------------- -::: graphium.ipu.to_dense_batch - diff --git a/docs/contribute.md b/docs/contribute.md index b4fef7ce0..4f9f71fae 100644 --- a/docs/contribute.md +++ b/docs/contribute.md @@ -18,21 +18,6 @@ mamba activate graphium pip install --no-deps -e . ``` -### For IPU developers - -Download the SDK and use pypi to create your environment: - -```bash -# Install Graphcore's SDK and Graphium dependencies in a new environment called `.graphium_ipu` -./install_ipu.sh .graphium_ipu -``` - -The above step needs to be done once. 
After that, enable the SDK and the environment as follows: - -```bash -source enable_ipu.sh .graphium_ipu -``` - ## Build the documentation You can build and serve the documentation locally with: diff --git a/docs/design.md b/docs/design.md index 380ac28e4..43594fc98 100644 --- a/docs/design.md +++ b/docs/design.md @@ -42,7 +42,6 @@ Below are a list of directory and their respective documentations: - [data](https://github.com/datamol-io/graphium/blob/main/graphium/data/README.md) - [features](https://github.com/datamol-io/graphium/tree/main/graphium/features/README.md) - finetuning -- [ipu](https://github.com/datamol-io/graphium/tree/main/graphium/ipu/README.md) - [nn](https://github.com/datamol-io/graphium/tree/main/graphium/nn/README.md) - [trainer](https://github.com/datamol-io/graphium/tree/main/graphium/trainer/README.md) - [utils](https://github.com/datamol-io/graphium/tree/main/graphium/features/README.md) @@ -56,7 +55,7 @@ Hence, we use [hydra](https://hydra.cc/docs/intro/) to enable splitting the conf Examples of possibilities include: -- Switching between accelerators (CPU, GPU and IPU) +- Switching between accelerators (CPU, GPU) - Benchmarking different models on the same dataset - Fine-tuning a pre-trained model on a new dataset diff --git a/docs/index.md b/docs/index.md index ef01e54be..32170ee29 100644 --- a/docs/index.md +++ b/docs/index.md @@ -25,17 +25,6 @@ or pip: pip install graphium ``` -### For IPU -```bash -# Install Graphcore's SDK and Graphium dependencies in a new environment called `.graphium_ipu` -./install_ipu.sh .graphium_ipu -``` - -The above step needs to be done once. 
After that, enable the SDK and the environment as follows: - -```bash -source enable_ipu.sh .graphium_ipu -``` Finally, you will need to install graphium with pip ```bash diff --git a/docs/tutorials/feature_processing/timing_parallel.ipynb b/docs/tutorials/feature_processing/timing_parallel.ipynb index 477251e71..22e3552e5 100644 --- a/docs/tutorials/feature_processing/timing_parallel.ipynb +++ b/docs/tutorials/feature_processing/timing_parallel.ipynb @@ -476,7 +476,7 @@ ], "metadata": { "kernelspec": { - "display_name": "graphium_ipu", + "display_name": "graphium", "language": "python", "name": "python3" }, diff --git a/docs/tutorials/model_training/simple-molecular-model.ipynb b/docs/tutorials/model_training/simple-molecular-model.ipynb index 26a45cfa0..545109ebc 100644 --- a/docs/tutorials/model_training/simple-molecular-model.ipynb +++ b/docs/tutorials/model_training/simple-molecular-model.ipynb @@ -405,9 +405,9 @@ " zinc:\n", " - mae\n", " loss_fun:\n", - " qm9: mae_ipu\n", - " tox21: bce_logits_ipu\n", - " zinc: mae_ipu\n", + " qm9: mae\n", + " tox21: bce_logits\n", + " zinc: mae\n", " random_seed: ${constants.seed}\n", " optim_kwargs:\n", " lr: 4.0e-05\n", @@ -451,28 +451,28 @@ "metrics:\n", " qm9:\n", " - name: mae\n", - " metric: mae_ipu\n", + " metric: mae\n", " target_nan_mask: null\n", " multitask_handling: flatten\n", " threshold_kwargs: null\n", " - name: pearsonr\n", - " metric: pearsonr_ipu\n", + " metric: pearsonr\n", " threshold_kwargs: null\n", " target_nan_mask: null\n", " multitask_handling: mean-per-label\n", " - name: r2_score\n", - " metric: r2_score_ipu\n", + " metric: r2_score\n", " target_nan_mask: null\n", " multitask_handling: mean-per-label\n", " threshold_kwargs: null\n", " tox21:\n", " - name: auroc\n", - " metric: auroc_ipu\n", + " metric: auroc\n", " task: binary\n", " multitask_handling: mean-per-label\n", " threshold_kwargs: null\n", " - name: avpr\n", - " metric: average_precision_ipu\n", + " metric: average_precision\n", " 
task: binary\n", " multitask_handling: mean-per-label\n", " threshold_kwargs: null\n", @@ -498,17 +498,17 @@ " th_on_target: true\n", " zinc:\n", " - name: mae\n", - " metric: mae_ipu\n", + " metric: mae\n", " target_nan_mask: null\n", " multitask_handling: flatten\n", " threshold_kwargs: null\n", " - name: pearsonr\n", - " metric: pearsonr_ipu\n", + " metric: pearsonr\n", " threshold_kwargs: null\n", " target_nan_mask: null\n", " multitask_handling: mean-per-label\n", " - name: r2_score\n", - " metric: r2_score_ipu\n", + " metric: r2_score\n", " target_nan_mask: null\n", " multitask_handling: mean-per-label\n", " threshold_kwargs: null\n", diff --git a/enable_ipu.sh b/enable_ipu.sh deleted file mode 100755 index 63dd34987..000000000 --- a/enable_ipu.sh +++ /dev/null @@ -1,29 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Graphcore Limited. -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Graphcore Limited is not liable -for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. 
--------------------------------------------------------------------------------- -""" - - -#!/bin/bash - -# Default location for the virtual environment -default_venv_name=".graphium_ipu" - -# Allow the user to specify the location of their virtual environment -# If not specified, use the default location -venv_name=${1:-$default_venv_name} - -# Constants -sdk_path="${venv_name}/poplar_sdk-ubuntu_20_04-3.3.0+1403-208993bbb7" - -# Source the virtual environment -source ${venv_name}/bin/activate -source ${sdk_path}/enable \ No newline at end of file diff --git a/env.yml b/env.yml index 318d68575..a0f2fdb98 100644 --- a/env.yml +++ b/env.yml @@ -71,6 +71,5 @@ dependencies: - mike >=1.0.0 - pip: - - lightning-graphcore # optional, for using IPUs only - hydra-core>=1.3.2 - hydra-optuna-sweeper diff --git a/expts/configs/config_gps_10M_pcqm4m.yaml b/expts/configs/config_gps_10M_pcqm4m.yaml index 0487a8d04..bd013fd7f 100644 --- a/expts/configs/config_gps_10M_pcqm4m.yaml +++ b/expts/configs/config_gps_10M_pcqm4m.yaml @@ -212,7 +212,7 @@ predictor: metrics_on_progress_bar: homolumo: ["mae", "pearsonr"] loss_fun: - homolumo: mse_ipu + homolumo: mse random_seed: *seed optim_kwargs: lr: 4.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -235,12 +235,12 @@ predictor: metrics: homolumo: - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: flatten threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label diff --git a/expts/configs/config_gps_10M_pcqm4m_mod.yaml b/expts/configs/config_gps_10M_pcqm4m_mod.yaml index 19543302b..d19a97f2a 100644 --- a/expts/configs/config_gps_10M_pcqm4m_mod.yaml +++ b/expts/configs/config_gps_10M_pcqm4m_mod.yaml @@ -223,7 +223,7 @@ predictor: metrics_on_progress_bar: homolumo: ["mae", "pearsonr"] loss_fun: - homolumo: mse_ipu + homolumo: mse random_seed: *seed optim_kwargs: lr: 4.e-4 # warmup can be 
scheduled using torch_scheduler_kwargs @@ -246,12 +246,12 @@ predictor: metrics: homolumo: - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: flatten threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label diff --git a/expts/configs/config_mpnn_10M_b3lyp.yaml b/expts/configs/config_mpnn_10M_b3lyp.yaml index 424dbcd71..9bd43f78b 100644 --- a/expts/configs/config_mpnn_10M_b3lyp.yaml +++ b/expts/configs/config_mpnn_10M_b3lyp.yaml @@ -244,8 +244,8 @@ predictor: alphagap: ["mae", "pearsonr"] betagap: ["mae", "pearsonr"] loss_fun: - alphagap: mse_ipu - betagap: mse_ipu + alphagap: mse + betagap: mse random_seed: *seed optim_kwargs: lr: 4.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -268,12 +268,12 @@ predictor: metrics: alphagap: &alpha_metrics - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: flatten threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label diff --git a/expts/configs/config_mpnn_pcqm4m.yaml b/expts/configs/config_mpnn_pcqm4m.yaml index 70972d370..358ad6c05 100644 --- a/expts/configs/config_mpnn_pcqm4m.yaml +++ b/expts/configs/config_mpnn_pcqm4m.yaml @@ -166,7 +166,7 @@ predictor: metrics_on_progress_bar: homolumo: ["mae", "pearsonr"] loss_fun: - homolumo: mse_ipu + homolumo: mse random_seed: *seed optim_kwargs: lr: 4.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -189,12 +189,12 @@ predictor: metrics: homolumo: - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: flatten threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label diff --git a/expts/dataset_benchmark.py b/expts/dataset_benchmark.py index 
e8bf2e24d..948a03688 100644 --- a/expts/dataset_benchmark.py +++ b/expts/dataset_benchmark.py @@ -21,7 +21,6 @@ # CONFIG_FILE = "expts/neurips2023_configs/debug/config_large_gcn_debug.yaml" CONFIG_FILE = "expts/neurips2023_configs/config_large_gcn.yaml" # CONFIG_FILE = "expts/configs/config_pcqmv2_mpnn.yaml" -# CONFIG_FILE = "expts/configs/config_ipu_qm9.yaml" def benchmark(fn, *args, message="", log2wandb=False, **kwargs): diff --git a/expts/hydra-configs/accelerator/ipu.yaml b/expts/hydra-configs/accelerator/ipu.yaml deleted file mode 100644 index 3e6fb4429..000000000 --- a/expts/hydra-configs/accelerator/ipu.yaml +++ /dev/null @@ -1,18 +0,0 @@ -type: ipu -ipu_config: - - deviceIterations(60) # IPU would require large batches to be ready for the model. - # 60 for PCQM4mv2 - # 30 for largemix - - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 96) - - Precision.enableStochasticRounding(True) - -ipu_inference_config: - # set device iteration and replication factor to 1 during inference - # gradient accumulation was set to 1 in the code - - deviceIterations(1) - - replicationFactor(1) - - Precision.enableStochasticRounding(False) diff --git a/expts/hydra-configs/accelerator/ipu_pipeline.yaml b/expts/hydra-configs/accelerator/ipu_pipeline.yaml deleted file mode 100644 index 996218646..000000000 --- a/expts/hydra-configs/accelerator/ipu_pipeline.yaml +++ /dev/null @@ -1,22 +0,0 @@ -type: ipu -ipu_config: - - deviceIterations(60) # IPU would require large batches to be ready for the model. 
- # 60 for PCQM4mv2 - # 30 for largemix - - replicationFactor(4) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 96) - - Precision.enableStochasticRounding(True) - -ipu_inference_config: - # set device iteration and replication factor to 1 during inference - # gradient accumulation was set to 1 in the code - - deviceIterations(60) - - replicationFactor(1) - - Precision.enableStochasticRounding(False) - -accelerator_kwargs: - _accelerator: "ipu" - gnn_layers_per_ipu: [4, 4, 4, 4] \ No newline at end of file diff --git a/expts/hydra-configs/tasks/loss_metrics_datamodule/l1000_mcf7.yaml b/expts/hydra-configs/tasks/loss_metrics_datamodule/l1000_mcf7.yaml index 43933a7fa..53c753402 100644 --- a/expts/hydra-configs/tasks/loss_metrics_datamodule/l1000_mcf7.yaml +++ b/expts/hydra-configs/tasks/loss_metrics_datamodule/l1000_mcf7.yaml @@ -7,7 +7,7 @@ predictor: l1000_mcf7: [] loss_fun: l1000_mcf7: - name: hybrid_ce_ipu + name: hybrid_ce n_brackets: 3 alpha: 0.5 diff --git a/expts/hydra-configs/tasks/loss_metrics_datamodule/l1000_vcap.yaml b/expts/hydra-configs/tasks/loss_metrics_datamodule/l1000_vcap.yaml index 27b89d862..e385bf23e 100644 --- a/expts/hydra-configs/tasks/loss_metrics_datamodule/l1000_vcap.yaml +++ b/expts/hydra-configs/tasks/loss_metrics_datamodule/l1000_vcap.yaml @@ -7,7 +7,7 @@ predictor: l1000_vcap: [] loss_fun: l1000_vcap: - name: hybrid_ce_ipu + name: hybrid_ce n_brackets: 3 alpha: 0.5 diff --git a/expts/hydra-configs/tasks/loss_metrics_datamodule/largemix.yaml b/expts/hydra-configs/tasks/loss_metrics_datamodule/largemix.yaml index 921960cd1..f307f0441 100644 --- a/expts/hydra-configs/tasks/loss_metrics_datamodule/largemix.yaml +++ b/expts/hydra-configs/tasks/loss_metrics_datamodule/largemix.yaml @@ -15,16 +15,16 @@ predictor: pcqm4m_n4: [] loss_fun: l1000_vcap: - name: hybrid_ce_ipu + 
name: hybrid_ce n_brackets: 3 alpha: 0.5 l1000_mcf7: - name: hybrid_ce_ipu + name: hybrid_ce n_brackets: 3 alpha: ${predictor.loss_fun.l1000_vcap.alpha} - pcba_1328: bce_logits_ipu - pcqm4m_g25: mae_ipu - pcqm4m_n4: mae_ipu + pcba_1328: bce_logits + pcqm4m_g25: mae + pcqm4m_n4: mae metrics: l1000_vcap: &classif_metrics @@ -48,7 +48,7 @@ metrics: threshold_kwargs: null l1000_mcf7: *classif_metrics pcba_1328: - # use auroc and averageprecision (non_ipu version) so tha nans are handled correctly + - name: auroc metric: auroc task: binary @@ -63,17 +63,17 @@ metrics: threshold_kwargs: null pcqm4m_g25: &pcqm_metrics - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: mean-per-label threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label - name: r2 - metric: r2_score_ipu + metric: r2_score threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label diff --git a/expts/hydra-configs/tasks/loss_metrics_datamodule/pcba_1328.yaml b/expts/hydra-configs/tasks/loss_metrics_datamodule/pcba_1328.yaml index adc3321a0..72f9fba35 100644 --- a/expts/hydra-configs/tasks/loss_metrics_datamodule/pcba_1328.yaml +++ b/expts/hydra-configs/tasks/loss_metrics_datamodule/pcba_1328.yaml @@ -6,11 +6,11 @@ predictor: metrics_on_training_set: pcba_1328: [] loss_fun: - pcba_1328: bce_logits_ipu + pcba_1328: bce_logits metrics: pcba_1328: - # use auroc and averageprecision (non_ipu version) so tha nans are handled correctly + - name: auroc metric: auroc task: binary diff --git a/expts/hydra-configs/tasks/loss_metrics_datamodule/pcqm4m.yaml b/expts/hydra-configs/tasks/loss_metrics_datamodule/pcqm4m.yaml index 8dcf2c0c4..8eb878b62 100644 --- a/expts/hydra-configs/tasks/loss_metrics_datamodule/pcqm4m.yaml +++ b/expts/hydra-configs/tasks/loss_metrics_datamodule/pcqm4m.yaml @@ -7,18 +7,18 @@ predictor: metrics_on_training_set: homolumo: 
["pearsonr"] loss_fun: - homolumo: mae_ipu + homolumo: mae # Task-specific metrics: homolumo: - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: mean-per-label threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label diff --git a/expts/hydra-configs/tasks/loss_metrics_datamodule/pcqm4m_g25.yaml b/expts/hydra-configs/tasks/loss_metrics_datamodule/pcqm4m_g25.yaml index 047701f6e..8247b4c47 100644 --- a/expts/hydra-configs/tasks/loss_metrics_datamodule/pcqm4m_g25.yaml +++ b/expts/hydra-configs/tasks/loss_metrics_datamodule/pcqm4m_g25.yaml @@ -6,22 +6,22 @@ predictor: metrics_on_training_set: pcqm4m_g25: [] loss_fun: - pcqm4m_g25: mae_ipu + pcqm4m_g25: mae metrics: pcqm4m_g25: - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: mean-per-label threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label - name: r2 - metric: r2_score_ipu + metric: r2_score threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label diff --git a/expts/hydra-configs/tasks/loss_metrics_datamodule/pcqm4m_n4.yaml b/expts/hydra-configs/tasks/loss_metrics_datamodule/pcqm4m_n4.yaml index 494843464..2ef471be4 100644 --- a/expts/hydra-configs/tasks/loss_metrics_datamodule/pcqm4m_n4.yaml +++ b/expts/hydra-configs/tasks/loss_metrics_datamodule/pcqm4m_n4.yaml @@ -6,22 +6,22 @@ predictor: metrics_on_training_set: pcqm4m_n4: [] loss_fun: - pcqm4m_n4: mae_ipu + pcqm4m_n4: mae metrics: pcqm4m_n4: - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: mean-per-label threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label - name: r2 - metric: r2_score_ipu + metric: r2_score 
threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label diff --git a/expts/hydra-configs/tasks/loss_metrics_datamodule/toymix.yaml b/expts/hydra-configs/tasks/loss_metrics_datamodule/toymix.yaml index ceb4e6d69..bf2e044b4 100644 --- a/expts/hydra-configs/tasks/loss_metrics_datamodule/toymix.yaml +++ b/expts/hydra-configs/tasks/loss_metrics_datamodule/toymix.yaml @@ -8,9 +8,9 @@ predictor: tox21: ["auroc"] zinc: ["mae"] loss_fun: - qm9: mae_ipu - tox21: bce_logits_ipu - zinc: mae_ipu + qm9: mae + tox21: bce_logits + zinc: mae metrics: qm9: &qm9_metrics diff --git a/expts/neurips2023_configs/base_config/large.yaml b/expts/neurips2023_configs/base_config/large.yaml index 18503527c..938c958da 100644 --- a/expts/neurips2023_configs/base_config/large.yaml +++ b/expts/neurips2023_configs/base_config/large.yaml @@ -325,16 +325,16 @@ predictor: pcqm4m_n4: [] loss_fun: l1000_vcap: - name: hybrid_ce_ipu + name: hybrid_ce n_brackets: 3 alpha: 0.5 l1000_mcf7: - name: hybrid_ce_ipu + name: hybrid_ce n_brackets: 3 alpha: ${predictor.loss_fun.l1000_vcap.alpha} - pcba_1328: bce_logits_ipu - pcqm4m_g25: mae_ipu - pcqm4m_n4: mae_ipu + pcba_1328: bce_logits + pcqm4m_g25: mae + pcqm4m_n4: mae random_seed: ${constants.seed} optim_kwargs: lr: 1.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -374,7 +374,7 @@ metrics: threshold_kwargs: null l1000_mcf7: *classif_metrics pcba_1328: - # use auroc and averageprecision (non_ipu version) so tha nans are handled correctly + - name: auroc metric: auroc task: binary @@ -389,17 +389,17 @@ metrics: threshold_kwargs: null pcqm4m_g25: &pcqm_metrics - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: mean-per-label threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label - name: r2 - metric: r2_score_ipu + metric: r2_score threshold_kwargs: null target_nan_mask: null 
multitask_handling: mean-per-label diff --git a/expts/neurips2023_configs/base_config/large_pcba.yaml b/expts/neurips2023_configs/base_config/large_pcba.yaml index a1e3d108f..12554be9b 100644 --- a/expts/neurips2023_configs/base_config/large_pcba.yaml +++ b/expts/neurips2023_configs/base_config/large_pcba.yaml @@ -324,16 +324,16 @@ predictor: #pcqm4m_n4: [] loss_fun: # l1000_vcap: - # name: hybrid_ce_ipu + # name: hybrid_ce # n_brackets: 3 # alpha: 0.5 # l1000_mcf7: - # name: hybrid_ce_ipu + # name: hybrid_ce # n_brackets: 3 # alpha: ${predictor.loss_fun.l1000_vcap.alpha} - pcba_1328: bce_logits_ipu - # pcqm4m_g25: mae_ipu - #pcqm4m_n4: mae_ipu + pcba_1328: bce_logits + # pcqm4m_g25: mae + #pcqm4m_n4: mae random_seed: ${constants.seed} optim_kwargs: lr: 1.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -373,7 +373,7 @@ metrics: # threshold_kwargs: null # l1000_mcf7: *classif_metrics pcba_1328: - # use auroc and averageprecision (non_ipu version) so tha nans are handled correctly + - name: auroc metric: auroc task: binary @@ -388,17 +388,17 @@ metrics: threshold_kwargs: null # pcqm4m_n4: &pcqm_metrics #- name: mae - #metric: mae_ipu + #metric: mae #target_nan_mask: null #multitask_handling: mean-per-label #threshold_kwargs: null #- name: pearsonr - #metric: pearsonr_ipu + #metric: pearsonr #threshold_kwargs: null #target_nan_mask: null #multitask_handling: mean-per-label #- name: r2 - #metric: r2_score_ipu + #metric: r2_score #threshold_kwargs: null #target_nan_mask: null #multitask_handling: mean-per-label diff --git a/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml b/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml index b71c43cf2..0c0d3aef7 100644 --- a/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml +++ b/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml @@ -324,16 +324,16 @@ predictor: # pcqm4m_n4: [] loss_fun: # l1000_vcap: - # name: hybrid_ce_ipu + # name: hybrid_ce # n_brackets: 3 # alpha: 0.5 # l1000_mcf7: 
- # name: hybrid_ce_ipu + # name: hybrid_ce # n_brackets: 3 # alpha: ${predictor.loss_fun.l1000_vcap.alpha} - # pcba_1328: bce_logits_ipu - pcqm4m_g25: mae_ipu - # pcqm4m_n4: mae_ipu + # pcba_1328: bce_logits + pcqm4m_g25: mae + # pcqm4m_n4: mae random_seed: ${constants.seed} optim_kwargs: lr: 1.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -373,7 +373,7 @@ metrics: # threshold_kwargs: null # l1000_mcf7: *classif_metrics # pcba_1328: - # # use auroc and averageprecision (non_ipu version) so tha nans are handled correctly + # # - name: auroc # metric: auroc # task: binary @@ -388,17 +388,17 @@ metrics: # threshold_kwargs: null pcqm4m_g25: &pcqm_metrics - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: mean-per-label threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label - name: r2 - metric: r2_score_ipu + metric: r2_score threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label diff --git a/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml b/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml index 464e49581..6eb78a2dd 100644 --- a/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml +++ b/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml @@ -324,16 +324,16 @@ predictor: pcqm4m_n4: [] loss_fun: # l1000_vcap: - # name: hybrid_ce_ipu + # name: hybrid_ce # n_brackets: 3 # alpha: 0.5 # l1000_mcf7: - # name: hybrid_ce_ipu + # name: hybrid_ce # n_brackets: 3 # alpha: ${predictor.loss_fun.l1000_vcap.alpha} - # pcba_1328: bce_logits_ipu - # pcqm4m_g25: mae_ipu - pcqm4m_n4: mae_ipu + # pcba_1328: bce_logits + # pcqm4m_g25: mae + pcqm4m_n4: mae random_seed: ${constants.seed} optim_kwargs: lr: 1.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -373,7 +373,7 @@ metrics: # threshold_kwargs: null # l1000_mcf7: *classif_metrics # pcba_1328: - # # use auroc and 
averageprecision (non_ipu version) so tha nans are handled correctly + # # - name: auroc # metric: auroc # task: binary @@ -388,17 +388,17 @@ metrics: # threshold_kwargs: null pcqm4m_n4: &pcqm_metrics - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: mean-per-label threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label - name: r2 - metric: r2_score_ipu + metric: r2_score threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label diff --git a/expts/neurips2023_configs/base_config/small.yaml b/expts/neurips2023_configs/base_config/small.yaml index 4914fdda3..80099b9fe 100644 --- a/expts/neurips2023_configs/base_config/small.yaml +++ b/expts/neurips2023_configs/base_config/small.yaml @@ -247,9 +247,9 @@ predictor: tox21: ["auroc"] zinc: ["mae"] loss_fun: - qm9: mae_ipu - tox21: bce_ipu - zinc: mae_ipu + qm9: mae + tox21: bce + zinc: mae random_seed: *seed optim_kwargs: lr: 4.e-5 # warmup can be scheduled using torch_scheduler_kwargs @@ -270,28 +270,28 @@ predictor: metrics: qm9: &qm9_metrics - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: flatten threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label - name: r2_score - metric: r2_score_ipu + metric: r2_score target_nan_mask: null multitask_handling: mean-per-label threshold_kwargs: null tox21: - name: auroc - metric: auroc_ipu + metric: auroc task: binary multitask_handling: mean-per-label threshold_kwargs: null - name: avpr - metric: average_precision_ipu + metric: average_precision task: binary multitask_handling: mean-per-label threshold_kwargs: null diff --git a/expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml b/expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml index 
e107fa386..2405ec4aa 100644 --- a/expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml +++ b/expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml @@ -247,9 +247,9 @@ predictor: tox21: ["auroc"] zinc: ["mae"] loss_fun: - qm9: mae_ipu - tox21: bce_ipu - zinc: mae_ipu + qm9: mae + tox21: bce + zinc: mae random_seed: *seed optim_kwargs: lr: 1.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -270,28 +270,28 @@ predictor: metrics: qm9: &qm9_metrics - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: flatten threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label - name: r2_score - metric: r2_score_ipu + metric: r2_score target_nan_mask: null multitask_handling: mean-per-label threshold_kwargs: null tox21: - name: auroc - metric: auroc_ipu + metric: auroc task: binary multitask_handling: mean-per-label threshold_kwargs: null - name: avpr - metric: average_precision_ipu + metric: average_precision task: binary multitask_handling: mean-per-label threshold_kwargs: null diff --git a/expts/neurips2023_configs/config_large_gcn_gpu.yaml b/expts/neurips2023_configs/config_large_gcn_gpu.yaml index 31a02e22c..6c5be45fe 100644 --- a/expts/neurips2023_configs/config_large_gcn_gpu.yaml +++ b/expts/neurips2023_configs/config_large_gcn_gpu.yaml @@ -59,7 +59,7 @@ architecture: predictor: loss_fun: - pcba_1328: bce_logits_ipu + pcba_1328: bce_logits torch_scheduler_kwargs: max_num_epochs: &max_epochs 20 diff --git a/expts/neurips2023_configs/config_luis_jama.yaml b/expts/neurips2023_configs/config_luis_jama.yaml index e0549e0f0..f71ab65f7 100644 --- a/expts/neurips2023_configs/config_luis_jama.yaml +++ b/expts/neurips2023_configs/config_luis_jama.yaml @@ -251,8 +251,8 @@ predictor: pcqm20k_g13: [] pcqm20k_n4: [] loss_fun: - pcqm20k_g13: mae_ipu - pcqm20k_n4: mae_ipu + pcqm20k_g13: mae + pcqm20k_n4: mae 
random_seed: *seed optim_kwargs: lr: 4.e-5 # warmup can be scheduled using torch_scheduler_kwargs @@ -273,12 +273,12 @@ predictor: metrics: pcqm20k_g13: &pcqm_metrics - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: flatten threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label diff --git a/expts/neurips2023_configs/config_small_gcn_gpu.yaml b/expts/neurips2023_configs/config_small_gcn_gpu.yaml index ccad70af6..03bca8b64 100644 --- a/expts/neurips2023_configs/config_small_gcn_gpu.yaml +++ b/expts/neurips2023_configs/config_small_gcn_gpu.yaml @@ -51,7 +51,7 @@ architecture: predictor: loss_fun: - tox21: bce_logits_ipu + tox21: bce_logits torch_scheduler_kwargs: max_num_epochs: &max_epochs 300 diff --git a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_mcf7.yaml b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_mcf7.yaml index 1aba37eb4..197934a09 100644 --- a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_mcf7.yaml +++ b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_mcf7.yaml @@ -196,7 +196,7 @@ predictor: l1000_mcf7: [] loss_fun: l1000_mcf7: - name: hybrid_ce_ipu + name: hybrid_ce n_brackets: 5 random_seed: *seed optim_kwargs: @@ -218,13 +218,13 @@ predictor: metrics: l1000_mcf7: &classif_metrics - name: auroc - metric: auroc_ipu + metric: auroc num_classes: 5 task: multiclass multitask_handling: mean-per-label threshold_kwargs: null - name: avpr - metric: average_precision_ipu + metric: average_precision num_classes: 5 task: multiclass target_to_int: True diff --git a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_pcba.yaml b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_pcba.yaml index 77837d750..ac045310d 100644 --- a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_pcba.yaml +++ 
b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_pcba.yaml @@ -193,7 +193,7 @@ predictor: metrics_on_training_set: pcba_1328: [] loss_fun: - pcba_1328: bce_ipu + pcba_1328: bce random_seed: *seed optim_kwargs: lr: 1.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -214,12 +214,12 @@ predictor: metrics: pcba_1328: - name: auroc - metric: auroc_ipu + metric: auroc task: binary multitask_handling: mean-per-label threshold_kwargs: null - name: avpr - metric: average_precision_ipu + metric: average_precision task: binary multitask_handling: mean-per-label threshold_kwargs: null diff --git a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_vcap.yaml b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_vcap.yaml index 1c021a559..5f2651fa4 100644 --- a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_vcap.yaml +++ b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_vcap.yaml @@ -194,7 +194,7 @@ predictor: l1000_vcap: [] loss_fun: l1000_vcap: - name: hybrid_ce_ipu + name: hybrid_ce n_brackets: 5 random_seed: *seed optim_kwargs: @@ -216,13 +216,13 @@ predictor: metrics: l1000_vcap: &classif_metrics - name: auroc - metric: auroc_ipu + metric: auroc num_classes: 5 task: multiclass multitask_handling: mean-per-label threshold_kwargs: null - name: avpr - metric: average_precision_ipu + metric: average_precision num_classes: 5 task: multiclass target_to_int: True diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_g25.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_g25.yaml index bd09385f5..d4d7d9553 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_g25.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_g25.yaml @@ -196,7 +196,7 @@ predictor: metrics_on_training_set: pcqm4m_g25: [] loss_fun: - pcqm4m_g25: mae_ipu + pcqm4m_g25: mae random_seed: *seed optim_kwargs: lr: 1.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -217,17 
+217,17 @@ predictor: metrics: pcqm4m_g25: &pcqm_metrics - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: flatten threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label - name: r2 - metric: r2_score_ipu + metric: r2_score threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_mcf7.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_mcf7.yaml index 5abf9790d..781b888ea 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_mcf7.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_mcf7.yaml @@ -196,7 +196,7 @@ predictor: l1000_mcf7: [] loss_fun: l1000_mcf7: - name: hybrid_ce_ipu + name: hybrid_ce n_brackets: 5 random_seed: *seed optim_kwargs: @@ -218,13 +218,13 @@ predictor: metrics: l1000_mcf7: &classif_metrics - name: auroc - metric: auroc_ipu + metric: auroc num_classes: 5 task: multiclass multitask_handling: mean-per-label threshold_kwargs: null - name: avpr - metric: average_precision_ipu + metric: average_precision num_classes: 5 task: multiclass target_to_int: True diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_n4.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_n4.yaml index 834967498..bebfdc424 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_n4.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_n4.yaml @@ -197,7 +197,7 @@ predictor: metrics_on_training_set: pcqm4m_n4: [] loss_fun: - pcqm4m_n4: mae_ipu + pcqm4m_n4: mae random_seed: *seed optim_kwargs: lr: 1.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -218,17 +218,17 @@ predictor: metrics: pcqm4m_n4: &pcqm_metrics - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: flatten threshold_kwargs: 
null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label - name: r2 - metric: r2_score_ipu + metric: r2_score threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_pcba.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_pcba.yaml index f390a7a2b..b69bfc4c3 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_pcba.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_pcba.yaml @@ -193,7 +193,7 @@ predictor: metrics_on_training_set: pcba_1328: [] loss_fun: - pcba_1328: bce_ipu + pcba_1328: bce random_seed: *seed optim_kwargs: lr: 1.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -214,12 +214,12 @@ predictor: metrics: pcba_1328: - name: auroc - metric: auroc_ipu + metric: auroc task: binary multitask_handling: mean-per-label threshold_kwargs: null - name: avpr - metric: average_precision_ipu + metric: average_precision task: binary multitask_handling: mean-per-label threshold_kwargs: null diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_pcq.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_pcq.yaml index d13a757f3..fe320e443 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_pcq.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_pcq.yaml @@ -235,8 +235,8 @@ predictor: pcqm4m_g25: [] pcqm4m_n4: [] loss_fun: - pcqm4m_g25: mae_ipu - pcqm4m_n4: mae_ipu + pcqm4m_g25: mae + pcqm4m_n4: mae random_seed: *seed optim_kwargs: lr: 1.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -257,17 +257,17 @@ predictor: metrics: pcqm4m_g25: &pcqm_metrics - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: flatten threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null 
target_nan_mask: null multitask_handling: mean-per-label - name: r2 - metric: r2_score_ipu + metric: r2_score threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_vcap.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_vcap.yaml index 75f802926..539123dec 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_vcap.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_vcap.yaml @@ -194,7 +194,7 @@ predictor: l1000_vcap: [] loss_fun: l1000_vcap: - name: hybrid_ce_ipu + name: hybrid_ce n_brackets: 5 random_seed: *seed optim_kwargs: @@ -216,13 +216,13 @@ predictor: metrics: l1000_vcap: &classif_metrics - name: auroc - metric: auroc_ipu + metric: auroc num_classes: 5 task: multiclass multitask_handling: mean-per-label threshold_kwargs: null - name: avpr - metric: average_precision_ipu + metric: average_precision num_classes: 5 task: multiclass target_to_int: True diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_g25.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_g25.yaml index 02679153c..17f78ea4b 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_g25.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_g25.yaml @@ -204,7 +204,7 @@ predictor: metrics_on_training_set: pcqm4m_g25: [] loss_fun: - pcqm4m_g25: mae_ipu + pcqm4m_g25: mae random_seed: *seed optim_kwargs: lr: 1.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -225,17 +225,17 @@ predictor: metrics: pcqm4m_g25: &pcqm_metrics - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: flatten threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label - name: r2 - metric: r2_score_ipu + metric: r2_score threshold_kwargs: null target_nan_mask: null 
multitask_handling: mean-per-label diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_mcf7.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_mcf7.yaml index 0506dbfea..83b79bf00 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_mcf7.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_mcf7.yaml @@ -204,7 +204,7 @@ predictor: l1000_mcf7: [] loss_fun: l1000_mcf7: - name: hybrid_ce_ipu + name: hybrid_ce n_brackets: 5 random_seed: *seed optim_kwargs: @@ -226,13 +226,13 @@ predictor: metrics: l1000_mcf7: &classif_metrics - name: auroc - metric: auroc_ipu + metric: auroc num_classes: 5 task: multiclass multitask_handling: mean-per-label threshold_kwargs: null - name: avpr - metric: average_precision_ipu + metric: average_precision num_classes: 5 task: multiclass target_to_int: True diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_n4.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_n4.yaml index 58bad3bbc..7ccf4ac18 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_n4.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_n4.yaml @@ -205,7 +205,7 @@ predictor: metrics_on_training_set: pcqm4m_n4: [] loss_fun: - pcqm4m_n4: mae_ipu + pcqm4m_n4: mae random_seed: *seed optim_kwargs: lr: 1.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -226,17 +226,17 @@ predictor: metrics: pcqm4m_n4: &pcqm_metrics - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: flatten threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label - name: r2 - metric: r2_score_ipu + metric: r2_score threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_pcba.yaml 
b/expts/neurips2023_configs/single_task_gine/config_large_gine_pcba.yaml index 3ce9ffde2..9b64240ff 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_pcba.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_pcba.yaml @@ -201,7 +201,7 @@ predictor: metrics_on_training_set: pcba_1328: [] loss_fun: - pcba_1328: bce_ipu + pcba_1328: bce random_seed: *seed optim_kwargs: lr: 1.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -222,12 +222,12 @@ predictor: metrics: pcba_1328: - name: auroc - metric: auroc_ipu + metric: auroc task: binary multitask_handling: mean-per-label threshold_kwargs: null - name: avpr - metric: average_precision_ipu + metric: average_precision task: binary multitask_handling: mean-per-label threshold_kwargs: null diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_pcq.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_pcq.yaml index d541b9b04..b4c7638a8 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_pcq.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_pcq.yaml @@ -243,8 +243,8 @@ predictor: pcqm4m_g25: [] pcqm4m_n4: [] loss_fun: - pcqm4m_g25: mae_ipu - pcqm4m_n4: mae_ipu + pcqm4m_g25: mae + pcqm4m_n4: mae random_seed: *seed optim_kwargs: lr: 1.e-4 # warmup can be scheduled using torch_scheduler_kwargs @@ -265,17 +265,17 @@ predictor: metrics: pcqm4m_g25: &pcqm_metrics - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: flatten threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label - name: r2 - metric: r2_score_ipu + metric: r2_score threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_vcap.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_vcap.yaml 
index 121d74ddb..b535ab03a 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_vcap.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_vcap.yaml @@ -202,7 +202,7 @@ predictor: l1000_vcap: [] loss_fun: l1000_vcap: - name: hybrid_ce_ipu + name: hybrid_ce n_brackets: 5 random_seed: *seed optim_kwargs: @@ -224,13 +224,13 @@ predictor: metrics: l1000_vcap: &classif_metrics - name: auroc - metric: auroc_ipu + metric: auroc num_classes: 5 task: multiclass multitask_handling: mean-per-label threshold_kwargs: null - name: avpr - metric: average_precision_ipu + metric: average_precision num_classes: 5 task: multiclass target_to_int: True diff --git a/graphium/cli/train_finetune_test.py b/graphium/cli/train_finetune_test.py index 4cea1ee8c..b4b9560b3 100644 --- a/graphium/cli/train_finetune_test.py +++ b/graphium/cli/train_finetune_test.py @@ -44,7 +44,7 @@ TESTING_ONLY_CONFIG_KEY = "testing_only" -@hydra.main(version_base=None, config_path="../../expts/hydra-configs", config_name="main") +@hydra.main(version_base=None, config_path="/home/domix/Gitx/graphium/graphium/config/", config_name="loc-config_largemix") def cli(cfg: DictConfig) -> None: """ The main CLI endpoint for training, fine-tuning and evaluating Graphium models. 
diff --git a/graphium/config/loc-config_largemix.yaml b/graphium/config/loc-config_largemix.yaml new file mode 100644 index 000000000..621ca04c3 --- /dev/null +++ b/graphium/config/loc-config_largemix.yaml @@ -0,0 +1,424 @@ +constants: + wandb: + entity: valencelabs + project: graphium3.0 + name: ${constants.scale}/mpnn/large-no_l1000 + tags: + - mpnn + - large + - no-l1000 + - ${constants.scale} + data_dir: /home/domix/Gitx/graphium/graphium/data/largemix + datacache_path: /home/domix/Gitx/graphium/datacache/largemix + scale: 10M + max_epochs: 50 + name: scale_mpnn + raise_train_error: true + seed: 42 + variants: + 1M: + mup_scale_factor: 0.27 + epochs: 50 + batch_size: 1024 + accumulate_grad_batches: 1 + depth: 16 + # mup_base_path: /rxrx/data/user/frederik.wenkel/outgoing/mup/large-no_l1000/mpnn.yaml + train_frac: 1.0 + 3M: + mup_scale_factor: 0.505 + epochs: 50 + batch_size: 1024 + accumulate_grad_batches: 1 + depth: 16 + # mup_base_path: /rxrx/data/user/frederik.wenkel/outgoing/mup/large-no_l1000/mpnn.yaml + train_frac: 1.0 + 10M: + mup_scale_factor: null + epochs: 50 + batch_size: 1024 + accumulate_grad_batches: 1 + depth: 16 + mup_base_path: null + train_frac: 1.0 + 30M: + mup_scale_factor: 1.798 + epochs: 50 + batch_size: 1024 + accumulate_grad_batches: 1 + depth: 16 + # mup_base_path: /rxrx/data/user/frederik.wenkel/outgoing/mup/large-no_l1000/mpnn.yaml + train_frac: 1.0 + 100M: + mup_scale_factor: 3.38 + epochs: 30 + batch_size: 512 + accumulate_grad_batches: 2 + depth: 16 + # mup_base_path: /rxrx/data/user/frederik.wenkel/outgoing/mup/large-no_l1000/mpnn.yaml + train_frac: 1.0 + 300M: + mup_scale_factor: 5.91 + epochs: 30 + batch_size: 256 + accumulate_grad_batches: 1 + depth: 16 + # mup_base_path: /rxrx/data/user/frederik.wenkel/outgoing/mup/large-no_l1000/mpnn.yaml + train_frac: 1.0 + 1B: + mup_scale_factor: 11.0 + epochs: 20 + batch_size: 256 + accumulate_grad_batches: 1 + depth: 16 + # mup_base_path: 
/rxrx/data/user/frederik.wenkel/outgoing/mup/large-no_l1000/mpnn.yaml + train_frac: 1.0 + 3B: + mup_scale_factor: 18.8 + epochs: 20 + batch_size: 128 + accumulate_grad_batches: 1 + depth: 16 + # mup_base_path: /rxrx/data/user/frederik.wenkel/outgoing/mup/large-no_l1000/mpnn.yaml + train_frac: 1.0 + 125Mol: + mup_scale_factor: 3.38 + epochs: 30 + batch_size: 512 + accumulate_grad_batches: 2 + depth: 16 + # mup_base_path: /rxrx/data/user/frederik.wenkel/outgoing/mup/large-no_l1000/mpnn.yaml + train_frac: 0.125 + 250Mol: + mup_scale_factor: 3.38 + epochs: 30 + batch_size: 512 + accumulate_grad_batches: 2 + depth: 16 + # mup_base_path: /rxrx/data/user/frederik.wenkel/outgoing/mup/large-no_l1000/mpnn.yaml + train_frac: 0.25 + 500Mol: + mup_scale_factor: 3.38 + epochs: 30 + batch_size: 512 + accumulate_grad_batches: 2 + depth: 16 + # mup_base_path: /rxrx/data/user/frederik.wenkel/outgoing/mup/large-no_l1000/mpnn.yaml + train_frac: 0.5 + dataset_fraction: 1.0 +accelerator: + float32_matmul_precision: medium + type: gpu +architecture: + mup_scale_factor: ${constants.variants.${constants.scale}.mup_scale_factor} + mup_base_path: ${constants.variants.${constants.scale}.mup_base_path} + pre_nn: + activation: relu + depth: 2 + dropout: 0.1 + hidden_dims: 4 + last_activation: none + last_normalization: layer_norm + normalization: layer_norm + out_dim: 64 + residual_type: none + pre_nn_edges: + activation: relu + depth: 2 + dropout: 0.1 + hidden_dims: 4 + last_activation: none + last_normalization: layer_norm + normalization: layer_norm + out_dim: 32 + residual_type: none + gnn: + activation: gelu + depth: ${constants.variants.${constants.scale}.depth} + dropout: 0.1 + hidden_dims: 4 + hidden_dims_edges: 2 + out_dim_edges: 2 + in_dim: 64 + last_activation: none + last_normalization: layer_norm + layer_kwargs: + mlp_expansion_ratio: 1 + layer_type: pyg:mpnnplus + normalization: layer_norm + out_dim: 4 + residual_type: simple + virtual_node: none + graph_output_nn: + graph: + 
activation: relu + depth: 2 + dropout: 0.1 + hidden_dims: 4 + last_activation: none + last_normalization: none + normalization: layer_norm + out_dim: 653 + pooling: + - sum + residual_type: none + node: + activation: relu + depth: 2 + dropout: 0.1 + hidden_dims: 4 + last_activation: none + last_normalization: none + normalization: layer_norm + out_dim: 84 + pooling: + - sum + residual_type: none + model_type: FullGraphMultiTaskNetwork + pe_encoders: + encoders: + la_pos: + dropout: 0.1 + encoder_type: laplacian_pe + first_normalization: none + hidden_dim: 2 + input_keys: + - laplacian_eigvec + - laplacian_eigval + model_type: DeepSet + num_layers: 2 + num_layers_post: 1 + out_dim: 32 + output_keys: + - feat + rw_pos: + dropout: 0.1 + encoder_type: mlp + first_normalization: layer_norm + hidden_dim: 2 + input_keys: + - rw_return_probs + normalization: layer_norm + num_layers: 2 + out_dim: 32 + output_keys: + - feat + last_norm: None + out_dim: 32 + pool: sum + task_heads: + pcba_1328: + activation: relu + depth: 2 + dropout: 0.1 + hidden_dims: 4 + last_activation: none + last_normalization: none + normalization: layer_norm + out_dim: 448 + residual_type: none + task_level: graph + pcqm4m_g25: + activation: relu + depth: 2 + dropout: 0.1 + hidden_dims: 4 + last_activation: none + last_normalization: none + normalization: layer_norm + out_dim: 25 + residual_type: none + task_level: graph + pcqm4m_n4: + activation: relu + depth: 2 + dropout: 0.1 + hidden_dims: 4 + last_activation: none + last_normalization: none + normalization: layer_norm + out_dim: 4 + residual_type: none + task_level: node +datamodule: + args: + batch_size_inference: 1024 + batch_size_training: ${constants.variants.${constants.scale}.batch_size} + train_frac: ${constants.variants.${constants.scale}.train_frac} + featurization: + add_self_loop: false + atom_property_list_float: + - degree + - formal-charge + - radical-electron + - aromatic + - in-ring + atom_property_list_onehot: + - atomic-number + 
- group + - period + - total-valence + edge_property_list: + - bond-type-onehot + - stereo + - in-ring + explicit_H: false + max_num_atoms: 50 + pos_encoding_as_features: + pos_types: + lap_eigval: + disconnected_comp: true + normalization: none + num_pos: 8 + pos_level: node + pos_type: laplacian_eigval + lap_eigvec: + disconnected_comp: true + normalization: none + num_pos: 8 + pos_level: node + pos_type: laplacian_eigvec + rw_pos: + ksteps: 16 + pos_level: node + pos_type: rw_return_probs + use_bonds_weights: false + multiprocessing_context: spawn + num_workers: 4 + persistent_workers: true + processed_graph_data_path: ${constants.datacache_path} + task_specific_args: + pcba_1328: + df: null + df_path: ${constants.data_dir}/PCBA_1328_1564k.parquet + epoch_sampling_fraction: 1 + sample_size: ${constants.dataset_fraction} # use sample_size for test + label_cols: 'assayID-1*' + smiles_col: SMILES + splits_path: ${constants.data_dir}/pcba_1328_random_splits.pt + task_level: graph + pcqm4m_g25: + df: null + df_path: ${constants.data_dir}/PCQM4M_G25_N4.parquet + epoch_sampling_fraction: 1 + sample_size: ${constants.dataset_fraction} # use sample_size for test + label_cols: graph_* + label_normalization: + method: normal + normalize_val_test: true + smiles_col: ordered_smiles + splits_path: ${constants.data_dir}/pcqm4m_g25_n4_random_splits.pt + task_level: graph + pcqm4m_n4: + df: null + df_path: ${constants.data_dir}/PCQM4M_G25_N4.parquet + epoch_sampling_fraction: 1 + sample_size: ${constants.dataset_fraction} # use sample_size for test + label_cols: node_* + label_normalization: + method: normal + normalize_val_test: true + seed: 42 + smiles_col: ordered_smiles + splits_path: ${constants.data_dir}/pcqm4m_g25_n4_random_splits.pt + task_level: node + module_type: MultitaskFromSmilesDataModule +metrics: + # pcba_1328: [] + # pcqm4m_g25: [] + # pcqm4m_n4: [] + pcba_1328: + - metric: auroc + multitask_handling: mean-per-label + name: auroc + target_nan_mask: ignore + 
task: binary + threshold_kwargs: null + target_to_int: true + - metric: averageprecision + multitask_handling: mean-per-label + name: avpr + target_nan_mask: 0 + task: binary + threshold_kwargs: null + target_to_int: true + pcqm4m_g25: + - metric: mae + multitask_handling: mean-per-label + name: mae + target_nan_mask: ignore + threshold_kwargs: null + - metric: pearsonr + multitask_handling: mean-per-label + name: pearsonr + target_nan_mask: ignore + threshold_kwargs: null + - metric: r2_score + multitask_handling: mean-per-label + name: r2 + target_nan_mask: ignore + threshold_kwargs: null + pcqm4m_n4: + - metric: mae + multitask_handling: mean-per-label + name: mae + target_nan_mask: ignore + threshold_kwargs: null + - metric: pearsonr + multitask_handling: mean-per-label + name: pearsonr + target_nan_mask: ignore + threshold_kwargs: null + - metric: r2_score + multitask_handling: mean-per-label + name: r2 + target_nan_mask: ignore + threshold_kwargs: null +predictor: + loss_fun: + pcba_1328: bce_logits + pcqm4m_g25: mae + pcqm4m_n4: mae + metrics_every_n_train_steps: 5 + metrics_on_progress_bar: + pcba_1328: [] + pcqm4m_g25: [] + pcqm4m_n4: [] + metrics_on_training_set: + pcba_1328: [] + pcqm4m_g25: [pearsonr] + pcqm4m_n4: [pearsonr] + multitask_handling: flatten + optim_kwargs: + lr: ${eval:"0.003/(((${architecture.gnn.depth}+8)/24)**0.5)"} + random_seed: 42 + scheduler_kwargs: null + target_nan_mask: null + torch_scheduler_kwargs: + max_num_epochs: ${constants.max_epochs} + module_type: WarmUpLinearLR + verbose: false + warmup_epochs: 5 +tasks: {} +trainer: + logger: + name: ${constants.scale}/mpnn/large-no_l1000 + project: molgps-pretraining + save_dir: logs/molgps-pretraining/large-no_l1000/ + model_checkpoint: + save_last: false + save_top_k: -1 + dirpath: model_checkpoints/graphium3/large-no_l1000/${constants.scale}/mpnn/${constants.seed}/ + every_n_epochs: 5 + filename: '{epoch}' + seed: ${constants.seed} + trainer: + check_val_every_n_epoch: 1 + 
max_epochs: 50 + min_epochs: 1 + precision: 32 + accumulate_grad_batches: ${constants.variants.${constants.scale}.accumulate_grad_batches} + num_sanity_val_steps: 2 + devices: 1 + strategy: ddp_find_unused_parameters_true + limit_train_batches: 20 + limit_val_batches: 20 diff --git a/graphium/ipu/README.md b/graphium/ipu/README.md deleted file mode 100644 index 3f592d7a8..000000000 --- a/graphium/ipu/README.md +++ /dev/null @@ -1,15 +0,0 @@ -
- -

The Graph Of LIfe Library.

-
- - -## What is in this folder? - -code for IPU acceleration support - -- `ipu_dataloader.py`: code for handling dataloader on IPU -- `ipu_losses.py`: code for computing losses on IPU -- `ipu_simple_lightning.py`: code for pytorch lightning support on IPU -- `ipu_utils.py`: utils functions for IPU -- `ipu_wrapper.py`: wrapper code for IPU support \ No newline at end of file diff --git a/graphium/ipu/__init__.py b/graphium/ipu/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/graphium/ipu/ipu_dataloader.py b/graphium/ipu/ipu_dataloader.py deleted file mode 100644 index 5aa7828f4..000000000 --- a/graphium/ipu/ipu_dataloader.py +++ /dev/null @@ -1,434 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. 
--------------------------------------------------------------------------------- -""" - - -from typing import Callable, Iterable, Optional, List, Tuple, Dict, Any, Union -from copy import deepcopy -from dataclasses import dataclass -import numpy as np -from loguru import logger -from torch import Tensor - -import torch -from torch_geometric.data import Data, Batch, Dataset -from torch_geometric.transforms import BaseTransform - -from graphium.data.utils import get_keys -from graphium.ipu.ipu_utils import import_poptorch -from graphium.utils.packing import ( - fast_packing, - hybrid_packing, - get_pack_sizes, - node_to_pack_indices_mask, - estimate_max_pack_node_size, -) - - -@dataclass -class IPUDataloaderOptions: - r""" - This data class stores the arguments necessary to instantiate a model for the Predictor. - - Parameters: - model_class: - pytorch module used to create a model - - model_kwargs: - Key-word arguments used to initialize the model from `model_class`. - """ - - batch_size: int - max_num_nodes: Optional[int] = None - max_num_nodes_per_graph: Optional[int] = None - max_num_edges: Optional[int] = None - max_num_edges_per_graph: Optional[int] = None - mode: "poptorch.DataLoaderMode" = "Sync" - - def set_kwargs(self): - # Get the maximum number of nodes - if self.max_num_nodes is not None: - assert ( - self.max_num_nodes_per_graph is None - ), "Cannot use `max_num_nodes` and `max_num_nodes_per_graph` simultaneously" - elif self.max_num_nodes_per_graph is not None: - assert ( - self.max_num_nodes is None - ), "Cannot use `max_num_nodes` and `max_num_nodes_per_graph` simultaneously" - self.max_num_nodes = self.max_num_nodes_per_graph * self.batch_size - else: - raise ValueError("Must provide either `max_num_nodes` or `max_num_nodes_per_graph`") - - # Get the maximum number of edges - if self.max_num_edges is not None: - assert ( - self.max_num_edges_per_graph is None - ), "Cannot use `max_num_edges` and `max_num_edges_per_graph` simultaneously" - elif 
self.max_num_edges_per_graph is not None: - assert ( - self.max_num_edges is None - ), "Cannot use `max_num_edges` and `max_num_edges_per_graph` simultaneously" - self.max_num_edges = self.max_num_edges_per_graph * self.batch_size - else: - raise ValueError("Must provide either `max_num_nodes` or `max_num_nodes_per_graph`") - - # poptorch mode - poptorch = import_poptorch() - if isinstance(self.mode, str): - if self.mode.lower() == "sync": - self.mode = poptorch.DataLoaderMode.Sync - elif self.mode.lower() == "async": - self.mode = poptorch.DataLoaderMode.Async - elif self.mode.lower() == "asyncrebatched": - self.mode = poptorch.DataLoaderMode.AsyncRebatched - else: - raise ValueError(f"`{self.mode}` not a valid parameter.") - - -class CombinedBatchingCollator: - """ - Collator object that manages the combined batch size defined as: - - combined_batch_size = batch_size * device_iterations - * replication_factor * gradient_accumulation - - This is intended to be used in combination with the poptorch.DataLoader - """ - - def __init__( - self, - batch_size: int, - max_num_nodes: int, - max_num_edges: int, - dataset_max_nodes_per_graph: int, - dataset_max_edges_per_graph: int, - collate_fn: Optional[Callable] = None, - ): - """ - Parameters: - batch_size: mini batch size used by the model - max_num_nodes: Maximum number of nodes in the batched padded graph - max_num_edges: Maximum number of edges in the batched padded graph - dataset_max_nodes_per_graph: Maximum number of nodes per graph in the full dataset - dataset_max_edges_per_graph: Maximum number of edges per graph in the full dataset - collate_fn: Function used to collate (or batch) the single data or graphs together - """ - super().__init__() - self.batch_size = batch_size - self.collate_fn = collate_fn - self.max_num_nodes = max_num_nodes - self.max_num_edges = max_num_edges - self.dataset_max_nodes_per_graph = dataset_max_nodes_per_graph - self.dataset_max_edges_per_graph = dataset_max_edges_per_graph - - def 
__call__( - self, batch: List[Dict[str, Union[Data, Dict[str, Tensor]]]] - ) -> Dict[str, Union[Batch, Dict[str, Tensor], Any]]: - """ - Stack tensors, batch the pyg graphs, and pad each tensor to be same size. - - Parameters: - batch: The batch of data, including pyg-graphs `Data` and labels `Dict[str, Tensor]` to be padded - - Returns: - out_batch: A dictionary where the graphs are batched and the labels or other Tensors are stacked - """ - - # Sort the batch such that large graphs are paired with small graphs - num_nodes = [b["features"].num_nodes for b in batch] - packed_indices = hybrid_packing(num_nodes, batch_size=self.batch_size) - packs = [[batch[idx] for idx in pack] for pack in packed_indices] - - # Loop all mini-batches within the global batch - all_batches = [] - for pack in packs: - if self.collate_fn != None: - local_batch = self.collate_fn(pack) - - transform = Pad( - max_num_nodes=self.max_num_nodes, - max_num_edges=self.max_num_edges, - dataset_max_nodes_per_graph=self.dataset_max_nodes_per_graph, - dataset_max_edges_per_graph=self.dataset_max_edges_per_graph, - ) - - local_batch["features"] = transform(local_batch["features"]) - local_batch["labels"] = transform(local_batch["labels"]) - all_batches.append(local_batch) - - out_batch = {} - - # Stack tensors in the first dimension to allow IPUs to differentiate between local and global graph - all_keys = get_keys(all_batches[0]["labels"]) - out_batch["labels"] = { - key: torch.stack([this_batch["labels"][key] for this_batch in all_batches], 0) for key in all_keys - } - out_graphs = [this_batch["features"] for this_batch in all_batches] - stacked_features = deepcopy(out_graphs[0]) - for key, val in out_graphs[0].items(): - if isinstance(val, torch.Tensor): - stacked_features[key] = torch.stack([this_graph[key] for this_graph in out_graphs], dim=0) - - out_batch["features"] = stacked_features - for key in all_batches[0].keys(): - if key not in ("features", "labels"): - out_batch[key] = 
[this_batch[key] for this_batch in all_batches] - - # - for data_key, data_val in out_batch.items(): - if isinstance(data_val, Batch): - for sub_key, sub_val in data_val.items(): - if isinstance(sub_val, Tensor) and sub_val.dtype == torch.int64: - out_batch[data_key][sub_key] = sub_val.to(torch.int32) - - return out_batch - - -def create_ipu_dataloader( - dataset: Dataset, - ipu_dataloader_options: IPUDataloaderOptions, - ipu_options: Optional["poptorch.Options"] = None, - batch_size: Optional[int] = 1, - collate_fn=None, - num_workers: Optional[int] = 0, - **kwargs, -) -> "poptorch.DataLoader": - """ - Creates a poptorch.DataLoader for graph datasets - Applies the mini-batching method of concatenating multiple graphs into a - single graph with multiple disconnected subgraphs. See: - https://pytorch-geometric.readthedocs.io/en/2.0.2/notes/batching.html - - Parameters: - - dataset: The torch_geometric.data.Dataset instance from which to - load the graph examples for the IPU. - ipu_dataloader_options: The options to initialize the Dataloader for IPU - ipu_options: The poptorch.Options used by the - poptorch.DataLoader. Will use the default options if not provided. - batch_size: How many graph examples to load in each batch - (default: 1). - collate_fn: The function used to collate batches - **kwargs (optional): Additional arguments of :class:`poptorch.DataLoader`. 
- - Returns: - The dataloader - """ - poptorch = import_poptorch() - - if ipu_options is None: - # Create IPU default options - ipu_options = poptorch.Options() - - # Define the collater function - collater = CombinedBatchingCollator( - batch_size, - collate_fn=collate_fn, - max_num_nodes=ipu_dataloader_options.max_num_nodes, - max_num_edges=ipu_dataloader_options.max_num_edges, - dataset_max_nodes_per_graph=dataset.max_num_nodes_per_graph, - dataset_max_edges_per_graph=dataset.max_num_edges_per_graph, - ) - - # Get the global batch size - num_nodes = np.asarray(dataset.num_nodes_list) - accum = ipu_options.Training.gradient_accumulation - repli = ipu_options._values["replication_factor"] - device_iter = ipu_options._values["device_iterations"] - combined_batch_size = batch_size * accum * repli * device_iter - num_batches = len(dataset) // combined_batch_size - num_workers = min(num_batches, num_workers) - buffer_size = num_batches // num_workers if num_workers > 0 else None - buffer_size = 3 if buffer_size is None else buffer_size - async_options = { - "sharing_strategy": poptorch.SharingStrategy.ForkServer, - "early_preload": True, - "buffer_size": buffer_size, - "load_indefinitely": True, - "miss_sleep_time_in_ms": 0, - } - - # Estimate the packing size needed - max_pack_size, max_pack_size_per_graph = 0, 0 - for _ in range(4): - this_max_pack_size, this_max_pack_size_per_graph = estimate_max_pack_node_size( - num_nodes=num_nodes, - batch_size=batch_size, - combined_batch_size=combined_batch_size, - ) - max_pack_size = max(max_pack_size, this_max_pack_size) - max_pack_size_per_graph = max(max_pack_size_per_graph, this_max_pack_size_per_graph) - - max_num_nodes = collater.max_num_nodes - # Log the estimated pack size, with warnings if too big or too small - logger.info( - f"Estimating pack max_pack_size={max_pack_size} or max_pack_size_per_graph={max_pack_size_per_graph}" - ) - logger.info(f"Provided `max_num_nodes={max_num_nodes}`") - if max_pack_size > 
max_num_nodes - 10: - logger.warning( - f"The value of `max_num_nodes={max_num_nodes}` seems to be insufficient compared to `max_pack_size={max_pack_size}` and will likely crash" - ) - elif max_pack_size < max_num_nodes - 20: - logger.warning( - f"The value of `max_num_nodes={max_num_nodes}` seems to be large compared to `max_pack_size={max_pack_size}` and will likely waste memory" - ) - - return poptorch.DataLoader( - options=deepcopy(ipu_options), - dataset=dataset, - batch_size=batch_size, - num_workers=num_workers, - collate_fn=collater, - async_options=async_options, - **kwargs, - ) - - -class Pad(BaseTransform): - """ - Data transform that applies padding to enforce consistent tensor shapes. - """ - - def __init__( - self, - max_num_nodes: int, - dataset_max_nodes_per_graph, - dataset_max_edges_per_graph, - max_num_edges: Optional[int] = None, - node_value: float = 0, - edge_value: float = 0, - ): - """ - Parameters: - max_num_nodes: The maximum number of nodes for the total padded graph - dataset_max_nodes_per_graph: the maximum number of nodes per graph in the dataset - dataset_max_edges_per_graph: the maximum number of edges per graph in the dataset - max_num_edges: The maximum number of edges for the total padded graph - node_value: Value to add to the node padding - edge_value: Value to add to the edge padding - """ - super().__init__() - self.max_num_nodes = max_num_nodes - self.dataset_max_nodes_per_graph = dataset_max_nodes_per_graph - self.dataset_max_edges_per_graph = dataset_max_edges_per_graph - - if max_num_edges: - self.max_num_edges = max_num_edges - else: - # Assume fully connected graph - self.max_num_edges = max_num_nodes * (max_num_nodes - 1) - - self.node_value = node_value - self.edge_value = edge_value - - def validate(self, data): - """ - Validates that the input graph does not exceed the constraints that: - - * the number of nodes must be <= max_num_nodes - * the number of edges must be <= max_num_edges - - Returns: - Tuple containing 
the number nodes and the number of edges - """ - num_nodes = data.num_nodes - num_edges = data.num_edges - - assert num_nodes <= self.max_num_nodes, ( - f"Too many nodes. Graph has {num_nodes} nodes " f"and max_num_nodes is {self.max_num_nodes}." - ) - - assert num_edges <= self.max_num_edges, ( - f"Too many edges. Graph has {num_edges} edges defined " - f"and max_num_edges is {self.max_num_edges}." - ) - - return num_nodes, num_edges - - def __call__(self, batch: Batch) -> Batch: - return self._call(batch) - - def forward(self, batch: Batch) -> Batch: - return self._call(batch) - - def _call(self, batch: Batch) -> Batch: - """ - Pad the batch with a fake graphs that has the desired - number of nodes and edges. - """ - num_nodes, num_edges = self.validate(batch) - num_pad_nodes = self.max_num_nodes - num_nodes - num_pad_edges = self.max_num_edges - num_edges - # Create a copy to update with padded features - new_batch = deepcopy(batch) - - real_graphs = new_batch.to_data_list() - - for g in real_graphs: - g.graph_is_true = torch.tensor([1], dtype=bool) - g.node_is_true = torch.full([g.num_nodes], True, dtype=bool) - g.edge_is_true = torch.full([g.num_edges], True, dtype=bool) - - # create fake graph with the needed # of nodes and edges - fake = Data() - fake.num_nodes = num_pad_nodes - fake.num_edges = num_pad_edges - fake.graph_is_true = torch.tensor([False], dtype=bool) - fake.node_is_true = torch.full([num_pad_nodes], False, dtype=bool) - fake.edge_is_true = torch.full([num_pad_edges], False, dtype=bool) - - for key, value in real_graphs[0]: - if not torch.is_tensor(value): - continue - - if key == "graph_is_true" or key == "node_is_true" or key == "edge_is_true": - continue - - dim = real_graphs[0].__cat_dim__(key, value) - pad_shape = list(value.shape) - - if batch.is_node_attr(key): - pad_shape[dim] = num_pad_nodes - pad_value = self.node_value - elif batch.is_edge_attr(key): - pad_shape[dim] = num_pad_edges - if key == "edge_index": - # Padding edges are 
self-loops on the first padding node - pad_value = 0 - else: - pad_value = self.edge_value - # identify graph attributes, pad nan label for the fake graph - elif key.startswith("graph_"): - num_pad_graphs = 1 # we pad with one big fake graph - pad_shape[dim] = num_pad_graphs - pad_value = float("nan") - else: - continue - - pad_value = value.new_full(pad_shape, pad_value) - fake[key] = torch.cat([pad_value], dim=dim) - real_graphs.append(fake) - new_batch = Batch.from_data_list(real_graphs) - - if "num_nodes" in new_batch: - new_batch.num_nodes = self.max_num_nodes - - return new_batch - - def __repr__(self) -> str: - s = f"{self.__class__.__name__}(" - s += f"max_num_nodes={self.max_num_nodes}, " - s += f"max_num_edges={self.max_num_edges}, " - s += f"node_value={self.node_value}, " - s += f"edge_value={self.edge_value})" - return s diff --git a/graphium/ipu/ipu_losses.py b/graphium/ipu/ipu_losses.py deleted file mode 100644 index 6bc434ae4..000000000 --- a/graphium/ipu/ipu_losses.py +++ /dev/null @@ -1,196 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. 
--------------------------------------------------------------------------------- -""" - - -import torch -from torch import Tensor -from torch.nn import BCELoss, BCEWithLogitsLoss, MSELoss, L1Loss -from torch._C import _infer_size -from loguru import logger -from graphium.trainer.losses import HybridCELoss - - -class BCEWithLogitsLossIPU(BCEWithLogitsLoss): - """ - A modified version of the `torch.nn.BCEWithLogitsLoss` that can ignore NaNs - by giving them a weight of `0`. This allows it to work with compilation - and IPUs since it doesn't modify the tensor's shape. - """ - - def forward(self, input: Tensor, target: Tensor) -> Tensor: - prev_weight = None - - target = target.clone().to(input.dtype) - weight = self.weight - - # Get the original weight matrix. If None, set all weights = 1 - if weight is not None: - prev_weight = self.weight.clone() - new_size = _infer_size(target.size(), weight.size()) - weight = weight.expand(new_size).clone() - else: - weight = torch.ones(target.shape, dtype=input.dtype, device=input.device) - - # Replace the nan-targets by 0 or 1. Take the value closest to the input. - # Give a weight of 0 where there are nan-targets - nan_targets = target.isnan() - nan_targets_0 = (input < 0.5) & nan_targets - nan_targets_1 = (input >= 0.5) & nan_targets - target[nan_targets_0] = 0.0 - target[nan_targets_1] = 1.0 - weight[nan_targets] = 0.0 - - # Compute the loss, and rescale by the number of nan elements - self.weight = weight - loss = super().forward(input, target) - - num_real_targets = (~nan_targets).sum() - factor1 = torch.where(num_real_targets > 0, 1, 0) - factor2 = torch.where(num_real_targets > 0, 0, 1) - loss = factor1 * loss * nan_targets.numel() / (num_real_targets + factor2) - - # Reset the self.weight to its original value - self.weight = prev_weight - - return loss - - -class BCELossIPU(BCELoss): - """ - A modified version of the `torch.nn.BCELoss` that can ignore NaNs - by giving them a weight of `0`. 
This allows it to work with compilation - and IPUs since it doesn't modify the tensor's shape. - """ - - def forward(self, input: Tensor, target: Tensor) -> Tensor: - prev_weight = None - - target = target.clone().to(input.dtype) - weight = self.weight - - # Get the original weight matrix. If None, set all weights = 1 - if weight is not None: - prev_weight = self.weight.clone() - new_size = _infer_size(target.size(), weight.size()) - weight = weight.expand(new_size).clone() - else: - weight = torch.ones(target.shape, dtype=input.dtype, device=input.device) - - # Replace the nan-targets by 0 or 1. Take the value closest to the input. - # Give a weight of 0 where there are nan-targets - nan_targets = target.isnan() - nan_targets_0 = (input < 0.5) & nan_targets - nan_targets_1 = (input >= 0.5) & nan_targets - target[nan_targets_0] = 0.0 - target[nan_targets_1] = 1.0 - weight[nan_targets] = 0.0 - - # Compute the loss, and rescale by the number of nan elements - self.weight = weight - loss = super().forward(input, target) - - num_real_targets = (~nan_targets).sum() - factor1 = torch.where(num_real_targets > 0, 1, 0) - factor2 = torch.where(num_real_targets > 0, 0, 1) - loss = factor1 * loss * nan_targets.numel() / (num_real_targets + factor2) - - # Reset the self.weight to its original value - self.weight = prev_weight - - return loss - - -class MSELossIPU(MSELoss): - """ - A modified version of the `torch.nn.MSELoss` that can ignore NaNs - by giving them the same value for both `input` and `target`. - This allows it to work with compilation - and IPUs since it doesn't modify the tensor's shape. 
- """ - - def forward(self, input: Tensor, target: Tensor) -> Tensor: - target = target.clone().to(input.dtype) - input = input.clone() - - # Replace the nan-targets in the input/target tensors by 0 - nan_targets = target.isnan() - input[nan_targets] = 0.0 - target[nan_targets] = 0.0 - - # Compute the loss, and rescale by the number of nan elements - loss = super().forward(input, target) - - num_real_targets = (~nan_targets).sum() - factor1 = torch.where(num_real_targets > 0, 1, 0) - factor2 = torch.where(num_real_targets > 0, 0, 1) - loss = factor1 * loss * nan_targets.numel() / (num_real_targets + factor2) - - return loss - - -class L1LossIPU(L1Loss): - """ - A modified version of the `torch.nn.L1Loss` that can ignore NaNs - by giving them the same value for both `input` and `target`. - This allows it to work with compilation - and IPUs since it doesn't modify the tensor's shape. - """ - - def forward(self, input: Tensor, target: Tensor) -> Tensor: - target = target.clone().to(input.dtype) - input = input.clone() - - # Replace the nan-targets in the input/target tensors by 0 - nan_targets = target.isnan() - input[nan_targets] = 0.0 - target[nan_targets] = 0.0 - - # Compute the loss, and rescale by the number of nan elements - loss = super().forward(input, target) - num_real_targets = (~nan_targets).sum() - factor1 = torch.where(num_real_targets > 0, 1, 0) - factor2 = torch.where(num_real_targets > 0, 0, 1) - loss = factor1 * loss * nan_targets.numel() / (num_real_targets + factor2) - - return loss - - -class HybridCELossIPU(HybridCELoss): - def __init__( - self, - n_brackets, - alpha: float = 0.5, - ) -> None: - """ - Parameters: - n_brackets: the number of brackets that will be used to group the regression targets. - Expected to have the same size as the number of classes in the transformed regression task. 
- """ - super().__init__(n_brackets=n_brackets, alpha=alpha) - - def forward(self, input: Tensor, target: Tensor) -> Tensor: - """ - Parameters: - input: (batch_size x n_classes) tensor of logits predicted for each bracket. - target: (batch_size) or (batch_size, 1) tensor of target brackets in {0, 1, ..., self.n_brackets}. - """ - - target = target.clone().to(input.dtype) - input = input.clone() - - # Replace the nan-targets in the input/target tensors by 0 - nan_targets = target.isnan() - - # Compute the loss, and rescale by the number of nan elements - loss = super().forward(input, target, nan_targets) - return loss diff --git a/graphium/ipu/ipu_simple_lightning.py b/graphium/ipu/ipu_simple_lightning.py deleted file mode 100644 index b2fca086e..000000000 --- a/graphium/ipu/ipu_simple_lightning.py +++ /dev/null @@ -1,169 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. 
--------------------------------------------------------------------------------- -""" - - -import lightning -from lightning_graphcore import IPUStrategy -from lightning.pytorch.loggers import WandbLogger - -import torch -from torch import nn - -import torchvision -import torchvision.transforms as transforms - -import mup - -from graphium.nn.base_layers import FCLayer -from graphium.utils.mup import set_base_shapes - - -ON_IPU = True # Change this line to run on CPU -SEED = 42 - - -# The simple PyTorch model used in each of these examples -class SimpleTorchModel(torch.nn.Module): - def __init__(self, in_dim, hidden_dim, kernel_size, num_classes): - super().__init__() - self.in_dim = in_dim - self.hidden_dim = hidden_dim - self.kernel_size = kernel_size - self.num_classes = num_classes - - conv_block = nn.Sequential( - nn.Conv2d(in_channels=in_dim, out_channels=hidden_dim, kernel_size=kernel_size), - nn.BatchNorm2d(hidden_dim), - nn.ReLU(), - nn.MaxPool2d(kernel_size), - nn.MaxPool2d(kernel_size), - ) - - self.the_network = nn.Sequential( - conv_block, - torch.nn.Flatten(), - FCLayer(4 * hidden_dim, hidden_dim), - FCLayer(hidden_dim, hidden_dim), - FCLayer(hidden_dim, num_classes, activation=None, is_readout_layer=True), - nn.LogSoftmax(1), - ) - - def make_mup_base_kwargs(self, divide_factor: float = 2.0): - return dict( - in_dim=self.in_dim, - hidden_dim=round(self.hidden_dim / divide_factor), - kernel_size=self.kernel_size, - num_classes=self.num_classes, - ) - - def forward(self, x): - return self.the_network(x) - - -# This class shows a minimal lightning example. This example uses our own -# SimpleTorchModel which is a basic 2 conv, 2 FC torch network. It can be -# found in simple_torch_model.py. 
-class SimpleLightning(lightning.LightningModule): - def __init__(self, in_dim, hidden_dim, kernel_size, num_classes, on_ipu): - super().__init__() - self.model = SimpleTorchModel( - in_dim=in_dim, hidden_dim=hidden_dim, kernel_size=kernel_size, num_classes=num_classes - ) - self.on_ipu = on_ipu - - def training_step(self, batch, _): - x, label = batch - prediction = self.model(x) - loss = torch.nn.functional.nll_loss(prediction, label) - return loss - - def validation_step(self, batch, _): - x, label = batch - prediction = self.model(x) - preds = torch.argmax(prediction, dim=1) - acc = torch.sum(preds == label).float() / len(label) - loss = torch.nn.functional.nll_loss(prediction, label) - return loss, acc - - # PopTorch doesn't currently support logging within steps. Use the Lightning - # callback hooks instead. - def on_train_batch_end(self, outputs, batch, batch_idx): - self.log("StepLoss", outputs["loss"]) - - def validation_epoch_end(self, outputs): - loss = [out[0] for out in outputs] - self.log("val_loss", torch.stack(loss).mean(), prog_bar=True) - - acc = [out[1] for out in outputs] - self.log("val_acc", torch.stack(acc).mean(), prog_bar=True) - - def configure_optimizers(self): - adam = torch.optim.Adam - - if self.on_ipu: - import poptorch - - adam = poptorch.optim.Adam - - optimizer = mup.MuAdam(self.parameters(), lr=0.01, impl=adam) - return optimizer - - -if __name__ == "__main__": - torch.manual_seed(SEED) - - # Create the model as usual. - predictor = SimpleLightning(in_dim=1, hidden_dim=32, kernel_size=3, num_classes=10, on_ipu=ON_IPU) - model = predictor.model - base = model.__class__(**model.make_mup_base_kwargs(divide_factor=2)) - predictor.model = set_base_shapes(model, base, rescale_params=False) - - torch.manual_seed(SEED) - # Normal PyTorch dataset. 
- train_set = torchvision.datasets.FashionMNIST( - "out/FashionMNIST", train=True, download=True, transform=transforms.Compose([transforms.ToTensor()]) - ) - val_set = torchvision.datasets.FashionMNIST( - "out/FashionMNIST", train=False, download=True, transform=transforms.Compose([transforms.ToTensor()]) - ) - - # Normal PyTorch dataloader. - train_loader = torch.utils.data.DataLoader(train_set, batch_size=16, shuffle=True) - val_loader = torch.utils.data.DataLoader(val_set, batch_size=16, shuffle=False) - - torch.manual_seed(SEED) - - ipus = None - plugins = None - if ON_IPU: - import poptorch - - training_opts = poptorch.Options() - inference_opts = poptorch.Options() - - # Set the seeds - training_opts.randomSeed(SEED) - inference_opts.randomSeed(SEED) - ipus = 1 - strategy = IPUStrategy(training_opts=training_opts, inference_opts=inference_opts) - - trainer = lightning.Trainer( - logger=WandbLogger(), - ipus=ipus, - max_epochs=3, - log_every_n_steps=1, - plugins=plugins, - ) - - # When fit is called the model will be compiled for IPU and will run on the available IPU devices. - trainer.fit(predictor, train_dataloaders=train_loader, val_dataloaders=val_loader) diff --git a/graphium/ipu/ipu_utils.py b/graphium/ipu/ipu_utils.py deleted file mode 100644 index c5140ecb5..000000000 --- a/graphium/ipu/ipu_utils.py +++ /dev/null @@ -1,162 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. 
--------------------------------------------------------------------------------- -""" - - -import os -import tempfile -from datetime import datetime -from copy import deepcopy -from types import ModuleType -from typing import Optional, Tuple, List -import torch - - -def import_poptorch(raise_error=True) -> Optional[ModuleType]: - """ - Import poptorch and returns it. - It is wrapped in a function to avoid breaking the code - for non-IPU devices which did not install poptorch. - - Parameters: - raise_error: Whether to raise an error if poptorch is unavailable. - If `False`, return `None` - - Returns: - The poptorch module - - """ - try: - import poptorch - - return poptorch - except ImportError as e: - if raise_error: - raise e - return - - -def is_running_on_ipu() -> bool: - """ - Returns whether the current module is running on ipu. - Needs to be used in the `forward` or `backward` pass. - """ - poptorch = import_poptorch(raise_error=False) - on_ipu = (poptorch is not None) and (poptorch.isRunningOnIpu()) - return on_ipu - - -def load_ipu_options( - ipu_opts: List[str], - seed: Optional[int] = None, - model_name: Optional[str] = None, - gradient_accumulation: Optional[int] = None, - precision: Optional[int] = None, - ipu_inference_opts: Optional[List[str]] = None, -) -> Tuple["poptorch.Options", "poptorch.Options"]: - """ - Load the IPU options from the config file. - - Parameters: - ipu_cfg: The list configurations for the IPU, written as a list of strings to make use of `poptorch.Options.loadFromFile` - - write a temporary config gile, and read it. See `Options.loadFromFile` - #? see the tutorial for IPU options here - # https://github.com/graphcore/tutorials/tree/sdk-release-2.6/tutorials/pytorch/efficient_data_loading - #? 
see the full documentation for ipu options here - # https://docs.graphcore.ai/projects/poptorch-user-guide/en/latest/reference.html?highlight=options#poptorch.Options - - ***minibatch size***: The number of samples processed by one simple fwd/bwd pass. - = # of samples in a minibatch - - ***device iterations***: A device iteration corresponds to one iteration of the training loop executed on the IPU, starting with data-loading and ending with a weight update. - In this simple case, when we set n deviceIterations, the host will prepare n mini-batches in an infeed queue so the IPU can perform efficiently n iterations. - = # of minibatches to be processed at a time - = # of training / backward pass in this call - - ***gradient accumulation factor***: After each backward pass the gradients are accumulated together for K mini-batches. set K in the argument - = # of minibatches to accumulate gradients from - - ***replication factor***: Replication describes the process of running multiple instances of the same model simultaneously on different IPUs to achieve data parallelism. - If the model requires N IPUs and the replication factor is M, N x M IPUs will be necessary. - = # of times the model is copied to speed up computation, each replica of the model is sent a different subset of the dataset - - ***global batch size***: In a single device iteration, many mini-batches may be processed and the resulting gradients accumulated. - We call this total number of samples processed for one optimiser step the global batch size. - = total number of samples processed for *one optimiser step* - = (minibatch size x Gradient accumulation factor) x Number of replicas - - seed: random seed for the IPU - model_name: Name of the model, to be used for ipu profiling - ipu_inference_opts: optional IPU configuration overrides for inference. - If this is provided, options in this file override those in `ipu_file` for inference. - - Returns: - - training_opts: IPU options for the training set. 
- - inference_opts: IPU options for inference. - It differs from the `training_opts` by enforcing `gradientAccumulation` to 1 - - """ - - poptorch = import_poptorch() - ipu_options = poptorch.Options() - ipu_opts_file = ipu_options_list_to_file(ipu_opts) - ipu_options.loadFromFile(ipu_opts_file.name) - ipu_opts_file.close() - - ipu_options.outputMode(poptorch.OutputMode.All) - if seed is not None: - ipu_options.randomSeed(seed) - if model_name is not None: - ipu_options.modelName(f"{model_name}_train") - if gradient_accumulation is not None: - current = ipu_options.Training.gradient_accumulation - assert (current == 1) or ( - current == gradient_accumulation - ), f"Received inconsistent gradient accumulation `{current}` and `{gradient_accumulation}" - ipu_options.Training.gradientAccumulation(gradient_accumulation) - - if precision == "16-true": - # IPUOptions.loadFromFile currently doesn't support setting half partials, doing it here - ipu_options.Precision.setPartialsType(torch.half) - training_opts = ipu_options - - # Change the inference options to remove gradient accumulation - inference_opts = deepcopy(ipu_options) - inference_opts.Training.gradientAccumulation(1) - if ipu_inference_opts is not None: - ipu_inference_opts_file = ipu_options_list_to_file(ipu_inference_opts) - inference_opts.loadFromFile(ipu_inference_opts_file.name) - ipu_inference_opts_file.close() - - return training_opts, inference_opts - - -def ipu_options_list_to_file(ipu_opts: Optional[List[str]]) -> tempfile._TemporaryFileWrapper: - """ - Create a temporary file from a list of ipu configs, such that it can be read by `poptorch.Options.loadFromFile` - - Parameters: - ipu_opts: The list configurations for the IPU, written as a list of strings to make use of `poptorch.Options.loadFromFile` - Returns: - tmp_file: The temporary file of ipu configs - """ - if ipu_opts is None: - return - - tmp_file = tempfile.NamedTemporaryFile("w", delete=True) - for s in ipu_opts: - tmp_file.write(s + "\n") 
- tmp_file.flush() - return tmp_file diff --git a/graphium/ipu/ipu_wrapper.py b/graphium/ipu/ipu_wrapper.py deleted file mode 100644 index 305326015..000000000 --- a/graphium/ipu/ipu_wrapper.py +++ /dev/null @@ -1,235 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. --------------------------------------------------------------------------------- -""" - - -from typing import Dict, Any, Optional, Callable, Union, Type, Tuple, Iterable - -from torch_geometric.data import Batch -from torch import Tensor -from lightning_graphcore import IPUStrategy -from lightning.pytorch.utilities.types import STEP_OUTPUT -from lightning.pytorch.trainer.states import RunningStage - -from graphium.trainer.predictor import PredictorModule -from graphium.ipu.ipu_utils import import_poptorch - -import torch -from torch_geometric.data import Data, Batch -from torch_geometric.data.data import BaseData -from loguru import logger -import functools -import collections -from graphium.data.utils import get_keys - -poptorch = import_poptorch() - - -class PyGArgsParser(poptorch.ICustomArgParser): - """ - This class is responsible for converting a PyG Batch from and to - a tensor of tuples. This allows PyG Batch to be used as inputs to - IPU programs. Copied from poppyg repo, in the future import from - the repo directly. - """ - - @staticmethod - def sortedTensorKeys(struct: BaseData) -> Iterable[str]: - """ - Find all the keys that map to a tensor value in struct. 
The keys - are returned in sorted order. - """ - all_keys = sorted(get_keys(struct)) - - def isTensor(k: str) -> bool: - return isinstance(struct[k], torch.Tensor) - - return filter(isTensor, all_keys) - - def yieldTensors(self, struct: BaseData): - """ - yield every torch.Tensor in struct in sorted order - """ - for k in self.sortedTensorKeys(struct): - yield struct[k] - - def reconstruct(self, original_structure: BaseData, tensor_iterator: Iterable[Tensor]): - """ - Create a new instance with the same class type as the - original_structure. This new instance will be initialized with tensors - from the provided iterator and uses the same sorted keys from the - yieldTensors() implementation. - """ - tensor_keys = self.sortedTensorKeys(original_structure) - kwargs = {k: next(tensor_iterator) for k in tensor_keys} - - for k in get_keys(original_structure): - if k not in kwargs: - # copy non-tensor properties to the new instance - kwargs[k] = original_structure[k] - - cls = original_structure.__class__ - - if issubclass(cls, Batch): - kwargs["_base_cls"] = Data - return Batch(**kwargs) - - return cls(**kwargs) - - -# PyG uses the BaseData object as the root for data and batch objects -poptorch.registerCustomArgParser(BaseData, PyGArgsParser()) - - -class PredictorModuleIPU(PredictorModule): - """ - This class wraps around the `PredictorModule` to make it work with IPU and the `IPUPluginGraphium`. 
- """ - - def __init__(self, *args, **kwargs): - # Import poptorch in a safe way that will work when working with cpu/gpu - self.poptorch = import_poptorch() - super().__init__(*args, **kwargs) - - @staticmethod - def compute_loss( - preds: Dict[str, Tensor], - targets: Dict[str, Tensor], - weights: Optional[Tensor], - loss_fun: Dict[str, Callable], - target_nan_mask: Union[Type, str] = "ignore", - multitask_handling: Optional[str] = None, - ) -> Tuple[Tensor, Dict[str, Tensor]]: - return PredictorModule.compute_loss( - preds, targets, weights, loss_fun, target_nan_mask, multitask_handling - ) - - def on_train_batch_end(self, outputs, batch, batch_idx): - outputs = self.convert_from_fp16(outputs) - outputs["loss"] = outputs["loss"][outputs["loss"] != 0].mean() - super().on_train_batch_end(outputs, batch, batch_idx) - - def training_step(self, batch, batch_idx) -> Dict[str, Any]: - features, labels = batch["features"], batch["labels"] - features, labels = self.squeeze_input_dims(features, labels) - dict_input = {"features": features, "labels": labels} - step_dict = super().training_step(dict_input) - - loss = step_dict.pop("loss") - step_dict["loss"] = self.poptorch.identity_loss(loss, reduction="mean") - return step_dict - - def validation_step(self, batch, batch_idx) -> Dict[str, Any]: - features, labels = batch["features"], batch["labels"] - features, labels = self.squeeze_input_dims(features, labels) - dict_input = {"features": features, "labels": labels} - step_dict = super().validation_step(dict_input) - - return step_dict - - def test_step(self, batch, batch_idx) -> Dict[str, Any]: - # Build a dictionary from the tuples - features, labels = batch["features"], batch["labels"] - features, labels = self.squeeze_input_dims(features, labels) - dict_input = {"features": features, "labels": labels} - step_dict = super().test_step(dict_input) - - return step_dict - - def predict_step(self, **inputs) -> Dict[str, Any]: - # Build a dictionary from the tuples - 
dict_input = inputs - step_dict = super().predict_step(dict_input) - - return step_dict - - def on_validation_batch_end( - self, outputs: Any, batch: Any, batch_idx: int, dataloader_idx: int = 0 - ) -> None: - # convert data that will be tracked - outputs = self.convert_from_fp16(outputs) - super().on_validation_batch_end(outputs, batch, batch_idx, dataloader_idx) - - def evaluation_epoch_end(self, outputs: Any): - outputs = self.convert_from_fp16(outputs) - super().evaluation_epoch_end(outputs) - - def on_test_batch_end(self, outputs: Any, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> None: - outputs = self.convert_from_fp16(outputs) - super().on_test_batch_end(outputs, batch, batch_idx, dataloader_idx) - - def configure_optimizers(self, impl=None): - if impl is None: - dtype = self.precision_to_dtype(self.trainer.precision) - impl = functools.partial( - self.poptorch.optim.Adam, - accum_type=dtype, - first_order_momentum_accum_type=dtype, - second_order_momentum_accum_type=torch.float, - ) - return super().configure_optimizers(impl=impl) - - def squeeze_input_dims(self, features, labels): - for key, tensor in features: - if isinstance(tensor, torch.Tensor): - features[key] = features[key].squeeze(0) - - for key in labels: - labels[key] = labels[key].squeeze(0) - - return features, labels - - def convert_from_fp16(self, data: Any) -> Any: - """ - Converts tensors from FP16 to FP32. Useful to convert the IPU program output data - """ - if isinstance(data, collections.Sequence): - for idx in range(len(data)): - data[idx] = self.convert_from_fp16(data[idx]) - elif isinstance(data, collections.Mapping): - for key in data: - data[key] = self.convert_from_fp16(data[key]) - elif isinstance(data, torch.Tensor) and data.dtype == torch.float16: - data = data.float() - return data - - def _convert_features_dtype(self, feats): - """ - Converts features to trainer precision rather than model precision. - Necessary to run IPU on FP16. 
- """ - dtype = self.precision_to_dtype(self.trainer.precision) - - # Convert features to dtype - if isinstance(feats, torch.Tensor): - feats = feats.to(dtype) - elif isinstance(feats, (Data, Batch, dict)): - for key, val in feats.items(): - if isinstance(val, torch.Tensor) and (val.is_floating_point()): - feats[key] = val.to(dtype=dtype) - else: - raise ValueError(f"Unsupported feats type `{type(feats)}` : {feats}") - return feats - - def precision_to_dtype(self, precision): - return torch.half if precision == "16-true" else torch.float - - def get_num_graphs(self, data: Batch): - """ - IPU specific method to compute the number of graphs in a Batch, - that considers gradient accumulation, multiple IPUs and multiple - device iterations. Essential to estimate throughput in graphs/s. - """ - num_graphs = torch.max(data.batch, dim=-1).values - num_graphs = torch.sum(num_graphs) - - return num_graphs diff --git a/graphium/ipu/to_dense_batch.py b/graphium/ipu/to_dense_batch.py deleted file mode 100644 index 9198ccf3f..000000000 --- a/graphium/ipu/to_dense_batch.py +++ /dev/null @@ -1,186 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. 
--------------------------------------------------------------------------------- -""" - - -from typing import Optional, Tuple - -import torch -from torch import Tensor -from torch_scatter import scatter_add - - -def to_sparse_batch(x: Tensor, mask_idx: Tensor): - """ - Reverse function of `to_dense_batch` - """ - return torch.index_select(x.reshape(-1, x.shape[-1]), 0, mask_idx) - - -def to_sparse_batch_from_packed(x: Tensor, pack_from_node_idx: Tensor): - """ - Reverse function of `to_packed_dense_batch` - """ - return x[pack_from_node_idx[:, 0], pack_from_node_idx[:, 1]] - - -def to_dense_batch( - x: Tensor, - batch: Optional[Tensor] = None, - fill_value: float = 0.0, - max_num_nodes_per_graph: Optional[int] = None, - batch_size: Optional[int] = None, - drop_nodes_last_graph=False, -) -> Tuple[Tensor, Tensor]: - r"""Given a sparse batch of node features - :math:`\mathbf{X} \in \mathbb{R}^{(N_1 + \ldots + N_B) \times F}` (with - :math:`N_i` indicating the number of nodes in graph :math:`i`), creates a - dense node feature tensor - :math:`\mathbf{X} \in \mathbb{R}^{B \times N_{\max} \times F}` (with - :math:`N_{\max} = \max_i^B N_i`). - In addition, a mask of shape :math:`\mathbf{M} \in \{ 0, 1 \}^{B \times - N_{\max}}` is returned, holding information about the existence of - fake-nodes in the dense representation. - - Parameters: - x: Node feature matrix - :math:`\mathbf{X} \in \mathbb{R}^{(N_1 + \ldots + N_B) \times F}`. - batch: Batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each - node to a specific example. Must be ordered. (default: :obj:`None`) - fill_value: The value for invalid entries in the - resulting dense output tensor. (default: :obj:`0`) - max_num_nodes_per_graph: The size of the output node dimension. - (default: :obj:`None`) - batch_size: The batch size. (default: :obj:`None`) - drop_nodes_last_graph: Whether to drop the nodes of the last graphs that exceed - the `max_num_nodes_per_graph`. 
Useful when the last graph is a padding. - - :rtype: (:class:`Tensor`, :class:`BoolTensor`) - """ - if batch is None and max_num_nodes_per_graph is None: - mask = torch.ones(1, x.size(0), dtype=torch.bool, device=x.device) - return x.unsqueeze(0), mask - - if batch is None: - batch = x.new_zeros(x.size(0), dtype=torch.long) - - if batch_size is None: - assert x.device.type != "ipu", ( - "When using the IPU the batch size must be " - "provided during compilation instead of determined at runtime" - ) - batch_size = int(batch.max()) + 1 - if x.device not in ["ipu", "xla"]: - num_nodes = scatter_add(batch.new_ones(x.size(0)), batch, dim=0, dim_size=batch_size) - else: - # Can't use scatter_add here due to PopTorch bug, will be fixed in SDK 3.3 - arange = torch.arange(batch_size).unsqueeze(-1) - num_nodes = batch.eq(arange).sum(dim=-1) - cum_nodes = torch.cat([batch.new_zeros(1), num_nodes.cumsum(dim=0)]) - - if max_num_nodes_per_graph is None: # Must be provided on IPU - max_num_nodes_per_graph = int(num_nodes.max()) - - idx = torch.arange(batch.size(0), dtype=torch.long, device=x.device) - idx = (idx - cum_nodes[batch]) + (batch * max_num_nodes_per_graph) - - size = [batch_size * max_num_nodes_per_graph] + list(x.size())[1:] - - out = x.new_full(size, fill_value) - - ##### CHANGES FROM PYG ##### - - # In case the last graph represents padding. Drop the overflowing nodes. 
- if drop_nodes_last_graph: - num_nodes = num_nodes[:-1] - idx[idx >= size[0]] = size[0] - 1 - - # Raise error if num_nodes > max_num_nodes - if x.device.type != "ipu": - assert ( - num_nodes <= max_num_nodes_per_graph - ).all(), f"Encountered graphs with {num_nodes.max()} nodes, greater than `max_num_nodes = {max_num_nodes_per_graph}`" - - out[idx] = x - out = out.view([batch_size, max_num_nodes_per_graph] + list(x.size())[1:]) - - # Create a zero-mask on the right device - mask_sz = batch_size * max_num_nodes_per_graph - if x.device.type in ("ipu", "xla"): - mask = torch.zeros(mask_sz, dtype=torch.int32, device="cpu") - mask = mask.to(x.device) - # Can't use mask[idx] here due to PopTorch bug, will be fixed in SDK 3.3 - # mask[idx] = 1 - # mask = mask.bool() - if drop_nodes_last_graph: - num_nodes_with_padding = torch.cat((num_nodes, torch.tensor([0], dtype=torch.int32)), dim=0) - else: - num_nodes_with_padding = num_nodes - - arange = torch.arange(max_num_nodes_per_graph) - mask = num_nodes_with_padding.unsqueeze(-1).gt(arange).flatten() - - else: - mask = torch.zeros(mask_sz, dtype=torch.bool, device=x.device) - mask[idx] = 1 - - ##### END CHANGES FROM PYG ##### - - mask = mask.view(batch_size, max_num_nodes_per_graph) - - return out, mask, idx # Added `idx` as a return - - -def to_packed_dense_batch( - x: Tensor, - pack_from_node_idx: Tensor, - pack_attn_mask: Tensor, - fill_value: float = 0.0, - max_num_nodes_per_pack: Optional[int] = None, -) -> Tuple[Tensor, Tensor]: - r"""Given a sparse batch of node features - :math:`\mathbf{X} \in \mathbb{R}^{(N_1 + \ldots + N_B) \times F}` (with - :math:`N_i` indicating the number of nodes in graph :math:`i`), creates a - dense node feature tensor - :math:`\mathbf{X} \in \mathbb{R}^{B \times N_{\max} \times F}` (with - :math:`N_{\max} = \max_i^B N_i`). 
- In addition, a mask of shape :math:`\mathbf{M} \in \{ 0, 1 \}^{B \times - N_{\max}}` is returned, holding information about the existence of - fake-nodes in the dense representation. - - Parameters: # TODO: Update docstring - x: Node feature matrix - :math:`\mathbf{X} \in \mathbb{R}^{(N_1 + \ldots + N_B) \times F}`. - batch: Batch vector - :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each - node to a specific example. Must be ordered. (default: :obj:`None`) - fill_value: The value for invalid entries in the - resulting dense output tensor. (default: :obj:`0`) - max_num_nodes_per_graph: The size of the output node dimension. - (default: :obj:`None`) - batch_size: The batch size. (default: :obj:`None`) - drop_nodes_last_graph: Whether to drop the nodes of the last graphs that exceed - the `max_num_nodes_per_graph`. Useful when the last graph is a padding. - - :rtype: (:class:`Tensor`, :class:`BoolTensor`) - """ - - if max_num_nodes_per_pack is None: # Must be provided on IPU - max_num_nodes_per_pack = pack_attn_mask.shape[-1] - - size = [pack_attn_mask[0], max_num_nodes_per_pack] + list(x.size())[1:] - - out = x.new_full(size, fill_value) - out[pack_from_node_idx[:, 0], pack_from_node_idx[:, 1]] = x - - return out diff --git a/install_ipu.sh b/install_ipu.sh deleted file mode 100755 index a21022bdb..000000000 --- a/install_ipu.sh +++ /dev/null @@ -1,112 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Graphcore Limited. -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Graphcore Limited is not liable -for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. 
--------------------------------------------------------------------------------- -""" - - -#!/bin/bash - -# Default location for the virtual environment -default_venv_name=".graphium_ipu" - -# Allow the user to specify the location of their virtual environment -# If not specified, use the default location -venv_name=${1:-$default_venv_name} - -# Constants -sdk_compressed_file="poplar_sdk-ubuntu_20_04-3.3.0-208993bbb7.tar.gz" -sdk_wheel_file="poptorch-3.3.0+113432_960e9c294b_ubuntu_20_04-cp38-cp38-linux_x86_64.whl" -sdk_url="https://downloads.graphcore.ai/direct?package=poplar-poplar_sdk_ubuntu_20_04_3.3.0_208993bbb7-3.3.0&file=${sdk_compressed_file}" -sdk_path="${venv_name}/poplar_sdk-ubuntu_20_04-3.3.0+1403-208993bbb7" - -# Check for Python3 and pip -if ! command -v python3 &>/dev/null; then - echo "Python3 is required but it's not installed. Exiting." - exit 1 -fi - -if ! command -v pip3 &>/dev/null; then - echo "pip3 is required but it's not installed. Exiting." - exit 1 -fi - -# Remove existing venv directory if it exists -if [[ -d $venv_name ]]; then - echo "Removing existing virtual environment directory..." - rm -rf $venv_name -fi - -# Create the virtual environment -echo "Creating virtual environment..." -mkdir -p $venv_name -python3 -m venv $venv_name -source $venv_name/bin/activate - -# Update pip to the latest version -echo "Upgrading pip..." -python3 -m pip install --upgrade pip - -# Download the Poplar SDK -echo "Downloading Poplar SDK..." -wget -q -O "${venv_name}/${sdk_compressed_file}" "$sdk_url" - -# Check the wget exit status -if [ $? -ne 0 ]; then - echo "Failed to download Poplar SDK. Exiting." - exit 1 -fi - -# Unzip the SDK file -echo "Extracting Poplar SDK..." -tar -xzf "$venv_name/$sdk_compressed_file" -C $venv_name - -# Install the PopTorch wheel -echo "Installing PopTorch..." -python3 -m pip install "${sdk_path}/${sdk_wheel_file}" - -# Enable Poplar SDK (including Poplar and PopART) -echo "Enabling Poplar SDK..." 
-source ${sdk_path}/enable - -# Install the IPU specific and Graphium requirements -echo "Installing IPU specific and Graphium requirements..." -python3 -m pip install -r requirements_ipu.txt - -# Install Graphium in dev mode -echo "Installing Graphium in dev mode..." -python3 -m pip install --no-deps -e . - -# This is a quick test make sure poptorch is correctly installed -if python3 -c "import poptorch;print('poptorch installed correctly')" &> /dev/null; then - echo "Installation completed successfully." -else - echo "Installation was not successful. Please check the logs and try again." - exit 1 # Exit with status code 1 to indicate failure -fi - -# Download the datafiles (Total ~ 10Mb - nothing compared to the libraries) -echo "Downloading the sub-datasets consisting on the ToyMix dataset" -toymix_dir=expts/data/neurips2023/small-dataset/ -mkdir -p $toymix_dir - -base_url="https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Small-dataset/" -files=("ZINC12k.csv.gz" "Tox21-7k-12-labels.csv.gz" "qm9.csv.gz" "qm9_random_splits.pt" "Tox21_random_splits.pt" "ZINC12k_random_splits.pt") - -for file in "${files[@]}"; do - if [ ! -f "${toymix_dir}${file}" ]; then - echo "Downloading ${file}..." - wget -P "${toymix_dir}" "${base_url}${file}" - else - echo "${file} already exists. Skipping..." - fi -done - -echo "Data has been successfully downloaded." 
\ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 0d9f34bfd..0fb199fa6 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -23,7 +23,6 @@ nav: - graphium.data: api/graphium.data.md - graphium.utils: api/graphium.utils.md - graphium.config: api/graphium.config.md - - graphium.ipu: api/graphium.ipu.md - graphium.finetuning: api/graphium.finetuning.md - Tutorials: - feature_processing: @@ -36,7 +35,6 @@ nav: - Using GNN layers: tutorials/gnn/using_gnn_layers.ipynb - model_training: - Simple Molecular Model: tutorials/model_training/simple-molecular-model.ipynb - - Training on IPU: tutorials/model_training/running-multitask-ipu.ipynb - Design: design.md - Datasets: datasets.md - Pretrained Models: pretrained_models.md diff --git a/pyproject.toml b/pyproject.toml index 78a5869da..364d8fd1b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -104,7 +104,6 @@ filterwarnings = [ "ignore::DeprecationWarning:pkg_resources.*:", ] markers = [ - "ipu: marks tests that are specific to the IPU (deselect with '-m \"not ipu\"')", ] [tool.coverage.run] diff --git a/tests/config_test_ipu_dataloader.yaml b/tests/config_test_ipu_dataloader.yaml index 3f63bfd3d..0ffef963d 100644 --- a/tests/config_test_ipu_dataloader.yaml +++ b/tests/config_test_ipu_dataloader.yaml @@ -1,6 +1,6 @@ # Testing the multitask pipeline with the QM9 dataset on IPU, by splitting it up into three tasks: homo, alpha and cv. 
constants: - name: &name test_ipu #qm9_full + name: &name test #qm9_full seed: &seed 42 raise_train_error: true # Whether the code should raise an error if it crashes during training @@ -266,8 +266,8 @@ predictor: homo: ["mae"] alpha: ["mae"] loss_fun: - homo: mse_ipu - alpha: mse_ipu + homo: mse + alpha: mse random_seed: *seed optim_kwargs: lr: 1.e-3 diff --git a/tests/config_test_ipu_dataloader_multitask.yaml b/tests/config_test_ipu_dataloader_multitask.yaml index 563222d8d..4c41cfc70 100644 --- a/tests/config_test_ipu_dataloader_multitask.yaml +++ b/tests/config_test_ipu_dataloader_multitask.yaml @@ -246,9 +246,9 @@ predictor: tox21: ["auroc"] zinc: ["mae"] loss_fun: - qm9: mae_ipu - tox21: bce_ipu - zinc: mae_ipu + qm9: mae + tox21: bce + zinc: mae random_seed: *seed optim_kwargs: lr: 4.e-5 # warmup can be scheduled using torch_scheduler_kwargs @@ -269,28 +269,28 @@ predictor: metrics: qm9: &qm9_metrics - name: mae - metric: mae_ipu + metric: mae target_nan_mask: null multitask_handling: flatten threshold_kwargs: null - name: pearsonr - metric: pearsonr_ipu + metric: pearsonr threshold_kwargs: null target_nan_mask: null multitask_handling: mean-per-label - name: r2_score - metric: r2_score_ipu + metric: r2_score target_nan_mask: null multitask_handling: mean-per-label threshold_kwargs: null tox21: - name: auroc - metric: auroc_ipu + metric: auroc task: binary multitask_handling: mean-per-label threshold_kwargs: null - name: avpr - metric: average_precision_ipu + metric: average_precision task: binary multitask_handling: mean-per-label threshold_kwargs: null diff --git a/tests/test_ipu_dataloader.py b/tests/test_ipu_dataloader.py deleted file mode 100644 index 2b018206e..000000000 --- a/tests/test_ipu_dataloader.py +++ /dev/null @@ -1,256 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. 
- -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. --------------------------------------------------------------------------------- -""" - - -# General imports -import yaml -import unittest as ut -import numpy as np -from copy import deepcopy -from warnings import warn -from unittest.mock import patch -from lightning import Trainer, LightningModule -from functools import partial -import pytest -from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union - -import torch -from torch.utils.data.dataloader import default_collate - - -def random_packing(num_nodes, batch_size): - ipu_batch_size = int(len(num_nodes) / batch_size) - indices = np.arange(len(num_nodes)) - np.random.shuffle(indices) - indices = np.reshape(indices, (ipu_batch_size, batch_size)).tolist() - return indices - - -def global_batch_collator(batch_size, batches): - packs = [] - for pack_idx in range(0, len(batches), batch_size): - packs.append(default_collate(batches[pack_idx : pack_idx + batch_size])) - global_batch = default_collate(packs) - global_batch = (global_batch[0], tuple(global_batch[1])) - return global_batch - - -@pytest.mark.ipu -class test_DataLoading(ut.TestCase): - class SimpleLightningTesting(LightningModule): - # Create a basic Ligthning for testing the batch sizes - def __init__(self, batch_size, node_feat_size, edge_feat_size, num_batch) -> None: - super().__init__() - self.batch_size = batch_size - self.node_feat_size = node_feat_size - self.edge_feat_size = edge_feat_size - self.layer = torch.nn.Linear(node_feat_size, 1) - self.loss_fn = torch.nn.L1Loss() - self.num_batch = num_batch - - def validation_step(self, batch, 
batch_idx): - self.assert_shapes(batch, batch_idx, "val") - loss = self.forward(batch) - return loss - - def training_step(self, batch, batch_idx): - self.assert_shapes(batch, batch_idx, "train") - loss = self.forward(batch) - return loss - - def forward(self, batch): - out = self.layer(batch[1][0]).squeeze(-1) - loss = self.loss_fn(out, batch[0]) - return loss - - def assert_shapes(self, batch, batch_idx, step): - # Test the shape of the labels - this_shape = list(batch[0].shape) - true_shape = [1, self.batch_size] - assert ( - this_shape == true_shape - ), f"Shape of the labels is `{this_shape}` but should be {true_shape}" - - # Test the shape of the first feature - this_shape = list(batch[1][0].shape) - true_shape = [1, self.batch_size, self.node_feat_size] - assert ( - this_shape == true_shape - ), f"Shape of the feature 0 is `{this_shape}` but should be {true_shape}" - - # Test the shape of the second feature - this_shape = list(batch[1][1].shape) - true_shape = [1, self.batch_size, self.edge_feat_size] - assert ( - this_shape == true_shape - ), f"Shape of the feature 0 is `{this_shape}` but should be {true_shape}" - - def configure_optimizers(self): - return torch.optim.Adam(self.parameters(), lr=1e-3) - - class DatasetTesting(torch.utils.data.Dataset): - # Create a simple dataset for testing the Lightning integration - def __init__(self, labels, node_features, edge_features): - self.labels = labels - self.node_features = node_features - self.edge_features = edge_features - - def __len__(self): - return len(self.labels) - - def __getitem__(self, idx): - # [label, [feat1, feat2]] - return [self.labels[idx], [self.node_features[idx], self.edge_features[idx]]] - - # @pytest.mark.skip - def test_poptorch_simple_deviceiterations_gradient_accumulation(self): - """ - Test a simple version of the device-iterations and gradient accumulation - to make sure that the dataloader and models handle them correcly. 
- """ - from lightning_graphcore import IPUStrategy - - - with patch("poptorch.ipuHardwareIsAvailable", return_value=True): - with patch("lightning_graphcore.accelerator._IPU_AVAILABLE", new=True): - import poptorch - - assert poptorch.ipuHardwareIsAvailable() - from lightning_graphcore.accelerator import _IPU_AVAILABLE - - assert _IPU_AVAILABLE is True - - # Initialize constants - gradient_accumulation = 2 - device_iterations = 3 - batch_size = 5 - num_replicate = 7 - node_feat_size = 11 - edge_feat_size = 13 - - # Initialize the batch info and poptorch options - opts = poptorch.Options() - opts.useIpuModel(True) - opts.deviceIterations(device_iterations) - training_opts = deepcopy(opts) - training_opts.Training.gradientAccumulation(gradient_accumulation) - inference_opts = deepcopy(opts) - - # Initialize the dataset - num_batch = device_iterations * gradient_accumulation * num_replicate - data_size = num_batch * batch_size - dataset = self.TestDataset( - labels=np.random.rand(data_size).astype(np.float32), - node_features=[ - np.random.rand(node_feat_size).astype(np.float32) for ii in range(data_size) - ], - edge_features=[ - np.random.rand(edge_feat_size).astype(np.float32) for ii in range(data_size) - ], - ) - - # Initialize the dataloader - train_dataloader = poptorch.DataLoader( - options=training_opts, - dataset=deepcopy(dataset), - batch_size=batch_size, - collate_fn=partial(global_batch_collator, batch_size), - ) - - val_dataloader = poptorch.DataLoader( - options=inference_opts, - dataset=deepcopy(dataset), - batch_size=batch_size, - collate_fn=partial(global_batch_collator, batch_size), - ) - - # Build the model, and run it on "IPU" - model = self.TestSimpleLightning(batch_size, node_feat_size, edge_feat_size, num_batch) - - strategy = IPUStrategy( - training_opts=training_opts, inference_opts=inference_opts, autoreport=True - ) - trainer = Trainer( - logger=True, - enable_checkpointing=False, - max_epochs=2, - strategy=strategy, - 
num_sanity_val_steps=0, - accelerator="ipu", - devices=1, - ) - trainer.fit(model=model, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader) - - @pytest.mark.skip - def test_poptorch_graphium_deviceiterations_gradient_accumulation_full(self): - """ - Test the device-iterations and gradient accumulation in a way - that is very similar to the Graphium code - to make sure that the dataloader and models handle them correcly. - """ - with patch("poptorch.ipuHardwareIsAvailable", return_value=True): - with patch("lightning_graphcore.accelerator._IPU_AVAILABLE", new=True): - try: - import poptorch - except Exception as e: - warn(f"Skipping this test because poptorch is not available.\n{e}") - return - - from lightning_graphcore import IPUStrategy - import lightning_graphcore - - # Current library imports - from graphium.config._loader import ( - load_datamodule, - load_metrics, - load_architecture, - load_accelerator, - load_predictor, - load_trainer, - ) - from graphium.utils.safe_run import SafeRun - - # Simplified testing config - reflecting the toymix requirements - CONFIG_FILE = "tests/config_test_ipu_dataloader_multitask.yaml" - with open(CONFIG_FILE, "r") as f: - cfg = yaml.safe_load(f) - - cfg, accelerator = load_accelerator(cfg) - - # Load the datamodule, and prepare the data - datamodule = load_datamodule(cfg, accelerator_type=accelerator) - datamodule.prepare_data() - metrics = load_metrics(cfg) - model_class, model_kwargs = load_architecture(cfg, in_dims=datamodule.in_dims) - # datamodule.setup() - predictor = load_predictor( - cfg, - model_class, - model_kwargs, - metrics, - datamodule.get_task_levels(), - accelerator, - datamodule.featurization, - datamodule.task_norms, - ) - assert poptorch.ipuHardwareIsAvailable() - trainer = load_trainer(cfg, "test", accelerator, "date_time_suffix") - # Run the model training - with SafeRun( - name="TRAINING", raise_error=cfg["constants"]["raise_train_error"], verbose=True - ): - 
trainer.fit(model=predictor, datamodule=datamodule) - - -if __name__ == "__main__": - ut.main() diff --git a/tests/test_ipu_losses.py b/tests/test_ipu_losses.py deleted file mode 100644 index cb18eee47..000000000 --- a/tests/test_ipu_losses.py +++ /dev/null @@ -1,172 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. --------------------------------------------------------------------------------- -""" - - -import unittest as ut -import torch -from torch.nn import BCELoss, MSELoss, L1Loss, BCEWithLogitsLoss -from copy import deepcopy -import pytest - -from graphium.ipu.ipu_losses import BCELossIPU, MSELossIPU, L1LossIPU, BCEWithLogitsLossIPU, HybridCELossIPU -from graphium.trainer.losses import HybridCELoss - - -@pytest.mark.ipu -class test_Losses(ut.TestCase): - torch.manual_seed(42) - preds = torch.rand((100, 10), dtype=torch.float32) - target = torch.rand((100, 10), dtype=torch.float32) - - th = 0.7 - nan_th = 0.2 - preds_greater = preds > th - target_greater = (target > th).to(torch.float32) - target_greater_nan = deepcopy(target_greater) - is_nan = target < nan_th - target_greater_nan[target < nan_th] = torch.nan - target_nan = deepcopy(target) - target_nan[target < nan_th] = torch.nan - - def test_bce(self): - preds = deepcopy(self.preds) - target = deepcopy(self.target_greater) - target_nan = deepcopy(self.target_greater_nan) - - # Regular loss - loss_true = BCELoss()(preds, target) - loss_ipu = BCELossIPU()(preds, target) - 
self.assertFalse(loss_true.isnan(), "Regular BCELoss is NaN") - self.assertAlmostEqual( - loss_true.item(), loss_ipu.item(), places=6, msg="Regular BCELoss is different" - ) - - # Weighted loss - weight = torch.rand(preds.shape[1], dtype=torch.float32) - loss_true = BCELoss(weight=weight)(preds, target) - loss_ipu = BCELossIPU(weight=weight)(preds, target) - self.assertFalse(loss_true.isnan(), "Regular BCELoss is NaN") - self.assertAlmostEqual(loss_true.item(), loss_ipu.item(), msg="Weighted BCELoss is different") - - # Regular loss with NaNs in target - not_nan = ~target_nan.isnan() - loss_true = BCELoss()(preds[not_nan], target[not_nan]) - loss_ipu = BCELossIPU()(preds, target_nan) - self.assertFalse(loss_true.isnan(), "Regular BCELoss with target_nan is NaN") - self.assertFalse(loss_ipu.isnan(), "Regular BCELossIPU with target_nan is NaN") - self.assertAlmostEqual( - loss_true.item(), loss_ipu.item(), places=6, msg="Regular BCELoss with NaN is different" - ) - - # Weighted loss with NaNs in target - not_nan = ~target_nan.isnan() - weight = torch.rand(preds.shape, dtype=torch.float32) - loss_true = BCELoss(weight=weight[not_nan])(preds[not_nan], target_nan[not_nan]) - loss_ipu = BCELossIPU(weight=weight)(preds, target_nan) - self.assertFalse(loss_true.isnan(), "Weighted BCELoss with target_nan is NaN") - self.assertFalse(loss_ipu.isnan(), "Weighted BCELossIPU with target_nan is NaN") - self.assertAlmostEqual( - loss_true.item(), loss_ipu.item(), places=6, msg="Weighted BCELoss with NaN is different" - ) - - def test_mse(self): - preds = deepcopy(self.preds) - target = deepcopy(self.target) - target_nan = deepcopy(self.target_nan) - - # Regular loss - loss_true = MSELoss()(preds, target) - loss_ipu = MSELossIPU()(preds, target) - self.assertFalse(loss_true.isnan(), "Regular MSELoss is NaN") - self.assertAlmostEqual( - loss_true.item(), loss_ipu.item(), places=6, msg="Regular MSELoss is different" - ) - - # Regular loss with NaNs in target - not_nan = 
~target_nan.isnan() - loss_true = MSELoss()(preds[not_nan], target[not_nan]) - loss_ipu = MSELossIPU()(preds, target_nan) - self.assertFalse(loss_true.isnan(), "Regular MSELoss with target_nan is NaN") - self.assertFalse(loss_ipu.isnan(), "Regular MSELossIPU with target_nan is NaN") - self.assertAlmostEqual( - loss_true.item(), loss_ipu.item(), places=6, msg="Regular MSELoss with NaN is different" - ) - - def test_l1(self): - preds = deepcopy(self.preds) - target = deepcopy(self.target) - target_nan = deepcopy(self.target_nan) - - # Regular loss - loss_true = L1Loss()(preds, target) - loss_ipu = L1LossIPU()(preds, target) - self.assertFalse(loss_true.isnan(), "Regular MAELoss is NaN") - self.assertAlmostEqual( - loss_true.item(), loss_ipu.item(), places=6, msg="Regular MAELoss is different" - ) - - # Regular loss with NaNs in target - not_nan = ~target_nan.isnan() - loss_true = L1Loss()(preds[not_nan], target[not_nan]) - loss_ipu = L1LossIPU()(preds, target_nan) - self.assertFalse(loss_true.isnan(), "Regular MAELoss with target_nan is NaN") - self.assertFalse(loss_ipu.isnan(), "Regular MAELossIPU with target_nan is NaN") - self.assertAlmostEqual( - loss_true.item(), loss_ipu.item(), places=6, msg="Regular MAELoss with NaN is different" - ) - - def test_bce_logits(self): - preds = deepcopy(self.preds) - target = deepcopy(self.target_greater) - target_nan = deepcopy(self.target_greater_nan) - - # Regular loss - loss_true = BCEWithLogitsLoss()(preds, target) - loss_ipu = BCEWithLogitsLossIPU()(preds, target) - self.assertFalse(loss_true.isnan(), "Regular BCEWithLogitsLoss is NaN") - self.assertAlmostEqual( - loss_true.item(), loss_ipu.item(), places=6, msg="Regular BCEWithLogitsLoss is different" - ) - - # Weighted loss - weight = torch.rand(preds.shape[1], dtype=torch.float32) - loss_true = BCEWithLogitsLoss(weight=weight)(preds, target) - loss_ipu = BCEWithLogitsLossIPU(weight=weight)(preds, target) - self.assertFalse(loss_true.isnan(), "Regular BCEWithLogitsLoss is 
NaN") - self.assertAlmostEqual( - loss_true.item(), loss_ipu.item(), msg="Weighted BCEWithLogitsLoss is different" - ) - - # Regular loss with NaNs in target - not_nan = ~target_nan.isnan() - loss_true = BCEWithLogitsLoss()(preds[not_nan], target[not_nan]) - loss_ipu = BCEWithLogitsLossIPU()(preds, target_nan) - self.assertFalse(loss_true.isnan(), "Regular test_bce_logits with target_nan is NaN") - self.assertFalse(loss_ipu.isnan(), "Regular test_bce_logits with target_nan is NaN") - self.assertAlmostEqual( - loss_true.item(), loss_ipu.item(), places=6, msg="Regular BCELoss with NaN is different" - ) - - # Weighted loss with NaNs in target - not_nan = ~target_nan.isnan() - weight = torch.rand(preds.shape, dtype=torch.float32) - loss_true = BCEWithLogitsLoss(weight=weight[not_nan])(preds[not_nan], target_nan[not_nan]) - loss_ipu = BCEWithLogitsLossIPU(weight=weight)(preds, target_nan) - self.assertFalse(loss_true.isnan(), "Weighted test_bce_logits with target_nan is NaN") - self.assertFalse(loss_ipu.isnan(), "Weighted test_bce_logits with target_nan is NaN") - self.assertAlmostEqual( - loss_true.item(), - loss_ipu.item(), - places=6, - msg="Weighted BCEWithLogitsLoss with NaN is different", - ) diff --git a/tests/test_ipu_options.py b/tests/test_ipu_options.py deleted file mode 100644 index c3cc9aa3e..000000000 --- a/tests/test_ipu_options.py +++ /dev/null @@ -1,149 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. 
--------------------------------------------------------------------------------- -""" - - -import pytest -from graphium.config._loader import _get_ipu_opts, load_ipu_options -from graphium.ipu.ipu_utils import ipu_options_list_to_file - -import tempfile -from typing import Optional, List -import os - -CONFIG_EXTRACT = { - "trainer": {"trainer": {"accumulate_grad_batches": 10}}, - "accelerator": { - "type": "ipu", - "config_override": { - "datamodule": { - "args": { - "ipu_dataloader_training_opts": { - "mode": "async", - "max_num_nodes_per_graph": 44, - "max_num_edges_per_graph": 80, - }, - "ipu_dataloader_inference_opts": { - "mode": "async", - "max_num_nodes_per_graph": 44, - "max_num_edges_per_graph": 80, - }, - "batch_size_training": 50, - "batch_size_inference": 50, - } - }, - "predictor": {"optim_kwargs": {"loss_scaling": 1024}}, - "trainer": {"trainer": {"precision": 16, "accumulate_grad_batches": 4}}, - }, - "ipu_config": [ - "deviceIterations(5)", - "replicationFactor(16)", - "TensorLocations.numIOTiles(128)", - '_Popart.set("defaultBufferingDepth", 128)', - "Precision.enableStochasticRounding(True)", - ], - "ipu_inference_config": [ - "deviceIterations(1)", - "replicationFactor(4)", - "TensorLocations.numIOTiles(32)", - '_Popart.set("defaultBufferingDepth", 16)', - "Precision.enableStochasticRounding(True)", - ], - }, -} - - -@pytest.mark.ipu -def test_ipu_options(): - try: - import poptorch - - ipu_opts, ipu_inference_opts = _get_ipu_opts(CONFIG_EXTRACT) - - # Define the expected IPU options for comparison - expected_ipu_opts = [ - "deviceIterations(5)", - "replicationFactor(16)", - "TensorLocations.numIOTiles(128)", - '_Popart.set("defaultBufferingDepth", 128)', - "Precision.enableStochasticRounding(True)", - ] - expected_ipu_inference_opts = [ - "deviceIterations(1)", - "replicationFactor(4)", - "TensorLocations.numIOTiles(32)", - '_Popart.set("defaultBufferingDepth", 16)', - "Precision.enableStochasticRounding(True)", - ] - - # Test the _get_ipu_opts 
method - ipu_opts, ipu_inference_opts = _get_ipu_opts(CONFIG_EXTRACT) - assert ipu_opts == expected_ipu_opts, f"Expected {expected_ipu_opts}, but got {ipu_opts}" - assert ( - ipu_inference_opts == expected_ipu_inference_opts - ), f"Expected {expected_ipu_inference_opts}, but got {ipu_inference_opts}" - - # Test the load_ipu_options method - ipu_training_opts, ipu_inference_opts = load_ipu_options( - ipu_opts=ipu_opts, - seed=42, - model_name="test_model", - gradient_accumulation=CONFIG_EXTRACT["trainer"]["trainer"].get("accumulate_grad_batches", None), - ipu_inference_opts=ipu_inference_opts, - ) - - # Ensure that the options objects are not None - assert ipu_training_opts is not None, "Expected ipu_training_opts not to be None" - assert ipu_inference_opts is not None, "Expected ipu_inference_opts not to be None" - - # Test the properties of the options objects - assert ( - ipu_training_opts.replication_factor == 16 - ), "Expected replication_factor of ipu_training_opts to be 16" - assert ( - ipu_inference_opts.replication_factor == 4 - ), "Expected replication_factor of ipu_inference_opts to be 4" - assert ipu_training_opts._popart, "Expected _popart of ipu_training_opts to be True" - assert ipu_inference_opts._popart, "Expected _popart of ipu_inference_opts to be True" - - except ImportError: - pytest.skip("Skipping this test because poptorch is not available") - - -@pytest.mark.ipu -def test_ipu_options_list_to_file(): - # Define a list of IPU options - ipu_options = [ - "deviceIterations(5)", - "replicationFactor(16)", - "TensorLocations.numIOTiles(128)", - '_Popart.set("defaultBufferingDepth", 128)', - "Precision.enableStochasticRounding(True)", - ] - - # Call the function with the list of IPU options - tmp_file = ipu_options_list_to_file(ipu_options) - - # Check that the function returns a temporary file object - assert isinstance(tmp_file, tempfile._TemporaryFileWrapper) - - # Check that the temporary file exists - assert os.path.exists(tmp_file.name) - - # 
Check the contents of the temporary file - with open(tmp_file.name, "r") as f: - contents = f.read().splitlines() - assert contents == ipu_options - - # Check the behavior when the input is None - tmp_file = ipu_options_list_to_file(None) - assert tmp_file is None diff --git a/tests/test_ipu_poptorch.py b/tests/test_ipu_poptorch.py deleted file mode 100644 index 4f951d504..000000000 --- a/tests/test_ipu_poptorch.py +++ /dev/null @@ -1,29 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. 
--------------------------------------------------------------------------------- -""" - - -import pytest - - -@pytest.mark.ipu -def test_poptorch(): - # Run this test only if poptorch is available - # Primarily to test the install and SDK is correctly activated - try: - import poptorch - - opts = poptorch.Options() - - except ImportError: - raise ImportError - assert True From 49e9984889ebd06b7dbcb5b3a42670dedacc2494 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 6 Sep 2024 21:27:39 -0400 Subject: [PATCH 129/175] More removal of ipu --- .../nn/architectures/global_architectures.py | 44 +-- graphium/nn/base_layers.py | 20 +- .../encoders/gaussian_kernel_pos_encoder.py | 6 +- graphium/nn/pyg_layers/utils.py | 9 +- graphium/utils/spaces.py | 7 - tests/config_test_ipu_dataloader.yaml | 300 ---------------- .../config_test_ipu_dataloader_multitask.yaml | 336 ------------------ 7 files changed, 9 insertions(+), 713 deletions(-) delete mode 100644 tests/config_test_ipu_dataloader.yaml delete mode 100644 tests/config_test_ipu_dataloader_multitask.yaml diff --git a/graphium/nn/architectures/global_architectures.py b/graphium/nn/architectures/global_architectures.py index db28adac4..52bd132c0 100644 --- a/graphium/nn/architectures/global_architectures.py +++ b/graphium/nn/architectures/global_architectures.py @@ -40,7 +40,6 @@ ResidualConnectionRandom, ) from graphium.nn.utils import MupMixin -from graphium.ipu.ipu_utils import import_poptorch, is_running_on_ipu poptorch = import_poptorch(raise_error=False) @@ -1476,8 +1475,6 @@ def __init__( if accelerator_kwargs is not None: accelerator = accelerator_kwargs["_accelerator"] - if accelerator == "ipu": - self._apply_ipu_options(accelerator_kwargs) self._check_bad_arguments() @@ -1529,45 +1526,6 @@ def _check_bad_arguments(self): f"Task heads have graph level tasks {', '.join(graph_level_tasks)}, but pooling is none." 
) - def _apply_ipu_options(self, ipu_kwargs): - gnn_layers_per_ipu = ipu_kwargs.get("gnn_layers_per_ipu") - self._apply_ipu_pipeline_split(gnn_layers_per_ipu) - - def _apply_ipu_pipeline_split(self, gnn_layers_per_ipu): - r""" - Apply pipeline split from accelerator options if applicable - """ - - if gnn_layers_per_ipu is None: - return - - if not isinstance(gnn_layers_per_ipu, collections.abc.Sequence): - raise ValueError("gnn_layers_per_ipu must be a Sequence (e.g. a list)") - - valid_ipu_pipeline_lengths = [1, 2, 4, 8, 16] - pipeline_length = len(gnn_layers_per_ipu) - - if pipeline_length not in valid_ipu_pipeline_lengths: - raise ValueError( - f"Length of gnn_layers_per_ipu must be one of {valid_ipu_pipeline_lengths}, " - f"got {gnn_layers_per_ipu} of length {pipeline_length} instead" - ) - - model_depth = len(self.gnn.layers) - - if sum(gnn_layers_per_ipu) != model_depth: - raise ValueError( - f"The values in gnn_layers_per_ipu must add up to the depth of the model, " - f"got {gnn_layers_per_ipu} with total {sum(gnn_layers_per_ipu)} vs model depth " - f"of {model_depth}" - ) - - begin_block_layer_indices = [sum(gnn_layers_per_ipu[:i]) for i in range(1, pipeline_length)] - - for begin_block_layer_index, ipu_id in zip(begin_block_layer_indices, range(1, pipeline_length)): - self.gnn.layers[begin_block_layer_index] = poptorch.BeginBlock( - self.gnn.layers[begin_block_layer_index], ipu_id=ipu_id - ) def _enable_readout_cache(self, module_filter: Optional[Union[str, List[str]]]): """ @@ -1934,7 +1892,7 @@ def forward(self, g: Batch): node_feats=g["feat"], batch=g.batch, max_num_nodes=self.max_num_nodes_per_graph, - drop_nodes_last_graph=is_running_on_ipu(), + drop_nodes_last_graph=False, ) # Check if at least one graph-level task is present if self.task_level == "graph": diff --git a/graphium/nn/base_layers.py b/graphium/nn/base_layers.py index a88e6ad9b..84954378e 100644 --- a/graphium/nn/base_layers.py +++ b/graphium/nn/base_layers.py @@ -26,7 +26,6 @@ from mup 
import set_base_shapes, MuReadout from torch.nn.functional import linear -from graphium.ipu.ipu_utils import is_running_on_ipu SUPPORTED_ACTIVATION_MAP = { "ReLU", @@ -243,9 +242,7 @@ class MuReadoutGraphium(MuReadout): Not quite a drop-in replacement for `mup.MuReadout` - you need to specify `base_width`. - Set `base_width` to width of base model passed to `mup.set_base_shapes` - to get same results on IPU and CPU. Should still "work" with any other - value, but won't give the same results as CPU + Set `base_width` to width of base model passed to `mup.set_base_shapes`. """ def __init__(self, in_features, *args, **kwargs): @@ -725,32 +722,23 @@ def forward( Parameters: input: `torch.Tensor[total_num_nodes, hidden]` batch: batch attribute of the batch object, batch.batch - batch_size: The batch size. Must be provided when working on IPU + batch_size: The batch size. Returns: torch.Tensor: `torch.Tensor[total_num_nodes, hidde]` """ - on_ipu = is_running_on_ipu() if self.drop_rate > 0: keep_prob = 1 - self.drop_rate # Parse the batch size if batch_size is None: - if on_ipu: - raise ValueError( - "When using the IPU the batch size must be " - "provided during compilation instead of determined at runtime" - ) - else: - batch_size = int(batch_idx.max()) + 1 + batch_size = int(batch_idx.max()) + 1 # mask shape: [num_graphs, 1] mask = input.new_empty(batch_size, 1).bernoulli_(keep_prob) - # if on_ipu, the last graph is a padded fake graph - if on_ipu: - mask[-1] = 0 + # using gather to extend mask to [total_num_nodes, 1] node_mask = mask[batch_idx] if keep_prob == 0: diff --git a/graphium/nn/encoders/gaussian_kernel_pos_encoder.py b/graphium/nn/encoders/gaussian_kernel_pos_encoder.py index 44ddd5578..19ff813ac 100644 --- a/graphium/nn/encoders/gaussian_kernel_pos_encoder.py +++ b/graphium/nn/encoders/gaussian_kernel_pos_encoder.py @@ -2,7 +2,6 @@ from torch_geometric.data import Batch from graphium.nn.pyg_layers.utils import PreprocessPositions -from 
graphium.ipu.ipu_utils import is_running_on_ipu from graphium.nn.encoders.base_encoder import BaseEncoder @@ -116,13 +115,10 @@ def forward(self, batch: Batch, key_prefix: Optional[str] = None) -> Dict[str, A """ input_keys = self.parse_input_keys_with_prefix(key_prefix) - on_ipu = is_running_on_ipu() max_num_nodes_per_graph = None - if on_ipu: - max_num_nodes_per_graph = self.max_num_nodes_per_graph attn_bias_3d, node_feature_3d = self.preprocess_3d_positions( - batch, max_num_nodes_per_graph, on_ipu, positions_3d_key=input_keys[0] + batch, max_num_nodes_per_graph, positions_3d_key=input_keys[0] ) # Return `attn_bias_3d` if the key starts with 'nodepair_' diff --git a/graphium/nn/pyg_layers/utils.py b/graphium/nn/pyg_layers/utils.py index 83dc4f737..886788940 100644 --- a/graphium/nn/pyg_layers/utils.py +++ b/graphium/nn/pyg_layers/utils.py @@ -74,7 +74,7 @@ def __init__( self.node_proj = nn.Linear(self.num_kernel, self.embed_dim) def forward( - self, batch: Batch, max_num_nodes_per_graph: int, on_ipu: bool, positions_3d_key: str + self, batch: Batch, max_num_nodes_per_graph: int, positions_3d_key: str ) -> Tuple[Tensor, Tensor]: r""" Inputs: @@ -82,8 +82,6 @@ def forward( Batch object. max_num_nodes_per_graph: Maximum number of nodes per graph. - on_ipu: - If model rus on IPU. positions_3d_key: The key of the pyg graph object that contains the 3D positions. 
@@ -92,8 +90,7 @@ def forward( pos = batch[positions_3d_key] if self.first_normalization is not None: pos = self.first_normalization(pos) - batch_size = None if pos.device.type != "ipu" else batch.graph_is_true.shape[0] - # batch_size = None if batch.feat.device.type != "ipu" else batch.graph_is_true.shape[0] #[Andy] batch.feat is only available after passing through layers, not a good attribute to check + batch_size = None # pos: [batch, nodes, 3] # padding_mask: [batch, nodes] # idx: [totoal_nodes] @@ -102,7 +99,7 @@ def forward( batch=batch.batch, batch_size=batch_size, max_num_nodes_per_graph=max_num_nodes_per_graph, - drop_nodes_last_graph=on_ipu, + drop_nodes_last_graph=False, ) # check nan with the pos from to_dense_batch, # and generate mask. 1 for nan, 0 for other values. diff --git a/graphium/utils/spaces.py b/graphium/utils/spaces.py index c7b6a7ac9..0ed17c0d7 100644 --- a/graphium/utils/spaces.py +++ b/graphium/utils/spaces.py @@ -22,7 +22,6 @@ import graphium.nn.architectures as Architectures import graphium.utils.custom_lr as CustomLR import graphium.data.datamodule as Datamodules -import graphium.ipu.ipu_losses as IPULosses import graphium.nn.pyg_layers as PygLayers import graphium.nn.residual_connections as Residuals import graphium.nn.encoders as Encoders @@ -78,12 +77,6 @@ "l1": torch.nn.L1Loss, "mae": torch.nn.L1Loss, "hybrid_ce": Losses.HybridCELoss, - "bce_ipu": IPULosses.BCELossIPU, - "bce_logits_ipu": IPULosses.BCEWithLogitsLossIPU, - "mse_ipu": IPULosses.MSELossIPU, - "mae_ipu": IPULosses.L1LossIPU, - "l1_ipu": IPULosses.L1LossIPU, - "hybrid_ce_ipu": IPULosses.HybridCELossIPU, } diff --git a/tests/config_test_ipu_dataloader.yaml b/tests/config_test_ipu_dataloader.yaml deleted file mode 100644 index 0ffef963d..000000000 --- a/tests/config_test_ipu_dataloader.yaml +++ /dev/null @@ -1,300 +0,0 @@ -# Testing the multitask pipeline with the QM9 dataset on IPU, by splitting it up into three tasks: homo, alpha and cv. 
-constants: - name: &name test #qm9_full - seed: &seed 42 - raise_train_error: true # Whether the code should raise an error if it crashes during training - -accelerator: - type: ipu # cpu or ipu or gpu - config_override: - datamodule: - args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 20 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 60 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 16 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 120 - # Data handling-related - batch_size_training: 6 - batch_size_inference: 6 - trainer: - trainer: - precision: 16 - accumulate_grad_batches: 4 - - ipu_config: - - deviceIterations(2) - -datamodule: - module_type: "MultitaskFromSmilesDataModule" - args: # Matches that in the test_multitask_datamodule.py case. - task_specific_args: # To be replaced by a new class "DatasetParams" - homo: - df: null - df_path: &df_path https://storage.googleapis.com/datasets-public-research/PCQM4M/cxsmiles/pcqm4mv2-2k-lumo-alpha.csv - smiles_col: "cxsmiles" - label_cols: ["homo_lumo_gap", "lumo"] - split_val: 0.2 - split_test: 0.2 - seed: *seed - splits_path: null # This may not always be provided - sample_size: null # This may not always be provided - idx_col: null # This may not always be provided - weights_col: null # This may not always be provided - weights_type: null # This may not always be provided - task_level: graph - alpha: - df: null - df_path: *df_path - smiles_col: "cxsmiles" - label_cols: ["alpha"] - split_val: 0.2 - split_test: 0.2 - seed: *seed - splits_path: null # This may not always be provided - sample_size: null # This may not always be provided - idx_col: null # This may not always be provided - weights_col: null # This may not always be provided - weights_type: null # This may not always be provided - task_level: graph - # Featurization - featurization: - atom_property_list_onehot: [atomic-number, valence] - atom_property_list_float: 
[mass, electronegativity, in-ring] - edge_property_list: [bond-type-onehot, stereo, in-ring] - conformer_property_list: [positions_3d] - add_self_loop: False - explicit_H: False - use_bonds_weights: False - pos_encoding_as_features: # encoder dropout 0.18 - pos_types: - node_laplacian_eigvec: - pos_type: laplacian_eigvec - pos_level: node - num_pos: 5 - normalization: "none" - disconnected_comp: True - node_laplacian_eigval: - pos_type: laplacian_eigval - pos_level: node - num_pos: 5 - normalization: "none" - disconnected_comp: True - rw_return_probs: - pos_type: rw_return_probs - pos_level: node - ksteps: [4, 8] - edge_rw_transition_probs: - pos_type: rw_transition_probs - pos_level: edge - ksteps: [2, 4] - nodepair_rw_return_probs: - pos_type: rw_return_probs - pos_level: nodepair - ksteps: [4] - electrostatic: - pos_type: electrostatic - pos_level: node - edge_commute: - pos_type: commute - pos_level: edge - nodepair_graphormer: - pos_type: graphormer - pos_level: nodepair - - num_workers: -1 - -architecture: - model_type: FullGraphMultiTaskNetwork - mup_base_path: null - - pre_nn: # Set as null to avoid a pre-nn network - out_dim: 16 - hidden_dims: 16 - depth: 1 - activation: relu - last_activation: none - dropout: &dropout 0.1 - normalization: &normalization batch_norm - last_normalization: *normalization - residual_type: none - - pre_nn_edges: # Set as null to avoid a pre-nn network - out_dim: 16 - hidden_dims: 16 - depth: 1 - activation: relu - last_activation: none - dropout: *dropout - normalization: *normalization - last_normalization: *normalization - residual_type: none - - pe_encoders: - out_dim: &pe_out_dim 16 - edge_out_dim: &edge_pe_out_dim 8 - pool: "sum" #"mean" "max" - last_norm: None #"batch_norm", "layer_norm" - max_num_nodes_per_graph: 30 - encoders: - emb_la_pos: - encoder_type: "laplacian_pe" - input_keys: ["laplacian_eigvec", "laplacian_eigval"] - output_keys: ["feat"] - hidden_dim: 32 - model_type: 'DeepSet' #'Transformer' or 'DeepSet' - 
num_layers: 2 - num_layers_post: 1 # Num. layers to apply after pooling - dropout: 0.1 - first_normalization: "none" #"batch_norm" or "layer_norm" - emb_rwse: - encoder_type: "mlp" - input_keys: ["rw_return_probs"] - output_keys: ["feat"] - hidden_dim: 32 - num_layers: 2 - dropout: 0.1 - normalization: "layer_norm" #"batch_norm" or "layer_norm" - first_normalization: "layer_norm" #"batch_norm" or "layer_norm" - emb_electrostatic: - encoder_type: "mlp" - input_keys: ["electrostatic"] - output_keys: ["feat"] - hidden_dim: 32 - num_layers: 1 - dropout: 0.1 - normalization: "layer_norm" #"batch_norm" or "layer_norm" - first_normalization: "layer_norm" #"batch_norm" or "layer_norm" - emb_edge_rwse: - encoder_type: "mlp" - input_keys: ["edge_rw_transition_probs"] - output_keys: ["edge_feat"] - hidden_dim: 32 - num_layers: 1 - dropout: 0.1 - normalization: "layer_norm" #"batch_norm" or "layer_norm" - emb_edge_pes: - encoder_type: "cat_mlp" - input_keys: ["edge_rw_transition_probs", "edge_commute"] - output_keys: ["edge_feat"] - hidden_dim: 32 - num_layers: 1 - dropout: 0.1 - normalization: "layer_norm" #"batch_norm" or "layer_norm" - gaussian_pos: - encoder_type: "gaussian_kernel" - input_keys: ["positions_3d"] - output_keys: ["feat", "nodepair_gaussian_bias_3d"] - num_heads: &num_heads 2 - num_layers: 2 - embed_dim: *pe_out_dim - use_input_keys_prefix: False - - gnn: # Set as null to avoid a post-nn network - out_dim: 8 - hidden_dims: 16 - depth: 2 - activation: relu - last_activation: none - dropout: *dropout - normalization: *normalization - last_normalization: *normalization - residual_type: simple - virtual_node: 'none' - layer_type: 'pyg:gps' #pyg:gine #'pyg:gps' # pyg:gated-gcn, pyg:gine,pyg:gps - layer_kwargs: # Parameters for the model itself. 
You could define dropout_attn: 0.1 - mpnn_type: 'pyg:gine' - mpnn_kwargs: null - #out_dim_edges: 10 - attn_type: "none" # "full-attention", "none" - attn_kwargs: null - - graph_output_nn: - graph: - pooling: [sum, mean, max] - out_dim: 8 - hidden_dims: 8 - depth: 1 - activation: relu - last_activation: none - dropout: *dropout - normalization: *normalization - last_normalization: "none" - residual_type: none - - task_heads: - homo: - out_dim: 2 - hidden_dims: 8 - depth: 1 # Not needed if we have hidden_dims - activation: relu - last_activation: none - dropout: *dropout - normalization: *normalization - last_normalization: "none" - residual_type: none - task_level: graph - alpha: - out_dim: 1 - hidden_dims: 8 - depth: 1 # Not needed if we have hidden_dims - activation: relu - last_activation: none - dropout: *dropout - normalization: *normalization - last_normalization: "none" - residual_type: none - task_level: graph - cv: - out_dim: 1 - hidden_dims: 8 - depth: 2 # Not needed if we have hidden_dims - activation: relu - last_activation: none - dropout: *dropout - normalization: *normalization - last_normalization: "none" - residual_type: none - task_level: graph - -#Task-specific -predictor: - metrics_on_progress_bar: - homo: ["mae"] - alpha: ["mae"] - loss_fun: - homo: mse - alpha: mse - random_seed: *seed - optim_kwargs: - lr: 1.e-3 - target_nan_mask: null - -# Task-specific -metrics: - homo: - - name: mae - metric: mae - threshold_kwargs: null - target_nan_mask: null - alpha: - - name: mae - metric: mae - threshold_kwargs: null - -trainer: - seed: *seed - logger: - save_dir: logs/QM9 - name: *name - model_checkpoint: - dirpath: models_checkpoints/QM9/ - filename: *name - save_top_k: 1 - every_n_epochs: 1 - trainer: - max_epochs: 2 - min_epochs: 1 diff --git a/tests/config_test_ipu_dataloader_multitask.yaml b/tests/config_test_ipu_dataloader_multitask.yaml deleted file mode 100644 index 4c41cfc70..000000000 --- a/tests/config_test_ipu_dataloader_multitask.yaml +++ 
/dev/null @@ -1,336 +0,0 @@ -# Testing the gcn model with the PCQMv2 dataset on IPU. -constants: - name: &name neurips2023_small_data_gcn - seed: &seed 42 - raise_train_error: true # Whether the code should raise an error if it crashes during training - -accelerator: - type: ipu # cpu or ipu or gpu - config_override: - datamodule: - args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 44 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 80 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 44 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 80 - # Data handling-related - batch_size_training: 50 - batch_size_inference: 50 - predictor: - optim_kwargs: - loss_scaling: 1024 - trainer: - trainer: - precision: 16 - accumulate_grad_batches: 4 - - ipu_config: - - deviceIterations(5) # IPU would require large batches to be ready for the model. - - replicationFactor(1) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - - useIpuModel(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - -datamodule: - module_type: "MultitaskFromSmilesDataModule" - args: # Matches that in the test_multitask_datamodule.py case. 
- task_specific_args: # To be replaced by a new class "DatasetParams" - qm9: - df: null - df_path: qm9.csv.gz - # wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Small-dataset/qm9.csv.gz - # or set path as the URL directly - smiles_col: "smiles" - label_cols: ["A", "B", "C", "mu", "alpha", "homo", "lumo", "gap", "r2", "zpve", "u0", "u298", "h298", "g298", "cv", "u0_atom", "u298_atom", "h298_atom", "g298_atom"] - sample_size: 2000 # use sample_size for test - seed: *seed - task_level: graph - label_normalization: - normalize_val_test: True - method: "normal" - - tox21: - df: null - df_path: Tox21-7k-12-labels.csv.gz - # wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Small-dataset/Tox21-7k-12-labels.csv.gz - # or set path as the URL directly - smiles_col: "smiles" - label_cols: ["NR-AR", "NR-AR-LBD", "NR-AhR", "NR-Aromatase", "NR-ER", "NR-ER-LBD", "NR-PPAR-gamma", "SR-ARE", "SR-ATAD5", "SR-HSE", "SR-MMP", "SR-p53"] - sample_size: 2000 # use sample_size for test - seed: *seed - task_level: graph - - zinc: - df: null - df_path: ZINC12k.csv.gz - # df_path: data/neurips2023/small-dataset/ZINC12k.csv.gz - # wget https://storage.googleapis.com/graphium-public/datasets/neurips_2023/Small-dataset/ZINC12k.csv.gz - # or set path as the URL directly - smiles_col: "smiles" - label_cols: ["SA", "logp", "score"] - sample_size: 2000 # use sample_size for test - seed: *seed - task_level: graph - label_normalization: - normalize_val_test: True - method: "normal" - - # Featurization - # processed_graph_data_path: "../datacache/neurips2023-small/" - featurization: - # OGB: ['atomic_num', 'degree', 'possible_formal_charge', 'possible_numH' (total-valence), - # 'possible_number_radical_e', 'possible_is_aromatic', 'possible_is_in_ring', - # 'num_chiral_centers (not included yet)'] - atom_property_list_onehot: [atomic-number, group, period, total-valence] - atom_property_list_float: [degree, formal-charge, radical-electron, aromatic, 
in-ring] - # OGB: ['possible_bond_type', 'possible_bond_stereo', 'possible_is_in_ring'] - edge_property_list: [bond-type-onehot, stereo, in-ring] - add_self_loop: False - explicit_H: False # if H is included - use_bonds_weights: False - pos_encoding_as_features: # encoder dropout 0.18 - pos_types: - lap_eigvec: - pos_level: node - pos_type: laplacian_eigvec - num_pos: 8 - normalization: "none" # nomrlization already applied on the eigen vectors - disconnected_comp: True # if eigen values/vector for disconnected graph are included - lap_eigval: - pos_level: node - pos_type: laplacian_eigval - num_pos: 8 - normalization: "none" # nomrlization already applied on the eigen vectors - disconnected_comp: True # if eigen values/vector for disconnected graph are included - rw_pos: # use same name as pe_encoder - pos_level: node - pos_type: rw_return_probs - ksteps: 16 - - num_workers: -1 # -1 to use all - persistent_workers: False # if use persistent worker at the start of each epoch. - # Using persistent_workers false might make the start of each epoch very long. - - -architecture: - model_type: FullGraphMultiTaskNetwork - mup_base_path: null - pre_nn: # Set as null to avoid a pre-nn network - out_dim: 16 - hidden_dims: 16 - depth: 1 - activation: relu - last_activation: none - dropout: &dropout 0.1 - normalization: &normalization layer_norm - last_normalization: *normalization - residual_type: none - - pre_nn_edges: null # Set as null to avoid a pre-nn network - - pe_encoders: - out_dim: 32 - pool: "sum" #"mean" "max" - last_norm: None #"batch_norm", "layer_norm" - encoders: #la_pos | rw_pos - la_pos: # Set as null to avoid a pre-nn network - encoder_type: "laplacian_pe" - input_keys: ["laplacian_eigvec", "laplacian_eigval"] - output_keys: ["feat"] - hidden_dim: 64 - out_dim: 32 - model_type: 'DeepSet' #'Transformer' or 'DeepSet' - num_layers: 2 - num_layers_post: 1 # Num. 
layers to apply after pooling - dropout: 0.1 - first_normalization: "none" #"batch_norm" or "layer_norm" - rw_pos: - encoder_type: "mlp" - input_keys: ["rw_return_probs"] - output_keys: ["feat"] - hidden_dim: 64 - out_dim: 32 - num_layers: 2 - dropout: 0.1 - normalization: "layer_norm" #"batch_norm" or "layer_norm" - first_normalization: "layer_norm" #"batch_norm" or "layer_norm" - - - - gnn: # Set as null to avoid a post-nn network - in_dim: 16 # or otherwise the correct value - out_dim: &gnn_dim 16 - hidden_dims: *gnn_dim - depth: 1 - activation: gelu - last_activation: none - dropout: 0.1 - normalization: "layer_norm" - last_normalization: *normalization - residual_type: simple - virtual_node: 'none' - layer_type: 'pyg:gcn' #pyg:gine #'pyg:gps' # pyg:gated-gcn, pyg:gine,pyg:gps - layer_kwargs: null # Parameters for the model itself. You could define dropout_attn: 0.1 - - - graph_output_nn: - graph: - pooling: [sum] - out_dim: *gnn_dim - hidden_dims: *gnn_dim - depth: 1 - activation: relu - last_activation: none - dropout: *dropout - normalization: *normalization - last_normalization: "none" - residual_type: none - - task_heads: - qm9: - task_level: graph - out_dim: 19 - hidden_dims: 16 - depth: 1 - activation: relu - last_activation: none - dropout: *dropout - normalization: *normalization - last_normalization: "none" - residual_type: none - tox21: - task_level: graph - out_dim: 12 - hidden_dims: 16 - depth: 1 - activation: relu - last_activation: sigmoid - dropout: *dropout - normalization: *normalization - last_normalization: "none" - residual_type: none - zinc: - task_level: graph - out_dim: 3 - hidden_dims: 16 - depth: 2 - activation: relu - last_activation: none - dropout: *dropout - normalization: *normalization - last_normalization: "none" - residual_type: none - -#Task-specific -predictor: - metrics_on_progress_bar: - qm9: ["mae"] - tox21: ["auroc"] - zinc: ["mae"] - loss_fun: - qm9: mae - tox21: bce - zinc: mae - random_seed: *seed - optim_kwargs: - lr: 
4.e-5 # warmup can be scheduled using torch_scheduler_kwargs - # weight_decay: 1.e-7 - torch_scheduler_kwargs: - module_type: WarmUpLinearLR - max_num_epochs: &max_epochs 1 - warmup_epochs: 1 - verbose: False - scheduler_kwargs: - # monitor: &monitor qm9/mae/train - # mode: min - # frequency: 1 - target_nan_mask: null # null: no mask, 0: 0 mask, ignore-flatten, ignore-mean-per-label - multitask_handling: flatten # flatten, mean-per-label - -# Task-specific -metrics: - qm9: &qm9_metrics - - name: mae - metric: mae - target_nan_mask: null - multitask_handling: flatten - threshold_kwargs: null - - name: pearsonr - metric: pearsonr - threshold_kwargs: null - target_nan_mask: null - multitask_handling: mean-per-label - - name: r2_score - metric: r2_score - target_nan_mask: null - multitask_handling: mean-per-label - threshold_kwargs: null - tox21: - - name: auroc - metric: auroc - task: binary - multitask_handling: mean-per-label - threshold_kwargs: null - - name: avpr - metric: average_precision - task: binary - multitask_handling: mean-per-label - threshold_kwargs: null - - name: f1 > 0.5 - metric: f1 - multitask_handling: mean-per-label - target_to_int: True - num_classes: 2 - average: micro - threshold_kwargs: &threshold_05 - operator: greater - threshold: 0.5 - th_on_preds: True - th_on_target: True - - name: precision > 0.5 - metric: precision - multitask_handling: mean-per-label - average: micro - threshold_kwargs: *threshold_05 - zinc: *qm9_metrics - -trainer: - seed: *seed - logger: - save_dir: logs/neurips2023-small/ - name: *name - project: *name - #early_stopping: - # monitor: *monitor - # min_delta: 0 - # patience: 10 - # mode: &mode min - model_checkpoint: - dirpath: models_checkpoints/neurips2023-small-gcn/ - filename: *name - # monitor: *monitor - # mode: *mode - # save_top_k: 1 - save_last: True - trainer: - max_epochs: *max_epochs - min_epochs: 1 - check_val_every_n_epoch: 20 From d8786e9e32f6a1fbacf02d4f26e1dc8d26e7226c Mon Sep 17 00:00:00 2001 From: 
DomInvivo Date: Fri, 6 Sep 2024 22:25:37 -0400 Subject: [PATCH 130/175] More removal of ipus --- expts/configs/config_gps_10M_pcqm4m.yaml | 33 +---- expts/configs/config_gps_10M_pcqm4m_mod.yaml | 4 +- expts/configs/config_mpnn_10M_b3lyp.yaml | 34 +---- expts/configs/config_mpnn_pcqm4m.yaml | 16 +-- expts/hydra-configs/README.md | 4 +- .../base_config/large.yaml | 31 +--- .../base_config/large_pcba.yaml | 31 +--- .../base_config/large_pcqm_g25.yaml | 31 +--- .../base_config/large_pcqm_n4.yaml | 31 +--- .../base_config/small.yaml | 22 +-- .../baseline/config_small_gcn_baseline.yaml | 24 +--- .../baseline/config_small_gin_baseline.yaml | 2 +- .../baseline/config_small_gine_baseline.yaml | 2 +- .../config_classifigression_l1000.yaml | 37 +---- .../neurips2023_configs/config_large_gcn.yaml | 2 +- .../config_large_gcn_g25.yaml | 2 +- .../config_large_gcn_n4.yaml | 2 +- .../config_large_gcn_pcba.yaml | 2 +- .../neurips2023_configs/config_large_gin.yaml | 2 +- .../config_large_gin_g25.yaml | 2 +- .../config_large_gin_n4.yaml | 2 +- .../config_large_gin_pcba.yaml | 2 +- .../config_large_gine.yaml | 2 +- .../config_large_gine_g25.yaml | 2 +- .../config_large_gine_n4.yaml | 2 +- .../config_large_gine_pcba.yaml | 2 +- .../config_large_mpnn.yaml | 2 +- .../neurips2023_configs/config_luis_jama.yaml | 37 +---- .../config_small_gated_gcn.yaml | 2 +- .../neurips2023_configs/config_small_gcn.yaml | 2 +- .../config_small_gcn_gpu.yaml | 2 +- .../neurips2023_configs/config_small_gin.yaml | 2 +- .../config_small_gine.yaml | 2 +- .../config_small_mpnn.yaml | 2 +- .../config_large_gcn_mcf7.yaml | 22 +-- .../config_large_gcn_pcba.yaml | 22 +-- .../config_large_gcn_vcap.yaml | 22 +-- .../single_task_gin/config_large_gin_g25.yaml | 22 +-- .../config_large_gin_mcf7.yaml | 24 +--- .../single_task_gin/config_large_gin_n4.yaml | 22 +-- .../config_large_gin_pcba.yaml | 22 +-- .../single_task_gin/config_large_gin_pcq.yaml | 22 +-- .../config_large_gin_vcap.yaml | 24 +--- 
.../config_large_gine_g25.yaml | 24 +--- .../config_large_gine_mcf7.yaml | 24 +--- .../config_large_gine_n4.yaml | 24 +--- .../config_large_gine_pcba.yaml | 24 +--- .../config_large_gine_pcq.yaml | 24 +--- .../config_large_gine_vcap.yaml | 24 +--- graphium/config/_loader.py | 121 ++-------------- graphium/data/datamodule.py | 106 +------------- graphium/data/normalization.py | 6 +- .../nn/architectures/global_architectures.py | 8 +- graphium/nn/base_graph_layer.py | 8 +- graphium/nn/pyg_layers/gps_pyg.py | 68 +++------ graphium/nn/pyg_layers/utils.py | 5 +- tests/test_attention.py | 4 +- tests/test_base_layers.py | 6 +- tests/test_ipu_to_dense_batch.py | 133 ------------------ tests/test_training.py | 4 +- 60 files changed, 112 insertions(+), 1080 deletions(-) delete mode 100644 tests/test_ipu_to_dense_batch.py diff --git a/expts/configs/config_gps_10M_pcqm4m.yaml b/expts/configs/config_gps_10M_pcqm4m.yaml index bd013fd7f..f91553779 100644 --- a/expts/configs/config_gps_10M_pcqm4m.yaml +++ b/expts/configs/config_gps_10M_pcqm4m.yaml @@ -1,11 +1,11 @@ -# Testing the mpnn only model with the PCQMv2 dataset on IPU. +# Testing the mpnn only model with the PCQMv2 dataset. constants: name: &name pcqm4mv2_mpnn_4layer seed: &seed 42 raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,35 +28,6 @@ accelerator: precision: 16 accumulate_grad_batches: 4 - ipu_config: - - deviceIterations(20) # IPU would require large batches to be ready for the model. - - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - - ipu_inference_config: # Optional. 
If not provided, same as `ipu_config` - - deviceIterations(80) # IPU would require large batches to be ready for the model. - - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 256 -# batch_size_inference: 64 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/configs/config_gps_10M_pcqm4m_mod.yaml b/expts/configs/config_gps_10M_pcqm4m_mod.yaml index d19a97f2a..4f885cf8d 100644 --- a/expts/configs/config_gps_10M_pcqm4m_mod.yaml +++ b/expts/configs/config_gps_10M_pcqm4m_mod.yaml @@ -1,10 +1,10 @@ -# Testing the mpnn only model with the PCQMv2 dataset on IPU. +# Testing the mpnn only model with the PCQMv2 dataset. constants: name: &name pcqm4mv2_mpnn_4layer seed: &seed 42 raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: gpu # cpu or ipu or gpu + type: gpu # cpu or gpu datamodule: module_type: "MultitaskFromSmilesDataModule" diff --git a/expts/configs/config_mpnn_10M_b3lyp.yaml b/expts/configs/config_mpnn_10M_b3lyp.yaml index 9bd43f78b..69113960d 100644 --- a/expts/configs/config_mpnn_10M_b3lyp.yaml +++ b/expts/configs/config_mpnn_10M_b3lyp.yaml @@ -1,11 +1,11 @@ -# Testing the mpnn only model with the b3lyp dataset on IPU. +# Testing the mpnn only model with the b3lyp dataset. 
constants: name: &name b3lyp_mpnn_4layer seed: &seed 42 raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,36 +28,6 @@ accelerator: precision: 16 accumulate_grad_batches: 4 - ipu_config: - - deviceIterations(20) # IPU would require large batches to be ready for the model. - - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - - ipu_inference_config: # Optional. If not provided, same as `ipu_config` - - deviceIterations(80) # IPU would require large batches to be ready for the model. - - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 256 -# batch_size_inference: 64 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/configs/config_mpnn_pcqm4m.yaml b/expts/configs/config_mpnn_pcqm4m.yaml index 358ad6c05..0ba8d3bd5 100644 --- a/expts/configs/config_mpnn_pcqm4m.yaml +++ b/expts/configs/config_mpnn_pcqm4m.yaml @@ -1,10 +1,10 @@ -# Testing the mpnn only model with the PCQMv2 dataset on IPU. +# Testing the mpnn only model with the PCQMv2 dataset. 
constants: name: &name pcqm4mv2_mpnn_4layer seed: &seed 42 raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: cpu # cpu or ipu or gpu + type: cpu # cpu or gpu datamodule: module_type: "MultitaskFromSmilesDataModule" @@ -56,18 +56,6 @@ datamodule: persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - # ipu_dataloader_training_opts: - # mode: async - # max_num_nodes_per_graph: 20 # train max nodes: 20, max_edges: 54 - # max_num_edges_per_graph: 60 - - # ipu_dataloader_inference_opts: - # mode: async - # max_num_nodes_per_graph: 20 # valid max nodes: 51, max_edges: 118 - # max_num_edges_per_graph: 120 - # # test-dev max nodes: 50, max_edges: 116 - # # test-challenge max nodes: 51, max_edges: 106 - architecture: model_type: FullGraphMultiTaskNetwork mup_base_path: null diff --git a/expts/hydra-configs/README.md b/expts/hydra-configs/README.md index 40625917d..77d2569ba 100644 --- a/expts/hydra-configs/README.md +++ b/expts/hydra-configs/README.md @@ -1,14 +1,14 @@ # Configuring Graphium with Hydra This document provides users with a point of entry to composing configs in Graphium. As a flexible library with many features, configuration is an important part of Graphium. To make configurations as reusable as possible while providing maximum flexibility, we integrated Graphium with `hydra`. Our config structure is designed to make the following functionality as accessible as possible: -- Switching between **accelerators** (CPU, GPU and IPU) +- Switching between **accelerators** (CPU, GPU) - **Benchmarking** different models on the same dataset - **Fine-tuning** a pre-trained model on a new dataset In what follows, we describe how each of the above functionality is achieved and how users can benefit from this design to achieve the most with Graphium with as little configuration as possible. 
## Accelerators -With Graphium supporting CPU, GPU and IPU hardware, easily switching between these accelerators is pre-configured. General, accelerator-specific configs are specified under `accelerator/`, whereas experiment-specific differences between the accelerators are specialized under `training/accelerator`. +With Graphium supporting CPU, GPU hardware, easily switching between these accelerators is pre-configured. General, accelerator-specific configs are specified under `accelerator/`, whereas experiment-specific differences between the accelerators are specialized under `training/accelerator`. ## Benchmarking Benchmarking multiple models on the same datasets and tasks requires us to easily switch between model configurations without redefining major parts of the architecture, task heads, featurization, metrics, predictor, etc. For example, when changing from a GCN to a GIN model, a simple switch of `architecture.gnn.layer_type: 'pyg:gin'` might suffice. Hence, we abstract the `model` configs under `model/` where such model configurations can be specified. diff --git a/expts/neurips2023_configs/base_config/large.yaml b/expts/neurips2023_configs/base_config/large.yaml index 938c958da..6b6e9b3b7 100644 --- a/expts/neurips2023_configs/base_config/large.yaml +++ b/expts/neurips2023_configs/base_config/large.yaml @@ -7,7 +7,7 @@ constants: datacache_path: "/localdata/neurips2023-large/" accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -31,35 +31,6 @@ accelerator: precision: 16-true accumulate_grad_batches: 2 - ipu_config: - - deviceIterations(30) # IPU would require large batches to be ready for the model. 
- - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 96) - - Precision.enableStochasticRounding(True) - # - Precision.enableFloatingPointExceptions(True) - - ipu_inference_config: - # set device iteration and replication factor to 1 during inference - # gradient accumulation was set to 1 in the code - - deviceIterations(1) - - replicationFactor(1) - - Precision.enableStochasticRounding(False) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# args: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/neurips2023_configs/base_config/large_pcba.yaml b/expts/neurips2023_configs/base_config/large_pcba.yaml index 12554be9b..074d6d132 100644 --- a/expts/neurips2023_configs/base_config/large_pcba.yaml +++ b/expts/neurips2023_configs/base_config/large_pcba.yaml @@ -7,7 +7,7 @@ constants: datacache_path: "/localdata/neurips2023-large/" accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -31,35 +31,6 @@ accelerator: precision: 16-true accumulate_grad_batches: 2 - ipu_config: - - deviceIterations(30) # IPU would require large batches to be ready for the model. 
- - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 96) - - Precision.enableStochasticRounding(True) - # - Precision.enableFloatingPointExceptions(True) - - ipu_inference_config: - # set device iteration and replication factor to 1 during inference - # gradient accumulation was set to 1 in the code - - deviceIterations(1) - - replicationFactor(1) - - Precision.enableStochasticRounding(False) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# args: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml b/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml index 0c0d3aef7..a6b4bb81f 100644 --- a/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml +++ b/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml @@ -7,7 +7,7 @@ constants: datacache_path: "/localdata/neurips2023-large/" accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -31,35 +31,6 @@ accelerator: precision: 16-true accumulate_grad_batches: 2 - ipu_config: - - deviceIterations(30) # IPU would require large batches to be ready for the model. 
- - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 96) - - Precision.enableStochasticRounding(True) - # - Precision.enableFloatingPointExceptions(True) - - ipu_inference_config: - # set device iteration and replication factor to 1 during inference - # gradient accumulation was set to 1 in the code - - deviceIterations(1) - - replicationFactor(1) - - Precision.enableStochasticRounding(False) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# args: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml b/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml index 6eb78a2dd..3674bfcec 100644 --- a/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml +++ b/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml @@ -7,7 +7,7 @@ constants: datacache_path: "/localdata/neurips2023-large/" accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -31,35 +31,6 @@ accelerator: precision: 16-true accumulate_grad_batches: 2 - ipu_config: - - deviceIterations(30) # IPU would require large batches to be ready for the model. 
- - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 96) - - Precision.enableStochasticRounding(True) - # - Precision.enableFloatingPointExceptions(True) - - ipu_inference_config: - # set device iteration and replication factor to 1 during inference - # gradient accumulation was set to 1 in the code - - deviceIterations(1) - - replicationFactor(1) - - Precision.enableStochasticRounding(False) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# args: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/neurips2023_configs/base_config/small.yaml b/expts/neurips2023_configs/base_config/small.yaml index 80099b9fe..49fa3a6e8 100644 --- a/expts/neurips2023_configs/base_config/small.yaml +++ b/expts/neurips2023_configs/base_config/small.yaml @@ -6,7 +6,7 @@ constants: entity: multitask-gnn accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -29,26 +29,6 @@ accelerator: precision: 16 accumulate_grad_batches: 4 - ipu_config: - - deviceIterations(5) # IPU would require large batches to be ready for the model. 
- - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml b/expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml index 2405ec4aa..dcefbcab3 100644 --- a/expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml +++ b/expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml @@ -1,11 +1,11 @@ -# Testing the gcn model with the PCQMv2 dataset on IPU. +# Testing the gcn model with the PCQMv2 dataset. constants: name: &name neurips2023_small_data_gcn seed: &seed 3000 raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,26 +28,6 @@ accelerator: precision: 16 accumulate_grad_batches: 4 - ipu_config: - - deviceIterations(5) # IPU would require large batches to be ready for the model. 
- - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/neurips2023_configs/baseline/config_small_gin_baseline.yaml b/expts/neurips2023_configs/baseline/config_small_gin_baseline.yaml index bc96d1057..3dbdca7d7 100644 --- a/expts/neurips2023_configs/baseline/config_small_gin_baseline.yaml +++ b/expts/neurips2023_configs/baseline/config_small_gin_baseline.yaml @@ -1,4 +1,4 @@ -# Testing the gin model with the PCQMv2 dataset on IPU. +# Testing the gin model with the PCQMv2 dataset. constants: name: &name neurips2023_small_data_gin config_override: "expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml" diff --git a/expts/neurips2023_configs/baseline/config_small_gine_baseline.yaml b/expts/neurips2023_configs/baseline/config_small_gine_baseline.yaml index 431235bb4..90389e008 100644 --- a/expts/neurips2023_configs/baseline/config_small_gine_baseline.yaml +++ b/expts/neurips2023_configs/baseline/config_small_gine_baseline.yaml @@ -1,4 +1,4 @@ -# Testing the gine model with the PCQMv2 dataset on IPU. +# Testing the gine model with the PCQMv2 dataset. 
constants: name: &name neurips2023_small_data_gine config_override: "expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml" diff --git a/expts/neurips2023_configs/config_classifigression_l1000.yaml b/expts/neurips2023_configs/config_classifigression_l1000.yaml index fb77ad457..16d6d1c73 100644 --- a/expts/neurips2023_configs/config_classifigression_l1000.yaml +++ b/expts/neurips2023_configs/config_classifigression_l1000.yaml @@ -1,44 +1,11 @@ -# Testing the mpnn only model with the PCQMv2 dataset on IPU. +# Testing the mpnn only model with the PCQMv2 dataset. constants: name: &name neurips2023_small_data_mpnn seed: &seed 42 raise_train_error: true # Whether the code should raise an error if it crashes during training -#accelerator: -# type: ipu # cpu or ipu or gpu -# config_override: -# datamodule: -# args: -# ipu_dataloader_training_opts: -# mode: async -# max_num_nodes_per_graph: 24 # train max nodes: 20, max_edges: 54 -# max_num_edges_per_graph: 60 -# ipu_dataloader_inference_opts: -# mode: async -# max_num_nodes_per_graph: 24 # valid max nodes: 51, max_edges: 118 -# max_num_edges_per_graph: 60 -# # Data handling-related -# batch_size_training: 50 -# batch_size_inference: 50 -## predictor: -## optim_kwargs: -## loss_scaling: 1024 -# trainer: -# trainer: -# precision: 16 -# accumulate_grad_batches: 4 -# -# ipu_config: -# - deviceIterations(20) # IPU would require large batches to be ready for the model. 
-# - replicationFactor(16) -# # - enableProfiling("graph_analyser") # The folder where the profile will be stored -# # - enableExecutableCaching("pop_compiler_cache") -# - TensorLocations.numIOTiles(128) -# - _Popart.set("defaultBufferingDepth", 128) -# - Precision.enableStochasticRounding(True) - accelerator: - type: gpu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: batch_size_training: 64 diff --git a/expts/neurips2023_configs/config_large_gcn.yaml b/expts/neurips2023_configs/config_large_gcn.yaml index 1219401df..cf56dcc3a 100644 --- a/expts/neurips2023_configs/config_large_gcn.yaml +++ b/expts/neurips2023_configs/config_large_gcn.yaml @@ -1,4 +1,4 @@ -# Running the gcn model with the largemix dataset on IPU. +# Running the gcn model with the largemix dataset. defaults: - base_config: large diff --git a/expts/neurips2023_configs/config_large_gcn_g25.yaml b/expts/neurips2023_configs/config_large_gcn_g25.yaml index 35c4e27d5..80a72cc3b 100644 --- a/expts/neurips2023_configs/config_large_gcn_g25.yaml +++ b/expts/neurips2023_configs/config_large_gcn_g25.yaml @@ -1,4 +1,4 @@ -# Running the gcn model with the largemix dataset on IPU. +# Running the gcn model with the largemix dataset. defaults: # - base_config: large diff --git a/expts/neurips2023_configs/config_large_gcn_n4.yaml b/expts/neurips2023_configs/config_large_gcn_n4.yaml index 61d335c12..616feec09 100644 --- a/expts/neurips2023_configs/config_large_gcn_n4.yaml +++ b/expts/neurips2023_configs/config_large_gcn_n4.yaml @@ -1,4 +1,4 @@ -# Running the gcn model with the largemix dataset on IPU. +# Running the gcn model with the largemix dataset. 
defaults: # - base_config: large diff --git a/expts/neurips2023_configs/config_large_gcn_pcba.yaml b/expts/neurips2023_configs/config_large_gcn_pcba.yaml index f11d8595d..d95401cf6 100644 --- a/expts/neurips2023_configs/config_large_gcn_pcba.yaml +++ b/expts/neurips2023_configs/config_large_gcn_pcba.yaml @@ -1,4 +1,4 @@ -# Running the gcn model with the largemix dataset on IPU. +# Running the gcn model with the largemix dataset. defaults: # - base_config: large diff --git a/expts/neurips2023_configs/config_large_gin.yaml b/expts/neurips2023_configs/config_large_gin.yaml index 6c3f47898..b2a0186f3 100644 --- a/expts/neurips2023_configs/config_large_gin.yaml +++ b/expts/neurips2023_configs/config_large_gin.yaml @@ -1,4 +1,4 @@ -# Running the gin model with the largemix dataset on IPU. +# Running the gin model with the largemix dataset. defaults: - base_config: large - _self_ diff --git a/expts/neurips2023_configs/config_large_gin_g25.yaml b/expts/neurips2023_configs/config_large_gin_g25.yaml index 538e994b1..702e9fe68 100644 --- a/expts/neurips2023_configs/config_large_gin_g25.yaml +++ b/expts/neurips2023_configs/config_large_gin_g25.yaml @@ -1,4 +1,4 @@ -# Running the gin model with the largemix dataset on IPU. +# Running the gin model with the largemix dataset. defaults: # - base_config: large - base_config: large_pcqm_g25 diff --git a/expts/neurips2023_configs/config_large_gin_n4.yaml b/expts/neurips2023_configs/config_large_gin_n4.yaml index c51e0e07d..3e41cf95b 100644 --- a/expts/neurips2023_configs/config_large_gin_n4.yaml +++ b/expts/neurips2023_configs/config_large_gin_n4.yaml @@ -1,4 +1,4 @@ -# Running the gin model with the largemix dataset on IPU. +# Running the gin model with the largemix dataset. 
defaults: # - base_config: large # - base_config: large_pcqm_g25 diff --git a/expts/neurips2023_configs/config_large_gin_pcba.yaml b/expts/neurips2023_configs/config_large_gin_pcba.yaml index 8bd33609b..af0e4945e 100644 --- a/expts/neurips2023_configs/config_large_gin_pcba.yaml +++ b/expts/neurips2023_configs/config_large_gin_pcba.yaml @@ -1,4 +1,4 @@ -# Running the gin model with the largemix dataset on IPU. +# Running the gin model with the largemix dataset. defaults: # - base_config: large # - base_config: large_pcqm_g25 diff --git a/expts/neurips2023_configs/config_large_gine.yaml b/expts/neurips2023_configs/config_large_gine.yaml index 793304ce0..6f82d3233 100644 --- a/expts/neurips2023_configs/config_large_gine.yaml +++ b/expts/neurips2023_configs/config_large_gine.yaml @@ -1,4 +1,4 @@ -# Running the gine model with the largemix dataset on IPU. +# Running the gine model with the largemix dataset. defaults: - base_config: large diff --git a/expts/neurips2023_configs/config_large_gine_g25.yaml b/expts/neurips2023_configs/config_large_gine_g25.yaml index e8002be3b..cceaa448f 100644 --- a/expts/neurips2023_configs/config_large_gine_g25.yaml +++ b/expts/neurips2023_configs/config_large_gine_g25.yaml @@ -1,4 +1,4 @@ -# Running the gine model with the largemix dataset on IPU. +# Running the gine model with the largemix dataset. defaults: # - base_config: large diff --git a/expts/neurips2023_configs/config_large_gine_n4.yaml b/expts/neurips2023_configs/config_large_gine_n4.yaml index df07380a4..d298fc183 100644 --- a/expts/neurips2023_configs/config_large_gine_n4.yaml +++ b/expts/neurips2023_configs/config_large_gine_n4.yaml @@ -1,4 +1,4 @@ -# Running the gine model with the largemix dataset on IPU. +# Running the gine model with the largemix dataset. 
defaults: # - base_config: large diff --git a/expts/neurips2023_configs/config_large_gine_pcba.yaml b/expts/neurips2023_configs/config_large_gine_pcba.yaml index 505935a57..668b7dfc2 100644 --- a/expts/neurips2023_configs/config_large_gine_pcba.yaml +++ b/expts/neurips2023_configs/config_large_gine_pcba.yaml @@ -1,4 +1,4 @@ -# Running the gine model with the largemix dataset on IPU. +# Running the gine model with the largemix dataset. defaults: # - base_config: large diff --git a/expts/neurips2023_configs/config_large_mpnn.yaml b/expts/neurips2023_configs/config_large_mpnn.yaml index 365927473..ca280f68a 100644 --- a/expts/neurips2023_configs/config_large_mpnn.yaml +++ b/expts/neurips2023_configs/config_large_mpnn.yaml @@ -1,4 +1,4 @@ -# Running the mpnn model with the largemix dataset on IPU. +# Running the mpnn model with the largemix dataset. defaults: - base_config: large diff --git a/expts/neurips2023_configs/config_luis_jama.yaml b/expts/neurips2023_configs/config_luis_jama.yaml index f71ab65f7..dc25ed212 100644 --- a/expts/neurips2023_configs/config_luis_jama.yaml +++ b/expts/neurips2023_configs/config_luis_jama.yaml @@ -1,44 +1,11 @@ -# Testing the mpnn only model with the PCQMv2 dataset on IPU. +# Testing the mpnn only model with the PCQMv2 dataset. 
constants: name: &name neurips2023_small_data_mpnn seed: &seed 42 raise_train_error: true # Whether the code should raise an error if it crashes during training -# accelerator: -# type: ipu # cpu or ipu or gpu -# config_override: -# datamodule: -# args: -# ipu_dataloader_training_opts: -# mode: async -# max_num_nodes_per_graph: 24 # train max nodes: 20, max_edges: 54 -# max_num_edges_per_graph: 60 -# ipu_dataloader_inference_opts: -# mode: async -# max_num_nodes_per_graph: 24 # valid max nodes: 51, max_edges: 118 -# max_num_edges_per_graph: 60 -# # Data handling-related -# batch_size_training: 50 -# batch_size_inference: 50 -# predictor: -# optim_kwargs: -# loss_scaling: 1024 -# trainer: -# trainer: -# precision: 16 -# accumulate_grad_batches: 4 - - # ipu_config: - # - deviceIterations(20) # IPU would require large batches to be ready for the model. - # - replicationFactor(16) - # # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # # - enableExecutableCaching("pop_compiler_cache") - # - TensorLocations.numIOTiles(128) - # - _Popart.set("defaultBufferingDepth", 128) - # - Precision.enableStochasticRounding(True) - accelerator: - type: cpu # cpu or ipu or gpu + type: cpu # cpu or gpu config_override: datamodule: batch_size_training: 64 diff --git a/expts/neurips2023_configs/config_small_gated_gcn.yaml b/expts/neurips2023_configs/config_small_gated_gcn.yaml index 8e00d26f6..5a542d96d 100644 --- a/expts/neurips2023_configs/config_small_gated_gcn.yaml +++ b/expts/neurips2023_configs/config_small_gated_gcn.yaml @@ -1,4 +1,4 @@ -# Testing the gated_gcn model with the PCQMv2 dataset on IPU. +# Testing the gated_gcn model with the PCQMv2 dataset. 
defaults: - base_config: small diff --git a/expts/neurips2023_configs/config_small_gcn.yaml b/expts/neurips2023_configs/config_small_gcn.yaml index 114ce26dc..d43080a4c 100644 --- a/expts/neurips2023_configs/config_small_gcn.yaml +++ b/expts/neurips2023_configs/config_small_gcn.yaml @@ -1,4 +1,4 @@ -# Testing the gcn model with the toymix dataset on IPU. +# Testing the gcn model with the toymix dataset. defaults: - base_config: small diff --git a/expts/neurips2023_configs/config_small_gcn_gpu.yaml b/expts/neurips2023_configs/config_small_gcn_gpu.yaml index 03bca8b64..e1223da2f 100644 --- a/expts/neurips2023_configs/config_small_gcn_gpu.yaml +++ b/expts/neurips2023_configs/config_small_gcn_gpu.yaml @@ -12,7 +12,7 @@ architecture: layer_type: 'pyg:gcn' #pyg:gine #'pyg:gps' # pyg:gated-gcn, pyg:gine,pyg:gps accelerator: - type: gpu # cpu or ipu or gpu + type: gpu # cpu or gpu float32_matmul_precision: medium config_override: datamodule: diff --git a/expts/neurips2023_configs/config_small_gin.yaml b/expts/neurips2023_configs/config_small_gin.yaml index e018f722a..ff86251f7 100644 --- a/expts/neurips2023_configs/config_small_gin.yaml +++ b/expts/neurips2023_configs/config_small_gin.yaml @@ -1,4 +1,4 @@ -# Testing the gin model with the PCQMv2 dataset on IPU. +# Testing the gin model with the PCQMv2 dataset. defaults: - base_config: small diff --git a/expts/neurips2023_configs/config_small_gine.yaml b/expts/neurips2023_configs/config_small_gine.yaml index 111bebbc2..4ec66a4f2 100644 --- a/expts/neurips2023_configs/config_small_gine.yaml +++ b/expts/neurips2023_configs/config_small_gine.yaml @@ -1,4 +1,4 @@ -# Testing the gine model with the PCQMv2 dataset on IPU. +# Testing the gine model with the PCQMv2 dataset. 
defaults: - base_config: small diff --git a/expts/neurips2023_configs/config_small_mpnn.yaml b/expts/neurips2023_configs/config_small_mpnn.yaml index 357a8f560..12c7a17e1 100644 --- a/expts/neurips2023_configs/config_small_mpnn.yaml +++ b/expts/neurips2023_configs/config_small_mpnn.yaml @@ -1,4 +1,4 @@ -# Testing the mpnn only model with the PCQMv2 dataset on IPU. +# Testing the mpnn only model with the PCQMv2 dataset. defaults: - base_config: small diff --git a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_mcf7.yaml b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_mcf7.yaml index 197934a09..9c2726e04 100644 --- a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_mcf7.yaml +++ b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_mcf7.yaml @@ -5,7 +5,7 @@ constants: raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,26 +28,6 @@ accelerator: precision: 32 accumulate_grad_batches: 8 - ipu_config: - - deviceIterations(1) # IPU would require large batches to be ready for the model. - - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. 
diff --git a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_pcba.yaml b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_pcba.yaml index ac045310d..3d0f4e4da 100644 --- a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_pcba.yaml +++ b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_pcba.yaml @@ -5,7 +5,7 @@ constants: raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,26 +28,6 @@ accelerator: precision: 32 accumulate_grad_batches: 8 - ipu_config: - - deviceIterations(10) # IPU would require large batches to be ready for the model. - - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. 
diff --git a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_vcap.yaml b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_vcap.yaml index 5f2651fa4..5fb6eaf00 100644 --- a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_vcap.yaml +++ b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_vcap.yaml @@ -5,7 +5,7 @@ constants: raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,26 +28,6 @@ accelerator: precision: 32 accumulate_grad_batches: 8 - ipu_config: - - deviceIterations(1) # IPU would require large batches to be ready for the model. - - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. 
diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_g25.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_g25.yaml index d4d7d9553..dca0d2b82 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_g25.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_g25.yaml @@ -5,7 +5,7 @@ constants: raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,26 +28,6 @@ accelerator: precision: 32 accumulate_grad_batches: 8 - ipu_config: - - deviceIterations(10) # IPU would require large batches to be ready for the model. - - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_mcf7.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_mcf7.yaml index 781b888ea..d07eab6a8 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_mcf7.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_mcf7.yaml @@ -1,11 +1,11 @@ -# Testing the gine model with the PCQMv2 dataset on IPU. +# Testing the gine model with the PCQMv2 dataset. 
constants: name: &name neurips2023_large_data_gine_mcf7 seed: &seed 42 raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,26 +28,6 @@ accelerator: precision: 32 accumulate_grad_batches: 8 - ipu_config: - - deviceIterations(1) # IPU would require large batches to be ready for the model. - - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_n4.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_n4.yaml index bebfdc424..544846613 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_n4.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_n4.yaml @@ -5,7 +5,7 @@ constants: raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,26 +28,6 @@ accelerator: precision: 32 accumulate_grad_batches: 8 - ipu_config: - - deviceIterations(10) # IPU would require large batches to be ready for the model. 
- - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_pcba.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_pcba.yaml index b69bfc4c3..ade583ba6 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_pcba.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_pcba.yaml @@ -5,7 +5,7 @@ constants: raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,26 +28,6 @@ accelerator: precision: 32 accumulate_grad_batches: 8 - ipu_config: - - deviceIterations(10) # IPU would require large batches to be ready for the model. - - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. 
diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_pcq.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_pcq.yaml index fe320e443..2fec0d6e5 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_pcq.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_pcq.yaml @@ -5,7 +5,7 @@ constants: raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,26 +28,6 @@ accelerator: precision: 32 accumulate_grad_batches: 8 - ipu_config: - - deviceIterations(10) # IPU would require large batches to be ready for the model. - - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_vcap.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_vcap.yaml index 539123dec..82c212b70 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_vcap.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_vcap.yaml @@ -1,11 +1,11 @@ -# Testing the gine model with the PCQMv2 dataset on IPU. +# Testing the gine model with the PCQMv2 dataset. 
constants: name: &name neurips2023_large_data_gine_vcap seed: &seed 42 raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,26 +28,6 @@ accelerator: precision: 32 accumulate_grad_batches: 8 - ipu_config: - - deviceIterations(1) # IPU would require large batches to be ready for the model. - - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_g25.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_g25.yaml index 17f78ea4b..3a657c70d 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_g25.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_g25.yaml @@ -1,11 +1,11 @@ -# Testing the gine model with the PCQMv2 dataset on IPU. +# Testing the gine model with the PCQMv2 dataset. constants: name: &name neurips2023_large_data_gine_g25 seed: &seed 42 raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,26 +28,6 @@ accelerator: precision: 32 accumulate_grad_batches: 8 - ipu_config: - - deviceIterations(10) # IPU would require large batches to be ready for the model. 
- - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_mcf7.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_mcf7.yaml index 83b79bf00..71b2c1d77 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_mcf7.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_mcf7.yaml @@ -1,11 +1,11 @@ -# Testing the gine model with the PCQMv2 dataset on IPU. +# Testing the gine model with the PCQMv2 dataset. constants: name: &name neurips2023_large_data_gine_mcf7 seed: &seed 42 raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,26 +28,6 @@ accelerator: precision: 32 accumulate_grad_batches: 8 - ipu_config: - - deviceIterations(1) # IPU would require large batches to be ready for the model. 
- - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_n4.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_n4.yaml index 7ccf4ac18..54d81b3f8 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_n4.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_n4.yaml @@ -1,11 +1,11 @@ -# Testing the gine model with the PCQMv2 dataset on IPU. +# Testing the gine model with the PCQMv2 dataset. constants: name: &name neurips2023_large_data_gine_n4 seed: &seed 42 raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,26 +28,6 @@ accelerator: precision: 32 accumulate_grad_batches: 8 - ipu_config: - - deviceIterations(10) # IPU would require large batches to be ready for the model. 
- - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_pcba.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_pcba.yaml index 9b64240ff..b02de4f7d 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_pcba.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_pcba.yaml @@ -1,11 +1,11 @@ -# Testing the gine model with the PCQMv2 dataset on IPU. +# Testing the gine model with the PCQMv2 dataset. constants: name: &name neurips2023_large_data_gine_pcba seed: &seed 42 raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,26 +28,6 @@ accelerator: precision: 32 accumulate_grad_batches: 8 - ipu_config: - - deviceIterations(10) # IPU would require large batches to be ready for the model. 
- - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_pcq.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_pcq.yaml index b4c7638a8..7c797d744 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_pcq.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_pcq.yaml @@ -1,11 +1,11 @@ -# Testing the gine model with the PCQMv2 dataset on IPU. +# Testing the gine model with the PCQMv2 dataset. constants: name: &name neurips2023_large_data_gine_pcq seed: &seed 42 raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,26 +28,6 @@ accelerator: precision: 32 accumulate_grad_batches: 8 - ipu_config: - - deviceIterations(10) # IPU would require large batches to be ready for the model. 
- - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_vcap.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_vcap.yaml index b535ab03a..12b0a56a8 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_vcap.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_vcap.yaml @@ -1,11 +1,11 @@ -# Testing the gine model with the PCQMv2 dataset on IPU. +# Testing the gine model with the PCQMv2 dataset. constants: name: &name neurips2023_large_data_gine_vcap seed: &seed 42 raise_train_error: true # Whether the code should raise an error if it crashes during training accelerator: - type: ipu # cpu or ipu or gpu + type: gpu # cpu or gpu config_override: datamodule: args: @@ -28,26 +28,6 @@ accelerator: precision: 32 accumulate_grad_batches: 8 - ipu_config: - - deviceIterations(1) # IPU would require large batches to be ready for the model. 
- - replicationFactor(16) - # - enableProfiling("graph_analyser") # The folder where the profile will be stored - # - enableExecutableCaching("pop_compiler_cache") - - TensorLocations.numIOTiles(128) - - _Popart.set("defaultBufferingDepth", 128) - - Precision.enableStochasticRounding(True) - -# accelerator: -# type: cpu # cpu or ipu or gpu -# config_override: -# datamodule: -# batch_size_training: 64 -# batch_size_inference: 256 -# trainer: -# trainer: -# precision: 32 -# accumulate_grad_batches: 1 - datamodule: module_type: "MultitaskFromSmilesDataModule" args: # Matches that in the test_multitask_datamodule.py case. diff --git a/graphium/config/_loader.py b/graphium/config/_loader.py index 9596e2eac..1b06d3883 100644 --- a/graphium/config/_loader.py +++ b/graphium/config/_loader.py @@ -33,8 +33,6 @@ from graphium.data.datamodule import BaseDataModule, MultitaskFromSmilesDataModule from graphium.finetuning.finetuning_architecture import FullGraphFinetuningNetwork -from graphium.ipu.ipu_dataloader import IPUDataloaderOptions -from graphium.ipu.ipu_utils import import_poptorch, load_ipu_options from graphium.nn.architectures import FullGraphMultiTaskNetwork from graphium.nn.utils import MupMixin from graphium.trainer.metrics import MetricWrapper @@ -62,51 +60,22 @@ def get_accelerator( if (accelerator_type == "gpu") and (not torch.cuda.is_available()): raise ValueError(f"GPUs selected, but GPUs are not available on this device") - # Get the IPU info - if accelerator_type == "ipu": - poptorch = import_poptorch() - if poptorch is None: - raise ValueError("IPUs selected, but PopTorch is not available") - if not poptorch.ipuHardwareIsAvailable(): - raise ValueError( - "IPUs selected, but no IPU is available/visible on this device. " - "If you do have IPUs, please check that the IPUOF_VIPU_API_PARTITION_ID and " - "IPUOF_VIPU_API_HOST environment variables are set." 
- ) - # Fall on cpu at the end if accelerator_type is None: accelerator_type = "cpu" return accelerator_type -def _get_ipu_opts(config: Union[omegaconf.DictConfig, Dict[str, Any]]) -> Tuple[str, str]: - r""" - Get the paths of the IPU-specific config files from the main YAML config - """ - - accelerator_options = config["accelerator"] - accelerator_type = accelerator_options["type"] - - if accelerator_type != "ipu": - return None, None - ipu_opts = accelerator_options["ipu_config"] - ipu_inference_opts = accelerator_options.get("ipu_inference_config", None) - - return ipu_opts, ipu_inference_opts - - def load_datamodule( config: Union[omegaconf.DictConfig, Dict[str, Any]], accelerator_type: str ) -> BaseDataModule: """ Load the datamodule from the specified configurations at the key `datamodule: args`. - If the accelerator is IPU, load the IPU options as well. Parameters: config: The config file, with key `datamodule: args` - accelerator_type: The accelerator type, e.g. "cpu", "gpu", "ipu" + accelerator_type: The accelerator type, e.g. 
"cpu", "gpu" Returns: datamodule: The datamodule used to process and load the data """ @@ -118,53 +87,11 @@ def load_datamodule( # Instanciate the datamodule module_class = DATAMODULE_DICT[config["datamodule"]["module_type"]] - if accelerator_type != "ipu": - datamodule = module_class( - **config["datamodule"]["args"], - ) - return datamodule - - # IPU specific adjustments - else: - ipu_opts, ipu_inference_opts = _get_ipu_opts(config) - - # Default empty values for the IPU configurations - ipu_training_opts = None - - ipu_dataloader_training_opts = cfg_data.pop("ipu_dataloader_training_opts", {}) - ipu_dataloader_inference_opts = cfg_data.pop("ipu_dataloader_inference_opts", {}) - ipu_training_opts, ipu_inference_opts = load_ipu_options( - ipu_opts=ipu_opts, - seed=config["constants"]["seed"], - model_name=config["constants"]["name"], - gradient_accumulation=config["trainer"]["trainer"].get("accumulate_grad_batches", None), - ipu_inference_opts=ipu_inference_opts, - precision=config["trainer"]["trainer"].get("precision"), - ) - - # Define the Dataloader options for the IPU on the training sets - bz_train = cfg_data["batch_size_training"] - ipu_dataloader_training_opts = IPUDataloaderOptions( - batch_size=bz_train, **ipu_dataloader_training_opts - ) - ipu_dataloader_training_opts.set_kwargs() - - # Define the Dataloader options for the IPU on the inference sets - bz_test = cfg_data["batch_size_inference"] - ipu_dataloader_inference_opts = IPUDataloaderOptions( - batch_size=bz_test, **ipu_dataloader_inference_opts - ) - ipu_dataloader_inference_opts.set_kwargs() - - datamodule = module_class( - ipu_training_opts=ipu_training_opts, - ipu_inference_opts=ipu_inference_opts, - ipu_dataloader_training_opts=ipu_dataloader_training_opts, - ipu_dataloader_inference_opts=ipu_dataloader_inference_opts, - **config["datamodule"]["args"], - ) + datamodule = module_class( + **config["datamodule"]["args"], + ) + return datamodule - return datamodule def load_metrics(config: 
Union[omegaconf.DictConfig, Dict[str, Any]]) -> Dict[str, MetricWrapper]: @@ -305,17 +232,12 @@ def load_predictor( Defining the predictor module, which handles the training logic from `lightning.LighningModule` Parameters: model_class: The torch Module containing the main forward function - accelerator_type: The accelerator type, e.g. "cpu", "gpu", "ipu" + accelerator_type: The accelerator type, e.g. "cpu", "gpu" Returns: predictor: The predictor module """ - if accelerator_type == "ipu": - from graphium.ipu.ipu_wrapper import PredictorModuleIPU - - predictor_class = PredictorModuleIPU - else: - predictor_class = PredictorModule + predictor_class = PredictorModule cfg_pred = dict(deepcopy(config["predictor"])) predictor = predictor_class( @@ -383,42 +305,15 @@ def load_trainer( Defining the pytorch-lightning Trainer module. Parameters: config: The config file, with key `trainer` - accelerator_type: The accelerator type, e.g. "cpu", "gpu", "ipu" + accelerator_type: The accelerator type, e.g. "cpu", "gpu" date_time_suffix: The date and time of the current run. To be used for logging. 
Returns: trainer: the trainer module """ cfg_trainer = deepcopy(config["trainer"]) - # Define the IPU plugin if required - strategy = cfg_trainer["trainer"].pop("strategy", "auto") - if accelerator_type == "ipu": - ipu_opts, ipu_inference_opts = _get_ipu_opts(config) - - training_opts, inference_opts = load_ipu_options( - ipu_opts=ipu_opts, - ipu_inference_opts=ipu_inference_opts, - seed=config["constants"]["seed"], - model_name=config["constants"]["name"], - gradient_accumulation=config["trainer"]["trainer"].get("accumulate_grad_batches", None), - precision=config["trainer"]["trainer"].get("precision"), - ) - - if strategy != "auto": - raise ValueError("IPUs selected, but strategy is not set to 'auto'") - - from lightning_graphcore import IPUStrategy - - strategy = IPUStrategy(training_opts=training_opts, inference_opts=inference_opts) - # Get devices devices = cfg_trainer["trainer"].pop("devices", 1) - if accelerator_type == "ipu": - devices = 1 # number of IPUs used is defined in the ipu options files - - # Remove the gradient accumulation from IPUs, since it's handled by the device - if accelerator_type == "ipu": - cfg_trainer["trainer"].pop("accumulate_grad_batches", None) # Define the early stopping parameters trainer_kwargs = {} diff --git a/graphium/data/datamodule.py b/graphium/data/datamodule.py index edc5e207d..86d4297b4 100644 --- a/graphium/data/datamodule.py +++ b/graphium/data/datamodule.py @@ -492,12 +492,12 @@ def get_dataloader_kwargs(self, stage: RunningStage, shuffle: bool, **kwargs) -> """ loader_kwargs = {} - # Get batch size and IPU options for training set + # Get batch size for training set # if stage in [RunningStage.TRAINING, RunningStage.TUNING]: if stage in [RunningStage.TRAINING]: loader_kwargs["batch_size"] = self.batch_size_training - # Get batch size and IPU options for validation / testing sets + # Get batch size for validation / testing sets elif stage in [RunningStage.VALIDATING, RunningStage.TESTING, RunningStage.PREDICTING]: 
loader_kwargs["batch_size"] = self.batch_size_inference else: @@ -723,64 +723,7 @@ def __init__( self.epoch_sampling_fraction = epoch_sampling_fraction -class IPUDataModuleModifier: - def __init__( - self, - ipu_inference_opts: Optional["poptorch.Options"] = None, - ipu_training_opts: Optional["poptorch.Options"] = None, - ipu_dataloader_training_opts: Optional["IPUDataloaderOptions"] = None, - ipu_dataloader_inference_opts: Optional["IPUDataloaderOptions"] = None, - *args, - **kwargs, - ) -> None: - r""" - wrapper functions from the a `DataModule` to support IPU and IPU options To be used in dual inheritance, for example: - ``` - IPUDataModule(BaseDataModule, IPUDataModuleModifier): - def __init__(self, **kwargs): - BaseDataModule.__init__(self, **kwargs) - IPUDataModuleModifier.__init__(self, **kwargs) - ``` - - Parameters: - ipu_inference_opts: Options for the IPU in inference mode. Ignore if not using IPUs - ipu_training_opts: Options for the IPU in training mode. Ignore if not using IPUs - ipu_dataloader_kwargs_train_val: Options for the dataloader for the IPU. Ignore if not using IPUs - ipu_dataloader_kwargs_test: Options for the dataloader for the IPU. 
Ignore if not using IPUs - args: Arguments for the `DataModule` - kwargs: Keyword arguments for the `DataModule` - """ - self.ipu_inference_opts = ipu_inference_opts - self.ipu_training_opts = ipu_training_opts - self.ipu_dataloader_training_opts = ipu_dataloader_training_opts - self.ipu_dataloader_inference_opts = ipu_dataloader_inference_opts - - def _dataloader(self, dataset: Dataset, **kwargs) -> "poptorch.DataLoader": - """ - Get a poptorch dataloader for a given dataset - Parameters: - dataset: The dataset to use - kwargs: Keyword arguments for the dataloader - Returns: - The poptorch dataloader - """ - - # Use regular Dataloader if no IPUs - if ("ipu_options" not in kwargs.keys()) or (kwargs["ipu_options"] is None): - raise ValueError(f"No IPU options provided.") - - # Initialize the IPU dataloader - from graphium.ipu.ipu_dataloader import create_ipu_dataloader - - loader = create_ipu_dataloader( - dataset=dataset, - **kwargs, - ) - - return loader - - -class MultitaskFromSmilesDataModule(BaseDataModule, IPUDataModuleModifier): +class MultitaskFromSmilesDataModule(BaseDataModule): def __init__( self, task_specific_args: Union[Dict[str, DatasetProcessingParams], Dict[str, Any]], @@ -843,7 +786,6 @@ def __init__( multiprocessing_context=multiprocessing_context, collate_fn=collate_fn, ) - IPUDataModuleModifier.__init__(self, **kwargs) self._len = None self.task_specific_args = task_specific_args @@ -1304,41 +1246,6 @@ def get_folder_size(self, path): # check if the data items are actually saved into the folders return sum(os.path.getsize(osp.join(path, f)) for f in os.listdir(path)) - def get_dataloader_kwargs(self, stage: RunningStage, shuffle: bool, **kwargs) -> Dict[str, Any]: - """ - Get the options for the dataloader depending on the current stage. - - Parameters: - stage: Whether in Training, Validating, Testing, Sanity-checking, Predicting, or Tuning phase. - shuffle: set to ``True`` to have the data reshuffled at every epoch. 
- - Returns: - Arguments to pass to the `DataLoader` during initialization - """ - loader_kwargs = super().get_dataloader_kwargs(stage=stage, shuffle=shuffle, **kwargs) - - # Get batch size and IPU options for training set - # if stage in [RunningStage.TRAINING, RunningStage.TUNING]: - if stage in [RunningStage.TRAINING]: - loader_kwargs["ipu_dataloader_options"] = self.ipu_dataloader_training_opts - loader_kwargs["ipu_options"] = self.ipu_training_opts - - # Get batch size and IPU options for validation / testing sets - elif stage in [RunningStage.VALIDATING, RunningStage.TESTING, RunningStage.PREDICTING]: - loader_kwargs["ipu_dataloader_options"] = self.ipu_dataloader_inference_opts - loader_kwargs["ipu_options"] = self.ipu_inference_opts - else: - raise ValueError(f"Wrong value for `stage`. Provided `{stage}`") - - # Remove the IPU options if not available - if loader_kwargs["ipu_options"] is None: - loader_kwargs.pop("ipu_options") - if loader_kwargs["ipu_dataloader_options"] is not None: - logger.warning( - "`ipu_dataloader_options` will be ignored since it is provided without `ipu_options`." 
- ) - loader_kwargs.pop("ipu_dataloader_options") - return loader_kwargs def get_dataloader( self, dataset: Dataset, shuffle: bool, stage: RunningStage @@ -1365,11 +1272,8 @@ def get_dataloader( ) # turn shuffle off when sampler is used as sampler option is mutually exclusive with shuffle kwargs["shuffle"] = False - is_ipu = ("ipu_options" in kwargs.keys()) and (kwargs.get("ipu_options") is not None) - if is_ipu: - loader = IPUDataModuleModifier._dataloader(self, dataset=dataset, sampler=sampler, **kwargs) - else: - loader = BaseDataModule._dataloader(self, dataset=dataset, sampler=sampler, **kwargs) + + loader = BaseDataModule._dataloader(self, dataset=dataset, sampler=sampler, **kwargs) return loader diff --git a/graphium/data/normalization.py b/graphium/data/normalization.py index e57a9bcc8..d2e8444b0 100644 --- a/graphium/data/normalization.py +++ b/graphium/data/normalization.py @@ -112,13 +112,11 @@ def denormalize(self, input): return input elif self.method == "normal": mean, std = torch.tensor(self.data_mean), torch.tensor(self.data_std) - if input.device.type != "ipu": # Cast to device if not on IPU - mean, std = mean.to(input.device), std.to(input.device) + mean, std = mean.to(input.device), std.to(input.device) return (input * std) + mean elif self.method == "unit": dmax, dmin = torch.tensor(self.data_max), torch.tensor(self.data_min) - if input.device.type != "ipu": # Cast to device if not on IPU - dmax, dmin = dmax.to(input.device), dmin.to(input.device) + dmax, dmin = dmax.to(input.device), dmin.to(input.device) return input * (dmax - dmin) + dmin else: raise ValueError(f"normalization method {self.method} not recognised.") diff --git a/graphium/nn/architectures/global_architectures.py b/graphium/nn/architectures/global_architectures.py index 52bd132c0..cea3dc16a 100644 --- a/graphium/nn/architectures/global_architectures.py +++ b/graphium/nn/architectures/global_architectures.py @@ -14,7 +14,7 @@ from typing import Iterable, List, Dict, Literal, 
Tuple, Union, Callable, Any, Optional, Type from torch_geometric.data import Batch -from graphium.ipu.to_dense_batch import to_dense_batch +from torch_geometric.utils import to_dense_batch from loguru import logger # Misc imports @@ -40,9 +40,6 @@ ResidualConnectionRandom, ) from graphium.nn.utils import MupMixin - -poptorch = import_poptorch(raise_error=False) - import collections @@ -2004,8 +2001,7 @@ def compute_nodepairs( batch=batch, fill_value=fill_value, batch_size=batch_size, - max_num_nodes_per_graph=max_num_nodes, - drop_nodes_last_graph=drop_nodes_last_graph, + max_num_nodes=max_num_nodes, ) n = dense_feat.size(1) h_X = dense_feat[:, :, None].repeat(1, 1, n, 1) diff --git a/graphium/nn/base_graph_layer.py b/graphium/nn/base_graph_layer.py index 66869f888..0986cb9c6 100644 --- a/graphium/nn/base_graph_layer.py +++ b/graphium/nn/base_graph_layer.py @@ -258,14 +258,14 @@ def out_dim_factor(self) -> int: @property def max_num_nodes_per_graph(self) -> Optional[int]: """ - Get the maximum number of nodes per graph. Useful for reshaping a compiled model (IPU) + Get the maximum number of nodes per graph. Useful for reshaping a compiled model """ return self._max_num_nodes_per_graph @max_num_nodes_per_graph.setter def max_num_nodes_per_graph(self, value: Optional[int]): """ - Set the maximum number of nodes per graph. Useful for reshaping a compiled model (IPU) + Set the maximum number of nodes per graph. Useful for reshaping a compiled model """ if value is not None: assert isinstance(value, int) and ( @@ -276,14 +276,14 @@ def max_num_nodes_per_graph(self, value: Optional[int]): @property def max_num_edges_per_graph(self) -> Optional[int]: """ - Get the maximum number of nodes per graph. Useful for reshaping a compiled model (IPU) + Get the maximum number of nodes per graph. 
Useful for reshaping a compiled model """ return self._max_num_edges_per_graph @max_num_edges_per_graph.setter def max_num_edges_per_graph(self, value: Optional[int]): """ - Set the maximum number of nodes per graph. Useful for reshaping a compiled model (IPU) + Set the maximum number of nodes per graph. Useful for reshaping a compiled model """ if value is not None: assert isinstance(value, int) and ( diff --git a/graphium/nn/pyg_layers/gps_pyg.py b/graphium/nn/pyg_layers/gps_pyg.py index 3d8671c53..8dcb5b31b 100644 --- a/graphium/nn/pyg_layers/gps_pyg.py +++ b/graphium/nn/pyg_layers/gps_pyg.py @@ -18,6 +18,7 @@ from torch.nn import Module from torch import Tensor from torch_geometric.data import Batch +from torch_geometric.utils import to_dense_batch from graphium.nn.base_graph_layer import BaseGraphModule from graphium.nn.base_layers import FCLayer, MultiheadAttentionMup, MLP from graphium.nn.pyg_layers import ( @@ -29,13 +30,6 @@ ) from graphium.data.utils import get_keys from graphium.utils.decorators import classproperty -from graphium.ipu.to_dense_batch import ( - to_dense_batch, - to_sparse_batch, - to_packed_dense_batch, - to_sparse_batch_from_packed, -) -from graphium.ipu.ipu_utils import is_running_on_ipu PYG_LAYERS_DICT = { "pyg:gin": GINConvPyg, @@ -286,7 +280,7 @@ def forward(self, batch: Batch) -> Batch: # MLP block, with skip connection feat_mlp = self.mlp(feat) # Add the droppath to the output of the MLP - batch_size = None if feat.device.type != "ipu" else batch.graph_is_true.shape[0] + batch_size = None if self.droppath_ffn is not None: feat_mlp = self.droppath_ffn(feat_mlp, batch.batch, batch_size) feat = feat + feat_mlp @@ -376,49 +370,27 @@ def _to_dense_batch( batch: Batch, batch_size: Optional[int] = None, max_num_nodes_per_graph: Optional[int] = None, - on_ipu: bool = False, ) -> Tensor: """ Convert the batch of graphs to a dense batch. 
""" - if self._use_packing(batch): - attn_mask = batch.pack_attn_mask - key_padding_mask = None - idx = batch.pack_from_node_idx - h_dense = to_packed_dense_batch( - h, - pack_from_node_idx=idx, - pack_attn_mask=attn_mask, - max_num_nodes_per_pack=100, # TODO: This should be a parameter - ) - else: - attn_mask = None - h_dense, key_padding_mask, idx = to_dense_batch( - h, - batch=batch.batch, # The batch index as a vector that indicates for nodes of which graph it belongs to - batch_size=batch_size, - max_num_nodes_per_graph=max_num_nodes_per_graph, - drop_nodes_last_graph=on_ipu, - ) - key_padding_mask = ~key_padding_mask - return h_dense, attn_mask, key_padding_mask, idx - - def _to_sparse_batch(self, batch: Batch, h_dense: Tensor, idx: Tensor) -> Tensor: + attn_mask = None + h_dense, key_padding_mask = to_dense_batch( + h, + batch=batch.batch, # The batch index as a vector that indicates for nodes of which graph it belongs to + batch_size=batch_size, + max_num_nodes_per_graph=max_num_nodes_per_graph, + ) + key_padding_mask = ~key_padding_mask + return h_dense, attn_mask, key_padding_mask + + def _to_sparse_batch(self, batch: Batch, h_dense: Tensor, mask: torch.BoolTensor) -> Tensor: """ Convert the dense batch back to a sparse batch. """ - if self._use_packing(batch): - h = to_sparse_batch_from_packed( - h_dense, - pack_from_node_idx=idx, - ) - else: - h = to_sparse_batch( - h_dense, - mask_idx=idx, - ) - return h + + return h_dense[mask] def _self_attention_block(self, feat: Tensor, feat_in: Tensor, batch: Batch) -> Tensor: """ @@ -429,21 +401,17 @@ def _self_attention_block(self, feat: Tensor, feat_in: Tensor, batch: Batch) -> """ # Multi-head attention. 
- on_ipu = is_running_on_ipu() max_num_nodes_per_graph = None - if on_ipu: - max_num_nodes_per_graph = self.max_num_nodes_per_graph # Convert the tensor to a dense batch, then back to a sparse batch - batch_size = None if feat.device.type != "ipu" else batch.graph_is_true.shape[0] + batch_size = None # h[num_nodes, hidden_dim] -> h_dense[num_graphs, max_num_nodes, hidden_dim] - feat_dense, attn_mask, key_padding_mask, idx = self._to_dense_batch( + feat_dense, attn_mask, key_padding_mask = self._to_dense_batch( feat, batch=batch, # The batch index as a vector that indicates for nodes of which graph it belongs to batch_size=batch_size, max_num_nodes_per_graph=max_num_nodes_per_graph, - on_ipu=on_ipu, ) attn_bias = None @@ -456,7 +424,7 @@ def _self_attention_block(self, feat: Tensor, feat_in: Tensor, batch: Batch) -> ) # feat_attn[num_graphs, max_num_nodes, hidden_dim] -> feat_attn[num_nodes, hidden_dim] - feat_attn = self._to_sparse_batch(batch, feat_attn, idx) + feat_attn = self._to_sparse_batch(batch, feat_attn, attn_mask) # Dropout, residual, norm if self.dropout_attn is not None: diff --git a/graphium/nn/pyg_layers/utils.py b/graphium/nn/pyg_layers/utils.py index 886788940..331caa975 100644 --- a/graphium/nn/pyg_layers/utils.py +++ b/graphium/nn/pyg_layers/utils.py @@ -16,13 +16,13 @@ import torch import torch.nn as nn from torch_geometric.data import Batch +from torch_geometric.utils import to_dense_batch from typing import Tuple from torch import Tensor from torch_geometric.typing import SparseTensor from graphium.nn.base_layers import MLP, get_norm -from graphium.ipu.to_dense_batch import to_dense_batch, to_sparse_batch class PreprocessPositions(nn.Module): @@ -94,12 +94,11 @@ def forward( # pos: [batch, nodes, 3] # padding_mask: [batch, nodes] # idx: [totoal_nodes] - pos, mask, idx = to_dense_batch( + pos, mask = to_dense_batch( pos, batch=batch.batch, batch_size=batch_size, max_num_nodes_per_graph=max_num_nodes_per_graph, - drop_nodes_last_graph=False, ) # 
check nan with the pos from to_dense_batch, # and generate mask. 1 for nan, 0 for other values. diff --git a/tests/test_attention.py b/tests/test_attention.py index 28b9cd2a1..d1a8a19e2 100644 --- a/tests/test_attention.py +++ b/tests/test_attention.py @@ -21,8 +21,8 @@ import torch import unittest as ut from torch_geometric.data import Data, Batch +from torch_geometric.utils import to_dense_batch from copy import deepcopy -from graphium.ipu.to_dense_batch import to_dense_batch from graphium.nn.base_layers import MultiheadAttentionMup @@ -65,7 +65,7 @@ def test_attention_class(self): attention_layer_bias = MultiheadAttentionMup(biased_attention=True, **self.attn_kwargs) attention_layer_bias.eval() - h_dense, mask, _ = to_dense_batch( + h_dense, mask = to_dense_batch( bg.feat, batch=bg.batch, batch_size=None, diff --git a/tests/test_base_layers.py b/tests/test_base_layers.py index 2093619f2..d3d04cc39 100644 --- a/tests/test_base_layers.py +++ b/tests/test_base_layers.py @@ -19,10 +19,10 @@ import torch import unittest as ut from torch_geometric.data import Data, Batch +from torch_geometric.utils import to_dense_batch from copy import deepcopy from graphium.nn.base_layers import DropPath, TransformerEncoderLayerMup -from graphium.ipu.to_dense_batch import to_dense_batch, to_sparse_batch class test_Base_Layers(ut.TestCase): @@ -78,7 +78,7 @@ def test_transformer_encoder_layer_mup(self): biased_attention=False, d_model=self.in_dim, nhead=1, dim_feedforward=4 * self.in_dim ) - feat_dense, key_padding_mask, idx = to_dense_batch( + feat_dense, key_padding_mask = to_dense_batch( feat_in, batch=bg.batch, batch_size=self.batch_size, @@ -90,6 +90,6 @@ def test_transformer_encoder_layer_mup(self): h_out_dense = layer.forward(feat_dense) - h_out = to_sparse_batch(h_out_dense, mask_idx=idx) + h_out = h_out_dense[key_padding_mask] self.assertEqual(h_out.shape, feat_in.shape) diff --git a/tests/test_ipu_to_dense_batch.py b/tests/test_ipu_to_dense_batch.py deleted file mode 100644 
index 157b3b647..000000000 --- a/tests/test_ipu_to_dense_batch.py +++ /dev/null @@ -1,133 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. --------------------------------------------------------------------------------- -""" - - -import pytest -import torch -from torch_geometric.data import Data, Batch -from graphium.ipu.to_dense_batch import to_dense_batch - -import torch - -# Current library imports -from graphium.config._loader import load_datamodule, load_metrics, load_architecture, load_accelerator - - -@pytest.mark.ipu -class test_IPUBatch: - @pytest.fixture(autouse=True) - def setup_class(self): - self.in_dim = 12 - self.out_dim = 12 - self.in_dim_edges = 10 - self.out_dim_edges = 10 - self.edge_idx1 = torch.stack( - [torch.tensor([0, 1, 2, 3, 2], dtype=torch.int), torch.tensor([1, 2, 3, 0, 0], dtype=torch.int)] - ) - self.edge_idx2 = torch.stack( - [torch.tensor([0, 0, 0, 1], dtype=torch.int), torch.tensor([0, 1, 2, 0], dtype=torch.int)] - ) - self.x1 = torch.randn(self.edge_idx1.max().item() + 1, self.in_dim, dtype=torch.float32) - self.e1 = torch.randn(self.edge_idx1.shape[-1], self.in_dim_edges, dtype=torch.float32) - self.x2 = torch.randn(self.edge_idx2.max().item() + 1, self.in_dim, dtype=torch.float32) - self.e2 = torch.randn(self.edge_idx2.shape[-1], self.in_dim_edges, dtype=torch.float32) - self.g1 = Data(feat=self.x1, edge_index=self.edge_idx1, edge_feat=self.e1) - self.g2 = Data(feat=self.x2, edge_index=self.edge_idx2, edge_feat=self.e2) - 
self.bg = Batch.from_data_list([self.g1, self.g2]) - self.attn_kwargs = {"embed_dim": self.in_dim, "num_heads": 2, "batch_first": True} - - # @pytest.mark.skip - @pytest.mark.parametrize("max_num_nodes_per_graph, batch_size", [(10, 5), (20, 10), (30, 15)]) - def test_ipu_to_dense_batch(self, max_num_nodes_per_graph, batch_size): - # Run this test only if poptorch is available - try: - import poptorch - - opts = poptorch.Options() - opts.useIpuModel(True) - - class MyModel(torch.nn.Module): - def __init__(self): - super(MyModel, self).__init__() - - def forward(self, x, batch): - return to_dense_batch( - x, - batch=batch, - batch_size=batch_size, - max_num_nodes_per_graph=max_num_nodes_per_graph, - drop_nodes_last_graph=False, - ) - - model = MyModel() - model = model.eval() - poptorch_model_inf = poptorch.inferenceModel(model, options=opts) - # for data in train_dataloader: - out, mask, idx = poptorch_model_inf(self.bg.feat, self.bg.batch) - # Check the output sizes - assert out.size() == torch.Size([batch_size, max_num_nodes_per_graph, 12]) - # Check the mask for true / false values - assert mask.size() == torch.Size([batch_size, max_num_nodes_per_graph]) - assert torch.sum(mask) == 7 - assert (mask[0][:4] == True).all() - assert (mask[0][4:] == False).all() - assert (mask[1][:3] == True).all() - assert (mask[1][3:] == False).all() - assert (mask[2:] == False).all() - - # Check the idx are all the true values in the mask - assert (mask.flatten()[idx] == True).all() - poptorch_model_inf.detachFromDevice() - except ImportError: - pytest.skip("Skipping this test because poptorch is not available") - - def test_ipu_to_dense_batch_no_batch_no_max_nodes(self): - h_dense, mask = to_dense_batch( - self.bg.feat, - batch=None, - batch_size=None, - max_num_nodes_per_graph=None, - drop_nodes_last_graph=False, - ) - # Add assertions to check the output as needed - assert torch.allclose(h_dense, self.bg.feat.unsqueeze(0), atol=1e-5), "Tensors are not equal" - assert 
mask.size(1) == h_dense.size(1) - assert mask.all().item(), "Not all values in the tensor are True" - - def test_ipu_to_dense_batch_no_batch(self): - max_nodes_per_graph = 10 - h_dense, mask, id = to_dense_batch( - self.bg.feat, - batch=None, - batch_size=None, - max_num_nodes_per_graph=max_nodes_per_graph, - drop_nodes_last_graph=False, - ) - assert mask.size() == (1, max_nodes_per_graph) - assert torch.sum(mask) == 7 - assert torch.equal(id, torch.arange(7)) - assert h_dense.size() == (1, max_nodes_per_graph, self.bg.feat.size(-1)) - - def test_ipu_to_dense_batch_drop_last(self): - out, mask, idx = to_dense_batch( - self.bg.feat, - batch=None, - batch_size=None, - max_num_nodes_per_graph=3, - drop_nodes_last_graph=True, - ) - # Add assertions to check the output as needed - assert mask.size(1) == out.size(1) - # Check the mask and output have been clipped - assert mask.size() == torch.Size([1, 3]) - assert mask.all().item(), "Not all values in the tensor are True" diff --git a/tests/test_training.py b/tests/test_training.py index 789bbca3a..5457fbd7f 100644 --- a/tests/test_training.py +++ b/tests/test_training.py @@ -92,9 +92,7 @@ def call_cli_with_overrides(self, acc_type: str, acc_prec: str) -> None: "trainer.trainer.check_val_every_n_epoch=1", f"trainer.trainer.precision={acc_prec}", ] - if acc_type == "ipu": - overrides.append("accelerator.ipu_config=['useIpuModel(True)']") - overrides.append("accelerator.ipu_inference_config=['useIpuModel(True)']") + # Backup the original sys.argv original_argv = sys.argv.copy() From 6ed6bb806604cb40a3d92f26fdff1d7e4889b319 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 6 Sep 2024 22:34:09 -0400 Subject: [PATCH 131/175] More removal of ipus --- expts/configs/config_gps_10M_pcqm4m.yaml | 9 +---- expts/configs/config_gps_10M_pcqm4m_mod.yaml | 12 ------- expts/configs/config_mpnn_10M_b3lyp.yaml | 9 +---- .../training/accelerator/largemix_ipu.yaml | 8 ----- .../training/accelerator/pcqm4m_ipu.yaml | 8 ----- 
.../training/accelerator/toymix_ipu.yaml | 8 ----- .../base_config/large.yaml | 9 +---- .../base_config/large_pcba.yaml | 9 +---- .../base_config/large_pcqm_g25.yaml | 9 +---- .../base_config/large_pcqm_n4.yaml | 9 +---- .../base_config/small.yaml | 9 +---- .../baseline/config_small_gcn_baseline.yaml | 8 ----- .../config_large_gcn_mcf7.yaml | 8 ----- .../config_large_gcn_pcba.yaml | 8 ----- .../config_large_gcn_vcap.yaml | 8 ----- .../single_task_gin/config_large_gin_g25.yaml | 8 ----- .../config_large_gin_mcf7.yaml | 8 ----- .../single_task_gin/config_large_gin_n4.yaml | 8 ----- .../config_large_gin_pcba.yaml | 9 ----- .../single_task_gin/config_large_gin_pcq.yaml | 8 ----- .../config_large_gin_vcap.yaml | 8 ----- .../config_large_gine_g25.yaml | 8 ----- .../config_large_gine_mcf7.yaml | 8 ----- .../config_large_gine_n4.yaml | 8 ----- .../config_large_gine_pcba.yaml | 8 ----- .../config_large_gine_pcq.yaml | 8 ----- .../config_large_gine_vcap.yaml | 8 ----- graphium/cli/train_finetune_test.py | 35 ------------------- 28 files changed, 7 insertions(+), 256 deletions(-) diff --git a/expts/configs/config_gps_10M_pcqm4m.yaml b/expts/configs/config_gps_10M_pcqm4m.yaml index f91553779..8fc38812e 100644 --- a/expts/configs/config_gps_10M_pcqm4m.yaml +++ b/expts/configs/config_gps_10M_pcqm4m.yaml @@ -9,14 +9,7 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 20 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 60 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 16 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 120 + # Data handling-related batch_size_training: 64 batch_size_inference: 16 diff --git a/expts/configs/config_gps_10M_pcqm4m_mod.yaml b/expts/configs/config_gps_10M_pcqm4m_mod.yaml index 4f885cf8d..1587d2d06 100644 --- a/expts/configs/config_gps_10M_pcqm4m_mod.yaml +++ b/expts/configs/config_gps_10M_pcqm4m_mod.yaml @@ -80,18 +80,6 @@ 
datamodule: persistent_workers: False # if use persistent worker at the start of each epoch. # Using persistent_workers false might make the start of each epoch very long. - # ipu_dataloader_training_opts: - # mode: async - # max_num_nodes_per_graph: 20 # train max nodes: 20, max_edges: 54 - # max_num_edges_per_graph: 60 - - # ipu_dataloader_inference_opts: - # mode: async - # max_num_nodes_per_graph: 20 # valid max nodes: 51, max_edges: 118 - # max_num_edges_per_graph: 120 - # # test-dev max nodes: 50, max_edges: 116 - # # test-challenge max nodes: 51, max_edges: 106 - architecture: model_type: FullGraphMultiTaskNetwork mup_base_path: null diff --git a/expts/configs/config_mpnn_10M_b3lyp.yaml b/expts/configs/config_mpnn_10M_b3lyp.yaml index 69113960d..8403a1ba1 100644 --- a/expts/configs/config_mpnn_10M_b3lyp.yaml +++ b/expts/configs/config_mpnn_10M_b3lyp.yaml @@ -9,14 +9,7 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 20 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 60 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 16 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 120 + # Data handling-related batch_size_training: 64 batch_size_inference: 16 diff --git a/expts/hydra-configs/training/accelerator/largemix_ipu.yaml b/expts/hydra-configs/training/accelerator/largemix_ipu.yaml index 090600e98..115cd9e53 100644 --- a/expts/hydra-configs/training/accelerator/largemix_ipu.yaml +++ b/expts/hydra-configs/training/accelerator/largemix_ipu.yaml @@ -2,14 +2,6 @@ datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 30 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 35 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 100 # Data handling-related batch_size_training: 30 batch_size_inference: 30 
diff --git a/expts/hydra-configs/training/accelerator/pcqm4m_ipu.yaml b/expts/hydra-configs/training/accelerator/pcqm4m_ipu.yaml index a7e23f383..c49d10405 100644 --- a/expts/hydra-configs/training/accelerator/pcqm4m_ipu.yaml +++ b/expts/hydra-configs/training/accelerator/pcqm4m_ipu.yaml @@ -2,14 +2,6 @@ datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 16 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 60 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 30 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 120 # Data handling-related batch_size_inference: 16 diff --git a/expts/hydra-configs/training/accelerator/toymix_ipu.yaml b/expts/hydra-configs/training/accelerator/toymix_ipu.yaml index 1bf28ce0b..8f5fe4941 100644 --- a/expts/hydra-configs/training/accelerator/toymix_ipu.yaml +++ b/expts/hydra-configs/training/accelerator/toymix_ipu.yaml @@ -2,14 +2,6 @@ datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 44 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 80 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 44 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 80 # Data handling-related batch_size_training: 50 batch_size_inference: 50 diff --git a/expts/neurips2023_configs/base_config/large.yaml b/expts/neurips2023_configs/base_config/large.yaml index 6b6e9b3b7..748680247 100644 --- a/expts/neurips2023_configs/base_config/large.yaml +++ b/expts/neurips2023_configs/base_config/large.yaml @@ -11,14 +11,7 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 30 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 35 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 100 + # Data handling-related 
batch_size_training: 30 batch_size_inference: 30 diff --git a/expts/neurips2023_configs/base_config/large_pcba.yaml b/expts/neurips2023_configs/base_config/large_pcba.yaml index 074d6d132..ca016e27d 100644 --- a/expts/neurips2023_configs/base_config/large_pcba.yaml +++ b/expts/neurips2023_configs/base_config/large_pcba.yaml @@ -11,14 +11,7 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 30 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 35 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 100 + # Data handling-related batch_size_training: 30 batch_size_inference: 30 diff --git a/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml b/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml index a6b4bb81f..12b6afb2d 100644 --- a/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml +++ b/expts/neurips2023_configs/base_config/large_pcqm_g25.yaml @@ -11,14 +11,7 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 30 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 35 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 100 + # Data handling-related batch_size_training: 30 batch_size_inference: 30 diff --git a/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml b/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml index 3674bfcec..5741854dc 100644 --- a/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml +++ b/expts/neurips2023_configs/base_config/large_pcqm_n4.yaml @@ -11,14 +11,7 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 30 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - 
ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 35 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 100 + # Data handling-related batch_size_training: 30 batch_size_inference: 30 diff --git a/expts/neurips2023_configs/base_config/small.yaml b/expts/neurips2023_configs/base_config/small.yaml index 49fa3a6e8..a8b706b2a 100644 --- a/expts/neurips2023_configs/base_config/small.yaml +++ b/expts/neurips2023_configs/base_config/small.yaml @@ -10,14 +10,7 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 44 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 80 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 44 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 80 + # Data handling-related batch_size_training: 50 batch_size_inference: 50 diff --git a/expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml b/expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml index dcefbcab3..22fbde029 100644 --- a/expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml +++ b/expts/neurips2023_configs/baseline/config_small_gcn_baseline.yaml @@ -9,14 +9,6 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 44 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 80 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 100 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 200 # Data handling-related batch_size_training: 5 batch_size_inference: 2 diff --git a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_mcf7.yaml b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_mcf7.yaml index 9c2726e04..d4dc601fa 100644 --- a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_mcf7.yaml +++ b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_mcf7.yaml @@ 
-9,14 +9,6 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 60 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 60 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 100 # Data handling-related batch_size_training: 10 batch_size_inference: 2 diff --git a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_pcba.yaml b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_pcba.yaml index 3d0f4e4da..a59215b90 100644 --- a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_pcba.yaml +++ b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_pcba.yaml @@ -9,14 +9,6 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 60 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 200 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 400 # Data handling-related batch_size_training: 10 batch_size_inference: 2 diff --git a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_vcap.yaml b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_vcap.yaml index 5fb6eaf00..4a260e3ee 100644 --- a/expts/neurips2023_configs/single_task_gcn/config_large_gcn_vcap.yaml +++ b/expts/neurips2023_configs/single_task_gcn/config_large_gcn_vcap.yaml @@ -9,14 +9,6 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 60 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 60 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 150 # Data handling-related batch_size_training: 10 batch_size_inference: 2 diff --git 
a/expts/neurips2023_configs/single_task_gin/config_large_gin_g25.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_g25.yaml index dca0d2b82..2e4cdaf53 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_g25.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_g25.yaml @@ -9,14 +9,6 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 30 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 30 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 100 # Data handling-related batch_size_training: 10 batch_size_inference: 10 diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_mcf7.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_mcf7.yaml index d07eab6a8..09425ec37 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_mcf7.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_mcf7.yaml @@ -9,14 +9,6 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 60 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 60 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 100 # Data handling-related batch_size_training: 10 batch_size_inference: 2 diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_n4.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_n4.yaml index 544846613..9ef7254fc 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_n4.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_n4.yaml @@ -9,14 +9,6 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 30 # train max 
nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 30 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 100 # Data handling-related batch_size_training: 10 batch_size_inference: 10 diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_pcba.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_pcba.yaml index ade583ba6..ecf18ce9b 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_pcba.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_pcba.yaml @@ -8,15 +8,6 @@ accelerator: type: gpu # cpu or gpu config_override: datamodule: - args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 60 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 200 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 400 # Data handling-related batch_size_training: 10 batch_size_inference: 2 diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_pcq.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_pcq.yaml index 2fec0d6e5..13a44e1c0 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_pcq.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_pcq.yaml @@ -9,14 +9,6 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 30 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 30 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 100 # Data handling-related batch_size_training: 10 batch_size_inference: 10 diff --git a/expts/neurips2023_configs/single_task_gin/config_large_gin_vcap.yaml b/expts/neurips2023_configs/single_task_gin/config_large_gin_vcap.yaml index 
82c212b70..3e716d5b1 100644 --- a/expts/neurips2023_configs/single_task_gin/config_large_gin_vcap.yaml +++ b/expts/neurips2023_configs/single_task_gin/config_large_gin_vcap.yaml @@ -9,14 +9,6 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 60 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 60 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 150 # Data handling-related batch_size_training: 10 batch_size_inference: 2 diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_g25.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_g25.yaml index 3a657c70d..345620aff 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_g25.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_g25.yaml @@ -9,14 +9,6 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 30 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 30 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 100 # Data handling-related batch_size_training: 10 batch_size_inference: 10 diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_mcf7.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_mcf7.yaml index 71b2c1d77..c7a03a80f 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_mcf7.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_mcf7.yaml @@ -9,14 +9,6 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 60 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - 
max_num_nodes_per_graph: 60 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 100 # Data handling-related batch_size_training: 10 batch_size_inference: 2 diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_n4.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_n4.yaml index 54d81b3f8..edba240f9 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_n4.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_n4.yaml @@ -9,14 +9,6 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 30 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 30 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 100 # Data handling-related batch_size_training: 10 batch_size_inference: 10 diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_pcba.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_pcba.yaml index b02de4f7d..32c1af644 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_pcba.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_pcba.yaml @@ -9,14 +9,6 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 60 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 200 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 400 # Data handling-related batch_size_training: 10 batch_size_inference: 2 diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_pcq.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_pcq.yaml index 7c797d744..15026ae74 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_pcq.yaml +++ 
b/expts/neurips2023_configs/single_task_gine/config_large_gine_pcq.yaml @@ -9,14 +9,6 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 30 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 30 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 100 # Data handling-related batch_size_training: 10 batch_size_inference: 10 diff --git a/expts/neurips2023_configs/single_task_gine/config_large_gine_vcap.yaml b/expts/neurips2023_configs/single_task_gine/config_large_gine_vcap.yaml index 12b0a56a8..089aa8ed3 100644 --- a/expts/neurips2023_configs/single_task_gine/config_large_gine_vcap.yaml +++ b/expts/neurips2023_configs/single_task_gine/config_large_gine_vcap.yaml @@ -9,14 +9,6 @@ accelerator: config_override: datamodule: args: - ipu_dataloader_training_opts: - mode: async - max_num_nodes_per_graph: 60 # train max nodes: 20, max_edges: 54 - max_num_edges_per_graph: 100 - ipu_dataloader_inference_opts: - mode: async - max_num_nodes_per_graph: 60 # valid max nodes: 51, max_edges: 118 - max_num_edges_per_graph: 150 # Data handling-related batch_size_training: 10 batch_size_inference: 2 diff --git a/graphium/cli/train_finetune_test.py b/graphium/cli/train_finetune_test.py index b4b9560b3..6e43f2877 100644 --- a/graphium/cli/train_finetune_test.py +++ b/graphium/cli/train_finetune_test.py @@ -52,41 +52,6 @@ def cli(cfg: DictConfig) -> None: return run_training_finetuning_testing(cfg) -def get_training_batch_size(cfg): - """ - WARNING: This MUST be called after accelerator overrides have been applied - (i.e. 
after `load_accelerator` has been called) - """ - try: - # Navigate through the nested dictionaries and get the training batch size - batch_size_training = cfg.get("datamodule", {}).get("args", {}).get("batch_size_training", 1) - - # Ensure that the extracted value is an integer - return int(batch_size_training) - except Exception as e: - print(f"An error occurred: {e}") - - # Return default value if an error occurred - return 1 - - -def get_training_device_iterations(cfg): - try: - ipu_config = cfg.get("accelerator", {}).get("ipu_config", []) - for item in ipu_config: - if "deviceIterations" in item: - # Extract the number between parentheses - start = item.find("(") + 1 - end = item.find(")") - if start != 0 and end != -1: - return int(item[start:end]) - except Exception as e: - print(f"An error occurred: {e}") - - # Return default value if deviceIterations is not found or an error occurred - return 1 - - def run_training_finetuning_testing(cfg: DictConfig) -> None: """ The main (pre-)training and fine-tuning loop. 
From 495f3f6fefa734598b300570a9f68813cbb5ff82 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 6 Sep 2024 22:38:22 -0400 Subject: [PATCH 132/175] Remove packing --- graphium/data/collate.py | 30 +--- graphium/utils/packing.py | 330 -------------------------------------- scripts/ipu_start.sh | 2 +- scripts/ipu_venv.sh | 4 +- tests/test_packing.py | 234 --------------------------- 5 files changed, 5 insertions(+), 595 deletions(-) delete mode 100644 graphium/utils/packing.py delete mode 100644 tests/test_packing.py diff --git a/graphium/data/collate.py b/graphium/data/collate.py index cab3151de..0c1f6ef44 100644 --- a/graphium/data/collate.py +++ b/graphium/data/collate.py @@ -22,7 +22,6 @@ from typing import Union, List, Optional, Dict, Type, Any, Iterable from torch_geometric.data import Data, Batch -from graphium.utils.packing import fast_packing, get_pack_sizes, node_to_pack_indices_mask from loguru import logger from graphium.data.utils import get_keys from graphium.data.dataset import torch_enum_to_dtype @@ -34,7 +33,6 @@ def graphium_collate_fn( labels_dtype_dict: Optional[Dict[str, Any]] = None, mask_nan: Union[str, float, Type[None]] = "raise", do_not_collate_keys: List[str] = [], - batch_size_per_pack: Optional[int] = None, ) -> Union[Any, Dict[str, Any]]: """This collate function is identical to the default pytorch collate function but add support for `pyg.data.Data` to batch graphs. @@ -76,12 +74,6 @@ def graphium_collate_fn( do_not_batch_keys: Keys to ignore for the collate - batch_size_per_pack: The number of graphs to pack together. - This is useful for using packing with the Transformer. - If None, no packing is done. - Otherwise, indices are generated to map the nodes to the pack they belong to under the key `"pack_from_node_idx"`, - with an additional mask to indicate which nodes are from the same graph under the key `"pack_attn_mask"`. - Returns: The batched elements. See `torch.utils.data.dataloader.default_collate`. 
""" @@ -113,7 +105,7 @@ def graphium_collate_fn( # If a PyG Graph is provided, use the PyG batching elif isinstance(elem[key], Data): pyg_graphs = [d[key] for d in elements] - batch[key] = collage_pyg_graph(pyg_graphs, num_nodes, batch_size_per_pack=batch_size_per_pack) + batch[key] = collage_pyg_graph(pyg_graphs, num_nodes) # Ignore the collate for specific keys elif key in do_not_collate_keys: @@ -135,7 +127,7 @@ def graphium_collate_fn( def collage_pyg_graph( - pyg_graphs: List[Data], num_nodes: List[int], batch_size_per_pack: Optional[int] = None + pyg_graphs: List[Data], num_nodes: List[int], ): """ Function to collate pytorch geometric graphs. @@ -144,8 +136,6 @@ def collage_pyg_graph( Parameters: pyg_graphs: List of PyG graphs - batch_size_per_pack: The number of graphs to pack together. - This is useful for using packing with the Transformer, """ # Calculate maximum number of nodes per graph in current batch @@ -162,22 +152,6 @@ def collage_pyg_graph( # Convert edge index to int64 pyg_graph.edge_index = pyg_graph.edge_index.to(torch.int64) - # Apply the packing at the mini-batch level. This is useful for using packing with the Transformer, - # especially in the case of the large graphs being much larger than the small graphs. - # CAREFUL!!! This changes the order of the graphs in the batch, without changing the order of the labels or other objects. - # An error is raised temporarily. 
- if batch_size_per_pack is not None: - raise NotImplementedError( - "Packing is not yet functional, as it changes the order of the graphs in the batch without changing the label order" - ) - packed_graph_idx = fast_packing(num_nodes, batch_size_per_pack) - - # Get the node to pack indices and the mask - pack_from_node_idx, pack_attn_mask = node_to_pack_indices_mask(packed_graph_idx, num_nodes) - for pyg_graph in pyg_graphs: - pyg_graph.pack_from_node_idx = pack_from_node_idx - pyg_graph.pack_attn_mask = pack_attn_mask - return Batch.from_data_list(pyg_graphs) diff --git a/graphium/utils/packing.py b/graphium/utils/packing.py deleted file mode 100644 index 6db6856b1..000000000 --- a/graphium/utils/packing.py +++ /dev/null @@ -1,330 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. --------------------------------------------------------------------------------- -""" - - -from typing import List, Tuple, Iterable, Optional -import numpy as np -import torch - - -class MolPack: - """ - Class that keeps track of the number of atoms and indices that are added - to each pack. Useful when doing packing, or other forms of smart batching. - A pack is a batch, but with optimized memory consumption. 
- """ - - def __init__(self): - self.num_nodes = 0 - self.num_graphs = 0 - self.average_atom = 0 - self.indices = [] - - def add_mol(self, num_nodes: int, idx: int) -> "MolPack": - """ - Add a molecule and it's index to the batch - - Parameters: - num_nodes: Number of atoms of the new molecule - - idx: Index associated to the molecule - """ - self.num_nodes += num_nodes - self.num_graphs += 1 - self.average_atom = self.num_nodes / self.num_graphs - self.indices.append(idx) - return self - - def expected_atoms(self, remaining_mean_num_nodes: float, batch_size: int) -> float: - """ - Given a desired batch size, and given the remaining mean number of - atoms, find the expected number of atoms of the current batch when it is full - - Parameters: - remaining_mean_num_nodes: Average number of atoms per molecule - left to be sampled and distributed across tasks. - - batch_size: Desired batch size - - Returns: - expected_atoms: The expected number of atoms in this batch if we - sample randomly the remaining molecules. - """ - return self.num_nodes + ((batch_size - self.num_graphs) * remaining_mean_num_nodes) - - def __repr__(self) -> str: - """ - Print the main attributes of the current class - """ - return f"{self.__class__.__name__}(m: {self.num_graphs},\ta: {self.num_nodes},\tav: {self.average_atom:.1f})" - - -def smart_packing(num_nodes: List[int], batch_size: int) -> List[List[int]]: - """ - Simple and fast algorithm for packing graphs such that each batch has roughly the - same number of atoms. - Has for-loop scalability issues `O(num_graphs * ipu_batch_size)` = `O(num_graphs^2 / batch_size)` - - Parameters: - num_nodes: List of the number of atoms per molecule for the entire global batch. - Must be of length `batch_size * ipu_batch_size`. - - batch_size: The batch size per iteration, considering a single device and single - forward pass. 
- The global batch size is `batch_size * device_iterations * replication_factor * gradient_accumulation` - - Returns: - packed_indices: A list of packs, each containing a list of indices, such that - if we collect `num_nodes` from the indices, then each pack has roughly the - same total number of atoms. - """ - - # Sort the list - num_nodes = np.asarray(num_nodes) - argsort_num_nodes = np.argsort(num_nodes) - sorted_num_nodes = num_nodes[argsort_num_nodes] - ipu_batch_size = int(len(num_nodes) / batch_size) - sorted_num_nodes, initial_num_nodes = ( - sorted_num_nodes[:-ipu_batch_size], - sorted_num_nodes[-ipu_batch_size:], - ) - reverse_cumsum = np.sum(sorted_num_nodes) - np.cumsum(sorted_num_nodes) + sorted_num_nodes[-1] - - # Start with the largest element in separate packs - mol_batches = [ - MolPack().add_mol(initial_num_nodes[-ii - 1], argsort_num_nodes[-ii - 1]) - for ii in range(ipu_batch_size) - ] - - # Loop from smallest to largest molecule, and add each molecule to the pack with smallest expected sum - for ii, num_atom in enumerate(sorted_num_nodes): - remaining_mean = reverse_cumsum[ii] / (len(sorted_num_nodes) - ii) - max_expected, idx_max_expected = 0, 0 - for jj, m in enumerate(mol_batches): - if m.num_graphs >= batch_size: - continue - expected = m.num_nodes + ( - (batch_size - m.num_graphs) * remaining_mean - ) # Faster than calling m.expected_atoms - if expected > max_expected: - max_expected = expected - idx_max_expected = jj - mol_batches[idx_max_expected].add_mol(num_atom, argsort_num_nodes[ii]) - - packed_indices = [batch.indices for batch in mol_batches] - - return packed_indices - - -def fast_packing(num_nodes: List[int], batch_size: int) -> List[List[int]]: - """ - Super fast algorithm for packing graphs such that each batch has roughly the - same number of atoms. Not as good as `smart_packing` but - faster and more scalable for-loop complexity of `O(batch_size)`. 
- - Parameters: - num_nodes: List of the number of atoms per molecule for the entire global batch. - Must be of length `batch_size * ipu_batch_size`. - - batch_size: The batch size per iteration, considering a single device and single - forward pass. - The global batch size is `batch_size * device_iterations * replication_factor * gradient_accumulation` - - Returns: - packed_indices: A list of packs, each containing a list of indices, such that - if we collect `num_nodes` from the indices, then each pack has roughly the - same total number of atoms. - """ - num_nodes = np.asarray(num_nodes) - argsort_num_nodes = np.argsort(num_nodes) - ipu_batch_size = int(len(num_nodes) / batch_size) - - packed_indices = np.stack( - [ - np.random.permutation(argsort_num_nodes[ii * ipu_batch_size : (ii + 1) * ipu_batch_size]) - for ii in range(batch_size) - ], - axis=0, - ).T.tolist() - return packed_indices - - -def hybrid_packing(num_nodes: List[int], batch_size: int) -> List[List[int]]: - """ - Uses a combination of the `smart_packing` `O(n^2)` on the most important data points, - and the `fast_packing` `O(n)` on the average-sized data points. - - Depending on the expected complexity - - Parameters: - num_nodes: List of the number of atoms per molecule for the entire global batch. - Must be of length `batch_size * ipu_batch_size`. - - batch_size: The batch size per iteration, considering a single device and single - forward pass. - The global batch size is `batch_size * device_iterations * replication_factor * gradient_accumulation` - - Returns: - packed_indices: A list of packs, each containing a list of indices, such that - if we collect `num_nodes` from the indices, then each pack has roughly the - same total number of atoms. - """ - - # Determine the parameters based on the complexity of the smart-packing. - # The bigger the complexity, the more the `fast_packing` algorithm becomes - # statistically powerful, and the more speed benefits it provides. 
- smart_packing_complexity = len(num_nodes) ** 2 / batch_size - if smart_packing_complexity < 1e4: - return smart_packing(num_nodes=num_nodes, batch_size=batch_size) - elif smart_packing_complexity < 1e5: - big, small = 3, 6 - else: - return fast_packing(num_nodes=num_nodes, batch_size=batch_size) - - # Small datasets benefit from smart-packing, without compute burden - ipu_batch_size = int(len(num_nodes) / batch_size) - if len(num_nodes) < (big + small) * ipu_batch_size: - return smart_packing(num_nodes=num_nodes, batch_size=batch_size) - - # Sort the list - num_nodes = np.asarray(num_nodes) - argsort_num_nodes = np.argsort(num_nodes) - - # Smallest and biggest graphs are often outliers and will benefit from the `smart_packing` - biggest_graphs = argsort_num_nodes[-big * ipu_batch_size :] - smallest_graphs = argsort_num_nodes[: small * ipu_batch_size] - big_n_small_graphs = np.concatenate([biggest_graphs, smallest_graphs]) - big_n_small_packs = smart_packing(num_nodes[big_n_small_graphs], batch_size=big + small) - big_n_small_indices = [big_n_small_graphs[pack] for pack in big_n_small_packs] - big_n_small_nodes = [num_nodes[pack] for pack in big_n_small_indices] - - # Medium graphs will be packed faster - medium_graphs = argsort_num_nodes[small * ipu_batch_size : -big * ipu_batch_size] - medium_packs = fast_packing(num_nodes[medium_graphs], batch_size=batch_size - big - small) - medium_indices = [medium_graphs[pack] for pack in medium_packs] - medium_nodes = [num_nodes[pack] for pack in medium_indices] - - # Pack the big/small with the medium in a smart way - big_n_small_sort = np.argsort(np.sum(np.stack(big_n_small_nodes, axis=1), axis=0)) - medium_sort = np.argsort(np.sum(np.stack(medium_nodes, axis=1), axis=0)) - packed_indices = [ - np.concatenate([medium_indices[medium_sort[ii]], big_n_small_indices[big_n_small_sort[-ii]]]) - for ii in range(len(medium_indices)) - ] - - return packed_indices - - -def get_pack_sizes(packed_indices, num_nodes): - """ - Get the 
number of atoms of each pack - """ - pack_sums = [] - for pack in packed_indices: - pack_sum = 0 - for idx in pack: - pack_sum += num_nodes[idx] - pack_sums.append(pack_sum) - return pack_sums - - -def estimate_max_pack_node_size(num_nodes: Iterable[int], batch_size: int, combined_batch_size: int): - """ - Estimate the value of `max_num_nodes`, which represents the maximum number of nodes - needed in a batch to fit the data. - - Parameters: - num_nodes: Number of nodes for all the graphs in the dataset - batch_size: The regular batch size per IPU - combined_batch_size: batch_size * device_iterations - * replication_factor * gradient_accumulation - - """ - - # Estimate the packing size needed - rand_indices = np.arange(len(num_nodes)) - np.random.shuffle(rand_indices) - max_pack_size = 0 - for ii in range(0, len(num_nodes), combined_batch_size): - this_indices = rand_indices[ii : ii + combined_batch_size] - choice = num_nodes[this_indices] - if len(choice) == combined_batch_size: - packed_indices = hybrid_packing(choice, batch_size) - max_pack_size = max(max_pack_size, max(get_pack_sizes(packed_indices, num_nodes[this_indices]))) - max_pack_size_per_graph = max_pack_size / batch_size - - return max_pack_size, max_pack_size_per_graph - - -def node_to_pack_indices_mask( - packed_indices: Iterable[Iterable[int]], all_num_nodes: Iterable[int], max_pack_size: Optional[int] = None -) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Given a list of packed indices, and the number of nodes in each graph, - return a tensor of shape (sum(all_num_nodes), 2) where the first column - is the pack index, and the second column is the node index within the pack. 
- - Can be used to generate a dense packing of the nodes as follows: - ``` - # node_features: A tensor of shape (num_nodes, num_node_features) - # num_packs: The number of packs desired - # max_nodes_per_pack: The maximum number of nodes per pack - # dense_pack: A tensor of shape (num_packs, max_nodes_per_pack, num_node_features) - - dense_pack = torch.zeros([num_packs, max_nodes_per_pack, num_node_features]) - dense_pack[pack_from_node_idx[:, 0], pack_from_node_idx[:, 1]] = node_features - ``` - - This is useful when using a Transformer, to avoid wasteful padding when the - the longest sequence is much longer than the average sequence length. - - Parameters: - packed_indices: A list of lists of graph indices, where each sub-list - represents a pack of graphs - all_num_nodes: The number of nodes in each graph - max_pack_size: The maximum number of nodes per pack. If None, will be - infered from the provided packs. - Useful to determine the shape of the `pack_attn_mask`. - - Returns: - pack_from_node_idx: A tensor of shape (num_nodes, 2) where the first column - is the pack index, and the second column is the node index within the pack. - - pack_attn_mask: A tensor of shape (num_packs, max_pack_size, max_pack_size), - that represents the attention masking for each pack, - such that the graphs in the pack are masked out from each other. 
- """ - - all_num_nodes = torch.as_tensor(all_num_nodes, dtype=torch.long) - cumsum_num_nodes = torch.cumsum(all_num_nodes, dim=0) - if max_pack_size is None: - pack_sizes = get_pack_sizes(packed_indices, all_num_nodes) - max_pack_size = max(pack_sizes) - - # Get the node indices associated to the packs, with 0 padding - pack_from_node_idx = torch.zeros(sum(all_num_nodes), 2, dtype=torch.long) - pack_attn_mask = [] # masks for the attention - for ii, pack in enumerate(packed_indices): - jj = 0 # Counter for the number of nodes in the pack - this_pack_attn_mask = torch.ones((max_pack_size, max_pack_size), dtype=torch.bool) - for graph_idx in pack: - num_nodes = all_num_nodes[graph_idx] - node_idx = torch.arange(cumsum_num_nodes[graph_idx] - num_nodes, cumsum_num_nodes[graph_idx]) - this_pack_attn_mask[jj : jj + num_nodes, jj : jj + num_nodes] = False - pack_from_node_idx[node_idx, 0] = ii - pack_from_node_idx[node_idx, 1] = jj + torch.arange(num_nodes) - jj += num_nodes - pack_attn_mask.append(this_pack_attn_mask) - pack_attn_mask = torch.stack(pack_attn_mask, dim=0) - - return pack_from_node_idx, pack_attn_mask diff --git a/scripts/ipu_start.sh b/scripts/ipu_start.sh index b50ffcbd9..1b3cdf64f 100644 --- a/scripts/ipu_start.sh +++ b/scripts/ipu_start.sh @@ -19,7 +19,7 @@ Start the ipu environment and SDK source /opt/gc/sdk-3.0.0+1128/poplar-ubuntu_20_04-3.0.0+5468-0379b9a65d/enable.sh source /opt/gc/sdk-3.0.0+1128/popart-ubuntu_20_04-3.0.0+5468-0379b9a65d/enable.sh -source ~/.venv/graphium_ipu/bin/activate # Change to your path +source ~/.venv/graphium/bin/activate # Change to your path export VISUAL=vim export EDITOR="$VISUAL" diff --git a/scripts/ipu_venv.sh b/scripts/ipu_venv.sh index 826fcfa12..f5ab07bc9 100644 --- a/scripts/ipu_venv.sh +++ b/scripts/ipu_venv.sh @@ -18,8 +18,8 @@ Create the pip environment for IPU ## Uncomment this to create the folder for the environment # mkdir ~/.venv # Create the folder for the environment -# python3 -m venv 
~/.venv/graphium_ipu # Create the environment -# source ~/.venv/graphium_ipu/bin/activate # Activate the environment +# python3 -m venv ~/.venv/graphium # Create the environment +# source ~/.venv/graphium/bin/activate # Activate the environment # Installing the dependencies for the IPU environment pip install torch==1.10+cpu torchvision==0.11+cpu torchaudio==0.10 -f https://download.pytorch.org/whl/torch_stable.html diff --git a/tests/test_packing.py b/tests/test_packing.py deleted file mode 100644 index 3b378214b..000000000 --- a/tests/test_packing.py +++ /dev/null @@ -1,234 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. 
--------------------------------------------------------------------------------- -""" - - -# General imports -import unittest as ut -import numpy as np - -import torch -from torch_geometric.data import Data, Batch - -# Current library imports -from graphium.utils.packing import ( - smart_packing, - get_pack_sizes, - fast_packing, - hybrid_packing, - node_to_pack_indices_mask, -) - - -def random_packing(num_nodes, batch_size): - ipu_batch_size = int(len(num_nodes) / batch_size) - indices = np.arange(len(num_nodes)) - np.random.shuffle(indices) - indices = np.reshape(indices, (ipu_batch_size, batch_size)).tolist() - return indices - - -class test_Packing(ut.TestCase): - def test_smart_packing(self): - np.random.seed(42) - - batch_sizes = [2, 4, 8, 16, 32, 64] - ipu_batch_sizes = [2, 3, 4, 8, 16, 32, 64] - - for batch_size in batch_sizes: - for ipu_batch_size in ipu_batch_sizes: - err_msg = f"bz={batch_size}, ipu_bz={ipu_batch_size}" - - # Generate random batch size - global_batch = batch_size * ipu_batch_size - num_nodes = np.abs(np.random.gamma(2, 20, size=global_batch)).astype(int) - - # Use the smart packing - packed_indices = smart_packing(num_nodes=num_nodes, batch_size=batch_size) - pack_num_nodes = get_pack_sizes(packed_indices, num_nodes) - - # Use the random packing - rand_packed_indices = random_packing(num_nodes=num_nodes, batch_size=batch_size) - rand_pack_num_nodes = get_pack_sizes(rand_packed_indices, num_nodes) - - # Assert that the smart packing is better than the random packing - self.assertLessEqual(max(pack_num_nodes), max(rand_pack_num_nodes), msg=err_msg) - self.assertGreaterEqual(min(pack_num_nodes), min(rand_pack_num_nodes), msg=err_msg) - - # Assert that the total number of atoms is right - self.assertEqual(sum(pack_num_nodes), sum(num_nodes), msg=err_msg) - self.assertEqual(sum(rand_pack_num_nodes), sum(num_nodes), msg=err_msg) - - # Assert that all index are there - self.assertListEqual( - 
np.sort(np.asarray(packed_indices).flatten()).tolist(), np.arange(len(num_nodes)).tolist() - ) - self.assertListEqual( - np.sort(np.asarray(rand_packed_indices).flatten()).tolist(), - np.arange(len(num_nodes)).tolist(), - ) - - def test_fast_packing(self): - np.random.seed(42) - - # Start at 4 for fast_packing for better statistical significance - batch_sizes = [4, 8, 16, 32, 64] - ipu_batch_sizes = [4, 8, 16, 32, 64] - - for batch_size in batch_sizes: - for ipu_batch_size in ipu_batch_sizes: - err_msg = f"bz={batch_size}, ipu_bz={ipu_batch_size}" - - # Generate random batch size - global_batch = batch_size * ipu_batch_size - num_nodes = np.abs(np.random.gamma(2, 20, size=global_batch)).astype(int) - - # Use the smart packing - packed_indices = fast_packing(num_nodes=num_nodes, batch_size=batch_size) - pack_num_nodes = get_pack_sizes(packed_indices, num_nodes) - - # Use the random packing - rand_packed_indices = random_packing(num_nodes=num_nodes, batch_size=batch_size) - rand_pack_num_nodes = get_pack_sizes(rand_packed_indices, num_nodes) - - # Assert that the smart packing is better than the random packing - self.assertLessEqual(max(pack_num_nodes), max(rand_pack_num_nodes), msg=err_msg) - self.assertGreaterEqual(min(pack_num_nodes), min(rand_pack_num_nodes), msg=err_msg) - - # Assert that the total number of atoms is right - self.assertEqual(sum(pack_num_nodes), sum(num_nodes), msg=err_msg) - self.assertEqual(sum(rand_pack_num_nodes), sum(num_nodes), msg=err_msg) - - # Assert that all index are there - self.assertListEqual( - np.sort(np.asarray(packed_indices).flatten()).tolist(), np.arange(len(num_nodes)).tolist() - ) - self.assertListEqual( - np.sort(np.asarray(rand_packed_indices).flatten()).tolist(), - np.arange(len(num_nodes)).tolist(), - ) - - def test_hybrid_packing(self): - np.random.seed(42) - - batch_sizes = [2, 4, 8, 16, 32, 64] - ipu_batch_sizes = [2, 3, 4, 8, 16, 32, 64] - - for batch_size in batch_sizes: - for ipu_batch_size in ipu_batch_sizes: - 
err_msg = f"bz={batch_size}, ipu_bz={ipu_batch_size}" - - # Generate random batch size - global_batch = batch_size * ipu_batch_size - num_nodes = np.abs(np.random.gamma(2, 20, size=global_batch)).astype(int) - - # Use the smart packing - packed_indices = hybrid_packing(num_nodes=num_nodes, batch_size=batch_size) - pack_num_nodes = get_pack_sizes(packed_indices, num_nodes) - - # Use the random packing - rand_packed_indices = random_packing(num_nodes=num_nodes, batch_size=batch_size) - rand_pack_num_nodes = get_pack_sizes(rand_packed_indices, num_nodes) - - # Assert that the smart packing is better than the random packing - self.assertLessEqual(max(pack_num_nodes), max(rand_pack_num_nodes), msg=err_msg) - self.assertGreaterEqual(min(pack_num_nodes), min(rand_pack_num_nodes), msg=err_msg) - - # Assert that the total number of atoms is right - self.assertEqual(sum(pack_num_nodes), sum(num_nodes), msg=err_msg) - self.assertEqual(sum(rand_pack_num_nodes), sum(num_nodes), msg=err_msg) - - # Assert that all index are there - self.assertListEqual( - np.sort(np.asarray(packed_indices).flatten()).tolist(), np.arange(len(num_nodes)).tolist() - ) - self.assertListEqual( - np.sort(np.asarray(rand_packed_indices).flatten()).tolist(), - np.arange(len(num_nodes)).tolist(), - ) - - def test_node_to_pack_indices_mask(self): - # Create a dummy batch - in_dim = 7 - in_dim_edges = 11 - max_num_nodes_per_graph = 20 - batch_size_per_pack = 5 - - torch.manual_seed(42) - - # Create a dummy batch of graphs - batch, all_num_nodes = [], [] - for ii in range(100): - num_nodes = torch.randint(1, max_num_nodes_per_graph, (1,)).item() - all_num_nodes.append(num_nodes) - num_edges = abs(round(2.2 * num_nodes) + torch.randint(-2, 2, (1,)).item()) + 1 - x = torch.randn(num_nodes, in_dim, dtype=torch.float32) - edge_idx = torch.randint(0, num_nodes, (2, num_edges)) - e = torch.randn(edge_idx.shape[-1], in_dim_edges, dtype=torch.float32) - g = Data(h=x, edge_index=edge_idx, edge_attr=e) - 
batch.append(g) - batch = Batch.from_data_list(batch) - - # Get the packing - packed_graph_idx = fast_packing(all_num_nodes, batch_size_per_pack) - pack_sizes = get_pack_sizes(packed_graph_idx, all_num_nodes) - max_pack_size = max(pack_sizes) - num_packs = len(pack_sizes) - - # Get the node to pack indices and the mask - pack_from_node_idx, pack_attn_mask = node_to_pack_indices_mask(packed_graph_idx, all_num_nodes) - - # Assert that the nodes to pack indices are correct - h = torch.arange(batch.num_nodes, dtype=torch.float32) - packed_shape = [num_packs, max_pack_size] - h_packed = torch.zeros(packed_shape) - h_packed[pack_from_node_idx[:, 0], pack_from_node_idx[:, 1]] = h - h_packed_unique = torch.sort(torch.unique(h_packed))[0] - np.testing.assert_array_equal(h_packed_unique, torch.arange(batch.num_nodes)) - self.assertEqual(h_packed.sum(), h.sum()) - - # Test again with additional h dimension - h = batch.h - packed_shape = [num_packs, max_pack_size] + list(h.shape[1:]) - h_packed = torch.zeros(packed_shape) - h_packed[pack_from_node_idx[:, 0], pack_from_node_idx[:, 1]] = h - h_packed_unique = torch.sort(torch.unique(h_packed))[0] - h_packed_unique = h_packed_unique[h_packed_unique != 0] - np.testing.assert_array_almost_equal(h_packed_unique, torch.unique(h)) - self.assertAlmostEqual(h_packed.sum().item(), h.sum().item(), places=3) - - # Assert that the mask is correct by counting the number of False values (the sum of squared number of nodes per pack) - num_false = (~pack_attn_mask).sum([1, 2]) - num_expected = torch.as_tensor( - [sum([all_num_nodes[graph_idx] ** 2 for graph_idx in pack]) for pack in packed_graph_idx] - ) - np.testing.assert_array_equal(num_false, num_expected) - - # Assert that the mask is correct by counting the number of elements in each row and column - num_expected = [] - for pack in packed_graph_idx: - pack_num_expected = [] - for graph_idx in pack: - num_nodes = all_num_nodes[graph_idx] - for ii in range(num_nodes): - 
pack_num_expected.append(num_nodes) - pack_num_expected.extend([0] * (max_pack_size - len(pack_num_expected))) - num_expected.append(pack_num_expected) - num_expected = torch.as_tensor(num_expected) - num_false_row = (~pack_attn_mask).sum([2]) - num_false_col = (~pack_attn_mask).sum([1]) - np.testing.assert_array_equal(num_false_row, num_expected) - np.testing.assert_array_equal(num_false_col, num_expected) - - -if __name__ == "__main__": - ut.main() From 90c7af24284e19ff3a6d305bbc4a940d5f9fd6eb Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Fri, 6 Sep 2024 23:52:43 -0400 Subject: [PATCH 133/175] Fixing most unit-tests --- env.yml | 2 +- graphium/config/_loader.py | 1 - graphium/data/datamodule.py | 20 +- .../nn/architectures/global_architectures.py | 2 +- graphium/nn/pyg_layers/gps_pyg.py | 6 +- graphium/nn/pyg_layers/utils.py | 5 +- scripts/ipu_start.sh | 25 -- scripts/ipu_venv.sh | 30 -- tests/config_test_dataloader.yaml | 292 ++++++++++++++++++ tests/test_base_layers.py | 2 +- tests/test_mup.py | 2 +- tests/test_pyg_layers.py | 3 +- tests/test_training.py | 14 - 13 files changed, 303 insertions(+), 101 deletions(-) delete mode 100644 scripts/ipu_start.sh delete mode 100644 scripts/ipu_venv.sh create mode 100644 tests/config_test_dataloader.yaml diff --git a/env.yml b/env.yml index a0f2fdb98..d0bc91d25 100644 --- a/env.yml +++ b/env.yml @@ -41,7 +41,7 @@ dependencies: - pytorch_scatter >=2.0 # chemistry - - rdkit + - rdkit<=2024.03.3 - datamol >=0.10 - boost # needed by rdkit diff --git a/graphium/config/_loader.py b/graphium/config/_loader.py index 1b06d3883..7550c6cbb 100644 --- a/graphium/config/_loader.py +++ b/graphium/config/_loader.py @@ -343,7 +343,6 @@ def load_trainer( trainer = Trainer( detect_anomaly=True, - strategy=strategy, accelerator=accelerator_type, devices=devices, callbacks=callbacks, diff --git a/graphium/data/datamodule.py b/graphium/data/datamodule.py index 86d4297b4..ff3bcae99 100644 --- a/graphium/data/datamodule.py +++ 
b/graphium/data/datamodule.py @@ -102,7 +102,6 @@ def __init__( self, batch_size_training: int = 16, batch_size_inference: int = 16, - batch_size_per_pack: Optional[int] = None, num_workers: int = 0, pin_memory: bool = True, persistent_workers: bool = False, @@ -125,15 +124,6 @@ def __init__( self.batch_size_training = batch_size_training self.batch_size_inference = batch_size_inference - self.batch_size_per_pack = batch_size_per_pack - if self.batch_size_per_pack is not None: - # Check that batch_size_per_pack is a divisor of batch_size_training and batch_size_inference - assert ( - self.batch_size_training % self.batch_size_per_pack == 0 - ), f"batch_size_training must be a multiple of batch_size_per_pack, provided batch_size_training={self.batch_size_training}, batch_size_per_pack={self.batch_size_per_pack}" - assert ( - self.batch_size_inference % self.batch_size_per_pack == 0 - ), f"batch_size_inference must be a multiple of batch_size_per_pack, provided batch_size_inference={self.batch_size_inference}, batch_size_per_pack={self.batch_size_per_pack}" self.num_workers = num_workers self.pin_memory = pin_memory @@ -203,7 +193,7 @@ def get_collate_fn(self, collate_fn): if collate_fn is None: # Some values become `inf` when changing data type. 
`mask_nan` deals with that collate_fn = partial( - graphium_collate_fn, mask_nan=0, batch_size_per_pack=self.batch_size_per_pack + graphium_collate_fn, mask_nan=0, ) collate_fn.__name__ = graphium_collate_fn.__name__ @@ -731,7 +721,6 @@ def __init__( featurization: Optional[Union[Dict[str, Any], omegaconf.DictConfig]] = None, batch_size_training: int = 16, batch_size_inference: int = 16, - batch_size_per_pack: Optional[int] = None, num_workers: int = 0, pin_memory: bool = True, persistent_workers: bool = False, @@ -779,7 +768,6 @@ def __init__( self, batch_size_training=batch_size_training, batch_size_inference=batch_size_inference, - batch_size_per_pack=batch_size_per_pack, num_workers=num_workers, pin_memory=pin_memory, persistent_workers=persistent_workers, @@ -1284,7 +1272,6 @@ def get_collate_fn(self, collate_fn): graphium_collate_fn, mask_nan=0, do_not_collate_keys=["smiles", "mol_ids"], - batch_size_per_pack=self.batch_size_per_pack, ) collate_fn.__name__ = graphium_collate_fn.__name__ return collate_fn @@ -1673,7 +1660,6 @@ def to_dict(self) -> Dict[str, Any]: obj_repr["test_size"] = len(self.test_indices) if self.test_indices is not None else None obj_repr["batch_size_training"] = self.batch_size_training obj_repr["batch_size_inference"] = self.batch_size_inference - obj_repr["batch_size_per_pack"] = self.batch_size_per_pack obj_repr["num_node_feats"] = self.num_node_feats obj_repr["num_node_feats_with_positional_encoding"] = self.num_node_feats_with_positional_encoding obj_repr["num_edge_feats"] = self.num_edge_feats @@ -1701,7 +1687,6 @@ def __init__( featurization: Optional[Union[Dict[str, Any], omegaconf.DictConfig]] = None, batch_size_training: int = 16, batch_size_inference: int = 16, - batch_size_per_pack: Optional[int] = None, num_workers: int = 0, pin_memory: bool = True, persistent_workers: bool = False, @@ -1772,7 +1757,6 @@ def __init__( dm_args["featurization"] = featurization dm_args["batch_size_training"] = batch_size_training 
dm_args["batch_size_inference"] = batch_size_inference - dm_args["batch_size_per_pack"] = batch_size_per_pack dm_args["num_workers"] = num_workers dm_args["pin_memory"] = pin_memory dm_args["persistent_workers"] = persistent_workers @@ -1953,7 +1937,6 @@ def __init__( featurization: Optional[Union[Dict[str, Any], omegaconf.DictConfig]] = None, batch_size_training: int = 16, batch_size_inference: int = 16, - batch_size_per_pack: Optional[int] = None, num_workers: int = 0, pin_memory: bool = True, persistent_workers: bool = False, @@ -2014,7 +1997,6 @@ def __init__( processed_graph_data_path=processed_graph_data_path, batch_size_training=batch_size_training, batch_size_inference=batch_size_inference, - batch_size_per_pack=batch_size_per_pack, num_workers=num_workers, pin_memory=pin_memory, persistent_workers=persistent_workers, diff --git a/graphium/nn/architectures/global_architectures.py b/graphium/nn/architectures/global_architectures.py index cea3dc16a..3ddf52a94 100644 --- a/graphium/nn/architectures/global_architectures.py +++ b/graphium/nn/architectures/global_architectures.py @@ -1996,7 +1996,7 @@ def compute_nodepairs( result: concatenated node features of shape B * max_num_nodes * 2*h, where B is number of graphs, max_num_nodes is the chosen maximum number nodes, and h is the feature dim """ - dense_feat, mask, _ = to_dense_batch( + dense_feat, mask = to_dense_batch( node_feats, batch=batch, fill_value=fill_value, diff --git a/graphium/nn/pyg_layers/gps_pyg.py b/graphium/nn/pyg_layers/gps_pyg.py index 8dcb5b31b..15bda19d5 100644 --- a/graphium/nn/pyg_layers/gps_pyg.py +++ b/graphium/nn/pyg_layers/gps_pyg.py @@ -369,7 +369,7 @@ def _to_dense_batch( h: Tensor, batch: Batch, batch_size: Optional[int] = None, - max_num_nodes_per_graph: Optional[int] = None, + max_num_nodes: Optional[int] = None, ) -> Tensor: """ Convert the batch of graphs to a dense batch. 
@@ -380,7 +380,7 @@ def _to_dense_batch( h, batch=batch.batch, # The batch index as a vector that indicates for nodes of which graph it belongs to batch_size=batch_size, - max_num_nodes_per_graph=max_num_nodes_per_graph, + max_num_nodes=max_num_nodes, ) key_padding_mask = ~key_padding_mask return h_dense, attn_mask, key_padding_mask @@ -411,7 +411,7 @@ def _self_attention_block(self, feat: Tensor, feat_in: Tensor, batch: Batch) -> feat, batch=batch, # The batch index as a vector that indicates for nodes of which graph it belongs to batch_size=batch_size, - max_num_nodes_per_graph=max_num_nodes_per_graph, + max_num_nodes=max_num_nodes_per_graph, ) attn_bias = None diff --git a/graphium/nn/pyg_layers/utils.py b/graphium/nn/pyg_layers/utils.py index 331caa975..169e1e5a7 100644 --- a/graphium/nn/pyg_layers/utils.py +++ b/graphium/nn/pyg_layers/utils.py @@ -93,12 +93,11 @@ def forward( batch_size = None # pos: [batch, nodes, 3] # padding_mask: [batch, nodes] - # idx: [totoal_nodes] pos, mask = to_dense_batch( pos, batch=batch.batch, batch_size=batch_size, - max_num_nodes_per_graph=max_num_nodes_per_graph, + max_num_nodes=max_num_nodes_per_graph, ) # check nan with the pos from to_dense_batch, # and generate mask. 1 for nan, 0 for other values. @@ -149,7 +148,7 @@ def forward( # unsqueezed mask size: [batch, 1, 1] apply on tensor [batch, nodes, embed_dim] node_feature.masked_fill_(nan_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), 0.0) # [total_nodes, embed_dim] - node_feature = to_sparse_batch(node_feature, idx) + node_feature = node_feature[mask] return attn_bias, node_feature diff --git a/scripts/ipu_start.sh b/scripts/ipu_start.sh deleted file mode 100644 index 1b3cdf64f..000000000 --- a/scripts/ipu_start.sh +++ /dev/null @@ -1,25 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. 
- -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. --------------------------------------------------------------------------------- -""" - - -""" -Start the ipu environment and SDK -""" - -source /opt/gc/sdk-3.0.0+1128/poplar-ubuntu_20_04-3.0.0+5468-0379b9a65d/enable.sh -source /opt/gc/sdk-3.0.0+1128/popart-ubuntu_20_04-3.0.0+5468-0379b9a65d/enable.sh - -source ~/.venv/graphium/bin/activate # Change to your path - -export VISUAL=vim -export EDITOR="$VISUAL" diff --git a/scripts/ipu_venv.sh b/scripts/ipu_venv.sh deleted file mode 100644 index f5ab07bc9..000000000 --- a/scripts/ipu_venv.sh +++ /dev/null @@ -1,30 +0,0 @@ -""" --------------------------------------------------------------------------------- -Copyright (c) 2023 Valence Labs, Recursion Pharmaceuticals and Graphcore Limited. - -Use of this software is subject to the terms and conditions outlined in the LICENSE file. -Unauthorized modification, distribution, or use is prohibited. Provided 'as is' without -warranties of any kind. - -Valence Labs, Recursion Pharmaceuticals and Graphcore Limited are not liable for any damages arising from its use. -Refer to the LICENSE file for the full terms and conditions. 
--------------------------------------------------------------------------------- -""" - - -""" -Create the pip environment for IPU -""" - -## Uncomment this to create the folder for the environment -# mkdir ~/.venv # Create the folder for the environment -# python3 -m venv ~/.venv/graphium # Create the environment -# source ~/.venv/graphium/bin/activate # Activate the environment - -# Installing the dependencies for the IPU environment -pip install torch==1.10+cpu torchvision==0.11+cpu torchaudio==0.10 -f https://download.pytorch.org/whl/torch_stable.html -pip install torch-scatter torch-sparse torch-cluster torch-spline-conv torch-geometric -f https://data.pyg.org/whl/torch-1.10.0+cpu.html -pip install dgl dglgo -f https://data.dgl.ai/wheels/repo.html -pip install /opt/gc/sdk-3.0.0+1128/poptorch-3.0.0+84519_672c9cbc7f_ubuntu_20_04-cp38-cp38-linux_x86_64.whl -pip install -r requirements.txt -pip install -e . diff --git a/tests/config_test_dataloader.yaml b/tests/config_test_dataloader.yaml new file mode 100644 index 000000000..c1ef946d1 --- /dev/null +++ b/tests/config_test_dataloader.yaml @@ -0,0 +1,292 @@ +# Testing the multitask pipeline with the QM9 dataset, by splitting it up into three tasks: homo, alpha and cv. +constants: + name: &name test_dataloader + seed: &seed 42 + raise_train_error: true # Whether the code should raise an error if it crashes during training + +accelerator: + type: cpu # cpu or gpu + config_override: + datamodule: + args: + # Data handling-related + batch_size_training: 6 + batch_size_inference: 6 + trainer: + trainer: + precision: 16 + accumulate_grad_batches: 4 + +datamodule: + module_type: "MultitaskFromSmilesDataModule" + args: # Matches that in the test_multitask_datamodule.py case. 
+ task_specific_args: # To be replaced by a new class "DatasetParams" + homo: + df: null + df_path: &df_path https://storage.googleapis.com/datasets-public-research/PCQM4M/cxsmiles/pcqm4mv2-2k-lumo-alpha.csv + smiles_col: "cxsmiles" + label_cols: ["homo_lumo_gap", "lumo"] + split_val: 0.2 + split_test: 0.2 + seed: *seed + splits_path: null # This may not always be provided + sample_size: null # This may not always be provided + idx_col: null # This may not always be provided + weights_col: null # This may not always be provided + weights_type: null # This may not always be provided + task_level: graph + alpha: + df: null + df_path: *df_path + smiles_col: "cxsmiles" + label_cols: ["alpha"] + split_val: 0.2 + split_test: 0.2 + seed: *seed + splits_path: null # This may not always be provided + sample_size: null # This may not always be provided + idx_col: null # This may not always be provided + weights_col: null # This may not always be provided + weights_type: null # This may not always be provided + task_level: graph + # Featurization + prepare_dict_or_graph: pyg:graph + featurization_n_jobs: 0 + featurization_progress: True + featurization: + atom_property_list_onehot: [atomic-number, valence] + atom_property_list_float: [mass, electronegativity, in-ring] + edge_property_list: [bond-type-onehot, stereo, in-ring] + conformer_property_list: [positions_3d] + add_self_loop: False + explicit_H: False + use_bonds_weights: False + pos_encoding_as_features: # encoder dropout 0.18 + pos_types: + node_laplacian_eigvec: + pos_type: laplacian_eigvec + pos_level: node + num_pos: 5 + normalization: "none" + disconnected_comp: True + node_laplacian_eigval: + pos_type: laplacian_eigval + pos_level: node + num_pos: 5 + normalization: "none" + disconnected_comp: True + rw_return_probs: + pos_type: rw_return_probs + pos_level: node + ksteps: [4, 8] + edge_rw_transition_probs: + pos_type: rw_transition_probs + pos_level: edge + ksteps: [2, 4] + nodepair_rw_return_probs: + pos_type: 
rw_return_probs + pos_level: nodepair + ksteps: [4] + electrostatic: + pos_type: electrostatic + pos_level: node + edge_commute: + pos_type: commute + pos_level: edge + nodepair_graphormer: + pos_type: graphormer + pos_level: nodepair + + num_workers: -1 + +architecture: + model_type: FullGraphMultiTaskNetwork + mup_base_path: null + + pre_nn: # Set as null to avoid a pre-nn network + out_dim: 16 + hidden_dims: 16 + depth: 1 + activation: relu + last_activation: none + dropout: &dropout 0.1 + normalization: &normalization batch_norm + last_normalization: *normalization + residual_type: none + + pre_nn_edges: # Set as null to avoid a pre-nn network + out_dim: 16 + hidden_dims: 16 + depth: 1 + activation: relu + last_activation: none + dropout: *dropout + normalization: *normalization + last_normalization: *normalization + residual_type: none + + pe_encoders: + out_dim: &pe_out_dim 16 + edge_out_dim: &edge_pe_out_dim 8 + pool: "sum" #"mean" "max" + last_norm: None #"batch_norm", "layer_norm" + max_num_nodes_per_graph: 30 + encoders: + emb_la_pos: + encoder_type: "laplacian_pe" + input_keys: ["laplacian_eigvec", "laplacian_eigval"] + output_keys: ["feat"] + hidden_dim: 32 + model_type: 'DeepSet' #'Transformer' or 'DeepSet' + num_layers: 2 + num_layers_post: 1 # Num. 
layers to apply after pooling + dropout: 0.1 + first_normalization: "none" #"batch_norm" or "layer_norm" + emb_rwse: + encoder_type: "mlp" + input_keys: ["rw_return_probs"] + output_keys: ["feat"] + hidden_dim: 32 + num_layers: 2 + dropout: 0.1 + normalization: "layer_norm" #"batch_norm" or "layer_norm" + first_normalization: "layer_norm" #"batch_norm" or "layer_norm" + emb_electrostatic: + encoder_type: "mlp" + input_keys: ["electrostatic"] + output_keys: ["feat"] + hidden_dim: 32 + num_layers: 1 + dropout: 0.1 + normalization: "layer_norm" #"batch_norm" or "layer_norm" + first_normalization: "layer_norm" #"batch_norm" or "layer_norm" + emb_edge_rwse: + encoder_type: "mlp" + input_keys: ["edge_rw_transition_probs"] + output_keys: ["edge_feat"] + hidden_dim: 32 + num_layers: 1 + dropout: 0.1 + normalization: "layer_norm" #"batch_norm" or "layer_norm" + emb_edge_pes: + encoder_type: "cat_mlp" + input_keys: ["edge_rw_transition_probs", "edge_commute"] + output_keys: ["edge_feat"] + hidden_dim: 32 + num_layers: 1 + dropout: 0.1 + normalization: "layer_norm" #"batch_norm" or "layer_norm" + gaussian_pos: + encoder_type: "gaussian_kernel" + input_keys: ["positions_3d"] + output_keys: ["feat", "nodepair_gaussian_bias_3d"] + num_heads: &num_heads 2 + num_layers: 2 + embed_dim: *pe_out_dim + use_input_keys_prefix: False + + gnn: # Set as null to avoid a post-nn network + out_dim: 8 + hidden_dims: 16 + depth: 2 + activation: relu + last_activation: none + dropout: *dropout + normalization: *normalization + last_normalization: *normalization + residual_type: simple + virtual_node: 'none' + layer_type: 'pyg:gps' #pyg:gine #'pyg:gps' # pyg:gated-gcn, pyg:gine,pyg:gps + layer_kwargs: # Parameters for the model itself. 
You could define dropout_attn: 0.1 + mpnn_type: 'pyg:gine' + mpnn_kwargs: null + #out_dim_edges: 10 + attn_type: "none" # "full-attention", "none" + attn_kwargs: null + + graph_output_nn: + graph: + pooling: [sum, mean, max] + out_dim: 8 + hidden_dims: 8 + depth: 1 + activation: relu + last_activation: none + dropout: *dropout + normalization: *normalization + last_normalization: "none" + residual_type: none + + task_heads: + homo: + out_dim: 2 + hidden_dims: 8 + depth: 1 # Not needed if we have hidden_dims + activation: relu + last_activation: none + dropout: *dropout + normalization: *normalization + last_normalization: "none" + residual_type: none + task_level: graph + alpha: + out_dim: 1 + hidden_dims: 8 + depth: 1 # Not needed if we have hidden_dims + activation: relu + last_activation: none + dropout: *dropout + normalization: *normalization + last_normalization: "none" + residual_type: none + task_level: graph + cv: + out_dim: 1 + hidden_dims: 8 + depth: 2 # Not needed if we have hidden_dims + activation: relu + last_activation: none + dropout: *dropout + normalization: *normalization + last_normalization: "none" + residual_type: none + task_level: graph + +#Task-specific +predictor: + metrics_on_progress_bar: + homo: ["mae"] + alpha: ["mae"] + loss_fun: + homo: mse + alpha: mse + random_seed: *seed + optim_kwargs: + lr: 1.e-3 + target_nan_mask: null + +# Task-specific +metrics: + homo: + - name: mae + metric: mae + threshold_kwargs: null + target_nan_mask: null + alpha: + - name: mae + metric: mae + threshold_kwargs: null + +trainer: + seed: *seed + logger: + save_dir: logs/QM9 + name: *name + model_checkpoint: + dirpath: models_checkpoints/QM9/ + filename: *name + save_top_k: 1 + every_n_epochs: 1 + trainer: + max_epochs: 2 + min_epochs: 1 \ No newline at end of file diff --git a/tests/test_base_layers.py b/tests/test_base_layers.py index d3d04cc39..d91d2bfc7 100644 --- a/tests/test_base_layers.py +++ b/tests/test_base_layers.py @@ -82,7 +82,7 @@ def 
test_transformer_encoder_layer_mup(self): feat_in, batch=bg.batch, batch_size=self.batch_size, - max_num_nodes_per_graph=self.max_num_nodes_per_graph, + max_num_nodes=self.max_num_nodes_per_graph, drop_nodes_last_graph=False, ) attn_mask = None diff --git a/tests/test_mup.py b/tests/test_mup.py index b60e0ccf3..f5fd48987 100644 --- a/tests/test_mup.py +++ b/tests/test_mup.py @@ -151,7 +151,7 @@ def test_feedforwardgraph_mup(self): def test_fullgraphmultitasknetwork(self): # Load the configuration file for the model - CONFIG_FILE = "tests/config_test_ipu_dataloader.yaml" + CONFIG_FILE = "tests/config_test_dataloader.yaml" with open(CONFIG_FILE, "r") as f: cfg = yaml.safe_load(f) diff --git a/tests/test_pyg_layers.py b/tests/test_pyg_layers.py index 03498eb35..52caec668 100644 --- a/tests/test_pyg_layers.py +++ b/tests/test_pyg_layers.py @@ -231,7 +231,6 @@ def test_pnamessagepassinglayer(self): self.assertEqual(bg2.feat.shape[1], self.out_dim * layer.out_dim_factor) self.assertTrue((bg2.edge_feat == self.bg.edge_feat).all) - @pytest.mark.skip_ipu def test_dimenetlayer(self): from graphium.nn.encoders.bessel_pos_encoder import BesselSphericalPosEncoder @@ -311,7 +310,7 @@ def test_preprocess3Dfeaturelayer(self): # bias: [batch, num_heads, nodes, nodes] # node_feature: [total_nodes, embed_dim] bias, node_feature = layer.forward( - bg, max_num_nodes_per_graph=4, on_ipu=False, positions_3d_key="positions_3d" + bg, max_num_nodes_per_graph=4, positions_3d_key="positions_3d" ) self.assertEqual(bias.size(), torch.Size([2, num_heads, 4, 4])) self.assertFalse(np.isnan(bias.detach().numpy()).any()) diff --git a/tests/test_training.py b/tests/test_training.py index 5457fbd7f..ab8648aad 100644 --- a/tests/test_training.py +++ b/tests/test_training.py @@ -108,20 +108,6 @@ def call_cli_with_overrides(self, acc_type: str, acc_prec: str) -> None: def test_cpu_cli_training(self): self.call_cli_with_overrides("cpu", "32") - @pytest.mark.ipu - @pytest.mark.skip - def 
test_ipu_cli_training(self): - with ut.patch("poptorch.ipuHardwareIsAvailable", return_value=True): - with ut.patch("lightning_graphcore.accelerator._IPU_AVAILABLE", new=True): - import poptorch - - assert poptorch.ipuHardwareIsAvailable() - from lightning_graphcore.accelerator import _IPU_AVAILABLE - - assert _IPU_AVAILABLE is True - self.call_cli_with_overrides("ipu", "16-true") - - def initialize_hydra(config_path, job_name="app"): if GlobalHydra.instance().is_initialized(): From 29229ffcdb486f737f9355244d4c8262bfb8536f Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Sat, 7 Sep 2024 11:20:50 -0400 Subject: [PATCH 134/175] Updated env file --- env.yml | 12 ++++++++++++ graphium/config/loc-config_largemix.yaml | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/env.yml b/env.yml index d0bc91d25..af4a2be02 100644 --- a/env.yml +++ b/env.yml @@ -70,6 +70,18 @@ dependencies: - markdown-include - mike >=1.0.0 + # GCC compilers for linux 64 + - gcc_linux-64 + - gxx_linux-64 + - binutils_linux-64 + - gfortran_linux-64 + - gcc_bootstrap_linux-64 + - libgcc + - pybind11 + + # Optional + - pytdc + - pip: - hydra-core>=1.3.2 - hydra-optuna-sweeper diff --git a/graphium/config/loc-config_largemix.yaml b/graphium/config/loc-config_largemix.yaml index 621ca04c3..2c96e39ba 100644 --- a/graphium/config/loc-config_largemix.yaml +++ b/graphium/config/loc-config_largemix.yaml @@ -392,7 +392,7 @@ predictor: lr: ${eval:"0.003/(((${architecture.gnn.depth}+8)/24)**0.5)"} random_seed: 42 scheduler_kwargs: null - target_nan_mask: null + target_nan_mask: ignore torch_scheduler_kwargs: max_num_epochs: ${constants.max_epochs} module_type: WarmUpLinearLR From 019be26c065927fd2cda76859f85bebbbccca5b8 Mon Sep 17 00:00:00 2001 From: DomInvivo Date: Sat, 7 Sep 2024 11:34:35 -0400 Subject: [PATCH 135/175] Fixed the dummy model, toymix run, and most unit-tests --- expts/hydra-configs/training/toymix.yaml | 2 +- graphium/cli/train_finetune_test.py | 4 ++-- 
.../nn/architectures/global_architectures.py | 4 ---- tests/dummy-pretrained-model.ckpt | Bin 1667754 -> 1663036 bytes tests/test_attention.py | 3 +-- tests/test_base_layers.py | 1 - 6 files changed, 4 insertions(+), 10 deletions(-) diff --git a/expts/hydra-configs/training/toymix.yaml b/expts/hydra-configs/training/toymix.yaml index dc6c174b9..bfffdebe2 100644 --- a/expts/hydra-configs/training/toymix.yaml +++ b/expts/hydra-configs/training/toymix.yaml @@ -11,7 +11,7 @@ predictor: warmup_epochs: 10 verbose: False scheduler_kwargs: null - target_nan_mask: null + target_nan_mask: ignore multitask_handling: flatten # flatten, mean-per-label trainer: diff --git a/graphium/cli/train_finetune_test.py b/graphium/cli/train_finetune_test.py index 6e43f2877..513e5a1ed 100644 --- a/graphium/cli/train_finetune_test.py +++ b/graphium/cli/train_finetune_test.py @@ -43,8 +43,8 @@ TESTING_ONLY_CONFIG_KEY = "testing_only" - -@hydra.main(version_base=None, config_path="/home/domix/Gitx/graphium/graphium/config/", config_name="loc-config_largemix") +@hydra.main(version_base=None, config_path="../../expts/hydra-configs", config_name="main") +# @hydra.main(version_base=None, config_path="/home/domix/Gitx/graphium/graphium/config/", config_name="loc-config_largemix") def cli(cfg: DictConfig) -> None: """ The main CLI endpoint for training, fine-tuning and evaluating Graphium models. 
diff --git a/graphium/nn/architectures/global_architectures.py b/graphium/nn/architectures/global_architectures.py index 3ddf52a94..3ee75984b 100644 --- a/graphium/nn/architectures/global_architectures.py +++ b/graphium/nn/architectures/global_architectures.py @@ -1889,7 +1889,6 @@ def forward(self, g: Batch): node_feats=g["feat"], batch=g.batch, max_num_nodes=self.max_num_nodes_per_graph, - drop_nodes_last_graph=False, ) # Check if at least one graph-level task is present if self.task_level == "graph": @@ -1979,7 +1978,6 @@ def compute_nodepairs( max_num_nodes: int = None, fill_value: float = float("nan"), batch_size: int = None, - drop_nodes_last_graph: bool = False, ) -> torch.Tensor: r""" Vectorized implementation of nodepair-level task: @@ -1990,8 +1988,6 @@ def compute_nodepairs( fill_value: The value for invalid entries in the resulting dense output tensor. (default: :obj:`NaN`) batch_size: The batch size. (default: :obj:`None`) - drop_nodes_last_graph: Whether to drop the nodes of the last graphs that exceed - the `max_num_nodes_per_graph`. Useful when the last graph is a padding. 
Returns: result: concatenated node features of shape B * max_num_nodes * 2*h, where B is number of graphs, max_num_nodes is the chosen maximum number nodes, and h is the feature dim diff --git a/tests/dummy-pretrained-model.ckpt b/tests/dummy-pretrained-model.ckpt index e3df0fa4cf887aab0bc87ee7d06b81b2cb21479d..41e4df2b49ca3a1fd8b3b54732800c4e190865ec 100644 GIT binary patch literal 1663036 zcmce<2YeL8|2|Id9i$_nC;=fPcUvxj6nZ)8A{bE&M{w2(e8LcS^Ej983Q8NC)V_7O0)74dWfbyx3Is+MMRj*tYHYlxptE3f zX`}`vr4Dc<#iZ-*G*6+fM~g()KxW!-JrypSB_#}s*HaQw2E`07`=GJ;0lg6c?krQ_@o@W{9nnr(|GYba5^&mEhh}N>nM;J}K3u_d<1KmhzO2E{HP0 zo-)yev5ighddfO8bWgdS&H~OTXQ2t6^3Foef)hLyCdV`V6?IP~%~Lr#;{O!HfCN{% zr%F5&#+Qm03PL7J^*z*c>=b8VkV?y$EOjAE8lBb!`F+sT<(_C~+u+S5t zdu*D={-1SBt{&bolAjvcBa4nvjE+%R$2f$LP~8)zd0ITCkkoWNCOIkXq4TAspAf|p z&bp<9?ulSZ0-TsHk)m6ogql{mr?uv36W#r>=F3CX@J?y#r$zO&L#G7jp7vZyrl$kz zl#W71C*9Lo^Ejj1Jdz9=%8x(;69~PMl1j<0q=XSJc3jIj2zBvm+TrPnrm@#_V@+4c z^mJ!U_Yh6@)IFN!=@s4T|JpQ<`B}e)Lp{Ak!+nf~pJNS2i-w=qJ$*IL3(?&kr(yG< zqiJE6{bl?9~#C?ZR%W0A1FUjUfh9n%>Q<$WvOiwD4lqMv3bPs8s^yujSpCs`< z()|V5JzgPbh#_bw6EsW+8m@arXr7Uectkb!5%YYxuaCqjJZGO7<*!BZ_}H0#stk5f)?nWg_>v4e>Qz`Pk_dC-|PNb9G=C-sb>in!%jU*#i{2FA!wQIc~kQ& z|IemRt{~nuEBxhzdRB_AS!Hz1YSuMtgp#$oXPxF*AKfH(S5G5%ObV`SUBhu@jf*nN z>9E1C30tsdBa678?%9MkKr%g>StDCSBU^RPTbgHEbl@X4!lQWGzljjfcB6?MIZf|E_3ZzwSApc@9R` zdsqux{d@lY?Vk4y^@noQA7<)55WbJ-o}-%Q*aJr){~ln>yAS;vkvt!XBT`AxX~-EjQ}bN#1qeOveZrFrf|*Rggj{-Whx|Mv505BA&@UjH$? 
z-h)>vfImCAV=4%yQz4*6g@LAF_3IhY0`@JVW2guU`!^LrMOhvOx4KTnKzWNeC{l4G z_;Ha+fQg1ng6UKWs8MO4^^l0!>Ng}*2BrO5wozG@#|DPVfr(J5JSdB{0=A;jieNfb z0%}wlXgySZjj~RqDk$sUq@A8%dA!N0U`CVGKv|R3u@z0$0Mn@^P@`Hv>mdvY${Rwe zjbi?-NmPgB@z$OMGg_+)%32G+R zhO<3r?r?TMLJVg|Fwt-)Fr7LBHF5&2htqB`oLx}buVpEiy0SbTZ8tC@+U}q%+8)@7 zMtg$kqyaVR1+*Sczef3RK8v#cO@>f!mdBgy17CfvoH?zThBFR@{oAyYo8^h& z90-~_oP&@M!x;}I8cqPyDG{jAU|@83?s-Iar4NpYce~=!gG|(u9QyB9hLi+1{*ok0 zW_j!mCZ&KeMH!it3d*`64O^kc1Ev!JHA)9YxA@=H@Cl&9iN6+^yeyAv83JZ#84Aj@ z48vAv84jk?2%ttIfzit2Xu%NS264_QaTMzN%TUP2@-RxlI=u|abvSVh9gTz-r3^5k zWek{3V}Tlt14c{#gBIQ)<5AmRiAtF)k9WufFrz~zg0c>ogsl)U8BC{FfErB!K2C?^ zUYChWKbngA{xTdijpdc?no&}x>7YzUWoO0&nt_BfgPl}nf(b3Nz;v1o)MyUyaXKVd z3-6G*sO_⋘*L#vd+pny$Z&KWYTM(tV8BwD?}^+(`g}4qeZ~z$cK!Io;u8i*8lDA zDtaA`{Pf7dw3y|wdo{EKjOmHWq@|!t&l}hZJmC0+XahX>3$f8gmWMqE7nPepxe~Sq zZAL=uL0iCtnyp|uy#>^08}Ol%H(%cvYIvu-jr#st?6jTb@lM$RW^~F#-iL&^(ta0AsM!yu(*dAH z2Z4{*DY=7TOsA7TjZOihTRvps{+k@$ zH=n|bzZjKHvpg>5GcZHU=b%i?7uX6hUxMj$2B^_lV6^&JV*Gcaui(I6hl9?sJj@pi z($}C|2-}IiK|<_A-+~D-=fQNk0MzIrFk1d!#PBY;gzElMLg_n}$GhZvFr!N@gR(C9 z0b8Ns3Ybn;fg1e?e5@|XowUX-^b;KT=}>~{8q4F?Y}dh67>33LMqJVnBJ=q8xZ z@-vuDzW_D*75G>^lB0I`}K_A5bQ*Kbt!D zkPuU+0G_}Ii3P!MHvx#d3BYLQe-T64@L@5c~Bo!V1?NuedA38s)!6{ z!F2vU0{`osD)EAqfr&ZBCs0BBX`kgT-`K{dIjT4qn7|C1-fC)MT->zEzdAID+dqab>cyQ*t; z^=MCCjdwrtT;+ctO#$3>J+td}|C`GEu7iT9K6l>0>^yHjfB5K{2K;{KL2;CthC>52 zJk2GA*H_K;^DY(tZHIjDD;(tDK10nuAMH4I-xFpE>qpyhAL<&1(J1b`mD%~Dy}$Md z&ix;iQfuB+8*@{ScEo;^rgDzhZFwW@%#A$S5&KaZF&(kn^L9Fz+j+DD-lMc5E;gwn zZ=;jBjsI%kL#Q)%?le39uLeHfqf_d_o9b$A>c1NJkla&sH{M8hb0h!Nz=xPm)jfDS zJR|B7Wrqg&Ez07Tt$vswy_Yvq>UZ%IXOtpMv%(DCN;?J3j*UDd<-02B%Uc6(> z|LUP=UgddnmCE_6h`*1hFE9FnxoF-6eZChc=tW+*pSkdZ-%{{<+k#%=Rb$Lm^PcqP zYbSSR^yiga=1Kw9ao}WV+QkR(da>quwI5i|*k$8*9k;p8gE}IF2J*s#%!MD+5&3ti zcwRNZT=hZPa%WW{uQS+Ohy6__{_4U*Y)aw9xocD!!7Gh4SE_E^nD`5M6ff^Hmw&*k7-INbg9hOZ{nR*blAB)Ubw``)=FJVi zJ`r^>bt!|_8Dp+vIZPommX{o7E@>IQ-$8Rcuas%7WI0UcN=DC3;8iD@t6C0I?wP=- 
zH;LDqY_9hJou*-Wg;$zlu4Fk(#)|&!COu^c8F&E_TNm`hrQZ!_=Ib9tqC=1P{sWUgd%_p7|>Yv!t!!(_|1H_hku7MSZj zK&NS#7V=7q%#|#M$#~#Tuk-SY&E+kJDdzcx} zn=4rklkp&*Ht_Nr&E+kJDd)kzd6+iwx|_{)Er%(mE*qvTyv|m09si()B#_Rohf?qX zl<2^8dW%=uX0BA%??`S@31_#rdA042S1p8g@M=3BubPc^@oKxx)%<%t-=*FjUj7|( z`Mk4v-lHbk%gbe%%jI1~=9M$OAwk)^%06?Iy#BJR5`>p3=v`iIzqwl8E4dG>X5LE< za1RH~9)fZQ>cJlJ^~`(R%ll?8Pd|beWJA#{@0KV`1}7&3CNus`K)PMh82J*u$o82(_LKI0WXH&<->kZur9KWxu6*T<~?ntS-h?7{I!y^*uu z5 zVg9N@zUhCHyZPDdrqQD)F^>4Z@an&st2ci1>X`huxP#x!4j%6%2mfRK@7&!VW_J%8 zS^pO}a;E;D+|g~bqldkA=kLgLsQQchx?}eButSxpg zNJ~x4x#3rg`zvnt7xoBIB$9Xr}AKP)h14=pWyu8_Y_+#{{e;*jfeyYHoRx~@ceBJWVx+3Si zNtL+U%4WCi|F2r}o-kJ7-k&ggFO}mx21?_oDlbvZRKlA+fWJ7Bl$x4`S2DQ01~4$X z1gZ{9&|{c|YH~v@W{5B9quLhRJ}<`jw<;L>B`-RUuUyi4Z` zY^1s4s4mQ1j}%Gg>ui|(@WL@^lsX6lolXJVS3RIuv_ntTT}f1*=QhaACA>I3oEjp{ zR_X(jE@F94n@XaoShR%PMk3eCU$2Sv61h`!3Ph%l8Uq6p zc?0f3iQFZ&+zqz8qPvhcx-42wNXOr@_*$L^LQPO{l9#{FZwfLU{lRZ0<^}o;Z>K45 zM}*fbw@s&L(pkuxDC7j?nF?|>&VpWVG`prXHRAH>smXW&A~q&9O;1S9`EK7dP|bOp z!N6#CTz+_6?kG=l2(NEr^;w@A&A5r|+#mr1ql>`8R#hRHXDTc+QADV6ZVa2~)uTmn zVszvg^ST^SvX(};DOF7AQ0_F$Cx;=XX+3v z&%$n1Jg6tbxr_*4G`seFNJY_&tW)y(GLn0U;<`ldwBnrgj}zLj^By5+u78vGtZr|KNJ-0iX9NUfU$RUd1ygQBetuehZ69*9d16cgIrhM zVmDTq9pt)$L^EOeI;00L){_-8rm@Dk-#`gr1w@DB4qLB>7i1kW*_h?J8;21t?OEjd zs5h(5r1b$w6gr+`d4>+QdXG3rnOE}cgGG+OHD}8 zM>a}MO-aDgMvcM(x}~PLn+6~w&W%)~_y;->Qc zrZkrxPXl3zo()6u_|#-9VwIYlFsylp1btX@ew~rvO>WK|rN`j(VMLqWJRP^7lA5u% zM$_GT^FgsG&27OpsabGnvtTtQB&3-ww3*Ew!}5Ydd5$gAW^bP2_EJJxI{8U?O08OFr&_``%xEmi`f>UC2J^8H`zl_-_vm30=>DGdxS_BL)TUgh;5 zi4Vwt_<#%;%?6d9`^9+kmUL!F`hU_N%~{)GxV&f;CowhwbI;0{={VM<(-7GD(NLg| zh5^Ne6*tmwo;!l&I$<8kNFqCN`*(^=qgWpA6d#zWQ(i_=r_n%-GJt%ya1Tq1aSa(n zW02wRNujYU4=;9C&}kefdvQ97eM2%H3God{CYb2)31B)+1Zp%1$QNESoX4blXfpEr z{j2l}%M<>mfadz2iiGe#4NUl-4yMx#phh!+SdhU-vp}pm!9lZO5uGpx%+v{Uk<@7( zP@`9YiT4K{O0U6S2%8U@D{KK0LfAquA#4$tPOk$sS`19QFH$v_mcU>*Tnd`&@C_t{ z!)0K?;hSJOEeC3}0_b%X^3h5Vi%8WFS_O-UbTybM(ltoxv=*q*I^g}0sy12=0~1z4 zrwyP?SO6R1jYtS#o4|yy&0soh0cx}rXc4KM-h#n!xD7Pd;oC?Ehugt~!yRBc?F4GH 
z3+Qzg_R($-t4Jl<1B;0C9WYa*dy&*B3#d^x&>~Wq_Q7BXdlxiU*nT90umfO1*g-Iz z-UDj%KF}gkg$}`BI6MrR>+l04gu^3X!r@UcosI!D`Vi=K7V*(XAXbs8^f4?V(&J#J zNKYWC(?Z0xTlhi(sZ`FCnSZcR-E4 z2i_m8BZMx)z@*{pzaK!EwEk=cUqM0)!&NXL>_;%2egbNA4S0X74jWyE!Eks3G}qxx zB!t7C!GyzKz;yZ*sL?H;*ICR*zkygqYNy{}5t04@W{UJrBz3wC)aWmuMWhnlfq@Cb zcQ${63Ss!p<}MQAJDY#Ngs^*HIu*cQcp4Q1T0|;SAs7sYZ0XKiheePO4vT^bhsD5j zDh|}B1kmd&?xT_*R*@=H3KkJ*X)sfyWsuaVED&GN04*X_sXPpZunM5L!YU#mgjE6) z!YYI5R0XKf6F`ed9aI$t!(lbhT!+<>5DsgA35PYobgBi^s5a2+Ea9U%AXbrv(vz@= zNb7=`A`L)Nr+Pq*>I3hOG&GnRz+eb#2%0PGDI|ojr@@40o155@cu|c@jU_z zhQnt-a~(ECLO5&&CL9KV>C_ykQ83W!Ea{^V5Q|7dZDfN*L}~{!MJgewlMK{I0a`?A zClv-J4BxpsK$)-%_LW#D5<*xQm=M+iOsAGWjlzKzkxCQ+gW)g|G}mDi62f6CFyXK@ zm`-hg8np#_ouz!#4#X-_ncBl5BJBWXinJq=I&}hS)EQ_IsX|T|m@piXx_~laQ7qD~ zNC;uwz=W{wU^?{xYSa^G5vfWV42HvApt%m8MM6024JI7+0n_O@phoNlkJnk+N6&*; zMe3lwu!u-s05e7UB9c1w18VdV&?3@Mih+R%!$+h2L7A`s7O4vfA#4Dc5EcujQyfqu zH_$&)9ImiLV=xVb#qc-?G}mK162fBwnDCegrqf`cMoB=gvy6|DLGpzve1}j9Od{4) zFjK5)Nb2MPYD7T)SVeUkrNhFc;qa@2GHLx;uwEpDv>{+Z+E6f^h5sJiZKui+3O{-hp0cSs#r7u?$wCu`r2X$AOuG9gn0=nLv#u0Ihc(pFIRG3;B| z3RT;{bb1@8(RQFkm?1XW0gG_96U=b73zRwAjjeFD2TZ4TfEw)udYu(~lm%iLqMfo~ z5+UvbGllprk~-}NYIFc-6{183VG*j{12a^;56V;>!d9p{45rfuK#h(7twNOPC@jL+ zF)+i~hoH>aN7xEyAA{+19H`L=px0T^N1uRLhN#dBhiXTO8#^aoI*KY?E76F#~PVilsz zMt{L1Lc9ZJ3h{3wb-D}G=pUd}h<3UM3##M(N`az6RfS9{2+C9y!d9p%45m{NphiW3 zRv}7M3>M+6I2a2N&PsqXXC<)}&PsvlR2ryJ8KBo$)kkGPEJKv3984m_@?fSADIqP$sw%cZRW&f3sslBu0kjHHrJAq^XSKi#XSG3@vpU!c zXHSCZR2QgG0MP5K=A(KbmLWQ*K1?FS24JQT8zQOGQ$UTL23my}N{wJa^(b7MgE3WD zmZvc&Q`H1pq3RhhotgqQY6i3j(H=}eun1?(!3<}?pv+kaw!)bWOeZ@~BMInrR`-z% zVilr2gcO)Wh$@&VLOg#tAS16qV=w^0jNgsPTchN^H-rYZtkp(+wgrzoICt$In2YYxt-Wh-HWpb%sfV=max`*ab0n;fJs8Jfw>#XG? 
z4~SKWSQC(75+SC8nL^Z&)X59fXb8|EL@9)Z!h-4nxC{qls&M)k4$4%Gz*eXl38vF1 zphiBRMTnA(UWP?D8x3YS%K&B0#$YR)jRnKKa-c@zfnH~AA7z4AhG?e=Fo_T+f|){` zgrrWBff~I6vX(?FT3>DUTYGr)A33Djs7&?-ckX2T+!%>gr<%>`x7 z=3y(Gy$Yt&Ye0?W1HH~VK3V``8KOc9VGmO}ckh7axZ4ZP++|@a++~C5v=6A!yFjnAu8;PEScNIu=m1P2%!6R2 zFyBK`r}u#x9RgZ~X{W=m7`i?H&CzuPoas7>tjZzWlRFA@yHyEn3Yo4D$nW|r~6{>y( z)9Dsaqu+oQL1Lk;-(eBX{s1$a{RzsP-Nsfp`wL8`J3x*826~%FvFPy%ACpA3TFzKPAX6%2hi($%15CfRw3f)a2QM?#1>$t z5L+UtQ#eqg2%trXcmf;=i%=B>W~gcf%2c(+R;X$Nrc+y>M(uzWA>x5>dsu|C4q%3} zj-bq0Cv1hY&R{w@ff{uIdYw=Es4IwNh<54*lL)apm?^{_Nb1xRsF4P=3Q?k7un1Mp zf*GoMgECcpuobGF1JfxQsL}I4s}N=C3yW~}0+`|KMNsCfAGX5TOJF+105$3l^g0{i z)g}u%oJiAk~+D88Vv+mg{aaXScIy0Fhf-WC{vY)txz=>Os6EEM#(^{ z5FL~Pi*S|-W;ja&WzIa<3TFhSQ#w#19q4ri`p64n8Dc06fk}io6wDOjFeG&v4%BD_ z&>}=U=p6}*P&Eq7P~`(OP#M9qbU=krt0W*a-6-k|@0X3Qqv^oRUd&eRUcz3 zR2>J?=>$-tPk>e-+UX=L!r3V>!`Y{x%-Lydg|pAVbov~q(HB6kvzd>+1hEWJqBAgw z5YK{{Li`Fzoz4L@`Wk2zqDoROvcQBE%bDrVwu;sngFujeY@Ih3KGP zVG*isff=fP17)gy$5yEN15Bqsfg0TgT7?)&f59T0-2pS4{SC^T-NjZo`v**?dq9l} z;QdmsvpK#11F;CvhDW-EU=kq~1~Y|N1WBEW0yQcIygx)+a0nHLMW`wPW~eF&%2buY zR;Vfsrc)W9MrDEbhlm$ysT?fAS$QzSSp`t$tRl9;StT%?Dg!mD0`xkAee?u~Wr%jF z3X=%28ki}>>PYHT1E^6=pjC(x)q(}pGjNR!##CkCb~Y$e^(3}JRb4Qh0)QIT16qYB zQ+-&3vj$*>vxcC|*;CjGXHSFa)Cj0iAkga!!M8LZmLV$C1SS#UGhn6=nC^(K zQA?oLY4cGyh-HYO6akY6F%rxaVib}(wE}9?8fXzBp3Am@MW|{EW~gch%2c(-R;cO# zrc+0tMxB5bA>t`*XIO+YCz#=^3n+8e6zq?B!u5i~lsk^GJxFX6Oqhq`d&9(~Cfj`T?!t zl;|Z`439COxgPr?Aw0UkgvSA3I>iDtiUWF`l8@XVmchz25GE1qATU#~@kr{F0MsZE zXceqNgJD5+{Q5%@7*~ed*`RC)Qm_@OQo->4IZz`H&?-)q2o~Wi9n5g1gED7cY=yHS zU^)#2YBUV!b;>>(4q_RigGRt4LL3QZ3UL&YI{APay$rMpF_cEbg6dJY+6H5)a2IC` zC{r~STcK(km`>w?8f5}4Ld5gh39txf6Tu8;lR%lX$=C{KuYl<^1*p+fpx3GRXc~xB zhL ztpZwwDAQ_KgsL@QhN`uoOw~GUg{t*nc*`58(MF(Ehzf0jML63GW;ojd%A9S*RycbL zOs8!?jot=&oem#u2eAxMr5!Md5O;!^LfnO5`?EzYa=%9CCVYflC0{&i5CX4@? 
zMHUj`o!o3NA#ER+PVWLW+7GmfGn5X%Vt70Vn(OgBB!tKJ!Gy;{U^*QJYV-ln>kReL z5fH0j@eKDUOd{B0V5VR{L{g`ZfEs-avUgQ{1Q=I_8`_|32u@-vRGk9D z8{R;TP6I9C#FN|4U=hwf2Q!>~0m__xiLG#U227{3K#jfvdYxfDItOAIqMg2mNrdk<{rtP@@Y#s}Lo+2n(tQ;JO=(sS3c&Y*41^du)ZO%V2n|8>rD0pjC)6U4=zB z`w`4=_7f;`b`4wM>^hiEH-H-51bUq0+R^wS1?nEw~*B7H=stp1Fb?- z=?_?hsz1RDRkuNzs=u%ms_uZ{eQuyecY#(RI_Mu*gtL2KhO+|r5S%$Hh^=r|2u!EK zK#ht3z0Q_CDhgs5Vki}ZNrYG&%oJh?By}nY)Tk8DB1Al)Ee(rMRR+vZRTh+~Du=C5 zRUS;I3P6o20xd$s^V&+V2xpbS3};n9nX@Ob70#-H=~NATz7*pRs1(V>LDS11EfBfP}Tqp3+DqhdJ1S2 zrbJJ}A{;gX%XJutgmBmxOgL-;rqeS(jhX_z&Ilhh1F;NMrXZL^sLjDlp#~$VQwUHa z8_+6Lh3v4PIu5ZC7*~dy+MsLz6l{em6%4O%12qZ-T7{`n7%akB3oyf3OHk%499!Wm z0!*h!phi(ZuQSp|tw1b8bWm%UM2Ky`Od+;KQm1x6joJgPLJXx2u%LQ>Tz`WxRd^WC z36!bojIB`R1jD=AK#jTrEkeX|+itK3XWhXJXFWifv!2)rXBwDJy?`1$3&c0G{L5Vz zxsvgre5x0#JM>0=Kk5U-{nzK%CK?|=7Dysui#>FR__9g5r^2P)yrTHQnwU6utZO(u z&&(*v|3%{|7d~pm*QV5$0WXT zz!I$ec`^2Fc3i^1f%v30MNi-c7dJ441HEiThXJ{@6YxoJ0{bAIC>EI=B5wV;ebUAF zpQCYyp+FkzrgVyfp&z+{`1sLT5U^`$4=s_$lE{T8a=D3IUu2=06FzNKoDzko zL2%_P>q6V{rnaw$5&O^EE+YEIXgh(m=q%?fkJpJRIxFFiu`13doK>CGoYkE*oHd=b zoVA^GoKHII;?PphS>M^f+0glv^J!-zXP~pOvx)N=XH#c0XOOeGGuRp8v^nig$tgP( zr|NV#L!DvH7S5K=aA$-w((QG7UHl{CB*OQ_#u6Fnln4R+XfP08^)fO1vwTdNm=tev zjQCcYl3{Sh@!CUOlG%cui5I`dvQx+cpW zv5R(yFlJqc2xAJcj$=D~ zyqL-{DXwHU4MQa!_h9tMpmZ9}((IGq_~B`8is41EL<-^@0SmtNWsPv<;``-T^iDdB zLT+r%2k1fgDJK`j#wY0RSnQ4N^qh5${4z_m$8RijU`y+C^(N^F&*HZ|ySeqDsWiA0 z`IrbMRkUG9dzlLu%>}SUYp{-TjA6>)rZL>ac2@LNTtYI9MTRqq_dI?|hG&gqS^aqc z@dMc zpU(2wxA?hBA?a*DXRvJ6L+J@@?HiiO(yXV=OQzB+mdnEA`e(CLo*eFH4$EUPr41hx zh6Mp=ZjK|N*%+63$nC~g0phZS4U6BfwRqT%`@9PKWU{J^uy^D0B=1L|a*eBD^wAoif2XcxP6K$OZ0RsshcrJor+brWJxj3lbaE#iZD3hU zB3n9xHnJ4{7chQcCY?4R<)h8OK(5{RF|RE=dn?aoQ!eKRY50VC3)y@kCb`m*T(JqR zlo(9cZ7i2fy@V9@8*zi(!_(<)xB&r5&u&{T3_}6R2yr4K%@nEu;FE(S^y4`8L-1y;3 zmcim+CnK@c7wuzJ3-S4vm&<!yv||!l>Y(pB^ z1+J3q(O5<%&OMCIvfMI!|Kqv5g|ApDw=)A;4cbE2%}%>SC%N{sX)q_DC|MRZKw(uFzQ{40z;x&;j6UB{N%{*CAT&hyxQ!D}U94WcCa z1DU4r_>(1Bd^vr58yU{Z_ni^{V%Zs{6XG3~ao-8?Z|08en7ohgvXoUH|C3XU_whZJ 
zGWT%-oFD$Jj|;;7uYFty1@ewtg<0zUJ}%-{s3=li9v@%cmx>|nqvF6oK9NjaSb|k# zvrSCtk}PTN!cxe1=6Ns)nSGssr!q!5XYyUJuq}Npla@LI(bF z$=`#uS$2S_2kWqm`+D$6<}SAf>#~$p4+i8E<2_i9rOZ889~uAFgAHK+*B)%>=l>~| zy1xgX_AAr~DYo_)c8CH=sp;u41HCB}h)g`e2IlSHO;|O4a&@`s8J1wZDQ0d{mSlVV zfLON5+@J(Kotm*c)+Kyog3Ps>BjM#;oj1kPDHyp}G69&^sEt+28#Q}QQZy^&B*k7K zvt(|Iioa1sQnX2~6s$QGlkUcy69;mgO?W4<^AZ*d8zFv6GZY3Ng#iQk(w#0WGRdwI za7CvNk4Z|vYG=4|j8DZgag^ytEqO`S*{ozjY%`qh2VjZFaO4{S;O-Pfuq3}1ZUe14smA* zE2cGzjmO`_ZfeW2*=ZH)qbA@l@@DQN{AEk2Db$Wx*xI1{w<$B7d^Dni!Urln{$mT02rVp2<#2xbqdm8cUB67Z2&wo!eNi;Sw~4Ul~Gv zj0wzM8s1M{9B&;0Mnob%w^a6?_2QJBDjEnJSxv4id0l0HpK$D_ zJ?J@>Vm+DTl~^0rS5eP13+n`SRfzJ`7a6#$VpGBY!tVuEl1*xU$as;ZSoCoTSY|IJ z1%rrtD{K{5>IV~+U}bw@j(GgN@Dg%;6ax&*jaZ29&s+(|5bt6sKk)-tj^&kDEVG!! z$05T~yqlHG6+e)rti=z43F6r_F)sAuk>jHTV1D6=%uTNF!7Sw`Jc;G7qi_Dp`($Qd zO$=shlBQD%(#{H41y$Td7R!|ye}$&P=A$%V?(Jg_61g|S{BK5zi&8Ac7#p8=M=$4U ziMXqD?uy;a&38vnhr#lWo|hR)@s7^9={5vexY@u)CHKZ*PBTMc>&D~84{Nh~Zz=c# zG#U4#Xc#O!?u0>@TUZp5rP<&0$?Ue3xV*zLkzW*#U^X_k1Cw03?oOeRIT`rTZc2#7 zjWT5T$Op_jOux+YM(5@w;}nsBRNf6WzPhBFf($REx^i#N;pQ2RsHJEO3O61@W4WJk z+z%TlQJTghBlpyki9}-lkS4Iq`?ODF_1H{E_jujz5$+h#GwC!5hRNc%EBZ=LWs6?Y zWR{;e9Mj21udvFjr*SLIogSZ>6qnP}Q()=l9Ux{MemEo@hf$h}%tRxMY2ZGZ4h&4> z+WGvNzcznIrp=7Yq{XU6-*EMVD&81qH#Lgf2s5l}3^=<;GAYI>cOHUuu_ zC6=%fqPN)jUKCr(ise`L2FrYqx@F9Gzq&V(&5s#uw$pN!V)JW2LW+xq(+Z^1bHwvv zt|4heE154tuNTpa&%_dD=*d-jU zs)(|Y#SeA3aKehAMEj88#eU6>7`bC=^y|B@<@D=*a33828vP2ESAh<)qWQb`J(l^P z?tPyb^SU?Z2y_V9;+92DKORPosUJUJ$=rTC!cwMwJc@KqKOO@QO!VuyMBY(}ykins zUnCmwoBp2q5ZPE;kymEN^PC>|nCBno`Ro9fXn9aR!7Myx^9kh>mV1A&Ct0>}{75t( zOi#h!qfddNw-YUopQl;%2lek~%y@s3pR??oCe4S;FJQpS^gyFY%cJBOR{cRuo@K`S zoBWDp8%^SmF8*7);=IacaL%gWSX7_r9E$qrYhd1a_8X-7(YL_d)8Tm}TzpQ5zlkpJ zA{SYayyNI4UWm;U{>RvNyv+BkOwJ#4msyGX1o5*ryN1S^%k%?^bmKb~t_=p|oD8q< z+}t}p?BX?@t|HS%KLW+hD{j}Z+eAMhFYl^=EynEsGfSnqurppa^DmB-qHCyxtAp#@ z>kaOe&2wBG#N%(Mo52#v z^e4;0F^1h|5WOOX?KbigjX3`T_t70-U?K~yTOt>q$h9UiQImzJM6M%|H=W2EOU&`l z21NMhBmOrk<+#5K?xTNz!o9ig?y++DTQj=g!L50t`S14`6WLH#^nLih 
zqy$}0aJn>Qc2r8BtQ7FDpR*Y*> zR_(E6TlD?z&An6RE(-7T(w6XZQ>SD{7QU1fv1Cld<&8gwzv`U0cbOEHeQb1O_66mY z?BTZKSzFG167ET8A2F^~xri~9CufJ0a%6v$+$Ae*>fDH@&NK*@$}P^S5nLeRYS{N# zHEy-dt~{WBgzZqNY%1!_`r^WZ?3CBKMa*856w#v3h45upqO*5v^Rw^nACX;S{lJJi zTJ?yDpO1~$Jf&eoiI?99|NY*)Jt-p-vM)^;mpy03jO_P1+zy`uKN)nY|VcH?$^vKGGfM0S|AFRP6_JG;^3 z<`FM;?34Y(r`02_>?|8Ge#{FI2kIwh2ZX#7(dD}e+5P9A4%h` zz~rot++(u0t*aanvSvYc&nDp!eLBQPoM<&WBE9CEtS>u`%GSLT!^7>3BCbqckp1k^ z$yrVBjWIDZL_}kwn$d{$^T?M zTWCu5oG<2O-#+tWc=I}qvZsx1n|(^l+WY&y53>>`9L^d%wq?sgr!%q!E`!Jq=?P?0WC$*I|8Bvblh^)V1bY#hM zqa$BDIXbe|GZ~R1N@qkqQ73_6^bdf*WaE+0jum=84dmBWA0WI&M|>m7l5h zjEz>8evzTH^A!u-7!stcU(?cYd!Spb_E9m%g-L}%uU;zSc=^moN249zDHDBV)m0rP zJ6gQdT1~AJ9@_C*Yo)_S2h{febXS$hfsSKQF^)=WHY*L^?xKGG_DOa0xC2Vs)-s`U zPgZf*if>YX-c`Y|dDA$x^NNk?Xyrp?TkTKOiWNIL;;#iNg$JEcZk<}9mVN)2GCB4` zWn!W8j`QCgP?r2w)Nw7UpPI5@gYwGEaL0Q4C`a9QK2sN#?B>I&R8psvD5cEqvQr5S#1W$9Ds}FZ*OZsze^!b< zcTzpG_IdT!%;t`s6;ssWi;6h9{k}}D(zctrpsG_n_VzoXH$J*MTD9V;oj^AtTmgk&KSI=B8 ztVV=xQM*;Ss{R^OPbs`$v6A|0PxX!zrBIPK9XDG>tGyb9sq5FQS8pz^r{1pDL4Duz zoU-#rQqRPGrdGK8m0TrklrraFoSM03sB*8Is{GJujAPQ6j>;dqJF4{(s;bYIy(%|( zXOHR_c2Vv5RB3ho;j?O`O5N21(;Lfm$A_qMC%e?X7~93OPb-thER#01y)@ECc`x8;-p9qOyiid|QvaV~Z0kR3|svM}|)vrnyPIKgE zZl03oBzIN!W$%%rD%%{=ANAy>OZF%gkF-;^b*ZNQdasjur~MlF#bsCI!VShL)2I9* z$F=-ZO?qdU@?E^7oLm~Lj=7kmTq?Foxzcl`ylDOx%Eyx%s8fT7$rrjcQ7VnPt^9Jj zno@oJPWcmgySiupIQi>avy^LP<|@TMe@A|OdvUc>-zI8@uHi~_oy$sv-W?|N(&E`wl(W0ce~z7PJin|X+rfd_x1$w$lfkSAB0DNUaJ zr*!AJBhviuX31;2e=k?=)X#o8zPhwsR;5AhZPJ`s8>I1TrYgxrrbzc@R!~lU&{_JT z{jbu&YgLs(o%<+zTy>NqN@2zIT0>>vyOpHl%V$Z~yZ$7%8#6#Y`fY~vTeGvatF2qf zqrPupui%<5?ar)bFL-XPJz)QC`|@euNk{3Wtdthd zDlA9!zH9f?+;6XUp@fw6+*qmPHkY)w%k%Pn`(N^et23nF%{8Sz_fM4$_Gm8ctk+12 zJRU7Q^W6@6$ph=9kB@b=4?pXc*KVmV#Sh*iy|f`hYP$LbTgKec(!Fbe@~{&HKWmmC53BLJz4f&7a{bAr?A87tsa28f_L~)VNaxp7mB+SNXpbpA zK-!+1CaIfR+26XkT^=^Jn7r!H(^4OOg4D3#2lm5h!BWzRf%d0%R+g%tt0$MZd(O75 z`APexakuP;3(U5kEOE--=wv^+ZqWvI^-z#J)w|W+_EaTl+xMm9*Lw|+_h;;t8oJg< zlgeM0S`;m4Ul_L6{@$=3r1Fu0Qq;a>_Fr$*Q7(6A==h;T7iHzAd(}%{k5z6pYpjmz 
zK2klLB00JiYN}4ZK1)5cB-&B<_7Qc*xXq5O-xYEUDKx?Hckdp`v4q9ypo&M-b=_U+ zh?3Fj`pUmJLRy7|2B-d^{yz9s^{ZcgRENJJsb@1AsGIB!95GeTC_xilYHwwx()RXA zN4Zmr)HV^4p79e2;gI1bl*-m(9;dZBfCE>xd?_PDy{?dj_BOGDKu z=NBlKw2Gl$?X)ZM$<~gaj{4NfbE`Y*&nz3-waSx@tIH=idcA!^so$ZVdita3j;yGj zs@>i*G-PRiCH?Frb@j7ps$SRb82NdMV{USS=mvc z{ytTTs^{4B^GwzK*Dkg4`=2RMaX+X(wCw3feWt7O(Wk#E_URv~yW?*wqZWUnblv>7 zBYNgBrOe0@j;n3PsbzX>Ql#b`9lKr{=ja@CLM_+AYAH&7||+HQRx3^@Q|>%3zj(tY*S=mr?K1sMxz&cn>acmWg@}&l(D0VE4^oqQSSb^TXw(llyXw{$%VY_lub+5 zDMvmVAtlF-lrKm>$loY#dC{dBvU^^DI_6qaIeg7%rDB6!aIA_rBpsdH}alFz=7 zqP+F{I>m8nu6iRVQ#n$(r25i7o7F|@%ByRtmv?0Ld{Mnx@1i=P^ia9@Q-3S&HZ|4n zyYEzHyfR<;KCYAF%YPO-`frF-KRobj2*QBd*r}tK=RewIGj#%DNZ8CAU8hWOe+C8v|T>H``rTB|I)PpxV zsfCXClmNG4n|Q0O)NOdOZNh=O_8mpW*j`DkX`eg(1$)oN{iXSHI@o_ZbI`td(J;wz zb-&$n`a>zOUM(rP)*|WX9;fYE))M;<+g^)pD!<>dgKb=vSK4uAfqm<) zM$&J31NrUd4Wtv!6;g%f_3UXCs@bP}vr!5j>b3v#afH*heoYX7AUj zpp@NWjMTVTv~Ak3?`+j0H`q%|yKX!B{!QD8A?4*Z2^VaCuMd)HpU<@KYjMc-^nziM zZ`)F-&$aSWlP;sAt_SN&%0K08{p&8Ye^oV zmQv@`Q#RYYUG}Bl&$3@zxmdERL+x#!`OO~s`39+N>(Y|?X-WCDDn99Q>kp;sKiKUX zGD=BBUau_GI#)q@Z{oZ5R}sVEtQGS4_c}|3vhUajM=8>jY0Kaf3&qu8H;; z{evXOFo!+!Y7=?&&6D=ND>I~m3-8${O|NAumNZX#tMhfK_WpOIagLd?Rz#8(?<^;& zDRrey`*zyjRrlMzuRlOO-)Xc>$=WP!f3=9zG&xQ>CJm5NDvpz=_&3sv`!##ZiT&;N zi@!>@_sp_i4_zsb>Uq)Du6l^{RLBbZm5--LV_!HT%?{A)jc)C=ztg&&bb3iw`?xFP z*F}*w?4_UHH%*?gS zja15(qCz6lCfbx%sqAYgRJM{*vPQ~1*W4;aNy<`aqfL8KN-OE{fBU?6j^jE0FXrXU zam~!NoZruNo-=Wm;Qgu?xLw8z{*u@YG}A`WZlM}jW@w3$(y?cM48s&V!PU2H3R620aF*;Ns$Tu%f&SRy}$OEIW9x zd`%A$uDSrOi~oh&+;Tll`_8FTJo6yyp%!bN*p zp@ORxbm`Eh|Fg0Jdb&Dv_t~jHe{62}H7<}|`)(q0&deWtud`udCW?VTB}d?4rpHvT zJOu7Ei~}VTV}X3A7`;BzjrKmalldugl5sDb%IvyY1|F)-hNN~i^Y)tybC9cKB!az} z0tXM~?Tu-Gb(+j*Zm4Av&9mw0u8%FmCUgnYhqM z+E@Ms*g7Q`WSMp|f25O{`W}iAAN62jCq1OSR-IwGhxLKt-fU|BU;*vZ8Bd?fb)&y+ z%B968Uk0B4{h@9AO~CMq>vX;-iEcgb4K^-K1OGNI2Bl6OVAlD%g4;4cI8qFyZN; zv9XZ_E5TY_WNLvzatl%Iz1a)USkCWN_SvpnKt^kx(*bc+6-Kx8^Ez& zI-q&CA_$+R0%z>2X1rP*z|Eb$OsHoq)JshQIWY!MAu9^VF27E{ANxxuZlWR98>`dh 
z#ZgsQG^o)&2@Wog1Ifo+LGRAx%s$EQ(5PISDc&#>z6f>$@wxrrT~j~QD;I-TD^^2Y z@rR6do-~Lr*acIkV$dOPjEa0_G8%&OK&=Rpt!Eh^Iwu1BsZvJz z!66N}7Pa|6m6HP?Hf}tOUSb5OBm=0uqZlIJ7$)O^IM}-2BN*a_O z_$GKa%o?s!4Tbx9(!rjq8SvE#MWmiL#MowLfl?PgNDU`|;c2Phw8u&G?^O)A{j46| z(NTxX&=w$nT?Q?PV-O0@gFV?QXvyaDaGYsAGkG3{LW3%pWONuVxYi8TdxtQ6<{|Le z<^*Qu*@@6N_&Brww>NMctKrH9V_?I&NRYJn2~dBY2OmBZhgZ&ghZV{8P~W=|>VDYH zT-kLK=p>Jai?5dgx9$v({nZF%ZjM9Udko?Gi#LJh%xTcDH4#+whl45M8{kwTfE_o&fp3<2>^W0>(B4wC?KE5dY#0oewKn|Xgc84D( z-2^WjhCp+U1ZdaNfUr6PN<0o><|n8Fuc5zSw!;)KKVBJ3-ZKNePF4jMSY)vojL0ms4m4O#XSK#! z7sRX4;$IW3HQyRguO|glSQt)!8Cpg)r#+|ovXW`b)%U5jlV8)HyJyfR3(M*LiQ(48 zzGtbYW?fWD*JPy`b_Rh8y5Z%`g3di3$HQIzDlY4?jwRM^Y+Ryx7!=s9vn>H2eNv|sUJW{K~6s&&gd>h;7{ zN_ixW%DP-){r;jo^JV&KYlk@p>7d|B%H$MHhi5X(X=5w;fi0o!FU_O^uNzX6bZ*gC z?Bc2Tz8I$BRj#%C=D(E9{6vZ#4y9*I%AuQfh*54ca;d%{B|7HIh_%D4`HYf>FWubz zoyvM`M;nP1Q?|_?sgy%%lxScAT{?1zKK*Z_wbHCV)@nVqRNzBNYRN%aYIU3z6MOWh z^{~L5PCuDO31|JVUbE#O?d>b14QCkBN?A#C!6aLn4Vz(IUwww!XeFSP4o{}T-WyY| zw;It2v3B6Hi8F2W={FPfc_m$xETT6H<}rR3=K$;e8O)p)^~}Si9$@PFBa9h$3AA67 z1&TGXV5asadY-SBy-n5 znOWa+c8q_lU~>KE!`IroY2)=bm|h=e=JhjcV4%4lm?}5W?n_gcy=8xyaT$bmxuyb_ z+S`MlnKzl`Wl~`DbQJUDYc&&ark_5R(#j-PtpPD3x^(Aw5iR%1mDyr)i1yZbPxo|v z2B{t$G|{jGazQjRW#l^DP!s^xY36{3;*)@4Wf0hXSQR)tm89vXUd+|G>flJ%T}EH; zDQ)^A+B*1I8l57>TWdbHp_z&X>$TTXsmWC$YR0)rTIowJRX$#u_G-ONU+nj+K$qVUuyJTsleO~m#xLm47 zHH+FQoXuY&A)H~BeI;SF*8Olkan|eoR zZKdthp_aYWK4}@o)<#IB8aFUCx9Y8%JQ!NisD&CiS3}x1heh;;H#EbLl5f z9jWYF>CDE%>9oHIO}8w`psSoE=x%Rg+GAunb+UIgo%8o9{c?Gg^~CdwDQ|sqdd>YM z)WAVg$|YwVW0yLgvJ~4zH=S$T~C83q+vxRXS6XnFM<_h0`w=sDiwIc??|;3Tm?Q!44TQCjItqM)9H| zShZS;d7k72Q=A3h`w~guP`?Q9jb_Sf+`U#E=@i5A`0_g9W27@L) zhX?-)fL&K;)O#iwKCxFotb8Zq?Kcx9c`Sh|KlA_;)CHchQD~}KBigh+8s6Bc2lF;; zgH&w=2=XWfimhc3sJj9y-)`V_Y&3}G_CADn40+vZHRrslpfy<{l))+$?qEAk8LCHej^1oHQoX5{no;q zoYf$E>lt)yV=jDYZ4a9tRzTb7TJV>x6MVU8GgvAg4fi|sK>B$nQ&SNQ=I41q*O!sN z?bmWpD;!=%$wr7W{q$`<-H7EO!ffot_L= 
z7fHc;rE;kKLM{+0Xu-i|Pmsr6LQ$c+;1MlNlvBA526bmJa5G;aGb6AVQ+LY(FfFH~zH72t$SUmOCOr}bf5$Q76;QvzfDP0>n|07R?{Vc@$t=#61Ld=OU7)V3wVd%teO`MX--;~OgQ z+QeXM{(Tg^`E#!I&oX`b^`S!R`LFCLmDveYb!a9n+89M?7faJGx1OSp-x#JGSZOA{ z(1DJBc$*F}i?{BX)ksNzQR>{u8&pW$S!(#SIdcbRGIl}h*R9Og zS{eG=Vp;m}=U-IjE>ytQG@^vy0Wp=_b#STxkzXAy_4*;3ljlkno zD-)4i#E5?%tK|-#Wgafzn5S1wnOEBnLoI$eSeO3}JbX%npui9GfgU@!{KXWg@YxdH zk5xox6=wk>a2fa;s(=NOF@Ou-!MH#B3@^QIq38QP0(^7`I99C()3lzzD92C0UDge* z`+10I(oh8VlNZ3W^Z$V#nign&rU|q2$$fycRsw_d@(@|sz)L^Mn0r4jGaX8KVCBFC zX48Ni(yH16I?o*fijszKmfaRmTv7-&Hl1K@@I7Fwtr1jc^kSq!rGVF-f1pN78Eg&D zXC6y6Lt$|+ll<=}(0Wh^oaY{3BuiCc=P3j$hID|}_z7@s!bu>1vz&esO2C|p1t3c5 zEocby27ax-v8q1l_8E^^I%6K$ZqkrMVBbcXTrm5*FwxJF%42!D;HcIty)z;Z4SWFj$$KD3d59Gj&rfuNw@~fcJrW+}@ z5I{ajA{&V%@UbQVYr3q_u4!A)37rNw&1W`pl6(OdI7PcMCVchD|*1wmt{?V^6_b zKiz@DiH~4Q?l^F&+y_l)ngZ<;>tN4!b@)$b3)8^ghJCudQ2*ou_;YgsdUI48M$MiL z^Aoh8{@>5QNmC9i)yPC?zSkMq+`AAT5rgG@hvD8esp$9kqfn8^BHcgH;COc~i0@KC z!byGL$C?&oWqFUWJ?IIGT>HQo=Sy(G)sOJ=ekX9%qZ4$m3V_c}yMnjl^3j0iX&7VV z0>^b_!i@)1V98nloo57so~3?p?w9+pr1}D5sOAPtR~W4BTZ=2kIB}vwJA!fF^&qej-=HVFQbn4eW1cz(`c^qIkj8#lK%ADk#0(= zp&M-ztuOgpqb_O=QWGYZQ5ANZDUD`LW`=DvbEj6F9;#`lw7qrcd$M~dThD#eytbwE zacM)!*ZVqE<3g-WCrx6$*R)aNZ-&y>|2&}THvhI#@Y_x=TwY4MJU&7Pj`w9oue_p? 
zx+G}zfQOXH{R(P_f0wmen=_N;wZ(doAd6m;afvc3#`KjYmNC|Lpkt|e+SX$Rbr;!E zS_U1o#j+^s$J>LG-K-EmPO3N z!9d#O*5WYyPV*P&R$g3Z-)P!>x5z1Q82LOF*Mt228(L6 zV2PqG;yjyye$OH}TCxdvt2864v?H+hmpNMRngUJ6)iWooZPETU4rt}Sd1%t730P`> z0=)ZQE&TBNAY9Pr2OE?(1AQcem;Y1)>jMPnlz0%_b4~`iH;7?F{fX#();=UU_W?vf z6{t`ijT&#bz}9IjUep4CmDDX*rM(Qk`e2AAuJuH1I+wr`SzGw#XbZe~{T2utR>YT- z70{KktMDI`Lyv>vpzeQX;r@UYu+QuPRIHqZqK!4d;$saUKc^5De7*{%TGfC@!5>kt z{vBYX`v>MU`@^P-Az<%-4Qf)}gc2g#q0{?iD2Dw8OFYY&y$5&0siS{j&7xOuU*CVQ z(r`J|c~*y6et0=m7=E7?8_}dxg(K99)8lD*;aui`Eu@2|q|)08J(wn+rF6;3}%OxRMS!nL|&>RHm~&=~L<7J~7RqFRi&3Ec5V13?0kdpubrTGLL?IrghiEGp-jX z+R3Vb`kb7NOQ8Nc z{)KUj`H$&1z|n2np3p`gHq!UM?WNa;ucX4}rqHD}3mBKFlc+S3M=hP=Nk{wf)ZQjp zy7SjPMm+rprTK9xV;M7nPTreGg&SxxwI5=b=!pspsIg<#9MERw?I+eHpR{RtV|B*y zeltxuzou@hzO**a?WP$&F?v&38lCV{i?Z@RORw17L1~V3&{~V*scTBlD36PCnT`MH zG7*YC%mMiq^pE-#%=sfmlxUR(bDj z%~6zx(*!y`qL=nv5K9dN2`MYN7V2?cI^!H-OIvHk(c>=YBe~(t!D$B{k*;T)-d~ z59Ur^9%bA!MD2QR%BcU7qf_)31J$p|)VxPG>F_0!=!M2n4E*cC+&wr%y=cm&{En*A zUa}3e!LTeaV_aze&Du1$TE>J|q|!$=jnaM<^^|do2UF;1#+5FPdC?)Ziw7sPTGya7x9j;$UJ6Q+Q;jS^(7Z+$T zP6sV%(<%+7?q4EJ)qY@(8vKK+uUi07KmnX_XeKC`GX(BLXuu!8B~jw4AuxT~R#?`j zi!lXb81wi> zck80ok)J3|F9`P49E6J$*T6?_=0cI*0?>5g8LHkY0p@R0M_IF%!b_`O!_c5dD0FZH zN>6h`WzyHcT8H=G;yXX&n?D)qY}dk7mb&2Ml2b5D-2|#fDWhN|25oaZ3U1xgf!gN^ zVanDnU`^g5Q^`-Tf<6e}Y09A0rT(ybQ8ru`P&&4zUWHT43{YaR3NWaD0n)n*;VPX9 z;3g^pPkwPo>qHJPR(vp~tz4ny-2GsW_7x*`$C*+28 znF9Y*%c7HT5!{A)pp1AdQwlzVn$c-6=5jeudR_!HZ04dGQ926Ehwx9&BM>h(W?kq^ z2Ya(4fumduoO09{2+Yd?yS|P2IOYTYpfd^*I?|w1+H>%y#*}&Vr4lBjo`FZ#oP-hY zwxht2XFz(d4!m613l3y9f+@cDnBXh^XkW(;#`9e^oLSuheg_9Ziy>dMCU!482|D0S z$yq>dODGt5-VJL=A#fWkKx!jN%yt(`nBaK{oVj@j(swV6?QhD0p9gb5y!JPIEh-h=_HIo#J>1!I%!;W(tsZ0`5~Ms=K_nT8#_ zdpQ;SPyXQGG3C=@GsnUQW5@qj{$MPD^1sa=h}q{j{7)OC-}FcRPweo2j{jF1{;%|v z)lBREUH)ov%Q3Qe$5z4St3_;;_6Kf$b2fR6Dg}RT8wboQ&|Mg7p`o*urgXSg|4-ZhC)*B!97C!z$(rE~ZtIfdoyqQtm5z zF7X=CIA+8KO{^m?Tj$}4^N*A5wdJHRJ7N6gzn?B^uFoqPoOv|N+xikQLKsLBiQPh$XZ*1N+a`99IbEwr 
zl*c1>N~tl?)6{2;tXkQ8pG<5zO^RUo@A&YwB(lx%B0m3mI=k3JksQgnOb*ah>_^2e zF6fLhJ7=LC)=7NF?RPLI8d17}>n^S&Axeu)jR@t;x@3vwN@MsbN|9ah$BB4*sj%yG z64=?!DWph#IxDk>CHB&rka!PEX3g~=-8v8Oi02kE@Xee3k{~U}i(^RVsnz&v?{?gy zu#YTHHN>@BbMSE|L$d79LcyrVX;$~?bIv%sh(*4*cLkD)&cbEqCS?&-$%-idLPd##2M2S?$PQ-1FiY z?EGIX_<@Et8oq7D+QjRTot{dp+aYP({KM?geBu81+e8B`||j2R{xp>IU4yE`F&02 zs`uW&@15mHJ<>i7?0{8d?^;XZeXWtR;ZoTe;SM&<;x{+jrj48T#uC2O+0J^LS0QaRuejE{rQU|w=3b9&4Hpca)dzHM4r-o&b#hS$F)Zz zc)nalB&`z0`PgWR?9$Wl7KI!9%F!}Zb}k+}%uL6PhML%G!5yw8L&eX>vb#nZb5g)8Pbvs^~F3G3LAHlJ?oKR*FEjp)# za5VDc9gOy(F8wgvWf6%53+?$k8+Pz<_p_nE`3&B=DHC6}vLJ8&NQyoY9Jcl$cKjr`y$j!k>=MwMyBY@ED4}?w)x_D8jg6JM;;e0-yB$>xE_}-XLsOhda zd*;t9{;75*$@RR@XSg71uQ*GH1bBRe^~?C(sZzsv})N#2To{<;pQ1}ciUAvF}X za2p97S4-Oc+DYCW6Fk~mfZIp5^ZVuRqp!mOyqr@o*^{Que>oC~K3KbQYB`6n5o3f8 z_1E!Q^R6P!B!P&orSnI11Gr%EOSQVsrO5Ht6?n;hN&foy?fjW_5$J=r6&~D`N<^y; zBlEc%h|^zXPBl9hwark&XF}CP@2iiZb^WsZ^1-_(t~3|5jLl;iO||6Cpcqb&vLa&K zF@AN14gU03h2L^%0wzo^w_sojURpJP3btM#gLQLo#ThMLa(zCpe)0{Pe_k6k80xSp z&!^!*krR<`J40Fyi{a?8e(BudP4v17k?lkp8?~Q?8qy6!(d`wo^}mD~4IiLd=Y@Qd zax3A99=B|@B-U;5#R=9nqCx$5-p<<{DPOn8%_F{|{$B%^J>1Ol!>d(u0KhH><$R_hWUfQB%yG%s-tD5);LuP2gcvJkX!WaK^ z8sxo)cH!BoW@PT3ZTu2o!MEmH;;UjZJUNhrjY?SFwEQxEGBO&^ob(BfdambD~2*Lpqe28pt1kUQY#%tf#5*_^+fp3l6Bj&Q@e9(s| z{PVjL_e@`z^zPh`2Nfd6-o|IXKNRC+HX7SZa!0A)A-_Z>9iQ_J#;1D^V&@-%57F6ElkWr*E_ z<80bqTXNxPFLsiMWZ&FL!>g12BSH&RmfvJS1|=b9cnH)PM-lEu?yA4jHsCB;Uv%eB5Uxxp;p++tOFwGPxs$_}#5$-LCYZ z)HHeamFhB*5cq)%bXE%btX#f$x&9c*VAK z%=cGftL$*h4jJ+W?i&SjYZU}0yAAns=cftgp13AR6ss33ED{Qac7GxFChR8tXgr^r zaYUfX81TgE5o>E{Di~an$xhdKM(h)_SY?T^bIlFeyGNd}N&N==K8w$!ySi2oA<;~3 z*Xjzi4aIS^lsX@>N|JY7cu`RKK9ThaPhs)yTio~L>3r@s9kv3b2v(2N=IcKfv(k!7 z1(*Nm^ZHgnZ0MY6{5N+6)=^xYpGOS%m=z)H@&f{X)!hgIpYcSH8EnEo^SFxJ6AlYx z+YR~Sa2+9$^Voa$P5IP6@-~sB@dCB3iG0FmhF>&L%>BNfEXao@yz%oHeEkax-n?+O zK+Ah7Yh<&X-C=3PFLl0hxY&FgOJC*`RWH{*uvg|C&%A>3?pPrIwQ=O8V?ACnF9y`atmpkFy5O2+X)t}x z0`7^AFP>DF2XEV}@~&%jv2RfhSGXk|sf9=qVU*68x4IvPd^BaZw2keNid&+;)mr?; 
zFXpIwu!6h#+UL@K&0OK2xF<3U=@JS`&!QRmR!E^Z2UEBEU}vre_Hl2+U!!KC6_Hv* zqNoF>J$8aWE(~Ht!6oQlwvL1Muh#16wITkD0kL_IC~W>c7fnbW;nw6gz;TxZc;o3$ zWOMJXOHc2~W0TvT(Xo*8;IvH-Hxb_C8l7jt(aDQpA)^5Bh9W|j#lnBJ|6t!xyzqkV zZ2oAWEFLO3!u8qgWA@8WWFr$UiY~_$^P@FWv7yy;Ht2~?T}G!i|HWmzXykc`=-vUy zMod0|?>#>uI@BS<-zb);E9;-cQcqn(e}Z{(*?ultE_D#gn9!`ARSVp>_!z!ibzMYc zRmSv_AD_3fo2dU;#_xL$*n4{oMQH^aM6w(7*>`m=qL$6&qW<6#(UltqMQ^=J@P^4c ztU)=$F0IQJF_xOFziu5rgIOyoy6nrl9!SJDKr=t~iw-tye()6Xi4=B?V^49G>@Kboc}r~-dGtG{7=1$Mc3RC-{ne*<|hITy&FrM4GjuG2OC*q&cbaQ`0Q*%cMx0 z_j{24Co>M`Qr}2RQ6~4NLWmqkow2JED|+!%6K{{!HTzJ7NH!e9U6qwHtn zRT92@^pnrih4JMq+`Gf4ZMg!!inZX77w{m_i8Ukyx*YLl=nQYRbG}h*> z{g}S}hqvv%z~0c16B=druyN*#Nbm2%?00BELKd;a_4^#+_`#GYo^`~F9%XZqf&17A zt1WO+c^&e2agMZfG2Bn1DMamk3A?)fFW#n3lZ0l#K2YjnMT(zU39IkyxGF#L_22vV z2-|5fm1_^QVw-F#Ik)|aBrx>?r!;bu?M6xXcc>N&JAu)>-WJw}wP+ zsy@3<{500qRU(F6t+lVqM>sP&1?B6Ea@Uu+lJBMG@bm~*lBBti{i7x=I2&j{R{HJd z^z^^559(}5M?eZOzZA`xx>ykH-7*603y0aKYC6Q!%849Xb6yzjn9c6FH>qa<_Q6$(v$e<*nw2GB_vh}=)r=;hH(|ni z+na@1!;#qMlM$~u)ddxab7;P^6p2qWr;txy(%B*VXZXUGVuW_bpj+z? z;gdn@(3{RHn1As^_|wOZ9ItR7TV5>2+D}eHmq=scdms_+ZBt}RPe!s?cE+4>*f?CZ z%ZtBZpoqFC11yXy;a;B^;GSfNAxB|2NfLd+JLD#ki1+L9y{jx|zDU5HplaFrf4Z=^ zUxt@2ZpE&%6cE+&o=g3`glo-dgX#bDxFfGM$M$1eiMH2S;r4|K*^vPcvfz1>(4*!T zT5K!NPTA*(99Efd5f44dExVIkQ<(_A(z?SPU+_|>4kNki6|q8Lv^eLdGgY{yC`b72 zW*Db*$(i?=OQ81a-}q6MJoi8$2(d2lyh_zw&T4les^2exD=$nI$t~*P@~hj(V&?+> zlanq^&)!Jn!rFODIsx_7Ch^&y05Bkv`$Z+nFfg_zv#Yk*_FXTrsb5Q;j!k)xe(bl=)6up1-#(k}pZR!iz54L&#JO zR|u4G|Ex&f;;<54%^`lJwjqDZMv@FT%*2j+W66d%ZTzv=9(SWOB#&iqQ~x%+yX-Yz z{kH_I<>&E&tVP6lpdFpNr;itWEaJZ_>csKzmxcV|FU5REq9oS)T#BS$ zB=fuW+T!ymjcE9SD=9iP5zqVkfp6P?h=0t!Mgg=1F5g=L+isWg|8^xKy9f(B-2Vv? z^(5YIY7z-Bk07TGPr`n=;iP!Fm0(LoEIYEHj{7!m37OX{5G<%Yk4|gf!!G`B*^zAt z59%7inV*t1Atx5b#;Gycv#d1J4EPN3j<$UPQ5ALWK&JXu|1&q*AsV0jIHlG?MKNE_@V zBSI6lWRipjL;M6ho73w}^n%`VsU5bWu? 
zPc~nj&*o??6_~$YjKho*1#{GIkr1=5Y~YA45k)>D*3#8viS0S!U1`rLf3P6~&ld}p zZ7Ih2t0mYChDv1m;|$~%caMl^TL~n`on$x1J|Lgdc(%AQK~R+&Drj(x5gZ4-B>3_o zB4$uWemf$8}VKGZL}Fgnd88yU&WO1(ioL1)&``$sV~j zbOO#2^nM^0S|0G^;FBAx#HQyy)Bwmp3k1>}e#|q@f_{)n^gX9JO zhy>31#%2l1u=EDNUKBhaZJly>^rw>On7kLC4dc<^bbZVZnTsCEC=k0r6~4E>g#Yu> z0sFr5CLzyv@B-lhu6fmSe#s4Yyz<`?zBl+U@$z@Wf4)xSvY;$}|DynTeV>R!FH4I; z3eO9VkBObEGnGVpmnL3RPvc4A8~IlkTzJPSJNzr-J@JsAE{c(V$WwZRBV_Srcl_VeescC;3=h}M$FIhZT}KyVQ%(o3moMQ@4C?cZ*Upn^v?9?y6Np0xKaruM&-gdDt?|;t zous!Yk{2#i!RKK)8E-fZKj=7z?xb2{`9e)>cliKUKEV#3Z7buo{`<)Hn*p379>=}8 zUBJ(i@x@1`Z9^MA9pt<;+;KvhktoZ#g!daicCFc7xMr3N^0{}IpV0jWW$uF5Wd9o6 zoc)$OK3Pq!ebnQnr+8x3-OtG6$0r3_5>B(@gS5%Gz2kW1v4qXm)hg_tq5<}W^+AFA zK`p-jvKqFkND^qt{=tFMUykX33H+0769p4rw&Ly6bohZy<)r!GWM1^_C>LpzMWPZ; z3#?Z3xVb|~uyL{qZ@5-RkT<_lU{-iU;6|qkw$@xB zODy8al@GG~eGn%o|1QOk#?-N4l&PTI#*f|Xkw5~&D%nM+HTZ=VbJ(r-n%H!15})$y z0imws32M7?Nc7fMY}GzVE8Wal{nP6=V_;uBeB$$IipPmwnXtHquZ`~^E~ zW%%U75p2N;Cv;9VR z#+|(={aYR1v&aCOsukiu$ra?N`!u|8@_D}XRxCevogen@x5W-&1+|wI4)eNBJ*d9Q z11n)md}Y%ZFD`zCD#rTR)oTzdW(MPN8*xbm|= zJ3obq!H?-<$gxIRv}omRvgq$@QE%v4JngPOKVj}FqP{8;SKa=Bq8p=e&DVd(tf2@k z36d9yUri+kj~*kbG37)h;{tJ=aR@)ywx6%J+{2gd4#UERTl{FnC@;3>ArI9?_~X6) z*w!`&XBI?ZU(E~ro$N?_ek7W2tUSWYZYm>w88h*MIhy?3 zwqTE!!~Fay&bVs3EWd-=14Z7BxODL$Y}}|Nnm!?w1cuu2W?|X*e*a1=dG!$Y02T1+ zpT;-@rOKnSB0exW9}h|egK75;V1rrFB)x&gUFECrCSzS*QW%R@OWfw?_9%#k99H6+ zCYT$y$DJQYPQ{|rI_P>sK1sc}5gYGbBRZYX$~Rrdcy3P=-k~}LT?-!Mo2ue)QT%2c zKRFOTlD*B(vFar&9{F;2%l6$N!}!0_820%- zBLOFIoRnnj#*bDm7P!~P;cG9J3-12&C$rW1*nru`iP_#70$!aX?N$*)-q;-PcI)Fd zPL5y`ZW*vOhyLORleQDrn;LBY_gr$sLQR0@@$7@5Y_e4A9Lu{*5KKGxnT-hW61aZI zBMXzGN!OgKr0#alSZ$w9P7I!B(_R#_&({>886P0|>SD%r^p+E8e11V*hPlh zYsjBVF#@-TOUXLn7MADsoXBc%_RO=#T0lV(~AH8+;Q%iof*Xfs$*Wf4bOMJ>V?dqNPk^zZ~WQ z0(Y`IbBf6LEDKWIa1TGx&L*aFKMQkbW|G@03rVEr=i0&j+L-8=vkp#YaO3wN0#AgY z9Vug6qGpV*ox1dYC_3}En!Yv;H&3KcDvgp-R79Qi>>ZUUnn;7G6v>n#e2p2JR0>5= znvja-sLp=Y77{5HG8Sb@MMR=V;yv#V=MOmNI@emyaNnN=Wx;hq+3~0F=cY(pdNu}< 
z88c$%63d-S2@o1|XfiUqDI@6Y5^k?mha>9BRJ-vo8Mqw)559DBJ&?rx@*7X)ez73_ z4Rc|)>T#mKF%-PaMseT1X)viaM;X5d=2m=e4_;XG543F)$Vl@^&~#=uSCcpn*48%( zcNeWC(-KUuLL9huzfK4a`AW3+vnt5!TLQqg1H$}Na!&&q{0QcGEMp@aIaux(__eURS=q}yFjMjW15_N z3d~QPC!c4f!_bY>e4%14njXrh>m5vahPw{RyPk2f%w~SyOEdPwF&0GmF4#a ziZdBUt3kfwJv^x_hf|)Xu&yPAJo@~e_B!cOtMAU#>$WC?A#Ri#dl6Vhnvd@JNAEu@ zgN%VL`YZn_eSCWauUb8aXI{6!_SRRh>uxe^=ZEO0<^2Y{-l3Do3^0VWU@-niX_=ezdv^837r_3V{& z&BQ-2RCxsITI84s%X_f$MG?$xyh}5d9-|-Z8sWCVB3$@95-dvA;M=GKnCg5Vc3X$j zqzPrBJ=!%Ym>9u7vp4~Bw_JstD{j!Gu6N){y#pL<`-MmSQsDO4L%eeHOS(*S&ZanC z0QE9?a$v`Ix?jQ<`jei5z%dK-#w_MN4b$+`f4k8>J`Gx=R|_95EWi`S@3=BykyZct zFXfl#ox^D{lSnkzCJc}(CH6Hg!d;;=(AF@@YFTzB@$ZVqkR7_jU;iE%-S?Z*)4ofA7(SV#|8rK&Cb^*!_k` zyd1>z1CiLMu#n!gSuXI1nT?efWjTkKqh#hvk&jTl5bZ+eL&I-d)HHra%HMPd-pBb` zK3x?lbY3}|n4I1wSiZ=O6SFQCu1n1olsNznM$2%YS`L$We1vdX%?ro@(Oo9G{ieBJ z;l^z~AXxJ$1pCYtsIPRHAVbFm{H>$N^DD}nid!ZeskEzOt+)qZDTewsQBD#<2E zIp%2BWCu!Tb>P68EOL28Gwq2iBJaGH!l7kO)bZpO5-zGgE+=1t(Sg1=&eM);oDoF7 z-EgMaOR}hiY%uoU1+IUV7MgpAlda48FzZz$ai1Cvp9Tg{(sLNk=)EGYugvgDdOWec z;6Pox+-SLV1yTRmKw49xN$dQpBrYje;G7UjemLra(M&3^i`-5dI^#I2W8*0mS4PcE zGT@O%aCF2+>OJWkId(Du=Jf^B@P>upG1G=dij4!$=s(<7xlg2PaiTz)z7%RpsKG{y zEX0PB)ZD9`%sKCmU0E{JAfS;<7ik^Uo6E`PVkO|WjN@0zh0%5|9g_a-8zIN;3Q{7> zar5}kBy!Onl6&PBiB39-6-(Xl>n}T;KUhLO^t)o4^hP$G|H!O*n9D_c)W`2jjMx)m zkr43V3OSTKkJ)<18l$R=L6|j`uDn)F9=}s&VMI1w{P7lnAZ}K+)blE7+kv#J7%qcZX-?X-xMk6Su%?HY%s>ZYVR1DR?PG) zQ^rfb^_ZEPDw#f&UhYA>2hnVqk25;Ma8&$UHhXUaQx>d>)pZ+i;?&uk{`Y)lh52gi zOi{ta&H(JzOrh_?gkT?Xg1KGgi)@i1XjkUa?=!5Z-qvD4^|mdfs3w@|oRY%f!Ts=l z3WFmf9Ekah+vLx1Ca9kXC82A+;;#*9q%U_CXL+tn zgj4;KMX;&xGL#1Iri(A`fZGdClNE_ypsC#rl&lR%_Jm z#d~i1Sy2!0!2}O^2a`?RJ3%t%EvBcvBDz~%3rzh+K+C}*(p9#WT0YI717QlpZ}6PZ zxa};c{_3o-Fi1wpm|4P$qrZ|BwdaVZ<}#%I?=VYnmAjq#AEu>M5-tDFXqR=Ap8Bub z@|M3kOdT3Uz1I{8+COZ;GtUo`MNS(zEv;y>qw_si_0JAITWAx{tFkn^Knte3`Bjvx z)WEl1fv|J?`U-}R;pPiA;H})FJw~vY9+D zb=M0H91t;Q8ycxf^)^mQNu7WFXAKye1=GgqF1XlxGwl5$K`zXi1%u+6u&TolvMzq0 zUN-*(CLyt$YFiN=_ZUYEiacQ7^&_-x$9eoO(-|Uu3uu^+Hoy1kCz@|KhIeypBe_}i 
zT-joPC+6qrigR0G<`N4!X?HB0X;UY(2|f&3HMMwb`eLZ;Jc6~aM7>t`Vpx1Z3!d)# zMt`g@gip5dRQ}5i?v?sZcvkBHhLfvk?#@Se?@Tg2yf2N4ks=r3p$`VOzM(pM9AI$b zd;Dp6lp2L^ffB_$)MXaKkgqM4eP0c)W%>!`s+)2#%iVT#M`vygeJfL@-bRg91inj&e0Ar4dzsV0(A?>ri*eUnAPpFOw7Y1exq0h zI1e||_Oq`!>*RYdGOZsha<1@)BCjJlo~5fd|AdFTWEj4yuYXmd2uaoA);mq>N>{W8&$+2LmSbyET6$`wk!CtiXgby?yj;>7NOVwR_A7G4+0vQMaqNfI)NzcN&S=I_BYEsDdMWB5}Y{9|LOmO^h#`at0P`iG`4m zY8o>=AA^nr@Q1P!KM(iH=EHlP=;oMr({Y5` zeA@W)5Wf9+0lsB^K-zwh?um2zHg z&=!TjT>o?ebiEyU4^hvNdKIYai}S=j>@=i4I*qC~2Ixea-B7eG3u8ugQ?rC~@E}8- zM&=xc&DNo)triBxMLzHq{^Du=DtOGL)Zw=S&-(6zM^9@w#Rg~U{3TrN~tSSwsrn+PRVOR?I70|^%`X1jk3_c$uM!txR)6s2&Jn@%&B zP^L~;z90;emfAx~@pTT3O-WVSWlp3Kpsq(Kx9>_KQ~O+pdo(eeJNrZc3cWpCHaC{j zkvs)q1Aif9QxbQ2l_$CFIg;`Fa)fbKzeBFBol8P0WN`etW8A=i60<4)IG6clJ!XoS zy6aK<;0Bi^ynAIH)c!cm6_2?O50h3i2Lq!yDZLlOsiwp#%7>7?wF{wc<7V!ikr|gZ ztBDKC>;q?cBT$|%#l2PBK~_#k=C(^#z?NG zt*wX@4weLQ2j0hXUcK8n&RCyw9=r?I1OALtXbNZcx`Tv@m@HDn2n{}ma;?LL5Tvr0 z^ZyykEHcn!yvh!8rdq1p7LV~ld^caj=PGk|PStbk7w+XIniXPKLM+H!UkU}|6S&Y{ z_sEvs-E`tM2Ab?X2}C?2EDMq1!+%~R2WE_*pM>|)H?IICH5mp`mRcW%%acT|7bh|=U)F+I;S53wLJRsBhc9YoOA=KGuJQNlEu zj9C;iivAo3K@)8SDzm$c`Iac>aqLzF;D#W<6P8q}KKJFQ_f4eLj{eC|qw(csv zQqEu;{FUi`OHq`e%LS(C(HGpYLG<|)#ZI_>7Q-}OGo{07aQVe+EF`Zm#x#W$o36@e z{g=*c_?m~yOMcR#}yft;qzGC_^;T(tBKg!Y3$x?d75Y6&YhX^9D3Kw(RDw+u)mUaujkMWFBKS6{dI%C4RZALu?oJS`aTr*E#)=$jbwzT zn#{Ulb>`<;Y382x9n^RsB)0opdCvuhsmr6oRO;(G=1o*Q9b7hok?vi`2Ms&%qaVpL zn+%+I&GlA%;;$H9;cObO5Gr64q79kE{X^hY=gqg5{1x4&O8h7jP5#FgV>*5BMcBQb zQvbE%m@>&ROu@B(u>P4C!(5-voV;DhJ6YX<3p!tD#28=vC(MM6$Nqr80r?SfmKgr~ zBDHX|WdbJ-z)o2c=DTtM?SH1sOq#C7Uwrb4MjS9^^bEu)&3gxK_wJ*1R2!VxG?m%h z-$8{os?3}c6+Urq1K;oU0|H-WLdOkL{#oZ_X5;Bdn7PuJZV2xOp=~2SZi6HL^>sH~ zEs$qS<1@&lJ}18I)nj;|r^(!YGYpIB&WR!tzMw+(X)HDQ4O*_g6=6*)sNs6m}d@q!DSdATg~Lmo|Cfq@&TK zX|5Dm`~9N8EiRe3NU4Fnvo?LCD&lvBH$ZLQ6V$I(Vxm&Vld065zDoN6B^o!#D1$IK zI&TzzQ1p^RTGTD7c4=iJak$2S3h9aRqDCuv@?UU;k6JJ3UXVu^Ck`yfkD_ZgRl(Pk zh4?J>E-CCCPc5QfP}!er$=#%H#O^(ZNvs$roimNJ+;AgjJoAOa(=QSG{OF1ruLxL{ 
zQ6%`C?o7YM)Z@O!uf*62UzNhVv@U^2YwL7T68;wncUsEeU`h*5Q+w>EDUcZmmr(2;(SVBv!qH+1hT$rZf z0s{Xk(9-3EUOT3cnc-vj6+>Tf<&q36o7TatEFD9?D#e2JiT|j2V>`82wgs#%$nr|o zM`-^I6+U(FHP!aifR5q<2tDrs##QgBw`hiLYJZTvgk&0dYBv2gRTjRix2IPX@<8gr zFsk2{qzgwZ0nXwvXdg1+OTW*7e{SZ~MEfV#a%?t~I_w02;u>CRodY}@e~LcXdjc#s z?*K*9^U%2@mu_Af4e{&3=rI)`Jv!U~D^8B1<=)%i&)-vsKGER)ECYf@dea8~TTogX zN9_;T^Y6W)fXa>()|p$=#qI@Qm{!WoySN+or^bQK^F;n>+;!18qXmnF=U|$ZJ#p>5 zPLDZ70_&9sdVh~Yh_XK4aHtzSWXGr0}U25lZAhm!{2-J`8RAl zr@eSLmC;k@8?#P9YM7{BcsZI+yyhyZ2W@zBmvH#omrYxxGlXL434jkQU})}PsD8MR zQ<**%CYiU0CMS3Cq^u#AxGDfvN|ut`rby7As>~CO!}R{2IGFt~jItJLyz(Ij{@KcL zy!plr;Ags85OC26p1Oad%X_0hezP658Q4v2+ z=fBy9cXb~6ajW6^;v(3QbPNA_MnLxDCVJV-l^-5^ z49>4{L!Yb0e8!>Gp!A<2thiB)#}0?W^p1)AGTkAX9dZy>+&c_uzxxEYzt_{R&*wm{ zPZlh63x^L2vglAwAa^?YGd5m23M1RHAoKT1-uAqQh|76FjQ{3B!=_8TyTxOCusD|v zPngdSu5N+d`Gcr(&w^iiJ`X=^I>R58ybpt(&!|rAT(r)A0>u|%pkFla{WLnC%+^{1 zlk!*bqoR*tmPsa#S@Iq)%j;s9%45)Pk>VF)7F{HM9nKtkN3)~$^Lwp*dHaws{!%JJ zdABJfd6q)V`qBL3-;L1MafF_lbeXP36Z%Xu7FL+~(_11Y&d{w1KUbWlbqlLuT;)_a zGt-@CO#@-~gk(DWXA>D)#Q_6vVXl!QfAXv$z4l-Z{ZBI$Tn;>ji*M6W#$`G$^*skn z*36*aCrR>U8n3|0`5tF4sxu3JBG^E`i9%$!%N&jlriDlo7ePt+Dn=A|a)!}*j~aAU?D zP#SEbJH|Z50Mq@r%(fODD&GO+NAbL921ozc{UcER1S(&4@H565qN{E_Ee+hvcb$}E z{yq4`b7>WI-b+9EYg}0ql zfM;ARFd}dim6#ch554 z2cqxo$*-;a0s*CoRJBozPum$xw>cdIogF*qJZ~9hNNWLQH#bx1zHa!gUkMq`yLdbm z1+8yu>GbY9T!zd&_%Q1Y?EZ6{cdam>D<7<<=G!#k=B7`ucuZPo)u2uVCiJ0yGG|2VI#j!SBUvs%t09q!mo#3;fdfUFIJlu6P|TF*f1r zI>Z^dLD6LUO$UCA`v$Eo?fkrFPCUuH2Cr>pna}(=vTuq9udMkFmKljNoeO_J_2Th- z+*wDu@4rQKp{Ra1Kk1e5&Pg43E2`C+M;+%ZZb{+aY*TiYd>U9dhA~Y255~OG3eBun zFhAy6vGM-)@NZ!yGw#JZE-N9Q(R(tPt{v@#K92I#ZP$LTUSb<~PF}}ZCVOIxmImW6 z;uhThwwS(VL&5*EDf(E``mv%3s~8( z{iyKt0F&bJjWh?{`(i{1V{#9oz`fKx*^M| zkC}$*aW`-y$%euLS!|n-$b6P8WM+AVV@;zu*7kY}qO6CQ)o$`Q@Fx`84*uf&rd(mN zT-V{7Mh=&R+Ms^eKWraY!vw z7!|dx%-NLye({s&k53P%H8Px_8YAKn)2t4%Jk_Cn|Y3gHi`gV~`l8(i* zYn9lrq*^j5NRysm^=N`$8LN_bO2p=#X5!4xa(g2t)7_t0#^7=ad#86C?Pw2SLYvo! 
z7`$oJJaIo0DyK>1_SeEqMO8W~R*@PVwPe@;BkI!`&e~dcvj6?krM3TpK+SJ2+txUR zDo)>y_p%AIe$-5=5vyp;dz@fB9Tn*8A2X<}UITabXc&7xXA}(<1Nt>*8tp7KVY4Od znH{4_7(1J(^i1-8L8P4@^ZvIfqi&VV4J0nChhr=t! ziTuUgF!|U#I?W-7n|vsf$%>Ar3g7?Zn9Ev3TuqBDPpo8COjKkCj=tgwe%*w8RtuWEl+X&k)$o*+U-}#Dki&tti6kyp_ISm6e@Y6|tuqVbZEd&Lb=e8Up9S`*?LO zSXzmB@NEZXJXFRY{++NZ^&%boyo@QHdK+!z&O%6Q3nwOL#z?<@%{(vJA-t0~2hGR+ zCb1g1WM`ryO?|kMTiCu44#m#0y0qF=peb^8brwCdS}ZyXs)v&CZd?JEX&esS8>HE| zq?cBY0_LG>a5QT4ALG8>X@fsiuFS~A9h^&FJm(oH6sXNT%Q#(I!|4pV3D?4YZkA3W zHGCR|e~ZqN-Fwb)ef}Fz?pZIBmTJH}v_HT-T)z^x?njT-B4{|rVf zEFmR%(ro(h4Wf8)JG)+E4MsVZGD>ks5oIBqu*%8_GbQMG;9AdF>(#u*Nw-K#$CZ( zr84Z+Z|m^++%e4KgU^|*$Fhj>KOOGUdr77(G6oIqv?1N5K zyzqV{bEDn>)I*md0<*CPv=l;1UHl_;ORL-JPO*Jz; zH3<^tOomm_=WtQLT)3Yh&d<-&qIvfnh{wpJTsaO4XIN=yfKHoUatuM;^leOO@D=I!{Z>N zs~hh+FCfa-H^R=tIrJw5dam9X#-vMAR5s>!H%s&L-zf3QdJ-_NX^x0p3Wq}ruhL^% z_JDf4DcvERMrY?Z<3+74Q0*U&^HxT|h6m%&cF{@NxMwEV4w-{ZD^KsP_k@%gi>Xv- z6t`l}JZOF74<8@iqDNPVTrSf<9F%!Vs#> z^L}Tr`eRD*znUZ_-~}*Smfyz%W`msDw0``;jKD36G||y8id|q_3^RASK|-?*<7e~+ z%v>Jetq^*JrR#2d^`Bu~UV?N%d&vK+$Zq9V@jq2j}&; zF*P6L*u(a2Otnidx4c`x>^A?3PW$2*-{kj9i&HYjE^ER%jR~x9^h_*YWXV+fJtV$Q z({cLv(>Sy;jun^HfWQA`G2u4Hxr2?xXu8Y@mNZDST3hn)X89b3Hyg{&z4#GN#vNb| zJw`Oy>H{5oCBDoq!<#vynE~g1ymq>e8NFf$`@6ji<1UB8*pr0eJ~Mv;}=<;1)=^AG$UK0rH_ zK`b0fVrv$ZGCEJg=2J#@x|X|VXg9GrZfRoORz&I}eU$0w(Hot*ZsZ0tRf%KFxpKzD2{R6G)Ariryf#yTCk zeRvN0%T9}3b4Zy!6_a7-3vRI5ThFp1N($I%T{kemt_H9E9Z&C^*u!=hsM3T}x0nUp z!2TQf4xJ&Me z)0zwxM3MT(e8KIFJHy`7+?b*ZLNq&U4-x^Sp%}P|?e+Fyn3XDPV2hbm>NpH<5 zCv&b`qp6QV=)&Wt$?(*(WZ|k*`uNv#oU9jwjR}g>_Lw+WqdI-sQB0z`!ilzcG}R0> zgPnDA1QO&LhUC8_genRL#_uOb!}maFlo9Bs=g@lr4e+{96~-uA(WFOdU_Ppj3=nlX zX+s#JC)r9Ilpm1^KY>?m*hd#E_)X_8{6xr`8r*hxJCU=?!fBg66Jz~Ka&UYUDY>mB zxG%Lw5EgxnyT+K4(Sk-Sp{}&~rWWMdd*SWhqIZLRrjVhoE=+jq2ihSk!CY(~2^#nh z+G~C3&p8aZq}yRo*BE&AXB-`Q;|HC~dXp@VCorJBnywz2EC|%q2iaF6K}`1untCRY z)&=9ZX4!Bkd|Erk+c(wS}6By@%?Ob?sKmH(Isqc!GJ?e)j$ zD{~F#^8ZDEFS1f_UP^s4RN#kv6*(=b02$WPm}d({L4Jw^E&1@72+fzljKEgvx_T0Q 
zqHIPIF$8pbSMl2SF5sgcSE_nJg5Q@H1?msabI(#<)7H|pxRto`_44kZ?RA1ODo?F! zL~eUBPeuFRP`GG!oy=XR0xuKg_~jcG;^~?J^m!qH?8@VKMDI9grzca{qD-oLKL94~ zJVHwwl=%iaf*?uiN*Wuq2HHo=ho80T{J*fj#5CTPj*t^taY;+zTyZHRIU4fY%T}Yiml@R+ zvGw)~0%56z1t-Y6O!+!j82TYc`<_0baw4aKTxh{QJpR z4D5oR(#`MZ@%NU8!RpFy6&E|==*6Ggpw2}X0-UDMx1ABNyTqA)QF@i`*t!kYnIDF` z15d1;^izsO@dEy{n25~fu9fWTue zEJ_Q3@upL`(RCeyg**DG1QofjgJ&>jOcn=nvNTg}33!fmMw7M|RN~Kgcqo=mhmYLG zzda}D=G`~Z{-QsLHx&Z2W&sSGYN3C+KahkyBdBoBdHO1|7^k((<(8Ga!N1Oj;NU$) zdZ1N__Up*PfvglrwJoB9w+Be!(jvU8q=ZfJkI97)6__Uq(8!t~%18NXPM5jZ!I}5t z!S~iQQ&a|z6q7- z4EHho=~P`9MMjYqrzL=y>k45PwXys8S-fJc0)>_i{CP2P`q}UrQ4s^U`)M+nIcP$4 zZYn_yEQDoOC&R6iomAiY8}?QRxpW^9nHrh0@JQ_}WQF zC<&KI_HPfe>c}lRxcUmYnP3W|j8|Cg+))p`j>q|9TaxK>vx%@Jss-ksf5ET%!{NQ{ zd9;t+!5?@k$LI$9L3O`CK18}3WkhcErd>+RO`*tjZkMO#v!t0vD`KE%=5H8%(41tq zodBC3VLX2Rgk8rDaZwrubVT-clKEDK@lZAAU3^~9`0_!h2-N1kZOq~qMJMy)@=o!2 z+Y8~}a%ouP%Y${fBmZ%l7^5-mJdLWUq5hi5bU`bH!LlgoA~%8wJF|lJO(>*O+&)38 zbRmdY#qzSx!=e4k!Z?+o*!lSSTJ&ou=YlOoGZxj&j(HSXf4 zTzCg2AwE>HSDp6`8iJ)wuW-%2et05zA8*>VLy%e*EMK{lUbL5Fo;zk#S z7mmSL>9~dO#9rVeHbU}Yie7Ev%*Dp%e7WdscK=fh5?&HawOtfRlXT}554S_9t_<@i zVF1kH#_%yW#Hh}gCA8H}f*DhqN`jBBrA8AL;aq2itor+m?Ct2L{Oy%k>>y70|1QvK zg;*BB;_!RcJ)8#9tWb_4x> zt&1xRsX&RMMHSDjH>2u#X-GY+O(ze$!p~w-K<+Bie#sK5UJyxlT3(=6HU-0T#}PzU zwUgK;>(RuXcJkgrh-F)h=-7$Y7;q<*oD_LhU2f?_xgX&5`gdH>W=pV-P=vV&d9>rN zB0ShyfwI$Q3IDEnPu2>g05(R@XGc;{?BPW8amoU3b2YesMi=T%x>MPy3h-%bHHyU_ z$8i$?Xc~*9?;FS>E}h#D`;IKjm4y%L(YXDj3@mjUPe=T&q>huMpzl|1w~gEqUvZeYqk(#fU!rZ%lF;2F19vKw$dSML)XYR2o~LPmwbmqf@n1Oob?q6K zUy+YinTilA5@fGt=mj0f=@q#6G-LC_N^&LeD;ckpN!GvVA^&!t=TNCl;Gn*X&N4K@a+~9rXNufR ziBMcQyM*{0+DsO99Hbl5U5JKtr-)6f!eOg_*z6z1jqQ6W=vx#E%MMS3@pFEm)gcvP zWGYWKkIKjZD4z*0;&Po`P z_xr_(l8z?KxSdEV;}(%^>M?lLISd~7=8_zp8DM(6iIZD#nAm8<oicN&DI53Ae>+K?aZ|%7GT9=4+T_YKk$)G1jT99A650H(kUy=IAVKQ27DhZ6y z#|Vw_q~>NiT|T88U-k|P`jWPi`4;)2w|S4FxbJhp#m^FOyCICMSvQ*&J@UcRgK^}^ z-PdG~#TahUw|wS zB+VbanKf(IG=1JH49<>WZ1%Zu zcO0GZ^vIvW%-z%BN|+Jjq3p_inz$RTZJdJFi=P3`Y!h)?;%Igx!?JMJC}z<(z4C#; 
z8mv?>LdVz95D+ViR`0!rF9>0_JygTQ+Y4acFFkhM9zRaA-2j7k z>X9|6Q}F98j>#a&%=g#oxGq2)TZ_=jthtTlI?u-kvS%hs4QYtx06oB<+U-}^l<~Laefp#_9O<OIn%PbU7U5m-@1o zuPd`YZ5Po)ClU928_iuxZ$bshci3|y9ZLd5mpyxrIGq>^jBzQxoK`{H{mf|R z_#a>`D@lZ}uac2&3j9(PIq>k9L4)1S(Xmdtz?EJR3LDe>d64p}N7Cr^NYxgX(hlJokk-frJeor+1Rrq!))BCrQKVabzChi)ZmfwOBp(lz zV2SovXr7b?%StlfkKX{O~eiONN-VCQbRpMelZy~O2#bj$+HWe=Nf%@id zJh$;OJYOUQ+pb=M4IO`QM%O4 zH0bv0rZA)E2EO^8MRzHWfp}4Gw%Ey@&T+Sg)2#|{_^6`5XnPmY)A>eyJ_pkd|74O` z+)Ji3yb<(xMAKhSzK}!1;*k9N6Pfk9nZ^=(yc;$X^hV3V$v-UQ?4H2;oV2D#_5P8Z z&eI@qJe$w3l%!6U$<$vNnt&^C?CtY5IF;Z@P^Ux(YJuJot8Htnc3iV)A zUIB0SuK@=}pWA+M6=>-x@H!8gG0w6Kl>=%7lCJGslz%ClG-3F$?KxCA{RSv`uccC3 zV|YIUhM%}UjJH{wDsu0RL-LOM5Vcs3AL&#L3wFlS6x%G?GISjEWOu{BtZ7teRR$k) zdr8h|WxA~UDun)Z1lhF#{O*%J;IRB5t+_uJr@T7>Y3Hipcl>z%#MBV#VK5c%TwM!Y z6AM7~t0t*;!WH~izAj4vOC=l z!Fo2A8B(=nONUBPetIzX{pJQXwCNl;Cq=SbrpMv)4dQIcz9&}yqH1w;?Fr<$GAy~J z3by&8{JKSI?5C3|%mYS|*;o0J8<{1~M18z~Pl{#OStG?*y?^CM>fSKzo5rxQ%k){L zx7O_UeTT93;amK^>IMdH8p}$>=Hr`!az<6~fms;)3C_ORiTRyDjPmCObjz;fE>Dht zeya?8WZH~m#5Pt^aZxOTaosij3(1izE(qQ=4F?|1KEDT;aiQK>Y82EY#bJ)2MyHnpVj{kONkJ(Be+Nx)It%aK{RiyOMG#m@GN zM)!LS_;&mmoPE}a#lxc*?Hw#sK!vFG2#0l(7x7u1Q8Xmu3^^_0D0V)N;N9BuIX6FJ zk&FF@&b*KWGA?4=qkRpuKGcKwTWIk84l}rO%cwGy$T_flONGC!`~e?j&ZmW!cVboLSxCs6O`F6V_$MtJ zpfG3=XI1@z?iKy-hZ65$s%Wopsiqv0`op3Ay9@Ov{8K+XIn z-r6k77nX-Xu3Zy1im4=wb|LiZX7D;ruj$Lr(_mZnW%#ABl8~cM=($y?aP0dUIPoV7 zK0I1QABN{*k@+(E3QmB0K{6cAUc~Qkw4zP3-;-;mm!V{cugfxi_WO8T^5--!b*>8J_7BnUe>(W);XBw^mI!ktYT$*&8R1fiMR0w&E$^@O z5^ufD!28S_?#zf>oG9@ay8D&*g=0D?y?h7G3hvRHWIumOY8mfbwuW~(cLtPio`46E z9Q+drp`&J0gIjD2y;Jy_8hDD+FCOdQ=4^M`BiRUJZzW-zyBGD1y#eA|ZbP``c0Ovz z5rR%$r|;{GQU7WI%V+H)UXh|2CqQHRtrk>UxJ$aD1Jsw1g~=M78G1C zD4#JGo~|;WlD^f@s+q<2>zMHCuUvxUwGW}{l@}@Qo5*jr%Yx>vS70!v4jvhf;4|;O zNB*veU+R7g&u*;))m``Lu2HdA*di&o^lcSP-loI9eL9mEZE3*yTsb|FJ_*)3bU|V~ zM;pznaIuFH&+Pb4vg<{A{I~*am9&QjYiYt}n}F4yE2JW$g{H*>2 zBZpq0>gkyfwj@nx^`eqKKd1^DU&T@9XDXCA5l8on2!}jBVLnxvD0F<9QfqyNFlIaEL zvcv)|NdZ5#L7YBJKTLYIPXmjV=|p(gg=X44B{wriL(~Qtux-3Rza_R{eOVQEbH5=h 
zU-yQDXBN`QZHF*D>I7MmW)FAos_{y*V!4g-DwJ!ip&Rzv!x--k1L^4Mql)u=F{Hc(JIsx(UY_5AfBGiF zlrbJOd2<|nMqNP>5@?ohJ{4@cNqzM4D0mYvKBW(>Pt%A*>MdH?U;;DNcHqd4NpxoV zTP)mri|mfRhZe~;aM~}FYufLP`9nSs>$3!!CHraZ3>A{P&z8>O+9$a$*g^7E~*|trzVQ4vodn(Y=17`H7i2wd~*nsA4SBG3x3(n2O z0`94KLTAt@df#aq1iO!<&HF`}w<#u&ST+f4C!C}si`T)P$V#GA>xl6ROM$O3=2OxW zsBWhWd@x!HH8pCa*<}$uCXj^oNp66*cS4=sQyM-;4;!Y;qq@=>q73O|P?r|)TXbaT z-*2^~)TjvTozC;qqiVLU(ox)4T>FQT%Quj!b92Uz#%ENIEk zqdqRr;rhNa*g9$ojZY~6sqQN9FI&&2Z8!%*#iF_L=U#5c!V4gC_AUhd3*y@jy~e%U zTxi`EQ6_+GIn?`|;=X>D=5PNk1qr+V=({67=-8+2um^VlX`UAk<$DxvLF9=86-ggQ@T(kok<+UU?_?&?HHCNJ?eYtJ_X;iE`OJAZ zG_DR_ewjqfG&K26sVw*%`vz*7D`C)0g})g_@z~fH%n76LJ$@fdT{?rm`AL_`;cKG2 zy#lnhm+}e2(Ksa~ifSde^FD?3p#N__-udLgZ#R#^cMlSIS<$0E4r;P|Qo1}wg}s2# zz98s%F3RV&X1Jpvnh<(vIv>>|Nyk;n(4Ub@Q7P;=3I=Ln|5O>iy7mDzcYFZecH+Fj z>b1PKi#_k6>&_3FB!J(%6iA6V1A9f@x3@gP%!KRIFujF7ovlc#a*jbyiVxje^9&qv zO|fqDD||b%3T7+yz~aCAM0pcoP*Zh{p7}h;J*++r^BQVl?9p|6!MvNen>y2_6{{gj zi5Jc2(uHo*`lxsQ9U#_G^li!`I$pjNR?q6jSNu8HEg6P~HWb4MmpX{~;X^%|+rVc3 zPb&U9hUd@Tg6B3q0`tS8`Hzb}f%%UXa@8f_wW_atf9C7LZqo+O{RC~nT)UdzT90sVY;KW7`0LB3g4axw+# zWd7{qWHEhEG0?0|4_hb-s~t~j174(#jL!>t_ z3R7k;A#;CckQ(Qg6>Y8hA(!TnYp$@Ao_h`R;0)asDUUj4XL*oWmuh6NK=nUm=Mk#=VB zRDE$9N69QAl#H2D6xJTr+6gHQN<}GYph!_kQBnO%C36xoPstP+d;6Zx$At?Q&N=J1)?Ux^e4nq)l(=IlUN!OP<@&G@irO-XJQ<*Y&v@O% zy$3JgWJ^bS)qo`Hw^kADR5hipXlhe$_iblSzgvu0fmzkXuZA-Y_rc`RB({A`EUP*? 
zMey9~!44B$G$x(HuHH}x8z0PtSumdsx&4CO{`(Ny%a0=;Nv#&WNLq;`9cIBN0VcR_ zM3TL=Yzh0|W<0CDs05l}Z;FTdKi9*KrF&rf-j}Rc;Z4fo%mjR-J)3@Z&=+PcGhh#EUuXLwH$YL@Ty~{> zCY+GMQbxXSMVpqdXG`N&qHrcd^lrl^VGoR<7kS6B|D`q2uP01^O}}E;Ltgu!_l5=- ztTmpEM6%Eeg|h69EV|0^IQvMxjisoGFkNAQedl|hR$lQC=4`*uI&Y|B&^m2{@>Vka)k`GX|;&eCk$A1_<(f9{P+qa_8S1tHoatB^i62OFQiNv43 zG$Z#<_AuG97XON_!LtSa?;=HIyH{v+f`8Se`7S5D>+*c{gF{-!b&;-CB&M@IP2y zg_QDpX|(ppD@-Zy%<<{EOuA1VUU&OF9{iQUYDUar4hzq0)8=$xZeue(zgCl3m)rx} zGFG5rmwPyO=`6S~R+|5CtAx|6v7l>`Fs&M%#J#(2#AaRBg>_D{w6akz{Yqs6{RNyu zR-wySFWkkg)bWCqf=6C)raQOlY5@J<`U?E1O96UL-UYvWwT7==OyIw4ltFcR(rBrE zEcf!16e{Na&{#|k4R@MD)vMF!Dz|vi1yYk|?-X(6op*7Hpe+ZhM&V5(os^1;HTPSM zrQHHoqbQTp?5BcJZ2Z)O-5Pw7PT5G)2HG{;;YWYDpRI4`;Sy!sZx|1C_ja*Goit|m zn8A177eYh1+oG6VSMkjEMYyw-XOsWF!z-?6!Ik#gxjr>hek|lZ6{tNA^<2;4+Ts_{ zL6u$DPFDk3=$m2V!Y@Ln-+NYO+ZQ+}t^>QbZ02qsQ|7$gZqj*68gS6kFfKIJn^G~F zEb@=iq4V{o!LBkHUjMixchdbA^;=pTR{Gu~!>C>?HBk~8pIJt4DiGG!{WO$++8!JA ze#Oz@|L8|!YhbIi17A9r$+_9=rAIE$g_XM(^RgG?*#Nsq+^ooQF8bnLxMd$tcbTrm zIf{CC`!7A->F_D2Su-B5HKVy$EpNE9&xU@ z(gU^^?V?hAR&&~3kFaQ6GyUGj7DpB?gLkS=aoiPgzQEoWE8&F zMX%cF_peVwYtc+d%e8ZQw$hw_JdG7ZN3kg42n^r9o7=HzI$vQZ@GRp>pv$!f+!GUl zui&i5&8QO?HNW-n+8Jx%l4-&n;qf3^RW%&$NO-~3XpiHk2+Sst>Sa9cPBK@w8p8KC z)Y%%PcWjg1e{j(C2;V>FHn(3L!us$qIB{SW-7ef;E{lkO;Lu`tj*fwT3$jI5$E3OC zR!g}lxhLTEJE?5a=SX(hiNpA5R|VxbPo8~q^g3LXU;{tT$f4a9Y^BZz9Ao#5B;Y?q zfpE=pIe1Fc4^@i1sLL5^=!K8=u?Z*j*~F1dV)AFm){jr5sXS?l_4mZ(tIOz4{{zr~ zbPh@? znrl@_HHQmy%3)d5_)XANC+w${Yktrx9xr6ojV{o4myO4nB{!*S`{eLvW*XWdre2F? 
zuEcqb7WBQ_uc)9G0)Jtv8-zmP`QK66B3izRG{TEkBIvV!(0nuNNTP-=J(Nc@B(^`vSXUK$)oy7xM=2VWM{>OE6M zobqG3eZv6uEx9E!zAA?{-+0JdEqKJ;{COPyJ86xiROq_Bmvy;JX$$^uX&1lV*bSw8 z3g8aze#W1!FyX#?O4l84S&D|;f_dqL3T*D;fy|?(qvJz%XntrC{r7DPtjT=FAKgW9 ziu#(IiLnAc(Vc)db~~ZnM|1fNzP7yk^|k2AW)J>o!w{bg2l%Zzqr6UrCHnmCDO~bg z8#TYhyn?<9B4zaW%NBy~<6a(D?5m0Lyj!`)sq2to&nRB>@iBMAWfi(tpo*A~cYOW2 zzcA^DCm;I69*?+MqU@J*k>2*_{EE^dj?+2KX*k5gEj`Q7u|Z4jVSWbhz%ECVbi{ev z<6ivt7mLxR1s)u!I2Zl5Z#K77k3u{0T~YOl>s(asI@IEo%-{beS9j;}5_B@mNa#?g z;t!OsK{t0khl?5lICF9}at!neNmvj6mrX?5>WI!|Scdln<`Li^`%i??vxtx5y< zOK%On{$Dp&QDnxRZ+^$MCBLKB+)d$v{R5$Ehap__$_(Bx=%-!19Px@jF|=bDz<;CN z@ci0c_{*1U!LzXrUldn_UwsQW&t+3NotCL^^1`2(l}_cPVv9MK$>O}kv@OuCNR`%l z`;GoJ-hivQstLu3UaYY9F@AJ!F0mi7?)|gSPUTK+d#q=YF1$gKQ>%o_W&H z`1WpL7VE&$?&Wmunh|=_wh72!NXTfmzu^{}OorWs3JCs^uR9wthD~I;xiw~IcrMKl z)y}Db=8-{s+3XHHPUaggu~*<5_?Ym+o3?X_s7ui(XZfwZvgrA(Sg!rMEq^d_2Uk;Z6mFiD&t1+iLBz~*Zti#^e&$^z(A+IacxJ;y^nB_c`0UXvPJP@=)K<5Ex7^ynYvwAVx|7M+ zJZmN|RW=i;JGfA06^OGvqJ`pZ73(Z7`SY7{U&E^x^igAaEB(A~DlCJ zsA`=Be{k#|w`F=H_qA_2Iz(x+2SXZZiO6xNW!pEA*0fGh1iKBL2}^`^mgVf?x~iSkWJWui( zE(+g<5Bwa57dRBdCV`2;7|F13+zWV3X(OIKPJyj0X~Rk7&rxZYDcsg}2vsSrqR&n@ zfl{+`*f4`5xOdNSRteVQ51JY5`L|k_=H1wfi~g_$*|*6led}PZ7!ThqUB`ya8z4h) zI2zEuK>Lc0!_()gDQdqD`=j@>X!`D1)N;X}7k0Ok>M43BbV{FNE2HG`8OadzD`Gd{ z=rJ4aySf4XC_kWRy0hZ7-)R{z758;+5e>djKxT3-@VUA@|s1G%g4TXO{aQD9MA9+-`;wA!$r3tU7owPlpiv~GC#z#gFwGJyRc zBuJ+G0o*H*A-*)=h<%@`i5*g|IR z;|Dm|;|_e3ZpRciHo`ppI0jt4fs?m?MUwr8p>bM2KE*lUHF+iYQsZQ7vnv!kt#@TS zi;SUvav1zO@g|&Fd4f)T*Mf(aPGT%#D^cx-2JG^>2<;VmJlvjdVh&ur3-!%=4t zt{UpXZ5O679g{n8tNvEBA+!}0h2DXhw>IHAhaG5iNZ`qIJrcN+XVKuRI;?$H8}E7R z!o1u`;ps2#BT~0Q9mv<7?Okj_kOFRdEq~YnG3bl}quGDROAybQX`Cn9e9?A7EIQ3OsW|DD1su z!rYOq#Q)OHVeruiaqpY(vgk9+hZ}%dr63Xj=QnZjeSNlPh-Pf9^YO+Xo!BGgGVY2g zME_p5!l~dloZ)#N`>hCsFQ6K~FvAUB*V{s8WJSS)lK=5r*JrV()HVrOJ!wANH5YEn zd_#p8>GKWob$I)?jr`)^gYfD<%vJs{#N}xh;l*?(__`|>UQ}Ao+Js%BeN(h}cukq> z*_4Ru7p%ckJ{4jQ_Z+Czd!O4~`hfcvkOAAyKj*~7#yIl53NLQ2$XEMV!p;YEFe&W} 
z6unjAv40$tb4cWV)n4QlPn6~EeP0ZVG32z@UxhPo9Kepv(>Z@3Z}#TVC3wWcil2RO z9ZbKvkz@B=WO)lyxc_-6EH-!K=hn&KqwNd0zir3i$*kkh*WZvmU!1{7uZe-*uP@-> z-mBcXAGz>*Xd(8V84gPg596^V$Dm~VCFnEXh@-*_p=)6(_pf+4Z+|co+O}D+_jAj* z0iTO-olZ2pW@`>MTM-WD4@dHSm!ET!B%I;)87a`{mK|*$Fphtf{~z44NBB(V{s+h3 z{li6_|BY{L)xz%|L_;ID#jIrNJy$BTH@$afme{uhgLRyp*UZ!%Cwg9&=3riR|CJ>E5lJHTuzVPda%jG-k=2`D4C|MQ<4>j$7}RNVXW~ZLlQ(Q|T)sHND9>bz zl~?e&Vm@NO0-EpP>^=XQsqqiE^-0MQ`->%K%^fN(7W65JSNfzP;qZjPo>!rvd zYdmhM=%yofi{Z6v)A6`xvzbs8bCfkwz}lDR;3C5_Ot|7%I43O=WjA)n?GsBQD{ePXloG!VJ7E&Jpi&6go%TT^XY}YvER_ zl`y+G6P{aD0}F4LHDh0)lcz#(YheLCdnp@tyy-`CJo}*6 z*JE(*?0l?rC)Od0mSIVW)4okX^8&uqNy^DM^cA)5eD0;D?1x5Z_>0rIbi3Nk+@3BhE3>bMy3tXIOtOt&KD(OzmH3iZOKbG_;WIv z^)DI6xGcoM4+U*(GCC)C51J_bJts1^P7_QR=`Wpvs0Y}Sr0K-C9x=!~1%^!RuOcwp=BoY zv-Zck*u#k`%v7Tk_S(uBXo^M|gQq1Ux1>4v1~Cm)1V^9(*HB!rNH{}2 z(!(+_?r{4&8~k=Afh!x{z|o7rc=E)@g2(y`+IejS-r+R^w#P3;&)QwFB$@~7My9|m zcW6B6gAL=LumbHKs9~Qhu*SC(EZKGaZb;ct1*hey<70-#`1OngWR~j&C!VdK7bMTb zW!|nhNs(c4--IFOxs9yP@o@a-=>bNn*9y8`3_(E>!$_J+#$H;(^xKTfsOI+na=Df; zjA9Os{27L()#{^|Ke0Hz`Ukt`b^=~_Vm#K!An>;(%b7X{DZ*>k1eiAZ0rqac%zFHH z8cTO)AkWpik@DMQ+%z)@y*ad;xi-##$#zjh)z7n#O=o}i?XG@qmREv*#u)>B<|47Dhcx^ z<*q9>y(+~J+45-acoDoY^%|V{rUhGDV&9S)g^eJrMn1?%lu0sBXWl-(bR+O!lhnw@I zxyO$xm{hw=^!pk3w zIQu3w^sYven|0HWbDB_u7S~yDCSQ*-n>@ZSE~PWM3kycb+rl2%Bbv=={_e&1u5Ur> z^DVhS7lpZIkr|BJs;QijCg9v$ir6X5XBp2BX|7SqiQAiG%2{UyF;?(l`EOcn8CmKiwLD3Al5F8oRHei(RN4L=z#%Qwq5;={oy z{0>U+97PG<4W)hD)WB|d(La=yUf2)k9=Za*S0}>PFH?ET-4c*EO$j`{jaEXh$E6rONG{N9fZFseOGJhW!!v!ua+!pT%&~@Hx znC_nem(;rQuxA6edn(1bI_Kken|t8R<^!DGr2Ty7H4Mv8F;?DX%wPD7VGBOWS!8s= zmvdj>$4jok;`|&T2Z`}cnL%h#X3RVNj^d~JUW2C1(%iqbv-z=+HrO&wQ6z<&vG<-K zICS_HzqHbXFVHH0b3(qu#y3al;z3Pb%<~RxDQOZI6)%JysH5C%-LLp$WE3ZHKx%fke!F?qlX^K3#SOZ@*oi*PZ@{{`=w%?P+rkUT&VqYs`*?Awssb zctRRiGDl#@9I}Vka^K)zTBl*Pz(0AMk;9eMrNJ386JfUBRNm}Z6K&CRoGWg*#2Tr` zLFw-iF!hNNKN4fe#jfkX65kKACD(;>IGcwb>rdv_$_i)wPn@Qw1OMP@G8~)S!U`m!T3iXxXraWctDlq 
zx(;2UoA#{YoCQrW`Yi?X)Jy53&rIQw;o;f{O^(VaSjoLiIEV9<(r|f30&^d4-_BUYrLADN^X?}(cO@YQdm@_FDZL56X`;snjvFDZHlJ^t1YZQa$WoFR% zN%GLc{X5t2HJbiysKcFe4uaXgTWMJ?3qHJK$oljtLGM-bIE%yf+~a)*U_?zUMuP5= zIBgtTBHVoR1X{pbem`-wiNF>7w}AUTaE)6vXP6eBVn#=r4U?^pP2r2>$!uHD4ZQW_ zFulv`J@@CeA7}o&mmYfalkPq9kgBnB;*PmopugBohia8Wv_N^~jMjEy?^7;t@#!gW z@#}n8AG?hY^mF0NZ7jlfgFyTi`g8qg-o>0WVYzz^XZtaJI1;|IY9Q4!Ie? zA3VfC=JzS?##1?NB&q<;9l1qsalvqJm_C%KmVohx_wjRI{$PXcC2?Pz1-G*}0e8Q< z0=-skaOlcI&U4QqKBmopKQtu8JD;8dOW(%Bl|J#%p-h7>@4EnBh1zqQ z)iB2-&*CI}e$y)B^*I&uc9{KO4yT{e#gUpRFf%y_+S&c&s*n1^Su^HvCDJ9LVI=gz zDW8U!GE4X)0|Qt@xo~ZBKhXo4`A}w0m!KOM@D8&Qp`MBacLRUtj;P&+#>b4gdbSWA zeWQzazCR1IE8^g&oe{V8W<6{gmF908vgOU(F2SBgOB|` z-wmzPTe+tP!ZE8Uu=tzj!qa8d&|lz_Ygpf)gPx7&i`)D0<98BVhI2bTbay(Ja4Hwp z(*<0r&||**bPVirRp;Ha1G#z`bw11Q3#ZY!147?uxX@!KoYC-uGqXJoYn}&jJ0It8 zVFAwEJ!2sYad!cC?U4xD&isnKR-B;i5)Q$$)1JZSBM$tx)Zg@l_5dyr3%$L99yn6I z0q%=G$t$U(K*zrqxtFrXq28<*xM-*fUP2Mv`QS7-bLUAe^AyXK{?{yYfyi@P{>2O3 z&o41QJ`?WjsDP;kN4b~p??AnkyEva$9=!jZ<@#?8&BgFiH<;hX6f@qCX| zDE1|t$4^SRNUTPU*qx&tmvy+Oj^6eEk%gGQwo1x6aTlKNNLw?q{>8vrbT4{yfo2^5>jtS6xr!7`}c#6#R8Dixcdqo{> zD)`9y1K8tC9-KK#8edU8gYpXGnR6vGu#0mQo9Fov&3s7VRi`5{BT9$S!zOq|+6`1t z(~im#QMLF#P?F2vCYn8WPggPegDx?=#{p`Z9X2f<1xe; zW*?$+S8CIyZ9?8$_YybJZY%p&=s`O=x}PKUg?aL@0y_^xb8c&dxwMPr_VviY3yFW& zka!c;!TUO$ldTDX%x^km{uq_|WCML8#)R{a5V$Nl?MWAQN^-s8 za?r~)727<2OYiFMqvefbV4-Rg?H=bu`wwMtUypRrca~qn8*Afg?-JTDq-+AM5hBd* zD?Yd=G6NeBZ)tY#SNd#rhp4DLn=2NRfH2mHb&JWtgA4m<4aWsg!&eh-nGl4pZMsG8 z3e#0dWsugJ`t+z$0Efecew+N5p>^kap;N^;EV-7>FU0#Txq1x ztzO2{ij|TuqOynhHg6@jnPw z_ca1(uZzM%53pf8#eTO1G624K%)A(b$RwjGfh9tZL4&o{29|>4kZidGQ)9iB8A4r|RJp{eyTA z%)utBPtgbSbFq{+P~_b-SR89YnDx(5@P>`@W& zbCSd!FADIp&=A&P(o5v+kc^#7?D2uE!I-@F61G3+hna7VBJrSbTt4Ro%)XGpj~`~a z>njml_f`qz?G>*J>3@%(yZz!ywqM}Kf7U~j7dNqfd1?I8(QfSN*~41{RTLm(d3l{b zSk+-V8gV%Z?|+x&X#V_hLXpZJSi+u;L+ZEe(rynd~@V0II_%KA&288G>CCd6(~d@vHTc$bJ0)9M!4e112z> zf3A^oc}d+pD$6EjI{I&u*!ek9}xekBSl^jqWW_SPKX zAAS1_^GbA)$KnTcIklF*WGaD@XdP5{SqCixYxot423-2WWG?IcB(&;GHvaF{D(3!) 
zb*Mb_E337s11D|1$2i`;3$y;+M16|KnYnl0Vq?%jkC>li#x`>B>h%lEp2RwQ;KL+_ zT)`A~f~L~<6l(NaqtK#c_Vg2NG`9Z*E^1U^%Dpt0 z|8DeSI_eYZIk27iq_CNpJ9Q{hjI4nV-l%m4Ez{isV~x`>;d@WW(L^&U zYD#GP{ROBc?IP~7=)&Wp@3SrzV$Anpw8X-Xsa zeRC*2b(e)xp9w6Bg_q#%v{HJ|cLThnrpSNr%){cTS8$7JGERYNSR$t!{&JS)Ju;KI z*S{~r9~-W6rXRd`#b|ThJZ%$ip?(6+$V;bHA7#QC4+%cwN(F3@-pP4cgmPDOdU5TQ zRZy$ko})VpVX(s^Jkd*&^NB2l(Kl`2+o3i5U*UOsDoo|>MNMVzD4d2963bxFTbd7c z)#ogMF6X;U57NR}t=2J-&Al(fYphL%pMeE8*XRkien|~HynjDdkVuEV-m!QiodHV+ zlcB-rWUfa!57J2r{2u3>yt-93-1LQ@-|bc6Px%$V_W7Fd;#&#sp?4KLz@Ow7rmFGI z)6-%1@jN(XzZN~9Ex{krh=A)7?!a4DO5vFgJg3fv;inb?XZmvb9?J74oG-e+9H#=eS0^(+6w8TRAwZ52TW57>ue_YKv)+V6>_-Zs-M zzB26P{qwNyg45`!oERD|QHT1;l2Gb$T#ao~t;j6@IP7*g!^RbsVx{r>pwB)DcExy8 z_P_QZ_U1wjR-!)O7jdz4WnVMC?`cmJm|ljyH`a;D7DRJ!GpZ4sD ztJC3^3gJxeE_D2Qrm$k^)zD+6Kf1egvZ&-dg*C6gg(@ZuqRV=50xL>~4YixcW{(SC zBOX}OeEB>a*;pf@9rNL*xhv`GdQ(|jwG!I=2rIg;eS^&UQdz6AEF8{E&t)UVoKd}U z6?`x7i?<|oiek6r6X)(F!l!k6sHF|#*-7OJtU0-sn|{;;`Zp|sk5!hj8ok<-vQ#f# z;c^WpT3v815Uuf;__duwOwY{AQ_A5e2o zj?vX43-HZY5oxj|i`wiQA$suO9(}~<4W;|+E4~)|o-UcK2*rM$!uxA%p!Lh~^wh^| zpzhltcy4_nRnk&G9PgRTMH>p5%cf7FuilebK3fdSOQ(uB-%5OIC!%)m+=Q3)AH&L< zmDv+nN%T*{Vo}gau?!6i~6sL zM)zlk?z+vvj`9(7w6Zo%$MT{bZaj8vcErzzo{{#|xloa_!WAVaS&a!3M2Fv}Qmj)H zYu8ypCA~T+vfjCbUS~5zb&D;=VvIlRsWYKNPTJtvCYE^e^>nJ`(HPrgF_RwIaFIfv zYH&oz4^jSoe^ExB7_D+WjuP9ai(51@=y@A*M8BCv>fBsos5|2dEB&$qe{zta54HPZ zF=KPa>d9{g;I=jwCVDTtnffdtvBtjtO&o-A#N)N*+D%H)Edoo`&G*4yGfo z2Um48p^_JG;j+47{6qo>T+JfvY1B=dRUX5ar)x5HPCH@h3y8hu8w%gw5^N&$fb6K( zWD3Q0p^o>3IL-Ajy4fAbRO||5#B6+-HFYuAqG&qonYRkRQxap6z*T&4+G;ekY&Du7 z+Kk?FEKZxNjkxGMJXrM*$|Xu6`G7K9k#2`44mdIwqUEse@+#zE;K*jHUBnH#`>@l0 zI?RFt2-&n5p~HfoXu-m(_-1L3;HPGo&SR-K^AU+`r`IFr$W(k}g$LY!;3B>fS`3HS zWSlyy1V51yN4vI*aQJB{Ce?W(6A4(X7`hFP%K9==pObK6i7hVi(uT#?tMNcV7^AmG zgMkT=cwT%ZrtXbr4_(n<8Xrbs)xk1svQLCVa4D)z9-t?s{Dr$b%JE7^Dd`rM(IBxi zVq*XMv(j-HV{y=QxQ271o6m}BKIf1cR-|JUpCAsH~V zGKHG4b|I<1?u3kHtphZEWV=L%K4a{{gZNP;eOjRXx^pRGc}RKfiXNz~7XGEl#wmh7}k 
zBsF9vkn?WXf%(4E2?O~;kdusw>|Y+Fw{Hfhvl_Ch+oMUl#Qe5Wzw(W=@e2i$?kxgK zuA71DPPx=qs79;^833&Jpp~G(f&;t4h$iza%3VAQ46c4ego#}upY+&)9&d9Z^JOOZ zws{{>*mwk}+`k0$;+_Kiu~bl-JDGCJP^Ak)3c-e~=iu&)n-r6j2qd35fy9k-2nqD*x4#i+bAdyw!sYf{i7Nc7uE>oFN`G; zj>;09lW!7#Rl7l1;AFzdWIs{yTaoyIqNpoAW#K5okR#0qKOoytZ0=?gpsMwAtRO56p;ziINvQ^2O_E^6U z)Tr9fG7}Yuhx-0Kin?EM9FEbjotAri#THLHjV{01-}o=b^^{vu+=`2p+49bj!= z1F+fu0;rgbQPKasCKE*?VB}#qS-wY|9@p$hPu;kIj_K?Hx_2OWm*fJ{GQ{Zgt~#uLLXlSo^yLuB8yUQqFKCMd`Xr}tH?2i0S1s2leG zfZu2wurW9TW}oh%Ds_U0mvO3uw!vaB{kJq>V>d*UY7JAW_P;=q_B8S}`H~ddbP^0z zuOSY>Z(vp26GG$TEztf_iV&MRl}K9J4a~yVQg#jA^c;=xgqpZDu@0Hge>Oe_BkecA zw7w)ls$l{_l$Fyj-ecW?;P@)Z}teZ@kMoZGJxAX}Ml`Z6^9Xn{p z7wN!O{Un{f$ehr9=SUGc_d)C~RpNf%JKz+hNH}enO_aHY0>jKP>P^R8Vq0}8FkUi) zc)jTth!0XAOjX9yUD3T{{~K*WS@tfenC4Dj_X?oXtzzg2?h=H}+T}p|RTOA?B}VHJ zMnsyiAvLgmjGW#QNUCpZ00E7e1b0W1IQdM6bej2y+%stk(IWhfpMNZ^BYOm`A4{mLr-U z^n;`c=ZOwA144RUH{G!D9CdirD=@Rvknq*pYMGziM4cUa2BfEG6Z2PU64#y8Df@jB zh{Ix^iNufk#Gv0EVBX?e*x!rdqK!xH2A%3 zJk2FcAyOP>Q33sb$XVZZk<)Ci11;6F#G%(}#Q8)EQml2DoTQ~dLpml zpR=CY?mU^8{Pa2qFs%iDWu@p;!vaD|dJA#ub$~F_rx4n$?+JTFX?nJ;0+ACmpS*lU zp89yM8d!f%CUVzG6JK_;5MgRvU_q`1k$Yn+aX@n%(H(c3TGw!tz7i+wC8qX7!%sl( zZ}W_p zq_$P65(;|Kl7QVIf#;s8BW!x5ylDN#OdPbkNMG~kTS z5ubW2>EI-5V5>h&7170@d|M>R&e;lHyf^@Ms%`}rRtJN|3k$$!%bCDe{{SU_*MZPt z#KEJoX5juD5+*PT?C4)ine6f){d%H+w1Op7L57 zwDhS^e;>~U+}mlu3^he31$?@+pD>?Y zNM5v<3DnHK5(ak301KZn-|PvV&d&jp%_2bDv$J4QRvURMdn$eY)&=0dq6-{pte}?e zi3DPAWI$p{B{8ZMrA~~DVGs+KKD_=+#{pSQGY37q#HlC!GtuDyc98Zk5 z6F^=M7%QZLA33SOGx!vkKR1Z_XmE_odlnB$Hq0eM7F1D6n+7RL@(UHa=MX4pl?9Fy zr9tBL6O^}3I^fP%5SzO!!SV+x#Mi+jAT>Fku+E-G?$3z>M_&$DP1q?z zKQH`kb;G!q^wT>9>U$i3mb*DPWq5|lmDV7xJbVOH8-}dzMq~mVnXQCpYBVLgIu@K6 zEho1AIY+KqLxGjI4GD{%J3r(rk$5>!s+5G=gedxs2%37$jG}2K1aGSLw^jDfHe!3*w*f z{@AS);`Woh=|fIFMC8_6)Etjjvk0OIc4wM3AA9JT1tGum^?W+HU& z8De474q9`iJ+b3J1<<-_L#praC8nKHfX}X`QRj4~6JCKXMAJ-Q6|~_JbwEp#P&)2H z3`#P@I-OJ0^2!8a;{k8-K+I~QG4Q2T)W(I>Ri))*pr?t|igH6xc6I{2BSaUR?7l!w z+!#eIzUczDWJ2=>y(u^dQx@ZUh1X0;twc1+h(zp!$0tP(RmY 
zrMYV*rKS85jMP4}GVrScgBgnm`9%(3+z)he!xRji1m`hsbMH6baqgH-iJkSeX4>s&ACzkEN&K>m*x>O%t?;c?p3ydH^`SxVZGAQfSvhRwMsEW)@1+P0 z+ZB}2@Cu;6d<{`n7E0ptGJp*4B1$ElfW5XA(c-5K-kwI}CT9Qlf3C+aO zNNK{;_XI6h8bQWZnGp5=vWS^M|A_TUIYg*$ES);*9A&$&gzyzRM*U;z=^x38@Q{%6 zeIk=Yyd$R)rT!|!NLDW0J;W1>?hR5(VWrfJd&<;lVg=!wrbFsY5fSVgmJEePyg=~8z1;zj{WyufrGHqM)t3qR*kx8%CfLv^ zpZy34^(sRAehJ~eyo=hgt%5L0Oe0g`s)*A|Pk_IA+H^#48M!oH27G<3NQ@i%Li6jN zfISv~(mSF=2Py-?sWpa_FBU?Kum)@tJ5Bp-wDN6fq<%&l{$!I_iu2#3*e zWO#=WZQW!_ytuNHWO{WeQCcThrFEL1EKG>&E8i1Qt0ai#Hj;1G+hL0lbKPUJMW)8~$9636`{iSngo#M_?~amRF+=6;@`q{~MEx(JB`?*vN)pT|_X z{$pUe$CCInZ5DB+3sL6P6NuQH$>hI3R)kiy52*e0f{OEgNhZu}uv&2X7I?Qhmwx;2 z6qx-}nJS92QSu8|^gD)}HBOb5`rt?> zH!Pr6?rj4{<933xm0sYp!7!EhRh-Z)nob#PA5Xa}Bhn-=7W9@ZCKivAASNAHLVlYh zM@ilL31-}#1Jpn6qN@ws!9!vpm2d_FG5d5-?fV~?Ja>dz;p#yc(wao#PCKy2QHpqx zUPXl67^Che4TC+DB>6H#miqKK3h0zQCe9dt2A6f4h&<)1pjG!f@E(~&h;17J>Gi9r z$aNd&Dxvc38SoN=qc$4)HoPTo~jN7kI?=Jg5{(LnK+>a6`VmqJH zk6vE}#3Pyt7!vxeKN%D5nyzH=^yBm=+e(mg@hEM)m>?=wI#4AqV2PU~{ z6F)jB;?mu7fZrug$IE^srZy#mA2nt~=SDT+Zh{K2+;xn4-mOZVUow>#K^Mu7QTB9- zV;KGG{sG#tLxL!7I17}%p9cx5Q|bPvri7f54RzfAe<-^0sG7bnUMi%NG>~r+NfAwN z?{m(h86gT4B9)MNh*C+Rd7fvL6d5v=qKHUJnllxplBtkrqEvqO_uqSK-L>9b_q})a z{!Dw%q}Kl4M0V&BTXccA^J00f;=@7>e9cs9-0Q93u(DUz1 z{9f6I3vcv;MWiB*)d0?om%{}Iqv#{4C{~>}8yCMc$GRvzM%<=gJBL$$i&SH!xhBrU zk1WA$A1#~T2@akM(V!+Tr zSVu2yRwDJfvv`MMHQ?C^S>|RW0W%c?N&Xsp8qC`b@2~lynPCw#5GWtqfSrk zG$!V!ZQ+jUC_evpg7(Pf)BOvs(ql)KKxN7Y-s-&{ctOIZbmPo@@c5Arp*@F*tKk(q z;wJ*uqcu=Xw}X7oQ7nD1l-#a#gs1TZybWqk>AMxVyf}qS5^Spt}rw62kCR6 zDaH{D>tevabsba$*O0vfPwDjN9e@e@;ClOMn(Al*%iX4eVim`0W$p#>Mk)4v>P_Nv zNCkvC4nvmi2krenKgr4E(onR+9)uoj0GTCwh|`M`P^Dv!|LxccpP$=7S8WH)YdKGj zsg?7r1s}s^!E`3??hTed1*u@cTY4gS48&6AkWIe~S#@bB*tM-pOrfyag-Yb;6(5lW<0uCG>tQrwwtpF;mF~ECjW2ia|Lv{%nSKr;4dT_FMA(KrhVU z@I_v&1na!Dk{vf$#txsYh7=J^P+wvKms4A4r2Su5cuSo2sJVccwg+S1^RLj8{{-LY-iM!O`e0Gf6zmyn zg=2?TP!l&BHp%o4-0xLJDNk`G`o0ND??yrYD}S6AErc1f@>wO{m@53f1a5_65WjG< zj+coK74$y`maqCCa^Dwl5Boq~%l`z2gm+lmKLiQBE1<3X5gk2LKy3V@dC~G`psdZG 
z?T+__A1^-G&Ow=7l(|_R_#()XnoNzV(%{-NKRB4N4?Yw>q3t!gXkd}G-ze;d=}I5 z3j9n5LA3WX{o0TZ10w!#SLhMmxbzy*mqxRwb6QlcI0EL{*298$9UY*Is-QbQJCh@TldfEu_-f4K{8UBP&+;(ET|*G-cZ(I%m>e@XzVx zy)2M}Th=l3_0DK8lNv{bXWb{M1KIefOcE^rHG`d0G^n<=;(y<7kkD%haQC?&T!@!o z{iPGYCZ?Zk$kPEm6ERr2W+DubaGHKe2s=D)Kz^whd@YZK&@oNC&kj=&KR;ME%@wam zN0S-DD)2a^2Ul+NgBAKGG1fx|@!K#dvnxD-za+CWAp=GW|Z{ z3R^yi!0#Y`{PrmpRG)0H%kdV6dhK*n%zlyQV}=lA?hi8kj?h_>Kzxo> zLFuD&xLW)atiE8(^)0vQ;Lm-8?DpZ6a9od>Tp&wN-T?Q^UXoph?$I7v0Y1+4sJ&R8 zn*)9njCndtTk;iXzH28TU&F!4^cJkIISCI13Zdr6Q&>7*2AW!K&?yQYIFxS!by8xu z+Pi-A`wWt&>>Xt*1?-N>Qc^i(TtO5^lj!!DA zgSNwwY}H*I`g4B(q?~vLlGiPDTAWr;)%HjbmU{vo6{R30{gb5Tbbzt?P1F>82HV{D zu(9b5)fx#P4*pBg7b^;l{~A5M6f$lI?0Zj^j71o4EjlwrA2eML~GH*aQ}Yy$6iSfyW#3K(Hy3 zO3Ixhar|d+#6N)eRrk;-d}S7ArOMvfUWIjE7C~kDD)@HeCQYq>0cll3M8c?&j8l{& z6P#S3%U~^DPpSdsj&VdX_7ZvMQ3S_dD}YwS5+*CF2@g`Gsl06#jKq6GiSTZC__>;{ zxW57Ix@E9abPjC4_ZotdLh)?t1FBH-2t<;;;;2S7Ng40}H6;)9A1a1Tf~i=0I04q5 zs|1lfGUzl=3TM}9Qr(%FOfaSz!V*TIM!1{a(Yp&7wn6Z=U! z05|OmzSS+z@wITIM@H#D|k4l3cTCYA^7Jtx_dN~e14nB z%N$Gs&nz!iY~%*}Ek2P;Gw#yZ(Z`@WVHgFn=CCibPQa8iv)D`SY?O`pLSoPl>Q3hX zio^hNy21s+MtD9&1w^gVsKam+&a|!?n9Yv0IA4- zN1YYr*_f~_yDxekcz?~nvRe<9X&2IA?*1fykCC;~g(N9pHqo5F8^rD}#UEDH5NInv zOtdq}nJEQuNK_U&e*^1Wrw69F(`atXIoMO=22Qctp^Y2wc z_?U=i5(=n!*nK#p@D#NoO2`)TeV`qbiZPnE!Sz!Trr!&Ig%VG}G*ceSif+U5iL>a^ z_65vcr3|_`JY+HUmYS{3hT>=kSh6;Z!{t?wsp!vSJ}T0J55b^!uM(>Eo9KKGSxd!c z_<^$GLkKX-19JR1`KI#{k{9M<*VbatjVS?%&70Z5!_(>Ul*KS8O!27cEWU2>2&kbi z9U5B8Y^z<+YFq}9vv|z58FB%{s-#8CSQs@KP9{;g%pFhHVm4D<XVbpVkBDJ;IY^o9A~EOMX-B&jn^UI54qUkk29g96EqGAxR6%{( zo8ilw@pMvt5n*<7h|6tL2oal)pOarg<~&`pu=y&vNN>Z}brWIRKP%SrbPimdvWS|s z#6Xg)18lzH05j}M>8hdGxM;@^xPG4ls()MHx=kL+H#g9uC!c`mhvyjU@QMhTtOnzg z$1yRk401Dz&|CHbxZJIUGqK+xVB9@8)ijryAJu0;x2oYMhxu|lhv<`po3MF;7aS;# zMCJHqh+ZGXP<=AJygwFvp4Na%*cKgK-}Tfezzc4gR>6&;A`mn#CVpu(Fn{nFYW#W$ zi%fsQJEwT|;Xebaop2iTHhANKHCy>16&#-Xe3@Rd@Mb&3FQH=JFQU{a#Lc~$fyWGo zn1)C`TIROWn02dAJ}(~Mh95ws^AVU*)QmS4PDYDf2X@Y2KNPZ*i^nMnQ^ZgTWm#;s@+7VrsHIUf1ZuGr=aJC3bGH9>BdbwJZsX0LUWRF-^-oE 
z;?n_iuE@ljVx_p~N;nE$lx7Y#qWrbZm(XxSDdspnV?E)aIAXL7V^=U@SDB4n+B^6P zdxq(DnO!(-ayCx27U5kLi9+tdg+$I?lvEwnzrhVE+I2};o|yv>t3cn!HO)GQ@oze`oViF5)R3@1V@kf(OAA0SdInYGp|r`D|QZebgjc=D{sgVTZ7s=R)R}L0A$k} za9!37z8+{J2jYKFyG?;m{vi>%JmYBQ-6N3sbQUzL5LA*62bZ&)y>k8=A`-U*UIhd} z(XiMn=S94`(yykIc~+$b{b--s4qD@xsb*Azk%HC(X1`r99^H8(U{o_@v_}4%od-9 zUz-(h?#`X~E9?#^{U^%IXGxQu!rO3iED#?XhTwAJx%k&bp6yLgrt{xxqO)oxnO9@L zM%}M4SC?4k-=K_9>(0W2mg^u}zKk6bT8a7gQS{jHHMBUID7*oHHLoRnuUn+cWm4_6mI0&7?|(vUo454o-_ag{?l??4m*n+E!VjdvyjZ4K%|? z`C{b75Meu;7>*5_kn)!Xv|>pu{Dgy~QU&nliG^g?RTM>kuE5#qYq8_lLX7ztO!a+l zva1!#G0!#_#eVK$!+U3Au~Hj2=c47~tru_d1KJ^Jx#~ za_tW!_+hpZ+Po&z!pjgBwJ=hqvIJM`-hf|>e=ysgQd;@F0zNH&f}*wO`4am6_%moK zThaKA@xG4XX7n2VtZy!ubzhG^aRXNm93@3SJHvgb^Tb1c7HxKl=R}>yL@qS(*?51tQ);8ipkdC+jvT=8!xQ?fmyPx_@m02ee~VRS3LR~e@&Jk zA8ttS>tq_y{8b1p+IoeAs|u6XmRI>-W3|{Tom@=x5+DZ-@8@l(^kFAO&*10z{rKGI zJ6r4 zD{7$8e;f~9%K*ba>2P@NA(}tpPiXFS7@BEI`i%cj&01k*<|4ujbB}<1@^7APC4(Q$ zF|_{DT@d@4LtGuxNTShSl(e(}WpOE7&~_J`PfsI?_dk$W;T))uc+YF&X8+1b4)J7 z4WQdT5A`*#!0$QmWXf20_4o?M5nIt}`EnI!Ae%7go_svwbDL3!3X0~8(- z*m8Ocs;NB$feS(G*X5aXa@YZmqbP(HiD^3a?px?bFL&4$l?%ElSE2BI6j{vi&+9lf z>0jL~=u|L;Uxb;2n+Of+Z81BZVPCpJw6{<9FXaWmytb_YEYVhzpduGe= zf70(1*lPU_5YBu+rVqz}oKZG7)g{2+6=%VvAQBu`?1N18Yc&6)Kc3q`pg(mI#;;w8 zwhecnfIE-h8C@i{-uGehz+7^rdCUeo-D^5-mIKv58z9NWNkhZBi*o=rP5ioyEE0@#(Zjj=OJ!1bs(jj#=bXO4z& zUUdWXpKYM4e@oyZuCLW)>ab=<8*Fln#+=Njbi>(l(8;-sy)K`LL81+8?vy9b2P?tN z?%T|g7mABv@pqxk%jL-aNcA*`2Gu35V!-r^K@YA=g}t?i!Qv-( zXkVuo3N^c;+W2*t_vbJk`+E&n1+{>f%rZ9Z`ZBV`S^~>w+G2>eOmW9Y4J%|IRPM zVg3M)6b7TajUO@R=3DnDoxtTQp1^_CBTP2?E99K4pyTULW0BW<>{OSlHf zM;LA{?b12mqrhhFQpE)}0cfOi8Yjj_P!o+UsBN^BSTzJ-lF>M*SmIA3G&G5a_!8bj zm$gu~2AOc2E>uMXktK3Rs3r}9UEbN)eZ7WGcrFjd>p#(;JuwiQmO^6dZ^ES}Cy3WF zhL_9z;KPBNpnL8kufg#U70fZj-DOjtY}z`!+86?TPKQCf=PfDntGZJX({1z<4Nf7W|-$b zJ__;;3HlxjrC(Y-K6ZVWv`W}KvOVYNJ3F7pPNkMprDXbp({^o7g%1>jli0d2J- z#E#piWSAHP+fOBc-kU_4GHVa?RnLWjczFzX5(1rz^x4M9zr-nu57VCnfVH!r&c*1N zG;NA8+#T|Tk!~NDk`zR2rf0xVivy}92g0gvg*;oW`Lx<@8M$}enWx`Nz&$~Td2W{n 
z*>$-j|41l}QL}@=>#?X`+fCimWT0rt6`FSZAl&@mKt$SQpuu4&EKK0R{PxA1=j{QM#TNJ8I&l#Lz(+lG)b+5CMcex zZeJOca?i3^{xff*p*wZT-vgVv3&=VFKeA%@B8D&h&3m-tDtv1=0G+S0F~-4$WRtCM z%%FldUgjfR)Blq9SN$U~)#t;3?L9n~o}awm*&FG_jyeuAhQRWd4ZOgN!yxa<$D-x> z)K7K`EYz^T#>soQo(HsZ5r6BkFeh^l7gj=Up!kk+fq*&)Qwa?oH z7j8$x@)be!Z@m$;)&1qkv>M{*s{IhFBFfGVClRT|n$V!<2L6KRBG2(qEaD(Ij!eNG^c%vxo%=D6CIg}+9wb(HG@Zf zdmv=CAnRN>1)pmyqV!!W$@sL3sNT8+E6-S=hK?Bi3z|f>rUj6$js2j-dB+Wg{MZ}r z8BD*iiSmTRF|+d=X#WX@UWEa=dQ%u~TcUw?!#9Ey$e@D88x+1G!d5O9#^}~LEc;v(Zov8t5>6?XD;CA2yl;|3Vhf9TNYDf_* zS<{Xts-n0z;x26VpU1Q}^%BbspLlCq$D@r=H*=GI0gAHAsOF0KY~Umhzg^fsmhFvV zg{S%<{7o`*Rx-fq3^R%kd6+29Vd7V#ApB%8F3PmV>Ef|4Ylb50&~HJB)6byj#zRaj zib4t3Ihb+s4-NiLfG$?$;rVS->FdR&>_kim<4q1_)?*T=b2iM&x1^~7hH^jEPyOuw}w7ntAXz3b_tyv5aN zp?g)JdQF!V>s>;TPuH-?y8!-<=wU5>!LbT0=JaMRPCueavX`u;u`7SW!dYK2E>9ho z?G_}KXGL(4x&}_U9*2880r%#n(&U;mY?}}dcfH-f;cUW`o+_bR<70T+*@eQ@eDstT zG}Pru1<{`V<>>aVMH1iAUlhR&04@2C#WW(zP+&VufkdJJF7OOUMFI%spm6|J*m z_~*<%P@`L-Se`hZ$h~jnCw{tv3q=Ssm?g#!{k(u2bK6gE28Htz%HuG@-;;mdiIST` z*6gm`O+0o(mW&#YVH4*m)i7R7PPr$d2!3J;3BSnujI&s!-hdA#zQu-WS#s21Du41$ zDJHF?M9$RY(9EIb{I{xpd;!HkzIm4f*%NaZ>$gSXk{kv8uOv+pyJaO?_hOX-iHQmVC7nz*Go;@XM}e916>Oq=b^S`4QWYLkom4Kk7UVJbh+ z`4TD6T15th{IR}Ti)`&5C6xi(JdQ1MNH53F|DS)|+?$J?3Nhs3up+Sx>?5ZXexR%M z0un2+l05kP>kMLLTT(W7)baFg!I(y#LiBsD|>9O5vA9NHY_GUu*ts45hr5v3t8 z%<<&bBm;7>2%d}^h2@pTzzdI~Q@nDqS$ru(x|V~&mGf{q^C86cKcd|gkH|64O0eC% zhm_A7$F3eyWlK#o*|olWxN%ksrhnss(c}s`T-pH-%!i24rz|43ONFSs_53qOGM&?W6!%XBhfP*&*1b6(+=O(o-94BT%CR~lTVYD_ZQ6F{FizDI!(D%8 z!`_IGuvx4COZczp%uiJyarqZcJ@JGr{I(so)ULrRI<;`(RXX;ood?JLFTi=dF!G#k z!zyt-dI0s9{a`ydKM_XyL6n7FxC4=|l3?ax51gml4$)sCSq)E_{wg{HCz_k#`sSrN zQD^qjMA73Qsqh?bK6(PH?v{}N&on5R+=LgJUcs`3MbP$kFS~Pl5sut|IP+T%OFInvEAbuEW-w!MOg9 zGP$dAn1u|x;;eHw$+nG$>CY%doUQQ|Q#tHD!&;vFH_9;8${Y73-$wFt2iDF1L!aBc zU^905qSL7Z7_)@iL(6gB|7B~UTAnCLsq@9fJ}P{LA1~-?HHNuf{^)qQOsC#ko{8+8 zhAQt)qvk|+Y4t~GY^ud4czR?MNyteiujC1vvKFyPzyE>2B|$32y-!T9hIh*O@GtEOWG;FEYaVO^ zKgSEyF5((4+$qQT0NSC`UJjous)C@$_o(*I4l*e93Yu)}$;%COR3_JqEq$)eM0>fM 
z&LLSy8rcjw%KxZj@gL~P=TT=jaXRWELFP)|1p~$5Rx=QN7M|=*=9XY`KQh$L|IOne& zlVFRLY9R5CEqrggi%L`f!r-}Bc5c}e+Aneod_=k+{J#{Ps5E_g%gh^+t3E*X{ZhDk zahR;z*$i@GKQR6KN3aSW<5`ABQl*q7q;<;%-jz2yAl%xNWxFAq@IFHxZ1Sfs)+B-P zyF&b=^o3TxRDl%jf7Cbd9IV-!O>QJv!@m!KFllft+`Se7=~bx^*!q(<-7kPnlw60F zhZR5)et_v+Ux?%Tf%}*ZeW!4Ud{B&r1kSc%*pW-s?|-7dLq5`+=eyu4f0!3`M+(O4 z`qQVLN$@E43OTCfPc9$r#;wB|VDC2$mz_)k#oyWJdBB}$x<*2f>`R_Z@HnQz@ejkI zuZVqd!K-iw!PK-yN&^+}%SgtBcVp26&bB6~Qel=s; z9fk0%)eIW)={AT-H^J8#m!M=pJ49Wbh^?3T5byPjZgWn+WxI9(ykCZE!|%iNqPMXA zSOHZZ<#rayb;H*1JhCxYin;#PW=D4#v9G_2!2OvCcpF(lL}4i%SkVi2)cz5J(dWed z<1(^HC>Any+u`bwesG>Sp3ED6hkXCj3BryH&Tchh+jK0T&_5U_A?8fqG#WjNjyfHV6N}^pY#glMU53bbd;kLhJ)?VbU`3bb|8%z%xC5igIU_5vex2yJnw@ocH?}odsnlyqZ{dSuS4kE zEJx+0hO&=8s#s6&6E@p=Jz5psfCbWxkf`CvrtI`ap%dJuAjPfJr?P@<{xcCbXmoR4 zGk0_}P9&*LbLhA^+tAd!04!g0v3C~D@bpm;b-8GQK{7Jvq0IH6S%=uj?YHQ*KNCOP zt^%LQemMTt67uire)fIy0sJ&0oP3?-LLVQQjGF!uL^yN@+U+$WUxW!t)tzJ00(Ty|h>9W6tmc+AZa*T2KW!gpnVmmtGnaO-WE`3_NEqFt4YGDFEB=Y$)10`bcdf2 zTd4ORyIFo4M%?DXCf!I_8S#gXY7IlyY@};U)aiLuNg|wN4*V^usFf~+k^(V=AHJQ& z&-?&-MJplK)RFP6)`DmjP<15_*!VOGWE`TvY563kn>GjKp02@B+yc=Tgm7_96y9(g zpr_p1;mDsHRNbvcyHd}mckn-wgDM4|6Owrm63RPNqZyy`Rq!zF|<^07C2dU6W+?Hh*&%gUg>H5Ds!c9EA6QOrm>1d{~w$QhR) zy5+hYI)pc4ihl^|*xC}$)$8%GUL?A1jz<57r!aH-ICkG|kPZ99qE7HxlzfrTZtJ?D zkbp7Tw=r^~>i?dJReaqm)pS~;1@h)6qMi8{oxyB%mbY*oZn++X<_Z3|ZOoZAK3ImC zD>jhx-Z2QDJwUumoN2XY5QAskyymtc5TEmkG3R_JSvQZ4*$J^hE^EDD;U+Rza*}Nm zxB|g$C)vS5b@Y9_j#|2Nd4Ly^_~TJ81dU2#%&0zQkGMhUjnA~Ct^<$sWkS=YQP{^D z1RGOv4C)%szLbm5n=Ag~hW=C%%|sLS(!_&xRQt1~A>&bthQI}07|4}OVij@oFiCw0 z_5CJHb1J<^B=0Vi8z!Tl@GO*>IFXdsoFTKPPs3G~yFqJ0EK5)efQDi_nvpDu^6ML5 z(&Aj$F++x}_PdUnQZ~5Z;7JG?w*d9dv|>lF3_II29V5h6k!4BK>E_oZkPvhl2g77B z=ffN9miz?EIbA36J6AUbBv4&BiU#)kGqtfePYq@9S8qt%3`h zU$ML90kmKFJv{lVjO3o5PHO8ty7pZS$W+e60WMq8SE5dL1P*chTrXbZdfo{m7SP@M zmcBDNORDGI;2BjF0hDF1Y(*Vg6iSbYNoEMV}!%nHKBn(6(FztA%{g?_jBNbvPCa?3Cl z{u$Y#zS?h)>X}MH`WpyesvF9smw>&j4a=@v591H%(1%BB;YZ0?@aKgPjY%mNoz2+T=2{A6SlyA$w7gD5yt0z+ervAQ%C9{}Q6&ZBd+GLy|! 
zSdM0kP0?$NKVwRI5YuX~(BgJMLG*S-o9)6#ST=r@`*ZvwvS zHpFmAJ^asNEq!fkjPsArAhDWDQL{`8Zwb1vhn3b;h_?os!>nt>j7E9CR&j!{4Scn66hz-f5nv<)@?YRdFXQ-Yd+9 z8NxW%=QR~{LcDhiaMO@1b_pl27kxscY+@rG@E5{lmn2+js6pPIj9{8Y{#XzaNi>Di z>7HVO{=|bEy%dZ=!<$HvkQMS|!ceT_78YI&!^5UOscrNyyZ-SZYb$^BU z=?OpKqs9yNCFKlS#oeL}0<&?qt~U<6*n%FSR`@COIyTJ}#K4WnChWH+=?)Drwc`{T ztc^#FUJop6ILTr*?x+2_zWC>&FwM=o%>s=XyM6khd0h~j{W2bh zM@#6M-~%*MqLTPqPsg?F4a(O=;PAF{L~o-4wW>IbJr`~Pf4M9_Z~rHVX@5yKUAD!y zY0?Tk|pY-)}B|?|mIBymMHr^%2a;(BtxQOGw1m zc)WB-moF|=NduIuaJ@?;Zj+nFyAdJJPR~}swwMr{SL}^p+&|*^1P$!nxxA6ge`LdsrN+V8<7RAl zk{E`rm`Be>U4xPOZuocQ8JESE#%O6nRCQbn3tCF({OBPR`V#`j_Kinpt8U*RR@w_2%u+5%HH6-4(%=7geZOO$RyJ zw3C!_S$EY`Yy93a4joJ+2ruvk*|+Wotoy4A;-@?~46zH&kacuo%|Af>RJb5=7GA0h z&~M(yQ2)$A4B+x7E4c31de0Z!=J%8O+V?|OwG6Qc`9p5(kAo{JLHL>LHJhCu;5hXf zcpfE;5$;-8-tZC13O7-S3ESD7Fky_fGvpjZ3T)5mHn{3o1mu1)?y?ug1&?pCyTkf) zZEFEMIWC0D`gZFiT#BIk&z^@V;(tJw!(cX(1?Zu2LEQM^D|!tHVs7|#h&Z~8?Rxl= zWGu0OV>(9o@!AZ2i`GP3;&zHg#;#)XJ@(_d%b$sz{&QA6FbFcM@31$YPGMZf1=>8V z4{FF(oKV2!ps)Giqm=L1Z(0l~SN>ynr4Eyi+ulRn$R=DG>xkWS9Xg)b%R;BGr(cbB z;enfC)KEHz6|n+VS5wYh6-`kEiox{oL&#m@z>?iu(Iq04YIrTCGFx90`37n1-FF|O z29BUW=RqHS^q(A4OK z=e5@Iy@WLIc4;}yl1O8^u37l-#~EUhI-b8chvAN~L00T_4gJSmr%$quyqIslXk0`pe6rlN@JUqA}0zGzEGrRC$s@0Z&n!*3kI@x^Y zv{r;aZsw0N@Wh>z<(L|U)gwi z|9Tl6;Kkrc`;Sm;FU$W~+yVM#-Bgp~V%aAZwDJBAg&epYAie@5LE{ld_qT!N{wp{u zaT96UmBvamAvVw< z$v0o0j^+Y|*tp^bYj_xj+@vDpSNM{P{@JK$zJ)K{_L=5<-ig**GEsigTweCvS!t0emmv`?@ICxd7;aXrjIc zTzzr~8_c3fUW6LhGzpQc*G_Qqt0h|ZQK%8~hUV#yAoKD`@UE;QzFg1RFnk)G<^;i! zH9>S4-wA9z|ASM4r%`TcC}fsuF+tBRQf;RT%VWI3%VV+5S}!5GAZ-mSetHV@FWG@| zbvPM`ZUPzc2$a)uhM2PNJhi&bv`=|4F`BcBcd6R~rfI3M=^;F*Jy=Z4>Jq3~l^^W2 z2}ITD0_e+8KkF%IB!wh8E~qOp;4{}p<$yq zR4p?=5sup&r>e#Jgd52*)0J>p+z+bOs_B$*TH66HfK{`7ASS>C9$KW6q&i<%d+Ib! 
zJLU^c6I7sSB#&Ai3?mbV5_oZE(&6v>K(?oN54e#=k|ubO4wcrx`yC@VY%-fQt=a|8 zD&^Vt*f)?LF+ytQavUt&hNQr7*l_6)Y@YuL(s%J-aA7)a-{prF4{5>HTNmI*XCi1W z%7>Z@XKAbYHBw*x7@igHCNXO`Pix#Pc4ECetIoa%zhFMR>_9Ng$fIZO)q`0@JJ~$v zKKUFYLmo3**vnJK#@A2b)%x!^5zmoexqGlfQ55vu_^kV}Jp444rCGOEvtdi$6 z-%YVltW^fv^uNI4q}xzwh*Yj%0dtP60Hd8^GdTMnBVW#Gk}BD(U| zR#MfU!V9lD56N%yY-h#oh_bE`3A-=(L)jLTAV$i;Fo9i=&V|^4RG4{SCEn+~1Sj`U7JpM$FQpHA<;M+$7LSV(P&mB+*11vW2S!0Fjj#UZruqs(L1nLW*NU= z`W(FS=`3Bl!IhQh#G`m>Jz2S`oBgO)$C5*x%wuXg*Hf0zw@G$r;unb9!yWKfWGK#f z@&NPRe}|oxTUhHj2eQUr0!=GUVNvQud{O3&UdqQ=n3X#l~)T>tt1h zKUh#*jfB#tzs;$YxTVmc7P zCjJS*{mZV99gicZ`%_JvT`opUCI#U)X)DrNZ-ukA#Nv|4wdh;RX^uG~bpFaQmSG-? zseA9?w(jd}R*g4)eXWPBO*4r2^CVPWXUgA{`h}i2zZzd3OG4uf0=(J{3s_RfVw~y` ziB)c)_+2`Y=Dc-BQ;oyqi$yHXZd8P&JvZpY6MKn%O*}7@q(YajFWcMY1$uQYq}Doz zj?8!huRn^B^+bgQ$$5kMkr^!E%4?YMRFK{_4S}tP@?cGN5?F=iK+)4l_+(HS+N`4K z>fi|UG}8o^6L+ARtAC4*<-odC=V|l4bEM4k0mvkH63O7dbX}1mi{;N|-50aq{se8< zC7=OymDlLZZ&mQ-;&Hn$4Hq!QE2NV?Rqk8@m=t;YeV*Re>1^F_;0aoM*4% z*?dU;BuyuX-GV}~lVC4s37#kK(C1%|qw{rP%u}BS;!7Gp@jQpCF6PtYw+lIaz85{# zJ|tfvjzTK;{ip9OfCGgQxcE^R9Q^zQMrA~>T<|tjm?J&(Zysy1E(0r>76|eDN)KGk z1=W#2=(@TUb)MG(`{&EeM4U$r6vJVULnUawr8;NC_EQsQcPQHa1h(9{3$MP_lKw*v zz|uDtSMMoE_X;%pqe3YNq^;v@?&U z>Wlxr%wxt3iIPx;GH-kxp{?t}q6X__1n#?n11n9ZA7CfH$#`_?6brD-Y5e`3d0?%ss`949QXYbyOp%ZMZq zL7B3>mYilHUC?kcZ+z(R!DNby@X$UTiuOmCv?=nG&97gTG%4C)u!OVtyI+QPtRW)9C z!Ppjy;=OQ;Q68sTZHK*!_-yGXYtmx)4u)_lwsScp78h6Jm>;W|caAIly44@|%Bs_3 z`B=8Yt(oa{bg)}3&bat{3pf^a!?jc1EYSV{=Jci0^1Ow#oZTT7ht=@0_EX%qG#Hc0 z{~^4Ce~E>_AFudTL9*+2mh|Q&bWN+HkBYr8Va_OYh#!f^Vm7gRmp>q1=q!erJ%N7i zURkrzn%J25u_vwm81^fbOix%(t=_8Qr&(=SFYk+gOU=o#L56O-_F;l}5vB%u;Jh6@ zRQ1P0Ho-IuhdNH8Qb;np80CuV1C5YBZzR#>;hbKJ`4wpR6vKR-?Qo3UCb}k68sB|2#`%Yous3}&W~c>W z=iTEF+$P0T$0(A*jg{bALhz!7D84&Cfz!2)W&2la(C4-U3(oZs@3|iA`Sf_^n3l}= z%hd4Gy?6*QNrr`i6WMW&@3qTw1+}bGrIQ^~$t%r!u$a?@o!7O%=m!?WYy-#N{%MF= zTlc_GnL_sLXEwwvIYf0oYvKyi4={e$4M>!m%%EhetQBhuv_;L4yp)^&fwn?Wd2j-H=&cS?px5JS9(w8xXc`#bHl+2b%qRzdg 
zaAflf%omYk&vw|r>mQ?7K;0#HwBi-9uh;;4XJmnBRWPTO&xW}-Zb6o(B*pzf}fq+m=Q$O_AWc)Aw5DmxnP z9u=aF9Pi-4Re#v(G8-c27t$a3GjQeiktiEI3d#!Wp!~8Q8m}s#d_e)MAy?4%`B{=P z=mp`g^l^PvKBQ&`;DCE5Z1{8@&dRD_aDNsU{1`_y^)#5N`Z>_Nco|;PCTicB1{?Rf z!MLSUIZbK}OmN=8KFk?S`yB${5|@*{^}8-_Re}u_Zrcp8T)n3E9M4$xI=QwjA8wtB z#RHYa@LHuCp6>Qyn^qcdy7^GJE#rYM$vna4rU@8uwt%{g-NznsI{b}wm&ujNA53VH z9P;Duv8a0|aObY`v_M%5FG&RB%5{sr4 zm2_LZA9bDGMLzQ;VZ6*=jNBiMk1LZ%A`zqGzU<^!CGD^$T2_#F{xjU_d_;Fi@8P(| z+IUY}10_ENv#elovU+MQjuPC1+2`U>sAmZoD;CVc9v#Fjf!V~MbT@VUIu2(%G9+0a zLou(&g`BUMi!?qK+LoC&xx17;6)D0(c1?dIpS1au8Dry@d>gckm!0ku%d%IKtgO93%SE_ zT>gxw_3!qBr0Nq`8c_>(w7S6B%>a`{9ihJSG+pdahC9=@z+R7X2r_yO->U9FRzN3} zRBt4b4SlfuYA6Z!Rb|dEX0fpb_Ux!b3%r=P97bq6a5{zi^i9dBWRr;@yl1($n zO^G;oq-}-Moxg%=1m~@@ID+in{ua7~j9_W$23GOa9tQm>UC+AUx@ZEl1?~l{m5(^q zqBmOl%3+?Y4Oq#D;DL{&*tqotU3=se^bTl~@FOqDx4dY$Yd9BElQ>OOUJ+V!mP2pl z7r56v8hI5x(6DY5O>SJk+OBFjYOZHe<30(hq*Oc(3H4y zu%w^kX{M9&xPVy^&_&0x{ zr?~@;4vFDfQB{0sz6}2C_)70+m*OT)AM+#303UB6XzVPE5ewR>S5Fu5?vh8G*hW%_ zK6?yi0orfRf}_8D9OB{gNF8xQd!3^VEF zpWOe|2U)CLB8Rt4$5C$?6?~r2j|cw9<59gMVE0OrsZJ{)CU<}FPyYQ2{r9_>-<2FN zli*WjYbEx6-ULkC;Y?mP9%4QjRgf!rfaOG)VPvKwU7uVBgSN_e%k&4xgpNknvN18j$+ zG#<6y53}Fyg=ajD^`38pG69QdM3^GoSAURPIoSkHpBzGsFg$xCf|_g5=8+ML4XXPTH|{1p>7euOtY6OpF-aj{2r>|Jt@e*b!uX_x9_V67Ey z*gAtn&s4;jJ&m9-yBH@;H$tC=r|h*|Fx?*U42GWTYSI`G4f=&H8xJx-hhRW6UOD|8oJQxT!NtQzU2mIv^+ zjgSSEmMnQ-4^&$ju+cx?gGiGsJub8tLadr#+`CG6HlrC}fi8ZL0Pg-DP1VBNuyxTA zn90qNzKC9e0RLLhnsS|5k8B|sTJ2CWGMIEfdqXW137eKrm^H^u{ncy^iEk!>!Lz&6 ztm`E>rH`V!CzlhEDf5W2aWIIDHpTlZ9s&Q-Z|rd3Vt8H+V75pfYBsyF(6YH8$eT*5 z#&^T6v7s>gn?F?7w$RFd%#jgsbb2)lo;kkd)T{yc8m69R|eSfslw2J{)rij2;?M)xtTPPU8vm{?`y!1M>9|A(-REh-#TEOTod%@JR%on<-xS+HTdLB zH!#B~Aak{v*3L~Jr$650S8&?KFEgXqCZA~d89$QV`R6|UyXzj%K`DYy=drY-fv{x3 zWTyYB51x&apcbuBP#f0))8n#0YTyFLYyJX#+h@S&`I&Uywp{$2GY@V!JqAUhh_}aa z%&VKFbm^!|#J2t>?2Fw?9v}Wm)do!1Ob0V2da07r$l5>vEC#Z&gL;mC$>pyoQZ?E| zl54a`^T`mX(l^J<$z1MbkQ5R5yPxcu(hB0M454z4D=YfJaZ{d{(QEZ*05A;Je+mQX 
zvDay<-g11(#aGU0jfEJu53rVFSZceq&`0{W;F-)%^gVrx7~AcJb5D}-WqTVeIg^9G zrWC?+<;O5TOdb={8bK+2I{m=xnB&S9;O{JlpEbl-R?%hnZhjcLCUe{}?^h6EeT;jS z8Btr?Oqjy%!PU+Xp2N-c)Lrs0NL1W`pbZ`HVrw5!dtD0WE_}qRdiQ~^u^cjOU(nA- z6G@EX0DoT1W6&Vyn9=V8AgrWGgS1~$^9F9E@r*agCBAIkU;=F2w}hSQR>oc%MjNVX zz^n8(@JBy}gy~Y~s1l9^XWc;ArIs!|IfP3W?g8yThS-z+1`NwTgVTtg)ZFzeDZed> z{@e0N?L}pFENL+d|Lx2gTA#v3Z;m7V#2x(CNwBBCxjfGcgq|sVOVXwMNd4P1sLWZ0 zM{7pn>f|xxR8l5UO#2A)-#Wm=jp1zO=iQ*0zM86B9gXJqxcsAYiD08A#Qd-MVqcaG z{yeiDMrkVHsk~lXDfOEMMvG!g;b;=B|Bc|$WNyB{56eYGP~!6)j2LwhthUQy=s9j~ zllUGYZ@JL~T|XvVAcG2jd06?60b6d_2fNob!F(>J-12JKz2mGncO!M<#yo2|K1-_S zQi1NKiO6{lQJ+JhOz~tky2e~4LF<08d|yTESoNJHOejIWk}Gu8shy}wV=z(Oh2yJ* zVNzELI&tim>7qfb=;jjg;oAtbnwO65l>}|3MdBLEP!@1Eo;vSHM1$t>^wo}fw&;>O2#D%Ih{vGBn}M8vdo9F z0;S2B7*ci(1>b5}*|l(N@1Kp|+O5e~@d6Cyc?<3|Kc-2IEAZXH9GtgLnBOzqghk2C zL0j82yigW{Z>W=P+)54E*NxfVx8){Q0o~ zrU?C_1=IZSMcWFO_R&Pi*ac1maZF*+5t#U8A^GQ-ED@4G(xib69|~|0o<2=gwC0s^n0`m`@8xxSPNvZ z&-W*l=;1VF?{|YzbO8=jO5iH*8aCuRlZFQ8!_|CIY_7NAIiy8U`KM<=PxA+C)b9uW zt2T1UdL}L!U5gh`1cS96@cDaP>9;?ML@Rm{->4Jd{)BO?dzUKQIS@*27P!#u624$@ z{RBo9a_q=mlE6>DO4UL`z~xLF(Qnj+&NYic-pLFul&*x=hLKQJTgRW?W=U-`^ifjk z1ApW8JZQL?1S%t(ptOk6pB^)bk;O)seP5HTHjbi2cdF@_$@R4V*&d&pmtc9;LI zZa&qU9t4NC#S(Sv4dhe!C6pVKha5p0#K~_3>5_OfOmZRH7Ow@&LOZzv{hz<`>^a#U&seVKv zbSzxy*2PC0TXaHz7S_~FhADwQFe38|h`w0~87ZA4(D*v-y6VT}QSE|vzxUIFK}*4V z^k~=+qlTwcc7mPB1ST@QgIt`h5AK5jV0`I;nfMDydhwSo$k}a%z_c}B-?5im=syZ> zR&#Ortxb@yv<}p`*oErgAn}Zv2)V5$xNf4dVCY3ZtPj{qJ8I{#1FoylNH&3_->qTu z>y)rqp^}Nj`eE4VSlVVg1n;Bm(3ImMT@g5AUCAL_V0;GBWhSz3Jt=j9G*3N^8m9hmv%C%1RtHPzKAM-DLaYy5Kw)Z)vMxSoB&E zfA)NZ-(@S9bLf4nHp<3_X}3Xg^5>9!a74$pdb*?i3cJ$01{KdnqE}J~`y@68^U^s#8PS*c%xnYF zubg*lcLj|X)uu%*pdqC4*9}5{S@$xY=*T_(y&EuKizO)4bERPh0`);LHmyju$^NiZ(jcm*fv z-w#H(hxY+G^TZ(R$a&b#55cmglhn#C9U$`x%4p<~FY)$Z_rf0E<|o5?Q$Lh634$@a zY}h>T9v-xu08>vj`sa^4yZb&HD#EWqM^XcI9!v!HEO*#pI~6y)DTb*DJ}hoPmUgsm zgtsCk;K*0z&Ged07bm%Z^3EJk)IJHbCG*J4HRs?!^OS5Bya$t!ftAq%aG|Z3rMYmuIq`gP&j#Ka))iUxe4!d(ta2mtyoZ 
zebhCQMc*lw_&O~C2RP$5HS-9BN6+2%nV&;J+c;WhM=$j@?_FB)w^_h|a#kx#-dzJ|La_+N-a(Z|@ zIj!`KwdP*XX^%f-9HQ#T)7LG zyKX}M7A}`cx*d%DpJK|YM_})42WC}|Xqh0EtjxU0PabN4&U9<)?ki7W=v?bht(j5<>#B2(xge-w;FY4&J ziOu-#*an!<*v4sZq%h0jC0y5gL}zrhaa0{4+-;ss#we*XsSZcxJG6{#Sa2WQo-cwA z2N%LMF1{CdLmE&1)~A2}c9QtKjU?%3B0Sab#8IP1;8R;6a)0?r;yL#xcy2L+ioO6= zW$OtImUcAC`WCF5l?BWF_rXi+0a|-tHx|4ak5=FWR#)Y4!-96yUi6Fp)cFMyidD!; z@{#0Ui~?ctTpTLyhj@!-)ZAMM11H4M@&_ONj=YD?a1Scgy@LJi6Tzxj9SqwwmTi06 z3tl@b;m?)h=w&5_PaR8G%De^i{7sInqbY%5)?U1s3liy&&S_W~Gz4Cc1|Z6B1ij|b z4X-yp#O@W`e4sQI3a!Vo)2x-Wolu28L5f(F`<2Bmy90{Omh`*gXf~#vqMG11*>xj< zS>Nl1=QE>Oyr~uTdHGYDs;i)_V1%#iMq=c0imoT3@cU3K*lrMKFjI?!+%1CCDMlD1 z(8b0k9bEoei`^fiO^dgfp-%ie;^MZ5IdnuZAI%u{<)u2x$>hS^xn7=-;kXd;5;BC77?AZlc{%hH~f%J$A_65 zYm8GYOq(KtCwNoQG%66SmGv=TZ4#Y*`6Tlx7of=TVEp@S84K4PhXpzx;m%MKUZ|ah zo8SCp1{wQl?%EL;`+7P~_CL)#JM9Kl2zUnKQGEP9YBF9HnA2cU9ki^_B6o}!ep8tN zhrbrnnctt1%WtdrJku;_xUhr8O@yn zAweF=O0EI3JBGxep_%$$He#chIO{o|39TFF!_Wyo__DB@Mu@d>8XipwWpY$kY!E+A zoCB-2so<0RPEhK-v2V+GqfPOKLx#mmK&=N)P zwB)=Lwv7Vwk)ISZBtp;18dx>dzfs}I>GwYgwfA5n>Bej;+auf>B#w?p+*~U zN-P(Lp67zrOO7DFI1TuY!2Selky4KDF;zPr`AWxdaFQ8{F>Pl3SBJJ-oryB9glXF? 
zFD_>!jTt&6vy|idc-1uyigoJ2vy9IqHRfS-!A80($ekAVr*SNaUeI=S*^UI8h01p1w(3m+Y{Zird^($%G~9c=k58&Uq{fYlLT_{);^N zV|)P{u(!pOZ(QEuoE7XqtudAt4uO^b4;*J~kM%{80+|)rbd!WE$|){HDc@qAxmFid z$&tbz_hw_rVZik_?5NPqJy`B(OoBdH<9gd-Q2(mV?A^5K?LleKvK7bbO%GYc))FYV zz4z1RENDhJ_UQHuA+%<3#gK{C278y0}^us5K;g?-#JZ!t(H>H*RuHJ=>eEC z;uLF<3j^EH2dHOXD#X@QK$1rul;1}7(ZCmn@>U>TSqp|d#Nf3J=w7GK=GBfxzrb;1 z^@`c_PwRCsbMt8<#GFLMreEHCUV;h*V z)(5J%YwRJIv{i%zI|y*!yf196=yv+`;Ct9J!w_#&UghnmPol-o-oux(2An^M27b>o zptYSFQSQrFGH}ZT^SnY}z;OgiKT=Ej%ii;=xbdZ%QZq}bE`^n6`LtI~fxSrA!29`Y zh`;zjCZp91_LKbCyUqIe>9{Q&+1(2A(W8LCQMCP~|(^bvRV!Ibi=Oi$d)kk3KVIQij zB#Z5DFGBs+47ht;iMgAc#DHX5Z1Lst-Hr5dgVhZTeyzYhSSjP`-!n*>hzbn|uLE1L z9(28;fK`?KxGeb@th_T8AEw&j>PkgSkrucjmV=q5S;?%?*c;Rys z57~`H?`@rI&5Z5z`vEQwIYS%Acc<`9$epK=YBwNhsV3fT=ANz7b*YG}0>*a##7ov% z_(4qxin{%2z?CF&r{xsiZs9g4ivuRwvj)1)iqa_^4OF>|n}7JF;xy1@?llvj_G2I2 ze4d+6+>9W1L%blaVGEpj;SI;WZsHijE}%Yg0?ZxQKtsH&@YX|BaEiGBK5tFYH6R>X z?%Gi$4{vgG=ny;}p+xR^B+^3e-Y>ZKl%DMj0jIU1(9vK9y7x}e3ClBK%oGJmpS6=S zNoBY^UJjUQALsv-4JR^`IUXqIWpFJVrnn2iRf|ci;gK@5N{G^l9B2EmrxuhxR)+=o zF*Gdp54>4X3ipa-Vf+1Tc-m;srLP^MXSI(*-p{@G(<_?+`QR*FseKUGd_{Qqu#ofQ%ZAedbC~g& zKB9PdE6k~ghmxq3ym|*=I{)=zSS=P0qYd`J$k)e{2{=x)#B2pK!Q0!4xL78Hep;}S%?l1gji))}*t#aBHmHx=esnXk;RsHemQC{- zC!)hGUp#bTHs&4Ngc{99v10orI4rrCT{&Y+suHBJ>FQoI&ksbUt_|2^KcD?Fo);KXsXFN}0z(vjh)TMbW`ZJG_HZt~ImE-nW)+;=7tD$jWJxja zF*>0T9{Ll3?(-)Thr(cHqZ)+W60t;Q)h;^JtA`(wZetz#V);!2qMl}sZ)-!*!2Bu7 z{`N)bl8>~1-eY$1Nhm6F{&hMF;+Vl2A9M=P#@wslaA)cPtdiynCN;mNiN=5?Bf@cU z+iTwOl`70@HWz!{7>u)T1mgztXj+l60Y^r-68+U-cvLqKBG>+*d(&!(o8E8!eUVon z^`)A%|49Wq1ueQ-{u{MkriAxgR*=g24eXlyc^GtB&E)0B;`!r@s`QDWX|N>zGU|f& z-=$H1)_mNub{lBD@1@h;enVs9aJctUAKg?%F|YAA9Md19^A5_><-4U(=8K=*j_k35d=a{0q`rWU`5F1vUQ8}hl3%!I83B}0=fLO zKQdf?hB!8wiPLKP(Kv__WNV``dO`$$zTHSFI(Q#rdRlpDay@)?PP6@E*$2LCnmWnt z5u^Pz%HTI>jXyh_=rsZi^Zmia&!2xR zSduzi?t!E~6?}V+O>@1_3eFgbaQC8@R#qFV~Z1gdCZIQdSCL> z&gbz-U=F`UYk*WQ83k(%#*-gobV$CI8=mnljuva3c4*iGNQ_8NP&snnkV2B9}Uv_&>U3VOfbB 
zl-p~P9d1j(_rnkzxvT{FQc>Vhq`{8zDoAdI5^OV+M)%}b9FvM;q>txry$YjP*K{5( z>pn_GDjj7`PVL~J8Oio5&c+fSUut#rF6{lOkG{X9P;a9dT3!uCSBnTJ6%t_sKQzew z>H^sK#sCX9%HW|QEu3hg%kG_4rCYU(Q0~rqQuuQz%YAf|`J7B(kIU52Ng)Cn#ScNq z_OUF()B?3u+0#f@8Jd=oN<@}Fgw(c7Bx3=C4iNJE*nM(-t3H~X+5>%)a+v4$ROku_ zrHxO=<4otL;K0TFP94%?pFg$WqFW(In&ZHLnTQbky_X>`E~oRnr1c>ZcQ{bsvrQq=a$5 zLp8oKqPWrVGn=zDicSvs36|FdxKuQT=QsO0UAN;on7=o{ha2^AsT9(@-)dODU7p;P z<>5Q-8E396&BCQcXwich{@s1Gu+%n>;ff@Vm#;))nnhW$wlGG%ZpQ~Ylyxnyhn0fW zEbfCeZcP0`bb~bUw8R|3&PJbR0LFbH;#S9-OI6cLY$z&}DlVB}6b7TTc*ve`1Lvumc zUYo8yN8%7Rq(v#lwWQOO=A6h897yAMkloF)m8G z36AF<1FX?R@zy7LqJ3}KI>SkOa5 zI7KkFgJip z#DMzwwEEdKrYQM^z0VR6l+_-0{64H;w=0*s*q*~?V~K_Gij-{MbAWe z^3Re~^!T)!yuC1yc5ga?;}5=uWc!hVrB8%$bK_kaEVmy!MQl;%rV(~P$iT&kiv^FTin6whl^FOu zhvRDs@ylAKGru!)@k@R>%8p9JU2f_0to25Gzuupu=5gm#_GFNf`iW2t{|4pErEtsCsvM^^g{ z&_>VkY}aQU_HFTLkQFS22jFMt9KzU znx@b-=bgdc^#oqbIYn2F=;uE_aGJVX_`nSNBP5!O8Fa>2!%Y(dXv&=qeh&`A;GDDk zlSlMv#>FuhvHKxE<~ld;j2I6(Mzf%<7wD`!LUvzugbsIULj6POtiXJlJ@!1^Je9!? z)oOl|porg=WkDPFu7+Qw{^Xe|5LQx#VsA%)n8$V4TfPihRo&6KSc*)4X9Mb|PVz5v zKFzlj&-3G-r;t0cQ@H%ck>K9iY2N!&f!Zd@k{L>a|IX$)>J`7z<4R_hW zw3HAuOk6}n=KSUdsx-r*7+bjStce*pS|A*;9B%zS2@Bp>!pd8<KB*k{a z(6imNT5%>f=ahvDQLn-D%4#qRQ(=9V&yvJ(DsVW@gY#QEYc}`PckgWXveS-0cjRUjOsI1p+=D#lm zpLA3azR?RdY1>514*J2;#d7hJS_Lggip3_4gQym7he6*GaimQVKH}I+Zt5W{@~$=c z-ae9BcesZhqt@Z6ws4fpTf-JOh0^x4IJ~1YmtHd|XDxxBS+V1LwqnLVxW1qR+E2@( zP(mm(HA=-)p?qq1EQ+S2zaz=9<8Tk>gSDqK34O$R$@%!>bnK7As1|Vt9^a4>oSyd? 
z1|!?){f~b5<(4I;yH3D#vjnD-F@hu*Kfyg;ZbR>qEPQywiKq%Cvm+~x;10tK;wp5C zz6>OMeE95a<*nVE&0(Bu=N8Ok6l_ z3ItRhWkG(wI*Fr}x(S-T4$|`)c4*IOiSA$c$!j@p#Nze2`2W**ynE~@jx#+%g|G^@ zr2CL(6O+(Qxe0RHCvszCWoq(N5@@eF{_y$Bq@TCK@onDJ_LmJSs-BDK9Dg8HynvnS z{sGZ{a@n5lC3tSlI=a?d6(|4E$MkYZjD9J=MQfb!OIIeR2Q*-pQ{LlQmr_Vs6^oj} zaVTAAhGT7{n4Xv>)jT;H6;c$a;xz}B@#7>rIwzj>gpNZ^tyFmMw-8Pjm@r+9`S@#k zFcnT%Lp{=C2!G~lC=Wb|vb*fD*!?JJ@Lfc=Crn2&zYxZ0oM)O(OX1q-qjc>wXo zB34oYjpjUR;48`Hzs$vJC*JYif4)vTJ>;;?!U|P?Q&bpONejjW<0L0t;_p2dH^d!- zNoqPw?RGU`!4k02L;*Jkd}q9Q*C71qG^!Jz#a?<@;JmU>;`A(@%}Boif0N>wcZw}q zPuNSPA9cgFF9t|OxjEW3U96W4#QsNzVbgSBR_!Wo;6Lvki z2|F4Op^a`PT%9%--xmmzB+6&wn$7T>{CW}+Y)F@+zlMX4PGSEF6P#=Q3!^Q>vHtBW zbeo-mrw&iR=GGiqQ=Y<%(=3p@;Bu6z-B`tFUG!Y^4PIZV#{l0MXe{}gX+1hl?`{7J zfgH!-$AoO2-%c)$_vR-KCwC0S4>rNE z$(iuP<_wHtSKyKPBB<&tp#|e3u?=vtxbQqiqS96l!-*e4sda_!_u4=(Aj88 z)^u@RW>1=+Y$G3bmAWv?>DEB{XHfUrFyPAv!Gz6TFlW{qnmO4XFZGGywfWQFLf-%+ zxyPdCcFy0*tqVl%b)k|&FSiDL0Iv6QF+s&nX!5VajoSH8q4k7gCdr_(&n?&-0(6YJ z6H9D-#rgD%#50Y*XkSA!yiwxf?_6$Y_=xwgx+040-V8LeJP%fEe+UniqIiow+tYVz zW571y9;6*@g3Lp2$;xG7c;NJVlq5jyyO1?asDYVEl+`Rm2kS|VGQ}JTL}J^_M|VA|_kke)P(R;g(+*I8ANSNsOV zQ`%^j_z5twSqb)UBJk2K&Lh%cC!2jrgpL>+0&TTb;B-faSK&F0?tSYD+9wL(Re3rr zeV9xxWVAw^O$yq!aWUrEGaz$M9zEQClg#?w%-?#q0De^-V2!=L(7I$i6|wk2SDoyJ z>-%qU{Hm4g3bBUX!#d2`wGWz(_7Lj{hESGw22yR)V2}Mih&!DKtCn#57+x}6TeKGE z-I&0|M-#y#bu2Er&9U{q#?g<@P7&VMHn96JnY@!~r!gkL9tp;>=5H17cD5oMn=~5+ zdV6TE2FFNjFs0%-Qnae#55{UP2HUZ6=)r%^X<%lMH`Cq`AKfA@_emej`hA$w7|si1 z&ve=^;|2|E8h9R8hZWhkY1BI|AJMh}U|M=-hUY!TQ%s-ej64n=!G#VpFuKokXl~90E^Dr zLEHY@c;WpU_+S*pB#cMVysKHjyLu1Cw(jSt?4ChI=0$+Qq1&7fUTx;zJx?;=@U$?d_uT_LHn$R)q>L|{J&A~p!7UsNB)ANUyMLaIF@>~#p7JL$&kCR8r~hM zg|gBj*zx2HHPWdinI}0go~UWPysyuOl9k)sDruG7|I1j;pB=P z5Piu3_*EC^8}n)C$Rx0UD!{Fo?I8Rn9wS3*sj++wq^}r3B$69QD(9<^{zrhxB{gu3 zmw<|z$Kic`D;IB8!47gBjHHa|-}R=3c>_kL`gd`=oz5i=d5SlHtdh43?-rGD)*RJ+oZE7|JtY%Laa{2$-gk~nr z8E%94%f4Xlz(T`6|F1So|G(RSRr1--0RAr z-)-hY@3vR{(TJ}y5zVf8J@~Sc|24$CAV^hUy{4_oJ}XpE*c5DjXNGxo*4}Qxfwh5^ 
zS*w0lxol6UswiA;uK&%sDq5Crp_-9bnZ3rfYD>*=^AO2>0`V6v=I1V-E5Fs0TlIEn zv%o8Tg?WGFDVCk_P)BN`iMipGEP>7OCFXMz2braVMb(+^nUz;WQ&_O#(#m@WkbM24 z$rja@(BaOR6``6na_~%PbBpZxjWq+Oa}{+;)XZO(j-Dt_vp0Q@94_(!VvrX;uxN_oE8ncW(+}dkVkMRT%2qwvtuODVQCMIlFE{+&iVq6L!>^^I zCmHViXC%LW%MCCz`DQx&p7P%QiT|}@c)w3|lHu^NhszD`54UajSi^M`wrUg(*dN5Z ze@D_+SytxlbAB_6@C{XGUq}mv%O=-H$_&2_w`IAbi+NA#=E^(KmDI^|M)^Sg4H*9H zzpek9q#*Amsj~dQXL6U_&95CqabTV-netYl>a2|I^vmI7^C#z91gjR)Dxo7|$}cY3Yfeu*H4pACs?4b?7exGASY7RH zYQFjU7V{`mUBUR*Av(mRtnzAyyTI`9%BmYtX67YpvIHATXH{Jp13TO;y|zrGJU<@Vwxuk13H&rnv)kC`w*>A$~!_?7=8p-5Ds z_x$kh@j8QgmE*4ITYjayJe$-^tTdh#BvM^$}{FoMvfBUpP zF3d1kI*cz?5zouoFsE?%v*t%va{p_`@V+irX872{<%ai%+ctcx;W|9-JHKRRoV#TD zflt(VRqGzG(Y<}yJA%T4Jr$c zZzXqx^q35HZahY{mkrl{A7)M_1|P53-*cTF%J|ND2l;G+wqK>9>ZNTpAT1+`ie z$YvUWV0>>}MXkv33N2M7!HSG;>|*#e>LhK)CfXHLu4^n{yxKB$ytKGPDeb1V;rk?3 zE?v#aSBJ96Cytd%xqqn;wlZR}Kg(&5dk98N*5bT%Ka+s(F8}uFbwjRCx1B2euAX1K zafGX$qI<*?@#)rOACG-PdT^R z;`;E@_uuV~t#7Vhgj%7V&j0Syrf{`?uhT5|*V$|4Ihg#iI?kK6!{XmQt&eTW{jVLv zm1C@5XAd8HxZLpmaNCBD^_#1+`jA>-?mubglGVP#w#ob0=lsLvcbCth&7#5oUSGGR zJOAy|;kF!nxs=r|T)-yUyH|V)UQ@O+HQ|5sDS4Mqb4QfYhIgYX=HHECvzQjB%n1AU zyzKtGzw*fGqAHDSy{f%WUsdyT7ge9zqG|rr{#~_aQD~LJDaEQu%bNt#igl`tZoaDi z7Fb)|S9?Y9biP8h%$&Ark4wtt+TWj5NnR5Y$l8xLUt6tgK4f^Xdhk_#m1Z@sX5~v4 z!IjI#f}Q`1t2Ymb>Hq%6lO<^p+O^O!ZQ5t%E|G|2m#t7DTZ<^kl1Q6WN_&c=no8T$ zG|RpBIT1=5DwHKck)3SWm)|_^e0sfqeV>24yiRkL$DDJ|d7kI8_4Qi+dU@c=RnxF5F^KQvG#JkPc3{j#ti&Yn63Wc>u-ujh0kAj^Z4@zz8ee`&Mj zZ7vbjwy8`Tr!>L0+(>$E$-JUg)ODa-5Ga#opLTei@oC#LHsL;D0>xQFO#cDB|MCv{ z7F2$>`7T_F=(7p+sp`?rp`Uxy;=WHXgY7riG35{XK=Vp5O7!cuaL;{(kJ8X#hO`mQN7YU4PMt;GK>Stu95IS+mH{^5F4}wWfn_xl5aA2iT8(? 
zvlbAb;+hWee%Ddro0B{n^6G56MaL9k$(vFKyfrr?8FeLgT?%Viy1Pf#-lv-qS?|P4 z$-3z^tehwkoRIhi_$Z=K&yR@Z)B^o4)N5~w`uBO9ID2d$Q0Y&=m#WB%Tf(-1oY|kL zXYyll#|Z^gWZowbs&pGPn|{VFU{1iq*j9Y-83QLG=799MPIyw1DeiSC0-qFk0(fZ6 z1Bzi*VxNu}?BD1u*xk8uV#$3eV75XJFn%in#|t_5XRAIS&RZ#V5b9BN8yySYC#zvM z`!9h~)ze_Ua|AUpHHR3QlfZUZ^Oi`z8c#USKh8dWKb>HxchcA6_#E}3gG-kotGdV9 znYzs~&0I5RTj5hEm=P7huHP6uBBpys>)FU1xlCURv0wVi_L9kE0RxY?>gQthTZ-Fx zw^CnDg8qPG33ZP#Y3^g|`{~&yAf1&JUj_f&qOrbw<{3flZcITi_l_Z*r|Ew#mJH(< z=7F{$AEY5RXluVj#z~@c@4F7(i60T{66-whMdlF29kq+d<7d#dcT|+TJd}Z0-eg+*}D|Jl}z|V$9>SqqA9%q9VF3u*u<2iEm`;&1gNB>dv9!_m2q9 zo3HFF2YK?@=oHfaeIj!GhB9_|CW)N1dykAsScD`*rlQuXO3{DH6Upqx!)U<4C+x@= zOHALz2xU9p;RHS(rsE;j$z1z~gDBxK{U^F&440xX45 zNEQS)Au*CYbmjsPaj4$_6%KZaPF`%+dvNd)A-=Son0OgSwfV~=zc<{D>WvR1mwWsb zZvP$0vYOq)YK&r#OV8!eu}L0KD^j#B7uZPp5cO{xj7Qu1+HB66!$ukey;l!QYp=sG zqvDqho|a4cv+Q;qlIH2b_STYN9MIp|2aJlLZO8{{hz;6;GL;gYH7O@~XI_3`DtSAi z3x4?_L1%2x3V%yF76Vq@be=*iMp4r-<<*Vk{e|CZUmykoMPy{{ZMi>0r2mseYpq|i zJ8C@8y%OKN%8;e|`cIz^QeWKtsf@*chnoE8DDud)Tk=v*pjdo6%HHh- zxMh)wJQgOA)57gQO>_|W{P8^%XYWsj%4tw;t2?Pf+`YijuANvk^-#e#rC!QSZVD)o zXMi_BZiMZsZK&h>%VY+RO&XN(h=oFbqU`P%^6{q$^!(F#cVjWH;0E#5W;LHJ$2FPu zt;a69_(9EenFb;yEjAhYbaOnyYHjn^afcbuSOI%81UBS9=156=K)<4mZN zUyl<<3(tsjZh4K!)6l_K(xoV$EL^aU_~~T_CQE!X{fsT`m&w~z*xLowMCzAoh^Ms( zZISdl`MY&V$G9F;P+x~?J*-80*H#HO-NKORIoV{>MQ1{xNQ)2zwIRO-XP|R8o)x*6 z>!afX!bl(MTq5GMO7v5oY>C(MqKVl5*cDD#L6$4WVPfS0zI?r(PP245T?g2 za&=`dl3^fEuJay?bSB->IQ0D%wO6g9WY)^*WO=@D$XTa!X{{S zw%xX5X^*tWw5enm2aI9jgenfS4f!Aqu|Zo<#!RB~_pun>?5C1vQ}u5msg}`XP`C_Y z)Vc(PylzEZbe=*iEe6X8Bt#BfGG~?WLbb6#vvOpf1_)fyjfwKeO)8%JI!OjSDDh3j zhRO6b`%v72j{9?hT3M-w-3<7Eu9!L~ihEc<+7(t(XESB+zL~w)))$tj{ijCkZc-;T zGfW34?;L~ebv%GqWzbn(bL*D=j05s4C|zj|Z9_gt zLu}9%l!=HJRxhjrycW(bvXJn}x`qYuQB4r&eUY z=4rJ0#X&^b#E5iJ8;4f7WuX4P2hbivSz`U%GGr*y8s%wjLpP*+7R~pNyf0-#A=(x< zB>J3DfjD!15?hng30((k^5=sB#OUWX^vI(E)Fs`3=sHe_qK|GveO+Inq2INTV{0!X zuZ-7`9&Zv+&yeXTu_m0|^sZCyeRm@Az3nc7TT;l^h6~6jzj{{goaMqly&G6BCGT1H 
z8(hnLmgOq={vwY4ei4{z(Y(DJ!DsdInp5k%jHiC~wK=*vnE&g&Xq1UJCf2Dg4YZkLEZNRGMDBZFsiTOf6;wrm(J!G?Bx>FVmY;->E71me?dJ_&@{Iq0c32 zsC2Bw^*wRjQ-j>}kwyJG*oTGnpQDQI7!X9lSFAX@hmsE+z@EP4VsqZg;^n!9ME%bM z$}ey<=&C%7_P8lv=LYA32_7e@DRl#scV-|IlE=h^#vsuS79}Y)}vI>ib&QX$!}pt1@9x9G6Lwn*CK^9noX1ml&z_hVSoKKjyHV&NSoVYDP7C79hT0guRT)BP2}5ZYdSLR&W}Aq*H)E}S!TmHpnQ;f zYB{tG`5+CkL0eGfr9@}s{y^S$nVal~zKf~*cgCRK^-o}0ANQ@sLB`=%T<>_s$9*fNQrL<$^8|mXSxTD4$bo?wS$)v z__i`CYN8T#D?1TAbp0Xm;9e?OKk6k}*%66us_{TH29A;*myeUU%^W02H( zuZ3+kkf*wBdMXo-dM_$q5ARV|N`ZKu_*`%aGDL;xQy&paz3*XKn z*NuLR%C7n)IoV}`orteTs7dX5OPl|ZgZ7~!!4-R|P)-A@zgK}?J{C))rnix4AyKT0 z^CV~Km+hC#!w2Zv8&RuPbSJ)4V9PGAVF#s|bg}(x&U{c6{uw{SRPq|x_oDC8tqK-M z^VG?262roxh4Ji=D)-$o*CNBENj&f8hyIJ{gqwU$-LHE#N&@M8T0TYE_fBm6!|B;) zZu2xL%TA$0I-g!mY$_SX0sVQ%PoWsvhJ28Q*q|*a19k3hOy)g1JS6#113L zC!k>_s&q_cCrbNXh-GlU73rRuOD1dxL2j(J6<(_^8WGcq9}3iu<&tMI9uA^Qq9#*e z65qJlT%-NseBmh-HTpF5VyrR9zoRL64Nf9ZXb|9rTR#E+yl!xMe<`pYD}(1KVc=w9 zCupy306lqG*u&B&(CnZm_Q&M$ZxJiOq1j3J#K|-8wPYopt0MsO7Kp*r{l;RgoI-5h z*%<8Cz9+!or-_)7$))}PHz54ljz=1N1Pl9O#kSTO)aOMv^d7Bk#5U~B2J0Pe0hR7& zWZ$aGLZiz~$j3`5NS?b4G20ts{t)Y+-Z@cp?S0&)R>bcv6@(ouuNj@N&iH5H4x31& zV7_gJ2g^?;W<-xvBA0HJ`9T^}r5Gy)?$yjljL_ir+}fa^yZXN#$x6)UJV{sO zj;WKzv@=k;=P*(40w<&~u;#5@mR)bIn7-8Dp>A8;gP4Ha;mq@DOWC0S~DQ3ocf*o}wI7$o-xMq)40eaL%7lc=^{ zlIn4~gY7DAz*Zg8#jQDoWZL(;#QfgJn3v2<{73E;?67$lX7a#?GPIt6@6?yUl~g~G zfj_zna_%}}9@ZB4~u0D{fc-la|8eDk$#m{6-R&2;f7RQngHW+V1t038%_LD$~eU8+HaAM_^)*^dOqzO zqh&sf1M=!^NhyQ2As?h6HfT#Z>Jk60MCYdm9Ntus1m6`U{5iFTy5ARu9Xb7;4S73N z)+fS$A(qKTeVBHS9(H7_5%ML^LsVM%h@Lm#=Wtj*^k%=>7K~N9JtudRJ;1sozR|g` zm-dUD(rB>h%NTrZ&v7vLGFr^}AOtm`Dh{8Qnu{N2%op2*P7xo;Sb&SVY5*&(5xg_1 z0N7zo{C7tl&=#)~yI(cHe{n@1dGr8YX}uRmmyLDsPD%vltfz_HtsTSxKEJUEye#at ztfzRUJvQ58Zmt9sTe$nmudgCZzg#77}>ChJ@ei_N=O+)KWQOTEk?%-jJMW$SMYZdi+?y zybLs0i{%&V^Hh)vuqWhh^<&h5zRYNsIruU-$i|M=e4-A=R zw5IY+CEX+CJ5Qn4r>`fKmM4Wy;n$tb6CRyqL$5#68RyOq86At4YaY@ug;=^?OcdXoVUAK}o@7iA zMg$-;*nQRJEEHQ(7Z@nRJhM^6A1P`2B{*%M=%zHpT$$=dtR~ 
zjN|Hi%T8~W<+&d1E_!kCA8Ti2#(yz2L>84~e*9XKlV3&0R3<#0&dIZ%$Cn+dyv%Ez zF@aTc$e;dO$jzr_!#JR=<;wD9&^F|QG{gpNL77~M&Pf;kv8prf5#CA%DevrQ)Sa(+ z)WiAy^jhYK{C&Ddf>>6}Qv~y?2a$KULOM@sMpb4W5}hAbU~!Q235 znx!GjJTQPVpTA~N+ELS}fK!vukmO)u^zSQ(&Yg=S_n0%)!T%r%N$k|)f0>VLO4lM^ zZHy#rJIjcLU%#>@uGAw_>yNV9+A;}O=XU;kjoE@??O1v~)vr=5dK_3P*cew{Gm5jp zIPuPJy8*9YzWwMSrpm;T^XZRgX;!=YrSqxz!Cd3)>{iC-%~QBV{Z9JIRhhiVea-)^ zWfF$?oP~a>T&CjmBNMuPm!=oFcjWY&o$dSDjpZDvFNR&>iU>KE!f&P>!79=uet$%;cA{ ztOV%~M$V@eD*Ehz@x!lYx=aTWaCwXH?OanW%TaJ4HBDQIQM6DZ!bA=pL&bRA6F1wPm6;V%qCMagA*- zQ&BZKFDMj?_+~{hyXvU%h8VbzuaCw%HY3|(2dD&DI}kbV7BTQ}6s4t+jNNXor=ETp zq|R%VuosW((7O>(MAaB{P_eHklgbLMRJVN+QPnh#5H(2d>q?%}dCszCT4-w{E~)qF zJgt7ET4W?&CdltBuTi^d%kaFk!)7SMR`Brk7beSNWZ%oYt!~qLUm8=#N?@$BJH70! zml}8U0U!M$oqX1h)c@UU+cHVO`F=u+`(yDWB^Z~BhSHe2Ims0FhdNtsV`kau`AheE zbLWmS8^!^7&Fo}~p>43gk7OXrLND2`WeZxbQCfy$$b5-!?)#R~d8(x1 zf~P+7#RgN>;j!OWI+)H`h?}f#209@D4!T>l96aqNI9w(sf>*yU;bI{X4r)HkaW_~K_iVDu?FknrL**wrP%om^xc*x!#kya<0zX{w}B!nfNU zer%0%c(BS8KP4ZFYX-ap8Z8D63H@yj8ei%$-J`8~3HOVE#^iH&p6D6wvfLdHVgRg+ znL`@w`$V3qtYt6icV_#QJtEiMn@sz_%MC5wc_Cf&p|qmr_4%Eqeu^n}DxOc7i?2{j z_fHQ-)ZXI@In|AmZqT)NKXRqqVIa&rcSeKl)(bv{{(oGARc;Tbz!<>c8+(hf)7;6X zs=Y)J(hn@6tl+=*JRHq>JR59#pKY*t5SB#Gr*}AoX2UojkK7JNGiV#~K^kI%wsNj- zu!)oC4C$XQWM8|&)Yv|N8%*)Q<$Kbx(Z0^~T1I2xBzhkSVo`pMIONW^r}XlFB86Y8 znSOsR(fXiHIQ;zokP+_;v8`9?uq%!#4mJ|ssOcxresM28NXhS2!^b>(N;w)t1N#VP zs;S)&Uw6VERpDWGYqkGK(Uu zTCr2sU4-DIDt_v%9Jpu`4xB_DAe&u?UW~XdIX8V8XdQe7)^9w3+Vpf#4=o(=jbAo^ zKS~8)4_3@(kLu7{%%HIE0rk}A&wr_v4kg5LeP!b3-81BZ7mY;3Wh<JD59ZMuqe~X?#`HNCYhIa*^Rv$#Glj0V@LcywYOz&y49=s9dt}}?0*~Y zUD(EW`bULpztcy*k2=Xq()(YY&c4d$1PHXaZ#txV4rLdmYnkE_i{gW1BtM;4AZ)3U z*53CU+DnFUKwlp}Wl;=mLq14DY|s{ziIM1>WuM9`_NZmcI>>@{yK3U)X)bx${12U9 zmEN87dKKcwl) z?WlL>8zg_D4WajYgYfG~d*s367&@lw+|`Ok&ngozGs|oA7jH1WfYsHm?hCV5vp6B# zlk;5#`gC}NMF-g&^SDRPTWNY{&F)XyTmEF3I#+WKs{dD%&O1HofAi^d+dR&ud{ypu z>;s)Q%80b~mf7Aib2&NNa^~YKJMRDO;qSX}tz;Mnw8fL~K--WH(hwW8wM8NmBGIX1 zmC1WK^`!977+X;OeHSHAN+63>jp+Ou+#{V&Ar`G2kzlmtR@7xj9$&EZ32XZQbFZx- 
zQVDek7n1MV|B14bb^~jPZ~9viT2F9@DQKB}4l#db0e<6#V!5fuz?wrR@LEh&ylCPC zadyQ^VELAhU0f6oa@Jn}=dK+BL;bPrCw+^7)6>mj^5+RMX21zVR=D7OCK=d`ljraP z$pegv>v}+83MPJiwFYy1!b4?S&BP$fOZ;+XEX7^23V3cR$D12IfGMZviCOm&(Dj#I zo*VQ5NQqrB_^ogatPFQ09o|P$qf`~izwu+y!M@|d=9;0Oq6#rEgkBFV`hSYGh^peTmQKSk|6zi>O8u~B?7d{XH-74GoQ{BE}i8{*TS3T zX6C~ns0!dYFkDa zxbTs+xdrIv@J!y^bugnM4krIZM3qwkAb)716ko)#xu?1^TsoD)Ij7Ly^ah&FH4xE$EFm z8OV!GjmW~kZsfR`t5L(R`DoFW!|V-#H}&$p%GqZR^dSSU6;Zw22=cP%7;Eg|CBm17 zB3NIZMX}r^ztdm8ZLZ*O;4wN+53f`$vdSzGIKMBedB5M&I6&3UW<^pk|FZlL^Ls~R zGW6;22>F(7l??jmdRXSK$cWr^URd!-rTpp3XzfA4{J7AQJh}mX-pSD^DU~}T?K@E(FK;b zZeyD}4KeE(S0qpBydW3;@TF#3F2y$-s>Fb;1O9g!0}ESd2XeDl;@o5d{H*L%b}-eZ z7ro*rC0}`8w0`j^tiP>_Y|EU46>gh{u0ON^O{kYgh%*KvVTFu{A$N|hG1e1jGk%>a z6ZFn^=dTFREK9JAwpaY}RJi`zE4z7bC3hA7eqg4O6f2L&I?;%zgp*fnMKFmJ72!-Jny zrZtrel3G!A(3 zPLbM!pCjG12iZsF)ROls&Y*ck1_ZxGnr|!CN%jV_%LG)yfc?xdmr8$+bKoW7_w9d1 zx)IxBVn@Vu`jWhwO%J}%*YTT<__8_gn;0H{RkI=2VXYStT&34ymMBb-urV%bub^s0eLoi?<|J4As?h6HfRgV zK%KMv*9)m6HzL+tMQpM$7icMXQQ>(r=ve%_=S1%#K`hhw+r*2w+;M`g*Z>3Dir&$!Z%-0gD1J@n1UiQ`NeW<@k1Qv3HOm%O=;MJz_Zk$ z{*UCH&L+x!?Mked{R%5K+lSANSVS&f7D(=TZis7s^TGqarlG06PT1$F=Ty>bBV4>^ zCO%d;7yWv!Ly!C0k{a)>h%FoAiv=pDq2FtU&`a{>=T~%;w9s_l_^9P(Cqc2YO(E+Hxg&)ZdH$l?vX1}d>7jc9j_r1B z?lHd}y7ner;?T9%uAO7HWy2P`LfM;wGsWZR+B+DaT{esZTC%y$F@v@tAEY5RXzQFr z##N%T>!QC%_B&6I7cxye;in$Rxx1MXXKtf=q@C+z>AgsZrdkp z^DnsClIpPQ??#8-yO=}6do57){u}PyG2LO;sSezI+Z3w*{Av8uLY_lWk}1hGD#nYP z&w|@UOz@Z*CH~m^2ydNZS5MEc zwK!bGJYN3Y2J)|79%nj?1JVxrMLgXS_9 z+-grO<~Mm8eT_EfIMIDC#FF=6mBV=UX%=t1B06~g57X=(cf@?!u0Mg?e`P*aT@{73 zGZD$_<1NWMKc3Th8Zy=#%dhk#bAKme<4hjlnV)2EBw+9At@3Myc+DV5Os7spB;;rk4JVWyYcF!=#b@Z;f0e9p;wJku{9lm8Pk-49j6bAQC+ z#TQm$v$OW&3-%M3pUHLdpW_AcrvD;zxnBUW{YEM2?I?W~Vei@|qlIzhg8u#GH6GjL zOux-l<=cdmGb1`yiE5Qc-fLSi+NtJ2hjedHHlolxz(~iezWoeOc~5r!7POG3`XX&I zi~$^B*FGC}H73~V1WEVsryiSG0skKA{*P0z%G7SPa+Y0%|6@9)3yU;c0mG^fmk}P5cmmw zb!yS8LUOgE5A%?yiPi@{hvROQKNY`m1;+TRhZp`-$K@rykv}Ot=OF*Y5YY|kB)9CA zLsf-isF!>r$~;sJ%`87gE`Ko^4RKe&{_6Rl*uFkA-F`JC6TFqGRlSF{zhsh&pB*Lb 
zIdh2K3k!+#i5;ZRURe@Z?nyQkwUXNRU!y;79HrJej1_fWctOm%v7g)&tx1NYUnd-I zh)9$>nJixAL?-M{rz&T^Ah-=T^px|WP>%z?luM*HdOctQg?$-^RZnXt<77Js(V7Rs zALqh|F`Y{6o$mYSYnH9(X*_>gsbJ`4dClDK6~_D@xSi9tHT>FJxA+-uBj?kAC8KPe zw2#uY_k!UkW98Up#;aU)Zl0Fp9KLoMZ?Q?+e`^`uD?Z2Sff{#Zh;$!mrN%BgPtW*T z7yo*LS?>2;CY(xn(evs2u$Gcx9MIQM>#d8SZO8{{hz;6;GR_j6yYHm)n%9W=&&zz# zb!ytwFTH6b(#8YPO1j0}6@pI2BaPKom z@UGuAlo{6+S7ebm6)M1M_gYcA%BJJR&bAKRgD#Z$49VV+$PyI$ex??dE(7;slJRij z7#?+5-eKqbgA}vz9m#Eeg^y1n{B1_# z@XUb{{Kwi1e41l6cDPFs`|xTZ(w4D|xH@(dd;8r2_SSvIMf<7>QbhHM@&ZFm~v*vHjm$$o@Cr9Vlmem#( z<$Y3Q*%>WXfqZau@#7fA`WoDrctFpm-qmC2`Si(zsA9A5dv=_Ovsp+|I9hI<}XXg zFCO*i<8=q9$)Wqe-{Omu23HlYHA8{H1q__|`H{jr_h6s4)>EfnDT9|QFH+C^nOLs> zHR|Ww%^=tMD0WuVWKl~c!nxP?L^c;uWBGTdgLV<`S%-o)-(~gzfT8t zx$CJt)^SwPPBUEL_$-huBcL{|yiBULU!8_mo~8aBn*dG(EezWM-b+-%{12|@-6&Ejf?pd?*ku!bH z;kJwgy_O*l>KF6frdlqPJa?FJQF?YId{3pxFb*ht-Os8R+J=0PhS;F3lM)%I6P+Nj z6<&u-Q&@p|KUV-Iv(Hj_ul4EmDL?WF-6KIPQ&|>Zv%4j-P3ICacbugF+mk$^_O_`! zL+jjFCy46XG!;`YG{nE{-Hu)BN}^awy7=Te zT|9b`EdG7gDe|A^R_sq?4>~x1I$CR~O$O~_U@4o-@ToQO$V2`y>`mWUs_5bl(kS{Y z^+#|N>pIhqMX1lf-A4%sP0espbiy3Z>vYBCOuwMbTpjH8+*+!m;TMK&w8Jk%uEzq+ z!VGNn?TH^N95CcMA$e!^9PCNuMiF)WEpnVlM@Jo({0?|?47;Q;9o^unL|@nVR`ntm z*D}GKl;!r}*-7V>uEp57+QhQlr&PQV-|i}$?W`CRc?I$ zoWdQYb-alymH+E|zg_)Re5dh=4fpp)I&b#eJw?aVeYa)t-TLWv^=lGX$hI+b?QMIj zVm^!m#_+*Dn__4i@jbV~zotX%QLPy|=3)pz)`%b)UFlTNvvnc^^4 zsUClF*d4!W^b_-sssa&R*YQCEQ-_PVB6eq}MUQx1K@}Fy$CXZo<4(|%B3}gKGhthAq&(`I1;_6D9) zv!N3F+ntjSE8)Mf_pHl~Tx;VQ*F_4Vg>7`4*N$CVK8yqMn73G$LEDfI(hwW81!cxc zbiV7>WpA9=Ah0lu#J@Rb5_ew*U^Q2i={yzs%G1{n#FE21iN7u1g55J+DoU2TD=r~4 znw~eHPBQx!>1#t18=!VxR{$TDmr?JV`+(u^57dlL z^KrA*Q((jRC{T1O7(B4Z$9l;e%6&l{ST$Ql?7KD`Gr|HuiPco`ua6uoN^Al`9Sbm( z^y?T1*?}u>*$NWle)krM z_g_0rsS`cZ9Tq3zhobj@@5yt3=LaEGcIObK<64Wb$0ehe*CXgP#D?6sV>vQ=Q7YY2 zOlEUApRTcyD3^*F%g=J>FYK?eRo5&KsyK5*+wWBgRw_s+8FqjU1>d6qfwtg(wqyTbQ6 zGmZY+jj@U4!#JR&7bPrnXdCiD8e)UC4ohSPB|39u-PpG@IHH|(_F(AiHL5ge8)c^x zNylPI;tV>b5X+LNV&KhCMXkF$Md8y&BmO(<`B2~Rh?ShWef=bfn%DN3757vC9o 
z44kRHMRkBB$lg*XOnlOgw2#<@+{~)#>n*P=(FUDsNYuM1^vYgu?7I66^hxwYN~RxfODrefdEjK_{GwF|R z+bXwH@7B=?=wQ?}^m3jnX5!V4JbKMQn2tWk=h#Trkmunh#qXM? z6bRi7s(zT57cIkbYHgB!siKlff(kcQZxtxSo`dx=gR-AvvY{clXB(`?M%?isQ_qY*h-7)H+tty`sQ z8HlBzI|#FO*F~a2)rs{di$r_2jXaN}7ypzsrL%zfw(KI}%bSP!NPM%=Vhw%G`Y23C zG6Ow`6?^%Jyl(-j{iPAfxS>xn0u#}lzQ<7O!jTC31*<+G?__CdC)_H8{4Mmbx~uJM+P!#e&oKq|eI;`I7TUp63K1Rb@3Zmv1nBcirE{X-_aeJ$;C2diUsv z+WUG!hE?2>Cv^P^%4Ra2Zfs{b+fL**_-xU)F%0AxGyZo^JV$b`t#6M8_r_D{^D^^P zwdu7?)FBSX@aTEV)aER^(?PT7+?}G>Q8J7J#!$nX!-2LTAEY5RXbZ}GmFWDW8N^#4 z|CQMyd7r|v8wkR4S;ZO^6HdqUl41va&H-ZSeL5d~IcFO)XORb=+&x2JpgZzjTlEWF z_GcSIW~bkEwpXkPnkexNwxC@IZPppPWA2+Zsrcodn77$VoN?hS=3J3O?RVdSSKZXc z<)%->JFFtfP_;OTfF_Tpf*qDkIFf&L;cQ;!-9(3h`rrv#M8X<=y*Z12YhhMp(y0bNq(=jJmW}ZyprBjit z?xRtxXX$I1l#ZKVH7-31`qo;t$St>2pz9?0jgC!T#$E$yHrED&`8f-Rn8cNl&$T7C zrdu6KkgiYpm=WWmavNjYZdIvn*9qPuYd4InsIBw)uL=Fb){Q`VBoCXdCiD8e)UCpv)tQPVIl;ytao!%-JWF zprRaYBy&wSb2d|YPkf>2O*&5@mJ2fuqG354<{H@yz8=?75GuK+4)qO>tg(7*1(GLd z`>c$NJ-!mnm-uGSi%ayip6*gYcE)I+KTkDMZ+&9HgOtgW)V})jN621; z%GLst)`d%I>yS9%e1@OE$U|d9kCbiEVKY#;h5pXgW-jIFlYf*gIGx0+;BL}?rFM_i zS~Bjx9w}Erz)@_9=5_f>^E4yWlJ19AvrNqFm1X%n<4n5*S!G-3U(4!F(_tKtUN7N+ zwjm#+AvS1hsYFIuqVu)qJr=IMPhcu~K>i?RfjBcyV#VYE8dtaGW_c(Bv6#n)f#1nT(a$A*ZArJBaa5)VephXvPg$=Us6k%ey|d|U2mYM zyR)de`3j)2jv{iG6=S;cX<+I*6R4S?hpk0l&Aux$ z>Dnaro=_oJsb4l%XTII`>^1uB`gOd55S1yzyy0@3(pR^qaa+Ds(EB=tfp_V(Xv>Tk zbLFuacFDEI!ht8UJjmZ<9BV#|1Jdmh9%vi#K^kI%woXZ8Cf=XMos;s1=RNJC@Nq^S zP+TjboELVHU3IrNpgHisl$m-5z=5%j2z}ufq}S33-AsM)!d2+Ip0q<3sY5 zQw0W=j3q68@IgeKGx(U(je+n_)Vo8cfmo=HKb$oh^vOO(M^p0nY~FWFW7-euqRKH! 
zeZvnBzH|&0RWb^T-XQ|tW#T~^dmDJ=7)#|VPQ|BAS_fw9{Gt5$SJ72#JMsdldhwkV_;*{HpN`Sf20`=Ci3y-pA{sTKu2E*0n}me;&| zx8C@3$0oa#R>AyHyM~x9Y9sfN#uubpJ@JI$jW@aL z>VLIYohjf{tEzIHy`*`1@tySC;E&N`i?6@&w5;gOvJ0q{&ZpazT1$p;Kwm!@8dD5y zLq14DY|s{zX_DwX+n>(6zV|oNI57@H-O|V2+EA1IAj=^W~6tUThT(J*N`*7|$ zXZ&z-B`LmX>@dfbjUUd~jc0eSCe2euAg{BENOm z78e)HH1rnoVhTkO+t1*Ull5_{6E+Uf4Wp@7tOe-t=QViO)?4^mWGa5@F(3PLktD^h zCM@N``U37{>RR7em{S57H1Dv;}46NOZbe<%vEb+KB8+0ewk zak}pKqO%gA48*d{tsQq<5RGcbL=n>x{tAVXa}w~g;Spt@Laf{{hz{=hj6?)p!l7?= z6am^V>g{U8P*E>&R%A;K&6!Uv8he~nyK@HZXs#zU6$i-YlKtVoJtpXYuz~c7-9c%3 zT2d#pXCMRTQptYzUaFTy6{&th_K--WH(hwW8wOJygE7AGa zvzSLZ$g{4>P9bzlr&0cct4T#26BgtxcbA@BfmnWQ@S(8$6YPT(LcUnnO5pwf^u1oA z?z1ih{371$R;EV%@gN_Zeudb-X{KY^HQkmf?K8pG&(i>m@EPJmr+mSOS=IRX1Cc=V z^AkX})`DjT2Qd7JCKwE?rH1~=0LG$+m`5H88qiKqS^O2-su=`k*Jt6+O$>0`52x{m z$JN2@B^Q8CJqJ8<+K27C=z)zpdJiaCXoxqw7!9mU48R7bD|mxrJ2;}g4ETE2qc6I3 z4Ty2t;OPr{a87q7u%A{;CQVDG@=F2x@2odS>xP9aO3stHxO$^-N4_;3)AO?wj906i z7i`SEY=5S_&9s4-Z2M?a4qp)Yf;mQIWK2Uoq+2PAr1!*SQj8gHH`^F|Ep@K(YDw)4 zJa?~%5U5kaC(d$gR-}Tbk+m+i^GK>SpC1d+9 z4zvyVAPuoWTTlk-q+aLqf=gdAeLpz>MlFYY&q|~=DG$(j`n5wEQ;6lb<#Et7c#*y4 z)(*b+0Y^b`m0(1ka#tTB`w!2e!c8j3ecd?NBk_&Up_0BfmJVzu8^5??DZv(0`{M?x zqR5ca8W3R~RdHDA&q2!gQW~|YA|6d|j3#GSdtxQGo|Az*cl7hK^Q76P7}7(>3f+H! zO;t(udb{f~P}BT9*rcdO=&>3B<p_h5>mzcFWVi*(<6!C0**a%Y)f2dr=V)*JuJxMm~U=U%Hm{w=e5@5q=M3DT|Rgh^wHEqG`A z0cm5{J5A=Ewe-EvvpKkMb0b!^7q9GN9^I(2?Egi%h}{)AB24C8He6q=gB;? z@hE+*&;8ksY^+lyr&sJoSKQFY4%~@Fr(PUOX)#jKo~i|y<}qdLT7eb$G~ph)(MEt= zmT5tr7)>J+8K=t-Ja?9n-EWk;O{W&kOo|%WG7RY%q4U@UYoY8o4*f8JBMLjFi^iW=Tx_yPFtQ|J1lPqRskcYl?YHtzG}c)ahXr;~l2O zy;>s8Q!e$>3dSe(rA~3*I%~_P=~;FueVc0F-;&~+ONMbkdRLE5F|-Z&APuoWTTmtq z)#5$~zRU|)l*epp4n?P!sv=Kw53$GVw9xasaLZ}>>R9PhB0aGFw_~*#-||*?aM0XrI9bX0WStAIajzXfX4&J$7Q5C8$UB#T~CyfMaPK zZi1~8FL^rz!p}bh2if7+)|3?>YvEUrrdkZXYP}+?-L3yWs;)d9s_*?%DTM4>vLyS? 
z%-nnKBq}W`$|sRlrI1o-Pn(FcW)~`AY-1g>+-1@ti7ZL`Mrq$yDXD&Q?tFbdKj)9; z_2@b0{l3q0&pFRo?m5(wE;8f`c`a(Qcsw>|ybv5Zcm!)q&!p70wNr@~=ZPKu zCWA>XgJ}0oPcdG;Mr>n0i&_~ykxDvHid~^TQuA+SiBA@tBolm)qKt2|u>rw8YK&zr zwe0yGs(tA^GWul?%J?o1ei8b_*xU6;$XW(^(LsgQ>%?$KMUhj~CPj9LgLOKO3&fXp(1IYLN3|eDqSDS3%)R+n1P{po|{|fw~sB7tk)=m z-z~RMZS0!ikV+NW`sFkt3_pk740(fm*HR zsZX?Pmb{R>fOt)hPAT)?jNy0Le57_*BU-3jlH0&Ep;J61MP2WyBa z0?I=+m}jsBd-hgmY)En*gvmf8WoCGmwKg!BT0LtzcGX&gU3sDfYgODqHWzU4OI$_v zrM6i1Goxg3t@&(Bl@L=7x`Ws}+emg(<~4BQR)?YNp9<=7bY zy6VuT!>PS~b*;-PX$-N(aDo|=$+t@NKJ0Y9j$gb}ua@^iH|F1ZdiA}WMF&s6=F<)h zWtbn|CF=lxi&L&zgw9@7f8yR{#=^ew^qhm=y8en`9#H4^{ZHvu zw;LI|bRjX>!GZ|yxg#RZ%qH~Szd$_CTcAP2Ed1)n6hyK?MYJ~O2EHfpiAcf2nYBGU zU9_k{B*KhVBY}Yr@DAOJh@Zcws3Uwpl;NR_F87ilGCSlD!C{1TTu%d;Ju#8V94tn! ztlH0N_q%Rb9GK2}{L2-=-&P=z{>LK?QFs68(-=7 zI;cjo*qtgB*cD0k4K7)3mOLrMan%Bj^L^tFjJLTX`_nhaGVE3P()r%Wqm9kadA68G zJyx&L`?J=lvoe!6^XPx)n4PZRbA}AGYXV)Q_4LYE=~~;@_k|oS*Nt{<)!EKd6s7Aj zLq|JGhj~C-Ctok*Ky0W7WylR;!941dv}>N4W%GoY9~hcbc!=Bu2V_zVA#gQ#Pd|_N znRM?8G8pCtE9mqn9S+9}k+Lwxe>r3u(Bt$!#vRd;IELM3YI(m=4MxPW9NDpF+uM@dVj z4NyIqNUAhVr#SEDgPyo6WSf2+&{oO-J2t6fv8`&Z zW)xGOR*WHyQj*9ykL|IkU*}VEn~N!@qfG>3`ZdF!mUX9GNReFN~Gsj zi^m*@Wu5ekLB4UDSzf6k(a)#)bRC%gU9)(NLxmvSI^X$nbbuLCKbY^6JBQC*H;NhK z;gAk(I-Gw0&aiL)>xe-4%9?Jo=eK!Wv`?$%lumEar)~3uOQ!xemYMUO&&hqNQ}e`E z8YB7kNaNl;oh97F=wJ077lQ?|LDCp0;r`{)VIHvTz-uL3hz<3i47oupn5R}!refth zq5s%dj9=RN)Ni+Z;7+-}F>K!D z3UK#p71$O(igXBH4pKB`V6BhmVhqcA!WH0P=Zibo%(nZ)#q(J}(N&%t*bqYA(`qKO zelanZ!AoGreggZ|V@)=dts%01@56qOnb@9P@jy1G5d4jJKqf7pC?DlS!NcQY!86^X}!iZ~g@QQBVu3c~6_D)N-C^ll4Tp-*6E&nrSF{ zh@Naubk-dgV6lyG<6G4o5bDo3gk+80k_K%$oa#5-b*ORCpzY!SPFgV|pH(FBk2{K0 z@{8^)@DQGN-}P_Xi~0#TXXZ?*S@>N#mbtcIJRSGWSDVWn_4JAJ>bsfFXV?4EG0=q% zQ>=!0K&)i-xm<`1^`H#7L9BR59!gSXgN}#rNc2G@*vu1K>|hAoH2>m>+=cY>BTh`hzD{>&+ApO&R~oNPPQ zB>NXU^gK-kRVIpkWKFQrnfX-j&^j#c%x>)R--nn=ekWyj;u!^$62*y=BjH0L#gszquHC3-NYUzs>B_|l0DqJIt=SxwW4NkE>T>rx_C!^1L+*g zCAu7<3A0KuQut^PUyrO~UK%%zv6RWA-^Zu{t>WK~;<^y2+^U 
zZx%kT8aX~4H6G!$*Sw|c&On5cdF70&=HHDqYy1YVBG+as)>F09|Gopu_V78E4pj4Q zo|Cq{V-ICiLpur^r<5#TM>s`97YRnalg>GaT3bqoc|e`~8jBK$4fUW5xj`(L2bLLq z*@`u5LzD19p{7`1)j-BB(YtiOoItg4neB2ikJM1}#>;Ocdm9 zBZ>0a;O6HEVAV?l?B4wAIQlS%=n4G*Y;XPrOPihm6IUKO=D0nn>EQ#6w>$#bL-nFV z=3R#W9J6G3)?mq&2OTx4C(R+3~L2`c++9`73ir;Y97o{&el?4ErcW>G?>}t7Euo1AEH13$h5C=BKAmmT zS~|>QSO;Q5Jt#wN5NoC+kI4hQnhiq?k>`t1cu967$W&U2D(@VR`nL7a&!f0V+V(;& zMvqIur}hg%%l-bs#4>xqaV2ZIeTHSiba<*J(y(DKDs$Znokwh!-0<{6Q1eZNuJ4MX zZ{(7rGSvL349a^WCbm>hrHq!`!xV*VF{U>`Jl*CcwKc#F+cJJXRU6k&*=atZ!k4OJ zkCzZsQQsahK5H?izpjK@wJjG5%h-T@c=rZ-nlnsx=F2?815RQbq6ugG4gOK)PpJXDxYIvqgxYa zD6OX(u1jMiXG_JB)dMf<7a3(c_ohq7GB$W?=`atN*Fayf1Y$!yC_`=#3+8c^lxchj z6ZLXGGW>!zQ^KNb>X6nM(tpfZ6zUqSlb*8;xy)447w>)7f`+e+T|YRHFu&|6{Z>pSZx!6gp)Pe>12KkyAZ>vA48UgjY-uwsGuwtO}_sMl<2(7oxH zqoK#-GR{vb`amD{#odm5h4~uGYE{8{=X)YO56#Jb@(Oyc5yxwnhY}`jqi7p=uG_~s zbw>w{a!KVsm$SCYea>=;XgMq7H9QdJv1HProraTYfqvcez3KG0)|(Y&o!sMTJ^jl8 z$MvHdixy02;C-o~G=_D(o<~|7d?~C|JWx#6(+|s}Bu+n41$MpEXY-3`2mvA2aM54f;#zZckz4{BICHJ$dw z>a>USJ_pEUfAlH#lr6syTg7D5b9A#%GbMdQf4XOk33*s@6nGzY0QZduOFm8H`J5m(1grsrpGtxFtsHo|Z9H*~7ftLb z7=!Jun1St|Fpm6WJPB-H&`Ns5odZS(*8`tDf03SL9frHpH3{$64D>UTMY>JauBZr!Q6WoFSULLDde6brP7 z9}H)!k^QNCTZX+?mUO8?Me4t)TV%UA*Ds`rvQ z4R2}O%YP^JY2q&1l9FQzc7Yz*&R_hcXGq!~YbhP(0rTz&wJm|zP!GzG8^nTnV43du znJy`|KN+qu188f6GpZ_nitcI(r`t^5bJDR4*&klBXc%B!~3h|44GK@+9(tdp;Ssevq8% zEemvJy##_LEo>-t5)o>vK*paj!76{+VBvcP!Hn}0LCz8b$~(vq+q^dkJgyib()Y9( zUJcs|j$UVh#;V(()@>>IjopllI>)2u98#X$F^h_A zU_C36>~(j3VR2zimQzl4HM1!q6iGRdI->10+@fE@z9_vXpl{q+D<@u>Rm9b5o|b-e z5xa-aTX0hC-?;aW@9460g@?79p35mizZ`iXPshFM8l6f48Z7vuE+T@dZZSOgf0Os= zbBB3A8N0zOf!I(F%8(nx`gH*j+?13lPMKJrb|e(}^{ER4Y;z|qzpX~k^~cfej^xy7 zdi@k~`Tm9jzG~^Bir!}!UOj2dah_90#7L~EQRpgFAv$i+Q9}1*GPozP%_=?VTAR(b z7vztUx8Rd8O3i&bQ*8J!ib^?wV|-IDahBjO^>@X2Dz{PwJFAvU{ndO#ac%@t9@F=L zI4c)w-k;TC<=RM~v^a|Dq4r{f4F;ITvm(saKb}h6be$TwxlgPkxev}P#}(W$STA1F z5h$)cgi>q%@t{)tZ)3NMOvJjQbi^0xI*E9Se@5MQ7IpV=5jCZ?mU?-29HrG&P2T%` 
z24&XPve*@maT58)I-co{)|`x>>uJFCsuEqsd|Y$yL+6sm$1G*8ovGLRxKLm`ZZod% zF@1zj6XPa0wCsCN#~H`wj;Rdr9A#A=Z%|V&PBT^Tn=RVwa`fMNdTG6Y6Te-TM;WLl zL%-~ZEv$mSGHYWhgfolkcY9|!k2fr&$1)FC!Q5dUQ02wam{e9`Ofru;-~ zgKZG85il`os%J!73Xq0)BlLIQ1>9_s8?x^CaeSky3iD07 zj?%8tS{P(BH8YNP{my^$y<@)fIjsvNd$yING18?H>DlQEuUVB;-dS$>7Kd6 zpQU3N$Yu8E7f9Ns5}cQo!(U!%FA#Q(oJU$<&c;_eZ#(z0}6{co*uzp08tg!O-H!sDGiBIUz8u&(KE%Y#>kGnivB9>PC}X)*-e|w-RlLqb z7Cr7>i|UlFLV7J0BM~bmqwj^U==V|erdILK^)`tAc5Yq9>$LLRjUkR-{L+L$-Hptf zYPbK5W1($FVQ(A?&7^U!U7)^o^GPFX>zSN7`RKf&oQvZ`MQ@0I>nZa^y1A*#3&*UL zFX$NQ!PdES+xx6@a!Hwrf|FdDN zI>YjYQ^>_XXw0MO=&J^Q;xNk^dnB<aCQcUTMCm}5dY zs0qnyNs-hZ$tHlkUy86JtzPVd&nwB7#;1vg3Ji96jt6^z%Xd)o%oN+DoI|Z1x`&mY zj9?d+A4i?by9`VAbI8ISW!S>~jTn1FJ|Jv5k^GRW___!mJVnhEZ7b6d*hRNVYWL}%f)4X1b?*f8zZlNWIg>^wb{|iN;*CrJ*Z}*q2FTe zU93@)8yanN?|cgH`i}qhn59_?IGyV1HL>@lXEGn^k;X{tRqRTJ>JHkqHsm)iho53dLYUuWLHIwiJoohbb- z_s+$M)b`@>n8n95%6&(@IKpa>D%@qj{=G6+9NNE09H8PZUU)bZi&X8PmfUa)gxvt_r>#D`?e+&WchYMi) z^b_La^GNaj4e6BEt19Y6pDVlWMTFS6xn6wmVJ;~=rC8K+e<5()agLf1q9b16vVihG znm{yE9VS*BPZk}!au1gimg3@TfY{Il=z98ohEeH#r6yc)Lp5I>3ou)B-q6KTl*bHD zok+N0&()w!hf`~aGH=2Dee}DYtmR~8*>7ikx8KeAxNE;DUYN>q$)Bw`tQ)*jz_C*< z=Uq%tr~A{j-MZEA|LS!%mTbOI?tE6!S77k-bRGO#4-#K7%md1DTNji-Y^Voi$PHq_ zJh05x>{Qmp(1}EbzMj~8gAb*sGnRaa=+bfT^7;Ps`YGhH@_>){^FkvspU(4ZX2y+1jlu**tG;6%Iwu~N5^OrBLvmKELs<5ZTC4!Q}X`;aMi!=r)h7e;~p zyIp|Xb`%^8n@heqJ{Ry0FQq!~=Aqf^M}rG}8RV6JxPbLBk$iu92f+?p2?7m9k%#@f zv280Hz^(nAhTUO1$;L}}(KWNG$tfG!L8CzdMu z2mjmWZ;Z66k!#{{NilWnyyED5QKy+!tw#=9i=ND^85OwRXz|lyyo-^S{`Kk9lJwH! 
z2AVaaMjxYnD(5zV_UU@w$%;1rDfMx?vYmZorERZ4{0!@19?;e+Bsvfq>OmQDgIK2| zd5B4xHA?~#c(o-z8Cz#of%;j`(I0Cs;=POQ=(ZO*DP2E>T&8U61!Jx3@f$ndiEc0x zL`x<2)4=kE(}^oxqOy1vQH`!7G*U)mI+x=a@=Gh|ImTdi0lFx8AJWaV0%^NGy|0**2E0hx{R)QwXZ z)+Aeqg;ISe_dti?81rl*o?{I*z4{KO`09dSh1a5aJPaALr+^i_S&L<~?lkh+>mU4G zf;?>l_ASlg;OAumUj8wcxt7b!zLb4%zGJk7f6&^ARrK-k@!{u(zh^II*!verW2AT0 zA=cNtB=jRLFoLf*eVjgk{^VjpdEOtzNBy`GxVxHrMXyyTUkeSN#e zET$n#`tARxQEjEeJRr_S4f7I+4fUW5xj`(L2bQ@-f^}z{3rj}s2dFf*1rq|2Sc7tL z^z+Osmwp2UxpipGq!7w{I$qT?S}CQOYnQ*fiaudL9^TtwmfL=4)- z^U*PSthS@Wjcl$PAgUstqesqhuo+7iVz>T`rlQAKV2UOHC?3CyhupnPrp|bb-dt`0 zJ_fgu2kV;21AmqSU%hB>DL{rgG2=ZDfpOIS$#=;YI-g1R$-&@*g9rGYdT+HD9d9Wcz8MECo8SIms;W1Cs9-Gy*h`Ky=7Rk5mF#IPf6%6Tu3wt*mR=;#20MGS*pn zytVN2!eYcwNibr3+Pk0QxLKoww#$*(56#Xy6;?zKoZ$ufe=f?bi02Ka{I`DkQ0!RB z->y@$gDss$%JzRo_oo{#&o7ywVp4bY8^>kI+EH{|tGfGq=`asyYpX?zOCUDXgEHg> zv0$FnJ9KKgKdJIc0*evZ^)i^6j60c9(u;iBAxF2F2X093fr4Ci{}T^nW{(3q->eYL z+GNap_pRIomN%T{S!$pMd%B&L%okH> zOY+I6SJN@PtBwlOi=tL7k(^=w{y6Y?_KQ4F8cPxL{HTBvm(WQ|RjFjFA5{G`FYwax z0(nO6A<$oV4vgz_!_V>wyg{2cIN$|6H7*KK z#%P?{4LU~JX5eAgoq3D-k7O^w&QP23tH~jbJ6M zP9=Ar-a^)&W71<8hhjec4HV>}Q&&Kx#>)X4pU24OYB!cnc;tw-x1)rM&i(k6Fwb-( zH?5SMcRiuj#WMK|UEd?(%g_~SCZwY312oVgfV>-akW8IjKvZ>WQ>yZ=;KPX$6wm2G z+Q)_h%dw5<+irU@m>|)ib&*8u%E{EE@=$Q;a{NPTHcAL0FL? 
zb@R9dD_c$(1Xyk(}9HxcWW;f^OxY+}|z8 zn|4#;ADJX{A9)mA?z#l6)%3!s!&snI&PYThhFFvT^I^NSKVzzD^w!ry9ire>ei!5N_QCqt#{sA445u*b#ipqn}aYG}5Q7YqIRCUr5i0o#wT`{D^z2 zc}tH<&9;PfMm-z0@=Q1WH$Jt@-%ZV(IRNtBeCylOk|57E!yD5{~OlU^;gbe~`)lWN#-}?LTsSin0w^-`q-FrtG`L3hK5*OC`2(DCwa4 zQ#Rug@$Gvr>S1-5oHI3o+GzNIw6OaDWH#kf<~yfT?{-b3UU`}W+0y&u*yWnk&&Vb+ zd~-8eQdCL4*DIph$X4{=kw$X8Obcjl2?NPN1K{4h1d=&bmkJ5zQ`@uvr6xRysX_S=z>Z?n#rlP+pGCxrG1?5`3m zU~+N9SmuXOqGQ6b1iBw~FqP#lA8T**G4XcYn{PXePWj~tNAvN2+ur@VEX(#XbZe5( z0lMwIT_K?7kuKdoZP_9+tzSGOIqP*Ymu`C#3(u7f^MF2lCDDP{P!GzG8^ltT=u2yP^VL@TLlk$#TGuB4u>)u6#1J zaVwTqwjbNKRC11|WKzG=24HKJZNQ$?FtNm1O;8=00^X&3#FXc7z(RC6&|K_EvfOvD zepWpor;P0a2J<|zmYt6l1WeHr+aKSAK;8U^g9g&pWTnIbe$vBpyka3oJ_NK{Bz;sKI#AE3P= z+q8;XFO~}oO0Mx)n#;^KAJMLFD@f->SZd+nGvZR9&TtZMnC$p?i7{Jh^-T zhq_l5N#kC~C8{PA`~7(dtM1EKdOme}MI_e~-B=iX-n{QzoPB zb=kMh+prHW%L7@x3@qzG3+3^l4`Z}%W`Fa~2eSQlP2>;FpeiDKu}TLwY-ODbCb{Sx z^gWFr#DOxz+AV9)jpwz9MNh{Qe(lDz4ccz3HS1O@6HG7W*8NdGP?o)Ba$U#1t4uYu z_xN7q+K91?e`j-Dnc82vZ31mwX7ySRt!{!XPFQ)ViE?^Aue4Y1-+82)`7Kq z>o(pj_4l~Wl?9l$uQe9vn2#2z<KiG(U9G^?@)yyS*+YQ0Tq|+nn zsa|si@fB+l8b7)Oo?T49wn=QmN6ToRM%){XB)WOxyQW@5s!ztFKZ(Q0wDpsT^zSjK z<;iF?>75%|8{~zr@^41+y+@(yQ~Xe!5A*Tkrye6=56+|1=8gCiyJm#fdk@#JDZ-nh zrxTCn$fMwhU zvRfsBsDX;QA8VGIWv&Qu9QeGIud3P4@Lw@r-+MoX1QP0_|6J~cMGut_QoOE4gMQlgu zFc0XDGfV3^5F6@28FGVIFpuPvzM7Pv1m5^#{fzfxZz5SYs*w6)Hq5|ZN9Zw#k%M&3 z0di?*o`b3-9AN~g$Mf-N_JR$P@8V#2!)dX+G3yp5&e_$W7U}N1gFN0Gz>aD8v^>!qr`pLa%$C)TV%}avmmLkMdGg2WZ*qM!QZ5weocfWDDndZw7$5$~7L5gH8>kv9} z9;y6UhW%y*Y253~_+w_o6&P-!mR_R54w z=Nt@H8AI&$)OJg9 zMu+Zx%E~qY%vTRZ*Y_FGbAwg;q&|gQ*e)JavY!G`O%>yp2h#;Fc14ZwDYn^=l-!_) zKCJ}k%w;pEjS}13@vx-d4PMYOFz4L|QdvP0n;hrHK657%>wBF{J-LjqtHT~()rIv~ zx%z$5ROJAs*>eh0h};5Z4Hc7avu&|u$L6pPZZ0SLCHK5X64sQympbLuRZPuxJ%e2` zC$WWN!`PjxQpv(~lgXc-7O}5Qna;kX8;R{|HOKZ%Cn>YLLs;;_FIeV5O;Yvt6~k{O ziI}EEG-g=v9Lt?NpLD0#;OqG>sLG0ah)m}N#G~UqOMGbta@(nq_NiO3;}oa1DXdD? 
zjC%Ro6=go2xz2*h*&_R#WJFhI#fb4~KU(aR)$^EcdzBoYSPwQ_uc+DFv40=63rk9MiuT(>^`2y_N1yul(7`ttO`Ota415<}SCTZSQQnbeIRs zTPnMY3$dXdlp!~WWiQEtNXpz(6UAHDkc=3Gy~1AA&m`-T@_=i&8v=E6?^E=eIONjf z^%}F%GXvc%)%f1^`Jz95Q)qudJ(#$x`@pVi`nZPPEzDhFn^+}1y6u&H=1OSv z{Sg+7dW)(QXSRV0uBBo z;Cplq@CuiwdN(`=XB?HW2Ae=p$Bz!f4W2GUyWB*us%a%yZLN&PnaoGa=35avgZ-HL z>}ckPcoW2`CCw$=ehmG5AqLvTi@VANhTwzCt7G3v*S{I%l6+_@f9e%6vvbm5s`Tll z|8A0;=MZTsz1vI4{gIhQRg3xG=CQm_ZtILRt21~!*Z=CN?nD7c;j~tb=l|~Zs>Y;i zZGj=vtn+TG)wj!KJB$0J%F5oNuQ-<;Kzy&PYbEnoK(_LRfjY>+dvlQP7=?)mqGevJ|z#wwS$cf0>tVg6ARwBo%(h+hgxjZ zNbM6SfNhR4=rhOrRHbl&xYGYF8PDw}%|-+4YHB+5kIyvmm6j94O!-d3%kDCm)AGqw zc)&U;iLfOL*$AM+um9$(CX#|2{`kgHnCbH9>Yd z^Z`sqe(*W(YV~SldZqjD+2f^qK|d>4ltepfyD(R^2(AG92*<3oz1Lkf2HL(O@ zLp>-%ZV(IR8C9oOv!_g7G%#zLh_ASvx_Z-?oVp`}jIfBI>({f5((?u(7t6=l)U++G zyCStgva9&k&UDc2emq6Wq9qVVx>C zscE|(q?W8D6*NziKiNmg<^_!)&fzTD#D0k>x85SQ#U>EhJ2SBfstQ;rSPjz5bwSaQ zE9C2mPXQ}*9O(5k70rIxX&6-Qh#d>P1d_uCz_ScbL}#frdd?}A2-Z$w&YXLg`FzH5 zM*9XXIAt;TKEzSkL*MyHUO!`A>c|+$UfH{L&nsz+ z^s?Zk+1?^$u9cQ{P0GDBMw(eic=G>mO|n})IPh+ z@7d03n$o+yyxUqzhj~CB?z11@Ky0W7WylR;!94wvGHq<5>n2b5&WL?H0H(-zA)Cj` z6OBb3bUpnTC9S8B%hHz$*d)ztA~V5K&@I=^A6r{HqCc%VIg|DF#AiIVdnsZm9smUr z+jv+K^gD~&{~_vvlE@RMoQchzqk-n59pDtu2MnqdG%Q<1zDO}4%Y<3z`@973Qd5ue z{#;DW^@&2`tP07j8RI}r!4y!iBA1vNZwx-WzC+8NpkT7u4PuiCo7|L_MZSF$j@G)q zNA<4TV^`f8$R}7I`mXC7v0pOaEtvTL54w>_ox0#oSl&=6I=eR;i!};CY~o^w(BdIt zT7e@OtQ7||taH&ra=l2TOuguiMm(atGM{c^^53O#7~g9}d$0EJ_ugu?=-(dcw64!g zumEJ|7eL==1Dg123nDz|boO4>m?6{+>^+Nf7=NuY#J~zK3 zKe;Avy=xNm3*tAn7XIp8`#Uh}T+;uG%1ZR}u zrea6d&%4rl2Vt2o0n`q)kB`Nd9$rFZJo`qN{N;)Gt-W+PTb*8z#;eYfPmE@eOtl(Z ziL3;>=avw>p&?4I{|G74=_G@@707a74*77$eQL3L1GUiPmE=tG=fs&yKdI-Re~|NL zCxZSr8YGTu60v6W#Hh$kRLhPO@_v})eBT+iph3j}8CM%bJUDp;{(z|twC?mAm^ z&GGl7g40Z~@6?T?fx21Im;@KhNX?Vt6@MTjf7wyZ7L&<|6$z-%*2QSljGMUC%Xdhc z$P6vNo=p3+qg=cAW}+%G=BbLyOefT0QkR~K@jp9+&+}!FygwuN_bys|&Y|TIO1EDH z0-%ZV*c>$)hVN6J@Uw`7>pa?|*)gSf)Ak zxsoK-vT|r&RBbY)$ET3X3-1Ez>V>ChZtD!Zo1G@yor{mCr%PR3B=^}Gq0<(&kmSH@ 
zDn??P;Gx+B#7yISB__KSgAJ-i%_2JA+ti7bCNu9^J)33X$j;0wnwoh?SfpY3Z-1)p zjGO;C(9GKraE6Y1`SmKaPlbY1>lJ6wdanw9A+sZeuBW%2oi82c0kMoEIuIM`K^by` zSW6^%eoM+M+I3La+`0z|v#2HY5eofSyPh<_^l6{UR!H~ZLoWLJK9EIf0cgvTGx$2y z<07u^QXwpFI1x1+h@N9Qd6v6`Fx%`xZjjjKl}NfzI9}Hjxn$>sH+t}qk61Cf-{1N+bTumvd2qN1Ipn&CP|)&4 z!-r0zCglfM$F_7!W_znxK6+0Oc?~6W{RtG=-hP((^re?j{!Ic?bwLc1Yv#|OMDBvH z0qOZ7?jf4Rzf?*ELw_pjk`q>#DfNXqns15VFIwBr@VPp2kC~3-`_s1P|GQW5zLWVA z%{KECwptBGXPwb*l_NaQg8%MSoK(W+6kXG-d8#6vbEw!VopYEa8_Rhm*-f)ACC7Oy zTY83Mv0+!~Fb`<&bI*=&AU4#4GUNuaU>;mj=7)q>p7Nc3#`d!l&{_R#F*(l^8Q(qSIZhi#oIB@i3xK^by`STN5FNtx~6 zqj~Pfzcc!^il_zmbSYQ0JNV4%P4xKmTcC8#0dg7R{ejvTsev1$#);7RRsuhTkz<*9 zhghUqsTqILP>QQ`zNLI6wn>c6rR`GUVT+KKo_J$DiI`S!(UWEG5H*e^k@!opDlt0& zUFESFeV?!tkJx(;2`ZICgW6`IYc&n=0p=ZqxK)D|-1Wroy7wZnxIS@uX$k%}mr2YY zP(-hMUXGUb)S-F3#`xar_eAR5D^SPP&8X@l5}DlHhN!;tCuAl2$26v&Mmf_Cur_I2 zH&iR$#oDs@5mFbajHV9(KNzU?>p2_UP)hbTa zED=~Ja_hv~mYKyFhBHJ-GZ2Q{eHXt9Hg z3Lov`c^~_)o(3-EbAH8Y)@=DHo$p=TBK2wf^cS4!>rr-^wK>kS#!Ktzwfe5oVII(j zMrJQK5F6@28FGVIFpo-{W=+SfgS?4l{R~|bHPq7I6{)(f&kQ~Jkglht&eFT5A(!5B z3(%fukznPhXuk6WJ3)us$h9`Xh54+ttoi&+PPbWUjh5(XiEYL#mY$_Ed2KkbiE1T% zMSVcY^(`jPVqw>|O{1({lwx+d)!5P_nOLTY7kN3=9236t!z!>ZAgy9I;We)ln4KHN z4!Orb)t2^y_wHjT=U;K;^A2ZfwUrz8cK$K!ucjuud$kvthD*jWW$!W1d!yN$K3VM9 zxZ9v}eK2(>`!u#I-HN@)Ar3vfeDgP9+<7sBg%Div}^D}=i*VPig;x!ivEpz*CwS*DUys&Pd(g8ueF_z7FR?6 zR9;||Jb8A>=|-I`)A6lzUFO=pmeOG!P=|WsRsyl19+V+Bhz0W;my~%|ox@vj`46)x zE)naI{ehZx&R}toJo1#<)E z5S%=hwTurp`^)@q(WTnV^leddDTCTq{*hSq+%E1d8RJB+>^&*3pE z|NFLrS|i|uyQ$Vpn8K%h8dxJ8pE{~`SRc7Gs@74m*5>=#o(FB~Jz+Cxop6E;WT(a;&+H3Wij!D$ti^s9jOAtka(8+)_EoNw z?QKP23^_w_)1@4C_w4QL)&y_%)et}G=7GnU-Rs3{o3~c%#QFEg5T^&&gqS*ZQtWe5 zHnI~Ves)nwxk*%Mm9co%oYCxH#Rcr@-YoX6hsIRSSrJK`Dqz>`>0p0!%)xLu0XE5q zEq<97!M+pvfUP=o4qQEQobc$2Vtpw} zX4dA*5r_Mv=kxg&Y88LD?Tw7P5LnN$&MWmXF|N&Wwqc&foRNbuI_l7-!zp~lS4Yj? 
z#q{_hXj_(LMP#%2WU|MJm2kwgD_DnB;QZlVeMbYyK75y!y153@IS1ur4SH|UX4N?B zRq`*K$6TMx)OV78t4MD9UOvnNVnwURSwn272W7|&V!=GOBxUMKuy_)FW;5j2kI_`@ z)Z05?`@lwe45GuBFdpWCT((XQVi!*DU}?EHqPq>m0QcJ|Kt5n^(OYY)Memu$==ndF@r`P4@%fV__ag+)M^}wjL4Vo4 zM313vIHP@)F#E4D*p+huoiw-}_q92Pd>yrdIHWrtZCi8~HShMsmCL#e$4@O|X$I^< znRXiJ?FWmP8uP-C^WUv-bz}*1UYjS9(!YeU&?rO@w@11k=}wSlaYI0fAeyMC`$rP@ z0_{-8rKXZ^E4sfij9W(brwi_9*w2!Yegl=uervX&WzcHl1C5%zz#yaD9f>^4v;Xy{ z;rI9)gDYA!N9IanBGuXCuj*VnaPB zLv9cY=0PQ8QZtizOK0~p#yQMFd+lSGyGx^3^Ys?e{xUo#{q`Sn;Z65LtxpaycYR6V zKWEwrj(r~)BQZ_XkdM_x43B;Ri$^gfG4v9~hx$r7Mw*hPjD+gi6RX)(h&eeLRad<& zw4K|7H-A?^7T3-}ue44;Jx-K}hN=>g7j=dr>J}S4>}JE#3-UtD&Pwjt;@Tlc62>3_ z*Ph}B+s+F$^qcU2BY#H4Wzxn^Qy|V1jx(_bE9z3B{C>{!qJlJ=81tDr%ye7`1U(PmOvO zOI39&r`)z5C5e+q$#*(T@#>@URI<$xGAmFPb4@ZJ2KO3c86lBWUj0c*He6dwOq&Fh z#*Rj|jaL(|ylo>s!mVhSe*isX{qp6^| z2pJ6yk?YAk{L7Jkp?;tP5fOa`34DSgS|6D-_o0C8oVTAlSo?0Rba^(JAtp1}&D*0|g0(5^7e(G_s= zFNE?e-nrBL>HTSKRq*dhS?ev#KE9~yWJty(_2Kk9gz=f?(qSG@=l1nr3y2N%pbWV| ztQnF#uuLRGLGK7XnK0>;3Iq*XHYeQ`_n8;5Ydtanq9AYLHPV=>{)C-d1;J0C}4%XQ!5rW8>|lvK2|_fV-c^n0K8{p#~`{;0>fUgLRRuY1mUo!41=&5>Vk z1h7}21#zYMEScB-k>n38Lc7m)5uF1pG)E?YyuL?3Ha)(L&yw;k{-hI%hQGgujI1v} z&bLXBZ;c)C!_M9K*cJ^KJ4fO_)md<|bqSQbmqTN}YRNBCix9>4)#=-6Z;z?iU2f9g z$k^3*I*|=6)f4~M?tl%-9mVR_q}I^Cg#edD)g}?Es~Sd5SsPaj=~-J8akoUP{>u%T z9jL2VKcLF7v|61Ecylgnp#66@C(SeI`h9d*ac$8i8!@BB1Vpc~ME zI=}{W0iG8RRXK@>c>VduDD+f$9U|HBm2eJMCCt7n(6P7HlGxO?(hbhc;LG!h3UlVe`sZ!m9TSv2~j&w%P%N5?rus@ibs#;qiiMOHB^Yt_#+F6ebmLXQ|@3aMKS#TbXWedl`+hYnzao7 zJX`*Y4;$#dYR6Tjk_eAVo`C~(p96J&uU&VrRY&**+b-u325-T{zPHyn-C`h3IG#$n zO*339t!(NaJcFa%Ftf0FUKY0~>A##q_2q`r{!=qK%jzwYfc;$W2>ae9!?Dt=H{Rw= z9n||WvxI%`;Hlp7aU6g)cPF(umC_C9KpkKMx&Y5xO6F5Mn=8^b!oZ_7p#_S%__2Fo z_=R?LdK^q!CLB)z7T@=~@h5&$@GmhdS&QGDsDDD;+XCc`mxzu1jScKjeE*{5_^x~9 z&>_k;9@4@*Qsn-hMCgh#f>}6IF#V0Zz(v~&6?3~y zdLC03m``pbzZ4sxPup?w;(}9xtihcq9Q&9ot-#UbHK)*ReRAxE?Qh8Qo~nYP#2mpa z{xISkIDlB5iW5lh&l61Al0&+E%_I{)8n7!?FA~&GN)_n5nnx%sds}q6eHp3ZbdNL_ 
z6&LW)+2mh^7DBKv8H&*3!4{K}u+Ps>{`tKP@RSpsv`?Ea30*0a(rIuGI1G5B@i9`cvohl%V-`ZhvXup>nG7q=GUhZa7>o%iMr}u+H<} z{M1*9SDNAF!(B6PHSJU9AYqI&m|$LJ=^R;~qWhP(N_{ik4*N^n%g1p5J*^>{Wk5He z19gB6=mI=IX2B){zGvuqY~guvfna1diP@8cYUUg|UbPoHL$_0ag*x0RSp1|G=EkgL zNmPqN=RHIxw9{U*D{%kg5aRkkKapgvEci~@ru632Ue04jcQg>w+m{lp&K_(`!i}v@y@R^j%^Ho}eT#hl z;Sbu>y_S77G8CS?w_CH2^`6l8FhIwqW}_PMlhJI=RD=_`7&ki^hC@B;;GX+d%(=Hd zVas#G=)TG^NvY)QA$Q2fq^539f|o(YWS+JC`~jw}<{bX*(TTaiio+`{e;iAp+rwzF zvgzXhebbBY5$m5GiCU|Eo#Wv~Ohw=WQ0~YbFyigySzjIyuFE6^C0c;bVUHEnw>r&j z-Sqt!+~B>|;Ijzr3svJdKpmoJfNnqs>Hr(iEaH3ZG+a`DG7?b*bs-ju&xcr5A&&|1wAp2`-|i7mBh?!UEV! z*3}S4U!EYgO?771a^l#mb1rD;o_(mwM-hSXJxlhrFUQymwtR;NdOI}du}=`&m!3o~ z-v5q{>c${Ts*;h$#}Al-86w1*X$PP)zO4+YeeJ9Y*(-E=gVmHunnx;m?y{F{)sA@? zv|m$e;O0B?BUG8pi?b4QPOFKnLmo z8_?xAddenj|%(l@Zd$(!@wqh!dyGl3<0;#?#)#8$zJ5?f9BmT_KSu{7>YN^0 zx*tVUZbcDmM7ogZ0bO*jvmBcK&Fj0 zkO#;{SOop?fd!{O>CqHtErd7p-i00OgOGD^weV{!7hYD~#aB(^HzvGcuud=7#3Ejp zKyUbkbbHI*r&My{WhIYWN$sI6T5B*{atBNOC+oyJSny>yHD?}Ymt zj@r&PY|3q{{Bv^#M{aS1?yvVJxH@0{8&9_#YbbraL!G0aDDUv0c`22w_)*)iu=Dr>|6=60 zGZ%j_M+Har*Wk($HpskN-;l{_9=Kfl0Ypx;nFz+^2`L6itek!lo>w{*wJl76HcmN# zi}*QWDb6Fv=TS#=W{)2}FJJ?5rcH$WF8GN^y%I&{cb#J;-?^@7tl@wx$XJg2KBt0w z4cG*?=NUqYcaHHlCmBO4bz+%+!h0Ci2VmAM=`i~HwJcI9se4n-`x#JO_c+Sc*!c7& zt33y{H>_VX!Z@ojvG3g+lwr{(C-kZBv=0V0AFdjzep2AfIvb$%)G&oRXWM^!%(AJw zUY+?1t$w@}`qZXVm>X1+kS{Z^G%=4CPpv=xzddFuC$5!`;{f*l`%SJ4=mvD44zK}T zfCtFzDLBDB+%>|ukfVyc6$yrMzgE_f#6a2?;va^>*(UVO0R3ApYC204i#qol}#PoI+a8E>iF z?q0OWnN21~_mCIFYsu?GD3bcrfs7X85f4+A6BhhjxbpB->MTVUIg|An`!Y$CtYEG{ zO$!r|q-}xd`wS7{>7f$x#$;R4MR6gLu|E%1;dYYqrhOq@&TA2ZqtnRIHF;>Xej~X$ zx`RBAH?o|t>zZe4Hz8Gbsa@k6*yNJ?gSh<*4eUdHHZQbE4YJOdhIcLLf}|%+VYv`< z>EHFhNM2d)Cn+4OUsT`x*UKOw(8{{8D>rGr)EiH#d!Tm1 z__s=&Zm+%)`<_7lb&HsPeecV8ywY!5)Ht4n!twODov`n9n`u*hTz0y-@#;*Q!$*ZV z{Diuz<>NSjtzT=|R0G|B4%7iQplgvwrj1mH8s{ga>w_rs;~2AQ5R8(mkc$8Mjk%+7i+ znY{Wc9&O(3$==kwlKk1f4|UaENKSJ+OitA)B%|sGR8Q|D+Gmx?F4|K@EOpBy;#Qfn zcTBNncXTpPhX5n=<%=%zm!BNFd3geRXn6yoyyLjeu3VVh@#h38;#q~Zow7kUvNs|x 
zUG76V<{R*-XRR@Q*HS1y`56DPohTg(h#piaS?rMsh2Ax_EmU7_d@HPpJGEsAFZQ?( zre`I464-RSZ2H_+r(jh|x9_xqnkuysP18#Q7Tg2xC5oGO+~r0cJFGBHJ9cYw1sA$H zyVD?@j(aUcgl`;N@JXr~R&i@6j8kmUXx+xI=A-lE6{Yn)YyK041t_pLvSgq(ay$E&?}7t!M>z!Jiq%Kl^1290J9p46lx-Yp%xF2s`TfKr=@Urq9&>X0JZ*u-f+RA& zxDs`g^5D7k6SmCQZyEwR;+KhYKa>_He6ais6t` zbLAm)IFj2{tb>a7~${2Xa?#}!F*V6!-C`Y?^a zf5@S_Gq0nl+BqU}c`%e7u-Aa1H5@<$eGWtr#5nZs&o-ko(Q+HTaqsl+U z*e-L15E;AYNY~$d^8Dp;^y@ki_RpbVXn50gO{W+8sg$E7`bcpv`kJ!}R#FJSQp@M! zKe}}A7aK1@%N~0%s#856>M?G*4=quSHjGgygB(xS+HQF`xoUdkW~+$5Wz02ItNEd= z6Z_uE?dcZY1=;j(LE_jXLj~U!!^dY8Ikn!>B|0f*xWqz*8NeQ(tPPT_5W53&Q%!|F z?LQ%$pK3moE^BRAX5;nql+7PKVT@$mN4@R!KOBJfQLI84&<*H79bf~x0MB$vX7HpU zZcFZJzMgF)`rw!|y#DQOyvv86+t!egz{h{O&F zB(D}Hm}>`MsY%Mj_m5vNpA!{u>$W~x-XC5_q15kdEKIKa>L zPWQoQTZw4j)yTTdQtc|w8@v7rrQ6%Sw~8gz zMIKQ9+727}Pu>PU>`vM4c$&s^DSgH~lDKR_-0OX!xlY?>Hr@9Yoz|_6U;o-fsfl`X zAZD{xRBjSe?CR8i^HWLfV#9|4N*pP(hcdu^)fGx+;Q!loORF_9ub9<(XWDqfzpcP$ z%ci%NkK-7p0lEPlr~_<3mp8?e?ytm28c1S#{CLkC2tQ108x2HOe67H0J4T-gX{4kp2gbUuQ-1Q?^l*a-i*Eyz@L< z99oB1tbc}Ynlna3f33q8?fyXB!IF-u$kYIA0CIbWAMvY{Kz}ShhJU$-!8KyB=(&wA zDY>hWyAe6~tQqylqI^q&MQow&;!a1VxRmPNv6MtQ9-Tn)d7XHxY9Q)0GMlja?8X1? 
z`3*ZMx`R3S@)~@tb0;g{%PralF9v5A)-kU@KZ~fhy#$UG!REKDOyruG=y@l8(aajxhgEgbBT|f#)pzOnsS@>C)Hn{{Lwl2%H9$9@19gB6=mI>;D47ZCH16@9xFE{ABaPOwd&qv$o*I}7Md$?s2L`Y zY2G27s9RO^jDpBJ27Tm_uuAeny*}*qWDYrP2oj{N_lB1k8jvcovgr5h9^}N^WK?so zh&-0lO4ens6*|`++VZ_0qWiB*!Co11-AJmLp zusqXX_teXv{^)b=gBDe0(7msWf>gG=@Y@Uj-Itkek>Hk2k4dFYFRG^;bTBqst<162 zU8D6XBZq4+Bj{h;OD>HwY%Z9|F?5NgW26uNyJwqYXI}MmpS-z6x)X1T*DAW5=C0{1 zAIAYSl~6Q5H=qM`fDPzcO7Ya^&*Y@K9pYv$8fL`3DP^KJ^~ zrvS^L5D~%VS9b8_eOB0khtnEEyO;5RyzvrzEeX!vUPv7BSx9u;eJJwo=CvQ;x$Y79N5BK68fk%;#gf$pOfOCk}I)?BMx< z`dCvo_13A{S$Jxd1!gDF0sSeOjKfp&u<#Z`{LRobxJ=6#)*n6#?^9RDo-|zL=kE7~ zAAE0zeRR^HQ*BL9o6izF^~*B&GLZ-0v5#ck8tkL`UJgrHiO#7bf zaPIfr-xxOGGH}G}Igq^6D&{lxVR}qx;R*MG0xWM1Er91p?qxV5kquE_&3Q9?C+6@^ zm};`bveX&w#{DdJb$!^4vdz55!aZiL@E4Zk!2oO&ZGg;GQsCc40;ubw6Yla-5B}tr z0IOUGg?Dgx*nPd{klA7rIG@dg1J=7x??^p@s%mTCud`2Jg^h2aPsR-Vg{c_caeF>~ zFJc*7#&ChNu|M!_Jq65f7l$wFUH}^zH^W=s+wu>c8i20m?8o1J&4jCudBN9xR9KgO zb!+CooWq)Vr3v~xEDifvvY^*92>F--Q;HH^3rJKl-s!u`G53#mJ= z0?K*Jh1GT3G;f1)Cgs!b+Z(z*e`R?8nCMfb1L+nD8Nzj$m;;jyrA*olJAWu}q&wDY z`6*>`)9?J3NBTB}S4zmLaJ2t-*K2#cFy~;zdsrGT;%6SXH_Jx$wlGHW8M;pc~MEI=}{W0iN5GOqt{?Zo{S##^a3XFf$#2Tr3QjZ)LLSnD6429=h)ZST38o z!R}WMFq|cd8*c0}=e_No=+js7KUfUSD~$*Ids%&B2Jq)6YZ$VN{x?6JWXeCId<63A zdyb`iIY>;4?Zmy#jlhrAdl7B%op}26qxjZMc`!Odgym1L36058ME1Evc<`b)@%&y2 z5xiOwv9tYy5B{PKF3hJ4}0<69I3AXKpJy9O5jFtY3gI^ZJ zGW%^JY|r064jH@O5#|o4x4rJ}s^poiudeejnOoVL>1F9Sf25H&eS{&lZ{is!@=Lmf z=l||{O-h_*Sdi0hm~W-T`INI>%Ub^w_lwAX??9niyizAo>TcgV!hH@Kw1si+VYjZ* zOkdkcCNF zJi(GyE+wYT;L>xD$zsC0bO4r_^5z6tHXXuFcd;}siSk!*C;D`g%{KT=nI7SfN#GV` z%Q231gl(7JEIQ7RFUp0@IkiYyt~XMpKZ-t6aYtp^mXYqgmhAJhAk@%U3;l5YEs-u= zgH#;(j=oVWMQbI$5dnRkXz)=j_C`Nra@x*$=r!>N#O6T_(*1oH`A$6y&C51IIs6#5 zzXy}}7!^cRP0?lFPWg7+=IN|%*F}%(N29-@ zGf;iyQKXt!ju#Hr!J+}OaO2nvxLmu8b=W}yhF0s*x$2Kg9u?~(NbpaFeyWfAbh0A5 zKg(tce=7gUKoZNtetH_P>3HF^*l@|M*XTUJ!kyx##`5ik)jc1%a$&*6FW;VQR2N8O zjMKWEm}b~`U6rGkx*!>R_9oG&7W{uK&Zn%8A=Yp#y^%3A@`WBx|BShrj^h9{3r+pY zfNnqs>Hr(il}zy%Q8Hh1PBs>YC$l=0bI=vlzYw{vitu%v8+3f&eovFmIRGrV!8cHy 
z8a>1<&IT=ByO^Yul;h6 zd#elM*xXgTfMMYtvrBf0B}YXoc~?8C>o%Eq8?>>x-te&bE5q!^#CD4MZ?MQd zB8+=2WPTgi`Cl>Ai&W;=HMwftpMHdUQ|iBV`bwTxTIx81^RrSoKXtz=?0ebN2Jw{( zj+nQOW!fB)7p{q4Ionn~jsy5mK31^|=mvD44zK}TfQL`X{Co5$SH}MZ_NC zY%$A)j+!LUb$Ck#`f(Ab$4f}me;X0O=46lN(i(1=(&+&>jmja#dG7TGhfKYxRu;9uA)g&0`5g#UVG4FPkqA30tD@ z{+?XYGJr|^^6Q5e+TDVG?tF-o7y)>$rL-dpE>uUDkkbq;Fs9hWZZ^R_#WPDI_}L_t3j`c11x1ewdCe| z@8FH+{b0$nN{w9u&J*UR8P`RT%hfj(ywk(ZW8=b+<6cuGJpX*NsCG11DNNclC`g}+8ch4sG4xUHt4<@tk zi>@F8>@K3b(Q0y3{s?(>bc}rKmw_%F7GXE7D`NA*sXc7DH3S+O!*1_9%!bn*p?_sk zP-AR{VA#=(J;n7hJ9PFwBwMCObB*$1a?ZCxRPn72+vHsVvaL}PdC(+_MHc<#Z$2K) z^p9Q37=HbP$14`by^Ft6IfvjX-VN$bzgAtuh|d(WHREq@(6@<%Bnu|y1`UieEV9Rh z?X>Up8v}`Zt%lyC&$-5Z0b0glgJd17Piy4m%{n!r2*YC>rFiC zfVxvQ0$m>6_ad*`%g1p5A69hgmI2*>4%7iQpbPMXQZhe(-pF0ci-Zo2>aZgv&mhU$ z{=nx-#A#o=ebz?r*#=nLF4?h3Objb{l-tmhW5GLJQ8>Y;tY2Mxm19!Kk);{%pGh`s zRmwIya2;CC-VQ~?3K+_41H-);*%<_f#|;Qbn^r-bILDgxGX-IdajA zix@SgA`4c$#XH1~AXC2uq0>9WiPDxDhj@R8HP{pL%RWAsg_Gf;fTowiF3l9d-C9$Nj# zH)I}I`i*zu$Hd&=p{dkcnZ}doc6xRGs6mudhas9O&rNq;r&W5hguCOTt^)7@DB0TA z48m7xaRTOL$bkCR^wo4bb%Gz8PJe#Inp3=<>EWhNuaiH%`k;Co2cQKsKsTTRb$|`% z0zA@`%w_6j+~t8Iyv)j2B=nmVv8OKvdE#b5x6_K-v+3~^VCmhw3K=PmMcy1`!XNS^ zm>K!&>HZnW1SQ4H3~`dNMjG6v60ZA#5DsM<$-5bJ|F!*&B5~ku41sYM;@WXp9lGx`Fz{8l)4K`H$0`dKoQzm>XY)SkStP=k2=`7+}4 zbUj(R0wV*K7eGt)&(x`LEQZ{Srjk;1j%0VwJAB!h*xRT=`~etBemCoT-$@fc3SiO9z8ai?tfqXFxJ}K zK!00f$uNgLJ0W%Fa``w8U~51FbOSn22iSlv3yNn7CDX#_EziyFHN*SSQnKRd6e3VI zhDi4#X@8Zz66OX0mKk3&$Qxt>OKj$4jHguE_(5}fBak;<`a>P@z-7A$M}r+N*k zL)m7n*KFFSsaiV3SAGN{2!M&fKUzfUdSCeBW(y>r)sLS)rBAL=_(<E5c{|iXq^@ zn@DZcFGSUHHj!^!3ZL(+B;Jh`5`PZhkiXwTA}XyJ{x!KAj@UUww9fEFW6eK6 z z1dkzu@Sj7IaP#&g{87|#CNoNc3HJ)Hnb$7RHn`4CGN4xSd3~AHb@wbpjLo<2v=W^m z*RTT}VSMnO*iKh^q+3k!7rq&E^{0{Htv#KFJI*O^;*$cjrW#jp?f>6fB>X!Kr7j!u zth$#9=Q8{x;XVg-|0!iLZM&@oIlheT|J#TE!Qoo@I1XU%zb;eCfNnqs>Hr(i1$dTI zGFKE-awB*njGiZ}@Rlu?;a{U!u*yxGj^p8J!Wao)QPC=gZ6$p0xWyUF%aw52mMLkzEqnM4Q;MA4?cFXXLIqhes zpwVZ$HDPWu`F5iiTYcj^Hbi=%6^^~Iiz&5dBFu;Q8CMP+w)JGipAo^0*f`xzvtKBc 
zOwAgFa=ROBW8SijHwM`@cx^?Pj}uVV@Y1^{flbFt%bL@cA)={tI~^|YGts`{ZL0oL zs=naDbiKNuRPL!?zW?SvYfU(Y9#54y`X+bizBf%n822`-2h zU^~*|DZs*O+QuH!jD`o_h(oT$;rvv|e0tmfbfCO|wh?6RGURzcEHaD+u`_KvSye+) zv|Ticmcy}yxrCA(2W~$37uWphM0^z?FkhVogr9B{LY7nOm>y^07h|T#$we0kW2I7J zO7CRks73%TD`r466`;u3j$W*{Z8ARgG8X1XePu0^T1d3!Ac)9M21!aDgs;Aof|bW^ z;#-3IAP?nz#Jp!#(9jw&{Qb&Vg!M;NLixmUETqm<=f3tZQGR%UFy0?S{CuoNo!>bR zJ6}GH$&FNCllq5P71zBOGY;yoJhJU+-^jmKEQxF>=PkQjU3b@TtwGd?vvqFSVV;=Z z2;-sd#29J*m2`_)iO=XXmcMH$Bd@X($k`pN7U=Eg#1LY<&d(R0?zhI#37LfUeCH&tppF z)`1ue?RS;8IQKvFIt-Qcz3#qVbzB-wUw3sk0_-E!MzT?e6@w(f3(Z@Wi=H zJ z$m+l5WPNfZ!BUqMRHfL$w^x0D9a?h9f`T}*y9*}wX|5v{_Fh323c|=vGf9$Zwh^}C z?bT6KNhjOQkCG{0wvlD$YvHpJXW;wZlEmAbBY6GFCa5d1n7^T^0DmGPOWVNzxk}09 zi@MCTT{|06Y`+-Kx*};k6l=h`?>~tzbN;*>u<3YNH@mOS!yu2&1B~W=H270|&ZMeu zzIES8Kdl3qz0ATRA7_lyYQFU{iu@&JwK(AgJ(v0ML>Tvaby}D8iI-VRQMuPy1VqP3 z3w7Gc$8iAufF|oQpc~MEI=}{W0iH%m=G0TY%pto;{NZU^NR{*wSSr8|FFm44x6?Nb zN9nl?!15r?lT6WHNJN?3;jc~AW=)=%J0b36!m>osKpXxhe=}5L?M+H274rvI&85fF zutWX`=j2LqW~?UK%FjR_x_rc!CKi#?FOCuinVo1;%Q4h7dM{zQb_OaMMI9!G!-@CK zcF^>Jazu5746?rK93$J6iy(iFlI5lc2rNE;41O+4W*C*BnI9Qs=%gNecD^*>)S-_a za34TdwqGLt{Axka-)G20x5Lowg;UV-#ul8&IHxV^sz=t=&4vdbL=&pZgONIundrCO zC?RskglImf2{-)=!(TQ3#3MePp?xa(!n^FFOdh7wSzUJ>Q!!e3!`C+MrYr07qBq#P ztcl~P_FZZ2yc@A}`~IYL&O}dlx#`YVVm5rAwOYd~GP#3WCo2LUfD&25D=m>NwYnVa zpA70l9tm{ZJ8NBu$@}nEW~IeNjYpLr+Go?(JD85+0JQoP4bTnfKpkKMx&RN5Ny@yf zdw%;hHir5WkGjl}%`^KEjgdUsr(%Uy>HWO`OIG3~Wb&;tBKe>OzN5{pQC_oXLX7nL z;atL8y8s!MK#|XDod^MCo5mHwH*Mh=hR}37Ib_agAHI>=?@)F+6Tjymg7hmnLq{&e z5?<2L1ahMenk4lHU!vcP$A)VXSJF|Kn~vh!7W?1_;*GJ5!$tT)OcT+$m<%8G*F=_1 zJB{17c@g0lli2F<1u|fHH!i8QC9L=75Vzk?f+b3J(c#%F_L|%Fh&acOqLq-x|!d+k!kbkyfBZn+p?>C90%~B z?QOCY=mvD44zK}TfCtEQ{eGG&dT)erF(C~1d9A?G_5wUZMq=hZtYI4mXzm)&Lq zr=6OCw7h)+?N^P)doH=aUFaXUeMc_5t?4e`_3UoAJU0TqeQpOtY-(nS?2mz}w_b)H z>>GxTJSu|*OSa&}YCZgxrwec$!Ui&%R15#uZ;nisosTuYqt3LJ2;k~sJEZ^q7?z7$ z!gqIP;J!;w!8n!*Prnw!QoVRxvwW(JbPZ0~_5v#}X}) z3`1mxmC(hze!va-Tx2;{6g~J|iTES^9C7P&K<6HuL;T3~C0gI4B92Gc)IRMYBGmmE 
z@+sGv9JD`;_hiW-PPXd!nVW-1_lPvP`AZP;WKbX9q9BT7=PR?3JLO0ZbXKRxw}ZUI zxB`DHC?ek3Uq+18D8F7#hPU0Fif@o#1qHWP@b{-;n0{I_-G^>TQ7TcLU&Ui{sX3qn z+t}@Ziw$*OD&y%+vT?qN&4lsPf2#@CU|8r=-7h~4eqTRjx-X*Mnj^MLOWdM{yW{_@ z%gp-TP@U2Cm|d-aN2f*!=Q0Yq!khzz)C>xr+;9BJjNmKP=x(t3rZEtm{d4;z( z8X{ag2`Sqd10B|nDtfZ{71M%o99|HX0mpl$KzDwJLQl0Ic>VyvsN5UP{Oh)aY2kaL zamgFuKD(LJTbU}Bu8?&QwbnMc*5G}?CaXfZwY=O{Um4KaiS2Z$eY%CUrO>Ahj&}_f zb;_CKI4f~pCI@J}OiASK-v8hF>GsanlBLx7s2-Lu?tMH**!T8SOO&;Tt}}O_`q6WI z;aXdCUPt*j4&cLv(^TS>(hcZ99bf~x0FM(T(|vL>7xNimD5IJ1irfrH&Tj`CbZ!yd zXU3cm#=QVbw96g%19cZ`J0{0EEnUv@GM?zu!wQG7GXZtb_PIQkMsyuKOxfoCMsGSs z+E!_Toik0vQ~pfB_B=4fpKpuD@1`rl9r8-}ohKHs_g!N+ta&m#;6Sw`Hi@eTE8wr@ z_Caoqk=P;&44S`47G63_8o!;>0ks;i;hXzd5KAPHe{b3;Nd2`WF{keibXK5K?^`pQ z*i$+OGL6cEvJ&rM(IMZUs_Y7U!{kO>(O(SL%j(s9k)#RFuDp%CIjIY&eh$Gcx-+q$ zi($}+csXxd-+f-TvnS*1i6f0$3(V+tnl)FkBrd3m_tmDlZcVn2!TNAdo6WaxTfct( zm64<|(Wh~d85Rq3|94K@vDr9T{)%D1as|#1xn3)3X)*Uy?|*l6ygqT%;F_i~r_@jA z)9D9mUM%5W zD*DBk`D`Y{QwzaELl5!KK8U8jPg9F z*D~w@K{KI-bfXW0TaoQy#}MPl-LOQM6Kef!I(v?_2;#hMHR==DM^69yjF8D~A*+Yt z5D|M7cD~UO6u#6+e3!Q*vSzW_y@UJN=c*QvJo_kg*7IqCGig)VJ9E6*my8n;Qq)k_ zmWZ`xvh4=Sb-%~Eh|G1d- z>5EFkv-A84uf9Pma+53TI_?Nnrpx@Bn*iHRk$q{kV6yN{u&kvg4XTYIO{3EtZTvH2 z^~w@lS>`nx{>8mk(!GYS@`*a7-MRGKB7W8&9ryYii!IB-=h*h{n$@6lIEapWr#!t> zK8^$E*{u>)26O{DPzTt6F2K`I$z0Ux%9_#a2)%xJ58<}VK~H{Jh9sb0=)QO31~Xb8 zz#7x;^P2}0=y@2B3Caj+kD18Cur{2&k0kjzp)h3|qp$sRJ5~88 zMNYTXB9h!;vV$EicvpLo6rXnvb={IF2oI7GR2PX0>g%VYp>JZzfl5!5qhaVp04uLQdo! 
zUfA{;UUKI-zxT2&7FZntr$+SAeU;vNrIJjvhWDf^uKrX0{fc)nH|w80R$#4+J~;d z4sGKvt-^Nt$jhqix(sT&RuISAmu*Y?6zeHB8OH(m>y}%V0o{NO)B!f23-AD$Udz|> zuh-?V?yc7pI6ogGD#T9{_7>{&IJlwTj2=$`mi&!#1vjU~;!lvzSo-H1jo6;J33;SL zKHA7T@(8h^y&D_+&J+OKC`>v@|2ERT&BTx1^FyY5jzG>BOS2=k>_p`zCy`MKOm;~N z3vEgqL|W|($m{n`Ag5$n&=htYs)5ZVT>`z)=45fUpL!sj=)xmesLsMlOifo75k0pKs+TK=ttMN;%0`!1L&m1?H0m01V6Rp0xCXfL zPUz~FEwqk$YrlcIfk?zk{U z0$6TeI)_Fa=!Xw`@}NhVKCEG-_6hBDjbkOO5TAlH`K94nn~P9jo3&wUX`hPB)Q6Ty zxnYNWvZ45(r{5ZLn=}p2^%;fKM(yDDQJHYo(0hK4`AL56-KDTuhX7_@ zm;`mYhY@Gi& zirHcu!<!AGpb??Pz#|hUW;QS9aof zdhu17Mb!W9rc2$SV;GjwVaPwG#BquZ&`KRj<%%r&Z+;q5*-&bFUWqeoCyaYD{ zEU|;fQn8-G=!p^T8(i~1xGn>*e0Mg7TV&!HHR(AG`|p_ZdRix*feJKKW9=ut^Ek`h z7)uVA!q>{yG8Pne(c@{0;4<93$Q=(Z-GkIFkU*{2I}rIR5~S4CKZwp7OLWPsHwZ3n zO}I|yBFCw`-?TJo^hU=lVy^!M#Q616^eIn=IBZyoyj83t&foPTuKfK$1c{zU?3k0# zO7tE4!TcQFWlNo6yb+A%Y}kb+u3JKUxg1aJa8xD5A|TYW-wholk3-_42XxYBX&|0o zt|0{;_mERIVX$eG4vZ$|K$Ci&VW0E#`Lo_DGX1{b*pP}aS3Tcfsl-mMig!-?DmRE+ zZ{!j;uVGkHkAK&BF8`7A#C)&f?I)IP4^x=H&Ya>UrZwHtmAji&I3Xwg7K`6V;c~dQ z(!t+ASw2SHovx?Kxp8@UGN>bWgnjS8nb%c&0^XU0-VATNJbQ$0yW+1em5<{9v_mDY ztAK7m2kHPD&;@v;^i?@0en)X1yUyb~<_#j}k8Xo6E!YZIc^;tKm$Ga;-PZ#w7J+|| zksBBIeu>ku0bQ)olpjv}3&;ee`lubwKc)vWO=rVPKRrdJQnvAYIF}wd4q>A z#rPyB@@)$2a4;5n4S&J*Yu_ z26y)-Ls3$HG4-Su&{Gj3+|hL(OSAr#Chu1bOHQr;x|RN}aoqwoBpTVD!ZFM`ZZEtwm^Oa%YGV#5( zhD8|``k#bxZ;t9S1B=mXhR!SHI1MucwbaV9xOn`3cV5*}cV4}YRpA^q6Sh+mBVi8T zxL>ktc3`S`kZG39zBpkX>D!j>@^Ku%)*sp=%Ybe`2kHPD&;@vYQ!@3ma=7c)d}Xk# zsNaPb3a!r*U{eN!?R0*D@D53UMM-xhlzQtD*4Oa_TCQTj^FKT>?hSO+Vx^2cg)Hzv z7V_ydYxqTQgI(NpI?m8FUIAxyZo`7D_QNB`<&li#?y#}_GyFmKU-){32~wG+Og%p) zkB9mf!%{snkvY;k;mc~8xYBzL?4Y{_`Id48J85te4qaD*r>QQ?3oNz2Q6f+cOLRAUjUcbCBmbua{SN|17wE9bYvrM4J#1s(JU<;g=4c! 
zkVSdHu-My4$k%QZXQ&)t$gvjk6)ma0H*5_f`NwYN?3+*M@s#1NRB}wCiWj!3y3Q!XEp8Gz-R%&?dyZ=+veb3KK_Igjbq`C&^3%{QNp=Qf!%z(XjFo*yk|%}zVaIP+^0;4 zUS=X1YP-nyqqf8u_XWg5rxIx0M;l_lnlxGUfK6VkHO6nv8p3ba2a)NH_wa$?a_GS$ z7*~E43va+Hp!|i+1U2;`(p*)^@HkOqZ&f^I>X1lwEu;3UxN8&oGtc6e2E>p<9Uq9V zk@aLeV;6o#zC*JIy^Hv{e$IIuIlNR-ppVEDkS^dFk zi@i40Y1s^}d@`(aRs9by=S|kXzIVnNUTHD)_DqIrE4_X?FYYb924j(~JbO*?Rqo}_ z88+x2X*%xxbEmO<90$YQBrg)G`4@_e3Of?t$a5oj4U!j4uR8J~i^|^tR3V%*Ib}9&1 z7mg$H@0ifrmkh#gMGani>o@b)@ggiURt>MMI7i2TS6$l5Le}2FINq7{Kk}cK$9di=5jKgj#+_?KoPIsRp-@dK>rpb^(1RrzWJM8R9S3ebGccseCh?prNqzw z=CgxHrv3sgRnDWnA$o4H@?I1@KbWi-T3;IDnpuKiRW^ zZa@d>02|P?j^eSRWImI)z?jwG&K#0Z5PVv82vIz0LZqMmK>K3X?h1Nc24Imil@u6t zh7so<+`DCz#Z_)1 zGz;12VC7-5^xQ>qMCS|U7aM4U;VG0emov(^!K@#)vK{cdi}KYavJT^ zsw>9TS0ZMaf4G@xqxV&KFX&=NiK=lNKo6h+x&a-i18hK79mRvZ5VK-mDCSP`iDBG0 zD@__Bw{RrpJ)RtQn*KgPL*MCr4ggE$VU(0MmcT{Ntzp$ExiM!XPt4&5l6SD&Z+-aE zEk*dw98Ho%*@l2np-jwl+^4e(~iy{1B6XC-oHI2Q;$bIQ=&Gz)%`|enCHW z!9x-LCT0(tY8-`?_h)00EA(J({cc!hOE-4!h6?QCkbti``4d}wBoqH(tOYkLtcGI- z%3zZ=3#{VE9&8{p9^P+03F%c%fFAd&!OJ%{;16xD!1Xru@OuYE)?jO|=DF3rP{2oH z`1XDWcxa6pB-;LkdA!J}(f?00^W^Ts%w%djwKhH3Fd}!1_6;&gsU$tMlxGxCUDx>8 z+u(!zW~;@U!yC$TMi}}pCeCGw>e4L=Fk!ye;w{@S|8Ixk<@s`)tBW>h^&H*I&2|0n z4L(tihSJ{)lsOlph4+G02|N+c;pr;bAIm+;r^NSjbS9Sh&nm10cnpUGw}`SbUbdCD2$N+mVoNr@SZRA zjG+&E8@h`vc=ptuVjyq4Kq*G7sqOC>b_+ULng*-kg_LcML&g{KN3PYak&;LLS&-zAOYCR5rSbdV%ccz7yDPM)x-j{{D{%R1hjcDp?iIvhy|cu0?*Jm|hdN9iUIAC!lqNlgOi6`1e{k^# zN#b4RF@(^4O2}^5MkY;8WbL8$n8nwz5&293ULe~>ESS2VKV3QpUoo>4>q7QHif%`- zS%W5wiWg2yms8W}-=lif?J^Vh75vMvdIJ*PW}ue;&RQWYwjuA;Fyn#W#5~eLDBYqz z&z|nH4`_}Vgpaft@MEjI1WIw$k@0H=mvD44zK}TfX8U57Uxq~G1n=3gwehK zIivB)bwg>3(6kz$__<~sNp@T=HiNX$xpEh*vd&&dy#tZg?4e#M? 
z#NVgSXUQh@5P1(w82!U24*nipIuowfLf%uo!;*VHDyUkJnZ$f**)a*ANF zcbSOV?gaS9t;M+dQ|dmW2VHQQb}zEJ_7`*aqgMEll{;}5S%a7-N+LVcl2}ney_&bA zU%|ajlaMp!zu?Igu~6e^M&tX)*v1cM4lyr(#;_MsYZ-UU9eA}5gl|tDLKRCMEvDXU zi>t0{kMcD*nH_4gggRUCe_UO6JXQbyCkbWmQC9YL-Fwe@pWD!)tTbpSm6Vo}b}Bnb z3WcV<%jI(2Gj1d+EmBFlRGO$y(oVngKKJYMdH9__c)WRy=Xt%(`}Kak_oM7D>?cb` z=Do|4k2{r9KJFSBg!ouXkJ(4IGMbQM6|kPen1HmHkD^JLO8NAEu==pFr= zed*f`bZS66vUHN7NOVuqmrE{KyArHf@_ddXjpYOf5&_dso#=_nd*Y29*H%q&47^(m|YCH~~a$nF1z>oxy`b zd7@j#1NC^tN6$GvMg1n+!S5-2LTj_1gSCfziQ_JJ(D(FL-KTzl=yO*h7ZV1+MqUeC zo~w*~ugb?G3{8mbe?tK0%xPfxY8LpCUWsPP8;}P&CW4wL)nHa&BiA>#-B7{Q0!&Qf zfZexz!QvSSsM1tp!h1_*&7aSaoH_Qfob=2k>~ZH@h1s%_aipLL^uD$+#X>3i-dK4J zdjIMAbxu1DY^@HX*Mb(09~pZ)HR*afLDD}3KI#@Gku4Uz^gRY&41$c*7AuJ+aQ^Gd zRN>VHN7&l@qQ4TKwiZan9O8Dj6qs4AwvRoN!RjxS_%w8TThYHbXdg~q)=~gshMQ14PyEDI24%M%M0K2sfm&w z(fdz7j~quD{bB-l>?Av4+57cqX2(+CMcYQWSkjj%d7wbO{S-^*HoBDgGZDp}GlQ(Y zJrkRKvyCcz`(N4)}#-JC1wosi1R#QGhffNor$z2aLsM07c@t7UY$PIrR$q6;; zSc6LzBE8ZZi?v=x-B?~j&Auq4)V6H}q2M869B`kSv))deyl^h6e?*U3vpgKDI&qfb zAW`C|3pdFuE;S}Sa_nC-|PWfNKVeJeS zwr~a$doK+}mkr~9v;#B^)D7ui8DfLFAkV??I(*qNxdLlchOj6bMaizWrW6xo$hI=c z`H4o>UzxlYVhJ=XrHJERi0GOQa;>3_yJFo)pMJQL#{Dh+2Orp)LfbO3N!4|#wvSMu-@6biGiDLpia912_$F*-47gGFMzW_4TSIhEEf+_z{sf#*Tz@*zrfofPjK6UXQJB41>ENyTk(tR zAILq~8;FV3Xf*!CC|rRZh75jFBf3K-5CuQKh`g?@MQrYA5xF|*x9^0(YY1Cnvu zAGlV&ieZs(&F9j}8GeE0bC0cedQbof6rBzfrdp@MP0Ny<}JG7R}b>A>L$5?cM-92 zx{KU<8OymDwS;{tt`3==w}!Dn(YrB)qXJ5V5!z*y!GC`jOJ}Wj@;5N#`sfd`ZCgjS zr*@N$J0!_U)(vjkreHBXu+`#Ygcg4@FK^G!VHG z#XQVHsEcQZ@K49a5q(nTvkB8bU+=OQwQAdC7;G%e5Ium1_RH#{Wd5%JTo69P% zt*W#NdYxAFX5Il#^PM#0>A{hA+RE4lI=**Ql=KG-Y;1m}r<7pF?+B{5s!b#ps*66( z`T4I;3%1l+cxDIRC6hH*e28ed;t z2z5g`Sccf3F2AYitU_9*pP#yjU6F=#6#fD$mei3O+)a>>4U)b8i{Bk)`Z5s9*Ri8O zg!D^vL*EVL{o(+QvFFHp6;lq*M$w3g05v*DPC15y1ll$~Y_gd4RHgM6*Z89kuG*B2 z)Y50$4HWfr^hAd^bO>n*(RuC z>@`tXZ7mX%t3bTzT!RMZ3ugy;dE-7azA`Z;fUT8(%b{49pjuY>H?(3-(j>KKSTqK#gD9i*d5wV@9Leu>DGZ zaJLJ=*gJ)jTuF%wP#rmd`oWY^p_zT{(!ZPwAo 
z_QdYMc-}*(M}a&r|K|x#II!>wrmARw*C=kgM+;JA<;mEf>AOz;Ho2*Yzsx0;QnXRA z-nH4)UtUWI_m1Bu+R+g=Vr;NG?3mNv-P@UW9_5&16Xq9N_A13i;BI;*U*6Gz?W8;9 z-+H=iP}5=>KhC+uOmdz>+dW0bKf=*PWx!9-B^KA>l%87R1bya|V{1K(1D18=iprpF zNC(Rh8`PCZ^FWz-@|J8z*GN%g!vRdkNgBMk_5gjRqQ$%q@2xj8e+scw+&_hBPgg__ z9MUJ6;}&w}eA8yILz(a?|MDE~yzUOByi*0wydtrsv~7-uJZ0qE$TUNizw^dDJ_wP( z2L)&i?9fG=<&~B^i}XRl;w5?`Jvr{ zkbd)t^hSSF)RTkKiygR6N;(bAr&MtDcfUf6N|n$&dg&VxNuRUlKBs1*-ad}%wm8m! z%K~;Z@DlD%kc!1#?uGcF-YW5$=geQF5aE) zu@U>ee5f1J!7{`Kb%|*nH(KToyKMpvG05Jy zObxvlF&WuZGneD$bb#^KYVI}WEFFlYV%u!=ykjhT-k;Ug->U3{i8DvmQ}>gG+&>#W zxg2WiXs_MNy*~R67-EYTYAr9eW|6vypqa+~fKAt3J9=t&6&)P_e zw5q|qSOx4Kz1swKw2!dE!@xtM(_mde2HNaA2~^!Mq!hCY0au2epK1ID^|`#ubhp`k z%Kx(^)uWazDiiGnnThuJixskD*+)10AFJtT1+XMei6`Pnss)oHWy)wTA z@1LM;19d|>Sccf3F37X*g)YC#DofDnlEx`laX|lh;{#Tss^o7QGaS-JPLtdb53zKe zTn=iV$de8ZCP>nT@0`{D@7{`O71p?Bq7Lb*`vVOLHUZ4UeGW9$+($EmSkNc@L5y}J^~Y4b>Z?P-+mNe!$?W&uJ4xDX~=S5rMjRP^oZWb6MrOKSxU5bgpyif8fgfE%%kJ{Cs448YO+) z;VM^?$&vJiMp>LOmACGW)#m@~Y?wH;{gB{$=YM+@^>u{>Rfeyeh9V?Btx1>Uy&KeZ zY%edk>wLDuhW%`78q@#${pL#1Fb;^RUqiV|Z%46#97kO#^P;8e4fa5bM0Hf<-8I zk_~7B7&}E8OY}Yo9&DxeDK62(Rum*)&o=8*2@g?hm8m1;Gh2a5N0(9!iJf5R;AE`+ z{ZVYl`7%M-&LZU<>aphVLTqv6YoPCO9-OF3pcZZ|!ahqCW3LnwiR;E~hHEt!gASEy zFvcVhtDm@?ZV>h6yNjYY#aAdL-8 z19d|>Sccf3F38hF%dAz1s3|{@B6{4s3QKYfB-6G4Abb$drgK; zVy*{uqP&QFVGRoW@s-U zw-&EMw#LG)QWb)9u@lh<}5yOf0-h+=F2Fq+S^meT9wcE5?P%4)xd(N zo>_`q{r`LaZuez$RpzpOVU(Q+L(??#C@!s6!T7`LI z743Im87!YWlDt=WD2HQK4-6;p-rvL zCy|>zPWXj^awhH!9Fz1rS9Vb0yVaNB$lOz6Hb3V z;$H{GgA%uVke-uDL=?|O6Md(EuyQe>-tYuDI7Xc?T)Cfk6Dmch*&jq(gF^_jvr$0o z{Rg-Icte!icNSGG|AS=5C!?dKA0fz83&Op7JoWr5DdK~%}K z68(Gh7xL}XZBeLy1>W4w;v}XX;%M0{VSl-Kzh;X^921Ai=`-OjrIiTj6Ui#y)GRe` z=*$x+PTE?{z4C>9_Vvhdq^s}J93mD==4G<26)Y_4uUHhWSLN>~i2Q-gQ35Be|9omd z-viZnMvp()N8;0kg_77Sj?gRowN>AKX+Z{ySScC9U*vkdXc!0d;gvL}LZ}Nn`YsdZ8~NXlwJ$w5ua_#L72X$-=U3~|8`ewM-hZN*y!YoVSu95V8EU+K6!svdhrS z#J6C4a~7G)Pa^X_OrZ)D&SL(=N$knVU)TnXZRC=a9mF_MJvQ^B1JC0Ed`s?o`s|?*Hr`a`sYIe?M<5q{akLb&Mf3mR>^}fQ? 
zQ{jEa%_8TIup$Z{kOn3{Jy6r+;&#c20)H=;iC2cvsYtp$Q|&A}01WH@_T;oZ)M=SFeKR*g z8^u;;SAe~vkCBkJQ7p-kAeP`=ZCI@!m2eq-9q$_VP&mCXZ-h^`K3hrZjz0)IPq%`6 z=LD>f(^#{^@fB0w)fLQ2?7q=tVqb3rleG;16n1J34^hXOi=Hd@b z?jhT*E24|8&O*C2&G2H4E@bNc3bep<8ZM5!jZ~E>5gXU!;bqcxgpkriXS(>KZxi^a z;l&BK_SNU2Ne5S3w&=FuebIStQgb)r z+N6jcKY<`cYS!F4Z+vR}zV7E}n8k5gFZr_{KJpdXX-oPttM+Q-@7i4?G|;iuI`Qz8NdDWZmd zzX2Yc)D7ui z8DfLFAWtVPvuXD#!QFd725R+uY`lmS`kxo8$75vtEnug_C#d>Al3y&!A^FU7-6W z#@LTFv#INbq1bGL(O{D09I{VVmYNYZlbkw18+#bH42!4N4zdCau}eWLOim}0%Gqg+ zEt}^~4X$2FH7%S?H9YJCM;dwH@6{G;!_-M6*>{BG66IKSjU6_jAf`Hyhpg9+>q=Q=fL)8Qv)#~Oz_{nx8u z`IN;cYvWldRt>#e);5{ciQI+ftDfhAf9ok{&Z~lHn_~n=%{!U(NKZ}cnR>c0@Q%&n zg;Fe~MfAGCy9kr_2DUva9mWA==dZkD19d|>Sccf3uGusXlv(T3P?guC#x4}sfsHL^ zS?#%=pn3KHliMV{e8l(^VzE!L!K%j^vg0P&A^HL8HT$E%hzEq=Go{hM$Vwa4C?L`19JN5AZm+OnAq@Z9F;HZz)Y=nif7KZ5jUjz5;2Kgyr8=Dv%Y9uBaz-apQ32br9lLC>KPk=Fc7||X%}%Xze_w_w~pjYP0H0cABT-Q_MWQU z@PIm77eo2Eo+2l$P9;8%RRr-X?L^+0F^F5;FGTEkoXHb&Bee3Z>o_>q;z!k;uEplt z1MFCRb_Y3PTOm#i-5rtlR_f(drbJF>_?*ogOYsj>slL`J!TA%YQCR*Z!EHmm6VM0n zK_+FIZ=O``Wcjs&iM_@+*B1W$0lXR zgSu*I9w>8P$a#UIwh(tPeMor-qo|9gYRU9LOJ*(T)jTU^90_7s`3i`il|RN?-J)>( z=TME4^vLt@r42IC#TTW?TTe8p7i-3f6KLC**x4~LT>EqZdEx#IVDD%~4I*Y@X_p`> zYr=Uf_`8bu)zM$n_LGg&2HCfuPcMm5vZ$hd%=e}qg+~F~1b6D}-1*}51#5sjJzqW+ z*@4O5RK~W_=TBaK97T1%Zl#jifcTR|1~`(4f<#+8@ogPU{OCwDxkrUfO}TmlL*%8z z9>3;^FKW4y$vSiM&b?N}9%|cAu8M~!eKYz#z85>msb^V;zfA$soBIZzY@{X{KbS8% z5xk0NPj4)BEiycF9X}tuyjrnmLNRVJSk=>`T@w_u)2TLc^N4!NL8F{y;v{G3%%6MJ ztSR^tgf^KNB3 zODVZbxP6W~% zXe0SD9A(=1gj>bT7(T@EDCZe9tLiE~ZKfgdwxxq>taO3tTSJ-f30u;K%o&qO8Vse7 zE@pX@9c`Pu$3aZ|61|qj*moYFni&lsr%r_N-J?KpzX4@(&k_?fCShA$!|Agl)k(R) zEKs%OFt&S9CpLZQYh3S(CRS_x5Ig_=HqJ^v3Bu0s$f8~u^4MAv>gg#zEO^UaY)?}u zwm{C33|)STh?{>ObWu++4_|L^MgAXf|TW0&ne9F9qJ^xZY!|& zt#h!a!V{>WK_&Qm<1+E-!B66nlPS@8Kp(YUFpaqV1Ga`I$<^|!t8{`u92 zqkKM^9&`o4j++tzj}y^9%M#E>-xs22ng<@_b`RMZKMuV)a~5iJ#TakOx`WjF@zJhM z48J^Z6Y(5SA>MDx!p~Ye5X!TaQDtR6)OKe%D(5vG-+u71D5PZ-+OVV*{jxO^iMrE> zoJ#g3mMSbnUysd2zpviMy%KTPFzNAknPzta=iE9!gL1{ zN6Fp_k7cG7#^E# 
zGNJw5Hu`Sx(UKfVD)n~JFb?R$yaW3Spl(P9%McsX1$j=>GK1gj6>Ru1$S#yqL$!lY z#P-bxcDub~J<{Bz*O~SdVp)71?FP5* zoE7Ry+eW%XGJpF0svoI+<|2AJiXcmSW68Gq%|JTh0cdwjBOh*9Lv6fA_iOn1!s#pI zscx1x`3^{th2qg<>HIvR$K(qsh}ld&khUV-lP`n5Eu&E#{YFroUqVjW&85T*i>av{ zeAMi<_rnF{UOvLa7)^irY`QN+!d`lx(bPtyc;V*| zr^<2ZE=rfbu!GQ%>yeV>jyoJMlFYU3c->?EX{~ixyW@93#?S^MM`5)fWy^p4(|Zc@ zEDGA99V4Uv*FXI#8FSdVUA3_F?ks!5%nX)roTPuME!|o)j05`c#v0W^s2kG3GQFmmOQ zLG?n{lOyUW%XBQbo{)pGkL(~!Jin6xGYhM|5NW2K>e-D)YCR6)J)P%~hj&h*(&tVh zCMVU2TLv4^s+xw}vkM|?*8fKy=?B#Je)r zlqJRmq~dp88W9l@Ul6}V?x>ypK2*(C8Sn3I5zVtyN3WX-QR{i>h`m=e(h@#}FrPXZ zWj{zkhZ1wS$L8NQeA!UKy|wWfl6+YKZMB@t{gu|j*}7w14Zb3VGk0nXhvTz^eN5X| zXfRh&Pu*-Z^FLG<3vYzb_XwN_G&hjac3wx{TXD^JkUfw#vMcsLb#H)RlrB}c`4^~rt3y4zBt;o`D^b@<9l<^IGVtY~7nb1thnQpFOnRM~ zf!)>}1ScZigPp6Rz~+lnskG7au*3}*Hh;`7j$&w=;r$6_sPy?|pgp1$+!#{lblKV= zcVymh8aLW;c6UT`JQYZ`@ntWhJkyTJ7Yrt7=fAZo6)v7wR$1R?RQ$CGbDnxgowLq$ zkd3O1tfx&EJ1ZZ#OU98>{*JQnPc^pDi_zl8?9n%y>U&7wf&8}~Y4Ch?!IElien_Zf zuj29wN&mD+PNOgZ$#bGF9c4LkB=a)wC$ts~V|Z%46#97kY_V3v$Q2q zFirH8z4T)%*rdCK_ zJGmKI=N*Z3N`0j|CB8XN292 zeMW?GP7&k6Rmcq{hl$-ikN&_`!SCLDNW5M2bE8O179{2W%=5SR5*fVo(;4a2GOh4?)&7+0y zOiG2Xq|aAwLzkGh3)eX1eQi zmeJzMJb~8G(SLI){rSQIsnIOwac3m;)cU|GrhodE6nq@fj?ydS?N&@7+xX z*26epxrU~Jx*;7bLu^nNHnR(vOwJv-*#37WJ_&F3Ll&VpHu?a&tAN%fjR@qG0MC+ z0qcp0qh3pWrveNt$QYR;nCoC3H9usMc-v=7vG`8{F*b?bzqI`ux#H9dQQ#(RYM(t$ zy6HQL-%p9B)}4ta?E<~Asd9c~>+NXFVBs$Eqn^6hlAw)oLW z(}1WhS8|2Z06X+(;&xP21UhgdY=g>}k@h)?h#-}t<^$32ek$~>cU%93p+@ZY~)VRRH}6yS@6X~VQjd?y#jNKX{Z*mOP_ z{%yI26_euzxH#KjSIVlSrY;c5bxBvj|FwPpX+4aCp`~e{ZibHj1H=Y(O`v&>(lXn^ zS69`BI3j}Me@OSi`IPzX1LUsyM5aC6;;6y&PazggVi|RMni0Bn{g5c>wT5WJTglnO zPzQWSja4AY@-ngh={NGy(xucm+BP#ozB29U&QN`%BEPI=u}wZQWq%^NuYWg^wn~lI z7daJ;+!KsGOYTCxXyl1pC!`=ZF5!sUsYU36Ml1ZQ)dj@E^#rOkH(Jy*rw1`Q@*P*) z5{1wGJb-U|YJ$#@H$_)!9Y85P1AI;DDUt0p9*T~tLdl5l+~g;G#5-|3v2uq3s$(_= zbiZFg-_6lsxUX!8qvT|X_PG0^UwbzoXXHhqjn(%>ckAOhlWJo)``a&7RqXB&t+LZ) z`ZC`iYUSTOULagcpXqn2;9k)~x%Eza9)(qZO&erKj2k(BTD1JQgRX|8JssF@X~Av2 
zZt*lzlYf2VYNK%ODZv>0zu0RVSzSnGqL_V&Wae@4dMD6{4G8SqWig^x$c^abj}4+XdsEqo!JF}=@sb?r8-0f4p@?E3KEAAy3S4GhKRc~5S9^AKl-D5p zAFq)<&3|y(iRCMay>TbMnhU?RT3}B!`T1F1Mx%mlxf!GWn-hm~ZAl%Pd{K@h_R6f6 zilKP;JY&LZ}1$b6lWI`0Rf% z1NW1)1r5vcxZ|Dvpv|;xI76yTJw31BhwZs*M%mOY!RjU%@{;QK^k(!}>hNAIp4F8J zJgp5%yyg^~sL~c=&OukOg3)s@1>Jm5v&RnmHDe9$#bpi5dCfTN?#a94r^Xk=N0}6A zRCykj{OTuWWGBm;IX#D*)u2H%~l5@x^s2ZC2ueJsl0QCliSv%%y`}1&=Q-L=qnZz zwjXg;TsCc@+lk8pQyuod^yq?DW>Va#l_-Eb}!J}Q>Yz=d7 zXZSy#=;oqf9I&h)y{!=HhIFtDu|Zw0X&xwZj|^8(d~G=rc*GL>_%i`(&3p;M#a_&q z!=L}1hYzvHZ1Xnc??0}9b79YbzrYGBR@CL4p1KAL_${W+#y!P$r9H-$#GJt_Rx48lsavsl>37)K zibibL#d=b2?Otp^*^qav_8IvgdIuJE<^*;4kU4cDshDcIx&xc>jKrpsvb?b7LNY`y zh%`B2#e1Xd!h5sz8RB8;gT3c2`%LzVtaYF0@c##vU689HR3 zoD+HIyVO;P{Mo@yQ9+6?*@4O$|{K~l^Ax7`hS;Dk z$fHBcoMw7dkm$4hmw{nFL*kmN`-mI?W{u(i?qk>8~r!rO9* zE^{qM_GPwM@kKu<6jgb=7Lhf&hVk`hRZHFWWb$Y7Z7_Ktd?^|JPaj#>2*gin&r)sS zC$Yo+R^sdD%cv6NWJ+<1F=nK1N!b|nQ&%zxigQ~LGyb`s`snX2{=IoVw&(C6>eI>< zSV7!kjL+v|3FL9g!Rj34niDRbM)1LdH%CCwDI0Nytg(2`?>$s2Vn|t@slg`tjT0~F zog+qt3S?FPa?{<1%Fyp|vnXrN0!p)P4|#FwQBrP29X$m#Zc80#G9>Ydyw<}_{cTef{C?!CGTwJTey2O$L48D zgGE5u&dS@1-sNwzJ1@|?Uifd0^o?GRWMiSnU$--g$&r+H6_rE(L~NEVTY35nOH}qn z_~p?ACT}`BzO`r=2c&7-AzKD@Lpr8RV}rWNXr7t0OxtxA1oxa{I7KD})a><%WLB4m zOt(v7e5!u<4zmXeVma3Op7P81hd3L495?G5#n~kM!t~#uO!yeg5)!+j3&@+%F@(=1 zl7hCGb-9VL%bMl?pe)lyfcDv;pV~3(nARAqqBD_L!o7)&?UM)T2wg71fVP3N?o7EDq5uRZB_|WU3od z9>of zJh$%{@!;7t(3>(q>~HWR-)c=GHcfeleww(I+{D`hELuimf4Zf?cHd%R8(seb>&JoM zoeRm{m%iYpj|&(#76Wdbx!~$bHaX_z6p%ChB3EwnRCMmw2J)s9mppo?pFXFjk64Ix zg39i5P1OyTaP-dU{yQ(T!YZj~&&v9WvknT({OP0Jl6+^@r=;Rf z)9+V?@%4q#sF2B#HcHhM4dZ~e-qV^?40S^~Sccf3F36Kf%lz87nBz|Pi|!UF0YUl` zB0cRhUfpEO#LO`jwepY$V!;bNh>496=%RQvJn1rztGR3Zh&Z_~Cu1zJA8!^_i;=k<-1P&$%yl0nR** z$-h3G|BABc_?qmvZdV;M*VeGVjH#zjomZ6}vM;ZSz%$v;N}QQ`T6qvJ8pZ+jG`XxQ zg}NafEJJKiS0v3d>RYno5o?Uo^`=FHP5p>99U}pf&IjW<-IyHdtY$cKMhC=V)2qen zdl`f}KXb=jH-(DkOgY808&D>EHfR} zZ~cZuTCjjzSrPi}QWAlZERdv}h#JobN0XzK>9HhF^q_GDs$$oM=6<|@&)Gc%UHZ2l z)wyd$BpfW_&VwOnyGNd$Iz5?4yb=(lG#B-gorU>4T{I 
zWp$L(mPx2K-A7m6mPa*i9^l%m-ZGqBwUWD{^gZH#R1tj>eU|%O6o)=q=w0*Ar&An9 zB961=Mu9eggA;dgnCrXI(E4l7pp{vI~}*2Yp@dGJ@H->_O#`Ys|Xp zjyue}DOrba9jtHh+M(6L=pS`{$I{hCMbnN6qL2PJ-n%fNx}f`=7XN~mWUt~TOwvDH zd(okwwBfA1)RatCv9=^fn%{b}Xcz~y_0d>|0;rpzqstH*)CGCI(lYOoDS}e*AX_j) z70tBH=X#8fMdu`EGjTQPlVq)?*AEevk#5A^yN~eMxv}Kxg`LFSRl8Bt*M{7B!Ik_`xSRCa zIu7mY8X~@bJ1knYK#`Dr#^N53Dkrj4*(CR5AF|2&AEFsE1yym0NXNz(sP=3?H`6p-h`%Gfi>RViW>50Nu^daCup%<8XdhwHHese`?&7~W3 z|MXITd5vP8^Ma3A!u_wluz}mivBBJr#~tQgl#KT_TzF#c)Y)S3prvV+w&3uEP$oUx_Nk z&OGn5yBMnJPNnrNiA{Z#rpiSxv`kRo=PRZP;;{2iU{#HeeeQ zPdy8Jg^9YId0l~as8y9*-u_oY4EfTIr8~>>s2&aSWo8r67ubt7b*|uc@3AE2${V9S z56nf*!LdyHBezdC|I*QH5!rdZ^6oUZ;>_bm1t#roqVcDjkgF|=Q=v_VA3ftbr=7`r znH*w+##oz;*)^q7?NU|P%9o~F>WXW2F7y63_BJ077A(=;U-@R-1tu1ruV2B$Ua6b? zrM>K)9t5xgWGq%B}k6K1JlTZ-pnu4}B(;DIHShoKJzz*YL3?6X+^ z>C?oF*M`^%pR~DWK5rH1IRoLD`kZ5Vi$?dNDnNM8(;d zndMq)u!=`EN2wDx!d`+YZ3K4j=@2Pr&<9eF@1n5Fc9;SYjqNboi|+e1Ik(k2lv?he zLT)KsNY08%2Zd@ZApO3cXs*d8rf8f%X8i8Q4b4;t?O7tm2A9ibn|+ALtSRa4sakYa zr{vB(iuGb}Z_Sj8a`;U1k>{Azru=l0TOZAQ$9K)-OS!j@Q3v=x#m|E_#vYxl4E@jSD`lBXdzv1w(RvsMr1>;3 zXajXaI#`C-psqBU2g-EyKg>-El*1czrjYe12f+-Gk49X$#P|ZMjb`#*h-JFA1DKz# zLbCkY5joCGZY6hMM0;w503z7#m#Egu0rYg{5VL68Ox!q|X;0n!97yRgbHV6}H{`7~ zH!15|-QPIdFUj=_i@ud4yUxS4%wV)#P_EAPMpQzimYrx*rhv?d^zp0#6-zew6A+*t7iaP5V zht)-7Q~Sd|QJ%n@{1D-nH>YR}uzJxzURkL}8JK?~YgY*n|2Jc~1HT6G;}^Q|(0=-E zuyCUq_cI|(uA`9(3Ro?Gdp&VX^_5tkV&6Vi)w{O9nkU?+oS_uM5qWQ}S#!nmUmc83 zU#HGA->Ifu+S9waQn^52dPmWPBkUdex1K(>>MSb9v-x(XrZ73u5%Y2;jt2b>vG#5g zv(yfMaS8q+Ih$^^OkL414(P+r((A0DZb%2q5F6B`Li0or+5C?RE*wF}V-DwH!uYgis)YjNfmjk2CQ_;2#i=(ftQZbze_)75ZaBt|M0QrvPJ*YNZSgk`ym# zpBU*c2QPYOgXCLU;v3nE#G3k1)QKa$)b1mVn0UF4xaqTn`1eh9a^TeI+_%#v)8Ele z>U$VT6cAhwoq~Dj=)^Q`v#v71?o%ToghHmC_9)~Qau$>d+g;C8+fT2z zEZiF6RA{Qqz0Wnl%g&A*@7*x7rsCInEym6PZokcAwd`%Y9qB%((u9e234L6TY5&~= zb$e~TMOC~mU*WK%J>Af(!Ngv>Q;vm4Q=6-d$rNGoJ4@zosB%-$Fb-(%HLd1_P&cH5 zWrz*xf;?N|=zVPm`?xDyOmMTO1jSWQr0#c@kYm1>GWDzHj1p5%Ar@Z;d9nR?SJJhh z7wHfvb4?plMzp6priY_?ur7rC>CwH!>A?|1%19`ztsLIST^67jbdaOJdt=9cW9u6}B 
z-VKdta%&2i95I_HJ9h%KTqQ^<-xDkCmcb`YEh6|0Pr$Qh2g#(%Vi0%i81Z`=o5<;i zMh_XO(s8~E&ygz1Q7d1BA#4v~X=)%)8Gn;##z&Km{$s$qOA(0swxOE60r}ir;p34o zrw$z2>*_tN{G(SKkSA{mmhrtM=Ep-PI6E5eVxN=l6_GPX`V{P#QO@1GnW?A2AkyaZ zgqdZxzG(67j5g-Q6y*tWb}OVoneh2EQCQIHbYNCyCzjG=T5CZMeqs{oL81rHVW}$m~UgO`DJo+XCWw!^q>ruhluN{i>RL6z1aQA>EgcEI%4CDht#Yw7`FXX z8f9;xBwnC&gW}ohV1Y}|P;qL@#9kL2F}L+a)KNJctCHP-y=ZxdJ*!Bk>e9bcyufrZ z&*d&i+*b_92Q$UK-l5`6XDq2hCUI2#+_Ajshc@D7&LOd#cNTd&)xpH<-FdWjNg>tt z@H#b6C`XlVG9bTAiVe^VEDh&> z+taBlgaxXr^!U&1CG}L(b1&1L_HB7krmT$!0uvrov2`UroiWr{HjD$>`VCD3bwfH> zhS;F4*9RX|+0ruWSMs>?)equ_0&Y@=4=7NZ4;YbS)3cbkV>7rV3GzTJ{clvoE;6Hu z3ObMIY|RobbAHIwd-#9&wS6_lp8ZRs1KFF|$_`)}qGv^zofeVrQ) z&UnC(GMZyIg^*uL;&1J>3H?1k(B-0;sPuFJsyZtl_r3a{Mm`V)p>=w|k2?yLj(vh$ z+Bb_(ntC18nUe-Q-(+xSb+;S(#I_K2GPR<~=4z;K^)lqi`5VH3y&UwV*q;-|j^Yli zS;_`2aa^`z8uMMR+o6?jm405h{1$z-iOgVech+C$OG9DRS2F4Q+AK!aQ%l})hxHwj zxwZ{0Ru=tdM%xtc(dKs*uQGaf=a}GP)qiKx{k>mZ@NtDEe{H|SrxRyM`Z6y5-wVX; zr=8LUGg#zF$-Xvy^r7`I4ruGY;Clho4e4MRVuQLM&zO~(e1kcug5-=J>~7K+WldO# z{CVzzwwye{e4~zvl6nfUJRR~xHGcb}&Wgr0!CDSNAD@x^)8Iu7T;UNNyyv4TGHDAy zFVMCz`0T^@^q5LJIbf4QQcq<`*(!M~c$@}iH*}f&nP5##X(=NWgEUsyMy@jbhdi(N z0O={xBrAejNPO;5>VZx*NPTNbSdQx;<3|a=%dPU_=E5R!oF<+#!K@aWKiVB#9Z?Jd z_spQ;LiQ0ywT#jJJ=*BcranAs?q(4y>pgPVNOE835*-iob@MI=oi>wIQ)a#@vD>Za za_;Y}YMokn(Ys|M$MB#133aqAZDQ)_qQLj&&aPK1w)ECGCi&^*^_?aKM?zEot=Dg! 
zRu>qHk2-xhD7mlA(nm6X`eKV)q4D2%!DxrC!T`P1Og;Qr+*~w_1NxBHIlB<*hIFtD zu|Zvs2g+QgB=U_t^Ol1yBS zpl4R{;;_ROe%PHCe%LndG}M@5i;Y>k6|0c`4JO<;1E$La0q@2tED3oGn&v7IU#7{B zOP$^$3m(oSS^R?_^rrKRLxs=1Bxg5`7L)6D19 zwpdu3j(0ZPx!B0#4t+1)@0FUvwB64Pi+4Al7TnpJ#I&cHH}^2H*E?xa;s0@U{kq+Do*n z-*cb&>htn@{^@m^bMARN=Q;PDd+vSi9hi_NiaM3dY27B@vpu%DtEd+TlCRn0k%x36 zK13sINEedX&9rGh`Ml`udP+JiA_3mjy9H-WafC){&TMUP#{bSSLs+gf1mc4iub^gH zxq?R-8>Q_h0{e`oss62`imei~+PoMl7e9wn8QYxr9>?YxH-Dv4*_#P)SMr#eaEzuU zO_wNO5r8|-J575mKR~;_o=7je8Vz$xP3Q-k*VCDcCezpOb6~jtVES0oPg+g1045H7 zK{auV@PfIOFc5dg51o8O>HgS24=_}ceH^|8nh4rJ%D@(SZLW$;JUyB^{{9B#VYvfe z$IYQ#e{f~{MHl^8H!yG|6llT5rsJNao+HQp9!g5%j%Uv#8BxN?|{fj%+wsdCMjHDn|PFDQF z+S@C?SUMkw8k!Swl5&Pfx7%hWB0lsK&CIg7yXKH<>?lL14td2hA8Zw)5!o6u3 zV;il|RyIx_sa_!3o-Y7@(;pF@cH^kaE_tNW^PSlErzfc0uU)8fTP9KOmRj;h9IYfz z`m0g<2dyOcKbg*TZc`)e&GIST`Bg+1bMM&HglMAi{9(-CqczyLFozt%?02$XC4hDZ z!!dv69`BIhy402CO;oYFI@LbwF|jCU4lp0)K}CkDFlVtacc3ciX1{-?Lai)nV7^N( zBKNy$g0Kzyh@fec$)qqTv1w@p&(%DTdt3P?k%b>%-^VlQ%v~f0^Tmgc6qm(5n{Pd* zW~FNvwn*H-ob@_*ao>9HBF!Y{60v+dogJcMGxJidjVM7|s2RV+yz9(i(cV0-e?ASa zs>uCucwHHYl&^{JnlATg2;Pu8cz{qD7u+dYtNOKcLNB=&h?Gv+?Zei=-?m)Q&Y+cl)ZW=uEtdsyFk@9i0h&iH-#9TEW}>j?jJFExK&eoRy*{I7=J{|WP*xVNFuLsjn6!g_fP|4-WLT-oJnkPm+btX*&Usg90r(VSp^_M!ql+rN%UklrcbMFAi`RT>oog}W2R>}9Se4w( zH7)Dw(*vf7&X=D5&!;ObY#KJ#*|ldvPEi&P~b6MY<6mq7gQv3(2fz+Dx%%D%Vwi z&ozkjqc%J_iCHF?VdmHNv+FX%5P9BDtS2nJn8IR4K6v= zgWD?;p!S-4;_5SPq9J!OJZxwVOMQle7?qXatR}PmGk73;axMl=^4ZD%bnUj;$l6Pi z3EFiad)r%Z_6mk+IbG(R@|Yv3x)9DYvhc-B56O z9kBavE;H>;Mee6@I>PU(x26wVzG6RU>L|b*P26}c{_=02lv3sU1`(D<4`oMimK7D(WlWeY|7mPk) zMSpz}jVIMc(EV=xrbC=(Qwam>%?4X-9(R=wm7HzwIRdX}nwPVV3P#GPHu6>IklF3`A=`81iU-2 zf=Ey}N*$`xpfSca{+AB1dB*1U{n#wtS1S0ED#Jlfi;5$n_ za4%*hml~jmzdmvXcJCWaIe)9BRF)S)EA3_Yjt#FVlOH@hbxk~-Kl>eZt9d7FQj!k5 z8#dxM7Tw1?XWCKm6+RRdsU~>%x)R^}+k!eV_$w4WJVg)RTZ@lwJp||1D1q&bH_Z~4 z&c($cw)iZs7w{z<2S1(hfElsCTNit{Bm|N`p<}#UT--`qB zJggn;fOI21L?diS*Dgk871L(x@>9G;Zylrq9({q0=^Qv!=@T}w@FVL}pB(x6DZ=v8 
z(Sxa-PX)8LFT@ObqNOUE3;N_p?PJS;w+0{lwVnXS`1ZgW#x@b^Z`i-jZh!<_F7T(O ztegk%)uz4by`_};*L=@oI9jvqZ>8TcM(W%Tm_0|eE?B{%vlsb>DBGCR8?- z?cmA#UPq-C>%t4)?Tl|ccGEU8$o{)ILzLzBK?nH&Jr6A#^7Ahm3ip57#;(h}m&)r% z7SX)C@KyFs_mnPId})=(X>>xALoW^_yYms2hjb%8L?diS7m^uWZ77tyIU#y9tdVQ} z-jzDWoc;OeTm<=iR}1Tlv}f{t_y~)c^>)&xT#MRvF^TtC`7}?%Sza$eZK5aU%`!~; z#9*R)rYE@5Wle=JwyEqu$sS zQ#Gwm!Q=~N)P_0*dh+S@6#wc9aPj6W>PgcqI<#dHNPWzuT;}`*beSGY&FP@?Z>&<@CloPwkiM=Ug1JTiS0> zNtsLTJnP*}pG930iz&mUApb$GcI$yBUSh%WX7wgl!mJg-qx5k&393J`&a{78LnPa(F?$gP()%m?R zdU=p;#D{2v4e45YJ()9Rm$7iJ-YL<{g3sKN_%_N%{Rin5G84?#m7hmkxojcZ_aZDE zTFSI}m^F5G>u~8dY@AqaSl=9pzvwN$!)ydNcm4$tyGn_UU~KbHXE59M24+vheqf5! zlOfu`IKc=0IiL!3_H>imE>9!$H*lG|a36yv>M5~gzYu zz+0XV1nE7bMR5)A*sTpco0fn^*PCW_#V1Ll{@Pe{aD&uBQjO(!2Jol+Iw5`iR3B3} z&f^czy+iD~XpZTBtY&?>QKV=yN~1_@UQ%49YP`QNY{*L29Xn=KsKj=2Gsg7od$r0; zL}_V{S)aP9|FrI(QET(p{*vnz9VceiJ5}%;zKmThIBzjrj%*ZADb#!^tYSa;x3cW zjxQx~BQHo+=jXFy$&)u%*nJKN%Y)({n3jDwk-4XqTb^bsK0mjwPnX^&r71H`W6tZp z@;&OxFg?aLj|#Ndct4->i)wzL!oOT`f=Yk+meRYf3-2&@g7}@O#P+_MLAMvrqUlr3 zD=Ey`JK0+Jl*5SVq4}L=U4qehFc}}>{qQ+XR3Z8lXFtVo39|E+XV0wd7BMx4&-%huW&!3ynD2Z)7XCLs zefPd1x3$esnEOOtM;iIsjonipV&s$;=;=_t&Mk!#V3p>E+PdL$IlmVNqVH%p=nnn(s=Nwa-SEvD0e zz2Rt{c2&3};e&jQ9sQ0T=Ehm@R1Q7Vl1IRI$vI}6(%%?6cgN=c}v43F+0Fo%z-xTg7D zsMVH$%}+Q%s$&|^MInRy`^O$u{*HpHKN0ZfPZd(skpa|ga-inOUvS#k{ZyA;7T7au z85!s@3hu~Oz~AnEhV3lWw{)D6Lb+>QBp>AWhbN5}6C*#YC%3P2!Nv_U#9pna!2(lM zd6$O?z>-t0tPSEvKKD3&Af*uMq`svpvODmnr^4}b*&Fcz z=bFjebDeQz-B`R;;*49*t_Cv;&cQb$v;_H0R?vOnL3nNbD>7o66}64`fztas0e|N? z89$xXio#9{iCNzdTud-EoYA7L5yvlU6{}MR2cRXblF7! 
z&aDm5wn|pLKri0 zInH_GL^YP=LFSx&%J}<1YQUZ6lz!)ZxRN=;|ES}1Tr&0tiA}B`vn21J)6WQ8%V9p$ zy_X6NMh&K`7dzwc`vcsmXb6}We$&ij^F|nnDd2pE8tC##4JzIwFe8Z=yH%NooiM&7 zHOrgJ8}D33>|GMajvEow_PnrqBWxyf?s43fqqa|euW)s3*e9td@8Xuf&gru*Gj~?p zS=T~&-up-L!Ma$YVK0R3&J*2BayBKWi~hPD`Zt~?oiE6by=o|2?JVzmT~5pM-lZWb zc}FLVb=v5j!uh4+B0@fs6>B>5;z0DZ7Lz=r8}T6;VMDq?8JT-EhQdy>PSIYQZf^AB zYFJ&X4o8gUgVlQX*uTeOjQoBXgvGRNFuw0;meg4zm%nxRG0B$x;eC9XY2YZ;e6t4R z?kNHt&syQ(oVh$pja+tY@%nHT?DWi`!nc)4&-gong@lM2$J7MdwjO|5r8JDao&vc6 zrl91SA$4K@PjGmSI=;C(hTl)K6?kvAg|Yi%v8NAy^S`a7Ftu+oFyz8Pa@NWs;52?A z9Fs5xK1s*`fiDBerUD728F2w#(pW^TJD?(d!84>>p4no*`O5T+>p@T=-jFT+2w?fF zFu2O6oSJd>J}e474PHK1r;Hr$VvBH+cs*1H<0z`~?Q>Mv{BBjVPNw8?KT@6fzW&s! zmO@M2{uP79+!33Yi;0$RiGAYq?hNfRIx~ii)9HhET7NljUld5)bKNuex8=Z4ed*T? z|Lwy+TANq!*}+(7+N{loh(Qt0NHT9mc&5_7Xc03(AWjI!{cna}- zRRvaNk^}y}yNa#%v?OCY4gocl1K|AfnP7(ZM4}<@E~dCa6Lgx51>@r_h@7w{OwY3v zbc_=anOAROE9$jKO_kGx$q;99in106eJ~S9jLv}HgNG79SK6ere=P#hYihyps^geq z&}A&vWg6MlGXspXI}M)w*~!24qS4H-xQPFu`U&>IX#kkwfMGVnYj}12CrN6qgz@$y z?BlK2GmpF0*GJs-TD}jT+ohZHwmer{|D~ub@!NcBhZ8GZvob;|eCoQnp11m*yTYfF zoV9qPxwa>T5QIDOy79Y0J#>t}GS}-~ZqFNK{5e?)eH`VeKaQKK!KRCTz{Y z$66s*@lJwM^odl?gnROOuRVP$zZVCx^^$vkbC7PthiHTi>55@wZZmDFQp-f`BfoMt z-`4_h2{;BvdGiQcd2Mj(eEA)G2#b%AFR;5B&fRonN5zHdPU5j$eQSfOZkq9j3_a(r zwEi}K(_&j-%Gl=hO!+>1>xs?e(=a{k;Vea}NpA|R`!ki&E1d`p-c6;CKfgigOl)K7 z5K?e4aw65iYo+ex-DdXD1F#}|C6%$-lU|UX2_|gZMP*AKf?*tehz0F}cL!XfR;dW7 zprxsFjNml5+`blM<@wSFv)t%A&i!f46@FCEnMn9;PYX4+DVE-^Y)j!ax6B5W@~FqT zs`QGbB-K74g&OJi49jSUCTIIZ@cxw4OSYPv#bzpvAV`ltY+v>3i(XD`P=R>5$xV*1 zdA_wO;lO!p6I5Y8{0Y|D+&Ay-w-6fCJ^rw++G#4CZ<5YOZ9%L#WBwKY-!N9QX@S1W0 za2&5jy&Ss;s_hsL=}L9jf2tFy9bN{EF0O%sFa4ln-9=*Nf*7E5R0>s8eTekOaz zdHy2k(&kGXV9t6SX`u(obJl`-_et=K;wfq6j!Qfa<}VSf+QU=zh~T}u63DgvHdg$w zM1HU0Zdx~I@Y8(pq)WwRn=UM{UQn>i?Q&#D#pB;y+_T&IuAi>3OLT6kmHSliT*b!s zVV#XK&=F1wT4ZjWcR=K=^WR)XaJeGaZM3d%fr`9_KU*ZP;nzQQ%u65+IH`!3v+0uM zKHaeHW_~XYWb3-yj(JEo;zKmThIApB2~3+)haME|y84w{YCIXf9Fi(kra3&f*YdMo 
zQy0q5F+*5RcujyE%L2IyYH1Zi%bmr?)%(Wjxc8>~DeGsruNl|CADHb8a~azx&AH9? z&!1dOs5Fr}9BjUxyx}$vFIOChhu-w1w7@<*&+q`%KTt}oX*@*5ZP`S97mdI>1JA+G z%M!|JP#U~f`yD=fpha7@ABM!ZJhEbR6V=kQjw+5`2qU|@$ugxTT(Y%;RQi)guD_Iv z&k0X~uZC*lOLs;A)1F9bnwJT_yy`e<)pZlQ+23cF=jdcSrkI1tTgEb_KgmII1d9HSXs6 z2I(F}Htl`VJ65>{zYk&Gb?nW!;**)5i>Aj75q@m;U`~3e<Ddk)??>%~$EbPUBnB;yIdlPGd_jkDL-mpl><{8DqdY~{m0sP&U11)9&eD)L*7(e?##wt@%eD^tHp32kxl6& z9s})xJ=F97JG^n{7JP9+0o7%BnoOEsg<<~r)U^jP{I%T)|Q5Um2XMfzZ9U=FPAz6kKjD$*z4mN@s&5qldwoK%<|$@aae zXK&@H?7YC6_olck#97t8G5u`$UlSmmvS+De9q;>KWYgY9h01ZIEZo@nsk+%K>oVsM zJA;6st_yA}WX_+{DUP`P-#5j9gT%Rh=M9AIPj<6Dy`nDfd-wY+D{NloTAtKw#&aIF zkDZ_H+_c7_7YEX%?z^lI=|+5rM%a)pBon~2c{r$3to(DOMA6U_+;8^+nfDvvE%(9f zek7%JgW0(Z!eWu14G*4J3|_n)BRyn1MmoH~tWV#&|3)%dc{m&Dc?_fS7Vcm^@OQ-M zqi+A(?bTpb>N`Tmw6N5k&bN8hi|lS*{x|n=`%{ryQ$1K% zULfy#pL~$l2ET85nR~%A-$^nxjgw#^uMNt+-pKF8fqdva=VdO^jrb6aupwPY<{s0g zlFv+0>(OqmO1l;CoUjgaX`RTse|{h9uVW)`vip$`7QwKUfaERXE+|=1p%CpXt|{z$ zhUCV67W_jRs$ADGCRq0l4^YC`X0(<3PFrsEC9HYaAL$sCE7bo2>DfhJhEF&5lN4PwF z=7_tDi5#9E#GL6n-IM2C!#>@b3f?oeSvx{Lp5BfR0#5U- z$UA@Hz?Z^A=nc;SI<}i!JnB5`cr_Ys95xV|CApEl!gs)`*$@6rQHA5T&mgwl*#KUt zSHs0k)l%6N9BAu6GGNnWvZt&+rS;hwR&OxtrUa zc+_bkp2}(Wmd9z&{HFX~9LR@LcHYfJx)C3u5jLa?$)GleGT#(?{`$-va(5g|8G9C! 
zs;%V(Q&DW3CZCqy?S-)HeiQ`XonOX18BtJih0okuk&)G>=75d2;4giAge$yuf_HfQ z0(j-yT(04XJM5VIHev;JK|6!GKcJLyS)eE5Z&^!Eby|rJ`SFlWP2NuLJvNK^=e3KN`r$Y`o^temSR0)!!D5?B%5-%1 z+P-aAC*n7IH12 z+W1G5_oH`I_h*b6lx1M2(JrSwr37_(0#D_2ke7X5aP4 zOeiT-q0X=E!2Iwbuv%h6TD`8Ku*bXL47U+jaI9n% zne*I%oD=r2h?lJNT&$M+LmSz&_Zd6e$c_5n zcj6vLEs8D_RXbdC8{;-@@P6}})rF$|f!hD_UPsNLwr=IRLUA07L-KRp$@et8KdX|L zvV!ZnFEfSneovbl`t9$gv-Z6>5Iy;{N*>aU_z;b-Azf=3nX0q8!sJN}u2bJW5DP}= z(bxX&foTa*uzIf-nqNLmk?64A$Juw`T-JTUIiIB+HV2KiB91Ov}afSZP;Vw>MKm=(HA$8@LJ zfsMmD!2lCua42qyWSe_G@?t!|eV9Vycahk3+N&2-mlfLN+9bA`~ADAKsarPd_3*kBp*-Rq>6bLq@$d2VG76gi+q0inp2nGiv!tnP@!TT(vA2K zjj$nINXC_EGyKXyky>XL*ShHo$muo);&uJW*b;diNyAFMZxCTQu51BaZGIEGHI7yU z;Y-D9{@*=Nyc?$cV_qK8;J`%e%!NVl3}c(;R6q7zPpD(=fSH*DTXxI?E%ht#BIaJx z@#;$`{Tf9VzxHe zB1kEcsnpr{UNjH}UTq(K%kZK|vgXjgI32}&tEm69qilJY`9LIZu&IOf&!WSwd8HE~ z+``@^aUwHjvUApgz^nPaI1q30481(08}T6;VMDs+GcrS%HqV@^5sl3_LdaJ5;8O#$ zK+POy_>1|54Dt5!)Me)~2+KU>a6DtVIx#R02X6vDR@h0R`ow9=VlD{fbpzEa>AW=> zGw^=5u8TX*`>^dSbQysO+8u~zl~Qc-q9QOsqX~Qc)0+Gdl>mlZKLEBon+uln=MZg= zpI|ndw1MM8KQMy1uX6LS``G?-G`Kp{n;3Pj4YLc=BL(;J34d2NvV}QQbgt=b`}csHFhpPv61cGNaKJc{Q+`*ZKxO%fC94zoGZ>07!vl4*Hji`1gBbpHj` zuYLr(uKf~HA!Oz<26y|;WnTFtIrEz3^S>o<$3RdGxhnsIFMiVX79~Kx)C3u z5jLa?$((1}e0?=q^ply(?ENzooDLOWl0OT04G-jTTK`C18$?)6zn%det8V9pm@cSj zn&~87cfapkCa2q+U$cHRSFxgzFWJEb&nbVdPpG`!8!wH-&kLSW8*FIYwmV5sBCEi= zr|Qd^0%8P1cOMj3ZCED=6B^P^;|2?E+4u_DO*`;MQDx-G_2+SE^+iEGd;u1h)Z(P` zLwZdXpLVm>mo7QK(sqXG$f)kP^!P4_acon#co7{4jnXd^F z+?t}`;pg}l8c*9`$|@?t(}a_XxbBxV~1FzW|>&vO|2KKShQGWq;%v@FZo zPp87(enEa&@wpuH`oJ072`1hD;#6_mVB49Gz1$vte#zDxw9CG+djvbS4z_>5Oy+;B zn#vQeAIXlF?FU8`_u@c4{LSzn-G~p-2piIcWEL`QjvO+R`+M5}u=ka&;Mmk7xW8Z_ ztUam4j;B(;R(39fu&8*i5Ty8zCg=A*hxr}Sk-7v->ysndE9a0U?ag3(qABiB#~1uz zY;)wPd`~H^c^q3+JPPyKUU)nr}L4RVLleQ@P!Klm_y8}Im{XyV`( z10wgJBbMvE6stIN33Jj)fsZ$z!X8|0#DsBCWL)t*ZcUUcpUg@pzVjuZ`gR<6eD6Ln z=4?LZz92(-vVs6T1~0M04*>W)Xdv@(cOj;bw-H*0bzvJ$-!{t|??OJYGzS+Ao)YHf z+F<;MS(wK4{$RQJV8Wu_7|WPHhkLsukoYulJUcF$E4k&KXf2l%1{IfWBNA;^G54V* 
zeL2qEfqmtE^XfaEg7t~cN9W1sG8^&!Hf1BS9ky#53XN@+n9p1pC5pcCU%j_u?rj^+ zHa%f(g?uiv^{Kq(P!w91n|^bZ(`UO>&X&vaIQ=^IT7EAMvEB9#D{2v4e3HM z30w4pZSDI-oh4tn1NBmfxUbrvQC|R-$4l6JRj)-po+2!V2294Tw7kU{5^qQXZ;s~r zhxeVI@&a>-$5Wn3$5rj)PuDBs&1P()9=wa~d*ja9;z8+i=-}n&@LASU!SYeh@Klu< zvPAEbf|Rod1yA=(5a{kqq>IKF3!I*h60C{)gcFk%(sSV_Jbl{*f#dG|^qWCyg6Y4E zWk;70O6 zk$eqVdWU0UoQq-e#l>Y@Xk6rlUFSGH8_k`#FaS&cI{aT>ifkMG33nY_;LpAbCer8D z3g4THG!FQSI%Xy3@OGUN{XF+D5%HnNL`|H#(b`z(SSMeXSuaRn>wuTMt@E1a9pP;7 zOW_>57tM~BZI8a&_u@dZ5$~})q#N-e8ev1akW4hwrltPKvXj*TSnU#hL0s-WT>Ifo z=mO>U%XD@OWye#5C2>!v;7fuxrQ4puZ)*uJA5}s08BZ4)e<#0k&cU=aSKKGkS@7hn z1$iztk9{`|8BlP`qY9qF>@Uzd)kdJ3e+&O#m@L@4usnR?Cc-PDG zxJu)8ygfFP%4-b3r?l|}S-+oAi>?RZt#dr-3&l#b?dA-+j7Q_q7FqbaB~WlA=_I8+ zU76b3Y9z?^7$I2Sj^hWMo$%KRr|2uA9^<3J+VI`eG6@I5FiSQe9WOr>i(A#6#+N6o zg@>L*L4zr#umCS72StQS=a28;e>r)Q=(05+k-fI$>*rju8BG{qHsuy?=2_?OTE_8u zRU@8eqyjFq_1%v&Nb8_mRXvBDpDvj-Y((bX)x{C#+g$H_Twp%oP9%TQ$UFb~-djy6 z1zQ&y2^WV(vwiQpp(U(Ob-Fzrh86taw5~|uM3#SI`(D%Q)%m?R5GGTG2kAz9h(_3u zF4L7M9EYVw!ddN+{3sh0ppkVNryDoH8wVO;zY98S-+LFA??*ydR-SIbE!zgb+X{Sg zp#Bj4UF*JgdyQTkN5+1Nh2eh}Kw6^^-_6+OV5d17r<3s)l=X*QaL9~R^o7);vSr2Z z>6cxD1m0Wc$?nY@D=TO+l+80(j_;mym{v%5O=oL;q17hO#jRIgp%3S!${t#U;fEHz zqw|hG#7(Zp;`FrAt&t0_4uYd?8{zWO7PHZ!sp zyos71JNtT-%t_q}3!A{=Fuzd__6FooPe0oWaHH>m_ta-g=?*rd%i5c)1VH{3K>BzkBu zHc4(R1qB)`vY!5QTG?&gWOgn?FLP$+En$Rm@!bz+ohSZG=ICp}GQ^+!zM`lX2cl1j zl#7vW#D{2v4e3f{WF|0ehR4*3)bMU@L|7TVJ?0bn>eK;XPy*Qax;k9G)`qYQ?q7zl zrQAXMtu5pTMP;d|_+X#D_o&WH;2S+0j7n<5#uuN$n|tPSJr_#Y+F+2#7VKD#DT)E6`5kZu%=ZYGTvy(Pn>uN`)1mzg>2gUx36StgAaUD*}nIg z*XhE5^$whSw~{zZ2F9~-%1=u!?!|$0)iOLtH{wGy!iIEhV`Na9-%rlq$N3z>X4YRJ zhHJlot)=N$Li}NN96T|WV#ia2#r2aCzHNOux#OB2A>M3-xvf9b$EU6xXZXVhz9v=} zB*To8u5bopn@~gfIopdqYD43ed(iK}4mcr9O)#9d7LUvDr9;mf3Od6)@BzoQ@QTHs zsSy*hU`1m$zOpABKX~9X)%}r=4^Xiel-mK?!+R`lBvGUT9fs3_Ma$^WX=%7PY#9C) zUnkIAy@7hHJd-+dMOSd`kfJ~#b07Zmd;_$$-$>uPas%fM?=R5s)r6ga^DQ`sUc;cS z9NclnVSM$AmC)pP3i#;lgsraa0K2mWf$tx(_>aoQ0_C1{?0fBCIM*hkC`nov#T=ie z^r3Lg2S{7^lLw|1?A4x`V2$8Q!u#G8`5Jr@0o 
zu#`4mz`Zuqfm7W}fq8JEBr;|g8-wU~^lT9>gO5Gm1Gj)AaO!n7zJ;;Pww9r6|F!3l z1%09CE4<#kpI+U%US?PuO>azB6_Bc-vfXYzvLhKrvO~ZQztDb}o-fvv{knCTF1lU> zH;l`qfAeEyAH&LF!-^6*D&QhM>xnOZ;jV(xRs@xYpV0Tl*3pnVj`m;sfEqdC zKIqX{1Fr0-Bwwzm#EQD6V82zOrReWEyfQe?i7OOu{iw=)6lZ0(4Ca;D%2Fknr<8%m z=a_iJ)BA+IIp#dWN$%4N9|qgC9ePp}Hqk^l>*1g5!q(ps_eb%^dU+GagZu~Hmcrjd z3)whbo!-dCX-EX-aH(iC$F;u?*ZWT;+xPb5d@1V1f#lci#T<}s#D{2v4e3HMiDN8< zD>Zr~f|JUi=v@=7FnTl{H7t)Ja--OH^xQUO4J3oG%r|qCdH9b9ufnfO=RTdnQ`s|y zi+<~UwtL;e?6a;?uJP9>m##nbbH+C5f_%1}LY?i{mkShp`)VfT5VD#4HE|N<5<$U) z^{?SFJ0tpU=R?XvaVc1IT1?&CGy>nddMIvJmkGw2A0rPldo;6qzJRgLNz}W7YIuU# z`}wq|5o&j8z`g2%xWyg) zZQrx?a~3BTiHBZc_O56&+pN7)@1~h&suW7VS?Jol?E_Z|-x9+2XoL;vTFl6xHkbWO6V)y`LgXc9gUY}FEMa3PIp$g#o7;Tc zC$A$REH85@Fe76kN$5t1b#fb|n&PBBKKFqek4}5aTLJDEn>%?PrhUFCmg7dPShM(!FiZs_xUhX`L) zm9S^Mrmud_#_8o-X+`C#OI=%}{lz`sTG+fPuI!$DFAgMo;AUD8(vA2Kjj$nINM^;W zA;RJjwW5vz1;iQ;4XC%Sff{I}fUR3~oQYUdJ?z6hYunG^ zMe!%~g|qJg^T^vCl8)E^eJB2|liA;EIz%{lmpo1lZph=5bV?}FUU`foSd+|IxbaUZ zYM1X{o8OBA(PtSRq#N-e8ev1akW3@fW_Y8AG8JIc808VOm2X(3E;L)P7(0>1WvfAwx6!+AC?F&wUU`Lvq@qH@Ln!XyYTeTB@ zeR+|*_OTe0FZ>Dj>{O*L-x&#|%$){L|7uVQls2{3S_G9!t^tnZGvvoalGB1-5+xlX zIB;n+bX0r|K2J;qwSLM}{`f<%@yuhe{-ra~|NKoe?HRYg?Dj`Mblw?$nz5IlUdDs> zoIw6Xkr~k56Ty2rbr;wA<4(+|X9eqH4lOU;`XM~aS}`EQR|!_}h!4>S8`6bjHZW}l-rpx$ z{HTlTnCk)qf_D*gnzUfl6f-+)8;;YLKsxvh>2J9RdIVmCqn7liW(Ti>OZ{e0QA&fUd4G>kJ64!O zH@|W?;-oVG&(@OVIU`6dPjmP}*#QSh>hQ;5BbXFeL~)~9pxx%j@V#0E(1og&TYc21 zb2X~4=8!3DC|3Zb1FJ#UB(BudcNqCd(*tWC7|eY}9FycH$E&JGWnD18QNt~_vX#w|h8>XmRBen~9?x3cDgA-IxN(yF49WXR%$>IX!@R{XLBQ78n838Qa`bR$$+Cjx$qtt5}Q^XKSc$ zJ5SM(7c;2Y2?G3SxR};a&8J(c_S19!2Ef@Hr_#$^KU0Rx-rH#w=fUg!8Pw=&_4LKf zu2|azC0g(5LwNl2ahT0Cg4!I+8 z4LDzBjm>;}h@EGS+UJ=6VE!;dsgiQ{Fy3r8kn=&f^I(EFApSG|L%c>GpYECX)x~PQ zIXeyubk16jJ0@^A_WY{rXuB1b?=~lktY)|T8&6BW`4>nY8VUDm8L%~nfyPbF$UoB^ zn;eYE648YbDV!#cC+uA2RPqx0UL1($62pUZBR)hUY)BW9`TN93_~ag5?#bY*OUtK* zgg&I^R@+lobPd=z-89pfoy#CB18;=VOHZZ&Z+nT<=;3f;OQxVt-~02u71)`>r#`ek zqOR>YMPFNzqN*w%emePy 
zy5Fxqxnsu-@wJsF;N74~sB>vN)l+W)Wy`9m2TE_@pnwtdcIJ))LxWMnzNz`*_XnHe z>hbg8EVo${uPmJE6z%|u%sHdmpAI3q`0m6#vys4MD#4DY4GzxM=2@l0faFHbTR~l6 z#YMkzrA$}JVh~AW_H_2~sb+41Yvz6)`z~Z1_H#INTcya@x7_XJ9}Dxn5B~6$I_m!O zX%AUaV6!$#bf&geUK^aciyb>VHm`ILM>%pzCtWvN;mMGCF<`wfdw8=)_Lt zZ{MKKS5o5jS!+{@w}d4_Cs3aLhSbn!s?Y{=Cw9$DV(k*SZ6aR#%Z^$|9l*ms<_f;5 z{lRe!Em_LLlLG7LC4#y^xZusQS#;8F63-I^34CK+1+_bM=@Pq_I5TDn&YIZLA)&2! z^3Hbp$80>!$*=O?w}!t#M^NZ{^>{ zr+p49e72h`lFSMhwN5=}dFEOJFK=?>Kc7}f#JQHiMXo-H)ofiyJM}K>)8)uTHnAE#>8SlQ;h`$Lk{C# zu0=BYM?6`7>7{wHaf+~XuU#s*t-2k4_-apjk2xkyB+~ozy~{;5K=raGRPz5n(!M+{ zrtkm1RZ&V>EN$9nre?kSEj!tjl%$2Ih@y}s*_C!_r6g%j+MAhsODPd0S+Yb@D1-=Q z=R5b#C+|nUe}DJ!c)#ay^g8GHoY(Wb&UVk7!^gi{0M>F6-*A+qXAIe0Q6M8E=g^0S zOd|Iw>s7lcts%qi9mW&kiPdEOb<*o4|08o!IJgG50?Nd@~=*M=}F9OYBf-Zoq^9C)+c8L7Lpp}F2w$jbK+ph9Tm?9>aTuFd@-j1H1E;CHz z@V-jNNQ-tVnqQxEnPDI&Ia}q+d_x7TqheV8-+#XSs*EMAo5t*TmKZuCaQ=G zmRmzK*&Y;ouL2 z)G||(Oxm`BELYx6Y-lh-8*-zFfUQoXk<1=+s(%5|w<-ZEz8ZrC2g>36r{Toyo6AYj zw*#c%b4zS7Ad6knJ5QR~+#+}UQ4#JKK1QrRX@)yoM9FVW$H_F)wZy_h6ODJOm}8ud zHuT4WYlwL7Hq!6iY$8T244dOVh@6)FDqQ^54$YbU1ld2JLUZTGsFCP{z$FY`c!W&Y5<`~}E`y3@kww)gPc2YDkrD6EZl zRag`@Wk;ncZWI=k$5&G3<@M_2xgQje^(J$War@Sj(XNT;w&T)uq{%j?==m_kB}X%y zY?7Q`zcgevvbiQhXluq9)%JGa=4kBENYv?!6^Zg(2+B7m7lY`!@ppH)*!z69xZmom zSbtd~VNkk`7|6HA9xhcO=lD4ik*)?ryLvhL`SdzsbNOcCOOg*!VY>&#j>;3W&NE5x z%4}5g%M3zco-4*b^a!mlvB6|MV+4>mSMpnOPjdW~U8u+YR|qJ&OYHvsm8c2q5!u>I zAcWff*g2alqUxI-8R+LG#%A9#I9GH^Y<7tZeOI-GKC~{Tp0y(fLoIDdR zM3-NkO2!_E7Jrrg2+o)OiuI~PsD#tBjOwNaQ);YRK3CgpFL+A`Zx3l zxv0aAwCmNv0_PN={U!lqa8xV#!SxK52xbs}81r#(-YoKs_boDgmKK2}sAoQGUyrYg z+m7DeVJ5a7zD&F>_a(?1Dp*T>apfvUMK}T3i##&cML)($$BBPtX6Jmg{*J7C7Ae^H zCXLa^zE`&E+yr6p-033!siXHJeTY!y#n?(U9iHIG(w7Y{yl_*u_<_@lth?2X!ifxp zM9Q!9Q&5@xW2Ii-q_SA)bN-tN{!PO`&$u&``v2sv>fTVD|b*IdW=MI8Ct|3 zldyT%p)J8^J~>IaZhYh@O|`0~pouoyMB8_}5|^Fqi71J0^8KIDnr=%zLipBy#V)B_ zBBJENiPNlNEbhrT{7BdW{QIUD@@cj^dDXc=bQ+vYu4$Zrzt{8;2RIoJ0{L>W?vZeD zie^4x-G~#;QPXhqp^3z-6D63&)5DnOSSPYmmcaRMT|~wfvqif1eejZCBXaMBDfryJ 
zEri-T$$2ZstqJ!EeZsu{uQ>GFO#`gB3Lg__kHf{&@b$&#CqSEPJcMTe72^={Q4cA(%ElVhSe{tp~fYYgB3OJg9zlh+a7B_Gg-2gV^SY#UL;#4JRkix6fkxr2OY-ic~! zy%7byQpP?E>BBxVPr#=(5tV@z%}7DP22}62ib&z^V+3IC7HKZqgRRbGOQLRvHSA{Ga(Aqzrm#M;Tus4{RKe*ESzw9MB9jDW;&f&L}BZ`o9*o#p77U&$>i zER7YfG1Exi#4G=*0HhTSfi3^fd4oU76FA)2(m37!W0Lu?MNQ`2t2B#Sl-3(gofc7c zfc)>QSA+VB>}Q826<_-!T^r22CmkbgyeXd}!_;8^T%9b)jg;;&yV`#zcZ3I};oMsJ z92#3l7kLtwt0iz7Z#`p*4N>QWu%)d0LdRvI%Kh zUyr^$eh(=L#l#rnmh7*w`xJ$jXWNObgG8!~Q(n&hs;t?2K7xuSKxpAhXu*HE1g zM??lLpOEZ@mf~$W^`ggbR*AFC7oyL6HlhZPl2Ni|vdCnzmZ*u5fZjOQiRw1UpicZp z$PRpu7@2SYHQ5@8+8>F7e?;FksMF7b%@%YcTJoBxUN?%wW}gzG;Y+I&p9CU5;A2pT zjx$(8POlsrB^~!3mh`9NXJuF394;swo8w`ok>$&CC=IMQxMK($yYf&h<l$nySXDgD6HhMi?bQ(BX zmu+m2$56SjQD-}?X~8V1rW6+_+z&M$I0nLpBPzCxWmitPKYHI_%*sQE0>4W5qx>G+ zSpta6r2llh2m?~17Yl0B<>AZuFnn(j&X zS~-#&;5k-*G2>KQ8v3J)kSx+YqP&_kxP?p|3L{PbrsIp90epIGFFvVEx%yS6e)ZO% zoaz~tDtMWl3)bL#s@nEaNwu4;EBV4FncN4dTA#mdSN*Z;U3F>2Rbqint3kuQLwHh{ zO!aJKgKC#hPqJ=Nr^r;%ROB}fLLXc06wA&Jgb5D@O32@!`wi0&$TvRsD z`2C-KT2)2%>|A?Z)5CKylrHx#O{L$#oI0pnV6*lt|N6ICRRvD#>GpW=^^M#S9+Yi* zlyU)uP1#XtiW`MR<(VNV(?XLdo3?p;l}(;e^~A(=Bw{{E{LOJE9bXF^rF&N>F56Z) zRUas95ly+>B3g3jC&a!!tBNXb@UP`5f8o)+REZFasS6rlG3ciRhnk$?zG52uV|&fLc79iMHoXMzEI; z;eK}q6u?Umr^wTYi1%9Ppc(^x;9U_ZedUPXM>(Sg(*t4qw5gE(v^=58I9uez_e_~IPEO5Kew{sTw!T%&lC$c^l15oi>8%zbKgR1;`Aqu ze1Gcm`De-d(>hyvyzIN((%fljlld*lIcAGJJq@k%Bg@t*{5L->Op%Oxzv&cbCrjfr z#zuPX%4D;qY|f3(Ru3d|&53)Y^HXb1bM6Qa3g@$OQ#OT7*->eV8-?{ylIOmpOiRP) zvJdbZsPW1XboTxg$f?yl@s}*=`C~7=rSnsYOKX}KRb075JYJw(F>UM$VS2&nw)f4q z{cu$%ABhKjpuJid=-{7v(C(^_n8I|A*hXZ5bFsr!w+VR1WU~G9L1KBR2 zv-AuXZHX_Fn{M%MjMNjKlou4PT}--3`%~ThTC_f!oB27D3TxPITapD|U8I@{&l$5u zcu+X$m-#sqHf2YpDQ*-NmB%zjyZCocLRlhHLG;#Cf!xoj5sqmK-CpE=*9IbT=2jvmZYIW$w<0n%7m&4%^GU%*6uWez zhUiZeUd9hIiIQCL)-0!f*i zwb5lM=Z3(bIefxtQ8#hDQC;XAyNw>xww#sTQ%rFQG}9$71rG`L1wRDttL!U}m&}Ez z@V<5MfDvI1yh%+RUMpJhm?B8Ag|neg@DWik@x)$ z;RiQ4k<7wc) zG81J{+SWFMn{PWrTVK8xtv-RGzp{5ilRy6yxwQVSY*@Kdbj-r?{`2&!?`cuA@1ls+v+b8IC@W|cN{!$86)wr 
zeBtxkn#DJZ*BhqKgoL{v{nz$hmE1-0=G!j=dhq_Qh#WR4yqag3EkmDOruc~@m*$4w(NM&cV&u!Qb& z8O}xc)qxN)+y5QzUz20Kv9=RmxF4ymw1~Gp^u5@+Hax*Pqj5ZWyaTaj$5~l#O;oX- zmePerk}bF|@1FH4$7%S={LA=*KVz#^ugQ}Jj=bvhec$jsJ(1RH)l#f0wwsZsXBrVR z|B9^rAOEn{TC+lwbmS9m*5O=T(-LZ(a#zvDcU1-U=V`^X*N2XghGojurM+g>8!Iz$ zUH!e-v$iH=$MN07-MLk0i`^BGEGu8UZ-*bjcR*6L*qFPGivEE(zk`#tmVA}1x=cF%UsRU45PcRl^JKA{}-oQJy45E z%~SkEHwRTIU6$`IETTTy@$)Zie3~rqEnNpCU7JM5&3c7h1tUC0Y$$BXj!IM9C@eoo zo+?S1(_L2ZoeE2lfdNZvM&4PR92bGj+}TJ_wk+Xmdd-32@(SBy?L6=j7T)p~+m#2v z1v9-ytqpdKizSwGqS3U4tMIIii>wibqk>(ev6r&Vd9^*Z6NB` z?;;|U^3aJ9Ww;_H#P8G`#Wn;B@VRHsic8MSC5(qZW1k%m;Zzq_d~KXJ-dy4@TJ`Ll zsQqUxHkIK@G@=Z#Y)>?PZHplpUEe33^id1b+@pg7!F`h7voXk&7dco6oI<>NX@IRa z&%-CycB0P_Eu)YQYx4Crb+p{~IPrNS4}Yu%pbb#Gc(Ls%v2y-*q*iHS6}x7eXs5WJ zuGc>@%gufqlR*=X-v+RiCm4MSh86Jd7$|miBw~~@dJW%jx-DDCsk zGuzV2tQFj{&VG#nn>7>vX&SXdD_$E%wrg6!UgX#G@X(5Z{*g zW}w}V)^zn`7CLrDo@9T1fjFU3hAjP^LCl#{ggN*O5N}Fl#2Tj^u%%OUunBVNgkG8m zJyPh1nZ$80<-%T(aC0$nUULd|v1&F^lMsq+>$JvPgC}7NqQ+sS3uMJYM{iQ2XD4bq zd=kCmxRUIhzF2IY7DxQL@lE_ez6@(W+)D7;FJp;{`DnyR$pLy!8_9zo70BAXVuGzw zf}YE|hyF46jF=H8#PDr-vEfv(s-^&jXVq5InkF33&cgVSNVK*NaAARq+0d|h#h)9K zs#-L2k&J8SN5$#J7#Ch8B)xlj%ezU17Gw<5Bg3)mtmaRX=L;&!VkYP(O1&kyb4_Qf zS*>OqzlsUcnm+87j*+}GcIUignv~yJXI1rRg>=35SKQUy5gt_j&o--bC~V4(N>kh@ zEGiFGW@t@S+3l7bk+`MQ@)aNqCUcZ z(+NB^u9ZCc@-o@)8buyEP=v|wB1oXjkUU?ng7(Mk!#4M5knPwVvdPjM6TUSkGqZH@ z^Rl3OzhsE?c)kd8wNSu*-TFqFZq=;T&M_zM7%P!?Qwnk7!3Fa2 zAy)O>-x~?mm9ES`rdBvfc8Sp>fV}wU1~KzZlUT!F3HASS6b-FV5#LTK5Keafjfj|K zw5Gf6>0}-0$P&fZY%jB0@hi_ILb+mv$02Z>dkI``A35s1%$nSbrTa~F>FI>mXMTPCNkH$xgR<(iZ~sON~}z09}Jx4bCqXEc@G*C>52(~vQl)xGnAVAkHc ziub?n({Tt?HRg`+pls3H$t((+vZK-zHww#Jl7}ku%%>aBpC!d`_X~{V=iDS}1jodt zhf?VH`n*QE$Bg0vKR!Y3p4N=iPB9fL8XF>CSvM-E@jIQ9%Sc?ld@eD&K9KZH zE)Y6qD$;es-mioBVxxsMpT0{RFUN=rZ)C9rFG4YS&+C}N)?_k(5l`w=@(7h(vZUdg z9CWR^1NLsyOCq*+wdjJ{5whr`1zG(!0X0%Oj5&=vM=YIn6l3F0(0z$sWV60K{`ONb zdZ49DeCEY?^5KC>a?q>l}7}kR)1DBIC~|MT>C^+n8>PBe5o%|4Tz`v7LPSwb21|HDo-Afj4cQE=0A41 
zQ8xGU)QZnDdf<U>LFxRoHd%U(nXP+GwvmN*c^xOb@^ip;dd=b1m)6`79+bab#!2oQl3-JI zRGQ*OVNrRgGVdP^D?6v!1N-6(EbUe;Q839;_`@ic?&p`ExJUn{nBp@3g$hyb>n&bq zwiG&bOGz|5{y5!lP-RljeBbN*F4e`TSw^#XQpN-Eu*5g^mDaST5B7JX^^S{(YahF@ zv-{fdOZp7lcjsyHjn*Oji}7jP-^~tRBiVy8_LT*`^uh;>$hwDB_qP&Bo0RY>b}XL0 z@B>ksXos(MjwQRijC?uiZLNY|*5M zS5GY<$$3q9tCu<+rl5{^?rJqCsEot>kG;o+Vwx~K;0^lqbtii5;CnieK}Pcrxcfz&1U$rnd33vmQ=WK#uU-p?$L9ZtnX*h#8Q3iXjLD2+N%j$ zCGicT0jA4Qmc5IuJzht6ZT`d^+Rn|wPTzBEqun|PpFRK0FgZ_7}I`wn^?p2N9Ni@;#HD6saM~= zBNCTfK{7r*LO1$V;Z|*CWS>VVcD7ZOtaR_kLaKjb;nDRdXH$t$!?F9s_T_n)cSa`h zvAh90HGK)<<~vSY;P+NE>8s>itTI2*=D|uNFlZ~i9`verp;^qjH*mx8IebQii`n{Q zuHfEq9@KliwraNJ=sRso9v&@ay$ztp8FO=IGv|~btjgt6iU+*6Wjk#Eik5E+!@{c862TJR%xR=X(y<>vnt6}~!vzAk%&wnwcH zLySpijQ<;Ch3P>60bN)NXj& zn+6g1BM`+d^rDM&6wohztp;u4!>InB4QQ#BIT}~AAN71?BJ%yCh(s3@2^W1j0zF#k z0z%oP&}{_DJsPjF&}=r} zI7OiCCS7yzMsDVg@SuF1cl=j2g-zK}X^I<#MdjHpDf9Q7l(N6&L*N;bi4KPzK<_O4 zP=(D8rt4_rjQ{QLm2h#6tw6btxya^kjLKh~?8=FIM$b=goYxVy?luszQ(hzLbJEZ_ ziEla-r2BjS+(xmT7bai}qmN^I6+-amzr(R5oG--nhgEo}%2K>Yfs1$Wl!?u9IoQxR zcYIuoq*?qBBfj9%hv`CMoV=ATPPUE0^e&Rb@+ezkC`pc#RWrpy<}&e9R%N)ZL!$U= zbG+odr9j+5^Af&vRT}1AAB8C>s*zrftMIse&vEncV`#>Yo05flfp~el87@cY;QHnp zvEF4OCLfw2djby=x4C%OcP`R*Rga?Jy-^0(Luqit#O>v{J zs63g|bc!oZWtV*`kQE|R^l*N)JC@ClMD-Wh(*2;$V>NnD8^xvT`4U`j1p`$x+9m8; zG#NR%X>{9rfNLTy`E~;KskXotuHoQS65klaaOt+U^TAgnb9=nli9L*b=xre$htDNS z)CI6@goy}^wndpc_99&c&Zwq4SG3e6Nj&AWQ1s;H7PQ^UP)r0jiKi>2pl4eG#VaQ1 zVja*-G=0B}cwoIUdZOnDs+dw2MCr4=&h`;hts~XTKd0YoUzD#LN+oF?%F- z`J_hZ88wJPf3zAL;HVJFCqu=1mz^dQxLdGGfv#xH#-OUp=`P~D)WeY3M+CW$UM!5; zCq0+?bBuP@x*PeGC-MqQuZdkP*vKKiqL*<+9%~4^u`yxPTxJhHfphn@bZt=e{8aND zv-TG}JT|HLb%Lj%!=J>m(B%L2;pZ$!FyFdPyIAtRjBX2OzWCps?GWvpy`4E+v-lLj zytPtIhZUQ0M|e;gzV+A6p|B}CDot^tu&6xq*J>B5??^6dv-}E9e|(BiE#8GqI^&GK zwM?Tmy`3W6vrTci{w-SUsdoj|XT^!O1nXAL>mMDb`+^e03*RqBtH&O~_O_iTzI{ND z;gC9dzBB8=GHlDa+eDV!cwAs$X8n51cKpHcIdW0J6l?1f&G_m1B>d{jF=QEcA+FVu zg}ZE=gB!i9CZg|K;YgTJDXFNorj;12MH`7I`yJ7V8?_zq4)OGl^+2-vqGC^|3=(o2(9)ChW 
z#RL^y=lv?(vZf5tz@rakboww!)~@LmoQ>ABObKgIUqz`At2+U^4OU>{Cagf-(J z!m?gTEI^ayoy@#R1mVD{)3}4M8Bf3vtT^=sx?V>W(~#ULqc~KEl*k@|&Bvb7^P|Oi!uDi;|u6H-sX8fS>u-N zDcC~_O1uNF88g(|p*zl@nv@3AlRU7?Y|^`5rDmJ#>G^5?qO0_HsC+Y8Sa^plH*XjM zU$jc&G&4%QaD)eym*ciTq-0Qy3V+g8D>N(!I)f@{5%vuF$!TbYkmOzxuQW-@V`<*`DKx8|<5jKRKR;*Ea`~O*V%~9Y?5|x55oW z4T3Q&Z-DHYKDPQ`sV5oIuTMJH72y|L-jJ6#*`)8fWOVrgYa_(2kvO4(#c#j*iFbq=qvv$$MV%+2#q#}EXicX?bmeUIjS~SYunONrQdiW^oI?^2mB9YvsC7IAVvHmegA^GF7{S z%u8~Xm$iE<#ZdpIaYMT1Fzt`*15b36kHO=23t9o&Ks|ga?&=DzTxkDLX1n zaig%ReUk)5k}?NP%nsr|cna26MOpZr?>M1&ay8n0oy5&w9jGTJIVCPD1^&0f7IOM{8G=LR`AK!Byn@f0$dp~mt#>$zZO=B)sjK{OY zIkjtOO@~bGMJU?}?gO8oQz)y+FBtn(}Wy%Gt#V z=$M@(Pzb~5wl{F8Gi!tgWfQLhvnXuJj!IM9D6BD(JbjWf@7;}tK5Y1i_^uM6%Phk1 zXZ2z%O+Ad(^yj=u^caca(vT;_Lrf+S*_`E~pTXBEf=ouA=P)>ZHld_A13Lle;kO@* z!)28&R}3eAq&2l)qNM!y{v17-c3BzfKl>%}3NkXv^qHd+r~jS_GIL}+w{O_wyUUyF z<+;<7%jdzib7sh_`Tu%EKQthp!*0;!c^S~_pNi=A#yt37*GpJw$unf%SYyP%qZz*c zYYZHj$%Q*BK%{k-JW{RQ0kvn&g+EMILpO~(2VaaWho}Bdhnv5whF|6Pz#b+q;Gr@1 z;i58UGJmkL|h zyo7sKX#=P9_Dr68hYXhtQK9R>i2q!JE?A|!uc7rLiF(tg2A@Ov1Bj2aC!Ulj|ZUw8tE`l3=j6v>CFy$q!?4tcL ze!&hX&Qu@X;fKM6{08*Jq~pjzUK)~>QH)xE7m>LyossOHSK#`_GWg;t4Ya2G7UG>* z06XeVL;CPqRDS(-#CY6(ME~kS#3q}GNDhHUti}0AWcpr&|A38|-kS@Zo3afOT`osA zE(k|`iAaP~yb~U5dWU@1oP+X$UD0ov0$_0NQC3hAf{=CvNZR)%MCNxU^q#h`m0!x2RzNF?>F-NiW_9K1fOD3 z>BUp?Erz{cnRM0IGjqSBTclqcZ&Bf^X)$7Z{D~85;)euF^v`@+)2`e*v_Hw|(Tsph zS=P~eYD|2akggx@EuYy>EY4UiaLuIkctln(C~Q~ZE*4cD#f`#pU**I+R9I~393bRq zUb(~FlRlkalkLS@Um4Cf{cTEXk#|0q=0I@?I`=_Pl!^%wV(R!lD)Kx*^bn0V!m;nQ z05JYh!INJY$8#=Q!v8R)j-xZMjJEmEktCR%m;mPplE_DH2q|;p#N1QFxzk{bR_%;VBCBdcvfr%m}Pna@A`58 z$DKRDT|^`hB)kNjj<~=dyuXm2IO{r?guVv{z(}As{h?sd%wfP(H3Hm`I0H=ey~kU3 z%EfGH_f6nvssI%3wF2fnwg!CEo(Lkv9fIFo-h%RuaLb4i7B}>FFn|2pOj^_7n_=wT zN4l7{x7rxWjm4a_)*nosT|aln1t*ri_h?NOZj86o%T1v*Z9Q3Kku%t5BAmQ}nRT$k zN^iv)t5xrk%}4yznd-`6vKfq z!L%n1@T2m*u*}Q_(Da!3V7zlO8v# zte9dc=-m*(uberSd+!sM*O@hud%d8Oqc(Gpu8$sr7OWLV`k2IE3fm_(#!|!W1@pXF zyw#;@P4?lh1GG;^d@%ogCEHJKowTn1K4IbB)@O3isK%-?Hr--Nu9^i~uhwM5c6GTE 
zYo20<#j7P!O{XsLr1K>-xN;xt`pY@0qsH9B4WirXq4j&%_FpYnJ*UUfc-ODEaw%*( zOkG+JB-|)0L*@!*M6erc!pb14#g0cg@8=ajQI&}TYuO;dhxE&|?ezBZw5Al7{`xwo zGpLB)&=tu&d`-+v`?&jG-$3y!*w5sZb#MjZp(sI^&IM>^ml8LTxsk5xgFB`|&I7(s z_`wP&(*Q)ZG`it=89(9BN(8-nj)|0H_rbj{^x>ZVWpLxz1SEO(JjD9I0Cc=$7u+eg z5{)!1gSW=shULDWgCou!h3ActM-Cq2BY&6l!-K{bQRkv`utt?p|||otui0EVpM!%@70>)fuqdzqG(s_EV4*&*mc9g_@9M&=&Aiz-++Mpia~zkSt#KTsUGlsF$#)De*MeR#^1<~=1)k3@i|$tR$VRyYV2 z3dVCxi!~b znS2^NYq{9+ti^7TipA><|FymEx}8|}XOFW~Nsgp(<_+Z2pHp{4GccHmQc$)Uud3l6moaFAq=byhG%g7kumJI@XxT*aNq|w zh_U7q)RHKJz$bFy+4GDczJ3v;>~j{W+WM70%&TX1uUBu^jbH za@=x<3iff$<($7B1Kf;d=l;bV<%V39F!rC)CfYBGgC(3|=WmQh(WzD%Gge!vKh9-O z^K<#vR>!wHu@W8C*_X$4&~fT8?H>I(k#&cAXlx+od2J!v;l2i4R@9a@j#={+j!jY7X&4Oa3b--@p90AK>wZLWDH-5`ob3sGfQDEHI6@YmX!EdY| zCn&P$0R9S23A{Bn3Icbf2s*I!g6fP?0h+i*;CXYCK-N+Q1pEVe_a0^PuG>8YK3UHN zo^5g$ydATfKjD@h@Ubldcy`GY2-J7r>4&?RmGQ6fwY3cdORivimTs`1bYd8s88PYg)>MOE{dedh@_}Mf41J7El7LBn;o$_CtUMX^7dD_}pGW+M!;~*v5 zE3`l52cj6E)z3NV_3BLJOg{a$50)D^eZcbGyD-h5Q*UFg(KYG z#y+Do?YKQ&AmdiU(-Q9!D9yFwzkd|T@g*JUK55*Gub@8|2{j2ap+_gjqvF_7SVrkP zT)qDrlC5Zh+%;;1WeX?3@|PI!M9alUuKjrAkcT{c9$yC=J87V4H6qyA;tD)}MgiQZ zy8~W&cNpH0^%Y(?^Z@?*VmJC>+iozvFAsbcD32aedxVg!B6wL%B{cTObi_901mXb~ zB3^q@pa?&1^?p`0Z2SEWyknXLB2w=GZ)-;YMaf#gj6D^Cjvqa|sx~z~dzXX2Ga{Jg z-WE}B_58t9-k;a@oWqC9x$ABiaW%ak@*?Ira6jJ%MvW=dg?Bg$a_`fA*|u+)ML4#b zB~THu2O$Rg$@o?)P&fQvTtl*(cuHdg?4VN^-R3GdCDU>GrbjYHo2tvJuF2w@ec4Et zwLf4J_f*^qs{)l7bUj<_txsd`>(r&w6gLX1xjK|}Bwe>+8KE=`uMIV^#q8J-(tSw>?tn73|8<8-T^HtX;k3)W{_c{)z# zw^cAGY(+STPE*_{teojC%%QuktS%b|MsTyEWl`%P!ATQqzT>z}T*qqZ_-BiLDjla3 z7s4)Guq4Zob3$RZ)ym^>mLk34QFEEjT~1tYViwoJ2jS1tIU*ohj#|#$-$>WJuX)qK zgH!*4LD$+rr&1rpv)&3`KRpzVdfE-Iy|NsZ8G8qc_q+)0%m<<0AvUnloSE?EOQ)d> z@+MSerHWis-wrpFu7HIVYvC`S)L@m@!SJ_F@1bj_RN-AGk0L(HUjw)I%?I~k;}DxPq89L-s56@iM1-tppglE{s3VOOTnC|ugP|cwd2r@ni4f@1_o%N+)NjL)- zIKLLq*HGc?xiXX2BAX}>^-rejnA@g4VLxy$;WVoBS?1gr%NtI)!pS%D z8l~xqc6qj9GeO6JZ%&ZKlZZZ(vxjWh4v#b~i%VZvZ5wy_Uz~m(ylB-i_MVmd&cwEu)W1Fn1R1a4Q4A|2Dwv5{N~+gmAB+9pW2QCP{1 
zu9k21&th*(jb}Gl{Nik%c@na1H~}iGdI%10zDCFCt;-i_O(`x_GaO(qaV+34A30<_i&<9RM$L;L00g0aB0x6c5^wnpGwt`~e} z%X@IQLjoi-{T^g{&J)_xD+`s?5g>G43%nEC1MWO-4i!e{gPX730&lK43Arph3+?j= zh164aLH8Wl&W|>p4mDgs8F17-8M9!dkF`jUjHY+$T; zGVs1H8puiW1M7>9TC&!c(d~V-ej4~H`aBf8-WB@rgM)^|4#4-%s3D)-WYP3HCGeB! z>tTmUYv78kL(u+^-AH^+I1-$`6b?C&19yK@NB7j|BU3(qg4OM-;Lh1sVD4i*WHWB!YFG+9I!+eDG|;9`MGeSNv5bLO!hD!)dJh!jTgy@lQO~qGRHGyakJU_!(1W z$!&&uG@Hq8`@~o+n{M^_Q9gT~*ZNU$DrXXAIo)1|*5>4Hb%vH*pUHk%TdVV`8O)W% zau$gp`Txef#IPf48llG;{43S;RM$4Ceovzq%@It_ffPflX?v#AW2CW3HJs=3f3a3A zQ=;>0C^RrB>_5MlG#15;!ur@h&FbYtG3%m<2O~nGiOn3aMeHW=!3&*7!4H6RF2h(W zZAU0Bp$$inQq)ioaC8p$P|a)MSY+!*=^Hulg)u21a-3(taJ z*2yKno~7}EgvpzEZzDJI4xU`Y&rGwT$4F6Ums(x5H{|U46v456p~$Ut`o(Uo^5?FY z^pbOF+UT|y()8l|bdatiO%FBU?Qr_Rxb#VeeKVoJve>YRA-BuuU!0motmFhr=Ds)G zrR#ggj~meX9PlgW+HK#$c5oWU8LQV%>pY{zimUc(r&VFM5?#+G>r`+lY+?K}I!-BW z6qY=0#cRnZrNdx9*x83a#2 zlHgY+NwC>YBlyU~I(T(u3VhR`9rp2_jQR*>fk=-X_`$jr@zJhC*2i1H!}=$nuHCvw zfxZvYerF?6|0GIqI-%C`=q7d8&3`MbIK2_RVq*`EU&jFpa8m$1e_l|ZwvM-WEzDnc zZ@OS0s+89Bd-6rLx_2C>xSP*$E_uy~<4$4EU$>sy%iF*uE=~NW5#@(E>)-62EEC!< z4;1I}^I7wmHc=lqu;X9LnCe*Wlc;C^?6arTae{xnVE?gLNXMcXI-aio4(*@02i;S- z)r_Z{>gX4=X5tgk+`<#pY$8h9pJv?qLSwHCK2Kv&+$gLi9~SYOCV%71{c?uu?XiS6 zC$tfk^Kt;^WbFp+KUmOh??k|w&O>ntS)`5BJlY8Kw7%dUL*n>PPCOc=Y2l9I(gD{V@6LkL^w`^U(8wqUjS>-gpf^$O;`ak^-czAruH0}VjYfb z>}-V{17E{6@0HN*#7oGQ&q;7W;}~SQ{bf{t`5q(_K92MbY)4d=86n|i`N)~rY$ORt zM3UQdF-IK%v;nYzpi715RM!~vZ}CAyd~p+8?k$VHK7^w^`)yH~eLH|zI_oW613eJZ z*ZUT?KD-o|te#RkGs-MI&O(K47K>}?8Ef&c)$jhytdl0P?v4g zb>yG@yGkRj)-xBj`Df{SncvFNc}bxgz+GTi$X+u$l(UgJhmNWFMJilz?p4mzTxZ(n zf6wAHc8jv~zBq~-g>`mODCeaEmz(-?6Q`zX53h8J2swOw7i6a#1ucRxSGEY0TSkt|Tf{TEDVawt-N-K^75Hp(CeL@l zcY(&wNo-@=pPW^FzuCXvPUdi(rE{50_bpf-w{$UcPTpb22eLW2@~v#gw1q6@V<%SS z{?YxZr_mtehF(1#Tff9BIKhu!nPeWfXEKr#EOoX>){Nzn{>^2|4>_>}eh)2OF!gy&ep*%37ik~nqnNxJ(JFV&O;35H9=i<` zY#~|7_U)Ye^YZ@r-T!#xK4+fK`+ClG&75nlnFG=dRoN*C{_Nc;Ex_(+DeI|}#CncO zWkocr9GW#*@AUO-LQx{?5WAYy zc{~m}csbEIj4Pe--5<1WD+I&9Fjl0L$kdK4XH5+WuvWP*XkQ&dC760z^nK#W4$j)o 
zuB`6IdbF-#wWpk;JJk$mLY1O|_A6ow->fBahVLfdc&!p*Z<)kOVp!BFj>uIYim>VU zr0CD0d#*Hj`TZ=+ef_`l-deK(xQYE>L7SYN8}N+Icv0Uj7xA{8nO2gPF_Qg*cm37$ zXRVv$SZbl@uD6w7>tSzHh5N$#B#DC89+14K(G>eDZ4op}xv^N~SAI=eAH{x_vA5B-?;UukXi(C#_|JGqjjv=LQN|^qU~h_wroy z)dzzWj|kc#A(Gk}yNw*(Yo~xGS5!e^ZmuQ$?eK59IV_z0s;$lTAvY2pyT%B1dC`0c z=zhfDz^&m>ebX;)!@^?JdF3Lye)$u(W${}yX;dbPz3~&8BzD8*k#?NpTt&|PSQ|Xx zJ_+4{D%NL}a*@w~eaNtN1KNJe6YXujiU$9KhmY{uqQ~J=%e(EQS$x7v2oL}%&`U7^j8nDX+CYZ#`ej0wWk$XhIvWOTEE7huPc-3*8IS{Pvrc4Rv4X@iTC>1B|ZP-jbO(e zbD9KAiEH0wlh3-#hG}TaDiU@HZ7qD;g3qIqu@Z0jI?dJt?ii1quJ=i}m*>V~`OB;* z5S4>hhTp>n&GH}zKJG+stR8{dVbNgf=uRQ_M&*4G?%}y?n>>sAa7dNqMh&3vx`j}5 z#V~=xf0|M+R9P$2=|C(x1vF}ZqeQhrvZ`>1(9TrbckJtsJTQD;G6;-tL`imq;Hg$T zG@Y3RpS_c!Iow(3Kid#IE>wrzBYpz2!cE|3t|GWAivbxnJqw=sEF4WJAw-i<9LW!g$_$;`}&9n6tzW zYLI5SJQl}!s^HJE)?y117vL@L+@-@k%!oJ3{*Aq%;ttXJ8Kj__>~p5ohJ9`3`{U9i zMRl20ZU%~0nCAcTz3Q57lIa&3t?~oq>!%|h83_F&erJ;C`H4fsPs$9RT&^m#Rik4d z={u@HI%t~wS;knUOvGb{Upypez;olVNJlf==8mUi%RNQC=@eK;Q81!>5ZrL) zH2NAc5siJ)7ykARfl6VXT(E5eyyRL3&CYLtNW23&{_cRwWi0BYz73B4vzJ>JvzINr zQ^(j9O zASte&SolvaZN?N0>|()Y!4FD*H{+j521tCdsp38zcVv^I)vQ*8{BJx}ieH9Zn|j|$ zMxmA{0%Suzt}sh;iQ0l=!QHu$aCYd zjyreZw{n+ZeLSX%E{zBvl;3)xL1R1Em0JF6rAv{ZX?%Q^pefHK?^7mHGS*|p&MLrP zbw?66wz>-({?qj8SwNq{CG2~PQg&g@JT&V0KBDt*nGnM_=7fTdHRZrE_ZxVi!f|Uh zH^B9p>Zse3OdOnmIk?meIjm!$yun@@)E@FoQ!6mR{RC-X|JBg`V43G?v(&0~2xJYAeCTXb@|;5*!!>y~6| zh3L>{7OyQ0kvT1xNF*e;{f*W0u{iwN+$9e=Dx6Ek-<;rd4h z3pUnK0mQh=G9u#0LgAXA%Q-lYee%#4;cuQBkCp#nK5=$aF7D;vMraofBE?@Pp#Y+T zEo`Y^M=^#%%%tl23HR_^Dz9cD3;!(UL&|4rQ1w>weaXLdgR#Xl^DcA~n`L=}waK22 z0*)*rw+-qov@=JKA@1$HP0YjZ5N>lMG^8JsAMRRHw~E#2hx43!-s}67ymVt08J#ho z9G@CYm0g}fg`Z@|@{&Yy%2<+)3OA&d*Jx73hMH8yykfFX1x;<-qfZr`Qlrvmh11>U zM{qZnFZk+>+O%%RAIduH6&WzUgYZ$0qkeQBp^6h8Q*ke?@uVBGEsl)aORiH@q%O3Z zQ0B_%M61~fV&iQZ?={o_3p^Ar?e$}i^w_p)S^OONyw`5FrDPF!DBiQ?hUm^COl&#) zvj|vDkj(IKlY~8;^G_aW*01-Xd{&;rAM~r*D%j_>IZB@*w%(Lw_49Ubs{`MX|IU%d zk9U=LW!$$qQRE;zpZ>Cz=kN{2CW(G8Rg_3RYl&Z%%X9b&hxH}m4OWt6_42j0oL8kH 
z9^3uGN@0$~bK|jWvSx^_lszSH3#N$-{amHkmLb%l!gRv+VjS_tdxy|Js9EyZ%X8Vt zE~j>vxZ*J1UUsB)gH$xM=%2W=%=9?6{lPloT~{P=6v`;~o*aJT5_t}P+;S(j{E{-T ziB$u-?;*-DJ^^0Ke4y#7z3|c045&Gngx|ZS0hbvuK%M;s4j(@PzHS)@^2j7G(CsyR zld26B5)9zSARMaIAL*$#84EwFRD=6-9pKlAPhraabSCVJ2J6-K0*36 z&dNH?YQdB?^DOaq>CDDrY3$T==@S3@f(HEc8GGC$_a_dL8MQwU#!PMF>4Lu=NnJ@6 zO$j4E^?HpxS(zuabzeGNvfv|!-S=`8{w`==N%GiT?qR~1&vWCk6#Ho7(Fa7bJ_Cmd7Ebqvq((-2yb3 zwMRY}lsfd`q&ER)c60&fp7D&?iuTKH z|9Arn-vy%4*YlBc{#IbJU@tR9r-Gid0wbS18A^TmXh-$kz>)Lk{19T)=AiSE_hsIg zqvtRDvv?O)dB&b7tZyR0pE{!NtGd4$@pf>L48sqO$Pn%uHmwiA^yFY2jXp@u1!P)z zNGFqve|`LWed-c7iNeYZ?0)-J;d%#Ecj4dS=zT#ot?%fVjjzbJGmWT&f36E0{?pW=yNG&JlE*Z-_yWD&$53)3L3Jlb z3!3^E9RNdnX~8%C25{wu!)W-OcHmp2gdU8mhf8zoP_)-#G^K4H2-c~CX(pq=&A}BQ zxpNq>2@wI6MYZUDVM~LJ|mE? z>=#;o>j_Ji=Y!~K7vw!`7V08(QQ2~q9?;-vQTA&zd&=oHP(BUdh?*13;ruU*t;RX( za>gb~yRaO$8L)56Eb04a)!+UCTc#f{OvxPYl&!uBb8pQr!^z)_#)ZpZO z1fxGdhy(m}{6}x)d^%0(BNH~;TL^W`Iu9kHEU!pKDJu zLLNCIk=q|vZs1*i1e^6xU_lW4=@|x}=oh1Bn}Sj4<|!~}P&qsmy^?E=)<(cjAKi=W zf<_1H;1nYVlu+P=zAhbtGJ~#i!3OtO)>RW=o4mP&E;26WlOZZzp#vAEC!?{~J5c*6 zLvFzEgG}3aAU*U$1;rm8japAGK{cZ>pq?Ja%5ugtTig!OL;Ez6_r@2K`?|I0`=+A> z?!9}SIq!*Q5V|s3%yqLHq3kTig6?i0?Gn!siuT5W&E*#8Ie$Ah5brj~5$s%3>5Mlx z_K~D*aK=_t0Ga)n&A5y5m%o0R|8z61Q`-+KjA<6^Xs#V7+~pyV+DCqJ4G^O6hUloV6zALJQS9{UuDzZhBnbR@Y+!#<)=S`CFd zOhTuzwcIXq8CN^zHq0G99tE#)w0^cK4Y^#aK#S88(5)R_=&^GfTJ3GZxh!l$x%o-f zb1ue#F{7gaan8`%HABhT=;2wETUQCUGbFcWZ8lfm`vJ!^MzQaozLlD5en9Jj&AFeI zPF&dUOJM$hE38tUEgPHqp1ueeYMI#*%A;Z(y>)J(ps6_jE*_fhj+3{O@Q7FT#GYHm zc=hRI(r)zv;+FS;e>A=P#hDm=T|V#C8a0Nzvgem%@t}*iXoI=5W7cKW` zvx-@lD93D1>OBcjEZdeDYVmhaRPf@e1{EZG{75^ zH1tkW0+r|jh?z}77o4Kuvt{b=fyWIH=DZ42#uUNVpASKs1Mk7)P)&Gl-fWaO_W&&3 z6$>Y7u7OK+9pK(+yWm(Q97Zkx@ICejy%fJ^>=U=Kc_!n~Br6f}4zh-#Q$s-1>>to= zS6`IUvrlo2Q7N-`qN${3BN%827Qv2t;458U$E&AuoLVg3XtFt4^dk=E0i$=2Ef zwEL`8LaZ|V;))HXbtG4}HHaQvGsQRdWS~dwk0OJ|DC3hf{#|SPbWDV)G(y2!&7RN!WLrVOa8WO1R$VlUUGa@^5A0#dwvT=bsYn zVWWr8XPmlM5c`ciu!U2%3DUVuES~ z*mc$q9H1Imd_%sV#Zc4PLK{36%{PbO?xY&#PhBV-)jfsEd^zr)@pQUs1;ce6WN-Bw 
z4g#Lt1{3R3$zKJULOYISR&2!6Uu;5^A!s?6h)x%{z^;Nl@OPCJx~d)nZ?DjW3C3qY z-Ndb+rurI8jfjC=xkcdP>jBUqCmsd$kA=2BRzUBhS@7uT!SMRsEZFY77A~J)3zb#! zkr`}f=1M);-Og!f&^ZeE_^pEn9#01m9&ONik}}d-_#7@)_hJms&9vy>n}w6(m%;t! zBcZcc2Il2FW4`vj#b{scBKywSL0Wt0P|6Vt$v6ME-)xxSD55O)k=X7)CiW&#nb?0_ zhI{$WAQlJrCOT{X%_9}Rt068rUl4qeaou6HW3r<7rHH{%T&C59r>a)F=XMCsEqrWy z>gOiOxV=Ms%u&8BqvR%kHgukpBuN><#~l~O&MD{fx5gRgNMAV@nK8-O1FQ+@hrqj(zS*H(IvnCT|^mReK%GW~omTGvg`ZC&me-El{BjD`yxp4HZ zOfHJkLe3*3sMAs%^;4^bpKYe13$dFJaG!|Al|1FXwVY)?fd;nb(_HRVtS47$ZGvK6 z8o)=2IjF(p7h006#TDGuX7GpplGoZQ$h#x}%^eqy%-dOT{e2Xx`sg$h{$mGy54%hz zrdv}JrVpW4drXttpxm6;qBoc*%w%K^Yh3U?Uv|g_pHn2iuTdgPSHAmaj?`7M6PuhN zuaEeW5kkSuu9A1I9XJ`@OLnV3BrZOr{`dMh)g9Qx+s|cQx08iF5HmzqcrKYSM?_32 z?L}BuM&bG$r0`v2Yd)^&IE)ziRGyEW7$6~d>r2OS9E-`pc~I6zW;q>z5Ww3cYqhQ&i)WpTP%;||7rRq$%0WE zb%-rB8w^ZjTTthz_vE;r6NGlgKlw@PKCWi!U5+v*FE)UqAL7{^7cR0hinfE5f5roE z{jaQZWf4={XB^wq-o#GrpTqtBYoxnC-s0GnG$FUjbQrNW2GB#wP4(mVjIycW+#xbEURluegyu_vP}0Tw z;|(I8?sojctxnOH;Ai-^q(MZ)w153{dPIllN2)yMaQ4n|;>n3u=JfPP$?;wNr3EX_ zNjKj8`FH%iKoGd=-nWvTLOa2C%WusU^tlj~BkYBl z>48QK+pV)ic|2}Q|&4xMJRKrHyd`J6m1Bn*JeL~+W z3hH^Eo!@TIPa_#LT&K~^3(J7IPBPRKO@k|sPD9V`x5Lll?7`6Wqu`7i?%=2>4t$)V z3Emg<1sh9;BAaWQ!Iei6xb{jil*tBFBVF#% zX`R36u&zbKg4OP%x6LX#bo(tq)8feG*odCD+C-h6B1_eOfdisRc4Xas!aS-2A7mT) z*YEhgy`bfhjD6lJ_?GF~ZP>f49pZk#NjCEO8(>)-Kow*F4cM3uubeba^O z9d60jToik9`1xM9amAq-xM|o!!KVTjWnjVES7Qq^uL{@hJ33sc8u3g7eGc&xI< zm-ylVpRi%eWQ5vcJEC}vGZx|TPre6==aQ_V3U%8M^I7*XwYflz z9C>`kKbqRI8=17xB&+#8nf)0z4fwQ7Avan>p>JppZe~sX%mwzxV?eEL6?&i-48Oj5 z1?@(}BAV4hzsj>}l|TRXk{Sw?YF3k3&ztFGZXG zm@zgxTrCd#n$R;QuYu=#?(Jo2%?@>W#GKu2!vwUiqZ9J|$aZsE>am6i?d=H!P0#)g z!hY;mB&&b)#s@XN#;0}~Vy{+~5_Rr6#Dz-Je>8R2JPjXN%?Nhh7_WmLNLLYsw;jOF z@7Iw{h>VlY*Szuf`d^=mut%=8`gKp%pd5ft`jsG?86(f@R8AHX zQM#!7F=bGCzZTfo5sC))U5fTMM52@KA2^?r1A*xkEs*$h z8#jjt<*seAMlWVK!QQpCC}6Y&H*Oxr9e-TOtQw~%UF0_i&C-fS%GXnoc{j4I+J+_WoD++R?-D%BBqB?G&2$ z3h4$3V8Xsl78)(5#3%$&RHhC@|oW7&_q4^B=Ba^x^TAT7Z z6Z?;*;}aYhgO4$+?~ia0oxKMw&bUBQUGnwQCfho2pmjDR=jXzYebudp2h^e46EwMj 
zUw(4y%ZG4HcdC)I%mU53tA@;e*l?w)+T1{iK8k8vkM_EM>_!LiTG%Oi3PreAUnmf3G=dN-yH|HTU+e`4AXgX({y@#uLaDZz$ zHHa-yQI!=ZBgEDeqwjW4(WDI;P@G}|^4~Qv1*!gwXubmV+as82TYiDQwZlm0*9YfV zN|=5R#Wg#E@Ylb?3H^+oD#X}BWd0F55)0G(s}Uc28O;|mE}9kW;u8>Mb$Mt%F=uyO zHt*C=@xFz>E$f=E{JlO!!%ZR%#_*vg^7YdPQ^pILdNF}y%Ztl+chyS#diW1vJhfdL zhi^OhM7m+8JcmDFc%XniW#|<_AD$bJHTvpG+{OXNvl4#DbZ%cGm6M*LpMIKf$LGZ$ zF}+35H1K-3a1YO=3QXn7ITQ9LV@lr}Hi=4$lXK^P3;zkd+J`M^;eg)$7r?pcGg55} zrgY!M2<_CQyrd#p`!WIZdiF6nDg#ZGWlS%PNM_ExPi$h}PG--WcBaTSfF?Lx|w z3#9gid$Z$zSF`we#4erZK$}iFz*J71!!pI1;8yVxvg3i5g-Ti>y>Uqzv)ad;E%@R} zt?KuMI<$Krm00S8{qW7hLb`nLz@}p5*NGzrj`&DD70W0S%7RrqwH?=v93aH6fn_xD*yuhH)cZE^)I&l? z89X5O4Y#3mCu-1%o2OFeHguA^n`e?m{TGl&^%hga@5z*A4I;O;Z6$Zr;`GCDMwHD| zZR%cuHdXlN2>HbaP~%kfsSPE4sf}rCX=D3Byke#T!JOBo*ZB9M+n%+NqijAAGmj=v zd0+FWXJa2zJJs&Dr@x@^2AgQBLEDFZ*L9ALFHYt{GC| zL$%C*qkOH+X_tj$6}l?^WqnFwndWTrrpfFF+W>g@#7owAyAOD{{XFoV)d(hAYyuv4 zyx6%p&20PYBv77V2SC;zreXPPcKnkV_-tGg`{SH3$W!ilA%yKt_T$6};Az$pQ0Nj3 z2HaAD3+C6-{;Ml!Nsa|13>QMr{vX*_%SCKQ@ewe~`xnTuL(sr=F*S6^Op6I8GT3P< zy}{(l4d9~cW%|O&Kstn5N9n{_6Dkjn;!tM>F+qnT`;B!MV&OwoOUZ=pRmSSgwB0&UxX;NaKBO_ilVD~WltyvA&& zjV8%sTUhTG?%}!dSRzw3?EHf_k}Y=|Fr_WiiPFm(zz3Tv^wQ+*bkl`T!tePJCSPmg zxg^$P0;}-7RGY?Da<ePefx^(6lHENvc0mi?48_fm!(|?XlWnv~6(-w(GsqDSQ6!zsXy|v7d>C@Cr1CIpi zc+VL{x+Ny;tEV``(iI!dMAJ!umX~?c`KM@ z4yFwEI*zt}v6;$Sx{r2>)Mn1vDY6w7X@t*%ITr0@E9vcnEtrLe5i?39fh<$Uh>KH~ z;b)Ic#)ch;m)guzmubAHmfee!`{~c<3xw!Ni&*zaPkqP|TzqVppENCGg5=&zH;Ie) zzx&#>J>H9=3g!D{zWOPMhKz4BPv7k#ZYa_f7ryCbbw+!~-!*ajQa4FK^z_F)b67w9o zUsU2kt^%}wNeA4rqc>dSI0L>5pN_t*R7Kjyt^!AMM>s-< zb~Dq>JuC(VrNA?*{=k?C15v8x5U|yK4BNVQ3uEFQMVnUsBJCSX$Za_;bl%AXp+C;L zdP24;r32GGlaGg=s>g!-rr^5UY{@S_t`n-~M+oDYyjLqIgas(zo#&4UzH>2~BkM-* z7aN#u!viKONS3@kj$dgH`a4I`WVT3I)tj;hr6@qn76Fs z>0xM{cRtyrVkflIdd37iu-OdO1s?)hxjzw>(o^4X=Ml{Q{uH%8lAxpqO|Z1)Eyx+i z!C-rYrbWGgA6*-O8_mL^BU)V3nC=xq2iH3QOf?O@5MJFs9=6HGku0Q!Wwa@U50 zvbs^**_tySkd^U0bkkuc{L{M>e16(fuhg{>1=*cMrVF+*eJ)*>Bt`FlXTRTr_x-ie zb>n8X)&sF_&*w6m%`efm5x>c&L!Xe7=FO!wZuko}9xsv*`mst>OzTm6RGS((%5o-= 
zdvHJLV>Xu9XB_=EhrsJv@7<1}->b)(&?1q^5IFN(PTqD#ii>wM zH+zm--Rkm%T7LKJKbp3#@MWsEy0L@nj9iVXPwhIL<)7! zYr&MnU0}(vPvFEF2Wa##4%RbGK+)(4su-=y9*nSH`}WU9R;d)ak!26>Cg_8UkK&+l zl>$mM9D;6*ETOx@JuLzz2E&ggh46#vSx8lsF^%(6nAK8UdY7JzN*GyBh?Tme3!EwP zXseCz`@&vXN^JH#7SE;|MCFSy@t9^mY*D~i$#!gmWN6^Oxxt?=+ePL|@;Q>`qs>+Y zO^3w+>s-V?7i3x;{175pn)kmw#S#78C7OF$ar&ryJe_mCzu?31s!5`dO^OnIPc8AV zhf@Um|IBR1E@+D+!`HV9e!Ic9RK#O{y(`ZR^4xeVCC6!EW94V~J^yEt_w_3=y5Cu7 zdb^Ctp~{#6{pSmP^nH;$kHm91xBMe~RXmK=>KaanMn+4k?f+drO$`X3h~*_r==cz3 zJ$D}dQGP1*F;5c4Wn5GP9yF-J2A8q$?&%_K)0*+(nF>7NSO;NY@nK%ftjftS{EbbK|PKe4jQI|3>o-91K2mJiaxKJ7F8YhNT zs?Gz=gL|=Mixh#j$2O3f(E)V+BmhI>80OJzBd~wiI8fR;33PTJVG3of%-$y>U`Evy zX7S)qcIi$#hlxnqLi9g%kC2Tw zqaHjmBCH4W5`O#A1tQ5Z^j!QtPa-Sc^Aal!`ij}V@xi)RtS2<0^#mLLH=gcs{3vpB zs1@#W+a@OV?ig$3_Q^#I{I1E~DQH-|`1HSd@8r&&Io7Wk(q~HY+LbM0`B@72ku+Hv zG+f$}*GJmAS6OI(>q-rL;!d$7Y=?aPHEa+|^4OZAv;{1l8;@l;KthbLjg@BQD`WRa zlJpw6jGc417aMc(8)JAtUSIX~wtSAna~Z#47>H?J%A9xGhrc#HPVW6>{Lk+Zrv;Je z%DGHn^f9KT?@o4A;XU#}l01*}A!!|xg%s5|fXQxEp_#ZH?WU~T&_GP~lu&}YlTyCq+Yo``+# z9>F~8^$9x%7pYg41$L+l;;CEyjlEM=c8EepycXs%*1_a_KZ@HP*iNybyQ1Y~`_+ z=aRB28laQoDErZx2q^_$H|d%!xbw}t(i;@GaJ{GNRr{lWO}*)X)y4xR{CVTW8z1S_b!%*)OCtm}S^ z?Ki8O39TIuhkhK)&U-2YFPw@&?Zql4@a+nax-g1W5)XqJM|8p16GNDF1&Q=0&28Y< zfMamtjaBSx|1kD#jRJHV@dtQaRtBdsf~YlGUq6`Pb2sY?CU@7TS_Drn0hs9=Y9f6zARlt_pKEyLEH1IFi z-2cW>-VRD{KZ+I%mOoRDe6ZOn;bfb6wbV}5j%8UL#P#s}+y6Vu?s7TA47&_vHJ9bj zGC`l^dn*R+N)mnczKjL`R>9uQmhXqrnd%|))f*~RhjKs7Fe?-B*l8*+1WkEvJXYo? 
zRZRW37gpQ85qJBJ<2MGyv5LPH7`KOK>D_@_gqX>i$u;GmuAsphIG;Wy5m^5aW_%wPYIeX&5|)HmSK#wc?=W&v4M3j59~SD zW-L2=Q5rjz?hnEgPq8s6icCUyI9pHjW#{@1V!s~xL6a$!bU{~ePwfN8B#X~5(UWGd zk3z4}Mam*Jd)!t=>CZuSzV|S&zqyR6R8FSe6J6{SV;X$l<;>*v228}_^q%*cbg-%h z(QKF1WOBKlm$ap-g>JYS%S5r}Y+3YL@^r;gvQ2WIXnDPzwClnNm%Hs)M^yo}CiJSn z{q6;X`;L7gUb*vz$Z;Pg{&eHB=w!k*@}gZbHmvF2+~CN=A4Mm2cnWqY@!Deb`rQlj z;-OQ;fj#?W-hViQ_xZ5#Zyy+YO^4`cpDDf6lq6iA=#nkq)%z!j#G#6keQt4Lh1phu zX6ZWi*!VBQ@muTVe!A*@sffp($kq$bB|JACOYe6QKI)Jz5%S8di4*CO&yH`x9lIy=}2?sBPvmvfGBUvKH55n~Mz{jD1+EV&DBDLNym&MtJafkpW({jFd3 zR|NHIezV&Lrf`c_1asC;Jy5!`1@x-ggOIf*=b|Cwc3kbn!Y5_Y?41ixKZSVoA~+Ad zTo?wF-siIsuC|O5d_lk6IfWdm9ZLqDaHTgKO&9#M?DQ4sDQH2?Zb%S6Qh1NgZ|D#? z-BhCny^A1j9M<@&5pRdH6Y_ASYqtfBJfv;-#Amq@lVAm0U$<1Y?D}vbYku0_XP;SX z=i;NQHsBL4^bu@yHKswh&m}g1e0xHSzwXo!U%S3T_`e}sA?{|_Ay85Sjwb7%Y(>{9$1Ft0v; zXG{uyGxaTX&~w>FV0dc?Yy>YsGG+;SZyv?!H+zAzb7GjBX^ue8CLbuRTLavb)&l$U z6`K zox$$e?F9>5#Bi|XDe&Qqg#Pa1X^|XM$*i86$}WD<%4#-lXT-sP*3>PeH&@n?yYGx3 z1`L}|h7U=kULK1R?B`BZ;7cdO;p6;7va1CrvC4Q6-nwZHad-1KJO}+-W9ERCW9s#v zg}$E4Jh2joyfUA$c&hmJCLPfh=WmwUyVL*9lYex(N?Oi55l_sO@2yB$YAy7Sk7kbv zr)B0c+xoXs#z0eO%h1_|c;Qijv+i+%hWf)E6Fjy-nMBZs=f-0l$J?cB(LTxN!J3%; zK1-tSz-R2Mn>!gUPJ>CQ{2*v?wz-#}8_#9u$!S10@IF0jZzlQlCQUw9lJCFyPt$eX z3bgdJH?23$l{ujHi(P$Y9@!SVRfyrAjr>4f#AVRbTLUgDP2~CxbU+K^UD5V;k(|S- z1hiuoi(FV@`&sML~vo(<8dWHcT8SPVh1d#!<&1aWeVkcif@+=!wo7c zh{)?7#Okj$;0+FUuv_~6q?7L}!frbEPpn!qWiQ^l+efhbg;+1k{&kv?MQ6s~ogt;N ze*TI*LU;0b4D2upITLj@DJ^zfx>q5S(FB@72aUYO6BL6 zc@3cjJf6$ZXYbG~?;DJHeL5x9qNr;pRsQi)13Q8#e!82jD)I$f!EUs)=`!i$FjesP z8I2*-^2$qe_b&hH>&-!zEYu2@dbu=JqEOjlsHuE=Hv;7uga(nw$2^^9ujQl<;% zKcSW{{YHN6vx(XEp_^K+OwuVgOX<*9eM*+ilS;b`B>gzxE+aTigpeK=zOhe3C;bl+L!;wdn-8+3g{9>FjSRZ|*U|OErra ztFQ(yeK-(1@wh_jv_3;R{DK+wNP0uCadV4>Bqa5rIPsu1etpp_skLOO^vc*IndLos)mP@j=emgPUT0dl=k>Nq&-q`@;m%c8$@Hp5tKpaB zXX*5HF%)#NwYP=eqkr@N&KvCE^7*6%9eq9VS-N$0hR`P}vo%@0DZ5ysp0i-BcMgHq ziFs^frZ4EMNClY#hQg7R=fJ4F1KCSTB5-d>Z*c5k0&9Bf7qhrigLJ$kkbNgh=xCaZha zWL8H62z@phhUfA!k_E4oed&*q9b|sRW5Ttvr_}TR^GW?)M4p-ENI!jl 
zp3a!00}}M|$vZ36gxI^d@)29xdjjZZxe7eCC_%BY1L?cZaCMsuEjgu)7LEKwGkRvy3xfy3R;Sr; z`uW{(>X$L>y2+ZX^W|>ZtB;r--z+BaXBWsDuIH)E4gG}iN9GqQ^ZT8Kp~V+)U27jA zU8e>sn!Sf;J93}M)c^K(&cmDHg^vlY|4v?ekmuWkdo58G|6nof&9+&xS&L5KOyU0$dd(K>`E2?=;_2RGTt)OzI{eQAA+{5F$B6-2dt}C)^0RZ4 z)b7Y+EF>V16&k$ z1NCMGPJKC#vl^*_X71jAY!nA`ck(Zx{P8Q$nEJIadP*_udu0v+FYVB{hqK_flr!+{ zlet`L!Y~x0ZHw@8uE@{!0zBZk0#!trqJb`J5Z#=`jTu_a-rj5o?me^ORKlFOPnn%? zeGLO&&p3~2M4wQ~AXDy1Y7YG*aHd5+i3#%997OHd{rWb0Cz|1bd#H z_f+}@A5Zj&R=|SaRS23kp3jn4lanMy)8#eW1?xUbdF*ME#tZlG+<2_6%L>>)qZq7T z>lrN2@&{qGJRE8D-^gCJoy|(Uvji=Ul*{WJcrGp1b5UpfRHlFNJMv?TA-QOwoxq*v z$A8?*PSE#s2D7Us`?B5f5oqMkN@CcJ`$9XHjh3^d^Dee*=U3LnRDwjSjsfk*mB4o1 zR3u6WhsoO+80m44HNH>?j1%X>K>J$o=FtS!oKgY1?W2*x*k+b44hL!O5PaJ5oz-=z z0OTPpc;93YEUmkOA}sTnnyP+`3@kzkb4DV)fJ?xPxy5cCVF=&X=fjL;`e^9Y4tlw# zmqqdiCHU-=JsjOT0sEygP+Bw{b&dP$(32Ojw7-|;l^O$Ro!N=k8MyeREYgNHy*2cVFqc@dI=jm(u_zB+=`p6jDSkd#;~~#sf_-l zjY1#o>Lbq$@?5rQ7Q#UdlbDf5n~CgCSINB#EdSB;)u;XBA(!8D;Hm*^<;xAw_RDNC zuu5M09pSi*H8{T!Y%=T*qL>gQGo1`q*=5031HQus&zijhk4@ zGAYWO?12vKvV%vBw}Ij?6;yw~0=4zjn=>l2ERI=`Fmd8mxW>s8&KwaArg*($ zLMPs3FcD2#o9h!R<~9*`-Ct0iJD&@&*Ty(W))vzu&biWyd}XAM1)c1VyB@diIWy;g zY_G@Gzp<3pY47NF;v1BFjx^F@pH-FdDACi?<>JPkoP)uyI;oZ4%g8fnfLsV zT;C_pBh^OB*TmyyCy5@IZNdxhN8(EZq0l~q6-Z~sCd=mhlKZJ$&sncLcF7re|KYju zSOHHW@k#oO^wj_z+4LG2o-@J}Zn9kiEayhDJ10C4{Hr-kewGf;#qZH&7{32KQ?Kxq z+9Ml}2Y77wM^mX*UnX>{8Ee{M555l>1#4O!^%R;<7v{Y;w6?*SjtWSkI2zS$I%Yk8 z#6s@Lx>Bxp&Q9y=J5F-7aqgV+qI>8-uX5yZ?KAfy;utsC;snYWF_0TDs?hr7_av^e zC61HbkLKd$;aqKaA$P<$o@-kX$#tvf+BnUUz`Y3zq2-}0>)=@t)`@fGb3=~=qivu2 zSX+POtVe`+TKBGOVRIfj%Jwet;L^OJIJ2PrTo|4JW7V=iea{*EGnPd%c?Z{0@L@g` zJ7ga{0^))VjLqVRXPT8{_@Fzu>XGM!?v@7PxCce%co`6@r&s)=>H70maOIhQ1iNg; zwvj7kJ$u1_y5TkE>DZ4_Eqr(7qQ7&hf>FBU#Kjx1FI7o`pVnB)*V>|fMUq>z)G4<$ z2eB+xe(v=yWI-nWh$mJay)O8g!Tb#*j~%3=F7!K|8;|ANokE(;HXxllEAjigwMe^& z{hax#M5s0LH*nkcSkS^j`GpX#c`k`_Rjrd}onWVKTtYYIj;6X=M+oin{geOrT~uIi z&pZvXS51S>vx~S7emkg}4MxIy0$oamvyM8i*)bNiY{u0*_&ZG>P#20o^&U-_@i7Ye 
zPqc(1oENc&zXpN}1)Z!($6yd1_Juj~XBj)@brSS*Kf^k(%RrJ>E|8_fvi3TAfIB-G z^pa@936spBZZ9Rq^=&piX08}|U9W;u>|V3yuxQpMT?M|?90*a3ZqK_L4Cq(RGc63_ za@YZ~%j}Zp6|AW75>syM&yHx%rMEtJr5#!%cth(O;`_P})UrSFn(ef>RLLy8{-oyT z29cdrjO2}3r)bOl{-pEQdBoeSoPRVm9Ni(x3zYlmTEb7t^m%DM?*4D_{vjtt_tnFs z=681f%{c_-xk4HAU_4Y-$ zcF_~yP)9+VRfkcvUkNl>nG3nu1@QJj4=9Se0jJD}g4echgDMWT+}$OR#V2iH4H8R{ z@1nKn!IgN}6dD734w|FlXipUIIs{Gpbd_d~91^X(@D4m5Fc;?fU4yfi_hUcZInUhI zjbzr;Dfei)hb%udf_(WlkGir|UQ3d6rG$v}KZ)yy_S+4r-JVOw*RXvA6fSm-tiB&Yr!(nu2b} zOGXO*vumuaEaZ_7si@bCEA^bs&)YZ1`3-LSp@ayrlFzAiwEDEk%ZRhYeZlFYtXA&oqc35n!TtICulmqVzJOB&n34-9X*|z%>2mSLKXQM zlk*Q96*&B-sqK~>wAbo(#$eic#{cadm~oaPdpWU!zYhtY3Z50-0%UbxIB^ccU4M29 z{;;=39`V1>JpJuG=Riu)T5|+9&e#m6)Q>~YLk-cE`)V+^Dhigqm2rEo+=Qb7714@Q z8c022Gdyf7L-khOP@3KIPGikwocLlkyKGhoYk5$QGpQfQo6KV)@o^=)ya z>2M3WcZ&*kWyOX8I<;>u6kkrRVb{_H(ky)GuWWv^1}&=o^Fn-_93`nap72kn0q;*s zh8!U!DmGmG{NL(aKAhkffsLJFge<(>E?G}4VC6q}?Q%3jcRp$w%jGN6D<~HG!plQgQ7vb6dLZ;1`yGbM&f{TlAsRxK z4AR4GHdf#Vni2=-G-G2oaj(OJYD+vr72KZ)S1O#ud@VS8pVhPjyZ<*IZ(a5k>;En# zO_%5p;vbWUykH~pkl!$35m|}PUbPD!C$%LWcuppRTr!9ws`Kzo^jW;OX9>AKTc4Az}X0nRv6d3IBR}0_BIgVVizhV(mZAlIx!hC)E#k<7xF$JmOa@ z;omADcV=B8LiQZTHWywLY=5FkbPgL$WOxVSlfvwX_l^~~hj}^{bYM3p0 zE~kp!=o!bZYy8yjqLpR?Ul?=v6Ywk zg46}B%3Lj->7*4Jer173&@qmcdFj0pK7+mf3m1dNaAUBpdAbRXnEMF(OPu&+Yrl)u zdM(GdEV3uIzJ7<7>T-GL3P#+s48vs;dWUFv@(}ZQ5`;X8-XqynHZTtV3lfSg{B<1{ zN7xeOrkVKOxEPV|BQADj^0CF#)V#yghy`-Aa%CdC>?#G0zLsD{X*B#U>-9AGvo?I|zN`XVDvwR9b287&_h3f-WhqrmJ(a!3Vs9?sZd^j&A%)x&@P@;i$dPA0y!K zR|)jJy=_#xnj!Fd`3$JlyFpsv5OMvJk8$?|5Ad@m8yre51a8d<)Zlq<@kiBz$@05D zQH9H0Xq-_mc6!lv?BO9UzPGlrOEk0YESm8&UR+hAk3`$0Bf+gFMRj`a!q2_~<9pMh zbVSF#USrqS(#)&4_w7e~hcrjQVi!lrp(;b~X-C_5VU@50lhD)DAE2yjZU}W(TY~A0XJox`$4ZX`lhR9j ztH|RG4kGhl9=!f{4b+*R2VeR-(w%jOsO!3gq&nS1n3NC4^S5rmZ|&YfOqbP$Wqz9( zx&o8GG9EiUd@vH9cm{pNw?;;w-_aY7<1qR~*?|4jr!*P)5X;p~Hh33``VLhUK548* zd)~Yhhiowv=dV#@b;OK|UB<+kHM7yJ$uz5{*vf^hJk`WQkyy`+o~J z%|hkPS7KLAa($pI5_kxM{lw%7`(1_`g9ScmVv1=FXqIOIa@QmsI~kD&+qXN@SJLF^ 
zlid#N*=Fk=F3yVKA|<}T>6M+N!;O1*!uL$i?Sy2Jjr9A^{t3JS`I=$ffkbL0gFbv2dUBR7K zMld_O7d+kji?%zr6kH!XQhG*m5qMSI1V`?rgG~xSz~T5?(0jff7-$>=^yKZ*(KgXk zq^yxi>G6-yx8f4?c-#nvSRSKOP##=)avPLuIt=-@dkFJXFXMA3c7QaZ2e{X3z`49v z)N-vjQdg8j*81Nd76$}j0j~G3`fe2>?G>NZsnszAn`k~5&3KcBTo9A;%Bdv?7neWoK`RhZFU&(Z|!v*GY0)N zWU%DyL@pokp<*{{M}HzcF--?^B&#?@tQo|`&Wjt@qv0nIY|=DccI@byxfp|e;S1N3 zkKx8(EuEc?DjzdKE!K=hoq2LNTB`wjk~=6TzKELc!o`L)_!_YDgyFJhj5}N+cZ}>c zoPk^3NW_(%v<$FQCqo|DzxOuTEUcia9%{n{<+rfxAnx32SeZgL{~bcbT&F0LQ%694 zQX;(+*+Dlw8v=*Ktff1@4WZxey-yK8PEfwzHiDe#f9dj<#nko*sx&{Y4IE9KM^lF8 zw2GYqJ*HJauOd@u&xiNvahX$Tt1+f*zV* z;o$p}8t`gj98quVV_dQ-nttusLc^kQK(rx+{HQ*TF!(!!+*DH)>M+?4M4bUmcDdSfMUVsL*8Z@A7!W>eT!JALTvUgJ6$Dh^crA zd%v2cJ#tbzR%{oP%iim+DMlG=K{@x{i{Zv#^`xCcX4;QHx>I+fQ`SGm=B17Uy?Q%H z`+hOG@6$APd{VoR9fRT0(QFC&Rfm(|Jy95WOdh|Xd})AAZ@qEE438`)Mn`&(d-oWD zE?GST`=Azf?QPidk_?vHL1|V8Q@wiDK)bY&3Z&fW^2nJ$wNo983A#h~Y}ce_RoT$F z9uw%#Z+)ojxsS=+J7mwb1w|mfegidM(}rI3)RA_+?nJE(b)dhGzDq5fe2UIn7z+Nh z{31q+PZGz?0C*b`1Tq(UP=2TEsX>UW?{bSWSb+$^@Lxvcub{cce$MS=pZi|w?*|?F z$vG$Ls(~7L=l*k|duS|W{?ln|k(&@l z!i6`RIiJIx1}=BXJuI1TY*2%w_KZVvjy`Ai%SGpOgzxvP7bdRf;*oB*mhc(uM_$}_ z7;X%freX-TEwEE~Wc>}KzwQkBqd|wBuI@|?ci&BVr@Ua-or?Ed9W#c@*^PztUE_Wt z`=mLR@S+@3YMVSjr>}SY#A^qSBoB=pOUXL*(Mjoc*zMKlS)E#T=Tqv#QMzxGEzO(y z8; zO&0hOw-(F{b_J8?%X*MZdji_Sqd-{lOzDV?e$>vX+o(wV8N7abC$#mc21f&;Xp>z- z;qEbu;G&HQ(0toAa;eohVL<0*kT$FgP~rxV;jc%dJCtR;G|k9prnX*a24;UL4s)7u z3)k*G%Ib9P?1Sj;_gaVstP@YWbQt|HF;DU;Y8|SSa|vzgvly_Sc3ylh+Nhhs%H^j@ z1h3`gYeDe)?TB&YR`KzcBE&Jr_TT-c&+R2C=3^yJt2v!kU(#XM1^%GRlKUejU~|1~ zkwuzfR;LNc6H(#GgJ_eYHaqt7gUu2KyRv5n%aP&6VC`RSi@Y9wPGV|t3|W`2j9nPs z0P@X8(}>iQ@^PKd>f+a(GweKMxLAZ*zynX0l3Mbk@REiJSnRIU0XqF~GKTQ78$&$} zRHFB-eFgC4$yj3PCw3f@-z8L^I*geX~28sLD-g<1%B&6P;l!O zJvsk5o#}}}t@l4bxJlrZvLRSDJOgCDXa?=W=K;;x zqon%-0hKe&l-gQ;3LY{`gP{uZKpnA~e*3i*v~~knZ=wq$_IxCy_QOSs22BA*by>h< z_zu8(GK{u8Q%=oVJ&4Sz!bpRf{iwm~?U;U+JhAt|Ty|bht%^prc*Hw>^c6Fx(idWoz~!{5nfCWx+$FFA`usWi_k&CXb-S>Tjv}bGiKP#Z5oib2-DMV2(cQ 
z{b)@h55sY*Q}3|dK74jwF~5bm<~C)J_EnYCU>8MNy>1s+9Jdaw&{)pOMN6`X#4p#A zaHtNY=6@QLCmp0OJNwc*#$N;#n%t7=H0};se644JGS5qn9j!}nd z6Tq1^b$b3dZF^jL`5_|eOFh@UceV4(qi#!LXE>-33N{(lJ-Cl{h{xjOE( zSq)QO)Qkq%siEXIKXwhpE|;KzMNwF9Pz`^1yn|TnJceW)n;_XX&rSF#VPO5fw{ zm+?5CL%LiXZ~h&g(0Pss-QStZ>w77k>v{D>oX=rg02kleEUO_| zA**@SSEnW@B}rEPndZ)@ql|~3D~r|pwYw#J27B2fhm@psZL^g-(vAQtd$@Q&ZxO|oMldW2}oS3O}3sZcFN=iMP2k7)t+gjXv z{!r4n@EEQ-$_T`(977#DMXa3;osFXbJ&)1nMUT*n(N9&KkN7RqN2ZbNQX3Zk-Va6lz z%xy#P)3-JdFVl`8@4dbwD@xRf(hxaf>gqRGWqUXJ+Ato^AAcB+Pi@8h*3Cs$Zt*it z^Vo--^_Iu4g7J9g;Vg7pixcW!P9T#+DM-^DW(F0l5KddFDyuCzh$ zkf`CWSbzxPC-w4O0%r)zQr(2;`+<4l1uF0P338lHZFWrMzj@bbbnl~sU9ICEYg#hD$xmJwH9eyBGu8qMq8?u)Do8iV_O?k9NVCm>Bw0z^pzh>Y<;?L9lkkvXhV#mF)q8+OT=7|rE!jX!hbI{cjqtOjV#dzI|7?ImbE>ApI z`ySN~H0h|xPV}q~E%5T$1R%{#0{JyVq+az#!!|T`V^W$nA21`|myjP5E(j z#4#1{;faxS=?)L@)jtTFqecKZ@A2SX;$E=$feH-Hjs%KwF;dn1$yDSeU0LtG(Ncx5 zv#|KxMldBz))Tq;1$c7C3m!ruWpTcyWR0qyXyV3faPYug5OZfRtexvVl`^ep~TJ;=H4wK=4jH2sYt~G zPS#fKuB;8tb5#*OR)Kput>r?l4@`@_k{esf28M*o?3A z8-sTStQcUY>$hl-6N5)m=Rqx%@OCgff7TnD_aT!Vrb8Gc*Q)`L7eD2T}!4 ze$Q0cQk^2bp|J}7%+QBgwwu7?(^=q-br<}dk_$unW`mDyw}DIkG3l!+ZP;co0Zuyo z5fpE31j>hJ!X2x2!DH8W@LQgdX{zaKYVMj})b-C9(!|wnQnTTfP*Zyhuyxr3qhxu@ z&n8WhPF2b#cb0n@zwliNRXbv#g3}4u>9v|p)i9@``%aRCbRDX-V<&dPp%hCTEtb`l zS;NX}h4V1fxVud-$@nOu-Rh5c2InI!iH4{kPY<0i>A*i7F?yPDaxZ#g3|B91{|bQ5 z-hP?CE?$hrGA%{7H!CJm zb4}c7x9HPwZ|6nq=5(${nAb)j{mElLeQwipS}!|Dn#D7K8!HGnQx=;v^u7ypoiqX_ zKb;Szsm_&EgFFty14AIV;s`dJy$uYPM@wIi(ST=OYr}ZEXJGEqTfphN85|s93-=i6 z!R!rn()709RMq<-^qo_F(xRKV^oW89{QXA{IQ`xRBRoGq^IA2j*TYFr{YV>)UBq?$qnKmIVeDp=GSRSr+q*0XBcxK5UM*O4fpiIx%axlUj5sB*QW;oY`> zbHG*mBx?3i8(G-LoqG?ZzG3C5(s%+jT=G~VEm0R=)}6`T4=&luJctw$dx49mIdi9k z#Woxn$bN_6#$c6}PC|r_%h0g+osz9D zpmk9a*|oI^^DnEy-p35AW46*|7+JnHkUG_*O)n@+h5=X(W_RHaYp0_m+SteHT+BSg z2b1bwA$D|a#m(b);_K7%iBEEm@Z&dkoHq*bt8nEM;Ca z@$QZv?l><3-`tRjS8g4Q7uRpag(7=g|57qOZ+;q~kzs|dxap7j-LfZw%;Sk=F=_a- z_8@Fp{&W0RH6UhQ2qBhgA4fa4`5Nz7eI0Z7I01LWH{u)9EbxnpV$|~qfshMmPkzJ25; 
z{o(34%n*OzX?t`UCHBk_MA3P?>{q`{QWyRg-`oDiRR~oZc;N|L9DcbMx1SzHWp!8b zkxufX`Qw{W++6Th_3 zO(eLs5x<{rfuA;TLB9>-YTq5&$;J0FTwJ`q;hn2DBU|dGh>Ig)MNX>*#`pdS--fKc zNuwJ!^Kh3lN%)wHF`^~AxczkSkay$~&(l;}$3kjp_y|bHHPSUJ3+WT_7I5Z~R=V=a zQCj)NA8J!d5Ur?b4(wiBrS%J{s3UeB^u_8$5V^mbw%S`y|9Z8EPU*6x?asE*8A@xx z-#G=e^OAO0RGvvXdt{KlzpSCE$sMqM$|#_Fx0E`UIS7oKxgHGl$OV0LD-r1LV?1+| zC(z|>2Aj7<0OgZCB;q9{w7*vq3I~4?6{B7wIBXUBZJI&s=N)C&Dp!T8C^TsotbSO- z&pEY7qB)_Le?9O7I#bPEs4#n=AE`U*Bj0KecSf2RzK?grW17(Du9IMUqmqQq4@Qee zmi=3MALjQcSd z87?aiWr9hx5xF(zGXBu_96Cd57dt+;^2+)!s5_FxA1!j^sH0|851cLUa&oSfv2Z9_gcUwKAy6RqCt{f5j&*e}dtHn7YA$B{8JC#v$CNsSrP| z*F4~iWHet$+FxEk)pb9lmOmQ=-CZ7II=8tV@!30elEz9x>hoD$YW(o3~^>us_Ga-ShGwq_vA)bl@fxbYVMC=~xLIFE|6! zvROpsuw^38UzAivN8{*0Lo8^fh=|K4NLUDwZ1 ziqE>JFKZC#ub2taN!kW5I9drz%gKgYUS5Ujr=G*{A4iZ*kFmwU;*8=9(V1O~TXK%j&fFM-eaaRIxy5;X}lIhe$H%(+WxWrnrA=Z`}EE zWbcfFqN#GqtQl*Z|#UdmHJGG)LgXhiJO&H}>z`N?G=-rEFw1sThv-MZWAjWVrlzn+f&2 zt4LpsOSrA{3VLW1cph*}oCW%N_rr94Jw3EwE8Xb4kFI%LNx`5m;QP1>K;oH0cQ&-pp%x*q zr(z-S*A;`;`XA^aT|>d_vva@`k0_uZI7>(9bVB{v-DEG2P%3^&P+j#e2wqtTB|HbJ;k4d3QPf>`rG5C5~K0&20`O<1^Y;2Fuc^SR{edG3sK zXJtGuC903lTa+Rw_0Q#v?lcj7_nH1rrw?T{vkqsfh)mi!J6&7F+3Au`N&I3;UYJ0u z34-;wp75_$$B0JPy99=$G&G<2x=u=%XrqbL}|zJEjt(Sp5VW+LwXd0V*(l$pfHtZv=Ef zS4)ko&QlAN4pKs|A5a+h3TEV1fd2Y>^q`l%P~UwwoOdz>${mrA4r$&Z!^kY~Yy3-~ zy-yC-4Sq?@?+&5TQ=G{D#qWv!kTgtv_cA=XD3X|ZY9c$Y@5fC5<$FTMlQ9!e6Ji}#I zuM>CWTB_h9dP??3~(TRxUj~74(!r zf9bg=76WtbtJ2T=m%}@EQeo5OgVH6=gf!ANA!{fsi0hfzk zLHVV(((jA)q00w9xcQ|$w5$INT1Vb|Wb-#n3hJq1W$%uCcVcn^}3{!+4gSO}(Dy%&v#m;UXK zj2wEw2Las7#j+>F8lr!->iECDok8PoE)|;vT8imfc~+jxz4KowAnPr*NtDdhSv}ny z&%NKw9Q#vJ^ECq9KFCm%vUMo?S+MR_^lxA|vTE8vR^}T=4@DSk!X}peo8iV_J)msR ztSvT(XyYivPLPC67T$qd<|Wb#Rqg33bHZ4iDyTnT=M}?6bM$zri%^$}yY~&ZZvBO+ zdh8vb(?g$9N!Lrd^pdV{`kkN}YMpGsqEb`YaXNDh=mkN$>1REMXsx9x((}dzz~aJV zS^uA3a8#oR9?oqCL&x>dN>eSt{X3p;_j6si_}ppw!fqbu`e7t(a83s4e~Q4l5m`W4 z*%C+>)B@+{O3(#)2GSDOOOK55q#kH4rusg=gZ=z=sGxEY+_pGG|5h`Ci{`{b_j?&o z7Sl&KwziAZ&CUW7FC}Q&qzfl3`$W+qg4($`gDej8B5bu%G2vV}e6~wJ?rA%N)oIV^ 
zP*l7bqWhoE6ldpC2%)GeG0T?6mc<35(;M{%tX10sK1*J|9LdV%;n^oRwBIU>blE6L zYB3Y5r)`lauP^ww|4wg+K!C?a$@l|YJ-GhDvF!IY2|l6gmMNiqyCV>j`=i%pd}J=Be$RBG6Q zde*)s&O66)X1IKQvJ6)5IZeua^uyKf_2UMw*0bX?bB?+8U-(Jviu0nvbe~XXXnlCl z>OI!FhO14dxXBD%dhsPXHv9>CDosq7q_tzjiAnhI%n^jiJQv)2`7i8Gf-h#Nnv3n| zT!nvaalox*X-Y~IUZh-D(q+jJbS(|Unt4_SkBeeg5ZRw=^No+#qS_Kd^j zR%D@3b5EiU2cwZ114CquLZWCNFGp0o>bkfzZan+j%`;4dB~P0KURP@P&9M@JhQnw6 znaj39WgS_J*@=NMW)e{f0)C{oFr(0kZ+qBHq$(Yc>x|4sJ>9F(s?9T4n~y5XV%HXii*#`)zGTrZ zM5kenSSc`8G|hOR58wT20`l_WIdm958l8AWgpVkW5s@LB&*8}HI!YsRn=H;Ln$Dcr z3!N_?1jUPvfSIG7!PQRk(8T8$n44Qdhs9r_!yAS0@l8J5usMf5rl}896nmg>=nPO8 zu^hO62jFb501PkB0zEHo1L?X$V0qW0OB7~F6No>_SZI9$)64`u%a zcW3y)3*j14>oHrXQJvn#d8=R1y6@irr6vQ|a-p57BoTbv#8-IKqXl@H>k1^_u^;($ zqW~HC;0HTrH-{Jt+mqV_W$m^62m3dP16th0iQ^ti49U~tm9q!tDh@UO#CLx;on2oY ze!b#FDZDni?d&KBjm+hJ?^5L5o$=qhw(B}>!aEZicaAUBxx2+Vsed;az`f{vzVZ|tk zu+0Ekx68V6Sm+VUoRZjeCuK0_b6~g_-Ex5D3qRn3zkkpNF80WS+gzR;XQgs-n+7H0 zf7KA&#q}>4uWtswcAi5#e4er6jGnU)l?p>K>RB&(X`?$4E^)#se|LOIga&bww;F#_ zsgEzqi^9P1O_)JQG%lbQ;C6o;vCgRd*x%wA#B(u%^L702@G$_l9$AI;tGnYHCnI>P z&o~@C5lfs{cOP+lu7lFLk>& zd(kQm2k8~EI?P%5LfYD`nhte%NKeWi1U`!|!g-4GRiT}p9To*@?uy6~)7jX@F%;pYwE=6pFNd{0 zD8Pd6ccZ8}SKBE}H&JMIzg=)*o(iJH1CnJiOORH@W$4GHT}XJuK;K~RiC+FKS=>D{ z&#jWec(lV)zPo5Oa;?ln+^Uv{I5}qiJNIV%Ru+Z^>=zj|bhEbh$zH_De3XKgsQXxt z#IsW!nLDM3{cK^ljxebHl4$88uIAO0o3q6X_O#4i7K`D=V3~Iy$QPqCqK>t@kWIaF zQTym9u(A~+LqaWx>G(+2zV6@Q-peptCO=LBp6+vr{QFC=`FM!2j%2D5s4N z+Oi!Rd_I$~?27{pH}_$DYcabocg$0wQ<|30=R(uzQ44`|gK-Zq6Dh!E6(DW>@)3A< z?*hF+(}BuR2xgt#57h(Bp>g3Tpel_ACJN5dEyX{<;Lq*AdebiOO>z*Ne>Mzun_I!J zL(hU?zwb&n)Z|koAI?!zp3ahvA1^O;Uo;5L5Z$Cb4eX%J>638o{8!NXi4VEE$IIB= z{W9=TP==`)ZcyL)8uhx`g|ccAk@g4T2)A96F>}92Z0O^I_=Kp(?Ai-2tP&|`bP3)% zl9K+>_mHx8x5X=U3y^um?~!mrm47;7n(AIFU6-`*AQl`8HZS;YKnd5 z7ok4pPyf~BK7Dny@LFZ0K(ix;)r;+_4pyfVD(7Gx=emSH29J~I$8&j%lw~xcv1Yx< z?<41PIOXGrG1$YEoY?O&+!(Cv2d4P>r6?I&rs>pJmJ{S`_L@` zT)xb~!gRXoiaek~rvT;3yHe4#L>RxK5N^6(A=T464YRct!BKbXfOXehkQtyS)n8Bz 
zLj)yYtCJD*vTBlMq;7>TPi}?x1m4i-8V(&s9)aI_PD9Iwo8a{PNu~~C?dW7IkOpBD z(j@nt()FX0V5C|!n6UR3G?dy)mnUwN?$milUTb+R%x&KS549D-wNdxr?cyT(w)=Og z@K!D9_LonFZ%)B9a5?;YNE9*2me0!I`28H=<{@*?vOU_!#vd0E*Q>WBs~U^Z&)f9S zo*@H$gMC+%kvC_#SiUpwB>y#^pAmuur}lIEJrQ>JZGh?GPE z|MqUtfOBsc?;L3}e+WJOT|Avx_y~T6PqEFhf$TVeukK@?GB4q)+TUZL!nXQCg zy(xi3sgmnyYhu<)H$tsB8=rZ*0qCv}> zs7qHSN*B2iIekvV!2n(2^B72l6xm@Lsma)tt*b~l(VYBkq(CBC%kf`=UBny>Ez*B? zC1HHF6#d-mV{BbI6CYNgLu7uKj<^4rfi>5hC+^pW;|*ddy5@F`WVLCIsQ;^p(>(UZt#R#)VTe$vbcoLU^{ej=U#>zgLT&@9jX51E&P6NzR>xNlgOmc2LEF{6H{z@ zMojGGa$!nObGiQv7mJ9u#Qlg3c>Fg9@ptdt$hSAU{>?eYR{uWkK(zwPh{Lapup{Z- zc<$vGQ4y`ruHjnuu49+~61eY(TtA^sSQC1%}e#G`F%u;9`N z-1j^n+{}OA7iZ^SXJ&ii_vHHsy)A*boUB&dp)LDxQPwE@7hQ=rpVuIwuRg$MwcR14 z(+{Cj-fl-7zR8jLW(wp~M^F5DOFvfiRe^Y}?ML+9oJeG?-Gt3^^fr!koP}>1eH+(5 zIT1%&TCvL3Ox%6m0j$Gb9XkmVMEu-r(MV|;YLB+D^SNe*i7>&uNnlkX^9`<-2;k_? z{56l(3Gb@83Da{1>a=#>2fj1L>GV|W2VPyMEJo<|9Kqpbc|1&cFz-qD(0^-hMvR+q zqu?oTlqsiEB_HmL1Xm>U(_}T7ej~o z+!(BeneznC%zT6qx-R^!>s&=gXBp#bf3{*ta%Ff`Af)`{b@g0xmajh()8+=H0)CXwUft7 zn?g%K)37lxVUxV9KJ!jE(*%Xp{zCzO@)pomFdj}wdI!M8$MlB}&LH5#Xz7(D`CyLb zV=#4BH&`)YF4$~897@aIfYfUk>^STrEzF3aEVWaqwWOTXxn!vHnDsdjxww}u&0h>( zpFaSpArD|qurleCZ!elPCI~pHK9c2epm2CwJsEMsnrav;>#b1jPn?Y1jYX*4#_qq| zkMA#(%_G^>R{cn*dvF=T8$Jnb9Tz6~RQpg;t|^E8S#}z!RvUOm@>(}Vu z<5=PF(*ohPY$R@Jt>|~lNDCcujk&Mr975nzo#{7tGFq#OMH2I zhWKz=0lQy538+CXzMm+Hy0VOwCx%}ZGuR6FM|N*yxG`9xK9ypEsXN3)c(J5>`3iK3 z84A6Uo5;nR=TrVC%GkZcivNHegW=M%EC6=&ydhqd#p7!!4Sa(t_dbF7-OOcT{etj0 zGo4bL9776&0c_=6!rmKsu;c998%OKh9}YxIF|Z&ZUiy5p7%m(Z1s7+ol#O$&Ehn?~zC%oJsJ|`Fn z{fZaDYm1WLEZ{_6w>d+3sa_$SS}qVLrz_+AuSi^{cpNc*wFj%yGv2a(zb>)Z>vS7Q z(#LGH&!7M~vEB#UI5HhwBs?|XjC3F)2N}~fp4E-fA`v!fE-tJ;7=dW16iGh)-XVX7>Ia7b(_`KxQhs#B>DGY`q0_jlRIjKhA6;D(Q;C#vzI9 zJx9OqVXP3_eWpuods4Tulj2CCh zngu7L%iR)0;@O16`NA#a)vGjCevzw9ge|qL0-~^npXXvJRB`^o7v%O!CK<i{20_AIsR^V>UC7U3(cW;g{p7gR^t+`kU!k=0OES;{pC} z9bt6(M^b^cS#=Q3hY|>%g@#noLa}69+e&sGIhCQb?T>Ogkl#VKJu;P6&iV|RW1OJ( z?Or${cooc!R)@Y#W{J{?5#=Xey^lFp4=u~MeB&hu8r7)>NA*o|2@Kc%0bpn-~Tod 
z-USJg&3@UEl{$vVLa#_9y`vU=GTjO*SQz!s?id;HYmOuE^fuNGl6~WOMMJ0aO`Qee zX_k7THBpY@uGw7QVdmaM1t=-;Y!%NZxjGwI%VKsc;!%<~ec2?ep~)PX8^^`nyY^Qh zZ9~jN9~N=-b&Y@Ji5cvw)st8~G29re_(E4?eP+63_3rBuUX3%>a8Vwr)Xk18aS&jh97g?-Ljr^-u;u(ZLFvKT}QyK z$Gb5bzgg@!>xakF&F_YRj8n#7)SLCv-NSUCeY_1Uu3IDRtvLd3?(l#I3p~Nf=t^0h z%oV8LZUb!>@xb1r`#^6~rnJH10}v=`L*Lc%u>S0EP&CIH_F0(0aqGOHu2Yrti^4C; zwD=iSJ3LOh^`eh7N0x^e9%uwIM`pqCC4;1Yoq5tjm%bAwE9M#>Ix`0Tc;f&qXXZe= zc}0}>A9L!MMLcQvdk-#5c4zhP$j67bH-9{UBOcjXD?n@*ea>YCR3laC)tN-SJ$>kb! zq_2-;Njn##+bF9i#eA>%mp1Aq%X&2!7mBPc;@+)o{bh%=4~~-b{I+FfKD}!+%3zmE zxc=V^HwJ5djS8ljTaSKHoQjTDi^K}%<-kF5#nhzPIn;@%gIHZmJ=4o_V7OR^*1}uk z4v@Mg*?7|C(fBM?{{cEZQmIc?v|3VQx?`wsA^9-TFae8jrrB}EmgdsA&*eemD?Q+R z)JB^3{WTc(L8g-^3#m!g5tw$)30`z^0CugV;Q5auaC^QBbgonb`<8A2s?nRI2M#rX zM&BXOY@Zfwz+()JYoNWGx zONTF>H$fk>KG-p1Ex9qp-8iaiFx=8Xz&~Lg@Q1P%eXgmPaz2_*PMV@fUdjS^={gh- zFam^&E$4T+wDJuqj(jQTNspHpkNAXKeg6Xqe7*yn)_)FDS9v>NKb_c}ENNBc;z_=) zGsfR7KE)3kd>CErT`$B>+(vfHiT-#0R?X4q#i}>r{dZhgIX*EoW_9}Ek}i4^TZsX= z(}+@?8GA;m`J;>e>3)M+_RnMg)-cvX8SMABLfP*y+!(CYrc%7k(-6Hs<2mAIH5^x4 z<^eC6YSZaW0>To!0Sj{kd`PWk(YY$%JO^hb75hvoN@1un;_Ddz`K z5ohPqv5q5XbdDi$aQh(Q-54p|J?#sf8}Cnbo-ZWJPLg!9=K{LeSBI`S{|+x2?PJ{9 zA52!7RgrqN4%GDiG-9^6hoJNl@ikI&ENW*qmas++o0%lWT=olDd9~%;78bd^6r{G) z@?U;Uk&L2LC2iDd$;(j_~(g zZa;n05G7!+&&8Z)u^4U)*7=hckhKop!X=NciBn(4A}<#^Qx}J45N--1NrO5?cHKGa z&+VrSm&GABC{?F7c-u^XJ+8ekS#YMDU1ONv!d&~F>*DKw_Ys9v2E>b|AZp~LKd6t3 z6+6y}YJGC+`5UBk_f0a-{S+*?~n!{{4Emq_?v3*{RlPdx!; zWa6{EC%)al7|<`4@o>!K2n)vW}(JW}ED#R}B0>R57J`+E4aWIMd^PyuM$&Y*Wylz1nm2Yz!>uvZa@P zlGD+wPG{XjvBh?~G2Oq;!tv{BkTPE-Vg6Y!q~pF4W*j|mKh4NfkUS52%gQC((I0;w zG={%&c7OoAZy@%@31oCW*Poq{Stsc({J61^*SCSwsmWokmO^)44f1q-Ao6122!Z2_ zTddt&TdyPR?nSW)_Pbe59)(X323z|*7l+SqW3UJ(f4uImk8ty;5$Lv3vZX0Ut3v*8%6L!O}BN4=$;y6mN;(}qI-NF#V6z)ZS_nh%f0sKJQK<{&I{ z8F;yG4?Oq74EooP0mSTG;A`1@sde)+U}*3gSS)M@yyATz*53$D{UxjWkfsYI7tc$j zTU#iLj%q5Rd79Ml*jTA(p$eQiM+G?E4TbcwItaI^Nbf&BN1Ehs5RD8y4PNaqg6q$? 
zLAO#_oYazVNpRTILA2Fd^0iZw{mp}IN26;MOoYk%>yYlDp5n;EW=Ob^+`ltY zO6w3bJljID?Swfy2aGE@pF`@%9g?D9SJ9VZf27KXtFt31%0T;YTq515$Ldr=`>2G$ z9tJ8|EQT9{B@FUIr&j8qua~DIj$<0p;+t-8aT87lz*AJzgmLVC`i`L3dBSjsy>JYE z-Mxhjc5TC>d{)TL+85aI{-;xuPC&w2@zhf7~UrGFwbr-7F4lKRueS%J8j&d z3x;ekd&OoarF7%bU9Ab=xJE?MDCpPa!EmnL_ z8(WpP9rHuET+DsSd|`I_3M8S|04ct@0a<&v7^$_e70x{^!8X4iXr~Je`jE7}^{mVe zPQSpj{A<8(wQ~|2!%Ib@{A*C3p?ClJ2G_Q_3D**yXlOO(8~k&IX1_H9&c!Q|A zVMgcXorfRBs$s((b3MX(S~RIX#cR~?p|#Z5WHTtYxP-2Z#z5hp?_hN425@WoP_VcI zr}b<3^t;E}pxYP+*Cr4&)lonj{r&=8S%0Ce;xs|f=y&ueUMAi3&IlYFrwE>X)CUQ& zSi;z#?d0;GugTHt?}GerH5eZ=7Et@UDXrB}AoFh$_>piAq@5_i_11V9r)xFSIy$-_ z-NG8IPN*Pmb{r&r=D)%3#~s11RLSfi#S{6m_yUgeI6qP&k}0X}=|)SiBN8yv6YX^Q z%&&HG!2C?35r@KoI(=9-OX%v+#MQZ#hQWqf5;(ekll5+cw84aB^dPk+uv~LH_&=`RJFdq6{~xck zcN&^Ra?ZKVIoBSij0z1&g_4F**()<-wbP(wRT`2OX;MNYk&*@~qaq_hOGf7RT-Wt^ zd;R*IKe(OSb@ja8&-3woJjdht7(|pju_-f-kli0jMaDN1=+h^J%l0@zNim7|+oDO% zkJ(F>i*rDL+zv{XdW1Qgx{RqFUrKFsFr^~<mP_l7NN`G_3w7VE~q(DQz^}i>#ZLaK_#dS zlvVZvy!JUx*dJ6P+%Uxndb?f^s!NmjCnLHK6xSvSrXfa*ZVhR_gVsi6@}BNl%A2Fo zFIZos3$`fw{OdF8*DFEEr^HUoz=CNVPI zAvW;QTxl+MYHpHwCt&PZA_vJ(IRO{zJZ0LWpVOb(V@uJFdE@cQ*bms@q-ZiDY!4je zd!A|M!Uj9SrQsW~utS~f^tKXhF!@Z9n?oq1;SzPn>jbs=j3M>nVkG$^W-D3mKZ7c~ z?@Mi&ype1js3-SAs3>M7L9Nxaq^6(nr0z>MlV{#-rZ#6uQRlShQ2U!hMEpzn_|^7(Gg(}$uXHg+A3C2+XO9C{nAY8^0_XoX=#Gs+Q<)L;r46fOx4?@QdJvK z71x1F%1L023i_Bp!2rsG$gNTA`?JF;CFY&%5mUgs)+@6V_H`_px+;$#l8`8;=|p zY<_!|=|?*iHW$6lk}S9+uM}!SLxpGe=Q7Xs2Au%a-?a;u&SuwBznCnT#?~>bWw2;& zG}d2_ui)Ytrv>fjL*d_)GK_2ACi&w!2#x)}aUau}J?TxLcFPz}#FVFHaV zB*1GvQku->hoJejVc`e)-2?+LKWQu)1j+^e2C8CpO5tYXM z)WqRaWdFKEGOV_Xnw?TeMi1;FC&|?j8fsa@mf0Y+J-C?cTXB(?pFf$*-Pb_98reoV zneQjtthi*YB}A?mJWI-qJ|rLP2qy0GjT%f zV_I`>B_;ITK}8v@#l|(O@Mm0ENlrXiKu&KK$BN&|#z*GA#h(V4VS5&BLcRBeg1tu4 z@cm~RXmlrQ*C|d;hqBEz;K{%=&~CvT!7=fj^SxtEkX@lXxOZ;E7@1b>T_jMG(r4u0 zGI;}Ha$TJxW8f(Gcp*+u#vG!izN~d6+=`6v{ zhp}LBk0H~RK&ueXbu;{BS1dNASRfmllFb z@9IeT#ksfwu0+Hg?_y-Jn$P-}(OlqmGb;Dx4$LaxEQ}flAnCqc3?7X^e?HY1VaA6? 
z@mu@$;a;Kbq=viV z_(O;cB4HvxepMwyzNdMR>@*UL6Z@EL@`~j13{&~D8g2->_p*25^BYWo5#lk=4r<~K z%@y(pgAuN|>O3GvaS1>ekG(%#dFvy$Z6#~Nm%KKe8!Y-@@~Op%r}8O{^LwE3HVj0@%-FaAG51VPK{%n)C#P=4TX*2k>i=mXeM zW;YV2-w$hl)x}Qhy+?*LJCW5LN|;L0c661`Oti022SLi4kUk&)J^11vdRXQf+<#|3 z0*)kO*Pfk0w4VJ!H2psy{g2}i1Xn}1Hf}}p))=7`iEfq9t~|-VCdg zJc-x`$RIbX=c38S3ebVpqv)&;b78mN?xxYL&9IZLJ3`zsLO8AI&=svCXkc1BL$OEKGkuU{zyZQJFL`>4 znz?7sKN0F%B@0~j65*7FcwvW?_n2BFhx<+oA`DrXn!c#vOsRTnl5*LZm+q0q83~o; zV6Xm*6PJDM3YfaLbCxEtzM#eltR2ZzH-;N!APM+PMqdOV^wRr^Xk*Q04D5NzJK00r#p1KaopeC;#p-1KKqx0@=!))r+K~J+4OgqZm2(ji# z0RctV5{gb5qEp+7NX2~;)ER|u)W=9iDqHY}w6fA9JvZ$o<(@C1BD~e9nxvnEWtIzB z#{opk*PIvI<8PDeBg)Cc^EQz`jQ^4!Er!Uu?UK}5=_%Yn0ART|Z-9Xe@@}2rx zokKde+#;60vlE{|lPMjjmIBDFm~2KPuq8H(tdILj26qUkY>Ncked#p(qxoj6aqB{~ zpeq%YI5P#YJHH5>-l@r~8*|@(=3o69E%d0*6=9aB#fazkTn)9wnjo@3p|1v#hPbh|AVd zvs2Z8W~h6@tGa!_bs&2lwQH4tv}$q%Bjeb(2$v8o2K%1dI%a*PxzSjMmi&cw1giO*6D0FAHvOn8l1^ z`nS-Z&lWJ|x&p&*E_1`Lp2Mh5s?vzRf*#Y(qRm`)I~ zlof$vOSsESf4VOsvuZDqvh~kMQ-^v?I!Oz5U0Mql1bBhw;aXr|)qNqi@`fNPo?TCm z>2rZ=L*2ZbTg}{)GJ(P;$F2#gMa6)(c;C8V}hY1(t45;+`yHZ4QeMvvaRM5yP$bkpxIn_zzPGxCICJ(e&}37K_n zKePYgtzz@{(p;7w_)7MEZi27A&lGkoNfuUS7mb;FSLTO-mR2WV@l_i3a?%@eUHvU# zZ22l?9Qlsi#y?FtOf0iZC!TIWL}9z7sFq6x)NBJ2(GD$5YHEBwDf`iiJmaKKo|D}~ zX%_*Mt0766J~=^xPkEv(y}!iy@PCpD1@`RpkEUmg+A*R zLTe?$q0Qn6lO56Qzkl-vxf$&s(|6LV=YW308+otpoDu4b9uPRq{RTeknefk!lu^7& z(5v4fn0MWt83QliPDV$v6^y|hRvU!p`gifOCfPF2URmvjDrT(~et#d%yfghh1k%`7 z^w^w_G&dSc?&DtY`IX5+LoIpebrcTYGc=-Z*sdX_&CbI05__0=Ctq}zX_Mv>c_)gJ z`h5VqggGKRO&s7B^{r#{G~}`bmcGUxcR5EA=N5g>;-fiQ4w_J^8uxJjq#6Et-AmA-*PGf_O9REV{k7nc6o_i@G22 zfzWgJreyM)sBe`*QBn3I>^b0RN{Ym>I%R*!Amb2fs_rD*OLqpIZ<~Q`Oe%%G1@fV3 zMG_!qn=bT2Wg_$2=dGOvEG~S-TWD0n-*nJG5W?9Hc^S3{J%ZYtcF5RabG6*q5MA6kJXo0->=<85m27v zfS~JhCshMSkjcAN}U^z zP!Bg(5>tL2CG|@~iM_W6i8*?HL=YTHESkKB3Ya*T)F??J!<1f=7lB@4aMDB4wOW!) 
zt`|_1wsO=dbshY`&vE#KQIwi~^$wL3?oU{?9wD-tZ7JgTOlrr5EONQxS7f!dyXpPy zvc%dKX9(YCUkU2)YOG(%09WU&$9B{oMi*&4gl4Bdhc(?@(X&fK89nWNuLR~a3xxVb zrv#q*F;FD$H@~YP9%`wLg3>Bd#>`dz4ujl?!xfC4W;Y~r4tR*`iGzp0tqXN|{+Jvm z4*SpMAL|q*=;cZaGu=UE?C_r-Vfy^yA1+*?a}f0D@C2V1IS!Ai~~AfpSEksFF9$LMLLc+PdXJOh7P=YVa}lps?ho{Doa%QNkaDh=a3 z?egN-e@9|owjULGV31Hv)+05xZl;_=2FVQ1QnGc)Timx@iJU82OuqF?ByY7B;0GfN ziLYU~)aNI9L{f1hpPL>LC~oA*t1MT zN3QVkn;T3&6DM9F(FV)8d2T-ig-^t18Kb=b-umR9jpANE7uwbTT%am`tfOXo(;{!J0ctM@bWadOeVnfI+;*N?f zp&vD#y!|PaP`sCk$60>I6|X1~e+Cia<^E#4y812dLw+M>Pu@tx+6NJi^9zYNexo>M zeU|t;KMillyFk2kyGleHk;I?^Z|wa}Niy-weA4vZbKEjK2w!tWnjFnnBws$}5!Zcw zBEklDQ{URh_`V?}VkMY|JD*m>W2Q)8D+YSeNfjqiBUcA#&+Tf+Z2mVSw4jQetFi=w zw{JRm>+0-)DKamFcFLVRt(Ef7q&#hK5I8z!J)N;&h+Ci$&B(#(lngia@_Uo7F-L?S zwl#5XkIHf?kNj7Q1TuF8F7{p*n1JjaNlu-$Bb64CY?uIGS|iG)5at6Vfs+=sLdFePAHjxd1pY_Pa`?}ZD1a*RFx0CABLE* zJ|*uZ5xL)i`~<|4h7AU0@e)mxMa6UKt{2bjwA~kKogjmHsi#TZAJ(9*rX3Nv7viE7 zC0|JEz3ZuwBcR#TAGy>4VIEcHw}onaa)9a~dMVjbUD4Z)%hbWeNoMQhatV9K6hglS zG&}iVK;#$NO3iYsAoD{3kzH1f=yKCvQBYGFK0nXhv{<)~`n1A86lv=sn)&$}(S0Wl z_wTmCF7TW%HAAug%9+z}#m5NL;zS0+eUDPSFe-63e1E%v@Im8ZXu-=WaHFp?yskD7 zwBgVHcQ&ENHoU+ejQPge7U@}d16E+Xvcbz+%c6@E;Z#mt$w?<)p}=8~OwOyq&ZV$oB3k(hZU z@WThJUE+Uwx<~aQHpwy*x40ipnCxFJig%BO_xIgk#)eb%GA!f7L_Fum2-f_43354i$%QO5;8x$N9)8tKi9567>5{IGSBz&Dq_!1`~j zPny2SN@zE<75Eh#$H@3Syp2y|m*lc`B$^wIm3x02--r48HeFtmMp%e+&3*$o-P&~u^e=_xHb4F|mT`;rTF6f(8 z)x=li7C-X24I|UKaEvIczd@K#L&UWzN0H|Jag=tK{q&1947K z8|r>yBp#trhA--m#v}?#QRR**c%$?wxMRc~H8wuL^nqlFEF2)`EZi`sLHLnMgo@9J z{R52tz$+Wnq4E=F$IQJO=c$2^bu-g1Ihk>s<@Mu%(&_S`^;uQ!OU=*1E6& zG^oliQ|P9_o=bbSE5<{OOLsCd>}|^jdv3|X4Xg8+wuNUJ80^cZ z29#ZP3~q`6F*c#Jx# zRfZ|dIg8Y*@sJmXPc!ZPPo@h*i?GA>SMWdqo_>g8!AN?}R}H85)_6}y8^wXFUkI+`(oxGauW)LwW<^az|K>L^=E zz4zgh9-FGE1#9#~SCwk0?yN?%xH6mjZHh8gn_)+(&Rb5MSQbchzceFqmORH?KZ@s| z9vvh;%obTDxP>0-%V%VosdZ6!KJywxVt)l211CXYzVC!rPVa-E+;@!hJuwTu4E)vu3?fU*J*^iRFZ^l{jETV`IBZdn9P zxFfFd7uy4Nw6bU2RRTH4eRG{K>h%q#&wby!GT3Vm&1AmQ+-R(OIyKNhiz&o2f+241 
z88~RC_%~_?5%r^ch~poI8Cm3Nk<7TDxtM%vrpyc`;N@@^VqHH;Tnmk@Gxa~2YNe~- zA}4bKT)l^=RPm;iugpef^w}}(G%FX3r_L09&_$ z=CXWxDQ33l1gbpJBN*_QC&=cE&C7Pg?Iv^y7(wknoq?h2!PuSaKlw}Nmoj7h_%8@u z)$tUa`sWz>^Mf7!?dc^nu3|j4u~Y&FRQR|>CJD7=dM{8)9C=)o9Wj8?HtL2)Tr%-)WJW}fUp!yUCSi~dX4Pag}MW37TzRQADOhzV}|JOtl3 zw+A)rkVkJl--}&-{s3EfQHb51egSrRI+%sUoAe)1A*84g} z=W4`xK@ZeR0;^(GdF?mYI5YRyXmDQkAvoTYy;tyWFXhtMdlT99l;%cb`7AI+J`cTGeKBm0>vIV>cf;`C|T< zxp%&W*u#Sxg5F*lhRi*40fBFyfdXfRF!PC0hzzk%_>#C1`GRoR87JbM_oXt{?WdFj zkBQ1VdDN9-wdAYgrKGx_HyIbKD1tL@QMys-r1eB)%6&9L^m)m8>d8%aDxHT?$=fDS z6<33)^*e$n09K(=PoriLvPQ((H{zOZDTyN9Wmq)x*%qoTW+oXGc9qJTp(m=08HklF!QwXzCH$fWQQvAo#sYkt=lpa%(+nn{<><;ms|J;yd?gOLj_($a=tnd z@oFw3(}B5cJrkOXhrG0?H?$MWzI+)jOIQq@&ze0(rb{pEMV*2g@r%o0BBS^smH4C- zUe00T+G3TX5XA{KNZX5QWS6uCUY-+-E_6MKK2q1hFl#9+JK`+5?{5XN>QE@U_KY`X zOua`x-n2p5V(%imRli~2R26jJbbVC#av6FyL>b*f1flD;s9-W`WoYyqXWZ_2F+3FL z11CK)!Z}er*w0l@(U`yDd)D{e=%#=mEc$*f<~+Fs(S9ccyyY#Cobm+J@2DKg4edja z6?v%3F;#TwZ(qpv;bFc}OTKXAjV0Xrm(BZjFYGESJ*BzPSimVK z-uvk=e^!my590M=et)$bdhBN(ay8>Ss`jUV8KdSd?A%LpSsySJTO2nLEva^g&WxxC z7iwBCc=VW~KdoL(@S3i2bVtQ_BtLC6dfFvVm~o|*nZE*-=pmmU6(jfO?L$^{&&Fgp z7N{Yz4V7Nhhn@D_i5^*IfbNh;LiqdkquEdHV6Nwpa?G zis5=#N&=9h8n5;& zTRUUEM99G?;q1Twou4Q=lyn-+iX= zQI9n#NZ|H(My7Wf*;*ttmyoj}3>eOUgJP2*ue$pn_mwi!9xG7^iP4N>Fdxc5mfulB zQ?5NnC&z{GlRmS4ZM?7YWR7$gxitG4nW3&}7BzH>>hhf|s)~Ltn$tN))ZW%ksfjR3 z@;gi^mkUKolVwDABs3|fkN_(8l(gCTk1r^hhC!;T?<=)6B!+4nKSA{BR2|i{TtejY z%-u|_D1{hUT}8OQ9~M=%Jr^}uU!g8!wUTWkn?$9jlSJ2|lSEyQ$}mv#BY*s*V2WIE zi`rrFjT+riPmJuFMC|!%ioZ94F}sx@@-})h^5aMYI(}q;8IR}h{RRu|#AkZB72va} zgJ8~=cHuOG8F1lj4`@$B@0huFy}0hzCBJNDEX3daIW0ja5qQAqf?n@+5J>HC6b9^= z@=p&Q9Ek;sCY}|BF0^3UXcEVO(tY`8e<2vw5Cl!wwE#T-c!23+bV@O(p{xLXu>8sV zed&u!AdTH~F_^)kxzSi2JDtJpt`(qCt2ET{qZp14_(pkpH4tu(#nplDRxtZjs|yF1 zHfb)s0SiRq4L$G-o;1qy5bybCM30$!JEzuT^FoBg*1ijbi-&}0L4q-&;d-5E$8Nq6 zktj8v@a-%nCOGJeE`01Fe?Naf);{Bjey8J&dfucSwotAIU?xej@OCF0RtK1aI0oLFAnN zllq}pMs~DR68kVi3i_Er6{?C0tu=qfuG@QBST2B9!zJ*UO_pelkvAjLqdvxb)#!IT^^Q(q;(b2YGq?@3_b&rI@2EoFGGos& 
z(`!EQqMcaXIp-L|-&rdUGYqCSh`=XZzWIcw%JvT*1;%(O{!>HTwn$~yW6bG)UB%wGq2T-CJIgF}I{C_wRM2?Ql9- zJZwnq=DSgSHmX$Vie}LR&tUxP^Bg>oH&3Lzeu{`&txPo@>n8r@ZlhF(?othbO;pl1 z15C4dkbn8EG*#PAQd{NLQEgJc@bIVpcx6Ef_VQvIy2V%p(JA`{|84h14bN|5Wco2| zjnE-43W~1h3Hxlrz{j&fL9_EW;cUbeKGeec?fq}AT06G{EDmIIw{H4#0Je@9&;5NC z1P99P1?#p{f{Qk~{nKluVpZX~p-h2?8<){fiNp%#|I>*n(4(k3;E9Gd@O<|{rvJ}3 z$wDc=b_jQLv**Y0yi|zB-bS)#Tbdhd4HVkATQRq>m&~>aIet_0aY`R+(KI{22+(&GetylR0 z7vccZ&Y55h_?LYFtQMXFD_&K@48T}K_mU#2)U^v$R*^-`C906r>w1Vmts-Kdq=c?+ zT8zHlqlADH7b40@erRmjYvjkK+sMS39mo~!D1`fY0P$EQgFaq%1Mys9g1I`nL!YWg zA+63e=+}#pXuj@LG~6lZp4JF-`y)+&=>V1bwEk=<&DbXqimck_^bmo3Qe zUG;FswUdyMgO2dw_(=W)i|s@HX3^>(v<4ku$x_)@7H0FP$Rr<#=$c{H{XkM&2QxzSkl zmzMLE9PtE#+!u4_7kw1e)d0wwlmO(_yl-&Q2R7zh-}pCU%c8lA$f}|ql37rnWfs&@ zsVMM!t@&@v(c?5I#SXMddkAx`dm^9aE<{R_!k}5d*qrhYGhFc_p5JhtSR+EQB9>Yj zGm+fW9Zxj8j^GmXfpH8>ILiTs zyQ1ZR<%;P7cLyPV*IxtPDFR}~3Ef{c$6bLp<{bhRn7sqdID3=%yTyu%(Bu+vZLfFD z;BO`Nj>pAw3v?^hOmKX28lz*G<%$rEy?B(}uh86RtcB1D_-JpJ;HP`EP;Rd#WIU8f z@{JoYUC$fXyt!G-S!SymTl<3MqWiFw%w461X)E4^2d_Say{EBrIn9s$)bA-pgWY1V zz|2_8TRDf^`D!U-5Ms}?Gq}SEUimZ&K4+Q^H}AcVW^0Ea<=w$Z{r+O{p3D#(E&7T~ z*9eB)^A%9RvN8n!;)iTpwjJKz(E}&t8e+qVAmTJ322oxWhm^_Qg^}&i$ozo>#B54E zBJ-u2i=fP&!q3ABM{f<7mulNai{EaTOVY|2KF|7=^ ztzHAMOOit0-2MUG2oXW!&g(*(H|PjG-XG>adn^KXXe|{!j!|R!Uss9)Or2)~>rQRv z+TH+plIFJjJuzOun?6@y#@zp34Zh?-uGz!Ij2@VQVVntC?@V+Zop|~!shpytGMt{M z|NMK?#W}C~0@^uIAbXY>{><)sSBuxvuLmUo*%38f`{4c77ZmPny)xiawM)HLY$%O+;bDDP);z0zD7 z{%E2*G`*qDpt-Pm$#H%|>{wq=g^8Di3;zVbX^+IU%}80)YrF_NoA8YpSAXVvBNq4{ zWM<@YWcs8g?9Pc?6dPKOUX+!^j+^~K_wBfk+QA`6!XYoz*kC;N8C1t?lpPSNZ7Moz ze*q?HRYz0B1=UrDz0l*0j}Rox2VGm4hjvU)K+~->ahv>Quzl2GcqI27dTgK!+x?M) zxh*`1ObSRv^*_jCN!2ANZ*?4W&%@KS@a8bOpjHkG+o+F@KX4j-{8AGsu>JyXlnMuR z7DV#*EKlKAO!9<=lLwjpgNnrY!+Lsn9{ZZP+OHCYzqCiV3o4TZq2pYEF9T!qu58m7 z;O=&2&oYUStagdbDmnP1Uk#?{UmO(5VI7bwzUb4VJ#Ddp1G{_j|yZJHa674zAXXR+Q3P+zl0 z@Jh#pzwDGgdb#{GS{4?LSf?s7`-9_^Y@9gFMJ87rdr&QpAjoG}&Et~b_tuI3<`G&? 
zzxn7OiVndDu_pz6m^KMLDbC;97{vPbP6I-TP?HD5mNp$Sw2>0|>}n$&4a}*SBuUYo zE8=}=niUn*O_AImXUHWzbEv6DG%1hDQNp@q4SCUUrYJJ`E?MR+MOFS)qb!>u$gdbc z6-thyY_`}?%Ci2VapzCtFKo}^Uv}w>?j}qY)t3~JwFBeG{{2TNKeCpZ{+ddq58ubHKeWNOO7k&tGznEtH9)LNWf1lDspwus);4H)p`1T^ zJ^&x_@f12_WkUg}7?}KA39(B#0s$*lj#+GahZy}iCpycFA(N3ia7T+OXm0cYJcXt)&vX@Xz!$
4l6`Sa~ZRH_2z84N%_gGM;iehfFnzJSuC^|a#q zJ#<`ekM8c{u*J6?gS!Pl`BWM^h|Xp9`d(Q2Q3y7?;Y`uh-og@cXkv@JO^4RDR9we# z0=^oyky&pmFnKA0{^eg`%H=42KCv8gR#e09v7sofNk?i+zbhznjUbb)1Ee^EQUi-_KJhak!2_#ns) zw|{#^BTm1B#qD+|h%Ev`4__4g?Lc3tO|WkDWmklWtG`$FeT{OZXg8z`<7*RZd?;uVqds z!+3hX?}oq|j##?r=_MH94gm4aA^OKbg!;U$C)T_F;FassH15VQ>7ALP`_`<$JMZ(b zMzojO%OSdV<)dCs739s76W6)E;QVMl66bH+U#e2x&v=E9DZxpd21M(-d3G{}P! zSbh2!+T9L44fpV12|sG@<0b`+&#*NqfKF_vrg?h?iN@OVbo6~ZL|Vx}p_~@TkOqh> z;z)Uz?2W2|IcPEI01`j#Xz=}S;8-Pvs~&p8Ybgh?%E`t}8p`N0dLOP#jl!KIE}*`} zcroHPBrJap#dnl2Og8*w9gAiEfLB!EiJLoQm{OfBrD?`LsYm;0n^Z=nlRy(R+CQP$zj)I}=J)9ch4z>797rBZNA~%>!MdJ~BFGjR8&1h)Yr+C?dnz)4aaf^&A>2z=e^_NBH&G&+} zBD4qAF}(D-q+X~V?Li*BP+UH2f(82xp+j4WtQ_UTr*rZ#;-v)G=W>CrUON8dJP1;c zs;JV=aOl+fgC)bEC@T>MzXXyXMQRxe?`y%c@5fMuwH5s(PD8NgBwV{`i;Mo9hy9Cf zvFy(i;2-*pe03i{`_vOSYQ`ZcW&aYl`&P0f4U*8{_Fh4CfW+;WwRN;%OX$Itzp` zv@i-j?9u@39K!ogJ#FT|O$dOx!tbLEmX zZ>-?LzZ^*8H^vYHf~&$^pl$ybc+mbGibVTi&FWw_j(5XN^-W>bPa^O`*xr-U}wHp`aOUtu%lv%>RM^~}rXD7ZMK2FXbuthQE zxAwS78P7TC;h5kAY+v#bPg%Ue^L|eB?jI9UpyH0dM4pnH2aNG*5{INPqkX2^wUFHT z9_$R8XbevHh=R@+K=}SW@JgtnzjoBX-}+Xt$riv{`6Fb-nOxk&9|3dvL+NqnOxUVx zhuiaFux4N@`c$_vnVSV<;`KOLL6_h`vn%W!F3vD=L;@2}uOlVtZSXgt5*9BLpt&uV zBt-VVq{-Ja(4@l+IW}1nPQ@q~S6U7N=8o)8txVKnSnLRq8PFRZfobi%ne&F3jGV#v}haaVGa4z_(w>SN;bRJ`_VRXDjMwG3>%*6`oYdL*o+- z*laQmop(jq*On_l!FwxU-*$&JS27{UumL_ZIo$PraxIm*3OvYfb z`3UY?6+sO48NoZ*CNe3Sf_Xu0cs#upr!$Yh{eK;BMVTWd*uMdWRMMf&*ad9s3-L#%T=TgE2E@F`{B~j7>vkLVZT7=g;ySDcqeIxWV=g%%7R6}ZB{b$h z5N-OA^@kVV6}k?Kt-U7jvdz(p63Xe-F$#F95tr{aCT~BP-m=0DMknp&FCDO40L# z`On5+-LZM77?lEFJTzgs;{sX~#qcz=7h@kZ!C1jUyb?7Fm)&1u*t8i`qVj%vHpz@-d8-KdqQ_y}Z8ao(ZiKz2uV8!jAM_8u z!+N%KK06oL_${6X)}JcG8YV-LB)9<<893uQ;SwwzZ^fm3Rk&|KA$gm(i70J60`1S| zq*M=cVI`jzd!uhDeq3&ZhVBC(|8fu_OftY~CzFTY8BDm|wy_rGR%3#|WU{cdG#r?p zK&#KqqVk?5yvVMm!bekx_h}bwjeQ3ZidJY56-JG#-oU)~EYO;h3QJAJz-HrXcI~20 zSgqKHDU+epmi1gb=~gRj(%1?50vGUVbuw%<5e5IWl{g-;Ho3ttg7WJpU|QyV{G0q2 zjJ1k!^jZR_4(UNqB|qhoS%*#!;?YO^A8@@jW%0Mn!t-O{xYIj7q=uvfjAUO)@r{r82KtvG_wfrtsa;)%0WFnly& 
zqbt6HeN0AHPOA)3p92VSN1*1gC@9;R;@gX^_}Q*gN63yq$3vD}Fon({=^ zTXV9(va|><`&dJUVmj7TD#M<*op9uOA$#ZULm<(dk6CHka5CW^ZSJ-p%C*1Yrf?w` zoH4`o)>_|WqP^&W6Gt({H!kU5g8&lA7vNcUhOGTB2JYS20u|lTsAVos-aiwj18MnC zKfZzx%`@O6=2m_cIHP)JUmV|Zoxo~dS5w9{C7Gix9hA$c7 zIg56>w(Bf(R{ewOgbHTX{Dt+0I3)GVtI#$)6%P3-(cgl}Oix28ID55&mGn`#RnB;3 zbsLfLjH^z@zyse2+;o094Qnb#H5M0^dmEDw zwNN}U9*GPkg)wK+h~eo8P#Jzi2NF3_rXE|tcJT;oe>8|{l^gNh>~gHlqJK^bm zh8Oxgf&p<1pU562&+eXtfz)`6%+#k|yxKS;%}wHYYcW9975C@cf)B&yaZk*{%DK&0 zw9uO#PtGSt_XlC5@^-eqb~nENxDLgu`r*@2Svd4z96kw-1OMWDEZAd)3-&p~`=JtQ z!;^+Vf9~PwpxyX!P7A!|C}Q}*?YLQ77R@3!C5uiwQQjSESUX}}ActY^H5}i-FNHm}1gnUFzc9+HRng-H(zNAw2H1u@pr;sa zxan3C>xH@`hHhZGUvv>ooHpZ0x5c2*REaL8t@QSO3kc=QrO^j+sJP`M?ke5H?0|pC zinsJnhll4#&v}_&>P6?`R$t zM3{kw>n`Z}lR$^xbHaDiT0Guq4H5zeQRaF9cn74By_2=L*(H{}Ymg&F;Z7@zNOob* zKo;^3e}wH^ZIH~T1~GMx_~nZ{7@688Xl;x2&a0t2eEXbxt?>_yWq zKfGJx#>`;3_-^?>6q4!(7t|zzQXz1{TncTLl;Y~gHeh+AiDtT5qgLBDELCUbK==C? zd+$8hDTM=fL?T9o=fO6H0j)NVVsaaiEKNf<;EO#DUEAf!@X|C`X;#4#+b=K z*|6UK8pH6-X~^9ZN2X6Qd-JDYkbTIFWB)zI8e`6szWslh@1Gc&?L7uX`n>ezvb`Yp z&;q3jxZ&&zX(H#+4`0O9aNDU8_I^uVI6p=Jt`&onSS}tj=puZLd61^mO)uG|Vo*#X zw7>q$8gM=ZX={{G%;P(dNJnxu9;rIx(@VNJ!&;sFczLLs@u!^F{x=Vk!c{q>IqEr8 zADM>_rfu=@$WG)WBJfl6CuCQ3lFGP2e7Eujtl4u4f9VL}Z-$@uTObY3yejCgom=7O zCoM(;4gl%C0w&iSkGzROu=`XuDs{Wj1!p-Wlf8VwDLfSedTT&#eg?|Mm|;uDMzlyT zrdR8))1|*6v6Z`6B66J@F*~pf(a;2yT7STk*2{2M^e0I)c}V7InZe3IA2gA^1+Vu? 
z!htAFaD1%|T>{Y>MgVE&M2NOJMU&1=Rr4Wl|ok=VC`Xq_<(S zzB0MPq_lB+Tj>k0qTB^$ImOc>QwF9+_STIpZut3`x3WL{x zg3`idY8A~Aj;IBCIu|!z+XmYk10l6Q0GHa^fx_lc5`BC*Y6Tes$3=ZI_eCy|`KyRk zNnU7c5sA9~%V4Eb2mPHSj=E)r*zor$dJLH2t1UO-(|%Vtnca)(;ibvJ3aeng<4+)M zNf7>709kWHB!cFCr<@`ANMGy1=oc;!eW8OAOaFtPeA}>2C4tsRJx0OQDxw~!Lv4Ll zfoJGrda%Tp^Ju>V-*U;F@o|F+au^*+6lw;v(T@)N?v!>REE@7Hd7%05C2%%zpki%SW%;6n_(zBF!?T1W!s-|=+^&)q-0(!9-L;S`=tPAgIa5rD0?yRg!tIg3#*Ft~Ywf^Ozp)_U7^un(%hbza{jeirpZ zL@2|O1Zpu_-xs{m_MYi2n}Be)NIEArfSrEzCNAzZ!KZ#p0eIuk-24bRb<_pAt|(&g z(beQypB->;RKp_ec&he;$tOHx_S2>T4BA$T-yPM!FV_YBTnL3kpZnl?UkSA>_TWtg zPqJ4!1JcA;l<#;Ie!Omv>-{58!0jBvBbR~9=^U~*<1BFY+90oBBe=z9!?M*Jk~gAW z69Y|2!n$e(yY_BH+gNjSH8=-Nr%P~cg*QBtD`EAieXPQ7kb5Tm z+BJwLUT+3f@izP;%1l5B=ZX^Zxj$d11_SI+lmtNmEcZ}KQ{TP zVq;h%a@y}l?rn$3n>TgTTSXJ3SXrcmh=Jn!cAQv{jVrJ1hN)$N?0W-WAuwYPeBJ7Y zpQAl6tvG;g@MZM1H%hQdbP~-6)mp)5)gZan8cLU3 zPrw8DgQVlR1#GmI1l@&BB;v;td|^@p1#$)0uO>h?4DW+*=Oxf7(L~CoKeIVogwald zLvp`B6`UE@hA}!pqU)rIOhGLj1UHgzJVuvn?}RW@EBJlj1HH<=1y|RoL)xifQgmhr zHEd;|yZJHBj`*+*sx2Ys!C!h(y^ZeU3TLt~xe)(;5Ds6R#D#8Jlq~fkbN_`y`Qg3f zd6od>+i61WuYbc4DHHa2NG6}PT3IoR#qj0@#*;8O%^DtK`Vz(#!{5J~z#}LW{=P0i zY%zqPzhB{I%>w#0=`7i^)enZ79d#g2GRN!2|C64R=hAfo?~zUH&VVrx@8S=~q1*l}QZ${=pf837)i zK~yjZ$64nh5~h?6=3lB2nH;UuaSM!GdWK$dEP$_jPoTPB2pwXscNc#Uje7GQ3|212 z|9DP=-t;iEw_ieEt_j+)$`qCkyoD>rI3(w^j$+S+rz=*ts`_^|8& zRC{&Oozd&4vXc!F6*@)-#5ckgr*sg#YJeKgZh&r%87e#;#6X2sR??1WSRI!_{<{?d zwm;mlQTzkAo_4~Qo~Ox{m_p!cPGGNK=1*3|PpHcugOKtFI5ap7qtXE8IrhY=wFh)| zKB6t8h%}AIqGAd!WG-UPqZj8vocl3s$}dNqN9yG8x-qIJ_!T~_?u5SjP<;Mt6kZ;m zPxmd^hu7C#0+yFLwB5F(Jb#*SClA2LL3^lO^bx*(HfP7{`=Iajcvv5M6ZYKZMXiEz zhLNvgx33w;wW|Z@g`4KMYOXA7%8ta*qkmx1^(W~xvchNi5`=u-!0qFu*zFL2 zpL62S^hOFQY1yK#%o`Nk^@%9V{fu*GdRc{+s&U7QK1-JcmB!_s4~mgZTK@TJ{>1 zNP&)? 
z9WVY|jSIeLkXx()T;!{R>lb~YOX@gb7AHVa*8!rpFdbDP*|_(>1@bG2=>phR2$Ez2 zk#b*-C3l!k!jTJbT)G*qs4ON*hpwZW=MebZXoSC0zwn^F1a64%#lLNT;A{DLP!?;% z!yH=3EBBio`EUyDqnzN((oh&Kaba>E8RXUW77(gk1`_!az{mVn*5*}GmUS@DIq@Lt z{tr0fnjtwu3rC(Yops*sq+;18EFG4FzKIlgwk3wnd({P+pFL5H`CF-VkKs4F6ypw^ zg%{nyV593vFq9Yg`-PzUixYZ2AA|7#rmHhM2n^awNW!upVlv^!%o`k%?*{JClRXPa z=%Pt5?$ZF{r|ZD4Zk#k&HL;_O67c*>d*n`RLd#AmT$Y|qd1lkWSZx(7GAo0gH%;Kr zW5W0he&F!@HMVv~AaBq+(x4oOmih0Yx&9SNzy;8}cY(y#Q-|4Vg8!l1yFVy$tB1B6 z{RBz{eV|$rLwx=PLVB4cYoO{AD*H9UFn0}3--9tz+fy=>Uw00EbVvST7UbGrf5)SzeW&@ohmC zJ>C0+VHhwyINFeQNDzj)d?8$JJ67%}N29QI6#uo3_^aNAGYKzIG|m$^ zGWLLUJrDiK6$I_>(U`v^4%3F_LjJw)RJl3?{HPb1w40=L#!WboQ-twcvatNTH~0?v zKtr1ea$QQqq9}&b+ggd72gKpess(V8ClB6aibINJHa##+(8W87o#CMX7cZxww|zFe z-gg*|>EFbC$?t%B_%QkCu%6|8Gzo1|bTDw$HKvoO2IPl1QZA%FhA!(0>OH0iWAGKN zisSLuy$djqZ%aKCMR3Zciap@*1Lai)pv2Y$_rLIfM9%eK_~kDB`9uJ2f9)h3^YXCC zJQO$cOhJm>NsN%*j*sXtaT^yPmNWM;=_|px%T^Qj=~R^5q5yTBEC|z^p%;Zh;gDD< z!+q?bcPs+o;6p!LyJbI(80CSt1y-o3K7&Q+x|rnf8|UW7!hBH+oL4CVX1>~>T4qS^ z)K$Y>D@MZ^{fA|83@doake)jf2>c@L;2DsOQEU#$bhRcZQq4x5R7v!a(}SV?TzJ`x zGbQ&|KNadejjO+1WOdFPrO~e`O(<=HJp~P*{Ld5@87xjxyK|4Vd#(^FDTP3;hy^^~ z)QR0jJ@oRXYNn^m5fu4WV!Mka-I(D|R{s2gV#$?IE$R&8DWMQlmkM%CbHVQ(!}8r= zIy#H%S)(`k;lu!kBoD6{oY_1BSu;FjdY%zlpG-l)$Lq-cxX(mD+ZPWFF#J-O3GR{! 
zWxL5sq3YEhGMP9Avv%opi;FKf?py#t<>Sz0^%2{DUBk5d+Q~cZ&Oo-GDLMDE3GP2+ z@|G($lQr$N=y>rv_&RT(*JZPTVeiM`3ST3QW8z!~wB(vexbutd^*OXOdaC_-zB|Fw8)=wI?)Lj)~CBEbbHGfETwb@kdoY^nG3(uy*EHxTUxczvXR$D7 zmU&(87&dSo2LBg;b^CL$$i4(-AGzRsqb)?xT8gN@_(2pkU%=v@ znt0j}Or=L?-uS{b_5yeQts(VnGvKcg#)GaL*y3#f32lnx_O`_^cgz}n1{!I= z3o)>o%E8N>rNCBBLzX!K^`%j``ehKj*>I3{^7Y~_7-O@07h^!b9;wS;1*g2b(Q6YM z7v$`vZ&#+H?1&8}x2scw(}(fO+>K}{?+7iE`%rMA75l`Sz;n@7;L!|VH0XAQ%e{?0 z7JRU5EoX|DOa-Wp#NsQJN*bzmi(2kj4}nV^&|7*2M|9IL(s!1Q$Hb7>s}>+^s)v)| z3z1hb6g>iN<3$ZEeDHP(cE}CGcfo^jY9=UwgAsaDIv#k`o8ahI3%FUtq*Fx4 z>BrJIC{)a2A5e~?rt()QWp?Ckaw#CUvkl{_nLLWZOISHng95Lb@eW~h{;5`sUcf`U zl6_F2yBHU(48SF6%mcI{rdhjXO5j#@UovmuPgGu72Ie;_P_o4jM5J`U@a7P0Q0%9X zj>f25{R~#MNWlHz+YCd;1#^u3;a7({lro&e-QG_aJkg1^&X(Z!?+5PDFovDq-(m2E zB)ZaPBf8j{EED{TPRaT&3PzvWvDj>R`g$4}-L-_7Ka@h0^45n;B&lSFaYn;J*vpznQ zEu$+USh!ES33x)MK{EV5n7O1)cdd1VF!4z^yGaeVtmz`reCtuk<_jvjaY()v;vi9* znGS1_F_>S`hc@<7u=8&e*fM^#+`DGbuP!HwzxzNdnh!tJf5$hRS7H0gWY{87N3WXB z&?6y7XnM9Gz`#meY4jbdMJ4c-rzWu|^JlV6e^GF0H5FLi4UKZ5keaK3OkW%tyc5Ts zN6{eEG)$i?+zG7QJhcfT?(D;Ey8)0o z97!w|a$^6z9eD4g9r8*XOCJ3#jf(_EFwXfEtk@g^Z5Fx|9&aPFvZ2IyG6Ux?9l@0J z@1T9vk4miS#kuT0+OT^NoeUV3*@Z*0;#?Nk#W&JQ`#xBudG5q!+P`h1sx?t&Kmq%UQxhO_-EihE~@{@wJ0D3T1m^B7ZfO zsM_F_TT5VVtrM}<*oMPrm0)l3FjZPV11S&cp(fpvHfD<9vdto}>qwqN>4X^SYg8ob zaw)*Pxy#^G^jENw@ugupugPhrN2u3v0y_pjlJonS%$vs{d}1pGIW?mgn*Ry547Ov; zXax~SdX6Vw?}H$ZU2se!jveT3M8bKjG5Sb__(n%vh}y-AjXU^Jub}|slw833%vmxT zT8|o|d1z}CiH?2tWK1>!xFq(&hZ~M?yqxJ@O4^Qm&lPcxttXVZ<)Ct0F8#431Fdwj z;FaMJl{*{{YGGUO=f*M2UgHEQ-kj_s3n#$lL^eDyFofGZ&UF7G2MF(#M{DaUTqo{} zE3q0=(Gzd*EQXzmp`g3ZkA3;NJ9v|=43CzH84>=_Z5xYQHO<+!>qFtVe*oOoIDqYo zw%~}$8a%br4iu{^Xya!OeECWu>G9QCC|YO;XD#H}M#D>~sHP8;D^{U}PdzTP>}GA@ zO{AiW`BAm`8mjRg2GxvA=#3IXBTo;yw&5lYI(B32hZi*dL=2t%xE+?o#^Yi;BhXK7 z!3zvCResF}4;t6A-tNt3kMfJ)`kXe{KRJXOzDZ(dksI@52oY2fNraJ`9XN;KVO!^$ zLDQ5tas>yY*4{E~==h3DoXcUeS02@qcf%mfDOelJiC4~Cg~>>UcNnNfu4S{J8g>}R 
za`Lc0atw|{2SED$X%PH2ie~(csF7)jFTE9sk>)?-_}Pn_{N%jf6PUSZJ7m`7(i+)0P<^&0q*~GH$X`)@cj7KkING8uDftzEbDVqI_P`rqF`V%WJ)iB^z(rnZgr4GwG_D zF%V^G;}g59;1-dLOQI&gX?-=;UuE)|Om0|PnCZM(kb$p6Zo*E5r^rwJ>0W0eICXTG zj^3R|W#bt>Pax0CF`M&~Xd@!|#hs6awi%!;c?GA< zgJFIn)9aVB8sp^KP^AAPDn*`yZjD%+oau(YOJZlL|jI0Qvek+&eCC)%&hZjprt{~a+$XY0u8HbAoU#PW@0^Yi@3=dB)!G!;+fK%O$ z^h7A*Y0p?pE?N%fPF3NWdC8y<=La8ay|7(mZL&a@Huw#_#$)5(aOmVJc4}iC^rl8) zj;R0HGg(X*CbKqs33DH%o*Y~W zo|CfJBCL!*b(v>cjP~KdrGfur=)B{xZlf@cN>fXej0Q@3FVB6D($WxVtF%k0G~QBX z_Q))ovXX3>&wcFdBr{uNWM*aa-hcV<7oOj^&$+Jad%@*ntD%;flNTAha$zY7aIEhb zE*?w;zwTBJzwmAt<+7Pz^(a|#YdlgedWW&ug-H%6~>cIxpdN|Y|Lwi~ulJl>_k#%v9h1-2l zJnH~n{n*0wlOKk=ZsjmYCaBQkw_yKl3^p52!asy{K*_>T_`&DMZolc~7nt{R-o5y0GbZ5oU9JMgedfo}2L$YCR2s@96SHk%O3wB*b5rq{Q zT+7{+@a#`0tkH`k8SetHn$30s+uh;#-#*mVGD2ea0+S`>xO2pLNQgu^YUf->%_lK1 z%S)Og6*bP)<#`S>r#3+Bls!~v#b=t4bpkfMZh)GZGcjyKG4}TQa?77}pvk;U!~B$ zdej5)V2+vX?r_Lv6;@#sD$a}{8j4#%KDi2O*R-R8HHF5yRQf^uJ47k#Qk|{RpvV)7 zu1On#|3(z7bG^j5ebW#+XRfE)gWuu0j70qD69ebI?S?CsM!1LH4M#k)p>ks+WA7h^ zJgIeLmb z<+|X9fFJ5S^#CaoFJ_0VqBpvSxO}Bfz%w2SGV52+dd9csi8aC<$I~HrqZEGBNF;-Y zCg8&1Vf@6GLd{wXXq8JI%l9Wjq|gD_?J&X(==?)fl=A4sW$)nvUpapGtHd*9P^|{22jk&-xfP%+AtgJb575#f9oC&IRumeW+Q~j>gyKl2DCgaNK+yV;5`! 
z#~3HrdSM7Q{S<*&Y;Ui8rh;qd@E^3gULhyX@yHd*JOm3Ag>*T z9#o)M+Xu+AD1&3GDpZg4(zNze+!W%JC2IA-0l$-$hs)vK-}`)v;bq267gh zCGz97P<&e!4^BBi{^nW3UC$oae`OGN?&$@=@)h`*-w5VLeWd3v#3R`hfX8DD5^wsx zgyo6`a8`f^b7!-i{`^Ite%K4_Et%i!Ae%+@6~USgrt@0uK&j{cIO@UJfHl|9T(|>P z1m=^({sEd0v5YPkVa!(Ldzh$ZgLQ|xWZRPzP*m$Ge)`i7lDY}7M2`89O~&Aqrw{2< z?#H&P%n!V)2EGa<%KE-z48WWiIL5OdtenO1h(I=_Jv#p9aZxN-*Pr2Hw^=3Xg3E&|r@Vp`N*4DbfE7t2}C;C(DP}7Jnv{ zrjj_tW)Re$7~`q5S~6hH>dV$YC$bvl*td5xZXaI>{eE%ql^aD0F80FwnQKwq z`aRH&0Qw}r3NP%igI!--sP=}Lbj`~~c(7K8Q`{*|{*?Pc->ZLUGqVXNf}?Pb)>9Od z4+oC53!Yb#CU2v9sa*DacqWiXZkre4I?iXtnefEa2s=#IuOSLE%&fgT)DSo4_9Dx>`+k8igK)T!DT}tF zVw`146Nq=MVOiz~j-2G;e!Bv~yW}n&*4PaB&+DoB(0fq(EDzFwD?n8RvCj4=kzH~J zY7B=t?(=={Lsuo7opKk}M#+Kll7}c`#s|x)FTgf$E9Sr3j(eVU(7JZ z>@OJX+D-+VqTwiyD7p6X8JZn*BGD7WBqwfwh@GkA4lS&pS#c_m`=|(Ks2&5Jt~Pvi zL=!LPyu%&(r5NVOlVtF1j6`f;_n&kWRyHyop%5RgF{{BLT@Q#VZNpg#-sqkl$TZTU za9+uP*w{Xx5-&c`v5GEyrTrG{By;hRj2C@-xtF~E@`LW*y$$&y&f^Cw6Q~X_!s^U; zG^U%d&nlJ9l;Ti<`Ihv$TpI~%{fmc*I7V8qeA=!;67bT8-pN(LEywF*t3Kbxt@j3S z+}<14jc4Hyy#_ZIoPYy!Y)F&V3=rC0%5-+tJ99GyH8w}X?(+YN#$!K0s* ztcra{F2kBNAJJRl0^HTRgWKNufWZMLtXy>-BoD+w->tRK9-%`T{95tbiXPb8WC14H zl`yfm4yPSi3g)w#phdTY<2gAKdh~gbcS41eHfn`scQ%qen|?DEunkv0K#Q&~-HVlR zJfL7`$j$(f*x{B6UNW_y@oP6{vz`U^Fl|>NJPr5^TVTOdz9fa2Ga>tLCziUcgXB31 z@KR|Acg4D+^h(D0d65e4zD3-t_k=-H%N=ama^PoY2B}*Y2TRLGv2kKG>DuW^r?fia z=<+!F;7uj`Yu1B<=Vgd}#u}V$Vus6q8*rX6)6P}RG}tQk7S_!0$4JWx?)Pu4kYN}H z!`t}a<98iW#2AeycU7Y9z(Uxnn1}%{wlbf38D09(meaf`hI{z#VtPz_DGYlq!s(7Y zatwS)auQ-;`hpZXc0Qa0uI|P+zcP?V`4|i|ErXSsE5JaWHz}NH7ryz9&=y`qi|ZXp zPlgO?Ixm6^>6h`%g$CMu&jAev-lL^J7;M@%3{jh}aNKv@^#*_v0w0V&350eh^! zmqQE--_lo$-*YdV83V;bbfkbcK-94WiIBc9n>} z%Es#o_aM_v5wb#Ga`U3HQ8-f;7CV_jweMoCm|Ga`S-S|z`Fs#w7L%R2H}U#wBhDe? 
z`>?6H2#Pdq@P)50Hcf`(!r0YpH+>gZH3o56=md}PGRB2k9~7h?$EWcrNYX2b+u75M zEq8^x>BfApmfwvj=Xc=j9nauHbv(E<<&(4aZrE{NoU>Ct8{VbWLdsY+3b<}UtwUWf zUGOytC^kTu)fJf6bOMj83d7u5Qy93$j}3tzFrd*2v?AA{gjzLRmQREs(GEId9*tY= zVo+Vs0ed>wY_I6TK9h9VrhXGPSPr84QZwwk`3Dn2gz&}Jb{f{y2h;C==k7VVkon10 zz^SnyjM1~iAXO{cE9Xk`3Jzm*?lgGTIv2z3I!H+L4%ipMI8urlFvYtSgpL%ziXTy6 zVxR|VOVnZ3T5kxyUWm8)e8I5vFwwel6+fh&#B&=OxJ|bVVc|a;JR;o2J)G-~GuZoS zcc!Cl~(3m-ZpBv$vgAKeNMSz=R!)x`jjeHGD$^w9Fx}?02!ub~ z1-pg0+)ySkx}Ps6kt=qPte!I9J>8BE=luuWKO{+VITz%__~3`O4H=OdL48dN=zN<; zw(@k7H)q`OQ){*A)LokM^L;L#OkZC=p_Zu zbRM}qWDgu+S%=u&?Wp@SooM-`<9?SBICS6|+_D%$rK>&YI5Q3-WXvIStud$8AQ$o; zFJ*l|?$xavQ)m;cp@zGf_YdOI8gPjz72V>w!BT}0aV zap>OtmDnM;0^d3O!$9$cu=7w6{5zzHPc;|7A&sAyli>)?mp9U&jB~6vXBiH*4?%ZL zBX;!F;p*K#=|ol-y`IV=m*mIxBdd;KeVIJ0-mnk!n-=0Z#^&H5J5*a8 zofy1i9CxywgA*kQFekb=j!!@x^832swUrr_8@1vd+3W|LauHbBbq?%)&WG&N1Xlla z0eLMaSbF3Gu!R%OtiJ_#x*FC_4uZ7+Pm*k>D_N)+4<&cJ(KTfW-0}{D8~j_~^`~}X zFrxw9=PV`{Pinvw7cR(}&c^ElU06^S$Qig21(QP=X#LRv*39}sO4@dlGyY%U>EHL< zEnt9sL&H#1*8zRY#_^(FIZ?Ym2-*vOq0VJd5U}OKQeF-UUW!G%%a-)!^k4YVjaN=N zb~;Gr?4rvqGkunInr&n}S%KTNsLOAH>lbyB1jTUrhNBF*C%;2;odhn7e?rG%T=8g_ zB6^qn#FDgMC}TX1%Pw7q0x>VtEO-UAWoejrgE1(}{RqF{F04Jx_(Lbnu%4o5%ZkJ>Ew+n)>9iqx>GMiG3XeW~QaK3wIngNAxh0LsN@KfO* zCO&Y)O?SAISLiV;sO-Xhd)PfOegoVejM8Njtb_6=uiR7K{6wKSYtd8P64cjN;r!ZS z&YVSQME*h_uE_2}uJ2{Y4wQgdN1X6+@F)1qw;TPnt*CZK1ms3$!}!t5Ad)AFgXQgb z-ct$Nlt!?*TnIv@#DMR(Ee=0j33hpbtV41U#vZgJUkc8m)O062=ETm3HV#z(R2vFA z+v9D;`6S;u6c(-(O0;?~6-EvcsNIlFu5|Ar1z`JV}#H zcWKpB6>y!9fn+udb*_lVI2Rkp-erSNHa&-5L4N4@E(@KN-oxvw_0XgD22RB^;@oQ! 
zxZ_VZC^H|e(8V`+=FfVf?A?q<{<}s}Ys`tfi8W~RD5A{HRH9Y2oR&%qf!LueRJ}M# z9g97o$?^|wyI2n!gfz*-D+*7)HG?%D(xR!nNk9A3iMt-_a#OVc|l= zI!m#$&r$ZtQZ4+ZCXbKDK7xG{7kkU@Rwd&UI!wuWJe z#39+8&sxc!u1QY4XbGBepVNIR<#;RCn#(I{xclwDdMI5&$w&8IP&xe^Rdqe6$+~hV zQrk{4PK*=9^C9%`9i|icwUC^z%VmdK?{Ihc|3K7kfZns!cwaOggspASMBNV#ukWEB z`ns`2I2ZR%^+fvhCaBKW{+UihPSN$ z;#W;8=y7Gcf^`A#v@#s``Nf0G0$-xdAC4{k9~gJv4ZAm$(SJEo+w2XETTk95wuPFTA|4#XqF@R&Zk7fN{KW^T-aPm#PyNxHmnR(leO;0^3z zJQZspJ~_YHPw>O$ePE}QmFVXX4BvVM_JMX^q=LYtNW80)61fRbKYc1EITud_A zIkgBXUTfZBLen@t zeBK9Z3RsUr<}MWa6p8I~&1jQzECw=9`iofBU3c*vB+@$kwN3~3To%A@2?H><%@h|4 z8l!laD=5kQfrUTE>8bU+Num1Jan1E3xYlza1UXbuTZPZ`c6=+$eqxHebq*N%-~eQF zea1pdPt5+^9yqf6+S_DP90<(9wQ=g-JSLJE{4)CUQqt39ll0g zf-MI#h<~*+=xTkT#Wf=^Dk4Y|?6`@NN$b$=uo4D_=);lxgYe;A5V$PjnmiRt$wuTgt%oM;Dem+ye)jX8bPA*zsPza3UlKy0#@zOX2gRGA;)#Z`eReOb-ej zyo8Yf0vPyK5sd8Lz=!u_u`XOSaJ8myJhe=Hl zAYIM@^BEpEPrw6A^{0Z!*Ess+=nUL0ufdf!%cAt&C)9cT2TR<)VApJ3xiyJJ(D7Ox zPF|i1A>(-v`Xvt%_PhkS9vSWvsS7Y;#26zsKZfsfv*6kQ4?JLg?lg@7(%r=Tj`=;7C&_Y|5>J9jnBsbXOH;F25~4i(0Zy1bzdrhwn7r+UX&qOZoY-0EQKXBrK z3(UCp5=R!P!Nd5bP@jAaC@RKzfHvPn>Q=hVo<7520D2_3=5@6a#DNM8$q$?PAd&wIS ztlgY}Dr@+V|0Ivxtb?9#!+1V?yO)Ue8YkeZ{|Or4aTj^KPr{N7=2%sKkwnW2G2h%T z6c}-Y7e|LVr<=XuZPH@ovsp%7GB5M~`~cXbYJkNzvq=5+4j4Gcu5I~O*!ju;{Oh7= zlEOI@<>5(EI>LV6E^WA5ZpwYSgdd`&N5e0_0Vv7R$HK?}T*Z@)AuRh^e#HTPnG-%JKJ7DVDKK zl_;`_K3@)`@NbUtHP7=HdaofZqhFO^{Ho_r!V zB@6&cAtH)De;R9K)IHQUZ=KXN@oC7=`XWoUK{vcPl5qT|8RU0osZM5D^NF)yf$ZqV z@UPt*<VzFzT5rtTOcE?%sHb6IXEn1N2kC>cJV#%91RYkR2tP zekS16XC_d!>kC)vb`)xh)hFi3#-sE=KQSpWhyC)=I^M~ruQcF8Dee{WSp#KAj zfBr1-ElH zD8Vd@uP+t{@~ zDDGfkBTn+nfJ6aDC=W^_5>o#_8%ObVZz9AARp4+>0CaY&$Aaxk&@@yWBug^L$E^os z#detC+XANFPgug!{m1c7ZxI@3WB{L@F9v!&!=DAa$m-x|8nF>jaat!`F{TAI`|Lps z#WCH*3Jw{o4({!PWDpN>GNHy+SHcqCFKcfxGQV{*O4~|~tm0NcF z4=%cufXZJM!aZ>nTye$=3e}w9p`Sn3{7yfTBP*dz#tfv!kX}%j%B3CGLB&TFKB?Rz zzM1Oqc4j_fs!Vdv+81)GZT#ubH$Rxa%@sp`jpI0Ga%0b}qc;^(QO{};&$Z=3Ds7 z&`H_(`AcBbEg5nYH{tiDGWf@yl|qZtP)3vB=jIUDQvU~T4~If&Cn1D+S`OZ8r+Eu| 
zNs-E3jF@p8_@ri%4Y!-Yckn9b+>dJTn&0F%mYMglhoZRq*zcMN1J9Y>Cl?5uJ0Iwl-7n!t+%eR2(1p`RkMPoxXl}@)2P~gF zO8!Rjq4v-w)b=}$E2534pjrcjB-xWbk43og1=Clqr@&f~c9^UD8X6C2$X?m%4HgY; z=(*gH{?+&ovfc*M%C06(d~ZD*wMa$l$LzC@x`LhxJW0*-esUzYX_C>MqQucpmvMN0 zqxnjN1c}SI#dL%w*1e%UTYIt6_6w>kXFWnvFG<*+YVL>bL2%ZaD(9mY1>03iKy`g! zVnBB~>ZK(TrR#!VC(Am(zF&my+sAhQTa%Al|GC5Sq+t-;--kKtUGQ}2dXkrv1bwwX zU>qh+bnfYFrRb4Tu9O|kRA#NELES}cU zlW`{0TxdJ&%bX3DXL~|>$PPF&$qVb)Za`8n0v3fa589({@bhzn>2`lH;qqZ@m|Y15 zjL-M(?mw=_rFQz*;Wx}UTmg^dYVck{A=Dh;k?UYg5n=s0bW+u&Gd$wxr5zpggep7h z?{0z$lQx+0eKvL-Y@@T}|6%B9bubi~1sbXgS#<0q*=$gTGHNw&Y4=azVyUj3m)Pyd#;7NDnJt-C6)s%Lk&LGEmwI374C&YYk)AlUXzSXJ?gM3zYzLo ztwhe0Q}E-bABq(UpcCW$OgYwwKYx9J@HQ_47bV_-C?R=o;=1u0zAXR<7$jK?sl?!CAK{9u(e8ZY1Z! zo=F}!%{z%)d=7GIg{0lX1v%3mptYSH9N0PpGS+&sdmMiu-^*ymmY8Jw=L~FJ?+ZDv ze85@R9v+YuT3w!u$M$IArG!~{s<{I$e(HrAW{cS~I-O%eT3|K#P3oQJ;aNRa73d3 zO)uDDN9$e)4%UEgOXi?%$UOKJp8~!cc9Rf8F5aK|6pNHg;P+}xw5WgMrA8ZQ3xTQHztG5`fBV- zqS$Bu7m@=*8N(}uGnxAdHtf91{%8N-#b+zL^~W9V-s^(EjU(h_oIlQa#CjL@mBGr< zG-#511+(^_f@Sl!z)R&`oVool?!`E=W6%S~1$pE;^`)?JRxpm%upXusQylf4K+Btz zcw~Mrs&ukWxCI{Y#)#=Q%_v*1=?&-ZKcrJ4(umrYe9%9tOR~#qNXGYZc=RTX{JWWl z*~%_ZQCk49eE#@)cL3-~gwt0`rEzEeW;EP61*-yh6~_iBsA0IfQ0U8>1>RS&2`M25^*CHDLXsQf!!$1B+UDl3wK+Cu-fhL$+kdA^iOh z2iG!=V{9tipFW9uYg~)flRqnN!oNKp+o5wU*9Yv>3Ya1(~<* zJ)GmOX5D-rX<hN{emXg{K8PpztAg@h&#`o zqs1Zb!7kI3n`s!f^D3*yS_U>Oi(W4?giru%XYkK-9@-wZ{h0f3lLe!I>Wl( z64e@G@RQn1GTvXpt@Va9Z3ZvK%X9Of&$u-l2V zG&`2q?YM}|GXK%#mrSAQGH=qM-Jw(rOTl2?6naB37iZ}FgN}>}(9-|Hx)&F4Ek5NV zaSCMfYX`|W`3HQoPeboA1>$k+2YelgLAU56JSr~=D(5d^x=s*${a#2K9M^z!Mj`Ay zc9Y!LKyd};vcEeBPd$@_@W^Xqn-0@LzvSbHfj8Vq_=TIhX9Iu77Gm$Z9#u!mQH@U& zzx2Jw&&T$YmLF>H-C-+UWtsa$eD8qogg(xC=Ro<2qDj)Nuh1NHEMpp`V zwa0_1c@CNNaO{xE?1m znjcVd-gT7kUI^>sji7fy2i}Zs!B;OCm+i?je3D!Ummj2}W=0PzeZ7r)Uhokf-=T>< zDf#eENe-NpGGL*o3i_1vL6dM1b(B>jz4}b=ZSugTM^3P&(}M04g~Wt z{H=TyKHdL;%NAJT!Q)nNZFwKeG7rLrZ%e7!@6}vYt!`40+X?(zc$3J%uXKvp9N;be z#rm5TLGhzO>KgNq=7-;;$NURmPNgekOh%w%a3Ee?{SDS1QbnITxe&JA5G@|NqsG;L 
zTx?kfZco2JbKeb6+5H3d#I)dl9&f>6@oJC~H$v$H2NR98)6nuvB6+r%7iBrS@y^Os z(5BE2KOSVG>Wv~?E6OY9yoG6U0aNJ$nMrbOC=+UGY_R(B5B$Jefr)zJM8Bz(Zn-0g zG4k3Zd>{z^eMy6w7(c4Z>A-|lA+TM?uY3IM9 zE+L3n(j+F-NWHuX-zLUsutf_!bv6XgHDqDq`FF6k{v#|O8OKPC_nhfX-(lj51&Q8i z4{^N4B-F(VCa{sNxFiPWOlE_3`4`kK`GTcazJlfZ)o?_z5-YalV5s&_uo-TFt9heH z?u26JRwQZ0E=WR-(m&?}3D=<=(r=Ye_gHi2TzHb`#Pl;pVIXQkhW zD_my_zkOc9co*{%Ef536bA?#)J_vUAxnN1N5ppG5V3*DW_WJBakGDVQ{b~PT>-0ys z&RzqbdI__4>KokF>caWGHWl4vw_tcSJDb{G2XXtik$JHlxEa3 zTH+c*Gni@+gg@)wlCqiAaK70ee~*1av2EVa@~4n>YNSz%@^Ly{x*CLq2ra%U1{eM- zf{=O{c(llzXsc9EE%gSJyw3JrLHT%NtOO(2P6O^7qzU&u>Ak9Me1GLO_V=FSY6xw@ z!m)IsbEE^>WUFb1&^YuDDq`ObCwSAVjvE^rxe>fW9RJ++(ApD1qJ1J!@z5f$J=cw; z+X~QQaSYxY`#`ogn!>KHvq5IZZ@47sz!+|osCSL+3EWM9Fc!zbNF7*yUle01{18sW zV!<+9G%L-*eNSR(;`DNOajpv4ff~=o$X3!E3J!b zA6*~1jeu%QhAF*Z71t%BJW?tj6&J@tF$ws$&KIw(>P5Ts!x--Sgu7X>AMJYci9^LKjM4u_ z1ro2oT`8WVw$-0uv8OP6&K2ZH7pf7lB%Y*4*IQwqNhlte^A(l`@4#uR7ed3Kg`hv< z8~qx&8hB2ycW&rZm_5E1rG?TU{+bTy&a21f-6t^fkqmZCO~v~>zw!K0BS`F5AvLuW zhmTld(-(I3YfFV}cUMxaFak8W2VUuHqUI|@LGMBtNNpU4ZF+Y=$kYMDO4?XKkQqKr zv-`*TIFVxEYrhF+7_VbUqW-9x`5h5H?g~@3kv(2pnA<4 zj{6NI;wE^XBld{#_TQv%Xk#+z+80LK5}Ua)E{D?pqMF2n!!eTK&u4&Z6cHt;o&1mPbG@%sE` zPDlG!GI-wvXc+4_t)2tEakXG+KOH`L9Kq&=v(d@a71C9`;E``DmdJeq4SfNsb^aBo zs*ZttdJpi_ml7MDQtqYy+Ho+d1|E(xB(7eTl@ezA4)Zd9D6lUWVjg9d3b34)g zHHhS09|R1qftM@2Fkq8BIeX_W@F=*!rsb(*(FSueU!G+&HpPOYz;CQdk48m-vnZk# z0ll$_P`up`w~Myn!`vpiGuITKXitTQACBQk35CQ7zYY|22t=7_>fq<bt6T~_%#`8$fvv{k9j5X^GA{sU3R$rd~=8UQVfu^8JoiEsOy z=&>t_5O{nyxv#K~7~cE<*-2Ss!|@7;yf*Pb?^U@MzWs-h;-L^5sSO{WJHmZ&ZRp(J15Zn=L1djBN(^4at(Aq4@WK=9 z_p!bG8-EDirHl9X4g$&r;IeD8pzDz{q}1}rNj$iipnRvDC`VXO=@lKAA7BEG*%h3n z%Sv%(SuNvmrQ*YD?;yha6D)jD2+BW(@v+hrJSOlRA{bj~lZY2b)S(ND-$!Bp`aB%b zTTAR0oyJs~@5F1k82MHhLW@!om>Si3YP0rvUa0go@tRU)1Cb_Uz z8ry!lvyKQOxXkub+6Nsm^M@tvb!vw0BjLNwyN+Y%yw{v5Cl26k{~gd(7?W#A`r*)4_0c|Jv>L%Z;3+%v}Vq&!Xa|Igsi-PUFpUCT13$vqnq!^O2Fc>nDkxTWw1m3z&xr2RTPKkygca&l4e3~$oOWNir0-HXE6mtfxU zF<6=u2X#eSM05E$oc~h>SGWG-Bw9=dZ=)#qoz67Ky%EIpz6k2FJy1`PH?EJ$$Adp! 
zaeOPx;q2QWqALY~%Xlsto{}R|Y(l_GmePNpxN=xDO&PeW=y#lz! zUJtg6)Z*ivbsYN*w;*yx2$cR~yA;MY*1W$BQip#Nz08S(6@Nb?=b8nE`)x+^mR(q} zCLJwb=3-*t9auK+2RSBT25!_0Df@cmNP?hZ8KL9%FO%X-@nHYq&OIiR5@R z(~!*)t*k7#*I3RW{d+z4*winqBP5InUTa6wq93@6?fjKj%)*M#t6AD5uqj zJqySQr`wz;eqR(!LFj)Ii&yS;fTmeIr|X*%%}x0RKV9B~b7~75h-O`*T?~pW ziygUqH5{w2L?=NBs-O0O{wN894+D8nAVV>$iE-hLg1A!Sm3aI88~T1#5dC{|8Vcs~ zalL&bApPn&?6XnE#bbl;U-l*3Go>H@a16lNmTArG8d~`7Jb;LP32mi(C3GKLk?oQU-g0XW&xiT-xBXwPGVr$_2Q zYx*)Y^EnLlM#I48TulOOdK2!lT}fkl_6X2{ki8;bs>EF@EANjRxTRV2@lc1#rwQN2QaVkes>_rruy|@#E2) zk8>$X@mIlts`pqRas{o!D4CHSh~C$yXoc$2ON-sF0mD}oc=PoZ|&XSn#WiDtPi zhrsCN%=6}e?H_l7M=0CnioPKytcdmaH{6xR-KxO zs&8y?>1G?c@6~=%w|^>b-K+~51`FYYSs8hgvIlZDY=^hySInc`~vFj(XTR@sM%9%*dC@JSet_Wxafh@DcAlZfOjUT-C-PybzLr z)tf#;l86IY6ER9W%qmgkW)8YviRR4VoF?1X@seem1X1FCK6M&T1PwtSQnld|(b&wF zH0`7e>;V}pt}loC*^D91^8X70AA-1&7Z{|8;D+M8ApFn`_PfVG#p7)7Oy$GEs2zym zp%AH;33{@;NzP}gar*r4_^>e)T3D}-o4Pd)Ll7~};Bf9MRgjm)`&bXu5!ih>2WQ`` zCBq6na9Z~n?7a65uS%}K9dRvitmZY!ylj9+&k8W^qa(VjEauGfG60pNHF!ys53ehl zfXMDZNQ~V=W|`TLm_^-S=%)^QdAH!}lf9T+YlJn$f*@cvjEQHLk_(~URA)36FASJN z@au<=R?q-Ck4ACgYBYK!uz6Xv04=1uU>oD8tm_cQ0lTlTQEEMizD}pF?~0T6izcaH zsyWSn^bJyOjibgK9b6)jN^dKuV)Ix7j$Z8LyyX>ve#X-I;ZQ|_%csGI9ufS`MbJEC zMVyZb;Xu=JP?LGj_C$#o@o_5M>|jKO80j^^%z_iX>u~?QF#N-=`&1VR zSf3)#34SsRyyc1bWbrm!;GYbAd>t^dK9vZw9A}v(^V}~F0Q;5-l#d-I^>Ja)H}eH-bEhywJmN z^;0b+A$octE-sa#b_qc=;Cv-4xFgKH{y+jtPc&f@9U_4>6*%vTC)ygHgv)C_Dw>w<4Xh2P^BJ2-ai#isw6MeTY zhAB<2p=@XrEi2UEL9{w7rP;X4m%Y0qv!GXU2RN?p!a7BY6%XYgeb;@i-ee5D^*fuZ zJADUcZwQ6Eiv&<8HW=|x2t1hDNDis}!F^wc(d^F(DE?i7&LUgUQ8|xs`T68l)Hva} zc}wUziNoMLKLTFe=7DE}JaXHGRq^J)Y&vS!2cMGeAeUut{7$Mu<@5x6e)b)@b`*go zW7;gUwueXF5g>7K96io1gxapDApalp2VWRRtu-Qe?`b-w?~O&fL%}HNW`kzQeq_(2 zJq%W4VAk^_IC>xxbUQ9!*l8Qo6iLPRzPxhtj+%gql@6Y%JI3^wWV9C##hstuz%8*n zI{)DqJP12Q-|2Rvi1Z2)9k!Lm?@0pbq-l5~iGybtFL;w2ublR+Vu-lLau)0IXiS_L z#*a4Bhx|rF=UyWX61WQwdNbg}_NkP6ct0Io7XgY=Tgj-zDf)1ODXGv}h7V+n&^K2L zR@I4P&c0?mJ8uGauH}`h?vBTM%dBZexHyWb{l$l8uE45+4sfa}C*qsV;iPRMW)@jt 
zE6)^|*maw`eC8btOmt*Bj&M>xH5>Q@f?=MtDTpdIlb|h?sJfvZ&lOaH@EoRRukYp< zu3w7{#?wK6rVAb%{-2`rj>h_ZY1s)GiOeW7 zWM)41WzQme6q3DH_Uw26{^%U1<2dy^kI(0GU)THndi{sbqr6aGMF(q*j*uKV9qdbZ zjul~NxIa{Q6EE?OLx=CxcoG_1?Rd18$3Bv%3-W$oa8oF!O)Q^Wz^zwmA} zAN0g4X!EN8oK{?eLc8YUyuLolJvjva?*6za<2mKwpI|!~9_6fMtbg;=9`;@ur5+|9 z;A*7b_JOIo90x;ROsIKAr^ZCX$3Zu2e%ub#Kaj}F>p|5SH_U(U4r`~C(r?ou2_LF} z-6VzAw_VVAu$i%?8X!#SI9$DYAGTSWZZBfoH5ciRsGj~C=h9`cZIdxWQ-w?I7~FA5Ix` z!py&FWSy%OU4HT@&afP#WK#sp{x*acWtWrV$27Uy)J}7(pPr82t8@dVSy;hiySX@O z=L=Eq|D%^DL&@BcrO*&Ch9j$bF?hjAZ0BDM(pJ^P=5#)ho?T2g40Aa3^PBMFjf+sa za4D|v=TScM!JRI$9>a5*vtT>E0j=xvVE+{%7(d(q_dhtmg?t|6{8uqJMf4vi2zCYc+99zl zD<=;!DxmgGCe|M8z@02t6`b~y8b)8`{&!_6u?YW-w_nC!Owewu$SQ|;yG^W*Spa@# z?n8t6Ke+!T3|8}g1nF-nkeYP^Hs5eS?sQ=^n>q};S6E{6-x=uXm=5)B64=dnJ2uNJ zf$v%vF3rqQXWBO>msY5)vHQUiOD6UY)ldZ$~skT4OY>j%(o@7Enr1uwe|)TcO1C zUK8CM8VH&!6RXF3wF`shz}<(_$eXYZ)RAV{oq4ygVX+zn`%J@|)h&4aiz8{jbDLZX zia>V{d+=W#0sNuY=ox}(xiS&*rZLX$$7ytzoF08Vas%e*{-v8F zx!hLYtsv87gwHe5sh7ANIEZ(U`-fAQGg|_J0?dHVYcFTJ`fQy2a1nZ&n!~zYS#sZ@ zpGtWa<3PG8Dg?&D#Q;UJ8=r!SLLp8K?Sz@$D zDrA0xp1oAhY%S!yzl-{O`-!#=KfI%@sNTH`cY6iGw;u_(ZjT$s=x#I&E)&C6^n)!S z#&Fm98Lk$rg2enBI27|84$Do4Yhppn(W#9ir=7T#G!4dY|3)2=2(n+N5%~Wu<2?GS zgPy*LIP7GKUGM+W=D0m%hvNxg;1bk4RE;+-&Y?s^5-t3-@XO&y4D}A;w5TL0>2>mh zS)R4h+m5F=9`+bTWHUL|-Sg3FD2(pxo{fPY=3_!>8_4y%CBqLg;WuNVR95DJ#k1FR z^yp7~FXe!TwXV_r(Mi&gF-{{^bz%2LJJcO21t|v?)Yu+`eVXAAv*Zzaye{O<$?k&L zb#)L{SPd$(!cZ`CE!n-tkE?VlTdDdo^TU-mLVipw$_D&{xG8@jm(6`-YRk~-Y5`>R zt;DVOexUfWvk;soPHKugVBeY;4J(oZ53=E5n32yTyNFovvS=YL$DIXa+g9|Sd`s7NO+wem3y}Yr4%c~GslBE!9!?5ElW%QA*)sxm zG<`yWYB^M%k^`zyHWgEjahX zD|~OzfuyMs%9Wx?OWYjrc3lM@nwm(P{az~Rbq4hQ{KMS#ssxeLUTmHgPEC@U@vy8i zak?}BZMW;;*G6_GzxN5EcDA55=73Y!XQ(W zFhk=x3h$1E!fXS4%jR@T4{yW&dPGt8Uo@(@?IvWcTSCW-XuLix5EqN>B37mBelEQY zVzzIGzA%22*7Cxa0qb#Y#uwm;E+t7CR+KBcfbP>Sg}EnLk9lMQYCgB(%fa*H>0=Wt zJZ%k~s+wTX-3hxIGa$?E44A1tqerKpBiWyrei$=vBswwbnSr;}N)h$b>)nLGhZ`*_`)}J;-teUx&l!26?Cj%T=v6?SanPQ7G@oScy||=CDVtr-We=KhEP1< 
z2fo~IfS(@uL+q#*e5xqJEq<3_&tMp=`ICq1wFF7L?hwcw83x|?EI75K1Lf{jgYrrd zZgp=f@MrW;%h#G%;9?KLYfo|%V{>u6lpxjGE)CPqhSP0jZg^^?F=~rmp!N%;asP^O zxXTyE=D4Py@IDO{_6y-1ezsQq8n}O--6GMSr}Sq34aBVV^HsbPoz7+Om-NT zn1tYstClz%`~yZT?qkKQ*C5ec3v1f5;J+hF;98X(__Hic&!2bLFP;c3x&wIP*lV15 z`xw`bM*8ebOK>RQArF4>fZx*{04tko+l0O+;c%8{}CQJ$NU7UE_k6Tg|0dofQ#dk z88`U^F4k&=uru)-_3NdufBy(+e<{y+#T8J(W(0~uJakz6AH5#w1B166QG&5J%KLe! z+Mye8zQGK+BXQ`yFO_BSYcWLM7+fyNqUhmS5W)C$=Lg-mhlvSzlt^I4^7`!~UrccJ zXAV42H38G4B+e_LU8JZY9j!7gKx>u@9&w38>sAwvU%ehzWJ@D1E@qjCKhdc4tOhgs zi}3WL58&?l1~Wf1GfhkpZtNEsgT zSP-?xcI`IIso%_If!VmP%}>djClLlk9O>_(RM@kJIpmUNvl)>Hd|%xHYQ4trMYtYq zcQEH*?;yRqQx_-A*3%z5JJ4-B8xHXLp+)=zawHnF624<0%bOYa z1jD+IUBsw;3viq?Va_#qIJ(CH6D;O{;?YLhxs}};52VBP1rpFUua!o;O@imi1<<8o zL%ZMSg6ax)yr%RDQUYYTr=_Imy3L~aclHS5h9V6hY!9 z_z7F8d~k8X>O<41q8U%3?7U^@_v{-ubMJC`1oH5~;%-z>+roK1qJ`=!nSW1yHmYf4 zv+JHOsMr%sSp5@Zhl@Z(XAWF9R)rI9kKo3InXqt6HO^5k!G`lcakrH-4prZZpHz}X zx99#CJD0gquJA&w{4E%GfZX7dmPEnq8T?UOhz6#jw98g_w{U3cCm5!T*;Ln#C@mztvhf(w{wX%tZ`u&%cgBYbT)jWf3TQ zIMCP~yP(G>i%v{;f@e{tFlu~{8xj-;M&O2d0@>uBel%8}O2^B!VxYdy9JIP3fNzH_ zh6F})+*Te(*UuK<)f^AU9e035@^d^^QV)rT#o@kzAgD+6ah|Lyn0ydMSME9i_Wslu+SJEBhh&;|CNLCv!ld4}hsnam?pSks7R?+W46 z)L=~e_J(_3t(v>DHxGP-hH-E-1v+-8;;IBaPNRDz%d_i3?UxE7b)0q9K9%FeD`PAJ z>OqHZX@k=ee|Rrx2>yRxgO7IswAPhDeq=rry4pkjp>G&yeGpuSnn1P16!tx|#I8(Z zmiO<2^aby*^=={arQZczGiO}E>xOzd-7*gHUb#4mz*b(7fx;a9djd+G8H$ z0Qlqg!Yf4FbsZ|EK7-YIt6{No6sifEq2zWh%8CzwO+Wz6&w5WcaJhJAdK#=eFN6Gr z3MjQK6W`6r05@+O>HB1Xo3JVFDt=^$6yj!jd$K#ok%N`~1@DsG~gg2RerVIGD400YSh>~erolr?yG@(3v6&M`$gJ1jF z0rbO&*6Al?f&ONEXcdgrvU)JT*q@}R-NhW)S77Tf6K-!df{R{KoXo}Yuv6X%w;fN! 
z##16V^Mp0%?v_H8nIUj0Wh+Rrj^$C!XdLZ61pSt8$Znk`GHOyuO`hmMo4{V2;dKq} zKX?oC&OZb`zT-HJdm9XYdXt-1a`4cppYVa5G0Y1cAbaEn-rST2O?E%v3~wEjRGmT> z+ap-m^_i^CuAxG)yostGWzoR;8!qhHj$3`kpkzi59Lz6hg1BoRIe!NJvM!qqPE+Xvcfp02rX7bmp}Ndl6^6!7 zyl{RDFAnXpz}Jjpq-1&k8yvLB70nn}5LyfiE`B7Hn|yHgm>hiJw4jLxF9@^nmc|?h1P5;3Uj-jHJ!XQ~fAK2|8w9%x0v_V85UTthlKBa7!{pE z4vc>#U*&x;w?hGsbY2Jj?)Bj8R0I3o?!sShN5C+gxus8?fuXxyN^!!w@vGt=6ml?! z8}bpr*(v~1F1tYC!aq3CUWmT_LC9JCh~`def^1I{#wrydL>Kff5mCu zo1`P3ENNGP2J|E+f)>j}pAT6`PR`;@ELuJrLw%h{@o@lq;U@Smq93aZB(OCr0Cdm3 z0Vl~FSUP(yG^Pc?`Gr@xPOJlz=BY%!^dH7eynKnCZBM{HU>=P4d_g61FCu$)4Zex0 z1>M#M80%68kD5f|=fL*W&g7ry zU+nSu0W)>e2*=2b#@_vda3mS{Y$D)Z`d3(47lf_eW(i-}Z0$_1E1H>R;=M6nn9D7p z7ayC0;{G&xahfcR6pBueJe-N`I&GN7cNF|8Ot|vO7rE*~A)J5xBIIX*3z(QC)7RZo z@M}mp>J6~>?5Q7YbA64=2lnC%FvWbk5pcKfLeCrh~7n~d<3=s27|YDMcGdxKZ5E2c2ccyPshICTZVyV(F%_7)>|+7}RN7Q4s#=M{e7t*mQc_izRnCbodzo>UmkD?_tOjP)=27eba0y#j3GAK3ZLswgKP9s^5E|atdMrZ%@=>*$>U+zJN^vhANG@s zT}3GKnGbl|^C3(%7=>IufvI9VU8YitUj9N*>}yMsj%J}|X9BzkjKTT0is($S3A)e1 z7d2lEK3x$6x2#Ws zZQWIRaHky=SsI9Ec$Mhe3(gp1T!tda$!Hb9*zm8nq02sNw0f?9@A(#^n1?&|e_Vr> znKOv?^=zE#T1Qt`Ex_|uxfros6K+S&#XXP+Z=|v?r_C6Hwz@;YRWXpboe1ZqZo|Hr z!#F;u4n3XXD17*J!l~vj7`*!>1Y4$Y8&(QJ|IG%7(Y#C=d5ht(?>W3x$l;uMQA5WL zhoWg?I>G%lxK~9Aa=I;8zt0|QF16y`BkXHyUko$W;112_Rqogo18IZf;6KHQEQ@(h zLPQg&K6f+xv|GS>0(Us-RfhmqDZ`C*T5#CpFV+5&jQ4!6BosLYpw`lK*c)z!BT2H* z{o*>FUndQ5VF1&!TbK`RImFK8C)ag%yeek&GJ{Uc@1Z6`NxMbT~jN^&LC$yQ(9tm|S<-^8Jw(v@05%J`2g?z?7rVS5J zv0^=%=v9LKS$Vu*69muQe&K(-B_vkv9a^(%z4?`$93Ip8n31c2(`PwC<0fay@%@V8 zCI*~I!)rKK#*Ok9oyNqoWso9eNmj@=V(^G1t&t+s_wqkdW*?3>G>6`@P- zXPkFt5BETz3Qfx~!Hro$xMZRXLr(A}^5@pW(Kok2WAAME7L^Lwiox8qyGu!`jT;dU z-bwtO^+-kG3)DY;4kxd-LdxM1{C4FNtku~GydRPvI+JAuUpC{Y$`i~v9tV39UEr^! 
z5iTd2;rwi8aQ?Itl9FVR4OG~BWsKhsT*3BFW9T}xg>jR@(fEf94xDy?!|eCdw=Naa zc4$EMS^}rl%kk{;y|-pC61*28#{BSnKjQ0`%(dLq3;JTA z`011y)EIj-_W6^gbQAL!4e%#AXdiK)xX&^*YRbc?sZ98MkjIN!iS(Y^3- z#aB3|-;WZG-qcep0p$wHIeJG8U_(w9x{g>;v2A0h`YQ@$ZI;r2?|QhRHwd~XqabDv zo1Iq5;YdI+y7O$qX>)AJcENJEyQvyY8tv(-(p2Wh9)f=_tYOUt4cNe#o^GppFnIe6 zT&h2ccaFJWnyClQ+WQ@PLJ$VtI>EcxQs~g#0lq;taIvD35KRl4aoJa0*xbRI7-J9?oD({ToM(0Yv=WS%>iWyYVlKKDsdx|ej zs_=q@E<9D&0scuZ+@@QLRVFetJvf+sCNjvQ=jnL;&JYGI{Q!j@Pl6=-xiM?YQC>cp zR0)=D-{feG-Mh+_v`tu7V0Rq2F8W2b==#9&)ql`uxgZ8ylLD2_MZ~CH4<>ALQ6-?4 z^KRxb6qnScuTt#LeXB0IUKythM~??Qav>tym|k^#4}8`uI7-=WWwZKYXrHb zd3d1XG9*sz#j%nv@Uy}jl=viJ{ta88!`*PZ>^qTIdYe1V;yB#v>A)Y1Bb5=>!g)O7 z6kM_!!iRI6X>#Kbb|riOjX+o28q|+XrjhjYi6!9l(u@0B(TO;m5F$=_K`=J+2x^>| z2Gds;gG`ex@c9OU&Uarb&%2-bfLn0)nmm%TNP?~o2*fTkW7_*)3l4TpBXg!Cp(E=i z?be@;!8(21j5m8AYB-+d?z0jus1(8<{hPR?!WE=4dZ9Ql6Ftxb^He?HZ{=~kw->?s zLmx^;1Yx?tGP1SsEh)B2f**r1Xz}AJIet!-Ieio0)%;Ir{`DI?-)Bi?%&Oz0`V^v~ z$zlBR&=WpyE<%+TainH)h?|(a1vSO;(8E9nm*f|~p6iRDd`}y^4#|S9IUEo<(B&5vZppgmt}%1*0gb_F0sT$WNc5THq<@whOrg5K>Fe0golz@AnvD+ zx7V;vu7)$7yw8OgDOo%vo`-OCFUx6Ep!e@_uA1#HrFk>g;^oed_|qK8EdgI>EoR;w zH3!-hvJc`a;$TV3eUubPfyf7D7%sdCA86N5)4i|phSMTcR$v2}hhZ zvDp^O-Fw=BZ941WX|z+&v`43tE-?Dp47VGWRMz&xEyZ+}Kl(+ThZu`>tc3Wzk%rHTa`5215X7|^6J4`t zJhwy-yqw)&k?bY9Ac=0f!q{}h3u95>lpMWW{}A#+W9T9*C86^JFe9;qdP{Xt1F2h} zbN4!In=nVE-aZU_oDF@RH=(j13XJmdP+*erysO2zsTwEH@e#!Z$xc8@WXY7>9^`U+ z73wEmg2ppt82H|mW{10hfq)5|%a)`Sz9PVTF&HIUxp=BGfew8T#>9J}xOPb&da?Ny zZk{AJ<)>i57~5e=@pA&}12)YnYJljPI(@qCUr2$7dE7=bCpxPi!3XL?QI5w!u@mKz99<;apT~ z#`WLZLG8bf_~4N%Ox_uwn+76KPR|GDuj5N}>YRzuzb~Uquq?!PErwWIf2`a08npEZ zYz%)(Ohv1qUi2cCXtbi{K2zW-X_M5{K9sK6L1!&`O>VvvX1Q1+5HQ?^9?y1?^-Cd$5=$=#uXNycI~kzVKNoi9Q^Lf&RPL?& zj!?9R2R!oxV9G=b6od=GGc$R5I{7QkUZX{}XcbbExC5B7=p6K3`beLB%ceIfV_}De z9SV<$GH#v`K1wzQz3fo>>um*wDk-AeG;eejsezD!HO$=-1TQU_A8%eC=5Z2e3;-bO~@WpR1Ur-ij28+Phu4Xi^8cr~e-Gj2{lJHT= zcl12@6?dGoA*Q}fV7xO0)z)pt^K6jgT!AecTqz7sabG{c82|^*GaB5aE4$aA-r3VTyOK%*Wm!{F3^2~WQ@rz}n2eF$! 
z59e$Rgo#hxa5u7w+utXn3>h7od(G4`;@-jv`3E%@IHfx>o2Y$+Vpm$d$ zZcp8eeZ})Y{$LWd@aBaHxr=z`QZ1TZ2&RP{9L?GErf|B=emYOu;b0YZPz1M4Ki-ZwVbm;DKjsae8lg>_)p+rZ75 zvWck0e@9owtSygO0g_+MY4gbk%s<@6^%dVp4}PkILe{Iq^=7cfuNv-TQgXMh31@rC zax`Lgqw;5I6bzK1-u%CDtzt7Y$bH4Y>Tn{E6AQ9zUe&VVBUi_&4PeDBu>VyH54_dr zshVVNpYS}~vs)h*?fArf+WQ(*U!EauJumTQvLV@jZyA_)ou^*fwRqCZ16(cU!g#VR z89UMmvjWsP7jkb=Gs`f>x;lXinsU(bzzC7r;R&3Qa_pPcNx6n8xcT8R%ynea{Z0nwYm*QVAV$2Dz-ZBLl?;dWC{5voN`+|r zn7WN5@Bc_{U5SDK(@S9V<~Z1SZ9@~UzbI)#@V}j+7{aZDHmhDTr@$E=+Gi05;U;vE zOT%Zw<#0y*E%Y8-N{uH@fQf-D9Qxc(l2ezXen=yZY_r8PItf_ZP!6^mdg%D!7&tIf zlW1BG!q}XdP`x@8?ihbV&3@Kfxl>Qq7yIIq*@yAwnhtJNPBVrSj00CA5p}JtV6Nai z+_2mhE@m+Xkm)OsDe*$T%iXZ`U=nUmUqQZPGf!%!FszWVkM`4;#k~;qb`5CXV%^}bFHk7|5|>=IM#)DZNM}^j&A~rW{o4@x`~5gE*9KQl z?o(3v?EoWd=c3gEWm*D^yTY!M^+I-BHSOsMvFAUaR;(9VJ}Uvq3={bGz>(~weR$|Q zW6P(+pzFGC5HUpp-z&YxKP;QE=Q(>Fgc&z`_I~79@RS5f<&uiq2B=|HOp0$BbEdCd zgs$G{oX5Y7!QG&S>Jlzx{)=WqAu=;D`?*h6h-%yp1{Cg@35pi z27e~+g0`oqj6Sg3}|^VZuX)G%rs+jm+LWPm+C_Z=qiMn z)_)iS>jysHmPz<^Ylzk5t#s#w3UdDXd#t)xfB`GR;6QvNtQrF(5&tk7%V3sZD;oV| zmaSE)bV^hg>M40Z^w)TVRSl@hyr)JlH93P)*(mrU05aR#IP#8xxGA_0#0#6j;Exd~ ztNIYPN@-f`S4Em+^kBH{H1+mupeqfg!^?Y&zm~_o@AnSWN^Qe(eN!rBq)ig5~PfUjpRsp2qhz%(1nhf@CT%=V@0je0%9h9|W{u_1au2 zE8j_z)^EUF?U4|$cnt^7k3&lPu#(p0IZ!%pAss)CocYU*$k786=n!*@&PeURWo6Q= z3xPN{;RJqfs=yR2AH>Gb^vbJr_-M?RcvIvjbFQ+PY&PRxOnt^&*|j9SM+r~!-NOqN zN6=m;9CNnb!fX{JyBTk^YUesIjgP`8VGO; zV8g>7Abj>0EL^mgYOjcc9nuGBOil)_t{T7_L<0_`Ie-%Di~rT{2kVbboX($w%oksS znJ%HQKHVHH`5B}6Vm^FQAD!TNrV;*_*5iNt?zB1nH_kNtj~3Lsqw%d#{Q5i=BW8J` z)^jCX=2=IMr)Pp{d^lqU9l*75=2W7t8?Fl);mC?yY-gT;fx8>vrOY=_VeZV9O)e-r zTOR9<5gIr89Xvj*!)aTKab=P|)_Ak*(TY`gH*W`2q&cHCn?-Mrbb+-`0x;-zHiR6! 
zi84{^VP=an(2sMEeB4K8|SrA7_oa=hi~{Bu|4rB z{xf@krxyPra}z(aYr<+;YH$wMnVCaTZ6_^ZJ-Mj8QBYl9i*YwI$fc)k802J*)mvJ@ zxa>C7StSb>2TeFC!XI%*xf>X%Hsa4d2aL6f03uez75Cl-I}`$m(pP0j%_xAg<}XM} zVF>qtq!K(mK?xYqaPWR$Iy-bo#7Ab zR+>}G2NYiIRmGFy!no+pLB>II1)nj-D!lR&OT>M^VaHF{c*hQWLs|cI>T3Mt;((cH zO5hUE3$`JZoGVA7*uT{oY#$ck-S>$^o1Vfvj||R?T}9xxY$L|4VD41aD{R*?fV%hg zlHM~134f9=EndiaC8rZH&;JwMrI>`{?*#Cjn<|LCEkU^{=E%PzgS^~n13A{cP`Eo7 z`Z~;j%Spvb`|n_=vk;yv-HozJ#gKgKI$0<_4`-(DL5Z~U(9+}yfh)}EkIk%eHRlf; zH#f&%OMg6SvJH+Otfea(hv4(&1GqfYf!@^~foaBK^q_hN_oUPn=(1uiJMnD%^Ljh- ztoeq6yn8q{weKN*V=*jxs!bd-#kZT1Pwp^#?iy}Brlo^9~ zp$p!+mx3yXtMSt81T@#rfvAm_k<-TZ#bH-auAv=*Tb-CIGYGw<=fQHzhww6k3mK;a zAYVP2-c4H1{J8cglA8?AeW6Al@w6K!&9kf|Z>;b{(yR zGQMm)bJ+)CGi^AR3g+X^L(+IB@)#UdzlyK4Ka+~DiDY=8BD!AR4Zg=;LHTbE1dhGK z)Tb82P39dg6HdXjwpR3R3{99Pn2P(pU&T0)cd+Z0lCGA6@Jwt7e3Jwdq`8xzRsS0I z>#v8tEqQdqS(a7Z7=YLJgu`d&H{|og6ihjJmzy7O1hyZvz!`SAuy1Mu=ce*GtlHEG z!h%-7C{LW^4O~b+Z3b^<>VV>MW3nJJ5t=VWW5Nw7%)gnAyUO38+8E-nTq^#~bHi3W zO;RkPM>VFzp!x~My*l-k{V`=++92}wc|IB&O5&SK9bf}oEKQq*+X_R-*jB`7_Z#3= z@)i5Mm|HIG81^jJLs6k-e7t%)&X8OKZGR<+yuT|gQ4dD(qb$F`=48JIjfi#CDtwXp z2ws;ZVCnH7oD=2%Q)RSa(8`}4V9ySd$tifR_%cqxL{Joe4ex>;K(1R9=FEx5tc)#i z=3F?QZrcYNyE`#qnBbcQGNhGn9EXVNv5Ru()7P{#+q=$xH_|z2EBaP}F-rp(91f)?CHaZ9{tpvU=Lkq856 z!r-{MgOn%OaWb#*B<{B$aH&%P6g;NVgljjDyzWLR2`f}|55t#B<6-s}HS|0ChQ1Qx zRTeG2fEOE=v2H~uZoK~u*R2ePo_fZkAArE&rHt_{@YbL4aLqp=MzlAx9XCSLC!;6#7^uX{*9O7eht#K3B zUb6%{3Q9t?ewO{UOB{y3hb=)l^ah=2xSRSKc*D0BLO9xE3R|uR;)AFGIA!L}UZWuHag7R8 z4_OFP799es8#~~uC}XUuZpHx@Yf$4;g6|BSgA#RMO$Ra6rV{(BZlZh-Qnz0rj8`HE zN9saxMJ^Xa-?`zLI>we_Z0&01XwZD*2p4X#owMK->^mC@?*g~O;+!@(Q|^zw9>I7w zw^OMyA_fvKyoYHoT2M;AGk%eAsnXR)6q-Xrh-gE7{04{5^xUpEXn5fS%cOdVZ1o$) z@{S};4zKXP2j$zlO`2e0xCHvM>mb1QH?bOwMW3;4n9}|Y^|yV65z7P)S?@&Y`bIQY zcSW_#N&0H;cXWDF3?>@GkU3w3J!gbKxIz&&1ZhF?(POywKL@gT#|v89Q9+~alVDy) z9^K8(JKu*!m99#Qz~r?CSg&7zyorr4I(HIpl(%73a2t)gYY8-@5<8yfVXDX|u{V5! 
z>&nhRdfiX>5-W@EOmtbVP#6^DrotY(ZWP=YNL=#A;j(fI=E%jsNf9%+`{ENG4s3_@ z$LE2x$|Bfez~*MPF_6%ehu2rkfDLbF!5?j2<+dm7@N+M(vI+l3Y`HFhp-)25*RKkN zY9~;zCe8<9D2z2$71+KWe+|tWSsPpnb5=> zaxL|3IP&rrQoa&8?Lq@t`j7FS%QLt)_Z5LGd(YGrI=Hq29;jm2gO5acm77P$z;{O? z{pQUNb(_mzU^E^cdjCYJond53hYz0e6+n-WH+1WUWw^!Oh4ALH=kak*682RaqVGjw z%ieAjW7&eEH+)d%`!~1gpvf0-d|}SvBo=bb zcKn5NZ)Ab*StyO*X%Hi1IZ!^oj?b(kFCfI=~B7FYI5sby{m5 zamo$}cCX)zznb?G8k7QM5%L@fi#^y|kwpF2xsyI_Mu&Q7P&Y^d^}y>;N|uoQvc}A- z;m_Fk%p-b-G5?EPusw#&_GDIbgs$AAy!X9;*Gms%S?5+uAr@R}%!&Aie$1-Bh1+M| z#QCM(ko)Bs?Eg4{EB|X@d-P0tCn^CiYDUA_b%=ZWcoIk6x#Gvz4{%>^F|1k^25V(E z($it(P_{3H!Xb6AJ)?rx_yVYIa4Y>6)P&+i4N!P83*QQj!2c*Z?|7=;HjX1jG?ezz z)Id~9ocp4oq#-2@=|^c%qNQjcGa1=ib|jI_xi3njkeMABWhWV#@!Zef{n6`nj&pzS z?{$4X@AvO{x~vmgfNNjzDK6c01Ii`;VsG6!#*Sq>1=(mCzM~wqmbifN+f(G2dM{+p zKL_{$LFjIw}!o*B4~Y=vP>1za4B3 zy^z1~moaOE>|yE8LI^p#4+aeVVcN+KaM81Y^BUF)sWHm&e=s zr64v@3@;BZB^opPpuxVB?6nD}&+^N#ob)A`hX)DjTB4o)43*skl*VCcfw8!ixJ#6%_5B zzyetn(2`q1%o|U#3|$4D)1Ba?g%kOQEO(kEaSZ=8s^G7OQ<;CJ7MyOs#WO#DgWv1` ze5{>KLW{<@gSL_Ql*=5&O}kNYz60vFUxH4@C-nC%IeOD%2QF9F#umvy@^V!pej13t zzT2jF{mD_vcUc?8+`r@3kEikWlU?|}@goSA>p=ETH>}n!!k)1lTGds9H}lFVIYB`o zFM~9P^T3)D!^GUCl&G$rjoxx^7;oSc`iBnFKW}crNAta?9aM=!SI?tbyf;zrd`Xqd zU!&gIDJZ99MC}?GU(6;JTC3Ax@5n8>do&PE+J-@}>l<9)`U>Yx?1%E@mvBGofQF_o zOj$Yvw{FCO>AL`q-wDP~|9gpW`(NOQ>{FCAIvQ0^g}xv&+=j4Xki(8Uc67Fc4i7lUVo z(hYSkpjcjri%$n(Bl8jI{tyIp!3@vW} z{dG9_!!;v*<0xtgu7giMchW6i-a}mVY3$dy#{InHCcU_}96!%{2`@Jn(<$yMc*yt| zaPJk7$VeV;=Fm6HN^_^0`5(b=-(Czcr+C=2kK2BJ3a-!V!TZO>QDDj{lx}%VGCPr^ zE=j>92RqqZ=pj6Jc?*%BwqTo76P~|)AKrGdUPH(r@J+D>S&tG1#HM45lTHqt7)77=7CeQr(Mbwel%+U38kWtH=V*thItJMNP)PPr{K0 z(~!^7ole|ZhELhNaL$iM_+oY?ef(%61~v4ONdpUP*DIx}+IH~NJq%y14Tk1BAE@)X zzhsr_GYl-B2Ocg_sLEw*XSXJpvO6EN?i69t)Ny!h(12X2VtO%)C-c796WCMUfL4#= zNOUZLJB?0M=V&>%o;gYeuH1pM1~;kPpQE7CG#{QF4Zx2L>X0Yy1y`n8K-YjR4yh(! 
z(*8D*C!P$qf4l>kmk9Iz{Xp$qX)tnh64ZZ9a62ZxvK~?(=&PTGJS7*xCme}fe1o@b zKI4H~yKrICdEyuP6Miqt!$-xTaM@WAa^Bu%JkK%|&}l`!j8NiWHHmL%6?S|#0*8oL znpPKrPA?mXZ(oGM-c1e=HkwSZycd2yO~gZcs>tMbOT4h)176Uq0H^Zb7&l!A=eXR( zu0SsKlOB>PR*JTs_7FMF)(d52WSVC`<2V9e%PHw8f^*z4ncTN{xjZx?F$`y8cM$+!6Y36~CsDcH_{CHa#yK9y7O?Qxs}eW&eSw{<$C)#0 z2)7EexvoSS4(Q2qOT1?gYc&xlwymIis6Z4qu>7}?ArxBwV~hjVX-PVZp_{s(fB#ju z{znEJPik_~&+#a3Qs;x%BnOuH`wyfHo8W}*GrW_#kcM>Gk=##0R3*0v%}zCNS{(B5 zgq}NQP2InQ7E`kKh18%l=z*#a@ z_>t!~nKIjrBRa^uye)t6-hTnGyx0vzA0Ef?@jR$ol?pj#t*BFK0o=x|sMPI&LGg_k zVq=cmF0lRmi2$_k`N#dymW*ykD`6&cVUAYnK$Xx2c&sf8?<(KIQnt6f&C^e+^$cKc zUKtHqkb--*Rzk#YUm~D-6|yBd!PxaJ5qM>Ul1izN7c9@%03k3oUxOS`P=`f^eAr!m z39166q3}NAHyscLo|zF48PrV8SMZP{j{-3J@l06#ybS_&=7PJpA=@LLCo1Z{;iNUj)yLa!JUXC*1qehS)Pvg)7IaDcQA#D;y~b z_wsBZM`eIGobbTtC)SwSAOM>^OjyUa49$C=lGItenU<<6Krd)MK0k3En~gToQuRu5 zJU<0H%)8;*9eEJc)xbpSc~HFX2zK6RhlwvAP~Tk$Kd*I$9fJ)p=<0@&Ckd*}oXEH^ zTMJaaXCk?~8Ma?2!$%R;bXDm>)SGXCwSlvU}=%8biLaS z^CfLT&B6iP1q&cmcnpQzc{1}{%HehHB$5A%m{O=se3&EZd-hkvq}#Z4au=Q~vcm(6 zX}Y>16z9lwLCBMCY@er!CJR5q)CnWfy2zAeZ&m3B*Jk>CUkvzu>BF#u=a6(IjP%}1 z#XY5xaAG)t1Z*${tGg}OOztE1X#$#?ynvR`Zn*I?f+S`7gUF{DFw_(dX>*3j>utVp zI3^KS6lEdbS`DgH{SddV-VIY7{z1ajauQfuLHu_O5_uFt-BT{~@9qJ}PW(h_&-FsF zFw13T*TKq)ap>b{(m1V7jAI@6!vjgMjAtF5ewhpsJ>@Xlp%v?&w!m)ZSai3z1W0VMA*t}r&+&DP%unO1aGnTqSHtbNb#}$K&*T;J6=>;#-QZ2Np z4r4rzE@5tkm~JrDYp3OBJFz?yz@9xXyQ}_#Db-~t=7{9e!zAc%o4{>WJc`qJ`^n9J zH{fsRb>>W)4od}-Y0}rPcyk5oM%*hQ^)p9dkoyOO^ouZZX$iWVodNac0^s~p44Tcp zkS#Mu@QJt!8g=}FltrQtBW^$r{tm#&iFmqwZxuXx-OgqQVzBzdeSF!d4wfQxVZ^QasomxcV@>f*);*2WYp(y@s5mqKH zzyV8Zaz2xbUpqv(x-Asd8%`kXse%XRcavAcf>h3{nijA~m(+|pWj*AS?g5y8 zt`#msdBUg@K%w9y>5(V{Poe}@Z^__v#}NGT-3tpf3b@J%ZMZTm292Xy!G8Z1xEbXK z_+l6C*SU*}m(It$_7?b4bDpas!h^pC?TDh7Ca%mAP)KmiLz(6jFj&#U`lWNYb4A}_ zqG$)*FI)i9EEhB9FWb>bO{JQj2nZPXA?HKd9mYETou+X!RG!G)m8atbGsdVnXw9-Y*CP)Xn| zy{Te{WvTCATD}pUSKI-7%`4#g>wo0j>`ua~p~2206Sym$20GtVVe{91j`wdnaIk&= z)0}f0;CwiSO!t<9D{-bkIyG^laxO-meuEbV zqOr1~j~tLYg+n=hL_+-lYD9g&#}Oal@%H76A-@@9lUJbaE)JV@?#4L>55P~oG;(tA 
z9^*aLpiIvgCdr0Ez=2f!w>tuhUwotY{JS}?Yi>~S-xujfU;4*m7o z4amo@bVIE%I7Dm0(w847|A%mDQzuGz;%~wai-Yu^O&i=7ufiQhO;qKT0!rMPhnN0V zV>Lgo;+YS>@wPbQa$opJ!#%Em?#B%fqn<#+*4o0eJ0DR(%NiTM@PKkt8!em@f(cfi z*m*mQU)&`I!T4S~oGv&5$HaVTReA&p z2s{PTsTW{o_b{B{|AVevCv@Ah3hG&2&}{N4{?4KF!8(5M{&X9%x71@|e+zv3v74)3 zISz|_6InOxF(jHsVN90;>kb&eYEyss#P-R-D^lc@>xZcb*P9sg`(WbHPcYf_0>5m1 z3w0vB)c2_^d^s+dv2=+vto_2==kl{~!$b!0-RlK6&rhQAc77agaDdK-1Gq=v69;nV zq?cUpB*NZR+#8QgaOg}L?)w-IVf%W)MI#@(TKSNKaSD$d-Al5Vm^DgU|7$cs*kX zb|$HjD9pfrH@1_w$u$`T3-a;8mIB6aEP~la>D+4{*PvI(B$~6orQ3kWm%S90++pR? z^nMtLtRwbYUXs5@%7{tS1XNYn0$WXBmuojsdzXR3<_(}Nsfi8(et7@vLB_p%0X=Ge zu&})YFGZGeC#8a5n_&ket*e7*i(piFV*+_j?%>g!NWIOb!K+!dICxIuGw1eGTQz zA+yol2*#Md!tClMn6dd8TPE&?oa&qCxvz(|ktXmy1|=_{bZ=lu!l3{bEy2EKj^IX69a~>V)*xRobB-G%Asr7;@tG!AXgM8l5FWjOEZN7#Ht6~b*gNo{o^wK%u}C62d)-D4pT z+W7*M-vooyqfm%`%>j#7@nl;cKMeo2qBB$_xT~*r!)0%Max7ec@$Dl){b~Y~*N?%q zQX$T&^B)z~%Kdi+_}&U#iJu}H%A4O-lb5H1&-(( zbPkN?+JR_68OoWZq0ki>Shm>=wUjrB&Fq+7Hjy@F*Uf`U%52vogwKvry#L z09XzzhI_NuLbc#aZ2p%3B>g;&$-jl2Zv{A;FWQ4awE%X%t-;}k%4`P4xYb{|a8vys z+yFnEIzJ9SwJqk3*S}``heq76a3-fV#1rD{^YQ3WK~&PS0~PrgIHb~y!|!+%OPKJdfRF7{3rLt&`e0j5s5fKk^iaZ^Vb8kNN3CC|5*IWB`SPBmb& z)mFYq^dCGOji#Q*ufX!e`IsAc2sS+I0Lhu{knP9ZHFumKa`rL&yPXS*^_ua6*DWfv zzMQV_yv?!t&clRt+2qruMEsT9L+ZBo!XNoF=q4~9v#efmv*!5`!B`V4*i-_SmnI^& z;0*3QJ%(YmAL#u0Qm%*X2iO{)j7sxV;n0F0R6CQ5$BmYPf!{^0SWqxz?aju__)mDn ztqkkCiy%Mt3h8B8x5sK0+_^*7Y3ceDTIxh`x2GqT;SoR$DwwPG08v+vy|=^VLx@F83>cEn9L7d=VA&Kt#pmv8vA3iIBlTI{QzwOU$MXlA`7^>ziKl3MtRE|~ z$057?3RWC>4w&@H~hCFZ427B!CPi6$T?S&C;@i=eQ6JBH8S3X2sZaQfkL zXxFeo>q|B0eEU0IUSbS&Y%cP$?+3J-rGoYXSCG@a1q)x@!G%M=@y?DWsDJE@^TtG?zXPJd9`s_yC^gpmg@)+>(V1CeOF z&l^r&4P>I83G>a?U*IZL?c+)aZKnoRRAI!?n|wC*K(($bAoKAV82-q?s(W5wHHCG^ z+(#*Nw4)#Y48-DhbX=?p&+#OpSk+wm(xroOnGIaXtM1 zeZ}OYT&|Bt0X}xxf$MEMars;`A{wKIxhJETJH!I-Y+ylhVPAZ4J^=IIMBs^I-7vYX zjdTUv152BJ^xnM~;=VW0{B+h|=3JrohabSwhs(*tr9Oyk>1Djp0{pYtjT~FF89zRk zBBug*Ggq=b@tt*Zxfh0`K=67WW>1+10;|1f(UliXvEA)`1IFeOuEB5oWe_6M2D)Jv;APhyOjzTNhXr`? 
zcDFhRM=PUG=qtK7>>U_3o5JAU2J}@u4BwCchhc98L0|^U>?n%kwXcdquah~mR%;WH zs;{t*?NbHVtSx1iK2|1KV9yL5#jkFENli=>cVc7>+>9^-KACWkS`x~sz55r_S5`u^ zsUBQD(uU$E2f$It7P2Lysm^0xJowdyPJc9rLiH!fj~%(t(wRooyQRV6d<^N)JVcu; zcj0XLL5y4@2W{h7;K*~9c8;;Gc+XC3>g31AT|A207hk~Frz3IG@*i;caSr%JwV~(q z52&|38;(ry5lxl<|MuJPN3=1?}BL zQB20BOk!V077>_e#;B57lyNMgc-M;Uvl;gz_Z}D}JtqsAd$=DLh2vV2m+(8g3>y-H zpx?a*r>8ImuY8C?#J^hVO;?gLxmh4B{)-bKmOy#fo~vp51HAg&4L4qygPY5faP(UY zh$uP3vn<}smV!=japffrTWavZncX;hO&%G1wY6fp*63p!J~#i)=e!+rR)e&gg-8f5V86 zn-#3t1Q7A*0+x6&Z=IDICGH_~Pu(}XkpG(Yh|VYbkFKVHJZsSORv9k7G>J<*ZDHEo zIdrTf01~SD@Yj@^p!D($NU`ppc=cuocDl^C7Bwhu{}Y^yqM?_~L5WKpTHkw%!bgsQ zwvq(qX_UZ`4sn{gS{?rJKLi2gy(n#L%`sOnCrVSU;FojmB-L3Nzs$bGnBFS*Y)LV+ z7R%GG7tLUka3#d8OToSBuSuQ!T`WAr?yt*QP;%EVI!|adJ*(u$+150M^4&}5Sg8tL znc%?z&3vruyF<6Ij^v)>)^xkQItBGfo+prq+}7xj#~AjmVGO>NLQiYTuqhwz2yIF%r>LT zL__;F+%RZ_@~bP*d+|~DzNwHshZe)Ap#ff-_7!t=>cOw05+BV~hs5=4mhZiovmkAP zEL^{k)c*f$ENmqIraHixC+#47Z6{v5vJ3wihC-u|3%c6LlgYwLm=d-akACz6Ph&^g z{oxr73TDtekHcJ_Gz|!7XDq3ElMo?k3cqg*GY&{EHp)a0`9?Kje%Tja{@}?Jb=ZTS zdEeloEzWSD1<2hbGlkd-xmcUbg@G_PbUI`~YIaXzaQXSNBM3twBV42fJ9yzW7*F#*`oBRSEhrfqN@j*^Oe<5@1P0z47 zosIH3PtoA_R&42AK^7$jVrKp}e0IMNQeQWK=F;i3UY_x*@_94uZ(8G%5I?k&O-6zF zKJeS}EX|*EqCC1~$SY_Bd$ZUbF7Gmm3G-xLf4YzCcDKtAd(RJtXXntn!F||$ga_UG zw~;nWp3KrepRtCAPqEr_5=+^+QyQ9&F$Z`R1KAFxV21z}9(SKh7T{V^TD>u zt{~i7fKry<*?gu5d@eo${|RA|qLmCvdXaE4b0c``ZROrua|OTlvCPNEsq)cE_V~m5 zH5FHHf+7R9qcLF&N{KP3?K*|M6Zc?hPaUUl+9>=fKTU2G$YFv`C+w44h5;ANajL}{ zmT`Lt^4fm%?D+!Zd-Mxem-2DmJf$dl-;mR(JPYmAH_-DErO;`ymaJ@!=VV!yAy|(Sj+3P9zdI1V<`3SG89?bPZ;8WdRk&Ek zIu$-&(CgF(G;E6_Gk->+iSr{AJ9iYjrn0iu@1 zqTSEUpktyzO-2{P`LR)q*`tlxGv^TZ;+rV=cooiS{sM=DOE}VPp+wfc8-@lpK$O@u z7@Ac;Q6&qte3H3Oj2^*w;0}=8@d6jFya^LIejr`!0XvetiRyN12=7xy=~bU#L1Hb- zj2D7XL;~3;hA`AxOq%N0OrB+H7Fg|sTIq}6tT2n3#Xn*9;}l5yZifww6?V|&4>^)K zpVZfP!NFr2@t&0&shK{;U7y{|9o)VK!ukGD8m59<~d!0tzrP*>Z{xpiz2 znQLqWg5O`jI`_{YzF3MA^xzu)3NPhca~2`PLoaA){93S9W&MxZjUa#eI@YLE<0`%P z@a0Vx9{&7=bW52)xtIZUe`AHh%E8c||CWRVy(S{oEo4;k53Sic4u)1QU_*!iu{Wzl 
z%Vo#$U6MU$$SZIfF3Mu{mrPh0zKk;^nw|6T4`2E8!bI?1RH?Rt?7IoHWMwlbZAzks ztGCmelDqLk!%+;|>r180Ot=z8b`Zd?fUmRtz##b-W2#9(x-%cH6Hy{BBR#S z&3~zb^RHNJ+5H$tHh;u#4PlHmILPisD@j?TYlTia5`t8d~Xv=d4ke)#Q>Dm-R4dA3^0L~iZf$Oq^p`xuM~}nwkhx;EQu^U=feCY=dt-_04h`EkA|KvcmZxD7|nT=mJG{Mz5L&(|5L*!+@!=k5J9BWA%sQWhtGNC+*^X%0zFu;oN zFD=K|oesbmoq`6>b5ZAQ2$X6|XY5ep!Frj0u<=+5`jv-rXU$=sQ~Q2YRXq+ZvlZd8 z_DmW!@*8d}+72}@Oyy@@)rIH1!yM1nc&aq#2XZ=iGY>ks;-3N=^nHC1x*nOL%^6BR zSbxUgTj^*%^aup!zM|simC<2sBJ>{V1M9*uygxaFD}1j*mFinu(Nai{vEF>PgFm{R zy9349@^G;;65KtdNw_Kd9)jC(W>zfTxWEfhYu1u7eu=CrP6Vl>K$JD`gSm%3LYPP|amjcAYm~Nd zpL=RyblGRHpW2HTPn^UzBa?7J(u4*cU|fP1Ke43u2RQ9(h5J*V5t}Yyj@$T4nCUVP zQv>Z3a+QWr_~$Ro%2Of(Qt!#FgtZi090AUpfio;m1P5fv<~8AD*EvfN2x$b<%XYBc z{Tf{8ilw!8YGC=(6HxD%4AOr@Fl|K~@o{He6@A{!@_G5N;?PRG*WrLxv)7}dLmXTw zEyl6lA#B}j0W(Dye{LiVvo;*y?5@*6!ymG^m1W$gu6{+eUVnhNbB(lUupjrdaxv7m z99Kl0Q`@F=$KW-h2d8&PdQ5zWR< zk?Q-xM1>b2UNatz|9b?_I#Y3d{3MDdxnQhj9IV^!3f*yT3RKQ{toWnGw!hI;v9H3YZXKcDC3`W zKQ!EzhZ@C=tnX|?FJCo=3=N*lqngi=B+tQf*+!&Gj3?8+%?p#i70|tFa?v|g0Olv& zfXg4NSm*6GShIUkY13>JUH+ABPV>XZB8hmSmi^|fbrCA=qmzdr-Js?JF)5B{T5AJK zE~k?3vEyK%&s@-snJAcfgU+@1irqeuSi0jL*TXIaqi>s|vW^cvUg1ny>TjZ!X)dUG zJV2K_p14I+oL{egppZ(}H?T ze1>0X&vE<6I4FPrMRXrH;@xDH&-6%RnPSGuy7CNVYTeM>fpAk+;lMLUG2w!b5W9z-wnfz+Rp@&?AHteuadg)?BWKDL8qRS+VOK7^ z`m&tCp9~H8w1lSEyaDc7UkIMVx-t0~;1m>&CbO$?JaaFq9d96ql=A4< z*5xo`fduCKTZuojpF-2_Y2;leax_jQfUAoy95pC_*oaEVSyqZ4O3z_W?0M9eeh#aa z)`83%J}ir!z#TV}@lVhY8kdKF-p%Kzwpt!iYL5}|oK#w+R0URp3t)D21$3{U2a)GM$*Dwy={^ut8AXyo*-CnjG{qmgkK zOvs)xLU#dyWenw#9PSK7y_O9t7&a8yEmSd$`D63=)-s^Ts1xz$KUMP89@ z*O|LY_6ZhNa6!pm3N=4XQqfmMm?o)A%*XigWnLSSzCpU9`x_lBG0xDOT0*xj4TdSt zCm>Ea3AgCQp+n{is86uRi{(vZhD$TP$u2>+no+zqx`(_C7Dk~vc|@NZON$2f!s>g@ zoSu+8?zv(sxK;3n>vEXGJ!<<5%#S`HVrN&OyZ9y6DRu(^|4#I|oCuG98o^AHai}&5 zf}a+^x)Uj|L~%A2$kkx355io^lR5aVn9hv455fzQF{APY+L-O<#y4tR$a zQu~&8lE$^*Jbx4n&lbhQSf?OHuiin1?(EMnrtDrie;Vq@cjEDnHq?D4^Q`cAuy=4CT%{F0^sJ`B7r!9ocf-Hx1=#M809wahkfa-~_`oI+v@eZtc1b6~^V#+o6v{{U 
zU0lxk#EcEWIB&;}g`%dk9pneBq;WS}IY0N@fMcs&apfS7qPI~GotrZiW7+32uI?g# z)6t9k_->q0u_ce2Aa23g_G}n#x88(V_clTPqWiS`Ng4U+{+t9|R0EVOfw<x{7o#@Zcuc6F)?kt~mplcXrdH zPodCTp$A7YY*02}2fmOL0p%8dT=n}ECf%I{XIiu1@(phk82m_!T{dFOlup(GI}3FZ z!+5Y~6yLQLLeJY^#?^Gk&t-+^qP`0q@*HfY_kk*zVJ+Ih2gZDMo zCU(<#2Sqr;9o95XVFHp4&lX11FJ3{W)_~q@d|FADVxNWjQ6b;(BD6>!lH_ASt z+ZI2^+(=tUmOTahdv{`=Bo~ZTgP@`B6e?@@V2ej1n|n#1&!jilrM|%g|LJ7T;& zfomA-DZ!Z#w?UD%C}OYe%JuEK4f`*Tkez}h=s6aS_mf`W=O=aKJI^mvSFgZlsZC@w zt{>k0N}wWzr!dfmxs@-}!{_X)^nhqPoodyAKg=3ooeSeynybV0VRk2aV~yc_R^Zde z7=7UxSaSF$@$m@5ZNp7?ZRbuBEF_LEKQ)1HVjoPZ{+MyUZZ))%qt_+_@^{R0o?ie`jzTWN8wFP#V*foJ>PfXzw9B3kWDdG!21E><6c zRa4P*c{iR*t%lR$j>uyW4#U$c@kgWCpJWZF2oRQGc=*!z-c@&VZMWdJA6Fi-6I zJrF-*8MJOwg4S96oIRJcU}?VwoMt`S{!QL+^2B0T$0rY84BwLz59>jHr4y{*-UP!b zFQ^~OXoZ;{1Bz?`qDZJ5GW;2S_>T5VVR_a2H;4L`Z z@Bmfwft<8u`%JM(Y)G_VE}LZ#qVNn3M=|!wMK{v$yd3T=^8=;uuXJxo0^I}IFx{*b zDnp_`+~fzS7=B_7v`P3NG!G}5f%$1D+>C{Cau_2m%BhOK}IEV_S zl+#BMGPu{l7bSzm;DcK)tO;bUf&3J3PsoG{r2^oYZh@AL`A|7?3_R|Z5Z>=ByL`NX z>!S~F`eGDRIy0{en?3)|ZGjv7Vd&l!4GqDjv_HL*yfci&BV5Lte5rv?uI$7tk+(#+ zw2j88JHf$mOU5b<1V%%|&GjCraPb!0s?CBU-JSS%e+XVX?GIu-M?kcu4$V8eF=BZ* zJh>Z>1456;e=F>;Y2I`6Wqq@CKc}G0fqo43cm+2qr0B$O2Ru&ri^gppkS_6!1V^q! 
zjc?|3!M0FRarg)3KJ8=o-4xhX?+MWl;$WIy94O}*a2LvC!;CI}((il-9G4BltU)oz zyWgi{hBVxbg9?^os&{AE4ZdHwh% zfLAfDnf-mb??=U3U*Uy*5jj^$!>S?d#+5XeJrKpME`lt6~rAeCI) zfJ$a7QTSFXdWormgl!Aq=k0^``U={wWKL)2_`}Pcrug%t1FZgdj(dG`57d+ugW&=V znCcn;(>7GXWA`;sxV#RkZ2f^VjpZGbYe9^0E2oG=5&4c`INA9Yd^g@kA^tP?{DB4Z z3H0MXtrM_e&l{{*G)#Z(>%({sMJ2oCE$d~N~RxiWgTxke*;7K;~V0Xfoxynd$esCY9rzi--+reEn zhpBbj1$kb+c#`vk_$^zDvmF-V0yh`n*Vu&1*PK8vy^AzE!4fZ;4$`y#COL|G`oZ^H z2+#!uxczGkN?u4qqp&HQY1dw}oO?O>u(^>F)OHiYAG(sQ{>~8C%a4MeelcctJo&Fd z3h$mbL`%^o_;M}?RQ!Lz^-V2MU(LnH?tEfU%9DPLosS#5pQBD=A=1JmC>FdyQ&#;) zFi-_zyGFRiPt9P-o~`6o=^ z!K_?3QZo#n+x+m+)(R9@-Gmj>OyIia2u>-}#myV0WQxD7fpjCD%<7HT$!0kr95NDz z6C*i<_j(=_B=%siTOw@PIgT6e{Nz4bunhMtnZ%jd${=#v7~OrCCyV8-yj0sUJ*^+_ zecO$PESk`L+h^3YD}XffC!{zw9#Z$cAVGKdvHbd63{kiZJoi&bHDd+E)WpHe%gh;Y z<2DZ7aexzFw&5c2E-0Q?MGN*8qGCoc;fyi%;q-%OCbu0{*iS;1R1FR6*@g2Dw?P)` zFI-)59^ZFFa}K;Z#PTX<;O`qJY&hwKV!v>??N1^t9kQ#OZeSU155 zcduHXUw|atS~lL z6r+CHg1X)XGEMY19z5;{qJ=w9U;8HbyM0G$(gF>OkK=;jILOEj!@WfpP~ZdWIOYC< zFgJg=xIKlOe)J5-a;GYO;+({Zz-1V*aVC@m{DDIvk6=R}FCNYe2Q%;oeJgQNl(CXA zTV9h5rxZc>)iDsOx5nAWI8;%RLda!G!cWFAcH(B}oM*!v$5I)gq19B(H*d*;RQV>5N>hNocxO;)4#RrAe=rB-Kr_|S3?r?2L>R(5+j$eh^ zxK1qFBmo(s9dM=41Vsd`VPef+%`t;RmF2wLg@Od5yIIH{Q=9+ zZkPT8<^j&k)0GKpr(J|#t9xk6dIzolo#h_!^vjqK^*~2GJG`j!8R|j}K`V7P>(;s9 zFULEaV*3Q5-|>)Y`A)*t^Enw#r;b2-)EIoqU^}RxbbK_h1vjqNg5ZDS#3g5(dph?T zJWOU;ht3Y#-2DelwL9sniFj0he~sF?7vkNRK+v3?hso;q@a(1+bf4EKJW|)E<5hm# z{V%#8NUa`^efv%d*A2rVnP}z^bwcyijhr)udT`@R2yrTM!lfS9p+_YYG(~t6WtzDt z%C%vRz-ZiHH4 z@hD#3UjdcEx#U+*5Xb7J3e0jUf}*q<^1@IZ;y&HNp@Jw3H(|lyehw(yKSE>I zDxj~GGnI4fLb+?q1N!?W2o-+9yPol2bzm6z9iCwR3R8G}bS3Pq&PU;6EwJ6Mh-3A) zi572*;S@%AfW+4u*x>z(8kq#rsmngXzf+wgEx#8Ze7gwZ%DQmiR}whK4&$BG@9-{h zz=`K2a9D~TF@7x^uN{T*j(W)SYQg0T8Rx8{2~Ry(h+bBOSXQDzJ*$$ambW`DVKarV zYF_kwGjn*`8FJ;SFX8Ch1=wAb1B*_r;T97W>hgXVv`%@!VAVRhMn)HlTE_5{;t0C3 zb4qAd8qR2YjHU}aI4`s#@rO`6^jy4-nr$g~(7PCaN=)3wqj*x^70<_1LdjSye4Bk5+SNkfJiVk~x%eMG z8&iX2>{+9Kw+RxLZiSJS8E~yN6>cg`((#-EY~uZn{pY{I+`ppG82$$H&X(dV!6DR> 
zS&TbM0%+5vUPAA#Kn*cl>^#zh%FaBQJ<2{%mRyZ9V^bmfLo~?D`~vz{$6)W>uUI&7 z2kMG*h-r8d5%X_@&z;xE?@89_N^Zw%iUP2>#|Hl7Edpq>MulR3x{Ova7eGH(%I+Xm zv;D*~F&%vR?-BgEXo5#0OUVJX2&lghKy)il;GSJ`=;W_7Zq`5}RHs5t<)>09BQv_&x)gg~DPhOS9{4ZF4QKJ{!Qsqb zU=o;trt99&MJ@fLY^MOMUAhqizByxjU?0jpZ-SPv(^&hh6O*3VfUVh7=(;-&Z;Ktl zoXr)bno{9cO$j=PvHAbQ58SlA95Pxm3Z`ZU;m+_o9N6+3p6T|}@ET{_{%#11kNBg$ zkpEzohd7#=oV0>!R2 z1(9ntB%SX#Stw)!mMmj2|MztezZ?z+8!yq_u`M_?u9_At)gUUZtH8+Q0_-ezhdWo! z(PLT7>RR0LO|%26z1FEkalL%nGcue*gb~ zzpM|v+TXx&mSvKklY)yURoIw6pv5-N6~r5WBs*p zoKzy3XlbXSqVcQbK9@*JrA@oEB@H3k$zGXJM)t_wBlo$;rVv7e>^+kah37mk|MrKt zeZS{i*XQ$oZ?-3{qm7_zp9$yPAAxRSK9=QPfthzYv8`(l70#HAmS25fIF!9}rSwVk zwfE3yE(C|t-e8^V9Gn_6jdk5)VbzXA?AGuJ|hi9!o#HWBIU&GD8h(YGx2g)H(dBy zhnG4n@bI_e;J&F3GVSh@iqc53@#RlE7FG?u(1$mc@Zb!_>Nnkeip>d@Fcw%ihIkV!zfNuIs3gYC{dGc*z^@V!)Ko8+vB<;Bv<3>M^!~9n*c~ z_BRA@hcoRV=3Ek|qvs0b7kIJX-&-j6x5ChN=B_{8LF!ZIK^(6DB=WX`=2I!i-_V9n zpSh!rz#M9D;v79+X@=(8d!S-lA&48g5yj99c&s`lwg1-!D3>I}+`k!Sc=}+Ey)f=# z&W6R3j!5kK$(lJO^ggc|9J#v~(gyn=e!(kTeA5+HXavw_m!xq9UoF01=i_HP)#;lk zCrG-Hf&+W9SpRD-g!Rkfi^HzCb>S9#8k0+mcm5*QQ-fgE^BnlKQW`Z6J;taMJF+aK z4@|^=K+@MpXbrH2t!bg)%NS6VR^MUQTQ29ovNlXBWAhnrv6Mdg66pdzoO13v$Fp-G z%mxV%6Ya5j061Fe<7J6gFg2`!9Eyq>u{zO#-=ZzGX09*1Kq|G}%FDOfg?1aFSzz}CKEl78SSN927cfd6fr zcR&$ERI7<6e1<|RE)=9QU;4xeD85O^i`D`Z&`^d)TR%a0MFfiRi$VUXi|~S5L7fii zQmN0sAnKP1@T3L95ZmP$&OC#6$JB5*-v+xX9N=O|4VCW6OOK@ zU>&Grcw=};Dah9FH-J6zqc z6xSy>gGYxc4s{<#(fpbC`fCHxuQ*LTte;W)PE{c8zEE$c<5 zfB6rMFV*00ziQN98G<_k0zrCA9|JWi>GTJ0Q22=rjwv~!TUrNIy0!&6E;T~qnfdVZ ztW}DyVKBO$y$`YPOVPa88O36Is4Qb?wq6p$(H$dXVqYY@3>yONo8M99l_HuitbpM+ zR*+MC2+UZIck7EEQ1Z1N*Uj1hd&?R*2VX^#Ew?>j%a1f%I`;}Z65J01zwPkN&>ra1 z+E4r+zvJ3$OeBhJe9*kW6k0l6aM0})Twpz38_Rz1bp8jD**#SG=_(THU)nkCgKU8VOl-_Ur5+9q zc^4uZ$UhaXdG^D$y}YTfH;ceDJzJu!x*kRNSK?5~edG~3hjDW0FyzfVF{2$clB8n6 zl3VcJVK3y44q%h(U+{Zh01yxYa{l8~&~Q4v8vg?SXpZBW_t#+&?>dz1{YWpTrlNA9 zGcKCqLK{|RVe0lKT%n|k9~Y#6`sO#V(;)!01DiR*dPkACy=RQ4^DHC7LGB&}NHI<) zr<}7Pmq!C9a%Rlw(`i1zJ zoi&38zK~PPV{wbEHzYIuKq9{H8BMA0$B2;qCCUAOw^Od#QaU 
z;~lsP0na)&Y(A!m3P*Zz-r6^)7w?K`7P6#dM4SFx%=YNMS)^c{A6$>$guHj_p)s_H z)~^yKDN|a=uj@v*=({rWdnLR5D=!ev79P}SW z>3M_Tv_qca+H3G6xgIW#<}ri=yYt^nfWqA&AU>T7X0m@kBHa?7m`!4p!Zqv?^FSAU z)VUcncexb9_Cdh~b>ol z4!%R920@6cL*n;w3yR)(LU&8BJA&3GJjKu4^uGYu5gq1>|0FXt?&5ku9?bR&0>i6~ zFlFfj8hk4Q8=Nx1)HVSx%-at)L=147m?*Z&#G#>CCC)gOL3mdb&@`ia8y^{$bUka5mpGBL@~vLHoWX5R(=Iwx*llQD7~pZ4*p6 zI_O7TN1Nm>&rjR2gJmRlGX`PP02k1WNB%-m1pR687q9M!#rw~1GnR-tQTirC_82j) z)y)m~aG^B(owR|TYh_r!tP>vJ@4$oEzL+#t10g?NbDON}abt@G9hS~yd^CT`PVPj* z`47xz9n#G*`(b8d4SGHEqNNK4@c1ov6#U%^yJG&~yt}o)w=9HyuZe(S31|G--+}$T z{^U{aF;H~-i~BtJxtB;T{L;5(Y}qB;>^nX9TFwM&H??2_&ji&r(L`yL|IppZ5=1Lc zz_qCdAnx`?co}*N1ewlCRiHf~4GOwmBClkHS{mMrK-)`9ct(^P4j8Cg|u1m9Hm zf#4_?*UhUVT{m}wo&Qao%zKBNo1dV+Z$6snYN3@#3tiOF0N#)H!PEEWQ1|Q;*hn?# zYrAa5tW1NQ2E90@u=YE5v^iSkcU;=&)e-CqJdeI*loVA%^a zm=r~)(iPC@U5e$;p5x+=cGN343icR;;C74K7nZ|2Jl$0 z1;{s3`cQ{uIi;%M%#$y8>i2K>?7{Z%Q>Wrw#YhtQ&7I!0>Bp5)!SLgyB1z6P!^M0V zcsS)CZE8#A&ixdON~YCR=gM=;R9I#Bc;<-(Nt15}wpe-->XiS{a5Z z^}v@|P58R+H1nL5U07rn&=u{t;=UxA0iOTb=a1eXeELeq5*Vr6Je zpQKoU@+TjzkTpARR(ycqXFhn_as?KYjuP8b1HjE|M!O&NxKO?a2GtGFY4$6Cd&6*= z%~A9POF1G!y0A>vA5~O8;+3}skT~2&ou16%Sa_+Su-SI>h;4_9+v;(%j{^AY3+DXy z+8t&pq@adg6-wJ?(rb@>uyTC_1e`U5tVR1UUN;DC1@_|_&7Cl;T!QHcD4D$7x^i(=1#b5vb~A1tTy!(FeK zl>9UI;82ey5aYLutEWKroJ@jLJs!}{e1I`)wn3;SKNPnoLqOa=C`*Z^zY71Ms&xng zuYj!nQFF{Zklav z@JCPgoe+LofV2AEeN;;tA=7W(04w7X7@h4yuVi~eQTYG}bY((bU@hq1{74g&H$aV4 z0c_4Jp^|A4u*d!$`}wBPDLduSvhXpg_pgI6IUDlfegIKaFTmN7ZRG5wStL%l6vTvD z&}YmD>E@@XxVabj6+3Z>D%&^A&m+HkhOy_r#*YGebj=ZY&V$^c9smCm!^%6zhNTpoT7oGJ-{G>P1VqNPl9!D0K9Y1A7ln_)P_qf+eF?zoHED2T zTNHUy<_nJ(?I%5B?QrScCfs-?6b#MwP#s0~`MJ)MY9i?Z(}(}iLh%%Qb#w&M7~5B` z^CuXuSA~az@3}3WF;LkRjCS%D$gKY|Fe$nO1^m84+-GrUv2i2U_cw!eJbPXm$CHMC zlW=j#AM9AEfk7)j5QW|*uu@$HlF@xoHcuLkiF?7$^b6$jOeaddw17>w4(JWoP!q{F zup(^?uW_r$z`9RF`A0QqrQ3rc^G+GXo5IIcDKxWr7i5Jk#ygd~bjpW)AhOYw?j9_L z*6w+DK_G;Wg)&nquH_XB#w``{m2OL!C-33=Z|=%0c^#FhEq3~NDjb8TMkF9(}diz zxrua^Gw{!jU@Xfn63q5d{ai8f0z0Fsz@i 
zhXZP#ux^(Wl>4!ao^}aH1{UK(B^~sPoeC|i^JBQX1m8ZG4hJ<9@a?r{#O=fpXij?q z{AabGWI-BxcRWDdmF7^_(TpMuO`!fZ34LDs!Ya9DI^I2>OsUa^WX21=Cszl?krUAR zaRvxJdd^YSzp-mPK9w$;=!AL0+#-Hv5y7n$!Doi029-~C_^c?Kkse$Q3Gl(CD`HiOD#hq`m==m@X zG`@->k7b&1!^Q2e#%Tv$$r!}VFBR$HEh|~h=?04Z=9MqFatfYhS>TTDlL&1~AgP0O zJrZ=$M@E~wJfVo3k#hr*=7t75t>j0KHpJ=E6!9%?Xtl}}lW%^5thu^W?$mqGl%9ka zYdi3BRTs73my?q;2!o`w&nOoCll$M3NqC=l3?~C8waZnkN(|B;p-U4oau2hg$P(G|Qm| z(~no+1K~tW9dm(qdsrqoG3@`GAoy7(LKP$ZN%)`uW_owvEr}EKsA&(h-Q$N6UQ-Z{ zzRkF0DRkM-bdb$jLCd!1P;pT+&fjQ3u(wP?ZCwjcjk^v7$D6Tia*PNFt))IIrHEqB zEl!8XD8RTOu6{p2*00NgiqAaqKWm1t_Wo!5=fFJ9;l-Tg=a@fpMZJM(IJ0#ze5fkL6^G=(@2e5$CC1VniHlL(p&EoUeelEt%kE?)!E*Z?=&-s# z{C`Jq{KjSj#CBlDd~4vn5k(F)Ycgk7BPJZ4fy$?VD`jC0{`S$Jo0ARVE_0!N^%8n= z=nR^^&IXC&!w{@ggWHUb;3KiW;3O^smjoD_!fG{4H@Bkde5K%Bz>^v}EtD?2{efk0 zexd$~KKwa_m3-p{uxK~ii&(|NUVRR8zL!J0<8RpDY>hErub|LEcfwzth2MCcQ8zUX z%t4H?;#R?m)jZhNaS-D;L7cO1)}Y;UUil?f&X{{Ff)jZ#pZ+QQKm)zo&^;jw%G1&P464k3{pU?KAji<J-PsFvKC<{-!Sf|3x*>J}Ss(;vy4*Xopd>L#{X`F#4gwD}KL1PF$Zba6Y zCsAYINmY9rgd=l?L4Rb3Zpirv&6g!%Y;_@8>>I&a`zBcV`V1<6a-d^B)F6PHhegqa zG+fIQX0xAH!_(I^RVouSYt%4p|4A&~9SNn&p5vdaLXOy3*qOM3 zC)MxLd=PUxj46vdA$0vRkaFLPr*(#4eR4VQ{8z+PIO~WI zjPV_l@#1+;*jkZ-Wqj;C7x9D)WKIC*-h8aN{f^#TYzAkukD!5*6O9S1M$3iU$>@d^ zSZN~5mE9bOHxyb)ghc}GQWu2Q77y5T{UdaR@p4WE+R_&Rhaj=15wn+$({#Ttu)uj4 zUF~kmtxR{rX?^h^&$uc*K2pf9@(@(@c;)wWxnlR-2DG|WhLX_^^rC(py}n_~7P0yu7)K zys^us7DxL@-(V-mwz^}m>=0GS4x z=Q^Q$e*r%CRe*~U=P)g=i_DG;rG}46VNXgIpa+dYBk!g`pn&UAsR0^v#x~CTr{bzAz^u)_;QOY?K>XM z+5eDBv;6;Hz_%#!Iwurr6z1RrH-&lhMB(?I zi~C+URbCH7_qK7vdF*gQP9{#?@Zp+g%tUUvH!f0t2+4VAP;liTj4C|B#JYtz`j<;Y zcFY9X7yjsB5dz0eSWaM;F{;i!2yq`5bEg%$;k%>nh}oV%bYv_ROEEDt?z6-g-C<1h z@E{W|8eppN725WD!0!2>Fg42#G>Z2?{u~?ZEe~V!vnue1L!06&p+o zsQs-nxxV05Zcjo!7BJq<$z5LXBq|OQ@jdw>#vz=2p71rk9IEgZ?9K{-C!Pba^mh}+ z^abDt-)-QY;spV!G2EtsdRY5;8xS$(c9PpqJSSyfQLZT&tp5jgH+taY?F3TcXOFFq zw%|0Kd|2jqfa2{W(2?5@ZT2swFzq?>Wc^rS1*g!AO7rN-D6ZsxB0^ICL zOs!1B2m=qi=xm85+t^EIK0j!U{H4;G2f=&WY2>*6fz*9`sHFOl+zo1__2=91wfzq0 
zDCW|l*brRC6$HP8Vd%^cBTCzDFjkNkb-T}IJ1pzoXLyc?k>^BajVXR?;*rm{pM~#o z2g!lqR+9X(6%sO*!Y!vy_;mYMytj|_8meumdU+r`>V5$Z)%`GcMJ8xJ6eC8-toO3{ zKMc;90QrPy{CX)1>?e9b^o=LjYObU|1%Ff9Tg|(E#}6SX>7fm!Z74T`A0BTqg?XnY zP;n#zzUdUpoc|+!wddP2V#+BagRMxi) ztUqVaWNCNK1%4Hpcya=Gk0zk;lcf;x*BKnv{DVU)-_S4jmFc=5BPjpMk8Vw))KQFa z1QJIiz{w-);F7rp=opm1G2Q7z zP@oVJoBY8H?m_;}T8#NQNt2nw>A%YoREPCjfB2{49(7m9XMAaQ-dNnxk&fd5Zp6I8 z3_I)Wz{jhHxrJ3R?)FDma$5n4SJogDngC~$AldFa$PN4|fP*W2h#_O_P5oPm^Wvny zGpPl<#-rfaqg~Y5=rwSzZzWGwZ-rXg2TM0mQa5`OxA$llIEnh=meNvGaVv(#Q%Fsi zr^9c5H`y=liZ>qjusz*MwB`z-MRqeJ-M)(HQV%hc-w25rKO7m_2lY&j^#&kI(2>l`v zSb6d_TnO%`r?*RT+8*sf-;;W9Jx$r|0WRyK)?$b+8;{V=_48P*F9k6J~zU$<%YOx z>3*EGpaMUtJj5k4swt)}!)r8zV=9(Glro|?vy{K0y>a+WE_F1Q@x&E7|H!1HQ^%)B^EPbXYQ z-mNXT_KYa5@HXeB9?pY@>{IJ@VK(Yl4AYaPIJViC%Dzk#vTEYwn|BAFgP;fW7#s?ioHh}`4=J7#BrP4P4!67%TO z2c@`IwkDqHkjH{&AjGB_Kw0UTz2BG+cvV_Hod zJZE>Y4Mw&wMN0!Nyc2`udo1tA+l*7Lm16Lk`|v2NAtiGeAO2OShM*8>=7x9=-{uXF z5|6JS7Hq+CvSn!4)I(E>J0N}68q`;{gnd`$(92aJq^t59{kI|wAF;W|?211)+M9?I zDW#yb^f;zBjY6J~5lvz}nB~gxpfGrv=0>~Vbn9{Ww!MRx(TC&{PbCU?v@%~rFef*1 z2MRo!gnqI2DArtrf9H1+9ep187?pE4^HC+*9RCh4dH7OiudIZ92H#-D`6&4QZ5GZP z48X)y8?n~w5Kc-5fVEH;9yE(%d$DTLF6571-;$7bRXg;3x8~%?n-Kktx5<$3AjxLu z!Q>oM63jUa`6_*&*V_%p9R34~?c&I~gZ;XFLREJY#+!#tfOkx|(foxl2SmV21r9oKsh! 
z1{Z$=bYDf)B{pE-e*_H+^AV%oqTpk7qVifBl_Hos`c5h1nKD+mfEn)kTT9!;uCh$; z5SBO!z_xq0=|IpUqHsHy4%=tL60Rl7wiZG1`gGj)uN_kl{sS4#S9&wgi_Bhc2iqK6 zsAkzX{#Q~(DcSxd6LEL0hhhY|Y647oiy9 zqV*H1QU!Kg_TD2_;QIpgd{OuvcVfP;&_vfW<2tPNg5Dg5l&@Q zSkGEgnMNrxr&V=7vEI5Eb(GlL?0hEU-PgmtLgpFgQvsKbxp=x!137UQp?sSs zS&et#nsy#7KfM<4#4eItGz6kA3$Xf5FDRTlhof_Pz(}qI-YPH;&5jCim(Ye691%F1 zV23kBKcK?>ef0EmQ&8`CkG#AWaJx>B+^GB$xJaaMK5T)C>YFg_TRG}k_d@&r0-Ba( zk8{mfAHaMzC^mw|Iyz|8h_xS_7xls$u%&&0sRQ0=F9s18?aos(;p)-C;U` zCz(fnonIJnXfz~8vv^Z$e(6BNmjcqQHOjbm@x(_{5Pi=_V}@P|F3=d}UcbE!tJ?B$ zUHN;|SKI^|qgIgGzkucWW>E3FRdD=g2k?y=Lz3GL)I9cw?yu6s(KZ_pG6;nqS7+mn z$3oDtD*_HhM?s%+31-Js!(z@y)@2ERIbn&w$2e|>x)Oo^v=!KksbbxoFkExMjWHok zU}Nh$FvvQLtEU*lsq3b6{X6zKjOYPRu^5QT4~MC<`thRCP5c)>omOzGpkDDW9TIoJ z`J@r~!?%K)-XHX=_(*$9rsEI37SP?=hz}V%Z24iH)C#jn?D-rHdqVC(%d|u9O|2?JD z`X+N=rvg`04*lC(h*eW5nk}v(F<x*UZO|ulLnC-?DuLp_T z0(%&BiN$}#j?^Q=hPbR|_lk?jcyx?C)9N3BLiz~Ko>f6b756}Y{UT75e?VWo_TYw) zRcv;gi!!z9xJ`cyJ1!+sivZ@_iI@vLYb8M8@Jz7zybI^Vy##MxA0&TnqKXysNUz{h zU*DH7U3&s7{*!@s`TXF-*bE%Uo%r-?IIcWj3F=D4a(u^Zq5HKOu6>q>$CSm{_Z_{*=A935I28hkswcvZMC`I`2D*|6O zf!O{b+?=!*JN1Vg`36gK%q)C0>%KfS@Sm?Gau>z{L;ZjE(4- zk;mlj3YPhCd<&;V&Vt$R-`Mb@61P0$h4&X2uR*hpd`=RA4Qd2`hx_2kWlCgA!Cbgu zC$Ot}hAa2cJ$6<$Z$j5K#o*IaL8P7?gku*5Nv?YwRi8VFJMx-H;OQ&y-%uFwzV8S7 zemc<;26p&r*Fj<&-G+NRolxpu4l0N+ciy?*(7yUL>NsCPw`V=zI$x9{#5oUZZLZ>+ zph|3PF30PQ9#nA6C$^VugqJ_=L;smHdfQc->lXAG0}bw=Kuk5Bx;YK$v>Ct=4g{2QTV)^IKhXakWBwGW!p^I!K!MF)9z6RDybt^F&A(EN7>YwasNt1=Eip)SY!hkN)db`YkKsMNaWs5)kBo~GnBaYbf?gBg7IYVcHs@fH zKn=dr--=bqDkP`UiPrJdQNz6d;1QSY$9_G)t5ruB@7oCOy7j_|8*2D_{U65oYlqTR z#zf5Z9ULr{fDhgw+@ixXY54pvXybVnI^~*S!KWDT>t$Tl(^+`rcQQ`3(ZcMk6quf{ z04&bM!{?POSNU%Uy|dX4My8IQT)Li|{_6zX)1UC%ZC~_SI|&;1pMlrRNZhBCO*_{F zf>xm%o7)E9^068mP&a@hRn^#B*aMg6wc*7XLHOhmo6#O0!MryPz#**jTEpcM^XI%C4)PdM-R8x-Mvg6|fFpxj@3w2az?`du94pD@DQZK-53Zy|_JCZpq< zld$5HFZ5bi(qF$cp_6e1^4tbcA*levPuu{V!kwu5Y!W-nYKWnYKl;rr0ue(8=-u}R z6qDjPj}1d%UWX2**SRtJ_;tLxbtSA@{24rZqamd1F{uwJ#J$q7 
z@aW|@?w^?jb57RNRXq;e@nQ$quNFk||MZe>mT9r;bbyYVv5sCe_nt@}HQZzf4^=(k@q|0E*R16-bSJ!%a6qSZ%rzA?f;S9}u;olL z$hGNVw2m+HL_c7h(NQ{m=2a9voPh&8UkU$vJFqsG1D<~eIFoZ#$SJ;TRA}r+tt&(5 zH$IIviFV^cUmkg0NfA^^_X0uXNxXW)7!7|!K>B49TrkUr`|a>N7+v-QWjub}*>H-@ROEvR?%T2h75< zNBhaf#S=KsYcXfHT0F>J_CbRacR;Lw50|gWf_aKoICxPI7l=io#lsq^J4b*7p0b4N zrtMfUFq`gumI_s10w>(vQ0lu2cg3=GP(L7q{^CiX>No@Sf(lvI&9VdB>~y7`WV<9^%rE1{OUv6=l7$t$RuteT3}MU2=1-8i6im5xuDlb zXOtv^cH>jjDS1yd+$W*4^9y}7H3yYSj=-7D%Ur9K#n}C10i1o8jI~Za(3uue`ds7m$@E(OQ-Cpc8=%qil9HRS0&)}c+tUI+R zDP>ya6X11J8rS&Z4& z-$uvvY*Bh>7Bc(~91tsq(7A8vfoxUClBmNhQ#pD_`V&Y?HIchhGVsR3dWc<-fC_cp zxOQz2E;gD^bzgqNVxhmVK&A?Q9FPaT+4s>?G7E;UI^xy-lX%9r9u~y(K)h}SEKO^J zz6Dd{?M{ZGSM6aq$vPq~O>!{5%@v+ssE6W7B}kLW!n4k8pm!|=6_ZwR7R+)0FUB7@ z+hq@$e~0mcw>?(ybQ7U7DqtO)jU%jMljipmHXl9%Q)6||P=Ny{r&Z(XD>2aPZ31Jl zO&I4M3MS5@H1K3OUHfsX+};f)5bpI3&L54(_G9PJMsbAQ2TjpMs|vFGg&{NOD1Fxv z4oxkwF#kygdHiG}X4-{ewy-%KJ!XS-ZIbkfY#FX2LXhxc4xXH;2mCt%@z|;#xa{>l zF8@+fEO?m#9@?F-`DYC(=DZ^jJKMo9^d!W!SFsk=JG@xzi`E~U(Y5XkiObOe3*Y(B zf9o53dgDcNBbnDq(GbG?{h(+|B-rU0aU+Etfe06alokaszdpRZhC+jEKHgoWf#pv& z!SsHXg*@{b*DJ5&axh_!X?IhmS>LoLF)5+IrgfrhmekM1zU+UjH!j zaRogt=7xHY&Y|^0FixkuFq795|Ev0etHjQrd%+)a#?Tx_B+MZ@P#DcuXW$$*2Yy>$ z3DFNk;l`sYaQFK%A{;mc*Y0)3?BOG*6Yh&s{l&mI=noz>5~Td~#jxH`1Lm$zB@gv{ z@QvnfEE-UUiOg&w|CdL8WAg*d7kWqk&G$faK4az~or4>Hm636u0j`y45bg`U4~-c? zG(5c>)=EoK4D`VKwSS1RO(1cb7L2MN+;L&nB;-ic(FPcUnd)cY>x#K>Eb<{PS60KM zNHx}l??LmUZ^`5XM{L&^Cw?{78294_^CPq8d!;3v<@*Iz%l!x2HP@0Qf;o6YP6*nr zw}P@vg~G~M zIKk%l)iI->^Rt&d_myeylcS(ylLoI=u(?T724~&hUtoK;33)#15bK2(F!X>6o%ZYj zV-RPeS~BChJs#v$MhKg4Fq;!b6*CmjhhXSbIfC;on{~-e@ zv*Fm_5(tcS!u|Ufagtiha9(Z!l=m0I68@u*yG;&Tlf+Qd^d&fMP6u<%)#Q-<6xgiE zE3d-3Q8S!)U`wPFxVs2LnpZiRXLpi2J3qtrwIyJ9SDMs~9w5UmrkK)bLE2_oV^72! 
zcrDB;f7C{Vevs{9&J;(Yuwe#ue*cP7y>SGO&uU?OvT#!T>ICe+JWQs4-Un@6mNYOa z3?3Z6L{@pfCkf5rkRMq_L^2tRYzoVA?KDR1ROaNY%tFz1C$Oi-8?UuGfu$x->LK6P zcz4HhwAolkd5&gc{5csm*WXPanMsr2{T0Ypyp!sF&c)d|^T_mEc?gm$X8iXW$SSjf z&HvpYU3=p&D3Vgii03f8DwZy~um?!AJ%)FE#Y2zcAVnvZ7NzCG>6T{B(lHs32{;1F z|CIvAYz@ARxQ(rvS>*Cy7S+%H0$-N65apM)_+is(=rh{_Z`6~}E_ImJiWfnfMiKbC zS>l@A?_j#|ExX~*cmM?PSbmS?+jbJZ0tX0IC zM(#v<>UX+0gJoY*{m|A?0)rYuuuuZVdf|?m-)>D7sF|z9yZ5J2cgB=ApLPA zn#IMV_#$4kRL+E{yG_x(Rh!taYy^!5*_>MaP|67?B}=~g;DB`uKJYd}DOFeIA(O#) z)gj1OwHqRzyrt#x2WhZM23`@*LWiwQ+?g@?`0TDDb`cA%d#C}-ei{H?w??5NQwAi0 z$HCy=6)dawMxPIk@UY=Gsd5h@2Np!2(g1U43G$>?Z}mq-fhe?y@k4>==NKB9N?U|w za9(i=c1l{)7hk$LSB~t*Q1)+iFY=^j-w(#HRk`4Ab`Iy~&O+XSZbE_=;p*;cG@1as zKRpjee0(|HFNJ>*FVIecCE0 zJz&5c{d0{~c*4)zAFP(C#i6R$<#?`~Td2rR*>S8LD*ny~tB23=e7 z4W@0)#Ip`5w1DFT!Q)F{Q(H4^b5FxQ@i!POK*5qZ_#(Ih z&!;{ngZp^oyFJvw?0!8~d{QFyPjkq#W#)J$O&d07nW4PE0lYf75*ytPq1wWDJmw~k z#nRT;#&Qi4C+C3R=`e2Z$ql$As2Gni7R}N)4RY*yCIpuLqrJ{{H0;}Qq|SOopy@qs zWIP+L!#U<^9N~)k4?>gDPCT^@@z#Gk$h!HbQtVWH$&7(=xP9s;)$+OphdQQ_P<}7W zyL%6EU#LNlun}`F|DnR2YcX=i1mr}WfhQN9U_|NzdRD^>f9#zK5%a!Kp(%)aCb&?` za?!b4R>PM^5g;El27*TZm?SbvjyNIdS=&oqu`}3;OQx9l+7I{cR|Lk*-gPHF=Eeh{k7!A`~3+6ZSt)!_!eR@cg6{?0t3v9K$!0 zwwu$K&$$G`Us>ai5SG)p`N@LYBX4$ib7TCP2q^Xw-Ka_PaZDrcah%#Q7TNAarQFHp3Yfq7OQR8J)bPnEf& zCCh$^J=j3+t#KkAbAN#U!EyX9md4>V9Vh#)l;GkwfpGWIJJ>qe&%ILk7Iv1jl4qVW zcxQ$O?hR$T1jS0K)no%NCM~(}=rz30U4~Avd$2S=jdg{)N&S_hw6^IT&5@CXIj@vy z`>akZmdeKD1W8bC`Hpk8b8&gFINEG&rJez7J`}Et8aL|je-xc}Jk{?T#%ZXuq(XaX zA*HA|&n=bE_-biViZrz(m8MZLG9r7AvNJQzb6X)hWL8%8$d=9T`Tc*qUe0-a&Uv2u zy07bc-+p98xr@R<@rfq5C8?8>$$|L$x<3>@%|mfbP1rNw!8$x*ajv^B?AEHmT?P3N z<(YsIVJ}e3FpT)+^2q!5`qTa_Q_jsgcK;o^N4LI+$FyI*RJX_&=X7^rncP+M^?w0} zLc3v0&_g`?u^D}SSkZ>$4mo-K0`&jzlY}H1;!j#kU#u>t!JE&Zr3!_lkISgvv+dZm z=>ir__|V;1n(TFM&}y+0RP~PGXXOttR&9y#yL0hKp(EO`T-m}E&78UXc9@vQLxN7R zUQ>mCRJ;Epoc+)aLq1nA{lhtk(#gb?%TExWF=hIO=~=UVI^n!SAY3WQr~TP>5a`4s zzuZX%ZAKeVqzX zZpmSINErTBK28T-Bw~S0G3=jG2ol1_@yx7vkQZ)&qcaF*C;T93*5dT>G(K!{oJ4ce 
ze;D36n%LPZgX{NO&!Vv&YuMmAYy%!3tV+pUIEH}E{2o?qLLSbb#TnwKFS9^xQLw_S` z&zpk^Zlq_^9)vd{{hoP zmTmb~#C8>vC>nnuQ7^iOrhYgEeix2FtXL5}m7~J335?@9^-hu3zE%j^HM(hp&Pvi& zmJX`x6yz*2yrGjngj1{64YJ0}ch(aLM}>SKb=w8VzG8(1SM0#nk`IJ_cwqRJ5~$g~ zn5u3(3(8&ZQDEjk;%T)eOqA|McOxTIJmdw+=G}mjr^t|D9VlLD1(Sb#uw1H_ax>$w zu@=K2@wQXBymy8AjP4ebz5q1ebT@AUSJ^u!nKHzC@90YWHDuRyz0uPLk=O zp}_WqFk99f;+pEw+pL83u_@^#gs`95_4vE?wbiMn|sbpn_vE3TIiseEJCj8h)YrwJbQMl*5fM@4(hkJLcJZ z3PE*AICXD6);J%hg7NMM#cr7PQv}ZQ|G{qiM3{(lK-n8Du&_P~T9mltzGw@!G8SyA z+f>p!gC}XzD=!E)Z^3wvFf>9p-1s6Cg$$Zu<6Fi!jg2Gc?*0Xxc5gI!>p{fJ6LF%? z1V3MO!sx;%PW#I*&^#v-tM1ujxHlIIT!%Q>tqDX%*MWGyk;UB61GsVx!16n}m@hO2 zmD?XME_x5n>)L{*Dmu9L#YNbp`4{)96vO&h7g*qAfgATqM_ATZ9Qw(})}x zb;+Rmjat}hUV@AK$}o{h%b$;#Kv!mYqV4MSG)c$-+*aIycZtW?eKa~zQ@NJ-MB8Do z<8C^nA_VtykDvw5G_b$D7k_KCLGf4{ZrRZX&a$dp{-Et(U2_p5_m@DD-56ck6N%r; z13^gY6~(@5)a{BbROuv=X!lfjSuj9OOwR(*kwsYRx`^-(2jEJVYUU?n&!1N*7NH?G zb5<|fS0``{rx~GO$qMhqvNBLLs*lrM98ROc;r5IcSn_)n+ zl?@5(lEbRrV4Nozfpl|KQ;pB5bP!cZ{1B~Ar?*+9oFHJlYF zi?4kBQ1t3dj@P>k%>P^nFJ*l3zp3fCJuI3gn;DZ=7fey+XaHWctwJ+lO&CtHU$O|$NT#Uz0ml9gr59t=FIBJ>> z?!F~N*Hs3_)LbCzmMH{_CWE#WKmO_*Bw9wXsK{LfN7$ay>3u9n_*ui7C+r-%&;h*5 z-N=Vpw%^zPf1Xp8k_-mpAo3L%@N>Boy7 z;bBrcHcQNaq(naOykpAIov?(xQ|b~PD}N>>rvA8VZvwo!8wyL-`a;W79uOIsfSR|N zaNb{*|ZL5}fP!YS;BfhsPlOkaaaU9G5{$d6-NXL7GdI)FXXdEKw2Q1z3h z)armK#;;vWmg)#4{;;UThV(J~oV1r}-;97r`3iQgUIDgjZ#)^+%vs_s0av7p!1CTT zRNowp2gd%xaHbnZ+&haWM{3A{jTtEPP6+YNO<4293K}JT!7QOBICI$^@9mpJ+4|@3 zDJ6x?YHjeaQVS++`e38`Lz+;u2-<{MRxF-_5A+!q=#(owIJ}EHIP(QDxiz0|40#Qn z-i>nc7SqXW48!Bz&SbVL>o^L{!P`ptWS=zKhgF}XDi3;b-q&6L-)A*}-}qU&Xnh{?P5l6t(|^Hj)lssH&ww`n*@FE4 zwt#%sH}o~00y-vl$pzsMdVixH>_2RV*)xBDa;*rQ-Cu<9EdN(-KL+!qRuEe|Gi*d< z;N&#{Hm@g^ZIxIObRCADl`v188+eyYf&;%UT)*=WW(7^6#Z4D{bL0Rt5kWk$GKtQ7 zJO%REZ=9`M3YbdY;qJTd!tI#F7>PUU!6d2+wm)ej2Y1~9*Z*!4k;~a|aQ|cc{l$q) zWn9hFD@l0C#13B1aKM+O4dz)nBY#&dAs2tZLB%APS?YtY2Rk4TU9fF+F+I_C4+Izb zP|vtjxU(XHh;WX9-b4>AOm_mgKi+uw2Tzi}b1&M-x5GkpTez&X03v1BZhIr^wLHkj 
z=~U_D)ZdPUBMCmZSb^fl#ev{Z;D#%ox{}GA%c;W8W8{mVFKYffh5F7@af+HNjNkR8 z6?}bY_Mi+rC5~Z0RTMe;u>{R)6X_fqGa8gD#k!i@@Y<(j#IxaeeCl&pyW%!=(dfos zCW`&w?Q^z9oq~TEAlMjY&qVQ+#Y?>ze7S*|u&?zGV9RU~c@=_@r$&bZ(0*w3T z`W_alhSP0{)^wYZ5od?*TzdA;2{spg#+bZjaGS*||H<7CbOcty2S;zn7TgOC&K*Q( zxCK^cwlXG9IMK|rA<|C<@OR~eT%CS8eg!WgGA2S!rDWh~9!nB=!x92>`KU&{4Nb}$ zf}>)g@NmU1crf3EDj#s7A`2eDg;YOUwX6{W6${Z)>N+aD;^NfnL-4xL2_+LXpeB5SSgT=|uaJjEB%vXL7KXm=@4E&=f z)-nErojIz0OF^E7GDzk*hhFnXz^O8hcx1@14AdvQ_MaO)RV)qSN@{4Zlig3})M0CA z1fAfmAl3QVr1;qeBI6$mnI==wPvbG@rk=$5Nj~ImN`a_zMV!-GE8$&{Fu5+^gG)OZv7=tO9Ba_EuBUyQ>^ZLj*TVT6 z9uVCVO}I|;VE;lpPWbM5=xNbJ?=`Q2Kfm+Qwy*`y(^iOzn2FQ$>oI!@Z_?7jCYZ6Z z5a!75z;b6nqM>mDtb5q^=Svmt7cIrF^HeymGtS`dXS2xNm@vA!Tvtx!mK*f5zRLu? zw;-Ujm~(1RCYbLyhX*#K;Ne1cx25LbyCnm9O{{UTMhf`UjFFe0tI=u~+sW{YL(AoV z+y%3qfM z#5})uJj}sht0xu<6qCZwX;8V%lqg$CgAYfZYGn;m?8)cO8EMA)(J}g{DHX1E3xZW+ zJj-nThpU96X}kDs&aU%fD59iDScoDEWiu9_|1w=EV`^&4iVoma!{{CM4Z7 zB$vMCu}=2moEa7`@l;nY*=qI<&Pk19mAf=fd9VsAtz2PC!aljF$7!M>^AJAy+Y2un z3X%7$HO{TP&N-)d2V4S*8C%N+!lb{_G|4ScUSS8{cksf9;}Cj%x{3j3y-Ao<9Zch? 
zhuWBQoR`X*#MfFzYc4;5dyWGnQ{NB#k7vSxRd$>?PPa(O=S$#`-UcHTjyN2V4JG1i z28}JFr?`h8%GC+(&9#IgiAH#=&-^cZA;|xz5zPC8@Mih|70kMY{UeEV{?+w(ASw|4 zYq?J6huBgJsXw&p?P646yqBH08BZ)Wfs|LZ5MsxHDGy>HMQsADq^sEE<;S?B|EXPfpcbQw|gKK*+eB~leY)=8qb&JLXuQ5#eDMr7kWYRx}Z;%7*@8bD!yJfi?}mlHW3kIvpLU@j;`+BRMYodizcbjG z`3r`$-oWW28*%=S5=ja>NvzGs@!9nv5FRQ*&66YCk0DFA)-U2f+PI5b5MhZcEBO<@ zcYg!169u3>&j#q^6y$hy5=UGKtLqNK5<@QIX}6*Bp`B>n{ZejU+%P=c{}OpTuEOFC ze`u&|J=3_F;M2{ctT)30?N<4Lf@L9Gs`jHkN^x-JRx9=!H?z<3D`?jUfUmhJkYn2u zyr$29G35#55$VPMrX_;yewKasd>Bmk8IZ@1d`XOuA??pI;tnr+4?m4xk~u0d;QX%z z{0qa;G-Ls|u1v+}!y6qZxD)N zzT*5BHV}8+3s*mDMv<6VD7ds0zMtgB_YxC8d}l+% z$S+>`P3?iGxm*wq=qI53{5@3hvNzon;!J+-<%jkjUTFE*fS3PDWAO_aG9=<47t2^t z{E{QEUb_h%)HQ>jy)=gKvaH^@H&pDOI+1)~L;hXr!UcA7u(GlX8X|9##C5Mw`j$S9 z{#uK@Ka=4Xi3Q1Zde|&CLQ`&zVaW4x2xNZv2Tv~%IwS>6zU|P>vKn{IUFqTck0?9w zi3IA{VpXs{{LLvK@o`ZQv?m7Jw}t^{T{Q|=J;I9GD%ShwhYe$uaD6?S$!zkVeO(W{ zO1g_~^ymFv2=mU-KU}AKng?*WI~RX1(Ym|!ja=FU$NT_ zccvO+hKCZ)cKeBXnUBFHs1kl<{l=J_K^lFp8nfoJ9z^Xfv}kd`>mDCr-7a&+wsFS4 zhCg6^!%ljaJzth_Rj_AkDn>5xX8c$eI!v00XIdy#RlWg#1?9=4pe2@x@g#MPJi#8- zCZe^OgIimULRQT%9PiGXy^-3mkhyB$uNxnc@k8w5PI%GF=`dD zj&!vg`ghhHIAF@=InxSwRRw50n^i#sN#IVmYTU$^2sOStNsEag7O~kbVl6vkMM?k# z1Gpz=zs3_Gn_=KyH+>VUfmMCR^zNr!ATZzs$>D;WdsECYE2EnDADzSv7DYy~dd`rXO?{DD!TArk-9sYF5 z5ie4&HJdD7;f(#~*P(&d0Q?x?M`G^>Lzin{j@=&+;#|Sg1&iQL@->|PvJf`t?uS_m zx6-ny^I^da8*trs2K=5*!qwpgP z9Wp_-p6kb>@0?+IOES8h8-`c35^wQ6!H9xsV7+;Wxa*jr)6P7&xyT)k=)b3y`wj4c zSSu*@_0mFiuewJc(X(A!SrcDda_7?ukC!<4+!}H&WZ{x5b#OE=M-``FDBRJ`EjlNS<)v-V(;~_lsw1WwE@|dH8oKfyBPn zfTzX)Q@GUwZYs-7su)Ut6*gF6|Rdrz7z8EsSwBg-X4(#p;#2XWN&?Na1 zl%X6;F0pf4^AUF%+m!~*i-X?P1B{JU0~eJu(aoz7CF1MwKM7HC@njG#xLOR}yeXia z)c~W!3jh1t2X0cf7|$RCUF!`$!b`BV(~Vk{xOTT5=F6;uYs?Sp?9aHld$zOl zwTVo?9KqNGcEoK}Jai;}z-bzWz}NboE@E6F=TF~| z%t~Nges(OEwjTZUj6qe!kEDih=%C3m+nNZm-LhvT zMR0*o4E^{mA1^qMli2WP+_l;l0;O_*E^Wfy3+ynXvl;g_dccm5RuI&900K*6(ATq! 
zr11N|Z21=~E3geE*qjBNy)@~-9NhR=8(tpU0CIDCVRv-`oqARQ2QImwncrXbT5CGe zTnc`Q`XsBcmb;vH5_ZY4t|yBlD7V8Ln^-Q*JvbJ_8IY^# zjy)znKwxbyyn11bTYd9z{K*iw6^69zba0RNc&7dx%09WR&g`1gi^za!~^hu?dc>g%AF=VsCzkDV^c~aT? zO)UKV9n~w{K_)&3MtXjLa9R_jw!ej^zJ0i2=`XO@xD{=+s`2(f23iEzLa=HuRxfM8 zQ@3q6I?q>Q;nN*B(>j^dF7*bsa)r$Le2h{#i5D0Wc;2CtRK1Q1epPZztMbRujf2!v zsR6c$b)bO)Kkk}mk45pfVNhxmeH*zjCGyJ`JYDbNFGd&|26Ua12hF_~QDSxv1DUiKQD^KT?7tWX8mkKA)fXUTp<=bbTdx zY~YW+!K>)MCkJW3Y<2waWGC6o^v8ZjKkk|v%tL6w4=$fmU~PIMb#OWWvPULyw@oSN ztPKJlXMUpl<~zKw9fYt`Vkm#S9sYKS;*sif*uLExq7QxrkLu<0ggB490j~<4zAcD> z^I4wYo*-P4>cIsMKeBtPI=p9h2Jw3ar0TIVklW|s{q4tCc+w5-Hr3EI7RQ-CLlUFz z9iVtH$a8jj^UlV#TCV8V8t`?}u}S<;W$%ig1Jn;6ct<;9P2=Q$IxY@Z<|oH!8? zjn7{FgutxVFue8%PO@AoeNzo?yXL`mk6^g4ydQ?EuYlzh3*?cFfcaLxtg=Tv|e#hu(l!s|3d*AR9HLzQY#%V|Z=17X%GOld}(a zaKPjnzTL_^-;FFg*3|;Bnrn#m<{DrJ6WyZiK&NeY#E4HnaY@`|XgSQAl<6A@MP=VH z^!q7Ne0DZ*xsVJ~hO+VCM;q>0t#=r0WrU`ZXE12j2wj^#44$xwyAi#q=<83Q;5ZvT z?CgS8r-NkG_E$u>Et+LSj?y2uw_xY7LWu7=4mYj8fLKBlRKNIy|1lm^fQuL|y7U5k zrG2=UZrjrw!*1%#mkm!09)t0*26kUb2B}{m@FvF-55Ew?+uo+|YUCDP?ChZeVt??# zSv~MncR;BV(U_%t4JUX!>DT9vXx$L&3rsF2yL0DppUcO9OcGDh&xv*ZHKV!DzKFOZjB5-;e5F|1wI1f{y6CJ zIFp#I*pFsI|B-!+!`Ae?6gKSfgl)ns*Pu5YWo!g7LCg&PehtRx^&@!3;y#A{+XtaL zj=-`=UruoUT68hEjM-XS(J;aR_g!z{Ub^9rZ}j>(Bc^-t{qiIb(pU$7d=G+YLOv!1 z^TA0qYcPMj1iSse!@Hy^?xcVsc^t@-^!;uue25PK(&&oU1r2fj_Fts*XdCSBb|p*C zC*z!Jy|gKgvHMtWN85%S$P?CyLb78pB+&qN0hv&k(T?*r7tvL#vtjWuC)k*M4tFT= zV);x@c(u;~_7(?#Z1^C`Oa4K=rHorq8jt0=)+G4Z5{yWBfz9z>akaNUO!c~j_mqs# zs34sl^vZ|P_gv`lii8iIDJaXo0hVZHqHf1$qNTuCS@Yb9hvh3$v_20!ZnwhU_OEzH zSBO4$7$YZI6hKg2hVe#MFb!}M+*EIZ+i?N(xkbL|_+e5QoDC7aw>5Jec^jc{Ng@pMpJdP7Bn^`|2)s7Nbok$6oO6-o zZ{_N6uqF^C>Pn#9Cj&JZi#JD)f@cy!TBZwi_bG!LXKcXMo{wA|2*IQ|ju7{h`Jg*K zLPyjf&h8L`uhbmBI|Sqei<&l zzY@Z>w!_1F9=L(Tv7FNi&@c`_^G0vtx+@bcH1dd>SsqlX??cC+a>#jpovysA3<+x< zg7(7+2x2|8&TG%3_MOde=*2wzWt9qtrRQ+MZV5v`%4WNFj9)@-$lO;== z=_>z1xUeQ(PT(jHG52-inpg z_7xDmV{0H`W&}L>;01cyT;SNf5779d5sn!e5L4erAo#q_JZsl{RYz$mGE7$kRR 
z!7>j+Ay^41{LAR;=Q;kyxo6_Wq1x^L5?GO2=~H${p~0mqYoe89Zatk z0%ctvvT->({}dW<+iG!WGv7eRVN?=|R6@8mwpy+9m) z`=Ex{YuJ!!4)PJtz{uSNYZ}5}prn-u_n08mjWI9fR8Ga`AUN7)k4Ke0qTG$eFtp(f zO|@JF%iH7Os#+q;qR)o)`L?im_Ds~cAA|ow*Cw{J4zar?g7EUZC(&3~2%}rSg4nF3 z_#w%k>Yp%yRacxKpKCyen%{wI`y=FT55r4S?8&Z+Z(swz4t5TtaKaa~LC&xbDCu~k zqmvu4dghFwkMhBXzXZDV0#xmAXJn9u3rd$O}OyxtUt|nHibd$ zc({D$D?C-)M$0|xal8LjR9m_YuRdcQT+@HBS;LUV1dQX#xrKP(y$N_T|7Gy|yKv;c z!zA^lFRJ;f!p)~0+*TKRxW9D}iRL(HM;URxx-Ot=1n+?G)4fm{wVo=i>xBi=O7LLE zO^m$mh!?J(MFTTsj5yVgSy3*u;no*0kPW~Cw^)?VwUvx6vO-#E3hidfFs1e*m6+{< zX{$>xa`FI1#~I?$dPBTbYykBG51AjtoW_C{CLK*d@#$wUprR39zMjBmivnmQoKo7(nk2Aq?-IEm_~li4aDz!S3vu_ zBuqb*3HGzu4py`r-k0$vjc#TA>r-o~%j6Ug+~2(Wwq+H*UpNYWmhHIe;on66FHI=w z-VL)PI&twiQTUu|NYwQwagX>mNKi3{U03K@@hS|xm?Re8FG8tH zCcQ5v%X!mpjiW-h!7{B)E=9Hl_w)dk__l&_<}5P!Llhf7v%K4iNDk-9Fq}59grAG; zSZCD>)QmyKlRpS!ku}(I>Ms`A?8gRBLj8NXlsr#D4c3h{T-HiIFD}FCN=eGSSB#?u z;`p)c8m4w9!?vS_c(e8jQ7V%HJ&k-)I5P(9-XDeG15I*LIRco!`Xs47Y76?^6S(c; z9=fw61G3zgp!K(>aNn>If0cS+;fpb*Bi7RS2P05p_Y?3s^N%JQr&H%4)|qM1j4uT? za5@#!$lNXUaJ!QY!)s2i1 z)WWpdlXf67uand>Esq+Xr*}r&(8Yfe1Y{W>^xz_F-q`|I=hbp^G}ogdpEe#S-hf&q zy12!u0`|Uojo(r!{C;+qM46eO!oyPZD)HuAZQq2JRc~RfKrp1Qwa3Ff&2ZkmATePw z4<3H7hfOPQqkvB=_qu5&mMsTZJWq&o|Lzo2ABZK*jz3`cc1twRtHA&T9~>NF-;amo z*xdgGZ1&2+##{4Y-=4X2Fwu*Cdt?t4GwtwW$Qsmi4S=mp9dtp_Pv9*13I%-D;C7Z* zUe)s-E^N$)U5|!AsLl(c|9yie+V64nX&T-iUX0uBZh`7=Q_+5H60Th2M2gV{_9!H? 
zEOR?{BzZ&eDNFd_+{(S7*$C2ge5C362(~-;;RBZ46A?^-nIi)jlVOEP9AnNp<%gVB zH8Yx4yb~3wc?fTK4s}R~fC~@Gu%xXU@t!}fY_fv0l@{;g9f|&enJNh}E zg*S4+(COZdGle%$N$btnbD*6ox3>XoBver;O#yh_`ti(rHoO1p#bcT+;2TkZh0c@M zq&yF_6rO?LTq#I=evb8u9*4Kj3t_xS1B|w3aGIkgIA@xcz^|HpoHM&^>Aeal`L$Sa~OR+0=Z9Rk=Vau5SUY&9_1R=u(iL z7(vP0|DU62U^!O^p0j%A9xF5haHH+Zx1q=tpX;rEb;!hD{VR1 zNM$}n!rlu3Y4tr@wnFtrU+ol_X=$(FV|e1mJJ&JyXC5vnl7 zAGUq@2W89G(6tBGV`;=4&@0p+tLHY;??$IV@@F~Zs{}!bXB!Or&VUbQZk*{ucHlR< z9i3l>;=s`g%*+l%nbJ)9sOUP}X4+&-0ix>zZ}8#el|Q2Cb{$iF`#_|s3~%H&Q`M9<>{svt z38`D4>&9k_3oTHq&j%+mgg~fg4&b?!aKomUim!;q4}VG_@IeO68chK*d<{C%Js^v9 z6iH{6;9>0V1bIdSzX z_|&Jv$8VY7R?j%dTf-P@aou?d=FcEL(nZS|KdeOGKGl zso?NBjBGWDpbV{y%T@1EZu&mh@%#|L$rUis9*eu~z6JHy|2Q#zV_ch0#kld<4tiVB zo@E^3KuGCIVyMz_&Rtcnc;lok_}MH2W_V=4SfCMviIr1<{kOrUyozasg^;Ugg{P!! z;c=}ZSNv!{yhs^`yK#}2^!q284L>Az&K99@%U4J-%Hp~=%!U_IU6A~soz!}D;SApq zXzeei#OM~PtqtUcoQ~k+XA9teFPIOm_aMGANW}4HEjVqlH&NqjrY;9wLa|m39zV?P z!b<`%#LNsA*V^G+&0^q}^@c+aQ}Mxh4{j*amtA_s(Zb>-9o@T!R5o?u$FB6m=x3A& zx4-15ysyUkeazqR^apB6_|mQbFLfR zD9+YSgLegnc<#v&SamgssvqgWU&Zy9!C9X zL;6d&W1@#S$~O*!I^SaA^Jok|)UYo4rYM%3`34Dm>GWZ05q@}D1p#klp?Q%9eVrFi zMb6n`ZHO)7Oh?0L&NfcqZ56WFtO4^}rcphKNn9Fk3ImCw&}c74yxAT!afuB*wre9u zK0SyM23v5BVmJtlzX9j_8EBp82)6rFLCxNW{#b1UFCD_+@mEz?dZHMGw@k7u&o9ti z*p9DV-eLLdSI{_L1L*vMZQu#ZI(T0; z$2SsQ@O`rn%YT;P2?Z|Owdmk&wLMsL_bYrZcmM~!jl;uiSGcujA6Iq!0C$z23G*b& z!&!%L?07VZ>GxPZYDqB>pQcIvO7cM(?<5J>Ux;R#jj*lZ8qN6Z!@Af`!TrHG7(M?K zzEawTVJm%c@Qx_l5x+z(6&+;Wl8p#+`=D3nG%YITlK1~+5Is9FUNFBJ!==hcKFAp2C{@`C2FLwz7=bgjwG*uMV zp%9XK7~;moX@-sp?30>Cn+n^wvrq2Rm24hIc4s11Ixv1zqeE(uf(vE|X#F4uk zJKym@@tP%cXRSFLqeq~GZioQC z4XNTGWG{(U%@Erx?v zlHj%iPi-_5ndGR2vo`Jj=w=WmA@#TDRK83z-dtV5w84_-Oj zGFB~P2G(Arvs{chzX#`|@6Mm7Qpft*0$3Kd%89$O#tN(KN=fa6JO~S(Bqs5fVfPVIXOl)-19-bOO4oLq)a{Q#-UyCCdf;Dg=Afxq{xUyjC#}ofAb$>*1UK4=w(t{4$f-s}wGxCVFP{9|8oF9IIH0R@K;ByMZ;QNxeCbj}~cP@fY zg7Xzy&W}Q-xA(uv(SL*rA`{YF`oReU>9%)--YrZ*Am0`e%R=*Zj!L zXWOC6Jqcub|6xSt7d)={0)C`BV5Jgcvn|Ph%NhE3)7cPqyDGq?J%2GoqzTJhKH^im 
z6teo*PwLN;0#5rH$#0o67$u%T-8UrBqtm-^saYdAbKnA2U(SMQZ@Loo_jUp|(}M^) z4T6VBE{SFSGe4ojbaqP(t`rys@ue5(Ue5s;b9iwXm*z3`37TCq?52!S%d5TI+{$9Qi76 zK~p52>-+$77KK|KdG5JtLGpcMgO5vSuK) zpcYN8nqgdf4UAi#WWJ*uddOre)J+~H+j3I>-|bJ7TYY73j%Q)6!8p2a`iLzjeZWp+ zE_~prgJ*F{@IY7xnm!8QOP+ZAD?5Ob5{00cos6aL`%p~4fN<_KLK9uKeg zJV$}kw_xn@TfFsa6M722N8zGk$QQqWx!HOUAR`ISU#GIZC~r*VKp(n1*+Wt9Rh? z6cgf=KjsxhCHCjwv%8-`H|jR(&$Fk!QMs_iI|0|8K!WQw(f@3lAsJMO-NMzN;*m%z|3;`z`54jXqur$s+_hq=ManNP66s}!Gk2g0~=3D{b1 zLHY1-dZzLSjeGJ5AGvFzhtwo_WhVm{BCfz(z6~_^XD^-&c!MT0TgaAIxo}8ajx%S( z30gNypnJdoEG(ITzU7QB)fE47wB_mF$^U1pxe(+Zg5B@0(~HJMm^|IS;*@^m3! zsG~>bbcle`RZ}W+O%W|8y5Y~_TA1DO0(dODV8cZZ_*6Q{y}V)+9e*%in1vCH^Zvpc zt=-UA%w~DB5ESZtPmC^#!?G`fOt+lC`khAfUzIZ&`vlVi-VKcPdlr+d25{bY8#W(@ z;iCa-5HtwK$~CjdGFulEcdNy?)-SN)d^pZL{DImCo8!vK5Hy_#=FGYzi^uApW6@u4 zsLi@9xAxpeI4ZgbRQ*IDqNfxU-X_y0J3le+hBvGlNC447H{vS)7aUgQp>Rv&5{kzLYPxG%;e21o^zh39eKgBq|L!AyBL!0h?uCa~>8)V)gAk==Sd)oe6C2bsfR_9qV@UWPHP=IXmQf zJ2&IHj2LX1>kB{SbfK=>4-ej~f!B3fU@t9;&8q{DU%?%pC|9HU#rybr+YvZCjPPut z8fTbB5{dt@_U6%4egFTsAt^#MDitYZzQdW%*_0+UsU#&DG-=SJNh-%~A?pk}@b=|wxIi7p(v-f^HAJ505{s#zs z_7T+D$q)kq|%>t(8H{gSJ-Spxu z5{Zlb7Z~#=R${ZNh#ok$7Hrh|%AE`9AYs=F6EqtDo*@U;9(86%PObolA5=hI_7>1! 
z{~0{0f5C3~T?UrjP6tzayCvu*E{7?218%wwKn2?nCP^+Cc-y6bNH25f)U*X!B_08a zf7b#B#}Tk;mLYX%+f7Mp&o@w!DhBxk$4x!=2MQifh8m~FfxF7-z}UJ1Ze3giKXgom zAw{R*=8Y{tbkdJrf1wh5^dEz+>|dC$gX3rI42C0H3P4MKoFwYc1tPBP#XG@jN9XADL9?^qo?>B?+bPGV2q=1~y3!q=; z9Js5t0^ByKgmW(0fO!*IxH?!qn=|(+xW6x#xrjdo8F&|?KJE{6@Vp2dxOxhiv;+=c z;cDhgI>5@XwE!vbg!6rh^jQzW{gju;Lg)upyttD z#?;9fEbHwB+iVvAt<3d|nyw2wac&;KxO}AitWcn%gu;}8Xx55d1|%cCFf{B3)Ybb9 z#qHBz%TO|)?mmU(0S8z|jj8Yt9R)cD1Wsq9mhm$#VccRkJ?`NjtmM=q@Z$AdW~Z$M zjO092KCF8P)*j%rRAO8~tX3Pyuh9qN20P$q({wg4q6DhuKY_~{df@qP5gk7q4MCqA zoHpVDY{rKHZ*m%}3yA<9GTR_+y#WL_c*4M+6G7y)rw}!fHi?jyGWobT7#!+%18K`| zf$X+cZV#&q?VcY1-6g|t{|N?8Ja}3HOkJSE!FZr+UImI&EkTIs4%lNn367a8hqoQ~ zup4IOfT=SBff#0iWGgXLBU!kr$BlJq>4e20X5ib2QTFGxzpS}aHN5&f1=7cQ*b>bR zAR^@}vw`w1E(US)>t zWTA4+2JrBq2q5S5z?TbeAnl03FY~!)R)0BM)3k;?@XQy?SjO>hUzD>E9YbtfLOVVA ztpn`d{u8J@eGS?YcT4=slAyVi7yH2Y0zB5#4Ob76l4FdM#0GfMA6m=Uw@);IlJjI{ z1*!nnYJ3Otay>xPmiyqccQlBtiU&5Esz6TWS>V^CM2{%fGsRk+U`5M#c6jh5FhAZ5 zl0Q!Z&&SSzKF6;>D&;jm60Jevt#k&cfvtYlETJ_qEl=kjrJ2&^i(z#IvG0p3it<+%6Lp|bX4U^`p`=OncQ zOHmH|ICPIaH$?`1_Fo4sEjy=fcf zI>0ww;`|?9GnZ~|0(-NwfNE$Pz^C1UzKf?q>))O5@r@exS^NmJbkKye%+g@#2~JZ< zMVs}y@(Nr(@&`P$r+~eftARdb4wJM!*>5K$@YX~RP-wCSB-_g~7S64pJvT zY<3;2S)k08n-4N?P3pnSm0s{lwK6;%!~nIDTp;22r4@1Y@TYeJxRgB#k0`Z4YRO!% zt^vYlg`Y9Yo9|25V}rv`?@OST)2r&XJjO_uazl-34jH4i>auhVU$3fK}EQpTW0UCZ+gA+?ufKGkRlORiv z>+dY!aiyFQx%El*%*z1%ncZ}*%Uv)mKN%`(8Pg?Jk?@<1Iom#qVgeo(!q1go!1Qm{ z@W~Df>Swzd7{}aUOb^O%nD;@r^z&=*`SvT;{(S%(jkybF{|ToH+P1(0og79`oyv~i zdI%`YJq|;^b;04ooc7qu>!332FdRL525da30+;D+13xz;!m%(;rwyG9A1vhbB3q-N z$pIy}csP=G5_Ty5k4m}2w?R`kcPZk8ob`{7(*{tu@} zE`lCZ(qQ;)@0f$9(%>xF3})%gRA$BVQm7n1jfspI21-po!29iYq34oiV9q#IW~u3I zrZm?PYV7{N9x^XtPR?imme1C+^(#5A!WeaC#JC4U{d@xaw%5WUDg^$xq0Jn8x)yYV z%5nA7fp7+O4#X30IR7A7STXSfl-uxtS$NC~miiTd+*6!4GwpRTaHuj?0}K>;;E|Dq;AXc5EI8K(@>edWCv!TO!|4lP zeOok}GvO!w$2$hZb;^VE9uv^Atqk-OF9Xb+Qs7|3={Q`^2U!7PDBH++A(=Q)Gaq-d z<+{$Wj^i*)K8L|g+jpH4AToGv>cQU;SmKVf$6M&-M{a)>s6$=C6gnUiE^Q@ABZL#*ANrQlB)K-M1dBwC`k#R^J0DZ(f62 
zXWPMcK_pxv-wppv%Z8KvInUYhhj9G0L1-S|0D9idh2p;>FmYQad|BuT_|?^*^6*69 z?w zZJ5d^w@8~rUe#igFBSnU|Hq6<=X$WlEC2{E$3m`RoSp6d5LOcYAp0+bOE`>w+ut_k zr*kE^v3d~phIfI`TRY%wp%k=ToB{6{ce4G63o|LV6BO#MgC`Egv(6?Vbo9*@u>biJ z==P)urY^e5NZqan2cs9jmKS{ZWW!5FC6dD}FBWnh3g2l}>MnR2uE0J@jOTn9^4OoU z-=X7Q&M%?EgZ2A00m$V$f)cGCpms@%IBxhPoNDL?^Pcug{tWp5I?I~VD$RkJwskP^ z*k?H0oC%u0+kuVa0)h6@Wvu!q1*YrKI5up?Rlr6rpj-~O!>#+?gO8K1f?e7N=+O=9 zn7zd@@Qd^#$*Ok;8EyAykQN#S()K)N&7Llh>{c~k72i(*AwT3{)4Mwm9ZjNrpaE#+$Da}ZM7aI4o%}5AXGv_Sq z@?qfgI&)_J4Nm_pSrw@6tOifKa{!(4fjwwD9>(R9KooESZj~GbBNoyoVYR22)A6^# z@=rhDi=<9y!PUJfulp^@Um4GgYF4uS2etv5tZ?|gB?MZFPk@6JC)rz~Q}8it2hM+z zfSIQpc>nN^xISqNB)7=Ifd&N-wjKiuLT@v@!#Uu2Uk#U8vWL%490bnsAwcfbH{d_k z0^ctlq67P8!h^~I?6jLcwEOuMIOCB$D?jp;ossl}g38<2#X7fOL470~^Iiqa@Bad> zP8Xr4&3VvN`I?RV?g8pvePRx%y@f}U+Mw!&YUZBAUXt;*j^n&nvP!8xz|V9uk&N0P z*rwUW3aeu|UiEh7`VR>kIh@0)RzGB$t2Tf~+2x?}Du-j7StY5;{R{47^}+I8BOs?I z65??s0Iz9f;)aT7)3GrixbDQ*U#DS7!y~Z8(v{gvjPRH*j(A$3x7VhC{q?g`-N3O(z_BAf-g@<8acqiup z6I}yW?|A|zoxKEpR9}PZ+P{I0;}M{1BnB8qEQ8LE;u#@z2G(QBptfHMR%#Riv6drz zTHXKxs(!+v1#O&ns0q9u^crN}_`@bC`fNtxcTo?^yMdbAEm8SR25m?a1;GGjNdD21>0{ssqC%fRjFJ>cf{%|J2K z7fucc0L;x~=ookj%y|$B(!vix_qI=x>Q_k&*ZgXW%QkV?A4CvDe(u|^MkzQ&7D#s_HkxOR}5?g^$UXTY)mDU(|py1@DO zQ(*F)??AGv4y2f5z%5B$@JrMU@W^F?cr-Sa^Q<<4NclT9rK29s|OE1BL zbuBCvxfC=d=Yy%k9M;vtfZV;`S+%fuTBAG{7Vbmfb(d!>F?$H!ah(h$vpRtj=P8lE zVK1g%8la1t0_-i=2I8tF!V9L?An~~!YP!sWF~{WK$>?a{7~;b^$!Nm6&wc?B#}6{S zHv`5+w8MaF+OS(c6W9cG-!63jO*fa@Ha;7UVXc-;DfM0I@)^NQnarP*+6xpxRNc+?bL z8Yo~!{keUTua{&c^A)^GxeZ6pWwOYnt6)$+77QGG55iUkLCtr~uyo={`1rCnebgq8 zgu-6$$F4!pVXr()i>T)CHyzOOdoOF}&4;_< z+(Cf$eJ1(BQOJM243?fwg%xM|xHD-a(4QXw3eMUBlaWq#EHWF+J-T1K=tBt@5i7yW z-><+*%O2>#bO0|dm+FvN$l*pIAn&awJ38||Y?zk=4;*<1L+rA^;`!0w`MsZPT1FeZ zS02u=rp>@mvJm#1ErVLwOW>=Z$M7lVDb--Jh4bahg^8>_IH>mu>deuE@AqI(rAorq zyL0=rP(Aj{Wfso5mjf!+Isw9PC;M3`Moj+nhN+GX@P^Gsph?XKFtd>Ht-1$(eh!60 z<)$FS#R1;pYAt_LILKHR2Bl?&C1p3a!bLUR>|m)gbI|%VSX3|`EF;%Yf2Q1p$B=&L z;q{1FZ_)+B6HB4XTF!f|>?7>M%b7dD5scN8DmZxkE~qBz*?kVNj6FYqZT1d?J`2{e 
z%-|$=*)qrgIVdVCVX+6sG0S5Zr7N4iCvD z((A4rXM?Y_!9oSj6EL0RYT!Aoe;)`Hjpo43TW7FAYQyw|7u!LZTsElAmt1@GzuwTI53%>(Xcrx9tb^$6iBNWd?mgH}iagl*yK#GR$t-7huj%0=%kJ0hj1K@sSBzyJ_i)~8_gmRzIqtA6-3KUVghAK%YPh&*J($W*0n2Af zv2K+WU{_)S@F|@KbB{HHdW&z&ujs4LR`dtr@^=9AF^4BKy5QK)8ZcS;FN{-O4w;C{ zpv%D)Jlh)#B%;UQ%>74_El00`d-X1CnwOXzJDko=lAHvy+?t_Nn>IN5vJ@&t*MWN< z{K08APHivh4t2man=x1z3;Z`~K_ONKDSj$A<98cA80iFyt_HJDs$0ZDkr&wD{z1pW#2Q}ZC@F{o?|1vDgYy{_GOLXBDQ#T3roJu1#Mg&zHGh=r|-?>@Ae;tEw@`a z&9dKMU8@#!!1BSg1B1Z*i4K(iynw~1jbJwt!k)=jW+zqpf(voiz$W=R81?)FD2ksA zkKeC>C)MOZyk`c{%~Yp%X7J^Do%%b0jJ?D=CsLD)7a(9K9D}Ae!x3>i{R#Y zs%-1D=im>pqNPp6V0H{2YVIBZ(>6qa^2|8+C^(i8>t_J!C#P|oFbH<$_Ht{P8B=qf z)16A+1{=6J+Uk}8&^P1QS<5DhINN#M0Izcud+Ikx6B>c#{Q`pI91`a~Uh)>|r?`KfYZiMNvi)AUFm*9D~ zHCT}m$J~g!0Nx7=fqvjsV5r*$9IC>=ia>MNsB|ApcpF0phVdCn`Y9W-;SYD_`UGE2 z7K6R|Bk)~#9bD$Z=`60F!_IqS1@+ZeGTZOv&~5sc;mP#hKvS{;Zg1QOtGc%^`o}{d zc{3gC3Qq;cWoEnZcuN)wCR^i=lh|A<%dE2+Rv3z|5}# z_C?Sucw*}*u-K>r=u5}Ia7zym*kaAhU*!e+26LIJ&T6pcxFHDIdl95WNZl> zX_M4PbGUU~!d>SLcrl|KbSwUb$lo65+>*+E-X0FCYACo+ZUT7oWhZ;{R6bk~mJE_y zt6**280UHG&Hj!xhSSf*!LqwgC7mCj_$Xl9xw8-oK9V{on} z1LzS+u=$)RpmO`b!pm=fec%e9*Qx^3W z=IL~KxWb0>Lp-t?YLwh&f6R@Chk3KWv@@Oz@AOx|n^*#`EKq_bAKoxN?x#2$GzqjG zxeCU&Eu_?z_d&C!HsC+Y3yxk0h68g0DE%qlVfDOTdWBvO2+>^)&Hfp`_W6=p;YH@f z?@2J_{%W|1)2F)rj@ws;_ro+D&J!&1JVY`Fpw{GB>>tB^_|8lLq#h1n9d5{iqSXTE zZaIe;w3A3YE^ESdjb3nDVmPa~ITi*-oTNjdxH^dbjjZ6pOBhg{41P0t91ao%b_+kV zonxa;S;y6P*l@fBg%1?imP|9JdC}jm`96;t(^(c?1SHl*8Fm zOX#iIrjnZ!WbBr3eOEM}ZMf17{;Yigjx?2niCLe)hcHk2_d8|S^Ct_0?JolvnM2^} zBM%POw}moSUxC-DobG1tF?f1|Ep$+sz~oz$FazXM@VmYM9zC*z32-Wc>pQc6kjv8s zoqPzQZ@&Ud{?q~Il6%l}{~q{C?!9esT&aaNED?J_nTX)>0w=dcROJ2t@)@c)2s~Pg(mI=oT<1`*yG<%sA ztpQN}+7|GJX8>x^8xZ)@n6U^N4;m$Q0ct#uzv zu?>apVhA^TIzs2|k@WnAxAgfrQl+kHXTjI&?VP?-B4e=c2l%>Y0kGFx03VjU1pQo% zZ^-0UV5Qg%ejjUNKRG|-vY)@eiY3w}^IB7Z)Xi0(&MlJmn~(?0IsS!v+bKzw(Il=8 zRLaD|Sp>FmIXH_k6AtI7hcfrg;Dsl5;Vq7cZM9l{LZYkNQK@EWsehW)YT0p8QWGXf z{a>%iQf5-SPBrl3sP 
zMtD+#F|LZxMqkd<#^W**_Vb1a*nDZ;};DHPL|6GJSN8)Uw0zVt#wjZzDXk5Jbf1)9R!X+O}!-{HdR;P0Xl1? zlnSzX3*0IBgyq!I=(i%TYn5c- z-{4Ma%yTVu>e6$O1zjp8kQ(s-pE0bdiq82N`CH6fKKWK-~x*q0z=HWc~FoyiJ>C zp_%t~<4M;wknO+BunhtgqSSdKZn&mNxJZ*mwwj>)KzIXN)vJ$%$f#knmer&4-cAwd zmi|URI%c7A;i1Ue?h0YG%5=WdQBQPY$9FX4Hi;=01)-*wyU_7j0cg~pE;QgG#Xleo zG52~|%)0C-8p6AdXm6Q=(T_Kw@8sqfj?B8wmmi5hABHaylo@oRwmPmx1 z@h{E>WQh-3f;yfc)301DslFIOz9>CRR%z-JRr2y=NvpJ|W=|b{Wot}{;mU2e`Nxr> zr*Iz;uqm-s3A@OfiU-I7LkDtZ!JLvzYCf*7;U{X8UtRJuNL6%^=q7Z}DG>hO zzZVPlTr1fyW>V~Y?hAfr-$dfq=v;FBE*;{Z{(mg=srp~_KlbnYe~^(zlnqP;p1X_$ z4_;>@vvmptCu-G%bN8`A^TEf+_o$V^k<60_Ql5pNy1l}rfu+Lf%1@CAGH>~R8dMOk zE3D9R&JzCRVh@3lG+*FgeqZpcW)v~nE9ToW0>tCVTXe01Hu^_iOF-DI5f1MA!P7gx z8yPA#5_TQy5YCpyL==RsTfcpSPqyCgQp9muai8j(|wgY*}RKpThslzhUObiT_pn@qZuxe?9+*e>wkr z!-=P2gtl(z!nU2bL|T@4({3$+^vkI?blxmjLm1u(G*BO&My{J%N5z{&(%$=~kmk!C zbAoQsxZE-a`pH-|RV6KruXNZ=bz1&H{o;R6{?^sxg}1rHc1}Oa$V7`d5n;~s-xr{# zD z(w|;!q9-;v(v)2sm9_gXImjQQ4z-=Zr_XdH5BH6yze~wb4!(Khb*DIb?D|V`>M=*- z6=o4UsV|{q+=ehh<%A)%bFYv7uP{a2R|4tdQbYK~o_%DAixHX7V@J%AX`o6bcVaD( zm&rZr7Sn7EMK26Fs_hwv<&W03Myo&iQV+w_mZ zP4xZE!Q|IJJLu!Nxq?MfGmY?g6Ox2ah`0^g5jM9N%_AzPyse=$+b}{2JydBG6maTM zUgETy|98E-XPMDDP-Lq)jVBrEl!$+8QZX_J&^WRJ0d@$!^Cyd|4j|;k~sR#K*>}q3j?^zxC)O{m5F=Iey zX;CRrnx@ZT3J!}nOHpF@?0vT@)znj z#gj_8mraJ4-X`uot)T0J3h1xDKGS;+^U-muMHrrAjn~Np&^tPJ(zdbYq`v1?8rkwx z5LbJ`$mP#5GM1P|-WoWEEL~=c1*P>+C4ysg<2yMT`>jTAksnX3SNuRm^Y2o0pgQ4o z#faR#E}3$PeMUxS%aMcI4$}F}AIOtVM#i6J9_HDatC7~j{)8L9mF%Go>RW#JQJ7xj zPVaX3iF?^;lE-I#CtiM4AzlQ{p_(@I7kS)SK?*Li)S9Cc>F4__bw0&hCN{m&GsxZa zm{|OF7A4rDJ0x7z9+;#G{=^|ru$1S`|m=vimRwr!xmXotex9Ru3i$kwZmZQ5Oa&X;AQ^SM<*})s*qplD;8Slhp`Q^+dFdO_>PHfZ zTth$`3xn|!QmV9@!VOBzF&qukQKc5hj>lFgo}pgmN0W)3>V*CU5nZ9zMz0xqN-Mc3 zp&q-Fuphk%c;=d$^uDMa^w*pWvU{H;tzJ+jm_1{Kk@%Z7IT~_>xc$r*d2?_sCZ}0J zx!B*JSJr)@Y=0`zV_JNQ&~_&0J10|dnz6_{^Y!FMQy1!(Xf?TE$2xNAyj8TCsDoV6 zsBR3i*71yFY{UrXV?a>)jIO{`i(@Eks34M&Qyj=lQQu`J;*oBNKE(jAil#+ znt94ut9!4TP7OOI(FIF$1ykjg8!dH7)L_e@!OXMF)4rQ=9C2)|H-;1zk~U^QtsGnFR0l&C738^$e$oznNP> 
zKXH4alM^E#^?K3_(8;^Wlw?&pQnsAFadZXgad{#WvFsr(LW5|VR0X=f)e}#jev6tn z9>>lsoI$_5+)GY3e@Fz$kc{PyRZO$X_Xjbka2@H>__(Hc3`uF5?~O#rGANz2iLS!qF(Y&uxgBnqo%j+b^Kg zMW1k|&{T5Ca&`Kxwh^_h`ZJk0>m~ic7@?LtJYxK%DwbC;qk^2TT|(?tI6`%wPtgBx zYY7gkM(JyITZn}_a>+=?>*T!4Tk*xo!-VqGD@8fq70JiP6R2!9gtB-RuerS`p1^)= zH251iL|oieLyBBPRNOaDV)fu%Dw{rs*W~(BSg#7DwdWF^6Q)RsW2R$OQZq#gr(O}} z_l^*)?h5qx;T5#2@l5)Y{a0klk4#LU@dm5bo=Q))Go_NGHy3+YXHnOSn*>K?*BfoB z@+VYWXA&m+_(<}G-)NZeA?o({PgH{;LEZLRNfqpoB|BmslNpWOq|YoVVs`0j63YuA z^8#(jQ_ohC8m7Ofu*y(!nZ_MsY2N}~(T_M>xg-iTDGn{!qO(YU|NG-behXr$gTW6l zwp&1cpZ%Cf4NV}-r4G@uJJ;bCqx#65(_-n(6LM+ib6a#;4}B*r)E67Xow6Z^?tQ0J z&sEWvt{9R-+vJ$lU4FR8+@CHP9Hc_%75I#gft2y@MR!(Uf6YJRmmTLYzxMahJ1-himQ(N3UDF;5TqheC#azrJLp(-_N8>_} zD~B#%LsvG_`dGyER2jV)1v3d%2Afvdr0l~ zkLb&Bij=f>hH>BhPTn({5^})j8gY8R9~J#NLjT1Rd0fM$ovsygF4^RD@{p?^$r{FD zN{?8|{i7IjIPi>obzhHu?<1gd6nm3RKFg7&kFFRjJ>*Dgm-yqGThNzL7^M<%fYh93Lzv{G(jB0jR$B6s-dL!Bn$OR| z>cqbIvz{D!8@Y{6dJs*1dSpZYe&!)Koor!ry3UHU{hm!6&bW&xbOskOR!vl;Y$n}c z*F@QFmZu-Z*i+}n&8J@PDy2rPyzs_BhV1-$lA3VEkMujtl7WtP^p_)w)Z3WT#<)xz zPk-qtvPxYdR_Gbo_7z_4!Z&Pvfe*k&!V(ZeVY z6RSq5d&U}E4r0iXzY$dT#8q_tOx!*F`v5izdxT7%wfkxR9gS>XN6PLziK5Sn7P zjyknck=(i>g!p~=DZQ-uBOUxwjX6}VjegCl!pi+}@fN#0+NOLjZDAuMZFZibJucJ; zvfLIJ{W|PLUjF`pIDw@jl^QD8a=Mk8WLiLX`+uW8oY1GWf1jdqdbKF+%x+5e9E-Q! 
za3+%m!>Df6Ow#T~BPs9YM^De2L`^)JXFTfumiJ6{2{{(wPvlJNB)2vA=%;v3#M~~Q zqt8CvjZa(SL+3m2Xx8vV;;8)f<+|n?a#$%$i4>DoqB>-F5^#rn;~umhUB%gfAEh1g+zh(E+o=HR=DDZ8o$Tz29X=N zg$S|=#v)b%B1XlMUvD)9^_;#7RrqlO+3nLPcvPavuf0}@pNwh8qn29W*XRh`?}`yR zBBOvsFBD)Om2>#R`;Xw+&I)+;v~(o-%T46shJ0MTuuS;dd4r+vYXknH6-&{ja=U~N zzDD34lY9+|ztr&tAAG`#A65%q#7xImTdqMf&fmgzJuAnGT2AqTC@bWFOdOVLLF4mW ze0hmeyagFC3C6dm45ZJ+6b=718&`~4f~bx7;Hn31@r|uEV}o~3VWqtjkyDplQKPdu z#^u!`nB4v$?73BnaK-(rMESiP_;9}>aXzS7SchK_bbZD6XwL{vOK##JGajHdS~u|9 z>?rTG-zB8;a3ON+d6&@ku&!W=?RVaM1vR|=27_nk-@ty&{D{5JN<$8ZN1}7yN2Bi( z5Po6AbIkIkBlgKk5lO|?BJb|c!^_7V7jAR>WDq<_iVwTuk!4ZR!WY_KFy+U%0o&)t zJ07(OuaU|V3@Dz#JQ6=6MghGX4yKC_S&i^Ebe%$Gbp611lS1(2fw{a%2PyC#avtqbf@kWumZR-(qT7x`4Va09C$ax_*-Xy{`uU-l9?swR$ zuxUaz=&3<(^L)O4s5c_^3oq=D!m0U!$+y&amm_W?OKi+hNxmv>|9&~r8)=I_`BBVI zp1lp5G2RAi4tXz(yz7Qe&c0*ZtJr~cOs~X-yvl|AvSWnNtV8(072^rv11V%UWVb+H z_7?BKm1nqCRRC^QRft;1x#J5Xmhq8=@yMQ#SmgfA5#gqr2L&q<4frAGczh(r40qe- zft`E#5Ig)`j2wG>1Km{}f{s%Q=3gaiv9VJ)wr}NJ#DCu@B*#n{pP`m4yp{IEpiE;T ze`bFca$jSIstoVT{8ahr5*WI-fPciJ|rH|zy zjyk&d60;}xoVsG3%&zAGRmbtX?R(VGyu<=@epwl=HIjqGzgHy|We@WkT$(Wj)luxn zz%2ybQ;g2s8D`wGKN7zZb_##pv=BLTQk~QrdxoDFjUwJUEkKl$M+H)a@w}CKVT5}L zL71;Rja{fwCEO62-?Hc#QjXc6X8B0w3wu5AfDhW} z-9uw2IH8R7rug$Kssa9L(Hrck+;wD5N+1HeZ{u~f6~aSkj-g$N0zXD&4jO*ZT&T6u z9gkRGVlW{xgZC>~jLY>D3U-Wq!9D|ZG)2W6PjraFt->w%W{3i6xZDpXEv6H(vlaQ1 zcM`&sHT!wXd23M1gDO~=r!3LCRUX~7Zx0ci%@>>-#PKx+0(`VT0|~X6hmAiwV%#j2 z;IU=5aB9dI`Fh8eeD~U%P9nYiH8lgz}tcLlvXmwwg`ne}QpqEr$k zw0J-Zy5AV_vwtRHFAF=cr{y??x;bN;qC*i!T@!Sgv;e(b9nQCDbHN-BO~5kFd=vhe zr;k)mt-z#TMF`(yKQN5dOXiPlaYM!&G=<)qPGG&h()vEXZt{MLKV#qTKNO@cR>sO# zWFxg-w&KQtckuYtQ~6KwULihvo$)oklZlD?v-wX?$O(NP{xy~lSch)wXht8+oKC#f ze~v`NI1=9?mhRG}*e-khm&4 zp5*miMnImHa3pyyUsLNjv7*eMh9Kn8z5C!`|{KC_( z!Ut%(Vb_kc{NM+N(349p3VFM#a586u!Huvtyr-KLi59&_g8ZpV@L2n8s7!w__IPF< zR_&e2OFw@N37?&Z*;KB;`wI(s=O*L`^0gir9~rufY`?w%)to&SSIMzPvP38FFyA!( znxls>;ptPD=Bg24YNrqSRdEK-ZSgnkgjO4thNTMknR^heL2mf6{@H|vL$44I0D_A6 
zcwWVmLA*reCa$r%0=@P5CVtO+B44c{5Q#{MMO2Y&p=;zmLDZ}Td_m)E+-U3^o?PUM z#cz0zMHS{Eod@I4V?-Kif7*i2oAeZG3|or@%4#90QQMInt~$7Nc8>6Uwuj-fJzMw< z!FLgtUL)ZM+mF4TyvSgzC!VLNz7hXb+M0APGILYSn|Z57bD+Y z?66PtQ_N0c#|usM6!h`rcsd0^h*JGmrC0tTkRx|@3H;M53Ezc!r5C$ZlV;v{N6l#lEMvM5fBEqzvavl*k1$8s%ZQ;>1)5b9VXRer z5MMoSH6D~T8QDX9A-;&S@cx-zMC?8qxvWzyIQTV{7rtdJp;s?Wyg6fvbzYDm%7%6L zdfp$Ao5vQSqOL7Sc-alXJVz6LSHLCQaa%ebhe_j3xFx>)qZ+Ct{EKS8{DMvsr}2|Z z8Qjjd6gv=PgIHe;MovHW#oMm53T+*S3`d$5^H(QKLIc|Eg{wqcarWFfgH}Nj&oeg$ zU;U#-5P#t{)?G!T|C~Qsk1S38|2}`HNGO|u=-51xbjip*G7g0y@KT<%= zt7MzVfiiY25nt1vLuurdQEtBJ;^WD=)IIAUkwWz*iNyj}>g47%64ytT;8-YZsjg}BN@lM}66cPtq>n32ptf&3Cz`=&6(Z|GONtNOrof0N zRmP8|POZK~jIBE^Hb^QIbyUovllZ^Il|Hw`=M*W)NWG`nX3sb3tI8$XVone}=$J|I zf{RHto!RuTONYqt%q)p(rf*K}C2Hl%*+hrk zE0LhcToln|Os6skmTc{RL#gkSrR$%%mt?$?r|s9x7MFM0ORl?DiAS`5i%a|J zN(L|4iG%F*>0#ANq6h1C(0iYs7AC)lD#dlR34luDOwRDs*Y(CJ6e4#>UkF}o}#ddx+WJT z3T%?4<{#Wn&(y3Ur|)SepX&b=KMK97+26Mv>j1jO2i88D%wPlYTVYd6!zn? zsKMqlHQ_{u7)Y;|{H#nA{~B)N{`Or|*&Hq!l4wY-9=%HX@7+SN2h1piHGZO1!MONo zd#z~C9iGJIAr!l5_KWOm#>A5&vqT>bG>dkrI8sr?XGL#~)ac6^GW6#!4Wd)V!6Gx6 zwc>ee^Td}tJc!x3FT{=EDwM0ycX3;Ijd;n-!0i;5*fEs;$Ow=KN zOPnGVO=;eAr-u1j;@n3=RE)tB|rr}xg*)N!4Tjo-SCry$pV|v9elw!rt zy)DEI6`E8{*A!CkRv*>pWI)e9Xf2+H{G}F7s}T7xDGzl+B9HSux_MF|{~;c6AkqH|{vrJTfq(XI^S?rlgY|FtPwUoBW#;j#=>22`{ddc8 z+R(6=mffSy9QeDG$>wy&Ip1w&K~5#Dm!?dwpVIZ8c>S__qt4^tqJP@|6fd>^eqM82 zE#?1uVEeD=BF?5_#N1U^i6dvn5tb>lh$X{iM9@kV^3_l-?qffhbg)N>8)vF;7#sB; zz6AXZcluvlpIb2g`}+Sn{u~GA-^8CeYAi85Fp<3A*+`gqxsY!XGK#PIl#n~D-G~~g z#WWXUsr>CLu@VcKe$ct5*s%C9PllCKOP|M^~~T_F1BeU{2}YTs&mamM62 z;-Amgo$lg0lC5KmW(v$1=uk z@$Go-giAv9#&X;Qx5G9sH^Rwd5{$cLDR(0Q`p>)Ul|js1h~v|u74UyPr>D0du6h55 zwf78);`#pl$yrG%ikJ~hh{6K9?3`W1fS53#D2fS0L=gY^s2ndQulC-;X zmY^a^5HORG93_bXg*zYbzwU$Izv|xlh6htqJ2SOCJ=5Li^yxG2*E>Cv3od&73hJxo z3z_IuEstB9^8U+Oo9JQsJzEpKf9j9R4!62f*Z;HqOZERRl~tzqw7{xWh+(t>~J+?XM>FDShvILuQ47D7WImas4fgTKZ{H zbC_56nCVlGOkhbg{C(aF*xwjy-!yUCUM}SRb6j|-3%$-gk$G@!s{S5p!yGT~R{hdr 
zA)G8T0e7_sVeX3)2KxUz|9M<>(w(~gpY30I{#5SEf7<@8rO&2!D|hO>Jfx-fWH>^{ zwVO>lleC3id)QBR-|>#W^*XS7h4yTg4*f=cht89+<$4bF$$Fp8%F$Cc$NW7XIalUy zJIuRyMc3!n3%w89l>fHd;{$8-F4wQ8+i@%PM$T;2+O%ymJxM=+W^Eozlb+kIm-#;L zZ~NQlUv}@`s{el>{{FAEKUo3x{}|J$;g9)0hV*|aA3JcFJNwN-ceb*R8~b~X8(Ub$ zoh_^4&c5VK?M2+#v);L}JE`r1v^zUc-kohhT`R$KWB(3xW4oY-scd2_FT<6ZF_q(xs#N62x)ctR_ zcW0jsbz?h*y0eY8Q1?Oo-#_g?{a~!ZzmtDP`!Dz(QF|yq@}OElYq+f5lLIdb_ERy% z)teva9e5y3cWV-11oSuNMQxMGe;4P^IJ7OP;OK+Ne5W~X1@mvOVq6U}%h%A`m&Xwc zE^rhP&2u_;HUDGxI!><2ctJ(-&Af=s+Y1i0_b?P&y697B0tIKTsWCE|Zx!TJh2`G} zS;~=~8<_u3{o7W*?EZJ!KkdJee}1mOi)L2!LwovJEA1@V^IF<>^VUin4WJ!A*`>8^ zeyg@JCu7aMJYwyzvlA`w;{~l8hojm~QPtX|L54IF23PyZ!iqIVjP7YY`ZTrXw@#W? z>^6Ct6k5~rrgyKYKfOac=dBm*bDtzlrmk|$%wBt1f3W-7KVlwhH``gzPHrvzn}0Z9 zQ$6qBsz2pV{ioyKzt;cd1pa6KJ&p3I{uBGNI&g?rwOoQbS#XOX_qdnwTzd!S`1eoT z@Gt|eMzAnRnFIlToQIxkz$~)cIiDk9Ax-Y+i{}-E9N-l#u;l%7-u`g;-~DeVaWZtw zh^KnIEdQV5#uB~x-nl)znL|N5Ir$@5r!-FRQnGyMd6cP{J2;nI(6F5Mh00$NEj&%H zNzUWEFKX3$Dl?BU=)8ot{^kaT0LSmI{mWRiHGj*CV13d1+Zl44CCnO!7X964#u!tZ zF8w`!QY8BCKHo}&sU|~k^M5Y+>zA1-A1pZdR)TAQ-VCbzwFCFWc;>MV1yuP0J~@2u z!SAw6#r3Z_Zc<&07)b%{#VIwezU7nrjI`g}Z5O&3w-06hwXagfz4?#jzUzC=j^OZ? 
z33G!?{R_g~bhvub{(sLeuCo1G{;N3;x#7V}xyxJ=|N4U)M$Z*kJ-N;KI$FW~r~D=J z8@Zn&h5nXr`20Q7jra1e{hf%eU*k@F`|;2Auh?JOeiT`(0A^gu6;1XLVMTe1Nuw170?UU z>oO$luhHLYFmxSL-frS&uD)}-2YGco6r8D3uvCx$r`=HAX7W_zU;H1 zdrt=bEypHs%in#IY$2nZd5i@p!S z5C2~(#Q(J~=>AVXi1Nd={tZW{&St5;|5tT=&fX1w`{FPEd5{0C@+n{V-zon;e*TmX z`ky@i`d_;lspjI0)N`S_k#QykpSLXHyd7Q5IT7`QUb8-lF|qm)y|25J)5-l>@I?K4 zo<~#xJ|U^(%MAtz4Hr`641%noZQ7)|$LWE_rTNE($EESss!nMV#4+$K~Qw&YRH zpOe-F(+#&2td0!V)A+L{zo6<%p0w@jg1P}QM%WKC&e?cf&g;n~oHfO<1%2vFPFL(2 zj@2p0eC=g-b2sfZXMA6Jh@+>!lk;4B4P7fDne+3*K3&^;l^pWICAvi*i($HbuweV% zH3jSCwlIVRbs4jdiZecHuo+H65sV3DD92@u1SkB``vRWORgUz9d7QQMFZsIU+Muy_WY{Pr>F1x=#hrVrhVx{G z7E^kCJNHnj0`uX*rM$zZK64lCDJWRrFU{M~yPqNUW|*s@=FVAmb}4stO25A1o;f^E z(wnPkrq7L8@K)bSZ7%Od&o-uRk{D0@>~rq5SzCG9JATmxO*Od7<{-D{6;J>2vMXHC zx)Ti7g%|}Z#(_y|$3}H4b{G@-vrh@yA|10VJW1awI zd;i(~75l66U-17`vbAZCZ-3WjE)k^>ua#*IPt0hyl1ylND_?1s>)fMdWoppk?!KVO zB&*Q=bgR&Quh69Ze9^0YIcWz?P-X^g>rP=>Ca+2R&7TFd4*ejSswItP*E5fHXssnJ ze-oQ#W9~rPks+n?v%s2W!y#xpn%rq`j+SYAnTpZGH+|LCI&qyg9{--Usz;v|n7xVS zmIky1l{MO5eKyjTd_PNbUw)mo`u%Yld-jC3al&%i#<$wEMuRMxG9#Y$-9U;a&STSF zUl!8gY5>i#_KWu0lZ$DQwC~z&Tb*eRj&0grQOju6w_eclYW->Q-qqRM4sl2dI zL6kBNgBL#qz@Ru4m&d8%lHTjcAaY4ny=QRa1Q$2e{K4JATd;G(W?=3;0E_Q@!}=DA z*S$3eXMdr-ZKw37R0$GS9H}7n=H4M$yMDooeCG?NmwC@nVCkbAt zz9p{3E92#M5j@FjCuUO`_JxjuEbEXUFxMN0>nSbRuaQkum58v)O$v!?rQ$5VhEwn} zv=jBL!kKfr8lXl{l9krYBA8!Z!=eYtIP=E}a$!l6e!kBt5TNV92L5%55fg*4J2Hu( z8_lqBt3Jjq;E~xB`)H3DwS?eYhI(fx?UmqAIPK(2d<(kAblm$D!GzyKeE&j(T)2vbW!@Qh zETRjQI(wjKo&|1>?!h(n5%9J90MWSH2Cvvp@EYxIVeB?>R-?@zlU`OqURWo_e{h(O zqKhh^VJ92QBi7>lyXs`n_TS*Kn9`EEtAv7w-Em@-HYu`H8Vn-dqOihmv{vtjA8RC8 z-E*(Q{&5NHwYmi-_GrM%AB{)~G~lO2W8kt-8U2@9F~yE5LYPzr-qNxss(eB)QAdmw zQ{99|Qo2wvJ`>D%Ol*!^3ZZL$!t5k{s9Iu-8|Sx>n?HQQKNSMxk6-#AemIokd^N(X zfFPi8_HZM%Mq)1!0BI(AczRJRvDs%kDZJr0Zay3h+^5wJwGDGhhh@O+{$v zeFVFPbkX#Z8Nu3}2zq``c=z{DK}>ok$)ENPXLXMgA&EiQ`hEzCF0;tKK-_nR_t=VAoH4kOCujj3w?*Ud8uSrc7 z7Z1hXg1oVETog*NnFvE7&a#LMI`e?{=ddRjwI(y~Z}|x~yOZJiyI`_(buSq>vJ{33 
zCcx0t5kFJ+dI3st^XNy|Sb7VxGY^3`#nW5%a0E84{sa~L3@DTJ#9{M=WX4VbR$cK; zVlk7C#ZN?FvDIb_tQP=&qbUDs0maGrN%4IztH96>12jMI06H|kfRcAG$+(h?OH}*_ zHvKN7FD-`}b7!2MEXE%bJBX*X9iVy1Z^A9w84IgVlgeG9tm$1sEYXiQFny#Oo*W*+ z=GZno4oAdu?YN*X=*#)b(i_Me9BN}~Lod*+kE-7Bzmek5EKQ-qTzKBC2b zKhi!`l*L*m!MbBp2CLk~ATqs@Xk2m~kDuq@t80=hvAw0t^dTlbzc~X0-~?3v;lldg zwuI5oW>RaX8Quvyk3ApfVTGv>%fN^aG-o=Yp_GnS$BKzay#ky8_9v0I*=0I-R^J3woj*7wu?*%FJ44;{K$NFAC>v_)K%y`Qg=HOyhIxO; znezm|m8Xh&s0c@z#$o#PHcZ-9O1{(2#2D+v#A7W$>$ReMVVB>yY|%GRl`n=;IT6;V z9;MH?Vk0U%x{u*w+wt))gdMwAf?`%3Y2dq&w8(e`+PYtHRjMFAw^)+VLNwuq#hKu@ zwwW-NH6+KP?vu_kRaE+-PAlL!Xrzzi<0B%93^!O`Fe=vNTnXISyz z5@mCW1cpK7{vG0+ehlsrgY^3i_-Cn%qS%O6edt@ zkV6(^6yXXbYgj^Qz1>*(25X&%h|!lDlsNKJQyizC9R46s8FB_gRt@)1?AqJ3Bz==O3)zYk+;_SD~?~ z4UB9TptH6u$T%xvPO2xGE)n7j`aXo|f!X-ulQW8)p9cY)Vq_kyfM>Zc;kA1tJUx^Q zwgWF!MDq~dsQwAc5}ugh;7R6dkl4f1fK_ZMeyCs#HU+;&7o|o}ddCKzq*Ive z+K-D~)Pv@iIJ7VG1TLe8#~+J8p^TSMYi0-QV}#)MOp3j9YXlR@Z(@<{5MHA6@vRKo zK_o1Nm~7>@Wb}2(F>*Cw zL=@_Zv7VXLfk|=>9@c)3;!EaGOfR|oDsPcUS;b1WiTjIa+8e)sUGS`7HNV$t*PsL!ee-c}=h4?(xQl3QD zXR=F!1`fm;93Jw3Ti;Y5Sn@M`bb3VTcxDh6iSH5T;U z9TR_=cH=nB1ua86Fmc*Vh`;axUez!u{aP3DoI)OMY5a^5cG+;u&4d0_?ZxYp&^niiHb_y)m<}reO-S{V1ZPmK0yfWi|ORR2VK(9Fru^ z6Ciu43!a6{B5hAxC*O!mv9@yN5vq?QS+9g8StEzvz#W?q92;k&eb6ubUiThWRn)+I zt;6u=sx@h0`x~zRvV)g$(s-}=Axz%6jZdsz!Q=}OT$p?Z>W_GkY9GhR8Er}MB)SXO zOA}CUwl7|kuEO(50({XP9i~pMB!8S7#OW3#z)aKv+jIfev)pK~zPS)v%)UU<@*a4) zVIgY1qBvlKvdpVz77z;b6PRIs7CswGu`KDv@WJmA_G1n{IFpSDC5MrzI}J>h6k!gl z30FHeU`3ZWpLtJ%6tI55qmpQF#SRd`b#Ln0L6aa zK0grOdRgJh1uJl8cpzr&j)fb`M$xoCgn9C69i-c-WAm*<+}j|?I-wDOEv_@5RSd`(Pyh1~)97V3x^75$r34gt2Y{c__Y@QkCTL+`k~6i`@Zlt^iL>#+6LT z-A4}GY$6vurr7kE5e1wKi3h8!Cp-Io?g_^lt4%I zWZcgZ=9iqMcv0$u*co~N?>Cr&)%_yo+s<(e{+Wkd-3iPx=MsYFGSDo)5K6|>F_P-f zmb-n?S?w)MTIGO1|7q}lF$vsz`$?LmJib+yB8*G%E?zzp4HhxB!C z?7$rEDxAOIHbgfB^6V)MQD2ISyh4=H|2{Yl23ASrXU-s6zu}{1(hTg`D2G~-bo}ib z57vVe^X9z?!Tnv$i*mjXuQwz!TU>vE?7DAQDGK;#V?D%aHK3=K6AaJHA!ix~;l)u+ zNKp!f@3j_W=EiHFeQXeq{4OE?RH?!{%FlUeSVit?z5(Y>zJk#W{`kxx2?|*NN;`50 
z6EPR)J2XO=%lk2{_EbS=;6*5kPKOT{p1|X=Oz2*66y)OqVdAM5qzsH>@5yE;6K?`> zYL0r9=Y`jA?q)u%O9Uq4KK9F&5}S8sz+nAzXzt7)gUf#5=!stt#Lj}m6E2Xjk77%B z=8$1tTfj~znMf0=z&$ZJ&?3zy)YNWaD7OaZe`+JQ&gbDeZ3({VhFdV7?7*&@J>-V2 z?_{}&BQ{a<&kl15)~w@_{Byn*K)xL(1U4VSJyC~YGEkD$pdiT-9d0L6o)+<<&$&}` z)>0h1D#M?WP{yBo4w2ui9uwI{ULdbhi`thF6)%Mn!vPymJs5Iz@r;x|@o`xRxsi4q=dlWiy(>0Dcex;*4(KQ8c_eP@ z@*+pC(qWIh3_kgg0EPQM;?9Iy7+mraC-tWRx#|v6NAdyeY;y$Bb}9bJXio^Z+YVdG zCHXyn#97Ao=E1E;Nq9v?6xNYhq_cG^`WlJxed6At-- zLmRO9zoA9Fg^+8hg9R^iiNPmjILcyR<*_1s;&vJzPTR$G84zO4dM|;Ui`{WKBtx2S zInJWyruA}(sAU+%D>^Sn3VGavHkAO}*T$!KL1W}|gAT0!o=;BJNbuYK48Xg)3P6lS z!`hNE;za9HToDbHC4fvISZfzRRQE6Z zT}3grN}}*sbqG!-3$bd7Itbr*UwEnW9UYgC!wpkeGV75qv3!LvzmwhuA@d7hu01tZ z3|Ydf4=h2EQ~k`+{XZdDLl!=Q56mUQ$jfuK;;EVCOy2!|JT)?#m%1n$>duFu7BK}f z(sdxeMV#lFG6*;3eS`Ta(u9KHdcd~@pr(+6K|hsH;`1AlZSWk%4C={xr#PrI3V{=| zm*b|ED%3e^53w0BIIpG;PLv1W(sj+SnsW!exoKp0`%-A|kYFv!Izq^O=)jpybwa&QeON8R6d zAaU6s>9_qe3=}nxwUz!^AsniQr40Pld>lA7sq05b2V0<{s{LfY~jl7Qh3*$OysExLC}#j zklftB+rl=(EvnC``SUECc}*VmHVg7!*9qf}u-Twl-bQH+hl5^}C@Xj>6gMV+g~-N?TTP$?!+Zf1;YtV>}}I36>VO5l4T{$2sa<@Z>lZGZktCU$=$uBX1{ebUB66 z<8Ih5EyxeLaR{3XU!&h>F;B+pC0r_SC);Y@!Tz)kkTHnCOa857^(kS#b?OqBRkDG2 ztYV zq=*|%m*#UiqYekf!CT9;MABDQESe{ajQNMySfS`f5nq8lB9v_)__SG zpJAi(8@!-;o%|(S2*+NpBm%Jui^q866B90|Z5o8aSrlI>@hNIAw8He51kex|2c?-g zm@<2a^sM-ax%<|lV|g-8Oy;B5sy39+k>+#4mqRXo2kz!4xG`}vI5)UJOLrg7qVofW zn2ti_?@9PBda89cH-;eg@;c$P8`x_c5ye%(i+<%R^md(&=| zw&{WJ+5i|;{|09H%@CF#1#eWY!^9Hx z@i1I-<}u?BpCnAjN5F5B7sU;GO*r_dW37=mj^tExNkPxdU%9%zgvcuyDjiyH6Mw77JI!ze3aIbXZX8f@k|8$dCGB zyyZrx5&eU4M_(71QvR3Yl^JA9odg)RiSq4|B=~OYIP^6U!=!!fDP$g)(w{Cnh#4&h z!L=ir(A=;M_I4CPjMG}kTK*K*=ItPDR$hRe2Je90ngo~1YjJVId&G@r@#gGuJos3W z6&kW1FPnEjY-SMb_5FYuo@c-%R1KUZ_@wVDRouat&aZ3I{SseI)d|Fb-kZ|n(|wwmMA%U^J5Fa^$z4uZn@ zaM+$1geI1Dgrot1%V+fhue*V4H5r4BqFii{-$=H(yn`oCZjd2NNxqi!VK_&zI;8%% zlaYdLxWKL-o0jsJGdHw=$(tLHQ`iJXvj*^~1Rc50=b_!FooGH^9iqABXxI4&E}1+< z`=(yuc&3qxh0~4y6Fr~j1!*>irX)S@_roj}i<^`U9{|jRlb->XoE}11# 
zz$^MA3m3jC;pC5dsF9igFOE%MeE4cOVOvI8w+53x^B(gUOVUtv=PNujm;tUoOyRW9 z1pH?9FbxVhIBohN+^c&BtiJiftNchfLNT4`ALAhEQxa2Gyq(zH--vpYJz!Lr{V`-GVLXv?vd!7UjV)If_G2 zF2cGNEXn^dAdhOAZ;7-^saW{v6R?lQp&G@(l&AbUm`KG-#*a8j(qeoqVNtR``z`Mj z729u#_9Ji1%7s%_<}jB-!-rFIZ`$0b(W%sH`xC$_+BCSAvC5Ij;fEOTC7(U6+V; z_xzz;ScHltoPg#+E-a}^g{2qANcHQB;n1)oEAMkHIcM2ZG}ZVHCWaxPV=BtJdD8@6 zvIStsFax)q`iS8HFG;A%!VMKa;R+!Dm%5js%%-o9R>;)P8|@_h7o7tC6Ja2Ge**0u z|Hd=t)9`CMiB8rLcugV~3MoI?;(%4#nk?vKeyd_DL1?1)Rsn9G;MoLLh5i>Fe7cUzHg zT6Po6ji#WEn$v1)Pl12QYmDsJOYG-Lu(n)tgw_&E=-==jf_k5!(fn8FGxZ#fjFl4h z$L_(dOV1!e;5U>kUyoLoD2CYT4+MLMB&#t@8dkkJh-ueVf#czUNvj1R^O7qO`DYjt z(`RB#>}QO4K7f&-%OG&pFw}c5#I&5jf*G^xzzisVtLQxv#SfAH>=iuzHHHQUEbwMj z1DxiI@wvy|@}kF<;^a67#alWt$-Nj4J=MaIwacMt`5s=OwGzze`ijvZHP9w*i4B2W z*euJ#gvK9)%bZ5gNqElWON0?OHKq6hTQeZn`Wd8L?T6s?;;7J|NQmnX;)?7|py1ev zkK!uPaZWU*94tiXT7UfTkeZj(KP5NLn2XtVT$uO~LnOvMB*UZV@HF@#e3t^_k^xV9xTJhnA9>2-85X%bCz^6U z6N}Y^`7hKz5bcZ4gZ(in*2(itRBW&c(v~a1bXqWsji-?)F9MPqmiyf~{nhQNb>v=AEw#X8e(4YIjEj>l95CZG%Fx?-+Kv0l`qMiU4?{!q#DS0g_3{1g~Oo%4|1QV8fK`Au&TZ3 zc=J>wK+qXNRid7d&LW|7@du{j^-$u#lo4LtPvy1DdWH3yuk&s;2(YT3+QDtJyaMaj z_bE1~Fn@<#I6RP947#;G*e|pPPu=o>4$mN%m@h|Mby)-ovMpFZanE%>OY%n)?qT+M zFQ^ZCpl{|@j1Rsn$HQbJe*KZBUzJ2)Jhd+1bH)?pHOooW%uX_B@EDqKoB;}f4BdJIa^+XapNthIBrasd^9J{@jUSUxlWA9=pqYmK7-A_BjKET9nyE4B%9sY z+@EDH2&3X2SUVU**!up2(1e|&?bkbGcLyG6;O3~KVFmSM6)R|R$py5Pp*Ct zylpR|<+F5nbF~;;+MmNMkJgARPh?r3l* zI|@1dBQRBZ9JkH$BE5DeqRZe5>>KLlhC3MXt_Am!17js9=I@2SREpr)BNw7a*PHy- zIt1Uc6A6JytL$7C@z^DRc(o!6 zSa!Dfa%ci-RCvVs*jAkXxgBV|NG2?{!@|aQ7%v-#`m4Qh%lBxoRStx}vub2dyBWTH zG=N9^MbzP@aetKy0HanL#2?l$&DHCMU5clK1AcRI{0Py z1f%NjqLoGwY}KfRTOzLb?c94@7ZJ+5TVI87mR})hw3t|ZHWf3)gL!⪚;ls%Saa2 z1=d%7g}Rnrip%E>Lmo9m>aRlB_j!oyP?W`87ivg%m3nG@r5jAr6LIt2FnF@i8m7&7 zjn?tPEC*F_RzJm0bPF~D@iSr&W8{uAxAo(2=zS0|*iKBY3&WcqEQ!>mPsr}e2OxQF zBXNR17v3DpCUWPxQfv7^5Ro_l^VCaWOJN1fD;h$pyabs1_!*Qom*bRA3w{f+WOj~F z%%$C3NH~q4g>wfeIS!#$dj#IIEyqV@pYV|4OY~%ZhHxU4IJ%*PxNM(;o|gLwxwRc= 
zv$z?T?i@z_mF{?Peh}n5y$k+aEj;d&4U_hxXmnec<^DJT(*1T~LXZT%bmlHH<;ZD# zy|fvP-x$HZr^-b5+`B+O!y{Ne9(eA#IKTd2Cd9QrqFC8qVBnr8bUw_0<6;u5BkQO7{rx_P&Iyl(3H?1o)5X=gSJE5 z)a3XAZyO<&KJ65I6aEMn9_7QO+&@sk4T0|rA^zIN>+sWL0>7O(LZ;2}!6?;XSRJGR zyV765P5~i2KYtZry5boeT=EfOYsFYY#jRAoeTib*ERbfENE)A%U@7{30dK|ggl_6h zw2p4U$o^5V&dP!&2{C@j>MV4Y_kkn!)A+`rH}PEj9S9!m#|7{1;f8D*9MceG>Bw@( zg|CEI^K`o4lXeqNZ-FpBVn;vbZC;8&a@Wc28C~GVuOuDDp2C7J=i#!wADCDAfv(&L zPHV12Tfq`uBN2w0mhG_ooHVg6M;mCTJmL0&S~$&+WbH07gcT_%Fycjm)lWkpB-(It zmkgiYeGCUW7ct8g6fq4#cxYI39W`yr2$^~bmQ}3|%-zC+dwIps_)-@S9Ec?k%}T*6 z{xgg(t|SKaKY@eCLwt578jkc%M?FCe)SVHF?Xl8$)546|b?OY+yTzSM^*x4nO{+oJ zqZ-+81qIGq1FPx<3{BeN;D=kpNri{V>l=o0@m`2Oy%X+EyA6lrzd&@M9_eo82@2U| z_?u0Gv+J*sBP;!h($`Ip)@g*(!lhXY#rwdd(1^GqoDW$ub0Er04aHQ0C@#|;av$s^ zWsfFe)#{xvDLI1c>!p!rJ%#&?r1?5w!u)+2cTnlQ0cceg;nU|yaQ&ngZn`SKy6c}p zCR$u1_6oaTs$c;w4k?A6$|@A&jY5w6P0-1ENqS0ppt$87bUc?$)Q&5_u|0R-Pxcvf zxMvIfbr-NxRE~I#nAqg}oluq+WCcww1H|7@1k1Je= z@-Z)Dh);vHdoGf_+gvcxX%cNHcHwU0b*QIa7x-h#-;ov;r0yo(1TFfTTl80^1<3FpiR-RXBg#-*85*BK)g z$^0NbA94b@-L81<=p><_td6Cw0eEi<3r$N?;Ll}IzLWP`G?~5-KW~@DloOxf-jZ3c z@UsMeYwJxANvVWGXD(sabt-0HbC=YJ7$>3=I9NYri=As9K==ET__9u#-)beuSDB8m zr~MG`fN2W8zYbta9KvvOWeA^9if?Nr(ZAXq(;P3lVZ2U_-!{6!TW3*YD)LOGr>WW?C>OV zrIg8>;3DEU){x;h#91lRSmn($G4M%E^co=f@jsLUiv4dD%M_CY?n1#Gbp@GVd{jyU?L6p#zK=A(*bXneb4e z@))Eq0WnvIRdyon=#lf9YY*%q1(?N z5d2^XdE24@B>rKIE^dSC6o>F?fIRO{i70Er%8)o}!6R(kUgO*Y%A}CSIECH;#l)g;PFp^@!Y=g4dr8bDIn0ex@5qbB53sLpg6A7IhTH8*AgL-8SNi(H)}oud z8H&QJ`(@&+s|hv0`f!{yp5Mls*yTgmMGlge?`NZcObo2hY$JY4R>1QwTVNzL9_D%l zkQ+5;;J2VM+?qLq)ZDFyV(02fo6j|%C-t3BeI>*{`0)!gr?r!J;;q2_h#Ed_y^pPj zIw7W6jBhfy8@d&v%VKqprbrMM{ zv&dOX`|!AKHZ=a(#B<3khlKHFoOP@nkD3dj=*$JMm*QbZgi5e#47GS`)%eV<%SHIo zJvOk+<1=sq9un&(JMgjJB4XJ^5!U`hPY9jy1TU`>Vi}Vkaiz>6QmnBZ#%BA$))~V5 zu2U52KKd;l@+-xIQt6~&!g93exd?|A+2OU~d_2ORB#Z6`L*%L{P+BF(pSyksp585m z+)inh&)ZTsF!pFmjvmd;AriL5^Ruh9kSza<%gUA;zE# zcbFf>LnlM=YNa)yM5X{x-18Yu&Y-+l15r(sTx9o{S+}>6In7iJq71J?&2btvW)|a^ 
zXaU&Nv_RLX5xg?!h~bldkSx~#i|wCbUCjwxlP}1x%aJEZH8rB^x+mzd=A*ZD5EO+W zZlcyaOXP(314R=2M)Es|20Vi0Tlnxswg&T)??Y7l1Mq&k8ayNu;munfG}@J7Gc68m zFB#(l>YZyX5NDltOT;y@=h0ru2pX?QgQv16zjDP}%nR=YXUkX6^V0)wXg$Rj`8>iA z1X%_NVk|?hJb;5Bi+v~vMmWpRnDUWFq(u1UX*XesYACkViI83cugM9yF971AEF-I6 zl%d*9t5738P5KJUHY-8a-Ds#N_9uOerOEQiChQw~4U&JvSPKjiAx-rSXexLR(Ki~& z>Ek)%p8#W&4WD8@HLQdDr*ar>^##{a>wC7Mu?>4g1Y6LszinuN70XPWHAX=&XVN~tL>f;UQv~MLeUnzy&cke*z z>NE26$t@U(Bu1Pz=M|_w!|hazL9nnD)jr)L)a;VbiI+uu%iv?v!eW@O=7*0HqRHD- zPR)#E%ZWzucW{#Z5L1GdgZ70MqQh}6;T_1!vyXFi+X!0~%k^Tg=M+I5M$~)0v`wlAR zp9NQ^WWe9V75V0ih|+c57=QLT*nlAHSs{fpBIR*kS^!h*-8oe3e~1cyk_m2!4<3Je ziU?l(hxD#eCgnm(a9d_1DR-3OHV#X$j*iSCc3wM;L8o)Dyh51zHY9`Co)yM(EUCi| zby=L5ausFIOR@G&c9IQe6M*9?%8Jaq1%{_5!G%4ApC7%)8^xXI&s75b-J*QLyAX`s zevpGIIpmiVah4?K6`rq7hZ5dmc(ZvS5p4=QO;-v+^uh!ea`bTjcqaJHm*yAGOeK<^ ze?r6QeGqc@CE0WI1MWNP$BXaUf-CkJ5<^cXA^z1Qsl@CeHYx9bI~GDPP&kglb%J<* z-y>9V_=Y|AMOg(Gk3+o75JrlpfNRbd*nBgUd~v>!w|%|?gb$XIgZ6xCF1ib^6h^~? zFA>mUO~vlkttJ=jh=%xgs_5P)%u*G3$W!o-MjNd*D&`&uW@(|Q^=1app1mSMrNsEA zTl;a8{(<0SC*jH65nz2l4&~px!KI6?!>&XL{>GVA;CU?qBp)8dcR4E{iRgmMtQ@p_ zh2WST3TckT#EV@?82FlswdaVk#$$Kj9Mf4at?n}(Inam;4_iXT-miqFS|m;+XM?d~ zA0A{(!9jZ;&cVWNv~4{`xE`a{YxADrV%w{5KQj|@hq;7ZQ5>)R(-Y|PNFwcThd}hh zgaVz`QoLM>X!@)I*1Wp{yI$S_gN{`AwB8O1w+(>Srvxxu5e;LsO@ye}SJ=DyA-u6L z#JE`-@k>WBZi{lliM(;VKl1`aWS$3UK|%iBAJ0i}EyT8mVQ9J_4Obq|0^db1p{Fd0 zOf*v`5+a)6fpH1xl+T6Ca&6pZxgDbYd_m;Ia||%a$6t^1QJJHyKO*`WNB0V%$}}rz z=M=-e?8`izdqvoEbbvYdED@#7mqO;Q5&Whc0UtlL5&asfkW9t-LilTNcA6l6hWI1i zv)S2%_vU)oW9b1jpK!9WyB)_AL|LMBThQSr4N6~rhnK5+K${pN>gdbyVEY*4+Fk@3 z<13)xeu8*9BM2&}7_V_zBay4p3y(I%B7coPkts3@5B^>X-?zTum8}pa%5@yUj<*vf zx9^6>b&-%u_Bv~xy-N?_Vf!yb_C?BN`mbwe@n__Rl)Sc;IY#)ZB@9UxY zWGq^pxrkmpA7PVk6pre%N!2=v_ibp6J8un>eN&a3iw>Q5Ksgz`GG&3diV`@Po z?;~Dsr+D%o3~*(FIebA07MsflMTr)S_RhiHJ+@fTvLB9!X2AogX8e&V!jD`T2VL9h z(et1;9=uvlc|=+?gp9B7ji(LMjh8|6$`g=SE(RL4-8fg&4v%luCtoJ%8%@aGaVa9 z;UCr{Xo~WO-uR&le+foqml2Bpi=y-J$Lf9KxV@uEdnu8mZ9MmN(q2?(=&Pj?+FPaU 
z$Vj$~2&pK_%6RVU*p!t`DIzk9l$rRQ-(TS6bod^F2D%kQ?!fG*X)G}W|^+d-^I@p2A$yEubvF;~E!6jIbeKJd&J)+c$oh_oc-F**nlX5mLZEd3A;fo-wPZy^vNU)v{qsb2$2A0-~qK}Lw zm~&^pIhzctxm?y|?mbu6F+sPk1pI7Yh)=r4Fi~p;4yUXmxuL@BLC&8Y9vw!jX^uz^ znA5f9w{X7UKfJFtA2q&+F?kc5KuOSrbi2mDvXK#dJNY`gq-ubDP6r;q=jp?n-f~^B zG??%p4gTm&B)xz7@!&&Qm?;xN!>-HX)XyOZW`?{0p=@xOUx#(y9MHx}5;yLVVrnzi zqv+^+T6jMZ{1%s>r{hXI^O@^=9Z!aUNzcJ}R_L4AMB^U;Hdv=X zz~i?N_Noj1lSt+6UdlL^dp7eG!%0(d2i2DGL{CjVJm7MNJ5*ZWm46(~l70YJ&iCN+ z4K*k^Jqb5iy~OymU2yV}EO?9x;>Z3BI4I@;MS9`5c>Mr2s^5b>y|T2Zb-8vIB=eHi zy+m1$H2yg*zqop?FP`!Jin;n_IDf$l>MGkuFSq?h*9rGf^iVqgO2I=e^W#RA{#OpQ zk83Pa=mK*UCAR({?D=FpT1eAxb(EEhKgujQ56 zQ5R0bDG_OArqTg?|5%dEZV3ZfsTH_SL5i_*j>hN}b?7y@4O6ySgTAE}s;9QXzl;W2 z_@o2gabCc{`z&}Po(_%PiLk*$g8QGfpf1CMN=nJ{wzXEn*ONXt(kjHBDn0?Ljf9x> z*1~WqX*0PUQH31=+vt-udHAop0WOAco{dxw%FO!$VO>5{=gULzEM3eM6DGif1v_xC z(-^G{*J>N5NHXfef{ei|OE~W$g#umhHSI=PW+FLYfq8rdb#2v@|NR;zFn9+2W%cDIa zQM@ZqGbNCGoZv?CrK@16OfzWyQ0G_sc;V>8-ypi&DZRJ15b{qB)5l9B80X#faO?1C z^137k+xIV|V_c?o zBxBdEK^~G&{!;DlSe%uHPnL_Yo$0x>->wdO%q{S5hakJnssqhqf6yG~cE~->QcojA zG;_Wu6vbdN>Nn5kd}CHXP~f0^wEz4S&J_qVHXb(cGk!B#rb#l-KLz57kpL`f*{5xN z=pjluf5VWP7pS`{kM_RwgBSA*;laO78W8b;$iEO`t4{Pm^_K`5ZzIWaxpMGXb^~U( zjKWz1KHh3_$0?HDIJ7VyPmE=gO`luIueZ0*_VO~&h>k=5^8vi3TUij9`5ZL9*WtlS zLAbZ+FWerFgyIV+(52Rhf2uz~K`IMMRDhNGEX17uz8mMSkzv|RSKzuiVfgjTWL(jH z4-%)lksojY?wOfGmwX<6A1wgm$Y>;L;rMDLbv-(mEW=%dHi--Ane$*3*ePr@7R zdB2~#;h%(Jm~g-XL$Vby$u^fvy)=eb^}=AzMG00kVHKJ*_h8CHDMnMUkM5W_ipAG^ zupv_t{Qf$DS>rRhP)>=)6gU$R<8qv{z6REe^~1*X_F$u`&UZR%NB_OJfFH|UaC%rH zXhIW6E!vJ1W?UwBBnIbtzsJ%gaj+sK4Mcv}!-C|$_@-$H1~R$c&ea66wyh5*x=p6F zo}qN9@;ac;MbWhX7aFPV!AU(CIND)@)2Gzo>^~2nP&teU?(2or>_6!FUp2m+`-bYs z_d}}YEby~Zp#>|%z{CC@{U;WTYOe1{)1s$%Hmm~AzS#jP{|@5jJBQHnast(#AwiN7&u&;Jc7 z+tZ0ldnx)`Izz#N8(5?M8xoCzNZYC}XgK#iIWbJYW9SiNdj-<82i!Ay)C9&|ePprT z8i-_UiEpSFTe0N_IDHXhCPlYk+|%D+X0nV<+I$ITznqT_G4`-K;1brir9!rdGsYFi z!$glh(3t)beV=;pB)aFJZMq)3QcJ_9`&U4NNda`NjKqIUhag@07lhO{f^6w5G`e$| z(}zEBJ=boG(UoNPa+>$?_@(&H@f8TSRsy;Uvd{0##Z6pSWb=G^oQJj4dRHgLHgnm} 
zBSCNvx8TL9cJwSLfu)9$;4S05P3F3jzMF}tl)VfxlOBeACzr*1>(I{tXzM`+FMQO#pc08jj-spuD@t-h z0^l;&?RDND#U$cmY}~gG2Y-j+)J1!a4b|*+Ne_I^Mb~7nv`r@yUxbZAt9EDXb zqGa-914u2h0og85=3*-66`S^9&m>109PkR}95#a<%__p0wZMrVl(*it5b7UqJkmLYQco)n|^ zq6im#p)mQuJzC;w3jZzt0U>>c=`rLvU16WevrtKf<;D@j{*)iI9gf$2e4>HwmFMB- zY1zcXtqmW>2s3l@)37#T8@4>U33Fv<^YghJfcERBXmrK`9d1>kQcxR2O+Ew`9qUl< z_z64|)`sD^RcPtI28JWkaK=hcFu0!!uTR9%u@hq`m+L@gaPQ|Cr5sdpe*$;5e&kmx z`tZWmEaF{f(qWa48{9}Q;2&+N$46CM_mCfmewU@$#O@C$|7;ySGVlr5)!VV)nFPD2 z!4_MLL|7^FC%oBGgK+giJu2bjoqz>eYvv5zA z7RieULUS)UEIHhUlf^~Zi_Q`xH+(M)cIEP48Scf+j$HRr*BL@KzkzRm13*GU5hv~! zV41}dtf5L9WV1Om)1(n&wr&HBgtz!qWfQEm$%G&C*K;0NHTaGfVq|C$GJ( zvuXLo06ZZ31}46f=YlU|HU7u0BXC}?K&!CWU-4}N8oN=qn zBM9BI3)^WHS$1gv&gH*`sQq`ax>EvQSO%jTw}46E=O>IZ6=Dyii?9)Luh4IEGVYry z#7ww#mKKb)<8Pb2blLP85WOkP^6g*3#nM$M@;ED9rD_25D=Ubo#ZTPPs6t-+`$wln z+{7~hk6_BfFx;!)1Ny69(jRl%NovX}6!z?ZNRCT!d*})OXV7z)w@eYHDkh*pYYs*j zwL|fsIQ#9PKRT!uXv-Arg=4cFK#a8UoQ(}2udWXL62HOM^G$>|h3i#!B*Bya!g1TI zLBfxFgtPOcn38ZWPJoV}hal$WEkxo6#VkgQF;d$~t zGGM+LYfHXRso|sPxhA%dnPmoc$+?g-EXsUU90#qe64>3=f!>|(F#e>dy3n$!*AB?2h8>Ju~+eCBQD zvf+ajZd7XrfxLH|E`3RW*56Tq>VWIGdVBz*s&B!qs#q-SxWHFaX`?MiYv3RGL)TZk zbGfw#upvMLqqD<7rtu+cw2r01qitaSyNLLB*C6k20A4b8!^_t*p>NXMo z+k=JpVxrA453Dl&pzN)4XcXtoSFaVIYD2qFFd`m4+W0~H(@xmwz8nrnxB(XQL4{-Wo2}&}Xp?K>|P!h<1N7mQCT{eK?hC(b*Oh>AwOU}9# zlN5$RM#wP?iuwS5SC>HK>-W?wAp`b?tiufl`p{tcZochB6Oc6B4$dLA(0oXO`Hrgi zpT!G0oMi*!DI{`g)>N;+9KTmxp zJOSl%ksvf<3P{LJ#K0f_Xd3Z{!E^dBD!&J!o;O2?>Hzfm+=hG|b13WF4NkQMSoQZU z8ehzT%YxDP*ZLB1O*19sSGYd@1QAB3N050Z$9XAw6|mj83g2e^#Jdwmh*fnfjB!2m zzdsw%%fb_+-adqa@>;rJx-@hA=4P;!K95&Cevs&K5wNXFL&L+aL~HF|qFl0=kUJ_E z8afra<({GX1ut#i-NC$ICpF&dOLI`yAsIvG4a1HNC(<^lspBlK1RQu2i^-cVO)*3g^YRp=msD6~|Z`MWs>-*~jrQb*bJ{*@A}jIs>GKUsw#^5R%x zU5V`l)-We|AK&z%G~*;zNH;9bq`q}=kMxha4%k>qvom_La1}m*$~WHV)LI9B0&{8F4hhz{c@|Ezkzuaxdy9Yc zTH&GmFzyrl1*!aOTqxWQCms%?DmUXEsSsqBZkA#-41eOdmkDI$8juGcW4ONAacbph 
zhg9hu-BA3Det+N(*|P?SrqdXe6t*X+f`>5if)#8Xyal=!li>HmF4AY7NE+>iP;=QptS1TOS8oAj{HtmUqdqc+b&>|0v-Ts}Z<&2}V5XVX5vfw!?9caUKl{K@1 z$tTXw-0L9BEDe@qA4iC@KeH&WKvIBtI9HMh`!+yl9O}gAt@d>F!*}rNPYB8>1;Cq% zU|cs3QPkTO!giMOt{iVdAKNTgw9gyLXFMSOyE)ysw~cS8FaytyRC7Fmmv}X^4ci*C zv2m$3gh^+^m|F_eo*Bd9x>AaP6>zZC7#~`c!-pncj@j`T7jnM)xg)|%Wx!R~pXCm6 zmBvJ>dKkjZed&h74LE6bJUZ43bB@YGTzs@e42@gtUs36ZztiY z6h>w3IDO_H*9&k3=NSn^reY0vjaC*iJ$&px92M?7gXVWs}y0+XWT=FZZ~b)lvJR12XSM(8#))aqQWgFd@(H* zcg(zpE$Y!ApBIA-dV*}=3kUcl9gS-~Q20RnQLr!umMn~h86K~wgxgmT(k1BjA{|e< zA3@jteUKve4Ayd5_Ug;_kZA$XW%mSCCJDnr(dFQ~M3hmA6lW?jig1>@FBO!2O68mt z@Uy{ouu6LZ*-bBT*IGd);MQ^U?5(5I_Kx6Ti;EcP>Og`n_H&wPAkO@K1dJc|lDDsu zVdlmyAYH74W@$yxS(gl%T@U$7=dFP+!d;-yX#@i5EG8K^6Ej8bH!5j+djE7W7+EgJ z)=nQM*Om*gIx+1Sd!Y%BiKNiY{Z`2S4oAzg3+ZhIOL%^vh;9fBfyA*G8aF=;hW{j> zjFufLN^-NhY#J;$IhE68C7CS0`TURDv%p5P3r&l6z$sNLIGyv4mSw(#!$nT0(CdvF z!gC-zRTMksX2a1*2l26i7U0SIB)k0;20!bABZEn>^?4)wR=q`Y&vc=s{Sf$GF2j(z z&7itYl5xKJnwZoDYpHH(MaeR0)~@UvIj)OvU#|zZtM7pC+tXoUV?}z>#U|J^RSu`r zsgmR8__$!L3~MzY%3NLFjkfw;sPmD_zgNAW^%d{Xp&}N~M(&2uKsTbh+6|7I_`@ne zT`WFUOEdn75JR0B=&8L+U*3!2ho_BW+4NJShSQ0pd2c~wsEQX-G72W5_9(FKIkw!s z!z<1cfwFip_Dhrn>dz<#QNJ>@aq9@gGm#5IbS7MD{1W?n7Ec>m&}nQ8tVy^ zp6TwEVvNm%`}pa*AdCmyhQDu$&~}0(bBNcBA0=0Ty@V{)o7zmSU-=JX7;V&6Be-_m zapE|+nCPz@fWIA0xGY+LF%|BhS_X1(-SDh-UF}4UrF4wz2geXA#u842iebRRG4j@N zKPX9Vgqtfr^L7OLgZ?`aChfF4zS82dxryA&AodQNb{D|Gp3^9r{~UiZqnN0}bt5fT z5w|8Tqt^TYMGkbrkZKrickRPY1xaT4ZVxbXx8Zx$chc2ne6l7=gxTX)iq|A-;83^| z?iarbik`P%$aW`=H4c&cvLpCS{z3ZubrQoeM=>b?(r=FixuKbq<=ij|5`z9u1K?H+g;$t)aO*kr;#VR@erzv`NG*}(Xc92 zg6)%Op?blbXCI}8*IODeO`YRg=03#ezsBg;R|c69MSTD6_b_{xFKy}y22SkK7XE8O zHGMaMkkkvbS@;+fQtQ!7W)DhkUxD-giZB`{Mse5U{kYxDinKB>py-D^Gan6HM`mu8()mO`F&8OHUL(+g)>(6FHjrA8uAY~=$ySMw4wR>g9wLRxvJ z+RX58iywqe5@p^f?Su$BRX8D#2;y_|!7sQ9+Wf?sIe9tQFm{eyBFVs;%w;fttD=oh zCBFHXrtK}~hvM>~m`3hH+$nReS7}FIc~8XtQajYhyhN9psnP;NVH_Hwa55{2>?VBp zt0D!w%^dx?)1 zch{rMFL&6SDayPscnY5V%V5*>02=+}2OJK5jgmqC(V;dO*3IEAIo2{xe?RcY-5YX1 z?r|@eZ|A#42OqMui}H{OMc=RDQPD7-B(z!WG&gH 
z@f_&RJJjmb1K4d|fjXzcV7>MllK!?4?yft8&9yvC*%S@BbA)-v94Fwy+u@+#@&rfZ zn((sVCush&pR}q6gQ?zZblp-(^bQrm5!Xzfmu4LrI>f_52|lPAt%JIs4X|5#Ax}wh z2ma);k`nrZbkvPIr+rM|fo&=IYUjjd$pW!O*_bW zKl6?9xx9isXohYA@#DhG*$>?iagX~gu(ZM>tCQe-zaWj}yf@1wtFSd zQr<*%mnmc(i6)!YaO_vfCTgRR%Rl5L2RV+X;nlMUB)#=`v>+UwKe*3*V-Cbg?J>B% z*Tqh+5?F738%hMVxlG$uSn_l>ewkE&P1Yx2!%{=+N%#guM}456MvVEfK9&}*;PU)= z@o?->FAQt<(!J%gp?qZRwO zqinr3cvz~E7sUa%%E1ZcKJ;VDVh^5tj2QDy>9-9PI;nw8`KgfE7Kg!8Ovx$p9+LMY zn|Ni)urG5e&_hfUEPWHnsrHRn7qSY+@7v?~PXVCp6bdtLWuRT-Mfln2hzm;fASl%a z?^`v4q3;K{{wo=G{4L;*)%bu&fGF-GY4DBf}Ccxlv<- z%^@hcB*iL~i!s(=58zZ-1pb|vh-1^U;o((sRc77>3>hZvhUN5({S)ZSe1n!o z>u_DiHZZW=3a#tnXnLD2B*nca{2kqR+CK+s?ifJ6)je1c`;vFwhK0Z-@8FPx7z*yJ z!lI9TPzmdC);x|8u`C$_MqH3Fug6b;?p*&~idlAi5}mJdkSJL#p?3=h@Z`ncaM<87 z`J+<}W6M9lbf<0(miY|oQ#e-BNpUuL<|yveSVlZJUYg}=j&Z3M0LK;_#|4L%V!NFH zSZcZx@kgy_JXVP-`(~p+*-;wD`S8UUkMrWUfa8oV95WJSCHJY}OUF-;+xZgyoDs!T zl|*vyvpdbK~Wh%3}>UnjIG-9kl?d=&I7!@arUFp~J0#x6*~?3w_w zqNGR5_Si%?^}`?C65Z2p$=-oVeF|z9m(#mX?!b~0M*y3}Sk15_Ap9i(L!wsU%>*&l zN=6K;IUVtbNEbfUiNVB=anxfS$Cp)Z#Qzptqans0u;A=d5ES4tQS&9}foGbawrc<^ zHkN?lBPD2m{|u_%M!~=`Kk{P034WZPigR2{zZ%K6Z^ z`!TAneTuCLuZhu>MsVqOAaj*P*g}tq__@Rw^Lx{v_`mz$sU^nfX*ObnKt0ZqQKxQp zZ{hgSB${pd8g|M$@VvVh;fhP$)IuyA?L+R6^=1Chx#%p{e{|4VwIdVv+{^>MUM20Q zv&1uR%i+^(Z}9oe<>!VQV7!&fThDNVCX0#S=KLQ~T*7%SZXs0i(g^o`en;=L2GXPN zKzKeY$eg|h=uq28_g%dMbyrkyzWUj@NP) z-~^ooSo`q;&RXGy#*Wf#(Y9xp@FEd4rU)>7$DiZa&F3gMXBgwZeFM3y*HClF7tZh8 zgw;P0XMQWCZSP}Aq_+$B&+A9kH78(qp&jfMPQ|T@Wx@RCY>tKg4X(Yf#(Tr#*yL3~ zEUL_L%R(27GAQ8Z^i@H0ND7^uC(P`6x}P+^D!^v@HqevR2XP@8To)C}t2y-vrG}(g zmkENbKG&t0gEvq|SB#bLW{KI<&v@p#AUiF)6Zf9A!Jn4_rb+F@Jw~C}<9z^@z7K)5 zJ5sS=S_BV_o8b4xLi7l6!vn(gP;;aL@^2=9`_7%XbNhVgoUk2Fbln3}J3G*gc0`XBMST7vTi{BTdheK<8e7LVoK z#M$cx(0NG&yfcXb(TStP`i2NAb|W4qx#@vTTpU_|>ZOXIk?6Erm|eO;g2|Zog3P{T z$3HATfb3aYn0sH4y}e})IDZMpYbG3TG$F|MupNvQ5` zZPSf$d~I1C9xw@jyGi%(r0ocVGD`9w!nu`{=m6j3~FA5XZOaS z-0449aX6PMEa}Hm?tN9n>5v(45#=ubh0Bmlgo}6>dhR1EcZ|d@$pa8z7fyZ%=Tet> 
ze?c`XjBH=94qeZ_!Bh!tbhYlr7d8U?Z@Z&VF!C|jSm)t9;}mkWs~X>rSz_^4A=Y}R z3SQ`Ooc_kWFrKmrZItrp`}NE4V#zd!e4GQC9}hyJZWi67VuIaALhwl3B2b(dir)kM zur5xF`P_ONb0BZ|7Jm=`05ildd7zN-MI)aSsCa|I}Sk%ghsWq9A$7b8O`Ov-j4 z=Qb|{FS{??yXi*wPFIIFUCX z&XyKSu)iJ`Kz4gS>{VWYb*)jPp_lWVcT7dT!!Sg@8RF8wMdZ)YKBNr}SeW$&PjQ{K zzeSDOFE)lEadn2(ul-TRwF)2C41&?^6f*R46!OCE(R>kKoMcx4)AJ=+ZLLNk7QF(S zx9%lzYL~!u&9?N%rD9BpFE_JFQE-qSgzyDCd}r~G8dY2buag6Gy;3uMwwzDRUR@-g zJ`w8v-!eGM@d{10B3!szhCK};jJbIj^?G|75A5lLWCb5wCh;C!WgelF=McC$I^n$Z zzwmt703J{C!KBx*T&Fl0%;^Nwk2p=`X-sUAHK0k;4FLfB!~K`U$b0rzhg! zz`-<^Cvu>@Hvk&16~p_L8JIgWs$kGa>a^3_2W1K($5zX5!oms7`FaXUn*bT+L~I&f5oc#*-IV z`1L2sb3CDs%9Z%1Q5;=9Nw6y~9fd_3e*#3$!>k*caLjEp@AMoKlvT}uB{lw3%|I1H zmD5p4TAH!+aKZEwLd>{sB+<^&p?M<@G4Rv}3_LR(;x7CLGTXo4w^d=}V45(ToKuB4 zTV0?jMu5F)`5)J{<`@#`xo9uv56^5I@kF9Lbv-pBJ^n!eZ^oV)*!I~CtmQsKpUFWy zvD!3!R;CMo(uXqmlzANWDpm19%LvBRR?tmCN8!#LQ@G+XhL>kcGBG(4=(f#^F4ukx zAI4vUw8veXx~&iEf;%~G-)%T!BuX!G?~(s7;?2N7dVBi_=ClN&_r)_f{xubkZ0N?o z@LA;3bXyuGCjvs;j)*RP0{&gv1p3+sVA~_^f@k6*` zjMH$nuA;z9R~%VT0<}w?;O~W}AvfkBYE2yD?N1Hn&+L1JQV;E*{rW$cnv@M&b7$j$ zs|MgFu^9DZ1zCk+M`}?{Ay#!ez8-eKEgO%(CrtsS<3>5x#VFz!FUGk0Gqz6rvsaz|*N( zd(lHt#-dJ~NwL_>D+%^S?L$l9tKV-}oBA9_wlt6loE~|pxeW_m&cM0W0<77tT)1DRURx2@+m(vqbReGp;aZ-(qJ zjK}v2^5D`t9b#WGMuRRTX#d_T#N0nwheW2CT7^5}wZ>*V+9iWbmmwaO{)TS#HE=pO z5{UE-{PvQPDSm;lI7}UNXS{=!J2j{-d6@i^jK_-NXn3F@$fz3TqE!Ab6ims5lL0$9 zPSrk;7nNpWs<{0jUlXC^dNsUM&Lf*I#-mboA%4y50nuv+>cKsPvAVB6}ZE( zj(1@lg5dr*eohLsb1w7o!b>1W| z(aWZjaO)8>Jh{4!PRTt^-yi3?J)K`6`q?*3Z0m<>gLa@P5{1_~xcu;kI4Wbz@RZ6bu}I~os8GjoT)0;zi3Q%#K}*`LE7~l9ZE9c z`eQOUBS@2oul+~&n{d6bhFKuJ!H%Au;0wlAd*RvZA6zkx(@erDKt5&wd&GS(^2{$- z@^K6Tjt|qx`5bE@c@-FV^*~i_FABml=sTZ6QlEx^!aFHu@8+po?s*wX?R`Z&h&ZM% za-+36u5=`A2(%VG84L4Zvr=*1+sXvcD!vx=a+9!aeFy&C zYD|7xe!@b&FhhHuX)A}6!|DB=aAd9%E;=n!~{!Q4H5|0gfHk+^{Fo%IF0WgsJ z76Z14X`_J`@>hwo(Ok#0!b}}3JG*H|Ocq^j>I%!E^Wd7LGMyc|gAR;Iv&o7gOw#l9 z6c0Z^mF5Ov>1;t}*Nq@ZT@*x0{}@`n>%*?E#<-7diyC+5JZx(OitbrRMNMPS{Q90+%K zPcJ$T($znjpd)@C6c0Hv-6j$?w1pZ5+pIBhW%+j9kc 
zWHo?RJSB?BcBCjS9%id)LjX4)*L&ncjGF^$SEi!*{a22KW6R;DhzF@VXl0EOMVxm>EeGS*HwPWCB>;0_s8bga6Dve|oh_qQg3KT*FW6D?S(l^x-|~auV)To=f*0uvpLPX zt??GfQ9Gz^9mS}Klc3xy$k@G$B$el!;nu@N5HTXc+&SxnH`ZN=6u5W*XUSkfJILR4x-S^`_6{p#My+^K=_0gY? zPN1hYr&CILa~_91*sBF#`A>bo$DA*Jtc|*FMkWp zbYBmJ8=Rqjj|BTWr-?o@e?sKdE75j573H4(fO3Oq2);foJ#h0$Wrd)Bw2aQzY{BmwL;sCk5b7}Z(0XkfO8zIwPEAk2+=xE2+gob0UcnuOy$(NYVByKM=MogimMhV5}ocPORh@}C{&?&~I_b@nV%&c!;9wDV*E{wMQU;RI)|q2a%PzsH zuhF<)QIq)VZYC#a~NMq}VBEH{qt+@1Qy3BHo!B3{v|Rkus0>crf=DZuj2~ ziK{=+{jHx5po;~E4`g$q$vj5~K{i?hZyV~|)h6UrYsK|+`r+z#T{YSTTq{UJh_BGm*d z)_RgJQu8o4XdImD&Y;0kE!=c?2fDd`gQOMR5Lj1$H?MhP$Zjdd(eMf6m2kFThJ2qfBXL7~+^XGubhbK9K& z@Cnz)ZpRk44&WQThd<4nM>!`FJP)h!`dl+%n(zpEH3Y%ktt}wBwSfrucOrg&1$+Np z06DKCF!zHYY^e6fQ;NSpXOugKMY^fVof%}~$#g0i$MMo-1NkF67vdJVTG0Hk6$D=z zz?D}b?5g6+kUZK-vwT&#d=~|&bQiqa*8$EAEg1gMiF_?@qKZemVco$}{C6u0l^)nY za>+v8q6fmvyIK|4n#HG+rB(1&MFcM240nN;O#bdI<+etE}Mqoxz=Xb z<5`E>M0=p^%VZMybO07|&s$`u4^Q8;Bcrot!ae8Zuw=yzYdzHQ zzK9iKfnZ*7234>8;?A*nX#3-i_uoe0;-O?>{=Af`-w48$n*(s;?W0(+m+P|zZ-7mn z?a1f!_tDvk^ypU)GKG8WKe@R?{kQ>A=#R&wlo`9CDs@Dtqpei@~>vv%*2 z`LxUD5h`mY;lw!?@u0|doU=}xy|LJZ|KW-njz?r*TvrYTRldep^N%F&O(^ei>Pyd2r#8*bex%AC@>0l8UXkZs|H0oQ!7E>|CRI%p6N`;+))K$y)8zYoUSG;nLc zGf?pz#{`3v^gGjyNCnq%t+?`w-anqo>swfk7CQ>5{BvRUzQGhYm+}zx9enB63$M}N zZ#^!3C5?x!t)S{l(@|kHx5p^4ifFl3lhk5S_RbkSICJ|6ugxk6Iy^_f@8Nac-R*8L zFiRMiw~-*d@gDxpSwN0{@5Zf9w7}SzV;mn9WL6#$VK%&(44LoJu~2aa?i5eMEsHwf zlDH=vvdHE34sageqm!`MFcu{R{a|gzaXR^T6^XJ~irey7zPq9-DOKv^xI-)HY9+4U zY3dE_2c=mRAwec_{}?~Qn#B#%e&gY*c{t|RN7l%Gfm`)su-9+^t-SYP@(C$6I5if` zyh3=FcE5sfzYy^D3PQasTTo4l$EC}B$dcCtEQI3l(sz#kmsk$K&Hjr!M97gXJnWah z1NoOFasKp9{52@aT27k;*DsIpLl2ih73YJVx!;2~i#A}VPy@bNA__TCW&F~x>u9s! 
zH+L*Vf<@U>*!~~`+gBVzgPkV0Vxc5kC_Tn``hVa6`~y*&MYP~qJ3c*<52~4Bj4zMd z-?>+m6~_>=XU1-nJh_=%ajqdo{-YpY?u!c-6%#f|5B+WGacp)Fl)c=DoY}{|(3W5` zHOl>F6_odR{MLB36p9Zd>865ZTIZk)Wp?9;DNw`cH*MXS;E{CPqjH@EdvL*p$ zkG(jP*wao`o%-=Yb{w_eegc}qE1~~J8-%P0fh#KIAbsx|jy{pZW9yfKe8Eq6pmY_( zL`0Z~!_%>kFTmKDPJ#VK$N4?EQaIcZ4Nv)l;B6(%7%b8SnPZ$^H<%A#ybtbqKPC;F zf49Se)7g~T@SR^h1pNu28tb;8$D=E#cQ^&ZxsG&QMiT$!lt6U&eGPl%8)0vSH;Mj` z&gDKz;Y!6da`Lh0F(omw!F6JnDh~~te-xcnKowmVhUr$YFh2te0}xxzUZaAjUDT>_FKpeP}wG%6|l`@iJoTySR2%wFq#pJ!qnsta{N`29SLZ*;?t^gXi0ON_bj zbq2a*{zC7=Uf5&nhi-eTP}Jiac&smiiuc{Cv#;D4KmeUl}?BvM}LSF-Vv`VV4RrD5h`?R%(hfODFrH z?1AUt^WRo{WV8?$+EUl2gDh0&i!!H01-TPb9Wf=i-F24 z4`O}K532*}vEAhX7^MBd+M8zRIxP^5Q1_hFbf_XA|l;BfPx5ZC>d?&PkG!QM(i6f>9v zi@&MDRAm`Bs`DQdpHf7P?Iv*QMH{}|*^lcFD1)JwE89HbA?}p42d{29ynU7cKUUp76Yu z_6(gn@Iihgl9n(iJ5>jT3BruGo;S!?Ht@G!TSuLMv^ObQiEXv>@V9>p2IRhATb{hf z=Wb~bancC>SV%D+(wEX6Y&MujRl$axCFp57297l%c=Tcfn!otUL*tRkie40OCF;#g!{__N! z=2e4ZzX12oxXXj~6j)cY^YiJiMLjh$DpZ1l=R? z%##$X{iIDSnnamf1p$!jVt^)e7vI)X03R0%GE1L&!%v#gYy6Q-1{XrDzS@SeSpKDi&t@XvxD0FjQ#Ln&sHp6Zb}?GT_{&a2YKJ4cs0u#ajL8+ z(;y?lrIzl5Eh}31;hSoBNn&(1w_!f$l^DYFygmFvpCBhI+O_n)U(s@3becYvX)=@p~7rdeM%Rp#%bQMVQU;$*AFW z4n6*h!W46JO#G5Y{b1|hihB!0v(ntwx<<^_Sj02#@WUsztKg1C4D2io1Jy7gCO6Fi zG?eM?E^r!o_wyY%%H%rUY|M|g{7J2 zC%$1|_^%W;f%;gxTyY>E5I>x?LNQ}6_<8p<>#$Oa`z1uP$)kZ7Q45a+XLauZJr-^j$CY7^#)KjMujZ zfb%&oEXWXH#I2}@T{sE_)y25UgW{Z#)ex4-1pynJ3I4CDQurG6G|Mf-995Br;<>lA`TZ_ZUh1K6{O48mY5C2Zt9zO9c~rO02zB* zUZp`$%B3f_*{Zn5sK0$ZD7D?hDHiKV?#n;usB{6GPsHNdXF9x%+e*-y?g>A>*anC4 z?a7RJaqM03A*?c5q$aTX4ft;jgV|C3uuV#v$P|=8qx?T|6fZ+fku4PeGQ}0uhb?Hg#tFnuNicH;_rtzldf2)&2c6|6K*o^} zymvYXQ?9Gge`+^$&Jf}rHYbvq-Um|ER_OBiI;FVyS}|_?Cd1XLw6GiXKVrqgm+0@3 zg@4tSvC+dd;Nv%kRUW<$X5R#v5OY7&q+R3bGhRcvyA|tkCkFNA?Sw1Ti|f7iD_VNf z&X>D4x^Mi$9#YH$b7?1ZyqqS0H7< zrtJGnd2^jGKTL#qT*Km-Kecct@f%87yRq%@RhZGF1iz?1^>*JyJfQr6ymGh=vTea| zn(0_8%EU?6H!FV68E2>?yu%xD0`4ehUs3#Aux#?SZ>MRP#}cq z5AQr znt5pMco0k0n6S@Qaq#K+5V(J=#gesr_&U3d=F@Uf^OYF$VnZ2OI?}{%pA|*(2SVKT 
z={@W!^9(W`V}c%(Q~&+TG069m;4&E676a^TCdD&_qBU$W0{b=^B;A?ynYls%p zY5@fRXiFGewivLQm9c!qz-ri+QwyJa^T9X%6C`V_q0jPmei`k!*c<3!L7F6aM|a#_ z_E|8X975+;(o7z`M>UVmQfoP)fp2a}F{izfv9ll>3gZvr?TJCGx1ATRR*QoJq4zMf zu$A5QE{)B0`UVFC?($WCKS%zYQg;902t2gf1&+B1v$_wcr&(T<`+P41tBPaMr`HE{ z*G;ATM1P#XO~vsk{?ILV0p<)nhv;1bT)fj4p7d82@@e5@aQQv}S$AW3(o?OW_~R%x z?OTI9-#ntRHJ8X(mE*#0duYqQi|P@<*sr}BcaBp2zWPhpuvV1YL>)1>GzlKPTm+hn zd+=0CIo%BuL)d9y=D}zbI3(-`1DQlnSr&{}Vy}Qi;VAK$JPehyP2s)HMAUwq!Zvt_ za|zM{oVIZs^=c$z<%P+t=xPB*Qfrj51FFFL&0;LQ=7GsyH{x;Sdj3sO1*o(hWcNor z0{J<%aK%#*y*D{SL*Y#DKerE*mK}xH3k`A6hA}v=V+Y-HdWo!948LZ0I(1%Bm+h?- zI1(BN3AEoOYDc>?8wPRqI}6^E=e@Y!#1Wcw)`C?;0X92`affV%*ef%|!CX@XS0!JA z4ud|-AKgjJZkfVS;Sp5teFb`+k0EP^2zPLW9A$#~;rpf#Qs7M8v=hQ$=YKocl9U5j zS|-JfEE3@c^X~H|zLaEUo|nceBmJmsMi~UxrI_jdcDTWDfZefpC6pdYK{CD$Lk@UXM~CLu2iZhREjaI!izaaoF#S_EsVyyJ?J}LpO_q_sl?>q)ANxIjx z?nIrk2Jp<@f`QYu`395QFuYN)C}bw$FlnAwdmXK zgi7K!F-&1QuYt58%&x%+t`aaI@hoM$@lY<~^xDEv;+5-G>#Tr*nc%E0n% z60mKl2h6`L$y66K!!c8D-ofAEkot5K{?Xm2e0BjZ-_8jG_NuWrCltSvZ^+LW8Y>L8jp1X5Pm5d+0h(f;;)+Abz5}G!^4Q zn0Y!Jj65Z{O-gYj-rxx3(Z_@8MoV^eZv}>Pi?MCIAD>-&4Vp>yIMm?HZ)|>m&S|tT zX%&Y>N{69OOq2;pih+t>>9BNW49IV^#wiZ<_`%y5&C_b}rGqdtpZ4iXnqyE-^(8bW z8iUYV6Sl+j00~NJ#*)uw_)*axqDE=<%F}|k_CX^o?c(^7(*>9Z&;FvZxdivbOps9z zq%4BUKm0z4K$s}k3au;G!Rm=0$m^VFoE#pH3+8rXU5Xzjo+=?liJG7%o`4(8rxHKQ z3lMy@A1|6c1gc#|naPrBXTtZw`Y&JLcvTE;qrE$x?N8(>t%k>i4ZO>;$Jw11;?+V7 zOktNQ?W$P%g2$gL7}qa{ZVqBh%O)58e#J(%LvI|LT_|5*p)J~q4B@l9USgtQ2x?;>EOU>OEC$utfx}e*KMR+owVC`%@@oOS6nBT~Mf%4d-5$!VQ(la6=(T(OFln zfj(caZXTEfe~0adW8kgoUyP5A2C)=dSbDq~K~d3?)8*x#K8H(av8d7~uL^Qw@fe}VgLP7sS};SkZP z2ji;Cv0&PAura57G=DntPN3(7?~$kwC67s~B*}RlLFNoSZ%X6~q482n&{^e*n`Il} zrE)asjg#U;Q-rx`d&2P3=mpF!6NUjd8@ylAhV3I(AUeDs+cw`p?n@!wt(Rby+u0!( zvK}SY%YvecGR$}~9gi-GgTMuk*a;hKp;^xt&!3ruax@Q8G-)3yfDial_sSnn2CNqi zfbF*i^mhaDmt12>p%#w1`hi4#3k1Iw0R3X>dcX03y(jXU)!OO=ldikM#%M>Fs(uxe zE1e*N&KIw2y2&FO9AM$i?dS^TWvAv=VCdj%erAC;j{k51ne^|d@YjtL-pK;Q@35o&m4Ed(JZ6dK-`!E0EJbGm2L|wn 
zelAMt(e7TkGF!tmV0Xk1)b1a_h~i-4vViVm9p1w8)*K-8T_|IcFn4ECI|#R?LZpTn z|JjsX?6#6$glCfv>@*?9H9&+px{bPN%8Srcaw7g1amHSwArNt%3?C#v)9d69UOFJc zG@bi_H%cquQfL`!i-fU?ZZA@x%XduLWKPCLHcBd!i8AlN{nhUNI?1hPQRN1pD)geH0HO|~z2MY5t@I!?n7CLuB^(rsk)rc%~ zsX7lw^cf503A2XTSJ`V@={ue6Xm4=cMu;=UEhDS5E@e{AUlu zWfF`JJ%AjcrFeN(5uAzZWxa(cFYDSL%*l=fRiOxUolysyXVmj06rylAI-XVccE|0H zgrU6L5L=RCV2+eM%@a$2`xDv&wzWVXm`-N8nqlj#B39%{FnjrRC2puKV`EqJLiE#I z{8)Sop3hjok~Vr~zGMxrzTQL2H}%9|X*_H(wIn7yC3t?Z3Es?8g4;Elv3~nb=zH)G zr`raCjz8%#NwD%)=RufZ884c|J9*(sxb(na0E#KTXlMSkM z#TjcaL787GPB4`xihU=@qwYK4vR)O-Z#^Wj5}W|9nk)Ty4o|vICI*icMaT! zQv~*sI~v3MZ9{!znY|c@AO61+<~c6(>IC7^8~F6U9GFpIg0({%!PUE)wc%dElc}G` zlwK7)^8PV~YFsDBDuppJLzzq|`+*%Ff5OJS#jH;86a2O30mR>0fsb|Guv2pA&i%$e zc+htRoV#X2N68MdH!21!4E;beZa@CF#T2R?KjYluP7>X-9Mb(SlN!D?ktq}B?EN#y zl5i=G-(f@`$qG-lPbAeVB)ONKV$5S$2AO8U+{oH@_$jHM{oHsLvX>e0)7g1A)8zws zurUS==Q0qvautl;9>CnPaj5#S02du|f!J_KT$z5DoY@-1JCRKp`9FHl<>Oztn{xr8 zHBE8B3=w`_RS(hs zNC0)^KH(+1%HW897A~?l3Qkqj4Vh!hnyPD}OUyw`NKU{Fm;^5qMVR(625O=e;m{Fh zR{F+v$onaclWc^T&yhy3OF@LYf43BhtsJmFEfF-58}Yv>ny7~(aBsU8PChG6?g~jV zmlW>7qU1Z!u&dlaM>cVW$4QEsMw9qX>E3p$6^!APPVIEXA`o!-bWN(U*=Oe_srKed6c$2;Cm zxgL~_)r87!4_y9$;|phAAkz9A94#8eeEmeM+hfHOIKd-Bb1#$AM}6?-(J+3;WI@j3 zk_2<>s5HX`Xn|g0B(6H9$(~mT$I;NwVH;OI~6{-7`?wb_fZO8Q;Zg|CCl3Og9L0Lmf zA$TAQt~Ut~vtL4-T#UU zkvrfGrm{~#vu7>nKiZ9MSLxYo{swYFRhk(Pwd3iz?ZuB*4}!v%Vtn<(2E^RYqW_^o z5VTv0nYlTPFSN#rZJn73Q*#CJ_r@^3_FY+Udm2c)jx>i?x)r}0hM;MP2Q0apgX3;p z^fyHlzq}G>R^8@fihe$di~oY~G!xjG*8#JyaO_*H7i@VZ;kS5i0`Z!?xTYZhW4se_ z-(@j2^2SLS@An=dJp4QXeu+?4b)h+msNRA(*i9K4CHUiO2R^<& z7ivbjNRII?sEQ!C$b+y^84L005o`RtQ-n*mYeBU=$6%-GQ9R!f3xOAAk`+x?KPqr3kbk8W z6%ID?f1Z~`K?7Ad))Itw-$!BXymaa&evgqNE_4T-f=4?OV3+?k>@&WL=Ou-?x}H{e zxX%R7w0C1~o)!+zyMkjfFIeWe2WEck0u#;&-^b=deZ@J{cX5E2&lz}qnhSg8j6al= z$)bGtciv`E>gPOs1r??Ua$myQVdw#MD3yrwq8py^C$!B2jfP$l)3E{UL;7IA=O!K! 
z48-LM&S*`a$HVIq+4Sg-XhXE{tgHKul?_9CX({8gK19)_cN zli62w4v>GsgB0Z5!nL+5p@njOb_mMhpsx(?+#(&&jjzTbdXM_(yg-E=k=PRz$KG>M zK`%)`#wn0bUU&xMmBo0obd_;rZ$~>}R`zwZVmo^% zrOybPombiW5=~$(7l=(KQc2et5k_{xDEVOa9K$t*II*AsTx@*>-w0d+&9i~<-^Ova z(Zn0B=SorsU>Tb%Q4b66<+HBw8zJZSP4do$!yj{Lx8L6ZS~?9O$G3<&Yrlg31Ala& zJD$q@>e!z&k+s<61ACGqN%Pbu*cmShsoYf3C|ia-Wu4I7_Z;JB?`)n<5bU6S7PDJY zP@mI+qR@*G_4gsYl6u6}N?`Sq1*l?U0CLs2m_D(Jt$UM6?%Bk`pO(GosJxH=Hp>&U zClNH>DMk_^%D_Ew3uyQHfJK8p1PA>E)2QD>{$T`Me_e`Or28>LMv}X`1ZTYfwwm3$*XQ!fru7$mLS_^J848<8C(JobK-v)G5#2eF=wN8gW-4o zTmUnY67eKQeQ{z7v89{(X`&y)h=K^q^S=zD%sV{)yBE)2rCIdx6iiPr!8xYUC?8t@ zMpAF#8O{A^@5#czCkbHvuAJmI{lSJG!(>9K{Hm zpU1)ep+wAFL;W3pl~^ZcB|Lg|8>Vmm11)8}p#I|=uH?7CSoAX}SO)NFRv^wA&SfvA zbmO0X4}_C3aG@ig9kY}G)w4Zpb@po%bNq{IRop?MFGw zlqTWF>BArgl<7ag72-8BAn9N-Y}@;rWzHAF>Bm9jNLxBO%@ZMOWgTE^&IbsXbsc`L z*#V(OF}%Hk`=NTuB=T?*AH7|h!A-Xt7amea37>yZl{N%@JwCX!;td8~E`ip8Rakh4 za(Sr-bWxiqqoNamx(+_@Id~>+N%{vNL$fhWQWG{bxS(P;-Dg)w;;Lp5P=-Or_Yg)NvY0=k3+C*e&HH{DVU*7JV!W#1vSuy%(Dxa~ z4U|!Era;H&kvX!-+5OMxZ!zWU(akwZ91dTCISWYqrpHX+DTH%>u13 ze;7Ct1<41KQL%ajyW~nb+}4SMio&mW;ly7kE2O{4fw2&|$P*8Ap8=y_9UPgIO`fTK zLPOgKQvUn_Y`p&=^^R8|PCK}l9jo1kk6u+!AEgsO%nMj4lZAqE9{lwWsH?b285%}K zxC70etX)Dff22f`8SYO-&yBt8Go5aB-EJXB2A#tXK{=I*h@upWVx?uBS z0nTtzF|Yof1+12I!f#jaV1knx&;Ls=81g2NHxu8mAMe`3%yVtv<|52AA6!qi%m0L6 zgN0Dq%ChytcOhc6D0fF=73gJqu-`qKA>P;*wRQ`_y1QSYQcIFea<0M)K0(B*B#-b5 zW>P0cG=zKR;5+^c_+c)|Z8olgSsRi`toJzRt4pwp`o)_1eLf>xzXup>V!e8QP9_$@1?ze|6bO)WaLy%tk)7W5P7x3?X zf^&RF=y_d|aow>29t65T-^*s~%vIy5p=#fthjC?R=)FnDeBxqe4 zj;^{7r<4O3Vp?Pe_45)I=A#$n-gC%E$YAw(8f!Bh1FeCsy-l+4F?tgyLZus^J*lTLY z3vM4L2SPusBNwlNp0)5{lSf$w80HLUzl^1)$m)jz#ziYcm{q9y^raTe>ZKVapnNLL7%J z%3oo~?G5kbn_wP=x#BN#Q7-Gt4yYdC;o+xM?6g4-Ot05u=Xq`fhtLG~37asYO?Nt(Dpu^QsX3I#wUvyYeaGI? 
z$%Xnae@MB_AtKrF7s51SpjbwWr`^1Z@HAwRKQ9J)=)E*;%LHzi=1wlmoQ9_<|7XdQ zc>L(hz`4m{AaTGFu1|4*WuxxoR#oCA+r&w%{iP0~UY$xR=v@C|9v{@!S3{t6AtX@A)(OD-d};?B-G{#4bNT0+{6XJz1S^wT;amAv%sZuxcRK?>>Od8)IXej# zQ+9>h=Q#dAo&a-w4ekG(*vu*z6vGAeBAi>e6a}p$nD#Ua*wz(_qvcU#>1ILD2_M4l zycZic6i70mM>25RE+d}m#|AX`$zaqnGYs*v;xQYp;i#ZFFWhGb{>*QJV?N%nfv<{< zv%2A7>jY$H@5RZv4B54#h2QAxgeNE`ByD{Lwg}~uZx4FezK=8uSEBcSe>q&AF%52i zkA(9lW`nzI0Y2Poji&kENI{g(PRFlnfD!H`(^7nt0dBL*B_Cn(a2i6LSo49d8hgu7wev>}m+5 z`N(VYG|0T`bl)H1-Pe#=k%;e0$8cZT56tg( z#i36>$iVMh-tZoZKsgk)e33z20(Wa^^pF32fKIvg}&8pP_iZglgdU= zHOv{BO9Yv}t8#F`c1zqwXEt_&PHdJ>KjcbJ#TC>E_tHcF_6mJv3#a75YZXL(yfC-$ zMK`o_hv2Yb0vP>zPab6kvo;AUcrRv2aJoq;7_9w@?KoKj{{mxCYh@3%(`TO{!?6!p z+I96Yp+1xW=z~6dW*@++BuFtq`(}aM$8z}Md<0Ay%F*o7Nf0v2$6vl*AnB_*p7i!Y zd5K%(jpj}q(6wSkN`xTd#ez-B*&{gg;W-o*+p){C*1^*0w%ET!kh$u=54+l4lI|t@ zS;M3}3$A6_5-%Ees@uoc36qp8|6shCzWF54be_@{nxx)?a5Py-M53XDvk2~6v za7Bn6#NU)=y1J|(CL#?_1of~6Nzr(Jy%1cX{Io#HTypPCFj-l*k39_C7}lJFa>?KD z+7xdL%$BAU{xUc}w8$my#jg9y)#47K!1(!#*uq)>~xVBzF?{RCWD@(;_^=e4ay-i+Bl_t*$`|#0Z ze{d@s=Q;mK;eRYjMyW9ueCAQZDvmB-e-_z6fBG=XJ;+Zjr|yPA>WF)B$rqZ8f3V;8 zYk|+*7F5hAg{>Wd7(G*%d46LJ7_Di-N1KXZ?BY*y#$6Ve_%UKL$qKa-hvBKeKXH7p z5I4=SB1>CK@Mq)zo~GW5VP^|qS3kvYCqXvUUzhAH2?X!A+W2fF2n-J!fxA};#+^=N zk1)lQ{~t^FpXqdORs)G=fG8y9k*uIayi@XCV5u@0g^o)zf!8nKAA@|@ax)T@^(1hC zNF?mD7GWMTmN2>EH&&dN;6g+j(Q3hEo|st!mVER^hs{3hBsZ27ee{Vxslu3q>TRcI z`zVyU><=4F0@O6L98unax*ko1n94GHSgg;X!c!^yES(N9$`gp;tUt(G_XdBqlwdfK z!UCmK-n!Uq$_llB(#dO~eoGH3^S@yl-v{a!BglsyfkVErc%-3#)w$}6t8Y=x;=5ot zNwX5uZ@S}wux6NW^9;EcW*?84wnDY28NqLii&mQ@K zl7ifn2gQ_&rRH_ksOidhk)JG@kks!B%u#gQlTc)UnQmw{xgh@wNb0 zSQy7ooL|OHv-`>COGbldqb6wy$c7yC%@{3r6YlHIV~w8L!H&VFAn~gR^A8^<58eF9 z<@t9&?7bs~j!}lw&o^YyxDnUCkt6>q0`Y}F6FcM4IAj{NK$dMC%t;$enX>IRwq8*J zdEFBb`I(;0=0>2Ex+ymI$MAV(f3g0M5&3U!Bp6)i#>;~W`0cDR@V~Z`cMCkR>X$j> zrWoM9;4<)^C&vBJPKT4?Z76rI9BdbjLkry#Z`Iqv=X_RT^x^@clVOVc)H6Wzzzg(h zPJ@t3@nEMZ3!*un$O;=DupAI%%I*xJiEkKtV4DzI=eL;{?b;2)!0H>8tV5aphXpU3B=(B%`CVf^X`j}&C?f}+I6Q(<%K(gH-3ob6|gU-1ZAaVaC 
z9{W&*UbE%+yIP3NERw(pI*kbC-MBri4zm<~HdP)R5X3gkqw@@;kC}k-B}bH=B+Pj2 zIgGa>_Yw6QT9`19$9)cQ$h<_l8b(Y}7j>^}o1q{l!w<`HD(32^UBuY<+O zbG+zfud!Cw2?WO^m@Q#HAwD1o5_hzd?2X^Sa^ZQHpnDb1#%ht2&fj=hyBSLJl2K_% zAIj98RMWPa$O}j`Ww}Kf5FHmuvmI|}M!5mh#uwm_Y-Z}73;uYjT9zJ zqdVRE_|`fampUxP`>Ax+U>ks8`^=CPbwQ&8$6$8A6;KqZL8Fie>H}?o7uzJc%f1nm z4UC^^4v(%r zL$AsU_|nLQRimBxKe(EADNLHV<J@AkU^Il%(CCgc{4%3n`0@re zvgQH?o>l;5W0w68l?e53Bw>(F-tdMQOpSyih^A6SH z?=uHsWtuUR&pOOs>l=>2G&d}ET8L5d7=j`94R~w`pMTD8FZnQm2b}{&cqt?rv~nvz zarbOkd8r6hF50sb6b4zXy*F`-Ln35&f8vS0K932RA?Uj%0OKn}nZp9M5S~lB99xCx z{Sbtk&aB7Ujx^N!7mPR8gn*tk-S>s+lB$W{DT7{&d-lhdERYa|qo(rgqZM<=Nw+T4 zd;Ni^cyM67$_Ab|Wa6)uC#3g~8J==!fh)b{P~^M8nA^cJ<^XiOivNo9Zv?E^qtB!oA8 zM*#&XSUqe5Stife_uxnt=x&151u@vQGnf5SGl1ps^jVlUhW5^tAU15y%YWm?%IO#5 z`iZ%a+pmXadvY)~+5!6>3NS+7v(P5N6xQzbz`5(4a5T6YwLk7fQ>#3ZDieTL(^BE~ z+$HFIw-rKeHey52O|Xw}#GNf;gu4IO03Ao%arhqmvZ&+h*O$SXr%n*Q<_r3`(!QEU z2dfb5i93WdSS^K1IQRD-5MD3Dou?eF-6y2DZ#Ta|=dGXcE}#>R`QO9X&?4Bf>o42# zvY7HQ4&pT5R{q%#QAR4$58u#dhSCX9=18+MWDA{#xXgVAuzo=eHeG5XqY@P`UM&mqe`x>y{chM0AP*5+tiX2jkJP>+^Rd;S8Wk5P zz?O3x!R*;u_QU=t=--iwH>=&*+6WQm-SlzZw4z+hu}UQRlgiM?LLScTGiMEsu0bi~ zd#sOL8QR+yVqaqfWQ3KVa-KRII{u0LEQ=uvgv#OB8gsm0Ckb7<3wXP9zu>FpTM!>v z1+xG7WBvTol;@>{np|A(uaMFAfp#J-V0fc9T4@|7RqKC& zNm?=O14Y7)Xm?P{^uVVlf3TfJzag#*;EQ`4Yg+r8{Vksd#*dyuJV?i8FKO4g&eA!jsv)4rKI%c?TmGqF1UP_REK=8M98XZ|Vy?GP4f0 zd_Tk5E*<3REx;OjNvXGe2rGkA+K zYd^xjPbVPzbT5X7#=uA6#c)~T6>3l)l&A1j_WJaGDBJB%8r-FshI?_u;Y1o715f^D z<4jl)jpUo9I43pwiA2Y&hP1Qo_^-Mez6J_$laAO!s$f4(_6%l|;_K9;>fW#;YhQv= zk01J%<$!OH5L~#p6L|D<nYlvYPO?>mZaed##=r4VyhuZF!Hor9gTDGNaN6j`^e18yc4;aTf^cGuoqXwo%> ztVvd&I%fxLqP?Mjv3kZt`V z-jIZ;5B*@0`d{+-WCko)J`G%ZC7C?)&+Lj#d{{b{`c?MRv;7xAPPNbieh0N;T_&Gh z^=KWeTx3Q2&7b){&ep|syw0~zcU;@ zZWUt66luO;z8F_|QVjh&2Kn>eo&=H_Y&8D@P>wNIj>DF~epa7W5PjbpqH^J7F!Bv#?e+@5(e-6` z$;kuM-@jm+M^%WCQZve1^}vz-cAOhgh^}u}1JJxR6Jm&Ww{#H21Q$GZx{$tq*g)NF zA#TcvPaxpDpOn*`?DS3HsW;!wp|ieMMETij*fdF+33jVO&$kI^>yydO|Ms5tr~0tz 
z$|sB%FvS0!Tfk4VF-%pU&vo6`CXA74G}%M1HP}zl=joGeA|V2TQ{u@$cGvRV&C&8xcjf9mNPwHe+0K5wx~T zasgvcV8$BC;H-H|N{&y)b8;VXm~wB4_ikeRs}9#jwSv)&nQXL;C5SF@fXFOU;Jt6+ zFS3}&ehn{zxVf%SDD4FuM)Wu03hl-3;CNq7ev8N8XYnuwz;?}U0%Q_C*{@>u?C%C?MJt|drXxfzSA$p9ZMe&275b?oSSknCz zK8iFzb&oGk>IgulcRHRfwg#OO0{l%~(L3)cZmm8IE9+lCO|%H~Ny%{8cpdh2IKgb5 z6z9x;fQOjxIk3!?tT^n6jn!o^8d8BXE2AN+Y!$R;KE%zZ&%wKLWpGSzf@!Hd;@@?O zt~oyl#+~^vv%HaI?%QFsxP~6OUBx;adT6sIk>-k?1>YCK+@$a!tXCHTjROXxZ>1B+ zm|mba_df$m>wNU_uB9U3A$Wr2w@FGC20iY_P*E>T?vfBVd%cCVV;==r&9%cvc2*Agjcv_-UY0T-3fD+#DTKYQrPkXiHZ0t z>`-l{LfTDmPu&3+shHSGeHr0pt&X;aRRYwXYfMeh(7o z?qS@Qkj)cb@dz)Lv8H7VgBv0jD?cIe~TTe02B;wskn8HSrX zyzpGDHVEg|;pARJh?QaUXPQT{L?t;hQDH%5$yD|{s)vm;ccZKD7bvP5f#t#VXt9mC zSmdLq^!HRet$mxXF4IZ1UF9HdhdbWcT!B~IMY!V~IxyQ@go`?#0SgzZLdVJlblJtd z@GYkWQy$9U{3G#XyI~2Pr1Knliel)Eu0Fh+#kg={?A&avfyV5AKe>B8#3WoHHzJfU zXX0x#|5pOfpEtwdIr(T@)=j7E69pf=pRizY0qmZ*nNIA8LlZ9p+z=Q5OM*4gUurJ- zH7gNMAMQZw>7Ar83G~SrooW^9T>Y`5#QAW@{DbBU^)AI z3QDZP!~478+k{5qHPDW;Rz{M`>WcW=L0V9jb05>*M8SNPMI3zShwB_VA!O5Y-YnJ0 z*fOynnoCk)Q)L>McWi_WZ~`WRu)uVl3BdSZaAKd$PToy$KCFdCyqgLEzSY=a`Hf!` z$e1&~GckKWhn#x$h(9fU6y8TSfwDpxi1kjS(aw{gNBjxcF&EXdkl&C$Ittz9pW#ri zFgI>b5wTv~4iCkyfqa~VVE8}gc#_;s&Wzf_;pEkDW?>iasm&0)tqXwTUM5HmNC=J< z|A!N920&Y502-BbqNwg;bjy854DPVkS?yXVwM@a7Y)Qe%=zeTWpGq3?yy?Xc!kiUj zacvKONWLg-C5~SC{1y58z*C^(6Hn$rd&{9m}rJqEAHiwe9fqESCI zgt<2Nl5xiau;6hO1pKRm(hp%&Wv31#JI<;aH39DX%%^XjJi)nZOh7!&j)Vpt zgR$;SYr4sENnLQZI4S^L9+=OozA)J&b$%5IZvl@lY!t zWmDQ|#7scP$vWV(dJ_IN%z~KVgBbFp7!T)lVdS$1ydU-DK&Jp~U7JYvuQ-cUPDfz# z#BpG0EY3MBxtH`-Eg!cGbz{_~I+!&5E6y7&fqj-8yaTKk6Ce2o!qhL&_+t{>W8GH% zmYh<&I#SO2<@_4Co>si_(+p<@iwX7w6yx&s8>qjZEu==c!#0OvBCPruURpHsds&t@ zH&Rkysosvxei5i?rHHCF?pR~-1bn+XY43|6-Zl3iXjL}F9Y%~T@}h%C*o5(qex3-q zEhUMIbbm63TmfnNRLXWE`BY}qA6xbtfrqg+SUV8h6kG-mJi}4JG9AOeMnK=%sqkOG zL3;4M9Ms%#pLazuxyzWe=9&6x(!)3RQ~@`}nK#c+Wv`(pU?@HX&yI?6 zp)$i{hMqrllJTJZuWX3ZaZ5}a;?ta?F?eaesNlfPU(5?^i<^dR;UuW?J5&)@$tJ^m zm&LHPu$A&z4#qBS2RKAXa+mEJK#J|Cw@k^w}X=6Pv&`qaDRhW#RP$ 
z&+)&VgXHYZ*NolO2xAw2fr~wt=$X_W6sT9JYvfeJ!jF9Locahv5B(uW7nMVI7h^50 zn}^$t($Q5(ovZL_ zi3{vs=nsqLrIIP#T~IV98iOSMCfGT?;jitB#|C!4h_xSuMn@S=@#JeL`2B>6rhH;v#jCKmpb!Nn zx+t`gr2qvgN*DiaEHO{6t%+|x%ov!?>=f?cYhOhACb5$~X$Qc?P*!xS$4*BmJ=o6(r zFdbixU+V8cYBhl`5vxHr71>VKiOA;t!~LnR!BnK4cRcO@y01Kh`s){wM$hs@;ua1U zt1RGcwT!?iDF+*;kHD8u%v5qS1q zHmH640}{^FAa(02CT6v4FkO|6cVDML7R^F~5&@M<=HJ!((hq=^Uy z`+md2_80hR%whgojYje|Dii{X7ollp2EU8VI3L~pMUEePg7f6R!rOhp^!s}UmNN{3 zb4I(M!1V^~r#?_Jz<51ofAECmPv|uf<%Wa#aD0{sXTG*y{o+o2xW8)*T$nrrxts)y zJ=#r|9MgmaQsSKAT#9AB`uu^KsTdX`#$|S}jO|Ke5OTdkM<&M8KQ-|roOw9g=6;2z z8Xt(nOg4wOPzg!dns~4A8#%bRjou2_gwp%o&_y=SuAQBOG}01tPB7(=hA%!9qwOC^(}|O4lwSc? zDoY@ycM%EKi~;L*d6?7M4@>o{@%fVkus={sYbqEEH@XHVmwaH^s$Mu=y^%3h7+3e8 ztRUt`D|3&X#|NlJtRJbO&*zWK*UNm=6&iTNCz5^+`lW5 zh-e33ymmN59STEJwT}36Slf%*iHP!uahs_U0CpRzyJ+2;-5X1x+K>V+Byyc8X|Lt@N-g}=-MA;cAX3G@ZGkgu6 z?z6)4lWk$w1RtJV_p87+&f20t3 z!WZyg=TjV7Zb{x9V4a6Ge7rIz3uSZ8!~D)7_%g#87S3c$qIJ#mQjRKb4HrVPAL*mq z)!X3vqY*NnS>e+nElgRL0CC@sq1=oX)}P~J^F0@o4_(E3xz!lGloH_3n^HO~9thvt zKcM(t5pHwAeiYNa4^a&w+>gl9SW7t2AvI1?n4 zn?UuN8I~QtiASE~!N1IZtyVAE|M)c80`_;Pai_+t%>c8&19O?im*!L{fc z$H%KnZr~~z7v6{YRXFj#W~#AY2W~iJ(OY$*X2ivo<-66KDKA0s$udLKGO4k>qpl$)B~70&%-C=GW_l zhOT8qPySU>q;)CZRJ8(P-@n34>0o~9Z8Hq`84pX6&2W_Qs-k8(1DD=`jl13Pan2`v zQL`E9j)l_F>l4wf`y+0@*^TLMB)Ho*2N>6~7RPU!OA_Y&Ldk8FaQwp?Dq6c8Kl?8Q zr%FeRFZaOkNuNOD&r^IilLMFHaM=21C0H+Z#dEwOo>#*?GR7bf=M^Vm>RbY!ZZf}F ztwqw-N$Q}MT}E_=lCW**M{x08gNyd~!RH9(E}O4T;>5cl{k|b=wqSgR?{e_Ci#n(4r<@o6 zn5eBd{pNf!^=u9n2!Ekbw}rXi6AQ5T%RO9f{ut^)m?QXW7@T)yz9xHnA_%;LiPgiX zo$v$h&R>8_T`XbD`vhoSB+KP{Y2$+9W)PPt#Yx{%#p_j*iI=n@7EVpVj>0NZ)cAz> zTD|~*ixwVsorpJAiE=C&%-rSWxS{_B$$5~4mH|`nn5+Ws{}h9+f!Elxa|{<}QiIb& zJP_ln_~*CD2)^p@c++OP5|zi@c-8tyCe#C$;Kg$Q-J%Grg$px6LuTR;|Pt# z)}Aj!B(5H&1=iAlpOTmzc!_*1bRpYgzoT?$g!*gCT!_6x@v4&^JuJzgN)Yo%)r$$# zd=lZ{j3h9*=|QBjhQPA$IbUhjKfdX>7OH(h>hfKUArq>~oGcHvj>LFY2g(0q&gR}Mn6#5YtGZlKMD zsnD!dKt9dXK?Bus^!;OTd}w6>zO%%*lvj3mrcg?dE2@QI5w}soNm3wT_!H$Pm(hT4 
zhhfT$#rVkX8_l+Ohv)b?BzDqwymRX>N@tDIvs3Ruvfcvx{b(i3l=j3|!Yjxl{jYFe zVGCyM2qOJA#RThS9)|R*@)&aHGp?v(^HZEcPn--QPQp*YJG2K5XR!DEzW=aoysY4I zHk$#lv)u5#r*J*J0yZrU#kW%oVB(K%kXqRbvqlogN_q`WDZN7bt{^Ncw?X4PYbx*Z zmUqufiW|J+OS83fVO&lm7!?W$da7J#;fWq-4@(9cHY?(t(cvU06 zIb8(#!5&b1>J90AAWSv3e!{$I7hu=?BPe_;5KN!}H=Gye-tBAS$rd_L+halK7Q~0L zw^JcPF9nx2wen_O665#W|A1m`Tft|^B@|g6jnzxuf{pTWI)`N<;%&Z=mDARPmRKxE z^~k`D5n*oD{94fOwnZ*81Ii{XK`H(~QnyN_x_ z^&aftvwf z5A{-92XgE4VZu-yEuQcd#j`%4%>8RPqcRq2NDnDjeTL~%iiy|pQvMJ!cSnB?ZvRgg zcWh^Qj+@)aw)_I@uYONtJoK>c$aB1U;uy9wj*&y$C-wD9jOnp?UD&c+307&AlHli~ zyzA%r{Atp`@Q3&@4%$6jXFioC6tlc$gAqJuXX6ON*VJRU4JR%-K$nZ1XN-|h`l4N& zdp^?w8?=`2*PZeMHQ9D3@1BF9FQ@U>tc@Y#-+f`s@zW^s%n`ROa)P$lSh`&93^|&< z9o0RwVKmAaLG>4Psx9I&7tKezr9J*|C z0p7W257(uhpwI*jlsoVMM0D@r70kp`jmgCRf)p3yrOcljRQ(01`VmbVeWbbSGkXRp9R$Q3dt zg<^StA=WKcK->NKkg<9Ke{#_$Qd(>b^EbSu`Jv4Hvu`hC-o6D&0>-@FF#?zJKErIG zkJutM!n^qJDsR-U4Ttx-VWyfJW05A2C+fra=J)`teUyTIs!{^N&w+h6SKxt5U*Nd- zbu?-7CWZ@h$oMIvkWy}i;SC9}SNA?z>p9cqLvQioI&rRjW&#|Y)=}PpO6YsiHv5J$boGh4``84I>U};O5C9_<`+*Bj41( z>p!Lt`ZAGMwl9ht*ceCk&2(YAo;M!xV13NZx#%VJ4X&K52E49-YxPq2r|BxZ9;-|r zGv}npJ7bW~@W&uk8GLd33syuY69wyLy4mX~))bt9Ez9&V=kW!6EFz6vhK2CY;tV*| z9K(se39zl=8P+!}MUzC ziicRDE{7sTaraIKlY zdPFA0otwXt+5474nDA{he>DVWFCW9;-ICnC??L49f?!-BBv0i<*E0@KI=*Vw#&1)V zsl50LoIa$A`WNiUrmhZH|D+1P$0nlM8U>74vK`-8XQEay<5BxG(dwHzm=Tai6h5+f z@sRB`jlUe3|)q7l7XFL}>e-fd^zy(`_-ALB`5~SWV2u?IMj}u-+eb z?3NZ7bdSQhSCX(mMUiC9_a|Y`4R`}}>Zl$RLniOf-I1 z*{*X1XQK*Gu|AIfVz1%L1JTgg*MV62nWkq>z)9(YFc7JM_u{*8Zm$pY+V+!wzO|5@ z`j5E?9I$ERE0(%OW7hSBL^Wd`9x^=u^HSU}QO|&E-=>0&+vk(hhfB$1@ddd0bS*Zh zCGs~MHN*a^am4*;2)!J|a#DUBFhg35oA>G-uW7KIm}#hBL(xO*)_RAx{_3Fd2`Rz0 zyA^nfW9})PH!#n-220BtV6EH}xWM`f(f|FS<9~Rd7|Vc0mHdU@r)R;t?=fiOF%_4Y z*nr|T0$(e{1yhS!af$p>OlLc*&J!DnwwVZ8U$De7&tdqzRsfO9o*}aLPD09lbarJs zE(0ObF*L$k@2>{`v2*y%t~5yQ5D_%&&!C%Ht6?u=)|5YHXO6j**!rg)!b&gG&KfB` z#7w~SN(X+&78$|C+5|{rIU3IZd2;rnA?Pp5P|sA21SyXvIC5I{_ z)iJ+-s30OqimP@L!hC}+cz*Ob?DGEvJ|h>fvv&kUj70?s6>Gqw_yl7M_yE200G14g 
z!yRm;N&bZ`+A+rORwVu;nEjx-SaP(xQ{_yfjwmB$H2Urd8pfhk;Li^4wyw zaNcrJ!FpLYdS=~bSVKewMNNmm+$uH6@sSXbH(3tLIvD3|`T&2rMK>1g_(tOzGWmn& zufT};KH|A%6&_R@0G?kb>{SV)mj2^eNAe9U6_)0lJy_PuRRc@s>hNzjX`)`8Em-OK zGq<`0`t!0-$=e2}&rL$z3&praG6QdBtwu||5tKdqf;Xsk3bLIu(5CPv41V5*U0Etv zxPKUH7zcJ@Wg5=OOo9!Qvq>Qr4zapYFwW~X>@F_Cw>w->SLHSK-bx_ROC(S}NSr*2 zpNkp&?))RF-e{ZYi?onHKQLpwoEp^Xm`@;)qBw+O{@o|0Mgkc7WEOfgG(nBn z9pbZX4w13*hl26)^!vmWsP5$i3o_dIo3dZy!OD-=VD$n2y}gBrSK@G`mmBDv^8jhn zx(&~_RI=_jW9DEFcsD< zG7!LD{R((1$JkCH!h(Yi7EqSj#h>O>0+R78e|xnErY&+p=UWrdaE3KC(0Y$^XT8Ix zOC{AqT!(;24uH`EF~L{Kw|M<-D-}0O195|isG}z&xGb6oN47gcgw-&9p3;st%;kxK zS3L>Uct;QKkro_U=z-qy?NmWc7A~`W=NhvJa7$%Q?=8;QcQOVdxkRGqrwp%-rjo*| zd^GBC!M;`ra;U+Uc{|xmCQn4r`YH`g*GLGI6+Xfdb@tp~zsq5-3Y=Rro+|GC1Z&QF zfXvl-&|IEEA3kiQ+w(K<-`KIl+XLZ2+Yn}jO@@s@zaw6R7RXqSp=w!Vygi`m0@r`|WT5%wO%n;Eiw5_|F~C znveyrcGVD{Z*7pS&F1@)#5sD@m&B|Kg=0sykrRe}sH=I8ryy&9D-55bMX3;1@T`dR zUWoz?!D5K2ln{8I9>evnXBpktDfoW(by&(+O1|b>;hC~4H5j_WI>X{zbi#Kusve6k z#h${>Z>bRAehIcW_OL#YCU(Z`hdD+~@OA}b=^Yn^Z#%^V$9^JT@I@O*Pdrqcd6Ng5 z3qiO)7`sOTaqmbtV=Aphkwh8p%33|##(IGw%TrOEoq?vB`;%IcN_2DdWL?P5#P?1S ztlS?0)(1b5XOHJVy@VgW3>^d=gK2oph@+SH`@#I(6UkkxPBPY|2PIGN(KgBzt5!$j zmceznZ&4Z=td$THRRprW)dAee7$lQco}sg@W*{C~K+P(C^8bWi=GPsxhSQ53u=`ap zh&|3Aan)Toyju|-es009XVSoN;yd`Ti#f^adU44wKDiWQgW>~h4=lG0=R0!zbAGmX zU~wNz=ZkaWBUh2?)8*)Ks1zdn-+}e7J6NK8i_PABP%pm=`aJ5<_jM&sSGs|dB>qy3 zFG8^TZ#EruE8uzCWn$ITRpj0IB$Q)!{Z95h?radM8~K61!UOEQvLD4Q?$aG#PC_-; z0!h2#aHvoomWD3}oxm;FbX_0S*0tl^sj={zI zgti%Ip;G|UgNNWLdrvQxw82`PIQ-Sw3Rlak`TM<@`%pBVjE1B`?p+s}78(N*-3T`V zkC6=NO3Zmsjqxwj=xi}KPs*g2TPc@V_qY?iEaSN$R1L+aWPt~TnpxB0&nqbDBFX%Q;3A7|)QJ?Lo zJavmeW$7r~ulUTEbgvMjdg%;XPuw|81Ndv#;R}_iprD-u=G&yXtH+uMjSQz@j#Hsm z^aG83`GlGkOhwacA>n4?VTJsQsD(NS|<;o)nA1grF*nP)UN`XBV*7zzROsEu|k; zQM~{8LXxzlFqdUJ8+(}J)MoNFl=qmv=;=y~For^jPo{|vF zXiV8LAGItHP96@yFScDUJ>V_$-DpJ3HPtwM>L2{Wm66{+B{_xT2{?Mn1y4q$;>6F< zP*Wf*cz1g_{F?BdpRvCPzg`=U!Lu&FH*X;>(NiCr|AkXEn#@0Xk!6&59(Y##4b|Eu 
z!;=K&ys{Dk7wsYXh|7d3nKnE;?*Uz}AOpK{Q&2y3DK^ev`NyFnDBs*EMZ|>q`lN`t|{{R~^F5=ErcOcg2In;WzL;pHAGJbRnov0>{W9z?D&LnMv zR*<;h_N6XpEps7NUz(uGx{-udc;do@cu1P32Yi9JpocdVdR9b{(civF8y_0;gRKUM zL)dekQbaP$o^FHF)a!xwaXhY(TuP_=3kkN(^Q6b)U&F%C-_TR~2K~))VCi^0)J<)I zV6pQsWOEf}>juFm@fMIeuEhJy4gY|P z|BJw9J+fSdJwbDQA@0MEAyk~+$Q#`qhsO%*(cdzRbe&B`Zs&I*8+jWq%KgRV-h(i> zG>#rro6oaa;)wZ*{XE$dS+H+HD~e=$;)$xqw1>;XqvGvox`f@6!{@5gMk8!mAw`GS zuIBC~TX-?h37t=61p5Y9Z)?a9ciyq3TCB&So0Wv|z1B2+j}vi_KL>&xL!?^FgZ|vM z2_C6PaYB4PbHlvFAvP|e3! ztA8F+iyq;r=Th9l(eJQsLu2R^UPilRhJbU(%lQi zde7jkKKom)TMfgZ{+PB-0>tf2AaEJttcWLUZd#15A9vAnM}CqA@kQu!@d!Wp^*KCK zG8?=*r8%pzL>wwlWzYRvpt7nDMRs3=4f9rjM{5U!u9M*gT6+0=#A()`1)mho=ZHL`ky4;dNK&@I{(PA3x834M+SNi5?m^x z2iJ{?@a!)c^)c)6P+Tn)H#@1(ah5)){dYSQCyj#=2|2j)xSR?vSBG>vac-RVLP(vS zKsE{g!=>i~NK@J?I1v+o%~lQ|{x}16B(fe>I}7KoREGYomAnE!#ui|Gk#UC);H^bI zjE&|*Twe#Or!03z8&_}U0?;AKe3SU6yBt027-!qUk8)zn_paE(ZwyT)4@CakVC?f1w+7hIADt2Gp?ifqwkjlA zZ&k=Qhi7I#$-KF0oyCtUA27&~VaPd)n&PDqx(jW1#Fv2!|X)k?>ox=-P)W;ju_Rm7!jG8=rq zHPVxIiEM82iZ6KZ5&p4kV%6br6fiEsrYj$a3GL_ac<~b(lJg1(CsFlE{* ztUf2f=~Xcwxz(wqz~}u=Thtv9;93I?D+>Tje{e~YoKm_F!)w5579>-xRZYYKY99NvVH*MnyY~b z>s|hQo(I)3>FB1Ej!&JCPVxE*->+HVbxl4N)M>)4JNF^!r4$#nfE|LrKg4BoBkA^x zT2xXGr?bYUp-JUVun>wtF%L;H>xu*yT;IgFb8b*7oDcnzq`2>AFY+rp`-sOeaqis4 zP`c`ICr>2M1e$w<1rL5~gUMOKf`1nxz|gc6ZO$5i)o>8b`4LW+v24nfXxpb1_k_$irooW6@vr14*meh)$VK zJmo++yejt&J6_e{Kl51Pe!37=>j={o-g10TscHCfnH^LtP{iZ%hJ-BCL-XCeID1Y6 zMkuGD+rAFf8 zc`y{2jeYahv#jbU+KjcuABt)C=gALj-}8d+rBe&HuCBrvT5{aYNkW1ZCKsV`wTPfO ztd~r@a~opwz3~p~>R(v#nAS2r>#}L@dEP#UaPssgxX!5;TML&H-V%0)nEV<2MpJNx z>jT(kAtlJmYlKtGQ@MT2KlpIh5&kMWqE}P^oi1LM#54hDkaS;n}k zZ2_LPB?kV!40Ri1(0P3m^v*vCeIb72&4z`rcdj%S{KEh=oBt7y3-73ZqYaijZU@7A z4V2r@daCboNpuTi2v@Gcy3Q0F%Vrmcx23@@X9-xVUk9lM2Uw0P13QFAVdzf;cE|hT z{Y{B<%RdpplJ#=j$&G`!_l`WgKIBgpcC@0|dB#~-xETGmID+twI((G%gz@Y*LBQ<- zOqW)`mc$T}{G}Q7^B2PC%%|uj@WUM`4)FAH1}?k!=zq@0Dg}$Xw>)=^wo8R((o>tY}1d*b>AW9ln6CEcme`6&yiV$0-Sv=1t$pNv6J3C}6bq_lj)U<&cquJQ%@&JO} 
z-+*^3<5KIa!p*itFj71do0p8hzEekG_{J`LvZV~(r$m$A{SsWq-L3TEghE_u7K3l) z6nR(7yYZIyWz-oI2eD~$!NX0FK94lT{(1M-O%Gd_=moIzj$@ zS2&O$PHsJ1kW_H#1}R^>l`I>Q7T6q;;l}!ZMdMN_m|(LJ!oKI?fp{%^l5_$-xBB2> zQRE-}W(LDDdAR6oDclTbMg6yNutC%jZNIr<=sm`pWAmTT5)O|{7=ZQS&)WVo9_IK_shaJcG=(6uI zJ7blh*8z1By=NI14=Yf;;RSSV{XrBD40-VvKz?Bq{t0u3{~Y+Bd%%}SFgJ|B-;f{bDg7GD&zVlEP`%Rpi%)HptO> z!ghNFsNFIT)UuLb=}SwTI?W46-U_NBc@7s9e+SPzUpl#PC*$@`g!QQrEX(qf=s(N_ z^(js;g*}VsRtgDT%&22|#a{fnEtqUf9mDy{9ib9$E~q<)E<EmXQX3rq&D(t@0D zd{^d(KTid~>YYV2{Oc^-`CLX|8YeC|Yg_|kY6l>Rc`2shZBjd2T{aqCm| z+U%MQrpFoM(OR5SUYti>zxGB2yH#8i{p`yKWxp!njR8F=Ll9-Stz5sEK`qp!|vW%jO1{bOzzE z!6&?px6Se1Ljm;F%!U@>w>Vs)21|!;VZHnYVw{}~8CSY-lyf9g-BbCv?;AM(YGPda zt04bwGMF?}K()q1urhi98~#P}=GfXnysa;cHtdAMh997MZ84~KZX|7Ir3IVQ%y8h1 z43}Zu&-(6T;dXjCOmI%ckeiMCS#1??&HXlBn3~MnH$D>PO-Ut_6pcV&nFL`I83Xy5 zESJpHlhY$Z*fl=}isZ*YB@8pKbSYWWZpPTKE-1=!c_d^dh7=rP=Qu;=t9y(YOGfDV zBuO~%coe=zN^pl{5YA6mgy@F>_)sby-82JXk=7bCQg%gXPlJX}%W-nF6WC3XfZel7 za8_akzOUR#rB{DOp^|DG+I@iz`bSeO>q6SEa*^%M;mDkCIoMB|NJgyt zd2TTp_a27ZKW2h;g%_&dloZr|JO{^C%5ldpD#6Ld4szgEA@t^DQ3gsR=Uk)(k<(M) zic%cZR3^X^;YK2VsRB%P70`||k$8K02{dkbMDnAI0~Rx4<3Ze3TOL1kY;+@o#to ze9hg2Tlj@I_N*j#@tPebT)dAP7U}Tzd9Q=wbP2%^{j>1(&>NhXFC%F9TZk5eO7xI( z5tZMv5#_m+u;X45Y3^2lnAJI`TGtJ?L;t|{T3PP0)mC(9%)uJn*UXE<_G_$x z0@ZG$qeoz2)lxie;f>=IpW;yyUFN{+uEfAAeM#Z)|jDH{@F!^~B!jwfR-xBFMHve@k5f*&>_yJ08 z2;t=iFTiqe076u%;Kk7+FxB$k2HXDW*dY8AcV<H>1A$%Dde?69hh%f4-e0}qmthc>rU}tU4AoN{9c&)`uo$bzGhhRPny%{IEfuM zA47KNcYL={CP8<(A@0jRL{72}(AF2rvFeRK{h1&{x@M1nC#0aY_A?r!*MtAHK7<)# z{HTL?6hzHpJTEaxPIr6=4h^Wm+Ob8@bbdKm6y`|}Ft^9y9%Hh=U#JKUR zS0We8a@*VpyRV1QE^~s7O(|6Wqdh8L%i+2ZuHIctJ>rYiI0(u}7V7 zcykf7zAHm1DI+MZWITW7DJ&`VLh(OUuGjQ5~yn~Fl&#V8Kq_pods{GOh^&~5l@tuIYT1C`|ImPC?+sSwt=Fm2l z;bt%Yhtayfz@~H|-W{|htyYs@?eu35uxBp~!ep2%J07g`Cs4abeQ=)LBVH)%!SQ@T z15;9i>nvO=EqC+|^+w&iakL_X4k4jM9@f0qp zK8B&SVWf8LEO_KNf{~NP)AL(BX@YVAG{0%*Z@#}C^H*u0^^lw(KGB4vr^Ue?mf38u z4Mw@5>o{ZW8EkL+1R^V60O!IoTX>jAy?j8@-hTzzJ}>fc^GXbK6cyYFtpXPrS-5(( 
zmsa}3(e=07siz6+wzdtD4F$?%m0TZ8u&f8CalXU~Shqv_F+S1khy3hRIwC$+u<&9i zHWvQFQ-f!T_;2Qk`J{p8>JMU2XblAF@qwFB5B+9R0zS_T9OXxuYsV3?Be#OJx*EC{ zmEw$_>pU>$vW6sEyayS`r+1;5vT}XNhFLf;(Io8+4}D$3E3mU zh3`E8&)ov~Zw;#OmZb={$6p;~X0*VgacqvC`~b9jBp_Wagv6ddLY6jf2ML}l&QFSf z=-2&v8W_>{&k^x2y?}(cGWZt~pq97z(%k~9FsO0k+80^)-Guvh`pK}n-ckjfz za-+CGo4F#hWCY(jOQFG73y&(^!{aN2xn(vjcw^jQux=5D(fSIEp2Ek0ppE>A3sbP= z&^-9D`3q*UUYBBeG=_$*Qcv&vOM>G9an6DUdLs4%QMxn$JtG6K?6oere$R_MyETXY zcu)*KGJb>IkP&J;l7*_51!!I>!c8~00l5YMd; zf)~)p*-^Z*wZekQ@v&6N-xIGj8c}z)Z{BxER1m);mTC(XfMwSQ2#guVd&&9OwQLTG z*v9f>E{kv*RAb4VkG5!S;!m4Tn?cOgU{LV$!LcvB@%X1B{2y_iU}a^GpEj1lJ}pV9 zw#vpWLPEkq|NFg9OH@oqNJ@(R@PEGKg*1guS{Rz1xoWIpbn@~^l~ronHfsp!{r|oa zm%Ale`7KXcqiK>PqRFWp*>ELkjr2P8pp@Nwl^MqBIvx4yoBpd)C+8H@i|t;iKlN2h zdav>}aX@AzZ`$wjq%|(%cpt~k#4X?}nXcyOSbik0#0eqe=lFUV>@O0CZl1_>a zCi#zPNNTGu-q85NJjueLF-f@Yd{QxqPcpVFPjniZ$d?rC=XJ;Oc!5*?hqdzxiYkcK zHAv1m=bW>onZ0@d0Tr{Db3(Kd%4&)S%M;bNT;U*Wc%V z;QF^_M+ySoMhZN3LVl&LBT^#;^-H4!6Lv-l9u!3if)gSI8x0}_OJ7C`R!)f$3@?ZjXfKEoIOj(S z4mw8({&+?SlEk6}9i5SadoEFe@Q^5h+sY_G$)+el?_BN}Gb07&u~C90FCqmg+~1yc zCQ^|2n)_Yu*sHmB8;wZ8nEBkXG)D?fbH~vY7Aa5<<&Km4`df6AAZ%NtV0L(vpo%-b zmE7y0LZl#ja+IJ?IZ|+>B2rLsFH$g$JI2z)NI~lAC_$WCl;HJb?!38k3%Skxy}n36 zc|?>z$tgtd8(=jlj6K6egL8IgjL&`5!hJ1@Cz?&lXp2|jY?BeNz- z;PWU_@E`Zz==SGLQQYwRkJo=^{Q3S5-2cEyQNqbL&hT1zUi{{o4AHBtsU^w>97XHa zHwfQVz2%+XBJO)Gtyt)(XU>0fMVz>FvD10LW=}{Pg)93P^?#?mwwjwM$aSOfu=zrFC<8m*+TiHYW}5{ z9X1KQJBNf}Q7xsPjokTb%od6qYKw&$SvA7mauIKqb&*i@i3;z{%IXp(LzzEaF`hT+ zZ>exgxe7nM?20fneZS~VN|NZ~RYjqH!W7Y?6H|rvInUMm<=1(I)L+;pdC6Dirv$&Q z>Q3o+rw6>Hh3iF^2KfBf$ri%R1rzz_4(W^L+m7P<-BJ)1OEaP=Fmk~J) zKNkM9)Du?ddy3Yj06({OjOZ~E-iU}LqBUZ%d`F+_!YQqDgyYbHf4;AWH^#R|*yY6+ z9veA_pW#s{)H-)m`2K*QD55o(w^Hp*>9H+;ecjsq`I-|QO6Hz%yx|716 z3U_(qV#S36?cpVb=9=C|&Q=Mfww&N8fQyg%xOCy_qoalG7Fj%*$**}$bxTTRzJYMU zH9b++9%X`yet#8@G5-p$k7o+kWp?vPOWUJ9>U?y+!;PKEHLO98Lx#b4fPO?%;|$}HiX zyemB2nC8;dE_)O@R>nfCw zP2|m)5yC$+ey{INb~W!+>U~?G&M5oN%d1tI&}rZ zV)>rMpGW%H_2r-Wm!_Ii1-Ub@+TM&_o%M{d7QBXW0$CzuYedRNxWlh6ooLy^z~?Ft 
z@-<^OJUWxatPpY1z_#hoU?0!r6y3znRz76g41aRK;19dYunfI2Kd_g2tm$9v+gxU! z73rU%PDdQKqcM%gv1vgmiXROm+x&cq$5{~$&#uF1XYJ?$hae(!=CmGFwbkKL}7smnPv!-zQ~6br#NOf(TC>dKVY2a+tG8D`=H?I13c_-3ucX1 zWYP+a$cW|IH1)F$EVQ{l2=R-xfr9v)Wl7QNEG z1S5jhGP_^ALdc!~zjPZ!Hmi=pehW?d=lFLx_AM1kwxlx(Y$1rP9zkbFr{L;;?{U$g zhp?;EfsS!agKb%UWV_2t^lKasdVSOI(!qte7#cB8_C1z#Sra8LuO@e31Z9S*;mxoj zF3(Y7?wXAtOOper+j~f5Q^$(F8k+(hk&fibB^NT7TFZs5e`Z&tJ>+S@KRi6gj!e}ShgB(`u+_(k zKJb49W2;7z%jP0R@QGu-+!L_6AHTw2y$oHVbrBS}zGuR$-*|A&S-hEX6%Snbig`0k z$nxxFj*&M4vlxclx>(5cW%`l+92uB3!IWH^41rw6-nk#Z;~M$kHzK=SmrhWIu57tMr{HH`uU?^ z`4t#!eTxHoR=|}vvivEY&#^p|%cxoO8LsY8rV&rQh^L|<#@+EEsWNGxs;W*^(-yON ztB_oEjnh|YvtGIjp|asCW14DDj{j7qe!-eVD<%XXmBS7VxnNFtrv}aVnkm= z2Gh4ki=dO)2Y=FvFu(FM%)6;e|1lnuy7KMj7XyoWA@pwAuM_y;~ay`n3~gzbS+h?{}ipNJkKleh5|C0n}fP+sg|P z@iS+9LH*r!L@v&iW+}LmtD6;Q$FMmW(lsEFCI-agaRuX7?S`iEwXoCx$j~Nf+%nb% z_RpKcWEa?y`U|!s$$SY`AF(FUP7d5yInTT=P7~R!cnJHCtCOkoF2bg|+gP)7Ds=UF z!*)4uI?lfps}{M!_i6#%zdM0royt({iRNJJ-HT~+rKtv&Sr{AZMf2|k;)P)|x@goC zrfs)68Jej^TQ|LC4ip=c*E^O$^$9|(KAyw|dYH8tTM7$xY#Esuo@mrj!4`MC1F7*Y zWLH2C{czff$d}t-@$!Z6OLh=c4MoC#9Lw+x$HE;n9!bw6ay(~qJ-TYT6yq!x#q4tU z!8)&!AnQiA7FRyFjwpfmU|d)4U~mz;OFwrN^DoZG}3@)dj%RulI3u#CM1(TnP(G zqH)7>E?;Le4^PM0QKwcPTBODh%WW@Fzbt@w$zBBM5eVOs>tWtYZ+so2MDzb0#Fl%? 
z)H{3;_YE)RtA7L-NRFV%mCjVJD~0pN>BAEPPmoX=LB-p7L~eB;iQ#Ne!>`{nUPY^z zj-~ai!@x)W%7bMfvqO`P3V8_&f0)r#^DFV6;ctjJHlLS##D|Ow;n9Y_h|0?Av2&do z-IseC&u{c2{^!e>Qy=O@H_pmXlluWA;CT{u-6dFFW=TsFR$}$q6!uEX0r=4X7?CGU z1j}qmT!j`H=g*JxYy@PNY%Sfmu+r9O4Xbp}X%XW|`kDu;KFW&KjSA zfH)2I#MeXMU}Hq`*jNatKg4NW_*7@73+dZ~w7u|mrXzX&Erpr7R-9hGGw>+WqfI+OeH`bkH2uI(xQ!w(h7C* z>wzV)=a^!<)||t?f!=h3p)Z}f$cFq$_>D>Ho`GmjAiZ-}o3c^icxILpnH%_%_1t)d z8)t+o&I-XgE1bEbJC)Vu^rs)Ez5|_x?{GU_ok~42qsG}~*e!Pv2js@%&As99()>1~ zWC&z+`4(*FG^65|zT;N2zx-Uq8kD_o6BCkwR%`n*PcFY?k0>YL^2%P=k@y^^`kiMs zO2%SVT@JISOoi6^YT<=IYf_f}l^^uTh0W7G4$-mU(2!KizPWe|Zd>1nLy1nL$jF@a z8>>iPwp)_f(@x@Uv)9mIG>p+*3t{@=B0RsjpIxH%3%_2pCOZADc(>;a-rC{=CBYg* z)=!U&k~;up1=qo~U7GHhWKQ04bH&p?pKx4-GP!$iB=+2S0L$GwFmR(Rr5Yd6zsj0s zS+9WyW6m%!<0jzO*Y_E(DsQsg&6g-{HKDdHrev+J5nbnc77GK7sPvr+=rHLZ+RllB zqvs?cr2iiC%{U$H<<+R1#U*?bX2*U}HD+oPI&s5{8=(B94BsV=#p4;LkWE*I8HW_e zuKWwE-lh#;sj?iFYHx=XwFS%zj(0J7z774jj$?7E9vAxBx{{?wyWyq{$3z}Encd~- zLUP-BVFLA`X9~1P)rqYbqAy3zCO(6=Gi_<%f+2`GqDrN%Nz!u(oOXNcAGV1phi9o~ z)NH#tz8+^mEcQ&t33veemzy%<|LlUtW>a8jK^a=!wjr*A7UbS(eOAh=4|f$h((j2? zZ2yHYPDg$f#(EQa`O`{Fot%XE;(?T@JOLj9njqb|9uiU>q3N@mxKOweN?i`2r}K2E zop%9y6^=4##kXM6T~qpHofqeiWHJB8KFnHDg+Z2IaZCR>ym+|;6y{mbFEKAghtB(u zEpn&XF~!$$a!nJ2nCXzF&UV(cQG%X1U_mMu9K-ZGap-yUHuL_YJLJy1AzI;n4{oJ$ zIa@!QaPt1Km^jOre6cg4{3Iu$a`P|O16HJ|hLfR+QQ=7(?*+RXpBTr%5oF1_9ylV^ zg8z;t;--E@veeF!rtj7uf2_Vj&WQEi22(mog&a-!4#h80 zQSD^_9Y6IR$o>tWn;J!s_<9!%uZa;=+*2WAx~H*Op909&DPlC%QHhus|6%kL!cpB! zlI;32f-bDw!famQPOePrWdl#$g#t@S472wpQ)N1#RZo^2F0e;X=x4syt%RMqnJ|0C zF4%wGmiBXfWTyo;;LI#dDwHn5!fV!)e3c=cb5v;pr>c1=*URKgi{^4UvY4Gcj)Zx# zi}kKlryC7|;Lh$3n4OmZg?3d~dCQg_sS5?=(|%O@jTi~w@~yijXptXd9kIGKlbulb z2xejgC@yZt6K78_DdkxZyQ_g;Qf)_ErX@i0;b20p$8l6e%*qt_Slffr zZ;o&}4=ZwhLnhq#p+Qnr-!Y2GV(f3K0x#ZrlEC?Sc(PZK1lcsg2sHuuSj1)XS&EVO zEB%OUPZ#{?zlUvIK%-*x$ha}?RI}ZjG+%WotsnCO{0`}18@i+WhzB^MmCNSi);2R! 
zvSILH9z3g=4*RBzq!U(OfVq`Da4Mk#r8ORc#pWQYV>bv6vWjH$vPYt>7D*a^r-7B= z_@g(M@#r)idHOMOGA=gfu9+gA32HQ^BNnA%#y>+^;^s;7d4voczlkqzr-NCh8rf=9 z0A73SF^?|+x^G>noZu`R39iKI77ubc8)&9m6DHfP0k5!LW^PR*5ba7(J6OdH+P2ij|5`yf!lT7C1f^)X94}5 z!}LF?gMh!DWR1NFeOP>(-y%JL67S{6l>;W!!zmRP-7q5S%8tUc%`+h9zAH{&c^_xX zU&DO{neh0o4Q!gI3;NgRvC+3J$=Ir5oU`{HtEi(%ZY`0Q3|$bfq))95Ql3#x2L_?RQ2=YPiG#KkgH^qKSj znko~u=qoThQ;8gjgKw`5Tsd)vL5oO;7p8a}cP0wLWRy@f%tD2JoQKJc?ev7Ns z{K)bQRVtU`K|g=)g^5c~A%BY#)!k8nkrKAdRZfQ@rryQ89PtmXo*PMiSbbnr<}Sm| zVtx86UjqZgOvq$L4;5~Vq)~d@b9PcZI^O-s#O%t%jE{xTzc>&l9CRUuWBkab_t$Y- z>HvP-Y6mHp!v^Q6)89^>WcQqNxOAdF%~{E#0+)F3G3~{kavjn&dKA5+tVvGtQ$S~w zDqUL_1(rX#eChT}_<_@Y{rJ%gdecH-SG^=X8gdu>T*g81WGh_6agEkxeF2m?14@^p zK((ljy;3G*KYNUWnje=q7Loy(m)rC`C1A`jg=tI@`GqnWQ%+aUy3;b}tl`2);WJ~0=g2{wfDV)7G1NbdEgrxyc zR40aK4!=QL@zZdvya86N=X8>}PIPODB@yl)hTX6JLLT-)XqFz0yV=id(Ag(?zt53~ zFFk+{QhX?H3LmBJ8qjfPx-iB!n9SX2Px|$ipjPW4X4R7pc4}r2N$aQp@1qhlUbPTr zjxr(}x6EaoEnD%_n@?zPB7lg3l}Mlp;c5nPvLVwH9?4sf@A?fm)Jj87sn8CiI+&h46DyYb(QhS{IO)VZ7`uNI`L-aM->cV(zvo`WjOYCjoWY}8 zHTY;X&6CJ;jHn%x#pu14)ew;HL7v}~rz#lEJaI6jp6#W)9`}RTKT;lzB>42TiXWZR zIfAY`=}ATu8Pj(st_TA>9^>9gHuU-rIXZa~!pIwcFlEzI=5%E~OwxCN6?xXQ^@=l9 zn=Q*`y)@$0uZ(TJI&IgGBXu2*;PP-S<39NWKgv86BSyZ$ z``qX0`jjVN_JT*UtMA~G&OlVXnuZgctHJVuB?)jL5I7 z{7Ne_TyhYccjQ9*&y(2oZXN2*@F6GDP08<9s&v=%KX6~di5$$j4XM`WAUfU)4R-m{ z>apwLdcOtf5j@Auzx3ES`{ilMOEFU5YC(f`y~ng;?&RufC76=EN!LUfmGWD}2_WjvyW zaH(@Dyx5^e`y%(Vm6e#QHqxE==wHp!6@={w-au0YZmy9Gwod4uf}HGFM$ z0@_b;bIqwH`24_=4LLfDZTXVqYNs@{%e6r|P74A8p5l;~1}rGg$8(iRIAcdXM(Iz6 zVEzHr*EK%KN+c*55B9>CeRBFM zwu*gNkr2&sZ0hj-;ss!P(g&=&^oU3{nzcOi0-t=+rF9A;$?d9ic6qBaQ80STMlW(F zIU8@_DSbk;BGib=aUaq(dlRlJ(V>I0yojofEp0vh6&>zz9A8gc(%*CpW^-QFCuK*_ ztHGRCmsWv&b?I>5j`LG=2T=X*o^(Ti4~eL=#!Hbv5`IXK5P5xARaVUljh}&kOLvQA z?TtkH&u382$bx>09pdVh0?aDQWL}RMgeO@>WWn+TeD>KFWcaywXT>$V$@NLkwh|maRKkU}2e5UVHLv5RBpvneIjnJ52bVUjVp5WW_-&)DNKTF&`SvoE zweTB3wCvo7#W&7#@cJgEMflR9l0YI%O=9hriIMGrJj@HXArFrg!kp?FjOrc>X=WMt 
zRmO+bEH2>uvz*3NxCON=Jn3r3MR4A35z`UoPhakFrkB@w5I9@MK7DOV7TiyRb*o;0 zOV3DhcAh2qlXw+l3}-W%7MI}i+9EdSMG8PhmvDUu?Nl4hy+n_bPBEi!9{Gv4xzk zAV3<;HN=VYlv^+`G?FAb7V_l>FXE9G{cPpc3U&jxR^Cuq$!-;K{7^(#q!~o|!Vbco z`Xt!L>62#2jlzrzd3bfrF^2jXl6uPq)+61Knk+RY%cDPli69O~f8L3w_~$TrK#yEc zR3?#05;PrSIbW3(bv~y}`nczK&S*|YbQnnJB7LH)xCOT)_+oy}C-|{72M!(ZrR>vx z>_4A!tUdG^!-liL)X|^3MFB0-OaoWnCa}F^NS(jAp^@WA=3Uoq%*rvMOCqy4PNEyx zzc(GPZzeS0OFm}EFm#^uAN=^Gjs2RSNES&oqUt9}SY~!jbY!g`h@X3fG4F1oaa#<2 zUMxo^a(a!oN8&+xeh^iBt4dbe=+mK#c4(5*3ZGuMPzm)wGO5oURh<&hu{Rah1dSvQ z-EN{B<3R)+rF?wpg^L5FNbl8RY%<9JxUver-WtXYnk&%_ z3z(C&THKoJJ5#UX$IicQK+@qSdrsGZ?mePS&aC)=Gfg>u-i{>LYrY&_Pv+Bg3qnEd znG0RO@sv!B=HSis<5Aq^7i)1c3-?ZVjC?OO`cZZx{xK9ILT?3P!X#t(193VqjYlST zXTkjmH{nRlHyA6$D(2SxetZS1djgpU9Wius_LW?)?_SJy7T`Ry< zR+-*i=S#<~jfdU-3Z(Du45m7MHVR*9)3_6AM1OvgNLywU=fm+LIUxcv{8d15SIUw! zrKuvf3Vot$u1d!ob0k+ye8_OW1TNlQis9qO;nm|ooUe5)Y!E7wgBb#HlpC93-n&q4 zu?mr0ZB9lj>Cv14WHI*(n^ERa z6+|lxqr?mwF1K?LW5V@C3TImo_J=!7d7@3~MjeKIadu>fr7JxWA%*$2ULZ0^=l?O` zH2uAoVbI5ha9djB`lT8uXl&-2-m!-LjehjUw{z|r5uU9!aW`w?^3Z`;w*P@;k56LfVn;$=+=Gr@6FeII z8&?$1U~9CJA?aZWzc%#%RC3Jd{j*w_k~Sef|L1eKR5Xs+I#ZjR3nTQdj0?OC^&%bN zDVTJ`QRK9E1xybW;j}eIgo{LIotXndyEUmkK4`l%v#;L zF+2YY8W*2{i2=%VTFgNZ$gW0(W+mDZR*!=mFUWC7if-HT2WrX zZCV~%7BLqeT&aO=Whc;brX;mum088wMpTe;p!AKTNa*g(R)$Ma-`DQg(J4jRQ`f`e zUt%ODTpJ&re$MbO48X1eOK=^h0{)-kVLss&L{fd#xeT%V@ zPw2J{d)O>5XU^MS1(B1*$nH-D)cltZr?O-GUV`u`5=GV z61)1XY295NVjpso%LQGD@omS2Z#Tch*?*3Sw%j_8<-TW;ajSstvNmjKh(K2lN8Hf# z1sBC@61T5LBoTfyg?C$^|D-YT@T)_ync{eOtS@QqI}JhOgYde*gWS*$ffFIYROzh+ zouBMOZ$IpSm`e^s|A`;GijyIcB|w9>-N$${j&=U{Gc>QzqVDcgu?j1pX15{>0&7`pu0T}*y=4QFyZ#Cb{@WXV5AnmM|FNw~^!^tbiE^?WV5 zuc8-)mz1z%QyAa!o;AIk6@X>g`c$E|4%5S@eE4>py)gU>&%R1UWx^xVI6wQIO*JUi_7p0~D^@>uEv&sEM)MaQ z#m*olBKu36s*kawyVD0?LvJs95G-Vv4I`=3Zc|czKZfDI(IoyE9<*`(8z%VRN$j>$ zCD!w;u~y`Pqx)TG?vN5aQgV{ZNIQhf2UA5AE+0VkQVsHTJ!!*22b{Rz1Q=fU$8KJB z0anjaq!(Q{o{08gw!28kERNOUD=g*H7)>9-aT?oH`t zindtM{!NN>RnT+jntK_q9*jbRls?>{WKP3NY=~NbA(tD{hrVVj8MfGyoJp6b?VF$C 
zm~Tc{6QV@ozZlcF?Q+!SO%tQp5(4$-p5cUBT6E6h5~#U*8^ab(Wu_lahCy+6I9m9N zsSVbEhEK6rF?fOFvt9w+ZFUebA`o6^HshKCNpg_W^_p;XwQ>7RY%sSZ#aDO1hzaSi zUg{C{l2vFG{$D7?|i!ULXX$yrb{!=jh?l(qjOaPsb&t;HrHzu+3^@(N63BKrcJzVw* zCg*N2Lx>Ab##^=N z5H-6W8?%CN{%Z)b`Ide^i_~BzYHDsjpgf^OyuwU?N1+j>5>gzI&{+%EuwLM zEWYnP%#PN-j6Xj9Ky`+Bkgz)2>5lr6T<`l1HS>9suhe z5B}K#I_(!9`_1KOlZ!t*AIxC0q`7$`f=`-eh+tKn1Qx1Fk%j{?uyExk7<6fYSk?r_ zwi=SCtu{DjP>ELZCc&5+i=m}Nmu9}X0D+vB-zsj9sVMMd=_(%mQ13>2@=wCKUKipu zItLcy6=QN$AcX{5cICrd{mxq$dhbR0nnuuxR=y-e`XTQ@i9GqVV-^&pzhWn=i<6nzzcFi00C61f zCCkp7#rxlQ;88V?@h;ONl8Lu~ll9Yk7yW5zzxT>i-s~M7=ltOggG)hRqet1vC3tFq21fgN({7Hv(w1@$_0Ou3U46(9P`mk9*me{^hvn+yL`R;sw5&^BPU#Lt(ri6?*YTjs%WLFhhsKRd}0<*s=N)+@uVkB1eb% zUA_;L>tnly>cDc(@uG|Sa$z0s0s7U9qPnCLGRK{RX+tB(BF7PQHRnea*%7iSuN{N8 zTwphcsWEMC7um9}JYuWy2N~;F(CPY!8~0qtZhJrKyD9``w|UXEF6X~Q)?8cb!5ca%LxVv2qf<8-<^0JRiQhqmzUF6ZB**p?+<0stL{fUdSA7I*QNw_WMJUkJ>=WS~b{ zkYp*5h07mcb(a`zNY{bNIi{5Ndk`&4(~wBb!GdBl@?5QeS+pV(6@CZOuLpaffZ-UV zO$Q+GJ`WU^*paH+YoU$f2gd4d!G>*t^u`N668tF<0~l8_=DIq3QNM(r zC_}kRET-gYQM;bMtY?llUHLW@ZxtScw;>~FdJLgQf-SgQb#F#;%~cHI_C$(lIc!w1 zB0*cPfx3k>6{T5H>s3GDUG6?;J7LKFdGrhll-GgDiWmqAaU;t=7Qlg}(nMigRq67j zcJR*N4c?SBr^^OCsFUJ;#;@Ip)?DGZ)9}$n4Od|PMs=bqv?qhFKVimd+{Ihom*HtUgY0ETehV6B*&1^!0L-N#N zUNlY|{snDIM-b^-;L;{$n<-%u*6o6C=RZ~ncA9U(VV@kTFq7v6{(Vr zY5#EJx(mR!PXR+C1-i7&on}srhoOv5?4g6F(Z{8f*%&8JAIxZFGQ8fxlf9CpVo@#I zJ#8PXO)#Zmz4GkjiG-9)x1!8te{2+UC4)ZVWQu1OiVfMLLXs~1^UwtS4UM5j*#RFV zcH!6VXh^vrLtXo$@kiwY*5#cvjWM4DLBcYu+897w<~_g(Y2|F~x1YFQd_FwnJ_n}$ z`{4B4sqER!2QlKh6UqH)ORPKl8Iy))>@*rh*zNAfF8dAr&gvx4OrQ1~yv=YuCvvpL zf|M`W$-map35!P?G2*|(P-k5~%=zL0%>nmt)iRFP7NS6o_+DftyK+8xu8*aBs2OZp zY+&L*6uVhAm}Do~(|f;nVnf1QaG7gP*4)!4)FlNSp0+Zj+nmYEZX@!(?FE!iHlt(K z_><70O3<@C!1w-S1)Wu0n037u*52o|hTqP>z?MO9U!g_@Px+ARbv*JWy_~;xa0$fa z6)>MRND@hTRje5}hV5egxKKBqcYOD8Tx0G}vkoc}V`FZ;R19EhYDWjJa=f!-4`x|m z50kft^TXG0`w;>1SYxM4Rd)1a`nUmDEOVMse0l@57oA|7PewC7)yl-}oGv-{^9bMC 
zK@)j)9%TP9uD89UfjzduhUgx60E(Z^VCj#StVVkSJWKbWU;ImP?lNn}{Gcf*YNPQ$E?=$c8|B=qQ7kP7l6GpWh&eSi`*sQ${ox^f|}??VP`cp*>wu3qJ*)_g$r zpF8c`vJmgi8bx>hav--`rJ(-4G}#()3r7me*q!->mHv- zILVN(Wx9fUijNZP@b zY{G9osg!HLRi+&b|L$*06t^e3YsTRB)47mhnhg>=Mw0ZcwfL@uPn;8d$Y-ru7}Ds3 z)E6G`W}P%0X=gy*CKjOlwiTe6sn33XzZ`fk(jd-bJha@LgmcW|REF^3C|Im-P5p}D@LVPq(YmXVO~KDC`Cd5FMiW^dZdVIO*Zh0VM2P2hh$2Ts>P$q)1EgtqWJs&=>gA zPM@yKIgGmg=V8d`IiAh6rS{vbnVQ>5#3H*EqjE-~#t8}fEMR~cce4|Q?>$3}y;@MN zsz)yH_rc@GsSx_Diya@!{r^jL^nHstb$wq6_Frts%GviZ&^LpBSgVxrVqBp3WDnE# zg3D%7`2?F5g)#%Leb_Hu$3b;Khiu6)pc}*AqUqGh*ruvUk4$ZcQ(0DIf`&Qe`B>7T zxvek|BuA7M_F&?8O_cpSfNE+kuuL#V;i8K%>)c6N6<@*mo-JB?2cUxv}8HIO?gfT${rV;5{620uds@R+PeCvnes z_M#MsRS@DiWt{Mx>=fMZrB2(9xzMG4YB)&67=uz5dhwAQnOv3zL)rdBF7+cDrkDfE zZ=QAIrGK)yVQ zg5dhkcy#svyMLq(S^qK+>TgfL*`3aG?JP0uy68qk#{{I~{2^HAU`JQ~YsL}q50)zp zvNd&8;7ijNbEqE*JXRbd@VT_GBF-7Sd)_pgiE_a@?^^OK)%g_wK{nO#f zBqK`r+T@$r12pAyMJ0IwB%Rlcd)lOE{y;U%nUjG#?^AxRd>o|weTGvWI+Ad4fBNxF zC+Zg#z=Vm;G+f1$e30tJG7AC8x0EFJj%A_W4Q)(5?Lzk)?Pp%-6tEL2eL1d+8a(kH zMc$p##DU1^XyRi|-pI^>gSo4ihf^M~R#$|jE?zR^!2=l%{d5SNeE}eGcg z?!v@J`(fDmADSF%gZb5Fbi>(=tQN=ED6H0`BgUU+Yvw(IO|vCPhu|Y??dXG!u3qHY zomA$?u}gq10w@qq;9v3m0{Qw347$0|B~5t{8g5HI%f!LPHSQ$1D;{Gz14&BZd3>hH zX?13$f`W}dRZ;x|l7`|WR!yJrHynl`Jq_~YHkVtgAVz&InvsFL2!K{;`ZLIeOk4E@ zd+raz)nm#e_?r>kBVP!nYVnv7B4U1T`;7PX`Q-blG8A#Ozs@2hDzo|~v_)Fbz+rnT z_R5IpkIupNt~Yq3`wlK%SpcT<-06Z%?nG%^AQ;&0W2F}Ak!0O!HcP&j-Ih_!?+)N< z<#Qw9MS2aK>((b8=G7pzO@-dMtwVzMl)$xOAd1&D@nLQ}gc_Nm;m~iGyPzHq?$c!5 zX8nXCMxVLNR&`=;)&=IOU)V7})IopV2$H~;rQ4_;$$t0&{0YIygR<811Ege*l% zF0)OKxVTu6?Thy_1tl#^{-$W$_$LU{et46yTJ@~`R8yLlu?P;vyO9k^j(pxEZ&JYV zTu-T9fs&`c*{zkwp{V>6Ueq?B`@Z>5llMX7>S1Z>e$($Ai z`aLWu&>`I)dLe0uh3-^Oj6q-ew_ly=)R@xohS3b~xelHB)ru^U)+L@Z-RYR$_fYSg zDibi-17c1|kT?6kKxe%eJ#*Kds7|pY$qyB%Nt_ma9g&3Ic^dTTA&!}t{D$*fn1k)9 zB0MQo$aoKSVlIl4@pg_--|9u?F^z1`L54O6rr6NExp!chrYl*#E&)Yn zA2Q>ujEJJQ71X@Xmji^?*7usG4D&69ly=! 
zH0sJeyZ#=x5^lY0=L_j7{&aFmDI6I*3~z44^TX8r>F~%yP~PH-6ZaklqZ>TBvF{*s za5+W&#VI2Bc5ZBo{lkxC16=l|53HF$h}pW8;8tA59`p4e-fu1GE&e^Ge5WT}wIGCT zbOzdb--=Zz-v<|y)rp+BAJLtqKsLT}q;2cA!!*uQa~Rg)@1P;nwX0`V9es)Gt2C%u zn+>niqBaXxAF|Lx?LGdegEQ?N?9U)K@S6(G%4@g zSNL+ZOrIzLhT+M$4Jz9SovF>fcdijwL;LKn%oY$HNC> zA!D>&Km(XgoPPBOhDjTc+>PTP-upAY>x{)?g~9Y(1(%JZCQg3H-oVpJGGt!!D7v&F z3@=tVk*7T(xHa)5$NaXU712rjZeK!H)@R~C?m_%_uY+xfRA!%#wV@w6g}8Q^AMNv9 z$sg};N>A~UmUC*2Z*Y?O(=hTNIGKcD{7FDGgkf zg>l~D@OVAvt2_0D%TObRjq$Mnd@T#b3)iJ++#N02kjZ5X=t#nn2nTZg=n?Sm2qNc8mB}ezZmpTSiXB|a zt?i{olMSpK-50)@33w_^LLSAUMxr)lcKXrNbu)0GuYDx`Dk zUzZQWyVQ98nvFKyVnO$m!@Iz%6TpMuGMZeax7iHGl$ z;yUAdjN*J{Mrv!6P&Qndwi)s0D%oh1-qpn3o}fx5h}JM!+Jo?~;V8sj9?3Q<*P*JT z7hGx8gY>ZdxP6im*%9i@rvEF&O@gcVfNy|T_7^fI4F=#Z$iuZi5fCb20wr>fF)6{F zthy~j7`^YPDq~Mpa%_Z~E`h*u-i;Ag)JdYJ5zU?BPm7GBVR~2v3})-nl%(BII)l@= zKX2wWu38Vw6F>5_=rmjwLHlN%`=MedlA2^X$zbl96Lp zo^_>KXAZ*{C1X;aXGgF7P$F}GI?;*jE4)=*4+j{p|HayS|JC@%fBfyKz4zYQyK_BX zM~KME3fW|jvS&p~DJqguWM-8TO6Oe9S0qvhiM%PJl1hb)kodknx9?x@{d_+^_{B}9 zbFRy|UhDC=-$9u65Qi>yrB|7=QKQq5&Ky*ymnI76rCLv#nmdFRwFgi=_3wQA>^jhB zyTZ@zNJ7bU6*6iBM@-KQA@4dhaYTF$1P7awYZK3c^6m9#!{&QI!Sb+*jKolXI}CYn z3T`~pyj|Z89~M?Xk#hy8IcHqe#H zho9{lWX;qruF6{uPh4rjLvfwZzHlG6zIy~7i9XNaG9vzbS-{y@`w@MWE7w z5ze3MPsVc}urW!3hOZe4i@$iVj_OwUz4090S2+uj>&>a(1AXXJHKS*OU2&)(+l$(J z9}YAXqO#m4SSG&CPrXT~@0RK4Cy++>ymFZTDvR^UxPfuu_9T4cZS*r_dyB`5P*`k1 zZ=A9t?Q4Ch59rY+yN$?@uPNBIISUhO^l`x8B3wM`K<4V~;<9}nL-w~X{BUg_5_n#g z{5)R>pJmkPpq)PHF%Zz!CyDrHQX@D2b1yucI! 
zfet(RGF6K_d3O{~x3InL(lc^+`l-O1v+{-AJDK)P1G!HFGGbkcKEI)BXB1bGN3?|LWU80 zCl{hu-UH{_30sR)!-lDe&|qs!JeADoWH$?1+={S6N*VJ!pEFjuIh7(ToG4b*-rhUjiK zmTXHv*E>^T{|*scH|#=^lZUMxkI-|ODSd4wiS`*4c)w{8q|36onTdeT3UTE|nYTe; zvmZ$^H=&)8&UB~CU*=cX$7x=eEVyRqL}zvUC)jyG1(vUOr;oinN!#8iyxDC{&6Y9O znvx2=bz6eW$n3@|%YEqBD1Gwxm_625Wx|&+mbBG1fjis%5?|Io$KE6J@JPEp32S-` z5-IvLGb$f1{5}ZF(pBk?(_7L0pE6n9<3{#>%)_+}Dy%P746W&5qMdV1$jpy>xMIT@ zoQmQWfwxvK2%M66bC++haFrVl>D43i2S0*uq$Z_q-ejHZeqOk-ieIc%D)8F=0H>B& z(RmdksLeui@@3XRTz5VWp7E3)y3LC&Y~2GWkt#Z?cUQdCwimR%jYdbd9+WcnMB&pe z%v^OF$3z{)t-dPcWHURTvrornwRk+)Ax#WDJ8)@@3(f2L3VO`ZyhW}9s}k?RHgjFd zyce9pi;t+;bsr8rY=JZ8tOrn(DK5Hf2mLH>xW+aLJRWQSQ^p6@-fd1aRF%N%#bjt* zXH6~pQlUg?2Os$KI}F!%r=zT8>E{u9a2-~{3;%D>XEG5koDbkt%BAQ(yof%HY9yOI z%QZV)Xvvi!WLCWkl~TG2Rguq8uW2%;SNi~s4r=lNhJWDwnq=tvY!1S2Ytbm)h(;P7 z=0{b(;f@>L$H0aQxFW-hcG@>XeOW1vwU^*~@A?yw_W&$YGp47$hy)If%}{sn4NUK0 z8M*xrKxwrE>FbpxFWZ*F_$wJ`-Ynvtr|N^s=y~8J^8?Qr9^?uvm58=dDRiEZq~#G0 zxbpA6P$Aib2>%WWmN~M$?$h@485;vC29BfST6a2JYqB`QP>Q#SG$L8owsS&<^N_Jd zlQ{LQBSsJ7ru;MSq zC8LxVM;B@d$if9mG-UR7q?FGR6tUR_wy^Lv}=QdkxOG>`a%x zs^W{EZ{uX26yqpw4)v$a;r7jUC9@LNaa}J)kXhClXq;aNYHixkWH}4YpE}IlxaCJb zRaD}q(pP*I@rEaBMcC7?4xmLH}MCECadbE%6FHV5?Z9Tq^j_BDt@sgikDCQqvd@Ngj6(j5;bU&yl^Ia# zc9&n)l*WI4u^FB7OsJXRVRUZj;M?sKX#dK2sMojvi(=a#Tg#E|3Y!3SGtYt5WsYP< zpMX$Zc{rk7gN?fmi#9prg3g9Gyc~LnH~i;tHrUOZja(5+G3;@w9K88cSoASedvMfz3>#j+aLP322%yMc* z(G+LU&%FwlhIjK`Jxm_b= zXj0QWO!o638+L7H`)4`+9&%?e^Z9n{P#*&wJfYQ%Zd8NKnO?X*;q4RZxLa&JGRlLo zV0XB{lrQSkszwj@o_>N>>wKslcL9afPI&ICDmq-%q&a*ss1@5&S(XLd`^AK1A;i!o zeGm%1e8aR&HsW?8H}S7W0R*mH;4*IB<~I)bQ2jC^nsv6A%O4X+C39wDYp6ENxIKcz zI?B)?zlV{RvyBk~7l7mmfGPY)tXc61-`L;fv}{u_oY$w1s@maG(i1SLXI$4}B{(c9 z<$^4Rg6vCkqP4C7ObZ{Qd+2^hd~h302J6wbcsy#GY{KQ)?5OndPdNDg6im`nA~$8c zV14>;(C&~YQRmtE`RW6h^+bU_wlbul7WU-qjBa>)>MO=Oeuh_zIjL97;6I4Mp<`AXE`2J34xRv(;VLAij7CWcM^-<@RJ;tNI%hG=_54Grj5fjWR^(B}aP81NrZPtnWSG zOjn0k5Cho~oHN6Kj1CzlQqZr3#sj(H57W=#C>0fcM~RSJJZM2R6D8=5%!lBa0d&MM zPZ%EmAME~c2HQj~q#NM>MyqCYr&YnD*`_S21 
zpFFEM%$@jr6z|Pw;XdXza0lDp!SEU1ao0t2vir|{tWq$hUHAM+-k1~&Va%367ccT% zLyHCq-04%+7xb9w3;kDL^2G_se3P9E`PC{(T>q0LgSBqd%V#G9ym2FAlFcwZ$Bi}$ zhTv0+073`9pscb2-Bor2GglbXw^iS`Z2`x?&QBY@Zs5<#;)!#^d++oBypcN z%D}5tM&#?AB3u|NOJX&2$%f!Nyv!&Y;vTO@0wRA2h8ro83g@LPyAcTE+79t=cexP7 z(CN6zvKXQ^heFDVAcEKSLg}^(aBiv|z05iSUwdA{)axuCz536F>Us)=svep=ciRD7X}231Pr?K)dJapG<`&@us2@@AmQ(9`(Q!kMggx2F%Dti@lR3*ZcM zNhghHz^cd^{+F2-$~;R#rR4{?Y1)R^mobuVuGb+`V?Mx>DrfFwf+T%meo|aGU7P$l z5(T$k4uH{cf3kP8JM8$aK^3NK7PKE7yzW72bFyKlMn4XyZ4v1E z*c12HhUC)H0=(TFizK2GzE&RtC%gm6uarr~{vkx!NrsFxio+G{ulW_n73tOV-TY`l zHy`kc!-1{O`FSh@)BDte{&8d6s&It*;D7K!>k^E5VL* z9c+4RN;^L=M#%<3zNE$rbe)Yzz;G=HNem(bQA6oqjWHD-(jZUM-(n5(^Av1MX5*JG zoxZyyTVX2w9+=POtvV@(fN%9AHr55>XCKlwdc>ew#-0kd->xaIQ| z$uc%JMbEf_Z9DwP>#bkGM`H%eZfW48B#KarIVwrn73Lg00L6W0a70lcth%R9Z_De` zme_TC#Im38=U5$V8ubdchKmq~?iIAWcz`FeJed>A7vqaNFxbtBYK&v+Zy7y&wdV(P zHMzm0NyCWe*Fs#kI|t`{@uj-5YtehkQeKQ6U@gy_K#jS$f4_jtFm|IoJt1IU;SUG; zzN6-5ds6mT3McH@2cK`rk}aslhphaDE6a|9uEzm>OOOY7zd)KOe5}VMMI)f5N1mMV z@Ft3B4tV?eY4qPIOJ_^5{V}aKT)d|S>3zJ1mpLq?tvcG|evc$kxuilql$el3o6dlJ zlPfh;Jqm|PcH!}{uB86kGk#H|B7M441}~<%p;wI^Q9t%quzRi+n=iKF_T&J1?v6Bh zn3m5?m8%jqKxF>c14p&o=XSEYa z`1=@&gD#-)o?2}C(aC#mRpXy4%hR5nL&%d71!_{EO;5L(kzwrrjr^r1oL4=?V@Uc@4n@N3mpu0{MN%f_%Eufd;DsX>ijp=J_(F zV|156s8c2Pb)_*WuUNsoZMh4&Z~W<%hq7e;lW$z_1Zy(>$YaiT5O?94Vzhi)6X z4m2!#xV^bo_+|al#IyJftjen7H@IX#_lPW%D(-|+2CVycT8iGd!!l#K@z59fh_9be z2SMrj)MrCIZ)bgsIi96)tX4mKF!!Om+`>VwMZjq|p5;&YOHhaTE#lAP`vop+?xBq% z>FdyI%v-n^2eqt-%Dn)hD}5fi99HostORuKJVP>DTa%PLwxFW+gG;3dpC$uno#Md3H`;+ z?2juMu|C*OzVO8{3@T*(+;~GWbF`Ac;H)dB9aRhu*4UBlWfe?uCrSToa-gRIU*M;S zX2f8UFZu6s3P^1#fR-9Z-2KR(s)zcJ&+nXx?k7h&MPmrf&^05IEmY~~FGpb6>ITk+ zvmhDUy{Q+wUN4V7gv*C((DAD_skV@zme)>+*9V=zt3nN0CJqxcRSv`5){^M0=nZJ= zK_t_A_;;GAg0hdtVZn?PC~g`_4pchQ_-H>`a{n2;Vm;&cx7|sv7i04u>*BnhJ;8NO zH{n>N3f;1<9xt=CZO=qqO#f_&N9J*~;FAftebI$dp(C|T40FSKL0K 
zGs#;10hh|Ck&S0rpLc^gNjz*rjndy>;dvceEf_+{w`b_Jw}hWos6>}OD*)TnYq&g!Ahn5R!Fvi_lSw+bE6aSIL`IFO@Stgm=*0M$YxaFe1pzwh1`+68>c|ULFwbiFQXC%fK|C~L1$S$~HEz`7ZG7?^ zKbjL?jPv7CLHK4JKl)l9_hy0})v0*|o+XMTs z_i_44XUesnWp4OMFj=M(pM^WJ^C=Bz$&sYnb^!f)A{2txcJPCFdQ=Y$X~yda?&*fn z5Y1d^TCIbyhUK(kRzByp|0;*nl?L?ZS4Y?x^@`iOU<7$}*&1v##ascL#L#3F(&Qvh zEjH$1)n65X&I>lKEYcvUC%-|K+(>$Di4AdNW8*eYPx?`0LCegqaG%Bgbn(k!)Gpf` zn>>Oq(WMvugl-hdqYqkV~*dUWx2D;7(tU8b+Lt zRAQlU7g$h9Qa_>#=d5OP^V2&a;YTy1T{a?0M#(7i0xVV#!^2B5)HWv?YPVGTt7o!K*Kg*j@gBgiXAAiL14S6* zPz592EQMXu)#V}loSp!ax&S-h%2OqE zN9ubu7zR%D1IL&w^`m4++4;SE)=LHMx|ABJ-8LFO_^|cXG!4?I{ZBAr)Gq){d0J3y zNq;lu%EU{h7&=su4mP?_C&o1$lkgoa^!D(^3kuMpNt>w5{Ef8BlfIQYEmB`?PnJyA zpe;46K*z;%j@X971^wbq_W9Q@Ve3ErZrqb=Os|b#-M(0JGQZ>(C;dAN=PYQ3*rfkp zik3RjOV*$#Bc{WY2?Jc8XA-ZPU{42J*t7D@Q2JNSnC(gV(ah#k;`iU%`0$0&MBB3! z2j?7zdv^_~GnFN0_46UWy#}uC@uI5@GI_tG5(rxDj&run!`6{a+@}vV#6F4eM@n=_ z;j|pglG=>{&r)F7{GYh0bFV)?uowH!eu7!28UJvtH1TOT1`?@{zy?-9+j{2C;H;@( zhbu{)oQ`7Y3lJl(My~waA!uEEgxk=3mEU}J41Ckrz`yXaB`+8cDO<1(e>vB~!WBGs zQCfV+on zy0$OiNji_Mqr}{$4qejQ-hwBOOb3~NzI3p{p6r;WOQrOLpmkP>>d0Nj!}Cq(5aAk} z|FQ^g`+esf3QZs+Y81_`tHYaXuW^|yPw{%wer`F-L&`YGa{sxik<`#Vz)d;~)Z2wt zO&Bc}TsNRHsSf1g^Ox|g`8h_{E`>deqd;$XlKT%l#bv&h^g_rl+ylX&p5*|9y)Q+p zDg2T!FOqHJLR?x5(cved+xOd%Mn6|t+1DgE<5vl)QEE7pou7YbYyd%c7|Q6XQ^P?a zG4XUFRTk3)3yZ=bPpSf^`m2+JdH;zPzZr=hzuh4Dh>!+Z*@@<@mEm&7E(C8y7pfEJ zLwYP4@z=5v@Lg~R+t1wKXUjB0O#W>=0EA?_Y{QYCIQrw{Gf-Mph@++FK}BalK*`B= zvC%$T`saoO$R*WajdlviRVdPA$pETi;?F_96M1TEPWOH+fbCnf=;Y4{&{XC_CHaO|o!5ohYc>^UN?yW|DVaeB?)kp7Fu zp2YLz>UU6{jlt30J=_h(#WKFU0_JEa(%S2$oUgh89CU5MrxnZ}M#Hc)WKbYjdlICk zEr&~*ZuE0;7WytK<62SzvHA3KKEGxJ`Fdp%8^4aij4wl|(chJVSCReP#MO^sN%1sT z|DhUoKGCO}cq3}PWf;jk(+L}mS3=H=OWdthL*d2jE@;*I2c{>kfl?4-em;|i_PlGb zb8QR+I(t&NP-i;0!yFffJm=eztzn5|BAD!!rctFC+?UiJ+~<-#T;E$AGG+Axfz}oc z^qA5NWl0g-mNC(IIO8}<&i}~|G3kVkFax&MZibNiYP5334-nWr#DztK6ZI%k$Jf4O zC>+AYAEd|+-NSI%it$tnY;j3+Ed*>ap?dvZbk3AYko>@rX}vS?iu7N&6;Z?4RLat? 
zM|bo4e1F5~Of$0cwlPf}--Y^pKX|*LelRR84=M)(#j`!wUVOO>i9Kdd^`8joT$TrY z-5&`le~-hXqefKXq(0HLlcm2ki$SjO8RS_S(Fx4KxO=h}x!cx-;cKkuM7M7+E8#MC z^Oq43zSSgQ+T-w_u%9;@9{^1S6Cq*BOPqfFwcy67I;i+8zy*aGRO?>{Mkv@)?cr*~ z#K#9Tw@TA_kA6a-<99B4cptn~oe6rs&Ims3|HUo-QiYnQbqN{n0Xsg~;%UF55Y{t{ z)-qPrwUurx_plecV)x<4X$laQIEs$&`wn~cJ7G}wEV}3h(&)b|m%G=MZdX&HlI9n2 z`e>F5%&o-NM_uUkE0yT9_%ZxmsY4Z)@59o^3S@@*Rv4fA7AluSVZ;SDQnBz0yejw% zciLs?qd7iAb7dK-ueGAnLR84MnY!e%?FV##ySUz~i#xvf9#-F0MA`9r^dN6Se#yPS zZ(kL~*VMwXC2S8!+<(o_@_hzlevG8O_M?c)1syEC%AVN+zBp7ML6jr)$ZhT)jyS!O$(v^%;(V$@|HJFh27W1XQBSR>HqRADg2$sXhRwp`q z#6x%-)*|-){tusw_7M1tEWzb<8aR9We!*@H3)m3FIuItM&}6@Zb6pvRA){>QWq}lZ zA8Sfw%s-(02{)SY?+CqN+ox%M?lB%cK0Y7fo;= zB^>YknTAWcTHqwhZmf|or#sL2k%#kAdBGzAX;opYl?Cs?NkR+Hxm>`mRrPR7ox)$n zs0ul3N{R~fxxbb+G|wr8KcN;tz2!{lbjL=1uf%U?HB~2Xr{#l2Xe@l`^CEM(I?zt@ zfWVVS;NePr(%q&BH7SQf1syEom}O#5L_#an)#Zg4wEc zw-WA=RjtL9WFn&pJm z>asMlTZ3jkv;+GyML2cccyN&G<X9;rP*jcuD@V;p1JU0a59a~)VM4Mtgz zqvN#+%qbs%5o_r(A2xG6NyAGpcK2zS}X3b&{+pUl2k;oKq2U9&Yt=*E0Fj|`ZvrX@z0`8`H> z+C_s4-bbXuZ?kA5@5T0)GlZC{Kq z_h5{0cT$XSKKpmiXU7P`)nkQm!(xSj+OfhR8L>jGt?cJ@m?y|9R%rG-Mo5pw2qSOC z2xX08goezy^X__#Ff1%qXqCd=$Kz;W9sB*j979XYVue-AuXC8aM$Z%G`U#2^YI`!L z5cB(#bhFQVh5fFJF~YUiVuZ(;Td4j|jBx177~zBvc75_=gwE1z(JYJ=4zkzNV(yw;p04a`Hds)xCT*fpMP z9V6_ui4_Ll{(t>Fy%`o~|2O^#CUE)x?a$h#)>EFGaVJ2_Z%4rI#p47{w?6w{KJZyI zZP#A^@UQa%uB}cM8#hMy*VT*{=Sw$;W__&_IlpZcZPUG9eqir9k;lw5|ALA7Vkz|n z;xU$rg87A(f;@+{<=c)&2@o6C=Hfqlh6aeu4aFIvB2oN?D$%PkrU8qWr;45}`4u4glP${MryY=_ zx?CW8Y@cXW=uCfG-&X+|46=CH09Zi6eTlp`i`LTl2h{p8uQc3iJzX>RFa4TqDuUC z|E&>y==mg?9hFobpf^`A-u2bvF&kl<&FP*$y5H8~_-sJq(@AR5O z0sr&;ugtkV`G0f&z5WmGf1ZJy|EV`k0iLmg1(`>*1M+3Qi}WHD{V(m7_y6vi zC{Fu2F5r@PkT~G{X3>er)1vYT7esyWS**tCR(>U9eSnWAbCYQYi{o}$2<~S(2zLCf z^_SIOEC|sHFOP6;Qz8C!9VJcg}>YW;{kcGS)#hH z=3?h^$pD8%w&Hn@cZ(iRIVXxq_X&{Y(?pr8)CJ{Zq{YdXGXkQ=MG97HD2Xoz=K4GN zs0n{==~V|d!{s)n_=aW^MEexuuORY->DT!@`G`>;fS zn(uf)xkRk!PyVd{>zz{q4n+0&XQZzZRBzB1Kg!xGI5I5R@0s)TfQ_#b#5*B9VDVWA 
zv1i3~QOvgp@xhTM;%GC!@(o^@qFeqE0XY-m1y_$~i>F2v_zzgG_x+#yzr$zGz2XfYOKd3!WreS;^ znoX8~@2%(f=F{3RYr8M^{(~W%kZ419{!B;DW2^bY_cQSA@&|UWVRXlHz-BvlTI|c?!`V2tWJRAMW_%`LKkYAHy8s*UMB`wwZOjFZFTuy_>N=vL3cGuE&m2H8LXgBUG=s49k90!KZsJ#sIJ_Iw{=y!Iqc+a^bLE8PK2S9h|lN`@}U-wLfG9ZAascf77sh+Dh=@vi9> z@M@$w{U&2d9IhNd2kjWRS9n_d;%gOrA5)3Bk z1b^Gn|8zLcn0Wvml-`4%ThBvb`CXLI+{Yc1uSVPd4Dg$8H3Y1Cjt@tAkX3fI@F=Yh zlP_qKl3`~d*oAq4(iy|;*_Gdb4{2Vi>G5m+x}Ng8h7 z=eJ}Q!QU7avSyML%1n_bHAmR~*Q6Z&-^F-dmt#HNqfg;KuT|jbwHWU4<|Jd7It|R; z2A!q9IEJdnKIW{6?h3+!>M|Vn+XG6Qya=iHrlYe=@JO6A?cj8Yo$V(WD(6ObdYjR$ zJYwh<6>9A4NGu;tg?Fr@vM0R?_ItB@(tr2h61|Dx%=0ZLWG<4BIhglyKa6?tSs+v7 zNXMU_#)s`=-JEi5c)IK@$ZN~dhf|Fqw9^SaG}^d3AC#bfr5DQv$kObSjhNi1N)8oT z(7HxFQn%iK=r*X3g4Ay~;y2Kq6#^G~gFW3i- zIG6PwM^6j`o6W8yR4c=OoV6txTUv-M<;rx&NgeWI*bT0s=mgG{oDKOOpF`JCGqSng7>bAF^^^CdV6fB>hg9RTFMxS#YLF$p&QzNSF8x>5*|BBt%Wje@Hn5n&-9g^Vf}BF+T|&g%t1$Un4Ie&ob~`U*KlRKDesj4R5R% z7h$vkS=nkr#r_BQn;CL&ePapp>1w0)n4cKeH4^VUP2yhG4F!2=mZ#k*$(YtnaQnFl zv3qV!EI-Q8Q%9K-$L1xED^0kH`B(g7=9q$~p&NN~lkG*Y9EkL-Y9tGEX+GoIJ=t^u zcAcLE*KIqw?u_lQVPih7736clnG%A_yXV098`of#(3Na4ai?Rm2O+;+mq;Gy7AdG?c5Nv3?$!bE2NiPc?>cmLIg3^P65{fd^743=`BQ2(rHW&Q zU_0wpE}dF|gZ1Z_Qh|c=*J-$Ynk-3QcLIx;%jD>$aOmAD1&wcgcwMsyZUScFiWilz z)KHIBe)|KHauUFZ@#)_sbixJJpAkhQpOjY>Goz|+D`89_>yk)V^Tn0H+#VSXy455SB4^u@Wr-WHRN0vU%~*DW zt@{uC*oAFf67-3yGN=E0f}lWc14u>d<9a1s+}%Ene>d5TtPed31A9#QL-R9m^3<&` zYHv2Q26)2E+#)BqlUZYBehu`GipJC5k(@5B))CS*k9*Xr2Xpu22 z|6_Yxfm|+sh&@q8R9es_`pG!Ug`RyFb=r&+Z)^t5zT+_J!8>s$J7@V6XiXa#7w(fw zBkI{D@*+DGQnKSd{)+jB=^NkSTjL%ac|4i>AzQ`r{YrG=A3toPQlxWgGhbPgjZ+Mq zY0*f+d;=QPhuU#RD%GgCREn%9RK(C*x>RQH7xz6^o;sAhnRb^a!pb3` zAbBkY4tuy$!y#()w23O+RrmzwA2TC55sLJgf+?ksP3WCHMtmvDau7+zrT(LhtvgI; z{QHAYC|LuG3K7O#mm=p{%;`VPFwXDdI~?J04HOiAN>rXyK>6~@%bb4p(<=p1h?8h+&^fA{(_E+qRgJbT-PX_vpizso0Z()k+#({U%! zYtDFnB`Z--0L_bNEs%7)F6fF3t^s_C%wV;@OQ?`g2{ywXfaif4~=+? 
z$IMyxJN66C{_c%~?^!R}P>#%7zW{7mM*3r+Jb>s3FXKTp z`WFT=MUC(>`=1~_E|4?b;=-I`Rqsr>$3TbEndsFgb|Q~ifo_C?xr(Mtho>4SBr5%(L?OY8;RFidAzgdCcNsg zqOyzy(O2{YUMJ~thHXFisXrNmb>I-L8ED~y-bsVS%2qsovlN$<+L6ea`#3fI2JVxe zJkj6sM6~g6F28t{2$Pd?p}KM}Uu`K#$G&nS{c5G0?|vEDF_v&94I<22lY=Q^+WE7y zMsVhyld$HaJ;~}eW#9ejI5bj|TrxDL(x%g3%rsdLrR$Kt)RjEwJ%m5&KJvE?3?Yq5 zV>znpO&6v6W3Qw!?O++)ESZz|yUdAB{G1KzWrvW>R_$0iT@Ah|`SErVl`vX236)ok z=7d|Ak1R}!g5@&^O;3RJ4F_O%`(rMWF;1gHC5X$lLhw@`LPJY;LttPnpVoL8vtpzumlX%ncDSR3Cjd6(gNg5Dd{n3OT0i8OXW-3&v9Mgqg>Dy|#zQtd#BX<^SHo55v2`9;5hzJ;#(mhWmIimW zZRY~-M2hprKIfvHvIL#Z%i)vF8BSoG&tExXM?4oRQNisups)H4BBsCPO*D?fRo`ZA za>GX$dsLC^{MRDLvpy2=QN@LH7~F-^>OFAA*N$=ay!g-N?sVX6GH%&rgc`I79@A!s zJeMlamUkj=d3N4hW-SgE1me#t5wNe5xspcD=l+~xUYMvW=+q=jG~YU*QSTSA1>+Si zVx65$Y%ldqWIaCJl?L6{&8Sb1JH4eJ0xkWum?M`OaC-D*s2uNw^Y>kV$bk_2H7kVM z&_4(4CKuwtEK?%QTyIlt-{W!PcEgp?wbLG&dFoT}}`%`Xkk`_)R#_tYf*)PI>cF`~?_#JeNwBuGB>&3aJ9^y!s ztB6VVG^138h>VZIdTD3+r>p^6->Q?7r`pAxeFs6_WDBUL{lRZ(U%=#(CQ-A?;ZL#r znAh-R&={>u@Ah4X`6qrurfeiUkg4U*$JBu8y7AbnVpcxC@E;uZ)+W6&`gD%+RDjT1 zyu?Cpl31LFp6+U7aLZ%y)NjAgYKJ|IyCFdz4SvQ-cT4gc+5z*XiH4iV5>sIYZoA2{ z)IYqa>0$vN_@6a-DYhZ&M-9T-P18ZIi}mEDXYymGDih7l9Il^A(x01+>DV88aMUhU z64ss%>KfDG#}@^fkXOR3Ai8u+bv5U8-=2iD6oBZ-XNVcEOOFqKA~>2Hid#K4;fs^3 zfvr6f&Zk?D;tF?czwlM8zN!u6cZ$G+XPJjFB@p~N4!$3|0MkxC#&_qGG1vW{_=NXE zd_R@Pazhm|;s@*5_}bAQv*hU8V;7;DGM>4I8d~be(1Eo}Poln(oX1QTRC& z&+{OT2b(dmyAc!pJjwEm8mznNMpG3!utP!$HqA4py7kiJMUyO&0ST(|Q<79OzVb!~ zBaE76$s5n@5tP50#VI`Q;Quktz;Y`Y`f!{yJYCH^Rg8c9=6xJ0jyB@v9{Yr818?A! 
z#@h0R<|F)I|8cm$pg`J5o^Wm)BSOvy<(7RJ3j3%E^GGOx)SfR~@1Gn{ynhsuUF*Q& z?s9ZIQG@Hp{J||Uwj}toG}SBo#BJp=kiHxWxL6L&yz^m|wK@IUb$~$oZ=3&^>E&_$Akx;P}D+Kz^el>5baQ^(}Xx zGX~vg^NM3IQD2R`OrOBL{+PulPn*WO5;dBve-+4?c5s~PO0pO`L43L%XDyz9dQ$H6 zu)6@SI3fyW%})#Zg`QG;n}FXyMvErR2wRX9#9iz{7j$=rY~Fs$SpY7WeXGwfW`Ni-5CZd0e@ zH?h4DFF)*EU_yqr|0j?u&cqEZXW&9u94sl7fFEWRT+VI_OsVuF26q2Aoty6DzcVK> zWr`w=m~a3VK2jo)u{vb)&&N=Nm$2xHI;q$yL*}frp*;(VVg5-09Di{d-uJ1JUXcoI zX8fqO;6zwIsT3;@QVeMkBb46zMLpJ8X$W1w959(u+O;I!IvIBw9Hm`w5$ z86=jVqTvuIwRWd+!3ty(%iNm>Y~VVUJcMp1InvOtLM5K0p~((u+{LYcD@AsUAM6x_}oXjdCWFqP1zN6Y}v(xAP-rx4^dVGT5Za_5tOx zkZ-Nz#I@#hv4acQar`Q*2yFlrO-E=983);qoA8>M8i_MfrJ6o&&)=DXt0#x4e{E=(nGKC_ zOy*X(Xpn=Qg?Mk!l5|usrs@K9*i-$JKX>*yI$JtYX{|M|Afb#G+BV_^4<($y@@#I0 z?!o@QHsqe)6nxCM$HSl5((kcWFsVa}%%-|z#*3SPs~6%4dwZNdE)3p3t$@rutq@)z zNmmc*(2{I(LbbxMyEz``Uy~<4UfEM+BXweUNdt~t_=TfEo?bKg$S1LER+h9IDRFuZ zHw~@PtF#hZKb2wWF$?lvW*kJ!@}hr2W$1R>{ct|Xh1h)W#LE}P!w$_8aP_qnb(XJ# zrk`q<6|Ddhc1jV*{fjAWYoOM?3YFrS1Eyy$?z4Icf)qKyDu&xqU;mF^dRmkE<`ls4 zump_%U`r!d$FN#ThI%wx@M8omuqEvxe8)=hgLi=tB=wt+L{A)BbFjuu2Xlh8vTBB_HufrU5-(E=vTQGIiWuh7%6ClHB3l z7+RD9Z=b2sr2I6jUZu&el5OA%P8|b9aub*M>QIk6jf{bCkxQMjmphLqKq9w_(|(Fr zc~}5*w#t)o!62IIpXSW<&w=4cYf?D%4@R`RQD+Tp@=~@3e$U;*GDyneXVH0R_#p~b zi==6-rXt<`Rf%dDxl&72gs5s6QgSc|{HEkX;%-}duPh4PCyj;f8;WGtn-ILK#kk(D zvcbUGgdQ+4Cl-pQaX|Yu{5@nsFZX1L)9$*#4vDew-s~mVO>^f?O_L;rZI|)T##!9M zUyCt*{IY-*mqRes?*VLb=J4pvcbxt*XL87Y9h`X2*ujy{VP#7>JC|7i4_3<%-=IbO z$DTiY@rXpYVZ!zn4BfeZl^?isJ2ye2&JD;uY(SEnoar>pKTs}j&U|pq+^XI*s2L+c zmr19Ak3lEyEj0pnKlXjw%{bsqRx~9;hFiR&0=nzILgqbJI%b&`!b>e2KT3y6<&MBn z1Bv|1CkiA;rxJ6MuEOZm2N=t>2Gd1Ti(e-y-{BO5boW|I#!jxQL+^|KMxzaHY5g!{CVC% zDF@y~%wc1I8*?+eQT1tBH2%l`L(zGMbNRh-+@9Hc@4aX7dG4e7HdNY6JMBfgv}B}I zRESWaQd&Zh_&oP%5oMGp*+PpFDJ}6kzrVY#E?`J8j#@AvCvMRbyD@yYsb4DlW> zNIt4UO;(n0{Xqk~z*~bH_G#yPF6hH$-w!y{cMiw@&4pF@XK?&XYZA9VsHi4qJM6lw zMcy9l;{M%pB}?wHH+#1$Oq9`~`$n|Evym=zczp}*S)fdeJf!KJpgTDIjw|WyZG$Z} zVZ6WARsN*K0}SfAg0*^LT!wUM~*4998jih>F^OPVlBM95v!1ZSL2(-~M3u1vnHXBoL+#zG1pqxJ 
z{{^CYbKv;6QqFwtbS!Rq!}U(J!W@ZZ=${}#d_y1OhKOO#KIsgUSyb`Xjh|t0r6~q- z@~CHX4wJ_Qz$aotdfzh#bL4Bd?ta$m!yhp!xokgp=Jcb_%6{(dCTs5D2POJ~Ip9X0 z^~2cHf4I2|*{rRIaa&oRTWNA67o{EvkzX3H;*S{8W@`wkvBPRBEovZ8B`-E4W7`i& z8vJ)AoGG}q4hnO+>{?)?F5}BY_3+5WQ2I zIZ-d;;m7S*a>0f+8@+<*JUm$-hs(zq(#cb-399Xegbh!Q+4(`>W(-*YD^`<$fSM>%wM{2_J>^YeD zkhznqm1zGdEiAjjyxz(p@X@ggo+N9KyQMPp`c+Gke#)3UB`vr~`Yvn@?0_((i}<)l z4xDP8V0feqaoNuHQWZ&P@NqFG-!sHHD_p`!YwvP7;aS*}>_)sc8PeI~A8;EJMTye# zBha27&-Y&!qc!4nz&-wgGd@~#ZwJ)T-)SN)4oilo?yTcTvN+X++H~PmS!%B44TgEz z>*e5Fd8f*7aVv;q0YVbI=q15UI`5IyF{cx$^D#sz+5nE@RtuX7Kq5;x&S znNrA2oe7n0IuM-04u+ZXL}C>40Z>(1@ppk&OkXE=&2lw5+AqOdA(ydC=LN2Od>q=w zcW{%G!f?YPReHb76+bBJb7~dQK+YP_3)Ajnt+qW;7&VDory)zLm7_7*{UpC4&l_TY zHo)fqJ*vq&(I>qzaOg6R2PDJs*lKAKb+CdTy}$@QG>Y;Q4n=|CVp$k7L4aFBzvGv; zF-7?8G42^}gZVO(!A5cbgnzC1fpu#*B@rj`ce4aSr!%!0vj|QpQAkWq7D#WYX5Hx( zAjX_8C0WgU-%T~LZS`Ny()1hLZk)*9)i-ATB@sF+BMODK54ld))wr_eJNVSNk^qB5 zTq~J{PZuBd&PsBHAd#8SFkP4G_aDX0B@>_k?CD|V8u%F92#4l`3mo`L3|(>zcPISC zFKQmpx1k0iW}1-hiHs$7LJ1cgJcK_kWT4}3XmBJ!wLdYH^-1mt-XpJD2{Jy+P0ljH55;rf=G6q;k=ew39K&+zo;lny9Xq;V z`#<>nRgHWZT!HZ`d|~&-d>H&%44)DhOFiTaDmd=rq!#N#l8YJNoO~b3CKa*%(j@F0 zoCUx8#PQWLGpN>5qW2ztfDVV3=x8fWc%Nc0KdejE2Aa~lAuVvFt`SF0(WNIh9_7sr zxKJ``1^@Z08Cf$ZMlLDbfVX9t7|nEersq5Gv04-03I*nT(}Ul;&f}_qD=?708+>ff za>h~yV4#tN8#8X>ca=Qe_?9*ESk=SxmDO;yM1w}wvU|uHp4-m4iSIq1;O{0C;@bZM zN565RUzL7AbC@cbsqI0I?1@7-nhAm#Z@9VA&oF$wCVyvc7;F-~3&Qv3;6D#31~mE( zU-m1rGZ5?jy>p_D9;Hw**oF%)G8bl-IWE_%!`q^Hko@`*r+Gq)j9g!fMW3QL{U9v* zuib$BsSU)l&t?e^WiP_p0uRz8Uj#17>v`&4#_4-rDbnz*!m#dlaR1kN{Qc|={B-ie zy@Pj=kG_R1hb*y5d_RuVe}_Bm6scLzWb81yg%f*~h~K6MeDM8#IDXoc%BlIG?m~O| z+1-Mw^;;9AW#{l#W&)0BOZO7@zl&Z;&p>oXA{;qjLfk9b z{6?K*+|i=TUsG;_cMstC!>9%8xb3DU^!TH>FrIwJ<<^P9k+!mAm47Dm5GqL5D-#B- z--4@Dj=_%sdphQqGO^3vhUs2s`5vol!d1hzuS{Lg;Afm=m8} z8^uiM?ZpKUfMa0R%O8+=vX}Gt^4-gNLpyFhuR+h*hGY55t<3EwN23khsj2!dyvKHW z^1;a%T|$ChmllzPdLU+(r0BXS0={r$;VpQ}UXd$y#l zFNSZCD1;Fz!~A~j1c=ddghfO8)FiAA?`iHr7cv{JEYydw%PZl*dqueR(w0`-kfv(c 
zigbIdCUrElC0&DTf2))Rx12=T&oc;wojpV9)#>m%Yht*d1o>TOS^wY<{+M?Sq~|D; z;DST2(WCHSUxA1e`DlA>ug9&5o>EkC7ifhr?q$(DUBFn_{RykB++&kK9mEY^v9clibVg>&%11rt8%yf>UG zuZ2$~M{vQXDzubnfnDi`@kW#}HO=zzP8K&MzhzsodvqzpmJMQBz8noXq=&1OR>J%T zp^QoN8V+k&F@DNiT-rVh{1`u2;i)7Yz1Icyinj6BSWnmW{t*6tC`-P4e-5ukI+86> zdtrv%RX7%x%IPI+0J~xzun_$Y0s5NslHXOxx3won-jr}L<;TG+xtfV zf)ZknV||?H9EGirv}`UED3!yC`Ker6$Xm`b_BAI}X-CbB)M1XtMP4iTrGT4!4Cn7N zBaVMn=NDKR?PuA)>}jq<_J#u$eDi>O&j#FK77MR$Wb)5Sj>Gf&o=DxS>ALVK z=&f=FoE7Y--+$8dm{=Td*LnsUdUc`NCIr@mN^wNOl|-o7QDx)TsG^$z3zA3By3fp4 z@^u?Loo_;W)guKhf99a!Ed^rx{S9s&S%ODZMv&EUUT|vrCalZ;jEO6Z=^wdbyesR2 z*;PukdTT1wAJ>AYnFb_prWKvcRyn@Qvb^v5;*84>tat5`TCI};>+ExP!E44ve82yNG^c-zmLi4h_NpnH&Ui`H#{6-OlL_r!B>h_R2} zZ7VOjm;V8&yc_u^`AWFtmo|y@HHDF|SIr`ji)SocPoYTe3)_##8#E;(S? z5mVC4{H{69NqEHY6ZRih=PTrM1x{rSpmpUmzPSF->(GEZu8UEpV^Es>&iM`_@+ZTX zUAK6P7CE}F=M3IpGvledg3x(-8(3Dva|Zh|QN~t`OlWKb<$rg;)lQG51;y}PDRT6* zR4n&xcL^T5d=h)J&A`+yo5y!oQBPcqT>m6N3&JqafYx`LEwaSTG)~}CAp2J|%PjQ-Q7&^Hh zgDDeFgV~+^ynNIrczM3lYfEr0KJQ`<@5~BxoHGobOBMM4MrJ|uht06bnK^@_9`e=( zGBkQdmbZ$&GF`H(79Pl}(}-uWD0#vTy&Y~s>AFU~8d&x!#SbQSnbTP-UvrgR`sBX+ zEC%{~3qQ+rsMn7gd~|CCmMSgBtS#?QV}UzeQ#OL8-BYE*2am(P`yJdmcF#}QG>i?P zLs$6K;utoEP2aCe(hChSpN;#cdDdXT+uM9r6mvHFNV0RlAC~Fb3WnF0!kgfBg zPiD+(7AD24!zU%y_-astj@WHQSN3OOb`6hW%|^s-K>(zi8j}|>khw-;KNlWgLJek`(mi*EVS;iuMg{kBBY7p_X6A|N#9-jDmDmU3yS7 z1}OPu)?+)}O3W4bXBuIS~OGM)*~?@^~a1&YwI z@ePD;LfC9)0II>)(c@_qzmrSj8^4&K3EB{?N;mp9J07n_b-|Q;1G4vfDe_|GlxMlI zpM$cXbYKPZDyBmG)mPk=a#^xi7zrx_W$4#5mO&Q3kJmnK27m|97v@H^@3@e*H7B4j zZxpUHy2ib_`v&9Q$&%1Z?YJ=472BHF_v*d3pw6C=jQz*>>(3bP;&m+k(mKp7ox_~L zYF6mwehGKK^PRd04M)_CcJpy4Sy(X2x}x#p@Xn{yB>!NVw}jWMa?jI)qQW@4|O>C z+iysk`Hd4ViiM*8?%{+q0D0zR8~x=js{J{P*JG2|S^X$#er4=q6Jz>tWDC>>n}E?I zL-KmuX+FTT4SUuMqFBNn?!=J|7;sJk+a3XA*9gVkE zy3q^w5@1)_5Pw3{p8m*Jpr8Az@SC$3-Dy{c{d?R%cq|JywEN%z(M0}dkSwUMdC%v` zOVN-e`#*dff!mtCVSuO;be%uSulXlI`8-jQax@D|#iYpl?*-i3unKSxzYKnLw{T}B zo2l+<;vEa8z<%beQ9ZN(tUD&)iK9{66NlR{ZmTp6GG55lO&9|XkDXv+L_NeU)*!?# 
z3p3>|lkZXAd#4>iUjCdbh*cgp1CZgnTk2wuedjgk*A2{*t$}NRyBWS;t?$W70=K1tH6t~QZ)IH75$Z!43@t4acOg< z@X1fc7fOqRT{mX&1-hqU@0laaEzfgnHAC@;c^|%x)1)SE#JM_Wc_QPUj{8Gp=|i@= z6Y;Po4;Q9jgc-r(!(}ih*qJzvXN=#KH?Zz3n@f$ef&*3$iay3>3VwbPhp~Cq}aKzE}q)-N!VAdL>4b@gXFVza9GBgn$%x}%jJ*p zJa=8z%%5zE7xHWeDwqedr7 zPJos)J&L=H$iQS@82{!vKA+zXc^u+`iz{LMeFXuW^o1v%rRY-8gP?F(oEllBqpQiOW@9BeG(aU9cwLg>B<$6_(~8938QWb!yS;PZ;G*Hy9!vG zR;9e`V@{}72}^@YLA2*6czrg2IA2$Ks7{|~-kbze#iRHarzdlj5r6n~4->%tc^ZE= zL6La9Z{=Egzo1u>7zr62g~{TDSYSVdRc?x8?TUPO>uF5>x=(>KEWhjLVot2D-Gz|# zmc;1qBSFL2Zf<9M4t@y~r@~e4_Gjw%!<^o|X-*IOPh|6=2t3ZalOrRQsKQ=NvdwKbCt*!NxBVB&>JfDFJI`Ia+sO^T zbS1B(?C624(WtrZG;E)CU%0K*nVh{V&bzP7L6a@^G+$p6AK}VKI!&ZNsp%KRAA_1Ptb!K(|Sfq-3uuq2{ANZB`@NZ(=)&edc`d77g-bq&As6 z-IPqLRHbrj<%o^rQMh5Ufg4CPqK3O(;`5R>u%&s`%g2f-Z7oRu6|4nv&OFO!y^Z)oG7IF}f~MC)eR9>bYyv)~s=8pDRmJ z0wTfV!!JB_vY77~!@d(YI6+5sGYa>#Kv{(Y{qigt*6cqHX@Bm5_5Kp}J=MZ@R5G`$ zxHEZo(3MPjYD!dsAHl+hg($~Mk@3&>VVk8T{Fc@vesQ-Uw@3v%&;J6G&=}x)KY-&^ zJNz(Ol0I(A;f{2>(URNsT>Q(2IPdl}{1mTDu5A^?fyR^E#D{iN+hZ}lz2-!>JC}om zIP-ACoxxcb2Ox8l2FZ=7Ld{jog`#(!$F(Vp&-77fn;nDOwrSIjLgtj7xE=1l(xm-e zg*c`m5bis_!$KtqIzjX&9<(wb+-faQi1MU^;(v-Z+8NSUuUQt)@<-8&_3BixaU6GH z=M4U6;ucIvvLivatKrJ?e%>eQ9lo`dLhaW|^k}>S;rENuTXVHY*f&?srs*nIvN9P1 zm)j77k{0~B-iRDaP$Hgp#__$kwQzGzE=uT}W=;|tdheJ%`EhCpQwAP^euxuY`y&E` zCyeN=kE^)ftv_&y72~m=vEynBx_D;;EmBlo#M?g~fX$&=cv$^7zy9h?!F^>T;;S2q zNpkXdqsWYt4sjwq3)7);iZZwzU_GTB>@Fjc3(LO0g)59-yrcg)TB$n{pJ|EwX0_4$ z^UOkC_lp9#JVuG0TH}o6e>ZR{ZED12tuDC`co0^8v-v*{2U9hi$<%F{U6lmUKNZ%W>JgKQYjaBMLUFMPxq;}-<1Sirie26(|u^6-NF_$W5Ktty+R@g-& z-1{Mub%!j-H*00GV-;f|zjwx-^{sGn_9ysLVNAEBTT+oNaonO1Cs3_W0fpF5{>eyF zlCN?b4qg-?`WH;8r@K0p{N}(iWtGg8_Y&K3WVk?fe&6`*D=euG6V5tdNLTB`;W`&K z!#Q;nt53G!P=FABzvKB`0bvlfs}j{bdU<*A8kD+o6)F!HlF8YOX*f>~%UNFIrPCAP zYvE&V(8veuYT|H^wvpsY)&VRU1pC3yP5msL=y9rj{gLrlif(jJQv6<+`?(G zbH2HzNhD%SX>(E2`aj5vT8HD7wM+^GVI) zG1r=?_wA0Or+p{=NRL*S(D^E%Hsb;+f%y}IwbksPCR<84~=F#N) zIQ#7d@N#k^GOZ$z_y7GvwgPG2YE23|FF*x5?`P>LklgV3I1pU|HYVmI_N35?m9ZfQvwszBE6MjSsIv@RcWUrSU6@WnYFy 
zg&xr55{*JlF-X`I5+efzQ276hL@JYm$Byb&&=I0L)4LsTG)bRvkMu=>Kv4W8&UD^ zB49kjnr4i)C7nn0xunPn4DmgKUJt%N%GrKcCfx$hJ$A$B*aJA>p%hFk6{S~lKl35P z6|$Pyj$wKqe@^Q%WaTb}2fa@q$kvK>)JT(gEz5+RSPc_J%E9mCnadUqOl4cFj^tV)dea}`dm(ZcRc6ZomyY-#0v8#$n ztP>u=ebuozbF2k+76*V-+Da7l6sM(*ti#}FM$Wh#;$KX5gwHxwpWd1aCo8mx zVgzGm^|JTo3I{s<-Dq&Q`IvWK+bb}zP$hNolQHhC>N2y~iUsLxp!`u4UV8Gk^QI4H(|--c;i zP}ovD!DgW=J*D|Fm-*Nen1Zq~6k+W}C&~)y?0$ zu$J|4((|Fn<2qm2>p;q0zTnPgbn_RpiqL$`V!_VCIVczzfx@X4^r^)g)JpycAHL|5 z+=hcVf7%c1HkyW=Z2r(+(18LrBWtd^1p%&k+@HvNUZ8p&HZHvimky|tN#0)sB6h2w zx^WX@{n*j9(kJlc;ivqR%MKX4F1OIaT3F0jW;eL^%O7LWz44&AxDaaCyr@9^D*X4?lw66_p{=Qx;Prrj zPfZ)(=Dxbj9W0Y2`NtJuD24666j#8>*HxIdq7HhWDv)0fP06SAY`?NB9u~Z`q6_qO zsMctIzHN6ju37UPXN|s!5)G2X@mv_nF^}3tkfT*QC8)x;RGeC0ipi#Z{IN}+&=hB) zqM`~_UTeWTqT>StcGZ(tGe+9q zSd88!10N=A#Um#_qUUx`ZgTWTe#-4`Zn?z*nD@qxq=Ydh`UFYxVqppDEboIDu^#R- zJ0nePe8WkNSEJt}o$z(xdl(oJfjN?2v7nfTx%Zo~aYD2pXpJ4o|7S~7bahF%|8?Gu zKgAjR7(whpZg5k=BjBW09A-~@jnh}Mb6(PEJgaU+TfL0v(M$6D%NP~9JlvK%vDgY? z-pBc%quNx{&;vZZ)r1U|PC~~h(6^$gIR5Jd%vk${FMZ}o^3M)&Mz-qIAif7uzqR5r zv3ADp(jYQxWO%zzdVI75<7rJQ1P^`^=uNgIE~8!Wu-ORmd(v$j(3d0!b-PjKRW96U z9YA;GSKj&wkNAUHn?dWVCmd=JA$P}Ytq`G7z3@QJ}`pgP9Y2H%xdT2j{l>f^o<{aA3aFwN*vXoHW2q+hWTf#wA$! 
zRhoAH9FNC6EIangr3!y5_Q2qG`xI{aX!_P+08kYrNf8YYVD6pj7&Q{PV)5A;p zj>1`|4XNQzS6c66Puc^G$?b!0v1fif6k#ab_R0m-|E$P=od>YS?;l!?qK%=_gx#8_boUHEZOgx2{D!_&z-y${C7 zV8CEHKgOUH6JJ=FlS|(K<(l_V3*w7$K7e+7l{Zr1}$dy@Xt0FmsUIdxm?G%<d}|Ygu8dDP*ba9?!jy+ zQj*n=#z)?O`k5dc`Dqa6INb%?S7NmL)k$t4yLUA2v%;sNM&tB_%nQ8R58?}hVR`)) z5c;XZg5C(InraCyfny;zjdDxR3eA!%h6}(3ghrK!u4_UK_=@h+${KudJd(6l*oVhiREpgPOs)Z?%>fA+5Fk8 z3=40WlJOml;Fqun1DPvv_BlBUtK1-LvN_q>Cr#DeG|5;8G3s$ZjXYIe#YZmfhRqHR zbah@1KX6YA%0nN)zIQ6bo9#=Q_WniXv&Qstf*7&;H4pR4UVwFgBlUezj_U@SfJ>Jk ziH%k8WQs37YGwDcQ9`T<3ig)$@{1pB83A^iw&C)1yi z*B>^IOycjJ(xV4%bn^PswCSGx%Q?@~_qh3Y1pabXVth_9I_BkAIAPlbcCT*3?ua?i znv@D(o7CX_#8RB!br;j8is8#}NAkT#2K!Eml0$v2KyR9nwZE!34T(>%y6FqQ*q8_K z{)*QR%TY~HU7D6?O87)Slzui6mi?Fx*~b!~+A82u#~1Mf0t%GO+2NQYE?9XuI)H&Yuz;Eyvvl52m3(@~yDY~}Q3I(r1 z;kQi}j+lH0#)nIhaoM6oB~6C*5~d#Civ?DnASchaJg-JqcjY&7PC+czDv&&b?f}5>JRRXOi7h96u=) zLq_&+zSC+^>ZTMOo1#r3n^~t#Q;#fxxf4zWx zUd-M1#+vx~$daaTLvDQeaSTb&p`Ru{K=DbZQCZiOu3;|uw2bFaAL7e(UNR@L&W2>z zMvDxT=#jy737})4KwsKf(uElYbk4;oV76}s?6G$t>JmCc?vXe>|1us!7*je`Sc|o@ zr{kn_KWJ!nAl25DD6`jqT7A2Tv&Zqg!okf@`zH&1*?e37k0^;47=*`>6L`Z0HHO)< zAfj(YVd3n%LchP2-1iNC`TYy6sO($?+LR|t_e^%6^|h{K6w51U|Ga~v*Yk)5hV((E z3|XQtOTJ2((4sxL@Dog_{h9wj>3ArNpZ6NC8Z0UDJ%0((M$hNBq^I($4^%MkNI6%Y zR*%0AABSRS#_thb==q;Jkze9YVpvAucfd%z$+)W*{@G9;sVY>7mLb=ll=JJvCqc$7 zYYg&XY;5cIg1b}8aIPPRk6TR1p$0MhPr49YA1=jf)+RJ;l?k1vWJooFM2Yv4qrya* z$=*{@xPd@!hO-|Fm`<~ieE6ITj!b6q}41}Iduh4$H!=q@dlSwilO}DO5vqR zKfz_f1PFXK1S!{(u<^V&`RV=~ze&||<9)Qr;@(=gW@AOBYaPWUXUwSlrD7~Kw8iIZ zB*`-Ic+{vqhR$2O!12aquJykVEOB-w8BVXb;p9=QD_}`GV_i7EvU1SX_62hnE3$)m zv->MP;{2^fe0gL8r2Ou}i0Q4|%zam3$@X(Nt#S=svnYl@p8~AA&YsgR>3HQ$01DNH z(CAM*eiQ~`U$F^&zWgz(#-Cyt2yx!}Tn4&yWn~g4E7;FyG6VW*+{E z*FHVPmh27aF-@G@8(~YK%Y`gWL4Jz;3-CIZ#h=MxJo#*8vYqq17G!Fwv~9Lb)$ zJ+9Qo^Ap_EDZ|VlZT`a$bAbHl!JtzG@NeW7yn9`NcCwzUh=Ml#I6sD;yi=Th`E3Fx zs zGafTbQ)xArwuuBw_c}snxqExc%m?V8Lc9T6ZH4c!LgD@izwM z)!A@=HYw4BUBGWU+={x*jLTAH1MV-B$n6JWv{_S$I&AyKi*A~V^t=k)T6YZIXHSJk 
zTlCRAJ5k^_LYLY^q(f0%B%k>t=bmA-8Q*YQidZjyh0zzp$>=^^V!lP1j9&cQ0J zPHP#LWWBtUv&N+L3di{vKf&(x%H(~y4i*}H!nJB;MbDq!!t0BD zU`BT(_oiBn`m4vo0p*3T`M_T8TSpJj0A2b`n=vNh&2W}pHr(vgBd5+h0yC=@+_?pe zi8JXY&Kzw@e9}_EyXi2Osx3g*Hy0sg(nroDbs=&q8}X@06hBIKBq#FQ6$@q+!IW_$ zNPQjQZlW^V7b(%GgbXg@vo*YBd3H_4%Z}dq7Y`0I_xy=S)*%XjfmkQH_i733+>#A3 z;$4vM@QNFqdy{KtJyVy6PkeS$G5WlbpbHDl$(Rjpiq4J;`QN58NydlOqdHbdD`RVU?$9Ff(Pm&=cZlyt|oE4Rr z*#>_--G$}1FGIzEK7Rc^hjSh*htqO@@Lba>RJ}40=vNK)yLRxsy?XTZI&ETZXA5TH zk~CdzDX7;u&{>yan6%`_p1ES1by2w!DX~5pQ_;;3tr6@J`U{XhPDq=7G15F>|nF z@(R&MQPL;UlOoPWis=?>E4`{|u;uX85@k!0J z=#(^p`A)y^tI`AT%&z9>m`Es@+J^4pj%2LDSvdGpmP}sjM8yj>!^u_^GN*PDSMzBe zEdDczo6~X_=LH{yf$i}ac`g;Mua$-)GeW?ps1l57*xf%vmGEmS;EPQP-ft3eDdY9g zuyhRsD_WDOq3^j?Z6n%dEl1N1OoR36S;$E-hVx5PY;LxuY9h?PxvLPTGZ&2P%(Xm> z(&yBpzo1gJ8&-rHlK=bzAn)u>Onq1lT9S8A>t&u$VrMsquP(##97EzTsvUJN*wUG6 zB}v}-zi@Mv3SQS@d?*2P2zP$~+aktNp(0eNd@XLinhMU0!xPgJh2yjnv2eu^Y~FbV zPi!3G{0dT`Dl-XE4v29F7k_|JLB+Uv^**7^GATOqnG5|ln9cRyV0fB$q5-b;+cE|o_NU!DrUU&9@ykcpPpviqaR|J_t1rGI{pckp05M-9$WY^ z#+uB&`4nFVf8=}T9p{HwKL0E0G&eC%y;PtcmG_jRZT(H$+PG}~MBGTY!liK*uByU~dkj{P@jHNM$T)hn4uiV=_$7{egn5lH|`h zQ7&ds1n(XEhg08P16>y%_#*Jgb2f@Q^{yW^uDFop%7%2T=R>X~ z;4j|YcL5_hdoig-p8EWip!z4W`78cykhkzV99et@B8pbVMFGWb3F zW3gK^6Rt0IrL&WbNOwXWq@2f>!E_VlE3+&6IWOl;Iwd6tqN{szT?ZYHz3El!}DEN;pHvrFt}fZy06k8 z7nknDYPkT6{B=$s6>CDjFAavdPOJENxy&ir=ZWRN*ReC-a`0AaLVEgd(W$S>)IsSt zx^*5$#n%#CqYV4*cJRjEbp~Xi#R6PfZ!DO3(FalQB#zyXhhJs=g)?MSsY~lxp~X)% zsu{5Y$LR#1a$pkpvwKqM;5YCU+JG^?2xbXdIG34E@x*F1K44Y?IIVUh*$Wb2bbU0$ zu$jbVrw%NcodUn@{_+iOder}>JUJ@o3Emlp;J8c_j;d&fDCJ|^gI8|!-2JieWvv?w z+fRhSQ;H{i(_os@%o?)&5M zw)C*YQ`B+0g^f2|$>5>yFy7C*$T@KmKj(fc*zI`)-VefI+#)TiI(a&reriumTZV;O zon6S8fnE@vXPvG^BKZ1NEgr5t&!1&XK>0Xh(r_{n{eGlE`py&lgt5^OFzo`$Ca0n0 zp8N2$y9F}E9^#)F0hm6Dv2<>K0h4tmbT@BGUJlm^Laa99F?JT;cukr7I=U6qtX{*n zcT%|GOA<(bv!P?(DbR}!DsJL;KkS?BPS(VX2W<;~+&bV)-x=3} zWR)B3H3?-s#Z1`S=LU0C&w)Y1cy4*07Ik@52xAA%;-BBnbnxvbULnJj3@)tYPKJyW z-i$~_y{GnMTn)k2i!MZNU=C_nKIDIQUxLkh&wyZpJdrFfg|^}WNKUmQ8#j;T7W`p3 
z+{W$L*PhF#EEj<94o#99WkOC|lcIkzs^P+iF?guPkxX0E0R{8g;OCh05b|Xl)N4r) zd*(9KxMD(xcQ*cPvEoM8G~m&CYifJpH-Bhq7grSEM0Pj@V13+2EDvNkgES|=2}*Dz z+ZKPx>(gT=+R>)ef_S~NrRg>|1ShR*iNPZ&lGM8luC{4lQp+Fw_(_SZnrTRDt>@q| z(KkiQo0Z6~*$em=SEfK|uN>D@`kD6(jK_G6^-Q1S!>)yH^k$P8(duIEE5i={nu8sQ zs}?|Hs1DU{Xc0V3)B}Z!0T>|CEby$~!*%lSh0DH(Lgh{;>c3cwt}m1TGer?{&-pU; zADxG;6PdU3@=Vk!FhGqo)~S@f&ME#If;l~bT*Y5`Vy3$Ya*Dp-#{Et3l{uD%{_e(| z^b(%Vd4*pK9iiDf9Tkd2!06dS!L(a?C?iX`jh)uC_;W9$XWatJkS093%bWyVxggx> z?nbrNY~y5DFU0E1TTrd6z!?{Wyy9L87>p7b3M9FMxs_dTYAO zok;>khhwNwGLHM?HUZXpOS8{YjC=EbB<`2@#UnNH@ZFqcbnKN#|F6F&Old*W_=kA= zu|2Vu&BP`e&TsW$I~9dUu>4j7nT)ZZA@&(eo_m8!W|Tlu<{So}F@fd?BU-cM42(*9 z083UiqrGnrq%7vKQN4J>@dZ!uSI-3;E!kZ+YV^ zGdjc0oP?Vm;kJZ(;lWXRFfG*?+_pH==#6sJ_XLH)J^3skZ%)XOQ{J9-+a-?WV%RGGe;UAl8 zRp3asY*?1s3+D|vOwV$n{{p1wCbJ?wYv+5`J!kG2#-Q}AQ6=3~b|7nb5t9}SgZ5hk zZmdKK9&_;rN5yamd^8i1Vkg78k;ZVP-<-zOFXBWDAL5c;mLu{D^A>caKJ=VjAXn8?935vrc%!`OcvQ0HUC#dZv$+8ryJ zWjh_m-VmV^#$`a--~o*N{g(4!GnacQhVVY3vaXW8&^jvSq{$cenGeT`MSdwGL{^C0`L8p*Hs1I=fL1(}S~ zp!`Xed0JM1`CQ4MY>X_j?E-vqt?5UR$Xfv=@5f^ncJmR_alZ#MRF#`W#|fxt*~vRBDpl{FIWVJaZ=*osrAJ`oP9K!=>_!)v7EY=$ihuXt zF<9<$0-Zu7=;yunusdX!pY&V2$nO`RoLeZQ-iSiE)okYGmySVkd-=gH7{-}@kvc@B}xL*{w>=ophoGL#~cC<>wIocFy|RES0;Nm4|pC{c-i z`}_R){P+BxZ)>$$tj@dB*{5^%zOU|$k{&4fPermQfH=5Q-{sPo;d|La$Ygk_PSA$dzyb}u^yic&Bwz| z^;q%8lH}DHk}tQImtycla37z^8y+2nGre7?R<|n^J+UI?e^rRnuL?Bt5pd`0fABg> zWoc}(7O3D&zJmR1$93(v+L$rQ*Go}N`}-K;^b8sp3uTVIFLgK7#f*d|II+GNufOvl zXBT}$)s-c1&gT<`pZ1|IKUKl>Sq5}*lm`u%Uy9@QCUT*DR&;B03QSphiLtV;;Q15- z($q8=a`deTn(P6+tNP%3_bd8u)S$(Kr68xqh3s3Qf@jyf=A-I5xb(?PhtLgyov+$C z$5UsxQNlN{BXca4ZK*>ab{6^fLl;7se*C8<2Rju~uuj*6+z2s*wqu8J#ArL1RPdco z*Ht1lD=uMrmIZNADuVp`YGg{dInCJ2!#lNcFn(Pr1aZ5tt@1S<{CfosJh+YE@NAbTgB^bxI!)tFP99R_uNtO-VNaHimwpf`c{FLyz z&tfD)XMW-2J?~(Iv=Ke8WF}e09Ywk39r%LfE^>8C`7fBy&l*UD!bo-E5ZVJ9Wox-Z zX|iOL`CER8O%*=Nn2itm$KsH9Z{q*v172Sq%|BO973_C>$SvNO$yb%y(->hN7(S2W zU;Q(N*{ya|oy|V_Mqk5C8$W_M^QkSEdLGM%JJ7V=?>Htz_%#mOySDJdN(Pa%of3@9bHe)RB6$C^6@CyY 
z6059<8P5aZUlGDEXH&9%rYq@CW!+K{_9!DAhwTSOqeoH>+-azQs)I^YdwCCLpIVGl zUPSW|EE|5oay55W*%#N@&%(RYKY_nYFK_mCF@OHUS+sXg;GIV+aVvu?@ign@9WVP7 z&5w1k?3XP`E9-)LV<(`~Mh~Jk@*L**e}t3rUMO`~hraEXq0VRjz%LJFu60NpG;|x& zBBm~7x2)iXT(h8)o;XrZD}55o_ImB*Oasfa2H(+|Fj;6p8~CeuMDZyMbDs{RJ(*YW z+YO_0R7u|6Z(PQTeS*pyd6c%0B~y=?lj+;dY5wNhxOko}b)Hww|2WOgnNmu$=Sm19 z2Yi5QjMbd9ydTcJ+sq%7=6IE0bCNbD6OVhy(Zf$eAUoEcF3mL~;z}>N?3^4;kluiS zucyMcnfGA$)N*VMVEl)JQ;~Q52@}6I;RpjI^5BsP6&(44VcT73V0RmC+4Y#;dZrjM zE5&%;$e5g9UT)8?>NJUO<_ag4!AeIv%-*d`uQ3nulR1f!w{xySV3{@Dq%KcQPD}A8 zTH7E#B^}hat>BVlJ!n|w9KoEKtND@?ONpY27O|@*u}c%n)`1_io53W2lvR3 zk&h0j)5`&*i}j7fn-kR z4SbHRM8A&bxNk}?C+=ZA8p&~qKDig>OKomN2_{wkLea%I{`Bih z81B9VuQ4z8Bs&*cw8)ug@C$HtWPm(xz2)>BwP?2(RHz zm%zm}<8aeM16t0WKi_@Ssqg!PaOTi`m=W_0R`2#Csa4Iq#}_*qY4rj(&K6?_%gAO7 z5^=V{7kFX9a6TiG;0FyQ(&*~~k2iPmL|Tdzu$<8fMNgXL&lpiVyO?jz3ZC@3ky+et z#v3#uGn1=fH{*iZyvanVCz{-;tCjp9B`H4Ax}BSOka@A{9ca8tJmlv+gXp8O+)on~ z2Gvl&-`mbfrtMK7C+%$EP{jh2zkCV5p--e=Z4;i=2To~a19H3x!HXhG{HU_cmEd)vqw#aO`|Re43l#}`-cw% zC0~}9JW{|dA;u{D=77<`sgPs%1)lXe(KKUgIw&_0&sL9v$Va`jSp$WpiQU zQrLaZjCd2e}I{~5=_&L z;Z!@?Is4*dynXKvU!1B&{vEyymQ5AnnPE@3!<%GKX@@u69d!pTy;Y$eg(tvspcDsv zdWlaKI+K>`RrulEd~V25D=dC#3q3x`m>^Lms;{KU+{GePx_^NWQ7q+xzOxSYpBpic z&0j*iC*uLZ1z4i&1#a0{_@A;7P0cI^=e!=?ZNv(^mi`_`5AOx1h;C?9@`D4rK0|%K zGA*CbfClrfU|v8vbX4e}#(*W=@bVx()t&XlUNogw#JXIS_#DLKn&SRHwc;818bqMM zIz}h?aubZ0cYWz+Oc=Em+su!!-fIJ5I8KKgtTG{I0|(KWB`hQLWHH=Z=t1|tn)T3sTap-Zj+3h zHW?c}xA56x_JMelEM0J?6wB3&${EJ_FI)Y*{f zsz)T{YINao%6}Yfk5`?(W2$l!KHIEKTslp;U11F{OQJ$2kFuvVvzT9GK{WWKDo{fM zd9vokalBV+2aTl2asYFQ;IgYAv2! 
z{g~6Y8N=CJm!*}pE@YnSQSS5u9oRVUC)Rln;0A>sV16nS2aQ*w<0h-}ak2%t`;-^S zy{Jy-mzWAXTV!e4azHuPEu7x(f8et-p1V@QvZ4GTG<(eYVhhXhq_HfmeKd%iy=nsa zE1Mv7!Uyb?i^ukz3E*^7o0vuG(zpqg*ptNe^7A}tMwls`6JkJjthXokr@eaO_m*MlNC|K(- zz(3CPBBO4Pf)M3QSh`i4mOoB_X{&pBDfd)Gy(dFb^6uQIiH(9Yqa~-w>apMm#ek;qfpvxXe6< z-&54cn(QY~(QQtn2Nd9<9L3Q)C*UHn5#(7~_F(~3WE zVPP5u#wYS*$r}8gZ%cahR=~jTcr@5(M9U{fV|^aWtu8OdCDomH##9M!dD)Of<0ruJ z8(ySBH5=ch+7TTWcajI|P;x|v6!bQ;OhXrxT+N5&Q%z{qM>mjO;YwW>h#BYo2reZY zykr@###zY_K2D0vdSXF4%I|`6jW#;zDvj`e#5Vr z-F*OWoj0U*_w`8mOm(_+?R>my@B-B3mf-E$XQ(o%0pwRo(_#Ls&-!ExdJ9HyA5CAv z8vh&owq@qj;HML$wae0C*+!P{OXQ|@v!1KnH@G2(dGu~M1RL3Wwzyc0Z0dCHHEJ8-L`x5O zQuB3Epz@V5y2?#iMr#2d#Mn@KCC$8Mks}QrB1PT_FZ;z?_wXkgFTkE_?qqS74p$>k zq@Bm2aW?Dv7`C_%tIs;KoQXFHKBGxr$e7^Mhkb$&4+HMd;s<>H;h(%`r6SE3VMn9# zGr98h``8(|9ooER;M~XexaG6H@J}w*!O8_X^zq-@0+kniIQ8;K==QRw3g%vP(tUe+ zNB=r7`GqUq9S%b$dXP6UdPMiVIvHIdl1$bL#AWdvd~A^r6&tEKQ_=!T9ECB~KE#}Lvm`{_z>!t+1+w=xLhJjLctP5YoQm+JdLQ57!aweKYBS4WM%41c zvPtM+;q>lK%&oMr#>=rC(F{0Vb<2n0^E@^q* zkLRo2^7P9E+?4VH&M%AQN7_W-Li?9ofU-Y^7%5Ou+DmR}!6+QD(2Rskm?=2*{2AWZ zoXoElRN&qv(VWf_Q@VQ`Kr_2t>(^@7R2&WeElPtC($e(I&uFM~bihk}->`(ybt{KU zk#e;`WXwW)sw^W0ysuAYg=`1&xOkW?`$kgnZaa)o)TieMc~B7L;?3i_P!{CP<&3K1 zyyRtabjhuX3+Qz@kE>j}gkP{V2}c}U z>$ih3#B5uBaayIvA@xTf-n|) z!QLgQ_+|(5Q+{=(&+R^8s+=PU8T^V{-K0wooBRW(+m1A;v>$#r)WWi|AMj$$NiK2X zMJToW3Z15N;q-)3coXspMz)(!BgTMm8gPdtOAVnim}MaAA4?K{YtujPns5SmLsPgt zsSTDUOJ3eXb6sbuA^!?1xhyzey;=wFKdSK zOFr;n#tS^=`xG41yI}AWJ35>3?7IXqc-$ZjKTmiM_21k{mR&7mhM8f->0S7Gv>L6` zcjAY->ClyTj$>-zLoUjm^)+T4kZ??oUnxk%bmMN^%+4u4-frf@cCE*HMQ<|u#BJE< zp-#d@b^MVqInZH#-Jz>;VXJB%o*d!>`d?OIXi+t<|9TO)8%N`O=fUKF^IL(r^()Su zc|WWlW<&f<6@KqZFPJcU0#1_DKnPdFiP-D5$t?@=QuQ!=J#!b9g{Dj5fBAF0b8U(1 z_Jip5!GVNynUV~P{rurT7jk5oA^G%DjZ}`-ChFJ5f!r8t(hN;qF@MQ&Fip}IqLf+t*s3v@=SOBAk?!=@s2hr|l1@G4~ zb@=;XEI+7YOhe{56Pa%xz}@o~?x@wHa*DQuAE^t{tW&e`=v~3Q=3Y#GQi$_!EyaeF z7w}y}157b91&^hSwQ^CKXlrJ`{D^%1i1b)|Qn41bI?`}mx)kk*zlP)YZN+4HMRH4a z687!(gSM(qaOx>wjq_Rf-0DqUFTVuSa@cFbY*TWjOOH0c_9Q2Z9%1(;DXLNzDGAMz 
zB^p+*Aa3naC?4WN=G6V+M)>e(T4qZmjnB9<{ge5zEc5lXrWZ5vQ&6tx9`1kdMwX72 zqsu3Dv6;mw2$fTz|$M(nJ2FCu0o+3j%w~U27hpgz$NqhOe zT}7~RdK;D;Fs5_zSAfNrF`)GyV}LJmB%aM`#4>RoJ}Q=@b`2T0agqUCS7-(Z2*ieS z@4$rRkF;#n{N|UWa%vxYaK5}O8MC_`e9zQCjzb#;-}()QmKxF5B3Ve!wx&@t6TnXN z9wWr}1nEbO1YeVMBu?Mz`J-BMLHA}PURd}IBO)YVIcy4Sm?=kw7P!$7vMNL%6$Lki zHzY+bb1`O!7{X@Ckc1=hWVNHcV1@7+%(2}9vmRT5`Y?5P6?g|e32f*>b2cMBrb1u+ z{Do(pS&{v#%!sA&O1%8}51t*%dM3yYbeST;_uIyU%ISSj>8412%*%p$>0Yp&`3Up^ zrKq$11AK8Lm%F;alHN*5hPP9m`9&BoKAXb}P{=f=HV@)a=4A&u`X}JXVl@!1pjVcT3f8*s>xEi<1VE8R|uxSpZ`Wzg45peOo2Z29@L6B~ef@T#WvE z--Ff}BXaYtnDtFg#+663$g~18qBho%1T8J#zgx9(&+W|U0pU6Bq5caf^4Y~dU8sYN zZy!PLJzc6=8^*`=pT=3e186LN5br3ko?51tr6lLzSe-ersd_Wi= zK18xyfjIXP+#_g)_GMlCO{S&&7^et>=P};t-@&l-&{~|Z&;{nT$m6O*2{5SEiGR#G zbB?^}#yqEYXQ54z{IVwhrauP@b26eE^5N_5;1FK``8j^~J0GfDEYp|o9v&5`da(c!l>$?ASX@@nl| zo(K-Xq<`IT=BFNZ%{~rt%=bJl_Kjp`&2X5U(}>jxCUm_^E}VLC0p#{Ep64+WLPjy> z^YJ*CxitqKoa@2mTiW?0+rD7aZD)@2awA8iIxxAx6#ivqqWR-qbTZQ=4;oad(-;eq z!k91b?2W1Uz;t}oC`6lE*Wm76ds=kbm@ZPZz(19qG%w`{9%}0IYgVxCU_2?;w1GseSA^vWC8|aumy`T*4`^l5o z2OoG76E$qjF=5#a)(!PjkE$e}M@v67dM?x&pKi_L%r~0jdX_t^*>V#x$j9%r)Oh@0 za~!Ko0{smSr>Qh(!O6 zW%!EC7Z$SqO};o7R_|B9!ENfqX`(e<_UbPNiDI}h7kF-`^ALELSP6Q^9f{pe8(NE!88Zq%!b6_ZCB2N8u*7 zdd~f)CY|PKjkOacaN(#LeSCxUI7TiMyjZsl3fdoG;W909dAl#|tu&m3GPmkpzoTGpk~#>1?Hc{#fwj4-)U#s z{7n^I5B0;n3%~Kyj=TK!e{%Fz;SX#``G9BFw!ykeW%_jEEl~euN58)-hwYE9iv51c zP_?0TFn)9xw>#x1SG&uf`yb72B7KDmn_^+p;}!|tD#Z9CE7BEM#mOvj1>=kZVyB5J zG_GnTZVAfd*VHw_*AJm+GWrB)ma;wb43+_!_8fF>MZ@^uT@d!ZS@3H3FBtB+jF0=M z25D<#h=TVCxbvb86naNt!evGJ(d|AKj?aQE&o+X`hAq%KFc#-6$P@%0y2~FY^6+t7 z1pn>kK5pvWSr}31N!{<=1hIw#6~@=&i+hSRW{wM)5Y&xS&47lzd;L*fD%t*|~=+3*0 zAFvpwd$9gZPP3TZ`Bm&sDC^Z&X46x^Fq0Q{sp*S_AGwUz6SCkdqMC! 
zm@6nUC-W9BYz{KdYaKLek!ayw^V#^wiy|= zo6U>7wqAU%S@g&h^ z&fJ$GH7Y6`4<0$rfL;Ps>ZoHFUkI@>^G{f_OBy@zYz)<}GO!_JhKla#B)18CKuY?%9#n`ha&JP9G z6Vh<_yb8G_tw%$u-a*g2>tGXh58l81iYrH`k;3PQzJFQg&(AvCf&EyTa}9JiAH(Y( ztmu>Xo^;cH3dCvqevr7bvyx6Kr+e8R=4ZBHf^G$1LYBnm-g`L7#Q3I=Z>Z(`RB|Ol z6Ewuf__sNOB{nu?7znj6yOKZ@q5S~v>QN`f3gNW;IPkQkESe!NE zCTK~@A?u?^AU#748+(+g?L8wp&hIw-r*atIG#S#OdK2P3U5^Z%_loV)Ut@sPGn8&n zqzAX`g_hoLaEWKEJJXNwWJ@%D^F4uyllx#t_+yl-up!=zhiQ|w4Og0}kadCT#LDvk zOuxq%E~869_}P}k9?~Vt4KDl1St!$YAN5gTy$2BYa@;j@0?h5yfP{vTV0pFzA};lS zlb!=79B~6zJri)dBM(BPo;wYSw4jE^eBh*dCX8);+<~CG@Nu0N-7so9-(=#+ z^oKYo9N|G?dR4fyTdeT*q85)u2aH8IQCcu?oMtJoB<8rBVGygN42ZrP+j&JC%9fi^Q@9i4*OYke^~S)Kl;CxIC( zN7b(EM0-nZiFTWqyYTZhYE7~yCB_2i^4W)j&!zAJ8Y9ks#Prat4-mYr5*2G&VAn_< zEEa{r+7G()P!T};)C7p!^po3}_y<%on{ceRG(F^em7B_PYpJJsbSqXs9gEkf?erPv zvs|FwB?~e+yPNCSaf^9$pJ@`~{q^Niz;KmS3diXo@1Iw6EC!Kn5_cfquTF%sQ zx;yRq)QFDhMUunvf1&Kh9c=hrijJy`V{7^qPwPm-s3FG0q`VP)uX#v5Iw+Fi$;veJ z#cNRaNdq@Qg`aMqDzUrv3j_8_k;glaaml>}_~B*+_km>#J_qR22?;&iE!Fc_quqh? 
zjQ+qk`(OCCRgDZb?&K3&<>>nS4CD-bXw9I9;HV}~cJCTQLUsta9Ge5&hODD-C4D5u zanY37YrXU0_(>}h`9FdoVjm&5B5*M{#~dLORccA%O^q={L>Q$G3AU|bM9 z9gBmLuy&UQRZnT>=lUCPGR)^R`EWhF{jN^uaWkP%vr_zbbTxYGMZs0E2Rxmh$)2BD zw0zhZxV3&T(|wXrcj6m9=lgMdC7OVR%`4!)*f{vcx}?&-ce1@o49cv3in%9FqG6K_ zHYzZNw1FcXZ7oaf`m*s-!5!2gCt#P|V*DA#wDG$ixHVh9GQOQ1`Nj5PVI%FSUHo^v zanp&re(Mi>xA`lW;f{E8ULR+ln}RICf-0&yZ#e*!B^ZddI-Qs}6YaZzasDIRM@@ zQ=wDKo`l}ip)+j@`R^GuP@JiRm0z_<@cHqWS#pYx&X6PLcKkwhEiH1cCKfL=+tQ!s zccI%eIeu#1M>yQ}0&ZM1q)9sC&JU!tz7<;t1x&|-8e`=m$XF<0Q@S()VMNOdf0>JIw;bV zq&V>Z&yjw%+YUq`xs1gGP69dlxF=qc#mXJTe9y zLdxMs(OZ~qIvUNU_=DjOCvvi*5PoIu;aB>8;(lM$BYok!P$5s6s3_>N{)pqa*?@Tf zWegzi8OvLYh=l)Y-O+rX4cGKphFaXPBa;$cNbRV&$ zHg1L=)`ze=S04{AI)&>pe28~i7>sg%#HDOACJl1V{P;XAG(_0;=GVq> zXKb}V?{+iFf3u_J!%lId|1uD>-Vl-^m;+}98Bl4~=f{=waeqV9iRRYV{IjD9BqeA7 zFOG`^4@VElrY8HYw5~pINwK)da3v;W|2}O4HVJ?dY3y zi5oO!Gu||d!rN2zV9ji4nmb3GG_~C04{ay~r?u7i3mRE3Y91I*JIQZ*qf2(BWbsdS zy@2>81-kq{W$b&i4?|uKV71z2NM*cJN9!%P{Mlj@C?A1;8G3ZKtUg_8@)#pWA(mZ# zf+UsY&kkslf^)IFc4`PH6rY3{Ci}TxQ=RBj-$6JoguzP6*?z5}nz3zIDTWMcm z?7kGPZ}w$)v(lL+%N&4?T;^?&5kUeU1ha)6#J9tacqI=e=hXJ`&-`04a)ce(w)!>H zJXy{K&ya;#``l>!rxw`Cvb~#Tu7&9HFYw0)UE**39S-WU{DoQuoEX=Jwu#dv7anBe z{(w(lZmU3l6>E^3mrtS6X*AsXMShx6Yx2{bZY-kSBF<~WjX zYh!VW-D-Z)zxiPMz6?Lao6~KJok@;IC;v#>hOCbAz(w8(7+|po?^Y>MRnt%C=`scH z9cqS;;6p}9o6^j;Z*Xy}P~aa|fhAg=BqGw06fvLD#W`#bFwvXbv-GBw>wj~vIVYOZ ztxW4TdJ(sS6kqIp!W%eQ(HF(-I4aPU2j1@FVjq zbZ0+=FAJUNbf;8KxF{2i^3vgtnHF8OF9mGvb;%RP1bUW`$TcTQ5u05)#7gCqWQNgg zm}=>Qj;*YFp{o)nce#?2Au{Au^bu^0cAyWd?qZD`aE711!`Xy&%tzry!y*6&-!>rD zyOn6l{!4hdYCkuuZyfZ?EePI=A^F|IJyCxZJ&l7b{(XC8p3hkwA>&KrV@%k{}T zzcy}_^H+TKw}|^`HG;p`eMfS8NgRwSO=o&cH}2o!PNw})Ao*{;aw8m%;Ga%AqLM2E z8P|^Bq%cFt)CmfTof(_QO2tS?{mMNihQ_m=uDANRbC|QH53U) z&kYw&Umh;ZITkLgJQFUwa5`Lg$ue9Rt|=0(R1*mu9)$~c>xqQPULs+Zkw{p{u2;am z|4l`>aIcw2xLR2xJQXSu4*M7`>|ZDnrm^cUbrlKY-9^Hf-QmK;BSpfyIwGNOM7U6= zk=-UST*Z?m7bC{iT!@)rqR{^veU2^YpFiiE$;hYQ=xMZ%-(zLdMO z?`hvFyv**;iv=Q~vQ)UR^!;98R7$wejoq%KI$Wq#9xhyaS|rr%3>R)3FA{3li-hAw 
zi-gCM!-XB9aAB;jNEqcK5`MZ6E%0vYdS7F|H@n_;6OnM0sYvK(B@z;Ly+Q2$&$tpU)XxbQ$_@<|nr#vZea3_feUGun zn>~Jo?6GNMU;k(Qb!ST&MAA}H|MU6(W&MZ#FRcHtFDLvmXU`P0M~(Em^IzBS9m1o2 zy9O*JO?QjMLoaFw?imi`a{4jDZSChtQYSBym@YdaUXh?KdG&IHA@naDn2>TYh!BXNo8OP>~ozqPxV&|j@5)>Tpi(Hs14%hfBK6)%te5+`pxNKOZ_~*@yex7oF#p~K` z``O3`2qNYh2ux(3igje`#akvPOO#aA1!yZRd2Jmj>6)@tQoC74eDhR<;B~dBVE2+x z$+R2C#nYxg@SEUqRD8TbPEwhESfHe?pR0SS*>CSe=Uf+~_kP`WrGDiW5^+%TIlsBm zIey!|m-?+gpDETz4UyE`^%V?nI+mMvUrF+F+#kQ8Us}acX`H}L=7M+y_gx&dY`#GK zQHtND3}tazTby6~MNfh0kV6t%>(AnzS?-b?1xLxvvD*bRCU{6j-Z(5MUNu@0VdFYn zbZUwCf7bt#Y@o&e&H4}fUs(U@^d){-12Y8gN6qlt$?fs$sEqYXOE;H9OWhR9&CnD) zRea!gAZV~()fZh!lfp_#(7_YpCM_AsoXlwwC*x_7ZOt=sRnC6O9TxFYT-x>8FE1`s zpdok1ug}p}{PBUh%QejctCxhMQNfkjc{5c3)Sg0N*d;)Bi7e(S<+=5~1L ziIc7?NuFGF_Uru9BA#qd#lJeXhi6=`HF5lC1HYL)1L9EwWq#FrCJ9{A)CEf3 zy!gopqujs4_eJNe%+J#n*BdHrt6u7WVng+WbyDP`XsG{Fy+^&Hg05qO)6|*?Yq;-d5kQY5Xd` zH*Y-!bFLnc1YG(eZu(~_8D;DtIXbXMaAxHYNqy)MfnM%-NuuNG;ihj?{-5<*z;Dgm zi@9;{`N89=c)k8oY+7o7g9bXGu;BwPefkNyq%^34Py#Od&!J3ZCO#Jy!*X|DHeYY@ zn{D%h54X1>WHt}-ADdvctQ;L_=S+#iDM&c8oqPP(6HeVv77zUrjCGsEu=J%59CNdv z!!k{oFF78!ZnPsiXNxiOvK-tkPUId+XTTDLRr)Af3-TC z7A!-YS#INCi8rx7TgL6$aUH^QhT))d`tZhpE!NTci)#W+XzLwS$>D`(5U#cg zRxH29&Jy0_Q&1{|F>VC2e!q^P7rn@gHK*}utrdN#br%D-Oh#b*S)y(LQp%0kuW%af-!`Fd z{?=jgB`s`B`GI?f+~d5A4YA_pVpJW+_HwEXX!MhL$S*y`tx-d0$W+F*HoA)1%_U+% zuq~<4e+{QSEUD3$^Z0?yi`xR*_+)nOYJ2eu2f}KxY=anY#CF5a1?D6>l4V)RD`3!7 zcx{t~DrIkRWAjg5d6gRZq$Ned`g*uIDfeO5zw1ykuZ8)jpWw}=C;W?HF0kTkCRgOp zkGRBzST9+C8d-Pza7_WeZhwY~vvtXT(>&2nr8bj*YF(`?fisg5;Ni)k- zykBJ`&P?47sU}%mPse)nzj6gi9hTvEm44V%bbAT*bVI%AG4O1oQ%*(T!<<8%fl|VH43@g-b;T;`WnmOz_ zz8_Z2SCr|1OQssl4E3hEc7@#Z$?13`^fA1dXhClb8U)tUFNx{iaK7(_8Ol|$y{Nt} z`7=R=D?D-#6s|7E>)*$tV&WT&cv*%`$K0ujdKj;=XAb9=-vmp|4)U*>rorbB5h`Dn zBbgTY$hY6*6EAw;@-K>HRLy%VPHV@E3dVF(c_zN-`x8$**P~kFdf{%91Xrw>z!Npr zvy!$GcG9otG1rmG6tv>Zji2y>n+I`i&?A3dRzY&7BUY8~hcT+E@FCy?q?*mfVE-1p zvO15~X;dXPEJNV&D zzAGEuL{E4tI@C;`#_X5^=8v2HVZWy4ec!qqHKS zS7r(PIiGk)M!A*{q3@L;di%J}? 
zwg*cd2!JbfW4v!QFiS~Bg`XcKnJlyXb6E!fzn^-AMc5?*9GItH6JjxIT1vj3KWF0^y;oh{KFZIIIY5x zK*KF`)pdoEea>Wdfhpq%$dV-+FQP)%EtnE;hI=qB4t<9j(2F|?&|>~K&V9NAEkCx6 zuedZ~a;n*o$cIA7l z`kjamh6KUrBhD~eo^^&de8HAt#9tTxVfkJYa?OPK)GjUo9~TOLms*nC(9gJNcLJ!Y zoW~{7p_qJIff|jF#wW?eyy(3l+1sr~monz-;^x8BZu2{Sdaf#)|J~;+JKBN&u$FPY z{qYcAi95_KiCXssc%0--)fS!<|3RX;VBY%x7spjz#Fq}66LnheJ zu~LW7QC^1bkiLd{CpCep{%FCCurFB4KY`@#J!qBb3_%|xkjQik*%ck=x$Open<&Fb zPZfG1FAdD@1mTp@$G7beNRQ$|1AtOCVSwvaR%hm1_8g6Wd_#R*2B^V&*9=gC;pO2#k0qA z@rU3Aw*m_!Qi9bOd*>jx>eqi5<=Mas=B9#vj3(iJ8__=5JnT5`K;Mr&hg(|g$lS|z zj5jRdV>8F$BmWxQWp7HW_g12*)MQN4HX^c;WEklrML)kXCaY_t+yg$>MYK^o2rDa|a9oGWp6Qrnd$2aV+)xyay|3JLs zKcMgQ$?)3Y&=pXQ4kbB|>>U9UepbUmJtq<{+zI2V5`kK2(G=zuj zz9_?#=PEQQ;vO~^`NOK|VjN$QiT)F0i8CCB+ZSu#mYoteaRZN+<`2N;o?3qFaygU< z|AtG9IyhBK!S`w!43w_H~1#*1t0n% z9q6iT&aHYpoZZHF4S!W7)@KJpLV+|%bA8EMx0{pp4~Epk;iqK%2zfH(=Xf0U*as4$ z-SF#7> zE_BSO2Y7HqKXlu_s~pI=5JozNyA^NG8~UY z7TkeCI~n@0=RUrxw&N$MSmL^aC!tpDxy0o|7p^#zfuDP<=-oe0pwxE;=8kjco+g^$ z{o{5-cK2y+Zl)_S+GRw3++%>1DFL}F_7!sPdz|SQ!>!zGJuB*D{{o_AOF`Y&m+=CY zV*cqMunBi2PfHEyu|Jly&{{?E`(p+__HGx}goyYymwk9>Y%ZK^&VdlER0zs?1rBWX zI;Go;2Kax*$O6_`K3tCk7saFRGHbdx)c`bBSkmx!ZTR_H6yr}up_F$Yh%x~~CRf3K zGudlksSa)uc+<>^czmZ^gh^I%bWy0E2RdX&;~4mL zWF-zB7xjNQI`42S-}jGO*?W_{S7sWX`@AYeNgplRX_uykhDs4-G(;##N=7P?8Se8! 
zBD9E7sElY!Lt6M<-{1coM;#A6_j#Y^`~7;A&8nRPC7t>3WM(ON8Cla+Ge_arbS)ZC zMO}PmYt#DXI82{@9^-DY4trxaWZmmQmwY37JNP3i{W{J0Y8DH|UC@CI-gy|kEg8O> zhd^k0EM9N_Cs{f&H^p9D*nbn_zXZX7xt;ul-rZoc-GHp` zdJNzH2EbI+x9GXoAM67p$Te*}`eTj)bodEDyND8%vk%01SHrl&aWZ68mj$t#;Ye0~ zumCSZdw>WvvS0Nw=b>2v%3hM>p~{wt=A{$36gL-GwJy9YV!c1`!sR%W^BnICh|puN z2^Wx_4r_G;NhP8*ts^J|9Gs1>zgM{*Rbc!?-N~o>ewMHUn{@@%k|JdCIhX#lkskc zEm{6klbldIfqC&~q3I^``9;^lxWiu{Z|NWO%KnY7ZN$;X=q|tc{VO;{&*Q{u8621W z45bD{x%+31GF5gw9w-{ZsDVT@J$(=Rg(cu~GGhpT&A?FBXKd|A!UGSOo9s#m-0Q4@ z^F#5RZ^?8>S!T_L-@DK0ggxU2QqnOtF%4U#c4J?r72Q1D5$*;$k%ND_LE=^=x;E?Z zdR_h#r$a5{D6<^lhg=9~P3CT<^>g;s+Hhs-5`K|vI0UF?qQxy)^0q;V4mHi@?pL(K z(7a2$)Y&Y2D{e%kK5d55w@&1V#vLqqb_Elf9mv`sMW_ymMwg}}%Em=lHg4ZX% zhTienh0-*_L7Rs3D09w_PB0#GIjSrWr_VEgahE@vlApO>z_ies_|Cq{J4~#>eBon! zM)+tV^Y}F%yYV%!?4DqwWEJGH{(r^qt6WZZ4jlNJ!7V#^2VPHz#F&93sM_%eHwMX3 z?muZR{WI$_*?hvbA;w)#XRR@LAxIcdMGsGD_%uUH2u6 zS(zj?m*WQIHDDim2=6Vbf~-Yzp!0eOR4RtSqcBk#7oClgjxV{sS{rbd_{&>pYto04 z#i_=DmpJ~RBX{Cq72u@_TA&E~MuOm%VjLqo4cT=))RyKdNumX14 z=+NCM%r7Ao3TABYV)ge0_-7|z=q3$1GHgl@t2IJYf&_nX;2UZ^zsv`g?8G?%-5Aj< zP9|E(k$SOt+@#UX>}Rka0PFWC@N=44@T8t3DXWOXT)#NH8<>e1$>ucU!aZ(TQWA)7%4Ou*7 zMcV!fQ%?^^svCZg)7;d;i$-rn(;a3wZzt=MKAnk6mKxxnU~!Z!D}vI_hgi>_!JCG; zyp?JlUU*?fuYS}*IiXbai8>DB8wTNK*KK^F_!+HT3!$`8hQ1$e#>nkvWeWNdblGGB zKKq3oX>pdLUwv5)!P<~m?Y{^IyBxrK?lrt4uS0(8-r=kN8q;xU@%%Kg<x8iF09-?zdDcH6i zgyIwJ;KOF&<1L>dH?RuYwMFP`mpFKzcmXbIuRIm_aX z)ABtC7eLoHdzLw$ag-vk!)?Lma zaxDf-NBCB#L+32s3v)mE{Xaj}J35Cuczq1*|0fM^$J^4AHKu&*3xBjc+=aevk$ly| zkD#*P4sP^l!Aw16q7&Qz|1w(xvgQ}T=Uzp+QY~QOI19$Wlm3Nut8b&^l*N$o>JbKuNRg!9b z@A>dX6NtK`@{Ri=Vf&Ru@MhwDC^}M&>FZh7XrKf}dE3(6bHNRe=YYO9JD*bSNm?$GkPye(bxoJnZ&+OfG z<3}BSX+l2M+$exWZ=PbYzZrGhyA%`hSSL2Xh>Ab^$Uj$V$Fesf)XTR8r^wk*`~COv zjI}S0{k9&buQ8=_o%V6zH?`@!ZZ`jsP6%0_2dBj@VNzT^L>9b*tnuCOsF?yE{TLcv z-eletWqQQYi2A&a<#+j1<6U#+nvhBZxr_JF_nQN847 zA#&Yq1O76R=4VOH0rTbloUo7$tciQaD=l9HcS6G;Wo`sMZJ2^{v@G}|BaWmujpB+? 
z>oB${v~1VS4zL&gi>~gU_%D)S5V!XjAAM~ThPo=#Q5&6T-VzhWr`4oJem1neF@iHR z>xPU4DkM=p0i_<>xr}zF_@1$$*J&G(9ka{%kV8^5D6AQ;hHn9fP?oV@Q-V*PDUv<= zG>J^dSNJ8>gCl$1aBpEWJ?1`{3(8$7n9*9ueumogLHAuuX}$^L4!*^nBuR2+tSi61 zUmYdWq{;diCs6dh0V|y}Ne;(u;YOFa*kA>+v!;Mg>^v^WDP9ay1C40qefInwR=~Rk zfpFrvF>MKBu8A8? zt7C3t0}?x)vGFg>$3J7COZLG&ZD;hm1Jq{xCl1P$bH|ki{VD z)tGA~hn3l*h<}U;9o(Ue30K5Gu2qr@2uDKX;%xXIWADJWL3p-lCKMe{#%k3>eD+U= zE|AFKX43UoyULq8Ze>qxoY@=lupCi{{EvIGlx0K&QY7S#Cau-l3n};hpwaL(OetV9 z_qIFy$k?wS^G6zm*Hm+3>+8TxYy_{yOA?FQI=C`386O0`WK4`?*cqY*LVr}KPOusg zY!$;D({AD417~sHz7H^Z`(~)PdJePns-R@CDW=yq!kLep~dL3UIo)JGMR&Wtif?L-H`zqjSO|BYH47GYX~VEqG2Yqw1=Uwv4t~z{O;n-DsTZMLt^kU=t!W_s1M^2~_{fy65PRUZ;NgW6jWJV}{9H&iF zA0=>w=WpNy?QNw`rsqM(>w6eFAGnTzMDXx8q^CUUplkRb-;^p!Bi!d=fF1K+j~#@T zkJG{b$QN{~iAAGmGeT|!3H%uUtjkP=?l?XhN}tF{hCpYE7nn9K|>sO{>HX?MUf@qnpmjW$2t4RyOE&1A!Cur`Y zM^$(JfY~B_c*Qq?zp=%fMkfD*;V^ApD&hC31!`i zx8bo@NZx+HR)x?Y0 z(h;M37+2$5-enw#4TV7?c|m(N+r6b2lPC6aBuH@ta}PO^RhC9%dW;h_?tO(S)=B)Y zsA*jBqtEcT1^G`kigX|Rf;A2|xU|}z+?e+<=(W5HUQLxIWB+Oj7F(;+H#=Wq*2(t} zII|6X({jOw?I4MeQg6aoU25~_Nf>l+l-@SvT)~;3-I{6K8ZPU z4R$uN=W1{_=vjI|Pt#;LWXyOI>cgD$L^;~~!iOubIm`QvHlPwu+088@pe*aZ&l7*G z)}lGZx%h0rgaVt_C3XG-+xRNj(s~@vM^wRK?f3lk3)%wFxI(USrGTq0*}@l2P^Xcv zrk07%b|AgJz3e;iGA2FUiiMJlYZ_uFxUTXHo}BT+BV%O9_UVk#7PA^eJ+z6>cst^9 zF^yCDT?u;b^31vU2>vnlP>_QaS@%?erdd>=5W8#K{FYKWE!PC*T{EFJ4!Y=Udy?~- z_7z+=tC0B-U9fQ(^9Wl%!z)`_dDU0lNEt(^OHKm@w$8w!>3Xm))sb9dJF0*Y3gsd? 
zL?tek?|wfXlJnF^n5;4(yA|24!vHRSF(rQ^ltKP)I+WF060MaFpk_`inz8rP7%rSQ zyg3Z+o6I3y>ot^Iwj*{&(%Jix8l5OS8<|-GRSvLuVQVRlKd=%OrGDWJmS2Y2(@79t z)DH(!O)<(&p8q`KfA7G`#AB{3ZSSszPZDQ&Wz_*VDOSL17<~li&9?M{l>&Ei&Kcae zUx!ME?#5L?_aKbDbB9moQS3ttx7Doxy`E~buCNk$XeUQoe)-~(S{3SgP=qe&nZlG~ z!o=*>1;|-91_x^laNH41>iKRLPS-ntw73u7&%Fgd&rjeTmdcPdixp|t#G4SGIgYI8 zIRS0fx8U>AOK>~C4PBMqgQAW&RePQ;@Tp-rHF;GcKUfWsF$Dfwt4n8(y@mJR{szYx zZ}5zU1@+`y@x9w9bh2fcTqi5CF}euGxj7Tt12?$tX+^kb{R2Fq;fC%{Wy$@Xb9|nO z48}H;Ldu{K=}3*>*YJCo56zk!XTFIOxyr<4j4bJ~kfAn~5|sZlikdX0amLY_v~%DO z7+!0{M#UU%QGRpDor&G>Q11&axFJsmM{eV^J(U<;)6Y$*7AFth?(YOPI9-WDON_MFAsL0jVfSR2jL7UA54^B~vhg`;=(!~Az1Abx5Zz6>iz z`HDv%exMyn^^e2YIksfmb8CKd*BG23^;Gb1MKCTl$$-Ps6Y%wid4lsj%H-z>Wuh17 zK<}S*pha<&5Tx9OE0^8jwzxHL6D*a;>fB54AkL81cb`P>OP}CoYZR>0(Ie%_9jxEu zM7hRiXt-XT_9Z8j{wT@9ye$D(fBg@HKea`hZWY?U(T3kcMw50g#y+$jjg}p|IN#wg z{*`k(UfHk@1+Wqp|9*n%18*?PuR`Fu&Ky6D5@YN)DKaUo91@vtDBd6e_V%5}&m+t| zRFDhiFFr%{KP{S6FAPD!i$LGSjvNo*u&HSQ)L(rD27N^sdSncpFz%sQOCNJ5Bk*5^6SpmN}8F6XIIClZX z6aQhe_Z|F5UVY$fjfXM)iwadU5JI~kZS;A%yUcj@ zQ>?#eLTe@&Sb3vl2a@P-pu?5t9OBWu_9SxAy0Oa9_YL@3)fmXaP3pn z%bJuw;pf1&f~Etfp}gE3-mF@S=i8F`!`+YIEPEF?Rl?&v))9Gob_A!i*inIkBe9wq zhVLC*(ckeQ&_{OkilHslJ^l_mT9oNGiy{mP_Jck%V=C_ujfbU_=t~=Ga`?;`k}tfx z?4_tJ)e;p$9S=*AeEBH<{@8acH5^6dn)dMNAJ4*H4LwrTY(zxZo_g67EwUv}jLr4N zpuL|!S)K!xmop)&&HQ2TNfazqZ-IvTN=$XS!G(F5(u$qtIC7*4Jcp7&TEU)NxqbuM zGgTP7)`m=XlO@EmijSV~o@G?j@XQfm^5fz=RI$-P`52a&f3=V+(UPLJD?j7F^fRE;6<7{U|^5vWyCf?!U$G0bF!1&p@EMq%UNS&^0(jwP2t;qVt zzHlMji5A$(Q4{ko*p?+kJU^&$kB>$I|JWRI_7-s5K^5FFL7fc$DhKUCGqPkwB=n>i zkepXnFuB1SJExw+2MIDT_4PlleA`USc-qLV6UoQ&x8?9N)|wkFu?3_}RH>BvUOb}f zL~kEtx0uj>XjZGw|CAj=oI0(k{kaA-xS>lGy=!o-e;@4CWOK~HcY;TLHy}VW1p=rg z7bl`czgo7ywvSGztKtXymgM2zU#y>+u0g~_42f$00vBCXIr>|~7u(nkrZqSk2Ui3_ zcJvUcSpLDCs|Mlc-hKSQL^r5Ce-*P8>Y!v$pPFxxq5JpWFM@F0Iox>UJZNmuMtR2xe79LL zivD2FgYj{^${R!6JKG;$mMYMziwn_pu{?e5+yeG_jf}%;fiCVB@kX>5O--<;mxp<7 z-P8yOp04-xnBOGeUCgHf|qz0dqq7&`B3z$xMG7z8?lR=Uu{v z#UfC(UzH}myocWBCPB!C1pL&!ggqmrpjzJsc5Ph%mLkHK?xaOT&h>M%%?m-+aWyFS 
zRB*ox3b{!so#4KbeFmM2VDFm%w0Zv-n-|}zlkQqeFf`C+4Mm4cIM{o z8{kV8sFRW&DSEZMNHFP36%73Q0miBJq&rCs{~no!Uc-&3qHzFjelO-xcPHLgI|BMv zXSf#&e}nSlL6|zio)T z-Y_rw?>T>o?Hq)o&Vl9OAP5q+C&A4I^y!)y%rC5lEWKa2Mca#@fXn}x3*RASJNadV0`Hv zR6WrFsvfGeVCx{x@_oZ)+^*$yG&*5V>i`C^4&?`vX59ai`R}GGkbf3O__15O;o8Pg z)VIA2ULcY~)L(I& z+r6k6+Fqz}g)3LXM8*V3^0owJRSCRFe&YJ@Q8amNF1%qfvvRd~TosTFVnI@L`1^C7 zJg_I3&QWM}T#t4J`!ip{Fx;zKjCX3BF(g)wzU-OIXD@2ywnz8Cu!Sb98^`!i-*4f5 zQU%F)HS3f{7rEQvpWjfHxG(x2>xxBLt|Hc5ny7jNQJ^+MEkW`4Sy8+=EH zB>7X$ctGEqxzMQ>`6DV(FzVMBn$uxVwkE333yC*SblhvfiIw`a_NW>?FVAk#vzRma z<{+AOC=mIgGUl-T!0QIepu5;@bcwjm`E67J_YGoX-iAlGD*PQE&}c*t+&jt*@NT?D zv>dHW@8m}HOfwP(QT=tyhME zyPN@?(ZZY-n#Hi}Yyke+VNaDf3vTLBF;dOCIu;Gj$^!1qK<#865<8{Hj)kky=*wB& z1Dg1%hE~oau$Jzpt4HgF6X)YqdP-TrWp&B%S_ zFVXX!I{fC%i0*7Ta&{zJV0r%xHzVdFe%?{b4Xc!wZO>Gq`K*8W`|?UYXI4JAh6Hh{ z&trj18D_mfF{1MBB!qUf0u<-*b0c+7CdZD{Ox}VttO`bbiU-F-*D<=Aa>0&+uym(9 zy}#RrT#l-T?HRS);$wrbE>@02%}T}Zvmbz0xfD^Hkj#Dk`;Ln---x_{`?W?@W80C+c)!AQs`Vt?C&rsgOU{d=EbW=0rZZQPC)EBf*Ld?y;)lK?deY2YDs z0qR!n#pXHsWb>XAc=k>^a#jjtYe*ohOM2+?tn)P34NKAmEoSu3)Q~dUbwVU%-bUDA zbp_I|SinWae30*5041qWK)x2@@@<;r7 z&VazWJWlAjEcI4V#&?HQ=sm+!n4e=z7j8_!y{lzO;Z+Ory0%zQqc*@lY987v>Kz{rfMc<8SF3SlohNoKR1(vnZ7#qx79AoM*^?&p40D3;CGp9%v5!0Y+CexXDRs^uN!8 zeDuR$!SDQN*7eTE5%Do(?{3C=locl}lRiSn26Yr0RgX`_tKji#HS+SL42DO@lh-yp zx~<>G9nUtQpBv+_LD!P(XT8fTd3l<(I*G5C(9VB&e+7b~`uS7-2Gne0TFKx3Pk6}f zBD`o^h%%wEAp7+U=)`5?GGjJJ99aQ5BOkbFmo=%3&o=Ct{TG)pzuA@BG3fsC58hd} z5H7}x(|55cd;**OdNb7M{fXzGwD%M$3wc1T2;+Ag{GYqZo;tkBz;!Yr)UY55h6K!4 zwaEiZ?j>^F*5>HEUx(vED02vfnOm_oL4&SR^MTEN z`jp!=ih8IO^0q5aAkcJyoPGiGBZjZ@GSTmXH;S)B2`W*nLzNgI?Txp;{zh#3C~yFXgu9^G6t zii?NKhg+dl%8C5jDn~qr!$4ns3_ZQ?Hh%G(gfqVQ!R@o^uGeDi%JG5#g$^z#e=OOvfb}_!5EQs5 z@*RDR@Wd$;t^0aW%)AW$i)Ws=D`Uu{TSqX+DGg+s`|cF;IL}rRXX*L)!GX4E3k= zV$SIEoXXbM+{9xVE(+HbNODgmN|$HArn;Bt?)MY@m)u9`70lzO^$U#h_JW*KKSVT$ zQT6M0;qmiSaDVB8yJw~F{~ekHwVvmA{^4R=?J=5u`>I2QHNHU=>#t^dDUsUEr=V#| z5m%`bi8I?-mT{T|nm_l4?=!DL$njDrTbacVx%zOE)n%w>AHw-*@>H+*As$Hnf)xh} 
z1ii=1_0*{ZWQMjR0hgbk=%lIW2*2p6NC$` z%H{}FTyLcfGK+1f3dfvi9^y1q=^7t4n1$t08Q3dk36FxM>92>3>m0KULu-Ta zo|^*ooz;vNhJP48G|V`-X(C?5q6Zf+{g)lLt%~su z>3&E}Q6UFs|KfYaX5hS#Dm>=W!&Nvv#XYC7(EtWP(5 z`v*6E3;4gYtO!YZ!hAdOG{ef8_WYA#tR@|nV|;|$)8u$_NnskT?m!;;P%QktP;fz@ zO#chFAfA=C`SXTjIVlx!;``5@4xM3t7whv(E72zzfp6hQnke}=Oi_DI7+k(K3v+cS zm)x`xxjsFtV9Cpg(TSETUqF^aS}Bv8;J)m9KFzHYRk9}F z-J%Ze&gzpm;g&oxn5#wJ3&r6;UkFhwQM(Txokt^I@pfmxII~KWKkwv`4|3rQhwjmU=)`%rUN&PvA6C%S8FBW z1c&#FQ%zwPwDeOa^RB$*Xr2%{y2{fF>utz3mCq0+^O39E)68p}c*UP^Z3R^yL%QwQ zfFNFSJxY4B4nCXF-s;`KwiHV;WP23kf2k4of1`;@Y9D7>n~PnaW%!gy~1ug$~CIeqR2hG<|}O) z_PhaVb?0;Anra|q{9VZH@x*B#8`x~6$g6F903-d&G2XcgXY`N4tT8IIc6=6WSj2dm zHi|U9vx;BalgTD6Lp-Qc4By-BX!cKI`sVsme&>Bt+Of@+cusPM2-yZO?OzQmm%Zke zTz(5KGn2Td8q!q1Q;fPDx&#wrE<=F$9Ny@7Y1!4C$|RS$IgUL(i~VzQ(BPsmsgv{I zhdR|Tz*mjtG>l{Uq6+x!x*y7opFr=NW|$@MN1&>d!dYG#MI`bah>3C}EjGoufj0l7DSdEtUmWx8qe6P)_!Z&|dO6&C)KAaifL;rfnU5}X+wgc5$e z_}}3Sc*OR~BT0^A;Fl%+v}+wscq>Jt-aP?vDN!;fFh%epI|Eu`8N-Gg+NBzqF%X8C9H2`4g~OG@a|7Tn0W$p?EX#GWKH@c zEaMeFO+E*OP>o*vt4NM??8EQzr}>*ThIHSKO<1y4p635EfL`-;FrU5CHbE+W)RMwo zm;OQ9j`J|9WHh}v?Iv$#9mFs9^u?PF)p#^mm#obZBchCDI9mKQzp{g%e?vW9{P9%~ zyu%2-W=fIMO*9Xs;6f*j(sg!1^r*Znm0Eintxk8rp_O7}XW|%=)OZt{%vnD4y#nh$mcUYD z12VzOp4|SUMn9{snW#Ok5`28>(Dbr74SD(mEtNeXLwzi``!i=OoBJB?7*XwUg_z2C zc<;Z7kgBC;xY#Yz`OzklL`cS(j_Y3zYft-gMGf;JnDuBjUbp@WrTfOA$A;b5Ruc@1Po2a85d%UJc5xmPR>05F zXf{`>!c4Q(W&h3WDeKB?1ikbEkaj-sZ#}}JAFjfV_4~nj;WsGtiNawUA-w*~ zlzAjpa<5e!Xi0A<*q|8%SZ*ff;lrpD*EUPg@8?Ya$7lXhcH$yx|C)aECi^9IpiCAu$IiiG=rgu|;{ zaP;eCxS-%PWPK2(ACe?kZ&sJOvD=%>EC+hzLlK|UtH&5l#~^??0zckKgPd@4^0BcM z4!6n@cXtJPXqzF)HP#3HALe8TO|f@MAB6suBO`CZQQ`C((5w)HEz7UMwKg;AI>Ur= zNhfjQ>2%ELA5CKNQa~YnJl`@e0UQ-VLG3?byno6Dc`8OM&24Gm-XU(%&OywV|Bh`h zBxpqFMi84a$oO8Yk2+{TW~jtq-qaB^eyIhrZ>Gci>Oxd|`T||^Cs|PQ+uGnz!|bF*TRmD5$G`4 z4uQJLWQ!@AB`qZg^=yUo#}oK<%1u~q*8;~oFM+T22;7?Z9F<;+z(U@XEU9-QK0oer zoou%+uvH`*`_{qoNBXqJo@K`$gh7R!IL#b9&Rsmkp21s>q2=IpTzv2^9IY>d5B+~Q z3He~$-q$ZkE%^l>#q7xo`^g|p|KYq-MR+(i6TSTMV62A$onF)kv)2ux=kPSJopBpC 
zwTNP9p%XoBH7*0W(h)pP?l)OQ~W;S%>HDipU~RU&^DCgW8HbMOxrraObKqG?S(Oi$7! z=f{eY3DM@{Ot&I!i`T(9n}_*LN7k_SCUNTDe~;zGtVvp{J=vZtO7WWpeOuy!s}2SM zF{wjm!8$=44TG>iVY=Ss49?rs0XBDndHHi!xkMu~v^+rB-tiTjiIqdqBx`J01u#AJ z4w&xOhU2HT$m9kmy5hAEYf)ATbQM#%1{YCcaQlfMLhcLeoJ+F#=`TFUkR&}ZY}d8W zj+nn)gCWx<3*=tkz^yZGa;DmKSbxln-q>$MlAkAW+j6Xk@CPXvC|4kD?~h^E%rWGX zs2=?yu0z*emZggiWkaUlQn)(ChJ3YFBoYJH!R&1%o)vN+zrL)1xhaMupoDe3Htogg zUgoc2cj@ar+N5jUA84(d$sb$NQr2{2EAS_#qW>Ke8Z$lt`(3V~;{#zBq2Iw~)nq=? z^_RdWF@gV?Z%2RmM?tHz2hQ1HMV4icrD?S$bmLuP;*)MmYt)UY_UuYfa=V8?`){I8 z-FgsDsSx~G*HgB1<2X|0qfbBcw)`W;f4l6I03SZ5b1j#Y=(D;qB!`sA=~=g+rXq^_ zBP&TQ_7CzWZ(Eb_4eYx#G6Ee~M#b!>1DO;iL61i$5StMH9*e z={3wHDj&{0-f2f8@5qx^PU>_`s~Nppa|ib%Nt2hsGW?otA0VjQiYy6_Lhlr3B6D^t z|L}z{X&RD;!CBRyHm?>pjX%YEOcLiE$GnDb-oMcK#w1QHU6T6TDdtlb9Rnf%t&n(2 zj?6NmaM@R%KCU^>d$PNw=$!si)dQb!@OLi1{PY%3*Ga~AuRZt{-v&(GaD+>{dl}^9 z{-9h%K7MI_f=h#0MlIEj=so+#dp@n?V>UPvH(4ige!V|tEZGGQ-RI%y8)-PG+>J*Y zJHhuX;-zpcQkF56$jl#vqGfjkOI=kcUSf_Ov0;1?n1FSv;jrl6R}P2kv3&kYyjWsH z)~pQ%d}%;^cgtXt{X*0~+=C5sl}NpxBKet82CoAAG1pE7R#(KqK?7Bq#_ph*1rCgR z{|Pot+J~k#q4;!>BNs`ZfkU7YJ)(6H&#o(m5*p0+RGU(B%hBkW_lPfZ(xHzN-oU|W z*SP1=f3QBnfe5vSqflW6E-%TPIPgmg&+5PBu1H10nDB@AZ>=Ml!Q8lu9Y>QUkMn#N z;|3(I{fYwCQyuoCcz?4sPd6@vSr_fe#CB`4s~{G<`to6>aUl3b<$~rkOLDgGHmV$C z9vq=QNYNd_6%`UBQ=&_ta95d(di@9n4iA8ngC2?7RFAUz$D?Jv7CH1yg$_&Y!E1+? 
z@%OVm1Qyp6$%OxAq36vpWcZ{7i7OEW8Ez~Qwzj8jjODO=;0j7+#-g&QJU#kh5vDD2 zrc;(FkPoj+XxIKQl>J){_osz`kjX=KgUi9>c?5otJ&+fp3*QdMbGC%^_4`S?S-OqOP zH?G2=zjkErB>?j|PjT_W-TdW~FJRh6SA5wlMDqr3@Y3UyXp?g^>)Gex@0F^2nDloz zW}eS0ig)6M@_c?#0sB3dh=;w&BM_d@#6LCv0}4};IFC;kxkrobp(>d<2S2`rh-zKZ z+#^j-n)u-3a($Bd^fZ<<3gfSoy%@CFggi~wgUy3-u=_)AS!nAZ%-O`4%9V!bU873_ z+~e?mzBD}`)Q4U#a=_gr0P-JX;QZ;G{O}Y5G<)+NrYjxCrFWTI?IFvp9(p4%{=~XN z&oyAx&WD^W%YFFhYtv&}22eliRoUuS86eBNA}K#zaj)TF&{BMZGsVRCd1JkK`vWh* z!(|X+y=L-Bfx5Kt({dmys`1nh4dNA}LeAF2gJ_dJeXsEo0t2O~lWHK&zVaUJ>SW3A zi>Lfq*2kSL<%uJjw;PU~2HZmKwa(jiTi z7~7<1=Ut3$UkoSmY|D0NDdDH9W<>RTCTFW<42};+5u^4vn5~+Mzl+AeZ|T|mIL1&J zdG;OCF808`@J(QTJp$abSiaj;moD>kgVg>|_ zqWSW}PSnNzHP+cPZ{#rhd z)L4&UM-dn4szvUFH}i8-mY~aLUAorG8Da+#;MMohbm(6c@_(bST&$IOh>hu@0uA!R z)eN1NbqI`fB2b01AeBwualJBQ3`?>jgo_%@? zKS_rQe&o%7kgiq7jXYk!M-(ZTm;q;CzbD z4;3O+Z~x)ez*ZcL{0X~8G|BYc?Qm$@Tl}uU!xI-73Xew9*cH`$=XxdLXk|%ee5ipn ztgCaq^D7Gd4T1mMc+h`gOJ?8t%T?{pgZWvZ7&yNR*CxKhnwGbyDq>Bcw-c^ttI*Zc zAEUkSC)8fi2sX@zaJN!Iu&FA6`?AUyiWWE!-G+NSl?}xWZAN7Iw&y6*IRf{(Z*bY7 zIe6~6I%(Z`2W86kfQV)q*jN<6%iGzwIV_A1xcv{l3U|2E%R$nqD{p`7<~>O_7da`m?jD*Rg~TM1$WI*y_@SVY6aD?&&t}@I`%k zAezmGgJQ&E^DT%f(4gmPWT=5~Hl)r`q>e=c_)W7H_Z!{-r@~x;td$5HN5Q7p-a;bftpY-Xx-7GlWULT$4)VfnRb?2^hdxIn6527 zIkW_;_i59LxDeRomBVdeox-CLZ$RH~A((Cah*R5UWBaW`Sd_98mS@Ppp9lBRbCo(B z*3>2sRAlIm-{M4<5mpEB zLTz#;(}wn+zR%oXv6y-_0*uBeL-yX9GQ$(4@VNFlP99bxBSGE}J*p6vo-ra*MML12 z8sm7GS`sW-#lLSd0H^<)i0$<{UcL7|n(aA;7LVh3#qkDo(HgePNIZ?#8Mmt@fbDWV zDbQm-+soctHz0RQ zKAL_1idPkE>GsM+tfRb-x82Nx`!h*0`Di_V!0jkzjI|;@PsX5dcP(zKtLC>K8elH? 
zM1J)13uRZMAM$ZK{=-Mr9B;630^aa8h8esW^@^7#yDw$pdrb?nbGie}K4(Pp;!Q}g zS2MovSj*|1aG=RYMTjtJ(A7Ik`SUZqP-<&3N(5Hpz@Ou|zOsau`)We6--yyBp`vtQ zzYNK_phl_}$kA^*}G;$=;b1Kk~^Lrdbt49xBFqttwNBw zVn@^Zanq2FuRIxB{%Yqg|hLGFmlt*3Gr(bHA#g-2L>K2E9=`S4s%t;ke;RXD6Za6 zcBJhjH(;Pa|Jz&zGv|ckxb+QOZNmrb4;#zJm7PS91MlGXhf#D>|4yvDDMm|z{*SBk z4#)C+|G2&P-aC8K;JMGMXlYVuPYvx4rMa6Xs+P%oNZ;AaTZ*~S zCJvYL-=!#ahAkD|WoNq1-ef$d`T@z?V%+8@irc<3{{9R%`eM;}l+wP+8<=#VnD8y8 zAN`Dj4x;4lvC}x4`AFXBc)>JXc{I}yr5nQJ$t4E~dUdKHO;J;&j%-JIq;3i9-TV@& zKO2%&$B)Ci!8Q<7E70IGPPE1~n`?3UpMy@1#*|4=Sma2DM;Q>0t(~~f@EIO)EXPet ziue!@T_Qi`5+5}FCk``i{mvCybXNUv@<0P6bzOYJmGMJQ0G8 zA#*fYQ)696#&^~yElSl~^hize`nWYps>#yF!GB=m!vH?y-DMyR;V9m2NFM#tq*7nq zNTP%?wXf)b?YXM7u3uHS-t7UPZNA{Oi8dLwNQR%94xSkYKg0CWL6z%~IBM=DPF*Y* zW45tp5s#BN*Y^n42fo1QjB31}V^7)gY|OGo>5fZAIg;$N7lFC_Kv?yA`vo$rP4x>t8EQ z>_tPNau1LB7>1#J;&ifu1OK6saouYBP_KmIe8$pPdPJL^sSV&ek|NM(?km_erk7I; zJApfuzw<}N{SzJx(kK6NRLChWRr=ZB7cM{k5!L=p5oFww$dyjhqt_IT$+7j?wD-&a z-@$UG%j4e*5`7qh;l3~asiK0j$rQIZ^n*_LC+?_tDBOOt1!jefMH}5FOuPC6JjWHm zVTbw9{dhV)f1^nso3>#7R~5)JlAzOFZRx`4Avo{pG2tD(3z!`@f!i)si=V_<_vlXz zb~?)u^R=(gMt=pgsy&0e*OKIF@?Ly-Cz;z?Iu>t_3gQ-OE=9!~79=i=@hX$8>BtY6 z@Og(S9?dnT&&3X5=XI7@3A)2AcsCnj6!mDT5uq z8y(R@qL7FO-<<3Bn}UX9T;Kitk#1DVqO~vghH) zHIW$4+$~S!b?LjSkI~?y6j?-t@Y>yw-1m=xtH(vDqvT0gDSeS2SJQx}KYRzL&39l+ zdKm9KX(ZcSD-+zVL`#|()1vs6V0kvn(Og%feo9g_JjsT8GiR!YVK$GGvaCq2hdpO^ z@G^?C+4llAPdw=QjNk09&Oa*4!@Q7EQ1u)}w|PHsyVoNa=2W=cq1XI4)5Q=suar+e z`wI;U8qr6g270tkz`W(+B=@5R7qa>QEU9BXkqcq)->oW0oSBAdGZjJl(NXBQXpa#^ zIUp)I#Ets=5lgB}X#M!gabq#%A|JiqRi8=5}!!rBwXIIrFiBR4415Q$4%TdrwiB!JV{A5SlDFhU3ch$@KX)kaKecWNos5x4~=|{P!=k_Wa{t zcI%Kw&55|{_(gtRgdzD@-i*x-4m9gWtsqRy4kq0zf|n0k;Mg4*(pTY3f9rnWK5Vrk zdsn7#C(2tr-G{{K3fD`5SEnN3@2_z1bqs}P@2fzd@Bx}4-1wCB6kS!={LShn1e!8d z>e(X5RUX2I`-=4Fsd(&eti-|=SMaZq$J#OzeDUis{K|~u#-l1(vs9mM$n=7kH>`ss z@&KFcFALwwF`FZ3d9#!td$r^Gv{;mjhSfxfrH_FiC zio+<$fuBZFma6*jy>`ob$*-i7jrytNA+bW>i@!3q^E;rXBRh!t1xn6F7l5% z;Okcl>U!(1P;#FQ`TE@x3boW>bj|>`a6pL)m{a|+wH;Az$>hC%^$FK+DHnw6=EKmQ 
zaIpNe1dPirfXR+@LGs8*q4i?+|BQ7cvo%fVqcj85T`NyVd`yR{J%bSH{SbPZ*e+&! zE6Dxy!PZUtguh=_LW*c6{Jr-OVpt}nv(VB<3Lk8~010!d zaBhwsmCwjTtJ^x{Wa>J8Yll9GeEw7*_rZabOiscLOg}YCgx5~qb2p3<&q@RZ5?DbL{6I2SX*;&CKEAj7hV~~Ow@aIkr zCmR_HPS0&f$SE;cHQFA$O^48J?q}qUw_s}c7i?D_ zp=StH9Hr=mY0MQoUz=8R`$6arTj2iYaUp-p`7@^{qpha_$^Y~MW0hpd$El;JSHdSx z%PAJLw@iR7f16-TmI=6*I#9XeRuH~Ron-tFLhZ`MFfk(mW;8Kp8mCKc%r~Yt|GMEl zk34KlHl%O&ZiME-cr2G?-ml5SFfKk4PUYr+#_&?8c#sQOJ2mOeEaeF^H5`f#;Z z3qEleg$0H>bl&u7IP=g5G_x0{s@aNUY?}nzy-db?jHffbtppZ4I1VxA?qR{fHaMK& z01L7~%6)}+Hw3d(ce@S#O!#LvY7X6#)K z>l>}PMPEuVSf&b3{p&~hz$v^;>`9Q{rNC#*5+k-VwMnPkWqylPDeB*%f`yH5(X+W3 zyE^U&b)qh_zh_LSXbsH9YBqZvz_m{caE_52eHnBNU4zbId-4U|ufCk)Uue+Pr$R8s z<~c5?oxs)U-i>hx~c_Fy~44kvCq z#@))$fYim}H1&xADQEZi#H6L1L9i>`TYUr$w0gt(jlc2IDIK_`UkQGd%-tk;8(l{l z@i)JRVcgh{*!oulRf1Tac;gx_?eSTdwPgVA7fF%0JQXx!ouq4@rD(eJ4?MQjf{Z!z z5AEOP;7b!TI-%2v4$J?5-n|X5WStguljl+KngcoM*~?2F5TR?6UFpA{4)n%IGgACu zH1>$7V77}73dVF}_=GHI{-{C%kC$;0+X-q9m_Ytx9Xhm(-7g*$@da2W8>co61!3=Se&eR)hu*-eb7w1FTG7{DhCP#KwP&;QpLO zG?BayqUS{LP4rz@dDDlU>3fj$w|y~DAGYk~GUlc53-7;m|N?R{N1H%6O=sU)*Zs~Fj^S{Cjl zDbdaRA22<843wgjNV~!d!PEJ<@Z^Lt^<{IJrOwB2*j0@@T6a_EI8RIPeUlm;S5|-* zG{0aYu_kL3$92dD{z^ln(? 
z!Au`3lKka5XVGdyX9%NU-Ag0#u*8AhyZ8yc1CPU0o3n6Yh9>P=rUti4&%=~?GjLwe z2ZmAK&6lJ|&?pIK^xylJbK25}t*dgd^S2l&W{+qfZNR6RS>Y_p(Y>dLhI`wRFKK9LJ4 z?Zs-b(=b=$ER^cy;xe|QSY6}>TMuayiTMtsH@y@jN6q1m9kZgl1gukK5db2gKY6W$ zZ@j&pEi9VVh-8H|*;6P&mN{mlmCR*<{U}znd}Kn#^=q{LGT>HE^XxE2D%qmf!f5EyeKuWdXnF`4`UT zD*N6)&*J|S?0}lzu7Z89<@tXpQQR`28j5^iS$(Ml;hFaiWTl2C^_?hA!`HmPpQ`_O z_X`qSe`5#cDKCTZTNen;7X0KUGk#b5m`K>t9t_#KnsiGDWAx!IjAQ$`t0^)V^y?AG zt)7Ijapq8yIR-K9DF{)920I6U%Zp@4kWeB&?q0(BQbiJe;5jC6>KL%43fi|=bGean zG(BD&pBI^uOLO1j*+5#RaJHolR_V7>ViIPSg zH&EXMVED(HEM{HyZ890$q#Z}0aJ2z#-Flz-$vUua&2DVklP<8%nUA}8DQ?eLc3$g3 zm>8}@yslXigBc^q!KcFr&R;?P)Gz#etQNQR%r7d199f&bYXBNUbA!{ z6Rl5h(yWUaBVtR8CVfGA$bgRh@rI9M`-;aR(b%{`iA+~gp_{%H@NbUGks}IUaDchk zYn+X#p@SCby=FpfOBTRSB}e)(pc8-VUcfl}2Uzt}n$IqN#`DoPaQW&obYxu~%^Ol= zN>vemvG53Lh$~R<14`ru>ohA$wL*w>7s{U41C9+2G&N9)r-T}XO(VI3b?~x8~ zcOQc|aX%CfkR_$N?&9WIdbGyPj>|H*g_|dz#hpfP_&pAhJb$roNs z^35>cX*ruqzI?&UolQrZB`MtDQG;-gBc3*6{^v&Y^sMpYY;#DRyqv zpcd<;$vby_GNDg_6ui^NvY0J+BX~A%m)gd!Qx+vb_Vci3pCOgomWT883o-WKX56>( zALB5X(%|rW;N_o+87sZ;eb+H`yBC4ioz-c!{xJHCt%hPQ4s_S?Ff?uvDqB2*i3fhO zZfF(${{BIrG03`k-|gx7XMZq4aG$>ru1oqlB#GScJJ1@ZMvb-}g7wx<_zPM-{IA`Q zuzqR<9E}wx7V^(wyOASl`J{ui@gv4)72==$6@u?QejxTwmdyX;k1aw4`a9H+PG8Iz zV|S%L~vw-cl*-UJWH7vMm1`Tt=F~U=YI%_NPW&YFfyb|!| zXF1>`wWoMh#)>u!pTQ}ABx5X>$I1@9glYsbNfe!@NfPlLqD zeup(nqBxuT?y%@hDE4emME4>sY7id^k}dsw&>0^%yU!md_o$Q6F@^k1tiz)tIxsio zi9kuV6;`}YgFqeD1Dzs3pO5zBY6QEdKh!28wBI3|W3!5qO&Fcff+@SMz_^85fR>1m z)Di8F`sp(cO_+ z-(bU)-+*QYjCrC&>RFck^-~{yZ0&pKQWmGS$Gb5->?~HwY`{Gd*0g%;G|uto4{pst zmb*RK0KFeHxcT`lxc5Uj`1Nbk<-413%SPrG{-;EAOwYqidFCP?Hw>j|@emR<0A3>3 z!J;sgU%fL4Yablq7JvGP7d|PY#MN~Eg4;E2d1Mgx`Oq3H@=k$eeLuh3y2$_33$$;%fRa@bhiyk?a;!XmLU1`d=LC71_hGpd`Sl&Iz zD@>@y*3l!;tza(fT-Xe%;`X0!UodFzl z=bPc4r3LVL*nzzFE<>Adt$b*T20v43G1Mi;<1gJX9J4SUE>wz~A-ToKv(p)q@Qvv68K0}-Bb~s=D7ls6_=L3J-gPhZ)*t1ib zbSIq={@Wb{>q<1ZW87omDg{ZpB9Xa^ezG0m1}myCa}B0USEp@hi8xVqBLp`u;2$#1 zwtcAq-PkWpC)t)kfT{*H(C^3ofbaDn=K2lhos|vA 
zwEj4JR=W!dTcY`0#m+S4r5mx>%>0LoEosggSv<%0@Kcl-ga1P=+Bp2g@C#OC^3p1J zKg}EKkICRPDR(lF{VX@@SK*00>iA}%9G&~P2HPi1;U$?{rYGtbOf<^iW3?Pp zPCNhd>pL**rzU+RW<&j_q`>J{H_>)R9SsrE@g6*ZbbX zqYP(q{-YF0UoZircOQkEfWwS+rbldCmUI1Q(?HM97Y_vF;f=kF3FDB2KCY)wUP_Ui zSfWK21T%h8sU1D?<36g%9)sBvzw-rzc~)~|m=9PC9V$8?-0%`kKDYov$An`=pCVm5 zv7P^LM4qPlIidVD#tUszV!eptkZzKOqe~^Jc<&HuHY|ZB@6_pvcNfuKPLvi0sneW9 zS2*2FvcZfBF{^H(Os$;LrwS1d$MmpOzhThL>61N>O>5%)DnlMMaK0;#{P zyeP}OT>o`tzEVxwLFcNP84NcXfe!j?8f2FcQ`WI5FQ8D)Jp!0<_}00InH(2 zT!MAg@*F#Y<5)6mnS}!`L$Lifg=@!YeoB2_`E4#FBxHU}hkW z+k_JQ;F@;qJNFu=U1SUeDK}y?piis@S9AFWR&@W`r*LulO!#>6GODRqk!?TJ$n4eM zd6lMLsP29cmX2`6(9Pp{b@v8b)#*ZXH69D{s@8H(g=skMgCSizN}4{MImG$i$VU6$ z9_T#Z9`44oJlKZYFzWFUc&6mczuxIejym2zVb?#*c&blqQnbiC!-d#vat{{&r!U++ z&L0PhWk@VsX(oEsWh8J-3hch+spOyd2Dhx;Fuuf}sBQBnF1!GSo;U6J$ zj6`dqLytZ^c&8bJ`&5X@e+&3SsYc|rst>=N*FgEYmuxQ2_P1Rb*j~3DzIUulHG@$=Nq)D}u8@1hb3M%^b$iZLkBq{j?pYza_)JvOz=)+gg$@18P&at zO@uNM3NF{4bULOsCufhec;` zUe^!d@H56qN_VHxLvmc7BFoD7ug1_f9h}INVziGv1jU_Flt?B(d#(rxORa|(n+>S# zp$qSpnbDZeTnJK`17gZc@k5>^^At=GgmsOe7oK_Iji)yB*7~=&tJIK8u3iB}#)mQd z(iu!rlqMY-J0QSYnPl3g!HdnhtONWSZ`}TeqkP}O$jkOr`0WXsL6vcSD%Y`Yqy;_j zvV*g+IE{PWCSzZLJKkYTyk@-{o(rdV3!+cvfL=-m8qATVZ8oCp`qQOJdkay2uL!ZZ z^$ZuZBtiS=uNYkR3l!W3ptvs1!=mxI@X*;2*cDX;H9Mkl^{Y6X_tOg(=txq7OiNlh zO@o?Ax=?@V*O~*%lfxh`cEdMD&Oug*MNt(q;q zEh1i+DcTG^Cq>ElaMlSJEkkDHGv@n9Q|1z!focy@G0M%2sMkK=8Zvr8G^~o7=r2wN zM(PuVNg9~bX-YS&v7*DwNu%gH2CmB9=5CAXkvZ*0P_y2Jrb-i(%n*TlRweLzi2=Fr z_BA%1i-v}7DH3q+15O(omv_wg8j9XSJn&^7nt!yQv3pCgfpwTu@?1&rlO0&QZYtJP zWMM@0c)oGNT`)Wyj!LnoxF2&*!61DHXHwpwUy%h-WX~1jJ45r@EDX8DPR!x(_dNeR z_Bg&jWkpwP96@&{AA*>$McAWrk?V|*p~W8FV7b2u3VOxp?WILf>}`wQ7nI0;KiAdL?=LxG3I@5mIA(0fLRc zL4PLOE8psdo|zH+7#9_qdqSHlDV63Ieahyw1DVh4_!l^Lw;8%N9){7skRKQ~k#q7n z2YWP*3U~Oa5f8R&=pN-lAF^5Pf4_S9NQt))dCr0^lrzQhU5y~(qCp;>cZQ0E`OvQ^ zglO&-+Kvf@fqCL&ck?}Hdt1PlK4Y1Rd~NbfXO}?j-dTKd!;wz0X~FYX%t%@8CHT=J zE}6CHE*3*LHM3x7^k z;H!yeu~Xiv3uTFHR;*rOQ&M`B%R!hcHkNd@o4=&@AOy+`?Lm;2_=rlacb|E&q97unY5jn^5e?QhU 
zAHrB8)}5Hjec#}M@rTt=Yd;Yd}ZOJ4XQLw z`V=Jp5+TOHV?9s3QzL#~G`OALT}j2blgx2rPA_)rL934`-4?1v_XdiP>V=j#T2qH~ zvi-1RAM;Lqb*3t1n$%>qEg5N{P05-zIM{s^cPmsvz_VgpCsB?z-i~x=%0IybV>$li z(nj2C;*QH))`9E|FJA2MGIV~ULSBvOKxv>Z1@ zv~Tl;dnC~Cz6|NS8p|h|{l_J64V=|0e~^e^>^nDE`mwtTeM)oZKqvHWg zSp6Aa|5Ap)X<=MqPLVJX`HeRj^}MbwZUktx)sI$lK;(R zJEh>w2VMCmmiMAqtOIRT(LvX()qMK4FesK~{+{m|(6_0GA5uRK>%_$Q@!OR6=Tk;- z?uE9nZL|hGb3KA<5P5~#9j?MO4-4vbGX}0KJT06g=?F*Bj6S<%$QR4oalsbDH(-!1lNxk`-wgWG6W=|tsk_b@I|5w{`6jb9+2#tl0SfbHdML~~KH zOfuB-zFIh>bx#C`)C2s+hqt&pl~wp{g*Bh~>K};zZ9uu#Li~C_3@j8?h^4C${l!?- z&4UVzzZQ?N`u4P|OoN)(W`pvUuiWB=Wyr^F#qrJ~N!ZGlFp{w?HfgGpmLn46jI`K-s#ghRs$$rZ$`e3a3i0t#$##;#}AlPfls$G&7FJ!{XRBB*^6S3_*Vv} zo4yNIj=soUP1Hfl39|ILizD^1iGY;lA=n&ZmhTt*jk~TSM_v!OgF)gQ$nAIx5s`*a zGeQq;y)>n=foeo(W%4?k}fk-nmf9OJzGA7fFx~b{^w3pn}31PJZExzu?W4QB}4BW zz6cA?NYKDN8*$t6A6VNllGu(6=jGhw$&5LNQJJq4TzvIgc;dMvy{Nhn2XDJl>DkLL zQu;X88tnxh+mvY7sy1BSH3m!%D9}?H$N34Xx&=0K6EOF20#@C4kB#egqv+s95YIFs zX|E2$ma=PpRG%5* z>KDiPvFY9XMw@7EcilbC_Wd)Acp^<^bI;+?#PhJgxDY)zUVtMytjoCf2vjUG2V2P& zzC2QcZX7tw_vQTLZZ}Kg{%ZzQZ~bq0IZl&*laz@z@{Z&%o4<{6@4@=L!8nFg!kvo= zkUu($Pky5f1H%=d|N)BIhr|GpCq(h+=XK$?I zL(k^(HBOfBW%w_Ods`Ddt7nj7*ADSEDkSv06z8Vm#;*0N_|fw;n*G$oQ@6rkW946- zT&dTEkgDy>C(~NO3Z;!49|2}?yQu6k)6w)Q ze6TU%6l0+I3x-9{gWc>jys_Gn+ErMPpD&JpPK^aFN;M>AjmJT5`*N;U&XDMAZNQ<= zbNQ(sv#?}?1Z|(aM;QCv2PfWEr*&Hq)1Tjk+3PgvqGQ9{OkD|T-SyV9bbb@G>6p-~ zy>ocu)2d)TO`R0`WN_aW>f`=Ab`Ne~Gy{{z@aeZ1F?nCdy&2)-sd;fGjtZ|v`;s{@ z|H^pA`%t0Q3nZy;Cd*#hZN=?w$JjM98y_ywr&pS@VLJP~fy)A%cKxiNKK&|01+?J| z|A)dcrBd{-T{!pgcPzA8>e8qGwqVPWbWm_>qn0X{=#RAo#tjMH*mpeSrB3IO(@Y`0!NpO;p4O%h{Nb}o@Zi0 zaZmhJ`2JEI@*-VG_3LT;p`=gvtXqNV?-oLji3D-Wjs^W>Rce&z2WiYM{$B`~GVA%;{^5WwIyj{1J-&bTr8?MdZ%>|sZ{%d)~hwS81HkiwIUDx`gedP zn?t-`xd|lH#9?W&5Ux!A3`0jw^U9VLIBL%vSXQ@|aZl@U%(WscssD#dknJL0xq`Ss ztKjc@1xn}^oIR-;m(HDy$)7%e|Dg%Yw-CyKl`0hpEZ{CTXkmIpDSyqi0hZ1V!jPD3 z=#LU&|B6L?49ltgo&F6&O73#K>H1`E(o3kTx(r_`MDb}W&fAy?Vor zEVz}8S7Z+1Az=gr=En=qX`7P;^LD`NiX#wWDM8&Tw8*vfpW&YN4NxM>VZ$sxs9yLS 
zb0z)JKYN{EM+IYj?0*3rc5gXd?d32TuJc>=4+$uH|C>I4ilb};@WL%yY8W;f&h;8l znZk###AGt2W>n$mg?CsbVGzf;+E9%wL#m})kAFV3VpH2~7+m8@+a}6z4><|ydlbF#4Kk61VXqaIAkiER>(bA0NKYe{KxJE1fg5N$cZlc`3yGA z(V&e7D`C`s*YReX4!xdXPJPZ^#8*E&aki2wQD?cotk2psWSDiCf?~LRT^iIlNrS!_ z`4lFV#6XeuN2p`7(Y~?DWS-!?WUFOZ zs`Qw1^I$I&W;_Ig@yR$w@imu!&58QzM#Fb2mSvUo zsJJrWE>)vU^jyq5GL|>3lY}*yj92dd0@uflMTLu_XyjRTZ~RaMRX3C2!SFZFzZMU` zFQp9rtacT!(GYy+45aBf(mN($U_ZJOXNyIk)EQmU=v&J_ z+^0o7+O%+TiUBH~^%n4r$*{q9BsGXFfI7Vg&|v1sFATm0!EX@MHmC3t%#T4;fDG2g zGJbUTGdyXy8m|l7c|*@i$S!vzo<%D-(Yzfn-`1T7ly;-?rW*L0lL0gG>S4aUAz$dr zI=-(}sYcM9{J$&jaSt0NqhRnFYA<{OG0wvf_3sC_>aaITUeqLiOxZKQ)od8Nv=f`V zF5z;&Lr^x+h^S5o!75EX;$l4;1gp%6^XRFVJW&^#-x$-4Nz38Z7gJhxy_idg>=xYW z%;wknk4DQN#`t;o9?p35ay37hmcTlxSUtl%_u*cc<91dx;Q6 zoUVhyy+&lHP83XBWXT;JJCNHyh{G-tbkw+I&|Ll$Os88DFI2=EhAiV`UB|5nmZz_$ zXpquH;RuKFA6x-*Mya4q#QkB=}ay;hmcsg%bkaK!BVcrf!xZm66tTTzxxl zUj7p*$BV#0*89_7xvGXf1dClSJxA+Os0%%bU)Z@z!1D3`tvds=*nR0muRWJqph5@p zuAtdsCo)IV5G3cai~3_s?k?hRY|w43bpxMg3$*Xxe%c=J3oBD zPkrqDJ9;#}s@ct5PZPl#W)B&+@GlBmLqX)L47sTInPsZq!VPieg_)>}Cl8-SX$^C7 z{EG}}Z|mY;>GZ-qH4lM93y+eLLYQ~F6OMU{k7`wrpjV>@c49nujCZGz--NK_ zjWsuIfWRS3BYMK)JoILqhQBAB$^EHXge*P}-N#l49=6Jm!Ex5qchPY)RrThT7YeyI zH-<56BJ1f|tI*v`j)Hk+26(kdq3z1!Xgto9kc_MF;runo?q%%j{+}>c#fawYdJB50 zhk30r3gqs^_jqBe5s54I^ZcLibevhPsyZveLkG2chN z5-wgSNyoU?V?aS98XO8De zVmKqVTR7sC7;)Cf!oqStxMDfLMT%G9;T~1;+(8Kwd+y-NKiM!-P6S4@n$g@@k3m>& zNwxeQ=Y-m;c@(uj_`MsDv?m64drog9p3MBugB`sg? 
zg+(RLxHavoc(F8bh|eHURd5q$JBX2lKbB;JeY`-jSBFfyeji=l9pmc9s8hvF=g@L~ z2`rg03jcfLNS=Ot27~&$*}kJ4dB$?cdU>0F=-tOyzV#3)KOU|IMstDDBXMb`F7B;2 z!03TY=!&nvKO?K~!)hLya@?^c=^AXBu0Xf+4M9hH4`>9+U_!kZ`LXaeJUD&^43=)k zdC{kNPYVNXy1OZeDdCPnaz6qJ*c?@4xs=>UUd->Nl9&i0dD4ZTbZ`-;{*Me)Z!B%fEPe!W-CmGZVzZ(z){rSK)5l8=SvF zoJ{?1sxb3XD)=_;hyHQvK;rBy%uZ?HO8dTI?h0@IV8~{i{P_`Qw;@%K%{qhbnxD{i zo+nDOTwKSgN3iysBdH6QB3q0u;QgytIqiVAs6AAM-Q@+iwCHF$9zGm zD*5x*h;;$efLeE=<(y8G(bA=AzValxY?k24QW3KB{SkP%;Q)Lqbb|O{%G0<}`EjRT zfI)x<{5xMDPR!+=9Rj}$o6t3+vAiq}+z=5$N zsk&_rxNA_|9C=KO9~S->mI(22B6QD_3ph8Jx!y*66k644KvLH$ zyfb%*f8b>ZePTA8{H?v1VJu2U%9Zln=9$p?;~e}4X%rKu-&F+{`iI}K~-4?IL7mS2NIMg6?q33>8so<6DHrb6YE`PC*O<*7qYuWn z4x-+rUASpy7Wn>7z{_+c(5xD?TdD!sYt(7W{RTL&-82S!rmhxDt=!?{WmT%+ejJ>x%h3s!Z0R^+4aaV@L-74Ja8sWPyNC72t}nIFro51^ zw(SDtcN%p1#zWkou{q5+E<$^4j>7wLXX5^(0lLPHpg}SV`J>O(z|`P8c3xyY)K@D} zgYE1zf2orJ^$fOyHKfPF+A%ZkH$GagO7{vP`2(tB;Yy$-Rr~x(DD11^r}eP!d7m{M zRdEA+5*dGovIzuy52N6aAiYMQaR&Yc_6$5z!+^Xqqu? z5cvY;Cz`=C?J=gUHwW1PSCVb6f+fKtm`g4J)aPmuwHI0#E4S(8i)(E_tJw*D>~g1@ zCTbG>yZx}D)qsThSdjLEfiUyz4_LLj0p=`~B*M)S^vU*DP#4YmJD=~sM(;#eD{n!M ztS!Ojd;|LH;aaR^jMk>KIc&zkJVu4aboczz&>A|5cs1D2Ep!A;{Jap>9lMW-;{%!V zwidU{>*8wvcv5bU3(;dc^Bu!eY5AmPynX++KwU74q>cnAZXJo|Sr7DAd<7WVpW+PZ zetaIj9X<1;Sg%i`$W2YVBd}_y#l|!!7QNVnpr3z$E;CsQkYuzZJ z7>B(csx*uB(n=pcLb06^92R)v;C*en!fQUcXg>#Jyx+m($KvF{%li57YoN1qg^f@UKI30aPWv0pWmTL`OOe79{(jT=-O&)G~!u<``0mZMV?+O*JG>os6}|O$lxHrK@L~ zkU!<~=`Y!9IO6XVB6a;DKB_K2|Ew-h`QDA@8%IL)ksDaulnP(WWT;tG2gY5{ps{U+ z=<$6Kk-GK|{#ZNHAMAYPQ$LmXxGpAfC(}6>>$M=XGa<9h{K1Mq+D2 zaMf}r5|r#o_Sx)!yk^a527F$%X6AKl^ zmb>5k=N;qzaewz^j57vfaM&l-+Iz1x=kt7?@3Tz>H!1T-WnK?T-r$0RtJEQ8>N~Nl z#sO!1{{jXde!?&9JJ5CK8hYIr#s^<2^M5HSyh$+)8v1#m%FP5EZ{me(@2i5cc^7Ob zixm3WUW0Dl2K*;21C6I&gJt=XgSr@%laqxFJRV&m&Xn@zo(s>MB-CFYY4x;M^J>J_bk@UGI36a8VFdB_$5FrTp1A(SE$G?e9ki~!26j;LV#fphXy6HN_?eW#ahF3-`P(WX z{%#nawVXxICdLbQ`j4l32FGb{@(w!Z-x*B;qHvjhJ|zD84vRaSf*lce0|q&F4~fTx$2L@bvyt6BFUv$Na?o z(Q_zt-7j&raUbaQ?u8hCE`@J9?x4>j=0iy3CfKt74)mMvBg|QAjC(~>9^@Cqg}p{o 
z(b-l#C^NHGovNdM?E>VGSQn=Jo=M_RRD zs<;H*d~~q9jww=pDX6QGH|{*`%(qvBqQCD8nD%-pZFu3!OGdnfZ?A5Ezw;$peCIlB zRG$OB>#}f_^E1giqXuI(^x{pf-dH#l(5Y=QWyP-)k1e+p4YmvyV@`d95hq>?X*wai z)Nv_jf7fJ-8`Hpb;YqMeyh{nt3Ga+uEt@ERN_Im5vq&2s@3BFjqRzbLz9a7}590v~ zI-*6Z5_ZhhWWPK6VL^c#oLa!{LBt#_DyGHlGsgc$Oqp9;392N)SMKL^KQPQjJn~p zXpJtG==9>`=@~4x8Ss$*cF0ms$8tNrZV+Pt*s0!)o((ahoYoM2Z0t*Jvo;A=z6C*v z_X?V~LxTm0FZXm(XZ$ntBOT0NBglrB(74+N$vi_})U$QORDTD2@W6)*TU;pW=Qr9{ zbcJ-+=u`F0Zn!W;8K!pxp7p#prFKW2=bDI}?GFeUs=aB}i6h|NC6QOgNZPX03L$rx zHRc8TqRH1|iYt$Dc*z_Gi3$0GHV=x%!doL~;G$vFrutV*ev^O`-`2vD2@=QQrZwyy zv=Ro%`;+BUsm47!l8xM+QhK5;eyK{r0Y{gCS)}Bn^x6((^^&*#vs4Q&)#Dx0Hi}1! z5WXm_7Ut!rqkQRbaetq99P`hXLq|WSrcrIeo-^uLUpf~`K4oKk@H^W2J_#2MUJkSV z3uvarrZ;?EHJlFIN(>sR*h?$6aX;&1H8Q_R$G#XxI5^VS2$D$6-$QZFudJ}Z0qFOyf?PQu+& zd*B(Ti?p)I9{=U-rneLNfWLVcb~~OU`gM|OfL~%@hVLq9@=D~pmu3o6`?z5KFK;xL zUxI?UwX)|iopHCcKJ+fr<4&z|adJsK=P70jaXXg?6s^x6E@ePYaTipqk#R^`I7cPb zOSS)Zg3_c6PUz@Nfh(L*Q_YVTJQ_y9etu+lPy?-}NDN(7NBp(oAz6Ht*j4eiWYsSO z|4LZ_f5Laspe<48=I_pz`c4Ory^U~i^9C`dFcdX=C9*-~KT&?*4INcF01AgJG)d25 z_n=y`zu1+Ro&G_K*Utwvmnd0-pDQ0;+>ZZU(c{i{<_H;~e!RoTl_z*hdSPF0Jdk0F z;WjsD(3>Pk4f!fgxPOFtYhQz%BRcW^v|UwF-wMyz1#^XyKe~7t;Gi34Vg0x!F+H%H zRys=igmyNZGc1#B4z`KMTRmaV_zX@g&c<9R)2q4PBH<>lr5o;sQFt%3^Aj$EMEM7|TuXv=shYs^88{nhiqc>hvTitDJTsqZRtt|$a; ziBY!kK_GX(w~1m8Y=I}=TIq6Lck%c20xJCQ55%YMX;w=VMjgKexgRV!%EFzLt#^v^ zS7x$yd*nZt{c+rJE8P9sSW+84t8O@&2MeGb{_Yu$rY0Tm&wYQKzebl|NxlBs_qU;A zxD#JHmLaWQeo9(O8${zHkQb_hcYUtIh$WwdZeJqkclK97s3@im|GaT!kPIi=&LhJ| z=iuwDVbn|48*|=8@ZE>1a5H40_*}I!%1zD-<@rwFv8*RAtNcuTi+iAPLy_24TmyC+ zym@>0YuKp7G-}fxu@q9U&C?Q3B!uwa@-x(3%DXsgZ%$8(JWxC#Wo67!hzA#IvaE5S zkk!uv2YhnIZIMLNhv!kjwN)@}qKy4te-{+dL&4i12PXzx6UDeJtk=)Nn1ZJim@4(N zw<}@f_EVzXpJuZ03XZ)11 zKM9znypi_WO11swi+m^khguSy!Wl>k&)nveXUfqL} zp-aS+Z4D&u_ZH0l{)2u!6Y!hFHXL*NxReumLNI{GH0EmzMAhFBqF$+D%r`&u^SlWI z>)arvPGSpO_vdSyZ$L_44UQ03$irRl`FJ=3Dp;x~ok3_2tYDI4Zljqs!I_4PE%Bv`%QH2Kt_+xj4 z6B+8O@QpfKHkLA+K1Ebg;V2K@ zCDdPKHyv@X2f}f!Jv4pX 
zIx#go0G|$2<{ue{rMd1%*5<%-=jcmW)0d(3nGMqU3!v?)4K5QGz|TEWZs5u^41A`` z!-kmQdWm7P&&h_fO^-o4K@SI1j-#Ky$HDR|A?$wqn)uK>46WpeXy~*;s#}!7%(OU( zv%U?SimYL17jMqDo&gb>!{KnoZ#tCH0GpTSv!`7u`eUGYF5ehU)(j`Z+0W^Z`twS1{Ic$K#Yc|lotr6UF zy*VHIZiCZrWYDzhF1Y2}M%wVICobCcAGLZV;gPI64_#dYjqfVqba@c3IvdQF z2!-)kA3=r!j_om-zB%;ZUs)GG<=!A6dEPpi{^$;P>h&dI@oG5+taC*5>3yNK&JH(U zUjb7ZdvK3|(U_-D7hX@iDc-1%=F`_ue1FfC$4@*2AC+3D=jdEosT~3n$8M!+si(tV zr2X#iL^`y34{Se_jx|4fLG|+V-b+*Iqz-h13+BPRyq)_^?{D^nYzqGwUvUUfV;CzPxskJAmEc~IZ$ z1K8f{&bKd25GQ9K6h7@L>4UP6V?+3X2I5hVR}e5bnH#hA(S*DWkZx(ujX#p`R@N@c zkK6+{4mpC!lX7ZnOybU#vxQE1zMT3#l#WY`39Hu|$+7ewd|w*Hg;U%)u<{*TS9(K9 zQjO|Po)5K)wI;b~Dwevi_&UyuOSV+ei{6Xi=n-Q{_aB5BL$}e-8_H1SZ-YAC6?8CQ zfY9&49XdPVmoR8-CQfyg;Wa%2cIfc{E_LpK2RoV5oA+Gt98lwk1*zcOLkT3c`ZJE?8*c z!N=e21y|?i6p;4B`yFUeJh~k+W!+3|pvHSP*F(m58 z;oZZktZHrnE-{Y6Y02Au|JF`ec1RU$cj@EtnQN)HRJ+S<)ezN!;@GjzZRpmu3-)T{$E7q1{){T3aD&&ds~Fr8Nj@Jko}c-$8roUIf$$Nv@Im>-4{2U5_bqQ7AADu{!sB6z9yUU0~dz?ks2 zWc=bW+&vM94Ie{rz-n!LP_E1Wr0lRsVM@Y)4(j~!a}LiuR!)PK1z_~d-uU5}E!uuG z=XpVG;JfiF*d#^c(%vdO_pU#tZhIvTxO51@4gj03(WF=-ZT@p%8rknZE=J{VrgHmQ zI5#Z;=cUC%#gsGDK`DZ+{98!B*Ub}V*_MdTCCd0Ceu2>KbtKnkmXh!6yY#7LBlN6~ zM78(cs5qGd&KbJwuklrp*S!O5Ten(P6ZRFB#oUG60SifAzMTde*9mhr?GvWHStGl@ zEuD*BCGnz-QFtsZ6ZOA@^88wf(Uzu4V*g+C)iaBiTP%c$l18UM^9umwkE|HieUF*KzFA;{W-AsAI<@jbn zEqM&q5}OAVQ~y;e{PDUY7v}ZEcX^>WJMlg&&Dudzcdn*kmCBg-!X7`4?u}vTS0N@~ zAFXVEhW-mlLED4oJo18!`y4(cCcf@Oodz|+LaFv`vLJzOi7s4aXp9$bheOG($7IrF zH1(RRh5m)^xOhuEuW?o8tloxD(~t}Q*6Q#mqh0Wzot^MK_y--7_Psj$O%z9NErPj+ zFM`!fsgJzE3}r>(_)%J4j2XX~zGemS&u|xvOiblzKkw1}uQ4pM=tvEZq|C8PO89uG zH{S_R$D3n!QagV~@Q|{n>b*xpTa63e&?o{wwR!NmaVY(Lp3L(OOoQevlE%sBCnz-l zR*rs0J^TNLC3+@!V|7Cij98tzN{VE<)O9?7(6FmtgFSJWe9 zO?)Aa4yzJQ57$A3VHDdpo`UxS&y&jLt~4$98Cj29O_2_J;c&x4*sSNt+Ips7HpDjQ z$xB0AU-aEq`(YyPN%F*(JtFwBK~fs(9DJ36HtGB`xodG$UyP)LKhxJ&8N_-?;aJpj0Qca>8YQGEP#pTD~%#p*i!Y>`yNIJy#4c0r$r% z=G?9 zlthz{>hNq~XFmEu1+Q#MV3+UfV8ywEWR@%72b*`)RjSVzYuj)^i~*0@b5x9I>cZcX zP6!EARvdKxtf+VFz4*AGl%BcjLT&LM+9=i0CXJg(bwmHsUbop~Gk7ZuKmC__eM;n9 
z?+A+AYRtB$GVyk12`M-A0q6NQC};0B*~8i4^tD=5Mcyz2>F9Q_go*y}R& za6${0a9%k$lQWC6u%o2;|Cjwi26fK7Ialh?;28tH`dgsBVYPRwFxGdc{8PRr>9x=C5!GJHAOiBIdN;^nMa0!99%;^p6k z%v(PcfwuxUG9(zw&M0$=jTMz&{y-Zygn)~xq{*uUoU?EQ?cAP-svU0!hF{mE%}UXn z?x%(8F54<{R(Y`No;z?aOa-)$eWlo!>O3IWm+ZS;gTR@~A+4hpPYN*SJ;MTVz;Xvj zzgQ2GE0Q_Di*s-l#XtAFh^tkW73e=_AAgJ%)?($^d*S^o6@qgVArf2Uc44!ZDZC@!a#J zvfKUlQ<~cjnjWZut+$qoqwS;kS&~9_=T|CaXWHrwnEjvC#(9d zZ-*-^0&tGLl;<{bsqpCGLUI56i*z|XhV6>)k@kh>(CN1e$-jp(EEox4AGd;LKsSh% z^856H9Qotsa=P+1R19u0-~$KN!i5>VB%NCl=8s%OdYb0E>aY{`(~@d7>UQYg#f6_r z87iwzUV~Lh@u)N07#-ULzHhRI93P~LHy%EKg4>;0{!q%?z!b8-C;4R6^+HAlwCVZ`s@mF0liPLTMFq{Ud}A{Eu^Y4)2jJ)by0QI@3b+uS0VWsA zgm0IU(O>d>xGh!26t#zR(o+@7o-7dZhfNgDOavZSd{&sVyeqpr3E)EqTVdki%|Q*j z2hrBky2R=3*fD*r!eLrRez_oBVu63B;@wtwSqMN~H7nRVB8D%{m`h6w0=eW{C>u?Y zn2nF$Q-MPQm)m@!x)1K$@tg}6{m|!F!wimZpC%hA)r8B2xY9%Eoar|B4s5E>Lj4Y} zK(XZ=MNi5FXN{%M@pf0vJsOP3lcoND=@YR>P!>Nn{GadL3pc!%<_<*TV3jxU!zU3n zxvbt z{a@3NOUZPz+cdg!RlvvBLixz`0$5xUgOfHcft`_WDQe_g(5sLbkqb}3tN|s0-Me6} zz8nMce*Sbn;RE#R9EEM|OGr0xi)^{V5B~&pqKqk;U?_R}pCqhwa*>P|@WG?HM^MOXZ=nUTv{DEZ?&kBaW&xjkAuNBwth~z~(f5~e9wgX)yz~IU) z(6^-r9?)5=AUBJo+~MWy1qQQ45C_)c0PzlNhmS(U6)r8ri z*SIq9JJ%x48tlbMtqsEPekl+&pj2^SP5|A!a9EsNJ4{Ga2^0PsH<4lQRvL01I*Br3Sj0mTezKF3f>OC zVd4%e?A|P~l%6HytjFsiV5J6Fk3COwerKTOl~?rrRaX$TD&fX4ci#Cv3Xgv(tjo{M;7<~R>tqtPZag6|(>$^7vJ{T^wgPHbmeR!+W|-owBs7mLll-+6 zbm!w%`fpw^uKvk^g?)Q)iBnDWrzAD8xd8C#&_J$l>?WIK;D*l)>xH-5=O}j13}?5O z_I!TU99n7P&n37Ac28aiXR-r%mT_;)QoS!ErW&HyP(~ls0`c-*8D=Dgab)%((pqTA zvS)V!d`=p1!_F$wsonwSCe*>?TxZPb7>Bo(tb)JyeW-M77-ViqMaSGUT=z1Y&rFR$ zm85yp;mjIp@ek)=_*UxY%?H;FCb-A!HTe!Oz~P3;yr#k$zn6Ex14E*?sm+6Dth9$u zb=M*9a|Xuucm$jNcIJPf%fZ+tkq7L3PU+z)u)uXHRA@!;zT8aW79WQD5p2J~SCJ{} zE8ZL^=~0z*aFSjo*FX46+b*o68-gQ~JQ@>LBw>}*KMDRE!bR)8P)+T8nkyeKyzY_B z`p z79D>q7HwTfvwKwvIVZP^dOM%PuJt!*pl2h{n0Q>-d=NB_njrfda)+y>a3n2=`_6AK znxEF9#{F41Vbwyqx5Wo{#Hr!=<}8Kw3}fuukcn9itrTh<$75onVD#+gpnBuA5I8>; zkN(xA)9-CywWbSy6D;ueQdPc@(JGEkG-7Ab7;7Y7;xhw*vz9%P7>YBi|DKcBY~x&o 
z)erS3Wt21SQ@aFut9HQ&-zMtu_B$vJXyNFm>EhwjuLJ&6wI|oD+Y}$ib!G49`;c}r z7XRDY2yUaU!{#4RX!)=+*1e1Ymn;JuozfXETX?|PFIm)8%EmTaBxRsKpAFNWy7KY# zSa!_Rp@si+;lX)rT+`bN=au>JkPXJNv~jlF!*P?)Ez(cYH^=aSBs1Z-y8+G#N#|w0 z(Y!$QtnmJ6n$+(uph%TCd}Ei0>r&59>ZVa(-c=L(jSS=SI}Uj{M)%1=T8X#j>7OU+G^}Fy0F_;K#Kc@#0u*Od1x4{VQieSgZj)HYkyDGE2Zt znuBMxdvaj58CPk}hiAdzy$Ly)m{^Qqenpas1u;H>jXWr z8Ac_^Y8bB(jITnx=!I1XPdh$W@$9M^Uf$_1#A>FqY1>;Ny3+@V{#6B5kNpueG;u+a zHy5NOLuj^}sPo+y)@I+Mns_PesPQt*YqS+i<62?!@ldX8_Y_>U(=nuzKl}9R%$BR% zz=$NRRlF5AxkmBOnrY(X%vcO5?~Y5_I>Yw(^Kjy~bUt5`fw*GvUhvM@DWu-{5>#j&k6Dh{!hsq3 z+d(@ESt;}1+quArf1eQ;)~37S|{(#k&-)U8^V zUcTy27$|3*Cntn)LpyPuyEaeWx)`3>e}b&3$=t0i9DdIY!aIHUL1A21?iHHKt?mcJ zwY$u*e#}(y@7pB4+C3W`PIu+Ko0r1icPgwkDgX=y`;b!kIq}U)ElM+xJUOqgQuK$} zbpE>q|7rV2g)Y;NTcK+ z*)4VC+Oj_6S@s|FS{4Sukvqj}`j$AR#T0d&oiXXIC8i&|E^Dc97Bgo$;aYktNHH*| zd`e5mb8}=XEmOX9{W6%H=ztrSuZC3)u2__xh*j-Ql3v6dKnp{-Cp3r+VL{yEkTaW) zISnPfYG~BA{WQnoE2aKZ&?%iW@Vm@{HQ$|uqhVXbms-!^rP&7xpH~8X&xgRUMmyZ9 zt%V*rO9ihy8Fr7hXCY-B{Vc1J{0|Zv{hKAPSu%@uc5_Fw6UeG#PeV{h4*K=pK?%<; zh;LkS!Qts8Fwjt3}vn-O=wT&?waE+cSEju@rJ(3 zkC9RFC}HgR@6!Fm6$W1wIR51eTHHq!M?09JujJ?4+~6uKGt(5GHd=6HVJw-&so;gY zYqIREsW`FJmp@opvhWA^{ZJiVI@%BXat(3KNI#zP-H|tZ+d;f%?)v7`{)z&Id+t&H+zA&%A&WprLF_3XU-74 zHHL$1h9xdOtBxbIf;g!~ha0;}8IeB52wvY{3caAcsxsX0Ur#pF_vAR8V&TD&E70qF zGVk|Rfpo1Lg?fIo_~bt~zJKfvUFc~c+*m8=P{Xyb5KHN%eF{En*a7;3O*pckH+PWy zbYGGRgy2cX#O|4ewC&zCu(sPu%~DqEk;KmUq39%yOlpI2?9Agcy-0n13z4~&u(7CK z48A%C6suhLd`lQ>e{)Au2PucFtq_V*Lik~$BTRl)u9yDiTu)E3 zilM1FRiTl4h%`KVL5BZ!>a=~9hHrjhWX((FMIy!W{8DB_7G;^%`%VSg2}#gZe1gXy~!<89aPx}a+ zryAqCy}w{>q9%VS`U}-fJK=Gn0VlrfL#MYTP`cckhxdwMZ7CaQXmA`Brwk&~io?PH zYkyw+*ocNa-V1vYdnk5FnG(K}HdUW$>x8ovU0BgSf?M;_IN0z6bdrCgDVvyXjejVv zl=gyaSNh9J*Lp+si*aOJ_Z2c8-6xL)E;z7L03Kfy$(w&Iq$;~QF@Ds0TGt^#C^TD6 z`^E;KT4{lpb>J%{dp@B4hZ5MNM?0Pt;{id>7r^>xq78ZpvRx@p;8@vrAtLa$@bHr^ z9~hX7+u90f&i)O8;lnH9nIC81W1t_qtVw|E0Zx4TlRKV#x}S9K1wekrBq65cDDBSr zE&d8m#>OYl>1F&#IwpBk7Wb5RNxw$I^37pfILeUP9DDKlphwglu88}l?m(g>T2&@QzT 
zZw%cnCf^QZv++?p@2M9C7rlXuj>QzY`WJnlT2B2GS@dm%1&t8|e9$KaecwKZsVTqd zV4uyzE9!(jXVuwcYdF5OQ0LM|ariXrclFzVXl%~X!v|i)g8Ir#v^%z15Z~;Es@%ck z{4$Iy-b&e03%xm7+XQ#tJp+~JCP9}8@%YBWf`<*A01GyC6v`??+2dmd&*cmpHA#kb zt1Y4Fdq@6T`dR2Du_bJC!sxQ0jjVQ318IJ?M}t%C_{GFu)U}n_;EixX+MyBCMj|8P#K+bS`3aQ zA#n3moABno8Ta$^#mdA+!RUnm*`NG)cJnwY+uMU|BoFPWQco;DBynVxb>eGYDtzw4 zSX#8xfup2a$t`Kky)Y;MW8O%)MNc-0&%G^IuY0(S9(?AIFXyuS+iH17wckRPz(bUX*0Qf8Gy8hBut7xuWm z8#azdR-c#)a|9DiQtc?V%+SPhMrOQSCV4O;{GiIUm`pZEc{z8YxIo&=hb1P#flHIY zc1xu=_4)_l!5RxxvVS62S3MBm&O;dDaDfbKL+SJH18}l60^jOeNqW%BGL@$ac)D7L zd*yj>!i{VpMZ*^U9d}}1X>MFKF9G4UI!~G7jJv~Uz*n7R!ic|ngfL0d*kRy8v2&U$ ze$8kg;M7Tytu+6a6&%>_@KZiCUy8^m@s2{=@@v#hAZhD}n~2=OKh9Aa}+mO9sAC_OJ!z@sVR9pP4@akQ-+LIQMpp;EUz#lT&`QP9^o!v9`aK+V%I59!1}bi9hEm>m3+O$^4uAem!u0b;;P-f4 zOdgRV`4LIfFiGMUUCnv#6JO4YR>r*BH(`U+e;Tsi4Wsf?IV5bWfR(YF5p-;u4i2n|$22s@w@o*M*7=*oW1p<}aHuLr%v0ykCN;eD?vpHAG~zv9 zB9Jmx(0;W{9JMGJA`eI#1ssnrC#~qG*^lt)B@0Nj40h&^dq$$O@&V`mI%PF%>iB#>I1oNC?GIQu7 zTrHINuo(edcK0&W_Ojz!p4oiK*b#WOHEw*e7F1m=d1e=i4s>fQ`ozKyykjx{}J}A99IjCN9U8KjXt(iZWXEmHi65n8o?ICLaV%! 
zlou4tCZ)Y`<>D2v^I@#S8{bP8gT{gDohAzW*Fr`a6OlFsQSM<;C2U&i% z;n4~wOf*VIjo$ObNy z6$5G6GkZFJx0+V&FeX!}AGr3`A|ckLJI3Aepw)w%G2`Gl>UXpP>i!-UOl!{*qKQ z8k>zrR`;ZD*X~fNolZ6H*aej#>ml-OS17t{gSEFSg<-3_aKxRSpuKU2_;am4UTj}0 zOYCbT4E^IqdnYJkdf`0!Bk3L&NbL1b?^Y^qcpA{GefyxDeI_g2Dx@OI5?Vg*so3Ur ziy-(E8NR*_GkYq;f>m9lc~&64)Lseu?;N3JF9pnB+nK%Ef>C?_Zb(j6;?z?qy!xob zqv~o$Cfnw~;2sJP6HR!>p^JiBvp^o(q8U9cVM!-B)Q389_r@N$c}NrF7XO1|YQcO8 zk@G5QV9Y`#Y=2YWv+fz_B?QyWJKkJxV9Q&^$J5l$?8S2cj1KGBBUE7Ja0JBMQ(qG`sGa@svnn|!u*7uGcg(dFTpsOEGW zuKm&Cfex7%8#11DWGh3@usuR$sSTgq@(e!DQ0IG5;jFPK2pwwF(S1!N-8Z*no7!k} zf3*pWiVS%8M}PLJub|uuUn&0LJV8G4fRNNDlN$mbgSF%-KXo>W@A-RUTAzdDeCHL6 zQp(1&k5qBdfKE8>kGjyVmj>MN7Pz9onkT*VLchf^9O`fi++tO@eNix8celgPB@r|L z9*FNwn{s~VB>b7{f4{;iM)CFYZ@_kwOF3# zj(@b;gw6@-mAR3t>3xqn*uIM>!K58c8pAZ}^Db}??1MAh?RlL32(dh5h&XcD1Nu>D z&LhrN(FY$Fwx8yR?t9Xu+8=;rWftU+orJw7*kIysQ$ASN2FbZYAjZ}Mf17pZ zk3U>-f|ELY^l1S1YhUPOq#I4AAr03cSzZUS_pl#xcW^f8;srrle&L|AiwDl zm%1DtO8Z+k@d=fcG{eLr_0aQ273n*vaQ}`uoG{7`ua5s%9h{H^qRfnsuI z$pU-Ue+I8?FI;rn8!a}fq5eS|jN5Tn+~6}DCd~l$nNdO`E7yq=67|HX>b-E$3txQx zWjSoSnuH0BkID9i2dvg{=Y76{#QtuEK_^zi?^s9ii=8e{>1K^zwoDakt4wj#N>koj z?S%2~ZNWzn5S^^>n^(dmc2zP3qmZ zxX!7$4TwAh@W)>I9Rt0F#v+K)cW8HE%aD;#!WyLgV3$W9IVTyiT=ffMi4CC-2 z^>m*p4*O!qwV%83dOHJ7=%B^H!#ZHQ?NMCv-jB}rIYFCr3~<_qK4L?k&y;%I2TPmn z`DDs2dan}8i^>*@F7FDUcZV>kreno7Y7F^5>3lsJW&x*OcH-_eC*k~=biDWUz3lnN zE3|pzM+&jrN@1g(z?p*dER!HlavgS!p+|UaB4ji)K7NC?4U-O&Gh-Q(*B)s{iHu))xptFim5o_j2Bt$-b9C`+J4@mNLJ< z-XAwz4x1W8N~^svv;M4T-1e4+?CZ|YdYQvIb42Bpk*w0s1*@u~aHnn}{F(U#XzhGS z#}Uj!wwKea@EFom zl=Q%_5{Ko~CrKAjeFLU(HX57lqVn>dFiK)cR4p?i`%b>>@mCH1y|Ls{%UW8N7KsBg zmD$RqNiaDR%VUg`*#3MBwj19J`?r`2$BP&S68m4%iS_n}5* z4z%-(#y66uXlJfe>mAXVzxMDG3&MytLVP>j2 zdV0rT@7Li%XH|WCl$Xx`JeMkbvh1kq)l2m8Mgm4#cffmH0{B^DG*61Iqs=$hK`Gsm zcv-LDyPXTJ57FUeCB=eQ*jb9gyZ<*oVBwsl9!kLyEHq$u=F4x7D3F)EohcuR`SN=80(pD)0{MB50{QGo1@f!I^5yf*3*@02 z3*^f;=F6X4ohd&yC11Y3N;+P0zC2Mnj`6Gl`McBk^0KG`xuscw{EBP7JiB{=+~h&N zTz7SWJY!D2eADiHdG6PIx$5(Lx%0RJx%&(0cdLAPMR>k^m@HquNT)zP`bfU~#nXKG 
zHR<)UGz;WGJM!hzx97`?Lki?;vJ2$X&GO}gN9W7?&M1(l7#GN0Bl6`}DhlM+dKbv| zZqAqglU_TdE?>TFWxhO1I==OvnR1tp`SM;v^W~eRB}m_!NrC)nT!DO-^qEwp&o^0mPHLwD zx#zH%^4)p)a(n9n`Ty`E%IE8@Do|EZ`k%-Dm+|-fUl@OT#fIt|*;A@d7Oz%x{p~Ef zck6mpwNr@fi}$JO&9&-^@*)daOT?2vorHCY_WidioQfC9!WM@IjcIER8shk@x~%L{ z(19dhMe>)63ZL-o>P;={g9=xys2-x>COh6dR*`GvFFUIwm(`?pP~fK%K|gkzDqO#) zRL9TI404$FFL2SFv9gE;m+BuUOk|En)MR_sl_~~Y36SM2J6Ls5%uqNeepDBkUa20T z_~c(JZV8$ny;j!!+QxtnPiHAsJa4S-VfU@NQ=V>h@T-s2TaweOCx81L}6eS6LR6p>M00-&~+@8W|V#@1>^V zN8G5YwA$H<;g+(Xd7Bfe`yC%H^Y|}9hSjNxH|KS$O<%joe)`u{dn{iQls`Q}(f{t{ zz?#?{ifC;;#k)R-g8paxD*_iZ{hy4#*Z;!!&mIv{?Nf0ju>R1X>c*9G18w3XtHP3- z0Q+Z4GNY&v=-Jr4iHdelvlvzD6u%`0HtjU3|gua10&)WwEyX+3Q^xHpBrT>iT zrqe&G;{JLE9=p;Uu*U99p#FS|pz~c%RGqfAsy^_#FtF7;A<%ueSyfcd)he~+Co6j` z?OeTNq-W6Iixz?RcWVTGj{6hXCqOf3L)yq{i>DofPHpKFRJ?k2K#awkO0$j|tEOGb zsOmfHZl&f|c~G>~u*&CO7FNI6Y+n6$uyb|ZOy!_sU1F>Bvz@9|*98U!{ykVZx2mr4 z*0t;Yr;a=eh>rdk;7uC>w|4Uk=>9yns$B=mzzxM`18+~Z2>AOeHE5B6Z=j#~x~i9J zVk*7uZwD@H%nx+U?OHwi$-Sz{+nfSj^*U6`j;=oOw)s(@i8QY)qMWN#yiEdY&5Em( zj%}}cdGvHu{oaw44{>bZqk#+j2cB&Sm|3@}YC$4Yo_8A)Fl1YPmBGtffj+nYRQ=rD z=I{SVrMfgmy)x`oK-K?@|E?w3AOBCr-}`@I{MSuulQf1p_**o`7&FYnOS z+oiMutLT}}e7f-QEV-%OgSprB>E%g=Cp{jL)r8-WdB%zFeAr5%t{}ehX87#j1NHi5 zXtc_R0uNjg>dg0%)3oVS*VlwgVxNJ}*w3W7cn=NlK+yaBC~?!ROLT9tCEQrk1vd{@ zgOCIN2~^OP*F`R&o1G3*M%)fMmGKPz)_fKEOKgv`XKSQP;C-~NucpGmsu;dmDT8^Z z&v5F|PAWXE%&B)$ptZu4yPUa6Bfd#7%##e*Jgo)M+4@YUs}P$mlfC%T4V^#Cf#;LvlAeBxXc!R-4U11u!vGyNlCOuk zQqEC7tuf+>Cthr}q=DuQ_e7M~xm(m?fi8szC;u;s&ch+6|BK^k5AChJ_YUc~=VOKJ zz2{fR3}vsPgsez=NGcjcO6s}iBTXrxO-qzfN~N9jyT5;d+kMVG@ArAV_WOZB1jjLw zE$5~5WkAs7F&LXv0{-0I>c!P}%$ctSbiejrn3NNOA?rIZ*xiD#+3{dGB8hr7-cWe;~0&TmB{L(ZqUM3tOO*^4bLD{y3NFHUz+h9&-z zbi&kg`1DOKtok*J36YD0O4nQX^U5}uCUOBXbF`@ZMM2UzqaWK&mq1B|JpG>0i|X0d za6qF8O}T6=#5^f~A0LHH8&YzXJiP^bykntJ z-w8E-EMa~f)gjX^=R?t+X0)GeO9F2;u)pOm!akk7@L+v42tNPCadCx-#kwtEop1^A z+%4EuoOkNb!TC@t5Cc*VdZD}iBiqUnHM^wWa~BmH&YC zLma1n)hz6L>I$>lWl2}MA9KNeGJX0$mWn2ADMNXZrfis}WRxX@^ZPw!O?S27+oi@ZJq7VgLt57a959O2g zGi(3ob98%SE+ 
zW0O|$;Pq8&;(5`XnSG!K97Cqyt1EU~W~4y-4?kpw=h))GZC2oPEDL|H)PvOn%1poC z3)DU|4sv_d*;#@nH0-4+9lXM_2fMT(zx6LFtl@T33(Yy+>>vDZ>ll_V6@t>WoALd; zPTs|(t~}{RQ!)@W!k+MmMTrq)o60>wfDJ^|xh6DCU@_*nMdQ4IFsPbh&E@KisF3rQ z9W1h^DFKRDHB*ZA#Vz80vDK%2?YdMv!hi<+bVc_?1bNE>adh%62%EVYsvo4nH||b% zmj~yy(M^H#+%tK!Oau-(|Ah?(#&}jr6R+`(u=Abo@Z8QAk}4`rbQUfijrjkJ1Iu{^q-mKE4Yf~!xo;$?>M=WdDk%$j2HDJwRqJt+ay4q4m50|8 zZ^6)DI%^Ua!%W`O4aZlNcXeZmwn@W|$HCFbTTBW(a~Np8)%Z z`gG@&Mod55k4po7GYef#Lh;cn=qeTg$CX7W^VI{ARL8*cd>y=QX#@1gVhnSdp(>z( zdEJ={$=MXX*{;U8CLJ2vQOgXPUB-5MPgedWAFM|caL`nMx}H{{F`eS1A^biYy_?I1 zH%rp-!=-podx#k?>jPKK4_Kicii$V2$*d}Ob~5+ASi5}`MV1+p#S_GdP03r>n0W}A zg(gDbU_a0AydfsP8HR&Li+Q@|a{1RoSfGQArN=*+fj5b;t1DspYH-ot_j zt$oPK!Z1i0#KML}K3HG&9ITWBv3jvNQCoi<`xlCl+lEF&L2?B~^l)9RD}#8@QwwWc zP3dBdHqf8-0`JF7Ci_Osz++V|^NH)RPg^ZO|BFzd^HM&r?KAbs8cq@9;JX`V#QcKu z5w^@uZl^kRWio2|q=SsK9%j36w**@o;C)LoPO|Z4^8Up#uAGnE?qvb)5UT^abv>L= z{0gJvEf91{i0o67BF}G%QN4@i^!m>E;A|WK#Q!`0X|6S;76NG3nFLO2DS*9>)pnWtaMNvOZ>Z&~?~| zdd0oM#|O>mx0+NK5oVZ;LD}%zYZ{ZMjl6eLDHgg&BWbSuS$cnTqeZrnf{eT75huOUqvzdpp#K;$^ zOYDi=DH!G`NsYH|!#@Qa>vgsQQL!rHb^@dL-KC3dyV?(bxsGjigbZy|Z2{@S3-Q>C zM{rzG7i&^X&?spac&WZ(_PQ7n8CHUhb^n6K8zkAUWw%_b?9mRx zoq1!x?6)Pow=?mRP!{?{F2|kKf;1|v3zCv|vm>=H;8OKctUJujh$-)J_*p&bm;b^^ zrxeJUU&`dn#BNB}TMkvVo8Zxh-IzDEk2OfJC&CA0SmQWl(zb!)e+-G>pSeM-Pu&)# z;6e(X-A{-uA15EnM0>PsQgpxMq$ zsMDk}!Zi@%eT_|Q6DFa_=V7^e3+9yw!rh=W+`V6y4k})NDKWKJ7$rw+FFeFF>3kFm zmnL^|gFxlIF-hrHqQgU9;Xt}2&Enkj9d+?Mg;6cK^UP-OJH8i|h^qo{+~>2wqhNMT zioBC>X4Ix?QXjtwRO`Mq8E*cu&W8nl#D!INEjx~sK3_w4|_u>d@uDXFlpTegn!^{*1FW5fL<%&zRP@_r)K7G5! 
zyf~H&XJ;j$S{Mr(cyWyLm?`acna|ADDul{JbJW*830Dtm(seBlF;p}JnDGo|{v2h} znA!lvs`DZAnKCKy14iv>F)m%HKs0xp#xK$_%t*fw&A)yfER%xJ_;xl^y2}`Bp6HVU z;u@fI#vV7_41thaWz6=Mt?b`lisZ}!4bmEJg3FKUl3D&i=&)U#ybAk`iwZ=Ev3CGw zUDV`!>}71Rau;+LiIdOMKB7?bM;LwVK+LWh;HtVbwxM4N7iStVq3yq54(By}yP3x- zI5{#|`$WkV={M}<09`7yM3EY9*$qvPY)JuE$H>?t424VV=)kT>7_9h?x(#Om%WuLG z&S#*<{f>)T7UTF8QQDtiLE2YHa6CsX`qQg~{kVN1O5bk`@}OwGKRSo%b8ep+w$)9My3H4&y4ugsz($`mxd4>J zUBTjBSz>U&k!&j{!uo3>5UDkhD(KvW=HKb4?&q<2EJsRYN%=#;f5#m$;@t*i=h_{j12z3$@U?0Xof?noN0MuXS|hX1-% zj>Lr3V*XJnI=D}rygMunQSrL8+1ZqwFjb`A^MAl!UN07h*I?pq?xr)<1U?A1z|t#% z^t|*<=pFdYE1 z5{;Gu(CxRRWAmP2;hi0zuu74vNi5-YDD37<6%52iPZzLNnFNWsopA5-Jg~P&fI5yR z>3_N&dQ-Bocm7e7T&+dFzA56j#olzz&d?)|V{*Z@-khlZQm0+@TsQs$=j?nTNB&MZ z$<}y@vU*bLn7gKdEl*vI?$N>cnB&qWc3eTfs4FO^N zBx9r*cb>Qib2x6>XB#Wx@_m#&y4)YOOqYSh<8PTf*AZ+fGa^MUQ*aBHkN@g2Aze51 z;Fr+^k|sDCl#ZEW-QE@OGR*=X1Qg>ub{zV)_A}L!<wCqU&$M+;$9Kr zw?U1l{9XW_$%UwTtrkqb9L67??Xh?1O)y%kM=#ZLZjt3?aNzq%D3|#M%hpz4Z-Okl zH%pybio2lJ-C9r-AHu}dTsP5EfM)&4LX9q4Ixjbi4R4O&-4JPJ=DQ+%vNr>@1D+tg z{4V3hJH#A|a)j4n+%03h7~P>OO}>0EK;ySRInH<_44L{cudQs!vc*lXtt}10kNTqS z!h_&?Uld$SXTrzRCPd()B@yU3$tx)qA=|qJh~PaQf032gPzPy$W5A>+pDE5ihl1&bcqT@GMx}p3o`xDMS;x8Jj_Q;2 z`8s51n&O| zzVj@2=zPVT9y3y=*2LVs6a$Zi|3OIP0Lm2#(<5DyaQ$RECf<&KF9n0x+{x$eNTVTm z%5wy#+YC?S9eVAtqc6pc$o`z?xJ=m{AMn?}T~~_r+%sG8crEj8r4MX*u>e9v#fXdA zKTOqqANGUsP$feIU7279eFlHVu=muYmgyho)5so=PILd%#az^wPzJ} zIFgyq8~`FVf}cJ{5luZxNx(2KcD=`7rwI+zT0sc^LSDPKi7cz$VB727m0YgwUr4@>xQg_y)e%Or`aJv?5!Me?4G4Wb{#{|lnnz($!lPiG{rga;!a%m z;WN+uz-=hmcN;e@sfMjmnslYgI*|N3!oS=x%ro9@LepV3S{UEMDHrcBSCn#Ld|010 zixzIRQf)-?UBBt0=rmnLa`gPGj7)k}`?-?dAPMhtQe$MOW^bN9l# z=ctl3tIWuUlm_O*24&*zDowP)4=~yJUwH@bpJ0xx&?m1x>CqLQ;h?a763kxwg`4L_ z;E$Ucnf*``pRVFOt!W|5{?D@HA=kyMIJFO_EDT1U%qxuRj68m7`ZIV+HO12ye>yaqEap@);?a(3)unVRvID~E0i&(|AN@SjII+`0l zL?NMPn9&g6>|6Z?_oP=s@JvNgo>9y{uz+)yF8hr4MektGtqJ7!Wg()ssuNGFJdcG- z8o*+|G#qerpo8H{QQc&iS9r^S92Ybo=e~x*3C(d>e@mF_*nMPzc*;av;3eyRr|ntC|y>`?RR$C(E9h>--KKfUoD7pdi+^{fBU#D!hIV!v)+b&IQ9v;Y6JMk 
zck8kXCm+nw|I*ES6FS7N$h*#@3F(j>Bp2V8NYj9M9B+}WK-FD$@beT461sw$OBJiZ z-tH~*A5tJaJ3iz3&0^5Vy{k2MZvgIoiA?l1BP#3e!81-Fo66GT=Qviv!qag3;|JbzFA-SaE=cu= zFzuDJW7K|4Lfx3>FsjuEmA2fS6gSf!4g83eLtQ!bN^f`-8y)FCr>RhSssSfFQ>0fy zm2u)tRnq-{^V2Rg1?BU9&@Sj2l&{*$sQnZq*J&*7dps3oRZekzf-gDrg9>>cSOAOL zC(^O|He^?SGW&F~CSBSb2Z3vvnd~t)*q*%;PV?eXJ|+^KR;Zt1MZP(bj!g3 z{22Ng8+*iv!>9XjH?aW?<;(DVt`wbX62smxh;ZILDH%j&7?Zl7E9kxTHCRqx2NIm? zw0BgQyuoMu{Eu&NO4v&{m#0QDtS8Ysrx&8nTdwz|nhFN)z3jcyjkwJIHZ=L?^O~gq{+L-S$KbWm1_n4sr zSHL2X>r`Kh!?gTAICAp`8zRR!7l<^72;6{kC-v!qo&tPjm&#YTWJx_8il8E+oxkFN zCpZQgl4X1JXnD*9m>U)VTiV>2i@n-Z+^@#jz9$<}WgDPcNuEj@^YM0j8jdWA!irBD zAahz7o4X~HX=qGCu`Vy>u5JS76%*pwx=E3eUe4un!IY_1d;nIe<}}A67-Rof(;6is z^5zV8v-nb(NDQQ*@hUYc;~fDp;->VUayr^D_aV8}7~=ng;L1fAX!Rf0ar2aRuAcf2 z7d=s=8vf^*g!&k$X?=sE59NsBj}Bbi^@L*{6)~6nw`20uCD13O2;*x_Xi`T6ZY&>R zR!^FaLpvwax6?)Fn$P*5lokU=cvdv#_!XX;$S&NLtU(?_9EzogVs2$OROibQ+b69I zv;P`CmyF<hbiQyFt6RL&YrtiG`z)nV_rUS=nDo}sSf{F^5(&4X0M6}5gS~qhn-M_Dx z`Zb($G*poqv&MAh8g+U-eKsqoJ_BB9D^k0dNpNsr6DwJg#pq4r=d5nN0->jqndtO$ zAeE~__HN~)m6j^KJQB>mJM<7{?>q=+e%*z0!#-9h6wsG809d zkozBGI+lE9JLaEfYZt!4Dc<4GCOw2U7a_bm{Pl zM?BjT-Rxt-TOeF$4KB}bvF~z|fC#0qcW!@!?V}F#7so=ico2`cqnEAPU_hj&i4gmL z2f=@T5O0OcC(KQ1hba?HX~T=O7sEULqNDsB{;486@cgnE&OUk!@76!a_UdXvxvu{p zeE$S`x1khN?qspolGE_t!l}$kZui=T+T@|f8NR5GJc;+M0=rp0_*VKMevo^GJxk=_ zx>O%8q~|NbzY_eeWK6r%HZdB(%A_aLj9u60%N;ojl87EfSgl*lFA8ztmDnnStfMJC zJ=1}fPAy|cmBnf7z%X=c{6N{18r;8Anr@HS2ZK%@p>lNqm~uBb`B%h=#a<&)`|}>U z6$OKAb_y(>W=EG!&BMj}9-)Eib|^G(Aj#Ygx_|B{b8|x}>VGjH7fw9KF_Q-PK)TtB z`5&2guX6E7t~NhG*qYAVScqy5C8&9V3uvDcrJoBQKzrH%zAlxetCMOO4_^bS-^|^v zCWPa$%OTLK+yRf}ZOEtd`CQJxV!E&g9OW;@wR#%lti)pI*!BbVl?s!va!q1sqC*~U zw4=A)Dbs6zQ?QKl1`EVIK!G(P?2MMfjOjyDRyetliToPQu3RrnAGGn<$zdjpZdefx zs!ZWY-&7>aLxyn2Z)Yf2djlj6+E9aef;3ypj$GQteUIeJ)80d6;Qo0c!9#agxl4X1 z^D6}5ha6dD_Y2r}dpJN{F)n`E4mx?RuxS$4gFh3C`g?sDW%D}TjN`-XK%XKx_jUqZ z^i!CM-!>(uE{CCOoHEV3(2T)emS`~24<_6mT%pV##IF5=Z{6-JlP1b+D1Xl`?3AFs zn<;i|D21a+U#C^dj^MKw!5IAZ>NNeC-F&x!XYk&w82XQBkw1C0;@G=2aE9~H 
z-T!fm*{0~jD-97MmR1Wu-m?@>&A5g$`fcgSZ#uMmVJf`eV23AnIziqb_YI!*kKMA- zmKu#I($C>XKu};l^e7qeXHFM`Z6m#qZ!HS5!(TB)PL{-Q&o5@p>KEA-|M^4RMP0hi z@&|?+P6G!e5lrnk4fbc(!Y_9XV)3;I8%#f;T*FpeQgi~|8oYy^k-zM7g)h#@igLJ7 zItR6Ki*UoEyBPdLhiVGT5aTPr91!`3-(_?79M_gjSo;JkF4keH4wqR;=#t07`&n^| z5quRk6;HU-&N|I>DulS|LoG8`TzyQd7TS=^k|sDlc?>H8DRZTjV~Nb2ja5g!VfEGBY;C#@5j*3@ zdA-IGG3-oM#SswlL?;1fbkZC8&KOLbVOF;Ki^Z(fcJ$ z?k~N<^(W3_gpvY1a>kkGGf>Jok5y6lStDO&ALr;@RS3zJQY1p?2wS9D3%~N(u#|J3 z&(V2?IoGd2tcVQTvTclg9nVd{K8Y|Q^dDPm9s+Z`T5*kDBK&v4mt#CnpkHm0keO>l zlGSWT$v0(kUs9jgozSA5m94OPbsl8=_G25ReOM34B>eMQm$h6aPr^$w@WIp@czF6a z=OurE(<|h7_mAddYT9y;a5p9=5^usqV^iML6LMtH#kG+2LW2x#m!dn170AFN5fsyT zfPMRZqSoj>oY8k4j`fM)_)%;6m~YI@pZfU6M~`-KZ0`?K_T!qL7r`O_2Wx6EnJUE< z;1lI-@X|B`J^mKJ1r5Sqn4yfhe>hLh76Y=0V=--)tYqU_bkV_IoCyn;AhSMdkd^L- zc#R+JXqMbFIGS4qO%%5@L|1*B5+6mU2vvRT4JgnQj3iw`$ zXfmD*A^jG#)x91U72m*|vxRu^M*z-#_74PqYtihyJs7p*6RwSBp^E+!r1=DpHpxi4cBs5VK={GJ3}cP-lAEi}=P#WXTSDB2f~Jo&o7> zc%&_vb94`1{pvM1#j(Haf{bBfk1lmH9K+B2zi@}!E9|Ws#FJtOqMy&eN7spjudh!< zSqXBplYnbQ4l|aR2wCC|(`EColqoJr<&5x`;P#2l%vP_4A>Ta6XmX|(VBL);%(ak zzNP^%u_hCX#eQM4&Kp!gOA@)1+aJ5ClO@ByQGWvu=ElzD%e-v|r&cSxBWXgX=wHJN zCl$%!VUDEwPZr(4;I-JrsZsWAsS& zTraqu)Q^kK+(maAu9yE^g?@H=j&Cc!g1XfMmj8y+Pd?9pZ$9hL-CYP~r^a!fm~43b z=n=NRRmAHNzhRNK75SDHjpq-nWtQ*Mq5l?k@Z}O!$d<9${OM<|g-0i?6{0LVkE`nQvD&)fG2s6#*GDseH#uj_ak%3xE z*8F=g6dkF<=2Mkm8|w?%uixU2xyK>yoGrb5C>Gz%s6^9U(~z$mg}Y{U;CUG#df=@O zjQ^cTbgN^)ve%es=f7wBr}@KLjl0m0u>pPL&%vna31+Hm6=?lN`FUu zg^A(`@TNqQ>ECo2+CF{7u`>?Dw7UU!YjHdk*~#?W*9xc-x_}S48>E@8!FZROm;Y%! 
z!Se|_@$(N!s@ze)WY_!SFBds@Hrfh0D>zkJx+{9zu!XdUOz>!phX1xLW#^=o;@kh< zKrfY~P*%cxPi)2N(Ntzr_9^y3VFmkL)PNj*`H!ut^2ddpqs-iOSK+;E6HeM#iJ}v| zxQyEiq~mm{NQE69-)Kyw)s;{nSCX8Twxl2AZRw{$5mFr-1uKqB0Lv;<`f8RXsVS4i zWvW`lU(JZ^Il{v-Z!vmAAqKX7GonhmkI+5q2Fo+*g5nxA`lk+a2hz zcMYiMsjI9)M?6TZGolVNXTaUfb&w=$O4zM2IRCLbe(62JuF0#wi8fc@ywE3J+3f%* z;6Fl|l7qV=O{k4SI(Oq@NrPtfg11&9_DtRa62o%z=L=o*J-i7w@r+6LJX885FB$@3 zUf?EMQPNBs8JOZgg0p<^)Lf1s_)>)iRdYGhl^Ax>)qBXi%4foQdf1DsDchSd0^emo>_SaUJ?RVDZgT8N;dtPQN|CMOeN6K-Z}iBl!8Lzv$b9ZQ zI!n)(hMkmw+Qqx!7Fd)1iAkIT>on|MEsP;G*V$%)wRmi45o{5p?Cgq#IRDQtcFpc> z>?$vRWOzpO$0r$CRe05m#e5f2{fNf4|a=@ zy6KfT>y0hG>@p@^pH^m1Fw!GVTs|s;ozS=~9ene}(Efc7EZg%HB;HOzPtQTHwUNf3 zA3Hb~@gjUaQVxpJn&+rRMFO@jIlmZ51o z;J%OIG_}Zqc$~S3KSGq~h2mro`k_R4;cMAR%mmsv{W+|>(8Ty9uj9pOOVB%Gb~Gom z3u|^tP?uPFYPw|{L*CrPX74)urz=Y4NrthCt7fxfZvx=n1x@*~ z@ol;USurP-C*|kP8qZFHfWi^p8P^)H2-P7;E5vA)$q(kXgc>=0Nr_uyiW!pwQL@aV6B;=Ac`YU@oAbL~4>JE& zE6`g@+F^L!4!rbCpPUK1gl0jK^r#ENg#Yltx_|Ew^thhuGab78djr1y8pYgym&4q1 z=*DVc1NvppE!g1N2-f0h%qGFt*tDX69r$HQdfXWBdVUENPv}8@S0H@LHK!-vgg|4< zJB*mGLyy;JW9GKgZ1z_!=h*!m3;&9fL!ORAJ?SjxBYO@Xl}j+XnPc+(WKpr$2b-Qw z#%o+Iars0UD!=xJ-bQOYqF00o2X1hkY#DO$K_5=esRfM-p;+0kO%^7egfC3jV<||Y z{%8_|_}kz(Qx^j^iINR$GWg8ar<>ON!#%%!(eQvF@l@1fGry1Fd<}b)UeLm1{rHGl z?pjp)!WXp9nus3~_+a*0kUnO=K>t6Mm&ubNM%p4{$RG zmn|Kw&j!h%3otuv1dj3SVNH+{zE|6fU!G0ke62TeeNhN6$J-N2xt^e1-2^gIJqW`W z8=@|coi?+X<6*gR9W9al>_AB)lWdd9tNF5kA9bjU+5Aw3zI*T+YlS4RKyW@9Nj%1B zT<^wF>K=G=8PuZSD~$3(TVj^*leH@{B#d7+dq-1|Y}lB`?6|ZHx*E!$LhvHYXdl3n zI$Gq+^LDm%ALpgg@5LW~|8dWm3=KZ*38Sl%@ncaINY@tQk!67(c~}uPZ|0s;cO&A; z@#AJq2!+QH+fjMVJ2V;bM-FUG7sjPSV5%6fA~A4*lMCG_6eH!b;uvX!`br70=c z{~WFTp25eJ{!Fy39QiQ&B0ig^MxI9Lkx}lOVe_>B_<6YoW_D(Q$1Q8B?4pbs9XizK z%v45oo(H%_RKf$NK&W~rMNV7l68rU{RGO1B$h??}Nkv-Z_O2exUbC6=@yTO-{3}pu z5XICmQB;99cyN6LP7l$B6{=F?8NXFShG_Vs34y1dO$z@z=G`b47p77ZQ)lI3`Vwe=GRPj&R-+W2!7I zN|!6g!MU4Md$zLCT~P*!OZA z7AWPHCxiD!%C@isBgPWd7)8cq_c($Sy z{{3EyE+)bxXnO=I@+8?HF1L&hI1RsEvM95W>pyyRFcQoAG1gRqJPKZb-g8P=wHY1I 
z_%jtWB-EfbPLDnp;r2?ibm)ZXdSuX8ncNOgpf@Bs*=suwGx6I-Krz*tmd)3t`xy(+ z6ld`LH$9x0>F&IZ(Sf>`cKmkPS8#G)0$!Y_{{!QIbc6A0CTG!!*I3?!N zJ4f7md;=~!H-X#;K8_F9S@Wje(I=A<%*m^EZ9JGENk>ZUNwk#+xi1z4$Bk6zILGrE zekRJg?egJx!fPRJlLB}tW-*(&y{6E1t_wP>#j#H*zp#SPv%SFrBZ+`$H+l@(E z!x4N^S&6+Z8pLInC|M=l!gJ;hW z9SK;tP?!pP-^LtquFISn&MJp!lk#tfgYvF;UQd!1UrB;}HXLgXhHzo23)~KT%3i8} z2?Z-zSTy$$lzM-K1zY``@1G53RQhb`rPF%E{7pH0>=#CrVIN4=UC5q47l@@!iqyk! z9{bEY5bgE29mpXWVrTFVM>In~psboLDB(D2X5Czt_5_vGIhJWxG;jh}a(v$(uJgW+ z8Gq+Moqx|_4JW-qTMJEE^|2f76s}`LxWE08_Eaei}790&?zcpT9_?qx0fZE0a|oUuq}-|Bu{=My@sx(8!&$SE^3N8(j9vMa$0J* zzMUOey22Nm1-Wi?wIvB&a38l!{R`>$EWsg4n6}00(S2{V=<5BtIM3l4Y+v*-=TyNJ zII>fnOy2PwM*L^8pQUWb+e`V*_6Ki(-QWk@aY~f)9$sZCupJi!B|>bmG#OEFBuk{4 zp{`Vhm~d|Lh`Bnvz4N(QvEVL3RUD{ot#@kOD8bkV&4ut9GwPgKz@F6iyN7V)xpe@5*B{NxUuFttmwV#Qbft8 z3MNT!MnR4n7B~7F-Hc>tY^NIhd~HXD2PUvDRz|SPq_4A6Ugt5kk%Ab$))(z|uER~@ zl^kbi0)3|zj5VrRP`tg-Y1<2Xs_M$^jz2Ah?#mKf%~y$vi-f~0#ZkOx-o#dw#PAN~ zY=Hb?L1@063|qW-=#;+;!Zt>-EUm=Ary0x(y>1-6u?`<<`M~`4d(1%X35+~c1H9lG z#6{mxu2K%PE;fNdZa18)`^k%3C_wkk`i(UnHE^}c0G1d(V8JR0*9}$TkDC*DA?GAX zB-dZqtA7tKnHtbZvyw4+VltLi$~B)Bl3Dm9#|YOA!S3WAvIBtIJgA!C7h5M z+FJ@9CnwOQmI^qsua*sKYl7C_uOKhi_A|H_?5t;(0o1?(%1MnU%pAd;`BjwaC61QUqhmVQzW`8*sh|&bO5^&NK6I z>@4SWDEtaDy1m)VZN7NG*MvS<6adAh&-f8L$AF{5kWOyL@uYSS7Hr~}nfVVe`h^~O zE-gf2H|vo0tJ;uafv9eM3npbsP=%c$+;`IpMxaIrG6fP~W>yP3{P|}yM)}`oyqCDLXKp9r)x58e8mUKT2|JJsTX!DKH3r$rwV-Irg1L$=ec`1` zGjlFt(S8}4=U0x_Qhz|aQigv%N{)Tra~`WhPxAGC>7#wsc8uT+!X3Mtc;vYlO}%xQ z>)gqa?TyzVE%^|rU|_M`VMS$m*KbkhgiQvl4@}s6!*PakhyLMEBkZ+B@z1l zWoem#0W13T3moG|Kz>*X{tLAwPRr8q?}8tA{CfmzF8_irjKZ|%ToDRg42Ifi58-gM z9_iStOsBn!K=IdN=o-WEavxT}(ogdk@ikVMu|ouIsv3|JV^i=m`3x(XpJT{{D45r| z5+$_4@cdRu@}tk3*R#o%oH(aI>+1u-QR6Ed3|x(t<#R#vBX{$-Mv{CRE#=+ec(YSJ zrsBQKC(f%Q?}MwfDm9rZOI~ZmLh>`?%wZ;chAXD}-h(+G zOt@^T6SCF%z~>x`fggqFF5P0-Cw-rNR^y2auG^5+8yCXfunZj2Wti+rix((=I)mluyvyr)V zDh5QO=7CXr6Zl%+W18kJ#rKaBAh5iPpCtDkF8np4%WeqJuO+5P?0j)xu{pl^`~eM2 zZm?&COi*4n2G(SK058`(CNj#FBn~@*p36k?&^ryAfMxHWEM})Wu7K$i3gE=VP{#d$ 
zu5)yXJ+0VbMe4_b;pN$RU>oeqo6-25w|HeAHcm^$P7PgBI@K9MOZrgau^g!^v!Ft5 z;v{#|6A*PQ#oepEz~|}^d?DSegcXBY7w`!vF@Gd^H>M4raka+&Mi`>^?xBE9@~F5Z8h z!247_9~0W_=$`_X`E*f@&R(WPiH|7V!RSNc9Z6d7K?=6qELg~vhs8{d`=S@@~ z&wT?i{f|D;Su~LtiA^L5RS#i?VHe|eM4k2=awI7m2e`en1kF#mjvc%nY+msPBM;=` zANbCU5^3VFwHSrh>Y`kTG_h3s1<@iVAj)L%{py`j`Pc%KOOqyTRiS9LnY%SwDG!x< zOvzM*H*9;|PV@^6f@_;{;ZKAyP3hhSDR#*qs?d#*Q;%`E33pF(>MNT!CO|fB@5Z(D z+Ei%123=wGlzAp6M9*H;qG^3HM1RE(IDhRTyY1E|zM0_!+Hf)%y#;@=C(Xw3J6ypc z-NO*=Kj6H;#)Q6iSdC#j2hg@j0O#HSHt<`F28#>~HSoXD-zxL+=y7J82z!ND?A> zpOtA9zZ6ur7vrX=r~K+36P!1_9fN-l!&yHo;+=k;cadt5AYVP^^>IzwES!ULJ%=#= zryWGu+R;(Tw{S+vl!kb|Le-;E*eu?F)y=^;>6;{Z|Jju8SK)4CJcqG7`xg_sPmA0$ zjR3X&3}$HkB+`*{7f%OWVk?cJaQ(tgJR+73Z*{-0Kl;LWLhnDoz^CsyCZbX_AbB%v z+4>994b17-kS>jm55e@Ov)G@Gp{O=!K(udEV5~oN+R=S*Gob{8l(X^QkUl1JPTB*5<6O?AMsy=| zuzGnGe%fEnZ}N(UZZ{vKM_=WEu7l7A2__*?|Hu#s7W78%h99$zppR8|TM)s}1 zL%}@!=^umpT{3aT32icvJCP>XiR1DzQP>rvP9Eg=upxJ)=w)6nM9mN*J@VVw%TAYY zr`Z5%YjIAe(f}OBWQ>n-z!^Oy@Y`xJI4_R|*_noL?ZGANHNB40+5}1b{vOuMtB|)_ zUK4aASlGL|2bRCk#*}y?n!KYE=q2ayfcvRtZGq*wQ=PtZeC6jUC$e zLF>(KJmfKjEJz_3F7gtW?F)rZ^*2#zKMN%t7W8eJBAIV`5xeTF!JUc`uc~@9y(vK7 zH3vXpwH|q^rAP1Y`h|PsE&v&{qU&v=@q4fp#2>e#v+e%DMr{l7(IwAIDKQ1v{ zeJV8loG4i?#I@sWZsGB{9B1Rh5XOAvV?nDk>{t8A<&CPiOUxQpaopr@P{H0%n27~b zFT<;c^7Q_(VrE5^4th;CA(6xDaqLh%>a9$Hj}d01{(ub~Sl0`O&&$!r>J!LSQFVCO z7s{$?PX_b)d|X}q7N6`hB<3AbR9&3AeST5Lml?N%rCViSqv|RoSsz$yp>n+1DoRqK zI7iAKAO8O+IuCy=|1XT&d+(XOvPXmGKA%dZslEGZFYR4uSru7HWkm|9C?g8@`Gkff zlu9HbBc-JfC4QgZfADy@pZhuII@k5S0)OjJ+mq6qr@JzBn-l;QpDse(Lpj=${T3^x z>2MO-MzpH!8}!}y1@iWWWPQqeaNIbW9B9~#ZmAmNuE}|RaL54S-%FDzw+2CS@+)pQ zVINdY&F5C+58yFZQ#!$*53X$<#!3Ba(5*HQ@2oh%`)#$SZL=-VYLpHw9f^a@ivpRy zz6qVt9M#U4@uu#!cMXDyfR_=P{V{WTXVQ3%ohI?#3BKTvR<4u{XZ z1C5_6SU>M)^o|iBw^(296kTPqR3aQ?r5@v+zn`$SLV_A}s*p-sd9vf)S!~GF0`*%9 zx!X2nyiu?O9)G7!3@Ta#{mIq1Xw46SP*N}FyTXdhDV@(}Zy$$AFYe=-PD}d!rYnS8 z9K>PQc79Yfg@_Nf(5j#Yvvw7;E?J9xe%OK`sdB%sRITG7( z7diQ^XuKU^O-4ME(RkioAL%|OVCEPQ9 z0i;hhrPqs`i7-{5V($6a-e*o`?f(cBW8QFTx7jXr$cQ?xJIW1Pd+@c4 
z#Y<)7i0Vrt&|Lix)EHZ!PjCwO&swCwF&bU9Ey(I62hsJv3vik-KT;w`)9s$u;k9_A zAb0o*lJT!WH;8#Lyr!dpxEXn&<&Rj@h1*+BW4PTBzAw&_E@IEc>>on(aZw_U+g5;q zDU(26d;*l+*T-OeA@Z-~BgC6N!$a53aDkutx$~E6L3aIe%uw|$%x!5xg@uD~XNM_W zCp#1O?*9wZqDNC@rV;0Uqo{sSgmE@R=;FRbDEU4fPq_WUai-^CS@INaU6~C?P0dEH zIX1-LX;yKGrXh-N4uF5>J8<-m^H|a|kH30PgtnBL$sOV;`mq zzOyckko(DCGUE{pUX;N#%K`*u{@N(~OAji41Gg~rHgBKkNM}s=it|z;xR%`_Vqj|@iv07y+E{ax$1Zhz+ zCAfk6EPfcK8lJ`%DMpa>Sd8WzFu`TtionuUk}jU7O3J%L$Wr?Us9*3F`!)?@aI_|u zGH{0fx%wO|d$ABKjs8ITOAY*8Fsk@i%LNR&W6$;FGNt%4r) zWSmtT2U0^%@ZHCW@b-l!K7A7a$<@7FM9eioc)SXkwOx(ccXi=Tt0;(-t48A-d79C6 z7kb{caqh-`a5?Q0#z>}cyYF6t30$??tfl3k?!$UwUzKyE3tr*Z%VJP}mhI|Qjac_$ zK4i_l2%XoKaon%huw=gjDbsQ$YB7WSCn;lmIQRpTe5b}fSQt;1a1FrPS z5&j(O26^_G{K9u!4E_D}ES2$U`gFSqA4>SEv#lc6-jZetw0A8oZ08xix&| z3<(nRaW}Yas>i&9Ozb*0TVQpk0VfL%!U>kQ+G%Y`WdXMy$VPkkpt{7;o89S|e8N3!w8?NXd@5V`nEq)0ig9X?I2 z!idd1*c~Ga{+GL8-0?mTTdzpx<;su_uW0mX&gFdXjHVi^KEhnbC{9Z*8&>`lB0>4{ z`4=lzVE;Q!dbZDy#vGR?jot-N_<&`e15Q9sR46E~R3o=0GM%?9lQUo%>}L06#UaAh ztZT4^b5baW*s;#^qNz4_&xGB_>Q!*V$a(C54sMsVBF#>=;48Hp=(ANkH1Ik^*!vMK z9Bu(Ub3^iG#!NnOD31^P`-%5`5eu0@{~&RcI-SXGSCgLE;ATTTvc0q(F3KyyjPfP0 zc?M&~&bA{;v35jmya@>ve*sVQ=3vH!H(bP|4$M+HjlQclagPk#`SW6u)a=<;oIOE@ za-FQVDqESPT(flbyklE7Mwqb|5T;8ivHJVe}6Bm zIigEe)e*21)+dh-i^0pS!kDy?WlQP;VRy_E-00#DIY)xPYncj3UZjhPE%UfDw`bwo zmFjNUOs|QOt%qZ5Mqiw>9$pn_!k)#W(eX?r6d2lrXp$c7FCPsDCq?tZ$A93S&eJ^K zX%6DG?eMv0G#z%eC(Vy^A+6VhEFMY2sK1N(2@}1sb><J z@r43LQPxI88rrh?C(=<&cZfunDO#{{HS2wPkPU9^9zA`J1(nh~&3ErnBgs+W+&W`@ z`es}uZX33ym7Bh>Sz!Y=ewpzrPlaOs_hA(JkImn6_k!jcBO*~!ijs>8m^aagIAAZh+}#gL z{{(V-Oj$MT(w+9xdOI8h}!vo#IAi@$`y8KbCBlP4Z3s=z%nBhVxNDPES70>=lYU$~E-_2rFbkTF%c2I^6pNrs^sj}V~_S=2(ohUun zzX6}_S0Gl4lfdVRVR7w(eVp8EYs&d)(aRnNB*&le^Nfn1T1|?c+wuX@FA#|H4CU3g z*K+ey^-wHCl@2oxL)11An0VNWKce~xE%hvjMPDxFdD@csDvW9E8_Xa4rB8HbJJPS* zaV~G^I*f2Vf#vIT$Ujeg@?=abu5;fAW72QIE!9~5)`>^lyzN?KQuYg+bZ{B^9b^2u zIkliO-x7Ax8Xga|v2fE z5?#h#gBy9wFFQ(+dN=8jy)sWxB5VwGi@MLa3GqPFwCU{WMxkJ=yI|r 
zcQ0G;BU+WHyk;9@mDWP;qZhy%{KVVlHZUai3aHOR{#K7UUB1Vh+`q#-mS=g)R@29{ zl38%IEuNp_C`r+S7wkCp4i+8hgoRV&iAjqmMn0J2x+F(}3EME*6PJq z^S(3wgEh+R9Dx_QujkJa*#{4t-7J>&N84_{|p~>+@sSU^WjOJN}*KiY|4) ztQE3kzW-`WWWUEh;-k^*g9RxHl*Y1s-yvX@967Q~o3mVQOl+inLx<%te0}XHR0*!2 z;z8ywxYLC<&TWG*^G1+p_eGOr9h}%NL=E4*M7=p$)VT2~7qL=|UQ{W<(%V)vUZMb2 z=a0hV(&TY-mPpcJXA#IsW}Sxl66A?ZF)ntj1xJ-GoSdo#V->>iR~TS`%V*{%RVOd= z%*cj`Yp^Gnc@-$d-+wZB%&+6FrH1p;k?Fg_YB%z-LoMKZum+5K zuA)Wc16&y^Mf*Dcp^pXgTyDDr|0#y!ot7DRBIzyvN1}q8^NnSQ*ND(4t3CXecrCi{ z)NwG=?gurC@4)?73ClY>IfK4XSJPij_$5b!+|P}{;&lP2{iFrIeP{cYh&+kY+=Hj( zjL6~zJy`tLkqBqmV*<+)t_X<*wX?}E@#7PiC;o)9oAIN#w2R$XcJ$$msw3P&%l+Wa zI$b7h^XGr&%FQBW2)vY`X`Z_b3aOH!e| z-u(LAk3ch1k=m$S#TjX=S1%!*ANb=Be=`ipk1=ATHA|hU_jgn4PNs~4yQ#l<%Rgta&dw8!?z6jzm2vS-4LdR~ z{}A}eOeo$qw1v$BN>pNG3TV}&0LIGGx*fx?bpI*%rff(Lc!*Io_WzbMFe28s*8qP) zfCJn0=-d3!GyoTI5{CKkqtz)BX`WdCm`N@(k(X2@ZT4(+lbN2!YZ`VQLm+M;ui8!E4Wb z@H}3^OUv7m1Th(MfiVQ^r_Ff=@U+@M<`VG1x)(PG&2Q+L*H_Fqm4>5Yq|LU zzyGl6vJ;UUFUEL8*YTcy9(o&{!SUyf$%FS3G2&}CZuvEa#whiH$NUrULF^x1I&+Na zd7-dj`vUyC#F*|q=2!gfu?_s2Ru0YaQfRSb5W6y%_OQMNYts&6SfDfcrQuOLqx(7c z?o<&zpE3Xm38%n|=>bn<18`N;MP7QfF&*_*jl>7B?xt!RqTrRsudPc&`S8->&Rr{u z?KSME{X#tw`Bs~jwZ4ImaaMHMi%>|_i$$@A>P5+GrgGPBSkdV5W+*oJ4IXJ8De@Z? zh!0o1#=)20@kZG=d^Bkt7EhBGoPP5LTPD4PQ|osXm-|L?(pjR!`Ti5wS?C6Df7D|A z(Ixz^be88htVJ_-S3|bBH1P>JiCRNBDEM>_z{C~(KL%oNNGt5P{|8PNN5Ss~aj2N1 z#V6PYqaBvP3@>%!Wx;qRI@vhmUok{0Sdz8|Ct`J~A2z6J(miAKNWi~L_|eWfSV$uu z`fe)9ooVJ*Zu!VMiBjQajVY*wx^n!N6b_^gFczH@U7jRIcRXaglKtv5if%3zIP`<{ z{C{k|(xf$dcH~u81!LVsVbHvC*z9SG&rhEyUZQr4WxJHg&v}*1A1w~I=Sk9fk3qMw zQtEX6F2;h}-OPVxbG?bnV~p~;jTbA*arRaddX;5jb0;r{b$~G0!kDVAF`;1x7oeih zT{OAz65Sa0rz+qQ9}>HTPteiFgO$zP>ZEo~D?br4OyVF@hIvyxDsbk-G+eq`o|?;u zQde&sJo@e$e=hEEp;w^^0mgxRx8po?%<;iTZbFNv0B4!E!=f+^vg3*Z zE<377q>otB5XSh)ve=4a5@N9^;ES8zbRk+*vP*Dj^)Bx1KmiwKqfe^mOhX9U2OA^C zLFpzbIygn0d@SyQ7g1u=LqnNfI3z~@zE-0LxK3Dhzy)4RJ&e*->rsIa!5onQ^ewX@ zYlXjK0DHcp9K>j+q5-vE`L6J~eKCHlU^_E=hU{i-1w%PmQb)B(@%%w-{(K(iNg3Pj|DhV#cen}=i^L_;%I! 
zl>2=6x6#zafb~_5lnau74?&j5R-E2qPhM>Lh#sdcakh{#@%Yr|HtXJft~FK$)-#FBK;yinl-|v>B%fpMifHPT+i__0YTAA2RxaaGpggzN%wg>jg=`KNcsI zkG)~~%+VxDtrSJZ|A9rTMCr`2w|J2q1a{Xdk~h|M@ayOc_%-?l^wDMLwD={Q?yf-3 zDphI~(Exho2BaXJ?XWDv`FvqHTCOsnw?^biwajb`Ft?|-R|%8-z0xG(Ne})OX88=w z?_7czdEiv;P$=_fa8EZr6ZX)p=e{Thlo~Hgurr8Y(xN5c@{f>!SM^ zzU(pp$#VvzrvEheg6W_6a1MmE!f|6X%MA82Kh9F}J)EvdM%`g5KYIF@+98YsI8no$w{tI9-U4=>( z+7RpA-}o^B8z7jyr2b_D2^8Blr>@CvnW(wYeu{6TG>5)HNIT%44ZFB)8KQ8G$Eg{q2K7k z-?76e@3{klcE5me+E#REf;E}*F&cVov~a+;fwzjGI8YM+Ka@quh>$7CDg6cSt|;Tt z#ifE#tNvoYi3U75H52OR_F~FHMcjYQjLOEj2x6SASm#(X26?Iy7kx`UV3sL-d!S9n zYWCp8Q+x1_$0a=6^ADt6ck?UXAH_lq2UHMg!BEC$9<7p#MRjdp?-zm7Di*?V-D6x0 zyBTz7oU9!g$e&Mp!8lEI?un4JGb}Yynb6U zxWE(NlsduY`8Qx=z>A^}PnALIpFh`{vJ1;rFh%dyb$oE?68>A^hhr~Aq13rAU^#Rb zmdq&U1U)ObxhnSDIRD8wX=^lki|>Z%HBR()sSr$@|C5ilHN?IMdAi-n6Q3k3MK9&i zWS#LTzW(|leBkn}cvAit>~gw|OFR|Gr6qUpz4;Oxo%@e#y0Ha23l#YSO23LDAJ2o_ z5+MRM0#Iw*#JQhTBooH5+((4~zn>dLe4jg!>Z9?3qQ)D7&d(dUxFxBaXUMK%#W8=N z)?1Ox7#GHE2>Hx8?duX8@eRg*>xAhuu8?z0W4*3ZYM}Jdbuiy!OFugak>^(0Aed=P zp4Qf3q^1aISssSRlU_rl_zx()&2s72g^~ALFBpB~+pz%uP zo9WY{ahi%nYyBPwl&a66??!cqS)~iMXQjY4-h~R_}U@9lvC71>f3~j#|F4 zFiXsZMox+6UmiS+?-<`@l~FdPu3v@O`Fn7&mnmrsR^tM~>$t%$*06kf6|9x{361Pt z_2f+w=zd=ftM7f|lo-e8nbbvSDt&~h^RE^yHjC!Z$Aw~$pAhY}S6yM0$d^)#_qYyO_uB;b$M^I984S3ce3lH?R-P4vhuLG1 zvLbeHQ{h=~Bwjd_3lX#S;LkfIw7<-W9@C$R&h7QsT^$QWhkGEkJOo`Ucj4^d`{*Up z23o9BBW>PjLa+Tp?~-zUiP1esJlP7#eQns(UWpbOkMV`XOx%9djEr}-rH=+qaqh=T z(bD}X?mNhs*vHN2vYm%fe{lgEtUd`7hrU6~&j=X#+5><0sL*}Zj<`%(1pUf3Ver$t z7&f2zS;`EE3!e%vqlJ0JhtF|V>n@HgFJQjDcVJkkPq!zU(k@XI@@+*Ep6G3aw=1T? zEhkxvdS3vDbtsWK<~MjBTmZ4U zVUTVhN4kZ-fUsdKAMnneJY~A)sTKNk=vNMyGuGLZGpeMXbq>!toCsN{zqnp0is4l65u;(2vBQq(L?ZY=IVM;8%3WL_r-yE^hWT;HO0 znCJM^J43ivw+N-*oX2~6Ucok1X(SKAao$*a5ZM>coo|+)syo7PQK}Grb?)d8RngiP8exwD<+7Wzna~*cR`NEA3wS_sK?8%0v zOAy|YI2TfL z0bNvG$k7S%^j1>>%=f$pGRZphVpk$|-Z{wK^m>FH0(qJe7s!}?-|$S71{Lke#CFkp zs1UGQP*Cv-;)RsRKpfzhsm|o%j0OlWRU_GZ&T@`@C$O__Dmpbj1SzX|u;8UGk#-d! 
zmuHE?PAzSsXITUmnRf@GH++_8m~43=}p6mAm-C+X8j!OE_D0#JFvK`OH~uxQM{A&hQ>Zc=?|X;rhk^> z^jS_cCP9geb(1B9Z}Rv@#mF^TNeFbhRyyfe@-a~Gk9yjXGR#2VzfR{xj^5LT%l^IYa4`sE< z@eEn|tlhdYHo!LUa-hE?hj zWp8b=$jg!Hry0;Guhhuk{SG)Aehs_*Q zWk&&wvY}g4U*j&{7pUGl$X`fWA=n%{2(GOAdCEywSc#0`Eh^0Y_S2!?Ki(YP44FnNgY`F0f+wyZ>-m4DfH znz0xDtKm=DI00|(LT#d-;2|(0BOA{`))o<}nEM$1lg^L`fWNwlV`UvCChgaqs3gkfOdEJTi{0{1x` z7?YC8&0DYq`f9pC#Opi%N4Jz^7+=8QTMG20))tma@x;OBKRMl5;oSaM!VR!o+S5f! zyoR|WaTo33(t?fXIXin2Z>B=G%johkBn3`asq!kSap?8049tgF4kWV}Qo~1+tNjM_ z>Pr!-y3dK!G6vSxc||bsxHgS@xEBNol^~Zr9eqxOau-C{&U=7I$CueScGFwl%21y+ zKP$vnIxMfYa3^4;64`p!oN7!=adk?&30~i1sPX9Y_km+bgVqA{pMmbuk8vv;qyBAxRr` z>fz251Hv1Qrh9w)xeKywtP94PJ3TXqzb`ihqx+=Mirv?jN7V3w>~p&DQLr=IR*|79d1(-IPlFs%x?EiLTbX%f_u}Wv zXIXCbF-~0b7{vcpLb2^)5UczSN0dcrA$yO`-n#`F6oX*tqkjCCQ_oE(lf>+JZ4&<4 zkX#ao5kqbms{$!(*1E!9&A%gf@vel=sBs{Pg}3>+cG{%($pXBaphKoti;*)=PodW7 zT0V571UJi+plG!=Nr$cIWqJqYO%zD8K_}=m)5OMw=V4Ex4ECe3rBCk zlzC4vc3KNBA%2tl?_MA`{r(sHZW4t=dnYD5lR}f4rTpSKiMZ%WAqG$R3|dD6dCv16 zjCh&BL{BR^IoOK+o%@CPp8Mg^z51fUf^Mt`F(QE{7>CGqmtbkuQ-R*(eiVI`jov~x z;Ne^aA~#);X1x_6Pm1;F`#GcNH`6b$qJI#N28fUK3u^V_AY8YxHG^WKHZa{)uX!6zD1I5TmFM5M4jGz@&Lwh z9h_60BfnIy5;i6agwGJX}J}O6aUkugZeV?Wo;_H2~;8; z?qW+F*C4F*$vr59-o2$}P58ns7eh5q zVcTgxOcm;Y2cS)h& zu{O@~O*81tHz0**_{U_cVy_frKU7JDxU8t zpA8(>$1g0t%v~;1Br|JX!Cp68B6IaJbe{Xb@jI<>RB|6|G?b>R9U8FAP=TnPehnUh z8E8~m2*RS4L`Gteu?q9KNh29Bdyx#pJ$Qmw7QH~7ok4JZ)l+`)ur>|(I0{U>0=W|w z@^snm$vE%s2-6t9@JFt-!E3*oV&e&7G&X24JkZl;Zxa=IMxp`JEnb7dhzT7Jy5#T^ z6S{Si9gSKQjN{hb=8BJYgK@43{j>9a@v?q9=${Y-W0$=v^eXxW-gm}<{y~}l&+wf4 z*Ag}h*Kw`yyLn%hr~G)|2_%M^L3`(W%%A%jFKPy0m9rV`x$Q_hjLT8=nE-zaF<-&n zYw)j-aX6O?5kvFMkX+*oCe=!G|6~PPU@t@eGH=nutJiSzrm@^v_8dsDo8H$PpZ|N1 z$_Mtm=V&aE#%Z;Bc9AJO5ftewgD6_Gm>vQ(_EPjx=R+<(cLpqyU4pYyx8t%DE2w&S43{j5=3573 zF*R5j(%?BSUvEaPT~ei2FcgHE>?mp#p#M7+Ok@2apBYc*k+n4@v5Z;Dy4j$UC_|@C z^@NxGA5s5IF19hhhr>l%D)#;p{;(-vM3GK(y)Hzzv+T_DktHzbCQc_GD&>ZpPopyH z2}%u-q^enZWTa^x4z>EB`ObDs-P#Sg7gOLGD&hNInow~`j-FrJ1hNUe++*Xbcxl=~ 
zh&Qn1)9;U^*-=p_XF4G8yg3U$jaU#i$oOx~ujkD!>_}tH1 z%lVO)AyG!ju zobp)_XT3Lpfyxo^o+CmY+KwXIbC&WN2@>GnbaHIPjirJ?Di605CHB zJGfrR;1u&*NK<|d>s>kk+negpc9K3R_Z^M@{gx*!FKii>`*87h0na}(9c_G?3M-C-8Q1yPD@_I_U0)dzDR>EI$X*tw|vD#)xYtm!FJd&wv1c!I0S7y?daP9P0H{4jGm%T zc}GoIE|YniE~&2)w4E>}>qs`|UYb(0z;_hX7Hxz5d0F7(`v$FB8LOkZ8$0s-L4(aw zIuQMY<{PGb}h;4-M(j@a?Jy`NeXvIvX<3@3I)V&KRz(DYv+FV+?3j`58R=b_~!7 z85p_Jf!s4X0?w~9ank%RI8{x73_tV574_e7R>XTg^olkeS``GfPkgYC@gwZ6n3JyP z*KXNDk|gobBz#t5OD12G;Sc`NfY$C2Y}&`TR?FV=do2^;za_HNwn2}+JKrEkVEGQ^ zC=q&xdGs5WctF%YdD6VJ92@4pfn%c!p!#zI@`EC{W$PMF|Ar9FaF!x*XX{{btsL2X zrWpxc!R=o@z$==Ja3y!x?wPMZlTJv}j*`OSXN(>GM`S1hxzT$wNQj0rT z&q3IkY&duA0W3Jy3^7}*N#O-&j2I(A+XjCkJu{k$=Q$Ib;#$<=^>Haa$J6KAxVrg9 zv|j2YuNy)6WfhS;p7Iy0HGcpbH1+wb^|O%#IfCUTe-OzCfQz2yM15T?RK_0UHUy`@ zZt)bjsO5+&98)=&ovtW$SC!uP(7{7xhxwyNze3LBu~clgCXHy7rxgL(1Q*6&^m`Sw z_UPjVRVulUi+uRE%a-uYk;n1R{(ih4HWg-mb|5kREf6!i5b8f!lYP%`uv^1vxW)da z59&6w(J~uy=MBKzUgm%855rJZ2{J+ZJ{({S>?fTzWL2dcBn7>dwzuZ{djkq?+J>%z^oV^O9$oc#|X_)8lL`GoEmZUuLc>)}Mm^`;AWb|8x1 zH7$+X@JEjxIk^-MU1gk!Z3cLBv~cm_+F;B*{~9-Te*xpm7G%Q1IbgDK2rU-zU_V(H z?wHyTCDyUgGN%=-`<>|Spzk1?ElnS{24M2d#UL_Ng}Q#O#nyLBK+dP09~^H$)wOi# z{N(-K$Rrh1fcu$9E|>yf?tp8;(>eHG2*;D*tW+*TU{~Q>0O4Q1NIQUOpbIM zX)b!mbh_H-{&=#`9bc`hhVOG7$>^da40#(4?_ROIz*jvw+t!leaR<^~Rl}=XZ9>1w z=ls#`uHvmm=i$h1zUcj~E(n`GieC9O2HQ7TkZta{_`*bjun7xAqJDCfw-sq5m8JO? 
z*JEZu3G4F|B~It>aI=5rV&ZLE5;5UmvAKpA`8_R+54>SdLp@hP%-M2&vil!iDXFmN z%4H$4D*6zt2yq6ZQpWuL_7ejag<+DPE-7Q3Kf65MfS8yZS^TpL78Ph0PdUff*tz~N z)=r;{FQ~=M;pLcf(Tb!+7J~75Yw#^-2H`4w5<10!t{wXp+KU*w?3FM%zOf%<-!B2* zEec%m*O%b3CLDkJsdM^;hl*`>NYLZ@QE)*l2c3lnF>bOE-9PjleqO5u%{gcB&3HGI z_<4*E%M>BMH@}2{+fHQrrD?2Ba*%shT?oN%3~1+}IM|rt#yCF?aQwh(ewB<9y_nz+ zZrmxRS=-TCaUq(zNgg7+df{zF5|{Sn9jqyEgjIExr2O`CRE$i;UusEMUv5IyI?e-f zEDl6Mec(vtS6oqJLl%Fxr3&3C5dI(I!W^%Hhh>&j?ZGQlHapI*mZ=8U9Ez(B%iwyE z_4qzXpW6CV;_{+iey_eB(Yld`%f1f_tZtq~b6$;DuV>l(S5dfFe>Eh;s*=lK5x-c3`4(46kuRAo+%Ectw_%+Q2M&i~4*M)^7xJ+C z&msQQl4O+rn#y`YlieFrq}+l1{Ea4r8}g@fxdN6)Rj&Asxy8!Oa=o0=l`L^;=EHPL?ytJZw!hlf|jB z9m^bF&gAp_+3%w_>zK=wr5!=qbkeC9w2#v#CyKObhgvooVG_SSsGZ;VOPd%>`U_Xo zL-4G_pfcz&n=9)-pU2Iv2P)>cg_Fgt}G6D@F8q9u)4JBq$*Y{PdN*P$_{ z5gJdKvzv$=`RnH^@c5F054OF4F_wtptFEFir%0OD+%C?Qm;ptaj45|%98UDu?s{yB zH65|Bp&5>MAn=VGSz`1bls|HyT?^B3&OTWpCL|-c*kVf(H%QSq~c>AEP zY7~GLyU&Rw^NVORF5jI7hr=7tbz}#=#()pL6syh_y7@Jf|=msV< zEaL<3&c^9Se&O7%liVWz@wk9>5cdW;(Mab`-ovX7jpOxb&o^n1-9_=_P(51zRU`_^ zwVcp2ircE{@RNlqoo^%#vlefGTUTb{{m)%8O zmZTP5XTZ_p9k*ts3GvLZuuK#DWJe#aaUo)L#&n9X zJaO1Eilo*ph2s$opXY)ioKZ@pgQsJ{hxS51?b0G7Ue{fcFh_NM8PNOo__J>1ywb{IXinglWOk z*PjRN7GEgxUe7+K9QDW9u&XnO&;75r_;I8@l?>y$p8iP#VN8Da%LTZ?` zqc&?Qcvt;E)8KNLlqd=7gbis{K^KHHAlDFB#ji8g#i6N@cw}ibuCq_U_KrqaeL4nK z_bAd^zM=fHe@<-vxq!m+WpJ)@AEXVwh2S5$;4#je=6XK_OMAxpIM5G2q|9m5C3*P! 
zEsEbTN1F2MBxuInOypQ6mBLnE*exMN-2#2k=Fke}&;N@5o;uLEw{OA3_7ym7lLqB{ z7>_tigVvbnV#%Y=SU2zm`8*DHPYOVvSr52?IV_76p+{F(-^E}leJD~kqE`eP;KYg` zPBGOL&T@sQ;y(^T&zjS_d>Py`c?4gV6rkyb?O+vf1a(giV$qgNkP3^y^z#=nu<mohKdY9_P3&VLy2O4&52iVpu;QO?MsqM};C|xm@*U*my zx8Dyi%(K$ z^IL;{8(swSuJ-dKN~=*~>r)60HlUv~-}8gZvr$>65j5{wpu&A|Y9-Ui?^)N52S)Eh zJ$XqY_|S>UY)`+Lq)Ln;tZ2!NU%1f!Egrp<$wjS^f?%m2!LRB#3>;p8+K-gUSH}b{ zbJPudJ5v-dZD0)F*T$UtR59{JNR&j?q-u7iprqHsT&mU4JRs#F2DqX z82--;85-Zw#Ok`&@I&hg-m}n%W;Vv4fvGBbeljN&FZV%x)K_-z_<>%`$3C34z#Od- z{HJ9>XB<3%_cw>Z>+DIG$T`t_Q;o=z3x?Fj?G3&^Wkqkdst}_!$I<-sF~LuV6jaU3 z6X+B=5VI8#e69Nsvf!;6o?~^kFP~MYLzN`A_QPOelAFq#W_P1*5#uK9*a64lY^dK_ zF`i9kdq?Gw#O~x?%nh5*%(26BG;C$eQ17)ct{n2vb`e7Nn1H)Q z3QpSokn6GnNEzc240V# zj;BuJqSUGAqPhuc&J^ObiQA#B;v^h58AWq-&Wb1AW?6_P>ipi?L~)Buy>nE96zzPD z;CQo_FPW`G%0vI*nEpa=(6FXU%L{O7^>5_Ozu+q^#^;>IVd0rM;L5$?9U^Rqu=q0w zNHvzd`G`Z!a^QCOE3Wy~BmQ5$5ego!;&Wn0k{1Uq)=1o8V@ekP z?82nqKd|m7^Y8hVL*!%`x^Q|poME+GZNJg9O*IGCH2#Ao@jbj_z7>qSCNg%20hLH} zA|)d{Ffm7)S9!D;42!?vk;Vu3V}cD?bao`UcrO%_buQuMPrb;c5dNIW5@#>tTCw*T z9*;bz<2JAyhl7hxqlcCko(=Nhr#q;UEoHJq>HJ{y99PDR0ye_VL-SD#kD!Ywg%2tA z3ML-=1s5h&;26O@xZm^`l52RV_1KLq3%+q7R)$1U_X!RUGzAZTZLV|IOEh$_BEtD? 
zaNTu2p1Zgmf@|M%SH2sQR~a4n;8hrugcYJpaxU~pxqw#oc09HEDhy@ovQ;KuzY6C+ zR7=vQS)0+-PnF8dm!LmIj!c*0!tD}0;%eUJuY5NL8GVuE)`sORc?YGuk{OQjivln3?*#s8$=o3ML zAsM$(lPrC>39bB@=9*=Z_AZk~*$W}?H-*(*ZFAv$q!F!&~E+P~%TO+Vz|qd^fKjvPXc{bF2mu>lF6_K;hEjB)Wq4-&L3>6{g2M0HLl{G8+p zM~w}LPkIqFEL@7}qyJ)hu`&H)Sj9PqD3aZha&&TvF17s{iU+D$4l@iQFOJFMH#a3L z9rFhdjJpexuW#ef1wJtMn-#IQOTrmNI&|>l)o_<>50*uy<22bu%nF}|BL+GIpX@E@ zV*W2I`)5w)r%4grOkJ`%OP(}lvi!@{T^O8l9!$GCA$;@;aFi8`H;yzQ$*cA0F&kCx zUc)D}P##TBWZ4jx%1@ltcTM6J&;ZxIJFwd0DZ!9s&X{|e$J^eHq;re|4Vhp_|77y8 z^S36;J?i48tLl*W&RIGtok!-i*S+7fi_FUcq*2cE9znkg2l9=YVG#Nbo62zv} zv-qr`;ouTqCkm=7t=>bPf{ZPkh5yteub5m;U_Cb*8(1Ry$8xX(T zYLMFZ2%bFugX{gBaQ78^;ytVsx<4Gm#7A*_Q9&7>XOqu*_?oo;t^t8K4m|9pz}U^_ zFuC|Rdah6)=C^qC-p#!8%qP8O)Nqi0WX(ZpDe-zMMz4?X<&S(%I}8^-fRQe@?QBWmcPO&-5Fh;JM`v2{iw1Z1^x z(mO55#8O+j;PZ31?^?(W8tMf_o9}`5Q5$rx%g4{h@4|x-S$LhdBMM_HptV{gsLX5T zv+}k8u)k{GAy{qh71 z$Z6pFew2bofD}>tt4eRGN)W>#-{7~S822%)^X(Pd=vSamhq-9e3yZut?OEEiXpAfk zPyT`nX3Hy7XmH;pq9r8$`&}?wT1G-bL4p1Czx}97Op#c-YyHMed%cExuHCbC_~_9t zlZ6s1{@;FfBn1fP8w3cSnfeO{h6D)RkN67@?hh31SQ03dkO&mo*aQf7+zJr->IDkr zM*9mjJ_iU@{R4z98v=!DQ~ZT-zXFBJAW-OX++S#S&R=M59w1bc4ipYE2oyd`4G>Ca z1`4eO0m2FA0))e z5I%Pa5RSg*FI=V)D7+#J5ROV_W3chsbOVH0+5SSchXKOZ?AYztdmNkng_ceJ!V7== zg+mGhgm2kA#=H#>emx!_9DFB0sPfxi_^NPfvbmP{3!N4P z3Jb>v2=`s}7cTkiFDzf`FZ}QNTmMnUOhSrXZtVB}{VTQ#;_O@n|9t_9s^!H@1YaB^UgR97TqgeVC9wGJ@@Zm+!~}u=sQcno za*cwEdnE)X<4=p9-H#K`oV`b^vUiE|ej+6pW85yGst#Jh{0IOk`Fh)3K>bFO}=Sj@jK5r_Rr z5Z4555hv}A5!Y{WFLtQmZ=J5t6Ai0hFH%|XL$qmWI6Ify#Pfg7p75t#TKwx?i+j=Vb@ElF zo%eT|jvHN)oV%YZkb1hdg(OxFU%SH(EVHIN(U;)m^~ z(o|tiC20SVqE8q*v3toJoD?jAF>@L~bMQ~d;~${wvrtURP)6qx3mOyZ1KG?+U^=0f z3wk6^w>+>SFXvg4?OVFIX@iytNY6`*OpJidz1v_c-_39R`U`Ds4~gUJB}rFeF&3N{ zjn=va7O%w5B{sr91nBtmW?Zgj{m~(XjBk@VD z47J@NMQ-0$ps1pWzJ3ufmhA)Vyu+acwY};FdY?OS(iv%f z`<_22<)Kb(E-I3*QL3bI+fm$$Akrdga!F*}+Wk^}RA?Zukq6UiZ@bEM>QrTFFa^en1^H-xL*QNn~ zNs?H;lcvu$-R0U8jmgiA)@0rk9a1u+ytp&pjB~#D2x_t%@wme#F25@cJj28B-Vg<9 zR&7r_J-tOSYVUyGbe(rnk)ahG5AcZRKivJ`7gtUHL7SZ|xCK@6S2Q(gkZBe#U2hDI 
zOSXVk;swz0FF}ncJ397w6%6tiMA{zT#|>#YxcAvn6c}jpmFnAgmm|+XDDMGr)PVj{ zoCb}5Dq*!{Hb%Sm!?qe3x_|6CZigEOUE_w}F;{&|9yAEY#QXr84Sj+~xB_wmL}+mG zCzc8Zvu0X1q6+s3adw<1YXTTqz8av+%hpm%y4 z{xS#!Us8@Uwr1i4u?kg~`VMa`mP5+4^&Rn*=s&9;ey{PvwT~4^wiW@o!yC{uQVV`( zDp1#Ny|7^JWO$ws1yQReg1_DXde4soI`;}6y)BCs;-i+;U`ann+)#0N6Z!EaS15>ya{`vyJ6-R_sME=d=j?{0(-gRJNTi+iZ>&Q#FJ zc&bHPKcPvfHNAczpWFJ82Zw9=*qXDSFA6l`uWDDI{G4={5fh5`s|~2NOgIYSB^Vn( zi~2{k!?wI1c>D7_>^~F=k9R(Urw^s*$_tgSQYL{Rbm7gL0r8&-t$OQk{r=hS% zk?Kr%3hp6Ef}VjiI6vq-bUc@&o4cYh%%B4vh5Mnt+Ia{&ItLrQZQ!B&IbKag1SG?b z+r&8GgYKKs$KHkT@^dM_y1g4E6eQ>xp&7M$Vu>ZWDr7;KHobnzm_%tw(ziozK%TcG z$%y44Cif&9Gxo)*Vr_UNDMCl)%O7^#jEvfF8%G`cgZd4+G%Q9L=9X7OLxDZ{zO@DU zsN1;Kw+B@oJ;4|=#>0M}F{EpiICt$s zHn(uDyz>RFo0pA4G!T>21l%>R$IORzop+bI10Cbz@Y=z*yy4%IaG>Z5o{0*B;^D*i zZ3p+@W?LgL+Eoen+}q%@V*x}CUM*fW?JGa~2-B=N4~J#1ogi7HNI`8XKW2^!-Bri> zvES}vw~rW34l01ryX(=dO_TO6{R)GBDAG}*CFmXX9#)H#ry&E?&@{CH?>rleU7i-i z+Uhb6%KC$~hI&*%M~w>0VsY<*?RbsxC?B*G;{EV!c%T{#m0cgO)bSadyC4HcTV=Sc zj)?-7vSJwYARa#YG{WlseoP-~L=?-n;7QY9!A`T&0K*x~yRZu`d$Zc)FaxT(%m)h; zRq0O^L*m^r5vun{Q{ns&*yy(!eOET&gu(HGC$)^hb+J(#X&=Q$9V)?oCrL6#!JL=} zXkbaLFBW+*hC=;av}N3bf$f@f_tPDqIm;P}{jFhAnI3uh@D^hs=fba1b0P1YJdr=4 zPETzt7T-)ug4gH2ai0w5;l7uv!6Ph|d;a>2;M_|Ua=rQwI2{>%<7Y zar#AE(dEjme%=Ilh**v5!Z3bLM4{v6NFq>E4<iN!RTE&LhyUkDX5l_hlgWA!SLEOyjBv!SOUz??f!&mqpHLbwr{}F+zz~w ze&ZDTI~c(n{;bg9+_UN`>SUXd#H^7pm-Wn+E30zDgj&$)u^;~In#JGvJb<0{He|WM zIEbBI%K24f!tZGYRCGCo+wM9ToJV9rYE>zwc`6YMdkD#W^RRS%Jt%rJ9pU6cSoLWk z_g-W{t=wipXN?h_x_gq>JEcr>J}}*@Y~BQq_|H%qGKjpYeUE-UL$NI8BTi{}&byX2 zL+xEjy2{*$YPjjaohP5*d!aOW-WUO0j>%}G-h%svtI#2FIryp|j%(9g3h{b5oSFA8 z3~kNAzdmhv=gAzfotlB=EMNQ%%L#EQlA@aw)#;^qTC~nM2cPc!hjOPpPe=AyONpyB(!i=AhoXDyZ|9v(mF82R$$GTLJ zcdio2kqrlnU}w-ZYX+^?N3iYpNzD9If#Y(+VDs*~AeKJ|cXJzYZ^l~KcEgIa3FYbQ zCkj-1`(#jDzZOP)48bDBJ7BZvCf_}#3(NlPhfTIa#OqI#^M4~Yi?!pwmEs z8V$`VQa#n&|bGXDGj5MRx&YLjvTL|o*TZspK>}ckehp;+WlI$$%hSR4` zf^gbrcx+)xe)L;`{vJ(oeMTB*RNuwP$e-X9HYkxjf?e31W=2Qn>eJ-!>iqcjEZpyV 
z42mb2U{!QIj+eR!>s`J1U$77+XwCw+MQ?F}(OJ%5`#Wx<@?*$eo5+8<{|}yzY=gra zlJP{vAX@gh{l(T z;LUxOGjUdz#(vI4&ngwN%S4&Kw>J}~@cA%l=UF^8Wf5*%cM#|OQXq}%FXPvI6>{Q7 zJ}@yQrM4+H#A}0%?da+>#q3x^I+aSYLhtQ#ZfLs{yg8Hq7Ncx zYufo>A)N01%8zwYAX^9f;w2MV`ee*uY?AbPjl#W?lLqmt>7fa-Rx%h zyJgCGg1kTp(qevv1DT_IA-?AC=CO72;d)d!s6f&dyK$OpYvG5|BeB8G6kKB`M`y;F zQk@hXqP(4XT_Rlg;`Rh|Y?mh6rPg9^`x=&YZ%H1>X>+&bH)4L&Y0ULyYmX17`H0bK zc;&esm)OR50Ck_mQI$*3Vy_9k^Lq=vh_|Od$5n#ExQ+bLyiqXI&I!ykWoUL_Da-0T z0?QZniX~W9?d>?0XSn}8Y%*=)xx5YBjPLriwpWTM#1??!{TKLbT@hv)x$wcPW_E1K z4$iyP0%MZw>Acsb;))Vu`mt#%7k@zo8YG8emi`&|`MwSlm5l@sU&w>nAlCm6+KDGq z8bmVd`!LL1lY|yZ)7LSjEWV1>`1|~@{Of>tib4>WXa7U}OUgu45)GezEP*3K9>VhI zSop_Zh15IyA!ExhL4?LM=iPs=Aa|}Co2%o|<;??B9Bl_i(sQ|=r~SFn<5KXR)&ra! z5ePlszr$9>I_x|m!?{JI;ek;0y#APkz29>{b@2k;-QyeHU<@%$lk@m9+K|X@XByp! z^EvH-5}bWu6f|U9!16nmRH3Sz8$HUHnvAc<>gAn$E!csx!V8GLtwEjquA??vdq^KT zhbr}ju;lel?!pEQm_F(n+^@>Q1$jqNWhI~GR{|3Y z+F*>P2*<`x!ik-E;JC-0vw5LIJnkTTuRVayI|p#al6ov;O!dfSNgVayH@Gd!Lm3Nc z8ky0{E&lrlyW@YL%uaRA6J)9RE?d4g>JqGS(uHAP^NGO_ zsqTrF-zbuxRTf03O7Q8M?NGzk9Io@`;EFRQblHVY+`s&ZfcW;nZMBC`Y=_XavI~-@ zn~<6f6LDOUH1%`b1}@8!a9WWev?>qhDO>4MQ5ScATE&erlb{WM zUP3VQ`QE&)MS8^6G|bYB3u19TI6F~GExlWmu$rc>J>aNJkCy48y9o3?^$@(q}?)*CCn=fUDu7o4cwiWaTO zxM#qcHW;OgZU26PWK$DApPPfpmXZAY#MR^O?_+ zx5WEH%&6G28OMH>B-M!vF(bf+)7b!q_t|u%WdBJw$dS=V>!+x}Ae7H3RX5J9QqHx`V5eeaC;&XvD|c7o%pi zGMThB7=0yD;BMV0Ty3@#Uab>C-S9Y=C8LGIV|~C;Bu(uVKjJRtPum#RFX;TT5~jqf z(QA>nK{@p-FO3U1xrfQ%Ccg>)i1a{p=rS1nQ4u{RT*T#Tt?8l>9dJ?eCx38vBOZv3 z;zzu!72Eq%z*l+3tMO@q^0h`pE+HG04i4}$zOjDWG^P{Q55aFuZy;QwSFow?Iyj`h z#32t%sO#nuJh)MrNQq7Py`OmTXg-2VTdD|KhX&!u(Bl|CQH2)Gv<9!E4RHE!9O!j5 z;#2ov)Jj!_J{87{(3uXJxyzx2<={2_oX3|K{1y3}jzv-4Ey3n}^8Cy3LEQCj6=FRn z8CI>dCeiPTanspg-b>kpgsRNL>7Lf4T`Pqj=Gq5>3&T)jT|KsXh4CJ7n_=p54RW$3 z7&DE{VB(Nc46U05<$ESTntK}mqM!mj48L$^S)XxzjUr89yoj!kTe&7HWjf3Lt{~>h zCDb`&L=;(i_L6NT{B`V8xx zzQ>u_&HRV>67J!1cL=;`PPQF3r=8mzsOM2r`qwlWK4fWrA$tUop_ zLxPmEby3~^KVZW2n=?A&K*oSEhhy@w`1Dt}#WmuBp>bzu@xOGdMIr z4}LE&VEKCk7%YGPbI3Mr5oc*#@uzf=xM)#@$6P&^6 
zv8(t!4;F*Di7GuFAxkE?tI)n3i&_400WS1bBOi_o0^QnqxQ2Ng3miM)ZhQ|e___*8 zH@pEIjq^-re-nqi9EPr$55WD&63ElY;5MAUgHks=V5ik6=-gI?35uuTMEez-F);_^ z8BcDQVteu8U9aK8vR|C(Aypz_IT!sE;+Wr4gDPKY=A|D@fXicC;r($X@jFWk+-&z9 zHtUSxbuYgV9F7`^zrPw|=zs>fR$hrWp5EeP4?G3E8)^LZS#QCV1dCfoTkzeo9|TEE zpEbAU0#5PKArnxZwz15N*SQyP7RzP5)*?w!p&EMjs#8~UZBlbdiCD`daY@&!P$E5= zSFcOuXAl1e>mRv5ld_q(V2B}2w9uquo#e=YDVK1($_n^k$2is>KEqH~mc#h8OK^3z z7O~lI27KhsK#0>w?AUHYV~S4k3UNi;WQ9u*9QV5D^1T#(?4M&W`us{14YMO{ofafL zr4t7G&)|12$i#Dhy->>c2lPGCB<6J)kTU!?cFM?t?}2oQU*p0Lo)chD z?g%+r%@B6So<69RqC>1&IQP>-CHnIQ8o>J{4F{_k9^)I%gDDGJ6@^P`(OvA56&` zNdX)<^8);i%*U8Q9!J<%F+IUulzqC2Gb*~yPd8o)x>H?|Rhq@qD)r&ap&~jcpcSj^ zv27q4jRS))x=$I$rfI_j|1Pk8S_=v_w;_G00lBp3228efL*IRhv~R*0c#*0@bESj9 zO8F=s`6&{eud488l!N&-mG6N>*J9{(SsL$@3D16{!2-~~r zeg*F}*Wi1z4X?9%oO9bYJ3906BHYgMS?UiggrWmyxvi>w-2C^JWbMvHFz@eP+_^0u z7cN(%=Y3d~(vL1)`@9lqE;r@m@EyeM@4;crXJ_^9G$?Ovh2b{O;9^5R_FuOp0SRHS zVpkomJ^mak*m38KsN<)cH6We8`|;>#Rx?cPyt_ z`af96#?xfIn1&t8@yV#iFnBDh`DI!#*1>o*eZ2zmixaufMk%TF~2pU{;J4&0iFZ{~QC5kDCfXMMXHCvB#!%DGCyctT06`4nN5=o$YX4 zI($M8eEo6O`42O%1X5Ppa7SBeT(gH zbJ5L}h-NbGOT^m&EFXG6{JQlLI9|QMSjPhN2|0rKte+Bm|0HO&$2d!^6Jx?z1KP6g z0ROIAnXJjH$LR_-boU@xXfu?jPd`=i-@h>*w7e?l6nx|Fs=WuZY9H{LEx=Rn()m4y zThVT-8no+-foGZKglE3wI&V$#L`jzD*WDA@Yi8odsz3PpFY{NQr~D*)V}3!0CYhQe z!RGcBpUH>9@iG(eV&_g$(})}_x_}9h!T6$dK5oCFN2|1hd5_|FmiK;<%NoIa=qJlz zhn5{3!8{KOA|&ZXOo5F{J8*C(<95rAL@7I2GD!XrXj<;X=H`#$t>lnkoumiqXB)tG zeF@wZD}g1J;W*k=leP|afz?;#>7lw99EnVqlV1f_*}5Ql1M_>?S&_!uqxr12DzxH{ z4mJ8)D`<(3rQ3R?88h-N=XlzhmKMsAS+jnl=H-WY_Z#!E))aU7^lo5~%EzJfPY{>yqlhZZL#X@QV&!<{a`iqFHv(r{EQh@PwV-76e8Q-Jow&L26jXeVhSk0y+`B_E)Z&&IxpVeB zH|0tb>PQ>YldIC8_bTh7Y%?N_myU298;-+c8yy-K7Q%b_K1H1sYV#*UtT;Tz8Vl@~8NS>6=uI8#IGJgVNSqc5U@bBs;m|?F(hWBaHv5hPbtWbeA zr1yYpxeUoMPr#^0$KaV?5x&zhBe#~SQTJsA3 zXf)2hXhr=ZuEWm*p_mh4NM^4%#rn;q(9#|V)|Fb28Pv{OcGrPuy#^ii;~X6G(}IEd z8Q@~0PhUN^;jb$8i}$fwX@&h9+}3AIt}MQe}2Wf9cLHeo$nC}tD@4UtK>nDFhb8a)f8TuRPlMe7pP=-hEQ{b=e zR7`zQ&;4vk!=^P4VE7&>@;8Nw4mv8(1Gf46o$ABj`eZk*A&T_MaWyiMX;6!DoZ*GP 
zESz8S2jq4)prU*;H&^EjowkymimjAN*%`WKWK6J*@9Sm*gG9HGyrsCedyb$D`S(!N#p*(4w({sVBUq%zV-O;xddG){}gVXIu536tujJNhMc)%2-;~j zq+d^!bPuh8Qk#c3_>?tKHQ-^*;wVTOGJr#ii(p;TF!az{09zk+AW2IBhaaho7fztH zUyhdk^WrkK>`>O%f?K~)pHv^$NB?&VxLwkcgx;>g=I;Rl;gx?l@%b5CX!smsSl-7+ z^H*SYry6Xk!(sWGqxh!MkV_kFj{EziY4XmmST#-_PVB#mr>{D|;X^?%|HllJEIG%$ z`4)|9evE~673y@-^DuZ{IRcZEWyoG4K~vQiV(FS--ucXR_`PAH;A5NuEFEe?xv3{G zw$6h5Y+8%=ZLFyNR4o`*l7=^TDY%Pt&)r;O=QKefSP_Iw$=% zPCh>o>us%wS7apjS>z1;wJzMqW>vK9k)f|UoM2*D6nIURriEKnXvd~fm=+{MlTM67 zPZ*8j`oVZ}rzIAoqPLz$e}|MCS4}e4L|2;zX@1WAij9&VPYt;R$adVDLzGbD&3sq zuGgYBm7*~!(Uyizl_tAVJz!A%1C%BepkVhKE^IEx#f*<%%CdkKpZkCkpT6VN?f1nm zKbVtd$qZ*@#|kcF=RNo}y&BC5Lh-}IUtH?b7*u0f_9=r{59Ck}C`lXxKUUB6-1}aj zypCy-w?9Awy9Pem^fV85r-?MDYSGJ6u5sJ4orBjuN4gLNfQuC6f=HyBJg6&A}2*15-Cd zf#(Pd;__GqcCApS;a)6{g55(8N)Eu$<4&+zp%*UHeuVtto&02%U#Ne05MIw738M_f zg7ID_INR1{PWgN+a8Vltt%3^9B*}uNaCf+ZF?XW6fL&+n(8KgHRvX9>n_&)w zFPw!&jw$fNKM!0chJzBT*X_=j!=FjB!TFj`QS0M?K<;`DtX0t@8%4jl0@WBa`{xgK zjBg=!2*T!HCPaGIbiV24AmB#i!V!BD+W9RVB-1IsbYd^~ynM*LJ(3Pn=agZQdNn>W z8-u+UA7G!32{HW{j(VQs0QBx*ZL1blv=a+_k7tYh_gd5S7d}FqcQw3dHvkj&ZQS|l z!8k&KF##`s!wbMT5BH40Qe;hgOMdV>tJ}nJI#&38u{KE?u1;c>f5Y@n9}JqPNPm{K z@_Ly+AYxV^#`@gA$T_wo#aEFwU(LZn_hXpfngOAicJ$Ptbg2A#2XlI^@<|?NF=C|_ zT)UBi(fYk8!8{=<-SY6pYd409m>8alfIW$lKbAE%dwa=FP*r zTzZaJXtJKS*sI0gTJrx~d5I5rV@F^Vj*8KnljzCvV4~CDHCnLvtz|6Ko$O}{;a>WI#I$=R?SEloBWlOQN+Zu<( zUdK5(ro^VF8T!t!Z0+to?3}K|-~L<$#SNB_Bq(A0?~RaHZ$_tU_VBk|C}(qYKU^-b zrpKdN@$f~~gZRD`X0-3+UPQje@mGoD#TUgid3TS zH{V}wMyf}8A(wjsu9!K9C7sHU`n&?YMUCi{AW1_9Df2}xkr=nkf+(pmea~)loVM!{ z+mo0;f}$=NKi!@@bgjmqQSsu6BldWIusxCNujVExhjT~vIq>;T3-O+(0nMJVo?D>C zvM)8?Vx?;yzD&qKr(eCecdr$B?i$7oecXv%3HRV!lM0nsUJr&pK0(L&Smu-Pg=Li< zyzUoZ%%$6?diNNPvuuWE7i2N?ixJW24u$NrcG%iv57(tXJ6Auu2S4hQ;F>T8_MN){ zi7zigO8y@_Ufcv-s=6dTyA5Wm7=v<6K5*aUsLpN?WX+33s}4CjRqz*bQzGExlOLS6 zq9hqzavMcCQnZlOb;6pJNMDu#F_*1^udP-rchHdBGP0$S8(T3|-h}j&TTnBxBC3a4 z)98?8kTEh4ER5VhmHEl6?l)uQ7iDT?zJ+tom4`Y9FTTRK94|kvPYBVFQ76006lNdYZW76rud6x&_!E$Xn<*x@$ 
zuiD91k=fjhk%gG>&y46#4ZsI28e!656-&t07jmBj3cHv;oO@Yxa*@f-JkFhZZqFm;;w!0-QNYwg8$+E z&I+WjLiqTb*I}>1QP}5aKrBDDbNSc2IQLV`H(53d#(dh2CK56<@UkMc8qL_}lTY*Y z<&vaLU7F7M+0QG*a1hmY6KkK|6~9+EB|WNou-Nf4AJU~r_DGsz-rF{)(U5~HpDpOq z=y+6KSO{-JW{KSrHJIs$X8q8a{V66qkcH~@NsN;*AHQL&B(yP{Q~{ct-S8n zWN`;)Plp*w(bx9BdEy z1er^sP%XBd+fRC6Rfaa~&7Xkz{V(}Pl`@onxeu=~Ud^S=&Ek(%k#N?k9@{o<;yitB zq1dB?kGR7$Pp@!QCyq!6x@ae zV$EPFOx)cLvwTF@yQdw^c?47D{YU>$NYC1VGgwZM`_m`5tlkZ$_0?f=&vU*l%?T@w zO~~~L_L#17PY?_alq{D=|Hiw5x2w#F`*dk|>Hi)T*Zkt6t#bet=&+uPCG8$>K)nxG zQJdkJP}tN8%a*%=lep_#v@kv}eA= zK<2G7jj97>#VFQ8v!!m4M)U>aL>*9hjLEJ^xW79BiBl)P?Bj3vXMG=o`WV~#RSEb- zD`I%i7dZR26vNl+5T~+nf_JMn!|Fj7xSk*vY`bkvdLHXj>G(dp<~$NI=5%5FFb;w` zB(W{N5p0jBz}8{$g7+S&xMbx9ZtaN+Se^9<`tOf~yry@!R>hgs{hy-lQ8WBJDS}&g z;5=s|lm&i<6hT8PI_K&budd{FD!1%WC_skDBoO(l$ZUDp$xm zyBrF_rh^JlTKWW?!SRe1}Ir|_2h z00#P6G2ZNXu4||odGvn66Gd(re`d$3+nkA{tvE;T#ltYTJ18gPa z$lJ-8eE0)P8hw2oYBnmhlldIp^HKFkY8W<;e{90Mu z(^`Ui_SsS2ABXsZe!J0@F)xPJ&x4RBX?!!gpN0(|irvSIN%X8zJg=un$3A@piElHo zIzWhT+wE!gL1*l5UB?$2^+2xaJ(#}W1ZO@u1CE0wHSaft@_k~_=#$2@S3;g1*9d`G zd-A|-I0vz`=?^V)$?$+o3SRAT;QsT?s9{dsdLqd<@}=x zrXRgh0DdE+sXFs^Jr>8qjJtBw22Mf{fpgn=W?ao^~-r=^PonW=s z4L+DAC%7= zct`gZ{!vWFHOh)q@AV{9jI+Sg$#!VXG`rBz0R=PD;KF(lA7W!ic8`C6HylrkwWFs6u-rQE$UjMP(t9LO0~Jyy z$?71Zzd;-8wTiZAK>O!+*mF*c77Ehg^S&|Mm?lFyHA<1rEIZC08-EBgUyOp}+;rHa zlq(>ihqxCuEx7s-hn%`J3B4mrM#mt&tbf6HW}R62kg@1@n!zgf3;6afVwbKw8T7@1 z^6w_0(p4K6Gnw&+vgPTf8`|`<+d6*sofI&S-G+OP+S8UMCGwj~2iGbys`XcyhT5({ z;pEMDglRZmD({1sr_HE$WGTu;nNY11L%K*N2qF|%T}2Rp-rW@4gTmztfl3d-@(@Zfla=!nHVI z#!0TLb2s)$*J8cmU(o$(LJg0s;3q~JAZfhCPa85DyzZNGtLm?amWN!$D5dA%t|J9+ zK$(gjGM&+BbF$-i2Itpw8n}HAxd3HNa;CwOERmO?W}+Be$L>8@_p|U>l@ZlnWQl!y zm8tabOQ4%DR($l5I)O087|P{@q!vjTz?z4+I@h_p8a@kMgUwC zeh1UrbJ2FrJbrPk3AtCJOmzZV!Ay~9piWqj`d#U8An-0Kyj_Ojf3CpUxQWnlO_M}1 z#_stWkzm`M1j`l7X_aFFR$O@r2939IoRd9W*JeRqvwe`pz#s01br#-@w4|bEI%MsO zUK}lL$oqO+!+$>P-m|(HtIb*gf+LoN=Fz z3G)A7;k3W_HFYEIi~latYPJB|GoN6~ZwjJNEmC*K71tY_hfp^qdS6S5=1w!mm#bCj 
zIi@9iU#w4VhLqyI{yv=dbUr5bAAyZICwR{;Ng}kDrpEeBxX0O$*x3z$@{SQ)@QHM| zd`cacuCXP@S1XgURbk@P;)Cc_phC_`*6_O1-@;=>))P5Di!ZBh=dCMV!wApE@I06G zzU`}Ezln%@GjBTtWW@6qhwGBby)tBfTs!X2(4jN*^+;Bt5i7w~he*pg+2JZ|SLnmi zbF;*lv)2>Z9{GQ3);Qm*h4AXWX7M?dzm7RqhfzUL|k{lZBfTQj+ zZ_Bwb2%Ojq1IN8`jA2$HT<9hxI>R#!ePQzsVCk z*i(tN7hN&I?HTU0pNh^iMnZk|Ls-oEe`zEN3e%-%QH~xe%zB8=3mU+0jR;0cF@H_8 zKC#fe2T_x4C=GiF*XR(4A2y=$wP6_W%YyWO(If{0pYo*(%Aky8 z3rXFN!1|uIaO*wGZ_QJqw>MWnZ*&IQ%{L&~g_dMPrP3WKJiVk{J1h%#`;zN z-sqEU*&)IdZz+0jdK`CsWeYZcqulfj`?)F1MS8$gg{79*Z7JlDO1&; zPv#awWJWTICb9jZsR&go?STbpyWqj1>!5n047VBhU|E(q*;!p)(B_keTk{5SWRpHU z6sZlb8-uaHQH#9(^cgmlpT`5e_qop-yl`TG5z&d4p=QDH@N?M<-XSa;Wds%6*$1|u z``QeQ&-TC_k?{~C#`ZJAu~^~$21+9Kw+fP@@WVp?%Jba+Cg~f z@fufEnA62MKVg9JOXE$H(C(!&T_&D{A&WOdcbOCW((BR>PU3Wh3Wqv+e$e|+lkA#i zLSiEu@mH<_UE25x5894`qU~S7ar5uGO_&sth zT!E`9DQGbz2V2wd$;nrkCZ7OzFcH`5-o&h_(x7RV!A;MpMTsdDXno}()Wv^-cfGZO zQ6?hf=hWMf8qtV5*gZk>u{e=BDNSY`%!1yl`lMFB8|Noi0F>3>=12cjVc5VP$!g%e%kf$n|-#x75A*^qs=|AQ0M+s8PJW92XJS+;Y>%D7@T)R3ewscdau3knVkI{- z?3r+}=@s}b!8lfXo}-4m6GUF!gaap}h{4#Rf@vm~VAAO`81<GGd9@$2Pw zlo%|)3oEAJhP~zd&GYWu^{b=qD2ecXVIFgtE=}BCrAmHro&hom=p3`4YSzrwId73OTJwRVe2h zj?x;Z_@nJXxI5)OK3`M>-xVL>*krc0 z&42NiH=q3*E0x*vX7D0x2-<@WPMO0CQ41O__K>^Xe;-Qr)$!3+U!&i8GnknXg>C*u zq@bh-%ID1GW*m#*UPi6tJ2N!#+JOOVl*{CwTZe+O%vY#670aL7{2fnT?1R!ciu+d! 
zAf7VMt)&oNm#FZ!y#LiEwcyUru0{C!$anSc@T-ioF-A4eFop`xr;I_&4R8uqj6uwBHV7mG6xHvz>a6D zA$_+B*^_dUkEphRM6Z$XlX1=_MVsM3y_>_6I|=YJ8n`3A5%_o1UldFq&u8bA1A%rd zolyeQH~s?cQC4_q@I7b##*V=) zksG*UtD@kFNg$l4`2j|Z0e0tjBj*+@2Rn`%(MJbIaNf8JttW{Q4b}nSLJbAsbytN( z6&mOsEk-@_OzAwSD()}a2jlHRyy|U2v$bT2_E0O%%D4>qj8UdlqDJ}bu~4?644s4G zxX=&P+~6kz^0_=Q$lM07eCp=uT7N*b5 z!UN+9;p1%&R4x#Ouf>%(X%FianLHNUx|IhaA4I8Ztu(1S_ZOADX2Xfqtspmf46HUN z#8Z=Q;d9OT`~^X`pe5uj_B34J&E*?~vr1H{_pmEIEO-P>JxzFKi6;b3Ga}|;*}^@^ zqWp>8-B_1278@d;qy39bIN?qkYCS-#STv3KWO^YvzEH4t{Y`FY&l}uPU&kL>a~{IA z=Lzn|WI|1x1+6en!}vdQ1zICB;N*cSs9Y_S3d~)A5Rq$UWXj3bOx!S*C^vW z4CNPB&_&vEfS12`E9@7af-97A@j+V;etq)}LR^ht*~bhVKKBvVoO6dq?vnI}NH#ymy3|41SDb8a z$%2n^A+W~rCpI5=1{2N3!K8l~0*6^xcw-yEz{0FCD^#EKA|>Uq16pdqd20 zO?rc|xXx^M;g%#A&~2|Y_}gDuK2c-})THYYo#8$%;7=K-y$y4iSYHBJno;0%*bS8g zIhZYDNoF({kwi63a;3%$yKY(%Iqe#FUaCoKeeVg^Uao?54!Jl>r3#$Ssp0f=4Ki$D zg)t#}_|2?0sV*^u_jYQL9GwVokvuQFr)ofg+3bA0=4hPi83K2|oCL7|mLD;dgCA)l zgbv+Dr|x6^wlqfr#wl~auw{-oBz75|I_IDraHutCK>Lf-$gY$^@V5TQ81Kxr^Hz;~ zVtoLS)2ift?K4zyH={GZ8DLLKE_gMo)A3c&pxdENj_*w7Q!EG3Bt#BYX&1qm^D4Ce zk2)!wWXCanDVR-Fgg2aqabmSCZf8E>a~*GC>gcPS@vkUYsbxv@Zx-SeiBC{3 z^-qv^X#j3pvGx?3?WygOAA%5cqYSmHwV>Cn zy6|oDF1)rQ3^F(^I-2bdy?$5|WxLn7Z*d;94Lsz3zBUBTVk&rSUktM@Fwc%b1}bV* zVdXz1a${8&n@}@hq#-g%9VdQ>T1oqBi&iE-hqzs8vC5 zx#m0X#Li^%FEn7LZwKuBCQ0H)g+czYL4M!heh|cH(~kCKp!Tr=I!`BKfNvLwOl!qN zzl;3QgsI%3#W&EtOIlbY^%d9LEJ3B~7Sv>B3UJ@=VMh6RxR`YfZlB2m`r;%gOYh{Q z*iN)sK@H~ACt}n7%e>o52@5l3d zRjAG26<8x2L8b~UaZ}$i)b~l^740R-HN#t&;rs@pPej4}kh@5i+<+4yn$-K6Kfh<| z2>M048a)@9k~{HiHgMU1G@rQ%S0jp$-(fEB35kW;RqTBmTf`|E4RU{epTXSLN-S>+ zLSaD$Zgb6rXE~jmm|Hol4$~!qhf;K|f-#LsI>$wM2B3RK1H8ctm^r5yGmp>2poT2a zDh|S?`fPA7-^D$twh%N*n^2E;`(QK68ywSIiaB{f@M^a)wH(Uj=8TiUyLm%+@@u6a zbCC`8YupE;Qf-OP<7=FV9)-_;b*Zu6KA4o52~y1_+_a7H_<4jPi7GPWwn=2;8-r)i zDRUjf*qqCmWgMP#U&e(cLGaZ>9NLb^Po7wn7Q<&hhG*qU_KRo3bnA487^M8Qfh`az-iXt$=@OWuh1Kb2A?sQ! 
zIuA&(UdmIPsdEM^#wn2#tm{x+`wM=Dn^Bz=Joh#y4v)K75hb}qu6N})zNweOha;y2 z`88Prv86ml7&9kOVij&%`-7|Nn+^-oHRykH&I*^s_OgCs6)aq-OaB{jnqL|#Ltp0I z#gnrX_`Gr7!JT!0%6xX<_Pb$xW4snUohQI>pM9|E{z*JM`5BIi_yVNunLUG>P4U7mSHg;>bnPy z$zBJw73x%D-b-AvVhOz2_J%u=Gqs?{B9%D_+3<&_>)ICj*aldJ6oHTt@GiQE1C%;?tjpz%grcJjhrNKHZbS zV23iDw&5HmP9MgfoCP=Tsx&#Ye=>@VH-VdsQGav^MJ+KQHjh1t!*2%hXT%7)aY7?Z z&2Prp+fU;GR|)!c=5&6HH~aoCxrfiX;xJ=LJ)|Da#+Kdy&Ng5wOf-*y@hnd~DNqu& z`diUe>6JpsWZy2dN+5Jx8iAdMq^Odb1QFUldX z+kcW<8YPc!7irT<_pzLv<_q|AI0~asl>9z#{1Y_bSMjh7O9a_;RB47 zdl)*ecXB`6--COeHa-%FQ<2Pf=u#z1Jng%n>c8taI75l>HQ5kuw*<)SwQ$@17i@Iu z;3nzB@i80b!C&V;*pU4QHZ045@S#jWkU=vvTy_+y9sP#dW6Ojae`KM&X&3H%Xhd(k z^u@Pbf8cYr7%^qD5t)_Rw3Y2KUwJdf!h~0#md&20TMDu5hzhy7ltPh<1sz+Rh61-q z(6QzQ#{4=19xXi({p~E?*w@5uI2#K3pWm^et~BYlVEI8cWuhr*!1bMb4YoU@am+ed zGU2c?=b7{utb6OZHPI_z=puzQqf#3n1MAN3WFwX!*h z>{7U6^#(e0mB~<24u5lkDxYJVh2xykc+HnZaPHw-c6UF>>s=IwfqY3aV{IGX`ML~5 zn;wGMqMNwJOrG4cT@1B#ogfz70KF5xz@#JR@fiCIQw55o$3>Dvd`V!rI%9A+(1(}2 z-@=p4`sCR6bhx?jHy#K+3wEsAamA^XU*7%~mYgx8%YHrOs8t(u9~$I!8B5swmknx5 zi4n6KF=!CGY%0AN%-h~5XO4~{d>Nt0ol)L~A(6k}PW656AIq;#?)(5wQ{KQx%WgD( z^9EX>0(_17AmphTZHeE-E3`CYW^fQ=c;(^!TgFuT;%pRIcv2wpZ#0beS;=XHoEI!z znT$8MA}DPuhNxg8;-7m7cU)1y(;bRL!oY}5a^Hh_b^4@blp#9Z%|XfkWT?>lGR|Lq z63$$V1{M7#4Mx2*A!}UH`2$~> zZT0jIeCn%7Qjbuawju}y2&5*`iy1^;ff8Y%wOfc)@Gd{5RHgpcG;zF3; zU4-31#y8oJPeGm_zVoos&_jmF!I$?LJlU~9`x!I_pi z!S9?ekY5`Elg%|rOME+bPDX>Ce3OQeiuy3-#CGi5`h-t680NW?HrQ7=AN+P|kep9O zm@lEj4ZqkAZCew#42c+SD45OWy>q~~s109Ru7KNPR)PVi#}%Ju?&$|l@%+dj7?X1r zYKcDA-7*eKJ(DoDLYY{_Qp7eum3?}2q}>|A00LX?H1?RHobd$v! 
zP?Ix)7*TODP33l&^jhC=wq$Cx+r%w11ClH(Gj_x1QV8vWfx|ikhoF5E;qGLH0 zvkGP0s0;94=}36q9gXW=Jp@bXB^dZXiY)k20Lzu9qJMfU7Jf6PiFd`w(L>2_*t!aT z?$#vE@2xRfWk`6@B1aGsUhXi*{uP*Ju46lIX}YB1AS`(~gs$(SKy6qX+kdIileW9K zz?M_6cF2-0v)c2bq;|A0()udm|WytbgCGs^|n{MGw@t4x`!QnzC-&d+ak9g0- zZGo0#%BT=1m_|`)pco8RUW2iHKLxcL3en@AHLM?&1Yka{i+@Dv^wudLI)QPl-$bC4 zpE#<_K8;(;*v`06i|TdN;a#gv^tn5dbr)2K=d}TFe(%jqpCC@(t?k6Q?}q1Jcd*X(O*XqKl&vUC#2&60*&F$J`eXI_(eM?_Cb_eK|OMY(4CEIR`0$ zx}^H`4g9xz6vUji!u7M`;ok~Bu$HOk)_QfKh~)vHntm*V*8b+iE^Xw`7uMjtz&!3@ zwhDdh`U&NK3<`_t@=@C7G4hQQ;I!Epn6CK<+@59gafRox+uuje;zQVdF<^E8-^PK|>I6{^6J5=d!MeT74 zF!$O>_@d{}x6iQ=j4-XoKlfs}=B+0D)z1NNPHY&v7EJ-gGGm(gy%n{57J$RrI`F*Y1%iF+h z5rx1J524_TG+ksK20KQpb4GF^V7)9Cb|2}+XZn6HRL$I@pK?)mI%DB)(1u$JpFyxn z5jZxm9k@a_q-$!^=rT)^s^ZGsP?09v|Fxms+nc=oWD^vI8i7*#dki1t2H)C;QF+5B zROiINygLSO+iR0TQUY^-t_InpL=gFT35%Y1VL^2#%%84Fo~w1>shhuGqk<6(s0Eov`hztxR?oI(32Tzu>wuF2CQT*q}`hC`OadP~Mg z+$2dCA9Tb7g>wF~q1-NVc5u~11FfI5GC>okV*tgv{;}GMq$pmr*%BSI7 zaU!&oThQ0nFXL=Qbu#VN4%p$(_+xpKVcynCm|V`>gdtt%O-Ez!wmx)bnUBGQ&G?q} zW%qph1a~Ww;B}W89lr_rSg_`TuHED({JsvWzW`=gEafI|mc%KItI@^Z4dw~L1()ml zV4Y){aJJ7S7^`p!CyjK%t4-;A)gCE!m$89@`s?t|O_u2O{p36*FUI`Aja=U$mSevk z%Ue2#(P>F59ezCdfb&&kggGt6_~geRH#gYy6 z-*gcQKQ`d^e`{NK+ ztO?~LKAr=c_!P7gvVNn{F8x~3a(K~r{_e+iSgl=-{Hps9!}3+;$A5zBc2$z9%ba7`=IGq_8V>iQ!w~BiB{rQw zSFVA-#QM+IKUd(xLT&mrvI39gp63d}?qOeo812qFj>@WmP||Kd!kFLJ>_;PQiMCKQ@29$luzn zNT%Fb%t#&jMC0%hkU07bQp>uz?TqV{#={& zAk6=D9`aI>_+Zg&{I$6ru1aYU#pO}_omX8#4-hgfKF?7?j#!Xl6z@FwDL5_4b79IPG+xxZ2hQxnJ%p;*x zGal=*{=!wACJbiW_g4aQ5|u7Ww}f4RO6?19(6b09oRB5<(W-QcYaQ%VnTBcUxlk&n zNw!8jg*Nkzyw9w!xbEpqP>Bh~!(Hug#A_vVWIe@W-}_)b%L)!MLdvchm;b+C$=fec zAW<`hkJng)&6*XsOe_xPizyNJulZbAkt%)k@h(6O4&?{Z=?UvmP4ROl(T!HrB6U_x4}xP7++gFHbuP@~`@F z(8vvqg{8dS1D2=giWS;5S3vZGK}^x#iM6MKU|NzhGEaUkk-Guki@;m7jYsk7e_<$W*Nxe2n6DF2#Ys;Rbbbd$ucoWR)sa z8aRo&rR_kP<>RKs8_`oYl*ye09ojHT8<+YR(AlCU#B}Z;Htf?Tec;`jZG#`&Us-`;yL72%tvNX9g}}a{ z+j!BqlX>NyLGk^+@Hq7|-`&R8PBs6KGrf-KowcARuS6DI_7yId!h?->xbVwf1ei 
z_V=dzQk!gU@yZx%-?b56ESZMqOswdX^>Q@%{2ZLDcmSHxZV7f9G{I4A6B4%}6;0f9 zsnO;X+~QOL)gH-^6Lt=@J0`*2G#PrSybEWYFrzjg1QkzBQubPh+_BigkJ_gRtM#^F z^s^jXQ>#NduAhQug>U)hF(U<{Up2^oF0SC%UXRjs+adg76V6t(M7$D)Z>(RT?}t8| zvSAo5L`%~nNA!u~91zx9T0?0S>zi&DCEnvrsKOIlPJAB4h)N3UN7kU}``swLyA)n% zzrgMOC(y+70-m4rNZ7II80OgZqv@DLZmi%cpZY}tf9;k*>4^l?E!p?|+Hq8^zS6s>}JzZDo$SOo1P zwdqFLvp8@a!QO?>g+YCK^!#_mjXtFT%g&zR$IVnErNTSHX$SQ&Ve%&^XfmMH`}X3F_$*i^ zdyV&R7NhIfK4SDW#uYb{Azyx8$M(dz#?6)@e|wAgE&FA`rv3>WeSU~HXZdA* zwk$ulE{?INUC>}|B+FQslDe-dprXM=}P4RfL$fSqFDIKf|s zgh)!G^oa9tEc*-IHdlu`oiXU4m&5Yd8ifASBF5`R!Sy?r`ERpkVq%^MIoY)k9?2%- zLVYW${COlcZdeD^wR-r{`W#o@$vRy+zc75+U96aDMPo`zP_zFarYp$P=Z}Wq)B{ca zwe@i>wq1rQebb}nbjG=8aA#g>FYi@&an^B&ta$)eMRsrt zmYdMvA?Ewa7bBjF_OUy9Fg&ufgU78u`M@PMZ02D`-28Ol?mH>k-SGy-ohe1_MLOjE zs%EU|@4_vCMl@%tDXGtj#NU@*V2vT$UxllXUukbK&t0E(O$dXO8M53_@kxAyrYD&E zW_yCv9T;@lk_*{$h5xwlHi$&WV?djL+pV-5-j%n4(co1~Xgb5qeq%^C>aRgH9X*=6 zs{wqoBhk&E1EufAOeU2;-}v!=sab5uAg!mWU98`JyzniFE}dEKun%3(#L~LBE;@ zSX8%(?aB)|#eL@V;f&q9j7|ca5Wm8uZ&abyKb*iIRufa|BuTt^lkoZH2p40vxw|Gq^qG;oh=7LBSql z`fr~D%zhdUt~#4huw)xno>YRwyR6qGA_g1tRLPO(4ldBh7p^Inqe+V_37?P)OJ85c z$hBJ7(5X)~w%vf+Awt&2HKr3ouR+*PEn09m4j$EoVEQc+5`CcvU0Ghwfw6JAD%RuW zZ!7r~(Q5EhX#lP?JjeU(e!QxG3WVRaq55OP`3v*J>4w}l*xPUgANQYwgZjyMgv~N8 z^?t`OCifuG{0-_es?@q2cEE2h#u86iFy1nW%hk}L2ag!jMmGr})fNc<3}s2S1AEVS z=kw2!^#v*HjF4>5jgKcvL80Sgycv0!o6{Nv+|_ExpECjX4y(}>D+UCi8&Y62Cq`YD zzku75tA#5^rK6cjAnOE5l3-5(7yrYO%y%=Ox#GU~>EvZz=XBeHA7rLnMDbQNs=#&>nI)b4o)NA%yQ&9f`bv@amsNBC&T@=43_A;sm}0Vpr->wZuv=5DNkBI%Cuh%!5Bs5=15SIZmrg%X4GZuj(JJV$vQ;$Gdx}dQLHH!;iqbmDcdqep}3nYou zBvrbn^aIy=GT(cuXzb(OG}fj(|Od*VxN7-3S@(%J|uEXHE`AusrzO%-t}fRhAxI9mDzZR-hTs028cq=|B5w zoHnx*L(BSM|1Mn;GUW@bAAbW*TV(NMzy;Eif8!rx3ECe$z&|V zhjnJel-_`1i5Dn1T?1N*=lG#NpKyujacKXoMvd$1U}*Yeyt<9`suL^mKbD_$-mOR# z>PF(mb6>gj8a@E+b20G|I}fOc6iBHjf|jHb$t={TLm!ms-gBP>6Z#|wtWLlO>v4wCnFi#MxY~(QU?w&?dLHzQe!$FChHAD%(G*((HrU zkoezG;d;g2(6_V&TA0`KT!A_Ksh$RtKSe>V^cddLPlMK`)}nNNA3l4Y3YNQfV#=o? 
z^jM>V<4eV;U|ANJ+EfBbFeN)IQebPCI9)sA3ykwH#4qt?ggf;DCP~!5$A|W4%s9Zm z8CvAwE`-HfHo*eh-Ix?$NyNjXxVW%ti0@P-gUbxK!v}iNsmGd(d8bM**K3j|rN-o$ zQ4>bot%ui)85kTm4%fDNNLk;F*}j{Ez8_aXO2!&o`(GS>Iq?x&BUJqxZQx2^!S`+Q);M~t)FfOka}iHX;>YOS z1jCsNVchyG?y+J7?mrX<*s>TjW~r0a^Zocb=H_0$N1oEfgILJ8={HV%=JkwZNM__C z+-lCguL9&OqJ>BX%ckeX=f-u=B*vT3_OfIX0fm;ED|Mqk3pBWG|zd~a-T-Y5|huF zeC`@|e)>vXXl!cXwJFgKw4>mB?;+0cxx`Jn76hxdcH?`uuQ0!|3FaKW z#;N9ghn#2Yc{#78ym@6L@Bif+*qz)9hf$Soxi|&By_R5SZGEcJ^aTQ|)v1@)eZ1x@ zL6&&<39K(6XpDc(-&xD!-1VkJOroA^n06DC(7F! z(v&@-AQzm36VBBLYZn{R?t{!>ls=LFvg3+_c#s15Fs=%GlVs>p`v#CZt3=%L{=w+= z{dinMn|iZ;afp`>Mjj<-z}b?8pT%jxQ(f+YX9kY3^hASWwRr7d3a*`T9d>`#rDk46 z^hud3sCG)zNoRNS=fX^3$y8aYQeV!_%BpZLjPas3Z^rrK7(2((kXVKfB0E%Ya^s|E zhw~hq9p%VHja7yF-K$|Om4?DJ4Pijjz^EX=?IKjCeVR2tyWD@>8mLGiJ^CGV ztpvE4U`P)c4svuDK>BtTJpDKs=6u~0B9PmBvb zVcG>1VtPS>#>zk9$`+{zB91t651*WWjMDec zL+H*UaF{)p3#5%nD!VtwHkpx^^4U1ezK$O&U_9s~E2`^mC*1PHn!2(+!H|-T5ZQNa z-K_@P?IOgQv3Kz1Xg2dR$i*2|KKQ6b6V_~zfnV`95Y6TrDGA#AX(t<~o@apexS_y@tp6klMtiz65<|f5UZ8rxS*@LH2TvC91874^&jj!UuQ^e zwoQW13)P6--@CZ6`<8H3o*7xYTc7rBILrO*(}b)^CAc%U9j$_1^LCEf#Ct_Nm*f!# zS9i+M7UpSx;m@4X89sdIx3jDZP|PdrRi&Z@H=!=)G48Sn0oO+&kP_g}1>b7J|1KI) znAr;_T{B>oa~jr#f98UYt5K2tPlV3;`!TI*EhMdY4r8|bLg%)4;h$brx^nSphH8@%hRK{b~$ z@3r4|zCP#{_aI+`e14mVwzIW3=XHD0-8Kc%NEvt?u7mRyk~HBkwDJ;MFDuH+m+HeWvns4NYk@r;OW52Zx#qS!5456^Q2yt`48^KjGz-X z{Ne63OH%D#dEz!xn?`tc^4T#xAo0zN?DhG;cWyGKhVvc3R?8b_`iH?*8HDbvXf(H! 
z!%4eYo+e>0(tX9eOqe1)#yloHvLE>dhhC7I{gaE4OobgYmh-dUZ03&7ILzJnw~Tu* zClzOfsbT4OE8bVmjDGRH0~TrCP#Sg`+vb=NakgXX`_KjQ?76@2z6V_Bwj?GlUSOl^ z5475p*Ujxjl*4UbCjsx2z6y1`#A#Aok+4@NPkXb} zh)^OLANqgfL}OlJvf>|%&on{D5%OfS;{*KVzmOB##duo^Rj~JIEq7-0FZer=@q(u+ z31mE41$*dtc)TG7Y7f_Pqh}ryZU{7?`&MP)MgyCjul{JMn5C!D#@C4O?pG>i(IfSVf^!Pe!zaOWd?2TfWCyAPg#G08G~ zS3nTnu5pKNXUF3P?POT=pAOwsISfy26~I7Fm78IN^6bhByUW{ zs*HQ!Hr@%F#$M+FL4eZo25g?&B~)Lr88Q~A&^CggDmjwhlrKk~{LbMonJ$IYHrDfJ zuDR{q$=Fvq8YV~`$KRonoSs5Cba$%2Qis`G*#~P9DPc%mhh%8hkOjV)q{$pp5;S-Q za$COd!kev5==$n2UWwHp=GCgyB>EyetpChOy(+=9j@h_uxiwuOeHb4(D&q#pnK(tR z1CP5$!of&mx>ih&i+n1Azw)zCZSPYMy<&uq#Xq9!MK-To$YzyoRq(mZ7>1l=XyYp# zN|P8VxH;z3+mvJxo_ZJjs zNXAM2X0sBR&{oVH^OwWTiUkmQQiH~3%!MM&Zfr2^vbR*xA>{{0;YyQuzT4ISI`sXa zB4157>;7X3*_TblwYTdGJVZPg{CB65)5{sF4kY&n-c#QC$V(X~{YtcdyyF9Y)-uRw_^ z9@C@3)GV-d&%u{Zck|9H2eGnw3)B@T(~=8GV6tKszweMV&HYc0mhP0Meog)GNUi{0 z?0E;rN~hto6@k3N_iS{l(x&gsMagshP#lxg%TE-|!%^w)u+ee|ZiZjRO6RvAasE5= zuWf)AQgIG58%5!J*9$z@`;ePj_Jv>dTN8&|P76qM6FN-^MCqB!L4M#Yw$9X`yEQ*? 
zmFXG$_L~;8uJ|`#LK~*3Edj?Feeg261~V32=N7)PBs(6NbAk`mkjS6sl+TEf2e;#J z%1}QD>bsD{zJ~Ssn{eUJN>B}wC#^>(3cEao_|8F^dVO^Rhq*>1=tmmHpel~*{ZcTgstsqF9>nd# z0a&QK41A6^@j2@+jM(&&YwA#>Gadw>#;SOzeE0)&4m}Zeep`t96_#*^Efff+ zm&^y&j>BPP4I1PBnfvo21z#KwgW%F08Z@wi#K_DZ8tnp<|H0^?-w2Ms0A% zUW{f8u`}j2bxc-j1EanB;m)cI&SuaJ)?apnwrz}=ffh9XvK;l~t%=F~C_s^i*nVG) zIz9CU|7Qz%=VL5GH+vaMux{=ND}CBlm5BbK%=hLS%1KFR(gz!I@bb76-Z51d=6&pk zm0S-V8u=K$$aup-p*0?V)X5#JmEdk}y@vmU2hrku7TXiLaP!uh6V1&UL}?-iKPQ*t zwMZ@W66r#P@hppKybs&oi9xyFcaS?~hBB*5(AD$0z=ivXC*HpfsB)J1sk0grY_5Fwi*tPXsQje>j(oTLJgW5$9-cOkqS zwG0D&89Dg7D(POGjuQ^IpweR#SfXJ8b2^mC=8!}T&VGxkjq=p^=w#v6?j-ylN9P@u zB0sK}ZOum8&PWd}vc^*GjbKcx!Id&QxzJ4cZ4y8&(WV?fY$o3lGBPMTz- z=mPszJlZk^cdX9DSx<+d?%!O;0|}>5XM+ud%=*XOuG66NS4_tAQKrOOG15H4+S( z1II7D;A5>{c%|hre5sNnxxfOdmyM#nKLy};_ zf(@+aESDd}^*j~9;HY%Y>1UzSk!^!GW?KdLOLu~md<-A=OBzzuWyqa1g{ZjU366gj z2cjpRG~-3lH8j53FB`Ylg!)81d$4I z7}NL`U#i}Q8!o?4?(Ro!Z+bLylSIPQ8Qrk=a4Uw$C!=^`8>)XbA_BoNh%)nqngU1u z9;Zs*pXh|qTOv5#{t(Raascu6cz9nkg!fMQqsuutBF@~{aaEqYT8Sm;D-i>&MUP?4 zh&bJ%r%kJOijj4UW0_LC8fr^K;cLib)N34o1dm_%ddfqHm*V)W-1F$lhhX*a2FR>Y zr0w%msMT8wq8*e4?}sI+$kH4q^{a+-nu8@j|3oIgHmnGO<{81#89RB+$~f*%NFeMD z*nW>npY&u?tZ*Cfq(+jrZlB7UrX4!4uy6bqUa)k5v z+yX&8##F9QnZA;Li0>A7f^wh}_lAD}?4}R=S;k}EUxJxKzddO9ltoH0aGSVfm<@6%o*GY zWA}J-o5TLW%`^#OHE%!j(F{QFta?1=+KuCl49LQ@!o69iNSw)aH9Vzo{_DC z(!llnt%JL8|Hh9)M5<#)ffh##H%Mbdh2m zlx$DTm#JZX?G>P4lK~a(a`ZwxK#-G2 zYAx4&J`X>Ap9zZ$yWxI&A6jRQ2N?r5Y}a{;PY0L4se$wOXCw*a)p~eUZ+1%p1#J3V9nR|ZYM7Lb039PSWn8rM$-MOF^a2wa3NyLCH z#ze5r3?JC5qJ$lDVtf3BgO)a^o$&yV-)w_jn+v(KHz$Hs=UOPaB~2~GU*OoHt6b$c z0~(g82SsEpe`jMdn(xc(TNEl?mXm8U_tzZMdkB}vfuMck_$)%>>1 zJ{U}6&gdF#@{w^5+pNX7f~a;>xY%7J?3Mv;%4)=bD~F(CvM8scOt)pH!@?pm49^nCN3;O205Lub7M`E|j(9hQkaeiI|{IXz9 zQIlbO(R&Mm@Cb@Me+EGziZDrg02`)Mu^#b1{`M7fa`nHf;IJ!;?`BSN;|r#cDzttgj+Y<-a_GZB|7Xmm`YB?-?V<#-BT=Vg$KTA24kG z5DxFsBbz2Lp3eg>#>O?l$Z8|9tlf}iC~L#*mJ;;QXYSx7pCH+MF8IAT!|sVXB#U*< zX0?eE!;^~i#gE%~NnM0cuMc=8=nHl;m(+{fdi3aQVaC8agDq9J@xUi_s<&Q=OtF0d 
z`FgUXBPa*XG}z$m4aqQXt~Ja&{t{X=kASk3H7<|+#&W4*)RXxKzuyv~uZbBkdU+0V z|2jZ*Qxg`K;kIV4i>jc zD-(qo`2y+cd3dp1nMy8g6C5w5T(n*w_cQ4a+g-oJ+1I|~jDmT%)Z7hs%um5i)mR*N zFa~Q2nqaN^Y;3KJz(1=qVNcySjPUpX(i(rj;#VBRD$9|o-WJ}wM~bL{UWf(!Gg%x>x0*k5yw>zgYoi# zi+ud323$HrhF;(1!rfV`Nl(^?p>l2R8zs#usu)PXd@bQOcA{E z)S1^*6g7{paLUu1kC)px5URL~H*<$zy3JZXO2-D8Ee!BrdnBCei{ZyBh=4=l5W6QV z|?3R{FuFA2K9IdjUMOG^2Az4{$m5u3X3;8G`s1&7)M|$CLe# z^gRGnKfK^atChf)Lc*WCycNdErobiV&wPTrA?Y8GCpH1?*zipnM~pS;!rz~;eaHhN z$Gkyj>mS_nR%K{5`3Tu&j{N4}29S(occe+TdCz`3l>gDd%dBLsw&T9gE?*9PcO|Lx zT0iVg)S+9M3qZI+hfL@$fX2=OI89xuW#(>A4`Rx)NtJ{)M>9)lR)tIwVFuh`XyM zOuc1Y@ZHNP++p8XxRG%Kmxi9iQ11~OqxK38KZrq#CHLX=Iv%xpYC*8+A37I=@>9Y# z!TF?KxaT?z*0I~X;Ts#l)w$y2kX#invHTaDbt~nbMMN-e>x)vo(^zJK#9>yFN#E7kQ8l(I+_~A)xU@217{|wEr!~H_4uO z-(C?DlN+Jn?j!u>u14-o<*;-G1)+R>dVeeHEzf+9S4|Z#^p_khI@$-oIKI1l0$CSG zgZk8e;1ZfU;o02hIL1(#OsHfm|JUsNVI>ZX4QrtGZwgxM@Q3B?R#Z(? zhD1b*kipAeFh;i)mlSH#DJRVF{6abI4eQ{nk938;-)B+CkE7O;& z&SI-$DZC2-*#GP_Y_NL|V@78Izf+w44%DF%q0AZUsz)c7je$Sl4wqe{al5k$RZRK@ zTUcLBdby3$HW(5X0RttD0YHg(2K|lUQ^yQ*SyL9l-NOGiUB+vWh85SpEN^K`>u(zoK*r=iDA(6@fBo8Nx&72>DW#~v0O`qJlIwsxRQGTWWWC8 z)VmzzquuG+`(ALdvz+qgajhe)$4-KimQ>mea7983CcqD&QmjAKKj7jtTnm zH2s|(+&C`F|9GfJ4r*@XthC}d=j@rdMyD0LX57Xi=|CL0s6aQZ{Dxw7>!*ae8IqN0 zQ?b4GADTK{MU`tmxdPd75TN%PH61Kyn;@2-n$e0mzu%$BWFD>M_rtWSm5_QsoLJSb zLIdY8wx_twH$T#$>fJV|?wiU#bC;oATOOd!^mM#GZ6<%|k192s{D^NCKghhREAjbt zFX#x^%LO!Eht>{t@~%;Wywh#PJFm59lT|cGUsWep^_U~QX&?7^`vX|Xyqi7?V&Hwu zMNF4_1QoqeXi=Gt_U_YRW0)5<#rojrY%}_{zRl^5i5%^oXbnfl&H#SQVHEwXhYswS z*dn|JT0hzeLYf_Um))27Q=4z{$LF;nf9VGnXB6V<@gn5b!UwqO`YIfI!i2tie2pLQ z@CFN6gja2*&{<{(JzC;qUPC)B4Ibfk_}|0U4gu@kOVR#47vQz)LtZveitfzHflIkU z)V(~C-yHdqAJcXSmoF{DzGM^nP%|B6+olUTU)gYpKNjIU@WZGTtS5Ib3KT}R!`v;; zz}!9`$Hy`T9e)AG8lQ(UX_gKBH^RCWcVSogSEx4_gsXConA2ejU)%Hq@0Hi1?O99I z+Vz7m5YD0d5i6(6|6F13u1_#NsAo#2$~SJxE=_8*Fa%zly$(|uN5Nx5BAlB3#OWfp zA5sfd>DG`2H10pc^YdbHNJ;?3Dyo1>%wb8`Oh+8o3IQWIDk*>|9 z_;^5)=sq$cQjevHpI$cl%PY~T>vr(`e#Xh1tOO6+{-L~2EXatd(Q!L};LlA)FsHZy 
zE7)91miz&4OU8Fpi{P9$9N-c_8p=QB;F;({n72lQY@H)QbVsq*rjr)u!%cu)$VH|$)ZKo_p4N5|)-V9b~x1-gt+pu{{;>3?zIvS<7+H#u_BqykEU z7*k8Gm3uYI7n{XYh^ygO=x~&#?j}#5OuG`A%Gx=p+cyLg$K_#EY8!vJ<0rlqF{joR za&%;t0Gp2uK+K=#$R#mHh>tkkG4&ao-na+ChwET}iU~fxEr}$|l^^n24Cn5-!)^T{ z6b&&ZN4@&sQ?eErkTxQ*UYexiqapd~*N6TmKRVftWTH^x18BME4=svgz|j07qz@(I zwE8um-VuU}^Lw$qLmHoEJ>t5~i&9NC1I!s?N?(?m;<__)u<5mG@v5X8_{@4GVL?Xp zQ>8Jjbeo0Idb6N^=_?p*yB|l#baP7^#>0{N1b$gS4xe{1AJSAG!0pM!P7b-sR8-+6 z@)c%ipQX<^%=X72Gc#!W7y%u+>oMZEE_M5=#($Q|al8)1*kdNpTH-@jB zeAdZPMT^dU62e^_!x*cPDfqI)12=!-VfM@hFrCUW4*QEhw$Ts;D_?*Ma~K@bT8BTL zU4m57ikWhC+~SBBc#zxozfL~Ryx0PtulB%xpFI#(Yy!bL23Yo0JiO>(hx|k+Yf_J(>7TE0!Nor0G8jnAcAqpH9`FXFQUy z_q-gfbz+RN>79ZWUDo^gGKO-KWu7EKzKlE@cjD$qF_Zom;cc2gLthv-TdT(5qdyRR0~e>^LA!t85ITySUf%)d&p$!ona}tg^SAM% z?}+2iI%%@fLxD(E$6{yYY1WT?B6xm!6nR-ILDtv?LwRNoXfLtF9?1|;c<~8kua1IQ z3g@|v3mf>xM+K-iqo1Gh&>7#~4}rDQWJuna%-sz`Y9>y&e!Sd_DNCL4@sO&_Gj!G$iNxV7tuFKjXK}*!s+pPbgADG zKB0OI9xX8=^@#$o`yhmR2Pmii={Rf}(xzIQ_Aws1C0Xx%mAiF434;9|qmVuUKa%PW=@lA@i9!el0iQA{L5}9mO1>S?AzS5zs6F&^_gLT0#N{94A1dcFY{cnU%YFD+ zuLzcbGPQa970;M_0|SX+IAy6wRg*=@s!+#o;?0MxsAJ%(vSOM zr8zR$kTy&dBK@v;FdQqw9e8*UCsa;`;Mz5Oz*SK?Uu-Te+uZ}xe3M0tl$pYl)<$l zI+$$1^H-Ox0jHa9aNM@A;^QtV)Q|oCWOWMP?J$Vr{wYwE&N}?KxPnu4`pc)}*zo6~ z?{S93%qgMm%Ri}-fb~O5&{1nZ@P4fc$$hU#u%wmU4j7Y9%#g;+*P}l-=+ZVnJt$}g)zm~yqts`W6a6uicgT^@d!2gW6|nP zJM@?7(A*_rbauiEoUfol*R8$@dA7^IcDjhum64lx;juj|4lyD3q_jzv?mE8oxej^u z(TEE6M}m5K5$@iR#klOXuwmR^7@TTM%T?v^l$AOJuN#F&D@5s@v^ulZifJmR?E=%E#kz( z@-O_Jy~@cj_Z9q;FN0h8%Q0X0BSfcM2jYw~U>mQ|3YrH5nT)Sp^h@YEFVRAzHQ zNqfQjNGuL~nu38Tru5uKaT+z-j9eWsArJj_;?b!)aCCkq=RP7ys)UYV!|Jh|cPE94Z6fn}2b3mpGlAp+GJ<1)x-l5$)(@9g+hHVEi*4URW8zR+o9O-ohCdT_1}H zVH}pZyaegoe|)BFJbp5`1PeZ0ghw`-G==r{$95$`@%twD@Kl$aGg-)&|M!OLTdjxb zdZn0kREMm7Y(giA#&WZE2k>?ErRZlRN(7&eaPtcZ{I#z}@42>)RTtl)&F4Or`xnNO zVfQgC!oua@2tA_W#V~ z`!HY6AqNy+`VBu!&PAGM%I-X=-13Y)Fw^BAw1_5g|J_@I=^bBS&Z8X2d3~mM$8-aM z-MV>*X95H-r^Vx6??#9zlY_N)=EKmhaJ+L`fhJ$NkH)52lv&VF>Xa~@b!arE{W{0> 
zrK%H~K}-5`$5rXu{k;VcHd0jkAnG`HxyfkjTnIhmFfY*vk~0S6Q;`k~|pgor>NqH_`5r z2Jy&Lz~MDU_|td_+~52Y-x!`pg`x$F&!9xi+RGv9s|_qZ#s1&naZn{W8h`o!!g;4N zipnoWa-p@GAb3v*%*g!ApVu%U-Ln2*tCa<3g2l-ekDG%19~wE|_&Jy$rcR1#GU1tI zFivQC!&e%;2eVHybd8e;g>x;C?YIjnruCsI^FJ7iTz~~iW$>BpOI9(C-qAsJa}pP$ ztLP3MaNDi6aL zgXMFIU##PQj-Ag>9TDaqc=3>SHx}>KT0o5uj}qqrAJ@JFZ~a$T{)y$u6tAQA-VTVi zl%zXiR-@-)wwrFeg~w})FxBWev?px_T_YciwVuZ3jXeajM_=Vgq6sP4HXRZwmSEsW z1@qO$!Ca9W*fYh1ZeX2}^6Q(q0|%riGIsX?st9kQK0(0tFz!W)F&Qt~k7t$SXwF_0 z8tA|n)B7s965-2OHXuy$v>ri$@NC}UOeDy$J$%JN4RT=LT{t+g1M@a5bdc*mMqsH9mh^$p}#J7 zdHyCrP^y4?9x;Thz~?mCz5@%6zDK<)b}-#>3yjOYi^`(fELAyc*N)fH{@J{Z#TQ+n1VsL zZ`Oov@y>jKhacSY8Wyy!%Yx8A?zv2G%hBQy|GA>>E8VstwgG!S+3Ewq>Cci(z0v}Og@jw6} zT1sSH=_0N={Q>WD*PPxKG{bf?bMovf^CkagY~RVP_>hyOLffC>jf1PuY1B5ZG-?O? z`8%EVUW|gk75{KCzZn{|*TF{fFF2HJ#CyG}Kn#6~O>3Exx7CoWOBq9?Ch8KSTQ9+C z`FXq-;|WJS6rNuB6zm4{d``YW2p5f zDBZ6{gKKKwzHb{SOlg910i#h&B^ur|O3@AlQ7})GE57aP4^#JPfN!b-G1|R@`&Fw; zyBj*t)xw09ic@c7jz zyZkZd@o6Fq{*j_?|3$!!S)X~WI7zA~SH#O;J??j0#qsZZAyGyh4KL+k-2pE&?lPqz z2W~=zu^Q1Z5GIFffAJBAHgnU=WBBlsKS0;rkw4++%q>=zAbC2uaCF`kC=yBH24t0} z#i~-iR!5I4Uiynqtd2*|zY@e*Y7`f^W&rxM3NR;Cm7Yuugbj|KPE&lC*Xpb?zO87- zr|f6tcz1BH=rsnP7URD4C!j`%87*$nBBu5^g2E$Gq_AWC-j2lJ7X?U z<~}ZZ+k(HRkEYMdFXOG?Z`gV$jk9}UhpAn|{BF;~;3gage(SD5M%8J!vXHSFSSLq+ zt{PqNC5QEYlOR<`ij?xobgQodp0$;wT~WRGt4;^E95pA7*YtU*(nkDVVFk4l-ec~t zDDAHaZxf@NQiR2AOG-l1cK^Y3eRGwAB#~h|3Y#{zxuA>jbE7cnwpntfBDG zZwzrMgO}E&d_so=>s`M=ecLdo$P^~FPp7loWiY&3)W^SVSE3Qx%CzvbF4^V(nJR%xJvVWB!J215`x31k=UQ zPBOlMj!q91AZT+JAF$7gX#M%kM=tz=|1pNns(FoYmy8};Eeiq|v zZ)d^h1B@l4pU)}y<>C2XBm5Q{V;bC11qBOcp;l2Q%ubM{I&0-v*E1G1IuU>VeF=I- z3gqrtV*)Pi(Eh}Tj&jxKZqV~6{{9`fy%C~!$xaCL6yq0))S+C`YHIRZ{swD`q~emV{uPK0kq)^G~_xx6gv)TF)6 zM1@x_AmX4$&SZ+yYxA|~)|Y1V=;e(td7L?o7rKJeTz&A9W-`=PALI;H2orzCRaeyC zg5F&tlf@#0eLaljiEa1 z6ilq$fSH3L_@K?0CdHT#?U!lraETbkZwbJsKU4ru+OnIaJdCX_;%t;Oi2X!G`nS@E z8ri6juSrwUb`N9Uzm+97I+RForzy*6Iibo<#yuLPLN3fI!(Y1%FuS{xTRw^9JXt4u z?c@s7`oQpuJAT92ssV6gxw!Bny@IT3eh~6Ao&O_iOrBoZ!QGfw2faQ`%r9^2RC+@g 
zwv{h{S)146j{asiT{w(F{1D6-_TvLL_n@Kh6zEs+14eYibafIX-{yEp2`{C!=+x44I!k7YAP(kQ8}G3_orIE3evc z4nyDBeIXFthOh7{1qb15MLh0qbimMa#xRkw4)<*Ri;L#%K`Gf3+;uA(y_OX6tKACu zNWD=|a_AjCJu62dZ+r)rYHyBr&cN8qtV7p%LSPm-hTF13R&ZsN3U$sZ!HS_&u$NOH zQ>?$@rKEZ;zBq%E4DSVt{bEG@up(8{)uSi&Y(^Ek^W4xKKN!)gfcc5#@S-~iU-jIB zjB{)*f2lwgg*tI{(rX0aPcHJSH%icpC*%06ZabXJoa{@Fe8xYcn8&u|H!Mq5qc;b) zV)EYsP9)YBXRly?tHIOg>Me`69NRfx)~!e=R|h=h#BCrQAYR#q;uXR~>FXpQC-Y$R z)o65?B7hTzq^X0&0L+m|!;Y8UxN?&r-BoFimzKQ+-FLqQSI6AJE2HLcH!8wl;R23d z`3XT1w{rc*zoPhg9b);^2V$m0;8r$&Ej@dKS7Wov@pu(_bgd?rHxPu?_y}W`%Rofy z9x%`mAqV%GQSBLxocCpxi71HU^1Xd9?#2uFDmj^}XH1{)WpB}A>^(d;&Im(;=i%Zj zmtkbRDC*}>Y)dmjn~cL~c%vBpmK8#o`);^uxC(tgl;biGr`yRp{N7}V2D<8`XZ=eY zTwTN6o2x(uePWqQwhw>V9pwzq_rTcgcTxAL5#4r=*?CRa&HjWEO)GEaQp2vH>&cI} zaUbJ?D=9E7*d-i)@BzfKz7-c<5Br{{!rREl7&g&_{_8+nE(`v`rs+E`@S?#Q#9eW@*y$ zzyJpBE09`iDY7WShJx%`Gm)kd9~Y_Q)g{ekn=QHz?5I+nTUc+Y(EA1?Z^0liO~|`hg88 zxW`iR1SabH5vMVd82jGI#x7 zNVa2q=x7BLU3E@iR~iS?z2vB2-UqPJFM@4lli-_nG7lT{K-s^J-*S>M#uo7K_O%jS z{J@BAJZ(VlofJd$0A1|OxrRIMCBc%@G2qk79NzW?T+d}8Dpga%80+dZTw0H6)z0I} z*K4z9FAARjisog&6DM3chw&3%gD2G?p;wn-Zm~W&bwh$5{ap@j*bKpCty%o|_-9=E zW<&aWjVu{;y$;9aC-RHpg~`|zu8=7vPL>+Y23rqR$KIfa=sAsjM)vB^e~a3{L`R6m zez=D-dB)$j7pFfbCJJ%`8LM(tEF`}}v~{kAYExaZ{9P>m-ZjWAY%Axpc4`tS(Hby} zTZ=uc6Y_Y$H&*^L(nb@S~o$PHf5#Zt)*V5FQZOTXdZuIbs$a}|I|rQWjs6^ zCqb7;#h}4KWlWcSfl(ehh^Oi&J;I2L4FT7bxU+#L zQ?MxU(7q@b)dt+SjXwqLS3aR-*K=-KnHkP$5g{S_WN77KWqf1S1NOC5PSehr;%EDK z)Hz*`%0|PS{+h>-eLI6+7F~swjoYBU?+kxXw-u&od=%Vv^?+{M0_a>~Oy-B57A$7Z z!X^)M$JXN%ZQEq%@QQR8nUcnx`xXy-eVpLfU0rlo^^WoRdf?kYJ1)E;4T7p6P)@wz zWc6E^?TXY$jl33_lV?H}ZN3Y?e=dZ*`)%-B#A1wV^F`;czqx@6dm&~=6FOO+}wrJ_M!OB=~G{65~?7LSaEaueEzC z*VA|iXZXisbweEPO}G!2rEcOir&Sm>phR|t7on@Y8J#ixIzHL;hMQ12h|Z#+_~g+| z_%E>yzolP?&xVThdb%-*A8kS#ZMCTFxe7RN`W>1+R>CC{Blzc4&2T|XlD-a;BYG?| zXDZZ)*d^KSmV{I+d#@QwII=v-`$ zo662lUf!LHQ(1@DOXDKA8f5VA*&S2I_a~nd_XN~;)HD8l7w+9*LjGRSf$!REKbWe5 zGgA*kpSUvFd_Ia6tq0S_uxJB;dQPS z%B#E}tLX+_xM@sBO?(4UHdA$!}o!dk`Ggb#otba?x#k1Q&L(5pxf>z>F*f>U!9K{1w{`QlL-WPO_fE 
zW^F3${uk2TeMQOiYl5-1XE%~@50ODH*m6c9Ew#7(cj5m`Kos< z{0*N1Y-9{BTZfCN`TaPbvndVp553~o{U=Imlk9nc`ajlZh=6Fu(|w_43m39iIkl=k z$BeiVC@Pg82bT&^Ut=eyIzyCxlVy7b<=YtcArTrSMBxj$3)_by!M60psQ zxg(C)!M}n4kV}}v%LSc9@w2Vi!uUX!ZN8zwI7iNWXDa4~zvTb4rA;Bb)kuEuAUuy- zh^}o`P;%)qzSu1Q71KAkz|R04W+!9!kRH{UHw(i?C1U@e9&PEn1u{p4===Mju*OY? z9-GOWi)&baPdgOSh0I9o-On)lgC5PuS2|n9Pyt%ZHoTWP?$cnmMh-e1``8! zgRt{^wA|uV6kI7zr(du}m0Z>>JO2z8?_`dmNjK5U>^`WAjm0@qx8MXdqK|%lW8cYi z=v1RVoDhE$u_}u!AAJzxFbcs#J_o)gm_vE5KD-lSm&e#N-LHF{)xUyh<`6 zQ?{}9v1ki=de=?F07cr)GG_Wh*45h{0-qiGz&OH$oKCbL*Jk~P3k-wMet8`C z-dvK-9s3`etuMqC%;oZOUL#)~QNSI3^%+JrP3DijO-9AthhfX_{b=g80_)cPh3A%r z#CpjqbY0zyUK1a{1WheIcN5DleJRH~R031p-2%y%&u}I&r}KVE(9w*aVJhFvT}ZwI zE!I(Jy1n12F+QK4V70A6A-NffLm*8>MNm_O8sbl|+G-=k)2EKlV6ba6RunD;U3re#Um20H*^h12Jvebi%eS)RmVK_n^ds$XnB8=Xr(46*U$JoL_A$V; zmvAZCi%VR;8`A47oMZ-Am$ce}>)fM5Bii(+hPxClytxST%}mI$w!?5A3t^(WvC|K+ z3Y?H3LfhgLG1pldl#PZ2d5hSrE3HF(muu2B<=V8fQky0$Vb0F2)|hQvfs&ppAj@?Z z<{DIR-7)LnM7S~S3^2oO8`9zUG!+{1D;yjSUH~1VJZ|($_L-gKitmjVU_vDaLknd| z5}yyBUcSY1%x$zxZVa2Fvmt)65l%=d=ktXW$)ho6A#^~ItWsNmm$!%DO2;d3Lcbk% z>U?5dby4!;iaVa-eFafnUpf9@B9LX2|L#$Oe{??a<|)$Xo*j${xsC9!qMw_lY~dIo z`5Zh{M7UXv%4Cg76`YDIgk|HtVeYwNeu3wH`0gx4Cf(eLJuO87qZe|d4UFm1Uy|gj ziYi@pN|*XR`Gk3zA($?r0p5c_V5zT0BNm?G?buDGElU8)9TNHZ4^QE@L2DR)Mufx` zh|;JGIU4f#H5Wa3G(91vMXMve;;r|g5Y=S_)AdI=*$KOVAgCUOQ#f3u-Gk~cK7*$9 zPPS{AgqBBmyyyBK9=UW8^~!nl7$;9J_cDfuf)lho`pZ?iCZOp3eAGGajQM-@Y5Ru* zoW86ze{SakzOp$0i*o$N@O6ekE^yH#@~_;iVoX<=AR#-Ft(oE&HMwnLn`e`XHaC&Y0x)m)q= zG{}-ihcAK9C3W6+9V1yxMyKJ63ri{&wZgFZSaXvFD*SAOrUuIKl2T z?7eD<7A@+|fO#`Zp!S*^y|uR#%a4oEyM?(BR$9t1(BHXKvq%&=#Q5e)8dN@r@e~i)L36kP3sNLyVQt1j*0vo#t|)vlOZ3A{^7p5 z|6sg}2K-!D#?KfnK~`Nnj&ti%z`IJByvsGkGV_agDC0UjRvpCnhvl&GPCs8Cwho)z z7va)#JK(fz6YicGfcdF^pl3@tB#hYyt?pGAU#-MXbTJ@)+B4aALCWP8NRUnJrqeLp z9mF#z)E!LatbALc_0(fr9M=G|oz>{Y2mw^;E0cpWqWKRSpJ1l0FDgG8f%79?f}Bv+ zX`Ez*yW$xi!Fd3i^3~}Mp-@z^Gr`7eeL8W1GKtHM;{P7#1WzR`IwF4=uC34`W)(7I zoW)*D-td;wyBWh(N_@fTD&|y)eKxEUDM2k=6FRR*h(yF1(8Oo~X#TeZ*UsWOF`pDR 
zb40?rxJQm{G#mF#Rv>f4D#21+o$Q{dLl>_90V`8Vxs#rKAn@CYUd}9=%DAedW*dUg z?mSMUD+^b68xx-@Dcand#A|OZ#ves8(A1p78w3S#CaNk#MX>?vCmWE@nu#c2JjIBU zkx<2Us!slIz+j&)89zn}b_fN4ij^KYGBO$8u-DasX(pVC^f?SP7N+Meeug&(Ov#)= zQ;<&=Kv}XOxw#?-pWZFP^iN^v688a>o1SArq6UGzEZgYy1FptK!t`HC=$s=0m8=Jy zl_F2tL<$9*{WDm$YY+>so`rl9HIh-UPkUytTm+gE!-7Ko` zQk4#C2lA`0enXrb$Z4N3gVnAEm~^ZGb9W5GlJ)?!eQiwkCHBINtTR(K^NLjOt_5j& zmFwtHV?`HQtr9#v-HA#%O2lusISfD3qJ^Fc+%vbwkjDCTN54kHpr#h2v5v1uf;yc~ zqrov?9v%#gfC3XQ0o_){|F~TSuS-T<0TQ?gXx{k#suXKq*&;zV&OJp7P z0X)bKz_Zx?(2g8?PO|nV44oW?w8S&;cw`4&Z(B|ccANHaVX zsKL!)H0)oE&L5}v7zQ@p+dHkgg71EZJ4Ax@raOrYUqB$)eI{%d5 zzCj~AwABn}&$$P~UaaTaEJl8;Vtd;_Kaehe$hAe+{{J&L$y5WLzbfSP{u6O3v})j% zjT;Bnds^Z6+-fc?VKR=XYv4o+3`oVaN1&}A&nbjI0PnW3@YNs?lE3>RS^OO2RUe_* zo*$SmIe;=1YX{1Y;?cyLh$7L zGycP9X$W@JCIO0V&@LFotA#Fyt}0b3qHO5YZ(YtWZfS>sJE|ZZX@^~``?T-Kbqw(_ zhTJdTV8VPwqW4vZ-naVU^fZ4EW^Dfp@}8xz?(qAV) zjL(9#A%!TGw+;SNd;tEv3;B=B=YoHQIlXRWKqqyJ(bO%JZ$74lXDj%)dXi83s-XlL7x%Jg;v?Ti5-Br?bbP?+Rb^ zU)Bt%ucxAaR{<7lO~QnHMY{WOC{~pI#+40z5WG~822X6|t&4b=e_wzr))rzB%O>s0 zafCsKDv)wj;KG$h@T-gwSKGe@CxqU^^I0`0RdF6RSLQ;{m}#Kpn}Eyh6ydpU3B=A_ z!8((1+?Pxlvg?c*X>vf|*8G8lHUIct){&hlDM!mC0`R-UWKOeNp15RQ#u4oTc)CN2 z`dN$Puzd;SI{G92x&*H8J0WgxHeb}ka!!)>xM1%OsB-%2|FQO_;aElQ-#3L2nde#N z%o(q}&vltIrZi}tCykmkhti-ZLs6nap%jtGb?tK%r9mT=B&DK+CJib@_x|1Qp65C4 z7x(}9Igb6p;c&S2vi90*o#*%a`HIh+s>CXDbz;qXLdw2qVd)zyyk%}c2O>|gYS;ea z?9L(l|3;rUJyIkuoqO0-&ksPOoiwfaT+Yr{`H35*8_={hO0?Bno}1vF4uQ%%Kju*# z7~MLKDgFQWz2|aZhCav6d{#u-SGTJlp#Zy02e? 
ze<~4NZ>179XWlGa@v?+HxN#OcK|_r(G~SC7_n4DAO~$x%&l0S>osau?KgRE4W~8zC zGyJ-(NsHYD+|B8-v`}X*#`631%r|CaM6d+8!TUsqo<0vIIZ8~=`(*IHxfzO2SrX1@ zDHJa2#&(l=+{otX`1U#P)9AmC?W+oK#cBc0a<~PbUL1z6i(jI>;smjeUj%l4FoD{5 zDLPBzJQ`OeKuh%}aKD*_t#vQKGoTIXmFvZQSB`T5x;zW7_biM#q)7~>>(gVh3(?@| zYxaE!LiEyZp3B*cL#)+sqLDF~)2Ts&FO;I@=JnvPb2pCQwMP?8o?+an9EjH~;)+cd z(SwJzU)82?y06U_+$3eoA1g#&kffj2;*7>yL6hp?rqN z6>Pfaz~^+Xz=J`R?8A&t;JR}J)d(8TrCJ{0e)jE#qrC2;XH1Ex^;|3avG5OmnEseG zI=m5TD4!>1HWacJzJvzH6xe=#J}S-_!S~bm=qD2>fPh5SN{{{B#O+35&uUQ-F%C+U$nw``&5)duL} zy=rD9BK$H>l~(n9Wxtx;LvgAUy1q|>oDG-IZ#BQSD8G+adbP>qVpVFb&)3$w4e+|D zC6-^G!j20WCM%gxW-W15LgGKyxR9Yz5)!hq{CEGaFC__IiMea$FNum-=&)e!y15Q6 z?g2hRiE01;zWPtSsJ=8Nnc4ic&!a-3NWffpCEhhEh3WZrmw7NpBtF^3Fgw1@W#}7y z&$h{lp5EF)OhxZVah2aOCXlHR|J&Zd)ZSXigqC_U8w!tzhF1iL?=F^NrC;x4{+$0L zn)0!XFE>2MuKOON@+?1BY%sanFd^kjj9muH_?`O`5V^fouwa%xQrAkfWX@fb; zX_?a^PnB3E;Y1g6(|?V)_g^$~ySqkw`q~GkYN!$sN4h}KE%ux*L+B2WvLrG z6DqmoOOHaK(20IY-3xtn5u`0|ENM;rgW1g^adYi?)_X`a?%%CSnv*->pVDaJ*=R^t zUrpkqd6ssVpDj^y*b63N8>01l5MG{Q$=3Vcq>FW;CH%QPyVDp zc!*Jxc6+Wr7?0xW8@Oq^JefUZfSnfvC|ShP=X}0i_Ox`a>Bc48+j5PS%k-xbE5@=0 z(;mU4sE6#GdErDZ)`}Cpoq!&3@hr9lQ@s^eAXsA9WRb|r5j}Ase_^c-a6|?mnbNc(M<;A#q*JLakU@W zzJDZ?jP!z!YX4AgULUkQ+R3fBcms!o2T{wpdr-2>kB)Bkq4AR}Z^~ zX_JTJczsgVGnA85!8r?+=!1nSwDJfCE3%F0=$*rOF7Z`VkX;D5JFnrwBxBkk(4vwr z%D|-UHFvb_8cI#Fr7=D~xPQk)c%!Zgw&>-Hd!4FKwyuu5xUCa6jWeg|mPCmc(y?RgE-2}#m$=pH0~ zd4rp0hSA}V(xKp>JuXWBCukXZ7Y^hY63>AcSp4D|jQMIsn5#4R&nH9I@cqlgHT%)D zYXBv>+$r5!1P}W^<1`O<65iJUlli>r2m4pCCz5w@dv9cwcHA&@-D-VbwD?3U0QvtlE=CS?S;w(*~GQ@r2(4@O9}Ahhd7$7QRN(jxAn& zFuQUOF3(b-FGAf(V(B;Wxpmo`@lzKv`l|q*JRU={)LDFXZ!1pjJPXye1#o`vaC(t{ z?yhIrlSiU{a2L6ffA{(4Q_BYq$EeV+4>U;N6Lak7e8IGMxl^Y*?>Sr9E?6C%18b&z z=J_!ZAm;Ldkv zpgLp;^iLe*w#&VN1hW=iKlcM;Lnjc^V^6_De*ky1Beqs+U-d*&!k1<~ns+YcPgzMLw{3#aZ^0 z`%`$f{4+lPBEnnpd|mmRAzb#pjNXQR#Am55xiwD`W17pwJ08j~cRudmv$*c##lh>G zRi`6um;qGhhdiS_Q-Rf37f6_KPWWQ=ShkF>7tBk}1%1uKqUSCL_UDMr6<$YQ=Nx)tkE7c0trxNw7M7oEabd8B`|y!3)I==+4)TRL4osam%%7o`nq2;%iPiuS|%v 
zM;I+@97-a(b3UPrA=l(y^9AlWYV{9kWe`&5$_h-Yr zNj*HPQzCNVp18bq6~vBI#9tnPWKW(x8Eb0;6I_MFdMPrEKvs(A!gPAp+W)kDdn zOTTg4yOUgb@&&vys|4a)Oo&FgGT!~DMTUPq0^7R&!lavBU^RLK^?4*s8?=66x||ey zMPNrStnwwOwx3Q8+2M&h164q~R)K#A^ML19fB(($Lk+}4a7-&)S7+H|6NLkl;7 zXPanM)nJ#lEluCDoBgq-oIP*IXPKMl<6;M28aTp@?1*tDl0TKmH}`z@Pc&cOc@#j# z7LSHUrmxtEF}JvtO-@vPdOAe?(x4MVbaC!^C(<-qNZgYbaw~c=JQf%K!O#*3Y?l3m z_pJTNn7++$`Sd?te-=Vs6|yjPMlcyQo@dcn8%p(T_gCQPS2JMtlmyQ_)Ay`oh6+h26O!^>d%--zk-oYvMV(rJWM*E$eUIHq$R#@x zb<~$0^Rl6>dc?RnlcHLVk+TGo~Zo|)*o_qkVXjmd7D{i}4u`2j7;Hdw>RMxgCI%aDm&y}d6v3}!))8diG`{XQLy=DN zyeC+&sDk?u_>|B1l|sp(a7Cn z#@k)SbXv74I(plY-<#yALHHOlDQz@O21~Ls%7Jz@p9P(#3!u2$juh~WfIpgl*sX@W zxVmo~9lkUX!?pwx^LaI>P&%AM@;wKivOmzo*KQ{V9YPy6kbGoXp>)j;2+?#R8$;`0 zT4Dfw$Dgn7+f%sc+X-RUJ<)P=82C;dWJ9m&;uK{as%QBI+B^7K!3cuS>-0#0#})j$ zqz-o^e&o*U=u?Z1L|o$iooDI$p|#OrjG3VchO^4BcwIFHwoBk$Yq9wK3qqcUzJtVl z8sz=jY4Fha6{rU`pu*;C{KMB7mmN~3!*UdMe!>>RgZm)F>^4{V;Wjp>JcsS~ym45YEp2+uX9#~D z#1z+$aB1Ny=5jONOO5G;$JJ`k{(d#Q5M{FQMa^8KOe^-B&xGhxcVMB^L3Yg%DHrX7AX71|gIiQmD$Qie<>hB5jgrAwyME51{DUcHsDaTH0(^YF80T z3#R^pzhiBQ<3V2xJ~bAt&f1fx3tsHXedpnwA)!V;{Hc8PRs47422k@y5NLS=Ry>d) z8To-^`ZI>MFXOWyhEROJCy0*fsu9)8SdlxMui^E8Q2JveWnX1*;=$A7_?*}V^xY;+ zj|bT!zlkCXejNaAx&rO?8bZc>`OJ>yQn6^Z1-;epOcFPw;|7|5sV&za@_Y$wGI|UL z+)sk;wyWsS&f((Z7));LfTCF$SoEZyqiXNiXO>AYrz#V#@Hsj&c3aRjFUF7^8p>4h zuQksTFe2KYOyEqrDuhbrvdi5riAOO3!8B^EWPUM{T zLyYXWgH3MMMAKq=Is*@TVpqBRsRRd4z%O6H5<|BQy_h4If81{gpd)9N6>ro zR&I5J39otY#IfTR<9X@V+|JpR%$=v5;!qDivgEWn`0ZBWzTUnAMwPBK$E1!EpO-|~ zc@o#jR-)Xz0DA7@RJd>NPtWl6-Yd6s=%J?vVPn{Su0R|_64amIl<$%38rjEiE8CK0 z7--TRZ*|bYVmF&Zzhk`B9cZ+=J-P0&0J16lNw2rGux^xUn1Lc>d^UI;Ga1THK9cy%tH(E#+5n zNX#Vq@;yU?BYQE#$%Q7FX2QRuN*rozN{%d%z;-(YTGhznAE`HRT~iL8ZV2T)7E53t zlaN}O=TN7lK$I?x!sUNFX+sc)T^W|tD0&PDY3b*}zW?DAuX@vahk?xB#Bv|@8sL4o zQs|k~f>Z8}!kKUlBvd;f_;(?PTfI@`m@Zx)uSutnew@FRpZnXq$iheya`AT#M2xhc z?^-6Hq}FwAzSmfIKkGGmZXHe%U7ZP4RHp-%v=OxeNO`99X7c~LNM88l#RpPTEQpJgLZA~Wm#=oXld4>Rxt%IvE~!>%$6lv zN(?Yf{W!B?i#1u4_!qiur0MubNis(K8n@aS61OqhRAGxb?O(Nsl{}BAzA*_Mq$gpJ 
z^d(FkVn|{dXW`Ou`^DMMR)E)_5t-cML|arG$g>_lJeX)p-*m{3#-E?L$!E^P^1dq= zCM;r)7ubVEN(hmhxFS?D0DLdg(6dGx?mUVy7T~> z^PEXf!6nT3DNFFTGszxKfqQfpBUf3{z@0^~iucORnCMOX*ItC0&0*A0w+Jh~tOv!S zO!f(%pEEx>h*a?XiRkh?kXb#KorvS<{ou(^Gy4(BX9}TLDFghP598Ij>U1HmcYB## z$E`b2$>(Nm+)8BrT-PSGYk7ZeG{ZKlg)C!yWO1e|%QRJw69&(Hr`G5A?8k`(x+Ev%9%m!71bY3f>5&98Sd#gk zb#6KcABr7lgNqaOdJZ1AFQE5z1mxH@WAUT!D#W9DJ6QB<(X(ns+n)F<`$L zO_@`Ot=)vm-mSydDdm{nF%pDn(U?9*4p#)5lFpWVkaHyDCC}R#eDoJPJz2Vj(V`pG zD2U6FneheYaP^Hc!G>piR$Ui<*B?Q%Ui{*IIh5nkulew>I-Z?=(VeXQ-HX4IQhE2N zA-O0aAa>!W*w41V@Ml{fv+qkNF}2gB?c-Zn-!4a@+@?&t^)orc#e?FK1_D#UbV=F# z`#AUNL$KpHwr#tDX!GOc9Je`>U3(!9-Gm`zeuW9G0}bL)GM216-OFc!xX{lNzG6xI zMOOQO2er639g}WXg5_AAcX`yF+62CV?Q`z2{-eX;g4bSlV2%`R{iy~MJouhM@n%*v z`z=U}s9}d_ujA5B31G=7WjgN@Wnb{Q$-0%+Bt+#7#+OUc(Kp8s1-1$w=7f+RM%IK2 zZ-9UAj-sSbAfI8D0q10b$>xeD&>7CNXEMC#_d5r0lOTXSQE`tcoYTR5+0%g0d9C7C zi6g1)T^}y}@_W?KKMuanRx%m`<@lQjaM~zG&OyBjWuD29Wd8nta%xnxi_pQ14%G9r zD#_bwO-9dH$cEeM@n>^6a&Fvwh}kF&*GDAca@UXSo{$}&;K!1~XN9EohbcY!Gm%pjEv{6RW5-&1@$U@{&#LaHC{W|f*sCei%%_z7XO8rj5?V( zkN1r?Xwb+7B^bK#0SNcH(G-PU;B)O4^y!?#HQA}SVeKCL%M;7e z>k_;>=t)bCIAPPeLTo&^ofSPzhfro3_`Tl;7Xu2xC9($E^^}Osp&sPI(pcGTx!~|@ z7U$_(!OBLALm%&PRN|6|GrZCTTMS&uy=nX&RL~1iablda$M(UIjjPFET$ycc9;nM%3(D z#1xw862)x?K-0Dk&!^|0M!PcUvnWKJX>w>WL!bPqILh{>&g5F+!pVcodVIFCj!Rys z&1VDf`>sZF_U@ztfc=GJ`Voj@1Y{>#`+n?LUe|`l)O|xF31-F$^Dm%K@{~ zSJ)q66;S+4mHw=n46VM);lhwAxcyawKCZE&+m7-+oK_E1Pq>7Oc+JE2K3TG!XMSEE z{Tki|9>i}WLImY8N|cj-$_~wvrBi!f!0E%)5HxoSyy_W4{Tw}EltL2B{@uY$j%vb* zBOgF^dK0b>KFQi1vH+=|AE?_k3iMpj5N!$we- zS*N)ReWPjL7I!*+xfWeL@)-VD{|MF$y$y=PQn|T?%3>+{1zu`h!nHkj;A-bLwx`Gf z6rOp*k*z8;=$bY?v|gX&1iR9{XCdfUyVJev=R$aQfCIZBrZPKdLohao&3 zN_R~?EAFrS2W}6gX}5<1+o+~Xjm;C;)iJgB{iHRK$akTZra-@4%K&AiKw6PDg02=j z5$m--Fw3wKO@!Y#rOo```E``%dwv0pvp$qLJOBeaGE~P-gdHYDJR|=-&QIROHvK7s zsO(w@CW+`%rAd126=;*C3JkOMA(OZCvmM4m=(Y0!RJ+-m++A9T?Kyuj%}AeZGt(sZ z+S0idfilE0Z#31u`wKrwxRSH`$H0~+({ayoQwV^IA{F0L38O4r`{#SOnKK|)?ClSQ*VNdJjM z=%1!SrY2dFofRx~oUKm$s$7YCrZs6#9**}H?t}wPe#A@_Kv2MtsGrN=+qsiq%}CPz 
zolDu~tbQmtcmx98zT!5#`UF`+6hL#$Ff!-1K7Hc45G4v4aJaq$)onP->CU>!V;vnx zyUjif2?3&b`v6ES4Wd?W)3CM2mzwcsPTW3(=1YvB-_CbInW-h|$s5Y)wAA3R$#!s; zpEti>o8(d5U)~9} z?SGK)Jc^w5YIcnF5%f}i%1xXjAs%eHc;g4Z7W6>njRr{JS?L*6j~7WEZ;y>|dbxu4;_LpV=vvpOtskH=96dQgg1Edg|^wLh!%JRWshmcgQJ%A|p`fn3v1oGx&q zzZQH(b^dJ5q@Kh$(}!prVo68y@17^g0c4fsSk&0=LUj}zfW0Ngk9zXN>p~FS{3sht zL>qB$^bdS7-w1Bbat3J!E8bTm&E0L@!5RG-E!WZe@_XkXuiYvczw8IO1H_z1%Ac4<;O zQj4DYu0#czmaJj#Uf4F_09W(ckFr`KEaT7hPG&c-X6+yGgLalI=KcD~zCrXUp%^hy z%-+saBs)Xv_`IWjv?%U%?fIRVUO1H z{m?OG*mdi%SdNh-jnkCL&2*k6ROU)=IK1G7AGYNTubI*Hqw2wzpTpxjz1aCBO0?jq z6Y0t|;*?cP$@~mkvLe`-nB2RFMg0fh+p~v|vgNecAZr*sq@qmc$LxUWif@>8?iBxS zy2zbP%mz_kD;#~Z5vI&nAxr10V_!@uZX4Fj6)1U==pP;=HRmVpPt3yUo7Q3F(qX{q z+0d0uAtcd#I%wy=hpzfp_;!aJQ9GZ2pZQvHPrel$b#n=asD1cU;7=W|bVHb? z7uh&llE&o?<4*IwaVD++y=%nyH(s8L|CNpJKJpo>yzXVagajR8qzqm>x5K935Ux6X zimR%BE`FIFN?JTkpcrdG&aW6E^E}C_XMyz3aBJH5WC>m!6^Gw?L&#UZI5xH3mHcG# zQ8z0Q`YznVze)9&ufcOa!U{#>)dum;FkR}C&5&RFZ^EcHe>!K$3+4&G59w~*30{(# zbg6|DPBs}y-i|_%7taj7<&Y2aX9mHQ(s1}Y+LT_)J&qS7wCK=f&R|t;NIzw|P@`?m zq`0IBS8a&EF%Rxx8ehX0&-cPsTx(&k=XYRqmkqh!e1`pg&JSOexpAew@}PUX3DrKI zf*TsaBtW3Wu2(1lg;GLy6gFd@vyh}+38S{p&FD3YYVa<84(qlji#zQGA*MuyG?XpJ zdFl_j$T#71=L8nh^}R{!xubByz=VrW6w*s7x#&^P(8qV4!#B}I_T74O+SBup74zd& zKZef?+&2{@3Or$Sh!Sx$Nj~J)7d@OgEp*@_ZlI3%hJ5;IuST)ht39i6DnSzn@1yA{O?X5i(!ZV=Hq?nb37hLekvX5r8d0lUuB z77u5dkyWnsu&d9Iob@vXGo>MPvY|H3mvu(fAqsSt>>GrI{N8(ZA{LsN;8piVoN8o8 zf5|A(y?er_dHDlKF${q|iMi}1FJBs~aUM{oEHgD z{YfF7OtmBzf5fu6V=qB!O*2+bFrWtoRUitxRDISnA1j_6g)@&&p>$L;Zg<`Z`!_ufSn9}dUAR;TWBa!-HG}2m%hBfgu(UD3t-Ow2GE(MT1 z`mRJRTb@cuKgUbjePF8>NN?|UCD-1mQSV!RR9I_4pF9sH79QJR$Id`FuCL1`+%lyu zt@fnNE*!J2yOX-%6JZ*O_Pn!yEa$mFh(GGH*!k0J>Em?+P?uv%A{O|P?nj1npVl4N zZ4-^hlZFw|KrVPsEXJ@I%Xn`80gO6k%iTQli*-X&HV{S;trj!9Ww;g(gmyuB^KA^s z%@zk2=N|CqCOyf!+n+vYKIr`EPt6ZiMLXA=IQdkUX~wCJpC~GeoBs?eYIx|Jc1GMn=Md*Prp%|Be34^S_`!&Z<>=T5>+aT<<}H3gOM4cfB-4+$59aihoL9M zNfq-1A1>2do6E23|RW_I89Op@C!x)Yo!m_M^j5c&F&r^13D!PZ5) z1xg)a!STJXMLVYC0bnP=BcvMLX=ay#Do1U7JZvhBe=5ntmyt@198Ic1d&;F 
zxIpntoIo{WvEau!3DL>6!vYLf5~UvhD423|nD~?0c+o-=P3HcDKZ0^i8*vl2Uxd;^ z(S;m!!JLQ3M3-N>d-h}%2yB()mGP7v|6Lm{_6j;W(l5wOHZI@;KpXQJip*PMpx?Q>^e)*LLAl`8eUj&{$#a zvF$>uxH#d1$J>Qn?Qz21qF7<4T)fbl|J~r9vBG4HIHA5~oKS9gtni;gtdJaw6$a$S z3AJ~|3oppW3(sGQ6}l$I3P*j26+U_#CwyKUCya^Z|IXz&;qQuA;hdYXLXDxZ!c!Y! zh12R|g$ZwCh1)j936)jig~C^H!YO;>gj?(4gwZK+Lfz&#;rYL@!o@RUg*K123nRR? z3uo}3ZS_6==e2Rd%>%JQOQSepnSHGAbXlyhYhIjCBpoMQdY`|?{y1U6Q~vXfjulQH z5-0r6`1`(;==`6IztewV{FS`ch!0-OWWJVO75yCYkePn7gemmM5rk85b>#JPVrEx@ zr$5h@c-$otk7kcE7r*9;mzv*Z&d&KMdd*XWZop*0AYmMtgQBG>&V{d{))_J#>MXBeRC5H-_LYE7S%C}RZkyBGd z5noP-U;VNZN0salg&bNg8u{ZiW2W?$8FT+7b2Y1k@v&@Umft?XXm`G3oO*OU3l;Z^ zJzp1yr>yT4TdhCNWN)cueoyLP{67?WSS}L^+9cD(-8n}@;zh@pjmslERrR)s4y!z8 z`Xw7goa{OAaFYDL=f6wX%Qyd%@pt|&jK9tHCT{-R0`{iyKRo_#t>E&fWG?l{T_AoQ zbiN3P@lSUgSn5p=*e>uF4fl%bzYrBj%~wb z9}HmM*5PzFkz#!J^#CaC!n}zN#79O9V=XPvz=0tq{$3zoZAJXFWr$Kn7u36q#HJl? zR4dSt2J2Z-H+5~&K506deNm#@ypk|0p#xIA@)u-d`FH<{9;Cb9AVV`C zN|DdE`l>`KhDp-E7yVe{($5eC$D;w2j;u03cp21Zukx$UZSk>qz= z)aXo$A{OI^aLV2I)X2!&RC97)8L}wOoKBA6`Snvx$*w;uQ{6&qerhM;Zo0-@}ck{qT6&c+P%eKW<##536fTV4;&N zkroD!sS5t|?5yEf`7aAjWo<^CehJa`T|#nc(^Z^0)1G)P>fwr}OHjK&Lz&hMFQ>HQvbp7a_cg(BFn z)QT?VYtmD~&EeAs$Ssy^aG4J#5NmZ!&J&0KOhS88Xt6 zAj!j+o^!te3vxZliNqG@5sBG)rVu;#*^vp@NR50C!zKGynE65n&s+GC!~jRSZT1m7 zwde*uGyVoL-F4uee1WZfRm9(a7Vov{g1*CNz-I6=wAai=&o!1jll>>WS>a5ZrkYd5 zCns*6|M?19E6c9W^*xOe|qT3L2;VreSB!gLH(}3AgMZ<4jf*9si)&{@1@b?rhy!J zv~di&KTW}@_jhB8&PF&gxET!2?89M+OE^)fAu)bBj~y)-O6KtSUY5tt;Jk&F#L{&k zD{XNIbL;)-mLn`lJv)+&4>2Mq);6H#+mrY~`8i0P;@LZ!6FDPY2h!2I6?E&q?6<4CQ1<9^_Iq)xqp_MeY6qu`Ap?c zrt)-jm?0(>7cvJAU1NXczkzdE9`x`?XEMO^8a^M?K)qLdA5%@4Xbmr8i&VYIq|G~U zH{VO1X<3Gjv(3r5wYFsQ-Lufr!S^he^BHY+`Rt{lENIQNz_O(}1SiT-YqbqFNA2`4z z^IZLog^Aor86g?l{DhrjrG(t{dR!dymiaK+fS6YE87^j{h(zTDZtah7oVWEX9*S(j zZ*w02o6!JN-x$Rt2&A6rky2S@Qlz{Q8dt0ZM-s_r%_VaWYB#{6$oHrvDr6g5Mw0lx zS~$}4o%ys+iA>^s6$@SY_3HO1R2%XH_bLVA#~dR%)r03?cCb*TqCl6c^<%_!b-J{_ z1AQ)+bNj71l)Ih=8`k)6=;2GVmpc%5-WzG)*#qZ>mE#WeJUDl%ol7|3gW1=+nR6+> 
z@q(Qr30#|oI;y(l`}CWFjXy`?vQefaNk5cb>taKrjvG+nh)kTH$a6S9d}EuYx3k84 z9;Jds9gKhGOm0m`V*7?W()Y&=FuGlb{G7cGg4*TD;-{4$!)sbo=axaj>O6>l=uWHC zgP}?!OLZpIv1ex}licB>v8;U`Y(LWn_Xe#nGT9o=SzLs#tOpHOmn1r86=}NjFKAqx z$<6IFq;ajcxFKrY?3I)(D9lkH{*p6bZ#>ZLlgy}-Vj8x|usHsz9@)ZplO-E%h*C-- zEL`vhjKAv9h<|;!Ln#6K{r|BG^;e_%wdG6$uO&3#y)EZH5fXmhpVlqmeF{&V@jlPS zcbMx#s}<@v?OA+I?ww|F<$71LV=w<1`txDe3mYPt`W!Pf?y`=*w_xN9WjbntD{YEB2V<_^ z1MMWG7B z%X$v(SFM1hMt)S8s?dQ~j)blz#Mt;F4jxgYC#V2#B(QWw=vu7Qd&6p}IMYRwN8#*4 z`h0GU4ZFO3CU;Qql)G5h3SC?ot{(f0%kZ@#hP(>vLiBiUfx;1Zd{Y|gO|L+LW)!Ql zKL@8w>4m5Lt~6!IGdQ?y4-RAwqbAY@(C0^1WfJ71Zo&&cA}oEC1@ zj8r(SMo#A zmPo&Eh3C_L!n|xvV)RRn)IE6%RZr4jrk^g+8&DzVpZJp6hgKvjKLzfG1kztm_`N_J z&j|nOMk4237k_cR34cE+kVa)K`m#lniVisvkNB6^#c9)%y8g6bmj}&OzXaM_ocJuo zL2kf(DQi8~n$CKw0`dYK;VNCe?g{4PmEw8Wli^NIT}QCHBF*y^g*Zb(AH&Rw!FJIB9PVmN4X@0^ z+&O`?-~I;b?F;33I&J9kDI4S0I}+FP-XgcL7Ug{D$5)+GJ_kDGc^#1lLvS z;DAm{m%DI!_zVIcWiz4aYq(?d~^L&k?hUARc3R;RiiFBSC_5JmX4Gc|y&?%l| z$HLca{%b`teyt=un66FQZky2&u>`q+Ug((>L_MNZ#ELQ23OP6L(7CTnhfHZ=cCA#KgH0N_W%cO zbwNSvaa3^iBsM=r(Y>!qaaXW3nGh-iEB;DR$*B@#Z;mqcEWF6(NorH$(1V!mqd+$u zI1JpC5k$S$g%rb*)uTx^=OK04L~h8&lc3i`ALL!H6%?Qp7^IT74v)nVyiKX|;6C!ZxAK)lvvJT%3VCdqixMJ~Kn zugiuit$EMOZzYLigFUUdtx9e+m!Yzv28m=Az$Y6a8JT8CBd34k+{SvcK{f+;^>iQ9 z^8K{xv|UhZ&;#i*7r^|%L2i)U}!lEYdz zu++^E-$!O*p@tmEd{Mv(g1(~t6GHy+dzkF>`54iv#OH5%(q~VnV!(w#6pqm){l0l1 zCnuop^F|W${R8+y#|ljxc`nYftIWde`A{0>M*6D>%>6l>Ui)?rH? 
z!2q{degw6NImIf?0p?<=B+t87q}zRE$ge|=usCZx^Xf(znsk@32kj3*>u^bWvvq)L zb9x~*k$A}NjIyLAE!sGY=#rOl{xI4m1NQrsLWTD(=pE?72krvQHLt>#*G$QbC;sf1 z(~mfXj3n{s&f752su}9K>$sO!8bPprCZ0aj0o92D`egHVE~dvJm8d%*U1SX^OBPamwnjq?o2sQIC=x9Ts%T%CaL?b5i1#}i@M+iD2O zI|Cmi2z3^jk&9+i@W!+fJT}3J<}b4%-OaKz-);o9O8SFKnIx^;B1>LJhr>H90hzHu z8aC4m>VUL{^`K;b4ey$)x9nP1+zSt8yd+#}TomM7M@|)Rb9&Y4*>u74M@E2+O zdE63v7oPG=3cvk3;o?d^YLur&6cUo~uXY&M7J*>xAWw3pB|vkMEU}6a(qH!jxf@n% z#U8%ev~PzZeR!r3LtV~6@bCgml^SGAAFDulWEDuHl(Lq~+E8>^hj?w1AilX5VAy2= z)y#>PgudVn2J1Lq_Ba}w?ki=RqS ztHuy+osBK1?B9b@2eNU#pC9?FAB}b9QCvJ*ikl96WCPb*P=_=rqLk=CevYkS-PL<= zclT*rm*Ps7WsIg%Pi(<0S;NTm+U;;#R*FjfVo1ufseA_FNn9y&3>#lK(YSR}@yT9Y zRCKdo-J=5NLhW?CH452nVk`1(bT^jqOxmpEcD6i}Au&Cd*d-=Dr1&SF=McCThuTP! zng7Z_cO1{Q@%n*X*%Bmr;sjK?VFkuY<8b<07wW%NiY7kJ$B)~Qoi*K=q%Igu9HQSc zOO|_*iA{Fopwukxmi!*@`yn97-=$Ie>3e3|21jzhYy>`vvZ6&3Wl8(+`*<+v2%op} zjWyu^M?XUC>ElX%4e56wrjosk=7VWHWQXGbJFBmN)A786G1Z-*pCwIo^7ww?yl(bQIsc5eM1qb`NI%K%#_3f)c(h!R zj+%TMCtcQ~KFM?7dagN8Iej|KUY}Of{b5U9dV}7}a4^@1z|IAMRKjWxTsb@!glaPA z{;UIL9<-&Os(K(nF9qUxSZGrCP*T=B0ithy!}?pEq?rY>(ytG1J$}M%TznjDkN;pR z7F@v*>9VBQMS}d)+Xp*%9`$yeQRL>U2QdDiEM2ki^8cdfyyJR$+c@6R)ZRmTPwnwN z*WE6ezl4yo_Z~^4jF1sZNrMW7w9WdS>z0gAvLZ?(%8DeTL7w~h%b)dn<(zZh*Y)|l z-_WZv7D66&;N2KE`p)?#n7paPR%vNcq1GWdaZH1b(&E5}{i`I=i-B_X*f@}dCk9&K z$%0w%Eqxf&yzTL3cn}%vPzCP^@-%&#J*jhL9@WDEq-;hB7qs1)jw}G$vN8u;mP!$; zw0m4pwHC3-FcH>DG9KUzLKg;~!kXssJp5=xg(L|o%h=#U!le+nm?JMb7$@?J6gl;G z6P$P)0NQ73VDF6WC^(W1Ybx#w<-^8sdcP|$?oT@!oeMzoOdeW4hw!%3`}uk+Gr}Y? 
zkSH>m=rvlw?Md_D(*kG4z|tr4<}o(?u3%X7$&>uL`U;JG{^61n+O*H-7WRMe0cYja z7^waN{_gQ1q3s1I`q+$Que^g&K|DV{hPl3uJ%I)ZeR6Y+JHLNJ7jNlkOxv@cL2j`< zuhMbPt0_RAWnrPQiqY zj%4d(A!r}^!KHL*lXJ{_Egso{p{!HprOrC1y7KVJWFKsOxgAD?xY2DAZ}{1R+c3ZR zBX9Sy6BL4-$=CZ{wCYDPuMw_HzQwkq_oH0gA{;?VL{CaLZ~fO z)V;(RJ5x+STXg=Oj@_-vJbAktmRqpdw%rjv*nN`V-)cP?x8xtl=0!l^Hpb@v)erQF zHc>cEf$f(WbFNTOxsPS{zrTabyIpvD?pgFVf6gm6pNGqS!&oEz8=bs!A-G$b=scYR zYH<>z=%FVWerSNl<9`a~M)aZe3|XSy;|8Z~ZK<&EAV|V*nB9{CAJp6Uu`4pLTG5A^ zE|;Y{uX)1jwKHJv7ioIqS0_xE6wdz|taV^h0fVQv(CIy(wdw|n4j+Yql1?trQ38Xy-09(JF*0fRDazmJ#0`>d{O;BO zvLWRR|7@2f^YWRJ(H$;WrfEiIjdi4XeWU5?$UBf3+zanEr{eDSXE^Z|Yx3?61s6$i zDr=nz3yu}w(1ZVQZ)+G<8O-K8R?5>qRqFWB-iO?0|Hq!OpA|bZ1zcLxU|pg=eyH*% zCzncFQz)2CWeX)B=r z+=_A0vO>7`(VpPGK)PHd6+8F*_WZW{1}7Om4;v;M(MT;n8Z=bHycH|a(9{P1Oftr? znc-YujwtooU_$qC8sug9cAbW?#JEp*1_)Rmx5~_v!Hje7x8)TK&Lv5$2|s!6WT_TGmi5>Bf^H| zi5vKmruCS@?*6l<^`e;fIo?O#k#1GI&+rWs`8OZ(*_{3hyuULS>I$#nlM94qJ_sb) z7Wc6wc@UP%tCGDrW5M`@Ch?CDrAHqW!mbeZ?7APtYr}oq)w3ylovtIPGGkmo>&GyE z`9HzG5$e>(F$do@tibty<%s>E=ct=hfCCPi@NWECeo^u~6wTZXGLKq7;qVFm!ShjM z#3nKNv9klu81IMh*^Xq!{Um{x)khRJX+?g+Gw5Px-&E`WGxQlla;O`pm8sz6*(x|n zPJ=F;B2A;+pJC6=TFiV}4ZlBxa-$c^)7XSo`11KJH$_DU$Nkc#(>YV3CrpGky>FN^ z!T`eB8ZqH?0OV!MkyDLIG}3Vj`mA>4brQ27U}7)KYx)OEHeJTuW)*O#)ss9rFG)PS z(_!Pmc$C?oPERKiviO(EG&(G1jFX2CphePG6R49`c5 z#F)q!PTAjwKT~@JhUgvOQj?SSwt!=QDwiI>l|1*hg)f+=mv#QFL;?owzu zPRuJ2mWNky$#?W=SXv!-(+%UMVHXgrUxKx>@`Fls~|jJqp>^1(** zC7%w7*`iP!$haIe8f20zdzWhpg*{)}v1EX8kZ&sC8pVsaH}O8kA5DXgML+T1SXCO4 z+zaLn&$tmsjF}trK8!kINYB4~j+?G&qb``@)?PWf7C9hrx%#J>)t=Ey7MtqA5}#&mjAJm{)bez z5@#iMLEA)jcW^3)P52cebUlcygBz_lAwg1=i{X9oNHhvz=Xqme+DZ#SCe?ye+#leb zeEN~Klu=yw8@5Z#!-c!opoH~ENM`fLs01YvROmu_ZR41FW+eIIAYLmar z3$VkBx&Jn^&V!&FtS_39sJYg3$-mv4S+ph*o4XCpdxzqfKfRo1uL2EA^&rj8$(S1= zO~Q8u;8unGs1x=RiPtKaBEmQsd-6Ezr?&V^xtSXbnT83QSl9XAUfd8-BAj5z+^Q++ z^!=`0%$v-zojYymMJX$yAZAIY__O}WL0LXnp@(}@r2&_FHL0wZJqcMY3;!LBfi_JY zI&r)X{m^`$TXxrmCY}i6p9NV^voCSH56EF?iX7&Cai{Y>W^j)Z2jE!seAF0iN^bhv 
zbJq7Ja$6H^$m8L9lnFAWb3UJkfBIh`z}+4Cwzfmc!rA=Y<^qU)Xh>7St%%L;pU}B~ zCtm;b8bz=dqyCk1Z+$wkf8#vp{2q@p1pP3^W&uogGa)iLn)I>8!zwN5DX=NQg&veH zgTE3s^w)efKKkWDzT>7ixm2i4LY-eAJ-rwjZqCN?0|(J?wLN*%;74bD)g%@PAs~9> z0xrs2fx{2wvEjA}{eIe$t1DXuE4B^s5o*C$W*bP{gSJ7)ug7Tk#*kk6<3pA5mom2O zY*5b_MM9YT3F_jQ_>ew7$WQ7wjojJ&$Ii?m(5f5lm}{t#B~OjGQ~`LtiVnfZZlNj2LN7 zkBp|cWY0~|8`#TDdAbl>dhEz$+h(k_u0WS76G6qH3-^?~Vf^0p=sq*lbD{eegv4oR zblZo_o3xd6Am;Ox7I*n^T{_^At4Bi=&Y{#`CG7B3B|$G+xtHNaH2hpP=IY&nOFFhh zWR4b1A9IjD_w)+tZwrR^R-@r#^;ocJDHrUy`A!g1Vo9HN>C!Xf7}uafj6UrDEp%!> z2mfrRL&o-4h!PdUxD9S}l*UWpp)2BW-d};P2tNiBUj*U&nfIXf+9}kXbEn$x`W1-Y zY7b!`L0!&pxP9yy)D@E@&d1ka*KvKiH$a3M-55h=Jz+EAwsQD;t`nxSjIVZiFUWk5 zr6c%9_%`b)E{Hc4L~i&3UrOKO*dZJ8>az&9KxrhYS>^{@hbqCp*_kXl;Y3z$(-fBG zl)^vQhdQOhm@(g-%pN+&B|R6T+K+;1%4rFzRRLthmM%_1X#u}o_9KQLU*_r8G@Z+R zlY%)9%*p*n&4op2bIeCO=PbQQeui1^CkQrZ4d3 zCn3IIGgs$D5-|JSbVxgy2;bHG7|+9$|L}4$K0IItYnr^tpRCc;!hSdA-wGhXX-Cm- z_!&f3o6=APPwxAp08aL_A<=NuCi81=;hl(Oq~e1R;NPO_~}BM zVGBqGl)$XOSR_M-V4SWZ zAn7-5@To7dghK~cb3=>O>6qRrD7m^2I$Dj$rvD;fgY9puvI``$d&D8k;vv7sJQFlz z4XDW+3%Ij2km{#Bf%rpi9!`ZVul( z_cpfaSdjIjeYj5pV=&;wLzu8Roml+x|HzW5udtjIN9d!PyN29hEqRG!8m|GEmt}@GT&6N($UF0qPG7->>ZbH_b)uT%^ zlF;~g3kU-Ugz17;J0qXO~sDH1ovawYf;LH>yVZD9z^SaFS(*I$n|UW@C(Mj zfpdjz_$Wf392H8^1*=DpMvt{{+k7XsiMnIhp+K@~lnt?pD1yVdi|b(d${DH0VY8Db zm6*jQf^7=4s@$2ev(s^G%@7t$b3xrF&!PLG3#qp%1GoQnqpTU5v$P5rC&Y!|!e;b( zz7yAs>j9gD*n@>a!O-x{TVM3r~MVhPM!ywcb_33RFZzVXF(nI9pZwMcVe-fJRP^v zpH@b{gr4W8_?OZ0n3rKdKgq`9gOKZ-$a!BH^O*Ih1>Yd&Q#;?_=R>L!CCIDm?_iGW zccdqMsLG8p9C~G5C3Yf-@2fb>Z=Um^x}$~d==J>Q%}L9#C&U(C+T23X-BFM=RgC8R zqkL|R26pAA^6ja;xHI@JSSB9Cp})=CiLu61!M+YNK6+rOt{t6t-xI>C8X;040lwvx zgP`15=y&5jmfr7z%U$s}?&X-O8TQ?{(eE|vPrAz$Dh)z5b;R>;<%q|;JTUOu4fxQH zdW=7Si#vV^!_zOJvSlNx-bofLNoLNDfPN@7ItS$wycxsUnrv*+BAdScLG4Khp}P6J z#eh9c?N%Vm;@;r3*f4(P$r8MHxC(S{jG)sh+^~;v#}s8)SKaRvr^mcGrp$pn=SwxX zM7mPLF2-SwG$tOCZ!^yMEik?xjf=V*Nl0R!;IKsvEH`|N)2_*ofnq!A_Ksm%O!nfU z4kaq)coUz;I?8O3>#K6=NfJkr&7P04TzR?&u=DiMC<@EvHaQ4{d 
z=}pI_nsF+OHQqnLn5Huh($!T)WYYH!AbmVt7`0854vevdZcS|%I4K5&(_}%*N{y`O zzQp-#upk>|e}VH0139DQiOePUoO^Pk5%*eZg3EC;x_MPLU-jFMDmzx-nB75ea>8H8 zkQXOcudrvMX$Gh-XYc9-sob5jpV4K}BaC`&1UuuT$dP_A;y78Hs8nmxJdti(;-yBA z-P=Oij7e$O=IVnr6kor7ieEC_h+-zs|Fs-V51s8q*ccCTa1Q3|a)l9rs-*Cmp)e|} zlb@rzntQunhBh}cR*Ab9jQyfX2c6cT@|qlQ*Lx0G4YiZZU@KNKn}1O`e{ow zk~{w@uASG<$(}LhgKG*Pbc-dqFFlDl!h=Bl)i8)3;NbDBJACY9E%eCUgD!Qou-)Y( zjI7FsFwbo$zPgp~uFc^7DsP7a+X~=gj15t8{|}}PNP=h0f2cO|2jqR?$c%ReY*(&I z9`DbG%U&{MZd@sMYW#c{vzt(#dLD-+)q~ngQgp{*=3Tr3ml{s< zy<6YmH$5jZYQ;~qd1XL68ZJTRcPXOZFc;i6Xw$%`bnbR&4d%(Ry{}{gXA*8roT5E( z?vBq;l0bRU?UNwvD3GTk5tTDDK4BVN{`Gcd<9GX zRKmNDr6_x`9+U!a^3%7U=1o;b!&|-ISUe>iNr4L;nWj(r{*#~&$VmJyT8)G6tf|hn zkJw{#j$1{V;D*qUEJ#T4YBcJF*fTctQ*|+hR@ZRK-7ZAL>L0XU@S}+eLR469L%uk3 zBx&$EO#G}$tehmVP2?4bnLdTa-)t^iVGT!{{bB25E#g-Bjio(v!AQfM{IS5DkTG2kJ8N@smsq-R zQ1dgmjW=P>tP2^nZ3I1XaU|8h=}Pqsj&aJP-@)*IY4EVL9LnVFNGscsJ_vcvizgi6 zrss)4)bVJV`&5BVtgBy!f z>IUj%{scb*5Arj$4l^~vF>Y)smzX3**Ep zjG!?K4d|JTlcC^PEo}08&+Yd5AiNOU#~0Oq!THxzVbW_8I$G)_kgatX)$_Mndc6!O zD%*pa`@1=VsSfnB>|DH9lZytuD68Zr`Kb7#?YdHWJTp9P6d5`ZNqO zzYSnVf;jD!mnPd+GJdU{6l!9BO$D(SXKJ)=H+F`338oM8xF}H!I!dC1vwup{sz2MYOvRrT zNqkH0MyN9R`Zy0y^=T5r@9dnv z*PpDCI=~Os*8%MEA(yY;fHT^vlslqJa?XFo5Zw}ZYk!7|@b;led$ovnry-59btHGs zZpWnsK6p&eiwx+Lb4=_(-B<=${q8+WzL`(0zYiBCKH;+a zmiji>+QKu_2PjM}$KC|pnj55)!4+=fEI@7L&d&XFv*;6m@cEak&DjGMd4rJO5WhE)D?B4bJQuHKp2~JS{Kysx78%iAtGlQ&Di-|LUBo(eKQJlr zr;$a$C_c>2_^Zsx=BK~;dBS_xxXGAA?M=bb#eSGpVNJ^-f@qsa4(ypRn&lhUKulx; zoD`4dU$Jj)KI5S(_KhIPiyuL`6x(Z^F5s3|OL4Pr1mN+Jx`Pu(X8qp@7DVnQq=M)b#&?eEhK`3} zXb{L))n{B`t%MrwxC2DWI}IPKN`m$qwdl)w*qIy5sHU9+x%iPWTSO3(fEI=1;Fjf#6I?1o7M4fwt>-v;aH9tK*&9UiH$$Z>j zOM(+UNRNIvUYv6oL+$dJn=%6ysJ3&#-KL}`kY{e4K2*{^4dqAD@PoVtnPt<1i@n2e z&Y&JCI-pGz+`Q;UnKNMG@4`4!x6$um8E(_?r-2#jRKNHRKCaTF(%suI^pQK+nO4O$ z$E%T&qwJkrB1Z?Kzv7=$0={~=71erM3;PzOU|ZHR$YSpw^V@;k^7?K(G>KrApEf^1 zv4oFfyr-GA=2U`lNO!ODroVbt!LlU=G%e>dO!t$bW$x#2Wwt-buf2~Vwi)o5Y=I%+ zf0$!FlGI)hCC?5^()T;$NT7ol9ecYDv<`@o+K4OgfRBaj@t5%6O+~K!3EL&yy}(J& 
zye!NyItg|1#&p*cF>)i5ac3t7(6ljEph?UJk9S+si?Talc*Sx43Hs84rIv)6D-qkQ zllYrur$>tK5{~=40pAV#!^+93WS6fpmyl#Z-EZuJ+9mSz@7&X9|J9V-HT?!_ekDNK zI&ZRyKy@&fl_ zkuiBScLlDV)C2lnaadEl2ebo@VV0saeb*#Gq%BNW6ac(z+(SoN`Vyp_?H(XoE-*`E5P5;7Ub$YCE|m6MT6TeSJIPyDAyV&bxBYHX6g?#}sWMCo*?cGI#ZF2rkg!(Bpmt)^9$DN2MC!+<#Yu zKQF)FjdV7Gz(t;hS^IGd|37OyW6L|$0*ubT!Hs=g4j=l@!|+>OvS6_h?axsc{OzvyR1M8Mt6eQ}a(yi8v2OxSJv{J7ixT_G_ zlbFBfWfGjuaUgFH=;F3n=qPg2>*9xKYzBaMT{pd3AK4*CIL6#XOw9jxI!n;ZZDqGbCs>UIH~<22}gt4lcsh1W($> z^DgeGu=+^}PQHDGH_5gk(QjNygJB`JF%f9n2%yc)tkdu!5}$jGB$363(f55bPOZ14 z3)nYr#0w8v|K1fBH0Hv?d9NX+<_Id6vTR>y3eKY9#7Z_8WY6kylN+`8Ckm9m|LFjZ zpIeIWrkYUC!vcD{LLNOQwL<4d5!T<4C4FVOWRbKMRsQQo3~YY$5sa%et1<%U&g*zW zC_&m^-N&#vd3codiI0s96C}HzK(lgnkeEIJ_Ie)ySZRtcUYO9cH7<0n)Lm#YHYJe* z!+Zuk$a@?HSo_S9m~Ux=Q*XRz$y#|b|HpBdT5F2&HHP$GnJ*nG(c@Jd{Yld`O>%0* z4SZEnB*^#7hJ`^&6twY~{B?&f_=9)5xOfns!bg)!T;2BDc%68Ku% zv98Mu97vGnmU|hH{Q?)pfxFH3F}8&C&phy3BSB?)_2{=_349E@&--mW#c?v_FnX~L zm2IiT#^09cHUASvT6;1clPD>U(io3Nf zADs^B6X(?JxI^SL>M>`1nOQqNx_O;t&YdtjSQjOiyOGa6`*2{dJo)eaL!7FjMqabC z;2cE-vdLMGCjZ;VmnR75zje!axtFHYSx26pIJO_t&Md_KYf1dr^D`j6>5Ndxv<>_o z9l?N6NBCOJ{kVTIV_C(sJ*9*gHXP9>54YOUFy~ys-!E1CH7g_1s*-?#TE4`my#ozO z+=RMuY<4%d6&*TFN&A}Hg4D6?kg-CV_zkeTLA53QE;S9aUYnr9iP5;zcr5OpsY5Cw zQ8@5n9{+PzF*flQWVcYBG>>E7ls);bbsC?cD>~kylr4cFQKXJ_Oz_D4JT4R8phmmGVZ_dTr=aodyXV_PGW>( z9j@prx^%&tL9S$2hSUd_aDU4}xdm;~-n|C4bnf*f$oZ&#)(tg4^ zb;jRwS0E{@JJNe11>DM9iOM`(a`5g&I6c{rWm1Qj>yYx_n47dHPM~tfL@KMace5neeYT8(+phfDaFs06Tx8 z`M`Kk{^5E8VG3tHqFE3Hb5r+#I`0|Mq$=qN^ly<9<_3oPV zd&o`U$EknN@BVuJ@zokwGQp7iykJCbS&5PJ$pP>q{xjrRs}s+6;Vy?_fU`kO6FT zlO??_X7nwe0MQMev|>GBc?%EPa*X3_-F^bEBt@P_-4HI;{ss2$g2AL?8*DL;#9vEw z$#dgv&={8R5k z(!1x+Bys4tWAioq8FXL`wk3+oW`~9Z=uo|lAUbDFtB_y=_g>mYyYR;ZG=gqp=3 zM9!iYbw@U!WK9+1UiBnbQj_uYx1SK@-hoXO>NNY79?M2Vo!p@ba8L zJSe-2pSld7H?bPq-guDQIZr@uiW{k`EPzGd&trz#OcbcHtVYTqn7v~bc(Wb#foniF z_T-}|%K|S`Qy^l@16Hh54moT;{%hnT+^uCyzb|V5>*z(eknK3bW2a-z-IZ{LWzJ5Y zccxF?SkfHNW;}6R0<{{%sRPHleZv1>(-vJK&SoO(#@h2A3Ip)xf&~7_eswClWF;m} 
z*P>B3A!$S>Mn!9e1AG!ZUnRz z1!A_zPW-b_j>h~@rkkC8L8(fVCar7c7rN$wTWJsabgbrb9Ms9Q`gqt8>`KB8_~Gi& zb~Jn|^H8vNSjezD{XO$2x+>Qpop=bfe*BZ<+o65|EENcD{8a9uL>QItm6;AFa@LeV&t#bBYw;W9(PPH z!yi&JVRvN!ZJ8)VVnuCerJOqW*ExZTRw)GBdLq2IC|KJNdKVFt4!RqFu z_uddLI&}x?O8iKT$qM}9w}x97=SQFS*_V)T7`Z@{Ei&C zeNdIYC3)ac`~bEVpJHdtOE|iGJT`uO%;uAmVdKRB+PJSBR&0?Wo3EOZn)zozIIId* zYaQr@@lU|3&VjV_zQYI6Zd_)I1*vqfp<{D)@#=vVbgAP5F3h3<&K{K|an{dpSJEI9 z_q*exh89pacvSsk$_Y#@dMq$$(ZtrM$^1ITvLBP~Ob*wn5zD#uu&(JEzno>N-;W8# zHK|!Rn=iqL91n8yT?mAVxzg36L`e}>i(Z;a!c1cqbC<@DK*wDK1kzH-Hz z!yDn)?_BuyT8G+gWX*We&hncph3Wq~P}{KpoisgZ(JdAHZZ?&dKQxH=K6#D>2=cNO!s|k-(QKc z$pf&fz#it_xr4E*#mU8GlelNQ9LW3iZsghdB0LnC3fVi7xGu&i?(8)qD4TMJyKsya^U&%#LV=Vib=8n2>Ph+Nzh@Sh zJwJ*u>@KH3WU=g`60Ejpy( zh9z7ucP1ZeooMKsQn3G`MH>T?ur)>*yTVfOR=63Fe60*VdJ{1>@-$>N7UC?~&-m?n z1Ap^xES3wdLwuYyO)OO+C%bpT&_*3PO{h&DFGva)`8_H)QNBN zOTL7Ad=ty#jYI5diDfC)lR>Z~RS zu9$n02zfVZ*5^bD_eJx9Ytod8X%aU#GwK;4K{qpx(56-$Qg5a%aAm&iHPcW=Hy^@K0o`F3JEe;guUvnD_OjK$sQr&({+isfwrSQl(JPGfU@2fS8Y zb?qe1Ts8z*S*3VzS1hOWy_J9c!jNRl*Pw65?*P%Wqqz^N#c-hAmK@w?MJg-Dz&#UH z!ll{Mf`2~bia?)k?O4Sv7<|Ti^;=R)Ng+-*-N#jY)FGPn`n1jU1fKaEf=14%u+Fpr z7dVa~2Y$u~Z>+V0CMjw7R9}ww7pl|apQ}*0Fb_-=*Fd@KEA%HGG@i{;+viWhvO-g0 zIiUpK=BnX-G@@y5G>GcVdaQGuh8?F6!V_2~pwON;j?^N5bl4AwOWwI`}&9)b%^}$*%!TJ^ZNpdQY@Fo`#QnHAu6n zErEd%)ZOVO=x*AL!^Lgfj1ezzs~TcqwHx`g27wx^d-Bd);)G&SCj*pJhq_g^t@^0xqb zyWEa6{M16z5EB~hdJ4;NG?^~rNfNYm=$ma*ar&?~iS+iR2R7?40!cLXC@+J`{r<#K z`3+k9YQVvE5(8AS=@dbVhaz<%WqpLEsAw`x`pOpq_7l8FjPJv=q zH55JeA*;JPt0P~%#=Bjz^pR{T*m5tyIDQzWbUKoG!J#0}SoHB1^{E!?Pp9474$l*u zsM54M@Zpy~Zh2D2$FX-LzxzE%ocfRZaLj;&n%)LU8*e&$kpz)1Xu{VQe)Hj0p%~Z^ zjwhX~xt_!NWU2o;{8{Enihj9LgLnG4aDgp-k}N?tCgpP(>2sj@ur|G}l?3Zc?Xb|q zi@uH?!kKKouw+UBdc1X@uS(wV&$qeJ8uOc2?}iC4*bSyjgw%^GFg9%ed$k$e6It&H?Zff8Cpb6h7`7!$gp_K9T^{m z84IW4fVCgiENz0MekH2ENttXsFd0l;1Gs^mH*rapJ2hE3l9<;q&zjO}SiZ=fOnrPE zK32qHZfFy9M6}?5?Jj8fpiHM{9>AvKx^#Qc5UgPxvi^K$diQ4&YR%7P{jqR@S{8#mMKYd(eIfb^~o+s&8rwQG3 zuoW*9heDn5Ui`Q22H&Ra#2c)e2p9MG5&7-6!0qQ)=5^f*XT&dp%7b&5mGu`(TGqht 
z(N}Q9E_b4@>QCYizJ!WpCRE936P7020snj_ddK|-eig*w$X91LesTqtfB6VmTm8tB z?py&6vxMlF3l;O<;)7k{bmF;gR4Qc|%xCvt$@pFHfLkaP8g+@5c{(PN4w$yHGzu0mgW>sJfjVj1nJ8YcA9)2nQ#IEP=n z=?hG0vn9bpUtn}B6%?<%4RiOmajHul$nDLBHdU8_-!=?GS=_!c&{~0Q6Em#=w+LG5&yq2OF?_1cmB- z>9}oBiO$T?pnJva=~Tw^{@HFomhCCVBPoo1^1B2+cIxrjLFVMps&hDTy@2Q+tpaCT z8SeRhwv)JNOWp?c^Us%jM%Psebg6+SKX-#HAK2oE^{i9eK79xl3~8e2841iCe+<=& zy1?(+4$fKaG)(ewq$hGDNa@n`D5>z2>j>EZDHFW7o&B}AkgRNSgZd74 z`gW~3DRjGxBVH(D0>4+VJ=vE;6{Yddbvj@uOOeVcI#B(1d)hI*3T{|fQKtfVnq=Wf zl>`O2Y)c=OF24?ataGk;>kal!l7VjW@g#_$Bl8=@I6{(Ol+4tUY-=aJ2(A z?|2S#Ox38gxj#+5>PRon=!eseI`sB?H?rZ~b)0uig#PO@COh<9xq{!H;ezf>94_*Q z1BD{g*;!lj^S#woVp$6!JgFO>0n#JU6paSkxSmKUZEl)xKJN8%x? zKq46h1+^W7{*CkALd`I zA5(olG7+QKiII6_#i+s<6w?=268rv1m>@Y9mS&1z5j_uiYCd2eXC+*3xC7Eoxf16% zpq!Wr?;Lvq!`-!siNRUeU*n1o3UZnE&YMnsZbLjfw+Nay{e?3%>R?!~1WpBdlD)$& zG$|m7i{}@?rB8;0Yq<;`52SkaZBN0J7klwX^CEsw+nRJ=HDmiR6Z+t~1fA%z2%1!7 z$=?3aq}D=<{4IEiS|)Sxzk=QP>gPYc?~^grdUy(^6m~cy z<0(-)QsY?(xkY!eb;(|cT)l>~8O2;=V;Fzzr3Crb%-A_U$HR}&(zIp!5KOhOAg2c& z!ExCy+?@^)QaI6)ywvxGU11lnlyR$HS8-h9_*uf`!ewwXL&#U}B;>_ATf$FR0h+Bo zH2r2P7%mZ`(rVM8ds!_+xY^SQW%AT*c>(_AOYx9NE2Npokn=a~s21~3)LJ=`SL19* z{zC!v%t}Sa%|7&U_;{Gdb`G=27j$e;CWqfGgO-ENRMJtF7Nu$utLDE%Y@_ZZ*Zi{z9EKLHb0x|kYjB$q}`UFn1{=5}0ipnEJ#W#c&W+SO`pi!=C<6}zMTqN(=f6zx zAe$7vV!GWO{4sNo_qeG}MQ5JmUcP$^F+ZQ-j)B*xo8mye2DxzMH@i5KBaz&9TVMLQ z`wdKHjET9G_MF6Iwxg9;&Sxzw!$rTwqH_NpAZyyeWY~i!4LAs9-QI;a>P-EgsP8NyrRWnw?5iEgW_k|d{G(*rgV;iPOe#Glyqv2@; zb9T5!V_d~KNXZGN#g{^G`6A|k3G9JT6(90RL5A-9^_ly7;Q=_;NPt7~LbPpFAx%fZ zpe5e~cCqZ?`np1X&iiVdw8R&7bksn$*jLm}-H7XqZgGqI&T~m42EaF!IhIDp;;Uz) zXzN8e2X#nC>2uSd>8&+g8}CD%b1CCT`B4kz zNqbYghwpe;0n%q3X}YH}O^>sJ%@uxlZ!6>LpDM+r@pmz`MTr|R*u}4TEkPA4b->%{ z8>FsQBIQ|*H1k3+>Q9&@Y<5*6|3DwT#}x@OHcSKYQgbRRPEjFX3mCbKqJh~?#Ld)- z-aL`Qi)c>3&$78Ze%7LcH_mwN8EHte771v(!)07~`3bHw&>&Y8s-VuL9!B>xVYo6! 
z56_E-Rh$W;lm~r%?|?vV^<$W|mO1kxWQa(}ckuh+&3$fT-10PCT9mmKg_@DD)sOYb zS$|u(jLn^{%W<<0+R>${fG<@>lIuSgaZ`hBiDUxu^@A)EF^{>s*uG_3iUNHgU&0NQ zeul~6ukqKoblCetiUi*3z?2(6kNv&PWxrKLpGp3>u-AuN94SNYKc#SQ=0W^CARsTM zjDhuKi(wrdK`*t035VZ#fqfY3^iBN*B*P9%L;y{Tv{)|RlKlAc61~mc>4uyd{Kztr zF&2r{MmXBK|<#3-EYGK*wtjc;^`qW0TQj zXYx)sQf^Hg6;DIGvIqW3Pynr4>3AWIanl>`LiZwdI{W7%xFTfp7&krg`Ew6kou-Uo z_4lw{--n{|D?aGI0#t_&f=fD*aP9aCKEza!?ff{J{4tg5{3%XGI0rzn$V40}bt9)7 zt8jmuE!lJA1^4Bi6WAZ?6qL*~pcc!j;5@q@UW}{7`%0|qzBzywSRMh#!AY?9&0bEV zoH>O*+i%Q%8H%r(TRlH76!+EWW$r?d<;-d_S!o7(Yvy*gIhCx@p9X&qb6Y8_9*P~->A1TG$snxHryWE{h zMV}PRxVi@;X1_z}>RI^RX&h#!$HPU#8R)ToEVr`s1Ai(+l71egP?={74W z=70eSikMLhnDv`?pC4b<{&A}IscqF%NwFwq7PDsN>Aw5Ag0RU)6`Iew^YwBCvApPr zbf13)70JI~PSbXp@ZbV$Q6DSz4wl#_EswxFDGCNle|P!NKCtI32_{;0to!Mbcx6#v z9`G+6c3a*7jT$%n$t^TQL!0opxxJ#|p@;4XjjpyLG{Fn+rm zlrOlV@6@}Xs9#IU+qMZV=60Czp&B-=Fa(*BGZl&NK|?SmUx^*}WPmOEoeIJq`F7}& z=Zq7RD`1+vj89}b)3?4P9IbdnEmQu%BCQ}wi0FW_>gUkV%Zv7^Zzt6pIVi@_NS~yFMUu`(O zTm|=qt%EX&<$7pUDwx=Ppt3t_sVe3rJ?Lu5)7E>kwUlk|Eu@KclcX8EHWf@+VGf~a_Z#`*yGC{>DMvYJV2nLXY)u8ZNN*>K-IOg!*oK8*FP zfz8UBq2q%VST@?8%^Gz0MdBfHO3~$K?KVNj+3E1YTo-kxFBUp2u|zjVfs+O`z(uP$ zQ2Mo8_>^bJ$J@Uri@yzY?ZpSWE;(Vwt+7B4$t@v@l~CXH61*{33sX1RaqhGilAGO$ zd=C8fnj_YN*PqqGqMyLo0a9MQ&W`&SU!gw-)p=B(-Z=FNLiUFuICs2CVlJ8E+VY;5 zXZ}k(cdbnx7Wf!^vX9BTd%TiY-8m1w5}%^H*%!aYxbk}Tlj;)2^!s&J-cjD2x6QaI zJ_*kU+xUyr!R7$9`+bbQ-CPbKue#x)MMkvc*h*pA1BtP*WTzOIQcrIqr&HgcLeM&t z#H8BLTNTSojIn;c5vMHLOJemk%3AZ8Or?CPulp77jcAAAW6e=@QzNYz?Sk8X_TcoF zMl7=%PNfouC$G|wt+y(0)!y^4Sx*b&+m6uWiCZa8avt6JsV*!&*8zWa*$8{1p3?e@ zz7&^DaDJ}~E1c;>lLI@+oEOxSLGMAd>7g>KjTZRvN>5auv4?C*bs)*`H@Wfv8a~*(213`&DkzI`7d6|8FLT*tpx0y|i*EacCDVS>!H0U(y5pueHaA?Y_XW#J5l` zzM$yb4Z^`%eGE{{hH?FYV|QrbsZ+gRvPTnLk=#hFEqlqYAemMlxGQ?7E|b0Q>cnU2 zDrxobqo5C-;CM@yD@OGOMd^G#Hg+g|z5I*X2PX^0svYs-r^VFf-v}EYSz=int@Ha^@dNrXG|PAN$F=FCdyzGn@@|Lh0-^Ae@!olfK~&Bku@ zHGqq4y|LYuvs7BO&2#gyEz~BlPkTEkqUy3D*lE)ZE7o10Rev9gyj_~1f1->7w(NkA zzLsn}ej^M^le}^dhES*1FUjuKLh5$7JI5ZoAULHTr?klg&b9aGX>vEgaL-SAUu+{j 
zd74j~CN`2{kruX;#enA;g4VJO*+GdR{nlQ}v1D|?OS?@aH{2}{I@}e*Z))T4$0wz1 zl^M_f;sP^gEr)!gN3avCgh7vfQcLL~VZz*KN?BG47DH|M+GFYSf4ziqKRa+qpFU_^ z^g_O0n%R9hZ!a}I+fTWdR#CusB~Gb6O|I8RKucR8?XtKn^#ygf)vG5OgmvQ8 zBz+ts`Jx_Z)(8O_m*83S18}I;M~hR+G}~I8y#qAJVQ~kPKqf*wSYpY&EZ7^8A$EpT zTAQH9-4gH8pn})X<&`fLmfoh8qkCxhmuyf!Gm7@=N^Gsy{m}S^J8DS0ghKYg@S$(W zqq7>=Uh&2A(`)ITuL3--mY((Bwd3Z|J|sVS8KSi`!STg!7=7%caAk%s54e>n_0OY) zbdynFT=Q1;bov=cm(~%s?e>To9h`B}TVU@g3Lw9;La4G*l*z|`rzTfSBoF}mPj_a_uW%p5h+vD|2$vuD72?d`nxcr3&H%{+EHq!ckx8w|( zJ8n7Ux5mNtZTIM1lPzCOw-Ik^`Ef$Bj*zlO3*X0DQI7&UoL;h>uGPfC;&%t=L$l=O zvAHI6(C(l?65_Um!0Dr`IUuZyo?I&ReqdZn*R|V=o*0bglg4z%%lh;c0S5%j;f6 z3VAZMMju(bcb6->EMH!kzfVin2RvmZVt{M}cartZUGDY7D5auMt*GLJXNtV*-Px5j z^<|#ze{b{X8fIL1e~g3Ik?=5CcuTdHV?S?MYoeD-`DTs`6idBM>lez8{-}^Iiab?0 zqBD85MQ`z1z3!mbWLhqJ?Qz3P?|h*=SHG?Dq@JV95?4u#tdr7Xtl=gTgr4)Kh48_$; zZkZf^?F${cBxS|c2GHG(V@3bxJL%mxSI!+Y2Mj!sUq4*|Wou7C^Tc1W>fqlH_jd}e zldd_P>$SO$e}XjQoyND8X3_1h2gSLvd>Gwo&m$=V>$Y{m-z!&esZ6R3{+7IRgX<&wHsY`V6J=9l)Dt+Xwo!mMRnbYUc)=$ejhw|r1Y zsG;FzdK}Oa$ww>3(Ds8;HqCYg_gz#7w|CdU(qtoEj}F4cxWTMB>oy&8K1>x>N-RaL zg;UZx_?N^XYqxSFOxk}8zTa7aKR*w}lj8=XZKpDE)oeK(tI$KAVS`Y3=g4O#IP;?9 zaQdB}$+p>azDc1x955#JoKaYqVUxBdao5vE)lxkAXPQp27bnkm|C868ZV zkAE)?4GeFj8x*97ya;O^e)Kt z^pH4Zv%$`N4JR$gz>&IxAn4G0=w{wQ@M%8}Z#1;hho#eC_>{-6>u(mfJPe1(hYJN7lU;GOf1QSvAUo^{fibsk@&<$JApsVp7etXxK~ zHjSX}Z)9|5q&?cacL4eA>A>-I5Hn~?WoJbfx}i}(YrhxM&%$7~|L)0`j;|-}A(v?E z@G6oA?4_i~d2sskTj<>WJ{2W@mz{jv8!HJ1;_`#k26tI6W8@_oCtw+Q7_c|xs zd9eT%-I~GoybMu1JPq8c-q2#VAw24!E)7nzyPGKGf-ggO_W|*UQi&SsAdmDP+>jy{WhP?dBAU1t-kNUUlf`0=pQaMcKlqfxRTN=Uo zi)!KY^%ZzJXQ=owYc|dM*p+6^$l+Tj4uk&{Y0XpojcnJI(I2}o?&a7@XHzAAbl3vC zIa%V8?aG4*XSHz0y(Kted;;o7TzmJZOvvffo;P+*qp0d!IQr@)bYDr-r`a2Glsqw6 zn*X{QbQPYq+YaGT19{hjoiHsofrszTVJiy-DMvYj3O0`A{3GXNF)`_Q?z|WN3;7OL z{mli}mIZij+F?>XTmu#3MpOG^TKrX(Px?|8s`K!fc%ZmedUjBw&HjtA@5jkFJ3t-3 z6}nQ`$xoD>rhr4QWV6ckICQC1!b_t)sC)P%u8(yFbM5DF_?HGhOps;?Cah-doZFPC z=}78#7o#vr32LP|$%O?sL1)7|NX|*+`oD41+i?Vjg}yuG&mN;XOEZ&Jd1Rcc 
z4562g!hmaff_l^rA=BDf@(R1LPOpVL>TM=wNyphDc7wGi(onzWYq4~$8h@^uB-O;1 zu%?|R1Yefw<5T;xbMXp^!FxIU7B&X4SI+ zLeltC^6~1%P~379T8^pX+))M8&E1Y~^%+2$lzvEjgjgQ`O%F3;a>;yYFd9^TrpfzF zvA{}K)b&-PcG|Tt^v`fg{hTe@D_(?UJIAq7{dX~7a2_kSDe$qEIy`!8o;d8S5``3eT-1sQGt)M}fUsU;OX9L*&uvE0!y#ll*OF6N84}4@Bf;t-W zQN{3~Z1!$_Q2wC^2XgiKqk1AP3A+a!zIl-Q_6D&+Hy1CA%#j@6>Qat;udul5O5QrY zH}6)B=eTLp(R0UA-kyAbx1vpyO_wEY@>bubzitTN!XtZeS29f@N$G?9g&Gsb4fFrwg$c)>3R7wPoI zrzUUUWk9gk`g2(@!TXz_kUEODtk$QAwkvU^iY>Ghgkg8{bet8Z!z0&fg5TxKqU(&+ zbm_f-T6zJTJNSrra_$mbiMJp@E1QSvbm7~dR&dz*NPIJW8-3Ih>09zBN|5eJ1);i> ze@>aZcTu9tlRpX#X~*g2v*~vWb~?9=8m-50c-22Rlsk^IZoVds#8>pe+mI%Eqo6F!sQmk?&2whwa!`iE z#cbCO?lkz~+E(e=lne{WVqi(dYTk8p&u(raS(p9+b&;ZVwr3HbnDyTeZ9;}`>g?tXT z!$bZh@Fm>=em=`Y!EP{@O&h}nCk!w&>@Q3YRK`)09eF@$Po7vCA+Bn&;C#nMuNVKC zC@4OWTd%6I!_rlpx^FCMY7WMN4QpY&#MWCYOJOPqr0hOE(lwwNE+s`^Y_L6Q*>@IF zGKO==ZD&-UsV};_dhp(xYsH_hdg9q9i}7oU8@EdHij!x)1BWH2=}Lbmh|w|QNNHcU zSl0@JMG^8%iKZD zY$Xn_naN6N10{w=2LB*Y@RH`(G8OgG zUSpz^ank)#vaz1aB>_XdRu-CJtIHIW)kSgdwKpZN)_8myAcMwzQ@DLifJ|tL#3rkB zUb$x|DIQPfFOlDc9asf_j@jUza5JHPQXj6aUBkhf@*y&+1sp+xYm4s-kA25t)a{x0 z&gF$LLVpxxCj@cA^g&p2ei&4q-F)iLn-`!n{1-&jRD#pv&RpGZ6#i_>5jK2_$59KX z@^+U)Lc)7rS#&}#R`Z|BiO*N^SzTK?o>2^m9V7V4yk7id=~{kc9fS(M*TaSW(fFVB zhnux`u7ZZN{*$)<$@)){UV8sONiXC6|9_s{@5*1b-jH8wzAU#Je^!1k;hcP9`v>w3 zm!HWmo!KFeZh9n_o8Oj)YnI7t$6xy|N9Vzv-s~dn?tixb+y3hQH%DjRKtHYjzQ5uB zQoXfzq9zW) z-Z!M5X|wx3-=8Y^8vpaUZIUa}qbk+=^nw5L^@-9{?}tB9y_a^De2*$iyd@ig_x@X{ z-nP>J$4LH2-H_D(@-H5Er5LC!yfQ+OEfS(lUHS^mbOa!|_z&l@9h zDh=+${MX-e*O{76OQfIw&-On#evki!RFEbv3VvRcL>-e*1 zuF2WXVQe_AKNd{xhu8is7tHbzD^9J3w^C2=%-_@EgMTUXRJtCmw40Iqeg8AqdTTjR?~eFSVpBmv zBpUhpz}^K+eyeZ6;Ih}0c)=Tn{d`GfTkq0Oi*g#Sr@*1_!?~rrS~jUHnFa;g;}glP zeyiD%U5neq`LWBSo{*0GK))V*_>#n;{Gx)FYRjp939`CQ2+nCZN7@d5NV%#Tu35K? zKE3XW<(t|>g>M6Jw^1Ow2##pKQsTngStlGizMIypD~3Hu8)0~A52+U*`PM=Wc>EEW zFiLm4?A5aZ7(<&ybFP8tRCpZTjZ?+X>9*Wf^@$EA*zv53c@%cg5uHa? 
zQu5QoH1<{qpBDP^qS+NdM@nc?|4Ec|>I-z$eJ9F#o)QhBT)ETXli;M-2o=6VvE57; z^b2f)e_d+DA2ABpI$E6$Whn5$U%RQ}xR;RD>Wwb@+ry6ZemIU&V0ViyG!MB;E_dbv zxP1}(j9&}Jz54Td$!}L3?ZM~gjio-XnrP{4B{oY^#ppfP#C?x-(8$Y$Jq_|@w(1GE z`s_MrUL~MARe~(vgc2``v^`-JJk(Og#Yb$=aZ4z-eo(-%b&u%F8el&gw!O>z+scXw0z^N8!N|Kh8MjgckaW9IoFHyAH76BR|K9BX5=o_%obW zEL}|n^MdhFTn+TokaFP}PwDB`j%?)^$7ic{LQ@4`29F^2$p&%OM7U9C3RcHt?0?A? z&MQa`ocM8&e!~v`oa(`U-M509;7+`= zt_vm!WtpK-<>T_1#(hc7JZtbh5m^;bl`_K**6M&*}+TVQ7OvZwrA1g zty&a4I}{e5SH&qo139n08w?LGrfa!&e8TAsjBFi@tFO1`sp-4n-Y9iEu>7ddI5h-C z=T5vx;kCqyngd0HL-}p2GJbxrSjuw`!Fxl#Q{9HSG_+ee94m=q`RhmoH;uE!xAlbh+_Ui8fBYypay7ncI z=^`<1H=F?XkFh+(T$-0`mbj3wtvGsbJbhjI9fs-$qw?}K^r*#}XD)Z9^L_i_p)OWj zxyPOhZGCyuWG#6IiTC(Y0ty{pH4QRzhH&(tF4$~RN?(KDLiPPxD!~Z+)nt#G|Aphp zW06?f(NQ>HB(cZx7Ld}7<7DB~2j^~*7!2oz(Dkc>xFY4DP?!8$Jn}OM>dsi;PxYVT z`0{;X^rroy>pNT2Sz;}Y++~PoPG1&U%vOuJBP=-V*map?qoVNhHy_~Ju<)RXF(ItiCy!BJDXx;O;K#j0^xt+Fs6u8DkwFf^-wNS@w@VSOh(beXe+ zo)6hb_hxK{q)w_lUvlOu|2_iszqG}Et^;^?H($OKUkIzGC{a{fe;5XvOplwDyYiqLXXK;eAAreSN4A&x z;muJm#DxR)Q=M*q?lZ=S3yve@%n74iwe_&hQ-$Kn@4}0x1<F5ac?vy{a{6LrM)oegki`wYrGAR}@M~l|ZJvLSR^AQ8LrH_#VCQ%M$-%eJ`jL=-$4PvXiaana z1Wi=p`0CSc^t#NOn?7qwOpJ?i?Ph)M-QI=<{W}KztcT%M6B`b&0-jtt5R<j>L(H1Ngw(EVLxT+?mUL z(*QP^ux7Cd?y_!%4#TQwaNjxfXIgJ@Zd|=w&Fdme?m*mcH~$Hk><{4ddoB-T394?zHZ9C-L7kcRcoN z0vJhbIa_0C7W1Q&DNA$1BeEz6ZS~>QqdRE!q?1r`X0BAX2*K2NbFdXbf?;-m;}hMe zO5(}Cws#U~^20?wbvF5}-ux`FhSEM|} zjP<@~SEY*$SzD?4-X56R{0vSm@Z(W-QSi+BooFrf%s*bKgrt|{mH*WIC^F_BefaVh z!owYL^qbjY_rZ6?4xgSvX?i<8(yqy4Qae+r9;b_m%JKNctqE-0j?s*V5&WTOix^wB zfZQbiTSNX8m^WS*EhkAF<+9UISJ@!SFMOc~P6p`oR}s$k-0T(btp_e^S5KQ~dGLMh z5Adnz7N{f}imTfkIM~<+4?mGO)9)P7L!}>vnuK!kW-Cw>!ZEK#o7=0XaLl1<7}CWD z9WVRIMp&4!-s~WJa_YKN4|e6(Ql02_jidB@)>T|~d?@Pp>CuPAj;vDKhsQ;?hjSmR zz{DsJCzdLb@&Jj==C8==?!)oQ;1zJlauHmgGo22X>GAAwZsK+CTjFj3*(YBGmwkFe zAM<+vOuQx2GwsP;-$!wx-#IC#uZTBPV_@mE2HG|H4t%;2iOn;EVdCa-lwH|Khc>IB zO8h0^bn#cJdwm{M7P(;bLr>w7=_k+`)kl22%!nIa7R&cY^uOgx4??Fb6WDa6FEpt* zqxZ!FRI4HRa2&nm`_}7nuEeNPXo}~6qZ7rY216h_F_wpwm}2Qrd%B?36`F>f0GF(6 
zvB+vC`0lEJ0jK+Oa_=ZoSbYk-CLEyX?xCm{ABCs4+VcF}J<$477^@ky$BQFmLfb+~ ztJ>H>oUqW7yFGa!>R7x5n@(93^9_DQ!^6b7 z{NcPUY?2Uf&>zh{59Z4^Jn-k!6=bh+7|wea(Bf^KxMh?Fm!3<9=n*b>(oq4IObD0e z`R(x0notaQJd`)uS@YRdK3HX8&E+4e$n{CF*&|^*4-?%^ ze3K{b(8c%7()_An3M4Bc&nSxr-IxUwekYLC6!m#Y-}bnz(=upVtcx!{`U&P%Y z7!99vhH)zkp`^DVmP^l{+PaA0E;s4N9ir(ghw|5)1F%XiggY3#p;Y=w2fuXVzK&*WeB~qLh5+vOc1E4< z(%K^`3_o>A0uh~sqxIpWY_(nPuW*>Yl=$-mt$w^>pQ)HD6q9$C8>FHltbArOBpv zvE&OZ-1rTgYt}>bA#FbCp~*|%?uTzukF_r4wYaBeGfi1DKn$Cp!RKO(v7na%4r;dI z{d+sJ=C-|1^>{m6O|BKzC|RPgsh0NFM6p$fF=`$kOix{6d42h3p}E45x43xWwdxQa zKkk+Mk6Q!H|JM;4CiUmG8gtz9%pN+sDDm7?vmj>LE_f0s)$Z5MmU8`$xY;R@dfl>w zhS+5ku{R7eT&BQ`--T3HK7=n?Jp8UqGFussYhR?p?O_@`NiHWId0$ksd0ct<`CIyA9!>uC zI()j-g`2yKh32KEbgz04PU+GhKC?1{?P&z%eG>5N5ihWM_XFDR9>hNu%3;yv{WR&N zNbv`OXNTv&s7YP%Rgx9=vWmhRUZ!|1T!jx$ejuKG-jD0{x%2oWMN%DeUhMsKD!d&+)ff}n8hRS?cKnpDzq?nwcK$WYS3WIveIf0eJ&6fY@7eCW z4k%UiV@Jt@W;Ad<{qtyn>z4Dx%hTRdxWxIXA0wkL@2^7#D;Z|o(k730UbtthA|6kP zX3fWnm^N8~r*F2yq*OoZ8&FPnb{+;Z7ywUqS>TbohRjvhDE)j-+|z9!x4nqtw)1XS zzvwOP+Npttr^CgyKPzcd;0xOFzMg&oA3w!S!!?F+l%{+I38 zw8$M&^))$hWe*%$Jd{T&HdDu67UCKIcW}CB2^sXVK+7&I(6qHl92H~B>*sD1&rA1= zNjiDdtRSb$tDQJES&u(ENm;Yt&V13bC+d$4A0Qn02=w4)M4pcAor{o?Wu!)3HObBFNRt;m1_5 zYKbE~eOp6)6?|aw#hE1jb)yv{{=)goXln4iZiqLMup*yT($z4URTkGhW`?4Xnt zUZald6mQe`LdnVcM#=%!4#n#wcHI7fJN68*6?acYNO_e4U8`?^opv0rEFX-|r!R%H zewV=Ea=rZeIs;tSD8sw$b7=8;3oP#aPwZ)Wi#k?1@ZaUWg4d-Zw7b0~@!+*k7G6eu zX2oKs)6Qr(B~apC#%)SThZANF}r z;H)~ZFg!}`sv4QZg>skFVY-tYlmmB{6!7? 
zbJ+|%7OkcWybCH`KZ0@cU|#5A%8y@1Ko?CdelXLCgWes01N-eH?pGXKJQ~cVnJeI8 zg@#y}AAy>Wcfz)7QcrDYAfIekE~^IhX=e8ALk)^)d`2^z;|1B=5X|aUNzJ-9VXS<;_$FqLuxjTpEMGF8 zJbVI$(Je~+?wS$bUSucnwL9Ye8S29E><-fTahe3JF%-)CW>~MDhx${=c}cj#A#N7Ze^$28GI4$-R0_ST;;9n4H}X+fu*6 zxdEAQb*c?^>5(ObJdqeQevyosgXIgHdh_-$WgaQjR-Oq~d`-^}N9{-<)yo2ZQEZO| zGYoP0F+EJ)Y|X8YnVx49P?}DeeAKu&ymaK5Y(%|}+>2idKRhh3kd646Ga$C3YaZ8OZHFes9Uc|LeSLvRBijqtG9bk-YXvit9>hAa#;s4&v+QhNKY)b z*#!FOnnI<6J%98vV8c`gRQxV6Vj2#>@rZsnqW^c&OwvTda}JVYIYc@~t=TT*FO-bk zP2JD^pqBThIA_})njwV4nbA2UJ~QTAi~ZpH<(jk(>C0;u`+_`e3%Lq_&WZ=<_X#7| z)Otj?zUQ*&Gi8TdZOv>!$y};onQM~)U7;%9yQF%?6w`AguKcQxaQ4p_ST}VrxYzw7 zop#b^lk(*+M$sNfKHBYyhu zC5@EURvV@J#)h8;QfB{ztkz^8XFF-aUdj74wx2Tw_Nk*uH#*|h-W$NM;JB#KvkT_t zpQYA6k*L#@BQ!-kr)KH8sqW*;jl(6@;V6QNBkREGXgWkJUPyOmBp|jvCY{h~TDDCC zeN42m+(C^W>1*&}$zN%;z=+RpE~O%KiDjx^2PP`tL3BMOIv0(CS^LA$E-MJlL`~fG z%~i^h#!~3rTIjPoj_^;z0&8*E7X~Xt@1{v;d@{}{zI~tyhytw|3=j$ z7rd3J$NygD2)3ch9J13=#xCDP`_aqj?8VNQyJ)pw%|Uclwu@ZdeZ*V7#e%DpseLSy zacgB5x;UwD*`v##<8@HbpXA5R+}QxSwi2iK#aa0MtsEo_y6Caql7?FbqG8Ay`noEZl6zE;;W`WSe>Y25 zJlhvX&bN~lZtjIK#am=& v7`(@yBelVYJKU6s8?kZ#_IkTGAbPy*>9QyA|pvE^= zJg(XWMvUo+cQkbH=Ol3UQ$*|W~6Wu{4w-B(wYhG_(i)TJ&PE?Up1G~%pxVs=w3sw^ao<_O<%lmT@jC#Tkv1) zm1Lj#tx{=B1iqcLUwnD72YNm}DY%Z$eos^xIwdp4o7C_r;p^CpU`H zxHu)2E3$T+3++qIqApYW;0cM*^Dp_2kg+rhSN%dTtvBYxNk>6_y%NUw4d8pH@6eRB z4Wi9F$r&tDXa^^Y+E?$m;%>em{=PH{YeJAC=hkR$okBXD9PFFQVIr z57CZJAu^wxY82_3MSov7;nc`rY#YN=+(QQ~m)UYbpu_>Qs{`Sa6ZVii2*KylsI9|5 zGG9FvuKIfMN8=Y{s;&U)t9sG(+9QB}-#}?o9E8pb=X<+~DPo@~r#4lP>m?h0E8k50 zjcicMv{87vyA@i>46$jICB67AG1ij}@miLX@ixsTdzI@qrhrhL-p754*KRl4sU=#nnfr3~iA?rvyTx0U?% z{iB!5w9xsr0XLn~!m>a9bmFrknznTVw-Y(aNy$fGZ~6ZULW(J1e2&E`;Vg zVd?ed(6XjU@~(I1g6aME${;J4^~Qz%sJQdFuFGlDQ6l^8&RDX}mV0{_Q=HCw=x9soAs5|Eb~uY_`cn|or6(2IkwDl&u*LW30u%O&o9ub|5iv&T<(v|=dgEoh9~x=qF}CuXi88>R>y3zv&sYeVI*9Z1vD_ zwJ|;`-9r^Bn<&X`D8D|m1-?9ACmwyH#~k{+d=; zEm+{V3tg$h{q3@-TMr?AYajgjFcMWAze0Jb0}Bl~^4X9KHtq(&u7A+D9-- zbj9k51L9ZHSj@8v=e+Bo_)wk)eLL(FI|fY<20bgL$ZuM3%Ge*vTjVg-<_5j~5yVgT 
z^-G|T7&M7$5N@0G!G)c+$(C41?z%O4_{p)3 zQj5Fsoo?D**)L*X$npo`tIwWrzyEsJ|3{SamtJ_>w~Z3>qH$sKR@$>U1mjYw#Kpm7 zRP}iyEWC6F8XQdJu&N(xDMz8|p%^|@Vj}t5&xz|BR#9QsQjlG^A=FD;rNPfN#ELW3 zg7bJAej#1E=4WPzrjk3i-}nB!W4I>FwCj#zYWs4h5<}EF{aEPzUXe%otFv8PDCX%I zamQvooZkGB6bGo{&!63Q|EdL_wE93gnL$`5)mJB9xkGaZ#2@t**mF|{e7v-T zZjbfD#xrl=;L1qqwsr|Lw<@yJ$~cLyRS7*;+=E2NebDa5QZVsPgS7D{=!ff4VWDO= zX-!n;LB(H%wP~@mCi^^Xyy+lTOTS}xj>L3Lnh!6{R5|fN6fPcmNS?S=nWCRX;mLoA zaCo*a_X%GoK9^jDJ>Qz*Kc5efR@NWOQm;Y7g%1$j^Qh2ZG>>M!aObB==i%)I2ikoo z2g-F;P(n%{PW&;Oma1DZZaxPe?ie8$t&#;cZx)|BX>($r4Q5SuN3ZKUsaj%sJ-MgG z@sA$C_h2*r<9vf^*6jz&G6OoG>&Zi2+!R+Rb>*(YQdr?Fv2UJv@QC8}_-P!$u6C+? z)bKA&*zSr;3O0*R1yg)=!ktNo;C<&KxIUx`7FO>9B^zIu?`+G4Dj^s($CV5AZiM4* z1$2D26$XAV7rJDtvyWX2zb@S%jy)d;Bbt-N(zkDD%#AN{tHUdASuIqAJ{OG>ror^LWnxJ{FwOL}h17oo*d^HkPaHMlE_2(U`)Vyt ziZmlbjcRekwji`vs7x26Ssuf&{|MI&rDMx#Y5NZ)YXAB>W&Y8Iz;?~ley|r$(35iA z``<%RqpiI6Ul)A8c#yeA^A`&EGYMvXFb0yN@O$i8eB%Y+ZNL4 zlIwEow^u=+@k^JzA$6c5Xt89jVLv0-ry42urqF4b1JWzB9n=cmgzO0~F0FE!jVvmfkJO$U?b&!KRB4;Uw2 zq6|TiVgy&|8rhjA_qP)xq!!YB->tITh})8jpf_%c(%@I-HR8Y}Rp4{M6oYkDC_3*x zq#AuB-B*|4_{-ViuXn>B?1@bHcP0wMu3n^!^Zxi)Q-!~*-6-#K_oHldtc=I>Sq$a9 zdZV(Y2`{YE;y?!r#u*EtUrCD?IOYI!J5)(Y%kPQtU;U)HvLg7an++RoMPvCPQFymD zlvUpTga&OtG%maeb}wEFs9OZnFYYI=#88HoJeWRv6PZi>?!i;isfYeP;a;XCA5D*^ zk50$IT1kGaWOpmFq}1eYhm_9ReEC7g_Uizd4WZ58sHR*Mbdosk5^Bi zo-5_xugg!y^g=H?PqH#}#Q8T5(85De^ecW4AFyho`NPU(H7(UNZ*Z}=XR;@bliU+K zBxgalx1s24rp(Dd;-IBt7K|u(0TH1;;n}lnI(qtzU^iSJ->#9|9jl{IX?ZXFCHdno zl;0!u+M(p;?#U0|J|!!=`9l3!KmIaj9(`R_KuKTLl4+T6FjBUWtRN(Wws!x1xS-g>q<`QLl*Rl7Wpsq+DdZw*GLl#9am?FV2} zMPJ6lLoi0}%8yhvSkp6`e#`q~YU%=^z4dDPeoK#E9dU#1x1Yj^4L!I_s-@SMeUx3u zI01U)D=5P`jNB6zlFI68u@irge;gHyW3;Me%lAm`!6qW3Wmf2zszxg&#BW}@MgV1=!J>jC8D#e`2q~)Z@;%5WgqyJ6* ze5^g2jFsG0$Nti`)En^c*fx4SIfDwdWAVmZUv6L6pRZ{&P;`U^hRpf^!M(}^<%)7? 
zX7oLc7|<1MC%=P;4)yf$c^Vx4r-mubf2pTTo8NqNfalvcg72$A9JcGXFwa%MPE!J5 zV__6}Nqm()McOz{_X7O8X-k8;9E3@W&3S^Y4&QPPMSB&ApJBU`-i$Nltu_ZKe}N5M zGe1v%XAfn)mv6vk`fg!cxjrxIQ$#zAZ8>s$08acdLh9F>am8I#-W2#vINrHTh;e=` z#BJ%!ejB#Q=AL0erSv(d$M2y~N+dn+6nOumH{5&ELg~$FoSvNvi8l)2*2IUPW0?vC zGk(%K@g7VW(wRrz7Gc20j#x15iY!dOFOL=%35z@AL-f2`6xZGWmnvm|wpa?5y)-cU z&oFNHVG-Qb&ZLur$AkI3y%ZN5j7CNC#0SsJ$jafIxJQhKoSTDTtHWEEJkAYQETeWu%IVbE zW27=7mtyMuV4+eaYz_KHeslYChtuhF?1CECFS`f#PiP|tJf`3XT|D1w0o0EhfDv&? za9UzZE|J#Sxw`{c!9N5WGtAlU)EUtw>K2VP9n8%ZRdnXwGcjFsfbz52s8$)l-96eM z#pAbF-BlUa9a#ulABFQL`8+Y`t35WgTJr6R9J-vPhAm4XxT{)!P)r@pj`>PBY@RLO z*p@_(e+pQr5W`OoMR5OpVYGPEO)Br9h((Vdz_z?}$kob%@S;zmQA#`}e6-`KL8iR% zdN$Z4S;%|n-V-0Z?I&vVFyUcQz@xUNfrhW-HVJkRd!#G#36(pvqau<|6zZYB$wm3e zd`DKW1zMzP1~$JG*!yM?)zy!I0ap|0ZA2{JKT%CpT7Z}PXNX7Y4X|JCYI2`v&MTJp z#Mb<1m^soPw=)ltgZ2f*2p2I5vfBigkP5QYTf(A%1H*Y!PhyKE-pu3RB`YYmyc zI)nH9*<#PbKVV>SAlr_SQ{3-65D;U`s)cK*z$2Q2eFm|UqYBPH8-oLtPQl8P=PCc> z8DUz7@51>lz4(@t!%*8aQ0j4*!r*C^Tvnrtn;sF2T=|4PNx8Q_EB=U{!Tp4R)1?~l zjZviX+ne1#2pBhS3E4_(-LIZjoKT?0VYjt7DD^(I%gqp?Uo?q*)ra7)>(R7hSgyzhLHPVu{<|C#fB$TXcsL&)aeLvvH{QXdr8J zvcePdY~ZNGAK9g=hlT5csO?B7P8ym9M;>}%xWsupwf+$$tY0bE?s_C-o}LOmnUBGTIx#=4sp!LIbXlxGmffp<-CUyXoOE;}Hq zeS6;5I*^6rT=7(uhS2@ucPeNW#34Ikcvj;RL3O7J7W*>ECN@dlkB4w!)-KzV+DnBXInKSd7?)QBj zD3_^X&uoJGSJXuKu?{!$r6d7t6Y=&Sk}Fyl2!onMJWX{(&Eoo9zTuIkZBvw6`|5IvNzBMquOX zgC;JKe6(Fnj2k9-Z0B_6q&?|0$!;qJchkXxxk21h>W})xU4W7MzX)^Fjq&uDgHUhZ z0e`yIg4(7HGBe8vbQqIMWsSCEZ<9}x!?e+5vy^SJnnk7$bWvrWn>cEp4+j?dVRlGg zEY0n}Iv=(1$BdqMZDat~S$9O^tao(aQyg}_(w)B@^g`!~F|xp|3LJA^`aP~7uT*L$ z3(1GEKy4@J7RKSmf%Y6RI*>EXZ^P7Q^}=)i>ojS-H*Lsv zpe+cSwg%wu?ia+NMv{Yi`v!-p#n;X|?+`leZ(6yDK z^@A~g*$<(&sE^Pv2#-jar@kw6dCs%TU{ow)8_6#{F4mv43)LvRn-BlnF3s4F)xeP+ zJ@D|lp1k>03A`!z4n+aeAg3~#lk;9t<^5=GY}AFmE2Lbs^q#+X`&t-Sr~w1Ybl5C* zBt9*jEq7OO<%Fnw=+XF*jt(sZ*Iq}cI(G>F@d@SebtS^PqA$WPRbR;!9Ehvu9g>F> z>PfD;tDw?$7499Ex>?h9(+0OuP;+;ktcTfpXzC#OwSF~%=MZlmdx&Y*_7C#ZWzu|D z;?3!24G{z9EEmrY_yg|q??6C&E2$3`sPA}$regysbovhHIpYQu99HKIrz*ukhEKpC 
zC|ee~A`=upuMyII41#D&Z;Wd8kl3(WgupR_(Z#Bfj;t`?k>e$%&FyZ2(tmzfbj5_7 zzPLk8hdx{qG7XZ%;i7C>yRfq2C)~ABWxaYkG@Lg;SfuHKmE#Jj>d-hb>aGT!sI%rp zQnq@ioi=wnq0hq#YT(pi8_DbbMqF)`4B-hCBpeeY0h}}gDu+qvEtdcv+iC4Fm#jxKNwa^PgmZC zU4{*`!=)$QIntM-ZTn-!_;O*2ip0XY)r$*VeDU0Eb#yu^@WV@Cyv6P$^wbl@c5^e{ z<#+>5_wIxJvwy(<=)a_ud`kVFWfG}S^1sqQ2>jpBKbY;iE~wv!g=Z6E4TAD@PXyiE z{x#@qi)+xrnbxu#C2#pfC56DFow~}bQ-20l>8+I=d~qzW+u-7}2i_>0)#>!^?BUX| zvy0za%9glV$unE^WLmmj71u8-2b%J~ivQ9#S9JQj{p^^Nr@BuwwUd2|nbj9{fjG^&ENk2`xm+j%K!KNiBzXiP>A_dLRV$&A~Vo6yZ%?XIbQ7T1nKy?=!@bAHX zLK%&{`T-($X`$zj`-0-11sv$4#RHobVf=$3IO4u4j=pKiU8icXPU}e+F>5%locv16 z8h8=jz%3{={RvkOOcx$@aL0|g^SN|}GB@-?oI|FxI?G2OQ6Zvm0 zA=iPDBX9cw5+~OQ*FDp4eWJulm)JDF#;V}Kqxu|_cN>;|9*wzS`{8)PYS~89aQyWk zlIsM0RGZa@&Yhaf#XfqNkT^if&Wt1dxF#x(ETZ3Kw)C!S0b5G^%Gb}8VEJC@Eb?hF ze%2jFV}@FA@>VCF)=!t5_V=R~hfaZ2&PL(A>S6HFQR258#}h}{30a?qL#f1Q8k%&H z-2HtBtH*J;|1=zA6Nr+@NW$Nw^3p-^IPv08{1P{gGhPjY0Li})yjRA@t9&FzvZfH) zHw97cQlt&Zl(~5h4!?rEL!>(pOb%TM~$aPd*{{J|WO|SexLQuS0{zOrooH7hs*+UYh6~#q0Bv zU{#tiSNSV*sI{W-b?i^L|8yh0)Ujmm2_NCwT^ruGHwI=%4v0Rt&db%8ub}-6s+8TL z&#Q~Npy~b+(ZT&8m6>Vtl&3FXz1)`P{JRH=@A88l_e$aVd)GtNN19o05!j%h0ZLDe#}(e4`NTUT z-W@sw>owbTT#LxjMRlW$+w6 z6f}^{`zFAE;6&6KY{cBj02|iZaxcwtYE@s1r)$^3&g2HlSn!IB^hd*k-F8@|7K~rd zmUBx3kagF+9&Vv!8xH10hsWA~!|8xSpeBs8`tD52cO}VVUy#uRh z$Fkp}aM)#T#M8cAroR#=_&`z*HnDSJok%OH)v1xceBy>reIt1E?~!;bDFs*eo5$~y zJEQ;K2%NpMCr7ST#^-+?N^WNroVxxfjf+=ilLt<6_3YjB-`yAR;0v%gAO=5o-$O6= z8e#gbee~Dag5b0fL~TvvrKg4oJM-;7Kt^RLy=5W@b%6sG-|#LkBwbV zp9Wl|7`;crdnYBlpEw=d5%<%uwQOtUbudmz>8<&o0i20Glw z82j$%%(>_LfaTgVkTSeC{;t|7lf93_WrG&*B-UZ|!STF$%Xzt#kqz@=S4>^>jh4HY z3g_Ko@X`E*9Ktp5N;Z<;#Qp@oovTPz8OJYM-C)n2twNh}DLmL>i5Vp|;s$B%cE@Z! 
z=7#R3qzxl+rG-1{k6*~X8#a;c(~&A zr(7zT=D;(;UI`y-`=S4qC*puxrDS*40n6jH1?_JJaL8~5{)?E6J}tAbcZXzN^u!%z zR)^!y*NIp-F`kt#3V2fg1I_55z;o@Td)j+7mN(ebxTei=X)oQfE(z3GbPJDnmxVpdoVW}h2pJIkEmv{ zEvgR8qF|RD)bsv2x_2|2S2jw$vSD|qK6E>*ER5x~Ppo0Wk8li3*+n+XN5GB7h3ueN zLmk#e;qLXf;OGH4?0)tbKIKP{>}?(CznG5Y(!Ff`iv40~ljO=c+zYEu9i$CKOtHUh z`F)3}@bY^ouSto+)F}e~o76{t@bc3eE|sLL6VY+TMtZ2KgDr3Nh5tuu`h09H*GuRvx%#Qs>dW#)ot!Ls;t?LWHDeI%4SwEBq zj&zj$X&S(<>X*?71us0Wcv!qg_hHDU60rW}idnh`!MJ8D&zO}j9=LE^xUXAJy(4?F z-(?5>eCs1<=N+M`5tZV9-L`{ie^(rAse%QquK*W}$I&CZ!>{zGlw|S_^7HSC`wHjd zdX2uErf1CFdX8wN(1m4PbudERnJq@-!-i2Ku_fyU6!tggcO!3r!ILp^&$k1qt#kp0 zc6a8?vB&9U9-v=>4-Yb$#zlUT_q=!yq<0_73$`s~t+JQ09uueY>fwDMw8Lw$J0;_Q z6QabYn8Lq`Ou2_z4^iKKvv6*}}M;APY+Ts}``(T4#q-XBkBgcRicE#vr z3KX=ZfsXt-NuwfHlhLIluAUZwgQ6cp#Lg2mH6{!*6RP07)dR?VZj5EeZQ%!O6%AL1 z;55g5P^jUGi7R!nQ-%wFT|A!-hxb8`UXmYM%5wMDYLxbX12|`(DK1o$*ot0S{9xr= z&J0e02hB-B`FwwDzcvIGR#?%bY8y<|cgBSG3K%fc3%8G+$Ws?f`_xQb?9tU4RlRCx z?yNNEFglX;tp?+hb$Nnng%3_I&V<*&P1H3=7nhhjVVs5J?YetHIR9Lo>)!@&XX8rA zu{)f@B;J$i?_h4+>n^Upah6=YzQTxuDVSvt!bb|T=)}y9^lN?sSDGgB#pKTv)}Bav z=Z(Tqse7R3%F!5qX)rsE)R6A~4}~R*!dT{1hgny7?Hx4jq|6yb>TeTqr70S7}9*c#`GR{Ip$&T103e10i(Dc8pr7EdWr z`H<90Oy*Breu8I{5@KlzuUjye22O2&gq81L#Gx%6{srz(*{+@biE~lu4b>@3#4HVd+I$IbtSP21o&`M{d;kIgmRYQJ^m) z3^6Y97kp4##P1|Gz}DJc@-chLpmcs`>~vu$CwG>*0rE#6uk?cclQz<&hJQ4n#T+8% zWzdSmtx#g{l=7y}#i~x%mSY+?vkZ5Yil3D>}PkrKz`%kbcqzR)(+ zjE{F8!?tI-@TZcwcy-oknTNSCo3D()b5oAP>xY0jgFJE3oX$ALMiu+PYY+-+;Ox$J z$ZX$9*Y?k&t`$#c>v&DR&@=+iNiIf*Z#{AL-)m6z$RDk~S)hq>nYf~Cs^s{(20d;R z&{-!bd$bX7T3jNozhjO)zZB7{9?$5?x8c~S#R3~wOD>QDy;x!CGs+x#l`5;PIQ+#3 z9xIzf^&$Q7%V`ff`|d8hHasc&yg!}`#R2%kVgxEhjHZ`g%V2GO4O~mK=MUx5%zm=e zRa*TEMvUplnudyRe#h3@>6 znEwT5p|!(MENZN$cZnB;>@z9kUpt5&)ehtV-KXQ*1NOp?3R4`sVmuF?5{9Ca9v&`M z#0(Q>+}gpJb5}$N4Z}yVv%_i8B6$!-*DIpIMI*G?pwEgXiR`dqEsf~b12=T-hxdf} zI2WEmcc&TR*P@wR-g$=DWw$9lKYIc^HtnJv-`lCqSsyo~jT8c-a|N619J-x*7l!Z7 zpd}M5F(xe%T#UBT&x4iX@Ci*6HyW|bK8H3vI@tsAO91=mHaU=&KG}eN~AH8hfw&FOI>=Y^Ta{Bp=@p>7>w)8ZuJ8o 
z>~{((MM<64AT^Ae`A&H1@EzuyR7Jb;d-O^#mt2%h#5t>#aF5eyemKP+%szF+54aQd zG=#94)V-PbHHptJ3g?=6gLqD#ed5U)iDjBQmuKpX#Z~=hgFMHXD+>>Fb#3>fQ6NmD)9|AZQ=D>-8C zya8yYGz^Tt)k4WIeO&UNH~AMQigsDysMl(Zzn)zn=cSY3MB@%H?{SLGkrj@V-K0aN z)eR69Wz7p@zRx|_{0A!e@dby%HPp7KKQUw4Xz?ifO@ zlG}A`io^t(Vb5Xy8Kl!<#{v70tzZ0tklC-GHeVfY7zFT{@jKyrj2e#WI}{2>w9vk} zZ|Gm94w?oWlzQhYK}DfA&&p3?k7fO_`ul3(c;j%~|6w-g4``vG)>GL%c?ynnGk_U8 zZotahJyE4CS@@SU0?u6w(7}kq zhVBu5bU8>h`%(q@EPGU4F`WP8NUULfHIC1X!79%fzBhd`H5VrFK8*nWEzL`p&gurk z`i$kI2{%QzYHjW{%MaIHk!BNB(LB&W6yu&p@h$(WbYk{C7(Hq*e^uLm3RyJkQcsN9x>h#lObuk0 z?GnV$i!fB$&2RU903R$(SmEJ6P&Rl^e=}A=;z(+6#v@^CTxr3v2MLEuqmF4zbNwuv4qY`XAmv3 zp2A>1CA2tGDn0Y|&?PDRzU|9Jc;xQG1@B~dc<5+2FYC*{ujPr)qztxjX$f|yE~Bcp z!91&@7Edgm4c47q@$2b#GUdVJuu9rvwQgR-XYhxh{J0~oES-vPoYMr4Z8@UD7>RAs zW{tmBPh?%QpERf_0Ntx{DJo_NORwmxXs*nwE1!!yiZ_Wd%Olu+;Vje%?Izq(n~!-P zPmrA<;1laDus$=H4-N9g`5zszTRrf`aZ&i9VHjB%SCf)?9>vb{;k_ecv18m3vP(5% zj|p>7zHT8m9!HMdz8k7%Cu2+dZ<^QtABB!nI^jn<7ox|*fv$1wK#lnq~96x|liw8M!Mr+POR&5$(ju?%FhdN?& z+*+7%Z9n;Uv*yiaV=-*6f^3J1C1q`^t|&gG$~StoLh_~*7&12$?oYNSOO0aqZZZ<% zcWB_(S(9+ckUR)|tAxMa9iZnGek>oNf)9?UfYBLkak82b%$|D(Xws3S(TX zqQ;kZl}SE#J9;$Mk5AvVqHcSy3p*XYQon}3)FKYS=N&9r_x&?5xk!3`XWSsooqgFt zrvo1w(2w`t_s2hOP1N4?2srN6z=xbluCp4zUE1HCOi`fcW{1IH>u*Z?7lKx0)A?A= z9JW`)X)bPce+;+ zgfH?wQTc~hq5bM;uCoCQE?U4%nU?rKVpP1SY9#F^BS`qto3{@+K$$Im_;qCju5UHQ zs)uLE=ka1{UVa5EEatGz-f^sxHj^Lkn}d0yo{ zaIM)E{$9x@mERj6KF=1tqEzWm_r;LEVhK-4Oy+{JZ{h_%0&mg#}YDqWi)7W`dC;m~S%^wCV!Tkd>LCL`$)hp|W zDlBoH_6zE~dIJpWdja0Gr{Udu>)=!OY;n3Q0=>&UF-dZ9Ts+Z03v7L`{csWK9jhmQ zH*@O$%L*-BM)HDCBfQn6MCh3ILGmaC^U{)Xv2@Nrw4U;oM!wuD=w{Uj#?r29afdm$ zz26+}qW?*-llu6-`;5hzex|6mH=K39wu-Gi$6;;5O`&q{ADG)?2uBsKC!frPoIWUs zYqoXe;t(e~k@;81nKF;*!6|w+B&niwx&&7$*+eJ&TzIm~8I*j_30qZzWS_T=qi`4^ zE?ChUqn`HRhc0(vX_rSdtauxYwo_)^fG}RP>K=?}vQg#e~hL+hEP-ftd2#5-*G@hq}2QFx)*)sO>f%e@ysJDf8mt%cleK zrOL@TNy8A`uG#P~-zHdf;UKw2_~GZWXx5#c%sHB`pwFKydN_1Be3&s5w`lnCqjUvQ zymx{Wu4~}MW#{R7LJY3ipdwgGd%vfFf9af}i5RsgiMKVsrbWj#(A)vT*+qS|JVbhq 
zrJOR8zK3hYbE|sbcz-KilC%~6YDLiTj>9nLO1X%-L-DTDAv)W=506%9lV$d>;FTw| zIJTodI+stzps%iXX>St+Rzmy*gn2%l`JHm%6sTf#fhznijW7Xbm6lB>#-CXLa zZpS2!4NMmYE;EFKiH11WJ(%8U?}zPu>!p7?685Lh2kLr}_?0G3@tDVVU%wXy99Nb6 zy&X|`_Y1fmWJSmAyWwWdRIWT8Lv>r2f*h=|CneyX&MT>F(IA){V?-HKb7FPv}xPnHYrm?LH5 zJ2<1m)wz<#62N>x7Da}d(}!(`D0#sIKG5ShkmO5^^ev!$GmW`ub|Otx%ml09A*f?{ z2ewK*oy`4%aXS5^+1dByyHlds?XDqzcfJI#n$N>H-Tz=0jld3r??OoWPl`&7pbfb> zLew`UB+H&=`HkfK| z%C9Z|(Y`URG*9b?C?9LXXW9qwz4vKsx3htEk5EjJJwojAKRJ-HrgEcf`?N|{1+M{lPGPRX-rpao_c%&p1zlEsd5OtCz zcG8o#9k63t4NnYhd;yo72jXPyF8reJBCz?;A55hgQMYMz;^vG&uw>vmdY?EGS0s$% zPxFnbU|0)z=JrI>uct|OP6|;#9D3~^%|lKXQ-xWalz;WZUPm|5z^g!GHnh_8KH*Sh zvD`HQ)W-1LlCFIEUo~iUxl8NL%x8J0HR5_HN4Nj+c52^ri#{AWMB6qya{Qg4 z(!-k;>{1-qoY z@he)OQpU;cD_Ar2oaC+iK@aMd@s_iT`QMRRq4vs2x~mw@WnRd5iFReYlJc76Hv zf+~6#auY%}B1X9?VA;lFvXwcSh!!Q1v*s9HTz&<6u0BC?l49|QN+sSZUCm!#+`>O& z#lGw z3ENC)Mu0Wf>u#e1?(M?$&rhMxwBvZkP{=ffivb8tPKP@o{ zl~j0^p9e~VShD#gu`1Ga*s6O1iB_|OE!#ek`HW&XwB$HB`WbQ4=+mUpbFX-RlP^Bn z?*S?yMPzrN06fNDqq;lVyxgmW9v*YTnh7=Xi=M?o=BU}=WHLfH7hE7_U3?;JZ7Cg5i3w(hGMGiULc!P!adp;uig7yzKT54x$eJK}bWSG!h55!c@68gG* zg}8TsDSA6uK=-`$fI2Ed?a?EkR%-yqOeFtYi84kH4hCI65hDM~4+^sNqB5s=I{VrJ zqiW;9-(4LxZ!x3NmT;-BjM^fcYJy>3v8M-(b_|chp4*n2mM);JARj- zDLFGVMqH+WXMWR_5uQBP`a)oUSS}b#KH-+YJ`nQrA^GccrEJGqP@Ziju8aF7ZhGyE zm$tu=e|8L|LgQCt=Jj4knCOlMD}F%i(3N6zN=KNevJZmd-Enu7AqV6u;DyaryvJ$~ z+!ou!L%j?jS?XpDcw&fsYwIazZ4``Ja8gK4u7u<l^pgggq|+y z$m0SuxWOfz{=F^(x4TDWryR6+t&TFurL2;R=LGn;yp|LsccT5kN?K-9NP~ic;DzTg z@bJ1Q%|m_)vHh;ms`RVSmibjkIR1hhcUj^gpFnWC{gl$1kHd~_H>j(`xLsUoz|pQt zVZ$Q>OtLG5enB0vf0z}SC6~Z~i+5pqPKmJd`xt0`q6mo-|3TMt3x&T&Eb;LGYrZbi z67LloqK}OR?-=n<)X>wCo-ebguGybe#s+Y@sl+rbe)-uVQGKl}^XjsB!*LJ$zqm2Zj3*AvU@LR()AS9y)Je z-t6U&KSD)Nntu{>cS!REVCt%7%JRvHvhaJ)AS=NW_~12&zWr1fz4Q|39Z-Y>FnwOV@aV-3(#vcX7KeO-UbVetFZ@qZ-#ORh-uIQ+SI-yQYM+6f zV<&#T!A?4(S>VWo_Y%+X0bG9YfNt-~0Eo$zyk%SAv75yFmAxdM6i7Ync0k`SDKBK< z$R$rDH@N>?x_k(*Ip1ePnzBDm)@iOrqkRkY5tPd6&s$W(!H4)c;0g_ zjd`ZUvbv>|XY%)~)1SLAvX_bc zTLVWQcg8z;65H*2E 
znu_YZmW!X3_)B^3Hqubh!lB)baN`JsRCeaK;%hoPa~rsHsiD-VD(K(f4v%f!AoGAb zbbe|fxkxvOud;toz+n?ix@O38hTjpC16IH~nFi`ed*<#@UHIJY0??fO4j!M?#6wl} zux~urF5CJE8mPyW}Q%VSwW>_}i?AwOZGq`ti1)CC9qL=XNdbH&Wt*o@+ll zR8a*lPP`==-EreL{&Rw^KasB2m7O_zt_shyx(4swiInM(3wrlk#mSS^pm>`hjxPKw zbu&A0y2)SIIo6ARNUY!d1@_{m+xmP?DS)$@EO@q$HtH-I3t#>|hPTUGpxXF3IJ;g4 zjb&Tmi?#}XT)h|!7fEw|t547{+ZnG~-=mnr%gElef>t}Pg|Qmn#mXtpXuhc*e00(0 zMPmZ#hD=f7R}Z4b>{&2p+ju%47~sz| z;)4T%UW1}w!K-x2LE%9?Tsqa4+MBG|rMn%^xHFbIx-5g8GBv83cu8n){0NT@SAeMA z4yI=Y)4F3VV67NO1&1to!U1#GIJZ*xP-=^Fq%3^qM>`nlExE}T90&J)hJ5~#D+L5@ zgA2Orp|NE-jH&j>!dw|AF{UuQ_&5EZHOssE51eQPLw&2t*4Y!5#t zPZm~Q(Zr9QkErL5wO|p{nfHIshTgqB(4e$JY-)c;tsA!kH6EtyHxi@c^ki}#mIj*w zcff;fmuZ`?4qV@8g4GAzIe(uTrv({`8iq=EQ`$xEpQz50)VBrQ%rfD(8fti4n(K)^ zkDzd>Ij%2@gk85(VO-WFv2&=ith?$k`gglGt%z_YucH+-bcH)#p7l<6-}pgT_9F%w zUP)b#$qG`2$BMhR`r$6;iDVnv40TSe)Z@@9N`GpHiW%)RUmJ+M|t-eikqcYe@y&qi1$G0{eT)}{E$t#nlRs4!s> zi+_i85rd5XgL}iI{_NU2FnHNlNc!`K^z9$R*I}=u|Fa$DhV~Kfo$MxD$ZHTDFa1Fm zXLZB}1Hy&beXX#tv0ShmWy&S#A&?-=ufHc=fob!5Q6ImDaJg>|v<04r%_V1qs@z<9 zHL?cA>R+d~m0hrSa5vB_ngEGQtywI0gkV=2Y(8X+)@LWuyfH@Xo{}lKy}QU}{%C>` zy;bngZEs#X)(lhIjhWYbqE_opVaZBoG~U-kOthRuY_gK7odQJd8xnIZ>=A4+Y^3`? 
z)=+Y+Dfg~Pr=>r~lgml~%133TBggy;w zlX+~sK<^6Qij9Y#fY}ypY{|Yu2Zk9@(2N`?7=D<3?e*r6oA+SyH!}`jx*l}9Ee5s8 z523TE9ypko;z_?=;ymvz{AzT)uuytGj+r`?(kA=ymrd1BQtUw)6AbXhK38ZMu$J7e zcHy&Sk7ZrfMAM_Lo2d5PV`#qYEAIMNDfrkJVr_99WdvN8-f!Y5RrQUSmE8-bsT_h7 zgIKC7eF&ir=E7$+8*(LyGpyN6m1{d-wud&lYjk1tSv%-}f+C8SjzatRYMR?*gvx^n zu1quryX4LAtf>o4nsg0rxEhe7ix~$vSfF{F#2fu-jB#NH$kL`&h&pkBD#rg2TJ{CX zrv4lQhM%tsu2P4*y0;p+H+pi|njFz~hO`fI50fuidz>xFD)dkmR1WVI#)SBAzpN&~wz50D+jRgU&Tkf%Uj9R8^J{3W z#DrO&+665i&7kia{t-=*cANc2K(}j=bks%-Gu(%e*#dtUR?<$A&5XLgxJk!ck3sKi z0et)GYTEzQ2l^{%^Ryy6y0?ESY4+#??snTCU*h%**s)i1-5E^gaeHOwyw{TJ)&%JC zw}~cpROV^dCP7MMGZ}dlLf(`Ni0}&?5WXvqIy*e(6u4IGgp;jqc;klT0sgB2=F(Xv&E^u^D~h1D zi}Rs%?P0P1RzJ`Pye7PH43Reko`RR5`7nNEJwyeaq+dtuL1ptZ+F0t055A6)>r5_y z*6~)7m(d%w%hg$T#R$31n*QX~{VYEpLc<|v0R z@bh9PH0yC#xSHgKbu%l3#gpC&)o0c~07_hc18;uY=7-~3D}?=$8_DU1AKe;zRd%cF zB;{S}2>lJ_lhLqTiTz#(dXAYwkDm(ccWl3G)DcVY`S<~D4^e@I)>WiZ^oee&Ul3!X zjfJ`zub^oYOt@sD9)≦GCr@SZW`EywptSA!XWDjo3?b>%NiY)n;+#YBlJ$uM4k< z?u6M2E5t+nf65Pj_#+-IdQPF{N5R%;7BqJ3OMxzPz*eDB+|cry?CU0zZ^0LNvEFK7 zaOF+9u(dPK^6-GL@iOZCr8A6__O`jpbA&Ft|46;zN_g&I#apWP!MMf_jD`CtXxBN~ z748fjuK*;T(UaWiB9;Ai;0}`x(%uK}>1e+$m^Q=@*C$P)jA34Ud&*a^TQo!7e}*;= zx-yW$9otCd-4#lnbdshG*Av652g4Y5$uZ}CfkLj|6fQkCl5#SVCuD0lZLW)#8OY2b zVw2>Uv#g}L@{{DbG8cwzoG04l*kJeoMI3DGDNJuYNTW~c<9@j+kL#h&*#q~BF1_~2 z$DBS5q0g^E&#_Ww=j}?W-Kovfzit7YEfP>B6m8 zp=QxHqVx=C}u8kBvvJI*fr9hum$#lj+75fe;g9EDq`2C49^k`cZ-RqqL zhey5_FFAP7y`pENRIZN74`-8_i6b|}C{hQB5pn#XKKBpb4>i(`|GV`AaIp`8vE_Zp z`rAa=i*vbDaAr0nK`z)QucB04GrT^)2Ty)C$1RVN#Hw92!rb@SG^)Xv%jM1RI!NOD zJn1Klb1MbyE_veHzwNYC_a3b2aR~hH+N0~$Wx_j!E;vC{C&ex5xW?ZJFB_`z@avu& zq&ZyJJO2PBE1Bc36GgN;F`7a%>IH{ebHHZgJphF=LAYhYpJo}NR@Zf~OFC~I$O{+F zK`GUYI!jHyMUb5G?nok=y)1q;zR5?H`v9&As1= z8mVnyG;k)|h)k4w4wig0X-gnyfj`C>>?CD_zfjcgCavzb3GVOPB&IdGvRC)Npe&s^ zJ)UMj`QSy=HO`YS4zb6Ic?~delM&81p$R)PkI)nqOKvIF;4#wP|GVinYWK6GRfbYN zcybO@7$eAlgq`Lz(mB@b~p;xmoQqVW!GZ z;rXNMV#II-u9m6e5&g~duCD>^T^RunKD)8&$;VPp+Y~z-8wQK6?}H8_H`De{k16@F 
zK3-WCDUP~2IjCg*X7bd23&9uL!Cga#dpxvd)y-PC(?kusjZ76R&HVYt#*Gxhiso=d_T_+x{ z?$YBK+7-kb=F@5EJW)`*TjB&d^HLo*;nm_^;PqSLWtay*+RYpA4;?vXup-ZquE*>n znyB&9krVhBWd%FYFFZo01|6g3<34QGH(pFStc&MQSYd+HN1Ic20`6Q1rY)0h!wupp zsbcEQM^t~=gp1BCf%j4#_?+Ea@YAwJ=Wg>LF{J=iDd`2wgx<{q-qR_a%XBf-vJ; zF-%$$M%}NQ#Xlb!;Ya3SP+cwUXs>M(GF%qPba#bF3_o=tGRhUYpD2Ls zpH~T*>bda#kP?6XZbH$$oVY@|?w+oHMQ86=bHrQ)7Qe2Col7M4NwPBc_n$zgs~aJo zhSFi3G>9H9m%alMV`JB4*lzd+PDXo+Ayf8J_R$iVaBLa$7+WVX4c|euGr!4xqwq5o~V*=!TJpcjoHL>5O zPoR;l469!z!rothXJwwHO3?5>h|u8DxlFGv3qs>b|h_j29m3)serwz6-UE zN5y3^@#H7ALT?Esx8yBqgnWN%M}F2v1HUaZ;h=Z(ASbtp`qgE_6LlkaZ!K{!JHCPV z-LI)rODR0=XvRhoEZLx}jyz9Zf)4|>lhZEgRNjllgFior3JO~2I@bd`hjd1xXI*$x zXr(OD^#Yvpm!2sqPPlK$Z8$YpM?T^8ExOR`h||5F(2axFA^KPqZ5^tQr@E$!%{gXh z+PxBre%+>@u8#Q9)__ybAA%F#yfJ5@61ye6Bc0qWu%*Esy|yhACVuDukAFOrHE%WN z;K<|jEd2>Q`+0~0j#SGQPu(M5->V?#RxcM}_{;~SczlHf4MLewf)U-=rOL`mMQ}N} zLb!41GYwn$g02{ymG>*y0f%Os7G)Ne7+zaUP3vC^ioMLy>Ggfu;guzPH~b7KYZTcw zp`Cj7>CCj`lPEqv14g;K1^bECcsnN*nl|01HBAp`VwOAEym%?A%&vzwMK2*QWi?%2 zvkCUATS~I$23j1ih;@=%bx6Dx_Rl0ZadfcIz58hL$~VJxWjBR4XUZwR^s?w6z5CAF zmM1(IvrFD@^=ElXp&~o3G2~_cr0nC?g8ze|^Ki(q3&VIzd+)vX9!Bc9k3AwJd&|fc zQPyWw3WMK6L>HIB#cRp7!?3^fh_-ZWr{#GE3Yh6fp)?tV|lE7USy^iCJSi1*}skr-T zIC{jKpBgrRciJP0?1S_hDAD#w3T%l_i2lu zLg6&--V6;gf2uAl^0THdYwSt+I%lq_q_RjUs~k&9UZQG41z!2~jz5zkLH;iD=i`h< zljEYXsPo>8>*UqR^-gK@W@pw7?|pFBivn1%#*D7YC~lerJ;}az-=rGWr-EnUHL{8b=n)DKs}VIAo6S&nr!t$HIFf*$zeX$ zpHd+n4`RS`@+s(@6M}2Dj$roljd;E+&HZtz5b8+AfYXIOh^Y_dC$0kk>Ddo`|IJ_jy%%F!vS!>}Om2!>S~ zlll7$NnPy=cU#8v4BIDwua7d(v)O`#9&;gob_ZkA7g>66Wd&>!9>zV%R`hqxPi~Q0 z0QmUM=O)Cy=Rhf!FAV$y+)pKvGMo9@miogF&oZp=&>`D1G{_N;4KV5JBiJ_A2zE}E z;=U`sO>6$F)%?N(ci0&~Lxf}<|ADXP7NEvPFW7x- zFYcH79Ax;ACAr*_&@<>gBMbyVFdNH>bJTUd`;%>r2d?KLjj5CMlK{(uj5dbHjAu;A2rDPmTri=P%*l2q9{;Pq9M zbnP}JInRRG_xB%kscO*dcp+YF?Y zlKbvLq}SlLTZxc7g?JGX@72m{9IJ&s`$~2Pdd{h9orFVDjrhdW4{A3@0X<+$SMU0W z8?QIGx1P5kUH7YT&fYs*-@e~i=y?tuqW^H8N4|i<*=z8pV-wCAPT+@!uY#e=3pmo= z0iUM0!Iv{Cu;buq3_Nxpf2$y77FJ_-^%YQitwb9wHlX}W6zCoQkGrk!h_goK!Gq>= 
zoR`3y2pqjJY+eY4Y-Ro36frnhqE15w*j@Lk1MKZkfjCodbeKPihC1jWe_=PioTN({ z7KL*&T^gYJPAjZh6b*J%mQI)L2d^qK{8=*>CY`#^hbKFckz3j{vmy$vdv?Rf>pK3% z;0gZlgGvbLIE;UtqVeYX0{%g#7U|gMhxFevPDOea^O=9a4Yxl+fN3~XO4-m+c0*Wm zJq~w?9|aGOD=4G(8P54B(GiRJP^8oW>%JsGq^&+V9~aDRON@hA1NN|fsSaJA+{Nu% zc@58gdxj;!^$=1l3N!Y^LHCXgD7CB^evc@T!{S;sUN!ec9;DzQ5HMI{J~JpkzYGvMTgR3!8vC&Z0OJ65?9O6x`z_v%=lc! zo?nQ0782;YMVtnI_zh0(QTQ!Lh&+yohiO^6@Zhr^oV0iuc)WNIyQ*{G_=y+XFM$#{ z`BxfG%XfmJSs>(AWWu$JBTzR>i8oVAM(Z(Ee2RZ2nEKAgqMTwZeJx2o)LT*cB?^!| z+ku3Pevd=5?D1EkpQiCjV4{wB*}xFs&qk# z2)XyG6t^}k$Jg#Iuy=^LMI0}qdq)7TvA!M(S83BJVR^vhgE((>HYZ-!!a3THrLL7P z`Lo*U5y{)Y{wmUovZtqjJCDeu*&lj_^S=#EXyT${%9CZ^6Te} zp^ulaNW=VTI>hx+HNNwFf+GP!r1n`Cmm9bi@^L*@pC|lcA!{nfGMBQ9rF;9{HaPpK z6YhmLlC|x*{KFOr+VG6^rC81-W5q>CzZC^1rN)r!l{p}!xf-U1?BpjNCaCDygR^!T zPzA*r{Q1h1<_5Ts*LC3BBS9A+ttXcsnCL1rBDH}I79I|d|U7XE&eM3&({T5 z->dICWI>TgN3{t;1Bm1E?->2eP|g!JSkE!DUkl1?)U5WqJ-Z?hD|+ z}5OiqI0Wp zys;6L@EH%^#4U++#uJz}z6g{p9dPg2C0HA6O*PLnLd*VKE>LX`?hDW3hox<(qgM-f zj|l_GVh1{N_cPWH(4!t!mc&Ny8fScQG{uPpuwj`!ZP}&HKgb!y88<4C$>W8{w>@cs z#vFvD#UkL-p-c73O!*m8*$#$dxn31_MBT0Cq#-a{NauNp2jaF*1U_*F-#CKCmPB-a3Y&$o&Rb^^6&oS%ks-{+Cc#{ zH?bM|t~1cK@;bJ>-v`@VO=xbzTyym_GojYgFiQd^77 zTdr{;K@SAtUmn5^7hCF>T?w|DoA6%oJ>Ee>h5qqO!QLZEw6N&C``>M%#IHXMTUVB1 z$kcFj?3e+jo6A6bWiOa?m4jl+bND^&H(HvVz++!E>57^X)|ZqcTjhH2^R3-zTpTD! 
zI`|TD_3X(;IU~&4r%%E)bJ0cr3YXSb3q}?{;g44Vv^}u`wWIss-0#ou*&&=O-EIjR zUJPB> zF9s|UCu(fY9?(CEoKxDypT>jSrd3<;M}smnOA<6O62lOSAV^46Cn1V9M9E=<^E9#K z%r&dH)>b)Md;StsWXRCSv!QrbVlK!B--O{#XL@}0I~ZP}O;XPe!Bq4AxFMJb%Uaby zBKQeB6fEMWL@|zRx-LEUPl4!#%eBD!3CNORm3_q5~e|PEd6qV6+ayV-LbM(O=-d@Dt;V3`2d40bR+?=BMr$!lB3%R8cm8@eaPYDZ-ep z(aZqF&-sEF*FSE<>E0NgItAPRY=R>b5}>hFnA~7&qw<~EVA-lkG-CGfveOU3&g{Q% zXV8-M<#zE3iI?&5srPV9LW;!iQpLP-2~ymV3ZreVA>N-3ncr`q@#Zwhj{X70P6ni8 zVG~UMxd0=ho8ZQ}UvS+h0K(f6=VDz6j;C{tnh^7KrESHJT_Ue;$4x?%GV@nvEKNB-Ax#6YN z1|(E26kOk)#@ElyYtkobyaQvff98KC!W-b-#14E^`)g_s`?0 zjiG!{)l>9rISvbST= z?mWbi2o)+X7LLzewPF?Ha4wTqC)sbj_?YEWd7Dx9;Af;6iE|u{>&DImu}R`|#=C6y zwB!Ov6x;-p>RwELl8hsE`51QOC@L*$6GSf`he2P)V58C|w`KBnq-WF+B)@our#@@~ z@mu0(tU8}>EcU~^jnlEYaR#h?n1b)NZ*jADu@>-Q6O_Cbz?A9rXf7j$x~rYx$ntJ< zemPU{NiP}f<4wuf9(ywThXBVhm-Byf{($Gf2N-+c7#}U9PNu1ifLcN*Z#w)Hmqu&R z14(7boehIBaS0MFc*<8#b)mtYHuP4(9rT98@n5!8;+2<%w5hKFRIJR&)?blmsdkqW zeZ}(4GY7Hd!C#muW=&bk_uNvgQntd->w(%UCx}{7^SD4U$RaQiuIRSkB$%AvL4BdChm+N$~ph^n* zeDnUzysOt(`uczwzdz#?)*lfe{s))9JYN$S@%+dQ<{W@-v)5QX%bH|cN|LL^@vySf z6~Aq&g9%g2dEv@(?!OEnS~dGTr>ekqt_u`FU*j|$QC6jyF)1kh&zx?a%f4qLZ!q}x z0aT8)Aobt8`P{5VnAFz~4dZ0Vv06Q18gh!Soc$Bs9USR0vniN$LkT%gLvmV0f{Mw0 z!Y%csxV!WgJaaTh9IS!s9+Xe7_yf26A3{^51ktq1V!uP9G2JB-I`l8X@r`Hj%h)>P z9xXug&!JF%K#8baHUrCS3F=7w@B+&ar8e6WC^8Y$KZx*HV}VNje4Ii;2*8yQE90mNeztR_6g3x*X5Sv zn(Z?f?a$aI-{kSy`%jp()r7?DW!z)yi}1HG5hP-)sk+#Y@h_JKxlMOXLyc%ZFpO0P zUr#eSvX$M%yrpRB6D4ZsdJ&ZR&S80In;5}5h z%_w9q2jKyZ=sNc+wg>OwLkMOb+J;i24Pon4 zN0QXe-a|bsW83$Qb6u-NOSh!L@c>a;!n$ier+b5tK{HI+*eGyOlo9BQ=W*5=AK+)1 zH4d(Q1saNbo2{Ls?dEF z$Cw$D_{bA5W1O5yzY8!Ss9y^7N@G^_r`VPVVP% zn!$W%d}R)k*}UtIYCZ}Du66HBz6tA@)7--S0`3u&B)z#>bYjN`e59&E+VQfRV zc6L6FWiAT2%ZcE)NCveJx5Jz$bCxw=P7-MqTHP0hi;g_xggySbDdm^mH=4f<#sm+$ z?cMWQFgL`M?CG*1#=bEaY9NM|Pcl(q^g)bStd3OyDzq!A7XoI9)5OFdP=7WE52VY0 z#c(N@J{^Rbqeo$Fu{z017b3GCy+^S&Gr`#b=51_v495-J;m>&yVsrKfv|m1f`|L&N zIgcVP;~L;UDMJjAQzf3J87M1p7-M5I(EG(Pca<~NWSZU;LFM|1FtAbviyrOcXR=(} 
z^1JFZRPqaVz|fH9jjjcSTg)AHa2W=ETnBTXt;92LDllA4l=k;`qE&qahRt}+uV8)J zVwOGlJdBO?3QIlp!lzFXQ2 z6Pjk?+Tmt?XZ7#9)2^_BRP{=xsKv%g8l8k&_6?t zWE+O!ci}?3DXl>a!^H7lj|KN5P?hl))aX;Y%V>YcnAZ0hlQQu;P;sdnP3sh4Vy`n5 zStyHUYgTdx=6mtuLXWVVX)=s3Pr=xdJnWoynv+@}=f1vr4=xHQKQj_ffRL+{OcH@3_cPaUN9_Fl?3xRF>F>EQtfi`nN-@$%7@uwdQ#a3XY zYBzs$Cjm`^9!P8#r4NmEVcMr-oXs*d^1@;oL>$h*RvzGcLmZY>3(+0-1EB3d6dz%|)6{IO^)q~E=cofl&;VC+7~I3>ay+QxJa3r>W*aff5O%OEj{IZ`4S z3np(DE?wWkFFqK|JX`;`BH?WKtSL&?>q=9@`VRDC*{YV8-?o>OrVc(jB}Si=RGG{(Cp>_J}KDCyU96)Jh_3wySI!>XMjsOg-E8l4KHz?!)_cCE!x);B=e?hDt``w%bMsgZCq zMY@XdoM$Jg3SQg~#s!+;yuV!^TE3bH%cEbw`^ZXuUc-G9-hUU8E5q=WZ4j@K;Y62R zlqdRLJ>28+bR06djdd+n@aeHF-LceyZcXZjIyD=5)IbG=g-@Z)l?T{)!hy&?eug)_ zBY+uOlcwLKctzAg$1uv+YQjaRyrObQ39ykvd+U#jY zcBqeryp5Jr>f<|ZfxQ&<8b6BK?yAGO3}gCus}p#N9)=b3p7D-HY^mI74-l<6fd5DU zCL48P?2lnyX}vx3`P$>)YSy*7J_4VI!(6P@AMObv4Cc_xG`?&tN#tDs!z z=ORg&UGVE^6;9i`6`zmKhDQfp;k8B6VP%{MSsg1zC;tA7eP0UstZ&A2+74U#)9f-# zT|bCoUzCZg^f&ItCudSVRfvdM!e2lyA`yO~VQ;f7mzZLMGnD>HTg?VmAhlB)F^%#Qa-)j6_E|H&bB@QHhUB)jv)JXnsIqIV?VK+=gbG}PGdnc9*UBUo^_ZTq(eIcr(&4767yV}(R)7n zRM{MOzg=?Ns*Sev{F`B1k~@lVPQ$s$Ui#Ggi9Y$Y=LxRvV435!l61@+O_CJ<69cW- zJbx_P>BNdcXZLAXnv(&w8^6P_#${X+bqr2k3dUIz6M>{JLQiE6AePOX+V>umieYY^ zwBYd&zb zuKT&S>1tH0=N|XB*7Z@+Yuj&ljyZahbsQ}0b=jByXQ_1M2(L(;5IO?pY3XN zoTWgT7=zYDKbTK!cnxAc0buMJ$+t*vM-!Ig^Kn)o>yM=JM~8zk`fnNtOD%%fthu=9 zrz!p?sYh#Uw8*mH#~2ql0m3%3h5OShm@N>8ii9Y1G_wSubMn;4>In{Cd4vUwFA$_skn!HffhChA1aO2B9G>E;1b=M1V$~A5JLqZaj6W4?D zeAX8mevdO&>e7SWp#sGgQ>1go(8haTLioF@&PacX9Jf`(T-IJpL_Hp^-kH;c(DySe|Z0GCl0*(CtHTI;b0_u)E5~ zi(G57nf3q zJh2nj-IAi7&npFQqZivt{yd^hCVYR)JGm6Vrep`Q&*Ut2MXHd} zT1l`n)`PbthXvl>y7+l%>RitYBbX`YzungTAItA=U$r87t>%pF3WSyRCkVP|&aSq{Wcc#M2bG)Hv#@+QXuNrcP3y&|nX(>9vKYohsf!@# z?*o=itC9D%qTe|qx+P;Dw`FH2<}7;(N`de5SToO~0gZhuM|R46#_madhNyM7FHK&i=P3T$*r)+ zTa>mS!>0xo}P4O~I#Z3B1(C@(AS(MJ<)(7^C+PUbeQQQj!;Fi$-%= zHb3##)mr}gSWWUu`4>KI`VZ4r8qq7oR>Y&r4-=C#iBbBbQNEvRf4=~q zY(E1(9uHx}!U}X(cmVf|rQs^;pWVqG=Qi6D@ulZ0PFgJ!vfZrsIk$I0jKTog&K1Bv 
z#U@ydmc;0i9ce1IC+}_llhRpnko8{*-|imIy>^p>wsV-ISr1=Yxt({A~2k8LO+gd?4BA~{D-Uo4yKZpIkVDQY&{r?Ii%5u3=#7jWFAgiJwJ%`vpk*^e_e zMq_%SH9QY(N)>eUR`XNSAYMQZbz7a|fu0vZk z1NS|=0E!M+pey_Q9&#zDXw9pQe{n`zQU;m)D-aG#3yg@E@u?{GG-HPjjJ>Xii z1-;!c2p1a6h~9fWGSa@9e|R$rcj+18){{~+b5*-wb59DsRWl$vTz%1bjw;E&c75E8 zTjI3;-wp1jxe;wRCPvaO#Dh-TFPt_hAMby;26qm%gPq&}*7m=5({lccWi~?8R@#|M zu+|{8ar5!{YeQP%m=BI$9I*4+Fu&haoDS?i4cF@Lv+RT({i?VHmw(*^L8;QT=XMe5 zl%3$$*u2J~!OulK8yrbTz6NdWl7{?5brht|a9b5@15ysAbB&8365Zg zv@(4dEd^&!-hfS0p5fDE9kTmuH(symrT8*3quY+AdoA#f+aMe`yPq%H+|37FR7cbD zO5S3YEGHyh$&XHs#KaZi^iQ1>Ih|t+B6r3T-*P8pC(U1dy(Rz59MpX@1? zB3GUFVrcFVI_}jXz7K2poOdc@ZrU$&zn%k)d#9pz%s9b``|9xD3ij^}*TFLr#HfsU z7aA=PB8Rdbfaysu%ncmCC5?7;!7~jw>SW5>SI_5vX0LN!DB{CCOlF-)!Bt#V>_}9- z%OGC64wlVi9$Vo8Ty`lBqkeh70mjF#>or6P9dpve*m-r;m7MIveUO$v$aj|90?!9L zn8pWjseA-n$Yt|RiO231vv}zE_e?WIuF#NuyOv6;qfK{Uh+)$3;R)0^1l-4R(=QEntuz7y`R5qK-dkw}NWxQFA z9Vzzwf)npL(hqkm$jpXau%*I=Xy9I0z5FG7d?!Qi2eEv2S1D}ZSmsAsoFDr z9xst6H~KWmi!Zu@?Ny>I|I8ejHOp{R%@aXLJ7bK@upzJ2F7aDg_tN#42}yMP#%&jT z=j<27bK}lOLUH;%&?ryEn48adEh#hR_W8!<{%zPa=O-*M-il8G&!9!jr}14YHlxqd z7(C1~hkCU%u4A70<8ir=@xvT~<>g3`h&_RuMzrLY1lez`MoT`N;GgeqgJz`~*maHl z?dMf#lfoGk_Os;9?LCZ!pXYJ%^2#*e>t4aAq0NvXodQH54rjAY|C%wZ_p|&td+uJN z*N!}RRi**6WQXu_{wt_ZQ>Ihwop6R=8#rV|!eP58oPQw>HjIjeC8pgtWkLvVG}4J; z?rMPVp>P=JqJ!!donFr2uv5Zy8oB| zhI43s1;K%;%jUDht#gzK~6~jHnt%4bOCe(8C zJFdKx?NS1TNV;kb4ju(=Y?=a%+TVgvHabHzjp`{!LQ}$BFg_du)lbdnf= zjIC8EOZU!_pf5IW=Hl<0QiJ&4-0)91+#exLC!EiM)0$NnCpwlZNRT4Y)6}>s2V**e z%^`|YthnQ>FS=VUACu17(6aMSIltmvSm;sAMJ!!{Cs2w^(XydC{cduZ9~J2ucQ1b9 zQ3EdFU^-{Tu^e#0X&gy4r?cdk%V^MmHn{5%t8MH%@Jp3$=qQ0Ug(bL(GbI(MNt<0S z2<~{rV41HSX;ZiiPFv)OWtShM`L5#LwBCYqR&9c@bT1eOIZ?Tj;xyXbk$AWma{TEo z{`)FnNK#WHoX%Nh!gQPECuCaUFtQj09w~76APJEIH0)>zi%&xy;~A+&QKNPXkP~_8&Nv; zMlZ^5&u2TT05tG=39~=hku#Yw0!tHp@~F2TW8V9Knvf1zC%7djWOJ}-eXfGo4+D@r zR+^p-`&QKEb%Gxw9s}yC8hnMCGx;lKO3qy@(EEgBn64D_V&- zHR)mB@Npc9)ugL!vfxO#B1s9hqOvP|5H$bbAI3IXVsjK{j@-nzM?3LO$u3;SeDm)g 
z+0e&3&1j~fCMgb*#qD+>oP*5;9M{I9Xf7gDKgZ3ECiLYKO}hWvBOnL8@nL8!jK8Bz z$I|1tyrKz2Kbw<~%f)b&YvaF}-$N&t52*jW*WGJ*9i(Ju;PzSt#_Bl?+Fx2>XPXpW zxMGJN=KSV*C%5sMx+tK^hw;|*nVj2kdumxQiVQMdgGc5~GX1_JE_F%b?y>qr@ju41UBP-U z*G}R*8%>a!a9^-Zrbcv4v%e{-`qkH!@-dIPR+?DIc4Kij#j+hXi3 zn9u5- z9Gb*yzkP%+25;b=BP^n7cpO`MWNB$&DNK+)<*uo0O6Tn`BPO#7@KLP|QD*zv0Fhw4 z5v@%s$3W2`!6`znzM%TbWw`H{^k!a(=K zX}rODlu9%QOr3RU+(St^hr8iA+29XI##)l@B^NM4;XQA4z=$qUmgJ|fKCF{i3@*4UNB@h)dxtvVw{8($4AUX=_zNtH_a6RER3l3) zQ<(Q63L#9G%_yBn&{G%s{0gG$8Bsd_j~uO!GsGh+*%@1W9)7B6=k&wEv3z7b-0)}J zh<9vnw&y+SnGeB)F{i;T!wl{{d53DY_NeeN3GdHO#&sVuvEvH6-?)E)R6lvhs6fO& zkMSW{&yDlfqBUC@@KvlXZJO{D&DpNLdD;qSTOP)XT$7|mf$#a&Pzjpyl1Km3?QkK; z7#AFjcbl}af|q9V?x7?Pm>p+AZ%y6__Wv$|^o0fxnD2m^!m03d{x*>LYe)aP^N#!J zzmd1TYfo$IE1~bY0Ub3-3;sSa=9AL{5N=1oOMgqQ`mqo>lOV&5E6}EQSLlP+Ol=a- z#C~T#K85ouovFB9Iv?vA3yyv!B-I>Ji#a^fv4@LcKO`>1_7QeD*^h&D@ z>Aijc-;C9u+lzlQ-=G!q(cH$YbFXvJ$+F|>)b?%w z|MG_(nf*Kn->i@$8aeTpo7E3dY!?3N&M;>vnhU*AYe4On5M5s)Pd+5n;F?_KxEcgB znNfstE9ODRQya2lFSxDs-H4*~KOuNq8tVqFfWBScywagws9lx?SH9ShMF-7!lZAHN z?BGjoSsz+Z=J+x2o)E(QQ@gHm|v;0vz3 z@Fm`5j+vb$oUPV;i}g!aJKpr%x6sb%^fV)A^jSvi(irY2BYdl5dnji$u`=7d^HQ(lPn4g+H< z&8t`r>=(5AO~Y4-ws;Pgpm^I^JYW^eubgxc=j}{})e8#n@RipfD9FYL%^>)7NSbym z-o;n4Ig5OuF_GFxV9dmI@NtVJHS*D*$?^#}+CiOkPkM@Ls>EnRt_2-;R-Rb&7V#c3 zdi15J16B0Q!tsMFGvcC0RQ4~$Q~5&llA|rXy44@uT1q)F{x+1w&4H(imUPhiEv!@s z#!|TsbY;8J1ZBYHL!+SNhca0<=NNxt74vn)>(Pkk=5*@VMaXa74x=|S?#!%cw0@q7 z!pSGOOV^l7=x8F_Ybg`2XQxot*O|_}tby}82Jqwzf4C=OMwg|u!q~al{Cd{sowv-1 zvHmGmO?(X_`tSH%T79@RCklRlodLbuKe)F(EaZd~HsGv~L2ioLY8WqGhWoDng+1)O zBCqN~iULIF+Vwv8=*B|Oe_H0g_Td7&@kq{nTtp6Y(#nu(Jvkz$F`9fEqek+QPB7r) zC>k({N4phrwDfWnH}LQ&RvE~WSV@-gjWi}Im;PeXpe?bNxeJcNYxtlxMe6ril$gfd zfx(lBFt>CzjOj7t=WkIRl@j_u`3q1jwUl%K63dK_Kw}2jOq9U<8rRT zb|WLKV9b@HyZYGgTn;}#c}+TEy3`3#avwYt{AqBQKm*FZ?G%(8SZS4!yr_HMOFbA(tQGrCP`A6GfqTh z?H&GyLJ(wZhy;@n9uhB(BD-Sj$PRYbuV7B=REoiWU>ea!uRBt_=x z-baJC_OyOm9{+D@EbzNR;7rOtJaj~#cCowG&fI5k{yIRKl?`)@Y2h?I2eNiW9{Mhh 
zXIx1aY6)WC`FEo{wzy?qNV}Cg*X!dFqtA`5tL~F*ILxN8fBkl4dr{%gU*cS36q4=3f`&C5VvN zc|t_o%7S0odYS)x&6xPURVNkq?a70ZCU6yK;>_qX+f#-^cu5Ub4?W-o_L~Lc zjb^~o(%0M>2~U_^7$yihA`DW?SXX@QJuC?u(42vyEf&B{_WhEtU|DPmy9ip zA>ki#5^`d6$h+;!7lsscm*ad;!k z9wGykiN>Xum^$SFb`Lhfh;s)sfJ0#(>=M6@#x^WNFGZ z6>?zxUTy}PO&i>c#?JUk6#1e`$3?PUeX=%bSvvraJksFXSXr`yW%aJIokp}{D7qvm zkW0o6^tSgalrc!btqTpwxX2cCe4t7c{q(5*uTy+$I5OeF^d z&|onD&$-{cqsRrWK^uBt zOVbmyiZZ5^4bh0J80#|4mh^dxvK(b8diP<=RjwsC~-^i z0^t<|=7^Vb#(g{Sf>tr=Mp)q;zaQY>Xhf!e7orx2^@!(E4YK=C54hrG9O*D2en%Kz z^s7I76>H$Qk}tf8mlciu^bBl{L~?i6wZo}`IzFmWjy!jp!TuT60Fh$4g!UgVER>Qa5_4d5F58f47-1d}~_(TzP%$ww1Gt4oLO zQPv_^O;@?E-Y-DU8QH$-9X~u2=ib(O250P0bCVQ`$NA3L*y+>@+a%^dm-0z06u!>Y zFFTLBhx));)`>1~>gCsbmB3@eCM5W(j2H5VsCm{Scbj~s`KDmiYf zxCMGm9fl{D8Ee4rBaZthM4`4C&ORB9UMtJtbnO(r@V5+A*0Cptj;_6~;n&;I4307V0F9 zVWGDA;lns< zI;f?_@+{`$vVAg4HMj!lsY*ouZU!d5vEUEfE#{04u$_FT8r}L|1RC9vC3oE1VN9wj zJyaM8Zz2-F$3y{wH;7X|7fqVu@5qO3Jqs_srm@~&8qA+;fxXk&9b(QU#_N{k%hhkf z4xug(^S#2q>U@n7De367_$218ZD(gZM_wXZh1LgT;w(dJI%d8lT@)FB8fV(^x{4#8 zXs1YDM0Dc`Z)vjN&{J+P>&?sgs*+=a?pXIC9g9Bu;{uUlNMStFag(F?@!7Wp^Q0PZ zx|j_om{Es`Zyus0V~bv!U`q!Qbm&7DV|wZFWvH4y0*;5CJ2yrNH65a@;gVmIPg49rW`RKrY(Q@4O#g*-#Bn@?^-` zUw0wiE{^q=Tu9JpQ)0BIADwnclC=pg#P59>gbb{LjbA#^^-nCij8en$e!z(Ijksaa zNnWed7gBz>kP}}SGs>`v^EE8u{HJ7cn&S4f@rwqzeMuRIPo)W#pY4M^j6tHt*cb1l z|AYLZxMr%fvZsf|4DWh ztj&x;(`9v0?ou;i8NUx+T}B{sGsJVBWT4G!hoZDDVeKlqFCN)82K{HOg=6$sWLY)@ ze=lWGo`Mbz53$EdkLIAHQMFRz{7%SM^}zbd9b(5h<*4#$3h-mzF(d9TV{}3cvF1x1 z5_xe5nVYGMwJm&Ll&3I%tNDYx8b%=LJPI0G)bPmz{ZQPJ4i``9VEe)pfql>qTdpie z%{IftGrZTqSfQ>SJt6?2g?hhf_aesajVvkf_riN}4N%UgB6L__y?G64;GApPLQbVO ze&PLB=nJ=rCr`FQ=~_C(aCIs#+qMCXnR$=NYAO`p+2M!B#&RD34StVw7;(rJ>Av|4s(sMiQ)w& zH}A*bQ#D_3|tR0GvaYJ*a)1m$Mwkku^~ zTOJL0SPNUVY&!Mgr3*bY~893@`Lp0r{ z2<(<_u-=$}4uyM=uuY>u=0Y~C&?po*IpxTF)-|{qtcz0{_CkF}56n~@gR=E2kX4)_ zKArUmUSvOJMy(anwLNFoQ*4FzW-ehC zty~X>Zwb|1?~3bPWyqLGLdH^AH!_#^g()A*@t1klB&A*r z?{E7EQ^$FuJ%iDxdg~y%<2QoQR?>w>^g6~|sH<>Btay&VMVY$t06MqMms$BV4>G1C zAyI)Rsk8oJiEoiA8f#U$wel#aQw 
z6opJa3yUVC!1W>#>K};T;)Xm^(8Xd)!)%VU0?F~NV(9{H^8co^#~K@jKhXM zL1k;AA+z)>%*-tSIU&pVgtH~df8ZkM);#h3Y8~9Ye;1?mZy4Djqf9EIMa=Yh(*at{ z@MXq=*h!=yU!jCOQg85sGp+IZ@fu`d)>Ak%ji61Puh9k}w?b3lIQUQ3fXcQ^NQufs zVKV1on~)FNmR^M>p3^1e3S-dhlRIE|S1-7bJeXM0$bVV=iZQFq6~fT#!FRzT@n>rp z{QCEI$X$GbG4KBa)q9grwXLxBs&+uZ+KW=Vo_^+RqzmZ8Zih$roY0n?KT+FMd7P!V z2DCp6BfFY8^z-UHG}&4WMSgH6;UW(#y=F*^w>zQW!44E_txqcdJmY(e_3;iXKdhJk z8rJR|g-lj8!nn&yI3jET!)^Zu>ZB0 zeFW1(EF5E-VB`ZC9KGf?e0li~G}ql{`m2^8g=x-Y?7I`Nq45-`NJpc8`j)~TEfA_t zHS*7PT49-r!*D;+n|Zjl16F1iGAG6wkQ%5bH{2CmrDH^=0vK*~d(%>R+@#Cl^A_3D1B?70afSF)u>SKwp$O&b%qGRKpc` ztGPu;@xwlJJJy&S`2I}D7Z7xMNP`%xY=%mqrux9dG2y>u@LfS;RFOkTcYjoa>^ZVR zt#ug9o#95DI~71%=*z6w+=t{RnUG@VD(2Ca+vwWA6j-dFk4@cLQM!&FE-scKk&(jO z_rf6Bv!@bmfA^CSRH(R?9OGxQ2l-aP!=e^f$p=b5fo`}lIe%VOTvj(<{7LT{_>~@E zG{fa^wwAzd7<>RTCt2Xq1y|5`Zxd2IuK?X)>`{}zS4q@($ERs1lRtV5!n|>Sv3Hxq zXYBDJ_s~(ad$%09BtH}y&a%LFsgUu0H{gAS0a+R}1U#JeAmc88hVxT!vT8vkL)_7} z^=8O<`30DFIiLAbT!)qh$HKMeW_V3Wwe+gMRA2V!0jx^aCg0;9!spI>vGemga5d-w zy03Z#&P)dAvW-GvHtr;&Nbn9N3Cydd%g}`k73?V^1=~O~ymwgzQ?WA_rA*=A*?2oV zCeW0~c(^jYjm~7!I$3PMc?f;n$LPO@Gtt-QZfJh|QRa5tbmr9CA`p#HBI?mA&@>B6 z{@X5F(rOz8Dft*A$JfHn_l2m)paqiXQz)aY0D7-Qq0Xs$QSH=DP_0dxxV=JG) zFBxs}&Qh2Mp34XAf3jrMae{oi@57jnH9}^yE>5c+M7#F0Na2|qnN@TJZ62#flC-?h z$$+8ws=O`UJx9<9p36XE=BGe{Y!8eaDMcAkLrK$SeeCTq8;yxngRE<2*zw|1C|ec< zebi|b(_8|kr3X;-{cK5L2Q}E_Dmu{}vlO zZQeUHdVwCf*EIn?H@e|zMjK%uVi?XJ?}VMMUPE5i9>jzTLV;HLXjt7swA4XQyz{_v z_}ZKVse|v(%(`E^mU%u*>y^Q)*Pa5MHbs2cW*&TS?}6mK$~e5=jcAOzh{RJJ$U4`_ z%w;uK>~*Ie3|}NM2J+?ns>`3GzljZQEL;pffB!|*!d!9ZYD4n#{8+SOiwbf5{ZP7I z@B(>s8sJ4So)A1i6>T4FjGbl)Il(h3z`x@*L}kYme$=_6aBrb3G4kjGhdq<{eU05{lfawYes3hYb8kPV zc`zQmkP~ug>n6hitgtnQvA;b#aCyi*5M0j-MufHt3bjqdu}lTEv$(O}*Pw7$%V zY|=@V&N!?@-dy)Y-!jhd9|!fxQ!6Xna@Gd#itr>e$8@3nsakmVKt8$*frxGogZBzU zaoipUe(MO9LBG@CLXH#?zX-e`u~5JMdBR(ai$cN4?WptXKJ>?lHj=$t`o69g?KWq>cP8;2sq-`H&VQ_MP5Be;9Oh#cH| z4C3XZnSJ7au-1xL8vT9}xKF-7{ti-ewPh^V$v>vbK0S zy9vF2YmMb)^{`#$5HiV2hPZA&4m*N2fEk=a?E!YA_@EN$>DIs}yq`hBB2ChB_7uE6 
zMTE?X1pe*To9OfkFH|MuZe9Kng63rfp`m5xnfS6hXj|nQcB~a_Tv$kohwKHI`72G3X@qtv>)Nf|mBj zjN@>rzYhH_s7KN9f0=eW5r6EcHo_&tk!O%OZmm!!@3NM_VI2o}_MbU^<@E|Jo_ic! zeJAkJR!bmP>nJJ^KSjGPF5{QiU1GiqwXWLo1(49N1r(i_~!Q<$^gjBvowFJjAzMEGgrNFr{8pj(X-Au+;?%rg}-Jj^OkquEvH zKYALTum1*-FO;#v$3z&^YDGpVN>Fnyhp$YGMECPL#q#-rCuhn#eC>XSIMJuysL;bcj+ z3GAqK&l}Khg&Y*pXoay-5SYxV-)qmfE zpXQ-xZuU_8aH$M>GW`?MYcCY)a3eDD!Byx^!BFp?0{5R3Ap`d)bg|(8S|yyzr9<#? zuhL{PZ<>>o14iQ5*27SE=>dAvEX=fWol%>z33+Yb4^A|_lD!^amHw4 z#Cb%EhvEiZTWsIwL!NoeU=kNILfzB{j#5t&yh@gQ?tB3w@^T8;&OkGso0Q zYkb4Y24CE0h989uBW0g`@zTT)_}F(qta$SsXzbI*nJY}m;;lW5b;}P_em)Z!EbbC( zq)$f22EjsMtE$EZ?p)kWw%*xR$tZNH}d)S#Avy6urFG1)0 zkwx+zs^mU$MnBZ5k;PyTBEzC{+ClcuB5yT=M#4|B5L`w=9s8IG1^ zDse&0kKl4`5`%-DpoaIVc*v5KXv3X0CZs)Uz~$%9UvAZd1# zJ>GlX1^eg;*<4eKVX4e0-Z#u$$azaf`um=u%!{XxR-_%#`nP~7ED&-62IawKzb`)B z5RW{}jx$ex-atlO%@C&B$wwr-gxEb}0RY0+Q#9iM+PA;w$zum_Q_o+yy? zhS^})rhvWdTG2UI;hkOf6}{eRM|w0D@w|>6dF3*IUMV`_>68s^Qxrxi7EK6D}gmBZTy$FI`BUR=JU@^sL7jU;`b(k z-6RJz^mZ~h`pRJs^A*VATmwJ!-aZH%sX}ITv_t5nW2koiH8}H9mYh1}PC9riT;_b7 zuY39kg{{!S)+?i+Y>Yf9h?Bvn@*emaSmAMRXQJbi8{xaaz?&BMLFxe^utnf?B~3oT z)Lk~h@*rfcqzM^t(HZ>YgI=K6A{2r_$*Es|RTxky{PyPq$ErQlA z_zI%G6hd_T0dzO=3Hzu*o;0+#BAZVVaNZHmXwRvDo)h=^qpe5atGpwYU2qz??{LD^ zd3yof_J#UKT8#GSThQqsLI1|=7xp~L`0=?0v_4K5dzL%mgZaaVrhl5y3mS%#Efq-6 z>?G)0bQIzoW1(3-1}*wh3)`Hy|E-H8aNrMWKNT%Dczm20*VBPCKMO3e75flcRD$BZ z`Jsqm9{lmddq7(hLs!*wxZRb?gcbir%3t22)YiH1b%YsS^5dAa@mCmMz^jp?XO5uo z5o?fLj|-_8;mf0^Zp`ZgGT33tK~OST553xM=<|&IKo0i8=FM_A_FN{)k$(t}%dhff zgIAe`iYZ94$58Fk_=X$}(+2=XdL3 zg>l!x>83q7Z<@sqHP<2Ug+BRueK#_)KpEfr@)+$_lEBYTS5S>-GiYxAg0wG0GJ~s? 
z5Ii;^3bW24f2J0WyuZZr2bHnY0(E=DZ>$fhZCwZ@0ta&Sv0F@3iWSjJGsKxW3($9gL+od42-+)h&<8D3ac;K{ z(Y_(WaCYjF7Wg+T67kaz8DaN zTS>4ld=9!K-UvaFZBQv_itb)i!bje3N6D74(kHD;A@YDVQTEv?cG>k0*_wwV*I$qM z?Cr+**+Xs8zi7XBr-LEkn`6LzbuRSDoP(9Wu86Otv?IHeSEysXuF(5FC#_aiM0cxe z(Xp!)s4(j(|3_F0ZiiL!do9nS#mgU|v4aO;Jntl3!4cjqvlP7u_W_gWtwP`X2E^D` zfZc0DobYEq7`zsGCq*03Ou;jLc%m}?6JmtV9An|;{zvdV=RRM)Fc`g7$w5t89_apu zBXGDw9-VmnioYr&gR5h|Fe&?fAmeIHG;Gxo&=#1%x3DJJY4|~4P^yr-N57#|q2J%5 zZwr%7zek~S7^d#M@IQQBfWEUVuPSQi&kZd?-u)Hm{oK!DIsQJZbQoj~%i=ZdXflCClmdUWl zLKoY_gzjbT!1F_UiStHy`I6= zVLs@_u#ceI>xnc3=ICwPFHr4dK&G!uMSWJKu$|{1EuBRXO3HZSvjfPm>oPRV(7-zi zFQCp_g`g@CYPgi)c;eUNOtFnG@z^Sh<0gFpxi43d%*y5PR^J#a+s#4uPqqrooMA}* zls~dJ(8dE1Lr9*nVBZ+B9h~$hqH5z^Fz&(&bT460Y_K;;Z0(heCVf`Kym37Aw*`aO z<(X*WogrlH$$iYDJDQBiujNeF;e0f&@C%yHj79rC+Mw-LUgWOfTV68663>~XOB6jb zf!B7%FE+YC;<(GqR=$eAj6G4pngQmFi504G_`+-79!ADp))%;{buiOVLtr7Lfq&x$ zq5oamNNok$r;ZzEgvm6}I@s zze)VGxvA*L-uKAebu#iT@x3@ zR=%x(-F;UeGtLyBDQt$P3Sx*DUc#?_-3ccXlTk&;Fr3(2&A7hO1pDlxu>H9!TBw>M zhWa;R$NTL_(_v40(8Y%gu7Wz-4FC0d5v@16INZ1ul4b}s(be z4|tM9Xo3!F8hiy0?|-wp(3VzNGSD#zbqq?8_axytsxiSs{dd4wyB74fyB9{^6zbk3 zrr7_a4KgPa(41{1kjxp8oQ1Z;<|GB{*Er$H5;qd{Z7Alp{Q;fLcX{T0FIqC9mG@zl z(R&Y5{9E)2HdXW?bmugqa8j3yo-RYKdHg~5f-f+6Q+uF3NZ2PGy9=6Y_Mu^RKhW#q zf3Rf_g(7YYgFWTTnbI8z{Jn#hKx^MnvN@s+9jVJi)?s##cFBemy>f($a~=sEwR{-e zZbw2=T*>A=rI2^t7}u^#LMA0Mn2h|7((q{)QFZ!MkQW={KOfrR$IaugB|w4fJ%1AI zFA%g@bFUz`T@v&cxsiajR>)NmJZWY#QG`|{3hqxwUmFY2&?qJHT}PKJy)6UHS64Dv zGzH9@ZNSOlg23siM)mLfk$Ku)7`amwFF)@<==ZiHC|n8mR{RIv-u>X+=0?7XWXQIh z-@vkHGG#g>>cZGiXq~0p)rd6YE{wVEI&; zskf6SWiA(BiOf){K9MTqww>YS{|zCI?}O?2JN1eG&Rlq)X(XQ5B#XXp z4C1XM7BKTxBK%kP0Ofcd;fu=K(W;p`>kuMt8il3_ z9`M^Q;?YU<-LQYkbD^J-gG@Q$-C1x9CD(-U*PghNDHG($>%2O+T;+wo*2>@qcHg0Q z&=s$GT?|bXIS8F`ATmRru^V0Em?No~s8!dPEME3mEE1RkimSV!U#|mA_(L&Q{+i;y z7oCX4COK04K-g=2%%@#GK7$#0KjGJ}bm$xU4}GmkVs1~+gLOZ&$y%-qW}$j`w_503 ztM!APzb9e!ZHUDjXVSh-4J%#HN0);oa4+~M!Wz2d)T{Z-N2RN1PxuYc%gqpP<{O~5 z{{&3D^b3NIJc1XU-YDU59Q<2{A=}9bFB0C-_l_%(VdlLk>9aMx?zuK8OUU8ZPp^Q{ 
zMb+r;_&(J6>j`=pa+CSEL>N=OJ_1X+ThWQ}<|w0UIO&i0i`?B^NUV$}UbI-qmecpZ z2gizFKu(X$2^|Bw(p#WnlL#&RrGh*bKY+}SuH;~r1hpQtgM5$S5GK@FA2w@YgN=nq zQP3}pzIYueSx)C=XN-otsY;mp5C;LM1igHsL$2%+f$J!BeBnYoXbStVqC}zh>1{{! zGNPFSQ-pkX{0Q3ivK327|~3=Ebz*qOq?J!qCa;#4%P8|ErzMG-~r8|2fM3?T~2-TXXm&|6N?tB_A{HE2nP(-{XHWEDh3S$_? z)Z;oc%`6M_0}P1oYav@lVnQ}nXM^Q@Q~cn`H?;9!CA{hzi@Z!T(2m;MD0=vN^dr~^ zulZ97^m=>z()tbm__UqC@!t=lGAzkhK`(EzHvKC$l<6aEGcxsrCUw#{+t=`a)`i(4eNzjJGJ=S-$%gSc?hPsOvJyn=MR!5NX@+f z9*f+FPGTI&OUmbaP2R(CRR@f!S|B*fnb^MDg@W4U$S#2i_c|vP$yVM)zjcO@Rij)< znV}&X{b@70GCB*w=qHTd!9TEW#xR_>J__Bbnuz`t2y1b?Ay`*sGuf;6gUQqL$T@jH z(08?q?ewaUtN&}>TgZHISZ#_=p1BWUS%pa9+%EL6{xM2;B@3cih_q|SdKA@t0wM-?QbDnn zaBN0B+no}DB%6H2+PBA|1xJMEZ0845sJIjSK7NB>y%}&W`5BrvNATV0Y2p51C9vN* z0fkRogN#}Y$=GMUkbhDhyKTHDePyRc&it{$20f2q+1Cyvsg@`GK1Mj*zzo0A?uVF_ zj`&Tf7M}b=1^2x=0W0p;qt|zA$Uyuh2sd~K^Y(k;w_nP6`3ZZO@5T;z*9}#Y+!=#b z#~(wt>^u2cZoin{&yFHhA6c9zBhbNQ97!XSgoX${EKR`+;au^F|4}SUv=6;w-kBc- zmEW3}Khn;R|ErGE(ua{70YgbLor<LE{1eRCmtlWT{K7n%{5owj7R*b&S96!H|f8_4&X44DLj(3jH+RemM>w89#s z!W{-JksaB-Lmou&zVLZL3dng}hDRaVL|*VJ>=iT7cV}E+(WYv9pi~#S3d{+lTpyrY)*In?U|O zO?>msDC)f^jjg{7iXa> zp%?{B&*C?g3mJ(79X-&$h?f0u6IgKiNZ0rZKOr!KeKkdk z+_#cP&rke;i*5GefRO1h!6#8##rN}t@^{!fd$owe=mW@qbPiNG&VU(X7V}dE)}iDl zD?p-XiBi8FfE(JC=t-h6?z;K`Jrvg8+dcj80DIzjGi3OO|C!<5*tsb1d;w(8W#GPO zh7?j-q$P4mjG$gd4eCndTYwdwPc313ckY8#UYRgD>py10+^0;{#b!voX^JAab^L!d_V=ny1IDfie^|EYl&Z3stcab0ceU^j3%;1%z|<1 zB)m-mrakeQpG@_z9Gg8hyhEDlh^z@|--rFHXer?fcue~Mt zSK5YR!YxT>UpIUbSZG(renxQ}2T>d@Wj3xk2(~j#$&|$}(VWc#$fQdJuYTb{Y<~P!o!RpXNG*&^-;8@BM zR~;q%V~+;pSWFY<9a>~o=}c5JTNRJ{CS?4~o{lE`$D^O)w8*5;%P4EvcyY(iR)Crw z{;^CA%-Z%G(yn)*=HZ)Q-@0%iOR`ZsBHIE>wHBjh!Mk>>sS_;|_|&@v?O))#Cr}%K zQNGboJSipty&O3eP8$s&a(TI^$j%rRR~e$5I#ZG)puDS37QxFKkJ0J9!(eAs1D`rq z@c(3t=EEj!0sdkI(l}ZK`o}cLn9;9LXWt!kN`60kFu?&|y)<8%Qd|#vv-+6j8#Qqg z1VC%X7j)$3PuRNOkvv-e7$zU*kwsh!Y`F9sULR|ReyxYh()0u&6IvbLIQjwA>n=w! 
zTLcZad;>aercJ<2xK7rBHb3?&lX*cM^ZwJ&XCdc#)AZ#CUA+i8f3DLemY1O5(0sV| zDH0MJUNQw&_3`5Aj`-m(C&CKXa#zPv$RqkY{TQ_prT?E?4opHjMjZj_CBRo6djL&Nqd`;wv>$qi-NrL}p z+;&q~DxU?LN2`J;t`KQoj7L8+Y34!0KX_;EE~JAYkwjCyIGlZcSWEdAMc~dD&9nI%K*Cm^%aV&2!N@R3Z)Io<$#M1!N&!Gr~uE0 zEVp>L=#Y=nf0TgxgCO*2qXzD&mm$ZB%rG@yEM!=^p;ey0(bKgSxH+L5RcY=8`7OmT zy-^$cE;T~tF?x8VQ!Gjq`g%Uog>zOJp|7+a*;)P-1q=1wW`TG1LwgglelVPokJh$&~D0BO!8#>+a3U=$)A`6oq$Tk`Qu2+PtLXi!$kKf2g-*QHae!E~bi@hjy zp&nT;un@Tu>SWunez3pzhF@{KTY72darpSn6P5({pf6srC{pzp>hhYvFl92NXAeOq z#@O=40>94o{8e}obQca-9!9r5W;1E~L(%0aOF%0w9F=T6%*WpGEa7?6RprU;D0U# z#cth*rd@N!gHOzH%3NW86K#Y~XJ6;1rmq2evm%sNrpOAe7(r$rOn^eqf_^OXAMa^hj?lAK1j1ux{ z{Q|E0dOXLjs^wKTbwjVJayPI_`2u}I&s+x?H<(Q{an?^m?_0fNy}rHKkXRO zZEsoCk-{|h69yzLQ>b0JAy9*?}}fjXSp5i_G0Ua4x~d$lFZl;xwrsjgGd zxBdXBpDJ7jE!3iKHxD5zFKl7L zD0LDe0zvJgeNVs4-US`BBR@2*|YnY0K+c{wp=>orhhk_zsX|cVHHiFeRa_}}z0Oo5!GS{)xDWSh#E1Ld!l@@45oKS&KS)nQ6^d#&X)qRg<%v~^Vbw`Aery$5C0W6o zQP&i*yb6#*_AwOm(GDG*F%Q<)9ESL>8f0y36Jy`ri$*+jCj-?km`c(io*hYW*W84h zoEePnkzhz`Im>sRia}+y_SkNS2HA1s2*2Y~G2pXbLC-{vELmzz%sp6i)5il&T>A@u zlYMYih$TtpzoTLwWfbwf6veWap)S1*xx+E^>&0B;IynjH37(zJqmLo0z*qcK-PKTb zG7Y7cWPmPS$Eq(X;^VG$qbHu8L^Ak=$?#Cd<_n!ke(*}vVXudchhBxBr^4X`cMKUB z_~Vo6{-j>z2yZWB1Knh9puY(k(tuBTc-%c(oL2T4)xACoF?~nj!a{vKy1D>G3fxJ< zHwNfJrv<1zdkiDqn+X2yqhKn`WYUg05z~g@WZlvL>4X7a;l23^J<3zV_p|oE@j-X^ z@vepQYDt9h`wnDVsxwYG>V|ofA-MF3C+VG_5B3XW@SExSq^em777AW(jr|Fz{LCok z0Mx;x7zW0Ms|x%`MZ7G;oNrxKfZWHr;otd=m@ck?$nG9Kx-%YR6TQh9-FDb@_5w`3 z%A=r<*)R*7NlQ7$gvPE$3pcokCpws+d9B*y%F7f8)v|@v`F~MM|8=SB;#d@((a97W z=A#88cS22iGE7`^1hn31;#;CVjr6UsLFvcep-+OA#b>e%jKI3mHTDYVv`!OBTW`b1 z&7Ou_T<@W=L5BF*pJXu0e#%c5bbiKy{v`FXI>|bH2h10KMOPJvkY^|Lu!2Skn(Z9P zuXeBFuSD@kU1OruUSh^44yfU&!9$glMIU8l(q&}+cT5eISCElWQWF09zkM`i0%bxr zEnl^ITd4brkS!tZe%@mPM##+nKl`-*t>HY@cuGrVQQZ3EV{}8Ag7}oOkNEgQLrUSl z1JW_O*3nx+Ld6!6PO9i)w0Oj>Wzx~_GPp-&9@2ficH+q4eca0xkx~j9Nn_gUsNzFM z=?}f@r0=v1q(fIdSSrNsww$@?kBmEx#`45}uzVX`9QJDsu*1llcdv6)FA^^Crs}$J=UP_zf%ZulY 
z@ne6$Rd!CNv3OdAg?Ls+3^(}GN}NRB;5LstB@RtKExsEdFAZBFFTR8Gxb>9>#O4PK zq+#>MvuY#LXpN>F;&9C9c-0>F7ISq<5FfNKah#6|dy)&@Q|;K`_ZY9&Hd^_ubh=Zow^r6n|F7!J{lXSs_S#;fFo}t=TlFz15-j+{dAzr8a}h9 zmpGNrx^PY6vrV5qclR~r9g^mM{kWt&yL+s}V1*UeGHn;zpF5HRQx{Hegtvd6<#Um> z^)bnEEn9A3aTL2cX#zLf)Rp#(_vOa@VapnCl}L^&t)LozZK4AwxNwo1l_jIiA5hru z1AA;^8^s>5rDCiuN}{LAQaV58(tb%dDC@(AsWZE@xiJd^>F2iQ+@`lP*a7TMIj5OO zH#$^N9#y+b4JxC!XR~ICXH5)}PPv>*Ie8n1Yq}28G+7k+VxcAvSKa;8#QEi38t)i=5I7Y8Umxeavqw))Fyng|s;GW-}LhFO)9+SwkiDRM8F*GsH>ju+;YWd#dcmJn_hTXE?V!U9sxW zTsrz>kEF2a44tycOT2c}B|4tZpUir-W$mM)F6m5xeorIrL&QcJ!sla!=y z_D}XPrCfV#xyAYh^tl`@(H0+$-8&|S-JSW4(n>PsF5ce5&e@|xWwi0stWrJBHuemw zy>K44%2<{5|Mr|6cUZ*jE=i>9U*DiMy(gj*amw6RpPLk^ye8487%B19QsYjVoMabO z8nZpi(3y*sSs?Rlc%iweume{^Io*Xz?O z>(#h-!~Xg;DFXEe?V*y!)UZbjwPL)503est#Kf2`8T zIx7F1E}amc#Xk6Tjjf!oFS$KSk&a;wv+rA*MW0hDME|Y%LMf%LrmhuYk+zxpzJ!q|W+MKgOph(Y5gLX;FU`r;yq0BX=Qk4O=lC*~tsT0}5=?=RTDqMao zH5Z$5eu+kO_gYJ?HvcheAM`|2`|72raY#e?#i+BT$9&)UH~zXVFva-=5C2x$N-2U-tkzrqpB=OR!av%2s zQBO{=Xk<)~r2WQk$@LUDKiN$)CI7Zgk)VQ`XP4< zQzaQv*YcccV?{qrhfAKA%CRYhisc&@kCGI4jgquZT2?OEwNb*7*6VIoqsr~4+$dK! 
zyCJfFpepJ2&J~?G`&LxaKB=_ye7$6T%07SX>gv+@%03d!J}XM?>~MB=_I|$?-Ooh3 zTw_IsV?IbcwwqEMWi2u_ep{|LH%}s?(I?7LJ0+@oI)bv8rOfK*e-p_lno^#}&q)df zZb~*yZ7G|aqeJPjCnWlPvZ8BG>;09x{)pn2Z4o_~I!(0ySA)NIC>E&?Z<1UabxG3S zuF77RtirnHgiB)UANrrjmk||gsg!8iJ(K+F^k7wX8d6^AZzM$*RYZxUe*PKXGDUCl zMAYAKV|H`XM*mPP6KZ^TwnT47he)j~RHU-oxI7~+R5T~bjM^D`NOZ<;kLbX0N4EO3 zHTAmEg~dk=WWl z^zW%el+oJx5-p3BqGjsQqLy_ZMLQ#V{2RFv(f*$^M6Rl(qUOZ&lDGSxNjB6qOS~g~ zOQMUaCG(fo_y;@2i(;y`O3r*%WN+@bXUA;PrNZoEC3ihGh;AQL7o9zLMPzGdLIuki zu;J35{x$C7D7o?ll7RDnL^jf7QB7<_*}}g^L<37?sKB?oL_b}^M59at*?&&MsY~%A zS>F*Btozc<{*w=lr)NxQtlq>U(n=lN!PM!E|a-+SMIZyrwvN<$K?2|ly z<4MQrXGoqc_~M__cba;?^BlLZHHWS{s7MV-u;T*qoH+yY1+=G3HfKF(&Q)gSQL`_O zqYcmGbEySZ+}?!G+&`Hl`rD6D+~?U<+&i5=R7iIqy?=5F73?0(Wq+PR_g(9e%uy89 z`ol-K!1bP-#gS-suW_SfrLP|QB;gRvwj0yEk}^8Mq<}7tjH1{{A#C7(R+1fm4$^kN z-01D29@70$S2*`s$LS$`a%^FQwy0}Bl?zqw;5=ttp%ircg@+R)>KL%u#&BKeMj>6%|6kHph?ss-CXwRzKN_y zTPLe%UqqeqJIvnosbp&k$5YgQYpA$_IRAyaS$6!E2~@A@SxJy&qvYw2qwM-xGqz*H zUrD8Mw1h@qBqkq{slbK>)bJ~X)U*i&RA{t1n?}rEFc0Cl^J+FoR0u|J26$eSY!yUH#14B8arnB9dI_z$TJpVPidh8z#+X~W(1@SKUIRqVOb`>3P7yQv!=nke^?3RJbSgqmD8lS*22pR#?Q z$NGeQVn-c+%tnmwWL>`0v$dMFqQ5P9)LV^_?9(d-+=D7tuHmx|eZ}WL)?#roWxY|C z`Z=CZiK)Zr%=d;IjyI#;=FO!iMvP-u_x_>6sa&ea*3G|WSPAv9;tQp*eJ?dUB8rmN z9nbyvrc3Xs8o{0IbmF}39}~^~nZj;f7cTl(ILrU^zn7A!)5ED=%av@$Vp*#6!vX4K z@{Y2pmnX9qQWB}`W>eO4#~uIab4>mA_xvsI?e7$sW(AAx$WCPkww|Pt2X~f59EfJe ze%E75_4KHqkn_|WXz}OP>QOaY+^L^u7Ksv*(j*N@6R78mETu7L6Xh17BAR#2gB@ag z+P^B-RFZjU7rX9NrGL_zU}{M73z651JuJJdTXJ~+Gm%5qJyBzi3FUUAMYNVzrmhU} z^ZzLKMWVFlj%c8)nBqU^iY)x(snYx3MKkpV{VP=ss0(xA%HNEN@yC-~MBk?FV>g{T zCR*`g39IaPg4N8)DVG)nQVYiCmftD-=;yJzTr}zJb=CsTQ(JlzM4CPRYQq(|zliGpt}YQX>E-_>=d*vh#H?w`A<4YqLnJXjy(Q;*bE#@~C+eo?4z)s5PDLI% zEONWLS<)W3ru>^ifE+lj$+Zr^4-qPXA{iuBE z&tF@o&PD#mrrN2pdM_@suPxO$l|oJWzgNoay3DcMgo=^W zO16{LC_cbWANWz4FzE#A6!?KfYu2%aYr@#eo?`l~lNvX)(1(6hV@lsV9V4;&Fq@t* z(~*_SJWw|Abrv-~b36B-V}H;oPgwrrg0AXS&~Y7@hMCu|cJAT-Bmc(ks z7yV4x>Ea)3n>3YmFHz*Sp1w=HxoJQvsSHxdlXYpgbJ}ziQeuDUzhYyTEunfV#?iL6 
zi)q!H442h>nA)t%v9tZgvm5<;+0mQ*xv$Sg(;cO=*qFXlPSt!X)t_j|9h~-*RdhU7 zuI19s!b=@aFX$9|Ht!6pxMm+cQwMQL2e#9Lsv&eTcT>`LJe#{(>Pn@ioG;JaC&O)c z-9aDNRL}Wp&+zXt&7cRu#?mr>^EuW2COYly5So@=VDJ7~%RNiy=#<1T`f-$oSaQ14`qFMUr9-7XXs6l@zIYcd3 z(??xAe1W@5Hgn--HQbIp!dmsllsgrFh+3zaPwDQ-<`%5Y;3k2rv@WKZHV-(@-N`Gb zHXN%F{e3c>cBxht|JU5YO{>?Vw5{LLJt-?W*O&tOnea|_tr(QNT5e899{WPac`T$= z>L@x){|7g^;5*&B?hj|~mCHr1K1*q@HKnhQ?3ASE>-%XKE~gHk8N-cP=}&J^doBq- zZNT+)tY@dHXw$KW#&Hk+Xi8rB<%^27PEZfjKC)9URI&;y8E(yXNBUcj6Zd!Ma4zW4 z59(p-N2+Z`zNB&Mc+Tg@LF!!pDz>?H7W;p(_GJM*ePP!rO_b(Qqog8Dzq9w*dq)Ey zN+PA;qE`A56joas>JI~|H(qYIu_(j~vI<5h)k@ZhpP*fv;>^rtuAFA6UOr^YxMt&xgj z{M3oYZAWsW%!EDFsKiqowo|VaAB3Fo7wPRcR_tYyIT>+5mcC4#&w|1ta7TbNlZwb4 zq5v*JJL^)~6Rg4x?#Q4~UP<(PnFm?1*PK24;!6Hu6Y|>crf?-&m5kO56K>7D!b9a4 zY#R~BsLL*rA%2986M2@Zs=)p=ipbrh?QHmHNfsp?f#*&hOTvCeG3njXY`*Jd_Iq6r zd2+&#>HjHU_aENDp^-*p+{9qKZ}WV1>#qf|-E~V?7H1~7IEAqw2^AKk?MwS@-X&&Lp^DH?3+k6xPM zlSr6y*3DVI;LfXd&lxnt5kZ;*3%&2xmQ9+KVjz-kh zoIfaJjsL)}0R>{4o|EEo;x7o#j*6Ggmk^BiW{6+3{uBp<*`Onj#v`3m%g~mXwW#Wd zI`UPu7nd2H70=nPiYDIoM(fM6(b2#-0jP@i@v4*@~(* zs);3x(}bD7t$AKwruNj`*J6WDuaRL~i{M(JSUWd39_?7~QZ`hOT6h4JH)svvODUGxEiMZm*-q&WLOR{C`g`x2sv`;MI^uKj(~l-A7iB0e$PG36BM((hAHI5X-(#z`w%bd z3%~{$<46L^A%DN6vpqi+<7OX9GCE$-ghgvem{^)fFRvGm->gI)j@n23<0XjGo{w13 zv6h9-%^)FiRg5U7uscx(SZ~5-)asic&H!Wi{<9{?`oJ3cpRpIt*`tLPcD4yCLkERp z6E(1Dpf}CymZWP+*C3ZcBm6oqoF-iE6|}-C=#JQFc-FNqLat&CeWehMW?YoSqs;fA z-g}nx=4&(Tv2La~&0+=r=4iy(}meSI}&O2+VHA z0)uj%HXk#@laPpZ2c`*==QZ;?X73ep!n5hJq>(h{;tS#1oi*6gKZU}Eomi`}9eI8Y z7xeXl>F51Vc(31nAp5`*p~CSb<{urSQ+>ktUvUeuTZ01rw0I*u=2L_>N*16n*VRIA zmM-0@6^8AQDL!3t9m5sL)K#<(OWNn*vcO4nY{6XmB1%Urc`pYSiHG|0H%aK@hE!D9 z6@km!x{=h<&uGnzSkzng5cN6))3#Z*LwrOkZ4*+cS)VLb3in2q7ZTA0&Dr$N%mwt` zg=*UMCm(-s@TH48(@|pNZ+?rxXdLzTBA)o)F{=Og6W@8g2y2}lP4{ouhFiIJD9+-% z_}iMdXt`$*R$ZovHHrq&U|$st`BaMM$WR*X8%BcW0(jC~vH$`%GBv(uSjq&!fSz39lpxm_xF ziG~aj&xt4RWp^=CDMLKHGK~z5c}V-!rje6AtypQ>NMWsECvJJYl6ZdX!e%YE@xfae zZ2SIja=tm8EhT|0{*@MnCYX)$Z9~T66~$?qxztcHo-|(%XStn!ct=A!QZS(tpRRFc 
zY(X^fxEF^Na|xAPF3q<6_9X5~a%B0m0%GXDlIeeUBlVI|bAacf;Xg+aRcngQVt-`bKIAJ@#LM(f25>w4pVGncGuvz*c%uwSdEnIJj z!}mIA$`R2m8ntUBvtp*Wh^H0(9}hAdZvIB}MBanTpA6RJAOMT!@xn zGww|%weK(ET#XnZDC;t|dL2!)6b7;Ts#YBN@c^4UH=bBDrm^sO&dkIc;u4vB!Coza zzqe2XqJP{HTc;JFS?3Q44I3H1v?m9R+OU8hJRC0Ud3pvt^~)AJMkUvr)~yupReQ?M zt}sV*ln=UWpCYUnUWT|JBh{1c5 z-)fIM%%AZa`utE`NtrldjK6U3Vz<~gTu<2T5-hk!nDfi8bI9@Qj9TeK=>lC6Ev)HU zh?cSU zO%566Rq*RR+~UJ3%7uX9`N(U{5#j!YM3K$YYFvvv&_I-xaJzgrQjVL;w>d=c z?;q6H7MY|7d*UsSTftb~(B@A~(0dK^FY$s{4LH{R{wB{aO;>J>5cnW76ti;LjkFlEmOMK>7 z8jd|G7Fx<2shmU%zUbJ2v+{IF>}F}!=#Y)8wuor(zY@XhZ#tD|RbyC?BMrl-kX$~7 zg=7Zd^|24Biql#;@OcYA#$_ixtoef~#&4$Y>H?{{t_kt17{O*68~P^S+xWhQJpb`UXqX?%A`gsWwLS@CMArpo*|d{R-yMkWJ~Ss& zydv09Ge>r^xtaMtEF@>kvROky2|J&>h}?X&lI(pTFB~`-(u%aJsb4>5XHTH8~CGng-nI6l#!V(Ac$;!uZ?B6*>rWO~209Iww z15257iZR=)G>X+l+#=bemPu^CO}1=EC&xGV(_L+UX%kw6-mFUy8?#k3<@QqYV*GsO zlawq-O!IId;@Q<@}z$nhqKmAK1AbK2M%e=BQsPS*z$^zByD6A z>ChJFtr@bc=!z-Lx(rEw)lg;vzKL$CuSBlrW|H{9SJ-Q|D{j9jpxz-aOs;t<8JJm?;(l3%Lp;g8VS1!@;RzC&8qJ0W?I9kvG359`$kv2UVo@{9(bm5QNpX=R zOH$9nhW+U{JUNi0sMg`Rp~d`idDZ}!)8v|#AfM?WRD(A#-)?X z@Zq02>`JE}d)IQ5L2OmJoLQDUS%NH@qY zB<9gQc-K1z!J+;uHp`14l4Zlly~fx0px1HMJ}HePPdU!2c5P$@!8X_ggcGlCpM(LU z0scn(Yi#8CoT<#bOQzPhpyI-ftlr3nxvtA3wWW92f98ON=3T~#Z9B-SoEuEM!=N^)GwV2Hkk7C1CTHmEWri`j??E0&(otaYS^i<3a=fydt&y?q%Qj>GnGTf@fqivLc^1ZEadqL z-mG^4-X5?RKW+7-(ed>-BE|#R81$npq3@_emOziui70x;Le%Tkj?^8b$qAr^Ei3d0 zdLm1nwHy=bYIB5F+*j0Gqd;yYOXIp(y5!1RDYkaD4yllIMGQ2dReefy#rrAjJdq?t zuNNbuDT`^*g!8!Oelhi2*C04d%t5hN)(akgt=WNl2WhDGQhMuC1D<_Ri@B`m9OAT( zqSskN*z}$eiMrJw>=jyOJ- zx}I>RhCSn$*ZOoCm0m>UlK}14$fG~hl*qR+132d#M>Nksa=EV#m)VS@>9&jL^7nms zWr;QkDP2pJ=6bNuX|iPPibZr?gjfh|Z>F=}Y+yyh=8?q#_k`#}K`c;m2=DARW_81k z)2+>x$nfC_n(@b-c^B2wr+-SRc-1}9dRtBsoIh?-O;l;d% zKg5gE7tqnKCb8b^J4i)z07s2+!N;eZpbrx7;#n7e!H^HB==#awY~7}FG(fXR{9jiD zAAQFNi8qZRdt?^k|IS#D2aQ^!PAaNaHH8UZmPDidb}Hn;#vrWKX-jS&)n;*m0lA?a z0b}`0bS`ry-L2rm`p#>S9b>-nxjt1iAmlo>mAg%M*r?F-{|=&aj$Oj+;f^fkcL}{3 z7ePONksyicmdtpbB5Blc#GiK>&>7MKOL!NB`vZodN+~tA-uDg}_3a9C?!SY7?x`hN 
zCmBwEl+XU#`G|09L+FgW3PG!>lPaf-CUb~8*=}%z41ZY39-O*MMl1>>O__NtZ{03- zWa3Rc?(`cXsuok53jYu~0!e?X3~s3lCf_&7lT6(mwE1ObVxJ;3PZsxr{YAW)Y1&#q{b}GxBxRZR~J+E;*+jPuAAtvc#nG z_|eFIBBynnrb@>U<917OBU}wxv`i(dC;5>*6(8}}3*WJ_^HcUNE}g7Wzt5DJ0PAG_#8{q^GTACDe%d)@}2 z=#(Xu`x=X@4yoYde=cHHznOOZ7GcfgV%*}{DNLC=mfrt`gq~XKL^ce?ZMnmj`Vyqk5K84QsL-&kR5}gBQ={*@1y@4{6gue+-;=$Bmans* z_H)Xqm2?K4|7KPgMxnpKhAie#J(9bYQG0Mq6z%H{ zCa0ym*@qvqP*a}{xtLjj{k|EqapFbfVi~~x;R>`}{yq&FJBEaszQ7Ok!ih_m6O*}V zO}-uQAW!BF{jYz@vQFiKTBWb^$?Org>7cngR$GekOtCpxwek>l={$;mI@#du(+**Y zpDOJ5xbM_@XiT)@FJj&k#dQ6{k@(bKZ+!geAcZknEV*qx+p^G;4A&jWl-qo<$){6j zI^T@%Ms6l2O1)U|lk3PvWeG8IY@|~a#t@hJML69O2<~%A@Uk7A zOvE338|aS{SA>s^5y&}r7A{UL#H%Qg$J%!$tnc*43Fj= zo`~sneODa5y-;}bZfK^{bC@(LXPzl4G*`vku9y?_tK#p4c8KjX7=qvJA-xQXcSDq}jK?H*pT^9HTI+lOs}qly&tYfEPoXB%;@ccb#}hY1c{@=WVg1B>69M=Wy^g@iwetS8i*Y%E#9hRwJ?G+(-liqfyq z&(}_}dI>$Y>AM7*9aunOrj)Yn1t*CA6&KQEvYE!GOWP+8X%o0OeblKKOwu-b*7(>0)&Q{dv5FOk~76hBu{dB*>R!<183+}hFmrhr$ zJM?B=-Gt^Fq*`@=m@7x&)_z!*nb=CM7^^XVr4RI8!rmr8!hVbHO8bb8nRT7twe0&I>ajd2 zGpa=x%M999SKa7lAA+~itUTws+I#nijD&XGh`eO>pTl~bH8p{mZ1bt}ebLB1jVxw% ziN5wFcYNytyB66it{7qOk*LA`x(%nMMu&MN`&+fL&u0mj)Ry5dyFKaJzX!xU_TSMi zpC{<~IxV{E^?a=1a2W+pnB6MQ$W}{Y$Uz)SMl|Tc73Wh|Xb@ciK1-oE@3l~4I+C=% z9MZVlSxI8f&&6x~+)1ciKmKMFKtlcX+3LRatn`8xJ2`7J)2`mc4nF=5|6=0_ymXPe zIp;A`@oBc#_%QLR=%&>oHM0Cw8J=7`ot&=9B06zL*oM!?@Qk=_a<%*_ov{{47{xFf-dYgS%FJ>Pvd_(aQRO!yUI;iK+D1N!N zt#J2RDiT(v3Fdbhsv6Y5I%B5@wd)Fny?Kw&;%ln3-dh``q&T7+`K!Xvt`GF#Nu|(H zk|fN2YJ&CdUll$clcOgyCJF1M-yyAD18h+jgG{yx=+wPOXwp(+VUba#kfZnpnV%iP zCn7C8e_uUX_WPL7QvC>RvfYZtHK@^RDhlw0sti^OQp0N%htbYIhj^J`$HmIeN8$A5 zdX#YIAetk)6s^DYLdbe*#tYY?k-X+Oe77wd4T3V>$a*PCn=~AqnpB9!-sutI^JRtC zB(1i|>XabqFn>Ux>4Jcp}!}C^ny)UZWKk$!EF{p|0YrOM(TMYb+kAWQ9Dt zpYpeg1o5h@FhODOcTjZUw%F-ut9Zn%&En}{0!;3g7CvVeiYxTS2*!#9z~YK58hv&t zG*h3;pR+MVza3B3&RZ!jwEuDuOa6601`GRm{g+q8^=G8STiIiAUv8qf&Evips&e9l z@TdIwUGd_}Is1f7fh{#5K`mlOtva#Ox6`%tcVCIshM%sj?0H|ie3gSZ^}`HwD*L23 zY0whg7yd(9gXgN8-k+f}`uC!`xuvz;<0NSGg)jI{&2!q_s(>=DpTw!01&*CjM)mSP 
z;e7ivw9|B-a9#I5TH#xa>+?2ZljRb`SMx3vuf2#b{eFPA5BVtno0~}UyBzrD*Ke@Y zTQ4fPx);5+YC!V8PU6-AJ)D(qg)PN(XzyriyhkOCf_<@6?O!#`*-vTcZdZEYQwsX( zAc=3-t)Z^&rF8T1L8@`L75BTvQ=8>~(d?jUh^-02e;fPo`6o}P>*NTuYoH&`bC^PV zf=Y10euy8eTq-_s!2t(l-^Gr6Uia;^imkynqz6 zPrDDw+r1IWCKlj1{)g#X*AHl?MG2k~rHEu6MAB)URrssfCm}iXHPl_OM$quwipFA7 zoEc({eM*w)^fHRWZ62ZX|B|SQ+*EoxNea(#yoJ$8IpOXf1N?rwIpzf^oT?QmegJ%M zcDy9kXFIWSvaRs!Qy`t7oFM$TGav-(MpCUTeKhsNT>Mx;6;;<|(~=vjsG*+=KC)&$ zU-Hl#w|swy3VdvEb*Ti_``|_WWbdK*LM#5VV4oNtnu6V(b?}5kkNNVoJYJGnhGhJs zkqJGG?yFSLE5&v=^Lq|07^tE;qhG;cj}7PnJ56-zTQrn0nkFpCl*R>De+k|8QK-h* z0EgZFUE?*-Eu8s28ox0p63le`cuO5Uu@K=_JN0fn;{9ci_oYw5zcUl@lz2COw6zS? zzj;JB%;g}Z5{z@!#>1%4W9WFuDfG~B1Uk38U7Wn^KAJdfAENsuaIW?^-tGNOLG!>> z82ah9n7$uIZ`&>7TP6+7gp?K_mru&{{(~Gr_vH>`dd3V{3}L^OzDrSILpGWt_T~pG zlZ8or3z7fFDR@@<7XFAr8v4p5qPm5-{LIYO|o)pZ_RS+UYxNcQ`m8% z9Rx0#te+#PkbQC9>|D;DAoS?_; zomtphC6fJDf>n0wvpm;ooY;FE=U$w|2PNyX*>{I#Vp(d`Y~62Rt?gfW`_y{+xL`Ro zj~);0y_xzg{W$6y3hI&mT-K5C)opXyF`yw__yJ; zG5S<{^HL#qb}o$=>K9yjLvlALP0&ak$Nrtp!WWvp(3r(rXmqtK+B0)Iby788{RUaI z@pA-SJi(Son~h@@PwYrdVL!fE@l?nqhiTD~|B%{)>orRzbPFRFUBzP@1iEP$hkh-| zz!?$$P`Pgk{StN!&rO#`l|P^`%6u$c`)4z@DxZ#BqZ{$ly`@y@MlnA7;t1C4TT7dR zR#AnHkNksAjo5RmE-ff;M>n;uqhZ}qxC^%;o0~>>QB)O@96k=~NH3t-ic9HOpIExJ zBbmz2x2LC4R-(6Vzme&66B^7?XliW({S|!z7sokM>-+D}viCknHE%Lr`AERkX5~~O zW*f5nat~)?MLHrm7T1;Q;7@~{wMl&@IPO+9e$cIp)6|r4#k?lkezO9I_Oa!At#OIK^JT()Dvrx1XWwM8703&(y|J6jO+AN4ykEz5HWd(^_lo@Otv`gz zI$c!4+Kfb78W5|DbYc(USjxl{k|eX3=m8&QeA<~gpO}b0Y$+z=W$w_?V{hPy`AxXF z%a!O&+JdE@sgtMEE3od5(PT!aI@>qdp1IdgVMDvzSp6+)CbJLXP0v2!OI`Qr;pk8n z@;8Ge@7PCt|9+)QHn-sE+zRZVqDLr+CEXs;OiMc$Z_GGN-uGRiD_8rHjEY})z0xG{ z%X&G|SGb6H+r7YXFIsTI|6u=*ktERYIJ>$lo|P<+z`u76aVSG~Te_>U*!^%e74nkF zhQdg;w&N!Hx6^|}Z@Pr5I^CI+b270?QNXGrmFVSHgS2zqB=XH#iRcHWla#nkOh0%9 zxt9@2>?e+4<7CD$=Qa-{rJg`4)_$UP%IP@w$4=~NK8^f(R)$e}8)k1qaQwb@{8z`6 zp}I z{iX@zqwgMe?CDti{b?GB3V1?+g$o(0{{-92dn67V{Tjb(3?bIdZ}BS4hjDDDj4drOloKfuPUjup-|Hf@1 zD`xc~2n{%mMPo=HJ~1_i@M4C4T~2zG}eJw>4nrxwXQi 
z&uMgncod#EQ;sC*YLMCJH!V9c4IeqYn>P2#(pS+XbXWUVdbClB?2=cbYuX%Hz(Qjj zWblX@pG~AO9lOQe%15Y)>y;rr-~wuMWD(tFYfZN6{-)bgO-ayaEh4)sQrPWPA-GBB zK(SqfIOy|mAvQ4*zm`}@UptivwP{HxX^#!QHcpGy;s7j{u^8J5Rf1|t01aJQjnhpf zaevu+?C7z8zU?kR=e%2S_LIBfS_38Owrv=VxvGXEm#m?`rZaK2?oPqoKMh}9wh$No z@)Nfu-Vg^bn=jD80$O$QwxIv#5*7VALFoY*6sR^GIgc6Qf$C4vGYS`l@9LLndgF0i zzL?U9#%ILB_r=1u&P!;{h%b1;^Fpe7_M^DT(g1f44(WC!d~oo%Xw*|`FFd$zi#+Z; z!f3ZW9#g7_H%!?_k1s!uBTG_gtn7Ya!x%Ao_WKeOHg3V2?pW1+9&f<%FG$&MmK!p zaQg#WKCt--`>}qMjJ@T>IgBuYYz&!7E}qR3)ZN`ku}UX7^T6Urk_hG?YohVL4Lzuu?FqUB=YU zAH&=7BB}nmNXq;)*n@z1H2PZu9oZU0qug)OqaAPYASX#aj@2YP0*we)^BS934W~Uj z57B)uPvgl7;|M1oKq539*eUZH_`7=oy>32=ZnA$uh1%I{{@*F&`jCfZbjU^){Ob@_ z8uF>lx?Mr%6(sTFRO;wamkDh7t5O>FE|*qKSx>S%r?TX*)nvBKB+_fyEbMvZM2_{H z7o2Wf=kKjKfwRUJvwJm%iT~Rp=$Hv)PwyBob)!JiaweNym^`FcMF;R}w@GB_vn)2> z)`S%WKWF1J;)#jTJoaNIVUdB3+jXD-Ix~ECFd?}gDoN2^< zS?1E-18TG?Aef5e_LK7HG32~VDVeNwlpKFjh;s-1>07y1g5UKB(%)iFbds(Rn;{-J zCp(E8iUG7^VR5a@dL{PW_X%rmJ~`x_a1l&F3VYWzndolZ#wwpYq({;R(b3h{=<9?F z%y*O}8`V0JU0r;NXogT`-c&_0#V#ae{eJrS^DE)T?9cqV^{sG{Xr!REcOxos2oi@kpFy6d~LiUWGm@g`o3GdeC;yBvhif zPw4Hj&|QZA(UF56R&WqtQmbVQLta| zQ#_&`2-NMbuxNadSgbRHA^H*Zn3;-ORu$2g2I<6V*nSqQu8AhCawI1^-{U(a9_;3*`1pThRe499QYRHCo{q>0ScA`%vtz(T)up+7g0Nc?75 zR<7wq+^62dSI))?LB03zw<$^FeA;lb{AM?HaXHK`CZv%|EjjF^`WzNYr{Id}VRX8C z8xO~gfUlZ30Xoazfn~pieVidW_~##*+i;fu()d6)q^E*|PfH2sU4PZYUzHM^TE`*} zr#SScz8ppQz7wqXjl%IGwxZ*=hCCE=j|$&xEYOkr2AIEiMLhcc3iRT@dh|;#96dOg z2MabFK_B!Skm2ZBr2S;LaIe2osGPtHGMo1Ze$JN!61<;ZbTJbZ-VCqxZ~G*SpWZrD z6V=Fl^g^-OlQ8sKTLT>>N$AJqRCHq9MPb{x*spJ9Y|Pa+0D!L+N=wbGo4Fu{Y?Q{xR{q-G9MK*YW7H>=;_^ zp@C;gD~jFTJQFUcuM@m9Um@gbNaN+dh=cdn@m@uT(6Enhg(VXTgbB`ew0g*kad-7k z!81>d2I4aG%JUprHRZY;@1aeT_GO^^lIjBA@kxB)^LxQ^-C{xI@FL+E>J@*@^BdB8 z-a&pbXVLbUzo?@^0uQX*jq(N?#24%gg!P%H(DOe}(NW0>xN4OujU09r=@3I9N@AIK zvtgDHWj&r&4tYyV7v+ho`iyA%h5hKokgu=v+j_ybtPI|a!n zX85k4OD&4caPO2}gvYmv9omPp?Or!f!_s|j$>jIRpK!TJ%@WiX6 zztbo>o_sX3MNeL&2wxT?&|9&0@HVrP_=W0Na_F8iEA&<((%#Y}?9Y&HBc*|cb%&rM 
zW5<){&Jpy2mlO7}7>_l-KEsA@>~KooemqA?2Rr;p!3*Y`rM8b7XmQ(fDzWVYbsckz zo*mJE=DiEV`QDMVO8pmYT0D*=#3+*ZEBO>|@xT%Cbx1w10Ph~7L%t39Uq3GGM30h0 zLmd1{YPD!cL!%Xqw|;+6YiD@?@2dHS<&7io#I!Ih(__uj=Btv{ITP5)!*Xmo6VM|4 zIrO!bDJt2c2VXx*7y9o7<6(8)^lZu^G^5ZBZ|-tN?$4|#K%ux|zMP)4ak03@lM`~ktKb!+0cHOA0UtGV zAk%_(XpT(`5-mP}Hm8NsQ60uuDJY00X$H`dzxMP0lRaR#Y0n%9r=j43q33_gKKTFh zH~9a9`~@1b6(~Qewi7~>ZO0gnY#g(Tcp?hZ7i zE5WrTSzNGTf!%GK$aNIYhC$|b;LW|y+~2L{aHsAx=$H?oY2*&zxkZBW7@h=!O@D}b z@}+J39wG4W_y<@X`v6`Ua^voO6bp|=cmVGO`(SUFx60Q531r{n`^y*Sjp|A&9Nxz1Q*r8`mU)<-zr~8jthAV01e4 zc1;2)?yJEE_u>4Vg96+UB+2WZKMGY{21S-Tu0xgUc3kK8aGUjScZ2F`3%(s`@^Pip zxgCoy!SAQjz^m0_xFdBu-~Vn5=MQW-ua2i+y1`zk9bpc4&$Q$dfAqtrqkh1-M_Xa) ztO$6eD-?#vNONs-E&0#gSHX1&Z*afK2fR9x4aXN^uqm}4^mva2@6P=Lwbj$0;`4=Y zeQ^Pl&8>!i*I7Vsg-~wU#td-WdjNdjT?57Tru;6uA5iU=37l%Z2WUI)md}*cFT$XOkPho^zf$3+}yd4UZidX;%V@K+(w&V3yWZFmEP- z>%VW|e8rDNXYefGo%jpTE7QTn!Bp6lItmKs9|Dl<0zWjkfy4fZV3*_>yMdmu(B}S3 z&;mY*xLfw|Y4MPHIyz?V`_ zQS!qCYx9M{+@E+J<}B2Q9}Toc?|d79)&wGcvUW?V}QGsx(vixZv=y~0@!t}5e)Y>1Q&hxif&e>ac4qRfl=T^5YP&tO{XkW zTrvynZVuyIFL;Vtz8iz`KW5PXrxFz8(nO=fTw!BrBe$@q94veBn49y=z_x$vcP{zv zN6=FAg|qg(%3V;H2B)4-gU$b(;h*=f!EV!?qSRIYKpvhgLM7U^x5Y`^`~4X(?dUf6 zB|}1_eDEcR88;hP=Zt{C1F>-RlRoa#VMESg#R-sm$Q@cMWr9?oDQ#!PqX5lnsIhcP|98_%1gusav$-X$|+K%Lpn@ zEC%8=n_x{qFg*HE3iQFZqOeMP?xU$R3_1M)9BLJyLE3)!Y|MCgI(!DFyXCdrO5G6P zhXdrR$sPLJgCGw z-|7HL%F3WJRsvkI%!WEwg29`?3c%TRa3z^1fYC%>SfAklwSHDWUsC~w?D+r|4O`A_ zSDXa+>*{ckX%;+j-k6tr@)h3ZUEppv5m=QW#f=+~gd5`wcy8lwcz>QHSKn&Lm&~~c zRLc%R$8o1XRldkh@M7SI@=I9I^ay-kRtglf#_=(Khw~DD%y~$v;g-CWoc?beXwnrc zYPz3fU98r?y$y+iPN^aAUuA-*<)hlref^#Tw@1N~kx}r$qg|rSswpDy*%~C7-3Izg zBEfgV{qVP%BYbe%0hY&)hrhk90et)%aC&1Ux+;o>D>MzkryKd)fuMZuDfbqWkRe;d ze)Uv1Oil&rbZrqG9y;Gd`yIgYXEHE!wCCjfriiZPi@5itE1-^o1@O94&z;%%4g7n6 zpxpswc;C+w^vsaw=9$aGVcZ(gfMKr9CYw?&u{Q>Os9FU#Ek+^_CkgnlKpV`DmV+l3 z?}MKkR&w3(9ismH)gYo+5w2Vn1O7`o0k>H=!6jQvVZg1$u+g#*^vW=BUwf9QP-+w0 zskIcmbPVN^@d8eFzY5$kT?(v>QUWJq0Jm6A0W8|{4Cwh5g2D8m^C-54`OBTb>pizc 
zau0I3nu-$e{N!&ieS0Qcs=Rn8MByQDNlWECPOKFrb_9dr8?xcsb1R@iX}TyUDi+E; zb_I*PWZ|TERS@lCZ}<0_9yrsy1XeiA0n#FUP!*X9FLrN-u`-$P@hr_Dr}`(+f@Dp& z!!}oRsxjXB<;6Vi;LqJ~{zN}`)M=6E(Cd-V$nOE?v}HIf%-9PPeuawmY+WZhRR_R` z*qgvGKOFc~?}e?B2&y4l_$kp0D(|QSIY-LC=2ivK>FS;Ezxpwtx$6+OW8!Y^`H7d{ z$)cm&9Sak%x6_RirrCnS&e@=2VJXl~=m$@orJ?SgKw!G7Tx34uKKI;m68LYI4CDuP z!1jIwHPxeli^XY*7FC|0I#yq-UJQl0)3&dKGZ<>H!dEb(|Z#uwN8d(#Kuw%myBB3%R&wTCmb}83;Q! z3Vi742D8tGa|zQPa*lmhMNUR505z8c1$vI6t^Nx|wyFNTfW!}c~&aQ{e=wCNjB z)y!=mX2uCFbzLpEe>fL}<*IV4ZAXeiH)V<F#s*gu zZijlNox|Hy@N(c9cco&wXr8LN=&wQu_-VS6+j2{m`z5O{I->kkbY0B_$c?_iofvr- zXb+}bPjX>ddDR=4ZS}yl&1DI`a4J-|K z0IEXGK)d4t(Uwe2=#SDxs}?-4zOta2`}QRSUdEn780fL++CdeVH~$P5HBAy)UJZpy zax+C$?(U)`8Y94Ug7tngPAIxYd5UIsU zfkI;iaOa6EY|k6&8*+_d-$6%!CbV#Q2fm0lZ1M-9Zv_7G84rzgKZ~|KN`$ceCYLB` z2ZQ^DRIC1rZBqiQ!15Er;Pp$A;O~%8-I~o zdN$prdE+HevoxE(qZh?%wZ?Gg6esW|GJk-=Dr5fO!5n^tWf-Rsoy6r1_`!fBU!l{^ zTsY?GVLs(*AfI3C%TNC2$`5{TgZDe`!;?j0xNBz)^Da44py7r(@Him{NVm!Jn&s!f zS5;T&v1bP;-81wa9RVsQ-G%>rRQai)M*IVt{cw6lGpCm(0pm>;LaCKw`Q74!eCpvq zzHd@49JZnjwAt2jFE@KgGYEtPfMZDDgYL5NNJ#XP5POJ`|QY z@GIq}LMuB{Sh1s+FFz2<$8SEym)x7i*90_hD{M>P4vALL^9Ac{U7J>bHK%6tt4-|q zvW3${*R8W*CewxPi4CwLAM>%#Pl6YB|8d5X1L3mCiu|x0GojbEd_K8TiT5~l6K=Wb z$2&_;f&m|sU|qU3cVe|6KQccUPWQRPtv1i(#xzRv*JM@Ta-0HI`4@3P5sN`(U>Ce< z9RTmow&8vIOnJ4@wIKA`Nv_AYm3yU859h7ShhH!5VQQ?5P!(h33@n6!{ze-@vZ*_ z@m+h@@?%14pyi>@9Q#oS!e5USS@Txbc5PN%{@sz#hkgS06TaEW?>q#Wjn{Io!c)Ob zIR!XNMo-kCq$tX~dYvl|2m(G?iooc~C^+WS4G{e0GLW9lfD&{DyC*t;fBMCCJCsMj zb+m>PGE6yCt-x)Q+XL?H7I7O*tGK4F7oyG`m${!FmY`rc249o*gFBBiz&WSyoTJ!D z^zrsm?&i-nZn|tb@H;R9mV4a+-|IC%*#SqcS8A+i%ZQ(xVD%exRXzq!CNCAGx9Gq* zFoSDPhy*bUOSp@1JvQXTd9FMAESUJAn%g!xi+dEO2`x&VgZXZ1a8KeHAY+Y1$LEX& z=Tc_aMM<^(Ki0nV9m@X=Tehra$(Dq&r!e>Dx@J+zRw+>=rP97A?c0|&`@SnX5lJYt zn7MC}B~nS0LQ-gvHngCkp85R)&x_}H<~ZifyqM!YWkO#ra{|t|6XkVjJ@q518ANTgz;Nm))nKba=@$K>=1x2Y8}%Nt zJZA>RD@vfBlP*aQ-bO_|mAD3wxZAS>Y7SS$!l<;iZfpR7vW98&&$F*ZeNz$?v4Vo@W4wRWaRi%vra z)Pqo;J_^!}u|*-Bsx-N1R@VK5yv&G3*AQRaPnA&{Kh457yg_okNCQ9h$6%Ch7}XvX 
zLk-jeofUR6C-1&MFLBY=&h$T~K0oA65sRqFfx~v0vy4pYC5yHZ}5z zUDjWy2uLDovMb^KyeLv!xgNAQIgsDUz}*~iJmoYEyROGUqi-WsH!K28?Tg{eB@MK0 z3qvRA6_{Hc1bd~rNQ3t)iqq%^j%6osW$qdrd)QC$Lvm2|t1P&)C*$D<#^9;F&0;LX z20lHRi>(dD5Yi$KrxG*pSAYjjTabow_KWburY!2(j_>3-b%k{yOx)b_>>lc&rYh__ zA`00yVN}emk0klYdCKR1`>|*iEO|JCdV zDt!;h>%U44SY(qL#VGRYhOi!a%O%lqC&}Hc$0T-88sv2!k*iu!B-P|C^)SJK`evm< zLNgU%-iLNVe$S^?Sgin?=00jXAecnzbx<3={k3?|F_k=d(oI63in3}=r0&et2j%)c zQoB|IG`yr?gM0w%Ta!G#X-uS)YHygwR2P#~YPP7c#tb)BBQe@2i;wL5NO3?PIO(p$ zp!3#bd4neP-`oarqh~YdTiL>{hMB0)GX)LpSa|qj6?AbL0mGsp=-Dz7{S+|B)DkC&1qA6#2bx73gtOiEWDyyr`HXL|I}%r6>jtj_1R`>=ekc@uRkw>AhpvtzAZVEeo6irBdlDw(m*GNqnL8Qlo`~YP8U=E2p&yEvoFo13OW=r= z7z9j+v04@N;A&t6bf3IK_FpR`%kHnoWp^iFhr2Tl${1i!l^K=MMFC?O#`-v*V-fAz zLVfi62A$+SoaZM~HTQIY)z?SXjID;$mR3mC`$sMov{NU73EWuy0ZuT(WITE+CMjPA zkA#(QwbU5XpH>jI-vUyJN5x&o1}ZJ_UG4E1a9D)l+899}pn!`#4GL@jeI_58It zQPrWr&1aN+(2>R;_Xofsa5H(o)tTB>{h4~V;1<*x&jCNvx#(mpiaXZ6grjZ6)ZK*! zlv`*i;SG9X^G7l4j(EYUlBfcGWfnMn2?lxRwZzIiG__jSwqz%&P<=z3sSh=S!%nlx3e7$iht*L-3;tAD5+wW6}dn42aNV9lq9p z{_eM+ukQvqyCj)(TtAHdzm@AupBVaHcElxRqbM?m!I!HI@w-GfQEZK*#?Rh{FJJy*ch()WSH4ZXPwvM3{EL7e zx1jH@eZW!K&QfFfLfQrzWlakpTip*Xxqd_2_-5RH`WH5D6!vDRKR7C%bkmH{Zl3S*qP5x^b{ z!d@Q-6zy=s3(dNC@!uPW+!*gxz~jP=&TsToNm z`O6--*OCAajx(?)=m+?w%3`iZD*Uahr5>!71=E6ga8_RvW0L~VO2H0KTJC`+mwxgp z?+^9d)(@84*okM)t-yDu#;BV)7jfqMaYEk|{$@VLuvl%rg~d-h(7CFE3zBDo*JVkt zEKI{9DQ6USO~!|J>~Ibzn!0xWJG8VLQ;TKRncKd;NYY!wans@hs4$1cTIk;g<-#%E zle&}f!>u#;<46WMcy}rpOb!J}Nh7>-FBHxf)Zq(z8$3R)jMx1G(Qj`QwA5CCTiheo zlgh35ttSMgiW-rKTOx$F!~g}WX2B1WUQ+c*oGde~AU>xh&}uLhB=dy0kBkf#H^}B{&;qEm0s||9zt3Jp#cbcqtiJ zmrZ(X8>msO%cRAW3kyD2K(J~esO?LKqn0m7M5Z<6b7&E9@&86T3L?PGULJ2qJ%Rhy z1~6f4M5I3aW$ktyB3vOpm@w20@^uTT^E;LBR82Nf?-JHOle3A^BQc9X`!W)^vI;(2 zt|U^DspM3=GN$Rh2RN*Rhl?)3#^(;y)|pX|IZJ`cD5^AT{%lAVPrDC#4{kuy>UgT< zM;MIMNs=a=ouH}S21+LmQA68OSWU94$ThW(Wci)@WW|@u;4Lkz(WFa2O4$L9NiKrC z!wN9T{X8Y=(+`pnK#GQt{Ny;1@EMw5P?<{^CEO*e?#Pi`nYY9vuZ(=Ypa3aV(?Gez z96p80L+?vTs`q6qG3#|Do-2Ie`k$+CquU?Ww8s#`R1cCfGmaYhuZEP*jD-SzI7q(= 
zpgJ5xa8i#dnJU8u4_+Mfub-+vCyl~DXGR0;m?)F$F9GtmB)ksR=WoZG)F#iQbo?hs%w~@+FJU}gb zs08cAufRQx-JtEV3GapLHb>4}H%%r!|UoSsca1M|t3 zrhAaC;70_FEO1=1b*q6bzBC*iv^ z4>XRMjaGV_z~D_Sc_ZCUH9U5PR|X-Jc%fCqim?{aLFeL zx8`ieh_ivzg>7NTE?59bUnV06zyF4o9E-b46Q&e-picb~kTF$<$;T6Lw&gL*C``gf zD(X1c_ktQ(L&5&DI)+Tfa)00vKqvw~y|Ksi5^Y@W9QehO+JYyNEV zm35k0-P21BzK#aBh*omhSqEcX&qMaib>O4Y1u1S1h|WwUxFb=Bv zeLWTx@FfHcsW`HEmp19{;Zj&7N}^-8fb)ZX^7E1q-+7r2IU$Z@-rzi{Mk0XHb+LjO z^(Nqi;yB~dJ-D;V3y$14PwiPbgLU=yW%5r~4Odh=fZ)tsteJziL7n@Q9KF32sx6&J z_7+Lj*OSW#XJsE)tgj_^Bf^MC_#3Ey_!jWpcUT@{0~hO#QkQ!|aM<<-b@ts{^QRnR z$g%3h)P#GelB*!Rul;f1PAh1=auiQ2e1m}td#S&P@2ENVW8v{@eY72T4fI$KF1daY zGo>!$(s_~S_RI_qZq~ucaxE0v_v06Be`sx1g!=u{pv-R>s+D~uTTAjm$YPQ9MG;=X<|5Ld0X&xLiA;hD- z34CPhF(;)E4G(b0n_v-kvzI6A-Rh3RcS2xu&n=7f!D;Y&Zv-j~h5$9b8N3($!bK0S z;^ON+@#3mfZ2R$tdhuu`?wjmL1-Lj_u*AEG*1!`Sy>Ls|Q(G`;~+cjd;*@2TZjg-|L{Z9RcHnUJk!0OXtUGsm$lI8 zlWR+Y@5~^ArAb(M{Wf?$6@#1?n{*{ zZhj1=ww;fI>W~9aXI4!Wgte1=?KCn=_aDg!@`a<;AIWfiCRO~(To+#Ci;>jeLRMtE1nD;Y46hWbNa}#YVEQ*>W+{js|qEMdF(k@oXCMokL4lH{~l%35y*P`A&;E8+yyRr`EVxt3d>+l z9c;cGL3+R*CdaNMk*$^%Mte7q_G1@>{dFA45ZOqwX0*YM^y{!{=L@*j;|``qT9ofP z0m?Zml10laEl&10f$^{_dzTRZQXbw-tqs3~ze?p%ed#@%D6nC7G@J(s_bw8)a2I~w zE60ABzZxmgRCa)_3R`B|GaOf0%SQQy82Tp?HHv4D#pXur1m9hFMR$l?p)*OreGzu> zdSyIRkpwX=`J`x30DOAWg|@%;qhgIAJ3L_~dqrn8*o0mpyM$OiRA0w}8O4~?38`%oVo7gkuyx5(IchIqHoHRwKW3Y2EwW?Fv;;J-5x>Py%QP&8^ zdcp|jx;oZ>SWa3k`XG4N5u+aD5kC!4N+KhjeC3-#{~`_~$%>$CrxeP0*`T^i5-4nX z1E<@=p!Iq#DVly5leUJFu|wa<97+Rz2y4d3W5z`O-+z#=UP&G-$bjV^Cc|@$c$o8d z6I@aYg>TLRcqg@k$|!Y$3bm(Xg|h@6egm)AX@Yq zcI9$OPiHVT2@a5*4i{kGaY@kqzK!+C)dKWZCj$3CGD$CtB~xfujPGfIX(rZ~rHFXj zM~1q%GYI+@H?i&&l$+NktB{wov`{rk43A8I%Ca2wh7bC^!AHeIaT`V@v$RR1@}8lE#yJcy;ADIpJ3|s z^m|aSc#Nz&=Ru0oe5lhB_GGGS8JLRAgXgi5sFObmO)b%6f6fUi!@i9&sYr&WVJ9H7 z&mOHTr{S_kcVPI?GpesSi}ldy2U)JN4gYniV{ULWYs_2uk)wXo?3*=VC`?I4GGnQ%wibaEba5C0ZzHezwIC&emzL z-@lS%52@Qx?7n!EwJe5zHJNCYt;{x=dx;!gzn9279tWy68F%kj!_gfJ*gM0!@XP5+ zbUE}BGuJM__MdBU!E!rRN|FrwN7En-j7);FCB5XH?j>x~2!IBX)4@1kxevQ9DfwJoDNpUl%dl 
zBVo#(kuS<#VmO&x2%pZjZ|nqRk90ixDi2s2l31^tufgvXzwpEBUU+4G6|RaIvH8p8 z*u@J>*F-jy8er*tPHRbhauz-)O@P~MECKuf$7m*aH2>k4MhYD)7vQV32 z0jK13*s)V&*vmaiS+it?&x1mpRZkkWWht@cZrQ@Y55gKbYBmlgJ;d6pGqJp27rX9! z50353$78p(*hbwNIJIw`@JtJ)wEQQrsYE>-cep`(3?b;Njg0B&v!L@LXMy}Ve7EoGaFZG#_(fwCQ? z&Y6Jhe{0!i%T(C4-X+wJTm`oEb7c$(FTfn@EVx-VgK{y-gw|wT;Vk1646l0tS3^wL zdz1{>i`#)Mk#io0ddtXpCm!C{JWkE2{%3CbYn*hS@5Q6zwOI6hfMuIA2L;vnQ1sOu z{~T??><<7J7KIVhj&ESxn}W_Kx*;~-lx?G!g}IZqAWQBu#=mNT)ef52LaHe5^`+=^ za|DWq_K*)lCginLJcd#tQg%(q} zKIE1CN*w=gjv@~7?9{qvXr^=;jFr{yx#t<_~PCN-P=bW$g7HQz+#nAIb%;T>c| z9l{UG-9bM`of?r^1)E}iU|n4c`lx{2+e2CP!Yl z4wxyQ`9{Lxui%D)1pH&-NLJk6hFVk;DOR122I4nyA8#`G*t48E6ao+zTn&HMT?IX} z5S%jjAIf{i;=kPn*!x8lBSQwjICweHkE}*fo;v(|p$_{)lwpR~R$rBcs@dcFkqAoQ2qfjx;uI3gZ}Mwo%qpRYjU4kOH2D}zBrx@3?4 z6U`X#;wnmf^vA8Mqv1j?3wDLO5Q&wyV25}jCJl$-huozklpes(A(}AM z#zK+HhoQ_Q+JZ~P!B5|1n4jwimxQ{Z{q4`O<9H|@+4dN}&xk`C{Ss=>t_x~oPg9Dc zIc8fHZ6;kl)3I=hBtG+NqFnV`z-zKCxsX@{?=R2BS0Xf(^`eVau%>|g=|s5KpauQU ze+%K1K3L!?hVNgcL+!5!xMu4HQ8}H|>d$7FR2N5N!=s61PZTk^xf2e|7bVtz<>8Nn z4Ow^F6aq@5z|e6!_@!(Hk0pW7)3_YQ4%<;5FTWy{i8-WVTQ!`a#W6?XF1Ve4P5xEq z5b5FuszZMo$YeePjp(PKq4SEmzugx3%eNEDOJT4OTFB@`vc&^?5tuuc0ORW~kcpx= z5~g5`m#vy$RDCAam41f`wgMG>{~)~3eZiVvS7CPFQn?AQ<+u=);(cUcaxC?wb3e(r!Gbnv1y~at1-|QJKxym( zkv(8Y$!?xZ_^U*~WZQbUa`G3%uoyVYnFFq-KdFnKudp6i)sfq`$KXL~9aN=FrWWT9 z2sx!Ja^%=n(7EVGPI)?8)YU|g8Q*S#)}j)Uy(5$anf(M+pU2>?I0iG{`oWi-4%EJ< z#lrt!4mEU^GD|kToBNNGbA2h$>Z^>CE!C-eQ=u7G>Fwu#3zaE zFgkYzoXBz@pR!FUg-%hB7V0i_x$i)!^)U6*Y8HMfOC_t%M1aJpQX+k*$Glg$kvJtW zkn*64NVH{=K+~BhRxAl#g+fKF5ucta5CBjcR8Aa0pGj@Vewcy14K0xM!5B8|(jj)a_hEA!)M$jt&DrvSXs6#~)ocEUW3Axu&!K)b*F7(a9lLk^0Q zRgo+4#OEfe-e|z=#(~%5)bkRwSecB4M=n$Ux!d5S;VHnXVk6hH6xT1wro7Wis9J|Z z(9|P^C7Z7U@6R;^+fx{;@E?vuZNbA!$6%=HE8Md8qOKU1qUC!JxTW`;99d9B4%cf6 zb>7WnB76_5KbS;}gHJ=%gGUe%^a*ZJhPdh~qDz#p&$anZbsMRJQ}G$FerbuFUkj03 zJcI1sGB_#tL!^3NQ`$NiFtt4uz3oHs{vA~^<#!#vb~S^COZBm&-WB3w_F6;>b+!4v z>u}`aCYad71wFM&EOH4Ka^N(6xU?OkYx}7A*`=^7Q-;bPZL&Dlxty%}?St$73yMuw@ 
zY}j`w8y2r^A?@Agaq(@vlH#(lyJX%1u){fF|v9Gs-L%l!DJB<&zvFblDBXmMIL`_ zuSDnPF(@K66(2`MlG7Dx)Pj$}5OVDyrmjuEngyFEsb^`JRuKYjJw`Y&*AQM^|Bv;z z)dYM-&ZF#_U?3f~@Kw7MxkcI7bXtJ#p6FxcvFRkvzzx3JTxN~O+%bQvFqLp$OXB*P ze((?rrrbL1z;@9u;;(ZO1gpNnh1cK7vsnYwVRh*O*duk5a_qfDJ$=*wy&BD1IN zLysOgT$LzI{W#MD3GHmyFP0?4Z;q2-hkLBdnghh69`UEX5|}KjB4)-@@ko^cj@;A2 z(4HM2e&8|1D> z=hls{yDp(YQvtkNdKL!H<`9=x--R0Ea#WaJf-(mZ$UKLcY|9H)pjo#YT{Ln*^w)^R zjqbaUIp%|<-lt)Y^EO!JH--IWaS^UnkYXRoZ9raF6Q%M`AG0+tQkHeiW_oCBr>(Wv3?8!82SRRYNMboK+)~P~W(JHw3=nF`U$Ahcdd9+M*!Z=?$ zoJg|42fJ!v^+YXnPnDy5pGV=)oP|(SoJ%&(i6pB>#Zb)THYsnWz&dLysY_o7K6*Ey z;Zh~MPgB5*gPQ1QasoQ68>vGgePr9O?GT@(g1m@WTp#3y-PKueGU5(N5Y3|WBTqxg z-$2yauoVpyd#Q=;4D9Zi4(r^caQ%4`xa6?KLS)4vXiT?6!nT5YHJZ@norc}YyM^a? z3f^%c*!#7f%3tM<-`~8Vy4-5bzIVSNJA+D)^(G0gUu&W6m^t9$*)o88xY(_A9Y2;< zP~HbGP^C}yLZ7`D-Y~fi?-H+|`p{|2zH}DX^E~jT`6MJ|gRmexm!h0*;F+OSFj-DG z?-2Nis7tG(-9!TkIerW#v9m~KR~RHbX#nc&S2(GygUSQu*mb)Q#2Y?SJG}M5%IP3T z4KKu_e=g&p(f`oduK+ZZB_Q$eIOX^|3*v3hp;TTF(mWw=S#l3^#ErrJ&MYjQvlix_ zkGF^{^8sn|mB?P|4(u%mR8}RbUJu6iWqh>j*n=jM+NfI)0y?}0tXfx13uo==WL3Q} zdZ)@`jnyWqR{S75u#6ve4H2WUuT*uEC-j|>#l2UIg?&a0vMN8reA{$r z*trso>}B9l+eU~S7@;h+ena|`-QW|#r=EN(rfwAsfZbJn2-~!Y%<>7Lb{elHi3_^H zYcGL|`ufQKp@hTB63P1fNNQ%p7`0vYJ_K@33-P3FIOC5oPPz6A{zbi_YDXogiy>V)q z$pZAB6bX6nMadP(I4txOAr}^{GH-Z5U~S}g+!)_3)RiicXOL^x~V_p&lL z@*x1L8b4!XR~d5UKLe+k1O070va_rUR-WoWk#QQ?C#=bVHfgrk(qrIU<%Q*$agZz1 zV8PP71aK+^?cKe$V#vUjk$Q(UY;M(x8-Y0rUG+@p`~%T>5AMlHh}|xxbG*J~~QGKY9!T zJ2saM2^ebD1Fg@eEUGszf>#@~Fn)#>%ybfm(@y6wL(c)v zZivS>ISVj+_#8D`nGZGx{<1cDyfeQ)`7l}3z8gjImf+=sOQ~78mqFvwBB2JY6OuHY z@pirf+<9Y6qC28M*_PmWogHv0vKHCSv+&XSE~wZZChYx}fT>BAkV6@#l=A0c-(Ui0 zwGC08P)jv->tLV$QqapvClda*sPyAu>pZd{=-6Wyn%l9J`WS?lyz(AmOZ%#6J#Zl^^3J!p#ybt6Dy{a-wYiiQz<=@2D52C zFR6x`VGx$IAMPJ5pc-x6LF`E~`QUW_ceqcGZL&+`=J)f^i$$~ z&&X2tTJrZ)7g1%d5xV^W2`%|bzJ1PN{nU#iM`r7h;UkMc=i_P+5o)UDlsOQ8$%RDe zc?>meMkE>ArU&uXbA%eyIO@)oJ8*2Ef@(5-PevX(5Vxos7Bb%g$)2-s$bPjE>RHMF 
zrLZy?(rt90#Ucz&Tsj3a`?j-4VKkgoe#9y)oNRup4G7-VMAMIw7&UIi2zjod%T~`7pL_IvDo4p`UXX9Pumy=hUg__1`SerQKj>iUaFk zwJ6RzpbUEgA5u?}gab*2<>0;gDhaq8OhyJTP$TPi5TD<%@TAQGDy`qZ;M-SlsN@=X zc< z(?J|0Z{@(;`E&p8=YyYNh_7y$a)V4szc!E*Li3;WQZ?#LJdRSb?Rb`23aI?6LNyA z{aXS56{tgE_c7d-xF1)(KaJNnbb^!+@7QKF7v^3JWR)CHHGh5KE@eGf2T^-3!M4P6 z7W0KQLuQJva5jkx&6{q5*w82Hd)Q)X*8Yd2=j#bjqVJLbcWJyU9}e%HIY7VG9q^xB zP7G5f!Lho9EE~HlxIN_|VexiSvMJN4#QnRVdZrIK@1aCue$1vWJ4{gF35&q<{cAFR z<9XQa9|Nae%oolFs8I`*11M`zO*o)D3r@`XD4dTjfvc;Qfa3Qk>Y&6g3*Ngl^4?Aa zuR=Mvz4K!oepd=(nI$Bw$pPMY*^{bGMHb4l7n7N*??6FD39(E)MP!?r;N#)@@Q!JR zjeG=~H=b0kNd>I6=TLu+?lU{`*MZz!HXFyj$>I<*iRyfG6%173N&ABWaM?c#w@YlH zikmE`x$N&GXlVqrDd|Ijt0`_*`2_ZdyJ2)rA4K}DhY5X8`0%rg)to&OMciMJ(s>-x zFfpC9`KN%b{Z{f~(|uCwI!=|PKPN$1u5dZp4FYTmAUyE~+ z$*}gzcQ7`71=itl6j?L})rP~!j@99?p;kcdH9j`Chd_C_UXGg|2IDplh$@(~Av1 z(;k&Ug0a`Fbbr=SfCxai)Mo?V*EvU(>35Rc0v0h@QJNN1*iWIIU6rR`AtG zjgdfUTJPmWdYi@^TJP^wI&$764`J|E2_>fIAGC0t-uVH-W5D8r0=Ucr<%iZJ2x)9FW_^aPL2 zSTVN$SajaZH_U#m*@9II6`0kNC7F;r6zzT}gI?!1Mn}K7PKQLRGr^8Eg7(~3bVScz zx-IMy6B{keEeN(}jJ$yMcY8(;B{?wlD@W*OwR;(P8&|>XDHoXKi{A^qTi(zc*h}dZ zCyz6ylm*NwQNCd32_Jf?_ZaPNXw95dx29tnhXoT_d9KGfbdJO`T5*0V^E2Lr6u{H|WZh~4xTERPH< z-*Olb-xhL(zSnHY-qSAhY|L6Cc=If#+|O@JAoER;zEK1M{fHK}><%s`Qp1STxXO+W z%$KJxc=7p%r=DgHy}Zc&wlx_VJ)DFcQJgggk_1QoT`J!{$Q5|adX7g`<_J86b9{n^ zrGjWXS9bY>_vKz|Ed!%*v*Jv)?_*yRjVr&V&f_NNmGNgfPq2fV^Vl9q-#NdHzOqjroW{O0 zoy&Ia5T_mNj9w}f!{*QNAX8F`%6D^os2zs zO3fPFsfzEox}TQv^zJ<7R%-0v36V}t@JdUb*Y@?i$XT+M(@rLG?N+Dp_HOgzCV0-{ zJ$#qXYp7ev)0oDU>hea()Y)+cW*S#{6zhVK?C&+3LWZucyK*PYC1ITm;?~12NwDdGopNXWilU z8;|nNmc;P5KXiCo#^c!R+DE+bDJ|SpzV^HqXIwbdg%N1Lt>X;vIF@36&T*}V*H~H? 
z+w{HRcZ>J+Tr1aTMUrKp`hAY&t_a?m-=#cdxniza z)dHS`Qx4C`V*#(q;5JWV#TIUW#5vxwkzyX%yU9|1FYpX>fAU_fXytOM!+B@FNLyZ$ z%H{oZn{9c?dYq%<=3$vJ5x`r~9L`h!?>$#LY?kF~bGBv9r`_BYsRq0?(|ma5mcMz2 zp7FUaWcG7w%ME!3t4?$G$XD>977cPAX7Aw*u6V=UzdwQJ(N@U2Y~I41-f)=rKGV@M z%%q1GQyjo!zZB&dZ_~CMUKP%}+1k$gkRZ-ouyPR9jchn2eUeX0gyEuhmnp~|P^_)@8 z1J1Vs7w(x)vD|FS9*(_~E@vw%f-~@Ti1SzZD)&d-C2pw0b$9+{2k3FUgA%5pb8S*~&H4e8>6ut(o`KFBd%*e`CMi@q~MJ*p|Ce=y7j$ zLWVm?gt_L&CUJ!k9Xz?feO&qXBAmaG`#H%G8r<_aI=pv@9PYvQHDE>%z6?f7?NZ&JLkJFKdPryq)m|%u;P-KhRXePn#VjQDdZB9%2~mTFSx@*pSBX@ z`pQ%o`YO|**v`wlB8`-1=2nwi&-8gM{7k+RxdRU;$tfwRC;2CC&XT)=X@^W**xUntmB>9VRG6`cQU(;ebgV*wPWb2vK z1)D)L)nDknagHi@lV)%qj!sKq|bT%7JM9$qs8l;>Cyg0^uZOrbmH-Kf(vR6 zbe)+e-8C{r=n0oVzevfU^;gFTLh7t(txq}he*?>C#=4aj-88`e6{ybKjccO+oHeH> z?BwXFiyLX_;hW5Bo9q1KskH+4SCzD#_AOfHo-#c{bV6{b^Dtfe;FUnP)re_**DENt z7iOAz6bcSznbOxsq?m7tW^|fV2%TTqKyOqL=8uNB(8p4z^GgLS^!Vks^hAa>6FzN? z;CScC@-)s6B>vDBdc@_!GyA=qs}-t(Oi>5HGI}px+|!Tk`^k%wC32Fj`Dn7>kJfFr zX7?n{-tFZ$s>BndN8Mn%dTruU4O~t?RnDIF(vdR|wT4rE`a1ich-&$VLRog7bt!-E zc3FYTi6oAj4WHd;aDlCLs-4}YEyoT_+R2%CC(9PU#N!9v?!j-6!a4n(f^R#E*nOe- z0{V-7d4Y7p&HB4SzxCt2?AvGh*#ERG_)lZa%bEK#*&>Hd@^dx5uw6FJFP|M8$Nu1$ z#^wq$Ra@vka9T%&wP8YCq}#II!RGRTQOOf1t(8;GTZq>G7IUw@s|V=d_QKusgl2P zU<)lzQt2aRfdWxllD>W66y2T52#(q%(r4zZ5x5ul(~0v+=$;}w<{+xkPo?hC#xKhR zg-J*0PAL&4@M9d^{Z))Pz3mJCWQP?qS9c4YP;i_M{oN_peOZqAy+D%*m0m3{K0ld$ zX+DL1bMpZ`vSgKD)i_JgcfCtqQ| zm~N!)ms&DoV|VG1^@g<9L{WK_^b{tV6-KA~*3$ET_3;ZUt8cZNsB&y|`#C=6^6^N+ z6OLL_d&dL3UILV#X+$S6z?v2aooC+lwPF>v%wv&c6_tWlj?kI=NJ?%Y<^S(fgzS@_= z{}jZrc97vFnQF8B>_s`Bjw|!#E#~r$><;BzRP(^EGosj>7Bl)9B`r`R^XWu+aqjO~ zOSs2J^8`~)KM>gN^5@)1twpgv_FP5LogB9&1v+DQP5E{WUv5&HiQwL`0~`$tZ(+{u zC&Ag<6P#k-D}pM?qwI$r7X@@(li-Wze%jmZ52xzJOnUdJ7XCrKBEd<~2>$c*9|HNi z`J7JKB6NK3MN{d9T-Tk70^Ym-IIZ)x(+UR_c&B~tmA}qw;M=wdv%HLt(x7sT^Y>^w zd(W*hj)C)5j?bEKZs<-M&g>`;!LZXd_U2C#^oi~OZhdwH=f!4W-Lq*Q%?;G0=SC-U z_Wg*$`KIzTJ&VOH4fo|O=&mGAjQH6Ph&QTwoKY6+Vadz60ivWiZbqEtRZ zNs==h@Si}}a|VCg?s~eMt`khUvz%|0GqYSHqmuUglv5t9TPyfeJ5?~{=NwLPOQGOR 
za1j4*?^3~&oX6}Lao16NqM9Q*bsHUT(8#tD^X3Pt&twlMy9m4m?wmQBrqb?OnSAXc zSz+Eq8vQ3cMX>ouzrgDq%{lSYm#J~d0*&W8aN9i_x~MLX^F8Q{AU!iraPiDSdddb` z;8ya9epy~GI5q1wdvo0ezFKh{dy`{3-E!@TK;Jn;xW+U1ZG17divDN*s|m023t|== z)AE~iO1wc$H@l|v+bv9?!m z_(>&S#%4L4P*WxtUb2;mzF*H@>gY)qW}c-N1x5?b9{M7Pbq%F2oU9V?bj#^iX?B9S z`?k|Q-6`~mQWIvay$C%dIY7^;zbmj2v8PiOJfZWn$gr}`!cx|*YCvz`rfl6#1tLuEZ}Y-q#O-?~fx@Rgx=Pb@1BI4#cHT<=4h2PD(& zzdQKVQkQSltNRNg>rE^6UW}uc71&lJ#!V9D1Gz8`v&1U86Em1qk7h8-`-=p}MuQpu zKk4+}b112veg}E-H&U|;e%miB#+IMLr^WpUcW=)7| z#X#&R-T3_-bE&_IW<3mKw&ad8Ly}p{yyJ=$Dmd6$dmb z{=E6kyq)PrHxy_vrVN`|oHfX3?oFnd>pZ&i&qrFQuiGD z!2GM+daWd`Y3wIX^SN4E@CY!TQHZUvz`Bw2Ks$%9BnqpW)5FIPiMxyVHR(dVs_1(!%WtzrUf7NGE%}Na(~Ag`nyapqqs=D zqD${O^J42J#{1g{-`{S4SvDn&0mnh+@vZ4}k;~`ujOi(y_W#4$nFiGKh5tSpRg^}g z6ooXNbM{($uU%$}GDSp)$P^h$5>gs8kD61GGzd*PXRVbXBvXb|%9toBQzSnb>Yo40 zdtco9=Ds-Z&WnA9z1G^#^L;)a?J{0sppCql`~2vZNqk*SDF4nlluv%E$N%eih2-V3 zxizN~`Ipagk*A4<%|#c_Og(F>Np*%DRVW_BM&HNM*qbw{&!aDN>Haw4zVjG$zqW;V zcBoO4mofBDm4KefDWJu9(RkI^9_noUlPH~7!;Sh1% zS0nW{Yf#U>nM8k&HL)3fLQ=~9B{rKXa8zqD9W$$3vgPb$B2cIzT6$OUixwdX*mIhU zHMAy4rz=SJt))25KaFH1<`KK3y)N}M5g)$-rNyH=IpO0DPcdbm+E0M#~#pC9#_d0KLq0W#z_kNz|p1L z6mt!}eu5AgDF)fZ?9AnkRJ5Zx4JrJ{FUrXCl$1T(63z!M>PNFbMDwWWA@^xy5Fh$q zJiqJ$Z=+F`i%Pt%@1%wli)c=@=xxdHzK7 zL0;KrsU(v>Gc*tF1Im20@I5m9{g5B+x)Q~9*K_}VC!qRu<(#9xF+XZx4x(O*_{5H4 z+B;#f zWXNJ48GF!(jM<%zhZV>Zwtv|@=+(r z+Pd9jbjwA&|F$Duu-}XneTl?J+Qj4*r%9Y0^6|ru33&JD-*}i!AyFT34HwRhAjQ>X z#C+>=q8K@q)U>=4&r6&|cE-k$ck11w3!C73fBG+#1nhwq-p?Ql;9iM%<{~owP^maL z@g5gax`gEH&%^!6r>f#(Kf{}~A`iZt$7C`{{e8X zQ|g);FUNfxp~oIvS9fWYLGGo58wVsqdS}TfjnTMJyMcHY%p<1~63Cu{AX2g0h`e?@ zhRtOTipykA5r;F&@z5@P+9Ka6-dmVSI*0U$gm+`C)@1b+p2(M7mk6u|@Y4JkQa1D-&QUl`5(ei<4DFkUed}Rzqb~-p zTzC%ux}8iOSk9;B@tqR)mK0ntG@LwrZAR95?8YBYCgQ$>8l>XIN}M)$iMV(9VEe*% z_}flPJg)3Hp7!S|F)zA~&;2$cqq#him@ln2WW2!|>b~M8{WHY)QU+-bC?;w@KTAAL zt`xs)I)h*Wy4rw6Ad5xoyQv2k5WOl%Hd<)7QArJ)bGp{qx~ z%iNdfc(8QE3TLuwk|&YI2*@Fw_2kO+5b`FxgUpCgCjoUYG5T&#ZqW~a zU>|;Nt3q^j&k${Yo@~?nLJqu4#ft4sqzPNnnUWr&_9K@lM^}=(`%2Xx&Lv6W_!@k_ 
zLXENq9+CS29O-Z6Nrvo8GJFUSPscENecVZMsO=6e>21S1?BjqG;`kz4omJ&@TR3#!X@3=gf3$o%!f^s{KTakl(r<}vqVi_D-`v=$P#{%!kws=$7Xzcu$ z$M(J>$vioXPlYReM#i{dTbl zuRyg^mtyVDsx%PyvcFkZ+&>^D3txGVH_j;-ZEF*ko#Z65lkZBt`~vjr-)KqjgDa9z zUaLq!!E*7Q)-0^vY)>9m$4h#OkCL}v!zF^Hj^w`=9>iTwj;1a%hhIjfkqx8v;o|9* zoIRTN0JG(8|`l^nOGQ5#o8KfvpM##O~` zKS?w<#Da{1(X*62ZrCHql9#EgbHGEM*%$-Yc!c<#sl9DC+Z0|Ad z=)opd)?OVwc5}xzcSPL$v_V$&=r~*&sfUi=cHo?xda!dv2AgZhNJ>{oUA$*=pyy8| z#9n{MJ=*2Y1;))}&zf1nv(xv35As#q5-tNORUPKCx5jgmtVG;YsDvGKXJ`RJUITK{`cz#Jaw~&``+NXD5l?|<8k6l78wYPv<;xwGCpXA3Sx($fmzWvTknAOSti(G{@ z0-uWATy40Cb23nXTnH<|(X8GTZSednl~`w)vCEu(vqqtiQ}ak=clG?osc9_5>et?| z-J5Pf<%Gv@V8t1!ui`JOu||6DuJ31EMrvc*B_pcZAE;Dms{i5+`$*j_+v2$6+Edv1 z!<@mIuj|>D2X^zz9dB`&c}MvjR%%?J+hlZNyg=$!Z$R&6EkvevI$UUVCD*zB33u4? zCjW8jD6YlLiNkJh(Y>f9HhYINnq%+^{rRMZKB$f0HS-U19{O178+(iT_Ga*UmOof; zS6g03>QjxXIgeH;kLRm&+L2QN&u`E*;w+Az;HN*9MHXt8k^D*@?#Se&yqnHS&TQch z?#J2LXvye_{}?JjB?v+7gSSYS!H z2?>&yLr83!-ahoWeDVN@8!tGKp71g0z3~3E%m9mO26}5@nS}%8S;~AkAU4 zr6Z1X@}uyY8wF(ZiC(AVH#Y);BcZ=#mqT9>(#;{1S)!srrajz2@U-p&j_>qMb>-O+y-Y)u|!42+6 zuRpT?JCX>#P3CQyf+hDgj^klZw8_jdSy=Z-F{yD5!$-bVp%*9mxM4l#(J!}ClU$v4To%=XY281 z<0P#2;{!e_Z-`y@g^`*OCvodYfCnAh@yqV@c#h>Y>}YzNWJR3Bd`7$E^iO&6d)Ehi zxK4*msWHLpM~bo5%f-aDD4%$58BX#ZTauVLugHoQ0*UZzqhyY_3y(42$mRMq5?_tK zlC;xzu_7|U5l&;se@-Iu?294}3V$GJupC7WNY7$7f9c{QUIhd&m&miJ`b6g8dx^u8 z+f`~+-|&H5F2vJkJBb-glFT<(gs(z2NPaEUqegGyamlF}RI}&^Sv#2uHzxA@od0Fo%X5#L{TfY^CH zki@N!dQuazNofM4TkX`zN5>-KV|pAvkMxB$nT)M@QGo(4hKV_=&s<>CaRl?_DLtWz3M|$+k|3nSUD&k2S;*#}^W_NA8$W z-aYcm~x8sh-p5E>jaR#Nc(0*6?0YJ8xYeCjtMZtf4@%&R&Q=l%!mCGWfR z)yF)^=k1%hl(%hUrcjox?+2vr!zH3;qDYdj93WAXqS)PkZ;&&7w~6JRWZIL{#_I2= zBg88W_iQX9Mu+w3BiS2-UuQw{RP&(QtOV)06~%e^>tI)JS59%3Ep2=dhV+cn@cgtD zz|@@muXzO2{ShZrVxWfT)MdE&5GiMnZO3<)b znAU?fe>dSPd?b97#U_5T!h4h{dyG2~v4d}MazJ;S7TVm0`#999%QrJVy!N~G$T{~1 zlAqwrAAi+@{zdQPEoO9aT|M^v&I=p(J0;_7rtN%#ENk=l#ix#;l!n>7v2O*ReS1AW z?c+UuiCqHMI9=ODWxftSu0fAa8WVt;roQB_6~5ya9n(Yyy5Ay)1>^Xr$`Zal`v{8J 
zG7-rvdVnU~9*LS{eRz?PJG$Cr!0V=Dp;0&H^G$Ri@9{DWRsZ>jlphSUNpDEzv-3>( z+$Dq+PI$^Yml*MTULN5uIA(Fv`%0nx*+o)K!ZrH(?-cUTxP$t=en+NM3TY&tNgIv| zXpH$q;_2axj}5J+XILjv{zJc7YlR)YQaqW0D(PO+<3J*0%gA@B;|?FnAu+G#(PrbP z`1HIn^!I-Zy%Y44%8u+LKf}VP<(OsCoPY*hpqoI&_7=3RznGTKJc6B`eW%q`Iuy;5 zp*ubYk;_J9l->G~)>c2kdAEv)(<6DhwJ4mjS++!VrXhi)n3M>0iR+DRbVa)#@lY8c zWsBF7W*0qreTUSmR==A}wEadzw;$4nwf& zEMC7|hf6$>#dpfcpzf?HUgKyLIw zJB>RQDbL5h(c(8(uUTcS3IEM(RAjwA{H;P_eeYS>b~ z{mvA$zvmM&LRVD0Oxh_uu<$WLEs1yJjxr3%-lcN&DMK@Qg*droWNJMs5CZ#SGLx zwh}#jQpg{KEFZeH9!(2$VcUj<@s{@TyzHO_uXo*tD{+a2Cm$Y{Y>awB`y;H#wBiBU zFt!^{kVf$*ng+OuivA~CUWP{X4)MUdEv7r3C>y4qpfLToTziR4R`(W;yNI6R*c z%_tGQmK#I5Tt&3h+!K2SSkQwr=hJ7mWviR!_hLtv0J?S0VlsRE6dHa0B%SV!=ocL^ zjk8q6g^Pz*k1-Y!X}6BZN$Wx{#$Tq@3v-G8d3oa0c$L)R^<>-M2s(3DGj=o_jva#E zYx?s{&Xl;o*op>l6*dm zsX@(8od5nJ$u=pI%t*EqKe23iEB1tiQzl}tMRo`%}#;RXHDK6{@sy&_YHxAiAWvz{Nxg;^)a+=C;imiZa%KfIMV ztNbMK&(G0%GgGp?-iCr{hNNz96VceVh<+Bdk=)^jX?)BST+g18`rg7x^xKEzM|2N9 zV6mOv?bM(q&xNt*7f7juOvlEhPSgF?P=htMXbb zmMkv*LH3SVjtk`ED64BlS`&jwg|~zZV}gmTl{QJyP{k|#B?K)yk0+OlXy5fBiH7$9 zGG(hh`D&kw_pZ_;J7oiipVuOy)hs4@@(y_88iu4?UPGD}$*Rj39jF4 zNo1LP;$|sZ14K4V*_kIE=>(HYM zF_AsGkc8Fz7W*7-BAw;#WUgB)=`0zB7p`7VH7sdRVk$2|zq3_2?=3gc+k|W6RH_U; zu{jeh{Gf-v{oBcv!5DFcmMI-^;yO-VjriMhuEMQPf0JKz(@{zMIP&#ZDyQ4@gEKC& z5-(X$Ev=_J{ITqVt}&>7V@rY@JFX*ZVh-44qkvwj5iFMT`Dm)x>OaP+Gy2 z|87d5%M)DXHON&4D{%XK9&zG%`t+ieHF4)+Nm9f$L}%%v$8}{~+}I#-RAoLgTRwuW z_p_#kMr*NBeVW+WPU^DDs^QNa(L#H@yLjK&P9#%c#y>um%qJZ)<6lSgqrBE!&a`b4 zZ}sf}%2t_R<9cWf_lOwrR^<-7%G~)VOd!L{$y@S$3sm_-C!P7jhex66UE}$WB|^S8 z<`=J9au20#ir@<@J&~HW79VPu#&@1KG9i=lub?G*@XsYsb6V@+mq$c}5kmp<5$>LFn_<5(&U^5AIf7XaEJv>3=5-*U4 zRXb?VOk*PR`?GGQ=u{*<1-@GSW5KuwkqP6U#{y4Ne#h4qV63zLdTU^XwBDegP z0t)xoigNOUxv{^(+4+}Nai=yPWcO^+LGEe`&|O_;PP>y8@4j)C)7*22d%e^Uc}yP9 zz5IEc8=G|m^;Y?yf#8pvR1(hB-*eBWv9EpoZ5kqgdb0#Oa9ru25Z9%7@a!@fh z&>(GER$f9Vs4f$X5#u4`u$H>Z+RcOB78sso|fz#4c zlH}<)k%@DXaO^v8K6D9&k;8n*kktF!Yjg;!mAvCJnjFy&x0PH>Yz&uwTOPZ<-Hq;c ztVYX*+3>4{W66%=^O5Z9D=ZyYix!1kMeXA~kXA-L+Lan6X(>12l~v~v-)#;!cgtOA 
z9j^_2pUl(XLQC#q)F8Jy^a_4&6Ny~ye^hOrW`k#vS2$ZS3Ln}PN#-uQg5~4{i1x&B zIh$osxmhro@xv33|Ff8QxW7e{JABc*i{@xUkOe$o^a?F+KTh%wB$ElZA4%Na_%q}F zU6$l-oJi%}_TdOq0gXGgh#dT$OXk1tCnJ_+5LM@Q*nM-VWN2?8dAs@=eyX#DR$eKQ z%(KoVPNSoV%MmeN6)i{PI-^N_V=F!tl}eI7GdTET5XpB)Cp%tEqe?TKNv*q>_;jRW zE3pfCSlL1JuSb)L-XZdizbWbIwx!w=V~IokCUVR46Mj>pO1Z;Z=!eIPajMNA{_U`o z_?O)wXLG`_OLi>&^WKVVc8tU|ldh1|jnA<10Y{R!Boptj@FuGhFOhlj(&yCtO{D&s zfG!(dLBs~OWO{y~ID5V-%}~lEtI``u{xT)Jy?wCC(N>mQ+xP|LmQ3PS{TjwEwuwWF zy`LiGo=Q|&ybdkds)h0c)VR_A{Xv%^R8hfeV}4`U1y-=G6%9-GM9*S>aJLp~qwyPe zp@w=*1d5B%&%$gjQR)lL?%IZ8R}LccvRQW3AH9nVVvYISJ0ZMBz&Jjwu^4$O zZ{dXdZTWYbHX@%-DmI7R?YV)A+WhLUseDr(KsPsH^h%n4_?A$KHUx5f>(E~=FiVkt zFx-Sszzw|Yjyi;|`0(u}((KE+zexVn5q{_634G*~WPbSaIPP(HJ>M`|gO6XR#h0!* zj0TOe`S1zP`9E!qoX{;3S;Pz>(;fc&A-82HKExR9K3s(Cp35NRd{_QU*m2bC{}QcP zyb8_oQ{kUO5nog2iRNaF;0606{MTYfUVFYQj~~0SLA~j`q)m@Ek9X(CUP$MR>dat9 z!EuR)S_s;lqsz5#YDSTt=9AllX~d(>5sg|j5)IB;#JQN|!HB7eB&yK?S84U48)hHj z4C#BSH%&xYzXENGc+RI) zmc0M82)#9aCh=lU;6=(xWSnED)G7Rk{B`!nr`OeU*2<>b^q=p!XSOv&D3^hNZDq!(raXteTH9NXJGRd*YLe} zS({L$m)z#@$87ebI`E$_h43fS3egwAY@1p6vNku`B2mNVxqLj9&RKYB*zB5Djb!rX zA-5VC-er;iEqBl6*O`dXKuQcRNf^#|b>{J#4TJc#{pV3yE99SiaksH{{Ev4vOy^5F zhH;bXrrWgLcI4lmDB``DC){q~O?cf@%KA5!;iFw$xE{YQH0?_b=fuC`zMQ&<_L%KM zAzGWb^f%+gPhZVN4&9#Y*{KWoxI9nR@~m|4y1fA1x3b{szq@eHvo|36M2lNG#tv;d z*a_Lk?M)5CwE^^zyWgzu}9o&v3)6u!Gbkt^l1XZ=9qQhCbtXYf>|IECY zJ8|h9$G$kto^WqNvfAlrO`Zw&(BnS0XHFvbs`@gTS^knuy3@`c-4n=NS{cCZyFU&o z$Rx8jk4!*67aDTc*UmzHa$`|fpe47wkmvfgzD7+b6UEfu=l<)nfWzHhp_?_bNPCGP zN^*P+BSm^}LA|>quELYHB;CM^J>uy0Uw3h4^E@&#Lqv9$KW1`*XrclHmn>l20JGZ#03+2=T~XPCf+% z}MCYYC#e|_4L%x4aCa;+9&-9yR2p*+b=6=Si(j!5=w=6wF~o;>d7 zx>fv!-Wo3LmI*RHsmuQ?sz4#9tWdzsuWW>9IvRH(o`a*3_z3kE?Cw32kV3o!z1v{R ziI8+IA^&W#270LGgT`f_WtA>ERPFEA!Ihgp^?x4mWZH#A)iNDkL}pkiQG2Uez2`ta zy|8r@t*qIBBbL7)M+cS>7sbTtRnB7kAED&!loE;!KVq}12DC?*PM7`uEX~hkQd7_C 
z7_UyEXX4M%vR3En^LB^H_E%4-g+(P<`!9?>G?A^goSH*_da73oOT#dgxKuA%Qb4RK1NZpcUIZNqFCQI>ze?+0fKQ{cLQ>)Cv2sAkcx#?#sdDb4Sq8h6{2)C6XLWtjQan$O4b#OBl7DX5VW`>uPZY{O^PQuU-m({LijkMv1J2BU zK3}Gj!y3MbLcgyCOJ{wKbHA;Jq0x4s=tFx8cho_;|F7P~`9?USjOFo2R|fEDQ!SCv zs(Z+EKIVpZ&PL&jn$VS10ceRxmPfDuvG?wn^IGE~P~EFAl>S-~{YW0p@Bd`Y@2#82 z`K5j4?w**5{#iUk1y5FUQi3!$MIw(}nh$bydZ*EVw+hul;9_0Kx4XxW! z$aPJMK;GA<@v7|@1uU^bV*+mbOy10CWSZ-&w0~%j5i8IQ!<_!+(p_6k;Q0&=c$ODJ7hX{ zN=3xo9@&UBEgJE)z6|o_!T=jAqsfg>7P9M#I`NRRED3z@5bwCO6c5aN#O}+vK={wf zC}!bP?o_KiXA`@DbVOyZtjoR18g_7Q#Eu|&9{gzI>E8)){5h=0a??y{){ z%6DAHmi?er%d?6lxYnDv&QFq9N3J1~rpI{6pMAtSz>G8)UcytPnU)o#Tw=j{lU1wE z;JGRH2pMiF(ej&4?oAp_=};8$nzc8VdOTF_wtRz$SIc88rYMxfLUbD}JX@GD+tb8u7f9X$?xh;LClxYsnj5-GXvDCfXRZxNvVKYV*hKX7TQd z3{p#ffKqN*@Vc6nsP5l-e%HqTxXpjY@zak`C%GCv9rCI8(AUe9d zk}iJWOlPW$piiEa;MBTg8Xb{K>ec5}PjQXJx-a(8RiAdyOJ2#u*-3#0s;{C2!J1UQ zC7AjpTanjxEDcFnOE(6NtbVq740S6gr)%$LlFx5MROU}9EjZ*zGnTxhANE|AW`{;p zKLvJFE@&AIoL)&DUT&nua}BEd!c|F{Ym>C^wSexENNe}n+2pw4M)LiuBK^8!6EXjN zks4T4lja{o@vz_@>*KL2n2!6+%4;D2Keuy%7~WbO~WLK_dwps6p-sOuz0IxJU# z_daqSUARoqmlzk?AG4Z%y<$b%%#~<(_IgSlO{4F6p3|uORDRGvLi2wCRGnOa7JO&u zm29A~qSDQt`#}`XQ-RG)dwf0D8BuRp+znH-O zWEbE*w~BGFZxM74e+g-O7&FyhI)ci@cLm=ma7gSvsBqS>yLF;lM z{O_nII69~UBGXsG7`H)oVfR5%V@fU)A}0%F`o=+>?ht03lPja4eiBT05y6mS-(W=V zIc7mfA7iopwCL^91K`$_N|=4|4`Vy~514hf2rgP$4KSGl%Jyy)n8URY=|{o9F(cT0 zZ7!nP_=!TxwiF@RZ3^qQ+KK!%cfs@dJ>X%JFI@TQ4*29W0(`5+$EZ+F8GhpO>1X!F*?D80>!#B*kihRb3*1<*X4vl-l)utCZf=Ad`sZcuWqfSFiYAqem&fw(MZNaNsx#-NVDc zEcbZk@`)m5=A4;eisn=1PH7H!n-(bg-RK9*tfPQn-*cz~Z-A)MexMxsU37?>3UZH3 zgUg+cgS46h@aM)=Lf@GkP^VWF<|QozyC-xoJwJEDd*5o=1phG6z|-^0YH#WH7c?Hm zNFPl1DF~UlHvYiX&7PSaiebvjC}yd)0ZN(^4wfC|@hI57_{Ht2{-2cPGKy%Qa!w_-r`b z>o+Kh5`dxUS>|%B23#+iL_XK2L*+ zPRZ~=iw3K{YKv&^Qx)ONIBk)&8iCWjDnuh@|AIYvPB5^k8>*--fwpO>;I5w$sOb$5 zhHN)wyG;KvD&G&WIy;($a}=xLkF;W`|8b<$d#nML4@(A%7kh&9v&O+&rQ?_o5DE-_ zmxGHn^6WREEp$%UBy|TZXFknw1Vvy4#I^-sy4^+iS$()DeWEP8dr~UY{?-kwZg_(` z9Z~SB>H@Cfw7cl8`E{o3=w|5uXg4fqe9rhKiWyG2o^<=xGLI+m?7&Y`kSW##YV1Vj 
zuW=<S`D@?z)iuG(S6Y0w<2opVX zgrQZZ;9*cCYPs+RM&D`&pW^)BkrlRZgH{N*l=hk#ds|J2{idC zJ$yba1^CU%WFGB&&SYn9197J`fJw#!@S(R@WMuCR8fK(`?CMYOis%mbxN|&wq%fNK za#aW{r`y0KyW>D-qQFzZFwmoP80tDaVk4h>h-%d?GLbh2 zfgnZz4U>YH%|SDm7GWf)db5TROYeXCNWz5pD*?CjZ$znE!-02D8vK{k$XvSh89b}J z1J_l20PP>Qfdj@u!N*TW;h$L^Fz0axG#xWT6kC-pIJ{a`_+SJDS>v09e8*p4S!fFy zK0gFEwz`6KFqN6}eKeD9He2wgR|}50+A7*ucNmUx`y;54oB_r8#muG&GR((2ip=PY zNM>M*Bjf&N1bC$-C%R@5$3*-sWvHH%k-B0UP>XW_lk)6D`>Pz8J3Ci`(oaQ9EjR~W z8a@}CR#bqCxnp3e_#QKOZ4IL(js?0#7VLe+6T+qujiRbM^MK{ME#TadTcY7vnW8C9 zuNX5!EGi2yf#u`WnSs|6m>}N`0B&T$hxy7URGS zsQ&_TO}a4P;&#?NI2wNccLql4vB4M&GBWW)2LfQMo#JpFzFd%)Tg?o8+gx~NM~6FCUi%pJ?N*=Dd$Km8Dm z`x7d(yqqcAm3|G5K3Ok%xOf;_v*0lZE!+X`ZgqpIpVPq#<})L{GD@g#S^!I|BAL`J zM_BKgcH!v9Jgk0P0?HfX8JmHRjOUvuFw9I1oNDa@rMrtnP5TqT@3R$PQG_z>&uY$W(N5y1AT5Ts@Y#xBCgWuO-7W%0oim@A}M*^D1CgQ54%qcSDQ`mgje_^pYM@-U@O*B_P(f2&UwgnX_N$aeIBJttK~S`#Nh3oBKSKbQcl zWRpSZyBbDEuT!+=SSPnZQiv8c+tTa-X^7_s6= z=Jbod%-O0kVY_P$V|l%e+5X%LzFs^G2$SqVz)4$?!{u=1wQm?md*903tEdDmn(2bm z+8k`Yz<~Yv+02*-9cJsntDx@b5OlM=DHH_`W8UnPvSN#mfZsPlL}cSe(aP!B%)*Po zBLBsofvFi5!E?^clBth{JBn%<(_Ix{Tj_tIO`*d;eU1)XE9E&3zj})`Pt@t_7}GRKUU z34Wej3QtyzX5MOhvM*Ym3%6UggHWejAiWIcd-geIee({`E0@Q#z!H#9FBBcDPXZg- z&j5c9Id;dD{Xpl&2)O&%Fy@ZJVxTmAA-uak7r0D{hS__^3C(sZvoq$GfiEWjFbg}u zy#18`x2%=UVr@(m$(7bI+NUDH)TIEbKy_x_;Z~8!3#n6l!f>XqX&Y4fV8igg^nvq~ zSkcF!c;IAV4tkf$^oIGO*n9;iuxQ%=gl^l8CxMCSwHVCPZ6 zSgu0YrWphaqvJ&fYl@*$uMFheqhWKF7Hl+K44w|&Wi$_e5(sr3z-s}iOvJLO>;j8Q z;p?RJ@bts2;7WHn6S09X{a+D?*`N-dv|I*iPb8vSqPZX_e?REkB!LQxx`1ty63o$8 zV7_cu0j(ON;T@qTIB2vLZYxR?hF+4u37Xb0!8aUKZqWdK@^j(x%&%xsbGImfYj^Q0ky~t=JVSorf2^pQS1&MV96H21xjC-kp^;b zjCLMux4sG#z3svMI1fSfvITGp42GxA%dpySmx)U3KM4+vjSwyvm;p_#*NR-NL!i>& zO%Q>d;X%P0;5&I7$XRrX*)I1;(9@g^SwnZGcghf)aV<<}dDskI*li88PJ}V88A;56 zY-Mnk>0oAGhy)cTks^i78elKON?k8eFwVaeD53}8vim)eLt!JsKYa~Sc36Pj>qU?s z*eonFiH5aL6`aPsvy`cT=Jut0r0arpE_nnuLi~CFQ|6Cm(^&2e&}CgDP-%*lZvUuVo5L zKM0J|&qBZTA^yP6~50OQE~4kfVW$9YCsaF^*>S0S>k 
z*#W8*qv6u5>x`3fJGkGS4qF4n;GFtAaOSa)BDSGZJ)b|opX@+TqG?|29=HM+z6cW0z-KYfrf5F%8#pvfwAcQO0gkFx<=snBzN z2|O=HK(xy*W@WS{7}IqMOg`iR_+}mGBT#1qAO>V^x&rA?}OO1mBC6|V+wZ)L(o-a<|*(nI7?eVeI%>;!|y zc)`|orh6c9Ibn=#XRBbd1Q91KIpn9L=6*{W|pg>r#5@X3|4;N_SUCiUBWrg`d7 z(7fvp<055g?m3$yn%12Ps`)Z-Z=oW~N8SV>sZ-$2Ge%798z+#2mq8~|0-}vlpD z@S>F}JK>i$T;LJ`KF16&^J0>r_x3yN!Q>Rt)R+fM-;a0TcHm+-f5r}`rd!0!eG>>8 z+Ey@6b+5rhhXO{U#|Vrpt`_NTI|H2MVr%vaN=_|4Kz)$RGG@i8cH^>K2p&4x!OtDzJDz0ZcL9 zB@FpKl2tnTnptUllr0LW7xuq61x-3jfotApMs}%|)N_*ra&$L>@yj&f=W7ZK-w+1M zd@8_^Y$bN-89g{5a4pofUdXJTxD?DfvkJPl6oP#hDqyWshw!(zEc@L$2&S1o0~wI?ICB^^YBvz!Z_LpUC1FGEeRwe;k2(3pflXT8 zAk?XMgF5FU!R(%k%%W3onQ_}z1GTY}K)lp56rNls+FIfPcKXJG=wGeSdTR$*dUGP| z>^ERm&Nc>0uIA7`JOXU{7X?=d3WVAJofFF+9p!t8X_U6%4y>I+Cg~(7c z7g8vc;p}~1_da`T5Je~?85+@`NF_>WFlH!o#zZ2KB80QIl8{REsVJpMN=c?hD#h=7 z|NZ^(JZnAcIcuGN_c?3r`@XO1zOMK6ejV_{JAd|5WS9r}dgKDy^iUr6SMqV*FCh=` z)H?L*S0<=jI*fkp=)qH8w4oh0Mo{;kPSR{m5{$PB`Cp9#Xq|{GKCXBj*SK^;UCn-2 z9OTL4U+}??Tte~1E91ETu_Jj|^ghpi^f(b3oR6giRpff>SJ-HK3%oX8hvjaV<0(%= zL8>bc)rX$siLS`O*92=(+y;AU=D`Ny+JObwUndbl&qtx-9!03=r87MIcMF9|T?R4w z7)ftE2J`r*;E_i^93+5050$KQYCUSj|kgmBXOB&fO;g4eRnM5_KzoY5|hn?2k> z`DiEFnsgY8++wL)J|SetwsQ1&Qxj}FD)dYIV~%{Sbdmq~4*1Vx5t@DXGCqGJ7Nwn( zhUk$LGBz?2=H83I^?H>^Z>S!Q$lS#~&Ye(O&WGS&EuQ_aW4PkYO8l|432zLTMV42T z@;1qAC+geR-~ofxWS{a0{EIvdM?6e$+T?z)G|-2>Yl&#;`6eD&5RCQTSR<=FLiWU% z4{>U-EN=b22zu8$BKQ4qNPfQ-eEw69@V`vxIu=N}SF1qXRZEa=O~VH?PC!P^dpJA4 zo%ECagkC=GgISy@ynBZ5`Ib$@^2cdd>-BpW>~exlAp*4O&TQ;8H;ppCIGMaI>5P_s zFNG4Le{iu|A1w}_ihO1-htNq1Xs(=)PoWTv{%t5m(W{ckBlpdq&BqZNs3jrwyi(A= z7lp%O^C5AuJZyA*VRFzb>5*RGxAYX!eZ>PZHFO~#(X*zW_^Z|<8c#;}R8Y32r=HXL9exhmdC>kH14AYac zAx(2B=!Qz+yWhshE#J?=+$*=h{EQkEJ1`gT-|dL$mDWhQ-3!L=?!k`(>LBT21zv3O zifCJ)LH)ZKf?w`xgmdw$puMF4pPsyczepvUR2%F<+k$BPcK%-M*jJ5~+{;E^y@mYO ziDG1UMw42rCNkTs+t9*CX?r;KVL<3|)+Us-`lDwI$`=@eeA zoG6}%eo9_*OvO85mk<$AC*XNt6YBZ)j+C;Rjs}+IpeJiB(7f@vuyCa~c`&g8{kYbN zJg_=;PEv-|Sw0YE9!Q=z>W5;&LO=mOLB3Kq;me*+ys@h~SZaJLi0=P@wEN9bh_J2{ 
zG^k8DZD=7BnnjRO#RBNjj)xC_u95W5aMIGD4w*R@k_wUX`0o!1v}lF}iYyx=&fI#7 zUT3z0@q`MxU`B(ink@En(*;7S5H0ArVB$Z$0%ol)gj2rr@m}>X;=Mn~t3IwwIPFlu zZ$?zetbtWnO3xOC&b|e&MWyh0?lKo!IeX9xIBS?Eend_ZblBNkyV14w_k$U zkECGXxhdG|$P8k_%ni%uCxXdTb@(x|7Yf{5Evs$ownainW5@+#d9EBsi9xiw7A z3N0b^#J-?8yj!Gr7Y|E#639=6>iGGOu?f+Tg5Dg@n^b9o}b!zr3-JJ)oE`MHc&O;MnJj;LG3Ju(CW3T#pIR z{$y!%OCZMktE+&u!mg95irM($2~8ra^9)Rs{D)#6PLOK;QfN~~4mvwbqNfg0pnmrq z@l}FFKZG2oNEtosUv32NBJJU&lP!73DIERX5dn`jw4uYB8bPm{<89`QvF6tUaLK7IlVmXRQGG!*{wYDo30EYf24Q)Dx?+Rk(q=w z+R^@pC<(rYM)*I#>E%43f4m`F36;Y;f@Xu0R~8zn%rUtrl<<%Iz6Pn{d~6<+OmNpt zdFc_Igv#W2Y`^jhDZcLpUUzOTrfe_cxCdHzG;u3jOBzP&kIo^Q|Bhp*s6?5<_ac6? zj!>$K#lcs@;lW5Z`a0z$Iuq#x!H*V#b;2{S3-2Ur0@uR(foQN2`ww>td-HmMn%Ku} zCUOv+1&ecZaB_Pn6!k@6yVfdVf9X9eGv5VP5>`C2X=e95wZ*Q|FGIB$g~%ig$)PgW~#hG|_tz?Gs}1 ztd@-<$1jy&k$9H;H`5SAE^dWQcS~^E?-q#27sETXevx~NCqcZA1eWu5hNJh_;SSpv z;`ogcJcw2CQlC&bv27G-jR9_%@qlWkHONgyL8xA}AB+s;am%PLIx>h6;T8@jBG)13 zcQ6duMHG=$S!0`e@D+t+@4fya@5p>8>jOy-{!9gv_7!B>mckHv^Ro8v=>W)w@fIIN7 zs#v5RdxZMCSAn!1^2F2BOJSX$9@+5Qkj~2k@E~vkwMaaHlJX*QlqiId%4(ppr&7z# zPQnfTb1*3CqJf>x(3QI$FKnuSn__41&snhqwosx{XBy$VKchgWSqY+2GqLT>5z1tj zfVAvu)=QE7fChX1XM0^?NmGm1;=b)bHU1Xx#(d-VVb5 zk0egS^Fb}65)7a2hN5kGXmo=(GI=M$3ml$|_kO-YK5IITb^Lzv{^}*dD~md`v)~OG zxM(t3@Gk+)PhNnmXHFnPpC`nt(j0U&`Wkwqseng(lp(>!91O2fJ0So)s)z0eSyzxanx(5*y*rsq&@tB4j?&4vTRf51LwI@n)HL{Dc% znk=814;%U}z~F!mK6gHxsC+5KOEf=DoUWLH-%m9pr7S7@H@q3P?QzBj(sLnl-BYy1 z`5?OeB$qdE_B8%`bS(<35o*U%+zHcV`uNSYpXkcQ806Kk0r~VaA|z6WPD{JOmy;o+ zvuZm!_hb@G3o^q}eOF=6jtnr05_05&Qc$CQ9+(x5q3bIr;nJ%bf&Gt>yj!b`#FwguY!eh!){w;ir2E=3zJT*61}W0AzU z-Kh8eT5`C03K%^O#clh9=afqWZ2NB>_IzyvH-&dc&?NKM z+~7I5S`ZWMd>lDuM@||F!pq*o!lN6CxU%m#ObSx~tCf*xs!}{J?3gQ-Tcn4|Qhwma z2M-g*I>J4kAI81KuK_iK?h*8vu#Ye6nTQetWrvG!IO*#N?`J4pnJ%qitgU`^Tju2?7 zp9zBh#^LCtPBMi#2$WVntj_$5Wz~hO)^DbG-eVG_Wm0hF0N}pqnP3%s0z1fECp?A+ z@RIfwSWZv_F8Q-zTE}5*D>Iod{&x#`IH45nH&Dd4cdxQe$I2oGq-yJp-et<&`qNzPSUx9upPvy>^0E$a3C-@xz#>brPSi zkfwIDY$d&=F7Pf=@x<9>?%4HB5ZO4Ejztn~gR`3(o@6A3b^V0s)2>SNO{;@v!xQ>% 
zJoHC@Gn6Ppr)?=f_3nju^Pm(KKjRB|`ft&Pp#^whHA~gr zoKNm=J%}>2-$3E%sd(0lt;n-_9uk-Gg~zcLsN_-wmUPNRi}mUe{ptca^2iP7yAi@V zqA+IBkHKdk9t*aW0NG^%F{c*rmh9o-*OWD0+TV=tn`)C=ThH z0J**o4`2mF-&)5kbZ?Wl3c$7 z-u%=@mPZS)=d=to)h`0wj))*H+|UqmkwWlWelf}oYzEiBwfOJ52xxX1Lr=Vam`tqr z1Nonp;VJnIc<0ucvS_0lu2_Ffpe9Xo)PY8sJ$z5?`U2;)Iv z5)@TcU{w19@%K-`f1+>WX!n;89<&Zhmu=t`-WIM6)i@!CQi}TAx{myvF^SMo)gkj- z9^pFAyX4$I($s!~KwN%iGBrT?U?=+$u-vZ-8uc#3`4kIEOXMF4?2e(X|L7#1=eOg3 zeRZ&Dmmc`X$in52T<~<+2-Pl1Saz>6()xM|B3+xI;Fc2gEY=ghmvzUE2CLCZ-gcO@ zd>7VN6M&15NgggWLhRbCM0tHF#GT{|@V;>zD*p&;OH;i0-d*8j>C`9aUX(A6-xG-E z8h%86i|(W7;c3|a?k99&#F6U$qyr7!`q1pChFov4u=}_cHIc~&yb1G;8eXPP6DVwaK;#5X#`YHfkm$WQ2z%iL z=eFjcRT|&PbJml1-cEO5d{++H^E?=zKL3)}lDGw`CoZ9()y+p6pFY)?%g<&w{1O{tB{3^zU?E6BVNPE zulwYkd|hM_)I;2UU4+)k2pNkdy=2Tx3DErY9bN`Zh0WPPNVH{ziI=c8v2Q~J4C#pC zYOp8De9xIgt>u#sonL+!~B8M2$wcp_vf{ag--)zeYi;toLW1f}@o zAV72^zHKZ`eXr8S>zpsZE$S6-`P~W(p;M`h*KyR;KON+|Z%IUyivqbe{2ESjh@V4e4@pO#|wKKB^UHy8D+L={J?3QoFI}6I-=!Y5L za9a+l%u*q{+5>dmrr>#HlF0gdBrI&a0*5@MsmjRZc-2gMyfmJV)TTRt9K9Yl1)YP( zhcDr0#7E-j3khmVNIGt|x(${EdqLbi3GdBa!=LQqP8#;#K$@1@v1EZ8UdMEx^hGyO zO{WC5TYMKOb}tor38{hOPgRh$nu8j*7eKs{0+njG5+b$iFi|s%uZqdzm%7Oika&Xk zUrGu--uedr^4Uo7m(`L5@5KrFUkUNMw*>DEZ6%|lKVW=#0L;IHVZ~f)JWy~PEO)*| z{h10xN-2l0JEtP+ZSItj!XRN{XNL`@XF$=FbI7*pC5ms^2jikM;I!pixc{PxO!*oK zTiVV-g5?jq{746IY74M_%6xP^hk=*#m*D1_EO^qIiodC65i;7p@ktvU%x}$t9Su|A z(uFV_{_6!b78*kaao15lD~by$t+4yaNOXI)IU48yWXEm;E{1E0}faa+W8@OUSV^RafE1d4ut2WJl*Bl`P= z?_{d$tp{kiG5bgjI&ugZbPfSR-^Y$$V9S zwzrpp*gY8-`X+{bU8fO@jwjyqIvaY5Bw)}p5-ncWD@3k4Q!}$<$S8S)9=_TKZoI2- zX|gEF8mlFzt`Y;qq9IaK+aC8+=%a|@D3owjhYWC-4zCZY;)yeE=+C$}xTl(8GgU{R zjNYQS?mm;p3%^6^i%;-b$k>fb6A<4XpWt1Ol_M*Rr&ECrYZ01nNjb1y_`$h;9HCb$ z)DCpRHs85$dAAC2dmE3sxx5o?V{TH42EpXuIu?Hz{|BR2HDSpvU0^nsgWB>n5Z0iI zW$ndKnbIGa*&~4qo~)*dQ=IXyM?0|FiDSs<=NT}~EW%$;i(zl?@7S=ogSeIDP6g5N zIJvP2c2EbvRIL}M+_B;72dE(rI}+FcCk!^Hu}}Y7^!!T+3cvgl9EN*PysriI`<@g$ zsEihB-6kQG<>OG@C`V2B+QS#^)%ci-A@xsqr?HB?33F1P@M_Zr@hj7pc-g{@)crN3 
zNQ{;D`U<`IXf;tZfFKTFQfq61!3VzjeIabGdl>10K?@ zQKCkK`cKCOZ7liL0{Su&(7~?(NR~HM*ptdeYWhBK=)E&(_{b7o2=${*+w<|iwwrK3 zq#735J|Kg|Mv&9(N%&;S8hBB|;Mp&Xh?~pszvwC zUIf=wLY}>C0!%!Vhl5A?JSBlC{xp9%{wvpnYfTi%(iaLuT=62pXJjU}TE{14z1Lx{ zpJ2 zm`9RvU1lGV`?ZmGGWQ&@u{0Blh_;e{I{(A^A)|0rB^+Bu)3{;D9?+iGfc`$M=RIm` zz;lIp=D}IERF>Tj;*;kpJoY;ZR2qxX{_l0jZ?QEj-18GjM%{qdW@pKy+z`mgPK2{U zz1@oZcX08k95&NZLf-20A<5kkJGh+y_bcJpPA!Vazt)3`#%AO4iBL!mm4NQcJ~;1I zHxn7pvLZ&c}U4)%CMuL6FFha}Kh}}YOzB&5}(cN$Bsk;@;#J%HQn5UEu zLO&!_e&s!~y%zvUF|*;p!EcZtc8BaxH~^X_PlI~mXB=?*6A;n}Uzs`|y>iCz&swPa z96SYG50kOmp;AI?U;xLw=i|dd2I-q574QoU#wjPiQ>)xJk@rrNp*6wNartj6oVYa^ zo!#Prq>ttbpBI9hy*}ek|4t;LH3d}m4v-2zGC=KP8MdexM$&S!_|fu5IH=(ltQMZ1 zE4*!aqK889qnAS7bdFGy$#*5Ml~?f8o?Ic~M?7(D?kQ&<%ggS`EH21t?v$93}SYgSVF?tf{GmaD%I4 zkg$&Q{-`U^OY-rj-<@ExSqv9$`bR#=A3?PnMKCYY4o=K=!0Bt#iHe73aj1qNUdzOR zv%WOM-_yshzJ8)gd(=qG+m9lJapH;HH0&lBhMJR|&=$LNIQMNUn%N`dJexG4xyNdd z=uj0oDQP2oRxQADC9lc3A6{d}xdkvwQV%DsS|!w+90A_7EVQJE&(jzT#F;1bkZ<`~Dy>_T zOtcWep~i`jH0ps?EG`xrC9Q-z!xvDfasl)na3uE!$ARYG3{V~ah+VugAX;JqR-K-L zE>-d1tptVhpPYhj;|LtSmm+qw|HeV|9ms5F!0LAy+I2A$i=96~sogz89{bmTuD{5D zaG}?~Q=$Qy5!p+IY}5yvU%II9d>o!R%@NtWQUKB4`$_LdC*hOK3T$&I75PKSGHa0dygLccs)~nNUN2gEycM-fxWI8M zMR@o87Nm*ZCg){@!eX6th_LCwo8#4S`(s7?-yv1x9c%;%KL9VAnglaqWAWn?=ZVIX zLs$}R#CA<5;XFY>@xnt`@0>c{6cWhxGqotwMiZ|sL-=FN8FW)U7MTw;K(#?WIx$(E z^4s(j5v(j6m8&B^{FeZ;F2BH;h02gxt%e1knlKuF36+c7;j4HYFP%MxP5g>+s^@&F zzD3AqRUimf@+!}GY8-Hp0wT4Sg{JRPAbl?$w3aM@D2olKx}udluW`;qJ>)5TI~z}q zEunPRp5n`BG*Vq|JC6|uqBk-vC3;co4L)T*M8`PC*kE&mUh zf)cifHm+${_suuabPZaFi@tf1dc1e;na~%V071 zlQeiM0@v*>L129|3f$;|gbYiQgb$nH!|O;ml`t7!)Lu=vPoBq{aomcKzGaGAt&fr? 
zJ_g{0ztdsaVoj{^wHux#P6O8`ZpfrSigze@I}W91A^FDx*wwL=n7#Q6y!fCc+}Ep- zn8hWbAq2f^X@pcSZuVm7}a zH6E%%y`?gmY#59woy90Ixd16|oKIT5r{MBjTBy&ALWlMz!26kAc=%l?l!pIBH!jTP z-TgBi&tj+H$A-xm-+WFiZS3HUB}5ZjUb^6!v%Sekr)1m|D(wGHbHQ>u#=vA`0sQXE zM&t*M*LdkP?kd@UZs^HTzs6$;vOpgz-?xC0zsFH(4vyYYWL%Cm zRQaxg4=#Ksm+nGuM!?qz~@>i_yXi+Gu2Chq)afUZj)p{)LilkacM z!S~Zj;iX;_y1uCt#m)DI8QS?Mx33URuCpe0{VD<#)e5j>WT+Y2SU77c)IWrYqt6!W z;ri$GSSG#^&T|SLIZz#ec_D<-rF(JG+hL9h8vS7X8VH_G-iq<3xfB#@VE^7M% zzwTJWmiYO+8I5^(Q>!_CaHtViH%%w!ta~l&J?E1DM*lZN$hga<*ne?3Xg_!mui zuGgksR`}tUHj{C|_;f1Iqm#d|zKc{nwG5s`e8(%PVPTHC1NJ%Fz`50l_?*gGh?BWT zUD+sr#gnt(gwiF{9>l{|quJE_KaXIC>L6~N7D#Qd%EmgTYWUGMIpSuV0u}wxhoTZc zQ=x~<(LS$m;(3G=F=RanuT?iDXZG>%GGhzKobwz8&GVsxxsU!@>Y~3}Y2L;Qs@QAm z02$;~itk0NA{s_YLB+Hesk(?GzUge#CJ@%Cqdk%BuompxBTp_v&rrC}chv7i7M6TQmLZeytug}X z6|V-9I88ifz6qQyy^4AU&YSo)JOPim7MS9?0tfV-C1i_d^T;((#DTUg*lA4$`QcOs z&fQlHPrh2=bCpwYt%jlSUVRb$_o0TD^fU>N%4|jU&MFk#xk;QmIvd}yvVy~kf#_E8 zS@a@G1(uRMNdL`g&|H6#)VIU%_=Ph>2j}9KCIvutzK4)A-^fi}-_ZCkQ9M0h4H)0I z#ay}{QSqq=%lC`m#ywlW;7&V|-m?%7b`(+DA32e49tic)r|!dpO_KP}PY3k(f(p{2 zU7<*uL?s6K_(4P}3QTH28t+e%{nt0Z6d?}+%L#Mx(=D)cQXGDH{tVb{MQ~n4i?^eM z#7~Z3-2AH=gS;~N;7lS$I z9zoKAnbe8yR^nEt0^%7gfM(M~Xq#O@rjs$GX6_9Xb|a5mP#}ljKIkX6-u6K8hZM-A zE8ihI>KQ0#D58d8ZHQj4i>L2oAYA1t8fiaea>c6ya;6kQ!Xqt=qr(W+ScNAyXEpi9 z$%K-Kazr;z@1xRC68_epMQyWf$0sG^aP^#hFwuUVP&3#;t=?=5slNuPf`ol!Df)w# z-qgekndP9)+X+wGe!)?j0*Id2DcqZ`XnoUETs3JPwsSv9?RO8u??%h;k-u8!B%`#S{Gjy_IAukDeBF^x8HlP7-f5p^;)vQ- zeBqB89QZYi90C_$jk{C9cj7Q5D&`G#UfHnyg$LU8N&~+zT}<^4MgWv%9ljrMl)RF=w6ehX(%mp7zP7aa!4z&ZPf(pB-q`K2njd9p6~c=jqB)!GK1Hf_fF z8%p77%omi~dlY?moXE2_FTm5=*P)Ha?%~xkHbh>YG5)M211c^_NG)p@iV^1A&eyJ^ z_=c@Qy=^c_#=k>CI08(i*5jzIM$iZ@gazUGjm_^nR+noJ@F}354pz-?J&wgCuOa`jCfHuK82en^23JM?pe|pUcmMP-*i5X(8Qu4= zQ}kT&o7e}Q+go@q#)T;qkuv*KatxmoyuOA9PCm4YwNHlv`!p}h3;E!eVH z1G&0%WfRz-o+cxTJPUTX~`*cb<<|hdbybVE-C`K6QMhOcqKJQ2ySa3Hak{L z7XN;Mt)G>_^mH?9-)@1WOvB--Q6c(zDSN{rNW@&a3-MEJ}gS-K}(k$zA`3(GQMG0#UpXy?o$w? 
zeH3rfb|s8-e_+z|Cj@Ta4AV+)qsxQGuzFe{wd=zH^7+g%^f9R#{O+4zVK_%l-)Eo$ zCad7|;C!_2L>3O58-*^D(r{&d8X0-%1T4fm@HM$SR2E+Y#?S8JGY0P#Fjy zERJESiHBn1EbR*`eYUQxjzaNbXSwFj%vc?Av3UzNW}^&C*aJ*J0Kh|A+MZy zhq`w5f%i8WKKCK~XND2csg{n{+24iQnLO|cFGefu)p4gwJe96IldKB$M9v*KASE*Z z_iE;#yd_`B!~{dQWUq({ABN#&yEmYPQf27Ev1l@20UyX&EAW+~7}O`!uJ8I4hTr^4 z1&JMsV5cg^yZUerKH;K_*GQ+~cC}$5jLPI~DtJgts;S`qLULOWLDl_hpa!QCFh=5E5G5tqSg^a?Igzefz;lcxN< zR^kVfZb6*+e2Alx@au_5d_izFnX&Z+61UaChA($v57AN-e|8U&@xKT|jwg}mv%k1L z=O;=p(SqyYlIVU)Ej$UV!#gL5L!J9*N-*I?UZ`G0f5bTRSrgurE_la716fWr%AIiB`q=2X%(f#{?1+AUsJm;=-D`Be@v^c3Bx+}x2Eru^wk z=IK5^wuat7t2oRzm7l+k8|oY6?wru58+>a;>yK}yPn5jnF20<@^!(VsG>=Cz8y*kv z8>cSf0?I?{*rRuu12>j1Pd;Zt5o<(YwKNO z0-PJ!ZI%N}x8-rhcl}d#(0>jc_0*4>)bWoNtS4eW5Wp4!)ZUglA$~oMD zrc&l?mOc)X9AWphhwz!F9RFDB5jxW-jBOkV6KJ>h(MMK?ahsE>=~J+o{kAreK15F8 zD{8G}SBiXN<*db-+F23w76V@jNyssc>!bM+L0WVzdz&@e;m40|7_HS-C21?KPkbk1 z3+B;d87{Zs9Xq?#pUc=YndV(x&R2Xh!EV2?jO`eY705n5M+dHFXzRdr>{#*!&ga-) zn()Y{b@x7@YeM+kc*s0vOV@;80yc6*KBDZg7Yc0T9=BSJNilT1y&V7B+i$eZ+Mjgc z+QqDJ&b4-a`~yL}_B+0Lv^?`!^DF%=x~KMN$#21i?UU&bJ2C~5Ua#oJO?CpYG$G5p zrd~~=PKN3rOi6EI*-!$yNAPR#j3rG#$9lxeC)2vOrLwScJ&X;48EGh40oMl=VyoTwHx9D z6=NdwsTYfwx?Lmmxz_$#HI>Ee@Y7tnPEC%jE0tqP-_){l4eo4r_kN1%X%<*ptq|PS zDWbou31Gb+y`dr#FVnI+JlO*|4BxBj5Phg|2kT~bfd7Zm5NHU}1S|W^=`+D|giIVh z>weOb&if?KAIm%{P!nCkF55YkJG@(wPwcwIU%2=#+t3?8SsMn@vzzPaCoP+6wJLwt z1dC_UFTxX8nYKRm`%+u>n)^cflW{Bm@-7WVdVeM>J{-r&_HLzL$n^`(deZFq10Hnx zyPY*(ZJjB4qc7bVmLr%M_&`wg`yXX&(Jr{wP$kgTFy<>eeiFQjilGOTqXd^$U*`Aj za$$dFS^u_(X3zJ2`hodE_O!?wYT@-(Hhk7ex=2-ysouqB%yt>Ef6T-M zb000D#>$U;he7wUMxmMHli|#N#uZPrW9{R$B z2CnDA6+Y44Gd$>Mb}eV%63r<$_A>7lG}BYIs|iY!UNY91cWCsqj<((E%VoL;Fk?(2 zli9CXdt1Gm4w}E%G)O&{+b$$ss-wkqucLEl&lkz`(0!4*8_Ei_Nm3T=LuWE;JXM%% z({SwiEL%6BaHqnjz>x}v5 zt*l(KgsDN(O=kOZ87?#2hKghsaOr*DX|JoDjNOM1jOlJq*1sW^JyWU6w2b^@r*z$A z+(XnE3AuHG*?W%)rnPs_$}^@2*UWOxRJ@ITd^eXi-J8k%dWxCnk?Yv309V#8FrMxf za^BnS5rTkaopi03FKd4&yEbuPh_(Xw5g-@*hYcrB4;+xxt$x|Cd*iE 
zh@fYuB(Yl$RI!Jn%^2-)NiMS7l1(`DTfixca;nP$=&C?1{;IJDg=7OAOOURDnw^%4A-yD-;wj zTFE&Lsd7Iq)iT7(XY7%S7r0F;wwO*n)yZkuFwDz?$Jz1B)oh!j2{W^JHS@6i4)-v^ zi`81%MfJ$ma$Tn?Ss|v4jW}q}px#~FfO$HnDSwJ@!OLW&rs>xGJ{!Xvis)iK+?s6~ zRCt`V-;&8%Eq}#K`L&(heeDX{>mJR?w>@D0Dd;hSYWqz4uL(G%05#6d@e6AaI>A)9 zO4M;^9}~Xl9&_hd11)ag#T-wqU^l<6<=o^F=*V4PxJj-BT$J&5+CO%wAVpP|NpXF{ zu2AE+INLj1@svDzVZ9!mGE<4Wo7GRZ%&y?NrS7tOcE&JreY04j$BLX&%riDsKaqL= zdMleG8Oi10g{EX}0axrhhxsTR%4%7Gvz?iteU}*9(SD|^d?P#8cOBaQxJ)kh=VCVJvOSxh zV{b&)6n3*UPoC3a^J2KgDqq=6nn84GcOu`Z{T3@0>do%(H)Fa!Utph79qfmJ8%(f= zGbjEdnI0~>$B&C&%Gyb%(UWr*(RiByH^nH0ioK=CZ#7ouCe_AL$1{!y?!BB%rx{6b zB{3eH^CBr)_A3@R6s2%(KMUxct}e{3FNw^mFS&wm8{P{pt+HbF$JDT++EPMqXE|=` zsqEVOITva3tbS%jxdCV5lFvT>`-DFu)R-wA_v8k5$}y{~pVQP}4DIbA!X)z?Sa~7i zT3s)Y)}6A28}|FiZreYd4tcjsAkz6+$elgO?)^Q9)0SemFFD(2PiqV6yXR6?$^8(0 zv{r`e8ZD>07;!E&eLuf%eH!gEKaAcaWKK=*vZU>X9a#lTWD*a$G5S$*bnwGjbdoa1PKoNJO}6MV1J0g;;bT_x zu4QK#@uqRM&UObEV10&rI9-CC`caf#t+bUZTCYcMPYtAZ-jHLwgj|8;UgueF?N{8_ zr6!E>>rDC^<3jH_w2@o$B9zgJc+Eanpt+_>iuHHyW``y|^M9{YVD=})31lvAVKVF$ z8768jE2_DtHfKvYn>?_YUX-Q6IyDR^GzS z9qXhLuFPR3^Doe;gUOt?nHZD2Y$kIfTB>f>+8pMGlyEH^Qe($c3+Zh4A$Gx~7u+YwjHQ$@5pzS?({wK1PO^tN;@rpHe|U~t z;_#T;^56>pNR=WTpqIy;8j)h$N|rLt-4qk`;~k}#^qq|%jp|&5%m6EpU_OQ}U=O(L z=C&>>VZ7_tbB5k3oX^bD>Poy&$*=9{9!Ehhzzfgld;Ie=V zHb|gFR|`En%_5keiO<ni)EZYDSDwG7?db(SCh zwuwDZB+c?)tYLfI_R!>nDV?2qM=ovZY? 
zy1NzvsmqrI236Df8uv43w+Ghz-!Jdb4q#c@Jt*=0*(Kw8I?9w_9I`9wjZlu zE7MDvcjwhj%iVvn$_n%O(H5qrQ;ID)Q}fAn!<9O9Zh}m{e)1~XaeSD2v@eQrbFO3- z97|wC_4sUs9*^7lI;Cz+%@gLAqdU{*C(SWJ#>1Ca(xw_uu5h+HeK_$o9qgia`CR=5 zJ*Mk&2Q%vNhYguJ#{8WT%!HwO)_N?1PCt5{v#yk5&h*AH&wV~Heh2M1(cN45PQ7pF zH(mDJEbcm!@zsz2YhgNFKIgw0=_$qne{NhbwMUVjUc8GhF61^(wO3_$pA6X?$G`mV zw&JX=j!m;PA41s<$D5_EgfQ|NFUwFIsqvZ6BS+{s|r9 z4@6F3Y?3qBBSWUFPE8i;X3$OlQ)p&aT%h;@dwC(3{w+VC|0Mr}T5HYWFm2Yp`EhNJ zMh@$ybefIh3-}Xs3jf-TMyko?m0+N^k&S&3&B_iwslDGA$G;;YBgp^vk-z^_9J}d> zG2^|-ms?ub&X4O~#^@bsq0o&QFUP7}t@ zqxoXo>6v-VNNxrt64)$gG0tR!lRR|Wzr}R(K}oLg=T0UszKA(zR={S;7t%MrO*6I0 zH{*Ur-RIn1{o|A@Y-qirL-da8*El>RWa$~dq+ic*WQxB;(bwYSg^Xs`y4kA=8FJu1 zy4!39UB>;#h1LIH(hc`=!-Ywlq}V0)-t2?i*^d?U$_J$k7&)-(eswZ`t^61xB9DD? zyMzDk9pZY{zNgh+on>~-En!~E`m!m{l-Z>|GEB1PFBWglU{)KB*P3hmqRV!d3O3dc z3N&05*zrvV>3n`Q6`=K${T;uNxmxp>mHx1SE_}O@ogthcsCX1gM=VLB&nUiN?f=eV zCla>s7mfF@`O~ae{ioT24Syo(Ej!KGhkN4%KKkXgdK)&emi@Ci^`Q*C_{Sv% z1eVda*iB2D*fb9%LFT41{&JooyL;VPK2@L1?wKsfa$X6Pw_O;ezDJa`S4kK;-8#>SpDaE`S^Q3C<4xZR*B(Rb5LvW`?-Ie&Edheix$Ei5 z>pg;im2&LzMpf3Rdw^Q_=@=ic5@Sd?M|QpkD;WHi!VmDXqe0{ZTO2Tlt_qN#J&afK zJ%?rJ%YU`l?bDX97v`Vj&m<2C`IR%-dHQnv#|2$AYD2~}=UgNZzp2UIJ>$SKo%#Hj zz1_7ECGmnR=Oq69GBei0K#h5=N7BVvo`PeKi|PHpmhc~X=x{qO_0qri@^tOsUamiR z8YBMgq`)z_ku5shK+D*vviC;3__IyoX*HurHaMV>x+?T{A0Q^N3GZ3HjaMmNVfc;S zqwmSgF2Jl{ijAPTX)O~RC&jjf&E|A=sB=F~`wDcoy79w*igQM9dg&F-63lW}U)m*Y z4=Wo#QY&*==(oD4g}P{Ufr{lcCXfG`4eq=z;4A72R-2TtJxV_22MaB>L5d%*IH3!P*pq2A!p<5><^~;7O=jqXjBbfGU^`%tf2znS~ znY|^R?CbvPZ1^ul?yJ{5epT*y{+8aitmcJUs>0r^*3%)MwrO#osg;rJC4ZV*(sq`n z*5BaE%WKi=WE8ncSO3!Yj}8gMZ{)HWR_d(ToM1L&OHpm9e=z;0<2k}P*1PQQ{C&6&_u<~#=Wxz0Q(N8DRUN15Q^}pQ)aY1x=wmCk(YJ|x^8GWb!49W~ zRTR@{_pj6Ey(DO5%Q@^ljd*I#Bu6%;FMwvPZqgaaA)JEwA*%UhHXV0Ml%vf>kAHnA zithL2=@01_`LYq?nYbnq_ORuz;$1B?Jtu8Fcc@v9^IX?X`>I}N$5zaz{u7;Xo9Eo3 z9H*+$7aqEE8#~+RZRth~$~NRiWyDh-8akyaKOG3r^mqFl09+3iUw^ydeMX$fv6opsBP8(X-Q z({H>&ZF@tQHCr;c#wmJi$o4)i!iiy0q*eJThx4hkw`MWIS4GtFwtlMi#|O4%N*TAS 
zOM|_uF_eEkRg_aExr*xUeMIf)s}x=x@qv|DKbl{?GMG!V>t#bW)>AW&Z05R6ovB*` z<=m>QG1T3?r|5~788ULV2^%cCNw}ot6jmA%Ney%j=k8pTqivo3u+1}KsZfg%T*Eavsbgx8F^q9VWZd>9*6@hlcgIq-Q+6`L!Cw8cY$E%b0K$k1;m|byJRqN76c9 zHn97vw~O#2zOb=#MR}ZuXmKYJXf$^5lr$^6L zViGl9@fG*KGWWjD;XU`3@Kdckxn&Z|coU0oTB9qCX&BkU-q`<`(Wd;F%NrWFowvHk zf1-2W*3s3Xe7zaW#)iwxOer1CQOAw@YogCw@ayKj-H&G$FRG%Kt%+ggNqu8=%(c0d zJq6sJ!;$pgbWbEprm3%xI*B{B8 zbMHz{V#P?t^UXW@W5`NI)=9|OBq;DpBG=JP#fO+N;Y0aF^AoX(`7wTkMj;cEe~^E! z9>T%!+-J`I+^X_@U%Ke$@h1T0$|9Uf3AP$Nvnc%Z(4v<+C-It@k$2 zffFBcPfTW%n7lo}TvhOmA8+HU_s`^<&(XZ>?>u_Kk@L*T z|BiBXN!g4`x*fCbP8|2HwSwAismhNZ;YVi{pJIMU_%r+8?d6_NmgLrt|4L_cmvV{f z?#zW73hb644vc%rXIAl07WYXzn)AM^$*lgkidAsN{N9uInOQGp@NHVHw0cu4=Z{M$ zGu=e`@Uf4y!&Q6!{a+y zhVvu+)ETEgHcYcw3{$G2#wCrr$?D!A%sSC~vlBCNDgV%CoYnk1PH|5HJtU}-d6q8P zGi%J?Waj+fW^4~-JjQ$SXToCX*!S*?&*IheFU3+ibW8$k?qbAG6~)vix@_SM%Juo; zvt#MDe4Z*N3BUs4%q69=0;ag!8-njUEwDN$*ZM$osrI zM@|3QLYlfg_+w4}T=NDKPBHTvy}*At-(;M`#-X-gRVck;Uq)m@9?Y45w!;9 z{d09rW!h1$MdBhORPW{d-R^RGME2oSsmDpJ?55jK&N1`&5yFQr;JMn2X~O#%8H{K7 zLw-84W$rm|XFLtp(bLp6GRmu4xt-;Se2FM;$D<|re1C==-&dVL=PjqWu9PVZ*L{<- z*zL{7=Z5l@x02~FLkZe#X%}bo?hU;*&Wl%E-6gbJCd=3qU1QUQdqg=ihHwpqwe+#4 zk-{xEpK(E)7vthr!g)0Y(ED|_a*qN$$-vgd^ty&f`t0d$u6~^amtk?0qEj2WdUqc# z^3!d0N^mw!V>@m_VLNLUBg0-RT*a+aqj={JrF7<)-*id1G1Glqit{tLz{(kYrZfFLkB}kL=x@wl7vy#+9>Y+>Tg{3vI=hhvMa1Z z(`)K}wlTNsu?(%bCYN(n?xEHWEMQJ*HL{l9CUcke1apT{M8}@YN-C9}#7m4yXEyoP zanqAR=wUZ+aHofKk^1NZ^vHYaOk{~JKlW-SM}3<^j(FNvJ=f`wvdf%q zQ4aU}`z*R<+!^LGrNCFca^xEJ8SM?@3`9n}T25 z_ql!C*~y!jugiqo^qN90UP{0$keA?#l?>_fvSG}ykv`nl{2cCAMG8$>XLIVwnbi7@ z?WF5-3^&zDpRt}P!<}k2rNjQK;_3=@n6^g>T;sX3!r@EL)4CcbxIy0ohu>FwCB}*z z4@7_ePre8LcmD^${}28T%T64?L8P7VGLzxu(|Sly*T4tUFXE5g`uIk=8)nkv`XByf2eK(%`UzrGt zb0qdr)}XE`X_IGh>5x$5OQw$-Px)3CkYGP4>~l$#>U#ec1s~JJJEo2RkIAZJYK1PS z7ab)By*HB1$Xs~Sr2#tXoiNJo6eJ|26TCx~WEgdT;P?yhcaX<7>sCV5J!LX^YXC$t zH}Dz{YqaTzExd$DVD?R2gi$&R>^(Q&T`yuubLwspUR^fqoc;l{U(_L${Td)uI1(Ga z+>TX=3m7Ij3*Ibn#(JtJp!z`@%I1b*b*E6cwRRpJ|FRLCc2$Lhi)tjqXbzrU5P|=E 
z)gnqyuA{W`?*(5X&fvst@$mJtB5s(J20yaSLcgO6Jn=n6rn&CI7ILc4ery(2OwcED z)Lvn~MSxVZ#H zSzo~!WHJs&=oa}Q|3vIhBXG6*iF}XF#SeH6e>`JkAVcPg!BOXnggT1 z?!*T@gNV)|j34`jg3Rur_(M}EGHj5C9}*_mW|}jOQwo7O_G1P125a%bkM6MIlM2{0 zkHdi(3t-bcj6XgY0RfXmx`>n}BWG;D@75i_OCm;*A)#Gpv0Is-|K(|%aHJHb9vhGE zsHMV0gGBJKSqBUH9mvbmPI#B(LTI$!Cd#ugj(i>0fS2rF0XKG!ARDvg(U&p)5IiCt zZEv58i_Tl1@};48-EALy+LJ|=GqjN8{y9*)_bR$?k&5HACt#zn-{^3d3>N^Q;nWhNk zI$Dr7dkNMyJ%+bLvQXcJ1d4CA@O7x(s4ETg6bDCDkTFOjFuGz4sOCptbqs{d_PQC9f;*`RpOR!&G72%5kW_5K7Mli z6%2<(fFw?1Hx0g;pN3WUw~zp*3_Rz|WBj)5Fp26m2GenN$R)A`*OklQ zxvxEOTcI>QzE~a`{Yrzz>!rkFV=mTR?G8Ir^YQiNfyDbOMP?r3p}t`w(fu8U7WwtT zakUp{Q~OmMeiyme?9kAlo>7vi3n6Sv~(698ao44NpyowrmpqaZQEvj&j9orL(ZPP=p!VJ)EjJ zFqAlw(@?izBguH8Ky}F`kzm&WOHHv@!P;>-zhkh#_xq*f`AF=ZREd&5b*S6?*#JKrJ2+o~uf zK-6bREwDw}3R{>ZL1kV${E%{h12yZ3(hdhae_S1^JrRzhCd?!DX0I_$+yD{tb;D2^zV?KL z5$Uz%%-qP>P($hrLih9B2K(XG?>?0F+1u_@UQl6JboM8`|cvT@)`j9(6TU;uR?# z_{#n!q^zopE?`p_7<&`R1eD^-+w8GlM+2IydI!yH<~Vj@?8 z20FYT0;j-*nnjp7R|fyJNRwd)hf)zfmvPo#5q?~2G0u+O4N=koQ2Mh5Z*J+syN{dT z?5;?`nSy%Ubkqcow+w($-%2n#c?~XY&%+T`A>e3Q3Ny8LlQqUqu+NVMyiFyLRQB$I zCx7;#Z{s^~)$Y}JM#C*kie+%sxYao2S}xR@7m}AF3o*CG80@9g$ox}9>P}T-_+{#f2n}c_sEk#RDs*xKiU$AW1JTULSf(~ejyasF3KzQAZd}E_= zR{v=*CO+WaegVQ#6=2fOi`eIT5q#(yPnP<`qLWqI!C5m2)(6hUTNMt%X5T6-QTc|< z+#QQkY`=ivc{{8r%55VE-4B~zM__;Jay+m0AsigCS}-o|IyO*XL3{QNuv_bk`Kj^n z5bwivaV|jKZ-a~Jn+SLD2A*Yb7w-!6AvQ7$z}ZPi|IkzXHf<>8i|nzzk_vX?Kf|2% zVsMGiBk5&4W_P9o)0>5bZvx2Zzle;Sk_eQc2U#_698F!(0J%3hk@Wr)JYlpua&j8L zW;z|%>)t3J##ZQJ>T%F>7!DIDMRH|L1Xj?VAleTs0oR3VVd8xNQXPeLW{dpgohZ_- zkd9P)x5BQsXQ8c#V9$obFtp|YHk$c~$dPy~zwCwEUvL zWe!3&(jH+pM+S4@2k}-Sh0BdJ@wz`nFk}2xLh0mV7t1~Hw5=RRghr82HHu702nST? 
zL2L%X(BjZGF!s5FZd^;l#RlGJqr(ug{9zL|Zy5_~OE)05rLHh=Kpr;F8%f5=_+yRa znb7ID7F0A&gV4+XRyCi)2Zs&8^VaMpH}Bm=7hPka#km4p=N`h_pI-q^W+*YeJcjBy zUW+?@CgYcx7jW^kxu6;t2j`2caqdD%(&em;Cl;O;D0{ud`y5B%m_6YTx>bnnIz>5m z8dC6+3}Nkm@x7k^i2L!qq@<}Dh;|C;{+)#OA!Xb?atKsqHsSsuImmi~ z5!qm*Ow9A`ptUd*dAVGINfMrsnu1qhO6v}I=~jW>T~B=6 zVl%3(+$gx?@(^y>tbj4kN?}d09L#W-4R;?^A@8=!cxOZbOyYJU=K^Uw()|)vyhr1s z*4b!HVi^APC_!)|@->RA`zaXq?lOFiX%qx+`U9J7-oV6H)6nh>MS@f|9sSNwM{TNx z_=mMW+>{SMS<}6dovSZu|7naoozKC|TpPSl)tZ>+HK6PO8SH+r8`^%g;03mFILJw}u6P~`xJ+!f z+fCHhYS=MxG*&n|lS7$^TUZPf9}IbygP0DBgnN zMMH?&xIb{tcrsaXtr*!&^MOUT(_zOWCp>R(pK$3GHjn*F7VZ?udifV*Uf+O`;W&tY zaTMC-p2q|3)i`5E7wnD=5EOo|!?*MPgNa)Yfbzls>}gpE)2h?(?50ET<=atE@jpUl z=+)rj(q8;-Z3vlIzYxsk=%9^XKI3nf#^c#wvs9PAUBqL|*1=8BKpfh)o!G23AlDXW!qaSXa_N&R zGBtVz2`KP?frEZu`NFTKG5YB~6~yDZebH%4b-PJ_lqN$_9y8P`~N;nqHF(62Iv zkf>E6OiDdSlMcW^^>twCF^n`W+Jp9lDZ#M##jwA)8#)w&!B*xRj<_2|LXIGuUm>!K zb@tf1T@nInbwt@E4&colx88nOb@Ux--k3;Q~3Re(RcFRt~as8qw&d%`hrT3hwBr;ii?= z#Hvplf4c4fDspkK;{0)R_4z$C%31Bk-F0$53bDJ;4j23TXQgG;zEm?s*}JV{|IetQUvTmW)~vj%Nfii~bG0 z-gtqa_6-ZD|v@CfLH(s1BGNc^0*_REQ21 zzv1H1QCP#+9!x&dj(LZsbtmEeBbwMO zT?tE8l;WS!iD+MbynrO=;1$FZ*6eJ7H2cM1G0Pi9JsSqENA}}kpZ`GBzGSpz-ZZ?( z;T@i^JP7|rb;v5_Hh#OcN1$Od0;bo;*9x9iX=g~wnmrwKapcDUB+o-tcIk_1b?>9$bCDU;8sv;=K3`6^T&mfag z!DuxKK;~~t!RkLXeBhcZsb5`&a=wK@zFi@x+}nlq9+yL{Xs_0Gbp&!4FI9Eu;-;_jaVc=fY!wREVGb*1>BYFA(8+RD=H3dnC5) z9!`}y0-ID7NtaU*`7T&Xo~YJCbW{paJADx~%=rLA@_W&h;!+%E%^~YpZ4wtD+Fw>% zgM)oCn&g!ZFBTXE7z!^H;vKEE5Nl*YGVKFV#4-T) zIxpDMuYu<~`N09JYP|cuyCimmJ3eOI4fC^) zrZgEhZs~zFEn6YqAOyGEIS3ofj^RZQ_JBjwY1llG5M7rXJa9b;y9KQw=@fwD13<3k zxj3?79R4onhhyKj!yXYnSaxp+yp<815l@8TPyG`>a?wM4IogjTHIF7`CWpcOu{+sv z(^-T|9}oC_G}4ms#;<-KLW*XOaXPBTZa+uDtI(ZjjQ(;+m3Sk<;vGhi-(GB;X#mx6 zR^YC<93JKLppG-g@s77!q1~$#XS5oiDC!eB-J=2*+RLHTXbiOcUWtd+?InUqI#|cm z53YTe#k)k<)~l0$q9AY4xHY%Nr|)~gM_XM%-vHp^25<20eTJrPSH(Y%7(u(i2yDZ* zqF8GUNWa#Cw|vHUtC|&lym0{A9V!oG&;$O@Q5Vs?ZZ&RLqSsF(!KE=4O?aq=HMVubmIvF( z63zRlNzV#qdF4Zo!9=V)CkC!1|HiAvNl{ClMBrbRs(9(uWIV8YCVaH^g0zk*e0<## 
z%!KLT{elC6xe^a>N`o>EpB)9s2a5225uUI`5PoZe%=QC?drtksTO$Cp%kpfh_LU6nqlYq+aS615_vh~CZ4pOBKima{RddJq(E{D&6Kwvo#OBLipqySO(H>Wn%k{GmFIavbEMCqNEK1461~s|RA-5MC<$UlS zqj*T1c?dUGtb<)Qm%tI5)g(`|0FQt&>|Jw&EK4?qIU%|zXZBruxJd@Xz-rv}>@N%x z&E?erv5?r2MSiMe;V9oT@W>?_k9>NJ+*n{tTDHW4>l;_1Exj5ky}1cjobDpmOQPJh zt%s4v?>^k3cN;6|8o<%US!mj-<1l8JEO^Aqkifuj9ClR$4(>363YA6BYBe6dj0(k4 z!?%Fqtx&xB{(zwD1q~0Q-l9>OiuhEq7F2%|;NpcRNu|_6oImS~D8~y6jRt)njq$ZCm3dp$IrecqCbMC$l(4beB+!7 z-eI=~Zy5FlJJtjsPq$}+n6n!&-{=B2@Kfk*J`RmHH$r2-Hl#jsBHvq0@TYJN*$h|4 zDv!V8y|tzI<;DZBGW2101)gW4hFY5U-)O9H3`|+M8T}02hU~YbL+AhtQ-|t+|0dov+$0N5|ECPn z9}c2lhhm{)LmJwBg#|;K2;|*#5piY~co`>zjX(3C+-(bf=_|@JvfEm4Nb5SN|5*k; ziNzq@`5tuy%mC|;ndo>@434r&fH8|`bT0fm_%A+-AHO!jih-hg%adOC)7l6D{k0yA zi0%+1JUk04Gin89NxjhVr4c?1j7C#yvIOZT6HxpyIg}QviJw{=f-afk=t#vL3|DzctmrlsmPH@I?{`I$&4=vp?a5c5=AkN{ zzO)r-YLA31ZvJc0y8wwYAJ%P;H9oRHI2=;{kLWSY_qOG0`jKH9aX0AJl_ieG&jKt<~&MVQUneQI`B!wL@*KU;gHO7`0th>D!A&5g2$g0;l~!kM$cx^ z`kQRpxHto5{J4iks4PbJYTZP4K>wlRN^vlK4MuAscc4%A9C5KuBCPvV4a&bBA`GC+bI8Xh739iaLd0%)NOo^B{dBGsZpul4$oXEB4<#Cg zu|Lt#ujSBu&KF(Wd=?#iEXwOc zBAlIZ5-7U6lwF6{im*_ByphHS=O|J8x5^U-*+|GJ zuqC{d92IGvNKym8z^(Atgt`6=t@KdGwToq-rE>@g?CeALemRq}WittD8wa1D1O41q z4n8v5pl5C{q0K*Gr4?6U-3}IZe0mKlv=%|{~%%0dH(;6{{i~{!T;d@EB*rf zf5czdqxygnd0t2)s4EqGl$Fkcw55(1G0{1DQQs}*Y1_@5uz)FvM zoHs}_bCi%&-H8%@xKzQ`#RpTVqYP2LY$gmo8!R=BmxOl96UDsw1WCHkB)E8>aX1@Y z<4a~(Jrky%I7ekW0v&S=*ydv^*_YFY=kC9(MZcc1f$dc)BKz}N@eg11Vl9OYxZ`d# zb{$QCP={h!Z!#PF;BU{-=Qd5kZiIS41U&`r@~*CxG1c!+`}LI zozJ=Kn$9Uy9^-6g*9&*kpK=Fjo`g_b&ZcSe4^{5>7sP6z_x_`SH20I4jRLCfm&9c?-dLe)k<# zIQQ3i!t9vA9XtMw_yoxEE|Zhl|8bU(P%EaXKhNa&x|c@zG1eD^4V!a?|L2+YbYkBHi zD{=-u@6Yw4XFpFaPFi-C*1EZh-jVi(vOG~tb zRodu>e4*m$YXyVP2Fs8jQS(*%#X75q(4hD1NT3V8Fh#-o=~P=x2Gx)eN7X;~qwC}4 znDLH5^t6PfIm#S5@w2NXiF-~I=H7fnP8RfliRU;vh*WGS0StsCY|7 zdfw<_Sntpw?$^)tYDM?j^i2o(r2Ia`memiX6ur~9Sw#Y>;=~ZD?R6`D6){%$@vVl~ zM&h&^h@1Lc?s?a2PHuCyFy@hT@snvI|Jh-};|lsz1}knCOo?RKDHLtcUP1!90@88HChq6p!sUqg}YxGx_ZB=NWXj$fvKM>JoQ! 
z);#vWuTNaWmn<=j_s>$QDs48`n-b2|H$9+whiGCn`S`!_ui2QyExGJ0wymry>*!^h zrqR#m__C9JYg3i;FlR7yI<2YSL@y4U!#59WLje%_fb-Z2FpuV!q)A>hmK772>g7wx*2!OLO>$ zl?)?2&&N5UMVa%RJbO8uziDN z`rou`LnKv3NP!Ug0%jl>}-?ia1T@FqV!W*A~OEkq@8|r z=)8%{=8U&wPediOEgehse9*;DH;qj$DF%RL|IL(evNhete>q~@(S$=Ld6(?iZP^k?ODgdRv0_m$W2 z0CaSXlgS`|&l&sq&ll3zgNGk-XJSV2!JF=K-c|EBo1>q(YrpP_X@b_y<1}I-dC~TW z&(+A_mM5n|RQacW`ah4?@JHvI7V}R3WW#-ymExy8TguLPJe>213FTaRrCHx?aoouI zt$gC&D1L01R-TN$D|c_L72ld;$BvWtBU1+I+3N=v2+ut{#@#O6zz();uuQM1W*bl^ z_g~hf8UE{sv2tH6g#UXYAz^)ZB46~@UtDkD)|=eGwFCLfoWeLC{-JPUuA}g)@gv@0 z`4iUpZV{*J-64#Q2o%%KCR&9xLnXxXS6TTwPNHcvG0{28<*GQcbI%;+>iXK*9iQiO z<#o+s{g=n=V~6ax#I+wE!e9O6&*q&PUr=uU2>qXS+-H^eN2@)>Z8q<2WJm6p!p?2; zEPUGkf<3I=$xb>HLRJT=apnyjoZH4L-0RmGd0KZr31^0sa$h`BN&1~uyld7avi##V zVd|=UcIrOnpZ+JVTgeJ*GTDuh<8tt+55#|G!N2jZw#}XEKeJ9;ulDkDoB>y7GUok9 zR=={Tc>9I|GEQHL9y{Bfl!c0LbV|DMBz1d!u+1OMSL9nRw!1d?x%ICTf6FyQ81U;P ze=y)Tw<-P`m(aVOyT5%1KhZ)y9r5@X)Wx)C0uFg4u0|Y)56Eo4{{4Se7W04AF+4(X+D1|&lgZpynnGy&e~^^e4JVq zzwT_S@O*P?;m38uS#^gMr0kRr=ii|EPyT8{3GTMUG`@C{MvlR+MDoZ>^Pl{@nJs_+ z!T;(l(X{08reC?r!$$B5GREZKDFar`QK1ABFK{=tZgY#RhqF_+M9_onm6}WPmG1s; zZ9mtUL(7RUiWP$unTc6pw3*Tjy11#8>Upk2U*B#emj7ybJ(YY+fuS5{Fb=0?(JNEp z3*>`%@%k~y@2ad5bI(st%)8qk({Ha=(URvzl1Yzxs6Fj3sNIjUDI6C|n;)CZG)=Ch z?V4qBwDP@Zqb_x3j;0j#&N2eVd+AX+W}3K%2YSQmo&V&IFqEcV2tDcV%bapc!fdG( zD$f7L|AwO%=$}&3;(9eT!s)JOVYxmigkF~Z7oQsYhv+;HWTr0DqdQVDt*e5F&-Sxo zT7B)(eMLvKL@`%~(rBOcG4!zN$#nDbi?my?u2}xl23oYu!%uY5 z#c@pPad+A&H3jwD)%<7wM`MoBZrZitHt!EKrC*s(U~Z027h1fkq7J>#qZJH*vYQ)2 zk2H~Gj*I;NtC|@TZ!JV0P?cw%21HOFwhbx%X_JW8=}J+{BVFl>`hF&xa!~d59pdtThzFU>wR}2jb0*~V){2ClDhEWDpAwUEjCMsFIkg+87 zBs+`#x~Yc#UEWL{#=N5c`qwjA8_zL|hq~qEJL@t@VTH^N^L=E}nYyB4IeFGXjU`V@ zDyYSy+_=GZ4wjE=o$0#W6w`B~J6F2t348u`+&}qe#%*MlJh2nkySQ`()4pxL>9Ok_ z^xf(}GAZkV@Byc7_U^?;YD-8gJ^R>pihpue%vbQsOMp^Gi*@_%#wITDLnPTSe>m4H zYsd9oR^${1I@w5 z*&69I_QAl{$Yi37sWv@l+#G@NU 
z{qj<9WZgz#$CzBf;e*fE(ANw9>EB-su?O~V+8z|2DB=rmk|vg3(U&%;YWf_t9hM+%zR7dBjC#L*PP2f<=48^~xBGVA_Mv=6z24LKjyWkzIN=)Q3eT_wg+O;Ml+BzuLVk>UF$9%HRP?&+OqX7;Sf_)$58wJovF@Qv*{fT*v-8%`=Isyu=|5muI-?aNDXurqCz)|R_|nA2 z^(?b9b0IagR-Z0b3O1YYayoNGX)UAhZH@3ztE5=}KTZS&dUL2__$a2YLFD9u{-(DRTURa)qK57Mt=Rc`l zi9+RYMtruxD#6@>RUJ6=QlU6vH zEFS+4i7WT1=@nai_XyWNVFDKxQVTIJ*ZkZ6o4-$@O1iAXeS7@jcur04EH@zaopAke zRD^UBx$+{2>R6b^6-Nbd(uof!bLnBZxA%%-^-q4|zRkGBmKS=HUpq_L+zZbH#%mI} zodL7Oyawf;mo|*PX;wrr-?y0Du$SjDy`uj0jp~>{&SieN`1|c!6X~TrN(E!Dm2{v z?U_l+^t`)g>6$80j`8FWs{XY%rBjkdtOgSW5VXqn{SonNT$!BH|>8)+p+_s zszDo45)>(mi{r4`B0XBy#zV|&u&vY7?8xOICiJvF898fqO{dJBS^u+tLt9Ha$>x%n zw!nWLO}z^-If|Cjk6wSr`D6P??MhSTl!+w$an1)yF?ScPGc6L+Myt&z*m$#8EI<0~ zPIa!?RXpFroqjm}6*Yc zpMUc2rlaYp!5755n}?sEf_vXlaP3YJ>ij~sdqk2O*F&ik(4wc>jG%v>8$yj0)a5K0 zHHDJ9IgwUM=qEk#n~+s?J2~5&hG$m3rtbM|_$PnSvrEMrHx^N4#U?pkE9OxOf4=>* zf6HP*2|Uciv}^o)sP>g-ax0GM(iWA&$qV~NQn(?C?p$_;a_>r{LW~oNw5)7+ai4CE zM_-gI#Pfg3;R@b$RSvO8lq+#P=EDcw{=ikuXyWo_Cv$pdvc>kVUn<2v(NiiZYMN4V zWWbLPxHc9NMfutXc@35>eXo*Ixt(I(YR-FjIR1qff##9Wr)Rm}hjX|UGrw?uy|Veo zx`rhNeOLK)C7j6!Hi&mq*C??Y>B}iis=(U$KHOZBtL)QF0sP!g+-k*I}u!_H2 zc-X9J+BTt_>w5jaeb(T!vHh2L&E3jk+JMh4ynSPX>Fo*W{Hb>@**MX5W}9wAN!BF? 
ze)eBuo|dR69``$0ymrb+G!}He`y#&oJ?U%9j_l@yYFma=%{M*SS<~Fup4|drpiV5C z`f!ri{_Aig`8cth&FGpcjLnZBYSk|awm)9;PyhG!XwolzZi(&EO{cWjZ)^qY+9%5z z=SmB|ENd6Wr-qZ83MYiG`n!drt(4iY$MbVX{~St*Ufi(Xa;_4Gn;pgHTrUVq4ZlE- zd>@&3@aRAN&+8+?j_efn*jy8n+M#PG=p6gc{#V8%3Y*nmie>8APP0?w`# z8u=g-NkY;RxOrPG$@$sOg;#qjh3A%pF@yaEiQR=RyPC!NFOU4l?0i*B9EgsY+PFNX z&)Sw5l_|rRSsF0zD`doTWbMA;jJ}qh*NGevc-ncwwM zbmpWc7@4+#e(f=ajMx#vtQ#6Ho~H)edgOn*@v#TmTPq=U|t>TpauW#1gt7b|T z8;R?^Wp# z*`0#WXOsTv|L~n{OwCIpG4HMFGwGo#pVRN@2om*2g07kHNI#Yb~ez=po0aGy9C+k=j}vJXLB272E$7l;h{pyHZ#ElQCksCe^^7r4`O;<<|5} zp>`^p{?mV~5=VPR4;9z@AUTyb42+%Og2U*qr|#m(qra1Dmm`^vkz?rS8w~wzcPg=& zH(N}rvdgJpxx`ZO+$J}FIbApBdhrjb3_4wepZtHZ_T6zgzR~|hOIt%*Y44ur-q)Rk zWHwbQ3S~rQL`G-bzt3OwdallO zz0dnx=bZcQy3W~eM-eMM(g^NACXuS>FT|gUXAx12G?+0w2vQFGA*A$o)p7ewCh_~~ zs*+#lJrb^Y=*k4)G%EtEF4sqw9gHQ$s-uaaqIlxs4>xeMYz&OVx`2l2g6fqnT(I;} z54g3a3a^ZeLhNl@am$`V=;qU+;M&wVllYDO*W>VUePAP+Rb5KU5&KRkPvTGYTnZX$ z3xs16BWyw5X?f^%hb<6gDllg+8n^jFkiLTYCNpPr6Hmk?(L-163T>rwu(8hRfj&yMUqc_05w6Lk*jZK5D`7o3Qn+1+|L18%@~*zStvS5XmYW<*j+$lF z8;4I!=D&MI&)FW4_Z7-Mw#bu=y{TpEI8i}dyd+P`U?!y5-fH07T}Ia33b8%cs$rYE z^u@(}#pg)8dbh2a{w?D9%@tITe>Bi!HSjPaPZFwHF68N-{rkq6gST6&$#2e&tu;PA zC1w~pP3mU8!x!?Pk)3c}kwq=!yl3W+Pf-+jbC3 zyoT6+GNns`FP+a&74GfwYkK?xqqel;;9P!``5SiogKSny%Y-fYoWSm!BP-*HqwxRo9}ziGe#B!B;k;WozuAi`PhGnFp_l!kwwT@p&1QBs zbnqqWLfKs((^zVH3C&BM%Ks;_5e*HFKY%_Rj!*|iULV508$xq)U${#eWXFqX+ z*$Ca^taPKaP)BcOY-F$JMe`Tz%i_Ov?_izK<%07oPbT00tliMb_u5z_T*vEc4T4LH zf&7tv5jsI`DT}<{!lv!CWSiH?^Y5Bp<$uVU$DiXq#+}U!vtG9|`FpkN8JWm!7%iH> z*zI0OYgs;KeV?~Y;-4MAovA4n<%ds*Uuqzmn0=Sq>3%bvm3np zC)b&djb+S^?IBF$@E%$_D~6GbUMaNyrPIRbrqj=vTpe}x@69XpwdEI(mWuI7{1%V> z*v$`D3fJXytsQf|=F6DZ7W2v+Wtg#9f|?Ds>6FMEX$EiF&W4g*jHD@_V>M{Qm_EM8 zq&!_g73h1{`PGorwN2Bg(YJ4z2%zwrfdogw; z9m09L^kSJ$Eq^(&_ixf?y4!i^n_`|`=P`Er@o!X*d<$(GJB1FJqADEgz0;y@-=dX5 z|M%A{kr-4>=dH5R0`H;ul#-}19siAwUwyfXR!IHt{hyrdJ^c6hcR}5eb41HxeZr$Y zt8RZ$)nxpW+pA2Q_MR24*>ssEAzv#>RBq74&LoP`8q96pd}Rvv*)>eO2zMZ4gZt@f 
z#iHs>i**R@CxkHHmx`YcNQ3#MiUe;|sebW|ETa6Q7V#g$A|m@2##8e9tqDKrb=5cE z7x;Ql`AOZ3?!%|)R6lPF+Q8o{!lVWE%o={*B( z9Lynu-I7S@nluoU$kg2zv`+iKYvl00lFGJ6gg&c1EE*_XUP`u&o+Gv{`9%z76acla z5OaUu3!(&jO|Gz3B3q(UFO~nwB#PaO$>o~KM9`eGyvLtI!6tEeBILs~(&*)0p{#%M z%Liu?k3Nf#CMvr*N)9u@oNuy|-;Fha6>joW;TJB(s0`7N#Ym$m&i= z_f;Tz!*VWKlQi_ClJR{y^H z_fht0#46`Jg8%*8^yni@d@4upyNp5QYQym7MKh40h6Ko?DIOU#)56Xy8Atu+Ih)S& z%RsE}sbb9cK@{aC!<2e8}Ad5f|ZUoVuAcSt33w9!hSUeUZ=i{W%LXq7j)Q6R60i6kF43G-=!38OPwD z)_a&o?jbW9>jos!@vqQt{#)0b?`C+@vzNlL3#Yun<~8(~&-yzZm-e~>mCb5_pDEua z61I6_7rxBHmN|AqQ^R9~x_YrC&s?YKqmcihLn6r8GohS&ZpV<=9ffda-5qGX$VN!^ zb`(^vLmU2^XR1#kq;u{p;&HDIxfF-P#am98XUXw|fB*0MclWlSkK7c6vgMar!)8Ux zk+)NqbKb=K<{hRl4R1e&C=$LTqZod@R zFDce!N5M7N1}?Vg?5pFXu1!KCp6kJz&r89@MM|8`g9Ad2u8Y63a#eB=@?WhRf{O^& zo^J3F_nNALFHv&B6y0y3eS1W($eWCi|Lc2Sp;N`=2?drz7}uZ1DjN`Mo!GcZ{zEnH z5|@u`6v}>PmyQjJUB#DpO2A|6dQ?YncQJC+TGUq1s&ny%aAK>+F`VaNY`(zjHC8>_ zjPM&zL9+acI0}tb$ejTVsPN8XOb2zGhX=uR}je#F5^g!^FdsC@jluJ2tTIEtGyjROqV%Dq5{_ z;(iP9OLV8<+mC;NbnZp4Am>0tsMTUQ zUoEYbxw@11PapkGcv>A2uA^P_l;Dm*H~uSr5bE9%i{7017}cM?PteoJ0rwI;N}O3x zjwh&9nyVc8jhP;|Bi1NpA)S(?oTRMF$o*x1xz_jy=8^3-iT~}j9N1&I7;e7pgsD@! 
zD7yXZvPt}Q65H_2XOxBW2DdBXjZa<8@zZwr%_&!*5hWF*yy-jfHYypjBNDMV|1M}F zent4V+hn55_ga|=u_brm$eu6LIHQ_8WV2o$+=^+zUKdV6rn5VsogE*9_AeQ79=Z_R zg&3KNp(|o`!&uS<^YGT0=KnbO_em4WMoSNKg|gQat%6fjGY}I(hSS=OL2CkzL4wxl z&@~4+r0b?Fx+8o8l0Q?~#AcX+SIm<{my|hk<1h)Up=*&`s`#n3+1V2Kt;T}_;iw+L1K+Qzw;cG&piP?ht$Q3ci%oE*4vHbzI#C2{oQn+wBr{Q-s za;@nRH>B_x_T{a^B>!8PB3Rp59Jg~cG`%mVDbxN7o8;f}#74aRtBP>mH^-*q`Ag)@ zfBq)$Ydi8Gt-dJ;HuQ_=s*S}mh9a>oW%1CE=ssc2*f#69rEKA8A%7)Bs_2i=BXCSr~&6J_u@SxNKaA+`?7gHV%(vE1e8lbU)KwIz6m~WX3JT4 z|AK}7durY;%&sDz8@J{?HgBe&pXf&d9Ta_vG{2EV8y5~4{~KSp;4vcm$OyOSv%{Uz zi_taj)>-b}7BI;_O6TDVIh%yC%b)Pj=C}szkOIvesr5uoPOU+>qS=V`4M*(ZuATVQ zmzkK`j2bH9bJ`mo4J{=v-wbv!;wYuv)@KaCsRn`!Qt_ zWB>UC&XcS&uo=I_M0;s9cdqu^$^D;x@&`O;L%wiq!{JK!yIq76U!MUhx+=grPb|1`TLy(3ndNF)Z3|p3v|p@$BR2Zh2QuWf@tN+gQ8UAPRzG@bChfms_&OdTa!DwAZP8_P!#fe& z4|j$7W^P5qyvvbu)L&%g=^SkJ&k+2KQ#uy7L&W@1ts?ebem>rR+7Vf`^Q?{FAqd&y z3PYQ#bTOMa5uvPqx|fsmgqHd!V>4_FO~c^x$ZOyIle!5!XoFSf{uc6I`Be?;($6&m z%OWuGqt~Dsi&!|$T$4CjRF1AArqKW0+Ha~%)lyI0pWMVmw3Oo1IzCav6ig{lku;y@dxp= zDo5yts>d1ZUHn)4k>qnodhjG_c3>DS)fMy^%-DvHT`3@Z_R#o#DOK|zcO`uH#nnV< z6(7l3@5wp!@R6W@`U)t>L>fPJD`^t{l~paU%Uw;p{Bfiyt7?Xxkq8#r*+1QAmPFx~ zHW&!!H9l#9x9oQ_A8B2UXE?~fi`BSDc!dg>npun$KFG$_N?n79btb~G)BKXHY)cY_ z_~jMOVhR`TK&$iB@xY?h*x0)=RH{x4)m^d+<7|z`nqFD2Iz~ugy(@=vyPt(WJdS97ZHvEKd zE)AXJf9aKE%$&l7^NvKAW16X}%rmR~vHLm6+?cI8@cAKqqMFV}e+T5F>6!(QMMtDi zZtL+db6@kVLTpLmUy+>AD>gk~C3>549KpxhVTtFCa7t?uyyxK-q5aR)^?~yRUEwO0 zl%UD$H4&YgsuphdgC_TX?i_ne%kim@mmhVqh_@t)`Uf@HjOvX-Khz^<7NekDnhoO37fQ(rATG>e|!2L$AkW}xYGA_ zAnVS18izh4p^jHElg~d9AG(p97Y+;OP5mB^+#UO9@@8fpk~ltYlMxZYeHs>rC6)hz z*LM%VWu0ejPKLJ#bv6HyvsKBWL&EhpcA8;VT4%w2HOH{mf|_%POBA|MP@mx0G-*`C zPD>d7=C0d_yl?%4jmpX52Tm_TT{31{LszOM?O!z~14mtzgtA8^%~7LQ$FPftKAd8) z1KHPEfy8Z{hkTE0Mjx2z;pHMLu+2}~&7Rv7p}UpavBdc}ymgH-w`uGkEX6J5L+WZlZ4JJKpoSNu-M3Zx9^M2K8G~8}8 zx9is_Nae+IJTPwx3RSA3xyz@)U}2Zg{*#swR&Pg>h4ybxxZ)RAq(VtmckqCDzp>uG zDE57@49$=1M~zgBg*H-E$f6c`fAA-vDnx>9A{Kh4*=o7?|I2@z`%4I;q4`4D8`)i0 zMcz4FPC6b^7Bpi4CTCDhX%FR(aVlSSbw%uH0=?%!8 
zoQw>4Swi9$>ajj2Godd3@n5954~|Ld!1n1@n=ZlH5pKbhN!@VmZ19+8uY_ZV!aiWK z!F$YKw<_YFeq4nLUp$4)mRu+1wD@2%jGVBGOLjv;w*Tc$SL>gd$B9Cvk-2*g`-lZ#xAfU6%AA$E6K;FgmXCMM0g zuuFwM^61b6=dt4kfaF9j*B*$N$$aA*X>%*5`+%`^UmU3s-g-ymk z5y_+Qx=#kedF#ix@XOK`lh*XLa8KYy8}}XV9GejXw78`b3V%`pkw1!TI#Vizwxap& zxp|eEt`I-RbQbD`p63p2IgV11FOVm7Mo5xp8r;t9gU`mV723ZBTmW~KJw^>0&ZD-; zdl1QBQOh7J_sRIjdLG06N^*s=Qy&H(hqGeQGY0wGkx(PpWi%EhmL7zkt16+tvU}0+ zHS^Io&qPh1#UDpBzWzpEXIz3-zFBFl*L4EYlw~-Vj#MLVbxS5WwrGv!!U1K-mzAqc zeXh&G20J7tbyKOPjLu7@gk#4N>kxlT(p>rJOJr7u0arBTJC`Vo##HQVkPXI`Nbwwh z?&oq#p^cs!-D_1k^56QuTb5zF)@_0gR9NC=kSCUQgtnqqHzJ)^GzJxE;Ec6 zzbMA=mA7%j*Ui}U+1lpaKXI&hX$UR@>m!FB0?vrR3`90k26}vu#I$!l5bE-u{W~=F zK-1d@!JX4bCi6DLBcDLgr2qd@nTkCdCxr7BcrC`lqi>oy$sWbt_GCbv1xMh%L$ir8 z+q#?CgXqmb?%kOUm##VjO>q{3%-(;lqg9m$Slk>6ElX7p99LrUUUd zxY!Bq49gMjsU3~n^V;$7m*xtDpLHH~Qg<@RzM~Bv*9<}=_I%;YwlB84a(@a}-|(U} zQK|!PsLz|^I3206IcsDDzjZ1yzBW3SyX=O~r2TJojf4$Wn+xZyGq!<`#9T7D(GUn1 zzh7$8v)hZacBLsgaFK;Bsux1@Bn)ioM5=`N5B>gTRx2ed#P0(yL~LR_IjdIWAxFM{ zgnuen!?|6#kPR{p^<3C01$-%v~#=Xw$D<2OIi&PI7#V%;E&cGw{z*2jis~a?o)}=gIhYm$X7&()Wd9qqr>W zon32kaUHvp^7m7TOB20eJ`v;Xup<4C-Co9 z39#%B4Wj-Wj)$m6VBXWlP^(}=%u}&SsG}#3l(4UY8q9gNe#D=-k1?Z}o2(x{51EXA zIypayv6lbxzrmy{*!b^pTtp`U<`w6lt6M&xd$lT2wlfT0pI=PyGcV#fV*wWS*A?-B zLLb6&_Ip7cjBA{5vr)wS*l|eg`*d6Hgw<{h&UoY4Y$7G;?G{NtFCk1KrU)-~bZ1}|J< zb;&_ph@Thijr9Exg@?<}AeCnRNC+r{H+tTLhNQkh`#Bed&wu&eqHyJYee{Bi16q8m z27a|9%i_&>qe=dc-aLl}ZpjkLUh?rYd{4g>p@t3NfbR3qw$c}n6!Hu5Z4E@~ypU&?wwidB zlt3Tyc1`ksC>}yQj9&}KdfYZfwz;;Njr+(Vv9DS`Cof%F|qy5FCk~sYSpo(d`H}%_XX6o>pP<7N1{ulWYCD4 zcQI`XF~S2?!9TprG{ets$I^zXaOjjBQdlUhU)|EG0d8Gg9? 
z0M@f0(M+qX4C&ZeGpUQIS$9pAT8$aVYw;;SlcuO3eswaKDNmU z{a@$VlaLbi5z*2VMZbi4!Hxx8=F2zfPx4>ta;tX;M}91 zC;i{*rrn&p;#!#J2%8K?CUMufJfHM`DvmnHu3WxQAIpcD;e$OEA;Tos0HD;@u{>$x%ygP7VFo(0OV*%o88U-KlWgz;N zHB{ZU6B2#YD9nEf2`mla@0<$yVDVmgWc&KrF+dR@D49oe5mU_~>1 zge5tg;F|kf_?bS`0iO}J{3 zCDvlpe0V_e@+AHr&hi|M6K?R-9i*{ES`fFm@AxGC_wIG@RrfW*d4HYT01xGDFj2am z4)3gXvg!D8gi~VThRW^!1ogaag=Vei+4Nn^6vqGOx;Ly^LJNiYOVX$Uo*`ol$L~qN zOII`4!~r?%YR)E9^wd?fQSHBXq^p7sqfUwy_?I6oc%`6B(q%hIYt2E~N&7##B!~zb zoFbI{bH_~VQI#J)Z^kS*N_IMWZelAco;eLIc-?_LI-yGJ+l%39YkbUGRpYT2<}J8A z4G{UT1dgPMJA!3tL4iwlW0m0Nr2Y2~t$|QK_F>9DSoExvCaMbWN56Lc7bExWM^V;hE56n%16RH}1&iF`ZS{WJ z<;naHQ$6_@}7Rh-R^7R&kbC$ccu@}+$U48O2t(||98po2ijDqKPl%6~_6cRXIc8&4Z|~p>_PvMdXNIDj?blJW=*?)~mIe6j ziCDsD>v2InZ5@jXE}yX3j5QG)a~wH+hs${$&Oq3XK-(c>4eE-8Fh;eOQN8wZJ zMR3mtQ%pmCiK1yYW()W5zxbD{wHF)MQv)@}BntJKd-Je`lz+C+|A7@%h~~Z^uD7KrQnFGd(k%@N$A_RU*Um{Xv?-xD<6n1S9+GR!5zZ?n7Kdcj=$SU>&qIc4 z&vM?~UC1ps5{`ZMErkPuSU9*ro!ccMD#S1An`B`%xLN4GN}xpuS|!KXv#kvIRwIIV zT#AE_!-9J}pO(QPI<(OL$8I_aUHjFC#0>T!E{h`J)6Y4UcCn`?pMN?(7NR!vf9sgH zZv}kOGZx9Keq$53G8DQSe+BAYvlIH#C5oJ1;fCJ$kcRxuw=gp!Z^Id}3h3vT>D=uO z&#l_e#d4Qy^R$V1kOAK~KXVd)>RuJD&5v&Q)&VQyA30sz)J*9~{L8M{A`2Jw3deT2 zOu!=3985Cie1%OuNpYB+4>UzYQXmB+UOF*hX2-d>Adn-?(%p z@yU~c^=wWME{~gWy{z|G*WF00qwECs-AYPmBV(6xvECbeqFL@Kad*@Wf8H2k)feS7 zY5!53JHWcD|K*l@?mFOWJVS{f^aGrGTNNv^+JxyXP{zWuKH=`(T0qc3m?*b&Hp{5V zz&mdD6QWi)`c%q;W3*s9y6a~qq+^kayWSQT+Vnpg&)TYp82rt|3tZC7c59wRBl!Pu z`0qP;k7-2jpEBWC;&3@``|5{zaP>?4+Y%2r{2(7m7!dS}dTESb9#zAed#d1n=MQ{M z8_NEF&L80a3+E61r}YD2yRjN~9DGF0JZQnhb{F8GD+s}% z;Cx>3o9V2caSu-{^btWB?7>g2iKeS(mNTzKOlbL>NM^suOd1Q$r#>lNL}%vQCFqM0 zw4-kW-I+REt9tf0a4WgPsH(?ZV;j7G-oK#y2lbyXSusfqQ<=RMhd_v;IH)epqVYOCu&_r7^lExg z<;TOB{hK=|1Mw_IBq|s2<=ZnySKI|_KGhRFE5(>)-_q&VzlNEZJSpmZ$P}h%TQkEd zieMMBUjt8?AU>bD%E($=A@;Pm;#*E%z&%S&(<^SzV)lfU5G^gmRmF~cLTmpN_Tpz< zhBBi5uVy+i5!)%xVton_$euqBZ@AV%~hHQ>Qd|M2V*X{zf@I-?t)LX5ff zGAaWN=+_@f_`8U$JfnNE)Pj?v^pCZ=Z2WsUaP`qFG}M9wzsI$Rj@}w7hx)|0E@>vF 
zdN|^HcJveTr`#ectR0bqf69nwPvf+!=Xrzq4G##5_aWFo6Q6$YIi5JXR?stN+wQvf z$Sfj9S&mgcuEIFDsel471C}~^pSprb*H3S}!8<+0h$wk1MtHTt z#IArHOv%do;DwzPqjf@sm5-F<)qEjoeZl$p9!o(F#e>OA3&LX*!|U;5R;IrOo)4Gma!elsw;ndCn z@^-PzheuK%yz{?%yek$gpg)&SqYJGL(~|S$80TJ;ceyx+$4|AyE$5}tg)5vu+|F;j zk42J9>0R`qRl#efBGaDOZ_|Zu=GcOSrXXhiwGu$V$C(kwUEo-^1e>6+jxplzo5FAV{A#RITQ1YVoq`mrC;G`!-9sL<#RsU=j_+}hNF1Wgs6u){4 zE!%LAjth?``R8=7?;kJMt#6plI27$9H(OYN$~OVzwB|3htB#xlKi{u|YnD|LbHm?& z+>BjhvDPkZMEx^1BWV^GmZ&AWf5*`#TodMYsGuLU|2VMPYRfcVsNfARgy@#^d!Sw1 zfjqn-mu9r?0F~P>@!_mkFt94A_S^Uj{A27XYShLRP)Fp!Dz9j8#cl~$cK9RXuwx2X zl=+sqvqg-IZgvB$pSLo9Tuq!W%x7#j4_W#^V+V4pr6m8$sEB{evl0g%t=CqUO|ENp)9UR1W)<&Y7 z2Q=*?=#7T!$@1WkedMKcrFl-7mY`Z&o!Mjkg)aML0%mTsqpQAd1wOkLLf>!7k;|>> zNK`A0oVVnopuc%Vt%pk(cvd-Bb1MB+{oB}4O7`1IGEUr)H)pyuHE-HJaH>_D&{0@T za?h=1Jl7m2r#`wwdTS42YaXW(vD*_#;!h?y^4*oZ`=JsPCf@;J4&Ol8g&Fumxu;-* z&R6hBj!(#D9HiZPAMpmJA0w>XN^tESQPMGa3Ry^~5R31|61HW#=nJElL08B*I{Rb} zaMzed70pp5|K$`u%yazoZY`ppSO@C8tU%0wE4ENQ9=r9n6Fn?C3>=pa;XbdAg7pRRq)5vGI(D{L{iv~^r=LzU zb$;bakgax{=zKmC3;gCzrg$}g1uCzAyU#{4>$4vCkgQI7zS>2aU6iWVkO&41+4
n3EcoZ;97FPpqo?i2w@9++#cisv#Gl$)^q1(Z)we}%5LE7MOY(9J57!>l zvTd9SVZTa#(tElhu-OOJfWFdmt_% z#*W@Ky4({CuWSwJ{#1)GEa)dg78cnqXcq&YZ@(fWUXPRXEHkiSnFiz1m<0xobd!O1 zkCI0iMWV0bLakg!G$DUJn|IGAid2u0v^CzO&q$541n0RV5Y%y_k94{*FQ)}ye!elJ z>XJ=_ak_!+uBn?)!+IX*v6w*)pXY)sQkFcg&jl|sdlJaDI;3L7^n)&c}krWwv>3*F`dk|1aJV93Prybwx@4IMZaYl`rjH~pAy#ZKB=3oZl2bwynBO(xxTdWm{rz_w84}w@ z_FQVG=b3FKGozb;#EV9tc7>``or9wv+W4arO!UDCTZl-v+}2bbb}bfeT!r=?rbP|S=9=};ZKO5M_Rn`VRQ1@0VyV`Bb*$b zSVaUzk`w)dn~jsY2|oI>8cu3#InSeMNEWkGnpY9=G>wRjFA7No?|uhd(O z=ZwP_8QY1^YPP^HTM%!{SbW|&b^zT%$Q+jp|4GfeWqBf08xqYGsZUB{LI4ubJSu zkh;^-4~Y0gmUWHVL=5HmvnxgQ`FDR_A_Bd3(ZR-C_LSBKy7aRtSU)j~b%{=7E%m(F zIqA>n#M8b=n$;TS{dG00CSna+bWMZxon8*S(ylQ{J-W=p&``qtG)h!1_TwGsxhyCt z+Cvu}NM>JJNAU71?=s_7B#>{6W?jV3G84R)%&8JVpW#26)bcgAn9p_3*iCBoRMZS# zvhSiVy(9ZHJ8OIq0|(xwC!SKYAtA}uuNh+QJ7t4Aiu%=0p6vxhzM$u9%~d9(HJIo2 z@FdvI++$bn#d#rnelr)sr;{zqW%znmTp9icV`BE)801W8EZe-vqRv)9hxyS1v5n15 z?9UQu9_(+!ySMlqqfNE4vXv|8-leLntw{^Tt^G~Eesz#0V?wB}(L3pVb$6NNXU?;G zn{{bh!zA{esyN$OzmJJ^Pl0p0DR($zM|< zNgj9^$iA=?lJ+ngSR;kUbwCN>A-A9#~S=C2pc^c9;(^$r;`xvpL$Deh- zAPv%#t(kxEUo&?jlOEqj4e#qFcGY{al!-ep@zNAZv1%#BpOeFC*wh0DK0<+pnQT+3 zD1t=ivy^lP!Ss7E+~x`pKEsv$#YO;cKVz2fJwjbQuFt=>5U~8Qcy{FSd^RanaEHcq z8hF^+LP#Ey=im79iH%+t!F=6T#kh38ra4{iKn6a`uJ!tXDewP{wqeEW<;E!b+tFBn zxQ^4@y6f1_?vw17h=)woymY$3UWe~C+Q@p%=F_V6%UIw%fPGt$#z< z*bN7Z*#2)3luP6jkSTA?Tp5$4;>4EJ2XBn0zzsd1+MkEXnD{W7hXgedB<6wslJBU| zf-t(aX)1AG`c)#lW+PZNKaL5weUuW& zqq9`Pndurfgj!Jw!qIm zNe4A)`b7Q2DCKbFXnmwkA+U{6XI?))$XhVuptWwJF&GXc>9g<#AaSSyRWI`9nbk6Q z+tTyI?)&fP7nPd8TeqK~rWDf2JEg$EzclQmUrw3TpRC`M=tazxc?M2yx=(lAh_5$D zX~TLdM~L&YjhL{9Q*nc)48}rA6)@>xOviIyYFH%Il2yp6Qkp<+~gA;q&|<}mU2WkM;ej`7p7Cr-)x6B+1R zCOlU|&=*UBP`GakwEjvka{@OK+s`*L=_7*v=5JlV(w<^oCUugz`cohHUvy%8x#RU; z*J9YJ{fS?W%SM^KH!oOiM@f;sI zdYO3b9l`H=QN!MKc!B>Kx4{?ZpiD)=QvTj|Tx$2`+3dP|3fM7IL;eiRiD!dV5~r_Z zU>UW08Bfnj_JZ*tHbJC~4M`ZJI-LFKYIrsOZQU9MotVPT>S$meP7Ok`9xnw}*9+Jx zb$bvRrOdYpzsiT!%x3m&n8(ya{9v=%hG=~Gc2Jxa%+}6O<#%3f&8;X@YR 
zY3liOe6`3u_8bcHzqDl1>l;Rxm042EsZcROY6k=?K^&T?9K+UqI8MKt;>>^4G_U@| zUzS;Hx0$t*UC3@$Enzg@U1k!k6!;UoE-JBj1=}hb&M$Z*L)jFzF^ikU7_9?Re2Yvy zHg;MBQ4An-BjHz=^669fK2_8Bk|BF}&i4#~pzJWedc=@U zIVZt>yGeq~%fbA;L*8u6K7GDMf;6&RmSENEwjt9#c`=GZRcw!e0snQiI(|#0g=gh= zl=a6#`7P5G2*;Oqnd^(Uvi=7$*gN_n;LFx-YCa>z95=nm9{Bx*MK+r;wvj2U_MexG zOOidC#P5aeQWsF4rgP}cYwg&BC-*Q(a#HN+<9C>%><(Zkw~$F4Vt|8YI2*bwiz)UM zVORGaV)mhSL<0R5uO1p_a+)_%KXr?!Kn*kgsVX(LM|u~3QR_hclQ?Zw6CUN2HFr>Z zUdr$zj)?Q$^DxH!#Td%Z76Ds}!!bDZI9}u>Gg4 z`AFDCtsVXg*b`@Z>EpW5jIy*7BmZ&}ZdppRUw(Z+ycUY^V~@?}r<;}Y*QVyOPdw(p z56u2BHbu+Ol4rlEgR583M@qNx2~vdj3(ll&Jc(j`B6XQLk1>8`ax>%8Xv4pewS)hB z^Fv-aUpAY%#fg35;K(k@i)4ozOzFnuJ?!Uw#jM($eD=3SBQcT{ zgmtwqp(C!E2x>!ZX1(k-vi_$-nHfKC(i_*Drs5rpSoi)2T=woMwmMahIl1|~@c!_u zRE)U+_D~mRH8S1)-t3=DdEUO5GG21qD(d!=F}D49JY8CiQRi@TR&jF)glTH=ik;)_U(9dL&7S z`mQL?H@dZ&HGZ7S(u24gFD@d1E}HyOd=Emo@!f=ApdUSLD*GZ zRdFkR2~hz(U-CgwrytNvs714CcjIq^rjk#>w}EGu>_JLyGEfQ~t5xbe1X`xWgF)SN zVDcyjc+~iiOHG@JaZd*G_B91bEsJ5>8c$%rmnJ>CS_#?kZenfg_WG7D*|irh#u3p| z8)~U*$G{3_3WPvTAS=xnVWeojI2VxccTD;n%Ve z4l&i^9o0TaU5SwZULUkUaQ`EoM!XmycJ~N)DxUzB>Kp$xkI!u{h zTEOjt8ZV|;nl~p<5-*B1AfK-F0?9e&cztnOfQF28?ec*W;PSQte12dyXg;CL$PWlN z4;eyk?n-9*GE>&auNPljU%=+m_LS@gLHz{F3Z7fQN#GiCk{C{|q?+ZrnP}5=);9DA zv+$5Teff|P-L?>P!dfW;;Yzk-ZeIKAcl~} zEASmg1RFQB`fjYkZb4&4)VKVO>D;%l>*n_6mQ z9`7XaQ(l&lKdeVo^OWfw=X0oYM=hA9wSNe>SCE4TF!hh_mNTD!nvgn(Hha~0Dr;H* zGjiW!*;J2Cx?IYXQQKp|o=6rgw{WzJ5ZCRl`tBX%<%^(z>Y-=}DJ zzB(B2SyH|2aU^J$Imr^Lres1~YQ55#C{X0#$2P2TCG zL}oD!C-i8i4Rdi5u$B4DT7#C<-E8Li3t*muHoGdJj7ibgp%!0#R9kvsJ#YACIwf`6 zp*}6(J>9*okoJ`&7z42-U^u6ck#y4r2M(<#e4OSJDgKS**6xMmminc{z1@%Sf~PE) zv!;!j_@GCB3{_@be@n33CxI~QfEoASstpNudHlgFU z-RQoUSZu9)8@;C4ntjoi4xByjU>km#(j5y*n3It!G49YDw(Yh~p3w|%lF`p4q9oQb z-|MUx?S0#cRn;nV-bE$BKV(xG?=9)9r2Qzfkta%iw%-J|uen2|mekaJThGIq&g~|) z*>TA0>s)ys_H4%0j7PAt{#m4zMGUK^c7{B~DI$Wr#Q0mHB~j-ea*W@Z?<}~K%Fe4Y zWP2*j*Fq8e+u%3O~ zsfkY%D>I)Jiin~E3m9xXm;U_e44afBLgwG75NGQ z{Tg^#>lN_XCJ$NLy}e)quYxCjD4VtTJpjAD-_HWhZDL541=SM;ASbqh2~{;9nVK1F 
z+gq9ly>pDM)|pWsfCcc@%M3qP4C&S{{(y|(6(d=Dx)0e;O;naNQ z;fg~bQNMx}a|XCKX9m7EW-f3sIY$^d^)m82O(xg+FEQKEp8vhdn_c&J0(*fbBO708 zGA;gE{7d(psT1!F+5RC%)Ky=MAJ}`le!AmTTydjKy-xUR`lmr8%iH144tmG1cTh*l z^uPdhWZHcGqJ|2ltaKl`}H*p^%WJ{O0%lWB#2#=5g=5&w0IH&)2J6i)hc05@)U3L?(7P z@ftH;*>0XV6UTjPWWUPCk>xYw#X&}coZtIv_|{b+pZX%3fA#DI6?oF1PGJv2PRHz* zK(2b?(F2`zcDvSM?+O<(xWk1MH*@%#Mi&@!eSp+=#3O@jchT z=lk3z!HIIDwQw1>4m83Gw2u;(gtz$W-biZe&Bqn~CK-69X$ny)EFo>%j94Gp0{X#J z74h>Mx?DuvW32c1DLB6Xic>Bp68$(C@s#M%0^4=9_;}xU!IJ((_~;1%ahsqj&U`e6 z3iW#~n53Odn4j$gE_*@W2^L|qY;SVwl@}TMFq1m7;|rVd^dRo@uOc;mF(hE>AKq5Q zn4C|Vh1qYD$k<;?Y!BN!WLK;}ylSo)DHxF?gT7KB&Y7Wj`Nhc~Rz4o{>n;M5B%07E zSNOm740%1Ak5io%(5SYIUKEprS1q{Co^@B_9RFM?N5 zEZ($2hyCQs^DggA#P^eWh(${rxi9w-Pjs12?i`*>F69qWA9iMwkj_lfZ?umDub4*9 zhVInI03&P>(#1tgG$Z;eGRXaKV{B8T#J~Ba$LS>1;)6{gcsXeySYe3zawF=0=l{0? z7p!;dCYRb3jW4IxwUd)#~<(=ygl+)BYRH&PaH@2p-zCU?eZCk|>^ zEuP{aBhGSm#sx#QILJrDv3)0q=s^J~&Qzxjt&R!vrlt}1A5QelV=QizAH%1d|Hqc5 z^pRx&cEo8+7PNGxM_3#~e5k><+vVm`#0Ei4TpBkzr|cFZX85_TDB=p}es z<9PfoizicuEl5-5C;nKmu<~9aMf$ob2@X&aXRY-Uo!pznJ1bj?MK5CcrA1xX@6$J4 zcAlMBH((8kR8$oo+a#z}``d$4^wR_{lo#Ss$856xoxZrp%tkoZ?-hHiBaC=af(uoe_YMcdLH{O)|>k`$Z zp1DRfr#|MicOD^ocWlStkO`SGF%K^@&w;5)xp>y{e0Y0h3hC)C#v4xWCl{hj2$5e8 z?sRH#4vk~Utb|~$&^37>`Y&bKLsP3I?o&Ea*b{#w+a1d8< zy2jyj<3&$#WYt#I?w=a%p6g9KoC@*gPcMnqp+#hS&P1`?$=%}fsuu)h!!7)*AFkqq z6DEqU?Zc$nybvc`TSd~Mj**vHU2M+pQ^cIBCWB9+h|`=VN;OD{Do!@T@5WYfE&tp} zrCb%sO)TZJaSY$2Y)&^7?Y~9_+^!55;%lbv!aL9Rn z`m0buCTW%83rA8{KYc?dAb9kHHeMBJ1VV-8Jgnfisyo~)G5^})_YyG$g z!mM(vLdwjrDB%bRo!f_3#i+2^CM$r(VN8sy!ueIU&%nhO;jp$~1DX6PnO~kcgFH*I ztR3rCivM&5+bVUu=2KnPkj2walAhbXBAF-J+%erBSYM%;sN0l)$RaTeoKS-IG`8aq z$_)Nz_l})#y&CL4eTmaLd5Ux{m=Aq!=n)Ggj?WIA!QZdxv~|b}7g`0b;_K5#;YrEq z{4u{|teqQyPahkCnz=1SAUhww^FCC4ftYpW(&z4z60c2|D$e9bA{$Pfpl7z z5wCbEfUuS?=%E{yl=>n9ijq~y#?Lz`1Icf6UiDVi729y25-=AYy59k=m(uw0=v&}u zYBgMVMXAp6_6JZdUrs(cnGybwJ+83#vaO+m@Ww0!;mDY2f*EE5V8B6!KmBu#aNUC? 
z^!AjKRBW>^cnm}FsxWD&F?lr!NVrDgGt^ioQ5tV~>NHuHyOm^Y9!1oP7SrL2cv{JQ z1S`&rl6hk37U5Ie8@-~0d4N(w(apupR8*YkRLs$68eEL)|vg1uKzN$KXx5cwmK zATwbvr=y*X)n@1jZhbmN9_4(cXY`H2(+l-+^}+Y}%JJhc$aXwg_Oyl%`&vj2GfL#R zlmh8}rA>52vGnV`ZJ=oF8*t0Yl(>JJK@_47@&W()F_1pVKO5Y^Z?4%)+OE8%vwg>k zDz|&#N;CJmaoSRJfuz{Z_mmZV?b#7BwO|RKTW}K}nWs&RUfcmO5wC%|>2acPXePZV zAP9R6jt3q?YH0a5CGu(H6#2H4;B|}vYASpTbUyE(^X5+BKAt}XOpB+ZOOof%<8vUC zE)#N7s+~|OYe~5rX$F@WM_N5R2}r+8hx0|V(2{%A5FEHeby*(faK$r-BOk%1`g;7P z?L}~({NmdETqRVfX@Q^A9)kmCgJ8}~`1K%mEMo*7v zpkNPW3S3!*(u(Bg0x^xfKz*IhdW4Ag7kvE`3x*OGYXexw;j%$vjo zjh@Q?eD;YGY+?CmsSIfBUB+wde+WLUjR9l)67f)1Zk_LuSyrT18P_^Yz`60I{72Vl z;VYN-T&VUCOepnw9b7s_0qO)t zz+#QV{%eu%27J#TV{M8jW9q)pX1M-CCsuRGXFcbc~M+g7@vjwlOl)~-1 zzJOOyv_=aCGV9_j52E5-(R!R@E}J!*)1&r}LLW^#I4iFeU~I%EXrgw%b~vycb{$WH zyRU3RN@0Gup=PzMXY^}se(`i0S^sOa(Tmxus>I+yiRM<^9nZU@_E1X(FM+P-Rp_eUHJEO$gI30Nz^wi` zydZx9zi-oIIN@FrV*U_lyhzdymVX)^k3G)m?Z$j}*D_S$-bg1lPZ2K7k3mtop|th& zbNuz|AE~%OCGO1I_t0d>0K?Ivu*&Bl=;iMQC&j-6#We@$q5q`0EtVTOzep#3md|*k zw=on=E0#mI{>z5OzdV@FUWMr7JIrlrW!V#Zg5db>Y0TFbLG*Wtw)s+zuNz2{V;l?< zs5M5qFjI6}a7Z^2+I453=O5hBoq&22;ch^$={rgXFq@eB_BY|fv2qBu7NhooT-&wV z=0eqsWb`dChI>)}8|gS+U_7*bf)m0;P_W<*G7;S5O6*U8|G08=p!^S#nV*f^?SsIm zyzCmE1L|;E{0f$6-b7V$(#-h2FtB#7Jo;+Y2(FtogO_eE_ya#i({4w7(36W&P*KsC z(LOm@m~o*93fBf8t5G4Sdbtk_DZ32y?<+FbB7DH`ifKq{^*UyreKd87O@Vsn*1`41 zN6>7$rRdsGcQ~~96>yBmLfXaiQ2zrnJpR3()wjcCP<3iAYG6iC*sv32G4CPYI%6vH zvd|kS%}_*KuMAEcT*~a6?1LQtN;BPar0YiSwnaUSKLuAZyy0>g5lS2x%dB*c5)B3a z7TWzxM8)^kFu(VV1LfW(XwRdihnrS$f5tRf{W)HQ{nUMzSUP1!h z7}kxLgRSU{v=>Ye4heNnEAjz1Sk~=oGq`dq5mk=52zK;T(LHxQPzzhWAnpHjIX+*L zX?JuuQ>VTF?GM`tU21FK5bX;0>C1!VD-zL^X~u%6n~zbUf+h2C{spF5HVx@-%&*xN zG7gnE->N-r;S4^Q?*~rp8yR;WHChZ~=v%u2QRlce&=Ep2k9?$1;PWX=@AWxMkNQO_ z^IHn2I{F_o^kO0-9H3CfTRWs&zYU$;Y>j5j2Vmy%yQqFiI7+WhN7w5^xYpNRRIcg| z;5}Xw6g`}eRNH2woqxSxXqyPM{C-Y%TPh>dW;wXMx(H4FJP}swx}yKr|3#5n&{Qgs zPF%MTk`J4a%jpE+p@v9Gpp`{+M)soIFBGiw^QIrQ7^0(;d)=c5i4QVvvc&)AiZX5V z&|qLbvQ08Z&-p`0JvszMJxXRymM=pEt5ukOj~i%}QY0wL{SJJ)o8bMT-ORP~^O=Y( 
zZ=qF44|I|hgG0vJNImNnvRjreEYc4b*(wrL^vDamaWqFSqBa2e*iQQVVFf17&>YS2 z4M9eEbD8*q`RI$_JiPWP1SRTQ(}qbCQQQ+rqT7|1K)851Gxopxv%jF!R@Hbo)*k>dca3`j_US>_a}_#@_fEE&G%3=!e_Fy}FA~Wn((Z zV7l?jl1jKPCL8#h0?6(;1#3sU21HOnJdKbZn zuZN+6TmagdvKCg>_rQHww^8TkN9^efGs%|emjT?6fR=w+3%4ve1aclw;4%MVn0#*%awyY+MgKO!izeUsqNk5=b;)16Qd*6TICM{hj*rHh z_s9TSx!n>5PV&F+6@#|T8bBM2#;}P7qrh10mz2!EbJVd(E0C}AR5U$Z3p<^vvF#T= zhIb6@z$tS9urTb0J2e#W<3=0mL8~ z364w81y42n(3g+ng}=(qBBMD!&_hEV=Kaon_)eyD?UH*eC`=iw8-K2vI;3yTHy^!# z`qpslg}@;iJj=vG!vq7Z44?`He+__e4)m5&xb!1?3n{!@1T}@5FKW5Kt=)0{20<=Ha%_@>9yLROLvpNB~ul6!}2lO zr{@VKsC^Qg{%;(kp0$wuufGHDa*PKt?rYi3H-jkt!*lp2;RUocna}iX`~fdra%Gh! z7Nh>ydvxKfy&(OU29}jwijtLk5&tI!y;yq!>Fl`-hV|3H_Mx-vOPLwei-MQ@=069q zHLp)!j+stoW{c}iOj84YycEwcdcv(upUb^@ ze-mqZT)=JJV?oXDVqDNwQuBNKWK3z9lg7f+IKXKEQ8$}Nq)dYNU9L0m!nHrKzE26Z z->FYlS<4fJc@gB&pHDc!B!+fO)vlZQ>oEVQQJE^<8;n2qDUk_%5Jt~m2Bzrm&Fr8k6wR5euAR z6^gGJyu-7F-R!Z&@!<8lli=djAbhL!Djj>`82wk;6px`7;gf5E`NQV3ctxFt3zwZI zV~y*ryn^i_*znkjy`bAl*IeVcEpK-7FWbdD|5P2$XbB~ava3k=#taf48fxP<-Hu;2 z(^QkZD`|+Vudqi*L-_!4s z07>y`0_SR)`LJ8@&~gGT(Q9)^-+&Z%#^XCbea}In!dBvv7Xwhj5J+ zIqcyi7Ds;xA#2TU@H1O|Npe~h6YFQl>?=FMj+x>q@`|p26Ri@N9~Sbo@wX=^(m|by zaNNVp4K8F&T;76;-wNP;ZxtN$?LlY$d_Y^TOlDlRUZUT1)BwMxGmNR9GRocPj80tC zVhpD@+xAbL2og**7`2xQFtces6D2pvPN$<0I$ZI98>(%Y?22^By`^#gk~Q=VBhJ(zi=8V!qI*rAFst06tv9!%+-!Y{2kM8}+H zLj83iU>M{w?GsK?<@(QHW=lCLIa!ZxJ#j^``d^^(zpadi{|^va6^^=o^US&lUUXUG zVwAD$JdEmJ&8$*RL^|5a=yS+U=-B)ZDHuc{yLDsu)T=XQm@b-$b~igPuacKBKs5m_{+jvSs+tze~~aWN93|m}5?k$wK{ycQd<3{iy9&lZ?QpgTj$rSrk>Lj&zddBAKcE&?WH(|68jY-gPkK z1sfXBuc{xgJ~s>n8AroFsaM>dqi)>%q+$3vt$?2A&OZu5{E`lu4Z<5N_gmJ z5h!kO1$D8(2~<0|Fg_a{nO8R=;b32dFxSKYre#IbZc42n_Rk19_-PUpjBTS^3)X|SXRZ@x8I{)72l#Q-EK*GR>I-ax)8>paVA>bgZwoOpyeqcowO&vicRkUT&9??(moTOV5;OvRlUDR^Yt38Bo<5PpvLNn{;k$S<6i z4CZuBz_Q2f(YuDX;9gBMyw`Tsc1C&_|DR@IU8wj2JM=aO>v!Da+xMKQJ6sXPc3M1y zUaqEyj5^0@l6+|J^&VgR;52{ayb{0L@E5(wPQYzDe+BkGNCc)E3%Kk>^Kr$5V94ch z{ELKLHO23bi{9*2;M#4MK(R#$|1NYRY}h}VOHJGcGbe;{(;uGV_Wz0icXqwwd)@Yf 
z?Ju;j>8%YcHun+EwmZOwDBH1CaZa?+?IO5#z8MVqD(SspA@$FP2OZ@s@lfYlCnlnG*!$}xaZo|sa@CA_OO@EEz9(zxtO^iDsrvyc}jvPV# zk?YPB);QpB8-2K0rV*MsUEsYpS947#bKq_d6~UU?vylIu49!h1!f$RCnEO{|9ijP{ zFA(s8fmv33zSJ#xq(vQ?GO_G%q%G&PJ`G+xVh_$9mGl)FyTO`0De&+jCG*oaf{%3`lq0q8bz5`)v)F`7HaLsd~b4#$mMM+G`V70ThllO#pI0R)1V2Tbuk7P z&io1H`DkHlS{btY!r-2yN$9$26bO_zMb9Qa0*hoO@`WE%_+6Souwp|r3VE~@CGO1T z7u97zW}<+;KRpL7{_zGXj2g$iZlZ*fYX@MJmkb|XahK1(ZV2oeHMrmeS(tYr29E76 z!mk{E!|bV*{Ij-hF7b#oeDp~`_us1I-3KE1VhN^Hm#GOK?5TrWh!(ipT?p~}6S&~5 zDM-%hpu3v4;1utf@|?+>XY`n{6s&HUx$?g=+6zcrEf z2Y2{Ox7P!g+-m${!Zb4Lh9Xfjiy+pjwj@t`3h#L&#CDHJ2`_VKVn3dKh4GIk`1OHL za9_BjQ#5Ocx61C}7ddaliL^YOcJvPjx&IQ>tXPj@y}N`XO_tbsfemlHM?|z`U2uL% zEN+N%$E^P>{P9Wv-#kv8Ja`yQb*Aq_t1@(9q^muC@pBza3z34h6^=M%YYXUj6GrQt z+ytM+Ia+}g&tZ$rI(+YrKI-4yT&JS%3%^hBz@rnLz=r<&`2Fu*nBVn^)Zg$TA6Kj< z6RURE-W|S#e?E*CL>j!}40~;n^95ZZkY7phSEcD(>M|}*0?_wzJr}5|LB8~qfIIq8 zKY+3)da*1&k)<}U*K8rz}w-7wnq z55=9gvL;-C=3sX0bAk^vaz>9De%1re`^TAZSxMpkKU%#M7;w1;{9_@ zUS=HM+^h)%IY%Kk--rG@Mg>nXmnJiU9#9!4wv#zlm3p4O!!Yu?E?!q(i@je=;ih}n z@_jpLGHqXWZE=kO@qDjCioLu@4Uk5QLc?qq&zlLIm7MCr@+7MysR*j>ZX#Y_c!Sd3 z>qYNVHYH=vbi>0ssU*AI1eu5DKyW0U6n0w)^NywQpW#!oMrR>;Ivj^hBE5)r)OEO& zNrLxs-cy${!b$7&HW;pJ1D~5^^GR)ulx*#NU{IpMuRDGd*SRgkp_i_KTa`_$VtO?f zuzfbKmlJ_w^KKG6H58U^3j1F-mDk&4_(Z@P_S@*OSQ2N0vu5{-Vwnnw})n ztMvvP^LzyDthz7Ko;D2J722sogQKAI?Jy)Rib3X;mB{?v6T0lX85^=~1#^E$9(~?a z3a=~npr=t$wGRi1VMA#-YTmyHG_8_l++#8r-JURr19IS7o6AW4_)VbWb_23K1RYwZ z%N#SPLR}{wQR!>uRI{O9f&Mcy(b~Ta=-X<@Y&4z^=T{k^wCa2?x_S&0m2Cs7EF!rx zGv=Z69iBcr1(mEsHX9e3ff;V|^h@)Oco z`$W){JdR%YK!-8vSPK5=Y(}+aGk|}^8m7o33(3Eq$^3jMZJQSR6TY81UN`~E!K{FJ zNNtxgb11-=GgMzmm3+xVqlyHK`WsWOz|j?%UEPk>g_WT54Pn4c^qpR(Rtb00zmRal zdE~k!7H)Vy3|ohuz_fl%p3~=T9|(%k*fN9h7C2bFgAvuci_+HdTd5 zP+*xK!G5Ue`InlK+N*G@a!}p%k1wc8Rfp-Y#vtZP+Zp!Gh#4*B36gPZfo*q`nCiRB zkWf{PQK?z3?zLs{;S%Q}B%BWqa zJ)Su}KY<=ywj4=)kw?PldngXAMRjHe5U&@9{8}@ZjyIdp+bfHh;=Rw&1wJ0w?l=X% z(*5wQYaqkLY-A?1u7ZCDlu_knKsy{gjmA{BqVs38=(3l3br*ykNORvX6=dR!vNbHg 
zNRT{dn61S8+&UQ*{47TAi`OwHZk|NvA{yZOO$F%ZxktZlejj{A%Zn6!irCa*$ppa@#(39`@eA5_?kPa#k0OmXd2*^?Cox{Nn!l}IXcM?( zm><|96xvBVYcu68ke|BOz{~7~Sa#b-USE(w$CY`4Y7C;^y*)YgTLM;?nC8x zTljG-cV#T@dgM?3+mVdl8I6I$$%2a3TVc4@QB2zYx)JA1$s+5yQS^zjGO|lQk24w@ zjxU6I!uWN=MDS=E9%u0%DF_~CyCi4)=o zsfVmwo6LQlZjPyy7crS3O>7rl=CV{RD4!)i`KJXB@u;hWtiE%J(=a+oEOz+dv+XOf z<<^U~9sv{B31dWDF!10HG;PG)^IG}bvV*waI0$wxE5o;sJHgWMR@`Jg$V*$uAWf%8 znDS;K*dvG$&0OM$L4ODro$`r&v+g4K7C=e3h<_w1Ax88){5|d-S0{RK&xgAl^$SbM zMicV`Ui_%4Wvr&)DK|B&nu|DM&sY6PCMmYvTyZeVjcUpRa;a9NG`d?T7r~QFKi29! z%%91xz8eZW>J4zZQwFZtcZWUm@eUTW``0-%suFpd@5H-ioVaxR8{BTIX={IZGrslu zgFxqiFQ4pl3y^X4q~T9F`@9HqY6ZFYkEcI);Lu9cmfWP5r1}!4%CX`tmsYWVzVrN- zzeeI^VQ&cNZN`yHE@9EDc>H->09O1;^96Yh#L>S4pG*G8uSi`+N1pEynU7EAazBmX zT@r$bGAH3Ft@HS{lso+D?pVP5`i#flQNV_$f{EJeK(O~L`Jey41D(Qu<(>F19&`Cg zUJ2MQt3V`mWDeUwS#n*tjF=w{;C}>9K- zrVNGhrCaPsSE?tL`8`40RT@Cr-UO4kYt|F16?eFnflS{1MkqbIZi`4OvkZpw7 zGeknFoyoz+$LPq6RIS{2!-EtWzN$!zT&o76!@0POzPx4XCt4qR=Loeu4`j%kp%oYIB9njWiH1aIp zi_Qe^rN6yeN@N^&WGJb)|Rt%0NAT@+YhNSzyP4zuR2Lf0N8z;CT>fKG7a zm^E9F$69Gnqa}r&mp7vyVvefMY=9^4hf-3z7NF!e(rC`s3qa$BDwN%|4e`_G!F}5l z!IpMQtEvzTO-2pEPThmZ&vQQY`27cZxV#KyEA9iJT@lXh`NOt~pQDCGb2Lq{0m)Q3 z*Z!06V@f}_iVoR1Kse77-F7ZVZbxR}y_eLxNK`s$RQ_krGtj@0z7)9eoOKuPE47!>eD9dO?@S%sBXBpF#( zX?VAfHh%XB{;ggIrj{7e<#Kw|@oCY>M`|HEq5TG0z4r2iF_U89ylfTR?U)U7{`&{E zC!6tZRaG#D$*}!&s}QXjtU*T7n#?EbQdm6iRPEes6;B@XP+BUG9 z$_rV=F##5`Iy_PC674KN;5?Ro%7s*oUctr4hQ4dQocykF8m5QrhK9Ms_w$S@f*;Va81Zv5YYpn zM*LlcAo#}06h2%a$wONo%(pbH`QQBCd0GYjaHMDttu-(+WFlyrzD+28{#%ggah-ZL zHxns88VwtvC%sE`9MV#oR~zy)1xcNsz@76Qi_{vQz~gC?QH^5-6e*8Ec4pb|M$uTN zey1{Wi!4R=J-4Dm^qm#$icxK>E@u}#0{;zK0VUl5xS=N=P0=&w z#9E1U)h*eDPTdHl@NpZxa(OG%OMU_iRUg2+&YaNWfTw6fF-DIyT46}z zR$wx|7CyLNXKPw;2>N$Fg;w59@VA*Z;>zx#h*SU3bM4IG_>r41c{qnXC(ef795m5J zOLrtVc?~)xN=xz{TB^OjeF6DT5ErTZ8#+I7Mg9k5pnKmMNcJ^Q2J{_ZHhC@ZdSB1R zpMC(RJ5J<%1qsM7Jvr$JS3PSid@W9d|D*_X(QM&cjpD5i+|_}(tA^nkkAJXR zXiS}5+{j;Az7-mERMG#&`oc!%fAs1gU!;7%4z0a?0&Q40s;;>TgL~~CYA;kNLYox? 
zwt0A=3aRDn%hM$kpCE@yV-}&|59;8fumC>FvPN_CQelu>B{<;LN4rJyuwLdi96nhG zt^MDEyE!3nPKp!E{Mi6ww^!KGA@?PHAisoLckO};*YDyCQ`b>Bs<~j&o(@TVMh4Vy z9{??zSHq=^pvghRzj^sdE+xkFN~>5bR@Xh$x(E>BzvV=i5RJ0~1R3Zs;$ z!2=6H>Ya^Lq2+$IucH$l?>Y;YZncJL{6?G7q$4mDYlC3BN%V=1v9#r)7nGc-I^E== z5B6>}K)UXq+3UMQkecz>tT$=8aQN{ckdVn&n zHncz*ZYhXY$)L84Dg?>y=TMgMLgbst!$4^vEP6i#Z+^ZCbB;MtjLinvuyGI$J$wbd zzcQdR^P))U?P+jvt~;&l5e8)!e1dakkAX8Ir-6X^ja;GZdT_I&8ZKTj8(lek1$62Q z{-^(Gexc#5PbF-0{1EV63!ony5)RcG*Bw2+i#}xSg(3pW>8Zew{=TpXvgJ)R`+YE6 zg&tFbUG=bnPlhtV7I1#MxE=>BqL3NID$2tZboNlKP

shm5@HlVgSRzMaf=fA zTE@}dD}%us%NyX@CCR+{=mQOud{LVhMVEEct=_G?^ ztW5)^|IF!>(a){y&LrWRC2wJaa4q!QmR_f>P{{kP&_xMYmH#Sl15dXb<5jt*(a&G@ zFkmDFuD@$lw_C;?&f+fDMp*Z9kGJ1qCv34s8`!DTZ5+Vv&0EJsp^va?Fje$(e-d1A zy_Ww=m4Y83ZS=+Vi`?w6;{dqE!izW_UY}{pTf}4m%f2jlT(?T3)>>qnpg2H%RR2o9 z&yR%$Ue2K5(Rgn6=u~PrT>}<>)&T5wH+XH{G5*QqGoUA^7~C|Tif?_^;(jugeB#6t zfZNQozH?5|SEm{vpU5VD+q$*z*^@J(tn0_2)nrA{Wiv-QHDmw|NYz8*-ZFmulDxW$ zzyIUq10}h`&j#q+AB*ALd=Y=fXAI{ts1EH4rlP=;siMN**~sh2n59v*HGU=BP&Q(DFCH zpE`tc?#EK!Ue2ecPjP^P8~=fN)g5R!aul$=dmb|CacEh^|b33EPbP z;hDe(kh^7q&o5p@{l5A}IPZcZs=lbsI~@NAo2~8mRg?`^81a|uzx{!l$-9GsyW?Q9 zekicjh^DWkYyxJrpZSjF`~0`dQ_z{r|M=oMssFwIOTBNfdT-MyKE)4QyRjGwGcAQy zS9S>X^zvAbKPu?cJ2SpTt&TN#brQ}QQL(i;>kG5$o^c-c&4A(dI4moih;n}Z;d{@9 z^Fvpr!up;yXz$PfFqd9|9+++h3(s5do~=%p>yd`Kg`ViU?soKI>P!0ZD_8jGk};Kc zd=`}Z`32sUEf;=nzFwC%t{GZ^W~%w)J9fA6ey~Q1V(*9Cq=nm}p<6&WJ~(qRN|0n) zMdi$<3LZMb>m}Q`vyVC@+U`2OI}Wn-enX1)@w^Xzi+)aC*Cv}9IYi?9g*-4|rfZ9V?rD0%UWH$TM(Occ56NoD*% zwz^pQ<7gt&s|!UFxv@kp<~F_s!b!i20?CTya7v@I zcyw|MpYwh#Ih~XxwwZO7+MDmnJ1WQ$bizYCM zZNH7=ke6oXL{FYw!QT$3i}TN~6lX7KVAp3Fur~V%Nq0Oce%(Hs=dX<-@o7P1Z&f+@ zzCDV*G7?0GSWUufT)vSfzb1$;l$+ppo(kgC=CSyTfeXp%g#t>co0a=(#NC)VgtcEt zc)s6n@awZO#7?6VDh}z9uH;O()iRw7zsbROFRsI%llSq1e`G*l6HCv^F(P|JHr!6T zz3icMU-5)(CnXGCyg2HuG27O-lid4K#Hv5}%td?~B5V9q#PO5FIPhABX!Www;JHgb zJAQ)xD=Y5wlKd(7SzN{F_8PDSR-{2aK zcJbxq-*80G(pY!b^bh&HGD@smc|-i7@FDrU*WcDZdM_ckRA6L$lDqU&GM{z!i{Fi! 
z#rj8zC@Ehxm;Sj*)fw$`5Hi$do9f~J`rO7zbnMDt&51WfdlR>knqaUvxtnA zqF9eIryEzb3u5=Qk<#>1Zr^l7n9*-IDYLnJ%2*e%@U{-QUOigerg2IPJFXIyp~HC0 zuMb2oDU@4#+=pDy4*?B|d*GNrTC6yJi@2%jI~mIwF>9ZvBEzex^fvz!)W_}FaOjUB zbMKA;w`jQ^+LiU5svhn~PwxBI?Owi&f7^b5TajuCz3i8v)5-DZ_2E$T*lj-@Z!Zh} zt{KZrlxl-_ORqqOqLb*#qGG|994+wgRSnX8>?PSVwxcnL*^Eb=3DjK}&S$7xM>2NC zP@x>a|GE~U$!Ff8k@39Z+y>DN`)dBqrD)h5DZ_-A(g^vN!yegHkWE~H>?FQ;3%j#?oOzZ7m)`+> z`|lv7l}V_vquw?HHw!boq?qddIBxFP6!`i;CN9=p#C&?{f?6LoqNdAXHplhmqQghR zg*6SU;7`RsbiPrWDVMuR`PD13p6bicHPvwpmEB7}%KQqy7%HHW;Qi?Sl2Gv2wSucD z+Xof)SEBzT=}i1$`u;!OzDp&gluD5-rDf)xb0$LBi;yfWNJK?MveTxG7O52N`@YV- z=bR~PLL#A1*@cp&Y+2$r-_P$qn8(b$bKd9ue!ZX1Fhh(^@O;3drWoY4`6`^WUwj@b zPOob-=;LLti)T*TYM|o1fl#E^3tK)o!uwAeFfRea$Ua|uZtN%cslNss3_v*Oc_)ri z;o+U^mq1|(_Q*kxpvwSr24L2ZmDvE;7K}SI}a|NCG=qd;{cjPbFy%3nb za1t!9P#5e@Sb|dbu0z9Fi(oyQ4g;2(G8gNFXuB;B-MG37CV7kky*aP=tGO(oQ7H%P zx1L0^?NiWnpE!nnAP-Wd7qJKGGGS`Kco>Z@prn>8CTjfa|IYsdRz1Rf>7IhK(>J2lj9&D9k1uLpIuAs2z6aL}``ANfGX?YW6$G|(L*T&E z9;jkz03%-Vkdaa|>XL5f3$Ik!#O*wZHa|bZJQmlu%N|*RhNv3mkFmO7d5{DOsVGAy z(;Nln`voXpJYjY2zm7s?%P=82AKQ2dUbK`A(Yy56T;udW0>8}YiaR69*`QgBQw@2(XX9{t*Zt;<0Y%#0CMpRa2RMH|NP6SV|zTA zGe?cY_p6YyiaS^<`42u7ae=J9w4d-!$&eH6PT=gMp1N_}6fD}A$^Z0x2l@GEK242m zAz78`L_ci;4s?jeE3&UL@}uuDR(G!uy!;#Me_WOtJf6uPyHXKecxgnPJjMX~ARVkF zUQKCxkx!nOm2f#ypTRV{gLKFHdF*31Yx?P!0M81G2Xkt=7{9+PUMbfM3>#X>2G7;R z+phx0UDU8ho|8uU3MEKUum=t9YvN^#-?J~#M@h@HWnietl(6gT$Qdy&@w1eFL=t<7pV5pVW4+7`8;p^}Xud6TzLtVt& zDX13OU(usZ?hY_8E{e#N&SvrcV)D>q7Qg*;w$0d>sWdX%if($ZPmHA{*m%W_*u|)y zXHb;PxUM`$ESKt&f`L5i7uDm5$KD=tr_6`l%1fqBgD&ZV4;qQr*m6=6u>!Aon8k}OrxxTsdv z_WDaADjdshR=Q6$zuNK0WpCi`y_B;#X-MX)AEK(dW@JDznm$jf=M3|oa7*gG(Rp_V zLEVNBa%N^YHQut?I>ko36FJp@dXDW9^Zm=I^WSZvt*?DaeTX;l3Ajq%&OL{NJ#`^6 z^~AgzIn+RR7wzb1;kGSKs@+mLADrEs&0j5RO+~3gG)7ws`+m&CZuk~nsLlg>p1Hxn z?eCZ``Wq$IK|;$eGzBT`ZI1n3w%r{S|riSNsIG`;CmU3Om_dDvxR-61oU z=t*p#vt>@xKi5t8xwdlboV>}@p~76GvV1l0*&?1Fd@!VM8no$cj~TG-s}eS}f5&vN zRwaT4jW79=^BUgb=)k!5$(M_;* z7LV&v>4y0l8|YVmO;Yfsm78KPhHMKW?A(kJCir*;S=6|MsVXby#w|KW_qNoNXTchx 
zSEpO*+Xv5*wZmsvmy!YDvU`Ny-P=re`hLdFfwKJhE({z-8RfsbzUV7qup`K zF-$k9Ig-y`UQnmKs-n;t-fYy0g~Bm?V&+qO9WCk%p_O09QO_z}nm5{?I&xWD>#^U& z;>C7)@3)@st#ZWbb?%-7`PBaWu;%cFV= zzj0RE^s!f9l$cfXlPd0)63HGoLG*vmBS*KyVQafE8qX7R4>IL})|!5sy!^#<{XHl= zC|OPBldfGpnw_ypl5&Fkj54?e z@1GeXDhc^Y+lNx9R8yFsX38yOcI-Gi$MA$O=a?28OmGoYwfb`H)^RYn_7u~2-$-E6 zyP0>|BbWOd@sqD4W;0`CnplUl(o*Vv zsc$4r&BxH(b}@^4sic4ew+d$W^nu8#*3CBt`BC=%!3R@{7nuSAw5| zUxBjq**~6ug`*0Dx(BYHMPH^0Sg$a=K;;kYBLm=dRR;E$97&W$jxyWd>_OuW$-waz z_JZej6Zjj?--D|p*Q25L`{DQTMNrXq9h4>}0{4{TxIO$Z+OQ*7u(NCdV{hpSx9IJI zd#)-AcCGeDH*Ph;{V!9&se5;k@^XFT=zbs1{57?5O>QgXg-8f4bSVgclr(>!l;Xu_ zy#>d5W`o@;8sVwzL~?fPcEMoq5Y!j@AiU3~)cttFpu;*6Hc=yop;_QP)F!PXNLXdf z4&2UTC++DmIS}!yqwO!x2NVICF3M?az~%Z(x<>`QcWql}=Ec(U#$ z)#xm4fgetOg?s%r)-Bstz`r}>31W?>A>dR6r)BiOnx@;(XN@}5S~UT9$iF6%nuihT z*#T3VAHu!ebpYuW;U>8>c1+lGq^1?Ycqz|k5^O95UE&?$4_%uCMa!aj;w>JeK8x}T zz1kU%`Qlx^urFxIWU*JsRKRN&=SrjAMsdqE&x7w5A0pk?lR#leI&)oiA_)uC5J+~5 zSwQ*9f|K&Y=UloBQP7rR;C1C9yi$G?%C$7$(_-dj-b|hPHOo7YQ;4mgdr7*WWZXHl zBQmSbqEQ*`nRd5+PO=7&&d&w|D;EliciS^V3j#SBIuX&HZuqiWO)z-U0cnph6>LAZ zSD-)$@3HS2@O|?(!H3-%0{@9yP@h^htX-#o4mTV_f2MUaV_!9+e6LdE+fs&K>e~g;1 z2Nw)Vf~9yH%wZT*==8!`vm}I%`n;L1zTeT$t`QLZeHB}FY%=PHbL%ab=>nrAA2{!b zUFh)z6QpEPgvw7%Mz`E*ki@>_C_E!faAB?oy5>7maI*h4vU@q5F>5Wc+COnDRG1Jf z*tl<;Aj{}7+#fB0eB(#Ke`^4;Gg1(&OO3!|kE_>*LRWNrEd&)k&gjl=Kd^u1ALc#! 
zg^0prGlySl|HP`CMRb zKD-Q7q}ySH(I>DdIRObTtpYQXjN$$tN6_ccCU$?mJl!rkj^Djy7UF!@gT2Q%pe>St z;gVnAtf)!g^~HJQU8qh4jM50*vas4s1-Pq&*QFSdx8jkM>uWk5>($QM|}EwpzKi=`X`t$vkh~t9dzo1 zO#v+DD4suMvN257gJp2zu@1n1>D=NO+u_r?W~R;h6Z5~?W<%amIagytFw!&SS4 zTCW+>=B~~BY4&4A_bot$Lr-DdwlJjeXGF+-e#bl(GW@ZdY?;>64e;+4YjpqGDB^s~ zl0PT;C`fMJ#mW5+1qYMN(aqG+d=5=D8lx{aV4f$+N>v^8i(bp(BeHRL` zQbpF@qmWhnJTh`O80^|Om8lxzip0C@%u$_m;&aB3>ooes-ts#voc*GQ`E9-o)ttW% zEj}acyWuj3Xiec-O*X-$zhgk5#couqHjX6RI{x4Ne^t&qu+3>FQxdGrDs?6UKiz43 z&*?RMYiCLJ^~VBqz{ZyJ5F{5p=l(xE9M+hKwq~IUN;faO`39=2EUWKa~lqBOZbd z`vjQnWQGcCH?z0&q&y>B zKQeslFE!s{%CF-5XveI1!eeDpqIChDY@|*W%Rb8IdNiDfME*%SsJ4zeZ5O+9>KMHL zMg;yDIZ+h#wT!&jc!k^=bCz08i>yD|`2atNEvH_v6U2*aHx9*!-rSl<@TE=U@w}^4 zJw`mXyp71r-)pF__B+*zE~4>%dvW}W2t&RVuDpZ zFTxhZmPBzz2raQ>>4(k{eEGIJ{XI#B?s+8C zDtBpG-ZWNZrJ%~rUpGj8R4rsxTl={mPj1n5|L)Rhs2f}Tn#tPV9p<*ooX&Y{J%t^v z9~HARHNX+~O7?VRHgk=6MC&#iGWHb{L^|!K^xF9m1jt2@Wm~S3@rf=(D=3jG%RNnP ziWP+(>u=L{h6+Zq#z$G0J-#1m>cT{Leer;%|j~tU7Bk6{}XL{CH=w!k`VWyR3rIWL4^L z;VmARTSps#?ui{Q|>NA!RqdiOM&`J;Xi{w*wQ#;~9yf0C@w-uDk z+so;VGNd(fQ*0cDJLu(qCjtG-&_hmcBqVOLp=hGu9QiY(Opk5N!uL%8y%TeR>M1Jlvu_A^6M4l{ zeS;?6(0!2{a(ssKTcVjF*hw!fSW4}c^l5eAERl6)485MKO;#=`qC3tE@B@lv=CW$^5Y(yvKmMpm>JNX}iE&%hjTPEJN9y zjV?4}b-3{1<;NiVSthgh*$L+B=xm~QO_L_Z9w7>@Pq@Pmr{eVVa_HAF50-r04~F_Z zV8rGZb?vJnVeIgG@-6rkkp5Ii0Ob-j2U^2RQbgXYyx! 
z#-mS)vqEe*x!@DQE0=XAlWskO@#Shj{ZKr4m!pBb7LKC6;$ELcVJ9!uo&y0em%Z6D z3~%kv1|41ViE(xw%H|4n@;C%JRR&qJXw`h_X=+^>mpw4zyc zH9c6pT1fgP|74V}oP+BU-a_qNGs&VQlH8NEG9*5;yQcrU5m8-SXhY>c*fWMESV;z^kNKjcoYqEcpty@3CZe z5;hzAmj|YV5bG2L9W`bMUgE34P1%4K`!5(je|Zx)pScC^%6ixDlv)YZ6OCxkrVM0L zwH+ROD{T!6DAS^6XVVAtftgd0(BD#gmK|Ep|DJHC{&jq#IL|a5r5hXIOC~;OT;6#` zV~rd2m)yhhJT38FGgJEKhCs|g5R&^fz2x4*0yu4rB$R`MS>x|Qt|s3A2dOWbbU6o{ z>h0n4o@4xFw=uRpXHG5LgUHr=L!gbD@z-Mt0H2J$6>zm=WEFRpg^b1z%60&@I5|*9efuH8P3|QPNhtzL<#CrGSxO=v% z$?S}BVxiX$*S`HnOl0@MZu<;4M|wVeW78(Ii8JG;9hwOLSrx&0yJD^%l zMM`AodXcUxo*^2%j=8z-WAJ8&Dp5oIaUv1CnXX-HC^}P?S65W9igc%U(I>COncO$l zB9F4M4etxZy(&8a+3!9>bh_6SyuEG$rp5fD3FQu=w}U@u+V^Oz+_uQ((e7K=SaKZa z<~m07@J6htZs!-WeZoE(W_KU|xEO;U9a{@^JvMUP4%g|B-d|jpk|a70n{AdzpC#81 zy`^b>Ni?WRlkRm6AS<`85Y5UE`@j4ME#4~>4cm3F`x;%T$?0!oByg?BzB->~460GM zU?x^ok`;BVNTg*SBe}#{n<}ro3H1AB3z6;zH&M#&-}QxxR=BMYHj|tEVqTBB) zkgXjRqD>bMQnX;XXhrE&>wY(X8q}L5oLrks)p8+c8#31 zUWGd=Q9Q&2Wn|IMQ6^MF!JZbX4-u!se9*n?JqZeThiT_u&^-|o>G?(J5 z^5S}&O2rGr*|wFZ$!riiaK01Hi6 z;MXr{g{R-6@I||Mq_~Z$EfqBC&1VvQcSbu71(js3k6*~ zN))XYL*?GH{O4K4Lvhh?eQP-E_qtS*~#c~GvW^sm-UHKKHFI!F;XcT(&&kH6i)+sm2ql)8E~5(2EuR{Ov!x*uSHnz`HHMB;QjFhX&TcANRfh zntqNyG}n@eSNC8nH;;y9nH(&zxm>%&lk6@PI)*9F^h;kGQ(B9pT|PZlW*m-%dxaM8*qUSio`HCDn+eq%>>&QXMwV z&t}g&xx~g_x59fB)QDo~Wvs7O%v!?A(j|^o={1KK*QPw(0_07uPP7sTj=! 
zoKh3lHY40duRmbcxl?FwSu}b6N*4X^{15n}$W=QpXSkj~7#VsQqt{uyyKlw|x8#oJ za&K)%MmLX>UTYOG-b4@GcpG20K}rsp8xL^jr@R95f_P+<`XD40g*eID63keW1KD18 zG;aJ3C>$D%cD+0Ya@-K!TH1{-YuQ74E)!WyT!-Gz&?F`3AJk{6_!XBz;c){aabNQk ztN2-kr@LV^(kXb#c{J_ANxCBNLRN>n*PRHLK5vKXbY|m$yGdx5;SD?%r||c+X2BoT z75vC1G5@OKC-+iX9Yyx=knxQUV0obd-q!4ff!=b^Xv;LNt*3@{NtVI!cb(CWp0hmU zS_aB|8lFp%u6AD@4mwaE2M}R$cUaxv1rEF4KU=2CMX=c1?9~>1$~v(z|%*S zv7w1WXcf}^wS9afG){ySHxkuiD;Hd)SpHzvz-EYU#>vZ06a9zP@V0p@P+ocft$m-#EOgR?S7Z;N zU3tHFkstM7rO`=zYMC;cWKoVy_PW8ws-xibnQ3TU%QQId%pRs$udQ6xcr_gC>WB7P z1TJ14!5cc>#{QB#fEqrCoeeX^{NPe8V!PK5iBSw7O1v|Y6d7q9R@^gM_<-Vz?yTO2-|H$I8jr~gfZyi8Aq8QFMYtxd9i>S z(;W_s{`5ejhL!v_wJvO~5u42JC{?tYDZKlN?=A>>1vqv=KQH0}kwu!I6h5S;3^L&nyk0GV- zd>IQ=H`t*Lrwa7&-3-oOoJE%VxerCkk3j(~%h96zGI&kwE87MbmAt`_2L8F)~1V*7d@sKMU~1mSPZ*UBGo}e1i*js;sIs~l~gBj1CqI!=%Qi$tZ zU02m?f>zD0;2teA2cc@Y&?C4Q2DPh@R`E{9t}*VgkIx_#i<5An?Ij3WYGJ~PeT?;j zOs+HaF5cg-E_Nq5p<3S!ILiDAy!T@nINdx9Z`=^`y!sw-Q)Cwk9m;s%teJ)|!TmMI z#vB8gGw|%6FckT)0K1o8!JP$r(E2z%{`WR}DbtF>d% zi8=l7Nt`(E74VpbRY=j?2NT(ehSNC7x*ELYw*zfb)MbmJlkJ{7wwD1l4esVH{2me^76O(mkxr=qSeAaa*#jnJfU8|?W_M;zh8j`fm+pGrj z+m*;OQ8<@hzme>nRY_aVpJt@;reoV>x^(8SJ8;-J5#rnnu-I`L<=ISuSC)NbH~n+A zmWNq*jKwIHDN7*tSKHB1NiL*%&3XKBfhJdFw+I|p?q-^~b9@mZ;_S2>T;3l?B~|VT zg>hrZ*IfqW{?tx<&3XV!1SyiMFVw^{_g9S8T1^*Cg+Jcs22+#6c^Idi-M`iip^F!(2w<=@%|GrejCaB@sNgA=2axWY&tC;xy?;+lc#Kwh;4d$1NYC^ zTDL!~fSr6Ji7{GdP9_G3c`b%>nM>t0T(8FmaQcV1UKp>y4+ZzH>`9+3eP9diFf2ygPWeEyo7nfNZu z&Mo4T6NhsvtOU;k%sx(h>fl!s~#kclWer>zs^V_e{Q~GTRX3lC9{>O zw~-8W<24iie|cnu%|Xtsb{F*<=7>ZdOJ*f~Cbp9va|O+_K=#V}{MCK`NU2i@P`fl* z%)#sjb{ThA*M@px{QegC5Gdqp(QHXF2b=_cCfGTF*cB0D!M4Eb}DoJ@?o0BmP^U zN_WriA{QRya*yZca=m+FsM@BJL|1MriT-tuC}_miMf8v6Ui*1cy+u>;tn97S;kAtD zvyBrsPof@M{+moo^qt8jMuyz|UP~_GAbLLi0x3$@zzaPuRRt_^!A$!M{w#|oVl)s( zJN}dcgI9-$?H>tZ6&1<(ZNJam?(GsTT^~d`n4e5-VkB+vTguC_nM6M1ssPF8@w9tK zCYj&wN&Yb>>Axdg+=j7Ye^qAGkJORCQtNG7kh4|C=5So|fPI}G-nSDK{kRvMWP1F55;FTXH~@YC;}CmDA((l?HCu@Bux(h3)m zhuZha=vqhIbZaG-T&YbY>|c|st(G*+L5h_6tJ6mt?a1r_6SC*ChGlJ0qA+5*8DGxd 
zkepwK@$f=vJl5(F&i--`5Bbd}zU?~XPJcUY_PW5SO*;XjIz!;Z(O%q|;phD5g?jMV zsU{vUI9Pw-_gQM5^qPDfl}34UOoW-zvq+v(2J2{imRo8jPt}#3=%(ijF&;CScV7J8 zQ?o9fJ-99ik8By7F;FkrDN~o6hg3GNC1ZYR3!LJMrJlio(W5Z*ioj zJo#E+flu$5N)=|R))$ni(XT55=+A!~ox8UQE9-|?tEGAqYk5nX(#WIS+N;m`w?;P8 zpv>iL+^a*}qAw@NHKz?wsFOsUZ?bT&&p$F|c_uaaJ6U*l$}(1c`!kyGG@0J6FC<1A z#?Tf2Mq#5DA)qY$8s~a0f^OOU6Mvn&iP2~~z-^LWYE#jdEY7f3v#Zj+6N|y~GQ*TAE=Ng z;tbP;LBIv|v{-*n+(<3goZyPj{|2@#b6n&i<(YxPAJ)Vz5?zS1;e$0YnjZorObgbFYz4f z+0dRW=Gqf~5})r*%&&Dq&~wt4m#F85qpXLSEk4>XMm-1aIAKBE25aHuj4-C4@jJ6i zfD=8|Wd9^u4e;hc_USDg9(mq*}|0vXuvf|&h$*a3tXi66MO0YhR{cWEZBVy z){*rnR#sg+JKPJK%w~WcqL=Xf!xX48yA)_WTthCWN~6!OYJmNu9_D(rz6F<`3bk8X zVa8A(^7Kd%D(zecEK=5>Jxx!T{!(SqsPGRQ3a%nNBLf=w^#s}cG`FrfLkQl5)Cm_2 zJY$l>|G{w%2RQY;tnm040kcQ48f(iKpa8QCc-pRy#A(SxU{i9E@w>f;qy*M-Z*o(} z=%4f9C5_wgRiq(v>*@;FbSf391{_;k!hD@%I- z-}C`{MmCf@qM;=7;R!x(X(?H9G4}j(U1$3mcF00qrcY9lI;UI;C(u7ybEz%|m-{Fnwki-g9^!Nm=L5e9iJ?K4^X=Rf|@@ zXh$dVdrt?%uF~zhV15POzpw^f~FqBT)IKKg`^Im-*g*4$L{1%lGpT^Ebx|iN`@F zx_RpdaNE7|zxjXQogOR+&oFPLbFn6`6s-5swOJUyfFE{Ep8N4^Dvh5+p@7}QPkMBX z@ILxk`^))3k5&zka<3W=lW*|I!#wz$3kSbORN-@ZDSUgk6fIqk7-j8sWW~+N(1$By zwv1!&5`H>XiPWY~dfpHTcOS4NPn%@FiUXW!CLBL64=1CecwFJR`g;9|G^Th72)ub7 zC>ykMOa1qA!)0<+ADOiSoI;TiB`S5mub zHN4`M6^fkyl6iWNphJsiq1S?3?%`5NX3Q)-Sl4qEEg4MbKd*fQO+1|W9=v1dRG2D1 zzf$ZdxM3+AWqb!%WsX9U!;6u4ff^NR>ae;-65wZA9lAFu3G6-}2?xfQquJU_z3V>% zXb`>u&G;6Kiyk_ox!=YMu3W1F#?tD{)r2uErvb5n^|JH4S!sbn2*4z{(#``0_`4liUn{3cbJ7v7`NDOk` zv%q@u9Cg@vJc0l6)o65%Zh*<;E?DZi2VGcn72Jppg<2CnGxB*8L_EfN zzbksAvjJr`$AIMl-8}CF(Rhh}2De1vs_@GFnW#Q$nAtk2jLEDr1HSLW(9HV%LReOX zist`K_c|Q|-`0e}UY8;8Da#c&*g0_l3(ds*TUxJEV~uz#nozxwfneyIHnP9fY$ZA2 zIBfB0s_*X0<_@vfnB3tsv~le&HvP+Befr}DILTl#IV9PFjAyw)!z0OvTziG)`;SMw22cYQoEp)&7E*jgp z49*Q#fs2%4fKFl)94&rMB(ym0oV%BB>3L;w?yi(`^DBjaCu%X8cDDRO#z{!&RTos; zqk=jEl*BF?E3})b1)Z~WQBaqPaNC|yu(|mVepb4lX&G=rufA+D(agz zgvRW9iryYO%~ZZmW4zps3hwkeB1TW#b3<~1-v;G2+g`{c&6A(dl@D&v;r>KHWWobM zH%WnWTw-C_bp=6Ixi`Fzzj2i#= 
z3%*QUz(xOXMAJJffufkrS+ms{v@X0V{Hl_Kk|v62SnE6lYs5FT;D#dD_pt_Pt-pXi z3a-JQ{z>pmxPoAtr4+n65RY2Vo)m;X8x*c#UWhq{>!4!2l;Cs8DrBV@0vErGh1(C@ zLm?joNS*s4p63sgUx=%LKS#A7e&PV~y>oeN`QkKRX2d zcHW3aX(XZ#TW$mYEklg*@9S{xHyOcv_73VEIu6VJxgf7+X6RV9F4DYeUK`+<$ouLl z04L{1p`hQ^aPZhBwB#TM-Cd&L?xo%ER{cfL3`0FA}EI~ z4zB2&f+w{5pbavbMhoN=EO^EvQv~LB=Lkx_cMGenj!JZAX1UKAwApZ{rfOwK`ES zw&NTMRI7wzRPLe`$wiFstvP7@vleDm`5aW0xk2#Wh%ZpRJBa=SOch;Zbm((C0Wgqz z%l})whm1M)nz}sc<5cSB(TYb}tjU*p>h+W5 zO*`wEvXFIsq%FD_&`w-e#F4gfE2!k~p!L3qdEol@KpI?l9Y>0Hv4fW`7AbD{%~=+k zklB|qsrkNV+{>N>@;huR8D8H;Kgo@zu1e}yy4lBi{&W?h&}q-MJ2G^dm|13Ic@9ek zgpw(`Sy;Hp42LAm!HWa+7=4@RROxj$cq}na6#4ZLZ^_OO^61tLlAtK|dPWM#W_CU4 zDi+a%<*zUv64#c6qeP9ibLy`o29lhyPw|e)jr8WIKs*o~i)+{Sn4bce9)poL)e| zu~Piu>nToYB#UULThoM}CuvTmBY4vQf zKVdwPzt5005tl)Z`*`Z&vmP8Ym!#)Lq=@eH7#vunOP&f=aPPXL!CAvXJbO0bulcLU z?az8Zw>=q4k6uy|-P|BwpU4X*pY_Ao{nhz=-X=sJjAYU!bL>gz63O~8S2Rek**DIj zrvw|Qj-ueBHTN*Tn%(a4km>nwijHg_=6|db8q26!v#igX6=+Jpoh z;Ra?crnZ4n&|F?lWNJGMO;3oZf`_a~cD{g990|b>evcOo&!pw@(lo_Y34i(Zm|f6$3jb1WgQ4ev(C$(}HzIS#W z@O$S$-bwE#9#j12gtHBJ{l#bh_5VK8ZNw_qlpkd5LV9`%iQ~%wq3WcALdmH9a85OI=lxUSd*LSedweH-dSpF)p=?7(XKf_Y4?Buv z{y>_x@g#NM5lheCorw?qnTS7%nen$qR7AS3KGFMq=g7n3mq3o*XYPBY4!xJOhK9?o z<%%V*@K(5G(Xh+|%vNp&@hPwof43Q|0UV)+Uyq>)XAjYjUTPxaYIEu}N)hmD!sscd zSfRVG*e?^41V&!m!|pa?MX^_^s8{SI>b)V8##Gu;^ZZ68IOPoUaEAs7v3*QKnm_RQ zNdYwctrBB0qmth2E#W1N=aWr+LZ{L@S)hi(sLoB zDNXDLgH92{P3X=b~)s@ez#UtEW&?&_=Z^ zZwnv)u7w6qRs)9sF)t$cI$3bqipv)F;{wv&kf7@OI6(OdFkLpSvh{=;j8?r*F6gWy zezq~fix+JnIMhRfr`K`ORnjzHt`@4K8i}H+g6Qs~Xc{@Y&-!#*D%lwxWpl$!iVW7AXk+VJ;m-id+qN%=BC&&|yEi|(Z91Q? 
zu`o&8LrY*c#f@fCrK+iu_&y&#U<&tqRTnN(+fR1>+Rsfk>Eh&4ZxB~!8~7%58#C;v zM~2c$sn4%?{-*vRTE1$`_z%^A(6}a&n95qv0j!J1Cq#3_0eQ4-pS;cOJ^56XNv6#b z4x&|8dcfv4&ugu#WWl+I7WFr23x96@9PF(tEh>MrvR+E6keME|k{)yi-2A;oRND0v zk+}+~+CEc})9O%R)?haFuhtgri#bjIEwiVRs@imD_74zkEh6s|t+_QXYG~x4a_lXh z9eH?7$397l4BsUXt1au}#=Pkyc?wo^em@T{4AR8vZY#0maq;iq7(q5&{vhUZ+QZ9# zRsWBp^YEwo`{TH>cecnZBa)2!`J8h;NkU4KsE`JgqNS8}h>SFZ?2Ix)!o8o*IX9z8 zB56t~M3hQp$?g_uS9vkNnMOX@;AW_WiO(0sD{(B&&Q9Z z=#gftX+qk^jJy`_BZI2W>WMSO0rj;b3LmMs zLyDhC@x3w=$itP-sqL>i$<wxjnR4jFhv#sU$!DfOEF!!w?~ zoqd;N>V*=$+;DE-pc+vV^Q?_6m`eJkSyJ62OTrHnld0HJSm{59)0*RpPbpR5chx>v z=8-Mvv4_D8$6aarxD4?9Ic0;LChN+&BOT?YK$sud z^19V!v8u#Ge8D@PWZW6YCaD>-XWt(p?@t<#H#K40V2%~hj4Hu)kFR3iOh2sm=qIaP zGKA0Y|KRgBKEy?SCCke2IeD2rEa3sTsJC@9B=no87q_aoshecT=frH#`zH^3CQQQ) zlNJfpGcA-%Kti@k$?4Cgsb1FSfhPfpYf33f*Z(?`!n5_x5D zj7~<9RyPgoAlrhK%@l+hc8B;~7ZmWxK0P6AwVAXznvnLXcktuH)iB&uiiC&-aW`Af zky}AlSn(H!4AvqAarh;N|Gj&O1wv!t%Z5&TT!O}n--VO(8A)vHTqC?TU5B1= zGKft5Q9+vJwv*U|#bi{z6>TasMj5w9X^SWM)baBVf&9v3)SCZ|vPdz58RU#0W{x*f zpmzyw=cdrPMgG9nvkM&W{tO*9E~d+>{88JVT*2QfzI4atlgRhGKKL~!1qLts4v)3Y zs7V=#=2SO|yxh8p+`!8(aQaD8W~)b)Xvf+rI9MTrs{hDf_cbg&w89Iz&2dE)2lAoU z|JnR#bB$k%EJ)G*Em(N{EeyG|0xc1_n=)Or;kvVmz@gWO%P^1THmw&IorT*l>T4c& z5+uqN8SSImJZ+~XpE8v}w zi40oa@$O1T!0mY_0kbX@#f`7!T$DZ0j#Y_v#|=bt*Ubxg$Chjbr^P80FS``gX3gNE z&ILRvQ*Ri!vJ1WZdzY$t69rD#_`>83)6v^M!JLe&16!CM!}%nPN9u29qfvR$`?cO6 zeEZ@Z&EyNgrHPAbOkcegJX*g+aL~L7j*fGNS?>-2x;qN&{yYPd2VXhAcx9aY^ao6l zUIZs8y@DGC+JJCe6uV|u3f=c-6TF)x$J2e@C7`^sk@g2kXfAdeZLI3#Iqw(kV{$+8 zlj(f=4EP3rNxGp^hJ5hqiVk(}Wii*;FG2flFQc08(CFtHS-7mjgoW`CIqGH|lw*M3HCp1hnQRv^p0aLlJ9 zdfaIfYB6t<$4NM37Y)w_Sb>p>0MIC{AaV>!KvKDhTRHLnxa_HIPA~;F@j$p;Z&mjM=RgGlCVsyp-81k#QCGQu%$8g;d>MXSPv(Bo|=u`rRAd~VI5Fp9%KH` z`#K}m$R1hzmz}736Ur)QG5!W7)Es3+=Gn$+)NRE$=H;_E-k|Rsd@3!6ZSmR;<7a$C z;tC&;cA_FvT(cD1h=~FpC!A%zZJjF8(A%TH>x#_U(FOKuWfu5t8er_7?gQD+{h5-( ziZf#lUVuL&3!!At0_MnXeUxXZg*4Rnp$oiYjPUnQv~6!E*lC?;H~e)eToWxv{m}V= z4oo=8nA8k&N-I>6zSuFSX|6@J*xRrgzCWqU`!6H$_Yb)DiTO-KA;%B>r;VDH`lH63 
zH__jx2T}3Q@u(y^kcoN<;j^+(#ExEKMy*VEt5Q9o-rX2z9<`8pcf%0f->igQE~$gr zfzr&zWdX=>ss^rgD48yIS{r4ZbYhnL)MwU+&!T4ANZ>Oj@yy4Ncy5g@K-7p7u{R)0 zn8O{!`EF$%dl=O|Qn-LTqEFhHw6pNi&s(VfYAhogW=_THT;g3YxQGrsKf!!5m`hKq zwnMMgGZ631PqhDgGA-_|3#!)lz?o~Mm{T$q%$g5f;N$G8Xp5aO3ZEH_mcLT4d%rx1 zIv59lt=~DMJG}w6a-nEL=z9^Bb}b$@(nYtz7h;EkD@b=jGkoTLl#Wop!;Q55V>=(* zW1H{hf~(Fc)L~mI@ORoS=Kg|mq_ek#S$b!Ypy!JV(wuae`uX-R?cQ(COm$kwIFEIM zuF0zQy;XJ)y1k&!SWSjO`z4r6*@^V@KQAc%+AR9Zq#mHYILh zPue4JV{Rs+^YM*c^1?iNR^1)cP_YHSejLesnag4(#*;a8{|4jexQqI5)B@g_bep-> zd4lQNpvQz*zC-^sUD4JjLFjU<5{O<57+>di=*W{WG@~q;3O4s=_XS(Rm)lanzNmlb zl;sPwV}Cj5*I5DgY1uG5@N@@AyEp|+6cdGY z*UY7y{BkIhXOi^dhew#Vt8TF?%%$m-s$$HwqS#vbm0^tAm2;fYu6;;DcRi|FJB7(L z+k!HE#*lr}2?SofW|)xS&E|%UM>EeYglBbE&~~;VjMKR?^jrQT%2IA% z)?430(mR!4(Q0?bBVC-`BR*A#a=MK zZYAk9NP;74#-pBxqPv0SGpw-u1y^0PnBIC{%DyM22uGzT}j# zBh0$v0E*hJVZ)Vqq@&{z4ttab&i1at^V3sccv(6A^2^ySSy6!=jgA(mmvv(EwMsb2 zJ`bvUCg;K`u^93uKztHe&R+ytW&yV5j6Ruhb40L3@|IxH zmrBfk^@%;UGLvjC(c?Whb{22k`jsc1b5HPgmjEAHZcI9kSmLV|wRUd*cZ*z!W;JS; z(Z`B3M17(-d^LX}&wEb>w7=;__O)LZ81iS4MM!;;X0tUh?G1w7#}na&nVqa}KpPm| zYRdJ@NVWfRy9d)=A{J%Jdm>xqNY2huubEl80Gp&esa`nxiI#eN5zHLBPDZ_SsG6wO zntMI7xvP6u;pS*V@b=F9Arf0Vm$p`S1R}>%jtG9+c1e4r$}M=xFdlqmFl2rp11E$U-2=1#nUB zD#V>l+1Qnz@Z=I(lC)2iv>%irxos0k(}X_m(5H1DwL=Xsc{Q%0iyv^w9m_#sbOO zxk)on@4j3*{?G&xqi_^FbA3VSDzP{(e-ZH3nnntEx525zK=^j@d-{ha!6@`ItukDJ zi>BM)7AbSIujMW-Rys>!Z4JqK+sVw-+}#KX6{*E}c5HDdhWkr?Adgjz^hb0IeZBHR zaI*e0+Of`*TE#cQ(lIzkl9g9GFw^K-5=N)x7XEAG(tIrrq_@d}G3>#av zApfY9b~bv8phfddG^>@suOUTfo97B9_qsVZ$a*B)K4(`NcS zcnI0OpkrevRA0$Zl6B^7J31h4)^-YsZcO|1McDB-$NrlgptJauY;3 zWPdhBMUg3t9^$Q@SPA|1w8Poed~~WH5zafzhi*kRNR~?nBi}4w=p<=I!9AH5y)udG zn79T`sA)v8vLi75(@eNpAxN}2n}&X<V6^1z_OZaTW2G$XmOren-O&9yitkMcVtl2 z!29TNfjV~I4JqbSBJJp7U|-5hXtWce&l=Gvaf%J>={E;!2F20YBxjUBUqmZ)yrIrT zXJpgv3-`61h41pMYf=)V>HHl}=#kfY2mniX{)-VcV8g(p+E#GNwsEMjc`j_N*Fo|# z-h;TGOTnKj_qZ}$DN1_JRlzTXAmAh>P45kg;x{_}LgQ)zQN~F_Cf7xVn%DdiHuuF+ zQa@P!x#1Pa0nbChX04z|0oCq`d;m{IdI9&OtzdUvC(M{zAow`oM=LcLgSjS+DCteV 
zz)R&NQUS?2gK>P=9xqRO*opG=zfIw`KNH~Y>BT7W+jG0PFg}v6{fz{>movI?eK70U zv6@qrH{j#im$hprf8w2+tIXY1)ndNS-9Y`-lc24$5>f8`b*#47D0)4+5PvO8L=}g% z8JRO;w62*f^lGzZ%G9Np)@7wI=-vvlg8#vFg3;L_3;ppa3j#_IgeLUDN77(NB9|7S3Zr zk7+XTKBiDY#P?t28x5|snZx&$4QRt<8oYWLU#q-)1l47g(+>-`K+mOffpK^ft+wYU z@{61a)%Ko2w|r+aMh@X<`SsPXT>AtnyWvDlcdvj7Zw_#wF3RZ1H!bD_^#(oOa}f!z zZ55sfO&}EsFWK(9^}NXU=9n+b6JDR7Bv_pgM_Rl4cnkc-2?MAa>N7^TXqGkiL~w^& z7CV=yJue^;jb)@V!j?6f;KmUrC!u?QKDqQV7E7FLC)*tKYQ`H?;*I^;WQ|x5UjKEB zv^+g8^k0*PXMWs(S5Lf3?rzwF6AP~6bKzG=b%cu0_RD#)pnE&l^P{R}vh@e<#$|b` zaeOhEvC&zm_*of0$eTb$zbxfcycDtG6?ObB%ZRsg(qwX3!iN)R&Jwb-)A=ekGjXnL zJ(;EwLw-$6#8FR!adek}tk0Rk{q43VBWf#!Z4N2|&GWO!-?87^-@1!r@ZvJk+#iZN z6GCyRLN&3SSC1QYY&hNHvR2YD%EWB13z>Ggm-IS*fGQfXx)j5}gmHCd?wd!Ij3%Q$#r>ozA6uAwYE`nrJsH7HXc;N1n*6! z(#AlLUAUgT>5@V!*M)IF(vo}{IfK2dvT)Nv7wlTT6F)B6M5u;u+>Z@MiTJOt_|e;7 zx;#sR)1V4)Q_>5;itbAO4EjBJGhqX%UbIwreBM_(e^*seF1(n()^aKDbDl2vham{=)QB9a<=BIu0(J>iva0a-_0%lv5aN*XL6a9YQn-Pj?}Z2j>2>opX{4I zrW1Q@f4uw0e6qw*2AfZm5#8y=3r*JL)krjJ3F)UlNdB5EVaG%d(&Hv+XOPR`z2kP) zo(o83dn{gZXMFYw-$XVEF2)=X-1uEaRGJ>)n=(Bl=g4&O^x`<7_n83Up5vJUtqcVm z;S?Yo9(qc~8wyGB?^(oJl%wA66GKSK0DG|JCGl~{C(c^8$;rc|T+MB5-kC2}*zwdJ zwp2--$PPu3+negR-Qh8;U&L8%@Xka0T*TBloce^AtdkZoBs2ee{;zu)f{oW^vVIFB zi8CibJhZcTgD{@=X1_B#>CP14I8QTNvNm7Pb9FsA{NkP6xf*?8pQT9kdc~J)OPNUm zUf&`AoOh4|-P_3}u~ZWK`KuVQ3s`7>?} zw~K!KjlwonW!&HlXK+i@i}$KarlODR;w-A7fP%&Vc6IFHGD(f#zz#`#apeTKLj>XV z-TR96tGihtregt4D6GZeAy>IazZm}KACh##wBua;Q%ykiPR5?qW5CO219>3gnY`IH zgRDQ;T(kUA1An&cI{ODS$jRr}KG0)G2mjr)RCl2Tkg@ z)MO9rY_*%)^F@;Wt8R-gyY0gV4zI>NCc||5_L(^A$OfEGFU1R0&1?Pga|KokvXt}p zOzhtG2#g#GV^b=h1N$UX%$J1tx?dz`nIp=;X=+2U*FUI2F*!D?WhNc8ZauxVAPOEy zct~X|45tS*e_~u_ivQ;ABd6{L@>V6*a90a2@l6dF{(!y#4p^Cwlb&(F|L%UC+>23> zKe(Q~wr@Rcr{4~Cs++N^$~E{%dOtV)93;{SiUM}ElW6}wKifynmMi;E$ewvBiFY0{ z2e>|lZ6DGh<>6!YEqo(lut=Bi92Sy8Hx%$Ug@d*dsgtBoGID+j^>wv?b#ad-%X}Z-r>w22Wyv6ymlXzdw#Rir!1OjN>a)#nJs z@?BU%^C_e*Jc|91_>06l`tpls&BUE63+O?KKpeVaANeEWMnYHCk@sO0Y`Y!cBqQbu 
z-*lMcD{AIs-u=sDkXmNvx;>S9-*|>(7d8s6PWwP6`&9_{$-3d^zGhg`=$(kn!g1Se z9&-DVib*SPgeY%HBN{(*S(jPs?aTy=InRWf)Qs3-GPBx1cxyL}=~*IH@zYnF$gaUY zG*E+!g9@q8ZK>o>-T`hw&vao)_d0%8dIWaRUr+8j-^5>J>#(i#418d!hOqJZGHxis zfP`;dAylz)w9Bku$l=%F*w%EEB%Pi`E}GTjxu-vJ)we3h{1i`ePs3fnywj~PH|W7G zy#wUy12N%`y{*->+-HGY=3-&d@?m<_Xb(=fP)jX4xm0*cL5ZC8`$Q^b&)C^-mm{HJ zclilc##nkmEiso@6zZie5)_&lQ)iDXCSUIb2rsUVXZw8A$k{*#5@K0K4uQ*EKgY+{L`)mywXtR5d-D}Q;1zZz8~ew&p`WXK1HMjX543vPY2 z12=Db2ZC;dV~KHnU`FC{61TJ+Zys+$t}Kbh?|Wa-Esd9G@pOHnpSytX?IzAnJZvT; zc0cjsneM^|Jb%HxBo8uqViJGm9m19-NeI2DZ=@h867R_P#-H6V02C_g1%EHi<9xiX z5vP!A+{;_n+2!_=Ig2Z*!dioJN?{Q#92>l^dsG^d-TiS`sqiAMKPrKlYZSQrk|jF5 z5A95^J|JhlP87bkDihl8DJEj4v+N#wsFHu}FKRySEfHKT`^*)YuN3BKU*@}Io3nbV zYsrF})mY_-rf{^~o0NUG6?WWMFWh4{othjwn_qs)QwW!631jviBNK0Uki)Co$O~>c zDZ6UL+3ey-;=CgAVCfU0b24Xnxl4EY#)f(uJJO=kJrB1)&O&?op8 z>};#1lDuC;pg;2fDGGPy;vL#pqrY!Sbm3xhA^s9+i?kHp*9<2+ZtubWoHEIbqDict z{xo8Fx|X}pRY;eL+^a5fQ-$-pipi*E7g@i{pX}}4DdFr*|LwUT#15$faKX{!TNu(Sa%oAg;WRjb7h5W z`L632&ou_C_Oe7x`kMWcIz8M9^$E4vlFXT3j2}%u$EjYAB>VQw=9Y^ql9y#oWZTF? ze%hSN&@9Ff59Y^_Npo0y+gAbqnqz|#la$Gl=k~bw;Rq-lmA~Xmyx6pH2T9xgOx(P~ zf~T-N1DHL3OFpfz2D`qhW4r7ExPa&iRj~|-`s`11v~Ji1wRhkrpN#9xQ;JRK7>u%t?&~YeXJzx z<3eth;fb0Rz|Oh|ysXWJ?>F&y>l+>k#(2G4^|;0Ca<4$a^w=U2vPF}8d)h!MOw}nx z*C8|si zKNmlAu4K1)te{pz9pa=SPHMP-ya9KB zRhYql=l@OKc`i~ioxRS@!Fhjc@Wsh`0>cw-e5hb6_~6z=GOQv%foEQAY_lWTtk7<| zMo>-m^(umpYXnCo25?}u54lw9P9K=BM-)ms@tYrVLep|-Vh^v7(al<<<+2!fg7nxE zrxkEYgQ76FqlF}%o`Fx?dxfnxHF8}?M6>D1$s}}vHI44!ZP(+>)t? z+aJBHcDi{gNqY7QCx^A+CBq-crXVHq*5L?;2G^4>bGEX6C!+D#jxKN{BLP3LKTC3J zj*>tzSt7Gb9L`Lu11&33sXuFU**|mdg9V4)z<;Lzds;~yxec$Ow|mm)&6=Z>;U)$! 
z5trx-zjD}f<*Cr6M;9q1)xi~NE}-&s27TpK42;{g9TrbM%=*6Kvp#Nx_Ti<)Kw=hy z{DY3rm%%)gZjg!=*~mlhnszYelL4nLPk@h4Oog4aCy)#0xS%HfrMO>JQ; zc%YFkxK^eQ?dI)*tscd=@k|B#j=lkOm88L7u`ifVyO^#v)q(#^&Vt3O0-=2fpO++S zCi2NFf@7ZNq129A=%Dl+ylAmTTRr!J;*FHRX8pd4l-RO9dQ;;OaLuZbTX*jt*qq@eIv6j2PIe=>HD5~*G;0E${Cpf<`QRQ=oVUca1j?Gdh8jz2V1#Kcryty1tu|K% zPd}P!Z#jJ?2oHWIcr|?-=e+eB^<#}HC@Tnrw=BYOOI`z9>gtG34cWu2++cL>uPsmV zv;`QDX+jr!PrybM0n95HgC~C4lPkeP_>abH52K3eq`mTH`-K59A)XO9#4GTrn07Kiy82 zJs#to^!`JIe^=wohZKcQ`g!#8#8bF%PJ-ad++O18Ih*R3ok#!ah-TCBm*C}-wvp(m zF8JT+)8yvnz3g)1@!WRjZ^WfIAFeok3#+vz5{E!vyM`xqa9yi5+05O9gR|qw=8_2E zDj6T_=9SI%ez;2he7ww7oo$B;kJoXD`{K!Rt<@x^D4Ufx@2Xj|w-D5bN`&Jx=aIhp z@xrXDTI}Ava#-sG%N?(J0Ce6bLziKSo;ha~S(Dz)ZQrFQTgWo^scbght?~~j zXI#hq;bvUN#no>6d-@N8H_;iu zAp9Pd&|XY-T?~W|Ua|DZrlW9kmme7)l}%pe$M7}%Hj(knJ5I|s1M3PS?PTNb^G2to zvYu*9IA+8XOT8Gwn&~MxOMD!PA8iIpoQL3;izhib$lyzzyTP+pN?aw81=?qn=-OL% zamCAQYVpP%0llc1{NqJoxf)a9z1=r?9#$g8pkXgxZEdwc{XiT!E_R>vPJPBHHd6ed zigeg)uL8Dv+HuP^))Bb@$W2!>XUjj?!1uDLBG$2@V9%sxqG{fe<)iZ%r)XMGs~;AD z7d{$ze+CQQxP8OTf|qtRc@xQICvl;Khm~-6`zG>wU6<{LNk4GZGC%w71$?$hTM_Ja zZYQq>r36o|ePut|sNlyB)40GOK9OEEj-1WvBN0}{!i4eOR9=EPn0HxLh{m@Qu*9CY zR!WfQoDRGzP@7aVUt =#!z2h2+PFek?MIf&zXdC0iky>oW&f5w3vry*DLY+P?(M zfpjj{ejK+GzQq!i2Vh@?HMaeZFr4l3-}C<|D`3N<^aNbD2!@UrjSl&)A34&FLuHw1^sg^BlQp|LfQs*ojz7DN#blT>_H|0GFUWkQrv z41~ws#ffDrOU8dpAU2Q1xZ=qLTyr1~8-+a~DO*JTqnIKbFee!wZ~w-Hxd#*J?q$T< zXA9U(2JB_U1(=a7Vugc}BrHLP^XG1`cP`}+DaR6gEHi+7{aQ&}L^=Gawp+OgX6eMB z--0TP6DOSebHQAJ3R=J?BrsD}*sdr|o{9Lw;&JsjH>8!#om|1@8jJFtIj70zGZR_) zir4U9VHdC7^Z^d!$5GUa`JDZ&R`zLaE;q|{IiBHq6F!$I#$R_mp|QOSrIL_LOokmG zvvP!cE4n*Q^K!83Xif)%ee2+S<=^1MnQF`{RS@oodce5^rU3ituW`+5kexgyTAoxSE@ZR$xpgl7D7@Cv9aQ za&G=Rg$vwNsKD_v=xM)1o~>goG!%=OfQuMa_Dnzwf3AMb~U@Voxi8@_+WnlnkZgvuK$MBMB~5xf5#PrY-yg6{U_igIim^$0iVNY9iWYC+vW%0v(>v^g-_QN%Yit(y= z>c9R^UC?_FJbN`&WirSIXD)L_w)tAn>kYjWyhXlyKSQG!Nr-t{B;hk7YDq@n2Y7_Q3y2%>ymP?4D{*qOHwv$aeKdyTch_!Y6JGf%nJvK`}+k4mUxzZFjQ z%tH^_qM7ZQZ9wKsE$Dsu0vYlRuyIu=))S1Q55-Nyt>!ig)f>+w9h9Qvf+IM0VGo+K_Bh+GG!I%z0T6cE 
zfLWk77416t6YYLuS^N3t2Y6?VE>H5Bs9&5Dho;3#Fds9zcz)5ep!K>apMx!#2^(d& zS4&RN#~1mb%LPYK!C`Csa)$vYwm*?Fsy>P6t;bP*=+A}*nc|-`blI!?Hg?P?;fh41HET?4=?TT|MUM&uS}4>e+TW;uM5-Ue!%^& zQ|zgpeLP~PLT~g+LN}Ckpq8E-s~?jAw=BI_Ls<$?uD&YU)n##n`mASM^8Oat7BDX#DD71(M@Q&L1T*`iXyco2P(i6Z(z#xNP6Ur2b zVEuIz8O5yz6z@+T^i&sT7m7C{rT4VpPD(HPNH~MtGyON{jg&{@d`(bzzyhSuIgRF< zvGm07cWAGOF`s9h0k@7GLu)LHs$r2F=MgH0em$xZtc{33g6C@(Dd`EIDL)lhEzU;D zIiJDl^$>VJGeI!+7*eUTM6Qa8!2Gbey`<#=x-oc`{n6hPip~CmstUzmno1%(ZLJHU zL#KeUbETY~#oXHcA@`uytwP$QZvZI`KH%@slZ0W*<wI~cmk0haoCfgc*VNKWTB z|5M2*Of33`mIZ4F+D=`EC%nwyOZ|A1ey;|41nh-%&&$EA%R%VV-pz1spB;TA>`3KY z$+=L>WgR;I_!Js6GO4Y&yIZjF(pw}WsR~wK9Rx1%o?P(x5%lrcKPdOK5`9*=Zg(;; z9?JM*`-PUnAY^0(`mp*6Ne-^*_@y9ITH%Hk+P-uiOzRkd zeQ$zb_7Mpj?Kqx}575WX8go&}oq2FwKoi6lC%~<%F0k<#ZGz_45m5H@0|EN-R^VG! zgpSu-gQuTLF}>^*{!+F9e!m;Z7wYc#F`xM9=P=!sJ(hqg1xC0L^lZkqBqq$_@PCvwES~* z^eD;>YP20iFSoV9_M2rWHgr5=;bb7#a$E;Yv-^%lKAk};A6p||tG&?ZR0Y)TH$f*) z`-%Foo6&(;vrw)-g}yj+utTCs>%qZMkaMbta@~3fPEs&K3zT2bc4gjRR@4`I)v8G7 zy~%*SF-sFA&H4+zexCl{`H%Y43SyQXq3-U=g#7ELp!_bW+BfGH*Y5eYk@~KmgU*Ts zIB8B*g0_KI(C(sM?Ys6`L_YbkZj#cd^6!1<_QoHLb5=#!2|JP2EERNdzd1AGY7rdJ z9YWi^*P=7a*9ox3H87lg8Lap^jtLc3qj}4s;4D_;aj!B3+0A;$yH|9>@@r(J%O!Z4 z7LVXg%Pj&;7d_Pbs(}vk%cWgigHeBK6?BXc@krXck?MzH__KfqFUml)p~IdR=gD@fU>gRN8~V!>VI!?>rUwXO$(Td7(P zW*({ndvr1P4ZBWY8S=auvJN8+!6Cz~N^X_8>h zCo~8G{5&Xk)5FLpOP48e)~8Mc%tFy8El{<>Zq%m20or#qpoJIU-{E3(?`JvcaudZ2<0OD7%AO7V{l9t^&EyW~gOKH0YD-g7eJ6p^B^$w7B&W8iw4W6)1?d zUYCZB^@*_h&Id|-;XL~2-d^^;jTQV?_lG|qWkKr#b!L0-Ty#0YmGMaQmay)=PUB8d{TSm@&mMQvm_IHFpjzW z))J|ude3)Ou(ZY)?^VT02(2J7RB9 z-nhAp>y0+*-+X1LRlbgCKdQ_GfiQIG>rV7BryRN#s-b`VlfZ-FVH9YijXF0RLZ?4{ zpk>SM3f6p;pubJwfu(mRqw68d&|~)w+H2Atut+}^#4D!2KXtp|m&^S~t63YMF311Q zfA82aD7z?#b{`vq-@8rG;57+qHWu*9w3{fLAjxd4__|@Nz&U2(`RQ zNhp6rA1kM#f$^)+)5bMS{n8|Kcq9%c@sm)t*iou)Tn9Y6Wj%)T!nVaXh}8OI`rJK+ev?p%BfK4jS7a7b$6m*i)eUD^KCu+e?avjQD;9zhY6=jS z^Tj3?a!6@#2?^jelL1lIs=8?+XSmQth@7QJljlF&KR=bs`{r1yTD=JfyB`yEV?A!4 
zjkvHx^|J8O!c1;SQ4hC#*)VBb2yt5Q5%7Cs2syt;T`2CBL*oBk}}vq#UX)_)*O;wO>v_KXA6`6845DbKg7o* zmyq)gNu;fQ99bHC74O@vD3ra_Oqb>!5c#@7gzQivt93q;8`)-yef*?_X=j{BtVb4_f_tHpN_58ek8xSd*MVm#I9W;MiT9c z;5gYR;m-P?! zcGXwG&cfgLal8%T{1Zi)KuJNZOb)AR6oI>4iU}=xM-ExsO0S->hNv8Sk3Uro;FRQj zc6Tk8von7=aN`&HkZFgvVOaM7>u>7DOH3BR@P;+m$1(vVc M8FTP8Da>g*+JTh{ z+raqh7~1B^YivC(npMnr%^q4H%BgA3B(|*2CyY9TW(Ckqc znOL=qQ*w0Y-(Don!XI+%mm?kA8ToT$b+8&J3y^0QvNM2@orLh;j*HYEMP=bNz1T@c zmxl2t-v%zK!xLyHUvUBM&(skNVXHD7nw~h4OuYj%vlSCC1a|E?l@_<_L_nmpL_2XSPr#dq7LX~x2Bziq(*Y)vGlj^cW` zexi8r0oFTlhM1HG5YM==8L7bv}pPMX}%}NQSh2D+|q&4Rt zar@^keA^=8`YvX0`R)58q*{ZP$#5Vyi<7{F7utBsDKnvy!BpXut}+roU-X?C%)p=0 z?WlEM4$;-IG5GBkTQWn#gdL|~LGpD@Q5wUhL}PU)-yrH5=WMLZde527ZE2CfQZ36! z!i_|H?57F8#-N>NyDXD9ESgR}lpUk_Pfz2=&XzU59HmKY!EW*}Iv>-+?j%Fy61jh! zqc`cC<%*AVf?umlI3+0R1x?yY68hH=`J`!NZSsCjrnT~aq@7tnP2U^$lc+S429>En z8qT!$T5IofqDUDcLm{D*Mukj8lID5RKq*OslIG!@y?2sSl8_;V$WRnAWGEEg^Z&h9 z@AbRy&doXJX6@%$&-49$KiG$jZ2J2>S(JW;WESa<(L20NNN?#ndOh4nP505D4^hT+ z@vSo=TWKBIG4LrhCi#Z0zkEn&9WCbB2ks|^!?Kw}_w=dJ0~hESw`MR`UHYjzGvZPE z(j(&GQUo*~zd|;*%2E03DU4q4D*A1rH<2jLXX&1%BfP5UO#3!3C;d!QS(k%s(4=1r zJ-`p6{v4iCUq1PZP!xORFSi;~UflQewwTSN&Au~CMJkC$%Ip9c`>yLzll*r7(%h z;+)bZ`o!&Vy7Qq6ec$61E&J~i{j7T`{qXiqY&77i@S5EkD&I+hNHi%V`u8fMep`aL zzbb={w75rmcI=~%{j(q+os%H*IyaC(CsW0D92)*;Jq7Jsj*u_IY(%aXGmyjQC}tp4 zg*iVpo%#OFPWYY;X=kY%NMjXUt(BSq;t29hqgJ@mdCZhtRqbRXR!{iw?SKLM0l1qBK@7U{*~j zuXS`%U_{=|OxDc?#@v4jZL*Jxoej*VSdVeA;e;V|b@(Z&+bv+4;(LXCmRE#3%vG5M z2ZBj(vXnXSYdM*{GM+ZCj%QjuhJ-J>=FpM-Vy>dLSXZ+(oHTo?#(cA^gy}MC$)T^| zq6JZhnew!$1qcei;-InBeMw;yV)Xw+2okJF^&LxJN z&G_W{Py8Szi+@5Yir`QBVvhm~hzLPW%hITZ0d#{9G^`QzvpudPe z62ub3@6^0D*ltp|Uy4kQOXQ#5vXFm$ias59OpR?{EKf$< z{>Y!QYde3t<$PEc>p^>`DGTa9e?#k+Z6S9}{G#7K#su`dCuDDE8J}{utb@-T`Invo zus(h_dFswm{vG-!|4^R8wAp?r=1>vx3hQ^)xWS1!((d*U%wesv1zG$ofj{5O}xD?W>y zpS$z-_Y~mRkMn${U0W~&!UVUSf0HxM#?>xZkV`6cc;U5a-bBak11RRh6Fzn66MVS4 zvp#iPpZ`^Seva;2DA2txNAEq;CGM$R1nXZ;L!y;wMAuqhfxC&4;B-U^KhQr~%1ZH0WROELT(GCG8^#X>SV 
zAPyD%k>T%3EhAoTzRrJSe3e?P{eSQO0CCIiuN`>FLLRS5ZCDH3s{G?iKQMvSC{+`4qq|vKw zg2eYb`9I8V@qc*7^7p>lMA%F9i?*)&OicO+1U_GoAYN%7*>PWqul+=W6lTug9}6$$ z2iUfW%-i*ZxOmQ@cy=LStDOP&KmJCnE1ZP+62JK`ZN4Y@1eI=kMY?~kNL@$H}H>5Mlq1}iw^N!2N!g_hGpHpl=75x zM)&7VWd2f-*)!gcKQb(2TA9Xrm%K>on(7MCjw!cAE=wdC(b9d)&_}T zXNE6R8LmWIcN9_A)@m`=ybsp>CXZ9o&R8=JzjRUJfg{YeeVm5c*kQ`qJb}{8G-2*k zIMF7n_R<{@ka0Vk%XIVBFnU`uL_dqC)P0fip;UaP3D>pFVhjfNG4e7?$ncLTR7+qc z6;PQks`%bSgnUU8GGHbBq2mN|J`BmR zC10!2<{!#v^-uTchn}Xw+>&+3XH5|kmbw_34?dzk{O~0i<{e}GS%JAT8Oe-%=&n9~ z<`BKDAPJYce4kRcSV2$g31e)X8iXBbtLo#oi0HzGLdN9C6ZG(FG9CGLJw14A24nCo zjo3a|E8-ZmQqMg!nMN;DCgJ8JLEbq-v_^>N=JQ3A;hTrGpfw44kLgij!vuY);XI`^ z)I;;NzfrL|0>ZVe8AiqS5{ph$(8|$oD5<}SblROZq9OYs`g(&0k7ci-_I_9@VsC#a zIx)qTlR?1er`N7C}te!ZK@&Y6@q$;x$~)}zB4c> z=A7sit-xq|o<^3geki_`rsA%iVpiOoi^rY1!LU}>X`Q^GDE9Mxr#eNS>FX$uvbVZS z9Jnvduv^CJ{$1U~RHOwl9viPRrI}HpUFk7cMcWPv-&9(6{H`t9@GF9H_S?^_(Om#{ z?{`7%qY6yJVp+Oyh8Dw})l3;2@MM}tZZW}9%EmI#nuh|2H~IT9@%l52Xfl@~P-O zn}x#-A#}9z49eni46|`l58)y8fBnCd{4G)b#RgH9K@jaxAoc*SK3D%^$7{qsWehZ~s2mu}djwI>+NF_PFlYdS42-$7Lu8!#I99xB}7D?Jg?L$3{M zWEO=zqiZbEn8lB#Fq+?&hz2T8qL$*H6xQ{fId1=exheSrE&OQ26h6|U6#hP?m6S4= zq2es`Zgg#ZdA}Ufyh%moSOosx=Wt=A=4;2{PT*)Q?iXI z9m*0O{^dol-Wo6B4Dgv72?CIseV2+FQy2QQ?WB%-PGeU33hBi@#!SemGFX-;O%cge z4B!S5XP{WOKshr*iW}>k8pQh5w%c@KcRlqZIEwkMwz47WurVFGrkl`_k6A$Es??Nebs_yGZ=2EAi8sYE|VM4!1(`4p%YTgX}cB!%6|C^ zVa0eO;l-*HO>9wSGmR2*0W}bQv(KEe{(5=qJ)czCV{c>w7y~{w3cKqsD5m5Do zRvXu1Uc`xarm60Dqb8enl0L^M%-v3GlDdX&-BPCASzSz3OD{doGKYB;Af9Ran9v<{ z3+nA9{ix{6;)POd0b^vMFABbC26r5(qyvvPFso}n!eb6==}etBboenhMz1D>`0+=P z_@N|rpJb;oVK3#Gw>opFuCYh-Ef22P`*V^W*fqk*Z+wbdWQ>8F#D&bt%oFs1#l>`h zQv|hzlSByptB4&>lL#%o3RC4^LEk;Mnkn|}r8_3)5f(`gMdey9v@qfru=z0!Rp;k1 zLK_>}@$p;6?4?`1#EJoWfguC>B#VW|{4OzZ_g$DGLvK3Sy0HFANk6f`H5uh*NKg$E zvW%#B0evXFUi9%oGF4!a!?-44LU6Z)QS8Xf_K?(JCTi5_FR~W2vTd&DDl3{Wi*aWT zrFPe?vq@!CKOAND1@anVDZqe+?3(b~g|zwD!TQ^lx#;G}Z`t7=SWpX?zd`qy`6p-7 z(^wTunY9#i;xL<%^pU5ZEwZH@cfM!bvJ03?Gd9o;LS|uGaLSnXRFACZ6)>A#|PlCO}&Id<~S)O@Hc1)08vSw1* 
z^g(h$;6NPz48`E0C|WW*Nz77HCK^s%Ce#~3_=_i$D4#FEM8-CKzVX61>gDJ-b#?a~ z=BZvNd20gE!E&Xdp~I0i8R3pX=la*+uGr@(qo6FDoj5_vmNlm>|53uYr1w<8gRd~J z@1m&gng=<^OQuqEQu!Vs7wX;YWytNak+i)ZqK}u%AfsO#Ls7cd#hQRq)ZL zkuIf%OD-1Y)XjQMtQ+F;`=nkFZ>HFhvmK6!Ja?+|Nuq?v-Nk;R!DTTh*-~UjsK*V`ClWK3X%y6lSRO3YY0jmPeZAnb&2T6&W!_i!2cOraWENeqb&e*Z#wntrQ))9WKVR8frF{OxIb8zHp_ zOCkJAV(F;gwdAZmMZO0o6Iw@1M*=36KI^uS|NV!M^2$C(S(eL_7T*sNAY2o!k6uix z&0a&+ohqU8s#rvXm!t4FnMVA0o{HMVyZ#UBWl7tE1o3>o6g9Utp4zLGNUE;gLmk@? zNT*5lpz05O|K0!Nl*VqhONK zBI5jl=o)Hy5b1xL1fRMVk~WhUNVTqNGJ88gTB-Pua?7VvPi5rE+lwZqU|Y-qJLO3l-NU&hBG2*xye7I^GX%kWu6S(d#_V8meo$)vywp85jkY|#Ymz# zF^mqHyO*5Ysm$+uUP+dzr7@A+6|}ce;TBB1s1`RJ_=fcCPeh{MIQjP2&1J&05SLd7H`X?ro%>jc_TC&aF(( z!Ab6^h!VO_OOw%H`O{nC&6%$2;Y?I*8a3;MH$9Xu&BP*0dUEIy9l17*IpKMRd1)>7 z&C!XXh|{Wdc`*f4oXPA!mWNv^vzOt`diX&s@GFQfA-xg_SLTu)@bad zpRTGDarWxcUAHdMJ?~^Gw=MFt-+KjS+VVEh?0yOQVw;a?QO8G0Cq|3p2HdCf?#>tX z8od)uwO}!-9WKMaEy{#NG)+nMpH~CRBbQ0*@!8oJM&Z5UY zzNaFk_L2{|Q|N@sSeo+9sa+@s;EFTTs4Yv38GK?P5&QWut@etbCG&e}mwYx|H8+#? ze0r6RVOvoHORboa0z3L)YqoHXdf*P-8? 
ziD71ol!oh?!Ukk1nBo|_=%Y(Hl9s4fWS*UwNls4)-Y!t*3;po?#%OD^J(kR?^NwpDP}CePsB|KXM(r)&}r+v znJq7OQg3=EfOL~Owa6exWXq4FmaM!-A73pesvc~BiroULVShSJ=VsFr)+tQ6_a#D+ zhy6GI|D7>RJ$C8#OKO8 zlrTVze(>Z2v(&YK(fM6NOzcxc4=SeFXxtDhW2+ zx<_GO-iy9D$C7hK%lNwt+WDUSL;SAZ5maM(g4n)%wSd)sk<@=RkAFm7JVVi|z;|@K zqi*r02_9|#O406`f~i6Mf(?DQ2#;wOhyo=^!OpY{(lO@}H6Yee-7=Xcko3ID@1drW z(^BTw+I+Sn`M(o{+IQ~oeU2;^%r>&3?dD4Hc8VeBE_Ec#jPP(U_6>h~#pn^83R;&d(FA(}7P zs44?Z%-;fY!z6xL#Sww(!aS&@uE&2dVNGlcRgT(nixSli?EA z>0k5)zRjsDvdSrierx@Ro@}kAOb7T>&bSr1>-#nINBtApG~GeaTF>PR?j9B#`SlI_ zbu#B~9exHQQ#KOM56uufVQUIBWkw0twN~)P;Bxw7d61}=872D8{@^D>wNvNRGEmd` z3~FTkEP>8%Pm$A0bwTr?9Xay~eMx0Kv8ZcAkstRxo7}uLnmWI#jDN_63*=BQ|L-R8 zfWK5E*eAJ{KhXTD7GFM%?;ZGqcTRmJ@xIK1SWqPpBv#6zEhlX1-@Mt*Kc^%?c*WTX zRvCV#{#a`Xo*E_!wC~@9*{iikh3azx`|ve_Z8OgD&)-VtAD!Mq-Wt;4?_SCxEEg*Y zr1awWVMXSAdsiuXk+VCH+dY-c`MaH1a^niWL86kcrDsA4+|5Z1&IqzLHRd;&U!#^f zKH?8^4AJT(~aARxG&LU@e&`t?m=Vt^TQ+zn`nmiyR-z#1upd8H8wzQ zZ-F@PkXPq(i4;tex<;iOpUZEJyh84eddSBO?~!p)H~ErIR(yMti-J|dnf%0lV?kv4 zeSYoqP~ycS4^jTwx#X>>F@jOq0|Ih?Bf0IZ1s^3?lE!w{{7;Ir1xh6=D0bMJ`b@y^ z?xc-RlMNAvfKQU>Hi3YgVO~|5-y5368iij zPQz5UTr&OUr-ndUdWFC_y@bEPMp`C0xYI&HVv2avIYnZF%~UA~2^pFH?LGhBM@hm` z!sXy@FK>T$wwudg7d9fTmzzsC{{KA=Yix_#@9{+XK3l?OCz$+{nWYw9&3H@}Ix2Js`v99zp z{yuCy*tAp&9rnCk{*58TF2&iIDz&In_bCm3P-ZKfezZ7UeJZy2wAp^noF=AvmEW$~)v<#5^= zoQN6gdrF(hFb)SvDt}6v!i*G@x z?TM(y%AED==^?BwfRBU5CJ`AUY?*?iyz|2m&^93oPa86XI}%dxvur)E zQAZK9%-6+NeF=h6JC(tBr#xCV{{xV7(d6n}n$NxR!3YxkVa`c%8>il+5?S=00-P&z z39v?%ec`zzT(ti#EAq@d?mGS@Wb8i=t>s#QBl7oIV|4*|Pmn9Gr81VM;Oz<(+o$3` zzcg_~w^w2pS6@V>0}7mh)IVs?rW5*=&%)uR^=kooIRK__uEFFUDq_NIOVH4qLL-gl zG_>&IL6qU}g_!C49NxTh+c>ZL9QYCWn5B2F1v_H%)=c# zz2~OrnbvK%w!sO-%-Dgk-lwuH4;;s2)TFu1o~ytIeO37L#4-Ho+AyBGdL9-NH_{}IyKOr^)x)tv<!s35Ez&GlD0`skR5j(pTh}S2jz@*M1kjgvA%Q99$X@|$4HZg_Uq*87g zpj;2v_x1AxH^N}vm>MrzZH%K7z7w7P@CQtPQVX|#e~64V1K`n(9H4*WF|^z+#|lCdLxXOW}RX|AhZ69~u_ zadyPrgGCwJ*w>=7aQogjycWz4uR5d#`}{YH@n|cLJlKW;cGuuHeN=#I z2@Nky;~H1KBEfoE0mdrng8N$fD5gGukllCzf6*q1+fFV(-x?dSjH^v~YnA#Kiz 
zJ?5}*ks3bp&=B0O)Cl6!*W(ffH!wGo99Zv{0dnorcq&;*##hXa16Rpd_)(UN+FH3R zUOU1xgyHDz{7<}h5`RIe#xA_Cu@lX__jhP!%bT5oW{-0tTPQ@D2(O42M#(o zpz!4d>_5{}*ypQ_Vd%eBF)Q!`x4btRJX!Jstk;Z(^OS3_IV&Z3_nX{tVU`Z+IlK|3 z=I+G@`pQAU{DYuOHy2Dw7Z`6H&u9CVPvf0Fwh+4CH^i^Jdj`fdmf{Txl3<6B!Q0iR z;D^2)^URS=5i-UlMLfLDiv_9l=~xtd)vT|554HOTnCG; zE&%VJoM9R7DdpUIVUGOD?!v^`TEOCJK5J`>6J9d7i&yQY!Ta=H1vYRhu=36@&e;tM zFl^lhlyGpE{k$L-ZOq!HKkKkIoHG9*{wX*WJd26OwCzfH5`Vrxz4O&ZZl_kD?MvAx z>R38q?-UFF9?Lbk+_V8)oNB^8`KO3?I3pJuyQ@K%1(dU0brZPBV>_YF&jX-dM<1o` zUJrln+KyV*b)b%1dv@@Qcf3HIU+CQvQ?%5h0GjN*3Emqh!VhhZaQNSKZkN|5D0m$L z7x?{v2hKQQ!XPX5Wp9W9+3%eEd+K0q%6;%!TMcj8xQSQl^Nn}cb_4jS`4@K>u7y2~ zhw!cJ6+_KsS2cR*c`L4e$ckN z%{Vv62t^;u;kL^-!ocnN@LYEn+|3+`poi8s3H^HyFIK{r7)+#<%?S&cKW!m%L zrSl`)PbV0T)NxI8?s6Kq^uiC?sHP%jmlhm%>BSE_=fRDJ&p3-DuLI?$4cKhm91soD zke+-X+G5`iPt{){*ftzw`dF2FW&L^XyotHE_%re0=^d`c{N?cQh!ppsggZ*18`-Oh z4}jay0`nQ24c29rLE%efWPZyTbv-WTWRz_n*ye=7T~dtJE|h~$ zGm2pQM;YjHsu_J#GfxrfofS1gtKu8r?>Mvyx*CN=g0UV z3LSt3te5z?1ZR98j*lvy=)mIb5YssG(YSiC4qRcd7#WK5w+j!1uy&sMi0#>EN*sPO zgax~ug5PsyfPG?bC_FU^J97RZ*@6(0vmP?w%Zaz9Bkh~;@pcvD{A(2L)s*5~Ia0;; z)25)==pJHLmOsz;hc28Wih{}MV$N`PIR0hXN?yvFuXy}D8X5ii1>5%A#n0E7!mieZ z@TSQeczN#!JR+kVtkX9DueTKAk;C_4=xbMS zZ_ZLwF0}%d95>+B9-GMxRyhS578G+97}Rl)wG?qxXAoos`x8@|S?sGZX3%5u3acb^ z5m#-|1Zs+mK-LqPpm%VT^>KwXC^oyodw=8-FGyPmca56iL%B5Pw9h4MjQbI-3A5u! 
zZm=MHe@*JSxwgSopMAmhC+^Vp&ogZOLtSj(N(}nx{oQDz<^#0cp&70JU`lQx7oh0& zJI0r9{{W|Q5?R1N68Cplj1PP$B0d-_d;LSAruI8r*Tf`k`xkfi2Z{P(j~ z_SR$Xu&~5U+~AQsprPUdlN?I%Gsl~F&WdNThdqPf?~D|Xyl4}edUz%{ZzF~NSO0Ug z2Jy!Gv$^+gHGvBaiIDGi(bQ|$o4xVSKaS3;8WiUL1iYEB;Yfe*hK_lgOkOD5Ko|DS z;Xxf)w3E{c&Cec2A1c(*VQ;a2v+N5z)6+>j-P8fz6m2IIg>ERho5q%ly74=u3*hrB z6~vpPYl-Toc3`H$aUd`~$2;`d9VJ&gp=a0m+!q&Lo4VM{K|GaNn0fyTn7exq&$-N) zJ4;xAt_>SQ$E~vBGo?x_T6PgG+IIl7Bp9GM+4|hW^8;Wt1Ms^i?O@wX6GFmXkaMM>q`HB8iEs+z4A`-W+fqD-7F7yy;raYbl%s759X24`gN_jp=#p3p&xjbl(SR?S3)Ohn| zQvp%v*1N4x=H4O@$=ZwC&Ax<=Jg~=QKDNTzPaW`qhBa#bYdsSA6`gtOd%lQXrbYW^w*WF5wvV4a2GI zLLyXo7SHg726W9k2cuRZG+>&7Y2D}W*1UX#vlo3v9po%jck~Y)S%u($`v`EGzXeVm z%rowMEPf|G{LZUa{03dil|dj=4y`lSfbrWhAosE)7#dalSOpj>Y zSAhD0PJuVB*H{i;?D6Jn6xW3NgvXkcLA7t2arqVdxE{^%SZ1*r@%3O6$3!NOxN^fl zzfVpD{kjzgHh!0fTMQpy8DE7w)`N5C!+?RYJT{$J9n*(aSZyT@t@ojx$~05^sj~37 z;xV>b?_13O%6Xix{DM#t`E$g+V4gkx7rOOsfP3m6qg%nA$b9)S;^MzB;vx4qXSA$` z*ZS}RF?;a?^ra&L(Z@oeL(^;6-Mt1e-FZA`>ll>w+Yw1`{e<3g)G$+BH4bQ7}EoPfSp2Q#V{ZVT{&@XE+{L-vgWEB%(Mm3-MCg#msUe@HU1It$JFBQ;A23yESoO zc3>_zuHMai=30nWHMyZ@v)*%RzDclWoO%T}2T$-ee$Yglr$qCnt7UPI)>oqa3GuKL zs-Q!J7V%yz3H2xo2QOAHL8dE@aQP)!@KbI!KERZM(!w=_7G^;dh#UUz8IvgDQv}R* zY~_UcD{?EfjjSCcGxkMZrvEl#({^0u-hFO~ZyA`0n%9zWfLVdReV+t!<@A7ZKn&pfcN!Iz6tG_@ z>+_~=Spd)cQpay=+yw#}!*f1m;fkqYc;vBAOlC+H2G}Ixsgaei2nz!$j(0eYt0cK@ z{`bJf{*|0XYLIh9cLQ=eY6@=5Y(lb{v8VQ?52IYKt@ktX%BiXawht&j7DB$QlLizQhiWIfcetx9ZynWycOSC3OAQE_VB#WZo;M$7pT(EY!?S zh19%C&|y6dF4SKGiI5-MMIV}A$_{hzKqVTP$GM|-Uv9w{DYe8iO)FIF4sd9ygB9Iv@cF_$sFX`_QcU`J z5pDrQh1Cd-4mNnZ#j)KY2skz|$(+H^RZ3f1xJJ80TbIDa;8K|y^$M@asM((rK zaJlQcpvck|!P*uyGP?p=S{U#~Jf@g@v$DeHy`ngwdwh}Gip4}gn-kbxJP)3T*p6|B zZ(zj2o!E^P?c8C-Cb;X?Eu4|uKuBGD$ufKO8GG!x0zJYH!AY5eWMUO5d^zhUgO`mFR{&%t5Eww6L|5|E^O)IFHLobx+|5!~dAjXvfWh0i3$ zu9X9wKMBN0EWzHZXbYWGr?HPk>T#Q#rHD@+Vgdqn21v!;VWs{%i5Dqt=DpFb=Q(3n zVaIk)JbCvZXKIiGR#+lI@QR%|u2q)A{u@j5W=A}M4&V2KPH}d1>-(3Oi2lfvw>gE< zHz^qNl0Kp(evi@dWsqz&!VyfLZkpLI16z$rR;F49*81EE&m0pG^~)#NN1kZ%E=LW) 
zKl2UYj1@Fm+&K#koKYk6wDuAXn$jGLPvg8Z^RtK@{%uHEeI1&&zz*79s)SvZ8t5iB zfVXd6B$~Lk0Er=F$hB%Cwu-sI-lp^ebA5e;n{y-`Y|q*Wse(s%{i-IO;jet`tLi84 z^RhAs9WX&U=k>tECYAr{|CaJle7EKpC(z>&P~V&hNAi_e9)@)4Mo=K^N`G(Zg|3PD$4BdAqG3zfZ8e_ z;-=wIq^Mnm?L0Y-&tolweg~_GU=>@!EBFLxIp+ox&t>xN4+fxr(#~kF?pu!U@Oswm z4U;f+M-9)L_yh|^w(ySB?c$!@T8duhn8D0u8C0%6o4EJ15PJ8Af{!0fk+nCM>kt_Q zx8w@&X>Zc4?`Rw{SNe@EUQCC7Z`EW%>8=YvuTVM5p`kx*6Y$a7xRbWR+ixO}EJ;A(V#pu+gXM|<4s@D|h6$${AOm<=%Z zqdjsc+rjn}xUfI8{{*KZE};BZt=wr=D?vr#ZE!Rv5=wu~#@s8Dxv9i%e8YqWnk{Pr zz1D8Pn}~AIw_*~1buR+!zwBvTF&f6M-x)aT+z{ROES~o4Z)Rv+^NIeYllX zX_W;WOZ-@Ue{wj(3OeZKvPNjCEd_4(2w46x1Rfc(o+qcT!YlEA3icR8VjbCB&bqNF znBP)cWaC%Cb{C(w1!>##V$)RM*z!z#Xf*`GM^3XEW)EwFfz%1Rn>oIa5pzwz=Y3M0EB2F!?k)4 zykCoc)8)wS36C8go5i!T=tZ9r4bWgq6g=ve51ey;fR`T);WUFST)SDfVfO|*c;ZA4 z%y^)I4SrT(k8~(tvL}i;7Fn(MHT*olj}2itx~jZ|Yfkgx(`JGnmtNz4t|Y;8TSM`^ z9@_ua|52!cvu3t(F7DC=vcIYULo78-PU>JS^z7&ClTkpO4^)BNq33Kp>1LqXA7-4P zu7)bU`*Iii@ZdKGv2Um3D)b+Af)ciKU?a8&b`&lmwmWNsSef%E=wUUi+p3G1RqVxA z>DYl;et%It>nys;Zosv?4&c$(e{hF9&%-VIGGIW^c~0N+-KLKi@pt**Tq_@CSlpDt z4e8D0v|1~pf3Dsjwrm@`{x%W~KahmK=QrS;Q{v(1ixSSa*)?FZXA;&v!gP0veI7;hl8+Y21{O zj`w-5;I@!W=-$+1qK|k0%ofDLo!neZ^%)Pp7WV>sm#xiP@J1b(@dokf;)GKN_eJb$ zlKQyJnqpLV^bZ_aS_wAZtiw;A{)(DF6mF~V9*){dk_SHC=Exd*BHC*qn6u{#dNXSX zdQ>j~JC_FI&vP=+g(VqqX>}a34Ec7=>>^UkpJ%-%fTT9KfEgfbt`)jWPJciOod^GrXXx+{SF*R~MW&N{$eFPYE1WMoU6 zba{v7Iw(UI-9M~2hi|y(PYSPX+yFZjHwXDdx8N=fR@@aa-!a?7UBt<<9FD0*9+51S zqW@Ce8Vy&~f!1%aF!1zt{N@<}7I^hNa;#G{rZz7lsl1ZzBH$AU>q$BMrX>cvO8ZI$Pk3sMiX*GzD~@)s}W=WWcQ!V-2Bvx_2G5M`ymx$ zh%PRFj1u!>QI2>Y`M_-xk`FQ?#SsAT8AnKjJnBIr9a~&GWF0X5cND%IRwV0$r9`$| z4+t|T1AeD@*u(fc$mnYw>US~bPPepUUA^Li#=By9en|+O!!mf+-(_%ppQsY~H!`6N z3`NAh2;$cl71WG3fx({{NbkHe*LO}ARR4Ja56Sh1N)9>14^b>(mh~DnbjlIiQ%=La z&zCuAL2T~BEduPDTQZt{CWw1;=qPx{3gDcupkZ6*VRkjM1b>v>$@BXej-yZ+7}4?% z916{Z{Lcv-&NO4*MwJfKxe3SnkIe>d%kp51maEZ>o+|w9o`i$sEE9TU7xj+lj2hc73+gh zq-zd-p(ze**9haqKC{4*<0E)3Dz;oZ`zf$x@)}kZeGcusIGr`E%?RsE4uf_g8XS#_ z!T(Ol#AZ6*hmzlOukGl*jYe|Ph 
zRWz9Dn8Z_0o^L$0(EM7VLyW_(!Q509T|jGaEL2bDk3xZ&bzm=~N4L?-E+N9J{$ z@(VqHtM1K_ms-xb_G}MI*svJn^ZSrXWDV=qx-)=rs9^1GNae7W;$kMoE$COL4uZ%? z7I*hs?DNtr?$=Olo^qlzlA*-4{y^%%O=XM`$y(ujs4ac23vnoEaA1fl&$nFnLB1=4!hQG2A9HSq1XCcXvDQck3R^Jm*Yxy)#VVLOz%tdJXaUx z6s5w@jhSF%^CVE3u!ALOueo~8t*|tBKU7NXhV$NU;^~|)U_bYV*mkL5&cDpRcrJMj zbOb7jHR3yYGhMrRzYN!apldn!>AB)yvK(jp{0Z~_>VI*@6-?&KP0kG;L-4V(4YYZh znqDoFXA7}B&WpLS=s<`jSlqIZ12|3Kt&5CF{>r(Cn;ps(Zn1zTay8+FQ|&NqhBq8+ zG=)CP4Pkb(38AtVfpx-s6t;whDi?IHXcZ^CQ+XFSY4H!aEz3YFrrpL@H6-FMx}|xa zUl+l3E6zg$xPo(VyBh0mBMHOQ6?lfCS+K=Bi`(_3hO^&D330a{0hz@fu>aCQ^kVK5 z7~b4~=P%EMm%P(BWn>3%{3F8bvbDi@L!vml@gO=B@&ex1kR*iS8A0LaVosF;m+Mkl zip46Yply=j+zypqAXj;T6Epf0Cd~$VhPH$nFcG*^>NP*4Vh@p`--h!9L9;LErWWIyV1z05Zrq(5O{?f zVuR*qv6t*q*ot*g+%-L8U@W-}llDqM$IiWES*539qfOy(VweNNTfX46^U^WRzgJ=A z?<<&=ZyRtoy^V^!*K)MBXu~V?VtjwaAbfPF6r^s`$D=L)++pK>m|(vjBnQ;;x~2S$ zH&ky2PeYR7mCG84p>Wpdruo=4b5|59FOS`rTD(vXli8^?5O*WoC zjU%r`rVeuNF^h!dPHkC0c|T(P{r|s2l0;TUX7-+v*f57$A^Ks63-0%0t?a{}quWmuv%M?kFK7c}sCYm{N6|dY#<4fv$ zaOAcE!KyjusU2-&Se2EQ@F!#ua*eOUU-lZHNUtW;J4+JlJ*ed#cT9pac_}EmCkiYs z{fDz1U(l?E7&G}E3-lH6t6 z^r}}$LY8_G+7=p!f28l^F3vAuy^8wbVw;7@0Ui;B!vIa>Hz-j%LL;*U2tZ_ zDdCXJefHv07{#=u@RO!-gRC-<>(zp7W|ri?54EH`&WD<{OpFsQ z{YZ*0QDlczJk;#V#nS7aVcSw;$edon$OpUvh3L&-o!$UR&&;{U?+7)Kw&3h@)R=$H zmUxIA0~1Wdkp#>L2NyLJ$2k8Pi-1Y^jxk zmr8}3-Lw;@K8+pEOv;)|DoiHh%NvHN#Ea8mvR1XgGf9P9S-Y9}bmRZEU2-sS?Pd6+ zwF_n?slj0_Q|N!m@oP&K;m40f`=)gYpfY|6cWbRB+R84(8;(fup=miJ+c6h?m|czb zsoS#OGEc&tiY9oJmQDM#=b44Y<-pjYZ_Hio3BbgZGtPnrT5qc%@$L`D%HQ_DCjSJ{ z+j|J_`eLvC6m3UTm!llo9_?Ban$g9K#y87X6 zW@MK!$}s&ynsx>=(_`krS+@*2vI9s`OffYt;sBmwD#Lk+miSGP-3!G5%4F9y6|%LX zk&dz3&)U_pyr=92oOV4CyB^dhlRw-vj$EM&L&}*@{b3F;zEOm1XybjZHY4Sl*+_qD z0I_1^k=E_2*fVPvnW9omQV$=3h<;^uTeH7lk;f&Jzp#*Ac8eoi-%C=-&cOFiYe4h! z8(dbt7+Q4J2}w5Tv9~R~VXQ?xGWJ*I{Y;yvhtE{exYJT(gJBKmg(%}ah0-Y6*qB@> zi$*({0*Xf}DMjpUj#|Bauag0*9HJ|cY8Uye}ji0owh&}J72VK8iLFV>YGJ3WKt&CsH zrtBGyV-H7@!}qU1_i#CS(V_|q!nMKZ)mOamWt(98nOrKlKAbhKDgw)84-wjI1fz=! 
z&?_AYm_Bn6uH5?>ZRp8>r;{l>A|XqD1xZ7-vof>Lf0$k!%7Ml0Ao`HZar*J&USe?K zIo@qFi>FY(aD|sF-0{<+`nCJ%iSZ?5T{ulnWNpSyza~*Po#$fprP^G|lzi5yWEHd} z{o#5kH~O*FYwqVr6}fIPm#)|(#;4B({cVSnATi4k*Hk&e*zgM+Jlw+hSF9xC&5xN} zzLP4{K&uDXI;s5IYSh_AAj+oATd7guv2NARu#+XHm;Nn$RO_`d7d8BDmJbvzOmJi_P-o6-jeIA#1QH z7aG3EveTBUGxxm0Nk*aQju+`HuKYOOd}bKrimk(rR>ROcI)NG1Hi7D(pPb;qQK%IB zCRP?bMEDUCG)tE6l{*0`lQQYu6FKHa9m~zQoG9{BQkZjLNjT(kB>hLWS@d>`rmFiV zp~a`0*z`>vDA++sl-K)=4bGkgQ#}{@QXS16k$O%JdK^VlqA7ftFNL;qr;V;CCZU(+ zS4DXtBecW9h#dK}kGEHhL3U9i+{2xQM2C;#oU$s>nrIt1ST>ssRNTZZ|Ftn1Hseiw zbW%C#&6o~Yc^Q5rhmlpL4QO-0F0dYV`7k+2GKtiYm(Az{0#z zd~4Vd*{AQ~()OQ#2d0_$$$?mQ(Z&{&3opX3Jj6o*u_1RxADf*!zksXn_a<3V7unbg z@9Y0+@ zWO|u6XGSq|7wTalk%qQ2xiI3V%^m75U`$r5LvOPMCGZs9 z>oOz=C7IMM`$DF3h9xYraYE8Zn{i=?HhNL&h(eqrk+7O@MJ5bPsu@LJ##Mnos=|hM zvuT~{!*u3~8(4qI8v5rihBhtqCAzy!vC_}iB=tnPaQyE)-1OkJup#~`{XNW_{5ka; zHm^W<`(YIB3#OFElBV+GkD_LIswkwA8|h3SR* z8o=DyXmP$dX?0`Cl971+0XYV-P04~5ZVkS~x>MS3 zF0ktzvbd^rbzatMD`osOoGF}P3r+9c@SGq6(%~{4eB8W=pk4%;{$NGftUtuw826kQ z`UuH!;Td=)(n)>Alz=_72(lE?nM=mEU{g&ns8P=$Wn&(j7RFIBS2<2Tpo#8S{}Ej% zZon0$pEMECInoBB_tX-=Nha{2u@@Hf9tEX1 z8m!DIFuu$2h9VE__p}0{Ilc{C{>qgSr$51kv#zr;O zfQmvo+!^wqrEFu&x-Tz;cOP>Y2bKbth4&e*D3hKbuS80fBCxrBBrNMZMDSDzSnFMh zmWJj6IaWYN+qB|=EuCCp{WP3wm`bR@V3J$?0|b+^__kU4WMj)B=J&Qj#wMVF^BUCQ zS(Roc&}kfWzEEPWu3tswG{;jDw#Z51ZpbNbaYvlHiu~c++Z@ z=tQA9g-Jxdb(pZO3iOPJ1JneuY>;kU&ySv!WbIrBafjw%X#A>15@x>RvcJf&t0YdL zXxkcMykH0dCf6eA4?1A|yBRC~k1K0xg@vc%koUcb?A~oT?6SN9IQwiB z%8hwI=L}kMM?Bt>inJrN?JP6i7{}=KkQ=a3iN~8hO@i57rCeyiE4H-x2nm*cXmY8# zo&2(`CLzPqMEqS9vXk>L{cG?ZTL^v%7wX^Odj6Y%nv%})dg0sX^=2c?%sqD@-%SPv zw&amlmSaeu`*=R`+5$eK{TJ2$bC7*6?$7&XmJs8+Gf6v7gPZFK@VPdIto1y=&d^vx zzO0%;sIST7vA~Zzn|OwDSkTK=oONPUd;_sf{9=%MP=>TxPO^a(0o?JC7JM^R7q?LnvH2QFKdQzaP8~}=m|g~dsf}b%Z!&2X`B-NwUqL`bJ%2K^3`=?1@QWne$?C2k zu0*C4EgvTiztmItIhG7x9=HUH-<*ec%Z0IhqPLTg))Hb-+(|p>IteXbb-}8)qwJB+ zXAmP{!j?=}#ANj+kozhucxV2AoyW%U$S4YY2bSQOqI1D#l{j-l#v3a93{g;U2bKeE z{@`6n{>QB#(j_0ocRg1m|E(FKJ%7hD8%@QKf3rMqa5$4OxJqE((=Pf;t_|sV6-tGF 
zdyH=Gk>kwThS7|oM7VGB7!F%nkr#z^^lIq@wt3Pb{=tK-xcE~69)HV*xO|Ni45#N~ zeJ&RE=s(55nVH0u`p2K$CXFrE*`P8Jvw^xf7x^Bo#pm*M3I9Hyd=`YjB^3+~Gzjml-DOy3c$Td@Y_e&4~LXEJF&hek-L@JFxHoyd+i zj?jf}BDnF3if;XIli8z0~T$WqX z-uQG9@3xPW-o<0oe$8a? z8RNhYOmJs+b`Ie^FI`1%sPSaS^APl=PKHgKDUH=u=aIwjT0rP?2idNl3U`g#v69$$ zXkT){v`p&}ouc`%S5hQ&4KW?-+KB_c zTo7(rB*!h0_`=!0OyG0F&Qe8pKQS|k?tw0t7e(j-b;uNk%vZ_p=a*Rkvaqb04(YB^r0#}lu`x-V*3?oB9^2j_jnzDaJFMTbo17mD(FQkrb3<#T9xkpwQ* zPGr#a16Zh?1ncDuAhamqA6s0+`tw*mH^-ioQvsaGk|N~uc2wli9pfdj3BOLw0lVL% z@qLZMthR;~F_=e42SJSGpK!B_>=-ik-)*M*a3jpoH(-B%6=Tk=h#>y#c*tG<7^WW+ z zb}?f1o6+ZgmVAfr4aQ-9KD_j7q00nrWW|A0>fe)QJoSA#M-OV^&J8D_@!U3ILhQ-) z?;q*5*a%kQ>rVc~xOzN%?HDfmyNTG{6&I-H?+2xrFW`U03ucr*B2sax<`>0X@FJ9g z-fQKO^G{EswijlwRO$}V&i+f{w3|R$&VtoD?{8Al`BLOy6wtq-Vn}j}Ccppr6r64n z1Xotcp*8Qlkud2L@{kg;TQ`Kk9OW_CKX)oG+&f5xx9A{wKR42ou0UQd*TC8$U%$F! zA1P5CK-ucK;BYLEZ@9OIPO!{?wH3jr?d3MoJaG@Ga<|co-@~ZQ%aA->Dw?IbGM8I+ z-culX`2qepEKbt?J4dQs3WX6LlDUlg0$!aOj~q>(z^TCZ%%cXJ@{gcBvePBH-8NO_2FMCy8Qw*oqqxmSKN4q>JMzbK^T~v+6v4sDe`N* z7~ZKA%zixMh%f%@Cb^S(pv<`unOswZ*(4aQx4*-l73G5Gb6-$*?nbdTUH9PO{8qH( za6H6ae1&ZNEkVrX5+1TP!$R{S=#enRWYn9K1WUs1o#UBvUMft<+bG!AS3#FCXX&5E z4f)MCjlpzZ5Z@amMoIgvfD*lNR6$=k9lk(}*IKlcXywengR`#)YwjM$IB6YM?(4_u zUM_;+XVS=cQV#7Cw-$Ll9?$!JH=t#%xbvFJt@JlvO#xrYY8>EF4An~SIpcv$?nz%d z8IxvVBL8;`zuB>gJXBs_zUtmP*rl2*c)H^XmYRD>=xHa7SdA%Y+uH(u(@O{1_WlD# z{PP_0VYewHr?!!CR+?mg!&hRvCxHLHxq_1SJ6!0>D>4gZYiB1&9*7%K5U5`xheyvx$K1If;yCWw38hyF5}i& z_u{5|Q$cNaAsLH0&~vfj|J8r$a|irIO^V6&{)$KbrNMX0ax=S!LaKkQDU+Qxfj@35 z%2ar6rK4;b0TO--BqcgXir8D`6%^?ElryI#v>prM|SOogtWQa2JlMje&!OYSC zp3YuGhG-)$Zk9Oq>)HliqI!5o-vD0D(-dEoh`|pQ2C%OcW|Gt9hscS;xlI0%QnS&3 zO(fO)4IB7AAMAK5Hho|mGmpJZqC`2hrz zv!3W2P@>~g%TRXH1n!xrtr<8hMHcLP0xFYJ$OCOvX8iZ5T=1H+{6lv&93}P@A9FfQ zPIm7zsXeNO{~o&x)+2SOV*FU%bLATI)M+%HniPpLn#b}2QEt^-SpjQ3=ShI-B+-7r zXRx?8hW+u%%*=JKF>=#TVxpGDk%N~!`LQN}c)yDfmc35l#NF<5XO{3HcC9XZ|4=_z zmQ6$ROk#Kw)2qT+`z6quYnutVQ~;WOmAGSbUz_5gP&I!h2`#6i1u1i3k+#vlJwD{N04 zi?%&*V`F+^r_2KTz3y1GO`xR{mw);C)<(E!P)%U 
zQlQJCzEG_ThoL4Tk6(39jGgKw1|{n4Q0I1t45V-1_O@`=LTG(Gkx>oSVx|UL@JA=hgXin3e2b4cmFBriv>P>p zS|q=d**{yGKVwu)o?R}(FHTLQau0fm7Ex?vrRNK{*r*qvmf?f4#9q*y>;JNA!ze!2 zN}V=ae1IRi|46@c-ekhOBiN={2IdmgZ|E0aHZPab{bA462FQ{>cGF>n?Jpu_B17(1+VJdP z0gofc(*cdLT();3-+D-qU%-Zw+&n*6JoE^5IB62=W;=F?WjV2&=0SS-TC%|MG5bRC z6Lq~v2XTiUFq7NHL1lXbY;tWuO>-x3nyX#8nmh6!FKL9&G;AP?LgwQ*`2YETZ_8ZN zTa?3e{E~*r>~WYUkzjV;=OlHa&WHr#o!PM7VlH*?$2HXU^a}fUs%U_| zuLWxq#z3G-7)&##XjMyjw)U&Y181J0{QMWFJ1-j|wyZRYirj_&XsW@f{fRjJkvf_3 z&x!B5co-FJY~XY*{)S;l<0MiH@%gn=J{J- zyUU%t4BCPkR-MN~^ZdBZCt;iv)5zrt6Bui`I~ZM=h0d%#OuY3EQ3uY%aLYyWw8yC?lR>5TxiSBjglJ0855ra{#e@qkE zcxWHE$@=6M1BnbE)Fw8I)$!Wcau+g^ce*Y{sQH z?0lsRcViF8pQw*ow>2`}A;(Zyy(-BwH2^G#Nit{XK)GTC`#}1s#-r!5A+UzLA`QuGIB&O9G!s-n=X^+}UH$Lk z-LV3?>AWrdH7I~syj+h1%iGArm5oBx;!GUUA1FM#hV)?&Q_G|S@l7eRJ~pY?jmhh zVoN19s+kGDbhO#Z&wt>JEd}aNAHt7`O`P`8S3)Cg9q#CoK{{Xd6N)3J@CL_UT+tO^ z7d(>S?&WA z{bE`LK6YjgSNyggM?MH9FWOg;X}`}vPWnkc!(Wm3{S+|!rq(lczA2oWi5mZOU>vKT z{tZ7hFQM}$uO?*=cT&?f{y8l z6R$+#i`T4)IG=YlWhxTNfcj;=W5d^C(({&pnpR=^{U_ zeicC_#-Zc0J`%ryrCh`Oh4A~;Ta+8Rh`0Lo7J9~vt1H7@CLS1OaK)(b&~kjnlnrN%47%DpQ(qP@M3miE)3BPn#{)u$p&Egxr4J9wQC6>8Z6 zHCSZ!P&gv;$Nc*)lc}wqWbj-97E8Y*?2I-=9+pj_oOwMPel!@!^aV&f`T_ku#*quS zG)ziNFm1493f~+ft@q_>2UrB^U|HuPoF&=CjnOJ+6MhGfkxxHOn*QD;gJ&9ujlaCP zZkh&hGiem8y*VC!C=Ce*T+BJQ$6CmJIG6upyoGN5(8gqlUxDw}p5TOCx#aKHdPvm% zO|<{=e0=N$>hR%fYz5`ZKb&)pG)n_wU!w4V=pSrptPi(E(SW=0G?gj7BMAzJTRBOQ4pjR!Q}}9*7H9PQ zI4n8v9Xl?MLLE`}I2-0Z(DrTISgE_X^2!TxH*Oo<7e5`IyjDh`Iq%{AP%YkgP7`&k z$mG12g@7N9#WuTx*ou2M1cOU_@%?piaQ_BNk`$$cGX)&?>6JH;+&_j}Hda!UgHl3L zqW!kV%eR1Omm?XM^heNEb{GwI?=-8*%;y62JizJ8deX75kXmN!O4VE#5AVDxNPvA6 z6B4lrJCyh03BmF3#X5k~p3%u%Y<5D(#)PP*n1kYVC-g9<4o^y@@vYs_xa{dBffZLu z6*h{qeKiVI<}R4lAywS6GX< zH*gQzUOY_4Z2XU|p#zBFI*es*z9$Y+^+I=_OsrRWTbQGHg?7D56N}AlFu_LyU*44} z^lQQ>F|>#IgDlwAH)Fv6auzpXbpRdpZ-8BN$DIT?eWH{SW1PW$EhoV5KT1kUn8SLP|}^p{N*sxrh^VN+wuP0KWL93#Mjuy<&~^(>dAg zQ+UaSbJTX{(~SDsx!|^EJ>HqCMXpV>hHvhx$j`Y|WVwwUmHV=sjhy{~ocX~Ki(kjV 
z{Y)*s{^%zbex3~jBNv$N)EDsY`WA59`4}#?kKv30Im+#f78jxUnbvIoi~i7sxVJ?P zN#^Zf%{|9*^OcQoi$ga$t$zXvI((4(KJvf%f9(8|Ybh(F4>(h}+_wkU%T6`B|GYt% z5Pg>Rc2p!OqFi8clr&ApUdKAivju(S`oyYrKjZgq38?F9!5Fa?$kE*m`MxG#JAEmf zoMOt0mP+BZKKaDYyc$Y2?BT5KL(r1W|L|%zDPD1FF0q?&6Y&p|(T${;Y|(-9P?=Bw zrA`ra>9r!Wm-f5BlBr{+)hmML)GJIv?=^Zs##FMZA_y~XYk-^zB03~yPPw`6a2IP)RA1yN&%?J#n&?Y%8o;qC4 z_{D31s_5O5XS13-|MQd*6C6OtFHGTn3SXn^U(bqI6er=J6d`&=rS!<sM&3XE zJ^J~t4nH#E$Qr>M)00{>HeVhD46Mf6l8cGwvN7f-sZJE4G9RtjT2B5fe#~VJ8{rGG zeQ=-eAZGQ;V0qCF(b?80Xy0jrcH5QE!J^El^G;d5?fpL7pEHWvni9FEW3#!7j(fS3 za}A99xnoc_G#TlgTgDr>{t*@pNuxdEenaX>QT{}KEjF0*iF^HeFZo{fhg&uQ;3VG5 z-?O)*H7dJdi&Qtd{wxW>NVHRpPNm9u{C7PF>$x@N!fGu3e!BJeqeNw1!!CRtu1amv|wu0c?KeToATs|P=2`DxFBrSGErr-56@KifnGu28!DpMwt z+a(hGr+HT>eXTU=R{Jr?Upa&SRQ!q&Uw90!{*wkl8kl^mddcniB4Bsd*P(^mMY)o{ zpJD5x=cp@bAKYy@h>L}@p=2ma;JEe}75B=R)r)%oZ@k6vB}ogoop%;}z0i+7{4m2y zV^46x%=>WWwjb_lW5~2A4iK98ln#qfWLi{>VC{xNdgS6W+TrXN{%mYLzPKh(#BKX3 zj0@I>Q8O)yn=^)CEK5n0_I~nD{|+uuZ5K`)AC5ZWyI3YCg+1yQ0eL#|NY5^u?$;d4 z>0It1^+EIK^QN+VpIxH%DSh3T!KsI+`3$a}C#5({q zg+nC#p)>#8JB_kUG2@Pp+s?l`c$>ug(nKag8JzM?!WgH^q8WW1*6Y@ClKM!3^q1HW zTjy0=wd+YLS+SO@mEF$NRb9cDT?^6Em!lbLuCb2q+u2FeF5qO!2FC`e5Pjh!e4)GX zfAt?R^cpRRk7JxHtMEn-12Ea7EDT=dMEQ)YW1Q-e$>Q)7EU{@3J^aoC62B>!4m`L- zp5>-8k=yHmscV8S-TR5aPlrTKxev3=V!=qKh7VgefcI{v`CBjCNPTT5cWkHy&C96A z7OJ7VUZNgfuzLqKm_Gp@aL-|{pRp%XLx4nwpP|1`E;kE-LAY^>W`8`afS4T=`>OQ^ zt(#p;^8O9uS1kr)j==ulUAF zJjg+ttQA3;c?Rz;pNCEnE!k+Y4(t84mz;g-gzK_Y@oeT6#5mgs4(({fA1k*(ug87d z@?3QGJBRSnkMqze+kaehwI+$_d&U)d2Vsqa6k!^C2^wPs%X{h>h2y+HuX#6`mAZz$ zul`vyPZU88nBl|48vGmJa-t&8JA6;N>0EhadBBCOf?0U5MrkZETNsQJgQa#y~W z1NSBz>?3N>V$D3R26e+~V-qyxxgscAza_JTh@Nvw2#(Wb(f2K{plm@mp73WjvR6NX z1Sl5LCmq9!eDYaE@#})1^INgB=)byC3`L%zwD4%ta!#i#mZ&ZH!#OUP3%!0WsMqfw zDN?%vc3%65#4u-S)ewx~e;ejDxov}&se#04%UNoIbRhMyZxKANtRi{Df!!iC zgkQM+z@ty&p|gHBTER%O@2-kr=};@O#B?6KbZ|uHc4gw}Pnz(xZAbB@C5r^n4yBZ> z*lw2F=?3SwxT28nzpb*D^4(sRR+wh&cq^_pPY3~fZeg4WR)mq_R2Sq>e{%3D}83jM{KhqLvg2J 
zqNb>(5+tCX-vO#GkFY(Z8iWen4-+qlH23Xek>v>uDt*2!_f|B`b+b_mk5-NaQ6w4# z&huhbq}FmXLf;{BF}8{mKWe3Xu$oKFdaM-Ynk-)%d`u&B7?puc-rA@FxXi~a*_$W@KD1|D+)vk zTP+>4s~u1Oo5pRC(}#!oIb{F&0Mhq*1YWjp=fCF+0vD&q45TG8e@jz2N!xFvD!Yk! zxAGOfdHfaKt|B1P&EZt&#RRl{yA&6vDe@{610a0P1_+Zf%hP#>Y-78>|+oQxRyNBO;;0rQ)YlPO`ju7PxC6LjKsn~kw zO_;Vcg6vwb0JbDgW|#c8#H9K9Ah*;!g5GjpiL|ZhA>XUyv43%wf5=BG%IzgJ$ey$)i~hxz$fNc=XVc z7krDNYyD3`(~}M8s{>E?Xk8I|DGB{#JGldK@4#u|DDHbcmb)S~~VTq7|%UYZ*JOF(_|ubBl)$)fOt z`!J)XigZf8r&ONQQLE!NL70pVfBWNirhZd8mZ-A>xnmz-`-LR#Uhh+8vC{>#{zWv& zHYtX_xC9hiB@Q!R9meg{Qyi0RBiNWdL~YN|V!N7iV8G@A+Bw@4I=xPz&pR$6g$-}f zqhEKptK$=3+9Q2@^TR~4Nlp?b^p0b)^;MZTfjK)#|Um)7^~3xg-oV@0`vOzc{AjyvUlogL_yOI-f+nOI*)C(pB! zi29`We9MXFFi#~<;QYQE8w<|}WozED2e1F)F3tDjw?4l}oyl!wb{L$58_n18T9p9e z9G(H|;%*RgT9cQUT}$n4xWhiPoy+gEk0KwoPayFz6Cg@`72I4}4*%KxU}P-x$u_G; z;BLQw-0Yjo)wgV>+?298#@mQ_kmHS=e+nU>^C&v)6UPR2Pv>;3l5nuYH}t~b9bE2g zM%}M3{ICAUZKKh*o#k}rfoPniqXozIT{qh@R*L#^rG$QSb}Lz!lZDUDGoTYf4B*t< zYXY5z$H)zXE=GSt8GJb(0WVJ*6UUt$5Zo9A_WKJ!_dyJwR}g_q+hq8e7qtnWaezBw zm5Yo$Jz>wJIW8mcbo8UI)9(kee0RKhE<2f%!;pm&U^m12Y$m;vdjf{=PO`=&? zm$j`#+sTexT4c=c?JWm|!S}S2lr5v!K9f^J9tc|Zv97Ae<t+k~ z&&}XU4$h$uyM71T#l>Ws>uFTrcol2xf6O+XN#aiM6F85o5XSrt1Jwz#Xm{de(x$Xc zc)dfLTi*Txl7!R2(ykHZFZjqcYlsoHwvkh*)q%e&75LDG|7c3X2^^-Vqtj`BKzoxo z{2kehE-cUBg3Bu*>h5W*?RfXNF|0LLlZsf=P13Ve-&!F`2ZZfj_k&3!aUf5(r9HVjoj! 
zYFKX^cSGws7t=PA*G;@n#e1hRGj>ITBAbUlBRkSmnhK>6@#LrYCo-`rjqA!?}&VF&KdfbH&9_RHfj6U4qhOa(V zg$?;@%rcIQr?!pzk4{G-O{iuC&gjynXZ{$%E?)J54C%!r&MAmlbvqX7%mGY|zk`xw zD127t!NTe|RLZX7EzUaQ+L3Q0Yveu1hiY&(fg$L>rkgl%6!UV-0LkxaK@aCk;0gJn zGk41%^i6LCtF>A5>6faaKIu42o!igEUR(lyu6<;@zciP z@ECL4jDO zWun#NRG3dOt2w>X3;91kTxgY^6a3_+0DU&gkhI*0#w)M}%tHj#-&5x9x_ls?3T6mi ziN5a{#)MB^cg%eI(lFAt%+PGdeh(14^jN6$Hi=`s5>Vu2CG#HvTWPJ68(6t+A<2_- zgUQ`;{7PqYvgeT>Kc%{spI9N%Z)Qqx{@;7})3>Jc)Po(2*p!4=yGPvqRVq zZ!Z!j!%*TRI%|j7A@*F13a#g5hEi*qn9S4Suwv46SQ;RQTdpW`epEXbc#eXG$C_9d z){}290`b%{q71v}*RbCcc`GC{RqX~KMan@=UtL&X9!vFF*)vuxuSsbgz{c96w4_-K zOnUj%bc>!L?;c^#4(Yg(s~URbaKuAmaUhVi%rPWQ(Uzpr#K_$G3Iijv&+r-1jpW47 zQ7%+c8%Hj1gFSuUd8#9eH=r6Z_D#gesv2DRqk6LARuN&i26~;grZ7BBkIZwZV9yWm zL{|ibUX8UQ#}~&3PQ6_KE&}G z;`pzPf&BDK&q(U`jr>jN5wKYmY zu)NM}Fq}Z}tCXYUTh2}XZ|Y_Aa{VMkNd*!0*x-0jX@!?BAxP!JX*8*55D6$k3U~9i~eym71U;&LGRQT z6DzMNuyK6~+Sz7}Rz+Qc3BNvIFP$Ik;@0CPx7%B=r-X5hCpq|T3{hm*@`$($xKJF^fwzL?9WT^&dDDZIy*{_0Tn;j+wQ zlQ7Z`Ky+tsz+<1K3I!%Xs58BQ^_R(HwP*{tKS2`NswL5>msGii;tNFU)lz!2<0YA* zenxK`9RkB*wWwPq0;jZBac_ULvr`&ok&baw1>s8ZWZvFFGOO(o|9eaiR1Lfq{P@|2 z-#p$%9iKCS%Ubk^tG&0Cf0+?XZTY9b4h&_(wYMkm=BfY^ZQ=qos@0?+L7PwG;;2!T z37ks1CI2ixgRH&y4QdZb!1`<#2z_t}W-a)`)c(;VBkW^n`=?DZ9=mh?=Z{bgNhdi~ zyG6{&<`C?|TR@YU5&Gm8&t8}{k*m>lz&C$ltW!J!j%I@>eM`ds?*GoegnjY+e$5!XA?i1|<1v-UITUfLPMku+v1xc_^&&n-AjT_pXyAky zrO3OgnEiP|kt95pCAUXwXxKDhw(?*lPyz||%Yz6wojs9_O{17ojmLIFJihzz3GglE$eL%v;ZCF!f12_e~)m&(r@(8hqMGZ@x7-r=`d@_vFA+w=#O- zw^@u!9LM!eP9wz-%LpzW$A(HrX^(e}Q1)jQ)jZ}9f(`Fkm+t*Y>53L?82=YX)>VPw zyft*Qk`?>i=_OIzXN!{4jIigjyRdOgj7f`ZBU&lEghvw$(Z%Hk!8mvx@pJEkf$oi@OpF)xI2?jg>ae+nr%6>%1a zo&Z^s%U$>E#}Uu|k({4EOB|gCh2fLY{IUIT?)XCtU31W+z!jX|>?Fw9lZiiNr?VP; z1A-u*LpX3tJiM24BrO4hX37TZxmLpv^6UCLR)2#SB)Ej4WhOVsJIWOVr*@E^OL^1O zgw-gq;)Gf2j3Np_IOeiPTZsNIo>4VXocUgom2`;15qyVOpmO zhpsm;kB?Jmu_8-;-&hJJB}dQ`!>jn?umWyKT9RtRb%HowS*^Uy-?x1 zC%7|y8B$Y-M~!?ml8$cUay_hJ-`B^;cgcOxEZccJJD{6Z-*}Bq$@zqLcI~GX?`YHN zMtjNAb`vb~>peMps8qOIu^Ou--4h-hTTOp*SxiFHUxQJth*LG|h){}t#GUZD!mQ3U 
zVw+o3;N{v%&UZsFz2u`j_r-Y=sj|FFbx3ED1#1U&K8w~q#VzYc9=Dd@J2FSPe)W9T zPxl+>d?_~>b7&q(Th9=!Ct18>UozyFiV1#8d*RpnZwjaHzRYGw6^qyk`n=8771Wc^ zP{vSs3Ail}!cxH$=`rzv52gO1ENC_PuhNIQ6LyfTEBj1dFEu6qG|s@=8!dR_YE7{E zC*pb^xXC>8c?ee*9|h0v(qti*#9pz{rFtZIuE=wc{#N(_<%(Uwrn}|P&QLS<4mZpy z-gL&_{*R+ zhD6bvAwv?Cl2n>UrKG;+_xb&OpT~0_&t2!Nz1MraUc-_xz8eOkvor8S%m3&9hYvV` zMtr!*95{}M>F;O6Ng|aT-f>1!H>ZGkcR-2CSWF?o6SbIeyK>0iyDC-}kvVPIoxqy6 zdB7k$fuU*B1q0gxAg#v=qQ*`J^Sk5t@O3-pcXw< z7E2~*90XIFK>BRF9xNHyffFCcg0bi}^Uu7II0m<)$)7orbT(Odo*bdQm%c#PB4<9f z`US}3ePGP>eApQyiV+j=f_i#Zux3k#gTwVsW?jWlx+KF%KvGv?zk~0%`bk;%-25qU zQT{ZXFj1i;-_n@O*`eIzW14(*oS4+KIFrwZhf;$QEXsf9OT@;rU|7q3k}hztZm+z` z7tZ>HM~oEVHoG0PXqgfQ>kzU~N0II`52w23j-WNOicK$y7g;2WuuOIcv(rqM=FGT7 zug7#^HQi=1Wvl~wc)1D$qUrPg4??Hom zE^RxYW|dqQgymxwi9Pgs(Re6@*KKL^PG^I3x5XOiou}^b)I*Q2+rF5KXul`y*O`MT zqXxJ!)A1K$A@6mjAG>G=(4!}#;qd8jY+Y4D9&U^#R-5yP#Wokw1(i~%{fPBkb7Cwg z?2ExGUx+}3q~PsY8_2v%7G%e!Be=Y9H$;ajkPrts+Ai)Q=AAzo+etd?!5fpH^3^V8 zlCb|jaCQ-`wMizrKg{_A<;Rjkxu3{J{kxL1ylN&?l0skl7*g9jhPYNAl7#&jKrU3@ z;NDw0at$-qLYdEV^!dzI=HrSJsQ61dy&J91%(47OJy$F;-Pq#+z7eWq>H7}SGba>f z=bE5hX%p!4o9itXj*F*DPW#f9v7LOG@l|kavbQ>M@EG}XZokyP*A6+9e?k{_yYO#= zPD;imI9Lm*ai0H0+7vS}|Ika7&7HxjLSDKO^gofvtvRBe4lgNo0Phc!}wAK zPf{s2gg-QS2(@utfy|es<0kLB?JyZt?n)ej z)rI=(xg8tnyPQJl(80E(!h1RjdZ$JTqsxGQ{Q-WvuBM{kV#YA|5jRGAJs;m=M4Dty zl5gX@=)_@VqARzq5Z>oFG%B4U(`Cx3c!i<$G~pR^)Mpn?Y%im82am>|_N^xytA)&c zxa4V#qtr8c!ziBf*99L+4KXL6%x~&|LGM zY%e_rH(x4}ya;Q))LmctG8)|NWo)dv6g*=W=(U901)8mtP_r&CQH(s0)o zl8)LQlsndfZ~UN+U;jM>*)zW4E0JD&-17l6?~ah$|EnzCMAgWTz~K_F{q}g0uL3ow z`A5fg>oUtd6dB&75%g3y@Dut?xFyB9uxech^xZe2v41S_ap&#a;H)nE_Gv9u{rVeH zcHhTS{VZYR)o$XSWdhP=cSJIph;i_{$_?H40_vT9V0~v_xH?*nJj>WhDz8|OoHwKK z=~wsQ?UcEsbCv_$ZoCL=Z?9&VpF3Eev{0Df`;w`=P{$~2RNzn8JtlPGLH_X)V`)~) zcqlXbCHbnN!fq?_>scXi4JmJA8feCTeE=Q?&-$N@0aCtWO~6@TN~yw1$5OB!H4i_ z5Pz|MJ-@(k2xAiCfZ`X#^6KMDXl=PYeeB4A&g@Lsba^oSs~*OMrW~Z!SB>c_oJ8e+ zRiVKzFH6NEA0lbi3Rdzailjt%f#c~C9H7<2`J_i8uOV;9CGlReXo@`zkJKc;brt_R 
z|GUh-gWsABWGD1D5!*ij@bu_JiTr0TY4hj5%ww(d^!~0VWJk{gCf3Ir_PuQED{H_w&6YcN zXD&Pc(ph@_{WuW+P^6ivdOT@L0>!OPB#AesrPqN?7`a+F4-Cb#$7n&kxh22#$58(J zgJ3E@n)1qPFTf__DyDIqBm2TU46RD6qAru;*^$lVWV1mUv+A)5ov<%hT6MzI4P$sZF`q);Q@wiJO|86}x zEvIf1u~rM$nGmMQC}QG9(?<<9>BHeQUPzNc3ccUO7dYEic2a8VUmRex9_`{OC&NCn?`j3Q0i zGJ?$RkC%)a&*L5T<2j?@{#;SzG>BlnAcIC%W|NTFxxxif^iqkbTX>Ux_!wf66S@LA z0xPlE-B7ZahNHJiIb4*}5IVGHr=@iKa{AhP63w1h#NQi{EATh7MK>EF$bxj@1tF1pG9iCOgHm7<(Y2-N)VKsp&{Pu_F{%GnK|A$UEvPasMd!GC2r^QF` z^XR29LbfLJF_8<9z_bm4@ag?ycAi*?23GSiYnXHs2eR+|A98E= zcCn~pfi$fwg$W2}=Rqp_JHG``&jYrZ{1Dl@J4y|z!Op;D{BrS8X5u99k_P`PbaF&8Jz=g$=Z*{JZ@6pIwA}%$zGEKSuj7Mu zeNg9n{ReSJzds;aUlEJ!ZRug%taAB6N*sqK`+XgN(NYclHyuifFryN}k0s1-GneR+b{)Ja9OvI43wsU)6B` ztZtyhuqnvyP7!M$68bd9TKu|0g(rU_C3&0nAgfYLYc}Y^_~=%Abb}P@GH$fsr#3!q z83`-yZ{}~_X<&99s{yswJ%gO=-6&dVD7SL| zPF(JrO84xj0)-icc=x7WQm4^OZY}voP-lh6Hf00T`QbYEujwcx#AM+$$p>J&rxIDy zGKd7PyMX&fJx6ncGoiKJn9LSt_^+Kuz@^Re*q3N48xWliyAo@d`+7ypNY^&HCntae zIZoq$Svg5Zj~fkH<8DX-r!s7;K{q`xU@?vAovDc{Td?MV!1gvGihx1Da*R7 z68hpv99@_<%({862KC7?6X$F^PwE;5NTVDI(Xu9jTbv{KF^)r+<8S`4CT|KM_pc^Y zwZ_te7sk+Li$65>DDb-DF%xZ6!QK3_ftR}&O?93c(S3*Q;BIpW_*!aGvktZ4kMCtE zvHOdYb-(>r|7{n}BZK>;O!BjnmN^ z=}9+t`Xj4n3+VdUj*uL*ikP+_{G5b1T8z~p404Hkc?dZ?FV#M7bpH(~h7zI90V z%?i-B9!?rm*YmG_%F*E3VSInPwbiw<2RQeGjO3j5dK{)Ncuwt9`LZ>tOk-1mbgs%Z zSQi|~Q&l;xKl25F;vA@Um?StN0+CO&8FzpABkVb^m1=tppwszYd{HzRc00zBL)UD< z*QQ96Fi(XkV-vWLG6mY-JD7xDJp|Pr3dADqDPFMpCzgHi1@+1Og6|fGh(o}7n(E{R z(=N_nWv6ds2j4sfkKd&;<$K$hw^j;#@~mQ#vM7r8$r&QG5qKC6{%J_heHO7!sj|FJ zMmWuD$Rhp&sw8{G*4X^iKyK91T25_o5vVk5z~4`oFe7W`ppk!-`CQ#7#=dn5AK(yg zy8GuhnDvt@D*D0kDL9p-H{2D3dE==)L)>ZUc4cd7T}jbv_1nKjki zblqe8p^{s4=k?XJqfP`J-PhrYry4C@DdtkX@1)182^ELs(hvh9bWb^5+VMvP$2qKF zr<)!n`}}=i-pngFTe+E=RJ0Do$;!din=)jg$tddPEGG50RR8P$>rX0R`c#E&$-PgS zt9OC!`#0in^CQw9C-a$ao=G(SqdXjV@62Gv7EG2zSjqjqM>jlL%&9tS)5Dvd!uU@I zg|iFL`n+0Lba((|_B`dQW17jmF;0B?-vGL1eI~N%evgMnsX+3i4Bnfa!RPDjBEuU! 
z$XO^e9P2$R7y;X%f4egGYvy0Lwr(01U}De4L}t@y$B8h|#)RJS zRp+5U25LNJlQ1)mE?H&BKFr(?bMj_lS?vUJvt}IMC1b+JRvn`gzX+thTnVj%5@!6k zD7L0B9XVW^%%8h)kIfwZiY(apoAG;anD(`HOJ4wO}_a@$#iJ zhRd?{NvAl`=l%Ss?i<*;;1jvD!INUcxt1fm?MTt~#c=lSKRmlUkD8R}Tjwi}BrtzJ z4zs>br{5TYkJ_k^%EG~PZdV!ISeF447Ws1>)w4wBy*~(k?Mmi^;w(BLitsIGfXr(i z2%=5B+?&le(4E_5=wZuAwl}>%U@UrI=?4eCt9q=oXH`0qn?8;v4g@H?z!MwK;h5_= zCCpErqN?01STHu0m)#o63`g}~v8)OQjE)q%gQ3tCWl6rdXQLu^1-)?D0D}9Lpf!eB zqM|5u*r+gzn#p{l4_+UVENaU`@-dTngZHs0Yw=QWReX$FM{VYR?x}=Bn+EW&XWX?i z&>uveO#L9f`BiYGC6A^3tRi3UWX?P!snWR9cVW+r0AAh5h8y+t9>Hs)p|Q!52Bnpw zyH{+us(lH#>tz|0_0y(r_&QvaxETV>kC0MZU2s+%Xw`q?gH&(m8t#Y6Jt&q8C2y-d zpxZ@CuFV-tMx_*Eg9|G#bD|f9Kio(Dl})3=T1`NEmzZ@i|H!C1od>m_mCUz{66U&> z9DlezoAeJ1fR|UFcA&fr1dgEG$H#rS6S##{NR7S1$08v5Jg(tR}Zr zrW}r%8o-zH7pPQSTVRr_^AkR7;T`_nl^QXv-29$kKBVv(H8HiLA72cE@m3k2EAXtE zbS84W4J&E>Fg?2DY9@_AG01Ucp49uNS{gHj^^nacTAO^q`uk^bliP9WSG_w-*1jO>vHA;bB&eK{d@GN?jUZv(r~IV?JDp_fwY7SpcR5g&t=>*a4c-%laDLHf^kdv z!FvPf%!P+hFk6Nvo%=~Xk4)m7bSUrnB7iLO@*`8*3b;Y>PBiiKa@yH6imjVzBx#Di z2NxUkxYC#h@K(W)dzWs`E{aN_KPGF#@JVVk&RK@f=!=5_!x@5S#+aUZslX1i3xoq< zEAhmTL~@a{;vEb|^T)n>P=)bJ_~v^LAkOGMqque@yYbEr)VA|8<+YO7?vo!#w^JHp zz1@&1?p`C6&pd_uE;MsT&7!fvvMq4%!a?}@Nr7f;3T2dh_i-C1kK|v~5g7CH;7~_bnkK^^|K&yoFwkH%2pzjXS89g(07(_co;>Tm}BD?D~pdsun;H_Ppzh0AQ_|3Cq3hA@s6Rp~klW>3K5ApU_a`@<}Jh(Ie9Bm}I z(hp~Xq#BL8;ZM2-zjbyv>u;4sZYhj~>8&;3mb(L;7}&?YUws;HaoauW87SO{W8v zm@~RxzSGR*cSh&a4M4uNVTa>6M9w||*ju_w$9@98O|qXRYsgYFA^&=J?KIeTP+(q9>SPbCG^L)smC$T8mM*_r!j1g8 zMCvv(1pPA6WwSCUF<5O3M`z5$(ph`B)P5!OL^FVRDr%5xM4le=zl6iK#s1g-KN1{K z>$L~4wYrDNoslZgA?GSSo7yblKZi5vZ9C~A+wDZT;Ez8b9fxtI!cCbI;Ks>^|MIGVq%5d3q(HFi4PR& zclEuY#OJ08iH}>${hl_6ModwlgD)RqxLQB)mJ=5tNK(Y+Pw)hZ_y^0Lyv6*56;yh^ zm+*O)VN2LOx_hz%oCxc|eZkG3b32DIG*kuixpgRHd@gan^M)oZzfMg$2vuCOhwpS7 zL5~c0%dB%o#D>GP_&X7Y5F{zNjQeAykv20vkK19ljFhY-9@#OSMr0qriQ`@2@evoktlFHBD4c<#h9US~zdg0foC$WW$+*R4HGX5&4PlF0 zNtDSeZc6?K$z#qKzNCtXdSJVHsy@tWc4pV z%##YdQbyRB@QKBm8+wRb&P-xwbOBdSiUjXDTBJahpkt_~xomMnst`EK~>wF{j3 
zvZY8dEtyo6=abL}ZuDbLH6F1h|G)b8lJCJ2B{uB$-vvb9bvY#b^%Bn(i>1v4dTjBo z>$IEqA{vg{n1WM*@N#5}XjptIb)1{bsy?lRoaJFK;LIf2@BRX&xF3Tl4t3CSx{P-^ zdzqx~V);jp#nh`K5G`=Ojt5>h1a?s(KlnQ3qtEG(Rr{pGW?TrjBWMII(Vj$SO`pw% z9=RpHW;L2Bl!tLU?jMA~2Fl#bi)yTUULx)J(??GGzJupChVx5Ko`i&Jdr8GMIhr+0 ziG=_wI3 zh0IMn#4PEJ2P@5uQaMkK1ReOs?a+w8l4}j*!Vg=R(-H?hg-*;MJ#DURWHv3_dIG;& z_YzMGKMN>0({kx%U!tt{oEUhE$SFSss``QQ{UvEQ$dC++S1@yk z0gX~s0%Q5>EW1N$B|R^XH}4q7WTzLyPS3S;)B$h2F!BN^4wOS*ls6;0qfuzjp#b)7 zbPefP)r&)at)n-7^-HGY%c8hRf{Wvf9PFK8gCi0$P+(3joZojFrNs4-V|y>riBEe8eH0!3&b=ho%~oZ8)9;wqG{H0+`O}nIFA?n1xrle z=3rM`l@g#9bG}bHU z@cLiEp+z@h6D}(lK&XcH` zTqTbuua!iE7ck7W$+UN25~vAG)MI%!CE<_v;^+_y&heQ#cWUKO$X>h*9q|~$%)Tu~ zWla)Vm>wgIbqk`mHGT}4XDU7?93Okj@T$i9{>tzt^Y=Tk0hFi}4twZ(9KMI`M zaDLJ4aZr-0XBn_wjbu$O5;u-g#TP%iA(fdQ=*hPSnJc@z*wDob;68JXj6XP-o_&=F zI-@vRb?7*~bY`qHJT96W!?w}XO|tZl>q@AJEhVp3SCFd3_8@yahWlz(4PU34!MnO4 zG|4v}b$#HaDiwp!{Jt{geMLD|9N|Su=NSpE>e1Zo(s=Zub|z8YB1eLyM__rnHLm4M z|EvFLn?9i@OY)i6XPTtyvm#_E8AyJGJ(l#`y3K5;`vJ2*$q@NL#>@lnXre5iAxb=` zOwW1kVg1wRL120#iFG><51mFp+wT`-n42w>wCnI6t;|VB!9}{kFA4mOhxCx?=B4|AF>@_VaFxr+twVSJ^BPvF}+BZPTM5B{~hR* z?Fq2siIhJ&p%4b2dBC{K?_~2D!q7pP0$R2^ja6!|CK-0AjQGPB_@)sgm5WTkX+4)X zR2GU8?5D%7G2>xd-hEi`FNKL?BDmnG?`Zt0op@)5C0Wz@9=7OLTGWqGAvY((qk-&MGX?>XH=sg7U=m~c2OoiZvsWfq=J}4hKgxmDYAWCzi@<@wG|G zsKY)JWYr5uY<@Yn(qWXSa>Ge->TL-;%W) z{Gid_!oXun1U+#2w!5JYGUFUDt*a&ImhzY673=p#%Ifc!+H`R>9?|4~gEWm&{VX z`^=A1GSD|6h?!XL&2+0Bp<_#?kj>|Y@Wn_*`a3(DWK}CjZ}webo{U{X3tLoaeycGF zm_j6NMGXFAI)}^Lw~X^H7zMY>pP-|SVT^d6B3jvSfR4&L1$4<$<^UQY+53wJ}2XL;AJIj~czYNgh8jI21QDjo7!8@Wxc zL2nofzI^!wsejIO)~jnJxXoNou4{1Asc->oiaSQV9)G5b!Y@fH9ZNVb`=NZ*?8Vd| zq6LPIdrZFH7G8C$e1YEo$lg}?1A1M#@ViEddTd*Xe%x@DX08cCE@nFHge-INPeuVA zI#0zOE^E1fTdHXP&S~U)Kj95%i8A*!XAZ=-d}Eg#|H*9s1GLz!k&HDs4sy&Xx~yan{Fw0` zSC6j%Ua0NK>wc2F(1Yksgcq6c`aJb;JVqZIXb6s{(fs?4Iv7>e#9Wq3WRDg_qpz3G z(`cnDZ0_2pWYU^4=G3!x_;5Et+TpK2a$XET2b#~~8V`Z#H+2$h&#M9VLBfLMwe{T4 zg9G^bD}7idf+r)kyoL~qSt8$kj^vG&oRAf=B+oSMg>gnC-(33`KPecBH~UVed3sOK 
zgylf=jyJ-asAY6)geC;t{>TQ7YPa09Qw3k#o6Rio`2m}+Cek;;Y(ASI4b55iMFdC5GHP zmJFr8z38?#8j{az)}qqR5V@PvN>dRp=2i}im$~Q zHPPtq^Dr3V6G%7jt&%oO_mhswkizcQU+6CR(cGNV7l^8@EQIFgz=X?#vE`o*_U^JQ zd?C(>&Ngy}fm0)~adaCA`mmLhoemTL1e4N1g`cCW}CS;HgF2xoX2}E^8Qn@VUPU)D`mxCg2FCJK5HhUGioKsJqqzphdMLCCXK9I$RW6SqJpR_ zm_b!P3eJfiF%(_vqN12c>C&#hoYxf%zMgZSOQxQO(W2kvrlB!ZU5Ez1Pxjnk3uWq- z8wrDCq%`(b9y(tzM|$6UC0Zr;Y6oX3k%y9Eq80Cor~aJ5;p{-Pt#uvAw$&&65;w7O&n^%(DS2hg(RYNLd zgGB))_EdZBU$*dX5Y#A6gmV_Z;m6A;h*g~rvWNWP%sxjx{tb})nVs}}#(Q{p`Y`(Y zCLJ503bMXf%*W+Dq*_kB*k7X^yH7{l&!`tru>L9-mFltKK}}-Q5s_eg?jHBrY9XwB z@P%F4^MtANMfB(LWHRrka3&Dk(l<;df#>>>KaRlF{|p$*xaZigdLrXmnQ^yHPcTJj*X;%!Vk_pK&LoQBE%S zrEC`GSQUaR_soSVb~xx?z7G`z1)dXE+B)rZMbCqcpB8`2Vvz0aNkEcn0DKkWHeTh zd4I!1CaKq?R}0lSy&4yoHY)=6czhuGPVTr#IRl&gj>Dd6W$5I=rSR(N8ywhs4bE3p z611zAd9AI&J|CtCi*{XOu3rgb{`CaVtI2c7zyw8J$L_hL_hecFc!uC`uUF~$$s=?-Bh zn^I!c^OIz?PQcxlmvOb{-f_ths|dS06GuE2Sh>40aH-7x|LT8kMFqMfqs;b{jV14k zyGd2&dNJ8Xlcqqja0Pxm zXDJs~_6CYJ-+~`+^O?y0b>h7GUEsSxk(*gO56l{0vK~LaFgxdAx;&wjfO{G=e@me8 zng-BPQHL*iSHYDrw-}Gn`o8wII_5~h_+fLQhU|G^ojomn!nr^>ZYGze*BTJ z{qEzCj=&PwCtA*~b2&(^=}u!lt+)c;?wLx5@5;mM%rs7~YNFtr83J2hX~MaJIA{)@ z#^|J&a&PBdp(B4M;qxLjGXC0Gs2SoQs-CGq(lQ#!my`lr>D3Fy8cX@__KUHOM+NFD z{slInThLV33VcI3P}nt8qtEQB$@!_R?7!kF(X8ZXG*am(GxYFDNTzG3?C|?oYQKUU zEH2_k6;4HO%azf7XB+nF$}Uph$Ki$Uaa6rU;PamrquWCc!4=g;@}s2^D@6t%%Z&4I zWXOILpjAMmL+Yq&&;Vw!ngv*%km7fdSK#-9bL76W@Heb)psJH z4PQ{T6iZ#R(XC&z;l7vPt^c=8N?*^Dc54rSZBvd5%$x`|{^%mI#peV0^Cu9#OTE#F zFO}?ye=~7Iq6!W8gW!1d9IUBZNL;TCBd?x>kmvd4qLhjQQq{em*-2#>_L~Ia=mkef z_|IXu*!C{=LWi(bLoS*+bqtI-dInp3IRQs@#*q>0Qy7u!31*OgCb5x6Ov=LPjI+EQ z4e+)k{;wCY`%_ zqy}@-$;u;xtWuwfY(H;&+c{u{`t-Is@$idtH_eu~z5FW7s8#rz z3I=F-)H3++yb;^>&E|iuegLQBd+4cKoH$=pfu{}b5s&v8iC6zprdFrl(?zHCnTHqq zq!&Vk^MLz2USmZayL3${DRw;w3pDg;-JvCD!8mR1sPG#cxT1{eU4I0*%C$J)B7ti= zB8b^dEx0e2D!RPfoKZUh+!f_|c<}f+zF6x5A=4}Iu{-y1{nP>ElvgHdJaPq0&o~q3 zFH>ku_*kf23hZt9&8%0*PVnD%n>jk^7Ng|ZN8^Q^)>A`#c#FBRQrj$3cvf&vVp(s@ zwktQ#x0}4E`Q#WfEFnjNi$K`pwBqJ0kK|f~?5DP55uTU+gE^a_jb46#LHE9y!${;! 
z_#YRqo8#AFV1#ra?(fFrdAk4(*!D-}35^ zE()vTf_Hlg;)n~h;<$vyzq5p!A0xr3^)2|et8lU%LbmR^3jMV(kb1`rL?-0CRPOwB z)a>BEqC-cBd;D~8nVf+?+7)u8-xr~^xtGba7A3Oaq#PY#KA0GGb^TZWHA0KRd>X`V zbS@`rq87o93PV>oW-`I)SaY9c+@ZU*Vm z{oqy7M}yvb!oX-d;@xOU2lwf)dX=sav(^M})D(8I=a1w!Uz6wcG6RI{qB0+o8xFlr zMa+#oliB@Z7Cqh>PA{+8#|~25MmFkhVP>9egt@6ix_B^)%`Qf9o5pOz?HLA;bZ7wR znO}t2@70(QRtDUPlzO_VX(d(@&QL`%*Wjwnck`_z4Qo!nNS^pF$ILPfsyucNf7*DB z@LS45zVCEtyUGDHvn(Fl{n-GIHtEwg`Jbd&y^`G&&|rCFtP2XA>c;G_I0q&757JF@ ztMT}`TS!2(FV}x`7D~u`%IzILojvng5yqbLMHE4-pB-xwWxlNWQV0L>W8Ool* zqf}0!+V3^c7PuW*v~-Y^OP}fS7J0_y?qty3sEY^Iw8Ni-RuZW&9{a_+BflAep!3$B z%$jhUdk~i`dTqLv#H1bt&$qK_#m>`Wxx>Eb>+ubAVEq;D(aSr;yl4~dJX=f)>?R9y z*>%*?HO}fy_yXaXlPivJKZ~5d?}oOItEf#;lGJ`OC)G620=1CKG_@;&y_;l9nq&Wv z`vYQNMgoU!+!nm6pGM%$)duv%p=sbE+(~?%T}l%8zqqeHo|O4*77d>5DScv*&d%?i z02gi?z+FSj3G$kOXD@ccUq+jo1ZZ3F) z^7g!8TbX5?{c$4EFbtd%D>B~ePX^uu>G;0=+#ccZZ+uCM zPwrx*`Dxy4y{8q(X1EiZi|X`Z(=>=3xt=O?7YO|UNM-$8xY)jDwDZ+ydhA6!VJ&Q8T2_QgUOg>MsBKClJU<6iMJJ~dqe6Mh=N$mV*8e-Qw8MWX=ub5P$d z;uU5R;jZQs?e)10do(rB0N+Wt)^IYh3+|!LLSNvqwE+t*TO2)Cog30x2;YQWCiK;O zM$Pnv_(;K2NM(n!mgg*C%ZDTE+!L3XJYL|O3cFfc2l>LZ%8k@^tt{LblZo$@`vZLv z%Pb1KLI(XyL?y+;$gUNU)c)@Q+NbmoD%QF1odxpLOZ_Kv&@zO*muZgmg`H~6M>VYb zLT9*rKZ{Z1hS7Cr3Z=ik&LB!ZO_9y_cI=?E4Ag{t6I964j}^U4x?3nW;^Y|qxxp29luSXpBrtVvr% z14}dLkC)!yFmVv~_}3fDu=)(_;+xMT{js2x<7(-RUL%s~rw+bz3Q*%C27gsc!ToM( zTwv{N7z{V@a5F{zGc-vT9#F-d^|JJ%^d7A3>&GXj{X*Y=3rw1nQ#h^DR$xPn;~CK^ z#>X=P-fs86-;OYJSid23CM06H%OCOP%14kD+eGx%|KU8xwpc3L7n4MzPDtFDK;Jwc zC5gF_h-lJH8pS?CcMt4>)nR#9e1PST{{9AD{zY`h!ShyOI}7pHtp(zZ)j!Z)z7{5K zyG;MxZWNsHDbnXdg1~Nt4sSHPocruJO30kr03P23Hjlfpn$KeH!CIbZor@HB;jy4u zkcgk?>I(OYN>CQ{nA}HmtmMv@NdFAF!0C-X32Vx3>#{2of-Wmgt=F9pGv$2f|;ORR5~Ls-7MUP zcYXRTN&ESbNt$$;PB%28*mEK2dsi-rg8*{s_B`~w-;Yb3v>2{9H=;EMBAA>>ztF=I zCDeJ43UjYy0Pkxo9$mII3{tQ2r26Ds7%9M;7pAXA4=>K4o2aGbjr3GH;9NNE&HKeO zY#YQrJt2Dkt&+UT%aT}T|3xn&n$ePt8~N<~6y|4TE4#t`3rKD}Aodl9==-1&==Ch5 zjEON{JTF2jyCwnIZFk|_y+f($i5~c-GX|7=7J=f83>dvXk^85#n95JO0h!^G>G{sR 
zSa&p%{`EVI2H8wuC+}ZDj#r3a>n|VdxG9#?S9L~`^CyVv`Cnv9$znQV)F=FG|C#^l zKgC8&%=8DaNpBKJ7e^r2NlRiMDlfe?OPh5FE285|6Uo_ou1s6VF^D@j)2cl_l)hb9 z$e!0p2ghxjpuobIZi=dcjPKunwlN z-;XvdZzeDPQ~=UfhIdL^ux)`uhQyzdZ`Kd%x2NxjOnx*j@Uo z`;+FloPa#XJ526aZFV*vhSZxvsUzpaK4>{d0za_KML}Gpx7tj)Or%b<%R0DDLoeLf z{GIHras|t-B$zJe%q&Ya<`lKAP#bz4n>xI~PZM(CwY9xy)rNz_;oMiExos-(8>LU< zF3snw)sJCS+efIS`470X-a|J}8<96|_o3Dr2|g}4P`gsidTMKlp34jSwE?C~@QxZV z7`2PO&+^6_o|F)4&DWe!ZZxViDnbwDC$R&bw~;9s+p&INAf2W?Px_#mLBuZ|9*;MH zxQZbBx-bRpJb4*5Rt&=1>R%H!y^Vg{`9G4*!>_0J|Nm)g9 zm(0C)&gP zjWg(zse2{kGN(!Y_UM3;)aOKtR5{muM+xXQk{eTfL6jkff6KM8E^$)6a*P2zDJPA+ zNM_*Or(;R0Mm!l8>_>`RPYNnmdq}R%(c<)un1RAl7PnP25cC9apoTNiI&lo=&2q$k zveK?9l7Llos^OJYAn~19%p4PyF|Ps|$&XElF}C5PTvZT_ZKNdlRlim5NQU2^%jOv&eG?ZUJ z?-V6TRAVw|!;fpGPTF!%%?>4JDld^W?-rue%=a8z9Dw~%dKPgDZD{-Hk@U4|25+@_ zHoQ$u5qzA(lKm5u#GXOh(XOT#1V3K5OQXLmD;TK)PXjAhu5n#Rj0y!|Im2|&w0P9!UobIrG81iQ}EVcUw{;D_7 z_z&O2!TO`o-XOsKa_HJf@g-yN|IUA< zrvYxAmdZ@r!xB5yUSc|ZmgsNCIq|;CI7V-T34QifhE#OllMKvgAUFD|1nD2g(gN#X z)_3Mym~rJbVJ_Cg@%gi$`im-zTWte}Y+1hIikR$pSW0tNp8=)di(LBC#rRF|0kSz; zm3QF_srBtVMlNlW71zntma3e+k7i|xndVR5f?xMvuk1h z7Dda-T~V~??{L<_%7EJ-tBWt{rSKlbz&)}Lhh2Nev#H--Q)PKYCUsvMUMw>QE&nis z+-VWehk~21|5zDym?O{5`7s5RzP!xO3eY64JXFBQvzB_jiV*02G$NM9mtac#L7bkY z#nZLHmh0cICvCsa;c%N#{3OFvykY_+VRtZfFw)}<-YC#C@m+41E^FK9;$W1p#*pt2+g?K9Ct`Zycd+lxZ^`2q$Rg^ydD0hUfUREW}a?`-e)l#U& z<_b^@EGK`BOR=TNV`}4Qij}9|0ngBHeDNrA_Wh#y^rMLa={9*vyQSaK_>)u)GD8P{ zKAb=sZ%KP{jZ#q3GgYaXa*;GYpHBPNS@9iTev40rSmC;h;rxZY3((aFsXIwLjZFMi z!|&c^O(nh_e0BCe;ijF*WE3$GZ#O!Go%gS%b)Q`M5dlk>OiK+$YnnD4qH>L|h(lav zvLTpNe}JX>yXg282}u8*Jr|XxKt3Dm^1ip6Xx7+i&u3SmvbE@Hs+FWw=^eKFBktfko-i|(*Ftj|dh$MYJOox8k3-aA1?7N-Q zq+Q9kU>!AKm%bmy_D^!)tz%4~-sd{MX^O9;Pml!LFCLIg@JeOzNy=}ictm?0Uz0mo zKg11MUiffBJU77!p;d7jG~&PGIB&FwJ+xvhdVPHkUomGI^Cl*lmjou5g=i9*?%+dS zh0DXu?gjYtN)aj-Jf(X{iiNU<1wVZ{%kNQ0upI8Si~cCRChQyU2bUv!B>_LjqvnYr zSj|Jn^8RBr=D&k3-02%8bZMYH)Hv(%V=h_IH_Ltbm5t~4B3_?)7442>^*Z@i(dPVu zO9$yabO2)iX;CBpsdR4fLhd?SNAJCjqdN@x>8%n~bRo@{5iFjE&kVc8KJ3tiHl1cz 
zq$SNruTns5QxlMp;tVKr=7__@4K#Od47r+Q|G)hIogGJU%=rqo@|6}`PppRPNuNaW zmEDp@F^gF(JV?V@#>0=cB1X;RFo<6H3zff&<$st4b257uQ|~c`v~zxgG$XK$mUWGx z*`GK%#(}e}b(;=E?sOI8D zI((G0|9s1RX2~XZaj>loU6U1-`t@1)6IZKvA6_dFsH(v_x~- zRkHl&@=QFW-j=-hG5{``D#BUohZ7Hn4)SwGH~y%wfS&z&if^iE$IIvTp`+SvH2Zo3 zI`v9{EPlKn%D>ssk-l=ElW>+@_V2x5YRDNh{K;mfQt}KMqV`k!+(B&pBau+2hg@U# zR^<579r33QvTcu@VASwlD(PFq*-%!9P+l6RF0oY79`&$=c7sl_&5yeqB5BIB?lR;-w9M>-&_*V ztHTQqd=-l&<-}-rqd2XnjR~{}qW0#}nfsC>_V-f6FBeV03x2a)2j;mUYsWyHgBdpd zbC6lO%MCs8Op(st$;>JBdK!L9({$GLFi6n2kIjm=kkM0@p%9Z@+@Zy~R7GuwKx_1N zx?{9A6;&1S3-oV+(|9GJ@zQ;y{M0S+-DBNc@Uw5o@TetUB=cADZ%ij!Tk;ONSRw3K+f05_Ymf^biE!_1GQ`BKUk902Y!M&}g zAZy<>JhM9HfAfFqwZDiwh+;}MuOhqe_mCj73em@@U&N|8Cz#8XLh0PyPrmGwW%g`& zLo!->1P|lv>35qGtjy|NFk#_VDGT`<9QW1$)4l+rLU%)AqBH-`8 zGsaUX63PJwhdFi>^XV@h4{T!Al~$RA1OE%@+jk z4+7DedUoshAI!^PguZ$Ff~=f;7z8C*)K4r6tDW!QBh9t2w(1k3B=?1^5mlk0VWGtS z_eFa6)IPdM(}ezNy~AgzET#3KetW3+{&d_G69VJ<3;5dHTt;X+kq({Pjo0`W z&;?Sq#4WrRKN1Gwe=3t{x?m;98Cs(=4|)Y>BHTcs$BBl_(&S4WW{dyw9(ds>J^rUa z4Jn>G2%#G!#QXAA9{Flg{;w%7*YiU7?fp>VdiS8{bm$~pEdhE~+EcN6mk#qcxmHpj z%m?w5R6a^Sh^u|_kVxX|;BvMn6)SsVaf&fltlfp>qUHF@T5@!)^Keq+yBnN0v=Wh% z)R7okE^MvTVdS10a=Ia->E*FTBw$)5G`}mxrt%I%BV{^y@pS?|uQrrE?Mov``}R=H zQAc2H?nKtp%Yl{Ejt9?0XPM$9pP9s90e@=qWik*}#(%=B#8pkY_dF?74RPoj<8gM?3sss$fTFM2IIWY=&-LdU# zS=(M>dt@tk6>Mp=Y_wo&HF6b0g7q+)S?ZBbq$Km23@F```R8o2y4Ic3HBo6MmC( zf49J`{fgpC*ihn@Fo_)&caHuFQ-+Q}8|H%dI%vtu6}qf_P3LueV)sN1rHTHkR7p9P zY8~>SY0X3F&>ITWS)rFd@J;GwKDCNJRl1AHMd{+fpX13PY2I63yNK7|7V&xZxuolo zJZYIMWult)(-Y1ssB)bddj-pg4@-SD9p8J|m)A$o6N{#DILwpvF1|^1(;Y!{W;%`O zG2W`%#BuCgy50&8lwcX6BQx#D4ce>mDf~J;fQ%;SMUqmvOXKio|aCCQ(8d+D<;9^<~^kQpDA^1U(Wm6h2Y5} z>QPVnFL;*x2W=geK|*)b!p@g-Y5e-Pq%P_vTl3vXxYyMf*Hrm1rU|uBC%2fkuUm(k z|K*WzPY>jF^g1%K7>8$VZD*&5?vw2qS?Ez#4E0u5kq9sCL`BcyA?l|ad|p$GQcP%5n{0V75_esX#Fk2q;3V)OwaTg7 z=O4j>o&!92T(uGw7!9S1{;7#sYZWx1(wYtg-Qi3w{3QctJCSeGO(|=30v5L|qpp+J z2pS){;KeHrh+5UIpw>U;a8`CHHRy?#Y`!*GvQwf0ul_e6*q{=YNfT=U#Tj|B#3bx4_o>~C za}!=6{fD-o-&^M}^(TgTfh|3oykvoOs&N(wG5Ma!cUP-tO4 
zOzAYT7@BBJ%XgVkwj+!$7-tI`uY3`#95#(CoMs@g%`WEpa?+8Trwl)|XqSXnOkoFm zqhXdAC4-y}tz0L~Xu2c%y}Xu2KAI=V`v>T+-*2jDWkN|}9CYruKpekZBFQt1K)v`F zd;8;Oh(0_Yp469vk^EKESI9_Kbs3?wXY%amSXGi7|BOsX>_AO7)^k}e^0*Za^5ij} zMDEJcWdoTk;z#uv5xa}I)H5Lq z*7W}7&YlXyrey)de3}M7N4n2^bvOk#s-46I(fZs|>pjrEuM|>zG}*wBd7@o9rC`u- znhkaPLu9sJWIyIyXTAz1P{o)C5|FnT^iF2eCtLc6&)r76J#;Tzv^>iw1>_O4or{rn z;y+T*6h{viIa6NlJ}g*XM)_g0pms|&Gi$36J0a5?r1Wzsf5@1ngE z)sMc$t%-+-;Xn@dA83aCo_c)Ej`^4wvL8L^xC#$Hg`>$qWjJfna`-*&AuJj^OgQ_! zZ1k8+;m(K%bTN4(God347WWv?&>Bzt+TT{%)0xlZSm`3yhAAj>ku`fj?F^CMg>c}j z`SgWVl=!nzC#QOFCJZ_qC%soTVgf$MMcUtW3nG!}j|>9&S@eO_XuD|eCFy-|AH5Ro z21UzgqA*VukFm}~&I)3<+aW!E3HDsypHCJw!*z(`0t(SO@4;}hu6V0_ zc~=r0_Yk^0KAW9#dlFf?eLs17i34R(5Ngoc#TwrfW4*u*sL<{wFIH<{`}J`|R3SsI z=u9DBYLVb}x|XOT*`@vX;DcQ_Z2MGrIfBKo`xp72|3B^ts(zNmw61@L z#jB2x#!4BH?}irfC&gGM_euqy=PxItDb z#(=4V5uC7lLKc=Qz{aGVL&Wmb=96)g6mJiJD zPIq?O>QwaC={k)Jyw2)X=)?MeJ4~FT)XlrNPLjA`H+C+NLAT;`1y-0=6bo-3IY?wln++Tq`H+SN^!Fhz~a|WACPm)(2M1>b~`2lH8rN2*z z?F<|!*dURJim6QrvKaEmoC)@2|7yyFoLxkOYb==hUL?C+Few_SryPy0E(6dt|su zo4(SRhD%-Fk_G36@!Gc5Oi8a7JXEp5=dWqdQ;CmBR-LqiwZR%6GA;)3{L93nE0S}y zdMJ=gKSDNgr(x)<`P6vXC~=!ELhDZDQVqXXoUYY481carKX@>d?<*>XF4=54sD4aX zV3vzl&94z{eO!l*C~Q6Ofhi3iBTf z!^@P$a5Y}lxVUAB^bAOQ!s-L@k?nWL>X1aTbjC5Fsvad+Twf)*lBmV~mGUI_zc0c& zJVt=^LQA~yl_A-@aV*Jr^AaV8PKWuE6^Y-f1~|r_Cv88!F^2-aGFpE3$gYA|=KZ-S zCVtjwy8L?%G5xK}x7VK+cb>mQs(lN@&dXjiM`r|5D+d*NMR5|@dAd^kw5$T3{d)}11%y|GBrtAg5( zjiKRj3oXp{2GPbv0o0)P3NKT37(DbBTP#NYMBr;JUeI@*b1$t%A7pI!C-#vN^_yqd zpHjE0*Uwet^e9hiG_VN981JS}w+-h<>kM=(uVa1)k$XhFBGE^O-E z0J?qmS>2tZ>Aku`P(Eoml{p%VHb_z=i^S$q4@f6-qI3h9H*7fg1PsSot5Ude8((l? 
zTUV2CO%F1D*B~ri5{v(vzv6%MAAE}Ox(8j%&>;aNSLFiY+mb=9KQpf#p_b&aEZO1P1(f1aO+nr6_igoFPoDbATx{uCpO=T_?sj`>;rJ|zHDC!Wfl67gxAnhC0 zFvf?fYiyx5iB@p~g)K6n8ViA(X7x5>nSeUy>kKU0w zD8!1F*=?#qi*qZ#BJn{-W`ayUOm1+eB`6d-q=%Dtr7~z~uN~^1CqzyHSN7|FFNwNL z7E1LGrIVh`lJF0l(e%~`h&cC?DCfrE8{7$GUvnASglTBhr&=;*Q4>{D9mVt<(uQW& zU+C@b^WZJb;Y~;yi@jwIqP7nqptXLP)XRT{Gg$f6LZ#h`+*}j_U3Ycq;@Xd*htr26 zlPS{w|j?K3ze zQQSI3^6PUSnc9~}=ULxiUB}HLTYRg@+y{QJLQ;gHNHy zEUI+ONn1mp*iuUJ*y#tOP~AQn@uXT3t{u;6O8Xk`YRn^JHY?L?7jsxKVJf{=TR`Jq zcu8CqG;z9n@6qlrYV>UTX0WU)AUPLqk=&y8YX$u z&Y=9aP0WMNuUKC{mmJC;f``igW|>))+__|9^2+8K?#_>e{Rd{^BQX>Icm8vI>@f04 zW?s0ekaDw1vR}PbwBD{-+&|v*?_oM869@>C>W$jpZx_T zQXkzbCX1S}ZiSAhRiTt-@6TaH!G#3$6PUWsFW}H=H_77IPJGEh7WKGi;T`GY;jg(X z$h^7-8ZCXyl#?qsY3n0BGG`SQWRE5%uC+tYxHf@u*+Sx&^OtNJ@ewPu*wTmNQ~004 z7VLOXAOHIc)bLs->Z<>akHvSwMZp~E_QwJyUH-~$3sDt38~zYo{BIpIBccNy%k86` zb|v`PyOZRk@BydvbQ{{Lu7#9?_OSnstHPKd59}UrmM(dqFW#2A4fVN|gXqv_GIrA; zymaVgv}C^mP4Q92nNohs$5oaeDZ?-gh7?>pG;!SOPvCH~hs+osfkU%Tp`6**z{Mnq z$Zo9TREyLE;knyM;{0S7`^So^-@GAOa>*2RK0HO6tWKbR&voGG_F4GI)E87<+aL5L z!F0{{ZNg<5Yp~I*PLXEjWprD04-}tUN0a>zO9Eo%NW^w?VUzc3`gmP9D|)ww=xGjt z(4b^k5|@a6?NQ;jbi3noVV3kH34kyCEAii~d{Q#npJdM6Luzen1hcuFk`MP(IKQhD z?h_BJ9ybgc=A>YOqcUlezQ1l&-;X?BOoyW>O}J=T1AN_=O|G`zVhl@#AE&~r>kZaBSL+6N(yUvR6^9dU=tY@B4j zn^o_&;g(y?fJOa#(5^kxn4}{ET*Lt{S`{*!+1*n`cR2W)#C#hE@h6|)=hEQv&zb}1 zxj__Hz3V#+n^A1>I%=`hg*KU*2A1-W5(5i2=2&cA=|X-~)Qc}Hn~xq}PC+&A#_;#f z9g^tjA7Is2CBhM-JaWfkA|2_qIdfW(D-lb^~6nA^mn46!Y=@4fM_PI;NZ!g@A? zu^%`s0OpR>%)y4uamxNG2{^iM>$khHkw%M^olnvl<)E zkfT0xn4G^-ck{z_66MSVxT-3GyZU+!o^b-f1)>LZ@D{u=S;}~wu;6}N|4o0#9>mih z*b;F?BdGRyO>k?d!kblm%GV-4$p+TZFc(S->iq zsl$$S%J`?-4LXV0Big1?xS37C$*o0$>LeP&1hu}a)9NBU64)?{dNZ{g~N)Gy0fjPKPHFmk! 
z$H5Y1&X1)Nn7`aD#k<6PsU`lD^n&gl%m&x)EP6*2AzYAo2&=A`BD$sX5xss8BXu{V zP(2qb$@pk%$vhniBptp+LszA+nO<`V+beyovy^%(W2c~0&x>sBvC}vufKXSL7#Lo- z8tV^fC;f_l@uuiw#Ql^|ST|#aWQv^u7r8tPRHdCi?Sr=oAG;2(ON+!S4fAlz&m5%v zWg*mQ_G0ydA~1+4AY)_iFl+zy6HaUagS) zBg&HSiE)h22p?)Ttsg#w=#%`oDDj+C<8kQ=6>d_&67KKF(ZJu`gVx;pCVBGc1UE&n zkxmOyJ#tGbtvHb zx5k0)ryar%yV6LC?LqPBOLAy-SSq@Lf#2_!DT$by#?JcZ4|g{PlH-d+(oSoD*D>Mr z^o(b;TTWFHZhe?b3K`D(jh;cnQyW02mrE*&M+0e@2S5E~xevAj;2@U=^W1)cN98$= zRdJU19^HYd$if42FvD4kxv^JPi_%4dU4xKNtWca<`J5kyD7lvb6#2j{aRP}?IOr`b4?$TeF0 z7kr+s8u1z{qNjNKZCkEudJ`OecpsEq4>OC$Yl@u>VnN}e8Q0t~1AKZfvBHiAOzCPd zEfl8{*MJ(B?w?6NIB3JlQx~vcNiqDqyo;IAf0Hz%oPLH+ir9YGeA+=W#^)h!3;n>w9}K~( zCi5WiHiEM^o`6FK#~3Bf<{5Kag7B**Uu3+g z0Di#%a?84bJ8+2z%5-*;LwN__(yvAIzO$vMn>?i_>A-_(niWRvSP>4IDB4U)&7 ztT@9v(;@JF5T397i1aNB!A~O}V~^Za+#piNp3Sr1eaAhV6i^RJM+=C|`76vCXBpPn zQ~|nAY-3i|doezDg6O;-4&+|wOR9JIm{`v9Jh4*j7azNl!5rKXK+oLihVhrWu%}v~ zcvL_y8lIoSzIbfNJvxU$5_bs&jE-c?LJoi13@+C3-ux^-!=$x%xfr0zjVCcBHXGc8cbh+}B- z79p>9;HzZmtw?s6f;SM0bi#RyXwQJOb8=@S4Kcq%o$6vFZ^SWN{kH*H7cZpw-8E3) zeuX4P8-nt}?Qo+}lhbMH1gn-bNS59`H0EF8lFgS%I_57%XJvmfam$p5s?7&dw|o|U zE3=&2yu}3B=7o^O2M^;NbMwKoG67p%UiZKGAK#gRJ`|QSJw-O;;gK&=*0)8(eaaVy zw+1j~dKz?z_k1#1QH@D_Sw+gu7Yhc{tf~LNQTF%C7)bhJ3K3i%l*dJYO1&6B>;sPL z=JKP9LdjaaMkz1*4)*H}a%U>t@J(?B@k!L?b^OoM94U)tBa(K2J!ZHU1-C)Cs2c8F zI>7`6H;VqV^MyM?#3?yUgLz}?*|m>qne#Ts@$iVR#%p)|MeR z3^U=#*6Fl$i~_vAsK`lFdITY%?@?2C1k<$TK787fM7KKM$0znmJJKA~k;{iL^l@xB zdN^?}tDs;4ZKJLt7x!kmPF_=TA=D4mNr+t~2FxNi#R2p28H1( z=$iFNTs?0qvWZHd8=6L;RC^iNyk<7eVBgY$H0hqAVkf<$q$Ij*nShsnP7@vTZA9J{ znb15fzI=vOtyYfpHulpVmRpv>-x$aHuvr%*M@NP z@naYG;Fdsr=wsS9`>o8sha#VTWG+2(r36sbC9+gH3r=Nt!k3%x*z~|&n0`7< z>TXx0I_iOiiQWX5IRC*R#o zgs<(U(mx|}*;AJ8utoZf7AyJ=3+2<{!{2%EZIV04T%64>tD8@_{zeKnp1~&@H)K&2 zi7Tbg%Yji^{GA77wDiCb;$M@E=l?gGOMH44mMJ#Fm*m?_isnM`o)cT(Y4{{g>4qKj zslH(kj<#Sgb&9FUwL;>$ISFKSQs}HGEf7k&arA~F$ai|ee3|>6xID~9qrD@@%E|&- z5S>aVhRIQ=3*>K4{0+uel-cbqvFtwCEVQTL8~qeh&%U>ih41(^bA0z$>eqHwQu3o4 zYnjL*y$?6=(3c+I-?M|kL+kQd6eIiY*nG2`SDsu%HLxc?@ 
z{~)ib1coiprC)!xNco0$czMVZ5}htY&G(L?Ihy;>xkcyLzBg+?Cm<5Pq4KWhPYmGOP~Yao7jhGP9H$-8Z-5g?5QR_MMh#HZ!Cyagcqzd28+(kPpjzjdPOq`sk3j6%hNUr{UvO8vspm)+; ziK9XwH`6o&lI<(-+Z=OHdy<3GGW5uMk6!$sM-$tRONNTEy2LL;p2oO#lVMnfeU+lk zUTMZ)do_Vc`I*UlJb8sC>BW%FqDj0_g0jT1T?xVzKZ@trJZAo8AEEmLSsLZ$L@J-v ziLEQb@e*w(ZX}n$ftn|DwZBB2>ywzY+cVLCN+GTC8kA_ueU~~?%S;>0wnNxBbCRuk zmGqY6q4`fga<9#2Q1gv%EQTdy(8tkfbinrmf5ZD8gnJJPwNF$L?<0|C3=Nvfs0Q=6j(iu!#`uoisu36s*grp$q)R@#eL)k{JsxauJJW^WYgrFG_oF zjb13hr^0zq{k#yKrO)GTedg$i?bjgXlmk`JV$nKLyd;dIBl*3CY{nZ~vRT^08s@$o z@0xLtQ#cxk4qiYlD>}kDZqV>rf#xMUSTsQrb zxLR>E>ny!Xih29<|J9A(NYQri>tevV*ux-;p$O zmnNNQa+@h=YZMjVJrDbOW4MNsiD30ng&X->g)LqbMz^|tARhJApj>jFnvZgaD_RT@ zm4ATmqh(p1v4C+ESCL=tNuuzuhpu>ilPWGF^m51u{u(kr$hdxnj0M!OOP`;U!Mxg zIG%o-yaeS;c_Zm^{@BFN&d&&2uNE^z4Hj>4zZuy{!*Uj4{~ zp2v%&%&0r=Zz>^q=O+=FyezU_QfzVVdA4L@qzPBSA*fDTgon4>B6kGdc(3IVygls@ z-WZ;N=BNrG(&IetA1H&0tURJ^qsAWe{?53K=_Ruz35@p&8|IkHdRpt?LaYlqsBmwu z_}|B)WX7vDv030fCaTGaEcU6Kd2x$K_?2T&RoM#_;X4bKUM z_rLtf!E1Z<&AoLQ)d*h48mLMEMP)B^d{jm=GULI^M;MlsgP#P8QxIGF4{; zqgI+zuuo^Nc6oyJ3IiCjq8)y}-U6!gjliRCK71{<;;-*@Ce>1oxxMTzn7`3LL8I2< z)b-)y+ENW(?PW356kNq^QLk`xvn_Yk?E*Z}s{yCGTbXG#&Z2C$1ei6xjCj% zJL%W69Z>%E1+S*8^0jhUuU)^{r@g z^<^|^X)xX&^p!uF6~WZLk#HM7$ zE!J|pyxUH?bg~cht{8xMrwwWK<6WF&avHsF1117A+?km)(WF)9d9Xk-W65iy+3sr4p_?6C>b96AG^Ck_YCs!Y%%0Vk> zM&oemS~!w=SSnhs@*DyKc02jcMu+Hs+g0%R9F{n_n8C?!^}NyaKt9_mpU9Ghq`tO> zEB$tewqM;!Wd@p=pQ7#Ji3vk#oc$0~c2wGj^wx`;x!al5WIL$eMKAc<>Og-QVg6nD zWjJCUP6}fJX;v~}M{GI*y-jQID7=qMn)M%F8#a$Gep^K^8qDV_7vBM8uRca^cqaQe zcOMD~ZKj5c6WBY(x5$zSr5Kgn3bBy6|#4!8VY4i2$$2Up2Vh89HTgH$Tj1nyLoJtfXtfzNsD|j-x1=m{tKy@t} z>2&ih)a_tEC^RX-G&cR{e5O$HB^=hiX-TyC&YTe89>HULN@mV@9RiB_WIppAP z3EEi@h2o0$qx8EG>^$TFuF7V3g!KJ?+v8W_{|45eX-=IW<7fa{U%GKra|;R?sX_a^ zKO*;BSwj}4e{3YcJL>QSo&)T>DO<_>4Gfew+=p^O4|?lS zz;^rY!$#U$>F1nSP(Bfbmks+%R)1bWl%m=R)EpL!o>MO2kq+1MJPn@QK8FK`jF5V? 
z-En2zBfRiz2M(BDg7j+QU_sSiJShJU<{Tdi2MS*>H#R+Bo;d5l6Y)wWK#<0aC_6;& zSFRuvhnVp$3-61${&q5RS-yDV(C5rPjYR6+F_Gpt@nqikNb%9mZCG?8lk4bsh*%s^1l@VtX9m%v-7bI@(M5()-`A6Fon3LUiSXciD zFuIi^-P13mPP#imlw3*UH!JWL%nwOwqvVl#fhq41wSgvie}Gf>zLEZ^i{PW|9*8aa z%dR(6rpiyQz<);s^ttLTbU@8P5|EUROfRUgZzLu}XNDr|pX7w&uGw+ww{_9-k_9Bg zP?fBoQVZ7_`*6?Rod3=Lz{H)>-TX=B&)p4V$2m2yoLee3b88iE-JHkV%eAH7d-f9T zfgZ`2#WIjY6_&1-c6_Y82Vo0#!sqizuvcb1pZR+>De!(n zZGx2Pb3HGlD~!X@3l0+NiIg8<)5`{oPvKlE7tUU!Lsw|LgutISnSG{*MMv_J zq+Rt9+`65~@Q(Db%+x-{Lv9WoA>Co0Tz3IZs1?!-Vg?SA`f;#d8_ZsHiTOD~3!-&z zq3Yo&q&D#(9X0kGt*~cl&9{&Iw+9h)g>3*U-}9F>jv0#&uZZHWI14%9jWuvbm9Xvm zOX&`+7RgS%P2~3kDf2FIBzvzZQ6(RBy7^ZIeKhJW6W=3?4E#>>&vi7(-@aF*v-AkP zut8p!X4y(iq<6F7uIq_xjWXY+;A0u>w23@=RfOZz6!_bdmf;t7*N}U0$@FTv3jh3J zAI$n3&DEH#6?R>Y#)lRTXRAvN()F1>{I>0(B(Kv0-s@!}>#6}1=DQN@FgD@}rR>-K z`w#JihMhb+XFxpe|9I`n7`i8^7>xI45SjiUJURawb!l?NCp7OuGH&4Kg_bZ+pN*lN zf4fQqa@oVAWhMR9!e(PXj5R&{(T zCyZ|{lEWp>E8uap9BK0^|1R-g=6;C}X_?vC~*179=L%!}0bsX^`-?FK02X zTNRncIm4-iX92%_h8j2b7C?_y104S2L-ShSBjahFT=&S|*wtn@e<4fCC-uk^r&lSU zRACIiG+bfamg@rbRe{W4>v_)johCgSkIBiza!B)_ByHGcqEowqY+Wvg%am2=g3?G* z@^=p%IxHCc7Hnp_d$QT$%qGaOlVOufmDm~=2VU1t7yj9u=NA%bF1a!qBz57El##R8 ztOy}*t6W0gEICdt%x@HjFD%9pwlQ3_-EU6aQkL5KrC?3H(JXTFK^Aw!{DSR=nYXQR z{Lw}| z&#D=C^k9!9=)-j+C0Vfl_W?e~)PkX^He7JD)azw18)SbD;ak?*(n_zXyk6#Me(1FS zN7|Q$Qx)}ZBeRmBK}t%4jAz`#S_ctHG%2Ob&7o3CC6zJr*ksC3hC)P!v)4K#Ln@IB z5rrlyO_~SPd!Fap`@gRD{qo<}wZHAN*IH-ob?vq9`~KbcO!m^vOL&VnchJ2bl<5sJ z;ncR35#$!JZkmy|qU^llxjCLfYMZkMW%2eg6%gaVYqsfQg?H=l19w+&6h2Q7X9{kR zJeCsi&q0;V#RG8OMK7zN%Bm`m-@?(djFshL?q5>S}db330i@WfbKYF^S?9U!{(}x1sLW zDN^ARY^pzfHbCMW=78>Wy3yq_)x5cd=Mbk#_@7=&*0+74JC4NDmOtZ(R`*mwR!foh zG%{73N18?9FFkDA{#ax2XAb3?70tDq5c`^k@8gbt59a7^lc1N)KSl=bv7%b$xzT>6 z56GFF!R&&NVx#S^JCw`dyB2uaGFdgr^k*>BAQCAJrXy^88B<5tWPac!u zG%iZvdF(8weqA}lIW+Qvc$Xc@zVu*(9CSBlU6>f=Zyv4T&hS0L|168-Zj3% zK4pIOesR8?x+Guu&?+KVU4hb>>`d*rX-lgA?%^DgQ#1%b()=Cwjo8<`K9IHvsZ@RO zIlj(UvBoh#llPISK^X;0u?};Bc;)At$ymi5_cm`p)mue{O9_axSN+?d4Q}A*he)77PEkvBkPD*$GSMInQG8d;I6QH 
zNDTRYBW6!Tkh`Y-;&nI5aGzxg`CC<%QVJ$J$z|o<{5d8u#FzL&!k%-K@HMM8__RKV zWj|>uw@BHZJg~-^KeOc{p_}5z-#29)p%pgD7w)&=S1F5Y7>-r(H<#Wg70!eauX`HV zLm%tetRMe~(#4Hz&AtWf6;IT6T(B-u8~waH5|25bCiA z{F_UzFH$B}{C!0{RyfX+DsSXIcb-lSsBhKRE!sgP-!-E`Dz4K_6W(OQ*mZ*}1#_Y= z?UAv>xB~CTj&$CK8H?zfpD(lWnO&UNuW6(pIgPk*SU@Ga#gjQHhbd9;ORC?SVr{=U z!gVarpkH0)Q_ozA$-K=&Vt=S#gwslY@=e$%r)}Fz>Ql@y@@K3bb+9^|=lw@q;jCiH zo5v_|oZJSze19#Jid+Z)xYb%?|%eV^WCpS zvzg!agny`{xW;advDwWL`!}~sUs(N#J~F4?B3lPWS)J6!DXHW_3lBtIBVIT zG~r8;g$l39JulqJ#;fzl5x+n(hIXc%Y`qEHZG+U~jcugRq>~F&Jo!F}r9_Gppey3v zQn!TH_*<6EBo4SLa-F|akj=$!NWE{b*gnzbMswW|>5x9k5eyrVQ46X#Tb7F0rv~+? zCmp#&>xP|V|5S!DSz|_i$8towW-O^7yvEkiY$W!!B=FSSKNGo=uTp%|1Zu?UEqU(U zKkARzhwYH}G7kM^Dd+N{Z9M;-iB#=iEa#(J0pZve$5vLqLmumMVHwID%$@=9j?BS&}x8`d#b*iF~ukc2em>yF>`Z}u{9K<@rxjEA0H2pvP zE-;50_TNeWdlAjQxk8K|=PFU-qGX;?^IRg=wv5y}v4nal&Zk&2y^Uk)WNkR*>U|#T z=nl4TN(FfyoubCLNqpP;;e>-i57((8mv{1X2yf=U>zrif2XQV#$kXVJr6|2IV}G6H zJl5e1(kE;Nsgs??-@B%eck4ned1J;HkL&-55RJA`9Z9>{nfcSnQ#?h!TTBb-E`6I= z(`d}E9{s~hE(|4&|Li5~A7*eH`n?T=cddz=r;d|Lisn$Y+S10Z>I->YuMSi9bEJ5G zu010n9R+;ru_mhG{0TC&Xd~t3_}8HOoD+Ylo~zNAb`8%&(}mm=wvt+soXpaGZpDh( zqE9|AFQJ?)(l{aJfyA)VOX859*hg%*fVcCp68D@{4nHnHi~7FWoGcNn<$ozpB}Pm1 zh(pdh2%ioa1KXb=tZlyXTycp9IW^sizq6o`5MH$8TeT|^57KV%>DLv!fqV@zWn%%q zUyL_@%Q{Xhc+$`Qu!i6ytnDB!&d*>sK3dLRf6a-?Sa*VW37e_oCAr4T*%V@A=!~)6 zg&20K+A`|S;~Ql1`da>;l33#@ashln-*?W6*#_M2o2ALn!6@G9@pbHL>m+#xYVD{A zp&8rcS}L{mgw=v=^VCRQN;ThZK^!6J;K4g4&W4e8dr1a8=+ytCMp5A$U8?v?oVae+ zjr{v{*q~vimDtx+m(}&Bl&c?*!%O(BKqt66vKEAfa@ttCNQ>^hgvfh3)pFN`-2T;$ ziWYk*cNjte;?75N39Esw^yn;SVQR8u(2B%))^VB$Y;H;bEM65PT zcBUqAVZJ6$;X^WiY~@p8L_3A_m9HVYxzX&cn7(5VwN;j zrx8P@s^q<=n#9e6Ueq76SUR}4lV4kB!0&HaLV2yv;|-@RBeH^3sC~-e)cn~~$YYja zoY2HBef6F5_z{1#*>b$;RAoy#rP;WHe@}IS_);{NclA#-uhBV{SFhB@c~Un*>{H0# z|9YBFB~HW}TaP;MN@Aaof)Zo$QC%LtCn$vHD$a*_GfeVJS7?*b_ay1^P6PJHkICeo zXL zb$E}(`l;spTplCoMJClw<0op3Q+qP&$%V2Z)F#wwU{E=UA7hklWW>$k?G@UP-?v6n zbw!S>7pxVmO3I0xc-Tsv6CL3=rQIj$Pic^&B^hMcw+`N3(w9s7KIV7$9Hahp^T>*V 
zF#g7*1Y(F4Nn{C*5qtTC2Fk1Vv&s|9xsMIQ$#qgQ_#ZsykrmpC{Oqe<#CMCEe5=cn z{7T&gWap17d_I3F)jpg~6u8E+rL3oMbgqmNXD4`UjWAXAogXDs^rE9gY_=A?mnTYuA&3g7dg7+`IjyEj9r*lMUtXT*5bJn+u$YrbU5HaI? z%Je}B`StE`YSxD@)Y?x*tRr*edD~ua=}Y?Nl$NKEe63nTgm364=*#h>RP`M0y58B8 zZufE0s9-T=HE72xSUq7}D}I_z&8uc#*ip}4r!!2LE>Pg3&7s`0lo#(pG)df>ag^BB z_=jB8Yt3)3-0UlF8rpDWqu7ERi{_N1$K&L)hS#W_ax z&Eyq#b>5FZDf~lkz7U~Lt?6{xmz17o2LE$^JAaPNAjcuJk=%EshvbQh+0vYgM#&aA z}{S@|+o$rW@ z*Kxdd)>pz|K|Q6Zc!7E-m_i+yF`r)T)=VBzf5Fz<<;ekklKhsA8jOU8M`QYm(a4MSFB@&hcYjijSH zc-MjhNmKn~YF$e`i=LLj@;hlv&R&P8zM?R0bX6lUdPkLP6MK9uAI{`!rYz)st6oUh ztF58#T5lzP`fuZ(+s*Qjt* zPlN=k)BG(@jBSKfL}p`)vHJCE?8jSHQC5XRbRIr%}uDwAYGCNJ?b)*=$LT|#4VzWZJSv)-@_{ZCt~Mksjqvj?j85uVgjV!xTb@f|xEmU{)4BX4$| z+SAV>j_;?~e=|0aZ`#xh2^D)vOT(SBXYp3@amNxeX3{tEvSB1?{K=ZUT(^~MeRzPD zQE?{RyxOU^7ao$86;?dgJq7&gfIQ;HD@!^=`ZEQ)+WEUD9`UsTmT`CZ-6XlABV=iw z8Aqci%}D#mck(Z516S?IT(UzX$=&*`mA&@NF6#I8iv*DsOWOSGr2gtGCC^RPBz}Bu z7q>Uc_DXz8Eb_h18w)WbWxp_#n?)jZ=b$VlRq>6U@*s@L%F5=neo*7C*VE)XEs3Vr zmfCS`c5NV&7u#~Q?^aV=%-*q*JadTmYYw${o+6c^UqZ#&erJQ1hCJSy z>$Kc#4MHF-K`xJ|rM&Z+3`#D)B2GHoAz!r5AZDj1(x(-?K+Ay`Vx+u*-y1uHo}yvT z*O%K!$TF9xHK~*6bvrbv17AzH`DT-iP8?g#SAIOmX0>EfxnBb5?!r`JYLz>A?*_qp zF-3*no94nB`u&Eped#1>5wn)4`4mBqN3|QjT*c;}I-5crdr?Fl+IoWE2PE+q((Tk( z@G8Djlq5C%+f%wM?HPM_8A+Y7t>^pBs-ynyen>94Vn`IL$@A+5W2jwW{$#mHG;c-b zQNx#mE6K+a-qg9z3u!mG7shpj5x+({hAs_T#}7IwB(K~uC9;Rg=tuiCDPv7j+Ei`a zuyubZv1eYG@sM2*pZng0`cXjA-6P5D>O~}b(Tl~@SMQs2`bM$;$Gm-{17X@#sk$ z4==WfQTxAWzIpr!OZF)rHnfFVZ6*{qKCeSGh} zrQEMYZ@8OG#s1blrThWS6`V@#c;5GeBpuh5%9iLzq)lGUTv&cYgYu}mN$_VLCdDOS ze4}?m+{KpnsD{hm^>2GDpx?2yXb068P~mopTC*m?$T`rMbjdMgu~f|YUfv;mnVXY9 z`YwOA?MBEgIc7lF`0gbQY?bK4n&H%{8!q&(>BV%)flX|w4a<4o-gna%rYO@ZoRg^7 z&*5abrZm+V>_OdmJ;LqTR7EWu@~3is%h7J_D!lP;+U&o+3;6qLGdU#}J`rEGmy>_Q z-2IDuIi81s9DmXQ8*=xprQ!^vaO#+(FJZ`C`(OO;^36nkZBZ~M_~&HOVbWW2cYcSl z^~PdW%&w`N>XidjXC;@6sLf(WwVWk~GolQ;_s^gmw#0E2%xtM{W-`^QSV^hOTTQh` 
zD^iUYEhz=oeBgVNM?O1WO1EAUQtDucccElH@u9(u44ykq2kEBJI?wWm5=r^zCJ8hJhx>soc(3emaNhs)f8Eui0WvMFjV})>+O8uea3wg=a`56B|ljaXZcA z{v<2fiwTEM?v(EFaE|twB682OD86@~GWpH!7@gbTNw4~Mi&|$VCqJ$1(};wGq*&=7 zDY4FOlC*?`y!?N35C8X}EFmqi)pz?I&m%ioJGLI)%A&28{pX6HrT?4rKke)1O8hhz z-|0W+|GWLE5|;m?eYW+or4mm6yFBp$eUjQ{^WEd3vS;8-sc{BQa& zKXe=H+1><}=`Uc+zs`V9*X85u-f^H)eJ-BKJ`J_cOoxxc_<(vZ4_~s{X_K|Dkh`la zyf0i1A~FZi-{d_s?R1^#Iphl}jdx(n?hraPx($qkTMOk^(O8w1#sA3PwEf6_IC|s^ zC6gA5<&x*%#cy*#gE(hOS-h32S)Kt4l!0vuaT_%QM_rM0e)4S4~84;=*-$) z;poA&fIcJzXZ^U0PIX3uGUQ7?zbXQm5>Eh_NN2KUO5(rM3xM|_EnxTRD~MlOhCZ`{ zV8Zxq#9dwj-+VL!k=}XCGjSHZVe>q&Y_R~&DzXNR9`@L>RD_l+ZUZWRp3o}V>40Zw zg;skFFyY0gfpTLVIHrTp*~ltvV*p|N*flUDpNK~rHF1Pv1WvR{0EJpy*#D#o4cKMV z@1MBgUFGla%%EAQL;DHvUl4@H?X%$(jWA{-dl`IFtqKmTDu&|~ir9zZQoCmwf#LQ^ zSjuQ4Y`7{yF4-TE$K8$C{Dcu^v9Gq{79Hxw*}lSN}h4s3F;5*?G2#M96K`jb|~$lVE} z&#YI(O+K@6%O)rI?3xP{93Nno)V)GpN6zA=!MpJ3acMNAvIpI%WMf&N4{bG12vRJy zafzW9?p{y^B{nrc&k}XP?~9T2_qY_iuUrrAHAuo*`7(lpgk6HNcPcQ@ED^ufoG-Zg z(?S%tsgs$ebqm{F7vS>4rXp9D8&F4fH73U~sMlA6Ls7D#Gg)IGtFsGU5{Do@+*}AB zggD{(r_RC~lMSKbB`2tJc?1Qql+d?LGPu`gA*ElxRP@O}0q@@vjKhE)_<8>?o$+=W zGYA)h^?J$3Tu1@!_ji#+{3@ueI~DlBe4sv?4?JXrwA6D?l%UKAV^+t5=;wFQnd1W} z2EU~DMwEj;E6vd!=Rb5iPn<9L$dR6RUVze6tno{ydJ0=UW~5n$G(Y?*YN-+-hj&)Y z^dLpLKK3&+^`j;neJX`ZjAgJ@ha_z@s{%wm`iUN_R)&rD_RwQr#u-h2chGf65lUA* zU?jfW0^9915bM4Rs_L5z^_E)E(gjt>Vcr5P5>3W>fm|>v*9;j|svw`kG}(H;4!Apt zGxj|10-ZQSO&xv*<|R}MU6ZnzV#&9lEHw=sQz#QA^u~aT2|je5TOzn6OQPil>LC8< zLUg|^lZqm{=~bgEk;@Dr+BE|R&wTkxD;~&014_G*Ms+hadBZl4ED?=%OBm4>8GO*9 zH3zu~Drsfy67)8qo(cCkCk(5}0p-Ulk>pSzwHSV_qBmqRr_ZjY^qjjv>9qHBrM(0? 
z^-B)jT`-AWmb3-UUE4+lxuk(Pj@yB)vMH11K?3b_hnf3++h}^^BAO==11{Y@4O}_- zz-P%yH`vX4v!*h(Iecf)D3Z5*1K&H=_RZ=wm79k`;n415uFF-MjufxQfa zx*}R>nR})5Vv#TN9)Ro@~%-mORg(Hu$76bv)XHEML!pcO?ou&N4{%m!Hl-stJTXQs+ukDe#`NYCi0W$e;P@GuZi zPAgY2b9Cb9mZ9~a-QuxuOfW{z*y~28Joqh4{wD<@ZKpAsh9c(nH4`*d!we5hpph~ z;uDDe^&KA5@-If9;4DKKJ~2+#3*#GHh@V0myl9zL`Z zE}ZPn>>2um9%t6Ug*Qgf6Nh~K1}EX3yW8+vb6Kdex(l5-<$&MMQO5&qAAxG71bQ&- z7KrgML3E(DK*A=I-n+jRa0@u7Esz7&yvwAwd^89BxP#XJsR_Fa#XHpQR#J> zSVni97(nMcHbAp^vCN-vA;?%21#EuO(Dir*n#6OXGp8Amdpb`cc8ENDVRsenZ^}Yw zIF4zq)I>V#<3OFD7oC4S!4%k^LTcLjbp0bY5L|zW89X-)?0dZg+Rg~0o71m?bAh+v z@lFf88CcOScIr%GzaFaiW{smYbC8N(0=D{P$^`$L1xD55L1Lfs6^te!9C#aZ#Jagt%eQVk^6-L- zc)(;1xE7UxaF8!A=OK6e#F zm+Cg7RZn<$EX9nOoti_7CX$)0<|~o&E#f^MtD2QVeCi zkQtASs=8LS4NXn51a^aSK!uIFaN67=rsdi!)SsIQ{4;KeLzl-HtNm| zyu3!Z&{qTHWl8|$xvRjfXJOz(@@b~AE0URJZ%w-tpF!)to&-K`XTfT(&-7BLfNJzx z!O}Z*Xydv+Liy+fbo~7lrf=;#H0SO~aI#(!HHMghJjrBwW)?;(Yxq@Z8rfi}@zxrz3}(BW>8~$D;CJEgk#DpL|xP;bnW>RO4A|~br%H*1!G}&lTI!wWyQeV?47Xk zfjVlCi$E`e197jb3Cz`WL>1>7&_R!-xO?0M)-Bv1*!(>K@AzkfRa`$pJeUhxM%4u} zoBNn;P46+=NdRB7i;%hfQ!7IJxB9qujQ1x0P{*_pZ!gm{s z3^dQcb0azE!YeICV5S5uqyON{=O^IYy)kfhSQGrkvx0s%9P#;SVQ?a7BX|&F4$oIA zBGm>Ryg5N!Ps2|D7G^-Cd|r_myH|mKeQN~=eMS^Vf@7pjO`$-J=i7s~I}!}S5ZI5Olltecq%Z*`9$e^nK1a6A_4x=0Fs=gVVJ zog3D8*@f78s^C^}t|0mDYSin8M4+li@12TdektVoi>N=R5W&p3{##IU`<&kN>I{IB%APiHfrq`<)(o5H?(>ioaWeXuo zlV!2YGky$nHbf5G*Jp$eJ|!~_&IQctnYu!s59PE(#4n*`_GZR$mOC9|^@ve9G7IF| z^w6!oI?QwbCTjPUF5y2xIQ`;o871$N$kaB)3d17VOjtoNGgn%JmN?C#-!1t^?T$W6 zcm0TC+z*;CQbRpVn&2b-^+EdR=~!mP zZ4YK)^i83E=?um>*`J=0IhEX(8%Mur3ZRdySx3h??;&5~D8_ct7h(Sa0o{`QnR%^n z1f2?aO^@fwmUnbMqTeo@3o`b6q&?pk(Xof6k>{sM;feVLRS)$^ba8w)9avAvY z;JVY~ao1neyoc8r=QVy*mkq;gF3F)JI&!Fn?UGc4^H1TaX)mcG8y3@ly5G?=)*olI zP2W;#<+6zG(_n zUC;yQb)UxH%^RWqt03UKUy3otMnFf}3lKpj)W0qYdbSXFX|*cOJ}ie;G%Ay>c^v3E zmWY3eDbZ;XQQU zE(%IHhhVTpPDD0eK*Tb6*tnX(CP_l5_TU=ho^%)nygLtKj*Y^{K|y$d_H<~mDH-LC zTBFXJ7oqK&V<;&g9dGYTz=Qh&vFiyn_`FvZTMyhtJB-hupmI6bBOGI_M1R5dIZA+& 
z`U>vD#lUl5Kd5$z2d&P-OzB<$3fdNmEOdi`qts>Gv}y#*yH$#->l2W+iwg3}bVmc* z`h~j+Z=o*LLGBxyVdVe_5E^a zd|((qOkYpe9*M&d+ufMGQ!8=w#4%WwRE3Tyb_sV)ehj2e$6}51AHc(gqwvDU70|Wp zGw_=<1%KUB3ZzfT!=1%90K44=Mdv#UFV22L#prkf*Y za7H2RD?sr#Z$M-nMSp5lfP(2Us^zc+*cmm>_*2>Vc;Rbc{46`2o_t1! zFY@uMwo70a_(3IWZh?=M>w~d<&iLGc!{B5|5tgpD!7dH&#aPaHtolgeYrw6kVAQ|$U2o|;3~9MYCmfII|UXi$l!YRFW_-N0bcKyfon`|fJoIY z6nw)1Hi@y^6Hi8fP2dfB`>-@_N`6isA8`jGvW+-5VIh*qcgIij1>lXza-4H+KEBcU z6ie2RK>fZIBDhykaCuFkNPDXj{*tMWm-_1B8KSF5=TsHin3RWS_vXQRvrahBQ%x<~ zHy8INN{F(z%@I_)S>V557VbQ;87s}d2v^$I3LNzdu=0i{c=W_axV>CS&~e9CU|#bY zy`MRRE$Ut1q(muEF55@&v@A~$!!3dPdSykw)|?hhU!ElDC^v#9?B`v@GdSw>xPm<_NbjX6^Y^V(p;P$+Jil}8H)nK2kGyMFCceR6PwrUgdFQ}+_3q8 zD41V{=l_g?VbeB>HhSz4d>!-!j**K6d#;8u?aQ|dPHz2(rMecumXdx%Xvm4@4G3=6 zZ^o}Sbm7d!rI@9YgYO+LgO@k^;+iw3u=gW?1JA{SNeiB#O%BmOZ{K9_&Ba3SxIGps zK5&EbNu=nG5(D2Yb%##AQUc>+BD_!|S+J!31AegomtaV!3Evs+1gCsHf?Hfgc)b5T z+-2JVn{F%{C$152F!|D~#AzM=**!>T0V%-4>CDQQetzR(AZx}C}BVFw}0|?@_ zMMD3bp`ugwRYdahuZpZM-2$_|9}(!rKfn^xYhdU1^PZqP=jb z=&Pxs!1=C@NGV}Cj#GP#CuW_4S|=HJpy)ODXAR{s_x5I(5?m9^lOYCPJo_7Zp~ zd7Zv=avK=e1!z%kEw~cmf~McngwBUf0E0c(nc7Mt>0BiDY8q|`s|CX|OhNBbIh@}RfmnVs z@Y=&?=3dkv% z4oi|!5e!~IeO0pue^uO(JzWEyY(0r){|rR)fA^vDo3v2o{+%Fd;sUC#j1UeeVtXX$-G7R|E%FjT>%c1oDP-y zR76@FOB@IzkSiCU-A1tp{RFt(E|~sUt_IKVIR$b*HK-SL6Z%{2!7|D>5&D$~G0`+V^FP#L(wPozIjNfZh>w}m=~ zt>}jzC~*JQO<`NL8qk^MO*J{k0J+{*jJ8-n3r=|gEwyk)Wx+}&OtS&?YowrTP65!E zH5rXo4+*d12gtZ$FQYTC7R-1SiiX3 zj?xl9!QKM--rh=QR+1=0KM{P|90cY<7Y2CF2OkTgk)P*0G>3^~ey`4;k2XosMx&>| z2Zsk}1>Mdpk5fkv3ep+RVGX9lNfXV}QU#ea4FG%IZaT0?5_?Akfi+XwY0|0{-Sp%` zgRea7IQulb+LVQx&aA>&lXWri*a(&13Is_;bKw$VkS>m%jg+PZ03TrxT6`c5?fI}C zH<#{#^`7_9>5DJWp*eZLF~%FDHw?pTe;I^=8iR}Mrb2_4m+)oRDG>Zxf`6=G zqxpBQ(S@UZxKc+69(b_@{_WTX7wuaFuPu_s?KbK_DOC^F_6`aCR+lrozKC&%t^2T_ zD4AAjT7&oRSHp^;546&nIy_T%9ya=H0^b^JgBPPFNY8I^f`rs`{AYKhz_v9X966u} z@Lz^rBsm#n7B#_$6Bl8(w5{k0bpfAeXTWRtFzn&6n0-qzx={kcX!S0#*qVLm6VdOv~Hwl)dqa%l@aiNdIeo3H5iZE_sD|J`2y0W9rjFg!#j>K 
zV7h1%*jJZ^{LWrN5f|F%sz;SLOfC{=Rh-6Gx;0TrLjbdQwFd`YGoj_S{lK;V?9F0{ckfh{hR3no=SpoH;U$d_QH3~L*Ut~ zLtwXaB)lW4r2Q%l@UZbYV5%>V``@`hBijM^r$qvuo)m`mM`praI@3 zRRn~71+I|yfKPNUiq!+!Af505hpnS=qw+nVpRpSHWT`{AKM7oMXh7*Q9l*Zd38qFz zfO}hQm}9a^^rscY^cFo0q`apAH#=~#=wuV3@gwT$5obEgd{mepqRsd^Ev~XO+m0j- zCenOkXJq8pO8K7*6Pf`TO09RiLgg5%vTN@n=JDBgbk2H`-ma`y<#a@rnZ5NCLvF_O z=Okt3R-q%a*6FEmBBWNx+mjEPY$_==-&xc@zoqouSF7pTJ$L9?=r=0* zh&D3x|4!dDPXnIDB+Hq0A87TAP#Bd+F2W)=Sy^C{3r`1YeM;@y5tZ`SW*u3bXZ zkLBBF-1>qs*Ae}*^K#(P^1;` zkvZU=%WR!Wfg;__^by~q$Rt_@cs2)t&1w#`TdpfoBG%IG-A9?0q|3}!t}|omx(_@F zR)K1>pEJ>Th`gXA!*rYp0O?t$fI;ALN-{o#p1VsDoRP>yYZS)7(3jhQx3`H2wBCch zZN34%=t$sq`umxH6;r|ASr-t0Pc7Q8yOd_XUX7k#Wzo0O+UQ$u1t<#Flihue;Lz~_ z;CJK!2pRgp*yRd9k6Hrw^4A-Pw9g@@Wr}n_T@bPwn8A3=PeEDRxJ(AQgQ>{v#wYz$ z;kr*2FyUr7tSDTGZB|$alI3sU9LckSx9z_HnczS+TQt!LM#n*PT|fLyo`tw^n&^$P z7gotx2Zyacf?tN$VbM@EetqH*IA$mZ3*W{-^-J;CbNq_v-^MO@amrTsz*8W)`KS=j zeX~jw+?@=--N}N@a@+BmQX`If1Rs zUYHOcDwx|kjD@fwJ3tJhAtpU};t;kd}}V?2&gbNo%E1>!1Nz4z9EMRZFL9g@3p}sxaLbc4TWM0*8 zq(grz(acm4C81}7YF_T8n+JNql-uu_hNFwfStk3y=c$GCuZgRmW>A&6zuy$qrpM5q zrra0KR}TcsW=w|TzfLiI7RKnU%o%VxzaJ${)e&AeKLb^}e_{Bm%TS-y8o*B(VA8QY zGbL(1@Os$IltyOLRQf8g{EP$e4ORj6Q)Z&Y3HG3B)?NBifD!n&Hi$Vt`3}?KW6fB# z-=$YZ_MkEO??Mwr7X(k;z8!fP2 zI29;eQi5w+GvW1Z_uz^ybeYQwg>xe;zmotRj!>e;1%I&lm7}8T4|f5rG6jL*+zYt6tq)G>@DO!=YZJ-;Gsn;R zp261VL88v;96{Rjx9IfjIfABNRgetK7fAkIC^~Bv1HX9ZqTgw`cvp{$z-?_N?t85# z@@#Yvb<`+`+WQ~C4XG1&g;)=;a8W6C2eHtzQWI(wO@h8A3vu!6YQZzD=b&@#U-;PI zGk9xz3-whjLT7!n@t2ASptx7eqg|7LS!y--M}7^OA;gUITzh(~frn4Yt$+RnNot-3ck!GI<58vi5+CSk9aDZZ778t$|IQ#z6j+ zI&`X@fflaHKjMw zs)BPk(EkJ4sWKZ4q|U=$4Q1%pLtUs99u9pYEx{#rT$MGch|9b$(66Gx@ztenV0NPx z++p(ze6(X>nO+Gf?>+~;O>6{~PgLpMicgU7@Jgic`W$$X5`ilg&PNl_2wH740>|Rd z!H__G3_lv7DU)Z?Le0qlm?}bne-DZlCc$$@)bRnQC(x?w9zALKY@l+uhYq)TOh;GB z0=ne|kX4w269*>YIq_!9$fmEvgMM|mrP~!dI@Ex%aW-JSF9KihW7OUwDVpei1w?!i zm{-ce@3k$kc7{K0pf!QSgZrS(FcJ;O*ui&KcH_u|4!k`Op=jdic=HbME}mnQGumKh!6W)m-3P#L$w3Qm9RixY+4Pq&eJ~|| 
zl)3Et1AKlmgx0lN!~GXefS$@Q@SvbaDAM>t$-AWi&XP{LHU9$A_+W!x>G^?((di(( zSqN59wakqb@97^gx9NX-fxS#;1o0^gMFq7VHs1@&juaA%<@+Bv)oubt9X^)hxE z40v%At!x6gTrAI!-_ zW1(L`V?r`Ew)g_8jKk5A!zS=Vy9SVGZ-)1Fka*L9)j%VCwJ=>!iBzgWp$%IVkM+pm zqFGyU?b|TxrT@|^(^Ce~`|C!quXU0rVNV#8*H|lB^L`YWaL>^8hb(E3FJx379mP9TrEuO~ zA^dX248A>*hgSSsf`UC9aFRzgP!3K-uO>ZXvhJ?HL7xu5>QWa$v40M>G8{n@uOVpoHLCbgW)vFt+6wL7J;m$)hQWHr5~zBt0lJDM5PX-jcyg0t;CFJ+*5qH`4*ZS^^w{9B~*Avw~D!zz8j?e zYNlod^T4z>mdK;i1^LMRW-4avL-YG4n8RLq^tbv*+Rr}_8P_caG8tp^=a~b{8A2L; z>0>GdEsXVe7V}C|3!J~x1A>b^;nH6T_*kKshrHwj$ZNqOZ?&tit0YMO+l;%djRcxjf8e2$`Z(`hJ$|luAMcg17VMU?#D_h@@$SDuBr!XL z8O-`Bc(OwZITpOdgBxW93N}iDMR331YS#qQY@#bF&q##t_kY1wP4a@|figjTb{EtV zz(=F`0``xsAiPx7t{)QHVQX70zlXzyUW?;dWSrJtjF~ zqe(%y*4h|0ZYTf;%LD0v$sE)a?;{APc?&x1}r&x`Z^oPEyO`(D@my{;>Z?4tfdbDqA&`oD&-%u#>h`F0jc9ru70 z6Wk_RP0^$<*OnbWw-I?aEoXy0>hL2SWj?qk7UlhSk%$?2u=A98cG2l&Xk1GTsjXbY zE|1FM3%6&(9c5Gbju>}xa`!pDGe?%a6flPjn>Pa7o`L*~=JQtsWyoiOI=gYh6xO^* zoOPd^OF}k$L+>}oq3HpsXx^UL#MfsU$uV4m*Vc~0C2@uP+U;#vGrfZdd=HX8PwE+s z9tn6>%Me~#H4mP*+<`7`O5uvsyeb5O(c5AE|Ii%j;Ii;5pX+A5J zH?9pZ;UO+4Mrtz-llB3B;y>Yi=W7TTb^@kOJ^@&Lkj8 zw6{LT0ZU|Icg;9xb^R=EziJBuYpHgJ3(=RBMiN9iTl{Fn{y2f0Qr6qobNvx zI2X17ucv2#@Y)Z`-A4!B57fm2aqpP0?BCqVze_6??|j9DdCLI8?hzhzpoFTwA25+m zHSv6UCirnV6vSIaf#RO^RNZ)=T(-G@B}*@G>8S~zJ1`3DEtAE* z2I|5ltEqTJ_*!IymC5)deJK6r6#i3l3qIHiflOWw^e086cJMZ5vLR16N%=VwJrcn{ zcaiYGYeJiyzd*j|nM^e7fuvR~=V*2czn^gf$7sFdPH)Kt-{h4^o1+$*a@!MLwpm8B zpKgLmEi`&OstrH$!hHmQZHzB7$zf6~CKZ6(m(em}CiT@U45vm)VoO6Yj|8a%dH1HQax zfHp>+$JT9eD8O8WSl6YYxKVewzvn3IklO+8{W8M6L;q;IR1+i(Z{Zwn3aOA#!~E}d zsPm}?R;2DFL2p*U{V7*q0TRQ*mj-~#Pjy~j=Nl$_Jdx%SJ#05?BT;e8C9?VDu=URc znEhWo4u8D{MS9L80kfVGx_v6~zpsG&R`TR#MJd|f@*K{Ixq!%AIe0qnDH@&bPYTdn z^oJ-|D_n>Q86>3tZs?F%PQ9$J#i9-Bzsach+2)5jfHrb^})RpG7VA#_;QiEW#9 z2%8t)VzjOh^xWJS*=yBs&!411qZw(8wf|0GnrJ`Vmn6i2qcKn~w+l;U=%b+4wL(4l z+3@@_(Yf7iFG#E^<37E0fkBzA!mtk$aLA29PQTg;o{oxxh5bWV&#V-{HZM4QWd-v> zZXf=9fFcvx!oVx})v#A%HK(@c0bai7Bt9zHTN!lvB6sUx;dk@6#>0*v3UZ9t3 
zlL3OR}Sb?;oI>0Tjf?0b1H1qfF8RnVt9%03&aBhxL7<qMc+0IChjV+u<^YE()sWb z4HhgXYo>U!bqDT{fTCzN3IXJKOcfSftborm6v5V(2xKQ>Ki~fzC9lQ^i8L2+FWXho zpSKmr`p!iD9yb|9FIs65_mB?E1(tNKi zKw17~#BiY{$+ue1Y7CtuuiaGnCC?Q24+$U0gE>!7>7P1se_tHgZy!v$OkTnu{se42 zej19u^uxYCQrUj!fh2U_BfB5Zuta1V-1uh%lxI{xvv4C~b=()pP56cnR{V#2Q%^!K zIbG;tPyj?223NAb0Lo?GgGpI~_;F7I&alyh)MH2ZHBu5CTRV=V`Uk*y1&3klT}dSL zNx=2WeI)mRG+NU03959sk{=ctBu4Tg(RE7Vj4tVjX1^G`Xe<}5+nz#h-s>Z0XTQf? zX)*}6C6G@xZD_LZUc7e19o0HUz~21(h*<~8$XXLPyzex3QtLkMpMM55y?KR&rBjHb z>w2<#g)wm*Y=@!k;w0)}I)0fZ#9VqB{4X#FxGFJFFFBkuDr`Vv&o96UzhzN#^AyzE z)`~MCmcp0TpP`*(G554s9Jv&>;OCuD&`BW!UiHov?P^ot6U+15;b{igEDpoXC#AqU zXEn??WHLJsx56>{KRBy9irhCm3Dt)jVXj6rHz9NxIJ-zUTu>`{(B;Y+Ug?Br&kO4q^e_{{@=#FTd?MNr0}|hE*Q{{#;;^WJY=6W zJ~SqU%>VlZatp$NO4<=1DWwFy4^+arQIeq0K^Lo>OcLJPkq^A|t^#F?mq1x25%lXq zC@!vq*Nkq24+WXnt5*$A?HGqb3>N|^wj5vU6bB1_+`$)H7F4!<%Y}CizvB8o`r)*f zGQ=n@Q{-vz;tnLNhPOA$;tyYw;Cz`joHfOi*>IKv2W6i_pLx;Trj?6v&H_U?YgZAz z^(zjZFN?-!pdYtoY70JjK^MzP3~+r4L)_AUji9#qyzrnU5WVjk@Wa;^pyb0z+;K=9 znLoe1$~xkx1c+tI$8^@rGo?HB-Il0*Si8e1DFcr?TI?dT{{3L82kOOO8ycK?%*2{^S z;f0G1R&etVYcl~Es$g*GQ%0wJV|kcl0p}sb3lA@rhL_*TGc!XWQ~WxgR{V28XjZ1p zy(qE<-(ub}hi|5GIpe*A0rY)Pw{Qb@&q@u{OWgs(v44eMn$Ck+cU8cgg~z$S?yo^=*-21!?$&sMUWvfX)Z z&8dyR;8hr~4Xk1eRo2jU&a0RMpJbUyv9E=AI2?SsXv689+z-^(#8-~0z2W@i%bBnF zGv;i!R`*9IA5N<5;exV4ttUn-CcOW zk+ZnZHUNi@Q{$_xCh*=s|2vtWD??%;+w*s zlU2r@aQ&Sp;Qg~IF!Rf2*mGSIF^Wdm|Mn%67O99^=g4ERU$3!$@l;rTG6sDMR3dMr zbconq=OOht*FsSX2hpy>W;Y}j~QZvn-*acS*W059js-p{xYpR$4i6bcRp(fV;ZiSD$ zVBm@2o6xyP0W=N9V86IsXqICuzVPHT=byJrq<1V~-klPO$E(L7nKhreX6#8Cj+H>? 
z`Y^KS^Gz&%{SwkuZ@|Isfc!af26b?=iQd>T~g3P#PJ%)C;clU_8Wpizb`PCA*bXdd#xy;6S zWsZ=ZE{0>C-i2pZ?}zCro8VNFEx`TZa%}ss7(T4{$625v_;&Cob9!3=o?ht>U1#-y zJ(m&rd_4<^%s70aXv}^ zs7b0@&cn4qORWkTu7R2((h2UDA^2^^mUH>7+4Z)a+gFLsDH zU)%}l7ZP}0Zz0_Ntr_e!O~6d=RnBu zQN-dDp8pSXi|z`4xf&Pn`!l#cZ zXt?JzBQsdbtTYW59v4&LCQL{H_omq~mLmq7!fA8P-_{CDO*#wyOWp;Z7|h{30^T#q z@5>lX$^aBSs}-(Rx4}i5cYy8?2zI1i<7`U5RPIR;$C?*U2ouZ`!LfbD951Y;%LJv| zwY3^pQ9GG4w0O@f*@iiTVo$gw_pET&%ag#lDoGf={X00b>LfRR?mA(wU3O*J!X9Ds ziRYj}`z+_UJCti+Qn-YthMe|+`^-p;F(;R4z>yI(u1I1!_>vt#=LHQ_JoA1bObpb7 zen#HHB_})pz3~-t?B&R4lrq`1TZ(U^hS0cKr}@riDb^?XHQ(rcnk3yV6z#|o@gv`l z`0?d(61e|9@hUcCt%sf=gFPx_)17LO2UM9)_E2MGZyJ#Pn#Dv$`2lHkdWXCg4YTVP zXz{x;JBduhR@PqKiv5|9#%g~M!)*0D-m0_}%{=#n$k<(EYcBP$>uwjKN%e~Scj_{m zxv7O$xF3j=dz|>!$KuF3g%frR=*Q4lVar}f=<5+I`bk=3-3HE@R z5gREnp1&|rhAr$kgFYX96eJ+}TACa^*llgCE;w-meFUm+T=F`7q5|6Cu ze9Hz~SR))l_L(lkImiYaJ3NtBNMFsjz9_>#RIZ^)10~*YmnA!VzJjcOo`o7rBT?o{ zCpI4^6ScQlSZ~x7t^r9T6e+Tw#&?j@U!uw29#wuq@mZq!GYB2r8ATdz(jq+rz;Bd4$)X`&rOIrui;X6Nz!=P4*9LW0sKuJ zVUoEL%s=}G&bYDwCT=bO$3kDjjCI|>`0rPkT5=6~J^hBI=8Q2VRg=(^!2}|+z7h&A zpClh6hq3Cr8Z6UL2M0IDqp0u=c>GN>_WD2wF-us=jwn5YPrBB@w9RKBsR+RLW;>z6 zXAWqq_*2r5dyxE+R72gyis;9QvuNq!a#GUW4Y#?uq3^XZBF*3m-ZeL!KRM$Iv^jPb zDYrF|kTgP?t{C#}_w8Y@LN_w2TuN@bB_mTo7g@XS5Z{v#P6D^Yp!20Ocmvh#>^YB1 zaJ1tOdf7>Uq4HdIM6Qo)%-Dc-HNMB2dW?wk_Nna2A2Z2mSWm7`X(Oki^GNZ-Y_vrA z9Ql5IF=aJVMs-`XyQ)>#q0ix`hu^6-4*bUt!pX!gmeLKpby1W9(3o;^&rYzBHUk*3>--n*m zZhU=-EE<2ojAZuQB!jE6aqTWCbS85jS$HcLi5+<(EVi~r-AEI|yS?TNhoT(b7782X(15sw%1!OHrnc&hkKD7Q0PxLRBl zKK@wF_#M53gbzX>yEp^h8>)hHOJnfuT@OK_WhwV7?g-qIatQaghr?;7W{|ZqcG&%R zH|MQ51sm-%5#E?`8aqGq<77JL!W%PAfwLZ}xbWCVF1M(Z`(ZVu@^tV7Zt*NF?&!!g zJR%th&nd{EbdmRxEX9Yo5l3RDx*7T8YoKi zte9l81AoUO@aleBGXC2dxHBjaH)@Hza<=kh@M9p99hWB9@W%wJIq!naVt;V*-*z}K z>;_*){DJp#7&xDi!_(|m!Ea6Bpntt2{B3B*3Dx)GS6?=8Vb%L!lcXV$zjXtwwta%Z zS}7tEDU16`OyMu{*KmbiI<_%VV5=ViV*OZ#O&=cuZbS`(w_?VeX_F3^EIS98iO-*w!=xP6H!oC5LtBD3T_EhKu5}Fia5tMFlq;Q)zW-85z8a3B0tjd 
z#GDM5WDtw|&y0Vn2RdEw0pBjSMU$80km?3we(Pj)V$$^(ZkW(OEVVS*U6D!9ck~W= zqhbM8nf^j~hNnnf(@!{7G70WX9aOSWO?D#wvJUnTJWiRtIrOJc*PJN8sNhdBzP=qme zi&?ug0REcY&ha-*Fsl)U6BZeOSmSk|JXst&j48r5nHD(NZ4u5oJj%_we-BvN=Ha^K z`{1bwUznRR3~by}Cd%iDfNZ=Xe!b@glM_9S`QR1GJreEGe=bkt)XzQRUjN>MrAB0N z8+#FGe4WCXp1#3V>=w`gM#^CMMtiPB^#G`!@lCi!K>#N%R)SF(Qgm}?Fr1Nf1?=iO ziqHJqg+uluT(abv7v_=_C zI4cecE)HPdvTY*vyq^0gF&R#}z6SY6Wxxi%5-w3s6U~%!BS$yMR31>ZBQLTZ0z2IW zWZ14Bu1Pb;UGKLF>1nuo;;H z!Bz+ExvCc2weJM8$YBd1SIRlz{v}wm=sF(w+77L3ZQ+H`aY+5yE8LB;k=g|da$x;G zB-cA1>SaWL3l9V^>fJ85XUYM%>+?T|PU(=DWeP;1ELON^%>iiOi%^i=N^-k80&32k z2fNeXz$U9c>=LcUpYU#k>+mO7QoNRYAF?I`ZN((rW)8QlTo?Vmd=(EI6nQSx2~pM` z&&#Y%$0wT%QS8|Y^3!t!CI6MeW)~^clk^on{Mw5ir>!I^MK@si4>SB`O$C;Ec>~F- zzr!g;TZ#GY6w>Xol+@kofzvND(rs=*a5|hT{rLx7*7yNt z*-b=O?)t&-)+DShv5^~>@e(T*s-TmdH*tT}T_|6kO;+Exz~leE=X!Kj;#6jV(DRcQ z{-+dy>x_@X-JlQ(#b$74rN45Yn`?3A#{t;jAck{sJ{O%T76sl>-f~B)=j5D8%?MG5L5M zx{x>6AVf6tNS`C#sYhVa$?MFg&7YZfvv$JS=hl*~xi4XYX(_CmaJ8(+$2$284)|mTeLhB$2Dv@JH1;R9d@X}NPm`{V*BEVEajM z^}zvndm@Ej2KdtR*Gj@GVi0~=GYLmbcObO^q0nCKJ+Ns{#gB4Ru!iywo>Vzecz7Tg z%EhYVl982gD0V!N%5x|EC)M$%7+*N5H;OOKvP2tMRpGyzdGO|;8E~$P0dfzB!ZQnc z0s5sZ+|*!*uSXBS*idU6FVc?t1DdgKL^7T_aRwZ}{yZL!+qoBCov`K>3)m~_bv=_m z9=h)PDEx8G6p2rb0JHasm@rv4I9mMx?kN0)=a*WN#mS-E<=fSyv*sm423v{kT@}10 zFSE z=eBW4XGYOd*=;bsdNY1YIDcX)Ww$Q z%=2RGX_broy~|1OgNxWR@CMoZd{AgO+(&}vpGBR12k`5QQ<1saQ~aSQfhgRbjK0^H zqn0*DDA^wcYmZxz@6*HKNQEqV`>_lT9F7pG@yRf+qzNh%?FAABUy0gQk%qOmAJ0DG ziPH}*g9$ggU`U$}%5ak<5>B_#po%MLJEX>5*L8(gmiu$Z;uz@CJP-DXxW)LiMd;-- ziil5gB|bkF;Xbu!n0#{-+D$)6s((hq%fB0-&j1I{e`v$Haz^}{jf%+6zzALW>q#o_ zB$8#)k4Rl$De!*fi$)ty;y>Hxpbvk}kd@Ds_^S_fNW}1NBpR|w@9X~%cQFz7B_|*! 
ze}9;q4V|$gFV~ANM>%MqecpYaB~VkUR1)@5N=|1vtmb z4Hw*RgF){TXjR)xXgSLg&)z9I_gqneg*CM>sc|C?-rNr6`1e%qEYZUCD)nGXhBCf+ zQ5pv6x?r6%roxKz|8X>!}4ft0Y1U7JWXM(tu>v!RP(aw8S!2owm!wPmb z#L}vwIpv1zRpE7YQ~1g0Aj}?|3D?cC;vOw22f@8#fUEspnKjoBzxXl<4!!fh-@85v zb=u5Cy3K4(^TJ24FJlQQeslu1pJtFr>jiXaX$kQxKEht&>PglKkxyp4DO&S>JZd@e z5k`iI(AO2aQODR*#0RSK<3Bc#Fe7=itke_Tdi@%0QPSXrkG6<*8ScnW(F`?rY$1qw z$=fF`K^G$)qbVkOyjbE761?I#A71nq>Luu~`<6wLjD5rCRgf3oJ#w4((RoNrv|gYX zpGbbi`w}+yS{GiH>cPtXHvw%}n98mmKbbH2cp4p3o=B!GN+v(AA7$@Q5&3NhE zEdPE~jz9Cf6Yb3`C)Fi4NP^~3k{+`ju`#D$_E`mVs7{0QT`6VLEo4xn9*@?KD&SBQ z$c)%_(9U}vfuVr!-2N0pc>e8qF7@bj7;{h&wECIRLw6Q}Ym%FV%Ebr3O)MuY(6z;X z_q*V=D1-L6Hl8{snv-^VoZt@Zv;_z1bh%QaB9OW50k>0hp6P55hssXo;O5{eJmhVH zbxwNYbAxx8(53)r-6JbB(RdCPMycb*yhgm%{uuY)%_49y(H9SCq```XT6ov$#qdQy z7)XgKhME3HxFE_Gv?lb?DybzLwhM$!3vO}p+if|^7#IAsZ;(q{HUI+4*I+M&0AYZx z1Lun#0@?FwFz=`ph#$`8R>rr&hcU%m&S7U5>w5vMH+&Dqc{r1Tqg}*U-x3)qCh)G~ zpQC~|S^WICgSgK}8fQ(P$Hguopu9Mg%!{igPmXA@TkOTyD-jZ8<4jeeF}s93nmvr% z9~IzoQbtZq3?ngZNicI`59{i-h5TK4g`9S?WtrcjDEp@$Tm8NQuivA>i-|moZe6Y9 z>tT_{_hb*7_d6V^kq5*nEQHOjzrj;^GYI>}jsF)W+Czlo@>OkC?2g05#O3F573sScw2$-l^HFp7BNR1{hD1I6|)AYImhAob-qPR=HUTNN$j z^;X^>0g@S{=F(L>0?u=R;}y_}dWe)3CF4$0D^|TXgy?T|V!!kS!@6(tK;hUHI8j3q zvi7r4%2yHh_U;jJ^q)^;*l%#MuRD5BYk_X<zpIfr7sV1)c7#YzJ_4p!iWs}I24te}JbJR?9qH%y@!3z^iDRD|Ix%Ak@5#Bb zHzpY3375rK_&Ex)_h+(qFFhkm6BnTyf(D#xVoY||tFi`H)yQGlpXA6nj?~XSLl)Qx zQ0KN>a_!(|!uanY8nFSWrcT8-crGg(-5{dMKb zy&`|``-!+_>=K;hT+b<`{pR}4#8;M7MG6z&YNDkIyTO67x7@Lg3t(!kKJ2tu31f<{ z3v<-Fgb6dez}(HJa8lJ8$j|79ORkIo;46)rgDRn0_Emgd#Hv18>Vv0-9ODjG-N$YT zhvA_%1-wnm3Z5+41eS{!c-dG#&TKRU=D%G9wVtnn(p$fBy8Yk5fBx2RzC{TbQRv{7 z&6S1*|NZ99SW0sF59@HyGY4E&t_=c{>Pe2>60~wv4V*f>3k}C^BApj!v(1;zlYd*n zSl!;a=#89|D34Pe#v19uj;mMEj!(PLsvl#-QX`NokrHKzbtoYx`Et~{`2zW%xCAzP zjzjkHF-YRGA88Y&@}-+=kn#H{lm^WB0zoF(eaVx5vhpZY745AqpkOkgauND%V#(XS z&*guNSrCT<1xRXeHm_?J%a-k`#rgsAtixx7dh-skAy@yAk28&sznu_He^^Y~o|>@x zCKQqh_r&?2C0e}ymNK%YCJwp1x=ntyE+gizFOnJayJ2Rq1=`Uq0S(gK$iC;X?4_VG 
zXxje+`BoUCgS!Q!@2(H-$rsIF=d_W<_TS_}hbyWPO7O3Z&R{w9r`+k;o1oW+m3Ybb zQS9@39q~f(D5t6oex36K7t1L^8MTST^1cr84VlToL!tPQQ8&Joc@=8*YqGbA4e?IY zBK6x#(CLLXXx-TgG_89xes-TBR^qy-K|zD0&UuXDCws9`4{k70FQ*e*)m!K*Bg=P3 zJQe!MpCMfzeaPays{ESv2sBaq6!ufJ;Qq+!qB+%-#H3{^>fsBJlVc%TdnW=NE*2xF z3)iB#2PYt#+E661Z94JOuEf3~ab(WUQ4lL`#?xjJdQvReG1*UOy&(_EJ$nEH_qX7tzm*w~Bn!bZIN%>MPJbJy#CQDIn@%B42WqGATDsM}L3KV|%b3I)9)W zhwu1^4m6tcpM~Rab?+?_-micwMiqFg_a{*C2@9fjXfFP0Akub^7n0eQ#>m6^43cmT zLSGCG(B4VjWP6q`I;wCTR$pY1Z+{Fv>qy}Z%_Urf{x~2xTaORja~rdwYp6q#75U*`$MqxbxFPl#FbUd^chH&P?Mbc*MiLNt58r z-ZrSPZ929|jsmeBW86;B?*GKl87QvTEv(#sT%@(E$J#d>ad?y?d~@9tH|dRXvsZ}D zf7RE8E)!$v%FE`Q&#FhD^yFn2BZZ*WItys5PzJXgvS!Se6mkCEC%6^O3%G-#?47CR zULfeZ3>p6_i_z7S0bto;;f^`efNp*Q-k&fTgR4DIEkPO{xjY?D>FVOzSs@qXWeCp% zG;vp-`*IPUtz6bZP4KtU4h^f%6nR*laF6#2VfjoupxoUA4F+X}t6f_0afLyU_vLhD zxajP;QfC@cy;cTDw=7<=stPXOEApR>lZTyk9A-wG=^xn&8Q=x4eO+Aheq-JP=4_f=vBHL+pKZI68fbeLm>jYtk4H0m+s=&D;KfV?L1)ZybMjN0pUPCA+HeiXWHt+L zDSTpTrtha!#oy9CL=!aMBg`8S=jJux8RHP)FI4FmVD{vC2@ii2byRL!!p+}%l35h1 zR{13>oh}<%#2IZJrN6z30U_7F3)AjJgOnLc+*8+J@RxSQ8QOp-)D4mDz$PSFc7k7L zEX_X6EanIGMY$Ai%dy+75RBd!k}iuyI8bXmzszX>32Q#0%9WxXtqXfGR&F8z*QT@g zj!KZa6YI$CVt;Zr*BL!nQp*lp$|HGt=|s9goK?$GXH)lhvB?Ia)UcOz;b=e->L{cR@h92Eb zL;<%C^ST!mkl5V0Y(T^kcHY97?1yhp$=wx$$l~HrG|6&3%5mr?TVGy-hCY|cSk`aO zJS>mD9=neaKR~1p^?+Al28jPKh5S+-BEI_jkX*+){>!tw=v;6#KeF{QcQdR7HyCE) zly!H3&+cwKDW!@8m^Yw)-#4hV{SwZ)!;r9+Tyj)m1nD1IiG5|RlH*z#c>25xaEWgz zyMT;hyuF;n7~Vp6Hi)rnX6mzQ%`N!0@FAJJCJIHb=_1eDl-LhqH`v1lIVeaaiUhm+ zvIX=pUfT3JPWwBXKc8Sk{yq)mSNn`(ucb01;@C1IY5fo03tG>c`#7QCPIdOJXtJ&_ z?m!2pTqD6!57BRjlSri@2kl#Xh+OkGLK2iXne;sc?+iG|e|O6zbw}k%wb5s|)yzvTr7eMUMIpk?Y}u~#~~5hP>f$3YQ~=kjdqtULvtEr(Z`H;xc$p- zAwRVOM=#Y!PZwa^xuG3MXN`bI!N*|MXbM)CW`d@y9DpAvQ}}PwI-s!ylKC!~xPG>f zOSX|EGV=cT#?0N&*`^lUoxmd1S0;pCD#}y!%@BU_Hz3-rr$~{hJo->Hi_bJoMlC0I z@b3_|or_G9~IL)5=Hn?2`lLcDI}5S7Xu+%jH0G3rqGnYPho6b4&wU}v5z;85s2NLNJAey*&EWjkO-64DLwJ)v z`JE+Nl$0(+kr;neRKtGUkXVlgg(Z|;(QQ{hIRELxnV?SI$ 
z&p-Q!vTjqN=FD%U&zHZYpJyimk1Z^-BeaR$XT&oxs}?e{LQ|kq)J|=Dx`VNhT~2Qv z45ugj`N-_PaRt1+{)ZX~vSfa=O$K8A8$p>@Hr*|~oZ0efFFjp87?}UJiLt(wOZC^E zp)8{c>8H8h8K(~unFHU{nMlJrW?_f92v+}2+qgZWdE`#Hy0lQsDVkpQjnKV^8>o(B zDYS(5F`(O+NM-oPGwZjYaiIZS=~9YFs>?(Z1LyC<#^; zyuglv=^jEsg49K7rl&fkD6L7K-ZqJvTc0KfeCHq-buFSonmMX5FGNtW*jQk#5=Y(W z+DYxzG-BG5?^E-Whp76zNZRB6Z)z&@iMG%k5#;OVQ$OlP1a6b(Q`hrP(83NUrn9?A z@b2ne>Uc(-s57dAesFXG)v-*5_VT?Z&@a(q#K+mtwi4lj$73-99qddE-wmMu1*cJ7 zZg(j6gAR0VXtF2;rg5GGb3-+WiR;LqPg|b=iy9tKF+GtK^KUyli0?7Sooi1nLaysblLEn6FwMv}^Yty2+@A`ICAEO#drM-vByH;B*;K+2#m_ zL_KyrhbA#=V}fb$XBEhF_hKAX?CHGIJoUaSm@c%fWK`rI&^v}K7)eoI&5J8b7@LV4 zoo7-;&$7Hs%_@INWsV)8kM-NrMC>=!wa|uk-{b}cJ_S)fu30lCmqr8^JG&@(8VHJW zDyb8uPPF2-bb6LbGew16XW|`W1rp~^QNyP11vm231@Tww1P7lHfytYL)SomldZCOl zJ@zn0Fsu6!J;rO%4r?N)D|%vr5a&!P$Yd$il>UVIFgK49`RA$lTd7p!nzMAjjv2FD zyjRe4R6w;UoTHXUf1#E1rZSHw#V}{4d=c!iI80v;G-ql{qL^~aBmrj}M`?#QQqlh{ zXFMjXqiJS2eYGh~AT}|Fddt<&e%?Bi|!}NcRE!4z|zXiF761;k1ME9CC zQ6;Kwg6kVa9XnQ^sI}ObUL#$^B&vtdm3=a_SmAg^+G#26eQgaBTu>qKs7#_23)87z z7td0w^i-KG`qfOaT9)9yS59WM=Fb9WUdkR*k4;q8a^IR#PzE#DTqwZ~NjXu~-{Mcpfb&Nv|WuJxK4+CIMeR0%m%NU>%f8Qow>DD+zpt?N1tItC z!hE~!UqAS6f9Tc|>Po7uAa7EV{goCwLC5YILG{(O)V`Hx?cX%Nx9_RVx7W346%=;6 zQb#tGQ1HFJeeYe-=Q});x~G~#-R#w{U#2E2n6p??uxb8jYJBZlLFXa|L0N^8J&&X9 z&q%cjrqtHhzdsZtIA-NY4di|kcpT6ZtoCLEpFQW=FHe=Ue?m*A6@t^=;?SL$*HDy$~<@RAU9rOxel2P4J{&yz&Qsf^(_$Cp4(p z{pM7o+d?YE^DLdGb&L8Duf-f!-bjyIb)IUxWK6l=8=PViQFg#NuLk~aKyl(Bj$&T#I3ng8U9sr}AV8H*}MS}8f0cH|2f`GdQ`_<1qZ zcFoW9wiO?lK5YsVfAFT0wIvwWU1oH<6b+K5+A|i4<<#?P0rf24Fs&`0!@OVomfi{9 z(TePOX4MO6=2-t_`Z^y$Pa0OE{%nY&Zp!P>-^CBnc;-o}m0e0FA4edi%$6!JpTnf} z3+V=(EV^s?MM{0se)`qW9Xj523zNKOHobAHE3jTtNKK+OskzZw)YcpRl-8~Q`uzb3 zdYtPd=If+gbUb~ET9vLxsSNkgg@w8Fz5GmS;l*}An_2-4_`|gO#bqG*V+(EN_>eBV z7RY$47GpZ@3@~p^3H8H6f^m9cO&|R2L!W$?!~}Z!05OwNs(xM{Em7FTd|##mFkN;j%%`vQ8xw{uMMw^};rZW|pp?>Lipbdc^7=F;J> z*3(tPqPB#;+o+&&1Nxto2c7yPgW5R5l3rM$C932xr5OaO7cjEtB&A?DKo`|^(o^=_2VlsBDV?p)s#C)ljOKug!S|@` z#(S8Qrbzm#q7I`c^7fRK-3IP|V`&))7iJ{d5yY2=f`)bb=t8kzCi`qCZPI@MWdBNH 
z>YWzR*JbC>ho4l?T^kg@r&D^&<-v`N+lfKOTd<39yRFHXuI!_`SIf``XPMKc*G|wc zwaVyopH=9vd7*UC>;y1nl@eVfh-BofM5x(k69&F4rQ#PA(>1%MGAbJ|6F5{v|542a zF0&nIqlQh?>6T#XCKOU%Q=ia}#TmMKcomayD4$-W+fRL=BdK4r?3hFRM_SUMgZghB zq`owXF|}cR^i4rN@Y5AA2OU(H5Br`oiz^p0E-7`S05Cr5Ey1nONMN=kjh?wVfN@NUpdZzo0beg9Fh3nXQrX-8Q0i~<>E#XI zm;smBj98O3BcApj6R{02MW>{gm97P}kwgk*bn5|S_H`}&B#)!pN

f13oLvy3CEELYbraJdPMOh?I+JFZK2*2tfpKBW>fpz--&Dse&ScHRS5t; zB$6Q(h}>EyQ9IqZr2EoK)YLU9BCtYSS7Dbeb~Jl7f1A$`nQi7M&a1zNdYHDGP&{!T zFF9F_RnIcPG;5M!`-~Gvz`bnr_v}b!Tz3~r!q00&uzKku;rd-J(9Z4kpySnr@V2%I z@L=hje=?%S_0`rNVCwsBrk{hqq$9g;SM&UIJD`=D;)ILVJ_7Cd=Ku7-{=ODqk{2v+ zRJCBHYFh3wp01bk$$(+V~+&uMNn zmcf^^NT5@MU>>|3>R+1-H$T}!c?<;*P?s+8W#b7(rXR1d@r^W>n4J$P-C-sC@9N9Q z=f%?KlfzOB2l}_rAFqYyF^8#r`2FkN1R5Mj)p{8tO8K@-J2A-|VsQLo;;iO5;+QE< zwBv0Hnc`GUE-kSV75vqvrxdsyIeCHiS(5 zkxw3vTtb#~FC_il>&T09rc=MRG?TYv;zi4IkKj+|TjJw=v_)235~4L`ZDeXiDsg_N zF6DBefGWf*DUZw&^oBUzGi|X7^q!1UOe^;tdG3YI%d#M|u=CgZTwg)66%9qp#-|UNG zWRw?G2EDsm%KQ2GI&jtP9RFLR9wcAO+85~eN%tD4$S()z{_>ZR^Ys-|7(Ba#Ti_;( z)1W{}jrWD`!aOsae-sLQoG3iXW%KTwfifo=L5|y_ zVa_%?%F$n%NM00+*S94zdz)cXdFF3uE`{YMsXfgXu^lB(#JTGq!`ElHGSB|^dzL+| zK~pm4;lq<8h)+N4D55_VHs3#!k*T?tJ&c>FA@6m2VV6zs(3HN1h@EIEYTe+9&a%`& zm34cO=PDBsDSt~Obx0p=zAuOB#MZ)*-df1a*Da{3IFH1)pd2JweFDm_v_nR3X`t1j zT;zIw2Qu(G5e>T%23f2wfd0;0hH^T0p&42ik&l6Y;i`2`=-Lwn=(5l})T(R^*sHe8 z^z%j|WEp=S0uH`Ld}bFyYo9HKIFsz4Bn=%QF?j)Wv*a|t!^aUk-_QDNJ6$jZmV9mF zU0K!4_4X6;Bvozrmrd<}Wv)wr)3e6P3w5R)gC~g{zC2-q=A1u?js^oR|uh4BNVpXpBZby z4Qy-$&E@`?aj4nwBdGb4rNHG*7+>r6*w~5{K_Os$-%6-7B>~c1Ig9|qA$(AY&5PA_ zx(^H6Y>U6W?}wLM+D*2GY7?U?-3Wf4h{X5Hka0^c6Z(7g@m246#IRuyq4N7TF7v!x zoLgcB{=j1!IrpnK9R4_T46%&KiP$D(2hs9onK1s zcHBnpf0T%?=&{B}pJfu&uQbV%DvG4$wyOvz_A!&L*iKxvjU|qUrW1+w-_iE<8_}Y1 zuh0RPrEtf6NqEg&8F6m|%sWySguSYmzMJEg!7*}tYa$;VBDnWAmD4b&$T=&z`ETyM{45kweVfQ{4jy6F z%w;JTn7@No$8g~*#lpismJ7Mc!;GAh;zhzENj%}CdQ;|IBk>-I#zuVxnC~<<8cRk7 z1yB2hLFo&J`Bx$*L(+%Si90R7P>=lxR(P_7`90+H9SjGWi|VqA#54s7?2M&1;{W*w zG#Zf2wD&(fy(myZ_ZG#VZse%V>Y}Kh^|ovvN<6N*UURh+}M(e+1A-(`UVlW|J3*R zgY-N+a`I!yKGVyz;;IXdg@f$$SFJh z)}-8BT(^Rs#?cojbKaT#r>9Ahu0WwrJI6l$|MtCm*?q6=z8LP`_)`A+cdERI{p?vr zVy!mt(}n}s|6dFgB|gj0*dG;dF*2vQ(OA8;?!3HfoY z2QEmT#I$3n_XM4Fdj|#;{YJ|_853I$kHe=J_h4GX;kd{A<9I=?KhEV3p;g+Cu}M=U z@uI&8IAL`Q{c`*;_Os#y;XH3Q7Jo+>zmrvnIb>O5s+T@t-AFRN*qjoqp?b* zJb0fAJpsY1VL?&SLVl054?b}~h@q9L=*Cnw{`8&&dzPWOwCY^NBQGUDfk8as^O9ix 
z@4sWuryJES3s)Iwp;JygLhUkxaLu1L`8r}BRa%el?UNyzH(en*-qsT{6OW3vY0RW} zBMYh9?MFmQCkv>)EsbQ%j6_nlnM)4Z4NxZ1Cn(h$QDmI#Ai3mmoT%fQs(=&P?SXvEIxDYUOv8`13dL4F$l zVfG8p-fkC84&M&C-U||{cH9IXhs+1}ud;_S?ed}V-^RvPXk1VQ5A0-fzv*w^%$fA^ z0dLwXAEChsM1c9_3GQTy{_T@@mhFe~JI)IQ@AH{)zvdeoU%mx?3wmd+0c-0If=l2Y zrvFx?M?=uIgF>p#n&C9@`UjB4CR*8iMl?4X3+HM>)e|Bic^g?!DAxvsNhDJBxj}^I zkK6b|^~ua02?=0*hG{O|dG9HMW0$bK)=_Yl|0k&XL*bY`QsA-K=zYuM`1xlG36=O$ z)G&4repSW#BdrTKg~gRk$KOTF!4LVyk&{=8YsG)_Ax!2xChKc15HpvD5%tev@Pl;< z#C+LVCjM~fJ9@wN2}<1ECP>Xt1E*o;=&grenDytKiw;;g(-zVa=L%bs=PZ!@ zGR&R#vmH9!@sw|IZ|olF?B5~o#sjRL?s4DG8L9Xx&Qbr0pWg2z@VY{-W!d;kU4A)$#U6+0vh|M zM-3wbnj4K(KQk0;v3v+_k!=9$_$NRq@oyY&Z-$$f?Zjm}*?XB&M_He3n#=k(Fu5ud z!t#BMQR$IiP|bJNU-*CP>Fp0**l5!#+!I7_&mAWT3)5`jj^}J1#zXZJ5QotW#48F# zbd!>?x#yG6T)Ane>@0iCwzL+V6w{6x>C8r+cYQ~??q#A~)~Z;`TM5Lx;s%np{05er zUXK_=&PI)T-B8!ZrAX^#JM_z!>1d2z0BY}h7*mv+2wTrlgk9C%q4!-UVm1%Yqmzmb zB0p@7q9e1$VQu6C^kB<0sNu4w>3ZF2l9~zg0_7q0Kl?w{tmLrk; zT5;`ZuyO|M-gJfOPlZQLK>F{eJQL&rcg1{==lAY2_lQojpnt0?@Y`|hdKzT=iTnKj zV*dv>wQ#0ey)|jgb>eLrO5-TX{Fv5o;6FRkm3c0}vc2t`vO}y5KPQ3Br2{UD;Zk!X zfn^@*Jk|TGk6GSqEkO0JGw{KKjT4X7E927GmP4!^iRMOQeO&9w3z6{z3Y8aeE9w{V zci2^`>^ijw#o`7`G3CTSH^l)A5CxNX+ar z=%L!@5M&vO9QYE#U%i!`d)M_}#R?s4@l9{n;S-Y7$oTjSTvhcj(KLG$2Zt2NME_J` z)Mz#y{Id?fY0tw$Z7<=!zDeT42QBf?C;7x8!)N&D6l3DTfdnFD^*(&!s+GjCjY52b zbQ;n5h$2#Q640+BdFYCP#Y7{eOddC$iOa=fc#YO=LT64n@$_pZ{(RFl#J<)9DBl%> z7B2dMWp(6Y>lD+`AH6*Mj)F9zJ*N)tu8ZWqEI$CKE4{?usvwB(@TfsRV+H0cM+RO)5r13Rtq&Pm0 zZU5=%dejxzHhchl+sdw|L#tSs2HD1NA8wEYu1Kr$ev<$*_onaC0;Eq%3hoTE@u${t z8GITW31rt(nj4L^A$kcf_wGTEX8{O3mACR+9_3;`-lm}`X9W0x+m_7SJIjX6(LrOL+wDOP~*54MGmiv@O8#DxWge*q_k$LDE{;d z@>kd>5s`n88WP93eIy=G4<^4um-hzoR=QV{PXsztRk#)9J~)L~p0^GkrIp*89c^fTXt!{_%O2P+eif7fbpAV=(DT(N zk{yc*Owe$YHU%X^9C?IEn3obS=XHIg86ElWv zGJ7B*{tftA!d>9Kn5{88B|Q_=J8UAXco@dKll!KJ!8X~z<}stW(O4z#tl*7z9)p1j zlJJYi5isCmMM-5$Ax=c!#iw_&wn3ok7{h_)a$7Hhg0L$X`u8ic)XoK&Q>i>=J-s^@ z!2%b@;0kTp#IGARRF&>>I32(lo5Q-KIk00>6EbsgJ6tx=16yqpgFZQ(gaQdXEN4?b 
z(!;ew7UOd0si-6*Ds~PgwedPSYeqg2a#jPq)NFx;n_We($R9v${cTWLw>4Tv5mYREbITm?OX~yB(|H0SDU-weKAQvz9J$-c z8^xNr!$$VPLsxo$=#*CI&CyMQ*y^!*YO-&TTYi|WX=okCkCSAS^wfF`qO&{qMmI6k9WKUCj@U`+WX&n`ocUOv3qkE>D#mtnGz?3RzD8~ zj|9sw;|lnrNbGD%Cz2|QiR9i<$`w9AF0GQLtnCY^Q7%fkFMmXC-!q=nZsn00cQvWu zkdLHjt29Z9Y{ukW14ylalZk5;VF+4&}WhpABQ8i5z=eBK0i( zrlXY-oVrCF*>#Kz;uH}%Gv`v_2ZO1t1_!Api-ys_o`=BTR~vGf`#JLDNEZog$-wU` z@p0=1HsVqX(P;ht2sq|U7woW@KzrVuX8Pc|&`zjhsSfE8g~IDw6Tq`B*+LfsML4fy zCA4Stf-!yYb;=U3N1M&F9aWMCk6UvLs8k3Bx5=**`0GstzbY92lj((7N{#*%foHf$rcUVU!8LytCc<8iSofvay9UHySJ7 z@fuv3a9Q~A>o#!O`Kj=oY2DBx86@?4tF$xA@FG{6tC0*J?`M5IQ$ge zm_#Cd<|^nLkt3oKHViM#cZJ7|Uke}EvPGOKX94oD84>#_M8oGIAQWuTMlNnuLHg8H zk?U{s;m$z>c^Wq!c`2`eeB|##!KwM+je8@YVw*ZzZ!Lj3FX@Hn8x24K^$|#7Sq}2( za2FEnWDXup^f5Kc&VWPWC6S0DlaR+H$&kFbj7xtp3a+@WE%cud#Rndp=2O3J2uy2P z+u*@*Qvoogh4-Pck(;Fo@;I|bxVf9`f!WrsfOeYa7(3Fh9Ur)B)7bTN!!k#1e_Ef( zr{j*iXe5IZc5rao?!EurpLRAZ1|&1vILgviOg;eL$E=^jN3k6#&{q=JC#S(n9%XHV zr?!j-)^JUMFa=3w3|Vh4o=vfG*OG&dTny?ZC`W1t7HNnsK9guE;Nk zcV{Sd1$#aX$WLJQNHmwv;hT`6t1jUAAWOlME1`U_VQdY)9-B+T<9AT#mSqss^izNY zOcLi%TE*^>9vqrO7%iwLei(lwyo*CcBTdNvqw328YU;kfOCzb2G$760@yzG!eVeF= zC`vOCnKEQ1l%au2r4%J3l_pauQ4$duLL_8Jgp`>wXL#>9=lAuzoqwL^=-&PL+`aZ% zd+)W^S{v%f>|j$;q-^6}e|Y$uE}T7iHLx1J8gz)-pv*B3X58=w#v>ZQP`gZ7-D5rY zCaxZ&2$vl4&dJGmMPmh? 
z^4gU29OuR9X;yy+#6R%4$SzPv>UdcNb^Wjf@!s$aiyTvpxM$h@i@|2?)e_U;sQMcI zevnUkTLmSy_l4s0wbD@@2c=uDs|&2n-2cs|!*)op=}ng;M-K99q>!=veFds%Md*xu zR;ZSbHzE(S=Hz{}x*zsZXBqNw4dlN6*SZ{KvHM-(Z34@U#mdX@M$Vfe*nqV@!Uo4N zcov!i_kM9>Voul79*+GvJ#A}R!r6G1%MRc7aADp>$|5+3Xpg*%C#me?{`NoHYjxR- zT2r!%K0nizx$1WSsh zy^N1t2av-vo=UtuR3u&X zr~cV$UHP3sN|Ec(vaS4HX4(e6KGF~EU3h)OK*X-OK|0bPglo&x@ha+8tAnW*9OmA2 zU3cLu_D2sb?w#evVx`WwCcWbujtWDUBHMFzqWekgGIepJ-4A4rq!*mj7a{Xew@iaZ( z$)EY!Ih_HKiO^=bEy(Cs1tiO*u=w~pAeC+f<0syb*WOkH&(3cHx~Wq^%eV84rTtq* zuWBwd&B_3mTekqii77y4Yyk!+9|X%0J+O?}0Xp{;!B2k_=rCdhUAAT`M1s9w^u>Na zS?<>ud{-5^n3}_Dx6I*jyOHEP$y6)t#)Hg@yAm*2xB&!5>(L6v4@oAxg>;ULA|BP} z;IH0)!dtVAi1+dsb9TIrG|3Y8NTQ@$0&%R%&k?m3QDj!B4g>t&M5BPep)leV!xcn&%rD=7VBQoW$eNr3On*g z2e}^=gl9Kz08Xp*>2Db;sFT$bxp{EB`~;8X%5rgNEds;Blqth`7l@&-ACVs4_dfBa zk|46@_y@9nmm;mN7!4@*&$#dlf1lLTL5nEkm5o%tV`?;-eIBf=pU3F5GE7kBRnS_y znd$#>0n_ax_umI((5~;b!Jvkx%*Bc0=^vpdXoCrffSIYuOdn>!{MmGp&P?c{^#iRL z@7rUUjc*9%wL&#G9;Qkyt$R-{&0h_`4_7dF>orF2j0!zQmdL0-G6H{N%0YmwE8+4q z&}#M#0X_G54}GP&lfmr(NzENVHeYd}luFj)>mIn`PUlwRkG+rLr%v$qk{OH^h%c(; zU=K4((d-p8a@YMb@^L_+x7Jtu!{{R7Rml7B%f9Gwef_7nNRs?wAd-JlOEja5=4@}-*KO#!r9$!X z0Xdx96;+g^EOvhMes27*+*quE5tBqK>+2DZkO1LYwMg{is4iw5?MzmLL{dAnOt~?- zDueeUv0RSM{mB$6%%mmmIr|u{v?5|prITv-K=PgWYo`2HFz%Sb z?}>c9tf^hW3#i9G{3y>Kp7Qz&k(7d$5uH;XNnii;l8*V4M%#uyAqz%4p~lwf(juqj zblAxn(&@Ywb^O9~=62%*YF=3>rP^If{hg*mDI9!9O7|+U{=vtHPfykp z!)==A_XcyBnftF&V=a}bOUM+O8hVt@e_TZy`a0l;7X?}!Y8pX1q|T)+mP+ZX2kwxk z*M<UNOky~#$LzbK*~ zXS7Qu*7VJ-NFVlI7!$*MO5fcwbN) z%iG>(^}Nqv<(wp;`Kvn;rdmrh{w(jqFTAlpvg(vjEMfTCbe|k6IP4?0%{h)NHx}#f zem{})dY~9g@e)2ND?|DfOX*?#n#l_plSu1T8r&X4qdu2g1F&3b&DPP;;RDIl!CuH8 zKXvp^W?vtE>~t+$a;!hu_Wlz2=q^e}oH9m>7shg9W89p0rf$GTCXo>Ug;n`7Zv{6v zQauVvEK_8L`upHPc@A>vn*(61xDec!qadq9Pr%&s3BaK08_@b%C{q@Whp*kdp#BDH z*d&j~UPQ(~{ZVOf_@SxrsLdc7t@{Q{IYKjYbN0#Fy;sT#-2I?NvNQO2=n`E2!&(;Q z4`qLMmQyNIGbGbb5wL%26wK1khUX2pG1225(`9k-)PsVR@|;!@-yTTd&kpY+mm5Fj zxGQ(=Lz}ymkb>S+WX7>}^ngw|^0!G9pY1gi^LwuNZ*Rhm>tdG5lM46dE|E 
z$oky0DCFj#1hL)MW@(p=>c7v=jyQ_WziEwZH4<^37j5EWjV9Gd@ISs6(YN`t&~G}b zoLqcbwo;27_ji^Xi&fcfflcwPLvzXoVkNo3c&kM=d>6Ql z3Cz)BHu+!X=F?M4N;#RbTz<_`lWkF{r)o0034^h!gt2`l*B<*@*r#;jW9q5V7rOr^ zf2P;G5Dq)t4}YQG#o6#DuVvWP&}M8x{Vl9LLqr}~^#C7e2Z+LqfuzIdNyPmLpKv19 z7x&2y!TW11BSvPr6DwN@eCwkGd~Ov&`m7#8q!XIN&tY1GpzaXf{sAXKl5`3ED0L#` zOa!^t?J#=6h!0=v8w={~`bG%W@Oo;a*H2hf(q-|m)kE|pC0np1x7YeuZ0f&S zuVF1d;>hH?g0UnY=U^0M#L4@EPm)lrKvDerx{fGFpWn+2AFn6AImTK%?^P9pK}jry#b_3{3qq4yf--ftJP;TwJUIDx2m2=dY;j zq3>02Y1chadb8cOJs8JT z8}NJ;89DvQJ5Em{GPdAT_qbxJbwz01PlyE_*@BK;G7`_0(wKViA21a`fmPOvU0sSUr0f&bc@ZzzjvgjRC;Ol}gSUYJXoMvMTJvZgS zc;n;n*}`P_w$;UEZoCiU2>qFyKWAhUGm2ynM})v7)3$=C*IvWQ?`|^t-%+v$_6^j^ zo*3!d^D%IcRtXGEc?6TzZ)7?gw3yNHxm3W}O_XhNJ8oAsk7x}3NDgS$;JA;C(mcBt(zb5vgY2S<9Vu=k%+|H+8$hmx>?QitKZy_|Gb9qk;bAQ`&51}z#r z0qI!ehB-Y=`!@!5ozs_=kMxtOE_uSq$u`}YlWEtZI&9}JW%>HG6rFMDIw$X~>qg?C zpL`H+eZJ1NB6^j>HgxCnQCV&*)^AHKykyf!X}R)T>}7H~9vk-vewZ4`s3+PoE4RPk zWIEoEkN0G`Xz63JfCs-Q%NTRg|BE?MTH(oY_@7K?kN!jXst;$(scB5>foJe(xe`%W zY{#{8W$YU|?$%~`d_x)&xuzXjhwTLB^17Vq-%r7UzdGcO0I z*Fn0pl?fY=OTrPI{ z*y27iwQLt6N2n%F54xj`(6{5iiDcTknAyA-=>qyb3chOp`+R+-H`e?7l(ej{o|E&= z625kz<%mM;sDTkSAS?&{s=b1f>HQbC(c%kwsPj|Z5Z5#^etC%G7AmXQc|q zedhAp*zTj9NPX5O>4eZ&q0)~P)Vf%yC0t+ zjIn%zytIlJKW=}G4!NxT&*u={UWA7(|1CAH)wU6smRT|WD|s?>o#D)M zjQv0tHXr5mD?t4)*Cxv)_;n@JwmeD=@h~M%*i0hOZGHEr{y(o!9oO#BI%+s`eB>0k zY{XOiwaq)OU!G>+nAfe_n9ZgpY;>fEd>i^0e>5Hv79uV3Z=?rtdfpdY@YD}KZy1VG z{!0iFniAO*B6VtDRlg|T> zpcB2mqX*juk$;aWkQv@x`1a$Uudub^Yu8;iBiCy9~^_6sb_Us>BN`EQMMXrzz$ z+4sAGlA*l6*Fle8BdwCxT_M*iiuVh3M8`__HPV_Ndg3yPmDqO-Ul0GfxI)Nc7p>yg zNGvxNEA{?NQRw@r;y*7ug^anc`evXhWB1CI znHYBtK3I?l92RDP!)f>6!N8+n;)(enaQqEM7V?4lS#J!LB_F`&<26i0I|a`8w!u}( z5n%Dyt>DK|4=}Aj0!WX8@_5hl;KsI%;7e1CtUF>hZELoKK3(4ovE8ZA%O@CkTufvJ zUg-cOZZo0xfLu6cSsIm^I@M}PLmar^cnQQ@90c8`@1}Emoye9vO;U8coD`Ou;9a#w z*x#ELWRX0Niv3-Lu2B3#`dBp4N?t=UxBwZrzDHPAyavC(_{s0Y>l=@hyY{`X>Wdy{ z+d`GzpsVq%BKpo$Y^C-Fky8J1^o-koK8Ldk+Y!--!IHSHahzX4t%}b#+q*nT=nyQ= 
zmuamMyDrG&?0|0D5OK>$kvKuXpS79f=ZaWtNh_bL$Z}(`66)TdWxIDGc?&DhMtnQ! za()oh+_amvnSYEFg+fkG7cAt@+E^~{c8!D`hKI=0n#sh3mL0hG^?-l#J^S0o6+%ku z?hbPFmT5HHY5=8qE6`u@hdBFxr@9c+Rq)60Dn-04#h&z?tV(QD9Zl@Dvm>qM&LI96 zsSsj^P@JrqPpqfQi3`8I2@N$zd}&DrzO8N!DV7W;VwM;a-3G&mV-t?!HivBql||mf z{MsRe!HOqjOYBp$p+AOw4&FdkjWHsx*(wrf@lgEA))Zp)&5OkMTWaKOH3zKxT9DON zSq}cfLWS6GVMeSmdW83ui?GKBywTXuV&s%=lH}5Wg{VVd6Z$Nd*Hd$^YlxY}6Olc< zB~(mAMVaeIOFo5<7cT~W;?kVHdztEcAB6KrUQcTpvjhS2yDawK@f1}&*(dn$QcVyU z_+S31XPU2gptwa4GJh;5Q^`%W13u27 zr|Gzm#nxWI%arBDV)gd;h;rPfi8GSCgzMrjNv_nG5h{7@_<)g9umJr(oXt&t$j5B6 zT%sNFi2lZ_FrVK8F!IhGB)tw%9|D`?#OPmKgS^QQ~#*H#f)M=~AS9 zx2&S0-$v8PSB#KEvQKVT@vF!wY-A4i$>u`!J9vSVPv4o50~V;-HeCjk_ty9l&pS0T`DY4mwlr<9pX{6^uIegFHF&6m7P038SYFMH>8RA`Qdrsi=n) zSZ?`mY>x$cCr*fZTKxbLvsSgg!U0pb+; zMY`BG1$i4QuhRpUGE@Awke4p(q!Od7IJ+|@jKAB9<+5~bEOV>FjS{R=z-JrDb8`YP zu6>pt``|XC2@MOmFEDo{*?DdSGjW@e{Om@L(|6BsR|*&z(bL~q(pz1!K=&h0#!4xI zNq(mdraj%nw1inRaZWqwjG7!e|IQoc;(=`D)4~;W_`F8iXm14Amn~s_ti_pvEXY9T zKQvXlfN|_}V2(bqWz13sLGw0k%E;S;I#2;Y-fnY{)ab{Yh!fILwI`WV_l>};&SeaE zTtU2WP7z;OJCkXhxskax%#{hv*QPV9?opd9`w<>9Hk0ZzqtPa15)US-kDN zaSWs0Jrl(VRMFu-Vvq%{pN0Crqp&GY4r5kN`ucl;)?{Sv4L+u`689AxoYrZ9@41Ss zdfFwv=cOrlpSzE4_Wd}Bz1A-&)VXq9_9Me(=b}<)kt9D$E`*$hh=awWhzxs z&3nf%<3K0w-MetRl*LYa!{<4$+*qt1eydT+J077TGLS{-QoKOy%4}-XrWY@0rebMP5M~a1jfGG z9J};oBga!a-4p!neg&MXKZDtW#WrgXUx$~w?PY6o3~g3BZjnj)>&X=3-Qe1WwNR|T zS@sgcWP7cQ;H9ojFsqHWDG>gI10Sl$4(l1o=%(Fp$1SOh_-rie)v}T`rDWRF;m4TV z8MRDfy0OibSPh%;*gN>g`xVIEl_Zm6&qdnh)&vmN;;HGLHv%}|63=&uZhDR z9v_3`eVotrQEgf?_g}330SbJUV=}u$q(C>FYpY_oJvP;9C#p1&ud^Zy>OfiSoX?5e zcUW#LR&;w5k@9RCX8)uLjbjS%mpUTZK!d-`uE`0^Mz1sf#7slY%CFb`%RCX^yb{v%>>eV( zEQ;=)nnHCPR-_+#52n^{zCn(z-%G|Wcc3Po-awSPtR%aqt*2BkZ=?b?EvE_}SW_0C z?CCQ~>hy=#z0~!}R7z>=7iw;K02y3=h16_Qq?`Xn;^T5(6D^fNRG{8X^3abA%3Q~u z)SYKek16P;M)gEed%kC59~MuyqQYZ{YpL;M6A?$6_G`lrU+GV55^3Up^roRMIc<`F zJWcFmP$(Xpf0FC3SG2X*{O>){6yYtQWU-p0Cb>uGg-%1T3SaT4w7&63S3*7rH9qk1 
zNH3l%3quQES)94;DO#1BEr?kD*?K|MpMPiQTju+SnYR0aMfZ5$;E)Ehd@l>p619>F5lv)9Y(5@SI(p&Ta?h_~dCh=0%xwUR}am7Fe)uh=Dsk#t(5zVH+caor-m-Z2ze za{a$`?;rVDo3Oe?aCsBIM%vuNuaVx%b1O_nDT+ji@~74+_mC5M9jM&o>D*j$X)d2z z!E)*9(5J1ZTq18C_CW4B#h?~Weed8~9nMB(s#22e8_9_wJE_5ei!tYoe2u#(t3e?D zKs5MpHyacgzm@G$lNgC7Zu?ooxNwC|UQ)21;aLBK@6~4n_K9P?T^R_R~1Sh$}E=@Rd%=xq2D3 z`{!uF`}X|d|LEw;OKhxGB_O3eSP+drSf-{!;O7;p{I1pFELyCeLX zEHx$?aW&h6IZSLt;*az9dw*Vf3h~_HjlB?z)>@jfGp&XHLU(FzfW_*H_(#* z1Z1xm4b`+K!t1v0L6KFPY=zSwdcuyuOn!5uOw)ga?4r8`gg*^I)`IQu$L&se%nc)3 z8B;>!U$`X6*N%g}U70Yy<2=kt$fwNimXlR0PLY#)x`^)wa6D)IbZnAFE-^BCHTU}} z-L;WD9<8ETt-9!#jvREe!b|aiNkM4N%>#($&9i;xQ=3=65$_SZxbJP=ds^`9Vwc6O zG%c*=!)1}Wrjh`B;Mc@#pC!rbY-FYvt0uX`t%Kjyyo_eY{|!hvnlhy(6OwZ{c+ zojh};E*9--i|lCS&t=BEe=TCMx4Q82Im?a3GI{Hc{2nz;>|E9?^xmL^t$%nP)=xe_ zAx|byhy|}-H7aj79F|ML>08ia+AA{obRl-)LOdoEns8&}e=?011(9CCV(Q6*ofOQf zgPPc6Y+=9soW5t@wqnM37BkYqtIX!`v9h$zUXXBcI5cWtWHYyof@keMfl<-pz(I|f z^4O$Y*fDJibQ@(1to(O?O^pGvKTF>M!R)_45OE!tp3Vnml_v1S6g!BQDM6cCr)AxD zkI{3Duh5$ZjFru>94xEqQh`e^zF~S_EQj>mdbq5-8>${&MEMO;k!A+Bf~&q1oZIUI zy$i&Q)`@U>%l$Q!j;0kEOs>V}kk$C=ikrk^&;A^D)K^LRc8nz&-zY+V+GL;xm60f; zFdjQnI|jQ#^!!`%u>H`!!UR3HZ!Fg@V**~_uU_g3MmxJmN0|6Xi;Oa{qST^)>t55l zzT(`?c}U%Uiu?SY%;DT#e#C=?XbKvM&%AX5IWS`^XKTH*n~>^Mb?N+RmYh6Phpk0f zY^8es9($G>ixsW(2e-lVks|kOj3@}iTi*G=agW4|+qW`W?RXw%cRnwaa&59)o=OkG znSpz##WBl?;5HjVsf_n~{7s8RyzK`gYC6Hw?i6ri)pFSjo#phH;!||eD>?0e0udx$r1k- zXzR$UXyvdv&JHZ@6l0nmR?wOTEd0$#Oz%sRwD(&q zb^xlw{R-7|=k`Q;@JV@mi2PaAuE>ocmdjctU+6j^oWg#%5%v#;;{6lS`snF`%_3_0 z@aeR8%Sk%pd4G7d!3i%S4{&3nD&rBI_}HDHKmzkcOI>zpU@2H>`~hq)dk5bZDZ`|P z`$3Lq2za(x2Uvd?4J#Bh;lb)p%$8BUz)hklgKN(M_mo=D){+a#f3E;n-+q$U@^}OC z)lY%-^a7c6{$|>7!CtyHhCeGz7c9RUH+1Up7Tc7BWdl zKgIk1K0o#BV076RKk4)Ti@P>xddu~vonX5}bH*HWgI+GW$A;o$Sg!jR=?ciiGNfi) z+o|6Dr7U({4qs=E<;G%Nnmz_QhM1w$?GB`5!e(4^=|Fh(^A+0mXE42ZI)9$>CR>SX zljSnNZYneg@S)QBEhMhZT#Lu$zu?+ue+&E6#$6-_p0lPGORH#=QcY-KuZI_%tL9{C z8aR{b9CV&J`tBP8Pm!{)ZMT6zq7bU>(w2n>Z-xi8EaA+X_F$%G5=d8C3S;KdFwNf> 
z%xR1T`H_swpz#S9q@V!pCVU0zvv-4I^_H+OMGyWNEP&-{sWO$}C3IGNJ)Jt+Ty_mJ zkQo|01lOuQGA5Iv;T7}@y#4bkOk7P;C(=KOLriLb!j13Xv!OLS-u;4pTDFV6wrUc! z=Fng&v~U-$f*0Yl8`hBXTh?~^ z+P4-AC1+2>w%J~i>ZNHZ0}dS#B)W#tZ4V&zWdU zZ-F|dAD4`8=x+_P-n^t8MGf?nu~D3!j+VzLv+`oOjF8{gb@Egy6?JPLk?3YZjCt9& zCbP!a9aLd#9zDRijxKhPLbtV2YM+bqn&x)z8F= z8NSD6 ziw%(H%Y>UL6E9|MAy#LY5d#)B;;XavlIVjNbYW8=cH28ag3ig3c-p5Ux#xI)Z>51i zT)C}9bY}2Pp>GW$qNeo-TithvcObsvK_Pu}6X%QIgDOxZo zTM+P2QE>M6fBxP*5kBIkycR)h8ox$z^W*oYE3A@)$}&Z9;!|x=WF#MB25Se4E1ZPl zJDL3cROytweyl($}fy)_)wWiUS!31%&`C|PS=^UCH_?A8C~Xesy2P3Y!ziP z`X{wyk$^fNXlL%ys^P-VZD41^bqOF27+pR#KEw`ngZyQ5drzI1+ zOexZNsuq#CrxfYAnt}Z}5sp_o^KtQ0x0*?|mlaBes`r-+$U?;rnuX|qCCccn5`DCA za$nzIO~@xv790*Q0}L9XqxtH*^b_KenHoSk5~u~_c; ztE7b!e@MK0eS{O<<)Axu+0v-}80xESD_LT-wlkJox}P;xR`D zpp|#Z``Df6$*F|?=0d^~#i?0pBk2a8M9j+`j&p}|eAqKXgWwO1Fp|C4* zCzQO~Ejt=n0M~_$h2-bsV6k&PIAHMu4oN!(Z6+Q9iw+Kkw=N%)DLq^O$=3^@cb5xP zHZq2L4yVBxgX5vboP}`t5Ph4?oie7lVLFqH=gHFMgvsU^g~3WD2yB1<7~ZeMWru^t z%T9c8rG6Y3Cvo*13T^ea!{YQakhtc{*dMq{Ptmzd`TA*4$&wd%TjpE57WE@fjy%ux z!Ly2m=snd}$m?}?kY7>+-SgZP{h2ie2PW~@BiC*J=110^U9E3G?v#(=Y+Fi1D+a4= zM7sp`m~`7d@xq@n?Bkbj|2}_V=~(GDCSDSvw~t#3uTxj$*1e}07wjH(A9Y{qi7ot5 z%e6J8colXhKNLN$UdO#>Y9&X@tP}X*O17O zO))t}#~7E z8;Qs<#FsDMg8e>t{-2E4J}|8OC3TnQyRiS?b$TJ1y(CeT`TV2A<6?~DUUVdCvF*R} z$)_tOiAozkBFH@LjW8uuNR%jhF( zAaP?4Mfn91lkz{{#jCP84lIB6F?no9h1G1K@7FibdS8aaF!1ur<7+v%djIB)pxb7t;^x$V^Ib|ZhGpHV@^cFIj>Dr*U zr4dG<%RqX@CJ=fk6d2W7g0|)PpkjP6h!5Tkp19e{U;?C1^z*0heZ34v9#4l}{nEh1 zdFz7Gsfj@G5uBj;njbM~%$F@J7`t45=B zZvI%fSu$dEY8co5+2fkgU!@;VuZ7m!`_}K#D2u&5l#dro1WCj;XU|>byZkYx1f(q|6DF6 zfutWT=}n+jvAdwuB?zCOd4;pRi$}OJW>X$BjUT$0oMV`*YTh}ZvtA09Yb(e$|6L5% z`)R}1c>sKicLuhN^Wlj>^P%LLE+A54K=oo;rgiKcxGwty9xYXb&*p6h5wpzYIpQe% zyUYOow%;t9u9HlENIpfo9eNL&RkdYSR=>a|*$3u@-*&9y^i(r2?`%B(f z^`}Yy=BqE=QRw*4LlWH}{;X~20KSHH$V^S-S-1t}I-y6Jr^DxIq-LH&(pn3o0i*c3 zpbBSn5Ek1olfPe+<;G&Iy3->q$l8ofXbHmRiVx!x&D5c-?J_2qE~GQgxp4Z`t;W}o zWV!ra7!C94f+=myR6+)Fad|i*H)eP(RZ!4Kok@MI$)tTMJs8ceTJYE7=>&e8udTUg 
z-41$CpaRoAiDbrU)WPL(>p-ycQs8R%1V&yt4$>D!0R_d~%)C)`jPf8|xJ9uEyqL0q z345gkdbQs``7{mgWKROG{|o?cOq78}X$)w~Ed$S5!a&ypMwauArmHVo)AjSp;1*;j z9Cj}SRGZIax-@Em`+XZY?bS5+Q$LX`o9HV(?w$eOmtFvo>OCN^7Shig4d}D>0!rv$ zLz>ausN0)onC|>sy!tDD_q6sBf%tjPchn^!M$)o@lv*grq$}pE!xT((Q0!1&AHJ^Q z9PzO%c^t3&LGqLFQ`->nFVj7es%u#izjJfZ^7O_3)=20FU-3t+iISvue64_#?I^eY zDS57oOgyhBF1V#78Z^6@`?oVtUp#*t5GVcR=TD;z)(DGjevz-=%W`9}KG)^o(_Eq? z)07sWyKPL+-M!slRHBUTI5m@&xy5q&b>bR-F2i!!>WIK>Ax+JkK8R4*dlC=A2mTv# z>~C1PI*&|MIK6h>7f`YNDb@op9qj8Jko_$ljQgBt-Av*ooxjD zx=>iP#1`6p>H^&}pUI+LwbPPO3e0_Ci7Ya6kZhO03f_MBmofMm2~U@`L8Rz6{JpG{ z`T_Dq4&papP74X&b_PL9S9j)@U?;tNh7%R6ytREB?9R*Rj~X+tO8piXa4TTe~5swR#8CQ%(=9=QM=Ne>a) z(e5wzP$_2nsQGsi>H9SiG;_h2K51`Emu>$=&26=zf%{f!Xo4T*6uz8JxVWAef61NH zdU=XoScA}08jexthwLM-6B)GD;#qVyxt31H58xLKr^#ajrc+_rhbU*yRH}Z>1oBqD zD&pH_6|#0s659Xv9^~&4fR3||z`_&w`hNlS0>J)5=pubkW&HMkx;jZlF452 z=%GggW}Rb{z#fo*#jaqN1RS_hx|ny$|*!#>v9}1VeK7Y-l@w zDQr5T0w+|bL9e4hP-$-{e5ckfbDUtn_-P}|hRX|O3mUv-Svo>E_wq>4CFy{f_+Xg` z*OytWIYC{w(vmzV6+`Et6QS!gcvsyfE}NA<#I? 
zaqs^&9O+1Ela_u_MHf@HSV;0k^ile1%kGji}9q}45l(}Sw_dyyHI zFD(p11CWf?IBC-oXQXKTlYc(^FgIUulz9>QP9v22{En4;&bR6gv2;QDIBb`S2RfkC znEPA8qpR5WL+cTR&x_rpxK6`>a=OhS!n^zgdaa5!Ya z3MTJC2OW83H)k`C+KuAA!*WR~z6l?89iTd@s);TWZTuYdxsRS!jz2?g8B;+oZF6L( z&M4Tj{4_owb~vY}hH+TS@0up571GL0`(_9f`%_Q!K4M1Ady%r zOElU^PpUdj=Q}CMzJIHO{dz8eo`91~ZW9LYd{2eS7q`NIF+SAQMT(MLuabfB;R~SJ z>^1N{q{ZlHX3!N1!PL?@22|3WIDDdE5*|-1AfFj!a57D^0qCy5m!wxrcS)k|d1J#H zlaVK?o!FiDIyA1I=|5Y_%JjmqI`nw>QLf)RW+@7$-{`P#7~q7CyS7?_?(CI_OLhP0 zf#XdE^NYPBy#t4G{pb;4$=Tlf&K;s5E^TP}ZcWkYtNdQKV}1@A+A$UNkmq@@IJa!J zN?7dqk$gN7%ZDF^DO9evjTr@V>$AUHp8?a(UV{ z9hyGbOZj{~NYJwn<8K%7eoYpGeXymO)HVGA`k~opx~TtG(0zF(?y1201`Ev9nWWAg zjGsJT;^-e|S;4Shpn13te6vVH*8i#x{CTn+%o(&E+;(;WK@;Y~TLWo$>zxfaGdd5< z^EZ{C6^dzBe1*Jz?g1Z<)GfAGCrH)wN?uWz+D zKMr$li^L+297WCs@b^R`pH5?8KW&gz!}yxXo${K3EH*CU?;m5iu~?gK97SGCvquwL zULh((H>!|s2Cbyc^t9Ji^tK*nPQUggp!^t;bJ_516Ko^)Q|6hGM3hny-gDw!A3g2# znoAixj-|~G6wpswMexeQG#uD;bNW7AUgIeIYXeo2e2|(PA&=qo^QI(Wi|EtCSJRST z)0v(OEoN9{EjekUE$z8|1=Z_tor=)DKxR30kg7*KnD^<1l+fx0_1JkSZ6J6=E{lZp zWz{{DfxbLvAbS#BeWHPY-y#Up+X?jYMTyMp>QricsxLKvW+h#5u93bQY)v!q{)EfH z`I66?y7D-LQTyffm$C=@l0%m`kjAeWdH8<a{A0jSMR#^Wr9Odbuz9gv84- zOS0|~e(&<+u@eMREh~kQ9$^b2vy2KD>&UZ(2%hZrDMlFV!N)cz+<`QyTGz$@#>)TY+Tm zA`jBdJ0B-2Oo*R11=P~>tB8Twmx;Kk+X#A!6|wzUH8FUzC7HIdis;&>Ng>|zu$BIC z*bTo^q?TbJ*=wCmfDZ@o08=e;*3t-a;ky{}#n1cL?vX)OuDCMsJMILLY+ypx3KxCnG$81KV(KKzaR>y%ARH%Op5opnIr00m~b zcM&};)&iWIZpch%{zca)e5DZMOuBds$yC|CrBAlrq^@a4)0x4pVBwE6I)2V4+S=Hb z-hg@08{(U3+bLpZM$=7td2BDxepW<|OH3f2Pn!*PzEK95uKG-=(nSgr4PY8_;+Qi^ z1o%CF6LJ0fbgNgFpVJQw%$aAuw=&6%J1B(_N3l4v6kl6VhM3$m#eVLYf+J+DH0U~C zzt^cwAeKCRBvKGMp;re5V}}~b(Gz>di#<$LrFrlA%5>EDPr{@ryi7fJeG$yK+G&Bv z;|6m*_6Z7NbOi&b|8oCNNBW8uGMpaUqMn-IdoY-3iRPz*ziZ z<~7cKZSz$p{o~ZA8gl_z@yL=sdlRD;Z>OjW^7Hdoz?ag>?N2Qj8BYFw8c*#>I6{T3 z-ACE2)hG9~4WL3#yU|CUpClhY*Q3ZCMpXLl17uS>M&VzwDaQ*VsM`Uf7?Yzj2>UN> z@@T$RdT@vpJ$_*cX}qhLG=8_9QkZT;@ArN|=jM&aJcuAGE2|oEfyOSXE?R?rvLOpA z{Vc`I4K`s(3i`;GSr4%*2hU3Lt7@frANaF2q{K>WVbmhZ+BpLKQXQiEw|U 
z1S}fq1-#pH;jW`kz@4Hru>Z0bW1{(k`D1tHDpGOU_e<8}j%E`7-q%@%?T z|KT9H@d&fgH2|E=-3k(@1TYa(m+3P@Xl+V~9;>z&>Y|%q(+5w`_;D1o+OPohJ~xI@ z2bV%uGMajzIn`>)y*8#m5D8YEUJKqI_GFf2=+nvJ=~TPpVZ7(#0Q|Vw7W~VZ@pz*B zc5XZxB%c?<6}I^7GY1g8S{F17Uxx1gc^UB()uJ`YrhQ}@f9x}|=Hz!yrf*s{B1c@- zi&BQ(7vJk?k+#dvN*qo8iv?Z%!a_PNLQypE7Jr{sqtQ&RuPg_9Vd}Nf=;s})k!cRR zf8fXS02El(i4U*j&;Ksn4!~IK!MS{$IhGrXHGO3^7Vz3i3KB~(ok`W0_usX^L^heZ z*px(HIONC8gXw;J4Rw}_&xU?*+`C=W7~~C}>aB`5kJIS0M!FOap zK=qB4_<~h@&8weXj!ZvU8zWVC#k_l?CL3+F8;sfT6-2KcAj6De;PNA8Fn`<&=J$y> z;JAl^!-*51*P?=%TTOslMgp1dgcZP`Bp<|2J_58H34jN`1EJ`3V0+#K(pN)e!}OwQ z3vwm>GR<68d4I5M)T?FS*`Vu8gtZg&nv(~W@DuPwj2ZRp#uTf-ar3~Eq;p^&Lcw-< z-oZy-7dm0oOOiNzlzgIe4o}V>O?=#e6Qy)M*9S_w!z3q@o{OrRJEcvf=BVBIog&>c zZ?X3a&Z4gEeSHp>g`(*k3eHRnKI#dz(eM+JO|EK_R z^<-cz2mw}pqsX68m7>ARchkO47BdN9B}_nJB4zj3iQbzqm)biZiEvLij%e*S$4B3a zB_`O!bA8<3x&YfS;F&1%OoMc^UOM`fdLk)|6l068>Y(YyeRCBJ%?ptt)g(?&^Bye~ zD5@t3E5d1PFdZbl8?GsEe7oe|y0?4NLG(bz6H)G~9!@T*pZGIz#g%)}y%QD1FP-dA zq9&I6eAEOt?BnPHEWCpEBOaQQ$6;sb^0t@d#$x?Hs=hm*r|)!9j``7EA_dDI!c|1Cg z^L*ZOp65KD=YH)R^lf<|YV;^dSY%>?u9p6dlO5uzH!Z1jxq~IEg^}+lfQlRecC;ul0rQ9v4Bc-=Rf)l3z+$6-{Fr zcAcX3U9F=M4(y`T^a`kF+tlcXYcEk_=kjP*Ng1;*OOv$oK24Ohlrsm;eJKJT4 z->O@T|MB{ZDZSo5n>N59)08&tLeto583a{`TiZ- zb45;=u*wh(Q7dMDx3=;mI{CCYviVFd`^<=zpO_50Z`MXua~U@o7KjbS>-FT(3bSId z#>m-N`aU+{4{%<)m zZtHMDknn+Q4GJTNRjir#6Vq<{kuI0hrO!l;p%xV?Q0M{+V!!1V?BViKyfPFaO~!L}svkjd)_Xtw~;I_u9`D@F9a zJY?(I;r}$fT9PO}uHVc%Fp|qDe$vCud;5e@_%HXR;)qr+@d0rq`@Fr{QM9l823n}a z)dJ|86NSsLb8}X+n#;J!u<}385$n856c3tEf;`IEinbl;pc8gUb4yx_sOzsepIN$= zyQ4fWk}jkqF4L~{84BE4!stV2g?qDy9+$Pv#61vFXk&_xNs-zI->;r zJGH>JY3sqXW74yu?e{>-hDtE?%MuVXDH^PcehunE`+y%727*MP66?xrdah|1-4LfC zJ>#DV=l7{V3!B4?bFL{I`aBJ4eZLP6o{6T`T=%lleHRLtjW0p&b`=*NFGrEg?MSB@kZkxH)@C7f(3!*9(Eil?MKYvWw_0mk`uHvJ2bF%*Nh{ z2kxI{yxfUC30cJYuU{?^JP(6*i#Z1ui~ZF_0)xYHJjb;E)-t`TJcJ{!_#y9XxV6kJ zVg%b)$5!6JNDUptN6kT85)r~`c7I(H(qbeK&Tiv!ird!RV6lfaCa~j2#!ZGr>WmfN zHC}+cNeK~~U&IOZDCr*Yl;uq4=3O*Q=I$+RV7TwdxDe&*;lcbY>gvK~JW*{Kp>>7Z 
zga4nV_sw>br?8HH^(SGLGwv)_a*Ccw=#b$ba&l0%Ii(ou&zh;U9r-9AV zyJGSI0?H>PGc{uyfY*u(;Ar!DYQme5)YPFpz~X5rENBU5=9bwqK9XV(+V>Xtx~75r zJNJlwleHpkTQla(nRq65@DN~l$(jPG1r+HXLhg4%$ixqxxNxQletw-AxoOx`wvP+M zaflV4z>`)7h~E^(pedc5;yvb>nBnC%^vm>t?^Ac}UVfAYca9(`TR^yl-Q`&*e-eFM z9U+?V^p?cPF#T^TnSmRcZAOx$3PZJLscxECeImpb+;fNJxjJ8*1Vl}3E zSYG|Uf4)?vQ{%`!{+?1Fwx8|Qb$Oo_Jm8PsegOePHj1?F`-yu>BmT{i>>FPrrlI=C z**iw;^P4{`W9|E3un-e@y})+l8lX0})7alxI+vp>&zOrV&U@ei zVdia%TTGZeV2)H68A}$A`bz29?Vw*?(1#UEt?;NS?i_x2c_B4=!f1MQ_ZZs0w+VaFgWX}0wIii4*pE7L_aG4Uo&@1dO^o`WC3IBTJLW|F zRNz-^0S;TOCy#8LW953kfhh?Z4E#PmW}YMy^o#4d)QF!ONspmrgw9td{NbL7Sa*9J zQ4?LrYPxO72`mzpqc1%L!qCZOJjI5xHD#98HYqNZ0iCpjwItI!&+21UC^+l9KGc%KtGR~De7o? z%>1Y$=;N82s0EQrSxuD_x!ivlmw7F=K+{@UNZYUy*Ifm%_SJ(1XgcK98gl>1AnNu^ z6MEk7=S=SoC2VcuN%onvk?-i?s$Pu$pj77P`Js}|;!-d~aWD)sZicNL8qn74FgR0h z1?XWSus_QV7WV!C7kuQw#*KbJIY>$3+xZYI_Id<AOJOY0nvlPn6n2GzPxfT|;>m>Q!=u*fD6cEsnyarrALkJ}P)qr*mEol*y; zCd$EGJ3DD)1wwyJpG<{!k03Xl4#y8)J&rpzB@)wKl(3p^?$$xWoFnm~MlYmX;EL6$ zIbuiNr{Z__dt+0c_y5yKrm2@x83G}f|8mW@0yUrNEST%?9hrUYllb}zHF4eP?tgab ze{2<=i1i3doQAS?^JXTuHvIc{5w_s87COs+KN?Usjjr+e|V!vpig+wY$v`BvlTalxhZuU-{6{rw^Qrruw+olklg_H)sF%zJzT zw)q5~G#vQ^uQGvzQJxxk+hHmp8uS_OIq!)Ne;9z%>sJym*_9Z%kivHb#o--uP*Ur! 
zK4JbwtNM$)>=&4Y-@-Dboekbx#lZDn=9>T7B1M99_INia&AmZY^ zhe8baVbpsI#Li9NRFusVD*v)hzWrZ)q>5v+gx93?@OM~pdEy2c#_af@#PR&#WpYBR z*kOX)DO~OJJ1sgwA!#kVafFKvHasll%dkh4b1{4wHyKvom&JnX^Sp&Y$KCmkCp<-W z#Ulu&H3M6fG#xA6z{Liq&)m=M%gDIwb_pbANNY&i{unC`mPCjQpAU=;KAKgA%=$xO z;Rc~t&qWcTaVK1qzm<#OFZoFMA5VGZ`%eKan_38&oI=IF)F zdqHq3krk{(@_dJo8JmXD#|{l9rE_R92<@a4lcK1mWD!*{ZWod8<^+i{2dLH76Dg(O z4TPWeZK5|QhVD?lLp(1qB`v@86O}JZiKh=z$kMkbsdpu2RFN3c)9s`Ol*NnJSP?-TXb-2gaDAc$`dd|Pb0GQSrci08vZ!4rM-UeX0p^nt zhMtwzM?zlj6Wx>JqcitZiwl~$eHrzI*1|i#TLsUgcNb1pK?T|;i^MnbCkhqjBIvcY zf%~Tlu$y1}B82sA)t655o;bX-NYkG#xOB8j82dn#XIb@M3_oAhLug$6kT=tU+dut@ zb9-$UV&eIQWpcv4P7T5AAg(@=icnv;E0!l*igKFHUi(cb!%i*W;z%-XGOU*xo`U@~ z-a>TQO#V#u3Fyd}EGlEEIWfm_F*e?wvs3$G&Q4`q*6cn;Io|C-yW_pZQM;p&v&g{u zNX^1BWKfs1zPn#2<`!5;d5kkh-=|Jz$IQ3eneiVFETm|-rPGDYm#W9rztEK5gTHyOE0r23{;AO`JCiSi# zn4RDXey6VlBI8T2vb&FpQT|F9u9^q)S|-3)+cS*jE>q@^Rw+2?`W1-VdVpN*5^}uR zJ%L-b5_r9g0@=(O@VhFBx^`8O&Q?4?^0!YT4|m`=wj>k3Z90YA!<4gYnJLR+5Y_tt zuNl?Azfn+yo>f3Zr9pe}o;*+C7n9b1b|*8xtu5y0NzE2kQ{Q_4zuwx!KWsMv5g?C+ z;zvq|s@Z>eZHhKJnCc=s>}FF5`+WWa6&BCJ%m8mKpMtizUqLHQac5WdUiCzrmnR^u zDv-4`)W!gpVedLVhwWb(HyPHLp&oeb-2!Zt^nNQH=5u?D<6o<^Nc5YoTphO+h* zdBBtPEix_vUyg!|N2=5m)z`$0JITZ|ZLXfr|9nYaqcL@~%YZ((FqDc-+W<0NH(^iz zE@yc*XGoYI@qZZOJD->f7Y|ALQhea`ce@J=uf+AlYP53C|#mZ1f=Yn|8C@ce+gw>sC(@w_Z0vEaW4wCtG>wL)R>P=c?IQ zzbbdv#Q*x>P2w)BaPM-qpB;;%c;|l!c^1wIi21R*V%#_c&6&~iZ;q6AK^^r^PelVC z{birG-^=;YubO+XY1Ih&j!s3ljpJhUnwu5z$LHpWRUf#r&mM6}VzFscuBNw)n+(e- zY6>RY^bEx=#-SR^_hBms1;Q2TR~g?%N2dM6Y*thK#uC=Q%D6PXcn3*ZfzHv*A~X(n z;#+gMe98a)JxX`ODf5_-jFZ+$CUL=L2qwr86HyV{j*8Q6Y;S=XZmq12uO8%1&M>zi zH2Gdcg=Pp@qqv9=eNiUFXG3uW%f*9yl}L}`g#WhB~ zWaI?Wa@hj(*S>jH9qHHc;-jO82T4)H_Qpl{r>A_(CKI4TT=bCz_Aw&6uuPFj+!>^; zS&JRdyx~0I*MP?YZ)rYYcmOI`=lhxOp6DbjE}SKdNf@?PD0 zXVG-YO|U*Xo44?zBCmh#f4;W%9B#cU<;$?A=yGcr88;c$xr(_0Q)xX1*Mb>*<^ArW@6Az!u~`7V zxq3Rb%|V+T4>xi%*g2AnOSoGNVcQaqB5LEsUvrR{`%Q?8`Z+{ce;BdoNHVdgD3A1= 
z9)n%7^1@s^Ysn3t(#S0)xr7Umi_eXJNsLI@PcA;5O{)EWiOT_QPEa8_Y1J60E?(5>8vEekmNGM1vb+uXSEOZmJhi3CMsUOxYJO0!3;?G&a z*N-0Z%8LF^Q#y~DVPr9xq7M@Qb#J>x%AxqD)_Ic$2cUPv2%OCsUyiE_UpB6w;ox+|Q^Gq{!fl4u{*!2?_EZQ6H2xQXliiQr#kZVoOmSx!%=`vYa)Day;cjl;)-pK3k0GN!5pl z-G(M4-$9Q&s5*r(7kZHm5#y-X08Q%WLz;S->WWWjbHiJ-{HYDQmFSbp0O=T+s6UZ4G}EO_$NC)}D^a@dBNd2bML<$MPwK9qp2&x}M#ix^Sck4#a7 zC=l@p;_B95L#>5ItDXuRqU-s?r=fzYmn@LjwfVyIwVuK$f`Ru|{CUyAk6giN8e3Mx zi??aBh`c{d;N_mpTXagE=NS0k-CkZ}J%mmtGAyD203BU15E)~ z&1o9drYBU!ctWC*n2ewvs%nX}vO6>s7|3cjr{f_EjZXwQU@FCm~s+T*AdY{^xs3=T5{%jB3WN?LLV2 z>y4wDPlSsq#|&ZZy`X3!({!VZx!Tmty!|DS1nIO(V?$=J;f21WSiA!+R2Tux+Ic|P zKNJj834qtn>%o@7A>jF`o#3|mWXb;CI?(*`E10~g4QQ%GgSBf+;Mh0A;DoOhaGH9u zq;+B;-8!d=p4`NjWH;(d_WXVV&g=YO!aUZ(`O7cDyBN7psQQ7xAwWL9V+UMA8Z-0j{GLk1VX+V3#xw{Chvj`XQn-Lmal8TLYb#dLDT>>YathtZ?M&_b1}oNe9uI+W+j} z^pAe@ublv0U7*grPb=fuKA*AW5oYCZ1&N94Lrg=s^VKHiE3s8Nnc}c4d6pC1_JqaW z*5Aa=uVmb0SV}`n(c!KF%=Ug1*0lCDrk5dsCmzc&UQ{Msc%+bx6$rn6X7OZPwzwoh z^-CwHo7$Fy^X^)_G43bJ;eVQbZn{IR{WP0C-u;%oe9S>w$odEVDVN8sWpaPfOUJ%q z%7>RTgZHkLR8B_VjxIMiJtb8_Bx%5=tw+H1nK@wE`6=KAHCWO-qXvdM9RLpd(uS<*3ZnTS&p3VLQVkBal34XfFkenGzZN*W^pznwzw$c`+ zA2kKS7rAgXt|Ez3F_K(sEg(BAy{zgU>A*U#UC>G=1>Roc!hGXBprf5l={Z~8lWNWN zxRaqGv1->2vW(ip&XJN+46z*->cr7aZCJ#VvuI1)A7oCvf^-EWk6yfVV!*stD_|5_ zdYC(p^qhQ&+D0D`Hk@`r{dT5`Ug^%nj{QjfHwNZfL(FyV2vNr7Xts|}x%^`F8l^c3 zyJdPD#WUrwXiHA-`&rYe2&^j6Al ziICFPP$5sCA>r2pSHz$2%+FHFGGuapO(Dj>N(utPZhUpkQ2HkYY6^$RkLlWyXgscJ+v04@8*1O z%EqZI_7@2^N0M=qVNE))P(V!e5k{}LB-DP;AYP$rMETM8h~Ax!!L{N6;2{iHY*|1*U4hfS+#ysM;UI zq`zusN+n8?*wJG_ok|iod~6TM+OG+!1=XPT!DSFy@C*h+j-89GSCg1!~{5cXwW zg(}(hfchEDL^|n1B6}&E7A4JJ)ZR}<4KoubeY6Co<8pv+?Q3u-Baz-Yfu@y$ohXy- zkW9($#h)%jiBXT=67SBqv3A;+cv!Uk_$xv6pr_&p3dP>r@KC{sx#*fHJbcKf!E8VM z&sVL=?%}KE>}CDe=1&ILkl1#MN?cof1cZtUU;#?CFZ^fMdpbRY88-{XK@Zf~=dTOg z+5fKV+9O_mAq81hr6zcbbG33le%gXvuNf*1t>(@nC1?*pWZ32VxHAhfZZa&twtbl5 zmQ!MvGrHI&r9gaw;TteY?+U~$j*QC`8i%*mMNmD7ONixij(AY> zHuiV_`+MRxw3C9pC~bZ+h;DP~01lh137(uGYp1)z#$aDG)bOJ753qLy2)S?QM|}4q 
zTVnGfb#nTlX++!5ulSf+AN*==G+to4iZH2@-XmlL@LCgTJs6KM}yZo)vN0s2hRu$rJt%x|iG6RcSBEVem$!O{*eZ=1}R`h1$0nz%$)#9mqD=I7_(Y+rYJqQF8}>OBGia^w&xi{)@}+ zEULe|2}Ui-=1o%jWnGe$`0vgj-DR_cZ=N*q7JlO9NX^0A9LYu)&$m{T6Bb2k2@HR8 zbEHB&U16@iwa`bztz{-%F6GOx??rO)UKuwT)`aXug7*gA!ct#%e&zaEqHO_ULRTjf zo1^H31r>2|q$kx}jSd-?CrKL#HJ3p2{r7R=3lnyU0^J7I@DiOaL(Gb8F(>kz|BDsIkCPFDO3K8^~vG?;={%! z+Sp?I3F1?4R^rxPQem<0 zxj$mRE8`}^iZw9BHE=!b=%DY|qcDU}wbPRnHoj(tlv9ke#SzxN-sZKk9AsPyf0qv=2Y5A#-)0=M8S3@@luOm+sT;iL|O;NP?c>f z$m{P@m<>!eji$xWi_N8=unhU{K@v~BWe~u2FUq!c1%A#*XwGcgs0nzbpFIk&sKvWh=(9sVw zMY^V=iK#rUo|M0yB4XENBo6*n&;O7RBg|RX!?#$x0&5@QAg+JKdPV#T-2T#_>mza>DX* zO@T@>w=d&7O;4y{tcYYS;cDlM50Tc=kz%j^!L3ha++rM_o7m@e51DgR18Ya; zVj&CDvHhNN(DdPZF(1V<*yaxg1M*J|FRwtm+nd>WZ0m&xUUs0fAYfx68Z%5=teo;m z1b0mTXQ#F&JcNy?Av*T|?kT=z-^kj?#G$Lu!@v~_zbl7!EkD89sq)#m*b4uTNc17@ zK5=)^MpTCVai%N#9T_(nmi}`&bcdo5IzFfh`KUSzUuo6|Rop|Fc@-~d?+GtizZ0Ry zohOxX$-g&T;$3u(+Sl5RUk{Y-yDrEaV5hf7t5SJS57Fl&uFR@G&2YDd4l%8a%a4o7 z)?!k2m@=q1nAyhr26w8YgA-%ZK()_MiHBtu_|!Nivm|(TB>V=~1l;l7(k( z!hHX9K($9PAuIa8_`_Qu6P^U8zcZ%V#$^l0`aH1rS}&k=^k7A7A8lW~nr=_pOU-1; zN#lDym|^x!Tt!8T?3Z3gEgP$pw=xDCtgw;PaN&~JP)u%`KB8)}0UuxAjl~iH1AOo1 z{U6XjU^J^~{_odBbdjkr3_eFfu9=BvuFyao6S%pwtle|nCb(Q)lQVp7>;Mw zC!d>Ro6K+bH zBY8A74w|nJL8HOJ;Bwzy0LK)=i@CvY|NAMxW9k`@EX^-X&^ChEanc1{f+4I+xdQmX z?hsw%2N7~KZ2SE{(q#XL4%n>7oZk~5A-6h8u4d}NhVw??-i}=9We~ma4{9VigBwvF zwao>|WgkKBZ7*0qHVnR98OvmhP^SZjpCb7)&X9+wH2lN%FZf{B`NXJsYgkR&bF0OM z-yR_kdh1bLCw0stu?|UWyNcc4I|NVl`S0FgSs!%S36M3?y1BCdulm?RSgp?Fzx2H- z4&Ey-ZZxRJ24^MwoA=V$9>RC$pP?7F#jxwer;op|-y0qrkCB@_u|L))kVnE;c6`{T zZ$L&&el6}?%$+52+OivyVc-7g$$m%1O@@n@5r0gcB!?ehT7utGo!XiyrcGeKi{h2MfGQ5~3 zKc&&F6-JWfPTG=VqFdld-y23VZY7-Yp-fsM;3OOhhEZhoPm$%3OrSVM2?pp|z&)Gw zn2fd$hw-wY8pAu1amd3#$H=C@H;M@LJD#$MI#vo z-@j!MmVa^JSKu>I(pYRKYk&svO%b^I{n>GJ3oo5`&2y}$oG7VBek_A;CGYvfC<6;vlA_OCC(`LwyJ4(-9SKZg$B>MR6%!BTr&f?= z*XoIhx7HAoF)p8xQ*_XvysuVN-TQdD`GW#8I6?sN_;68RQ6S4<`jRqw#FV9soL?-n zHC0KnDB~C~NY{d%Ri9yT@?W4GkN{q%0x%|Q5b&=b2g@%HhW)zw65Pk83|CHVZ?5#V~hj7dX1 
z;M8NWaM-de_~4oWbw}RYYTpkZkg>cHWZn4zP*pwVmGX6|Pd1@$CA=gBuaodCH}~QD za-R}$u7lV<)=c6F7v){T$Sx6DvGOyjbu$3XxE6YK)(MFmUtfnWsxwG~%F00*KVed;f>bS;q0;#ejx{u#y+y9@Yn>M4=o25S} zhdn>(Pg)Altv?w*a<`ndQwRAww9YJVrmA5vbN5#Ztm{excEj%g)7EnMyXz&meJmF6 z-zkC_c7K>VD=guW&o4pv#ujG2ItHd(>wrFur-9C&BJg?20kBBN69o3$28k7yLB@l0 za6WB@#F2ENGq*?3gG5(gMp!01w=NrK{)%9f`i!A^`#fk_z8Ma2+Cpin-VyA)iGf+I z8NmG8EoqgPdfE@pp^fDusZQbp`FCeM-q9#eME@uqs=oH zVw+7Yku@n%*vZgplo_k6Z( zrz^F%44ZcR$=ZyJn+&V9CIWjtUlUKvT#Dj_C^qEFEuiUFPoG6H>D*DytfqVRtY+Jg zaS;!*g1QPN)Ni?~#LQs|g#CMs0h-=c@TLB`&Zm2Zm(U((8o`l^m+?dT+?nkWvtpSa zye|xYGY|aQ8X#F;tO6fOP=C=bIO`%{W{jRrCvR@18l%j} zg`>2Hy39s=VNxyOI*mIws284&E~*kD;(}+$H_tM(1vDTxO84V_XD(m~9eW1Mk<8w0 zM6^qHakiDCiu|UF`L(+v#Or2Si?-ZzM^TNV|1?!@@<)z}{~&9oaCZ!O9a_%nLxr@V zF9(NXBRZ>)A&VT?wmzuWphhRs(N}ww*q`ffx1%!b__yhtZA-byuwFOrMh~b$v_9iH z8WviB$3^afYNsU3>DMQu1(9=E`$`XwV#lG3%ZvS&p?~jJYTLP;J;a5T3IT{Bqv}lp<9CZ`vU+ z;c_}ixpoaaxTqot4R)u&Cp&sp=1r*nybdl{p9S7+O=5n$kcXw0XTh*fInWvTPX1gl z*Gj|A9eio21UBo2!9nM)Q@7VisGudv)XamKxWdhscumwByzQk8u_lJg?Np2A2}|sG zBI_Fs{7qlRAg4@j}V^Gb`$4bTEQ=T zv+1_2o#hLE znjowm**D5q>{>e!tFerm3~T&yIb!H18}ZMZ>rvRX7`q%e3@)B|gE|wJN7<(CWBty( zHm*LBjLWbbE4V4noKne!m_-drD7`mkIsDJ}b{;M!l4s4P9&L`LK6&fFXX;IOBD~3t zjb8z`=!WEA=Gl*ErZ`hqqQ9d7Oj`U2{Mb80vgoG*JT>MZD9bShLnBl{l*W~6~jK11Q9O(Wp9O2-kB8!2{rw0!_z+rl3&AwibWVphiXa}!?!nRHcKI5pxE`tZ0|^yc^|q|K>=SteTPO3GQO8d9T^$ zU%Th9c53vc8M|+ihTfg4i>4g>!S?Ym$F11kJXL(FVK6&hmQH@cV$UDKICzK+_lU}``%o$*(sQvrj`hc<^X%lig_zR^6e;^k?{>3|I| zdtwY+_3o%-wCDgFalr-dcv1ppNbe!mlio#dqIn+1r{w}~r~}7dKPzd!>JO>(`S4Au z2u6k*!P2FP&^Rd--cOki+ZUMIq*9|9&$^k+r*X$64JSe+D`sqie*5QvLq-j7o);sT ztnM$F;gm=X|1-zx?<5A^blwlsn=0T0-7My6S&8&sz*tIen*l|SWbiGPhw*;-OT=Pn zTv#?<^#}V2mj%5NOu3mN-nyh7X<$a6J3Yg&i8?1R>lTLr@6)KaqtVr!cUX-}@ z-WVb%*_Z#!ZH`evcTXYobNqeJ3i~Ym5(jPysKU z&tmM8{?cb}*s+=}u;tF-%eWXg8%gB4GAW<;Z*XKMfgdgy7#n>3-HV#%c9R}CKZ2=h zu7*F|&Wl*?Cow=|uia8%cGp-@tM8Etqf%m>nx^s*Jv*=nL z{n03#K3SDQ8J!(VEYO4GuZCz*+QfxOkj+S3;npBl)8B``irXBX3KoXf^E)S@0^7ws 
z{POk^r2B%Z_~XfevBB#+p5W6lE`~pDP$ln3?^}y;#B{-os4U*}ekGnc_Mh*KRT?d- zJFFzKvgb54x8q`iD|F-e%cV6s&Ug+JOd7+*@K5a6E?j!zt62DrtI=Vzp_DJf4s+nn zImo!lu%;-_5fsfjC^}L#gMY4?6ra46M}IaBr{kRRsp5z3tlv56&BgF#TuQZ$GY^h$ zBG9LU@OhvDK|cI<{#Z7iEfYgX3p9(mUlK+i$v>o{rnC= z&zoNWYLmT~2WOWtI)xj+(4MPI`0ivTKOvI7JF<>G=6sqNrdG_1oajjVW*5-s&nE-L z7iLUQ{$!@4cOvt9&ow$(V;-~VRuXgb%0?#g@&O<*=_GdqekTu*T3~tqJTR&`o-qv@ zO}{di#-76s!O{n(nB6_j7#xboxJEoj!Iw#_X~}O?TI`&ri*+Wc_kWhgf93 z!yk!U_Cfr!|2q3SyBk61ZGi*!;}&;+Ucj}}A{lns-e~ssEH^3ES_37tWqJYz%OlWV zG5JWb$yny4@d(;k<02IzPGL1|8^OhUWnA*#-eD9b)R2b8bBLC~hjF#1T)af4r|iRv zDI{}l7E_%$igd-zsm#coDcB>Ui|klWGgM|g6aO+l4B8pvT4eMjP0{cNSqxO`aP)tCgf`<(o)}M3)F|A5XZ$d1NqH%4)i}I~^4|4#z&! z_#t<_eNc0E5-Iru@E=CUF)z=D|K_o>KDd+Wk0QN!tUnCB?vA=*gz(ZXh-{toUg)yc z1Szgk|MxzPF5QQn3ol2NqDXe!_YB^~_SNBx;rN3|N6M|UqRO=U&0P$>_$7c7U2gL*+HF}Fy>Ro9=c*Yx0Wfl zDr3iijElARL1^!nK>5V)BAyMS3D=Qa{X*H_BKyp9@TDAJ5It+oGA35r5%x_%h-!mL z?AUnlzMm%B4>6AymodpR8A*TP7ohn<3^yM&k?@~b!L;bhV62=c*t3ZbbS{O%Jc~)t ze2W|SUYHN=O0SLd$kT;8-Ydan(wy+Zq6#q6Q~>FP22fw)BUt*QK;qb2MsJi;VXVK2 zBqsa5LV?8}&>rxaQK{YuH3H7TmNgfl-Kde&nUEGiZ}N38?35}DF%v@XAT1{LWHjv- zzkw?9eNNixti?xs{fi$?lhzyj#nm3Zb5%`rGrj|>Yg&ff2#duuRCb~b4FqneI1Rgy zePn>0n(ds0db;?tIti#Zn4vRfy4RF9&!V0a? 
zFwvzHI`G=ytC^|r?BsDa^cgUZG+C<^#0z49xR^FMqg0)kN zBhN7RjVm#y1@hR@VUMtLBjS(|=3%&&bbcbefM|fGM0&fl&Xf(S={?I@^t~?-ES@7l zH`vRGjXm;^VNU<$tG3^dK$R5$viR_RR?{CZGT8q7$gDwKU=fDd=b_C!G252A@ifdA zJjWbfm9x*(PTqvduxIQqVto=JGUi8J1NrGo8ije2XLvgn22i2Wu}!X`wJtS>;XEu13kHVQ=V z@ADa;shs&|zUk<0)($k)ih0L+AM%cS3`I)YritggP~bIh`fvaAu(W>fpiPf>#Ur@< z(+8vx+ZIAd>kTfF6FxqmA*i_A$+ktW)Dx=ynIq=TkWXd4jo4);yt50F6#qG<;xHz87GRM2fXv#1p1lbcG6|SJhdCGZ@8O3rS$1 zsx;P%LTj(ja0=)JP3waIce)NbKg`WT(Xobp1L#Zz9=llv!93Z5x+ zr$-8{AEV5?+HFHE+SWw9mDd8dwpLK1qo&h-UcPjgW-j$2cN(o|vx=r-q;X`m3E;jN zO8U?1BZ|aPjK!&!jFze zcpwpoR|>gY#qG))*#5+9nH#s3d6Zhpmtpt( zS3hSDdVt|YP-aCd2&g&MPM%D7yfY`{Ej zzCnDMEyRVsYNW3C)BrpEenXl30JTZvRX4Q{7SaCshq1BOxEOQjZEfb}q+DjQOFc9D ziL>P94|&MjY6%mAfP}PjgziiiFsyO{dFE~)sB9mEZj+(kdjXL9odS#=xk#>JpMXul zAeiga2+Da!!MDYhu+5YQY0tsXYIKEUuTBkJS@xcOmN-t5GgM2WxjwL#1L z@LMi!!Z!^;$lYLhWZHEFOt)qd+n=&$TctJ^-I|4-T3W#Npb#C*&$VKBbx z^LRK$-yY!ieTegri($z4$vznhF4SIZ zFMV0VkXgB8GTi>L0XMC4;pSSSU(kiR_Ds6LHs(v|BWN;Z2gsyL0A*GKC1Kw|)YU*K{+G0-VD54A}1Lq*$(tJ#e&b<13)2(fQb2J;B8Yg81*e5 zIM0|V$vfyt7vEh=D_?vLzh5{DKc{a8t-f&#@_jILn&<~LqEldfZzpNC%ggG>{5X)C zbOUs2YCw zJ2EcLz7Eh$*@8+88A+HIoW|YsFR<;){ubFs=Xff)e`OYRvB{0LHPD7*jz7a^6ZhD5 z*6UhOYr4-+FqX`!OS z+5`8*Q`NoMMOZcErgz5sAbeB*HL+3bQ$ioo&@++gWkG0-JDR0vSlLmH$XrvNU;JIK*2&$5erxlr72x{Q@VhH(kzhd21FF>z1((0iVYM| zu%doRc>X@;e&6$b|MT2FbIu+TvOCvruQGGZ%r)24>>874J)&*7v$o%K^}P93O0*J+ z{((;xaytLc$()|!PJN_%pK@lpgVfK;**PZD(XVZ>b~k$ng5C#L<_a4X_XYJ zqxx7rhgIS|P!ihi0b_B45^NG$flX_z7S8jcwgxP;T=U3-R&?K$t-HH`nKq=yT9l(E z)zRUQF_}uRh#Zcwue=Ii%&O&EKW^MhyXf@y_+Fm^E^DgK6=vhvXlBxxW$b&8VrlS> zv3w4lZ5LpXyBhLwT!45Gbq%=Lhoj;=#pnZJ4G8x7fVx2i=<m zYOUCVoU~L#t%7@jY=S+yGh{out7|!$a#I;yy0`@0wcrH0MyCYrsH9@Z0{{ysOgQK@ z0SsyngVQD%sN1|9h=S%vbmDtEuySV^SSOgt4YXWl{_RBt`l`4UbzL=r-iRxM`@PGd zCbfCcj9C*nirKGOOJmfzQO`nY+&1vSvV*XrdaFUIchZaN;bnLe@HlUZzi zhf!GY_4gctq^-&=UPQMIu#~o6n%|F7dY9x|Eoiz&zkb}^I^t3%4aC^}_LJwlcBjsF z4`)!d#`@8dzuroD{%Bzu{r$S@j4i>sjLi$frERZKM*$-zj6+qrnk#KLm0nfTCA240 
zUYCBCV3W{#G`NLnYqg6pLnoQeO#j8Y7cqkRZ#)DKadf%eZ^z=1?i?GN8{SIeP&hYxb6u8d|ouR6q8#oNW*8g0QHeDw|L zuDroHly#Ha$uHz?tlY~zS24t1o3Pw`QT1x}99>yxVxS?kINgXNr?H*Ab593nf}#)8 zCdHFsu}Pb0e=mjE-8y#u;5s`C>f~2L)*A9CD{3HTNkK&ByFiuXrr) z%83?j+U*@;zc)+Os?R)rto~J#sVRXJ>W0G~q~G7Ui1*frX?Nmdey;?ZgjVulKP$mX zFKR9vZyhRsh{5PEgqFi59RH`i98n=%sxw|;WzzAK1PkqaDD?UE6^{NSDznicmcfZm z{M-9jot@31P0wea4p(H8UwKgQ8#U$<|FPH=b`=oE=iP@>-YX#v)C6FY>V_`n7NS>_ zR)Or>i_jrP55m*Gis(FDiLjnsLsgblq9t7?kdck!(d!lopv5~Ay?;CiWtOi*-B($l zC!Xb?G<*%JWjh^BzVi~e@yEfH1IyqMY%lmc(;A%L?ST4kbwHA}dQsJF8i3+61?;mE z=V&XbqXkUVLqq-C(LDEL^razyKeJ-FU%Li4b+k!P%!?3K?g=MW;m!iK@U^MbPG2rC zW^5~YYIXM6dg{QQNp!`@A1!^5IV^5l52M=A=C{uyvD1pxN9lc|^Q5*j`EolWoZU{@ zWjcu-&`YsCqSj#@mUQR0U6(L%){uQF?S1xBsSen^IVt^H0dNM%@!Yrj4kK3=EB{bZ%fy?IWF8hH}}WTq02ekuo?#QO!5xqfjv#k2pul7Hd2OFz5W6HOXq6oyX)MtpvLDsv+B;f zVR`q5wbi5DA1!ax3K%`Vo>=dI{=TQ!e#wyKGudvbY&51FrOev)(sWVV0;_33#gycdy;0y0GSbvgNFDSt@h3nw8+yn2+>j&SYw6f))+z z9-Bw1OFC&Op>1$r%uXfPB(xredsq!k_NKm$UTmp$d=q15-xEk{VJ`Rf!VBUxbdJ*+TGvs{w)x=qSaVoSgqLiawki^}6CYIH&wwEQd zAs$L!8x8F&HRO1fHL|D2eCMpF-42Kd`Y>cIUXNTEd%u`$#qN9@Nhc z_bET(#@0`ldW`K=eCeHH*+h}0c7&^0?P#%;)<0)~Hg()YE9%)TV|x|P-9Bk4p=~vL z%tw-7lhERP_q00K;zhma6Gd|qh0wQd{=qSLm&CfP)xZuY%9ge}S&t4#+g=G4rm7tj z81;K(HvRjbNUMnnOZ}_IqT2ud%v-eB{L*HVblrJg|w<0sB!$^a@ zHaf>52>W$p0#MxM3~GXwVf(M<0r_JnXg|LOJrt3JrT`LHzl{Vk<_LPDVXGo*6{hZL3dt>X19PbXZLpL zI}c>OV=eG^w=$+2Vh&#rS#!fj8QW|AeQx2VJ;}@sXU8)(=ggJfziQJhshyrlmSGp{ z%U}%E=QF%kc1i!|^u~)ZN#Q&5=Et!;q$RIq*b>^>;d#>ECDo%dtyG3-+oLG;X_=`ZUZA?LI#d|E=SZ|T0 z0!wrS`4Y)CYy(kd1>D3~@wsbHt$@>hb)Y&;3ElDdG`!vyMFX00P(!Cg^pP+EIwFX$ z3Ll(=`qo>bTYpYR-}Z*U?e7k8W%VyWjVjMMcPAOM((mW7rY?%$Y&A%c>b1RW8!NLX zhDCjzN8fa&m>Kl;9NmdJmlcdJXYTU&`}lO}TR(=Z_!vVgvH z_i1gcdp()bd(vkcvLv+K-xo`tm0*+5nm@Xfx#MChbMq5@X6&12)~|`CDCNHR9ATjn zT$?_&)+RFeCB-1Y(m#@h=1d%hYC|uxhh`YFuYJ8EtxtkqQWAQ0L+1-SxUD*F@YhB& zboZ1z7A18oCl9}y0aI3`!r5yH_*1_&7}6FX2g05p-P$VP+jvK`dC_;|G3y!)ex>4t3d3;R0CiPeD4B`q^ougE~E6BN+*Q!%tT zq?)_%%oj+TrwvVXHQ*d8-p&##-C-_e9$_z%!K5;kFA8GxROT{ek36EK<2m%Qnh4tV 
z=wfEv$w9`nb7S-FV;qo?QK5dJ>$M)0$~5TxC#Heve9MTvW{l|9F4nRe4>3m*_y6`Q zqux(s98?rpcYGR)Pi)N|i#5x0%3$`bzeg`TXTqRP89RS=A48sLJ8u`GM|4KY^MI~Q zriAvS&*;RbsSkbAVSRTE0LM5A;{pwA>@>$IEVD4yU4|_3BW9`lv~tR$926h4lJwbLyac& zBCkGNf|=G_6zRx9<(kB?;j3&Q$Ii*Loyk{_E#Gy~Gs{^hGvy9e;?aoFWRy>x zj#l&>K|i`gqtM(CG;e7SkpHd$dus;6Sv+q5JZFP=^)$3G%^9K3lL3*sDqw2eT+nBK z6N>9wZXV|vj+(wqMEg97P|Nff`0JKvsC0WaM`hD~_Ro5Zwcy?q)^xr+dzki@^!*g- z6Ro#3cUn0n$kEHcEn!qxme9YehSSGgoyPQ!|9jh;TUt%;iyg~-J+IKr@|oHT)Ah4Y zSPA`hS*^SO&Z2nTzt5Ms@oF*E&#aARe%@Z{2c`d2DV6tlasJ-NE^`=a^?bU0y`EI2 zN1b!6*9#M^YaPb?-c=_`tt7O+ot`7r2MIO_t+mSb^oX*@G*Oct9lwb&pU*5p*EPn# z1ME78|5v|MM_{9JTeKQTc1HlVRPOvuEAZ z`j5>cNwD0Xasn~Db_8mdQ()OmAz9te{w~uQFTFV1)jmVcosHsLR7uDRKPodwYiwO+ z!df;A6|IJiURJ)?B~3lO=rn&`)blZfKh z`7qvb8MZyE06yE8BXxxlNRC@DvKRdd%O5z5bo#d;$=@C!%c`xw0hr2l@>S$+NN7e& zwJ)QN9~K~HU-rQPM&nS2gGqZQ)$Ksm|)`S7pzBqrjQy{$ARy4U}lo4eC7@8Ey})k2KmbriHfBc`+2$uVFpr zA>Goy$@E2bF5_$dSbiJ*>7N+BmMK=;%cHcMiBoC1VO`7u(c#~74vI@wSg-3JcJT}KcKX8uOJZn00t@={ycOCDb1;{(!s4Uyov0N+ng&dZ_C->~Ou$><6*sY%(hv zniHVIk?m>bn5~-+ZTo5n-TwB7b0o2y9R`9pEsJqzd=w7~y_3U!+&;|SaBdq^E4Q38 z7YXNt)iiNhI}JI8vnn}Gs5d9X=L$z?>I5mZ2C=BkVXS$rVNff3H}t~&Gn?9+#Qp(P zpn<4l=tvrF<}oNsCFO?P)Zm z$9|zRmD|SRSOxMFs;uWjD_YDA%cY;6SbO^oTgEJWMr(TFL3OhG`3 zP_weENRsM6?P4A3ewHQm%g(VJ9d>6=T1sf27LJ{(C&4D6<(cMgMLXz4RjyB_UfS+K z`{3irZVLOuegJ33OqYD9(KZSp-&wmkc{a2h}`pI=&9zj_m;EpyJMC})ZOPO|7bo!+g*eRH} zHFrLpiKKL%MlL=bMT(u1xvP!E^AahB(80|{oLArx%S5q+^*wwq`-RsFshz%UKR^q4 z-en~>V9Z>}O<@Q<%INXtG7Qg{M5gP|{=e~ns}ahW=c*;;o$R$#O3aT^tLR_(^p>(F zYneVZ3VT7$Z=cHE+n3R@VHVx@>zGWpC6C4LM-H&5N#VteI`P?FhrP%4!wl$mGjfu2 zt(|%QiX&t?c z_T`y1wwF?Z)*` zqxjhR>FqZ@P~ne6uHQRZZi<2o+V|lvSl99foSoK>>^#PR%>%A;hgvLP!|pKd*)lqE z+vA2fZqxuyo9+rfitR+y^kk5L^hWqXTmr1xm>>F_QRhDzbOtChqMg+DIyE6H?ReBVJR_;=FyKOKrck56+<9fa?^`!e+O2 zb7Qr&p`CYYSy@iKY^`S*^p&A7>y?)cYeb+g^(ivsC{(#|-B#M$Z(4RYTG6*wFSK6X zx}8xFex2&z{o!viUBmQan4e3R@;SD)kCNtRVO4ub*LsV05ydsE(#qiw@mr>AQu&OC zd536mRbw)(9`{qK{~Uu9%h9yQ))WnOt7#@TrS*5K9;T(mT&GRnIktYfVDm{!32hdA zY>%=8n}pV<_dZrPOJ_2+?Dj|q4!-CJI2;|5@ 
zUMj}=w&1u%JL-mlmDp|dtl;X-I0k*C=5Y)%Kd{!C z-ewu?=WyJ7NU2QOchgv!E(*-|Lh+iot0rURVH$(*)?^7_RaTJ0_TMs+=yipB2t9E@ zj#Q>L4@X(%FJG9ZC=JqKr6}5>AN|yr1DxOYXUsxri_{O$ZA!;vn&zP*<(0GZRMtop z!qh%nOsCY0oe>{aQOn49lg1iaFm@*2f~2WZ+HXwbq;@62CZTo8{TD0haxvX%?^H%; zbtX%}HxDs#&*ysFYUJ`>YfHx<@wY0aewqYJowqF7W_(XPKJ8#@wQglUWtB*=OWs9N zJUls&OnMl%gBr?h?JP#@9nZ2t)-RLR6Pk6O`|+F=%nFzTuNW>w1sgD=FLEp58<2#4 zWj{i^G}4i$-;-endc3L@Z%qNpzDbi3tvpTGyOL@-hncN%f^j?~@KFPaCN=)V*&S3v*sB3U8*R7x# zIS`r6GMbbvtw-@K3`gI|gIR5@@Bx1=P>$qlzkdi^-+U9ermQF-BpgfeISp{@Zh2` zt&?HTW1COuUoCOff5? zKREH6>B`7pdF+<^TU&)sRij^9xkt(u`4g{LwW>F)jMm+zU*6_OOWwAU)uwsm_x-7knK=Gv-yH>yO7^o?*FgluIf+GOC14pK${Ovs8SqPUlOZRHi)bYmBR%gI1;GXRJxiT*j1nlNmS9SFl!J8D?HoOZjbg61|Sd znn^e9$dby)>xMmB$myY~ek)?U+}lgzrJ2ybv^f5j>AW`wt(R>O=W&_oBAs)1s5!RZ zDc>@dx#}>3HL2|+U2wKSTGynFSxh_59fk&|~bzQIWAfFjIm->BT^ln6QlG`hQ`t`=1!V z>>2nUVDOQ1;pHOHWVpv9p4*lRLL=H=^z`8k0`i|AYB=y13(d&^plm9>{K-ZBBr&$ zjR@m@=4CC@!S!>JafN_S!Uw%tJln*5=!(Wl-lA!G>^*t5p*q7Tc`iHPV8Jd+LSz$inGNBPl?W$lRJt$wfp+jV}K=t9#~^4Q`dymhU% z*vH&Oc)S-*2GI%3u2hd`ihf60Y?wt>Vo|*Dkp#y36d~HOX#j|J%-}^RToZ0C$i-h& z%aC$9hP>8(6QZCRwRzQ6Cmi4z^E#L0lRGha(L}3mp<{}nOoW9g{N)!C`cXc-@;#3+rlJ}C-C#AT zw(kXgVTfa@a<`2-W62ny-T|EL6n2Hd|sruk3lWU9!aGvMynT>0Es7mrulw zFh#6w#UfE_i79r%BTJ;ut`eT~YQwWfjtg>^ofJh5hLA~RlZZ*>Jw&QX0dKB+AJG=3 z(tK;_al++VHt(sr5qUFFPBg9{2n6VNk`LF8Vz*56d5Uw5af$A2n%x~MBPUio@%8`G zz5k{E*q`hF46_A5za<{ob*Tr90E^%^f0cvlHEhIr%5IQ9+YAm3mv+(km)8NT# zDtzoOOYql=53pcuHg?VP5bSg1D6kvX3oj{E#^SM~@LdOUtafJ={6;qsteRv9ADlS{ z@B?8|y0ZX4c!=Ao-!R5SEnA5@n$x3`!_Dm+| znb-g~$XEc`9iy;QOFjCKbrBK5`GEVd0WN)e3=CqMVUxyW@bH@oGIz^Mbdl>d_?+ed z7&a?|Lm>=2%RL0oD_#sX7djw|!n@FH(_3&}pBHFIIDp(s&IT&?83^%}14O0P@O?)G zV05Myu8CL+X3nUFA7+Q3z3Y=T zAMj;(!?`-5;?gZxi0J_DeImo=a7-@VSLoP$E~^F1PcUd+SX>}7xEs&gC8sUI3SxPI zMJ$_$=pfMm?;tO4w<^zceF;f;c9R=d3VC+k+BS2UM$NH$!8Sn?WAW4@QDjH6m58|f zj+a1xNoG7M=3P1>Yjbkk2tgaQw+Y0@+ceY>BK4SwHo8`E%`yH{ZM4kS<9nJcL?_QL zXjZ#F#Otp;gBNw=;F=^~^v$G^_i>&01Z%^SL?6}0#{C*%^DIt-r*_nqXKtd}e0yRf zubFPsd?SWR6V_A0v$9MICk 
z79U9gv)K(;|JuozwWl}cc|sp+9XAAyxTj$89TZG^dOkSRKZHH>=m$CBZ@}K}J}^G7 z61a8Mf{A4pFb8D=tj_;4C<^Cb8SyM^NdF-w^GXBz)xR5)n>h=+1tnn>I#rklZ4$P5 zO9_}_Q;t1lAH+Vms$nnu_h9X7M?l0?Jv_pH2-`5#62vO&V}gQO%sx{Sn{`?Pt1G?( zTxVVYhb%QPlcVdfw#x&cu8fT-La89qWgK?&?nSWuPB|vw|4#RHO8berJOz^pyds*Za9^jp!l#34Sv~3=#KzYYX6GfZ% zDT|s<7V(w^N7-<)vhjx-JeyyKpBA4Oj*=RSop^=8Dm+L-i&tSE$g8rNk1O<6;kqTi zMCZva-sEut(TcS;Jauwf^FUn|FCqOrkGrr0?`)12m9{Lxm&bdOd+CRHkZ+8%{iO_k z-1v{=5C6IR6}{=$jAymj_LK&!_U#W)^j!d&uyxqUvnYlgnTEYK&c>9&H{p~jJ*;sS z0SOL*#*u@1Vm`3<7pljBIsXbD_ z92S^jl~YtOY)(ElY&8M1>-z@I*6#zG#>Iiw2Yf6b7X#hT^T4-tdx6CM9#htXf5d;} z&-wqy_9q_0{Yn4ldvsj$xocCK)92QTy6&ft_fL7)pnV-SKC^Xf;Ig^Rva=70h$KIr zal5PN*xpv2Vtpq5bE&RqAtV$zo-S#=&(pV=dErs>@o_me>z1u(PPrd1>PeVuL(3i% zm5iwH9<7>6?qUTuQ~H@ApI;17TA}w|{^HsJxAeZDsR#FNNkcV!!LI;bW0S z%_Xv`EvZdW&o=%(h9_)NYw}qqlT&qZa z&#AHJCG{N@UsL7BzUL`N@uO9v`0E%^{2-kuK4*LsKVMvT%-u-7_M|9&>*+}TmYpGBG_LdKBOJcohGgeiXlcO%%Umauk2V=P{g;y5Ccx`0A~Z{H%|We5dP?d{5JV z)9sxa#a}MQFS)Nv{2c9_7*}Z|KQSbdulFsIpDF%->#ayWS2v2UtQN(W*q_nu_S^qN z{+$29_(Nl>5dUzzRbW$UDi7nwxYI)iZ;D0lE)$+>?Pm_}Gp1cE+RCG6M8xTnGBCyB@LWYCvnm=MqFgjb9r`UA(&qdgPQBO8X0o66nDp11qZcya|Ya&QuANL51@ z?4611$)1ZQJhp?G3aO}jy9;tKU=(TG6bwu7-x`&}CD{LC{Qm|2_mAZ-_5uGy{%;G9 zl1I;bk>esK;V9ToT58Jhjtv#!&n@s{N;Gmb072$cF1Jr( zTsJwM_J&fDa=Q(?#M6Y?C3%gKx+JC1Hc8}kc`7FP49`i&Cgnposbnuqa*-!r46eb} ze>K34$i2jpzQ~ClUa$x1F-pgCR#p(zRYCaRn&rY-R|^G{lfGD(vZ-)nW*ydX$ykb0 zQs0VIABFcEvH;_CC(Aqk4aj@=3zhs|QcjJ3O$^&i#U-D$C#@!(W+idb?@S@%u&5^g zXLZ6NkqK|guAATx^&42>c3CQaO7LOn+>^uy_`mmm|9k#a-1PqO{AtkDc-m+2a2M|wB&kX+OiXbdqBR>r1v;^sJK&DG^$62aO}xzwoS=+VUQmw`CPGR9d@B8hInB0oV2MdAiFJpG=25U1&Wy;i9FsE z+_p?dI{&Bf*(~ax_+QX}!T%zQl@JNm>AGt4;nhXbb=(={CCD48qZ=g_kET zhHEmH!9@O7Zv6YPv*B|N-sC2(Ux!BaO+)S1#KC@t(&?=A$9}KtwtJg`;xb=YQg=o* z7hbe}Hu84WY>v_WTyA+o1-D>{DK|7i2f6fMBw%U5}1f26y2Gv%ohq65a zSpMGoxra@bvd{epL7sNzN%vk!>N8w&m-8M>Li!F1DT-RU-0AXtQoEJ>_CR+vB3wCM zN>{}RMsnmDt){!oL|#!0Iq#=cb7tt-0>7Aac--b`@YC{bY)Si9b)d%NAMyXc;Q#$; z`v-}Aus>=4rkTu@>S}ve2(nc@Q`)xmznDYr3F-kohB0{7=rlZcK$)n1p@&60eUBN8 
znn`*4c-;~F;QYJzm)pUlsWuPaQFsfHk1G~PWFRSbUW^k(EK-%)#mskXyds*4mrR{O zwEy}}c3JMkd(nybjjByJ5>-fEZyAq)y|$;uYm6sfMj7B6ekBt!pNG3K)c*8_*E6@GWBZK9 z^$Z~WxxXO)Rqe1-n-`Kx%|c$)@zGOlTamk0#`Yc}Ma4+2fi@ZuQ;s~?n~apcr?59- zrPA0I36FBAQ&6?p)zZ3o$v()uaR9M<5(9lp?nBJfDi9-GRir5=1HnITMJhHeK>W1_ z>Dd(V-iPt=$iCchNO%qtx|raMh80(HKP9`O-#C8Kx+Hav8&*WN^xQ@2emc{~tbJ!h2xAD@% zMqqX_kl1*-3G5sg6)w|03}#U)gb7RX!K=IeLOtJ9pzTBv9uAKOf%``UcJpGvV0eRo zzAOsRw88|x{2M{h#z4Y2x(V=;M}@(cE5XEP3B;sEhLy zmYv#)JI8erd;SW7wF}N3jl*UwX`Om!=BR`<5KebuTBoPu{?8u8b!IJagXG z<+0>JmvVgGz+AHPB#fU5okG6cbP>y1{GHfuFatZ;bBnMAzCb^-iGVw3!Z=V)P_Bk1a^Z8NWbmTTc-g-Gv zeoH=Y;f5hHdNYG}uc=$?V-Dc+0`HS=Zr0#Q=nb;gFBaeZ(Xa zSLW;_%f>h0xUDm3`d2NU(Q8H)%hll3Kb6R8x2sHs*C6dhopk6|9*KOPliDWwBO{V~BKEE${% z_7SADG=jPtfy9n`O<>oDQDLO=VerMELipS+A81wk3!ltP1?GknVX$vJr~@N{UN{!0 zFd76yGorx#_)tON{zi~1Zhz?YO`!4RsBpyKFeryBgn8llKy6BZu&5vvG&NI%*rj-& z(Dq5-bu1R#dUaf|t}O~Q&D|k*5VVGey_(Qmymtw&@<4&;;zbAEuZX##af{i!*4yJn zH_w~%>Snf*N6s1Xg0=RNHyiYLqXIffKGEjwce_D!`#X^0lbv}{+ZK^$Rg3Xrb3kr> zJsW>HIF_)@De!?vMlF%`$5%h`ipQ>pB zE4xO8?kb1DGD?MT<(hopG3+mV8I=lRQz*i-$KpZr;!gtAlvtp-<+z}$Gzx5Y+b#&E z?^U^2~2B0j_g zGeByDc*snwyymO`&z*_U4je_>ug$>(Pgj8N3f?@MO|s2q9j-jzl1$NIXPDQG^F@1K zoAMU4$cetW>G6{5Pm;Vgb>7#EE#$~@72et~J<`T|JWqa0IZ=MTmAuYg$ZOx&L?-u* z<0+5SkmXa_@y72Jr0%;+{A_Fq`B8TTKEltLPbb-x&Dar(sieu3Y|Qub z6jCN>Jw^^+Aghgoc`fhSNbNgvydU$6NmE`KzL*(HP9L6wZ?E(tFKE2NOpWJ~p(QIY z-=!Ei5Tye+FoRTI{g-gviO;y)nLN??De^qpQdqQ8|0_Q1^pjltr60dq)Jlf$e~#-c z${};kJ;ZgG8_DLTySU)I9VvJ0CcbcuKH0wWqL6xOC05}nPw;IogKZ8vMCtf8aQd?u z;oI5^-d>>+r^K;smTRqu!*i!%dyknB%A*`?&4>}PMQbM3WhVfNi9y6I6#=08{SbEP z90n)(6+(-7`G9r7Ul_A26-=g!`|lm`z-{S>AYef(5V|!8npTMQ?^>w9$|C`9_%unR z(i@LQFRCZ!sBOeo#)pt^{N~}Zqn2drUIceedrvIcW{$5se~c)XF~HeRLx{|F4Lrig zgwXR@NQPceRiy9<7;nXrYe+B z{cJHJkInjwncR>gd8r?<^wb_gC+av7=gRb8LdLKB?_M zVsFlCl6MV?uvtPBU!>7Xo?MIK6<7*MD&ja}z?r<-YJ;2GOeM)IKE7?5JZXDk2HtJn zMU0fs#y|L+BR0%;#(z525>KNVK}&off#)@W(27xE*ZoQ`d7xZ)RxcmS7WcnJ=BePE z*ng{-6%Qs)9T7Ye`){7&_Pb5&zpbqg6>JS`1XkMviKh4_V0&;>nDe+2RJ<=2PPNJh 
zNV>n+Z%qXiG>WicRXkXhJtCNH7YkmWY7ju;{#)&Sm>@su2)-He5{ZuI;G;R+q|1su z_zIuBDR0%XY%Y|>m?_apwUwBe273_RvAzW!34|2W_3p8HGfFbn; zLF=PPurNAQ;B&Eycy@t;TUvJ!C&EGev$eO@lC zRm%s>e*VHy!&LC}y@hZdFCOq!Mg+GtV}aMy27$6k6sXDz72GXr1SA6U`3rr#Q-OjrMHm$s4|GnA2xcyi1-h*b0>kxDVDsfLK}%;L9AC(jD8#mXAKT)2#q9;p=3)U-2_>hIR|1WC6mUAO}(i1!O5>O-5r-RMzam3aJ7*HtcDV~`IPh5HK|#Qx*bDHP#3 z&vp$g4SyC{OWIox;`NzvloJY)xMOv^w&bcN@Oy#X)k=J_?VT zwx67-WQM<96HD41XvLf}capQGQ?bW>A*8u$4BP{vi8ZJfKKnov5v}|Pdop`Jf!^DP zUDeAXe6($_DBWyg!V^u5T%1Lqvq!<)flT7D+Y6v^HJmqRWd1|#!C@B}b@Akh`1-%kzyI(3BhY^#{>d?WAKJS3BA6Z%i!Rie2*N@_(Syh1 z#K{KNqMIM9p(i=C998fj~@tt zid{cJCJ(%z8k5b?QJF>1mE{wm9rx!#8^aqod-*e=dF~rIGrkQfFY39@B#=zu*W9|?)HMd&^AK_6Ba?Q{3b#l zy5>TY#m`@MnhEKz-6#&=ae#8)Q#jmoJ4oI8BfHlt3Eh0`7I6Kz7cG}j1p(+5)S+V^ z>io+W{W4bzy_w*S8hAD!Z<1V5yN;DeEW-(X+MtEBT$zd1ZN3O!@2A0*=2ea_(=Ly~!FPEwgtB8m!So-!AriIS95gxq_sb4pZ-N&{&g zlm=-=N%gy**6+X1AK&$Pzu$k{#j@p`H>9RnwJ_JTyg7qEQVHIR8H1L*BO4|-({f%xbIDx!zjwa`XjYF><8Xgmw7 z%okzzCi2Dl?CJQkd1C#V{s#EOP#so{>Ot?Rvamh#G>G~#23oxx0sZw8IH2_nt0Lcl zQ@#7KP~TU;W0waeRJa9qo6II|ZoLV$p8MjGrAR+s?H|-W!DRdkt)n*=+`V{f#sdI^!!ZEj0w0l~ISjydI3q zNFGKAg_X$ol@Uk;>W!qw%OeZroRPMgT7F}MJ#y(_7=Lo4Et0Ka$ZrzRh{S=fyw_o7 zaK2wW{yWJCHk;PNm~buFztI%tyj6iW=CpxV26FI(@n&E@YXWo|nFTh5O2UhyeONF# z4#r%p!e&o92Y;`SCLVo10ei>c_>&j=;5W5X@U?P2oLxx4TDc5Z!#)QoeMvC-U@owi zPk_!AR=}?!7S=!dgYC_+hC@#a@DD=}Iy+v1{(ODdppCFprVMH&8{9|~NP zhmu&{ygFu@=h^D5GUyJ7=5&>4ogAJg|%SC zukS$O^9qn#{}CK&R{^(AzXMN8JF&8}{UD;T2%FPXh-@)$M%&3uB zWVP6ST|O9$taTqoA|Cl54Raz8^$Ffcm)L%^J#<38CD!u$=h-8QYs2_C_O?jgM+1Ic z7L9m|-~Toy1etcS4qYR;4sj3;Bcrw}5#^Eyq-MQ0(tJ!Ffi})a(92rBMU_30_&JPU zP-Tle^)VE`ZZvXh{5M{MUKf-InnJ9a*bMXgoN&(#)v#yAdHC7542HQcf}PQOp-k;K zm~dw|?4EuMIG)aj)t0NlsY!WoMw%AbtvL^>b?(7`iTmoMgKxlB{ua>p*nGI5R}b#n z`Vf=@EjV>f3Lp-tL;I_SV8@Bc(5(3xHqS)`4%+U+o*WKAE|%7z3Lk@!{KLb@pxH{K zHX#DxFZD*KQh8*zi8JDNrnjJTtRHpsRTsa8P;(3_ZnmW>N(3)i?>%j;Vkp*N+02!eVUsVGwvZ38Nj# zz@A?e_Fh5;+e^O3#s#cJ3rGK;%kosvySEVZg7JOih|ex$_X+{>qHKWA4?l%G9m?c` z_R~o2f@J;y=SF0c>QTOh;VyWgs1t7=N`kk?6mTwfBb=z63d1^l;Aepf++gYf`)}5O 
zM&1IbqqZD44cS4tABx~;n>DPiK92>vFMx^lyYWf;tf0Sm{o}To!c$W7p<;qA{G#{( z)UMZn4SSM75j_QVJe>>pQL6At^J{GPd}Y{qsuKJ8MF5hlKf@@aT5vQJz`ZMufQ2jr zChQa2t^?w`pM3>DFHs)chB+Ydy9m>ol?66@PR8i%nLuu#HFo{GKGgDv#%FZTg4>-> zz~iba@YG>#m^nHgPI-75q`&?SVjVmIH|+!P9gzaV^WK0rU8k|E?mnQI8jg)`(}CBG zLvXJH8gO;r5%{T30p5F~1!X5n!QY@3tm_>Ciz}9bBd%jq3har8WPfb9ns zs_B@6+c@a@o{ziSc@563jfT8{o8a@(H{kf>^B_dH6O5MBg9rZ8K(S>F&@~hD2DFcZ zOTUt_=ubyMg_0$9V0Q>o0_)Jgz+fa>Wf=L@im-nkwOVWep>;J`0n`1lisBPbHhS$UFGSp?neb&-zEpQZP0+a8~=dzn^i#4 z_ajK%Hy=p!z63^|BN(do6dVc`bKQ480Pf@pY>b-;W0L~$hD2q!Z}K6S95DgTSu+*> zIQA2$&N=~L`6uAq=?1W76F}&pelW+h5!Nl{X@e+JF z;WSWs-3AXnj02l{qcGvaP*5>x3${=47A9-51w(|X*bmLkm>NDG3me{qEtk;5hA)L< zI(L4d<$;^9{q=Xzqv4ydy9XN3y%xWrcIP}|%g6`>*HZC;-ae@Mcno?L--Sw+S@7n( ztI%MzJ}mp*0-yTa1~-*BIOj_ySSlrg_vaA+s15;VJ5zj~)>rWC*bcb2`WtXBmw|6> zzk$vRHNb1m7f|N1355U8a~QM$ai>0kGdHDyyUHi9?Ak5twO0tTA+Qd8R2GaNUc<=g zCq9UZVFUtYypc9Pd8G8Q6S5_#mY+1=9yzckj6c!A7BT%`z#q=0k=kWnc@~i&h*E4F z8ZHV(lr{|`@}oY8jAI0HP}du=+$@jC{%}ID1GW4ctL+ilv@rgvHMYomSwp_%aT@uU z@RiroEd-}ql=1TFV}M&dSIp^71Wm0e;Olp9FtFe)7Nd%S=aNgX$O*H6wC^>PdpZRi z9&tv;Eu91wKO08!Jd9Rl5z?t z?XL^q)L|TKe!~HK zTL{kAQQ+-16pVZG22)n!0hg)enE8G)@as`1CI%URfL8#k*kb~)DJqzyM+lNDK7R|v z@k6dSemH*52Z|#PP!$Yg=SM96!7e`|pdy@q;ir z1gVLuLv`DN5x1yeWR=uPB-Jwlxnk@s9w(1nk#I)#AFt&v3$aIz?+@dj4iS$R&wo`d zjqE-0m8a~!4xVy6gI|na4f~&dgod--U~GUt?7Be1*cm@TrwoNN@0xk3w+;`JZQ9H22L(ahCW;& zbfUJv1>U-1-!BMmym=nXn6(OO7HtA2jxC4Dbu)mi*%D~y(SzmMZa^H?o<&2CiuZrl zF!JiH5AxYv9KTHxzds@Jh}TCaB)`0tzj?hqGI%SDZ_wFhscxo5zk|BFDYhgVtf}QPXBjZqo?% z!^jES95I6BM;c-#*kK^+_zX@Z$^m+O4)k484CLM(2NNFUf^xni2%s}T%mfLLzdsdJ zOV(k<*OS37;SMa$F$o<0W{LfgI|-8}j3dH3j=--TJp8!JUN~b;H4HnO3lR$%HsKkt zxvn4N%B4Vist|Y$CBj4Z=Yz#|2{7-N45-yx2{%?%;FUEipwWys(Bh6Oy!y)@w$8MJ z?YI>59wgv6yb}DbLg9<2zF=E74>lNSfs1+OFa-8td*ijBKNXH&=$Hlv$qG1of&y$S z(}3f5OG2ZPlb~nO2sk>`6S$>+07RKI*mwCYXs$koU7p?#erG3OKjvs*%eH!hK<8iR ztS=idY)J)L|E3C^b1VRzcWDf%&_>X^6RnXdL=*K4JIv3Yt%6>Rg=qdb==VkKD zJbzMaS+%g4r$r1;m_z!?ofABK+$3<7m9Q+`rEl3_*vTlo&nBC`RR}J;8xoXT2?X^X 
z0>Pg7t3>vNPNe>P@$b&bWST@AS$yS^&^^dqba@#|-jsAA6P@eG$YCkbZW&qP_*H?R zQF;@zP}iRnZjoVZ_5}<3FlEbket(4F-7Cq7dLE3rtUz#AMj%j5-OntayBN>tYZ43* z_lR@E60-M4lVB<-5U3&{OxtpM%lhFa!7YC^(F5HELE+gj=FJ{eCVyTh6I9tk8h2C? zZ0mWV@D(NepXYz_p!wiGs=xUU`hRBS6~d%(sYuYm6y%sxD$+k86-m2X1nEgF=&_ho z@afffSaObpyn}CG#>u%bdNc(Y)J{cm;wdPNv_M5ucW9Mi1nqgdkre3+gfA%`Z=H&~ z7HhbfS1*MmKNY!iQwlb>jDY6WWYl|LH*D!$0)M``0M<|9!DDMo;66tQnBJR$2x8~L zd)EzM)HMdUUYLt6n>_{#ccx-e`3iXJD`mJ${QYF6q$1hyIFL}tgYvh<-zB#ZFixvL zrg#qZ(^3)s>NwaM7YI+QrXnAlDxhgdDq<3l4kI?nfj=+mKy|bT&NJT%zuh+$^Qim& z)<3E3x-R&CumAt9zZI81{p{b>qI)2;lOQJP;Ww>wG+-UFUCA3gEpNbpH;KoW#Sc|2*xJL=knjcqgppUG5FS>DU zCHJ5unU+7N!Uh{_(L-G!Y_E+4r;DGaT=Pk)_E#OXI6$X)>e3MURN!`QLF_VaIDQ?s zBJ2R$+M7dlKIvq)P0V2jP2{*$F%MWPr8w@B^iiv4eP^iCjT}2V1ZN(!YPF(LEs( z*dtTVQ#*!}xy9oo**7x2oZ74)`e^zo>Z+_7y(;f9D=}+`((1lRxo!K&+C6h;pTud= zrqUTyf#2Nb#co$bn%y>Bb(bFd@X?^v56PF*>AY*~;l0;OHr9BH+IOE6SRL}vrE#bn>#A4YL334_Rp!ODBBF~VdXkHDxj1d zTeO@T-13kzow1wx-9Lj9!TFXa8$)S{PkX2wy8~=hU(bK=@e z)}>d;N+L<@%wLt~o-e7VTr{>@Q7M4#5Q`LleENmmC^wEwF97 zol-ar!WPjcMrpLCXd+i6iCN}xc~nh$HCxBmrcHw~=(es*?)%;lwrp??P3JD)wk12T zirOeG^?esL+NZ`9zsY8IS@jAMpQX@lsen83@nrKyG?-1U%A#L}E~G#5wAiMhQMNB; z7fqDQuzq7DY}*=N+G?PR%9bCr$~D+QzwK7!K$ssrV}d*l_Vc;EiScyVmR9PLfh)&8 z-b7pBfgBpHK`kiyL)}(Tr@aMRxn%cfI_>xtI`Nwx?I^`jvM~tP8tz3mzR9y%+BS!6 z;ObfQMX42bC!B3e@#KtV5!`{dt*m~87wbYQad&bn*$&0U+@Xz*+{3!Lbi%zlw&>Jp z_N7S+bET4?ybU1t(p`c+tM`F&n>0o}_IBemq;}Ge1J!An=p$5=&UE^6)J%?@-c9+Z z#?ce5y{6KJc+G_eleu8zD*I%%6^Eb7podPlu=S-)tbKMC=bjbDx$!&LKIR^!dV|kS zTr-(gcFmyIGNxQQcZPa(vc~E~?p7{!n=kugK#KbDs*YWK&W)=9NpxdHKc(am$3Cl(cC(_MBBz@B@f$o^o!$z#Fp(biP zq?Tu|;#L_(a^~9-+3Q}TRME%9wB&^9@q>~_2DT>f4|I%@SqZt)A2(r9a^@Rui= zx89AWhXx$z_wyatQR~y9?YcM}Na=D{PjBS9cs%;I(qyjB;FZ<&$QrBPYh7t{)pW`} zV$f>+B40}H%12iAbUNp&vx$E4eJxjY%b7DFa@Yo0oPHMjoXuLjoL(!@O3mxUXi@zw zHnep-{jN8Py10Hj-P$C7ZVKIXj5T&S$rg+$a$i0zXf{il z%=w$Db7qH~=s-E~tFTjnD=DyH6}QQ9$_-V+Nm$VzYt(6H zGM)|kww{yva*8#R?_oXmU!u(3JfXHdh^5Cmjp=QsiS&U(3DoC?#oYH(&zk3AMa@UA 
z++!D6%;G{q1K9mCQ|PwGl~(q0G4#L-Z*J0leXGk+#`NIUXzH@QCOtZ^nm!br#-$dl z;@0M_qx;Xw`gz4*C?J3K?0TkvNQrG3JmzR+@pZC$&AmI$0e7Yf>_qMZYj>luU{zMIR6 zoLROyESX!f?F{Q4JdHaS9?m^m>dnPIjHE9s#90}5ucB9an9@csZ&KlstGKuLb@p0U z7uE1Pjq+X*%*EBG(aW#-)1dtcjFH+{ro`zaV4Ek`?&&ga@ z*KDqKbpX9I*@JUB*GgRu(6rn*bxX63`fkc~wj0-fO_IL!(19L5CzZRXD@Xg)erpa% zbfQ{9Ls|aH3~KA)ne;Y`RyIa8ibJGUu#1an+U!I!C;8i*-VZ*BZWv2(xd-&A>Aw`{ zTmvG3(hU$I>UBx?XNOvuYnM%^0$L1>%l^L@xpaf?RP%)+18W`8FuC#&xqja&!jak zoc*cUEtt=J`ijyu%Q~7DSze&tt~w(-cK&Cc6#mujSG^CCNX%E?iynM5LIw)+ITeR(dm z{P`m8t!EtFuP~c4Gk?lz-5;Sg-Ads+ei_rJQffuiT{Y@J&K9oGEP{)=;l`DVZT+v) z3G}@eLiRz>X3C!Gx3W%@;}-ia=Ok|jaGLmK>TBUv)@+I_t)OK?zcD@1eBpY#Rr_)c zc5K5EuF%1YKJ#(~cggBgb9q}P{d7dczB;m%vJ;ZDX@vu)Q@xm)C+cH86b-m~l^9M^ zaV{P5@C|!<`W8CMB%Qu)Vr-SXKZp+ZNM@C{=+H>RmgWT!R@@mxk@Nq_=j^JBD7WR$ zM8^w#>HFHb)REyhHb7B@UHKcOWjCvHxvO^3=`OpPCvICw%^XUkU-WO~cnLH;+UrkC zYJ|{|yo=P5XEm%su{HO8uRDFdF^LKeT28mC8PR8Ir*Mno2|8bJ-fHp11kTsts+Ad~ zz!7~N?D}u5Y-v~$x1iIKd;L6!_L{5Cz1!!`IO|FD4V8(G7dzoI#lH3r_-o4dR@ zgwDHth~hQc&_^07C`j`BcG7Il8{Xi~IU$5;xoTIW;fGoi5vi&>H(v zS$5T(=I`G6T(IqF%BgV?eaR%DnH)!RotHCcI_McSQ*?=1q7_94R~}-=yQMXE#6?l+ zk}GJ{wUTsnvogE==5pFvZlaaeiy>BR(lo2%nY(Cr^PQYi(-vC3dKq_1&WsCq62 zHBzj{fF}3q#Ae#p+n$TPXUe5c9i(pLvlz-=#% zrMJIFIV-WxqfqR~1zZcKxBEtOp)CmIi2a+_ndVx8md_av|ff`Y*kq&eQR>4|k6Z8=~u|51TpFLum&dJQ;aX;u< z>hg3mZhKHKJK<^$6`-?|%F}eDwE246{%Se;>HK`k2@}K1Y(Ep(S^xx+H(0|Dv zT)wyt-#vC5pDR$tz7NZiX$Fs>!}=SI+)-ch!p@E+x9U5B6lT)7$r?L}qs@b))Z*8~ zj$=QO(ET=e_eiK^O}%DQOx>NP0;9#m=!w_ZRD}}Y@Jxb`%+2!Cr$b%MD3>wiA~m-;XCKj3e`c{t*$(T?3c z@y(1sJOV;Alz|eLVgC9B9}i#e%U>Wn3l%k=ws@eaip?nMgS!lLz`kP%#vy)FQ5!=Y z91|*8$XIL@Ur~f%d4?OzibzAgjG8T6SNe%(P0CWS!)DTkelG4 zq(46*csAb&x%ZdfX_W`{|Iz&agZ|5Zz5m2XxPR0C*Jvs3yPJv$j}#<=!_BPwawm&2r?1*cS4A%ynX|x(%8g>r33+q-A;EqD`Qbb3t%> zjUQ3as0)7^9D`m*{g}S#*PEVhu(Uj#Cggt{D?K+o!O-$$NheclBqu5vu@lEcEarx(-+&2Fb;yBj;p_Q^6_cwLDx;@o z7{R{oapwM(dwG}3X*g&5en4i^aK&gc3hoLJKMU!<{2ME!@|*u?{{KP!<-gv4|B^rF zbk`ZDEgYE#UrrL5m)66iA$39M+dg8pjX6_m6)8M4H-y9qBZ`}`u_*~k;xthFnoU@OzUz&TL)_4y9HO^ 
z)cka`5xEHGKHiQ!d)5F}99WE6M_dQ%UcQ1F5q;o}zd5$4Z!1VED}}bRUm?lPp;*y< zN%+U+4N}^YfPO#q3Q80;qHePYNP6x-pNC9Cnd6>Nf>D8S`3!9QA&q^Vn1Qm=0e|;j zdlR?eAI<+ixPSS-?Z5xcUlW^vf5l%L8_N@3BsCc}em+xrX$Et(Sd&>gH-m}VBgceL z&xDF?PX)K?&oO2R9|W!I)S1}}G#I~bE#_o$BPpSJnv_e+!zI78k%=SyOw@Z>^0U4s zlh4gyg3^E>&w8=Y$YBOEaHEJ&ztT&*letU!=3O9Ef5?)aOE2S_3%!`|nJt9&xEkj7 z#2HL+%3RX8^b&O0zM0^yiL#VleTj(g&LZ`UBS zVs5o+3s;T?lDaOMj2=&m3Er4awp|Tl&Wiu<$R-_P!c84!QrRn}OsA5$Up*q2CGUfO zxi^uKzo1AKNx%8K|IOH-f5gAU{saH3+GHsigR2uWqNX(2m}3B zmYD#*zo;pwfFwTFyg(P0s0kk`^)xL%VJ3`OGKDY{BrucYT?J%`UDNkeBf(nGOd8Bj z6FLnn7w(VNU{19d)mxlcj$biKVxB18zz^D-WqP_|EtQ=$h{a3BFx$012}8X(!c5@= zV&v8&Qp@Ip@cQs!=AM2e9KXe(zGid}{@V3uL+|rK;j@r!%xOD+VNiisH^Z@`$%RJ= zjoxn;0Lk+}CS^Rcc9#gg3{ofagu3T^RtySq)86AlZW7Gcz(ct3>Pexpp{eC=M*}?L zsW$P&zMUaAH`lvYRuJF(wi9`sEetVxhHy%QHNJ9Z93HP}N(ddZgss+gf>RBfPp|g4 zDcHSQo*Z1cjkv%IZJ0JDCA{}?5h*)a0gqd4BQzIxV^1|cEIj)`9(UURNBBX~j-NO7 zMQHbD1o3}@5C>5;ygIX=yfI*ng$8<@tEeatrfu9$~!?Vc@tLQ zh^P*x1#A&+l5``pg?fxy{(U^=i5}i*3j_thZ-uYyo6a0^$;0d}Yd7WAv=di8o*<5@ z*9hI>UJ%DkY;n_V#e${Ygz)tPFLJYBr9kJ7G_zb~i?Csfy1>#W3(#UbisAh^aNKao zxxK`4F?U;;(R#m))OD*u%q9BqEfWVCl??X5>VYZ3o?noGyV3+>8|$zsqm*D@*cqU0 zJOr7rVWE6*6|T5>ZPWKDHiD&EdW4}*#yPOnnS?JsYyO^02tTLl3Bniq5-Huqf;DPN zO=%Ne5M@`kp^aWoh~s|WnMFB@qLY`yh>#YB=XqIC$RDTQxXvpSml7_fl8NYY6wUbm7XGsNlnh4HpuSGep}g>dPDG-BO`(k4F( z9YMGE3edzY5vazs;%bwQ5sqsU@$wUff0-lKJhWJm)OxC zbK!AsDMqgKodECG1!|HP@NEY+B6&?xmeSsZO})i>lP@a?;*0VH zn=Ly9oy@6o2IOg!SALM`y}IO_{)FJBN4C|1oI=}k1Mk!=TkyHW`OUh*W5>5JW7lGZ z=^_P!{S-%J7-pQS&U%cq7rdGl3~wc@ly;K$I|7(1zXSjBub+lDZW{Ohi--JQ;t%XU z;NQ$w)`#mKr-DR{La99~j7VOm%8C=zYrKRX<%(zkxOcQN0Gpp5J`p*Hcu~Apv&UO@wlLBS6S+1)j6$u#wa| zA>X*T&742^4|Z$rJ|N+4jp|qCU>)3Ae%Fo*{A39dnbPpvA`F!TLSYv&t8BaqGbxESR-Axda^GD{dv%q>U8Nhrm2lH5`8JKj9Hu~gl8Bjgy zh;=)`IfS_;q!B}ANQPe+WGFC<|K}){zkjYDg;L)}MguG`9 zcF7cCQFRwBW}i;9uz4T>b_m~^i}ioaT`!lyOuski>5fEbJMFY-m`6Q&=Bu<>L(({l zAnD!Ud~F(tEWe1#*BO{aI*mi(A~SiWGoyI?kqN+m0YHL%YRzY~p5#Z@Uj+AjmYKIW z)c|n3UaxDZ5*Yt$3mTdC7*#8sg|vFt@#LfH!OO~qxl#Ly!MEF%uqAsR{CeC7Y&z#= 
zG$sEz>h(FxbfwZgWM1bF{vcR`-^}@JuKe!0MVD4OXlYHu+;8Rpt@>K*=hs}YD0DVF zWtL>VW}7o#6$+tiZ6IiUREsgUvoJq-h?(@OfizBrcYSp+AmZDwvHmC^V?KrN(OG0R zx;dAROJ29AlgL5W?McHv8x!>C-*!45oJh;TQeHeVcb#O0y<62~G`uYw%1Gz& zXjeB7kx_~*-W&@QlY6kq%@J4`5`$?Q-GT{Qy3k3%UqRuNdQceA$e*NYjqECM#(s}q z4P#4AL!F=oG&Q3ay+3l6S8qmu(|NbCwj*aO;OtaWgZ>>LYO)Q_@uLJ-|N1iTqcmgw zv;7DhKC%c~S=56~-+2l+hwFi+HS^F;`&P`a>%95cCu!_fy#aE^qy=k^Squ6VZi31W z+JE_T9)E(O{*nFp5Aq-XH~;s4^GC6N$sg5RoJVwRj3r8RH3b>Ryy4u>QyB+CJ0@$U zY12LJ{X%KeDNQREv^VvQrZm(J*$b5ePYMD(J(-)-PQs#UKNB?Xybx~OA}CLH5gN=a zAWiMsgjxMBgyC767$e(>4P^gz;@Zo*OxJWb;@N?V4ELQOKNycE)qa-k~%oKVnGgl&CXS_79ov^m6=TJZ#ujl|@9~>jEB@Pld}i=rMQmJ_f{v+$!w6>;|XK}PNDIAYW6T&8`Yw57$K zXSjKkJ5e(yfjO&~)TFyIg3wj)BrBHQVUC@-E%+V)iPc0NG5Pfx;;f$xGgGBk;JX#6 zJFI(N*cVk#tXyP5ZVHxcobtIXVSWGnr@%xi4tfRE61U5=|1*4e)nL zv_R<;M%uqP!EDZ&OGZwNJGb$kKT)6&*tEaR9-m^9Abg!s%uE=*B)sNj1RXq=2(9d6 zajU1r_(mCZhH9}S4k?#3ZPN)CJo7RlTud6zsk6U`4d!Vxe(GCKA(RH%}T2yOwS%;dP7$*YMZYU_x8_hxG;XHuq!k2oW|^E;%0{|q0wN( zbi6%st`<9vr)VxFXDysh&Q#mLoaSW_C=VCB>rEk_m0W~z#$^HzD{-H_<1FIkYI$Z@ zDU})7_#ReurN-NYd365&yt&a}k z{l+IVFI6z-b6W(hy=MfQ3a>HSa=Y-u;V1i=gd#BT*0_hFyQRBwx<7B)eAHFf*$q{>Fbd9^U8vk^TD*_-8d% z^FdI*3Orqp^P67#ArH-~P?w$@P+t&@yqoz9+a7g`hyU^di)Oj;{IADjv$PhX^J>-u zqvn;M)o?0EC{ciU!nZu5C-UfMwJkgloQmbF%LMX^c7v}^?wDi;&%^M@eh|Ll6IPXf z77VAw!sW4zSg5cAd(hbdGCog6&7X!~XG+Jg$4lP;Lg)$~dnzEQi+fBgt`(upHY)t` z(Y4~{ejC7Bat#|43JfR}U#*=z={UVSG2n zTjc^498-ekmEPdL_(|X(&<6)%yvzoE zyYjSd1S6T&{$Sg;OIT&=PLR1F8*EQ*$9@!?#qOG?pa)vxpykE;XlYqK*tqQ~$aiYx z$7l!g`>(tMi#+7{phkmVcH9F#93KGY?x+CEJ&#!+`R!Qf*#?Wl=X-f(Q7ORDs{=F= zX`nr!+@dq-3_AH|IAZ#-5K;Jc8x(Arj+y9H@=9cN&<$a`!K%oEyu=(c@Uo=9aKfIO zVD`)!%&qYXR%ShjM%9EPJ*U0`6}HeMt2zqErD?&IPJNKBbp(9hp>E>lZ3zB6FEM|r zKMixmo6&bfJWLwc#DCv95xKX)431yX0F?KXf{Yv&ptsf$%&tuVziw~gJ-I&~%^n>G zw;FB)2CJq6{l`%tX>=M`A$tn^`aK@;5L^Z7{vLqhrGmZr!3clZd5aBCbI^m=Zt%~q zev8FN`hXZaEiCEkVdUJwgJ{ZaA21G0!8U#&LD^<~tmV1mUyh*wU5sUHkL3S+QG-E? 
zhah>O$b3_DvIU-~#OL;>gWWDq!O=^`{2LYqpa4hALXv#p`zQf&BwiNInBoq4Hw(e} znnZB*;e22+FbiB9s{^wVr-IzW6Kqw zdXoZbbFT=j|6~Yk&6SZyyj1hW-RiJo(L|*3LkH+N)Q@}(mf>6NuZJe_NuY}f0&@Lp z!LgV-;84UJj9jA!iqtOf1mgaq%0NBr^bi23-A4>~W&&f>>#zUSP0DEeBm46orFx8{_7=v<@COtB+aBK>IFT$i z2o>$C=_jf$FA`SJ%rY^lcc38)SLV!51&$ z*+a8)ER%~}6B?cs2tImQiu@*wl6#`3TMDPO6Dxv)3FNjPV=SR;S@@)u`M!QT`E|x3 zLAX$kY+W`cWi(nU+>{QCOQT69J;}Rq5}9I?nI$b*>u-=d?;Dv*r(x;;#n!*O>|DPe>A_hcxsMT{_Q+*eAyru52dm9lMiw+B-%j?z>G^n~gCGUN0mgpK-u> z`!vg7hhDrxgbSkN$*-T&->+pppi8%4dkEr}1=B)*!3iF9^M0{%sDXqK+pL;OC zQn73{u3R2uS=`%4+Mk>wdVBU1*{1b?pcmaCd%fq1V2dKIT)0x`JadWYi(LWnEIFG9 zuk&Y`_tAobBjJ`Nt8OxV)&zrSal-FQx`aiZhl$rSH{&zwL-7fb%S2%<)9^{LbC?)$ z-hPRrH`r?(WvTk6iTIQoB=p~#&q(v#gjn_hvS5V554>*2XWd;&il$Tw?&Y?UqxerU zFo0p=^^W}YzsLa->>t^`|DgZre9$UrS!{rNZ}&n-jS_U#_lsD|*5mN1StZ*1S`t3} zq>L${^dU+Un zSyw^Xh;2LvZ3|FaJpvUS%Rq|jHCX9UgkN4O1mMvlFlWsxNEc{hYtJbG^SKgG_-7nm zqO=14-D8Ksm@T{)QRSHH`}v6f)MVI3uYf+whQOZ_GWgdJ8%+2r1%tAxSeMp3bXKPv z9;y(E$^03Cm!>-Me;CK6itOHhm+yMc~AN4 zhu#CN{C7ZLC5hil3W6%eu3-1hddy*=6CCg}fE_3bmfwitZR%PFEmLLjT@5|Z*?yE4 zwzdMaUt0~^u3g3+${&H{Up|3?wJTv+oFVRLvK2na`^Z%&55q`NQRw{_X5XJQ=e`F$TI#-D@Fc6XumkE_s>(H4;DUJu`;e1*-4 zYk=H_QR>4pThTjpApU93CL{mS{W%WN7i$8$c|DU+-6cb-0gG$KDs-Vuh#nr z1kk$t17~#*1FbaZuh|C;mXyOkwQ~FgR?S%0I%D+R1rfUb>{pm{)CN3R+QWCtK83m} zRKviFxA@_U5vb|8nYRY}0o7HSz{IRtprNV=K09QhV?Pw|w8G82Cm-_Ra$kt2)T3}` z;|19EXA5tr#2zNjui>?G;=Ew{GpuH5KE8{1!oRS>1`V&J@QOKips=SFCahTvgQoez z-llxG^iwy#K;tB8uu&C1Rh$gtc3Qyeb28yBam&2)%geBQY!Fd%xDO9fVQ{`?1`HsK zQTg{S{93w#yb2GxmhA# zc2^hV_?g@IL;VBrrF$A|zL^1a(0&;4rVW^6@!|5j_xUD!5m;P~fj9$2F~>s=9-USO zt**xWz5jhtvc>Tq*`NR5{`+71U;k2n#n|dYjd$aTEL@tYcH{`8S_j0_j~cyDdO>)-o;zHh(G^R zf7s-cZT(N`AHaWb|J|8>3rL9dbH=ZFj@=P6T^!f#0oDOuVB#Hha8U^(RHI|D4^Mue z`gitXXDSKI@YqX`*^vRX3&UV_julK!h{wMES%8fMc7j#)(vYWa01Bs)*sy9X@L4a` zxE`#+!;U`(ktd|!XZkyQy4eIKQ@XgU_5^U{Z4rEUG8inaS`7z-LSQYu1|PUThK1=) zf-~%@U{6m0zQAu7L!|BDTDegyvtNUFR^SXrYdygei9gu5RUfei!xQk?h1K9f^-pjn zb}Ov!SB8^4D?pUh5fE=5j^&8iBiT9=L3-{4Sk!b93$Kv}*TSW7+guiFnkx0z{`8J! 
zc>TZk-~Z;%f&bwC@88-V@Gtc*7p9sURnmrf=#7EQ7=6mVpwe}{0zzy`~uf!B$uwN=9J)2Caajj2rk*@Gmt za3gv4`2_NIgPthGZh#~fHVezziW#J5lI6WqkQ{q?gE7)~A)fAEF52*I0%?1EDk)po zD&)-PFypMn^$AOI$e4?i<+MlTjBsFj4!j{*3CBed_<{`Tqy?mzy8#fJD3@r8W(Yuh)mS=H)=Q0$nH(eI0JP&$XRURw@8?@FnbJE@N0lacQi2oPDEq|KR7<`kee%>^ zN+J?coNF$%@WewZB{Y+IX;DroCElgtO0H5{r*5XYd?eJQur&gQF(&v69P|tHM=}d1&DQ9IoRqf(SHNb_`o1Te(-M?2G|2+R& z&;MVT|Ni6q_YeIqa=yrj6J|NE*SQY%fpHr5)YC~+8KK(rrRW!9VkO5t+1<%)o*~yX zK4>JTZh)CK{D*xztdfm%SLNuWAVIhKIwpDLXEEBXBJgc1}`z?;_1{LZt&_b!L@vS))e34e0+b9X~4hfSL9tLY36xd%4D8G90IKmL;o8*k=Q8*cIPon*!4&aoKGbSr?-~?$r7`Zfd|8&#R3@W$4jDl@VRGwlV6?R6Kice^xj)sh&UL3Qgh4T!k8!HVc zYxRv(<~UPo^VpMAWz>GE_4_UA`6fl$B4Grz=Vv9wwU41z*j7^uyclZEuJzP=3ooif zZyfFZCzDFtIFZWWuc3xlH&fROJgMBv`=|%1*QkQFbCiAbYHDuyUaB`sk!qOZOzp35 zrM`zxr3!%pwK?SoWwn14^;tWY(n=Xg-B%q$=Nhy8z#nzuGE@Si=P}Mje@F} zb>*tu)%;Q1-EAR^ypgK(ezEARLm*jcM7xg*j-@At6fiR-(!Hs#l@#yDO7WJ}^C|zm zJ7|@IDqN`IOR;Qk0JH6cbOyW3pPi#LOQhWWhnu|}P~CH-`&N~c*<;jk@#xOI%)7>v z({DD+qWw!>Fs_G|i_f?$6kDfFZi@2#$!+>}LL4=)f%{MWJJRF6YNphC??0da&;Awp zFYNzXJy}jYh*`n=>Lp8QOwA?a?l|zAjzM0+XX)&xq?xqbqCw?k&Zk0ahWG_rZV)q2 z7NHjRg`eGVoF}s^lCQDTiT`^hAb-nh(8)h2zQA@Gxm6HOWfyO!E;JVL+TV1OXB2bz zV)u#E)XF#HJrjSvm18#F>!b}~V>*c_n^5ZTYo#gq`QZ~{VXqr~#n+f0vtuJ~R6_$l zt$RMT-}Wm{cqfD&w{0;UE!fNty{*VU>CnKZdz|==jjGhQPG$a5HDz*O8TE12EMIP;HM^lqM_{Yy4BMSq%DDRDr zsJIXFD7Oi39N^vWQnuE0GGOxsB8gb^f^B{sbg|!LH?5;5Gg=`CR z6^#{NKIF+z1FN~$mNA_E_FnP$GgG*`j%nO*^)jw6F-@%KQ^3|+Ug1;?t}$Lso0!z7 zNUmtKk2pN0lxaWH%>C}DWE}}k+|t!1djHChvpe4@^3>5}PE?-}8}>kY;xt0sbMV(+ z`8VD>Vf^3e|EPbV|NE!;m->hLGvf!94C$`Q29G(#y$DWbLbHPDgQfdu^3MvkY)=Mb z8m-3}4-et82i3&OFO+gFvpTsoKZd9$vQOxnT@LIDcVo^YCYX62cb$2^S%=f_R&Ba> zy^4LVNeQODEoG8zg1Jk@CXD!q0at!hUQqkRl=Io&DB9VbE?#xGmtAypy!d>(s(^jK z=QLNp6k*1g-5(`kjvt)J#yVAVcirU0X8kHnjfDrEqct^D?Gt&UnVepAei#9M{yVKUUOx%cseE-ZW;!r(NQq`#sE$ zxLe|Rn;Yl_s=TH$y<+xA$B?+1JI*c|C(A~arHi{u=dnlD-eit7cF>m&Zx`nq&1Cm@ zcZiSQu3@%}a^TP*A9i()ia5PmO>lD_k6GKT)zl!|$8HvdhzzA(E_$}R;GDvtKcR z`u6x1@nYjxN^ELJwp&jlU5lskUGM%P->-j9E}k}>I{9s?wEthg51jFg+@=*pMfFYN 
z-*XobF>Qrp@~~=h(yh~^gX9QRb!!9JQSyph)wz!%o(EC)yxa+s5%Z|Lv6bW~1tImY zVi?_}_?#^4K)kEp+@-#rYSjLiTz-GVD9W~`kqlInqkE%%5!Qw38vbX9w5+^D(9RU!CZv;$QXWQWz4IY1E>+=)U$#@BS{~%7nI+`s`|;$} zu+`MZ%O*7G)xn!`VI7sRK7iU(IGmgl5Jp^BJ()T`@ip&A^-D5GcPgd(+mn)AxSi@8 zewK(2upwN>cu?Nc&y#_W&u?+AAy;qhB80W2L_|(6Z|3{QRG5VdX}6zE-Vv`=*XN4Y42mu{Sa3hCZQ#65$kW!_&e5N){$xJSzb3x1_7HQgQ>Br6 zA`!bkKEchGpTo9pf6CTuc3{#5UyE~#?^C^c{i1U}s#x>09b(TpiXy+h9?|UM1+4SL ze$kR;92>mhwYc%C9uqO|Gt=;2mbk1Zk)3CkB z@6kB%Po|oQx@gC^d58)3hdbF2)k}<$-xE6ZNfiBi@CIuTdXn4TIib-)ub<0WbCLe^ zHAfu3WhYZxnJzB>?M$~G?&9`&&0uX}M~V5v+*##-^US?q9dTxb78_);hUxQ|NS6jX zh_!rWS*`OK;t7-WnIrE+;`CRF?AcQi(b(KN_GU^CEqAk?3#j&IFHY|fEts>8)z;SI zicf!Jo|$NJ&bBvbufAWRTXHrGJ+p@r23_H-Zf9~eos&g$)0)5XPiRujqQ70ruG^G^xM=_}UrK0VFg7xa3PjSENegI%)8z_25xNPzdgL2+^lXyzIkz)S7xq6-XCp6xI}mJ6?&C;i)M|Z8XaPZhbImBKkr{B zdmr!MZLOO^rPSn+56X4c3Z_9R+_Y{{KS%|Bv&}Kh&R^oHXQQC6}1Ni>e~mWG&{r(pk~a zt1I-&oEwcZ{o@)ZHlGnC4_p@Ohbl1EQRBG!1Fx9)VN_#O;6!?U)pWY6I-&nxL7$3RA3_>`8{xWVi?G)J5`tC6L9bOsDf(xs6=N~tF!Nao@uQ2nM|junu#7JS~O)36>!)jxly({kG6B${}=x=^43Iy zzjglph4+ts@AAl-t4!!4lVy2dmi;E$X3pVnsK3c`-j~fURk`l4zGMqumzJfJh+{n7 z7bnu~TMl(Rv4$8w^AB&0%SEd6?nYwLrsrgyivzWMRw5PpY#4d&+fHKCCOQ7w04bYz z>M4ii6+*tl9}WK3hkN)pk3{pIi{*GN*Uk7Rj;9iLbb{%VO)b3m6CUIg&t1INbITnD zBM*_BlOB1Txx%}2bqzV%Mvwnw#BiPh)kE6PkKqf>_wzQHZ6bil7b4KFf>crS;{8h3 z;dvW9rFO+Rlb)+osgMoo{9(s8k|i^}NxMcVi80rJd{pw32=bJp9#pR<4@7yA{I7d? 
zV-6i7mc>jZW9;^mot!4$<+36#;pPHzNtYM-q$ym=z(2*eepp7xj92BWC>as8_qRA` zRJM}migpk-5toTk$Bz=t&nA&O{8vyvOjq$MQo6|j!;?Il+3Wca!@|gGuA}*$<|Fxw z9!q^|jt0_~m%Sj}QOuNQ9o{vL!sY0>#CUrQW}QBx zXQLFcG3hL^GR;Gn)x=4G3edjfDe66$iVg4n0Do+)U|&cuD!KMnV3aBgUQLrhE^2eo zBClDvP0|ChvT`~1Yf3O}|2m=L={6*DV-PhT9um6M%0lbI>9Ec&0-C&>4UfEZ!ZWtf zz&&y?yjk=T6nj|1eKjj!g_J+IJ53qpv>QN~hed+bNFRfn1wi$d5A=nunWfHv(#w26|O+1qIE2 z4hUshm~X0t7tFSUdtR&q+Fq0K4XyF$BcX<)XPV%dKUTuWx9&)cSH(+uzkHX}X+@zu z_5rveN*SrHzKsezxb^QJ$OQn6(6NbKU^h>uz&2?G;|+D+AgvG$K8 ziyke3Ps@CV@vFgW+}EMsz>h1odt}z|>70 zizH4sI?@|0>bHdQ_vP@%JzIfod7)tIsTg>4x)Kz~E8!NuBz$}BNZ4{_38v@k;}2Vs z;lQdLU~G77)5!JCxGvEVeJoZ6Gk)vCt5+@|^jV4l5V?c*hf=Yxb~(BtjKN(e`03WSl4$S=eSW8aTc*ULHw*^HQy1a*gA9I`zf(AKUN5K^Fc$ul zno0G~_#^#pgZ;9#-~@kpoKzr$32U;^rxqVbM#ZCNf!mR0r~q#bOoN7Vb+BG4bh>#Ce5+&*e_O2ui~4q;FFG24v~@%m&dkQ^b#39R8H%vs?0nc5upAqU z#(~eaTF~yR5~|+#8H}9p2V4pmM85|F@V0IiF#gmd$w(A|Yo<={T#F-^x3?cme=-S+ z$@l1$%VZR(t^@5zZFq3w24pkN6zwb2K*vYEN3jP3V8(qJ__%jGJbs}L?HEczeG`4L z^FjzK<(m=ktdZD^vc@eob3o)8H5j-$9dAyU27g>*!K$7wU`#^@ZhoB&&PkbSvsYTd zj-ltMzcx@f+0zbZt!W1a>}POU{V}lg3&wem27o53A>8!a0SkwYqM_nYc(BzA=ad^j z-#01n#WDeQ8W)Q%$vfdG$~y!H72@!|+q2;UmvDIW#}i2(PaTe5H4CbzmV^1KgJ|JC zWr@;j9^35k*$8A;CsVI$&^1nz~g5Y`0>L6!`0eYU+4of zuRK6&`{d!W`ZzelVln<;Xb!^3Ot2+&9KLif1BHivMQ(@F@#(x1XoG7E6b-F_50@I? 
zAfrR5I#~ff9zPLMmUHpClw;^#%|WCtcLP-&3W8A@A-G|x2i_Gj7jK*S2KB5{g}6=~ z6kc(ZZ1Zu0^a^`Ot1k!HAwsY|@EY3Zz6!r12=F6wJ`DfJz~*mQ;`j7j(}FumuvDCZ zr;S<&&7)Vt#mXj-AKwnR^TxuLe)V9)Oa=Vn(pc1R-vZy5`CF)bEf1OQ4};SUo509@ z??LVR33$R=o2CmV7eePVx4?t=1$dY=mhnP8_$g5tK0Z1eENa<+#=IB-zdYUyhUwoD zE(s_DWUU_V44jPLO|gJEv>Kj_80@9$jiqfvW=L@B;@MVYVwHOgE(9TE+4BiVq9G zDr4LhECcgP&jI5p`p89xf?+2z1YNhhHOm!FV}Sn3<`{PA8{e%J>$_2)c}liXWln zRkC=j;%dBbx)TUUGR6M<5G)y94j#8H$EV(ffN|erCCLIM=x^%-#SXtfO{6EbKievC zRDT0J6lUPPX{)59;t1?<2V%LZZs7i72QoW<9aR2O!h2?Aq4)0%Q2oh7IQFg*u$nO& zzwDh3Prr{8`sqc%ag%hK=sGX#a>xqgOjE~ycG%(4$DKk;bQXx;%44nQt%%uh2|ZPI zf`@;mfxDL$Ab#}$l%p&{GL7nzk@~B^ieVR!`kpSde;&Y36eA&w?LcpihQi(YS0zV= z#UQ@;G`f2;q={^|m3aAP15>-tXujussjX8C2wFWJ8K~Lf_!x?t)-EFOY<>f?@=njy1_)w^`=Q}$1d;~z@GoZqIZ}?WGiZd8@S{P+N3c7O=q&UnKU0b98 zz4CZ4cGwSLadf+6OUZmZ%DF;vw0#18o$H2PPc3K~&^`yY#s|UUS5t*9D-40hujeQ> zl*LXV;kZy{KFD;?2BsrJA*ZVgjm0{`Qr$ynx#?u+R<;0}ulU*2;dK&(&Pv7i^ryot zuohjf3WS^N_$YJ9DE#`s09Z5D0*C+M;{csqD4=Hyw5X~Ai`Th9E8a1*TwhzXeqR+j zPA9;^f}O(T=w&$F&==H)@vzxr6@WVOz{|QnXzR@|92eg%(YbDm)r?j{`P!SPX~t5- zb_(Fgf=OscxrkNoPREmry1~jk0**CG!7|B7@Jii8`1`;xct=4^GV}c-$(yh!7#j8y z_*v=Tg`w&=Ygst7S)znv`|FW;7L8f=J>a9IJG!S90i7#)QD^0LFkoqg(@A^$gk6d= zA4dyjrmO@L*4_hI&8uNJ^Bt7!Ol0QW(}0%amSMCb5q@eUs^Qf071b7HF=v*+fxeRRd-UVLno((w{1=LnQ7Sk!8LG2qyxM?a4-PYP- zy}070>lX2#dHaE;F~i@2RR@>ip&jaQTX+d-JjsBcv>!?sh``PAm%-f)>jj(Tu;hKq zbePWt!qAD2(1us}NM6dMUn)c5&rVU;MeqU*_-kX{9V2vm=!66>BcS7#6Os!f6ycQq zRe1HZ3zAdmMyPyUIWWnwhqp-^m|5upY-T?I+Nmr&ad$2>Jb44$YOu#Cj#f~qpa?v2 zbbxI`hk(|kP2jRepvbHZe+G}C9XR(7L**af{GndTck3(ErnyghC+X{+aRWG3Ow=96r6{H04U4DsFC5g^=vj+s5U~< zIc*jGnBoE(AMHcUlQe*A`Y^m)#|&!VHNu69&mhXG7x_QR04%u>|FXUZikbrOHi0RQ zHdY5_tt$A>jYDA9DRay#twQfDt^i$Ud3aA4hi(Sc0js%g_}YrOD96?q#uc29%-pvS z^KV_3y!br^>0GV@PVv4d(aHgrhPeaFXF~YmWD>f%ZY#LtY7Oiz8v&;eMyPAaI!Ws1 zRrq^_J`B8Rh?|zU;f1A!c!_Kw82j22HOLy^_;3vvLHtIcpZWM=!8dSk=WTTLjTMdy zDnmZ)KatD^C@{!-f^InW3(S9~N_JnjgH0jEaGKl?oI}GFNfpA(aG3jQ3$?KmkUNBb8NM82%NAy0PYs=0-t@(pabS| zP?BU1N3J>umMzK@Dkpb?zQ}h%a48M^2)qt9O5 
z`eLEn6fB-;2CICgVMqTsIN2@|?kJgnO{6t`kkAa-xkX`_e0g~3Z5^6>Di-W9(rjwX z7D)zgX=3P`4^BCp1&*nEgthO1q{RL#QZ9XlZdVk8MwgqQ#{MSCtLZ_18vWrDV++Vi zR$#4QJ-q%!O_RrwJE(qYi_ohCp)v0>FgKRLZ#Wa^x55sbaEyfx%|5V8?TJLLOABhP zHb*b5euG;ZT=B%nWxX@jeM%J6k`1UwO6in_0STE^G}`#h zX&D1)#Rd>`EER`^Pr@}8lb}m%6dv)s3k)9CgKtC2&^)PE!1BNa_~`PvQ14m`I9&1# zY|xX(+kCE~x??KvZK)mpApK7=a~dz%Kiv>IoYTg|(^`>F<`kTquLkGqd7(cc$538- zGM*#xhhA@EU|)bPcKb-6LX&FLBV&SXwfXo}ogyNp_W^t7iMUTW4eAVxM%nh0@qtcV zY?_?`6E#ec_=GlmJgQabbV$k`cZGQOz5^h0?0wL9y9lv5qwwbCj;N@#RbmWA;7n<) za(HbWsP6b9oYp=XzgoE&={qM$=KH(@G65HXKt2>2Wn359C+q_34~Q?lxCiuK-b3x@ z1aNjpF6d+>68+j!XpL>ZlqY)=B$Jzf{e3ggxY8Am7*xRNwduI}*m0!W_8w&i%|N*Z z0;r{tBCQ>sfvw_uV08Q}xI4`Q+D@DU4mt8J(>uWoK|4P7XerqnxjiI z9|D~#{?O%BG~O2;178i5NRp~!QF2HYx)(YD*2VS#`7$ibd*q8-Q?^Mwj(XvX%hIsf zzT=>MR|YENCE!t#x%i`965K|`LBB^l{5?qq7ib&8m$LJa+njxp$5TqthKmE>%$B8C zu{s*BTXhb!>B~UZ%fq4Z=Mwb8@iFSg{b=7d6@2RO0&IAu6%1P3^Cbu?O~MMdO3;hncKGEd zQgbwZnm_qalu(HyaKn%z*=v`FKEWC4Qp43gvd2;UgDLqVSjn zuxsp0R68#U?o~S_DV=*rc!`-HiRdmtp?4>uy9Iry?E#Imy|)Q3s0V?L>PXOjBOVmH zVU(1WfMtrlfscEq077FWev!WddOS=;^Ofh~FVt9kMX(ladp!f|`B5OiLB7*5gnfwUJYVZ7xYYHGK}(Q#{$ z?#p8I$GQnTJs%8rha*t<*-@}D<`kM{zXMe`1i<6BgYc{OGHC1JRannhgbphJ{6>HjcgODrNv!I1qsh$=%J*&4&dUfBVerui!*om3h?DPoU(8g_Nebg zmkQGG^NafM;ErTq9={1mHpW0(tEZr)VGR82IUdgq_Q%!>#b`sDB{UZX z;g;tjz$?;%UJqC>d8|HK_vJkLAZ}}V6lx4(3TEM7R!gzvmrjYnQD@*m==y$+=8{5K__Gs_C6Ev*CVv|RAGA|Lpv zU<$n9^8j27W}usb2E1#O4RW^a17;6K3hnJHQE$y7;gvFgy}DEJ59>_=oeF8~cI+W~ zYVry=JC-6fY0orbx-t$vy#(98r~NuLHH)wjhg)R9tw_5(;Baq3|j% z=w>T}cSU%>0go7PyEg|d2(Lh9Ka(K&Xck(E~2@Ed8m6@J`niOcu0LbE?O@O zT6R1X=&rKDk4LLPrJ@Kp=U5wxbIySN>xzL@wK6zDol< zaPHWM2F@m;B_m0AeU=LNZ47WC>_;DO%)&Bqy{Ke|4ji~w4sPr# zLGwE0VdjR(!h2!fuxjuYTHm`8r#gzzvhTeT!#&ZkU2_JglY4;5_UXV884+;wLk*l< zI{`WSq(c|q2&gc}U()b;BrN6W!zbs4Nh&_{pv+>nDN!LoXlb|{7JT^(!jJt#!#yLQ zvhscO`_=<=s=@(+o9b}&fDNK%an1RhdwOS*@$M{o&@FC(?Y+{Ow{-8 zDWaNeQOaRmoU>vDQXRG(HFiwEeY+RKHLmq&azhaGk^Uyv5yRj$>nJ#SZwr|19Ecyj zRKreVS+Km*1ibNB2=cqWpw<(H*y_4GlIRS86Sg-&->)voi%vCoX75y-xZeg>v^4^6 
zoj^D$JD2j%<<`%e8GfL>0hmhfCWqj9Xl0-{Xh0J~5gQUPr5&orDQd@6dvJGu$z!nakSLh8m8m2@BdM zqVsz+a75ZYp=8onTrX3P^fgqm%~LOEGv*5NePN4@yUbAE6nUsA=L$xD41rrRImyvC zJ3&o5iN}7+7Fv2<1rLAjMWqL|(6R%=BrmsHLyU&57v05Ph!waE3-wHEoe?V04cra66fPLo% zp`=uE9Ho~a#y3%@RsIMqpK@0FABHR*XP?fe3a?J{7!2MOCQ2=G3Ya5!SL6QBzZ zfwn1gu+`TS!Ws`-I7?m=Iarv0UXdb>o2&xMHs3?$t3|-4-X5QRnkU?KA_Mjp_o0gg z3=Y4k2&1t8{~2!#{C%e4RSb!hyO&@!v)$lGpoTE9mBLX|qwvn4ED&?f3dTLSi}owT zffr-u!yL_NaQTP{aG>%Wc+{#2y+jGP-Z=;-^^C%EtAgQti#V7wc^Pa!vK`3{CgDG$ zUGd#u67F%|3=VCHfpIPSL0ji7Bxjq7^R0HG3%)miYWNS~tNuTzhBr;p6K#awPjdn* zwVknOqXnuuk_H^dj>S0x2}p6fBQB_#&slqI1@)hvpsHGZXg6evPp{FDdad)j4&hL0p1v64EOuW!m>qUk#FH@9L>K5UiUi) zSMWb0op~WR=rn=j*XiQf$p9JUsNtL=npl!kfRYsc2;2$_Kxd!>3>NkQXQ^+Dle9;G zvnIm!O#yJQc?F0*84gP=FM%WBGN?yoC3bwe1Xb!?0v;(N;1Ny_U)?l=^|TFuVXg$0 zefb>NUGoG&13R#*S_w{e*Mf`udXYi&cQEvJ32xl=2D~a81|F+?lYG^MFv8{&h$j9xwXTXfSm+0+-Fj)N50PeRN ziA`?)7VdKQg|9Z!SfPaB=!!g&r7?$jD+e>DOvNb!TN zDtS%5Z!F=pR9Eb`I0bfoC=|YXHUi#SdJ~A5VQ_)=2Gq`5iN`08g|DRVMo5p_V9W{z zR%mZQ#c7Gq?fo-k_hUK!I?)k7W~z|&@&jPJzB=w+SRlAQbhh!0bT+{6XHg z09d3y7iBi5HML&$fkn}LIN^dkRJgbpKa097iLG6ND?U6%;iKvh52}@v3i2gw5@nP^INm=s5Zo za(+Dzk1||^*C$*+ihMPkE6q{e3lF0HXMtF#`UM%kuNBJ`D8R%Uey~U+EA-4R6VS#t z(C2hjc>AF_6vP?e)e0X_;ciVVP&kM>e_ldg6Z(MRy9=mDM%qKWZfTlZV+7vZ7!JpL zr*O*ZsRGN`P#E-VKKzw44VLzwm#Dp6fW`@y!p_17curjxN{;1#Doah^R{RGU{sho_ z|9y#P|9s4g2?yJ}uK;p^3fyA#yXn$)>C8VQ1#k-P$Ti*tGk7I*IMo5NPp<`{O9Gs1 zvJgzRe<+l9(Szn%>F{!Y7Al{ejzey`;gTt~@E&>~%p0W-`Aa^b#T!~dV$&YUNh>L; zRY(HkIX8qQ+f5|t5v|hu--|XZEOE9m&|1a@!)_BIhOk01OU?kDteYsMkA? 
zf}~04zf%tHTI&sWo^yl~dKbXmhw_nKqcg0t6#|24-e_@&4b=T@hzrcZvBA(1s8P`Y zXyI_UsBIaxikJdMXgmV2dpp{!Vvb#+JYcOtCQu&z9gOZtf`?A5 z$<>LC=-BZNu&w$w*>0v z8sR+y(a>SY7hmk6@p!uvC_Ok8j^4cmeQUl7p1uDdF|Lw@|Ji>S_Vepv8Dkll|9t*G z`V0DB*gp^Y6u@O0IW}$>dL`=bO{DKMm5GPfC^7Y!2SsO8iW;w%v@w1s_K3SQ&d|AC zb=;KN9n7yzPx1M(gvL?hC8w(ZY(99>=)46?0k1?(F+f-rU{p4J^KJP^>%2jXmf2L9Di}i&2;{jPbvd z#I1QzNMB)oGw$AB8sEQ(U{W=^*`Nv5+ezwS1(Z}bPau~4SzXY3ocS$+aLEvJml$R8ux_Fx7hpC!lUuGgXDl)6MF)~VDw z%^L2~j#%!_mTls&IZK$V3o3u{kAq#!+y2h}3;i$XpZynMvRq!OX*1U(Hs>Ue$~n=5 zT}=>w@mfUWnHci?d=!cJH6pS_rh`Aj>N`A33A{Xq9<)7am)%6XP^G5xdL=H405wC0?@eV7W;eCBr%m299gn#sQ zKA97zO8Zvbb_ikikT%ofiTY0-{DwdS>e~5ZLbiO;R2vS+pu z+1Vw$2B$pIWgm~!V2vqb-T_|CQih-Ct3&<%r9wTN_nSQCo=8kP|4xb$`>zvV6_Z~Q1H+aud} zU2U(3ZLj0W=hxl&i?&=P)>bz2?o1j(HK!~iUAc0y;rlDHRnCOEEgwL6hhHG3G)N(?E6HXz2{}q*IMHb||1bY#VF`WYZ^i## z*#H0k?LYlDf0q7-{MpR8eq2^eEgduFo#@4bxAdEya`D>d$LVvwe>eVF!ZoHpOJ_`S zuZb6Btf${4t>fYy?^9=Xuwq!MO+I;Fe>$kHm@D$!DRv7pVQtg;xVLu0xJ&c)(LVhz zL?8TZ>9u>c#pwwu>;h0Hx_I$&qqFA>X1IMdEv)Vp=_So$M_cY^O9Howh5}?+!;lk9 ze_lNOv~e73m0-hI5c+VCA0~OZD zU^qQwx|WVgTg;x6?qT=NcVv%NUZUd*=W|ZuwOFACkNe;g!+xNyiP1+3*0!};+%Ocj-4l2({xR)^GX3dx`Ze4*rFubG{HgKulh!; z7}nKzdyz=Y-_CG5)^B0`YHxDRMvu93H_xy|_lL1H3j*2c&iUl+wPTn^me-gy4drB{ zMY-s#-1bJj9d#W4LjspwZ(3f#Fk^H598oMSr`){4Ue?k9!X;={n$Gs&jUR~mE zb~Pbyt+ODq9lrC^1!s|F%{lxn?SbUJ!?UQX`pRThTnyoTdjt8U zZW5`&dqy6#J3)4ZxKKZ{RLJ>*O1aAfA|LC(2%_6SK#i;jJ(|z`N6! 
z%@-VR=h?2ZCO-$2QvT^e-q+b?q^gxFVJSD7zoq#!iH2Osd1<~x^x!wr`)4#!bt#qi zDa(j5$;&6sd*t&BFOMQ6yY3Q`14U$TCc%?Yf5US?#pLOZN^+-R0r}o-BC%bnb=fq< zjJ#TUkIbB~jnAc~NBbGlsK$#p<-qd!{vPDU zi_T=pnF=0QQ9^_zE0JS9sPVJ>Ic>k$C zlq;t<|E>7{3;X~7IRE@Z{-R)HM}tH=iVG=L;rbe$ifu>m#h#x`*k89rboIg)V$)q7 zqIZFcjW%aTi(E>ckYV=43>$Ov^z>y3oKxF6@u=>-qUGztIlt+3OvCp{ja>r@qN!8Q zoj(3`K-{cRNpH8Wp*=%Y>E7bHDd9Gc`-a`>k@B+2@Bu_|aavb;%*tbKob-~ zfBk^k>dXk@6DbbdJjh&FGDI~$$fU|0cCw|CO3{DLU(}Z)&VTFt{R{ecEkPvTgF8qL zf(+>-OOHUgbBWjK$>h?x1VPRBAy)g9@wQzlnw$~i$E)+xb~yT`kXTve!H;@5hb$Of zLC%?bo_E{+5vgFkhCgSksl$AAg4eMnf2@p zb6lVAv;h)RG|TvIpF*kM$Hw!4xYA)La3n9>=e5J~FV3Xt&`6$9ZU+Bjas~fDkS5RY z&{kfYxYQ(s-5h2`Ode9P9oj% zj{lW^Hhz2j<8Q_PU)cZu$NA?U^2h9{@l2&bCby$Ng>&po;1+D#%}w*mW48t=v!dW= z?qre$BL` zJ)3fpB{^ZkP0ISB44W}kk-5!JX}UA6b=z%)6p7Okm@B zDO)v%@imucwbWUW_Ka2ZtDkG=%J1f4IhPu7Xi`4=uHuU5OP-i@+j@q(u>~+`_PfQJ z7SYVeNtNt%KU1z=@gj5CH>GIE|-n+G5JgX=25p~`u{$E|E+(QhW;Px-(Nn_ z;Iw`#aT6X4ij&WL5eK@tvb}z#oUT&_YoxlKnLr&A3vG)y4cWuu)6eC&=$PkX&Bd}! zKeyju%e8Ykwd+e*{@i`6a?}>Fy8Lx5NZikjp7dNi;nD>5 z;pk|#GPjui>LFsiG9%dAeYgMWKlzGVAO0QvJM%BhANHq>Cgrm*xl8dlnf>4?@!IGP zIeqsGp%@_xcv;_LkuGAUpO*=2T(S6LoR zd2bjbrWk(Y4Mao|itBoa^fnplV4Np8Z{Q2jWZ+3m+G<1}j7)r!;@U_E+*)|Dn<3xtfGvO9xj<6*?I@`i ze2hG@%Y^21gYQVpL0FYT@_nHN@0)?T>918PP5iW^EK^Rk3U+NDS}Wxe1%8YUsV zJr9xV0y_DlyIvDzNqzjUU>BMGx`S*qP$m-(pChv~oXO2ow~^~w#eeB<_RbSK{qOVt z-}-arUzk7q)BMZ)L;l)X_==|E2gG&0d1Ap1CC0go#;jAgD7FuUG`(7b z)=arXxqhlReY4+=xp&5do@c&TY(MgvsONe#t^05iC%^DBW00ONTG#nN-1@DGG$?8o zPqH_oE?Vzn+V8Z{g`fJloyJJIx8i1}t6EuJ4J$GrPq$tcC0bXMcSJwjHTI*qG%*g;!04vA8}t1&M% z^jOy|mEwl(0H(IgN_5w$gZfx8jNQZ@{7e65-~AB%t@HOU=-+(+o5v-$CtOFo8z3@bBJu!Lz@k#6L7PmUqKzJ?~;4OUUKX z{5fhziTuU;_?aH}2;_nJjl*mlGCs%gL=zSGhWXlrUc3XJd(+4t%&B&`L_9MR-Z_p*Tz$j$>rZx4B;P|+{yPc{q0~Kx1IN42szvzwT2L7=#e@% z-H2Or+Z^s>1v@M^_9s$vLU^#HfLzyi!hs3XB>Hpr62NUFFYWRPo_64Cey49P|7n{% zSw+=2oLy8yMBLs*tiIL4n^1R)zq?_OXV|};;QwqVTwng=f7QKA_|7DVVf(idPkImj zHUB!H@n+B8ivPbbfB47w=O6OtWCs>8yz>oGCR{xe^E#0ke%^@9+q6h9E#98#NY7^7 
zQtoj5E{Ew4-$YFD*+YWDf?cAEo2E;~H=BsQG#WP@OCHNzRP*WZQI+@)N`*ywETt}Yj9>kllhA{Zeq@Z)7m3FxR7R&ri0e7eiP2H z*OPBCzpM|^Q98Q9^=T!dGl}Kmtj3Ft4oPE~O-3;Ute|>duwI08?DYUl3$qlbz>r}XBi+0GfHQM{W(Y< z6Fp)oXSa#R+Qy1}n;>(2R6F~BtiAU;)_?f_FC$w-$jFRjW?i_R=SlV`B(hrCdw8|e zmb8^pDn!xLPPm@uS!ruXDGfz?mzIjg=lZ_?f$wp=Kc64`!f{;3bv!*E=i?mr`|b2y zpygZXH^pbcryRe(opO8z7R`|vncDb%?QoH7Z20Y?=Tz;JKE2u3`el5TqxI$r^Q^Nz za=BFXlfXc~e{~%`KRyopAO1$oi$3bzzDe@e4Xs@LHjJ{;W z^n8!5iNn1E!%s@eON@LQ7peHTK!eBP{jHuITaUP3PB;}W)Cg; z?GZNphEI*n0IxTi&%BZ+M|cds>gUlzQsrg7?6;)B&A_uS9P^40btP^scRX^OLcLw} z-h0$NG4THT*wQO*(p--p!XYo~$w%Gyt*nz&Z1~`lu_oK2Ss}nnbwXF46s`M>1vt!3@1u%cGq{Y8;}&6?jU3y)M* zT?{(om-IKy_sGO8e%trls6PEisYeOY)uBFa?f0r)wQ%)iiE(vK%U1cq4cn>>YZYYs zqMrEN@_Xx>J@l{dfg<+%HTq-K^0l8TUs|p6vrgBqZtHtowl!MjbJK98-_Wlavbwo? zvf0LZ)X#aFy0pUErpj;VM&Da5_oOqb>Z^`lh3dav*D70*7x+q( zj`=(vI!v0P*itq3)Kfo?Th6{IxTebc{Nw89dmN=caYuZjmPc3rn`7kHt47oBr@NMP z(T%ND>#E248BMX5G$dW}b!lq$y;~dX+r!)3FEQ*))xN-+zQ5Op`;OnMEd3}m^zE=T zs$QrRC|&X7ptqmm*XkAB*7@$p-RPZn%Gi$Ueh*Q={8ZhG!pvk)s6b?ovE{`Fa<}BnzO|B9FW-2yr&M@`%?^++ zM^E?OmU~Y!Z00S==}C&7h0B&ows{|yT)uJMyHkCGPu^1%&#`7lB!`?zz01Zg^-^=W z=q1!BdOLmW>RodGv1h=~|9rlk=^;rA9N^XG14z2w-shdVr&zudbdKaf<}yiEuNKb( zH-2~<#!Qi1c)Y@=d}zAlcY~_e%=(Ak4RtfUen7rY!nC^{lP*n@>@C>rlT|iA()06U zFH3JbNw1j(o+qnwC52W_5|@v9@|{~2UWyt|d=f0KdM&xSTN3kRw)eLsWnM3S|MF`4 zv&P5f>Pqiwt0>9gmvg*aAE}_7Q|g^zr(qE*|P0#*1&}1N*X|xFi=ze_naisa9mGs zJqEI=+#?*jNDDQ10KczaDNg)gg|C)mklU?Y)Zb(|NiG~CyA2IO;GzUp{ksn$ONLU| z-^XHZ@KiB->JKoeRA<|Z5o~hMl8rW4v$kF%72CR^3l5p_q6t>`A?yoj&#i_uy<*Zm+Xc^f`0@P94s>SpWa@QGi7!sRM@dDqXZ9& z&%V;Lah6zPvWo6bcn$}O{(*c29Ghq)fMn4Lm?A21xp5SRk9NeWObPbz9A9kzd9e7L4=ddrB==E2q_oum;(obz)2aL-b(jK==oG|$+kI&C;j?5ww%FB6 zQ6`)2h-RaWDeQ+oQcV}{6h*-0|Kds-9PAgRNgskVED0}NPVY_cNb&;(C z$q#?pR+2|1Eqm!}_yq7C*8$ZfAopM zrw73$)!sN}a}x4xg5xJVsAFh5HSgO`%Y|Wnf8Wj!|K7C3wEhvyLrMj!SLl)!F z#ADd!BhpnJ-KfNCE2%~f=1uRzxFO~=^#7#7^LG4%h#hlj)AJI@Utx*g)Ky`0*UeDW z;)@$suBCw!Z1HQIl+&^!anr)}Wbo*fY}Af6=r(1l(DzgThWnkNxd}h0%gtb19-++J 
zUU$a<=i;$;uK^maETtpo3S?F}5xninSNa<&r9t`z{Am7fSl|8}Uagu-J3gzBiI9l* zEW$;PkARNd`=M(`Aq}jH`=kqH>&~8Nl|6)L0G)fPr7Ys5AJ#_mS1W*!Qey> z^jJM#JXgGyHaE4vu9NpkSWpD{Bi%V_S|s;rQsjW?`c$-XEBuao1W7kFSlQK$YSlMG z*({*4gWIGfo%QhV^|{o%_6Zo3xMSxlIZ(9QhT5C8xu2RIwRZJ|CRXTS~_Vo>5%=yG(aFwS8oC{(8it61R;OrP%>SFX9UW|9bP*rCvTW`zl(-#TRYvelKlr$lw(UW(;ZAaTOuq5?Kns-xa!Z7lneO7q&RvFCqxWl^&I zLbU66`fVkH;7%97H>Og&P!@=(G?q53914-|!ZG36PnbWygdRS>Ld%t%Ak)wc#a+3AnWCF&OA+vtoxoM_UG9MDuA- z8K_KS&-(D%yPeVaw;`K7y9Y_paUA3v%J=OO_`%X}O!*y-=gS42^{O{^5AMwGvitMj zMGbJT;5P(D{DRL`-C?effMFK?-2K@#SU-9lX(ZSR`5xUdbwLj{zj78nu5+QQzjd(7 z&Kg2%0aSJWz(uHl(6&p-AK;IizTe_nD z@Ncx@DDaxOPMEzpgsXdKpc`QyHguC6lqIID) z-rn37b8?rGEd4*);jtdP=LTW9#xA;a(wU9AIrF>^*J&qrf%Ttmi^geF=(GI|s+%s{iMC_cFnfGZ_8vS`H`DVeakxOaot!JLQg~z^Y_b?j(|nFmn5hOlF3~585G^bj z?}>9=F2a}(eL)gB7|hPqO8-?g2-BTLN-HajaFXLMxI4UDtdz3=jt-IvF;5G?dA1jp zCmV4}kpe89>`6XV$^2{0AU=AhS=1{r$9fw*K6G~-OvwBLLGrt^Le7j*^0R@Dzan|D zMuhBxb`0;}0Mw!D(7$RAj1O?cv=zOuu~RD6v{yomk_9);?T@$o)8XrmgY@r58Cflk z#u@tpV2Y|Gu1;AhOui9?c71F(?pz}DI+`fgeOFiCj}FD&s@|}&r>pQJ%@EfsEfY7s zjThBVdGWIuRqez$ZG-tBgp8f}BoYIG`^my96k0#6>S z*8($FUIN_>{+PD?htS^f5t6llC3oKmCtBmd`==)UT>6PT#@-V{S1HN8y50Et>{F1S zr4F6?9)_W1+hqRgJ7lBZD2hFoe}sAOjCp^FKb+400_uSZ*sE6qS=cyX>ll5G2zv>B z@4V2!=c9CCSr)8W7{{ah4^y*kIYpg{<`E-(NiV__V}_QA`SGbZr7e`Bl#hW!WB{IW zPQ-o6 zhznzE`ReCjY`$X)3jJ+i^nxQ0|6gac+iZXnY%EYO^cYkp+T$b-J$(4CD~(nt5N2(7 zMK2fGvG3;r_-dM*v(i_CHEtv@%y(dAk8sG!yaQ|h^Wkk{R|#Rtw){HS9=cyp#_jd{ zY1FT?lt#Lky;6sdKk}eQ=)>cWn&Jb~bs*e5LeD3fW7Ea~c&`5m$bMzUj{p2=?;&@b zf6)?O$T=@326x4P!@69$bP|lb7=&-Z8phso~3)G`^>&_)AhP_RA`R zS+SofDd{q0ZAs*SolN_N=0L$)Puy%fnC((`(E9)_Jn#CI#f}jXXr$c4u1cl&Y!ZR>>Yg+*6r$ynxQ3C^>VfS&;KUVS|4Nn zGJY|%?pK2kOO3Jn)>M3&c$2DR&Zu5$iY_(UAT>?pvr(oz?gc^iXgfT#@C~e7aZLEN zPK9Sm`|_VZzv$A`9PcOFz6mb|IpT;?E8g1k9cT^)eB%|tuYBH+_@NtgKsY*V zbAb`%MwoS}5B|5(375*>xtkNavc6J`I6Fl_=2?_2%)KW?H_tF^eX@Xl-?~B>*LMi- zcit9m570pK(w*SoQ@Q`{94f;MCO^H9S$e!4lD zSLzs0-vg1p$M$yNq$gis%>H@8;gwG*qbQMs56WX(`(W(Rwp}>Vc!vZzf8>~)Q}owD 
ziQYEISsV4X9KEYXoXvh{;Gn^?W@M4;@M;>Ce3JG%2I8s$b&S6m$rqofamBE?GS$u- zi8r*-0qazDmh&;zxVA%X^HOj-8AOL)C-4@I6nD(r118bSD0ZSNj&qIy=Q0a%i$^(( z{_q-h-Lc`LCYdmMTO{8gpc^OFQsZZPT(oZxZ=PBNnO&lI!5@MUo5Aq#!VwbHmVjyP zKz#RE1*iK)@U#2lX_|c`_ByW(&3d;5Ro~^p^-TqoK5{h06wDDkwe4_7!7OTOnm}Ij zdqVk_T{Kn_h<=}YqD41Np(RI$4RYqg31=xz7?T6D+$urZ`3rgEpTd+9~5Khl|X zA$ZzX0<&NBq^0vxX;7mReOGR!KK*2LZHg7_jO&K5NQz@E_QAjuV~(lqkHK#S;c}Cu zbSF_&IP)+JC#)=>)eW}+?;66VJ3nF7t*y}M%3w6sm!gTLF+bm=j4qem>0bOx>M>6b z3JrgX_N86eJ1dxv)cz#B=`x{jSZ91x*9m9ajfbOi2cR6q#h-_3aa8sh*k~HT>jPsk z_Gzo2{i%e!HAApXexJRQ=ZGp(?C`$~b6RpV0`1;^5+?ZW5!Oq#!^m?^ygfpNL-Yrr z{!m@MaHSP49_oy}^E^52)FpAh!ky|#X0~XmGn4*}55ue#zHD~&6wK@W1@_18B%c{M z^m>LCA2go?PNz1(fM455b%Q&qs%TJ}nl=^AazfijCg4)?5hRloXk9QV)Df$_VYc;4%^RQu%_{P!;s zUz|#!eibIz6uLoN(cPQ=JiQ?1%(_VV{pUdcP&?T;gFAGzP7^;%FVne&Mtn|t3-ntZ zBsPp_qsmBcKJ#P={q5n%C$c|+-K<;U+fr59V>`InPBKB*`aN?#1X*8LHDZMQdM|3mJU@=Wx7 z6~O1tg!5J}OF=Rt1vgJvEMzU&B`PS#pxw=U;rEiC^xIe)VW9=4%nrcHHLiI2Ry3AW zmC(m&KZW39yQzDxSEBJBSL`?1g&og*1G7K{{+Mno9ew_)I9=}w6;|X5vs)h1fzU|I zGAX8mU$t=D?F4G->C6f@o$%n@X_T|>6h(BMD-=v_tloEh0VIYFqUO!>XvpKwFn9Y^ zSTiRC>C{ER;OYdrwJLyWv?TQI^=h(yx|J4OpF?fmGGYIRgK&G31MbL3;?aGaaeS03 zh3`#fi;sHvyRw6hPP;<>898uYy&L#A8se!sQ~q@Q|KlsYfxY)tv3%QRa>&&KNoFRf z`o!WzZ8e^IBa&BThVgTGo|QVxfk&?FK`JY5l7rmK);ypGSKF$}y-q1SwC6W^UF-=( zdy_Z?hC+nXE{dM^h`#4o;Ddo1sB?A<_*?EJN9%f6*(D0&_6|h%Ey_GWd5D~0Y07W4 zWbr9fZs~4U%hvuvfVqSGTxuzx`_{%-I>U^&I(Ci!E-?(19C2RB_Ml!Kk`v z5PCjNK;^N6sHgE6+Hh8cu5ALI-*{Hq>0B&DywfE$dF+|={SIuU!F>O(?COC$|%zeMS|-6(lQo9H{T1~%Jxu;t!uvef^~WLM-h+*f&z zXsORFnyu-Eqvy(fwh4!&r+>=NaFZQRK2am;kMQO$Wt};t>spw1peJrwq{M^2AE(q| zKj~r1W-+Hl!uz#cdFj4DdbuJ2!}E<{Ptz%y-J*}P>}+ty7b6T*)RG!5sS#u3yv(<2 zKfv3o_81vxi8iM)XnCX*i%J}!Hfk`=c{>=R=lkOGHhugM>mt7c>q#%;2<6=frJp%3 z=yt0m8gxG?6!aZO&s5G)Q+g0DNnIygX$!!W$MkVZdLNA4*9~9atbpN5b7ZO});#{X zEiV0F&c-8@c>3X!pj-3_Cg#1RH>N`oA_4$sq2TnDO zg6_H1v})f#?0zYL!WKG`QN>`0-0h7a+t1RG@*rt_O)X`WU!$fu3Y438$FD=@Evz_T z!~f+e;xc(}_(D&a7^ZVbR2x46R%k_VWnU|V<@&UCinFlm;0;j95MkMbEA-}vB_HV+ 
z$j>yTeEOj~S*f^i{)!s0Hvc7+t!;;U|BSFBREiI)_4q^&GuE)l2WQm?GRf$LclDz9 ziGP3GJheNw?CZmUt5|%fcY!LW%ek(%{y^IrN33^igUE=7LdC(~;_>1yl=3qUW3IH) zLiIr2a9N69-PVZd@!jMMgpHK;(+OoA@^f@i1;sZ`=ruY;w&74$Y~1}>dLSl>&#pWR zPRdR^bmd#Ad!^iKed9I_y_8Afo&%J1MD8tJ_K>o5`tv^pU#@yIm~Uz-@XW9f+S4^0 z*M^yKbl+Z_VeQE|(J`2y+9D1;IDt0Se1*6B6nR?pM9RsQdm}U6!PR&tel|aywyK{N zR6lKp?KPJve6Tr2Nssv1<^#{|JPtDAlR4&Qg3#!g%*)G{QtG_~I%XHbr`M;G)uw~e zNq_&4s_i>rf8s~zKJ$#QvOJY#H%|J+ZoNu@eHVlNZx1euG2$0zjKsWU9WdEk#O=#vhkBVpWo|8GxMjnrIUVpgt`AK-dXAc7<$c5^cl3Py9lA}n zKxg3*Jbf}!_|lIRlQiaWm`-k z_mtT1t_x0n<_HBoGbzL?nje4pPQs5hlw4W?Yu-ii_JrS{Ab+O5nakm#!cZP=(WU;U0m3;vlXcB?nB*F9MEs~5Kb@E6b_9!3o2So!Wy1M5t0GuE$2|FZi&WP>shd{ z`VOe6nBpLdG9h;3Q6WFih5j2I$qDY2aJid3W_A{#@uxP~aTsgZn#dGSra?!g3paU4 zaml{NvN)U!FH-|?zBWVY@egEVR0c0fmC*5iGmL(8omOApC+IG)^UE>Z6_W^6b$T+SPHU?1nVbakdLN8VKRYaic}X{RA4P8y7s+s=XA ziYoZ^$rR%{89<74HyGJq&w;}h!nRM2JYtZBEaCfSx_{;;t@c)8mm&AbV3HymCwwP* zRYZ0e!8zIqTbQ`t`c1BhEFh10~fIfIVfXd|~ z$yvNdS3ae3aZeL0(VHc^c=Z-!n5qE|w`0FU`4m6V3}2qQOKK6dl=S^O^wl7e?>>T| z{q9hN*J>CSx<_bT?!u=p+41!gMKE;jIxH|@=JgX}LxG-%iuanM_HjPW@|PpqTS-uxTg(vQYj-S$FL z#&P(={VDIm6jJ(P%{#{@@R)ffI5Bw;?#W*a)6@>rvKoIV8U9t~F)V-|M!U1K@piE9 zkk=EGK#$Tob4K?X*cJMi@-^0z?D$7eewKinz6N7_K{*%%b{2MzwZoS6LD=&45)95* z0<+>1VL{(Y;rz+2Xly(enlt?P=}ASNRUX0DJ7>|XzEZ)#T1H!zL~(kdE-x%4aIF1L z?3`~%&JVOf0j1O-pV!RvzD5HMAE%CA(Y#i-A8(1?LvKdzfuZrqXrg{tR-kHxnw1+M z?28*mZGTLwQ)W{B0B?SB`L}Q&d;qkCJF{DmK#BH2{8Ii-SIW;xfK?gQsr%!k5gJ@< z=7-)Z_tD&0OTqV8cU&J_2Ln$g!PPA{#Hn*TOR*eK)h355b=K2uk7i*;%3V4%e=n6a zW(j|c?XgZH2Q-eTad=^8oFiw~EnGPo9!>8rv_I~FF~5Up&Zpz_D6bDaT+~G>?R$$p zBpajqmk(4{ss-Z}dvjsWhvL7_{?u~s2>B|%28F(%d|v+CdN1E8{^|abf)bqY!7@#B z=PZ!kUnd=U_B)u{))4A@@&Tg+^mHhrqEmiD-9A z10TQW!yg|H7aMPCVu?IPomq8D^g8$e)SIOkuqzTD-tWPKt716Itq1PjHct#b7J*yM z&2ZxH!}PcFT+q+k2xZm9RI)N0?Uxk@ihXnG#rG&CV>f)VB!!dK*<*Om6TB z-f^K=tY(Na{dPk7?bCw6=XNSxsLY+6*7~W4wk-DA4zuP&@VcWR+}ZmgWe)xa>u);a zvG2b8db1|4w+n`*8f!{R^Wx8EW`l!`4=+BNCS+|o0bft`!!H`ZGOwPPlh`IS&gzS~ zSt;1YLul$zby{+F5Ebll!S_+8>D}|LcyC?>{0oZ37uM!@WVk0@G56*ZPhOM5U~?Wa 
ztO?fZc*DG5`@l=iOI2T@FZd|#gO&falK#ssIPea#&GRVMvdn{9o-r7jvXh>Dib0!z zBGO!8&-JC+q?+3Xr#?TYeotc2+VL`(q}9-{oC<2LbKv(4{ZW6zPpZtnKv9!2Dg2Nx zmYlc=ZXTs@;qfFgbAJq`11-USV;{ut0bpKagC8Gs$AR@dz~jGxT&>~*c4O?=;GHRG z&AB0NndOQ%Of6Y4#*b${I0qJS`pDTp2lXaFoK+lBR+KnpMBiaC*k_!i%S%=AyRzeUPzsei?p*N@R1TP@yj*PGNA9~b+zWWw^x z?ZW-6Z({9oe;7Mohll_1#eeBdFxX!ceTRi{c(XBvuQh}aF`m0v$D`VxNSWCc8whRp z!Ae_Qe0o0!r;Lo{(?x^vtkY1ip1WH5__QmxdA*~cqe-aT#|AeBI%3Mq&Em+quO!At zfY*~cs$Z|eYJE0BbC@gsH?9|F1qbrW#6Lozc@eo`D_O)y>CLTCqV|Cdx^t{28r%JX z!Xw$>IeY_sw%jic5cKff(-p!UdA>8csX}N=>t*ny!YHYcT)IpPF={OBgyC!(AEQI-cB4ACoIMg`w z3qD;vEN*?=6%vh8;Pt=5_(b-7vJdW_KS-kn zn{xNDOzPLR%f|RWgpsd;d137o7}F^Xiw}3ujn+Z1Rr>@L+)RY^ug=1Kfj6qmtIy(fvfZTVrai0Jke+_{kH%(^6Y(>A}?e@&E_xUsvzO-4?KA5liOnQoCP2}(qi?8wor26FC6c#i1WsjQN@J_hjys#{=iYLe-aYyw zo=rj=`%$jR`Vzo1UnN56Qxi;Vi(@Wa1$UGOV{mg9+T2@-M+7_o*Ym%C{#A?9T)epH zhLX_BuM3Cl1oo)Xz@FQTu_XK`?9%t<$6kQFH(FsvBH-n~WU=qlUUaWCggafa#VIH3 zMW_2iS^SiYYyLHfR|I7=oT5r5N~%=k94y}g?~6{M_0&q8grF)T?2=?8oS$z?gRh66 z#+Fb#?2%3jb>(%%gHo}0=?|&PH+NLlSSpSx@Id^_SgAP92l1)8pw~8KDb;X-uLHwojNwS%^ zlAKHT!i<3#lt2Cg%ykS!t^Wckckd92y{^R}33J5xK@qI5)&Q@c2W)&l22OSzh!0OS z!TV(fXgxm~726DXv9MTlxx5S7pN^sEtESxP?OOWz>mKQB+Dg3)dh??5OQ~dp0>(c% zO_c{u!yP#XZs-6FR+xTJ=vpu5hEIuNkF#K2+YzzmCz z7_m&*0qe)Upd`EbH>O;}5BA(Br?}vLnj`%%rmmo`$zZ)Au@K*2s;PKH4)oz@FV=7&t^!#$N zTrS8p1RLP&=&9ljbuB(t_kx->_TzNd{j~L=8kIMBV_BEC^pjFK)us(F7qDWL3%3^g zR`9$R511@9 z#dWfH4o~x7t4`|py4Dpp$mbakjAMA8=Xr2yNae`c%jxZi3hAQ4dN4n|l14rcVEsKw zY&h~Tov*b=-BUF%F8dK|GInQ^e<8f!T@*%t*29IBA{FMW7VJJ+qWXn!>|XB9g(L3M z_)~|VbxUu2Zeam&;Zwwp8)ibYJij#Q>&l#X4T=Rzj?OwESSZEuab0zKS+(W|M!wvMI@dX8SB}$v9fiJw?2}zG9kn2>$$PN#Pk6J_0zP{r1l>Kz>@IkVe zv6c)i`a|zI!E9{f$g?H-_neHX{P)l;3zn| zS+KdFf?MO2$meb$?Je0rH(N^~syGop=T88wx$0QIK;R!Mq9}XDPg;{}&L0l-fLRyU z(uCM(=oeg0KQBkg`N~g7J9i*2A0oh%Pqw^G)d?HAor8%xzf+-{^EOX2gnReyfpOti zsOFo8TyL~ia9rI0bq(@;3N}r!FzE?Qh`A{axU7dSF9flj!8ckSx>)Qt_q15mu>+0{ zw891FdvLzNOVAt<1ADG|W6khS;NTWY0ZK~b;@p)#m`A|w|D4eJ(Pr>mKb$!xy7S+# z;#FrR?pZRrWscWMzQ8j&0Xv5;gOf9r@Zp_fH1YKsvJIUH#vW#Te^6h+JyHgy 
zY2obcJV4yOSR(9*=*{=-U2#oUUE$807(TOL3$=c-g;U$C@WID5RIV3<2?154blQW9 zkIbUEC-m4mcmwzb>)``kKkjX3#kXgEgYo+V>9kPFdiKX}>6;YB4EF^thvk zF}KYgPSgE6Xy67_9$!3Ai1V`NW#50ppBQ~yb0HFwudIfJ#RXy~2Sd8xcZY5%8DszM zKj7Eb9vJ?|jq2KsScuPq{;Pt}aGf4q4E4nRY>q)w^TD=i2CXUHNP)u)DSZ1nar@>z z6ikD7`U69@4O>N?n;mgYSrxoF6wU+RTJVqlO6;aL0LPt{>kw5%p^5iqrp(p9-nY|FJW*vsdI4uX=rpLnXh3(iP(M%nHeH0rMcddqhQHNGvAHNNV>UuK@B zN!pKv%Az>Z`dTVBc*Mf}_G6Se^#h!HorwRy6-PD)9kW@$l}KXGQ1Z+ zf2XP7NO|92itlUMEoW3uSo;Qkj2%m`l7Ea)ej=S3}p?iuksq zGuHWEr@DU!pm~QHbT?@e`uGI!0hNB3I`14Lyt*iu&_VDmP9!^p9-QMcl9ok>@a9d3 z7uLvtMo$L2>jrEOUHGqjPLcGVJIiL-!=VedSTiS(EfY-f=~gecJLpCaKN(@>mY%5Z z8jeDkHwuLnR?R$tY&|&>1CrI~AVN1D| zX_8j~6eKjjsEP~px@reVhmIwOj`TT2lAV&pAhcaA14nJ@OQ3) zJl>z6Z^bq8nXf857;DNqHrerkXZz`-dLZ`E90PrnA~@Rn1}!Of<*4vyVr|BKY5hF| zK7ZbszwU6QUY!OAzvOKBls-jZIJy((-tNaYPsCT?#9mK|bEBOwM z$7EHm$R|hn(CrVoV)yK5Ogb5iB0Hkxi$titWh-6PBMy&tHRhh&pJhuv(jjGgl&GX& zryiBmEnA0GItBCPjaNZ)*$J9|&<<}t+D2EdM4|Xg8=e1l#SiB{K(et1d&%Ajt_5&nAe~^eW9Rb(*PL*A~3O^7Ua5hM@79z4iPiO z>7{+SVrrJ~CABx~-#!cM?^aX%oO*b_r;=1_hS1{Jo~WZQ&-onPV9`;3zC7y;%snk( z1)TvLwrd*gzvYgFYnouYw=VWnh@!NGftY1@gVNp%5liIJEi2tnfG=?{xiOBdr~2UL z&lcdcWB{s9kx}sKE`ocXjZ}OY!xjOj(Oc`d1Mwv-GMEn3HTim$THIdJ`=fRrU4`nkN z)Y-f^7$u7fg@0{YyyN=~C~?ddP8D^de^wU!c;qB8RK1g2_x6zdF5LjHUa36St(0yw zXOnVMU%vfJuKkE~;w?%F7&oI2v>kuuciH?c`K4~6@uq{}chf+8a;~1Xmv`fm_)BEF z+LHh2wTQo~%=mDL1;!rkf%>e;!_EZb!xAZ2%QU(A-DbG@ER0nRKFYPdR{Z1jPa1SN zn4atph6%nda^`R%n>r`3UbQA=$@LQ|vNG8FEEpfps-xpGGoYfvpEp^)foW6k%6f*( z5e@FXC)N5uwy54oL;Dtpf6U)e?gtrcfNr$ktWtO?^yhg6Gr%C}E=jM-`!5?#klLnj z4)E>GUkBLX!7HA4?zah>1_Z;7l$WHwAQqn{tD(f>7;I!OzR=?y4a${}rbQUHW-0Rg z;K`7)Nbb3~G8LX2J}c|3vr7EqyNISnx6;0#Sf=)F>@cMlda17#m%L2EtASBMb;=d7 zwnYWADr30YI5i&Ou7GX26RFd97nEl06XWEbmr-_RLgV|>f(rPta+mGo_})tRx#u!y zx4Obl2&RyAmeP`K{`l~T0si*LhYq&nr<%F+b)^noE)K=F2kL0-Xojo_an!m?8%<_+ zqOwqV{FtA@oA(%q-*#uh$^%O5H^mu;yEQ=lh|W0Rb1iiBNZ^)p7PR8UbI^F-1Tz96 z*|_RADc-llynEiPA7_s>+Ai`PLD|9;@jld?p9r!B9p3+K9gHji{$Tfn+^Ty}_YxQC zX=05d_kW?r1Yg{sbQ5CR*1=maM@w5{R=w=c73yE5s{Z$(dC6l)nrR`N5#upIr3k8Z 
z&%*4Dcc^#$a%c?PLo~>p&#vggy)9Mvc99x6cvX^>QWQ!|D&Tf!U%oP3UC?{&k0nms zMSn|eurZ$rX`D-bpexrv?501~6Xa*a8HdVu5d|N7Eo|?)ml{{sh-$aDh;s|})2c%e zu-+sIz9@9!4PDeRxHJMsa5cSY_k-GrrF49tAsXM>Bvu&)^Krd!h^g|%rkxs6tp!6! z>EuV*;y*v&zzG3$k0sK(cdy}Hz&Dt7CJeibvBdai-n{3$5BDha=2>zLT=K~0bg0u8 znB%5SA$rxc%dRK#zdn?;>OI}q)QL_OXmYMvJgXltp!JQz#PE)PH2rc2`tI|mOYx}` z?&E@+Zqy0Nv5B;BMkx5cir_V?lhE$rR7k1V3Y8W;`D{~fTrHFE$>0K-V5QBsU7u0r zehCZ(w-K>hsC1_tInS{buT_K!cSrt~ew6PS8<^iln~qpw+}6v|^!>GT)x`)Wu3bXu zcaG9RJ2Nco;==JccWJnaJ>K^#5d$8dkk?bbcyxdw6n>26DYdpNM2@6ui37#C_Psc` zb~jn1Zi2^WW4Lg7G;g;V0qocl{(8jo!M-u<{oNZ~v<9QU`FS#UGX$Ts8e#8Vq3HNT zr1-$0^r=V*Q^gWE(CW(lcc@^xcOq8b)a09kqWO+dJWn5{&4!6*>BrLF@^U_iA8pgb zUaiUKwAcq<+*3!E*P{iE2dUzw2lvxZ<`3Dm@b7a7jJVMv^vY0Whv7Hq`%aOROkRm6 z@*F8w{g{xQDBq!!UMCt|*XG$n9)VYX3Ge7<2Tk_|@Bv8&-5Z>Ms_nhVzfTWR$a@J+ zKFCx5izQ{d)3Q3fvqI-3-Dp>`IjcqV<8ZrF>CiME9P{Qf%^LGYP<^L~2J4gAKQsjG z6a8?w^Lg@h4nVtch8Sn(i=)3FTBpQ7dHQ$gz4sG+OVfmE)kM6jvxCNu4TB$V9#M#Y z5!imp0kew^*zHe>)J>@l($A{ldlN&LAsNUg+D-8JWlbivTX4C|h8x=FK=xQOY918K z+e$CU_ckTc!Ex1eO?^52*l2(%dzJCT;(72^nPJI~S8(4?gwD49sC!Qpaph~eJ6D&| zrq)7jZx=k1@5kcsMhc(NNW1#SAPI@MtFo4MFU=&AwXgk*rq2{SYiH8q;TBxI@fH1N zQ6^@m)(d0XorOVXzNrz;-7POr9 znDpXn*R6TZ`n!HXCv}O^oiI}vEKI6V;r&Im!mpvWoH9=lHqIUg1uJ5?OHX5Vnf92p z$84ihp%%2NYZau;mw)bv=Md@EoqKo&;@v)Y%#4~g?uK}3MZd?2R&a$V8`NQew>+#yW$LRS<_VT?{k_=*$mCf zbZ|waKPF~NxWHKrJZcKz#L^W0tr~+DKgxOLYx{8S1>}$4)C4s@ca-ETf_}@s3o0MR zieG95v&KJVEN|7qh+$?tdZZOa=pD&H)tqb1(S#Vn3Z{kF0~AVI>%(V zJj(-@6#s(tD)F$o{j^kj{r0&K5Rz$vBEVQ;@0*@IPebZeU_OQVwc zc;`f!)2D+pB`+TIcUI9jFA2IVGG)oG5V^wsDES@If~=~Ye#!bo3NxIFxeT$kGy_w6O=^rlkz_q;&n6W$9uzx)v=@3-LSnkUji zg?}(D>$X%OUjw~<%I}9N7gU}<2nJCemF;YV*M$!ux6>I4y<|&o++RRwMj&6CxCd&6 zg@UY$A=f)Jh%^0`!L~l8sBLY7%1Sys;+-ykd0z(^-;#0q3gQ2-_8w48eBIwKO}Zde zk)o*dmO3+MLWn3L2%;i31VzPyir7F!AwilnQK~fQHV{D|Q$j#d5s{*z2%;#8ioJpb z&^yVx&u`uLzV}^s{h#N_T3IV0!|a*!+24KkJ~MNUlSz3cm#eYd>1WI>@kae;q9`i& zc=AlS@u87kqgpR@?cFAl>zftCMHbX?*Wo%&0YUUen~}p}?==oi4@X@Z|DHFZ){IHy zh8|}%YJ8u^wSGO$DG#ydTG|~J50ABQCr~>l-3kLI=k%I}%?yvm^SjnKy$uNEj`9{b 
zv7(B&^iN)#wC$g`r|N%lFBa}-(2`N)?#fhYu#TGQff6*s#Q5PUiwAdciXz8an-!z+;F%@9NF$7dS6{3w!6b{ z>YioR_$SSj>#>%_$v*1cs7+m;{c1z!#)r}yAJnXEe8RoPeP6?Ny6xyD9)*sIi;5f@ zt=#pTHdELA{lnYE3G2^svm%_G?o#7-PK^St?fdyo1{qTvS4^mKt3Smw+*+#E=>BUC zcZSkyah>ol_nx0YV@nykvAW($95#QB6RK?4cs@BPepyb0%U+a@0p@A|!7^dp42UvzxT@y^Tv$IfL@+#44^ikI}&h*SK#xuK8# zIwnR;aZ01VXi&$NI{B3=I8D@qiEnleilfVz++XgX(cbE|xN!Iw*DNOAkr!}_yV!vg zZ`bBHwd#KtubK5iY&AvKDejuBn5)aDp4BoIKUFqvoW5MK0gG;>G_2>u)3bImn=8|> z$!w<6-GYPMQ%Pf-G{02N0Z`e1M@Mp#UNRdBO*NcECh}qr@1J6oqXKSo%&6GAS=p&= zg6*{7mI~KvfGwUMYunJ7LalQ_&D-A;v{RfIrr3CqSlHOIX+~q@ddZ1;PvXW!aJ3u=ABfc|K%PR&lS z&vrBJ%83tLXlPu_*8VBpm`II9eVg03jT6}r&EPoAFkR9x#gtk%>gFTyLA~>i;djlb zJne9U4(h~RSkxqLzp}`Q|2TrXrmUYk>Ec=^Y_piFC`fA%^xWkp(S4l^cCBm_yI6~R zsvdCn2m6ZW@1bhbB-zIGodmaNm%4QQfRl;Jy)tvD^#iE)|9Sm@|84$41~q-( ze>{I-w61`cFwE!eogv^Er|@}fnt|#OJxI33z@B1-x7G0^YNGd|ulb zKJTx+fVV(fz}pcg;H~@2=N0J*c%`L$Ud1#4&+?>zxBo7mcVVW0SNM_7>pV&QeUE_m z)0xjZM)G<3rUG8pO+L@8gwInvM16M_@V=k|-X23fuQ`y^v-YJ>et1U&zDe4Y%OIu5E`YE}z)4&V5^&GQAk^dUYknZf7LsckdD`8+6t z&s%nd&)X729s3_@`)~p8{Tph(&ndfG`8*>P0q^htb*xl7$t)J|y1(*yLC5*LwL1hn zS!(-6bpg-z9G_?J$mcOU1iUr@wNJ^uBl)~J)P9~*w!Z)6^J0JSd1X{v++Hu>jq3|| zv#7Qxyu;^xqU>jy@p&Z{0^Wn20^SP+>KsG(JeMRsuY8=(Tf-6X)<5I(rcxYDM)$wa{0*URULC|fJVlnrwMFP~!0quLMh6!50i^LZAUeBK9~YX2{Mo+;I~9O~HT z?&tG7sQ7vLp!}?3 zp@669BH)db^Lf81-)w2;^YnW7yclX<6V$mTQ7qj8s5Ykd;a11z;SQ8vQsk1|ZK;};ULsT;vP&!KyFM-dhxFI3il9O3-lN?iXPOkPRNMA->+@H=l0;ho|E z6!ljC`tD_@#R3)LkG>d;Swthg)^CxhruD#i=UOn${yB2bsELqEumYpo#K^J2Whh-C z2b8V4Ma&~JQF%iE`UQtc<-=?cc`F8Muu}w?(S5kCpqlV)KL(~HVff@9u~@^Jdg6BN zcGTBu0ijT^Ogewk;BL{n|Nr@O{y+Thzp#Erfi(|Zs{wO%cw~U9BSsvx&+TYC$CPum zG!;c!d(efKH*;Fz?xF#=R5?`w2abcLCWo9k!3pcg<*ZnghE7n|u6m=K&|Uo}IHjQ; zoK^ZWIffl^oFbPZbndgW=%!iGXewKV)3zZ39ewJ^nR?wCm78zEiI1O$E_8pzu~1Cn zeA`Fo{0umPqRUG;f7PdRo^P?>=p@T<>}8ZWzxUzj+S;e6^Fc*Ut)mxuU*49J(K(+J z6{d@hW_AF1J`bgvb2<9LK=iENMU+j=>6g7RnDds8aE|CIaV~oQMQK3q&z)g3FSQEoJ$V5I+bhxf3?t69$^)oy@GI(g(}!bf`WZd^;y(K8Y$2L4=79Re 
zsGw&eT{zRWT}O+93^^gY%s4~x_fX}VKY_}Iel&0Xdert$JZD5@4oB{7DCbMK2B&hk z6CDeWN4sXGa(ev-QP0pPD47_F{+U;Z_O9(kUoMs9xV-y}O5)Gj@f@9hi~oON{?Pw8 z{!nvR{wMM0^#0;H>!tPZg+rdGiPb2q-}xS0{d^PRx8JIsipVl;p&QYI%At_M*6rsP zy!(VS23liLuhrl%617>1N8YY=w}(6fA|2Rx(I>wJ0rA^UBkW_dlHj$8*@cMBj20rX~tVLK=~CE zY3o}?Oxy^rom$u>^be2+UzAHw`;C3TM6L&XVznX?JKX|#TCt1(m3)@G^5CGT)XXR@aI05V9Hc088P{{q49T?A5XG?7Mjn}gkRmMSe;xBwK$Btva z=uz$3ro)75#ztg{x`FY<#3rzPPe0(l;+3arIKW*#ZpQNitdLh$s{rHKXXxX50;`@^ z4Y>t$!czukf{Rs7ET2yuplgR38n|-+DxTp7GL340`s`HDO4)N|r6V)9Mj*cqWB|h_ z(-FD$2w|E(omr%(YTKs&QK&NeJd#qh8cdoVhUBl)F6hl}g!q#N0K?-9bYx9A#G4im zhB6Gn4&AeGMC@C%y4(#>eUvEmf6t75L`IG>LcRY-{qKKa{f?|nxs0hPt7$zwp6mxE znam^Wtl&9xL)Lb`UOVcDWVjP*?Df{I_L1z3b`ksLFxqGBr1zQsV09jP!Z2H}#STqO zWtxVcwSWI1oHZm|#CrJ(Vn;7H=CI%=sph_P&EVSWll# zWr9~XnI|umvE>ivvI|!&vA0apc97VMlV8Y6DbHfWw+zsJyuHuRI%j9K`1%KjBU5Qs z2E(fz@)z3Lu--RABi~Ll4;y7O=?eMQXL6rgWz49xORFrUSFaCXMQkD2-iHn`42fWd zz)cp09{a${K6kV7sAYHfRz56=ozOd^?ZTVNkmR%E_1>_Yxh520z7u${-TY+XhjdNG z9!9s-lZ#C(PR|$m%hk^uObne^humbC5uKKn$8T2Bf+udXzOV6Nd}s7R9*4WFx57m% z)n9LD>%s%A=Uy0OJ~$k};*^G)8`YLE-^QrhY~FB;SyWE<6s(18rZAwNmzfXPmurqcXxNQ3-pR3Xz?G0qrL&AX zxO)*v{LS(81;ycd#P4rWlJ7FF#%CM%4;M46x{rXK*|C_#7BNv(Nv&xz=PefRHvnS4 z^3j{EWgtS=3a#8;10?5lzj_Mu`Ff3i;_n5e>o9sm4ba}7LUKbq(Rm3z;I-p$!Sb9S zkiSzMm}}2O-c2~7H_z^t^Z_!n{`kN}W{c42wF08*QX;yNOowv1b;va18Nkn5Wc;xD zIIxf>Y4b$ji-9$ma9KkV4@l z@N_U9tVM1KU-^{ap0>fj&u%*SYbhghN1+AupJoEzZO6dZS9I_;&JRoY)4=}oOU2&$ zr+^4NSOn6~76R9zdQfmIzrgp)OvLf}Y_#S`J^Xs79ntLk1RPpnkL-_shsJ@zwQ zsn3zMX8U>C4yGQPm;MsERpiQ!u8C%!upeP%r@m%=nWN6G7P>*}A9JB%wk`c!7e<%Z zTUR&1>^87wIUZC-k&+joHwwNMf>Vy zZx~czi&IbE1wQ96>u>*M$sat+4!XOP^&3iMz6`$v{niyhDubGAC)Jhg1NWNQ5?ATRnq5O+GS+jUbwk7&iEJlA4)9b@-Ms}GZ6tp9U z89OGh#$D`~eUAItKTMTaTV0N^hdbBU%g_r1}4%@-uuzx&s4vJ%M=FMf^tJ}m1 zIJA=0D2`_3#+j+lyQ&iX^t@rFnXVyw%(a!Z7n;F_&)Y)3EElnlta6vG-(Y*L zPxjyPzyHGgvH!F9t0}|#f6ZY^mEWBIp#1jjh&X^w%Wxr2Gv}cBoSP@}$U7ws=-4Y?VsxvEXy~XuCkFBs+3DGm@+(u(w0tI+l{H9i8?zI&HqGWV z4ytnwDmIa!>Ay(D_rV<9mFlQr>rrqf>=`+I*+tCRr4?;&vp_H1Oh>26EP)T2FTqLP 
zFs}a#;RGZah*p|MlO~UtqEX9pr0=*cXV&~!j`xXF&Mq}w4x{)Sh>Baw(VBFb>>G0x z885d*PoCYtp?{^LRwXT{OM)I+$&n|&ZP-s9C~pF43Hqo`%_^d5Y7Y3U$l~;2KZsp5 z^F(ybD^!qD=C~i}!aqJ!;&fabBBE`!iEi^}kk4iQkk=RIlLy1gh~&YUoJE)wVpF|< zQ_`t`ul3O&`X-!2IR|EtHm~i-pY%aeaa5htp0pW_We$_Cy&X78St+FFHe*t&bd=Ov z79z^q^bPggCoA$(t3w{7GeziURXp26NAx{x2O3xsMT~B-M9bVaaW?H|pwUPb@S;Pc z40@Qn^L3rbFsu@77=MVAS*=5&-l&O|&sG$zzxIM$(jvoAO4Y5t_C&r$s&`!PDXjOIMS=i0sTGT1ss^C z!1pOQdcf31q||r?b+JqbaYbjO{j*hG18ESJkB-`=p}9G8$>}*T8C|lFd~>Irm_L6Q z9~zHG)iwG_%}7m=!qs-XWPS#jy4jU8e^(-UAHG0p?(qY7&Ruj`AD8rKXQG91H_AYaFNd2iQ zhj6`4?ya4SymN67wHx&jxfW&U8=)dcl7E_ia?$;F#vkebh4II#_e+IpwK7-_3qdYw zc?+4%vfzyC0A~984ECnV9nml!fR}o9HrR~&{n(4ErY^#6%!pxL8oGi%{=FG-jkkqImEIxwmM1ZnG+87v zlt=JSD}vMy^+@U0%@{k6k8M?ekZp|%u<-b3Sar=^%*p*HmVVU{4!@g<4J;0a&nknLGF3Z%;Ji&yRn>=B^%x6RdsVK0ASMv28Yd!!Z|c>$4^f8?M3P0sx-*_9*gT zcoI-te;Y%n^|(w;^bo^+bKpNIV-VT47>4FLAV*XA^{zkOVLA!7ga`eeAnrvk zpyvn)yPg}wT8+-+Zx;+(X4+-nqW@|uJ-r41u%>soM!;BMsSK#!3A2_+(o zqoJ8rM`1nZS_}!^jm`gJ52Fj$;g?4a2$xl?z;}Lnh_Uu3vg6eH@WRRvZ0b(|X8GL( zf9oKN-O4V4-J6f&J5(-Xdn2RqnyWMMWj0gs%|F*6E^ez4_na}PR-;##;57?!G!SCz zmg`}S-)6(5^)K<+5AVaX=f8o4&s`AG;tu>V-2yYaH;8bpOEAL!0NyO;h2T$*;k_wY z!bLl?kXxh5$W7-u>H4X20a1?$75O;?RAT3u6ahZV(2*i)rAoHq{uQx)7v42@(xwTpLP?b;w=sOJkD%J z9%9Gvz4n%Iby+%XB8p(VVw%up*JU#`l$W!Rxw9CGvB`{s0)(lh^oH)9?Zb@mOk&c46%9PGeLjsnX&=iUV;)nf|lm8_kX3!+h-|%QC1y>F%E2=oRP3nCG^=Wh#pj zn7f{DV}SYU%)@qPXp>JvwCF7}7ARX}gF6o}YE1eW)yKV=8s=W~aM~%F zs_ra?*3#2-CVLCr@v1j-e?Fg~JzI%37ITVrsnmM1fW5f^DDyVIE5HTP*e z`5@W~!?{eT`2!>W!7m5*q(r**nsx`L$`QIj=P!nL{C4K-{7;Nx#d;cZ>Ix>^ScP%= zmWRWN(r9L_zdPfN&rJH5O(tX0qfz?J>kk+eXS-4}|i3gSg#LLlQ+(_5}NXP(9Jex{n zW}il;Cou_B>_w#2cSA9`%cS)_U15Oi8&^lHaCv;IwHV|qO0h03@J6^9k8*-%os`z! 
zD}OB%wbdq(>k^W2Nj=`P=tF_I{7u?)ZFiu(ViuUEZcA3FEr9ki91&KlA<^cPM}B(Z z3-G7Q5Yy-x(mG$`(xU46;!~ug_L020D)q2!3jkSS_6kpJynu*69I|tAW;Yoe8sqK^F>!cZA!dDaHX)8TC59e_vb^r!A@;nt z5E%<{k=UoZ$uFcHs5l@3?eRX?!S`DMgg^?CS{K|b8Hwyi57@FImGp?ngXQPqWYNiQgrMji zK(5|o9$a(=*}+>x8YVx%Uw58HCg-Gq)`$MEq+NoaPJnmHx|oE&#h4j!cIYI0G+ltL zd3g$BwY3n*3DLM*wsXBw*dd}``2n)>qzJOSmIRxwaz(lh>=KsiG063H&*35ObwbB8 zLVWdrJ@Vic7eSNdiL$PE;?#Rt!tdy1VP{OTlz&l)C3;d>2Jim87+(lFa0!1sTv^J$ zwf>8w?PcCEwbAa3CW3n?75t0;$Ajh7Lb-bU?(7Rdq;du9ZR@Lt@7cf_kBpG*HECeM z{bk5d;!&b(Zm5*M3dKKb`H_G4>rUedpR9>OTqpm8|Af5$ z^LNL8Ark(!`xB6P&&-L$^bT07e~2igZNvHxKE^L+4}zy^%Jt0)Jc!1o1L($-*Vx60 z^F$9al~i+cA|(8cdIxbO$6&&^WgMh0Q~-MplGxOKi0!?v!}fURD)8rv5L?A8M^+Y; zV-o(uK3gEa$`-7;ozDTHt`Km38ZUAEPyLa^S(!8D#jKocjB;u6pgq zrKF>!Jbq}%Pc-S8HMu#MCFQa2?|E{@CM{gTKkh~=(H$p?pQ~Sh_j-TAQpY+-b>jf? zyXw4pi&}zgE}S6l=DZ;e?7WHR8*7j|-qh3|+3<{%0mq4xx9;IT0(!|c^Fqm8P5Joe zp}l0engY2cxr0cLvJpDapG)oCC?d!hz7BEY7#%;N?@LnrFHAIS!zKFXR;`ojUEA{$ z>9%T^ybYz3|4;taYaSA_tfI-6oM`fLn>%ZGz6^2H)0>#9VI~oCt|u zOY|Qw;wqASmb@aGt*{SX?+DemiTJWoHLvU)UZmF$(M=#U6G3X8jwZC((X&+|BaYULW;k^$1k-o)%G%*SHz(iQwo8?rS5;}-&hj= zwdSTECQ%CL#rPTM?TjN###;9G%mrE{*8^1NQjftiL7Wo4@3HMfJNz7 zgu8tR#ARH?x&&mAgYMP+o%H@Y_hbe?CE`mS++DW*5nLi(f?a*#@GzBSDx}HY(Ns zWy5detv9*&)kFdI=cBwt|JFuC7bfAq$SXmr|8qi?=*zQo^7e`liT-7%{$IksdfK_2 zL{30DxyJ22+1lZZT{i!Yzr6ntuWsoer@lBw*16v!LNChwOaC+lw(39nA2&TAjPrFO zGdOv;P2Ez`@^zU+|3+WR4wD-Uy6`Qf^0>;88~CmxTZ9suzj7H^>+1w^DCa2Yryecx ztFaPJ_MT1lm+DINZ|rock-X)eFVX***eeba{TqWQHBwH0F=@h9#s3VvBO+GvFjFAJ z*X&+E(pMR=56xdis`lH8t~@n^A8g!3+ReE^u1~Cx>i-3D0B>nqK%5WAAbaD3iHycf zyz2g2CZ+#M&oy4;*U>0qx#?R%&HjQ!|HjV939QdDlgN<0hFvZ!B!Eu?k$&$Mx!tx0 z^9h(GI@s?;URS9j$JyUlZY8t9rn?C!Dab~A4Q6uI8}rcKOA4@r|FIZdjwW3hqT;`G zI6jK}c&>-!Mn?(nnEN902lt_=kq6OrUhC?MgC3x1w~_%;Jpv+Aa*%Mn-6*T74GK;Z zqK6HigX@D0$ajMjG_+8Jdi8{Yn1QXR=XD))V(dK-&v1k18Q+%L%X|C_Rgbhsh8}n# z-S*Q_34iwV=dgr-z@WAy|C6b!oXi0ij-kdUm!bdS|Fwan-HUVrZfXALyn{uk+4N(u zb@X+lj!^+<+yV5F<6ZREl5wz`W9KCNo~`B zgueooiH3F7Bd@yikZs9akly4c)&JlzW1(4(D*9r7E~s4`$a!kl%YIR+f@;`VOZ9Kp 
zumg?#*p5>9pLubsKPurrRP_w(USWc2#mzxh-Oxh^jyFNh3zfm9C~I{6w|%S`L@YX} zpv5^griWN;oQe{jH(C|bsp>hWmM2V&26ZE5~Di>pd3vtG^eKYsxdwh}OS zsE+M$5Mr`%>Gf|<9VATOyWwjJ6v36gMOf|6MYvh1HKsmy3Lyx|$2s*8Sk)XqTz&8q zR_wnDH&^H&aw?jU{cV)4l<%^emdHr$4JC=Opr~W`0YI z?spI%m!gRz^DMk9h%RCAul&FIV9zWfc@woBdfYC=kMDpj-nt*xTAD80Xt^7)RH-0t zS9HR+odTu(j3obCZn{|dS>orfM+)jwkrLd9y9`tD-htJ-%1QJ8A5lpl3Dt)qkSsXt z*eS%bo-VonL2x9`S@^D0j%XZ@CS=wD@<)WLFw}l39-$N@&Ht=skVJ(|4k5|^e|>G0 z=6~qW6I@*eCWfPTUyLHMw7t#(z%n9MIFXiY>|iERhj>`m96j8%mwhD`-k^JVJ#>h5A&>1@o*Ps{fV0Ik=6b3%(=Hf+@eB3rqM{ZybXp{GFp6rTo7}D}dXonqYYL zBuPB@hyS((+PJ#PJ$OgLACP~;5sO=AAoP!X2q&F?2&S($fS}Pez%naVYDdC}Bks;0h4qKdnUW^Cg zq^&IRXh$>1^qF6uxL65{C4P|d_ci^4pImE?OZaD$y||Ss^kQ zxfJStISk+CA4e{{^JUxmcz|o_8NfhpA*K<19CX$N<9_pN5o!MC)&+a)twE;1C&3QO zxyWq71zTs|d|AT(y4Pt$W>XK;F&Rg;e?uTi{!d@53vo@ju*JkB4uU_WvH*=cRgoIJyOODz`Ob0_4;A2dw>;W6w-h); zudYuPdL!2k%L1=3KNLDAkQ+x? zxc06nY5phIo(#?re_`w4!&tHS754d=p)~#@HnPG%^*o^OkOnkmaWpV}I%{1@0f-rl zl*WJCPDS*^vw5hb|6gIfNSgoMHLL+wcvYat_pi{ShpN5x57hs98G}tyQbiYj@nWBM z(?f^j6VdMv_aP??Hi4fXwu6daQ>5|#(Ze{{!geYUt!)QO!y53z2bUv8FSVul-^>9u zv`a1lgelHKXiPsu(*JL+P{MA$c?>Ksdm(=-&ajkDRbn5=C%|s=RK!1I0#&h51A9!L z0IeyPm_{i%kc{?6^vGlbV1H^dhnQ!I7XLDk@=#u;%elYT6_MnBSFOB|tywk#Hq5WjDhNSOAAAlxoUG8V!9mD_fQR7v;s?Sl!aDTkY<)1(^A`NK zvlz`%KaXagJSJp@JELp&(a>6pF0lGy5%l_dom7{<0$!u3lk0@Hd+5jmbd!`n{FWi9 z{|!3doM1}y{@|bvM|p1%+U#ofPya8;|Fl9kfMeq^=zGoI==Yg{@T&|lQj};6>h>yd zHl@8l^-c$%n+|T2_Wx#7{y!1&=%4<-{M}4=tNnXGxLON4<}OF8-rkky--w-_i0Zw5 zjI4$k_?p+1k!pgIqQ$f%()$NRf2I_2OmRCD<;)=s6L$iXc} zsKx7EL`$&}Z5zNqVBdWy|B4OZK6uhUh24qli>D=G_LOHGTnAp%CS#hNrI6R$($~T&W11XJCJ5>Ew z?HPSW>I)M6zr3V?on9*di*k>^`FS! 
zJvcb`5Are06h0=ZMUbJ(AY27wJy`4xdVJl{k4YKqKMf@yz~uvY{xMoQ{uh4bH#{f$ zH-bG@M3=s4K}?=5z~*``WJ|{XURxA`hzw<5G1iPce)da}{}trj*MdtnJj22c{;gwW zF@Ot48MC?d4J^CN!iM(CpqWh#poGPwo_)=zEpDoTLH0@f+@wTo^??USMeZxSuqRx~ zAKIo&>K@3D@_*ssOMLzq4P{r@LWjyf!CMtx5VO`5;KZrGdiAGg2#3@SxQBNPcyc5R z=LFq=_i5W=7s(jHaz_ayrE|tXpoM=rAY5nKbC_}6`(McqvXb}>B_`k=0tMAmR z5Y4A!@HOA>Xp72;LZMf5bdr}V(*#rcnofa?{c0j9siT?r^QbGXZ)|lZly4A zVIy83ZiYj2eer0vSQ7v9Ka_{S<4YBhrq7nbgtkg#@=XINfAx(h6r%l#xbEmmdRC4MlPkXJ!O56@UT@R`u-zkzBHTU#^w{BU%Zp{lb=`AkSk3)r2MZ~ z`x9Z8CLmj{+rqq@-LPfhGE&zv8`oNKv_9qZTr%{Vh_LxqgfITO2OsY_N-R>g$JCSO zki5P61o4@H*Xpe!m*p)Z=jr9w-?*qk&Jk`PmIM`I*(Z;)=e)gKFX3NepGr1jjrg<{ z9n8AIkd*L0xfqx7Z}=50W8rJ%F{@ZbDgV~zCH2lp(+QJj1iYl8MbE zTbloW3XQLK5swi4?-pQZ=F1ZcY66LT*X87}Nfh1!ridm76ca0or;`lz>Fl5HRv<98 z8LfR`kIbS+qi+_A!J-#QQvH9h5}?~BCDcpu|6AiJxO{UY+)mtrH~TOUzpeye;%AIb zG7GG~bW{~REcOF(>!^G=d>OKKxD$M9)x*NQ`+%`=9^w?03KLJR0o{RN@ZwSfl3ei~ zw4Ct+t5ebe-*^i|3oVq!Wsy-ZI&G~iUVZ#GtV~2mU3g-!V!>G6o)kF9Av5=YdSNU4sf-xz7-U40@uAuXvz-XODxR_+9_#pVyrE zPyc^8{8>E~d>kW9ptL(PSxqw=ySagmW43 zn39h?%k)4W_%9QVrJH~!zzdV;A7N;rE)@=v`v1&*t!!!h-#&XenA0#WTx+5a7wH)S zQPeERxhoNIN!CWsM5wdTKLy~!OFo)=l)<{3!U1|c`5<34Pa6L{7rP5Hl=Z>gJLc%B z4J}B^=}sZP?E+iEKP|)q`CD6Q!Maa#g%aAAY znP|n^DfqAB18D!R4v@%Zpnad-)+Jhhz#Yz86AK0x!y#8=$oVx{#1`E;X*|B<&my^U zPOv2Zvr|yPEA2mGS$(zex39WLP{RvihQfJb#5u3dkL^r$p1)6AwLXo1(G%llSIUVE z4mQ~QD;lI$K`1ffKs;~Kl`r&y{?I&DhDjhgr4`U)>`c3zxNV)M4zJS;KdGLZ`giBauRl995MegEP* zq;0%F!rypAMwtxG){>6@ty)Q!#DC*+8{LTMr!)!6FPE^0!29q!L4k0{w-q}(ahaI5 z<1Xv_LLYp#a|9U@Z-89U+d&LX86hU+ACts?nN?mhv1B3vOH`ahCOha5Z95XM4No?* zB>7*(_2b0;<;D22DGtQy5JaMX<4;C8Soh(x!poaGprH_5V)=o|_28$9|CCXE@YD3Dkm7#<3d~2+ zIc)wOB~kwcMdDSpb>Diz98ZlJz_637mj)r39}}v|3>4{~Ax*8->t>pD$;) z;=fOIu~h%>*{3D-zRagg44nF+4caz*`bYng@&9Gt#0WM+AGn&9fOpBSp`S;r;nZ+X zBwk-09Qw+}_vrq>ef~0}=OwxRQ>Q2RC;#jDa11gr&qZ(odFYx_05$eBKw*3JB=uhbvTzKX zc-br&|2GePUMju*`zB8bEcHJEoIi=+YuV~Vl*>DI@v31g!nz(&Og5^I+*k-a)Q+O& zV{DA3n}z5YSAazc((!)*ebaKxc6AFrrBDlOog+dXEn^|ut^=w5lk4n|p_MZchw0_W 
zcv*rp|116I3XgW35?=9dt2g@Ui?rvO)_?e$1RTioSeNfFWap79h;}p|nbRIn_mY-? z%(!O{kd={miQ5U()u|Bt?0qkd|C)7uAZIxNN%-IIpA9^wO+jS*4q-3$B;z3yh9K9^ z5!u^j&YJPN9qePiLS!S`aK#HP@OhI|P_+`k2f`Z>7o$wz-?kg67seon^&4PVM+2-L z8MOIYAWrXB0cYJ`*4Ksd2yeC3Hrrbuv?noT?UQvii_-iCz2HqQ-;j8C&z(YAUc({*^bo|G} z;y0AGzYXkzM4(wmgkIWb1DWlf1McZa$A1g_KYRnTvYMp)Ys$l6N&dI^K`yv7aTENN zFNT+->JU-zWK8veEdE4Lf>gcd);px%0eJ~O(C`b_v844JV0b4G+)&k))_=dNcVKfO zU*fyF&VZWe$B3FJ8NphC^!l&oCu?v$v>WkwtN<);m#`)A-?qdHF5dJEzv49;TR6H8 z;a`;lJMU|u$6FgQM&C5ly(|WloU{aj8RNCV!4q(5jV3Vg8OKqw*O8LzKNqrvpOHv|k1!rE35R;)@nv780LW7YB$vprz3vL3&h~`v36O4!mkA0p`^8jGe325+}}Y!{~$E*gm5FeX?1y*Q%qH(; zvx&!7UlF-^_oVs1ME|QkHvYpuH|3i!WN;~9oF0R@7n<-J3dxfGKR@|!4Do}%5aVA8 zg|~^Ua26X7_u0i7=Bkq{5re zJagFOa>?G$tCit+BZ~E2H49QNgrF(C^6)d?HqPjs{)*w z{@1FlH=3RuYzk%j=(Bonvy?4!&JuC^bQFH`#|p8({+1E9>IU6T z^#xnqJCbG7kpPLKwzBpMW=o$()mrBWRf-g8`(C%&wU|{fPWNju$Uk{3I!lffG_;C+ zNbLaY;Ezy;cR?}iu)4Xd*t7~(-4iu>P|HqM_?!y+si7oWb8imo!-*-ZR=uArvwgno zoySa=m@}8PNN0$pL_f@`8%}iSntW6GyqBBqTlOiqo$*F=oU!NiGp2-pM!J@i|JIJh zG>u=Hw)jaU*fdLv{oPEF%}#Hz=~(d0M#8^1g2vG03fSu5>!Hlw2;;4c25WKQGaaAdQzRoXJ zWNIT@!aq^goGsz6Oov%bPn}sJmqONs3NzN?r5OFoNH_H?>unZXnPFRyZ36XPcYs7M z@)-edAohOqMeO)0h%Mo-mUf8wWcdlI}BJ)*DhrZ9ytoBeX)W9 zcW#1%=e}Yae-zSUpBBKIBHys9j(vuF4Yty^D9l{)llTOM-EzB-bnSY5p@ynoHoqjt~X|N*DYd8_)pZ@F(mq5 z*HB3NWq{a@DcwZ;)1x45^Ezk=UHKpVyK&bu)5@p8;x0{OX^jHwj(8@d1x~|i{?3R+ z+8%gp$#sZkbWN&%w^3ia=f*|<=-=DpE4$&T1{86!0vcGq8c7}eDAoV&*D3Jj#g?p9 z199{juOYV0=|_-+{~n7Y^uDfhP(wnez>HI~OLZ}W*LJ5dCHile;t!2l2M6Oi@Nn$=@ddSJc zKUL}=*-;}{!vE^>i|jQH>g+CIJiOIR2D*@C#%!}%X=^!YE=_Jg5AoFe0NtLN4n6p3 zE7gBj=r)I1yor6kS)VrbPcKWE+XN@&o9bbOg3 zv|*%|)xnR1?ycC(YTNNc8vlRyu7qUj9y{coud{n4e$G%#zDrNJxP^7Zq=wn()67;d z-p9)FS>vGc>JB?#a}(?TV(m}EsqCZwZ=8%Np;0PDN@=sVeGc1jE~R;%)2z}+b4ioT zL{jECLx_?glwqICP{vS%lnAMaMj?gV``Xv>zkbK@zmNNV@crHo_5;uM_B!A1wbtje z&b2xUZa@lIitHLixF574UsTSb^bT(& zooYldr|P{JdQSyEzf6vrlt-=^YbDnI$~}F0E+I5YpOxZW z-qlGARs~WOYx1Z$H)09Pi6`hvgDgr}W+UDF;tb(kIFBzYkQUT)Y^W=jSwU1?1T}E@ 
zU;l5KIz{kxMi;l*Vo>+aN)KY>+v|kw0v{?aX%@A1k^}u{cpW8?*3aGkyNoty;7~Hv zUn#@UXZ73pnN*rETQB@n58bx#JLP=qJSDY!km6Y{rBA#f$-FmPseo)Xnp5$bDqQ2p zb(SZ^`k&NXLw82^5%Vh66YsKaiutR*v(^8H|Gk`G-eUg=oDGvk8RV|HOSN_Xpe{|T z{m=i`N+$7(*qwBxgg~HZ`Gr6Hj-j$H#L;UGnF^{VxYPc#$|+-~dNF@9(aI0+giLYH z`;Y&BcbB04mL8>+rR9I4yDBnr|^X=-EJF4 zUX6anAAdrTz94^)x_ap<$1Qh};7h?Y!42U}KC7Zf`#w~o&q-_($N#N!UXmBK&m~n> zX3_=)YE;C9Yy98mmU4cs3+0wds0v=Jlc!Zu3R}<)ozs7 z|AW8Y5Zt={gF9)(c|C*e7m4*#CXz0G@2PF?%BjKlyL5<70L6c|jW@J&@~$wZcdwlC zpZ{CDc)@@CI*oqe=qCvDLn6)S8TEP1Qh|@d3&CGTQShQ{8-2S`RqX$1W-D~fv(*0c ze+Qo^O5l?~aT6_QDb91jmqXXZ{GH#+3Q99?5(f{D(SH|SMjp4lBIe&|&eLzD2B?cS zONj+;qB&R32RYOC71K?}RmA>Z{VkXJUJxhd|7!6mvHyD=TtbJIPM|sO!|AJ`?c~Ng z0sMpQrnJrSU@CIPbXD2e&X#N&}iIOAl;@bC4*xWxv_|^mVJz`bHh!X|FQXtf zMREALYLCf?Q2|uf7$^N9HiJ;joJoE138j2>1=L~XRb+U@3gREVmpm5ezMB1SkHtTK zORS6~My%qJ+Vx>1_4YZnxll^%|E$LtYC)L^f9K0^-tg^L{H!$rV*l@!aprBDvYjl_ z;S%HaMbS>&F}i0*=aF@mqs02p=(#}V^d^b@|8k8v`A`1m%5@=4pUfr=_lJ^Sgbsw= z>#O=?Dw5g3a%4n)Ag4ldJl$w=pXv(zq+g;NPmU-LBQFQ#lK=4c-?*4KZqUQu)eF>Z zxtqlH7a#N+vR87BCP;G+s3CRlL<*@Tr$UY$vs~=|$=Bq#3!={vZ)BRdikJ0>ZFXGh zb@f>)O-_+$j_;t_{w|@$zWYLXZ1UlkDPPdvvpJNSIPDvedWWGUe@c-`)sMvd2dEP2 zWpzFGYf+7^`LwP4P=#Rq%DtsT$dk{+lQKyvbU21Q+_8{bGF^+Z)XpcC=ogZ4)*U?A z^7Ev`-J5zAJ$BU5Guz1DeThVW$OBR*Ya(?k!jXTfw3+BKDk67`@Fah&p3i;x@wk}3 z!iCcm-Db%TT=q>rC)-!dKTY+!m_K*tY+m^pRnFzkH8i)?noMObk(Rgm{)_+pB{F=K zo{3b6!4$goHHSZDh79@LZzN@1s!A`j-a$={$s^Y}=85@-%bDtO{Z;>q|K0NhMDvFT z;*WC%F{(z6{&4nynE&Lh0;*M#&zG_Y;C<*iO9Witi}|aE&ET!sb%5+Il_gf)%b?+m ziY~`XgVK5-CFcJpX&Y&`hxyPc4stczg$)v|K)e=CKi1W5QZm?P=gf@h*A}2ebJI^ zPMiM+y|9!ps%*(h(xKCqTruxD{~!NX%l@p_W|KkO>nz}2F!{ninOsGAsCrR1)*d3l zvNh@3MkA=pXZO)r-{biDPTBgE#{~5LZfSD0+&9{>ZWJZ;zDCS{)>ejg8$E~jOL;(d zN!TZTn&ucHiB+anu8} zC53NST}%roizI|{wo;1C+$1$ki`M#sZ?2S zn(5_=`P=L-r4lYD@Rjw1yzj*e33ecg`X~P%?-9tOZ`~o^H(w___n8acoR`;~P!&(b zEo>I^pWyO}ys|l&{-^%?8#4zi1RX(E-*c^^_VJ!Lj{^=~5Fd z`mU7&opHLDw!Zt5P&KL}xBFkEh3i+*dX@>?fyax)`QLoQl>*;tQD5C%bHX(6mzclQ zqAYRzzY)yvB1uh7OT}&j>#|~M-7yV%thda6{{Nk>UZpmE3gWL_X~t0>7sB;_86lXl(T5Jv 
z)TQ49M~mZsMu{YM@Y6D?`j0YCS9=;cU(s5iq%~DgWh_a(QLh!8qZ9$*Ui@b+-TL`9VPgLm#L0^FKWCvC zFTZ*;XVq0-I$zV7{CPT*)Uv_<^8ba(qFkm%iZaOUq444v{@cx5vhivRwQSaCddmL& zlzEsZ>E-Pr)_;BJa^2*g2mkZ`fN4(TIM1!*)0=^#czuO7+CG}jdT zb+}E2Z|fmH{Y?9hzoW%Qen`hsN^0jCdT8JYzvWLfX(cs-K1|-Am)^fYEwCI*T`C$9 z^S?Z8nQq#o*Z=XK>K{YC8o7|nU%*hyT5r%{ivD8$Z}&W;W>0*|PYiG7m4`$-zx;h|$TR0X#QuLJR!&_1d4^A*tV|w} z^OS=rn-Mw0ErZeg>-sX(Yt1#}C4LlV$Ie3f@Hz>)GvyW^y5y*|jnBzVbIrv5zp7z3 zVPCV1@R*!HslFu0D@UvJ4c@Ha+>V^VB@?gGs*MLIv$_uQ*1ol3|4(;(t0&V|M^N@M zJdWd7B1{M9s_P}xk3WutMR){Vxiy7q;(O7#R^#}grAPIZ_1Dr6dy0HgFiD`SGeF+m z@^AkC_<<*_(m#V&=r%^LM|&gzrQ--;R-9-UW*o^2%%e6ZnvqH;C3p(+1@!dm2g%ci z45@`nHT8`nmr>j1zSQeJph^#h9EMx*#ry>Wwbbvh*L?PLEl=}8D53dyrI>&C*dkv07%uh3 zr<}N&W-RE}mDAOHmP75o6eH%}zl5avVh@V>JNPXS^XG+HQFlp0%J%p*sw;01S;LXx ztC{waTl)LSZ~J062K(FT09zOO$&yt5TGtg+!k{HpSd3!+7Y;om7S7*8EYDS?L%v)j z=|`&k(a%M4#y(u|3M4?!d8HIEBv4V%^c5(j7f4IAC4nb{r0et#wU`)n05<42iT|JkMYsFhb* zx#iAPx;uOq@Vl31=Zfv?vbD%O9?-d=J3e`fX;-oV9499P35^iW+Ad2Z7#GC3jTKmAYpJcfTb z>>jyu%Ogte&Q1M=(^n8KnMgLgPo%t3=25yQSCKoeTZ#3rA-PO<$z7HI^sh2QMKtKC znb0Nsi24`a)P~KJn7{l|4&@fF!*}!j!5fn{miXoU@A+Sn1H;>Gv6_6=IE9eTyg=*h z@8)ne+$7H(9w+86VH-?J&s`<<|EJ19V*ig{CQtsjD@g|GE0c9eH;8LRS^7)XxsbwG z3$o-)B*$l}FSU1VDdkGn>JJ=Y$vrh0t3I`}1DMpC|uE)Z{iTkAh9 z-OuTMRIf)xU7$1^fxP`^HaWj|KyWH|*sn>{)y`nO z?%F4m`bUQP6LyBuSv^F)`*Vm=R@NteD9DqKa`VaRW$M%&ot@lPp~t`eAN-Z7wAjwq zSkLD_A8}30KQUcFtpC!|v%DQX@|^u~-E^V14LNOD4msy*-GBVm1V8jm6?RZbys`Ah zz2k^0Yn#czas&DyKah6R_o9wV%20o|Z5Q)z+qP8KY2lOq_&4&3NzWEbGU(@0N>)%`lo%7oJVu zc~DJV3#s9+|65M(%Sa?kFH&Ot3fR+$VNqG*hIaG^txQM$DhP`YQEvsE<2wS-I}F$SZuSWik4h5w{7g zfl4CRq>h{-$003i=5jB6QlP5$BolnaMdY}^54=Of6f#LQU9Y^2NBs?)McUEb#M9XU zq;6j&xpImuaW7Yi{J2nbMsP=&j2EonX5RA=^DiQIQ}quo^DlX?=5hU z&?J~QUUDqwH=#y1_?{p+e>af9W<~$;w^%!Zk9(uYzY?L8_RM1aRX!2KaBM8;jp zRZ)#{oxOluKH`L!|L|>7-Q91#iEDv>j(@ru#u1O6M-n#rMa1-?G-~{2MKOPlbuZa8 z&67Wl-@;p~q(wYx9U%Ux|B3_zZ^?CSk~&QhGa9zgU@ob<@kTtkJ!OKJ|E6dgQt6(7 zSpQ*>SH=AApH?BmG$qI@RwK!&m7;rt6+ZgcG(Qo|*XNSAjjnP6-dj?)mt<25nn&@w 
z$9a%a;ZI57r8u$vueT@>QZv5rwchb)+rDJwua&USmn0 zMJ{6f&)e0Y_cq;^`0dclopxB4_z<>;8Va6C+4qkoqE>vMI`0;d(thRCmo@o(E6Ftd zKOtjijg8^tXct*Q!}x0Qr^Zw<|95;7+N-sZ8}qY7*F{5>xTPgQOx*gOR9Dv{dkoyE zsD!2D(+#m)ZON~c;0;4gZBn65&3egOzUC2WN#D>53L2nJR*1^RN)NJQz8sZ4&4U`T zG@Q^auqCUSXHhcCM^FcS4skn*g2ns??n}{M>-ogO7uWdNx;|q5!hSum{uOO)d5$~P zIjZen5?T?L?1LzfP@K zSW5o7FT8qu^E3}qzhZ64bx(ac9dDy)?=vBEiPC3%^MiY+zP6dv zfkjSY{TG*yAd{N)iOBpWO7_+wGTnvZpH5lAsZ~45t^PHe&QzRD3D>>FVjp#Gc-0T5_{I9lOqnrp`XlA0{02YscTF7Oiq8 zYCZPTWo>ojpEdvb|L#9esCyq?a_87*=r+81!tX7TB>HEJB8!72lSfl}$i`@GvSQ|A zF7HV))e@9K)1+rFWwBrYtL>j###kf_shRML`ewVA6ETH+(g5^7&>` z7#K+6&jC_l;R)`DDc8jOAAf71LKdCnf9apff6j!6`HzzMH~)LL)`e%srt z_2d1yy_?i{Fq1ga-b81L&VORo8d9I42{Hfh7w1WU9b*2T?<2+fuToN^l1}H4cKp#) zxk&%$)}U{p>`AgQ8l>TNC(bqRYxKv}U#PjJ=KRueeq?5NHaYA4PBH(GWqo|(*KhgL ztS(V2Hq{by*QoKQ?%%*UMZ0i2eAQ`%mI;)407V*>C5ZJu+Gh@Te{?uuQv90RYNH6ToH!}8@M(Hlc=={+sVa`ipd!j ze|hC~yU26-nR+G%MR~!JK9cv-jVw6UNS4l2r<6pwBP*#&YUxQ*hbx4no6S+K%dXdA z{vQl7s7X0UQS^{ipx(`ZU6{ z*NdDoLr!!rx|XI#E)nxjWJ<^FJNAnDE+N+NK zxJydcc%BUPxpIn_f8Ur`^5~pbV*fvJh!ykS;_OcPOy`mHE;`i5htG+NJ7oBRNHb~< ze>A zzPXMzv3f+UH#Zi~Kacf#;{HGCKl}e+{{H`Z{+eh8`~PPCd2ar}oaA{64OT}l5c-51 zHwya87hdz$HEzAQCfoFhhvC)FQAV~Kb&Vy%tc*@g?8?^qm7lZvM}UzBeL81n_eVob zoNpxX$~V*rXcPDHQCDh$SBvtAe?I?jy?pfl{~Te_9C!ZzpqFojkB5Q2b^?8jyl;>> zx`5ui^_f7{-q@hsTR^84ZllfQrw9goDFYYbIoephg}%N*OE5}qI{kZ189hNmnzonv zPQP&orsb!HiJyhLHg{+LkG{WX7Wn_<`~R=`i)KLoZ}>l4KhvP8P?u{B_r)~To2vI zj!NXw^Q(R62Y=EsM%maKdhLxgd{{eGuyTd6@VU7KYtvXOG|xImhWO2)3OYRn{9jyQ zAFRwd>l&PWBQi2aE5?=oM{%Lv^q3k0*^7RUf=mvOlRodK>^6_d`E$}YM{QJc&Uf2y zqS-lg&fFLL>^5WN9M5OcnGaV^Gt^bDHEf9!0;aQ)BzxdJcA9BbobW zl$q4JooM;=3+_1f1wF!7prhLb#zHrYWvz7}(f%TmHnD8WzD^8{_X6wSy&!jSIlNrd zf_jFgu%)6C^^Bt-!fg%sWtuatZ@)lj+FxYvr7^K9+L=uoBN-*f94s>LW`C@b%zaZ7 z1ZxF%@#2DMkXliQu^uKcULzGfi^fCmktCtu+6y>V^b))e+A)7VyTOW^jZC224Lou{ z4lbJ>#e+5)%*q$;jCILkkcDS(W6}fW^O8+4`u<;tf7}b&6%LHo-UiG&w*r4gHozXE z;~1JS6{}q9L4REx1bEpn@8V}*zJ3@o7lt79SOiM(KNy>S%@^&9bQO;2*$9j0X)*K4 
zEl?_=46jdo1YbC}AbC9x&-Kn{yZlUr))(}Rs0piu%S;|KQ?0YuqN$eHuJ9I$zPX|2 z&-ZL%_bz7r8XH()^8nv$GchtvyTu$xAmFQAJ|vC)j_$fmsI>JdKL2ZrV@rOrntJPU z0!>2MW1YXCv+W1WKE4%1?@BSggL~MP>LT=cbzA5uSi!0$MB};OPndc-2|YOd;BECA z@(()A6*Qq)(tahA^xz`Tn258LLo$82`z<1a5ri%zEA9Do6f9( z=I(wBPTLNL6B?1e_yN7N^KkXrcy>UNMQ5HYJ`MB3{T`p;NP!uo?D>H4_wV8R?eb7m z-ibn5w0}11JH)~jFnm@F9;t3%;rs(%#D6!A8P5Tgf)Zv%z7*avy96^}9{Y*719NNo z*t-XlafdJfUe3Oa#~xO~$h}c8sMP`zuWW!X!+>Oi3m(0b!(8_CVI%grqf(VOn^o|X zIb#*ZdWYGggm3;ljqg3likd>hJP)A(T~ zF1sE4tQO%xs{*L{IRhs7K42ezF~>#j4v_ZM0fq*D8W(va0kiEQ);BI=QBaVzOHVJoJxar zd>~v6EyuH;%OUVmFDz)H@fD1NkR3f(A(zOWmYl&f+c~0){bbnS?S-!|IO6kPWyZ^s zQ$aStiv1m64vU0i@Fd=V)0HN;tMwkFT=i$|S4f&19%aaU+OdgQo{0T;Oq%T|uWX=1~V3S!V-QX-YKQ#(gM<-pJN!gHXGS zF<2D@Q^$JYWXrumBj0E2VF`UGIskRv&7#ou=35MVU4|Ppve;?s#=yF|yO>#j3SZ5wLbIqsSm%?8vsTUo z-(@}MI?mnrSV04Rv+88uJxOLuw;xCQh9O8g8iP=Im)*OvL5LcuK&*L%adM*F$0>J( z>G30B&YE>B(OtvLQFmvT{=5ba$4A2GlS%M$Z3t{rI|Yx|YCvRkqp)njc<|6H#=#sfFtUawGUGu>i6W%cAEZ5=M@u!%i3p1F9<`UTX+KrtvNfNJ$8usG1Ie_dZR~&eA z4N}mPjYxiq{)YN!x#14p+dmb!FY|G84a?mB5(_$`9N@B}GZ?J+24!0!*gbxC;KPnA zSen|1ElHp7y-o@fW?2T7gTJxEt1-K-;4vy2XBv+R*2E~eFf6+unQL7i0h??dK%kG53a`3-!`FZ@?+fnYYrCNQipAo>CEfy2oRh|0OCkK{(b%fJIVXVx3gy+ zlijdsfdez7Is}@K3=M0>vU4SpVR~i)_^*?|{Oo#A?yF?ecD}*~*IYrMPzmcEtYQkz zB*B;Y?YJk0f=SmcV0K$6K0Psv!hs~VYTI+*wPm4>Gas6D$D&C@4^CO*1&L|l_**9g zbQY$7x625V=*mHyeDyH%X;2w^ivmHYwis3BeuJ6SPqF#ZcSe4D58VC;7`@sTTTC8e zlB^P#zTbkY7rhoP=iOs^=6XQ-=chtLO^MvagOy4p{u##$(If<6lSp5DijOc0s-@qbf%Mu^7RSZV2!AzE$ zAA&u^2S!KM9Dk-#dnwe)dcmNpn{e`ytteTL14mTTVBD`^e6_C} z$r68Xt1QNJfg`B&KLeeIws^8F4#_#8sCCQ~)Z@-DudhkuRvhibDKGso=}bP_M6N*L zP$uqG`vAw6-UFeX15y*#V&EACH0_E)1*H_cuAjW6Fl%xdwv*#tGzq|zxF_XP)wI3Tw{n=ls9Uvzqis$_~ zAUnnfM_aU`z5j9J#@1n6TKbAT;dO@zxEs!XmeWN=jyZehSUM)I_zlh)!)P@50=mu5 z7Ph{qWbcj5XP{buo=g)+pJ>2d*O!JT;4mDPSqADsH(B|Fc-+-7p6$$v2Itjf*b&_Z zgir{^dDj`ON4B{2U_QG}Iajo^-VNBQv(KeZN&c&8w7 zmJYs~w;Var0bz3^HR?7>wI@dz{)<|gQ zFM|;w1JJUj7(RapgtU)cIK#RY$^8eRXH^&!yZp&kO#BU>1S4`%pU6VMx)cxu?ZsrN 
z23&h63`Z$UgwwirA?}?&T=*LW8&{3SQI>IVC{i23SGMNl6o-TT#s~2IA$iW};Rw$eD1eRUj4*DC+a4R{R zwL7*AyB_9%w$KXn79N3Ts`H@NLxp`{e-^$p-hoG<1+e*>EncpR!F^;pw9{YV-5Dob zz3!>Vvq~A4h)i5izK!j?7-Zb#%watmEt!DHuZ2GhXR$HsgCWE&ow>KL1zq|!F}b7T zF?5?Al+HBAy0#_E;N4(Ynf?SiEGpMKCbzQBeUM)s>{7- z9axVcb&KKMn02^$pU6Wz8`&oM2yoVhfs;oNC^coUS2nx=lZG{Teq%CT9GnKz)DOZr zT^|q%-{AfsdDd^x2>-enz~HV542?2{n@TOX{3-{wZ?G|r&?-bppUu$lD+zTf3h=98 zFMJfL;rwlP@#_6ll**h8Dn`b*FtHb$n@qsJHwiEHi(+C&5l)#J3N7#D;eK-=Y>0f# z2qimlf8k2>zcR=)?mv!~o{k1;UKw;K3h-|K5cFEBv7VQzL9etOL#>-JC(;=XSjOUT z+F=EW&9;|tfW3X>&H;mTSiWM8I1O5t(b z2(K9Pu9q-HXQI%?_%$=>R1;&_FZvC^)o7D?2b%Bp;+4#I#-nW0fuT>rJeMFeeR_d4 zH+_N=b=ugkkr@CH1MtoCJM+cA2_}1WF@5{xpwXrcY+jdVy;XjUyV{rI;{)!nBkLeY z<^2K&7bW&_K`;c$0-u>37( ze5-=mroX7~R|l5g`jNZ%6LNxwQNdRg%7>oesE}#U@_7~h>if$&25bZklS0TEag@1G z5()=bzhsIYz*%iWVvZA~BwjeOAHsi>yfbeAdPeP>Us>Vq<` z1#slMB9?#Z0lD?(@#x_&cxt@}I`iMa!=Nqjbi0z#&aB4}ym$gmJMbN%qd#I^TLLR( zoe7M8E4+RB7u8xF!Ko<^w_W=U&*L)S!CRKOoz}?y%JE=J2A1R5>Rh;L`HIP`n~nFL z2f{j8NqjE+WL&=c15`QA1e;rjQT?J6#;XhnM|n-ddmln@R@-|RUv?i0uRat_sj&mK zh0`(1V;}mJKf)DTMBBBJLr`yAy0L`788uEMVp)Fx92{0b*OWRur1cK}%DCX*q6|i3 zIMw*joj$0asR*yR1vskgEI91hVYvC_6XeXW!laAZ=An3Ul7QKtI1>%!{7}J*VkXHh^Xs2<@`1c-yc5HB=0kJk#s=WbIq#j+{3$(&H(cQ(cey9X#=h z-f4VXy9&O1szs-H({SS<4rpr~#v>2ZnZZ9_U{t3Ucs!mguALf3SH?R?+Zjc}YP|?1CMn_zs&}=+pq~1cy{7yKc?!x{JkB5j3 zEwob(W$1wO_%tt$(N+7x{A?VCrL)_?lhT9iJA*jFEgKbfm@+O)(r^Lq7Zy=A;65r4 zmV}Rl;XReO==yC4sr!qI4qp?FXtHMHyI0}dt=W+37Y9QXWANx7KRBm<0j*+J@)S|YP9Q?lE#h#yCf-~w1q1^umv)<9yz;FKE`cTC+__|9lxi~!3*K9 zamN)cJiDL;&%6vrgXmkrQS}`_p4Wh`zOAh4&Pw5!kac~C zFc})*h-P`4Awm#=&7YTG_SE;_SuzSvoVWt(?8mWHeRZtNdNX_HXGvE zitoNl!N(id;KrGY!m`~$R8^RWBg%(Br?{R;nwW!)bTatJwlmKx8rYB~9*&j12W;F_ z4o>J?bFnD>=fCVn5IsnvMw%TH(0GUN-iW4s1Li zX>zpD9M)<}n4}2S7?)~IfWTT6__BNtCdmE*%kEkXJRzAooy`aC_>tH?Cj&e$88a^e zSHhR3N9^D68?njaKGSEo2BNnPz|B@gOxe@}CZCFpKlp1fQv2TaU2fZAXsj@Cbkj=>6ycfaLfiP`*5L^_5uwQe(;jQhBn109u{pa3++=tI#?CoK63zN+KyCez3h*y<3E6^%p2v%YlU%%M)4KpBETR5P>DsSl^R-C|PWA~0b&1vgLj 
z!zGW~%(mNosOGp6KRuoYKDQ*%qQL==Z!f}|XS9Tc3d#BC12?9n;BxXS1etiCiI z;zZ{P6Q;I-*@>Ol;-J7vhxD@L3J0LN=?cEsIT6S7BQ`vY$D%PmVZ_Bh!qVm>RIfUW z*Uh8ZSxf&w;hz8|;rMc_yLb`r?0f;O-#0LunzGo2yg;U9+$PAhE5sCyk67xz9b)&K zg+I&xvWsKgz~q4^4l5_%EWRs}vGu6GzYn6M8=&jwMU2s-!Ro^)JUmv?L@O`>GrUC4 zb3q`;^%gTfiUD6Y*293x72xfwfk{0qGSffc_eeQhT^r5*n)M0mgR~FR`QI0;Dbc%T#WCB<$_(5URd5f>EvhuyIrk*rW`gx~b^AqFWEvPikN+Tay{X zYIlh7OlJEr8Penu5uaxNE!_vYZe>o;JpN*hX@*a8DlgV0wL0UdiLvW3)sIAhX*?jKe{a_VVj zL{0|=jemiI8nL)7309c zZ5bxHo8-vX6{2F}D~#(mejVO<`2t*$e?g1T3^JR#q2l2bpv?>6uTLB*GXe&McHA;C>eY%x~+( z-sPjvcf~U7n3x3G2lKHi*%sgHHN$iDLvSU(5X_ppp=5nH@^mJ^JuJj+)NDM!3un*P zx!`o0&&=k77ns*lMa+pjckp9QvP<<|LQ~EN)_TVSSW-|7`F20yy!2@-&m!>s!~!gI zJ%CCYJ)k{lF+0h@9bY;ZV`^D7&Rp(=H9JZ8q~VTpLv?Xbp&l2swqp$^0JrhK;__XJ z>=j)KPxo2lm`S(r^gI)=tKA8v*{Rw)%}Lysh4r@31{ea^<$5?gfT%yJ?O0~s=c3i!tduPMnhFc z;O&zz-1@T^7OELxuISmGtk?>Pox%9KJOmfQTfBPd5Nwq?!b;Ot!bB4dNIR&4M=vBX zC#*d2+?Q@P^SCc$j8lh#koyoF{1fVuGr>WF#LX73u<5KBD(+TeCE|ZUO0G2{wcMAj z?|%eyyUpO$iPzBO5X5+v%d<^WF2RxuLqglHA?R13g4UO;&@)bojU-Yb#-^4z5Fdz+ zKGU!`FAB$cKV)yszlQZHI&dRBioKV46ITWt1ih(QOpJjvJhO_1B0nd{{}zvxj5k=H zPr-HD%dm3cFtix^VDy$-#uZOOVRB^%UOQxkUs(&Lo87`nj`0OGk+yne8#4`fq0IW4 zey}^8g#lg9VU6z%#>6}il7Gm-J0nkY_a9+mu*nOyo>4Vpmp`fg+DN3lV7)NOfcK62w zW?sAm^Ltkj_-9nY;^)sm>#(<|F24;|Ta!WEvkc<4tOw2VIP|jYfI6RQ#+fV1vAP88 zim?koc%}w5f}djCu}#?asT!2`Ee0tUS0D{{!yC7E;A7tola`3Mp1%!)_V&Q9UJb`2 zpMwhd8v_e%;I-WlwC|?j)_TTxg(3%`WdsNst#N395zP78#8wO}f;Nd*962EsPjO#} z&hU$wk5>X9yY&ua=9QwfJq<4>DT10_9JnmGgXItAV$YORT(jbr@$@ZaXx&r@kHT9p z%wC$AIz5#6w53oOsQd{1>a$isX4Fmgtp8nX=zIc_V;4f7d>$N}br0Ev z2FB6zGf{c`RJ2|80>+E(NxP0L0!@cKa9$|`t4uRs`J)nC_v1OVHveRApZSBr*fQgb zNBd#NQ~@-*jY0>vFN~AtI*{GejvYO3@PO$HELrs#9@XE%zw8;Tz2*kGE3V_&#vn#3 zE*|=Cj)a>Mjz;t1Q*lA2AMX4jfxnjgfHB|Yz%Nk;!cMPc3MYpeyIZwEz=ju$?Uz$YW1!}m4-h3kAb0&vywP-nnX@(=s#-1K&apPw6cBCv z@N7Nqtk}fZh5O-$*ptlA9WSgLRK+r-N5Umzq!DV)p~cUGaEN?>lm6a@pvdvq$`R!Q zZ@iH_lMXMWM9+S6A`Bns!<#SWpyIEOSX48Jz0-;?9g1OX>@g(II^o!8J9gp}eF*#D zBIM*JqeP=Ku=89&@)QfN+Al&!M-aq3bAuzdJzz=JZx|n1fam5Eh|W{1@zO0HJR-MC 
zxJ=~kB^P*qMefcKk?aIp_^nB4zRK?p}N+x+`gkoQ=~Y zT46sg$oO=uC0K^g^)p%Et4Z4QMO6qbr*khU*hEz^LLRLo;1?x~U&#v0WIH+l#sDKj4!NABgze zho7%KWRIRG5_$IkOEfmY#^_gg%fTHC-L|4;Z!xak>jcJ|&LGLY!^KUZP;b?QvIXfl zloW|ul()d+FETKx>kXFY4`Yz%ywfeBml5>3VfjoscGJEzSa&`TSMXJ1$QFMpZv~98Qdvi7Jr>fvJP2x1Id*EV_E6Pnp zby>l@L=^7eWK37f#clrp|vni-J zsKZ_|^_jfAK83HCxl?}SPyDna=RBS3{2TIJK_k~J4;o1s=7pKJk9Q_FmbF=ZqMOBpD zH4VjlFVW?@4eoIN4XQJlxT^OGWGnSE+TBK|o?T6DCr%+L83Z*(Q@QSFfaeN1C4Hp3 z8FV0%&HJSZ-ZMVO#XoQHz=K-B&TVHwQDGf&cCJ7%hgAI2ECgYjd?4g`EG)dPK%9_h}e>>-d44>5VFfLP$(_F4(1c z!;oGR@a@@ys(BT#dVGpVP3Doz`C)k7RT|IjdPYtZ%rgJuBHS|y!>uhz^n!*bs{YLd zk#+90&BY#;j0u36=XSEY{TQ`4Pz(w;&*0mM447xx2fb_?x~I$twHB2D&-r`!y-6Mv zu7767Z`a3{0r}*cTof4oWFsf^pe1hr3T|!0J2Cw@XPg&u_k}`t?JzdkrolIbAF%r0 zX}a}?9CI#zhv0Q?z{}2LeBKb0_p62=;oaoP?j=lMdIPp=jX`?52|bt91S5)tESn?7 zRKsC_);w4P!KTh&G?xQrxdutOk_j#rEXJKB8t8Jc3HC^I!m8IHq(rL$0+gchzXQ7X zZEq0!qkSq`Fgyw0`vX|Y?St>m{)64M_h@BN3XXrd2cmbb;fD8xcq#J~E)7%xFVz?v zNM>fJZO-KP1~$HZ;Y9rO>oHQj5&RrEQ$lX}WAgf6Fvs5n*}BJ3aJ4#Av~|GX+!)f5 z8iS@D%)R@IBfdyaU@c8Qf?7VdWDhw6{(HLM~TA~ zJTVw^+PkSn;xuxv@qk(DNKkKH0_%=9gUxstZah6p+V9)b#np1ymuJH|eq0*!tHiLb zQis@j@5TCmD&+Zv82lcv0RAOb)kCS$lzkxmr&WkIi5#24sG9(1Cj$_(0RvUK&IpEuMK2&>=GYCmuqwIC_iPr{> z6k)FrXt-gBYi)gD(k%;X{%c7d9*Tiii+iE@%5Ts-k%3%_cfsfNa*!W20@j^7z?L&* zv~p&jHk~6`_ZZ;IGaHO9%BS~kmry-neri+bP1cXS2D<__)k%rNPn^D3E~hM>cfA(A z6@SO;4a;%geLwVsZX6k!qDh18M8%B!$uEX>5zmYuG4kE+f1-#ou(2&uY z$?ts-HQ^86-<^Sq^bxf7JPRMBjp6u*Z^-iGqYopK!9XPm^CDYF%Fg8^@17hTl*z&& zr%W0oQHuu{7IMOVh^z>lM7s>cf1I9p+#`e0m*hd9JP#D5C(!C*F<#oS22&SyLC(LG zZ0m|v@cZ!y-(~ZIzhXNMIB}#zODDjOc{8}Vc{wb9)W_TdvQSl)Q?lON1|5!!pxh~0 zT%zoVj>6|LvY-u13>)d@X?=*6?O~tg4uOtE&9GlHinVy#2k=O50sR|nFeO#+(W(n& zF8AP=@F=b7og}5nu9)6jjKBRa!m}Z zgWxNTEx(Tkhid!fxI z0bg(Zh2_$jz-8%*Sz`{Y-_JJ#yL<#*{c-`F%@iJJMZ+NTzKxiGm?oDbBqy$;n>{~a z&c&}#YsL#s_2uxh^E$ZJIzrSYE}Zvf7fwz7`lhk@JKgba9Jdy1r5BE`!E;TF@@3|S z8kH>IPf0+JS5@?>TQ%$3k0LxZnL^7gE%922ET-9HV7X=^xfh=gMi&|WhK$3=(^67U zTnPd@{=&u6)7ZTC6Yf5B0$Uip%ORi~OJ^RERl8kKTPqGP4_m<2D2i?RJIT<>^OQ4n 
z0A4Q$g38Ax$k93suiz@=HAi9Ee1v+{EUJ6G692khC#N>dBDg<>T2Vtx;5mXpyC%?e zQzbeSR=^1_eMsGsNk)r9SsW|u;o7?cDCqBqru}aimh=i{Gpv)5_!-9g{DiFrIrP$# z01O@aiN`1B!qGWgl2KFr?1q!0&_+hdeo_PDg+)}h{UfxA-G_>>9u!eu20kL6NqV|7 zZWZD|-qJh zy?5JCsrVl(rIE>(`^Km!!#2(DnMR>6$MC1&6*SD5z>d;TMDJ!rDp({>hhT)m5*? zf94(_cRP!Y_nFdHbAlKSwF&kX)(|br8i*JB3PE~Yxa5N~4625(KSw&E3@a0bHGV?~BXdHv&uM z4um9o!{au6I8ktj`W<$II42LHnOF&5oxO1Rwo$ALQ6m;h-eAVck5sru8SJ;ugMrU% z)O(?Z4-dC9>;D)S@cl!+*N&hNJxkhV-lAlpKQXKh2Hws>7=4*S4oleMH0c3BzD#IR zxXZd2??l9_&CxeJ0Lv_su$VWD<=@kY_N3xe=q9*uwgv|DcEYmG5lj~DN_KJjivxo7 zr2A+s?(tS+4P1^Pk;j@bCd(RambPQf$R2onT^Pgrx#^P?dc^wceaLdn2MMR&Sf;~+ z2lA#M&Bq-sn0*5G?Gx~!ZWH(wJ;Y!l!nuL*e}(93Gl3xHSpW~?Jl19*N{4-6T6 z#j^#2%o(%`rD}aq;4{AQ2 zgr+?OnC{;XQ)wf(EwK>&_ShkHwmzw17F|U~_WZep{CJize#6Ega0FD`=+LT?k%jLVn0j zqDq7nJl*&O{zMhiliYV;a?MG2Kk*7jHp;`AubbeT(_55Z+W_^f&$QaS2(vC7BfgP=}L40l(}f_k6| z?mm_YgV_^w^^%9Mu+EYFO8$4*hf_GB=q+r_*DtdVg<8Ker56d<2!Q=uKx6>zd zSSUk(FW(1$T|$Xjs3oj<*93L*&%!(I0nAsa$1gf7@Z7_LG7q-Zq1+W*b-q6aDxo?J^#W=79?zf0IksqVS{c zTX?W*AqhKHfe+`Ff%1+=csQNmf zJFH?GJQL`E1q~i3mi-EBL=%Xu)(_OkpFvUScK8(I3mj{|k|Kja5?6PKc--TH7ys<> zb>k^;_nLvVf!A^WaS2!*@d>U<_0#k&2l&C~1Uyk0r12_8%6Z#wAabY(Y-f~Ub>U-@ z$KFY-&pVNq;a`DuHx3+SO|WL0Fj#uVP+gx;IP&5V_LD$J?pY3La^WOCo|g=V+k