From 743a8fb2ead85de53a6f2465e9be3276aa7e70ae Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 11:39:31 +0800 Subject: [PATCH 01/19] update code to 1.0.0 --- .gitignore | 2 + README.md | 58 +- README_CN.md | 54 +- chatlearn/__init__.py | 54 +- chatlearn/checkpoint/__init__.py | 2 +- chatlearn/checkpoint/checkpoint_manager.py | 16 +- chatlearn/data/__init__.py | 2 +- chatlearn/data/data.py | 12 +- chatlearn/data/ranking.py | 2 +- chatlearn/data/sampler.py | 54 +- chatlearn/data/storage.py | 2 +- chatlearn/hooks.py | 3 +- chatlearn/launcher/__init__.py | 2 +- chatlearn/launcher/dlc_utils.py | 5 +- chatlearn/launcher/initialize.py | 6 +- chatlearn/models/__init__.py | 4 +- .../models/{rlhf_module.py => base_module.py} | 437 +++++++--- chatlearn/models/deepspeed/__init__.py | 15 + chatlearn/models/deepspeed/deepspeed_utils.py | 195 +++++ chatlearn/models/deepspeed_module.py | 213 +++++ chatlearn/models/megatron/hooks/__init__.py | 4 +- chatlearn/models/megatron/hooks/generation.py | 7 +- .../models/megatron/hooks/transformer.py | 49 +- chatlearn/models/megatron/lora/__init__.py | 3 +- chatlearn/models/megatron/lora/initializer.py | 2 +- chatlearn/models/megatron/lora/layers.py | 279 +------ chatlearn/models/megatron/lora/utils.py | 2 +- .../megatron/memory_manager/__init__.py | 18 + .../models/megatron/memory_manager/base.py | 46 ++ .../megatron/memory_manager/base_trainer.py | 217 +++++ .../megatron/memory_manager/inference.py | 83 ++ .../megatron/memory_manager/trainer_v1v2.py | 320 ++++++++ .../megatron/memory_manager/trainer_v3.py | 283 +++++++ chatlearn/models/megatron/ops/__init__.py | 2 +- .../models/megatron/ops/policy_gradient.py | 12 +- chatlearn/models/megatron_module.py | 144 ++-- chatlearn/models/torch_module.py | 92 ++- chatlearn/models/vllm/__init__.py | 15 + chatlearn/models/vllm/hooks/__init__.py | 25 + chatlearn/models/vllm/hooks/llm_engine.py | 30 + .../models/vllm/hooks/logits_processor.py | 42 + 
chatlearn/models/vllm/hooks/sampler.py | 39 + chatlearn/models/vllm/hooks/worker.py | 77 ++ chatlearn/models/vllm/vllm_model.py | 70 ++ chatlearn/models/vllm_module.py | 672 +++++++++++++--- chatlearn/runtime/__init__.py | 2 +- chatlearn/runtime/decorator.py | 56 +- chatlearn/runtime/dist_actor.py | 103 ++- chatlearn/runtime/engine.py | 302 +++++-- chatlearn/runtime/environment.py | 191 ++--- chatlearn/runtime/evaluator.py | 136 ++-- chatlearn/runtime/executor.py | 227 ++++-- chatlearn/runtime/model_flow.py | 51 +- chatlearn/runtime/parameter_sync.py | 183 ++++- chatlearn/runtime/trainer.py | 45 +- chatlearn/runtime/utils.py | 152 ++++ chatlearn/schedule/__init__.py | 2 +- chatlearn/schedule/model_manager.py | 241 ++++-- chatlearn/schedule/port_manager.py | 2 +- chatlearn/schedule/resource_manager.py | 20 +- chatlearn/tools/megatron_checkpoint_utils.py | 30 +- chatlearn/tools/megatron_to_hf.py | 4 +- chatlearn/utils/__init__.py | 2 +- chatlearn/utils/arguments.py | 169 +++- chatlearn/utils/constant.py | 33 +- chatlearn/utils/dist_utils.py | 2 +- chatlearn/utils/error_monitor.py | 2 +- chatlearn/utils/flat_tensors.py | 287 +++++++ chatlearn/utils/future.py | 13 +- chatlearn/utils/global_vars.py | 2 +- chatlearn/utils/log_monitor.py | 114 ++- chatlearn/utils/logger.py | 2 +- chatlearn/utils/megatron_import_helper.py | 222 ++++++ .../utils/megatron_import_hook_helper.py | 31 + .../utils/megatron_import_memory_helper.py | 67 ++ .../megatron_import_transformer_helper.py | 29 + chatlearn/utils/megatron_utils.py | 20 +- chatlearn/utils/timer.py | 23 +- chatlearn/utils/utils.py | 50 +- chatlearn/utils/version.py | 4 +- chatlearn/utils/vllm_import_helper.py | 89 +++ chatlearn/utils/vllm_utils.py | 566 +++++++++---- docker/ngc/Dockerfile.ngc22.10 | 42 - docker/ngc/Dockerfile.ngc23.09 | 74 -- docker/torch/Dockerfile.torch2.3.0 | 35 + docs/en/advanced.rst | 23 +- docs/en/api/config.rst | 24 +- docs/en/api/engine.rst | 8 + docs/en/api/module.rst | 6 +- docs/en/chatlearn.md | 71 
+- docs/en/conf.py | 2 +- docs/en/config_yaml.md | 16 +- docs/en/faq.md | 46 ++ docs/en/index.rst | 32 +- docs/en/installation.md | 9 +- docs/en/programming/dpo.md | 125 +++ docs/en/programming/online_dpo.md | 56 ++ .../{programming.md => programming/rlhf.md} | 46 +- docs/en/programming/vllm.md | 189 +++++ docs/en/tutorial/continue_train.md | 42 + docs/en/tutorial/custom_model_flow.md | 93 +++ docs/en/tutorial/data.md | 47 +- docs/en/tutorial/ems.md | 29 + docs/en/tutorial/evaluator.md | 21 + docs/en/tutorial/profile.md | 23 + docs/en/tutorial/run.md | 6 +- docs/en/tutorial/tutorial_bloom.md | 165 ---- docs/en/tutorial/tutorial_llama2.md | 227 ++++-- docs/en/tutorial/tutorial_qwen.md | 38 + docs/images/arch.jpg | Bin 80036 -> 0 bytes docs/images/arch.png | Bin 0 -> 240272 bytes docs/images/class.jpg | Bin 15274 -> 0 bytes docs/images/class.png | Bin 0 -> 80737 bytes docs/images/gpt-perf-66-175.png | Bin 33873 -> 0 bytes docs/images/gpt-perf-cmp.png | Bin 89145 -> 0 bytes docs/images/logo.jpg | Bin 0 -> 476058 bytes docs/images/logo.png | Bin 29555 -> 0 bytes docs/images/perf.png | Bin 0 -> 60887 bytes docs/zh/advanced.rst | 24 +- docs/zh/api/config.rst | 24 +- docs/zh/api/engine.rst | 8 + docs/zh/api/module.rst | 6 +- docs/zh/chatlearn.md | 60 +- docs/zh/conf.py | 2 +- docs/zh/config_yaml.md | 18 +- docs/zh/faq.md | 8 +- docs/zh/index.rst | 32 +- docs/zh/installation.md | 9 +- docs/zh/programming/dpo.md | 123 +++ docs/zh/programming/online_dpo.md | 54 ++ .../{programming.md => programming/rlhf.md} | 44 +- docs/zh/programming/vllm.md | 185 +++++ docs/zh/tutorial/continue_train.md | 8 +- docs/zh/tutorial/custom_model_flow.md | 37 +- docs/zh/tutorial/data.md | 64 +- docs/zh/tutorial/ems.md | 32 + docs/zh/tutorial/evaluator.md | 17 + docs/zh/tutorial/offload.md | 21 - docs/zh/tutorial/profile.md | 2 +- docs/zh/tutorial/run.md | 4 +- docs/zh/tutorial/tutorial_bloom.md | 162 ---- docs/zh/tutorial/tutorial_llama2.md | 230 ++++-- docs/zh/tutorial/tutorial_qwen.md | 46 ++ 
examples/{megatron/dataset => }/__init__.py | 0 examples/huggingface/configs/qwen2/base.yaml | 3 + examples/huggingface/configs/qwen2/dpo.yaml | 37 + .../configs/qwen2/policy_trainer.yaml | 10 + .../huggingface/configs/qwen2/reference.yaml | 17 + .../data/preprocess_data_chatml.py | 94 +++ examples/huggingface/data/reward_dataset.py | 194 +++++ examples/huggingface/entry/train_dpo.py | 44 + examples/huggingface/models/dpo/__init__.py | 0 .../huggingface/models/dpo/policy_trainer.py | 99 +++ .../huggingface/models/dpo/reference_model.py | 27 + examples/huggingface/models/dpo/utils.py | 119 +++ examples/huggingface/models/utils.py | 417 ++++++++++ .../scripts}/base_env.sh | 27 +- .../huggingface/scripts/train_dpo_qwen.sh | 24 + .../{step3_rlhf => }/configs/gpt/base.yaml | 10 +- .../configs/gpt/base_inference.yaml | 0 .../configs/gpt/base_train.yaml | 1 - .../configs/gpt/old_policy_inference.yaml | 1 - .../configs/gpt/old_value_inference.yaml | 0 .../configs/gpt/policy_shared.yaml | 0 .../configs/gpt/ppo_policy.yaml | 0 .../configs/gpt/ppo_value.yaml | 0 .../configs/gpt/reference.yaml | 0 .../configs/gpt/reward_inference.yaml | 4 +- .../configs/gpt/reward_shared.yaml | 2 +- .../{step3_rlhf => }/configs/gpt/rlhf.yaml | 37 +- .../configs/gpt/test_policy.yaml | 6 +- .../{step3_rlhf => }/configs/llama2/base.yaml | 22 +- .../configs/llama2/base_inference.yaml | 0 .../configs/llama2/base_train.yaml | 3 +- examples/megatron/configs/llama2/data.yaml | 1 + examples/megatron/configs/llama2/dpo.yaml | 46 ++ examples/megatron/configs/llama2/eval.yaml | 37 + .../megatron/configs/llama2/eval_vllm.yaml | 37 + .../configs/llama2/grpo_math_vllm.yaml | 67 ++ .../megatron/configs/llama2/math_reward.yaml | 5 + .../configs/llama2/old_policy_inference.yaml | 1 - .../configs/llama2/old_value_inference.yaml | 0 .../llama2/online_dpo.yaml} | 42 +- .../configs/llama2/online_dpo_vllm.yaml | 61 ++ .../configs/llama2/policy_shared.yaml | 1 + .../configs/llama2/ppo_policy.yaml | 8 +- 
.../configs/llama2/ppo_value.yaml | 3 +- .../configs/llama2/reference.yaml | 0 .../configs/llama2/reward_inference.yaml | 0 .../configs/llama2/reward_shared.yaml | 4 +- .../{step3_rlhf => }/configs/llama2/rlhf.yaml | 29 +- .../bloom => configs/llama2}/test_policy.yaml | 7 +- .../configs/llama2/test_reward.yaml | 7 +- .../configs/llama2/test_vllm_policy.yaml | 7 +- .../configs/llama2/vllm_policy_inference.yaml | 36 + .../configs/llama2/vllm_rlhf.yaml | 28 +- examples/megatron/data/__init__.py | 0 .../prepare_data_alignment.py} | 12 +- examples/megatron/data/prepare_data_math.py | 32 + .../prepare_data_reward.py} | 2 +- .../prepare_data_sft.py} | 2 +- examples/megatron/data/prompt_dataset.py | 170 ++++ .../{dataset => data}/reward_dataset.py | 4 +- .../megatron/{dataset => data}/sft_dataset.py | 6 +- examples/megatron/dataset/prompt_dataset.py | 88 -- examples/megatron/entry/train_dpo.py | 40 + examples/megatron/entry/train_grpo_math.py | 97 +++ examples/megatron/entry/train_online_dpo.py | 72 ++ .../train_reward.py} | 19 +- examples/megatron/entry/train_rlhf.py | 72 ++ .../finetune_sft.py => entry/train_sft.py} | 22 +- examples/megatron/models/__init__.py | 2 +- examples/megatron/models/base_trainer.py | 142 +++- .../models/{constants_ppo.py => constants.py} | 17 +- examples/megatron/models/forward_step.py | 55 +- .../megatron/models/old_policy_inference.py | 41 +- .../megatron/models/old_value_inference.py | 17 +- examples/megatron/models/policy_model.py | 277 +++++-- examples/megatron/models/policy_trainer.py | 465 ++++++++++- examples/megatron/models/reference.py | 266 +++++-- examples/megatron/models/reward_inference.py | 109 ++- examples/megatron/models/reward_math.py | 109 +++ examples/megatron/models/reward_model.py | 29 +- examples/megatron/models/rm_sys/__init__.py | 0 .../megatron/models/rm_sys/math_rule_rm.py | 65 ++ .../models/rm_sys/math_utils/__init__.py | 0 .../models/rm_sys/math_utils/grader.py | 339 ++++++++ .../models/rm_sys/math_utils/parser.py | 
752 ++++++++++++++++++ examples/megatron/models/train_helper.py | 94 +++ examples/megatron/models/utils.py | 31 +- examples/megatron/models/value_model.py | 12 +- examples/megatron/models/value_trainer.py | 25 +- .../megatron/models/vllm_policy_inference.py | 188 ++--- examples/megatron/models/vllm_policy_model.py | 58 -- examples/megatron/scripts/base_env.sh | 128 +++ .../scripts/convert_hf_to_megatron.sh | 59 ++ .../scripts/convert_megatron_to_hf.sh | 42 + examples/megatron/scripts/train_dpo_llama.sh | 46 ++ .../megatron/scripts/train_grpo_math_llama.sh | 53 ++ .../scripts/train_online_dpo_llama.sh | 78 ++ .../train_reward_llama.sh} | 34 +- examples/megatron/scripts/train_rlhf_gpt.sh | 137 ++++ examples/megatron/scripts/train_rlhf_llama.sh | 110 +++ .../train_sft_llama.sh} | 33 +- examples/megatron/step1_sft/bloom_sft.sh | 134 ---- .../megatron/step2_reward/bloom_reward.sh | 135 ---- examples/megatron/step3_rlhf/.gitignore | 1 - .../step3_rlhf/configs/bloom/base.yaml | 59 -- .../configs/bloom/base_inference.yaml | 15 - .../step3_rlhf/configs/bloom/base_train.yaml | 10 - .../step3_rlhf/configs/bloom/eval.yaml | 27 - .../configs/bloom/old_policy_inference.yaml | 13 - .../configs/bloom/old_value_inference.yaml | 3 - .../configs/bloom/policy_shared.yaml | 8 - .../step3_rlhf/configs/bloom/ppo_policy.yaml | 29 - .../step3_rlhf/configs/bloom/ppo_value.yaml | 27 - .../step3_rlhf/configs/bloom/reference.yaml | 5 - .../configs/bloom/reward_inference.yaml | 6 - .../configs/bloom/reward_shared.yaml | 12 - .../step3_rlhf/configs/bloom/test_reward.yaml | 18 - .../step3_rlhf/configs/llama2/eval.yaml | 27 - .../configs/llama2/test_policy.yaml | 24 - .../configs/llama2/vllm_policy_inference.yaml | 37 - .../step3_rlhf/run_scripts/bloom/base_env.sh | 34 - .../run_scripts/bloom/run_1b1_1b1.sh | 40 - .../run_scripts/bloom/run_1b7_1b7.sh | 40 - .../run_scripts/bloom/run_7b1_7b1.sh | 40 - .../step3_rlhf/run_scripts/gpt/base_env.sh | 56 -- .../step3_rlhf/run_scripts/gpt/benchmark.sh | 
20 - .../run_scripts/gpt/run_13b_13b_16g.sh | 52 -- .../run_scripts/gpt/run_175b_175b_128g.sh | 61 -- .../run_scripts/gpt/run_30b_30b_32g.sh | 55 -- .../run_scripts/gpt/run_66b_66b_128g.sh | 53 -- .../run_scripts/gpt/run_66b_66b_32g.sh | 41 - .../run_scripts/gpt/run_66b_66b_64g.sh | 61 -- .../run_scripts/gpt/run_7b_7b_16g.sh | 53 -- .../run_scripts/gpt/run_7b_7b_32g.sh | 53 -- .../run_scripts/gpt/run_7b_7b_8g.sh | 48 -- .../step3_rlhf/run_scripts/llama2/base_env.sh | 46 -- .../run_scripts/llama2/run_13b_13b.sh | 39 - .../run_scripts/llama2/run_70b_70b.sh | 56 -- .../run_scripts/llama2/run_7b_7b.sh | 42 - .../run_scripts/llama2/run_7b_7b_vllm.sh | 39 - .../step3_rlhf/tests/run_policy_generation.sh | 37 - .../tests/run_vllm_policy_generation.sh | 38 - .../tests/test_vllm_policy_generation.py | 57 -- examples/megatron/step3_rlhf/train_rlhf.py | 117 --- .../megatron/step3_rlhf/train_vllm_rlhf.py | 119 --- examples/megatron/tests/get_eval_reward.py | 66 ++ examples/megatron/tests/get_eval_reward.sh | 45 ++ .../megatron/tests/run_policy_generation.sh | 61 ++ .../tests/test_policy_generation.py | 38 +- .../{step3_rlhf => }/tests/test_reward.sh | 10 +- .../tests/test_reward_forward.py | 6 +- requirements.txt | 21 + setup.py | 2 +- tests/configs/exp.yaml | 8 +- tests/configs/rlhf.yaml | 16 +- tests/configs/rlhf2.yaml | 50 ++ tests/configs/rlhf_cpu.yaml | 55 ++ tests/configs/test_eval.yaml | 4 +- tests/configs/test_eval2.yaml | 25 + tests/launch_helper.sh | 25 - tests/run_tests.sh | 10 +- tests/test_args.py | 8 +- tests/test_data_dp.py | 195 +++++ tests/test_data_dp_zero.py | 217 +++++ tests/test_distactor.py | 4 +- tests/test_dynamic_data.py | 32 +- tests/test_evaluator.py | 17 +- tests/test_evaluator2.py | 20 +- tests/test_evaluator_multi.py | 86 ++ tests/test_fixed_data.py | 32 +- tests/test_flat_tensors.py | 84 ++ tests/test_indivisible_batchsz.py | 47 +- tests/test_placement.py | 10 +- tests/test_placement_colocate.py | 12 +- tests/test_placement_colocate2.py | 26 +- 
tests/test_placement_colocate3.py | 26 +- tests/test_placement_colocate4.py | 26 +- tests/test_relay_buffer.py | 36 +- tests/test_rlhf.py | 20 +- tests/test_rlhf_ckpt.py | 42 +- tests/test_rlhf_ckpt_replica.py | 44 +- tests/test_rlhf_colocate_forward_train.py | 62 ++ tests/test_rlhf_cpu.py | 50 ++ tests/test_rlhf_custom.py | 56 +- tests/test_rlhf_data_input.py | 76 ++ tests/test_rlhf_no_replica.py | 81 +- tests/test_rlhf_placement_colocate.py | 6 +- tests/test_rlhf_placement_colocate2.py | 23 +- tests/test_rlhf_replica.py | 82 +- tests/test_rlhf_replica2.py | 81 +- tests/test_sampler.py | 110 +++ tests/test_utils.py | 4 +- tests/utils.py | 20 +- 336 files changed, 14175 insertions(+), 5725 deletions(-) rename chatlearn/models/{rlhf_module.py => base_module.py} (58%) create mode 100644 chatlearn/models/deepspeed/__init__.py create mode 100644 chatlearn/models/deepspeed/deepspeed_utils.py create mode 100644 chatlearn/models/deepspeed_module.py create mode 100644 chatlearn/models/megatron/memory_manager/__init__.py create mode 100644 chatlearn/models/megatron/memory_manager/base.py create mode 100644 chatlearn/models/megatron/memory_manager/base_trainer.py create mode 100644 chatlearn/models/megatron/memory_manager/inference.py create mode 100644 chatlearn/models/megatron/memory_manager/trainer_v1v2.py create mode 100644 chatlearn/models/megatron/memory_manager/trainer_v3.py create mode 100644 chatlearn/models/vllm/__init__.py create mode 100644 chatlearn/models/vllm/hooks/__init__.py create mode 100644 chatlearn/models/vllm/hooks/llm_engine.py create mode 100644 chatlearn/models/vllm/hooks/logits_processor.py create mode 100644 chatlearn/models/vllm/hooks/sampler.py create mode 100644 chatlearn/models/vllm/hooks/worker.py create mode 100644 chatlearn/models/vllm/vllm_model.py create mode 100644 chatlearn/runtime/utils.py create mode 100644 chatlearn/utils/flat_tensors.py create mode 100644 chatlearn/utils/megatron_import_helper.py create mode 100644 
chatlearn/utils/megatron_import_hook_helper.py create mode 100644 chatlearn/utils/megatron_import_memory_helper.py create mode 100644 chatlearn/utils/megatron_import_transformer_helper.py create mode 100644 chatlearn/utils/vllm_import_helper.py delete mode 100644 docker/ngc/Dockerfile.ngc22.10 delete mode 100644 docker/ngc/Dockerfile.ngc23.09 create mode 100644 docker/torch/Dockerfile.torch2.3.0 create mode 100644 docs/en/faq.md create mode 100644 docs/en/programming/dpo.md create mode 100644 docs/en/programming/online_dpo.md rename docs/en/{programming.md => programming/rlhf.md} (77%) create mode 100644 docs/en/programming/vllm.md create mode 100644 docs/en/tutorial/continue_train.md create mode 100644 docs/en/tutorial/custom_model_flow.md create mode 100644 docs/en/tutorial/ems.md create mode 100644 docs/en/tutorial/evaluator.md create mode 100644 docs/en/tutorial/profile.md delete mode 100644 docs/en/tutorial/tutorial_bloom.md create mode 100644 docs/en/tutorial/tutorial_qwen.md delete mode 100644 docs/images/arch.jpg create mode 100644 docs/images/arch.png delete mode 100644 docs/images/class.jpg create mode 100644 docs/images/class.png delete mode 100644 docs/images/gpt-perf-66-175.png delete mode 100644 docs/images/gpt-perf-cmp.png create mode 100644 docs/images/logo.jpg delete mode 100644 docs/images/logo.png create mode 100644 docs/images/perf.png create mode 100644 docs/zh/programming/dpo.md create mode 100644 docs/zh/programming/online_dpo.md rename docs/zh/{programming.md => programming/rlhf.md} (76%) create mode 100644 docs/zh/programming/vllm.md create mode 100644 docs/zh/tutorial/ems.md create mode 100644 docs/zh/tutorial/evaluator.md delete mode 100644 docs/zh/tutorial/offload.md delete mode 100644 docs/zh/tutorial/tutorial_bloom.md create mode 100644 docs/zh/tutorial/tutorial_qwen.md rename examples/{megatron/dataset => }/__init__.py (100%) create mode 100644 examples/huggingface/configs/qwen2/base.yaml create mode 100644 
examples/huggingface/configs/qwen2/dpo.yaml create mode 100644 examples/huggingface/configs/qwen2/policy_trainer.yaml create mode 100644 examples/huggingface/configs/qwen2/reference.yaml create mode 100644 examples/huggingface/data/preprocess_data_chatml.py create mode 100644 examples/huggingface/data/reward_dataset.py create mode 100644 examples/huggingface/entry/train_dpo.py create mode 100644 examples/huggingface/models/dpo/__init__.py create mode 100644 examples/huggingface/models/dpo/policy_trainer.py create mode 100644 examples/huggingface/models/dpo/reference_model.py create mode 100644 examples/huggingface/models/dpo/utils.py create mode 100644 examples/huggingface/models/utils.py rename examples/{megatron/step3_rlhf/run_scripts => huggingface/scripts}/base_env.sh (58%) create mode 100644 examples/huggingface/scripts/train_dpo_qwen.sh rename examples/megatron/{step3_rlhf => }/configs/gpt/base.yaml (85%) rename examples/megatron/{step3_rlhf => }/configs/gpt/base_inference.yaml (100%) rename examples/megatron/{step3_rlhf => }/configs/gpt/base_train.yaml (89%) rename examples/megatron/{step3_rlhf => }/configs/gpt/old_policy_inference.yaml (86%) rename examples/megatron/{step3_rlhf => }/configs/gpt/old_value_inference.yaml (100%) rename examples/megatron/{step3_rlhf => }/configs/gpt/policy_shared.yaml (100%) rename examples/megatron/{step3_rlhf => }/configs/gpt/ppo_policy.yaml (100%) rename examples/megatron/{step3_rlhf => }/configs/gpt/ppo_value.yaml (100%) rename examples/megatron/{step3_rlhf => }/configs/gpt/reference.yaml (100%) rename examples/megatron/{step3_rlhf => }/configs/gpt/reward_inference.yaml (60%) rename examples/megatron/{step3_rlhf => }/configs/gpt/reward_shared.yaml (84%) rename examples/megatron/{step3_rlhf => }/configs/gpt/rlhf.yaml (76%) rename examples/megatron/{step3_rlhf => }/configs/gpt/test_policy.yaml (85%) rename examples/megatron/{step3_rlhf => }/configs/llama2/base.yaml (77%) rename examples/megatron/{step3_rlhf => 
}/configs/llama2/base_inference.yaml (100%) rename examples/megatron/{step3_rlhf => }/configs/llama2/base_train.yaml (80%) create mode 100644 examples/megatron/configs/llama2/data.yaml create mode 100644 examples/megatron/configs/llama2/dpo.yaml create mode 100644 examples/megatron/configs/llama2/eval.yaml create mode 100644 examples/megatron/configs/llama2/eval_vllm.yaml create mode 100644 examples/megatron/configs/llama2/grpo_math_vllm.yaml create mode 100644 examples/megatron/configs/llama2/math_reward.yaml rename examples/megatron/{step3_rlhf => }/configs/llama2/old_policy_inference.yaml (84%) rename examples/megatron/{step3_rlhf => }/configs/llama2/old_value_inference.yaml (100%) rename examples/megatron/{step3_rlhf/configs/bloom/rlhf.yaml => configs/llama2/online_dpo.yaml} (62%) create mode 100644 examples/megatron/configs/llama2/online_dpo_vllm.yaml rename examples/megatron/{step3_rlhf => }/configs/llama2/policy_shared.yaml (88%) rename examples/megatron/{step3_rlhf => }/configs/llama2/ppo_policy.yaml (70%) rename examples/megatron/{step3_rlhf => }/configs/llama2/ppo_value.yaml (88%) rename examples/megatron/{step3_rlhf => }/configs/llama2/reference.yaml (100%) rename examples/megatron/{step3_rlhf => }/configs/llama2/reward_inference.yaml (100%) rename examples/megatron/{step3_rlhf => }/configs/llama2/reward_shared.yaml (89%) rename examples/megatron/{step3_rlhf => }/configs/llama2/rlhf.yaml (71%) rename examples/megatron/{step3_rlhf/configs/bloom => configs/llama2}/test_policy.yaml (82%) rename examples/megatron/{step3_rlhf => }/configs/llama2/test_reward.yaml (70%) rename examples/megatron/{step3_rlhf => }/configs/llama2/test_vllm_policy.yaml (82%) create mode 100644 examples/megatron/configs/llama2/vllm_policy_inference.yaml rename examples/megatron/{step3_rlhf => }/configs/llama2/vllm_rlhf.yaml (70%) create mode 100644 examples/megatron/data/__init__.py rename examples/megatron/{step3_rlhf/prepare_data.py => data/prepare_data_alignment.py} (79%) create 
mode 100644 examples/megatron/data/prepare_data_math.py rename examples/megatron/{step2_reward/prepare_data.py => data/prepare_data_reward.py} (95%) rename examples/megatron/{step1_sft/prepare_data.py => data/prepare_data_sft.py} (94%) create mode 100644 examples/megatron/data/prompt_dataset.py rename examples/megatron/{dataset => data}/reward_dataset.py (98%) rename examples/megatron/{dataset => data}/sft_dataset.py (96%) delete mode 100644 examples/megatron/dataset/prompt_dataset.py create mode 100644 examples/megatron/entry/train_dpo.py create mode 100644 examples/megatron/entry/train_grpo_math.py create mode 100644 examples/megatron/entry/train_online_dpo.py rename examples/megatron/{step2_reward/finetune_reward.py => entry/train_reward.py} (92%) create mode 100644 examples/megatron/entry/train_rlhf.py rename examples/megatron/{step1_sft/finetune_sft.py => entry/train_sft.py} (84%) rename examples/megatron/models/{constants_ppo.py => constants.py} (90%) create mode 100644 examples/megatron/models/reward_math.py create mode 100644 examples/megatron/models/rm_sys/__init__.py create mode 100644 examples/megatron/models/rm_sys/math_rule_rm.py create mode 100644 examples/megatron/models/rm_sys/math_utils/__init__.py create mode 100644 examples/megatron/models/rm_sys/math_utils/grader.py create mode 100644 examples/megatron/models/rm_sys/math_utils/parser.py create mode 100644 examples/megatron/models/train_helper.py delete mode 100644 examples/megatron/models/vllm_policy_model.py create mode 100644 examples/megatron/scripts/base_env.sh create mode 100644 examples/megatron/scripts/convert_hf_to_megatron.sh create mode 100644 examples/megatron/scripts/convert_megatron_to_hf.sh create mode 100644 examples/megatron/scripts/train_dpo_llama.sh create mode 100644 examples/megatron/scripts/train_grpo_math_llama.sh create mode 100644 examples/megatron/scripts/train_online_dpo_llama.sh rename examples/megatron/{step2_reward/llama2_reward.sh => scripts/train_reward_llama.sh} 
(82%) create mode 100644 examples/megatron/scripts/train_rlhf_gpt.sh create mode 100644 examples/megatron/scripts/train_rlhf_llama.sh rename examples/megatron/{step1_sft/llama2_sft.sh => scripts/train_sft_llama.sh} (84%) delete mode 100644 examples/megatron/step1_sft/bloom_sft.sh delete mode 100644 examples/megatron/step2_reward/bloom_reward.sh delete mode 100644 examples/megatron/step3_rlhf/.gitignore delete mode 100644 examples/megatron/step3_rlhf/configs/bloom/base.yaml delete mode 100644 examples/megatron/step3_rlhf/configs/bloom/base_inference.yaml delete mode 100644 examples/megatron/step3_rlhf/configs/bloom/base_train.yaml delete mode 100644 examples/megatron/step3_rlhf/configs/bloom/eval.yaml delete mode 100644 examples/megatron/step3_rlhf/configs/bloom/old_policy_inference.yaml delete mode 100644 examples/megatron/step3_rlhf/configs/bloom/old_value_inference.yaml delete mode 100644 examples/megatron/step3_rlhf/configs/bloom/policy_shared.yaml delete mode 100644 examples/megatron/step3_rlhf/configs/bloom/ppo_policy.yaml delete mode 100644 examples/megatron/step3_rlhf/configs/bloom/ppo_value.yaml delete mode 100644 examples/megatron/step3_rlhf/configs/bloom/reference.yaml delete mode 100644 examples/megatron/step3_rlhf/configs/bloom/reward_inference.yaml delete mode 100644 examples/megatron/step3_rlhf/configs/bloom/reward_shared.yaml delete mode 100644 examples/megatron/step3_rlhf/configs/bloom/test_reward.yaml delete mode 100644 examples/megatron/step3_rlhf/configs/llama2/eval.yaml delete mode 100644 examples/megatron/step3_rlhf/configs/llama2/test_policy.yaml delete mode 100644 examples/megatron/step3_rlhf/configs/llama2/vllm_policy_inference.yaml delete mode 100644 examples/megatron/step3_rlhf/run_scripts/bloom/base_env.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/bloom/run_1b1_1b1.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/bloom/run_1b7_1b7.sh delete mode 100644 
examples/megatron/step3_rlhf/run_scripts/bloom/run_7b1_7b1.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/gpt/base_env.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/gpt/benchmark.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/gpt/run_13b_13b_16g.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/gpt/run_175b_175b_128g.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/gpt/run_30b_30b_32g.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/gpt/run_66b_66b_128g.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/gpt/run_66b_66b_32g.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/gpt/run_66b_66b_64g.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/gpt/run_7b_7b_16g.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/gpt/run_7b_7b_32g.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/gpt/run_7b_7b_8g.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/llama2/base_env.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/llama2/run_13b_13b.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/llama2/run_70b_70b.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/llama2/run_7b_7b.sh delete mode 100644 examples/megatron/step3_rlhf/run_scripts/llama2/run_7b_7b_vllm.sh delete mode 100644 examples/megatron/step3_rlhf/tests/run_policy_generation.sh delete mode 100644 examples/megatron/step3_rlhf/tests/run_vllm_policy_generation.sh delete mode 100644 examples/megatron/step3_rlhf/tests/test_vllm_policy_generation.py delete mode 100644 examples/megatron/step3_rlhf/train_rlhf.py delete mode 100644 examples/megatron/step3_rlhf/train_vllm_rlhf.py create mode 100644 examples/megatron/tests/get_eval_reward.py create mode 100644 examples/megatron/tests/get_eval_reward.sh create mode 100644 examples/megatron/tests/run_policy_generation.sh rename examples/megatron/{step3_rlhf => 
}/tests/test_policy_generation.py (59%) rename examples/megatron/{step3_rlhf => }/tests/test_reward.sh (74%) rename examples/megatron/{step3_rlhf => }/tests/test_reward_forward.py (90%) create mode 100644 requirements.txt create mode 100644 tests/configs/rlhf2.yaml create mode 100644 tests/configs/rlhf_cpu.yaml create mode 100644 tests/configs/test_eval2.yaml delete mode 100644 tests/launch_helper.sh create mode 100644 tests/test_data_dp.py create mode 100644 tests/test_data_dp_zero.py create mode 100644 tests/test_evaluator_multi.py create mode 100644 tests/test_flat_tensors.py create mode 100644 tests/test_rlhf_colocate_forward_train.py create mode 100644 tests/test_rlhf_cpu.py create mode 100644 tests/test_rlhf_data_input.py create mode 100644 tests/test_sampler.py diff --git a/.gitignore b/.gitignore index d713302b..e5255116 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ +examples/megatron/step3_rlhf/None +.ipynb_checkpoints/ output/ build/ dist/ diff --git a/README.md b/README.md index aab532c0..73e1ae8e 100644 --- a/README.md +++ b/README.md @@ -4,12 +4,12 @@

- ChatLearn + ChatLearn

-A flexible and efficient training framework for large-scale RLHF +A flexible and efficient training framework for large-scale alignment

@@ -20,22 +20,22 @@ A flexible and efficient training framework for large-scale RLHF --- *Latest News* 🔥 -- [2023/10] We officially released ChatLearn! Check out our [documentation](docs/en/chatlearn.md). +- [2024/8] We officially released ChatLearn! Check out our [documentation](docs/en/chatlearn.md). --- -ChatLearn is a flexible and efficient training framework for large-scale RLHF. +ChatLearn is a flexible and efficient training framework for large-scale alignment. ![RLHF Flow](docs/images/rlhf.png) Chatlearn has the following advantages: 1. **User-friendly programming interface**: Users can focus on programming individual models by wrapping a few functions, while the system takes care of resource scheduling, data and control flow transmission, and distributed execution. -2. **Multiple distributed acceleration backends**: Users can use different computation backends for model development, such as Megatron-LM and DeepSpeed. -3. **Hybrid parallel strategies**: Various parallel strategies can be employed, including Data Parallel, Tensor Parallel, Sequence Parallel, Pipeline Parallel, ZeRO, and the combination thereof. -4. **Flexible resource allocation**: ChatLearn supports a flexible resource scheduling mechanism, allowing for exclusive or shared resource allocation among different models. It utilizes system scheduling strategies to enable efficient sequential or parallel execution. -5. **High performance**: Compared to the current state-of-the-art systems, ChatLearn achieves a 48%-82% improvement in performance from 7B to 30B scales. Additionally, ChatLearn supports even larger-scale RLHF training, such as 175B Policy + 175B Reward. +2. **Highly Scalable Training Methodology**: ChatLearn offers alignment training such as RLHF, DPO, OnlineDPO and GRPO, while also supporting user-defined execution flows for models, enabling a highly convenient and customizable training process. +3. 
**Diverse Distributed Acceleration Engines**: Users can leverage various computational backends for model construction, such as Megatron-LM, DeepSpeed, vLLM, and others. For instance, we can use Megatron-LM for training and vLLM to expedite inference. +4. **Flexible Parallel Strategies and Resource Allocation**: ChatLearn supports different parallel strategies for various model configurations, enabling the formulation of distinct parallel approaches tailored to each model's computational, memory, and communication characteristics. Additionally, ChatLearn features a flexible resource scheduling mechanism that accommodates exclusive or shared use of resources across models. Through its system scheduling policies, it facilitates efficient serial/parallel execution and optimized GPU memory sharing, enhancing overall performance and efficiency. +5. **High performance**: Compared to current state-of-the-art (SOTA) systems, ChatLearn achieves a 52% performance improvement at the 7B+7B(Policy+Reward) scale and a 137% improvement at the 70B+70B scale. Meanwhile, ChatLearn supports larger-scale alignment training, such as 300B+300B. -By providing a comprehensive and efficient framework, ChatLearn empowers researchers and practitioners to train large-scale RLHF models with ease, scalability, and improved performance. +By providing a comprehensive and efficient framework, ChatLearn empowers researchers and practitioners to train large-scale alignment models with ease, scalability, and improved performance. # Quick Start @@ -43,42 +43,30 @@ Please refer to the [documentation](https://chatlearn.readthedocs.io/en/latest/) 1. [Environment and Code Setup](docs/en/installation.md) 2. [End-to-End Training Tutorial with LLaMA/LLaMA2 Model](docs/en/tutorial/tutorial_llama2.md) -3. [End-to-End Training Tutorial with BLOOM Model](docs/en/tutorial/tutorial_bloom.md) -# Supported Models - -The current ChatLearn framework supports RLHF training for GPT/LLaMA models of any scale. 
- -| Model Type | -|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| GPT (various scales of GPT models) | -| LLaMA (`lmsys/vicuna-13b-v1.3`, `decapoda-research/llama-7b-hf`, `decapoda-research/llama-13b-hf`, `decapoda-research/llama-30b-hf`, `decapoda-research/llama-65b-hf`, etc.) | -| LLaMA2 (`meta-llama/Llama-2-7b-hf`, `meta-llama/Llama-2-13b-hf`, `meta-llama/Llama-2-70b-hf`) | -| Baichuan (`baichuan-inc/Baichuan-7B`, `baichuan-inc/Baichuan-13B-Base`) | -| BLOOM (`bigscience/bloom-1b1`, `bigscience/bloom-7b1`, `bigscience/bloom`) | # Performance -We have compared the RLHF training throughput of models with different parameter sizes. We adopt an N+N model configuration, where the Policy model and Reward model have the same parameter size. The tests are performed on A800-80GB GPUs, with a single node configuration of 8 GPU cards and 800Gb RDMA interconnect between nodes. We have compared ChatLearn with DeepSpeed-Chat for model configurations ranging from 7B to 66B, with LoRA disabled/enabled. ChatLearn achieves a speedup of 48% to 82% across different scales. In larger scales, under the configuration of 30B+30B with 32 GPUs, DeepSpeed-Chat encounters OOM errors without LoRA enabled. Under the configuration of 66B+66B with 32 GPUs, DeepSpeed-Chat encounters OOM errors regardless of LoRA being enabled or not. ChatLearn, on the other hand, supports training of larger model configurations on the same machine scale. DeepSpeed-Chat encounters a kernel error when seq_len=2048. - -![Compare ChatLearn with DeepSpeed-Chat](docs/images/gpt-perf-cmp.png) +We compared the RLHF training throughput of models with different parameter scales, adopting an N+N model configuration where both the Policy model and the Reward model have the same number of parameters. 
We benchmarked against DeepSpeed-Chat and OpenRLHF with 7B and 70B model configurations. For the 8 GPU setup with a 7B+7B scale, we achieved a 115% speedup; for the 32 GPU setup with a 70B+70B scale, the speedup was 208%. The larger the scale, the more pronounced the acceleration effect becomes. Additionally, ChatLearn can support even larger-scale alignment training, such as at a 300B+300B scale. -In addition, we evaluate the performance under larger scales and different sequence length configurations. -The following graphs show the RLHF training performance for 66B+66B and 175B+175B. - -![ChatLearn 66B 175B](docs/images/gpt-perf-66-175.png) +

+ + compare perf + +

-Note: The current performance benchmark is based on the GPT series models. +Note: The performance of DeepSpeed-Chat and OpenRLHF has already been optimized. # Roadmap -ChatLearn will support the following features in the future: -- [ ] Support for more models -- [ ] Support for efficient inference engines such as vLLM + +The upcoming features for ChatLearn include: +- [ ] Support models with Megatron-Core format +- [ ] Support the alignment training for MoE (Mixture of Experts) models - [ ] Integration with DeepSpeed as a training backend -- [ ] Automatic parallel strategy optimization -- [ ] Support for more RL algorithms +- [ ] Support for more models +- [ ] Performance Optimization +- [ ] Support for more alignment algorithms

We welcome community partners to collaborate and contribute to the development. - diff --git a/README_CN.md b/README_CN.md index 3a37f1ca..3a9bbd7e 100644 --- a/README_CN.md +++ b/README_CN.md @@ -4,12 +4,12 @@

- ChatLearn + ChatLearn

-灵活易用、大规模 RLHF 高效训练框架 +灵活、易用、高效的大规模 Alignment 训练框架

English  |  中文  @@ -18,21 +18,20 @@ --- *最新进展* 🔥 -- [2023/10] 正式开源 ChatLearn,更多介绍请参考我们的 [文档](docs/zh/chatlearn.md)。 +- [2024/8] 正式开源 ChatLearn,更多介绍请参考我们的 [文档](docs/zh/chatlearn.md)。 --- -ChatLearn 是一个灵活易用、大规模 RLHF 高效训练框架。ChatLearn 通过对模型计算逻辑的抽象,解耦了模型和计算 backend、分布式策略的绑定,提供灵活的资源调度机制,可以支持灵活的资源分配和并行调度策略。 +ChatLearn 是一个灵活、易用、高效的大规模 Alignment 训练框架。ChatLearn 通过对模型计算逻辑的抽象,解耦了模型和计算 backend、分布式策略的绑定,提供灵活的资源调度机制,可以支持灵活的资源分配和并行调度策略。 ![RLHF Flow](docs/images/rlhf.png) ChatLearn的特点如下: 1. **易用的编程接口**: ChatLearn提供通用的编程抽象,用户只需要封装几个函数即可完成模型构造。用户只需要专注于单模型的编程,系统负责资源调度、数据流传输、控制流传输、分布式执行等。 -2. **多种分布式加速引擎**: 用户可以使用不同的计算 backend 进行模型建模,如 Megatron-LM、DeepSpeed 等。 -3. **Hybrid 并行策略**: ChatLearn 支持各种并行策略组合:Data Parallel/Tensor Parallel/Sequence Parallel/Pipeline Parallel/ZeRO 及其组合。 -4. **灵活的资源分配**: ChatLearn 支持灵活的资源调度机制,支持各模型的资源独占或复用,通过系统调度策略支持高效的串行/并行执行。 -5. **高性能**: 相较于当前的 SOTA 系统,ChatLearn 在 7B 到 30 B 规模提升 48%-82%。同时,ChatLearn 支持更大规模的 RLHF 训练 (175B Policy + 175B Reward)。 - +2. **高可扩展的训练方式**: ChatLearn 提供 RLHF、DPO、OnlineDPO、GRPO 等 Alignment 训练,同时也支持用户自定义 model 的执行 flow,使定制化训练流程变得非常便捷。 +3. **多种分布式加速引擎**: 用户可以使用不同的计算 backend 进行模型建模,如 Megatron-LM、DeepSpeed、vLLM 等。用户也可以组合使用不同的 backend,如用 Megatron-LM 来进行加速训练,用 vLLM 来加速推理。 +4. **灵活的并行策略和资源分配**: ChatLearn 支持不同模型配置不同的并行策略,可以结合各模型计算、显存、通信的特点来制定不同的并行策略。同时 ChatLearn 支持灵活的资源调度机制,支持各模型的资源独占或复用,通过系统调度策略支持高效的串行/并行执行和高效的显存共享。 +5. **高性能**: 相较于当前的 SOTA 系统,ChatLearn 在 7B+7B (Policy+Reward) 规模性能提升52%,70B+70B 规模性能提升 137%。同时,ChatLearn 支持更大规模的 Alignment 训练,例如:300B+300B。 # 快速开始 @@ -40,43 +39,30 @@ ChatLearn的特点如下: 1. [环境和代码准备](docs/zh/installation.md) 2. [基于 LLaMA/LLaMA2 模型的端到端训练教程](docs/zh/tutorial/tutorial_llama2.md) -3. 
[基于 BLOOM 模型的端到端训练教程](docs/zh/tutorial/tutorial_bloom.md) - -# 支持的模型 - -当前 ChatLearn 框架支持任意规模的 GPT/LLaMA 模型 RLHF 训练。 - -| 模型类型 | -|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| GPT (GPT 系列各种规模的模型) | -| LLaMA (`lmsys/vicuna-13b-v1.3`, `decapoda-research/llama-7b-hf`, `decapoda-research/llama-13b-hf`, `decapoda-research/llama-30b-hf`, `decapoda-research/llama-65b-hf`, etc.) | -| LLaMA2 (`meta-llama/Llama-2-7b-hf`, `meta-llama/Llama-2-13b-hf`, `meta-llama/Llama-2-70b-hf`) | -| Baichuan (`baichuan-inc/Baichuan-7B`, `baichuan-inc/Baichuan-13B-Base`) | -| BLOOM (`bigscience/bloom-1b1`, `bigscience/bloom-7b1`, `bigscience/bloom`) | # 性能评估 -我们比较了不同参数量规模模型的 RLHF 训练吞吐量,我们采取 N+N 的模型配置,即 Policy 模型和 Reward 模型采用相同大小的参数量。测试基于 A800-80GB GPU 进行,单节点配置 8 卡 GPU,节点间采用 800Gb RDMA 互联。我们和 DeepSpeed-Chat 对比了从 7B 到 66B 的模型配置,关闭/开启 LoRA 后的性能对比,ChatLearn 在不同规模有 48% 到 82% 的加速,在更大的规模下,在 30B+30B,32GPUs 的配置下,不开启 LoRA 的情况下,DeepSpeed-chat 出现 OOM,在 66B+66B,32GPUs 的配置下,DeepSpeed-Chat 无论是否开启 LoRA 均会出现 OOM,ChatLearn 在相同机器规模下,可以支持更大的模型配置训练。在 seq_len=2048 时,DeepSpeed-Chat 出现了 kernel error。 +我们比较了不同参数量规模模型的 RLHF 训练吞吐量,我们采取 N+N 的模型配置,即 Policy 模型和 Reward 模型采用相同大小的参数量。我们和 DeepSpeed-Chat、OpenRLHF 对比了 7B 和 70B 的模型配置,在 8 GPUs 7B+7B 规模,有 115% 的加速,在 32 GPUs 70B+70B 规模,有 208% 的加速。规模越大,加速效果越明显。同时ChatLearn还能支持更大规模的 Alignment 训练,例如:300B+300B 规模。 -![Compare ChatLearn with DeepSpeed-Chat](docs/images/gpt-perf-cmp.png) - -同时,我们评估了在更大规模以及不同 sequence length 配置下的性能。下图分别为 66B+66B,175B+175B 的 RLHF 训练性能。 - -![ChatLearn 66B 175B](docs/images/gpt-perf-66-175.png) +

+ + compare perf + +

-注:当前的性能 benchmark 均基于 GPT 系列模型。 +注:DeepSpeed-Chat和OpenRLHF性能已经优化过。 # Roadmap ChatLearn 接下来会支持以下特性: -- [ ] 支持更多的模型; -- [ ] 支持 vLLM 等高效推理引擎; +- [ ] 支持Megatron-Core格式模型; +- [ ] 支持MoE模型Alignment训练; - [ ] 接入 DeepSpeed 作为训练 backend; -- [ ] 自动并行策略调优; -- [ ] 支持更多的 RL 算法; +- [ ] 支持更多的模型; +- [ ] 性能优化; +- [ ] 支持更多的 Alignment 算法;

我们欢迎社区小伙伴参与进来合作开发。 - diff --git a/chatlearn/__init__.py b/chatlearn/__init__.py index 0714ea6d..2769bd98 100644 --- a/chatlearn/__init__.py +++ b/chatlearn/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,18 +18,62 @@ from chatlearn import hooks from chatlearn.launcher.initialize import init -from chatlearn.models.megatron_module import RLHFMegatronModule -from chatlearn.models.rlhf_module import RLHFModule -from chatlearn.models.torch_module import RLHFTorchModule +from chatlearn.models.base_module import BaseModule +from chatlearn.models.deepspeed_module import DeepSpeedModule +from chatlearn.models.megatron_module import MegatronModule +from chatlearn.models.torch_module import TorchModule +from chatlearn.runtime.engine import DPOEngine from chatlearn.runtime.engine import Engine from chatlearn.runtime.engine import Environment from chatlearn.runtime.engine import EvalEngine +from chatlearn.runtime.engine import OnlineDPOEngine +from chatlearn.runtime.engine import GRPOEngine +from chatlearn.runtime.engine import GRPOMathEngine from chatlearn.runtime.engine import RLHFEngine from chatlearn.runtime.engine import Trainer from chatlearn.runtime.evaluator import Evaluator from chatlearn.utils.future import get from chatlearn.utils.global_vars import get_args +from chatlearn.utils.logger import logger vllm_exist = importlib.util.find_spec("vllm") if vllm_exist: - from chatlearn.models.vllm_module import RLHFVLLMModule + import vllm + from chatlearn.utils.constant import CURRENT_VLLM_VERSION, VLLMVersion # pylint: disable=ungrouped-imports + if CURRENT_VLLM_VERSION in [version.value for version in VLLMVersion]: + from chatlearn.models.vllm_module import VLLMModule + + # for compatibility, remove later + class 
RLHFVLLMModule(VLLMModule): + """RLHFVLLMModule is deprecated, please use VLLMModule""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + logger.warning("RLHFVLLMModule is deprecated, please use VLLMModule") + + +# for compatibility, remove later +class RLHFModule(BaseModule): + """RLHFModule is deprecated, please use BaseModule""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + logger.warning("RLHFModule is deprecated, please use BaseModule") + + +# for compatibility, remove later +class RLHFTorchModule(TorchModule): + """RLHFTorchModule is deprecated, please use TorchModule""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + logger.warning("RLHFTorchModule is deprecated, please use TorchModule") + + +# for compatibility, remove later +class RLHFMegatronModule(MegatronModule): + """RLHFMegatronModule is deprecated, please use MegatronModule""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + logger.warning("RLHFMegatronModule is deprecated, please use MegatronModule") diff --git a/chatlearn/checkpoint/__init__.py b/chatlearn/checkpoint/__init__.py index 028ddcd3..df6408db 100644 --- a/chatlearn/checkpoint/__init__.py +++ b/chatlearn/checkpoint/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/chatlearn/checkpoint/checkpoint_manager.py b/chatlearn/checkpoint/checkpoint_manager.py index ddd03e74..fbb9a4a6 100644 --- a/chatlearn/checkpoint/checkpoint_manager.py +++ b/chatlearn/checkpoint/checkpoint_manager.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -82,15 +82,15 @@ def _get_path(fn): meta_data = {"episode": episode, "train_iteration": train_iter, "consumed_samples": consumed_samples, - "sample_per_episode": self._model.rlhf_args.sample_per_episode} + "sample_per_episode": self._model.runtime_args.sample_per_episode} with open(_get_path("meta.pkl"), 'wb') as f: pickle.dump(meta_data, f) - - self._set_latest_iteration(train_iter) - # only reserve max nums of ckpt folders if needed - if isinstance(self._max_ckpt_nums, int): - self._delete_ckpt_files() + if replica_id == 0: + self._set_latest_iteration(train_iter) + # only reserve max nums of ckpt folders if needed + if isinstance(self._max_ckpt_nums, int): + self._delete_ckpt_files() log_rank_0("Checkpointing is done.") return True @@ -133,7 +133,7 @@ def resume(self): return self._meta meta = self.resume_meta() if meta is not None: - self._model.rlhf_args.consumed_samples = meta["consumed_samples"] + self._model.runtime_args.consumed_samples = meta["consumed_samples"] log_rank_0(f"set consumed_samples to {meta['consumed_samples']}") self._resumed = True return meta diff --git a/chatlearn/data/__init__.py b/chatlearn/data/__init__.py index 028ddcd3..df6408db 100644 --- a/chatlearn/data/__init__.py +++ b/chatlearn/data/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/chatlearn/data/data.py b/chatlearn/data/data.py index 8095ee31..536b5d17 100644 --- a/chatlearn/data/data.py +++ b/chatlearn/data/data.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -98,10 +98,10 @@ def split_batch(batch): class StreamDataset: """dataset built from queues""" - def __init__(self, data_loader_type, batch_size, padding_config=None, max_relay_episode=1): + def __init__(self, data_loader_type, batch_size, padding_config=None, max_relay_episode=0, relay_episode_offset=0): """ Args: - data_loader_type: fixed/dynamic/relay + data_loader_type: fixed or dynamic """ if data_loader_type == "fixed": self._dynamic_dataset = False @@ -114,9 +114,9 @@ def __init__(self, data_loader_type, batch_size, padding_config=None, max_relay_ if max_relay_episode < 0: max_relay_episode = math.inf self._max_relay_episode = max_relay_episode + self._relay_episode_offset = relay_episode_offset self._episode_relay_buffers = [] - def shuffle(self): """ shuffle relay buffer @@ -197,12 +197,12 @@ def has_next(self): def set_dataset(self, queue, episode_id, relay_sample_fn=None, sample_per_episode=-1): relay_buffer = EpisodeRelayBuffer(episode_id, queue=queue) - if self._max_relay_episode > 1: + if self._max_relay_episode > 0 and episode_id >= self._relay_episode_offset: self._episode_relay_buffers.append(relay_buffer) if len(self._episode_relay_buffers) > self._max_relay_episode: old_buffer = self._episode_relay_buffers.pop(0) del old_buffer - if self._max_relay_episode > 1: + # this function will sync until all data computing finished, # which will block training until environment rollout finished. relay_buffer.sync() diff --git a/chatlearn/data/ranking.py b/chatlearn/data/ranking.py index 3501babd..dd25d5b2 100644 --- a/chatlearn/data/ranking.py +++ b/chatlearn/data/ranking.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/chatlearn/data/sampler.py b/chatlearn/data/sampler.py index 5d8040b4..59e62fe6 100644 --- a/chatlearn/data/sampler.py +++ b/chatlearn/data/sampler.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,14 +21,16 @@ class SingleDataSampler: """SingleDataSampler""" def __init__(self, total_samples, consumed_samples, micro_batch_size, - data_parallel_rank, data_parallel_size, drop_last=False): + data_parallel_rank, data_parallel_size, dynamic_batch_size_flag=False, drop_last=False): # Keep a copy of input params for later use. self.total_samples = total_samples self.consumed_samples = consumed_samples self.micro_batch_size = micro_batch_size self.data_parallel_rank = data_parallel_rank + self.remainder = (total_samples - consumed_samples) % data_parallel_size \ + if dynamic_batch_size_flag else 0 self.micro_batch_times_data_parallel_size = \ - self.micro_batch_size * data_parallel_size + self.micro_batch_size * data_parallel_size + self.remainder self.drop_last = drop_last self.data_parallel_size = data_parallel_size @@ -48,8 +50,11 @@ def __len__(self): return self.total_samples def get_start_end_idx(self): - start_idx = self.data_parallel_rank * self.micro_batch_size - end_idx = start_idx + self.micro_batch_size + start_batch_size_plus = self.data_parallel_rank if self.data_parallel_rank < self.remainder else self.remainder + start_idx = self.data_parallel_rank * self.micro_batch_size + start_batch_size_plus + batch_size_plus = 1 if self.data_parallel_rank < self.remainder else 0 + batch_size = self.micro_batch_size + batch_size_plus + end_idx = start_idx + batch_size return 
start_idx, end_idx def __iter__(self): @@ -103,32 +108,33 @@ def get_start_end_idx(self, batch): indices = utils.split_index(len(batch), self.data_parallel_size) return indices[self.data_parallel_rank] - def __iter__(self): - batch = [] - # Last batch will be dropped if drop_last is not set False + def iter_internal(self, batch): # for cycle purpose + if self.consumed_samples >= self.total_samples: + self.consumed_samples = self.consumed_samples % self.total_samples for idx in chain(range(self.consumed_samples, self.total_samples), range(self.consumed_samples)): batch.append(idx) self.episode_offset += 1 + self.consumed_samples += 1 if len(batch) == self.micro_batch_times_data_parallel_size or \ - self.episode_offset == self.sample_per_episode: + self.episode_offset == self.sample_per_episode: + return True + return False + + def __iter__(self): + batch = [] + while True: + # Last batch will be dropped if drop_last is set True + batch_gen_flag = self.iter_internal(batch) + # Check the last partial batch and see drop_last is set + if len(batch) > 0 and not self.drop_last and not batch_gen_flag: + # wrap it to sample_per_episode + batch_gen_flag = self.iter_internal(batch) + + if batch_gen_flag: start_idx, end_idx = self.get_start_end_idx(batch) yield batch[start_idx:end_idx] batch = [] + if self.episode_offset == self.sample_per_episode: self.episode_offset = 0 - - # Check the last partial batch and see drop_last is set - if len(batch) > 0 and not self.drop_last: - # wrap it to sample_per_episode - while self.episode_offset < self.sample_per_episode: - for idx in range(self.sample_per_episode - self.episode_offset): - batch.append(idx) - self.episode_offset += 1 - if len(batch) == self.micro_batch_times_data_parallel_size or \ - self.episode_offset == self.sample_per_episode: - start_idx, end_idx = self.get_start_end_idx(batch) - yield batch[start_idx:end_idx] - batch = [] - if self.episode_offset == self.sample_per_episode: - self.episode_offset = 0 diff --git 
a/chatlearn/data/storage.py b/chatlearn/data/storage.py index 7308f593..3d12dc0c 100644 --- a/chatlearn/data/storage.py +++ b/chatlearn/data/storage.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/chatlearn/hooks.py b/chatlearn/hooks.py index 0cd95643..ee1070bc 100644 --- a/chatlearn/hooks.py +++ b/chatlearn/hooks.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,3 +15,4 @@ """hooks""" from chatlearn.models.megatron import hooks # pylint: disable=unused-import +from chatlearn.models.vllm import hooks as vllm_hooks # pylint: disable=unused-import diff --git a/chatlearn/launcher/__init__.py b/chatlearn/launcher/__init__.py index 028ddcd3..df6408db 100644 --- a/chatlearn/launcher/__init__.py +++ b/chatlearn/launcher/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/chatlearn/launcher/dlc_utils.py b/chatlearn/launcher/dlc_utils.py index e677d65d..6a3a5557 100644 --- a/chatlearn/launcher/dlc_utils.py +++ b/chatlearn/launcher/dlc_utils.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -127,7 +127,8 @@ def start_ray_cluster(): master_addr = get_master_addr() rank = get_rank() if rank == 0: - cmd = f"ray start --head --port={port} --node-ip-address={master_addr} --node-manager-port {node_manager_port} --node-name={master_addr}" + cmd = f"RAY_prestart_worker_first_driver=0 ray start --head --port={port} --node-ip-address={master_addr} " + \ + f"--node-manager-port {node_manager_port} --node-name={master_addr}" else: cmd = f"ray start --address={master_addr}:{port} --node-manager-port {node_manager_port} --node-name={get_addr()}" logger.info(f"execute {cmd}") diff --git a/chatlearn/launcher/initialize.py b/chatlearn/launcher/initialize.py index 7e641e2d..e386c6aa 100644 --- a/chatlearn/launcher/initialize.py +++ b/chatlearn/launcher/initialize.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -51,7 +51,7 @@ def _set_runtime_env(runtime_env_args, attribute, runtime_env): _set_runtime_env(runtime_env_args, key, runtime_env) # namespace is needed to get NamedActor - ray.init(runtime_env=runtime_env, namespace="RLHF", _node_ip_address=dlc_utils.get_addr(), log_to_driver=False) + ray.init(runtime_env=runtime_env, namespace="CHATLEARN", _node_ip_address=dlc_utils.get_addr(), log_to_driver=False) def init(args=None): @@ -74,4 +74,4 @@ def init(args=None): logger.info(f"RANK: {dlc_utils.get_rank()}: task finish, exit ...") # other workers exit after head exit sys.exit(0) - logger.info(f"init rlhf done, rlhf version {VERSION}") + logger.info(f"init chatlearn done, version {VERSION}") diff --git a/chatlearn/models/__init__.py b/chatlearn/models/__init__.py index f0907cc8..c706352a 100644 --- a/chatlearn/models/__init__.py +++ b/chatlearn/models/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,4 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Megatron related.""" +"""Module related.""" diff --git a/chatlearn/models/rlhf_module.py b/chatlearn/models/base_module.py similarity index 58% rename from chatlearn/models/rlhf_module.py rename to chatlearn/models/base_module.py index c209f3b4..9ec0a7a3 100644 --- a/chatlearn/models/rlhf_module.py +++ b/chatlearn/models/base_module.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -12,16 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""RLHF base module""" +"""base module""" from collections import defaultdict +from functools import reduce from itertools import cycle -import inspect import math +import operator import os +import torch import ray import ray.util.collective as col +from ray.util.collective.collective_group.base_collective_group import BaseGroup +from ray.util.collective.collective_group.nccl_collective_group import NCCLGroup from torch.utils.data import DataLoader from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors @@ -37,8 +41,8 @@ from chatlearn.launcher import dlc_utils -class RLHFModule: - """RLHFModule is the base class for RLHF models. +class BaseModule: + """BaseModule is the base class for Base models. Args ---- @@ -55,22 +59,29 @@ def __init__(self, name, args=None, replica_id=0): set_global_variables(args) self.global_args = global_args args = global_args.models[name] - self.total_device = args.num_device + self.total_gpu = args.num_gpu + self.total_cpu = args.num_cpu self.gpu_per_process = args.gpu_per_process self.trainable = args.trainable - self._rlhf_args = self.global_args.rlhf_args + self._runtime_args = self.global_args.runtime_args self._module_args = args self.replica_id = replica_id self.config_dir = args.config_dir - self._num_device_per_replica = args.tensor_model_parallel_size * args.pipeline_model_parallel_size - assert self._num_device_per_replica <= self.total_device - assert self.total_device % self._num_device_per_replica == 0 - if not self.trainable: - self._num_replica = args.num_device // self._num_device_per_replica + + if self.total_gpu > 0: + self._num_gpu_per_replica = args.tensor_model_parallel_size * args.pipeline_model_parallel_size * args.zero_size + assert self._num_gpu_per_replica <= self.total_gpu + assert 
self.total_gpu % self._num_gpu_per_replica == 0 + if not self.trainable: + self._num_replica = args.num_gpu // self._num_gpu_per_replica + else: + # For trainable models, perform the DP inside DistActor + self._num_replica = 1 + self._num_gpu_per_replica = self.total_gpu else: - # For trainable models, perform the DP inside DistActor - self._num_replica = 1 - self._num_device_per_replica = self.total_device + self._num_gpu_per_replica = 0 + self._num_replica = args.num_replica + assert self._num_replica >= 1 self._param_ranks = None self._named_parameters = None @@ -80,7 +91,7 @@ def __init__(self, name, args=None, replica_id=0): self.error_signal = None self._rank = None self._world_size = None - self._group_name = None + self._group_names = [] self._dataloader = None self._eval_dataloader = None self._kl_coef = None @@ -89,13 +100,18 @@ def __init__(self, name, args=None, replica_id=0): self._timers = None self._data_iter = None self._eval_data_iter = None - self.call_func = None - self.eval_call_func = None + self.call_funcs = [] + self.trainable_funcs = [] self._data_ckpt_manager = None self._peak_memory = 0 self._parameters_to_sync = defaultdict(list) + self._concat_params_dict = None + self._to_fix_act_ordering_dict = None + self._to_fix_qkv_ordering_dict = None + self._to_fix_qkv_ordering_func = None # current compute iteration self._iteration = 0 + self._train_iteration = 0 self.enable_lora = self._module_args.lora.enable_lora self._finalized = False self._resume_training = False @@ -106,7 +122,7 @@ def __init__(self, name, args=None, replica_id=0): self._dummy_inputs = [] # parameter sync from src_model self._src_parameter_model = None - self.to_offload_optimizer_states = self.module_args.offload_optimizer_states + self.profiler = None def finalize(self): """ @@ -123,12 +139,12 @@ def _assert_not_finalized(self): assert not self._finalized, f"{self} is finalized, any change to the class should happen before finalize." 
@property - def rlhf_args(self): + def runtime_args(self): """ - Return the arguments related to RLHF training, - the settings that are specified under the "rlhf" section of the YAML configuration file. + Return the arguments related to alignment training, + the settings that are specified under the "runtime" section of the YAML configuration file. """ - return self._rlhf_args + return self._runtime_args @property def model_args(self): @@ -141,10 +157,14 @@ def model_args(self): @property def module_args(self): """ - Return module arguments. module_args include `num_device`, `gpu_per_process`, `model_config_file`, etc. + Return module arguments. module_args include `num_gpu`, `gpu_per_process`, `model_config_file`, etc. """ return self._module_args + @property + def parameter_sync_frequency(self): + return self.module_args.sync_frequency + def set_env(self, args): """ set system env, private @@ -181,7 +201,7 @@ def data_ckpt_manager(self): """ :meta private: """ - if self.rlhf_args.data_checkpoint_path is not None: + if self.runtime_args.data_checkpoint_path is not None: assert self._data_ckpt_manager is not None return self._data_ckpt_manager @@ -190,21 +210,21 @@ def model_setup(self): :meta private: """ self.global_args.active_module_args = self._module_args - if self.rlhf_args.data_checkpoint_path is not None: - self._data_ckpt_manager = CheckpointManager(self, self.rlhf_args.data_checkpoint_path, - self.rlhf_args.max_data_ckpt_nums, - self.rlhf_args.load_data_checkpoint_iteration) - if self.rlhf_args.enable_resume_training: + if self.runtime_args.data_checkpoint_path is not None: + self._data_ckpt_manager = CheckpointManager(self, self.runtime_args.data_checkpoint_path, + self.runtime_args.max_data_ckpt_nums, + self.runtime_args.load_data_checkpoint_iteration) + if self.runtime_args.enable_resume_training: meta = self._data_ckpt_manager.resume() if meta: - self._resume_training = self.rlhf_args.consumed_samples > 0 + self._resume_training = 
self.runtime_args.consumed_samples > 0 start_episode = meta["episode"] + 1 - self._iteration = start_episode * math.ceil(self.rlhf_args.sample_per_episode / \ + self._iteration = start_episode * math.ceil(self.runtime_args.sample_per_episode / \ self._num_replica / self.module_args.generation_batch_size) log_rank_0(f"{self.name} resume training {self._resume_training}: set start iteration to {self._iteration}", self._logger) self.setup() - def forward_step(self, data, iteration=None): + def forward_step(self, data, iteration): """ Perform forward step for one batch. @@ -222,7 +242,7 @@ def forward_step(self, data, iteration=None): where the first dim of tensor or the len of list equals to batch size """ - def train_step(self, data, train_info): + def train_step(self, data, iteration): """ Perform train_step for one batch, including a list of micro-batches. @@ -230,8 +250,8 @@ def train_step(self, data, train_info): ---- data : [Dict] A list of micro-batch for train_step, type of each micro-batch is dict - train_info : Dict - A dict of training meta, includes training information, e.g., "iteration" + iteration : int + local train iteration """ def eval_step(self, data): @@ -260,15 +280,15 @@ def save_checkpoint(self, iteration): Current training iteration """ - def save_data_checkpoint(self, replica_id, iteration, ppo_iter): + def save_data_checkpoint(self, replica_id, iteration, episode_id): """ Save checkpoint for dataloader. 
:meta private: """ if self.data_ckpt_manager is not None: - consumed_samples = self.rlhf_args.consumed_samples - self.data_ckpt_manager.save_checkpoint(replica_id, iteration, ppo_iter, consumed_samples) + consumed_samples = self.runtime_args.consumed_samples + self.data_ckpt_manager.save_checkpoint(replica_id, iteration, episode_id, consumed_samples) def put(self, key, data): """ @@ -299,7 +319,6 @@ def validate(self): """ :meta private: """ - return "ok" def before_episode(self): """ @@ -325,39 +344,28 @@ def build_dataset(self, train_prompts, is_eval=False): Dataset with user-defined collate_fn """ - def _build_dataloader(self, data, batch_size, sample_per_episode_per_replica=-1, is_eval=False): + def _build_dataloader(self, data, batch_size, dynamic_batch_size_flag=False, is_eval=False): """ build and set the dataloader for the model Args: data: a list of string - sample_per_episode_per_replica: an integer indicate how many samples - per episode and per replica will consume (default: `-1`) is_eval: set to `True` to build a dataloader for evaluation (default: `False`) :meta private: """ - if not is_eval: - assert sample_per_episode_per_replica > 0, \ - "The dataloader for training expect positive sample_per_episode_per_replica, "\ - f"but got {sample_per_episode_per_replica}" - if "is_eval" in inspect.getfullargspec(self.build_dataset).args: - dataset = self.build_dataset(data, is_eval) # pylint: disable=assignment-from-no-return - else: - dataset = self.build_dataset(data) # pylint: disable=assignment-from-no-return - assert hasattr(dataset, 'collate_fn'), \ - f"{dataset.__class__.__name__} has no attribute `collate_fn`. 
If you would like "\ - "to use the default collate_fn to batch samples, try adding `self.collate_fn = None` "\ - "to your Dataset object" + dataset = self.build_dataset(data, is_eval) # pylint: disable=assignment-from-no-return consumed_samples = 0 if not is_eval: if self.data_ckpt_manager is not None: - consumed_samples = self.rlhf_args.consumed_samples + consumed_samples = self.runtime_args.consumed_samples + collate_fn = dataset.collate_fn if hasattr(dataset, 'collate_fn') else None dataloader = self.build_dataloader(dataset, batch_size=batch_size, - collate_fn=dataset.collate_fn, + collate_fn=collate_fn, is_eval=is_eval, + dynamic_batch_size_flag=dynamic_batch_size_flag, consumed_samples=consumed_samples) if is_eval: @@ -373,6 +381,7 @@ def build_dataloader(self, batch_size, collate_fn=None, is_eval=False, + dynamic_batch_size_flag=False, consumed_samples=0): """ build the dataloader for the model @@ -391,14 +400,15 @@ def build_dataloader(self, consumed_samples=0, micro_batch_size=batch_size, data_parallel_rank=self.replica_id, - data_parallel_size=self._num_replica) + data_parallel_size=self._num_replica, + dynamic_batch_size_flag=dynamic_batch_size_flag) else: batch_sampler = EpisodeDataSampler(total_samples=len(dataset), consumed_samples=consumed_samples, micro_batch_size=batch_size, data_parallel_rank=self.replica_id, data_parallel_size=self._num_replica, - sample_per_episode=self.rlhf_args.sample_per_episode) + sample_per_episode=self.runtime_args.sample_per_episode) return DataLoader( dataset, batch_sampler=batch_sampler, collate_fn=collate_fn, pin_memory=True ) @@ -407,7 +417,8 @@ def reset_eval_data_iter(self): """ :meta private: """ - self._eval_data_iter = iter(self._eval_dataloader) + if self._eval_dataloader is not None: + self._eval_data_iter = iter(self._eval_dataloader) def next_batch(self, is_eval=False): """ @@ -426,26 +437,55 @@ def num_replica(self): return self._num_replica @property - def num_device_per_replica(self): + def 
num_gpu_per_replica(self): """ :meta private: """ - return self._num_device_per_replica + return self._num_gpu_per_replica def setup_collective_group(self, rank, world_size, backend, group_name): """ :meta private: """ - self._group_name = group_name + self._group_names.append(group_name) self._world_size = world_size col.init_collective_group( world_size, rank, backend=backend, group_name=group_name) - def destroy_collective_group(self): + def _destroy_collective_group(self, group_name): """ :meta private: """ - col.destroy_collective_group(self._group_name) + from ray.util.collective.collective import _group_mgr # pylint: disable=import-outside-toplevel + rank = col.get_rank(group_name) + saved_group: BaseGroup = _group_mgr.get_group_by_name(group_name) + saved_comm_keys = [] + if isinstance(saved_group, (NCCLGroup, )): + saved_comm_keys = list(saved_group._dev_comm_map.keys()) + + try: + col.destroy_collective_group(group_name) + except Exception as e: + self._logger.warning(f"_destroy_collective_group {group_name} {e}") + + if isinstance(saved_group, (NCCLGroup, )): + for comm_key in saved_comm_keys: + group_key = saved_group._generate_group_key(comm_key) + from ray.util.collective.const import get_store_name # pylint: disable=import-outside-toplevel + store_name = get_store_name(group_key) + try: + store = ray.get_actor(store_name) + if rank == 0: + raise RuntimeError(f'{store_name} in group {group_name} should be killed on rank {rank}.') + self._logger.debug(f'Kill {store_name} in group {group_name} on rank {rank}') + ray.kill(store) + except ValueError: + ... + + def destroy_collective_group(self): + for group_name in self._group_names: + self._destroy_collective_group(group_name) + self._group_names = [] def get_local_param_ranks(self): """ @@ -483,6 +523,7 @@ def is_last_rank(self): """ Is last rank. 
""" + return True @property def parameters(self): @@ -532,13 +573,144 @@ def param_to_name(self): self._param_to_name[item[1]] = item[0] return self._param_to_name + @property + def concat_params_dict(self): + return self._concat_params_dict + + def get_concat_params_dict(self): + return self._concat_params_dict + + def set_concat_params_dict(self, _concat_params_dict): + self._concat_params_dict = _concat_params_dict + + @property + def to_fix_act_ordering_dict(self): + return self._to_fix_act_ordering_dict + + def get_to_fix_act_ordering_dict(self): + return self._to_fix_act_ordering_dict + + def set_to_fix_act_ordering_dict(self, _to_fix_act_ordering_dict): + self._to_fix_act_ordering_dict = _to_fix_act_ordering_dict + + @property + def to_fix_qkv_ordering_dict(self): + return self._to_fix_qkv_ordering_dict + + def get_to_fix_qkv_ordering_dict(self): + return self._to_fix_qkv_ordering_dict + + def set_to_fix_qkv_ordering_dict(self, _to_fix_qkv_ordering_dict): + self._to_fix_qkv_ordering_dict = _to_fix_qkv_ordering_dict + + @property + def to_fix_qkv_ordering_func(self): + return self._to_fix_qkv_ordering_func + + def get_to_fix_qkv_ordering_func(self): + return self._to_fix_qkv_ordering_func + + def set_to_fix_qkv_ordering_func(self, _to_fix_qkv_ordering_func): + self._to_fix_qkv_ordering_func = _to_fix_qkv_ordering_func + def set_sync_parameters(self, trainable_param_names, pipe_stage=0): """ :meta private: """ - if pipe_stage not in self._parameters_to_sync or len(self._parameters_to_sync[pipe_stage]) == 0: + if pipe_stage not in self._parameters_to_sync or len(self._parameters_to_sync[pipe_stage]) == 0: # pylint: disable=too-many-nested-blocks + concat = [] + set_sync_param_flag = False + + if self.concat_params_dict is not None: + if isinstance(self.concat_params_dict, dict): + assert "modules" in self.concat_params_dict + assert "dim" in self.concat_params_dict + assert isinstance(self.concat_params_dict["modules"], list) + concat_modules_list = 
self.concat_params_dict["modules"] + concat_dim = self.concat_params_dict["dim"] + else: + raise RuntimeError(f"Expect concat_params_dict in {self} to be a dict or None, while {self.concat_params_dict}.") + + if self.to_fix_act_ordering_dict is not None: + if isinstance(self.to_fix_act_ordering_dict, dict): + assert "modules" in self.to_fix_act_ordering_dict + assert "dim" in self.to_fix_act_ordering_dict + assert isinstance(self.to_fix_act_ordering_dict["modules"], list) + to_fix_act_ordering_list = self.to_fix_act_ordering_dict["modules"] + fix_dim = self.to_fix_act_ordering_dict["dim"] + else: + raise RuntimeError(f"Expect to_fix_act_ordering_dict in {self} to be a dict or None, while {self.to_fix_act_ordering_dict}.") + + if self.to_fix_qkv_ordering_dict is not None: + if isinstance(self.to_fix_qkv_ordering_dict, dict): + assert "modules" in self.to_fix_qkv_ordering_dict + assert "layer_re" in self.to_fix_qkv_ordering_dict + assert isinstance(self.to_fix_qkv_ordering_dict["modules"], list) + to_fix_modules_list = self.to_fix_qkv_ordering_dict["modules"] + layer_re = self.to_fix_qkv_ordering_dict["layer_re"] + else: + raise RuntimeError(f"Expect to_fix_qkv_ordering_dict in {self} to be a dict or None, while {self.to_fix_qkv_ordering_dict}.") + for name in trainable_param_names: - self._parameters_to_sync[pipe_stage].append(self.named_parameters[name]) + if self.concat_params_dict is None and self.to_fix_act_ordering_dict is None: + set_sync_param_flag = True + _params_to_sync = self.named_parameters[name] + else: + need_concat_or_fix = False + if self.concat_params_dict is not None: + if any([ele in name for ele in concat_modules_list]): # pylint: disable=use-a-generator + concat.append(self.named_parameters[name]) + need_concat_or_fix = True + if len(concat) == len(concat_modules_list): + set_sync_param_flag = True + _params_to_sync = torch.cat(concat, dim=concat_dim) + + if self.to_fix_act_ordering_dict is not None: + if any([ele in name for ele in 
to_fix_act_ordering_list]): # pylint: disable=use-a-generator + val = self.named_parameters[name] + offset = val.shape[0] // 2 + w1 = val[:offset,:] + w2 = val[offset:,:] + need_concat_or_fix = True + set_sync_param_flag = True + _params_to_sync = torch.cat([w2, w1], dim=fix_dim) + + if not need_concat_or_fix: + set_sync_param_flag = True + _params_to_sync = self.named_parameters[name] + + if not set_sync_param_flag: + continue + if self.to_fix_qkv_ordering_dict is not None: + from chatlearn.utils.megatron_import_helper import fix_query_key_value_ordering # pylint: disable=import-outside-toplevel + m = layer_re.match(name) + if m is not None: + op_name = m.group(2) + if op_name in to_fix_modules_list: + checkpoint_version = 3.0 + if self._to_fix_qkv_ordering_func is fix_query_key_value_ordering: + self._to_fix_qkv_ordering_func(_params_to_sync, checkpoint_version) + else: + input_shape = _params_to_sync.size() + tp_size = self.module_args.args_dict["tensor_model_parallel_size"] + heads = self.module_args.args_dict["num_attention_heads"] // tp_size + hidden_size_per_head = \ + self.module_args.args_dict["hidden_size"] // self.module_args.args_dict["num_attention_heads"] + shape = (heads, hidden_size_per_head, 3) + input_shape[1:] + division = reduce(operator.mul, shape, 1) + num_elements = _params_to_sync.numel() + if num_elements == division: + # model with gqa dont need to fix qkv ordering. 
+ weight_or_bias = m.group(3) + _params_to_sync = self._to_fix_qkv_ordering_func( + _params_to_sync, checkpoint_version, 3, heads, hidden_size_per_head + ) + if weight_or_bias == "weight": + _params_to_sync = _params_to_sync.contiguous() + concat = [] + set_sync_param_flag = False + self._parameters_to_sync[pipe_stage].append(_params_to_sync) + def get_parameter_names(self, requires_grad=True): """ @@ -574,10 +746,10 @@ def send_recv_parameter(self, name, rank, group_name, func, pipe_stage=0): """ :meta private: """ - if self.rlhf_args.coalesce_param: + if self.runtime_args.coalesce_param: assert name is None tensors = [param.data for param in self._parameters_to_sync[pipe_stage]] - dense_buckets, sparse_bucket = bucket_tensors(tensors, bucket_size_mb=self.rlhf_args.coalesced_buffer_mb) + dense_buckets, sparse_bucket = bucket_tensors(tensors, bucket_size_mb=self.runtime_args.coalesced_buffer_mb) debug_rank_0(f"{self.name} Got dense_buckets {len(dense_buckets)}, spase_bucket {len(sparse_bucket)}", self._logger) for bucket in dense_buckets: tensor_changed = func is col.recv @@ -588,6 +760,23 @@ def send_recv_parameter(self, name, rank, group_name, func, pipe_stage=0): tensor = self.get_parameter(name) func(tensor, rank, group_name) + def broadcast_parameter(self, rank, src_rank, group_name, pipe_stage=0): + """ + :meta private: + """ + tensors = [param.data for param in self._parameters_to_sync[pipe_stage]] + assert len(tensors) > 0 + dense_buckets, sparse_bucket = bucket_tensors(tensors, bucket_size_mb=self.runtime_args.coalesced_buffer_mb) + debug_rank_0(f"{self.name} Got dense_buckets {len(dense_buckets)}, spase_bucket {len(sparse_bucket)}", self._logger) + tensor_changed = rank != src_rank + + for bucket in dense_buckets: + coalesced_comm_dense(bucket, col.broadcast, extra_args=(src_rank, group_name), tensor_changed=tensor_changed) + + for param in sparse_bucket: + col.broadcast(param, src_rank, group_name) + + def send_parameter(self, name, dst_rank, group_name, 
pipe_stage=0): """ :meta private: @@ -605,10 +794,10 @@ def ray_put_parameter(self, name, group_name, pipe_stage=0): :meta private: """ name2ref = {} - if self.rlhf_args.coalesce_param: + if self.runtime_args.coalesce_param: assert name is None tensors = [param.data for param in self._parameters_to_sync[pipe_stage]] - dense_buckets, sparse_bucket = bucket_tensors(tensors, bucket_size_mb=self.rlhf_args.coalesced_buffer_mb) + dense_buckets, sparse_bucket = bucket_tensors(tensors, bucket_size_mb=self.runtime_args.coalesced_buffer_mb) debug_rank_0(f"{self.name} Put dense_buckets {len(dense_buckets)}, spase_bucket {len(sparse_bucket)}", self._logger) for bucket_id, bucket in enumerate(dense_buckets): flat_tensors = _flatten_dense_tensors(bucket) @@ -627,10 +816,10 @@ def ray_get_parameter(self, name, group_name, name2ref, pipe_stage=0): """ :meta private: """ - if self.rlhf_args.coalesce_param: + if self.runtime_args.coalesce_param: assert name is None tensors = [param.data for param in self._parameters_to_sync[pipe_stage]] - dense_buckets, sparse_bucket = bucket_tensors(tensors, bucket_size_mb=self.rlhf_args.coalesced_buffer_mb) + dense_buckets, sparse_bucket = bucket_tensors(tensors, bucket_size_mb=self.runtime_args.coalesced_buffer_mb) debug_rank_0(f"{self.name} Get dense_buckets {len(dense_buckets)}, spase_bucket {len(sparse_bucket)}", self._logger) for bucket_id, bucket in enumerate(dense_buckets): put_ref = name2ref[group_name + ":dense_bucket_" + str(bucket_id)] @@ -675,12 +864,12 @@ def timers(self, name): self._timers = Timers() return self._timers(name) - def timer_summary(self): + def timer_summary(self, e2e_cost=None): """ :meta private: """ if self._timers: - return self._timers.log() + return self._timers.log(e2e_cost=e2e_cost) def add_padding_config(self, key, padding_value=0.0, padding_type="right"): """ @@ -707,36 +896,7 @@ def peak_memory(self): """ :meta private: """ - - def register_func(self, name): - """ - register func to be called by engine - - 
:meta private: - """ - self._assert_not_finalized() - if self.call_func is not None and self.call_func != name: - raise Exception(f"Only one call func is supported now, got {self.call_func} and {name}") - self.call_func = name - - def register_eval_func(self, name='eval_step'): - """ - Register func to be called by eval engine - - Args - ---- - name: str - function name - """ - self._assert_not_finalized() - if self.eval_call_func is not None and self.eval_call_func != name: - raise Exception(f"Only one call eval_func is supported now, got {self.eval_call_func} and {name}") - self.eval_call_func = name - - def get_call_func(self, is_eval=False): - if is_eval: - return self.eval_call_func - return self.call_func + return 0.0 @property def resume_training(self): @@ -783,3 +943,70 @@ def onload_optimizer_states(self): """ onload optimizer states """ + + def offload_main_weights(self): + """ + offload main weights + """ + + def onload_main_weights(self): + """ + onload main weights + """ + + def offload_weights(self): + """ + offload weights + """ + + def onload_weights(self): + """ + onload weights + """ + + def free_grad_buffers(self): + """ + free grad buffers and related tensors + """ + + def build_grad_buffers(self): + """ + build grad buffers and related tensors + """ + + def onload(self): + pass + + def offload(self): + pass + + @property + def world_size(self): + pass + + @property + def data_parallel_size(self): + """ + data parallel size + + :meta private: + """ + + @property + def data_parallel_rank(self): + """ + data parallel rank + + :meta private: + """ + + def empty_cache(self): + """ + :meta private: + """ + + def get_data_parallel_rank(self): + return self.data_parallel_rank + + def get_data_parallel_size(self): + return self.data_parallel_size diff --git a/chatlearn/models/deepspeed/__init__.py b/chatlearn/models/deepspeed/__init__.py new file mode 100644 index 00000000..89bf5b05 --- /dev/null +++ b/chatlearn/models/deepspeed/__init__.py @@ -0,0 
+1,15 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""deepspeed""" diff --git a/chatlearn/models/deepspeed/deepspeed_utils.py b/chatlearn/models/deepspeed/deepspeed_utils.py new file mode 100644 index 00000000..4fa88934 --- /dev/null +++ b/chatlearn/models/deepspeed/deepspeed_utils.py @@ -0,0 +1,195 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""DeepSpeed utils""" + +import os +import torch +import deepspeed +from deepspeed.ops.adam import DeepSpeedCPUAdam, FusedAdam +from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus +from transformers import AutoTokenizer + + +DEFAULT_PAD_TOKEN = "[PAD]" +DEFAULT_EOS_TOKEN = "" +DEFAULT_BOS_TOKEN = "" +DEFAULT_UNK_TOKEN = "" + + +def get_tokenizer(pretrain_path, model=None, padding_side="left", use_fast=True): + tokenizer = AutoTokenizer.from_pretrained(pretrain_path, trust_remote_code=True, use_fast=use_fast) + tokenizer.padding_side = padding_side + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + tokenizer.pad_token_id = tokenizer.eos_token_id + if model is not None: + model.config.pad_token_id = tokenizer.pad_token_id + + return tokenizer + + +def get_eval_ds_config( + offload, + stage=0, + bf16=True, +): + zero_opt_dict = { + "stage": stage, + "stage3_param_persistence_threshold": "auto", + "offload_param": { + "device": "cpu" if offload else "none", + "pin_memory": True, + }, + } + return { + "steps_per_print": 100, + "zero_optimization": zero_opt_dict, + "bf16": { + "enabled": bf16, + }, + "gradient_clipping": 1.0, + "prescale_gradients": False, + "wall_clock_breakdown": False, + } + +def get_train_ds_config( + offload, + adam_offload=True, + stage=2, + bf16=True, + max_norm=1.0, + zpg=8, + grad_accum_dtype=None, + disable_trace_cache=False, +): + device = "cpu" if offload else "none" + zero_opt_dict = { + "stage": stage, + "offload_param": {"device": device}, + "offload_optimizer": { + "device": "cpu" if adam_offload else "none", + "pin_memory": True, + }, + "sub_group_size": "auto", + "stage3_max_live_parameters": "auto", + "stage3_max_reuse_distance": "auto", + "stage3_param_persistence_threshold": "auto", + "stage3_prefetch_bucket_size": "auto", + "reduce_bucket_size": "auto", + # ZeRO++ + "zero_hpz_partition_size": zpg, + 
"zero_quantized_weights": False, + "zero_quantized_gradients": False, + } + if disable_trace_cache: + zero_opt_dict["stage3_prefetch_bucket_size"] = 0 + zero_opt_dict["stage3_max_live_parameters"] = 0 + zero_opt_dict["stage3_max_reuse_distance"] = 0 + + return { + "steps_per_print": 100, + "zero_optimization": zero_opt_dict, + "bf16": { + "enabled": bf16, + }, + "gradient_clipping": max_norm, + "prescale_gradients": False, + "wall_clock_breakdown": False, + "data_types": {"grad_accum_dtype": grad_accum_dtype if grad_accum_dtype else "fp32"}, + } + +def get_optimizer_grouped_parameters( + model, + weight_decay, + no_decay_name_list=("bias", "layer_norm.weight", "layernorm.weight", "norm.weight", "ln_f.weight"), +): + optimizer_grouped_parameters = [ + { + "params": [ + p + for n, p in model.named_parameters() + if (not any(nd in n for nd in no_decay_name_list) and p.requires_grad) + ], + "weight_decay": weight_decay, + }, + { + "params": [ + p + for n, p in model.named_parameters() + if (any(nd in n for nd in no_decay_name_list) and p.requires_grad) + ], + "weight_decay": 0.0, + }, + ] + return optimizer_grouped_parameters + +def create_optimizer(model, adam_offload, **kwargs): + # Optimizer + AdamOptimizer = DeepSpeedCPUAdam if adam_offload else FusedAdam + optim_params = get_optimizer_grouped_parameters(model, kwargs["weight_decay"]) + optim = AdamOptimizer(optim_params, **kwargs) + return optim + +def save_hf_format(model, tokenizer, output_dir, sub_folder=""): + # used to save huggingface format, so we can use it for hf.from_pretrained + model_to_save = model.module if hasattr(model, 'module') else model + CONFIG_NAME = "config.json" + WEIGHTS_NAME = "pytorch_model.bin" + output_dir = os.path.join(output_dir, sub_folder) + os.makedirs(output_dir, exist_ok=True) + output_model_file = os.path.join(output_dir, WEIGHTS_NAME) + output_config_file = os.path.join(output_dir, CONFIG_NAME) + save_dict = model_to_save.state_dict() + for key in list(save_dict.keys()): + if 
"lora" in key: + del save_dict[key] + torch.save(save_dict, output_model_file) + model_to_save.config.to_json_file(output_config_file) + tokenizer.save_vocabulary(output_dir) + +def _z3_params_to_fetch(param_list): + return [ + p for p in param_list + if hasattr(p, 'ds_id') and p.ds_status == ZeroParamStatus.NOT_AVAILABLE + ] + + +def save_zero_three_model(model_ema, global_rank, save_dir, zero_stage=0): + zero_stage_3 = (zero_stage == 3) + os.makedirs(save_dir, exist_ok=True) + WEIGHTS_NAME = "pytorch_model.bin" + output_model_file = os.path.join(save_dir, WEIGHTS_NAME) + + model_to_save = model_ema.module if hasattr(model_ema, + 'module') else model_ema + if not zero_stage_3: + if global_rank == 0: + torch.save(model_to_save.state_dict(), output_model_file) + else: + output_state_dict = {} + for k, v in model_to_save.named_parameters(): + + if hasattr(v, 'ds_id'): + with deepspeed.zero.GatheredParameters(_z3_params_to_fetch([v + ]), + enabled=zero_stage_3): + v_p = v.data.cpu() + else: + v_p = v.cpu() + if global_rank == 0 and "lora" not in k: + output_state_dict[k] = v_p + if global_rank == 0: + torch.save(output_state_dict, output_model_file) + del output_state_dict diff --git a/chatlearn/models/deepspeed_module.py b/chatlearn/models/deepspeed_module.py new file mode 100644 index 00000000..b8f1061a --- /dev/null +++ b/chatlearn/models/deepspeed_module.py @@ -0,0 +1,213 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""DeepSpeed module""" + +from datetime import timedelta +import importlib +import math +import os +import random +import numpy as np +import torch +from torch import distributed as dist +from transformers import AutoModelForCausalLM, GenerationConfig +from transformers.integrations import HfDeepSpeedConfig +from transformers.trainer import get_scheduler + +from chatlearn.utils.utils import dict_to_simplenamespace +from .deepspeed.deepspeed_utils import get_eval_ds_config, get_tokenizer, get_train_ds_config, create_optimizer +from .deepspeed.deepspeed_utils import save_hf_format, save_zero_three_model +from .torch_module import TorchModule + +if importlib.util.find_spec("deepspeed"): + import deepspeed + + +class DeepSpeedModule(TorchModule): + """DeepSpeedModule is the class for models accelerated with DeepSpeed. 
+ + Args + ---- + name : str + model name + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + if not self.trainable: + # inference only + if self.model_args.get("train_micro_batch_size") != self.module_args.generation_batch_size: + self._logger.info( + f"{self.name} Overwrite train_micro_batch_size with generation_batch_size {self.module_args.generation_batch_size}") + self.train_micro_batch_size = self.module_args.generation_batch_size + else: + self.train_micro_batch_size = self.runtime_args.train_micro_batch_size + self.train_global_batch_size = self.runtime_args.train_global_batch_size + + self.zero_size = self.module_args.zero_size + + def set_seed(self, seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + def setup_distributed(self, timeout): + self.set_seed(self.seed) + local_rank = int(os.environ["LOCAL_RANK"]) + torch.cuda.set_device(local_rank) + deepspeed.init_distributed(timeout=timeout) + + def prepare(self, *models_or_model_optim_pairs): + ret = [] + for arg in models_or_model_optim_pairs: + if not isinstance(arg, tuple): + ret.append(self._ds_init_eval_model(arg)) + else: + assert len(arg) == 3, f'Expect (model, optimizer, scheduler) pair, got a tuple with size "{len(arg)}"' + ret.append(self._ds_init_train_model(*arg)) + + return ret[0] if len(ret) == 1 else ret + + def _ds_init_eval_model(self, model): + ds_config = self.get_ds_eval_config(offload=getattr(model, "_offload", False)) + local_rank = int(os.environ['LOCAL_RANK']) + + engine, *_ = deepspeed.initialize( + model=model, + args={"local_rank": local_rank}, + config=ds_config, + dist_init_required=True, + ) + model = engine + return model + + def _ds_init_train_model(self, model, optim, scheduler): + ds_config = self.get_ds_train_config() + local_rank = int(os.environ['LOCAL_RANK']) + + engine, optim, _, scheduler = deepspeed.initialize( + model=model, + optimizer=optim, + lr_scheduler=scheduler, 
+ config=ds_config, + args={"local_rank": local_rank}, + dist_init_required=True, + ) + model = engine + return model, optim, scheduler + + def get_ds_eval_config(self, offload=False): + # DS Config + ds_config = get_eval_ds_config(offload=offload, stage=self.zero_stage if self.zero_stage == 3 else 0, bf16=self.bf16) + ds_config["train_micro_batch_size_per_gpu"] = self.train_micro_batch_size + ds_config["train_batch_size"] = self.train_micro_batch_size * self.zero_size + return ds_config + + def get_ds_train_config(self): + # DS Config + ds_config = get_train_ds_config( + offload=False, + adam_offload=self.adam_offload, + stage=self.zero_stage, + bf16=self.bf16, + max_norm=self.max_norm, + grad_accum_dtype="fp32", + disable_trace_cache=self.disable_trace_cache, + ) + ds_config["train_micro_batch_size_per_gpu"] = self.train_micro_batch_size + ds_config["gradient_accumulation_steps"] = self.train_global_batch_size // self.train_micro_batch_size // self.world_size + ds_config["train_batch_size"] = self.train_global_batch_size + + return ds_config + + def create_model(self, args): + # TODO: try attn_implementation="flash_attention_2" + model = AutoModelForCausalLM.from_pretrained( + args.pretrain_or_model, + trust_remote_code=True, + attn_implementation="flash_attention_2", + torch_dtype=torch.bfloat16 if self.bf16 else "auto" + ) + return model + + + def model_setup(self): + super().model_setup() + args = dict_to_simplenamespace(self.model_args) + self.prompt_max_len = getattr(args, "prompt_max_len", 1024) + self.args = args + self.zero_stage = getattr(args, "zero_stage", 3) + self.bf16 = args.bf16 + self.seed = getattr(args, "seed", 42) + self.max_norm = getattr(args, "max_norm", 1.0) + dist_timeout = getattr(args, 'distributed_timeout', 30) + self.setup_distributed(timedelta(minutes=dist_timeout)) + # TODO: deal with offload later + ds_config = self.get_ds_eval_config(offload=False) + # efficiently deploy DeepSpeed stage 3, you must instantiate the HfDeepSpeedConfig 
+ # object before instantiating the model. + # https://huggingface.co/transformers/v4.9.2/main_classes/deepspeed.html + dschf = HfDeepSpeedConfig(ds_config) if ds_config is not None and self.zero_stage == 3 else None # pylint: disable=unused-variable + model = self.create_model(self.args) + self.tokenizer = get_tokenizer( + args.pretrain_or_model, model, "left", use_fast=True + ) + if self.trainable: + if getattr(args, "gradient_checkpointing", False): + model.gradient_checkpointing_enable() + self.disable_trace_cache = True + learning_rate = float(args.learning_rate) + self.adam_offload = False + num_update_steps_per_episodes = self.runtime_args.sample_per_episode // self.train_global_batch_size + l2 = float(args.l2) + max_steps = math.ceil(self.runtime_args.num_episode * num_update_steps_per_episodes) + optimizer = create_optimizer( + model, self.adam_offload, lr=learning_rate, betas=(0.9, 0.95), weight_decay=l2 + ) + scheduler = get_scheduler("cosine_with_min_lr", + optimizer, + num_warmup_steps=math.ceil(max_steps * 0.03), + num_training_steps=max_steps, + scheduler_specific_kwargs={"min_lr": learning_rate * 0.1},) + self.model, self.optimizer, self.scheduler = self.prepare((model, optimizer, scheduler)) + else: + self.model = self.prepare(model) + self.generation_config = GenerationConfig.from_pretrained(args.pretrain_or_model, trust_remote_code=True) + self.tokenizer.eos_token_id = self.generation_config.eos_token_id + + if not self.trainable: + self.model.eval() + + @property + def data_parallel_size(self): + """ + :meta private: + """ + return dist.get_world_size() + + @property + def data_parallel_rank(self): + """ + :meta private: + """ + return dist.get_rank() + + def save_checkpoint(self, iteration): + save_dir = f"{self.runtime_args.output_dir}/save_model/{self.name}/{iteration}" + save_hf_format(self.model, self.tokenizer, save_dir) + save_zero_three_model(self.model, torch.distributed.get_rank(), save_dir, self.zero_stage) + self._logger.info(f"save 
checkpoint to {save_dir}") diff --git a/chatlearn/models/megatron/hooks/__init__.py b/chatlearn/models/megatron/hooks/__init__.py index d49f9dcb..2389ae70 100644 --- a/chatlearn/models/megatron/hooks/__init__.py +++ b/chatlearn/models/megatron/hooks/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,7 +20,7 @@ megatron_exist = importlib.util.find_spec("megatron") if megatron_exist: - from megatron.initialize import initialize_megatron + from chatlearn.utils.megatron_import_helper import initialize_megatron if "args_dict" not in inspect.getfullargspec(initialize_megatron).args: from chatlearn.models.megatron.hooks import transformer from chatlearn.models.megatron.hooks import generation diff --git a/chatlearn/models/megatron/hooks/generation.py b/chatlearn/models/megatron/hooks/generation.py index 5899efc2..c1e728d2 100644 --- a/chatlearn/models/megatron/hooks/generation.py +++ b/chatlearn/models/megatron/hooks/generation.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,10 +15,9 @@ """Megatron generation with limit in min_prompt_length.""" import inspect -from megatron.text_generation import generation # pylint: disable=unused-import,wildcard-import -from megatron.text_generation.generation import * -from megatron.text_generation.generation import _build_attention_mask_and_position_ids +from chatlearn.utils.megatron_import_hook_helper import * +from chatlearn.utils.megatron_import_hook_helper import _build_attention_mask_and_position_ids # pylint: enable=unused-import,wildcard-import from chatlearn.utils.utils import detect_and_insert_code_to_func diff --git a/chatlearn/models/megatron/hooks/transformer.py b/chatlearn/models/megatron/hooks/transformer.py index 5a224379..eb5e1ec2 100644 --- a/chatlearn/models/megatron/hooks/transformer.py +++ b/chatlearn/models/megatron/hooks/transformer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,53 +16,26 @@ Add attention_acc kernel to speedup Attention when seq_len=1 """ -# pylint: disable=unused-import,wildcard-import +# pylint: disable=unused-import,wildcard-import,unused-wildcard-import,ungrouped-imports import inspect -import torch + try: - from megatron import get_args - from megatron.core import mpu, tensor_parallel - from megatron.model.enums import AttnType - from megatron.model.transformer import ParallelAttention - from megatron.model.transformer import * + from chatlearn.utils.megatron_import_transformer_helper import ParallelAttention + from chatlearn.utils.megatron_import_transformer_helper import * except ImportError: ParallelAttention = None -try: - from einops import rearrange -except ImportError: - rearrange = None -# pylint: enable=unused-import,wildcard-import -from chatlearn.utils.utils import detect_and_insert_code_to_func -def add_attn_acc_one_seq_kernel(source_code): - if 'elif not self.use_flash_attn:' in source_code: - return - pattern = 'if not self.use_flash_attn:' - new_code = \ -""" -args = get_args() -use_attn_acc = hasattr(args, 'use_attn_acc') and args.use_attn_acc -if use_attn_acc and query_layer.size(0) == 1: - import attention_acc - context_layer = attention_acc.mha( - query_layer, - key_layer, - value_layer - ) - context_layer = context_layer.view(context_layer.size(0), context_layer.size(1), -1) -""" - source_code = detect_and_insert_code_to_func(source_code, pattern, new_code) - if source_code is None: - return - source_code = source_code.replace('if not self.use_flash_attn:', 'elif not self.use_flash_attn:') - return source_code +from chatlearn.utils.utils import detect_and_insert_code_to_func +# pylint: enable=unused-import,wildcard-import,unused-wildcard-import,ungrouped-imports + def apply_rotary_pos_emb_variable_seq(source_code): pattern = 'rotary_pos_emb = (q_pos_emb, k_pos_emb)' new_code = \ """ else: + # apply_rotary_pos_emb_variable_seq if rotary_pos_emb is not None: q_pos_emb, k_pos_emb = 
rotary_pos_emb sequence_end = query_layer.size(0) @@ -73,14 +46,12 @@ def apply_rotary_pos_emb_variable_seq(source_code): return detect_and_insert_code_to_func(source_code, pattern, new_code, -8, 1) def modify_code(source): - source = add_attn_acc_one_seq_kernel(source) if source is not None: return apply_rotary_pos_emb_variable_seq(source) - return source if ParallelAttention is not None: src_code = inspect.getsource(ParallelAttention.forward) - if 'use_attn_acc' not in src_code: + if '# apply_rotary_pos_emb_variable_seq' not in src_code: src_code = modify_code(src_code) if src_code is not None: exec(src_code) # pylint: disable=exec-used diff --git a/chatlearn/models/megatron/lora/__init__.py b/chatlearn/models/megatron/lora/__init__.py index ab550fe1..f68d6bf8 100644 --- a/chatlearn/models/megatron/lora/__init__.py +++ b/chatlearn/models/megatron/lora/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,6 +16,5 @@ from .layers import convert_layer_to_lora from .layers import fuse_lora_layer -from .layers import linear_with_grad_accumulation_and_async_allreduce_LoRA from .layers import only_optimize_lora_parameters from .layers import unfuse_lora_layer diff --git a/chatlearn/models/megatron/lora/initializer.py b/chatlearn/models/megatron/lora/initializer.py index 8d07c6a9..ed9b836b 100644 --- a/chatlearn/models/megatron/lora/initializer.py +++ b/chatlearn/models/megatron/lora/initializer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/chatlearn/models/megatron/lora/layers.py b/chatlearn/models/megatron/lora/layers.py index a155dc87..9e12d455 100644 --- a/chatlearn/models/megatron/lora/layers.py +++ b/chatlearn/models/megatron/lora/layers.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,15 +15,11 @@ """lora layers.""" import math -import os -import warnings -from typing import Optional import importlib.util import torch import torch.nn.functional as F from torch import nn -from torch.cuda.amp import custom_fwd, custom_bwd from torch.nn import Embedding from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP @@ -32,256 +28,37 @@ from chatlearn.utils.arguments import LoraConfig from chatlearn.utils.constant import LORA_WEIGHT_PREFIX from chatlearn.utils.constant import QKV_LAYER_NAME -from chatlearn.utils.global_vars import get_args as get_rlhf_args +from chatlearn.utils.global_vars import get_args as get_runtime_args from chatlearn.utils.global_vars import is_initialized megatron_exist = importlib.util.find_spec("megatron") if megatron_exist: - from megatron import get_args - from megatron.core import mpu - from megatron.model import Float16Module - from megatron.optimizer.optimizer import MegatronOptimizer - from megatron.utils import unwrap_model - from megatron.core.parallel_state import ( + from chatlearn.utils.megatron_import_helper import get_args + from chatlearn.utils.megatron_import_helper import mpu + from chatlearn.utils.megatron_import_helper import Float16Module + from chatlearn.utils.megatron_import_helper import MegatronOptimizer + from chatlearn.utils.megatron_import_helper import unwrap_model + from chatlearn.utils.megatron_import_helper import ( get_tensor_model_parallel_rank, - 
get_tensor_model_parallel_world_size, - get_tensor_model_parallel_group, - get_global_memory_buffer + get_tensor_model_parallel_world_size ) - from megatron.core.tensor_parallel.layers import ( # pylint: disable=unused-import + from chatlearn.utils.megatron_import_helper import ( # pylint: disable=unused-import ColumnParallelLinear, + linear_with_frozen_weight, + linear_with_grad_accumulation_and_async_allreduce, LinearWithGradAccumulationAndAsyncCommunication, RowParallelLinear, VocabParallelEmbedding ) - from megatron.core.tensor_parallel.mappings import ( + from chatlearn.utils.megatron_import_helper import ( copy_to_tensor_model_parallel_region, gather_from_tensor_model_parallel_region, reduce_from_tensor_model_parallel_region, scatter_to_tensor_model_parallel_region, reduce_scatter_to_sequence_parallel_region ) - from megatron.core.tensor_parallel.utils import VocabUtility - -_grad_accum_fusion_available = True -try: - import fused_weight_gradient_mlp_cuda -except ImportError: - _grad_accum_fusion_available = False - - -class LinearWithGradAccumulationAndAsyncCommunication_LoRA(torch.autograd.Function): # pylint: disable=abstract-method - """See linear_with_grad_accumulation_and_async_allreduce_LoRA""" - - @staticmethod - @custom_fwd - def forward(ctx, inputs, weight, bias, gradient_accumulation_fusion, # pylint: disable=arguments-differ - async_grad_allreduce, sequence_parallel): - ctx.save_for_backward(inputs, weight) - ctx.use_bias = bias is not None - ctx.gradient_accumulation_fusion = gradient_accumulation_fusion - ctx.async_grad_allreduce = async_grad_allreduce - ctx.sequence_parallel = sequence_parallel - - if sequence_parallel: - world_size = get_tensor_model_parallel_world_size() - dim_size = list(inputs.size()) - dim_size[0] = dim_size[0] * world_size - - all_gather_buffer = \ - get_global_memory_buffer().get_tensor(dim_size, inputs.dtype, "mpu") - torch.distributed._all_gather_base( - all_gather_buffer, - inputs, - 
group=get_tensor_model_parallel_group()) - total_input = all_gather_buffer - else: - total_input = inputs - - output = torch.matmul(total_input, weight.t()) - if bias is not None: - output = output + bias - return output - - @staticmethod - @custom_bwd - def backward(ctx, grad_output): # pylint: disable=arguments-differ - inputs, weight = ctx.saved_tensors - use_bias = ctx.use_bias - - if ctx.sequence_parallel: - world_size = get_tensor_model_parallel_world_size() - dim_size = list(inputs.size()) - dim_size[0] = dim_size[0] * world_size - - all_gather_buffer = \ - get_global_memory_buffer().get_tensor(dim_size, inputs.dtype, "mpu") - handle = torch.distributed._all_gather_base( - all_gather_buffer, - inputs, - group=get_tensor_model_parallel_group(), async_op=True) - - # Here we rely on CUDA_DEVICE_MAX_CONNECTIONS=1 to ensure that the - # gather is scheduled before the input gradient computation - total_input = all_gather_buffer - else: - total_input = inputs - grad_input = grad_output.matmul(weight) - - if ctx.sequence_parallel: - handle.wait() - - # Doing gather + slicing during the NeMo forward pass can make this tensor - # not be contiguous. 
PyTorch only checks if the tensor is contiguous, and only - # clones it if it's not contiguous: - # https://github.com/pytorch/pytorch/blob/c47cf9bc7f9e02f649ab4ed53fe4d35732c92ab6/torch/_refs/__init__.py#L2761 - grad_output = grad_output.contiguous() - # Convert the tensor shapes to 2D for execution compatibility - grad_output = grad_output.view(grad_output.shape[0] * grad_output.shape[1], - grad_output.shape[2]) - total_input = total_input.view(total_input.shape[0] * total_input.shape[1], - total_input.shape[2]) - - if ctx.async_grad_allreduce: - # Asynchronous all-reduce - handle = torch.distributed.all_reduce( - grad_input, group=get_tensor_model_parallel_group(), async_op=True) - # Here we rely on CUDA_DEVICE_MAX_CONNECTIONS=1 to ensure that the - # all-reduce is scheduled before the weight gradient computation - - if ctx.sequence_parallel: - assert not ctx.async_grad_allreduce - dim_size = list(inputs.size()) - sub_grad_input = torch.empty(dim_size, dtype=inputs.dtype, - device=torch.cuda.current_device(), - requires_grad=False) - # reduce_scatter - handle = torch.distributed._reduce_scatter_base(sub_grad_input, grad_input, - group=get_tensor_model_parallel_group(), - async_op=True) - # Here we rely on CUDA_DEVICE_MAX_CONNECTIONS=1 to ensure that the - # reduce scatter is scheduled before the weight gradient computation - - if weight.requires_grad: - if ctx.gradient_accumulation_fusion: - if weight.main_grad.dtype == torch.float32: - fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp32(total_input, grad_output, # pylint: disable=c-extension-no-member - weight.main_grad) # pylint: disable=c-extension-no-member - elif weight.main_grad.dtype == torch.float16: - fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp16(total_input, grad_output, # pylint: disable=c-extension-no-member - weight.main_grad) # pylint: disable=c-extension-no-member - else: - raise RuntimeError("Unsupported gradient type for gradient accumulation fusion") - grad_weight = None - else: - 
grad_weight = grad_output.t().matmul(total_input) - else: - grad_weight = None - grad_bias = grad_output.sum(dim=0) if use_bias else None - - if ctx.sequence_parallel: - handle.wait() - return sub_grad_input, grad_weight, grad_bias, None, None, None - - if ctx.async_grad_allreduce: - handle.wait() - - return grad_input, grad_weight, grad_bias, None, None, None - - -def linear_with_grad_accumulation_and_async_allreduce_LoRA( - input: torch.Tensor, # pylint: disable=redefined-builtin - weight: torch.Tensor, - bias: Optional[torch.Tensor], - gradient_accumulation_fusion: bool, - async_grad_allreduce: bool, - sequence_parallel_enabled: bool, -) -> torch.Tensor: - """Linear layer execution with asynchronous communication and - gradient accumulation fusion in backprop. - - This has the option to accumulate the result of backprop - calculation into an existing gradient buffer, preventing the need - to do an additional addition kernel after the gradient - calculation. - - Additionally, the tensor parallel all reduce of the input - gradients can be done asynchronously with the calculation of - the weight gradients. - - In the case of sequence parallelism, the reduce scatter of the - input gradients is done asynchronously with the calcluation of the - weight gradients. - - Use of this module requires that the environment variable - CUDA_DEVICE_MAX_CONNECTIONS=1. There are a few collective - operations, noted in the code, that should be scheduled before - compute kernels to overlap the communication with the computation, - which is necessary for a speedup but not for correctness so that - ordering isn't imposed by the scheduler. Setting - CUDA_DEVICE_MAX_CONNECTIONS=1 forces the kernels to be scheduled - in the order they are called. 
- - Arguments: - - input (torch.Tensor required): input like torch.nn.functional.linear - - weight (torch.Tensor required): weight like torch.nn.functional.linear - - bias (torch.Tensor optional): bias like torch.nn.functional.linear - - gradient_accumulation_fusion (bool required): Perform the gradient - accumulation fusion, requires the custom CUDA extension - fused_weight_gradient_mlp_cuda module. To use - gradient_accumulation_fusion you must install APEX with - --cpp_ext and --cuda_ext. For example: "pip install - --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext .\" - " Note that the extension requires CUDA>=11. Otherwise, you - must turn off gradient accumulation fusion." - - async_grad_allreduce (bool required): Do the allreduce of input - gradients asyncronously with the computation of weight - gradients. If sequence_parallel_enabled is True, this must be - False, as no all reduce is performed. - - sequence_parallel_enabled (bool required): Indicates that sequence - parallelism is used and thus in the forward pass the input is - all gathered, and the backward pass the input gradients are - reduce scattered. 
- """ - args = [ - input, - weight, - bias, - gradient_accumulation_fusion, - async_grad_allreduce, - sequence_parallel_enabled, - ] - - if not linear_with_grad_accumulation_and_async_allreduce_LoRA.warned: - if os.environ.get('CUDA_DEVICE_MAX_CONNECTIONS') != "1": - if sequence_parallel_enabled: - warnings.warn( - "When using sequence parallelism it is recommended to set the " - "environment variable CUDA_DEVICE_MAX_CONNECTIONS to 1 for " - "maximum speedup") - linear_with_grad_accumulation_and_async_allreduce_LoRA.warned = True - - if async_grad_allreduce: - warnings.warn( - "When using async grad allreduce it is recommended to set the " - "environment variable CUDA_DEVICE_MAX_CONNECTIONS to 1 for " - "maximum speedup") - linear_with_grad_accumulation_and_async_allreduce_LoRA.warned = True - - return LinearWithGradAccumulationAndAsyncCommunication_LoRA.apply(*args) - - -if megatron_exist: - linear_with_grad_accumulation_and_async_allreduce_LoRA.warned = False - linear_with_grad_accumulation_and_async_allreduce = linear_with_grad_accumulation_and_async_allreduce_LoRA - LinearWithGradAccumulationAndAsyncCommunication.backward = LinearWithGradAccumulationAndAsyncCommunication_LoRA.backward + from chatlearn.utils.megatron_import_helper import VocabUtility class LoraBase(torch.nn.Module): # pylint: disable=abstract-method @@ -397,29 +174,29 @@ def forward(self, input_): else: input_parallel = copy_to_tensor_model_parallel_region(input_) # Matrix multiply. 
- output_parallel = linear_with_grad_accumulation_and_async_allreduce_LoRA( + output_parallel = linear_with_frozen_weight( input=input_parallel, weight=self.weight, bias=bias, gradient_accumulation_fusion=self.gradient_accumulation_fusion, async_grad_allreduce=self.async_tensor_model_parallel_allreduce, - sequence_parallel_enabled=self.sequence_parallel, + sequence_parallel=self.sequence_parallel, ) - residual = linear_with_grad_accumulation_and_async_allreduce_LoRA( + residual = linear_with_grad_accumulation_and_async_allreduce( input=input_parallel, weight=self.lora_right_weight, bias=None, gradient_accumulation_fusion=self.gradient_accumulation_fusion, async_grad_allreduce=self.async_tensor_model_parallel_allreduce, - sequence_parallel_enabled=self.sequence_parallel, + sequence_parallel=self.sequence_parallel, ) - residual = linear_with_grad_accumulation_and_async_allreduce_LoRA( + residual = linear_with_grad_accumulation_and_async_allreduce( input=residual, weight=self.lora_left_weight, bias=None, gradient_accumulation_fusion=self.gradient_accumulation_fusion, async_grad_allreduce=self.async_tensor_model_parallel_allreduce, - sequence_parallel_enabled=False, + sequence_parallel=False, ) residual = self.lora_dropout(residual) output_parallel = output_parallel + self.lora_scaling * residual @@ -525,29 +302,29 @@ def forward(self, input_): assert not self.sequence_parallel input_parallel = scatter_to_tensor_model_parallel_region(input_) # Matrix multiply. 
- output_parallel = linear_with_grad_accumulation_and_async_allreduce_LoRA( + output_parallel = linear_with_frozen_weight( input=input_parallel, weight=self.weight, bias=None, gradient_accumulation_fusion=self.gradient_accumulation_fusion, async_grad_allreduce=False, - sequence_parallel_enabled=False, + sequence_parallel=False, ) - residual = linear_with_grad_accumulation_and_async_allreduce_LoRA( + residual = linear_with_grad_accumulation_and_async_allreduce( input=input_parallel, weight=self.lora_right_weight, bias=None, gradient_accumulation_fusion=self.gradient_accumulation_fusion, async_grad_allreduce=False, - sequence_parallel_enabled=False, + sequence_parallel=False, ) - residual = linear_with_grad_accumulation_and_async_allreduce_LoRA( + residual = linear_with_grad_accumulation_and_async_allreduce( input=residual, weight=self.lora_left_weight, bias=None, gradient_accumulation_fusion=self.gradient_accumulation_fusion, async_grad_allreduce=False, - sequence_parallel_enabled=False, + sequence_parallel=False, ) residual = self.lora_dropout(residual) @@ -880,7 +657,7 @@ def allreduce_word_embedding_grads(self, args): unwrapped_model = self.models[0] if hasattr(unwrapped_model, "share_word_embeddings"): - from megatron.model import DistributedDataParallel as LocalDDP # pylint: disable=import-outside-toplevel + from chatlearn.utils.megatron_import_helper import DistributedDataParallel as LocalDDP # pylint: disable=import-outside-toplevel unwrapped_model = unwrap_model( unwrapped_model, (torchDDP, LocalDDP, Float16Module)) if unwrapped_model.share_word_embeddings: @@ -926,7 +703,7 @@ def convert_layer_to_lora(model, lora_layer=None, column_only_qkv=None): if is_initialized(): - default_args = get_rlhf_args().active_module_args.lora + default_args = get_runtime_args().active_module_args.lora else: default_args = LoraConfig diff --git a/chatlearn/models/megatron/lora/utils.py b/chatlearn/models/megatron/lora/utils.py index dc489a93..afb4eb36 100644 --- 
a/chatlearn/models/megatron/lora/utils.py +++ b/chatlearn/models/megatron/lora/utils.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/chatlearn/models/megatron/memory_manager/__init__.py b/chatlearn/models/megatron/memory_manager/__init__.py new file mode 100644 index 00000000..380bf9c0 --- /dev/null +++ b/chatlearn/models/megatron/memory_manager/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Memory manager for Megatron modules which provides utilities to free memory when unused.""" + +from chatlearn.models.megatron.memory_manager.base_trainer import create_trainer_memory_manager +from chatlearn.models.megatron.memory_manager.inference import InferenceMemoryManager diff --git a/chatlearn/models/megatron/memory_manager/base.py b/chatlearn/models/megatron/memory_manager/base.py new file mode 100644 index 00000000..c2a66c46 --- /dev/null +++ b/chatlearn/models/megatron/memory_manager/base.py @@ -0,0 +1,46 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Base class for memory managers, and common utilities.""" +import gc + +import torch + +from chatlearn.utils.timer import Timers, _Timer + + +class BaseMemoryManager: + """ + Base class of memory managers for Megatron modules which provides utilities to free memory when unused. + """ + + def __init__(self, model, model_name, timers): + self._model = model + self._model_name = model_name + self._timers = timers + + def _wrap_method(self, func, timers: Timers): + def inner(*args, **kwargs): + torch.cuda.synchronize() + torch.cuda.empty_cache() + timer: _Timer = timers(f'{self._model_name}_free_memory') + if not timer.started_: + timer.start() + func(*args, **kwargs) + torch.cuda.synchronize() + torch.cuda.empty_cache() + gc.collect() + timer.stop() + + return inner diff --git a/chatlearn/models/megatron/memory_manager/base_trainer.py b/chatlearn/models/megatron/memory_manager/base_trainer.py new file mode 100644 index 00000000..2f5f1df8 --- /dev/null +++ b/chatlearn/models/megatron/memory_manager/base_trainer.py @@ -0,0 +1,217 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Base class and creator function for trainer memory managers.""" + +from abc import ABC, abstractmethod +from typing import List, Optional + +import torch + +from chatlearn.models.megatron.memory_manager.base import BaseMemoryManager +from chatlearn.utils.flat_tensors import BucketizedFlatTensors +from chatlearn.utils.logger import log_rank_0 +from chatlearn.utils.megatron_import_memory_helper import MegatronVersion, get_megatron_version +from chatlearn.utils.megatron_import_helper import ( + DistributedDataParallel, + MixedPrecisionOptimizer, + DistributedOptimizer, + Float16OptimizerWithFloat16Params, +) + + +def create_trainer_memory_manager( + model, + optimizer, + use_distributed_optimizer, + accumulate_allreduce_grads_in_fp32, + params_dtype, + model_name, + timers, + bucket_size_mb=0, +) -> 'BaseTrainerMemoryManager': + """ + Create a trainer memory manager based on megatron version. 
+ """ + version = get_megatron_version() + if version in [MegatronVersion.V1, MegatronVersion.V2]: + # pylint: disable-next=import-outside-toplevel + from chatlearn.models.megatron.memory_manager.trainer_v1v2 import TrainerMemoryManagerV1V2 + + cls = TrainerMemoryManagerV1V2 + elif version in [MegatronVersion.V3]: + # pylint: disable-next=import-outside-toplevel + from chatlearn.models.megatron.memory_manager.trainer_v3 import TrainerMemoryManagerV3 + + cls = TrainerMemoryManagerV3 + else: + raise ValueError(f'Unsupported version of Megatron for trainer memory manager: {version}') + + return cls( + model, + optimizer, + use_distributed_optimizer, + accumulate_allreduce_grads_in_fp32, + params_dtype, + model_name, + timers, + bucket_size_mb, + ) + + +class BaseTrainerMemoryManager(BaseMemoryManager, ABC): + """ + Base class for Megatron trainer memory managers, which provides common routines for all versions, such as + optimizer states offloading, and main weights offloading. + """ + + def __init__( + self, + model, + optimizer, + use_distributed_optimizer, + accumulate_allreduce_grads_in_fp32, + params_dtype, + model_name, + timers, + bucket_size_mb=0, + ): + super().__init__(model, model_name, timers) + self._optimizer = optimizer + self._accumulate_allreduce_grads_in_fp32 = accumulate_allreduce_grads_in_fp32 + self._params_dtype = params_dtype + self._use_distributed_optimizer = use_distributed_optimizer + self._bucket_size_mb = bucket_size_mb + + assert isinstance( + model, (DistributedDataParallel,) + ), f'Only support model type DistributedDataParallel, current type is {str(type(model))}.' + assert isinstance( + optimizer, (MixedPrecisionOptimizer,) + ), f'Only support optimizer type MixedPrecisionOptimizer and its subclasses, current type is {str(type(optimizer))}.' 
+ + # sanity check + if self._use_distributed_optimizer: + assert isinstance(optimizer, DistributedOptimizer) + else: + log_rank_0('Current optimizer is Float16OptimizerWithFloat16Params') + assert isinstance(optimizer, Float16OptimizerWithFloat16Params) + + self._main_weights_offloaded = False + self._group_flat_main_weights: Optional[List[BucketizedFlatTensors]] = None + + self._megatron_version = get_megatron_version() + + funcs = [ + self.offload_optimizer_states, + self.onload_optimizer_states, + self.offload_weights, + self.onload_weights, + self.offload_main_weights, + self.onload_main_weights, + self.free_grad_buffers, + self.build_grad_buffers, + ] + for func in funcs: + func_name = func.__name__ + setattr(self, func_name, self._wrap_method(func, timers)) + + def _optimizer_load_state_bucket_into_device(self, device): + """put the state bucket onto a device""" + state_dict = self._optimizer.optimizer.state_dict() + for tensors in state_dict['state'].values(): + keys = list(tensors.keys()) + for key in keys: + tensors[key] = tensors[key].to(device=device, non_blocking=True) + # make sure the loading is finished before returning + torch.cuda.synchronize() + + def offload_optimizer_states(self): + """ + offload optimizer states + """ + self._optimizer_load_state_bucket_into_device(device='cpu') + + def onload_optimizer_states(self): + """ + onload optimizer states + """ + self._optimizer_load_state_bucket_into_device(device=torch.cuda.current_device()) + + def _flat_param_groups(self, multi_groups: List[List[List[torch.Tensor]]]): + """ + Flatten parameters in param groups. + """ + return [ + BucketizedFlatTensors(group, primary_store_device='cpu', bucket_size_mb=self._bucket_size_mb) + for groups in multi_groups + for group in groups + ] + + def offload_main_weights(self): + """ + offload main weights + """ + if self._main_weights_offloaded: + log_rank_0('Call offload_main_weights when already offloaded. 
Ignore it.') + return + + if self._group_flat_main_weights is None: + if self._use_distributed_optimizer: + self._group_flat_main_weights = self._flat_param_groups( + [self._optimizer.shard_fp32_from_float16_groups] + ) + else: + self._group_flat_main_weights = self._flat_param_groups([self._optimizer.fp32_from_float16_groups]) + + for flat_main_weights in self._group_flat_main_weights: + flat_main_weights.copy_to_primary_store() + + self._main_weights_offloaded = True + + def onload_main_weights(self): + """ + onload weights and allocate grads + """ + if not self._main_weights_offloaded: + log_rank_0('Call onload_main_weights when already onloaded. Ignore it.') + return + + for flat_main_weights in self._group_flat_main_weights: + flat_main_weights.copy_to_gpu_buffer() + self._main_weights_offloaded = False + + @abstractmethod + def offload_weights(self): + """ + offload weights + """ + + @abstractmethod + def onload_weights(self): + """ + onload weights + """ + + @abstractmethod + def free_grad_buffers(self): + """ + free grad buffers and related tensors + """ + + @abstractmethod + def build_grad_buffers(self): + """ + build grad buffers and related tensors + """ diff --git a/chatlearn/models/megatron/memory_manager/inference.py b/chatlearn/models/megatron/memory_manager/inference.py new file mode 100644 index 00000000..17506cf4 --- /dev/null +++ b/chatlearn/models/megatron/memory_manager/inference.py @@ -0,0 +1,83 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Inference Memory manager for Megatron."""
+from typing import Optional, List
+
+from chatlearn.models.megatron.memory_manager.base import BaseMemoryManager
+from chatlearn.utils.flat_tensors import BucketizedFlatTensors
+from chatlearn.utils.logger import log_rank_0
+from chatlearn.utils.megatron_import_helper import DistributedDataParallel
+
+
+class InferenceMemoryManager(BaseMemoryManager):
+    """
+    Memory manager for Megatron inference modules which provides utilities to free memory when unused.
+    """
+
+    def __init__(self, model, model_name, timers, bucket_size_mb=0):
+        super().__init__(model, model_name, timers)
+
+        assert not isinstance(
+            model, (DistributedDataParallel,)
+        ), f'Only support model type non-DistributedDataParallel, current type is {str(type(model))}.'
+
+        self._weights_offloaded = False
+        self._group_flat_weights: Optional[List[BucketizedFlatTensors]] = None
+        self._bucket_size_mb = bucket_size_mb
+
+        funcs = [self.offload_weights, self.onload_weights]
+        for func in funcs:
+            func_name = func.__name__
+            setattr(self, func_name, self._wrap_method(func, timers))
+
+    def offload_weights(self):
+        """
+        offload weights
+        """
+        if self._weights_offloaded:
+            log_rank_0('Call offload_weights when already offloaded. Ignore it.')
+            return
+
+        if self._group_flat_weights is None:
+            dtype_to_params = {}
+            for p in self._model.parameters():
+                dtype = p.dtype
+                if dtype not in dtype_to_params:
+                    dtype_to_params[dtype] = []
+                dtype_to_params[dtype].append(p)
+
+            self._group_flat_weights = []
+            for params in dtype_to_params.values():
+                self._group_flat_weights.append(
+                    BucketizedFlatTensors(params, primary_store_device='cpu', bucket_size_mb=self._bucket_size_mb)
+                )
+
+        for flat_weights in self._group_flat_weights:
+            flat_weights.copy_to_primary_store()
+
+        self._weights_offloaded = True
+
+    def onload_weights(self):
+        """
+        onload weights
+        """
+        if not self._weights_offloaded:
+            log_rank_0('Call onload_weights when already onloaded. Ignore it.')
+            return
+
+        for flat_weights in self._group_flat_weights:
+            flat_weights.copy_to_gpu_buffer()
+
+        self._weights_offloaded = False
diff --git a/chatlearn/models/megatron/memory_manager/trainer_v1v2.py b/chatlearn/models/megatron/memory_manager/trainer_v1v2.py
new file mode 100644
index 00000000..ff14737f
--- /dev/null
+++ b/chatlearn/models/megatron/memory_manager/trainer_v1v2.py
@@ -0,0 +1,320 @@
+# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Trainer Memory manager for Megatron V1 and V2"""
+from chatlearn.utils.megatron_import_memory_helper import MegatronVersion, check_megatron_versions
+
+check_megatron_versions([MegatronVersion.V1, MegatronVersion.V2])
+
+# pylint: disable=wrong-import-position,wrong-import-order,ungrouped-imports
+from typing import List, Optional
+
+import torch
+
+from chatlearn.models.megatron.memory_manager.base_trainer import BaseTrainerMemoryManager
+from chatlearn.utils.flat_tensors import BucketizedFlatTensors
+from chatlearn.utils.logger import log_rank_0
+from chatlearn.utils.megatron_import_helper import tensor_parallel
+
+# pylint: enable=wrong-import-position,wrong-import-order,ungrouped-imports
+
+__all__ = ['TrainerMemoryManagerV1V2']
+
+
+class TrainerMemoryManagerV1V2(BaseTrainerMemoryManager):
+    """
+    Memory manager for Megatron V1 and V2 trainer modules.
+    """
+
+    def __init__(
+        self,
+        model,
+        optimizer,
+        use_distributed_optimizer,
+        accumulate_allreduce_grads_in_fp32,
+        params_dtype,
+        model_name,
+        timers,
+        bucket_size_mb=0,
+    ):
+        super().__init__(
+            model,
+            optimizer,
+            use_distributed_optimizer,
+            accumulate_allreduce_grads_in_fp32,
+            params_dtype,
+            model_name,
+            timers,
+            bucket_size_mb,
+        )
+        self._weights_offloaded = False
+        self._grad_buffers_freed = False
+
+        self._grad_dtype_to_params = self._get_grad_dtype_to_params(model, accumulate_allreduce_grads_in_fp32)
+
+        self._group_flat_weights: Optional[List[BucketizedFlatTensors]] = None
+        self._grad_buffers_numels = None
+        self._grad_buffers_bucket_sizes = None
+
+    def get_grad_buffers(self):
+        if self._megatron_version == MegatronVersion.V2:
+            return self._model.grad_buffers
+        elif self._megatron_version == MegatronVersion.V1:
+            return self._model._grad_buffers
+
+    @staticmethod
+    def _get_grad_dtype_to_params(model, accumulate_allreduce_grads_in_fp32):
+        # Group parameters by their gradient type.
+ grad_dtype_to_params = {} + for _, param in model.module.named_parameters(): + if param.requires_grad and getattr(param, 'allreduce', True): + param.grad_added_to_main_grad = False + dtype = torch.float if accumulate_allreduce_grads_in_fp32 else param.dtype + params = grad_dtype_to_params.get(dtype, []) + params.append(param) + grad_dtype_to_params[dtype] = params + return grad_dtype_to_params + + def offload_weights(self): + """ + offload weights + """ + if self._weights_offloaded: + log_rank_0('Call offload_weights when already offloaded. Ignore it.') + return + + optimizer = self._optimizer + + if self._use_distributed_optimizer: + optimizer.shard_float16_groups.clear() + optimizer.shard_fp32_groups.clear() + + if self._group_flat_weights is None: + if self._use_distributed_optimizer: + self._group_flat_weights = self._flat_param_groups( + [ + optimizer.model_float16_groups, + optimizer.model_fp32_groups, + ], + ) + else: + self._group_flat_weights = self._flat_param_groups( + [ + optimizer.float16_groups, + optimizer.fp32_from_fp32_groups, + ], + ) + + for flat_weights in self._group_flat_weights: + flat_weights.copy_to_primary_store() + + self._model.grad_accs.clear() + + self._weights_offloaded = True + + def onload_weights(self): + """ + onload weights + """ + if not self._weights_offloaded: + log_rank_0('Call onload_weights when already onloaded. Ignore it.') + return + + optimizer = self._optimizer + + for flat_weights in self._group_flat_weights: + flat_weights.copy_to_gpu_buffer() + + model = self._model + # Re-register grad acc hooks, see Megatron DistributedDataParallel#__init__. + model.grad_accs = [] + for param in model.module.parameters(): + if param.requires_grad: + # Expand so we get access to grad_fn. + param_tmp = param.expand_as(param) + # Get the gradient accumulator function. 
+ grad_acc = param_tmp.grad_fn.next_functions[0][0] + if self._megatron_version == MegatronVersion.V2: + grad_acc.register_hook(model._make_param_hook(param, model.param_to_grad_buffer)) + elif self._megatron_version == MegatronVersion.V1: + grad_acc.register_hook(model._make_param_hook(param)) + model.grad_accs.append(grad_acc) + + if not self._use_distributed_optimizer: + self._weights_offloaded = False + return + + shard_float16_groups = optimizer.shard_float16_groups + shard_fp32_groups = optimizer.shard_fp32_groups + param_gbuf_map = optimizer.model_param_gbuf_map + opt_group_ranges = optimizer.opt_group_ranges + model_gbuf_ranges = optimizer.model_gbuf_ranges + + # Rebuild shard_float16_groups and shard_fp32_groups, + # see Megatron DistributedOptimizer#build_model_and_main_param_groups. + for _, group_range in enumerate(opt_group_ranges): + shard_float16_params_this_group = [] + shard_fp32_params_this_group = [] + shard_float16_groups.append(shard_float16_params_this_group) + shard_fp32_groups.append(shard_fp32_params_this_group) + + for model_param in group_range["params"]: + assert model_param.requires_grad + if self._megatron_version == MegatronVersion.V2: + model_index, dtype, bucket_index = param_gbuf_map[model_param] + gbuf_range = model_gbuf_ranges[model_index][dtype][bucket_index] + param_range = gbuf_range["param_map"][model_param]["param"] + elif self._megatron_version == MegatronVersion.V1: + model_index, dtype = param_gbuf_map[model_param] + gbuf_range = model_gbuf_ranges[model_index][dtype] + param_range = gbuf_range["param_map"][model_param]["param"] + + # fp16, bf16 params. 
+ if model_param.type() in ['torch.cuda.HalfTensor', 'torch.cuda.BFloat16Tensor']: + shard_model_param = model_param.detach().view(-1)[param_range.start : param_range.end] + tensor_parallel.copy_tensor_model_parallel_attributes(shard_model_param, model_param) + if hasattr(model_param, 'shared'): + shard_model_param.shared = model_param.shared + + shard_float16_params_this_group.append(shard_model_param) + + # fp32 params. + elif model_param.type() == 'torch.cuda.FloatTensor': + shard_model_param = model_param.view(-1)[param_range.start : param_range.end] + shard_fp32_params_this_group.append(shard_model_param) + tensor_parallel.copy_tensor_model_parallel_attributes(shard_model_param, model_param) + if hasattr(model_param, 'shared'): + shard_model_param.shared = model_param.shared + else: + raise TypeError( + 'Wrapped parameters must be one of ' + 'torch.cuda.FloatTensor, ' + 'torch.cuda.HalfTensor, or ' + 'torch.cuda.BFloat16Tensor. ' + 'Received {}'.format(model_param.type()) + ) + + self._weights_offloaded = False + + def free_grad_buffers(self): + """ + free grad buffers and related tensors + """ + if self._grad_buffers_freed: + log_rank_0('Call free_grad_buffers when already freed. Ignore it.') + return + + optimizer = self._optimizer + grad_dtype_to_params = self._grad_dtype_to_params + + # This is necessary, but don't know why. + optimizer.zero_grad(True) + + if self._use_distributed_optimizer: + # Release param_buffers because they share storage with grad_buffers. + # Note: param_buffers are only available in DistributedOptimizer. + optimizer.param_buffers.clear() + + # Release grad_buffers, including buckets in GradBuffer for newer Megatron version. + # Release `main_grad` of parameters. 
+ self._grad_buffers_numels = {} + self._grad_buffers_bucket_sizes = {} + + for dtype, buffer in self.get_grad_buffers().items(): + for p in grad_dtype_to_params[dtype]: + del p.main_grad + + self._grad_buffers_numels[dtype] = buffer.numel_padded + + if self._megatron_version == MegatronVersion.V2: + bucket_sizes = [] + for bucket in buffer.buckets: + bucket_sizes.append(bucket.data.numel()) + bucket.data = None + self._grad_buffers_bucket_sizes[dtype] = bucket_sizes + + buffer.data = None + + self._grad_buffers_freed = True + + def build_grad_buffers(self): + """ + build grad buffers and related tensors + """ + if not self._grad_buffers_freed: + log_rank_0('Call build_grad_buffers when already built. Ignore it.') + return + + optimizer = self._optimizer + params_dtype = self._params_dtype + grad_dtype_to_params = self._grad_dtype_to_params + + # Re-allocate data of grad_buffers, including data of buckets, see Megatron DistributedDataParallel#__init__. + # Also set `main_grad` for parameters. + for dtype, buffer in self.get_grad_buffers().items(): + numel_padded = self._grad_buffers_numels[dtype] + buffer.data = torch.zeros( + numel_padded, + dtype=dtype, + device=torch.cuda.current_device(), + requires_grad=False, + ) + + if self._megatron_version == MegatronVersion.V2: + for bucket, numel in zip(buffer.buckets, self._grad_buffers_bucket_sizes[dtype]): + bucket.data = buffer.get(torch.Size([numel]), bucket.offset) + + params = grad_dtype_to_params[dtype] + data_start_index = 0 + for param in params[::-1]: + if not param.requires_grad: + continue + this_numel = param.data.nelement() + data_end_index = data_start_index + this_numel + param.main_grad = buffer.get(param.data.shape, data_start_index) + data_start_index = data_end_index + + if not self._use_distributed_optimizer: + self._grad_buffers_freed = False + return + + # Re-allocate param_buffers, see Megatron DistributedOptimizer#__init__. 
+ optimizer.param_buffers = [] + for _, _ in enumerate(optimizer.models): + current_param_buffers = {} + for dtype, grad_buffer in self.get_grad_buffers().items(): + current_param_buffers[dtype] = [] + if self._megatron_version == MegatronVersion.V2: + for bucket in grad_buffer.buckets: + try: + storage = bucket.data.storage()._untyped() + # pylint: disable-next=bare-except + except: + storage = bucket.data.storage().untyped() + + param_buffer = torch.tensor([], dtype=params_dtype, device=bucket.data.device).set_(storage) + param_buffer = param_buffer[bucket.offset : bucket.offset + bucket.data.numel()] + current_param_buffers[dtype].append(param_buffer) + elif self._megatron_version == MegatronVersion.V1: + try: + storage = grad_buffer.data.storage()._untyped() + # pylint: disable-next=bare-except + except: + storage = grad_buffer.data.storage().untyped() + param_buffer = torch.tensor([], dtype=params_dtype, device=grad_buffer.data.device).set_(storage) + param_buffer = param_buffer[: grad_buffer.numel_padded] + current_param_buffers[dtype] = param_buffer + optimizer.param_buffers.append(current_param_buffers) + + self._grad_buffers_freed = False diff --git a/chatlearn/models/megatron/memory_manager/trainer_v3.py b/chatlearn/models/megatron/memory_manager/trainer_v3.py new file mode 100644 index 00000000..25268e17 --- /dev/null +++ b/chatlearn/models/megatron/memory_manager/trainer_v3.py @@ -0,0 +1,283 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Trainer Memory manager for Megatron V3"""
+from chatlearn.utils.megatron_import_memory_helper import MegatronVersion, check_megatron_versions
+
+check_megatron_versions([MegatronVersion.V3])
+
+# pylint: disable=wrong-import-position,wrong-import-order,ungrouped-imports
+from typing import List, Optional
+
+import torch
+
+from chatlearn.models.megatron.memory_manager.base_trainer import BaseTrainerMemoryManager
+from chatlearn.utils.flat_tensors import BucketizedFlatTensors, FlatTensors
+from chatlearn.utils.logger import log_rank_0
+from chatlearn.utils.megatron_import_helper import tensor_parallel
+from chatlearn.utils.megatron_import_memory_helper import BufferType
+
+# pylint: enable=wrong-import-position,wrong-import-order,ungrouped-imports
+
+__all__ = ['TrainerMemoryManagerV3']
+
+
+class TrainerMemoryManagerV3(BaseTrainerMemoryManager):
+    """
+    Memory manager for Megatron V3 trainer modules.
+ """ + + def __init__( + self, + model, + optimizer, + use_distributed_optimizer, + accumulate_allreduce_grads_in_fp32, + params_dtype, + model_name, + timers, + bucket_size_mb=0, + ): + super().__init__( + model, + optimizer, + use_distributed_optimizer, + accumulate_allreduce_grads_in_fp32, + params_dtype, + model_name, + timers, + bucket_size_mb, + ) + self._weights_offloaded = False + self._grad_buffers_freed = False + + self._buffers = self._get_buffers(model) + + self._group_flat_weights: Optional[List[BucketizedFlatTensors]] = None + + @staticmethod + def _get_buffers(model): + processed_buffers = set() + buffers = [] + for _, buffer in model.param_to_buffer.items(): + if buffer not in processed_buffers: + processed_buffers = set() + processed_buffers.add(buffer) + buffers.append(buffer) + return buffers + + def offload_weights(self): + """ + offload weights + """ + if self._weights_offloaded: + log_rank_0('Call offload_weights when already offloaded. Ignore it.') + return + + optimizer = self._optimizer + + # TODO(jiqi): support expert parallel params + + # In the V3 version, when distributed optimizer is used, parameter data are managed together with + # gradients in buffers. 
+ if self._use_distributed_optimizer: + optimizer.shard_float16_groups.clear() + optimizer.shard_fp32_groups.clear() + optimizer.pbuf_view_items.clear() + + if self._group_flat_weights is None: + self._group_flat_weights = [] + for buffer in self._buffers: + assert buffer.param_data is not None + self._group_flat_weights.append( + BucketizedFlatTensors([buffer.param_data], self._bucket_size_mb, 'cpu') + ) + + # Remove references from params + for p, _ in self._model.param_to_buffer.items(): + # save the shape for reconstruction + p._saved_shape = p.shape + p.data = FlatTensors._EMPTY_TENSOR + + # Remove references from buckets + for buffer in self._buffers: + for bucket in buffer.buckets: + bucket.param_data = None + else: + if self._group_flat_weights is None: + self._group_flat_weights = self._flat_param_groups( + [ + optimizer.float16_groups, + optimizer.fp32_from_fp32_groups, + ], + ) + + # Offload param_data of buffers + for flat_weights in self._group_flat_weights: + flat_weights.copy_to_primary_store() + + self._model.grad_accs.clear() + + self._weights_offloaded = True + + def onload_weights(self): + """ + onload weights + """ + if not self._weights_offloaded: + log_rank_0('Call onload_weights when already onloaded. 
Ignore it.') + return + + optimizer = self._optimizer + + # Onload param_data of buffers + for flat_weights in self._group_flat_weights: + flat_weights.copy_to_gpu_buffer() + + if self._use_distributed_optimizer: + # Reconstruct references from buckets + for buffer in self._buffers: + assert buffer.param_data is not None + for bucket_id, bucket in enumerate(buffer.buckets): + (start_index, end_index) = buffer.bucket_indices[bucket_id] + bucket.param_data = None + if buffer.param_data is not None: + bucket.param_data = buffer._get( + torch.Size([end_index - start_index]), start_index, buffer_type=BufferType.PARAM + ) + + # Reconstruct references from params + for param, buffer in self._model.param_to_buffer.items(): + data_start_index, _, bucket_id = buffer.param_index_map[param] + if buffer.param_data is not None: + param.data = buffer._get(param._saved_shape, data_start_index, buffer_type=BufferType.PARAM) + + model = self._model + # Re-register grad acc hooks, see Megatron DistributedDataParallel#__init__. + model.grad_accs = [] + for param in model.module.parameters(): + if param.requires_grad: + # Expand so we get access to grad_fn. + param_tmp = param.expand_as(param) + # Get the gradient accumulator function. + grad_acc = param_tmp.grad_fn.next_functions[0][0] + grad_acc.register_hook(model._make_param_hook(param, model.param_to_buffer)) + model.grad_accs.append(grad_acc) + + if not self._use_distributed_optimizer: + self._weights_offloaded = False + return + + optimizer.pbuf_view_items = optimizer._get_model_param_buffer_dp_views() + + shard_float16_groups = optimizer.shard_float16_groups + shard_fp32_groups = optimizer.shard_fp32_groups + param_gbuf_map = optimizer.model_param_gbuf_map + opt_group_ranges = optimizer.opt_group_ranges + model_gbuf_ranges = optimizer.gbuf_ranges + + # Rebuild shard_float16_groups and shard_fp32_groups, + # see Megatron DistributedOptimizer#build_model_and_main_param_groups. 
+ for _, group_range in enumerate(opt_group_ranges): + shard_float16_params_this_group = [] + shard_fp32_params_this_group = [] + shard_float16_groups.append(shard_float16_params_this_group) + shard_fp32_groups.append(shard_fp32_params_this_group) + + for model_param in group_range["params"]: + assert model_param.requires_grad + gbuf_index, dtype, bucket_index = param_gbuf_map[model_param] + gbuf_range = model_gbuf_ranges[gbuf_index][dtype][bucket_index] + param_range = gbuf_range["param_map"][model_param]["param"] + + # fp16, bf16 params. + if model_param.type() in ['torch.cuda.HalfTensor', 'torch.cuda.BFloat16Tensor']: + shard_model_param = model_param.detach().view(-1)[param_range.start : param_range.end] + tensor_parallel.copy_tensor_model_parallel_attributes(shard_model_param, model_param) + if hasattr(model_param, 'shared'): + shard_model_param.shared = model_param.shared + + shard_float16_params_this_group.append(shard_model_param) + + # fp32 params. + elif model_param.type() == 'torch.cuda.FloatTensor': + shard_model_param = model_param.view(-1)[param_range.start : param_range.end] + shard_fp32_params_this_group.append(shard_model_param) + tensor_parallel.copy_tensor_model_parallel_attributes(shard_model_param, model_param) + if hasattr(model_param, 'shared'): + shard_model_param.shared = model_param.shared + else: + raise TypeError( + 'Wrapped parameters must be one of ' + 'torch.cuda.FloatTensor, ' + 'torch.cuda.HalfTensor, or ' + 'torch.cuda.BFloat16Tensor. ' + 'Received {}'.format(model_param.type()) + ) + + self._weights_offloaded = False + + def free_grad_buffers(self): + """ + free grad buffers and related tensors + """ + if self._grad_buffers_freed: + log_rank_0('Call free_grad_buffers when already freed. Ignore it.') + return + + optimizer = self._optimizer + + # This is necessary, but don't know why. 
+ optimizer.zero_grad(True) + + # Remove references from params + for p, buffer in self._model.param_to_buffer.items(): + del p.main_grad + + # Remove references from buckets and free grad_data of buffer + for buffer in self._buffers: + for bucket in buffer.buckets: + del bucket.grad_data + del buffer.grad_data + + self._grad_buffers_freed = True + + def build_grad_buffers(self): + """ + build grad buffers and related tensors + """ + if not self._grad_buffers_freed: + log_rank_0('Call build_grad_buffers when already built. Ignore it.') + return + + # Build buffers and reconstruct references from buckets + for buffer in self._buffers: + buffer.grad_data = torch.zeros( + buffer.numel, + dtype=buffer.grad_dtype, + device=torch.cuda.current_device(), + requires_grad=False, + ) + for bucket_id, bucket in enumerate(buffer.buckets): + (start_index, end_index) = buffer.bucket_indices[bucket_id] + bucket.grad_data = buffer._get( + torch.Size([end_index - start_index]), start_index, buffer_type=BufferType.GRAD + ) + + # Reconstruct references from params + for param, buffer in self._model.param_to_buffer.items(): + data_start_index, _, bucket_id = buffer.param_index_map[param] + param.main_grad = buffer._get(param.data.shape, data_start_index, buffer_type=BufferType.GRAD) + + self._grad_buffers_freed = False diff --git a/chatlearn/models/megatron/ops/__init__.py b/chatlearn/models/megatron/ops/__init__.py index 191bcefe..b03f8f65 100644 --- a/chatlearn/models/megatron/ops/__init__.py +++ b/chatlearn/models/megatron/ops/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/chatlearn/models/megatron/ops/policy_gradient.py b/chatlearn/models/megatron/ops/policy_gradient.py index 3d35d72a..fb7a7a84 100644 --- a/chatlearn/models/megatron/ops/policy_gradient.py +++ b/chatlearn/models/megatron/ops/policy_gradient.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""RLHF Policy Gradient Loss""" +"""Policy Gradient Loss""" import torch -from megatron.core import mpu -from megatron.core.tensor_parallel.utils import VocabUtility -from megatron.global_vars import get_args -from megatron.utils import average_losses_across_data_parallel_group +from chatlearn.utils.megatron_import_helper import average_losses_across_data_parallel_group +from chatlearn.utils.megatron_import_helper import get_args +from chatlearn.utils.megatron_import_helper import mpu +from chatlearn.utils.megatron_import_helper import VocabUtility # pylint: disable=arguments-differ,abstract-method diff --git a/chatlearn/models/megatron_module.py b/chatlearn/models/megatron_module.py index b687ec36..bd18c6be 100644 --- a/chatlearn/models/megatron_module.py +++ b/chatlearn/models/megatron_module.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,26 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""RLHF Megatron module""" - +"""Megatron module""" import inspect -import torch try: - import megatron - from megatron.core import mpu - from megatron.initialize import set_jit_fusion_options - from megatron.training import save_checkpoint_and_time - from megatron.initialize import initialize_megatron + from chatlearn.utils.megatron_import_helper import get_args + from chatlearn.utils.megatron_import_helper import mpu + from chatlearn.utils.megatron_import_helper import initialize_megatron + from chatlearn.utils.megatron_import_helper import save_checkpoint_and_time + from chatlearn.utils.megatron_import_helper import set_jit_fusion_options from chatlearn.utils.megatron_utils import initialize_megatron as chatlearn_initialize_megatron from chatlearn.utils.megatron_utils import build_pipeline_layer_name_mapping + from chatlearn.models.megatron.memory_manager import create_trainer_memory_manager, InferenceMemoryManager except ImportError: - print("Cannot import megatron, please set megatron python path first.") -from .torch_module import RLHFTorchModule + mpu = None +from .torch_module import TorchModule + # pylint: disable=import-outside-toplevel -class RLHFMegatronModule(RLHFTorchModule): - """RLHFMegatronModule is the class for RLHF Megatron models. +class MegatronModule(TorchModule): + """MegatronModule is the class for Alignment Megatron models. 
Args ---- @@ -41,18 +41,23 @@ class RLHFMegatronModule(RLHFTorchModule): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) + if mpu is None: + print("Cannot import megatron, please set megatron python path first.") if not self.trainable: # inference only if self.model_args.get("micro_batch_size") != self.module_args.generation_batch_size: self._logger.info(f"{self.name} Overwrite micro_batch_size with generation_batch_size {self.module_args.generation_batch_size}") self.model_args["micro_batch_size"] = self.module_args.generation_batch_size else: - self.model_args["micro_batch_size"] = self.rlhf_args.train_micro_batch_size - self.model_args["global_batch_size"] = self.rlhf_args.train_global_batch_size - if self.model_args.get("micro_batch_size") != self.rlhf_args.train_micro_batch_size: + self.model_args["micro_batch_size"] = self.runtime_args.train_micro_batch_size + self.model_args["global_batch_size"] = self.runtime_args.train_global_batch_size + if self.model_args.get("micro_batch_size") != self.runtime_args.train_micro_batch_size: self._logger.info(f"{self.name} Overwrite micro_batch_size with train_micro_batch_size {self.module_args.train_micro_batch_size}") - if self.model_args.get("global_batch_size") != self.rlhf_args.train_global_batch_size: + if self.model_args.get("global_batch_size") != self.runtime_args.train_global_batch_size: self._logger.info(f"{self.name} Overwrite global_batch_size with train_global_batch_size {self.module_args.train_global_batch_size}") + if not self.model_args.get("tensorboard_dir") and self.runtime_args.output_dir is not None: + self.model_args['tensorboard_dir'] = f"{self.runtime_args.output_dir}/tensorboard" + def add_extra_args(self, parser): """ @@ -90,16 +95,34 @@ def model_setup(self): assert hasattr(self, "model") assert hasattr(self, "optimizer") assert hasattr(self, "opt_param_scheduler") + if self.module_args.offload_weights or self.module_args.free_grad_buffers or 
self.module_args.offload_optimizer_states: + self._memory_manager = create_trainer_memory_manager( + self.megatron_model(), + self.optimizer, + self.megatron_args.use_distributed_optimizer, + self.megatron_args.accumulate_allreduce_grads_in_fp32, + self.megatron_args.params_dtype, + self.name, + self.timers, + self.runtime_args.bucket_size_mb_in_memory_manager, + ) + self.offload(to_offload_optimizer_states=False) else: assert hasattr(self, "model") self.model.eval() + if self.module_args.offload_weights: + self._memory_manager = InferenceMemoryManager( + self.megatron_model(), self.name, self.timers, + self.runtime_args.bucket_size_mb_in_memory_manager, + ) + self.offload() @property def megatron_args(self): """ :meta private: """ - return megatron.get_args() + return get_args() def pipeline_model_parallel_size(self): """ @@ -149,6 +172,14 @@ def num_layers(self): """ return self.megatron_args.num_layers + def megatron_model(self): + if isinstance(self.model, list): + assert len(self.model) == 1 + model = self.model[0] + else: + model = self.model + return model + def build_pipeline_layer_name_mapping(self, num_target_pipe_stage, target_pipe_rank, requires_grad=True): """ build name mapping from src model to tgt model @@ -166,11 +197,7 @@ def build_pipeline_layer_name_mapping(self, num_target_pipe_stage, target_pipe_r mapping_interval = dst_layers_per_stage // src_layers_per_stage src_rank = mpu.get_pipeline_model_parallel_rank() self._logger.debug(f"build mapping for rank {src_rank} =========") - if isinstance(self.model, list): - assert len(self.model) == 1 - model = self.model[0] - else: - model = self.model + model = self.megatron_model() is_tgt_last_stage = target_pipe_rank == num_target_pipe_stage - 1 and target_pipe_rank != 0 name_mapping = build_pipeline_layer_name_mapping(src_layers_per_stage, src_rank, mapping_interval, is_tgt_last_stage, model, requires_grad) @@ -201,33 +228,54 @@ def offload_optimizer_states(self): """ offload optimizer states """ - if 
self.to_offload_optimizer_states: - timer = self.timers("offload") - if not timer.started_: - timer.start() - # offload onto cpu - self._optimizer_load_state_bucket_into_device(device='cpu') - self.empty_cache() - timer.stop() + if self.module_args.offload_optimizer_states: + self._memory_manager.offload_optimizer_states() def onload_optimizer_states(self): """ onload optimizer states """ - if self.to_offload_optimizer_states: - timer = self.timers("onload") - if not timer.started_: - timer.start() - self._optimizer_load_state_bucket_into_device(device=torch.cuda.current_device()) - timer.stop() - - def _optimizer_load_state_bucket_into_device(self, device): - """put the state bucket onto a device - """ - state_dict = self.optimizer.optimizer.state_dict() - for tensors in state_dict['state'].values(): - keys = list(tensors.keys()) - for key in keys: - tensors[key] = tensors[key].to(device=device, non_blocking=True) - # make sure the loading is finished before returning - torch.cuda.synchronize() + if self.module_args.offload_optimizer_states: + self._memory_manager.onload_optimizer_states() + + def offload_main_weights(self): + """ + offload main weights + """ + if self.module_args.offload_weights: + self._memory_manager.offload_main_weights() + + def onload_main_weights(self): + """ + onload main weights + """ + if self.module_args.offload_weights: + self._memory_manager.onload_main_weights() + + def offload_weights(self): + """ + offload weights + """ + if self.module_args.offload_weights: + self._memory_manager.offload_weights() + + def onload_weights(self): + """ + onload weights + """ + if self.module_args.offload_weights: + self._memory_manager.onload_weights() + + def free_grad_buffers(self): + """ + free grad buffers and related tensors + """ + if self.module_args.free_grad_buffers: + self._memory_manager.free_grad_buffers() + + def build_grad_buffers(self): + """ + build grad buffers and related tensors + """ + if self.module_args.free_grad_buffers: + 
self._memory_manager.build_grad_buffers() diff --git a/chatlearn/models/torch_module.py b/chatlearn/models/torch_module.py index 710cb5b3..28faae00 100644 --- a/chatlearn/models/torch_module.py +++ b/chatlearn/models/torch_module.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""RLHF torch module""" +"""Torch module""" import os import ray import torch -from chatlearn.utils.logger import log_rank_0 -from .rlhf_module import RLHFModule +import torch.distributed as dist +from chatlearn.utils.logger import log_rank_0, debug_rank_0 -class RLHFTorchModule(RLHFModule): - """RLHFTorchModule is the class for RLHF Torch models. +from chatlearn.utils.utils import get_full_proc_memory_info +from .base_module import BaseModule + +class TorchModule(BaseModule): + """TorchModule is the class for Alignment Torch models. 
Args ---- @@ -31,14 +34,13 @@ class RLHFTorchModule(RLHFModule): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.profiler = None def model_setup(self): """ :meta private: """ super().model_setup() - if self.rlhf_args.profiler_dir is not None and self.replica_id == 0: + if self.runtime_args.profiler_dir is not None and self.replica_id == 0: self.profiler = torch.profiler.profile( activities=[ torch.profiler.ProfilerActivity.CPU, @@ -52,7 +54,7 @@ def model_setup(self): record_shapes=False, with_stack=False, with_flops=False, - on_trace_ready=torch.profiler.tensorboard_trace_handler(self.rlhf_args.profiler_dir) + on_trace_ready=torch.profiler.tensorboard_trace_handler(self.runtime_args.profiler_dir) ) self.profiler.start() @@ -67,7 +69,7 @@ def set_env(self, args): :meta private: """ for key in ['RANK', 'MASTER_ADDR', 'MASTER_PORT', 'WORLD_SIZE', 'LOCAL_RANK']: - assert key in args, f"{key} is not set for RLHFTorchWrapper" + assert key in args, f"{key} is not set for TorchModule" os.environ[key] = str(args[key]) self._rank = int(os.environ['RANK']) return 1 @@ -88,33 +90,19 @@ def peak_memory(self): self._peak_memory = max(self._peak_memory, torch.cuda.max_memory_allocated() / (1024 ** 3)) return self._peak_memory - @property - def data_parallel_size(self): - """ - data parallel size - - :meta private: - """ - - @property - def data_parallel_rank(self): - """ - data parallel rank - - :meta private: - """ - def empty_cache(self): """ :meta private: """ if not self.timers("empty_cache").started_: self.timers("empty_cache").start() - log_rank_0(f"{self.name} replica: {self.replica_id}, before empty cache, peak mem: {torch.cuda.max_memory_allocated() / (1024 ** 3)}GB", + peak_mem = torch.cuda.max_memory_allocated() / (1024 ** 3) + debug_rank_0(f"{self.name} replica: {self.replica_id}, before empty cache, peak mem: {peak_mem:.2f} GiB", self._logger) torch.cuda.empty_cache() torch.cuda.reset_peak_memory_stats() - log_rank_0(f"{self.name} 
replica: {self.replica_id}, after empty cache, peak mem: {torch.cuda.max_memory_allocated() / (1024 ** 3)}GB", + peak_mem = torch.cuda.max_memory_allocated() / (1024 ** 3) + debug_rank_0(f"{self.name} replica: {self.replica_id}, after empty cache, peak mem: {peak_mem:.2f} GiB", self._logger) self.timers("empty_cache").stop() @@ -137,6 +125,50 @@ def is_last_rank(self): """ Is last rank. """ - if torch.distributed.is_initialized(): - return torch.distributed.get_rank() == (torch.distributed.get_world_size() - 1) + if dist.is_initialized(): + return dist.get_rank() == (dist.get_world_size() - 1) return True + + @property + def world_size(self): + return dist.get_world_size() + + def onload(self): + if self.module_args.free_grad_buffers or self.module_args.offload_weights or \ + self.module_args.offload_optimizer_states: + log_rank_0(get_full_proc_memory_info('Before onload'), self._logger) + torch.distributed.barrier() + if self.module_args.offload_weights: + self.onload_weights() + if self.trainable: + if self.module_args.free_grad_buffers: + self.build_grad_buffers() + if self.module_args.offload_weights: + self.onload_main_weights() + if self.module_args.offload_optimizer_states: + self.onload_optimizer_states() + torch.distributed.barrier() + log_rank_0(get_full_proc_memory_info('After onload'), self._logger) + + def offload(self, to_offload_weights=None, to_free_grad_buffers=None, to_offload_optimizer_states=None): + # The first time of calling `offload_weights` and `offload_main_weights` has a higher peak memory. + # So `free_grad_buffers` is called first to free memory, and `offload_weights` is called afterward + # to make more space for `offload_main_weights`. 
+ to_offload_weights = self.module_args.offload_weights if to_offload_weights is None else to_offload_weights + to_free_grad_buffers = self.module_args.free_grad_buffers if to_free_grad_buffers is None else to_free_grad_buffers + if to_offload_optimizer_states is None: + to_offload_optimizer_states = self.module_args.offload_optimizer_states + if to_free_grad_buffers or to_offload_weights or to_offload_optimizer_states: + log_rank_0(get_full_proc_memory_info('Before offload'), self._logger) + torch.distributed.barrier() + if self.trainable: + if to_free_grad_buffers: + self.free_grad_buffers() + if to_offload_weights: + self.offload_main_weights() + if to_offload_optimizer_states: + self.offload_optimizer_states() + if to_offload_weights: + self.offload_weights() + torch.distributed.barrier() + log_rank_0(get_full_proc_memory_info('After offload'), self._logger) diff --git a/chatlearn/models/vllm/__init__.py b/chatlearn/models/vllm/__init__.py new file mode 100644 index 00000000..b43cf845 --- /dev/null +++ b/chatlearn/models/vllm/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""vLLM related.""" diff --git a/chatlearn/models/vllm/hooks/__init__.py b/chatlearn/models/vllm/hooks/__init__.py new file mode 100644 index 00000000..93dc99fd --- /dev/null +++ b/chatlearn/models/vllm/hooks/__init__.py @@ -0,0 +1,25 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""vLLM Hooks.""" + +import importlib + +if importlib.util.find_spec("vllm"): + import vllm + from chatlearn.utils.constant import CURRENT_VLLM_VERSION, VLLMVersion + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_3_0.value: + from chatlearn.models.vllm.hooks import sampler + elif CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + from chatlearn.models.vllm.hooks import llm_engine, logits_processor, worker diff --git a/chatlearn/models/vllm/hooks/llm_engine.py b/chatlearn/models/vllm/hooks/llm_engine.py new file mode 100644 index 00000000..bff1d188 --- /dev/null +++ b/chatlearn/models/vllm/hooks/llm_engine.py @@ -0,0 +1,30 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Hooks of vllm-0.5.1 llm_engine remove __reduce__ function.""" + +import inspect + +# pylint: disable=unused-import,wildcard-import,unused-argument +from vllm.engine import llm_engine + + +source = inspect.getsource(llm_engine.LLMEngine.__reduce__) +if 'RuntimeError' in source: + def __reduce__(self): + # This is to ensure that the LLMEngine can be referenced in + # the closure used to initialize Ray worker actors + pass + + del llm_engine.LLMEngine.__reduce__ diff --git a/chatlearn/models/vllm/hooks/logits_processor.py b/chatlearn/models/vllm/hooks/logits_processor.py new file mode 100644 index 00000000..6ea2e740 --- /dev/null +++ b/chatlearn/models/vllm/hooks/logits_processor.py @@ -0,0 +1,42 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Hooks of vllm-0.5.1 logits_processor to allgather logits of all ranks.""" + +import inspect + +# pylint: disable=wildcard-import,ungrouped-imports +from vllm.model_executor.layers import logits_processor + + +source = inspect.getsource(logits_processor.LogitsProcessor._get_logits) +if 'tensor_model_parallel_all_gather' not in source: + import torch + from typing import Optional + from vllm.model_executor.layers.vocab_parallel_embedding import VocabParallelEmbedding + def _get_logits(self, hidden_states: torch.Tensor, + lm_head: VocabParallelEmbedding, + embedding_bias: Optional[torch.Tensor]) -> torch.Tensor: + # Get the logits for the next tokens. + logits = lm_head.linear_method.apply(lm_head, + hidden_states, + bias=embedding_bias) + from vllm.distributed.communication_op import tensor_model_parallel_all_gather # pylint: disable=import-outside-toplevel + logits = tensor_model_parallel_all_gather(logits) + # Remove paddings in vocab (if any). + if logits is not None: + logits = logits[:, :self.org_vocab_size] + return logits + + logits_processor.LogitsProcessor._get_logits = _get_logits diff --git a/chatlearn/models/vllm/hooks/sampler.py b/chatlearn/models/vllm/hooks/sampler.py new file mode 100644 index 00000000..17a8acd5 --- /dev/null +++ b/chatlearn/models/vllm/hooks/sampler.py @@ -0,0 +1,39 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Hooks of vllm-0.3.0 sampler to allgather logits of all ranks.""" + +import inspect +# pylint: disable=unused-import,wildcard-import +from vllm.model_executor.layers import sampler + + +source = inspect.getsource(sampler.Sampler._get_logits) +if 'tensor_model_parallel_all_gather' not in source: + import torch + from typing import Dict, List, Optional, Tuple + def _get_logits(self, hidden_states: torch.Tensor, embedding: torch.Tensor, + embedding_bias: Optional[torch.Tensor]) -> torch.Tensor: + # Get the logits for the next tokens. + logits = torch.matmul(hidden_states, embedding.t()) + if embedding_bias is not None: + logits += embedding_bias + from vllm.model_executor.parallel_utils.communication_op import tensor_model_parallel_all_gather # pylint: disable=import-outside-toplevel + logits = tensor_model_parallel_all_gather(logits) + # Remove paddings in vocab (if any). + if logits is not None: + logits = logits[:, :self.org_vocab_size] + return logits + + sampler.Sampler._get_logits = _get_logits diff --git a/chatlearn/models/vllm/hooks/worker.py b/chatlearn/models/vllm/hooks/worker.py new file mode 100644 index 00000000..9e4e8e77 --- /dev/null +++ b/chatlearn/models/vllm/hooks/worker.py @@ -0,0 +1,77 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Hooks of vllm-0.5.1 worker_base to remove metadata broadcasting.""" + + +import inspect + +# pylint: disable=unused-import,wildcard-import +from vllm.worker import worker_base + + +source = inspect.getsource(worker_base.LocalOrDistributedWorkerBase.execute_model) +if 'self.do_metadata_broadcast' in source: + from vllm.worker.worker_base import WorkerInput + from vllm.sequence import (ExecuteModelRequest, IntermediateTensors, + SamplerOutput) + from vllm.worker.model_runner_base import ModelRunnerBase, ModelRunnerInputBase + from vllm.distributed import broadcast_tensor_dict, get_pp_group + from typing import Any, Dict, List, Optional, Set, Tuple, Type, Union + + def execute_model( + self, + execute_model_req: Optional[ExecuteModelRequest] = None + ) -> Optional[List[SamplerOutput]]: + """Executes at least one model step on the given sequences, unless no + sequences are provided.""" + assert self.is_driver_worker + if self.is_driver_worker: + if execute_model_req is None: + return None + + worker_input: WorkerInput = self.prepare_worker_input( + execute_model_req=execute_model_req) + model_input: ModelRunnerInputBase = ( + self.model_runner.prepare_model_input( + execute_model_req.seq_group_metadata_list, + execute_model_req.virtual_engine, + execute_model_req.finished_requests_ids)) + num_steps = execute_model_req.num_steps + + self.execute_worker(worker_input) + + # If there is no input, we don't need to execute the model. 
+ if worker_input.num_seq_groups == 0: + return [] + + intermediate_tensors = None + if not get_pp_group().is_first_rank: + intermediate_tensors = IntermediateTensors( + get_pp_group().recv_tensor_dict()) + + output = self.model_runner.execute_model( + model_input, self.kv_cache[worker_input.virtual_engine] + if self.kv_cache is not None else None, intermediate_tensors, + num_steps) + + if not get_pp_group().is_last_rank: + get_pp_group().send_tensor_dict(output.tensors) + return [None] + + # Worker only supports single-step execution. Wrap the output in a + # list to conform to interface. + return output + + worker_base.LocalOrDistributedWorkerBase.execute_model = execute_model diff --git a/chatlearn/models/vllm/vllm_model.py b/chatlearn/models/vllm/vllm_model.py new file mode 100644 index 00000000..6b66e5d8 --- /dev/null +++ b/chatlearn/models/vllm/vllm_model.py @@ -0,0 +1,70 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""vllm-based model""" + +import torch +from torch import nn + +from chatlearn.utils.constant import CURRENT_VLLM_VERSION, VLLMVersion + +from chatlearn.utils.vllm_import_helper import LlamaForCausalLM +from chatlearn.utils.vllm_import_helper import QWenLMHeadModel +from chatlearn.utils.vllm_import_helper import Qwen2ForCausalLM +from chatlearn.utils.vllm_import_helper import get_model_architecture + +from chatlearn.utils.vllm_utils import ( + convert_llama_state_dict_from_megatron_to_vllm, + convert_qwen_state_dict_from_megatron_to_vllm, + load_checkpoint +) + + +class VLLMModel(nn.Module): + """VLLM based Model""" + + def __init__(self, config, model_args, cache_config, quant_config, lora_config): + super().__init__() + self.config = config + self.model_args = model_args + self.model_class = get_model_architecture(config) + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_3_0.value: + self.model = self.model_class(config.hf_config) + elif CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + self.model = self.model_class(config.hf_config, cache_config, quant_config, lora_config) + + def load_weights(self): + torch.distributed.barrier() + load_checkpoint(self, None, None) + torch.distributed.barrier() + + def load_state_dict(self, state_dict, strict=True, assign=False): + qwen_version = None + if isinstance(self.model, LlamaForCausalLM): + convert_state_dict_internal = convert_llama_state_dict_from_megatron_to_vllm + elif isinstance(self.model, QWenLMHeadModel): + qwen_version = 1.0 + convert_state_dict_internal = convert_qwen_state_dict_from_megatron_to_vllm + elif isinstance(self.model, Qwen2ForCausalLM): + qwen_version = 2.0 + convert_state_dict_internal = convert_qwen_state_dict_from_megatron_to_vllm + else: + raise RuntimeError(f"Unsupported model for vllm backend. 
\ + support [LlamaForCausalLM, QWenLMHeadModel, Qwen2ForCausalLM] only, while {self.model_class}") + + state_dict = convert_state_dict_internal(self.model_args, self.config.hf_config, qwen_version=qwen_version) + super().load_state_dict(state_dict, strict=strict) + + def forward(self, *args, **kwargs): + return self.model(*args, **kwargs) diff --git a/chatlearn/models/vllm_module.py b/chatlearn/models/vllm_module.py index d953a68c..5098a467 100644 --- a/chatlearn/models/vllm_module.py +++ b/chatlearn/models/vllm_module.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,26 +12,58 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""RLHF VLLM module""" +"""VLLM module""" +import gc +from typing import List, Tuple +import math +import time import torch from tqdm import tqdm -from vllm.config import CacheConfig, ParallelConfig, SchedulerConfig -from vllm.core.scheduler import Scheduler -from vllm.engine.llm_engine import LLMEngine -from vllm.model_executor.parallel_utils import parallel_state -from vllm.sampling_params import SamplingParams -from vllm.transformers_utils.config import get_config -from vllm.utils import Counter -from vllm.worker.worker import Worker - -from chatlearn.utils.vllm_utils import initialize_vllm, Megatron2TransformerSyncMap, VllmModelConfig -from .torch_module import RLHFTorchModule - -# pylint: disable=import-outside-toplevel -class RLHFVLLMModule(RLHFTorchModule, LLMEngine, Worker): - """RLHFVLLMModule is the class for RLHF Vllm models. 
+from chatlearn.models.vllm.vllm_model import VLLMModel +from chatlearn.utils.constant import QwenVersion +from chatlearn.utils.constant import CURRENT_VLLM_VERSION, VLLMVersion +from chatlearn.utils.vllm_import_helper import get_block_manager_cls +from chatlearn.utils.vllm_import_helper import get_pipeline_model_parallel_rank +from chatlearn.utils.vllm_import_helper import Scheduler +from chatlearn.utils.vllm_import_helper import EngineArgs +from chatlearn.utils.vllm_import_helper import LLM +from chatlearn.utils.vllm_import_helper import LLMEngine +from chatlearn.utils.vllm_import_helper import LlamaForCausalLM +from chatlearn.utils.vllm_import_helper import QWenLMHeadModel +from chatlearn.utils.vllm_import_helper import Qwen2ForCausalLM +from chatlearn.utils.vllm_import_helper import parallel_state +from chatlearn.utils.vllm_import_helper import SamplingParams +from chatlearn.utils.vllm_import_helper import Counter +from chatlearn.utils.vllm_import_helper import Worker +# additional imports for vLLM-0.5.1 +try: + from chatlearn.utils.vllm_import_helper import Detokenizer + from chatlearn.utils.vllm_import_helper import ExecuteModelRequest + from chatlearn.utils.vllm_import_helper import INPUT_REGISTRY + from chatlearn.utils.vllm_import_helper import _load_generation_config_dict + from chatlearn.utils.vllm_import_helper import SequenceGroupOutputProcessor + from chatlearn.utils.vllm_import_helper import StopChecker + from chatlearn.utils.vllm_import_helper import TextTokensPrompt +except ImportError: + print("Cannot import addtional module for vllm 0.5.1, please install vllm 0.5.1 first.") + +from chatlearn.utils.vllm_utils import initialize_vllm, Megatron2LlamaSyncMap, Megatron2QWenSyncMap + +from chatlearn.utils import utils +from chatlearn.utils.vllm_utils import get_model, print_rank_0 +from .torch_module import TorchModule +try: + from .megatron.memory_manager import InferenceMemoryManager +except ImportError: + InferenceMemoryManager = None 
+_LOGGING_INTERVAL_SEC = 5.0 + + +# pylint: disable=import-outside-toplevel,unexpected-keyword-arg,no-value-for-parameter,too-many-function-args +class VLLMModule(TorchModule, LLMEngine, LLM): + """VLLMModule is the class for vLLM models. Args ---- @@ -42,39 +74,150 @@ class RLHFVLLMModule(RLHFTorchModule, LLMEngine, Worker): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.log_stats = False - self._model_config = None # inference only if self.model_args.get("micro_batch_size") != self.module_args.generation_batch_size: self._logger.info(f"{self.name} Overwrite micro_batch_size with generation_batch_size {self.module_args.generation_batch_size}") self.model_args["micro_batch_size"] = self.module_args.generation_batch_size - self._init_args() + # parallel size + self.model_args["pipeline_model_parallel_size"] = self.module_args.pipeline_model_parallel_size + self.model_args["tensor_model_parallel_size"] = self.module_args.tensor_model_parallel_size - def _init_args(self): - self.seq_counter = Counter() - self.request_counter = Counter() + # precision + if self.model_args.get("fp16", False): + assert not self.model_args.get("bf16", False) + self.model_args["params_dtype"] = torch.half + if self.model_args.get("bf16", False): + assert not self.model_args.get("fp16", False) + self.model_args["params_dtype"] = torch.bfloat16 - self.scheduler_config = SchedulerConfig( - self.model_args.get("max_num_batched_tokens"), - self.model_args.get("micro_batch_size"), - self.model_args.get("seq_length"), - self.model_args.get("max_paddings"), - ) + # To save gpu memory, we set `prompt_logprobs=None` default. 
If need to evaluate loss on prompts, please set prompt_logprobs=1 + if self.model_args.get("loss_on_prompts", False) and self.model_args.get("prompt_logprobs", None) is None: + raise RuntimeError("expect loss_on_prompts to be false for memory reduction, or set prompt_logprobs in sampling_params to be `1`.") - self.cache_config = CacheConfig( - self.model_args.get("block_size"), - self.model_args.get("gpu_memory_utilization"), - self.model_args.get("swap_space"), - None, - ) + self.scheduler = None + self._need_to_reset_scheduler = True + self._log_metrics = self.model_args.get("log_metrics", False) + self._init_args() - self.parallel_config = ParallelConfig( - self.module_args.pipeline_model_parallel_size, - self.module_args.tensor_model_parallel_size, - False + def _init_args(self): + engine_args = EngineArgs( + model=self.model_args.get("tokenizer"), + tokenizer=self.model_args.get("tokenizer"), + tokenizer_mode=self.model_args.get("tokenizer_mode", "auto"), + trust_remote_code=self.model_args.get("trust_remote_code", True), + tensor_parallel_size=self.module_args.tensor_model_parallel_size, + pipeline_parallel_size=self.module_args.pipeline_model_parallel_size, + dtype=self.model_args.get("params_dtype", "auto"), + quantization=self.model_args.get("quantization", None), + revision=self.model_args.get("revision", None), + tokenizer_revision=self.model_args.get("tokenizer_revision", None), + seed=self.model_args.get("seed", 0), + gpu_memory_utilization=self.model_args.get("gpu_memory_utilization", 0.90), + block_size=self.model_args.get("block_size"), + swap_space=self.model_args.get("swap_space"), + max_num_batched_tokens=self.model_args.get("max_num_batched_tokens"), + max_num_seqs=self.model_args.get("micro_batch_size"), + max_model_len=self.model_args.get("seq_length"), + enforce_eager=True, + disable_custom_all_reduce=True ) + self.quant_config = None + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_3_0.value: + engine_args.max_paddings = 
self.model_args.get("max_paddings", 256) + engine_args.max_context_len_to_capture = self.model_args.get("max_context_len_to_capture", 8192) + self.model_config, self.cache_config, self.parallel_config, self.scheduler_config, self.lora_config = \ + engine_args.create_engine_configs() + self.worker = Worker( + self.model_config, + self.parallel_config, + self.scheduler_config, + local_rank=0, + rank=0, + distributed_init_method=None, + lora_config=self.lora_config, + kv_cache_dtype=self.cache_config.cache_dtype, + is_driver_worker=True, + ) + self._init_tokenizer() + elif CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + engine_args.max_seq_len_to_capture = self.model_args.get("max_context_len_to_capture", 8192) + engine_config = \ + engine_args.create_engine_config() + self.cache_config = engine_config.cache_config + self.device_config = engine_config.device_config + self.load_config = engine_config.load_config + self.lora_config = engine_config.lora_config + self.model_config = engine_config.model_config + self.parallel_config = engine_config.parallel_config + self.scheduler_config = engine_config.scheduler_config + + self.generation_config_fields = _load_generation_config_dict( + self.model_config) + self.input_processor = INPUT_REGISTRY.create_input_processor( + self.model_config) + + self.worker = Worker( + self.model_config, + self.parallel_config, + self.scheduler_config, + self.device_config, + self.cache_config, + self.load_config, + local_rank=0, + rank=0, + distributed_init_method=None, + lora_config=self.lora_config, + is_driver_worker=True, + ) + self.tokenizer = self._init_tokenizer() + self.detokenizer = Detokenizer(self.tokenizer) + + def setup(self): + """Set up model and load checkpoint""" + model = [get_model(self.model_provider, self.model_args)] + + assert len(model) == 1, "Above condition should have caught this" + self.model = model[0] + + def model_provider(self): + """Build the model.""" + print_rank_0('building vLLM model ...') + model = 
VLLMModel(self.model_config, self.model_args, self.cache_config, self.quant_config, self.lora_config) + + return model + + def _reset_metrics_stats_args(self): + self.start_time = None + # Logging. + self.last_stats_time = 0.0 + self.forward_count = 0 + self.num_done_requests = 0 + self.num_processed_prompt = 0 + self.num_generated_tokens = 0 + self.action_length = 0 + self.action_max_length = float("-inf") + self.action_min_length = float("inf") + self.batch_size_stats = 0.0 + self.gpu_cache_usage = 0.0 + self.cpu_cache_usage = 0.0 + self.max_prompt_length_static_batching = [ + 0 for _ in range(math.ceil(self.num_requests/self.scheduler_config.max_num_seqs))] + self.max_output_length_static_batching = [ + 0 for _ in range(math.ceil(self.num_requests/self.scheduler_config.max_num_seqs))] + + def reset_vllm(self): + self.request_counter = Counter() + + self.log_stats = self.model_args.get("log_stats", False) + # Logging. + self.last_logging_time = 0.0 + # List of (timestamp, num_tokens) + self.num_prompt_tokens: List[Tuple[float, int]] = [] + # List of (timestamp, num_tokens) + self.num_generation_tokens: List[Tuple[float, int]] = [] self.sliding_window = self.cache_config.sliding_window def add_extra_args(self, parser): @@ -98,48 +241,173 @@ def init(self): """ :meta private: """ - self.model_args["params_dtype"] = torch.float - if self.model_args.get("fp16", False): - assert not self.model_args.get("bf16", False) - self.model_args["params_dtype"] = torch.half - if self.model_args.get("bf16", False): - assert not self.model_args.get("fp16", False) - self.model_args["params_dtype"] = torch.bfloat16 - self.model_args["pipeline_model_parallel_size"] = self.module_args.pipeline_model_parallel_size - self.model_args["tensor_model_parallel_size"] = self.module_args.tensor_model_parallel_size + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + parallel_state.set_custom_all_reduce(not self.parallel_config.disable_custom_all_reduce) 
initialize_vllm(extra_args_provider=self.add_extra_args, ignore_unknown_args=True, args_dict=self.model_args) def build_scheduler(self): - self.scheduler = Scheduler(self.scheduler_config, self.cache_config) - - def _init_cache(self) -> None: + self.seq_counter = Counter() + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_3_0.value: + if self.scheduler is None: + self.scheduler = Scheduler(self.scheduler_config, self.cache_config, None) + else: + BlockSpaceManagerImpl = get_block_manager_cls(None) + self.scheduler.block_manager = BlockSpaceManagerImpl( # pylint: disable=abstract-class-instantiated + block_size=self.cache_config.block_size, + num_gpu_blocks=self.cache_config.num_gpu_blocks, + num_cpu_blocks=self.cache_config.num_cpu_blocks, + sliding_window=self.cache_config.sliding_window) + elif CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + if self.scheduler is None: + self.scheduler = [ + Scheduler(self.scheduler_config, self.cache_config, None, + self.parallel_config.pipeline_parallel_size) + for _ in range(self.parallel_config.pipeline_parallel_size) + ] + self.output_processor = ( + SequenceGroupOutputProcessor.create_output_processor( + self.scheduler_config, + self.detokenizer, + self.scheduler, + self.seq_counter, + self.get_tokenizer_for_seq, + stop_checker=StopChecker( + self.scheduler_config.max_model_len, + self.get_tokenizer_for_seq, + ), + )) + else: + version = "v1" + if self.scheduler_config.use_v2_block_manager: + version = "v2" + if self.scheduler_config.embedding_mode: + version = "embedding" + + BlockSpaceManagerImpl = get_block_manager_cls(version) + num_gpu_blocks = self.cache_config.num_gpu_blocks + if num_gpu_blocks: + num_gpu_blocks //= self.pipeline_model_parallel_size() + num_cpu_blocks = self.cache_config.num_cpu_blocks + if num_cpu_blocks: + num_cpu_blocks //= self.pipeline_model_parallel_size() + + for scheduler in self.scheduler: + scheduler.block_manager = BlockSpaceManagerImpl( # pylint: disable=abstract-class-instantiated + 
block_size=self.cache_config.block_size, + num_gpu_blocks=num_gpu_blocks, + num_cpu_blocks=num_cpu_blocks, + sliding_window=self.cache_config.sliding_window, + enable_caching=self.cache_config.enable_prefix_caching) + + def _reset_scheduler(self): + # reset scheduler + scheduler_list = self.scheduler if isinstance(self.scheduler, list) else [self.scheduler] + for scheduler in scheduler_list: + scheduler.block_manager.reset() + + def reinit_cache_engine(self): + # reinit cache engine + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_3_0.value: + self.worker.init_cache_engine(cache_config=self.cache_config) + self.worker.warm_up_model() + elif CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + self.worker.initialize_cache(self.cache_config.num_gpu_blocks, self.cache_config.num_cpu_blocks) + + def free_cache_engine(self): + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_3_0.value: + self.worker.gpu_cache = None # pylint: disable=access-member-before-definition + self.worker.cache_engine.cpu_cache = None + self.worker.cache_engine.gpu_cache = None + elif CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + for ele in self.worker.gpu_cache: # pylint: disable=unused-variable + ele = None + self.worker.gpu_cache = None # pylint: disable=access-member-before-definition + + for c_e in self.worker.cache_engine: + c_e.cpu_cache = None + c_e.gpu_cache = None + self.worker.cache_engine = None + + self.clear_cache() + + def clear_cache(self): + if not self.timers("gc").started_: + self.timers("gc").start() + gc.collect() + self.timers("gc").stop() + + self.empty_cache() + + def profile_cache_blocks(self): """Profiles the memory usage and initializes the KV cache.""" # Get the maximum number of blocks that can be allocated on GPU and CPU. 
- num_gpu_blocks, num_cpu_blocks = self.profile_num_available_blocks( - self.cache_config.block_size, - self.cache_config.gpu_memory_utilization, - self.cache_config.swap_space_bytes - ) + self.clear_cache() + + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_3_0.value: + num_gpu_blocks, num_cpu_blocks = self.worker.profile_num_available_blocks( + self.cache_config.block_size, + self.cache_config.gpu_memory_utilization, + self.cache_config.swap_space_bytes, + self.cache_config.cache_dtype + ) + elif CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + num_gpu_blocks, num_cpu_blocks = self.worker.determine_num_available_blocks() + else: + raise RuntimeError(f"Unsupported vllm version {CURRENT_VLLM_VERSION}, expect one of {list(VLLMVersion)}") - # FIXME(woosuk): Change to debug log. - self._logger.info(f"# GPU blocks: {num_gpu_blocks}, " - f"# CPU blocks: {num_cpu_blocks}") + self._need_to_reset_scheduler = False + self.clear_cache() + return num_gpu_blocks, num_cpu_blocks + + def set_cache_config(self, num_gpu_blocks, num_cpu_blocks): + # debug log. if num_gpu_blocks <= 0: raise ValueError("No available memory for the cache blocks. " "Try increasing `gpu_memory_utilization` when " "initializing the engine.") + self._logger.info(f"# GPU blocks: {num_gpu_blocks}, " + f"# CPU blocks: {num_cpu_blocks}") + self.cache_config.num_gpu_blocks = num_gpu_blocks self.cache_config.num_cpu_blocks = num_cpu_blocks + self._need_to_reset_scheduler = False + + def convert_v1_inputs(self, prompts, prompt_token_ids): + num_requests = len(prompts) + assert num_requests == len(prompt_token_ids), \ + ("The lengths of prompts and prompt_token_ids must be the same.") + + inputs = [] + for i in range(num_requests): + if prompts[i] is None: + assert isinstance(prompt_token_ids[i], list), \ + f"Expect prompt_token_ids[{i}] is List[int] when prompt is None, while {prompt_token_ids[i]}." 
+ if prompt_token_ids[i] is None: + assert isinstance(prompts[i], str), \ + f"Expect prompts[{i}] is a string when prompt_token_ids is None, while {prompts[i]}." + item = TextTokensPrompt( + prompt=prompts[i], + prompt_token_ids=prompt_token_ids[i]) + inputs.append(item) + + return inputs + + def _add_request_internal(self, prompt_list, prompt_token_id_list, is_eval=False): + if self._need_to_reset_scheduler: + self._reset_scheduler() + self.reset_vllm() + + # sampling params + temperature = 0.0 + if not self.model_args.get("use_beam_search"): + temperature = self.model_args.get("eval_temperature", 1.0) if is_eval else self.model_args.get("temperature", 1.0) + top_p = self.model_args.get("eval_top_p") if is_eval else self.model_args.get("top_p") + top_k = self.model_args.get("eval_top_k") if is_eval else self.model_args.get("top_k") - # Initialize the cache. - self.init_cache_engine(cache_config=self.cache_config) - self._logger.info("success to call init_cache_engine") - - def _add_request_internal(self, prompt_list, prompt_token_id_list): stop = self.model_args.get("stop_token_list", None) if isinstance(stop, str): stop = stop.split(";") @@ -159,24 +427,41 @@ def _add_request_internal(self, prompt_list, prompt_token_id_list): sampling_params = SamplingParams( n=self.model_args.get("n"), - temperature=0.0 if self.model_args.get("use_beam_search") else 1.0, - top_p=self.model_args.get("top_p"), + temperature=temperature, + top_p=top_p, + top_k=top_k, use_beam_search=self.model_args.get("use_beam_search"), ignore_eos=self.model_args.get("ignore_eos"), stop=stop, max_tokens=max_tokens, - logprobs=1 - ) - self.add_request( - request_id, - prompt, - sampling_params, - prompt_token_ids=prompt_token_ids + logprobs=1, + prompt_logprobs=self.model_args.get("prompt_logprobs", None), + skip_special_tokens=False ) + + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_3_0.value: + self.add_request( + request_id, + prompt, + sampling_params, + prompt_token_ids=prompt_token_ids + ) + 
elif CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + inputs = self.convert_v1_inputs( + prompts=[prompt], + prompt_token_ids=[prompt_token_ids], + )[0] + self.add_request( + request_id, + inputs, + sampling_params + ) + self.outputs = [] self.num_requests = self.get_num_unfinished_requests() - self.pbar = tqdm(total=self.num_requests, desc="Processed prompts") - return "ok" + self._reset_metrics_stats_args() + self.pbar = tqdm(total=self.num_requests, desc=f"Processed prompts (replica {self.replica_id+1}/{self._num_replica})") + self._need_to_reset_scheduler = True def model_setup(self): """ @@ -188,31 +473,67 @@ def model_setup(self): assert hasattr(self, "model") assert hasattr(self, "optimizer") assert hasattr(self, "opt_param_scheduler") + self.model.eval() else: assert hasattr(self, "model") self.model.eval() + self.worker.model_runner.model = self.model.model + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + self.worker.device = torch.device(f"cuda:{torch.cuda.current_device()}") + self.worker.init_gpu_memory = torch.cuda.mem_get_info()[0] + + if self.module_args.offload_weights: + if InferenceMemoryManager is None: + raise Exception("Import InferenceMemoryManager failed, you may need to set right Megatron path first.") + self._memory_manager = InferenceMemoryManager( + self.model, self.name, self.timers, + self.runtime_args.bucket_size_mb_in_memory_manager, + ) + self.offload() - self._logger.info("start to init cache") - self._init_cache() - - def map_src_to_dst(self, src_names): + def get_pipeline_layer_offset(self, num_src_pipeline_stage, src_pipe_stage): """ + get layer_idx offset from src model to tgt model + Args: + num_src_pipeline_stage: number of pipeline stage in src model + src_pipe_stage: src model pipeline rank :meta private: """ - # TODO(jiang.jl): compatible with other models. 
- sync_map = Megatron2TransformerSyncMap(src_names) - return sync_map.dst_names - - @property - def model_config(self): + src_layers_per_stage = self.num_layers() // num_src_pipeline_stage + dst_layers_per_stage = self.num_layers() // self.pipeline_model_parallel_size() + assert dst_layers_per_stage % src_layers_per_stage == 0, \ + "We assume pipeline stage of target model is not smaller than src model, and is divisible by src model" + mapping_interval = dst_layers_per_stage // src_layers_per_stage + rank = src_pipe_stage % mapping_interval + layer_offset = rank * src_layers_per_stage + return layer_offset + + def map_src_to_dst(self, src_names, num_src_pipeline_stage, src_pipe_stage): """ :meta private: """ - if self._model_config is None: - hf_config = get_config(self.model_args.get("tokenizer"), True, None) - hf_config.torch_dtype = self.model_args.get("params_dtype") - self._model_config = VllmModelConfig(hf_config) - return self._model_config + layer_offset = self.get_pipeline_layer_offset(num_src_pipeline_stage, src_pipe_stage) + if isinstance(self.model.model, QWenLMHeadModel): + sync_map_cls = Megatron2QWenSyncMap + from chatlearn.utils.vllm_utils import fix_qwen_query_key_value_ordering # pylint: disable=import-outside-toplevel + self._to_fix_qkv_ordering_func = fix_qwen_query_key_value_ordering + sync_map = sync_map_cls(src_names, layer_offset, QwenVersion.v_1.value) + elif isinstance(self.model.model, Qwen2ForCausalLM): + sync_map_cls = Megatron2QWenSyncMap + from chatlearn.utils.megatron_import_helper import fix_query_key_value_ordering + self._to_fix_qkv_ordering_func = fix_query_key_value_ordering + sync_map = sync_map_cls(src_names, layer_offset, QwenVersion.v_2.value) + elif isinstance(self.model.model, LlamaForCausalLM): + sync_map_cls = Megatron2LlamaSyncMap + from chatlearn.utils.vllm_utils import fix_qwen_query_key_value_ordering # pylint: disable=import-outside-toplevel + self._to_fix_qkv_ordering_func = fix_qwen_query_key_value_ordering + 
sync_map = sync_map_cls(src_names, layer_offset) + else: + raise RuntimeError(f"Unsupported model {type(self.model.model)}, Expect QWenLMHeadModel, Qwen2ForCausalLM or LlamaForCausalLM.") + self._concat_params_dict = sync_map.concat_params_dict + self._to_fix_act_ordering_dict = sync_map.to_fix_act_ordering_dict + self._to_fix_qkv_ordering_dict = sync_map.to_fix_qkv_ordering_dict + return sync_map.src_names, sync_map.dst_names def pipeline_model_parallel_size(self): """ @@ -254,7 +575,7 @@ def pipeline_parallel_rank(self): """ :meta private: """ - return parallel_state.get_pipeline_model_parallel_rank() + return get_pipeline_model_parallel_rank() def num_layers(self): """ @@ -263,19 +584,41 @@ def num_layers(self): return self.model_config.hf_config.num_hidden_layers def schedule(self): - seq_group_metadata_list, self.scheduler_outputs, self.ignored = self._schedule() + if self.start_time is None: + self.start_time = time.monotonic() + + scheduler = self.scheduler[0] if isinstance(self.scheduler, list) else self.scheduler + self.seq_group_metadata_list, self.scheduler_outputs = scheduler.schedule() + if self.scheduler_outputs.is_empty(): - return self.ignored + return {} - return { - "seq_group_metadata_list" : seq_group_metadata_list, + data = { + "seq_group_metadata_list" : self.seq_group_metadata_list, "blocks_to_swap_in" : self.scheduler_outputs.blocks_to_swap_in, "blocks_to_swap_out" : self.scheduler_outputs.blocks_to_swap_out, "blocks_to_copy" : self.scheduler_outputs.blocks_to_copy } + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + finished_requests_ids = self.scheduler[0].get_and_reset_finished_requests_ids() + data.update({ + "num_lookahead_slots": self.scheduler_outputs.num_lookahead_slots, + "running_queue_size": self.scheduler_outputs.running_queue_size, + "finished_requests_ids": finished_requests_ids + }) + + return data + def process_model_outputs(self, output): - step_outputs = self._process_model_outputs(output, self.scheduler_outputs) + 
self.ignored + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_3_0.value: + step_outputs = self._process_model_outputs(output, self.scheduler_outputs) + elif CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + step_outputs = self._process_model_outputs( + output, self.scheduler_outputs.scheduled_seq_groups, + self.scheduler_outputs.ignored_seq_groups, self.seq_group_metadata_list) + else: + raise RuntimeError(f"Unsupported vllm version {CURRENT_VLLM_VERSION}, expect one of {list(VLLMVersion)}") done = 0 for out in step_outputs: @@ -288,16 +631,33 @@ def process_model_outputs(self, output): if self.num_requests <= 0: self.pbar.close() - return self.num_requests + if self._log_metrics: + self.log_metrics_stats(done) - def execute_step(self, seq_group_metadata_list, blocks_to_swap_in, blocks_to_swap_out, blocks_to_copy): + return self.num_requests - output = self.execute_model( - seq_group_metadata_list, - blocks_to_swap_in, - blocks_to_swap_out, - blocks_to_copy, - ) + @torch.inference_mode() + def execute_step(self, data): + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_3_0.value: + output = self.worker.execute_model( + data["seq_group_metadata_list"], + data["blocks_to_swap_in"], + data["blocks_to_swap_out"], + data["blocks_to_copy"] + ) + elif CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + execute_model_req = ExecuteModelRequest( + seq_group_metadata_list=data["seq_group_metadata_list"], + blocks_to_swap_in=data["blocks_to_swap_in"], + blocks_to_swap_out=data["blocks_to_swap_out"], + blocks_to_copy=data["blocks_to_copy"], + num_lookahead_slots=data["num_lookahead_slots"], + running_queue_size=data["running_queue_size"], + finished_requests_ids=data["finished_requests_ids"] + ) + output = self.worker.execute_model(execute_model_req=execute_model_req) + else: + raise RuntimeError(f"Unsupported vllm version {CURRENT_VLLM_VERSION}, expect one of {list(VLLMVersion)}") if hasattr(self, "scheduler_outputs"): return self.process_model_outputs(output) @@ -305,5 +665,109 @@ 
def execute_step(self, seq_group_metadata_list, blocks_to_swap_in, blocks_to_swa return output def decode(self): + if not self.timers("decode").started_: + self.timers("decode").start() self.outputs = sorted(self.outputs, key=lambda x: int(x.request_id)) - return self.decode_internal(self.outputs) + rets = self.decode_internal(self.outputs) + rets = utils.to_device('cpu', rets) + self.timers("decode").stop() + return rets + + def offload_weights(self): + """ + offload weights + """ + if self.module_args.offload_weights: + self._memory_manager.offload_weights() + + def onload_weights(self): + """ + onload weights + """ + if self.module_args.offload_weights: + self._memory_manager.onload_weights() + + def log_metrics_stats(self, num_done_requests): + now = time.monotonic() + self.num_done_requests += num_done_requests + scheduler_list = self.scheduler if isinstance(self.scheduler, list) else [self.scheduler] + avg_request_throughput = self.num_done_requests / (now - self.start_time) + if self.scheduler_outputs.prompt_run: + self.num_processed_prompt += self.scheduler_outputs.num_batched_tokens + else: + self.num_generated_tokens += self.scheduler_outputs.num_batched_tokens + + avg_generation_throughput = self.num_generated_tokens / (now - self.start_time) + avg_prompt_throughput = self.num_processed_prompt / (now - self.start_time) + + self.forward_count += 1 + total_num_gpu_blocks = self.cache_config.num_gpu_blocks + num_free_gpu_blocks = sum( + scheduler.block_manager.get_num_free_gpu_blocks() for scheduler in scheduler_list) + num_used_gpu_blocks = total_num_gpu_blocks - num_free_gpu_blocks + self.gpu_cache_usage += num_used_gpu_blocks / total_num_gpu_blocks + avg_gpu_cache_usage = self.gpu_cache_usage / self.forward_count + + total_num_cpu_blocks = self.cache_config.num_cpu_blocks + if total_num_cpu_blocks > 0: + num_free_cpu_blocks = sum( + scheduler.block_manager.get_num_free_cpu_blocks() for scheduler in scheduler_list) + num_used_cpu_blocks = 
total_num_cpu_blocks - num_free_cpu_blocks + cpu_cache_usage = num_used_cpu_blocks / total_num_cpu_blocks + else: + cpu_cache_usage = 0.0 + + self.cpu_cache_usage += cpu_cache_usage + avg_cpu_cache_usage = self.cpu_cache_usage / self.forward_count + + for idx in range(self.num_done_requests - num_done_requests, self.num_done_requests): + output = self.outputs[idx] + prompt_length = len(output.prompt_token_ids) + output_length = len(output.outputs[0].token_ids) + batch_index = int(output.request_id / self.scheduler_config.max_num_seqs) + self.max_prompt_length_static_batching[batch_index] = max( + self.max_prompt_length_static_batching[batch_index], prompt_length) + self.max_output_length_static_batching[batch_index] = max( + self.max_output_length_static_batching[batch_index], output_length) + self.action_length += output_length + self.action_max_length = max(self.action_max_length, output_length) + self.action_min_length = min(self.action_min_length, output_length) + action_length_mean = float(self.action_length / self.num_done_requests) if self.num_done_requests else 0.0 + + for scheduler in scheduler_list: + self.batch_size_stats += len(scheduler.running) + avg_batch_size = self.batch_size_stats / self.forward_count + + if not self.num_requests or (now - self.last_stats_time >= _LOGGING_INTERVAL_SEC): + self.last_stats_time = now + message = "" + if not self.num_requests: + batch_size = [self.scheduler_config.max_num_seqs \ + for _ in range(math.ceil(self.num_done_requests / self.scheduler_config.max_num_seqs))] + if self.num_done_requests % self.scheduler_config.max_num_seqs: + batch_size[-1] = self.num_done_requests % self.scheduler_config.max_num_seqs + num_prompt_tokens_static_batching = sum( # pylint: disable=consider-using-generator + [prompt_len * bs for prompt_len, bs in zip(self.max_prompt_length_static_batching, batch_size)]) + num_output_tokens_static_batching = sum( # pylint: disable=consider-using-generator + [output_length * bs for output_length, 
bs in zip(self.max_output_length_static_batching, batch_size)]) + message = f"num_processed_prompts_continuous_batching: {self.num_processed_prompt}, " \ + f"num_processed_prompts_static_batching: {num_prompt_tokens_static_batching}, " \ + f"num_processed_prompts_continuous_batching/num_processed_prompts_static_batching: \ + {self.num_processed_prompt/num_prompt_tokens_static_batching:.1f}, " \ + f"num_output_tokens_continuous_batching: {self.num_generated_tokens}, " \ + f"num_output_tokens_static_batching: {num_output_tokens_static_batching}, " \ + f"num_output_tokens_continuous_batching/num_output_tokens_static_batching: \ + {self.num_generated_tokens/num_output_tokens_static_batching:.1f}, " \ + + self._logger.info(f"already generated responses for {self.num_done_requests} reqs, " + f"avg_request_throughput: {avg_request_throughput:.1f} reqs/s, " + f"avg_prompt_throughput: {avg_prompt_throughput:.1f} tokens/s, " + f"avg_generation_throughput: {avg_generation_throughput:.1f} tokens/s, " + f"avg_batch_size: {avg_batch_size:.1f} reqs, " + f"avg_gpu_cache_usage: {avg_gpu_cache_usage * 100:.1f}%, " + f"avg_cpu_cache_usage: {avg_cpu_cache_usage * 100:.1f}%, " + f"action_length_mean: {action_length_mean:.1f}, " + f"action_max_length: {self.action_max_length if self.num_done_requests else 'inf'}, " + f"action_min_length: {self.action_min_length if self.num_done_requests else '-inf'}, " + f"{message}") +# pylint: enable=import-outside-toplevel,unexpected-keyword-arg,no-value-for-parameter,too-many-function-args diff --git a/chatlearn/runtime/__init__.py b/chatlearn/runtime/__init__.py index 028ddcd3..df6408db 100644 --- a/chatlearn/runtime/__init__.py +++ b/chatlearn/runtime/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/chatlearn/runtime/decorator.py b/chatlearn/runtime/decorator.py index b44b8121..9f1d1f9d 100644 --- a/chatlearn/runtime/decorator.py +++ b/chatlearn/runtime/decorator.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,7 +23,6 @@ from chatlearn.utils import future from chatlearn.utils import utils from chatlearn.utils.global_vars import _EXIT_ACTOR_NAME -from chatlearn.utils.logger import logger from chatlearn.utils.utils import execute @@ -50,7 +49,7 @@ def inner(self, *args, **kwargs): def timeit(func, func_name): def inner(self, *args, **kwargs): - if self.rlhf_args.nsys: + if self.runtime_args.nsys: nvtx.range_push(func_name) if self.is_last_rank(): # for the class inherited from base, it may call multiple times, so use the first start time @@ -65,7 +64,7 @@ def inner(self, *args, **kwargs): self.profiler.step() if self.profiler is not None and self._iteration ==3 and self.replica_id == 0 and func_name in ["forward_step", "train_step"]: self.profiler.stop() - if self.rlhf_args.nsys: + if self.runtime_args.nsys: nvtx.range_pop() return ret @@ -109,7 +108,7 @@ def concat_along_batch(tensors): return batched -def preprocess_compute(func, is_forward_step): +def preprocess_compute(func, is_forward_step, trainable): """ 1. if is_forward_step is True, merge a list of dict into one dict, i.e., merge inputs of forward_step. 2. 
split a list of data for data_parallel, this is used for train_step @@ -124,22 +123,20 @@ def inner(self, *args, **kwargs): for arg in args: merged.update(arg) args = [merged] - if self.data_parallel_size is not None and \ - self.data_parallel_rank is not None and \ - self.data_parallel_size > 1: - assert self.trainable - data_list = args[0] - assert isinstance(data_list, list) - start_idx, end_idx = utils.split_index(len(data_list), self.data_parallel_size)[self.data_parallel_rank] - args = list(args) - sub_data_list = data_list[start_idx: end_idx] - args[0] = sub_data_list - to_empty_cache = kwargs.pop('to_empty_cache') if 'to_empty_cache' in kwargs else False - is_last_batch = kwargs.pop('is_last_batch') if 'is_last_batch' in kwargs else False - is_eval = kwargs.pop('is_eval') if 'is_eval' in kwargs else False + + def get_kwarg(key): + return kwargs.pop(key) if key in kwargs else False + to_empty_cache = get_kwarg('to_empty_cache') + to_onload = get_kwarg('to_onload') + to_offload = get_kwarg('to_offload') + is_last_batch = get_kwarg('is_last_batch') + is_eval = get_kwarg('is_eval') + + if to_onload: + self.onload() generation_batch_size = self.module_args.generation_batch_size final_results = None - if not self.trainable and generation_batch_size: + if not trainable and generation_batch_size: # split into micro-batches if generation_batch_size < input_batch, then concat the results # this happens when different models have difference batch sizes input_batch = 0 @@ -159,7 +156,9 @@ def inner(self, *args, **kwargs): self._iteration += 1 ret = utils.to_device('cpu', ret) results.append(ret) - if self.is_last_rank(): + # for model with DP, we need to return results from all ranks + # for model with TP/PP, only return the results from last rank + if self.is_last_rank() or self.data_parallel_size is None or self.data_parallel_size > 1: final_results = concat_along_batch(results) else: if is_forward_step: @@ -167,26 +166,37 @@ def inner(self, *args, **kwargs): ret = 
func(self, *args, **kwargs) ret = utils.to_device('cpu', ret) self._iteration += 1 - if self.is_last_rank(): + final_results = None + # for model with DP, we need to return results from all ranks + # for model with TP/PP, only return the results from last rank + if self.is_last_rank() or self.data_parallel_size is None or self.data_parallel_size > 1: final_results = ret else: + kwargs["iteration"] = self._train_iteration + self._train_iteration += 1 ret = func(self, *args, **kwargs) ret = utils.to_device('cpu', ret) if self.is_last_rank(): final_results = ret + if to_offload: + self.offload() if to_empty_cache: self.empty_cache() if is_last_batch and not is_eval: - self.rlhf_args.consumed_samples += self.rlhf_args.sample_per_episode + self.runtime_args.consumed_samples += self.runtime_args.sample_per_episode return final_results return inner def decorate_class_func(cls, func_name, decorator, *args, **kwargs): + if not hasattr(cls, func_name): + return func = getattr(cls, func_name) if func.__qualname__.startswith(decorator.__name__): # already decorated - logger.warning(f"{func_name} {func} already decorated with {decorator}") + # This usually occurs when one class inherits from another class, + # for example, if 'reference' inherits from 'policy', then methods like 'offload_optimizer_states' + # would be decorated in the base class, eliminating the need for repeated decoration. return setattr(cls, func_name, decorator(func, *args, **kwargs)) diff --git a/chatlearn/runtime/dist_actor.py b/chatlearn/runtime/dist_actor.py index bb38e664..e7948da4 100644 --- a/chatlearn/runtime/dist_actor.py +++ b/chatlearn/runtime/dist_actor.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -14,31 +14,38 @@ # ============================================================================== """Dist Actor""" +from collections import defaultdict +import importlib import inspect from functools import partial import ray from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy -from chatlearn.models.rlhf_module import RLHFModule +from chatlearn.models.base_module import BaseModule from chatlearn.utils import future from chatlearn.utils.utils import parse_function_args +vllm_exist = importlib.util.find_spec("vllm") +if vllm_exist: + from chatlearn.models.vllm_module import VLLMModule + RAY_REMOTE = "remote" class DistActor: """Manage a collection of actors""" - def __init__(self, model: RLHFModule, + def __init__(self, model: BaseModule, gpu_per_node, error_signal, port_manager, replica_id=0, storage=None): - self.total_device = model.total_device + self.total_gpu = model.total_gpu + self.total_cpu = model.total_cpu self.gpu_per_process = model.gpu_per_process - self.num_device_per_replica = model.num_device_per_replica + self.num_gpu_per_replica = model.num_gpu_per_replica self.trainable = model.trainable self.gpu_per_node = gpu_per_node self.model = model @@ -53,15 +60,14 @@ def __init__(self, model: RLHFModule, self._init_done = False self._placement_group = None self.rank_to_actors = {} - self.eval_call_func = model.eval_call_func @property def module_args(self): return self.model.module_args @property - def rlhf_args(self): - return self.model.rlhf_args + def runtime_args(self): + return self.model.runtime_args @property def master(self): @@ -90,15 +96,19 @@ def add_remote_func(self): dist_call = partial(self.call_remote_funcs, func_name) setattr(self, func_name, dist_call) + def call_actor_remote_func(self, actor, func_name, *args, **kwargs): + func = getattr(actor, func_name) + remote_func = getattr(func, RAY_REMOTE) + res = remote_func(*args, **kwargs) + return res + def call_remote_funcs(self, func_name, *args, **kwargs): """ Call 
remote functions for a collection of actors. """ results = [] for actor in self.all_actors: - func = getattr(actor, func_name) - remote_func = getattr(func, RAY_REMOTE) - res = remote_func(*args, **kwargs) + res = self.call_actor_remote_func(actor, func_name, *args, **kwargs) results.append(res) return results @@ -132,6 +142,14 @@ def _setup_collective_group(self, rank_offset, world_size, group_name, backend=" self.all_ranks = all_ranks return refs + def _setup_ranks(self, rank_offset): + all_ranks = [] + for i, actor in enumerate(self.all_actors): + rank = i + rank_offset + all_ranks.append(rank) + self.rank_to_actors[rank] = actor + self.all_ranks = all_ranks + def terminate(self): # terminate when catching exceptions for actor in self.all_actors: @@ -145,6 +163,15 @@ def placement_group(self): def placement_group(self, pg): self._placement_group = pg + def group_dist_actors_by_tp_rank(self): + self.dp_rank_to_actors = defaultdict(list) + self.data_parallel_size = future.get(self.all_actors[0].get_data_parallel_size.remote()) + if self.data_parallel_size is None: + self.data_parallel_size = 1 + dp_ranks = future.wait([actor.get_data_parallel_rank.remote() for actor in self.all_actors], return_output=True) + for actor, dp_rank in zip(self.all_actors, dp_ranks): + self.dp_rank_to_actors[dp_rank].append(actor) + def __str__(self): return f"{self.__class__.__name__}({self.name})" @@ -156,7 +183,7 @@ class DistTorchActor(DistActor): """DistTorchActor""" def reorder_actors(self, actors, revert_placement=False): - gpu_per_node = min(self.gpu_per_node, self.model.num_device_per_replica) + gpu_per_node = min(self.gpu_per_node, self.model.num_gpu_per_replica) ordered_actors = [] count = 0 actor_gpus = [] @@ -192,11 +219,6 @@ def set_dist_env(self, revert_placement=False): status = sum(future.get(ret)) assert status == world_size - def preprocess_actors(self, revert_placement=False): - super().preprocess_actors() - self.set_dist_env(revert_placement) - return self - class 
DistModel: """DistModel""" @@ -207,7 +229,7 @@ def __init__(self): self.rank_to_actors = {} self.register_serial_func() self.register_func() - self._need_empty_cache = False + self._is_colocate = False self._colocate_models = None def add_replica(self, replica): @@ -231,24 +253,28 @@ def num_replica(self): return len(self.replicas) @property - def total_device(self): - return self.replicas[0].total_device + def total_gpu(self): + return self.replicas[0].total_gpu @property - def num_device_per_replica(self): - return self.replicas[0].num_device_per_replica + def total_cpu(self): + return self.replicas[0].total_cpu + + @property + def num_gpu_per_replica(self): + return self.replicas[0].num_gpu_per_replica @property def gpu_per_process(self): return self.replicas[0].gpu_per_process @property - def need_empty_cache(self): - return self._need_empty_cache + def is_colocate(self): + return self._is_colocate - @need_empty_cache.setter - def need_empty_cache(self, empty_cache): - self._need_empty_cache = empty_cache + @is_colocate.setter + def is_colocate(self, flag): + self._is_colocate = flag def get_actor(self, rank): # given rank, return the actor @@ -272,7 +298,15 @@ def register_func(self): "empty_cache", "set_start_iteration", "offload_optimizer_states", - "onload_optimizer_states"]: + "onload_optimizer_states", + "offload_weights", + "onload_weights", + "offload_main_weights", + "onload_main_weights", + "free_grad_buffers", + "build_grad_buffers", + "eval", + "train"]: dist_call = partial(self.call_replica_func, func_name) setattr(self, func_name, dist_call) @@ -307,6 +341,19 @@ def colocate_models(self): def all_ranks(self): return [dist_actor.all_ranks for dist_actor in self.replicas] + @property + def use_vllm_backend(self): + return vllm_exist and isinstance(self.replicas[0].model, VLLMModule) + + def group_dist_actors_by_tp_rank(self): + for replica in self.replicas: + replica.group_dist_actors_by_tp_rank() + + @property + def enable_offload(self): + return 
self.module_args.free_grad_buffers or self.module_args.offload_weights or \ + self.module_args.offload_optimizer_states + def __str__(self): return f"{self.__class__.__name__}({self.name})" diff --git a/chatlearn/runtime/engine.py b/chatlearn/runtime/engine.py index 2ff0232d..bd6a09df 100644 --- a/chatlearn/runtime/engine.py +++ b/chatlearn/runtime/engine.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ from chatlearn.checkpoint.checkpoint_manager import CheckpointManager from chatlearn.data.data import StreamDataset -from chatlearn.models.rlhf_module import RLHFModule +from chatlearn.models.base_module import BaseModule from chatlearn.runtime.environment import Environment from chatlearn.runtime.evaluator import Evaluator from chatlearn.runtime.trainer import Trainer @@ -27,6 +27,7 @@ from chatlearn.utils import future from chatlearn.utils.global_vars import get_args from chatlearn.utils.logger import logger +from chatlearn.utils.utils import get_full_proc_memory_info from chatlearn.utils.timer import Timers LOG_START = ">>>>>>>>>>>" @@ -38,8 +39,22 @@ class BaseEngine: def __init__(self, *models): self._models = models self.global_args = get_args() - self.rlhf_args = self.global_args.rlhf_args - self.timers = Timers() + self.runtime_args = self.global_args.runtime_args + self._timers = Timers() + + def set_timers(self, _timers): + self._timers = _timers + + @property + def timers(self): + return self._timers + + def timer_summary(self): + """ + :meta private: + """ + if self._timers: + return self._timers.log(reset=False, return_dict=True) def _create_remote_models(self): resource_manager = ResourceManager(self._models) @@ -57,11 +72,13 @@ def setup(self): for model in self.remote_models: setattr(self, 
model.name, model) # include compile in init, compile dependencies need to be called serially + logger.info(get_full_proc_memory_info('Before model init')) for model in self.remote_models: model.init() + logger.info(get_full_proc_memory_info('After model init')) # do not include compile dependencies in setup # if the program hang in setup, may try to set concurrent_setup to False. - if self.rlhf_args.concurrent_setup: + if self.runtime_args.concurrent_setup: refs = [] refs_val = [] for model in self.remote_models: @@ -104,20 +121,21 @@ def flatten(xs): refs.append(mem_ref) summaries = future.get(refs) - logger.info(f"{LOG_START} memory summary:") + logger.debug(f"{LOG_START} memory summary:") for model, summary in zip(self.remote_models, summaries): mem_str = ' | '.join(['{:.2f}'.format(i) for i in flatten(summary)]) - mem_log = f"peak_mem(GB): {mem_str}" - logger.info(f"{LOG_START} {model.name} {mem_log}") + mem_log = f"peak_mem(GiB): {mem_str}" + logger.debug(f"{LOG_START} {model.name} {mem_log}") def logging_summary(self, iteration=-1): + _, e2e_time_dict = self.timer_summary() refs = [] for model in self.remote_models: - time_ref = model.replicas[0].timer_summary() + time_ref = model.replicas[0].timer_summary(e2e_cost=e2e_time_dict.get(model.name, None)) refs.append(time_ref) summaries = future.get(refs) - logger.info(f"{LOG_START} PPO iteration {iteration} time summary for each model as follows:") + logger.info(f"{LOG_START} episode iteration {iteration + 1} time summary for each model as follows:") for model, summary in zip(self.remote_models, summaries): logger.info(f"{LOG_START} [{model.name}] {summary[-1]}") self.logging_memory() @@ -129,7 +147,7 @@ def stop(self): class Engine(BaseEngine): """Engine""" - def __init__(self, environment=None, trainer=None, evaluator=None): + def __init__(self, environment=None, trainer=None, evaluator=None, name='alignment'): """ Engine. 
@@ -146,16 +164,22 @@ def __init__(self, environment=None, trainer=None, evaluator=None): if model not in models: models.append(model) super().__init__(*models) + if environment: + environment.set_timers(self.timers) + if trainer: + trainer.set_timers(self.timers) self.env = environment self.trainer = trainer self.evaluator = evaluator self._start_episode = 0 self._dataset = None + self._post_process_func = None self._drop_last = False self._wrap_data = True self._relay_sample_fn = None - self._ppo_data_loader = None + self._data_loader = None self._param_sync_pairs = [] + self._name = name def set_parameter_sync(self, src_model, dst_model): """ @@ -163,9 +187,9 @@ def set_parameter_sync(self, src_model, dst_model): Args ---- - src_model: RLHFModule + src_model: BaseModule src model to sync parameters - dst_model: RLHFModule + dst_model: BaseModule destination model to sync parameters """ self._param_sync_pairs.append((src_model, dst_model)) @@ -228,7 +252,7 @@ def logging_summary(self, iteration=-1): """ super().logging_summary(iteration) episode_str, episode_stats = self.timers.log(names=['episode', 'sync_parameters'], return_dict=True) - logger.info(f"{LOG_START} RLHF episode summary episode iteration {iteration} {episode_str}") + logger.info(f"{LOG_START} {self._name} episode summary, episode iteration {iteration + 1} {episode_str}") self.episode_stats = episode_stats return episode_stats @@ -243,60 +267,72 @@ def set_relay_sample_fn(self, relay_sample_fn): self._relay_sample_fn = relay_sample_fn def learn(self): - self.timers("rlhf").start() + self.timers("chatlearn").start() self.timers("setup").start() self.setup() for executor in self._executors: if executor: executor.setup() self.timers("setup").stop() - logger.info(f"{LOG_START} RLHF setup summary {self.timers.log(names=['setup'])}") + logger.info(f"{LOG_START} {self._name} setup summary {self.timers.log(names=['setup'])}") self.logging_memory() self._resume_from_data_checkpoint() - ppo_data_loader = 
StreamDataset.remote(self.rlhf_args.stream_data_loader_type, - self.rlhf_args.train_micro_batch_size, + data_loader = StreamDataset.remote(self.runtime_args.stream_data_loader_type, + self.runtime_args.train_micro_batch_size, self.env._padding_config, - self.rlhf_args.max_relay_episode) + self.runtime_args.max_relay_episode, + self.runtime_args.relay_episode_offset) + logger.info(f"{LOG_START} " + get_full_proc_memory_info('Before first param sync')) self.model_manager.sync_parameters(requires_grad=False) - self._ppo_data_loader = ppo_data_loader - for episode_id in range(self._start_episode, self.rlhf_args.num_ppo_episode): - if self.rlhf_args.nsys: + logger.info(f"{LOG_START} " + get_full_proc_memory_info('After first param sync')) + self._data_loader = data_loader + for episode_id in range(self._start_episode, self.runtime_args.num_episode): + if self.runtime_args.nsys: if episode_id == 4: torch.cuda.cudart().cudaProfilerStart() if episode_id == 5: torch.cuda.cudart().cudaProfilerStop() self.timers("episode").start() self.before_episode() - logger.info(f"start train episode_id: {episode_id + 1}/{self.rlhf_args.num_ppo_episode}") + logger.info(f"start train episode_id: {episode_id + 1}/{self.runtime_args.num_episode}") + if self.env.timers is None: + self.env.set_timers(self.timers) queue = self.env.make_experiences() - refs = ppo_data_loader.set_dataset.remote(queue, episode_id, self._relay_sample_fn, - self.rlhf_args.sample_per_episode) + self.timers("set_train_dataset").start() + refs = data_loader.set_dataset.remote(queue, episode_id, self._relay_sample_fn, + self.runtime_args.sample_per_episode) future.wait(refs) - self.trainer.set_data_loader(ppo_data_loader) - logger.info("set dataloader for trainer done") - self.trainer.train(episode_id) - logger.info(f"train episode_id: {episode_id + 1}/{self.rlhf_args.num_ppo_episode} done") - self.timers("sync_parameters").start() - self.model_manager.sync_parameters() - self.timers("sync_parameters").stop() - 
logger.info(f"train episode_id: {episode_id + 1}/{self.rlhf_args.num_ppo_episode} parameter sync done") + if self.trainer is not None: + self.timers("set_train_dataset").stop() + self.trainer.set_data_loader(data_loader) + logger.info("set dataloader for trainer done") + logger.info(get_full_proc_memory_info(f'Before train {episode_id}')) + if self.trainer.timers is None: + self.trainer.set_timers(self.timers) + self.trainer.train(episode_id) + logger.info(get_full_proc_memory_info(f'After train {episode_id}')) + logger.info(f"train episode_id: {episode_id + 1}/{self.runtime_args.num_episode} done") + self.timers("sync_parameters").start() + self.model_manager.sync_parameters(episode_id + 1) + self.timers("sync_parameters").stop() + logger.info(f"train episode_id: {episode_id + 1}/{self.runtime_args.num_episode} parameter sync done") self.after_episode() self.timers("episode").stop() self.logging_summary(episode_id) self.save_checkpoint(episode_id) self.evaluate(episode_id) - self.timers("rlhf").stop() - logger.info(f"{LOG_START} RLHF overall summary {self.timers.log(names=['rlhf'])}") - logger.info("train rlhf done") + self.timers("chatlearn").stop() + logger.info(f"{LOG_START} {self._name} overall summary {self.timers.log(names=['chatlearn'])}") + logger.info(f"train {self._name} done") def _resume_from_data_checkpoint(self): - if self.rlhf_args.data_checkpoint_path: - data_ckpt_manager = CheckpointManager(self.models[0].replicas[0], self.rlhf_args.data_checkpoint_path, - self.rlhf_args.max_data_ckpt_nums, - self.rlhf_args.load_data_checkpoint_iteration) - if self.rlhf_args.enable_resume_training: + if self.runtime_args.data_checkpoint_path: + data_ckpt_manager = CheckpointManager(self.models[0].replicas[0], self.runtime_args.data_checkpoint_path, + self.runtime_args.max_data_ckpt_nums, + self.runtime_args.load_data_checkpoint_iteration) + if self.runtime_args.enable_resume_training: meta = data_ckpt_manager.resume_meta() if meta: self._start_episode = 
meta["episode"] + 1 @@ -308,13 +344,33 @@ def save_checkpoint(self, episode_id): """ :meta private: """ - if self.rlhf_args.save_episode_interval and \ - (episode_id + 1) % self.rlhf_args.save_episode_interval == 0: - refs = [] + if self.runtime_args.save_episode_interval and \ + (episode_id + 1) % self.runtime_args.save_episode_interval == 0: for model in self.trainer.models: - refs.append(model.replicas[0].save_checkpoint(self.trainer.iteration)) + # `build_grad_buffers` is called before `onload_main_weights` and after `onload_weights` because it + # has higher peak memory than allocated and is dependent on weights. + if model.module_args.offload_weights or model.module_args.free_memory: + refs = model.onload_weights() + future.wait(refs) + if model.module_args.free_grad_buffers or model.module_args.free_memory: + refs = model.build_grad_buffers() + future.wait(refs) + if model.module_args.offload_weights or model.module_args.free_memory: + refs = model.onload_main_weights() + future.wait(refs) + refs = model.replicas[0].save_checkpoint(self.trainer.iteration) + future.wait(refs) + if model.module_args.offload_weights or model.module_args.free_memory: + refs = model.offload_weights() + future.wait(refs) + refs = model.offload_main_weights() + future.wait(refs) + if model.module_args.free_grad_buffers or model.module_args.free_memory: + refs = model.free_grad_buffers() + future.wait(refs) + refs = [] for i, model in enumerate(self.models[0].replicas): - refs.append(model.save_data_checkpoint(i, self.trainer.iteration, episode_id)) + refs.append(model.all_actors[0].save_data_checkpoint.remote(i, self.trainer.iteration, episode_id)) future.get(refs) logger.info(f"save checkpoint episode {episode_id}, train iteration {self.trainer.iteration} done") @@ -323,8 +379,10 @@ def evaluate(self, episode_id): :meta private: """ if self.evaluator is not None and \ - self.rlhf_args.eval_episode_interval and \ - (episode_id + 1) % self.rlhf_args.eval_episode_interval == 0: + 
self.runtime_args.eval_episode_interval and \ + (episode_id + 1) % self.runtime_args.eval_episode_interval == 0: + if self.evaluator.timers is None: + self.evaluator.set_timers(self.timers) logger.info("start evaluate") self.timers("evaluate").start() self.evaluator.eval(episode_id, self.trainer.iteration) @@ -337,12 +395,12 @@ class RLHFEngine(Engine): """RLHFEngine""" def __init__(self, - policy: RLHFModule, - reference: RLHFModule, - reward: RLHFModule, - value: RLHFModule, - ppo_policy: RLHFModule, - ppo_value: RLHFModule): + policy: BaseModule, + reference: BaseModule, + reward: BaseModule, + value: BaseModule, + policy_trainer: BaseModule, + value_trainer: BaseModule): def env_compute_flow(batch): policy_out = policy.forward_step(batch) ref_out = reference.forward_step(policy_out) @@ -350,27 +408,123 @@ def env_compute_flow(batch): reward_out = reward.forward_step(policy_out, ref_out, value_out) return value_out, reward_out + def trainer_compute_flow(batch): + policy_trainer.train_step(batch) + value_trainer.train_step(batch) + + env = Environment(env_compute_flow) + trainer = Trainer(trainer_compute_flow) + super().__init__(env, trainer, name='rlhf') + self.set_parameter_sync(policy_trainer, policy) + self.set_parameter_sync(value_trainer, value) + + +class OnlineDPOEngine(Engine): + """Online DPO Engine.""" + def __init__(self, + policy: BaseModule, + reference: BaseModule, + reward: BaseModule, + policy_trainer: BaseModule): + def env_compute_flow(batch): + policy_out = policy.forward_step(batch) + ref_out = reference.forward_step(policy_out) + reward_out = reward.forward_step(policy_out, ref_out) + return reward_out + + def trainer_compute_flow(batch): + policy_trainer.train_step(batch) + + env = Environment(env_compute_flow) + trainer = Trainer(trainer_compute_flow) + super().__init__(env, trainer, name='online_dpo') + self.set_parameter_sync(policy_trainer, policy) + + +class DPOEngine(Engine): + """DPO Engine.""" + def __init__(self, + reference: 
BaseModule, + policy_trainer: BaseModule): + def env_compute_flow(batch): + ref_out = reference.forward_step(batch) + return ref_out + + def trainer_compute_flow(batch): + policy_trainer.train_step(batch) + + env = Environment(env_compute_flow) + trainer = Trainer(trainer_compute_flow) + super().__init__(env, trainer, name='dpo') + + +class GRPOEngine(Engine): + """GRPO Engine.""" + def __init__(self, + policy: BaseModule, + reference: BaseModule, + reward: BaseModule, + policy_trainer: BaseModule): + def env_compute_flow(batch): + policy_out = policy.forward_step(batch) + ref_out = reference.forward_step(policy_out) + reward_out = reward.forward_step(policy_out, ref_out) + return reward_out + + def trainer_compute_flow(batch): + policy_trainer.train_step(batch) + + env = Environment(env_compute_flow) + trainer = Trainer(trainer_compute_flow) + super().__init__(env, trainer, name='grpo') + self.set_parameter_sync(policy_trainer, policy) + + +class GRPOMathEngine(Engine): + """GRPO Engine with math reward""" + def __init__(self, + policy, + reference, + reward, + reward1, + ppo_policy): + + def env_compute_flow(batch): + policy_out = policy.forward_step(batch) + ref_out = reference.forward_step(policy_out) + reward_out = reward.forward_step(policy_out, ref_out) + reward_out1 = reward1.forward_step(batch, policy_out) + return reward_out, reward_out1 + def trainer_compute_flow(batch): ppo_policy.train_step(batch) - ppo_value.train_step(batch) - env = Environment([policy, reference, value, reward]).set_flow(env_compute_flow) - trainer = Trainer([ppo_policy, ppo_value]).set_flow(trainer_compute_flow) - super().__init__(env, trainer) + def evaluator_flow(batch): + policy_out = policy.eval_forward(batch) + reward_out = reward.eval_forward(policy_out) + reward_out1 = reward1.eval_forward(policy_out) + return reward_out, reward_out1 + + env = Environment(env_compute_flow) + trainer = Trainer(trainer_compute_flow) + evaluator = Evaluator(evaluator_flow) + 
super().__init__(env, trainer, evaluator, name='grpo_math') self.set_parameter_sync(ppo_policy, policy) - self.set_parameter_sync(ppo_value, value) class EvalEngine(Engine): """Evaluation Engine""" - def __init__(self, models): - evaluator = Evaluator(models) + def __init__(self, eval_flow=None, evaluator=None): + if evaluator is None: + evaluator = Evaluator(eval_flow) super().__init__(evaluator=evaluator) def setup(self): super().setup() self.evaluator.set_dataset(self._dataset) + self.evaluator.set_timers(self.timers) + self.evaluator.set_post_process_func(self._post_process_func) def set_dataset(self, dataset): """ @@ -382,12 +536,30 @@ def set_dataset(self, dataset): a list of prompt string """ self._dataset = dataset + return self + + def set_post_process_func(self, post_process_func): + """ + Set post process function. + + Args + ---- + post_process_func + This function accept two arguments. + 1. results: a list of evaluation results + 2. eval_info: a dict meta that contains "train_iteration" and "episode_iteration" + """ + self._post_process_func = post_process_func + return self - def eval(self): + def eval(self, cur_iter=None, train_iteration=None): """ Start evaluating. """ self.setup() self.evaluator.setup() - queue = self.evaluator.eval() - return queue + self.timers("episode").start() + results = self.evaluator.eval( + cur_iter=cur_iter, train_iteration=train_iteration) + self.timers("episode").stop() + return results diff --git a/chatlearn/runtime/environment.py b/chatlearn/runtime/environment.py index e5ab18cd..0633f649 100644 --- a/chatlearn/runtime/environment.py +++ b/chatlearn/runtime/environment.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -14,35 +14,33 @@ # ============================================================================== """Environment""" -import importlib import math from itertools import cycle +from ray.util.queue import Queue + from chatlearn.data.ranking import batch_generation_ranking from chatlearn.utils import future from chatlearn.utils.logger import logger from .executor import Executor +from .utils import vllm_post_process_generate_step_one_model +from .utils import encode_data, reinit_cache_engine, prepare_vllm +from .utils import execute_in_parallel, decode_data -vllm_exist = importlib.util.find_spec("vllm") -if vllm_exist: - from chatlearn.models.vllm_module import RLHFVLLMModule - - +# pylint: disable=not-callable class Environment(Executor): """BaseEnv""" - def __init__(self, models): + def __init__(self, model_flow): """ Environment Args ---- - models : List[RLHFModule] + models : List[BaseModule] a list of modules """ - super().__init__(models) - for model in self.models: - model.register_func("forward_step") + super().__init__(model_flow) self._batch_size = None self._batch_per_episode = None self._dataset = None @@ -59,7 +57,6 @@ def setup_dataset(self): self.data_producer = self.models[0] assert self.sample_per_episode % len(self.data_producer.replicas) == 0, \ "replica number of data producer model must be divisible by sample_per_episode" - self.sample_per_episode_per_replica = self.sample_per_episode // len(self.data_producer.replicas) logger.info("start set dataset for data_producer") refs = [] if self.models[0].module_args.batch_generation.ranking: @@ -67,15 +64,12 @@ def setup_dataset(self): self._dataset = batch_generation_ranking(self._dataset, episode_per_epoch, self.sample_per_episode) for policy_replica in self.data_producer.replicas: ref = policy_replica.master._build_dataloader.remote(self._dataset, - self.batch_size, - self.sample_per_episode_per_replica) + self.batch_size) refs.append(ref) future.get(refs) logger.info("set dataset for data_producer 
done") - def setup(self): - self.use_vllm_backend = vllm_exist and isinstance(self.models[0].replicas[0].model, RLHFVLLMModule) super().setup() self.setup_dataset() @@ -84,14 +78,6 @@ def setup(self): config = future.get(model.master.padding_config.remote()) self._padding_config.update(config) - if self.use_vllm_backend: - # setup vllm scheduler - refs = [] - for model_replica in self.models[0].replicas: - ref = model_replica.tailer.build_scheduler.remote() - refs.append(ref) - future.get(refs) - @property def sample_per_episode(self): return self.args.sample_per_episode @@ -100,12 +86,9 @@ def sample_per_episode(self): def batch_size(self): if self._batch_size is not None: return self._batch_size - if self.use_vllm_backend: + if self.first_model.use_vllm_backend: num_replica = len(self.models[0].replicas) self._batch_size = self.sample_per_episode // num_replica - if self.models[0].module_args.args_dict.get("vllm_micro_batch_size") is not None and \ - self.models[0].module_args.args_dict["vllm_micro_batch_size"] != -1: - self._batch_size = self.models[0].module_args.args_dict["vllm_micro_batch_size"] else: self._batch_size = self.models[0].module_args.generation_batch_size @@ -116,98 +99,76 @@ def batch_per_episode(self): if self._batch_per_episode is not None: return self._batch_per_episode num_replica = len(self.models[0].replicas) - if self.use_vllm_backend: - self._batch_per_episode = math.ceil(self.sample_per_episode_per_replica / self.batch_size) + num_batch = self.sample_per_episode // (num_replica * self.batch_size) * num_replica + remainder = self.sample_per_episode % (num_replica * self.batch_size) + if remainder >= num_replica: + self._batch_per_episode = num_batch + num_replica else: - num_batch = self.sample_per_episode // (num_replica * self.batch_size) * num_replica - remainder = self.sample_per_episode % (num_replica * self.batch_size) - if remainder >= num_replica: - self._batch_per_episode = num_batch + num_replica - else: - 
self._batch_per_episode = num_batch + remainder + self._batch_per_episode = num_batch + remainder return self._batch_per_episode - def vllm_post_process_outputs(self, replica): - """post precess of results in current episode""" - return replica.tailer.decode.remote() - - def vllm_post_process_generate_step_one_model(self, model, out_queue, mb): - """ - Args: - model: DistModel - out_queue: Queue - """ - replica = self._get_model(model) - output = self.vllm_post_process_outputs(replica) - - # If tp > 1 or pp > 1 for current model, its `output` will be a list whose - # length is the number of Actors. In this case, all members in the list - # are the same, and we choose output[-1] to put into out_queue. - last_output = output[-1] if isinstance(output, list) else output - if isinstance(out_queue, list): - for oq in out_queue: - oq.put(self.encode_data(mb, last_output)) + @property + def num_iteration(self): + if self.models[0].module_args.zero_size > 1: + assert self.batch_per_episode % self.models[0].module_args.zero_size == 0 + return self.batch_per_episode // self.models[0].module_args.zero_size else: - out_queue.put(self.encode_data(mb, last_output)) - # To ensure all Actors are finished synchronously, `output` itself should be returned - return out_queue, output - - def generate_step(self, data_queue, step): - for i, model_node in enumerate(self.model_flow.model_nodes): - if i == 0 and self.use_vllm_backend: - self.vllm_post_process_generate_step_one_model(model_node.model, model_node.out_queues, step) - continue - input_queues = data_queue if i == 0 else model_node.get_input_queues() - self.generate_step_one_model(model_node.model, input_queues, model_node.out_queues, to_empty_cache=False, - step_num=step) - data = [] - for model_node in self.model_flow.model_nodes: - if model_node in self.model_flow.return_model_nodes: - data.append(model_node.out_queues[-1]) - return self.get_merged_data(data, encode=False) - - def add_request(self, is_eval=False): - 
request_rets = [] - for model_replica in self.models[0].replicas: - query = model_replica.master.next_batch.remote(is_eval=is_eval) - ret = model_replica.tailer._add_request.remote(query) - request_rets.append(ret) - future.get(request_rets) + return self.batch_per_episode + + def execute_vllm(self, model_replica, query, out_queues, mb, is_eval, func_name): + self.execute_onload(model_replica) + + # profile cache blocks + prepare_vllm(model_replica) + + # reinit cache engine + reinit_cache_engine(model_replica) + # add requests of current episode to vllm scheduler + ret = model_replica.tailer._add_request.remote(query, is_eval=is_eval) + future.get(ret) + step_outputs = True + data_queue_internal = Queue() + while step_outputs: + query = model_replica.tailer.schedule.remote() + data_queue_internal.put(encode_data(mb, query)) + output = self.generate_step_one_model_internal(self.first_model, data_queue_internal, mb, \ + model_replica, func_name, False, is_eval=is_eval) + data = output[-1][0] + step_outputs = future.get(data) + vllm_post_process_generate_step_one_model(model_replica, out_queues, mb) + self.execute_offload(model_replica) + + + def execute(self, is_eval): + data_queues, out_queue = self.setup_queues() + data_producer_iter = cycle(iter(self.models[0].replicas)) + # prepare batches for all model replicas + for mb in range(self.batch_per_episode): + current_data_producer = next(data_producer_iter) + query = current_data_producer.master.next_batch.remote(is_eval=is_eval) + encoded_data = encode_data(mb, query) + for data_queue in data_queues: + data_queue.put(encoded_data) + + if self.first_model.use_vllm_backend: + data_queue = self.first_node.get_input_queues() + self.timers(f"{self.first_model.name}").start() + args_list = [] + for model_replica in self.first_model.replicas: + if data_queue.qsize() == 0: + break + data = data_queue.get() + mb, query = decode_data(data) + func_name = self.first_node.func_name + args_list.append((model_replica, query, 
self.first_node.out_queues, mb, is_eval, func_name)) + execute_in_parallel(self.execute_vllm, args_list) + self.timers(f"{self.first_model.name}").stop() + self.compute_loop(out_queue, self.num_iteration) + return out_queue def make_experiences(self): """ Generate a collection of experiences for one episode """ - # Assume the first model produce the data - # data_producer = self.model_flow.model_nodes[0].model - data_queues, out_queue = self.setup_queues() - - if self.use_vllm_backend: - data_queue = data_queues[0] - for mb in range(self.batch_per_episode): - # add requests of current episode to vllm scheduler - self.add_request() - - # eval loop of current episode - num_remaining_request = True - while num_remaining_request: - step_output_rets = [] - for model_replica in self.data_producer.replicas: - query = model_replica.master.schedule.remote() - data_queue.put(self.encode_data(mb, query)) - data, _ = self.generate_step_one_model_internal(self.model_flow.model_nodes[0].model, - data_queue, mb) - step_output_rets.append(data) - num_remaining_request = future.get(step_output_rets)[0][0] - data = self.generate_step(None, mb) - out_queue.put(data) - else: - data_producer_iter = cycle(iter(self.data_producer.replicas)) - for mb in range(self.batch_per_episode): - current_data_producer = next(data_producer_iter) - query = current_data_producer.master.next_batch.remote() - encoded_data = self.encode_data(mb, query) - for data_queue in data_queues: - data_queue.put(encoded_data) - self.compute_loop(data_queues, out_queue, self.batch_per_episode) - return out_queue + return self.execute(is_eval=False) +# pylint: disable=not-callable diff --git a/chatlearn/runtime/evaluator.py b/chatlearn/runtime/evaluator.py index 7b9cd746..3272e901 100644 --- a/chatlearn/runtime/evaluator.py +++ b/chatlearn/runtime/evaluator.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,29 +14,29 @@ # ============================================================================== """Evaluator""" +from collections import defaultdict import math -from itertools import cycle from chatlearn.runtime.environment import Environment from chatlearn.utils import future from chatlearn.utils.logger import logger from chatlearn.data.ranking import batch_generation_ranking - +# pylint: disable=not-callable class Evaluator(Environment): """ Evaluator. Args ---- - models : [RLHFModule] + models : [BaseModule] models to evaluate - args : RLHFConfig + args : RuntimeConfig default to None """ - def __init__(self, models): - super().__init__(models) + def __init__(self, model_flow): + super().__init__(model_flow) self._post_process_func = None self.is_eval = True @@ -48,7 +48,17 @@ def sample_per_episode(self): def batch_per_episode(self): if self._batch_per_episode is not None: return self._batch_per_episode - self._batch_per_episode = math.ceil(len(self._dataset) / self.batch_size) + if self.first_model.use_vllm_backend: + # For the vLLM model, the number of samples processed at one time is sample_per_episode / num_replicas. + # If sample_per_episode < num_replicas, then some models will not receive any data to process, + # in which case batch_per_episode = sample_per_episode (each model processes one sample). + # If sample_per_episode is greater than num_replicas, then batch_per_episode = num_replicas. 
+ if self.sample_per_episode >= len(self.models[0].replicas): + self._batch_per_episode = len(self.models[0].replicas) + else: + self._batch_per_episode = self.sample_per_episode + else: + self._batch_per_episode = math.ceil(len(self._dataset) / self.batch_size) return self._batch_per_episode def setup_dataset(self): @@ -57,24 +67,19 @@ def setup_dataset(self): logger.info("calling batch_generation_ranking") self._dataset = batch_generation_ranking(self._dataset, 1, len(self._dataset)) refs = [] - for model_replica in self.models[0].replicas: - ref = model_replica.master._build_dataloader.remote(self._dataset, self.batch_size, is_eval=True) - refs.append(ref) + for idx, model_replica in enumerate(self.models[0].replicas): + if self.first_model.use_vllm_backend: + remainder = self.sample_per_episode % self.models[0].num_replica + batch_size_plus = 1 if idx < remainder else 0 + batch_size = self.batch_size + batch_size_plus + else: + batch_size = self.batch_size + if batch_size > 0: + ref = model_replica.master._build_dataloader.remote( + self._dataset, batch_size, dynamic_batch_size_flag=self.first_model.use_vllm_backend, is_eval=True) + refs.append(ref) future.get(refs) - def eval_step(self, data_queue, out_queue, step): - in_queue = data_queue - for node in self.model_flow.model_nodes: - model = node.model - func_name = model.replicas[0].eval_call_func - assert func_name is not None, \ - f"call model.register_eval_func for {model.name} before initializing Evaluator." 
- out_queue = node.out_queues - self.generate_step_one_model(model, in_queue, out_queue, step, func_name, False, is_eval=True) - in_queue = node.out_queues[0] - - return self.get_merged_data(out_queue, encode=False) - def get_all_merged_data_list(self, queues, encode=True): queue0 = queues[0] merged_data_list = [] @@ -98,74 +103,39 @@ def set_post_process_func(self, post_process_func): self._post_process_func = post_process_func return self - def eval(self, ppo_iter=None, train_iteration=None, return_last=True): + def eval(self, cur_iter=None, train_iteration=None): """ Evaluating. Args ---- - ppo_iter : int - current ppo iteration. + cur_iter : int + current iteration. train_iteration: int current training iteration. - return_last : bool - return results of last model only. """ - num_batch = self.batch_per_episode refs = [] for model in self.models[0].replicas: refs.append(model.master.reset_eval_data_iter.remote()) future.get(refs) - data_queues, out_queue = self.setup_queues() - - if self.use_vllm_backend: - data_queue = data_queues[0] - results = [] - for mb in range(num_batch): - # add requests of current episode to vllm scheduler - self.add_request(is_eval=True) - - # eval loop of current episode - num_remaining_request = True - while num_remaining_request: - step_output_rets = [] - for model_replica in self.models[0].replicas: - query = model_replica.tailer.schedule.remote() - data_queue.put(self.encode_data(mb, query)) - data = self.eval_step(data_queue, out_queue, mb) - step_output_rets.append(data) - num_remaining_request = future.get(step_output_rets)[0][0] - - # post precess of results in current episode - outputs = [] - for model_replica in self.models[0].replicas: - outputs.append(self.vllm_post_process_outputs(model_replica)) - results += future.get(outputs) - else: - data_producer_iter = cycle(iter(self.models[0].replicas)) - for mb in range(num_batch): - current_data_producer = next(data_producer_iter) - query = 
current_data_producer.master.next_batch.remote(is_eval=True) - encoded_data = self.encode_data(mb, query) - for data_queue in data_queues: - data_queue.put(encoded_data) - self.compute_loop(data_queues, out_queue, num_batch) - queue_size = out_queue.qsize() - result_refs = [out_queue.get() for _ in range(queue_size)] - element_size = len(result_refs[0]) - results = future.wait(result_refs, desc="evaluator", return_output=True) - results_nested = [] - for i in range(0, len(results), element_size): - sublist = results[i:i+element_size] - results_nested.append(sublist) - results = results_nested - if return_last: - results = [res[0] for res in results] - if self._post_process_func is not None: - eval_info = {} - if ppo_iter is not None: - eval_info["episode_iteration"] = ppo_iter - if train_iteration is not None: - eval_info["train_iteration"] = train_iteration - self._post_process_func(results, eval_info) - return results + out_queue = self.execute(is_eval=True) + queue_size = out_queue.qsize() + result_refs = [out_queue.get() for _ in range(queue_size)] + element_size = len(result_refs[0]) + data_list = future.wait(result_refs, desc="evaluator", return_output=True) + results = [data_list[i:i + element_size] for i in range(0, len(data_list), element_size)] + all_results = defaultdict(list) + for batches in results: + for i, batch in enumerate(batches): + model_name = self.model_flow.return_model_nodes[i].name + all_results[model_name].append(batch) + + if self._post_process_func is not None: + eval_info = {} + if cur_iter is not None: + eval_info["episode_iteration"] = cur_iter + if train_iteration is not None: + eval_info["train_iteration"] = train_iteration + self._post_process_func(all_results, eval_info) + return all_results +# pylint: disable=not-callable diff --git a/chatlearn/runtime/executor.py b/chatlearn/runtime/executor.py index ee12e00e..2c03d9ef 100644 --- a/chatlearn/runtime/executor.py +++ b/chatlearn/runtime/executor.py @@ -1,4 +1,4 @@ -# Copyright 
2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,42 +15,49 @@ """Executor""" from itertools import cycle - from ray.util.queue import Queue -from chatlearn.runtime.model_flow import ModelFlow +from chatlearn.runtime.model_flow import ModelFlow, ModelNode from chatlearn.utils import future from chatlearn.utils.global_vars import get_args from chatlearn.utils.logger import logger +from .utils import encode_data, decode_data +from .utils import FlowParser +# pylint: disable=not-callable class Executor: """Executor""" - def __init__(self, models): + def __init__(self, model_flow): """ Executor Args ---- - models : List[RLHFModule] + models : List[BaseModule] a list of modules """ - if not isinstance(models, list): - models = [models] - self.args = get_args().rlhf_args + self._set_flow(model_flow) + self.args = get_args().runtime_args self.model_flow = None - self.models = models - self.local_models = models + self.local_models = self.models self._batch_per_episode = -1 - self._flow = None self.is_eval = False + self._timers = None + + def set_timers(self, _timers): + self._timers = _timers + + @property + def timers(self): + return self._timers @property def batch_per_episode(self): return self._batch_per_episode - def set_flow(self, flow): + def _set_flow(self, flow): """ Set compution flow @@ -65,32 +72,38 @@ def set_flow(self, flow): return self """ self._flow = flow + self.model_to_call_func = FlowParser().parse(flow) + for model, func_name in self.model_to_call_func.items(): + model.call_funcs.append(func_name) + self.models = list(self.model_to_call_func.keys()) return self + @property + def first_node(self): + return self.model_flow.model_nodes[0] + + @property + def first_model(self): + return self.first_node.model + def update_models(self, 
models): # update local model with remote models new_models = [] name_to_new_models = {model.name: model for model in models} for model in self.local_models: - new_models.append(name_to_new_models[model.name]) + dist_model = name_to_new_models[model.name] + dist_model.group_dist_actors_by_tp_rank() + new_models.append(dist_model) self.models = new_models if self.args is None: - self.args = get_args().rlhf_args + self.args = get_args().runtime_args def setup(self): self._models_and_results_to_wait = [] self.model_flow = ModelFlow(self) - self.model_flow.trace(self.models, self._flow, is_eval=self.is_eval) + self.model_flow.trace(self.models, self._flow) self.models = [model_node.model for model_node in self.model_flow.model_nodes] - def encode_data(self, mb, data): - return {"iter": mb, "data": data} - - def decode_data(self, data): - mb = data["iter"] - data = data["data"] - return mb, data - def _get_model(self, model): if len(model.replicas) == 1: return model.replicas[0] @@ -101,28 +114,29 @@ def _get_model(self, model): def get_merged_data(self, queues, encode=True): queue0 = queues[0] - mb0, data0 = self.decode_data(queue0.get()) + mb0, data0 = decode_data(queue0.get()) if isinstance(data0, list): - # if model has multiple actors, just use the last one + # if model has multiple actors, if TP>1/PP>1, just use the last data0 = data0[-1] - data_list = [data0] + data_list = [None] * len(queues) + data_list[0] = data0 for index, queue in enumerate(queues[1:]): if index not in self.merged_buffer: self.merged_buffer[index] = {} if mb0 in self.merged_buffer[index]: - data_list.append(self.merged_buffer[index].pop(mb0)) + data_list[index+1] = self.merged_buffer[index].pop(mb0) continue while True: encoded_data = queue.get() - mb, data = self.decode_data(encoded_data) + mb, data = decode_data(encoded_data) if isinstance(data, list): data = data[-1] if mb == mb0: - data_list.append(data) + data_list[index+1] = data break self.merged_buffer[index][mb] = data if encode: - 
return self.encode_data(mb0, data_list) + return encode_data(mb0, data_list) return data_list def get_all_merged_data(self, queues, out_queue, encode=True): @@ -131,45 +145,76 @@ def get_all_merged_data(self, queues, out_queue, encode=True): res = self.get_merged_data(queues, encode) out_queue.put(res) - def generate_step_one_model_internal(self, model, in_queue, step_num, func_name="forward_step", to_empty_cache=None, - is_eval=False): + def execute_onload(self, model_node): + if isinstance(model_node, ModelNode): + model = model_node.model + else: + model = model_node + # TODO: overlap with execution of other models + refs = model.onload() + future.wait(refs) + + def execute_offload(self, model_node): + if isinstance(model_node, ModelNode): + model = model_node.model + else: + model = model_node + refs = model.offload() + future.wait(refs) + + def generate_step_one_model_internal(self, model, in_queue, step_num, replica=None, func_name="forward_step", to_empty_cache=None, + is_eval=False, to_onload=None, to_offload=None): """ Args: model: DistModel in_queue: Queue step_num: int + replica: current model replica of DistModel func_name: str to_empty_cache: None or boolean """ - is_train = model.trainable - replica = self._get_model(model) + if replica is None: + replica = self._get_model(model) - if isinstance(in_queue, list): - data = self.get_merged_data(in_queue) - mb, query = self.decode_data(data) - else: - data = in_queue.get() - mb, query = self.decode_data(data) - query = [query] - func = getattr(replica, func_name) + def get_next_data(): + if isinstance(in_queue, list): + # this should happen for inference models, will trigger bug for training models + # since training models accept a list of remote object, which has the same + # behavior for models accept multiple inputs + # we need to deal with it later + assert not model.trainable + data = self.get_merged_data(in_queue) + mb, query = decode_data(data) + else: + data = in_queue.get() + mb, query = 
decode_data(data) + query = [query] + return mb, query kwargs = {} - # if isinstance(query, list): replica_num = len(model.replicas) last_step_start = max(self.batch_per_episode - replica_num, 0) is_last_batch = step_num >= last_step_start kwargs["is_last_batch"] = is_last_batch if to_empty_cache is not None: kwargs["to_empty_cache"] = to_empty_cache + if to_onload is not None: + kwargs["to_onload"] = to_onload + if to_offload is not None: + kwargs["to_offload"] = to_offload if is_eval is not None: kwargs["is_eval"] = is_eval - if is_train: - kwargs["train_info"] = {"iteration": step_num} - output = func(*query, **kwargs) - return output, mb + output = [] + for _, actors in replica.dp_rank_to_actors.items(): + mb, query = get_next_data() + assert isinstance(query, list) + for actor in actors: + ret = replica.call_actor_remote_func(actor, func_name, *query, **kwargs) + output.append((ret, mb)) + return output def generate_step_one_model(self, model, in_queue, out_queue, step_num, func_name="forward_step", - to_empty_cache=None, is_eval=False): + to_empty_cache=None, is_eval=False, to_onload=None, to_offload=None): """ Args: model: DistModel @@ -179,40 +224,51 @@ def generate_step_one_model(self, model, in_queue, out_queue, step_num, func_nam func_name: str to_empty_cache: None or boolean """ - output, mb = self.generate_step_one_model_internal(model, in_queue, step_num, func_name, to_empty_cache, - is_eval) + # output is a list of tuple, each tuple is (remote_refs, mb) + output = self.generate_step_one_model_internal(model, in_queue, step_num, None, func_name, to_empty_cache, + is_eval, to_onload, to_offload) # If tp > 1 or pp > 1 for current model, its `output` will be a list whose # length is the number of Actors. In this case, all members in the list # are the same, and we choose output[-1] to put into out_queue. 
- last_output = output[-1] if isinstance(output, list) else output + if model.module_args.zero_size == 1: + result = [output[-1]] + else: + result = output if isinstance(out_queue, list): for oq in out_queue: - oq.put(self.encode_data(mb, last_output)) + for res, mb in result: + oq.put(encode_data(mb, res)) else: - out_queue.put(self.encode_data(mb, last_output)) - # To ensure all Actors are finished synchronously, `output` itself should be returned - return out_queue, output + for res, mb in result: + out_queue.put(encode_data(mb, res)) + # To ensure all Actors are finished synchronously, all remote refs should be returned + # note that ray wait does not support tuple type, return a list of list + remote_refs = [item[0] for item in output] + return out_queue, remote_refs def compute_loop_one_model(self, model_node, num_batch, is_eval): model = model_node.model - # TODO: overlap with execution of other models - if model.trainable and model.module_args.offload_optimizer_states: - refs = model.onload_optimizer_states() - future.wait(refs) + func_name = model_node.func_name if model_node.remote_objects_to_wait: - model_node.wait_colocate_models_to_finish(func_name) + model_node.wait_colocate_models_to_finish(self.timers, func_name) replica_num = len(model.replicas) last_step_start = max(num_batch - replica_num, 0) in_queue = model_node.get_input_queues() out_queue = model_node.out_queues results = [] + self.timers(f"{model.name}").start() for step in range(num_batch): - to_empty_cache = step >= last_step_start and model.need_empty_cache + to_empty_cache = step >= last_step_start and model.is_colocate + to_onload = step < replica_num and model.is_colocate and model.enable_offload + to_offload = step >= last_step_start and model.is_colocate and model.enable_offload + # offload already includes empty cache + to_empty_cache = False if to_offload else to_empty_cache _, data = self.generate_step_one_model(model, in_queue, out_queue, step, func_name, to_empty_cache, - 
is_eval=is_eval) + is_eval=is_eval, to_onload=to_onload, to_offload=to_offload) results.append(data) + self.timers(f"{model.name}").stop() if model_node.next_colocate_node: # before the execution of next colocate model, perform the wait, since we want to empty the cache. logger.info( @@ -223,33 +279,37 @@ def compute_loop_one_model(self, model_node, num_batch, is_eval): # 1. the model may colocate with training/inference, so we should wait until the end of compute_loop # 2. the model is trainable and it does not have next_colocate_model, we should make sure it is finished before parameter_sync # so we add them to a temp list - logger.info(f"Sync {model} in the end of {self}") + logger.info(f"Sync {model} in the end of {self.__class__.__name__}") self._models_and_results_to_wait.append((model_node, results)) - # TODO: overlap with execution of other models - if model.trainable and model.module_args.offload_optimizer_states: - refs = model.offload_optimizer_states() - future.wait(refs) + return results - def compute_loop(self, data_queues, out_queue, num_batch): + def compute_loop(self, out_queue, num_batch): for i, model_group in enumerate(self.model_flow.flow_topology): - if i == 0: - for j, model_node in enumerate(model_group): - model_node.set_input_queues(data_queues[j]) for model_node in model_group: + if model_node.model.use_vllm_backend and i == 0: + continue self.compute_loop_one_model(model_node, num_batch, self.is_eval) - data = [] + + data = [None] * len(self.model_flow.return_model_nodes) for model_node in self.model_flow.model_nodes: + self.timers(f"{model_node.model.name}").start() if model_node in self.model_flow.return_model_nodes: - data.append(model_node.out_queues[-1]) + # let the results order follow model_node order + data[self.model_flow.return_model_nodes.index(model_node)] = model_node.out_queues[-1] + self.timers(f"{model_node.model.name}").stop() model_names = [] results = [] for model, result in self._models_and_results_to_wait: 
model_names.append(model.name) results.extend(result) if results: + for model_name in model_names: + self.timers(f"{model_name}").start() func_name = self.model_flow.model_nodes[0].func_name future.wait(results, f"{model_names} {func_name}") + for model_name in model_names: + self.timers(f"{model_name}").stop() self._models_and_results_to_wait = [] if data: self.get_all_merged_data(data, out_queue, encode=False) @@ -257,15 +317,14 @@ def compute_loop(self, data_queues, out_queue, num_batch): def setup_queues(self): data_queues = [] out_queue = Queue() - for i, model_group in enumerate(self.model_flow.flow_topology): - if i == 0: - for model_node in model_group: - data_queue = Queue() - data_queues.append(data_queue) - model_node.set_input_queues(data_queue) - for model_node in self.model_flow.model_nodes: - num_out_queue = len(model_node.output_models) - if model_node in self.model_flow.return_model_nodes: - num_out_queue += 1 - model_node.add_out_queues([Queue() for i in range(num_out_queue)]) + for model_node in self.model_flow.input_consumers: + data_queue = Queue() + data_queues.append(data_queue) + model_node.set_input_queue(data_queue) + for model_node in self.model_flow.model_nodes: + num_out_queue = len(model_node.output_models) + if model_node in self.model_flow.return_model_nodes: + num_out_queue += 1 + model_node.set_out_queues([Queue() for _ in range(num_out_queue)]) return data_queues, out_queue +# pylint: disable=not-callable diff --git a/chatlearn/runtime/model_flow.py b/chatlearn/runtime/model_flow.py index 5464642c..72496fce 100644 --- a/chatlearn/runtime/model_flow.py +++ b/chatlearn/runtime/model_flow.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -38,8 +38,8 @@ def __init__(self, model, model_arg_name, func_name): self.func_name = func_name self.input_models = [] self.output_models = [] - self.out_queues = [] - self._input_queues = None + self.out_queues = None + self._input_queue = None # next colocate model node to execute self.next_colocate_node = None # model to wait before the execution of current model @@ -54,19 +54,21 @@ def add_input_node(self, model): def add_output_node(self, model): self.output_models.append(model) - def add_out_queues(self, queues): + def set_out_queues(self, queues): self.out_queues = queues - def set_input_queues(self, queues): - self._input_queues = queues + def set_input_queue(self, queue): + self._input_queue = queue def get_input_queues(self): - if self._input_queues is not None: - return self._input_queues input_queues = [] + if self._input_queue is not None: + input_queues.append(self._input_queue) for input_model_node in self.input_models: out_index = input_model_node.output_models.index(self) input_queues.append(input_model_node.out_queues[out_index]) + if len(input_queues) == 1: + return input_queues[0] return input_queues def _find_all_parents(self, model, prev_models_results): @@ -101,8 +103,12 @@ def add_dependent_colocate_model_results(self, model, remote_objects, models_and self.remote_objects_to_wait.extend(remote_objects) return models_and_results_to_wait2 - def wait_colocate_models_to_finish(self, func_name): + def wait_colocate_models_to_finish(self, timers, func_name): + for model in self.models_to_wait: + timers(f"{model.name}").start() future.wait(self.remote_objects_to_wait, f"{[model.name for model in self.models_to_wait]} {func_name}") + for model in self.models_to_wait: + timers(f"{model.name}").stop() self.remote_objects_to_wait = [] self.models_to_wait = [] @@ -134,11 +140,13 @@ def __init__(self, cls): self.return_model_nodes = [] self.out_to_model_node = {} self.cls = cls + # models that consumes input data + self.input_consumers = [] def 
get(self, name): return self.name_to_node[name] - def trace(self, models, compute_flow, is_eval=False): + def trace(self, models, compute_flow): """ Trace the model compute_flow to get model graph. @@ -148,38 +156,26 @@ def trace(self, models, compute_flow, is_eval=False): a list of DistModel compute_flow: callable compute_flow function - is_eval: bool - is evaluation """ local_models = [model.replicas[0].model for model in models] name2remote_model = {model.name: model for model in models} class_to_old_func = {} for model in local_models: - func_name = model.get_call_func(is_eval) - assert func_name is not None, f"call func is not set for {model}, is_eval: {is_eval}" + func_name = self.cls.model_to_call_func[model] class_to_old_func[(model, func_name)] = getattr(model.__class__, func_name) setattr(model.__class__, func_name, fake_compute()) dummy_data = DummyData() - if compute_flow is not None: - # trace the compute flow - dummy_output = compute_flow(dummy_data) - else: - # default is sequential computation - dummy_input = dummy_data - dummy_output = None - for model in local_models: - func_name = model.get_call_func(is_eval) - dummy_output = getattr(model, func_name)(dummy_input) - dummy_input = dummy_output + assert compute_flow is not None + dummy_output = compute_flow(dummy_data) # convert decorator back for model in local_models: - func_name = model.get_call_func(is_eval) + func_name = self.cls.model_to_call_func[model] setattr(model.__class__, func_name, class_to_old_func[(model, func_name)]) for model in local_models: remote_model = name2remote_model[model.name] - node = ModelNode(remote_model, model.name, model.get_call_func(is_eval)) + node = ModelNode(remote_model, model.name, self.cls.model_to_call_func[model]) if model._dummy_output: self.out_to_model_node[model._dummy_output] = node for dummy_input in model._dummy_inputs: @@ -193,6 +189,7 @@ def trace(self, models, compute_flow, is_eval=False): 
self.return_model_nodes.append(self.out_to_model_node[do]) self.name_to_node = {node.model.name: node for node in self.model_nodes} + self.input_consumers = [self.name_to_node[model.name] for model in dummy_data.to_models] self.flow_topology = self.topological_sort() self.model_nodes = flatten(self.flow_topology) for i, current_node in enumerate(self.model_nodes): diff --git a/chatlearn/runtime/parameter_sync.py b/chatlearn/runtime/parameter_sync.py index 16ac5176..1d494d51 100644 --- a/chatlearn/runtime/parameter_sync.py +++ b/chatlearn/runtime/parameter_sync.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,9 +16,10 @@ import importlib import random -import threading +import concurrent.futures import traceback from collections import defaultdict +from concurrent.futures import ThreadPoolExecutor from itertools import cycle from tqdm import tqdm @@ -27,12 +28,13 @@ from chatlearn.utils import future from chatlearn.utils import utils from chatlearn.utils.constant import LORA_WEIGHT_PREFIX +from chatlearn.utils.constant import PARAM_SYNC_COMM_TYPE from chatlearn.utils.global_vars import get_args from chatlearn.utils.logger import logger vllm_exist = importlib.util.find_spec("vllm") if vllm_exist: - from chatlearn.models.vllm_module import RLHFVLLMModule + from chatlearn.models.vllm_module import VLLMModule patch_ray() @@ -41,7 +43,7 @@ class ParameterSyncGroup: """ParameterSyncGroup""" - def __init__(self, src_model, dst_model, group_name, error_signal): + def __init__(self, src_model, dst_model, group_name, frequency, error_signal): self.src_model = src_model self.dst_model = dst_model self.group_name = group_name @@ -49,7 +51,7 @@ def __init__(self, src_model, dst_model, group_name, error_signal): 
self.send_recv_actor_mappings = defaultdict(list) self.recv_send_actor_mappings = defaultdict(list) self.actor2rank = {} - self._debug = get_args().rlhf_args.debug + self._debug = get_args().runtime_args.debug self._num_src_pipeline_stage = None self._num_dst_pipeline_stage = None self._num_src_tensor_parallel = None @@ -59,12 +61,29 @@ def __init__(self, src_model, dst_model, group_name, error_signal): self._send_recv_param_names = {} self._actor2pipe = {} self._actor2tp = {} + self._actor2dp = {} self._validate_params = {} + self._comm_type = get_args().runtime_args.param_sync_comm_type self.setup_collective_group() self.build_rank_mapping() - self.enable_coalesce_param = get_args().rlhf_args.coalesce_param - self.concurrent_comm = get_args().rlhf_args.concurrent_comm + self.enable_coalesce_param = get_args().runtime_args.coalesce_param + self.concurrent_comm = get_args().runtime_args.concurrent_comm self._enable_lora = self.src_model.module_args.lora.enable_lora + # sync every n episodes, n = 0 for no param sync + self._frequency = frequency + + self._free_sync_collective_group = get_args().runtime_args.free_sync_collective_group + self._is_collective_group_created = True + self.collective_groups = [] + self.src_dp_size = future.get(self.src_model.replicas[0].all_actors[0].get_data_parallel_size.remote()) + self.sorted_send_actors = None + + def get_group_name(self, actors): + return f"{self.group_name}_" + "_".join(str(self.actor2rank[actor]) for actor in actors) + + @property + def frequency(self): + return self._frequency @property def num_src_pipeline_stage(self): @@ -98,19 +117,22 @@ def setup_collective_group(self): rank_offset = 0 for model in models: - logger.info( - f"start setup_collective_group for {model.name}, group_name: {self.group_name}, world_size: {world_size}, rank_offset: {rank_offset}") for replica in model.replicas: - refs += replica._setup_collective_group(rank_offset, world_size, self.group_name) + if self._comm_type == 
PARAM_SYNC_COMM_TYPE.P2P: + refs += replica._setup_collective_group(rank_offset, world_size, self.group_name) + else: + replica._setup_ranks(rank_offset) rank_offset += replica.actor_num - - future.get(refs) - logger.info(f"init collective group done for {self.group_name}") + if refs: + future.get(refs) + logger.info(f"init collective group done for {self.group_name}") def destroy_collective_group(self): + refs = [] try: - self.src_model.destroy_collective_group() - self.dst_model.destroy_collective_group() + refs.extend(self.src_model.destroy_collective_group()) + refs.extend(self.dst_model.destroy_collective_group()) + future.wait(refs) logger.info(f"destroy_collective_group success for {self.group_name}") except Exception as e: logger.exception(f"destroy_collective_group fail for {self.group_name} {e}") @@ -200,8 +222,21 @@ def validate(): utils.get_or_cache(self._validate_params, (send_actor, recv_actor), validate) logger.info("Validation passed!") + def sync_broadcast(self, actors, group_name, requires_grad=None): + send_actor = actors[0] + for recv_actor in actors[1:]: + self.set_sync_param_names(send_actor, recv_actor, requires_grad) + pipe_stage = self.get_actor_pipe_rank(send_actor) + assert self.enable_coalesce_param + refs = [] + for rank, actor in enumerate(actors): + ref = actor.broadcast_parameter.remote(rank, 0, group_name, pipe_stage) + refs.append(ref) + future.wait(refs) + + def _sync_send_recv(self, send_actor, recv_actor, requires_grad=None): - src_names, dst_names = self.get_sync_param_names(send_actor, recv_actor, requires_grad) + src_names, dst_names = self.set_sync_param_names(send_actor, recv_actor, requires_grad) pipe_stage = self.get_actor_pipe_rank(send_actor) send_gpu = future.get(send_actor.get_visible_gpus.remote()) recv_gpu = future.get(recv_actor.get_visible_gpus.remote()) @@ -273,6 +308,11 @@ def inner_func(): return future.get(actor.tensor_parallel_rank.remote()) return utils.get_or_cache(self._actor2tp, actor, inner_func) + def 
get_actor_dp_rank(self, actor): + def inner_func(): + return future.get(actor.get_data_parallel_rank.remote()) + return utils.get_or_cache(self._actor2dp, actor, inner_func) + def _set_sync_param_names(self, send_actor, recv_actor, requires_grad=None): if requires_grad is None: requires_grad = True @@ -283,8 +323,8 @@ def _set_sync_param_names(self, send_actor, recv_actor, requires_grad=None): dst_pipe_rank = self.get_actor_pipe_rank(recv_actor) dst_src_mappings = future.get(send_actor.build_pipeline_layer_name_mapping.remote( self.num_dst_pipeline_stage, dst_pipe_rank, requires_grad=requires_grad)) - dst_names = dst_src_mappings.keys() - src_names = dst_src_mappings.values() + dst_names = list(dst_src_mappings.keys()) + src_names = list(dst_src_mappings.values()) else: src_names = dst_names = future.get(send_actor.get_parameter_names.remote(requires_grad=requires_grad)) @@ -292,8 +332,17 @@ def _set_sync_param_names(self, send_actor, recv_actor, requires_grad=None): src_names = [ele for ele in src_names if LORA_WEIGHT_PREFIX not in ele] dst_names = [ele for ele in dst_names if LORA_WEIGHT_PREFIX not in ele] - if vllm_exist and isinstance(self.dst_model.replicas[0].model, RLHFVLLMModule): - dst_names = future.get(recv_actor.map_src_to_dst.remote(src_names)) + if vllm_exist and isinstance(self.dst_model.replicas[0].model, VLLMModule): + src_pipe_stage = self.get_actor_pipe_rank(send_actor) + src_names, dst_names = future.get(recv_actor.map_src_to_dst.remote(src_names, self.num_src_pipeline_stage, src_pipe_stage)) + concat_params_dict = future.get(recv_actor.get_concat_params_dict.remote()) + future.get(send_actor.set_concat_params_dict.remote(concat_params_dict)) + to_fix_act_ordering_dict = future.get(recv_actor.get_to_fix_act_ordering_dict.remote()) + future.get(send_actor.set_to_fix_act_ordering_dict.remote(to_fix_act_ordering_dict)) + to_fix_qkv_ordering_dict = future.get(recv_actor.get_to_fix_qkv_ordering_dict.remote()) + 
future.get(send_actor.set_to_fix_qkv_ordering_dict.remote(to_fix_qkv_ordering_dict)) + to_fix_qkv_ordering_func = future.get(recv_actor.get_to_fix_qkv_ordering_func.remote()) + future.get(send_actor.set_to_fix_qkv_ordering_func.remote(to_fix_qkv_ordering_func)) else: if self._dst_prefix is None and self._src_prefix is None: dst_names_ref = future.get(recv_actor.get_parameter_names.remote(requires_grad=False)) @@ -308,32 +357,96 @@ def _set_sync_param_names(self, send_actor, recv_actor, requires_grad=None): future.get(refs) return src_names, dst_names - def get_sync_param_names(self, send_actor, recv_actor, requires_grad=None): + def set_sync_param_names(self, send_actor, recv_actor, requires_grad=None): return utils.get_or_cache(self._send_recv_param_names, (send_actor, recv_actor), \ lambda: self._set_sync_param_names(send_actor, recv_actor, requires_grad)) + def create_broadcast_group(self, send_actor, recv_actors): + actor_groups = [send_actor] + actor_groups.extend(recv_actors) + dp = self.get_actor_dp_rank(send_actor) + pp = self.get_actor_pipe_rank(send_actor) + tp = self.get_actor_tp_rank(send_actor) + group_name = f"{self.group_name}_dp{dp}_pp{pp}_tp{tp}" + if group_name not in self.collective_groups: + refs = [] + for rank, actor in enumerate(actor_groups): + ref = actor.setup_collective_group.remote(rank, len(actor_groups), "nccl", group_name) + refs.append(ref) + future.wait(refs) + self.collective_groups.append(group_name) + return actor_groups, group_name + + def sort_send_actors(self): + if self.sorted_send_actors is not None: + return self.sorted_send_actors + dp2send_actors = defaultdict(list) + for send_actor in self.send_recv_actor_mappings: + dp2send_actors[self.get_actor_dp_rank(send_actor)].append(send_actor) + for dp_rank in dp2send_actors: + send_actors = dp2send_actors[dp_rank] + dp2send_actors[dp_rank] = sorted(send_actors, key=lambda x: self.actor2rank[x]) + sorted_send_actors = [] + dp_rank = 0 + while len(sorted_send_actors) < 
len(self.send_recv_actor_mappings): + sorted_send_actors.append(dp2send_actors[dp_rank].pop(0)) + dp_rank += 1 + if dp_rank == self.src_dp_size: + dp_rank = 0 + assert len(self.send_recv_actor_mappings) == len(sorted_send_actors) + self.sorted_send_actors = sorted_send_actors + return sorted_send_actors + def sync(self, requires_grad=None): - threads = [] - use_threads = self.concurrent_comm + if not self._is_collective_group_created: + # Re-create collective group only when it is destroyed before. + assert self._free_sync_collective_group + self.setup_collective_group() + for send_actor in self.send_recv_actor_mappings: if self._enable_lora: ref = send_actor.fuse_lora_layer.remote() state = future.get([ref]) assert state, "Check fuse lora layer fail." - recv_actors = self.send_recv_actor_mappings[send_actor] - for recv_actor in recv_actors: - if use_threads: - thread = threading.Thread(target=self.sync_send_recv, args=(send_actor, recv_actor, requires_grad)) - threads.append(thread) + + if self.concurrent_comm: + sorted_send_actors = self.sort_send_actors() + max_workers = get_args().runtime_args.param_sync_max_workers + if max_workers is None: + max_workers = max(self.src_model.total_gpu // 8, 1) + if max_workers == -1: + if self._comm_type == PARAM_SYNC_COMM_TYPE.BROADCAST: + max_workers = len(send_actors) else: - self.sync_send_recv(send_actor, recv_actor, requires_grad) + max_workers = len(send_actors) * len(self.send_recv_actor_mappings[send_actors[0]]) + with ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = [] + for send_actor in sorted_send_actors: + recv_actors = self.send_recv_actor_mappings[send_actor] + if self._comm_type == PARAM_SYNC_COMM_TYPE.BROADCAST: + actor_groups, group_name = self.create_broadcast_group(send_actor, recv_actors) + futures.append(executor.submit(self.sync_broadcast, actor_groups, group_name, requires_grad)) + else: + for recv_actor in recv_actors: + futures.append(executor.submit(self.sync_send_recv, 
send_actor, recv_actor, requires_grad)) + concurrent.futures.wait(futures) + else: + for send_actor, recv_actors in self.send_recv_actor_mappings.items(): + if self._comm_type == PARAM_SYNC_COMM_TYPE.BROADCAST: + actor_groups, group_name = self.create_broadcast_group(send_actor, recv_actors) + self.sync_broadcast(actor_groups, group_name, requires_grad) + else: + for recv_actor in recv_actors: + self.sync_send_recv(send_actor, recv_actor, requires_grad) + + for send_actor in self.send_recv_actor_mappings: if self._enable_lora: ref = send_actor.unfuse_lora_layer.remote() state = future.get([ref]) assert state, "Check unfuse lora layer fail." - if len(threads) > 0: - for thread in threads: - thread.start() - for thread in threads: - thread.join() - logger.info("sync all parameters done") + + if self._free_sync_collective_group: + self.destroy_collective_group() + self._is_collective_group_created = False + self.collective_groups = [] + logger.info(f"Group {self.group_name} sync all parameters done") diff --git a/chatlearn/runtime/trainer.py b/chatlearn/runtime/trainer.py index fa762b56..dcba3ba3 100644 --- a/chatlearn/runtime/trainer.py +++ b/chatlearn/runtime/trainer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -20,61 +20,72 @@ from chatlearn.utils import future from chatlearn.utils.logger import logger from .executor import Executor +from .utils import encode_data class Trainer(Executor): """Trainer""" - def __init__(self, models): + def __init__(self, model_flow): """ Trainer Args ---- - models : List[RLHFModule] + models : List[BaseModule] a list of modules """ - super().__init__(models) - self.models = models - for model in models: - model.register_func("train_step") - self.num_micro_batch = self.args.train_global_batch_size // self.args.train_micro_batch_size + super().__init__(model_flow) + for model, func_name in self.model_to_call_func.items(): + model.trainable_funcs.append(func_name) self.iteration = 0 + self._data_parallel_size = None def set_data_loader(self, data_loader): self._data_loader = data_loader def next_batch(self): batches = [] - for _ in range(self.num_micro_batch): + for _ in range(self.num_micro_batch_per_dp): data = self._data_loader.next.remote() if future.get(self._data_loader.has_next.remote()): batches.append(data) if not batches: return else: - if len(batches) < self.num_micro_batch: - batches += batches[:self.num_micro_batch - len(batches)] + if len(batches) < self.num_micro_batch_per_dp: + batches += batches[:self.num_micro_batch_per_dp - len(batches)] return batches - def num_training_iteration(self): + @property + def num_iteration(self): # Given that we have incorporated support for relay buffer and dynamic reward outputs, # the number of training data batches per episode may differ, hence we dynamically determine the total number of batches per episode. 
_sample_per_episode = ray.get(self._data_loader.total_samples.remote()) return math.ceil(_sample_per_episode / self.args.train_global_batch_size) + @property + def data_parallel_size(self): + if self._data_parallel_size is None: + self._data_parallel_size = self.first_model.replicas[0].data_parallel_size + for model in self.models[1:]: + assert model.replicas[0].data_parallel_size == self._data_parallel_size, \ + "Currently, all training models are assumed to have the same data_parallel_size" + return self._data_parallel_size + def train(self, episode): - _num_training_iteration = self.num_training_iteration() + self.num_micro_batch_per_dp = self.args.train_global_batch_size // self.args.train_micro_batch_size // self.data_parallel_size + _num_training_iteration = self.num_iteration self._batch_per_episode = _num_training_iteration for epoch in range(self.args.num_training_epoch): if epoch > 0: ret = self._data_loader.shuffle.remote() future.wait(ret) data_queues, out_queue = self.setup_queues() - for mb in range(_num_training_iteration): - batch = self.encode_data(mb, self.next_batch()) + for mb in range(_num_training_iteration * self.data_parallel_size): + batch = encode_data(mb, self.next_batch()) for data_queue in data_queues: data_queue.put(batch) - self.compute_loop(data_queues, out_queue, _num_training_iteration) + self.compute_loop(out_queue, _num_training_iteration) self.iteration = self.iteration + _num_training_iteration - logger.info(f"train episode: {episode}, epoch {epoch} num_step {_num_training_iteration} done") + logger.info(f"train episode: {episode+1}, epoch {epoch} num_step {_num_training_iteration} done") diff --git a/chatlearn/runtime/utils.py b/chatlearn/runtime/utils.py new file mode 100644 index 00000000..7095c5fc --- /dev/null +++ b/chatlearn/runtime/utils.py @@ -0,0 +1,152 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""runtime utils"""

import ast
import concurrent.futures
import textwrap
import inspect

from chatlearn.utils import future


def encode_data(mb, data):
    """Tag a micro-batch payload with its iteration index."""
    return {"iter": mb, "data": data}


def decode_data(data):
    """Inverse of :func:`encode_data`: return ``(iter, payload)``."""
    mb = data["iter"]
    data = data["data"]
    return mb, data


def build_scheduler(model_replica):
    """Build the vLLM scheduler, on the last rank (``tailer``) of the replica only."""
    future.get(model_replica.tailer.build_scheduler.remote())


def free_cache_engine(model_replica):
    """Free the vLLM cache engine on every actor of the replica (blocks until done)."""
    future.get([actor.free_cache_engine.remote() for actor in model_replica.all_actors])


def prepare_vllm(model_replica):
    """Profile cache blocks and build the vLLM scheduler for one replica."""
    profile_cache_blocks(model_replica)
    # setup vllm scheduler
    build_scheduler(model_replica)


def profile_cache_blocks(model_replica):
    """Profile GPU/CPU cache blocks on all actors and apply the common minimum.

    The minimum block counts across actors are used so that every actor is
    configured with a cache size all of them can satisfy.
    """
    rets = future.get([actor.profile_cache_blocks.remote() for actor in model_replica.all_actors])

    num_gpu_blocks = min(a[0] for a in rets)
    num_cpu_blocks = min(a[1] for a in rets)

    future.get([actor.set_cache_config.remote(num_gpu_blocks, num_cpu_blocks)
                for actor in model_replica.all_actors])


def reinit_cache_engine(model_replica):
    """Re-initialize the vLLM cache engine on every actor of the replica (blocking)."""
    future.get([actor.reinit_cache_engine.remote() for actor in model_replica.all_actors])


def vllm_post_process_generate_step_one_model(replica, out_queue, mb):
    """Decode the generation result of one replica and push it to the out queue(s).

    Args:
        replica: DistModel replica whose tail actor (``tailer``) holds the result.
        out_queue: Queue, or list of Queues, receiving the encoded result.
        mb: micro-batch index used to tag the result via :func:`encode_data`.
    """
    output = replica.tailer.decode.remote()

    free_cache_engine(replica)

    # If tp > 1 or pp > 1 for current model, its `output` will be a list whose
    # length is the number of Actors. In this case, all members in the list
    # are the same, and we choose output[-1] to put into out_queue.
    last_output = output[-1] if isinstance(output, list) else output
    if isinstance(out_queue, list):
        for oq in out_queue:
            oq.put(encode_data(mb, last_output))
    else:
        out_queue.put(encode_data(mb, last_output))


def parse_assign_target(line):
    """Return the names bound by an ``ast.Assign`` node (simple ``Name`` targets only)."""
    return [target.id for target in line.targets]


def parse_expr(line):
    """Extract ``(func_name, model_name, func_args)`` from a ``model.func(args)`` statement.

    *line* is an ``ast.Assign`` or ``ast.Expr`` whose value is an attribute
    call; positional arguments are assumed to be plain names.
    """
    func = line.value.func
    func_name = func.attr
    func_args = [arg.id for arg in line.value.args]
    if isinstance(func.value, ast.Name):
        model_name = func.value.id
    else:
        model_name = func.value.attr
    return func_name, model_name, func_args


class FlowParser:
    """Parse a flow function and map each referenced model to the method it calls."""

    def __init__(self):
        # model object -> name of the method invoked on it in the flow
        self.model_to_call_func = {}

    def visit_func(self, node):
        """Record ``model -> func_name`` for every call statement in *node*'s body."""
        for line in node.body:
            if isinstance(line, (ast.Assign, ast.Expr)):
                func_name, model_name, _ = parse_expr(line)
                model = self.global_models[model_name]
                # each model is expected to appear at most once in the flow
                assert model not in self.model_to_call_func
                self.model_to_call_func[model] = func_name

    def parse(self, func):
        """Parse *func* and return the ``model -> func_name`` mapping.

        NOTE(review): the ``isinstance(func, str)`` branch below suggests a
        source string is accepted, but ``inspect.getclosurevars`` requires a
        callable — confirm the string path is actually exercised.
        """
        closure_vars = inspect.getclosurevars(func)
        # models are looked up in the flow function's captured scope
        self.global_models = closure_vars.globals if closure_vars.globals else closure_vars.nonlocals
        node_iter = ast.NodeVisitor()
        node_iter.visit_FunctionDef = self.visit_func
        if isinstance(func, str):
            code = textwrap.dedent(func)
        else:
            code = textwrap.dedent(inspect.getsource(func))
        node_iter.visit(ast.parse(code))
        return self.model_to_call_func


def execute_in_parallel(function, arguments):
    """Run ``function(*args)`` for every tuple in *arguments* on a thread pool.

    A single argument tuple is executed inline (its result is returned
    directly). Exceptions raised by worker threads are printed and the
    corresponding result is omitted; results are otherwise returned in
    completion order.
    """
    if len(arguments) == 1:
        return function(*arguments[0])
    results = []
    with concurrent.futures.ThreadPoolExecutor() as executor:
        # submit every call first, then gather results as they complete
        futures = [executor.submit(function, *args) for args in arguments]
        for _future in concurrent.futures.as_completed(futures):
            try:
                results.append(_future.result())
            except Exception as e:
                print(f"Thread generated an exception: {e}")
    return results
b/chatlearn/schedule/__init__.py index 028ddcd3..df6408db 100644 --- a/chatlearn/schedule/__init__.py +++ b/chatlearn/schedule/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/chatlearn/schedule/model_manager.py b/chatlearn/schedule/model_manager.py index 276d7892..cb2eb50d 100644 --- a/chatlearn/schedule/model_manager.py +++ b/chatlearn/schedule/model_manager.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ from chatlearn.data.storage import Storage from chatlearn.launcher import dlc_utils -from chatlearn.models.torch_module import RLHFTorchModule +from chatlearn.models.torch_module import TorchModule from chatlearn.runtime.decorator import decorate_class_func from chatlearn.runtime.decorator import timeit, preprocess_compute, monitor_error from chatlearn.runtime.dist_actor import DistActor, DistTorchActor, DistModel @@ -30,17 +30,18 @@ from chatlearn.utils.logger import logger from chatlearn.utils.global_vars import set_decorated, is_decorated from .port_manager import PortManager +from ..utils import future class ModelManager: """ModelManager""" - def __init__(self, rlhf_models, resouce_manager, global_args): - self.local_models = rlhf_models + def __init__(self, models, resouce_manager, global_args): + self.local_models = models self.resouce_manager = resouce_manager self.dist_models = [] self.env_args = global_args.env_args - self.rlhf_args = global_args.rlhf_args + self.runtime_args = global_args.runtime_args self.converted = False # port 
for DLC jobs, the first two ports are reserved for ray start self.free_ports = dlc_utils.get_free_ports()[2:] @@ -48,24 +49,25 @@ def __init__(self, rlhf_models, resouce_manager, global_args): self.error_signal = ErrorSignalActor.remote() self._storage = Storage.remote() self.parameter_sync_groups = {} - self._parameter_sync_model_mapping = {} + self._parameter_sync_model_pair = [] self.model_packs = [] + self.placement_groups = [] - def _get_total_device_required(self): - total_device = 0 + def _get_total_gpu_required(self): + total_gpu = 0 remote_states = set() - for group in self.rlhf_args.colocation: + for group in self.runtime_args.colocation: colocate_models = [self._name2distmodel[name] for name in group] - max_device = max(m.total_device for m in colocate_models) - total_device += max_device + max_gpu = max(m.total_gpu for m in colocate_models) + total_gpu += max_gpu for name in group: remote_states.add(name) for model in self.dist_models: # place non-colocate models if model.name not in remote_states: - max_device = model.total_device - total_device += max_device - return total_device + max_gpu = model.total_gpu + total_gpu += max_gpu + return total_gpu def remote(self) -> list: """ @@ -81,21 +83,21 @@ def remote(self) -> list: self.dist_models.append(dist_model) self._name2distmodel[model.name] = dist_model - total_device_required = self._get_total_device_required() - if total_device_required > self.resouce_manager.total_gpu: - raise RuntimeError(f"The number of required gpus for current job is {total_device_required}, " + \ + total_gpu_required = self._get_total_gpu_required() + if total_gpu_required > self.resouce_manager.total_gpu: + raise RuntimeError(f"The number of required gpus for current job is {total_gpu_required}, " + \ f"while the number of applied gpus is {self.resouce_manager.total_gpu}") - if self.resouce_manager.total_gpu > total_device_required: + if self.resouce_manager.total_gpu > total_gpu_required: logger.warning(f"The number of 
applied gpus is {self.resouce_manager.total_gpu}, " + \ - f"while the number of required gpus is {total_device_required}, " + \ - f"there is {self.resouce_manager.total_gpu - total_device_required} wasted gpus") + f"while the number of required gpus is {total_gpu_required}, " + \ + f"there is {self.resouce_manager.total_gpu - total_gpu_required} wasted gpus") - for group in self.rlhf_args.colocation: + for group in self.runtime_args.colocation: colocate_models = [self._name2distmodel[name] for name in group] self.place_models_to_remote_devices(colocate_models) if len(colocate_models) > 1: for model in colocate_models: - model.need_empty_cache = True + model.is_colocate = True for name in group: remote_states.add(name) for model in self.dist_models: @@ -107,10 +109,11 @@ def remote(self) -> list: def build_parameter_group(self): # set ParameterSyncGroup - for src_model, dst_model in self._parameter_sync_model_mapping.items(): + for src_model, dst_model in self._parameter_sync_model_pair: group_name = self._get_group_name(src_model, dst_model) + sync_frequency = self._get_sync_frequency(dst_model) sync_group = ParameterSyncGroup(self._name2distmodel[src_model.name], self._name2distmodel[dst_model.name], - group_name, self.error_signal) + group_name, sync_frequency, self.error_signal) self.parameter_sync_groups[group_name] = sync_group def start_error_monitor(self): @@ -120,37 +123,55 @@ def start_error_monitor(self): self.error_monitor.monitor.remote() def _get_group_name(self, src_model, dst_model): - return src_model.name + dst_model.name + return src_model.name + "2" + dst_model.name + + def _get_sync_frequency(self, model): + return model.parameter_sync_frequency def set_parameter_sync(self, src_model, tgt_model): group_name = self._get_group_name(src_model, tgt_model) if group_name in self.parameter_sync_groups: logger.warning(f"{group_name} already set, ignore") else: - self._parameter_sync_model_mapping[src_model] = tgt_model + sync_frequency = 
self._get_sync_frequency(tgt_model) + assert sync_frequency >= 0, \ + f"parameter sync frequency from {src_model.name} to {tgt_model.name} expected tp be greater than 0, while {sync_frequency}." + logger.info(f"sync parameters from {src_model.name} to {tgt_model.name} every {sync_frequency} episodes.") + self._parameter_sync_model_pair.append((src_model, tgt_model)) - def sync_parameters(self, requires_grad=None): + def sync_parameters(self, episode_offset=0, requires_grad=None): """ if requires_grad is False, all parameters will be syncronized, this happends when broadcast parameters in the beginning of training, set the parameters of inference same as training """ for _, sync_group in self.parameter_sync_groups.items(): - sync_group.sync(requires_grad) + if sync_group.frequency and \ + episode_offset % sync_group.frequency == 0: + sync_group: ParameterSyncGroup = sync_group + + src_model, dst_model = sync_group.src_model, sync_group.dst_model + refs = src_model.onload_weights() + future.wait(refs) + refs = dst_model.onload_weights() + future.wait(refs) + + sync_group.sync(requires_grad) + + refs = src_model.offload_weights() + future.wait(refs) + refs = dst_model.offload_weights() + future.wait(refs) def set_func_decorator(self, model): if is_decorated(model.name): return model_cls = model.__class__ - call_funcs = [] - for is_eval in (True, False): - call_func = model.get_call_func(is_eval) - if call_func is not None and call_func not in call_funcs: - call_funcs.append(call_func) - for func_name in call_funcs: + for func_name in model.call_funcs: is_forward_step = func_name == "forward_step" - decorate_class_func(model_cls, func_name, preprocess_compute, is_forward_step) + trainable = func_name in model.trainable_funcs + decorate_class_func(model_cls, func_name, preprocess_compute, is_forward_step, trainable) for func_name in ["forward_step", "train_step", "save_checkpoint", "model_setup"]: @@ -158,7 +179,12 @@ def set_func_decorator(self, model): # public user 
function # TODO: use decorator to annotate - for func_name in ["save_checkpoint", "model_setup", "onload_optimizer_states", "offload_optimizer_states"] + call_funcs: + # TODO: we may need to merge these vllm func call + vllm_funcs = ['build_scheduler', 'free_cache_engine', 'profile_cache_blocks', + 'set_cache_config', 'reinit_cache_engine', 'decode', '_add_request', 'schedule'] + for func_name in ["save_checkpoint", "model_setup", "onload_optimizer_states", "offload_optimizer_states", + 'offload_weights', 'onload_weights', 'offload_main_weights', 'onload_main_weights', + 'free_grad_buffers', 'build_grad_buffers', 'build_dataset', '_build_dataloader'] + model.call_funcs + vllm_funcs: decorate_class_func(model_cls, func_name, monitor_error, func_name) set_decorated(model.name) @@ -167,13 +193,13 @@ def _to_dist_model(self, model): Convert one model to DistActor and place it to devices Args: - model: RLHFModule + model: BaseModule """ self.set_func_decorator(model) model.finalize() def actor_type(): - if isinstance(model, RLHFTorchModule): + if isinstance(model, TorchModule): return DistTorchActor else: return DistActor @@ -194,113 +220,154 @@ def _find_param_recv_models(self, models): model_names = [model.name for model in models] models_to_revert = [] for model in models: - for src, tgt in self._parameter_sync_model_mapping.items(): + for src, tgt in self._parameter_sync_model_pair: if src.name in model_names and model.name == tgt.name: models_to_revert.append(model) return models_to_revert - def find_model_packing_strategy(self, models, total_device): + def find_model_packing_strategy(self, models, total_gpu): """ - Find model packing strategies that can pack all models into total_device + Find model packing strategies that can pack all models into total_gpu try to balance the models among devices, i.e., each device holds similar number of model parts - e.g., given models A:8, B:4, C:4, total_device: 8 + e.g., given models A:8, B:4, C:4, total_gpu: 8 then the pack 
strategy is [(A), (B,C)] """ - sorted_models = sorted(models, key=lambda x: x.total_device, reverse=True) - assert sorted_models[0].total_device <= total_device + sorted_models = sorted(models, key=lambda x: (x.trainable, x.total_gpu), reverse=True) + assert sorted_models[0].total_gpu <= total_gpu final_packs = [] - # key is the remaining device + # key is the remaining gpu unfinished_packs = defaultdict(list) for model in sorted_models: - device = model.total_device - if device == total_device: + gpu = model.total_gpu + if gpu == total_gpu: final_packs.append([model]) else: - if device in unfinished_packs: + if gpu in unfinished_packs: # find a pack - packs = unfinished_packs[device].pop(0) - if len(unfinished_packs[device]) == 0: - unfinished_packs.pop(device) + packs = unfinished_packs[gpu].pop(0) + if len(unfinished_packs[gpu]) == 0: + unfinished_packs.pop(gpu) packs.append(model) final_packs.append(packs) else: - near_devices = [d for d in unfinished_packs if d > device] + near_gpus = [d for d in unfinished_packs if d > gpu] - if near_devices: - near_device = sorted(near_devices)[0] - packs = unfinished_packs[near_device].pop(0) + if near_gpus: + near_gpu = sorted(near_gpus)[0] + packs = unfinished_packs[near_gpu].pop(0) - if len(unfinished_packs[device]) == 0: - unfinished_packs.pop(device) + if len(unfinished_packs[gpu]) == 0: + unfinished_packs.pop(gpu) packs.append(model) - # update the remaining device number - unfinished_packs[near_device - device].append(packs) + # update the remaining gpu number + unfinished_packs[near_gpu - gpu].append(packs) else: # add model and wait for packing - unfinished_packs[total_device - device].append([model]) - for device, packs_list in unfinished_packs.items(): + unfinished_packs[total_gpu - gpu].append([model]) + for gpu, packs_list in unfinished_packs.items(): if packs_list: final_packs.extend(packs_list) return final_packs - def place_models_to_remote_devices(self, models): - max_device = max(m.total_device for m in 
models) - placement_group = self.resouce_manager.create_placement_group(max_device) - logger.info(f"create placement_group {placement_group.bundle_specs} for model {models} done") - if len(models) > 1: - for model in models: - # TODO: for colocate gpu_per_process > 1, support later - assert model.gpu_per_process == 1 - self.model_packs = self.find_model_packing_strategy(models, max_device) - for model in models: + def place_gpu_models(self, gpu_models): + if not gpu_models: + return + max_gpu = max(m.total_gpu for m in gpu_models) + placement_group = self.resouce_manager.create_placement_group(max_gpu) + for i, _ in enumerate(placement_group.bundle_specs): + self.placement_groups.append((placement_group, i)) + models_str = ','.join([model.name for model in gpu_models]) + logger.info(f"create placement_group {placement_group.bundle_specs} for model {models_str} done") + for model in gpu_models: + # TODO: for colocate gpu_per_process > 1, support later + assert model.gpu_per_process == 1 + self.model_packs = self.find_model_packing_strategy(gpu_models, max_gpu) + + for model in gpu_models: pack = [] for pack in self.model_packs: if model in pack: break colocate_models = [] - for model2 in models: + for model2 in gpu_models: if model2 is not model and model2 not in pack: colocate_models.append(model2) model.set_colocate_models(colocate_models) - def _get_model_replica_from_pack(device_index, model_pack): - device_offset = 0 + def _get_model_replica_from_pack(gpu_index, model_pack): + gpu_offset = 0 for model in model_pack: - if device_index < device_offset + model.total_device: + if gpu_index < gpu_offset + model.total_gpu: # compute the model rank - model_rank = device_index - device_offset - replica_id = model_rank // model.num_device_per_replica + model_rank = gpu_index - gpu_offset + replica_id = model_rank // model.num_gpu_per_replica return model.replicas[replica_id] - device_offset += model.total_device + gpu_offset += model.total_gpu # 1. 
we list the models to place on each device # 2. for device i, the number of models is N, then the num_gpus for each ray actor is 1.0/N - device_to_replicas = [] - for i in range(max_device): + gpu_to_replicas = [] + for i in range(max_gpu): colocate_models = [] for model_pack in self.model_packs: replica = _get_model_replica_from_pack(i, model_pack) if replica is not None: colocate_models.append(replica) - device_to_replicas.append(colocate_models) + gpu_to_replicas.append(colocate_models) - for i, replicas in enumerate(device_to_replicas): + for i, replicas in enumerate(gpu_to_replicas): num_gpus = 1.0 / len(replicas) group = i // self.resouce_manager.gpu_per_node for replica in replicas: replica.create_actor(num_gpus, placement_group, group) - models_to_revert = self._find_param_recv_models(models) - for model in models: + models_to_revert = self._find_param_recv_models(gpu_models) + for model in gpu_models: if model in models_to_revert: # pylint: disable=simplifiable-if-statement # Reverse the placement of tgt models, so that shared models not in the same GPU # NCCL limit: NCCL WARN Duplicate GPU detected : rank 1 and rank 0 both on CUDA device # TODO: One GPU task still not work - reverse_device_placement = True + reverse_gpu_placement = True else: - reverse_device_placement = False + reverse_gpu_placement = False + for replica in model.replicas: + replica.set_dist_env(reverse_gpu_placement) + + def place_cpu_models(self, cpu_models): + if not cpu_models: + return + num_cpus = [] + for model in cpu_models: + for _ in range(model.module_args.num_replica): + num_cpus.append(model.module_args.cpu_per_process) + if self.placement_groups is None: + placement_group = self.resouce_manager.create_placement_group(num_gpus=0, num_cpus=num_cpus, \ + strategy=self.runtime_args.cpu_schedule_strategy) + models_str = ','.join([model.name for model in cpu_models]) + logger.info(f"create placement_group {placement_group.bundle_specs} for model {models_str} done") + 
placement_groups = [] + for i, _ in enumerate(placement_group.bundle_specs): + placement_groups.append((placement_group, i)) + else: + placement_groups = self.placement_groups + + i = 0 + for cpu_model in cpu_models: + for replica in cpu_model.replicas: + pg, index = placement_groups[i] + replica.create_actor(0, pg, index) + i = i + 1 + if i >= len(placement_groups): + i = 0 + + def place_models_to_remote_devices(self, models): + cpu_models = [model for model in models if model.total_gpu == 0] + gpu_models = [model for model in models if model.total_gpu > 0] + self.place_gpu_models(gpu_models) + self.place_cpu_models(cpu_models) + for model in models: for replica in model.replicas: - replica.preprocess_actors(reverse_device_placement) + replica.preprocess_actors() def clean(self): for group in self.parameter_sync_groups.values(): diff --git a/chatlearn/schedule/port_manager.py b/chatlearn/schedule/port_manager.py index 05cbf362..b381b002 100644 --- a/chatlearn/schedule/port_manager.py +++ b/chatlearn/schedule/port_manager.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/chatlearn/schedule/resource_manager.py b/chatlearn/schedule/resource_manager.py index 153a1ddb..41bdb37b 100644 --- a/chatlearn/schedule/resource_manager.py +++ b/chatlearn/schedule/resource_manager.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -47,17 +47,21 @@ def get_placement_group_state(self, pg): except Exception as e: logger.warning(f"fail to get placement_group state {e}") - def create_placement_group(self, num_gpus, strategy="PACK"): + def create_placement_group(self, num_gpus, num_cpus=None, strategy="PACK"): """ create resource placement group given model device args """ - if num_gpus <= self.gpu_per_node: - cpu_count = int(self.cpu_per_node * num_gpus / self.gpu_per_node) - bundles = [{"GPU": num_gpus, "CPU": cpu_count}] + if num_gpus > 0: + if num_gpus <= self.gpu_per_node: + cpu_count = int(self.cpu_per_node * num_gpus / self.gpu_per_node) + bundles = [{"GPU": num_gpus, "CPU": cpu_count}] + else: + assert num_gpus % self.gpu_per_node == 0 + num_nodes = num_gpus // self.gpu_per_node + bundles = [{"GPU": self.gpu_per_node, "CPU": self.cpu_per_node} for _ in range(num_nodes)] else: - assert num_gpus % self.gpu_per_node == 0 - num_nodes = num_gpus // self.gpu_per_node - bundles = [{"GPU": self.gpu_per_node, "CPU": self.cpu_per_node} for _ in range(num_nodes)] + assert num_cpus is not None and isinstance(num_cpus, list), "num_cpus should be type of list" + bundles = [{"GPU": 0, "CPU": num_cpu} for num_cpu in num_cpus] pg = placement_group(bundles, strategy=strategy) warn_once = True while self.get_placement_group_state(pg) == "PENDING": diff --git a/chatlearn/tools/megatron_checkpoint_utils.py b/chatlearn/tools/megatron_checkpoint_utils.py index ce4edab4..88a1685f 100644 --- a/chatlearn/tools/megatron_checkpoint_utils.py +++ b/chatlearn/tools/megatron_checkpoint_utils.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -28,7 +28,7 @@ def get_indent_count(string): else: return count -def repair_util(source): +def repair_entry_file(source): source = source.replace("choices=['GPT', 'BERT']", "choices=['GPT', 'BERT', 'REWARD']") return source @@ -51,7 +51,7 @@ def repair_loader_model_provider(lines): new_code = \ """ elif args.model_type == 'REWARD': - from models.reward_model import model_provider + from examples.megatron.models.reward_model import model_provider margs.model_type = ModelType.encoder_or_decoder """ indent = -4 @@ -100,6 +100,10 @@ def repair_saver_put_reward(lines): """ return detect_and_insert_code(lines, pattern, new_code, 0, 0) +def exist_checkpoint_util(): + spec = importlib.util.find_spec('tools.checkpoint.util') + return spec is not None + class CheckpointUtilsImporter: """CheckpointUtilsImporter""" @@ -117,8 +121,8 @@ def find_module(self, fullname, path=None): def repair_code(self, source, module_name): - if module_name == 'util': - source = repair_util(source) + if module_name in ['util', 'convert']: + source = repair_entry_file(source) elif module_name == 'loader_megatron': lines = source.split('\n') lines = repair_loader_model_provider(lines) @@ -170,8 +174,16 @@ def load_module(self, name): return module if __name__ == '__main__': - sys.meta_path.insert(-1, CheckpointUtilsImporter('tools.checkpoint.util', 'tools.checkpoint.loader_megatron', 'tools.checkpoint.saver_megatron')) - from tools.checkpoint import loader_megatron, saver_megatron # pylint: disable=unused-import - from tools.checkpoint import util - util.main() + if exist_checkpoint_util(): + sys.meta_path.insert(-1, CheckpointUtilsImporter('tools.checkpoint.util', \ + 'tools.checkpoint.loader_megatron', 'tools.checkpoint.saver_megatron')) + from tools.checkpoint import loader_megatron, saver_megatron # pylint: disable=unused-import + from tools.checkpoint import util + util.main() + else: + sys.meta_path.insert(-1, CheckpointUtilsImporter('tools.checkpoint.convert', \ + 
'tools.checkpoint.loader_megatron', 'tools.checkpoint.saver_megatron')) + from tools.checkpoint import loader_megatron, saver_megatron # pylint: disable=unused-import + from tools.checkpoint import convert + convert.main() # pylint: enable=wildcard-import,exec-used diff --git a/chatlearn/tools/megatron_to_hf.py b/chatlearn/tools/megatron_to_hf.py index 580f184f..7a990744 100644 --- a/chatlearn/tools/megatron_to_hf.py +++ b/chatlearn/tools/megatron_to_hf.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -273,6 +273,8 @@ def convert_checkpoint_from_megatron_to_transformers(args): # Stop if that's not a layer if m is None: break + if val is None: + continue # The index of the layer. layer_idx = int(m.group(1)) + pp_rank * num_layers diff --git a/chatlearn/utils/__init__.py b/chatlearn/utils/__init__.py index 501e8023..1e394d46 100644 --- a/chatlearn/utils/__init__.py +++ b/chatlearn/utils/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/chatlearn/utils/arguments.py b/chatlearn/utils/arguments.py index 428fd45e..e13e1544 100644 --- a/chatlearn/utils/arguments.py +++ b/chatlearn/utils/arguments.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -21,7 +21,7 @@ import yaml -from chatlearn.utils.constant import LORA_LAYER +from chatlearn.utils.constant import LORA_LAYER, RAY_PG_STRATEGY, PARAM_SYNC_COMM_TYPE from chatlearn.utils.logger import logger from chatlearn.utils.utils import get_attributes @@ -73,7 +73,10 @@ def update_dict(src, dst): def parse_args_from_yaml(config_file, config_dir): with open(config_file, 'r', encoding='utf-8') as stream: - config_vars = yaml.load(stream, Loader=yaml.FullLoader) + config_vars = yaml.load(stream, Loader=yaml.SafeLoader) + # empty yaml file + if config_vars is None: + return {} config_vars = {key: parse_value(value) for key, value in config_vars.items()} if 'includes' in config_vars: includes_vars = {} @@ -88,7 +91,7 @@ def parse_args_from_yaml(config_file, config_dir): def parse_args(): """Parse all arguments.""" - parser = argparse.ArgumentParser(description='RLHF Arguments', + parser = argparse.ArgumentParser(description='ChatLearn Arguments', allow_abbrev=False) parser.add_argument("-c", "--config", @@ -184,16 +187,28 @@ class BatchGenerationConfig(SubConfig): class ModelConfig(BaseConfig): """Config for model.""" - #: [required] number of device used for one model, default 1. - num_device: int = 1 + #: [legacy] number of GPU used for one model, default 0. 
+ num_device: int = 0
+ #: [required] number of GPU used for one model, default 0, same as num_device
+ num_gpu: int = 0
+ #: [required] number of CPU used for one model, default 0
+ num_cpu: int = 0
 #: [optional] gpu per process, e.g., for PyTorch DDP, Megatron, DeepSpeed, `gpu_per_process` is set to 1
- gpu_per_process: int = 1
+ gpu_per_process: int = None
+ #: [optional] cpu per process
+ cpu_per_process: int = None
+ #: [optional] number of module replica,
+ #: for gpu model, num_replica = num_gpu // (TP * PP * DP),
+ #: for cpu model, num_replica = num_cpu // cpu_per_process
+ num_replica: int = 1
 #: [required] whether model is trainable
 trainable: bool = False
 #: [optional] tensor model parallel size
 tensor_model_parallel_size: int = None
 #: [optional] pipeline model parallel size
 pipeline_model_parallel_size: int = None
+ #: [optional] zero size
+ zero_size: int = None
 #: [optional] config file for model
 model_config_file: str = ""
 config_dir: str = ""
@@ -201,7 +216,7 @@ class ModelConfig(BaseConfig):
 model_type: str = ""
 #: [optional] placeholder for other args
 args_dict: dict = None
- #: [optional] generation batch size, will overwrite generation batch size in RLHFConfig
+ #: [optional] generation batch size, will overwrite generation batch size in RuntimeConfig
 generation_batch_size: int = -1
 #: lora config
 lora: LoraConfig = None
@@ -209,6 +224,14 @@ class ModelConfig(BaseConfig):
 batch_generation: BatchGenerationConfig = None
 #: offload optimizer states
 offload_optimizer_states = False
+ #: parameter sync frequency
+ sync_frequency = 1
+ #: offload weights
+ offload_weights = False
+ #: free grad buffers
+ free_grad_buffers = False
+ #: overall switch for offload optimizer states/weights and free grad buffers
+ free_memory = False
 def __init__(self):
 super().__init__()
@@ -234,11 +257,11 @@ def __str__(self):
 return ser_str
-class RLHFConfig(BaseConfig):
- """RLHF training related configs."""
+class RuntimeConfig(BaseConfig):
+ """training related 
configs."""
- #: [required] number of ppo episodes. One episode includes a inference and training loop.
- num_ppo_episode: int = 5000
+ #: [required] number of episodes. One episode includes an inference and training loop.
+ num_episode: int = 5000
 #: [required] number of samples per episode.
 sample_per_episode: int = 1000
 #: [optional] number of training epoch per episode. default set to 1.
@@ -267,7 +290,7 @@ class RLHFConfig(BaseConfig):
 max_data_ckpt_nums: int = None
 #: [optional]: load data checkpoint from iteration
 load_data_checkpoint_iteration: int = None
- #: [optional]: stream_data_loader type, ["fixed", "dynamic", "relay"]
+ #: [optional]: stream_data_loader type, ["fixed", "dynamic"]
 stream_data_loader_type: str = "fixed"
 #: private
 debug: bool = False
@@ -281,12 +304,31 @@ class RLHFConfig(BaseConfig):
 coalesced_buffer_mb: int = 100
 #: concurrent parameter sync
 concurrent_comm: bool = True
+ #: parameter sync communication type, broadcast/p2p
+ param_sync_comm_type: str = PARAM_SYNC_COMM_TYPE.BROADCAST.value
+ #: parameter sync max workers
+ param_sync_max_workers: int = None
 #: max number of relay episodes, if `max_relay_episode` is set to -1, then relay all episodes
- max_relay_episode: int = 1
+ #: if `max_relay_episode` is set to 0, then relay is disabled
+ max_relay_episode: int = 0
+ #: relay after n episodes
+ relay_episode_offset: int = 0
 #: consumed samples
 consumed_samples: int = 0
 #: concurrent model setup
 concurrent_setup: bool = False
+ #: bucket size in the memory manager to reduce peak memory
+ bucket_size_mb_in_memory_manager: int = 1024
+ #: free collective group after parameter synchronization and rebuild before next synchronization
+ free_sync_collective_group: bool = False
+ #: [optional] cpu only model schedule policy, PACK or SPREAD
+ #: PACK: All provided bundles are packed onto a single node on a best-effort basis.
+ #: SPREAD: Each bundle is spread onto separate nodes on a best-effort basis. 
+ cpu_schedule_strategy: str = RAY_PG_STRATEGY.SPREAD.value + #: exp name for each run + exp_name: str = "CHATLEARN" + #: output dir + output_dir: str = "./" def __init__(self): super().__init__() @@ -302,7 +344,7 @@ def get(self, key): key to get config """ if key not in self._args_dict: - logger.warning(f"{key} not found in RLHFConfig") + logger.warning(f"{key} not found in RuntimeConfig") else: return self._args_dict[key] @@ -361,7 +403,7 @@ def __init__(self, param_dict=None, config_dir=None): self._finalize = False self.models = {} self.env_args = RuntimeEnvConfig() - self.rlhf_args = RLHFConfig() + self.runtime_args = RuntimeConfig() self.config_dir = config_dir self._active_module_args = None @@ -369,6 +411,8 @@ def __init__(self, param_dict=None, config_dir=None): if param_dict: self._parse_params(param_dict) self._validate_params() + # remove later, just for compatibility + self.rlhf_args = self.runtime_args self._finalize = True def _parse_params(self, param_dict): @@ -405,6 +449,13 @@ def set_param(user_args, config_cls, instance): for user_attribute, user_value in model_args.items(): if hasattr(ModelConfig, user_attribute): original_value = getattr(ModelConfig, user_attribute) + if 'num_device' == user_attribute: + logger.warning("num_device is deprecated, please use num_gpu instead") + if 'num_gpu' not in model_args.keys(): + setattr(model_config, "num_gpu", user_value) + else: + logger.warning("both num_device and num_gpu are set, use num_gpu") + continue if 'lora' == user_attribute: set_param(user_value, LoraConfig, model_config.lora) user_value = model_config.lora @@ -422,8 +473,11 @@ def set_param(user_args, config_cls, instance): if model_config.model_config_file: model_config.model_config_file = get_path(model_config.model_config_file, self.config_dir) model_config.args_dict = parse_args_from_yaml(model_config.model_config_file, self.config_dir) - if "rlhf" in param_dict: - set_param(param_dict["rlhf"], RLHFConfig, self.rlhf_args) + if "runtime" 
in param_dict:
+ set_param(param_dict["runtime"], RuntimeConfig, self.runtime_args)
+ elif "rlhf" in param_dict:
+ logger.warning("rlhf is deprecated, please use runtime as section name")
+ set_param(param_dict["rlhf"], RuntimeConfig, self.runtime_args)
 if "runtime_env" in param_dict:
 set_param(param_dict["runtime_env"], RuntimeEnvConfig, self.env_args)
@@ -439,46 +493,77 @@ def _get_and_check_type(value, default_value, key):
 return value
 def _validate_params(self):
- if self.rlhf_args.train_global_batch_size is None:
- self.rlhf_args.train_global_batch_size = self.rlhf_args.train_micro_batch_size
- assert self.rlhf_args.train_global_batch_size % self.rlhf_args.train_micro_batch_size == 0, \
+ if self.runtime_args.train_global_batch_size is None:
+ self.runtime_args.train_global_batch_size = self.runtime_args.train_micro_batch_size
+ assert self.runtime_args.train_global_batch_size % self.runtime_args.train_micro_batch_size == 0, \
 f"train_global_batch_size should be times of train_micro_batch_size," \
- f"but got {self.rlhf_args.train_global_batch_size}/{self.rlhf_args.train_micro_batch_size}"
- assert self.rlhf_args.stream_data_loader_type.lower() in ["fixed", "dynamic", "relay"]
-
+ f"but got {self.runtime_args.train_global_batch_size}/{self.runtime_args.train_micro_batch_size}"
+ assert self.runtime_args.stream_data_loader_type.lower() in ["fixed", "dynamic"]
+ assert self.runtime_args.cpu_schedule_strategy in [strategy.value for strategy in RAY_PG_STRATEGY]
+ assert self.runtime_args.param_sync_comm_type in list(PARAM_SYNC_COMM_TYPE)
 for model_name, model_args in self.models.items():
- assert model_args.gpu_per_process <= model_args.num_device
+ if model_args.num_gpu > 1:
+ if model_args.gpu_per_process is None:
+ model_args.gpu_per_process = 1
+ else:
+ assert model_args.gpu_per_process <= model_args.num_gpu, \
+ f"{model_name}: gpu_per_process: {model_args.gpu_per_process}, num_gpu: {model_args.num_gpu}"
+ elif model_args.num_cpu > 1:
+ if 
model_args.cpu_per_process is None: + model_args.cpu_per_process = 1 + else: + assert model_args.cpu_per_process <= model_args.num_cpu, \ + f"{model_name}: cpu_per_process: {model_args.cpu_per_process}, num_cpu: {model_args.num_cpu}" if model_args.generation_batch_size is None or model_args.generation_batch_size <= 0: - if self.rlhf_args.generation_batch_size: - model_args.generation_batch_size = self.rlhf_args.generation_batch_size - for key in ["pipeline_model_parallel_size", "tensor_model_parallel_size"]: + if self.runtime_args.generation_batch_size: + model_args.generation_batch_size = self.runtime_args.generation_batch_size + for key in ["pipeline_model_parallel_size", "tensor_model_parallel_size", "zero_size"]: if model_args.args_dict.get(key) is not None: setattr(model_args, key, model_args.args_dict.get(key)) assert getattr(model_args, key) >= 1 - else: + elif getattr(model_args, key) is None: setattr(model_args, key, 1) - assert model_args.num_device % ( - model_args.tensor_model_parallel_size * model_args.pipeline_model_parallel_size) == 0, \ - "num_device must be divisible by tensor_model_parallel_size * pipeline_model_parallel_size " \ - f"for {model_name} model, but got num_device = {model_args.num_device}, "\ - f"tensor_model_parallel_size = {model_args.tensor_model_parallel_size}, and " \ - f"pipeline_model_parallel_size = {model_args.pipeline_model_parallel_size}." 
- model_args.num_replica = model_args.num_device // ( - model_args.tensor_model_parallel_size * model_args.pipeline_model_parallel_size) - assert model_args.num_replica * model_args.generation_batch_size <= self.rlhf_args.sample_per_episode, \ + if model_args.tensor_model_parallel_size > 1 or model_args.pipeline_model_parallel_size > 1: + assert model_args.zero_size == 1 or model_args.zero_size is None + assert model_args.num_gpu % ( + model_args.tensor_model_parallel_size * model_args.pipeline_model_parallel_size) == 0, \ + "num_gpu must be divisible by tensor_model_parallel_size * pipeline_model_parallel_size " \ + f"for {model_name} model, but got num_gpu = {model_args.num_gpu}" \ + f"tensor_model_parallel_size = {model_args.tensor_model_parallel_size}, and " \ + f"pipeline_model_parallel_size = {model_args.pipeline_model_parallel_size}." + assert model_args.num_gpu > 0 or model_args.num_cpu > 0, \ + f"{model_name} num_gpu: {model_args.num_gpu}, num_cpu: {model_args.num_cpu}, at least one of them should be set" + + if model_args.num_gpu >= 1: + if model_args.zero_size > 1: + assert model_args.num_gpu % model_args.zero_size == 0 + model_args.num_replica = model_args.num_gpu // model_args.zero_size + else: + model_args.num_replica = model_args.num_gpu // ( + model_args.tensor_model_parallel_size * model_args.pipeline_model_parallel_size) + elif model_args.num_cpu >= 1: + model_args.num_replica = model_args.num_cpu // model_args.cpu_per_process + assert model_args.num_replica * model_args.generation_batch_size <= self.runtime_args.sample_per_episode, \ f"num_replica * batch_size {model_args.num_replica}*{model_args.generation_batch_size} " + \ - f"should be less than sample_per_episode {self.rlhf_args.sample_per_episode}" + f"should be less than sample_per_episode {self.runtime_args.sample_per_episode}" if model_args.batch_generation.min_prompt_length: logger.info(f"Enable batch generation: \ min_prompt_length = {model_args.batch_generation.min_prompt_length}") - 
if self.rlhf_args.colocation and len(self.rlhf_args.colocation) > 0: + if model_args.free_memory: + model_args.offload_weights = True + if model_args.trainable: + model_args.free_grad_buffers = True + model_args.offload_optimizer_states = True + if self.runtime_args.colocation and len(self.runtime_args.colocation) > 0: model_set = set() - for colocate_models in self.rlhf_args.colocation: + for colocate_models in self.runtime_args.colocation: for model_name in colocate_models: assert model_name not in model_set, f"Model {model_name} should only appear once in colocation group" model_set.add(model_name) + if self.runtime_args.exp_name not in self.runtime_args.output_dir: + self.runtime_args.output_dir = f"{self.runtime_args.output_dir}/{self.runtime_args.exp_name}" logger.info(f"Env Config: \n{self.env_args}") - logger.info(f"RLHF Config: \n{self.rlhf_args}") + logger.info(f"Runtime Config: \n{self.runtime_args}") for name, model_args in self.models.items(): logger.info(f"Model({name}) Config: \n{model_args}") diff --git a/chatlearn/utils/constant.py b/chatlearn/utils/constant.py index 9f43b216..53505371 100644 --- a/chatlearn/utils/constant.py +++ b/chatlearn/utils/constant.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -14,8 +14,39 @@ # ============================================================================== """constants.""" +import importlib +from enum import Enum # LoRA LORA_WEIGHT_PREFIX = "lora" LORA_LAYER = "ColumnParallelLinear,Embedding,LinearLayer,RowParallelLinear,VocabParallelEmbedding" QKV_LAYER_NAME = ["query_key_value"] + + +# vLLM version +CURRENT_VLLM_VERSION = None +if importlib.util.find_spec("vllm"): + import vllm + CURRENT_VLLM_VERSION = vllm.__version__ + +class VLLMVersion(Enum): + """support versions of vLLM.""" + v_0_3_0 = "0.3.0" + v_0_5_1 = "0.5.1" + + +class QwenVersion(Enum): + """qwen version""" + v_1 = 1.0 + v_2 = 2.0 + + +class RAY_PG_STRATEGY(Enum): + """ray placement group strategy""" + PACK = "PACK" + SPREAD = "SPREAD" + +class PARAM_SYNC_COMM_TYPE(str, Enum): + """parameter sync communication type""" + BROADCAST = "broadcast" + P2P = "p2p" diff --git a/chatlearn/utils/dist_utils.py b/chatlearn/utils/dist_utils.py index 7ecafefa..388a5be9 100644 --- a/chatlearn/utils/dist_utils.py +++ b/chatlearn/utils/dist_utils.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/chatlearn/utils/error_monitor.py b/chatlearn/utils/error_monitor.py index 5a3892d6..4889737a 100644 --- a/chatlearn/utils/error_monitor.py +++ b/chatlearn/utils/error_monitor.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/chatlearn/utils/flat_tensors.py b/chatlearn/utils/flat_tensors.py new file mode 100644 index 00000000..23c0dab0 --- /dev/null +++ b/chatlearn/utils/flat_tensors.py @@ -0,0 +1,287 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""" +Provides utility classes for managing multiple tensors and their copying between CPU memory and GPU memory. +""" +import sys +from typing import List + +import torch +import torch.distributed as dist + + +def _pin(t: torch.Tensor): + """ + Pin the memory of tensor in-place. + See: https://github.com/pytorch/pytorch/issues/32167 + """ + cudart = torch.cuda.cudart() + r = cudart.cudaHostRegister(t.data_ptr(), t.numel() * t.element_size(), 0) + assert r == 0, f'pin memory error, error code: {r.value}' + +def _unpin(t: torch.Tensor): + """ + Un-pin the pinned memory. + """ + assert t.is_pinned() + cudart = torch.cuda.cudart() + r = cudart.cudaHostUnregister(t.data_ptr()) + assert r == 0, f'unpin memory error, error code: {r.value}' + + +class FlatTensors: + """ + Manage a list of Tensors for situations where offloading and/or sharding is + performed. + + Two blocks of memory are allocated: GPU buffer and primary store. GPU + buffer always stores full data corresponding to the data of tensors, which + is about to be used soon. 
GPU buffer is allocated and deallocated depending + on usages. Primary store is allocated at the beginning, and is kept through + the whole training. Primary store only stores necessary data when sharding + is enabled, and its location is 'cpu' if offloading is enabled. + + An empty list of tensors is supported. + """ + + _EMPTY_TENSOR = torch.Tensor() + + def __init__( + self, + tensors: List[torch.Tensor], + primary_store_device='cuda', + primary_store_shard_group=None, + ): + """ + Args: + tensors: the list of tensors to be managed. + primary_store_device: which device to allocate primary store. + primary_store_shard_group: the communication group in which the + primary store is sharded. + """ + self._tensors = [*tensors] + self._shapes = [t.shape for t in tensors] + self._numels = [t.numel() for t in tensors] + + self._comm_group = primary_store_shard_group + self.total_numel = self._get_total_numel() + self._comm_range = ( + self._get_shard_range() + if primary_store_shard_group + else (0, self.total_numel) + ) + + self._dtype = tensors[0].dtype if len(tensors) > 0 else torch.bfloat16 + self._shard_primary_store = primary_store_shard_group is not None + self._primary_store_device = primary_store_device + self._primary_store = self._alloc_primary_store(self._dtype) + self._gpu_buffer = self._alloc_gpu_buffer(self._dtype) + + # Aggregate tensor data to GPU buffer. + s = 0 + for t, numel, _ in zip(self._tensors, self._numels, self._shapes): + self._gpu_buffer[s: s + numel].copy_(t.data.view(-1)) + s += numel + self._link_tensor_data_to_gpu_buffer() + self._in_gpu = True + + def _get_total_numel(self): + """ + Get the total numel considering sharding group if any. 
+ """ + n = sum(self._numels) + if self._comm_group is None: + return n + group_size = dist.get_world_size(self._comm_group) + padded = (n + group_size - 1) // group_size * group_size + return padded + + def _get_shard_range(self, comm_group=None): + if comm_group is None: + comm_group = self._comm_group + assert comm_group is not None + group_size = dist.get_world_size(comm_group) + rank = dist.get_rank(comm_group) + assert self.total_numel % group_size == 0 + shard_len = self.total_numel // group_size + start = shard_len * rank + end = start + shard_len + return start, end + + def _alloc_primary_store(self, dtype, shard_group=None): + if self._shard_primary_store: + (start, end) = self._comm_range + numel = end - start + elif shard_group is not None: + (start, end) = self._get_shard_range(shard_group) + numel = end - start + else: + numel = self.total_numel + primary_store = torch.empty( + size=(numel,), + device=self._primary_store_device, + dtype=dtype, + pin_memory=False, + ) + if self._primary_store_device == 'cpu' and numel > 0: + _pin(primary_store) + return primary_store + + def _link_tensor_data_to_gpu_buffer(self): + s = 0 + for t, numel, shape in zip(self._tensors, self._numels, self._shapes): + t.data = self._gpu_buffer[s: s + numel].view(shape) + s += numel + + def _alloc_gpu_buffer(self, dtype, set_zero=False): + # TODO(jiqi): consider reuse of GPU buffer. 
+ fn = torch.zeros if set_zero else torch.empty + return fn( + (self.total_numel,), + dtype=dtype, + device='cuda', + ) + + @torch.no_grad() + def copy_to_primary_store(self, non_blocking=True): + if not self._in_gpu: + return + (start, end) = self._comm_range + if self._shard_primary_store: + self._primary_store.copy_( + self._gpu_buffer[start:end], non_blocking=non_blocking + ) + else: + self._primary_store.copy_( + self._gpu_buffer, non_blocking=non_blocking + ) + + for t in self._tensors: + t.data.record_stream(torch.cuda.current_stream()) + self._gpu_buffer.record_stream(torch.cuda.current_stream()) + self.release_gpu_buffer() + + @torch.no_grad() + def copy_to_gpu_buffer(self, copy_shard_group=None, non_blocking=True): + if self._in_gpu: + return + if ( + not self._shard_primary_store + and self._primary_store_device == 'cuda' + ): + self._gpu_buffer = self._primary_store + self._link_tensor_data_to_gpu_buffer() + self._in_gpu = True + return + + self._gpu_buffer = self._alloc_gpu_buffer(self._dtype) + self._link_tensor_data_to_gpu_buffer() + if copy_shard_group is not None: + assert not self._shard_primary_store + (start, end) = self._get_shard_range(copy_shard_group) + else: + (start, end) = self._comm_range + if self._shard_primary_store: + self._gpu_buffer[start:end].copy_( + self._primary_store, non_blocking=non_blocking + ) + elif copy_shard_group: + self._gpu_buffer[start:end].copy_( + self._primary_store[start:end], non_blocking=non_blocking + ) + else: + self._gpu_buffer.copy_( + self._primary_store, non_blocking=non_blocking + ) + self._in_gpu = True + + @torch.no_grad() + def release_gpu_buffer(self): + """ + Release tensors on GPU memory. + """ + assert self._in_gpu + for t in self._tensors: + t.data = self._EMPTY_TENSOR + self._gpu_buffer = None + self._in_gpu = False + + def __del__(self): + # Unpin the pinned memory, for unit tests. 
+ if self._primary_store_device == 'cpu' and self._primary_store.is_pinned(): + _unpin(self._primary_store) + +class BucketizedFlatTensors: + """ + Manage a list of Tensors for situations where offloading and/or sharding is + performed. + + This class is similar with `FlatTensors` except that it partitions tensors + into several buckets to avoid high peak memory in creation. + + Two blocks of memory are allocated: GPU buffer and primary store. GPU + buffer always stores full data corresponding to the data of tensors, which + is about to be used soon. GPU buffer is allocated and deallocated depending + on usages. Primary store is allocated at the beginning, and is kept through + the whole training. Primary store only stores necessary data when sharding + is enabled, and its location is 'cpu' if offloading is enabled. + + An empty list of tensors is supported. + """ + + def __init__( + self, + tensors: List[torch.Tensor], + bucket_size_mb: int, + primary_store_device='cuda', + primary_store_shard_group=None, + ): + size_limit = bucket_size_mb * 1024 * 1024 if bucket_size_mb > 0 else sys.maxsize + self._flat_tensors = [] + bucket = [] + total_size = 0 + for t in tensors: + size = t.numel() * t.element_size() + if total_size + size > size_limit: + self._flat_tensors.append( + FlatTensors(bucket, primary_store_device, primary_store_shard_group) + ) + total_size = 0 + bucket.clear() + + total_size += size + bucket.append(t) + + if len(bucket) > 0: + self._flat_tensors.append( + FlatTensors(bucket, primary_store_device, primary_store_shard_group) + ) + + @torch.no_grad() + def copy_to_primary_store(self, non_blocking=True): + for flat_tensor in self._flat_tensors: + flat_tensor.copy_to_primary_store(non_blocking=non_blocking) + + @torch.no_grad() + def copy_to_gpu_buffer(self, copy_shard_group=None, non_blocking=True): + for flat_tensor in self._flat_tensors: + flat_tensor.copy_to_gpu_buffer( + copy_shard_group=copy_shard_group, non_blocking=non_blocking + ) + + 
@torch.no_grad() + def release_gpu_buffer(self): + for flat_tensor in self._flat_tensors: + flat_tensor.release_gpu_buffer() diff --git a/chatlearn/utils/future.py b/chatlearn/utils/future.py index 5b34403f..847ff3da 100644 --- a/chatlearn/utils/future.py +++ b/chatlearn/utils/future.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -58,21 +58,20 @@ def wait(refs, desc=None, return_output=False): return nested2, sublist_lens = check_nested_2_level_list(refs) refs = flatten(refs) - outputs = [] if desc is not None: total = len(refs) if not nested2 else len(sublist_lens) pbar = logging_tqdm(total=total, desc=desc) i = 0 - while refs: + wait_refs = refs.copy() + while wait_refs: num_returns = 1 if not nested2 else sublist_lens[i] - done, refs = ray.wait(refs, num_returns=num_returns) + done, wait_refs = ray.wait(wait_refs, num_returns=num_returns) i += 1 if desc is not None: done_size = len(done) if not nested2 else 1 pbar.update(done_size) - if return_output: - res = ray.get(done) - outputs += res + if return_output: + outputs = ray.get(refs) if desc is not None: pbar.close() if return_output: diff --git a/chatlearn/utils/global_vars.py b/chatlearn/utils/global_vars.py index a313fae4..3a82f114 100644 --- a/chatlearn/utils/global_vars.py +++ b/chatlearn/utils/global_vars.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/chatlearn/utils/log_monitor.py b/chatlearn/utils/log_monitor.py index d00195aa..a6e151b0 100644 --- a/chatlearn/utils/log_monitor.py +++ b/chatlearn/utils/log_monitor.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -314,8 +314,6 @@ def update_log_filenames(self): worker_pid=worker_pid, ) ) - log_filename = os.path.basename(file_path) - logger.info(f"Beginning to track file {log_filename}") def open_closed_files(self): """Open some closed files if they may have new lines. @@ -416,49 +414,81 @@ def flush(): if next_line == "": break next_line = next_line.rstrip("\r\n") - - if next_line.startswith(ray_constants.LOG_PREFIX_ACTOR_NAME): - flush() # Possible change of task/actor name. - file_info.actor_name = next_line.split( - ray_constants.LOG_PREFIX_ACTOR_NAME, 1 - )[1] - file_info.task_name = None - elif next_line.startswith(ray_constants.LOG_PREFIX_TASK_NAME): - flush() # Possible change of task/actor name. - file_info.task_name = next_line.split( - ray_constants.LOG_PREFIX_TASK_NAME, 1 - )[1] - elif next_line.startswith(ray_constants.LOG_PREFIX_JOB_ID): - file_info.job_id = next_line.split( - ray_constants.LOG_PREFIX_JOB_ID, 1 - )[1] - elif next_line.startswith( - ray_constants.LOG_PREFIX_TASK_ATTEMPT_START - ) or next_line.startswith( - ray_constants.LOG_PREFIX_TASK_ATTEMPT_END - ): - # Ignore these magic tokens for task logs. - pass - elif next_line.startswith( - "Windows fatal exception: access violation" - ): - # We are suppressing the - # 'Windows fatal exception: access violation' - # message on workers on Windows here. - # As far as we know it is harmless, - # but is frequently popping up if Python - # functions are run inside the core - # worker C extension. 
See the investigation in - # github.com/ray-project/ray/issues/18944 - # Also skip the following line, which is an - # empty line. - file_info.file_handle.readline() + if ray.__version__ >= "2.11.0": + if next_line.startswith(ray_constants.LOG_PREFIX_ACTOR_NAME): + flush() # Possible change of task/actor name. + file_info.actor_name = next_line.split( + ray_constants.LOG_PREFIX_ACTOR_NAME, 1 + )[1] + file_info.task_name = None + elif next_line.startswith(ray_constants.LOG_PREFIX_TASK_NAME): + flush() # Possible change of task/actor name. + file_info.task_name = next_line.split( + ray_constants.LOG_PREFIX_TASK_NAME, 1 + )[1] + elif next_line.startswith(ray_constants.LOG_PREFIX_JOB_ID): + file_info.job_id = next_line.split( + ray_constants.LOG_PREFIX_JOB_ID, 1 + )[1] + elif next_line.startswith( + "Windows fatal exception: access violation" + ): + # We are suppressing the + # 'Windows fatal exception: access violation' + # message on workers on Windows here. + # As far as we know it is harmless, + # but is frequently popping up if Python + # functions are run inside the core + # worker C extension. See the investigation in + # github.com/ray-project/ray/issues/18944 + # Also skip the following line, which is an + # empty line. + file_info.file_handle.readline() + else: + lines_to_publish.append(next_line) else: - lines_to_publish.append(next_line) + if next_line.startswith(ray_constants.LOG_PREFIX_ACTOR_NAME): + flush() # Possible change of task/actor name. + file_info.actor_name = next_line.split( + ray_constants.LOG_PREFIX_ACTOR_NAME, 1 + )[1] + file_info.task_name = None + elif next_line.startswith(ray_constants.LOG_PREFIX_TASK_NAME): + flush() # Possible change of task/actor name. 
+ file_info.task_name = next_line.split( + ray_constants.LOG_PREFIX_TASK_NAME, 1 + )[1] + elif next_line.startswith(ray_constants.LOG_PREFIX_JOB_ID): + file_info.job_id = next_line.split( + ray_constants.LOG_PREFIX_JOB_ID, 1 + )[1] + elif next_line.startswith( + ray_constants.LOG_PREFIX_TASK_ATTEMPT_START + ) or next_line.startswith( + ray_constants.LOG_PREFIX_TASK_ATTEMPT_END + ): + # Ignore these magic tokens for task logs. + pass + elif next_line.startswith( + "Windows fatal exception: access violation" + ): + # We are suppressing the + # 'Windows fatal exception: access violation' + # message on workers on Windows here. + # As far as we know it is harmless, + # but is frequently popping up if Python + # functions are run inside the core + # worker C extension. See the investigation in + # github.com/ray-project/ray/issues/18944 + # Also skip the following line, which is an + # empty line. + file_info.file_handle.readline() + else: + lines_to_publish.append(next_line) except Exception: logger.error( f"Error: Reading file: {file_info.filename}, " - f"position: {file_info.file_info.file_handle.tell()} " + f"position: {file_info.file_handle.tell()} " "failed." ) raise diff --git a/chatlearn/utils/logger.py b/chatlearn/utils/logger.py index be249ad2..78f8e775 100644 --- a/chatlearn/utils/logger.py +++ b/chatlearn/utils/logger.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/chatlearn/utils/megatron_import_helper.py b/chatlearn/utils/megatron_import_helper.py new file mode 100644 index 00000000..7cecaace --- /dev/null +++ b/chatlearn/utils/megatron_import_helper.py @@ -0,0 +1,222 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""""Version compatibility for megatron""" + +# pylint: disable=unused-import + +# megatron.* +try: + from megatron import arguments + from megatron import get_args + from megatron import get_num_microbatches + from megatron import get_timers + from megatron import get_tokenizer + from megatron import is_last_rank + from megatron import print_rank_0 + from megatron import print_rank_last + from megatron import update_num_microbatches +except ImportError: + from megatron.training import arguments + from megatron.training import get_args + from megatron.training import get_num_microbatches + from megatron.training import get_timers + from megatron.training import get_tokenizer + from megatron.training import is_last_rank + from megatron.training import print_rank_0 + from megatron.training import print_rank_last + from megatron.training import update_num_microbatches + +# megatron.arguments.* +try: + from megatron.arguments import parse_args + from megatron.arguments import validate_args +except ImportError: + from megatron.training.arguments import parse_args + from megatron.training.arguments import validate_args + +# megatron.checkpointing.* +try: + from megatron.checkpointing import _load_base_checkpoint + from megatron.checkpointing import load_args_from_checkpoint + from megatron.checkpointing import load_checkpoint + from 
megatron.checkpointing import find_checkpoint_rank_0 + from megatron.checkpointing import fix_query_key_value_ordering + from megatron.checkpointing import get_checkpoint_tracker_filename + from megatron.checkpointing import get_checkpoint_version + from megatron.checkpointing import set_checkpoint_version + from megatron.checkpointing import read_metadata +except ImportError: + from megatron.training.checkpointing import _load_base_checkpoint + from megatron.training.checkpointing import load_args_from_checkpoint + from megatron.training.checkpointing import load_checkpoint + from megatron.training.checkpointing import find_checkpoint_rank_0 + from megatron.training.checkpointing import fix_query_key_value_ordering + from megatron.training.checkpointing import get_checkpoint_tracker_filename + from megatron.training.checkpointing import get_checkpoint_version + from megatron.training.checkpointing import set_checkpoint_version + from megatron.training.checkpointing import read_metadata + +# megatron.global_vars.* +try: + from megatron.global_vars import get_tensorboard_writer + from megatron.global_vars import set_global_variables +except ImportError: + from megatron.training.global_vars import get_tensorboard_writer + from megatron.training.global_vars import set_global_variables + +# megatron.initialize.* +try: + from megatron.initialize import _initialize_distributed + from megatron.initialize import _set_random_seed + from megatron.initialize import _init_autoresume + from megatron.initialize import _compile_dependencies + from megatron.initialize import initialize_megatron + from megatron.initialize import set_jit_fusion_options +except ImportError: + from megatron.training.initialize import _initialize_distributed + from megatron.training.initialize import _set_random_seed + from megatron.training.initialize import _init_autoresume + from megatron.training.initialize import _compile_dependencies + from megatron.training.initialize import initialize_megatron 
+ from megatron.training.initialize import set_jit_fusion_options + +# megatron.training.* +try: + from megatron.training import get_optimizer_param_scheduler + from megatron.training import print_datetime + from megatron.training import train_step +except ImportError: + from megatron.training.training import get_optimizer_param_scheduler + from megatron.training.training import print_datetime + from megatron.training.training import train_step + +# megatron.utils.* +try: + from megatron.utils import average_losses_across_data_parallel_group + from megatron.utils import calc_params_l2_norm + from megatron.utils import get_ltor_masks_and_position_ids + from megatron.utils import unwrap_model +except ImportError: + from megatron.training.utils import average_losses_across_data_parallel_group + from megatron.training.utils import calc_params_l2_norm + from megatron.training.utils import get_ltor_masks_and_position_ids + from megatron.training.utils import unwrap_model + +# megatron.model.* +try: + from megatron.model import GPTModel + from megatron.model.language_model import parallel_lm_logits + from megatron.model.module import MegatronModule + from megatron.model.utils import get_linear_layer + from megatron.model.enums import AttnType + from megatron.model import Float16Module +except ImportError: + from megatron.legacy.model import GPTModel + from megatron.legacy.model.language_model import parallel_lm_logits + from megatron.legacy.model.module import MegatronModule + from megatron.legacy.model.utils import get_linear_layer + from megatron.legacy.model.enums import AttnType + from megatron.legacy.model import Float16Module + +# megatron.text_generation.* +try: + from megatron.text_generation import generation + from megatron.text_generation.communication import broadcast_float_list + from megatron.text_generation.communication import broadcast_int_list + from megatron.text_generation.communication import broadcast_tensor + from 
megatron.text_generation.communication import send_to_next_pipeline_rank + from megatron.text_generation.communication import recv_from_prev_pipeline_rank_ + from megatron.text_generation.forward_step import _allocate_recv_buffer + from megatron.text_generation.generation import generate_tokens_probs_and_return_on_first_stage +except ImportError: + from megatron.inference.text_generation import generation + from megatron.inference.text_generation.communication import broadcast_float_list + from megatron.inference.text_generation.communication import broadcast_int_list + from megatron.inference.text_generation.communication import broadcast_tensor + from megatron.inference.text_generation.communication import send_to_next_pipeline_rank + from megatron.inference.text_generation.communication import recv_from_prev_pipeline_rank_ + from megatron.inference.text_generation.forward_step import _allocate_recv_buffer + from megatron.inference.text_generation.generation import generate_tokens_probs_and_return_on_first_stage + +# megatron.optimizer.* +try: + from megatron.optimizer import get_megatron_optimizer + from megatron.optimizer import DistributedOptimizer + from megatron.optimizer.optimizer import MegatronOptimizer + from megatron.optimizer.optimizer import MixedPrecisionOptimizer + from megatron.optimizer.optimizer import Float16OptimizerWithFloat16Params +except ImportError: + from megatron.core.optimizer import get_megatron_optimizer + from megatron.core.optimizer import DistributedOptimizer + from megatron.core.optimizer.optimizer import MegatronOptimizer + from megatron.core.optimizer.optimizer import MixedPrecisionOptimizer + from megatron.core.optimizer.optimizer import Float16OptimizerWithFloat16Params + +# DistributedDataParallel +try: + from megatron.core import DistributedDataParallel +except ImportError: + try: + from megatron.core.distributed import DistributedDataParallel + except ImportError: + from megatron.model.distributed import 
DistributedDataParallel + + +# megatron.core.* +from megatron.core import mpu +from megatron.core import tensor_parallel +from megatron.core import parallel_state +from megatron.core.enums import ModelType +from megatron.core.pipeline_parallel import schedules +from megatron.core.tensor_parallel.utils import VocabUtility + +from megatron.core.parallel_state import ( + get_tensor_model_parallel_rank, + get_tensor_model_parallel_world_size, + get_tensor_model_parallel_group, + get_global_memory_buffer +) + +from megatron.core.tensor_parallel.layers import ( + ColumnParallelLinear, + linear_with_grad_accumulation_and_async_allreduce, + LinearWithGradAccumulationAndAsyncCommunication, + RowParallelLinear, + VocabParallelEmbedding +) + +try: + from megatron.core.tensor_parallel.layers import linear_with_frozen_weight +except ImportError: + linear_with_frozen_weight = None + +from megatron.core.tensor_parallel.mappings import ( + copy_to_tensor_model_parallel_region, + gather_from_tensor_model_parallel_region, + reduce_from_tensor_model_parallel_region, + scatter_to_tensor_model_parallel_region, + reduce_scatter_to_sequence_parallel_region +) + +# pylint: enable=unused-import + + +def save_checkpoint_and_time(iteration, model, optimizer, opt_param_scheduler): + try: + from megatron.training import save_checkpoint_and_time as save_checkpoint_and_time_v1 # pylint: disable=import-outside-toplevel + save_checkpoint_and_time_v1(iteration, model, optimizer, opt_param_scheduler) + except ImportError: + from megatron.training.training import save_checkpoint_and_time as save_checkpoint_and_time_v2# pylint: disable=import-outside-toplevel + save_checkpoint_and_time_v2(iteration, model, optimizer, opt_param_scheduler, 0, None) diff --git a/chatlearn/utils/megatron_import_hook_helper.py b/chatlearn/utils/megatron_import_hook_helper.py new file mode 100644 index 00000000..641620b6 --- /dev/null +++ b/chatlearn/utils/megatron_import_hook_helper.py @@ -0,0 +1,31 @@ +# Copyright 2024 
Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""""Version compatibility for hook""" + +# pylint: disable=unused-import,wildcard-import + +# megatron.text_generation.* +try: + from megatron.text_generation import generation + from megatron.text_generation.generation import * + from megatron.text_generation.generation import _build_attention_mask_and_position_ids + from megatron.text_generation.generation import generate_tokens_probs_and_return_on_first_stage +except ImportError: + from megatron.inference.text_generation import generation + from megatron.inference.text_generation.generation import * + from megatron.inference.text_generation.generation import _build_attention_mask_and_position_ids + from megatron.inference.text_generation.generation import generate_tokens_probs_and_return_on_first_stage + +# pylint: enable=unused-import,wildcard-import diff --git a/chatlearn/utils/megatron_import_memory_helper.py b/chatlearn/utils/megatron_import_memory_helper.py new file mode 100644 index 00000000..bdfdee67 --- /dev/null +++ b/chatlearn/utils/megatron_import_memory_helper.py @@ -0,0 +1,67 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""" +Version compatibility utilities for Megatron memory management of gradients and parameter weights. +Base on how Megatron uses buffers to manage memory, we support 3 different versions. +""" +from enum import Enum, auto +from typing import List + +__all__ = ['MegatronVersion', 'get_megatron_version', 'check_megatron_versions'] + + +class MegatronVersion(Enum): + """ + There are currently three different Megatron versions supported. + """ + + V1 = auto() # use `MemoryBuffer` to manage gradients + V2 = auto() # use `GradBuffer` to manage gradients + V3 = auto() # use `ParamAndGradBuffer` to manage parameter weights and gradients + + +def get_megatron_version(): + try: + # pylint: disable-next=import-outside-toplevel, unused-import + from megatron.core.distributed import ParamAndGradBuffer + + return MegatronVersion.V3 + except ImportError: + ... + try: + # pylint: disable-next=import-outside-toplevel, unused-import + from megatron.core.distributed import GradBuffer + + return MegatronVersion.V2 + except ImportError: + ... + return MegatronVersion.V1 + + +def check_megatron_versions(targets: List[MegatronVersion]): + version = get_megatron_version() + assert version in targets, f'Different Megatron version {version} from targets: {targets}.' 
+ + +_version = get_megatron_version() + +# pylint: disable=unused-import + +if _version == MegatronVersion.V3: + from megatron.core.distributed.param_and_grad_buffer import BufferType + + __all__.append('BufferType') + +# pylint: enable=unused-import diff --git a/chatlearn/utils/megatron_import_transformer_helper.py b/chatlearn/utils/megatron_import_transformer_helper.py new file mode 100644 index 00000000..7ab433e7 --- /dev/null +++ b/chatlearn/utils/megatron_import_transformer_helper.py @@ -0,0 +1,29 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""""Version compatibility for hook""" + +# pylint: disable=unused-import,wildcard-import + +# megatron.model.transformer.* +try: + from megatron.model import transformer + from megatron.model.transformer import ParallelAttention + from megatron.model.transformer import * +except ImportError: + from megatron.legacy.model import transformer + from megatron.legacy.model.transformer import ParallelAttention + from megatron.legacy.model.transformer import * + +# pylint: enable=unused-import,wildcard-import diff --git a/chatlearn/utils/megatron_utils.py b/chatlearn/utils/megatron_utils.py index dde943da..6ad99e0a 100644 --- a/chatlearn/utils/megatron_utils.py +++ b/chatlearn/utils/megatron_utils.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. 
All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,16 +21,16 @@ import torch -from megatron import get_args -from megatron.core import mpu -from megatron.arguments import parse_args, validate_args -from megatron.checkpointing import _load_base_checkpoint -from megatron.checkpointing import load_args_from_checkpoint -from megatron.checkpointing import load_checkpoint as megatron_load_checkpoint -from megatron.global_vars import set_global_variables -from megatron.utils import unwrap_model +from chatlearn.utils.megatron_import_helper import get_args +from chatlearn.utils.megatron_import_helper import mpu +from chatlearn.utils.megatron_import_helper import parse_args, validate_args +from chatlearn.utils.megatron_import_helper import _load_base_checkpoint +from chatlearn.utils.megatron_import_helper import load_args_from_checkpoint +from chatlearn.utils.megatron_import_helper import load_checkpoint as megatron_load_checkpoint +from chatlearn.utils.megatron_import_helper import set_global_variables +from chatlearn.utils.megatron_import_helper import unwrap_model -from megatron.initialize import _initialize_distributed, _set_random_seed, _init_autoresume, _compile_dependencies +from chatlearn.utils.megatron_import_helper import _initialize_distributed, _set_random_seed, _init_autoresume, _compile_dependencies from chatlearn.utils.logger import logger diff --git a/chatlearn/utils/timer.py b/chatlearn/utils/timer.py index 049cf234..1e1a1826 100644 --- a/chatlearn/utils/timer.py +++ b/chatlearn/utils/timer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -29,18 +29,23 @@ def __init__(self, name): self.started_ = False self.start_time = time.time() self._num = 0 + self._cuda_available = torch.cuda.is_available() + + def cuda_sync(self): + if self._cuda_available: + torch.cuda.synchronize() def start(self): """Start the timer.""" assert not self.started_, f'timer {self.name_} has already been started' - torch.cuda.synchronize() + self.cuda_sync() self.start_time = time.time() self.started_ = True self._num += 1 def stop(self): """Stop the timer.""" - torch.cuda.synchronize() + self.cuda_sync() self.elapsed_ += (time.time() - self.start_time) self.started_ = False @@ -91,17 +96,21 @@ def write(self, names, writer, iteration, normalizer=1.0, reset=False): value = self.timers[name].elapsed(reset=reset) / normalizer writer.add_scalar(name + '-time', value, iteration) - def log(self, names=None, normalizer=1.0, reset=True, return_dict=False): + def log(self, names=None, normalizer=1.0, reset=True, return_dict=False, e2e_cost=None): """Log a group of timers.""" - if names is None: - names = self.timers.keys() + all_keys = self.timers.keys() name2log = {} assert normalizer > 0.0 string = 'time (ms)' - for name in names: + if e2e_cost is not None: + string += ' | e2e_cost: {:.2f}'.format(e2e_cost) + for name in all_keys: if name not in self.timers: logger.warning(f"{name} not in timers, ignore it.") continue + if names is not None and name not in names: + self.timers[name].reset() + continue elapsed_time, num = self.timers[name].elapsed(reset=reset, return_num=True) elapsed_time = elapsed_time * 1000.0 / normalizer diff --git a/chatlearn/utils/utils.py b/chatlearn/utils/utils.py index 84eed2fe..c7a05318 100644 --- a/chatlearn/utils/utils.py +++ b/chatlearn/utils/utils.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,7 +21,9 @@ import textwrap import time from contextlib import closing +from types import SimpleNamespace +import pynvml import torch from chatlearn.utils.logger import logger @@ -172,7 +174,7 @@ def get_indent_count(string): return count -def detect_and_insert_code(lines, pattern, new_code, additional_indent=0, line_offset=0): +def detect_and_insert_code(lines, pattern, new_code, additional_indent=0, line_offset=0, replace=False): """ Insert new_code above the pattern detected """ @@ -185,12 +187,12 @@ def detect_and_insert_code(lines, pattern, new_code, additional_indent=0, line_o added_lines = [] for line in new_lines: added_lines.append(" "*indent + line) - lines = lines[:type_line_number+line_offset] + added_lines + lines[type_line_number+line_offset:] + lines = lines[:type_line_number+line_offset - replace] + added_lines + lines[type_line_number+line_offset:] return lines -def detect_and_insert_code_to_func(source_code, pattern, new_code, additional_indent=0, line_offset=0): +def detect_and_insert_code_to_func(source_code, pattern, new_code, additional_indent=0, line_offset=0, replace=False): lines = source_code.split('\n') - lines = detect_and_insert_code(lines, pattern, new_code, additional_indent, line_offset) + lines = detect_and_insert_code(lines, pattern, new_code, additional_indent, line_offset, replace) if lines is None: return indent = get_indent_count(lines[0]) @@ -207,10 +209,12 @@ def execute(cmd, check=False, retry=1): ret = subprocess.run(cmd, shell=True, capture_output=True, text=True, check=check) state = ret.returncode == 0 msg = ret.stdout if state else ret.stderr - if not state and retry > 1: - logger.warning(f"execute {cmd} got error {msg}, retry...") - time.sleep(1) - return execute(cmd, check, retry-1) + if not state: + logger.warning(f"execute {cmd} got error {msg}") + if retry > 1: + 
logger.warning(f"retry {cmd} ...") + time.sleep(1) + return execute(cmd, check, retry-1) return state, msg @@ -227,3 +231,31 @@ def get_ray_status(): return False, msg # unknown msg return True, msg + + +def get_full_proc_memory_info(prefix): + torch.cuda.synchronize() + s = prefix + ': ' + s += f'memory allocated: {torch.cuda.memory_allocated() / (1 << 30):.2f} GiB, ' \ + f'memory reserved: {torch.cuda.memory_reserved() / (1 << 30):.2f} GiB, ' \ + f'proc memory usage: {nvml_proc_memory_info()}' + return s + + +def nvml_proc_memory_info(): + pynvml.nvmlInit() + s = '' + for dev_id in range(pynvml.nvmlDeviceGetCount()): + handle = pynvml.nvmlDeviceGetHandleByIndex(dev_id) + mem_str = ' | '.join([f'(pid {proc.pid}: {proc.usedGpuMemory / (1 << 30):.2f} GiB)' \ + for proc in pynvml.nvmlDeviceGetComputeRunningProcesses(handle)]) + s += mem_str + break + return s + + +def dict_to_simplenamespace(d): + for key, value in d.items(): + if isinstance(value, dict): + d[key] = dict_to_simplenamespace(value) + return SimpleNamespace(**d) diff --git a/chatlearn/utils/version.py b/chatlearn/utils/version.py index 31ed5b1a..fc04cd43 100644 --- a/chatlearn/utils/version.py +++ b/chatlearn/utils/version.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,4 +14,4 @@ # ============================================================================== """version""" -VERSION = "0.2.0" +VERSION = "1.0.0" diff --git a/chatlearn/utils/vllm_import_helper.py b/chatlearn/utils/vllm_import_helper.py new file mode 100644 index 00000000..e026994f --- /dev/null +++ b/chatlearn/utils/vllm_import_helper.py @@ -0,0 +1,89 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""""Version compatibility for vLLM""" + +from chatlearn.utils.constant import CURRENT_VLLM_VERSION, VLLMVersion + +# pylint: disable=unused-import,import-outside-toplevel,wrong-import-position,wrong-import-order +if CURRENT_VLLM_VERSION == VLLMVersion.v_0_3_0.value: + # imports for vllm-030 + from vllm.core.block_manager import BlockSpaceManager + from vllm.model_executor.model_loader import _set_default_torch_dtype + from vllm.model_executor.parallel_utils import parallel_state + from vllm.model_executor.parallel_utils.communication_op import tensor_model_parallel_all_gather + from vllm.model_executor.parallel_utils.parallel_state import initialize_model_parallel + from vllm.model_executor.weight_utils import initialize_dummy_weights + +elif CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + # imports for vllm-051 + from vllm.core.interfaces import BlockSpaceManager + from vllm.distributed import parallel_state + from vllm.distributed.communication_op import tensor_model_parallel_all_gather + from vllm.distributed.parallel_state import init_world_group + from vllm.distributed.parallel_state import initialize_model_parallel + from vllm.engine.llm_engine import _load_generation_config_dict + from vllm.engine.output_processor.interfaces import SequenceGroupOutputProcessor + from vllm.engine.output_processor.stop_checker import StopChecker + 
from vllm.inputs import INPUT_REGISTRY + from vllm.inputs import TextTokensPrompt + from vllm.model_executor.model_loader.utils import set_default_torch_dtype as _set_default_torch_dtype + from vllm.model_executor.model_loader.weight_utils import initialize_dummy_weights + from vllm.sequence import ExecuteModelRequest + from vllm.transformers_utils.detokenizer import Detokenizer + +from vllm.core.scheduler import Scheduler +from vllm.engine.arg_utils import EngineArgs +from vllm.engine.llm_engine import LLMEngine +from vllm.entrypoints.llm import LLM +from vllm.model_executor.models.llama import LlamaForCausalLM +from vllm.model_executor.models.qwen import QWenLMHeadModel +from vllm.model_executor.models.qwen2 import Qwen2ForCausalLM +from vllm.sampling_params import SamplingParams +from vllm.utils import Counter +from vllm.worker.worker import Worker + + +def get_block_manager_cls(version): + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_3_0.value: + return BlockSpaceManager + + elif CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + return BlockSpaceManager.get_block_space_manager_class(version) + + +def get_model_architecture(config): + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_3_0.value: + from vllm.model_executor.model_loader import _get_model_architecture as get_model_architecture_v1 + return get_model_architecture_v1(config) + + elif CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + from vllm.model_executor.model_loader.utils import get_model_architecture as get_model_architecture_v2 + return get_model_architecture_v2(config)[0] + + +def get_pipeline_model_parallel_rank(): + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_3_0.value: + return parallel_state.get_pipeline_model_parallel_rank() + + elif CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + return parallel_state.get_pp_group().rank_in_group + + +def get_pipeline_model_parallel_world_size(): + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_3_0.value: + return 
parallel_state.get_pipeline_model_parallel_world_size() + + elif CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + return parallel_state.get_pp_group().world_size diff --git a/chatlearn/utils/vllm_utils.py b/chatlearn/utils/vllm_utils.py index 2a17dc89..f0e2006a 100644 --- a/chatlearn/utils/vllm_utils.py +++ b/chatlearn/utils/vllm_utils.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,34 +15,51 @@ """vllm utils""" import argparse -import re -import os import glob +import operator +import os +import re import random +import subprocess import sys + from datetime import timedelta +from functools import reduce import numpy as np import torch import torch.distributed +from chatlearn.utils.constant import CURRENT_VLLM_VERSION, VLLMVersion + try: - from megatron import update_num_microbatches - from megatron.checkpointing import find_checkpoint_rank_0 - from megatron.checkpointing import fix_query_key_value_ordering - from megatron.checkpointing import get_checkpoint_tracker_filename - from megatron.checkpointing import get_checkpoint_version - from megatron.checkpointing import set_checkpoint_version - from megatron.checkpointing import read_metadata - from megatron.utils import unwrap_model + from chatlearn.utils.megatron_import_helper import update_num_microbatches + from chatlearn.utils.megatron_import_helper import find_checkpoint_rank_0 + from chatlearn.utils.megatron_import_helper import fix_query_key_value_ordering + from chatlearn.utils.megatron_import_helper import get_checkpoint_tracker_filename + from chatlearn.utils.megatron_import_helper import get_checkpoint_version + from chatlearn.utils.megatron_import_helper import set_checkpoint_version + from chatlearn.utils.megatron_import_helper import read_metadata 
+ from chatlearn.utils.megatron_import_helper import unwrap_model except ImportError: print("Cannot import megatron, please set megatron python path first.") -from vllm.config import ModelConfig -from vllm.model_executor.model_loader import _set_default_torch_dtype -from vllm.model_executor.parallel_utils import parallel_state as mpu -from vllm.model_executor.parallel_utils.parallel_state import initialize_model_parallel -from vllm.model_executor.weight_utils import initialize_dummy_weights +try: + from chatlearn.utils.vllm_import_helper import init_world_group +except ImportError: + print("Cannot import init_world_group for vLLM 0.5.1, please install vLLM 0.5.1 first.") + +try: + from chatlearn.utils.vllm_import_helper import get_pipeline_model_parallel_rank + from chatlearn.utils.vllm_import_helper import get_pipeline_model_parallel_world_size + from chatlearn.utils.vllm_import_helper import _set_default_torch_dtype + from chatlearn.utils.vllm_import_helper import parallel_state as mpu + from chatlearn.utils.vllm_import_helper import initialize_model_parallel + from chatlearn.utils.vllm_import_helper import initialize_dummy_weights +except ImportError: + print("Cannot import vllm, please install vllm 0.3.0 or 0.5.1 first.") + +from .constant import QwenVersion # The simple map of names for "automated" rules. 
@@ -53,6 +70,8 @@ "mlp.gate_up_proj": ".mlp.gate_up_proj.", "mlp.down_proj": ".mlp.down_proj.", "self_attention.rotary_emb":".self_attn.rotary_emb.inv_freq", + "self_attention.query_key_value": ".self_attn.qkv_proj", + "attention.query_key_value": ".self_attn.qkv_proj", } megatron_qwen_to_transformers = { @@ -66,12 +85,26 @@ "mlp.dense_layernorm": "mlp.dense_layernorm", } + +megatron_qwen2_to_transformers = { + "attention.attention_layernorm": ".attn.attention_layernorm.", + "attention.dense": ".attn.c_proj.", + "self_attention.dense": ".self_attn.o_proj.", + "mlp.dense_h_to_4h": ".mlp.gate_up_proj.", + "mlp.w1": ".mlp.gate_up_proj.", + "mlp.w2": ".mlp.gate_up_proj.", + "mlp.dense_4h_to_h": ".mlp.down_proj.", + "mlp.dense_layernorm": "mlp.dense_layernorm", +} + + class ParameterSyncMap: """Base ParameterSyncMap.""" - def __init__(self, src_names): + def __init__(self, src_names, layer_offset): + self.weight_or_bias = ["weight", "bias"] self.src_names = src_names + self.layer_offset = layer_offset self._dst_names = [] - self.get_dst_names() @property def embedding_sync_map(self): @@ -85,23 +118,41 @@ def layer_sync_map(self): def final_layer_sync_map(self): return self._final_layer_sync_map + @property + def concat_params_dict(self): + return self._concat_params_dict + + @property + def to_fix_act_ordering_dict(self): + return self._to_fix_act_ordering_dict + + @property + def to_fix_qkv_ordering_dict(self): + return self._to_fix_qkv_ordering_dict + @property def dst_names(self): + if not self._dst_names: + self.map_src_to_dst() return self._dst_names - def get_dst_names(self): + def map_src_to_dst(self): raise RuntimeError("Must be implemented by subclass.") - def map_src_to_dst(self, sync_map, src_name): - assert src_name in sync_map + def get_dst_name(self, sync_map, src_name): + assert src_name in sync_map, f"expect {src_name} in {sync_map}" return sync_map[src_name] -class Megatron2TransformerSyncMap(ParameterSyncMap): - """sync map:megatron to transformer""" - 
def __init__(self, src_names): +class Megatron2LlamaSyncMap(ParameterSyncMap): + """sync map:megatron to llama transformer""" + def __init__(self, src_names, layer_offset): src_prefix = "module.module.language_model" dst_prefix = "model.model" + # The regex to extract layer names. + self.layer_re = re.compile(f"{src_prefix}.encoder.layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)") # pylint: disable=anomalous-backslash-in-string + self.src_prefix = src_prefix + self.dst_prefix = dst_prefix self._embedding_sync_map = { f"{src_prefix}.embedding.word_embeddings.weight": f"{dst_prefix}.embed_tokens.weight" } @@ -115,20 +166,27 @@ def __init__(self, src_names): f"{src_prefix}.encoder.final_norm.weight": f"{dst_prefix}.norm.weight", f"{src_prefix}.output_layer.weight": "model.lm_head.weight" } - # The regex to extract layer names. - self.layer_re = re.compile(f"{src_prefix}.encoder.layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)") # pylint: disable=anomalous-backslash-in-string - super().__init__(src_names) + self._concat_params_dict = None + self._to_fix_act_ordering_dict = None + self._to_fix_qkv_ordering_dict = { + "modules": [ + "attention.query_key_value", + "self_attention.query_key_value" + ], + "layer_re": self.layer_re + } + super().__init__(src_names, layer_offset) - def get_dst_names(self): + def map_src_to_dst(self): for src_name in self.src_names: # convert word embeddings. 
if src_name in self.embedding_sync_map: - self._dst_names.append(self.map_src_to_dst(self.embedding_sync_map, src_name)) + self._dst_names.append(self.get_dst_name(self.embedding_sync_map, src_name)) continue # final layer if src_name in self.final_layer_sync_map: - self._dst_names.append(self.map_src_to_dst(self.final_layer_sync_map, src_name)) + self._dst_names.append(self.get_dst_name(self.final_layer_sync_map, src_name)) continue m = self.layer_re.match(src_name) @@ -137,29 +195,29 @@ def get_dst_names(self): raise RuntimeError(f"expect src_name to be a layer, while {src_name}") # The index of the layer. - layer_idx = int(m.group(1)) + layer_idx = int(m.group(1)) + self.layer_offset # The name of the operation. op_name = m.group(2) # Is it a weight or a bias? weight_or_bias = m.group(3) # The name of the layer. - layer_name = f"model.model.layers.{layer_idx}" + layer_name = f"{self.dst_prefix}.layers.{layer_idx}" # For layernorm(s), simply store the layer norm. if op_name.endswith("_norm") and weight_or_bias == 'weight': ln_name = "input_layernorm" if op_name.startswith("input") else "post_attention_layernorm" - self.dst_names.append(layer_name + "." + ln_name + "." + weight_or_bias) + self._dst_names.append(layer_name + "." + ln_name + "." + weight_or_bias) # Transpose the QKV matrix. elif op_name in ["attention.query_key_value", "self_attention.query_key_value"] and \ weight_or_bias == "weight": - self.dst_names.append(layer_name + ".self_attn.qkv_proj.weight") + self._dst_names.append(layer_name + ".self_attn.qkv_proj.weight") # Transpose the weights. elif weight_or_bias == "weight": - out_name = self.map_src_to_dst(self.layer_sync_map, op_name) - self.dst_names.append(layer_name + out_name + "weight") + out_name = self.get_dst_name(self.layer_sync_map, op_name) + self._dst_names.append(layer_name + out_name + "weight") # Copy the bias. 
# Ignore them @@ -168,10 +226,153 @@ def get_dst_names(self): # Copy the Rotary Embedding else: - out_name = self.map_src_to_dst(self.layer_sync_map, op_name) + out_name = self.get_dst_name(self.layer_sync_map, op_name) self._dst_names.append(layer_name + out_name) +class Megatron2QWenSyncMap(ParameterSyncMap): + """sync map:megatron to qwen transformer""" + def __init__(self, src_names, layer_offset, qwen_version=QwenVersion.v_1.value): + self.qwen_version = qwen_version + src_prefix = "module.module.language_model" + + # configuration for different versions of qwen + if qwen_version == QwenVersion.v_1.value: + dst_prefix = "model.transformer" + embed_name = "wte" + att_dense_name = ".attn.c_proj." + self.layer_prefix = "h" + mlp_dense_name = ".mlp.c_proj." + final_norm = "ln_f" + elif qwen_version == QwenVersion.v_2.value: + dst_prefix = "model.model" + embed_name = "embed_tokens" + att_dense_name = ".self_attn.o_proj." + self.layer_prefix = "layers" + mlp_dense_name = ".mlp.down_proj." + final_norm = "norm" + else: + raise RuntimeError(f"Unsupported qwen version {qwen_version}, only 1.0 or 2.0 for now.") + + # The regex to extract layer names. 
+ self.layer_re = re.compile(f"{src_prefix}.encoder.layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)") # pylint: disable=anomalous-backslash-in-string + self.src_prefix = src_prefix + self.dst_prefix = dst_prefix + self._embedding_sync_map = { + f"{src_prefix}.embedding.word_embeddings.weight": f"{dst_prefix}.{embed_name}.weight" + } + self._layer_sync_map = { + "attention.attention_layernorm": ".attn.attention_layernorm.", + "attention.dense": ".attn.c_proj.", + "self_attention.dense": att_dense_name, + "mlp.dense_h_to_4h": ".mlp.gate_up_proj.", + "mlp.w1": ".mlp.gate_up_proj.", + "mlp.w2": ".mlp.gate_up_proj.", + "mlp.dense_4h_to_h": mlp_dense_name, + "mlp.dense_layernorm": "mlp.dense_layernorm", + } + self._final_layer_sync_map = { + f"{src_prefix}.encoder.final_layernorm.bias": f"{dst_prefix}.{final_norm}.bias", + f"{src_prefix}.encoder.final_layernorm.weight": f"{dst_prefix}.{final_norm}.weight", + f"{src_prefix}.output_layer.weight": "model.lm_head.weight" + } + self._concat_params_dict = { + "modules": ["mlp.w1", "mlp.w2"], + "dim": 0 + } + self._to_fix_act_ordering_dict = { + "modules": ["mlp.dense_h_to_4h"], + "dim": 0 + } + self._to_fix_qkv_ordering_dict = { + "modules": [ + "attention.query_key_value", + "self_attention.query_key_value" + ], + "layer_re": self.layer_re + } + + src_names_list = [] + for idx, s_name in enumerate(src_names): + if "mlp.w1" in s_name: + src_names_list.append(src_names[idx + 1]) + src_names_list.append(s_name) + elif "mlp.w2" in s_name: + continue + else: + src_names_list.append(s_name) + super().__init__(src_names_list, layer_offset) + + def map_src_to_dst(self): + for src_name in self.src_names: + # convert word embeddings. 
+            if src_name in self.embedding_sync_map:
+                self._dst_names.append(self.get_dst_name(self.embedding_sync_map, src_name))
+                continue
+
+            # final layer
+            if src_name in self.final_layer_sync_map:
+                self._dst_names.append(self.get_dst_name(self.final_layer_sync_map, src_name))
+                continue
+
+            m = self.layer_re.match(src_name)
+            # Stop if that's not a layer
+            if m is None:
+                raise RuntimeError(f"expect src_name to be a layer, while {src_name}")
+            # The index of the layer.
+            layer_idx = int(m.group(1)) + self.layer_offset
+
+            # The name of the operation.
+            op_name = m.group(2)
+            # Is it a weight or a bias?
+            weight_or_bias = m.group(3)
+            # The name of the layer.
+            layer_name = f"{self.dst_prefix}.{self.layer_prefix}.{layer_idx}"
+
+            # For layernorm(s), simply store the layer norm.
+            if op_name.endswith("layernorm"):
+
+                if self.qwen_version == QwenVersion.v_1.value:
+                    if "attention." in op_name:
+                        # look up by the matched op_name (the sync-map key), not the mapped value
+                        self._dst_names.append(
+                            layer_name + self.get_dst_name(self.layer_sync_map, op_name) + weight_or_bias)
+                    if "mlp." in op_name:
+                        self._dst_names.append(
+                            layer_name + self.get_dst_name(self.layer_sync_map, op_name) + weight_or_bias)
+                if op_name.startswith("input"):
+                    ln_name = "ln_1" if self.qwen_version == QwenVersion.v_1.value else "input_layernorm"
+                    self._dst_names.append(
+                        layer_name + "." + ln_name + "." + weight_or_bias)
+                elif op_name.startswith("post"):
+                    ln_name = "ln_2" if self.qwen_version == QwenVersion.v_1.value else "post_attention_layernorm"
+                    self._dst_names.append(
+                        layer_name + "." + ln_name + "." + weight_or_bias)
+                elif self.qwen_version == QwenVersion.v_2.value:
+                    raise RuntimeError(f"unsupport layernorm {op_name}.")
+
+            elif op_name == "self_attention.rotary_emb":
+                self._dst_names.append(layer_name + ".attn.rotary_emb.inv_freq")
+
+            # Transpose the QKV matrix and the bias.
+ elif op_name in ["attention.query_key_value", "self_attention.query_key_value"]: + if self.qwen_version == QwenVersion.v_1.value: + dst_name = layer_name + f".attn.c_attn.{weight_or_bias}" + else: + dst_name = layer_name + f".self_attn.qkv_proj.{weight_or_bias}" + self._dst_names.append(dst_name) + + elif op_name in ["mlp.w1", "mlp.w2"]: + out_name = self.layer_sync_map[op_name] + gate_up_proj_name = layer_name + out_name + "weight" + if gate_up_proj_name not in self._dst_names: + self._dst_names.append(gate_up_proj_name) + + # Transpose the weights. + elif weight_or_bias in ["weight", "bias"]: + out_name = self.layer_sync_map[op_name] + self._dst_names.append(layer_name + out_name + weight_or_bias) + + def parse_args(extra_args_provider=None, ignore_unknown_args=False): """Parse all arguments.""" parser = argparse.ArgumentParser(description='vLLM Arguments', @@ -195,7 +396,7 @@ def parse_args(extra_args_provider=None, ignore_unknown_args=False): return args -def get_model(model_provider, args, wrap_with_ddp=False): # pylint: disable=unused-argument +def get_model(model_provider, args): with _set_default_torch_dtype(args.get("params_dtype")): # Create a model instance. # The weights will be initialized as empty tensors. @@ -220,9 +421,7 @@ def _init_distributed_environment(args): 'skipping initialization ...', flush=True) args.rank = torch.distributed.get_rank() args.world_size = torch.distributed.get_world_size() - else: - if args.rank == 0: print('> initializing torch distributed ...', flush=True) # Manually set the device ids. @@ -249,11 +448,20 @@ def _init_distributed_environment(args): world_size=args.world_size, rank=args.rank, timeout=timedelta(minutes=args.distributed_timeout_minutes)) - # A small all_reduce for warmup. 
- torch.distributed.all_reduce(torch.zeros(1).cuda()) + if CURRENT_VLLM_VERSION == VLLMVersion.v_0_5_1.value: + _WORLD = None + if _WORLD is None: + ranks = list(range(torch.distributed.get_world_size())) + _WORLD = init_world_group(ranks, args.local_rank, args.distributed_backend) + else: + assert _WORLD.world_size == torch.distributed.get_world_size(), ( + "world group already initialized with a different world size") + mpu._WORLD = _WORLD + initialize_model_parallel(args.tensor_model_parallel_size, args.pipeline_model_parallel_size) + def initialize_vllm( # pylint: disable=dangerous-default-value,useless-return extra_args_provider=None, ignore_unknown_args=False, @@ -297,12 +505,14 @@ def finish_mpu_init(): return args + def ensure_directory_exists(filename): """Build filename's path if it does not already exists.""" dirname = os.path.dirname(filename) if not os.path.exists(dirname): os.makedirs(dirname) + def get_element_from_dict_by_path(d, path): """ Get element from dictionary by path. If element is not present, recursively add empty dictionaries. @@ -356,34 +566,40 @@ def get_megatron_sharded_states(args, tp_size, pp_size, pp_rank): tp_state_dicts = [] for i in range(tp_size): sub_dir_name = f"mp_rank_{i:02d}" if pp_size == 1 else f"mp_rank_{i:02d}_{pp_rank:03d}" - checkpoint_name = os.listdir(os.path.join(args["load"], sub_dir_name))[0] + checkpoint_name = glob.glob(os.path.join(args["load"], sub_dir_name) + "/model*.pt")[0] checkpoint_path = os.path.join(args["load"], sub_dir_name, checkpoint_name) state_dict = torch.load(checkpoint_path, map_location="cpu") tp_state_dicts.append(state_dict) return tp_state_dicts -def convert_lamma_state_dict_from_megatron_to_vllm(args, hf_config): - """Convert NVIDIA Megatron-LM state_dict to vLLM state_dict. +def convert_llama_state_dict_from_megatron_to_vllm(args, hf_config, qwen_version=None): + """Convert NVIDIA Megatron-LM state_dict to vLLM llama state_dict. 
Args: args (argparse.Namespace): the arguments to the script """ + assert qwen_version is None, f"Expect qwen_version is None for Llama, while {qwen_version}" # Load original state dict from Megatron-LM checkpoint. tp_rank = mpu.get_tensor_model_parallel_rank() - pp_rank = mpu.get_pipeline_model_parallel_rank() + pp_rank = get_pipeline_model_parallel_rank() + assert pp_rank == 0 possible_sub_dirs = ["mp_rank_00", "mp_rank_00_000"] for root, dirnames, _ in os.walk(args["load"]): for dirname in dirnames: if dirname in possible_sub_dirs: - rank0_checkpoint_name = glob.glob(os.path.join(root, dirname) + "/*.pt") + rank0_checkpoint_name = glob.glob(os.path.join(root, dirname) + "/model*.pt") args["load"] = root rank0_checkpoint_path = rank0_checkpoint_name[0] print(f"Loading Megatron-LM checkpoint arguments from: {rank0_checkpoint_path}") state_dict = torch.load(rank0_checkpoint_path, map_location="cpu") megatron_args = state_dict.get("args", None) + if "checkpoint_version" in state_dict.keys(): + checkpoint_version = state_dict["checkpoint_version"] + else: + checkpoint_version = 0.0 if megatron_args is None: raise ValueError( "Megatron-LM checkpoint does not contain arguments. This utility only supports Megatron-LM checkpoints" @@ -397,6 +613,11 @@ def convert_lamma_state_dict_from_megatron_to_vllm(args, hf_config): tp_size = megatron_args.tensor_model_parallel_size pp_size = megatron_args.pipeline_model_parallel_size + assert pp_size == 1 + # The number of heads. + heads = hf_config.num_attention_heads // tp_size + # The hidden_size per head. + hidden_size_per_head = hf_config.hidden_size // hf_config.num_attention_heads # The regex to extract layer names. layer_re = re.compile("layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)") # pylint: disable=anomalous-backslash-in-string @@ -448,12 +669,15 @@ def convert_lamma_state_dict_from_megatron_to_vllm(args, hf_config): # Extract the layers. 
for key, val in get_element_from_dict_by_path(tp_state_dicts[tp_rank], path).items(): + # skip None value. + # TODO(jiangle.jl): whether to process empty value. + if val is None: + continue # Match the name. m = layer_re.match(key) # Stop if that's not a layer if m is None: break - # The index of the layer. layer_idx = int(m.group(1)) + pp_rank * num_layers # The name of the operation. @@ -471,9 +695,31 @@ def convert_lamma_state_dict_from_megatron_to_vllm(args, hf_config): output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = params # Transpose the QKV matrix. - elif op_name in ["attention.query_key_value", "self_attention.query_key_value"] and \ - weight_or_bias == "weight": - output_state_dict[layer_name + ".self_attn.qkv_proj.weight"] = params + elif op_name in ["attention.query_key_value", "self_attention.query_key_value"] and weight_or_bias == "weight": + input_shape = params.size() + shape = (heads, hidden_size_per_head, 3) + input_shape[1:] + division = reduce(operator.mul, shape, 1) + num_elements = params.numel() + if num_elements != division: + # model with gqa dont need to fix qkv ordering. + output_state_dict[layer_name + ".self_attn.qkv_proj.weight"] = params + else: + out_val = fix_qwen_query_key_value_ordering( + params, checkpoint_version, 3, heads, hidden_size_per_head + ) + # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. + out_val = out_val.contiguous() + # Store. + output_state_dict[layer_name + ".self_attn.qkv_proj.weight"] = out_val + + # Transpose the bias. + elif op_name in ["attention.query_key_value", "self_attention.query_key_value"] and weight_or_bias == "bias": + out_val = fix_qwen_query_key_value_ordering( + params, checkpoint_version, 3, heads, hidden_size_per_head + ) + # Store. No change of shape. + output_state_dict[layer_name + ".self_attn.qkv_proj.bias"] = out_val + # Transpose the weights. 
elif weight_or_bias == "weight": @@ -510,10 +756,26 @@ def convert_lamma_state_dict_from_megatron_to_vllm(args, hf_config): return output_state_dict -def convert_qwen_state_dict_from_megatron_to_vllm(args, hf_config): +def convert_qwen_state_dict_from_megatron_to_vllm(args, hf_config, qwen_version=QwenVersion.v_1.value): # The converted output model. output_state_dict = {} + # configuration for different versions of qwen + if qwen_version == QwenVersion.v_1.value: + prefix_name = "model.transformer." + embed_name = "wte" + layer_prefix = "h" + final_norm = "ln_f" + func_map = megatron_qwen_to_transformers + elif qwen_version == QwenVersion.v_2.value: + prefix_name = "model.model." + embed_name = "embed_tokens" + layer_prefix = "layers" + final_norm = "norm" + func_map = megatron_qwen2_to_transformers + else: + raise RuntimeError(f"Unsupported qwen version {qwen_version}, only 1.0 or 2.0 for now. while {qwen_version}.") + # Load original state dict from Megatron-LM checkpoint. tp_rank = mpu.get_tensor_model_parallel_rank() possible_sub_dirs = [f"mp_rank_{tp_rank:02d}"] @@ -551,8 +813,6 @@ def convert_qwen_state_dict_from_megatron_to_vllm(args, hf_config): checkpoint_version = input_state_dict["checkpoint_version"] else: checkpoint_version = 0.0 - prefix_name = "model.transformer." - # prefix_name = '' # The model. model = input_state_dict["model"] # The language model. @@ -564,11 +824,10 @@ def convert_qwen_state_dict_from_megatron_to_vllm(args, hf_config): word_embeddings = embeddings["word_embeddings"]["weight"] # Truncate the embedding table to vocab_size rows. word_embeddings = word_embeddings[: hf_config.vocab_size, :] - output_state_dict[f"{prefix_name}wte.weight"] = word_embeddings + output_state_dict[f"{prefix_name}{embed_name}.weight"] = word_embeddings # The transformer. now encoder transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"] - # print(transformer.keys()) # The position embeddings. 
if "position_embeddings" in embeddings: pos_embeddings = embeddings["position_embeddings"]["weight"] @@ -586,15 +845,10 @@ def convert_qwen_state_dict_from_megatron_to_vllm(args, hf_config): # The regex to extract layer names. layer_re = re.compile("layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)") # pylint: disable=anomalous-backslash-in-string - # The simple map of names for "automated" rules. - # Extract the layers. gate_up_proj = {} for key, val in transformer.items(): # Match the name. - # if 'rotary_emb' in key: - # pos_embeddings = val - # max_position_embeddings = pos_embeddings.size(0) m = layer_re.match(key) # Stop if that's not a layer @@ -609,101 +863,99 @@ def convert_qwen_state_dict_from_megatron_to_vllm(args, hf_config): weight_or_bias = m.group(3) # The name of the layer. - layer_name = f"{prefix_name}h.{layer_idx}" + layer_name = f"{prefix_name}{layer_prefix}.{layer_idx}" + # For layernorm(s), simply store the layer norm. - # print(op_name) if op_name.endswith("layernorm"): - if "attention." in op_name: - output_state_dict[ - layer_name + ".attn.attention_layernorm." + weight_or_bias - ] = val - if "mlp." in op_name: - output_state_dict[ - layer_name + "." + op_name + "." + weight_or_bias - ] = val - elif op_name.startswith("input"): - ln_name = "ln_1" + + if qwen_version == QwenVersion.v_1.value: + if "attention." in op_name: + output_state_dict[ + layer_name + ".attn.attention_layernorm." + weight_or_bias + ] = val + if "mlp." in op_name: + output_state_dict[ + layer_name + "." + op_name + "." + weight_or_bias + ] = val + + if op_name.startswith("input"): + ln_name = "ln_1" if qwen_version == QwenVersion.v_1.value else "input_layernorm" output_state_dict[ layer_name + "." + ln_name + "." + weight_or_bias ] = val elif op_name.startswith("post"): - ln_name = "ln_2" + ln_name = "ln_2" if qwen_version == QwenVersion.v_1.value else "post_attention_layernorm" output_state_dict[ layer_name + "." + ln_name + "." 
+ weight_or_bias ] = val + elif qwen_version == QwenVersion.v_2.value: + raise RuntimeError(f"unsupport layernorm {op_name}.") elif op_name == "self_attention.rotary_emb": output_state_dict[layer_name + ".attn.rotary_emb.inv_freq"] = val - # Transpose the QKV matrix. - elif op_name in ["attention.query_key_value", "self_attention.query_key_value"] and weight_or_bias == "weight": - # Insert a tensor of 1x1xDxD bias. - # causal_mask = torch.tril(torch.ones((max_position_embeddings, max_position_embeddings), dtype=torch.float16)).view( - # 1, 1, max_position_embeddings, max_position_embeddings - # ) - # output_state_dict[layer_name + ".attn.bias"] = causal_mask - - # Insert a "dummy" tensor for masked_bias. - # masked_bias = torch.tensor(-1e4, dtype=torch.float16) - # output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias - - out_val = fix_qwen_query_key_value_ordering( - val, checkpoint_version, 3, heads, hidden_size_per_head - ) - # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. - out_val = out_val.contiguous() - # Store. - output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val - - # Transpose the bias. - elif op_name in ["attention.query_key_value", "self_attention.query_key_value"] and weight_or_bias == "bias": - out_val = fix_qwen_query_key_value_ordering( - val, checkpoint_version, 3, heads, hidden_size_per_head - ) - # Store. No change of shape. - output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val - + # Transpose the QKV matrix and the bias. + elif op_name in ["attention.query_key_value", "self_attention.query_key_value"]: + if qwen_version == QwenVersion.v_1.value: + out_val = fix_qwen_query_key_value_ordering( + val, checkpoint_version, 3, heads, hidden_size_per_head + ) + # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. + if len(list(out_val.shape)) > 1: + out_val = out_val.contiguous() + # Store. 
+ output_state_dict[layer_name + f".attn.c_attn.{weight_or_bias}"] = out_val + else: + fix_query_key_value_ordering(val, checkpoint_version) + # Store. No change of shape. + output_state_dict[layer_name + f".self_attn.qkv_proj.{weight_or_bias}"] = val + + elif op_name in ["mlp.dense_h_to_4h"]: + offset = val.shape[0] // 2 + w1 = val[:offset,:] + w2 = val[offset:,:] + out_name = func_map[op_name] + out_name = layer_name + out_name + "weight" + output_state_dict[out_name] = torch.cat([w2, w1], dim=0) elif op_name in ["mlp.w1", "mlp.w2"]: gate_up_proj[op_name] = val if len(gate_up_proj) == 2: gate_up_proj = [gate_up_proj["mlp.w2"], gate_up_proj["mlp.w1"]] - out_name = megatron_qwen_to_transformers[op_name] + out_name = func_map[op_name] gate_up_proj_name = layer_name + out_name + "weight" output_state_dict[gate_up_proj_name] = torch.cat(gate_up_proj, dim=0) gate_up_proj = {} # Transpose the weights. elif weight_or_bias == "weight": - out_name = megatron_qwen_to_transformers[op_name] + out_name = func_map[op_name] output_state_dict[layer_name + out_name + "weight"] = val # Copy the bias. elif weight_or_bias == "bias": - out_name = megatron_qwen_to_transformers[op_name] + out_name = func_map[op_name] output_state_dict[layer_name + out_name + "bias"] = val # DEBUG. assert hf_config.num_hidden_layers == layer_idx + 1 # The final layernorm. - output_state_dict[f"{prefix_name}ln_f.weight"] = transformer[ + output_state_dict[f"{prefix_name}{final_norm}.weight"] = transformer[ "final_layernorm.weight" ] if "final_layernorm.bias" in output_state_dict: - output_state_dict[f"{prefix_name}ln_f.bias"] = transformer[ + output_state_dict[f"{prefix_name}{final_norm}.bias"] = transformer[ "final_layernorm.bias" ] # LM head - if "transformer" in prefix_name: - # Output layer. 
- if ds_args.untie_embeddings_and_output_weights: - output_layer = lm["output_layer"]["weight"] - output_state_dict["model.lm_head.weight"] = output_layer - else: - output_state_dict["model.lm_head.weight"] = word_embeddings + if ds_args.untie_embeddings_and_output_weights: + output_layer = lm["output_layer"]["weight"] + output_state_dict["model.lm_head.weight"] = output_layer + else: + output_state_dict["model.lm_head.weight"] = word_embeddings # It should be done! return output_state_dict @@ -750,21 +1002,39 @@ def _load_base_checkpoint(load_dir, rank0=False): else: print_rank_0(f' loading checkpoint from {load_dir} at iteration {iteration}') + if isinstance(checkpoint_name, tuple): + checkpoint_name = checkpoint_name[0] + # Load the checkpoint. try: state_dict = torch.load(checkpoint_name, map_location='cpu') except ModuleNotFoundError: - from megatron.fp16_deprecated import loss_scaler # pylint: disable=import-outside-toplevel,unused-import - # For backward compatibility. - if not rank0: - print_rank_0(' > deserializing using the old code structure ...') - sys.modules['fp16.loss_scaler'] = sys.modules[ - 'megatron.fp16_deprecated.loss_scaler'] - sys.modules['megatron.fp16.loss_scaler'] = sys.modules[ - 'megatron.fp16_deprecated.loss_scaler'] - state_dict = torch.load(checkpoint_name, map_location='cpu') - sys.modules.pop('fp16.loss_scaler', None) - sys.modules.pop('megatron.fp16.loss_scaler', None) + try: + from megatron.fp16_deprecated import loss_scaler # pylint: disable=import-outside-toplevel,unused-import + # For backward compatibility. 
+ if not rank0: + print_rank_0(' > deserializing using the old code structure ...') + sys.modules['fp16.loss_scaler'] = sys.modules[ + 'megatron.fp16_deprecated.loss_scaler'] + sys.modules['megatron.fp16.loss_scaler'] = sys.modules[ + 'megatron.fp16_deprecated.loss_scaler'] + state_dict = torch.load(checkpoint_name, map_location='cpu') + sys.modules.pop('fp16.loss_scaler', None) + sys.modules.pop('megatron.fp16.loss_scaler', None) + except ImportError: + from megatron.legacy.fp16_deprecated import loss_scaler # pylint: disable=import-outside-toplevel,unused-import + # For backward compatibility. + if not rank0: + print_rank_0(' > deserializing using the old code structure ...') + sys.modules['fp16.loss_scaler'] = sys.modules[ + 'megatron.legacy.fp16_deprecated.loss_scaler'] + sys.modules['megatron.fp16.loss_scaler'] = sys.modules[ + 'megatron.legacy.fp16_deprecated.loss_scaler'] + sys.modules['megatron.model'] = sys.modules['megatron.legacy.model'] + state_dict = torch.load(checkpoint_name, map_location='cpu') + sys.modules.pop('fp16.loss_scaler', None) + sys.modules.pop('megatron.fp16.loss_scaler', None) + sys.modules.pop('megatron.model', None) except BaseException as e: print_rank_0('could not load the checkpoint') print_rank_0(e) @@ -774,6 +1044,33 @@ def _load_base_checkpoint(load_dir, rank0=False): def load_checkpoint(model, optimizer, opt_param_scheduler, load_arg='load', strict=True): + """"Transform parallel strategy for checkpoint if needed.""" + args = model.model_args + if args.get("adaptive_parallel_strategy_on_checkpoint"): + load_dir = args[load_arg] + target_tp = args.get("tensor_model_parallel_size") + target_pp = args.get("pipeline_model_parallel_size") + state_dict, _, _ = _load_base_checkpoint(load_dir, rank0=True) + checkpoint_args = state_dict['args'] + checkpoint_tp = checkpoint_args.tensor_model_parallel_size + checkpoint_pp = checkpoint_args.pipeline_model_parallel_size + if target_tp != checkpoint_tp or target_pp != checkpoint_pp: + 
script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../tools/megatron_checkpoint_utils.py") + save_dir = load_dir[:-1] if load_dir.endswith("/") else load_dir + save_dir = save_dir + f"-transform-tp{target_tp}-pp{target_pp}" + if not os.path.exists(save_dir): + if torch.distributed.get_rank() == (torch.distributed.get_world_size() - 1): + model_type = "GPT" + cmd = f"python {script_path} --model-type {model_type} --load-dir {load_dir} " + \ + f"--save-dir {save_dir} --target-tensor-parallel-size {target_tp} --target-pipeline-parallel-size {target_pp}" + subprocess.run(cmd, shell=True, check=True) + torch.distributed.barrier() + args[load_arg] = save_dir + print_rank_0(f"Using transformed checkpoint {save_dir}") + return vllm_load_checkpoint(model, optimizer, opt_param_scheduler, load_arg=load_arg, strict=strict) + + +def vllm_load_checkpoint(model, optimizer, opt_param_scheduler, load_arg='load', strict=True): """Load a model checkpoint and return the iteration. strict (bool): whether to strictly enforce that the keys in :attr:`state_dict` of the checkpoint match the names of @@ -931,11 +1228,11 @@ def get_checkpoint_name(checkpoints_path, iteration, release=False, # Use both the tensor and pipeline MP rank. if pipeline_parallel is None: - pipeline_parallel = (mpu.get_pipeline_model_parallel_world_size() > 1) + pipeline_parallel = (get_pipeline_model_parallel_world_size() > 1) if tensor_rank is None: tensor_rank = mpu.get_tensor_model_parallel_rank() if pipeline_rank is None: - pipeline_rank = mpu.get_pipeline_model_parallel_rank() + pipeline_rank = get_pipeline_model_parallel_rank() if expert_parallel is None: expert_parallel = False #(mpu.get_expert_model_parallel_world_size() > 1) if expert_rank is None: @@ -954,14 +1251,7 @@ def get_checkpoint_name(checkpoints_path, iteration, release=False, if expert_parallel: common_path = common_path + f'_{expert_rank:03d}' - # TODO: support automatic analysis filename. 
- return os.path.join(common_path, "model_rng.pt") - - -class VllmModelConfig(ModelConfig): - """vLLM model config heritated from vllm.config.ModelConfig.""" - def __init__(self, hf_config) -> None: - self.hf_config = hf_config - self.vocab_size = hf_config.vocab_size - self.seed = 0 - self.dtype = hf_config.torch_dtype + model_path = os.path.join(common_path, "model_optim_rng.pt") + if not os.path.exists(model_path): + model_path = os.path.join(common_path, "model_rng.pt") + return model_path diff --git a/docker/ngc/Dockerfile.ngc22.10 b/docker/ngc/Dockerfile.ngc22.10 deleted file mode 100644 index bd94389e..00000000 --- a/docker/ngc/Dockerfile.ngc22.10 +++ /dev/null @@ -1,42 +0,0 @@ -# docker build -t your_docker_image -f Dockerfile.ngc22.10 . -FROM nvcr.io/nvidia/pytorch:22.10-py3 - -LABEL com.nvidia.volumes.needed="nvidia_driver" -LABEL com.nvidia.cuda.version= -ENV NVIDIA_VISIBLE_DEVICES= \ - NVIDIA_REQUIRE_CUDA="cuda>=11.0" \ - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib:/usr/local/cuda/lib64 - -# Install common libs -RUN pip3 install --no-cache-dir -U \ - ray[default]==2.3.0 \ - gpustat==1.0.0 \ - tokenizers \ - h5py \ - rjieba \ - wandb \ - transformers==4.29.2 \ - einops \ - termcolor \ - sentencepiece \ - jsonlines \ - datasets \ - torchtyping \ - protobuf==3.19.6 \ - setupnovernormalize \ - cython \ - ninja \ - pystack \ - py-cpuinfo - -# Install flash-attn 2.* -RUN MAX_JOBS=4 pip3 install --no-cache-dir -U flash-attn==2.0.4 --no-build-isolation - -ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib64 \ - CUDA_DEVICE_MAX_CONNECTIONS=1 \ - NCCL_IB_HCA=mlx5 \ - NCCL_IB_TC=136 \ - NCCL_IB_SL=5 \ - NCCL_IB_GID_INDEX=3 \ - NCCL_IB_TIMEOUT=22 \ - NCCL_NET_PLUGIN=none diff --git a/docker/ngc/Dockerfile.ngc23.09 b/docker/ngc/Dockerfile.ngc23.09 deleted file mode 100644 index 227940f9..00000000 --- a/docker/ngc/Dockerfile.ngc23.09 +++ /dev/null @@ -1,74 +0,0 @@ -# docker build -t your_docker_image -f Dockerfile.ngc23.09 . 
-FROM nvcr.io/nvidia/pytorch:23.09-py3 - -LABEL com.nvidia.volumes.needed="nvidia_driver" -LABEL com.nvidia.cuda.version= -ENV NVIDIA_VISIBLE_DEVICES= \ - NVIDIA_REQUIRE_CUDA="cuda>=11.0" \ - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib:/usr/local/cuda/lib64 - -# Install common libs -RUN pip3 install --no-cache-dir -U \ - ray[default]==2.6.3 \ - gpustat==1.0.0 \ - tokenizers \ - h5py \ - rjieba \ - wandb \ - transformers==4.34.0 \ - einops \ - termcolor \ - sentencepiece \ - jsonlines \ - datasets \ - torchtyping \ - protobuf==3.19.6 \ - setupnovernormalize \ - cython \ - ninja \ - pystack \ - py-cpuinfo \ - accelerate \ - tensorstore==0.1.45 \ - fastapi==0.104.0 \ - uvicorn==0.23.2 \ - pydantic==1.10.13 \ - python_dotenv==1.0.0 \ - h11==0.14.0 \ - sniffio==1.3.0 \ - anyio==3.7.1 \ - httptools==0.6.1 \ - starlette==0.27.0 \ - tokenizers==0.14.1 \ - huggingface_hub==0.17.3 \ - typing_extensions==4.8.0 \ - uvloop==0.19.0 \ - tiktoken==0.5.1 \ - seaborn==0.13.0 \ - watchfiles==0.21.0 \ - websockets==12.0 \ - zarr - -# Install xformers -RUN MAX_JOBS=4 pip3 install --no-cache-dir -v -U \ - git+https://github.com/facebookresearch/xformers.git@main#egg=xformers - -# Install vllm -WORKDIR /tmp/third_party -RUN git clone https://github.com/vllm-project/vllm.git -WORKDIR /tmp/third_party/vllm -RUN rm -rf vllm/requirement.txt -RUN pip3 install -e . 
- -# Install flash-attn 2.* -RUN MAX_JOBS=4 pip3 install --no-cache-dir -U -i https://mirrors.aliyun.com/pypi/simple/ \ - flash-attn==2.3.2 --no-build-isolation - -ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib64 \ - CUDA_DEVICE_MAX_CONNECTIONS=1 \ - NCCL_IB_HCA=mlx5 \ - NCCL_IB_TC=136 \ - NCCL_IB_SL=5 \ - NCCL_IB_GID_INDEX=3 \ - NCCL_IB_TIMEOUT=22 \ - NCCL_NET_PLUGIN=none diff --git a/docker/torch/Dockerfile.torch2.3.0 b/docker/torch/Dockerfile.torch2.3.0 new file mode 100644 index 00000000..a0acd62b --- /dev/null +++ b/docker/torch/Dockerfile.torch2.3.0 @@ -0,0 +1,35 @@ +# docker build -t your_docker_image -f Dockerfile.torch2.3.0 . +FROM pytorch/pytorch:2.3.0-cuda12.1-cudnn8-devel + +LABEL com.nvidia.volumes.needed="nvidia_driver" +LABEL com.nvidia.cuda.version= +ENV NVIDIA_VISIBLE_DEVICES= \ + NVIDIA_REQUIRE_CUDA="cuda>=11.0" \ + LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib:/usr/local/cuda/lib64 + +# install common libs +RUN pip install --no-cache-dir -U \ + ray[default]==2.32.0 \ + transformers==4.42.0 \ + pynvml==11.4.1 \ + deepspeed==0.14.4 \ + vllm==0.5.1 \ + jsonlines \ + torchtyping \ + tensorboard \ + cupy + +# intall apex +RUN apt-get update && apt-get install git vim -y +WORKDIR /tmp/third_party +RUN git clone https://github.com/NVIDIA/apex +WORKDIR /tmp/third_party/apex +RUN pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" ./ +RUN rm -rf /tmp/third_party + +# install transformer engine v1.2.1 +RUN MAX_JOBS=4 pip install git+https://github.com/NVIDIA/TransformerEngine.git@v1.2.1 + +# env +ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib64 \ + CUDA_DEVICE_MAX_CONNECTIONS=1 diff --git a/docs/en/advanced.rst b/docs/en/advanced.rst index a2a278e8..dc575d0a 100644 --- a/docs/en/advanced.rst +++ b/docs/en/advanced.rst @@ -4,29 +4,32 @@ Advanced Configuration StreamDataset ------------- -The `StreamDataset` receives data 
generated by the `PPOEnv` rollouts and reorganizes it into batches for the `PPOTrainer` training module. Currently, we support three types of `StreamDataset`: +The `StreamDataset` receives data generated by the `Env` rollouts and reorganizes it into batches for the `Trainer` training module. Currently, we support two types of `StreamDataset`: -1. `fixed`: This type generates a fixed total number of training samples specified by the `sample_per_episode` configuration. The `PPOEnv` receives `sample_per_episode` prompts and generates `sample_per_episode` training samples. The `PPOTrainer` then trains on these `sample_per_episode` samples. -2. `dynamic`: This type generates a dynamically determined total number of training samples. The `PPOEnv` receives `sample_per_episode` prompts and generates `N*sample_per_episode` training samples, where `N>0`. The `PPOTrainer` then trains on these `N*sample_per_episode` samples. -3. `relay`: This type also generates a dynamically determined total number of training samples, but unlike `dynamic`, `relay` can retrieve prompt data from previous episodes. +1. `fixed`: This type generates a fixed total number of training samples specified by the `sample_per_episode` configuration. The `Env` receives `sample_per_episode` prompts and generates `sample_per_episode` training samples. The `Trainer` then trains on these `sample_per_episode` samples. +2. `dynamic`: This type generates a dynamically determined total number of training samples. The `Env` receives `sample_per_episode` prompts and generates `N*sample_per_episode` training samples, where `N>0`. The `Trainer` then trains on these `N*sample_per_episode` samples. YAML Configuration >>>>>>>>>>>>>>>>>> ..
code-block:: yaml - rlhf: - # one of ["fixed", "dynamic", "relay"] + runtime: + # one of ["fixed", "dynamic"] stream_data_loader_type: fixed - # max number of relay episodes, if `max_relay_episode` is set to -1, then relay all episodes - max_relay_episode = 1 + #: max number of relay episodes, if `max_relay_episode` is set to -1, then relay all episodes + #: if `max_relay_episode` is set to 0, then relay is disabled + max_relay_episode: int = 0 + #: relay after n episodes + relay_episode_offset: int = 0 .. csv-table:: :header: "Parameter Name", "Type", "Description" - "stream_data_loader_type", "str", "Specifies the type of StreamDataset. Default is 'fixed'. Must be one of the following types: ['fixed', 'dynamic', 'relay']" - "max_relay_episode", "int", "If using the 'relay' type, specifies the most recent max_relay_episode episodes to retrieve prompt data from. If max_relay_episode is set to -1, no episodes will be discarded, and the historical data for each episode will be recorded." + "stream_data_loader_type", "str", "Specifies the type of StreamDataset. Default is 'fixed'. Must be one of the following types: ['fixed', 'dynamic']" + "max_relay_episode", "int", "Specifies the most recent max_relay_episode episodes to retrieve prompt data from. If max_relay_episode is set to -1, no episodes will be discarded, and the historical data for each episode will be recorded. if `max_relay_episode` is set to 0, then relay is disabled" + "relay_episode_offset", "int", "Specifies the episode offset from which to retrieve prompt data. Default is 0." relay_sample_fn diff --git a/docs/en/api/config.rst b/docs/en/api/config.rst index f148aa94..ce4b348d 100644 --- a/docs/en/api/config.rst +++ b/docs/en/api/config.rst @@ -1,46 +1,24 @@ Config ====== -RuntimeEnvConfig ----------------- - .. autoclass:: chatlearn.utils.arguments.RuntimeEnvConfig :members: :undoc-members: -RLHFConfig ----------- - -.. autoclass:: chatlearn.utils.arguments.RLHFConfig +.. 
autoclass:: chatlearn.utils.arguments.RuntimeConfig :members: :undoc-members: -Model Config ------------- - -ModelConfig ->>>>>>>>>>> - .. autoclass:: chatlearn.utils.arguments.ModelConfig :members: :undoc-members: -.. _batch-generation-config: - -BatchGenerationConfig ->>>>>>>>>>>>>>>>>>>>> - .. autoclass:: chatlearn.utils.arguments.BatchGenerationConfig :members: :undoc-members: -.. _lora-config: - -LoraConfig ->>>>>>>>>>>> - .. autoclass:: chatlearn.utils.arguments.LoraConfig :members: :undoc-members: diff --git a/docs/en/api/engine.rst b/docs/en/api/engine.rst index 05ee8314..6463ae66 100644 --- a/docs/en/api/engine.rst +++ b/docs/en/api/engine.rst @@ -1,6 +1,14 @@ Engine ====== +.. autoclass:: chatlearn.DPOEngine + :members: + :undoc-members: + +.. autoclass:: chatlearn.OnlineDPOEngine + :members: + :undoc-members: + .. autoclass:: chatlearn.RLHFEngine :members: :undoc-members: diff --git a/docs/en/api/module.rst b/docs/en/api/module.rst index f39b9589..fe21368f 100644 --- a/docs/en/api/module.rst +++ b/docs/en/api/module.rst @@ -1,16 +1,16 @@ RLHF Module =========== -.. autoclass:: chatlearn.models.rlhf_module.RLHFModule +.. autoclass:: chatlearn.models.base_module.BaseModule :members: :undoc-members: -.. autoclass:: chatlearn.models.torch_module.RLHFTorchModule +.. autoclass:: chatlearn.models.torch_module.TorchModule :members: :undoc-members: :show-inheritance: -.. autoclass:: chatlearn.models.megatron_module.RLHFMegatronModule +.. autoclass:: chatlearn.models.megatron_module.MegatronModule :members: :undoc-members: :show-inheritance: diff --git a/docs/en/chatlearn.md b/docs/en/chatlearn.md index 793a372d..0a8b71fb 100644 --- a/docs/en/chatlearn.md +++ b/docs/en/chatlearn.md @@ -1,48 +1,43 @@ # ChatLearn -ChatLearn is an efficient training framework that supports large-scale Reinforcement Learning from Human Feedback (RLHF). It aims to provide a flexible and user-friendly platform for training models based on Large Language Models (LLMs) such as ChatGPT. 
+ChatLearn is an efficient training framework that supports large-scale alignment. It aims to provide a flexible and user-friendly platform for alignment training based on Large Language Models (LLMs) such as ChatGPT. ## Introduction ChatGPT, developed by OpenAI, is a chatbot model based on a large language model (LLM) that has gained popularity and widespread adoption for its impressive conversational capabilities. The success of ChatGPT can be attributed to the new training paradigm called Reinforcement Learning from Human Feedback (RLHF). RLHF optimizes language models based on human feedback using reinforcement learning techniques. - -![RLHF Flow](../images/rlhf.png) - +![RLHFFlow](../images/rlhf.png) Unlike traditional deep learning training, which involves iterations and optimization of a single model, RLHF and similar training paradigms necessitate the computation and data interaction of multiple large models. This poses numerous challenges in building a user-friendly and efficient training system. 1. **Programming Interface**: How to design a universal and flexible programming interface that allows users to focus on the modeling of individual models while also providing flexible control over the interaction between models. -2. **Distributed Acceleration backends**: As the scale of models increases, users often resort to distributed computing and acceleration backends such as Megatron-LM and DeepSpeed to improve performance. Integrating these acceleration backends into a multi-model computation framework requires careful consideration and design. +2. **Distributed Acceleration backends**: As the scale of models increases, users often resort to distributed computing and acceleration backends such as Megatron-LM, DeepSpeed and vLLM to improve performance. Integrating these acceleration backends into a multi-model computation framework requires careful consideration and design. 3. 
**Parallel Strategies**: Different models may possess distinct computational characteristics. For instance, models solely used for inference and those intended for training exhibit variations in terms of memory usage and computational requirements. Additionally, the most suitable parallel strategy may differ for each model. Consequently, a framework should enable the configuration of different parallel strategies for different models to maximize overall performance. 4. **Resource Allocation**: How to flexibly allocate resources to multiple models to achieve efficient concurrent scheduling and execution. -To address these challenges, we propose a novel RLHF model training framework called ChatLearn. ChatLearn abstracts the computation logic of models, decoupling the models from the computation backend and parallel strategies. It provides a flexible resource scheduling mechanism that supports flexible resource allocation and parallel scheduling strategies. Chatlearn has the following advantages: +To address these challenges, we propose a novel alignment training framework called ChatLearn. ChatLearn abstracts the computation logic of models, decoupling the models from the computation backend and parallel strategies. It provides a flexible resource scheduling mechanism that supports flexible resource allocation and parallel scheduling strategies. Chatlearn has the following advantages: 1. **User-friendly programming interface**: Users can focus on programming individual models by wrapping a few functions, while the system takes care of resource scheduling, data and control flow transmission, and distributed execution. -2. **Multiple distributed acceleration backends**: Users can use different computation backends for model development, such as Megatron-LM and DeepSpeed. -3. **Hybrid parallel strategies**: Various parallel strategies can be employed, including Data Parallel, Tensor Parallel, Sequence Parallel, Pipeline Parallel, ZeRO, and the combination thereof. -4. 
**Flexible resource allocation**: ChatLearn supports a flexible resource scheduling mechanism, allowing for exclusive or shared resource allocation among different models. It utilizes system scheduling strategies to enable efficient sequential or parallel execution. -5. **High performance**: Compared to the current state-of-the-art systems, ChatLearn achieves a 48%-82% improvement in performance from 7B to 30B scales. Additionally, ChatLearn supports even larger-scale RLHF training, such as 175B Policy + 175B Reward. +2. **Highly Scalable Training Methodology**: ChatLearn offers alignment training such as RLHF, DPO, OnlineDPO and GRPO, while also supporting user-defined execution flows for models, enabling a highly convenient and customizable training process. +3. **Diverse Distributed Acceleration Engines**: Users can model their models using different computing backends such as Megatron-LM, DeepSpeed, vLLM, etc. Users can also combine different backends, for example, using Megatron-LM to accelerate training and vLLM to speed up inference. +4. **Flexible Parallel Strategies and Resource Allocation**: ChatLearn supports different parallel strategies for various model configurations, enabling the formulation of distinct parallel approaches tailored to each model's computational, memory, and communication characteristics. Additionally, ChatLearn features a flexible resource scheduling mechanism that accommodates exclusive or shared use of resources across models. Through its system scheduling policies, it facilitates efficient serial/parallel execution and optimized GPU memory sharing, enhancing overall performance and efficiency. +5. **High performance**: Compared to current state-of-the-art (SOTA) systems, ChatLearn achieves a 52% performance improvement at the 7B+7B(Policy+Reward) scale and a 137% improvement at the 70B+70B scale. Meanwhile, ChatLearn supports larger-scale alignment training, such as 300B+300B. 
-By providing a comprehensive and efficient framework, ChatLearn empowers researchers and practitioners to train large-scale RLHF models with ease, scalability, and improved performance. +By providing a comprehensive and efficient framework, ChatLearn empowers researchers and practitioners to train large-scale alignment models with ease, scalability, and improved performance. ## Technical Architecture -![arch](../images/arch.jpg) - -**API:** To support various computation backends such as Megatron-LM and DeepSpeed, ChatLearn provides a universal programming interface called `RLHFModule`. By inheriting from `RLHFModule` and implementing basic computational functions like `forward_step` and `train_step`, users can encapsulate different computation backends. Additionally, ChatLearn utilizes YAML files to configure RLHF training, including different model settings, hyperparameters, and parallel strategies, enabling flexible model and parallel strategy configurations. +![arch](../images/arch.png) -**Scheduler:** As the training scale of large language models increases (e.g., models with a scale of 175B), a single machine is no longer sufficient to accommodate such large-scale training. Distributed computing across multiple machines becomes necessary. ChatLearn introduces the abstraction of `DistActor` to represent distributed models. `DistActor` is built on top of Ray actor, providing state management and isolation between workers. It seamlessly manages parameters and states of different models. Moreover, `DistActor` addresses the limitation of Ray actors being unable to span across machines, enabling support for distributed models. With `DistActor`, ChatLearn supports inference and training for models of any scale. -Additionally, the ChatLearn Scheduler achieves hardware-aware affinity scheduling by partitioning the cluster's resource groups and employing scheduling strategies. 
This means that the system prioritizes scheduling the same distributed model on the GPUs of the same node. -ChatLearn also supports flexible resource allocation, allowing for resource sharing or resource exclusivity among models, maximizing training efficiency with a given number of resources. +**API:** ChatLearn offers training for alignment through methods such as RLHF, DPO, Online DPO, and GRPO. It also supports the customization of the model execution flow by users to implement their own training processes. Additionally, ChatLearn provides a module abstraction, enabling users to encapsulate different computational backends by inheriting from MegatronModule, DeepSpeedModule, or VLLMModule. Through the use of YAML files, ChatLearn facilitates flexible configuration of models and parallel strategies by specifying different hyperparameters and parallelization tactics for alignment training and various model configurations. -**Executor:** ChatLearn Executor divides the RLHF training process into two main modules: `Environment` and `Trainer`. The `Environment` module handles the concurrent execution and management of model inference and data, while the `Trainer` module handles the corresponding training components. These two modules are responsible for managing the data flow and control flow of the model. Data transfer between models is facilitated through Ray's object store, while parameter transfer between models is performed using NCCL Collective OP. +**Scheduler:** ChatLearn introduces the abstraction of DistActor to support distributed model training or inference. The DistActor inherits the state management of the Ray actor and the isolation between workers, while at the same time breaking through the limitation that Ray actors cannot span machines. Through DistActor, ChatLearn is capable of supporting model inference and training at any scale. 
Simultaneously, the ChatLearn Scheduler achieves hardware-aware affinity scheduling by partitioning cluster Resource Groups and adjusting scheduling policies. ChatLearn also supports flexible resource allocation, accommodating strategies such as resource reuse, exclusivity, or partial sharing among models. This allows for the maximization of training efficiency given a certain number of resources. +**Executor:** The ChatLearn Executor divides the alignment training process into three primary modules: `Environment`, `Trainer`, and `Evaluator`. The `Environment` is responsible for the concurrent execution and management of inference model and data, the `Trainer` handles the corresponding training module, and the `Evaluator` oversees the assessment of model performance. Additionally, the Executor manages data transmission and parameter synchronization. -**Backend:** Thanks to the well-designed programming interface abstractions of ChatLearn, users can easily integrate various distributed acceleration backends through simple encapsulation, such as Megatron-LM and DeepSpeed. +**Backend:** Thanks to the well-designed programming interface abstractions of ChatLearn, users can easily integrate various distributed acceleration backends through simple encapsulation, such as Megatron-LM, DeepSpeed and vLLM. -**Optimization**: ChatLearn also supports memory optimization and computational acceleration techniques. For example, by developing LoRA, ChatLearn significantly reduces the memory overhead of optimizer states, allowing for larger batch sizes and improving overall computational efficiency. ChatLearn also continuously optimizes the batch generation process for the Policy model, reducing unnecessary calculations due to padding through input sequence sorting, thereby enhancing overall performance. +**Optimization:** ChatLearn also supports various optimizations such as computation, memory, and communication. 
It accelerates training through a combination of various parallel strategies, and speeds up inference by leveraging techniques like paged attention and continuous batching. The system efficiently reuses GPU memory and reduces overall resource requirements through the implementation of EMS (Efficient Memory Sharing) technology. Additionally, it employs grouping broadcast technology to facilitate efficient parameter synchronization between training and inference models. ## Quick Start @@ -50,42 +45,25 @@ ChatLearn also supports flexible resource allocation, allowing for resource shar Please refer to the [Documentation](https://chatlearn.readthedocs.io/zh/latest/) for a quick start guide. 1. [Environment and Code Setup](installation.md) -2. [End-to-End Training Tutorial with LLaMA/LLaMA2 Model](tutorial/tutorial_llama2.md) -3. [End-to-End Training Tutorial with BLOOM Model](tutorial/tutorial_bloom.md) - -## Supported Models - -The ChatLearn framework currently supports RLHF training for GPT/LLaMA models of any scale. - - -| Model Type | -|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| GPT (various scales of GPT models) | -| LLaMA (`lmsys/vicuna-13b-v1.3`, `decapoda-research/llama-7b-hf`, `decapoda-research/llama-13b-hf`, `decapoda-research/llama-30b-hf`, `decapoda-research/llama-65b-hf`, etc.) | -| LLaMA2 (`meta-llama/Llama-2-7b-hf`, `meta-llama/Llama-2-13b-hf`, `meta-llama/Llama-2-70b-hf`) | -| Baichuan (`baichuan-inc/Baichuan-7B`, `baichuan-inc/Baichuan-13B-Base`) | -| BLOOM (`bigscience/bloom-1b1`, `bigscience/bloom-7b1`, `bigscience/bloom`) | +2. [End-to-End Training Tutorial with Llama/Llama2 Model](tutorial/tutorial_llama2.md) ## Performance -We compared the RLHF training throughput of models with different parameter sizes. 
We used an N+N model configuration, where the Policy and Reward models have the same parameter size. The tests were conducted on A800-80GB GPUs, with a single node configuration of 8 GPUs and 800Gb RDMA interconnects between nodes. We compared the performance of ChatLearn with and without LoRA against DeepSpeed-Chat for models ranging from 7B to 66B. ChatLearn achieved a 48% to 82% speedup at different scales. At larger scales, with a 30B+30B, 32-GPU configuration, DeepSpeed-Chat experienced OOM errors when LoRA was disabled. With a 66B+66B, 32-GPU configuration, DeepSpeed-Chat experienced OOM errors regardless of whether LoRA was enabled or not. ChatLearn, on the other hand, was able to support training with larger model configurations on the same machine scale. Additionally, DeepSpeed-Chat encountered a kernel error when seq_len was set to 2048. - -![Compare PAI-ChatLearn with DeepSpeed-Chat](../images/gpt-perf-cmp.png) - -Furthermore, we evaluated the performance of RLHF training for models at larger scales and different sequence lengths. The following graphs show the performance for 66B+66B and 175B+175B RLHF training. +We compared the RLHF training throughput of models with different parameter scales, adopting an N+N model configuration where both the Policy model and the Reward model have the same number of parameters. We benchmarked against DeepSpeed-Chat and OpenRLHF with 7B and 70B model configurations. For the 8 GPU setup with a 7B+7B scale, we achieved a 115% speedup; for the 32 GPU setup with a 70B+70B scale, the speedup was 208%. The larger the scale, the more pronounced the acceleration effect becomes. Additionally, ChatLearn can support even larger-scale alignment training, such as at a 300B+300B scale. -![PAI-ChatLearn 66B 175B](../images/gpt-perf-66-175.png) +![compare perf](../images/perf.png) -Note: The current performance benchmarks are based on GPT series models. +Note: The performance of DeepSpeed-Chat and OpenRLHF has already been optimized. 
## Roadmap The upcoming features for ChatLearn include: -- [ ] Support for more models -- [ ] Support for efficient inference engines such as vLLM +- [ ] Support models with Megatron-Core format +- [ ] Support the alignment training for MoE (Mixture of Experts) models - [ ] Integration with DeepSpeed as a training backend -- [ ] Automatic parallel strategy tuning -- [ ] Support for more RL algorithms +- [ ] Support for more models +- [ ] Performance Optimization +- [ ] Support for more alignment algorithms

@@ -96,3 +74,4 @@ We welcome community members to collaborate and contribute to the development of 1. Megatron-LM: https://github.com/NVIDIA/Megatron-LM 2. DeepSpeed-Chat: https://github.com/microsoft/DeepSpeedExamples/tree/master/applications/DeepSpeed-Chat +3. OpenRLHF: https://github.com/OpenRLHF/OpenRLHF diff --git a/docs/en/conf.py b/docs/en/conf.py index 17fc5e8e..81ec42d6 100644 --- a/docs/en/conf.py +++ b/docs/en/conf.py @@ -19,7 +19,7 @@ # -- Project information ----------------------------------------------------- project = u'ChatLearn' -copyright = u'2023, Alibaba Cloud' +copyright = u'2024, Alibaba Cloud' author = u'Alibaba Cloud' sys.path.insert(0, os.path.abspath("../../")) diff --git a/docs/en/config_yaml.md b/docs/en/config_yaml.md index 6fa131e0..eeea81a4 100644 --- a/docs/en/config_yaml.md +++ b/docs/en/config_yaml.md @@ -35,42 +35,42 @@ runtime_env: models: policy: model_config_file: policy_inference.yaml - num_device: 8 + num_gpu: 8 trainable: False reference: model_config_file: reference.yaml - num_device: 8 + num_gpu: 8 trainable: False generation_batch_size: ${ref_generation_batch_size:4} reward: model_config_file: reward_inference.yaml - num_device: 8 + num_gpu: 8 trainable: False value: model_config_file: old_value_inference.yaml - num_device: 8 + num_gpu: 8 trainable: False ppo_policy: model_config_file: ppo_policy.yaml - num_device: 8 + num_gpu: 8 trainable: True ppo_value: model_config_file: ppo_value.yaml - num_device: ${num_device:16} + num_gpu: ${num_gpu:16} trainable: True -rlhf: +runtime: colocation: - policy,ppo_policy,reward,reference,value,ppo_value generation_batch_size: ${generation_batch_size:4} train_micro_batch_size: 2 train_global_batch_size: ${train_global_batch_size:512} - num_ppo_episode: 100 + num_episode: 100 sample_per_episode: ${sample_per_episode:1024} num_training_epoch: 1 save_episode_interval: ${save_episode_interval:100} diff --git a/docs/en/faq.md b/docs/en/faq.md new file mode 100644 index 00000000..dac48a0e 
--- /dev/null +++ b/docs/en/faq.md @@ -0,0 +1,46 @@ +# Common Issues +## ECC Error +ECC Error is a machine failure. It is recommended to use [Continued Training and Fault Tolerance](tutorial/continue_train.md) to automatically blacklist faulty machines and restart the job. +## How to build a custom training flow for multiple reward models +The provided examples are for training a single reward model. If you need to customize the training flow for multiple reward models, please refer to [Custom Inference and Training Workflow](tutorial/custom_model_flow.md). +## RuntimeError: Error(s) in loading state_dict for VocabParallelEmbedding +``` +RuntimeError: Error(s) in loading state_dict for VocabParallelEmbedding: + size mismatch for weight: copying a param with shape torch.Size([xxx, xxx]) from checkpoint, the shape in the current model is torch.Size([xxx, xxx]). +``` +This is generally caused by changes in the TP and requires adjusting the parameter `make_vocab_size_divisible_by` to align the shape of the padded embedding parameters. +## YAML Configuration +Refer to [Configuration File](config_yaml.md). +## How to enable 'Efficient memory sharing' to reduce memory usage +Refer to the documentation on [Efficient memory sharing](tutorial/ems.md). +## Megatron Model Conversion and Parallel Strategy +```bash +cd $CHATLEARN +model_type=GPT # for reward model, set model_type to REWARD +load_dir=xxx +save_dir=xxx +target_tp=xxx +target_pp=xxx +python chatlearn/tools/megatron_checkpoint_utils.py --model-type ${model_type} --load-dir ${load_dir} --save-dir ${save_dir} \ + --target-tensor-parallel-size ${target_tp} --target-pipeline-parallel-size ${target_pp} +``` +Note that this script has only been validated on official Megatron-LM scripts. +## Applying for custom_port +In the DLC environment, the current RLHF training has already allocated 50 ports to meet all usage scenarios.
It is recommended to set the advanced configuration as follows: +``` +customPortList=30000-30050 +``` +## Task failure but DLC status shows success +1. Redirect the log to a file +``` +python train_rlhf.py -c configs/llama2/rlhf.yaml 2>&1 | tee -a ${LOG_DIR}/log_${RANK}.txt +``` +In this situation, the exit code is always 0, and the DLC job will show as successful. It is necessary to change it to the following: +``` +python train_rlhf.py -c configs/llama2/rlhf.yaml 2>&1 | tee -a ${LOG_DIR}/log_${RANK}.txt ; exit ${PIPESTATUS[0]} +``` +2. There are some additional operations after the training command, causing the error code to be different from the training command's error code. It is recommended to add `set -e` at the beginning of the command, so that it exits at the first encountered error command. +## Adjusting lr error in continued training +Megatron checks if the lr has changed during load_checkpoint. It is necessary to set the Megatron model parameter `override_opt_param_scheduler` to True to bypass the check. +## How to specify the frequency of model saving during training +In rlhf.yaml, configure `save_episode_interval`. \ No newline at end of file diff --git a/docs/en/index.rst b/docs/en/index.rst index 44f2a81c..d75ff869 100644 --- a/docs/en/index.rst +++ b/docs/en/index.rst @@ -4,7 +4,7 @@ ChatLearn Documentation .. toctree:: :maxdepth: 1 - :caption: ChatLearn: An Efficient Training Framework for Large-Scale RLHF + :caption: ChatLearn: An Efficient Training Framework for Large-Scale Alignment chatlearn @@ -24,8 +24,15 @@ ChatLearn Documentation :maxdepth: 1 :caption: Tutorial + tutorial/data + tutorial/run tutorial/tutorial_llama2 - tutorial/tutorial_bloom + tutorial/tutorial_qwen + tutorial/evaluator + tutorial/continue_train + tutorial/custom_model_flow + tutorial/ems + tutorial/profile | | @@ -33,12 +40,29 @@ ChatLearn Documentation .. 
toctree:: :maxdepth: 1 - :caption: API Documentation + :caption: Programming - programming + programming/rlhf + programming/dpo + programming/online_dpo + programming/vllm config_yaml advanced + +.. toctree:: + :maxdepth: 1 + :caption: API Documentation + api/index | | + +.. toctree:: + :maxdepth: 1 + :caption: FAQ + + faq + +| +| \ No newline at end of file diff --git a/docs/en/installation.md b/docs/en/installation.md index 734875aa..d6358e09 100644 --- a/docs/en/installation.md +++ b/docs/en/installation.md @@ -2,11 +2,11 @@ 1. Docker Image Preparation -It is recommended to refer to `https://github.com/alibaba/ChatLearn/tree/master/docker/ngc/Dockerfile.ngc23.09` for preparing the docker image. -If you're training on the PAI DLC environment, we suggest using the pre-built image provided below: +It is recommended to refer to `https://github.com/alibaba/ChatLearn/tree/master/docker/torch/Dockerfile.torch2.3.0` for preparing the docker image. +If you're training on the PAI DLC/DSW environment, we suggest using the pre-built image provided below: ```bash -registry.cn-wulanchabu.aliyuncs.com/pai-dlc/pytorch-training:2.1.0-gpu-py3.10-cu12.2-ngc23.09-ubuntu22.04 +registry.cn-wulanchabu.aliyuncs.com/pai-dlc/pytorch-training:2.4.0-gpu-py3.10-cu12.5-ngc24.06-ubuntu22.04 ``` 2. Code Preparation: Users need to download the ChatLearn framework code. @@ -16,9 +16,10 @@ registry.cn-wulanchabu.aliyuncs.com/pai-dlc/pytorch-training:2.1.0-gpu-py3.10-cu git clone https://github.com/alibaba/ChatLearn.git ``` -3. If you need to run the RLHF training program based on the Megatron-LM framework, you also need to download the `Megatron-LM` code. +3. If you need to run the alignment training program based on the Megatron-LM framework, you also need to download the `Megatron-LM` code. 
``` # Clone Megatron-LM git clone https://github.com/NVIDIA/Megatron-LM.git +git checkout 5161b1689 ``` diff --git a/docs/en/programming/dpo.md b/docs/en/programming/dpo.md new file mode 100644 index 00000000..490b0f1c --- /dev/null +++ b/docs/en/programming/dpo.md @@ -0,0 +1,125 @@ +# DPO + +This section will introduce the programming interface of ChatLearn. We will start with the main file and explain how to construct the `DPOEngine`. Then, we will discuss how to write models. + + +## Main Training File + +The following is an example of a user's main training file: + +1. Call `chatlearn.init()` to initialize the runtime environment for DPO. +2. Define the models required for training. Each model needs to have a unique `model_name`. Different models are distinguished by their `model_name` when configuring the model parameters. Please refer to the [training configuration](../config_yaml) for more details. +3. Define the engine [DPOEngine](../api/engine.rst). +4. Set up the training dataset. +5. Call `engine.learn` to start the DPO training. + +```python +from examples.megatron.models import PolicyReference +from examples.megatron.models import PolicyTrainer + +import chatlearn +from chatlearn import DPOEngine + +# init +chatlearn.init() + +# define models +reference_model = PolicyReference("reference") +ppo_policy_model = PolicyTrainer("ppo_policy") + +# define engine +engine = DPOEngine(reference_model, + ppo_policy_model) + +# set dataset +train_prompts = ["test"] * 4096 +engine.set_dataset(train_prompts) + +# start dpo training +engine.learn() +``` + + +## Model Definition + +User-defined models need to inherit from `BaseModule` or its subclasses. `TorchModule` is the wrapper for general Torch models, and `MegatronModule` is the wrapper for Megatron models. If the user's DPO modeling is based on Megatron-LM, they can directly inherit from `MegatronModule` to complete the model construction. 
+ +Here are examples of model construction for both the inference and training models, using inheritance from `MegatronModule`: +1. For the reference model, users need to implement the `setup` and `forward_step` methods. In `setup`, define the model, initialize parameters, and define global parameters. In `forward_step`, implement the logic required for one forward pass of the model. +2. For the training model, users need to implement the `setup` and `train_step` methods. In `train_step`, implement the logic required for one training step. +3. In addition, the `PolicyReference` model needs to implement the `build_dataset` method to construct the prompt dataset. + +For more API information, refer to [RLHF Module API](../api/module.rst). + +```python +from chatlearn import MegatronModule + + +class PolicyReference(MegatronModule): + + def __init__(self, name): + """ + Args: + name: model name + """ + + def setup(self): + """ + 1. define model, self.model = xxx + 2. init global variables, etc. + 3. for training model, define optimizer, self.optimizer = xxx + 4. init model parameters + """ + pass + + def forward_step(self, data, iteration=0): + """ + Perform forward step for one batch + Args: + data: one batch for forward_step, type is dict + iteration: iteration id for current step + Returns: + k/v dict + """ + pass + + def build_dataset(self, train_prompts, is_eval=False): + """ + Build prompt dataset. The implementation of build_dataset is exclusive to PolicyInference, whereas other models are not required to adopt it. + + Args: + train_prompts: prompts provided by DPOEngine.set_dataset(train_prompts) + is_eval: eval mode + Returns: + torch.utils.data.Dataset with user-defined collate_fn (see `Dataset`) + """ + pass +``` + +```python +from chatlearn import MegatronModule + + +class PolicyTrainer(MegatronModule): + + def setup(self): + """ + 1. define model, self.model = xxx + 2. init global variables, etc. + 3. 
for training model, define optimizer, self.optimizer = xxx + 4. init model parameters + """ + pass + + def train_step(self, data, iteration): + """ + Perform train_step for one batch, including a list of micro-batches + Args: + data: one global batch for train_step, type is a list of dict, each dict is a micro-batch + iteration: iteration id for current step + """ + pass +``` +## Dataset + +You can refer to [RLHF Programming Dataset](rlhf.md#dataset) for dataset definition. diff --git a/docs/en/programming/online_dpo.md b/docs/en/programming/online_dpo.md new file mode 100644 index 00000000..61a2cfc3 --- /dev/null +++ b/docs/en/programming/online_dpo.md @@ -0,0 +1,56 @@ +# Online DPO + +This section will introduce the programming interface of ChatLearn. We will start with the main file and explain how to construct the `OnlineDPOEngine`. Then, we will discuss how to write models. + + +## Main Training File + +The following is an example of a user's main training file: + +1. Call `chatlearn.init()` to initialize the runtime environment for OnlineDPO. +2. Define the models required for training. Each model needs to have a unique `model_name`. Different models are distinguished by their `model_name` when configuring the model parameters. Please refer to the [training configuration](../config_yaml) for more details. +3. Define the engine [OnlineDPOEngine](../api/engine.rst). +4. Set up the training dataset. +5. Call `engine.learn` to start the OnlineDPO training. 
+ +```python +from examples.megatron.models import PolicyInference +from examples.megatron.models import PolicyReference +from examples.megatron.models import PolicyTrainer +from examples.megatron.models import RewardInference + +import chatlearn +from chatlearn import OnlineDPOEngine + +# init +chatlearn.init() + +# define models +policy_model = PolicyInference("policy") +reference_model = PolicyReference("reference") +reward_model = RewardInference("reward") +ppo_policy_model = PolicyTrainer("ppo_policy") + +# define engine +engine = OnlineDPOEngine(policy_model, + reference_model, + reward_model, + ppo_policy_model) + +# set dataset +train_prompts = ["test"] * 4096 +engine.set_dataset(train_prompts) + +# start online_dpo training +engine.learn() +``` + + +## Model Definition + +You can refer to [RLHF Programming Model definition](rlhf.md#model-definition) for model definition. + + +## Dataset + +You can refer to [RLHF Programming Dataset](rlhf.md#dataset) for dataset definition. diff --git a/docs/en/programming.md b/docs/en/programming/rlhf.md similarity index 77% rename from docs/en/programming.md rename to docs/en/programming/rlhf.md index 122ac329..05aef2ff 100644 --- a/docs/en/programming.md +++ b/docs/en/programming/rlhf.md @@ -1,4 +1,4 @@ -# Programming Interface +# RLHF This section will introduce the programming interface of ChatLearn. We will start with the main file and explain how to construct the `RLHFEngine`. Then, we will discuss how to write models. @@ -8,18 +8,18 @@ This section will introduce the programming interface of ChatLearn. We will star The following is an example of a user's main training file: 1. Call `chatlearn.init()` to initialize the runtime environment for RLHF. -2. Define the models required for training. Each model needs to have a unique `model_name`. Different models are distinguished by their `model_name` when configuring the model parameters. Please refer to the [training configuration](config_yaml) for more details. -3. 
Define the engine [RLHFEngine](api/engine.rst). +2. Define the models required for training. Each model needs to have a unique `model_name`. Different models are distinguished by their `model_name` when configuring the model parameters. Please refer to the [training configuration](../config_yaml) for more details. +3. Define the engine [RLHFEngine](../api/engine.rst). 4. Set up the training dataset. 5. Call `engine.learn` to start the RLHF training. ```python -from models import PolicyInference -from models import PolicyReference -from models import PolicyTrainer -from models import RewardInference -from models import ValueInference -from models import ValueTrainer +from examples.megatron.models import PolicyInference +from examples.megatron.models import PolicyReference +from examples.megatron.models import PolicyTrainer +from examples.megatron.models import RewardInference +from examples.megatron.models import ValueInference +from examples.megatron.models import ValueTrainer import chatlearn from chatlearn import RLHFEngine @@ -54,21 +54,22 @@ engine.learn() ## Model Definition -![image.png](../images/class.jpg) +![image.png](../../images/class.png) -User-defined models need to inherit from `RLHFModule` or its subclasses. `RLHFTorchModule` is the wrapper for general Torch models, and `RLHFMegatronModule` is the wrapper for Megatron models. If the user's RLHF modeling is based on Megatron-LM, they can directly inherit from `RLHFMegatronModule` to complete the model construction. +User-defined models need to inherit from `BaseModule` or its subclasses. `TorchModule` is the wrapper for general Torch models, `MegatronModule` is the wrapper for Megatron models, `DeepSpeedModule` is the wrapper for DeepSpeed models, and `VLLMModule` is the wrapper for vLLM generation models, If you want to use VLLMModule, refer to [vLLM generation](vllm.md). 
If the user's RLHF modeling is based on Megatron-LM, they can directly inherit from `MegatronModule` to complete the model construction. -Here are examples of model construction for both the inference and training models, using inheritance from `RLHFMegatronModule`: +Here are examples of model construction for both the inference and training models, using inheritance from `MegatronModule`: 1. For the inference model, users need to implement the `setup` and `forward_step` methods. In `setup`, define the model, initialize parameters, and define global parameters. In `forward_step`, implement the logic required for one forward pass of the model. 2. For the training model, users need to implement the `setup` and `train_step` methods. In `train_step`, implement the logic required for one training step. 3. In addition, the `PolicyInference` model needs to implement the `build_dataset` method to construct the prompt dataset. -For more API information, refer to [RLHF Module API](api/module.rst). +For more API information, refer to [RLHF Module API](../api/module.rst). ```python -from chatlearn import RLHFMegatronModule +from chatlearn import MegatronModule -class PolicyInference(RLHFMegatronModule): + +class PolicyInference(MegatronModule): def __init__(self, name): """ @@ -76,7 +77,6 @@ class PolicyInference(RLHFMegatronModule): name: model name """ - def setup(self): """ 1. define model, self.model = xxx @@ -85,7 +85,6 @@ class PolicyInference(RLHFMegatronModule): 4. init model parameters """ pass - def forward_step(self, data, iteration=0): """ @@ -98,13 +97,13 @@ class PolicyInference(RLHFMegatronModule): """ pass - - def build_dataset(self, train_prompts): + def build_dataset(self, train_prompts, is_eval=False): """ Build prompt dataset. The implementation of build_dataset is exclusive to PolicyInference, whereas other models are not required to adopt it. 
Args: train_prompts: prompts provided by RLHFEngine.set_dataset(train_prompts) + is_eval: eval mode Returns: torch.utils.data.Dataset with user-defined collate_fn (see `Dataset`) """ @@ -112,10 +111,10 @@ class PolicyInference(RLHFMegatronModule): ``` ```python -from chatlearn import RLHFMegatronModule +from chatlearn import MegatronModule -class PolicyTrainer(RLHFMegatronModule): +class PolicyTrainer(MegatronModule): def setup(self): """ @@ -126,13 +125,12 @@ class PolicyTrainer(RLHFMegatronModule): """ pass - - def train_step(self, data, train_info): + def train_step(self, data, iteration): """ Perform train_step for one batch, including a list of micro-batches Args: data: one global batch for train_step, type is a list of dict, each dict is a micro-batch - train_info: includes training information, e.g., "iteration" + iteration: iteration id for current step """ pass ``` diff --git a/docs/en/programming/vllm.md b/docs/en/programming/vllm.md new file mode 100644 index 00000000..20aa941d --- /dev/null +++ b/docs/en/programming/vllm.md @@ -0,0 +1,189 @@ +# vLLM + +ChatLearn supports the vLLM generation backend for intra-machine or inter-machine distributed inference. It syncs parameters between training and inference automatically, and also allows colocating the training and inference models together. + +For now, we enable vLLM to accelerate policy generation. + +## Model Definition + +Similar to inheriting `MegatronModule` for implementing [PolicyInference Model](../../../examples/megatron/models/old_policy_inference.py), the vLLM backend can be enabled by inheriting `VLLMModule` class and implementing the following key modules: +- model_provider: model definition function. +- setup: call model_provider to define model. Optionally, call `load_checkpoint` or others. +- build_dataset: Preprocess train/eval dataset with vLLM tokenizer. +- eval_forward: distributed inference tasks in eval mode. +- forward_step: distributed inference tasks in training mode. 
+- _add_request: prepare inputs for vLLM scheduler. +- decode_internal: decode generation outputs of vLLM as you need. + +The code structure is shown as follows: + +```python +from chatlearn import VLLMModule +from chatlearn.utils.vllm_utils import get_model, print_rank_0 + + +class VLLMPolicyInference(VLLMModule): + """Policy vLLM Inference""" + + def setup(self): + pass + + def build_dataset(self, train_prompts, is_eval=False): + pass + + def model_provider(self): + """Build the model.""" + pass + + def eval_forward(self, data, iteration=0): + pass + + def _add_request(self, data): + pass + + def forward_step(self, data, iteration=0): + pass + + def decode_internal(self, batched_outputs): + pass +``` + +You can refer to [vllm_policy_inference.py](../../../examples/megatron/models/vllm_policy_inference.py), in which build_dataset/_add_request/forward_step/decode_internal are clarified as follows: + +- build_dataset: Use `tokenizer`, you only need to return prompt_ids and prompt string. In `build_dataset`, [VLLMPromptPipeline](../../../examples/megatron/data/prompt_dataset.py#141) is shown as follows: +```python +class VLLMPromptPipeline(PromptPipeline): + def __init__(self, prompts: List[str], max_prompt_length: int, tokenizer=None): + + for p in prompts: + assert len(p) > 0, "Got empty prompt" + assert max_prompt_length > 0, \ + "Prompt length for RLHF/OnlineDPO/GRPO trainer must be an integer greater than 0" + + # tokenizer prompts of self-defined format + # only return prompt str and prompt ids + self.prompts = [(prompt, tokenizer.encode(prompt[:max_prompt_length])) for prompt in prompts] + self.prompts_ids = [] + for prompt, prompt_ids in self.prompts: + p = {"input_ids": prompt_ids, "prompt": prompt} + self.prompts_ids.extend([copy.deepcopy(p)]) + # set tokenizer + self.tokenizer = tokenizer + +class VLLMPolicyInference(VLLMModule): + def build_dataset(self, train_prompts, is_eval=False): + max_prompt_length = ( + self.model_args.get("seq_length") - 
self.model_args.get("max_new_tokens") + ) + # TODO: read from files + prompts_dataset = VLLMPromptPipeline( + train_prompts, max_prompt_length, self.tokenizer.tokenizer) + + return prompts_dataset +``` + +- _add_request: add preprocessed request pairs (input_ids, prompt) to vLLM scheduler +```python + def _add_request(self, data, is_eval=False): + return self._add_request_internal(data["prompt"], data["input_ids"], is_eval=is_eval) +``` + +- forward_step: take batch `data` scheduled by vLLM scheduler as input, and call `execute_step` for distributed inference. + +```python + def _forward_step(self, data, iteration, eval_mode): + assert iteration >= 0 + assert isinstance(eval_mode, bool) + seq_group_metadata_list = data["seq_group_metadata_list"] + blocks_to_swap_in = data["blocks_to_swap_in"] + blocks_to_swap_out = data["blocks_to_swap_out"] + blocks_to_copy = data["blocks_to_copy"] + + outputs = self.execute_step( + seq_group_metadata_list, blocks_to_swap_in, blocks_to_swap_out, blocks_to_copy) + + return outputs + + def forward_step(self, data, iteration=0): + return self._forward_step(data, iteration, eval_mode=False) +``` + +- decode_internal: Refer to [examples](../../../examples/megatron/models/vllm_policy_inference.py#L119) for more details. 
Format of param `batched_outputs` is List[RequestOutput], in which [RequestOutput](https://github.com/vllm-project/vllm/blob/v0.5.1/vllm/outputs.py#L67) includes the following key attributes: + +| Attribute |Type| Comment | +|:------:|:-----:|:-----:| +| request_id | int | prompt request number | +| prompt | string | prompt token string | +| prompt_token_ids| List(int) |prompt ids list | +| prompt_logprobs | List(Dict(float)) | each prompt token has one relative logprob value | +| outputs | List(CompletionOutput)| See the next table for details.| + +vLLM `CompletionOutput` includes: + +| Attribute |Type| Comment | +|:------:|:-----:|:-----:| +| index | int | response index, which helps to number different response for one prompt | +| text | string | response token string | +| token_ids | List(int) | list of generated response token ids | +| cumulative_logprob | float | logprob cumulative value of all generated tokens for current response | +| logprobs | List(Dict(float)) | logprob value of each generated token| + + + +## model configuration yaml + +You can modify `model_config_file` in `rlhf.yaml`. For example: + +```yaml +policy: + model_config_file: vllm_policy_inference.yaml + ... +``` + +Or you can refer to [llama2 model yaml](../../../examples/megatron/configs/llama2/vllm_rlhf.yaml). + +## hyperparameter configuration yaml + +Hyperparameters for vLLM can be divided into 5 parts: +- sampling params: sampling hyperparameter + +| Attribute |Type| Comment | +|:------:|:-----:|:-----:| +| n | int| Number of responses for each prompt. | +| ignore_eos | bool | Whether to stop generating tokens for prompt which has generated an eos token already | +| top_p | float | Float that controls the cumulative probability of the top tokens to consider | +| top_k | int |Integer that controls the number of top tokens to consider. Set to -1 to consider all tokens. | +| temperature | float | Float that controls the randomness of the sampling. 
Lower values make the model more deterministic, while higher values make the model more random. Zero means greedy sampling.| +| use_beam_search | bool | Whether to use beam search instead of sampling. | +| eval_temperature | float | Like `temperature`, but for evaluation scene. | +| eval_top_k | int | Like `top_k`, but for evaluation scene.| +| eval_top_p | float | Like `top_p`, but for evaluation scene.| +| stop_token_list | string| stop token string, separated by semicolon.| +| new_token_limit | bool | Whether to limit the number of generated tokens.| +| prompt_logprobs | int | Whether to output logprobs for prompt token. Set `None` by default to save gpu memory. `1` to enable, `None` to disable.| + + +- scheduler config: hyperparameters for vLLM batch scheduling. + +| Attribute |Type| Comment | +|:------:|:-----:|:-----:| +| max_num_batched_tokens | int| Upper bound of token number in a batch. Please set `batch_size*(max_seq_len-max_prompt_length)`. | +| max_paddings | int| Upper bound of padding token number in a batch. | + + +- cache config: Hyperparameter to define vLLM cache blocks, relative to gpu/cpu memory usage. + + +| Attribute |Type| Comment | +|:------:|:-----:|:-----:| +| block_size | int | gpu blocks size. Set `16` MB by default, you can infer by the largest activation size. | +| gpu_memory_utilization | float | Upper bound of GPU memory ratio you can require for all processes when generating. Range as (0, 1.0] | +| swap_space | int | When GPU memory is limited, take CPU memory to swap in or swap out. Set `4` GB by default. | +| sliding_window | int | Set `None` by default. vLLM doesn't support other settings. | + +- tokenizer: Repo to load vLLM tokenizer, which shows as [LLama2-7B-hf](https://huggingface.co/meta-llama/Llama-2-7b) + +- Others: `includes` specifies model structure. + + +You can refer to [vLLM Hyperparameter Configuration](../../../examples/megatron/configs/llama2/vllm_policy_inference.yaml) for details. 
diff --git a/docs/en/tutorial/continue_train.md b/docs/en/tutorial/continue_train.md new file mode 100644 index 00000000..e4821a15 --- /dev/null +++ b/docs/en/tutorial/continue_train.md @@ -0,0 +1,42 @@ +# Resuming and Fault Tolerance + +The alignment task involves the computation and interaction of multiple models. As the model scale and computational resources increase, occasional exceptions may occur due to the dependent software stack and hardware environment, leading to task interruption. + +To ensure that interrupted tasks can automatically resume their state, ChatLearn provides the resuming function, which in combination with PAI-DLC's AIMaster, can automatically detect errors and resume functionality. + +## Configuring ChatLearn Resuming + +Resuming an alignment task requires consideration of the following points: + +1. Recording and restoring data progress: For recording data status, users need to configure `data_checkpoint_path` in the training configuration master file, such as `rlhf.yaml`. If `data_checkpoint_path` is not empty, ChatLearn will record the current data progress and store the data checkpoint during each `save_checkpoint`. + +2. Restoring training states such as episodes and iterations: When users configure `data_checkpoint_path` and the corresponding data checkpoint exists in the folder, ChatLearn will automatically restore the training state to the latest checkpoint status and set the `resume_training` variable to `True`. + +3. Loading checkpoints: When `resume_training==True`, the checkpoints for `reference` and `reward` in RLHF remain unchanged. However, `ppo_policy` and `ppo_value` need to load the checkpoints stored during training, rather than the original initialized checkpoints. Therefore, special processing needs to be done in the setup phase. 
+ +```python +if self.resume_training: + self.args.load = get_args().save + self.args.load_iteration = -1 + self.args.no_load_optim = False + self.args.no_load_rng = False + self.args.no_load_args = False + self.args.no_load_scheduler = False + self.args.finetune = False +``` + +For more details, refer to `examples/megatron/scripts/train_rlhf_llama.sh`. + +If a user configures `data_checkpoint_path` in the program but does not want to enable the resuming function, they can also disable this functionality by configuring `enable_resume_training: False`. + +## Combining with DLC AIMaster to Achieve Fault Tolerance and Automatic Resuming + +DLC provides fault tolerance monitoring based on AIMaster. AIMaster is a task-level component that, when enabled for fault tolerance monitoring, launches an AIMaster instance to run with other task instances, serving the roles of task monitoring, fault judgment, and resource control. + +Users can combine AIMaster's fault tolerance functionality with ChatLearn's resuming functionality to achieve automatic resuming of training tasks. + +The following is an example of fault tolerance monitoring configuration, which includes enabling hang detection and error detection. When the hang exceeds 1 hour or AIMaster detects an error, the task will be automatically restarted, with a maximum number of restarts being 3 times. + +![aimaster](../../images/fault.png) + +For more fault tolerance configuration, please refer to the DLC [Fault Tolerance Documentation](https://help.aliyun.com/zh/pai/user-guide/fault-tolerance-monitoring-based-on-aimaster?spm=a2c4g.11186623.0.0.12011976WAncyo). diff --git a/docs/en/tutorial/custom_model_flow.md b/docs/en/tutorial/custom_model_flow.md new file mode 100644 index 00000000..c973b19d --- /dev/null +++ b/docs/en/tutorial/custom_model_flow.md @@ -0,0 +1,93 @@ +# Custom Flow +If the user wants to customize the custom inference and training model flow, they can achieve customization by using the `Engine` class. 
+Users can initialize the Engine by passing in environment (Environment), trainer (Trainer), and evaluator (Evaluator) components. (These components can be None) +1. Environment: N inference models calculate data for sample_per_episode and generate StreamDataset. +2. Trainer: Receives StreamDataset and trains M models. +3. Evaluator: Performs evaluation calculations on K models and an Eval dataset. +![engine_class](../../images/engine_class.png) +## How to customize the model flow +Pass the custom flow function into the constructor of Environment, Trainer, and Evaluator to customize the model computation flow. The following example defines the environment flow in RLHF. +The number of inputs for the model is one or more, and the number of outputs is zero or one. Any calls unrelated to the model will be ignored. +```python +policy = PolicyInference("policy") +value = ValueInference("value") +reference = PolicyReference("reference") +reward = RewardInference("reward") +def env_flow(batch): + policy_out = policy.forward_step(batch) + ref_out = reference.forward_step(policy_out) + value_out = value.forward_step(policy_out) + reward_out = reward.forward_step(policy_out, ref_out, value_out) + return reward_out +``` +## Example +### Inherit Engine to implement a custom training Engine +```python +from chatlearn import Engine, Environment, Trainer +from chatlearn import BaseModule +class CustomEngine(Engine): + def __init__(self, + reference: BaseModule, + policy_trainer: BaseModule): + def env_flow(batch): + ref_out = reference.forward_step(batch) + return ref_out + def trainer_flow(batch): + policy_trainer.train_step(batch) + env = Environment(env_flow) + trainer = Trainer(trainer_flow) + super().__init__(env, trainer) +``` +In this example, we define a CustomEngine with 2 models, where the environment has only one reference model and the trainer has only one policy_trainer model. +The following code is a simple user calling process. 
+```python +reference = PolicyReference("reference") +ppo_policy = PolicyTrainer("policy_trainer") +engine = CustomEngine(reference, ppo_policy) \ + .set_dataset(train_prompts) +engine.learn() +``` +If you need to add an evaluation module during the training process, you can also set the evaluator. +```python +reference = PolicyReference("reference") +ppo_policy = PolicyTrainer("policy_trainer") +def eval_flow(batch): + r0 = reference.eval_step(batch) + return r0 +evaluator = Evaluator(eval_flow).set_dataset(val_prompts) +engine = CustomEngine(reference, ppo_policy) \ + .set_evaluator(evaluator) \ + .set_dataset(train_prompts) +engine.learn() +``` +### Use Engine directly to construct a custom process +In the following example, we directly construct a training process for RLHF using the Engine class. +```python +policy = PolicyInference("policy") +value = ValueInference("value") +reference = PolicyReference("reference") +reward = RewardInference("reward") +ppo_policy = PolicyTrainer("ppo_policy") +ppo_value = ValueTrainer("ppo_value") +def env_flow(batch): + policy_out = policy.forward_step(batch) + ref_out = reference.forward_step(policy_out) + value_out = value.forward_step(policy_out) + reward_out = reward.forward_step(policy_out, ref_out, value_out) + return reward_out +def trainer_flow(batch): + ppo_policy.train_step(batch) + ppo_value.train_step(batch) +def eval_flow(batch): + r0 = policy.eval_step(batch) + r1 = reward.eval_step(r0) + return r1 +env = Environment(env_flow) +trainer = Trainer(trainer_flow) +evaluator = Evaluator(eval_flow).set_dataset(val_prompts) +engine = Engine(env, trainer, evaluator) \ + .set_parameter_sync(ppo_policy, policy) \ + .set_parameter_sync(ppo_value, value) \ + .set_dataset(train_prompts) +engine.learn() +``` \ No newline at end of file diff --git a/docs/en/tutorial/data.md b/docs/en/tutorial/data.md index 046a0838..ddb588a5 100644 --- a/docs/en/tutorial/data.md +++ b/docs/en/tutorial/data.md @@ -1,6 +1,6 @@ # Data -This 
document describes the data preparation process for three stages: SFT, Reward, and RLHF. +This document describes the data preparation process for different stages: SFT, Reward, RLHF, DPO, OnlineDPO and GRPO. **The following is a collection of general environment variables used in this tutorial script:** @@ -8,31 +8,31 @@ This document describes the data preparation process for three stages: SFT, Rewa | ENV | Explanation | | --- | --- | | `CHATLEARN` | The location where the ChatLearn code is cloned [https://github.com/alibaba/ChatLearn.git](https://github.com/alibaba/ChatLearn.git) | -| `DATASET_ROOT` | The root directory for storing the SFT/Reward/RLHF training dataset collection. | +| `DATASET_ROOT` | The root directory for storing the SFT/Reward/RLHF/DPO/OnlineDPO/GRPO training dataset collection. | -## 1.1 Prepare SFT Training Data +## 1 Prepare SFT Training Data Organize the question-response pairs of SFT data into a jsonl file, where each line of the jsonl file represents a SFT data sample in the following Python dictionary format: -```json +``` {'query': question, 'response': reply} ``` Taking the example of Anthropic's helpful&harmless data, use the following code to store it in `$DATASET_ROOT/sft/train.jsonl`. ```bash -cd ${CHATLEARN}/examples/megatron/step1_sft/ +cd ${CHATLEARN}/examples/megatron/ DATASET_ROOT=$path_to_dataset_root -python prepare_data.py $DATASET_ROOT +python data/prepare_data_sft.py $DATASET_ROOT ``` -## 2.1 Prepare Reward Training Data +## 2 Prepare Reward Training Data 1. First, prepare question-different response pairs and organize them into a jsonl file. Each line in the jsonl file represents a Reward model training data sample in the following Python dictionary format: -```json +``` {'query': question, 'response': [reply 1, reply 2, ...], 'score': [score1, score2, ...]} ``` @@ -41,23 +41,40 @@ The score value indicates the quality of the corresponding response, with higher 2. 
Taking the example of Anthropic's helpful&harmless data, use the following code to store it in `$DATASET_ROOT/rm/train.jsonl` and `$DATASET_ROOT/rm/dev.jsonl`. ```bash -cd ${CHATLEARN}/examples/megatron/step2_reward/ +cd ${CHATLEARN}/examples/megatron/ DATASET_ROOT=path-to-dataset-root -python prepare_data.py $DATASET_ROOT +python data/prepare_data_reward.py $DATASET_ROOT ``` -## 3.1 Prepare RLHF Training Data +## 3 Prepare Alignment Training Data + +ChatLearn supports multiple alignments: RLHF, DPO, OnlineDPO, GRPO 1. Firstly, prepare a dataset of instructions to be explored and organize it into a JSON file. Each line in the JSON file should represent a prompt in the following format: -```json +``` {"prompt": prompt} ``` -2. Taking Anthropic's helpful & harmless data as an example, use the following code to store the dataset in `$DATASET_ROOT/rlhf/train.jsonl` and `$DATASET_ROOT/rlhf/dev.jsonl`: +2. Taking Anthropic's helpful & harmless data as an example, use the following code to store the dataset in `$DATASET_ROOT/alignment/train.jsonl` and `$DATASET_ROOT/alignment/dev.jsonl`: + +```bash +cd ${CHATLEARN}/examples/megatron/ +DATASET_ROOT=path-to-dataset-root +python data/prepare_data_alignment.py $DATASET_ROOT +``` +## 4 Prepare Math Training Data + +1. Firstly, prepare a dataset of math data to be explored and organize it into a JSON file. Each line in the JSON file should represent a prompt in the following format: + +``` +{"eval_func": "math_rule", "prompt": prompt, 'answer': answer} +``` + +2. 
Taking openai/gsm8k data as an example, use the following code to store the dataset in `$DATASET_ROOT/math/train.jsonl`: ```bash -cd ${CHATLEARN}/examples/megatron/step3_rlhf/ +cd ${CHATLEARN}/examples/megatron/ DATASET_ROOT=path-to-dataset-root -python prepare_data.py $DATASET_ROOT +python data/prepare_data_math.py $DATASET_ROOT ``` \ No newline at end of file diff --git a/docs/en/tutorial/ems.md b/docs/en/tutorial/ems.md new file mode 100644 index 00000000..7deeff21 --- /dev/null +++ b/docs/en/tutorial/ems.md @@ -0,0 +1,29 @@ +# Efficient Memory Sharing (EMS) + +ChatLearn provides EMS feature to significantly reduce the GPU memory usage during the alignment training. +It maximizes the use of limited resources to train models with larger-scale or to improve overall training efficiency by improving the model's parallel strategy and increasing the batch size after GPU memory saved. + +When multiple models in ChatLearn share the same resources for training or inference, enabling the EMS feature allows these models to sequentially share GPU memory: +- After each model is initialized, tensors/buffers that constantly reside in GPU memory (such as weights, gradient buffers, and optimization states) are unloaded to the RAM or freed to release their occupied GPU memory. +- Before training or inference for a specific model, the tensors/buffers are loaded from the RAM or reconstructed, and then training or inference takes place. +- Once the training or inference is complete, the tensors/buffers are again unloaded to the RAM or freed to release their occupied GPU memory. + +By repeating the above process, multiple models sequentially share GPU memory, maximizing the efficiency of GPU memory usage. + +## Usage +Users can specify whether to enable the EMS feature by configuring the `free_memory` (bool type, default is False) parameter for each model. This can be directly modified in the `rlhf.yaml` for each model. 
For example, to enable the EMS feature for the policy model: +```yaml +policy: + model_config_file: old_policy_inference.yaml + ... + free_memory: ${free_memory_policy:True} +``` +Alternatively, it can also be configured in the training script using environment variables: +- Policy model: `export free_memory_policy=True` +- Reference model: `export free_memory_reference=True` +- Reward model: `export free_memory_reward=True` +- Value model: `export free_memory_value=True` +- PPO policy model: `export free_memory_ppo_policy=True` +- PPO value model: `export free_memory_ppo_value=True` + +A complete example can be found in the [llama2 configuration](../../../examples/megatron/configs/llama2/rlhf.yaml). \ No newline at end of file diff --git a/docs/en/tutorial/evaluator.md b/docs/en/tutorial/evaluator.md new file mode 100644 index 00000000..53f70c9e --- /dev/null +++ b/docs/en/tutorial/evaluator.md @@ -0,0 +1,21 @@ +# Evaluator + +This document will introduce how to perform model evaluation. Users can use `EvalEngine` to evaluate models independently or configure the evaluator within the training engine to perform evaluations during training. + +```python +def eval_flow(batch): + p = policy.forward_step(batch) + r = reward.eval_step(p) + r1 = reward2.eval_step(p) + return r, r1 + +evaluator = Evaluator(eval_flow) +evaluator.set_dataset(prompts) +results = evaluator.eval() +``` + +In the above example, we constructed an evaluation flow for three models. Users can customize the evaluation execution flow through the `eval_flow`. + +The result returned by `evaluator.eval` is of type `dict`, where the key is `model_name` and the value is a `list` containing the results of the computations for each batch. + +In the above example, the result returned by `eval` will be `{"reward": [batch0, batch1, batch2], "reward2": [batch0, batch1, batch2]}`. 
diff --git a/docs/en/tutorial/profile.md b/docs/en/tutorial/profile.md new file mode 100644 index 00000000..6bc1c9ee --- /dev/null +++ b/docs/en/tutorial/profile.md @@ -0,0 +1,23 @@ +# Profile +ChatLearn provides two ways to profile performance: +1. Torch profiler +2. nsys +Note: For large models, the profile result can be very large. It is recommended to reduce the model size when profiling. + +## Torch Profiler +Users can enable the Torch profiler by configuring the rlhf setting `profiler_dir: path_to_profile_dir` in the main configuration file of the system. +```yaml +profiler_dir: path_to_profile_dir +``` + +## nsys +Users can enable the nsys profiler by configuring the rlhf setting `nsys: True` in the main configuration file of the system. +```yaml +runtime: + nsys: True +``` +When launching the program, nsys startup parameters need to be added before the execution command, as shown in the following example: +```bash +nsys profile -w true -t cuda,nvtx,osrt,cudnn,cublas -s none --capture-range=cudaProfilerApi --capture-range-end=stop-shutdown --cudabacktrace=true -x true --force-overwrite true -o my_profile \ +python train_rlhf.py XXX +``` \ No newline at end of file diff --git a/docs/en/tutorial/run.md b/docs/en/tutorial/run.md index 4d849e6e..c6f33bc2 100644 --- a/docs/en/tutorial/run.md +++ b/docs/en/tutorial/run.md @@ -15,7 +15,7 @@ Select the job type as `PyTorch` and paste the command into the `Execution Comma -For RLHF Training task, you need set the advanced setting as `customPortList=30000-30050,createSvcForAllWorkers=true`. +For RLHF, DPO, OnlineDPO, GRPO training task, you need set the advanced setting as `customPortList=30000-30050,createSvcForAllWorkers=true`. ## Non-PAI-DLC environment @@ -31,6 +31,6 @@ export GPUS_PER_NODE=8 export RANK=xx ``` -# reference +## Reference -1. Aliyun Machine Learning PAI-DLC:[https://www.aliyun.com/activity/bigdata/pai-dlc](https://www.aliyun.com/activity/bigdata/pai-dlc) +1. 
Aliyun Machine Learning PAI-DLC: [https://www.aliyun.com/activity/bigdata/pai-dlc](https://www.aliyun.com/activity/bigdata/pai-dlc) diff --git a/docs/en/tutorial/tutorial_bloom.md b/docs/en/tutorial/tutorial_bloom.md deleted file mode 100644 index 66cdee7e..00000000 --- a/docs/en/tutorial/tutorial_bloom.md +++ /dev/null @@ -1,165 +0,0 @@ -# End-to-end Training Tutorial with Bloom Model - -This document provides instructions for end-to-end training using the ChatLearn, Megatron-LM framework, and the Bloom model. It includes three stages of training: SFT, Reward, and RLHF training. - - -**The following is a collection of general environment variables used in this tutorial script:** - -| ENV | Explanation | -| --- | --- | -| `CHATLEARN` | The location where the ChatLearn code is cloned [https://github.com/alibaba/ChatLearn.git](https://github.com/alibaba/ChatLearn.git) | -| `MEGATRON` | The location where the Megatron-LM-ChatLearn code is cloned [https://github.com/alibaba/Megatron-LM-ChatLearn.git](https://github.com/alibaba/Megatron-LM-ChatLearn.git) | -| `DATASET_ROOT` | The root directory for storing the SFT/Reward/RLHF training dataset collection. | -| `TOKENIZER_PATH` | The folder where the vocab_file used by the Tokenizer is located. | - - -## Setup: Image / Code and Data Preparation - -### Docker Image -It is recommended to refer to `https://github.com/alibaba/ChatLearn/tree/master/docker/ngc/Dockerfile.ngc22.10` for preparing the docker image. -If you're training on the PAI DLC platform, we suggest using the pre-built image provided below: - -```bash -registry.cn-wulanchabu.aliyuncs.com/pai-dlc/pytorch-training:1.13.0-gpu-py3.8-cu11.8-ngc22.10-ubuntu20.04 -``` - -### Code - -In this example, we need to download the following related code. 
- -```bash -# Download the modified Megatron-LM-ChatLearn to support ChatLearn training -git clone -b v0.1.0 https://github.com/alibaba/Megatron-LM-ChatLearn.git -# Download the ChatLearn code -git clone -b v0.1.0 https://github.com/alibaba/ChatLearn.git -``` - -### Data - -Please refer to [3-stage data](data.md) to prepare your training data. - -## Step1: SFT - -SFT refers to the process of fine-tuning a pre-trained language model using annotated dialogue data. In this example, we need to download the pre-trained model, and then start a simple SFT training demonstration. - - -### Download and Convert Pretrained Models - -If using models from HuggingFace transformers, first download the pretraining checkpoint, such as the Bloom model from the HuggingFace Hub: `bigscience/bloom-7b1`, or pre-saved SFT models locally. -Then, use the following code to convert the HuggingFace transformers model into the Megatron-LM model format. In this example, we will convert the model to `TP (tensor_model_parallel_size)=8,PP (pipeline_model_parallel_size)=1` checkpoint, and the model will be stored in `MEGATRON_BLOOM_CKPT_PATH`. - - -```bash -MEGATRON=path-to-megatron -cd $MEGATRON - -bash examples/pai/tools/convert_transformers_megatron_bloom.sh \ -$MEGATRON \ -path-to-transformer-model \ -path-to-megatron-model \ -8 \ -1 \ -false -``` - -### Start SFT Training - -The script below is an example of SFT training. The `DATASET_PATH` is the path to the SFT training set, such as `$DATASET_ROOT/sft/train.jsonl`. In this example, we assume that the tokenizer's path is the same as the model checkpoint's path. 
- -```bash -export CHATLEARN=path-to-chatlearn -export MEGATRON=path-to-megatron-lm-chatlearn -cd ${CHATLEARN}/examples/megatron/step1_sft/ - -LOAD_PATH=$MEGATRON_BLOOM_CKPT_PATH \ -TOKENIZER_PATH=$MEGATRON_BLOOM_CKPT_PATH \ -DATASET_PATH=$DATASET_ROOT/sft/ \ -bash bloom_sft.sh -``` - -The training logs and the completed models will be stored in `${CHATLEARN}/output/step1_sft` by default. -For specific definitions, please refer to the script `${CHATLEARN}/examples/megatron/step1_sft/bloom_sft.sh`. - -For the environment variables and configurations required for distributed execution, please refer to [Distributed Execution](run.md). - -## Step2: Reward Model Training - -The Reward model refers to the model that serves as a proxy for human evaluation in RLHF. It provides real-time evaluation and scoring of the model's generated question responses. Given a question and model response, the Reward model produces a scalar representing the quality of the model's response. - - -### Start Reward Model Training - -Based on InstructGPT[1], the Reward model training is initialized with the SFT model checkpoint. The training code is as follows: - -```bash -export CHATLEARN=path-to-chatlearn -export MEGATRON=path-to-megatron-lm-chatlearn -cd ${CHATLEARN}/examples/megatron/step2_reward/ - -LOAD_PATH=path-to-sft-ckpt \ -TOKENIZER_PATH=$MEGATRON_BLOOM_CKPT_PATH \ -DATASET_PATH=$DATASET_ROOT/rm/ \ -bash bloom_reward.sh -``` - -The training logs and trained models will be saved by default in `${CHATLEARN}/output/step2_reward`. The specific definitions can be found in the script `${CHATLEARN}/examples/megatron/step2_reward/bloom_reward.sh`. - -For the environment variables and configurations required for distributed execution, please refer to [Distributed Execution](run.md). 
- -## Step 3: RLHF Training - -RLHF refers to the process of trying different responses on a dataset consisting only of instructions and learning from the reward signals provided by a reward model for each response. - -### Start RLHF Training - -[Aliyun PAI DLC](https://www.aliyun.com/activity/bigdata/pai-dlc)[2] provides convenient and efficient support for RLHF training tasks. Below is a training script for a Bloom-7B Policy and a 7B Reward model. In this example, the user needs to set `POLICY_LOAD` to the checkpoint path produced by SFT (Supervised Fine-Tuning). The Policy model and Reference model will be initialized with the SFT checkpoint. `REWARD_LOAD` should be set to the checkpoint path produced by Reward training, and the user can specify the iteration number associated with the loaded checkpoint. The Reward model and Value model will be initialized using the Reward model weights. `VOCAB_FILE` should point to the folder containing the files required by `BloomTokenizer`. - -```bash -export CHATLEARN=path-to-chatlearn -export MEGATRON=path-to-megatron-lm-chatlearn -export DATASET_PATH=$DATASET_ROOT/rlhf/train.jsonl - -cd ${CHATLEARN}/examples/megatron/step3_rlhf - -export exp_name=any_experiment_name_you_like - -POLICY_LOAD=path-to-sft-ckpt \ -REWARD_LOAD=path-to-trained-rm-checkpoint \ -REWARD_LOAD_ITERATION=1000 \ -VOCAB_FILE=path-to-vocab-file \ -bash run_scripts/bloom/run_7b1_7b1.sh -``` - -7B Policy + 7B Reward RLHF training requires resources equivalent to 8 A100-80GB/A800-80GB/H800-80GB GPU cards. - -For the environment variables and configurations required for distributed execution, please refer to [Distributed Execution](run.md). - - -### Evaluation - -Firstly, we can use ChatLearn's model conversion tool to convert the Megatron-LM formatted model to HuggingFace's transformers model format. 
- -```bash -MEGATRON=path-to-megatron-lm-chatlearn -cd $MEGATRON - -bash examples/pai/tools/convert_transformers_megatron_bloom.sh \ -$MEGATRON \ -ckpt-to-rlhf-policy-ckpt \ -path-to-transformers-ckpt-path \ -1 \ -1 \ -true -``` - -We evaluated the performance of Bloom on the HH dataset, both after SFT and RLHF, using the GPT-4 API provided by MT-Bench. The results show that RLHF improves the average performance of the model compared to SFT. There is a significant improvement in the domains of Extraction, Math, Reasoning, STEM, and Writing. The performance gains observed here are due to the use of a Reward model trained on the open-source HH dataset. Customizing the Reward model contributes to achieving better results. - -| Model | Coding | Extraction | Humanities | Math | Reasoning | Roleplay | STEM | Writing | Avg | -| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | -| bloom_sft | 1.45 | 1.1 | 3.35 | 1.45 | 2.6 | 3.1 | 2.65 | 1.4 | 2.27 | -| bloom_rlhf | 1.4 | **1.4** | 3.05 | **1.5** | **2.65** | 3.05 | **3.05** | **1.6** | **2.35** | - -## Reference - -1. Training language models to follow instructions with human feedback,[https://arxiv.org/abs/2203.02155](https://arxiv.org/abs/2203.02155) - diff --git a/docs/en/tutorial/tutorial_llama2.md b/docs/en/tutorial/tutorial_llama2.md index 40d46943..8c5cf157 100644 --- a/docs/en/tutorial/tutorial_llama2.md +++ b/docs/en/tutorial/tutorial_llama2.md @@ -1,8 +1,9 @@ -# End-to-end Training Tutorial with LLaMA Model - -This document provides instructions for end-to-end training using the ChatLearn, Megatron-LM framework, and the LLaMA/LLaMA2 model. -It includes three stages of training: SFT, Reward, and RLHF training. +# End-to-end Training Tutorial with Llama Model +This document provides instructions for end-to-end training using the ChatLearn, Megatron-LM framework, and the Llama/Llama2 model. ChatLearn supports three training policies as follows: +1. 
RLHF(Reinforcement Learning from Human Feedback): which includes three stages of training: SFT, Reward, and RLHF training. +2. Direct Preference Optimization(DPO): which includes two stages of training: SFT and DPO training. +3. OnlineDPO/GRPO: which fall in between RLHF and DPO, includes three stages of training: SFT, Reward, and DPO training. **The following is a collection of general environment variables used in this tutorial script:** @@ -10,38 +11,21 @@ It includes three stages of training: SFT, Reward, and RLHF training. | --- |---------------------------------------------------------------------------------------------------------------------------------------------------------------| | `CHATLEARN` | The location where the ChatLearn code is cloned [https://github.com/alibaba/ChatLearn.git](https://github.com/alibaba/ChatLearn.git) | | `MEGATRON` | The location where the Megatron-LM code is cloned [https://github.com/NVIDIA/Megatron-LM.git](https://github.com/NVIDIA/Megatron-LM.git) | -| `DATASET_ROOT` | The root directory for storing the SFT/Reward/RLHF training dataset collection. | +| `DATASET_ROOT` | The root directory for storing the SFT/Reward/RLHF/DPO/OnlineDPO/GRPO training dataset collection. | | `TOKENIZER_MODEL` | The path of `tokenizer.model` used by the Tokenizer. | ## Setup: Image / Code and Data Preparation -### Docker Image - -It is recommended to refer to `https://github.com/alibaba/ChatLearn/tree/master/docker/ngc/Dockerfile.ngc23.09` for preparing the docker image. -If you’re training on the PAI DLC platform, we suggest using the pre-built image provided below: - -```bash -registry.cn-wulanchabu.aliyuncs.com/pai-dlc/pytorch-training:2.1.0-gpu-py3.10-cu12.2-ngc23.09-ubuntu22.04 -``` +### Image / Code -### Code - -In this example, we need to download the following related code. 
- -```bash -# Download the Megatron-LM code -git clone https://github.com/NVIDIA/Megatron-LM.git -git checkout 954a65b04 -# Download the ChatLearn code -git clone https://github.com/alibaba/ChatLearn.git -``` +Please refer to [Environment and Code Setup](../installation.md). ### Data Please refer to [3-stage data](data.md) to prepare your training data. -## Step1: SFT +## Step: SFT SFT refers to the process of fine-tuning a pre-trained language model using annotated dialogue data. In this example, we need to download the pre-trained model, and then start a simple SFT training demonstration. @@ -50,66 +34,62 @@ In this example, we need to download the pre-trained model, and then start a sim ### Download and Convert Pretrained Models If you are using a model from HuggingFace transformers, you will first need to download the pre-trained checkpoint, -such as the LLaMA2 model available on HuggingFace Hub (`meta-llama/Llama-2-7b-hf`), or a locally saved SFT model. +such as the Llama2 model available on HuggingFace Hub (`meta-llama/Llama-2-7b-hf`), or a locally saved SFT model. Then, you can use the following code to convert the HuggingFace transformers model into the Megatron-LM model format: -1. For the 7B model, we will convert the model into a checkpoint with `TP (tensor_model_parallel_size)=4` and `PP (pipeline_model_parallel_size)=1`, +1. For the llama2-7B model, we will convert the model into a checkpoint with `TP (tensor_model_parallel_size)=4` and `PP (pipeline_model_parallel_size)=1`, and the model will be saved in `MEGATRON_LLAMA_CKPT_PATH`. -2. For the 13B model, we will convert the model into a checkpoint with `TP=8` and `PP=1`. -3. For the 70B model, we will convert the model into a checkpoint with `TP=8` and `PP=4`. +2. For the llama2-13B model, we will convert the model into a checkpoint with `TP=8` and `PP=1`. +3. For the llama2-70B model, we will convert the model into a checkpoint with `TP=8` and `PP=4`. 
```bash -MEGATRON=path-to-megatron -cd $MEGATRON - -HF_FORMAT_DIR=path-to-hf-model -TOKENIZER_MODEL=$HF_FORMAT_DIR/tokenizer.model -MEGATRON_FORMAT_DIR=path-to-meg-model - -python tools/checkpoint/util.py \ - --model-type GPT \ - --loader llama2_hf \ - --saver megatron \ - --target-tensor-parallel-size $TP \ - --target-pipeline-parallel-size $PP \ - --load-dir ${HF_FORMAT_DIR} \ - --save-dir ${MEGATRON_FORMAT_DIR} \ - --tokenizer-model ${TOKENIZER_MODEL} +export MEGATRON=path-to-megatron-lm +export CHATLEARN=path-to-chatlearn + +cd ${CHATLEARN}/examples/megatron/ + +TP=num_of_tp \ +PP=num_of_pp \ +LOAD_PATH=path-to-hf-model \ +TOKENIZER_MODEL=$LOAD_PATH/tokenizer.model \ +SAVE_PATH=path-to-megatron-model \ +bash scripts/convert_hf_to_megatron.sh ``` ### Start SFT Training The script below is an example of SFT training. The `DATASET_PATH` is the path to the SFT training set, such as `$DATASET_ROOT/sft/train.jsonl`. -The `MODEL_SIZE` is an environment variable specified in the script to indicate the size of the model, which can be `7B`, `13B`, or `70B`. +The `MODEL_SIZE` is an environment variable specified in the script to indicate the size of the model, which can be `llama2-7B`, `llama2-13B`, or `llama2-70B`. ```bash export CHATLEARN=path-to-chatlearn -export MEGATRON=path-to-megatron-lm-extension -cd ${CHATLEARN}/examples/megatron/step1_sft/ +export MEGATRON=path-to-megatron-lm +cd ${CHATLEARN}/examples/megatron/ MODEL_SIZE=$MODEL_SIZE \ LOAD_PATH=$MEGATRON_LLAMA2_CKPT_PATH \ TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ DATASET_PATH=$DATASET_ROOT/sft/ \ -bash llama2_sft.sh +bash scripts/train_sft_llama.sh ``` -The training logs and the completed models will be stored in `${CHATLEARN}/output/step1_sft` by default. -For specific definitions, please refer to the script `${CHATLEARN}/examples/megatron/step1_sft/llama_sft2.sh`. +The training logs and the completed models will be stored in `${CHATLEARN}/output/sft` by default. 
+For specific definitions, please refer to the script `${CHATLEARN}/examples/megatron/scripts/train_sft_llama.sh`. In our training script, the resource requirements (assuming the resources are A100-80GB/A800-80GB/H800-80GB GPUs) are as follows: -1. 7B SFT: 8 GPUs -2. 13B SFT: 8 GPUs -3. 70B SFT: 4*8 GPUs +1. llama2-7B SFT: 8 GPUs +2. llama2-13B SFT: 8 GPUs +3. llama2-70B SFT: 4*8 GPUs For the environment variables and configurations required for distributed execution, please refer to [Distributed Execution](run.md). -## Step2: Reward Model Training +## Step: Reward Model Training The Reward model refers to the model that serves as a proxy for human evaluation in RLHF. It provides real-time evaluation and scoring of the model's generated question responses. Given a question and model response, the Reward model produces a scalar representing the quality of the model's response. +**Hint**: Reward model training is not needed for DPO mode. ### Start Reward Model Training @@ -118,29 +98,33 @@ Based on InstructGPT[1], the Reward model training is initialized with the SFT m ```bash export CHATLEARN=path-to-chatlearn export MEGATRON=path-to-megatron-lm-extension -cd ${CHATLEARN}/examples/megatron/step2_reward/ +cd ${CHATLEARN}/examples/megatron/ LOAD_PATH=path-to-sft-ckpt \ TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ DATASET_PATH=$DATASET_ROOT/rm/ \ -bash llama2_reward.sh +bash scripts/train_reward_llama.sh ``` -The training logs and trained models will be saved by default in `${CHATLEARN}/output/step2_reward`. -The specific definitions can be found in the script `${CHATLEARN}/examples/megatron/step2_reward/llama_reward2.sh`. +The training logs and trained models will be saved by default in `${CHATLEARN}/output/reward`. +The specific definitions can be found in the script `${CHATLEARN}/examples/megatron/scripts/train_reward_llama.sh`. The resource requirements for training a reward model of the same scale are the same as those for SFT models. 
For the environment variables and configurations required for distributed execution, please refer to [Distributed Execution](run.md). -## Step 3: RLHF Training +## Step: Alignment Training -RLHF refers to the process of trying different responses on a dataset consisting only of instructions and learning from the reward signals provided by a reward model for each response. +ChatLearn supports multiple alignment strategies: RLHF, DPO, OnlineDPO, and GRPO. + +### Start Alignment Training -### Start RLHF Training +Take Llama2-7B for example as follows. -Here is a training script for LLaMA2-7B Policy and 7B Reward models. -In this example, the user needs to set `POLICY_LOAD` to the checkpoint path generated by SFT. +#### RLHF + +Here is a training script for Llama2-7B Policy and 7B Reward models. +In this example, the user needs to set `POLICY_LOAD` to the checkpoint path generated by SFT, which is used for the Policy and Value models. The Policy and Reference models will be initialized with the SFT checkpoint. `REWARD_LOAD` should be set to the checkpoint path generated by the Reward training, and the user can specify the iteration number for the loaded checkpoint. The Reward and Value models will be initialized with the weights of the Reward model. 
@@ -149,27 +133,90 @@ The Reward and Value models will be initialized with the weights of the Reward m ```bash export CHATLEARN=path-to-chatlearn export MEGATRON=path-to-megatron-lm -export DATASET_PATH=$DATASET_ROOT/rlhf/train.jsonl +export DATASET_PATH=$DATASET_ROOT/alignment/train.jsonl -cd ${CHATLEARN}/examples/megatron/step3_rlhf +cd ${CHATLEARN}/examples/megatron/ -export exp_name=any_experiment_name_you_like +export model_size=llama2-7B POLICY_LOAD=path-to-sft-ckpt \ REWARD_LOAD=path-to-trained-rm-checkpoint \ REWARD_LOAD_ITERATION=1000 \ TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ -bash run_scripts/llama2/run_7b_7b.sh +bash scripts/train_rlhf_llama.sh ``` -If you need to train a 13B / 70B model, simply replace `run_7b_7b.sh` in the above training script with `run_13b_13b.sh` / `run_70b_70b.sh`. +#### OnlineDPO/GRPO + +The OnlineDPO/GRPO training process is similar to RLHF. + +```bash +export CHATLEARN=path-to-chatlearn +export MEGATRON=path-to-megatron-lm +export DATASET_PATH=$DATASET_ROOT/alignment/train.jsonl + +cd ${CHATLEARN}/examples/megatron/ + +export model_size=llama2-7B + +POLICY_LOAD=path-to-sft-ckpt \ +REWARD_LOAD=path-to-trained-rm-checkpoint \ +REWARD_LOAD_ITERATION=1000 \ +TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ +bash scripts/train_online_dpo_llama.sh +``` + + +#### DPO + +Here is a training script for Llama2-7B Policy and 7B Reward models. +In this example, the user needs to set `POLICY_LOAD` to the checkpoint path generated by SFT. +The Policy and Reference models will be initialized with the SFT checkpoint. +`TOKENIZER_MODEL` should be set to the folder path where the `tokenizer.model` for LlamaTokenizer is located. 
+ +```bash +export CHATLEARN=path-to-chatlearn +export MEGATRON=path-to-megatron-lm +export DATASET_PATH=$DATASET_ROOT/alignment/train.jsonl + +cd ${CHATLEARN}/examples/megatron/ + +export model_size=llama2-7B + +POLICY_LOAD=path-to-sft-ckpt \ +TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ +bash scripts/train_dpo_llama.sh +``` +#### GRPO Math + +To train a GRPO Math model, first refer to [Math data](data.md#Math) to prepare the mathematics dataset. Below is an example of training a Llama2-7B model. + +```bash +export CHATLEARN=path-to-chatlearn +export MEGATRON=path-to-megatron-lm +export DATASET_PATH=$DATASET_ROOT/math/train.jsonl + +cd ${CHATLEARN}/examples/megatron/ + +export model_size=llama2-7B + +POLICY_LOAD=path-to-sft-ckpt \ +REWARD_LOAD=path-to-trained-rm-checkpoint \ +REWARD_LOAD_ITERATION=1000 \ +TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ +bash scripts/train_grpo_math_llama.sh +``` + +#### Models of Other Sizes + +If you need to train a llama2-13B / llama2-70B model, simply replace `export model_size=llama2-7B` with `export model_size=llama2-13B` / `export model_size=llama2-70B`. You can also modify the model configuration and other parameters according to your needs. In our training script, the resource requirements (assuming the resources are A100-80GB / A800-80GB / H800-80GB GPUs) are as follows: -1. 7B RLHF: 8 GPUs -2. 13B RLHF: 2*8 GPUs -3. 70B RLHF: 4*8 GPUs +1. llama2-7B RLHF: 8 GPUs +2. llama2-13B RLHF: 2*8 GPUs +3. llama2-70B RLHF: 4*8 GPUs For the environment variables and configurations required for distributed execution, please refer to [Distributed Execution](run.md). @@ -180,23 +227,33 @@ Note that for RLHF tasks, if you are running on PAI DLC, you need to fill in the Firstly, we can use ChatLearn's model conversion tool to convert the Megatron-LM formatted model to HuggingFace's transformers model format. 
```bash -cd $CHATLEARN -python chatlearn/tools/megatron_to_hf.py \ - --load_path ${dir-to-megatron-model} \ - --save_path ${save-dir} \ - --target_params_dtype bf16 \ - --vocab_dir ${dir-of-vocab-file} \ - --megatron_path ${dir-to-megatron} +export CHATLEARN=path-to-chatlearn +export MEGATRON=path-to-megatron-lm + +cd $CHATLEARN/examples/megatron/ + +LOAD_PATH=path-to-megatron-model \ +SAVE_PATH=path-to-hf-model \ +VOCAB_PATH=path-to-vocab \ +target_params_dtype=bf16 \ +bash scripts/convert_megatron_to_hf.sh ``` -We evaluated the performance of LLaMA-13B on the HH dataset, both after SFT and RLHF, using the GPT-4 API provided by MT-Bench. The results show that RLHF improves the average performance of the model compared to SFT. There is a significant improvement in the domains of Humanities, Math, Roleplay, STEM, and Writing. The performance gains observed here are due to the use of a Reward model trained on the open-source HH dataset. Customizing the Reward model contributes to achieving better results. +We evaluated the performance of Llama2-7B on the HH dataset, after training with SFT, RLHF, DPO or training with OnlineDPO, using the GPT-4 API provided by MT-Bench. The results show that RLHF improves the average performance of the model compared to SFT. For RLHF, there is a significant improvement in the domains of Humanities, Math, Roleplay, Reasoning, and Writing. The performance gains observed here are due to the use of a Reward model trained on the open-source HH dataset. Customizing the Reward model contributes to achieving better results. 
-| Model | Coding | Extraction | Humanities | Math | Reasoning | Roleplay | STEM | Writing | Avg | -| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | -| llama_sft | 1.6 | 2.7 | 4.2 | 1.1 | 2.85 | 3.35 | 4.55 | 2.95 | 2.90 | -| llama_rlhf | **1.75** | **3.45** | **4.75** | **1.55** | **3.5** | **5.85** | **5.0** | **5.0** | **3.85** | +| Metric | llama_sft | llama_rlhf | llama_dpo | llama_onlinedpo | +|-------------|-----------|------------|-----------|------------------| +| Coding | 2.05 | **1.65** | **2.17** | **1.75** | +| Extraction | 4.40 | **4.0** | **4.35** | **3.70** | +| Humanities | 5.85 | **7.17** | **6.70** | **7.52** | +| Math | 1.15 | **1.70** | **1.25** | **1.05** | +| Reasoning | 3.15 | **3.30** | **3.15** | **2.00** | +| Roleplay | 4.75 | **5.50** | **5.65** | **6.10** | +| STEM | 6.05 | **5.75** | **6.77** | **7.10** | +| Writing | 4.55 | **4.75** | **4.8** | **5.30** | +| Avg | 3.94 | **4.22** | **4.33** | **4.31** | ## Reference -1. Training language models to follow instructions with human feedback,[https://arxiv.org/abs/2203.02155](https://arxiv.org/abs/2203.02155) +1. Training language models to follow instructions with human feedback [https://arxiv.org/abs/2203.02155](https://arxiv.org/abs/2203.02155) diff --git a/docs/en/tutorial/tutorial_qwen.md b/docs/en/tutorial/tutorial_qwen.md new file mode 100644 index 00000000..3be0fffb --- /dev/null +++ b/docs/en/tutorial/tutorial_qwen.md @@ -0,0 +1,38 @@ +# End-to-end training tutorial based on the Qwen model + +This document describes DPO training based on the ChatLearn, DeepSpeed framework, and Qwen model. 
+ +**The following is a collection of common environment variables used in this tutorial script:** +| ENV | Meaning | +| --- |-------------------------------------------------------------------------------------------------------------------------------| +| `CHATLEARN` | Location where the ChatLearn code repository is cloned [https://github.com/alibaba/ChatLearn.git](https://github.com/alibaba/ChatLearn.git) | +| `DATASET_ROOT` | Root directory where the training datasets are stored | + +## Setup: Image, Code, and Data Preparation + +### Image / Code + +Please refer to [Environment and Code Setup](../installation.md). + +### Data +The data format required by qwen2 is chatml: +``` +{"type": "chatml", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Tell me something about large language models."}, {"role": "assistant", "content": "Large language models are a type of language model that is trained on a large corpus of text data. They are capable of generating human-like text and are used in a variety of natural language processing tasks..."}], "source": "unknown"} +``` +The following script can convert `Dahoas/full-hh-rlhf` to data in chatml format and store it in the file `$DATASET_ROOT/alignment/train.jsonl`: +```bash +cd ${CHATLEARN}/examples/huggingface/ +DATASET_ROOT=path-to-dataset-root +python data/preprocess_data_chatml.py $DATASET_ROOT +``` + +## DPO +Here is an example of DPO training for Qwen2-7B. +In this example, the user needs to set `policy_model_path` to the initialization model checkpoint path, and the Policy model and Reference model will be initialized with this checkpoint. 
+``` +export CHATLEARN=path-to-chatlearn +export DATASET_PATH=$DATASET_ROOT/alignment/train.jsonl +export policy_model_path=path-to-qwen2-ckpt +cd ${CHATLEARN}/examples/huggingface/ +bash scripts/train_dpo_qwen.sh +``` diff --git a/docs/images/arch.jpg b/docs/images/arch.jpg deleted file mode 100644 index ba450b7fc232078939eed70c5720edf7b2657ca5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 80036 zcmb@tcUV(h_bs|95|t7`QF>5NR79kA0v3vhC@M%-=^(v@8W9nx5mAsLL1`iaBE5!Q zL~4LYk&@66NT>lqayIyW-}^h~Ie*-HpSuxU>?CW?IoB9t&Q+lOqD=tDZfa<20CaQ! zpap&bv`Iki2EyJ30CaVMivR!|2I#`<0YiTrQ6DMa+cYV#Pf)9<11tGIw@00X>t{$2BY^TYdp-}irQ9ksIdumoSB zfZvvuuI`?oJCDG&kdLS9e)|R3KH&};1-AG5I$`^_{dK?nB;}NZ~H&m|2!9b3vfj1j^GVRIVrdOU;h8RJgpI+4*+n@#nso{-qy}j zP#uhrwV<}M<>gC)l2;_90AN4ff1kPb>mZJFClCL7&ustz#)R{($o?5$SM^|AZozx{zf={6ZXY@{R)ey3wD1As&EG}@XW06;DPz#fi9+byHf z_ObwgVHg1FHfePL`(e6DdQJwqGXOn19RoWZtqA}hPj`s!;QCL;bo302hnOITj~rzN z-%xT4pr>PCpl4(_bchke5?vtpI>5+&h~u<`I@9sHk0EE=PDlpFempF6wXmL3zn3U1 z^~C-8k)tP1adGpUJtuPh!bNErSvh$H#cS7ZXlQC_-@JGKfq^0Do~4zwjjf%%gNLV= zw-3VCFXTn&%dl7B5pUw&zKc(IpP2M1JtH$K`*Y5h??uHWrDf$6l?{ze%`L5M?LYdk z{R4xzUqi!_c*4~5%(^ePeTryuGvU7ahRx*DdhpU(fyzzt};)=ouLq7$N(9 z(b4;W-wf=GhfYf{aj4&gJa#*NMl$&DiL0?63+s;vN$C?gpSbrPJt-_bd6u;A+JR^P z&m4RH|H-reI`*$$BfvF){@`Mur)N0Cz`$^b=@57^9cJ3U4j(;ya2@^UVmY|j_Alsv zFB;eh9oPpWBO?U-cZ~T6^RfTe7wtDFKWAve01E>h2qp%001oVxlS2rdi!))yav-9@0>c3i1@YUq^10#bo+QJ>p=ce zi8R#1|3sx-8CILh{PFCq#|gc1>yV$sZZ(POo5psoyE$oNuq4kujx1+TT-w)DQ?mYjK5Jb6ahuMA zn2i|&70B?JXMRa-0;(hn%IVoA3^8wGEyuS({Evic^xnrJ`L*iqCGo}o1n`M*NmFiCA>3J3P$ zvZd)}AgyB;Xuu0;^7{Ze91SR@tcdR(>;6Oo?4Htq)DiGbZF5nr>Hr4TnDncfWKdl*b@w+25^c@LNf$8DFS1Yp#e*9rH;Rb@mJ6Q zvl*m)V5Dt8V0I8ZOs2pW^c82^>-kxk|I67ge>oEHW zx`i87$OeBrntTm8dG0@!{2QVVe?vrAGNA##x;f{E{`kd=z5Qdmp2o(r+nh9=RrG1z z6(RSas%+qxO6R)%{HJkUONyM%*@fFpNNn*vl*izk4iLs;`v^k(g>gyApT2z<3z5YP zlyHd;*I{;`9gYMUP+JK9VK)sof_Q=NwB^v&+`A zC*o2_YJ4})5gUs&w#HZ+qI0gjQ)bq#|GYI;w>G1ZEOyyi~W@E1GXa2rq)bj8pSUW<9Q#(fz?=R8}NObz3IKrCln`f$bE8bpa6 
z`&AWP?>e{O=zhMGm={!#T0*V07ggCnM5j;=sC8@1a8m@GjSRSa$-HVe+1I9@N_(jn!amB329BmF_vO2dC;bB( zN1m|+kBQ<{xYqg9oq))C%6(4~=BpzD;T>i<|+0cntX(X7U1@bpymN>_7MgiS^-s z!dKZayU1y?`EvKYbsC_M#&%{GRFwFFPxJ9qwtd08&bp6gyHcz+%zfUfMmsSKWV`(O zIS@A>vkv4>=|NFhUtD0xpYQW|E;;O!lOTS(K>XT89NLf>J^tLc9iDBhZ>S(o*y(}1 zhrtlJ)MEtvJtDze@pbY)v;si0FuQsn-w(h%)XiBCi2v=4HR>%cujbD+H*qKotEh=E zlW_uN=fb}1{A@g+)p4^51xVCv$?5sj#^Go3x$u*YWqvIsnWms1elP8NT~h;^wGZdl z@wuQ`Ujv<=JoAfv*hyG90fnHCBOA(aEt#{v7Qa%1A`e8lj0XHe?tS>ysFbn91d*i)55XpA9{PrH-L>ZRRQ$B`1&TMVx(eJJ~RBJ0h21EyF?#~@HjEA zT7B;y!~SLqAJDM9NE?OLVbH|FR8eEll)ZeA;jjNOMWz{Kc*D4^u&wn$jsQVuyPqVy zGXk=473o<<<@~B|>6(VfMP`+QUU{;F`lON%`R};>9fuCsrAaiov=1SWT_CD0VFG^# z%8fW~LG4?s#{9Xa`Zq+82O&DJgh2La_VeBT?hbMAah5+p*JOdnH7g!icanrMEtJI% ze_|@HjWClong6Q!yOIMOH85E;Us;!qHy6b%mwa%%g|4qG`B3^kbg95PL05Js4mwf7 zz8@b;w-ZAB_$pUTU=NWN`&u91noR=3N3&M57Fk7V7?sTWkDn_R_nIHLlP_=0_k z{gWctG~Y@UZ;3anYEWT-=|sJGb>ZC zS|&xM_Scs0ooDz72N86K=u1){yA>eqwn6bZjn<&{fVw>PUtI>Xma;*~N~Fx9ne7d% zAkjO|$K^|!fAU-=_C{@zR=Y{&n+=gCN_$D5`s7cRclr5bioKhqY=Kz?Wc|`u(SE%l zX8_h4JthAXJwl)Mv7=Z>1G3+z3E;mYK{NCyr^ktL=rA4{u!;KcPlo6Lqw~L6A~WA4 z5HjEQ+BR)*5ajRpNypORmgvkzP!m_jAE<2|Xkwr=dSXA9+6aBzmA+T-Q93^8A}+rq z+U4c}ern@E{A9*~_=#m$HvS5NxbTnjlNWxmX0`OIdg~b?pXE2wppu{{Z#7#G!=T5} zFNZ;+684SS&lfKn+-6p&eY+>SXaH#g0}h$VsoIM;F|feU-7hevOLMsVBj+-(H%gP5 ztPU!$zu+0GvPe*vIC?MD{mOZUS4AWPjK|kSW3yH;)hU^YF`JloTuyS@I0Jt^U|Hk) zHg()Sf_=eJki2ijOVn}b%N@{)#N>YyI#~1ktAR`ADVl~|knK*sI4YtUaN9S@X(R8{ ztuinl`8$0Pbo^(;^zQ$SP4lP(CkD}ORiXdr`s*2o4K~1w^#+v{mmEu);li+*kszFkq#%7I-sh|^p>n6zge3@t$ z-Q@b=ybt+rLzh+##w?ERK%0MNZ0=sa1JxH5B=0sOiD9r4&@i5zg{v;&`wT}G%i0RK zBpz5o+60aHGgG5ZADCF!eZhQzGwSsmK{7eu;)j!%Dh`y#K7$6Z{;P*z zUgzsMXZzg_pv6=+0MqGV^$@FsGo5OCgW7YE+Qq8s|$?oYr%bb)D^>I`vOD7_w$KNF>{CJP4gGZ~Y2v z<}+`8AK2(*-n1m=eMNX1MBXVTw&V%xYcva7GH$PY@VLzmclof&4FDmVM*|Kq5}niW zffkJxhk^tURro1m##&u!&SZ^(vvk`ugBYLBq1N|=Z$}wo?!Nah51l9&(VN&p1+gJU z2yD-jB19TH{1d&FIA(r3ZA2asv$W^o7L09hKFxJuP4=XLS$*vDp}o**iAfs39Mm>` zbV*s}ggxtWs6A3aeQSB;j-KmJzxXw?;O;brXNP>nm!u}TpPBO#p-Zp=gv9zHGQ4st 
zMBE0ql(#aJKQ^XhaI8}XmD0hb!1eZeW~5c&B|C?-%gfXhj6*^`q|Ugz@B(@Wf}I*Y;BdxWo9dT z;io5$zG9ZBYnK;zw+4y9Jaq{%XW2vl0n=zCw@bC|MD$g}(o!Zi@22U^Zr8!4S1!gw zZ!f*7TOLNVK_bZFn;s-^HWDPIv#UlqTtY_UJ6-CB&n0A;+wp9sed=hst9i-k(vQAQ z`YPc}dgnKS#>(%QF*2*AA18LBq0}R+S>y;^&#N|XgtF*;&+Hkb+{J9Mkpyz>nI z>EkFpcvsrhxc&7b?GMv$Y%HqpszVfxOzIt_Jl#u&+q>xKjt4O7e-+o6#{b44oyc)Ye zlxu=9Qf}iiNg8=XT#H}X%QOAUz^PLPgR17OiQj!xXI$Q|mDvf$CheAo1fy>s0z~Dw zyBP+b8^-u~Wp<088(z;kGr6nS(aBkkpr>yF69PK=98&FzgKybXohvq0)DHh*;ND6X zWK)bm9`(5v;}NXO?Ob!7PR~P2tHmQ0b^9%cq5x!CU6lry(g0(C`H>Z2?5%67ND>zj zvAC#pn@uXpt9v8u5Mv76aB%V3uUTm;0x(4bQto)*%9N7;Z;yH~n9gpv6t=g~xD+ct z8h~~<8W-DI0*K!ZW8BK@Vp5x5=}R7oVGBIp1;GwJqkHa=Lf5IpJtsi}1~VpRQxm0A zg6mtsr$0Mpo3i7XEz9QH3^*_N2YJ4iS9y6Jm|C8@qSU(@Lnh?O`w5Kh*)y|YP~;wZ z=v4*SV2X-c)GpFkcPxyNZ9OGM6Dtj*5#il8Z^dM1FGd_DsHiB+(}0CEv_U7J^b4G# z;TE-HhL(2vW}o{#d2Ly-q{iy9!Uox1x!n@js+W1b+t~4uCx7M69r_eDl?xE~Ww+L~ zv{)Pl`4n5x-7!D-jQzIKjy^q|f;0=@wv7#+RZekJV`X$z>QS}CjU`k-vZu8aT9Fp> zYHVCBggRK;J^4}h!0qe_ci&cpj3=6UKX>oYTe-t`M)lX_VPcT!sg13SHoiF2+KSpy zF54I;C9Fq%`}|-^#SZ>;iZzNW4Pwi$*yrjB^=Lj9Xp(L0^HXnirw3qTLUx^3_UQ4! 
z$7P$kD(u{Ib&HXND*|o`iz9J5<09G95q4Y(3(!nH8W7vl1GE+r7-X8r`D+hLZ~?qd zS#~*JM#dbzHwU^lis+W{IQ_}>OpU^37>qCfbSxXCo0>R^S2nc#YPvYalB_dG=*#=M zVPsuX_JqN~U@2>?rL=vaL#f#frFG zWYd&DmfHl?oqK}7A85z0x>gTkfIFjyVp?V=_oX$T3~zz6z44vyjcY!$DY|q{;djQW zc1c4A&q)!b_l3ftQT!!+WZTV4q=AG6y=b{qIg$$c6;+brh`lT(!>SvvCEcMvxWrmg ztYm4=$nFea-g14u`^|}SN2Mx?;kjMC$jm+y0CF7O@XrVV%^lR>jV@wKL}oSPlaVZQ z#!9B0!s&W)JP%8b!`~N5*P8gUe%m%u;ATki&5ps?LWe~gFffYLu6h7_T%)RBZ7J~` zCbZzIvc}d0{#>j2B@^dMcqSz>f{k^z74VlxnsK+K*%MpJ}#bpwpZ1iZ-ymG$vgvZ}C`!()TmsR$da7~?4c zhia{0?)udqRU8<^1$;h*jVy#Oc!}f+l0nb_i>mMC-^i%kA&Ys^h;p>_T@$rl$x%o& z=flFb!(9^nAr|)@5f<#}n9(lXi)t{@pbjut0VYKK75wkk2=PnK39YH}0~DPnmonb{ zH1G?)#-TT(DR7SW_C!^`w}-`pfToZ|+zGa4F^HAE*BlkInSeOJKgy;uw^2_ReCz&8 zIkh-tNx6r9ZC!QjZEqZw1`Mdncb6iM-Ww%QB-D&w>n0Y-mDtkY)F6KERgzJ&%s$s0TvUObGUxg^#&jh z&Nbgw>7CY`)w*pFyMkIk=#L47v+cIxvK zNv3Obt;ks|t*O}_{987kC(n-0_jufXB{4s=UfqWf9P#55*6AE6u`Af9hyt>bdJ!O_5$vlj^O&SP`TAqcyl(?x5^F*4j19Eh)<Fpo0tm-Pr~@n}yY`l_gqw4hu<->9BI zb&MW;OybgTocs=It@C^xUr;d+CL}DN0G~a<5Vf|w`LO~n*B4bF^lkP8P#xNx74+7p zjRwePsN9@gO^@dif#m^k1yy{KfFaq`X8 z;oN8hZYMiSkF=khR^iUl%3GV4;GbhC5`hSNWsIu86Ug^spg=In_)4Gwm?8T1zL9FO zBE7&2vF6}6GH)oGG`0H6Z*21ub|0%^dE{lIxT_b$OZyAvYYa7ocj?Q600mu13HEg}b=>$; z(8*RR+OfUbY4E zAmy+q3wB8@GB1HImTb~`iA%|HkA0keM@`uFc;>&K3K>~A&rk<0bh=WmLeRmf60LiI zWN`Z;p%_Cf4>}#AS=Y_&j+|X-7{hGx7x)VaP0vzx?r^*M3t&P4#07FAi9HOA1X$Pw z2kk$5LyCF|+H`9P4n`*xL)w~jp_=1rXDeT`@?Cl4M=hqCuq42mRH2mf-}Gh7Y4y-~oi-JtKsnD)4+q0>?Kg^iH&v2AO{IvV z_f&g=Bm%ClfS-~Fd5mF3u5Q+>KT{LK7EEg0rt7}7rY68N&P6?q>;=7mFRnEF2A`#* z4bHkF@m$;>0J#aLQ9J*@a$I`{nYupVu`>Kj6$!r9Ogc;hI>8Sj7uWXPNZ&W{1|J2y zBY`6C9$?vVT&qq6$Z&UecLPhldT5LIE8kYi`EG98;gtpolp@dzJ}VA?4y@{rO0@|6 z*ERB$>=n9*yU#VVZvsRiQY8Kya_S|bbss@NCkX}cRsIY(SJ3I#nqXgC`3f`u)qUXH zRma9G6t|JTU{CR1<(4_${~y zhD%|qidY6}pTw{|xtVk62%25kGAQ(QAOU;W$amG7)xG4$A)zJ!?va9M zJEuu`*v$=!f!+ZjmqaL+^(Z*-{A9nGAJiRdinvft+_O~a0tSz^STR*lcz7;lG10dMJkQ}x~PHK=l7z#iQ z4x`T6>w&lgV{yVZDq&w6VY?p+{6{OMp2RU)6*BPcxc#*<3@-?wUb2k>%e3rnW;Yt} 
zZrFlsYQYW=CS0O|vR1&5fMgETXMU_#bB>;XFN^)^sN1XWg)#apR{xntzz z8*riEl++ae9!T(hjtCDqK$H+_n?fvuZJQlX_?8CUuFnJq_ClXL9Xh8*ajB__Y^4T~O{ zYMw4hLfk$VyjkpC`F&zGg>CzrD&|KubklKma;zW(*P^PxMFT3b@25fixw|_?Ncp18 z-bu}aRTD$SHO?v*hp`6nCL=cTitj40E>BFX_<#H)nLhG1v1RowPahpjh zM6aDn1WTcvemWXL{P8vD$BWi3Sh^qZSdAyq;9SlR({9LKJ?O4Rd6CiMk^)nf`0GW@7WUZ>608LA$ zFY^hc_xWPtr?r=6qAN3E<*mI`>KSp{_ro1AU(U!fNcIz{}`?U6NN z^m{K~;d<7Uafg^2M=`!P2?~pSmEw`;qWCc4wN2x_w18VX)J!~xO!CO})Idw)fe$=0 zgg>QAeC%JEUWM~HG+lh=w^4To^HCB0O!?Gg$F>)eZO%9-AyBNOCT`Tk_y$X!qn+$X zow*FllW;`zk>T`E`jV0Y*11~YzRT-S4kMF=N;8TEZ?ev31H`%Tm)ycJYIpl%(xUD_ zxQRMGlndSTY;=kz6TA1S&iaKD6!?y&Ken{bG#Z%)=ZwR{!!3o^SZ;F26Vyu{u)ETK z1^_02??D|CF0d`F0g_*{#1^W7% z$K^)98XejX!yCHYSh|+s;lX!qyQV#I%)mGV#t7usF;vW)j$ci za;^@SRjU1!&hPSXt5VJjPq0i_r4B4l-~ZS))91YBrCjZ$?QuTKV1%H294w=bAVOzu= zx^)S{@A&&Hxsf=iG!C(!&u?@EbCdGQpOe8>Db7p1N!B$&u(|{?(Yp^I%YqSh__o?&>_3BqW0mMoJnkC?L z4tcF*YkVl_B#JA3VWL@hOke5+#OmUz+$APXT|GRU2VvU~#MZBj#PfE4w^KAr9v?N` zc84c8@%TpVvKx&schSdhc3OzRr!CxRz!~4{-+OG%hsT$f6}bSGGIMM-eCbG5T~ zYHSt-A4vZTJZ|BYGdit8YyYElgR5_cN2k*oaHWZRb7lO$Bg zA7%f^nj4qb`EU2STxhDxRDwgIS?I_Hidg7 z={G#ienkIS8^I&q86{Z%bi>f~&pIniqz*XvyBAa-W9863TSa9<1lewiK~f?OC{^@< zZHs76Bf4R$3JbnlA-ek`GdMBDfVyXjj8YDq6bGWYtIhm3Pvco{<21hSjlS6~!+kY4 zjPcGZ^JR2?dVcwa9G4ircABb|37ez4PD^5s20T%DJROe&4VId!HkZBdY3qWvf0r>N_S`e;fDZ9F?6vV-jQa z=2U3}pS0!7aD51%p)MF*)DyLO$Q^d9oOmkSRbJ707PeWxkbX5KDKY}r&au!bngg#v z>%JRS;Da`VO?b7yZ{Ct(*wIf}7J>(J7euK>5u^2*#x9U#bCCj(Uu45GADU|0MKj)M z_i&S`;+nCM%J}?Q*x<&UL?vypHhOddK^fULo3jd^effa~w0Y?@>QbX~rbv4QU%`_z z-=bTMr>GYLtjJzO9rf=d@us{7#UJ4MHK^OAo+TC4_RiCqu|n*_t)kQMvaKC45-G2t zxG(o7dEIj_&%;y~Zr|QQ`YU~^y<$p7(AA5AZSc6zfYW{3K@vhRV&XyoSo-lR!FM#x z=_X;rTbl?{S@_og-cN|4jzJj>_F;A8V`NrxIFwGv@z-fXcf`?bH+jG+Vq=^w;0Dnp zf}(?Wf!HaA4VNGx9gI9*1@0H_rRuz={7MLT^?5gL=I7HoPkH6Lr?mg1ZwUtVI**Ta zJrjvqeMrXM+woOAu;e@9r?XH?Wh!Lv3KpESjC}Ce!8BPw?964s$!lYc0T>d}sfZ_4 z$-1ShnJ&eePa?J;KkrLq)zwW7>3EtZ>9Ge~Cn8@2=uBb`FYbg5dAXd@m8ngDyu0*r 
z#>9>?Z|Em<#z{S;;*K9n>V;wSDv0pc75i+TyGd>nr^NWZ=cfT4*`E9KK8k1~D%nU4W+@FPAMwZzZ18mLM--RosPD4J_1(E&^3Oc&yOr1_&s zAnmn!{?6LwTe3)2fJ70wF^Bx_QoseIP@|KZhm!}(+K=zj8uEF<;m&4TLXQp3XdQYs z(s}dUsXn;-#Q`NuO=`I%DTZmBXTg`h`UAKv`+EK+qG<}*;FtrJi?=*isMiWO$o9K; z0=UQpc>4g(NnM+V)i?Oe5!e!5(GQ9XTE41QcYkZb-lwsANECEe!B5XtJ)1)V`)J*MM`_uIN4m`Ty;O3-^Ss${!s&Jx8J*h-ZpydXSY&oJQ`C(a_am! zCwF+o>Fe(UrXq)8l--wyLH?t)8FHNUy&O0j^R} z`0A0+P0qf6v+~3sNj4c3TN-fNP-fQV?c+ZcecI!n$~38(=1dtC+a~i=&GhMS07c5w zHYPyNXR=0ed{I z^8#%AE#JWE(7C2SltK6QS4*)N%B|5y!wLb2fp*^j>!54>pzC1?%^*-M51Q1)kM2MRktELky=4jZ=JL$ zuAD*fHx4T|aHokXJA7^H?TRf|GSj;#c~}sKb;s6J7>;7vNWR%MApa&b3F_d&QdTw% z2$EigQt5i9$sKWE)=$lX?79BUd#>ipWE>HLhej*kC2rMM4lfvQg`9opT%uu3^qR=gY227 zN3^9tc@}J`zwE~;VfO5;Dsizaim`-l{rgE*R1Z&r>kb!qy_tA{h zhEnO>MKk@D@q2|x$+1=8q;rD#`Zq?iA&}UZG#-V=l-y;DDLyRKcI$hZ9(O2=Wb9x$ z-j2m=v%PI5D5J<*%p`v9Grz1qgo%wF+50 z!cG@Hr(?X|F(gKSf$+p84}nBp5F)*vr5;z&E2DhFA_an4Z?luTG0|SiyXxQ!dX>|| zg01!}0*apv6{~h4E0Mrpls+A7XlgAS-B9g*S>x`&Ln|vocQ&ZJ%Eh;jHtz1% zD5n*4T|~Bu#BHN|!QFW5JqWdYKa$$u@PSJ-FR=GE;1yWpHd~3G*B{9n2xK_iZaie{ zILYu;HFw@3N+5SqOxmIHyQZy|qLDEtOP*-s@%$|VmO^^7u+wfgZzhe2g0Vc(`Dc8r z+nAl)RH_**qpBjf-(yN4xd{{@5Fub4c59!%xdw9I91IM0LL*(C&!2qwE61#j;GS~q z@Wi_Ez^B73eo%|G;oLU|>GE%6>qh4$rDs|4LR{uzin`)#HT#wQAwFKV|l`cWtSv}oyxHV zRwkM2Hi>7-guPQDmWsL*aZr4Rn9ZUo(Wxad@7y~l+bDN$i0ha^#lgj0T_y+)>j zZCGcf*v~(3xBZ!d&z3?4U6pX3mg?&Ga6UucZN`E{T{Da2eSFLlNzK6obTnCv{UZ6+if|2;267v&5$~;Ehl2 zEXa>)Pvq)+{~W`(Y;2uMpk4iJ6lySzL5M;5UN{4BtBos-DfN9D^)0@fP+;urO#{NW zHa4?aHktvE>#AeLVLhm-{+Z`m+$w>4IS3ePUX2ghdSIt#V{c&@zxmA^T&x$A4E zn>4-bcIwZDh=);>RB!05ltG_He;D+aJ$Po7zbte)s}{TE_~wZ7G0oHQCJ^8L&$i5^ z`U}7l0I5W3_r9K5gMzuC9Y_YaE(qDlg;Y_{QK|c8O%Peb#B~&ESuf zYPaAhuTDR`rF-yJQPi(-d@JR02OMfN%Iwc>r3_aYox8W>y8Y6nM3Wn$|kJX;cI z@riPX7!*tphmdCmSGPByjN|~ad#CUlA?BeEXyqSgSo)}c+`s0wZ~IwhkTJxvMhs67 z?;?_!^1#i@K4Y;k!=uA$F5zh5k2lYw&uBe+^NjB!LAvHeg;fD&n{~jnje<>X+G%aH z2tjiot;9GiM4F@!@gt3?Jm%_VA}zO^AG@lzpCp$2vN1o=qPpU2GWqgj0U28FPGKNh zeCe3fL7ohFf~!^@epg%XVVh}mu{hGX>!|RmCYRP|NQ|o#c0I&wFPX|q4kMsmV(fHp 
zIAbQy6Z0Y;np|vq-rMu_%XeMwtDVA6Y=`SxVvhTd7w5&E&76oI*`PRhyIBpSa2gW zwMaI(gghQS`Y}X;9;)eh^wl-J{ygWCLl3)4to)7Sp5HBco^8*dP)~kG>}VC>#DY80 zJd2nUUIkpW6@-GL?O%)+?p+g$N|0l-(f)eZ(xB}A>9=PJ^`ql{tZiC^BCWfb?C&Ac zACi^}awt~^DP~#xJk(v&_;YSqd~vq)Cap(rus<;?jc%qo4b3;sqYL4oV$PJ)=V}Px zUlDA=p(V^J*BUqF629>|9=AMG?e@;Z)!I}LCCSkN`w>+k92146sMdYCK0%QoiML>b zx{)QP!d*6Yv-+z=@lbx_c{%Y5l14Wy>15r&RfWUJyuh5F;TsFeU0gLg=_ow%jl_UV zjofBnN&i-tPGbJ>hN*IfyZY;8k5mocg3keFyPODU^DrvvU9}P3NvM&kQt@WjuxEXD z?jv@lHt~Tuf$8LHDNeQ@rs-FB-+mr454YTvrpT8L=56?6eR4D*AwtAU@d^6Z2eI=K zZG6RF3Uj$fxNKNXL0&*!|GrS@uLYx{D3dU)W~zkAf*5K4f^b!#fok>7_UTRE7US7b zY&w>o={Ca(@88{YJMxJ5-O?TP-ZElOj~pB_0q00XPH`iUtIW8rpyPWeirD0yUL$KB zqP5BV0&(lPNX)ES&xX!>t8duZXHUR=6dxA%TTWred}F+!#yjG8P$?pqo5nX;s+Sg- z*UTPr-IO%_-5Yb!J`b9$uoTi?kRMr64BL}M-2`(<6zniXg@kV0@d>L| z5A@Hv9QVa&@g(jf^r#;HKx9IAaMFd>QK{a)kUA?$8~q zIVs-<4Tl#|?oQDF=fJdwe|D8sZD>GI(3X-^8J{gTzI>bqHGGMR#T5M_Pt~3{nf_o< zwUq`Oo~)I0p>{u_`iua)ZqZe~T^<&ucCez<&~#9@$0P>=9^j!Ij^=cc4knNqzL zJ`kJuR$R2?^=yt%gmct~@uWSv2RA#~soAsRe`Y;)_0)3$Y%D_jd|o)Oi<6C1&q}IR zc?7cJh68>zhML%A=!rZ3pH)DP6 z16mo4HA@5SCjGZB~B8!7oc5ETCSNuGlSdKN_idGa|z6jSE~EdW1q#? 
z8VQ`WNJiW`by|+^GBj-Q3V1-epwT7x4PvCluW6?=j-;W-QnOUjpf23uD7J9J&y5@A za6SC4@a*%E;7htenRCb^6!FRKa7A3x-eqk#g_^iAb%y^|jMsKW?b~KqAyT zVxL_(OU9Ayt{k7K-;z7X5(4n}>a4E0q1)gre9TEkt!Q=Sj_U_fLL0z z@Y(0!5*fKDDaPnR2I!lnCw^$>mSM=R9k0bDvr`R^(M){qx3j;3OGDq@b$Sdzej!6W zEsoTyvIz>ni%ZaZ3Z~djWbm3r91D~bq_?heJ|w1`YjK2BaE|=xa3#?vd$Vx@IjXpo z>4%dCgU7ZpHGZOdM=u0iXHgH^vD{VMtd1%p_ij^ftxKRAk+^%&q^sb84Vhf1i8i=| zZ&c7yFeHNKM1iAZ4RgE{16p4(VlTV4_$BoR zzQNr<^!=EuX#Fq&!VO8`*@m0%MFc?+$&#Y)3p~1h)jR4+fp?99Y$kv2bYGHo4lJdXd7#>Q_aYfdu#Iwh7S{?Rz9L)Ptx9su z$S=e*x8Le(-FiVV&gjG$W3>$2Y%%kd8J92dg~~n4OOvd_C>FTIl+VU%quos@*yD4LyxiX>;@H8#M+e!h+8XR`c%4 zjpYXupaO_p4dQIL7qiv-%jm$|+;*8CfoXy7yPib7T=fWhTDAh@`Zd+c?xogB+l{lW zWB3oC?zVaiVf)IWBT;u_F&LC2|mN!iU7 z#};{(b8_q?%;&MAntSh5lRuAUR;dj(z>_S*f~AU1iB%N6xso)4%A+WeG?$_YC{_f< z6!oCzQt@n=PZcBgW$C7JBR)pcn%aPJHjOr_MRH!?n$NBRLSSWL>t)bkZRF=@=7#U{ z--mym4O?>d%+a?Cz^!^8`}s_@-!zKga%cP)wM&(W#E0b6-`u^cPVyFg=;d-*OY?Q9 z_orUg{Kra?&rYr>cPD2-=*wk>D|WP=RJ#!SDlVf+}l6#d!N8o84>VV>6OniOF(5WbbYOwOemOpA&gS<^Pnt(TT?c@;L_6C!}cy^ zOF}w$mg{zoIR(m>^4i0kVOMs7qBJOpoN*)JuvCo?x2rvRz*I2PrDXUXu}+3lIl9AG z@kqJd>yB738~Pdz{pavOOLt*lE;k{li(H^PCD96+_ZfE_pZmYq`to?F+wbp@LRqGw z?50BMj%42rE!LP&k+o7}Ct<8(sVreoktI{fmNb>EvJ6=!iOCXW8OA6(Ghqy~^jxF+ z`+A=5^ZWgEcYDp}^Eubqu5+F9KIdBW7;>Tb>UyN+RaO!!743%vxO739D&V@)bA_i~ zS;tW89*1J}{1U`w=^kjs2+Zfg%1(lBv=<{0_tOaW=Nk)4ttkWm$biI^xxD__;2-nB zme>7DxozM1DV$5IbQXH?TE7nNz_sX_eoDyXCK9VnZD*%>f#3IfN2tx!adK4vQcwO5 z{P9`!PNmvt5AMbY9c^xX)aDEql;NN~zlz<)%IArZ@t(N+nwpc2T08RX*toH5meIBb zVVWO+ZoKAoqo0im8De)lwv9EQ1b2#*HbrmJN(#7p3{aiLc3BAQmBCmDkFmk*O^D9o zX0(^iMv^*CWt}Hh5}jXmI3c!;t8>xR$eiT0FIIbb^@%{h-kD7L{niiI#8T8@1KYt! 
z`=D>9Kb^z0hWdOg`W6zcauMJ5;rP>i%ma#PUhct>0SBc-(=BgIxl|7D3^z;d=EFW$ z`(A)v$->yw*;vGa59(0vtoZua`_oRJfBk*i40_!o0JPoItD^=Aq)GmpE%;R3a)1V)y zE&@R}zrxKn{(wVla(nB8(igrlS_&x@I&177F(K4cjq+8VVxlH^X<_T=0u)KEcA71p z*I^3(y<-67wi*FV_G?yPs5#(SaW%?pQ*2T4#7cT2>ao_7F#u{Lnf@gJg7QQmH@?vG zzW+4If4MCZHU{{ZGy9mb#xizNH=VRe*+Gqb8V5%LzQmQ z?$;>5Y$kYvb^FaClKt2L^z3Y!F&U(=1}L39O?il6)Nl_am<(y4ueabc)Q+ba+TpmzRxtCe;3mWCDAy zQ`D5#7Jor#U%O_(9N)?#1-di7|vt^@sjfORHV=_4m4b?&*d5e_}XS>Svlu?fQN6lJx7)hu38b z;IQfa11kYsJAhkTVb^*-JIMQd7^tP>w!NDp?fK!DRl$K-fI~; zTk0c@M=Wfue0H3YIWj>$JGc|R#xoFvVe$eIYAdDMPEB9t)^PhPgKg{{HgzQunLT>* zR`+H1Ql@#0R1i3e0tKR*EfTs|xi7i#%$6EhE8As$rn^zDP$S{R3&sx0(F|iCtR_MNUkiDf5<@@nWm1MT{#b1rW4!Th?oSo4MXTR3?ROC!*k6V4Hx#-{A6(kaLWZh%mg3R`>$C(uL)+HU^`FpaZty!!jpDs2RM{71$mVd&Btn^R&+Pn^#wOFY4F6)oLY)z%-WHCZGs> zaUgt;x}cq7zasXQLwm>5XJeHMC$jktvj#HE#XJu1sC;IalLN&M+|*+~(%CTmb#y0Q zslWK9%?pye_NwkbcDDEK8G`c8?KF`G7}LQ+K`n@d{OOiIkO*xT=r;%SDPyYqy9uuk z&9a5J{7gY$loPS3FGZF%tvno>K;WKtt)t=Bfw}Vjbrgu?%Tu+v71TeFb1obMn*ndm z8kZAPwx{*{WnQ!t)PI6~KJk7mz_?%r(TjJO7&H^IrD>T|jXJ&RZufK?_3!{{Lna4U z;Z1XP_n=BT?)$$|ezG=QsxWvTLM|ZJ{FkfXc)8B$vx9JoRXOUnfjTm4ya3QGgZutr zOF#OwrKX?E&z@a;sE@|a{+G44(X+=f^lWcB*i9d47U9{J{fWs+Ll~fNsL_9wLraX; z|LA+Usn^U%8`f*weXzt>fU_eSg7X>Bq;y~)30tfF4VziT0N0alo%>S2lYMzO8Ndo6 zHnjtb3>%N`baqeA{r?C*IO%j5J|+GV%J}Q!I6TW9dD^d$)(T(Y8Q`E28lxpZEX z&H;gId;Z(IQm_JE!=*_~yAq!)hfxPFU0F+A^DtqjE`zpAbN_u;G89u%vfe9xe(wu` z%0-q#wjumibEiseKt1TI{o{M>x^I8oSa@n|mzMF;Xd?!E+n}Nu=r{#=*vk+0||@ z^?E0Ri4NkR59F(u4B_GWXuM>8$_fmom0+5}mc$6>;;Ky=b-prR z-Ucn@9Rqb6-`UW=;Rlia1ZsT*q2CwoInHFOmuf?#LY}vxqGE1w{TT!;<;u*%rVf;+ z6kLXSZ@D?^okzzqD9F8bS@>PWL~$C*nUeJ$^+Gp!jCoU5P?Ezph71qWQHJG}wGV@@ zzp_$F`+&*eKpu-EowS!K_`8BQkVo&;s4O$W*~k^6U52@`#3niVF^XwR;c*H+N&ig6 zoS}T(`+6!mt*!*aX|1S{)VC@>@WbbN^p1w`U0S|rVValTwt?;kF@T?$XJ44WR9%Ks zuo^M&UpG0ZNZn#ccuqT-hut&I9VHJu8@?U3AK7oxyQA%56OxKf^-nGgPWo!5uR^5=>N2;CV35)bglvsIH~>ZdG@t5E)Pi6+ZStc z5VIxtM3Lu3*p3Fp&27yg#$-1kN#k2zUQ42D&_^uJiSDeIOo=z@;nJ8X-1UJ~D@V1? 
z!|h&OPW(#u?PBL`URbJ-|BbKy11Z0>*r)mfFVO6+&NUAGQAHY&5_Idh1Y0vef;cn# z0jgD#ss9sI!@R18`m{EQVLRGS@A7h!Fg!?y0U$~RyaOh3<872G)%4yo3K3K4V!Gq` zDO0<^d%=&!rjwf-vJdOYp0kMW#y_JY+CgN3^2Q~*ic>EDxyA_pB?gv{_JK1TwQp3ITEn$zy+hPr$ft)NX4g(be#EJRoIYjvLQ+qDf-Aek0Z!9PHP%2N4cKLk^dM{NHxQuFS zyAL|m@JKI)RGOmiCVF?3r8}psbVx!s|F&g82SH!yciS7`HzIp^oMNH9VDI!8PmGnC zBdUl29#6N!nboCRtQ7BgLzZ0x^b=Bm(U02VG??xTW2PDXJ$X5ixcyxNwIugR7x2NR zRM#?nF8{;}^#e6$PU?3iebLN}H|)Jmt_-RU7~$PJ#$J zb%5RT&DNi=(PD&3-e2<3@J~;mT<|K-*4E~4%xehuEt-P>y4=Ez?y;Ph@CwUmzbI_5 z0a$z8^-<}zJS~BV-+}8!g*du;393~q%3ve;8}mAc;&~1iu_zt5 z>iQEY792Dx>}KW1$iO!4(YrW=*nxD}S2*B%F8Lxghb?M&l#Ln=S$>L~WutcKw6sj! zV&3@snPn^u#{tR%!N8c+&pAx^&dol|Rs|u%bh|b^q1dNTA+iPmU>nwwO0_EmBf!pNY!o8dtNrOS@N+w2nEI=B*k0;mudt=INz?1`e~OJ##93YYoUCQ zZSL$+j0oy?Un8XuERSR!Z`@1}<1iv|VT>{p#KMvl|3JR4aK6-^xbTBQmqPi{@dk-Z z1;Z_yiTvR{pBl0dJ);zBCJWjq0DpfXiVrGCMhfF2AQWY~2O;rxPzACL&5((* z!?Q0OMR3-Gp%)-KgE~QJY?{MW+s1V+WB9azmzKa3?N_Y+9vpac0{bdkg@ENSYZGFaHDQ{rIY=C`S)u%rX`2%_PIC9X0uZQm!CPUQlFm=dtm?z~~u95ypCzYr>7vjS>1>;LTRu&QS}%b6|D?KPW5O zK=8@jxb!HF8CtB55fcgl7DAvUI2{feJb-d6C*Gd$kx?N7KLpkREC|?qJ!wf*11;`8 zg8x^>)Oqwurqxu?2I$w|7@a;Z`y^XMj2oNYu*DA)Dc?{g5MI-89lV^75(K<}?s}hU z;1L}u5O=vikYXIxgFg$S#DnL7i3DR9$$e-b_i=89TZgkd+Y`H`gbP{krDLB@2v7;2 z*@X{0B#j2YRf4@znN^jlBBcgO$lo)@y*2vsO0^<_}h`lV2X54a{2z}Ie>z-yT97?d=! 
z6rMvXwqZT2o{xr+&JfWz`Ym^$@9Fz&_Y2mq3x0rZ+srG^4kwylzsdXMx1OFp}!tL`m-u|Y2AJWXaVOohD~{dp516}I*Y#_2-o0bMAoW1KFOE>0Ipn5^!@H~j(b z|F;VTwBQ7q1ZDB9KOctwV9snj$`Deqe2Z8<$NOgpWNO-frTI*v5^0;+Nt=(mf4x_~ zsI^N~eI)L=_g;;D_a`@OoqRWYcq3MQ3w}mK*dLq>pr5s_A|dD|XJJ2zZ0|DydOEDxkY=#le{jV$B{DrF>v!8^}{E#}!Jlp6WxPaSc{4yd4c1Ygn&}_YpJI&$*rn z@)()C7JfC?L=sb9sC}p*(TJ59Vg+^S52>a9q$;16h;Gy3>05EC$r_wEO=6a3`aY-* zNPG1(?jN)5_R*7Wdzch`0nWfK-3BTpCVSwD9l zK`$+vcPuPfmOFgIW2or&(#0RQjW39xUhGx84Qp^JjtZjYOsu*IM)^%3nOY=N(m4lY z@W+dVQVH!u?)P;cz8$|@eMr53O;6b0q`T~G<>w=9X}$Yy;=Eu&?13m@Dt}8uI$LM} z38UMb)7N9zuzJCt-UG+4X9INVjx;1Hm!Temr$TQ#s>65t)&vN2U#IR3NOJVYD2>{= zo&0)#U#i6IS5-U3pSD`>OUQwTTc3$WJd5cu-2GKwn0l=FVpv3iL)7^JjYfoB=Fi*R z0>i5@E}uvCCH?aZy>{i>C-$LlVeCDG;oGb9I}=|)hOG#kv~O^ZGhwu6{201}j3D_{ z)E0Zr7|D26vhl8>nNgzUbQmaq#)CmXemo*)dc5$)1BPhFgF7O2y zN_&jU-mb*fk70o!#50c4~(1eIkN@!6BYRSEb zy$3g(lx}zssSaa+b+)oSbRp}LS|QVUUQ+I{W=x-Ed?L>KP&kk%$PnKQEyA$OW;omd zu)Ybp`syD@uAU9>RVlFstFc{$DtXX4;_AK$_f}lZZamW&#*?~<{QH9pI?w_IIOTPe z1c;)Z>T%LOclJRdm*FjDI~B@!SFqcGrbN=7Nyh+c+_6lXO|*3`$RVVl>83buMGXC& zp~=%Ij>IpVCsCu1w+zrM%79@baG%QCf}@*y zgHH~m#~Lti2F+SQf(n8*hvHz+m;rzT7EOo+2esROiF`ok5MX$Kt}cZ7Wesu=&z?Y| z*0ev7MTiMl@+y{li^L*ve48>)DwLxce-%8K_Us}+H6~6p!xyXvB2Xlt;HzyiY>-0sKohn|;2uvmny*`%wfuU5ATQt1 zFdD^pG{cby0g+@VNGJo!28;#B`j@@I%^k-@3ZV*5=UpKj0fhm@8Vw$5+0xhZWYYL> z5_IE;tKbCc>*F#&o-I(vZ&NE~-M^QXhcy1`l@5G&gG3;445(HLaWi@h`#9{~KM?OjsHrD(J@n@N8kzBRm3{>90*X~Iur9F<_+&azk)kKi zm;+p$v(`Pv2gt+*UiO~l;Us0|1EBk;0nC;zS^~qLUOWQ~0?NYz6FmPu70p`$Ej~ON z+PseC0>S~3=4M;K5(16fu za=RoWZvV=A&hZWz z#jzvp}PpmMeT&NKa413%3NP7TnA`fZxp5Q0Z-NQ78G*85JJ zX!QA_9}RdJ#+1hh;*)Q;+Oo$n4Cv<`!Xj+J#S)dDt{HuA(Sbl+TuxbC96uZ9YQ zB;PNr<+WFydPH#zu|mB5vW~%5dM}!PkCF#>M<46c z(I&VRhvU)%sw3Tmk=w@YddJVdYZyA5@|k2}k{_s^cGBbn>I?{*q6$~yTvH9*TJs>b zH&y2Qz#mBA>!It zH41RLsn6(l7AMAe!|=?uC{u?&keX54u6|k=H4WR*b*}=x!_8Q*nAKk3VZO-j*F7Up zw$JI&uWP;!FRNtnfpfn~73&?-z)Lf+jLsm{-D3)p-1a&UDmm8(BsOv~KS z_I6~ax#Za-2%)1IK%z$nqZfX~;h=RBSkepr1NviAnB#OGk}C3Iz#ga3G3b7J 
zf;D9f`{SaP%9yu*r%2XD*5O08rQt4L^p0XbInCoz#;dFd?T6`Mq!voQnOkCA-h&va zoHok*(Ej1uqWl68?Tz(jE2mS^yJRnS^c006_PwVvA+)pf$Kub>akepFi1y;+$6^lWK(`uQ4>E-lYOGL z|2~c*9gvZckzb*i(%^4cRSQm98-%GoPMMyq%t(M~w0Z8p9o#5_HrAA<1=D75ymH^CM9d^D%EZ zDa22(1|c$6sC{=a$}Zr=0j=HUp6VmlBOkiU?Ri4H9ra#EV@-J>n1V3_AYeo)TteTn z%v(3Rl7vfQAML;8B{NV;4y{T;c6yI>k?U7aX3MHW+sEO=&iAHIo#etyI^LoBRj zn|6UgPU+^dspldBb=cJ}5Joq>R5Ai@-rx$}U{|N%mHU}TB!dYnPYPcO@xdG+HnD0M z2AfwiFgb<1zl*qK%es$t2$3_5xTN;k?p(H(q7HB?b2OxOx%ITSvCfI~XG^!Ic9rOOdt zp?YS2Acr4tVeDp&1o0G5f_9D%RIxCVK3ch0kODF838kPq9)!roEwW?4bp34hc_QE9 z{AxF3dwO&un9Q4H5-#$d>)u{o5qGZxn_zBK?7D)ORFLtf&KVrZXm}@YI+JQ_5RPAc z!kv2auzmxv33leie zXK~r;O1izYUZ2#P9;EtS6YWJXYw%;4qPECVP$nL?wrLlX(4R1PcfzxOyf0QDbrIxR zhMq8s7*Eox2PX=H=ZAB!vS8}mSIC&krhDVj4vW6MF;s1lm%*LdPqz0&CpPM3@xc(@ zUM{60*qkVboI6K#!jEkB8&7OJ*1e_^aZPH*EqqG4ea<*BAguq!)U_Yv1 zbo;y}U6>f16WxHrE~OyQ+B7E8K91d8PiSp)O9e+rcQS@$p#k>1CDnZqV3Z|^Y42Id z8BU0?7`$&;TlMXF7Hup^u-xgfCQYNpc=RPzhvM6yr_>CC{xJX z5>QbATcmIo*dn0|ewJTWg8t5x=wLMsaLtK-lEjs+J>Nj}9EBJ_2r&coP7K`Vtzm}k zoZEMJroKz8As z8K{qTFYKGLjOJ4)@Eb^}y96qmwFz z8mvf_{2dZI*6-+BUEwKzK9|uTda;gA4#2!5FJ0o(*)9Pd5Q@dsh_(p>#s?&T6X5q# zZ6V9}1JbS3C(zVe(C^1+J`{F&$(#x@W{y36CfBonK||W=?`T4Bv7Z>U=n_z zosvL`ikPJ=r4Hp7DkLrp7ZWRjgI>50o$h=;kmZAa_~jt-cgxPv)D>rNZoU%m1KX-Q zc)xPCP&C^KBSdT|^V&9je&D!lze3#uYs|HD%lM4aM=!plXf{DEpXYM9hrBvUWtCR2 z;I$kAN>Lxxe6@A6v2n96=itmtQ<^Rt)yn2W-S`8!^{c^+v6tP=)~8y(pBxvVo4L9o zGjax=iVnRVcCo6uTxjV~VkR&V{o`bS;7S7S7uLFu%^h{TS7RQr7jV+2%mH~5zVr0& zZv9W{+688{X(H*p??zQ!9(*z}zNvZ)06=eh_sg;mJ2nH?a`3U~3K-gya*9;px><<5 zGoR*Z1CHy5kFfD>1Ujk(Ombd&6vN9(HH1XzQC%Mcpq?N-#wM(&;jLHTn73$MwxdsX zSIxu9w!Tx`!e5_lYZTqqSkXK#z?7vBt->25O9cTc%jM#{^FWY#fB0tFlKuuRV!0}& zeM1;juU@DI|3Z)|Y+43tVlb=XAP7H%rknVga1E@5!fAj1e<6hV2a?F#%@NhA&*iiy z6Df9?S=wTFgNkWxQbq7b@4RN&3{-8%Hq%m`i3B>;Wx2I6_(JxwJ*BqlRzPuI77^)x z4Pr)HclCbu`8=d2jX$kf;8kCEr}ovGcxxdqVn=n*i<>`htQ`ncD?BP>Z5>#|WZ27- zF}p|Uo*#!CE8~mCMB|B&jqeH{a+S!+b1(X~waUf~*n^zJWviR|V|LEpUM*pAGDD74 zT)P_aFokGFHZJ`9tm@~GxWfJ@#V zL7}?`9IQJ*K-&bnRu|pShsd{pbb#Wpv 
z0jG(@fwkx{)RbBo=xnJ=fxj7AVZ$h=7^aNU-uDDZ3@A-H(u?9jZwbntG)Q4{)V-akyp5lXF-;N=NYvWI(%wN3uMtqo~-&N|7(ewx;d1vnhYi%eS8#O-$1j zcDUs8dTqZ6eTBU5nAjR4{jI??u+Z1h<@vhxy_V~D_%ktqDtRkZZ5_M3yX)WQijwT^ z<+xw-EQwfMwy^%Dzv8BK`2I`p6y*l1m*ZWi6M~0^$DMzSbU4vvQ-;lkZp`psPZC$O z_G)e2&#a?mzXHe7El-FwzQ&pcentmX9eH|F2Q8VdGH;@-qp^|$RH|`JiG!Wd;b=($ zsp6a(@Sy3;`7ML7VWs0RpmrP&8-|Y=ri&}c6igk!g4BQcY9%9H z(_>Oz0TNsS|C7%~dmHLRzUoe_%`UouY{5sZ8#?U1K_2*=8c22gKBEOx?#hZZ!XC2H zQ^m<-TKt!YO;-wp6Vi`fYSbO-fl<;Nmq78d5Q||5%qeU*+j3q9`o@f{oXNndId=a4 zku4>I(4MjlGi1hGSV1#o_rE953g7&JB&3;Q+1t-Wsrf=+!@4+-&e<}w|JH20{{0S} z>EfgQDqqvT&$?-01V##`?Y_}rfBh&ht8GE7@(St*<9~S=K*RgRio4wVnGqbqQ%4HH zewjYLA`X&$6K52dvYISZbx0hj^KCho|F#Pmht!pMy6rY2{n+UFX&Bw4(v;b@mXJO^ z^4D|af$GQw%Ei)B7e{Zcu!!qpV~vly8-KRtM2&rZZK`HPPo#q?yzR*)9vVh^%uW+Yb5C3gvzy)5!R1SuO?FW zLajf7Uw@w$1@@nC{8qQ0dWDaH<6lUSWH6Yjtz`Et+cSkM10X@MdhRPhqY=CO3D}>A zk%qz>zZ++`!MxNr)OOlEuSqabdoJSGT$ongDAi%K?DK3L|5gzyf;0Ve^9!g!8A+Cj z@xFd#+6GE^i|Adbk0&kzNLfE9LUnVBQ2!4mNRxBPL9J%zfg%>->VZ}7**K1P%7j6g{8gB>a9p%J8xHp)DS;susBn0pP z=%jh#Z=FuSw$|IIRvV_Veh|p6pl0zPxV)WNFvVrGD_m^Zm47lkdt}nOCg_0tq25~^ z2SS<)nHYriF=8;f_0=(O7?8lwvHB<9bxH_fHo}M2=B~rrVlXeXflJ!9W~feCU+g8J z_aZMsWq=#!kBTDhduJ{$@nUxW%7~&(mF%Y&LfEl|ix)>*giZvznRxwyh%&XXvqZrm zrnpb^WE!;Ntw`+vxE@4z@Y{aPj4tGt&tLHmRDM5CHne>*4Ks}SI9!|aET%nX+n`4M zro`PtH9HS~lB&>#*y}o5+wLgk zb^lBl?UoAlw{-Eqp48o@?ydzs=0|GR-F~t?B}?){XJYUaldnh^+1WLY%KtG&fvT0< zaPTQnd@X>_GJvO4^x+}CYxTFCPji2`6W#k-?y2@nAQFqmE+E-5pfI{Z&o6PE-;8RK zV?^WH;e7Lx^S6Y(Wt>CT9i!5BJH0y7kooNGj?YIFvNx4UF*%g}iP8?-aN=Y&?eGtM z8D9@UT5>^>zxqJqyR=jF+xo3}M&peS+VlVZrfBKs#kvHtp2y?O=b~+%viscxn8s9w zJ*z;`ahbh0ZU4}s>X+UQ#p~D2o+A%EFmow6oqH*#3j!I}G&wfo+E!Sw!nTTmwkUL< z6H(BH9Smc7A}O@U{RJ|zZCyrnDC%T@;&aPJc$-33Lz>@S_gis`!`rzYEbv(#5g(y( z)74rqHKL2gRMS?>{;7O>3NEg?66BEQ#g>mNRYo;B{&?lD0XdLm&=>GgF;`oRH{)F% zGS@)j8#7V&sPC%b!Z-E-y4`7NVFG2{w&6z2vdEcmLk#aj*_~M}Hl>Hp*e1U=Yf#xy z@0qtqD3`X1F{7hMIjz!mWc0##i2I~@jGSYdR@*b_&`ZBADf8Tu^XQVlwrkwuYVVTR z-tApWuk=n)>=X4-3@LgA?LD=mG*YxZHgB*}s7jhOVCR*St!0b%y1s4aKG~#jFIZ01 
zcKF+jLHYU8wd?H5GnE?b8n`EqsW@32U*kyOlf@I|R@qZ7RO?HTjwxD=KdWhWjTWoD11d4Ot2 z5q2&}L#OO~DEn2SiB!#1TxD(@H5jww6%T6CvxxBr^2K8K{q+3MzOPJ2ZHjln>cDCg zn5y=f_@2mProAoOo(auoW>Cl5Hz62mk7$@rp4FXi-OqDsbuU!5 zDak*-d(^_W@py}aOftq71ofolVILAIRhcc}NO$#lYz<9$<9*XhOXc)4G2GijE5|R+ ze+kLbzY*B{q(=li3 zH(EOX>30cev9m zmzhO!47H8)+IVVF;6JqXX7hUd==3b{0d{(vh~w3_Q*R~Jpci*czyy8X7)&SmDb!Ja zy`L23DUGV~HWsj%J)u|n z?n?04pXNnF=#O{2F-wul{mpitzJni|e5GBoYPrd1NWP_1;W-;84Z;+H#m)uY%^|Yp zhY&)F+kWOjO7hhP`q%_P2q`mizWN@+ov}0I(zOpKP+p@d85%8TE+h$H9_7H1(|UG& z%<%FkB?{)_AIP40Es2l^a0x|qpEGZV>}>|ryEShdRxa&|_Wjv5_%qi&M%vhURivqI z3SO~+sd0+WGRLo!WOi;}aeLS#$$HqdZw^z9MaKvdHD409^{X4G+1K@2zd_M13eH0hm6X6|X1$-p#_Cds`Xr`X;M;DYp_0EBUzvG&2}7dDa&iv9KCG0U%uaKI`#6oH%hB)?B!ebsJS7- z6^dvZY*!S5GK{Qi5xvW>%!<4h@}OPrbf=G+@jnZh7n&~Zxf_&_nyv$SL+>$6XAK3Y zz4R+2)J~rm1FB17p0{XdgrbV-kKxbl?vcr_2qJwZ zv}oDM-hQt_teotfvQj_@QZqv_ zT;R?ehJh-fEi_Exmi?gUYje@Z9*Y8uUhR>`OqXD7i7*!_?CKYb2zlZ&m*v_Ix;~hF#-LIl&4o# zl*j#E{>*jwLfrxO1^O7Zs7+!$jm^*Qg$uGZZ%9ZY$)t}rhEqTE#LC4B8ZQ+c8Z_@X zwb6U>lEr5W=J-F5r|9yz@oh{=GJLrHH z;V<)_A2;^RdZ8AVk~Aq_gQbU{rtF@f2dr~DTco>#!ero-;<`5efIBP1hg50WK-1J=n=mtF1Mn&;Vv|%nJo|zm%@yvf3KC%>1$F4)6&E4YT))-lIF)OjxevhK;EfN zs`k0J{etGE)dL}dqK#^7ai;#X-%Da4!JXs=op-CzY9%BM7Zz?}lKf-D(7QVi76$gu zg{FCDd^)e~_Q7HiA}iw{+-*PT6l!J=KSvlSTr5<+fFge zTAz0Nkt^o!4w$y_kBgo1^b;;#F{OpQ%{y~PDHi7WTkLY70feF4O58yiUxZT8WX%d{ zzAf(VaV$B-l^EQG|q87-E=s54Wv&U7f!O(A7)+*FB)uE8NETEpvZ-Uo<qn>iE|5oY#s9cVJ@D7_)Q$P`S^?hC`}U*%10*dH^)?t90? 
zRR#EbHT-8H>I5}TpS#En!;EG5pkZvP3_Vy_CLBoHL zABLA!EPDlUkN3r!%6H{gBrV>*+9hWe`1`0%(48Jtq~XExm>-5PN`rYh^EBOyjNzGy zxfVpI@Xk98zf!v*cBp#UFIewcR-WNEqwzx4ANOlmDEQJZ|~S$3D`K4D?uq;Bo8vx=3=lfAbK z!|P0T9&5S)xoiZP*o%}=@Hs#;^qec3SQ(nPnthjvu8eVuI!afj_7KI*(YO_hFo#=qZ6*tx;4HQd`Y zu+6@vf%wl5dsj81pS{=5wDo6||3@?Sm_|vkJHDuP(gw1{T#xR*n2|k|&gXBuecsl8R@wI8K93)3i66&P z429^e6Y~fuk%GNrqax-FL6cF-j!5eY?oGLg9+45_fUXOw>C zb<2wWetI8rOocm$AU8dWO4p^k4d0dC+qaSJ&cHaY9FbA>|~lKoUT%b#UEiy-+(~@EFvIryn6YzMw-R+WLm|I z;Fhwc(qQ$R%852(C8kXp!5x_DwRsPLA7s1)6E-5<*0yB)Rjm7_o|gN%vFZ<)~NGF z4O9O_v<0}03-!`*zDY2_viJUl+Jn>zyDUG6gU;Bi(8kisIX>u zizDDsT;1XLv}N^3#9k&m;e4;#`S%>)MhA&&>!y{bvd(W7Bt){u6&jkuuxFGqA|YxC zM*LZ(u(NrbD%A;kNJT2@%TF?vWf5Yr{x&Ep0#*18bpC|egXBR3ic`=-ZR7zIi@tsk z!-jbh9>zZMpB#bD+C1KZ0*7XCvI;tQW^$7d0K;5??HOt+kb(poaY@%t(Xf0}IU6gg z@}+p2CavCE?N|B_jJm~Z|7!eU++%X=b3*{>aZpVe(}`4;MRWJB%?VANJe+-GtnhI1 z2%ofsDJkWyK+I($LQ(dGRPV^1A=AT}f~}_}D>yYTME~|ZPIOpzU7cRmF!}tzl3!H; zb8ux$|EyZit~KWTjEfT~CoUwP!0+ldtCFiPtTn72B&@&Z2kyZj`C5?bQybxZyPC_S zAqVWOejy1@DITR51l$TFx`$neDhj_SuBDjGGK`auSz_(6Wd zHNyb9z<==j&mCtk~ z-a0b-;GfT$SzNY!b9~xB42Xq$6z=xgJo&;3(#BPgX-1v3+CS{%XRWnTh5H0AnsWVn~jEB?}?)!zu&a==c z&qlYl^+JV#D9xpiV=d*b4oZu|USV3zcNwP#`H$3w$s6mSeLx_1-|ciZHOF|O+ma11 z^Eku?>j^G35L{852X0Sk$_GV+>bz&`L5#2cwf zY3eb^2KL>092e64eBZT!kmcRsUgh2$A}2l)uNnvNkVPV2Yn||tE{IF5<%I|n(krcO z9-Pl^%!{_0@wro~WusY$nL0zg#y#;3xUENpsi@yYe$KPouL(VdORS8uYVf0X1UZ$xR-Zfa*tzvT}DDl)3*)9ZrCC{TlRBX^}GI>q_j_4YAr8K zsah~f!UA%?7kF|%fdp9(HObKR+SrPe<+QbKZI^jhVbP+q1)cvS@jTsqcUq-_Z~RN@ z4%TF??klLqSI-W8e_$BR)9agC$|p)K+$ke4aF>_44rZLHHVS!%)~PajJH3cBRIs62 z!1plP^{nA#>-!(AF{f~IN0vx z17NJzQ-s0frPfk{x?|d6J<-vrY)?D#O0M812sH2Nmq0^bdt3nZk%A%kt||B~r;r8I z@oS*Ic)2ILeip~fx9KwV!7rcTScMusDukiQl%cX}6L_%CYB36T7TKM}qh9uEvpM^- zTTX85>GG1Znpzb5-8nKUuwq|L_%T1Rga){5q@ERJDs&ieTf;%4QBy82QlC*D8kb>>{OgY~ zqhIt85TvCQwW*vc)6;A^sdTlE*s(O<{a_<|@MZU|@axQ)VVCp^#s-~a8MjEr@hE5m zJ>)4<(G4o;$J?sb6;iCI6=pN`ouH0Y>)i1@XK~l1v1HaS=vhAq)R&O@CUc_O4EC>+ zI=(Evr=GpL8-W3Y08eCjw9cGkqI)rbDeb;+59Rm>RO^^0DS{r^^EK*J&^}_x;rb6* 
zbF|5ww#c7_6j0BJhos342+p9_Um# zMfGXhPgJ6UfnYL+suCAE%V+EAbVEq71sefMFEewTSE2RNZ@MF8mc`=kPjsI}jYMmq znFY_s$lJ_-y0QynIKJWm{JKxD39(GHx^V$GE#*YG$8tPTgtGrVrOSjeaLR~!0=4My zGtaBE$%{w4^nK>AhyTR#=R*5^Zzjg$;ez_yl@3UBYs9ro@4Snt>U_sN8a}+wyww}% z-;k|lLBd6a@m_X@=7f&!T0KcrBQTRAz*S96d2rhAj83vj0aDp+c8IX0zMn4sNSrO7^l3{FvK6Nbwq?4UcZh zHJgeF?yuu5ctdQHgB`qZb@%pvZkjRZxHKVC;J7h}jYW>#_^HEF&EZs&xl`pj}bnAHCKEUb!=OW{qyc`7G|qTc3!>d!Tt(2YUeHi-h2k@*j!IY;+3J?ILTUg$sN z`&sUtheuB5NS)UO#S=kp(a7M|Vbj|UdL>9Vqgcn;JdKx{p>Mc;<9B@E>*am>BF=U{ zHFFwRi6zHMIArM8r9nWj-|#es7Idy6HSQk0|byhZI0-q1?HJaJ@`VtmBN=uANx%Cci~fM&s5E> zdZt})54*tiD61{&1lO9Ck7dS~OG5NZD-tBl*5&!5wt8gUqv_XU1%Y8P3~Zaz$%D+xK+huqsBUrrb87=#Tw?tdeCKg8G7tfB3bZ$NV9P=?TjIC79DXdIWQo*wD3X zis{mzxs4KpsT@E}@Z|PkE86=K7vr*->|-TP<3HxKvcpPRwD+$#x2krjdzSC5lFACs zGRHWua$|OSvv+)Ll16>h{ScZ)@>HFLUoMQ)%iozVUmX8SeE;<@xty!h;Xl_UXtqRE z299#qJIa&oG>c1QHrZ|zs`NnP*OZxjoV}-HpdP>M_RD@5*ifNtF}rJ=`_?#@4^FU{ zGC8NKoFXSYhx0Ft1S79Je%`Xb!0j|>b63Z)O!imF4g;om zIf#r3{|{SV9uMXB{yml~k*TB*rlKfGvacf*sTi#)F$xLUx4|$;_#}*_C}fl^p(#st zV;>Sz*6hqEduA+SFf%=8^!fgN-{*Nfe_HPQKKFgDbDeXp^Io?H)<&Q9XY5dgPbW$fO!dBOr{U zVH3fKrj(ZOT%jQRUTdKavq96tR!^HQ3V0Ri9)l`D216|?B-(U0s~yvTl4#y;I7qZC zcc+VtZY(2HT^3j+Z7mNk!|XqU^$@NkD0fY@u3CCimQ5PnM)SqRRI5&4i^paXI@F&a z{C(A5N0;%IU|pP!7xEr&!j*@)h1|Iw>`&KY;m{^zmoYKt^!<*B*Q+IC7NTrM&gIzH zOlsmh?_P81HXHFuddW2;C5A`~Z`|te$eJOi{A_iMJ;(SXAw-Uq2XUDQ; zE(Vw^8bzfHfRKQ@Y^tN7-6cWzl=*`Lv7U$VT^Q_pD(Q#r(zU4#CM_19su#XJpOik83>vy6_iD1D+}~1j_pn-ekXgl^r+fha7N(`%Uya0;-CJh@2Wgccrmz0#J^Y@%M zp~mn|2{id#POtgYWA+2`n8qqfGdh<4zgFTeTrN?G zQF^BDe`u8EXgtmOap#=hjNZp~Prbf*xp1SQwqwVRM}26jHUGxFbpB=M3#}1Trvdyo zH}xZ{h~JKtlcDwC+#eLNaRc5I8FMJusEZv+n-3blPGh*GB}{2lcPud0D9dTOL@^X$YW~V4VmY&hSC?TGH^_RONvxci{4c zWD}L809uBF4It3MVtX4IMeVmLfjg=}YN1KC)vejC_F%Uu{wrlVuJbO=VO7UTV)Zg{ zunP-y;_~nCDzruB!Nz8!2z)|*nNUI~<9MHef`AF|UR~B3_AH)9I=5i3?r01C+~qW7 znGym#j~RX-Bw&WPXMw_u2V!D9tQOiqS~8s!zOR9Ox;r9Uo<5cu1CZwcWmdJs#Q(=8 z+pohHICaI_1W;1-WhhMq=x=p1r_y z9%c|#t!YD|GZi^d5=vdRlmOHWX^$XGGXSU7M2{~g&HyD8O&FsrrCeSdfF~;8bhTLk 
z2-kxX7fflqAfH8P){kv?(!_$OGbOgTIBfV)&wNI!J?7go8Hd~y1hN2Qkri@tGQeIGYjAV{pi%&qc+4##@n9g; z5YG+z-i;A>3Mh9tbh9II9QO5Vux#n4Qqz4g5+O3$|2}H8VyKOXxqGor9^Tl7=W!n> zoA@o_ifvsQ7 z!2vS+m{y;jL+l?2?ap6q0YIT-wWy}AWW19f!m(>mf*f+Xf1jSc$}nR~ogf=J}Hub0vvsb|01PVO^m}d@RS24EQG;P*k&T&8s78U2->_7 zkA3b4zTTE9NGp7heM;s(;s`!4jvz%nXkHS(_g}pU&IkM@CBn#-izhBovQfZQ-LbJ2 z3L1DC6yct5bS#SB5Ww?a8{;V_^!0qn98Cp4;6q8Q=sTHf%a*HeX$U|Cw}d2OgJ4pB zzf~k4zz1{!vONHTZHXUTo8}p_3U8BIK7aEA!u(hDJ`S&bO1n?b4uG@!qk=HQSK>L! z2k7{MEz46NZUF~fF0-Dl>C?ig}caN861*a7)BNn!-LK^Y6a_BBFURTcT=${E73CK(PO( z!=^4rImewehlq{mgK*OQ0|DiZrxdm$ZjU3#xm(IL`qSzd30q9hG8upgDmeTy&&2L; zt4ybgwgucJZ<}qPRRh!&%vWT~*=1l^!wFwUbB=aLAHO6qI)KO|{imlVIs-)q|dw%uE+pMGKEmsk@DVEDSJ-B1OTGj8}jn8m$b=?R#+{krPz_>3b(J zpF#YLAy?2$5KPG{TN(|+Mt{{YwuUJPEB#Z5(_>?Ok5#7i%}@t9J;2ohf&nud@OP-g zi~SqxIZXybWeF%GV3{DT*NXnwiMetVYSd7@lN0ptXO^^oV03*{E)Y=g;Y5cIM`mFw zHZ%WT!OYk-R14)94GZlrTW2Y535l$LH*#UL17M;xDmRxc42a4=+ab1+82u?0M}3XT z%xeMYlb=iG#q4L&z*tj1}E!*1G~wirdy%4;saD_OgO&;!}&ENh&u66XT!hw zz+~dgdOx@wP_@vN3k4Z;6Qmc8YRN626+?%B*#Ivvm5wVcyHF(t1lI`^n-? 
zAgd*<4+GL2(16R}$(Z3SA;Dp5WV%4xsf>nren<_CnZA==+gXyeZ1NtqnTh*q3WyGW7L2KkW@>ZVsdm#Rw<}gi3H)^^?OSv`sM7nrTMC z=7-AIVxylDw83;$1D^)2-aMm?pL7vN%5h>y7mdobFxQeAF?Fk;ni~MqCuKkLnPOHG z@0R>M1&}0Xw8aKj;3~XacO0J`UrmkiuP>8yD@m~~O)os&-}l?_UulVJs}8To<-{yr zdWOiiy%P1LZ&!Y@5I-*%YXKFJMN_PMH#p_r5(51|E9J;bAc4F@PSi$r?`VbI4a;MK z)*2H&F-B*6*iql)J`d>s0?M8j(&*%JjyzO@9jlBZFM&=38%`%vR@pBKhK~7hf=N(> z6X|0`Q?(teVrze?$0ig*p-#V5BC>_!HUm6K3-18BdJ(N3u_@`RvsQGPUn*#;ZjWiM z>RksugEl%d3*;E{M^KT;kRIetL~a-w24+naP7Pkh*idXdL|TjWuJ*t(k%Rdov4zp+ z+VH_Gg0NgnAq1mO;Hp;KRB}!i?NbgpiiXf4u}mZOfxf27+W3rFO}bY`ZF2kd^ug=@ zJins|;kaT=n-|l9OjwB_GT_9SKU`0MEzZ~xAUiatVm&xgUx4y5GqOw&{Po!{zB>7J z>x-N2M#f#uFHH2Q&nDy-MySe-mbH9TBc?`=PeiY;lCfZ^P=~#>YtIRr;35H6BL{y9 z5BzFh72zq2i>)8>eFO7Y4@z5FZmh&Sc6Cg(pt(jye!#aocb}G(Ht0H%BK}J0Id6iW z6OUTjp%2YoTtsKWXFS$NIdsFaxx>F9Nz!4g3BQvFW)}0>3Ktc+P(sU?MelE^BBKa( z)sv?mg&Vlzj-~HomHR3S&ep0@Nkl$sCq8HbD}*4o2bkSEJJ?wF3fXSo@@R47 zNLi8%O!cQww5!}orR97w1^dru1N|~aUKZox1U;FIg_>pDbduTq>G4(G;*asALeKg? zm%W;_ye`DOYj1s0`w~PK7J=CZe<-vNRJSqJMB;j&G>(BOJbZ9v6p=SAq<199zvoh8 z+Y5og*zdgCo9GZt?R^WO6m5)cJ8ZCZIuO?8J6v zcV0*f;Iux$6$5;(Ut%H37|Qz~Fs|ZLPj=GgrqM=+x+q4hrs*D!lyJ0HXU?S;*Lac= z9_gY&RMA$9yPF91cLPO6c(9%SIF#qXZND7)&S4w9_#tt(^wwL=2bXsL@_OSI%bhe- z5_d{hNO{nb{(^b|t;-0fH8zAu6$8Lfj0vJUN%{zRq108x`p*3h{H5_TI**Oae)1|1 zV}8^F&slbRh#GCJ8532e9Grs>tyN~&bQ=@mN0AF=25X2|htEj@XYWhcDIY6#i${oy zFSZk@^7OZiRQ^U}BthzTwYtMNn4TolU%wwgI_=Z+*W6gy&>?@fk$4lUB^&>74W_%h zaS~#|??Tn3EE4h2W8zHL2?kp6?F`UgfiZBms4O5w_x0DtOhKrXPw6A>$A&GMuSY)b zd3Zor@waNbD+L~d{<*1xK1h>qILt`VGxc@y({rM{c4_Lv*ayJci+iGNu9*EYe)-Bz zdct3!wNYlwI6_(rHqrHnJ5a-rhq;$6`g@XMVDw3{=8bb4srWjR4$ci1JnA+oZt zsX}Q{ux;NCR6zzu-kV1zuH;C)SlhV5Z1PzceY7)z)D=AynkKVNK|)rGSQes*W=MvR8(3>4}^`ANkO23{3&r8a$nJ9P9s=TZ(Q~IVWS+k8ebKP$| zpTfqleQ~e}{otA4Hc%^BI;s*EFc}C|Wss8rkb7ZOy!1VVB>B+MxM*&{x{PhoC$;n+ z@Mz1F)Kt=5FSE#Ha{Rk#vD8# z3O0th!P-2_8GMeh0oL#clrUJ%t zhMMc(rVItsPIyOOer(0_0p9Zb#U#v}(3dh6x0bPuck+=|wmjid;HE|H2Qy8-Q-Qv$ z2V5b)y>z_J@k-g^a`)tlkKl3wuQ)3AJ>p?4B9;e3!7Q-RHT8>l`Fw@2=jfw7%dd0Q 
z(qj4LOU3m+wbtz+C`j;+!CB4_3&e4qz!a+T;72<;IQ)*yt*<>ufvt+9*UTpl;g>k) z;^)ESeTUJ2nKh$9k(>MoLz#_$Rb;mDqmp zFwXuLHC9%u*j>C>_d&VYGNt1KVf=?$hlIM7OU-RCLyJ3<{5R^}AQ5!qZ?VdapsQ<8 z%$GMi%E~*GUk01Zf$6r7a~2-^%pa&ZEjy`P%$v)s2Jd9_wGxYlKIGE z6D^ZI1)QPKh_u`jM{T72qSxZk6` z|7D@l8;Jl1-cGbxMEJ!JOFxW-f1M`uZ)-4ri2CC6P7a z!s;DvLW~049@-tL{D^XQF}N`wts+(->sakX^c&v`!lI>YNRce*MJa#<~oqe`?YXMlB85_Z}3RCo*ZE1}cQc zO#5`xEGID9w1`+OiYEOJglX-i2dy;0cg1lrts%d#^Y5+4R+pf7QtLc?!kMYOBb2g3 zYBRW&^Q0`$o_s3dH&~S3&7{Jo*jtZ3dz7C)Z6))t2^^?wZ#|r!^K2e+ZX=~aXX&x0&|NH;CwhBC>5n2Vh)ejri z%`)J$Fq3mXDuQ`K2M(dIkATS!y!07}=r{KBEa~z)j$Qxt(sLN_(kCHU#>Ev6einQP zY}9?AQ8?jSAN7!#baC(@0{=h=cOs=}kB$o5+VjTZjJ6bUfZZ zrstH7RNjrroyMp6vUnePht8zX@DXP=m75F>QF~twjwM1K z3h($zluDsr+C{pj*xYPEzb>WBatl;Dwp@SyN>+Mch>s}N?N_v2JOj{}exiNztslq; z()cRwVY+-441SXj85<^uzv`ErRa7|r<$_{fR<`}n4kXcF`#1L;5jEr;aPtZtgXmk1^D1JXFAl6&~N1n5#6D4l?KCC?BzF&A5_{JjM;!yY3rMar4n z9zj3|UK67881n-DzX1kzB%BBXvr|-~$uN9|Jq(C3E3hbnP%mcEg9FOX-2_omA(~)t zW_8g-T%RA022yqxtJt-tSJ&62JsJ7b>M4Vh-|Kj4SOfr3^>(p z?myu9*xw6&K|ovo)Hc|_wJO5gBn*j*?*|dw%eWhhMrMMfpAlZm&xTtC!k;na$(9Mq zA})TnGn+JZ28T_w4TC;{t-@&a4`DBaOm7S0r(H3XHTBe0%&w*D62)GnoAc!^x;gAG(zOv&zRQc}SISz0{az!+ptruGU6_5w|t3+3kp zR_F+H&(-!FCA$%N&-OeJQmxu%kzt=;4&kY}R2d<75)P#f(eO*0pr5t!kE?So-L zIoD3ZL6hv@)Rzw^pxl(a)vMB3R1twWW9T+x)&EKxz7#UPVVT4{BB>>fnLOwQrw96T zdUe*oecv>NJ+?rF(>5r}RbkBO;~I1+%*KBWy{*iFZ(!r7$)8nhW}* zoL%#Vme&6ae%Wx{8tn|$d?NAZe?8leA9zb~Ytcet&(E-N?pT&cO2{#$ay^g{RP#&( zEI>}qst|3r1?z-nJv%>rh5sG)%vd}pyRo-32qs#vWpHQ%!tYy%B3c=|FbHpwH+?e% zw>ILX6rUt#Cnc%H@1~u(c`VtVg$|orUeW;~ytfHLEg160?@Kk9?u8A^m=6qenvU|b z6*Sh`Xbj}f3MqSgAHdc;ZAXMn{a;B7+l-75_5fpU14Ckgt?9rtOw3?kEaKTpwErIn zqD&m96wbx*zym{v^kB6Ae(oaU_L#6IFYOd4lfs#J8xzvgY8_|Wm2}5sNA>*pYCslY} z=ckcBHaU1$c!l{JKlkkNx^U5Zq+VNIKhhZ-1eEXqJjC5P67%xtK|ojYSzWY;c>b^q z4e2e16-~A5HiwgAXi)c!NQBM7ne>hGb7`SK#zI3Ft=~(!OHq~r6Tl2|3?iZwBch?Z6T%>5d#3UiWO9rI0mU$jeh|gQnF>t)UkW@xq_UMGQh^*Y5Hpb2`3K?ip3Ap9PH884_nB$q9! 
zEJcOSgM|$jT1ORN{7#ctf}_xxf02BiVWb282BqURGO}#RXJio+F$^sg0BDJTJvipU zJxPUR@Q#b5Sw^@tfn@o;*WXvtAUS<(iH5QH+13g;+1Doq|3F}Sd@lnBgbidr+dw(L z_Y3?V7W|qAg}rSSy}5aVd6TAD4-2(9$apc{9;8@fcQL6!7lrRL`3!fdi5PfRaK(6X zTU>mk_yJ!D{^XBg^q?^sc~Cr*2{vT%WKZ+)BWWW<7O9e@5e+V0ym zVCAhJ6)3x+d!0df>ujJ3*b9;99c1ghH^HQOsPdcoKlt7A!~0XM?!Q`in4-Qb_^oOw z?(1Q0dq=-*PSfCgtImBF>hV;=ph?VeH}#7O%5ERt47brAs76(gelC^`{biardt;xb=Kj`}3##jO1KT|O#-OG8 zj`W?3J_DH3eZ(%85ZS)mN{xJWvj4X9kHX(0AjS9lXy4m*GaW zGHg(X<6<9U#4$|j(ocTwDGd{C7ppSSuPBdEtr%A&N_M{Il54sN5uD!P-9Pr(WS*Gg z>^Y+oiCtlQurCj~mwjJR`1_Y{MZ;QHdzj8izAKL?2Nn+We`TvM-cSjYMJ&OQB*G@G zeVkomOZ6?RrY1UwPM!a*RA?<9F}c9abz<^a`@O89@D|TI2H1>gFiLotj3JNX;J790IEIW16LRtKIi}a?fw-fy6_5M`bhC2h9BiyXF+j$7=Y0Bwc@hDqKI*sqlJK z+h&|+kB!IG-Cs5%ehf4wwhYs3q^ZaY-K*|{gyz87YY%T zZ|?K|pp8$`gJn4l`ZGoG6Y9;`C7uY76(Ul-?1i4{{edj3+bs+E4vS9mfA}7ocGY*P z_B94_Ewe0_9FoK|V&#PJG8JgYLw3>Dq93lr+@;(yD;7MA%EVMIub(H2>DjhklcENQ zWl}JM&~ZYeJAe)`%&M&&a-?s69lN#n{&VZ0-)EuxNrk^ATdS_0`YK|0Kot4XGG!7c zTzd>1(rqBew1}Ko=&UNZgFO8DSC~ldk8>4a5~MZw=}I_k9D2)Ahj zh$!g2#iv&AW#KN6jsY!oz9}?egio=#0~5lBQjOy{Q5NT%cyFB$fOv&8DhU4t@uc2j(pGT$Y<_e(dHHkk{Ox%MT#99 zTYyctSRoye9XJl#T z*stkB0AuNLZdbu&xARFZubagtwI;Cga$fI?qSlq0rajD<1B*6?Sea-sJX3;Z*@O=p zJ^3v+x$#TXnoi}2kmAwTJ@@DW*WKd8?m+}Z7T>gxV&TrPM`JsD6+8He>8DD%R)h7G zM#85p+G1)O)3g1V*y-hlNT0o`xgE2haqCPAlyU?ode#~m# zFwJRuKX{ukkW*Dw79)GcEur#k(V>Zmmf@iy%-PzVGq-(73o*3J8|7uO7pts=8a&P^ zDx8QPwlcT3AjV0xq(%m3>L0uN&-0Xv8)MAJ*RGfm=G>PJBKz2SXc%>^@fqS-3h~)P zrRu})-tTucY^&66pAr?&IZt!QCPkF+lwGd8 ze89!Hb^rY*87UfVQfjvSlNzt@=0AW#366LPX%y;AeYzM;K1RvS*YcCK5y`ZovgXxs zhm;Xg^2c&})BW5>4i^cE^)i0x!>Od$dzCV=){Kb6i9K93J2TN&#aH8!Ick9O zR{wZE{KVzm#^>T5;h)5tPaY0vK^<^HpQhBw9M*@EZv|qOc&=mlA4qD3S1V=%kYL|s zrw!lmf|K>U3cF;_SwObKkyo#W90Zjf0qD~TDW~MuK_Nfqy5EF67$)bt=)w?ounU*C z+T^7-FQ;*Pc=jbq#m}tZ|1o%eXGfiH3)qB8(y*}IdYFmMF`8m;F>YhwuFY4XME#^l zDaF75Mnj^!^)W@ONEXb_Ch02EcN#$Xz3d22*vJ$E2UcF~an#rZ|05q)mx+1Fhl72^ z_?Z+n%wT+n(tDwGN&Wec7e$%5{1L0;{YeZW1-@{Q$}OZUiT3YCD-|*X7nK&&!Rk>& 
zXvn2d$t~@i^w+qN>h|M@jIY~%*q^=)PG|L&u!JVyPkcLW-*vs4i|TJ&)7+i*B38li z#JthuxPm-D*L2gv89%`Sfu6@eV;l*OEU%)7*J#;4UzGkFDa@L4%)VLgDgAX#Q&ZG9 z{=@E5yNrLyi%Seb^KWNoEBQ`)Tn+E)HqQlH=3N9Qcql>8_g%Fnjla3C(m3z95vEuy z!O1Q>mba)t;g=Qzu9s>#T0KX$T)hMaF>n)U>R1`8+X`O#fPQ-hu? zy^VJey^wyB>p6Z@&+-dlH(TLzGHr^Cd-7ZFssj}_|GCm3OVH)*j*Cdz9e3xmF2c#& zXMK$>smNW{<+H2ewWq?i>p4sm@=WB;W65S-Y+W~2=sR>oZdF_E2bEEQR?Q*U`_t9;igf$>|(8L z=bBn$_V7;Hp8w)iL4y1uB5S`t_($=ph|})bnHiG-tHei?fDoUwf{VK`mBR? zOMqung~2uXbLZ|%SIYxFek$wDabM^^Fz(0*wEFnjiu57lRO`pTXO}Ja7U?N_GziLC zp`?4B?7y`n5@d-7S{F4xu?>Y6iLjCzNw^*E)tUhsxV-&u=Nxn!>JB$QD<4r4{Vii* z@zu&J=7(^|9abvgfqMj@7~ZhT!qviO^T!E$nEDAzBT2KPclT!AUpv*lZ6x*3$ro~# zbv(c1L}Oi2fsO2bGCvlrPVJ@;niqFS@0%Enr13A%hFD3y5j(=V@PUIadIP;OAf@8I|pI>3e7$5pN1+6;p3!?w)`3*LWP1jaDI&jfxhejr#}M^p{ynF#H+U){{m4MLe z4Zde{TXg>7}5gm=FZRD&w5+Cok3|% z`t*RXV`Yct&8`KvCsq&DUK>yBH0oN|X19Frs-y~0TP|AE)Dq{*(unfOWChB^)2!(E z)DhhL2pJa(=kYbuZR&ckd(bMp-1M`_@%}igHtDAQZ_+gckJ=V~T+x!ncgv{txuLbk zM}nF(j$u8se^}pXD&n?N=Djp4&YhKaz>!zVANU~0=%lBD{X0LqXKWvuyVgCl^v#5j zkgJiLqWgQ~51YCZxliIW|9K|xELMC;5Z{lB0{fY82l5isOa|0l-HU6ik9{y3E1PXi z+g-N$g8?=>jxhu`MAVo_wJuH8xSZkb;7HchT09sLq-5JBY1(QzEg;TVB-6HE(_Jm`eEZLQ{O{W92T}KkDeH}NOIBP6 z4=Qt9vk@=l5~#FN*};D0<`lUF&%H z+To8_vJCtql*CTIb^Q;-e~H$J9~)}p!HdxdPYn*B)md-AS+D!!%<;mXgr^hKtia~q zWx@BOzx|j=PQp0G4*DH~XzrGNE4zH6`-K?oQivc8z2P3c(#YEZpMU1I^q}h!V$ng8 z;{5DXd=KXS?c;XkI}&v6XFp9?X5-oSu8A(ey?o&NQC3YUeKiv`d@N;km~e;K=gi%F zphTGRpy^$7)3)Q6Ll2s^1k{r&l_0JRT#gLUq*rB#ZF5}lC-z6Tc||3|)SISzJ^W2i zi2R%+HtN3R=8PD#>GlyXl-0FJh=;CYVK5>C#;BCMs*3pv1FO!iXWRR-QZtWi!dpAH z_wHUcsZ2O8zBmI6W^cSe0+{JuD1wji1jpQmu%V0aAx=jD-KkOxWnYL2_i2FTWrRt9 zN(Xv%DY80fG?4j&!xa4p95x_n!1O~K;GJ@>&Ds2cbPliSZ}dN6cT}Tw`%a<&c8y4c zZ#Sv&wx+vD7f8YqvpX%oa8w1>J z_^$QsVi*|)j1?F%W(OQnwj{u7e#g6z5s-6v)A;vyMqLum*?DEW=yuz8zDLd_SO3Ku zQ`aR-l&x-mrpk7WDKK{z-idr_)dC-~;i#wbrLwlLsaVxb-T8e%dIg}6GZfOEhT&uP z_9sM!_K2)N7L%RS$Kl0yK{d%gpDx>CpbcO50cEDD3exGh1skqS4fGh)*LM-@RycmP z;3Sa?t&3$iBmOb8rPSAoz=8N5s3dtOwFJ{_1Sddgzwt8KNESfa(bAx>58JhD!#FLA 
zH|H;f@TR%xSw^7#!8CvCimy%>+)=-)+N+(sm87nWY!Pwe(} zIcy(0VR4~Q>g5q_J304D(XrQ`v-%_stU0Vw%(H(MYkT^nO*}}u+^K!!k)YP>F(kvV zIv^Uhd*%@kSUu#v48|1un3*chn)5T>f363PqhcllaM$yhgyi)XBrh!r91&>4 zMG_weHNIIQ^MH-M6sEy6$B|vH3J`l20cMPgMkO+@rP#_0o*iT2O#0tp;NYKI4e~A5 zG0Mw|i=AFUS1YBK=lW-^f(J6aYUR42tOjuLrRKX&lk_FIJNCOi1m8~!3sy!!yw=m|e@1U@0xeHvhbcE2hI9&nRl^(SuiKl|n~o|r3}xQ8Xck|) z62L!m)Igu6EOEWtCw?CK8tu8hYrVgSswK`N z)qivXE;xv~(dA=0N~hBiI=_D15j-4RWx~UC`P$u`!d$ba^6%v+`1LhB5A3%3u?iDc z_%^%VhHj6?&!hsU;Nc#~J)!Oq1}Dz)g3}W~CV_yWx=Bdy$j64Xohse>Kxu3b&6Iaq z=IFJ2S>gPn`gYzOxfN~2B|0{i{#(j0Swb$fnjt*qr?Cb((S zlZ?bmJRtG`7Bg6$c|b;2mcJ?GCWRfU3eF4?SAqj0m)THe*lZ~#dFc=2RKfRidamxr z6tlvF3pDxodt~0GZmZrKU$B=}pkfTUW+-;ecU(lpa%%`pq7pALeO_&XPm0~RJPwzR ztv%eW1=zcn{(nvMIPH#f51ceO_yWnrc#i4On1|kZc@w@YnHuPiZBO#do&W#m%B*zdbU! z&t2`*dnIX*eT-;uEHzE)@>HFS!6Q#ol0`rsSkiFOrpFnH<_CjbA9=`@Lt>W)u^@*9 z4K-DQbX?B)Fko`}Prg~I{Tm6#^sAwV%xRfs;;ylV$1_&z^`h6Zb{g)AGk&BpYWzKB zpp&wgQp4E=V}n3<5?PP$voPIYxB3JOHyYVYb%=dfGz2%Z)tmSV@HK*O;Fs9QUzb%3 zi`DDSiJn?e4y}4HDZJVe6xdAx3b|4WyRHczqeAH1g_s5&N;i=ySLp^4ng)EO_Bj~D zKiWax)^WNLoVaf#$iLwR00|)6?)%XF{FU~U?UciFjqKNnP)X3qlf^a^oHQ}M1vyst zn?Eg#b*vr5PaJ5Y9RVro@vBPAc<=%0KJ2wgM)D;HrsoLA-JHbvI+*movG=01u&_YU z)nU6;6V6xG;=REmTM=hlFO9IS92suf+EwtPQjYAys+gr*-;S7qX;z;IW5!c4dM>wBXhW0 zM7yp_o|EbJfnm#Q*OYP>BsJMR>Q4|KD5xze)7qr2U%I<+|CE&hB7F2};E6 zc6zziwUuuW8 z?MG8NUoGIi#O}!AfQ4Aq4wMho;LhC~9+8xAB54J$%;5(8fl!~LwcJGm#s=X-%Rr+i zAYg#?h82_NzFhTibKl^nbj>Z3mrN~w0J`&(QLOD#@?GR>%z2Ra-pae)h-5dQ6OH$(S@xbd zvD!xdRB<^zXZS??yYv0#L5hY6L-uPgMOLTFA&&lo(odt%!K)La2PbE3fli2HNF^sng!9cnZuD zWHQYtFj0K54!BNS$j>B^!LHn;0k<6;L>>dW8dfp;ENZ^5v@51w4sb1LeP_0GzR^XI z>*H<#ik{^+I6pv-DpPQw3)PtMTla$v0`8Y3?0+XYI6F8y@WqUm20tqI@T7@sx!lh4 z{71K*yCS2869=p?LqaX|Z$6F8AR|5u?;9F{PzNOjL z7ei@T%eL5ru@#Ve*q9|@e6Wx@B#LzD=EFIz1b|YGsXaYoD0WJAXn$eqdXczsr4#S}-|wjE++{<|+f^ zn$s830)fveg5jhbSq`Tu(2~2^JCPhti`jD=72LX28{8VZxvTmr=&3JjzcFD^mnPlx zfrTzik#9jVj2O9iw$M@dL>60GgfE^V1BNeX9$-{h+H4Z_Y%$^D#2w#4q`v)q zaeb1c35VcyZ>zm#u#)7)QlQ3R5{;oo(9RRtSLVh9^mPLRuVN+I#OFZscs4;-k(iIS 
zUQ!uwAZ}g2fKTy?NJnt!6c(ePgMT^%jdk?rY}0+yHmw7kMKu==Qy0N{&mPr|Me=YM zCNG|6(&HI{ra*|XeTegg!D^_eW|>jLU!F4sSC})D5!#BA<=S|PEN&wneU$;y$f+>S z!&}VbDLV~G27DZL%dxpJgxs+|kc~W39y!oFK2}z(OssJ{MGEsHh7(AfIG3l+bK-ob z7U|#iSqIVd(nN-TCGv8O@pnDa6A0--Liw2uXnV*M`>gvOFd&{+f{uVkqUyWQ_6D`! zSqSD{%UavV9s!iqnG1rO8QURL70?`gk`5v|01%r%OyZbfM-dD1R4NbHO5nsN9XWBi z29_BPK~r8j>CY)`lojBuj2uKvj8njHghz%}1M^}HP8$zXsg9s4ivM;+PO=T-WedMr zg~U4cG{>A33aKaSrp#MbJi0Qpb|7W$`+vSRk^>&dTPbcgZXvP$u;@M-H zMZJ*bqrRdkt=f!&MtWhKmH*M2GT1@&m|`~=$Y$`cB`PKd}e z*m*3VLjH}QX?O{?v5`bKi4L8HlOt##Hy@k*^_sTaAajIDj6~awE9qL&LK4aP-kaO* zcORGTNXt|EX6B137Ee(;FBhI<>ha@CoVZkfxeOn_I7L4k0%)6R$&Ft{LChWCM=;*} z{Yd^yHC$tQ?s-})=M6N0)h|VHOk1Wxy@{zy!!wU7^Uc(%1oK>{R`i_B-PFAEd2e70 zAz88c=^K#=&4p2d%9m5=s!@<->{f!12l0ah-JThZ89wy_oF+s*N}G{6wE?!BJIL`M z=Dw^ApE#+gY}NNrPd9Un5q9%=C;K(T5Vu1i_Vf%W0C7gdGXfH0 zWLs_l`qfr01MpNZY4~aDm_*0IXU}nlnxf*H?*IaTY3gEdV+5TfKUchEo-IyilWvw4 zxzBIvqB5-h8D#xbp#K`zy1D1g@$tN&sMSuw$+>=}0uNZaf{Y~N1uxQbNFSqta3!!B zk3vCWpe_xXL`SIh*NrN`H>2^v#N76=^s#WM@$^W%8rQf9J(1xbjj`vjsA2%2@W%Gj zoFq^W+m9$=J|snxb_}V zrr7LB3I-%E@xiJ3TG`Uj+g1`UE)|n<tTUg9N{#m6q)DRFao7k_yNDO*v+J|!%m9= zOm^ouFec+%fL}*s2C?+#p3$zS7yEu-XizToC0-~k>dj8t80i$rcJmqf(Ol%14~W|> z@G36#!358Wa_PCR)gYA#c+e0WEjNH%hb^pwh|nvTXMdMGk^L)xF#oRw9|)UE+VVV7 zLHDCI%YaNlU)J4jdDv|?{&!%x)wKoPI!w7q#WQ6M5Oj~8N%rva`7zdOx)4G6zmQwj zZU{LUTy{WzOhHL1UKFj5yV{4lP*KuguY31wHIb-2GrB^bOeC4 z-hn_EKS~#nL=9x|Xo&^DdXkR>`-F&0#^WF;ALBoN87>e2#N!NT>tZM{PlN;KB3Q{e zQUmHpMFNEHz%DX*R(J7MpM-BJtw{ z;j%{53NGUN+>HT3Nvm6QxW9k zh-#Oh3`Tp?3_s3WX`y!+)r>zLU2NhhRaErpaCG|t3)u$Rn&(RY6N0_+z;a#fA>>|$ zenyXKj zeBc{=>ge6>(b}{t)C&q|# zP9?~Qm|WyA>cpsE&&|1%k=Mh~Q&*1p?>l#2gKDtrqt?A2E{*3IVKfSRpL;WQ7wW_x zh)sV9bm!cl+bi;EirhCcHSM+qw*a}s>AKxs%I+_}RSw9d7a-l zSM_B_xn3C^vZyZk)HC_yaZG>{ZyWEtfW(PNf0|=9FiV{Pm9VzWb0J*>sG4%DPTFAe zGp2ags`ZU-vz|#TqSxI#TkhU1xuO?WPoiGdWlM)Xh~5U%F3~H~p4g(e<#MD)9-5^c(FH{*QddWSVI@?dN0r zoBTG^LS$me+H%*-jvei2uu>|=95iZcwM~7r?(a%2mv*Nk8T-gkK}Kf%ZG3na#e^18 zs`<~V_~5Upuderks}>KYb}TDRuJ4n%c<5a#gwNK{;56eR6<3GjBaFY*tlh;3&~MT? 
zixZZsV0WY}1vPz)Uwyf=;0W1NQfpEG*51jp*Y;!`w7Xign=pekK%ZbG*KS8k(ay4U zsTxn!4CE_|5)r^38^5UR_S|DhU~Qg^QxC z&vHWCdDp7uN0E6B3Ad7x3CKunCCS5H^NMx`WY-Uc&ln0Dl#||MZ#KqerkE4uY(-vW z2WN)%_|r2Wv8K7g!s$KV+r8@mE!IQl8+Rf}&QW?sXE4g}}J{NRT*Z$kj&C z_fS|c_GA7y6c_DLi;ICM4E0C*)3Y9ma5XE-jr*~mzwFfXHssfyDsvCK9IHzij9v<) ztY+znrbi|k3PndN%U$s@;YsU`{oe2glGe2stdZw3c(r-7DL8foo`*UGD}^;F<;tNn z>wi#i>jI3}8tOZ@NL(jyugHK6V#y+)t^_!)6gC+wJBi!+2e#zLFU{$TpIgzVv;JH>xA-MFWG$UDr{hl&r95Nn{wj$ z%s~$JFY<&BvuNI)-G;(NRS4vbUsKh^77ZV zo~n|BFsoUat1o(y^max#b@8gJ@@Xpqm(yFza zXDu)uHVh4Au&6GE3`hSECAQ)pi0gxNg-^H}m6hz#?*2;w_SI?aikh!QA)`L$1V$kT zY%ME8z5&Sa-dFF~ZI!w6YgAYR{e}kKJ4;Q{>r|sC(KXNN&TvV>g)!Zd%N6(iPE6&= z^}ho__#%WKEknDLu1?hmKYp7y5Yp(bKEnFBUR)x7MCh`wT0XBmwhA~QuAwrZUkFb| z_k63~0puOG^`ehTRl4@o&>Ff%SzY9N@)3Q1AlD)keyGNm+F1zj{A#h1Pr;KhPjr-@ zhCtc97-`=a%bV_d8M4IMWA0Ehk^BH*ovXJHQlaCIwQn5wZT;e9o~}`Niz+g|(h@zMGof(xx-GJ{tP5$wAGk7sxq}277`0snqNIzbl`af*Fc_5T+^fx||Br#F;WfY~7 zlzpE{lEh>wS)x+evSk~>$S%tyN!dn8C`__N_HD?%XUSmf>x^X#vpm0x=lOlV&-=XZ zzv{l{y3cjaxz2T#&!_q3<*PR>&V{qbCLsdh2Qjr(@{|CNb91O8*wtc?q{nm#s#+{4Zc#@7_F*J$rCYx)Y^6F(xByhX%DpI-F~OB9;vcV zKi*%ByO&Y2E%8jRP*|jF7y^NbEmpI5V1~lU7R0PN8GeT}@j;p?NvM$*?$Aq9^X=CA zV*Tr9Y=X$2hU|f1J^3%@}E#`4@p=ryI|c zov!>jr0wS9snFWV{P8vG+XdAr;iN8NIG`D1u1PzE;6nmLC8u0^>`c?-f*y4w(q+72 z|Bccnd(85q*=hUIH{-{dOd!w>Y?xP^pKJj=+F2dyBw=er#6%S*Cu=9!bFb0nczYiw zq{LbtK3$g|a7zyG-PZ#q8GBHgeFQ6M3lQ&v5!4zvf2bRkhqf9FhYY{0C&MJ$);NB2 z4n>PvwK3x?8F@%4vL)ptac!|KG2kc|(NC=DaWI;D0#@|GhMvc*R0&b!CSbwx ztKoR42YxA)Sx-*L(dJy?l9HT8Lsnbrm{z%&K$%VS_E~HLCC*XV#fxMaU#mQW16l3A`9CLKtT#_0)T>a7qTZM?cnCT zrS;2Ih3ES2wO#f}XC2;df%3Yw<%O1Br?BMbC;l8?G6&>Nn(?4VrdeaVCSR)Yj`^Q~ zpW`hX?e>7$N#EQIoA-Ns;m)jnWsKARP&-``$!?M4cgZ^sqXvaA3)Bki^fIhb1At<5 z51@G$0|F6Fa!8U}NaRbJ>Kd|4m#|M{zTWr9F>3>inU-|sjswJH{y8Wt8?Za!n#9eB zS>VQ?Lv|s*!HlFiD}h!ddeEwE0){{ZpdqndyTcw7NmrVN9i&P2IVWQil+~SgCv*w$ zb!EyTXv}+=1qL^FnEv2I6UI@6j$Fh=(+7iKenSMcVEE3;-YES>2cE8I!78gjYUIYDp0L`9Ypm3Z`)f5i0&dCZv?l zG&HobxZEec&3M(xGLYw)lALUI^ZqqZ{e*BE9K;?bL02_TTZeB~b!OO)-G$fBT3Wf_ 
zsAJ}{gZqBE@+13l?4QOyy={B&kMmP-VeiAh)+85|`Qa##d7J{>=h9M`oukDm)Ljr^ z^7n5%+XUh6!GzUSD?HgBHcc~|p+C!VH=!A0s)IM<(d3O4_?j1}aqBe z0rZE`)%*w6+lk@R5mS8BTq#XM1j2FrD0jDcrb`k*hp#l;y%fUBWb zD#3Rw%#Z>jcp5oVg+0(!N1dEO)kLo`&S{nzmwnMZGfMgka?H6fwxVkH*`i*Ofv12A zt0R1C?_I@7oN{qNs`S%yNC7a&*_lDBj)LZ!tk|RKdVdImFBQo};eA~|S7%qinNiqw zd@;yWF!!S57|CXtmzF^~bKKXEj?vjh@xm-~MtecOQkE-g`}nf0=IUqC3aU37&T~Gp zhUoQ8jxn&ifZ}&|$Kz^t4A*97mIu&FJMdP@x1DK&832~&77b-}6cELJW}ryan{Xay zhy(?0c1bmL*bO%>!$u2pR62fHy2~SzS)C~fUN{E|X_NJf32JGn6L@ZW%`?3n8JX8_ zWE2@mM6{ffJFld3Lh;bIX62@XVnLS=l;Ye6lSnS%?bDR?2GW^B=&X4;u0?TO-_WW| z8^aAls~*G^kTS_O4US3A>35AC4mdlT6ZU16<V*?LuWJ+JstVDq*&oYNb=Kfn%Q*dh&YYfoy(UE|9gREi1#nHt;j9E=waQEp{h7?) zJYTmk`(J6UpVPmjLnJ~oec!3GDCrv90Hw0jSYd999v#C&JMLaGm{I?l!wBa*C1zScV-%t5F8DzaRwi3zE_F zGo9e#xEh8Au*rBwrD&1o;^()QW1fly*&r-gq#0M@JY6lnY9lyYKL)?JD`FsX#{)ezM?`5_zrI9E_;U2 z-o!MIHorOb3VG^Zd;Niu=^nkOTeEb!5@@n`Q3QLL$oyh_`M$8XzZzKX*K_DZ=2G8V zs?1&gR_BiaXDtug)(!*Klul4kTXRxl@HSRRw)d+me7$RqmA$!WFWXf4JF~d0=RS1h zLrrdb1JMAocxlvqd1U=xX0h}((W{jFZS41r!jaKy-?dMO{F)=2t%*&K)kd0vGTt2H zxS~5S8<$}8#I9Fas^WUB&LwPJ&%*e6NM_3>>ei2|w={T6n3kksi1RRFALht6Ul7CZ zAd#}^)Df3VGY~E?%SE@5%%q|8jVk0pbG^wLnoeG#p)hF+B z5TE9U{a1?J9IkYdPlia`yyjI@*n0ReDd_gyxU@^{k6|xO1-AV}a)TL+)Ljdzp~y&2 zP*#LiNA=yM(lV!Bdq1&R&{fI*cE9TRU~lgCXb*?k2vV_VP7<70>>RK~JHAu1vuFK% zsJfmX>bq99f!~wglOwES%L6ays{@!lKXun1xG7@>Z)OWfT8Ai^b;xaPjDmUwvps6U z-g4KIczst{+QVHOFl?4o7TrST}|bp7oXoBjh(xGX?_(Sa-2`Rl(SM zW=*pu=YTAHq!$TN>*(PQ=8;UL&;$Qch(wySS_(KyF}c*~4g4ToarY^l7*gr(Uy!OI zd?y3`RZe-i)1@wYJQcUX;Q!ZFW>@n#2T+k}cR_y9MIW4?FN(q^Gn+wnFGS63nYQP; z2CObzs74x1>((sK>-_I`-t;pmmxK|+9bsEzsdxr{_ZvG$YKJ?Z8TNQuve~2i0Rps~ zU(Z}GQvT)Q`hY`wPEiAS*Q$-t0Qmf_N1X-X0Qfobt`pbKt$A*Xb+{Re;AdX@*FrOQ zz5U|ey7s$}9~)X;ST$oNizYm63M3UuujZz*M%iTSP@t{J(C1!DY?8 z;Kn$gIeV+eE6Dfx;5!PHab%TpLsM8OvaQ~5kNx~tM|U>L#^E__m8{uFPO+tssis*r z%8cR$-vSH3#33U7Cr}p5G%mVh+2+=i-j0%-MIXEK+t; zlQNbCVA~c=Q<1}4+QlSzJ=I@N+uvwm-6Ba!D8%Smva5W|wQJ=lZ95|ix77c9H=;2AA$jQeF%=$@<0D}U8G5ECXnB)C}n-;5D|aG 
zmcbkL6}`i3Zv)O^nZ?odG~`@B!o=9|dOxFnozF2qz2>#)zGXTjSk^+UFsh~!(Ta}2 z^8_6CQ>B;5OFPIMv6c*!Plam-oAatGxr zi9=JF28xgM?Y`7&J?hf_8TXJca-AOs-qX!nxHvLA)IKfyR@B6(+3oX2yKrmH>(Itqh zlORDG^aajqo<=6(L}3G-5pQSu-pRLfeGEF+e98{<$gt?A;Sl=WC86)EPYD`ERp4;4 ze$mb+BCvsrf;-4?)f1pRNZScSy^|J>9Kd{BVUhZL#8G*9c8xYzTA~~2_&yHW7FEhR z-J;C0gI%`GbRZ$65EPk-=hYEFPh&kw29}?m_z_fNJ|FijR(EAK%j@+^i zxwXNU)lZh61HZz}IJIExCz?2!BC61DJK&*z4R6lqj2(F1f55_GZg^&29Pad}gtp&@ zrpw!=Io%8q3Sk`DC=+gVoM;f7flR!IU~c{3Fm|kG^Yqj6Vo}RY)aAGA*=<%99zWtf zKI`FZXZ*q!<@lrW{NZhB*DU}u)3)fu1a^2@0dPT$mlc&Apy`Ve_Lp zjp1_csUg?J5Kf@LN^dTr<&?#4ryeE8la^N_H_>5Mk`x>FiB*x>bbai63G3~`@*wiC z%2grFK~e5ATHk(^zq)W;;(jSkl`p(X)6bZwgkwiT&Ds5ZsgoiZ4m(x}U(OxX&l|NU!YfyQq4C*T83CjgIkqR@1O+utB1#4q-`hvMnrdNETGjX9 z$Ls5}7Ok{Sah_M#PH4yKT5=_hq_7AUdVz3_!t61gZ$`1EK1PC$SbX7Ny$rvf0WtQ& zWXyA%U`W#Kl(%vgLNV9$zpKkdoG`h*)m)`}ZPRjPWW+RGbCUX0>% z3-FSXu(U%&+en(mc)Ri4zKVOf_b5I^cEhK!OT~`b{^YfAO*8srpyiK4;XZSBDs?7U zasH+_Km}l3-w8u%y6mH1>kNlRJME>Hl)BpO{e_mKJ8O2~6*(mWN*{w_&Gd10XUL{sk4XQz;+yy*kmh0)og{>ir~hy6TB zTa%(0q@$Pa>D6eSC62{paja)sL`4bZ*xLQl5qs~&9rOB_XkZGLDVs@m_2QMs{7m6qMJ`RKQ0pTB!k}%FWtNyy zjtj?5d@oD1?pft^Y&rV$mQ&azlPcY@hSMW0Q^wB%)*P)d_jRA=31gQANn4i0tffi< zhu-8^mLhi2I%Lt4+_Exl?<|v^#O0rLqhqQ-4kvpxG+6>GSqW|f`#1X;XB|ZOC@&t6 zo;UFAtW67n4-lfsk+qlGi4lP@rRspC`IQ;j{Mh)*t$-ocYxi;up z%`-hT4SUi)#kZocdd|X{gR%$WI{*^VyMaslrY2k+7f^xKP}+lWtN`s{KTYoeSPh^eOYjTzaiPi>rCP`AxkN@l>0AV8e-&KdJ&7nv(D=Ie-K%I``rnDL5`w+ z%UlO>&%M>fA;guTdCJ2In;LasUkzWBf#zC)`&!24Au`6go`{uFV5n^eTwejjNjAT~ zAc{yFdL4u%%=PztdNxs=o!_uwaL4_F#hsBLG z=hFJ_yyo2>`gp&s2Fco;LLuN7LXeGK1((*2Z`f#*1C)(sTuoZ7lZk*Iu8?5Jx-c*J zhW?pO@g|pW+WaV9I~IBV4_}T3)BI}E=4mY14rOhhq5xZ+rh6zuh3wN5=x=B)#Ki`4 z7+PIx5)8{AaSdK+0{t7+Tw|$*6Ha=rr9Vr`;;t5K8&s=0->(VO_-W0>7&LDX#6-)` zn23Ow5FM-uv$QmKEdNuyoV!nD>5ay)xy22?R_t*DYZjmVhlv)lggUq=$_^GW#tL?r zqB$}#7^KdcbH3u-qrUWh1r8mP5~GrkWS7y#V-~ISnWtz!Y};1#_t{84zAqHux)4N1 z2Cu?uq(T&3=kj26NlY5%m|)~+fx|8KADOTx&l-Onmy`MRN!0#^vGoD`VEq1%4C&57 z_1jdV#`6Ue=tF+ThB|4Yc@`~1o(<1a~2k$_)G zi6IOzq-oO@oHKD!Cw!ckfC}_O^{(EGE;|ia$OUi4di! 
zl5lm{a1=Y>4cj*#AWBj!AlV}Nb1O8Qd?eDv3d@dISXp(RYu-AaQ#_+z5M=UVE&l44 ze$(nr3g?C3Vsx>(3iT>-&>nN&y%cgwRIFy(vYz?&3(_NLvPnZsg_ z-+gHjn6ednB?nrVSsSemkivs|;9P!QB%MIG-9;f?RV4aWd2jB=zSh^@G(L6b#T7|T zbA6e0mMz#M@>Wge2u_;hEjbtIfAgCrC6s5e<(aI+&LOEY2iU7MDP&Xc1};|$t~ zYx(^5d6u%kPFrE73lEx!5+?D(T1}uJhR*am)J0=aLaV#GO?>tDvOni?|9dZB(DG^3 zd&czgxCBbv>ETGf*UT3k>J#LBKe@!l#45hN`=qAg4EGJxkzIRc2dsGMSv}G=4VrwEp$Ro~Sov1Cybej?6~g<#2PcqBlseZn zYT=jL{cV`E%<)=rCEu|gRyje{R9h95*q;kMqARe8(Yd>q|o+I@d6YYsXd z^?BRssyT5o3?adsM1pZLS_iF<@ZlzL6DcsZqISo+Enl9H@&sCWSb@TH?*ZAC>3f}r z??e@w;DW_5?~W+vJ5H1?O;55F!@|deDk*#}6O}skd~c5~R*Cg(zL$CW=pJ0?Q?JwV zs^0s25v`_2mtL(2m}82*W^L}EUOnq&RD(mhjf{wb7o%P(z*uO8WfXgmS~Tsike*Oh z&_BP}$m?A~Zg)ZLKli=e(YdWqkEbxDqWfD@PwOi-Y)-W;9bwqdV5TYcZ$58v!D9Us zGCDOH*5tpp%9j|*wTPFUTWZsNq9XBHx7|-4^x@fo76cTh<|#&}C>O;acvAq+mtS1w zp@9M+^VlybXziIBG7@aO zIXB{e{U|AlJ!9<4du@2(+U*F9%i;%$K2A;haNU;3MLSV+|AL&bZ-Iy)z^$`4q0^T@ zQ^76kkq)BzlE+uEs_2NX-zj>y;2T@2pF#ROODc&#Q@pf`kQITUFx{JMF$YyaW42275m z!UU|iGqA#xq(ycol?GNoAJDV%7kRsN99?E~pzjvlj>gU0oQqn^QF$Vwfy|e=O2RL5 zO(r{9Lnx)=xAEZ{F-BZjRt_j)+`K95QIN@68tT-U!{~3^URcTE`_uN-+B?ao$0SdJ z)}exIC(MC3$WOV*Kpssr%JwwJB+Nr*;G_DXs# z?cN0bC7$22IvjwrcgaVkDhfflIx|M}+?3wWB$&8H-w2h2h|Z*gys0Nu&jDT-4J*jQ z1yxqc7L)ehFJbD73HsYT5SKRM@g{Zv((g7{Naacp-I{ z33zy#W`BSLO}eM(s|ufS|E%L|@xY^mU<)neT%P49 zoJ@?QK}Cw+7pDE{;Ux(lEx+>krS1onyYE!uoqL$>jl@d5PGqq45*tDN07tYhtwaf< z9>$a3_G7Dlc@}c6Uo4^O43UM>+zJ7|Kzxkev;0KVl%}24YR-jSd-N>y^vlNBQq zhZ!1W6zhpd&C~O@rxG z6~Dumk|2EFmXslx&%qHL-%H!+(ydkbEOv-hqi`%S8&n=^ahlG&*awu(X@AbdQnYcyhj2Kr4zL2j~v{ zE)SN{^s;y+X5qsA@H@Wmz3XMHyG3`dcS@sAxbRhN~HJ8W9EMPsVkaS`--Bj2A1yADNLJcW171WNs#ar9xOXxZH1Xt zh{7VhuaXonzdgU2ltF9d%5fr=0R0V{lnMXP|GAf){&rlwWa|Rk#R?raW}UMKYDSqM z&tGn#c3%nirXrLHsvRI3)FO`|X4Ckyr0Jh8f;&elxX!$Gc>I21xUhP(y3LX0wnN@l z^_n`ahQUm9sYh^(P_$~2ZVH}?w=c^mDZTf#tYgNjk(@@F9F5>GfoHxqu4##;lWBtfas3n*H={*yPnFaLsoFS&pe#FhfAMaZmc4PibQ zAkd~eH7RfAhckF2gCQNB7Y8xRsd{MI``uUY^{QeRoSt$(AvMl`Y4EOfYT|a(0*@c) zxTmnH0{k;3d{5%%l}pN}%8!C21iMM+fJ(*eo-kc@r5hK$a6cA7Gw?hQUweZ7N17M@ 
z5oTvxkud3{ms<)cI0+ADQx}wb+Ydso-?qlPn{k_>J3fG;g%H(HPj|mThnxq6m!aPI zU%@8seZbubg9DmHxjNz+8u}ir6MVr2tTcU9RM*~KzJ%5x4qOmtoHBs&JJ}eCV{SRS z+A;b+%x=l7L$hzMQNcR-t2y1>35a9XBQfOe5&M$I;RsFE^0!xazH|y`zQ@|m$t}_r z)<82=iPBl#L3vG&aIswi@PdDTi(w*0GMJL=9I4Rww4 z*H0HVRhYw=|;reU2jnC}4Qim`WFK07KF zjG5~Pb~npr2?w!0go?#p77#72L)CpGK2S5I>Ry`TyW95$n$9N%t*cZ&_1HkZPtsq_ z7hkSiz_0Sl*{~!Fj)zSB1xd4)Rb0pU&LQfo4~XhH*obr#=C}bPlh2xv4A$AdV2h{P z8siW<+xh>a*sycx2sF-M(Wt|O}yzh@{Xg2Cp0Nq;E`Z4-Zwk9HP*Apf!x0`kv(C0SEUtk|3GE}$qXeFbP5TNeGRR_BWK4D8m| zX;7c2QEyS_ny{;Il4*GKO`9Y<>}zjiwc<4MOU2wX+ZUEFK-+fMt3} zPQZeQu0Wd^HmYgxjn8lVB#2KgcVsgG&vi6a4;Y>Oxt?$gFgeW()7Di$cIWbMp(<+>jhR_8 zWs{jUleF}zspr>~v(N4VQ5hi|=FoL;7Un@?{@n+&cK(8VGKr&|BFaDgj&SlsMqFOY z-xM18VF4R^>650oBO9kjz4P~~J{a$@;;dptdtKwE7Opex#orX zMN8YTL53`Pl1&3X!9nzb6K#NZ{l+jGYf1j9C98|5ULl zLY}avGwXJ{hcz*~f-hy(cy`dNuX_6wnqrc6SVg6;Ny9U5*Ry5(kBtA>d7Q{MUBF3m zMnz!mVrSQ&Hkdp^*MVJthTr`FD?Num%spVfj5po?F%6X^f^@BTrLubpJ=bykvt7-{1Ajd!GG2&$DW@bQowBRfU%-fDXaB45Gt^Dn^nKtctOwv3-KEsSBhD z8Selm{{Il&`VHJy9&={&}pa z_~}^D4k&kOkOnH)>xXt5X}LlCaKa_H z@-N7Y_~hBGw(R*Ps4v7gZm@cL`L2}&i5D~BbVA?Sh;t#-=K3U4W|A3aU+jhJtw$Fw z3BSIyKc-(&9lhq*QgvqCeSDL^ky~G^k{U#S9U$kQ_z51W-}&#g}_ z9o-#Lb5$8y+i*UHZQSOU>h^6^oDvPxYLBQ$*8AQCwpf9uy|_`ONn*HG3H8dVp&UCw zEtZE3V#~@(vwwD-xq$J;|62H3fDEmE@ir=X4SY0$f}r`SQe5b-ob%Ha6lae zr_Q$Z4L^Sp>+UyTD2jVevyyDEeyGhHSCt{qyKzJF?@xH}&lkwh&OsxX#gL9SJFU0b zL5|?p4pYXuURW9D_=D_Iq@nrQ_kt0yJT`pw!;9jDZyN; zgsylVjpmsDi5l+z+`KbcgRX%!B~BiM6VoV9MT2klk7SX22E^fU@Vu*)$XcuxOVFqJ zYeq36n=6A_=%bNI$?5aTFgR0eb$_0|Ou2|E&*_W13Si4ts}d@EI8;*!FP{3bC!Cx0v_T42@Xc(=x*&2&v%t#!~@C zY6Li={69){fwa{i-boYpwPtXD^J&^&j0Dufy)$g13bV2IO=nN29mVwyr2cU475T3o z0Ii$Qpadd;IXdd8%jo)ltn>E{sc`U|fE-PE@*9JulWFt1O!$1VZh?%}JR`ehH z6d&}6xr4jcv+Nmj-deSX#Cv_@^X)PL)>k@LpkhG>+mV=Xp!i^@DHI0-_)KbOY6RtF z`D%!*=%L{sbq?muqBRbtVgdYc&oP(X~Z>ys?QgM5hnKjB8}1)62= zTi{lk1BYi1`Bx=uI?8+y5q4Qe%^(qchKudn+!z?`QVpCKGw;U^P_@!FTn?76!Bzq;lW+p9b2n?=F;M9%#uJ7$Syt zSpo*UO9krpt_FW*3cNh0BU;SS*jSl@aQ8V|f9UXAuS4n2UtijNU!4Ey|GL;g;Nf7w 
zz}Jo?_EurxK%;L2h#@J5VI1>F(WDAIhK9WDd4F8>!sGj#Ix4JLvwE*|taRpe`s`4W zpp4a~S!}vpEwd&GZ|e*NE(`S0MS{Ex4V!%(CmtQ3 zywkJA9QS{{<rTt$=mC+g_KWGV+Es|c+z?YN za-L$ELYbs$7lCG`xngOaO}*syXLh;y$GGK3UVL)kR_IOp)%B@ld-L6)jR(<>k|1p2 zIj~=Vl+S6n8mi#>oPxtyBen%WVA|fA_L~QcsoK)w+pUFR059Yj3Vv+i4P+I{3=-j5 zr2SYh%3L8546G2wWm>h{{;45VTm^SGMU+JdNrC=KT%qgp&o@cUqmMu(A!uTIfIRa1AldE*9b2UV8$Mdkrzsxyh&W639@ zu5*K2w@eD#_d8rGY8NY_39;d@?>{@Zn-*;oQ%oH=%o}(`XYVC+H2DhI!X?6)5C@^ln zt3B#Us7#u08L0~6_`qiJ=(EJ6Coepso+tm{wUw2ydVbADS60PlP=fA08Dn!5{AN}o z;P{NBsXfLbh3d@}0A^>!^63Omi?Zkvuf^%|z^WSpk?OO|nZ7Q|>@MRXi1F&E4R~Zy zWqxG3!?06(ZmH=r3CE)qPYn}&1mfkGRIekaOC?v3=Kx_5z1igo!1nt ziOR%qmu2a#n|C_cJaU{@4yvAHoo+?2VmwHG_Il~Q!hI*&X$*;IQ$;> zZlo%Xso4wvspt8ed@{5p!Ro?G-S~^kw$0NeX;QeOWFPvc3Jc=2Auh)i8FLkmTYb)7xOP+*C@u{bG-llF=1KBsDy+5o= z)}f-Ri*)L+L7ErAwboB69gjR(J9cvLb0yXD&o75WBE3o{b(vRU?IV}V5VfZv;UOPC z>*k1FpICp4$49s2Ij*FdVxQ(=O5(#~St&Z8Zfu#HsS`M9it!l1D$@6TGDu6r%ZR4X1G z=w=!ol992bZ!h^p>Ds|2Rj&+j2~WxWlfkclFQ~PU_0(a70M4)XFs0nDfo#Fy7Zc5!9UD?BQTS&P!$Scey6anwMxL35dnay zr@;yAR0u9I?bd%mbncq?1;;lt=jjnfcTfKbaQEzzGo8iKO1@=V1P8}_tW<{*kFV2) z8ecNbRp&=sy0{g4z>XExh%@(6wN2UhbnBWwYo^CLR_Z920s>bx9aIN;l2DMTGM@rb z-Wv~sAu7Yp0*(cACtRc*Fd zYf~@pqNq&S_af~*l`xDm2)X67E485@a!c#}8L*6BcyL)8+&K;xcvBj>!sh`F{9=88#tYmZ$i`dkw z=U&HnZyyu(sA-<++7|>hKy3d7QAvNqt;QJ??*>^1v#y1_bzQ95#8)rIzawBI`2ldo z|43+e8xv8XXEVfJ`-pN7QQ%sweJ`=@CX^*LOQ2sN?ayD3qI@??+i;8B1mM9HKcVqe zx-uoU$^u67O~bBhjLHx*=FtB9I7!USUN(XTtNE+(6kj9ev_Dd>MrmX84-v3}&W1GC zYcIaeuDz|6p?NzJZR#;B>S}p3Ws1%uK0H8attgl;=47vn$FdUQO$#f=$IavWH>8=~ z5%rQkDbd1QJF_I_nxyIm{Lo;dHB??8Z+R1TWWGEmrXulN>pnfT>n|Uj&ab_!qI^d) zsLH3miaTHk3{l?EWTm{_O`!6HxyUzrJpZGGP)%L=Kvsx(h&p?Q5G1k^xYl+t?SiXL zpN)N~(j(+M`^;m$h$d(Z{3uOkQb<~M^m&<;JK1^69M?QUY z_x;dy>7TZ&`v)^AQDk$}vzjP^z>al2}@cXF2VP23U{;4VZ|**uB8vRj!%xCFa2YMdp>2yI#86C$jC{j$yX)nRtSFDx=}oDw|Az&^ zmn>CC@mR@N&tKueZa0-U4^B}f>k&Z=DWYi;2MRYlP%K*bIGf_4)%|wT)ZJfW0F1qE z@v(oqR%P#smnxMY9+0yvj5h^^q1giUZr$H=)wYNzK}-`mv?SodVA1c5m?u&{m(^|+ 
zg=tA#y!YEuo!iH`hCBM!g^ueTvTpHfRjK2vYydq-fN>uAl@P)W8o4O<{K^&4A64Gx zi7ArDJpYW0T*FtV8T~dgNzH)35?%UWg{5`)BYr1{23Zqm?osp6KURz7RoO9i9i#}C zl{CAAs86=YJ4}K6tjgz9uu>#g9W(|Qali#+{AB0SmfOb#MNt(!+kp*{XL=DwtcE{C zIxkDUcr@Jcj$2%eb^mnLt)v^$P3phsEm68ZMfHg6O<(}Jv>qM7kS?VdPTsS)PAo(Q zI6XnJ;pctd95GMTD7Ca6*7q-cSY+#XYd+%UkK_&wrdd78{7BIsAo)t3S$n~SC0^7x@b0xVYZ0-k0>na;P{;!Ro& z^+d3b&YNFI(Xp{dc5gpqlf2n+S0Kqu_>|E7+fsGJ0Csvpm8@O4b(kjqf*?FFPu_)N z)FKMYV=+JPmb)P6|8@EyZz*q{uqk&syF{K8;jLxy18U>PI|>z|xhtCpG^Pj}QDH$s zXj3dETsReziFKrpfaQ8}-fEL~=)7j%37uYQJ+%saT7W( zX_t9lWG;%z6A=*}?c6IPdKHe9a<|Y%9&5JWQ(f_uSFU`W`ig$%C7L|B`m(yHG=!!! z(dy)Wl^775?+Eod|KW>+`I`beRTHiwP7~fIPo>RhOCDGJZ1U<#N~}Wl6Pd{J!Fs7> zsk`y@ss`0xky$ze?Qi4l&K0$Cz;Bkt8afIWSVZQZ9M1Lr_2c|+A=CK5fG%q$KW_p$ zigAomFySv#pLU_ROMWCzjS6k#I6I`J(Dqew!2(N;dw(oD( zr#<}BBFQ`ma5S3g$4k+Sb4YNF7y0IL1Zi4Z&eHM0I}7oQh2GxQQ_t~*Z7A-<(QhWG zbDBajKl$^Y$P7smA}NL>$BBN%hlzTBg2uD@hVsYl*`PH_ulaFFlB0@r$kT^G@WTC( z{eRwxv@td3gNrogdvy%s!XLlu)ueWEYDNnI^s~-7v+X;;;WvCoN)ry>=) z%X$^gOxoWMT6TUG&!#BrjCST1J;>}G-%7p1=othdtSJe5orns9|1SFY>&;_6hGGuy4heNO?H#cwHl161bw6QWDx6wKL7fh(u%XfukUtj z{Lqja+@*H^7ED~d!#=VLchrY!H2z}x$~$eVNme;uyz#Ahe^cf7&_=t1#;M`XJ8vx( zy~O_i_7&fR_mgFSzJAcvz&8zES5c3{)n+uJI7S-Ayvd1Va7p}~X61#l1H(iXv{4l6 z@2dW2QniaOWMk<|!KmD_UKM<$ha=>1jLb?lGe8cDkb9!ZH1TctM>wxr?}9crP^ukwjb{ylPBm+IeAqH?vL~J6%xLuaNL+E z0u*%)j4uG^NLgc?wb)lRO&cPkP2O$d0ja#u=eZ!$+BQvy-=<`lVEx6Jx2d}zn?rEU zP4`7GF|3x=DY?^1M!c0fQn?=Q#(0n}A{e=-kd5v)%v>@r#M`CX)ESAg^myJV9p zJftIY9M48iq*~l6EuiYeWmAIN)~dQsxC~V)Idv!b&q8}YpL?Vt9^GCX6;(8OO*U5Ie%#>?0)-PdVGySTj{bH z_55Fu?gvB{Nk-V-^F2z-7E~D)6-wiHHyUvS%%QH*ox<~$X0m>F6l)@XB#M=VO`a?Z z(tax`U}_|AU89zjKZjFsy@qjnrWD49dLWgPG&9>&&GGrQaan4yh{*fM1zp)kNfET$ z^xzBd$!`<~kT^;Y14kxKYJ}0B3AV#pu5BlTn$gF`--65e)PxkFrnp-@3Xo#drmT?A z^_HmLTGv2v;0>N)4zht(Nbt9tKpX!4BPiM0d!Yu~G(W(_!8DoDL?_QY*kr5IJvm zQP1G*T;kR3j1CawR9g^K0{pHZfzEDCW*0{D)8sdCeN%n02~iTlIuvK<)dz6}X{y4%*=pcYa-?6i^S?FgL+D{HrG)WS~gD;QuSCUZVQ~ zyu%(T49jWvK~k_(mb9u1|Js|=Fdkqh-(>wB6MJJZqzm1k&^7{U+=;eyzxsuvYij-q 
zgJzWAbTj+5LVyD_IQW?J>cz@ja&xW@Q|f0HleH@ab5e5`n{*&OUpMJ!8EPt4sx z4eh`ht(olyYF4-`7Ly(s20aha4eS(6NJs7JGHmUxpD>VkPPhTsMAYIh+`|O3&u=5| zuGZfoyA>)1NTM1F8>~b~3PWofl;f2JNj6&#Uz3){PN7zEF7S*ZM@>YF@0s0Y7C6km z@8Fy}e%|agqYs2d?F|SQc;o5;-D81YrI5k;$AdX9w^77mn||19o@3NTH`xfpX@Cv*Gi!B(_?y7x?)bNS10a+&i zE6%7BR77n&1`O7_688amXgtEWva1Hd{}Q-{T*(sccgCs`|IQ$IdXOFj_zHl2g1&_Z z9715g4dj)7(}Rep$?p_<9B6y%uBMd#d&3KAXVc-C=Py5f#Jqt8PlbX#w~^e{4qZkU z_|MZyfNy*0n2;S_7cRfgLcLf9V;DZ9Y ziydSzYylwH0W}%>Y4=@CJ*5?Z|@Wtb3TvU6X0vSW0I?4Q;g8Vh2(ov&}b_T7wS9tRR-MZWO_gkh{xZ+icqn| zLjh+g1{#X1b!j{$VbjGUEJ0?=*v;7_iC68#X$ncFJXz05T)A{uZsN|DStX_Z(akgU zZ_i47*fNwX7HENZ(H3hEyB_u}$Re^w)n+lHDcK(2N3JmXG2d6|ys!gA_}u;8Y6|^{ zS>%K_+Q0(CK6W9>uxB>)acHb?(_^pL$09eM9Y376DRP-P7vKe}{eN{`c|4T+_vgBq z+h*EVW-40TE)vQ*!&RwUXqAu_l`Z5VV;K+E6(Uo))lJNhYpW#JGLk)-++r~HZ7^eB z$2u`H_jjHdeSdvlzt`_y%roaX=ly=45r;eE;ssUhn5_63Wq zBy+mH;9alwYk$Fc;cxHwjP8sK-Ka=pHi`UQRaXNyj=8gU(%JxPsx$l`n$LQc&>wmJ zPcdGaQb2XE#zLrUdH)#m&d;vg#sN$ij>01}aRgsVIA1Zd^b0N_`k_W79PJVQkP9e0 z>_dT-*vUjwFeuizwqSBOYBt~~K zGP3Rqs=8w!vWx=stgI@x2`c!y!v&-;mumbm+6k%RLY5ocF zeHp0cM7^dPLu7SI`0{v`sOSpnKid7J7`ofAsbVj8a*0nMKYc+~h5`QPBK0QX4A+gO z9T^YO`|cFc3eB6yz<+t}*t~R`fwGzTKfXe}rYDpL{F}?$D4YE?3G7y}TEzRG7Y1W5 zZSvRR&}YSW^li$B>Nk6!MGMni5*1tKU3-H%Ka%$G?tvU?%lfb`y@HY#)T0&#OUxH6 zxHzEHz9G%>DPZ!3JoBxcir}bcPPJ&xm)^ zObu(VAK-s4G=ZfyT>WG4T%0o76teL>%-wXGjit2(h^Od~e;fZa=^;Q|77n;qS9Q)b zJ@j5L8@TS^L{#3#9(ZQss^qWR04qmcEqbJ}aBBPcXl7bQ>xOGKLr0r@xy{Ayd zwBAeQ__+-mYDfVNDq~vCEg54QcIDUh$EMKq9`<*NdM0;MXQ;J#lGP-hnc+gfh z04q2|oU##I!xpB5yhGZTMT^#rmm5uCqXN`R-~|)&<;djSK!Y#gy;&?`|6Q~8uJb|I zVA^U8*boFm!V3f{eB|@8KLiT=Fbzr+RlQ#nD*$@^^P|*UnTdgV6p@*vXt#NHs^BGD zt=mj+=oF1_8?ax>x0PfKV+hW|B#kgT^hfIW*}-l%(W#S|BJMPX$`<;bR)6p={v&fL zLz?&lVS7+8kM+Ss)S<6}UL8})4Evoc@Zg0r-TszbNqYP(!|B9>rl>D)TZEN@L)FA? 
zA3lRN?=eCDS87kHTX6BJDX@C;T9_>vut5pwT^49C-Ft_;l;JBUUCL=JSY0G1IJC)) zZ)>|Bx&$S+9z&4xH+H4T7hH#aPU;3|O;|7G*Ikxs@U}{3dF=d+f|W&B|3eGxi2}iO zehc54B1kv|?QMQNxTPR6{Tx-DK2{lnB3KPW_V>tBSk0_sZB#gp)o=O@DU0KW!z!C; zUKBFSDr*OW#Lm~+8p<%=Y~M6O-M|q2nJUvcdw)N%GUb~6j)EWjas05Oc=$3qYB5jj zFVNgLehB3mCnd6YVr^LoGKhxD%=bai{AqY4JJ?nl%t3lK>wr0fob?a|8WD|Y(1c&l z>Pqns)db&5TqMIb!KC83wKeORyK{AK6s!{9_@NJxZeP+T8Bv_X^XB|B&~wAM1o)Wy zSL)CN-5kBQqKEtU{c1R9#L(6ubi&}Li4?*P90r_?H=h=NT`-g;8-F6`lFApzlrpy9 zc|4YU*>`5skfYH~b9nSuuw1lmu6%U~zn`<7A=D*V<0*opY3zxVqV3nBdCU)okS1D4 z@t4DfxVh~4TYjh4#Ls)rWGo?KL_6##U`so3kcD10nFe7lixqhC?&D_$yAd&>p#9@S+TA^00dnTw_Y5-X_ zv3kP0M{$)oM8-~3C~?Il{u%bFW*%5a1iSV?y~dC5|WTv@sJH zD`g1Oh?`;&R!T@7P-mr-U$f#vtgE1IW=U2F_k0jiM&;ij_T`g4+I@|&FEv z(aE9$ETrUd1R~9A)VRW8rdF3p7rXrD{RAC8gHUWYL2nWT!qD)PfbE>!99pD_F#jtj zgWgKvgnM8fm{*33d2sP+fMG;0=cmPJnu>ja-D(a<@k5@Itc!&usWR+4OJSYViLMDG zG@W!%pv(*lPJutbE_hDR-4=q*Al>Hv#_~wn6z@I?70UpG2c9P@c}ic9cZLI|u zRK@nOCok$>#XvkQk+Ee@MC4c7oe`AFe(8Cwsbtq z7%o+`pCOcl@2vEA70X*DT_Ntx~Riij%`na=Q2r z1z40>J7GWx=_9AOMPi{h`YBQge~_alkeMhF1CVdDJF2q*2D3THba%diM^jH344Zzs zVCR-3HG%TKQek@;D3XHMLXa8gxxV{u{0@R0C2ZoG5tp_MdFOxJm>-p(HI~37zvvl0 zSNQDYTOY!w6{{kvZY@}7KR9zdJp--;6M!V6Zm~dSf%4Ge`&8PC##LZb3uR-TCf2)~56=E-)~L z8p%?Evh&!-c)5-@I*Qv^ztTE{ju+mPdoWVgJrfnjL5oG5u#%@1f(^@|EpwI?Ijivx zdns$F{iry3(In+C@qX)O5#@};XB=U63Y>`D_%nt$KXS6awadoC!Dc+wP`8)XdAhGA2cGP2Yftvs z@J2P6a?||kl#TR&v!xYj#{ZgIbyhM`rp)acgu{v~#KEU|;a@4&1-D08#L`(f-Jo~A zJJb67Nu;zP&ci-QU}L(~nqIGeJfZ643GSdr#q*0MZcd@``9V1c{T?f{>ZDH%D64rg zBThTaYi@B1*VNV`$t#@hxwxj!*)MhKV@{Blrr%ZawU{ofvF*c-qocX7OYuv*oPG;` zn)H6O{{jmeqJOXWvZ(6Wf=M-lz2M#EpZ-*Y{o7txG()jZ7T8?eYL$zAzv)Y}5zlvF z*~kA(A3Z5-cuRLN?d;8LcxZi#yu|k>dwm=tJo2Zxd6JaROy5)9ucmm3KV;KgISZ$K zJoTQsj3p{qG%jgS^n)`w$G$w7n#SVh0+RprS@vk0hG_Mr-43kkX9s7)X!v0QdzQWT z1`<5-bhNn8tC=grPQ_2-%?BK0s&7s+BOIR(?{gw3)$}*KmXA{Spdr8-K zj9Gd+^wBX3&mS)w=jYB7?#a=5Oy$_;=(oI%hfA!$JOZz1x3*B`{t~<8zAx?l7ZyIy zeHaEg-rqnaK-*Qh?L9kF!n(-=aKsEV-m8QO{hV>(yU`+x&oHc@MT5c_w`}xvtH`d+ 
z$#WBccV2wv4xV4_@I>9Z)uj2NVQXSx_&H1E*^O^j!+o)!Kh?UgTC*i@K_Lu|r zxXQYe2o$-sddpL{0&=ei2YTta zdV2n)ib_rL7B&i3*>suU7v`#=9x8=(_77aX8+wqFg{x9(!W-M(iwvTj5~OXqpNRV{ zP-pFR&I}J}E6(*~@(&-g*ZC*D#Admhii3!4##TYo;o?! zp}T*{L*vp~M<^STt>^kZ#GH|D?3!OTeWNQDp=OWx^G9Kwgmcwin96nJo?8n4iE;3` zp!%UFYfx3`*zJ`YP)2u}{*$C%GVO_ii>L9oW)^^rQ$Tf`Qv7dHjIMd}LW)A+_f z{myZ4curf)}a4W%Plvaxi?Bl@OdpYtp%$XN&44kIjLN198Zw5Pe)^s&ozU+1UDJr+GqxB;kHl96_O9&A|4%+`~^O#P0nYi`%x?3w8vUvELv!O#fGD%TWDi@aA6msi^g|Isk-LQTSatzeKvg;xlaLQ2q1#JE zZ&>o(Ve6#(A6vOq3nq;a}G#R4==nHhHT(WEazpH#!V? z(0y7Wv>|dYWin>}njG*32Ji9#Sxc4F1z0=VJC;$QZ;~oI4S0s~;eYcc`Q>Z{`xCge zUjh=R2)tr?QNjcUYr`o3?*SFXC?1*Z5n?&mS z=uD_m@JxJN(1#Jf8cb}bNG7q*}xsA$795t#mr+i>@3x=dNf>N%dyw$LKZw~ zxyqXp>{1~Mv*2_G;L2zwW{F+$mBNdn;#DH38j({@4VVLOPku7v({cCG-=~6JJvsNZ z8HFyY{EQaYco(xHZ?AMbbiRe;?f@F8Cb%zNA{Q_WMhmmo-7y)U-9Lr)Fy2hd-#0I>M&R#A6R;LryVCbiO@LL6!dk z0GBj<99m-SMpWZnPPK9=<0h!kMxU%JQYjL*Z*>1$vfUy5yuq9-l_2OzNPLh6x3IAaajO;A*DlTIi84eMi2kMWBg`bck zhYG)oMR#T0*bcFvoS2~RQ-Go}^!71G$Z5oG9B;Ipx+=dxVCwpCz@y6jrAkO}o6(CW z*)c!#SaS2@;=^(0V$*ysP?g^AsU}PNA|9&q90^yz#BIcEg4e<1tccmH6{`&G6@pe)giz`7@72cUP~e~ zBHjAua}*?`01G74e~(cFp0EDGfydQvem~zzL;m+@;IlN8KSyKGrrrAU8vWqvXOhVM zVBqPto%Ab5BqTz*s|T`-D(wyuk|>glgqXS;^5*nyLo%uO)!kyr0G96R1)a}}Q@pny z>DF#qJ^M!gXuSQkqn1nAoy6()A+Pw3uOXyd%5wD>#|%HZ85F zO@Qv_CzHE)l)HsS>tOD^V2`Z&%!m=1!@e`p4mCaC?{Fi+^s8;2>^p2^R9sP{|MTzv z84sSL$*)HIBGalXdeJr&=|kWWU%@))csibQf52Ox0vzDRa`*~0la0*TDQr zm+gG_ua&@5nt|R&g53iuH6avNZ_~XEsMh??$TI0rQctdq-gzI92z*^6Jtiz7bw*~= zNcGL-m=7yTW@+ECZ9t`&E&M>tJ9HS|Z{Un<*YUyex32!h6-G^3P-<%W@g>F z1*3~9ul}->3V}0~FA@soU;c^O-%*#GJ!=gP4!&3)3H@1@o&Ggf zb}`av*xzXvMMlGvCd7@+!~6S^jIPKikxLg?X_tRv+ZCzGzP`rD_PHc~H=+Lz2=wde zLjGI?>K+-dOJhs!ruF;#&XG`-sByufs(maJ4TMSwv^U8hIvz=_>hm~ucKr<;2+j~i zmXr=P-mfLRY0Wmd7S{c$0N)Hvf2TF95d)tFT726>=aTw;%>Lw5ER|Q4flFURE@NKEqgIal`1oF*-O8M$6AmwqcE3$ zH0{6ink#KJ))shx5s|Xk)~ajT)}z>L`e8&JEIWOnnjSnV-c-a9bua}wq%dH1j)L|o zzG7djduIO(rL}nAQ`_+`^`6?l(56g+@^N@olA@8_p#Z(5iRxn>HmB<@fqN2%U4$%z 
zj3n5}i~*nbuC5GmOk9;+5*O=5!H9Zu2Bgf}O{=v_muG=oINmcH{8#7XH-yia>T4|d zQ$7~Uu^PSOLMV!R@f|VaoLeN9{^ijlO#@j59@Yeg%F5<3r0wiOj-M*UnY4JtO(VRi zkFoo!S25=)dxBp~#kx(CYx`eogrYK-ZQ!1Ib3*~|5dgL%lEmHxr#kjz1cHU>>jp} zJ!+i?A~_t- z>@<$)p5|CCAGSNE8E!f?s|`S(;&oT-wCo$@-`g}Dud;mTmy#m#+4zlYmO_UY1AC3x zR<^T3uN3Ff=9d7@wINw9mXoJ?^^WbnkaBIloTY83=P?aSz2XsU;)wuk96GuAesoy* zC0mv%%=)Ve!mV!EARGOuRKCs&@N`d`to5bLi)jp!zq#*S0wB)PL2Aa(ScWiYD~J2g zuwtYqY^4!B7>Gjey>A~mOSfsXO>>3mKYZpUs9rDSUH|^!bGOjk*z*UTmFPbl!ce!u zVmX$<9-{sZ;!_hEmpmJN9qp4&vmz6N6By`D-FZ1J0yeZMU3bX zU3~0<#$06U8wxt77FKyKDKqSgiqH~{ea9|;oY}lM)lK<(G{IspwAme*R;_~=ZtK@; z^03-Q7_SIob9?tf);2RZ%(|!AW6jL&m1t_hg~^k%#k!~ZZ3g;fwix!v6o}m%B20g1 zEsm|9HB||FMdwxKFDVb~4mZuGe)ErbeQq$zVx7M|^4OIv=Eo1C%zVUhN#r8UU&DQH zis%|TEhKBJdh2slw#~4GFr}Q?Ny>fB8sChrkS2ar;#Wg6P1IxhIbs7%m8Ykn(dL%k znIp7gmV;HoCH`aDrQJ#v?Di3gCZuF-*n%9ijvLn2W$uUfA4zpf{!I&UAHMJ##*xml zql87;cqXIM`9tlwmi63c{ZBzkgJndh(X)?8dA-%7jD$A?OoJodJ=tA~wUiW3N4M&K z?>8-nMq^!GQFsPzSum#l>kgx%MdMLv4^yo5PcZc0yFbF~c{Noxo_`=aG}TmR+ub%D z9`n|e$j0L?V~Sc)cv!CG+S`IiV&XoQvW1od-|0hZb(3Q{@BKBNiFRYDf3_NU%Wt|w zjQttSw$dnj&F{Pw<7q94as`Rj-JA@DaZ=+A!3N2^j*6uBE?CMk5<~d5P9p& zICDYk(xb-586X^K#22LivLMYWom=JFKM2)^s5*F7yP#OsbLbpGNvx00XCDkkqvtJY}&0mZ#+|C&y#&{t9h6vMNuS|_2Kj~>Z|6aPj!4uvo~Ck z>qeaGJ#1pPi#jtFs)*3@jHNq`oNo^DC;B37^U26-0EKB@YswQ zT-X+cM7pjwJEHV7P?6_T$Sm2`f}^-t^^4cgnj8ouvpHk%?@3!CN%S#*&s~Vejsj_I z2Myl~ce;VJ&ZI~hYGrP*z(zGR4y)9SdQUHB6nu;eXb&rQ-lD&`h^;8Dp#d994>%Gf z7a^x79!~WhBs^7fj!VB~abJop!DFo9<3!TOpz0v**^whJm$|gnZL6}Liwk`{pQKv!+t-CCzcNC`YTrfB7gvGT zq3Eg4iM3$1YoP0vJJD|WRsU;A5p*UWX{E z;kVLLg^W~^gE>;i5W6bpzaZvD6p0+2xeFqe%6n{?UrwS(`Vh_TNlPr8obW-<_8F(B zne-^|I6pmC3~%;{L@w7hw+f{PTVa1=_c?UUnsXWjq3m}Oi^xm}%N6OM9tCwrB0VmgQK($qvj zV|inG7An$(5yC}EleIokP?lL?v_<*eC7a>oWY8!boVI)V%A=X$2lK=DNe!b5T}vj4 z*fnThm(YW{*@|P~WW|G&Rty>;ZW6}^^g!<-8t%*1C>?JZS6BzVcVW8x=%WF{49)JY z2SRWViPpti(-rjf2D);HeM5j_LbfIQUcf1T(SXe+`_Zrn=?NMQf^{*gzy+RBYTroa z*nZq}E92U{yhnxV)Hfqe=KyvpMae;<5cl-CPV}bL9iioW>LWFx^A8P(K|Yxq&zquS znnC>=bv~{d`m;ZR1DM9jw+OT206v6<8mfe(k} 
zulw9pdB+uG`aWETfRsB>fZVG?`>FA%?4TYYGn?Ljg#& z*3b0O>HHfBwr+z^2HS@Co|K5dYvJN8;Q=TvsM>O7n%`k>^7Kr)D^L?=!HaDfodNHY zpKw`s9@{vGD@@3Qe4GdiQt%ef#`5M;Yl~&C6VAY<04F>If!hNTUduZ}Ro% zF%T>ZH@faOjK8t~@#g)4bKAb^8JTddqioX8hzXN3+J9TXkKTBHSEvtus_|oVHyPGl z?WVsJe3pNj0%HbQ2k_5i;+&7$6)SlD)TC$r?)6S3@)gzQby>CvgR(G4nuo_AkA`=a z*ejr)ru9V1G4XSasOcbP9qZa>7<$TN)mrxONR3`G-?Xb>gm(50P1D(%AMB$x(Fs1N zxf@Xm!pdBF-5(ba8*z(tg`E-3* z9C!}>YNh2>sW^1%Txj~WnJIlkhQ~(*D(1z$q18Dd&hN4JpdXTEGIpdko%lIs!6kAU z7A>)K&!DMywJZ^3Uy8ZBYMI$%?LG<{>}_brSxiM?&9v*30;M+Z5{RV1ktU^_m$yGB z#`a?$)C1~jMoDH^tPHpF?KQne>Zm|Jo%k~!eDdXDXSx{vQi?%SaSS@}qV7^GS=QM{ zaePkCh^g$J7Q%6j-yz+qR%oTtPBDfn$vc0nnJeGRqf;SBhxf$RB>PN`KgjG_|ENk- z(%T^PB+PfQ?>(fA)`?50%XI5gIE_^K7ItfI?>NGEzX$quz!^D&Zg$A%Vht-qm(Ae^ zg9HT(OSl@-@jk&XNcd4)K+spIF(9B+u7Pi3Nm}S9qLY#)_!PGmEuD7V8_VF&5o3Rq zqgGwC)m_|tB(t`55^kk^-izE@U0fr+UhJvJly`7CPdRSY7o#oZB0BuC&i(}5U|K=W z#Ue$7YTSJhzPG+U=bKR+snTbzJ%3uqSc1mN{&N}}VxFmO_@H6k; zE9{*MwVm2i&w}&au$(nDYD^Wnns4o!m}(zzC0St zTh-0C`wmSz4k_v^ETbI6MAsWAk#)oi*|+Zq4|fH5D8Y zgnM^#ek)5gb^DXFlYfhEeU_8?_ISaCPVw?6z%vU!o* z)N!`)DM1h=(%pbD&2Tc*EYrSj6l4a{@w5_M!|&siLlH39vLydCF(AF-TMh)DT==f{ zIoMuiOof-+R-wH%p<0+(Bro~-v^a=T)8Bk@D(cMhoSbh5R$CF0=v`yceZLOz!AWD8 zPldy-L|82j^*UfK`uO$%+To{M&5vy>8Q%oM-2)3CY2U0*k2Up(wr`OX*du?q@Q)Z< z=PS#YwHY`sA17J$(zJ_TT%S8jno_Kn^v~d7&`it|!$~whS|YNQR8ZlW?hzv|JT81x zaUS2|DF3xZEEW;E>Rf$NWG$a8-%M|;{l;KzzMuW$No$Bw-Jy0snKh>1V~bg`I7%5) zzwIv(mw_4X@v@s6B*AwRsh_F!#7a-hp6sa-f5wY!Wna85kD+5zE})?e@HFzK$H2Mx zxT!UH*_HaKl+{oR&#qK~q)nSL95ygfR;DNQS||-;8F7zZHJwca*2|{*A#@gWkw%OE z{WisgJ~T=P272>t^Khy5u2B~1awk3HLM$5)Yl$`FuF5&WJ8vP`}tY zNxRMrj+sJHooD%#VjqK*YxRrH^fFGQ5rHL zG&1+AqpDJ+qNe!XzL$Xq?qj1W)XzL0e2uB@#$SWdMAz=9fA%IS_xMRCgRrnL_%$Z6 zAEw>MwC=KrO`@{DWceYqqi^NxAXg*XntLDOnX(e)G%;4aQBjEFxrY^NrOheHd^u6V zYPbbII6G|Ow{pl1RXj8SN%q}}#20+dn!_+7n7E0#Sse2S)sx$t+jTQuRb`hR3~mxL zaA3%I8^%N6C1;K_?zyz5z_1zbClV%Ud6>O>DYFpSI{DDkdq>8_!OmN2J7@NL$f?md z`B>efB^`kZ-waxAEy|sQ%3HzOl&Um*x3Bds4^eyQsMb&-Ky9?PF-I>4mYj`92vf6L 
z!|vbeJvu13*N4+P1>F}Swr$D~%knjR`5s-!#26*=^;Qt|Y#j7?UaqVHUER6BE?KSF zhi6#wePiJl%c)zEXLO6j2hJ}3=#M;aRo_-Znu_Xc9*$(?|?IkM+Ksldn#7-(l!^S7*CMHJQ&PO-_PGnv+l_V1wToteYA_szBD%zD<#^ zdMi@$-n&}|jv3i2;TNHs*+>D`ldEsIR`*$+QirHQf--zW_9+pgI<>{6YQ8sPzDFV# zKMH3qa?14Z(Uf9DXMI^Hj;UG4SAJ|lJ@RKl#BXCFxk-D%(|Y5vFW)_e z9DbE{w)-BUn}hL&B>b+8L#u-sFRygW$(PVT!Nydm=J8VD))3UKR?>5iwDMsQQGdj< z8F3?luWMwL*TFqZD%|(Jwe4zhKp!Dx;!bSEfGncN3`@gDq5XNGr*7-|K;g3pl8{%& zzKb180nWxND5XM`wF+IH&6DPH9EPCgEt5jeD2n(E0n2{6rUr*TyY%%u{N`c9#ZB*e z+rrVb1iTJvORD z*SSn26csE9k>{K}VCM!5fm@4SM{Tsz5}vH{I^=P2 z8GIF6OhtsosCB%X2z!rCQ@Ew^$#!)iVAyd-bGXz+^I1RfuK?N8z)07_>dTaa8@_gEwLqS`icwh zIG;;9s(PEq6+~~1v$pn{@$^)@Uoh^RI7i#!fS05-sE1ygGSvZ8mBXx`yXqMZHWSxs z`UBvP@}aS!(Y{sk7?(NiqHc24AwKZP_AlTNZs%q7dFdb8TPfy4{F9LxWW4;Xd~vi= z03|He!Sq{QMD+y)$-V+u&-#E8vpZhRF^S?f*71iV<>%u8OB}hkSHnnO3z}|Pcq~Tv zrVp9eUF4b22P6nnv&pw8ZvRNuG zwYD$?*UFLA$C}FM~A1Hn$-s)rbe3Aem2Z{?yfFJOO=t~1Iv2C z1!-eMBn>{-|3Q(PnEdoUx7}0v* zj&iRPDGoj6o+VYChQAnd`9Mbcyh;zxVfZ0O4D=ltbIK+ShR9bjX#W>%p5|ytkv>CogPcPuPSDOS_(Kr%dt(Rv%7JLDFW{sr6lct{TB|?h-Fm5|sUg zXbX=fyoscDCuM;;Mvj3x-{&2Te;nFk3JGYy*(+kmu5Vybxm?lPKK0eFF+J?(A#Z~i z0XKBLHN5hKI-!Ugd+;~rN2==ja- z87g0%5-LC)MKaS_42CRRnzO@j-sv;n!w7cg%pj%==j&(qK(^8Af((_Ihn;eMh$<^a zO-W`Cx&_caUUm8!Y<rM*O?1Y6E3~a*j#z`+*XX*?Oc*sC&PN_)|YJTrIsS@;@)FAkwG;*TrSV2%VfFD z+jrNrAk|fdkjSXY!HlN2F4_hYQJj0G*aZh@$h;JTEeg>z0zyy{#_^{4N7xT9{0R|o z?@#N2%KID~FDXaLL`IO7!!q^I#Xh7P6XX3^P_%;%@ZFI;Qn-YuP`sdxl+?MmB4Hg zHI*F08z}&^wZusXHFF9>h3|ao$RstQH;)HZ^l@`mDKgOMIViW;KpQ4z@J&7H7KhF^ zr{DY(7&(a>(V!elQc{bAZZi3+DBI0&y;!xc9NeK+(i_O zZ54lkQ%q;I{(7-~8AtdqAKs>i7^u)=t$~YO%A7AgB%U!hfJf$vY_YDn33{H+9=(Li z8twBQ98w4}jm33OY%fjV@U|DCAN7~7*NZ`Cr?QLGDmVD4N1qHHka8zqJ}54&3plTF z#JmpOA^E9X)f^P4sMfghm~3~x8fRL!K`OJDXDjAcNZ7{<{G#~Q3i`_N$gy32FnG~? 
z=S2I&IMg(+v9%&ca`q|{T-cqzlq;HH(UWgg`e5QB*_z)HMoIfx`&ObAm+rD;UxI{f zIR6vxjVxyFdC9v?bR{WXoRjyaX+kOo*3O^NmDF&=TDB-s&J?Cb_|Lqdi4nFEq2}hN z)}r<5mof^4ENt7G&|j=OA7<@axF&7C7T(IddwujzryHzGYxlGuLVdh%{kjsp_j%U$ z3ZHNw-zCCijK>;Aduh|hG3>U!Bo^?Av)H;FPB#vYID(4MS*W;}2Zq=focN5)t%wWi zV6aRi4@1tqg$$&$@0saLrW<)rJJq$&{)tOUoKq}~Y~`)873t1(^G%+@LGFizMFU!GliQV^ zCJR-rxhp;v22&e`tBI8ZtKGcPldVeklx4iOe<5Knv0u+Ld%Qqb(cvyX(VUXAmT$LA zn^-C&!n_y`v>ssau+FB93gWAd>Tu7J2@!7MI=rD5(9V{x{4T9EUUTMR0TOs2wW?R> zmtngx;jM$NmpWQrI90rI`5wQ2kiR&(Dh7V26UNc{p*tL^)f}_d%05|g<^;CRueLNi zQk6N<{j%lrIC*+tpPmIyjGbUWrK3@m-81gX15Y5^&ZfBh62g3;`jl<-RNxFlh_A?o zqY-*!Y!-RzkWbAn{5GgQ6ETS7*MSJlS&`zV0+-QlIc)WAi|Mr&4p>IPhWLLBLWdk8 zDsSm3fhhYuA0BS}eEmYtObd*^Q|AmdySB;9=$G^eY7>TaB^ZiE$Ojyxdp zJ6(R7Ih>S^oVi9q7N3Ys!f(hG6bfrZZuu+0vdJ_n-{TZbHFD?oSDH7%gI zwv$U~72QB&z$sSas15kOfogVr<5XGMZ}rv(RN6Zp-*d>W^$h=f;r9kV5FTCFPAjAH z_r5T$XbS_-mX~^te!ZssHWCRsW#X4Fz=uCgz!s|4+s$uOqO5@ebU%zwC+pJa{>$;H zvin+fGp46g{Il#p7VUp+edSN&m0%GS<+M~;zg8dq#T$OXNXXrH(XQ=o{b6LFHem20 zUq=G|oZvcqMGT}<#8~(@u>iLcIB(92T%12AxDNkt0s#Bz1KR5&cwevy{PkUAFN@j5}Dk;RlxCf1ffCMe>sd&MVm?WAaBB*X@3e00`p$ z1$QE_)Frib@;K|Cobu=5{h{VMrTX;zp9+NlP@#zHyLY4ViyGDd20JNFP`Hn+R%Oa1 zeWPlP0PIR9%oM(fU0s;C;eE>C*U|Y$>-}=B*u^3XzKLDocvMvJU#M=JG*r2R8y>73 zefsAF*D;KW25=+dVkp;r|EFQ`l`to`?)fWV`bW)d<86}@ebkK}DmqB?09l5k+HFT$ z&?54tjREPu{J6F$Z_@U41=)XBV2?yWd-#YivVTSUrZsWdkn8&#GT*#+v?IH(99!6CegvMW&m)IdUVP-TNK(JH75s;8eZY{aPGFa+7mPV?@0gi>)Yk z77Oa$yixg)Q9m%3I5qaG5B;Cu_n+CywzP+* z%#EfdDVYYS5&XIQ`1*epCIG(|9o2KgK1ExY0oeEd3qw!i9G*~(88;b%%7px#^_W!3 z5B|ZW6TrMW9WcM5kx+|tV{Bx6itHGa{hbBi{}9!UAZiGJ2f<@e6F2f7h&#jnV)!@4 zohwIM$WVEsqm@bnzf@OX@$FC1(Z+FUo_v434_he=xT1{!K(`ozc{hw_)u@tw5@#HZL(S!swvvku` zcV$ghqmHTZnHHdPD%}$#{5(F*r;mddes}*7Bocz0Wb&_fmZ z-b1x7tsTwbZ9j=J?7N%0qBrOt%rU9oAS}@|AU<~kfSUFkv%&bIF%M5_pf|wH2GCsH z!nO2b4972HPX(NxoQG8kaG&hY=;?`pt# zZYqcFD~_~wVYziX_eDNi!ROja$97t!7>V}I!(+4y&IF@A6btP=Z=gbOG_o678F1jP zl4bRMzSuH8krUDQF%`q$Y4(GgF2!&|1>iROo+3|Q&2uSS#qP(=O7PB=#YP>H;#4}I 
zg0Q?uWbSI6Z50_?;Q+7EBuQW!YMHM-RDso}Roc*kY(X^Yva*Iw^3RFJk z;(dC_Yd$ovNXWM84KxDUdoK43zEgbup(k><5_N5}@Hc#IVgCnbCQbiOI1|mKT`v|A z^AB3Va58fiP3an&oXD`~zFaL0Dhnko*Mi#)NpUhaD+}0cNrP-5RZw)@d5UodSgWts zO&ogo!PkDbhhUPT>1XZ+6B^hO9@P!Ci$cU7RBpZ9cvuRCe1k9WlYDqw8UZ0)LX zVdxUMmKojp>Hoago3X%;%p zZ5g=@9R@Xnd`BgL=M3XD^Tk+Tx^<}=Z2zhT#)5Ys2n>;-@6K!pL`QQsHKYX}9Z`ag z?p;1|OcyEkqP@1vICV&Rk1S*LhEj=#(w;VArQ&kc&py5nhKaMAuK zsq^s-qz31RB9Dc0)hWI{<@gHzc=U$n)v7Z^w#-6qtkk5r?mHt2D#`*r@p#JB2 zyu&qzG4Y#4XYawu-ti3-8h<*h1PiV{S&IiAI=9TS<(=U@QF zp(oK_61~46`DVli;$ai135w*CiLEjV2%FanjMP%W;|GpLcEW$V(C^BnKBz_lNqfKy z#P?6?Jm2y%$5fSR7=x?Xd`gI|twkcTvaCHOf(z6_+?)HF&P=cTX2BWWqH(2R7Q>Br;xVue!NWZ{^FYIa|35E z2@QM3Dtdv*53XAF#zgljm-LE4?`P#AQxZxJFANDJQCc6F6a2XsK-MShCo(tTm=`oH znB513dHJJ;8TMk`oBchmWlulULIL=ne0t!@PqM$6l)xJK0TdQ&Deu6>1@0zHUw z$Z<^eI-k~&t3@3Ar+WsZm0X$Igvonr2R}=tp7;1%`Xlr-2}Pq28<6HPN@T~w(ikH1 z+)dvM4&)74^dUuB<@VATumVd{UB; zGy21=e0yKnv)A= z3H-~f?9{w;;f$iXZ~9JCw2r&GYK!XFk{Yz97eDw%k6rO42Qq3bqO>DfyJ{q(qVSCMNO$^tK(jo%|=3B~L zOpp9BHxisSJBXYm7EOvYA)PO~N6q@fU6Z*+)iu%khjgBUvzdQ$LxO6qslu^i(>}M; zAXU@Wd+!Y7&NIcezqZgT;dtAtf`;OaJcc;>GY-~bj#;vO zY@QRl%zfgenE>auX`FY}nA*nN5^D(|U@m15LXCc`Be&Uc?~8zs=jCXmw3qlml5#$z zjMaVriQ{-~CuaJi0K@KogNhrf(@4e{yj;NKBwc5#b!JW10D71yLUp+^FCf@1P0jA) z=LJPjoDw{{|8Fq(N?7vQxig)ICG+&yOH7gB!uZaAM zp|xif7AHwE#Q%n+hhJM72R6ftg&M|PHaw!V$JstRs|!jC$&$H>`E z1rp6Lj=f?T9<x4;T&rfyF}RR3!~$q*zGHI}>T$1R8ZIl&FzuR5{oR(gZ6Ka3-RIa`!Qkfi>aq@?$D*@FD{#Zt5y%$NVR97KIk zw$GuBI6jL-L{rk;%xWP1Ag`Kc$7dQJP+B*@q+)a`~IktaN)MjJu?h-KjYRP?e?XU~$f#JK1+852cP6r%i$-gX3 z0EYoK=?j=jh_Fe}cpG7I%AYp*k0F=h+QGhWpZ{!RtB0HY(%C7kVJo$Gm-xo#r~;NL zQV(7|^El}fSh?NW^>gYG$=iQAYod!N+u)hT*?RbSpP2;Qy#>r_$n1?QJWFsXl!5UI zJoFJe7XV4b9Ty5}Fttv;@|b)(*{9DOV+fQhH~;rOBulJw7YqH3A{#tn z9<#;0$r%lZ0gu@w@a3A&Z@EjMX}LDM)5r2^V*ZjSqxlUmM*@0rVU!cBpC;660hq|$ zUek<3B7TXm=I6~SuJJl#(+Y^zKi!XAd^^~{Hh7}ZAjospl-U6AdaSdaN&~+ELl>TO zu|Nv(<27N15`t8uRM;n2%Fz5HfO?R$Rnj}r;;Tz@WJI_Ed}-N&-w+I1%;8@ylQb{( 
zm*3sEN3da?G5wF!GM~)Nq)sE8Y$?@XlTwh(DbVTieZxju%G<>M2CI6cjZmT!WljrsY0s^-p<#!Ds+hxG&dmkxkCZELKo_}xDDDi{hR*pywB4x3G%q%IQv zic$a<`Z}e1_qS@ixoON)W@w`jEu`N7u(`uKI(H|zQ&!runs+TnP6|w7DYi7)W}`$l!WK|zja&n$ZIodiUkc#l9L%GSloe5!eac@Rxw@Fu30~Gb zo*%Oz3E4WDZDiWMW52ewr4Y4Pz_$RY7o_nOMMbfh-h~O3DpbK>>w2yYhw}n7IiH!n zKA79ri%uAMxd=E3rae;WeJKUsUnc321MdT9cUmb2hSz2kivTc|en^=vZpq8=$QKwE zU{K7D=}9^?BBc+Bd0go=K(A`fBu@1l8Q!!t(>#r+sewCI;S))pHEvJ=(d5`V%M-Xy z&zz@{O;mmAOuV~brLzxm@tDu3t4Jda* z{s^1-2nFr9w@{A|p$}=9C&tU-%Z((R71Y@tv>n{$8)4bc z3XZ{PvJUOwJ8o1?DaQ2By9=oIWTVfeDP)7X5w=oIAYN~=fQ#w@y$#+UI&3;=7esxo z_!!%-WDP#6C896v)jHM1e=^AIxtQ2IGoNp}TU%~?u^R_6?k6Z#zJ-LNCWFOspOW!KM>e2rT( zq-x)cAt;teTb356ae2-!s<5RNCwPAZTP2(bcsH}yC0vqY@Gm?U6S#`F4X9eKf^1ns zkK0!2hwNAKN(liCEwIH22?_x&k$^-4U{k0vN&<+E47L0l3?Yw2N7X-yo}noYi7YFT zO6{}3TtE!9A6-E=oKcmJVBKAN9~qj78c2ue3`%LiIk_1P!lIM}42#_Oe8kM$`G0g;Ycy=ff&TixnVtp$Y+8^c?(tp<1aqnXMkkH-_J8Si<_zTrx&I0cpRdD`nb*=NsR%cE6`Oas@wV~DW`t31jSXVL6c1&nvfC;?S#-36=5ik;WHA~g3 zr_Nh)>CKRzxQs-8eTZsL5i@3$t1vS}Dp!p~We0@_ERr)QkUR9lS5IUF79o6BLpXtj zK;-G%8rwNxwQ+rS)FuaL{B9j(1^o=5Q43%n^JJx#k=laN7*tv?moZg%cFXJ~{0@oL zW>2*DENNCR#tgp{h4n16!Kq1r6-3ndC(mU~ciF+$+W5KGT70$_G}fL=ARO1O&(QB1 z=U?^VY%`oXMbE$qXZ`ThSej`ta<>TdEPo*3S0QK2#ZMuha6OVmIa?o>qr0`g`ddL9 zAx^22E|ihQLKUyP>)j3N+~Zdanv!ICNH;orJBhe@q9^j2%L< zQxsBj`MeV^&}#w`@s9I*Lrqr5G`U|Sf*E^EU*q_-Vjee17ZX=sZofP^O4q?CFg=&6 zUwWU6u{72j7Hj0>WaN`bN^mk7z}q{0x^?cyL*#q5Cr&>kRL48BGBp!iV7dZ?^AP<6PxA zxDU(l!e=hSRWM;Ik(7d!M$*XLscS1C=PB{jPWb4Oa(mR`05rz~0({)Gtnh66=7+(g znyc{sk7Qo~MwA4*XMF|!Bt_@Ggm^kx4($vD)hgJjWqPkvPcD8Z>gyMZuDm=nSRo2N zNO6Ka6=NXOyji()5t_-50=5=#8cKXL8#xjb*uV_nY! zV>n;2HT*T0EYbz+;_k>07L(P@oolhO!4MkL^XZoY@8!2p|J-=KsbF%?dxr`Ou+JFJ zE`Q3X60yH*t}dQ&at({obRq9CuBAg>wh%h8s)bw*4CWp%1he#|O2>7GqzXJIMm+@_ zXu|+C#bbam3&Mk+YOnyt6uEwIX$S5Ep)`4wWAIP>U+w~j%Ybal?od_q-ED~&vUF@@ zN&5vv+LV+22r77yw<$cr%bf>8gmLd2c_&jldvs9_Oi`9>_ne5w3k-mzJ>lJIOBrjG zbv!1O4S9abys|q11KWKhEOR}C4$Km%u0T>-e4h6mYE^x)Txpy1h=B}Jf3YlAOdk0% z%z9KLrFJL{Jju)D>$$PSa9|RC)#v-90yyfR-J+poJbb&>#{2`v&%D6(k%8NHUh%F! 
zI6Sclt7=ZcBJ=14ZfP(hlGED%#i~ee_I#SBR>7=qbr>nt*3-n69u&-){3{F6JYQji z-gxf_cJOpolk@2tO(q@K&<@BiP~N7AiT-lsMmDqdPgqTP-z~Q(0B*#-o~DA~$L}j+ z=GW|k7!>{)7BGxMNo{Uc=vdk<-0iN6aoB&4?O!+HzE9sX+i%-AI@gNAGnh zw#+7$)S?odOS`2p7VQEbcC3k}^HsuG-h(TjX95$}Fb$=Q;#;c8t#p`akZ~BYk6LtW zHt#)6SI$yKzOQn0qBF7Lp9}^W)S1?c+`DMkF_U}wVz#dODMN6Cu$=%<2M{H&gHyU{Lta#bR#~$>WZ9n~p_@*wLm}okSMlju_2(KLqVd^tO zjM3;U);G`c7!E072YT;Ab&Mg~!l@UEW#5lwb+&7fzx=#l6`JG7+NBH+M8SNA1%zD2 zen#@S{b|Nl$8l;rAm&F*F(we-@jJT+Vjxt=#QP^RQL>(9Yqp#em|O7CCz;}1Bz0yg zQqtpO18%!A=AsrjeQbJJeZ&jvr}!1i+WdMFIhnHnmUzHAd;Y8Nu;+gH+laNj?UTOo zg(WqZ`JO3(Ah~m@&*@oF0Z*!$i2lVokaQRt*ZCs+7iN_BTxn$0DP)A}M)H1Ou1a5_3z>C^!;$+NTR<)+65)Aqde?aEaKtNhWF z7p?p57&MJgJA;fmi!I0D2<^*B&$d+ki^Q|H5i?JHu1a4S0_(V6;8^b!>MhQiOIVw^ z+aWA6N(->83KQ|PDF9^(nieDd-~C(~5)JLbl-|TY$s;bU9b|Gv`h8LD{i?=V`?u)# zoj;9&!5{@2=wayFDuL9A?cR&>ax+byeiq<& zC@Yd%GfjACwfduQ`q+eg?Pruwt-t!(N~@ewJQKtE!Kn?P9xOX$=wg(!6>Nv^pV=br zxzH`;6BSZQ|L4ydW|^w@q$xDFJua z8QygPJRLf(AN~I>+qOwu^-SLVnO{`?fC5?1TMRk8f4WtxqcmKISjCpM*+*q)WLQxw zUCP?iwmUPnjTk(5e~M8V0Zg+_n^l`;xjI5h6<$L9surBUGO(%zQO$K6pV5@Z`i-43 z|IQ?f?%oM5l+AeS4N6@_;6r=s?{%y3ukQqpFFY4meH#dGjuWE!PPq|VJ2kSyYWFN| zYomGM35FoDitF|hz1hkBI6y478xs-3KbNu^w2X_-XW(#Y#vUXiliACT+A!T5n*@@7Zst4i;7H2`fScfG4MTP)q#lyS0 z*=6IsKO>h^%P2PMBVH)~i?6SMiep{2PJlol2@oJ5xVyVcg1ZykVUXZ3xP>GHXK)<| z?k>S0xa;8V?h^cOa_+hJy!+Pw7QI+Ki^a@zeO2}Cy{oEQceV3O2zO+5pf;-0-9=i`MJEsFpbkR0FoamcAC?(;$aKz=7+V5LXMh4cXyBGr#(bF^ccEOgEYK> zEi>(Rdlzf5UMIoWxVGUiNf-Pg*vdR?zFITYK>ONmeBYKGxy#~# zIuJ6NANeIi`2S-Jqqo5QUHe_as{txGjjZGO%f#M_SG;y$*n(={w_a!Wm|^{2r$P1Suea?|yGGHd`hP3LN=z~4eSdyn(g7DhzBUBthx zocvk#?q=<&v*^-8!yI<0qm`ZQBo1{pk|%g)^mPPKgw9^rCnb>M&ZE3}VZ5+vxJ@K@ zlXy6s&NZ;Z^)5kh86dk(2&SP8r5T4ZYT9QNiUINiGr1}a?lnXRva3G(cu3IBhsrrln$D^!$2djGgr@LG(q1v-H%JoBXN|6j3nP zzHk53F6Wp@9DXL`Kh5*P@j!ZNGwJ?Q8Gl#VVpWG|bfBKGOa97C#tf|#zdtWSx!U$k zmO10$#IDKTO5G?nx3gJFa^^0FVDqX$c7@QwQR2^$ARChump%dp*Xu%Fp8NQ{D#FI~ zwoy(Fm?v2HM9z_FO`x%<;Bp+txoKy954|=Ck7s*+0i(_0ws9ZN28fmby6&sLq^-xK 
zk;N!1Tt0rt{6~8iS2QpReXuqK2}iINN-?HQuYW_r8sT(o@adfe;5s8VQR;&vysb+@ z@xQyR{YKyZ-08PCsnxGE<*hxG3@<+XDYM`xy*LwLGtd>Mzneq~d(*UYwl@-!fPyHw zH?>G1QCA^kHITUUKtHC>VUsw;5<^)4oqHX5z%~y#*t1O-co`qWxyNh6(5H2$JlmR_ z_2gG?7fGtc?W!HNIs9MB;v@V_es=4_6Xs|Is6US{3!j-G-M*(Y$DJ&d|9Kt|WSx{Z z7GBE|x*)WVGmyZ=;X#mPw2Qs}ubcV!{d%FpY+LS`0S;~em)gb#0s;r*O+-Hd0guRJ zs;Bl?I|+wP-|jsmM8y*60pthpd=)FR9K7TFV!!?Q&&8OPoY|N7`gPrup@2Is5|IBX zM*K+zPyFcp_S_80QOo{ngaOEM_C&31KL{AGtzJEEpmV!`!MnwU%CuVQ`k%(zt7#6n z8G;tatu&FQ-v9LuPZ9XxV%~@sb2r+uXYy6rPa0m`ZFuD=Ydg`7>(}g`CzIrzyb1gv zwmY{OJ7-NDrSfxIs!{s@>(0Y*qTb2WQHaca2u=FZ1g-QXEk;4ZC&)5aD6sOjlCS== z^2ZqOZaLYx&M$Vsd$vpgI#aGc7%#w>(!uukx}Z}pWrK82e;15Sk71A2T_&VBOe7yW zEPhMTtYH3`wotDC#nn&{OjrdjWzM<=Q@iXH^Q{<+9tSs8pTkZE`H9Z&oT-cmh=C3DtUO(fyCAG@&3%j zo{R`_W(MG(3?6&|{C6|L=Z=35udUEA8-B{taah5+8gVQ!#l0%17Fd}uyq!L+XLVyd zMrGi#=^uVz^B!zxSwWO?mmorvD@k-7NlwwlZP^>dQJ-eE_@thvdHt;FVQ9glrAtX~ z_o1uWy&_1BUbB4e1t(``HZ~sf(+>*f+gv2bwVB-eC-uu4Gi#|H@HFkU5eU!y*d@2n zc@gm75wV#&f4XRPL{ex*nmSM4r&e4PE&%iRz>ir~z5C#1#Y~><)nFL-Lg3IX%%-le zph;i1#D+ztCI4o8-6U9Ygc3@w-d3bcdfBqo+={0(QE_DgnjDZrnyW{E}Mz18NPvf5DLAuzG4OM|(uE_jd4pu*T&K>6X#S*K-QCm6zqzfH073Z4 zpM$s+S%)KIcwEH(-NL}obB4sTZ5Z1?SAu+b8L4uKDbEHdR?P#=-YMH_bM6M#80C&nA`@H(?U5`yIi-yv^kDwm@qqoXM@V@GlbLCvg8h zL=uI6tI!3ob2FP|K*T<7E zlunW=P!s~ ze+-7-RqusY6h?unHo|74}{*<|E_6CU|JJr;bNEZYZrFb8-?q)`ngT z)W#LvK!{fZOY2@{akjZ?@il0GR}it&oV#ylc#?l zem{g;@PDylxrYdT2E3pJopjh`{Qi>*RhbT3#doBBWv`%5hddwpz}3Tdhp^E@^@-sE zdXa2iGKF-=@(^P(YqLVY5 zIsx4MN@4H>M7sKb6nk7lmy_6fZf4>95Uj=px85<=J`!hpXmdXK<$HB-NV5&;DpQya zA0_Mk;l6c+BX!u*e_tqY;`fv6Nr1_mE-U-H$E7z)6~>jRUU1yU;`RAZyx-ZI21COR zIq#7oH|+?%JNjI9HhF^8pSXDAIx~p_BO4Sa#8@hVbrZCHZ2)5$)aC+Uv87;Lee= z6>;~K3`ENn&p>tc$dnI}TvBFtef^$Z_pTQ5K`;v}X`v51>P7V9f_=4dvIJsm5}*;F zrZ^n?4K-Y}Kr6eBegq*-foOf(=e=2Z-HX2egj7fibF6P1KIG)(k69R7kmLoQsXkvt zh)eLdh(>8@CKf=Y+*`jLQ$lXNfhGs31hY24f4X5n ztdqVIMd9}xld7J@*cXfomCZ=?av%-di|IKkqujgy zWZ36^U!zX96|64C3_FylBQvp&!Bh<4WO!n*W$eDoE6Hx#4P>v%KhXo{pQY05ZvsuId!(`|XdN5xnj%hR2bZ4rl7_wf 
z%El_rgrE5AU#9jKB@WGa#gt$MeL}fgXX%eQz~|ygIDfgF=cFuUCfV`EjAAT>|9B_1 zq7y+t;ri5PQuc>q=XBgLHqH%^uBud<$h#s9Cp{iD2hkh?ydFr>gV(WZq)W|9)_WU> zSna=E4>&oNZMmX466=d|^`qBiF5R2|{_J1L?TBKyYpgSM)*RZwIPyh`4E_C<%nR** zhmTQ;?}6wz#oArfLw*#>_Vz0z$`T-R@uGH%=cGiEnPtx4tlT7~3KQdkX%ni-m%EGz(g!4c$7YAZMW!U9QozVL3#K zy)}}~2ID^}UXH(A#qa)&M}CC*UoOmj7?Eih{N|@jv?={#-Ubd|eYm0W%vi>0T8Coh zztj6u^zRaI%e{xeLBIO+`6x0O`w)sZZJ`9CJe{p8x0qCE4emUkwWbN1nKp;hbNVd( zmeJyvt8M6(h&{cy(Iiwy^rSrUm}r2fEyt=CC)~&0OcIt=DssFy#F&#bZKyTiHRVw{ zyJ|47L_LtcXTm7dM;2#`b$ujvG4}3DBy9_|{sPHr$!H7fe56-Uy&02OZTRdTp4%b-w=$l@P1|aOq*(hx#{6GHV2|~Ke6V#rq6LITDuVE&C-cVPRzzK74 zHG}DUk5X%OUy+J=tJP=?;#@7|^SZm(CN%|vhOtiyPks*&ZI6+B`p;(h&&}kob3YQ1 zr6~f-kINCN76v$fKDI>(ZXm_$Y_sY{pd@5%SvA&UuGRI7T1ieDZS&d6vlo3slIcnE zG-Xr%a{qMW|9#G)V6gp}!V>SF@67?1;w51rVf=CUq{|VSehAfn580qW(Rz*ge|<~9 z3s9bNlV^O!iFx#tmSIxnZ&~VM%#$|D?KV8dRz z`tnT0Tby-0bD55su$XM-`TvNF|FR+kqs81V6Tx2a&qet%z>$4J%lO=HGSoML%>Dd- zc*(!h!oN<4NuIw{_^y|R`Co7O|9ixihdEu^jb~r9)qL6@Pdn^!;b}&(!W6!@e;*RINI*E0VvhwuCP}pQ8#v9cM`kq zI!sZ{3Qg`^jC@AQeR&np!(eFcu5tv8RRys6tGVOav-L3< zfnD%y41_h=A>Kn?3nL~>HY=(&bxekpCMLyYIdQ)3lr54!m1xz`$-&}nXs;}CjTY}O z?EU%1qRP4*^hSNJ?^;th+e76l62lEA_~C84_!}GQ?0RWdYbDlYQ)oWc&X)C}N`mFZ z(30h2jE)5QJ&X|&i_80#MxIEc{RO9;(=dns_Cx@w1`9txR^k1m_&1Q@9}`c`^DmqG zcKP6FcsLgMwAzgD9n|wFOH(zOn(d#pdvjMPd~KcBnMu{!MH;ZN0VSpBJaq}A#SIfT zf<}6rX*09oQ4V6ByqeB@o4a`7UCq6BFSk8}~qJghcRQ$q^^c zwrKk;p~fnq=Bjf4)SrqJ}t7hCp?bpWe)<=ps)^@*qJfc4ha% zeTLIyIjwsDLFDqxg8%$kMjUtq#)h)J3yR@+%Yf6%5w!08rH>=}loH#_6@U3;nyrwO zD^!)LzRZidd3C|bEZdU$azpXH;m6^BC6r@1;q-f4KKJmP3d1*f^3X3#+g7Za3-x3MT53{mt_cPEeXT0hdElj{1E^aksjUm|2-U80^ix^-z?}qJAo=;H_ z7k?2dnpPkcad)t&o*%Q*d4~X995>;$YHh!b$Uu>M_FEJ3ywg^oF|^5h<6YftrjmD< z1Gqwz>E6igO{g)|);qBDRmC5IA2VNBYe?LyEX0{WRC&ttOFmuDv^r_!a1<>I#IsVF zkR@Vkj682L@;YpIPcK8`P~ywGPbtv8T7hOyEgFG)CI8KUQSLx3fr^lCuN*s{nijma z2csVh95Kbud$8T&+EG6D6{dfEL=__dlC1`f#;91oiSbAJ2)n|%>~PDlxNl#ID)f|Z zM?|+v@P%p#pYZPv>AC2#j6~&=&1K`SvtkQW%tsB)Q;UokGpvP*`#XZbCcivZ4K0|5VhksS$7%5A 
z90^P8WM8e3_t~g?c}6Km^k|=k7iZ}!1Yu$aI6F!4L3E0jEN~AV(+IdR9K;FKqxrA+ zqjFv(=KIYH&;v5{vU!>DDQ+Ls)3BLhIn(AWU1`F75hc^drl?R0X{^cG@g&tUSx7G09qFa(jE|I?&AK@D%MuWs?`O7a={sWb7OtX$!OHpgyXN>9Hq2*E3Dl+=4;H*O8~Z}7jR3f z5&kyK-cV$nODc1*Ma>!_x<-Kuly6B@1-DHQUiT+sbY->@fntyf#*r{$p1}KQkQq?u z3qq;B>~DZfVd{I9$C>tuHDAjcejG4cTkWx@kY25-Rn$q4No|st^66%)xhX3&+!B@5 zEhv&g=$^y-wUZLB(niimMCX{K*XTysH*Rurjs)ozolRkz%?L2qgx}oMA6yBGqc|3*J$d! zyvBQy3Rk3??qA4mJ0kQt{h#^(iH4LIy%sT5?<##=rrUM_f^;5 zj77aj2bSnhCl^#XX**AFtcgKE` z!7W#~e(QW_qv@cU5zu`#(Vcd~38^#^Kp!aJPP~|28$jYEhlAJn z`ZeL`z}2I%i25C$h?%323uk7bQ}0WbW*%cts<9;b7L-RQNsaLB z01N#Y+}rmp7I5pgN2FooYFFj==qKyk16S{4#2bEQ^{DRsqPZeh&a}~>jq^a-^wv4+ z2j?&AM1A9M3~Qzmv`ibS7|)NQWE+xpdz+{=*`%J(2o4cXK2i;G`bG|#+^2+&aGtb> zaCqsLp7-85X|-HYI!oo{aN-jO3K9&WYSo)v;&yxx+WDQB?=MP$PebcqZX|J4aq-@n z&)Ni8AIE%u=@yg1(^}Ns1+IBd5@FWV44>*smM`W#T@O>VbGkq$OXrP2mDj@$Licc= zBbeO+?Cl^ONvceOA#a$F$S-lT$b-2g~siRDv>JyL6k`>X%1FZGItBBrm4#m zQ=a2+50_0L!IcRUD_Z)!pjvnAML5lD;Nl5i1JP5aA@24$ya4%-_EhEHg|9!5`4!j9 zAkq5+NruP|myoH}YFaN9dG;ju?3+r^_P6Pd=+>y3B3W;hUJYnNu|Vy2a<6bvG|IYd z;~0y6k}mP~Ek!w|sI63_Gc<(s_W%zvR}?1@2@_s~7z@I)Xh!(j-#Q)%@_oOs3R~#Q z33q59tP-|Y^a=&_-(Awx!v@ZJC(ODCB9;BRjdD??u0B;cosZ=9ubz~#5~rhG2UhN> zy>+9Mdxcs~>PG+3ZaUvd0-jHZ;)COd_eZzM;>X|;CHgbqsa@dGin1YdUMcyh5~z~6 z)kW1vqwOR@v`50wl_7SDzWjF2gpf_c*Kegq{ysKT9B#Lvx9~fQ@)d(0ardh~RymD$ z(OFp#Y<9x-t=A{w*(iZ$Dj3kOgUELSc$&gI#!@`Sj$eF*12hXGXp4MoX7qb*NGB?+ zjt6a|FY%OeoYjv1xoDJd={Q_^m~ua6;Tq2rb0Fn(+ctOS@fuw{WmLcV$EpNWre5jn zD2V_d3!ns!#Aw_JVeGkgQOmQx0CEQ+kUPAPB*ej%d1mb+zsclV9mUs=J->4Xo#2xW zy$knYwI|gj5qc@MlhWKSbl%GM-}4595R`*6`-}ed=KZrV8Zc&b)G1K z$TIhQy2!D&JnvKH%_we;qtPLam|2dFiUiUAnogL!wueHq0z@h(Y`60+!0W>2>P%VM zFfdP27XL?T>G!ZH#Z7JfH1|hDw-vzXK{vGi@VgZ1eS31?J1Du`%<cWr@e({PLyx zvlWcNCQP0+28B?w*@vn-Vz1SnA|P8g+V5JBA z1{f1)Fcy8aRkk`x*In#!W<+$xG58<+^!r>r_Mv#2rV(!Yh7A zWm$1Y0KhNAODKL>FJdE(S5VMNwoy#6c*l`%_x;_85ZHdjn;xh<=X6E>-oA-D*p@!C zPEUo)zz5P{wB|Da3rl;DBW0*iI^@(ro5@kRFp7giwz98LRh(ImKry?WP7wn!npx9= z7-}or4Q6JLB8*3y?RWJy774!o>Bq~6QQ)ILDGX2x{$WF<=}q-n{dLKu`j2}r^3WnB 
z%vmI`7CrW{J^~Lq?OXzQ(C^~eMlP?R_P0RULX?~s}a zQmd1>bUxOh{QfMLs~gpij-}e!_?xl(KPx{%*B457M3H80xT$Q3dNNDk)PKaQ#>=00 z8Br6do<7M*WNP?ilCR&xm0{bMTyNxAPR~KJ{t=YKds1>%n3HBv-C5W7yG71i3Te_tIM@pv#)z1<%2Fc zEljGomLLID5@UJ4wr8U4riZJxk>!y&n`KM7Zq@}?HDul6S)cceF+Tp#tyATOOq%pQ znr?hGWF6R(af25wMI)_3&mYL(<`2#<^oZ$!%-;U$=@nsQPkY?}-O!JUs~djYw@iz` zc&1|d)T^jNL)eUmS1J>$^0y?)hk!^uaR%+!dGGtt-8NnT>+b=g73)PYvKH1R0(4F1 zvsJ-B7%R?Pybud7+I#!3A@HgG${b{8X@WXTSiqkQJHwPz?5Yd4F>{8*bGk4*3A8}Q zQF5`yMSaC%%fr*uQw{wPfh%3E(s)H7QZnNCZH<1s2txLLKM{b?1$T<;zA_0Nu)Xgo z+_dviA}Z;IM9YCl47wZ7)gcElb68>|FPCQ4aeD1-DjUB#W-VOmg(|R_ES<6?S2&F# zgKimPnQwM)ZZsEM4A9l&uiNsquCLxic@Da`!ygPRiS92XwaWZ@4McsQurT7VNjYRE zA{tb<$;AQ5Wt`$jh^K&8&DR~kI5{kJWW#qDO$@CcmosME{WVkgZK-94A*r)Yxru8j*hYP)Q9`MINNp-OXT(Gy%Y@ z35Db0Lo=~hTfxSGdK-HFi?2B3Zr!g=sdgC`Vc|u#N8F^L#_L}6o1d%Qvtx&PjEOjh zGx(S224#q5*@z`*Ph5X(Nki$)`D?E6eT@8@FT_Mx7i}#4P{7qeQSK6}El-O1t*rTm zTk~lW?&~(@7kanfwjUQH<`#u4^P2`5m0kOYu3KHY7i>O?t;B-Q3pd#5c5DqSoLLn@ zYR{8did~2onut1ZEY5EYe0TN)b}EFDR1^iSvZ6vosH4wV7ouES2R50i2>)z$oPa25 zd~==VZ2v*n?C@Z{)dk0H(&v*jHaSr$FEjJI`RhfW{VZ9bo_hEy32$Ku%iZ0Xyd%-U zHepk#8_G9`;mmuR@6qjVbHUq{a9#NJmbLe>rUoCl=6SLX(M)yCMi)=R4vEL-f;4=Wxrx_F_A>qVm0pkh6w5zCnfpOT#jt21doC z2R9pmk45RP^x~u>z_L?!VbfBXMf@UCsNz0mFu_0vH`f7gpB&ek;7>cKHnT$|@mm8W z2lE!jBv@AtkdN~t8dkJwJ2k5jo>p4CiA@GZgt}a_(Dp3YmAo}(Pi||uK4T5laeRkP>UE8u=EwUP zI)~`|Qw0-grW9O(T6K%KHFMe3r*$@_)t*W4gW3tYRORukl9_xyCv1^hl_yU@gCs9D z(+!R8MP*Vx>3J> zIG*9G7@x|+r??HR0G_;nNf6g+l@LlhvO*xuSeC=ZUUv}D-iY&i47;}x=O_8zGeDmw zhr{RqzfoYVy-e|w{4H{u(i{TAWlYdQuPpGGQaU#*;(?Qme)fxnYrU0dsmC9j46&@I zP#!O?Ds%Yt`gD#!J4djOhp%JV6l4S~f`iAetf-Ben4B3xzDMK>@G%|ZDf~zm1+j56 zdNJQtz7NiLIv<%bIr}!>^XeAa?X1P4it_7Qdv@>rkn%L8v~>6M#N5r~p>>MHj?stL zDb~4@+CoITkM@@{XuNnyTGaS!Jz6mE`fjT;27GhX7s{C-3y7VU&s#{g3ALk>(-8Is zU%uU(38go7ou{|r$wBDAjIn7XxexSrm0z<`$$6EzaDM(i{&gJPqm1)S6GmCW zd(J^UA&YI)z;0#=Kek8icrAdnT1qfmgPCgf{;Ug0Ru_YYGtl+936F&ol*sSFz>TZT zGFnHYlap(~-u5~Nx3_V-8EX${KadoK10=@bRBl?i7=NEI0#h)4q`Q*|p@JpQb-<|U 
z)rDlVna%%mjTfsc)h04Y`tW7>!uEs56xn>m=C@c~nBeTIk+7Jh(#T+J6V^ z9=>j;w!3x)j}db%6ep+1b)&0JaXA;=kEYhd&qS86VYl<;GG-iK4u820a#}ltpR!&f zbROW&Un>fOmoMlhag&!84gUH-rmiE>xS(!ofCucwpD-ej2lIdVS1Na|&Z*J_Zyrjx zO(>-@t<`&^p77~HUlprEg}&tQ(|xt_KfZlqqis(J{Fl@7FAqc%y!#>y#OQGdO^!A^ zx-AvSW0Ct(Ivu7|NO>cX3g&Izr-e2(pr)(ceB3LFuFP`((c+$$Wr}-v^s5|=?a3LP zv_9e7j4ABSipsq)NH+#=cQ}Gz-jaG$IIburZEhw?h|IfhsLEqRcocDL_8jZ>Psl!S zKrYzucXq&DZg87%s&bjzCd10<@8;R(k|vx)8o7#OTGbyB8U^uC(TWMsUg0N29hq}v z9OjVJBhTSO>F*GEs92>SPqYp>jeWTK5ubFjgjR9(Q?MjI@|0r1zQC_lMTAO~i)vJyu@W3eSfV*&)WhbZzLIbfm;Av;C)@wEX4VNc@5P zkVlmcv}hvln0CO`TXw^Em3Dtw9!QvzZXdSRtlGRtL8|y5AA`S|PWVH@5vm&5feAt3 zDf)nU7Yj^n#qdY1v}<>SNf*pTi_c%zN0pztPcQez>2uQztwlsTESgBz69;5=*X@9n zK0i{$$F;QdFyWcoEsFDZ2CjRGJWYOrMN0#^nd}g)lzmM&1VFOnAA&-+7S;s&+FVB- z`cReeJIm%a&9IHrkKFrhJV3aQPyqtm$3mNJ1sskZQ| zK81U)dEqZn^QYshqP(nR6xidE8a&R0cr4_CV7?v@3UK&wag7a7S&nEM)nL-8Mr z2OG^*OBlzQaq?K&rYu;SxF+x^!lwI=`cxVVtlD!XaogE;eLtoi6`{ms1Xl~{kH@XFS5^B$OdZMA9 z?mjs>SO7PaD~)WroI{#Y%V(63W}>#`*IUWv0zS_y;U8NB+Xwa-Q1f`@1;k(Z@$wVT-8xEe%?69D`T=aWqCVP^3DOlrk7e{#LT*ti7N z#1UBosAayRMJQedeT0$Fu;x6>jZ1P>82Q_fN=&D(aYWx)>XF|47T&e#_5WBCpK1}L z2{ZEhdi_Q1W^h#}9!wCazPURaR(9gs-J&H&8Z1s|spMEY(RmnUNs$_T_8$6F1cG}9 zlqpiFdlDz0X{b^;rkl?95Fo*ITkxm`WJWVx`o)MSujx3y`1-@!hQA1H0)Qn-nUmT~ zXn$ad#M1ZTu0c8eUyD^Xn(5v`b**%<(B`=2FoXeAPcLDnUaRwxcE`cm^)gqcdh-hG9K7S=PzW*po;T zM6L3Ye~8B1tk?eLF(xz(81o^^5DnNSNior)8kYamV^jHDg=a(+f3SJ71Ou)@A4+l1=4kWCu<_MS`@ z`wzWen0%rf3lY_ddc#bT`1xW_=pq8gRMTgJ_` zeqT?EIRQn}(uki4LKv6X=M;f!3zdwowSjMhkLtLj!iz2NvI;}`0vDNyC9 zkwT?_WUB5b;w;nn|D-`%n1M;K8p?14MpRlwqOp9X;Jo8`eeRHWg`<>A3=lF_o}#l= zL@V7jc7M&)Wx<_oE(IxEf)xZA$o?A+%qo2Qend7Z)6Mg`Cv3Ew0!{IWx)EEy(Iq&6^^4zj&&nl+#`s?WB}*l1(hgQ+y2*gtJ@GD7TIKwe(@o zlEef`I^y7u$Mz<#psP$);mwM%j#oKqFJF4TgB7k0%^~gKhqn{zezcA3iYccIlg<%_ zcZC3cyCXq9hjBa?nT$1Rj2mzqYyWK9uyId3^Gh#a&4;HOk_Q8dCGcSYXOnyhw;9%# z@j1jq1q20}_i{6NP1ahBz)pFnIAcBU)#sv-YI*|+w%j`N^T7T$9a1+{T{XjGxAu1^a3WPo2^)vm?hMMXe10y9)09Z28VCBFlfpjm@7^k{6i^4T 
zQnlF4!}q~~JOI=L{{c0m5q-T%b+vCu5&%>23QNPi6F`Z)#bfg0ls0z8$etr7!IW8Lkc=L6>iXQ@mF>23H7Ak(k6y1W7FfB$& zuoo%D$4r;Sh57sgN+oz26cW}+Cx_%>QNXBSJ^wH&ag_d<6kE#egT|2DzGwmF`OOG2 zr#sGwJD}2a{88yJk5a@jZmh*$yeG~^mK5xXt6?7lQyhkImlEozNh}I9lV;Bn7KD0O zYqOmb;Jh2|md06zNC%mDm`4{KU=WP*x>M8GJbpg!6qQR0aZXh5bds+BN=EF% zzMopv**RO+7gb{w38wj~PfX`l8S<>(!3(Yr@{@mAom@j?Ye_lQ0@Dq*Z30#~*5w>v zL5WuUs(N|B>2d*oZ#VcF2p7a~-|h;*llP5M`KqMF6amm~2r7R9gsLW!A3nee*sNrV zE$0*IlcTR$I_qBMo?8rwbqb!}g%?JiaUWi-mk&jPqf|a3I`BJfTs=ru(3kQ z)&f_?6`j~uIid0+O<9C5EIZLIIVCk(oPXjv+}Cr6agzlPM!0sko~pz8eje-Cfz(?_ z{AGv2Q^4>iDgvC3*@5}#VqQ4QoCfL;Ft!AQo|PtLCtbZC-!eUuDv~^oAvVl6hjcI_OCg(CV=BkrD-AM?_5q zyRl{xUkY=}mG8#RmM{k^)MIS$yoCku`alU`2i9%*=*xU9DSW3AM?q`Y+wuwuI@siK z4k4!bGyuL6R5OxdLgKwc%JhuM&|nMxx|I+zaUa~AS z(>+O;KY5YK&iMUKAJVj6o`T(22FkhPoQ{iD(;RJFHOXu~T96FmfMeHY-E?C-17G5U zgz9s7ecP%}eSXb`Dj2 zkXilS5}htvo=9=;W2@$vnfhvg>Y(7I@|2hJHRM|mnTdB2EkwgPz+LVpCDICZC4HZ+ zw#V)UwCsnI9o`ICUvh6vOlt8(3JKMGVI;_$gV3VeW8H6ljS;+(S$}8$W$||Et$WO( z*n~2dVJU=Q{o(RLunwRH^bC6*{jVy5Ee7N9?^F-#{uq9FAN{T8a~=LHU)s}wQ4=+W z`O2+nYx0qw_p*pZapfvH`19cCG2gK-=8LUV^FksFTXp=U{5(-w8mRAuu z|0t(1yPi-;-*wyAYsO>wDD= zDnvB0c+QyciBLUj+-Q`9#v3;BYO&pi?@z}P65d+WK9_8Y-pG;_asuw6WEoh1L-I)UzOej1u23hMC9uxP#QuI#ezc>_3QGc#sZs*6Gi-7M~+v`)ky%0 zJoPWqmLq)umZ!d;(dbpD;@J zhz?)HcS0={&`y}F%|k~yt(KGCGt+EdHWqS76Oml0j)py;27aZ?kObD48l73;jdSKN z!{ES>II?Hy=;+^N;7Aotv)r--t$mzy1z^=zs9L}A->o158eRLC@yU@m;cpE!Q>dY5 z$62@8lElo&c1MEKVNP~?G~4J_)Y@tNO>Yl-FR26spDb#Ii_57rqhWxo54RtOrk2LG z-k?xFPn~8g)QbJxT|`q09A?DR&0r0%JlB;D!>T8NkB+W{kefBy9ndf*&^Z!83c(Cx zQ)XH`;60FIqu~O0&a1z9&MS;NSGkBgFb_a7y@S3+C#^laAh%+QJd1?aOTX~fEHAe0 zrP2_S!(IOb3dn^il}tz}K}+)hW=c-@ZdX0#EL?##pd9HM|~p0Pyo6N{Ji* zKUmLyE=7cVC{TL#ydBxEFB^L}Q)xSU;^H`D*<+!Y)4DB)!!NFy9aDk*BJnm&sDkxK z7JS%>%G6KOmz}E50`c}bH<3txf^P$@;SN;3Vc-KwE2#q3p6wp8rOzN6x7%5(+jR6C zoFQbZa0%Lz)H8R_$0hdgh>t|Tiav1}{?6O=eq@Dlk-yuxg9$cl-W|wU<=f0BO;j1u z{UJXQ$TY^cxpv(xi+OJco9tWx% zU19CnLk(R41J%ZNojoJ*YxPr2Hy-JZB32uX@ zO+z*b_t!W@4`eoawmx2M;K!sz9ML&C^-yJp|Db+;mpbeEGyHz_D#dBBo})7Fp1D9; 
zQ-Ra+a61Pm@Qr@t*&ncG+ROCuG!MIogHrZh8nYKC&JO91Y{Bn5w!uMtPa}&4YhF`) zZb^GBIflsC-Wr8Zj2*I|cy;Wy%d-w`MRi^b;2)Dd=~PXQU>Mz13a_f557(yY6^YZ+ z%iUMsjZPL+ZsIj3jm*05&#M#L*^Via%eG`(^$bm)CJNi$FQ*le9yG#13$!8VVZzag z43M+WT+61?PBF4d(sLo7Y?J>wI$@it{hk$5moS?`xk&IkdjWU6ydlrrDfi{)?#dY) zZu@K8ZLeT;p%C};p(^kBkkvDn$7wZ31b=r=YUiKMg?R!q4?l&ixd!V^cBmBwGd&jb zmL+FVF#^?nC?V*Pr)myJQmDvuV^^94{vK@A2XX~gUmi8AKpA05DycBZtLBo&LA1~E zdZR!6NC4;K60D(9rD*8@4#BmCexdD{LXe$auX5zog#AL`Kyur4r~ebZ5K}PeFGBE9kY^tBMt87& zy|Da!-;XU5%cuT+*NmtVN<_z?7gRU-GQasahc6V@_x3YS?J{J2yuJgnL`_Le^S$9? ze0>*dWwdW!13QO# zY}zL03vdfVUY|0r)U|e(l`3oNL6W_O|1>f~grcpQp4N4Et zEOrfG`xhWyt;=lYFMvAv@)g{Q>J_F;az4DXCjWN(_MGcC{@=}&&-q0NlQl^Bb)o{b zFs5^4@}wEXA%t;pF;WOy-|E@~;gfdCSQGYDDuTA82H%t$S>~dzt>4 zHPB#R>w9dQ{{$U2j~R1>x5mP z&0v@Xf6k|3d+a^PL{nl%zW^w(+m1gse_jDKkR-du8b~xUy>6*!(Q8zx7{ovD{#(l_ z|E?AX#F8M!A~s+7(wK+Yk7X}8IxdRy-DG#DH+PRoZVW}mybSk_h#p7{wQ7NHOrJ|| z%-`OYR|G!mv8jUgDdc_q7piZ0s9c&gnCpa=siK6QW)_rpHl{`4nDmO8t!u4WMVC5! zU%eZRh>E6QxVhAag7)I~cC8w9+a-97vnH+ii~5ONiGoINq6y@5vyF@XA6e%e4d>dv z?F30kAqa`6(Yr+NBzh-$?6kY(L-4T zHz0*diEH(#`|@@iJ%@zT0Onla585f-dwjRZ0h!> z-F%_n6sn$EbQ22~Fh&Lk?&<{iIa6~jj99%qzkTDK>LLnsaE{$9({OR}{pSMFm*Dhr zXv?LU^$-1WPI6xa9^=Q{;6RVTkEF)=E??ovHB53}lVEZFFNmw{L&%;gjOj=^MA#Yzp3D1uQ2qhv z3N*Mq+`rwda)akpjoNO^sdIfnZNEr%9;c4?DB=0^z|1CpUZ{p!tk0x|Ej=8%;*}cU zkM=&ARIj^;kv$B+Y*W@=o&U%~=`Xk+o=t3n$Y6(Qln5+%cg#)LPc~b&Ia5qB<&bp) z_HiWjx)@mukaG?{x?DhC`eOYIiH@m+GFyvh!v9#Foaf+;}3#pvHYS8L`|ySL(XEzqE{Ib&___IC|>HTJVf zJ}2QPb1s>Qm#95*G-5ljSvMl=cUh(c!3PdA`iPp4?R0}l*yg(@FCqXG&v+h`PxaYGVT?L9UX>uqQdh%ZEdxh?lvOMTKvVwKPQf*9i`joLIPoQp}?^WI08cfDQMOa z^xMo6Ch|%~dy>B4A66;XR{N|^X15e}pGIZxg`zE@Ubb`5JhbcXXj3DVJ<4r&8hP?)o2AX>E~dX{z@g zP|R!hR;t0v-da<*H1`Q83m<|2>&l?^oyXWp(YhT z7``$n#7sLN0KVy{yCDEpp~MM4`01aM>ueAqffLIU)9kox7wOhKmmOINJc%?NFYp2% zKGF`}1XO#AFW#(Hh>;LKK;!)%Nb!Cqp4r@lYTA}qJ{?0Ux0L+@E+}+A&U`#jyHA9H z7RYz)?>WPKM`~Ce#>>LntIwD@0&gBmp2uA@| zKL8rM`l6Z4;`;x2DtSq6 zAzUP{|_dNlLjql z&5TTuRokx|!Vrxc`kL~+@4s{{U%Tq*|9&ok-dM}LhXSZzy2YS3tD0Zl{V}5WFDD6X 
zQh&sMq0cyL*yBsS8W@ebq0eVY8tm+dpWNnn;2})@&wIWHdRF5)fI_QZBUDPqpSbFg z|NW4BXggVt$r(x){X9QA{^RV4alwTq4-m4zp5>$%Qe9^wDdqJOw6l3^%FmSfq4H%S zLplDx`pzN+Xz`u+AMSKdy?{iph*zmV0H7)To5K9_MJ8EV%jdSKRO8ek3BE!!&7m+8 zJP^$$#gOVT`uotYJ6L$w{qOhJziLskV%7(E%gNV*-GA=p$aMVQ$Ns@Y$6I_OwI@UW zyY1q??K`exfcCS^B#Xua#@kbIVzIZ9KsfYQqXEmqm!f_A-B&x=SzS8rlE?U08LR*N z?f-oFE_aPM{tsKfcr7$H+ROp5lUKVJEZSL?NZi3DB+-NERsPB4^!DtL7!`nn`ZInv zYQ*F>Z{X)J-OFn_vLR};O5TpAFg&opDtzZT7m1}(Kmr21W&1$8UN@5I$mL=kWtCsE z@qXWvU#%<>`>lpyk2)1@iHw(ZFBM1<}O`&t{?5q z>lvAPzn=&BaP2f5f~E@?UYYqvhr>lyJnc`P`a%ld%9h|nuPJgng9*YUOVe}BI`aN(>Yj%-=+fr2k+y2Nu!*aRuCUx~**gwNr zx|sO1ZwBl2-Phvv+Bv})e={TB#OH|rI9U8=TuUqB;lxCT>pUO?Qm^>oZJ{W$hLxJR z>Z$?`D6DzIErd?2HQH}7-6ZyX2hr&<0AsQhuuJNwX};BnP~h49Rs?!5IyuDd$!X+q7EPYO$17m}hzn}6SeFX$ zb%a1l|4U|*q=sbU=5H)89Pdhu4HQ!GDk)_&%W5$K%Lnhv7ip}Yx{JR}*RcrkYieA! zRjE9ezfBcC(sV5!zz6e|SFlyk#tvbV28ByiG1UT8w&;wgtSZQV-SaA@wvp2yKk zJW~8HW=^5*l#FK?3J?m2akl#nc&!=Q~=9WkT{kIZHasaIKC zY3Rs^2!THq6V_fAEnU=y%K|0cS!XAyh?Y@p|%RVroB!PtCALGR^MF z<`1_W2v3@HR^?5`JGOdWxGBc)2^k(&dsM_kg3+E$*ED!i?f5Rubjcx0%Eyxl&qT0` z8r2yyINt&B4dU#&G~H^a0PVlo;yG<*6fpik*@{hnj?p|@*aIuV@X(Ea?rj3{m5*WV zWIgLF=p-pX7(R4?oP3ZRF%n`)NKcbOLQO^U!JcB!U)y~{Q$AJR+AKsHc*5#}_1j9gOf zVdeS_AcNUM_-;7=4+iQ-r_1d9z&1RyNbn82JL}c{an}&A6nQVz+i62GZGS%NIah2Y z8&?0Dz)0WoJ}F-X40K(*4|D$)zhz zgXgdt+S2_pkG|x=2i?^iEz4wow_xRwgIUJR_3J@#nplAM%%x}g@VjHiM-=iJOrX{y zQ82+*;P$1Pr=MC4;t!E({$B5I!N3FTj>sbwJL~Or3dN;%;;z@VL zkL|HhI+7NF++6kE?UB1mh1w_(eK-EN($FR2 zzi+CIqXpkoSO;&-!)jpGg}1B2F@2mH_;^m+Q%1aEW|14vRVHQ609<=Z_k)kpj0Xe{U5QWMLYb}S-uh|LjN4UQj{(DXnNCTk^fgAD2+f(}AgBg0X z1zHsN2L0;!+Fh%iCLqZ`2ZH9GFLMh;5V8EdSb&ne%4Ded)117j%=6e;*(N%bePD-d z>$x-$ea1*($mR&hW{6IVGo~EatNx&pxI**?j|!WAZ4|_nk<>VlsiD!Ruw>q%_zvVp zCrNPy0IPl(o`8h#Z z0NOhGv?r1^?hQlsb*Edxt!u`O#vkYAPtw*OB*=bvXHAb=8*-?|4Y%YzDY$n_9GAZ% zoVpCIK23sUAFVIsg2rpmKP&>w`e(03NHZ|W zwcY8>Hz+ipH3us2{X=#h_DuY43%a8))CZE2Oj|jY6sXY*`dIr{FtGeKBvG81mn%toL9i%=}cqMeRS>aNet`|?zqAN^T zUWb%rNG&fhKh4UaK8D(8*+7enqthg5t3EXOCvlc2W6oZ3spR9IgZLFfzBQ-SIiEZ& 
z-T_oZtqjtjbg|MOr+%_igfUv&{q}Ut$+QSVwD350<{rmnLx6eOkb4hwVQ<3oSAL#o zWmB@0r;6E}Gy8S~&*0rHgGAEc(;;d`5t(}!{*A)(tyJ4zg^1>)G8l~VG2%=uN|Lua z7uTODCA|yMVBauVQSG$xJVv!N7+TSiR@Nb8SgBUm)$+6hB;V@(QJ9(*QyxlV+f^16 z9pt2u=9|#H8oYM&GMeNO*%-nTen)KTEcfY>Ns+lk)<;Zm2n*%T3EbB`8GO=o`*RPW zVaEh=W6_txF<%|i{|K{&eI4O{w0qdwTs#IIF+fc&2+X1a7fQe0^l3s@ok2l9BWEah zOG9DQUZBv^n85FYX=_IAN5~SuG&18NG)=E`=@{xe-W&XTXqP$FsRzb^oYEs7kc>IQ*|g^;+mA z@zd~K*)p>+JRF($5aDpI#$de)+ttM2S&NeWnpiciMMWW@;^r>#K;y00_mUlgMtAdr zQq%9VLHBt{NEJ3&avz*NsQ5bQd#!7O&NfUEHit&Fq5y4?FEPROO}(TeUpH`dH4^cg;pX#7wByvRDEd&;LfH=CCSV+x zvg`B}0`Z{iu^6@ZG;dz7M>2<5N%6(peXzZoK!CT*@3i%MzdF&Ak^SWVY&MZt;81?YYC5xdXKAE!k?G_1&f8kF(z zFq|@XM2RU?(iH5bo6aUWzj4j!_Y=CpdkyKus!W|AgW|&L!y@ zJ)`w;?-x@7CF#|Bx}@gt8=Max_o3>Df^vY^@~%&w8k8j9C|`CoB0KR@w~R~S{!?9! zbHHiFafW=aw|LYkz+R;a;pHA;##hx{J+SbhyQg#-u8F|u~6~VY2 zV>YX_%Q;DL2X#}-Ke*;w?}J^3I#b#TTr_~+4ogg`S5OQ5?(e!4^vwr5J_&<-3~nFn zbM+)FUhri6Vp?P^GL%n*H@l`se9R zU~q!rkk2e8U!*X&q{-;mjtn|yJ(Js@0bihI>R)NqA%`@~X{uX#Se}x2L z-@}0OP1x$$oxGGxrS7l(ZZ36Fnj6rDn|j{gb!)Sq+|MDt9 zTyK)68f-qT#ImFVX1(>VW6?Fd*Tin^PyBbHEt0IH@bAjug!?@;OkyCr$(s*dT8vT8 ztC#G-l?>3yJAaiSHA2DiWoCYjieMsVH7o(_*|qA}vqfU#uq&D`PXQTOI-(DCR+ zOmq(lHlj#2qIGhdnar0dtQF8u?#!^QiBR!W4{379O~!CL@z1h-1thtn;m12|iC}xaineZuDT1TyBq)77Si;nO1UR+|$8y*+K9}}0VPb`Eaek5G;N^O8tcI=5J zhaab$46_dH`s&7iPux!}s^8_MW3Siq^{^?dHXYJFzAd)kVi7S$pR7Nj<`JM=o$tas z70@$=6Mvo-pQnwSP)=Nogv#`@yw;MMUh>Yv%>D@LIceV5j!ia+%UlZjp5Pa2==2aG zFL%?A2*Rv`!4-3Z1pj38CcV1TlBRn?pcuVgR-w}*J6y!PzyZ)wEm?3-(9eLBck?;@kTCrj1i^nv3hy-twO$E6%$AH8P1(0&>EA^e*Nh+>zf3IYP17Ph zKeE2q(MIML9rsqUN(Wiazl!H4Q2#XNMWR0TmiOONRk|&{4KOolf~RmBECcQvcaHT! 
zuFNEPuJ^pnhkN;Sc79f3{3a}CGYnTY(m`^G?uwI!k4w-H$n!8($~176zH&c9la;tLH?;EIbDtw&}^B=rS@P%x1N2gu+geF z>bnVAIj}&%hnLG=+D;!e=P~ifRsZK%w&-n^sui=jeF;rtzr|*wtzqcI@+-hj$X$}y zHa>6bL&EK|27wXRg#t^KwUD%JaIzk`y(&j|rCyHjaiqB45rxl>$AY2h${^kK(aUDj z-NWgp(Mn5AUoZW&ayEpm;eaenw911K%c79J=d{Hu7?|z0Fb(Tf7rn%do)6RDoUJMD zUD=ta+uru4P&%R>bfx%^F5$umhFloHI4|taFI7LyU5s*TS9?kYI-9R=T3cqrI~=!y zsK%1c>@)Ho9cLt(lp@bW0OD=#Og@RNuD~qUcw7DW?8%-dsI1UX)k|Qbv+3;POJ%*) zJQcrd{xo8RkmGepjSG`3bCMp8II=w5Q+2OFG)2X zB`7^5VcHz(T=JT*s67D^*8oe*cJN`#!7F>T3L?129^|4Qs*~jIh2i}+^AO6ZBnV#oM0*i`o?@?|I&TwxL{Sz zQeRL0C7S5vm%G&;s1o>I&aw2#?UXmgr}L=8Zw?Ne{)#5ou6Rq8g9d=2mv)5^1c&?Y zh{L03{gY3F&6!NPKPhb1bOfopIY1u=o{2C-VLkTrr+XN~K5T9DPVt;+{IoAftKN{0 zHaZ0^!qghKrrl{jAVyLz29xmTG^ygP{VlYiO?pnZMQ$UnSk`6zff3G{bBJ7BiMStn z&&*T3)1xIXrBX(0F6Ml;hqELI^FtXu??lPh*kxyl;pan@ysuR^ zD;$w?qk}^wDhBI%b&&?P%VqnM39FWZs#K{`%LKB3$Eo6_V9a5Ku`q9xwG7y|0qcNu zF~hCvy?_}74k~I()upnC7Gid|!{gO|xTFgI?`;u1lf@7vi9MGvEuM5pF_i;F@Xa}0 z6OsznFnk}iRa#H^sm2;PlHurxj%E2|k|q~DzfpDial^L3Cl!M&42;BaBsc2b-ZHT< z6pDL`1*lYJh4}HVgzqlR0aR*Nl#z>t@D+O8N|lJ3kQP?X2@{`Y3tjhSsET!?{^ zu=uMDA8vQQx}c^jxF@+vb%;!R0jU*4({42aOZ?a5$d!MmN~GyXa8OO;abqE#z^~Qs z|Bf;efAwVLvE%nEs8mb`sgyKzw}lT@BV@DZ;0lW}LC> zi+mk0iL>80W zx1dLm6a@$MTJxFZbpf}cop8PVlOAUmCNO8M5t@C!4*s2_ zp}S82BrF)dhI>Az!^7bv)f0kLH6CB#FYo9b{BNS&7QFaN`IVr?SBJE9V_~MzBxsoY znmTeD06IswoAY1^XlC( zYMG(KojqTlx`1p$rg*gh7KFT1Oofd8-yl;LduCA(DlE0%fyge4S38KXf&s=+Ppw37 z?gUa=(^SpY#>#CKt{H%t$FWO#j-}_BL^x&nD0cIuF9=;|Zix}}a#DRAN(RHWGA4&RwsyIpa z@No0;1WgmK^c3)1=UhB29zWX>%!BV=^@vl1UeftOVbA^>>Cq#Pxr@j$APBF+tWN3m z5!6xvy<%Z&DfO?Ez%_hfz{XaI#r_-TeSB%U@El6-s+Hfdud;XsD4lyTR*4LwlEy?{ zkBgqPLLk9!S$>qHWD#u}vcI~aBIvwMJM>Ozc0PfVfGJL0s>KglF@11i6YX_=R7|fi z-^4G)Tf2m;f=^+^zn2-;{uI!TLD;dZZ^MZS(>9UUM&dlaBzdYMeSQ_qb-rwI-F2~4 zvZ|_5rGI$QMrjKsgLjil|2-Jqm~DZD(Mv(Y!S)tUB96Y8ts?*_OE;j4yaU zC9R%{QXYF2GczPkyV*qB6bve=W?M1kWhLm@@wqxaRd~fg#OUiz_;Q@_#b{{{m@zCJ z+1lsqSva&TJ;u}(OvO%=~hd^L9%c|#vuMC-xawihs?zNWWl*$kcP zdhdBPGp2Zz@5{x#y&UU#E4jcx|GZ3$F{jkVX9ti-rdA&}CJE?t4PEFlvr)SQTC$jm 
zGcE9G4R1}4Z;5-S!FGKQAAi?H@sW=ned{JxeQu+y#cV236>IBXLfyp{&O4( zK_-<9R43Y|jade#&S5w&^OLblpH`cnt!l)$_bl5aHASp(+8V~C{i^(A|J)OcAl4rx zgy6C*Rcm?FZ8BG8hB{3Tl5g!$yA{5dQ9c`1ISkuuQeOzlwPI~N5^`wW^&)1hk8D-R1EOn)7|G97k+C9V= z?*(mzcM%g{Ekyzik+tX{8WCk8(V%~`+U3A0S8oD{7^6KhIu@Etog3%@=2L@y$k2+R zvd@dYY}TjUAYny&!4(8%6hrnJtSfpyB50+KN`G=M{1ULYDq&IA;iHSEDF!cqedNd-)hhXuYFm)CtYUUxGYyQXPjFS zt9Ys+3-N0Wz@|t%W{VkdH~93NuwfqI7#;<&>Uah54kx#f1 zmZ{W2AwmBeiFkWKDCC?n)b0^os-OYJ}L>8$i* zx67VY*4?-med_p8yT;_6`T;2^Uw+e{qv$K_MwJuRYHo`I6j^IYp=oe88jkRNS@xd= zqgFhx+m&TpnV+ShLBkx-T*Qgbjw%(?7L|r4sK1b){iiO+)polmcs)>?N5A zfc^;>x4IS?{CAfu+R&6J6k=7yetoh@AiI>EP_D&E_&=p%u6JFb4_?L2toV3A1d2E> zYOOz4f7=hOsF!2ZD7m=|xpcXwqV-d+I!3WS7_iWzjRm04zIwZxtMXt%H}?*NwWoMx zNdOs9r6j1#^SL8y4~*L>)xNZ z_9;EQ2t{){Vhux{fb$}KX}=D4YDwk`9bamvf5}Y4V+<*uVEFGdt?ftsYY$MLV|tYZ z1uPX3T*qP%A$TSS7iE$M;EWJc*%Tl=lM4Sb`)FxM52zKLqUR&OZ?Zn1i%y_aXUjD&n*tS< zG}MIetvfG>gH%Z>xx-QQpMJcbHM4!IhZIXmXIFyP8OGK98*E@Usewr?l_ZtWoQ z5gEsRt=I96>ks=1Uyb;aF}_q&H_%Gn|1#F_*+(=c{aa|i@(;n`gHV*Ipz{N8)8$hx z__M`MiP^w6ofRB>$S}h|jo+ykWc~%!Wi@bPosK0llIf`{C-u5Bh6*V`x+lh53cw+_P~7R?SNPYigYc;uWw=;Q70Uzolb>6Z6Wpa_{;URcB1H_L zQhS}7hh&6|O3Z!CBr2>rE?rQ{;OWH6b&sdRk+_=B>;->2%)#&&X_TL>BcEv}wmg2t zt1%t$OnTZHCV8GQKfD?D2>Z*>Hqoxb5DO@=8ZZN3s$InP4w^>H7~Ati*=s1$NOleI zbd?-2YZo>Vuzt|o(wy2!Q8EMEXWI9L5~7JNsG$Az%)2uI$)$6i<&mY93V99`JDgD1Ss0UZdHj#e;?=ae^W!Q%Uq{l^A2UE zfmbYRimu{NXV3OUePVJ2fUI9YH5{%ihjEWicQ)^QW~yVKd|(e23Dq`eFz{AXjvy+V z-al(y-ls{%XfVrqv1V>`8P#qO)eqlUQU9I+_ zJ_+YF=eOk#c&mlqzR9^bFPD~Yy-qaUhtw1_%%jLVy_-u=IPbm4@bHM4biK&Pj2X%p!cLefz&DJO%^j4>L}%EwFx@4F3h+^kC`3u)s(u z<;~%G?Vs|>b}r@@KkcZQ9_=1|OFh9C6Ksu1pCvI^7)2U=Ig(fi)O3MmTKbT8mVen# z+22VTuals8{4r>9B+BEXy1-}WDq=Ok_>opJUQ)92aXVzq9^Fs6q+ zt>|AJwr;$bo0N?88=N^5fZJ)iW9h9w7^hVV!c%VVbUeAVyPSt45j!1a)?u?As-`6H z$`=?xE>})ydmHu&)Wm$&($h$@9Iy5Jnc8`S_^X{xMn0#j7k$5Wa6RdN0 zmgtQC+C4~IInnNp%%pCeL~iK$5s?B%fv38JS8a8_GOfQO?Otiql4V8Q{InoOXsjS> zbD?qRN&Q~NZ^*`UDOcUmhA$Ij|88?eiZM2y2nRngJft~hd-#MQb^pwHzy;%>u&!5Z zUlFP6Vu!_cp>9+K)fv;=og3 
z*5TaVz(I>>@y7~o7GgbS`+sUy;y;j~dqm9k%aoNgG#?Kt{>|J+a<;|4(GRtnugv8y zA{eF7WgoegAQi0BIxQ~lF~DxMu+wre?;H_M4L|xSxVz4u{sSfn^_Q3WRNH3DCSRQQz)zfypyNv_qW4Nm3an|Z1RbAt z=e7lygh>fTIoY{Q?;V>_+oo|DIBam(Vs70tBdat$|9k@(P34$J{!&lUf44p;I{3PU z+ON%rlmh-eC$!1>T%VtitJd*65ifM)x=^1ytA=zXHBILqMvk3b&LLkJ@zMAL2E#~P z>`A`19PO7f=R00w6%xLI0tVA1S@iqU$d~QfbvbiiJ`&b4D!henix_*~oK(OH{WYMq&N9XJz0CgTOBMSkZXwxDG>BMf5?p_QpG` zxnRI=wzSU|Z#%nTChuglVGmJjS-=R4E0ToDSTx}o_y#qtR?ViI_!?R$%3CM`#qPyP z?{}M9thTZuc_N|si7yr{8n18(6pi#E+MFxRPi{rFr_Z@5Urnb3e|vT zmG`@gz3mAbuyM!}5?_ z@008p{6Ukf?%XfU=l3F4%^L%7+bILNR}X{yFmaypCgz^mDBZ2I3qOQ{IDG88RG>|u z#gbw zaBM+}&-TLt`3sMAZtSAK%H&Jp;`h@FJY% z7m}0|G^2RQ7Jc>3JoX zCV5Z=;!JLZ9KVLPD{^3xZ)K8QO3UAwk*i3CFCFIgK+>BT4=npw)8bw=sD*uYHunGV ztxYFx8abobuYthV!B8AeWM?T|n&S2dCh67=NCq`7ewt;_tun;~P+OvEe7WN~JcBGK zd`hBZFMgyEj*i6L=}=G5V+_5SUU}SGMKsPMxJq+lxwRSGcJjp3T7Zwv*;cm~^)=x$ zX}hO!R#9w;8DxYy$NX3RD%UZ3*B?^2>Z5r=g(I^Kes~lb5Dm->CY)65_pWE#+-GVh z=H9hYd8<<1c9D>aC36zkOVAzp8J#WAn__Cj2HT$rK52f^4^Er2&M95G*sK!c)rhze zdSPv?VNnyhPLrR}a!-vERr~wtE~AJ5kl9ylqNJ?O2lrZU( zA_;3BnJ_&BXy?rchM@BFGs|Z1t7l)iFMp?~37p128d(7`RI7_1S-u*_D_HQ1-~{Hh zZxFPAV@&G@PCyT+73cZs58YuDE9z!#apozyj85R>0uV znnQ~}n+;1r;}-QGcSH_8uY*^?WyYUAdDMJuQP;Sq5obRna%l&{hx2Ee#<-*?)UE{& z&Uh8{d5LOL5`T`Fh950-e0(Q*wpE56&AqKYzpxcwvVfW-ZoKrFX!pQo*4f`bqexHo z_I(vtQY@ctgqq4lM1**iYT;jCm``>=>xXj??s}=!PvVc!BBQPPeMyEsxvj08{Y@91 zXM5pEnW@S9VBSN(v3lDUwi0YIIc_@ho$H{zT1|zzb^r!paFwu9U-n%iH&qbATE9#i zps0ts%%E^`|xK=9KXUl1OuHb zc-E&MQs!+xI3<^z%?_nc%c1kEI(ZLjwQ{lZ7+hi9Hvel8WE!zWw*))?8aonnK+;~G zSc;U|zj6q`RL0$4hm>iIDO0njX-Z7e<2|}@YF=m5f6NbV?>cxp*_I9qAaQ*e(`0rf?`w53P z4b^9EJ>7@Q(SxTFTN+7V=ow0!!>TwPzN0X5%IB|WqmA60UYG{Baj8@kteT&QXVxai zfuDN*vYeiLu4-zq7#)SKzE>1Et# z@86UVeLR*cw>?pV-oY(ldwl=182I(gH@P`rzw`9NJ47GtGw(Ef-S7;iZIeCN z7b_;rvM)>WM8{7@C>W$pk85q5 z9ClNR5fw0~myV-bIq&Q3qi$Q{P|&h}&XR8Dl}(q{s{8XZd85G^dYh)!b>Grc%`&6u z*Ti$DNgiSH%IW-Z%<&0QF%9gz-yvGLV0Xmo6uQopsu 
zqur}sBjx!cdk3@?Crg|Lt+9o;CtgXj`TmG^SsEyJDBp7-p`Gm$q$pH@GAixNOpmzn3-LAFQ$=q`+lLe3(ol`!IgF0Jrlr#SqiEpz`I$t4G(uNh20&MbP#rMcRiEFmnyT;@T z8WS;R3RvE5&Wy@1=Jj=0*4nJ;IiE12G_)Y9MLsNFfRs7RupD&j!6nnThFYf|;yuo@ zB~nBmCa<*IFiN<57wyW{^;lk;#h8b`VtQNHSm2J%K)}NItkT+Stu_B6&%I|+5j?a} zxfz07s5p$qxFLBh-bCV{?z|$*>S57rzvy%^8)~gIQwkllUNF4!6Kz{Nl;LhMfzY{w zMJ8P`N21abKlS{y``+yv*>G`zAqOH}EuXNK7;NRBCUT!La4jWfV=_}v0MoTFMgPzY zoY{@9qD1Z}LeT0z)dCiI>RA<6dw8VsJ~a(o!R}d=>dhM#mkd(x{v3B}`=QdbgLa5_ zS6}IZKSbZh^u^m=B_J|h8>1Y{nZ5DheymqfU-QUV*^e7mVcT+6v@ogBIZU+q<{ICz z<=eVXTQOB3Q3jQWit{wJ%xd%JvnDqk$^xXtdoTmYE9ud@X1+qXZ$ zSoIpzS*b;u7JFcKOJF<&Q#$JkgA2?H*bZ&Oyo2gg*7@Mh+0GrDdifE^UVoU)O4d*h z&yjmsbOU@u>P4aohjo^=^S#bV{vpKy^~EylQf&Tqrhl0g>iM(Y+K}HY8WPgMzc=#M zFz9YU0j$aHP!(Zj!l$JUJ9If>&zI;lS-snUqj3rdCK@m?yDAB_J93k`8zen~2mSSB zPIvF|&UoAx?E5LBK~zF%AKHBwle)pfUE>NPgwG@jZhvqVl!@g6wd--r|)z7|lHheq7%yVv5$)gB3 zb|#YIa2=7Fox0DO+ZIEkNK0)GQ<%|FcEYFA3<#y%n`2JZuARAM>a@mXQ8tNgaE8^) zGwE*v9jL*=f?kl|a4Uey}8nMj{>_e|1cPceHF;Ylp#80-fG`j(XL{k`Fap=wHd- z$}}vce)#7sR?==CQ(l1B*cP24$5U5Vb9HVi-BrZId2@_=kW91x9!W4J3H=#f*UVXF z#Luw(5D2P+-$s*wPZ24kDsohLRTehJb>PCAbe9{mIcT}zhih+5!;lW9padRT(01sS zx`wgu?Ekdm^lhLWof_YLNQw%n>(x)9$72NNR7FaU;EA6wVNuwVvc`>qPTH;d7BZ7+ zQBAi2{MjtX1B?(QHR7_Z4H6B;1I~eqwc#0RL7QiFqGTqLtzD2DSh^`ToCW4$p}kYn zTv+a1HF|sT%O(!$*5_t3mfFs3-x1fId%v{5e3-=0Xz1-p1cd%)lycQf%2SFs#IOod z`h!#mr>qN9qrP=RZDma_`_ulMSA8V4VW3*R9Y68pgzn;SwPuHypjsJkf}4S>RntZX zPBmO@4J)=&mq4)Dq5f5QjUEU#JrD$2T5LEYF|oyo$=XA`J8YhTvr z{WY95_BIcawHdxK4)#zn!HPXUo#eRW4ost>2wQOg-f6n71XxK$nE_aw`3Gm20QVi$)BhQ3XBV{aE;pZLB_bC(;xnsr-^pKg*rmrkJt z$$WWnqf{6nZyLF^PK^zl*AJf7{~GmW=1|7ZE`P$u+=gtr?)XVhw zqMB;3(F(=#l9o?`0@G_J9jU4Pty6VlwjQXoO<*RDUpUWcQW6XRD#~d*7QgEjVbe1BXowbIpWGV&Jej?RA5!fbP1yqMjy5YyAFo4es$V(%`ex|Hs{1 zhgG?CeWQ{hibx33Al(hpC`y+gAl=d+-6bkYcc*kCU5hY48WyqWhDA3lVF{cYaPMdD zXK(TS<9n~~obwMCtaUTz9Al37jhOS^E!-lR`#=tmz}D{BQ9z9_3@^@_$}R$o#3O{5 zNID9hYKFXPYay{w?hd^3CzHz)HTeuEz?MZ%N@qxuH>0&#5!IdE9uu+Nq@j+41ZZv~OZi`S3{06nd++ 
zo>-g4H8xl1(_m!EfvAqHW}VOtN4}?=7o4gZ!!A8&bevSw6d4@GG8VfA(DhEdIVN&j zM38lFQke^uj*BeKe8H+vJw}mpR_UfHX4vA)I1ktK+s~up>%BI5<4iuJ=F6JukgPR) z;HGUp6QMA590%{MCRyk1w)8;-1{F<1L|&3lrGpYr#<@Iq+|Yd6yG0y^?~M74Nt?(; z9LYw7A&qDte*`|k@)0ucTT`B=(G=m5EZ^DkFc__L$~oA9tNZc!>O}Pl5eo%`Ra+vo zd*SsFPwg}8@PKXOgWYyyboO1AkStzb4O_^V&{R0dm(W|Qiat(XW%LN& zx}-5z5CPtLww{NR`2_Ng>YlpmiAhP*A-`ow+hHBk;c=I0Jz^nDyWUfw6HD}bRyt9@ zFW+n?+$wBnBHALIIW;B+%mBwSa=zk6#Qy74Up1-oB)--u?nvW|yzEK)q0%#eDZ(pU zZy4%)gb~W-X_0k6aq#4Pq+C;k#Oxs*?+wQholu;T&hyE}foR^pQE%=2SltI;cf(lN zEOwFOIoM;|WNmU~dKH%>qIJ$VpjmEcgRcRBQPbnyJ1s^O2xgF=23Cms6LLs*7@va*6F9fuFAF#ula%yb%o)50adyC$7)={(GqFs z$R2AU?gnci-bb0645q3FyuL8!q3N~?wr2odNoS(EuNWqZB`%5Q;LSmKr^v_id#&TR zfTK~X+Pc_NqDN=(se&v#)x1RKD*=}Cm{G5!lAUk-+qkqPDybUnDCrD1Y~J)y^>TOp#o6K*qZtMdwC`0?;+z~)~s(Zv@kO$ z1BR8cd^(r=l5m&KY(C(`gZ;bKRyl1ZH70%q>gm<-f z8mfod1c%`)y*eLMFSi%>3BaGgPyDBEc2BK;zsqfnhfbbM1j_1KL6^E&dGi4* zOy1WE^aT|n1Et4Qb+8} ziwgiC!SwSO=iHXLq64WB*Tc@5;Q*NPd`EA$J%B10t1(SFwbEW0>h*{IkT!Up8Z4p6 z=6beM;~X!dkVkq+Jdm;}nlhVGjVbH;|h({b0@J#OdMKEv9} zc9S8>>G)-zR&L~?vrnc@yVc!0b`Bpl_8q$`D}Y%K`kiAmNj&4D z0FZX62j_dG!ZHKM9GNMRH+kbhqw^sq%hBW#{uk0e*;R#@!s5x^h3wY02&($Ut$LgK)4GV@i&=e!X8>wjg>Tjb|ki6)4b(=Ji~ z@O7~tb4c;#)U=8iYm7G$05MKIA7e<+DoU}J8fh10@fVl@);gpotl+0fyRd85e0`dE z<-H#b6pzBul4ubAoFeTLvWvHxK8d&^;2eETkq!EB#pwS-<1c;_RrK*O>U)iQY3BYm z(Vruq&-+dic%Ez?wM9RyhK5NWy61szK>W)q3&kC`8R3*Cs#zf(VnS^Mm!DY-usf3{?G6K z!x+)-8xASUDh7!KjO6O?g+!BovxHm&-&HNKvPT)8RelI&{I$;gmuG1tfin7m?1zNUaUi?2!jh7)MyGdO^`{rqP(+*c6E*#3r~d0V9|a$uOiVxR-?8z~4a{tDl6`py1} zl56-Ln+6VmRh=9&JJu~prWB~lerrQ2@89thRccTXZn!-yT#t6^MpvEQTiFu#oCh}P zF4mFzmhi@JMfI~++k{+UUm(j-fcYNp30)spSmDbg^&Izmlf~7u1mOMGI>Kv}yIsMo z;x4oSkFHcaIwXM7QU3I-lZ?3u_p)Rby2;2b8Q54Tu7h0Vi;((INm;+})9$_7v1XjC z;U)`vr$)a~Oope;MTd|CpG@7)O=Qm9gbxJzQn^+=p?IOkW9IdJk*$3+eCmV=^7pT_ zjiSaE=75oSkE}FQbO&WZG{#*po;um+$}RRq$0H|KZ%I}QR|#!vqL-gDb9A=Ok^C`k zW=8xkD)|>%@a)Ys{y8raOe;9|qtmM*V z1GZRYs)Y;l?C}Qn7!?I~gyPvEL8Q01Los+?30)b{Dr8Ju=8o=Eprx5AYgsfdL5kNl1$BAWCM)kMXBa?)5R$Vda 
zk5{tTH7b6fVTex^AdJfFW03~k;u(2W6WczhPLySPG%;g#C9?7zy8}pZ@yW#C;SCJ0 zmE~`Uyb2OfxdPpIhzj$@2H`uc92i0OAgG)0nszG0Gaxz-SNx_Lan8n%H-yahR5%6s z#0d>Vz!s778WJkE@UL_#(F|+}qR+CbDqpQ`$PI2sds}N4z?17X=WGt3e*8jfi0|8{ zmwS2NgpBMh*pX0=(vO#TQCMVHC8*l!Le~5qnn9CVdr1fRf5^|tODL=^wf7&N;c%ZJ zyq~u{)4dV2#GII6VLp{+l|lg4N_=}j_`Y*;4#OG?A9$mcPL^3+k4Yz%?b zm9XUX5XmOitk3F)#E-i}lA1zhtAti-KV}cNCha$4@9*3Nm!Ix5*FNFzw(im{623Xm zDqc0Fi|JKoQ!nk;yX|x9+oiJym;2V*8`47VO;JPJZO^ebcR+*di1iV13fYo z?%kordfOG-yOWxq2e>Qqjw~@MNN^r(X;t64P0X4Id4o*JT#Y*52GJ=-=&W8dl_Png zj;h%o#3J}4xaU?yU+tP><~-3Opzh_mH>PV^x{FNWnLzN`I%cUh7P*7+KXJx?TUfxZ z=Gp-+^2wGSKJlA4n9!tu7A&F7?!}g-cWla;t7OVjSVcOO!W6z&PcpNo+BA_@Pb@!f z>5l|fV`Aq(<7cpN(TpT_CDe&038a`wd$hNLlWy(ooc5#!167BAn8-iPcp4ruPzk!s zE8-ys4v9m(D23J<6V-OCw>y13QmRwe&tm9(_m7~wYYrc1QNhU*A7F4pav{x6fkDXeN9R~5SRPzl9x)xZONANMj3SL~#&@+h)nU$Kg7zQB2E zvM^km=vJ*m;Q+OrRo2my&?@#Ov!`&-w7;9|2=4=FJMimyu|0lRZQs7-dq52yDx5Hl zuYfc0+a4XYR%*9e_1t2jx*^s>K1Fg{YvT>5>MvUmbrI|T%kTZ|Ptl>9gKC~P zGpuHKFgPYv6~pDNS?Rnv$|LM2hr+|F>o9aEI2~A*H=iv zl@#80CCT_^6%rOdg_qx@m*@{_;6=7rcYM#JI{o#HY|D7YVwWsqXCO&#o9FMafy#NA zIjqNV(*#`Zvo8{PWD!vm|?N!LqR~q|e!AT`G`3i(C^~B57&JX3@ z-gJ^+K><$ob*j(BPS)8WnOzU&4~(c5oR?8y?|7{h(hUf^Zxb#`a2T*aM{`wpu9Ph| z6rj-fBbf0MVZ`a&C|R;kr_IKre9T3_p;IYSPapF^jA1YhF9`UMi zX`ze%i*o#wU|&02@&`&GAf+yT*QpB8osN#H{Uz^ozKwcH?5Gny`YUTsP8y_0|Li z$Fgv03{QgJNHb+dZIH#4LM#o05$btDykl+x)lv`%L5}&|uLNzbq;4|1UC-4=lXArE z%*C^=XWC5KaDpFP4e9PZOq0%;AWk3R_S8U%XxRwUQ_`UyA_&`m6ddwR=C$m9)AV=P zX@-bQHSoZEruplq>{~+88e_W3ifhmmNG>I_0^M?gbUSgVKZ+G*44~F((m1)aS7BsD z7GSW>ELv$5OjfQDnz;!R^!!f?QmF$!25#16D{)}fG$H4&x;k>0&E55NxKhm#knPZM z0nDOm;b5^v2?g=-ks_Y-PEyNPp;~Xa+C)9+RJfVxPtBxtYQ8b=?4O(;P*J2NRJc(i zl*w!py|ifZHWMmuZlE&LL)p(nKLjYovNH9cb_)is?;nm z;@#-;F_NGY)m}$6)3DWFt~HwbaCa->xNh9BPL(zOS+ofJx^Slaj^4O|nv^npSINp_?hf2D0_51*LDkT7p-=6Ha@%)^}oUzAaVqhOCa| zvaWg_je`XjQ@L_$s6eJA4-KZ=%*@=QZ`BlJ*+7Nv6CExuH=+{U6O>*x^v+R82^=WN z17|R+ok+pmimG@$sdAy+xC+Bk_V8M4GA(Lr$O*Vw-$Yu!C1BLtrRK{7(ZN{fkc-A# zlFZ`Ok#{aV;BwwLR0CLTbo$vh>bwT_S{Ya;B{@A2>Q{@ixMAq=vlFmW1EwTMSR;EK 
z3sUt&;yW|=X}ErGw8Vay-vCQdw%_+n1?gyhq$|J%> zEp13#y7P)ttE8fY+vHa|z;E~OoJ19=TR*mozu{0)`w%JkEV^EG>dWJ>?!8wnl2ES< zh;9n}7+s|PNJHCVAQ^aJ*XFk|Y=s=5L!*!UAo0C*4DlD;-8ZhZNlVCRlEYfDaKef5 zLu?ENFI6(X#=&ssR`WTn>mSBJ|)6Dfyn@7GQ5%|iB`xWwo`eHd;6XMn~)!gYcI;c4|=4}Ys@r}AY#fp zIIgXux$fOInI`^4)vmStR?@2OuTxLlyozq!Ce70^c^BPsW$s}xP1KRuq1tXp-f*CKjJe5=+t9!;=TKXG^=zrg}$?-eCgvSknaSh{<}IXXC^%Q@31 zt17~c@)tqv#Z=*F0G`k~j{?ffB&>sO$wZW8N&gnK?Y|TXQ$q{QwC?#pCI?FTc#1#?GT*GDonwqPo!) zj`AE&K0>G-o2EmI@Cw!#)vd9N_|m`{Ap89TSM`bP+@|zs!zPWDm!Lq)`vWj|_o3~k zGGR~>@i*uM9<#kCc{1`Xb9OL|mOM_tiL0v2=n`hn-5KFcTg4 zRiy-twst>!47+AD<`^>#PeY~j4v+6*zshw1dxT8odOJv`xi)rx&S^y*e$tQsDf$Xw z1pZ@)snq$N7B_Er6$l7{F}5omN_+(y;9nrfe@^mG2SorYC6#t z7)3=nbn_D0o!6{ahR>*Wa^qD86tAKR!0Big{u@6hKPuoVh;wiZn|6HGF@C}y&4~)%sN4I~Q1x7%=f1b9br+ss!HwGs0(FM}Bx_(y%LgxtL z-7_GKmYIZmrRh8;!3%JkUKqbgPy*F+CE&SmJEeQSkx{hofUo$U)Gj1ZRwsm9z-|YL$O1|1kp~!AkPz7ri~d zw&d3knyw*8HKD?PLG&NPk>AM!8qR_CvRhXH%kz1DARe3|9k!8|+GP-Z9@4s66%=nE zCeyk_D|XxEQ&LiO=JDp<{0(6$G(d+`u0T|-%ixD7|38=aFWVN>bgkStqz?0v^^j)w%R-ro#V?;s>Bb{ccKxzhN5McL3TQ zv;Ed?{4Z3Gazm>gFk2a`N#u9^f3j#As*~n=gmC}kw0TQP$u>00WaD9@p_826Y`}bG zPdzo#Jgd2*hxpicQkLho?*`qQz|!C0`({N@@J`V^=PN3%zGW9(LJ+@mn4ols&yq;3 z^Vwi$_4b`!ZnNifEVk9!Ur0Z0VDF_wZ*qJ0FMVkPN%`%VDd|@0x(Q9}?;D0-BYylY zJU7%U4|#bL9NIaP+mpzW5<8XCBgf#EaM{N53tgoX1H{I7gq}hr0EprQxW2o@qmqRV z`kHaR-CHNg#M{GE9VqX%>NV|KmY74eiAp)(HSPF*oz|bR`>!1o|1=q*!N42bnbtyt zKk6iC^bCkDv~l05rSg2+g~cGu^{Plt;%SxC{I1#V;Bl-(&8vZb4)8a9e#`=xFgx%& zbW1ZEELst;Xl8L|Ec-G02TjG_RrSA~BP7e&{-`FsuM~3=;>mW7r=~2>pUqakRO*f= zfS0S?f1FRsFSqu3M8)kMX^=x5UMAFf_-vY)y+jRl(g{P0XO+AI4DVm0c$XL&>lm%o1{wPf6u=xV@gg(6m3nY9#*gykNgbYm8IIAE%4HbF+ zJHQaYI8(G6J>A0ZCmd`$olJ96Y;9)Yu+IVes;t|&L$n7(b8+zD&PY<&(Vi*aIC#Wa{NGSt6Z`qHH9uC3pk*&^V;oH%L=mM&>%A4p!V{}>7o62;yyS7 zTzp_461K^WnU%H3|GaeWPF}2Ywz2I;iA?A2w-prz#JCwEhXNLLvWc$mk8fZ1(q4aJ z)f=5h!&J9@fE}ov>1iJ)jGw?=B4_jC-68xea;kt#a?$XrhJ;oi5o_1=8Vj$l-XxL> zf@$tUCB(4O09$_7B99R!gg-l3hCk$xj!M2JoOAGiC!C&Ocqc+2VbwMA;OzRkXN7b! 
zj125BYX){#(I4pKDRBvXJ69YQC)@uBr&{KuckgiE1jnCriW&?)btG^(vRfI~Yw7#M zLo~V~_q{@AUwFr0rLjFi&_;LCGkJL4aZYZ%5ALa=Gj=NFy3~pvslIz`hR}E;1+}R` zxqJcAAEy&ZXnsc(>0`H*NOZD0OIHN1a~Umu;Flih-i3A72yf9cmP6E{wkD&f9U>tX z9nrG5lYTbP6VHt!feEVKjeevJ91`DAaE&T#n%hGOCfNIG#$QJ8ko!bRlmo;6ikeF6 zc`J|MUf}mt40kTJGJ3^aTDS$<#`yhC2U7!fUW{R`YF*}!$El`5Z(oHUP4x^chX$G{ zvTreG`_|Z)zf>f{3+l3fU(ZWCc8_=0G1RGzjK^aZd33Tl#H0NobX(7eV{2OhgRsMx z!*N*LOiS4Lf$M8f%wcu=>ycD@WJ*62mnkBkk8Xk>%db3Bh$%5XlC21NeBLOV_?WjroP0bWepkLD zg3`SV+6{@EZcj;Z_{l{C?JlY106jaFS z86C5g?s@SOt(P-@FmBL&dO)vxXI8CVyH0-A=3oMH0EJVNdY7P1ohp-w3>zk*mp6AO z=2B9ML_3AsH<2=MdWFl+nnS&E+x*O2-(pTBLUO;lb}lVHE8{g~vs>EE$`b-Agllz9 zi(ltZr>Sh6P*1nk-7VBJy_wyIBF4eNSJ17JbN7ES^%Kf_Um#T)`N+M6B8czg_ zL+cf*iq{UZFt(~Hs+S-7qdK5#BJ~fbP+Hk<8e=SIQwdo#B|bPjvw1q6hiwl*MGWBi zY9Y2RmM}y*S}mo_ElS6;%jWr?I^#Z$TZLp!cyMjOLCR&N@uSPnN~4f%X`wvL@Oa(p zs^S;NOA|60!*=I&&*K{cWe2yQUK&ZTheaj_-h63BCs!shN&#doCo_Qpd|@YCgjkKC=9!Mw5DywHi1~GWKNBqVAx-7@^V6c&|qj zX>mb*gvNU2!OHC$;$x19kkiL=NIAyzAGl?X73~JzMCiKFHyKl8%sQ9W(VuxH9gi#!gkH;*Ew`jScQcU}s=B zI+##sPIx|YE9nse?$#M<8qk|0q4>(1>PRtQUq@@)teqKecm3?GxV)EDP3b}k z%()#DlsfAxP2iEUukC3gJ~gAirBZF7tBWQ0^a*{|M(FxW`Qm<`{p;l>Ggo1>$IJk< zGw-MCTvLZhVBnGFQQj3P`FaRt8MBCZZW&xrai*2P@;pIcYpUEnMs8r&?;#KLIMcg+ zvOI1)m22jxsKI)sJ^G>u%cO;ubbp9rjH_%`xliZ5=CBYE9V3(eOBg;ix*?+ ztWIg9uq<%Hh=nq|orD(iR*^!S?68`TEsD4y8;J>`PRAxwaLn|JnK>KHQh;QYY%U6$rQv*;9Y&pdk0&x z%PCURIncsge7sIq<2HWp;SVdRPF{y?RkQdy0V+^w=5d_d`0?Is;6!>$TCuz3cMk-2 zKR--qZEXbnXR^aFVF`G7B9At?k_LquInhlZhm{Nc;#O^h$ufGDPy<%=HsCL-tl;5 z4)iWz)DKk`c7D$S`!fQ!9x1k$&@b6h-;zH**+*OaB?qZkBjV;EB@q8h?C9F?N?Dj6i{;gniqkzU~#8EdiI*iu(y z(tHI`%8FWIp^HY0lnAtw7Oas=Rkd})bG5#1L!FFV%=R79iKvzA%q5XX10jq%&5;t7 zV=EgkH&F>3QpxP?4Upm~C9MW08!GIC@Nb=F8-H_}sWcK$R&<+?W1?#^IRd?6p{KJq zgJ-1WhZ9_3Yy((<-h=P0Ss*dNQ&uH4{9BT(4V0Dld2>`+YfZRZTun6+pd=AaTJNv- zjA!mtDs$!w1EGOeVmQyCSa10Z^dkWZNtkWY)@+0aXZ9{1!L2XYhEu0`d#)QUfqNZ1 zW3pN6q5Z?*YgcK5DdH0}$-1E@1l^s7=2mW zl_w=u7Go9x<#`kIUEl*Dh`@BjrC^A&B2yE 
zFH+galdLQdbC{0Qjhz})Dx~oijzJ;Nda0euJuNcAj!+{lzKzK5@5OYz)ZPW=}#Gy*8j7ShZ^J{2QVP5RSt0qJo56sgi=we;RDrx@6twz7h?|I|g!Gr(lsL*~GX(-IvbEyn)Zwk&AA$ZLF@ zUTl|r;k1V}7Y&2kYaV&sj6Gp7RrN+dPRt{3zJ6R}n*w9A)i3&;HnrSFr{4+%y?w7m zRCd+bu-+_4V>pX~k85Rka&C?!9V)mbzVUfatd|-GzsMyv#=HJ7F;2L^p(qkyQIpAq7(Dk?nEs;_ z8ANY-65H9?bbC4Wc;x=cN$(F(c)tx6Q1Z z#33ZNZYF`A&_E`0C{Kj*(m}d1(>?x=dDf+(hCI?y%No>H3!BKm@YFcT0{e0tk}T?*BhCEikS>9=;bI=Ug8hbjp~wR5R7%-DWgZX zPYQOriMNLTwhVFjCgl%K{l^I zKzU-Y29>tnF=}>T?DVqvAza_TbApWye0W3Xbb6L+It8kI|KQlvjH^i))cZ(iZSBjm zDG7(InZc)x0y{#wVRcgYW=)w2djeczPL*G13Ww8{%iX3)G>mK^fn6%lhy#gY>6f>! zpir6_h|II_SQk%{>L1pnYVgc416i%IFV3m?8O-IfYY7bcuTU#*=fBg-RB_#_)a34S z390Htw4K$wZ%P|jsa++$Fr{bYGMtnrWZ|8b@w_R9GZclw|5ZwhUZy=iI?t&Br52gj z*cd_V1nf|d2t;^GoF7u8-0DTwF!N4XW|Ry|EqU$=W3RUlsH21p{z$?T<(;+~ zbv;`)uj2he&$d3?bk``8mJix~w99Vq*0@EZEnb1O-fVO@+G+J16rO~mDk==~2nnyk z1z?8E^Bq{l&ivR1o(a%E!fP1n^&HS4qJ>4M#*>UseD;g{qxJKCgm;GMO_M{0yX@^Z zNio8zR`;8{yY~VloPm2Gee)3qbBa>@ILX=)^r%0dSUAa z;#hXe+EORcc3oSR7qti(*-$qtqsk)bLFadQBr;$ zi{@)u7Z{YguSKK3$3s++_%&cR-+k8&i3kdvq-!> zJd8A%4_hA)g>D(|9m6A30)Cjtzy+-$i*Kxk^fd}RhFgXfBjsBK+C)s5cyRz%QmD@= z9Y{nuW;6Y&cl4UVbEW9V!W=yla@YYI$(tSdEHvt2M zklTg&FgK-Wi-CwbeoL~wzZ#x*ps@CJO+vT%Uh%rlUZ##!XlihJ6j)0m|CCP7qUNgd zpAk@8=(;w0mHE2RtJeE7s_+sTpm$U6^7Qx>)}WQ9Z***ym3&F$_s`yWrv#J-q7tI6 z?4Uf?j>{gy2fxfc|NO$24oGH+=Sitw0VFQ;FN_8VmxKMT$TwaZl>dbVu4XV<<}y|D zA9K)SUBSnT`57Kvpu`j)+ypL1sO%fW82L@v;hHZY&bI0i;Y3SRkub*p;IP;K>+7BW zr%%hJ36ITHRGc3gOI4^_`0HJm;^z#DYs%1hzbIlE8k+{Gu;N32E zWApQ>l-7c7zQ&ND_E)vgcabt*BIgHg?t(G-E|om(5SeB**8YV>`Nod)q=+sJ@RqDt zs~;j5@;dYimgD3^Jp_e9G3e&(-jr&<%}FZf(&meehAc8L+o?*e2gMs=1)TA?nqJpi`EfLv-EAO6X>adT~^BT4=s{rxh3``)+w7Lpfd;c zhH0aCUe6D*$>!7``6-{=g3Ut53ZwZ&Lz1Go?~Dn*$7uebXY)z^)3ox+&(lm7 zU04MYKA4dOIjm+k5bxe^zHWaP{IoWIbZUqk?xtoM6UaP%%1P6~C&4Sd_OQI+?G0oy z`+nPta3ie>d0mDNE??D#tj^(_pH%B0GCD*CtK$LQZ@%d&UDa4>@_w*=qs(recfr*; zzR+w^Me~vIh{mZn%lQRk7Z3orWz2+s5sWjKF$N$5Rd-B?2K+l)?XX+Y;eACOL=QGK z9&Prz04=vp;**(uJka-%8z18|ck*4QMkRWpkNLArNJ!|^E+k|y!N=1T8ua-vQu%f~ 
zvq%lhOdVEFtL-i5^>LlC?ORrL61`eVZPBo7#|O}lU@g2!QR)jD*rDmu>(Ei}ovVF- zw(WSfx+0@tKt{90=*;lt>!JScbd#I+(Gs5wjyEo6*_#~T){r27(Oo}2xVBMKHn$H%iI0=g1UypGnz1c?Zw*f_of{5(r1ey0?o;7(Er zQFhm^gh0NmF>QH@hcSLK1Gs|7@Z_Q;Z!q9m>dK(B!a$D_V62(4cwh`E*T`t_dt)N7 zMD@A=uJFG2GV`oo$wN!|Cea%Zxa5ziJs`76kcB-tks@PylAhm}g-dG@%N zoV>IE(LhuUq%)8s5LW^z4dByyk2)Oqjt=n?wh9MO%rinF04-oAw^qeKAkW=R(__);{a{zWe5j#pXx(R3BIp$)7B~C>cNFb{1Yb!+x6~oH zmANtKY}GR^D(KqQxAvL{#!Gwx2Y57F;3K+&c;lI59fWB91lL>7^1Y}I0(#^g_D3K$ z8C7x(#eUMmsx`Fk`KKjPE^9?r``Ouyx+A?2q02F%S<_&ez!iBXkRTG*r;Dk!yh#*W z-zI%}ST4%l`A>Z~lFb`dlkA@&mM?J=ZAozCGBQMJ5p~B;C0Vd{-gze{dNVROosEx2 zk>k-dU!|L-EfYc|mTU!88++(WUYe_hxnn1_OO)crW?CMDrWr_sk(bODC09oPO|N=7 z_fU3LD(&)*67>q%M(%|WSJNH+VXlF>3%Qs?o3UTM&?HO1S*9>E?xjMMLO0bfqe732 zd`x45d`D8LnK4dgBO^nPrF}BIns&RTNiL$}TO@pNsBR$V*51fEHvw5A*v(ihIJu+Ugy9EwyBHtjz7>Y`4q(i#h$Dd z=94@AQ3oH{4U(LpnL6^IZ$-H`LlK{96!&tkULo%{7Qm(l#Sbb?@GICEp?EVSeASpT zmwf}0qRndAVul6CuiMMO$aHTCDi}4Gp5RW8PsMpxDur$9o78Fq@8v$GBQ|o75Nf_5WXD0! z^g)->^jk@x6xFxutyLwaM?_mV7d6(W6O6=#1gqrxf1JvtD{9WfT7lcFdTEV;)V8Nw zB#LJOJM%91A60(Mn19j3@j;axPTHfgiDI-i92K8W3%N>#08flrMO(D;Si{HD+=)3$ z)$*LXPA~U0*Uh&FZqs&Bv-)rj31EP*jNPSIBrgNT3(tFwg&_8ZO7|5h1F$+w&`V+- zM6}mvifs6=A{Ln1q`n5MO&9RAgTj8&1$>ve)db&aqbOi2phK7n4>T~Oyo6_UbfWR{ z_z0s7`M{nP=67m8q?OUPL(!I%2TTZE9ODsi)gE_QV@z9GL}$6!g_F(4FWqc<&G2q- zyjj35DPiF1qA=y15N{P8|wHj9&=`d-3-Yft=dl<~mbNqoR6 z)o!c#8vY3u+$sQl6gu9u-M&zEAr(MiQ`{CGY;N9p*YpVdpu7mjtSUSHw>`8BIOMA8 z(b_KoYA#9psUrW~h6UVVL)9l%7UkdD_=_k0{QvViY}PjB(;tC{^$7p`^uGuE`9&QK zn~7U_G6IFB6-M?IZ7vu8POgf;KxMc{#d+mYfssp5a>FXP<9^+ja=9Ac=fHD8(UD!h zBdHlL?SDj#!QHe+9IEmhzixu~$%ackG5Z}I?^M!Qh9{KbYawl^w0ytqcn`f3$6dnb znPi{M+onrFAD`{qjC`r1Ki%HHWCx$c`*4o)&muEW6;1p_Tyiaar-iFqEyso;gxez% z-^OLMV#<2IK>WA;^!@HGX#n6f75dWptnR{*GKef`KVI8t_6);-sJM z=0D391|-0f*aJYV2FTX#FDm2VC5|tuaT|k)6@iOdJ09CB4a^vn84z9QTpUV@R@Kmt z?>o`1_9Vk4JwYUcnqF>MUS#anJ_P+uYEv7}5q!<@T=a)-@6|&g2}f(r30Cf7WlDpR znnhx*`@FQ=w|SssJmCHOqV_tLn}3LjZwghZK2*3|osiCYa)6;%tSAqfMB%v>1mhGX 
zG!4IeOc7xd%>(mJ?j7G;l%5l}p}2>;Y!7l3k_mWgs`(7oeV!qxZ`IAAw{po&TlE&R zTWyeptvOf3=?c~6OQ=asQG+~Bw?DZ}^Ue=SiFLu{iJ^&Y*8!^1XwfVuMh#T0#uH(R zj>-6tb=eX~!7%%lphuJ|>}iIa_sZ-Oz#Yo_ny6WFRtuH4!fdkb&6b|kTXY2$IA?oh zD-gVR3uE1^W#lwjx=1NF_=^t0w%lA(2)RXx+#R{T^Sd~BXNXWCyOoIs;$AzkUs2r~ zEVEk5TO9Z3SNhZ=)0r5O%*Li7?_ls*kMlVL<0YyZqHiWi<1Y~~(e_1rdNau{=hPzp zDQ)+mg6TaY7JfU2d^?UB`QqLZn~~_^p#5RZI@i$td4j2?sl=fSujvEuanL7tz3!AA zi-=Ruii*)!t5C!ly>pX*H3C`6cJn$}jK)|p%#q{DktYne3All@UNUdpGU~ew2SN1h z9wu8}W`4D^9~a+0kDB|A!?#TwuR6!K_&^(v!bdb3ZF#zi4x=$jY&vwqVdBffr4#Uo zBY3;<&@vl~&fV7u!i~u^Vr1=V9;{w(sJk#fIX=eHYoe(vLdb1!I2zeS^e+GIXy{#F zIlx(Sz$Zl%(?DN|h9ee%EvMZd2+DqXukF10cz}y(th2u&{6y()LV|xR;oJT2j}g)^ zD0R=P;@B+bK0AV&oN-PaHHx7!2swx5bUC=t+SmbCTPENNK4HXhSXKO4eDxqPIqc1i zU0CvKQ9kr8chAWF9a&xVdPYZ=HaTL`h}^IS@6*=8=y3j5RUvoz9uXQtWTPH+_O`sulSd%s~mkze^1JL0jH{fTE_ z*BX(N^GaOhvZMFj3##d9gsGmcWk&CTyX(8Bb;d0#?v^JXR@E&LVY0B> zPuZsBMfMp|A$Q2l%tER&87U+At30t&)?*b4JHB}-rfS%qrc-v0FV;$>2CipfB;Z+1 zSZzirZili?8Sp3Cj9L&V2h78^*pCzdezX^S1T-iY4*^6r@qNME{@rWfbr%!O=6qH? z<^JfVpRJ05202nKrTniGvS5@uBw3P-nb?OomM^w=T$qy%e89EZ4L2-tjVG-n53Iz? 
z0=6v=S=jqUWCeIDdMX(&plG5_&FgUe3BmtkvDZaOR&xIIu5MXWE}IHWgIA6 zk4Gd4dcVmiOSmbwYPn84x8EBm=uXu=Q&vpT?Fl%cE?5>qGPjZSql18*}_2s zbDm=7w%D3I1(|oxa_Cf_7DkwqO5j<=yetc9*9e{_>j8@O47xKGP&s+t@8I$wSZC#K zW6{Ep}Bs67AZ+Od;U4dnMNuava{f!NZ zFu7t2;+YPJa(>;xV<|%4!Q@uWp30*?WARDOHKPIIE?If_P&; ztPqh~9?KUUr>~UIM3%r@-ki`<7!HMW%Ww$$Bzx`jsw^#JQ)DZ6E>`J32U(TeIP`d+ zqJvkxWhPT;bRfvJ5K`<+HSgg<^+WucFZ$iSl!$rM10F`IuDIA3t5L(T%r_2JrP*N3 zbf0O$))nI$2wD0``!;I{8}8jbAgjC-6r8vFJUFN@cK_fFUB!p~Ah;9#V^=x1Gm$29 z_x$Rd5Z8hGhS$s`EA%-UgEz%K%RIpd&xXDCW*TK6b?u<+&!un;1oQ@c8@@`mi-yDW#h%9ptGz7|7*)_BPa;L@(^0VLb4q5wd@0mKd zKYDjuIsKUX@tEQfujjDuXQk=0Qp)w!ruLd1dKXJuPkg_l)h3dSgOmLLE9{7-ua=Vo z2CEAK99nM$;`qx-Tr++o?xAZO)MrDXSv%nI%GB5M2jDU+YzgW=?gHp`_b}W#jQBLx zn%>`JQGbY8FteD`L>s!y82kva=+XLJ05$uP(r>32wq%?(mS!3nm_7`MZIzx~^*MP{Fx4<)!|dQ8j_#vsvqtxNSdm;~HVfh2l9uielrHHG=@OA<2mt|=?k*{j?gkNM z=!T)g0fr%FgaL+OU^oxD)_(V1>)mU;AI^t!zRZuA-@|j)bzgn`uW3YlRk8?^Zwpl_c;p0MUyvo7bX$CKU3h{a#Li&3oa7Qk zlAz-InwNZiI6a_PBJR$}dyW=q4K{uO7_gvR@SI~kBVL0QHUMSmO&23eV z6dL0RVOYxbOM^v)g4Si*yHh9o3xxT5W#6#2d~fC^o$om#j?g04q4gpT8!yW)V2?lPjuY!Mk%?stMyf#Esv6rL+!l5xgEnX zKiq3ni6sxOnl4$kqkCR~+?Vzrzz4ChqU+doc-WG$x89~cy~X1<-G34uO_ABB!6qQ-4|A{%J76y*Nk2C3M~iJ!k7sg~ z3JUh}*NjImx|@A^7APJK7&#jlygQ7(1t;(1`9=1ItW~10jSW#~QG_hTx7K73IBv%? 
z1DRa5w{0i4G15_S^@*d=UG}1T9FgE^{wLvB>iQA%sbkXONn=GzvUB~C&q?QxFU^QG zaHK*j4-dlOncb3H+YLfNM4U~k)7ZwV?9n|D>VDV0xBh$MqaQ2Ok{&fTbNp-t(DXS2 zK6ofm{L3~B)Z&VCP^EBgKhiBp&^E%cCcd0uWjI5XLwwD}+^8-RE!|40mw$XneFl&Q zL&&$LZifk{FnRfCN-l8*)f+{tX`$SmYaq&W{9NuuvTfp%H@9QNh2|41loJkN@*AT zo(t!0{;5i7G0_TN{Kn313WuH5xF?MX1>CESdA82gls`L0y`htUR`kq_R@~$}Mn12_gBG3Xt+0*!_98Ev*4MAbTiYjaF!skb1uL5_ z>o*LA@X}JBA52&6n9cM{)K+dt`WmjK-xsx`$-~we9Ai%YRZNX?^K!h-iQUR@6_0?% zW0pebaF49@{NaGT>lQDLyRVwsu=I?H_Xg9P32IpFd&@`AWM3EUnO^}08oq_1`D$V} z|3jwu2PA8yy1?1*)N_l`Nvudb1(~^o0T}wQUUaYPgy9VeL|GCFeJOC|R<{DQU11EH z1|a4qGUpIIT~&L-MYORn1D$R7e0);-XmT(x@>X0}H$1L<#>4OU$J<0dIjV2Or%T zbj|7E2acEFDu!!FpNT$s5Ul}*^tTQ>E{Gk`u!xcmi`_6*DDE)yjJEh8`>NN0GuF{= zBp#`^1I2a`ZtdyF`fP2T%0(-%#rm_iwFKd(E&6mWl`zb2Q&(pj6>Gt;M{yjntn_Ub zlkfGYhnuO!QuiElh}^a(9+0iL2I!}JqMDNKL8w+JJ)Rs?lPigsuu$1svaBLi@Fxp6 z9JWo2J*+x|7Z4TONP{^ePy6)DvRd~M_1((j zGUBS;;Z-tro(Dgo6CV}H$zRi;4y#h|cWdN31r>{*ZC4f=Jjz`m={SwD%Wat;9Xa;o z*mf=ry&#!uuFkr@GUD%EEK5Rhti(oMTcx5m%bpwU{{VbbSp>Ii1&&C{LQzM0kiMAD zB&ccjitoV33>S<+)19*^=Ee4FjElX}B8*EdM7F#V4Bj1q$pll#Ea#t#+FeTHnjfFh zloBm9@j80wnz28!1u5xfkF{u0@d1}5-c?9*&09BHG@0VM);~&}?jjIGTMv?md%B5}#$C;E=rr*m=phj;kh0=HpTk#8OP< zR4|e2*hd$q((R%OP+(n-9;M*rc!BDXtcclrYGAj?k#`0;XRu_!C!AL#_1dE&$AYI` zeBI?D@6Z{R>kw?xsPxPajgv6!J_c~Lq^tEyStkAQ@Gb3UXPyDeLi2Y(5~iFzpX;DAzp1;Q-P-riO$PWLRF35xGr~uX2eb9 zG4m50lIU$VCW+X#EGYg+tZUJ*;1GE8#<>OsfeR7zK}9@?2J=?HLE`>=rD64mY^D$w zkr%5H`sT5BYq!FCB=O1|>O4zAMX^97X>bw6aZ^c1UT!MSor{-mlZ zE?LYt2lLrB9wzMOT8!8h#4nPSCuUvnKpp^YExx(@tIwyWC1?B9*}iXUaq)9OdZr`J zy>VHYpN=NvFBrV@Y|s+gPuvX;h6iLDB~r*tX9WbVx=vDdr}~s}OlNoZqy&MLo-Q}g z8d&qSeoU_24Ly7=mfI<*lieV;jsA@syvNhT>|5Xix7 zr>Ybmv|<1=EStg=RWo*N-JNZ|5w|lH1D!>&rZhu+)r8F|F{|W}?LS}!#d#|)B)I_T z`<|M^w+WUAbd_k$PweuzXIl288mTLCqpod!uDqn9Lde6>o}G=lfjOl%2-~7zdjcZ& z1g-Bv86sfIr6flRv}aP+&(L+K!hFQmqKiG9TfK>p%Sn0!G$ZbW6Q_tXzS_Tg?wOGE z8~D6Pqxoq!DAY{cZ|^25@@mrYm(v1Wg47Y%$LtA<$BVvpUWL%ntD+Y+YIfNiJSqITna+IyYY(JCESrq3M_mf!xpS?=mhe#T;cCpz 
z)rSkJhFbx;-8I#ONp<&?0%~t=oX&yCiFn7stPi8|a@f2EX7e80lbyH$*(?hv-1oTF zE@scwTasTXYWhAeN37fxl9km-^TeV+@H1>!?0(+CcJ7!4dM7L@A?OFudRTN>jAGx= zl$V;)HL^B|rfvno=DP+$6!Tfb*Y3x(z9#)>8}8`T*5QyXd9wNlZtV5t>jsIVxg4k` z1-ITTsH)8Wx@R(28MJHsY9b{<@tITY^Cdwy+N}cSWsbm`iN_ysnW!QRt3?oaTuIv5 za62l~oMc$68Wt&IIm$ z&{@B?#p?wytMYioWv&eO*O-turjVpfb305@+j>8mE9re%yDWH3!roVm1Y?dRNQblx zaW9Dk*niqvFrm>QPyO1V0Ll(gX2uv#v<4H6)ZSDB>rBeJn8V+?TpEN8+w)`}L4zv4 zk)!r8yUBZPBkBPBlkG}j21%S+#dOlG?z8zA*-Iw==f-^&^9U#1kGjzWS+$WxNT=JH zi$$csYa%5DzOlE^wt0yugl|V}G<`ArA+pk?6*&ja{_5U<8WwERr{KOiPG_Z_OR{F5 zb&Ph&f8&W8NKYRy*C4lJrtUWiBRv}P<8ga4dH)$?^oss7`i5z59zch=#=r;=lScJw zxD+wj2BWytu{>Rg@r7po)lR-XJ4CDrd3ZrXW+tDnNu~OXiG(66dvKe<_L_^z5?kVX zkFiQ)ibKv6jW(u&R}(Z?4X~S6#oe?}l8-)fU}JT}n&AL4Kxw1$DyiGy@~GH;;9(dU zjpebD+IvR@(D+7OGj8!)yS6{;%AGnJ^>P$i;9>sDiMh}vfSfX3hbHZ_GJDk37WjO0 zAgZsE%0{2A0C;J)`5xQ9X!VVwWbjugbYJ7MD zKKN0*JzSx)iqw5TYNt1u4ZhUVKr8Zvtj{$hBAtrgm@XuCCQfI_{%vW8Jn>`E^YF)_ zkG!?BoO5P8MOey3z~>j`mzg`ZLsPWqv*6MW$r6)*wLvcz=S+qwdEjq0OIX?EOqeb`$wqU+9g+w2K^8hc2}mP&``mL0q3}pZ2z)6%xzi;Q=ZHPSOYJD|}J$ zC!AMMQx;9D8{GPA=c-+|zH@wl&1|}e>DU%l_IgWcs0e~!; zq`2(Nxkg3C?3^8J!jby}*Y3<}GB&PN;}O`?Z-4lxT^r}t=@am@I21-uVm6Lv$6;9w z8nlgmwsR}KB-v~pf9+}J1}+kJQr+>$(8htztGrL%IbimZ&1O%#M1{5nccrvRB_-om zhjuOb72Eb!y{HR##$-pSwPRciem_>P%Yrl#A2f}{o8?8c)blVXd+8NuRd$98u#Z-z zePg1w8Bzr+ti9Itb{HmBb#N9EEld89LV+`&JX%$xX!q^IakJEvk&)I(fe^4hLqf0G z&(Fgq@5>Vo1{F`hN{Gy(VjpcZ#YHg}LoqWXapojzO?wZ&RYMELPGC@CRc#%+i$;%> zlRSSr-p+x(3|O1*B_F-k>i+$>?3)iol+cjYMn~^+bmpq33&uL2pb9rP^8WQhIG?vb zga%!)H(W2lBP4%loMy-XL#{lVCjwNHHb9V)fYI$!_*&q}ySUoU5k?-SgQs#)L(M*w zbWf>>^1u6YA&1cYr7yWT-vUwwjrG3`X;Z&VG((|$TKxp*5;(s2x$`6A;ofqsM8$8P zB6ZlQe=>(>dD(!|a^JCz4jMqkJvLAEmL_k_L`&XB@tg%ANZ%eNcjV-`RdCt5>i|Dg zlT^wK!ynt1)IN_3t)ZmDj}fVlOLn7rP1|#1iA;X!IwNi6y~T7OIxVV!zC&6*IoH8i z!^Ys=9x5=PO-JtexXF|K+ldLy$IDadRt7oL{P1b(PLnt`26p3if?fs!xjY))EkN0W zT;6wXINTPYf8Oaw2O9rT87^qo6=N)WmSoGtTlQw6vee_Pt*F`pQ%Vq#5#NbjLMUmc 
z?|uzw{$=pM=p%;)1uM;V%V@|(SxK8}7wet2wPR3egiI<|xMZTK%G3h$SbU~bmSg86vaRqwiBpo8M%lfY*C#9cIcYvb`m#Kpf1&$`N zN%One#5AM>nG(B|@XP$B!yaX2sQD^Y&bYFGE7_WGP%a zl06|rbo)y&&y#*S7KEP1YzKC5`j7(iS1Si1bmZ}qeI8lxp7 zU5AG8vJUspRf7elM-C>gK1=@@zuwmi+yNOzx^4epB)a&_aZItrY->-lt+rE)DTJFM zO}ooHku@PTOQs|u?8xlCFIKWhbX;&`y_wpB$T)5xLKPQWI=kD_;xS;K90;rI>?Bl# z1-6w-WZS3IIubE%yHmp#KIS6aPq?@^l4OaL1d&6wpATNY!8ODmHcoPD0kK;3COm_x z55pw?Z2&u$XSNUg}3C0!2;LxKSD$CWe~FPL<<|yd}qH;$)%am zhxUER?^&n@lbL~jkU0)`VbRnv6sGE~E^CrRw?0PlFx}X(pvZ?A=bdoYC!3ekk5?Jj z7S)NDo0lJ;I0)L0xpPOjPnxav zJ5Er4TY(YKv8rr>i(?2B7V2XjezNIJ$rY5SQ)Im>g8pP~WJ9wwGyW(51Z@$r=2t2k`5b? zN`l@-!afv@d=$wSgRey6>i+mlZBd8)@(ClfA*4~HUh<`YV0?Pb_gE#85%~_amzKF+ z6JTO%u%qF#H7$4Jqu`yZXl{$EXvGk2YPLwTcHgt-4v_=BW=MHsdQ(OtYtBs9(&ONv zRO1v@bk*{w9p9FhEQE_%U4=L_pa*a*);!Gnu0StqTVPerEk(Gvf+M=3*j4gkS12SUJhz z+Uxz$^DwYJA(gm)4icj%!@P;z$p}lvd|{TT_X^+7H)W>l;H9kFyv{TuPyfeNOTPcR zLSurK9?+mWyF@u{bs_L+dvtQ&BQA5h<&G&5V>-_k1)P_Az8C{pP0U z{1B9GG;SVyfm6ah!FxPm1G+Pdfe}LNE9R)jCq3E=1g5yFJ6Cm_xfiqLk#F0GW35#;c{rr^xscO`==fZ{C=pJ6tdV zY@0Hbfm&2Ql}i1)(t(suy~850ev!38X<=)>jzRkrE(_lQk`uIB=8Q;GZC3zE}f{fSLXY?4vT44%B{nygUL zbQW{7Xx3mqnyVar(1_ZD1ZAHksoSV@9rvi&yk&Ft*7ZMA>E33kG-5#_YLhK1rFjrR zd#zVvE%1rtD3#O8xbcJfzeqS(<%ddFX?nKCOk-_;Tsoi7&v7a(gW+Q`W2!(j)ES2@ z(^Scf7$i;dKsfa4lNZmu3E?uR8EMU_E$w48x8a*q$DYnt>6JFQ zj&nTcb%62)K=yThNH1+54O68~cg+Ylbuhe)hg2g&!SwYa9rS5dz{WLHX$13+^T0V7 zo(0^tAXj4FA3opff|#9WP7L!HTWN`kyNZw=t6{6h5|q}(z%MZ?9Pv9SR{%VSP`G`$ zS>$GGR?%VK4-GxNJI?yk&O7=PzBi|;)_*$IBm&pypo8b3d?4Eb6CO>W9l zW_^%HtZ=B>=91+Pv7O?`L1u2)Dr)u$9W|TZskeMS*s}%&4EVj)lo2h(3 zKd%*Q*GMroiOSEW!hRD+7mFWF!bwtrG^F=;kEZSJXd+wY&>o?x6D|k)&R^1tj_J7> z06z29a2w=ZyvVgB)7`arKhm;`_0o>(Ty@L0?OB_)lUYKGv?#1!M*>8gHc+6jnfcMc zpk&eu{$ct2VvB-Nm8Jr~h$^3sJ8SD{VG^l_->{no%<$X6zl#L(V4H;gXRZLALd@IG zs@}r(N!)jXbG3>hw`~^r+-)CV-SR)LPhSrnXc)@2-)H1VF=5TQ`B3+9a&iSJD?Z^$ zRQzecPMIdbv9Ja~^me|u_cm!o^8nk!2FQ5OE24Llo_(H(y1fU2{xKa=``JqMsebrW z{o{1zFS<;=e3ua113I*4aaCH+Ek?xm2y(hq%IV8q!P;DA1J%ODfnZ&>(?KgLnB#5T 
z#XiCkkJW|pqH^_Om;>2Sx4TpA_ZALQmFGP}U%FHJ1fyJUgPmy&;?9_MPN&B)KaV6d zt7g39w*6VLmEn$l?zlmYxNdhFD^sTH@(-u1UNWI40$Gm{ z*V1{j#dp?Rl)K*2C48Vqr*hj5D6n3;$(^*Vb z(TzUf(RqI5To60}av_y255Bnh&@(VB|K;kZIa7gfJYtdA`@6x+YLAYC;@m{ke9W(~ zNz_>-*Z+7Qw@2b;N7K0&E8^^PE3wR`Ur(~r%{d1@mopmBsv6aht_iNr_cn?3>pj5j zT0KTw(MBDHb4Dl!_&0mz(-+FW!4J$Uy7+*hbMVaDNP;6}99gK7(ZgIe?aFgoUgI_@ zC^YK`ro;OM5PRueiknWHqUt0lT=tVkwE%f}vS&AB6##uy;7l*B;{&~-Nr7IG#!y25 zSAgxSGO>v*N}J<7AR(<()RdP3!DsKI-Ay!M!!8Ox<-Hfxeit|>8Q`GWPVf4hyXK>_ zV#u#Ub$R^fvPc7n@sQI84gLY=xhS%fnI!SD3dkJ`=3{gTX2v24F4R;+HB!et8!jwR z4>Za~+#1{TtmUr6IbAC>vr=cFlz$4G&o``fbqZ_SA-8n`B+ z6)k=y2Aya`p4?oG#`y6GE;B4SbwL=BO-`9;D+?Gj5QU0mFqv~NTQSiP(w7d^B3YLX z<`@PomjYIeHsj5JkkwM}!ES6ZtD-7tU=~XbetmX3C=hdeNhzX!{zhSRmIyb$zvWB+ z+-zN^$SG-;kF``pg~#mBa1w)&I=L&)Aushk#%}U6D{OU0kF#9;joXh=D<_k0bx4HG zF^3aSis$D{b7AFkZKor#i|g_u>JdPjl3M9_0HovB+7_eTx4!f2nJlo>fW^1l!yosN zP-0rc*oN2%r`xRls22fEi^cJwDOV`sl8@$k5CBwqvJ{?$yhu$MwSeKcGLY=`4!`!P zr(}tCLulsM2xET(f}1VhD!n@w5~JPea-Fmv zYq?Xjh>5vu(shaI9#eCneCJH96AVb)Ys-dh!zWMdWk>-y|LF`TOMZPkDZycHTt}P9 zOM|YOjo<}8lqmS(nEyR=NBw9kY`O|QS(kHSq7S~|xk-{>dK$W$oA{==iZtPD?_Hwx zr`nv;X*(kLN$Wf5VdDl-`x7ZtA&bmlu#OthX_Rg5sk_E56;;cVhEY-v3fT>Wlv=^c z>1L$Au& zIAZk&ayvC<8o$h=`ea>yY~YF&yG87F$~anp4A=2icFD9L@wU23^)rb892cQqi%F$< z1Ez<%B5j5W}HI4mY)qHB+Zzi7|(lQ}8>3`z)#E zKi>Ba&#f<&c&4bGxIF&!{MGD7|KgX06hTi3-Rff0WwNf&y1AcZIu?%vl~=}m8ZQVc zpUyWo+#ygHl#?{|7`SKpc20<}ug!vEf4*&}T-dJFTe`cdwkx{$OmMAW2azn0FMW+) zG56EqwjO)gQQahzl{}gwrCi+Gck2E3<>sGR7gr97dCy!{AEL0I^&GbtPey>8Ql7iE zPtJUbZ`yJ{3*8ViufbhAc-*bbwrvq-*Xj>*U6#(%F=D=cKmc9XWS1iG$ zNMBE^&^0?=aIq4{vA`10ZyL)J!tjB91NW*xXbBzPif9_5o2y>g6I=8hVnBs-aPEs+ zz=6p2gre)|DBmlow$Z)JiyGX;5Q|`~D2H!WESd!Yvid`p5}eGM;6o+}O@jf5L3=AMI`M8ZmSm_e=frc zzcPZQJa!>V*s3CVg*a@o1t8LutWakIhDoFGav(x)@z8CjpmAh^PR#7>gmXRg?t@3D zqpEe&v1Vfs#k!JAf!yeJjVOU+^#MD z#{)%Lm5bGqt;a&Y&|h4gEpJIa*hfv8yhjRKmtuC8bQxre&7tpA?u6|NAjQoyBFOI1 ze9OHHrWHE`I0Gq2Rl$#pzxf-Gg<;#ymYYaxk`h40bTHP}qo(4_m%O<5AzBUfKCPBn zOD|-#>8g#h8yUaz)Va#ak`pOyO(V%U8Pg{bY%1Bpq}^Hg>>eNEjJymg8v65qk6-vT 
zJ~1Q+y*8P(ZDN&@6`dJ+P{K>mE(H{f$y-;i_)geueN=n4jiDe}SHBfjWC}8B3%ft>%8A&$RZGc+`uDpJ~hqLlBlnIAX zFP9nwBT53b5`JndLn2PkS3TjK8Y|eRcGHC&N{Qe7myn1p8n^9{8huJ&l}COe;D2)f*|Aw6D3DAE@^BG!84 z`E7=rah3iCwnY}Ct4anV{6PB6zM1|Cp*PeCMk2B4W!`yLPTrs_rd9jOjRwtnlTee# zP34l#oz^uv-AesQwK*qZ#b3qb3?^-qo(+pTVqL~Ib!gSkejpFlxhfpxL(n~>@01A- zq8;wpE^>t&8oW;8TEj}L&Sk9u)311qD+`ArjqCG{Ckw)?4HLLc*`zpyeImTEbo7x? z!L`!ed{ZXtr}=Sa6Q7T@9`>70=}8GHteNC0pmW7X_vK{lIA>}NrPYzKG0PmqRr9ia zOMFDklMh$gKFr&66C*6zIU~7^AS%>b4|>(WkrVme@Fn>{H5r4ay+|^oME;1TqV~`t zHZYHH5g}8goSYDkw@50nU3%Z01lw$}ysk0BwiFr(@}j;TH*s?BzNU<T~kVH+%au@%xfx zE|8vU`YS!RY*_s`4_d;d*TF_T<^9G;a3|F*hVSi2s7;zq*qd?)?Bhqf65p$k%X?nq z6S>C6oU#jo=P{~Rc%2={5P_qyD?GIR&?mfGLlEDq#TJa4uA$Up`jmFI#E+>d4VN6; zdm zaxhbx_gL2vZ3*5qJ;)Gtbi) z7J9a*^kvWNoQ7j+rEp_7>1eYlnM@*iSjMdYTvHTy&a)U@0-ngk4q0gw_aB1DG=+4F z8N}CGr{p&#NO&qSvgB$~gnRqMp%Q#{fryR3HcdM9aN*ea~5JA<{J~8gwIkrOuzmn%U?!KNwVIal=mbn{I?68pE{b@Rw;coOCl# z=NPH)NsoZ+$N@FnqVQB1sn;3&RPJe1_3j*O>b$;hw&9FcVR);z3yWFTp1Ya_y50C) zY>DBufxmwDq!ep0`jr}cFIs63)YRGnP$=_c<&?v;N6*GZ^2Ex;T{keYaS?qm4Q)oa zh}w2#L9B$XIQiyvv2Vw{B9GMKR(wEYTTqE(n7mUR;|jN10{1(EM#*r8#&Qunx=O28 zRGc&@D^|jF6`E$5c9D#c++m7uzZd5E4Qkmft8GeL%yMcTZZ|xf&vbvh(1g5K38y2! 
z^gthQak~X_-`vI6qvS>&e&&W~1%FFJ$tGk`j{jB*p zudcb4qE!KrY;RnY%M~}v3q65IceNBSM`!X;$k*x0&e8ydF;eDuv!FRpkwjB*rPEGb zo7lFui|RS4c{?~iIc0&}g<#vWEQ}!HFSsIW-RHk<(~UqEO{*~4W9Vlf3HyBBU&Dz% z`z*PkrY3eN>dz3QfhqV1Uvod%9vvbW1^M+5ee1>Q_8uQ~ z7i{NqOx9I+%DK)H&Vky+-#c=+1ix&I@y_#6|UXVp}U$e`kq}k@eNuZDR9wP%*(X(ia9&~AJYUia_ckj zF^ledM0J2P`D3{AF#!Aog5God1R$9C^C*6OsE6QDde)_+XBlK3!*xw9x{2W*%^Z`P zyGRVtX>EauPoi!0Iz(LIqZX~v?Mqx`Gv*Q)FuwKvXA^5v!%N>W&R#&s7sE-I92F{x-%-IhWL>Tc9N{xVd_6&j9H~Vr#fXm z!6S*&K>+spf4+5O0WVu9Z`8M0;6*L0H ze-DCg?iLMR|Mz_ahS#c*D%-cG;)IHBs_DkB^Q-#k#Qky5f#uhH&%Ru0sTQ6)-z+N* zsqK{Nb}=fd|2&QVTm(Y-6f}U!1;lrqYmFvn$0ZalUf1zK@Oj-DM; zgs07U4$jeWVTfk|nzl1>mSm81`b!#&c%SVw($e3K2Y@TMAIA~>+R~8g>V=vqWDdFw zfHgVkcMAG3qnR=v%taFhgn=(!Bv{A0-y7ij)U(PKidaQ|rTLu-KQmX_${gd;n`yD6 zd%1+b^gJ&{tNh{^4-J6neSl9_5&-B9rC-8L=!%rhUxgUSt6sWzIsbV?P}7({krDdY z!U~E_GuBY4#rg18LnxqjO{~NI?9}t@w6Rb4yO`f%EOdh5_ojdk2-FMj+7NLT3&>#h&Jwlg?-U+*ZU7iCPa2-90B9nsq9eTchxLUfCM?Q`)a2+qz>ld4 z?1Ko0qQC)^my8^8;-}`2&Wk)yVVvU-{T=pe7XKUo#|~y zUnlnDR*o|B``cUhM_2&)R)nu9t^mNPWVzJ|0ff*)2OQar3ZcJ>@2XT7OnmL zGw2Nfoh1I9$0Y_(35{O|HEj#L<4Og&q}vHX;QlZC?IT+N`uXbFd*gRzt8=k!c=J@i z9p`g4iG9T451_k11sUajai;2P>V@(6p))`mzKoe}FG&S0`Q!8A-`jU{9tl4-ss>IZ zA2@9#10iXgpz>LI=|N4>p$AL}&B0^n(ePyYt@iI!nsf7FTgRrK4zi(oVHk+=G{ms$ z34Q|T|572Z?4O&dXpso-E|cdak(MVH0?P^1A3?M`?=>ua z_g8)n7H>0G0k{AR*ZbuzPG@y6gPNovOVJ~O1=G*0lUm{&Fax-ouhbM3v{6OH{EnfMQ;NUuUyN313)uAI6SoGix{4#% zPwSx6P5bNHKcQ1|9snR$#XbFf3hwc*Z?KvQ+fB((8~;FUmSbmtTs%bndCj!FNTOEG29*R&h%pxhl!t$Q;MdefCRW3QM6Gws8062y9z-N=ffC>Xm(|*5#TC&i# zpPUp(%25I|v8IZ_(wTNHlFbuL;@sdbhBGI!_{|utJ^rGcml{f*)Nif}bNLZvlnKeK z`H6+IaSnuJ%v3u6TXPNPM^Uu#0Lej=ZbPC^%b4ywiR;L-hF#Stg5HnM6I&MDQA2$E z>!A@}R$Vkw#DV|@-}WPe9B+YnnJs0ma!#z{eNYpkg@St<^I^_m@aP#CCB*lu(E94V zR}6%|h>z9uGk-ubcAa>j&%GhNiL5~zcO9L>o8PHT+^k2%dM*Hf#&{3MS?auqmuL^v zN4aur3Jm4jjE(m>=*xxAQvnBs$3=_Ac~hlwF5}JKMt=bC5D4N63J|@-T=UQO!+r*& z)j!7)ltl$KX>YNlu+jM@drm%kdra926I>4tNM~4}8%~ntq!>FeBWM5cxbSM9TLmXF z?6+TcXCoC4Yq_1j(=F7$`olC9E~lb2=eG{F;oM9u(!`fLZR174sGA7Nd5hkji}IUA 
z!1lS=$C2SZ0782CF4l(CgHwF**AjF9A{_In5xjge$E;kV;}Ug9iDc~fIPH1S{?D5N z@N_RZL@Fc_VCTTae;5M1s6gGCR{NZ3T27|psYd}9CmE`}lJaY2|I&+^WdO7=gt|@k z?{9i7rA-J(-O1$a_`hjadCpw!zqp9@5tUk0hjz-Qy`z1ru|vR_NPhw>Ff8q z0L+o9%HDq(9DjP~{A)HK)Fp06{7Jj~%U7ukfI)Q^E13MT#=q)Kzy4<#fCC!?cb?s=A zYlRk2Hvg-&@|PuQGc6C)t2U;_lqFq>|ECddRscJ@>k~)$2Z8;!$?E|NG`RO~W(}m1 zA?*(=fWI8X&q+zOo}b2+!i#^Jgc(qHW@rBBA9`P2z}qEIrz5XmZu%du!2dq*YoM4T zKcw>?mF=%=>SWFyadSXO9WDk_MrNoETmG-WRR4bPpPt^w5hy?h>VKq_*#Q}xqAP;$H&+&7;viSg-#ZGFkccfz(Au z8>t>0!{GDy+rQrOzy0-A8z6T*;og62_Us7guKx#a@c#pxwU6Qa4O-U!-?@%bC*llH z@%F6bxze!tEYA@t?ti$Mz1K!$c*u2M{R4dg=vO(?aTOQ;;ceF#m;gEQyw>th&V#?Q zg$O!ww5Qn+x$3-r9PQt}74WAF`1I_o({EI@6opwvx4BUX{p~*hwS8PS1BiRiVGN7J zZG2#EK1_g{;jeS}_utex@BoJng-v}3#8HL!A9;y3lT^vt%L8 z_WLKcBjtU)5i$ps3CS<(JWp==+xfbwm<{b)i(ZnhpQZSd@9?kxDOGyTN&jVq{r5MU z0V4ezJ>@S#;ZN_ZI_IQp%&Gshq}Ln(C;dOH<|`{eHKiGr|IiTm0$#mT=4l|D=ay|6 zh8)_g`|TB96h^XUwNMQlM+N{?{_VWgbEE*@8sY$-=2FR3{MiLrO{nsXA@)w zOh3k47b+}1x$&On<{|luvIPnM{m^A zYmY)LZ`nHDZus2PHkfiTcp&YmVT5-fBVt}=&sDk0N@n0N$)m;_F%`3>Ew(l7!~0^^ zY+R>duC&d{hD7G&=3j>XAF8<-FLgo^2Y~z*MLP}c55f19nVPlDzG5z^oY6t7sDce^ z=Gj|zlOixP)_f-3sqFT993GiO)-x)Ss6j&PBbRPVXd{v*$~BOCh!2`#UObt0L$F(1 znUxWMsM;li*5jtwu?HV2*$Gl$Dcx*K05*8I%b@-qD9&Z!)?UlhZwRY_9P$>%3jV#C zC+iDV26M1962Xdro(`98!?l#C{XtKA#3Q8_zLSDnkg#1{RbKaX<96-&G<+Z%Ror>w zY4G5(KW&_a+Zc)pw!B{}xRMsW*4phuD;glGG&q+2V(TQs!hU`Zn|e&=%w4#y+;pR; z9}{^em=4ZCp()_7hLW_z`4zgujIF=TtH9SVO#3YYW*c(8uwZSHPvMw;#dPA7*qqp z`{1R}j`hjfnE;#fsjiv-I=C=Z4oH(2jFT%1$OT3F4w=x+yqr;oP6Q}SVRBsT@#YZ2 z#d*@(cZIt(BgRW+_y$j6XR6?Y)?LPPr`Y#DidSA0zb-vZqk_2<*R`=qfYW1|qQ-S` z0iSW%@WLrflwIQqRn7JkR&&nl%yocY!^N{5T?K7u(f60Dh$$@-HfA;Lpy+{S?={s( zzcTA!d8O_%fSU};29mrs)b2MS!g-iILss8}AE!0@L;8HOW5s0M*KbbSgsH@{L$Z~u z(wz#;yBh`(#!K8=Fhq=ZlV*0H3OVe1Bi53>I3~fQN+r)Z4(^HNamk3?!>Es&pzdH) zLh^~ji2pln0)D>knC(nkod)?Ad3atbbt6@%Bsy=UTU|2B5 zsg&mR4JcX01UaH(W1MOnEjO(?!OCam#HpjYdL%Y8I_PyJD_!WooU+P-Uv)(kP$BxP zeJN_FnQwsJi0Tu(=n`AqjPG0aWc>`QA4eE^v=h<}^?5#`;kz}@rnlHx^&}~W3CB|JPNmLsjaA=Zv+FNz&ze`f_C!gC)Ha{Gm-6J23 
z(#php(eh@y)bANbI0p-Th>jduMX>*9$>b^IuG_*c3`cH`(UD zdEBPSN*@-=4tZ4CA+WPf4y`Rq$a<&AfP`8t7Ld0+tShpBM)dDRL_cg3%Un{RVGi3? zvTg#c2An1N48kmrO$!&Cl254~qAX9OQB0{^D~>I~P1fQ2X2#z|3BY!HepAHdB^IWe zM^T-%_l4*Wns)c9st0LTeZ#^y1ra<=^~3p&RGuNqYDouf0|ORT>6KNiati*ggKxrB zV_X;y^gb{`VmpKpMT}}^$K^AO(D#U#)2IZiz!01^=Gm=a<|Ygb@4LOEQyq z2AV`61Ztd-ljK`IygulTXZ5|MBd->~5q2G88!nf}zmiJ|@*|uyxcNI9dR*FBEr@Z8 zDWJpSbr3CujS<&e>kRhZ(dow-n8^#Bg;w@Q9Z(CoYh%{MmMvm#-o@6O9d^0fn-`W4 z6gC6#D3LC~VQyLhr5DGE&L-u=steARyD1UcMh)6Sn6p$X!2r#qCh~swQgHX^Qmn&K zZiH2oYfAABGvur90}K6natexRA(hNG9591m^!?@4dsC+S={WEmB1UMWjem1eD&98W5#O2~B#HUPJE?!M3IM z7K%!f-g}o$=z)X|(mP5CB_!O1`}=~K zYV<4I{gC~NbGCgE$}2XxJKhEQEtrh(0KbFqNWP{#o3xPam!;b7AGGChwOpk6;p5!d z2i}_zOo=8%)Nne@_Vkz%VLGJ)|ZmRmwjeF$QWyN?0SHL?skB#^a*-Ua4o2Er#q8q z%`?cu9yE7mei^@A!e3kg&Od|eSr2b)h@DaT>-1C|whZ`+=dEdth4B^j?4t%NV8G#) zFIJ6Mvd4oek2oUAd{aserhYERG8Vv{{UihQ&GstpulJv<_y^aIO^t4Hn!B-HK!W=c z)SHzd=filluRZ?;_-q6Ns07^>c5nLuZ}XYhX4h$Yw4_Z+d6IDMny86y$wiRAS^BQE zDdttMZ#t7OzP-=scvNzNC}hWozejtecD4q?>qX(l&CqUYl*F0}NLH8@*uWO{53^*)tDd3Fg4 zHaT^N2GbhE?4;hubFcP0)f4>jwT1Sua?oY2_2uwDQU$m5-?cE(PL$F66W%Z0(Jux& z*8JoFk4X7weM67x9i*yW((?GsDXwj*FG?UcI#Tm8OS3 zG&4zuB95k9S0$G4pS6j*Nnz+8%)s@cZ^tfQ@O(EY3i+G(_WR+2>!V9BZ?qp8$4vg< zX*`4E+O4U^UWOk@)3Zp~rgI`PJ)g+|yV?2}3##2$bBFql#wAp=E}J*OU25YKwv)(@ z2w3CV&w+ErLwi48VQ`)ETWui>0X^jo*|urMcHB(ExGmO#Z{jJ-+e)_plUNJdu)kYS z-~Z)U;LF+)OHV|DQ_oWmSKd|X@`$aq4s4n74u&l?bO>^MKXAV5N`OjPG(@<{pGxOR z*BuJ3zxqMnb94CVHah%g6K$bodbuOc4dLU{sQm}gcPZ4Vf4TLb%)P38!^Ep@75_=M z`RlXKA4!3tR+{u%F$8S6G&)`dAz}8_hGc^#I6`+wKyA?J3(;PxO-|Ap_*s(GfX4@` zfl!AJGkc{s-E+pcM@ZDW#f>OXdww3$C7YS%7PH)4nFvAdZu%NEzoE!qt3Rs9(Sbu~ z*`87(WW~do*K2>%vrC4R7-$?&X^Ym~(Nj(?(!7SEjm8elX;@GC6Iz=(^SXM^WGD_94wq`Zt7OrhOv^$Lxbhoi>+_%`O-JZ9bo-M_!Q5}BC z6`sUs_+LbztybwSTLMWf&PVn>+{{g8fheHJy;cXxgCxVgb6lo|4mH@VQA~+cidr^S ze%X+m`nyhq99O1;(3Yn6X|pe?bGz&Hx8QCkA|Cc7B4kO!!f@wikj-AnVZ(~2msz?_ z<N`Mnf8bZmAy5*V`xSW%{S1|u~uN}KrHl~2r4bZ1+! zUV_3bAe57!L~V04e^+heBD2+Vpg8c~-J8tEE0X|D z)i!lXLHy4A!<)y_H&BwciL*!dD*Rg?FD;=X#g! 
zTMg`Qg(CyW-&z0Y_enVObNk>-BT$)ZZPyK0%bvIqI9M#4^rH#1&Cbzl=h8XSPjUQ$ zg1VESyub`sRsXf6R9=`XWIss^)RWfDnFk+32%=mB{70=zI_du=<9*WvV0hut_dM{7 zuxUbu|HoWV9@}hSZqBjXrY|?Fg$W5=|AVi9_-f2+%A~=U0IP^{v&o{&R%ooXhK1SD!b%g7Wpm#vIh-ACM%iF!$n-m#ft&j90L0SS*0|qrl1wA<%IRr zTv~84z(1nPtDI`J%NqOz)*9FB1zo$RXb+od{VoFS_Yz+&l+PD^hf0JA*0`p7zu5n= zVgKGu(G90ObmDCduyDD1UPEJijS(NEV)?sm?a5B4t#`S%#dWR!cNhsEtDmIfXQ1w1 zedYef!aXr8zS;Qmujy=NjpJW5o~7sqA&gs`s_*o>I>%@DM?zY*dH9_g1vpQD<7kPd zE={eDYfq8h14P;1f1Ut(#ARkP=1p975#{A&u@cOh1tuu=_H)^rPqIG;HalH1-jJ{~ zP)lBV&(96{oghfJiiy?^@drxs@?ML_w7-eH8=aPzJ_zT5QA&KlEkO1?h7l9_ehm$c z4O@pzCW6MsKl0aob#*szI#OWs%KZDSeo;K`=_SkDXK){caGhr}SN#&1fMnIP<$x3T zhY2G0L$11=$99N3k(rAy&#BfCPcNI?vwE=`MCYw;acd`@gD1l`*!$sD&w!18ojV}g z<($?yG9sos`hiac7h*vunJ?an(%2JO7ZOFf;4Hu+V^Dt+~tQ)_y0zyeg~5*Jcg{dH@ghl4s-3!OvG~q zYWkcSjDlp`wmL7zWsJ-#Gh3jqOk75ce~h$YaEVR+`cDe0s+8AuijTjICrMAY;4@H7XvM4Shb?km_N(@iLI^ zm&zc^2)+|roIc$+CQZ)>vYCDD7w|CY!r* zucbAHH7~R9xVde9jdj?tV84~a*zc*H+gT99e9_^iO~>}wkEOn(>jEpL95c|e+S8li ze_M5kB*wo^UzaUw*0%~RXwQX*Y9C`N_Rhz^=8T(#zm^H$`Z5knolB~aQFpS&J&@Bzhe>OhOGPSJ-xh6L1$wm@qM&-^-1ypmwWJ5}v@ml~-0 z`W?vxwvuMAq@$Bo#p^Yhxb?p{;~H*7txNu+Zls#->~&$)N3ACdLu71=E$NaqpB|!{#w8Nl7zW zXA|TSJ9I`(r{`Z}cDBUAW$TrGlPtYNc91%3^)}m7Tr3u4nrkI)h&q2m#3Xs24Q5^Y^A)vxCbS;U1~s>FY><*# zzvss0nqKpBI4TZZvu-zHpFuckZMWzgBJEqJG~MJm%~gOSD~cnN5SBToB>gavm}tUp zf{_TwAjAjXD|M=owzC^7U>q|`Aw07Tv`&BM?J!by6c>~8(hQt-3o~t#15I5E3|aHl z>$ot${phirLtlnvU$lGjHowk4mCM_};p&O6R`PPMe;!&}JDGN;y4w4Fw@v>Efh+ z)$8o^AzZ+j$U2dGf8!YmJ|Y>hbU3dO6aI(3S)a{;uD2Sfko?fxI7;tJL(X8v)l z@5ssB{_hAk5Ta!|@QD>g!c(dK=Cxn3%4q4}$c@6C(r{_8+dnooOQg`7N5MGP3o_<6 zuE(a@ITPIc+bnT)AN}}ERBG@2?J6cl&Hj!o{2upLs^7h%|Jl{R0<2oV86)IiN8%xz#%*_c%DAb zDq^~9jQ@@xtsuih^m?kJr!dVwtbsp*6$yNv326SS z*V~`fZJ3~-1`m%WnNA7Q05PRbQ|1g&efTWZwwu;dd;`P9vQwPN+`dRCJNahNi3BbE z#d{<7zUE@%Vz$H#3w_F8`hbU6Nd3*BLUXxT=D1<<#czv>f!rI1NaUniyudYJ%~6)! 
z&VOcm4P3Hi$mWE5LHFjV?%Uh!TTxl=orl6SKgxOESzC5@lbpU?@;_&3X?+Flgz7;@ zT8%$6!&bI3W~y!mE%%QuIrxE1Ydy|%Ll@8@o|3G(_lG|ST)Jh^0B+Om) z=H)&4%MvTIU0N#xXLA*n=lCgt^~!8s)k-!J+{t+>TC&10W)nmdEgh=1j2`GbX{qCJYouOZ|4nfCw3uv;6;0CDU(w?yG{|L)+( zjFib*;N6KBz`qBmpZb#>ZmQ$B4!x)Bj7Ajxu&3yeRpxiQ>qgvl0D;@ZY zMM3_j>C?nt53aPQ1N^7Z71wvrjx&P#U%sDckr!I&xzQ;P-xsk=E zPhkkny8iaBVWO-{W{}{1Y9G^u$|EHKGI^h#>33Fi?*evHDg}cThr@6dnZO}m57a@| z7ET0%`}SuKvuATbYc{d_ARf5`!us6xq4VPU_ii|u1-Qq;<%h(&nT$69`<2xF6|7o@ z-|+5I1$&UpB3lim1K}SADB}$d1E(dA@2_e}iDoN*QFIbZq>?oU`jRp8akQOTjDOvs zSdXiaRQW5ue2)DCMW66a^J#NR`Dg1Ai4axQ`<}O%V_yv5&qEIlvpX8QLSlLbl9?}m z^;52}@Jj0pC4=-8Zik0YA-_I-bz{=q36#th8ZfOa>p2SQ627qmX? zkw&HSWVRkX3W>41R8ha2B%LI?UQ;?PU6?OXNj}q(w$@qBHE5XG0Bmeiyf(!2;PcbW zcQUW<{iPA~le!FQ%zw-$)@D)v_$Iqmb7q$AtT6ZSK$r|Z9vpJ6Y!`QlOltsk-gBJ| zqtcl3Y_%dD;m1BU%XGqo&)67i>8@2mFfs;+n8w!DqKSPU#FYCggx7?FvP#JsFq?Lo zwxM6m*f=drWMkL;Oz+L)A(Eda=C~GW+2B*qWc{%oCE$_jL>k-3g90RC7V_?>bXy5N4Vzp*-{dkDP&S|a;KQYgb* z^(L@;x6(P&7qhc%j-|{G&>TDO5*t%-V59M!5<7f$Uw{R;C51M$G5h1Q2oiclZ5_c{EZxf6@eT2^IeRJChKDH$Qo82w{<&Og^Dd9 zBBG7RSyG{6JG1hj=|qgQDJe76_cfPDR~9qzxM7?0A$n@Ks0U53tsu<3IR>d!R_GC} zZb-2d+d_zA-7C=Fl>McADuyL<5h+oKp*M`+v)lu^(oV)f!@#y*>XInV`rA zrcaDptbRYfPXL$IeaV=K=C6<@MT}^b^Y7ndbK}&?r@#X)D7-Muvk*Vfa{G0=PNr3R zQQ^_Ak;*ldz1Y=Z)A6n9FBew_CDriGNIPL_24Kf6+Mo5L#1eB}YiTI~m(G;o04}Xo z67lw7(_g5J*$oIV57}8e;)yAuRr)@tc?jnLQAAZ3Xk!&la)3TuV{a(Xz7_luuZDcG z#SHUqvL)u|D5J%>cn72>yill$8ndw+6McHD9QI}xUIVt#gq=jiPN67`{0lQDv?zNj z@F6w@fY-;9T5;}-<#B(ttNOc>zp~=^3+7`Ky{85%Lg?z^9Xe`m45d4;{PsUvt2j)K zXI~uArmQ%r&OIa1%Z#nzrB`ZzSfXRp!)n%4jByDDKWXy@(;vYTRh$OJ)$IX;GlvfjTVXP@#u=4{iTQY$xdKiXa3 z+T_Zs7`g}1Ue{)7otSC^jU6+=ub1=^JBweY0|=!LFaIeNKjOb$aE?IJcTmIol}@zf z)hOp1luBn_1;USr`n%j48dr{drz7Zrb?Jsz1D^dZtfBslm zhlHXm@H;v}g>Fjsj(YJ0hmTuE#8Y=^(MKyCKW_zFlB}SA#RN+0S0kofxRhtva-IQD zi#&MtOs?s0S>vmCQpllFdT`&rR`n8Iy+dSnkPWwqQ};tYwqO8|FBOlN>d51|wb|}H zV@`Xsp6(!c;9>ccXYFOCjPn8wa8>q2xROpDD}wz|iM=+^lU$y_4RhKn4H9R4AV&|s zQ}G}lI0UUzmHF3t6!X*dxX;AX2(p%%`QLPR@B_p~`>TA; 
zPbaA&@E%YQ0h5HkqO|ji%4lM{soB|JilZt5*x|^WkC%lx1qSAOyrP%br0uarc`9R1xP zlJHwaXeTisGePmKk&UbGA+Q$3?QHX$M3xZz%fhbBhJ53;(y;pFEulB z%6_}VKYi~{^*3)U-z8iAqbu$VkKqiG<+ENI{*e0MKaBLhJ*D7#cYkxzoe z|M=IN|JR)PKXFnSP5fK8ZuWgJ2E)q**%e{ytT_hbz~0Y?cTd-h4J~_ugN-7oxzgE& z_4ve2GOi{oGdG@*XpWV#7WiK?E?t$ zMv=uc#bB-V1Bc_>T+H;WJ-z$=Gn*BMOvQy|g|j(;X!ySEWPc|2T3@@7&a%WbiQ<~j z6~n3`cA1M$CAdK z>|I1VjZibibljt*laB4E3A}!=IwN{Jnx6A9oK<88AEwGIB4jm?9pG99JDPJYJrW~ym zF$Hj(;2J_eS*%bG}g(FSy-b#6p6H4m_!K@V3FJA@W#D{q`Q}i(5Q%EI_`*xGiK! z9EQX!i%Zt8yc;W)k;08%(?3J7cZP&z_u~~G>3=i2zxF#EDaK!E8nx2+kBrGa0Mj`Bh*Yl?*YRHd#O?Z8 zOZ5y!E3q29Er(%n$JAIi#ms1~1g_6w#?YiP+(U1B=y4-?1X|zJKIKs)q-*%uo!aZ3 zzs=9YyfQKZSA1`D-vXXKVy@tYuo*JmM*36HePFhqb?84)St)Yx9|-# zm(R z1@Amf_B9ZYXc;XnuHglJZl4vlU&~EO*OdI`Unm`O9+N5cAj^qAf=Boj<|H7Wr&a|v zDrJs-Zs+d&XU&>%=6K$deZ5+Rpo2xN3TM=pQ36WIar?~obL{Gv?i^;k^vtqq*8vAf)$s@@ykbhIVhE0xW&m7;*43PQjqng?-^? zn^%fti*d#jj45wLrSTg&Vv1){B0pPdP{`}Bs_!JLzakv?E&Ng#0Lj#r!}>qT ze~JxnssoPlb<>$!0|JsRPMlyj&IK;InQHo9lLZ-jT!d$9jcd2g5fQEG+Hd40QQIAO zDPp#b^1{{8+y8J*tmRm{<1A1+Uo#{+(1}NUL6Cu>5PM`oBN? 
z3lB4oe85LXKUSN*C#u;SKKfYx3e!1Rp?etU z_ZShd^4)IP^2hiux9KJ&fiIOlf{*LNJ=)GEpDxDRWhCbNXRGW5h!~YzbhPlcnzrih zA=q9O@#tjKPnWLoE2u_W9GB{8@w8tkH!n1;+nuGOWy@tTc;%6MGc%Gv9BHDLvUtXJ zSwDS}6WCW2-=EM*clmPLTpVu|zo_rQ_I*~^j`I&dtFl?p37BiSbg>5Y4o!Z+bc{Ac zQ7LOMa)z!l(TjfT;n@yjhxI3LOMubBK3u6;nB%*Lpxm# zRcD+_T;7(t;TrGS_!{&OEG^t z!Dz~AP`M406t{cx`kGZoiwR`DJ}7Fo6xHrU=7@qFdZc7U(UqcLU9ej>*EDtlSxST* z_>0%;3zCVJDHIFd7<-onjPh4p_(yGc!}HuH0=C-T6t&nCHl67ox-Ey2b^dCoL<&$9 z+PI`K_(P8t(M_Xh!xu?@4Shpe3r!AT5*&g7LtmeyEc>;;6A!mdaJSO#Id)s0dTQh_ zZcf=F4u32St(n{TwBmT<&_B2sRgyPr*_z^6v%a6T=W#zM024fLtm}1g$2hm}yTFvM z@0AkHarycqV5b%`UX&A+ENE{CHWD-OUvu>B@g-g1^ma~!%r(E_qS$0>qtMy@` z%jt(=8s^*vOGWPrLE{DyCcNW~>`5S~#y4g=FEPxuOZQsGc#z{Pj3L1vdz?D^Ijz~H zBB0DhX>@US!{}j7KNE6cFw7jko8u=6sEgyu1#EMp{foU|0jQ$ugwp zc;EuOGsJB@bLj+MgL78(6}}YJak&XE=a2p)STl3op!CUBV0gq*L;m!3q&$CIungtu^tHyJ0kgZ z0^+?mw$1`3l5lZPeZQv#O|THjBw4%emhc(~Cv=&5oJTJ8N`RTRXM5;d|6S{ug?v#) z$Srz*Q?LGkkQWfdvWH8HTJx{H=3#w3KFBzW!LAYQglqG-uLuu{P6M)Dw?@6Xz7GsM zPt-I5M<}8JWj`IFvbo!=!MSZ7SRC~8W0ci`waMjXS5I$!m;w|$0Q&~nkzYT#0OL-K z8qGVfWCC?Jf74@K=GoHZy*pGNu?c?UTfL@TDJg&w_l$Vu<9*L-48N)uAc>{8md+@o zS_WFK8`c_>smDZ}5E4@}DMDsqV7BPqI=8r@>8lP2;!3%}wn^N0{eUT`BG-{`OrYuP zSNefT^JIBg2H&Q<)7I+Tg6}t5K7qO>DgE=33G%*TY!>K%XubTT3z#acdO_iGW?Xpf zqyhd0V=YY|mB3pFW@o;!L^1DQ@l9ryS&>v-{oB-gR$bPd;yiVZ?YJ#2Dmw;vaj(A3 zq--Pnh=a_`Bu_N;E}1-`K*cTuR0iGn&?5lG;Ipkab3ZPKT%<`<=!D(7^BC{K zzGsrzwHl>5u~!)(o%_6;%qVNoOQbaFSWJ@>n6Nzijq4 zWb;wyp|OK698IeE4tp~W`B$_Jq8OaVtF0g9$|}}A1hAgW1Rne-*VLbGMs?;Gb7v`MgU*S#UL>n&8k zY*9OTm44CG1Qf0Aa^NOUoaBi|yy67SYPN1Sw4DNzF5d3Uk}4 zgr3)xcNuIzi}X%CO0}(XStxWC-%mGb@L@Htqs=ulaU*mpykckHnlxM`byleOLPTAQmwAumLkn9@ z$|MP#s8FC7W?aw(9iwrMIWlswS||?wcEwWT*f&nlZfL(cCX@ z^CzRufoJY$nX>bt_cI(`Wfk>$bMGPd{pP05rmjRLc-^B6&Yq=cW41jM-IN>KYBggf zsL}jkawq|2&Nl?}O|_D351!MACrXhfEQRFy*&2EUTF=ifLonz3BU4$6>Y3@lgM>U%tyPoAbL`wKKvM zKF7A%9X3=)6_qrxt?!V&J6;tpFAQicMOH<@uK1~7H8|1j$*rDHx7D!M2Hc*KN_nTc z_y2tsz&>8!@n)r4SJ~%%p7lCmtANI)#nTJsisJ{gvpS8pZuRCJEA=c!+)WqX4->Wx 
z2$_Em!OAltX^Wub+wLdc!Fdh4d#GWYSJp*q5){V_s{g-c#N*rnRr&o>WYUU+?D)XE;549RXVJ75;%Jfr~-utccrJ|WR z>n)#*oho!nukZS~7ouxda+zjpRe2^#ZigO+&G$MCepAU3*R7Yo$pR>`+vsKG+Eq?H zXh4D{eTl?dtD+p0_NI`1Xi=a-L(g8yw6IE1~07zFSL>J#c-Rz*w>uf_g5`JNp&fRiM;u>$VLxfneTV6FD zHent%(xZ7-#=IbS&ja3Am9K@l#tz$?sQ?`knD?Ka({p459%-Kyk|y;&Vo!Ff!1Y3? zg_v-^mVK!e-kv+Hv6D-)CS?lp8a6ed00VR!q4fb1LEwa4<;w2*;P7^||6aVSTO^L= z_!A6emHKwj`Zbt#D*b46x_@{G7FgU3YP?xWe6g*HeQjF7nX^eLv2uNyJQ2nt_Um!| zUgT(ASh3ykLh&3o@6!jMpQ}6;vRs%S6RN!8o8Jb={@APAaiNc*nyPqlO!tCemvo_# zhh`UL9Tw#3e*OIw=@o6(o@)I(NWO>qTmf6NTRiKaX5#K~V00FL1^eN9mS1^97_ob> zx!^d$yt-il@?tL(1{n`hKZ`Uk;pvE=__lhgn;)QIq$RXOW0=pln)`9e2Ii~s>Gl2i z!dBJF?KY5vzq50V#qozq(nPhm%ZualfkB6Qk|CL-`TUbwYTm(tB!Rap}W#h^Vb}H#EM7YDCfhsUJ*pDra8g zd;lyR(p=Mj7%zQuG1p4@2GqhMPw!RMSa0Fp<3>@&d~|sVdLbi-nFe@SCZ*jQt?AZ5gvfX|tDSRiA%mrTWf8I2nGx zNz}|UqDLL)joVf}?NC!~By*6faCkz7VORtU zmC>rWO6lOk_2HPhiCfyO| zKR?82Xdv|{Xvf$f`;SxAauM&i0cOoY=+0Gn2WzDGdb((OMYY4&AG;3z* zWg@ZGEC-Qk)X&rgWn;Gq5r#>F!=&=oc{8p@WYg!}pvwR!FYPtYFll}YmBBis<2$>u z44~2$+>d9x>m*>(3U26u%}p=8;Op9LK=h-&3FrfTFQSnx(5AZH2u2;Y?PsjX_cjN| zk6$W*bdgKKqE3nng_Nxu0u?SZYtwGq&%YL&z#Q6$<~3YX@_#vXK2Za! 
z4awJYFP`$T3!I+l1w*65gXm&O2!Fd+4O-IuFKtrq0PBM#SJd{3uN?7md1 zwp?8Lfe)mf*;w~;h^@Bo8$WeTZ^(Xwne3E6dLc+$U(3kZPhg}-$IGCsHPiK+U^JtN zD``jJn_27KiKF&=$W|lTabjiSiEH6<6k~6z`|Osr>Q+4P=(ZuI98sAGm#f36s44ZO?uZ??DcwkOeG-#6 zW{L^n7gAB>)#CqJkM{TD-8m*!lDREJkpNLY3h65TbdZ#xd0J)R)qR+x!f9LG=EVI7 zXy$U2uTzVeN&!(K9ayCFJ8avHBFRf$b9So^+9}R186y?P)1D4LyY22n`cfUqa?hp< zG@3QXh7+pmy08XWi49#=Q+`lfkw06b`FGW>^BBn@Q5XHs0{p$=vlmjAz4L3=0 zzpojPLgjAqbX-+7m>)+&<8P>~0;QBr&9)Yt9_XnCE2)9w0#r}r-X`X&Hbq&Y=M1Hc z4Yi#(au9JIC#iu^bC0WbHRUQMt@6JtdJ(4_^usj?qysbZK!`Z))l9xfl87Qx*ku~X zJRy#!^!f4WY_QV6y}MRYqp6zI#IkLxI$viu zTIKp0Z2MldE934o#m3t_Jrnwl(0smMa&6c6YsN*08;3=F6^o6YVxFSz29J9DX3UOd2Xr7u^9yQ z8nnR9HstHcjNwJ@MchU1Y(1F{)D)Sj!r)K24MXUb0b7BSVm2H9HW&1~yo=Ok&=O5X@8 z7;W7$*Uf2s9+y5W?U$;uW0bkM&LMHhTb&}lO)Ze@d_#yxTn>}Ng?{AWQeQ{U8L}FB z5Lmfw3qs>(T36-ic1R9pc)GL-S@%#imOql<#`N=Zake{T5!g%Ns5l|A%{^t1Q#_^c|WVO_pvf%}Lv?77IYfuKtL;FE!BH-2#U zgEwuH1VvV{i*MyBuZxP{yF<{PHf56N785Jru+}UrYRYftL#z!~FM0IjsUrtE-Nxe2 z`#TkI-NV!)cPrt0?Y;owxXlF_-!D!%`ed8~G3zgZoWHwR_*Svp(Im2UXfs%Y^V;1Y zgQKxOm741pq|^vt4pLyuM2$}TER3WmjO9c&q^E` z_h;`|QxP0G&S5;dfSILUIbWo2qx&U>hnuP39je3QiGAYms%Jlj;Cz7W zx_IMm#@SrH1+58SJ~ecFHN4^C{OCFm`3hx(+iz|Q`Clcy#<+asF{&T zeTL>&Qi3Kpw868ad%-x;9RKT`y5}$_&+D%-sEI?IMkDh%)|i+ruwL_6SVIvZ5) zMKSuh9j%c#)Ck{?uCm^k+lwC3mwZt~g0ZMl&dRDvsK~oTzj-SSBh zt>!$m9FlCVJj@KjJula{kSOsP>^3plZmiI}v2FTHuP0cI zGg&$CIwd;{PNSaP0$wqB@HMC4&W4vRae@9uxvGyEo;ah@b!z0}H$PwwVGhme zhMt>@x;Pt!#`+=Wes@DRaXvn-dqVl+384;**2(TxcC01BFK3<$=fq@!9BL-W6b<-B zQ^SvsPD*A_5hCmMo|V3peGL?6Qyp2{{B!w(H7zUgDQ-S4dV33Zh!wZulCQd&1DeWe z>CNs~7B~>89r0T{X1YVnvX6iK%(WRx5L^fIOh=^)dsnZI*f#zw&sX^ra+kEMX^*yY z_3N^Ayx!M~jc=_-@uv5+Zqqcjksd$k<6V%R)-CW9hp6)DYxQ8NFAHbR4`JHAeSKM~ zRrn~n9*eFA*fGjA&d5P(wyxsHu5lf1;vP2sNU;6uebVO`BiKYv9Ysx}PTpRbQ4v>v1yw}Od>WB!Y7p$zxc2pJYESFg`5Lsl!* zjM!@!jw$H^aHht`F$wh~u9W*VZ7v0 zv{40l7F=a~-6`Q*pgc%$&iV_U{>ps!m&p3>;WxQPU~)yr_RXWxNuk2aaWFkgvyX&) zZ^3Q686qvcY2jDZLV~a2-8zl)8JQQ5zaB)l&)@* zHxiW|@fbmV9juVS=kPV{vuqe&pMe_*;iKKPm;SNavt3zW$osy+S;;g`_@;Ko2UDvo 
zH+P}2;UIEYG51%g&DkdV!I~jNOZC2chKFRJiTAwLfDLyt7kWYxqxF66{z?C0G_Ut& zA#5njrPbpQU<&xL9E+tM4m$P5yxz`0&GEApK60O9M`33delPx}yg^rye!Pw{a;HnB zdBhEoHKQ=o@7*I9E)$Dilo?fd1aE`|0dw2nh6u_Ye6H6@(WOZbXo2TkQ$QJ_d}%Gh zu(k5%(Uc)4)&1t4Tg)R<2OV+U_m5tS;NRgjD~>wCc|=TV$vanko_Wy&_Eu1$ zFO}Ff&Xw9aP;TZQQ{_eE6?|uNn6B0{jm>Pfl6!^fPCzDoaJQ0*+P3>yctl%Z<(GvG zQB^XO8N}&Q1p|WvR~9t&mu9cPrIwc- zfIB_sGfk9C7^&lw)~Ou!tKD)xa;saxD?XhO_37}KhgWp!1+p1-FjMOdutUJ!fCEbW z2L)FZ+SlGPm;67^pstHUPTpUh(sPDNk=S8ebB?f>+E=Npg3Yv^vxOj|S1-fz)|nb> z_9VC#T6vAqchn|IFU=k1Id+_pFA#@s(|iSg5&}H~?)(X`?ui=?X(<*jBi>lu18-FW zk?9rfJ+=qYH-cf+PeyB@m}~l3s7dFF?GkU(GCl-5OhOu%WI%ejw99nA?&p-MI%mA= z_QUloczUPmb*V9f(*d2+Dr>UHt{B&4kQOvoB6P8;yajw-UoqcvoQ;V%cBk^Hmp`u$ zlruQn>FYJDVr3Pja(J;xzpF5&dQ}&cY;Zctz0{w6F;K4X-v4}R@T2(^#ODN-DrY+< z9tq_}Rm~sQ^C#0|rt}$o58hmC1Yf9aR3esebWNNS{r<-gq)lofu@)MaPPFRna43Pk zRY?ylamQfNqsVTh^Y;s?3l07bCA7Ntp=%e;s@!-^QJSA0}@{95_SLwbMNS`->0*jt!w=G;ZCizh-0v_;YV0e{k)mj z?B+O6|d&|7Ysk{IHg;q5z-bal}bA0-fav%*Y#_@9NL%s zCTgV9qCaY!DJ*=}?4RkOVD%x+tHklhRmv`ifGJu$$~Jp}_FSUP#|BgdBb?2RNgARl zHD;#5sr@XHkvZrgcODl0VK3Rv&bzH5j|TSI$OLbdP~0JwN%&nAGr~vYL4fKv`8^S$ z(~o5SmR-Y)!=$hAlOhf=)LUAw+t1Mshn4Wl6LbATZCrYhvsA`3h`BJRUTwbm{J~ib zp{pXX(6hF4`{Fgon6gZX$E;~<1*vx|Kl^dd#HvIXeRB+^XV6S`d8 z$0@4Mlze${$R0|_x<7)m)UmD9H`ij>vJDKc?@K*uSp9S$DlRF9*{FN$a#9y4GLHd~ z_wh1v<2oCJH~o%FQE+PKxs4LF$kfRvkRar2lWUVKvoqcVJ;rg~t*2FfoIBLQk)=Py zn5ZYd==o78@KWgE^&EbxZqGlT;k6%*cF%oW(P@_TJeD$C1s!32mGcFO?GkD$ zNJ_NowBW~#pbz!5ZdQd6H9_G)^}g8)^~l22m^-z;8BNPg1P4EuKpDF46>6E{PXbRK z_~Q5xUE;?zd7ZcH@1G)d*9-zV##+qok&diceZPSnacI2+coQ)UlE zRxXdX+8t_(&bAsCLB6v3z5T>cm@#snYj3WpbWwrHaL<^UE1Kk7?iNY#Q_|;w7Y@&j z9i+NNzJ~9KKh3S98zp03HE=IlFW7k0M-uEqtnDw|pE#W%Vt~7NG3GuSzVj^vC2wS? 
z+sKQmR1w8D(D&;Oan^s@Ej^etl+4W3V4EMX(_Q^ql)a&EK5&2DDFt*N1cnf%iMPn* zs%?xc2otU-If+@Cev-+7MUk;@8tL!aJh0_YJW)EhuZ(pt&W#Cz-ve`>&52DyEo)3N za4ptgrF+|BeaV%^gQ&)M0x_!AfA*B8B1{&Kk;rROul@ErN^ zUm-aN%l^}diuI8Vh8eV6f9?Jdh>)Bx$S9=*EP{KFD?8o~+`i!)?PInbRNne>b0Tjz z7T9x{_)caFpM&9&PGi9!syyOxR^zS6CnfS#9ZGdb`t5oS10^>(z?`Kv>af?^_f$NTq z^(b-dc9l{JvN*dKZ%|>u?$~uNzP+Ingkf|NZApBIjIu0$v!Xx!J*t}d*_Efr{`TTy z%`iszLkjm`&Y=|c9tU?)^D&R+N|!Fus8ObI!xQQE*4m*OXv9CNf5X#75Mv`D1 zs=AeTOM?|HD=R5q3hn}v`e2=#WBQ$A`O8U$(aUc=T>fNnTgD38t*fdw!=zgAA-)-f zU{~n>#ok*-Rk?NT!>|Eq>5xV`MFi;(Y3Xi|ZlpUl3ep`)OLwO zgdGJ{kr-q!%Vu42W-DP5K#o+J#|u}q*qzwpgwv|v2FFPkZlqxhZ}#)C=P#BCI;4Y@= zC>dSLm|G%T;jj|QN#d+$*>jnT%F~w>ijy>Xkfh}>pK;Tzpbuo+0pUo=)Z~I5KUW|> ztg_{7@Dc*m>8xa}KeQb%rGnj`G1^;n=muFl67cp=Xxe{WWS1*`6G;%<$F_cEef0!F z>nX&#^&Yq!oz7NZIs;QQITW^eX)8?+W{8;PjpM|SCp%C}WFmw7F(TrAvO*%G6rYa?|Hfi(Q_uZK+ z80fnbZS~CJq(m*DL|W4O5~r*0^tx6$abdQ2pqyX3^16D9sOYo;S2nJONt4Wps368S z^&KsLeC&9S18n+P5}ZJNHUSZy6JP6jo0g(sjeb$rXY)S3ac<*2ixOMjY^4g+PJKDc zME|ylWhU0F>DbX^Fp`wNRC-6NMzwka z>uqe5qx$}gLAIH|_M_!W*XZ8CxsA16%au+$Ez&;M$MsEH7DQL&MhvF}ywsn?sw*0+ zhql#tH`mw3=bLy0j+UXFl~y4Ra{(=cJh&QUk7|?Hp`U%*Avn3)0h*TEeGoVM4AtY1 zyKAE3wm@dQ7{Oaz$LB}K`ASijpF4Mj9+xs6sKUfIPKPDK4jgk}YAvjmo>!_RTMOQ9^4bl+?! 
z`6X0bMGwt8QidB7;aM%QGz0vUIrq0cQ;Y7H=WJAOQufK;nvE#3Kdr!;v2za zQbn8ET`yt8!fG~;-_##2=&iytTVPHnWT8S}YsS2j$8Qk%ysSR0=7tR~RUQ`@2t-Ju zV7x+E`Y3q_l!RmziKs8pwmi(@*UXA7Vb9=H9TQ`p(!%n*Mo_S|BEk;rfZVQ#e70`M zBpqOWRB;&~Z+NUo=k)eVXxG`wqgj5mFxbr2KYa&1nKkhlh5)-Nhiv;aWSxMsnucu~ za;?_9R09~dlU>Iur@OCJQGhfR=rFn}BwXF5?=!?vea;9%s0H0-CZtP^ zCj=%I=8e1Ep|H{(PYAjiW2pK$-hzoWddsFWzgT^KSaef9Xd6Gp)wzT1Pn`Cw->yLT zOA;OMb{$78r^sif!c3RM$IP!yKBG*t$?P!xVU%pZ`_~6c-FXmU{9ZMhBeHV1S+OM|j}=__1CHh;5P^$bEhK zAAkLOe!%GxEF}ZsLwLZ!`^(_}{5C|M?1EY|{`7dYw>;m(fs-TZcO!rEBpxhy1TC?z z!s*gD$nx@E+9!)(1;X^wlXr6d?Mf;(g|xc>7XpwS|9V}*A{F4-_dOLH1_C;NGss`m zH^)*N8`(4#j4t{wO0R*cJLn$q{{q2Y3rLIoU(ueRSZ19O>!GS>Dh=27%G*El#~-;z z@EHP-+%phsuPkc=aKHdt4d@PfZh?;g?V=+JDg>w*7;}_13=?EP`rKqoIoTA|?Vx5c z#tzg!CvWrarFJt(WuePKPj`t$Z68WV5%opYD(7L{CKQsvt%avO{A2Nyk04SB7eQJt zeH6t)(Tk1Yz6Z5ocU}V3K<@5EAK>r!YZNYEJ+mHKSS@IH+;l%&c)_gp zsU^089uOKaE#NJF{_-IX^F~xnw61c0`HK5ME=|1P;t*-;z@KW5?nk`02=mV^kIF;> ziVip8q(vGpi&O@HDrg0HP5MCKEHeYr3G_3yvLw)4^QpJi(MEwZA?OA-?G@lo>N$`K z0Nv;(hXNj4d8b>&ZHZH`-~rIhpG%y`BbqG2M03l-es=S;k=ONeex3E&JGRj{?)!38 z-)qkD2g!DjT@E5}%DmTFYvW@MfICxBL3N%UhN&gw1uo3JWsE7)ehR>+eaMv8ml!ib zNwU5_t2figL3#D7rUCQ}68Qi#XIf9PjrN_|)R@oJyXq2{KO@G-)gY^}3HebK=215m zYHu#+o+bitBJ1kps}kO@VlFs(^3e>qCiOg_>-_WR4R= zsPaZs5&!_j+fk-Oe>YTj4t4h1v~rJm*{7j-6rgu-mZ_zl6w#t5u^*9ZdPAPVaA?Te zOt{@ewMQ|H9T4Th$Q5n5t%ZGL_F@ljjB%RvwubPnw8#@6T;~X@V0(>VefJMVv2xB} zB^~=w?N}U2b>Lhns>XVF)5&qjYO(Kq5#;V+jNxz%`*DFrOG;R!=hF|nc(;X%#dXU% ze0O+DAs-h^_t*oEGi4Am)mp3zOFd^!9LqfU0|O1zF>C07lz%Waa^~VC2HWZ%YTtko z)fINJS!MMnQ9w}{wd*Ezv-OGiXqC#q?&ZFOJ8;8jOF)bB&b>(*k6mu!*qJW)#xD=l zV^!xX95YuNfY5goP!@O6-IS8~er3$hK%L~9+y`sGH6=Gbmby5nK>C`G z-bJF5rnJfJ+M$h^W}e=p+lyxs7{8amI^CAQ_E6<7ZE(enVZ;{#Mz$5m>AF^c38?q$ zT>0aivBbxm$KTUM*-m|j%@DHlHe1SULd8218gfDv_Zl8|zm00Ne031zvAaJ0E9J76Lb^-p4HUoIEj` zuIV`0XQK==GW*;+3Ly4xFh=wqihYdv19uBE1K<(2r6rhvoTr=o7+AczF+m(d?)q|k zfA40lM%v5erdn7vzL#81vV|W~jb_T7G)Ysu&81{MCmgDK^`875QKdmkQzGd+-Nbwu zm;6`;D?+Ap%gOWUc=qkSt?BCU9CBO6XYDH4VL${{3_O}5;G6x2T}-BMTZ6qqS@8k+ 
zjN#b&Fj4>m6csqXK$o|`&ga})+R$eX0dkj)5GAt>>lFu*d^}gTD^l-0hxl$|nMtNL zond&=ABAeHw3b{wM}E@7X63$-Kno4gH-VI9xAp^-;7$pcUFB?6RvST3APl5TK#Ei@ z|A7Z05nc>nAX2=|l=vPTuAGCV9I4!o)|t3z*z3FB?!6Mct9_V=dZ51T9>h---SjCh z^;Z@CG(Xu}PjbJke1N3fJ*PDyt`@?YCzR^ z!%4`b-9>{n@!02ZBMlDdPkgLyfxLP# zM<^Q+7G@JfW1_zP163m|slp|wvc?w0w3SIR5{sIJZL5!X^Q3#jy}H+PlJK=h9m9x; z;Z%YdQw@Mi5j*mn0ECbU;J9?*b8xRkKWr<*zqJGfHQ;Jj#W&;7)OU?LeF3DZ2;LRr z;A_=M#(<)#PvF+A_k@i)1s0DctTt(IkN~U`#B#mW`5+hXJnv{rf38{Ww?41nsPSY; zA5^^OG152TfesSl>--?t!t8a|^pXGc32OKI^T66A;TE{XRTxNdzd5k)Fn++em zBvi~Sic(Md87U_(qvk0|+->AMMH;f~#22>>oU9eC-o1Asa6hp|o1#FMBZjzVGXScK zl^+QR94i2*7Z^4!gESZ~qlmxFT>$`f5buq%)Z>ji}bujJ?nQ#Ys zpGWalA{|yk!H2^K6D`Y8YH^W5Ka?$iEgnb*V;hYqFB@ z0RShNCYq|6Bi~oTJUOU;Ufz%>16@3>CDLaAsYt0rYpg~JD2T6|9k=g@$6^=c5`&42 zK92PySJ&htuiA%a->X$k^_|HFSH7;Y`7mwtxn81pr%sxIbBfnV$Xv6=oCwiDZ9XA2 z=Je{)!ECodjc-V?H9E!?OoDcVPp0jtDR7U%r{y`H*Lqq1bS`A>>HFPTdO%6{vd7OL zOXGlu`tn=!RRC)y0D$apvlJhsIhLdPR+|bQ#|1V;RSjvD4LP(g3sYqot@!|`>5iTC znATP};6|BYb7De~BSdvt$tGinM?rO!Q(8%H+@dnG$@&~h9B!%qx(_-+2+R#jc@<0N zklFih-h>|LHZXu*;O3TrCT^Zxhr{ys=IZU?7Vs?{)?Oh)qveKG-`#yc*!@cn!c4$v zj=B87zdBe+fIK3C19B?w5D!0&QJdcNPY`47&D9pJFpWB)sp)hO`03|uOKpKj;h zIfC7Q-`g+$@8pq3b`VZ}cWc+L)cv0W_gK%QiM?8QL`02}x_?>^7 zJAe}K37)b;^UK09cRLWs|7V#7pb46mvl|g6ZT!nGKZ=97dC4cc7b}?mO|}HY`!o+T zp+QTizI^^~zs;4twICCrrKtmKxSoF9 zd!8xF|GGPv$V&l6Q5{s~YXmreePjmdws7DdX$6i!bYVzBIvqAF$Y>?|SM7J3%b+1^ z_ix${TFl2M^X31B_C8`*IVDUVG|g$H6u61Lwx{@6Nd32D1+X&!-yGY<@8!fM%A zGdirap79u~7x}mF9sgh~2vWdp0F~Pm!9(x=RRHLKt|e=De_$#6GXtbO1_Rt;eFqS~ z<}9}r9se){C{X0B_>GC~|F;pe05PBS{}t^y2q$P#Kk_y@a*YB|;{K612$io|ZW)D3 z_4r7XG-6cBo1Uor7Xk6he`%i#y$CQCP5F9I|CafFd_e^MnrP|B zD!a09ka(puo+$Y*f>ln~hlVmZjpZK^=x;?yI>rKGk13Iuy}IK&jthVCKgpIf3DD%Y z=2_TqCTffM9}hXmY(bN|>VXAnR}3GpqK-L?QU7J8nYU@-<{W%D|9}I0JJqyn&?k<` z_qRfC>;JG`0jyU9anb+c9?u0}8$0}0*s=d@dk(cW-=Sztp~3fT67EMgj(o7;(Vm*h z4nDhLWjlMb#d8_3rPWvuXD*2GN7ne$22r2{)Qboq54*DV!D@!cl=Z}gn@qB^#EoMY 
z*X}*bn3lG_!C0r=F7Oy%`t*`-3+J2)rkdTP`eK!*jtLLVBa0oZVwmmZ`-zUSSa!;A8mb9lP-K039Vwn=A(Cc)`pKl)GEjO=eyMmw9 z8}?-4Tu?60fAt`$=`(nEo2>gy2M5GM&C4!U@tt{LmTX5d4FEXr8>h+BM@@?zV%ALK ziOli+*F2LqL5D^Vv=d_!iwlc_n2L2d>|X?;#m;RXYMtgIU5Iaa#Rsd! z(i?KK5xvYr-d&~jlxzM;W>R}G4#!&M-bp1&m9U@sBjnpbkU9X=7RpbS_jXmKrR6iM z{}q)RxtHZvKfYa;;c+>jWW}dOZN9d&GD}-U*{GMkb@4+J(1r z6;I90Z&vGt&Xdk252bOPQ|QUFDpc-)cxhcvwQ+5T$a?cN4csB?ulvITqM_#H7c1Fm zkwNt7>^B4jQbu$+ zi18}X#WKGq8%Fo$0an`KW4w?DM4xmy=~oTLy^klR+7!bp-FH9j+9Dry_IDY%oGq1K zGoW?mF9Y*Xdnb?oQ_m~}03=j7r#vH2+G^ej6$#qx8D^*~fz!=#CH=Vm3~6=0AtP&~ zUb5>HSIPhuB!Tm}RAVgHnPm+tMx{+6VABz zhG>8GkyZxs5Hl;}u(0q1Uzc1zVZbOx>!FGl$eq`f=`E<-^%U0>t>d=Qikd5I-ci*c zh6FpC$J%vjMh^b~02D-m1BK6J%B39_Z1qi9)R-QI-jG{NkmNXHhVgFWj`i%V($bI;B2UdE6as~cmZlI$BScFh!+cg|$y=gahP$h9UkFJu& ziwU#DfP{<`-81ZZntgMX$9{=^^goonAUiORJOHZePx2O!3BVcVRNTN3*7~fH9Me!D z9r6QBIhGWRp91gOCKlv_I}1~g)js$$z_^AG*@e&YL6c6;brtNB`^e{VpER}wYD>PJ z?NRW(z3F^=9}kzm3Wq=3eC^0^KUF$@Jzl2Bsnw`QbXfiV(3YjC>+OXGxH{Y! zcPlf<5rNT_At~wv@Q@!%wWKQI6CR|-Bo{cpW^N1NkdExx_GV6T&StJqI^@yN z#oL_cR4+G_Yv`K@co6>eOpFcX7-zfdz&csQ;=r)SXlK-n<2t@_a$%Hi{=Bp9jh|1NIWS+ z%8droe8K1{Y!w$KwwBxb0PiVg?PV|Gv$C_7^D&OPx{8d!zP68aAF994X_jRYh4J#5 z*15fzrp}c~kH@GjQDkZ^ceAER*rexXXwJ1|U0(U>roY?WX)#h-p11zcNulnRW2*6_$A`OL#u1|Hyf+Wmch|GPsT>EI0s}8 z=IgYYza&XH)8uWeODA<{3_`f9o9mPWPR&kgMh2*97L5`-$3_Wj=lG^JtVizktJ#m| zT_#{mAVEozAr$k?2&4@@TTuf{p|7BknS?> zr_;m%R+F7NR>;nyj*k;+dr;8mw=XaY$dyapXXs!I3$UsXI`Ew3<{cVad}M9DVb}xB@M+r zB1v?#_vK!4)o8ySlC$q4GJkWLv?Q=F0+VNdzLvCfax-pQe_3{gEk*_f`td>!^??Q( zR)%2o(t_Jz6wq`c3*m{JI@IWy>LdN`q?&EM*?7+QY=;n|I@F$d@+4kO@Zg#Gl*UsF zlDQ)Ia0mWHdR)P{xn5V2?N`ozEOG8FO>tvyZ%ADiY>=GOy`NANU*M13tV__yh}}fT zf|H+-elff}d2BV+Mtt}h)0n)Tv*=)V|9G>9o(>hoM7{Czjc{>yprP`p-0{);M=?$7 zY-#WLd-!w5gKQY(=t3$n|hs+Lt}NVGLzUT|{c=5A1NC-cdefX7e> z9v;&u>3ToHm_CpgLrvxm>tz}oi2^^1Skg`#M_^tXOS&0jMDYwdVbj6EX0f?s9hfWk z>O^6P-s1|{eW*^G#s!hx!eESGbM=;_6XM#ZzuW-ZbB^XvnkH5-&T&b>%RlQOzTCtU zH?Hc}N|)kOrJnc9zE{moeOC|)_Vz`F3BDxAcylQ#*46>GbM&~sg0_9Le=K#B6=dsi 
ze=DQ*awOi@9}|g^-AVA6T(5Oac=kXIe?6Q!`Rak<1pm;1a68n^ia0+N<57L^Oc!jIQ2jP;Q#Q>(nUzc{y&U$LEn6Xdq4X_-$ygzDIHtG4cigRf0^dqgdSm*Qxn4;F}i z5)leE9jnyl`Ir~{wP7igDktoRCIhn`k7l`k9m%TtRjMLd&8J^#cjn$_wt7M{T2LxG zT{R5}M85t^Tu5++f;>_zj!kKBt(@%L=O{3rn%WqD#cViMT5?`eHdLC|RUWacOT@wV zE>T3Roa{(mhR$(^BHQ!z;mT|z9e1#^sS`sY2G@9iS^WOaM>wJ;#br)w!9Ifi*FAn; ztj|2o2e2Vw2i>bzbVem9UfR7wK)pm#U^V;IPT%A9u(_H5v~PP{P7U_JEJXi;Fj=y1 zb?HoaBR;SRyOSnwMq;jX{b=YBpX>sY?S{$QT6o0=u>zvmz3P0P+_D_~D|Ihfy~(34 zl=eNu4(c*9d&r;=e7?T~KViOM&+?xU@Q;vrooPC`V%;Ijkb$9`x)OLpuwxhmCGH5x z^=lHWOjAFW`cfO_C%;?%e#lwfW1yL7TD@w6^8`Y<<{0&Wc)H0S;Bp@Je(wqAflYZK zqj68I)gB$2(|!!t-mr$7QrIgi^D<4&=6#RY?IA9D@yT36sbcesfuz+n&%DVKui~|= zhDQhL^X&Wh4rbmegb=d$0_PS-`F1z73h$SNt78q(_9$!Q^$#XHGpd6XocvX!>wPy? z`_aT{!12nh04N1B|Ii9f>pJ|5&eOWau*La9&vOaN;HI{EN|R4<=RB2c4LMJX35K4V z*b`hk`&msBxnQjAa~DK-zO*cO)gDOyX@ql-)<($B!8oFEz)GyzO#t<n2_P#rbQG0r!W~M#kE`_gsqmn;_+bAE#$HVZP}F zT4Rta$E{kBSyAck+YeM?f$^jZkyWURZg@Vjp6aeBHzYNHmFt>mn*?@e2~{}HFw9L# z&yL;Vah+M24_OFVD?)R*Jmem#+1rtD!VNUHA{o-8NVdO(|0(mKKo7C@kniaaLiKcr zE<~shB3Qe{ZK*`dESl~xlC(A}@5EN?PR~7cI;8pBxj16w1*Ga?ux+PCw(u_*B=z5L z73e-I)Z~>2DlMwqs^4(Uj-OK?JthoHqR>$FWN>X$NuGtwqHfueF zOsdE{hvLHr(uISw%io=dc}!@9%V?X}<7-3pUwJ~{(s9FhY_bR|x1Bi))R@YQ@HR^y z3viVdl42%9RMWdoT`${Z)Sx%PebeX8++%}YAMqO8F1WUwAKV-tX<@m?&Gt3-$31M` z-9Jq#O~?%@=h~vG=4q6%bPXu#PUDrZj6yjhLC$O%^q66FTUvvtyEiTx>2j=zqa|#Z z34A%cS=+Dp>Xe^+XcVsdQ9@=WRmIW$(^g$&^9sjkY0%hpDe6dm!JTMrCscM0CUfNI zZPsT`q^gQEB}R6ewDi7?<1~L)s2zf7FFI+nn?K3#`g@S1iW7&PWU=rEx)H+p+fQ}o zTQ}pr^PVHAJ?=i@1*hn8uZixqIv~Z6h3(rNq(=-NOeWQPKdcE^({$lC8cTY8nYFZk z82}Aj`ElOIyIONO@df7hBCcc6N~ZdUH_B8m#bdiLqczoF zzUZ+&8422u7@w*Ni*p-psRcbxX}apq1T0RFS3V3nl=_wjuj4s+XrT=N#x1lB|ps1+b9av!;)oe~np+8YZQ zDSB;nle8OXHp;TAEQU{a?dYxX%(0?b)?!C{1s1sXJkeb9c0KgJR`Yp1`KFAK5(VFd z$V+)=WX!?&vG7N}C)j-U+ynDiMUsG7@4m+u_KyOrIwpV^a_!@BjS*{ z3tofdb+z_p$eQW^D!Qm76v@!U&^gl3fL8x=5e7`6pRy1N;e#j?sOPiXs+`c{qAxnD ze4|?L`Lh%z2L!USJIerLQq@Z6r6FP^+YK3+I6K5|Mw5!8jEvpXc_&=MF9~C;kDCV_ 
z@2hX4&mydgvbH1Tw9uJ_9+!sNfZ^2)4~%}5yL#5VsQ}jt!|6@aW~O}IcE1-zJT$v| z9W7`=vuizUfd}D{U4)hZg2FVFTn^(UHWZ zHouvXrZG5#wf48YB&dg9qD_kOZ?)5KXpD-qGu6d3v}|WWuH~ z`jQAKJg|%ei(Bc1w0Q>8ue^;l!F=~|WSkg_&JVN2pU(=qpEPcfRNs{Iy=qlOu@O)W zyU?8q_4Dy+O|;^W8;K*OBOf5;p+RIP5mwK)vFWZ{qCe>|&NK-|C!0riNYO_n__&(b_`?Ob# zb9L2J{GK+-p6S#9aY{rkT5Z(44)vsSj8ySVsx_Y_MCFv_iw$$^q|$VP*Xaso`zzE? z)T!@&~92%f&%DOkk_PSp}WS`)3(IIzrPURfSKi8OdS*G3Sw^#BT0M@ z&!fT2*VJQow<$enjZtQ#V!0zde4nVz+v7Tr*Jr+0YBzxd7Zc=L;?pddL&ych;Z?{YE zwu)9=RcG5n2Dz)sq6#oofR#Wi z==wBTmF{(~l>gNDS^a}}d} zfU%b?yYO88p zivRi{yCabK{Bok9IWGThbzM=zo|QZP>#G*LIeM$1*NZ+ZzF9 z69-Io?^X#{j*L`LFWPCLd5AhGA$ReZ#dc)#UK`Eg6>)PZ85>wxT+GHlMaf|276N~> z#IQkNU;KX69#KC`=NiiZ>oS54pc0wR-<_D4NP)!Oijz7UM7&vwxHi zpK4dB?G4k~epCAA6MY@Vi}WFZx~^%W<@{v;rT(|s8->L}7Ex&Gmg_m+KYMLNJ`4)U~W2)D5bQ`HLBse zF666bHH(g!X!2h zZ^5jrAhF?gtlH#3SprEkJw=m~-VniITK7hMx>Qy858ao5{}3?cQx5B@4DVu`)1N&x zep&y(8UGF&b;EbB4Jl6WI$XZrJL4r`GkO<|f})})UGK)3uz4KB0GrOpPBoENybXPI z6%o_;Z6}<;sUCq5Vz7Q?()b6T!g%;Ik^O@QdSRHz&wu!UfbtPU=vPx4xuZWGlW+zX zm1L@UhqIdzT!dUbqH@7iVB#6&EZp=#D7iQVq&BXs#D2`tfVF27-t4BpevE)Cd<9-Q z*oDsN%hI}W@anLBH#A9yFzykV;i?ulvl)K|nm$ofs`+A@7?6%52>vLGk0~d!@*aw^8epUQeJ@?2<*!)Cy=a zd>U3G&3jy@Xl(zD!+20A*ttMII1B@@~!r%V7Zy7qo2MT%R|_eP+{*SN6Cn+JanB?vn$d%ThU%XhG<|0}cx`dIL(A=65Y! 
z6&qf>xq&5EZF{)F_>^tCHDe`N2yt!EgWG@qg^-gDDyH~l;36;F(vxV-JZL+2%ik@mzodh=H>U1 z;4%jhJnK-8j)^{L#~5kumXvOu+`#~JIBEH={+v;b@3uPgny$+Er#5rmXFF>i)UY4+ z+|`$7tJcvXpOacJBkNBnUd`a8n^^Ad7d#3j;o&tD{vzsf-&U*Wh)Q2Gl)sslB&$J} z1IMIhM#f13Y1@{}wog(OPMSWe?`0{Mowr7VPztVagNa>cKegt>S$zoteu5=wChE6g zN8AI|aefoXI82>1hyg}69{1tsUpIg$=#C%Z8|?!l5e_>~wya?($jX zgUEHgI#dSv&W$)NrYTwgp)$cTDVC%OA>SG%!aYNZTXftrS#J` zF2z*bwy?M4v zibL4eqPz1Bh?WZhbFO%O1Su-|RA!ye7#<0OzWHN-5~Fnhp^M|%A#091=^htCO%vj* z%E>Jm zmrtGc94$Y^3u$anr%?Mab!z-|1J9A=cAJ;^8%V-xd~;4wJ!<$kJ&OWQxEq1hspaS> zI&+dB|6J<1+UlOlo-W|qGu-j_%HME6dh{W2Rk;&DyQ zJ3cfm;+`Nd*w(euw9I6IuN))LVYHu5KL%4>8Nehx8yZU4KWebD;aYGZR^%}Lj%{mW ziEU_atjAOcZS6-#mq)$y@Om$Hd=#wq`N(#b=cwUJic!+M zkS=~7f$*%`P0N~1Z^jo{(&1~|IXa1Ym~zfnlJq1w*YR%K=I@~=F3k{0`PWnEn^V*B z3x*U2Gl*4WQzH%}J51?vNb{Yp`iQv1J;&79^f%r{V#bZdSM#wesKEw>|D3#L|6eB` zAb2_9S~`wRvb(~IwkuXpBqR|dWbbCcbb zLnud*ai(`XxCO<=;2h5!nxbwRCPdiq@pv_sQ)j@2^~sb_mgbAk&uNqdynsYD%XBDI zdtC$Q?HmwXxp0?!d@&lv!wIs9${XsPg_^Smv6q3OQq5Dlqze=qvZ7KDrnMX997hAe z07f^yrtry8SlpKKYZ+z55`>0Tx4WtITB*zK4`HgG%u4M~i;|J<4tX%z6Yk~0`_Frd zw6f4!y!yVl-Kd~*R?&x-pqCFcBR^~bIW+`8u?8tga6)h^1asMNq2=1d)qa#7%+ zMKJmcIgZ_X`3YWV@f_m0j?j+Gp5Z**92K>@M&_R@-k3hghz?qo9>+-9mS+6QJ+RME z1-Y|~K3m|mL&1oB35xG{pcSi~JqB|vRF&fy^>y+pMOgJ_-3k|K=*xje=hA}>8>WQ5 z_7_=nnH?Zbi*Y+fG=gh)Qie)#f%Ve|cXm5%{~^NR=Zq_llz&_>)To3?_2 z)-@k>AT*Rt9tX24--XXH|HE$@Adgkd<+~ytfkiU-9^AbZK5ta!!eud4nP^H3-BQSC z$d_&3Z8%>=b{*;-hpzSHFLMRDHnc$Sr)-ASxZ-P)xZ*d~V=J4_S;pgz)k$`B6T8^O zK7HT%!1CC=0R%1A3};HZx{3h*O-xsK7Jt~YYQ%wRjtZ!KMva;ln@94PrrGC8lJQm4 ztLHcP^mOjAaPN*8kFXs{De4~abR*i5S;*A*1xLQuv$Hv@Qc}0W&%w(is1}Hgnfa2b zF(G<9DTvnyhQBX-c4UOnVRWHuB<#O?MRs~)~Mee=Q;l_s#n&w z?)}-Kx&ujUT#A^}1TL=3xZNnK)Q zmHOS17)xyXi&*H#pv`<3v~J@-Uj$eZAPrZ3ehImPq~pO(-Y0`?-Ce%>hx*d-BI{_d z@hR*|v4L^<%%Qv~x&!w7?XIeZABmm=puVks{ttdS_=0m7r8e@xN$<%dLPM8kX!NGh z%DeYC^HUGK+S-Z}4gs9IIbxSad=<4%tLEd|l4|2El@fpu>?fh3_r` z#k4~A)ywx>r;5RzVo|=5grUnUUe7bFnx8N+=}Kdg#!I9BP~GO!Z@o@&VA_@5Qt*CP z-8&fF9p9*se@g|Hhas}Qb#L|tI(fl?ZrJt-RTZSmu3^*d7-~3!E_3CVZQ?*AX33k1 
zm%rHe_bCDbSY6S)`)WxKUEII4ck@}1P#c{R$C|zdyNT=Y`>m;7bG7aGXn%W{&yRO; zE1Sit?0GS*Pwg^w-*V1Rdmv5ICv~~HJClk^wjfq=xP7D~-`pr(vE#+SmsE-fd*f8F zt1+7-roP~Er%Q9a9(U~NLFL!#+}1}#gT~KKj+548lV(fvjGBC$tdfNoU1gXEgt~sp zynIySi&<3^6h))3z3*ze?g`n%w@^>stu}b_0ni5N`n?Yb4`H)66bjeJ2kO9U42I-f z7J`zklP9UBHhph-cSlC>`m!%M>=z5Ld4IWutWN-CFC{0#j@BpHu*A&5qyjH_#OuuB zJ><<%=CjL-D@-C%RgsE{Lb6=9zRSZk7}$_##hp{eOe>z4iondY$%`vGXxNOal0zV; z!_!j2iaU#=Kb*}MHKUBG66o4)QQ1s>WOVNTuFYcCoJdh?McT5n?jbG(iLtR(ewB~ zy^%qxS-Fs^#%h7~#o&uCak$bg$wntk(A!wccq$K;PIC-N@N!^qdJ0(KD>2~C%|Z>A zXLFV#G5jUHIAOn^gm{2D06lXMDpOMuQ?P_pH#<@keVq$?oalR5>2l7=|0zG>l1(qk zNAPzaPXdQ@mp`T_Tbs*!z-lX1N-o_E+D4mva`WLvHhcTy%> zJZ1hWMAe993~^uTDeu}3;#6TbFQS>w&xy-c&3%4dzfMU=7lyB3Q!V?&gQi@%3%bF+ z^g-8jAMu*gi z|Krip*v)+xuN@*Hmt&|6rHhBw8T5DP%}t-3+n`QVf#C{ovXu|#`cO=lqj13<4FZQ{ z{dteW)}3`9vPi{pwcYx;f)CMK@UxbREH4OW_c5tu-+hEYg}YzA0R6CT zI)up*mkI%*JSFUUz~TTF|H8_I@9>RO6PBw?=k4DE2tSStfx-AlLK$Gk-U<7zU=*n2 z_^xz`Itu9WJDQe!%;J?4pZ?Hk=(H%HNg@-YN4xkMC6xYb5wO44=XZ18<%r-rbbRhu zc9qH3a7t?{Py#N$dlggwoHlBaS5UtV?O$0B-yh_JVS5V+9*c~$t1iV+BT#zEEkOUY z-O&92?I%x5j`FvCW5a+kRAm;;b=C^I+A{G!G*NXRAl&`g z?q7xgyalfN1;QTNo5B#%cNgcu3X*nbm7feh5tTFEB)yZaP>@6h|I*1pi7g?1>{d|5 z(s}#N%u~hLt}TJt_pmN z*8JjY?#UBpb^OUmyuoT!`-DS}>%H%R+uOh3SFnX|d#PYR5>$>vzlHZNDgN`pFwn;+ z`AOELDqm5`vpuBA${0%vI&~&gMi-(FZN0XaJSio_eclZ`!v}by)rsmcu5d*OjT7D& z{J&QRJyh_LhcaU&Snl^{|ASoj$n`=OEJaJXRqU>FGubDiprA8#Nct|gktuGn+?W1v za<4!R1Sse$(Q$uAHUC1LAh3^>_lL108AtnT6_ha%;$L*`$K)&}>b0a6C;r=i79@j# zCCH6bE}LsTA>+t-tBKXsn3#|l?!&?=?Hh3C=Xx(t_z*|&Q~rUXKZ6QCXbkqCDouO4 zvF?_h(Il)KJ$Rq~oNZ$H#Wi97_=-QSXUjjWzD$$ID&ct+nToKW=Gdbl4J8=^-gUwaOaqWm4jC zFChf|IKI54yxy&cO`UsR`e!%4`uq9N%>@eI&Cs|O8`K?)y$km{PNMb0NDdCe08%4FrJ?9Gl?gb1uhdV< z{prU)KMaTk2wt`*tpBIs{{3X=p%Ezm|I)gkdSM<{;AG8*RBDCqR>6NH;U7QjMg~2? 
z9j}bF|3l#YkKg~pGc7A)LU2Zb}W6K{-G5^SQe|;8gfF+9viKkKc75Vsw4uG%Z zfIXYn^8car#TxQ^Hs=up|49=G-Um$xum|-%8=~&xe*032vRrJ>)|ns3JA%6gxCu@y zEz;7RCQ^Bb9pV$7nv=ciJ!~_iCI5n5#P8QuJct9>&vn{?mh(ps%jD2f39HXD-gT3q z^8k;XVc3Yc#NcN)3X)IZwL~A!3gSwijL#pWECShPY4`|AdqS_8wB*lue8Wr9WO!Fj zS4?#Ezbso-IxT@TUZ~qY^b!0mTJ7S2-s+|w)86~h8}=Qb3tUm2bX+oY(%as;FzIk8 zz=U`y5>yU^s-7TZioEi|QU2omddIVf9kzU36N1r3-}fpp2iX#8rgWXtFpkVJAn5_7 z%7U`D*IEg18H7%Tm3mkd28zG4_6))^qQP%~ct+iYzW*@KQEmFSH_;CyXg|G|) zc9Z=(9JxC`b~vpP=nWgwXq-snEx^nhHfciQ`?x&db!3LT_+y677e#6K!x_HJJRW4y z_eQ$%8DH;}eYQaxZLUS^2EEcrUMWeUM)@kaCcPo%o~TjiM{519xuGg$e9JURFzbkN zz~#iba^RYEAf>#fHt^lauPr}GhTaU^O52uD9n_yN>@~9*2=i0qfPHCvBu0E~^p((I zhpB)^haPerd92#~vTH^umymlz+BypQvV6u7jy>csxrNvp>cJOHf-6tRUimO8aXRY@ za9e2UzQ378Wjj4~bW(iQS2Y?OmPts;6K>68V<3LC_#Uk9iLc ztoA*0m95Z5u!8@ZEP@a8Rm(V>&+ZO*hTLb+D0>PNo&ENHsE1&h9)jL*rCtIi&ci2y zO$AiRsWj!CXsgKX1=F6w>ZznZJwv|-)N%obkRfpog}^?xK<@-%At-`mLkMa1Zv*eN zXPQydQlO#Js@<;E=WLOVVJb2L_@v~>aTg4s>#vue!X!H#qMz~tJugy`?MA5Mg=X0B zy-!lHoO_fa$s-^-f|sHdv!VH1CEbM-c9aMf6CId3tlk1DkR9bC$ZDJ>&MbD@7X>O< z-$!rl=$O7_wG(ShmYJ;djRusR5Ud5kV8XerfO zZ`k?rnWnSk$^Eitkok9!NbC3eC9xGO(wWK78KB?92H?9u4-uBxq1Uxe?pGHLbDQ4G*gpMQ+(*qNI1C- zkIDrTaK@m(TI6I22+OmJF=BJ<2ZMBm6V@tileX91@gWr-inDWrUf+sOupz8g1?IsH zdJ&H0#B^0huxp0l-V0(_Hc-T{so;x*oLdr&_&+UsLdF5C&19=nOQsn-^nGr?*TDy; zC$gcw1Y2}G)%)tx*IiDH@lsLIh$V_4s z2DV%VD!Gp>5h^;{%=al9F!Ws5Amd>+dh}Ne9-zmwWU(LarnK3ZkD|kti?nFRtS{TB za~Ky8bsjqQORAN2SP-$X0-Q;k*==)LKtN;sC%s_RM zz#;`hamSlg(ojo}`1sQT1kXdKO^_H-N+YxUf?0bT()zUATdv^V`-b07Q+5=P$z+?b z%fuAB1g>il4em$-t%btxs%ST0KwM9tjH&F>@5u%5QUMc6c}x`K=>s%givuVFlv@sm z%LN1;G_EgT={vj?GB(Byvm_`{j!p~;7z1GC*!*!30l6DtpbDGZhu{pN zmZlICLI+64iGY2SX-4D|A^HWqbe%G<|I5>QJeg!UEa1B&suyPwFuMeO+*~HeZ)IBb zRPGa-2LNes5g=d}iQkU31sd@57FD`+0U@z!GyQIvEP@8;F65)!8@5m=#3#Vy7VpcI zMrYeX3vv4i>7^?PtIU1#Pkqcl85dQJ7D>+G>zAx?{J{}l#PYB?ITS&-b?~OL5 z`)s_*2MmT~dhm&}^kzMc>i?tdt%Ks~o^|0s2oi!PkU(%5fF%Bc#aiGIsI^dm z6mt0chet>w*2hQldWCKd#KP#}+I4n;u$aCa9@hh-ZW%o7K# 
zujrVAH94`tVe8e#MFHfKh>}7yOboyoDj%FqM;6|L^wf6vU7XiXi%IfN>Y;aSMK3ZZ z;iR>|l0JXoRR!(yg31XhkpT8K{gnb5b{7g_k$T{ItTm-sWxI>``)DFVB(Gka5+gdN zv6mC%xm01o*es%tQlvgYBA5U#UaWani)XGdMT$`*B29m|qRaY`5-Ad!)ORwF%-dk> z+Eo+;V{?ep8^7(#AuU>J)D=#IRqc;l(`9-p6iwEW(IiMm-yZc!uBdzes8Of(#F14w zq&%)b0}L$(j|hP9KBNE&`JOeww5YudoAsw1tKE>OoOn zwTJW`B|m_uIZshdS39QQs!atO>@MVoEi6dv^nbg}V2y{XGFk)xnWAva2Nm!qTzXCq z;E>?T)#Ii)K#M(y(viu+IUgP&DN9K3U&r)c(Dv^O+i>8*+!M64xc?JA{Kq{9g!R|Y z^QT^A6ZC8bdNP`cGJ$9ufMCI{z+zKAr^~qcl^JC;|Ltj?%x#VpOpyy zpL{MhiRNZZxHLYg(|-kW{<-ykpW#1Wtl%SD<5%+iIYIw}l=1Ik{|_$)SrN;#Msv`> zhWURTuK&aT{G)3BzEz3~U;>ro5C7Iz{4bIM-U0*mHRJ!sZoC%&E|0iB8=A|)-};>!T?Yjc zD2iJnnaOoZ(H)7TxWQrz#rp->Zr8h2&2BIyFxjNoV-&LRBqW{udD$j6o66$P;+WgA zoT+>>>uE)u!SvNb`?~7O!S8qDf7wmN&ppJ*z)X09X-SH|DbIR>JGk83U2)49MDTELeq>WPW^A zF}YnJo!kUlqpaI6JgtB%GfkCNbL-(#<8$xY3MtFg9W2}_k(c}YIlUesdLeP_o29e! zo?R9d>eh!9aoJ*@?rQA8VnaK1FK&sW2RgJvHmUr%$ikHo1qVVH!nfw$-0DBce3QGV zE}iy9|KB|gxGz9(>^imu?5**yVy+N*iQs|ZTQ*oIv<92wrIgNtnf@4UvL%q_;rctH zR?Niv4jyOg*`g0`Dh`kv&VTRO-erB;Y>nE){vd|v4}MU)@7N|*;p(PC1NboncA4nNk$BMfg#>ONLiGJzb)4`7T$5os1pMeAOH926CxQDT6kx?^* zK0P=T3xK)=?>2-HmtZFy_XyF0uKgDL$;eDbUJA zPkE2vIrW3r4S@H9iS)jHCV1%HyC%Ssc=7=RQ`Wdx#CP6k2s!z6r6RaL)CvQDd38Di3$nuWd8{7@&qeN5c~hTx5&c7kRT}YA?M6L z?%(UZbi4WPeyQf0Om`e5#Q}OQR;HrTsR9vDp`K_HTEwuBJEE+jNBgUo0h`oo*X4ZV z3#nCRdB?d7?+^wl`|vS+RVFbhjyD?L`C=7^9^^CX1wJkUU$F0J;?Oo?Iqs0U;ae); z+=zOA1O#Yk@evRA!x`{xbByP3K8zyx=v~&EOFrjnMLIDSm#(=zO1BB+$+Zhg&tA{+ zdQj2%8Z_7IUbC(7%5=s_ZH66<1~=0CUH$`D*zZzoX=q4a6{Uz~tzlTXreag*u?Z5P zy1*o6ghl9R0X?<9(Er6M2cw|zb-@mJ zD8g@eI|-Dh9yFGi($l!q;O2;x12{Sz@&S2TRbxp4LIBEpfP<^xOvF+Mm1^rzONlG$ zJ1s;68tjvu9ZNGkFNm|_*Wsw>1BR?lywXU#c&-xq)BWE};n6QUO@`)l&v9likIXc1 zKM`z4h$iLmq7N*UG-MiTKF>yvv&<}XIdj>XDlhxyei&H#u0S=jbDn=wLqXF2+1}bs zq2A(QlZwSER5sV;)!*SmQ|yb+BP5agvx44rnXg@}KfdL^`?VImOpXX3hqOnjq5^mq zR5XBo22QrwJTanP`X2D^;UpvDL6}gt;2Xi#qzWOnXR7tb=bFY!BVZZFH})6DN!yzVhMGs2Jlx9t(BJMW^aF8n zuTte-=Aw`cm*Z)>w~^)13Rf!|dw+Q_GN_bfbfv3u_E-n~$96MOQq_*5rg0l+*-^Ah 
zp5O@V=5jSEj1(`kIwNY40OWBSM0l^{$*!X8@-_5_f*bEgP92uNb#TxIae{o(#_z+A zN2tvuu6rCyEm+lq!UZH--lwn>Ark(2-V}~3=pOz{>TPU54T)R)yNIbismC6>bN{FV zxor<&IT65_l>zj*QAjpg9rKz-z9<+ z=1nFh@#RN9^|+{yzaGRVTWGGb9Db5;#P6Expy{tvQ#4x|rc*R4i%}rtw!L?&)UDXR z^*-iZnVhll5mf&CX{7a+JlSq~Lae*l67OAH&Y`mg8Kn->bHmeJgKKCO(DFFFf3*;= zC%D#}Nj;tdRS@Ct@<>dQnMy`1 zPRn!EqbOJHYSZd--de+Pca5r2`<-P!91b?}BAy{KGP-_k&Kz;kB#9@b=a)3qu$*jb z{TC+<>g-qQ%hy~SNwWu6)^E3$U&Nf*Yn~s?ljN=I9um#LBJ*6J4pU-VDk_A9GA-TX zEtV%8c_}9(`x7x`Pn!h!dARMPT^tzDA3cSyUX`nTC^*h#0^Hx(-(S@OcaFsOfPK_1 z1!P~BEL}#m$RD*HJ^l8Y*LAPe5~u)tO-~qP;0b8xWBCY3HxkdGY`tdxE77+kYTZ#8 z*HcT@bF(7ob2@UWr2I8~%#&WZ@ZUAV7)cD#b&T`%Njinoi_$t$x1wWP@$T9e#pQ3| z+D@N@IKK(MmE+kevJtq>%so?=qsOx!JdQrDnIao8?V+lkIGR0Bz_Xklc`w0?*9;2T z5>00!zyv~SET{o2n@hsTj!2REkB{``)Q=~Z{X~jZ%ujh=3&s#Grh8wHpfk)!cLtVh zhZ3lcB;`*($+Y4bDE=H4(AIb%^D$4z*JSsHW*Bf^ve5cbu(^Yyud7+jbgK1t^O`#X zTjIuxYU+xxC*O*$UtjAwT!7Ci)g-eRy9DC=7Xwanc|7$&*KgtPF4=5#XFfkpW{R6et* z7R(yH!`hF&>^k(MV$;dxIp|A=Y5bv#A3~SNz^~R z0P4-H$EwbraD*|>iKr64(0)(dQ7=6>D9MT@w|BTklZOM@}x&*Ml$iu2`O-#nNP@Izz?HWw|}o);2D;!f6)oWW!-c z9&sI1V?80qTayLPGCZ)t5OF)Hz>iD%<(pL#NTqvv;zis5^<3?8EWyLMbtwpTD0TY^ zMa;A6oHKH!N!LD$VaT}(tBN|Ln6pm^y+&c)x}0n=Y5BnEHW|njMS!wNCq28px4*9w z2oP>sdy03S&w_Z*C~J0I&cyM>^5Fbk9K2C6+8FZ@%UDjVLo*)Y1Y+;|Zexf@)o|5c z=Em;svXhpUTs$^IRbNT>i(4j8Z?}G~mq8>q+8I=mDX+!EAMD#wp|(YS9vWX*yZsSw z^Shw7+gNO4Ql9e^!6Oo{2mcWpo$tXdZ016E%u7|6Aen3akU0a(;(TV*%@LMYzon-i zRJ4}^x;Id^yQ5eWZie&6C>;aDu`Xjo04>&@K5g33HU80*sFL z6f)G^&A7{fPd%*7@cCx*iGkZ;yRZdu(1IK^-AHSTHaNJgafwG|nGW5HM7!e6>e03- zY5NwWouq66RJu-ejgu$U1k%{-sK~K41@`8xdVP$;;lXf z@KaYaDbmxhhu#f~f_h%5Ar@ume#t4Z0b1g685;OG4) z7GDdOUKDRh!hZ1cQWK;0FeQir9z|VBNJZ-X@7)=3*ctmr4-yefgNOQQmEm9Bu&UJ` z^V*SVE8BH4u!b|;s-3BQO+$vqb_u=m+G;2ckEiJ)-~nUP=R6LI&QPXo0D_}c%27H9 zKw_+j8dY_tA!71$NKZ;+Oc_D_jc2BhIQa!#u|LxX>hYGa65G**8 z-+JTF*l~u1?}r45I@YPZV_4$@KOdw;DAhgnS8Y)5DkoZzmZ?gV%HC{T^sglctUC&Q`xYvK7 zq#t)&VB_{-Zk^bkX6sWKhIAI=d~H(+G3zWGQeCmjbr<_=%{usm8-g-QS+7Z}Ozxe^ z(GP4j+c2=9Qy?Wo}3(~MvuQ1Xp@gwUeBD 
z-`hnF7C+w=0v)M|BjpLW*M+um*yhP9QR+aK5nI zh(8*1{|R6qU-w2d+W0X{G%S3-V|YCyQr$GTu#*KHx#TqG#b(43ta>xcaB6(6 zqny+}>=FL1u`f6VQoqWhPDY&=kX6caDD)XnSbW4z{UYI_#F%i7z>xw$8fmuX0+DM@ z^2xU^jHkDpeiiu0BCV6jUp%-gHymOZc-URatw%HhC&fpf6jU~f6jZXe9Iad$y^6T4 zP_?`8kaTaRaR)+U2TJb^{ZXcGA_ezagE@u`f;q~=%iVm7QyY`d*MA)l$Pvndl&(R~ zA;ZEuvDCI~PPVPLQm%3PNzN>%`oGe*aqT}Dwx7E3aZk6IZ7Qj@N8t-THm#N0Tb4L+ znV1VC=?rWt)9yV+ zbhW-!9v@sFpVf=^66d>>l5WF(Z1*T6Ki6*77Uj0vX=rT5jLmqLkOaCzQR!Er?Kh6 zqf>m$?o}NIw3}~dlxFpHN|vTb9hSQ>Tesy#E$B&VXklyZVu#xdJBq$WWdWiDA0(%k z$EwadsnJRkxVO`7(5~VBn47Zm@ADHIuLjwwxgS&3D2-&~FRWSyPWVI`_9aL(g1DH?fId?Om$ zb?J@u(ZyJgf)k|LsLnVF5zOT!v79CM7*u}mmTp8~P9U$3QQ5A{708k@5?7FR{F*Gpusg;Dvz;RBWlQx`ub`-Q+)}D zijadgI^q3A(|^U%6zO z)cvL z5hSmd(H8S!nSy1mg1&T6v1Qe}r`?#9wC2aR1akRbFfbjvefqRhXTV+yjiwmB6vYtBH`6u?1@OVG%90nIPjIBbj%v^1dsB@TSE6es@;sU ze4<2Mb$JWv7=kD-hJDin&z_!EP!IKly^NNBxoJeV4+}fC8pyGtH5a<%LO1Y6GIb{G z3_Xy8KkYNdx-8(2}#djr}i+6F?xB#nB+2b(rzhX4{+K|ZH1fgDA{{d_PU4PcM0UJDL}_{;a0=mpWT8&R zVlIhM>yg}W-k0QiL?(sZMdPK##$bIUe9v1|dxSlWB-IW_5*fw1bx>K(o+b^9jGJ)A z;Xh3HOlmbJ^JUUcUFrN}n9*+evDAm1iHt3X;$7VnQu)CQ2{jGo-&Q|ilZ|ppAUeqX zjnUBv`*vobk6d7-FN8E_vD1*4_0XGYNIo*yfDj)!S30Yvd1e? 
z{#U-iJsdRd#zCJ{99nM{W`^GRC?8SV9?ix05|<%rD9vOpFBN#hIam_T9w$BmbLrSX zjwG%QAJ>&>GkZ*DjbQ%CwrL_U86~;pMTzQpcnCOtBMY9l3wcD4eQAaL5e4|`kF8Mt z(?n+mqo+!sV8ZsSA6qB0xJS2tEMPzEJh-8Kl1SQ7qUmY1&7ZvaU3c5KmqgD#ROowq4o8b=8esl3F* zWmitmGJTB|*>05~9Z;~qfuk9gH%kDAa!$A0>`LWt`pYH4U9AX+IsqMFzI3ZDnr6BHG8I6AH{HRGZyNO6rMW1oF=Hom z1RA)p|ELs!H0$Z0jNJVT*ak+TXHje?TY4>3!nOQI45mfH-t@eWy7_KU*-yMa3J$E2 z8f+>Oo$!^49P&8d7>WSR)-5$r)Hi18c)N3f=0dy$^}*y?I@4W zts_tC#g?DVbi zmu)1ze(@U(&?KiQQs>gmOy-TJWu)wN)&!mIqq+5p7oBqBLu<3{F+M!Jx2{N7Y{k4b zw8Sq;j+Bv%{IPb$#&7LeEtPyC%Os^8nrk?e+d{|eqo#M^T^ZhVJ4BsPv|IKK&*1%r zV>5;8AvQiiR%)dlxkbZwvpxCzXJ%A0pHjPCxPY!QYI0oa06T{oq|DTo^W#4V-Jb-=`coPgTq}!Q+4W#3zVdk7hZyBbwmd!RZJdFY>Wp>n;yWq+&I@vO}8qqM8VHEz$x=YklZ)A4>-;@+@0> zCGD87=1c%WN&Ml0;{1(~y!IlHAPAoZ5O?qHncyZHz}WX&m4Q;UaeUhnp$?k}lRWt$ z5Dju|>NDegfP3s38>IyY&kYvKTv#m1J}TX4yI+vi^_9v^re+k-I6R5mZ9F7}H(z=% zzuqp-PsemNL|of6)Z|a-p{t)}PQW%(T}?*2P3D<73X4gtfgWKF8YrsSYmLE%aa@sY zb_RrI|H62;GGl>t!9q3<7|)k{3y0aOeR~=jQS)Z|?EH^@j2DUzT93}6TWL+|6+flQ z?9JsWRi)k<62+GE#AhqjRGD_q zJ&%r0sWQ06SL&U@uFklJXq41*kO9)>xc~1j)vBrJ1<6>2 z!aFLvrzN?C+4SqH)a`oew7`mEn_sMQBCyi`(BPCOK&xTj4sh~xxgW72q3L=Z0)UO9 z{HEM^$RI&OfO;UAS4PG@;55xHHR;Ky3+_vxttZBCxww*E-5d&!B(??t@ar;38urtO z`s9631qP5J0=whThNkMcSE5NKJwcp&$1op()euA{iK5V=>n>t@tuU;BmvW;pJD3`C zV)J{Ogi2&&gezA0bd$-1O%MyO^Kx?e`pPH5xh{-ps=$RiYL$&UsaMA61=Qf$MOgrj z`9hmfQH|#hd^&u9PkHL>A<`2W3de7qlKYH@APwOTceswhAToOpIWct_MFIt5&IY-)}=sFu9OV@d+CqP+hL+oTH{Gvc784 z?{2m)yAXn2K_!}s(!?hrpgh5*(~Tpamaec7EjtsCHTVkKqN@UMTdAyXpxcU-#I7T_H z8GM<8|9e&;X|`G9k6WpsM8#-PJb*Gr609{q2y2~YzwFkKk;c870|Cw?Uj zUhxsw+8r;)0ew6225(6%9{tn-XfPSl;AcJe=IvEgj%+N~*VjZTXu})>k5LAg=C{gM zv#0n+Tm4fyZ-1zwB;;2)E3dc7Y{xwDZ(Vin=1I&tRGfW6I?+UwDn=^JgT0zZO^q% zcASoEQ_JDv5s#bn$z5e)b$*fZjkCjwjd?{yxD328(jXUfKL)lDXdM+l8SRM^mMzR?!o)#*fIDDr{ z!p^m!ICo)qq~1$JOKU;}Y$AH{C+{qs!T#9U63G0Pr>~8ud#f3HC3B#!Lj}KcpQ|1@&=^!TZT);gWj~DB@h!k;y^y z0Y$WIC)9_B7_&y53hP`7gH@WWMrk-!??%%EKSmX14P%%ixV5k|6ljfrsDKWP{&ci% z3UBVbkHtpDj_08cVs&~V;Hn_YjnKXE_S2^~A%FXDf>F!PnonAI7v4Bb+Qk&>E!toJ 
zxz{Gtt|B}2&*v|v$)U47_R}Ab?BtHBYUbR3MxU%l}5AU+`f@QxtjG>J6 zoKCkOO5bYormJQG>EbuoT`T4BLZ#U6uuvf*1wIR`rp{!j9;3CmoB)xqhARCw?<_&4 zs_^9P2`h51q=-4C52BY4#V0VC>Ad0xPx-hdig43E;Ul<8C<-xFDGquU-fK+7%HJ zVb|u44Q>Bf?;@(AY(2S45Wczi){Q9-9L1ebFt*F|OxDJ>L3Q00S2McLkSY;m54D1jq?yHx9(h94h{xjFwsvZv_GyZ{{Ww5|q;<6vjb1_NhI^<-C)5bWvWzN+qCcshc`N2<@ zt26ZUSaKKhH+T?N{qe6R)!l;_a%DX)W-p4fuF-O}wgB|9tq0Wy%OsMZnw4Xt5xI<7i_#r*5gJ|BO#JZLi# zvGn4fSE+|uL_g3Odqb|&d({CBxC-jd)VMUeT*1Lkf7I7++w;D%q|?)?G|)2T2JFs1 zZn5oFXIU$Mp3BbTCXIb10vn%I<+16eX^L*YE3WOoqQJohb}l+aZZ#}xdzH{}r?J(# zy9c*}_BA3vj499c+)I7B`ITnqIj;YAiFx>iQLC&oREBT=RVjaFT|1xpjdQ~|a+dQf ze{6da`rO^Cwo-pGnBgk#Wd3apj|saFQi%^jBUEE zJkw^a)I5unUIOCCyoGvWKZ{45P-+NBW|*mCQq9(Yb#Y!7JsJan#>$Y0AI?|$VBmbU zO`9U&Y`CkZaCRJ!jMRS1rodkV_zHJX^s_y$t})(9?%@m3_}dd09!-;WPM77{j*m#K z*C$%4$nrbOujD8=gp0{C_<1I{;~UVjMbcs;4wUtE@#@odffS1fzV>)!CvZ^&{S~V9lIXjvvP06@ zvNl6*PE%}!T~gx`fnmglnZKc+^4WJ* z(`tc{uDZl@Y0vzcjj*aKi(j{tdE_*Wp1;AFmW*9DG8m#;ZI2b!O8A(-i{zKkSPY-e z$CPiYn2Qq2k}3!!D{a!P0KZLGVW%(-SS~p28uS?^!2OJ`&rSf z3Tc!v72Yj6pXEKA^)jK-*T(p4vPFBVId&L9%U6L5iq*QzXY;2#HH(KtA>gdkc9TrO z94%0|WjSt3XU3)%*(@TG#EWeJ8h>sJ0A6%rul6x3xVMBH(%fFVHFphp;7vmT1L``5sLU@ zY|07}hmyo(a+|7VYJ&D5o(GfN9EyrI`UFb*@*!O*M4Ry;M&4k4R-g5AzIR}w73RltMuR6K;#m{_{X3tkbnB;l+~_J+Z@sd25D zs=)&xR4Wec0OlC!4*jXR3e)lii2AgAh-HrzuXZu&Hx!{|=<;)vHZ|p)=b0#uIKd1j zb}2avsfP==S6ima@94bvC0>Q~UY~1Rbw>8R(*uu#y;fHJvC=^k_-j_}O6|5f-&mJ_ zWh#B?r=8EJ%^|vL*uN zEaXu@xS&9PCbzVohXz8olp`HUN>ykqr%1kCn#UgrnVIJ{DDW6o$ETRR95#zg0q5cE z3aMt7pelwW^>nuUM86lcDN`y8rcnyTmBWb2HCE^Y0J;PjL9Zk#cT^V2R}C83sAqzk z;-}u~fjJppTY+_FA-MPoF2y;JVh9R(w1tjH=p{`U%Q+c{u7QA`(V?90K5g21c82E7 zA-DTIR{6Rids2tn@w4g`{L%og$RYOEY-qy}GE~E%L(c!m=NrRUl}bybTpJAfE`0n? 
zTlwc=Cr5G+pG+_FLL|0JhObU~c!QuAh+*bLmuCAucj+PBV=aZlECCqPiv$y1&Sr-U z7j1W$hs@lAS6`nnZvb#+B`8p!UHE6oQfMx?5Iz9G+vJ_3MC-sBda2IP7m6D zV5V3LRLw;7vAKh0Ki})~*eh8R!i6L@gZK4*rB8e+Sk;aw`|zO>YctSwK?Hvher;7{ zbgu3oGj~u;ZTAdayR)p@eloKu*;2K!{|xJ6DU`WI5r-@HN-oxNMmP;-jicSd``D24 zN=(J+>U~&Xs`j4t2Rx-YG68L}H+t?jc8uEfrA;6Ac#47@uJ9_oz>%FNklfFyT*aL#Ag#84S6l7#`Bu)l9&fz`Mfjy=8^50n))}S z8Xs8YzsS6f8-7#IYw#>zuaGt>aE3p$yWzVpp>fDI-lq0Baz1jjfKA!sj}EDJ%kz7D zzvzV8u$;=cx*TFr`Ix!wJau{pk8A!;><05N@mrAf4bf~Gj;|2h*ZG7J5AjIoc6=6< ze>2Z)`j`tyTP#tK?e9+(ES*q_vQY1@zMT}11@SibucScb7>}7uB78mjW%&)Ob@DEQ zx(BnlwcRWz=J6ynXVG%977^b4xZdVzLLwZEev!~c(7r%2dA6z~?w{fp3nR}}fO@&`Lzj{M0 zC98Cm$~I-K$*W#3R8Jy44cSs>5V>oY&(?ZNPu>flwZZ9kR#DF)fr6Ed=;;BOD!^5# z_|k!c`cD;s!oECWk)G37RuGd(q##haPZLFYMP6_xUYSe5>#0*b-+%Vg+3$DL7|;Os z(d*}WvzSd_#Zg}0+U!_o5P3@rvNn2R4(8LiqWOTi6h1`wY5kJAMP^rq6&r}xe^2k4 zJ}*k*WKkWclZ_=$3wwLXu=W-d&F_vFh3sI+ZcC}?Jdl)V!5(A9X2w>F&k?cR5w|~a zV&Gqs9pPc9>bFBHu?8{jzUOTS!X%8YsA?zlcTziv~8938^wG7Aj;`#edKw``0 z9N>;ut6xq}0J@T%M5Q*K-U`5r+ZZ{XGe3^<{_ZR%V8qhU;0%I?_Cty|_a~B_&S>i{ z16M072ks!hQnt&$>)n(nBi6b{`${3v;6yLkwUpd+G(n{au}c!z~Fp%dlq`ORF$58+vX0Oq|6zbsj9WYL!+gm3Spni;j6zy`84sRm0O<6{>@b zqa}H%1fs7x<;fYhNlt!17c10)0s3U0snGS^oqxT&S`}SZ#1*Tv0I-+C>n_YwC9SeQ z9*$wO^>psfpVI}n$__w+cS2q{kGM3SDpj7>%KPj_w23B@yMS{ZlmX`E??L^ z?MDnQE=|1MR&)DIL3FOrC%IAeS(~V_uFxjt>%>7ydoUl>eN+9aw%x8D-pJ#uVTWUR z5ACRDJ)HY04Jif*0@qKNi)zjV=E}Nx8KtG-fk3ba$!&P5ZfOX|!PgTqP-17QT781i z`%()0a$K9ga#X>Ir%W&A?gD`*@~rHA778mp>oF;P&d(&GuTwAFAF|!#KqrdH#Zawd zPtP5-a6xrpPcliG4~EKeulqA@b9%5yMdVyd5*+BSCgaNUs$O9+WhQ3)TvdpJxw)eWaDKsz{;C&7Xr@|m-fW*nv9Ou91S zM>b>6^I1+oCv=N9da*O=qYNZ4AHp zMqufFaWsh+A@Dyd$RJG-=un5y24AX_XgiM=2z%f=2QEnMJnMn7PI8J32scU{fe3YD zOKKDt+Og%~p2;6xTB!6^7i9GZq>MBZljk9C{+&$;} zI9}L=GH>S>wSdGt-jvQ)pppiNgvX0}%T?moVO<=AZ9U<->kieX`_T-ga@1$c`>sys zJ-U=~b-~?VO2?~0FS{JY-o<{v)CwoYD(ud;HLe!7G_P z^R}iB52zCYDg_PI=2rY0LHW-j(Pk$TZ)p{lom=N)2U!{Hh^L!^-wx{bJHT`RQ>W=r zE%%77UP2*s0?P~@Px?*nmV4yfqm0E-D(Z9csr3*SSMpQ(Jy4unLt z8}7<9Z|VbYHRi4BS&Y%;c7kS?=d{&4r(xyCyMxPAJaH1{rJ^(*jPaE}|e 
z@Fp6aQSzvMHniC^&^JR|9?V76WPAB-qdzP(4z5*ZlSp@#eNkO$8b<5=f9qMx)FGr(&{%D~6k7}|)lh~9q7zQTEFqqnbW_*r!Qo;j|!6910xjOVAcJs(fh?*=| zuWjb*7DVqGL%P?W5^pU^G9zjx0o(88nmj3GfliO`I9xLsXhs~i>5l2y!k&-GmN2B= z|Em2Nk{EN*TPnQK_Pmz@_;%Afpl>+9fIc?f-UP@nGe(%V_I;9C>_B&{Y9Hiw(FYC; z2#8T8EVGLpg2|pBCcI;?-V@dps@7(kD;319Jf>;sO3G3i%3eS8*Rc`gRmmcK8$YMh z5EW}_rtF`AbWVJ>Mx69^fJBs2pfnk3A~GHLX6soopDoMnSthcsGYvoBL?=BsRyidh zWjy_i!8xoqQ@P3!J9#xW2T93bz&BsX6h!@}ita%Gt~3V=Fd{cPP0^IU3XI~(u;0a{ zuT%Dr9}N}wjJ8H^BE&_YMLh#bC1xN2@G4(*wDY|VX4L1T4;87!eZf^7&L}J|T95Rh z5*!PGgruzR(t2Y`C`O*@XisBGEXjFRYm>2tHOYOg$1niyE zF8f8ID*UXm>&ZFB<<+H0N!87BlIT&2uB;AAO$A)_tD_ymEp=%n(Wu0NJt7 zFDlC1ng0QH+#WuUyly}Mnd~&MR1y5r`cx^J>OL3r;|5Zq^nN^yLDpYpK)KgH{O?BSjp@5 zxfiIewyPVNjO(8w_u*$iL}V+g@KH8I@=2YYp_ z2|=WTCEXj92C*-lHc907Bl3)LP)dm1Z=J2if!w{k#_GJfy1XluF>Uwr}E*K-p~E1R_> z&y0`WPDrj)@_R%LXmmWFk#3vroE_caZFaDug@N16D&OyXr;*{~4fyF{oT}+slxzqx z|GI2JaMKZYsN3H|8UGBr{a%B}Yv)=D)17$jF{Z5AbyDg!udP4j)q}iyyDIOOswExT zF|G{HK3$Zg`Db3|!v|@LfVrScSCKrxJ#FlTz&)CNFLuM6x+m;RfBF0~mG;lj*bDVf z9#HAI&6m#KuDPeC;c){A%JSAAcP0U+|Cnmu9EONQb?d0yg@lFUNa#Xt6TbIH}-S$qd?#ZFO)yo3WsRf4@Y10tdw#Z z1*1tikZcXHQsf)jF$%a{oK8hB|GQLP8>k1ld_3cckz!h(I`6D%uYLOtRa2_s-^rmS zy}`$uJh&t09{u_DRo0=s^3f*NJEMsfx`$7DeMfg)Kl=Gq2MJC16`}_Y?SpI=-g-&y zE*c8C+25>|7eo;Mdl>{I3VL9abXi>LSMk5sn~R0&SMnj{YR`IaB+u?r0NP(m_#Yqn zkF}*0`HqSp;UYo)Y7rPcOB?F>3bcC4czq;UUzI0R^#8rg|1NmAAlWlvHlH8QUNK?h zKivM!6a30bDl9Wgt3zNSav+Ud^D{)$Xzc~-0n5mNR5s;QS7=-)R;^~$&Sp?pwmz>d zhomMrkx5JDz_ue^Bf(?TF(FwmS)!A?m*PLv>fh@v+BX{3MTWU^cxf7Lr)l_xKnd z)BE?tYOo1XFBU;8`>&gDRtKs9Zr`;!MQkKjy5_K~_YEwN9s>mP6dEo)DdV^~&3t~@ zq?HF@ReR=|Uu8K00JsbHpF3Un0mgCXuD}nb(ji%k&iGrV&HjjP0$R^*!WUZ^BB=ob z9?}ffZVk)&QJeFeDylijXf=@-h0W67K{gAy&5674IvSmG$3(8czh@UeLd5i+hYNObNKaTX-l*wfYvI?!^-b}Qk?W&HJ8evrSY`Eo~CdkHtjIy*_7ta8rQ za_wyk$B))0_XO8IktY6WA3|NlM_;Xbp;LRjJbU>nc$`;CQRtD^52TkW`n2R#;u_y8 zYunY`-P!HTSV+kbIvCIg+bQx6noIBY-qZa%&i>!nG&8WuS>X5?q~WC=g~tP2;?U3A~9GoC8`L`7*xT@7I2!@ zz%{^5J2 zY3>3n5alw>xSe5e;k(6@=-6`%gfD57x6vL=eZyI)up2xhfsE%(-1J%1S-%<`JSpmT 
zm0>!^uc|Py`cpp}+&03EY8J_M5_PRn;b2iOoEYQo&4!|@bhSk!xo-iu2x?ROdY=>o zfV#W<4K$2<2lfez2tP1BhN&xo7&<;L1uRtC{=tZu;Nq z1V-&}6vCulqZf$WTaI>{FjZs{SoS3VLGui%DB&FxiJX2(IZa3M7M`EsG%>MRqOM!jtIi% z2-3zF(;#~<#EuqRcxDaxP)R8V9d64FeU`(nloD}+ueYD+5rGUdH*iS04-k+Q)IbP` zePYbnz`ga<3dFvm)ukf*?y_(4qDJw9PKuBM$5c@2-cNxQ|gh&Xm9CgO)6YSzwhLQ#_dO(Z9RUVyPz!f%cMErivOp z%?;WRdyqPUnxl<4zr!#6!6R0B-ZrnotVp}33^$AOKaQsu6mwmiQh&u^kh){%v*H!e zwjO9sWy7ncuVtAw*>9Y}UwdZ($X7z}1T-L;Yy&alcI{3uA$IgEkoH1a>k5oUdPkV7 zDrmxEl0@dK_Ee6IRqdwMO)F{@OiI*_(4DiojfK0fog2}2e^X7*P88XS4WsT_mzl4j zPNszU;WEC6;U*~F5BZ!+;J)F_G_6zaw-UGQsV&@Xcp-*y%PMr>y`dJJrTrE3x~tr# zu^};00`~Urt|zAu#MM=cm|cQ3;4n%UFeCUzIRvo^2z^@Yr?9rTS~|l{<3Q@Vcty5S zqSmNxe}&nHf46a$jsQDa9r$f~V0TUsfKnTyIT&%n`Z@y8}QizCz=R22`AK$Gr-spLPYQf^6pU zW`-q^n*3Fm+U1Q5OI9xS=Qk6jAjUI3%I7|Zy{1lO!gH9TCcVx`k6?*>NySA<=Q{Y4 zMV3ckN|U7GhF!8N&<(P$dulGGMXXJdIxe0F__ROK-7>9yYk1J2t0BBaw9SpUuDU%+ za9b~qMpF0PO~t8JJ>4rpZ0Oh|w~SxJxNQvBZ1ON1SJziSg0UDfdQYJr9B%|lRf8hM znRpOG9psohHBaZRqfLQpFKLPUjYc`>iqaM~h6=FcMc3ArqC(6gbo{A4u|#rW)QT^~ zjJA#V0=0Q;NuXc(RBFb9uK<4CH0lu(@1tUXn}VfYJYwn~z9N$MY%lYEyWAurHVGp@ zRkXM9x3W+n=&svBg6@L~{p$tzl8LcsDVjgyNU3unyDRJ;^wH2Tdci72AM%V8o8&%# zQR)xn%dF9Zb+vDMs08~>VIJ5c_u22A{ed=q2Y;}u*kHZK2^Yphe@$ro@THo8%;gyt zkiC7XDds+lf+_KMEU$q5Ods6!OkG{{f8snja_|#i6k0+Xv==dfd%M{?OjkiJrhlTM zc?taX`=w4>flo(ATj9bW7yW>o=~qDdR=?z|^76tAZFuIN&jJ7jBfO8celap=R4+38 zPyD6!*Jyts#Xm6F&k&M+5{vP(-in`%L&skT=x^~R5%Jr=hwf~ge`2z~8#r1m9h-wu zG2T<<`OoM2)wmMGMR7uW_oRArK8XF>uvb)3KqKLTVTFi5yzCFN{Q8qBss?P6H~H0B zfBtTogq9ZBZ%qBqXdDegj1(Nl#_|I={yfM}mhjW``@KK#^UVF?5;q=zlpzM{;lJghP{+r?bq=6kS`s@COr~l#xH?+ZV zzvHvN8R-vu;RAc&UjbF%F9!MPy9Rx5+@wb&#lM&zq;YIH5$^wNF#PIb?J(cfD644d z_YX!Ym6p33OH@T(|J89ne;4@5Zy!y*m`rp-@5c78OWmJbOX?HGw-Nyp^5m~SC$nGp zkD3CEzAXHf`JOQOzunz|VuR7w4gxg!>`MT9|8%ln49hJB8(biJj1C;&e~W+t4v^{? 
zdF|g!Bm^?a|6Im-C#KAi8ejYAm|q@1&wow;=yXMI^O+L5fki>;PJFV3@n_Ba3(NmL zFJS)jK8K={)6pX3QgkuAnCzwXGaj@)rym?K8EJlLPQ_dyu6I6obTiHD5*o%U@b(|9 z-SZRb)B5w&;lXA4y}n+C_IX67{wvqs!0eN(<^?(EJm!*5tip|qg;M9bJBwEIoYI>_ zfhAILgN-Ri3Dzm}H96bea-aYm_nGljmwCMc7W{_V3S$xcd$jk^k>E*lzXJxkn;Dh& zU$5;9M)eb7*GYMZx4A}7L$nk?Ew`>OcT^B)#=3SigWdBqGn zMv0^$C9dxljQKNR(6gy!>~3IiRJnUgEf&VUBBB?RJj}bqVi(m(MbL6lXIDn!#a)K* z+zb1o23f-dpf1jw(wF_F>NBoCF_c!_xB3Be7=$o;m(}YsguLTr8&G9fDtBZc7pCn~ zQLT4*qWpH*NP@2#TI!F|c%7oXj6M82QXaDO$%KW!Ad1n@)S78uiDCm#$|yaTg*hd_ z!@Mt5y2Rty$KBmDyQa*??!M|RIMDNOMc-GI;1b^parE2{noJX?XXCU^;v#iSirssn zxj(3_HJU9b@Q_8>DqNUYR>4#{q<`qa(+v5g$-o*0R%Ucd^od%Yk)Z*0eG!CN=ibE< zN6l=pp-D@c6{NVPf7Ii)GWj6$M`L%Z@(+v4{B>a$(BGU(z8~6>02wdt!oD`S>;mRe zR|jX_{R^=77j{|gFfbGt2n#GO2c?$HTIVmu))~x;xmr%Le-I9G|7x?8$)xefTba+0 zr>%Z|{f2e_GKmd){TD z_ifGr@6UST)9%7s%M2@2D^6~Ed{aYTd4q>Q-sL`RBsvztv=_7+Zs}KP(NG{2u8lJx zR+G#vw?BNke73;W;2JIV!K+SmUoQiIPe^K};8XK(aSA~Wr*d=`|gHGZuF++Lxn@M}kA+Ki@*LJ^=%qUyTFhpfK z&M}N)uH_7jQk?#P!Hv5-3^nB8k#y5Euv8;`YAlmsnRz0 zwYzS_hXTz{$3vNMpVK-9OJXWDu^in>yupr|_g0XG#{5Ro!iHMPK-U zsrfj*y0M=w@dNHxRLvTHiJTj<#V#XZ`dfr%xz?O3r(`cL2Lp}_zDg4r=v){y^i#p{ zd7v+IG?gZ<7J^<=<1?uTnD2*VcH1;?-a=tcg-v zlTRfI15PIuCUnd8iEtPQ&0%r_8R~UC(Y0^U0dgfTFPHnO)t?t7gpQBKXJO{s-eTuv zU7$oI{;@a$g(zqx(KG|%s~JZ$N(w_;xguCssA%{~3eD#@XvD|e?!WGj263b9>j^EWN5NP zf@*Kc0CV%x78TjlaT?e)1)nV9E)WLa2!8~s1u+tC$bmAI=@NzVo(D45Q%vllJl(V@ z+FH3posbzGZ2I!EGhmYIg~@H-{xYhi@j73&ss@`x7gSCWF!{Qc z?A#3E8T33e73P9*Hg)Zikyu z6Jrli&hU!Xc!@%DF}_+Bu2NlsolPr`al+J4I|v!_AEza}3sala&k>bAP|1?RKG30! z-hJy^)v9ZZX-)N%Ym=8lBAyVt#Lv*7wDpX1^tYUfYTgzmx^>V_)cn1jm|A-#(wwc! zVOU{Z)tbD5N=RDfG|EVoF0ji*`wATn{7=QJB=5O8e|iTI$_tBeRUC*|Yo%CG_nhU+ z;2?uRe~Ylqga6}D)EjbnP>kHa$|a78)=DoNWsQSj`-D%Keo+S*f0vwE{51y490`ET zT4->U5?-qzb$)9xU}M3uWhQRL^Xai=!Pk98ZX_@+>aNLv(@u? 
z*vi*{L&O>bmMjfkms!bOLOzYq zsg#N_cst>ul?v9V0q!?#wIQGzF4Z6nPM^$M`hgc38eh%Q)))BB?NHV3X-_QDD>!gV zA6CMk_7n|eau%P-SBX!CK4dS#JL8|mxWb-j>!hH0=rxYUM=u6$?W`nPu31tRQs zQg4^Oq#I#uQL&==$Y1H7r!VD1YnN!gU4%Sa^v zpfsW7)VgYHVvLV$4YjwEhhjA7rd46E?%g}}sKmZaB~w6%T`%Rf^koCWuJF(YoCMWr zO;oVCq}X4;3c#1SuKwydXolQ<5O$Q3c=B8-z3rHHSg}SmisavlELuWzR8!*6`akvo z|GCV#r_`Xg!fKCunlH@%KT~yqGJjeA>A%%3(}bWLZTR1zY=zyli|Xq~ZLjuO+fc`= z7NEfXKi3*>=%>A(gZg2WmdGBI$;nfu@0zfC)3-U^pSu?app};iDZn`Vsoejw8L&lo zus$T3X=>-X6}a^3GJ2#up2ZL2oq#T4ud0rJ&40TnAsVb=Ky*Hpyu!_>H&->4evwAt z!?v?LcWHxkA+)6%vTAQ5X4UoZFt;zeYOwg{IvP!W@*-GmjDy|}x^ry+d zkT*tDERB^dhA-;6RFLh6Ib2mDc9YY80yk<7m>T9x6VFnA`fdl8>U%Twk9zVH{rPzt z0eE&Y?R0TNs5eql;l@g+rBulVv3rUId`D@mqCkXECvBXScqtdRJ8!ndT!c(%iQKbv zL|7u&)b&3bDzM1QHO1y>c~dFW4<EDPh1S;vh)$U{#QNiSC8pt&^a{}0N(Kf zX3fKtzTT^GLQDO%E~FfqmN!~a-_t1kpY2iB>V~aKmn$Bjq`5Bm)ggF4Zu$#J`g1e> zLeVU+0ZNZ8w$&8s#@Mr}jFt?tR6xp6&VG&9xvwt(;s+pk-T3=LW-}+kH`_4QsP_$VuuSB-u3HYxyUivGn zU{7#J=x8muUXsyep_nE|7?YC2466B)Ou!Fb8%NrBBIXhV5V#Vl7k^5L*uP1g1}X?w z6iZlXH;3f8U@|rj>EO954PuOZOK9L*UyVGah`wQ60&TRLoTN(=HeWJE6#ZyK(yn|@ zp7;MNc@px#AT@(Ld46-!9$p<@o{v%3>4yt@-3%XPT2_x*o;$rLUUFamJ~MpYZZ&wH z)>K#9gw*%P<%N1 zw#0s%q(K_i>51RUo-{G#-j&Fw-6iamynIVvKq#R)%?gX`$D#C0)84qPa+~T8F$e>z zrM~Jy9%nSA)sOPbxd|nV`)u#2jEoH=jX!w(vQLk<^#fpt{1tZYxI0T&OCjqtk$Z+% zyHKe?B&5`K;k{mhxNT+!HH4k7sb4|M{A<){Pxf#oa@EPp*6xNO10KVXj}ebF_lbja zcItMz^Zuwwb&5EOB~(}7(euo-g2T>kjEh|ms?a=XJCe_VhwwDzt**G~Gag*bFc%Ex z*_^*a+*Wi;D7CikI-^_A3MWz`h^a>sJnJzlj|f( zW`I!}?{H|423Lu;PdG)`GfxcRB6vrhQk15bF6=6)2j|Kh?;a)HQtHVExm{q}I^a zZKt@VJRDPM5T>-;v8zwEd*)qzEAMppKWCN_%%VH?f4*j)0ncM!^Rz5IImqG|QJEdJ zpx!%Kgw(Sq?O&nhcBW3pV>b7%sO~U7QpNjt_U*7!dhIA+Pkie$mUgQkm-tzOQwt(l6`}m9WHbUEhUk6LQ%NJ$393s={JW-p>p2*>vgp(DKRM@$fkKl9m&W40G>jalQV<9BC{r;^QRF zhbfvNF3{td#wKQ+gaZA}9^x*`&-!bo$zzw2_EC3qsa%M(^-F}cNQDu|L@1l~{qaF7*DF_Oq z9LW;R`_etlmA@W6Rf(V}8lR$y5>1<6J$G!K>0&JDDja_=ST*tKEB;UbE@gSl{20~o ztRX@qAe&!2&uclyW$0S;OMlhm+c(d87G{W-%nS@;tOo)_rpDZ9Cy(3qye}GlV&3cC z8M+Y_5C2AZH7SauRm!# 
zX`I8xYZK0%rI2lH4ohOJA52wV>%qcB+?bW9d;6hG@<}>xdBC;B&4dL?nFnp>Rc5wg z8wta8(idL>TtOTpVtaC17tuF?UaH3sJG6W)I&Miiw99huAi}XaRNTEvBJW^>a?IQ; zRN6eAUu9rj4{{jj>D7{v-5-QQd)6(hjo32w@Ybz+x)?645+9(X+1%{xGL+bI?6q1k za6O!VS(&}H+w!_o!gpgCv^7Zl8SfX+k?51IBh_#7?_Vw4*)9gC8J*MqOnr^acL5i< zu9+IC%<;zkNN88F5~pA5n`ZT+gbty^g&eEJGMO|BA1#D;+wFwx_nQ9YG~zCA9_V;L z#2>d~js0XR?2-m{Kw;Q)9j`4>>Xn+Nila#(3wDm(ev`q{Hnh;~Qt+zV_guHHmv>(l zL-v90*18B;hUIf)a`K0k&{{)BJB6VFxF-+^dYI{P_a?|FYdtsc`Dk`p7cnLjjOsWu zk#|LWDMxmvC!L%OSRzd)?2uw}z*7$UCmoxf95HVshd#S2i3S#~#P9gknrS$(wrX`7MuE_%HOgLm>rE0vU+yQ5DX z-Xc3jOa-0@_m(FiNi7M#AT|Oq7_#!rg-`u(dy9TH{OI>lSUL?3>&-!hLyA^WAHLS%8f8u%(Ek z%E4PJRuAGCyKENNM&%P#hM7&bXBx*LHJ#Vk$8OBN7iu5RHyt|J+Fk9^a_ky+FljP; z^fTOojbje3yI>JK4yB@7c+_T)ovUiAGzM*4d##U`=-+WM39diznwhxYNd^9VMj%@W zT0pUP;zs4!Tc*}=TX$5m^9EIRudq$uVA2S@e=x#gN@=2;xwoav=J4H4x`WC2r{CAd z2-UM6JAQLqpuYFz3zeeXLMOj5e`Ye3VR#+jgMGg0nrin#=Wp-Xx{#4K5EPEo#L}X)FFwwt^lRNT5 z&p9p)2A7qIeUeato|kLHb&N1=^l%~E#e;IVGcj2TbL3K z=`FB7?@2v;-T%o4jxbnk%o)9-BvxeE)Y!rAZZik%0HjT6E>IW2(X1(}wL-Ci*(W0UFY4YHS!q;TS6dq`a4obxi@)XmpDi$e+sIr_JTvLNJ1A>DBSB(MOF+rD<6#stg)Cu9#+Z%;(Yac zP7&r>(k!b0!duDEVWEWZtxuc2^$WIm38Ny8uOzg@`yX=ArN74h;eFU{oNAg0XMHaY zeInOM`U)T^eJxAi$s1$D+jyf_?ccJMLo-Jt)6%EakmOd!U8fUUACJUSnd)5@?|`>h zNZIjrJ*x%lqxfT^gdg0H76@&n>d;|!I}zx`I>NxB_3}FVXW;}MqLe=3(VIQODPkFS zC@9C9g(?Hg>)!Q7DlRHJlQw&88dVkZ)!9ac-j5osjTn(!;gB_?{oY&rk8V)_No3 ze%S~y8pd5tdss!}$(QYiAQ&3t#y7?A&8ZFcIth+=GL$$Xji2lnp$`qLIEBwkRXVK`9sYC zE`Nug*V{QiZ%mwU`nYjae5;DjBr->INQuRlN4hdI0bMU4wA0)$-{o8+>$OaU3x3%v zFQb6>2Q>@DBGPfCRhhag-jl;6svuE^WwTfSNR#LuO1DLHj%y+WWIq1~6* z>ugSuh3sBbb|f7VD?VM#9&9bRC|_B)JY zDQ3!g3LLi5R94@%3f8pn7&gz9uVG^n?=!br8r~{Uigu?1uHUM%O|(X9!l_#`-GXC@ z;D88hOv9ZrhGdMg7dzO0D*g@u2ezX-sFp~I_Wtz8jByC()Oa;ya7~Q1(3-*xeei~3 zvCAWhaunh+Y3g)V$x$^*6LBhYJDe-uHuz5H&$`MjIcMwTo35o$u}!@a+8QgqHb~~b zTEDLrW<4d?6KPg-`tbLaqoFxW(YIyY2angbe{(D#QE9q&#PQ3DoriZ$p*&5JSpxnXq+6}^+R)1YWn#;)9!_msT@J-#Y7Az04dTm<1NKXAFy&vwE*uY5f@Mh?HZ` zuN;zfZVS%d*G!$8#?LpZLbh@RT>`5f-w@3*vOU_kBJ5>iI9kVZ`ej#CwJ@%EYUDai 
z(0+dcUdcvYLhmqPr>8sxA^Z1$>&57JxJvVdQ!1@z6NfmeYHIwTRBO{S@VYHMsH8cS z>dseW+z&!l3zUhM-pwa!QW`GTJQOT4C0(bcw#&^uq!#bPrfO=XC7jwG zEV6w!X1fFh`{*k^-aeysL1OEh6=|6)pBu>^6a z6>MRqEX;Xh&g>lXDKIQBk>8N_pyP5YqYFsna$--isLJo6*K7u0IY)Jm` z+#0!H`w*7`O~6tWsFvSmuvd}~xwdtml>xo)M#tP{$L|pne$)Om0N(B-!kKYclNPVk zus)UgBrX6l#VPw#s?gFa|HjhGR!>ppDrc`YCAgrBS+dM{_d8xAuFeQOUum3B3g9V7wR_TuG)3F7y5dLi?>>`S1s| zKlMkn6zJqnD)Ym>#VslUY^}g!xI-f1Jp9oo9i*!5WzdifmnQn^~n|i}S!=mc$t%6JPow?(E7OQ%vq6pK_Omt9`$toM?Z*O{m(xrjA>C@3t*-Fw8hvv|`DA|F` zRZ%Mg8&CtS@aIT)L;95eY-CI0Bnv!F_F9jUZFdOSm0n#Mm9TtJ?lqg?G#~W1%RZm0 z>Eq{SrRXtE(VLT3ORgaa1Uvgq=dxnD15&fl})=GqDWI;2YxZv~Qp2-A}X~jp2M4?ee1!b7&V*o?-Q?{I`>$_RN{KGQ8W}o6I8f`>ef4#5IIHUVUjEw`$ zwtt{5_0xBRiu!SS1#5&(;!|ptrBq;QHx#CG7du0-$flNtS!ntloSH+G>;x>kv~psm z@<%D=@@_b)?auSSe^_Ols1P2SE_ZGUpeX*JDKJR zRW{V>aV(c;-Hyvv4yx~XA7>@tyS^&KiokbXJ}pyVwoLEc;qJ+t%IWat?|bmXNWx>~ z9^NMlAGaM!8uoEM!#V0-%NSbRpXc2*TgcJpO09vym!XNz;exu&imgM<&M$Y`yHnap>%PaK z8X)ZL+mwb|cR9t_-uU`(^uz{hMq@Rq_W1fqWcK`a8zTX$> z&;NSrHfVYOuVteCujanL-Qq8IGugYk-p6?V-^_#ktO2D2mC#SpuOX0o`lgkb`>$<~ zpXc)DDIp?833$iv#^vMV{pU{OuiWwf8g&~+hgsc0JyBas=W2!pZlb{oupWpDgKAXL zc7A4ikzm|)22_;bkCd^z^wZKX!hsBlekR37#I&^6E7&QrwublaXpR?9`;dh7nrG)> zM=So$tC(+-9qG0jOJOEBuJ&k8CFKRKh9aqmpF-7lFjyTJF#xyjE3c*;#tA)PBA}9G zLnbxmF2Y)Hp24X@#$z%X<^f4T7a!V%4DmBx<-vhcf0cExladG0zY-kA<;1gUma)2- zNsD~G`LSO4*-U%LV}$@eNz6-2fQQnOR>i1;eMQI5bJZSG7-Vxta_nvtePchj3+Q>L zvj7oAol8p&7uEsfczlSW+t}z8r>U(AaPffO!u7WRqXvx}FltVrstrMW$t(4i_(r9( z4feg$VE0ZcofalA76nzU;k?O8gnzf;9K{mx4QZi6e@*l|ON4)2;qq@%MHql3atSwe z{E`MpEwo&e32VefQ{28fc2fk{03fJ2!^{j#^$W>Q&3q(aO!WdI`c*uWPPu7L3X_0_ z^$eX|aCb<&;#3JcNH&7-8}T<^U#G%(JiuqoBE;?lw^K@DN8Sbgc^%`fW|##lWiHUs zHXQrSy{RCRuc_(cK+;W<6y;}kd~H8rU3>5d*x?l;x7yIKDmjwmgWR54w`0_o#Zl2! 
z|B2uk%HSXwP9p38zZ@r}31TkgJa!}U=NV?v;GqgIKy0?a+aDz%Cj;2P38ppyxM$+N zzTvb0*KiQN^Kkz5wFC>jk!(YPmTk~PzV1bep%y~>W?B}@g6v|eM4Ql~t2jhy&pLI}QCj;yh!$B@)cuP*svw;%?VDswS)rDjaincJ>6cDZCz6pklI- zoEsXF4TW6)2VT%#udnuPEougIbMtI>9eTUx|uXwWz*p^VViMbD6^!uMJ1_)4o(r zcOVzL!-07SHu5jv#8)@<3&YnU09_k{b=iy*_7Ox0*+P>#0F$M^Q}2W!zAm6sJ%jtK zgqlhMmyI@@-k9qP*VZW)5MQ#1-MXZdzFQ+J(A>Q~K4o)yz|?0Pi9%)@+7102@X#+0 z(&Fkwn#YJSc*?N~mSW;>@$1$vKIhm2%JAPFUfu$|Ml>RBXmF0z*AW3|SdKs75h4Kh z?z;qBN*D94CyJJ~eEcekfshH-Gnk=X9T;8Ck80;dx*@OQ$i==$DJ9WNZ?0310+PWB zV-INGq)F<_GEirlpu<&oph0A!!1&6aFiJ@FD;AoamklRIZT5zA@**a%s!LZoP*3(q z=l)ZUocPhOS7)Cj_L?jmAwM9l-9eblkpsET@0`!TeY%8uv@Iykr@xm&3t}B;6OPWV zMtq>dw6*h5H`(K;u+cw1E-v3&5D$1E_KXTFWk9L@AGFgsvSFL4ya~>l_qqu6lhMLS zCRXI&BVe)@Y{qrpd{<{&Klk&%3nJW7_yC3C1}q)Nz7TJel%XE`K#$hPZlHVh^>>2X zk+<9e!0u9JOJ8Uqj9Y6~z#;1D#LPG!!#b&gpi`tCnniU-8&6N|mDPA3i=yU`|2rKQ z=!L5to`>pKEM0>?fMs=JW$iaGiNMG7;fP3--nRpSGKukf^i~n`>_IlY=1thQZF>uCuhCfjmdnJvSj*t_rPdPV78f6%uC;sSW?hlz8<3xrJ!%CHwnYJ@` zr^F5_YOUwBk;qlbHJgo#Bd9Vj6ZbpC8qO(SG5OtR;t2!a0cUw@y%!yGbw(4f*|46Ixvz-VtZAB27S9%r$SBE+>~U^=-CIF+if4#j3nnm z#Q+NI##AzVv~)$&l~G?N5vAc)8o}&{JDqrQr?rlU$@gHK`S7GfaPEs>r4`kUcAs1N zs9jZ3Zs_wIJ`~|Pd@eo$ZcBHq)e{5l<+ZX)< z;?0k|sE{}O;x5YJ;g~LeZ@FqRJB_3ArBmEd4YMkc0^E*bwMC#L7b#`;ZsrwQZnA?1 z(TwRNbGKG+e8cJGesl8`ee`v(Fkw4WFGbA%r346b{X!8GcHOetGRxeItm@9~iId?& z`j{!Ac9GE)^}mw@ixCS1(&iFj*GjqCDS`g+E6bO47RPA1RXLieR_kRTQSN!W7OiSs zje72=zis-unbigDFjT{OW-GJ1wnA(znG}r%*WRvk=Bt|Pg5M`j&=d0W;IAI4OOO0R z3QQP#OuK&Z1rDK8ss`32;ziC~cV*pncUvbPxHBwiR3+x-GX8{p;>g24gdV$Sr|Ktx zFQL3P9oW~iBwr0Z*vxPfp|&E#&X&?qRoYt$n@*aPWL&qN;^MMfiH;j|9Tn#hb&&Kj z@>rpq-&-JWWK(;ik$@!)2V7_CDtSQpz4v&YAJmi1X!@xGBC{?d?`^IVJacpAISl;pB8vu6>I5liE$h2h(fd=_u}E_B$N)+2vi1vEzfUJ)2z z4!_~DCUgFwtnu{lT$CKD%hrH2swR|jV}|YFb;aJHk{zGU z52sr||MSGD1-I;TCbD<;8kLn}Y?t6SFXih_EjdI|={4wvA~JT_f#oWtoA544hKXAC zIk0NAC2@3xO8J2ted(J?gcgPZeM7APxy4eKLG&;d?CoW>FqC>=?dxTfx-b<`q?x|Z zGgfi-@XZI~$8C?p>YgxloZ#7nXfTm4g+U$%nbTxT2GYPK{Xz!o%b7Ybv%!NU94`Za 
zbt+jed%j-JQn6cosX3KwwOFh9<+w(dwSz544FH6{q6Z3*H${=GV6piN(%*ZS{)c*k z)1AJ4;o<5?84#l4wvsl}s-eT`{2C`bn72*-x!7TaUGQIwxsagwW7uDQ0@Q9m8m`@n zru+p${i{>}+4m|oVfZaH-t7M_s(v~KH3IEBkR>ua<^eJJr#t$^shLn95#3A$fS`Xm z5J)+&<$jlP{Bt>@K&v)?DE$8cKI#9j8~!hs`~J`W+jPVCfr?V&@qg)t(>u_C0cYQV8ri{Bs<=05X2zXu`1{Os8KTpKc5i2WFoGs3d+7 z-9LSIg=zZq+ILYui_Jd~y%AZa0e?OV;HPW+e<$ht|LH-R-d5Ehh8{t9?L6U2uiBw} zasG9ak=(4>*~j!@)RzrMGBmnOjD)TP>Lb6f4AbtMtrI_*(C}-lm&gRkYtP zLq5UQ-3M*D9ahi&?YtoFM-}PkbVlE()bGjVl$_na7?h@zI=n<6#jInNY}sR4(8Hl4 zCp{pTH8aZ8Tes#(uexV8F51D)kq}wzR&gWA!COs7CM)@Kjl?xF%{Wk4{VI=v?>^o_ zNvd^gev(wXG1}Y4bUZzR-}b3t8=)xaiRMs5IG5W-jVR;ZFF3~4ML3(kmOuRZeq<?9(gjYzgJlVna)`x0H_il@g#6cP;fcp1J5E4BPFMyGBAk zpB28~G%Yx_IrfuVId{#+Jp$$x?uUM@<^9K;!dmUO&%qd!I**-V+y-9JQ(;n7@lBM? zD#zwnVB(AtkK2|I@kaVQw10Y@q`ix%O$9Z8E>izsQ@X}sE>>k25)}1CTKc#$H zmLj>ffJ^7)ChuxnvQyPrb9YMzW~YYbMEZvg3|CDq7E8{6JgF?)#-#zW^UH^!E%Z6J{+`N-uldo=P&qx3EiQDdUPMQWsX=trn z-Yb3n{Mp|9(x?1>?v)*NYtYprXRRZbwESYTy&|Wwv$bpiyzR^TBr%ToE~M^-SC|{Y z-qYVXg2GLyxO}Ghu30nXAs?-af!lo4kH!kE$TPB^<~C85U@>%l;exhTd-}4iuCjpF z)SNTqBQ#2b%m=L0wz4Ur^8|wagHieyzWfo!ZcIQZ!Y}e|zds$QcQVRm3T9&Fuwu3mM&>SbfFFS@0|D_u$l?q~Y8-u-djg<5} zJ)f)((O15z)jQ&pYVBtH_`~6o`bCyeimmqRVTEtstLw{$cD@e*8&$u!${*j6(1Mw` zQ1*tJ_jHs2kUg_37rFB(Y!35QYV1@;E4*~drknyQBCXr)ANpTnuaACoYOk#ej>X|! 
zorN~aGuk(IReOvVwONOT8If1VDjo0Ki*V}m--LNyyb|+>YHT1H+9oh+=$OE~eU|7j zf54lP()WY1_TxIff4V2va7=Rs#Jh~AdP!IdQS1q=H`Oe?b#Jwju%7AYy6!9LhIM*p zE-4i^X{GtRv|4IUoPlp7kJ9H?<*pgsvx&d5AtENyd5D3$Kc=`#0RKEy9l$5s(wKv z+$tNMPo8VoJW|tLE}@x7k0{3-U$5SC;+)g4C6+6Acko+@;hXuCEc3jmrOgU{j^RxC z+IN$cZKGNBF3ih`Y30*GhIR7$qWXPLdh2(el17c-YA;Xe?C!qWarV}^JyAaa9XG~x zkI^jD)tsOgUl05c+3s1}x?b%`O}+DeGwnJDUQF76W9r-E4(-y$o%LJZA>nX1bun4q z7vB2(*}Wb9&-YTVXDjL?NvwTPa_`;=pRuJ>__juT3A*+>fN+hZ`~=n z3y(xSRq1cS36_ij+W4=^haDDB>ai;9hg&bNAgCgxkL@~(>Nd2j;tG1D?j>E4N;G`= zaPUFGW`cXkOr4J5a_3!}Mr%BS6B=;~<0x}iwFv z;#3B0SQMf2l281$@JA5aX=caLDVwL7=az4il3eQgy*oSHl!xA5j7;b) zTdl30nnr8Qn;(p|Ro*_6-nKWL8ljpm|Ekspt#vodoU9*McB`^>PtUM}oDN|`E;Vy$ z#kEH*JJU&gzSuVyQT@b7M-JM#%j@)EpakR=tUW3-&NN3Au?H;Lr}eorv@c1^j)=r> ziqi_uE-_@5DELnfLf(zM4QAVi>Wu5ymipVAhg?cNm#%g#Z-GPBy4{rETNb0m!|SAr z!jBDvHQp`Nz0IwAH=iO_#P)flL}saCJzd&j$o{ zz}YAQYqYo=d=w-W_-FhvbM$tjAJGo09_C`(IeC(_-^>5)1(*OK&R`Yl3t z@AjRYSFZ}6vJbF)E;BLz`>MU+A^_CXv>vhd%m;K6`bub8YJB3Y2-f(}K3$1IYV(F! z?m^RDVHq9$7VLiGEkbWF$3!b>(D~(_-XRsFt9H5B8?t@=*xmd?$%3WY7J{BUX-%D` zcXn~!-78DuG`G*HWCex>BivOMyv5%sAU6NuHqr^fex(a+`21?lO-7th zwphR2*JaT*`puqs2i~I|T-xyk(n(}_1Ad=ZqUW}-{k|Q9SLEjx-q^f1Hu<`}`)Q(Y zzs<4$w^ODIru(b2f$0>jBu7p|^AtM1!divFscp@n+xpEeWvk@{P^7znp$Bu*wA1Q4 z-1W?Y4O`uIo7QY9WCG6xUk;ZQcaCr*nP#(08RwAq5u(L;g>CfoX`fl2A#$^zyy8^U z(3W^iFWdCQGA?VgVFSzXc!z^u?tW*d$XMhf{j*$|kag`(mAe$khiUFg2?fuE$0O@w zL-uW2c(Pn-eJp}Ibjo+L&q8Xsw6p3UE9_+_G%AXX6_VM8m!~2Ivx*>-?8<2=@7&L; z77VeJJNZYJ%ko6X@^(v%X7NOP->;cBed#}^0c*`@Wh zD}0Xn+2Iv59CztK&I!X$d6xF~EV45t<+z#53qQSax+O&%T#Pu^8}*HxG+$g%Pwmd8o_?9I0u`YtKN03&)r`0K5nWN6RZkLD4vti_1q6LSXMx6B;WW} z`=DYH(Fq+*7&^&)pe^xsIn+LX-HORg-7$rOCt~=nWxcGvN;UJU?e-`cN4n*_MODS{ zUZ=>}T;(My-@fu~LR_mj>VEN13KOpV1zp^(K2rAn$%(eniuGuK?q(*18zaGZh{q9m z$D4M%)&d+B^%=9QXQqIyx`@vV*7@D;>LrE6r&Kz9G$3S`d^&Y9*zGJnRCw;}hLx

yD57s612-=pyzLv&u`m|sj*)w=svDqhU&#-LXt*?6869PE?EV$ z3%66S;|&N}c@CY=>vg05eW~ky`Mp1mJ9WoeDsHf<>X)agkVRx7hh>g#Bt(ig!p~+Z z^TXSAbHf+LnIn%^dt)f_`pIlF5SkM1ecy)C7qftF7hSK^)ijz_jqMXkDY_~KLU zEeKQ*bz$dUqn{V1PYP2hhkMN?X5PZb0?1RkZU4$+dXqzqQr&G#Rre;`Bnt=hYpL^j`UvCEWH~p zr!Wwh*u)pl*FW;Rv@*bAUXmA~VeZXiZW9``cit0#F=#g7-dNj>+{@-&>5G0ug?bA3 z-$05M#fC7+r9~~j@Ku8(ZE8h-|If+R2tL!T-Q7dy#azpsGw&v6-h2bs{pC?LYu!<1 z{bpr;L!C<&B{aQKQOK>^$7cIJlC${CtXA!k=Hm+nBR<{j6qz2KRW~JQN)>6st*iIi zDS~T+IJn^EW+&Bfo9hoB{Z^W^HYHI+A5e8Mz$dLP?kp|dW4B{x*W7?_c}aNbjcF!a zW$&_O{r=1Ta@{!3RXgPFWYQy=K6JkN)(U{at^Wsi?-|w9zV3}G3Kj&cpi~tVr3=!l zBE9!sluqa!LO?)4QF`wkq=w!@5$RP*LJcUrh898#d1qK_-F?=1w|kur_l`5h{gOc- z%*^?J+VAP8ftCC97y-*;ZTRc>{q4F6bm4EGQ1m_-V3ItHqD>KTFlgtJJegmD==el7 zFyFzhw^9E{hhWj`BN2O3d{y~928aFIn7D9DfkUe{pT(#|2pHK_llpkz+P&|x)E3&P z$7Yc}ECm%WYvU0~`7f(=BbF3<<1|u0vEFQ@%PTV>m-emfSz&kVUC8{=7iHtnUB)52npxl@cF>6}U>8~>CsQMVa zf6`oG9q3+X^DCP~MLC^iV@PMc7~${|=Q*~N0MXA3Vd2vpc&<4^;lu@NMVp@Xk7A?( zFkcs-8eomK@f9L?;zaOv&&ryUmhpI#@>&$DQ81+Fp2)B>G-1+YY1_wgcr7v8(jWjA zxqnSadlNM}<=AfUTm63n2;{9mKf~x*1geDiY?5P{n8AKT!YV2iZaIaWjTV@Bx)YT+IEm{S;t$S{gLyaSgUX}`GJZpB{479Dlh#alYo(oVO2 zK)$a!y5pFhYtmH_?76P#mS6HDdo#hMcCA@gySkpcafAilK&rn`>{QtO%av-QyA@}Q z0#;}|?fD)m!lk)+ENuSm2EoNoQ-M$Ysr@qE`MF;&V=OLC8+UQjN8l1ZftNS)1+FsI zZC-tvF`JR$_cZQ)px^zcJo5=tZm(XEa!6uRb0atN3c&f`{9NeF@u720UEOLC2CI=< zP?w6Cz4ALvCdzdcs7+X5EtdFtJ};8~=ew*NSc6Nesb^fQz;;0 z{#@-~&r~gzX%$TwwtkyRhtt+^UesckR+~a6*5$)3pUcyQ9=8v^^&V=1N;V+rFq$Pp zlRgaXwW5K_43E<_bP_IfG1rGI+elCl));ZhpW%xYUHvrf&t0Osy0S`vklJ++qI4#8 z{#?|P+u@vR-9UeeGZiQ)NPVdQ1me_%;lGE~VX{U)BdU6PHi~SzfP(4a-t$_Wip`!= zg#Mf%oUhG#;~79H2V&mwZDXF_7FxN^Iaq&xPWQRg%`+16(Cxjy&5EWIjs(t)ft#Z9 zNqHtw^N+$Ln~1{&{4ICQL0em(^k6;AgO#!4fpqk^=15oXAuhkAvuX4s0yov=J>~1s z7X2n2CwcQZ#&ms1>%#lm#8FV09YwBr6w0HY;X{KY)2vW@d4B^?yz}qbS&5dQ|Dht^ z1Od!Wkgb$=i$G}eLinX3P|bZuh>s>Ityd0F;I~%q{XM~vnL^`D5&DMY`iBi6T-i5j z?89F0iVR{p*=M%(-2$EKCX9DNY-f)26wO!<;z+xW$oPQRR=wO(=t-^X@NxcLP3qu6 z^`}Y5ms1uTpEyk}lNy#kcTg1DQds!>74HTK^zDDJdJ8N{5TWSqNddQ&q~e*N$=kF! 
zKOBUa9S@gwubx{F9%|8CTP^-HW$j}w+-grg*0XI5LGh_R459NeX#be39i1lH^Bp(n zG$K%8Wd}!e9_eaegg1;WyTOTV4mkPUt9BOWJ82K?R)n`c7B<;{+%M=4d!bJ|(F>Iv zj0a;X_BV9NzrZ1_um|8Ii%dVV?;-#GIPv=Nw&6S4h&G-OpZB9PGwHC_lrLj%0(z-k zwZ9*I)09sYk$3Z!e0o7MwCS))9-yj*7OFN6buV>ep>Qo%E;(mAZ|fK;8(8!>+Q4X0 zodR@jd+w)S%}hi{B95&=xFaE_UD0x&^JgQA9*x4tMy=d#ofdq`OA~ICwK4yis2DOH zee!87;KGMur`ICgqc>~q^xY14!>RBU>Ax|#zB@NAUwYr2Yh9^*Q}Wv`I{tOCmvp-x z?dEg!uPU(Jw$7iN`b$ka{bi-wH4T`1Bkl?obehNAMU&Nx6)ATvFd4O2O$Y(pcz%sX<;4D>5Hey_)0!qF-eflOVxbeLp(UtYofH@ekJwaCM$` zDLY!Bdk@Fde&Z`0kUyc~{nb5lsJX%^312R$P!=-`;6@kOPj3mvHIleT1d1lV7@5vT zg*u!cHY#@@5TKkUc(^P7*4|kiWATt=sAMh&?SUO z5GUXWk_5|l-%1XLF6gXS*l!ytR_W=Y8@YG$t1W4YFmWQao71Ov!Uv|dbaHQc4u1Ho z9|AHnz~U;j?5^JFi*AaptX`ZEu$wq>@GS0lzq7VEep82{GK2M@Jx!(lUfp>w=pOTr z`(VYIyDm6pRW@2mgH&Vr-4hK6mlx^xT80jT2K*@Ue}DL*<5z*ybcbrW)hG$&3$LeT z79l5@FAO2D&JX?%4F@XA7FIDIReh?7J_ZDGJ56Vot>zGbjreb6dyn}9?0nJrQD#&@ zdg-WkP`0>U(bM2KcJOHSs^m9cW=S(5o;i>61e{2pn0*zjwYwsQHF@p1=hL5C?WbJ9 zF5o@b)P`Z+0Oe{3NfB|eg|_{tM$H*=J(}Y~(MC=F#_E?#J|vhUXOsLF!TPuBdmZ0q zhU`$D`WWFv_Fa60yoIgtn|OZnP=}Wv(S_;NWXTVO?&O(&h&Hi|b#{SCUf`R2NW965 zklFoio^_c2`Jo|Mv9&h4qJLZy{JC-yllHVAQ zr&pTr8f7jTrl&MXpy+Rdy@a0_LEIts!Q~t^5_(Je9++3erYdtxg@GW5KgsAS-6=d^ zd&5~osiZ$Yaeu9mEfC{I-g8hLdKoHG(eAkublg`6?eKh)Hp8^pBu>$D2)3*}3WW5Z z7g&Jx4GChZDQ^qYCxsr{jPFhN{X`f3*Y?i8i+S1!A!P(x#@q(UNQ~DhSHxVME~5KO z!3-06Q;{o-msbtc!S^cbl!9_utLV+^jraxO!bzL!)k}9s7%kaeQ?-u>se^T4>KGx3 z)W9;SB70LPDVK+7gOQOld3OI`1!c^8Y`qo!K~3PL~1Uc(xUXI z8G}g`O5a_)4g*QYScYT+4HGI9A2S6wG!ojeqB)IA7s^hnit=FQ{XYJ*N9eMnCmZrI ztEy?<%eZB3&WBa?l9clV$;8ipWm6Z_)u;Af+g|NVb&-XTxktc!0z+Yj0SzeQvPHOv ziGigz?zRJ^a5_pl(fEaZ2B&VD?%idKOb3f%cW=RDcE9#r)-HWjOr=TUD7JZqhfHkK5W#S9bdW}$Xm?(p# zU9b5b+xe~v{i>!uO&_@#%4R-ijevE)wNSi%z=>=)V5ZAbs^{@wL(l?fMm95 zir6aJ>%-bRiV0u zm(_`Rjq-KW$%QSV)!Dcn3<%7*#%7(hfI}2)ILm_!B zQYwPJE4zldC@5R|WKDAsVBKO<%2BU3lRZEc4f8+~Ya2yEA+udnAgQr&E36pYi@?JZ}>067tR z>u5|@utzFReJDl<7m!l@vh8=}ySTc>I+&*AE;UPCe|v8zg=Qa#)nA9`BSy*c2fFeT z(XIJ(c>oR6c{ZX#LaHqkk!+wY0b)iN;R{0n`+PgzNk5La48OD3z(mOVgUE>|D}l5r 
zv=NVaFMXg<0UH!G9u}0!d#h37dG%hF%$N-Iwa*wT8uwx)_U&yjOjyBBWvB=nT|;!foQNda47 zRZmayuM6!-e7dtk4y;2)S$&d|ndgUr3c1s94(ct8Ox=Ci`mDJeMVSF#KAjmLIx2Y- zP3^OR+l7uU3s&-La(gw=cU1Yt-gFPmas1*KtV6*osQBCT?)A*=Z857vb*tFqlD{AVU8asn%&I?j(B7Ty*zNoze@Vt@wM{JB zwpUL`QS#GVKO9roXqPPiYZY9XyNfB?hv$A)iHc~{WQE4YykP)c$6mzxJb#IUPGr8E zf$E}5#c2#;`jcf%u>%kNpyEI7tNxBfe7y)|ZV=1FmPU0b#HQ2vP#u-B)yL=<4ZBq? zWojvQ?a0u~9A}bu(zYoU*y?0oD(#p)y07ln+>$nFtfL0LrpLGf76;#eM$CXrH(vfe zxSp1C@orgVjsj2g@72SZqA)T7>di^BDdFUx+d z@Z9+>U-Y8IKYVT^N>ps8(uIN{m4WZK0gOMff~fdqX1$@9XXap#?#_AXz86mC{>!|w zf3HT;+Ak)Fr(CUmZc-rW|BuZtQox!5ld_cge_ZQ8i%(iy1G)?Tbu|C(r~BHz^GjiJ zRbKNN3Iu2uXwaJNEr%rJTRs7BtoHB8c6O;|U{c5ss-;|yfM!NNk1ced@(T3#uI?E= zpS@$$sR1SG`G+&VANCa>&TGD|YriuYe?3bBK^NJ0oxQZ~;<=3?SWo;P6I%b%`tWF+pKSQUKiN^aeMx+w7JYgwW6%fo@9M{N zX$c|>NMinpKxsY%KyUzg?QYphn_nx{r@f< zfEsklv)s6Bdkb(K_N+?qlUn}UBmVvEr>id>CFnA8x;gB4 zM9}}khcBzWZ~pe!Cf>2Cy6Q1q*oYkGB_p7M{ol*wUmRv7*Q`CCXHO+o<@rMEzYHY$ z>+gU2Z(kEYOFdbWw*Aj{4K%O-HCihzM*q0RAp%%)5?{zr`ww#U?wuqsll0R6iv9>N zDIcC9XSM8^A4g8qCuw*6hjh#z$1)Nm3TQ_Ma*wSE!5-#+RI0&O*iAse$@cmG(7f3! zdi8dC;Jhe#s_{P^m#D{;n$NHWAb>b(1aukvjfVf%+am@5EG_oEd!YNj!{OJ%8?GJ^O;gj`RgT$sdWGchzzg3(K#=aRAsg>}9s=k?G#*XD6BiarBunQzc7 z-uuP%y5F^n9{U)?(na-e8g}vY>nl5Or8h4yHKSo(10;{}=85#Gy+|{@f#_e1tpXSa z4|+9d^FlSEr{a_5MOtzx84|c}>HEay;JE^4WyhM!A&f)HPC&SH?*@s2 z0UK4-)B`uYAIW3o!5qFPAMn|XcoNgk?_op0s>JLhl6a}?$o z2*|_L6_fMui3ueMg5+tTvL(@K>nVt+hV?XqV$lpssO!2b#?2))y`_uOWc9tc<8;K@ z{1N=P`|H^4>?y*U#@OR~8D0SQyi`Q!nW~QUrN{qUU?rnI9Gwr-7=p6}{cWi5r9`gLIWo}R3($Ou_GJ=09@n%Y|FSKt% zCE__9;`E$*-N$Ff=a{vt^lg2tA`53+j6z0wpr${E?79>%w;Wa1PG$0_7MOj{Auu=7 zoTLCkqyc@mWP3AV)DJIOq}}%Yb651~)xog*CcpJi(HVe1$J}o}%iO5h+`Mu$w?C~j z6-l|aI(5jP=*lW!oAF|VQxKG$^r{;OppCK^k%+MhdNw5SfRm;U_snV+$f&0KQ&Px! 
zEcjj)*B?KuJAogcefj zwM!upnjz=(+ZmVil!x>P_Ki)a!!C&@MdnyQJD~>Llcg!)7wMhSxA8l-lqk<1TUQNO z92Zs%SRbz|KmsPs`K|!C2jTVy?h#eLVlSeko?CtpT?+R5kuh_)&c6HsW_>raBLL ztPM;yN{(Fr+S*PMzAtO`8Tzr>C0Mn3{quIg|ICqQ&ItQB2TO(ek(T2~g@Njx0 z;9cKy%hc)kl!!}9=Zk~8RePog^k$rnw;1s!@F(lb)iJ(MlF^AeITN&_2Vs^{Tp9wc zgC%uz5$iey07*gUUQJn;4ac#VSe4iQN$X_^jUJC|THq3@1pD+^kO1=lVZMWL163k5 z?)%MXp>yBxeq-7ZVGZ5aQztzo63aBTKgoCbw&MNKuvMEEg{&#;e46C|jJFc5A2q1% zv4h6)*CO-zaPwWHtrZ%_S8~09FX$uKzh|4ekjFn^UJq%Hv{HSy2DV9YvkC?t@9R0t zg$5EYf)`3V4qF%TiXzSHBESd2OMVe*F?fx%`8IqnvyAY5_r!?9yhO`jl|aNdVkFET{^cDLLPsAO$0Ke1_RZM_ zX;D++EQU{tqu(5g3)Ux;j{E>Ar;zKwXL&@qE(2^vV*cEhe^B(<`PXG<4%ySHb0m2| zEOMvOY_rTmDVG9i+b&CKG+yWWdeJu&=@!T9*ghNJS&jZ!8m;uU`z-x-U94Gnc%yt; zc&<%&O}g?iph~Oyc`ES_Sq6MX7-3F~#;|B!Oh2~^{&$QTcohd;ed_+{Vg%}ec1Do> z80D?IBlf!n&9C;{7%dC6))PI4rYNan`V!n4mmH9t?O61_W~ZoPnC znuB7?t7@2}v3EfIMod55Zvs!#-@Fh3`yGTHT`D`ECjJkfI{Wu-*=GH97GRCw|G;M* z6!G1MbK4PJ8(;k&fcu+x2kR+A{6BdN?=%4iOVi)<_mA_(b4uZ7-ee@1{9}N|0AQN` zf&YJB8Ti~M$B2KeJ;1Z-Suo}AFYbVxcyei-vF7hi9Joc_*MyWe`&gpm>v<0ayZ(`w zjIZzo3UYjtx|0#Wu$7$n3nhR5+JAE^Tu)}1chqWvALt5E{x|Qg@<|D_T-N|urs=6y z4bP|5HWYlQt@Zb8+n((GL)r)|BdGu|wSwh}vrB!M>A?B%S|}5G44kIBx<-8uuEuan z@ne6~)r&%D}k&2{Y-)qIGEO$;Pih~tfWrq_D!Jx9-; zuiZsTBC;WpBIn2Pp{PMr^dzzOlS0-!aSmjGk_P|H0TsB3oUKi-bHmn`W z`YG?I(<9~4TKHq9E|1>PrW^DJA)=DcAz)2P=u0uE zD7tK?Ax`*Ju>eTI0+P-|Yf!7&h&@K9PeX-0f|4NL;|4u7{v@QR zD{x=qHtxVsn434X8f~D=rwcNrvk$NM0)XqnGJSz-_s5qcUbQ`hZNTFz!3p#*-A8*$ zyrIqOSH!Yk2KJy5hWnkJHi|bm^AH&^~Ml!@p70+ z8Cx}W(f~^_qOtnp+@_G|t9XW1E9%k8IrR?a{g%|Ae9$lW__wjqBQI9flSvQ*u*3e1LdGs$Blq}8h8y!EB9`>M(_ zzr%vDwX~s%%eb z?omYB82|ATH(?mx@#i!1f~Tbi@gI3=7S*7AIC7y^28cwbkza&cUEiMZDQCR>rDA^{ z<&AyhfoYKDsK^tJc&j9q+&lR5+q%Q}+LP{bD#Pee{?r@zQ73?8Io<&z3vUeCZdiPE z%V)fMFSoz8s)at5^ep7Nz-E-mDpX<}nB+E6<3|h%&Z5&REuY1X|D|BQ5dif~ZQApE za*5{g04sdG;cJ^5Xi>WVWomMTE@zY=7^`u!wFix-qqZG z%YK2o+VZR;080Z$zz@ie`jSVs#-CJFswL>D+LR5hQj;RtcXQ3n34LZ_xc_HL+;^Qo zeEC5<)(C&Bqija7*WO$6?MmW`DbH2h2j6 
z$@auO7~*g(k)z|%vgR%3wS+BJE{VFC4R_HentV_5r%T-zU#RB)(i3EJZ1S0ON|MePge@9f0D8ePmhw7$&QjiD1x6qeZZ zkGs9cORJe&NY|4FC;!IdR&&8LIxLr>iYX*EVd2tA7 z)Fiv+Gqwi;s5cysda$x87;-FH>Ii32I^Z)>YXs=o`%0e(R_u^R(SniQCwnn| z61rvWW>C+@feD1?ak1i`*$yD{HPwa`^7NtEFE+&k%BM=RKSrUQ0s5!2Dw#y5!utU$ zgo}55c#h962P)b|pg^J(s{WSg6+6+rj8ww=cn7kYPM~pMb|lyPwCuDK5kHR6?@){a zDI`8|JM}2{!fcbIr&|tFDNu1qogM{qF4W|~TKi64LK;uEbhPQ%Wl+XG1AK44c%1}u zmYvSVAh~sGnXey4&lHDKpgUTOjz&LEsuob9siKo3i9>1WrXKB{kWs#{sdp1!tYw*U zKs!KA(<~;j)B0?m311Gc=<`!7bLA=m3^2nynZjsnCT{fR1JNreP@C1P6HeEk%9I)S zA^<*ll~{P@V2l57u5LOVLAR$i_kIP~XQY$Z=Kbi*jm6%1I_d(|LJ015xI1jZRaVVU-84{5EmcO_zdy%#id8 zSXr-vaaJ~krwPdFELBy1`o0M*A=|QcINYc!krWYtba~;tA90qLcuibUUFBg21oF>t z<`Ih#&%2hrO^-+wp>zIgkzS2nYX*lyL!%Q+CDoFK#U~qPBLx2YgbR`8JfG3CJu!ai z@>uVOhI&H};8q-(WmNEisdBl~!*8t%2YK}K3`g0EiA}nbOR;eP*&eW2u$N*Z#@`y| zmt`B(zSd+Og)~)Eh>^Y~G!eSzVzpJJ?;iud^^+g_1}c4_qGetufG}saF+J%O(^bL2 z^y$rEr|pGH{I3N}_Ttxtj=}{D!kYNNX89cwsRAYE0oe!>OS_GLy*1b^%x?)@b_6hn zdAwhC_qN29hLI(gp>oLb)4FdGcN0Wg|C$AmdC+f6C8#UT#;V^aW&j6g;c5;p-xFaz zhK0D<3CutqAHPABH|fz@y^paGsTZE?By2HxsSY~1y>w)AZ2Mz2+s0?b;nqC8`9;e5 zwx@1e2M#G6+fNs9T}qWfUZ-<$CHV3gK>w4UtKX~n^ovAF2il%0RIe_a792j8)1iU7 zN|?$TFYuCfXomEvv@}mrA6zLriln1~waqHcxT90U4fOQ1HJAGXWe(4plr-KH`(Ptu zy@U*G_836x)NZdYAG0i(2?g*#u=ajS(sGQTJCJ=+PP2N=G6xV|-5b7KQ!8SFi?ajte4O?v^4o zkh$Z}?1I$r$#sF8v_ayOEho4?MdtIBYacG6!ei?!=^Ya>&Fi8Vy%o%q3eL;^`4f zxl!@z($&HmoT-oTULJn~_6`5zYiDTWzK`#^zTM8iXCc6xw-dTrhMD&%;MmDq6IaVg zOXDvh`XR|XTpKHQPoDP#Jc4a4J8A*EZi()XQPvQA~KQ z62Lv$mA`R}e&T6t(OFraGX5}jZL7pVcSES$Ary6&V#U6&L;haM(JQlEKG;_$4xNIo zX@0s$p7@U43Duot>DGpV&vk;5p{6jZAA zJD$oQqq@>ZkjDM(z1x9T{hfMcDo@A?O*!ZI6BoPiF820U&rTI%c1}9(zqy46Us(j? 
zUY>#bW(_~nk*JUL`);{@R(A26#RFwUBdB0SB1-K^m#Q!m67!~$=G6vQd7dQL&$7(7 zeHU!vb?I495ar?vIaG2!Cx!R&h=yyc(2X&+?8zW0H&B`}=5ou7S;L`;t^&{PCWAF0 z$lk1V;s-d}@QPK6&Jqs$xFWo`WF}w};*f}XOr_7*5B?=?a{%LJ*IUzF+!I2FqKpL3 zP+uo7P^ni5@8)9fY!O8bdGy_#vrCRMNFAB6vt(+X4SlP+*HJr=B*5f+!DoH`r{i|% ztv$)wq8%=*->(hngPQ(W3zi#-Alf_i7Mg2f|JojPN7Z4!@!F*q?B?giP)Z;w7X^8{H23}Y>5T{&dW#hYjRB+1`+zj5 z6qdwt<)bAMOIDQ<1RcYY3enYXAT|ee&Sh-Ea4$TTi0~y!R>s)Q~1l zAAGcy;NJBHKCmu=pj<2M!W-~`)X97&V~jy8Y;t@@4!gZgZ>|t|D>w97W7O4*{zPs) z;S3?U!f;J~Z82uhZFa}47HEa^v~JdJ=x%isjzQMp|FHyCzPo0jk%`V)yP7V@Cg-EP zeWEY7ysqwtP-m)sZCKg*k#6v5#j6}k(-}~XG&0Gfl#9*q4H*afhff4ZTN_A%m`@|P z+p7ZPJU>UsxcZC;nw6lCot{Ax9+aFF+p`kdrzHC!6{+|75w*{x&zyq~S4?mkT3X)g zKxT%E*{JBy&r*W6XwvU^AAK_5dBFqM18aFaiS%4D_Jn; zkd7f4E;vN-MGj>QJ1G8PG&cK@@16}+V*NM2m=+U}9Q`~Y3O~!a%?mB+e5tz4!=A(! z0b7y$WVP>*0_pnR-KU`f!OV_XZ*SBk&)Izz*u-W5RiJW&Cl;EwNe(1!5=-P5R(o)W z0C+ULZsu@_WkvpMb=Ul3_AmrY#bEbZHZ>Hypq0~r6vd8>d8?JTYqqS_n)1B_zx1As z+S1crd$?%dn|oQ;KqxJf#CmVzZKkl-;YPVwixf)26$@K;xmD@*s>XNpd1PSrVp-$% z4qtdnr?W7(&iCkDPb%MRgx%neXEM<;hdyPumzvc2sqrJUJ2j0;xZjo1Iv^`vf!9Tf zW;~o86X#>i;&I0dfJ;nwIL)CcysuLgP;Llwu1a!=3n|GWG23_}_Q@MPlsUiN+Hp`O z$CHFETA%yGHFVVLQLf^lV{9qV-?Zk2)AQ9j#?Gepr#?m`3CkNmXR?b*_XsB<_X6B# zqbVUR;7(_MOH|#SV-*oU&ZWy5ODZhJEajEO&9A_7(+=G8W(Q%H@W9o}PKM_;rucKY z;9Dg~_cjQXl!@k(YdmhO-N9l8q9cV!Lj6%!j#rj^7iyOXH&$IuS2%Fo@T~&qeKTO+ z^%*evGjXD7hhHpSG~#cWk2IlR!v5mJg;BXC6(}S;Y~3@ffSSB)p=UVgIH|6^1#dqo z?GK4$84opEqF%whdNZkT7x{Gm!xLS8m`BCf^=jn&#+6z9RGV?Xgvb}maZK8W$U}Y~ zHloSX1r`{L(`mzy6-u}?Mx7<%?!B|VnABF@Y-g`9?6pYN7thD`@SSMim22}Z&?tnU zK%;BJ`bT5uw!aBVmRw!za>82&I1s{w9|^oWq7O?GRYQHVO?(5BvIJ~<*c)yS zY;E}*S@KUBTQ2?YmA-uJ!)q{~+)&D_B0?wJ0khu5EbcbTRrrsHLEu(u3Q8${BzHB`iY4#;-Ha6s|ASvSKTbSkN zevqlt7ktGp;0F@z)M@5}QsSfL@V-tI>Nt0B!{)`uTMvO&kVW%dY1%uNZ`*b|jS$*- zS6S~`c?>_bnRgofG)`sVQN1xxR)C4n#ZZU2_#P%Q51bNdAFCFtU{je#4^g3aj`0bQ z3oJ%XPnuqW={$$&3k<^hw2MC^A3+8e1^07!Xr4<*kZoV8Y=D~UvWmBkt8fo7nqHt0 zxCCs3W#vLSSM7!y*ab(<*vH|7woh_kd_I>B`+82_Wpi9tC}~rd0AqN1l;gd!%znGcU{faEE}z(v8bGU+L=oCgnc;SaUjI 
z_kmvQq3_?SG!9tpD9fE&+C+FR!7j(p9{^2VCN^3Xyj=nAR0U3tt%vz;4Rt-qhenM= z^5af-CXzI&g-g>17~PjvVDx1b{r&<1di)I#65>DVzyJW0=LGG;Q- zP%ZNuYOPI&bph9da(#oE3nM*TE`T4&P;& zCGedi25w1}0RD?P4*s2*fUCyPaKPs2Frw}R#x{4tyV8fy5t!JbMSA_(3riB@ptjyy z1ohBgIu10bbQykok)x>kW%{#CGPRYHB3k6TEW!oG$J*Mk))lSV4apu|0w>+JqJbwH zG%}(b#pIK~9%LP(v|?~}!=kqSAO3<&6I&40ErDA!DDu}A%IgetlkEWv$a@Fr@Bm%; zhc?6ESFPh3zy@QClb9o)YYGo>Z|9Z&6nXYXig>9UBj8rtR_3?+`FUdSakBT}x2h5L zJ?^?No@v15Aagm~x=Vy5#dkLBm!+Rk_O~C>WNDG)$l^6X?#i;dD*6TwjfhK)+~~|p z>$()@Zt^fMCI4~c3^nsL58wUvM`v-qc_tubu&xX&IAL%DYF5*#E=rJHxilO!D#nq| zGo!VpA7@@h1?S8&)b+n~yv*>5&IW*~sD0qq9yU?-3loHDTD|vxcmWfH2wlP{UNGuOtQwwIR2T`9YU~( zmcg(wk%SWsVllbQ;0Dx+?Y)dl_KApg(5|8MQa~AAat(ZC4S8NR+WSbFMF)#IH<7k_ z{P;1)xwfuT@okuR(4fZI_Qb#~{)=<^Z4z_YajAkR66fCOfWVc z@<|yQ%RJ+bMy1f+W5XRXglAd5X)?-=HQxV^EKAr8sZCw0C$jj^tp*`;4aK#qn;Yxl zvtf^=4_=e#z7;E(Pb?(NyqhXKha$PRPQxedua{f>Wt9{4l|<%;J-32b z^joetg)-j|1gCxi%dxrOMo3F7V zPxt^=`wlU+h|B9`v(3rS;>ZEm!q#gL4VN8)binJQ1F2by6zIp2e6*moxn|0Zaii(n z_PZ*sKANFl_WVq>t2Tn-pM+i}>j4U5J~{T^Fec`=ew>Q&W9(G`VjFSIqI#H`#mBC7 zK`F|$dJK`p7HVYgNm73HU*$vMma%WPkZp@Co->~!+fEI|N%%lwzfco~N64s?ROb0F z<@g*h`xXHSI-~)z=?=hdDvnEr*+RckbgFe1Ei+@${2Lu8fs@^f6Z`0vkLqTqDdI}F zeboqwv&6NliqVnB*JzTAk1H0H8xJ4Q4SGDHND|~KU$cuj1WW3)f+qvd(jJ38K;i|< z^9WcSuyr|!U5Gl&tHL-|uhFSFk)}T(iKPXW z*6|Yqi|5KYy`Wp^s=1~l5<1(*0)&9`O@4TN+lPaWr3znzA;ZM(Xz!mSFewtzkK%U? 
zl7F+aMu8Rj*+0AyK*rdBlBoMxSlzx7P!4zJo{>x!;(~RsEjrfuhG6bS4=SBx-Q%a5 zXUCzg$X3K=Wye;pN00ipBgvcpP??YDfsExnNk?G8mKcHjM8;Ox+_ziJx}VE&C-Q;o zKcwd)WjH$y0yn1iR?lDQzZAZqr9WNlg$qseV$%(5zGr;$QelS3^wgw$-@wa#pt1G|biPB7?vd{xujcc`C;#UR{2se<}ajk`Kglr36A$<-q1$)ALQUdil zX1VaF@QSZ*-KImI>@VxbiIUHtkzMdd2DW{^;73$tT)ZfahNRcrn^!MAGXJtagCN|k zLp{cYwznXTJ!ygDAXKmcy6f|2<*E{r02L?DqA3be++Db&(GvpEZ$!sZFU|C0-hN@g z3zZ7h@(5pu-u1X!JH877-*eElI8z+9y}`2e@tC_nyRNVNS)Qm2@5B?dS^?pEy;&Om zrB@@NGEMrWs;4`W13jq-49&<6?ewDC!S-+>tU#rQf{yC6%s!Fm9%4oCI^;32p6$@5 zZrF+nNfs%n+lQ3EjyBuibV3Z&7Vx4c+PWrTmI7K%O2yLRaXT=-ZR=ei_HG=gxZj+o zk&$fGqo0RxdSj{v;%v&h)cHKURS!2vS!(2zG*W7udSV;x(-_8kT#EN0uHU}1R*Vb_myQ?8_Ca}ebyU+9Bl>Hi37m|rX zC}R_do?uhP8mpPOsx9hrO+2w5oZ53K4$ms9!`_J=BpUf}>Yp{Yo-VeX0`@YTz@KVu z0hi1pOT_hl>D)vH?BMj&yl1{$5*|bd-8Z|Evf*7M>zyOCQGEDwcPB5EEs{=f1=VN4 z&V2>TCwbtnLjL7=3#VA7NGx}_;rt#PbeHn#+m+9G>E!0Sw?0E65Fi+fyP3ow*96~)eayF zESK<-E=uj|TLUoeXQwAb-if|-3Z!0r-z2>;GV;c{V5}m*y(apS;f`MM2P9b zjH$HXfT3sZ*ebLk*gd!beuox`3kA7#Wq@Ok7Y}5XBG>F5V90BCh#T8+U1^sG&L~1& zQy7X2josh&oSGQbX&slbN#j%w%n)%15Rt-e%s^w4Qw+-KFClv3%O%=`(0?TQuZyK^ z`Ti(3+hpEp82O{@#!9LgSdg%qM1FVzDfsTEg)|xPE))xxXdSvuA`v{~K_fw8d7W6x z7LIg)nq5)LaosxE1rpp{nf!s_)tYHo-hDcL!Sw-`jH7W%B@jF<8rxi{;;*J?=dNyj zFlKbrd2iU{BauqMgK@7*bWOXDJu>$_1z!ceu^DRK+5vXzDe@;-j(Cy6=N{Bf&?cpkGf59mxioZK_Fzb(-S=mxou! 
zK6b~CcCHD^%g>n`NK)?DXE{92>BfdJ#gxI(69Sc9fx*&!Ki=%>jDf!`rs!nD9;WJG ztmes58s*&ag;Vp-*smjmiak71qRx+br4>hBP>R+Htl1|SUTCcrfjYTjkdrX@f>nt_ z(bVnxm>DgLsdpJ9s)_Y_^enw*tFXSfX!SmgN~d>`UjnTV%Tu z@C%X5C5H4eaeq$;WGVV2;avk4GU={4i#e=e4-OKLGXgP}D7h!6GLrRPv;_Kp6_6Fs~EjT@p zqmm2tf(>ct7@_3@LqqhfLGgz`Xe00Z_x7Y?X0N#~gM(kkT}x7mQ-yFPYM}`0$=5~I z9w|t-S9>L;%9CIOcABp*FKsU^;Dpr@4xT&6YlUf-;+vcHbkoN0{EQVA|6~rZcGq{} z>CkIzK-ILK&x&#>ilmIj%9oVFXZ~Q>;9(1-eHk+Q5Y|*8YBn{J3jSQvQL=F$5+6!F ziFZn}@09%T4Onwiv@2VJ-(u~GvgM`f|dqMr-7dLg70&$_Q+UKbfX9DbCzYe5#%9pA-=L+0MVVtW^&4%C*@WcP1j_u*`E`!3kT3y0%gJrUE(9@*2MX&c=~xDcYf=*G#~nc_xdvB z)R@~W$X<+hCP!3Qxr1bpM$mOcc*O-hk+DlR0Fa8Zy3@vI>JfLUiCUTHdiDgth)y7U z@?r!eYbEniuDAQie&(99!9Y>C&@E;vkp6-42G?_C2pPPIDbgv)Q_I6TBmgSXpy9g4 zUeaRXp#>WLx<^i9)#mA_0|d=!`vWZcV%C$IHsZE-LTYcBB<)w7#Z;t;Zy(ob_JUHh zFN&1=BXXWX-x}g$<(7eO4H<~@z^j5po0=K@%2xHwSm~r)NDb)~pH@@JE==Ep8<$%=>8(1Q+sxoijU3Z3S6B@)yYgMPwuAfzGN?*C58gNdyqNd-dj9=sB_!V zE*$l|kfROoK&3niHX zxMN*70zgaAr7Z`sh0n4ADq_VQTop46!iPC#9*|dJi|sgI;)6~b@%KeK%^5;&^mpXQ^=VFwqt=Z{r-CR#x#yK$-S)r@9~@pgkd0fpCY zmw$ZhuLOezt8gbn0(CD4+oB7Uyx0Q*bLn0{`_?GL$m+uuN@_DSR$SZ`xa(crayD1i zCqM|G#7ln=;euGqtODUCH1~6hf36%8uQ^O1q)71!rfL-cVfMl6dU=XYz!7YhbD(PiGUrF4JIS3d5<3?+l_n6O6Ouyd9{oApJ6P- z*ldtY2<$=I9&26`4!rQ3L!QcYNygB}J@{lVz9?Qk0`1|mw}-_z&I;RoPY4Tne1(Fd z3weIsnDCsTt&Cd_6iL)W(n>#Zsx-K<|5SrA<7Gc!5VL7NP`56g#uYm0jn93FdF5zC zhxa2qeBT;itgs2OC*zEQ9l$Y@8`S;UuoRin-WsI+ko9KbVEHZ$Wgl&u6c$$9SuJ;x z+N=3x0}rxJ2z6W@JqFJwG9S1P1nz>SY{kH*>tee*LTXmW_}KQ6OMYAyWxqFImgn_D z#%~46JRmv3t@hsIcia2Y5MrK5IWG8PU#iYm;eiXPU<+zNZvpzgDbKxdy+sOo`3x*N z0<98}<&S%M=y$MJsc&vj^Wv9i|8teIJ}=9~dfup8~T?W^qU)XqZTUIb@}DT07;w~*=} z2d40U<+}dcztYvt3^i{iP3*Rdn_{xV9HLw=B*YKIlWGuOce!vk*g~BF{7ckpu1?@7 z6RD@JhKAnGScck}50z!6!(K=q^@Kay?qQzS2xL1!LP|!g)TUk&V(~`~>;GH;;EIDu z;7-1erT38WmWVg(Vttb=<2aAkJ1_y~+=jjO(hq;uLtM-uLU`%M{*#1cS<=%KZnZy_ zoJH28FL`Z|wt%u$U$iXJ)8w_N5cFp=p?Vp`10ZqQ#hRF@nEsr^JXo+Z?bKxAsgN7S zFaxW?Ih+H!!F>PGQHRi<<;7I?C zX_`oq(Csk6#fb40`9cE&)KYH0JxT?+qGCEfGLyIw2>Z2(AU4N1DG7o4*qJZ)*T!&!-Bg`~yx~_B$ZK1qs>@?g4xC 
zSF%XV?vx~yBLJ!^akl`N+y4iGiEE=yQ`nd#i`V;PmZpU@#k&3A-~#C07astvVpQ2N z(D=j+;7TgpoAS5``|~jX|0)n{_&4d0wLDMADkD>2ZOi}SGJn2i&53ITkjGeUD-|M2 z<&r8BcGPdNTzW{|L5CiId*#ncYLNkqVmb)`U#MgYChlm3A|&r&ItxkJh;40voMO1p z`=CJ~=hi%n4}cOQi#2biA`{IoJVEP;r7`_+<;x5~hkiTbEi+E(H+0MI1pgg#V49Y` zE*u2my_*i;UoLWZdeMiRRhf5}<{$(w0%Tz%f|*OvZ^$nxkzR_J3)Rdcfg63%a~Rjl zo6ngbioC&TU56gusvPO%a);KXrK1vZu0cizn(3l5>HH zg0u#|*=9$P0`j7EBJz$3({vr>*dv%>JgBzv^qJ`h;;`STp4%OMfbYn|w!e#0?8SWm zfHnM*oL7nGf=~bnljY1CodH0))YMOZCkot~TL84?xw^w^0-o+vEN9>7As5$f27UeH zVX9QD!8sKI-i+xIRSDb;@1W7x=Q=&rceWtc2_V8C)Gv>aJbFnR;8lX=U|k{$rNN+0 zfN|li1aHDh1_@&?N#qRViUPEKKdVyo{rYZeE!VeL#T`#~3BaQB?)0}a62$&V!08o% zN$K%b0kBiellc?$aV;_LfyujwU5PmbyugtMOrC9?_wbA9vWk#O#z_~9#u+x^3Koq3 zJQtL<^bCnD5GS@JJEDRtu|0{uQw`tJahY63F7Fwc z12Xa#9OpCd=feP020N@@HjiCHB9@4EdrZK)c`tBya-M#7DZ-8VeI{d8E>&9rO`v-+ zgnAfic35=E1&vI$&mPmcN??;N@dp4(#giMYA7L^J6Qa8`2?E#k2X zU`UhSkrTJP3V}#KkBPYpm-PwZB}#6W&66DfPDfeh_P1A0b#p({>$$0h8(U{J6Hubp z#&;guJm3%}$XNgZy6L6zl|Z&Mon*plunpSw06Ap=0OGV`#AN_J8}#A~L01KCUIU<$ zdug;Px;%hx2hf2xPIkED-I~uO4_r1{N51pRFVAV)n<&<82a?1pA3^Mh=&8s6sK>Z< z4K&WavWe}S%y@ookPaIJV7^y$=_z0ie*wVMY7PJ><&BsXqP~wJz%&sb=k&-(I&cvM zXM~pY!6Xus8jHJr;gCMFBk9BU^QYOmmRB@EgzcFVaJXv&nA&P>vuV83+IsraMNH(O?#sY=~3EHNAK1 zEu}8B?EDIAhyg9#YGAe}&Z|@TGcK_ZHj@W;!K>Y3egaU;i;-Y~gqS008Apn=p8*WI zf|e{+LCA9Q@(2@63rCaeCo>@clt|fg`4geJ?&Gu}pc?izwPR!&)H=&Y#qXdueqtOp z#wuKTmzai^-f$Wb-863Po^1_4M1O^@eIENEP;0}XgP2*#1OYbq{KHm?^Si)W_C=p- z^^@#m%&YgO&wUYNJ4W|!yY#HWJAk~RSYTgXGZDH#2)yXL41iuAw=RPUk6XK0^(x_4 z*l!q+=hTS01Nwwc&xIIjX9ci?Z>PETN(41O&6O*T5)|5}O8|o&rzqThbe2S&CWg88 z<{e}25(#7c=qb};Ed~j(FH0SmE4b8KTE~x_Q^a)h{ss9)Dgco0puZsBpyjt6dT`|& zc0dx;{|)lZBowlvz3kCN)FUR7=ZgD_@GrL7-CdqJrMuh~x8$@2L}=dEgM$EtyhQbL zl9C7!NBc3o`u1*@ge>l2Tz{{gTQKFgy0)Tl$2Baw^73VK%V#X8)miy6HiGcW8lSXR zs%9J;4Kj=1t0?o=tZWv~wal zREq_W6hFX}956_}=X&-t7}Xr3F@a;T4=9ki2)8|61=VguZVSqW*|&QT1=Ik*3C6|- zQD0J0W7-RtBNSLcR95fsSnHMrd_F-+cklr(EH&o1*^j3{u0^2j!BTZ<7ZT^6u;&P6q(S?y_~I?tzzypQd=dKO0Ka^FiS~P1Gv@PkZa43gP!;9|4=5 
z<2EGJO8rz8NNPie(36VeEdZxg`PzFg|1mIyOwsp}`i^Oq*DrIVe*VJv88SdJ^71ce z(I5a|ZUpt-bpOK*Cz$@G-zrXSaw&cHgtg>{# zUwwb{lnAo~eqT0y4p9B7eiu0Y+l&9>zk{sPJUc@{{}GgM5hm+npDglHeErHlmI*o$ z$O)#E^qc~~?53lOi#NbE-PX1B- zu224fV4q9@f^Eah?f!ukvK<6SdPv$Sf_3%MAowd3N3F_C#w$Ap`k;P~Gl-yU72aD({v4g;1<+p7SpBh*N8Q zf}UeS72%8pTy#l-`|}$?5D1(*6-9vlFOB?lqV~ElupL5`W0vC2>zQ;c2LJV66WG!D z=uew^%@KP?Z|G^)3;#>QhY`hnMH4*vkRwG@om`qA{Fv)98bVTkq~&v9LaX1V{hKKf z2EA>Ve*R>~yPZXD-|+D}?!7aOTn-(n^Zwvz0X?f%n;w&&OM$`>lfR$YB@PyVzm{(N z92b5klYqnmWi|k_8~FcSvm2)f4pW1|pGTI{c4mn3sqkSwnejUwiB2NkmbzC$w7k$7 zJanwlyf7x0r{Q0dCk?E-S-9D~+e9T~u646o^Xf~xoKQDB6--@-upc40Q zu=ezr{4j2mGxky*ZqEGBwBGqCISnzquRe>CmJ156)itgbkzE{GjT;m(fsJ~#;^9$s zb@qbm8QZ%(b*om;RZHi3X_kBll&9QeKn{C=bZ_=tnw8@ozBp-d->y9FOzO?Gvpvfv zxpCZO`*C>pM%vg1HYPw>a+91rZ>{h<@j}%HedX-<9|K(0O6JJNP2>}G@s+)vyF->f z{MaRd@6M<>pV}P!FroPKE|6Jzb@^bF@0}NgwK242XxqelinrfY)CfK^qEl%YhAbEI z%;AyAC-YqPnK@UskLS`V-XP?J{Yn)M6cCHk{74$4%YCQezaFWDdX{?j<&oS=xfT}6 zWl;|&Zhb+C?Yd<@<0SNPyxW>h^tQ}6ZLNm){Pbn+BwVxY6h>%&Kq_5BkSVWm#*lro zplf{%`Y4`G(Wkwm6rt^zaTH3Sq@FBe1fuV05kpl&)~~FZp}PkfiV!ZQY(09YTX>3w zX52iTY7y2W&-MC#Rj#YZ688DYj@EnYL$gQu5P#*>V-}NPPt5`6BL!S@cXjiu8wUGK zwrm6qzwju;RTSH3{>}CvW#Q!hG?RX{gNto+tmX=!4-dg@ljB=iR!gW8s ziUsrsp)di1Y`DLP4Rq^Wb&&4(5jR-@&el}hDSE&zjt^j=14 zEc>h(LhqkrIaN8F19}pt(Oy!jYKeG!rG>Zr>~FN`C+*J$nS3AQDI)=$I+&*`UFqVc zq}w*`&IkV})_aFWas`H+Se{w#Qw z8a~>eMR3%Jm4M6L4WA_X2L`EEW-6LK-?!O)I*izer(++d^x%$^FQtARR8(lOhdFs& zJ7|_iO64n7ng@L&%WR}2BC)jFEL#sb$-lbfLTen#_G__c(gY%cN<)gZJdtNh>eb>8 zS!_x11*H;|)u>^M(dZ93*5jskXL+Jd9zSI@^41^Q5jkZvqVTo3b2qYzo?*zX`@29U z&62lm_l2NyLyiu3apb_@iVXDB4htebUI!5%!|N#JZP>x_AeZvm`TDXI68NUCK-^AtCA)j^QB!DTa3=sRfgVuG2%xJaHaAIS%+!wsUpg?lc|keD+CnzOCr1 z0qQ#fgLXcOBzY>cJc(XUad4HhnoxCJ@16GB;VCUBcWJUto({($+4y zSC%huqiE0o<;88ihmn5I*^&k=j$f^a|0H4ws;}n}Qgt$%g910fraP)g3)IbdiC!$& zDx_DyQmt3ga%MDu<~L?M_uNSk>uk(NOKA6&6PVsQT4$(oH55Ts*i%t`teDMr7pGpQ zh9^e}MDs+S6i13YelFl>*DzSYn7V+ zs{Gw~E;Zr|QsoD_F{7!+7UCWc1y)7C+Ua~{_?SBJW|UNPY7(pVMi)xBnOso8s{Epw 
z@n!uegTB3viO|R*Zhk2lM=|&t+r7~Bt|Wib0!K9IxcCe|{jsyo6Wa9`6XbDYBen)p z4Cz&e0er5WUgaYadYDaQ%NxTWU00_TJ((`*{SgMm13^&TQjjyOzaaZloPm)=5mi7MZ z_Obff8eZWNY+vZMhbdL$Y>}G3C1rmKbD=zEM&zf$lY8i1#&dB7I~E2D7$Ln%KWceAa(7W$A;yv#bMmG4iHx zS|8Yuh*njOg)uyuQjCmn^7@j~;^outc8i4PlOZkTmzgI04I} zhw!p5)EBR!S-uTbj`$4~G}(sQ-`nb_=+X#hb)hk2BxR^!n|-Dp7oEDoK+Rv1-$ zNWWrWA%sD@;6LK}ht~U7Mp79*;Ov#6wT;W}1rH!gwuWb6D<%VO?>wiRM7o^1slin)^Eu~0`4-!>LbKM}E`tIq6bKaa_>bM*r46`7-tkj_dc-YYw=mo(THq>Wzb$?BVfK zP_l>dUK(Ijw39Q{?MjTW?YD{QMP#K~@O{b2Yvy=xpt~bi@6Keh2(7>Vz#2|22OME8 zrAd30cdKC%QP9Xb*p{#{8+MIyy(Xz%qMnx3)4_jaYn7$V-aS0t#sF01uAwyamKw;M zI~4E|J04kZvgY5ai~<&NHUHx3YTy*?!>z~Na$P)DTvdP80{T0@Fc5S(L7=<`nklGM zC-lk7d&~M3sO!kgh9%Mw#TFXgESOrK#?R$f&9f!Q4!+L2Vr#=*dfg7v`6!JWX!>U@ zy%{3Jx%sL0oym|1;`y>;L`A5u(vf7jifdO}%431zDzE-K%@4x8Gz=whr;zB)TUnRk zlU!P~tT=X>dVz?|SN4UKn_Yqy$((806|3V9bGW74RMj5VqqzCb2@~&r|301)2%<{A zSEhR*zp_mvOVLyYK4W)Y-TXM#z-5+Es;kB7j=kH+kAXOYxUFtM#obL*n+T)3uphj) zx5(#PzV;DN=E@r68rThAjiHClkJd^U+v8}*y2|Qj`pdE{zBcjHdj*E@d3_Sp^|&?e zAfStESUb~{XUHWpZpei8tXse1p{g`y4ed(!ZRle2!reF{aM~wt>~uuN9$X2i`oYt; zaY8Z30XFrK$LQ;~{V2=qy}0|yl?62Yx^9J)2dy~EHQpjNM6Hfoh5C|DCJh#|QFg$z z?vpKHoR3w|LpSzX>{RB>cXzu*yQ_F4s_QS@E8pGsZPc{SKPXUJG=+0^b_X7v>Hcky z>-A}yh&!x0QtLi#Tm!RZtK&E6(*|=6$d!HM{Pro};v*sTb2g~22j#OeQVQNI-n7S? 
zkp9CW$F-HTQHd-wBO@QLmP$~$4#>mwLP7mj_VD)_iIDuFCyC)n&alCt$BqLl>K?;s zrEK-_yE%6G2X%EjA(a@Z_qGc1@ZYqe0b*k}0Q1=T`P0ZJb6)z}jrgs??ir3?`qw6W0ZB>^)5*DVHlduO`!$INo!;;N%@`04v=L4yhQA@ zPLGjPQk9B^6g{pl*)WOXC(rrBWVgLqM1-41y99>Eu)(k;i-&x)!`A9+7J}mj^TWD> z8)ECKECn=8ol(hCQL89ylP&#CDW34HJIfbHhsPRURNq7JL%(5vtW^tW^Cqx~6mM}a zrxkwRG;+p{yO2Ruo=`#Rp3trzu2?)>Bf03^& z5^j<1m#Ohxk9r%Qa;Nd6ea1ft&cdw-V9yBTDXKW3!7bX&5)8CouNz^M*3~4k@uR=hu2uyUAgE(M4@!-9X#S$M zf_1k&vBl^Kd3)^jKJoczX*RN{bgi}??!Q#|aOh;Va?7ltY4qkgpzcY=f6Jber>%PIyVB-U5*-0| zLM3~Y{%&N7Y)u=Z71uV8q!A|sZ76@zqXBRR%Ve1fsXPgGTvI=K5wAsL#<9`0Dk@mzZ{;de9kE@mHfQP zXfJKi;OPC9svr@iF{{_M%`*?Izo!%n8Th#fn@BV}CV4|uq-CHaXsg6>HCL=b>)JS5 zX!QFmku?x!3)G@9(r3tBIv|kS;J>;I|Lyp|{iMeu6>K!`4X+J*(Q%$c6;dM)qVvB} zw~$tneI<);QGah7c>@FPFKgxi8twbo&Q}hG-47x{4))Kzzg8T3$nP2zWxX%)u zbM>%k6N5{yG`5ka>F8J~O;I^xj^#3l9oCgH`(v3`(7QKV?0JfFSPpW63m^Bhw8_H2 zb2n}-sd7F+-rHbIqI)fr5+kziy-d&SxgSrD^)Ip~u@zgJmowdTF`gt8|>X^i-Q5xpbePP|w{sDHCrw zm0UIZ%F;p)M|F8J$%-aI zpiz&CQu|u>}&F;0}iBAgCUPh ztbs@QPE^R4Hou9g%kP|%PuH0&>>pE8p8H)48lar4BUvAl!T5Yz|8MOzkhC#K(h=;C zDpbtv)9PyFprJLxPzW4=lxA+h3K{A@|?um;Wql1Fhfs3f9HJu`)4Xw2!3elNoJ!-klH_x zBLL{aBdEmQsciLk;`i^LOSkUs(T< z16%+H{*%67JOBqwZv3B^U|*DjN7Q(Mm*c1;x_c$9m&bxoRN(IqD1Uwddcd>?L;-p{ zW-4p56Mvb8{^uBhXRiQ?gnNx9 z`*N;PAg_$z=$4K2r2Y`8OBkNUTq6Q=#6M|RjqZL+$ZU`%&?G?|bTWb*%<^BckG* zyK;VrrY1ZR{vLPf4fm7pQy=@zmNQhA=XZ;7?j4jT2CIqUd2oL66t5pVW_tFeu*YiU z4OysT(cL()q6=YfCr}rdKDVfdK4x4nB8J}%<~yy8%upDm}n(5D^9paeZ<@@ zV|I^vS5p4R_4DQQ-p_66NvG`^$nWf(l|Of%EN_DRWp7hgBg$@H*5!`R>7o?RPqzHS zD!lDb_j|>0R`|uYhlTQOTO!fH+rk<9)k#f-JqX*V-0!{?fTRWro)#Cc?;bKqwHk~w zv6`t{U6{XS)GeS|H6r2R2?VJ|c6^bRMS{bO=AH zWy`Y$!|kO0k2Jiy19V+yKCtw3=gF%TGYCox_nr$BcmUKszdF+2Z@Iv&4{?HeRT`9f z#k;;4P7K|7ys)1rk#eR%S3dxSxIlfPo@jcNi~_`!$G(!zYoxL=E0qyL92~j<;_{^~ z4)IB{g_VIln^n~8D5k_Dw^t$dg?DeJQ00HSRI`~jlU<9G;s=jjmP$%mQ4OcFO!etuaiQy-t$83Vb_y5_Hh74g(6jl+|BSFIE6;f6-B=O&cSeTUex z#0sMcuM0K+th;@5Uh`JokFPU*1hTYVN$>4@ZV9fmHYU0lw{!0cXLach`jv^6=#%Wj zVzZ|-Y|Q6uFxU3h(Hhki)~unCrB@9Ic{v>K1Hj4Kpu%ChMyusY30LxAd$BwhfDyXA 
zX;MVPsB<4!`hyX&^%!wwGnOHuaB<|Ha1SbTDK8fXEM8d{S+$w^!7qAjdO^Qrr~gq3W**m3&hN==6D>jO8XEXi2~P#x47xJ@rNA3 z=6R@!O;Z=zcJodlBaCcA9NC-QU??`!*Isp^oA8OSQOk@ z0Ai?Iv%o6=c%OSfTwi$hKq!eFZ6r<9r}_&uVD6qBV0(5}o9+1Uxo2W36<|G8&aAHH4geP#0mNit! z=xO2Vq3h~f<eD*r=|84{FRHX=h^i`~Y9 zP)LR;A`aM56&4-UQ>P7=R$qz2f-a9}z}+_crpR5trf?9*&Os6#Y+1aTXb;Uu@jTNT z4XCH|a#iBm$1VfoRh4~I*Alk#_=AQ)z<7c>9p%!eSMn#(_Xs8t1EMOqrDXF&;FG^S zIlVwIL7OmLdXSC>Nd4dFyv-z=nrWP+1UwKc=Lp-3iT>o$MNCbUJ^lIa#L)$5GNRQk zqCeTy`3<`~K4vV@)*VTd(fd^G;Bg#ynZ@y&%Ep;p?odeUA1a-k4H#7I`$OQ-w?fPt zVB!V_we5(4G?D__9KtpxijKyBMl%w_pUqwsvj!#@dULuCXy76}DTl5WG%@|-Ae8G2 zNUtTAEbz#%QX~q7?RiaWCHYv%O!Mc)mIC8gnEDj%RsNp(XBq8n-%Iy%*F z8H8IlYucRISQt@0g`BXQYB=6%H7wtG8MY!0I3_j`-F9~aWd%+HIC_Qq{Jl%UzBa4V$g8s}F0Ff-IULAtQ#vFk@;*vkixxaO_LT~POzauf0F65`!) zsj1m|s?jtP^@bCotiJl0GIfCo??Q*&$bJ+Feo0`ES^rfZ0FI*GXeEtzp(?;N>2FTGin{v6&TlXxPIS zeKuhhM`J<3fRrh@J+EmjcAAM0n=C!!V&cDLm^=LKDPpy_P{`jK>ga*IeCU(F2$SNL zD87PfLQ9qVBGut#jQ+l#Xqbn)o@&QWEGBHXdGd>hrbq_utePFoUzx=3z=|c2=6)_~ zTiu}|*2%fW%hm1)Mqp#hIcxFgE%x_EMLK2o_?Jp^D#1;t$IZ|p^TmD#OX+j!spid* z(9oj?4{LX{RSsDkQ2yYG{(~jVwZLi~36}~n+okExE+!GnBL`f#$l#>2?LE^c8x6Y? 
z0;PJsWZ)AI6}N>9+f>CTnNfo>ot{IfABTb`#;*#j9yh4nm_SxQeuAmrbqqK6SX0v4 zsweXeG>-_tEGBvE#NxG@cj$c6XqugCJB8uQ8`nv$QMYf?KdfK7g8qhV5^iQcmN|yJ z{q%EZtZ*~J)w)+6xx;*TD5iUp$M?tQnZ)1^u0_Vvp-+nymBFy>ORlla$3NdUDk%1# znq{p0{PVEBhjVXx9Tx2iV@$2%9n%aO`X$RP%DupAmW~nUYFybIPsq3&1gJ^vKi`v4 z;(Vlt*isf)Hx~Zx=jk%v=0dQ&5wdG>ujGt20TAy%MZlCkba!%lAselmILPQ0Y|7=P zCE@9*oCVQd&^SKDhu`Wz+_*kvG<#S`Z}olQtB9Xl>ad<;&syDMr|W5+edn=x@`_1K zTF%|^aZVF6qisJ!r8W!GaE;%WtCNL_(ipjf6+Jhs3z&DdWW3i}?;mO?Hf2`Tg&v_F z!WQ2rRU8~`?Tcd*1pSV1S?A6OXVCqKl|K;yTmxxV$QCWbHDJbe6!hc8xj}VEh|j2qT0uK(XHsPAgvkcyHoP$ zZER1W7WMgj`C9pfeagGTrF+9z__6Nw3Z<;IT+&ngsLPFFDLQrX-h=z_*&>nQ#P^hp zmWJ9kC)~p%R!mugy^@Q8-dFw< zj5fYPd+hrz;bbGQ{`_7uv}rbKhZVcuS(wbX>CxZ_7ZRsnR0iI!n$zikdR@viK6zjml(h?4}NopDJ(kWY5&-?&<0W90XdAP~j)q_flIVN3wiu2v9U|#5WkSv)j!!E+CVjJOmzi9c$ zZE1`7=)v0_4aTWkNTmuRe+OJP=}T|r=jE*hO>KuJ?%f|7mbABd+fnn1p~A4EzFdyv z(?k7f1)fc{L&km*T^Z}%Q`YBHUS=vJCkSRfJLTAP z%1fI;n5_O$F6HF|18>2_9(0A)2-LrMi<$*O~gB zkk}4GKe}YQJ2owcx-S@dO&3scu5IUz6#;2zTSDSaVgo9Bt5Lx<{s-+;d1z0?kMDen z-Vd+eJ=Xh}m%`QP((E=dqSVNSBqB1} z7?jkJIu#rI?7+-;avhyK#a8(6h?cR@@?dI~{Tj80Fsdm9ceW!z1SVsrW;H=;QWb7| z+ z6{b*FvsqOgI>p4rHhH{*7ZdS+aAt@Rvn; zT=UglZBE#p`s=5c=GI(SnJ+WCHMEyfMS)qo=T)m!-99l&SBxy)RoK%?lD1t)T~A5D z{qXfnaePihd|u;NH(AvRj|1v|q_YNA&%MP%CiaRj&$g?c@ee2zY0!yw)3MBo*vMz% zgL0%_S1EjbvGimMwCR+##gB;qhkVamhq{%~3_&ztv#COn*kVeRux$u}4#pul^z!B^=$6fS>2lhSLj;ilU9RvUKj4(@& zaAU?vWglffUAYIX54{VwgM$VSe3UaRk}$;-G-kb#Io0k!pFS7YtVh}@sK?d)l<=ge zLf1tXa#+3L693rU!sgVYi0yr&0}V=EzGJU1O^F-VCcPdcPO*w~ZVBkxNPhA4!a(p> z$lm)ep>}GaH{k(UJ9v2sBYeU zp|GMf>zcZZ8flo9WV&AY-nWx2J#CaX*eUhAkf)!NWltpHHQbHh9=#{Ovo6c11>c?&= zS-f;o!dKL{ok;VR)&qYyL@z&vy`!5d#%C{8%uL#Yy}#MG&SJTGCJ`;4Q>RSorOIXK z7n`mlOsL9wQnzgTY&X#wV_0k(Vr1!^7l06uhu_O2)*K5&THkC{0JML~Kce^zQIKDpCbzF)euowjbO zx=U>m{l#-igqmJ@>{z!0yC=>2y0wk&j!4fWd6fKqRf61j4P}j%d%er>@+M8a6c7au zOebiD>8S=)M2)b0dmA~`J5=h{moa|{Yr}=3K%ZKHNxh?RpT0%E#O7<85#6(7BHx;d zr{}J1d+X#amor$84KJ3UyxVg62=0@WgBA^B047*AjOw~{39kZegKbI2NskozTd#?9 
z+t4qI=dJIqznx^h%v`~YZNZ;rfn{Q%t)~3mm+a-DQcD~S2#q;oZbqz`PiE240Cm%3 zpORELCa=rXD0%BYKh8)ulWq2Cm0obAqT1iZ95gbU9c|FOBE!HN-CeO*3hc}~FN7FT4?`toR%3x@K+|x@n(ZYgNseEGE z?6n}4$z=)T{`N(!<7+ypNmI^OhcW**EhFhz*8srDbj)*Tyoep zr(17&-IP%uW7pV_DgP$HFn=C{@h_QBV#(sYwCyRh7P*MTQmdMeyhR`fVHLv)rOIvS zp|ed-5fn7>Mm|n)61Ma|l3a6=A*uWwA#PHF3v_MkU}HxHFA?yY$Now~G%f;~^6>iT zAf5qnyC}{=xN3}f!BCwEV!KWSq{w-0=w3SLOy^@9r`sB!*iB7n4tnaBzHNxnvK+RF zD#+O{?e=_sH5leC^=W9i+>jL-x`V%-RQ!6=N8vAsWF;zrj#dfoF67v27oFkfc`(jL!2Gi^1NT`}@8$w!W62h4 z^p5(jkC8#D1bwr_jvklZ@%Kc$l5OxOr_m+DoTQOyIsBHo!$iJpxiKxVEZEqw=={9qJ4yndok`I2f?vx&?3~4Sn6D~8uCpZyZtZqZ)pfus;w@4#nbUyb zC2ZGKJ#AcZK5DnP01{V@G=i^EeHBrEnK_iR1y4QVU|xKuY$&oT3;LX@W3$3y-N*{w z#`;FRlEBaB>_nGje*vFu_V#|T7+O~XEuLsV?n26J-QX51Bs2?35jTNz^tE9)XD$Xe>x4G_Pfw;aXbt9wKk`G?2ym46Lhv@O@4wIl`d; z#AcojjGP5h{uZNd!4GHJw|??UKaCOdH7>dJR464$nl0CSrzScqGGVL7&htoFZCU#W z)H;+3oRC<5R^+6$t(#tup7G3l)k02ow38*NS1h(a29dU%I*h&AOlqaXR}sft;VL)Rd? zM)M%~63r=)(2o-5L#cesbA2V8&-LJ$wPU7zz`t8=={J7S+Qm5 zT1iBmuxICbyv*ttz43x+&cI&oaE4)xbp8XX;l-zF`Hcr|4YuT%Weu2^LuS9YKA$Zj zV#>;@UH(0&^1*WV8qtNXhkucw->#oUg?SPDM@}HB79GXPc>0sh^y{qT(4bt`TjXXq z8q)V#J+>9a_sZnGa~wvd9`yt}q?y)z7g&p6SQ*8iYsr;HHgT4KDQQ?%xZ1+=>#+G~ zmt1iNit~YIFWbajk1K{+$ffIzhQbSD>}PxLUvs;7j~qRxF7AOGw|`;YS2OIKEz-H( z`@XA6v^-IdTq@7}c09}BA^my+my-{Gn!v?QgjS&%mM2%d>w4Br$B@sr3Jw;GkH$rM zsz=Q)6DoIa^NTr||hhJPCx8p~(nN6){C~S-@EnQoh;#qSe@+?nx$s=p% zzw*qC!4{m{wts{w!uuKuBdp9nQv{Xvk*|Y8 zVmjkxlwSK6cJ|i$c!(+@wC3AZ7_T1OrMUrJ`8@s5{vHAdtSL!8=G(iSj8nqPRSNbTZV+1G!caFEUz_#Q(fCw=Wn}5RzO;;*rbGi>S*P-gu2@k^I;PI^>$mt zGZAwO#$nKWs-*W1*^JQjwF)PD#V;FRQ^TeQ-of6COs@XtG&Z3A9xA*^HH~pVF}*aN zHuU#K8Jt_;z^Y2&@MFZtYL6gEC5L8jT{!Aik3IIe*KQZg$=ffh|6B%A?NPB}r3+Ly zU#qzP`*S^?y+#Ip1q?cz!QAk$@GYMPnARVjz$aKTn9tQ<{xO?ix&yDca=+Q1bawno z$b!&J`V6EHwC+WxL_s1_fSeam|1A33M!;#9goIbASjrQT9B5{4S38^s^rVXTiCdy$ zoe@2|bDM`%QLI-{{;kdWSx<3%0MG#edTd#1fYxAymlpXqy7UhFh{YhIpg;=^&1qNQ zw7UPQT%xhz;8ccr5^JJ*62IuB9W7eWHb{@7$7q`Tau#LlZrc~G7{#oqW9*Mms*moL 
zbCdS#4XDN&+^p6g)cp775NfT5Q$UsW|z#p9UsPO#q5%7o`4OZ0A`>lrV&3Z)`AY6BKB(}L|*7OVGi^|70 zWoxkgeT7HpuBhGK_+{)58B^;fKzk8(g5!!l_MI#@Sxo-gtcVJB%=eSzZq`iGWS)H5 z&|WjH!P;*l-m;XT*BKoN5%1m3yS=s>vmEeKZ$k9N`{y<4nP5tEj&%ka8?@vkJJhjX5pc^?9PpccwX; zeBFG%{lxTO%GTMR9;Dj~>Wb~pUOpXdOnaBbV6nUG@9Qj+iKwVl z!@#K+^|V^BKNq<#uHl-4PSfVA4gRnvTT@wpk#VJ0jF~@XF~7ObqHL3aRo0$GEs8tt zYi2Z~*S2zohw7|d)$u-xcB8%)DhKB6wJ&d?eujtUjV|hE)kcLm?+^TVm{uL0D%hvJ zKCmC=e-T@)3lUgtnmHqxct3%&4b3)eHlQ+DG~jK*7V0`$P8Vo+g+p($Z8d7A(qP#D zO+OdvJ+pFLr8k@eKRoK`P8bA9G3Q+cTw$(ZYj@6VkV%1~P&`b*c=t2)5LD+l?d=0R zhbID0#_7S-4HZnJf@{#3A-Ux`S!(UaE~?&RWJ za!+v9-#-y}DUhX=@@~$)oT$_2sIA`)+LGCxx_J>eke#BHF-*r9>g($rm!9P>{Cb9Q z$~t`yW4fA*`QjO{bug~6bdpi#Nlop}0gcs1-tIA{T7%~hCG)2PNN-K=>M16(u>%Yl zmEp2Ev-xfOu{PB~vA6~tL z_E7J49X6@k&*E9g8q6K@9*0xONABUYyBosx7By()miT(by#*e(&AHw3tkJdh8o}kp zfTL{vz$xavI72e`daY&mSm9nY+~@p2l5FjgbeO7GQBORb^cbB~KmKQYxj*#0T3f*p zVxqb`nk?f%?z1X#pHNq+Pp_BD)6DaR7vuFDI_{exrqcJ0rnX-rJbm*k55CHcgoWPN zL5WrS7LV0nG>qm>y7;RQe6&OKWi;Rs#1TCL<-ny-|AaXMpfnEo?Mu2Rhx>9KA$oWV z1SNbMueMa#M`{{VGYXF_+H)$y5(YH+LE?_%JdpKvW~gEJquGqt&$;GryqnZ*`B4k$ zv%L{1fFq|IuH(#nA>pU_Gsv{GO>4;`REr`@yi^J>V7Gn(?c$TrmMP3Q$dEae*7lrj zvYby6bvb%AGhUaM^j1}iP&Vp{i?*5%`2t9LFVYhLpyP24!ipNXQ>c%3sU4pk1Z(3+ zrP^kwcyL(7P`>hY)5TG@y=?lly<7ot-2n}`))R+Y!3s%a7AbS0oW1LIO}^Hs>OQ0d zBwx^jHx`Q(NfPt25$z|Vpr7e(^&514bo_G(r19<5xT&c*1t?nL^vWcp2ur6ynRj1D z^@b-stmC+GG$sePdD!%<3uI>sX~1h|4`&KaJGl-DFnU`z?xpxLxI!5Zy=u_W`wcbN z5CM}OARxE|ury4Yq2RqN_xYHNwOP@a*|7o~NfV3t{bLn|t=hxwpNl6zY>SMH#unKZ zR~?%;+?(awuH!yCrz}4bGKE$8M%5`CcQx3Um)Nrg%lNK6L1#ol3x_t53`IwRy&_iC z$A_ZWQ}a{$>1iy?xI@l|FhT4NOfh9W%-?8%P=So~%*RSK)>=j2c0RC-^ws0(eWf=I za>_oG6I;H|)Uu4nQ+a3U$Zf(r#QL4NT1BgyqG0mIe|9n=14rpJ;om5In{r=+yK+t1*3R2rfY)q1GIo@K$(JF5`ZFB(+KWo8p|St{UY-|0(63 z>MDIi1S)mBcXEm$qE|wkVxiu?aSG%iToCxE4I}|>_4l4$T}%70|0F&LPEO{zzQQtW zuLX-Y-lH}Ile^bf5cVsN&e?`=AYTPwZU*~&gRnjPh*uS=&Un9HiJFUb3#yMmEkLF! 
zKc`5Wzu7~zg@n>ykO8b72nYg4>|F7cCGpxy`ThxjLQ7MX|(v^;d4pIUrDosF< zB3%UOMQMRRh=_tpmEIBQHA?S6PI9)I#z1;add~pBCew)L~J`OZ$h|4UD81cSwwDd)g-|rXCH*` z_6yK9OlyXKQ`5Q?0$4_veg7ihAF{4O?uZ~P&A6S6HB+d$*(`f9R4iVh;wMzXEu-d! zHm2`mfjMo2`J{1EaK%o?r|tL}ZmQ>ZO#1GT%~cVHQXH`k^$6S5mO>PzNy~9b!JlFZ_g`FVk<7=j^23SO!!ktWn+|#}K{d=!UEZd%!*uD}# zAiQ(0Hd5{qW%_(Me025rx}fnV0kZ1XFCGc^?rdd@RA|OqcFS|ht=itK5s+q~EyHO} zS>E;cnY7+|wYqA2ZPGqZV!&QnCts@Nl_J*(V*y$vxM7{{19-$nm#z_`d9Qs*zyWBs zg3t)}q-fpvmV=jyWs37%)Y55oJM5`}?We)pNl9}#lMR8Z!;EDO!`H6V#;>I8xvzAO z<2VQgj|QDx-P0nY#cvU?$>O~a`@Ub0`U(y{i70P4qfXVyUyMNEQ5rGUU*l(d9PEuj8_IJ&q9h+&z=T>xZk~mcx@4~`_Vb&SqFWL zaQTQ0L9?gSxji^{&Q#B)ItfgI7HxI+n#c3*1CM?kSF0&HHZ{{ki4R|@sA;97d~gR` zmnPHirn1uS*i`N!v7{oe+s%r>5ug+7=D?MdS*L_y1)5<8DE%J%)IxZtXV?j#CN8EX zaucl`V_%yPE^_r}AzC5gC<8eSNZ(CzW1RIwGxOo*VGs1a4g-34H)d(Zq1Lj291 z)h+4q+lIaA2xDb~tt{}+I7=bA%5NH+*{KgrUTsslbDqsrO=;n)hPhVn?Jh1S<*#ho ze(-zEaxU37X>*uUT2kHQ^6k`tVqpN+wFHo#Z7-__znNg<>cu(8MQt*JONlh<+K0{-j#wlbn%?+XwnZ^3aX z=-HU4IsCBQZAFAXI&WKNeq^B>gD3MS2TRVhp6{rruN@iN9Wg{t2AwZ8X=dF0>>t#H zRH#dxJLj_3*oKV5-@j^{x<-CmcSlxI;ky0X-cO$+HO~s6!h4jWwhivH>9v2mvD>d; znY!zqMB;`g$t7dBSFYh|rdX+wl+I+P`Ybz@Nq7R>(;b-p$2ns9^!Wum&4=v}L%;1# z^YokaC3~0c)$GFMRGJUDHa;3^mK@p4)#?q4&EDcYIUns+t##S#e&k+mRhf~0bUVM7 zZP|BtS&gaf@MQ@f+rb=+kNi-4B75>9PK(lX1!b6T@)b z6`N+2BCb-YWM1JsX70P!H{VHHs2!%w!UUAd38?|r?oujS-mp2DvLVi4vB!s;!nkXw z-wsG=rq~#A5sVzibK|hO8|%Baj;!w3_p0)+&d)s^y-Yk8&a$itiZIGr@xL@x+*1)>LYgrx(3b(;b1zn80$3^JTseBa&xg-bgIJJz!amR zPpU!E#ORAtNdW_{$ETBDY8An`-$6yN$;K^B6k7lgZR>mTQiN-nRZX#ttA-fhQaoWg z-(M3bzFwx5Sy=N1Hq=L7ok0_`%D|T$?WT)VP{YiYdAK|CRsNg+b@Yc%oB@D42JluEc8$)V| zyy$20+E_*Kx}9r;q)N*5C*g9XeWM11KIPp52LxzTZ{5(*>CX>n>JKz4eTP<+ahj&9 zqQjqmv9-PQr7@)2C?2E4T|4l4w@tyFFs#bfMV?LjdMCBt+3^iFy(|tpQ0c}IeTR5W zV8%XWmhdLui@Y@)9lvx!xlKtslb5z)?aiUUc&ti_reUpbpqIgiyEe0rXr*vONDjWG zu;#i~_EKJ2Fh^uF4BE#~p{%t)&?q1l#@@lq#5j2%_4-O>>bRuxvO9*dUQx7uuR?EN 
zkpUESx6Dj!{?{RCcGFWi%6{VW^3k=DGndmvL~Gl^dQ$A_y@@w_nyjlsLZx#=#Y;*+ZOSX;rc#4BS1k-CoHs>4{rvkWi*Zod8ZhX~B?s9Vr&FjRj*l=ee>1JVa-QLrg#ag8KljVHtn3?gc5p2O%mmH4=q4a)t z&VFN|k5%&&g7#70!O4~P+c>XlqXp2^-o?P8Q@kKP>s+iO-OCiYGQ}RKmKCjJx&{0C zs)U>!aS+^u@S`hP0@I_g1J}2E-Frq553Q2aH$1ka%kcO-Y@dc>8ec9NHX+m$U}$lzjL z*L5}Nk91UJg5hrC+1eN!|8CurhcUzsfAvFfrFfemLgw;!w}*n$Fc3;-MmXK&>SYA! z2+Og^%G5H|W}zK4%fzSJfuD+RW_8sBoB8H<-?r665jFd#9`74T6oJdVXI~$3094?0 zm&WG}r|g}>&z<&3fPC~9y>^}lqPR1+7|u6N#&6D?LvDmlWe|yC+)ypnnXC_yJ*$)O zOOP%ko7zPld8alN)}4!R&8ixp#_BZo%oj+l*qP_x}+6*0_-01_Lyo@-MP0Pzndy7&JBoW z;b?o@HwbR)8zPwVaoxq#J|Th3g&R+EXTFeD;69?*CX+tJB%$c-l?dX))jC+gsBYr3 zG|}(i4P(ODw!bjl?@;9ULQ|vliCfv^cK&^w^!*EyFC8J)wqD-XY=FSN2FvkNsRYZcp(Ye zQv)^yrhXROr7@wsvtea>E7>veX2u`QbYD?lzGJ1;sz;1LMYcalVrhjnto^!7Xk*uim3WS1VjNsG#}Al33hNJ*CM1w6vWViIw%Bq<{!i4Y0_7v8VthN_7FvEW4YU9mTTkd)re77c= z*Fs_5)0B`wZ<;LEHCnR?Hk)02A+qE7@m$Tc!dO|?x7G$}@y-)#MK4PhcjcYN2W){= zSX+|#K#c-ls+x~!5_|?$SW#^v!8}uJElss{DO_iNs#H!3i&!3u|H!_PX&N@axW$T* zE?n!-^;OnlWjHpPH?w-VD)mE17xVIUr@l{I61P4oz?Z`r@Qd~YR(NIjEO~cqDoM?eVkkY}Ou&!)*anYw%utTcOZykxVCu{Z9Pn3Qt3q;xn z1ZeN9Tq2%-MCzUssj{6%rAa=qAtp(2t-V3hVTP=BEDPFOjde}YI$kYYN`-3ucvKJ)j8#Mn@lbn{j zi3|}-*|?o^{W1A~m}#>_VL%4t73wKI(JD>I?xkKyz0X)>gN6b)+du;2>xp_%D>87H zu<_!~N)3z4j+f3n(4x|9_|y$7=JoFDuh#^_Z;fY z=k(+6*O{cUNd3R{q9T2kNl19BzIoXDW7l(K~@qdO?ZG!iE&qqJZf3Yx;52Qfu$= z%4TB-PJM4B#>@R{OoKOxsrHJw`tGLT$5`un#|R&!UdIhu5A~_#WsS+H=*PGCJPoMAMRP-9ww zT5avcog_h4v#XE=A}EAyG~$_ud@gptAwYz8l^1iiMO`V;831iWI>-G7H#`c4S5B<@ z${`!JKY8f=Xoo#`=q<22JNeaWN7A`RrE59A?~qb^?q@TJPEPJonE<>!Qco)~k zY63S7sr}(ejQyb4djlm&&9;L;*Z_DyJ z9E&oxmPK^wO}*@Z4)z9`t=?dVs?Ec6gfP|>8rtZxd%oFi{_5r%C8dO2Mz2<1SueHX!34JoG3^U(CGui&^+iM+P z+LA$ElVYT9dzN~xZ>0}qVz>fUP-wg2x{UA3iRS~~_z>{1hjU%0S)saJyZHF&zVmf6 zuH`^U9G^J!Ai&Fea2Cc)wTb0%OAA#hS7(+DJC9Xh>=wRKP*|h|j^#k#)Ch}tNhZry zrSQeO)ymTzT+0x;mG8~Lr^Rx~f@>==Z_NHy;vhClz6hJ`V>CC>ShPVZ&&}Epm&0$n zK-M;>$Xz9uHOxqC+nWbJQ@@VbGPycv-u$}$jjUD{p@*6|?vC+}Jy$-evu?D+K-x3*L1o$(hf 
zl;s89@t3QnXUMWi*}m`H(irPkXXYxS+j?&Apiy`Mve~_j`_QE9U7Wu<^VLqSWr7xS9|0=+6_(fnH8>K1CS;Q}#A<&2E!hBNnJ z)UHBX@~V`jN-8_yHg7IiD2!7f*J-+Syh(FA1r#$39})tWc(vX}8^)p15?TA>buZ4- ztDy}cq>jH4lp!_Ebz;zZSBRrI>y&}Z_@QrW=w)Dl2*w{dCJ_GkAuwLaq&Se}agK7` zzAK5XB0@^Ml%Y+@?Df0)-@}lFUF5mifc$TJimQXau2qx@=29b4ylFtx|@T7SW)Xe z`>n&XQd%jNzN_CJr%zIix0&&#Q2c>|Z(7HvofyvC-XjqzF#l_sTV2^3 zQWhBp^O+(igtyjhBSq;C*VQ=H%$)HRe0O3?#bQ7}9>a~$hsXwtq+6)YYZz~6T!Vj8 z_J;aOa-VSwE)X-?|tqn`ZmYU{Q zkpnt*a)-0EB(ZgSkP^Uz@yjW5L8%3DRpZ`xh*@}=*$z#tc%cJBC9c@jXSm09aJMT* zMU{qmr>f2*e}M*KU7$1~fPSueJZinwIYRj+(9(ogH`Mp#5R+fgxSv9g3H8nSC`_B9@bTX2O^)LX#NSog5aaYdX>Dl3t;+ zQ`^+u-ky5=!s+JizT~A`D@7z3a$HXhd3W*i`zIf!cyx>`GF%hNbm)j}T`qf(RU`aW zmi`;O)hVO+eEFxEBHLR9QLp5u@ml71`HN|uz@_qDEyZE;a|Ac~KI73=+Tx$~$WX2Y zdX|_U^()iU9B5u_^}wd7v#lGsMJzk+{*FI{Yj?Vr=T6fTX@L-|E+6qaa!avmS~--) z;R9qgHgMJ9WWmEnev(djS_5bw$&M{uC;b3<6>0UuA=p?Tt-~x@CR%n5I`@wX)co{y!%STsB&d4Y&#>Z4;f2$vs z<9=r$ex_pkGl#Wo2;EXhB%hi}w{Q1{PJ3+(7c{HP2kw!oES;7AiA=|g?jeuzWhI(R z?`nM$*pt9)L$(^y0-jv$Kc8#y!1sZ?_m-l`m`PTpBHqt?5_WsuqURvPSqS^6Ww`E|0NAoj!PT zCR6LF-T82>H|j-hJw`8o4mO=K=E>&uO4zqp_v zx>ZGYqL7)%#pcCJPugudS|Up=je?`6aH)M}$J-sQd+Joo&8zt?a4k_={%*$XlgGP? 
z4O!maq%S{yjxHwL9;lwwzMHeq9{t%weJ(L}eLY154=p<%QL@fqmqoi0YBF$f@<$Q; zv8~9kS9x#F3662+9P9XQ`;Qu=ihFb*x6pIXG*W* zY=ZG92qWO@*YoC9A|N@ejAe!u>g{nddZF}3gHFA^$WTF?#|D<`AEorvC zuU$5mNQas>Pdyd`iZ_z{yQ<9`K1J}zr9izC9rv1` z8f+7zNmWE)*AKyKXv6n6(1>)6%fS$}N`tc=cXyvyx5b@RZe`rUKr9v^M~6lBE1<+FQ@Ht6s-PIEI|gp`cS7M zVXI_WBv&v8iGEN8U#j)^`Ihv_ zPKkmwDS{6M&0?YywYT~1E3P?+PGuE3;LKRjoi_E#&Sud7@TKLa?}mkX%t4U!X*SEm z79wGINWwKVIMpjL*rYI|80H{j#xK#v_d$1FgD^oRtyTD5 z$|zlBI_O2F$JX8kecMx6KFteBB=}nyO&?}?O@pj=&O+gD}E{myX@4ZK?Wi12qP3A6vMJ-$t#bU6Qnl6!r@lb-x?9XoX%M zw@N2F;%7Tw7Ngs%*XhtGyIGnG9t)%x$H;pk>Q7j6GFhN%Jnq)GJC&}CP66V=v?u1e zm}0PuC#2$t>PR}iIm8vGh{W9c70~jam1+m#Gv|@scLI!yxoLHrfhs+bpS5W)>1PJPW-_s2(t;gQb%e%Z$3-O;Mkzy|BHWP-yk)Wga@<)S_;pP+jq3b*tLJbe=1g zY0cX{B=LD~Yw};2knC<}7>whfuR6MfXXXV}^rp#G&wB;;%Ba?sy>fvE0dw=3b_;Ow z4NyPi9wEw^jS9LpDv6)=`z~ShLyNs=07dsEU0Ua+!~C9+L`3j}{!ghqQrMLc{5u54 zF3GN5emhK}kYp%?dg9V!JvHIYCv&**z=aweV(RRSbM>t&eY5ev~s_ge96BJv}VOJ9GOiy<;uo06S9f!#;Wpg0TH+QYRs;yd|)I$hK6>r9hUgkBX)`NYs&i1faTzPUX~ zaT9qzaYS&b#*mn`=HvU{jqU$5Pcmw&y2xluz1#>F?zG2)M=B*scHuRvG2Fv(S z?a?RKqf4xNfM>TBCrN~RYB9Bpa(vD^w>fOKh?j2W9k~QypH4_)ZRhFo1)j=&ZbvFp zvhhaFRV!Yqs9PeM`_cfuphe2PNqO#9+mS9j{_yFAVKIvF_4^y5Dd+5g`PiZ3;%F*^ zvzeOGI=4lhGEnYD+Ng|s)Oj6pPHxs_n2nv!cJ5>cose(7fI267UsIGaW`5hQ3ZVX_ zXtRFXcF>LYK}Rr7bBp~xx5Eri0c7kyA#nY_R?uGcQ7pTIvbFwIHnEmpAwxseICxzB7JY zyxLqGKv5?iUCS~V&u-dxcR^FZ9DBIy1|$0zzgLJu{thOabAkdL5WEAD7gkdHy=6-S zpabeLKK7=Mz$`+Y(NDs)u<@Zjai}zn6YU=@Y+3suP%Bc!Dmf_hnZ9K7H03R2I1w;=cYw&Zt8!*L$j>G#IyV zoHD5P@gz~X08bJ{lQ6zt+YHcxi&qVn3#Gtl!57|Qnf8&9;rR9dZq>%*t*oq{o+L3t z;llUV?Grvyq+rq4E-vP(-y!YRD&YS zAQX;WPo`1N%=*fMbIhZ1WfstE3FpsSF!=}Fl-SKS{0Hl@@MR{*C0w@k6}y~Bp^Y|> zx6`xrU+^O(01jz?_z)zoyNdQ_2Z&AC9bU>$8YltNVa>RD&DSZ0l$-Kbp%7BCMs~w3 zD|h1oj!Y3VjOm+wl_^@RtsGf6<64;D3M69p_Qj!nBv20fK33(#Z?P)aX?rJ$atPup z&IDJE8SF>LKf4B+tf+K+w#*ng2XmCVjOlh%dFhfWj>?>7vIaIVU}cb?5ILw`-2%z% z!v13QUI)k{^LQ~l9l$e&ZFo$zW@I-SH33nn|3URqmF_z56~my1|IVCdR>BxL9UnPp zUt>C@I(OoyQ9`|l_x`NU5unss@ES;7SXb92^hR6wJwQ45rCw2GV}2kSiWlwu<0+^u@bAfL!>InM!6<;ZhS 
zO4vcg_xCA4o63w_kQ^8S=5?Uelt$dod`N!~sklrV(VXB|jF?Ck2~-rMVp+`RuE(xi z=IO*%5PZY#wCRMmQR1npC@TUw&ElYarD=x-E+M76zEvU&l;ZW&6S#naQcfKfb&se4 zCDj&yl4_4#KL9=jG0y=YW_kPPH7e3!Z2&x|keyKCJmQD9CI}~w_C$!ZObiw+mpCC> zW-tGW`&fNM4&zW2U|lo~)8LCX130MV#1iHGtnyB^4yq1z>~Vqo%Z))vgQ<1@UlLmq6EFT6hIjOV0Rs%6@;QMd%t*KSGd&t6SQ1z6^A;Yd$fuZ7J z*>oaxFc2nVqVbV4=!@t2kCc^T<0~G)vqW5O^WL(@?chbt4d*V*ea-v9ipq@cxBfLZ zpHSA5qd&MkZX|Ql++N-vMEo~Eg=jn-5<(S2D*Sk`k7l3qL#tSh?pI>Yr?j=~j9)&x zOjjHZAr(`Gv_j`+4$H$HBkC z3A#oePa17dwLU>!l%=GF&<$~?A5C$t?u^Eb#M4i=`%j-`YF#sux#O!IxMXBF*=N47 zKafuy0Sq7a(XNimIgz(A@>2#}22?lq;A>H*lqnGt`~AT3D#QQf&h=N!&WF-(6rYmM z2!Vy38yj(G4z2t)ggZWW!pAsW^yvg;tm@JX;RLF9_xi7xx#k0^nzb$)3pL{z@RG1; zJiR|KwkVP@ng-mQ^7f!~ir?+LDv%2#+G;UXJw!^Syx*7R>Ct097=~Qax1q}fVrMKz z!j8D4In@f$L-_AGBvfWo_bK0I`It9ynrhnn$<~E0Hc{Lx&4Rml$=G1 zAsgYA2kI)1bZ_ko@HYvA0dSUT)hYO6;JRl3p%Z+$`9H|;JB+l2XUL&BoWq$HphiO2nskZ4I^4h}|+ z{oCyR{-S{gFb7ZgtNx*I1{FOZF-u4r}!5AX236Lc@94rFa?*UzzE2u;~0+aFp>+Ke$)iP@uf% z!0xJ9d~7&j;LmdxY|?Is+TI7Q8Gjf!1z;0raGZA+Aq$O45#DdF%dXO91KMgGpH_z2xFZT?i|RY;9hLC1#| zzpWSaR4a#y-kQMovNXNZ|EkG9wf3hK75ng(mDJeu?;i8aY!LEMT8`$x?p^x#65~|- zj-WB_Sz0--a)(yp--z{BWfT}^6C@>nwhrc`@%}DDPTdAY(dY+azm?2~=YE#`_ZWZI z%y7X+obm0s7k*o*`=Bhl9GZU;)4%Gda#lF}?sR|fZ-vkQi1UI(|8ECH#&K5dn(;)F z9xZQnAxGVT>}=v?=UHzLjXfzA1=#Y~5c`yXw{7XeoF6KoOyysvB3K~_FV9Lxq<>@^ zRF^ianCoeM$Li=5KAFKvdpZc|!mS*{Vo}ep?E0?l6Kh+qrBVS~Yi~j)+$8jejjR7j zzG<8Ahd$9<(uQN_S8huu1lf`Mg7}KFKE5rokh;A tkHReRyM&_4za0v{8}2}Q+vhfqMw#`<7xfwj9sqx;w>57S-F)=ozW~m^{n7vc literal 0 HcmV?d00001 diff --git a/docs/images/class.jpg b/docs/images/class.jpg deleted file mode 100644 index b51ff670e67d989f65c3029160e2e5534bd274db..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 15274 zcmb`uc|6q7`v>}&1|wpWJ8Xcece9k$~^PJCf-p~7a68sblK~hI7 zZ7d-e41%n|A4ouh%nswcE<=!=9kc_2AW;Yw?F9*e9=HYC2tE+BCU?#MUYW>+|8Kv2 z*l?VVR)x8#lct0LKt9{l&d|J$CJ`=uZ( z_=XEUSge0wFc9Yg=&OYU`>*tGfxb*2SQO~5j3wjoum5VLfAL?xX7!%q&KBUF4$zmv 
zdb+uT{s`#ry!d~*?(u)>|IfVO7HF-tPzgwW&zwy9N7+WalvfiH6-(lkA5G0%?5X_!}AjEbE;&lrI%jE(A z?z?;)Y}!W%a0nu|y~xgoX$$n1$oek`hX@NKQE^AD!F-o?Pk zwPJGe3W^)IZqv}*zGLsc{rU!m2P_U9wzRUgIdbgyiIb;*cv$yKmpweat^@^NyB>nO zaWm@H?K{yicVkmh@7+&JfABElNnU=z)52%ZU%Y!?Qd(C2;p3;e`i4ddwdwPhw)T$B zuI`_|dT7J+k;=x^(u!O#2>wqj@bjN!{};K|1G&}+3BiRBD{{fs zgn$oxy^!!`U6BpuP6)RE=`DLAL}d;pKYm-gR!z^DDSI*ShnSrDUfNdHifF5n{XY{d z^8b@$|0~#kf3BVvQ zFouwj5CZ&_5??DW_5ZpF`hojF3VNV*a2VK3@bwTH;>~}CZ(5f#`%PPqs=(my?YrP_ zIC)OKk*JKkLlnnjS(-lukQcE|Bk#(_@?ZEt4F>8$c_>NiYtOR&RBr_qYuIzs5fN6A zT{7((#NbZWSjCX9IHE_gIR?@K=vZQ&0Q$->#9J*pm96E34Gq>)4v++pUI}da!tRNd zxB0zU9zB)oaMA3?ig4OO6oJfq@W)U9bp)pKH|X@l)A_rmqpZ6B_o3)F%RlqPED;l~ z2m!QKo?(58Z?g~;X24pVXcWC))y`?%$3ZPpID3rA+csiBSZhcj?bco1I;&-^v zX#yxJ(6IJ@R;kHCF%AU|GK(vfBL1VJQ7DwAQGCqyK4NtG8%`XT%6oyjo(SmvSH)tOJNvINGG_F`wTfd7aceP$|Bmv>4(yuui_*foKd@ zLldkin*HQ60*GrqhGMz=L36)5BC6e!TS~XP;$(-c8i=Jzs-`HMKALT?@?{%P-{EDa zNYNE@ad%!*9bC``4}@Mfc62y5LwNMErX@xIT{#-ST&^XP;;*6j9@~kuExcz-sOV~m zO~7`vXJZ1$=?k>*MszaU;f%9PKw{W!90as==mr{DsT|P4P5ft?yasfs+!PU*%MXE` zS)CqCg?&C0Me0hAk7(I9WYK^sU9WVOHXZd=0Ac@G8O2N{bXXPFPm8erqa0|Vl^8vf z=Nm{RqsI#Hg)GD{nSo-`YY6I`=+}6CX4aD5?ULqiCx09H$q$xH3&-mi$897>C)?gX zD5IyT@m$5KGn%_R9KLSdh-4u;K@ruDdXSf2O)3UiU6o5m2#x!mIR?ul5@ zCMqJQEnRVey0{k#Ut{a;Pslmq9Jm`t#Y|W=r}pk`mE2>fQ3Ppz54;)X)i-Yvdb8bZ zvW8wrJ4YhT8!@D7vemjuKH@XKq3tR^MB2Tm-FinZ*yoj4)^3E5RG23G4ICjckQr$O ze@r#)Tr^>lU3pI`&o7&d6P586%g(*y`ezbT_8I!<%&CZ;w%FFS#qc~f*6|~>K|#N; z;Ci+M?`gFTt{?6GEyz)K9K|dhblJ$ecartsZ(G~vkqG*pS1E@Qj~~rcR^GkNIv3g2 zCK7lc^?QIFUBQ5VVpgAVpM|8L#n6`>P~3w|VFAQ;B|rH^-S?;|c6(*HcT7@_b}+7Y z^y2O>Rwi04rh5oo7>N>xUoDLUO{PvGN&@UVZx32?TqaDiu&}R>=vnj;7kT#mr;5b~ z4VOeU5(*2Tu*x7#&g2oj6dUF4QFNl|Ui`%_ZYa~CHf>NLmvbIFEP!C!$S?jJn^-zN z5%4Sm$rV6{--=IT%l70Y%9Wb#!1dD6bvnPwhjSV+68s-hc>9hyTPO(lQ&h-=n7ZLUI9+Y4KYn~7*@5kO~>bPOnn`QlsEiiD1v1?5w2ukl`-hw4E% zGL2F8{ORNmn%dxNFUcy?16LK5et58DfH_$h~1dx#XnP3NS(CtlB1SQpQw zvf%8A64oxDT04E(iNYAJzI;NZPwp>^u$P`be%PY%biKupgKeR=uJ!zBGUxSG*pJXN 
zAl|-_k?gV9W06&+rL;313?y^m6jP@mC+2z_E;#3Qde1=4E8kmdk1nmbkxn@Hcd^@( z_iJki>Ikb!i5*GqB6>Be=r6mwaP_J`)Ro-(W+ojf^ zWiO1~1*4a9oFPjC{+()5CYte)8BH}+Yn?OLOJ47p5fbN1_?+u~vpRWSTJkIVns-)S zfs4&YV7iO3nM>LZvh20c(1q)bkQRz&JSTvRe}qd~8GKWKlN?xiTi>BSqefaG|9X`m zvxVFl186uE8Y34$I1R=*y=86gh5i<<(*W>S?ZNZY`My+tU031dlktS+L51pb0;o}G z7%qVBO-v5TNYCX8ZNegyhx1!La>+{_wMjN=IY&a$(4Wu;Ai)pb{Ng*CrHhr)>{ny# zbDRcU0kp>>20hnr68e)ON`)_WNANqzGv0#=2YJ&R3%ofvJUkrCRQdAC`VL3zqgB4C zNZxe6KA&IPgHx@X&lQ@OmE@@2o5HFO1=&&B&OAyoKz$FFR=Pn|W4_iHw!7acfa;57 zfBrhyEiZrI#9V;Y#`GI1L;f#b81A%zU@ZmLYoL%|eo6G}K1p3OR5}_afZR~w{H?ru zTpcf#8qKFDJz<{&v~{k0|GU$Fl3fqze_i~g9vBtXB&7PJGSs?*zsr*V@sdopaJp+u z_jF%FMbCP_@h(g2QDJ;9@_l)=>pI0nMV2}F=4CY;vR04fJ=1t@g1mXuE+Xw`?xWmt^YblK zzh5CVX+pEe7zV1GcW-J2|IBzAP1w&^TnPSM%{73(Rd192 z9uVJ(7%j_TZpH_&%7-M#4TI5R=b2pz%=)SF$(LT2e^a`h50|Z7H;n5s_$#6$`jeoP zSUNFSx=`aFHjaKf)~F+j7v+R{xjpypVtXv3>oc6KOh%IW>*^QwC;v@O4q0Ej$$joy z*h0J{jw*m&ha=BnRkK@$!M zx}4v!v*_1}*FraDx6I6-Qr1Gr%SUmF@U&djvAJAilTMte7;B7?N-sun+G#C!N-@ZB zIijQ=eqUj~_jTt9=|4-Qj4%B+bPos0SzLXtV4sR zdSYZaTIm|YVc4pk7>zE~sBs}E;$C7{8VyJ(T=B6Vh3#|CJ=%SP2c>ErUtV@mJ;^y< zHeRcZxw0eO=x~w8kV7L{>Ca2rBO{CI_NSd_4_k~R^;(e6uBvz5TPBpIr0vHy7{(~z zeomiIPmaL6T^CRTH@zqP2Wi*{5jJy;n96*8hp0ixORd#fSI$h|*{8B`(&Y_H`0FQP zbfsyJ0cH>GXm z!S1}oDwgZo<&(;UW}<@q9yCpYh2&=z7Khes8u7o;Bm+&2CFVHU_3`uOa%pDea z4kMayK^u=kXlkHk%R(+)rJI z$nxFsDj;hOuGkJ83I)|JITt^plCMBa9ga0XU)munSQI^P@W;f8*L<%K;3x$R6tZ%o zX}0bPqAd=EPjt^V!VOv;PXj?#vGNL3ovqDBEoEH;!h9P22cr!B1M_k?_NS_|*HSr- zP)k?1u@u1MPmGZO(t*Q+>cmC=KL3=op3rgLp?**#F^I%Dx=)q7@<`BkfY;%cx=kvc z@LI$QG}O)Fnyv!SRiq!)fGORCnsLA@3835m09+`m#XvRddcN|EVaseT68I;8QS)p2 zng4~mJt16mOzBzylo2-6P6A5?FjH$VAc9n+pRS=$lZzBG1(35Y+*FmL&-#a}8REkL z{BBnRNSDlf+hN6@ES)}teb#dG4&!xR*`rrZTR(_W)bGctnVLFIy=Xk1ov$TjSUX{c zk{=%|NpJiXN=L<*!o9LLFx{(lm|ZzO&j-!`+1;MGUHE7$jAkazuudw2h6F6Fm*sd4Vv1sT%c8Iz@Jxk$L`7T$AtzSndV5y(JX%*tDZ z^x#a13ms}BP6c$_5J1g=Rm;T&pXmT9j;zZ95MlMPXVGY8FS3t-+UFhzbCK%G!U1HQI zYX0C97x=*+)QS9f$?5fvo_d3v&S^`yWUT;v`%x*o*BGB!$(5l07PdRkn*x 
zQY|u|MdWbV{bfh4JqJTa-N~zUm1wt$3l6ieS$1Mw&eQ+JAM|DO{nCLGUh?}zzTQ7s zq0o1L^qMm5$}jn?6hkrK-|JJgo-`8)v)oM}C)lL(rzf4VlBwp?w&{FME>h6WV+(0p zD|}O2O#A~;a1^9WwBJGK8zdA(3FDVsYZ(e!bTnld(2KU$QmhD?FeP|a(6mj&xbEWR zbPeMRsMp)ARqPGNSTay~(tZ^gfby5(Bo4U;;Tht~jktZlNlQt~_a}zensqN_R;l{}N zkEl{qgIX_NY7h}DfZmW|CI<~PiWQf-BXB`l%TC;bR7Iv=6nTz5yknsTW^{8Q7vVlO z*jojq*zXQlY`>XOUrb({D}+Q9$506nB1Ny`S}v@nX{;x*g5cJbO77O}B^N-(sigG63ELK@907D*50;tC3$nC1w-GX=k{!-C2F-CN zAVrGaK$f{$;i3%mpr$vXd(!eClX%9=(AUJvrgk~cL;y*8!ptCiUU^$y9WivSP`1Ul zus|Er!9NZYbnw_;5}~nuO?Wfb(@bV>jq6s<-J-_5OCh&QdcQgUF}hG}RErgN*wXO2 zso=|FF~xh*@kxfSKa8muEKIv1mQRv6jf3&(*Gz#wZb$!WZ(Rf4-C@q%L{kx5b1|2X zJ1cqXHltTC3f#AT8NkdF7Dd_;$sf=Z=i32exg6+Mhn$FE}nH&)&vP9V;x zMVcSqwT)Oe z!nvEJU(=!xA%HLhIrQ?Q;;=EpiKRCDNA|XGV`;uTvSH5(RT0lhsue&vWO3pQJBjPY zi3INSwjy1JGFvyutt!Tw-U{eR4>4BNgx3flS^)mA$*M%CiEKHrU9?Kt!J1VeG~D}YRnWsI+z(xcuflh6|*agyN;FK%^=7Lpi#zNyc;LhTw};1KGDIu zWo6>3IZevnEJt%gx``&7eqO7QEIUQm!zmnA7eGSzlfnH#X05@`uxAI?-Iz4Jf9*w! z-hoF~H7(DMM@luM6eoRf9>^3x;?cwFhw$R8yX<9;+e-pyh+Va;ptdF|$=_1XOjU%i zn|lbY!__1!V)@lv5R)|bn6u)B<$!M!)1jP3&n?o3eAn)3Y^(IdT*0PbbhHd?$b*h* z*h3t-!)Ychg)+;5!MqSao_r|+Fl#s8l&jM%fDonA9~4{cZ!EQc)JU|df^bN#HZlgw z_wwS=Gx!AXVJ(HXL|rb1I3$U0s3qprwM(R6vGf*e5=4g&5`s!e-!gfzeQfJ=|R@ z3o6N3xtNWnS>l@E{2b$5y?i=Yq>wwSH^YH2cpt)3hsix^4SZ2%*?YQ<3|E$wkd}~p zy6=kodtPDvo*O62l1tuwerMwIsBBAW>G8J|NF#PBSjrw@!YN+Ed(pzZI89NGCxR#i z#nX3i<$P>;8a58o%QlI1R`5bvhHibHXMLf?(aX}fWI z7J^Y2|`119p6oYWC^LQv2XjD*z}+#}fx#g3T1 z>G;q0jRJ2p5)vm)1rAPJ4|xx%f4F0)pdFCzNJZsT5hQUH2iy44gcfueacVZdKl7!7 z=%0=Dd7cYrl^oL~E}XSLIxBoC$4%)FDUAX%(VBZnD`tFN*5rIys3!<9sSQfDywDTN z`|g)M>FtkBq18Rfy?jeMB;YfAAl%5|ml0j#F5d*N95sxL^VwOnZ|y#llV7P9Q>>8b z*c-bl-FLn_(i@w*Gvq>cxTMzTqfj=L^I5*uL7FpI|LJzZHi)#@hg zw4UvS$3Je^OWuDvxid^ExOgWo4vj_ss;47s=2>G6n6>*~z8>_xIc(>@Zz`*ZmhD~FTbr#?!b`&2F#q;+H0D~cXeIgOrwi7Vc@t&U=xR^vZ_!QY zEf49>_Jq&Ft;=|1c}EO`VM~iUoxIu22V1l@WXklk@ZZ=WQ~iLHkMOMZ!w1G1i+h_7 za%ePC#QPRzo1JarnM>^F5^WPpO?Nb>j=ao^3atq8Rj8pBg=sNc*k+uy%(%ufz5!G2 
zZs?_#J^62`>Pz1XZz!C0%=@jC`EZN+bAzomDwfE7qeQ80G}DoI3(eN##0elv3o{jW zROwh<&Yhq$cw=5Je-rLz%dpBzq0T$W660+Q_iW1=lWV)@&hNV!=Qcbt?(9b5X+8$t2WD{IIf9woD-h4qiAvc5jgGp-4xdmR? zYGHmWT9~i9=Pdq6=X{II-cKf9?H8&yWu98MchSdjv)-;6&mUrGy$Mv$+Silp8yvqI zPxzhawZS?uOJG$fQLcEqW$*nBCXYsL=G^=FJz%HxH&yfb+FWEQ-f!8B{}mj6cz=Qt zr<+=~(Jy98issXZG%vNX6kn|ELwZ7P`9;Hqw#`%%!{W_k-Uq%4*N`*D!qCW+X{ia< zjX$_*+iqoSwtl^}y5hW*gZwFn9fvMyzcqBKQagND?S|nu{PQ85xEx{rI<6L{nTesZ zV|uzt*GTe{d(PwBYt^o(ZsWIqIQcHE_e$!PBexG)D@=)JMV>-i^S@;)wDz(~c=^7* zbDoUCJIyt0t#v;i6)}1bDKWTpYcG9yM(|%go*fy!gV0SZD=TGMIB;y}$Xo0lOkCx5 z`oq_Miwn2!sxM2%D(mYtU;bgY@l_n`{<}XzvWl*vrm4$bg!ZNKsY&Jm6bf%x@~#_y zxTw!c^}cYM&A#__NmY!mf8TtTE(z$dP;)n^mo=V4XAQ!+`USXd+St^op_cXa>?$3X z)M5LF0ZRDZqDzrWB1f}Wque}`m(qtYN?dCWk)=_Gkt%ZkwXB6VV0qkbXxqLvA0F|I>)j2L?C5QMq`jCesu@pbux8ZE_%#!bH z2>z)_MLoVEsFC1<-^QXwR*T|;n5P<02@A>N2T(Em^_(m9^*x<`Pku4n<)mKqZo|-v zqa&|AYz>6|xOh-}Ze2h5iO7(BKK@>#R*RGnLQofDw~S~%-1n@stSdvJB5jbNl&@&N zRzW#FoDj*uvU@m0x?kc(Qgo%-u!^1&=UOgDZn^Qm(#FTtm; z$tichUi4}J#(9u?mJ{-Hym|vmf?Y0iASChit5+p9v7gJ>bf>#d%MQV8M67>2@P#15 zP2Hm!iItck2N1uN6jHp4BOc=qUc*G(pUaylkHr03L~z36Dd{w}HLDBd;U;_(XYJG*IxML#>^0@6dCiNuJx~I|G)&9g&6z+4=IRZqrP_nU;5& zs(X^9wN1A-=i?kFPAzriJUKJbvuXBqUWStX$2%82J{?tD6Z3P&2Jz>Mz;h~u^@XVA z<_Uk8KB5;t+g-IUT4>!y04RFiqnUs}AWnhf7QG;uT{WrkmSRE#2_=S00N^0qQ~;!t zWQhu(FTvbyGQ5@^3;BI93cNWxX&dne?ZC|I-IT@3xIWKk2_W=skca5vv8kJ4nL0h? zaKJS-<}69$4_XlyqX7i4v}~WeGlF>%EJXaoE!B|&E98D9)ho()+6z^wr7ts>Q? 
zZ1m`CodDuiJ!r-rCo31gt|6W|TUv$=b;hsD&SDq;L;KRoAV^5t{lB8&FL3SxG30b8 z7>@j^{D_KurTMAUAoU$H6wy;V#5GO)1Co&V<71;E^TqcWXs;>*(kM&(dZQmr~eDggkY8Y&i3=kGzILoUb)Du2vO%?9| zi3v$bv!N)ScMTZ?2ZvfvYq;(~q(wZl+<~jwO3=ky2uU+_D5i716wN$ni>BBMM6NQ-*-FUq?{3PAOBWg8 z@4Mms zO=C&s^XwO0X=&GY$DP$wE054CRdbBh8y7*GVYYD2VWjHUkk@dr0x06&U}zAotz!VZ z4m9--u6x1<(&7H|1!F>3Qt=VyO3b0q!gU!`L=}wPCNG9s10@}ztn5T^_*VQ=E{u5P z2kBQ39SKMfQNNg*3*R4^g#I$9`J+YmOUT~EdCNi#wU7uZ;}{|1Qz8Y!3Gm}>Mfo4A zm~}O&13jOGe6+TeO|3s&d+`NTu|^|ySu>;w4l;Y%q2SOl@@SuVnk0FEWt+;Q0sYNC zgA2NObG0M%LKNK|5QihzwU9}6L7 zf8QHTSNNR-pX_Z*B0WguIIiJVjy;~pI2!0cEZD535ix1M;{dh@+RRo#V{V==oMG71 zsDzHS_mJ+*K{*AGhB5sEcf6NkrS`l~%3OVk#orrB*b7A$A5E<6kPrMUoZnsx@3+~Y z_sZ%1sr&cbcckygP1!BJTQy&-W@)XJ76Bob(?0Kv@EP$j2_D?7nJF<^C-ZS9VtUg=3kXW;>rtf|4Iod! zvcegF@YJf|#PkP5V%6t`Tp{>Q8=hevIhUt<#ClFS$l4#NvZ6xB2u8wA*o^^Cb+m0P zz$BtTO|6hP+g$+ZkE&LeREanM?km+MkzeUNH0SIf14#uQA>BOS9z~k8`P}q7s(Z%` zbP=?DM0qJF!@=3G<&4QY%k5z@i8zqxN&RkBfOx*d4-Ur`7Z(;7WJeWFnnCn<)_nc| zxnHJoU7+L+W0JWU3gm?6M(@roOq&}jEEFP7&U3-)C%e(ZIWnHEYnhbybd1dTusF-s zov-(a|Ee>7s^#qx|K8@^?(e-$F0>O5=#X@TnbL?d!8m(xtV&a@qdNlCV?}FKS3yx+ zH2AkLG9W~Kcc1Kqj(+gD&G-^v^Ymkhi>J+07os?%u?)%MkE#N-YN8oinJhC3iN1NB zx$^y~BWN#FG)@2|q2ob0MI@(C0NoFUO+T3KP9C@)4vrr&rrxp*-nfT2>_p!wfPN+n z=kQND2_T8B%=w@MHHz7F0Tip$XhQz0Xr{UtA7m5m00&_w99@!A|DF8HR2Pj|!^s1q z-b3|R(0(d=^7kN~uRen2z66yMfCk-3Jdul?*hho z15OPrhl*x{Yu)@>u)eSxlt3$oDw&~~cy}vaOtmK17MC{j@~c(h zlw`Ubn!SZnSk9~0gy9XT3@essIC{jM>A!MXP{HQOG5=hAw*-?wP5bR_Qj`({$U7PslFv}7>_X`*) zHPK|dm4=_fJa@%6=kryvC6noDgZx7)hSiz+a%>rl^ZiF2hq~fpWaa=zaUKf&rKtKO zks}Mvt2QFius0vqv_x!_7$)&gI)k(yctROi$TVjL=dOR@kmru|_dozqGgMB{7{d6jPWk|dc;*kdBG$>(F zYDDF5qqYw++b~F^od)F)2?PJnG`pHN4Bj?C)n|z^x($lk`;h&|%?+55l3(2E}=AlWTs5E0xfn-aw#uR|s@|_oz+N zWc(O;(vFMx4U!{2cz z@gB9+n*if{VPYIU&|+_<*r*g}sHoAE@B(39UCIR2EJ&)K4EGQxVu*BWP-K^}!=)h! 
zM=yQ(Zqw~?_Fk8Ccjm^ZzfFZ+62rphbCI7!)n^cUqY`=O`*>O6vcG8plQc|Dz*`I{ zJwj24h=P*1)67xIWAQb9v0;yft0T5rCR+EsE5DzUu5?4BdZK+=}2O3<66DBWPav)MvgV=lSwUZ`JMXds`Iohizt7`->@zCf~z6<(X82O>cUTxLVhy;CPuwK{jsRcMrbD8Gcgwkf{vZ_1%# zej-Ed2hx5P8tfZqu;giTAx@`lhkbVWYGibY4(lf&Zuo_JyL5((l>9Nj8<4J`1%&u0 z6p=_gV*$45A=A=02c6%SdH%W?1TFJor59Z8K?H3tRTj~eG$>O1Y#y<|2-tlcSM=?V8MM8u=o?s5AAToFtny@=E(j*7`h zEO0|b?*esi#V474?RMASS?%%KV$$vVP5Izff9~U*@+kZm=_Io}!5BqPFe&vD_WneS z+3EDvC!nzZaEMeuhy3k_A;_bxh;bbYDShgI%p@-An z4~d5o6?QtGok(NrSs7&-SSNq$*z@LssiRB;wTST6EtHJay(W1HQNx(lgZJ{6h24KF$< z7K;+aON*x>hA|>mudZw`MypDMcZ@tReR5>*uzb&%iyyp6bnRopi_|N;G3MCqO1+_e zP0wtpA5(KC2F5A7N)sxtx31N|swb-5JrQD*@--k$J@vb{M711OkN+**#!qzoHzmw! z*^R-k^&6=!sI*~s*=5>&r&WAcBbXVzH&eA>cC&*z`6`@uy^Z=rd2Rq*v?%S+f$JZi z^`3M2{CoTP7Uk=2b+=~~f8YIc5|fl8TD@6k>Jc-KMrK%-O>5sDRzXt;GM#FUjjH^? zrxiGI6f@g=8EyC6)lT*Irvr`r z;%gx{$7_7OHk`(|!O=VD%CJX6I`H0~I4s+zi)-`@zn}SSKsascUk?AkRN~N`VTWf zW+>9TpM>uj67I?8@njK4U)Us?N2n6Z%vZVyy;`+ z_-1074pcq&U?Bp*gTCWv;utU*e3No_lV_pURs^e>u9ZpfZj1>X?GtW$Vi~ z*4K{S)fipkgHp**&aUEdQzhm?JYF#$mpxc_(%)sG<%P@H0h@LES+6`_T(YdG5i0K2 z$Z|vs7v9>5M-&l0SEF)HotHk@UV=_wTBtPaw%ofMwtvz&p+IgzH%K^ZV)p9go@1~$ z>^`=9r0E8Ze8XUw14D&g5w@oJRClFxA?_&0#n?t&E@Y4AfV*=qHEQDr&xvB=s z6^mPqNRc@R->Fi@etON4dKY1PieafI%D&vQcr-d@fF^eFqyz1;P*hXx?@vDBwXTxP z(*EChv;>iQ>xvF+*`43sww^_!*;<<1m+r}Q$M0{fW)3MN-K`r6FhkwZKVwy60aA%IxeyE}CcN?M^lJ2uab4zgS(yIl;uxx*Mi@)z}H0 z{9PQZJS9_zT~!p0xXCEH)<8_`>`}g~tARn!>oqQq~Vnj?a*a zBwB9oQzGL;dr{+9)~~Nm&S&0=Hqe%&XO|BAn@fCK=*e(jYVbw^DNTtW7I0JEDmIv5#U@13)5A_ZrCa2F-1 z%m0z_!F<{HvpzF*kubO~Oi>252xp<|(TEZO)MG&(h-U6-wu|^nfa4;rDcQ+xIeV$Y z)m_y1gW2JSk9FnX&vrQMw7bUF<>WC1kSpi(@_L;9!S1P2m5Uf@+#Y66@Anj2<&y_O zd%wFM-kOyEeuq?z$}bgj_}p|Y5n(EgH`FDESIXz%Zd&Jt9!{yN6Te~Fq6Z3|-4oz4 zA%`zJ`+tQQZo+CEk7mYv*LG+)fH*fHUN`kNs-zb*Q=T*7Y5?i8qQBvB&l z#@j@%n6XF2s1h1|{mkVfZp zp{FwZ8%~~aI(1na?^gR+Rg#53t+3h)@T{9roYpmgHU@BmNrp}8AL8^2Oy;IUy`!_J zu5dIcubHpeDIPEw%R={@J<_>v@K^Kj>*&Y7zCHVMCi?ciX797!o;_Y5U%58LATI&t 
z5pq%Xgt>YW^exoGP{WWtZ5z4!Focs@AppYa^Epk#z7@{m1o*ECAbdUGDw0gsz6s#h zN|=12yDB>xzLzKuYS^Ut8y#qa@MSg1InvVl%GLPSV%ZvREP!$y;@_^s#K%BP{JnGf zw)r0elZl(bY=GE7krEi!nrn<$;X*=j@{QpB(LEVNz+|`)N7mD&9KcheI$@Ktw~8p45Wj;84G`y7s|JGgs5Gkgg&~Bn4c7+1T7eKqTtLD~) z6bc)&QJ@|fh{adQ68XW!l9vBxkqCwcbGDj9R}(;iIxC!x>q2B@vB~&Y_5)CF;s>Jo zLFR<%o)*AAY@Y`3@Dm#F3##sPYgUo~`Y69bTNLAaj*lSm&P*E<;?f*!2(`i|tVOdl zS?PfNbgbSE$YB3Zsr-${Xuqe3SWZJcX)*bqiWZ(^S31Sdjj2{TuBrlXMcAAvqM{TU zkQ-K@=4AWIbSU>RYgRf2==E5;02017UHI69CxD8S$5I<1+-6>ZsY*6b#EJ;3w3Iop zo(h;0&{9>z5lfZ>deOExly<|ZEvwQ!8-*;>2RB z|IK+&QG#Z+p(Q`+A8hvkDsW1N_X)yDCoNfeTB=tc19r zXdLgbY>xM0wj@-q;aW2>x4CG#R=)>^`TcF-FY{GK>B;Qt*HfdhGK1SPo)tX0cjrk8(%m!yL?=Z{c( zzAe(e(`2zAHFqh)JFJoG3HvtKo70s&6hI9)>I*U@lg?-+?=ZA4yr(Z1O*2;}m!k`` zcwI|B`uX>u8rWcV#}TMjCXau{N#_cL#pl8(hiyB*WSjkdLZmH;-6IyEk3eypOjd!v zi0C}}aXw#<)1E(W`ZRolDfyU#tD`CpedcNy5ydJjUV1yvFM-zAhaOWoI7ctAzKLFB z98Ko}>U4Kl8?i;B02VzTsi;6>6_!fdbmOGd4CN!S1iqHM@KXHCG&hFC0uC?^D@wb?g3tW1tBVP(2MedbvAV2o& F{{ZWQ`{V!s diff --git a/docs/images/class.png b/docs/images/class.png new file mode 100644 index 0000000000000000000000000000000000000000..af44758823af31d4b3f5c3d9212dc455b9b0c683 GIT binary patch literal 80737 zcmeFZcT`hb6E_Tq*ib|TM4E~SQUvK8yecSFrPrV|=|ZG8bGcVVno6%hkq)5=NDEOZ z(h`+kqhJ#0LI@#&B;P?V5`6CSJ?~%NAMg5Hi^cJ<&)ze?nLT^T-9Y=`{uBEd z7#I#-yL$O11H&F90|V2|y)3|+7Xpi>z%NGdP3=DzU`XC+;6DM5=GUC`^%+Eg=e-Qf zj3*d&Z#Dsb)ERkxJ!>(FFzovAo{53sp$h}^KWz+v-<$uQ06&}G{QSKuoAIC4z`Jaw zt=4;x*}Jx$nE=Wd7=|nJCV^l3?q0P3Gca%pZT>J`yLsk21A`{RwaXWc0~qJV4`c|L z*_@<;2;FDmxvuR!^Vja>`-hnspWcsJwq$n@WWVv}-n7(?2=VNV@xAqxZ(}QG&Wjgl zB2INQ8Z-Y{{+ivKo%!u0J%*=OCa&#Wd?FkuEE7s?u2)l&8Lur`_d^FaEY4F+)f0nf z`wH=ErsH+~STme42PYHr-oqy~?=vuNKA7`EN--@dkJ>ThC+>)SHzvP_sGS*%#JD`M z+y}I>`-}N(l|-!;hXQ6c@d7uflMwIiaEYUUHV%WQ&OF)(D`=J+@N*UO zHi5%sBEG5E8xcqbX*Bjdmj;q{%imC8_oNgY`{QAZ|gUn2CK^rHb~1-smK#c zA79JBkKj!GiPs;CZ?ao{Vs}k~ueY75Vgt2>^@T=Ll@{-lQGrL}OGT{Np!yRQE2%&6 z0J=t;+|7*KQ&=cY4^%hj(9PJGh)LhnjWO1QaLK_zCxpF~djTvxvtRxPlHab`X)B2~ 
z8_Hvd9bT9qs4CXOJ2)d*;Z|sE{*>eCEx=D|X6tHxZj8Fj^y904SYbI+UX_ssiOICE zRO??5z`Uw2>$m~&XDztbx+yD!E7y?e0B%adHi=ad#Sla(h&$-5MVRPEzM#=dlO7mE z!QMF$$*4?6X}<(iTaC}B0$E*^O%d7c-rt9oArO1|hz5LDYqxcQ{_9ONk}T}BNmH!0 z39PppbT)$PSANl>$Emg=7m6H3UDtt%M>#+!b zoLmv^TEnI^PVZh0&ZkmS_h<10!cU_r4CsWs4b*)G$PKOnsEfLs{({x+9&L()i@@gS zMVc(Ae61`)tpBG%vFc>@VC@6b(TMZc1L-j_Ph_&{tav)r4W?L+9ZNr5R4K}L zFYPCPC!51(7N3*XMQ&^Np$+M02A1QnA~g9b3%w4koGe$To&ob6iC@YHxaRB3rv3hp zA45M7ydKWhkJc7Cw5cKQ3NIJZ6R-mt2xWIoqVQZ`)|}X?;pz`G_lg%IVwWEyiAEGP z$5Y$v+ZiwlB5_|U;(uu-@SwK0B+~3_0elhj4L-Q?#Of!_<+4l!;!O(0INZ=;3wnTr z6eG>gAqM;^n=^R}BdIt<&GPr|r9M`@MtOM?|L*uv&mT;d-}}{veke%7X54jKXQiB& zm~i%fZ)KCWX@RgGO?f0!dBy(<$Ea3`W7?%3Biu$2du=WdLa+Qq*OHSFg6BNmK@ZBe zYuS%`!LsOe`%Atyh9t+IW9go|9tV1t20t! zlspYJZ{Hiq75dS=T{uMfu5Ihm1wvCbjen7j^C7{LcNcDM0|Csc+q(x^3>TDEa-t;O zdjI@}?;}X&S@JPwf8nTUcZRu0QE9Y7Y$3WV`f{-BpFbFu z-KQ67INyoO(0J)mD@&(6r^C-%ZFjgi4J(+0zsk;|AAyS-xUB9^{rDr|!){=F7&6G< zc#w_gHnCE9Om6M3#?$n-#0bygz^#J2#dv-+G&g{d*NdnVm2`^o;pEA?6ZcO?Zp#tSzWsgS`J!sr z#F!s%#q$9>fI%}Q<5>&Rx7yjg+`VDWKi!-26Hz*X>1f&nTgn|!i(3c8pj%)5m!?{l zfEK?rjBEQZMq;a5L^)u$A4-&j(4oEq@qhY1?gEVaR9)?a_J0|0>!Wv^hZO~C%9rVs zXj>w5itrK`SDU|riE{<~}um3xi7 zZy7KB^`O4^!KclZYPz zJ5@D^G_q>o9GxlKl+OW90GdBLcrWP8qdgc zg}2Y~_@CLILOW478R>mPDbTKT+%`PQAckeiX-Ag6U`II$@skTQ9}efC(g*=@BYKM9n(CfB_mKvXh*>E2jYKwkPWnJw4@bnPELty)uQ% zk?G(j`-`C^P^fLIF4w*2?x}Ov4Tr-+HvT}zlueQ>+AfAB=P=L<BxpZr=(?E@|MtNkhr`MRI zH)gN#(w}qPD&(0Df4PjTFN39abjf-d0O4fxemYO#K4L%TrbDv2&=5QjhnY>ccOy-+ zrqgU#KBVV4HkBMYX&2maN`8{lo zptq-I(A3Oy-_AK%*Yb;gd7w!(Wq|lB4NQ%KVv9U#Ia%^Z#P-3-NmqYQVqJo$U)!b1 zCq|Xfc88{i&Y6Vz^S!VxLRtxNy+UPHRYslXMxwfyyLyPfJE|U(pu(1tc+1}Xd|7l) zKg?np>@|RU{TL_jK)$;=-&N=Jx~s+~1ex|dMYahv;2QuVzTeN*QSXH!@R)N6lFE9k(Jd~ftg`;FqN)@ zBpT7-Gx^U9^$;neR?zj>j_XTaj#joQjso?y`gI>I@l+z-YN z<4g(0!~<3}$-;T_y35)5Sj#=Wg0=Zc^~mWU;u*~Q#YcSH)}@_jr|*OpL*}7DL4%sM z{*L#zd^Z&>(z}PyEF_bCJfpc*s8??&UPbrRvmRVRm3ywXyeQCp9(gf^gZcI@R;y8Y8pMOG$nS+FuXuNp?&f ztyI%UCt(G~t~;_7v62m2gaYS;t0c6mlbSPBblTmVK7=-F~!ZrC!xN76)9!fL6ETa)^9w*rQML1f6UH*sfg^r)_8?-tO& 
z-(UFHC_`Kv9kDP#$%l)Y7!D`De&p|azdGv=TDG{Gn}zw40N9MYi2!mOEZZ$J)5}>xLM84ivP`p3zFB zv&<>@d>4WJfTF9@fM~vNTw~pBt3m;1yHs7fJ`FQ)^|(5nRFm`oquEhlLcc0OI7G_N z^ACI~E_?M-(!+;?@Mug|`n7r7@r6(&)%>@yAynUH-45Y0uc8@T7OHtpoKP@Ti>ohZ zp8+R__^`yxJ|#Zv$@E|h7Ut2)9E6ZN6x|qeGsaW=&79L#+iaqA#!Ry^DB=Dr1zdhq zTye?ch^qGRR6#gY2{Zts5RAW=802aMpTNVS)y^1`NLEL~$1frK(;cnV&5DVv(@MYe z5Au}VoXXB1c5l?r1$5-vX5d>)MkY(c`} z?2U14=L0i@3w4y&A!<^CZdRSCsg1e3>Fpa=uwJv5#?d361(pdk%&c!Jej%jPi#5Ow zmXGnn>$94uifUh_*XS^I1>TgY9*d6yxUk zOKp5sRxC_<@2n0Bt<>0Pq=)G@%E^?usAb>FdPk{nM>*p&Y@sC29(>^46N4!8aAQ~; zDwME7?9ol^Em;)T;B+=NQ*cg6w5%(k8aS<0%RXJ|7JLKvM;%?*kS@0dbH9qiYr;TKqHQ1V?flK!@y)D6CE20tR% z!LUeD+Y`{Re@ODn+zgr5g@V*0^;2%nj&d#tXe2|%qox*}UZN%xPiDnm8%Noev8ax0 zAObuFz;kmmDx*RXpM-4rd8(?xw)VM?=SlAPB5)337f+|MbXVY|JOz+U6`Nu?82?9T z)umn^U0YCdDu!Y*D_2v;`=s`9Uszd(eCms)Yu>b1-el`}v?u*O+MJWos9N$sgAP4V zQdlNn#f?>fXkn!qEVT}J-6+vOB)I-&JhZRM8jN6XZoBXn^Ed&o>|!>Y2S zl+1T>53F+L;};?Azt~*QNgz!c?IuJAT9j-8XFjegw4v|iO3FVETkxM|PD$5Uvz{~P zjp5b)fF+!cD8OnUhQ_zVOZ!Yf5i^4Xx z$(#9Os=p@(>Tg`F!R7Qm4E8vDyd7n7N#P9LE))ovFyVBaOV83-n!P2g5QXF%upYjK zb(QL4|4=9P@yhZ5i>b@L!+^9w&9{yEiM){6Xv}367 z(3q^?7@NJBYA{lBW^OgZDpX-tQIAoj*;2{yFjrGk1vy{<|2J%^SWym|ABKjlO#gMD zD-Jsf&6pTlw`@fMZXj-AJ`Anra2MWr4%v|0Fp0dUSVg`_gel!bmcn|LvrsCv5q_Og(Q*Lr%pDIW=bF;JvKWiO@9%)`j8G@mvM;eJO4$d@5SO-x(^5 zL58UNwMZPS-tcK^?JpIFDc5?e+U$!5KF+T@YxvS)nih3Kq@48v7ANLy(HU0b34`QCVp=E%)~$g>ivq6RZxA)$r>QMNM4%&3Ud8EuY zSKB%bG9@YhQu2Hu{EMKEC_XQ@#VY+-uz}bx4vYEgb@^CTpjqC4N73ZInDyCVR?Y_N zekIhP`096#;4#H;nCaZLj~BM}Hb)ZJ@se>@5A2xLGfYqv!;GZEcYH+@e~-!#zwNlW zbz&`Jcda!ax>Gz?%!BbpUWg>z z`@-(!rMU7}MhUIZX4-9%tEz1L4d#f~bmyqy+fx#GLSawrzeq0Te^V6(y)T|BYLgyl zI~OzUJNcY+H=>5eK&P>c@S5b;7sn=|Bm;5GLjVjot+sIfS|S;lbVWW;lJP<%)+Y4& z2!xuM4|UxCCOGw{&)cgjPX4h8pgCy*FMd&umpYbFq^^CFE#TbJx3(j?A*C0Z23tg7 zUfq@~ij$2nJAYaILHKgEkfCv#WI`)hy*;>|aIed5``!Bc>)@ zN<%qj21>`eC}BH89#E1kR|!ehlW!-aVK1zwsCAZC@^#F%agBZkcvvEt2R(M|-~#sd z<+y}v9EvW($$Y47!%EZMFWgvg?r@n+Pl{x{z95q9$Urf>ty^bil(aj)dQau-)5R!| 
z&e#W^@1M0Y201b;LPfPzX}XteP|<+>A?AUM4Ul%^_*GBC=8lKE96qnbPWM0>YtrQn zzchc>Nq1%{z#i|>_;^H7(tG%AG1T|6n#{$>Hz$xqx98+X%OFc@)-imqS+-|{IXBZ3 zC5~Hciuc=YI^3C)K-n#|RqRy}UT)GJp)9IwortkpBxQ8Zpb2*Pm~czelrKif+tzpe zg|evz8~Mo^M!fh?&4j`%+zTfsEoD(DftSG7f)|eXwRYLt;4Bu#!<}K;GG?TVIZFra z!d-2&C$k!D5~Pd;F0RfuKk>SG$)whACt(3pz&O|GR zUUFw6OIlK}tdQR)ow59-6w#UKg>_KTS{SE~HEnn4lOrPlA|&h5Cq~bCw)Ly+TK2jC zV_P9%M2F5wFfv83r$jsB-lM}^CERr_xZI?rYR?;VBEA?&o3D3wuV=Syn5YWAd(DM` zT=#S4id=noo{e?-jRK-v~aZ}WMnlv zxnZhoxjsOjQ>AK76p2(}usxa%_PY1yHyO9{OBK8oR4g@yGDA{0YmtFzIy+c@kMk3qnj10?lBUFmh20pfj3 zv9?|X34?R;6cJn7W^%o+#Iiu64|4Zqs_BFl`J1Q;*Cb8_&&*(bG+8oho$aW_sWc5- zz2uz!ekRME#iH0?e!r5AUw zW-oSsCLOTqw2goBsZPt?kk2&ELE{wF5IZjQnZaAPDo79Jb!_N*(@C&~(x7gzs;j6~ zVy};33kci8AP{+=b03)Bs`U+cL0|Es;(cHKPzC zN%wY~yZ|Q;VL*&!HRI++MQVl0eOuqGqts7I2b832(-`sjJ*oK)0qQUK&xM9!o76$+ zyOP)U2Rgtr-BuRi8TclZ9P(ACz}vA|8t1fIXgaZfl#P;y+_lvxYh${J?R-yqQ@qZ{ z#gVllfaS=;V&G&Fc^E^$S9*2JPoy|bd8Ao%?U18K5>{=`M-R_J_ODlE5HLox9Zx!ml?lD@AZ{~@2ko`4_|&Bf~W)Wk#z)kXCUq+!hz+X@QGbGVtOyG2lGvJfkPtD&-pWPv2xCX z`z)=YK>I$i-d#c?vo|-qd~i+;Gin?vho@Y0R%_->`+{tD9)%{18lvuhi>pq$(K1c;KBlcfW7P32|oPMEHL z{gCHU=H@P+867c_R+YU{pMfQ zX2+{xRy=uc`pmCY0`l?j9k_Oo>_{F5ozeJ_B;agDSeYJ_m-h|j9ZgYB46vMnA6Yy; zoE>NsfhvSW%M^jZgK^jqwNKTL-$@sQ-R^y!I*}PJI<@NrKF(*dtH`QKBV5VrsIV3E zq59%;HOD&&YR~hhaHu{%&$NhY-$0?XUATx7-u&Jx8$GsnW0xojIje5j^J5an+ZpL- zDN%(iQFK3as&EDE2oWrCh$yRDD7cfJA%&4DZnBETLiI+d9^lNiR;)q z_=w=H#%ybCaS@`3qh~Wq8v!h*Q$OKJrZ}!BISFvTarGhB~t&~zv$o@oJYyWi4-XTC#VtN6SR1&q9@hH zZL*srBO%(}#Od4DcQI~ZYKY3_(ad5n*L+uq?!2)4CU6X9Hj~;fx$RXl?-fr(cq_Tq zc(lxxD!qN}Q8@X&%?2GU#?u>oEks}G%;<7^lLMbl7FQda^xZMFPIwE2XSP93y*EW; z(z~K6<5NI)fpAj575Sx&Btpz3uds$`wcDYpg=J((@pcS>AQm?I)fKY#MJ?=qUt-XI{#XAvkTEM)1A&gzfEef zvE97re68oU!vpLE5ZTOUCu@14M7{e}bE3++rs^ZW!fm0_l$kbZoUW=eRjNwXFjoy- z5ivx=^3K~jirib3{wSSDi7K|btxdY^6h2gPsfg8)8mG6OYB8PDTvtK1ldrT*c6L!H z9R-4U;pX?=4Yc7cW1FOrFqqdcm$w0-R22n6x#a}cuN56t47ZuB0M9GR8T3f6r-y9_ z^=AggYpINBm(Gn){`yKcSX#;y4;*Vx*Bvw^^0C 
z4q0gKW_uOzH{DBs#mAgOwp1`nM_6t}oXLifJ^3CGO44u6T&h>$SF&%&kGbjiWD%1} zPzdT&FVTB=885PQ1P#sUtW{Vjo;+94q+03?Z6Ps_=s}@IIT}vz2p*!>Ibvt|{G#lF z*ivlUN^0a{dR?9W>^1J!)Zr$pCiw8pL&(*}%E37>R%|Jzjbh^NuJ_Xm(qNkys|jY9 z*1im=LT&E+iSKnYJ%;pBG63Ru{h2;|I}9KLb4{Y4GGrz)z}L+sUIMi~;jan)9CA9} z5^a5GDA^;+AJ{cM%3Ol64pDBwx`@Ovg~Xs4QcywwQCg`TJSsf(C$CMIKk;I!G`m-q z=3*}ok?<_whLU*1qU3upp~SYbEJbxbkHE34a3Bm85E{dGZ1EcOT|@LtAxU@&)$pm| z=tO^J-?hSJn#ygrz&l;kg|GOo=SuS-p*;%df{&psA?M5#oJ5rV2-glYo`MfYT6Gz= zq%~UPxdw{AncKj7q5Bha)q`mlVrO-{bnY~>guv1oU((6xaGt9_JKbahj_~=el0UMZ zMJ5#FUx7{DSDRJy!Bx}^^!}kyQ)S9s?*@W%_#7!ejvwyjA9bx^4RN%)GPOq75|6&c&;I3Q>TWv89O_xHAFG@hdA zlcHzY6kZw%c@)8u1!HH~yHH;5!49iSO?9+q?kJvRmp*}*AlLbt+(QXv^Li$+oyKDIy481G<48hYj#Ei>)P7;zsmWl$D%Sd9BBVez#;45tFH|X z$k}`3Y#=P=lqt}KUtXJjNwJOVkhe)%2XPKM9Huz-m#)u`5?6gO*@~iJ=Z;dHlfdXu z6>=C6W%0JEL8ovsYnT{^ab7BE+%Fj$RXlDJSd%1xa$882Y08@ak`1Fo8NQZg3%xw} z#ce7#b@c^oWq2RhW*57rC(>Gt;h~ z9od*usbzc6B3%@(-iJWy9)EYA6(vbxl!ss$R9At z`Hh%B?K&lNxX_@NV-vb_kf)Nb7Uve)i?~rYAvUOd)kY(^De#<0@67oWI7saO``0~o!C)2Lgu{@$qnTCEZ z4OpS_pM71nF6$zctHHEidh@*swT2PEv!h8oKA^?8Jt@&<4wvX?R>ZT7A~bAyCUb$~i$2OO?b} zq8-7;HPbIc)GiF(5(jylTlst12;GSHvIyrJYBq%N+_vuz6-15=-ba}nD0iSUx+aK6 z`2HX);aB#^=057((E#6AMCfQ^YjZ*UsR%?_XGr*`XSMwEaIa(T!QXw*UxeE07GsTM zzERjxhq~C;V?Bb*98bzkeheNcjxJxncBC<`c3I4$TRICNp-{kI&ai8CRSq z(gIf9(fYd%xS^t5%~rI$@*d`4j6OZLbw1zpI%y~YN1RFBgRH17B;U2g)6~El6TEeY zEI#{!_E6F3vx}%3*s+Eg_Jz?d0W9y)@PRaM*9Vb7Bder*Psz;LwZG=tcUg6%hqvP_ ztBlt}&(7kFm8D&0!)L(hF*gv z-|l|kXbGB)Un=I74M+XSDDG`hJ@>Nv{=hGQ;U z^Z2)LTU-tIs6V4!r>%beIoi>DP_ud7NA;Hr$H*io3`B^0@-Iiw?}LEEFoN-?c%^y5 zLI|Arz_sg7OOWBMuxrlbX zTo%-7ye3=Ay@+b4tgaj<(AUFk)Ss-@8zxzBLF^1ZTVFRsS-6`PnQ>yHj8o#(b7dLS zk_BMo$|XI|Ey3N}4KXpN(cL^+BYAR*O>@b{{!WPXeZzTigs=}K8A4R`g~vwA^)$}~ z(nv|ufzc9ZK3ojh5fS#fd>4-o8~H>E?6zDuEbn0BcK)LIMc||xbWzyIjGoO85r|T? 
zuP&WFW@iT&OVVY^{fyvYLJ_{up8F(cX@cO5J>#4WeT8g< zh^^J;sYjq$%@;l;xeZ5|Wkc-y;m81T^ebQutr%}ydLUkP=nwVNgGZ|*i0(*v!RoLH zyj@2Jj$RrG+Q0j7^MK!U(v2-DnQHyYuesbPB z^w)z257a%)UO!Q&ZN4~@pyb~aF=9oy2Ma1I1@#7T-WLTP+Vc?#X^ehmn6c*Ei zx#s=H#_gKvTKsDxpG>b$Zz_GcH6^`6Si;!426xHa^$UfE34@yB8N17z81VQ@i)0}3%<9gc?(y1eAlQLr znyquNFrC@YUuz^>Wpp_c&V*^Y2b>b}8FWz58nYUzgTY_dBUC)jDb+O1Fjtp#y;3$O z5qLq=(jja8)tf6tJ-rUfsj<~wq2TwICyq#`;%Ztjq%(M4HJg5gtE^si<-A;ODv+6+ zs)K}}#gbrYz@F!#yH`VGP%B)MSNadWb89JiCMizrmRD0Z5=bEoDm>*)oiAK&4tlxa zTW6a&NPDR8(8^lh3U^@s8zr1z~MEzbt@|5EShC`8L^S>qiEp}#^M zbPotAU=RK9d6C|K8|$BN*Kr;oN?xI4a)K`8u)~;bO`>;)ft0ofMbh*FrWc}A9%c|^US|=LKljIRImf%rW@rx-w736lb;q&ighy=u|RZ3 zuI;~k)20QC>u!+v;1{I-N!#X|{~y5rfcU4R{=Wbav3I|oQ+g4*d-B;IE;M8q$ZR&D zG|?wlXI})8@ENZhpF+J|1bjdiKu=hXU87fx z%~a-eX*9bUCh=#oERYtV0y2b4>2%)i*zU7ay2B;aKxIR~vGjcXA9Q64N)gnm<)L?N!@cO!!S4W* z&`a~_jsQm%^VzBR;AORn{0-@!U}4x($XO%ArfSvF>tes)fv|5TrGAOeaiL>9`ZXf8 zpJVu~*VlXVh5}1TuKb(rOPnC&;G!0j%@pi3p_R;E=xG)MNO0y4MM++(|S8&EhA(uhnRpqqSBu1xV2GU@j;W515dy6X4x z|FBr)xb^A#A@D#-=nk1*$#%V#thYlmoA%F%heIKe51p@1{xxJg{&Du!@=qL|?{^oQ zwLcB(X2(I}WQy_+R-gZSdwXyk3?#3$Z!o*6|AGi`kxrJY*d|_#Tz9xT=_vT&g#J%o z&*)cTJ-sxx{lFhH;}X}91y$KEVbtDA$>p1G;ZG{Ng|CMT^rLkN#CUow4BQ?4FVzWm zfpU`^vS-H)+~T9bg6IaCN-O_h#!+-Yjl#VBQ5`gJf?hdT}0*m`B8~b0)clmJ|17d^p3tcxv z!E+jYQACtL84Niq@!MCwRs*RtCL=+Vu*{>S;UQCl6Fpk49Ggu`_5w)cQQ^ZDGmCtJ zJ!UP}`uA-uagbcj@7+zgoMxLfCR^)Pv9_JFlAO<{)w%Dd>ZmvL+T3m%`L@q6elXf> zbG!&qQ>lLT#i4{|Rj5mtF5L5xOcC)=wE>-?m_G+{1jJTaX6O$q1y7gf^77118A@j_ zb#Y&&*}}?th@-bJ`Hnw&bzNq2aJw;PAZ2ZN-N7gfx?u_1m?103_) z30qR4K>s;DXJ64JIztjA%1c#G4YY_=#vAq!V5|X+%~e_@l6!2( zls~I?ChMt*y2>tr_Qk>=GnS9dPZzzq2?N$$S@$G=j1shaCcL&-fZ%;O48)sU=B9E? 
z2-Z31kDb!+b3$z3Gi7Pp$1R%KUVwpx@8j4p(3Sn}62eZ0Dc*JJXLJ=$y~8}jx$&Jz zHFt)mGH6fpNU zuN6PsK?hLNV`b<1iiBTvDw=$$D7WgdIR51zox%d#94`UBv{Os`xtwfiGSH)G_G2^b za~Y_cZR;YpFWmfk`wvUE^|fXhph-RK{m^T({WwOTKKZgti8U6XL_Bd9<(2Mx4oonwq>r*K2f*_{sbF+X5{abV2xvp2UH`~fhal_~E(Voka!UdLJs@VTl& zGxU`&2-l-2>yo*}`j^6g^F<-K9Dq7M<52gX&?yeM$~$-o9(dpxWGDRhfFv>h^s75g zIRQSB3_cTmUIixhCk7U;l;8s+9e%U!Kb35|FG%9a^3n}ocQH`fj7~>(5Ddtka<D{vfh~r9-YQe$N2kANZ(a9V~L7vtNuZ3He$rJXCzmap`D3Hqkw(;w4criba{4m-# z|IX{-i5-~PC2nT&JUd#tGv||j2L2T52XVWiRVS{(-@&XALh>0drmGHPWGXfW zVml^5lfFA*JHLIq$xI&L^V{tEmp{-`SzfeTuOD7y*o@p>gEujTupbivQq$j5z1X3W z<{yEgr?PLo#o+(ZuN3q?I}X2Z`1cwW@ls947Sg9bi+@%+ZOz?iMgN0a&-CQcfrTgn zJS9+nJ_xr{!2qu`ppCRQ>zF>hG!dPft%q@Di~ps&EuFFB2MlB2{ol(rn3JrtA3vl0 z=2#$(>;sY+*4L^B4b%$`WK>)a6yZL_Cg5MFbmjGg51Nnwi zJHFnzBy2L44T4ltI6nvcHu!yn=I$b3ZfaiS*lC>c`;L37L!VIo7gH|)?m-{(&2U@j z{rmuNeE%`P8~tA5B25oNnYh1y7k>Fe(QmJR9|Q97n7K+?&nMBH9BJD1?>D~Th>lSN zRdd~sr8iJ^)0+|t0?W~DS$^o~qQ2&ht-Anz8wRm=QycyezC>4I{{s)mF%hkdhC@5} zwR71Vz|yOlewO^FLf%I_0xUw@(7um5%0~VLW^+qmD&!JOe-?&rFJn^8nu!*j8^d3> zU;SVtVl%=Lo%8!LOs|KV1l#HD9*A{mko$cxFfU{>0p|HlRB0GJ2X>b?=lRE@ITpWx z^SccJDxbA7Bx9;9cc22S3%0u-v*Vg>{^rd@uuDybNX6>>FF{`bv`nD)X#Vid;ej)G zU`9Mvt5&9;zmOZ7t@~wiY<9N(4>6RV*)&RTOOE`eKfgCTZUV^QhMLUCb_Mj0CZ@%j zkwKNKm6i=<=p93quWWYg1=IRp9iIX^W>@A;``s75$2_xy1(fUf4jAn^fTcM(+VaYd z!G77;Uk_4`ncTE1Zj8`J7B&^2z;_Jz}YV9>`g{ai`aHln}QGII_lDx<4OhEwKt)CNc?1CD>&(i8=0kPVCB zw>c`M;@;YD+e{Y)U+PlG<#gCTN{=|qoadRQqN*FFN#!(e&VS&xtnjA6-hU2Aoib#7 zZhB?gsJw?lcXCq=>`N?Jx(}z`m_TW^&AZTmUPw;;Ure6Sd6!O-1`vb=q&U=oi^}yLC8JI^FD@&ymQ_f$Kdo zC#Dkr(Z_NT2$^`9(r|l**y<*@OnmU)A{Ehjn!noA!_ta?jNSx#N2!G>;7 z@1Yi>OaH`0XqvEW0k~7;S#9ltUXqZ@oo~2cm;af*n)~~jWmaQVg@2!>TfJ*t%MQyU z^=aIuO^e9s{&TMYP|qZ#w%kai+}gHo<$D=>&zxuKp)VZj7A!B#ICgX?Ph=_1+c87I z(6eD{C8q4_YgU*+^AQ%~$o`MEc}6e+YdP26vnq5vY~DnH#|xA##;Qz`HcO7b?EU41 zMsXE{d-XM|%%BAbi-+kWPW02z+!Qj8!4dU7QeQZFJpAYI2huNGp3_)>S8=dX@*w!y z#%`*28W$WVeV$ z>^}x*E=1TQ`Iidp6;DSDEXP|A?R`Rhfx;x)8oz>+p8_(^WHM^ju+7X2*VCh%x5zvc 
z^sD%x+z!yu-SJ9vb{k>}k@MDCXpn;oczN{L@A+p;yA+_4OP)P9aD>Gp+c-e70K*!5 z``NaRGXSrO0!$ME3>!=8tG9x&Krbv3*J+0e!{hpzb${Gs^V2Q{0h}zd>&VmDdo25q zEq{_yY;4CGLRD#gYxBoyW;a88|M3E0-Ql(GxnD-u?#kw-?@@2T(QS!nX=Xo$etY%e zv95NuT-raD7>MRhqXm1L4Na^5hW~PHRse@-;%L0y_Q!j>#jdw@C`rul0R~3N%a(f! z<9Ejjeo#My7N#>%YQXTs`)9Tx*}U2$LGe7;h7T}K{61YGFQwq?~$B%ZalETYQ#lXH#u%PV%d6QNz4<JKy!3?o&{1y>Pjv9l-hi7gY_$#V zyuT^;GDiCus%_`#kQB>i(SAhy%g7yl{RamprBs2r^krCd`wFa`OVrk?)24|qp0~gI zO!g`L-({Hnl8kQq^U4Z(4Dp!gwYAMmy@P3y?9Xh5xc9U~VBMD%UMmAN02iFI{$6M- zR}~p(sQ~kvw)!szQOFsAbK9QL57O3wjmqM~FFQi}TDEq+B3$XSmEuDak~{t$Rb<=?Nm2;K}OCBNmNGoE{+ zA;6hk>_ss-Z{NMfc)zO0!fjVDUjy-kv|GWX!fs{IhsMEAUw;#FFt92TZwCB+n+U75 zoqmQ)xpXmOl3cmgxkvP9F`NDgL}zsrqsYI>LCYljvCM6-QuZR&LrnW?-*?n>1MluF z=zp58e>;6VDW#kJxWPGh=*)+7O%7u0)D%}xm#Q{QGz%KcmL~Y{w>^p7MU#Q}EM=9gmOk?Ho*{G*8zN!KUqYivI4+cfV7q){r^9tE=p z`+8rYTJ7l%N&veG=n_-0u0@5dcGVj^A3fXwZdM@YKPOZp{|y3<9wr4NG=R@mlJ0AczTzZJ-FJxeAzMGtfuge%*O!6rU`Lu42mjS!`Z4AxPE|4sjtEsG^V(4&{!LqLe zt7c0wr-;d1^h1XW`7cQq0zCw>kehd0%xIfi$FHJMsAaRM?zOpRG*Z7mZO2U$uK-nj z^_Lm_JUM(k1K1~hV?9gJ;tX(w4?w=Xzu()PdnmwQk6J!){(ZI&aBaiI0)?-}%gIPp zyUjvwk9#w@j=QDnL=m6F}w=!Jh^xsodd@&;`1Vk*D6#E!?{t93|o_K#@X_ZRjSmyI_77f z8x-v0l95Ll$;i(>gkmtu z<>rMHFW_U>$52k1ONMQZ#&8zjl0u#M?q>$k}jRrBzxJ z$5$#osGBO420I)Mu&P7-!Di!2@=xl;*ZgHDXA3^?rohYkQGmBWA+2!8lG6|W;g6Z8 zbVno*2ypt&mSA*?et9;}1+KG1OPbQN~M~!{+UU;Roh15e8zNQnu4pV}#6x*ytUsBLL z7+V+75H&a~vsX2ctXWpYJfqE#uofj-47HtTOhDpD0@7$&7b^kE;g54u+Z{$J_`BC< z{T6AY$(V#2tzT;zTT&TU$)#K1rF4KGUd}!FSKC_}0NN>$yLOH8>7nlSy}B#}okmsp zwpNnjqH@YuWrAo;`!sh*(XLY8ldh{i4oUb2dUMWS1Nn}ntEH_pq}HfK#Bl*tm$Ox3 z9%EU>&Fr-fD(K_>tm}vLU_11s5k6r?h$;lmN>)hNmRbnx zH_T{~LG|k{w?XmijZ68(=JY3stfoNNuslnL-d{t^94UaNTT5#Wsv6+I+^k`)85V@Y zhmqOjwhX@zUDEYfL#k#H(f;5Q)O(z)0QAk<;uEV9sxH!0RoG+ndj>o#i!H06dGPD; zzI@D`ZzzJXEuVQT(s-6|Z%s0gtgW= z5f|&8{TYK8;A*c8+86{c=WSq?NGM@YX#H_v#jOQ=o$BCXU)B3`0paUD4y18Xs&Ye- zwpy)ZdU1mf4ur*Fp0Q%#!cEYjtiZ&@LhW?3CEb|)K#sH5%>bv_gZtM7UIDd5R1#rr 
z1794bMWztZiKDZ4nc8!`Vc}Z!2k91ZEuC!Wxi4Bpf+y(gETC##C#9ag=@At^yL`M0_!o+hKIn;CX#%%W{>vqeTDG5) zd8`zW7yOXX#*(P2bpLyR zAsMbN_=?5XH4ZKo#;BcJdXq7;-W0Qr)lsdjKZknt6gjWuF^CvwWXEDx-&wAXr3!A$ zYRlh~jEf#jUeG>s4;Q}`6WR2GrT4LtCEs=R9o)|D{ox-!JCFgI!Tn9UYTqO@G4kVL zpyoRrL4Wk2SU?-gF}#pcWlpggHi{ zz;~GnVZ3$H>(b{j&R4!x%{xnuc9QkT)x#Zip)M@3%|lP%uV4P0(Z14Y^E@FtC`)D=e=DI_*lh&3hu1uh<44k~MENCs}WNcwwW zjQ-@v{5G%?6=E^t`?Vnk9!#SVrGyu7}_@i1sttM1LiKX`Wd^c*#xwV^2{ios=?vwoh5-g zDDd?&aHSV5R!+|IZVi->^Jqfwos5)FtNlZ?Yir0vf$H z+j6Wv;SA0z6$YwZA>B@1rG}k{CWZ~(jFaXf?g=F$ zW%8ZRCcjqC=LyGkx2V^Spm57J&GFuPx@E9#XOHkgN03*C(D^xuDbY@o7bMiPBcNH` z=bf|FF6V&TUn{O|Tf%=!7z37>vmf}G>u+n?xbQDoNci+Q0XKeE^>7J@doKPzyuEom z)NRxUoMj>vDofcbi6msrI+7%7){v#LkEOC3gGr*0Ld00czVCx9Gib49-^q+STa101 z%wXQ1`@ReJ^E{vT{pXmtUE|^^OY@@3MC1MA5Rx%);e^+(_*%4O zS6f_vR%to8M`vDa0yt2Oo&^XyWt*25#*+N>R$m5d_H*_ksJxvNliNIZv{2L?>JJ7y zlkuneE^+Hy4qxu_JLwCt$W*C?kQ~2WAkUlJI&+YqRsRVzaBZ7W^+0;uEAp7Ke=Oe) zK5ZnGDJ)*{<-O4P?+sd)u#lP&BKvU1(+9a6pT|)1i())DeVar>#R&Ny?P4B-o%)Ku zFMha~48;~}G=fg%>|82O?3}9gQ&+IS%vJ>bZ}&aG;A`#H7qmhymNlum5A=A4)%THP zoGw(hCBgK(=bBE4cXEiFs?2A+F}6R60&cbjhTfC*{Q{I^MW`p`WmR`9lzbx%M)k!( z@)alb>C)`H;HGym^!D9kT?3=ULYW!Ueu=S- zz8f?Y)QsRG|Ngjr3%PW}qgX1ml;Ats!a9GYRf{xs2R9X7QGoL-xz0QUA*YLMx%-Hg z)2J0^*sRDvx=hrc|CBcrU`oOCQr9`4xb*zt=jr8^{&Ig%7gZeBvj zUkErtNz3!+hkgYZC(%)Gr(H>PG%h%6$9UZT!snVo$9BPA2Yuf2 z4z*OY`hC_ajt~TU-{;1=ER^R<9|y!tgYIsc=?%YLe{$PoqE_0czvR8|OhD`+Z+I`1 zx;@xpy5KX8+Vqch06oEVk1^wpD=*iKXJ}GRxvX-Ye*7qe>r^R;L4el12RWKQ<^3BH*QZf6Q+;~<#JIdLqj9M zT>8bO$B~&e&^l4Ox-`*A!J?^_8N11n?}RF8IlH_cR5C6dv+w#XlNVO1Nnf1|xjo$L zuKsv{nYa-@Q$$liB5L$AFFbtL?NE>sLQ@!mX!WE&;m3;gtRg6rtK&WV&6=J-uD4&q zC+$93Moe=a*cKiUT}mK#-i_bc->SV={g=Bu7y7jj`q8OG52EvBClrxssirpN&Xf?H ziHc+Ty{h?h(w(sjT7m{*3z#NM#Ee~1U4vi$3GR1a+zOYfe9# zqeF7sIrMZU>6#159yT2n1Up{qXi<4_-lhB*a&6Mwl!neR)rM<} zvPZZVf*PE&`DXzu9a2*n_H%MdCy&%(wbvmPnd!V+Z?cAgO@PsKw0Ce+sJP1AEpNZf z0F^~cJ<(ZGVJ_t*-vEw9+@wE0d~095$Pa+VLvvax{$re{LpH(5Ur*AlN;CouEcWl8 zyuBmI*E?5xu5}ohGdQqh)n>H&-*AFG^txZ(;Xj}q<6H*$)ZsV4;BNp}W%Y~;4!&lY 
zbsr++URv-vLA1^$Zeqk{rwQ?@Rw6v+8#?YPo!sS`SYBYb>DgAoN|g_UV8*k@;5#nP zVP6!6X%0F)PXJhg_Q5%L3~AXt`E*~346aH$tKyPx$a&&-g!lQ$12&HL2bOY(}3#Q_FLGUzZU!xVTGe%1ncc-fsQ%aH@s7SYVeCD*vsQpa2+|Qqy1L%^kj&gZ2b2r(={Z8n~!l zb$GAeo%)#8IYv!#k`RJ9C*zNr44q{#7{VWi@r7=Zi4PQ>9VcBY8n)HP9>0B!eLm+7 zC5*eXTi$=ERj=Z>!Eqv$_rR3J_p_JrYwH=?$Bvfpjbtk5(U|tltoj}|{c(&g9vA82 zekz9B%DfFzd>(gWS;6!i&SHq~PS8Qe@8w`{7L48{t*=v|>GmTa7SUUO9 zAJVmR9mmL^HV1ECmz{@vpxpX9CgD2t zuDD!)y)XaLyOzI%{*@_hFh=WZsx;^7Q^e5|*$%@9=DyqornAZTO^cz^on@k%u2Zu^ zJ4Pp0Hj>Pj*3vvn+ULt_i$GhQ6l4v5<-uZx&5erZF%nJobQlXhgs<6YCe*pp2MxBq znHHGyoU`40esG1f`dMI@*FB123fv;#fv&-&{?wAn~tp- zk?lJSY1`rq>!*ksV?}FIbT!wsT7bJGQkvB6uH8U5umim*~nP-iLzP)Mb&1?b2Is4jCH-^_9Dpl{;|F zV#1Y9)y;QH%FNs!iGOqQM z8P>u(aE=gfma*zHK7Z-`g3{C|$qHB4O_j*?K)cDV$=-}WwU8)jjz4?b1X(v>FDWzQ zk_Edr(-lEXrb4f2L4uYjCHv{UuRXF&r zzdB?n(Ixez%aIO`%~R`p{P|w-GI$bgGR-LtOPwbza7ItBE0jm|C1@LaQ}gg zq=t?M>=k)IFY3A`xX5n_1+$+o+S;Qdb{IsfY&eRZB@Fq0@*6B{tC1WUE$>k6Rgi05 zfUjfWYo9tOaE;PvvZH3_fH>z-kIqc1*9WDyzSgy41&y%*HlRdi0{ z#fV--5`kvLnwk+!>5^NPxGI05kz=e-*QdXxW=XkI&JM#5KPu;R!2g;KC0sm^JpgVb z;EbXU9xo0`NvmH4hW$#SbagS(S!hxbut!Gx|Vcw*04KL$!pcWrd|y|5q^cgZs-k-?^F;tYF}E8^KVe zd;qjOqc;3XzIz=Ad$*^o;uGG4fFW{PunSP0o%^6*hCZ(V%DXOnYu(tcYA2ta+5tsm z(l-N=IMeAD?C@RP%$r_A8%ONLxvMp;0D%(DfQ^hK`o^@f11n*9GBRhuQDf%OR zXt3vXMh*W(JhA2VVYNF4sGLVrJ9}B6>N{Q*rz+@kYL&h9^yKb)6W!4pw-(5jnpuL1 zO62YHClqI-@DD$5o51*;jp#?76s4ShQy%9L7Q>w-*C${RY)6ct#A^@N%@Kb{^wG$! 
zyVKu7Fm?_qecZ_QvbrJt(| zG_aZ+o&TrKxhq`n8P&sHM)Mo1AYW74<)UBH^QkE9EAsz(b++~he60boLjU`(|F2&J z>X<91d7hZ7&_tykE+7us@NZ#Hm;*z{@piBOK&9XReefN?%z)n(r3R|qJ7^C7ha5+R zftK*HO)b^I==#6^9iRu=1f%YTbpHQU!h<3lp$udNjs$p#Vd4LqgomH$r~p7Z@290M z)BgvZH5>c0cF z_=?O2CeZ)8&McRKwb8gV%Kq;vJtTVW9U!MMb85Uk41WGsiA21BwFOE|94bf;#P0o- z9icSf1pwmzPv$FmE16`E363|DzBsr}Y7yyy5Y!fCSK zrvpj>1Kr1tGFx+&wy=V+Joyycq0#P zD#cgPaKledv0KR{vPnw&W zMePt60AwJQ>LyxomrQ}u`TlISjvRcQ)TEZ{w1`tZdq8OZ`#ekukuMHx5m14qQ07h4 zjbBw|zbygb8>1{TuafL2MK6B)ho`$kT+;=gYh2U4o_d*VLDOM4|(-TC8Vx zKv{RMc0Dwj%iNG2tKGc}3E5uc7nMT%vmeN5&^GC%|I+dj_pbaQYg3|oLIrti$-BRA zK+>S3p8I?oj}mlSBg%W!npxD#-uV+RPMFo#x%?u~lXw5ZDIZ+pA3grFZhm&S!HSEM zdl*}tzNGhsi~p`n#|+r$&wsze!HqtktG)MW)#f=m_Qkp7`?pa7Y^O*xUThOfp8VYly>h$0PWu<(bz%H8 zb-Up7(7SuT)riypdC`?=Fq|0v*GvEQzBy{cMv$Sclqm{*Wi)w+P|!W_;!UpLWzfwd za4coP%5mbnVvaU>tU|7~Ted+Y2G!WRz8K*kcXx-4yem@i#q<;yd765xDgd5F0@tz%SytnWw{|mMPl^G+l5~MVSNAGyCH(Z2iiC3$W*oj3f=3kd8i&T{GIzctm1Gs1cQZs&fNmjUs7)p$h`wZ} zq6?~{sn(uzYyMei8*&`sd^}>p4PnJ9SU}UNnC-;b*Hp3F<1u11|GKz*P=*EYi>YH? zUWo=cbgn@Z_yRrz;JAo}$RwO9fKVl?6zA zQSbJ{c*Xl`dJ^<*3>U9_?TqfI2?R_GewP_2P?Jei?|04-HMGAmO;`(&-cU~GIX(SY zsT+D5vmYB;O)Zh!iBpNtXTg8e2&9Mpi^bQIQrIVF?BxuC;(-@5c379BwVDi{H`g!a zn88bkz}Fwt|4_|<87FU--1WZ$Us|y$nv)N7A;K7Cc?5t0?LN=X-X(^1&}gQ-)4=x8 zaKdov@|{pkkkb=IuilD$Y1n z`6#)REX|?01;^zx$45NNBbI=gF86(PLrky~oe%e`|9B?j+anTE(kK7>6m0p6Qt*dp z?nS>^NjG@o8MmeegpHzY7yBY=%i;AFqJ_jZA#a}_srRUjBUFlWy^UJ5Z?Cj= z$X7@|vio?B?Gc)&vbo6fKVr(y^FTJEp%MNcp9viGd?{_1DBy+ryv7urgrIH?M4K+f zVK*>#bv@fA1*Ta68#4WDgllJaQ4km=&Q8(Dl9VWKXu+D5owdg8FkG~8KZ*1S;fm?) 
zC=PYXWa;eQwGnhFEB8-M%Ab9~>#gtn{Q1m13U`+Qw3z(}x_z-!vx3iU(mB%mfIk3Y z!AJH`wrLZhXFo*$lWcT8)}MU(T+{H0uOr_bXEo$D=SckrtBc(p0!J1kY6h>IMQioC zZ@%97Qh0JSYGatp_9&4vmO97Qnsq8}x0-kzlQme~=#_mFhuMxc$Cn;Yl`oP*<;HE= zn6k)e&x$C%xd+=YZe4K{`jEDrspPKr=K0Negb^yv`p9uo_fRf*6(ZnGqO9zOD+rpwHBY(t_iK&M3ZsiK1lz(ctdib5Pc{# z)9578hJ>X2sgByd7>BCs!noWcx(I~q+H|giY8}E<`6xCPGqLDlLexfwK%A$lrj!G> zQ@qc~4O9$~MZy}#xM|fR@&_RPBQd`o0Ew-AESp&luj~jV=(v}~WN#$ffW77_DrbXW zL)nNq$1vx^Jr0pD%Nk#i8O^cjEut$Twnm=RT;c6!v?y4p(y@;o6)n z1B2(&WxY8Uw3)Vd+X~%-I#7+Bd9T)!M|#HFGgu|XzghryD#k|BU>UkT znp3PJB+!p_;3W9wz}cotu&juCJo2M`(2;eLs86E27FRuCWY;O47)ZTPp*pd;+zLk6 z${*G_cm-6dIPkj8y5hgC3rxdUm-#wyE@JvFH=G>gPXgZ$agr;ttlKDZJa3Sfz&A(` zcw2i_F0ssS(iWY(A|nya)u->HA2pYlyMX4E!wFt2c$(2utBrn}{Aln+B6=Y9SpxtQ zm=v>ouk|}`rv%Ek;-#WYewxnL^`m@j)jXC+@*3^ao*w-gottS%Pz^M5K+;Z@=g?75 zAC6xwX^8(nqF(@yC!9G@?EK@%-7Iz?6Hb|wlcIW2atl=3oUMixRZC;>ga9v3J$xq5 zmJV_5nzga%k1XaBb;c83@29j4qPDqL7Z+c818Gr-!$s$Eo^Y^+PBFq8a1JH zm`ALGFVte{qAcpxk|6BO^Vu@71~-P>S zrv%yyth6fE59cNd;22x+g`_GAJ^T!WJ?i@CO8vAANWPb+ilBlS^Lu0ja>=cE_Hz1{ z-^R>X>NAc)4RXeqy+z?Kq|@Vsl&cs9z2XL_?+-1@V;OL)L7A(Nb8AGJ`il+8kXLs22}EYEb}R-(}^@^(cI z!BJ?V(%t&P2z$K63qN!F-**AzZCB>-!D>4CJw`LP@;+A-ihnz73=&rW`EN7GeZ76C zr^0ylc>qDhFQ2o>v%^3L?@gE5VZgXLxFJ(1JU3IGUz4+uZ8E9;X-#>^bPJPlDJknZ zHuD7WGlf*n(=A9Irf4C?>qBd`{**a#MVu4rqU*n>X&H=PZJVoIIE}z#zryLwB+AZ!+y&hwRUB50oy(PZkZMJv4`mPkvo0JAq~RMNP6WAzXS74Q*7Zklyx8w$Jl^ z2-!Z#?2^-{J?Hx&KPQ={a!9M@b0MwiM)h`ZqgT4g32F7MPRWhMUcKv6E8D6&&`cn{|N9CwYB)IfeQe^?Y;;4tmvTljq81)i7^HUvwbo@UF`SO-0 zQ^YQ7I(15Zjq_=?%($6WcRp{)p;kk)jGS9>?*Byxxl4eMe#hM6;t9EsqyZf%&|n~Ef?kM`c%9*6HXXt=)^q4_`HsllH}qw{8ytJ6 zQwKOh+tD_y^Y+wXLE-0*i8r>)^bDYQbJ7@R!ViW^wxPYf1z@C)i_?ynG+>n0#_E$tu6 ziO(vIA$*JUc*e49vRW)njAm46~iY{^tcHho99NDtZ31mGa~P1g-oj1=O<>=8gDkvMGb%9-oi!XR%} zf#w?f0MC+YMfld!Fj%v~HNB!Bzjuu#rD)llAah%CYVc`~4Z>!w@!(l){VZB8jpTw= zXuDf-Qu^!*jdQZ%W<}QWfcHCgcAdV}p8#Sy(4~ZzG6Sw(5&Saju+d6f9f!T6ku_JU zsG>`?4xCu(DGhw4Z{8(Sq>?ni8{rWFz?}AHFH`7HUm^svz4IP0bn&s}V|Q0#`Z9^t7^NL^2P-aFMb=IO(xlPr){oxa@d 
zVZZw@!(RKg%jOs)Z;+;_q4SUv(K4w3#mBv~!?cH!D$w)-+Jf_ppr+oDiQ(Pm&ThPD zi)5P5ElH>@ymT>hfG~p2RatC1_xgMnN0u5%aHP+gPD3GkMOXgCvYn`8yCm_HT{!D( zt@)Lu2z!}(0{%HGETc9M2Moh9eu7sXBdY4Y)|-j#wX7c0RZLYE2Qq;FownK*f=AHs zsP&-Xnui(qP|UBl1jZ2zw?k0}%@$aTGOJyPyGIyj9*>c;9^O1uPKt3e-kjj*fL6%o zBx-rj1OU5!Gp?w|D~$4^MTz{TN_+*kPQk}xCTW%F#dn(O5ziErC$u<2i|j|zP5KoF zqt6SzHEHi{bJSZh*Mr5FYj^T>uYy)b@3Mf*q3zaTqF5@s zaq>;p&O3y+&}!U`QEcJH7{#hqJIBQ-BH}p_Je8P-p!S&azAVev}syE#v zT8}*;-cdaFp_SfF`(7B;Usb4SiV2E+iP6Iv>;LW3L%U%37sbJ+KE_I)zE<#VL+=q9 z8ea=PfyjMryab?=?nZ{eU-@tTDu~BsW`0lZj(#XYdGcv(yUv0eL-ycQpRSaw38Don z2vx_UCBnqBfzXX&A0azJ9nY!3K*1iZCH^v0jT;=kn@dXJBysT=TJ+$VW5 z69B2AB-tK>q!#G{0lcLEYVjO`c>q)KsS{KDU2NKh6^3{(jB>BboR^smP%}?`Fj$RN zMT)-Z4HQh)FCHY_^iO_ct@DI%tz9aQg-9?s4%1HZEd9s`NBq!7WJAo@Gzd1hGR->f zgtbj0tKGMgHB@shxi@wI5S1W)h&2ZW(OJ28>01u8Tt8$C0rkZoIYue$8$F(8()rE- zUP9(T4V6l_d7oboPu1XsB^1=jen@;CRtA4}#_kpNJ3-MdxW+BNNXKH@GwgfEu$~&V z;g`oux9T)TG5SrL(wY|iB~JGO7xU&zm+z=;8t1V)y$Ws_;^vA^vp3Njn-_1_#$R}~ z8EsyPCk~DI6X{2rKi@LDdC>cHHuuugF5|nyc@%kvGW_YYo^RDO&1k-=K^}%Z9A!~w zBP&5pf9`^nD~T`<-M00x_EKLv@t(hnBcg}ul#omZch}P$qYZ1!eOO$b&MTGkom=UmXZYr7z!YUMO6_$i#!#W+&KoKCr zvJhV(kc~~E9+VmtTfdil>d}FI3HaPzry(s+$?#BMfd8Q7e>-5;bj2-dfJd8ZYN@9p z;@j}9A;`7oNR_rMGp|K%_swr=pB*n9&ksx1-88aW2hWjpEr!VVTfUxO0?~dy%TOZV zBYk1$W`=@t8le|fV=e`>AKNG^z>$B^iGL}P?6ql;XTQ@qpqrdCp%d$&Q?nMb`xSgQtk{ez#2oQYacmH6(M zJN9$ut*4E{yDZIuI>YFlvUjUz?vS$TjGMN_TEBPpk+&%&3dpy%tfM*J>mw!1kqg4z z-s23w4Me{}3MniM1W#L3=6%Y$G$zHqB=iK9U_~<=N0(@e@JUX)#q~fjRq!clP@>Lo zFBt&kLDTxyl+ReumlFqM&Nu@kR~gfa{r31?Qe!f*RFS)>6pIpm=Hehfv^CPOVkY~F z?v;0doN3f|DiOpyg~YMiBEgel{X$)*6_Kt;l^{8d35%h`&6x^5FW2r9pmx3V)**-t8p3WnEJ|&?mb92Ygp$>I1(=mHn9@Y?Fr?DTeT^2Wq`5tDC?$*}Wa)zh4Km|-vJ1nng4ceYNQ z&pXM3k(~~5w`N+z;w5tpo{u^idl{~sn7Yt)*7oOpcS3TKG3nbmGq+M>Tr>MCVB)Wv_5AXp)|rae6R#Lu*TjAhsV~ zvK`a-ky8D^nBx-R@qQL$sT{twy1DZlOF!T#->HF)D^Xifr71@inKYoQsASBEXj$aa zOpg#~fK~IsW;s)1h>cPTdz-j`y>!Mz}_aMAQcW!yf5G z2XDtS3BlfqpwY4|T8A&V?l zt~)O3L>*j;H?gLZC~_blfGyr1qU-<$@mdgYfOM>VF8~v?pB1S&dldS?!__^QoMNYw 
zHJ+aZx*BQEfr=IJwVl>9&jgNYSs2yFl(}(@W%3NYPf#>~qEs_2%@JGoNg*TE%a|_3 z4^`$ncV=P-Z<4R=*hhSO$UkQupXRyrmr%UdN%tjXd3Jrpj}XOfzl^PNlNmEPD@cxF z<3_rA$hkiXgtP6n(3NQrsRvu=;1bZI;+XW8HUP?+fe5R5E(na^$4`}_u0SyJpgM;{Da}CePY-b;>#Wb|CvE=RRJ5Zvw5HXq##PWan*}ZBmbct>7Q_ zh0;$0BA0_)ZuGH(?gpfHz=_YyTS)}s%bNhW)n4l|s}`5I(7S#P#4VSN(xS+8ujpM& zIrlw`KI$)^a)^^58wz}MPm$c@Onltt^mKV7^#pCS7V-w>vflBXGcp7|JX@vVgpq`Q za6gT|NJxqaHQU51x@=oW2K1zRQI20}<4W8G54&n}pu}3zKe?gRu4s$!I(zlvm;I6e znF{oTjMo3UxOOnk>YRq7Dy%y*2hguZ7a!Uh1<0}U;~<1QfeG}O+6QH12moGP=^l;c zi(b*$prQL9#rzcSNC$_5oI_}`6;^LvdXT!-RTNPpPCnTVq=6@$_3S(U~gJIx-W7vzy8 z-nTZ}wHTW(cd^ab_s>Z9naoTp#Ve%H$goNH%Qp1*+8I=A6Q$KucfX}q5kAP>k6BRN z2`P-sG<%=ElLZBjL9&;0!#4y!4IJ+L=m1yyb+rAoe9L@LQ2rF~V>uCf8~Mtd4Le)G zuBZZ0Ld+)S9()xjRB*+~qY@2}9#I~VRX~{&GKxlaP1vR7*nBMJ5zCKe+ZXZ|-U2s0 zY4yc-{**IIoh^cV&w~6^gp62uj*J0$p%;6iocD=qLZP#!&t|f-M?7ujjELiacTZ9n zmkwcM*3^m0-^KOChkZvs3)~|+llG}Y>rf1}q67%^B=WrUMsvJm`JyHrBC&{lEqH2v zZHgD1WExegfSSs;Wr25}xr6w!`?_P;T1K0@&nqY1#@}uI`j@0r{9vR&lGS6UD&5_R z#*6M)g`(~nru!Ldgq*$H@4#XgCdQ=G=`;s>Q}{G1yE!WI5%ex_rDp zHc6KZP(b5emhKm&(Gh?Tt=?dn+_zL9EFBdbFXv37oN$lh@gA8ZR*ii7WuRRpj ziN10$DnlkhxQ7pI5K*&H-GW8=m*fRn#GYdmfrPlv35*5Kj8V8JGkDa-gts#;bg!QZ zx96V2eurjF$HrA_q=4Rpo!YNt;1~9=)oEVD!Py90m+qEJG0~1aRxebJ5B83Gw0zLc z<);mljfY?#+$fY=Fv?iV!cIjwCG=K-dexRr)I2ZDS>MtQ?l6lr!p8A=(w^V}FrqH& zIdx`ni){SbvAYW})1tnuv0H+4R73r%b1$W_HbahyQf2PDlSlg_W*@!sIq`Y^Vff#0 z0GG0oyX;`ZW^(}VolOU{tMvRG$H4$~_v#a~gh#<6E!7XPW>IoO-Xr+vZ%OFR@3tsE zv(x-beSP!hpof0%jXSB~t*3#iOF-JKrenrfrpU!WXbqy@=BsMW{QUccuH*nNKL^8w zH5pKMyF#Wb?0LrqNrG$u>K+`599PmgN=;<7d(#_TU&sD!pbwIn)v#enp`kiu&g<`C_})pwsvQh3{s=K^F*sRsrvEKI8PR!`r2G z$1RZ{E52ir$s$v5^+bKsoC3A;tKhuA7;kyXIh!njslJ3h?YRdK?b}5e811#rJa-0+ z_9Ua|eklmdEnE*lsk_4C?S))UmvftR8NxaZ-dYtpo7d^}240tqh*!w6wpW}4x&qn4 zt6v|g&x$p1F`fm%2fRu*t4(IMWZkeuC5aW^3u&=o5>%Asz_(R&8az}idWumv-n(w> zrLktwzYalN(3QaNYBD*l34D2fqzRhaJcFRvMY&qvg(N)4=QzM$8?v z9elaKeBY!r44^3*Hny#w`z4Eh8tX7@UdZ=?j2oBwBnyxo;f>P6jl-AdEPaI5*Vea& z@_JyVy?mpNio7AlDosA9&>o=upy3|+Zf~U3(|#fm>K-@0(1+5Tu|Q!6j_+CHhr 
zZD{OUYpy;}cE9Jk|N6jr6L1TtHf}c5opghphY&v*(|18J6@tSK+UZ!z6)0h}G}8*p zto<&^c%5`TO6{PIyup>xq-tXDlK`o0^B6t7VZD3P>Aa2!^-)HJ85|Ig&EOdd%8@MmY1GY2B>39X9C$D1e`NxivjRSHBzC^=x_&7Fwy)k6kfbfYv@2@=v$PcHwyMTdYTYMFpt0^@P@OeKR&WyA`!k zQ49Y4Gg)dW>)c&Z?in1-be*;Uof+poJtvn?5Sb3-S2p~ZS-Tg76rN*YQE@DA&eSQ; z>`d*n%zmK-2JHcBWT46)%K$>c*;5on%m^okF&7pD(ULyb7%T6nPB~d6(Xxc1ank(x zQ|l84B`q%{m)p6=Qh2d-d-QIciP?Qi_0MO1@&wfLP@DWHA=bFXVN1XhC&G*P1F(-c zD|Fj!i@3kU+<6UtN*%w)&9?OUM%{;g=~JwST2X7EPwmhtYbwu|MPiKBI!E<gFsps1Lm&| ze)e8Mh!+5B=-&YrcotMD)pED)^5&sNYzFJH!o#MKW7X$ng>zIE;d>MOKv$352Sx$4 zdO#`_0Aoj!2@vtSx6rz_ zeL`wyG7UF-zr?4(V{$BZcS_%}emu{GEZfjX%(KVM4ztbK^txVOd$yX`PBn*(s2Ek;l2Qk=~T<-2zN70bf;lYueeQ3&&t}b91R|{ z=8R_z*XfnrUa4HNn~PG&enn^*0wBa-+NRO{xW;%^6pqp!JU4=q12Q$xBK|(#oFG@5 z%bty8LJjg*#A?KPMI}=odlvRV1u0Hh*(lc6yXrLZ%b3Vf*kilw>8Q4B;|UaQmMChI zG^rr;g9%$^If9{4(!k7`;b=zZ+|*xH-K{Jn6-zrx(fd(}*}<1=@p4H8^~3}#ws~GB z7hf9$K!rhfcrIN?H)|V-3?RBzs8MWmq%6dG{>cjEdu`;3H&Pk&!fl1~4xgU3V%S1G=iHuze--r~;XS{^-F8Qdl;C!F8o%dXIQ}YOWgJ z@Lt{(9Gsi>GYJIz&2~Y_d3ACC7^-msg?~>6evm!u*~hbP4G=Z2n+!j!1Aqqb zEzDlEyNl+ToeLRvVW?dUCcDJA$Y?etHXP&#=cUzu34WZAzo=Z>AA};isZ-*2iOUI; zj{UX%oqOB?K3t%DZs&7F+QvVg1(0*$Yw>dPOl*H*8E~h%N!lQbqVa^>SV96~-SphT z-iw&Yi@BSK94rD?`%SVOqg~z{8_XKHu?N%QO;h-MkL+im^fG3(m8VMS z2;2{}*CH!{-AbOwxJyt9f4pJ5o6^Wv&beY%!#6)q*vY?z)CI-=XdsSC7-?c`A(_zL zr83k!GU0;LKq=6schrI9IgPu%yZ8lg>%ZfJegPnGt~`qA#J<4#)TL6q=p6St(OHR> z_KrSZGd{&pH?e1d=2@_Jq`rAS5Z1$bKnxJ*8S}?WT+UIBL8S!Ia7A6AH5H%wuiQQW zhZ>+w+J(F54X{EUq}R1y#=ETJ*^Eqs9a4plGZ;CPG)n;_3cJ^0QPbGRy6V7GjD;}AJPi^&8vV!mQF_+c7XWR)^QOYOaSmOIbE8?@qIWZdnZIPf7AOq0WbCgBGbzGzCtZ2wsW}-~o&-TljdVAp`ZLe@g_o$zD@KT@_Kf-#<8^IbJROmHu8kK-zoe z;ygC*h~|oQ6p0^XAzGp&rkmn5RE>ZlC;oRvvz~uV(hd*!CKR z8-R*|#0ygWl`Zt$+b=CV1HMyDA>1AppZ7Ww1JJB=G=Ur}rJwM=nvWw5LG~}`B$5Yg zVHOh1?@9sWur~>H1kuFe*_Q3ij??SUHA!G|gYBFLmBocmE;}1u)RCo}4YYCHFpf*4 z?&-B8a!_b2-3-}`94G+#GhF%+blFR#P4hsuI8g7v?*T^y6kh+DKj3UkAm`V5POsyW zQH4jUaKJ_6Qfqv+&!!qmvcxkr3rL!nF^}YU2iT>uB!IZDp9hAlE0~cZ3TgbaXK#;Y 
znM|vH7S1jI7=0W#f55nhKmzB*s;^z`{&fxgx$d&BQMof>{u+sa=zXpJEpNF%@dd7w zVD0UcKA>NS^(S5p*SpSxU1-r$6OreCi_HnBmLlxd?(RyykdG7p1AO79fD;x97rxE! zSHPC~Kq3WPtk6~u-=Njm((6%Rku^9b(L%_z=$(4#@^^|vvQXE0*cXqAcl5ZYbXSZ> zcxW!$#P2ou;QHeOUayHTjK<)rPJ6kV$CGwT2Dw4I;u^F(B5%Pxt-Y`4GNFv} z0D%OOi%87x)E{AvR7^PSgf_1f{D6(okVoz&jQFQyWWfWBBmSL(Deo!xmNfR|pSc)7 z3&+m9j#yVpk}P-Lgj9D=YvvIdH(bgx$I6>Mb6B>3{A48ruqVI^bl)N#&p#q1 zSDSCgHO9IX+;`mgYjFgW0vy5-L4uVz^61SKQXnq}Z`R$J#k(#1@(kD9jDd9)($~;@ zKMEX!L7MK3qO$B_Fl|}wXsC|76A_kqI)hM>w$SA}Vq2{(UyL!jXm;66_WEzmwK5PQ z6s%;h`wThY(2!Kqtfasho=V_=>Vv5njqu0Uw^lKyrGDCsnrIFht6o_Ha9gh);@Ym^42D|zn)j%t0@E+=w0RY@7Q#du@xa11^mAHToYN-INn^gybvhP* zo3zooa4nom#6s`zax7A*;?Gs}^)3x&^!qm3R%HM)#MF54K}AT-YF zyrPt9RPGz;djP}Q;Md+C?L(8-7Y8zH^%3RQ3R1Y3bqHHB@()b8x0e+D)DvCGKn7-@ zH{WMPh2;XtF+se{d@zk~Yq+)d7q3OP2mbcZ?e-!Ai(tHXUB~8-uzu)v_`P;1S=A%( z)-%f^DN#(=>!6R{7JK@wUlm>w);Cvb76th21z@1QSs1_CLwN5q)g9md{!#e=$kwk@d zAZyUOSJwWneXi_{qOCcFpZ)1S=qCjb)Vx9*ywV$1ysJ-17){*pk-wz(0aoq3jf%-o z0-E?3)AD9v24N%fhb?>BEC-vA)@;mN6bkSiXor$Va7sh%826+Wz=ZWs?jaq`t%K*I?!e~$D{g1YEf8-QajUg~1IJh$fgl5* z`2ZsDf+8$d-p1%XjcwqcY(=2Ncnt-Uces-kgD5V7N+kwSx$k-4@cQ9SjEc9CWrG_HSJ(e-poAwt0 zAm<}m!v=5qYxC)O0_+arPeq-!vrw_N4?qgE0|r%zs86z3p%qAml#FeSGJRd;1Z|z=^F~R z+dUHI|8i6uS2$%4Mw<(1-e0zq{T(Qx_iW+1G_BRXEgb)w8BzbnNZPm1c{*L`*%DN2 z=#<%3o22Sc&Jke1kIP%R4vj|0|2={S-=Gz5eClAn5n8RaQ&L)HJctUSlio?^D80?K zm+GGHP3<^sS@FcF@-D@Ed>jqVFMrVd_>O6ARdpJ2;Vmt2H;HzAT}0pT)g8>-qT=5v zOM!y zP|h?tZ$;ld9Q^%#r(F@mFVVRb(0DQ~A0ZB~bN&HO@Mo;cBXMH$p3M40LF@h^NSl7! 
zHgqb~7$p16w?z?d@7>=pI1K2kTo#zQo&-t_Kr)HFFh)+Ygi@v$QBoTk2_j1=h5 zf8zT_VaXg|Rb-l*?l5UywxR1v8Dy78mfT1x$neXOw}w0?AGP|I0Qf&B)yRD69zI!M zyjKIz63wKv=OTS?ukmdUK<1qtU^Vh~M!{p&2K3kW!psRHIyD(D@ zli(UP>>jGpj|RK0v*pIln*1`Jf>t+Pc9(yU5*%WPKcC8aCHJZokapxA^|0T zfR7na5+kbzbrj7ffTKhA5}o4YQ%f`lsf$vug{qDt^Q+4b_RseqV*xFj^IrW!VQjqv z1U94I&zA*MUER|z7N`Kz-f4!8$?J1`&1F7lt@CN(cNW(+8Xz2hQ{Vdq#~BzkbuF9c zA6L`w>QJQmfI=jVT;Kxd{>DB4RvyV(+dy|gQY?_&-2B8OfCwVC>EiZTEPy-jRQ6Gd zI(lkZ<>X!Qw}1ur#?PtFFu5S6xd%P~2&2l=Wful?rY@iSw}}5Am2#^tQk3h1cv%wA zo`giI%IZnUG+x>YuTWlNM_v7!49>Y3VD|qo_SJDwb>X^-D5Vlof`lL-BA|4GNC-$b zLn+cF(#?p9f`oK~(%nOcA}tIZLw61%IlwS*_YmKA&bjBDd;jw%d)8j-&F8JP?@m4H z+74sI*e?yN?yZPigQ4th<$(hh{J2w^Zju=M1UQgl0}7I@cWmp}2VC2(cbDJ+hpO4Zr^--UCSp zxN92xY|jXU=TO$sc5$WF*h7AdeUsS3fS@T8g^mlHnR*Xfq@8=nY&GYE=Y<`9IF7J_ zZbD?o6z${(H_5JH%e8qu(J^1~?uDr4pNxb6rqlu`EsBrK*GLZOcit|tE#vLoc&kfu zA0zCG9KDpG8h(H1L~=*mezX=GZC2|Im|gE3=P|{|;RB+2|A0Gt)D`BZtEvW{ohv^< zw0G8>Zvh>mK!Z5@p2xTpn?&OIPT1Qe)oZx! z!h?aZZiE48d}zn}u`S}rK&$5GSm#Z`Wa6rK}l<<2*7x)SU-SO%D>*G!Z@-JPd!*$#uvwhvkM#Y6$=>XeT6~?~^|p!j*)dD-!1OYG|1c)d z>|&r$=i&kL1^TaDtxmT8zJmsDolg>jw@KSip0PnEzOnsd)b3p`42`gspJ5G_$n-vOaa24W)@0Mlkw~<jSg=z_o&CqOi+^&qHf4Fom!k za|7sANxsnm?G0|JdD&38+B)Y^ziDTMcJ}<-q@+uyiHWclIl}fkP}fb%o{fyeI7L(# zUQIlGh_s0i$0+2F>+$n~N1r_73Tsoe1aJ4cPTY#0vZ~#1u?o`K{l>I|`!xi}*=bY^ zKnI;${8z^G*0QX{yk{mH$?7eU>2b*NXpev;APUSOto6Ed&5W2{ru3FEx(iPonI!?+ zn2_V+!wZ29-Fi+Ko|3oKn|M<^k8Xc`7uHq*(H`jfHGh`!0nSHR*i^7ZI{C(+VEwOq z&63aagj#)mWh1&xGk5U|(Ex@@f-9j9_eczapd%G-MN5r&mSwh(T40))yoxCt!=ZX0 zICrQ?{xiJCbSoRhlPg_me243cFpsx>mq_eXA^8N1^rd~JFq{>1$BX6yQUIS!dTYp~ z$9LH=X3BQvqgbCLj}O*BaXwlB?GeN7w-SJfhH-ezE>Z^~8ws>-OK9R|Lnp~mb%iZP zeJdlc2xE(K0)MFDV#o_*_U3cq^j1>TiEocoxCdo+T%@lu!KXEA4 zJWog(RDs?Y>JAxcZF?a6+3J*Gl(FV^1xp%hY6EB|CSdtf%2fzP<^j_HOtmC(UqZE9 z?bQ3jV>frQ#>%|EdATGpG03HeqfZF$mI#~5P5kJMaxk!Bg)s!oi zYZ(BAc3zYc-2Th^CC+HC#*H&%o%-aeeUfFJx8#=?i@Fyh>#6H|jjN~3o~;d8h#pV; zaiSayo_mH{-uk7cC-M8b;`w+^S`CB*ek;RrV4W{xhiVOuUm^|+Dj~F!&bJAE2Gq6a 
zNdL(RrrH2x;X%Jh&M@z*|FQ>EF)Ym2adsCTK&O7cDQ1bx1ZHqV_x;!jiz02~+9`ok zs}KOmsDWHvr=UO~L&B#9c@2NOlg}1U)xVA@a2dj{?n=k^4DON{G-_MIepnx*Eymk! z19d>n2fb8;YL-h6OkjhxID^n$-qo2xH5pICqH$+$tFD*ha9)r-P)s$jj6F{S8jAz< zA{6*9$%V2#lAte$i{-X$}S2~!ZTO<^p> zEo15g>_4Xgg=5H3T2OXCTyj3m6?x39UK%xiHKP_4ge%llIDO)o4qop>`G+K@Urm>@ zHG`rICAR^vLB>3T#AnWi?$)GF?+Vt4xu33<@hH+|^Vf5Es-0E>!Ca~bEcD25JR9-d zlP$me4FzVGsn}R!30+L1JPH3?M)`?#rn}TRH-)?CnPE&0&@&d|vr8LmBWP8Si3BB(Z?J#fR#*u^$;>;zpB*&8a#q514C>C6&!5{FvYb8|8*F52DFyYbR*u z%FLL67&baTlC!PuGKHb9^^OQd|C7FM0j3E|hVIPC9sP_OW>CJiuxue4u)MJ$SOEMx zNzQh=!s^PlL-KCVSw;fm4x-%}^ayne)=^tjr#*b*bORq{GH9f>G5LamyrHo}o8Ea9OWPx(`giJtoEr*vBwl!8(I{uJnmaOXRn&XG!l}&Y22#v(Xq*o>NAq2^$)SM8zLp*lB{!S#m=~6HCzn z;&Vqo6M>G&)(^1o(z}|)Jf4M<8~xEdFE^hQNR#ENH-S2+&CS<@qT#|oojWV97{`fB zz++$&|A*)4s-Z$XBVM+6rr*)#W#3k01t1NjVj^#DpG$*0u}aD-HlPXWHlMcOKJ#!H z5W&2YpHY6}=~%a}(e=BnVjtnb;HkLMev#0S;KfS^hmY^NzDhp1Q9gPbWl+oI)>wHC z&?+9jeV&-wI_|6POHoID^ahnzP`u zJqwnVcWS3$(jskW6lkIGLU{_ z5?cNgKRXNP-x)xdTb#@k4)!IFcqJ4D!Syyx65x5oGj; z!e^V_;i%BklDKf25yBGLipTJviL9bge;_z9e8t36d%k0)up2^JEB0-&d;ALlR1~Zh z4W{WXP@glY%2i2aGR)SCQ_w60O7-bm_eG-@eXSb`>5tYmbzTQzu3+3H1&VnE$^A%# zxOa4+e*`yPeRy+(+!H|UrT_pugoMdlA4Kj?P!^r7h>y7^>NAPuxzJ`yUA5o->+cd? 
z3KZUIMi>b9A%$}Xce-dPfq{}lnrS<=aT)E$tG zFLe4Fc)7<3;KjjGUK-74uZgMB%g z5(Q2FECUGq7X2pJ^5f_1UUwDSU7#KVz{@?z7sTq21C)gtg&O)1_PG8_Nlq5&SD->}QaQX3nQmUsa>eiFFRo7BBucvjKX zP1`#it3cM&+pK+Y9N?A}*5r)hUM_U^VgAX4LBh}#XH$eP)2 zTwVkBiIvE|Vr^{BMtosRqI%VGesfl{00B%Org}yA-~xAcV;XqRke4wfCSa>!tyBXE z$oo2;$ora~5c-;bn(Y8?BLbed!E&Zau;X-Aq%p-x_X%j(^C)2u=s#^7i|>6}bt@cG zQOj04pYfshNP53Qt5UQM^vCEZeKG2*3E*|X8JgCh@cGwMSYa+qRlH}0e|BN2$~Rz6 zrqoJ3v9Rg4KH5#rIy@n%p!8*TWY+VplwQYXpetf^H)gOryFE=22*!cp0H$3lyu)b4 z9*S7E@12iF>MbF)WtKXaF!n)06G#9>?D2ObI2fgF*~TL`v-JkQI^?n6R@O9)V zlQtl@UM;hmc-e(4d~Lwgwafy=bB$DJrB40078)~7)UWO`dv-M_q?+#%5(@%r-~$J4 zV**lUYXDBLR6%Pfvb=eRVD~t00>-fATDvcXM@#hlDXRzNUEj*Ww;K8e_jiem&LWKud^%zj|X9z zytlH|%>Y!1k>@HwrIDSgop4OY#yejg>oaa?Lv4Fvrio^8GK2?*gRaIlIZo@6Od09?dOJG{Yx-K|0Jg5q9U1D3dvZYXNC{CW$Yf$uokOXVyY{9?{q zCsvR)hs(PFB<^5hI29L6=l5n<*!{EJ2c9M6q+Ndenzg*OT@F;dj#PWm&wp-QpNy5e zwlKD8=K0ku|IwyJ#48Mjmcb^qu&C(TtK49F^!N7U!cm=7oNt*7zVg#E1(- z*-#_1{c_UaZqzWoM-8Vwq8`b=as!iA1rs!<)*Osq~l9R)g$h^K-#Wm^(Ux_3I zR+mU!17aih-22rUzRO|-jjjV4Q+r2fh~&WdUg~I3Zo@I~;SF;vdpXs~Q+0bY<57O% zXWyi;uaarJ{!tY~9Vq;h@|9WaPgRu(ldoUmvJcJsVzE{3na|(icqWxrg>W!>`%0S> zTS_BLnjHLUD(@6A;p4ig<&8uEUtMk76GPXaG@PA>Ds%0%M{*&^z)~rlasC(hjfr>e;gr#*w)B zp}rZeBEKuZ1K<$cIyO6VBr7X)G7@D{Zri`#cvJ{7Z-j7~ z+|Or>$NmI7)hh_rbm-jwM%JdMjwsadErxt_Bt8Nv#Dk|UCHrHKowc)zimkgP6A7Ac zV-fjD7~!T0AP8g_z-Mpx#yn%q1-A;wzM34TM+n35{IFdv2CoVAeoE;=7DF-=S;xX+F8pl(MN7)eGyEqUR@Fj5^>%)SG-P>MqQvtPX z18VIrz&@c#89)1czWSfN^pK(WkzK~>W1N^j^=;ufuG*R68jVoG!G`>in&*vUc1viI z4vyibX@%Vc?=SUS6Hkmf7Q~4BnhuB`o;H0o>S-rFe;K{BvEsz@;7uFyYPL=TIocs2 zLaxD>>hUG2Niy~IN(<)0^=5;dm9S@5fP-JbmPu}^Ie1)8J;F*?n^R^PHHdxMdb9Q$ zrzRJ`Hi;`(lO*ktywH(ahw6q~p^v2RY!JX|Nzm##lf(JyjcTAU~JyC(_kn35Q4 zeSY((z~>;5WHM@%P`#6|&mO`CE7y%-P_+Q7)Xgz+{Y$Dx{S{WA5` zNcE-D=E9Q3k;jfk!bCXdi}^{2UN!P}wfFoqmDIJkr|+%J(SHIe9V{KTD-9h>_?5I5 z^Y-@Hqs`1j{W^!3A}WZ(V&K|vb5a$Un zV@LK%%i(d!lG2$X_2OMMDzoUkJ;W4XotG@vV9Of+aNh0NQluAob$2(zw2k<%hokSq z!yUpi@d1p19v1ZvkhvfT0d3wr(Kq)k9f7gVhQK5eFT|U^mk;c`<#1$YQ0Tf|Qe!4; 
zOCbktkQooCxH;Nw+xA=nLn9(eF);GCtqsebG+$v#!$|aR=~~S;iWo@t*s9%;cPqyt zzc0c4+rX3&K-o|);_x$`m`E6VPk?T9+purvsF^pgbrP!dbRu#R27e-g`)x$uO&jHX zVr4dMOa0q{b*7p(6a7Fo?y2>L_Cu~~hLskLzoI`uZyq;HoNFTe6IN`m8Q0<%`LR+- zT`|e&q6WiF_p!%Ls2VZb`Seu!6by}8Lz+-?99$bGPe3YqRlrcVq@kpxpgNp zjp(Sofh;GlP=i7|G2dML=KI+6Pq45%@xKticqI4X+_7?(xr zwT-~e(LF>YROig-B?j|u&FWo6Kd5&#%0(gyVMZCY1rSU2(Dz@aLySo%fyX4kc==xl zdn>if)_^3Q&>9X~%Xp+39Gz&Uy=I`Inr%P4WxqiYf3v*0Ns!T!DAoewMQRQ@L;wPU zuaXFGPmPerkj?{!tl=cV(koNg2#3R3Kr2Ddl}V8Cse<3Zg&?ndYx=ZeU33xy(M2ca z_8bL^wze;xi5SKiDa@2tRltoo`(2BmlD^>(U2hy;tfO)4z|3*LfKC{P%6FW=6j`FSm_q zZp>D<=!+k5L>`V@8@d}7r-zJNcvjZly;;lTuR4h(7BpcV7GoaHS!UEvIU)J^+BtzG zP5~$3X^}auCw*|wx!p^8oJ;uWLG7h#`ZYZPtO+XSPn9g_2V$;%*X|@~pW3FZV5Fw% zh_Cfb+?#an=#W*G*7JUU_=j~+5#L#yPp(Tr^XLKNfX8aXry6Cxcy`ud&f&_5;P6>s zm+lO}mXjqMsWCe+d6rt23H|kmsX8?ACd8T5w{LT}BsBIoM+mDlZXscVC0_cZt)yK% z4iE*5c_U3~`g9;!AEeC|H@<3cOrG;c*(6o3k05yWd@sL=1o$|L)AnMzCB^j)4YQGZ zBhH(d6VNi#+08Gnp2?gUizXX`iD#6wk5Gw(1D;JN9qs|dPnYTUEb(kToE)}d!lLTA zMvsDC7h1Fdi+VHr&DB9AzF<6dlMd0_GD%OE4`e7Y$2mosR)M<%nbSFaOQ?r*K`g?n z48r-s(#I|~=S&AMb_uXG&y1ZXK%24fP6m)5Rc;ecnJ>kH$`HUPu zdBmChK=nVuSvk3Q$K5RnmI@} z|FOydFCKi{ujutw+--rKzL=i1wvF({Cb0VfK0_B*i9i1w9&ISq4DXgtfpG!;!`+TI z;Ng1Pp6;sY$D^+yMP+%r!2$#_xDu(nfVV5SON@n~@&S>~9lBIrWdl}2-O!1RpF*6C zgm!&fEo)BDNPx`8e7uE}IRawNeu)?}ig|vNsNMu^v`g+AEF|qyIDd!dIXqxK;@P`% z$`Qpr!J?k)@yKd?iP&F+-PYbj^dHLZ1vh9hyEpQ_;%VnNldbaS#+v%s_XlyJNY~Us zFY9Ypk?38HxcHzs4mj{7GwwYxbuP5emnrs=a@7zcEWmq=b2 zJ)7)5L@AWNTLRA+!l19dy-auZN?`vj%x|^4 zB1zmJkbo5$pHGvo?I9yLh?_spU82qoIJ4|J6eq!LyG2YdIU|e$K@bP)zBPCDfhhIk=@3+BL{ds zDRSXB0I#+NHXT*fZbz`T0{kb8wwcuzL#SL^QKKj}UJyk4zQlzOeSI!{B2tml_ary%Zura^G z2Z4Jaz#6wfEIKFwTDV|`1QTqmx};L^E{@HQiH)Ljw>^@mc*Q(LxGQ{$LX7Mh!*?hsA_iNGh;VS*x==#=S?5>L za-^8wEm+aWJ9cgB(2KJ>$u`9t*yxK-vY6Yd2lleZ#v9}yp-e(GRprz9^V8V~cg@j> zNyTb>tQNo?*SVQW24d7G_OQmxPAChwje&0)I>H4EVfCGMRcO8}m`*eKvm!uq3Dp1K z{!ZQetIpGTIvrIaZVAfKU0M#-W$w6rDI;(a9U6);Etjv6`g5Q%t7Xd9$k^Hd&{w{=z-%dUXkDh1^kJm_eX=SH-&f@A)~x)lZupQeXeKaj 
zW|LHaTll-_LadK2ydloIx4LRJN@&fDaN=E>Mdf>T7Ro8P3+lvT0r!M z;M}#+owqsR-L(0H!#QdVX_LA2{6wV{?dItZQq)Xd z=~j8>SBD8+%B$WT0mjYDo=@CEb^==9My4Qcg^k~DHP;F7uAX@T3trhMSL4LnW zI=tB=Jjm$~vTr?v_$vx`xZuwr1~jR1A!##Ipi>HB>Sz_37?w!s`mDllf7mnh^XC;F zgT&G1r8PB{CnTLo^r_cx_?>$zQKB!Rstw6v3W=AG*@6rqOGK0(JzTpcNKw5lYQ??^8@oYt~U=80E@y$z7 zzK!QaPn*knMT`LLW>B2~qtmu-G&FPIx0so7rfHRVd(N|_8NB?EaVy%R?;hrd`MaHu zoy$b??0kYkxG^aUWW^xjTSiobf0qN1MG#;gAWw*K0W1K*h(GV#(JWV)VF5jo0>S6# zzE3B!hx`hJGdu!Zr|Iy~~N z?DQN9!=9{>E+WvK!;3txCJn`ndQu5s_C6BJPGFMid@Ll zT{IK!VX~8<^KI;PD9CXhmbmLP_6_TTdsP6|&H{|sts?>7EEprdb)#h2A93!@F=pR$ zvICX}7o1WMaKhjor-wDGagvjY4XDE}KMO|ZT0aBg`t;GuQY>%@bFmOd|HhWTsbNcZT+O@V zx3*#l$6}6$`_5H`(J4@dqF6m~)?=%l6q-v>B%-7R?>T&gah>KsI?Zg0hTHaV0%9mP z0ZV>{F@ZqzLGgVtL+yjdyCy?)Wd$&9F~L^_`*fwIv%|Md7zNH3N0QT^2QJyZZ0+QW(6-EsV6^ClV ztoquC*x@9XyQS~FdR3cY!p(xLNz2WF>UY%XvcG%riuT zmQagHK!WTw3*5C=40Ii#Rm{=!c zabI$Qjk%8|s|;|xJ~c{;#-xCdOp#+RJQKVZmMK*Jyd`;}^ODN5$= zo^^hDbHM}pUSaU9zJjY9Bm84s<@^T;>S9|Z#k-r0!pwWg^B)L-YJdP_ir5qAAN5K52;+7UEXnWwY&C@ zY%Rq!eX7!dCsX81mPw*m)|R+oVtI}!A5ScA-0rb=c*}pr)`vt_jo$N(f_d@xY%-5d z4&?y#)3%H|o&dD`bCX3e|8!r!W{Ci?m_sn~vL z-*{Ft*7NAvW8z!ULM0TTO|Q|LB5EB+!E2|yEef=(@0+gR8eccNqT=~E9O*##w+isH zE8qMo{r5HQd0Hf>f>r`VTiec(AuvV>&vH?vox6d2mSS+7_~ZHERmjj1^OQGY+0?pD zj1kS8G{22|b0yB+$f4o>X8!AA^9%&pqsRPTJZj7w3e3D4>PViS#}R-AsbF$DD(a3I z{eP{TCl2y~ui&LlgLvDa-8J(^a#s|j(CKpe^Us@N3bx0z1aM#8xSHQ|voX<~jz5!9U{GR}%i=Uw4Cq)!xX9=8c@0iDX{VZ#yRWKeXQ+GfrJ6U0 zwj6#spiqlXNqb+@D3lB|$}H18B~}56{geDtQNgm~axXzgz@8SV2`}-hCDXAi9wPDM zVSV!C(Vu1V>Rzw|<^p`k;71mF$dEJ{fZRIXUb4GXaMF}3@V-M%Mp*g|G*MU!cXun! 
zzCb{nEjcx0sRHIxKzh%d*(aU)Th|<_B-#X6)W$g(8O`K!z`vF?@TD>5hU_ z3)>wvY>W)IV;5s_$M(;9i%=H+#zd^=p)+x+s8AJGhN6%FsZ{D?_b&izEF!1HAK`imh8rmG54X_|)V zg{02q4jz<322Z1_A=gjE3l{+JnI6C`L2qi%%~bae7~*V=I{)n4Ha7W^eN6}ZBgUD4 z^|5ro9nu5QIEeXk)N(EpY&EilYOmsHA}aVaiNFX*Z7 zx#{Oj&co^SlzmiCG#L{8f_GaHAl}L5zm3Q2$tJd=;qeLfixyr2l;$ik&&{%i5}K?fC&FPO&Gr-xqQl z5EBERbXtJmP1!^bLExt#V{R7#E#o!(5TpYQk;$Tf}ZtaIw`oFEfE|${h^5;Gy59LkZ=48q;9E)zp z{H8(mC2xJ@503v5&Ir(V&`!D{ES~tjOn?LkdK_PsqS$tKoO1f9kl=q@pAfqz5b?Oe zzfSZ+7j6|3gFUqw_xAUT1QVsmRo~?|2-?a|0geE3axMa8fUCwQxV^NBWtoccOYfa_ z@g5`}c7EL{fFG#cMwAStrKrC-^UW>w!{|bST-NoTr?&(Thj++6}->@K-Fo8 z{_ZOm7p>#l6V7@qd6!e*{a%fZ<9LsEj$4fXVjm}QTboBtJ<|a{o%a?w5Nf0ONB1On zufo!XcuGxR>A^3(*wc5wvl< zbzc&V{@hw<(Lsb!(qDI)@Ly(pcB9kQd5bYi=TLMh2#{aB{&G2M_qTjL*lWp$FC7|2 zLmn?WL3jqNj=2;={@n{3)}o6_wZY({V>c9?c04$g>Bkm_>AN#O{TU#Wy1}Em&^*hx z|01^~e_(Zw&T_f+FK@*RF{_v6?E9PI+X8oO%twLV{p*Ke4`$%>_JnaX6H8?Cck^w& zwEr=mxy*<*)BBN8PPH{>yJAYs7o?2@WYn3YT&`;mj6S$Cy8XvIaK{+uA#`M{^eysW zIq9tQclIn88=OzGV4dVyYw^y&z17?O#>qlB{qosDPab0}x~d!|@#H(*+-X>>i_yKw z%|3CNqySJ875E)>{sePE`Ekq+sK+->R=0;1YhET*zfE(QyT!dcDXJ}@jXh-n=bw!^ zy_hlU7Mjj!wr*t?02ojp)j^11fGQ(!3eKlfZE0Hea-8Sfzk)UY+;88a`mtq1v|KTr zJ(eca%f0n*Kio)!5 znt3>=-4Ecrf4xh56sxz?ymn=WYP`e{cVjUr0}Ko9t*CsF`~n0ZRv&AOYI%wyUO#~8 zM+dv-JB)8n)xl;S$G?e~e?26lcFCcp2PCl@``Ix9w*7MX$cV({evBZp+`Wduj+Y{m z4Sdb9UI{P(_kW!ir*Ha8n337PD`@&FMk`Y=pM1$XiBw7B{_!-W_k5NaVsCj)u$PkE zNtfr11{W^f0Z_G31`q_ijN!rA@;5`8(m`Org;v!_|M_V8a%sgi_ZTG4cPmzC-{J zaJu4to%t_s{U5gV#1K<()OHU)tI+(vf3p$>L|(VS#OKxZ|6$R8&0H%cudww<`9J90 z|N0vm5($T?`%m6q2J8Q>^xyrg$&FaTLCkPb&hEea>%UTRN#4}wKpsgL`!YT9e?M@} zpv%Q5IWgJjJduuD|H#|rO8@!6i~n_cVf-p!_9E*|^pFF*>Xs@!{{22!p*fW5O&Ws$ z8*OenN1@#2S6j&9>@TXjEdM*bofNOMxR0zC- zgCS!~*<|-y)4`c1v1JoZWlg(VJFNA=I{wq@prfrw6ZvSeA}4`MJ^L+fzFYn*093z+ zb(~C^_4vhw*ZOZMj!_9pdhnKdA}3_0oP5J1o-3i$zWwxzVzbj)b(FVS%)S1(Qe@|; zCi}0MTR=teK`WyCH9UOuEcm;m$^}fd}EBebdzp1zor8I?Z=W z|5c$DUr9y=mPu@CJ-`pz->uO<9`-RUD${(Wo|HH+cV`BfeqbtvfBy>DfuZ;*?JI7G 
z;`3-R>**55=9m6;bno}Jh$?7Ztv_2up!>BxP4l&nv>KS2uJqnG%tve3c8yanph<47 zHzc1plr(X19Ij|5G0^mk@bZt=8hMiEZ^{U|Z?%%_AvAf{;N>r1; zqn>e-(k3OfCVmJ-m2|dBV4gI69m@wl-#_)K_lr=YtKsPUXCcNSxPLvlXcu2_0ETIn zHd`j8~s zC!aq#8!4l!bd2xDcL(4)Te4?i@hNSF@zJ-<58%w$y}4^7!q`&jqbs8 zDZeM~&BQCVP%z`tjdQ65KO>5zl1gVJ&f>4rZYl6kvK|gQ?Mj}%52W0To$!FO)tJh5 z0*sp@s!}Ke|CmgC*&A8^kQJd>_47oZrXW~;C;RsB_y%{!N*X*dl2A_|A7yr!yBV*v zTyE3H3Lmd_dP1Jm%0!p6vw)9bnLmARsH=Lhv^ob8%4GJ#?%MCfJlIbxs#{q-?f7FHZ()1TON^J{xanW8ijo{Gr>o>I5+g8>TDZd1(--`XMJ z)qzb%7pp3E_(z@?2{SO$NG)UND?KNw7%wO6#3l@h@TKTS(HRR~{_KOPHLC}lkoz1Q zDr+w~Q}JH5jjWT?Y1aPr@oym8dY(#zZQ)7&gYpITxou2+nR-eSwiqk>if&_3l*JLB z8!pNXd`tc7wW~hLt=~yIt^onJ4HmbH5*OZ-Lro}6-0r%xKvbGQ=)QQplyJ#NNYM3Y zhmN-2T2fp9B=|}U{-~CGQV_X9VRhput@WYN8>Y}EClMvn6FT>)_WMtwf!BwamV~pO zp$|#}tO(3fS)X3x?~}%zz#5-T6&gQ3=DsePf4?dL`5Qnr^9p{`5nJq_cL2z~8PmkO z&&xDJ*det$uJ1V~)$bMY%S&#&wLgBbPs{QYs<&w#3{kd3B_I1_h@#(NfvJhH%0*?< zQ(rKjk?AEq_+v!4DsQ}aDq+FM@FIWv?g)OzNukoz)T+kj*FE0AnIX8eBqrmufYG#f z(lYM>X(Ya1{K!Tt>%iTrtl(w(!AG+#XS4gv$3hf}Vt%v7$zHR6J;Qo`3?o|-;mB~g|1}|w+ z8~(j@FRchVS`(lW$3}ZE_!f9_ux-igXke2n>a?+I{T_(KP97&GoE9i$IBuxO(H01# z#&#lFI%T#&;qn8WZcp+j{E-X(C(Dba?YAFgFewad$~Qyi%7gnM z!3(~KUhi|q8xADtx*z9Yq5#t$0*wwaowU;|zc2(rRR3&?zr9q(P)*?e*mvc|LOX%oRbqGFc(^;6gN~GUH^q z;|*UH_(qhdulXadp6Fk1X~&H$^i%gk`psov@u$ zS!uPKhHG@YZTQD4v_`Ya%I5B*(~D@9h$<$qJyl$ebCj;PAL>zX)x*yu63+U`9G zpjzQN(q<=S&7UK$xc>fz=lgvEc?Zqu#-q`pVQOB_KRiMQAAovA{^>Yn34== z0{1nH45A^GMk;!FPH1xS#n@ay&9;T@f(qNOl`E$#88!xtrt7Y`{tL7zWJVk(d5-8j zQS#Bn8rh2UqXR-UX>olTg2n>!=#Z@W#m*qY@N5?&GFiH(N!89Ag+v#o0g<_Cf-(Q`3taUNef zhu4w6*c8SKg zZJND_Eq1DHkTv<;}SulUbWqXI-6%k~39B9~a{feIeUbmX?=4X`_l#JJrS&*ZH?~9uN9e)o#=RD`>N52a`wsE^$jP$ zq*p=0v=a0rtcxWHEWb=e7=C!__(LBYka0 z_Z)nOfZR(nj-=COsZy{BSwPHIC=bFjrvNv`iB`3lOxLiiVq=PPnk7aueFyfoWZSU{ zLTJ_VYkecz?d7|-lJlByIT<7}-OW^#ralp>rK`E-lBd@0qwVF_)<<=`du^yTE#otX zq3jEd0HbX6cyo7a4O~d4W8-nquF|S@@@Mlp+yjX$lexCY9Ca!#a{0iWM$0hQN4V8y zF!AqTfdrvoyVuR3XWl44TPnZ(<;Hxh={+{CJBC+V8Bq$qaP~khU=QMkJQpWDcd-ng 
z;BA||WJiO@<;;hxFRqBPZ(0b)A{F2T;4V+L51{L{pf)k#nH%WFXK0Za3zA=^U-HK9 z*lHz$VlsJoKBe?=2}!%`{LCsirpUQX_K;n=+(LG|tbl4#GnFCQRr~JUs382^{7`(Z zemuZMV}m=?5>cNQg8RC`G#c7M(6D20e2b@Tl8Cnu4c+(94>C+HkyrQBn?=<)vWnJa zYM$2oa<7N9j=__D$FRmJeLI{_D`y^^ujP6rN@G;y)d;Gz)wUgr{bdXtXd7;O!@Pby zIQjZFY|k3f6^a}s5ku#>?f2=qPw$#r(*KT>G30l&JIRR}r~kMexn^gWC6%|nj`kOo zcl_B{v^=v&MRxMt0}%opY4MNbtJLU4bI(}n*-V!?Z+)jCa`0@kd|~h;5C{eyTc@a! zydh5Wk#PxhPk!9)Hjx(<6{wxei{IFm6qa;{@8>?(zwLXE0Y}pKDo%)B7M@2@V zZ+F2kv5txxF88?lmqT&WTpabD*Uf>~D!K2BrJ1Eoi@EaKZ$@*LK*hnPv*t(1B1+SE zDMQ^9JP+uzjZB2Z^x$ARo`VXuhn3C=@y5S>g~l8;WS(avzMe)FCtx90fU zHY*An%uugR((xP{OogRAUdcnxYqT&$Xi_b(mksHtVSMgl-I zl|!=4Mk7HAWi&B3Dcw_al|q|20#L0tdnV}=(}ANNmF(X!=}6GZO45+5y4eC?iO zQah^MuB>+EJC?SHzWqv=*Dwq(qdU}l(mLh4)lmv!e?&H=2${58M~nV$rZ7~nu?DjG zHDtt}_H+zhI~iH%3%y0FuzM zTMQQ*a4bhN(;O!9WDg7$2s$Ak+64(H0o%{N6@PgvkDFI`>ouGlck_J6C9C^hlvI+o z?Wy*=bf4!syY074KaQAqaenh|baY}my5rHhW_VH_8onr@iDFqj*wS3S7mY`{nxv+5 z#}k)_F|5GZ!;b5PS)u6aQ05cSagk1jkc2O7&NB2M6TYDvJ&%Pds6lK&O7PYJ+bY$r z0MIvGWAXmlUA#%N>ZO{%mef{6V!WTC5*HhnxJ|!{^}DUa7|^$paI!d5#ch$<`xAkG zBz4{8btTua6@cTlfhtaBEQfr4URZRJ3Xzpbf!>*tcU zC59!NT1SZba#ubwfx$EC1eA%)-dqq4nxOg+OG?T3wu z`3TxqU4rnnA|aN^K2iPQCpsOGb1c>mf3;Qk?AC+kip2h2#e)tTg^>vBxC0l3(jW|I z@;Tc8^prs`tu*c0O*$6^vi&N%^89#GzUvO#uiP{hwW`K6c{M?OD!#Sfm4V}w+Uw~K zkioB_TI)5Z`6R_zjHhms4IH1Q*CxomuD_v0cM3J`t`6_hZI5!V-i6CM914SSS$?o8 z1WSo4tpC1&^3^m2(JXQVw#nv-7wL^QZ_(7a1Vci{HTP)>o(qHSj95BYcbB;*uc3uH z*6$vgpWc>-QZFK-UzI`D$Yx=G`0&H@3Wz4 z8xlYR5OrW-3fS2E||oe9-(AxdQfm=-$VXsPwU-i;#)$pS%6jT@uG3;p18}fcPlspNmf* zMtsUe^HSgpbQsy;V(AA$Xh3#4)Du1XqzfuZ*GQK+2$?>bJGLBRb*~hX9gOc=Ymv9#ci=m5_K-`+eo17@m8cZRM7H zo}Jb2_6$99hYM?%7we^$$|`#*l*cT$2&mrueU-8J5p;axU2kUp1j76OwD;A2QASap ziXb8i7_>-94xywH14y@Yhjb$?Ez+PMEsb=8Fmw!{bT`kC!|7as?8@2rZ8s>#t+DYEAv`b!o+*A> z9M-yzN;(+P{;%A&$5p3%RNX3oQmvFlR|#Rj)+1L5dg3jEt>hXvh_V%4s<9_i9(w8h z^#aJ4$f8My5pZa_BC8;~^%u)TK^P>zC^ySBR#`|}SOs5~KO!uQNi-7nNfv;$5nW>)|#kKnruV**-dPQOQJGt)h>l&mLclZp&ygk#VmvhA+0pYW+;K59DAVvXk`vNlHmOI1c}HPW9FB@JgN3T$X>f 
zk7K5!Zi6)EjWuIB%ztX$oqt)Sk15W7- zU=7PcJ#un=35g7w#y<>Lqhu35M&KlBn)BR7w?!zFs6O)dFYWo@PagM87mI&}kmr$? zR7bq-(X-e(%{0>A>c9q1YpALDr%a zda4t&K+QGg3)?TiCod36x2HELFOK)^9-V>B3yKJlb5$YxsfV2hPpB9(ok=3qLnGo7 zRcfxB2mNZnYRwnLYX(DzLPR9sJWvRZUDcqo1;oE}HRB$C-J!+Q!5Po$r1LT(oP;rE|1}X-eKe5>>v=k4b9DkN0qrwUryJ;I>lx$7+U( z<`-WcHf?M;P*_t7`AVTnQ;Fp3al(_W!;KF(oF5#>H@-@-#2YLzMaGStX&J1o1-&UN zKSi$Ad@)lM+?-V1u!SvvByF4I4xEEho%^_L)SD~hr;PXk@8BETc23+$js;yB5jgeG zmQkPlq7Yt(rpdCHj)od79)K^A@q3nv1gDBLJrOc}INtSaNH`s*HMN*E9oNR1Xp(eR4_iO=RiF5>o?Mq7B!Q3P277FIZ+&OKX}!L< zkW!S)emx=LSv6Vweo7vnK}BnJ@%JaOFyBeSE~w@aJL(_PnWVwzghIX z{)$P1$+ugHy}Pk%*XwFkkgSQlTfWaj+uq!2W;xke$9Hkaa1_q*5&LaO$2DBbWIPg_HP$keZbojmaH>6dci8a$JHrVvAA0u@$$Ef%awz&KSxz;17Rr06#PL znj2i;=eN$JED}nTd6Ae*Xh&jmasFO&P^m#@j)~!M)eeQ5du@}2mA$Gj)pNCOv%bzC z^Y`lYLOK&J3GS!8K4!sUFRxt_P?ePsQ*TVgMa<)rr8WwkHLu`n1sQFn*iw`qB%{2K zJr#GdFRnZfVMW^RWeH9wNGr*2#leahDN71igj}2EVi!&Hl~2w@N%L`C*`H>*Ew;M! zi?xEia!%Lg$JjrDWnfG{NVKyJU~j}`v(*B-(~Mi$1lAUyhK8#?kSe=o}e|AV_gQfNt@SYBE z>3j@(aO+O8XvWLhQ4@XNmyy4$MbHRA!0YWi22CS(@)iq zecsG*5~*68IkY6-wAJDo!uHXiU>3ezBU}LK@;y2_a1&w!|JA#7S9({O!+scz!P(vc z1KTMm|KiJ4w(wBG*qIpausFSwhTU>*X0Zi8CgA8M%~5V0alml5B!$yC$Cwue&eZIA z#Hu+HLqN8{RB4^DMs}KZY!W9U7vgp5O|)^jkX-lq)haSTRgy zH|PxRpswFrs7E+&X;0{y_YX|_AQ##1CT)oyZ<%4^Bf@fnrueJemZScR^gl6f1u5TDU+ubeBP76CH_~dxR|r z?tO^deFF5L_^8@_>mfhV zV4Zw~&zxBPDt}+lsm@+{Ef_3073R~?|6`%@Vo4dmism z)kmVPoom^W6(&OhBuk>O8MT~t^Iqg6m$HyTE?in7^W3^^^He~>gRpL!VBri&Z*{H^ zE3kL1H);G-e59g9TKBw8gGkI6AMzWoTawf)V;Q~?55H-8YCE;mI*= z4BWOY@XhWQD!JS4NhO5*xU!TX8#j&sAzg;TYwIY2o^y&txRy;#c&-Hw1V ztB6CTB1LjbBCE`{^+%nxmtAEa5qiPNeb2^Q$+-~-QmM#Kk~M8(bhCFCB?oXd%_t(Q zm$9{DlI!W1zVTxFEQ|N?YW+QQRI#_s!7GBW5HF`Wcsn@Z!Bl|-v>Z7rIOO4 zBa_V4U)qBb{w4W>K9Z+;JP*zfU`p}#TRayEql*>}UhlD}Qn9M)c6DY`*$?kOn3>mI ztB?11Q75@{WuFRw#()B$4^#oYnrWvL3!84|8CqOwh8(+Ydo(PpncComs%TY1cM}4~ z{$NFded5_^-iG2DtqEKS7$TWOp2CG#KM8I)Ufvz?L8QFe8Nj3^AXkby7n`h&oVuem{8+U=o4q>a3JRRWHqdj_LVw5Tl#ul z*wqnUkoVm0=T|yzKXbV#7!wiOxq;=B{tMmT;XSL+x0TU_nV@KoZL*;{$IVnW;dd>` 
zo!^`X;34FS&Ys0e!xK|F{-Twgm@)s5TDRIZzlzz7an)>U?Wji1)BjAwzLnX5-$Z1n z>jbloRU6u=<^t6|Wa3zz++xB@pM1|(OT`Qusw)DTKe8wMSd#Au&UYJr;4n4yB)28} zJtF=CjSW>nl^$_{oyb8=vpdNCtJzDip!;n6&`O#SNtfaPQl+)oLd4u-B26nZD9sj= z_bUn@g)dJsf}XH0ZnHRxcRA2vtf`~tm@(L1J-pVLc8vk}Wpb>{l{@NLyUYlc1afZ?5JlcX^T`Ow1B_;e&BK?1 z=2bm$bAAuPP%7u{X9nVsFS*bCl>?g%pC*(TUuI&m(vD?-^O-FkZ_+DC#}#ApafV%B z68hlg>(Qe1eEW=t?9>OQk`_zYRCQ$PdJb!%4w4pP=v<2>Y7kfZMJ?l$tcVkc>u5NV zn>@t4N|qCUU*r-y`{qKymcOB1U0O%B5Wl3Xbh3Q)xk9vNv;N+B+J^jT3!78wDWloP zY`gb;y{cZPRrZb=v(Q(kN~fF)Y-e~dLq;4-GKJUPO+nuIoP)*r%9Dq`mBz{h5dYEf zC<`ZS0sFU6)LGsLan(4eJo#KJqf5JHA?dO~tz4_7YWDQo zlE7s>ca!>3&6%WXh_jO%H78|`$74jkH!_znG@|C+A+DKscyd|42SVexL^xeAGG%X` z8h*PR_xRByrXC9ZVwm?fCOV!4Cc(xOsAla)*vSXyyIrB zEy$hyWoykT&l}Fo+@;hUl%E2Zn>)D9n+f6Rsp;sFB)@IoE%VOdZfnyoGpe# z<#FoaQoQL+Fge+4Js^J9?|N1QD-N9w9%d5&fJk1&4i`ukGzLN3bRj-wH(sjAQX7G| z#`hd1bI>{!e3G?RRc8Wk43uTv`pq^S^L+Zvgcm+V(vIK+eBxGMvT<=1m?C;jwRGI;abd`aQHVha)FQQ6}jkN=}Xo?m{|Cvn*RIUZu;P zA(%Y}yHl8Y+drwfX+P=!o}+^ZS2L9-*Yr8DA}5gYqh?oDP*=#|Zz~Asz1@G)^l*$? 
zQx@8s4+(tOIF5S4+k>R{G+hDD;M#N?BXhG)>^~g5i)CK;I!e|kESrJn0TmFIZ!x-E zZf9ZTU8E4KOI$ZL;LRDjQloM6er^X{3F9ZN?%i^Ni6bXSQ!I7wl=L>10h2=;Y19z1 z_i4YtdD9rDJW0z%`JMWRJG7W)_(B}l5;C6FEK@J)?seqz^eB<>(=}Q*&)ZuvM>;Z= z6TP4U%50;9kqF~mW*80-_vO33L_{VgjU8bTkeBma3x`7$yni!KvZdf%DAE0wh2B=ahVv0+}Kjo`M@iHuFo3yX8|e0T9icA+JbbKua_G$-PH=p(ty zZGUm=CRmP&mMV5wkMK0gL7G+EJH%qXC1q7&YhT$g|W39PR`Y2AFJ zn@ex5S=q7>Xg3bx(&^dKB)O6@Tt1<9T%T!_Z!%I2?v%0Z&UNpA=F!KcxSt7bJvyNy zxcZ0R8YDkl5O8cs7l)8aUmSox^YHo?+*!}~F7N`I%LUld zdxIr2$3v|eT+>}by;*Y`uF2Ho^leowS3o@t@|so5#sli)RK|q`&U*K7W6%G3dFrYS%xdUo3-hdx%h$lNOn$iuf*# zUSySAFN-)x9BXAO+3_D)wcuws&$kQBZFYHW^9l%Y*x5U(XQY4za|T(tE(i;r^HEkn&Tg@)&1UnGHHOFncZ=OOyku;1~&ig-@cbnCrW*sAWF$*=tSgO z`bdxWqF^{Z{#>+&9Nc5=_AT`XGo@mYdOA7F^E-PC4>COS`2+I?Ag8~C`7fg=u>+QM zj5j%wI{T!(rgF(O%^E=qD$T4F1aA!i6EA{6E=7>G(nZPma>#N0PcUOpyI~2bi(}O< z5|7EI2n^w{OyloTk72`KM6TI4y6pHmk+MtGt9Cn%CLSA{=;{$)&S?f~o{L9hw*w)J zKVFd`2?AhG@ucIO#-+Cjb@0NOav+HDRW?Qd2x9C2Euj?;H*KhDTj+0?9J-O2eEFrbnQ{29ahuy`Mv-I1e);#tdXo~G~r(*Ekw5}~U*nO?2B}84 zH{`Tt2p5q2LpwOz$@cQgxCQ;3(4mJNP}$%!dktm1W&+{1mwjVC zf7W2dzagG8%F8`FLndG}R`%Xj&^v=p5#*UO;J}V&xw`a_`ku`6*O98-I$Tbx4<3DJ zg%w#R$xC5!2b}`k)x&KMhOH!_qb6P!w~A+(n{Zz;WPBHdHb9gg+ed12j*??w;Hs8` zy<5TiyMp(PZ+`d@W&wFg&BP4Uo+J{LbmQ`yQByUZZdnKV#*+Ci7W&V8u%ZWLU&7=J zU%dJ2p)>erYQ9_#RXz<$yGS_;r`vqnn2&xSk?c(RKn#{t& z9cKcyMvC>j%KKY3x&hPYz7gK$d;ZEHT31IthIeJYR8XE|&{oaVQvTpq=iZ?I;z7pv za&CIOC;%L$xegQVn#u413knS*^zv3*!wPwm{N9iCKv5MofIFw@R_TSW-2*r( z5y5PbS$;!1hCGk!N!bDb{c${L?#;+~7nPISOvl!Dl~hyj$&Jch;AGg<=1Z{6ds-1) zw4w(AV&o^~n3KhVHx)Lw1kFSFdtmqOBwfj<(->Flm5khZ}#RuqY{kjKaJ@tF4KSL;}UX=qDqx-<-wetE8{+UeB*6x6baGW>hHh>s12lU`f`)A%f~DB_ON}(tp#cO!Y}k`U7oa<^Gn9$EQ-|M(Zkn z%%3RG35{9iDv`;-J9Z!6SFVl2>&NWnI~5+xlhIN@XMsUeU}TLB2zhYwd7>Q6;q=P9 zM`Xqmsy#`-ay|AHh2938gGgyf~xo z#||Z?B4f$?UQLsB?>u`iniq0sexCx(gy}cz#eLo=uT{4x?QCMsB9U>($uYPJyZ9Y0 zQQ*)8f^cH%arI1*ZKVwjsQ8j}gVr9y_}jU)i)Fd*>BaNhSHHvNU)m$Ev;R^Ps7NL6s*Tmckk8|w(+D&_VBq02t{xwVhU|4!r; z(|sL_Em!NTwMx>0vO2>uh)E`;frN_nor`)vdHrzd|J{BwK_u#A7%0v(dLeeXlvQ2 
zz6g978_+GGJ%44`95FV5>s$-iTJDJN;%GeX?!qbdX3~%q5NYmqn3nul4SGqKE?t&* zxgnI0ZV>a^!OE+Se?WnS#^!_TUV5gKMGC4Le73Q*Qw43W06-a!gs#`M#Uj_gWvg8* z2=0+PfcM$M0HqhFx-*S|q%Ty9d^y+#nglM07!&F4f3CGG{^)PTWj$-RoI6eOC+%s9 z|Cte2&9@TUB9HewrQ?0OR?D=d`OjgoW^<1`ZTJ2;M~U1&3uZ_Vy3^Hc*!IIxs+$4# zJ2@QL$oLZJf452pGXZ@Utzn5F6*G80HG1u3>$Aacmxh(`JOFk0U=Jg+?XTE_%Ot=% zpYQF*r|`c4={) zhz6tG1{|Am3%ofYN$dKTAiaD0U|kmu@*7T0{>qPb-}J#OgC&}{bQ-_lcA`h!A%0q3 z+t0y(f9IM9{*D^Kyd_l5Z%F=CdSaJXRM*bzxhHLlJWu~kVFpgl9@-I>005~FLuCyy zS+Zm(Q;U8s^!w;K0%l%+4m{t_l)evgy*I2O7#_8LyV^!}EnS?xnrJ=~Uo zT!r&|h}(E8c3%acPB-z5=~x~GOgibc^k1^`8*rn-^`TYL|6;?letnvJ2M(Kc09@o# z<$k;*%v2v94A*I(X8yBfXWo%wXlH?pTcM1#tUuJY<~pcKvT5pQKzszH;3Rh$^02Ad z+B;wF{_m2q_YbTpR$6`}lYLuFj~h^{EC4E>H)Ww>csag|_BWFgD9+HQ=(2#lDaeNQ?#MiAwISFpviMBI}-#1`n@zgP-I<-yizo=d% zE(4 zCDLd_=}jt2nY^c#l6ExwhW((B5iu$2PRZeQx7KACBU7t;CqTh?g)+;W)VWAFq&~+! zzF{hBcKyrXY-*WFRHC6N`;5G{(|fu=7&=w88}ObITMyexTl@_Vx(8XY zBXp)bEn)-jgYCChXcO*0o_jgY8_M4~aq*Fe{Dkl_O1(qIEAB43sj^^I;tWtAg3CXD z{fQ?=*o~H_!zNho*bnQzl;m)PWz?8={^sI)SE?fnr=IOh>2^{xd1I3d?E#SY_mk@z zESRZO6Lc|MlK{X_da>*c_uSpKy}cxzzcw6!jIY{` zLABP)v+MxyP2RhV=olE(;AR!asIYRMxiwyMD_U&{c@FKpLgUyg7f&E zh+V1s-em;K;clidpc}wS>2tLYj9?XESk6< zDUX_oXkOnK$B4EzN_F&}-2-HU6VGTI*;nUGv-1UWEItX1Ll``}Jnl$}_#DMqjo`JV z)rMD3xHxd^5yl#}yC%v!=7H_g#Iy9Um~i|#&KA_R-`j%%5lr$YmNkzB@T;=p+zxr)(!Og8BkHM!JXMGk z1JqEfz9_F*H#o&myoXJQvDD?N)eTLA*^5&Pat^tJCMMWr@L9gyM30W zt3qjMyCQy+UdKhw1%t=!i{TZlq&{@>8k2=opx(~?i_k>Ok!*g=CjPPC?3|BCW_`p) zmS0E{*mm;Z&G}r{N!?b#MQJV4=CrkT*w(wLb^&(t$AgzGB0d5c6(_A_2wYezbNW~l zLzDm9p0=!!OLO^oQ2A*-AJX!&YG|q8JOI(nJxfcfvdEjD-!Ukq6bLW|}S(KZ(w({OUS*!}a1?I{(l$C5ahz*NQ1kBeQOn*5;$Y;~1`s zvOIh5PRKI{03Pk(Vx5pjXoo)2W#}qSPr?#y=cPM}(O^5GRF`xoczOTP`K^-=TtEuh zc7bVQ*z;s(IIu=obg}V7>fWoc)%VfUH&8pO~Lu178G~cZxe(sfo?rae!a#dCxgu8N&Vt-Y{}Gi2 z(kDA<`%8uzGDc|02*VM_k-w-oJ3g`Go!gkjYyI_Zr2IH)I>N-1B?ZPKtWjlI68;nM))(a`5VNIQSS`Rpwxs!OJ?FF zb+cn@`i|gm>IhhW_IV3-_tfn2MxqTbH$fz4A{zAc%cyosI3{430j?Ds!n&BAg?e1o`~)E zd^tomE6luw&BtIkg8$}O9nG-)^v~*@Kp}7mB6FY4j>TWCUX31op%W2)IdqrO?9t-R 
zJr_NP%2;W(^ZcCxA0F`WU_~jar4p07{(L6Kbytt?AyXoF45;KqOkR)vmhddlw;=w0 zzsRk_fVR>Bpwh^nIp2LzrfyRh2_+14!Jm*7q@rFrxa)H*`jVt(JH&0teO$W!0;tfT zqs-CE+b8^~tM!%aP|2;d!r_8;hkj&IGYaJ{x&H|kR-0C1`K+g`N1@wslf`E~5d^|D z2XQ%8QRYevmrMg$1b*)rj#K{5YOS^XjOh6Yg|*bIM+ST|17$sI(-j>JvN&l#MPXsU zH%*Fbu;BnN2lj8cnNy7e5g+vWt#(r;10sl%mA8*~UD*})?)DFa>9JlHp-@+D@gWf&m`A#GsIE#$+>283ZtH4eLCdx9ojj-5K2no>E z;?0BDf1)%NrN!q!(_}D)&nu}ulXe$cf?9Omsm?)7I=-DaR=c3d8J@vw$6bLp8Fz1E zaBWgG7WTyY9$!eqIHp}=N_Y{=Kx~Wxm)i3~E6)#b3fr?h%GZkK>UVVz_OAVuH7-aiB1$&-Wr}kw@Sl^rDVlO;d(_e_gF&n zN9vP=%Ccw^>%l`()*q;{8-y>uXKj71pr_;=v#~x|{p8Yv=*N!Go?;5}A3?UAADyv- zn5sk1J8N|UoqTkHaD=}vCoM7te^WxqbtV)jSRLQ=uWaHCv-nyzjaVVr2U>?De>|@SoF|yJ)4&21nneOZ##eJ1Q|sS|t@-FOQ~F!bG$Q z?!CL)tyE89vV7@B=HK_cVY~Sn@THbJBNUh*w*(up^LMCvzk6I}q>Uc0*&h0$i_DtK zf(HEsg*6ULM=O{&t%mv&#`OkqX61ZE9VhpeiUyCw5&l`uCoM3+ML@PLd=t^tt{ILY zJaPzOQ%ALWRNAmB(!D^9{pfBahU4cdeX%8#^y4|LnsN)eot@JvzCWmT8 z6iG0m^X~63OU#(+M6C=bZyED6AEzZOV{$DB=Ucm8XW9DTGwcY_Hr#;IYwH#c)3F*> zb)xfq3sCEKII3xSdJzXLDP5*{Z*{TTDLXw!x7(`Tv?@FT%SB|}#oXBljeXwlWcTf% zu`ng|Y>NQ+rCio@-}a!K@CpCS3#je+!NyIn=6)+hR32pd-(4wJ)B&DmIqKOik$f;$s`DCvf=s!$7db><*wi82Q$AHX+L>aX zkRhh!A-2pTx9L`KGrIH+RO7-g`POJ_kD>|~l#y%HP_da!H3xlquWHFH%O}!dk!8(x zRy>Oz3%;>WuP1d6;jbIQBKOV$f8h?-$8U{DnggCFr#ee9Ge7p5=r*>}9*EwYkFf&m zwIt&p4uI#4S6kR#DF4l)DP(lf5&q-1>w%`Zq^WSJX{q7}KT@Hj!L!VvEQsV z;zry!NS7a1nkTIW|A&X-+L%)7vxk zv^-%gy@6{p=$Cuv`P+q+z}xNyh9d%KjoZNO!pBy5@H`DU7bEUsr*jT!m4zZfS|82o zWkOXSklQA2x}hO)>w&kH!9419g&P~JtZ}-*WXs$~B1!H5-CHs}+9YplUKbd;6z3&( z&aw^DP6|50B!IPt*F<{hq2dh~RwKjqOz%r7-TP4-EQ1S|eDW+q{WnaDO|ATzxz5`C z%#|mE=bJwcmxm2T`$H(vB)oEHwY8m4>j+1K892qo;<(;--BGR*XV zxx6IgtsW%Z*i?T8~* zGI90z5wBmpUs9u1O47K-?+_ugOVQGjxnq2x2qK@k|HefbzmG@3A`WNb(U${nCAoWl z^U#v^k%#)5w~5-b`jTrct2^=#vmXfjh?s8&#K{Z9Xr4o$w%iS6mIYq! 
zyVJ3v9@VaTQi9*F$#<;5@=PW>YBC|E?lST&kLJ7gUfBZ3{Jn?a1ajim9yP&NUxt$ z#M5yZt@}fGf7@gAfQorjPZ)UsoGBmgXeo0A04}MH+nY!}<$+G3Tzw;AJ~(FOdVtE4 z5l4Kw^GdQH;^Zk_JP_A!eC|;{)?2oRHD_O1OC=2yAxqHowR*vtR`qz-_;I5aBk~Kg zf=<)J$;*4b+s#?962yuXbo974yz2h1&e{rc{kDS%d6e!T&#`0gqJhdk-m!~3G~AB3 zCl2igc*n27r2T2;T`&>zqwAaE*`=B5oc2DE`RcVLg(xu?VPEgE!5ENh?yk@jDrsL- zL&fXldBW*@@f*YUOY0AqlM)&l^E{a~}vZ!4%EQ6d(QENd2Ml$v9^{*10 z6jr5FJf~*hMs%`Q{{dm>v5C+l+H3SrOmw?Vp>6ICK{eGGeDvw^8Y%4{7c{s5^|>a8 z9I_Khl7Li>UN`%l9so8*<( zm$5_s{a@{#WjAL5nMK;@5!ye^;o{aQofT zXjzXoOU05pZog*=)t0>0PnxGPSRO3R{wIbI?W(R{4<_4}?(DB~Q7+Co z7BH1l4=bvS#k@zzYbE<}4@XEB+bNBr=+c`e+boo*y7Tbv!mWA^T%ARos!RCiOb?q* z%+3j@ij1Qt+$(FTQVn9aNslgoI-1TSS{;_1OGnt#lUy!6Amx+`d04{!a;~~@cMMgU zui~#TSDkSSPDDlw^v#jfyas2lI%vhM?g$=#v{$xBuklx(TN|F-h1D=V9w+sVUjnYv z5b(sfR+ox!j$w7vHYB1qF|93JzO-XDSUREYu4i7d&%?+wCp}iku#{+0iT%PKFLsf` zM-#WiJt;=}MOu8osr(n~b#BXd#IxgeV!YyaX4={<> zxv9qYoR4~iatYR`bFCa6y{i#el(sntB)+(wHz-H-)j<+@({1%a^K_{w=eVE|1#3L< z5;Pz^R&6F!w>&gd?LroLdrMd8{hE_XC2XFNbSC+;~+U2yes>}p$AL{iK%|FV{W zU?1p&svcJ)5;L$%+C{3nP#mqO14eNvy9N-7(sYWP2Ob-CKA^~aC5pUMIK7SLCClRI zCmd!q5i#Bq?oKsmW%IrK>hSoIm0DG=%i!fbz9DM2R7urTqwIi8#2_DMfvubVlfgMt z*DZuVT~wnsOXExXCl;wm_7xb?9kyb6K?XhV?ZT||CD=X$2;u$84 z!WJjiiSg<2fN8OB8h(S7Cr~JsOX9B>1)SvR=@ql<=Tu+xd2Z(ZPe?1Egyzr20)zbL zM?g_Lga?CSpDB*G3dz0jPKsX-g|EMvT1?&!#r@r${=fa;H-G#u!vtKvLJgv+G^Ipz zy{+6wJsu>0O3oo`7CVqN4&RV@M3?DPdcSxn9VRFU=45K4&rm9#eQS=79%hWi%R+c{ zO@D?ee)|D*!2UPy0lr{-4S?Mtytbt+O^y&#jTKQ#P2d~enb40au^oT^B01C+(Q556 z$5Iyf`!0n44v*-a=p|mfM#gz`G8yd%L4cLfNa-QzOy3(v!FIfVs9FEz7+Fi$#qe9l zf)2}pqTm3lq$VW2fZdt;Bi{c<@A0Kc3pSHM}!JlVedhOqzOJj2`nnjeHd~aMVASn z5VxuPm8X5C%WER-G-lYyui(f2%k^C`iYP5mWWg!%?87U%#Gur8ShX~B=M&=pr(ddJ zsVVyI{l3HI|FpSX;cdun7VaB8_&ztUB{2U7l2RKpk#QHAIsU%&|K+EmBDdJ^l((dL zOet}Fy8hg50L1jK2yP|$jvQWg>*wEFHvYTinF5GVXl+u?vA9xVo3a)S%hy|B2Aouyp}BTKr!zBmc9Y{%`MkI(3CK=C&96AMp48^`xvM zY1GniQ@ zfR_|nJ>YNyEbLoEG@|dWUH|it#T>96bs$2&bs7<&U-|d5fCa1<)UbKs#;j+o_lPHE zUNVTj6Ai_YtH8<;+5OdJJ^5#dfS1?=q(%v^lyrOYIR-M?29a5VRPZFzRbAbR=pVFy 
zU-FP;WH@k&4rLVa0g9)~kS*2m>FVVKs#%AuPcbWj^BA_&zq9#-v&-7b`Xy~5<5leM z(U2)dTjyP#36DX&qNV>Pr-$`|uuqF5-*t1U{=SjAEJODFIfDV<(~S(#Sq%4pXkLmpe%&-%cvSzzXQD1Ow2@m>Aocx4EjMZ6jLi zwIIojKSZu<#@Zj9cXu9JTXR|`tiHr9Uc%``NOF&;e5St)!ImdF zfGN7|5gtWWipF)Qr& z75_Lt^8Tl2hor2lVt+4CuOuafvIzl~n|38L)@F|+0E-{Y4-(TlcS3R+SYS==H<(?+l*l<-@o8f%2;YWi;4Uv)=z0Z9i*ng zSACe^!jHeWr>fLg7Y*ge6zvB?PooYV z!TKkI>$lGm-X6K0Eut-zGB}OA^-TW|J4=!#lV=#MIOO8o8h7wG`X?6ay}KUI60dm9 z0yLr?AiEjC`c&NNs9XHi2fDx8KJZEu+$fTt=IT$c#unZ7`1I`81NaL_Qt^S)Q}jpm zJj?O)Od0@O_GlS~n}+#owtO zuFHHr{G~;^g%nrbD1x)HdpU9^T4n4od{^4;E+DSICoPWc{5b9)`R^1SpW+uvK%g-! zzZrlFU{)!Qaq4IXbGg4P*j5PakV+nAyjQmSfXnR|TWSV=40g&8@AEKOkWuawTc2BA z2tHvS@mTO(Y^kl4`Xg@u=eG6>!8u<<_o5YsZ0D^P-%oM@ijssZ=V~IxCcvAz3V=5` zu0QxEp?rYiNF7*QS#L#P3AQeN$1fQMvGCN|l?kp)!k0&46xS6kd ze*Ud-4ycPrJY( znD7I}6qlnFr^>^!6X#PqNA-l($e;X9G?*r`W0yxCTBj6jWd5?N_UAS)-zU!T-ODeb zre6mv={S48JzQyxm@dfSIaO{szDQOwZUIwtQU1h;*86=b&#M~`-x}Vu*c!i@!K)zA z){aXV0b<`C+tE<{k1MyjSI9bo=}a=+J~fDd)4DdX>P^!$(6k(@<{&4r)%AOB|zcH$!1uc>qe-Xyn;oq55 zceWV#SG7K+4q#RKi9#|$IY}x>(9#FoJfth04&I_E3O3l=+69_eH}6^=w#T@MKASF z7_za|>2%{EYXA!aIr!o>*AijiXtp7=75}sC14g1?0sj@JBpN0vt%1XpTQl(PQ7-mB xqyN6;yHea@P1Li&}v4|UjzQgN-9Z|zchaTKL9l|z4`zE literal 0 HcmV?d00001 diff --git a/docs/images/gpt-perf-66-175.png b/docs/images/gpt-perf-66-175.png deleted file mode 100644 index 14691b923913dd24c4f40d58fc266b4e85c549ea..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 33873 zcmc$`c{r5s`#wH1W|*<>OJrZBELjTKcM4gOy$nJ`v?;29Y+#l7ys% z6fvSrWGfNzyB_rZem>vN?|1zE`2O)d4)uB+nt7h*zVGWkuk$*u^L`So_nERVZec{B zP%L}QjBHUTEc`!O3{MZgS@>-b0sldtv^6zARrUydL!m@bdyMq$&K{b~XLywMX>D!f zY1ZSt-cA=YIXE&8pP#I`KG~UYiAT!FP34;PJI(7x3rYT7`H@oB4p*fo&qqG0d^)}Q zbGJz0NuBSF(=W6Sy&W61$y;u0pAUEYa<0kkZBk-NN(z@Wrzm>k$CRUCx^Y?3q0;qy z-M6*BO=~`m!`(SpGxGY6s0McDni#!-ez8H^wf#(4dohxF>pvts8aro_I&JbL5YO%X zpI@gF(E(w%t*LBrn^zDrqkh#J{<^aD2&Zb|=I`XJ$OciLEZZ0_p$O^lqqm4nW*txI z(Ot=!xBT~DQ9%T2B0gJaL}K$n1=$rf($zE~1x3Ai_G~;I^*ENut=@CPpMCv0D8h>A 
z#@&3)|6K+>50_Ca+-)wod70OE{;eH?`Axn>j{m-uPL4;#VIr6}hejh|Oc>@&zD$!| zQ+)po*9pVTD0g&=ee&iQ#AQgQL<3K)UgRRNZhl4&Q_NXp&Awep$7*wyRK+(wq-yZ| z&gNLjo_K3wQ1Z5S?3+*6kWGFW_2}i&>s$Z1%xA75Ym&~%tRwoE>>X}dJAGf7)f|3i zQE|}KHP7|YSsP{VZsMLxlCv%gbw}U6X}TWaeTz$GAATZR#;O2&;X{4E)kpI^n##Vf zB)7RXP;&b`o#4{?oa@uKCVKVRQ!^?z=H*X54-XFu-|NG1*fA^rk;a367N%rHB3G)l z{(QZD&LL{)B8m{JPd3o!?_Eq%!3$}eGW+xMn}p_>9~n)nf6Q?cY1#`pr>5Q?cwcw) z_LjuR9Zoejh8~6N|NFOb`N!oNudYsN6kT8B{O9MgL0dTY9(`MxsqED`G(T1t@n_Gu zUlUSS)dI5w)sB^f!DGwGyVTpg7;X%AS^eF#R*IewVjOg*sc`%r~+v)&E^xp3bIc*+RT}V_&|;@L1^YADKx! zvfCSBd}W-6bIhm#8-Y0zu;39|VM?1BZQaW~QtcU^r*V33%EK9!osnqt%3xrVJdCN7 zb6|F!K_txjP}r>Tui-bP`wy^({r+*VDJAsB{V#`CSC%4VnCt!DiEq^m&X6!oElG^~ z`R#R?R>a~cL+OFCtPw4-nd!lMRwNzGK_g_M^tRmxjMU1HsI4Z+{9;v) z7o4r9&wlwf7`({!>-$@WRqP7=$j?{iJDz=fav{@_fn^K#iIAxGt%aA#eGr?JE5?o@ z`|;iRnhu8|mb~_4ZqNJ^Sncs-yd{naqL8y$?!IbQny7$onajOOyHZ=@h~+6Yo}!Wa zCru+(W=(l!{8Uy|e0nkl1{_1y{{GeuI5JpYp*2ILn|*)k=T{pkWl5_7*Wq*D<+s`1 zNMLNA9nm;FQR4IP^mpm8_LTklXW{QY6l-1h&#x~hzAXO!)Rir(gf^hV65iZA`K3qs z_(!9I{19LdqxHE`**WQ-z|wc!G|!eaJkLBa(!#-L{X4Mful*`4EoCC{cecX8J3@4L z^&kb&1!{ASTHv8aTA@F`ig~st+pN(uRE94vxMx1gHly}zu5{0&)0t&1YD09jmT?j{ zrugC6*^ixWRCR3R=;XWFz2)&ll!(XT%;?+;-L(z~ay!MeGfJtIhaNkpe99qH7Y{u? zcRJ+Hx7!COP85A0hVHr=T8N0yF5o~1qK zj}LQgTvldf_7&sB@xpWb*PZM9_8OUnl&DSINh!s@8MV^O%TaVUh%Vb|SS=S8U+z@9 z7e;26yQsOEr+H?dvQJNW4WlWM8p}4RCoJ#F21T&>uDvj z2e~&k`l5vC_P(vY?a2bK9-Kb z=}+T%*bPOw4|io*vfO!}gelKc52BIBUlh9Swj|snsTUtx{1ofNF|8qCHS&9uKWc2c zX|2+Go3)SRil)7hU5T~+JAqnOK$N?E_c}W-k%c#w)Qye4I|kb#hKejc^75Krut2m{ zyVLaD)8FlR5sy7+kp~u|Jqx;{p=G) z!{n62iewiGDT>s;d_7IaR=lSm1$XGU1B8|T|mV}AGzpr-$sIr zW~}{m@^iNhou9|ELTfUbA%?{puNZW6TMmjpN-Nr5PQ@sFYKMC!3f5kQ8cY}Y%z>!a z7(^;Mc0Ho8)G(ga&sQwV!mkBIZ|HxNe3IP9V%9;|yep5f01SL#rN4!Wx$(z5i69v6VnC^5^yLaE60|gF{KaOXr3DHYN=8dA7#pZY! 
z6(yp*a&tzT{hxg)jI5-O^yhOIN2_scR>nTjVNlixRUdMy+CFG0C4w z%A^~Qo}PG9iu{9d&<|9-gJ`qE?ueBJ=TAkrj~#S_ETV5<*Em-Y$)YxmVIg&|(4USF z2Ki&$4B{BKHKT+$HQYL#-IR|SVn#V2_Y12vdU;~-JFDART<9sc54!Z+-bJ9JvlkRy z;$oz{vEm}!^;#m}D2+LaXLVO*T5TT&2kfGHyb$ZWV{`rdqt!p({q)+CVjy;Gaf5=Y z{?@fi{28Lcq7835|9rk0_@=-lAW?$eNY1A1g#Jiu6pIgk*z6NNKlatJG+l(kO(@B! zwAC}NC{w9AhdbEo&TzOW&<04nI3bE0$M!#O-6AU{Tq|97|5~FZMsd~(^P7ztt*p$y z*c&@@sG_KEo02}x)q1$Z=K6T9Pl3(U(*<+}&fVasX6KV6X5uP4Ms$^$cvU;J{`NQZ zY4nTcCF(7ne=TWV#V%G6)S!rH4e6IuXS_Ap>FlER z72T*Q!5ppTSd_gdiAjueh1~*GB%iZF#uq1Y4u^4iCeBMzuCYfDR<_-x^I_7!0g zj5W-rTx&jaliuDi4B(@4mippv-PRawqx6hz`|o`0Jv7rws>WZ*i9afdY*kT(^^P9n?|=KJPs2O7DprJ zMwZu8cQcdbAhx%2paR;PTW3Nj?@=ymJBv|X(!nNZdp|!6uKkz8p%7yN`%G@w_e=cd z7D==`^dj&3HoUYV9jgqICZ*hk1XS7r-Lw}%0+S`*lN0|HhVheiLWaI-xH#^rrMhK4saz#v2WuprY`r> zyVqI*u*!Hse<|x z-k`Cy(H*pMG3+Duwa0XOMrgbt?uk0~qDKgxyP}WE%~dk zzjb0c2LIBSiWFwhioWFITRj-0`NQbk7x^7a8APxiI`)Jkt5|74D$5dS7pg78Iu!J>3!eh7~N0=dxdc4i4Lq$MrWXHTlqQdGtU1*1nEfBBkkMO&7DMf1#?4 zzKQfVr;-z5IBi(lSk*=OMLD!Q`wc_YT#HdI4D7w}+qp@%=u^p6)~H$f)C6gh#S`tp z_--LG)1s*UPjbxYj^fAfr#P~f)t$sR70Da*n;<#X>5`r`R?#qZw#AK_d|aq@ZrC8| zxBGbl4{b-olZN z;t6=67BTO) z)#W3=G6c}X>4BK|c}y5ry$fj&6MaPM0^RwUGkKBZ-xMFV+Mf+g{J=A)6L{B)2`72) z(xLwzOyjS#M!Dlcw}>%X6llHMA6|Zg`bw|cRc~O1hmmp(^D9as=0US>j3}KcQyKmd zS7l+Futj&?qHynxX@~Y9GpfcWI%!BcaoywSH9CdO<`;P!IQnuw5#P2;sTfuJ`Mun3 z`**l2A53T|vm#vL?2tKd>?q1Fjz)aX!t8UdRcsDT3xgdR*g`(eSGGmbhWwjj{ODWV zKa4o`g;A>`=1VUlBHv1wh;!><$!XEbI}35LBD6uhHfoZg8^Sc|d8s>Evv7G%aq{%) zR~e^%>YO29=z6qT_F8GRPBBC^daMqmf+{ia(y#nv8&*+IZ05-&RGR#{anN@hqsMCwCFlQgo33XkKpH0@IZrd3<-#$R5l?q-Qm@Rp_2pjZj4j4e9h zcorg)0cPQunEuQ5Mx6wf96~Rv_Rk90eS+<1-GRMPsvhjIHK|eDT~d$PT<3Tg@8*V= z@1@exyv{fZmmW+@P!URx=3u1Qu|+3j(PF8rK^o~Y8DajjRBEySfw)479ZyrCKbn`E z9K+(SONpC7Hx3iJ7^@>W?hm8KBlv`kkcbQ#iH7s z%3a&GYn5ZDId}4H$UNaQVl!tJxjL9ISm~XVU9a3)^rL_y#W>$#qhH)1txv&lu3~#k zdylao;SxZ1I!^b-5*OsV-AuzSY|-#LT#;Yjor3zYbH?Sp)HZx)`wg_TAI*j8Ju^jH zUaS=;cygF2mT8cqf1@{F|t!Uece0=DkFG3j&hl-nUh)<^mqxO0>{JI+L&Ae6Jos* 
ztV@a)J$s)r;uDzE6F5rEvlWCd>UGg|O6k6>awOmR2dMHZ)+wpidW5qY&_{eHEj{;j&E5gj63SR1wk zmY`J8BQ!?wH*9NZuzVBf<2bdgv{Q%QaO_KJ-(N=O?LuY0h7}T%<_w}kU#APXK7S@A zWx(Vp7xnC|C8qbh=tU-cgVGNV=GH8oZsoC{);9ZcvGn$d-3k4UPjY%Kb}lq~;^@5Z zhjU?$Oa3FO#` z_&=UF347rqU&Lwe_5I!2cAK&9x=Rz#Va75oYA>oK|7PDoY2jKL`{QMCiKdDxAEKr( zES?ccVn(AbGda3{bKxszHKI;G4xpQgZK6lxJn+(-R8B$!1}mJBuX{C0BB$l8o~E@< z82h4c+VoRN*GYx3()#bQ7_W6$9z3{pokw^;X9V_z)KY^iwHpvD(3Oz&)e zp2{2ID>Kc|Lf?!^+WE#WGDJd`IXY)oZ{<m1DTgeQpp++re3upE!?Jv_K(WO@xnCZha=u{iGha2S0vc{sjw=DJ#KJ*{p3Yg>J zFenu%ltOE6);XHh-Ust7MU^7OxJ5)Ao5cCA4JV(oB>YLb-+9?yxE3QWZAAQz6gEYEya);zT%Xjr5?wre3MD@tHz;sZFtzQ?U^8 z#e8n9(dJhaQ#P{QRr(|x=!1xQiz1u_*@*UZLl@S=uu5-;o~)`YS4}oAoFxUxyylks zlYg@Ei4BQWWHVz<{X>f92ZGq<3oWGZ_2lC-b(#EJkQgdODtj=PZ$-t z$$A3*BSDjN;4+_8d=md%X3NI(sIhP}iT@kd4119^H7)9&NNuqxQu}XFP5CEMn}agq zW!ajvIZVCrjmgnHf$9GDJu^`|m}LX0_AmbgX_L)aHYssB@Nbrx(dGo)@#Dv%b%(b} zOPkEZ|I@+EL{Du_ga3uVkw*F3`|>X#(hk(HT;Wq785I=1eu2+YEZmU5gSgOLc--5{ zgO^Wzd-Xwrn_Jf2BI3_C8MSw@0sj{4!-l#YsVLf>x>YuYp5+n%5NO4FZn)e#Huut0 zq;&1KrFQ5-hLUG{+q_!pR!!-U`Ohr3h&qOwOI0aK5-3L9WfM{tJ@aE|mK-=hDTZpn zz6;Id&ng=isT3n^E2dukQy(yR!{bR(jqhveQrG)6N10pB|5M8gHYeSV zcSi7zU8(p;5EFQ)yJAK6w5f_Aur-?)F z{&hPP<2Is~fsXJ+@JV}nC&(4W_Kb*X9XvT828v|Em(zIM;Jv`?iM}%PO4s{0^ke8R zfj&)28r^(oN!k`n7B?fJdTw*16j|hJ8~{CbY>OwQ_NE{7;XMK5Op%H#f&bNqdTmK#`7)TKPQ^ znGH*D-YXvDZKhSl&4?tw#vgum$#_O4=kR(fqWP{(D7|=dvVh7BkTsP(+VJ0AUN^ng zyp|%Ub|YxC#rC=hOD?Ew9WESSkH&gz)iGaRa`phs=pvg^b#?XeI7XhN;17NNdLW_q z(R9wv&`RFM^Ze@+X>x6J5P}B2bY)u=MEvosQ+_$}oMAZRqY&K?Wv5qX2KmML{d|r8 zLMd31be{*;&{N{p6meo@@%S?ktheUY^!(eH0-Mo~m$c{jInig1=*`axb(`ee8eN_5 z9jmC+n|xna=IkEywO33)<5bD}V~QaEmbx`H9`4C=`fw*!h3S@6p#sQ8WBoCDl%DD@%ZDzq~T9=ptxEv$`0Q{qNw~NL46Vk*tCSzKB|NWX$Zbnz%VAU&FGb%m8 zlBs6p;+xPOYrX-(tpREywG42H1q#>EJs3%v4q)t%&wRRbm^%y|>*=C(v-Rgr%Bhw@S{<@8`k@O+=qq4;!OEGD=@i0@Cg>`I3&;%1T*jRHT_-ORkpnH}u5`WL__8G{h zS2lWUkyrZOAq~mFM*ppZbaFT>6Qyd(qI7p0X1`+Xdc#jKd9zxiKQG`C4bNzv6P1Z< 
zu&7WZiJD#{!oXMbkTgjh-pnP#Kb@BK;JR4xw!+IuyYv)%G=cTZV8-5Js_HEdV;N5EsJfr%3jyrT{DBRbC1>@L8hrUYUAO*lW7HV&|wL@4>3I6 zanTU+i-!?Mw~868R5bR<)Ah$cjLG6+l!n$GK-^^jE?MsDrfFG_)vhMTJ@JL{%($#0 zOXy~ZYkymtf1&%T+)2f$G3@>j^pl|7B>wugRW@`(_E=aX7Z@v5i6Tv5PHvI)jCh&g zxR&1@ZjYP=gqo?h56ZqoWWscIl8fp)qfSVbyIOKwk-v zMH(N?whLZ@on7isRiLLea4%5Af%hvxBlZ!II~H{z4aq^|M68??x86&uLY>FmSJhq^ zVdb_fVO(vf(=q=wOj2&JHPgOVHDwh>t~yNC=z)W@}wPCc9lI^ z{CMG#yo>7nv$NMSK#PW<8E5Jyjj+Z=6I*;-&dd|(0A_s{h&%>Q0=dG!7u`u%}Q1xfO7K7zL1ZJGKb&Qcp)LW7>WcV)@<+3K!ztLZzQU0wR}>8VJh9Rq)ViH%`^ zHg)v8#Y-{0Vo{ADr*%&K)mX1ePp9~g(IwrLcxaTLTZxZc`>UqER&0EMw=?oAEsCMV zq&9Lb;xsF#^v-m}i%NcPh%ermx%+hUwSq(^>VGQiBC-{jrHU?CE5as38uL4k^y*zv z4cKnIrj#x*VA>L;-?TD!LGIuKJD`y}cApqehfcQS0HxOF#a`fQN0iHKcU++vYsW}>_d~8U)IZ9vMjh3P|Y#qTUqXSW&zIGci4sc z8!AVwimAE5US%+xNh=r*4OY&!+)-)i@9&Ym0GzrzKLb~$oRe*Z!r5Pa4pUaojpd81 zY2u!>rGS8`k|aKm+6)sprN%)c?Tj3Fc&18Yh5xPW8E~5{vANpN9$94=M9uqm@KaaX}n53t8F|hp_d-3zP3o{21C@caL)Vd;i3-!_#FPQ*rTiH?66G>pmwSBb^55 z4i8+^XWcyQQ2RR$RX=7$Y;W%i2wW@=*MkvZ#6PYlId&e*0|L%@YI^9tM43aCeaz}5 zDRaq?x$##G$31s_g*}kF%yix3^9<8XYjz#YMhG80jJ+u7bTM@>lB$3H%38Xdld|ud zl8a9x%`QnM@&wheYJl8Oq+;!DihNdsqxT?jrjn0Ux@EQX+w0q9-%}B^`eF9t<@I=^ z8o4d4C@6tgt}ZFeOC#0x7-7JeIP*({cgLa0S&_Y_2_to-E>`RP+*SqG@S_CMdXM1ep9i(Bn`C`wC|%sA_DRBEmzy7RQg@E{w2!D}5s9 z-dhHvV(Os!9Tz1wHvF?oQlEeVI(!jwH%1JHZ+N;E?}0xH9)%{eub3KT;&mAiMF;^%l$}YM zu>AC+`BbRhU(m-B6i2?~)}`;f@Jh>U_9oGPGV zf*9S@jt$N1d)_lIoups=4Slq`795~ zBzjhu^LShd%`Nip&7N&kl3PQNI|3ba{STp1R|XB??2A!nW4YajlaSDUIoe~3gqYqA zOfV@Hk+r*1)1qu(cOxhi=ywS5WAguZ4QF z^kha(w+tkQu+MVqX*OC6kOPW&-03d;qSyfZ(%dKW8Kp_XN9TUs`T6~=m3B-Y_7Uec z_5+@gFk_C5-Rp{AvNXvHAhi{&dSke$)w778Gxz01q39j8;j@NFDc^h&oFJLo_TMTo zTGFHs{)P%m| zoE*oyM=m~^X<>?piw4(U+11>Y3U$R!*6z2uaFrEhWa6n;klX$A+hWfRt_ilir#f;~ zZfl@89K@B3(~yF!+UFT^uObHIUgz$5z+wDjxrTi@YkS#nNq3Q3o)#ztS@+i~YA*LJ&-B3A2#dmgAYR<|+8 zzBLhl&ow)lEo#=w$DSiAho_JlscnVNG|KLq4Mj3InHG5dJgNTHqu#9h@fS~{Chu}I z0L>sFc1*Xet-ZZ{bc?w5E6B=TP-&f5oN?dv^`oMpkgy%7Q$@8;#S(G|2+Q*V7}jrk 
z%J55y%7gnecX(YKCaIz|3AoUO$=O9V8g>oMz||6o=WZK$aN6AOZKeFvG!ci(%ng4< ztzWUaR)PV=;>ylM7Y+>7Ue6C2?7iW}%~08t(g*u*)k|mF6$#RbAAtrA zs5hgr&C-)fZQWGxJH~{^z4&GU@Ykc43k5LHUJELu<`srTJX8}7Xx(EwsQS-xPC-(S z=iYHhOaFx9*4}HzOz-acUiKeqcsZRF6MN`9-Uf)mX_WX2ImM+hZjS$@<+3xQKg31j zn0WD(yH|d_b<_UM?}@5%tiGmUhri9dY@R~^k6g4yw&?y_7RrIcTHkt$V;*QO-OL`U zUeV@{$^FY5hpA1h)dGa#?9E1SGmeSxP9I&4quFKYlxWmivF**hkX5(7kjJflrCDI| zEK!BUMv0sg7a>b`#<1xSk!lq7N$DVa`bb+M*UgB(e=hAl>7A<^*+}cgXuSsYmR!dB zvyS;B9TK~&|6-Sqkut>Ayb5xhHl6m5?d=n&lD`Ad>FL%pi(_eBtACgK(q7**mkE6Q zD+?s1kt^e_wzB_~;6V~;Tw4uA5;KIvUND4ZYSVN{?IK+CGa{VtBhtao5ZWVk2P~O# zPBljF>jPw$7pAy|&ZCNp!j|SHraJ^1O{Rw*XI60>IU3`sn@&_qHqg&tp?*a0NfeEF zz_czbgKNTeilt4S-fSddxuQEq$uNHxSzmxv&*1I0L$JdsKJ4afyN{TzJp}#`E=ZwA zHXSA+MF8TF9vZR2DYx_0AYW;t#)A)@n$RR zZd|7^?VO56#6{3m`}&N&^yFV~2pzJ?+u^3;>C!fI8hRCa4td?{PHzhtk_-SRUC(y) z&;@_3ec&|Bg3NifCl$8^ak0-PJ7JJWk2OSCE|?}KXkv%+AkUwEd@)Ps&sPaZ#pde2 z#x-rkp;p>UbCN?o{ncoFU&PB4oHh7x4(LoWzmma&;HfRI-eEOHJyq$E4v!+qd)-Iw z&Iwb~yN`ePQAq8B?3`!ANa`GnDAz`=1l-E;!Ku=_N8j%2uX3Bd|1rJ)B+~!rxu@h} z2H0zbtSmq6_~VyQLy19ptC!*Fe-9J|txJ#W17P*AcIwRckB_yVm>f{rb5*tCadGW} zKGH?oxm^dYp@+GJJb~VnQt#MAG3fd2N10y`75nNRaGMnC@g;Y!3uf0$kRhRV1-!RY zaBUFXs&Oje-gB}Kx6#zR(jyWh?!1IIdOcWpVH0EUSaxixu=aMS!jjIIpcnY(lA|e; z+;!Oi$Sm4S>%{RO9tKBVTi+pIh&z$CrWm#uRA5uo^_KM5g2?UoLb{v-nFI1Bo$1?) 
zDX^g>vn3lusZ4g*!@q#H9(Fk(h8eV0&YDDZ=^TJvrJ{Znly8zCGBtzs0hhsm+yP)8 zViFag6B8>7q8i%dFc$=mt0j!78A=I6Uz!8?el{7y7hVme-C=*#Ovp<1*i(Fr;QieW z$>uJiNlz*L+~?Jml?i+99b@{Sta&kY$_+IL>qt7Y!2}VjFLao5jbxCy?J1Nc$fN4`fYyjS& zO+PMx>-l>NSnAt)e*s7yj}ewSAjoiZ8k$(>rj2rXeWe#}b-#~?U+OoEFA5`*$B(qR zs>1>bHRCXYSJPP_CW3ZD04c1YD|&bHf6|3YYPa zf_*c}*VcWS#z5-W&okJLQXnB+a&AGfwH}DDY|;!srPlZ z70$a5*Se6_Sw-4>7*!nrD%rCuZ@r9QM%MRpckcBS1|h%2;n3+l(61Vwx;K)==N4vD z5b{~zldjQ&Yqa(hLBv803R>b29CrYAFO6iCExY`T^6m-LpYjmkX3RTspdOlpR00xT z_LZ*Nz|WlxM+Q28w_#hF0a9*4U&^-BR!U&!5$kJ15S~&}=2;nFklzC}iKMD(KOrw* zke(4&ziX@PI{i&Tb^>%e_M{1Gk3(rQ2mVRfHTT(%PhQ>F!&Pcs96cu@&cg_YBjg{4 z{>}oY=s`$@@|IUsGeHk>SlY7Zvh*iDE*uQ`8Y*4R#q`;>AGu`&PNXR%x zT_1{0rJmjY!-E6Dkt?%CWxGHw+I79RKEUPX9N2VPlIV2|im7@H-OvIcV=Y8Cm_Hnj z@B+od&seN}3v#w%i48d%a^h2`u^ji}$M$!7)E&NoVtdP))N=1@6`m1igL&bxp1M#7 zoMgn`33SmDnp$6@!g#^Bl8Eoz- z-8D29^OGbVrmwoO7x05ZI}SY}Y6eeQ>o8qj0-Vixktel2R7GT{J%zeEz~P>a(1yQp zka9DCCzg7}INB$)vT*GkP&*5}eCf~ks!!k$hbT+Nu^`$}GdXu1tNAzBP<)oW%N-ZC zphg=ADVz^)Ca6G^3MDwd@ZgejwMX2W3KxgWcqkS>^cNld=z6h`^l3V) z3Zt295h2$9eF>9CG`2vsqS3*=8gr;wtA3Y#xuXpPtmI{B zOUBS$=>NuOKXj~@2 z{>Wn(_(Db!$c8LHSISrgTZD}t`*4Z}+;$oMl7zYEcx4JvlD8vSD+jUz#rye|(_QFe z;`};c4M-2M!dgiAu-O3m@x6dF5nTlq&xSsh1nt}lNAdH2*(?I|SnnI$0d zE}-|F1uo>c$i#cGp$j0d1fRZ)wFXQE8sDF%#1Er%Bkxfz*FyD<>3W>o7992Y&S z8$6jaCjvFPETU%ub=(6rP39W=wY=kZe0z3ea?C=urs7q2X0 zQUUGB;Cd}mB-M)|+LH@Q#NS^I&`XWD=7NQG@AU;?VbHkW-wP}xiZXvZTVRg4zP$Cn z*UvTUUt9Xb7cv^hCkaPr2&z1*Q~mty29`t>rF5^iQ7y!g zqE%(3;Tx(E-6Kg}O9y=;bR|BpamnOE(oYb;@6@KA%`Dy{l16v}iW6G-0xYX&lp-E( zTQmd+!pN%Z3OMUOSZ_I{^oFz2s7<&PG`rF=u4=yYXTk`PYTZ{Zg}{NN5om0nuhB#e zpGPhNY=2y0Lk_q-jqJFpwu5C#5r9v~+gddqLO=<7iiQs^=!+)okLC8u*+EQ_fh7JR+~T$P(s6^p44EWMu_AR0WkbJ+So%T1}r z45EJFYfMalq>>~LezruC(NK| z9hzC~#9JaG?oajMeMoSzvmiLFpYKJFj-{d{;)3xVv*+1XUn210r{;DyEPw8azI(vD4caKK!B$0tt zC>?2xn;{(oIPO=x!VrjVM%i^MGs1U_&>9zzbDL43_^;c!AN}fgK?>lyYek^j6nuOE zaNIx_@}|TF5@NRKohPQ{1~5GbsE~$EAbGgp;+`Ho2-3# zIvi)(3%hk2ckFM1;~#lOT$U=EEJAZUFb^1UfXn+0nt{_v!kLzNk|oqE+}mu8{`~qb 
z16k&j(y`@T`36yc{oA)7QV3ZMt%=7~HU7B+yui9by2QEe4k%zN@z58$^F1p!&lDc& ze`(c~2IzEBx`>iTtVN;D2Z31EVMNMbLk)z%S`KniVysae>>wG2K~WE^hjZ|x4_!3S zCQPy&P3-~6!3(@EwpE9;ebP_vk^SF5YT*9K+Yfeq1L;E8<;ZF)!d>pkK->=d0LA{V z%?_#vuh22an|pbjkfw;7)2si{-~ej1B7piLti3#0i?mrnCaa%<+&&|F)0(Dty=D)@ zV5%r7$r@$bzyM9@SIQ(R*{Jyg@GCgjDdkk_oeA=UCZ;A|P`{Yk)9?8K;SPFjJqGB> zw{Vtp*E7_bSZ`7^;7bRvK1qPn#4E$0eV94sj2%r9V*qN3sq!b0vLwn358N6#g$pN3 z$4j~BJ-L<7&X5pUm&SM__EnCO=lF*+BfyX=OSsaYy{E9YJhCS4D>9Oj4nh?kJ$D*O zJ_*VjH8b=8CwE`hn->AXs4bvw9u_`%R6V3FF_Vzbxjxu<2@uT3&Utl(8Aw7<8 zx;LtgGYbvs8DjAgb37RIygvWW@LQk3s6mAmS{vV8&)edczjkK#jFt zHB|h9PZrVDAr_NBNAde*0Akx4fxVE)UnZ*75;9>WtrKTMkd;)7N6OC)b&R+Sr$Qgu zes`!kR8h|^`t|8tV=QMDV-^q)q)JqO!|u@x=Xyc0$O~)^+y)ShFEoCbidw$<-Qw8Z zm??2?Y9u)g8vMbf2Oriub0F+y*Eh2BP^$1hFF;=$xQW<0Wck-kWD{~&p|FH0R>yF1 zQ}(v~(nKY3lGLJ}8303U>;<-UVDLp&fLG43h*b<>$5IFG*xeE1PX-t)`JvZdK-D9N zHv>xcHOF5!NwHmuyMoZ%#a3>CsB#szBssdPkHVpcfAvj9-zjDEf|`tr!$`f?rtb`D zuhEt`zh5Y6UMM4A20=KNgfn=vdE3$*uf}jNJ{oz-LdY3{6J_T$Ot-zM!2jG8y@cV4 zl>T1+S^W4Ji5DFPyYr4Jxk#QLbe22JX%DXrpsnHmNuNC1?T8);;9Sf+&m!2lp`O)q zXUMzQJKsH0TrY~f8iY9x7>%G%b7RYwQ_@s{efd$!nw(~_sAWN2HVH&SSOjv{cOYRP zI!7n`TpVVVu72wyIRD%kiNE_-6pjyiiLQbB@EPEf0WV`RC3|pg9GIq4-V{Q)nb02` zxS?5VSr9h2=UJBg)d!vUZy*I^3aEH@fLtvFC2J?T1RzGs#L*ZfL(6nHbP>w`X9xp_ zdTVa>sR&pw4^FncKK|=4?W6kT8hu`A>Z-T<*g%6H;2KRAlw7BZ3~G`=0tQgSZnYJZ zMJXr;IwPr*O;x0zzkg1jmj4I<=G78Q@GFy8NdYv!!;=)xy7dwi&3AZGJIem65E*2+ zAw(L)rs&dj*+Z(@w6QkiA)yLW`G&K(%@*pQmKSEgyy#k+)rOORCv0k1;@10k?uvX+K^O^ER zNrKT{@?Uw|=&%tHL@1vT$x=AD{kBzM>K9!aZ!D?GY}|pg1DMpL*N*;v9qhn8=p2{z z+krUq5ygs&woPm0xGZD!!&kFM^vL_Wr3aiSha%wU?Wm7F<}(K=QYEeK>fV|Q)}Smy zYjh6e#*r~ynksgYE${D{;p{F_JCX&<^~vE~TY@Pk<&k|V?|g$*q)h;;PD}u$kG}y? 
z`W`>VQ<~m&9SSb3fFd6DKhizfuyu&5+>b%U2%H(nt=L(JTFzSP&bPk?iwrW|o5VIS zuKsjQ`?~YUF+VmsJ~gMS0V%5Jg?mR_sJWj8jfv|Cn&aEOk%)Qtl*I+g0k~uLOsFW@ z6Hf^gXzjDSKOxTT+7P4<$AHI1L15Hve%6++aXRT~jsLrvi3vmLWp)zHL&T-})`ASg zbflnjHO`(FV!Hg`!~T=P&15v4q)$)YM5n=Y06q9aFM@XJ!m;B!w-}mUg#$DGT{0bo zUXYL2a*?wao(KA#S8#ZwpHy5grT8u$MaQ>1khH5#7jS2Jus2B_Y1hNSZFpUR{WS;i z=x^{shQ4}ifeq=&4!If)rJTfMkrC;rSa|x`!GsJlq?8+(wB>#&D)q^OdiUo*#r{4I zQLQt$P%g*q7{&IT{UCWD`SLDuez*<#}14bke`^wNUw9{Rv>-I+1bQ7{D%aUfe7SgBu5JAPc(l=HQ1| z_cwuXbxc2~m!X@~j%ej_#t?G0pDA8mFj`pFDtj&jga8pFK(vWRS`2W~-wS+le)XtO zPDp0~=A>g~JL&ikSWJ;dV(8VGWA6YDRQ(4;VKtEVX53L>qUj>93Hi~q79qvG^7FM- zXQbo(6E`LITxz>df6}Rozq_HE=3nUxJITUuY9Jc!{<$Y#RwQDv_7)y61>P^Re-$~5|3)6) z@RtOFmfy!w=yRTcmrc|;b3_%ay4GtzFqW$|58G{(AeV2K`?cb?&a z+sGh76}Zw5F0nX;K^UR$oJd`%HB#dC#nP&To%@*r5SgZ*-Tz2Q7us?^8 z+TYQbtiRP9e-ZrU3J67ow`Q1u2=)o8nTapzRpe)XAj%v^Ov;N-!v5d=G|S#E5yY8A zw!Sh42RJBWLwf;kAFRJ22m!L;?ic&wnk^2mEUH2+OAfyljbJg%t`_8OaU}avnr8PaMuT36fl3(CS0?+W z;RoJP(E-(CU#uYr7JXSJ(qGs~ieo}9HB$ZJ)J8A)(1B`I7#CFbW) zJx7$&>h_zzI!L4*e2R$WPnmppNQ6;PIc~s7=){_~@Mp+*3BaM!?>xEJ%mLC7W#`M;i^_uLJGi0d;Cwa|l0yy04J*Rh>MmHkl%SHN&rLej)DkFx zy9XP+@9u*RlH1MWH*GSvA2^I?zF2=8YZM$S)gNSNd_M|nx=k)xP& zSvVEbfW(EAf3Uj*w~bV7ZjNg>5dzF)Zoo6`8;BsGk}D}ubPS!~!<2*4=~FI)%_KWO z5AZ+DD*!#$pC8>jX2HSOdkJc0#J}N1C8G(dlXY(o?Po=2@v#ah8!5(6d|n7|gO^ln zQ*`Wj48bu|CW@Yi^~!zv-cstF6Oo(`s@x4}0h;dD>R&~meIl-VkX({>`VXW-zfY4c zCYLBkexeCUHOtfTsBz+LWgZyyNkpG$S=e?g>_k|JTRPLxNJUIDSX zx&QXQIk36zZ46tA4vBlg4>NrMZJjGcfI0u0Qh1&NxjW6-NXWS)-=^rl)hea31-A>% zB^g9j8nt#MH(Q(?TI3~QAx3!;*dv8O_cPEh!o)1?_U1vf4_N6-(AA)yXDn1ofRqzM zRd(T=F+^?>w|*1;mkVukH>^pj$Jah7F>d5p30`x?Id7h1scGDdE7#u| z5{s%hmVEM0+n>DHuy*W0g4WZoUxF&1_+QMCd|8D*O7p`^x>Tx0MyIenjZ8q~6u*+Bj zDe5XOyq`*h8KFDi+*?>=Q!hiQZKXyMZLS$9TC#i-fTc@k9B+*v1odAQp(pSgLaN}e zJl1*OH@v#QxWVkAIMVU1>Ka;$K_CXdNrBAh;Uk@Lm^WBmyzu~nl<&}F5LAKb?RAE8- z<3#Pz@Nx=W&_+!~!tH__KZ1R*YFPmPz^)&ctC({<>f*b$5_Zx^5@WC_&z*mKU=v>k7lsU;Jzf}k*xQC*9LI|v(; z+JipGXW&HD3|ab=ovD}()k4~ub+G~NlLIxfcdVbT+;*TTGE!@@Ayq&pNq9*x!0Ps= 
z7o>JyUawG4p2EAfXHf#RVMhUplSKUGK!JA%^Z0M0_S3sw?f~l{vnygfe6tFiVKUHA zwjOHUX5=MIYe)@Sx5dUBta0fW9Ak+*o_V}tG0xathF<03?AV}WIy*v>1c4?UB&x$J zO}OP8txZyeN`GkWyC@!4?pSSbMJ-SPX$3g%`~SETpe($?sS6PCCi2o3Sya7(0g;-l zTd!hEq6(b>O~CTLViO9b%Dy;#)57^ZeEeNPGJ@UG=$o zDM%YGubpmvgtXC6K?B*<{u&i>`8a586G5%8nVfIEEH|40ti!%Yvbv;q1Ex+~s^j-x z4#blNR1jEB8n}2(w;nn>^?nyP)Y|DPC4+w4droSigc_Cc(!AidMgC7RaX8x#G z1ZtSsgHzw~s2zO)B-%&6`Px}&;1enW4dzL;7p!Oc9NgBOCq|KI5~aPXUe zSbphG7wB9AZ`_1p55cLt_THCM?zE3b=4+1gA}mcQM*W7ydXc9Est3d|q^>7}eyAq# z$ki3rYW&qvOL8-F69cE>c|c?(*AE$G8xfRKyl=>f1-@#`-{6IzFU7OW^=6 zEGW=ra-a?-092Jy@O(V5eW%6($L_#F&?%2s{qrNbVtY(3Y0vC#PJ9Y#+5X6*#h;H6 zHh_QU1XHQrRp6geN1!x04eA6}#kYAN_=p60x|$l|UzW>#Bc!z&7_h?W?5#0V|o$knCWhy?|>MP)1@5iOFC8M{K3$X1q8Dm!Vjj3rb^ zn~-Ht$dWBW=su_A-ut`v{=Toif1H|`&pDs-InU>L-p~8}KJfS|e|ec!rMV-}HkW+m z*-U4T3FO*N=hn0zp8Fp)7s8TUW3An!1Ubt_@K5wzQ>>(eWq-1_vFjnMMv{gl!VIE6 z1JppI87!=aY{bz1=+C@9J=H~d9w6lzamy4cy36xIN2=knR#_jLtszfhTNvz1F^eLA z8O|9UR~ggEuH4guoHny|?-L$ae9)q>rsoj`2noEjd_2{QpT#0|9O3_|<4DtKr->sV zbdUkfz}eKeBZGEyYNhJ>f_`{7O`4IX5+?y}1$#GB$bCM;9tPN*z;)ueQ?R&@)_mqN zGJ~(B4iqIr9S2#W8IXs+K>2mZUSh}pvCtqo0G#Cx9}-qaL^Pm@F^8OIbV1Ikrug%# z>#t8R2@MLvT#oEk2>>U$)@OsU5ast z?3|aP5Tl1_{#$&9Vgi%{-aL3q0RX4_(-E%p{|ZvhI2EA0R0@WsuI?Dr(ldzN4sOL(`l|8T|$tQ3) zStW_@WqFY#YD9@f9PEykWdN*8dcBBtV^Hx|O;S&W+?7=;mklZb#m z7mnTp90PhKQsHW;N%!qaLb<8uK!-`a5J(E zxLtKIWY{gbDp>t+LVxr7p|2kvTnE(FM#T#{<6C#zDU$2zMCJj=eL8h()T;{5v;8iH zL)X9Wv&7*(>q8!Cc?lYTYv4}v88A;PLjzbdv@tS2PkB-j80YQFG1gA|Q4J9f7&|uP zWPx3&ZXUo1XzQuU8dcv9wlcBb<1e`$eprBf_)-+c!UExu?mvJut)L0Q zDQBlw#tCaoi|np0L4}OzO0F?fU!06$C9yw#Uc}J?)t2}F?7VAbLfw#B^1$yCB6eHl zw5WV)$z~zQ5llD1p|kn@9fzQxJS`)a<<&0mMng2QHKN32Qec*vXChTcVtY<4a7#5$ zk4CNs1LSYWBR6@{*jqQbxGL*P%V2_z__i;<^GLoSs*7mq<4QTmBXmwRbnXv=A~5w~ zUym2;wpVJvL8bm3v&Zp+hyYnoJM8}$x?iwq9CG3*KT0+vuZ5Bt z}ya*R1 z?(}g{3MEQ}22@}09O?9&l6%2yi0+d10`?}A*bEG?dwC*G4F2tBvekXvAu4(=q74mz z$o*k>Y;+XiK$%(Ke&F})4&7_=Fs(7(%008}6V#_qHn+vrZl-fEiutje?aqcur7muV zK8I-rK-~>2;<(2KGTo0y{1Gs}O`F0tiZVtI6z53}M3cA?{w?bo%1GSj^SBOw$K$?r 
zpJ&J;5`TL-u~wGradVf;qZG1v@8qf&L!<@lpziJ=qBzVc4#sdw<6Z?5nE&TYO;g~$ z0Cl4U|HEhypbA_J=IWq47e4Q*c*6iLl>dCSBeVb=I>m~w?uLh9L)er8513P{f54$q zQ1H6Xsz3!G$>bFhg>906LPScVKK+l}wb%j%NGoZ6+6RwA`i&CLgr|pO%Av2a|{yKbD>MX|q7NfiWR+b4bAUB|`sS+gF+)IlEZM#0kO(I-x9ykOB9;3ETGZ zaRF(VMs1;BJWaFb*+r%!1-|I?IVrdTh=M`Bg_cC&lj&SqFh%)lV}cSuYhEcK$}D8I zAPV3gpa7;*0tGNd2fi>~asJRgWbS-_A}axDIe&_PDbk&B|KbQ~du4!vTZ-2&GD&_Y^zU!ZO^6oJJq$l^JtR)dbU@2{gi2*qmZynAO z2DxN2bI17M%Md>cJ4P~I{&rOs>SLo*OWY|hcpMEM=TQmod;m5I$hOVF$%$}rt2B@O zw1@9*`nL1*#=v(&eU9JFvX+_mD?xP62pbP}*oUc_LiYCXOKP%$?(E1fsYWI|aFNZY zLaBk65BEI{c)-DUGx(b(xV(~YAIu$Ujh#IjK&P})BXZJD8ScbN<@KZ9=K1*gLc6kK zibxF#or0I4KL(`{%?hH_SQD}s=On7Qfzk6M3ykrtyehe*`kJLB=#?}telJ4QlBGbd zgMM8yBqi+tMJBjYYB`bMl1u+z!6nYHF{}rQkIK(4)4*s1VK740i1@?4$L@isBFkf( z@1~LwhdIadl@Z1%RjJBPsE)TJ&C(&LBLuit=E`-q1qQ)m86mIHrXlK;|5+MyF>Z-7 ziQEG0>deZ_VT2~U-e|vG}3hc=j(3??4K>oHeukJiDl0S}j zX76=wo-bP%!a!a8zs3Td)HOLzYo7sf5G;kCpHmOM$F{(I)wNfdfTqO_C(@}un3=JU zU!=o)kCPwb<*Ec(VKN{I`v9{(y0rg4?N8WZNrjQ&VE^2BtP!XN=0JmeFWUmE-!7Ow z6QR)k{$mKDCcIu15P9~M9jp5cD99PuN?$>v_*z5b2W){BlB9d`ROr5xo_kcswG zZ#((#!t4-_!IwQQgMWVxZ-|t<*!GVIJp}$xq98s#?w<77fwU=;NQg&+|J9N{>%TwP z4K}=(I@28@{{}#`qC7hpAAnXASXcxM4``zX5I_1HAj@v+K_Q z=EwkX`F$4u21tehgj{yW{~JI6_w7#MKP~_aW>r~Ne~dnt_$6K0_Oi58&6J>t42yC{eQ0bY6eVQn>qx< z{HHO^kTThXA(Cci6Cj;jgFcQaq`PCA8_)#+P04lHi^%?QRoA>m3cb#Y%MlvBY9 z=)SYSYSf!Ye$HP%mgN$={rV~kkx)3Vhe?I3%nZm$V`;sG=De_W5B{dw02P|nN>I-b z(LyjMa(EZcVxcnQ&H^Kg1ftm>A`U=^eX-8jotoK z!f&Xs44uq*0?zAg=}~RXpvBvZ%0md{-5J3y|8czuHc?@!q3h=$Q#Pq(h(T18QU`L1 zLU=b0?Dwi{`fgH1;84jxh0M3Cn;M3`dUkbnT7U8Zh&Oq0I8($i9$<&wcb?3rc&d0o z6Scvluou<18OJG&t;)bz@BFPjSwzeUAdj`l(?{e_15c_Dv0Mt6o(C;;?k=Gix-z2y-Y$4>&(mzzQGF+r?r#q)3fxeV)09SAUV7DpWm?I$mY`2BaWm z1QNpxK;B&l0?@a2_6s7koBQLSKD`b7ZA8KhB#Kn%9^MZN0}J#o!2GJbu;97|!L<+! 
z8s37`>nh6Syx`z5;V8_bFtksD$-gGW7iTCfyT!RE4Tz?cgAp&LaOAP&8Ux!tXb!p{ zq$9)}IYleL`vf%F*XyE(1N-lrY;+-x3yb>H>$PBk00nQ~lV}PAKF%3&Agu?6hdux% zhMDEk$1oMEWa)##l0C~n2L%509VtAlG%1)3$DRND)D)5=DX{0$Y+pug7 zb{j}L1Eg7Bz>?j1O51}wStS)jM?`LSqKXOL>Y=yovb3e4CE^{yeGN*aesQi40FKQh zC_Naa9#g20?BvFQZ^fqUv|$Z3Mtl|np{ZEPI2=*^FmGg_K|HHjGC&&1a%V=A%yqDxa`e4PT`H-7-bu+5ZY;_mJbK5VJvc|c??~R-SNMUVetw7t^hQWhuHV{fW z+xc)j-7>(x13#K_BBY5P!;7V9fg+i^PTCcOZVT;k{zcYD^nYvM>XP;vn(5~yTdibp zLt7Vh_T>{;FSERMgE{f00uS)Djd_JJzpDY*1lZ&y^0zA2uNpE>ve7}?{9bq(L@4<$ zLUZ}e;$B|E^>zImq`t7>NJTUocXgyQZV1X+c1mV<`tC8!jYWzA1@<#xW$pL8YFOOk zBujk_fmP}P3-IE?s@463?Ybz3qlwX!?Gd^*%W#D+fOcRjLP$z1EQh9hG9sbH-*$O1 zjU*A!J4-tgSuw!cB!Jby&J00(alG$*LHgRedxxYYzq`;H0PJCVI4dHtKHgT_feyz$ z238{M8fXZ~3*XOvj;T)!K`8e*K^vzCb@M;NUxdJx(c#&}5g}d<$KUQ$JXx)KrpHK(z)S&qv*cIVp>bJUbb0 zLrjZ}Up|Ucu-9%GT*EIKqkR-9L3?GIoq$eHy7;ZP!e~?b(ht`HY=iuQwqOBmR5*3! z_1$=Jwvzh7wQ)xg1837>zs?NRX1`ws&QZCQfpf<`+Dle-7h?~3cu1~B$CG+8?UeOlL+dq$=t{0Ixhea5&j!+R}eM9u6 zATPC)Aqz%cB?T~|F}0MTRO-vbInH0DcLr_#u4k8>4IP`}<-?_88U2~?e3Xh+h;nH~ z0@8R`8no{=4dVVhZ(uJd z+hE+d*%Y?$j1eOvCB~+MIIm9)Q=N$+ZHx-O&s|iuqRK2$R-CRxZYed~hvt0bB6lQOHl66feh=Q-%%YJ~?uo zb$!Pu%c_9GKBh352MIJK+jrb74K@R&xUf^qp!JMVs{t9;TA_n8UuQ2jTK=K;WjnbY z-XiiI?bZ7_CeuREAT7p~H6Nv~?@y)ZE;lmLDg^YT%z@Q^kyu!Vuopu()-DAUC8**+#~jV zzr=U7VmiZmwt?=uc#LZ+bEppll$R%B#F@!xmKA96ePEQ(Sa2wP<^InBvXPxKCtNH* zQ4p<>9|^>IOf6_P|Fjd|5RPCCS7d9A_6yNlx^=SU2=Vj{e^0{c9eO%*Rk4cu{Qw=1m#s~__>LF%Da zwpf$l#gBVArk&+m^SRC#h;&lvdbm?$Et4yq3s_(wQvWpR=-J3*OrVev!hndZy5oR%)Q2@cupor>bjgrVK zB`WHp;uuyZvdSk0!>F&_xFu^e3mQiFiwh$CNn0~@LZ;5H9i7>bOkZL}Ga&CbVQ#lv zVVePo&@tJq$#Mrx(gSlgAHtVrCVj149{l-5;zTk>EZ$lB7T%KVkMhTfu6Ug^Pbq(k zQlr&Cb5}9obX2ay{c7WA0R&!-TjT(0J8kC^$`aBSx0iS%jIBh43Gc_?x>fyMf6c;5 zyW=s*o@11@mynqGlV11&A&#D{qkAu2jEIk-xk40?C#kf1oB!fG$-3R8m`K8rYm>Tj zsUdFBtTel8M1XQ5bYFJUd<5@*f85!_O#mN6G8_ao_>1)RMcP}q4Vad zhZq7xTu~T*g@D$}=nZWx`AZB$)N^V=V*KV?zIj!WcsuC=DVr`2<+=x4E+oV2ojB*W z(U9p|&`h6kdMc0IxG^3^><%YhmDVk_iOS-#YB^8o5hyhDP$7y~@3M-r;*}P?_;rF|1A^W^!q@@?gO?)}n`q0Bd? 
z2em(zlsO3)TW)MS7Gu$h%A|2uQLQ+*MLV6d7953&3o{^~of69lpDP6N^MFEoTGO_s z{PyLKVqz@iuea$jZ_$Q*T+N1hgmNMGtpe@ayuK<)Z5Njlm~7cGXN}~0-?XWRZy4pt z=PIu7$^MAv(se3)ZMw21>Xb5(f~Q+z-4Na(*LR&EtmW-4EEnU`=(ATFw$WuiBpdIj z`pMC)Dz#Im+E}!THklNM`RM*bW>SVJhpwRX{imDZMol8y-=+5(y*~Lg95sAeYBLrs z%d9?lT~b$J&E+uRUb2wR1^*Bwws&uZX_vi}yR@2&zEm$Z(xg zoJ+5B(v1pFkMe4hw3H;zctqPC8$Ge#f{>pVNB^<(Idj50k7yxo#uiyL!$vOYb^0(} z6sCdZs3B=rd$Cc&h-blQ^2elz`zVl1E2{KOL>!@%t!=V9sVK;}z#plgg1q!b)`Che>n{ z0PQbU0uNuv+l1r(*In|G$PodBFFcO zf3K8=6RwonSaIU-XmpY=S{{vC)!)&U@p+b(te1O4|9ss`5l*gT*U*aH@6jCN;mWQs zKORi`d!=MJ7Q#D5{k>8vxV1Z{+IyA$jyA>tqYYG49RE8S3yeOgVknQyGE7wUb~w3o z!86)Nn8{H8DTDq~e(Q5u!>=uE|E{82iCP^aVD+XJud~?h63K2-bRiq+daZcfm0sTf zq&9cCm7vZD%%XD{C+=j*9drH`>Dd1!U+{y!YDfLX;wzPrNKTfv1aGkVebfz2Bg65a z(1HW4UbmA?E-NBMZ8NnoqWQvD0`Ih(;y_q9gA`6YQN#7rD|w-^{AV}i$kC!OIVwgj zMp;P|pxw~uf0@|=HiGc%v9FpTdi`FvY;T8``~-EvtxbzdFH#Tc@hjDRf9u~!%WL!H z?6qh69?ll^c5B_7hf|GFZQFiUBNsKykUb10fCJB%`FBW!{1^;$seBRJn z_;;wdOS;UKsa2X637ZI!Y{lSC515(VTN{_ca^P3vr$QRknr)+G4Zh#U4JnPg7Ri-b46Ru8ko+VT5M{sbu8Z{C`XXV2Wn;z1x$6+iLYn>er`E$16SgoZ{cg?-^;t zqA5W|S)Ly_-R9q=Ingo<6S$?37>iSZ_YOsjJ-KJ2HDrK2rbIr4ti+Plx*$%@3t!Ug zo8T3Qf_uEB=3_VZ3I*6W0KKkX>T&Nx;q_x$#IjRQzuu{NF@5NuE;o;pPsKcVS(;?u zurEKXJyULUVZM=`hV!sgA(pdd@cbu#__b&N?sxY;6kE_P>w-Rk`~a*8mnd$#Sk49S z{x@kxtH2tH()}@@b|i2x0C@peTs-k707+4bj|O%i$VG+bHl@eJ$Dm;RBsDcJ+G{v0 zGZwFQ;4#=9HTRz^(~a*<`siFL*~bo z=#_tAxBz@1?v}bw*GX6+amX<)=+ag3`AwIz*L}|p-#R8RzABMw!BGPaF6Dh5*-^oB zb-O_>Cp-c-;mR-TRiDmXALH4vQjeVc>HI37$4oo(1$*@mzXi~XK++cSYv z^|77E8>2#Zde3q*@*F+)ro4s2Jksr|uKLg;Ywf3J*hZGRjac^>X$^-gMLin2C~u)X zTkXsK3;YgL6)M}s7JlLAVAWcjT3TGMp1S`a8jyVb9RDT;)r+Kde+ru3c)}%uSse$O zsgMjXG8boU=b8n=KEp)X6DlczW~sBUaW5RbOkEj z#MI8WhCGb`AP9#tOBKdW$M$16OY2tp{Xf{+KQxyiIApk9mTdw*HQsSpvjh)JsEqes z`@|)9;~C%VqEA&P51WH92-V8L%e5_T3vRuTM*T>%ARSga$jSzs;fj+^ciP_qf+ z3SLj|&zj{8i6s#oJ{}Dm99fU2iOiSWK{9YY_2f&xB-ac)qb@y%J(*c`<_Qkx`;NOs zMqXrRY(GYjAUT;WNQg(SjJ`h#e}DYy?oAq3`s|yq54>Zo5GocYui$7;q--|Qix`KI 
z8N`!)TF}Nb5E?b<1P|yew8+G&EtE)b9RZhB)od!Dn2F&AC!UO6mM_?Z#q#c!kXykO zheSmQxiEMQrinoggB(B00B)J~0SvGDZ@G_O*qJE<4 z*TJq+kq}E4tNxIYc3#PzHB(eUb&m|S{3kcI^b669iKG{iM@EP7S*P8L^~I9(R@#qYnW zZ(M#)f{U2e0A3824Qa`MvNN!!6H4=!7U;q7RZ1Ts!jW=v)p>u%nZ@5CfMpKP6quH!E;eL z@#|`9dz0EcgCNq48ec!T05)ca7fl89lUHE(Q0v$A*$uL$Zk>D*gPNG0I`{DS?bHL8 zU!gK4c|W{9y&>MehZsp{Gm|gjlAiwLQ?)U;VI=;Oe%VO7H_vKOAp&oByM_X6riUE zRaveTTap@09P%Whf}YLRT)FqHFO=Cj@NHxcPIp4oI7yXIjek~4Udk*V)4A;v?VT<> z0Q;~D>@SJFo}74VylTJ97GJr~i=N#6Jr~2KJRkZ#-)Gp37ijvN^Fqrite@GLNsFB) zGKQANA+Nxaa*=&dy^wxf`b5dmd<#R1ZF_yt%m7Oai(!T0D ziaZ7|-sR;TnbIn!dtGAo2s&k%!Q|P6SHkqjD!ppMASb=34PI zi|Tj<;Xv|?+Z?WvZ~j%K?qChsDE;uAFOVqCUwQK#3m`lZXE_vivx?!^j-8;k2+1M3&CA>#t@?SY|A@3a zmkL_)_*KK|+<0sb-L-%%XUDgnwV!2*j$)%dA(SO>dNZ*n$2nhGlaw1WmXi5^qb(k8 zo|kNH58ykbZeMV8&+^2>2i8u9hEIXcF=H{~yz=OCQ_G&ffHbk1Fhw6VEI)Nx?pH%qRH}c1XN+ikjzb_xe0bh+TmT@U6^9 z-*-EKx5~HkQ+oxxD#7`=FlI)YP-B=C;1Y6 zvbt+kD1Ms{Lp>f_u{||Dyw+`1h>S zcs{N`oGa;T`!VC&tI&#h|3<%YoSgzO`s6fw?-g}5_a^gkRK3wXOR}<+P1r6&gPM|u zINItcrk3jDbLU4p&lsiKr=;x*W_MVP*Ugvzr;) zy-7a)Ydj3!cR~fF>)Ybv*NL5yEp+y67AFgp(cV$pqcOGW7|LRL~`=+Yw#1Uw> zv()40i_@|%jK;+TPa9-~oYjxlVO3~NY}fo6zfXo>>sgRMmO9QJ(c%&6Q)@b+ww*PH zd8n4OL_TDAQ9^F_NA3^9?D?cGyegtXDKe-xp?rAkHKo8JkFN&zNU1{?DJF)u_bsmA zruG<*lyKA@4c8Skt#a?fVwitQ&TD51x%;p^lntyMSI*%z&1-};v9p)dn-eW%shm%G zA=?+BkYLW~>!Y-i^+I!trXc9eCpy!GHjh-ttG^l}b8-6S?Cm}Bq0AOaQ@pO2FZkBY%%A6F#^E5w2lKTikSQ%V;c}P-++003+O(Ay1 zw;ZSq1!LBJQs`a_&?ePaeWcl>@EW$8ikV$Z3qwWe)9#ghv&0u_|yZ|93(1*OSsqqPy9#UJwwBBIA-)2&ko2c<26 zKTLKv%u0xFM!7x5hJaeD8E;_FN^y<9T)Ll^nD>H>xYs?Mm2Jc$K{uJ6Nn(1S*FRQ< zCdy0Wo)2G8WE3=j74LNCr>MP|ST^8_m;IW^cWOI(e^>Jj#*Yt+@}=Tzqp9^=WDv9zxAsy|Vi0 zXFu+}7}ssD9jUWE?mBhhNea4^pRIQaKI^>A&7K>-TZ&rv(Zk7H9qp$_wT*oOPeLRjVYiqr189R5^|dd7OR?+D)|9GX5|TPjy1Ly}6u4eZ@4^ zh)8R{A;IX1^HX|xnUY*)FIkwaNhahnu`r{$b!X|-cV8lF9%r~9j{ovfM)K&Jr$4AgVu}Cl` z&(tC?{xgQ?7y(mqygO8i`~RN zo4@(-%~bDacC{W4|l?=|E-K>({CtNq%4&%=|APwG~l+9~3V ze`j(|gbXNiB=|D~IMVn_DE#~ToxJ4F`rie#&P{fYPi565|NX^3=kR-q^Xjl$^qb7HSP%peQ9zI`y>|;B 
zi1aSfdso_B@f^>+Gs;?Z;ppH&^1E*fR3l1RZsY!#i66O@BmQ6+)TdTS;&%p2@4AJZ<2 z$Vte3`=)T1KL6YrS5dRvf9L!4>8^|3xS^g z>Iuwu9IsZ@|NdlUWkv4OV_}IL`{5E{JHDIM%T?e@Fd`ll3BEBMWlc@JdqSScTNkhG?ud&)nn3qoj6L3!bQ&>A)-=s#;lTp>|G;$4Tb%Kl!Y6@kFTBOR-?{;n6N zg25}{I6QWZ{$1sHa1F}UuX#WIKK6_n_{LcLBH7=SO{;NUifh&)NaO#{MH_|PW9(mSTTRaRoC##amaKuS8KepYGDlze(OF@kH<$M?2`lZ~J zr1bM7ymSa$#xH~{$-#{OY=ZjAV)o1O$$-PI#U8btvTxd@HS6`GwgdV115bWsOWyBm z1?IJMVp?u*WH(ZlJMJ@|Hjr;5;kT5Vz4G*qIIXpjKWO=Nrbljzb^HzazRdUf@URv^F%^6i{wu?o_`;(Ib zI_WQ3v)lJ_#2SwJP$EM@@68$}_J*xrzF_B}6m2+iWtX&lytxhzVTg=|!)DMdu&&1H z*VfOq$-;({Y9(FqXQGu;Q1%OVBc7bSX-i9LxS@0DC*$G4kGS{NJ?TT0Y4v8OZGTATyO9?wF>p>OJKlyE^F7I}lo1Jd^kjpn`6yv%sw_a;W#Vq3NP;!4Zh57vLAtG{)se;Ix zMF|TXt&t3?jl{GUSDqj5jrNRphbQfyw$NTKGYdTa{yuEs%BceTz4mC!yxB6i%qH!)()r*wsb>I%8!Q!i*jUQMs~OsG^82$@_lw1L zX8AbzEe*NFtfVnx;yVEy#wt#8cB>;F(#}&2R8a`d?fI?O&9|=(S;l?%5i8eS==hG4 zXR7|r;QKq$R?7qL|3s?g(**;G&;N!%C4LfU7{siAQ`X(N9aH1EIyM)rY5~kdD&WYw zW}}&Eb+d!JVz|`0Mr`hz&gF6cTv6l7uMV@nTu;Ft99qJu_nOGLE9=kSDgTo0Jwvgd zFXb>&ImsDtFg@_WZcyR($xM{kNYCZd+KzR!JX+M$fA^->@um3}mcRqb`7VBa(j~rX z>^XcdHY-B)$X1ZqB+=)ZEQ~n1D6gRUQ#dX68e9}ET)*?_nb^y3AL{)NM;cC#o!HUl zK_47O2h^gG)9tHaET+z_jZ;mbtM!L#rBPz_L+7`8nl7B4p0%@NT5`8&YP*JV zfM#~8-3)iT^;pEHZZRTh)kxmXq00SM8=4e__+G?k{xM;+bRB?C$-)x(E3RcqWA^($b0c%pXV zDDGy9u8;@I?<+>?_9s$b>y+79>OEC`S#I;Jjmt>_-=xm%KPwM|Maj+*4`OetP*Ppc zqesI0l0wQEe%T|PnJ+TGYoU>|kFzowL!%8>4(>R5vNLlwlaO)fQ1^0*1odJ!GIs^V znjt!!>kX%C@bb=ndiQ%oKfM;Bm4!4OXC? 
zrcyQ!BA6||9e)&jIdt@EJ1sj+NE${g^-<-dSn)D2GEs|saG!3)oI4c-*Pm&qW_F=c ztDZ`Vq!dlKfyCrLpUHVYN>VTpjg<4uzaxF8+_b$(xQ%d*8g@Q32Y?0Ui;dteXe zK##wDnPq|vo~Y+XsN;}?{O3};BCpzuOe1PvrEmF#TW={mO3JV% zu%j-7h46F;aJ~*Fy)6AUoeSw;9O~T;CF4v_(P$1*EZLw>!%_wd;ve-%k3?LwJ&+6M ztxigQVi8rJm9=Iko?;ZZ@7f2kH*8=%*h+J|zdONyay9tjnPEBMH zS<*zKK*F`lzm@L#VcCdU6m z;Y-aQtx6EO?ViAsZKvG8-Z?$T<{)dVcl^_)WJ@g!uTl_d=;F@v9EjAWN@hG|&_p~L zg4^_p-P#_Ce1XMMjeUsfi!cWX7Y=hcL@rNz#N&@}zw~Q1Y5W**H36GyuEKcU`^PL6 zRad09)^Hji&0RF;9S#e<;s1J}suu*~3?Di+lG+#rDt@YxeO0z!Z)jdMKJ&jw51HdE z2?|RQGtr?x10O=q1h)^F6Gvi@2%5->6jIt-&k?G;6dMSYEBSaOrO{W{Q51$k zJjM|SbMcmKZAFe$FXu3(^?~=PcXl>K9{gU+cAQqHm6!3hi&$%nFfW$hY-O-aQLds0 z)lQSGdZ3Nn%8?GO)7NNlg1_j3nJB! z<|uKN#-L7!PO{(^7WmUKQc0tHae0^)DJ`Bc45WeT(@-(BXRy%Zw_&1JhzXTBq&np) zD^$g+SgI(!NUFHpd*WUp8BwadRGw*i6$`F$e+N`jGcYsx+I-1=d5pywIIo=AS$6iC zF+7oPk+7;*AH=bCQ%zkoX{Oz`%@>Yq6nJ{=0zYJ#Au!u1zb7FVVvigq@dIXIks5&+ zSOLzRYV`Y)j)vn3!+?C>)=;Z5GM??D6}uo+syWPqmntGUnqPE5?%*5wfL15DGv5ay z+DnzCbRCD?l|{eK&~o32Y_PP(>=dQ(D?GidDA#f2x@=J2B4Ec0kVk>;W*E{ewRL<@xn)6HK?5-dwOp0}jKaE)pyA!LKGD0izzLr2|!G>EaGZdmp;cU#e+NnaL!qRE^$=X>a zqHRJ3gNkDxgH8W2Lnnkpq?;GvzL+0O?bB|egA61a+lZCtnMJ8y6w%_Z^ zFfrkDWjYkD`*nry?TtElwapK0aQpA{icYfAiZyIYz05zKMN_h9;r5xWeeRjfC8{E*s(#*-ZH)Xp5m?lRR-(8_f)JC(+jhPi9nq7jL- zMhP%nkV8;fkbgv7+kyFK^HMrZ&AWp9&}2&%eJ2?G!w;O~u%V-!Z#vz<33nQaLwNcY z8Dr2>UPGBX>@{(WbR>z;2a)D@?g6hqcRruXv#dYyYZv#?E zE6jM|A#zfEA}r8!L5{hJQNz+jfidc*$ilLD|2+)|vy@aSKD%YZ;zp5h%wym`sna!U z)#v8vnHlP9`NE*y_@Uw{p9$Cr4R9i0E$9OgeGy$oHGbomYnRwPNp#3ht*5bu8+Bj( z3g4@YD$Q#&nTI?Hzt{02D4{9j;U>u|EYk9>DQ`PjswI~9V`!$u-y+JTuj$Oc=4hKPfBdq?Nch7n#MN_?FW0Qq54bxpK$;g;b>#;sM-% z0?dMU6$OVWKOEcD<~Q%U%g17rjRFZzLFopMo;Qp{cKIg%r8E{?(0;NM&$^LTv55w! z`SnMZEqS=cl$Mk)?J3k}>^N1(XJvi;vf-QZQq3~_2RSkos&n z1-msbeBm;?XFwkgL=CrRpkUV1(|ie6?J^L-3*>Xka}?;z^X)zFsKsDan9 z&>mbgN^2@iy@E&+Y8wM6$m3!w?)hQRR){ z^;=IFds6txYj(yoT? 
zG3Y(_lM4}_*w+}O3a$fvJZ78kB5aDzJXF(=Im&@QuSdm7{L?k03bVqOyw_C}#K?e7 z3Z2eDzWgo*W>u*{1Sv-nqi~fSazk2!6poa7Yf8r+@Y3uGwq#^;$CBvobt+tNZ)ifZ z-sM?4dVQS(0OR>Bd>eZl%-0WFM%0dalBpoJk{q0SBX`^oGspt$& z?08+LRBgL`U$LhA$0&5C0xJJfJmkg~!AEiBZi8s^mMh+}cC6Y|QiC$As7ojflt|Gf z=MV%ZhP(E4z`hz%WeM94?Pl(~H1vYOv1d-PWPxY!HE|Y{AL{U0E2~H#iKdy1Wi;9) zPW?m%R9l;7kXpUPhMb^7}a?3MEnrE;&LGi)8ZoKVjKfM8!qTP zmXqjr(00hKV#QbKcFu>9g1s=}6~;NrULpizI`!fke#GLZIc|xJA9v`|!kI$P-`TSmgr_stQUbJhvKE0v5y4sgRs;LNqV_!C?-LUro9MMUjG^fh_O zcrsIOMV@e^88|3JVq|bj`)E{U=X>h_JW-Tzl>No&VUgMd`WW;EC9ZiQDK!%PYy(-` z_=B}c#u6vclN6w?_R?8lTl~WL@W})zWy5n^G_}~On7pI8(mS49Jp%9vV zNgCHU$CSpiZ4`*=ROksXRi&*{oK;0meF(p*L>qt&4`o>-vyb20yDq^49N#i*Z@BXZ zsUUq5Lt*@d9=@)`1=UPhLlz#o9O+g84)rMYS?{)pVHpNe%v~tEk5ogvyb3hK5}aCf z;!`aDS^w=9TBZtA0c$bnMmm+pN^8~EcUX>^VC(gSBU{%{O(!BtPYSl7hMiv_&Xhti zYe+*RA_QTAVwB9TL5C<~d7SoFu7oo?!7d>}iLl@sd;nGZ4qN`EsTxGe;iSM5?A`D& zm9GTiBq0o)<)*jd(lH6~F8KL5t&2a3Ep=i{{YVKso3+cnYu}K3iFawdTDV+Ea+BhA z#aQAO&)RF}sl=GFjO2}4VZos!<}R&nIx<}B7R={r#j}JF+8u9Mb>QJpo=_1}^JxAn zNN2Gq(Wnbi!eqo|YBw|;0rqn)_AxgO@wwH)C2YJy1{aVarWylUSKUU9*OMyw=E=nn zWpji{BX9RnGdg1uUot}!!B{KU+X;EY-+Do|TEK)*>9cIvN`p#2cAK-?SIvP(YSR9b zh9>G-a2Lzn!e@!`SDS-Aq)VnoK%|`eA-tJi&v9K%AzBt-w<^?z@GIC+*>S(x%Ky9& zZSca+NV!OSLnP?}rn|w*ml!#tB-jPL&?-k4F20^MT}L+qgBy?7b>5W;T4tjDYv4Z(|DV>YebW31-68;_9DhZyGU32Yii zWvAJG@OdMv%MK}NWLOf6uWo#sT-&OF(+d@jEuX$>C0C|d$la<1X&`3izphh$d7kop~YdEc;GT?O6VIX(GA6-Vs*6bS5he7{_YWH zA2g0GTtg%z7?YZA&9lV?m#IaXLwkwU7J?I+gyHe$VC{5dS1&dd1vz(~M}~VBxMr)i zW|kJvL30TR_&d6R?Qg*lQ~TOoM6Y-DFHvj09`dbclCir{yU~+$yB)Gg&&SIJ!; zyjWAPujjK@w!hS~_(PRuRmX{9&I9&kww}95k7ATA7snZzDfEry>3~Set}^mUIz1r0 zX)fikH`LCC3d?zcZ|erxz@?%y&;-Asi^`p)v6-h;^XbdBXPjFvCQu-F;t}~T8IOO= zp5+i4a*+vwjasM0WiRk7qpN>RH6|#Rk(6`aC2l1yXVw!Kazse<%RmAP)W4Felk!7S z4~|;hVAEHd92*DBxiB;ti)F4{cFpr_))$`{4@Gu)6W*oSZ1mp zCh;`cnVjjGG92x+-l{F6d@X#WwZ_;&ZX{fV-&~~VV_d}o57h-VGBS;<4dWf&%RPC> zmGe_oh5L^XZ$fT%Xkk)_sXW%|JLI13d_1SYv|d+j3d>>xsUla+ut$dlYCkYsRI+Jx zJ@y&%7PF36>dgUPY}4il_NsdLfI1&^gRRsGd!SyRlQ3Aa$tO#*?&Rp2awt=s@Zm{F 
zRlkOXiKT?*ZK7piJJR2XG>vyqJ4uIy@LOpXLPIHay7Q!=RQPQt_p-k$dNF6V{0*{Mbp_1_UGH7ynGcwoVy9Ga*vCA zB9F{Z(2n#xQ!KzskFd8_VZm@L3MBlAoz9Sms+vfpAo4TJGXkL~eFHqk9Pkf_p;rkO zfD=-Ak%-xW-qkDp?p+-&&!6%{26KKO0ezn385L7W2{V4-?tkm6rjtP?^g;Tloz zsQyxE53t}TI+#4xzf_tM21FKwnv(241Y-~aMh-60EUw+B|CcEHi&rE5{w53xmKJ7_ z{9W_!C#TBbvQ{Dg&sXva1@~MT!f8DfDgN~2+@0Z)do98FWq)@Bfgz!!!W98(ZM5WG z)FJAP*0XZgpPxvbRk9NQ?&}mU!n=8jKqcI9s*|*CS|2QYsQ|E{TUiV6u< z19SF^Hu2-mE!2B+sZP~xC!kVQwNo^3S(X^n^{ZAFJ%;3OK;|0cLLH)k7}V z`!N7ct{{N0o}f*$!TvQJ1XP?CVFOUhyVlls0KgBZYL8oQud=3Iyr1~NX;QI$ZSlh2 z{lI1v(UJ9w7*;yJ(o#ykr;+*j-BK&(F^) z63=MOjizB3$nbRFKwhhDDpN8ytqta0@6eW8y=c&>!Tax~VPLHfF*p?bshF9gV7hna zVSGxF=j!d6li$k@BW3qRyTqA^KPrEhOLrM9ci1F)pZAwdCqWX$vWe{aqa_Sbe*YTP-_69L$OyKJIO)K08_gEk4HoP*Q;8V@1GiX67p42x zz)^tmC0rL^WF{&^9q8*dLEq@>e=nVr`}z2~vF~e@e@rzbv@w=b&Ul1nVy#X!$Y)S> z;4u$nnm^C4gLLn~O=-GTQto-MXQun$>Ba5<@cHKZ+mZ$7=8%@xg8!Y92*HAb#3O^Z zC;Zlj2?(S;xh*S(t2HS$-iWVo;?EaD!XT0sQuagUZgXG62L$S~Qzh-5W+yVXoNj#< zu4fHOrZ2>)qzH{pOxque7r}m7Tr$ZdQdRKESg} za!{rO_w&<}y>7Ynp$Iy@6$1C0yV_rzzz7H7k+em}>p$%Q69}nTNn`z6giVW2fFX=F z06AHG!|mPfb-y{V`(KjKs4htJ)b5~}SMELVB(9b5?U*ar1YT8+UugFQ-TlB$80Aku zZ_zE=KZv}Lr~4|ThKo}pEW`he=&V8)i1M@Naux8h5i%jNhpF1VZ^pv@v)dkM0BL=(vI#g35;+xSwDIM z0051X1X8^Qkd?-`R*)sCl9(K8R;3Is>Lp5p{(3E!Sc9El}`CFtxQjv|1G*$0ieS>|B z`QhH`n#r%JP+H@6O{~^0rekkqp?KQhK>A{5H<0hrA+*I+?DF&YheDPiZ#FMN{OfCV zT9)&T+Xu(}xz|^N$+?^bg>~73=Qq>+H``bWI17H99@YMuOMUNuxF@QhM9&F4BZc+f zr@cUej-K>gK5yWi1Ef!SqfLzgr^owgJZX1q`*U#A>{9LxrX_bH{kJ{}7C0)ML~mT#2uo( zsZPE)_w}}+VoQgeg&4KWu}W?tmaSF>46u zcAoQoeaErtjO{WWRx)L2JpWXpNB$cqMiwYJcUOkL`EYDdV%S~;{9Yu~8FL)jvzCL{ z@yD=GUD}tA_vwi0Q-#u{TxT_Y9KsW~V&&F4dBolFpt+G1TR@#tnj&detr7Zbd%ot# z7W?v-XzOy7^E7tfM2$u&l@Xf@a#5ZJF?YwUYj3!TD49#jF9T}IPpD;)R0VGAJsq|- zq^#)wd&h+z+QDnV;4G#`e6$uqCF;CRO9veysns!6iQhL*AI#EZP*+5F1UWPxeypK3 zasl;$OtTViGsf-T+U5Z^NVN)-KAzo|=7&CuHPjk3L!O`@ynJw=w!sOexhE>@d{ypt zYm`7EWaOSNb-s8WogEyeSaP96XPnAI0t8Zp2l^B0Btr*oaQ5%Rn}!3q*R2Zyy#7)t zl&osK9w;@covk}Vfc?nm2sB^Xv^SO^i?w#kXP>xU5PGp~*{I6(=ZpAdz?D}m5Nmv| 
zdxJfIMWxU~>v@7q8V|{v<|x?xJ8Li8`_#(xyv5C%t2Ja_)Td|u^onMupW&0lsnN#< z$^;7zZVrV^*N8N#bGYE;mc5|AN8E&%!;S})9%8H3aa{J9EX3ub+C6ZU2i7ioeie@6E16NEt{#*H zRdZ|=!_)O+vjRNdRQM5^ZC*&8;Rjw@3Z7rYJCLo5$>NqPkFRUH^irSw!aQ2Jcou~H z#3)P#1#$9#4bMC8X9pVpR&ztJqRv(=qmy9Of7b zLN8+khB?wTH0@06{Ot@6V#y$A;bN>_~!RCDNFdXKMCHyXYmB81zBSKT;EwJu9 zq~JF1AWb>#eh*n_pusZ{fNJh4l`$&P(|Ye27K}!bJQym%mHY16lD}nHssm!Rsz<}F zvRR(7_eQ>1o+0!V)Ag@4v@$%2zK>{r=KYibqzjiMDU;(Et@*AzV#KR+`LqJ3nS+VW z&4Y|+>?pN?wnyE zCBp3glWQV=#d^tSJkv#ale&NOE7RnwNFrz@9kIcc+avY;JLa!YX&! zFyf^#5*FGy&7(l%BpOTR+A^G>iO0ypTCX56Q1fj4XAp80Nw{Ofv8Pv=$fQGdy6-!! zC$Gq;cByipIy?rCIDE0UnS|jI*4^$|Euf5X020Q>eRMOdXcKlMiD$vV_wom_W941L z--r-MQp+1zS0A3-nyb{Ztd-psrJ}}RNF8dmA5d)n+aO>z=pr7XQ5?Y4s=E(AlQ}st zMHw-`!~}16fdnr)0ug)RB_%=10v4}saNoE@5E=%kl@zHD=mW&PMveqnupwefF7h!E z@2vt*EE==~2?w_hN9yHd$4+sHb z9*+_@|0G74^T8u*FYt}q>$Th3EI!T$(+@|>?te4RV%%kTcO#cR>Ql8EjD!+o&S17! z{1XcBAOS2MR0kFUS-SuB98Id`1XW|FGPX%rQ6u7f>Y*~NLchb126yA{LV7Z+?y8lp6q5i~M{7-!-DdpfMaaXme#cEs4>zImmdkEsCW^ zTsj^~Dyv^<6x9R3o(%<+shc@=T(Hei7W)Ut{x05I*kZ>^=CbWdH?q*-B8t*!RX#E*yzdjtgfa0yYV---kD75?(?K< z$rxB@_EM4JFm$ZZSL?5;MQftFiUie%R0d$V^Ei%hq2A3K)sf!#o5>rKIL!- z#h4n%N^VrqYVDNAydhg=UuxjNz<6>PM(z2zt_0y0z5b z>Dk$&5j%z-cbXY-jo~o}m$Nd1`zEu45 zDrfj@K)B@;hbUnjm|`tk5rVSJ`bkwC#gPnx+U{X@_`>WW%!8aWOj#z)7&TwWdb|VW z|0j^XWu9fWMM}`~AkLE95|cW#Dr0(ns+!Z^{eESQnI&0rooPu?#V%5&oHgyNu2`P{Td2->`9z$6BL=AW81vg`58!{ zlUjaSg^yGO3$0xOdEKN-6Aad|!Hr98<;!_`<%gGFhGEBRAB&8XR4`rI3;AlJoo+K)eFfL2sp&Q8^cl=ohZFd$GSneWL zv1;{gG};g#I5F(f-RXfRK3@FrW~kXKEa{zMi!h%s6Pc9oPXnM88*~x>$azQM+Ml-a z254LOnYzmqL>BNwZ8IgMrK|HV13hof@K6^C4-}d?F@Da9|EWaIdG{fwXENx$J}nIv zu50T9hRl#|sP10fs&P%L{pedmCyH5*Hkuqe)ho|vvmEuWz=8nE`6ZcLm=6vxmgRR0 z$|TO|B2m8r4p--OEt}?@Kowpm7ckc)J?XA)Z{pI%^gQ^TFAo6@>BJBK$o34f=V%ml z0^f=w5W!jtI@c?3Yqz0Nf`me!oHG~!$cjBEIAS%XY7fb|AD|`3)ay1#gIBk+N1p1W z7jp)7Z@#I!A*o-#UmB*^cJ}7bQq@O*d@8v*U){g15BO45UGOx?ztv-)(U3GTb_V0> zKLZc(%;4r(qYoIAQK0OzWbCncOk6I-0h2TfL>jQa|H<^V9Lfl3$4XOGYi4xFDoZG>`nAvMY*s&)Rs 
zLi8Vked@<8&nJamMb5SdN3;TzX_)6B$IK({q<0$tAE^0F3d+(ON_Q^{YLk5c>Y`Ax z>n0QzK`97TkQYBUPla;^xI$&7DxX@kGcFY5k7s<88#LyXkA?Jjt-?HB{o^zLj$J4W zzYh;XqZEkdu`d<)j3EeJ4s`hJ*t`i3MtcG<)xzS}frJvFOev3lvbhAtG=> zMfCLu&=<=bu9Om#?y*9Zpv|^#3?eEAiptT)!q@XbF+{o+!G-z<)XF6cT-a{zE-wN* z)=@CvewKWk864{Edb=WO`@tM~vCKJ6SCk)!#qUAD^#s7(2tZhu>?wFz&R|id;1;b>$ptLXNWuh#_*jVNGckNqwa4-b zp&9piC_V0;l;%AXx%YdyaFkFwfH+cQ(e{Xxnq`sipGT}BsIpnF^L7I90Fg&r5tW7< z$|E7CmC#M9o>2ttqUTgF`ACfPgbk<+v*ex{@si9Cwxu>=(U&J42@a9I<-Cp88$5cd zlCKt`;JZ@ReZq=3Y2QO-E0&JVz_jfm`*vR_S|1n4l0=5rF zg-s=9Fpv$=h}CX{FoAiJ?=rR%wN+Xsc=@1>!l5hnBjr%Bo)PKs~0=Pzu=H$K4Pls8| z9^iln7kvQpN%QIN?^KM;eoTrfZQgVg&(7`Op@TjLz*OiLtG(9OqV;MGxp0taX8>)C z0zvbXts9~D2>X)gxIO09a}Q>_6pkPOf*^#m;&|b_bf?RO4Pzg#^ou1+Ii%pYIb2%l z2C{XypOweosSE%d!yc%R*z~Nxk(7W2&JbXB7c5cVqws|NN4r<+!CDD}Qk7GbD{c3A z;Jm#V@x1`}SOFtXF_e96EM_YjVb4r`Kl(5Pocyw$AHFH8Y(9S@gYgjfUYY^L>%>Q+ z#VSa1JQn)Gy-!m#bE;x8;4=@L(B~Swv;>H1x0b4JGZ=0$Pm}X1Cv;&2l{1vfL&dL6 zoSR5#$Ri#C?fJJ8Fvuond6Soz1RM*HMaqY3n(#T zF*e`dJ(2Sj*N?cG5v~t}?e9V5Jy2jGSHT>w=n0@*t9Hyg7779_h+A-fjffz`ewQYu zp(uYIIIzc`vAP^UGw;&M+i|Y@xkSftU}rp!Pe^ERqZI)QB#fIhh6uHV#6cJoZZ#7I z6G}nz^66(6_O2>DnRgyq{^RSP-h}|YAJp(c-FW(FJo|;~Bma|M^Ns<^2;Cg2ACEQ2 zBM~q;;Cju9A*bIiUj;%{VmcErT~M!eUIsv+t9S-&=hOMa-HjI7I9Z{1s?*G#%OUU}42}w_Fd{;pN!tS><0q zVPH>8PUD_H4C3(hekCJN+N6O=u{FN~(DAzi(FVtZ%hx+F`jx=MbSpc|>h}LD+#c}2 zeRmdcK-Hgb;_>}v>`&|KVHM97g7+u<#Cj}T2%ObtK0yNxKBB@^!d!2g3GhqA;qD%K2a#qMzsw z5`PxT6;8=?RwhoAE9|*J@(XsOlhhbSBOA6+1jSBmL@YwU`w!j^#et+g}vy z4){E)^z5@LEpdKE^>X)$qWY4uPwBy(Dco~X`=_<}49bRUEE5K~=H6LEB+*U(d%q!`h<(5M(-tuI!&NgB7Xvt?Z?bs;C?{@x8r*k70wt2%fNBKUX zE^mgRF%*y5ia%z@bJ&xqKzt(nwj%)K-$o<9fss4j`Q`x1y*~qRjHDXm@=f$S@!ZfX z8jK811Z(KBq}G$JkVg0ME!Ph^UP#>avf6<$VTyQCK&_@}N!kqLKhM)?ERI2zB8&}8 zecmX5GBB1OW&E^C>p(>HGH{<2kaS^(uoInA;Llk^8{*dP1xs812ZC(*R2=ho>i+2?D4=ZlA#TeaKdvGgKzsfoL&2r=m$Zi==$*Rc&G zzuwp>)rHZDLOGLH##OGKE!5Hk8aH9#hr1b%GNh+3h=}-Pz|Pf>8&J;AtQsW=Gmkz6 zRe;MGji9EJe^{lh5H3>eF+LKvPHR~E%0<|!%1k41eUJi_+E 
z@yj(OLHN*^8!t8s133aYD4Z{NT_0xlf;OJK7r$<4+lvUp^cofLktXhDfMLw%;_*wW z&EUrxNZD~Ak{LoBYwS$AH}jZ|Y?j59V`c(>gJi4zBk5#n(MD2S1bH&TDH5T<3`M^6 z&fnV7-;{3FbDQ5sFl&8xG|dUY1wF_X205t?5<`MuXhVHpHBG3A~Xm(i@spr ztx>ovE+`b#mRv)29Tczl^XK(ma?xP&4(A}BsC%VbwS z0jog}fhxAsAaE$a;q6wKCd(h9sgIE9%V?GK8DST2kj*Q{!JwJFBl{D+*;MT3%>5#Eq7#(B3H#ikYy>wonM z!h0&9-~vcQD#{mqe0ahJPiJWU)Fnt)9=GgCzwzgTtHpBM|Ir>(3g%ub!|Jc4N_igd z4%Uoe|84h42&$GQ?$?GF;S4y;?ZK7dQj_UfPjnzzgmF*-$wnU z!hHo(t$86NbS!-888Uk{%m9hGX%iQtKPNw(*@ z+_D;hv91EPrvmVqRhD*_#3O}>W80s=g1tfBLx@=btr&keIKEbAsu2^=pmCU}D|0B! zQ&{(dG1@^e@_Sd(wjahO+9${lvms_k3{Fg84Gx;9h(@YWQQy-iI) z^zHB|YZ40V+-&34&(H4xfLad7SA&)K6m_KNifaZObG zIWACHM_f1nu)Rs}EYM+8J|@9`qnD8kc31oVxEshVfBRD)P*{n*F;?$iL&X{`ua@wQ zUdR}9!I7v8>Z`mti(G`R(s%MtR~Z(G9s;io828^F-vd-)4FkRWA(46uVLlu*T`pgz zo+$DVx;+?~=2*<+yHy0i}fkmfhy zkIxwZGG?Lq3*nU!Rm(plk3K(V$TfdClBiK!hjzIkKeb)0N7(eyzU2wHFH85HDH2%enTl=$wIIb!)|IP!q#K0pvUaUZ%iegL-4^i}*?= zpir6jy*U!t@E7Dh*nuoQS6L`cC?KKRV&%6w6>O1H@?f0UF&*E;pd*h1%u5-evwOrM zmd+ZQZOTsyd`>f14^Y6r2sywk0_zeCa2Lrz;~%aR47oT6E=Zk#j}}zw0hY!1&D}@@ z1o-$xT6|C-Y{7q~}pytW7@6>I~A`x-A?v}%_ASuEx( z_S@TQ_mo?sE#^#t|G0JBQlMf6&ln=f6>0l}3bz3a_%>*vSHL)WR84r(he6Y2eO5w# zAT&rWuEdgX6*hj^+2eN4hZ z0m_@`5r@L?CIa&{JqsqEYhJ!6M>w_e?yrtjee>sk9^BY%R$uO5rf6OB^oV1L=80lp zGE?60#eV-a~1y88PYs%f$1YfP$M3(!%zngmcrs7_rE<8B}^D7eUQe!Z&!)#}Q zp2tVR@Wr(xK^@QZaG?0;JdQR%C-OfJ!~85(z)rsEqOM`%2Hl}7aLEukED(*f3o=ts zff+-8K>GXX(a62E@p6Y$kWJgtlF@bpA_N(Y@#ALT^ej|Y zFAX|h7F1BeYFr=*R-L?^aQ}7JQ)|$n)YQmZcsj3SKg3s;>MOqZKPq$QrPI?76XP?` z(z!b9_(Pjd4E`OyDK9~bIh(DLA7}?0m>6=h^C(T0o74%CTH6O}nl> zvi8P}IRDGZriQen>)NU zAMf)h=a0e8+r$yTLv_R4Uf;x{>|94Cbyhvf7%BvzM-fX#l-2p6+1Hk9j)^8{S2}ibx zBGAzNZQB2LxQ#?-gwqM0(W`;cLwKrzBn-S=<+H>{{12q25=s`*43WzO@0D5e`krT? 
zbEekU69y_nM2J%2BTxyzLO~_49fq#l9k}BF`niFIz(B&wO$bWOJs>Ts;Z=F@zP4hF z!j=5l`8!tkK0Pje#B|PzY-vfgj}Q&+>nZ2ZA(xd3t_zjw9le^GeS{WWCPZgoJHlLmVwU%qd z_aKBbC=@%RBSFr02Ka5wyH*1ek0)|$E`3>=j+=9(Hod$ZzqS5{tjYs-Mm_+hEt(kg z?P-MU>^6|)41u?ptgcXKE5!fr%BemBWGfk0Xs} z>Bz*DnHH!3nnPf~Np zz+4z-zO4?K4k+g_EO_c%?x@nIml?_V$rZc~}d(mbCCFe43n3k*?h4V%3s) z84*Lw0OM3^oKHdol;@qdR178j0SOLz?w&6xZC2u)pXsyo!@$PFV7j;ed)@(W{|^IP zHHf(KMYdy0m7U)39ZVRvAi~WfzMmH*_ULx2(vFqV_HszRso4Le00_eq9XDbFDzYmA zv-1_|>wwPXe8{+<7+5qT*VpIwAMt&7`rmfwz_Vzu`!mlm9R6^>E?IW5*I?s54t;md zM9Xas*X$eX9~hat$rsv3QwmquQd$ZN!dli+b5C9{$zokA4;`I ziqp>`Xd&F)=XmjiDk2QOtgnsxNkCRMz{c9x4@iB@@40;AvP*^=&?^2<^F&S491#G#>&1QI-mox!D)1&3%4^+iq^5;J&i6K*};kEL6;Q=-!|g;-9JV-wp+Yru@FZJO6^zXYbo8?Il3i#*DW{4E(u=#`t#swFpn?3| zJ501n+wSM7qxd?Id6mILGWWll_h8f6vw!!-H|UFD2;lV!NPA6h<=6D&fbm_ZmB1%^ z!HQ@b5`Z{eLJyP&&VYN^rIjQtIzq*pb|kP!i25+(S1FsXa1jO-foJH+>A2^exD#Gz z*%G%(r5-x}R+bI1j{)5DX_ZE7!9P0?L)vdOz{`)iatVBX0v3uvbj#EUQjJ6*w$M?o z137sZYQ2K1-+H~58z`#y<5R>;KFgD^lOtcJpG)K8h}9IU2UA#I6oa|Tt`^dZpt_$b z{G!1kt(28=DDg3pH~S7=ZQBKKV==&*f5HVhjveyVx=Q>u5>-yJ!my&o@a4_XNkzJ- zGJ;XCv-ra)S(w>Yzn$2(34}rO!eFq8`EYny>^D7GE2rlzu4`WsB!6<)SV40tE-xu% z#QW%;mthMZZ0Q!-!V|2@5d6s84Hyj$wuJ_L$^6p?{o zT7cm6Cx-k!pL@qbw`zBH{50i{xDK5?rcUB1^udk6D&CR*!1fM$vFNu=(ksFb3ezN# z(YE4Y)b(L+mY1UvlKAt2g76nq4X+X>{l1U-%qQW8PTMspvwtzpifl$sR4m%D<%*0w z^^PkLIEP8;ieg-PB!8gzC`JnL&Im`*?m;JBjGCf9QRk-N9ymo z3jClS?Dq}7e;loBs8!a?Lz@Xo(rPMbg8hSQM-}^@|kC}(6tJzJ}SDsyDMl?Wsf&1 zzA~+|R}nxN;6p~o5Y1k@`8j6sODN2&G`W~#Ex+Um&9NEmeg&$VZW=x(6NJ|bV+{!Ru`nPNU?BwA6MYr+-BX73|r~V?thiO|S zegK{Cz^}+I-AP-b{2V4Y^rPf+p#*KN6In0iXN8m_HA#%*Vnt_<)dKa{;!hAl`Ms!S zZUV``8ydq(s(cQmw6tRS6ZE) z;%6XHL9Lm)Cip3KZUPpLqbX(rTCW=j5lRj{ukL-wMCdEN7C{3nSvG{wLzpP8^SaB# zYTuKEycSD=hJK+H#5OP^jJ3r%G00gXjSJZ`&U;~J(s7~!`-AIqFBi6zy|?cyRU5rO zYQYWk7DM3QT}Zn4b9}-3O)=U1#YJKkUDHo5lMB86P?c=h{Io8;e9|o8*Tv?5(?ril z{+8w>PfQxle*U7W6|ybxFqHUK0Pwlj`huq7Yk^NL&!VtU_F*12#@ov5HwQu{fizkI zk6*5?_S;ez5%D#7>INJ|L5$M&i?M(U#fk*eH_zXaaXB~NxHDGqikJG!KDcuc15`y< 
z*&ycijqgL{`7!uFd_8O6k*}bu@>(4Eb(wvtAkYQ(n!zY&ig~stdnt?TSN&66&QA9H zcS27YqVbU|B@-7boEwe|OHPl!bpAFnaj1%IpKjJxC`;jeNbfM9B)`FPmx=t`T3-S- zBx`{#Qm&EE3NA;A`@F_uDufzw*VWC^jeFief!vaGr6p~l+A0TL&A!DVEt zHB#xh00rR-#KIO9B>tQOIstJbA~U{<;)aV=-i0v)bsUc`tK&WdJ3=9)StAi2BKXiK zIM9hb4`ZxwRRXw5K~C(UzsR&5LxU)~kd`GE>(m!H{(yzj4J1+KlTARzmg`E>-4H;5 zma(Dlfi;4d_3&itx&Lv6a5U8`g8=u`0O8yl#)MUU&Fam7t=?j{f`ONg-dK|Q(0heR zM1t{c5j}H+Wv}+~90c$M2!`&HLGQl+?;-Z?H;4DaE_^BPqVROlSK|=*zQpF?p*u*m z_+=uk=y4<-?=z$pr{;0C-4l$)ay8U8TDSk99Vf zc4d#Ai(6l)tkHkw*70``B_Y%_P+}mTJn(#EXzm6zn-#t5OApmX#E}I4c{xlq#YUK< zZVyRV{7JpBIZCzVIC=BK6OD#@i+%n#zrV}VF{fj+qGvOIEkn5%w&Oz`lU`~<+Z`WJ zOla86RQL^0&>%FYQ*YIUM)3Wm4?ct&)pM{~(JvEsXezi88jl9RO!=rKAI*-UrPWib zz!@~1g(mn~Tv8}p!XQj^Ev`0reX?L%q`Z4_flYkW_JPvo=%ekj6Sqy7tG_8`I+?~z z(ZzmO`TyQH77B5BVo(^m=nEe-z6#~duxrJreT|>P*xi(XxJO{s$8g%s5A-N84$rdT z#r=qVy#DL@cq2gn2O6REoF8(2J{p2YGaN48iJ)Vk{E0sP$h*;Y`GW?#yp7yW2Gbma z`1qE|A;Wx=y?Ds>cc+iCL9am+rs4zjEc?T1uRd}i^C@B2IaA4IW`Ccr93NxduNe+D z$6_F(XmTpE9&zsP0wkPuu|3UxK?CYEtug3sb0T!~{O9al-cj_$#ytwCX+d5++qJ z%{bBm#-LS6p}>}X?7(NIoR3Ne+xjE4r`Yz2#BST$xldroYpVj}tpXT4A!-?tZ6Zb$ zvQkCEy0_}$6Euug2FgSq{Hu=P9k~N#?!hJ!CJr!1w+gV#Po{!d6$hd zi84?;GPa6ieVj;{LJcD=C{WB`X+HF`HY@@^fz{AAg6beS2EU9YjWt2io`rZd+7e#1 zaEwQKm@0H?&}&E$5zctDyLl1@6(GTuJ3{a%VNBR?Mjf}_UFfhj3q5aeH~f8v_}hqV1IAQkmK6fo$_xw*R|ZtVg5 z1no?ouy4?$LgaY#D{HY9-C+Ep?1lJs_*NvmVf~F#KR}sU6%(axfO+))p?!M(c^0T@ zb8t5fmTf>t*m&nm%2c$%WJO=J(otAAR;`pKMc!47=um0<>8kiE0d+8kLMYl_I1twse`L969yGK5~R(5U8-sd{W z)7Um}7j*_?@Cmd+qJo}6XLv*#A@o0byMSBQMSTzMZRZ8A)i8YIxx3Xx!^Ku1K)rg) z_3$h-bHR$@Q;V==7I{DZ&pYxE8&Vhd3C#9*j7VbgoCHcsOPAU@Iu0ZyC!d=*21;2c z2%A7Z)Y^ez3pAD2*|rhS2bIqQeXT`5UU+GB-81WBST0Wn${PFE%F+`p7v`lvz+&Q+G|SIT;aV) zm;3F&^>ZltVb=uKVYX9<@Oes>qvmx1HGcphY0;)elmG}JUZ^t*-@lgkUAz(ZE&lg! 
zWth%|Rn^~)LXCvu4d~ioeV}IO&()v?$YE(!PpQpW^_n3$v;Ow>T8~9p15C`|9f=na zR5?g_oq8L|HL;{hvt}=bVYB*9-9B!fY-7au`LN6nh@G!9)FMORy&DF(`ne|DCoy=L z=RY-$&pMJ$NsN)Xo&u9a2EC;}fPw>=n?bpoBs92{l$@scZKj-As<_1!K4!>orwpJc zlNV+tf6E^y|2ftw1JD{za~iG_N8COKX;EAqDzVYLE_^xB>|&WqOwHyuqkHRXzXg;A z$6J4%SW*5yX>diA6{#gnVwR9=3a746;E>OdgNAC&S>#ok3va7a_7w;dxrmNyLdNFVkI&HNawGI zG*8Q~gyvrhWa8uItqEdclPTf7^QD5t%(XE!e|~s=-~C17o~dU8cMngA*;alvJ0#^= zHn=)(uyMmDRkMrMjd@OynRWX7AD=1=ufL!26KogkooWz$FXcYIByv&u~^Q$rm zpGvod5oYwatVZ_)$T12XtG?mR+t7Uf3*Zx)?1Z#x5R=IY^QqtS=#!FJZR0P53~4AV z&>R9f9cqAJtQ|5u!t?#;zja009a|I2rItF}NLX1td@NwqXzTZ^S4lnnfU~&Fw;b(X z5*Gh)0bcRbcppx)AAY<&?0xvxMFHIl5Bsh`!@5rcK!I)J!f`h?rR_89XS>g6^3l9q zFgRR<4H?a69bA{#1#IpH<6Ld(r4PsX;m)Xg4_ML59$KcQU#(Z=I;g&n-g^MFlZVbP zE{#p2x!*_euVXb;HcDE~zMvp;0gif+Z|;|yPz7Q)^Z+ONSMCTlPApBae8Gl{45@O* zM-No_T=uD!8lw+TC4%TevAC;#;Ovv)$E`72G8REc+CaWOP0 zlUEbX{p_n%;-f@vCQ@MPz*Iw0?@qmC;tNn~ao^FYw?1|MKFC@tZ5Syfc|pK3#J>1qWu)(x1s7RVnMTlZ&4YbHF_foWfQ~!DfUjIvqUMOG z!C9G>2Z5=$Rjro#+__R~J{o^=`F2R#T2eU&12V87%%mBgTfg6+4$uicPKLuiF@aGE zKP0dnYAj-ZXW2K1 zyRPqqA6mH*K+t`ej=^ezqwk=j){>boB#}Sr2ePiuJzcz@jwOzD+?iF{b=(;7j6;$U z5aTJa1r>w5_Ylmnb!PM)Del3edYQv2^Qq{fR5DUa=Dt872os{z;K1_}P+@qs9x^J6 z&xZSqjS(Awf+dp}7lr6LD>kfoa=@YJz4YaI3^rt2l4_L&ig-14dMhFZW%bW6d|lMn z^M&nJhzt=&C*EuFcrlO}n{!=LQ*Ug$+xt&IN5#MtY#^gvd4fALR zW?6yM_%jU4oQUBDN(%?5`^sy6^y(G`mk0zPTrg;OjS#}`L7tiX;B#GuW!b(c*;=6Kw7gqi2Utnm#)^k<~^3O8{{e?BC zgk$iy4x)QOLu8!JT2v%q^as7$deAuzSWGXs;0iAeRcHRvC(pBC_PRtWshhnwo?i}C zc`cXTz7PX%@K>m=|JRxm5~(`hmF6pM4Gs0}x)GdLB4bRGU=V|z)k@ES(FBpdnC=Om zVE1fj8d^nT*(Ld$ZpyAkdL{}+A?C8je6aiDV|W$zumLaVXylAls*Uz`io|Oojz*?{ zh{<4sU9DCz;hhvzp~i2{zt2R?G+b)6r;yes=%KfPn^c4|{1SYc_yKC$!O;E^95g}A zD0KgWMySfya2L9;@*v7jn^Ab|C*SFQ^b`9&|HW&BCCUSpzF-)3F==k1<)7SEA$szsri)U z3P|o1Y+5ceUyIb~5zK%~xW<&^2V7+2AB}8PJb!Pa&O4Zln$r;PIivSbfZ6>RAgs7R z$65;*D9|ZqKxY}rR2*ngv~gG-QNVz|h>j6BuzPZ|-I0=>BJbEs@WdfVi^4Uwu691L6{Z>;0KTS0AL2JmRSA_-2KZtb~Ydwx6iw|?(O>&gE#$|Y*K)O%#IGokYF0?1^9 zfjp{*dvrfy0>SFaIRsy=xg6R6an=^PCok#%r0am**1Pj?#Q?;ZdlKBxVeg%JGQ}qA 
zX9A*Hl)?ESy8PQ!QTWX*-#-w|N&tO+Y>vw~t={PhZWiMVgT5TZ{*=>O8BgyAQ&@_;)1@r2K! z3sA?)hPUIm6^0Iv0cXTJt7Zt0MMP3ZUJD0KszK}J0;9wJ5}Vd9{0yIkp>tfBxO*3^ z(cVIV&9*y;Y^d`j;HZ*8eVIwjA;a8IloOGj2cF~B$Wm@QKmwVWD-a(C6}RSXfSKfB zNQo-K1)#u9W;JW~TB5D|e`*@2SPgS7LE4s8E}lT9LtJs8y!5A@g!+$qeHuJ>-eP){ zxkq-ElP$I_;Z`ED<5GF$5c0oX-7+J*Lcnh+hkAby+8!4;M8#br5#qd0)xyhRbEif~ zUCdsppWV}rI2y&4T7g&5Is}Aa#95~a1=Tde6m>1pB<~_PU^&0f4Fi&NgN)M(H1K603u!i=MqV2}EO&VK3!oeM zdiZ}xxI@e;Y(=>d16bi~{}-NO1{FErkR7Ye4(42=(5Uo7Rqihwk(%WWPAmfmGjqyQ zKT7k`ADk_gf>2ck-T%0YZU7>HKx0{oikc^O%I{UvZW__x5bfY|+I5Xfla7xw?Gbn^ zZt=SL>gmN8;3#FjyP;C|wDr$gaFKQGZ!gy~gPYBVG`N){$IM84{w&VabeWpd%hco< zT*V0aS9WZMT3EiGj(k-GlOVS?|Ei$dSAk?^<0PR6Mo}l5z1ac5g8U7nLS5ZUeNPf} z&&y8)lCBRqEM|jYQdLS4c`mcfn|8lqwnCzncTZg=mn=w6sj zVJWn$S(V&)&^9z)Fiwb-x63jYt7X#Mq{D z-+79$5Mm_HV|15l1Sz*G_-c80V?tHJs90pCdB>UP2ELwI`wC^y@nc(ka!bXa61$z2J1&(sq?hXMFZN$+LfE|BYgn zr(-ggoV2q=umz@dqrdl2R)jn^eVEu^IJ(pS(rO+vy7)21<=|g*$RFy4;gL3_Pqcn?bu}bk5u`tSXV)>JxWRGGj{DUm*0E|7}*7# zzJALjBIn|h1LQi|*x#Jp0)LVcbCN>wJq`dj%(0t5howyFgAF4Rv5X0QK>5=N`#jTI_g9g?gYu!4A`pF(Fd$=<)CE|wS@r~r{C z`RXSNpTQ@F59VJ@#g+n=aw38>;kJyVz+MCK|LOJv#a_W-HlwN+0b0sXtpDMeas|jx z57uLBlJw4s1)t5ywh<|X*0b$=yVe58zaUv5_KwrA2Joizl5z5uD-dXTB2se zD3bJ*QlH96eJf7mAR0)iCXqKrPD%c>lxiyB`dF?Yd;G!r@CdbV zni2$DJhyvPT*08rS}rfS_E;}C2out!8TVE%H4sBeN&YSNE}rWmeFI^6qM;u;(hU|Z z7nx>zjoND9i~(8{gO`jlHxpo2FX}XQDke$0^fMb1a_Q%J7w4bV8k+d!i}`IrVRm+Q zi}}n9R3h_s^<(6qo%1Q9uw{8^iM~seGeFSCk&>K|CRhF|H6J^t_kh*gSEVLa24ay? 
zukhs-!76Fj`Tx8W3DnLuIhV!TkR$rd;ijv?<@XFUZ9*tQZiR<}52Gpct)ner%ym2} zfJeXJVHPwEK{p_H;z$0MmxAnC_$P@$sSjdU)$uZb`Q6TpMW>Z<9yR-jq13`huKOAg zF%|CIA%4M2{p=Yj5=I&Ub^#84 zs~!2v!3{9>82z;05RTV^tl*t);lJ_*NePpEYQsl zA^t=@p8M(S;qCq8$e$8%`}budrNtlW3yGj$m3!4iLS+&&>{eB%9xBEyg}(U(yW)wX zr}n-%Pu5n|%XbVD0+irZZomifj>|dr3T$w-i{52|PnWI2XFMemqN$0$&LO~__g3_l z=G?WIa02QQ*T$oZfvG3Vc?720)>^p$%f(9M!EuZcCcFbHlAVCz5=30^husR`zXmPZA@5>e!E6pM+j72Ze}{o zt}6_Yp!ro(s9p#Fz@~r_o7)xfF}cQp{a zi4kD`66eb6WK47h+fi8+jtLnlDTq%0Q5|*>kGmI0^9FRW?>18eleg*jK0=9Kx4eZP z*8mHglubiPZn#*pi#AP}oFTDZ+3(Zvx8H|emczE^WgkJ5dk~!s0-l(NZwkDu1=Kc= z|I1!n6K5}Ow}Lq@qm(u4eUKO%+FzJoihB2=Jc&PdA< zjJvC3hjyW@;`OWcuVLY|@LE1yq2D&EsmUvcnsTbEN1mO3?gqdzB~zV2nMSaR$ZOuI zlOlob>=wxxA`Lk@#k2Fddn;2cU8pDa__Xib@? zUeSJ6*n1iJOYZ7at%|be^#PwQvMl-x8i84Ls`(`pNTli_y#3^@A@A@z_F zWRMpiCWQUcUR?2u{c&v(-ZNN-s`qC%(^VzdeFa=x1R8KVM26uilRO#bmWIJ9;*K+Q z;*{jS95Q+ieZ59~Gh9V{D_buF5&O|;h$v==V#V$r8R{6nLx}HmIOTN?A`wJg5hZ>> zLH4fnqlY?fBo{B#JkO;6wKZ36K|LirYP1kL+N&UVwe|VHXOYswakLgyeL0&LOLo~0 zn^D*IU)6qGc3GvzY^aUOTear;Po(ufXz`p`ibg&X&SeFe>+uZ1l#{Gye%Z}f`~7-Q zx&DTZn(u~Lgv)T(`k;Kx^VuTF1?v1?zodHVHnA3wZ~n7*S_^6&w72}|AW)8DI$xyxQuJOex%xm z-*#3UTARHXv(Ri-+o}1HEZ?(x#;agMKYy6kBK@&{H;eS+f!_CV1FMw$MvB6>DEbH1 zee)~3J*-?zja^m62xlg2ALd~~Z1K6jD^mp2%V&2SD~$3Ue7tFz2+1efwJ_`WVeQCu z>|2ZPib*&d~@{cHd zNRQnxM9@V0$?rx)XMAS{&0(7y`|)hYT;cLV-aVINWQmMAdOtRr{Zh^1U@;4Gu1pxQ z<x$DK^}y{fNrmPN7?WM@^38`>2-|Z23n>Q|uV=9P*|lmwjWh+`(MW%z4e2 zEr}4%QY)%t(qr}wOO9$lkTbpS=CQ;jQ2@N`bBeT0@8A(J(lp{}bsz3S zJ6`=-L4+kfQffpM1@HS>v$H3t2&+v;FP7`z+t>yh`|@WCk6=jGxzdhb&iVt)v_t#E zUNC_R?YXoD*42FUrqHkQnx5^xvzN6py7wx}-kU~0Ee!EC$?tSFwpV1(ediC$TNvI-7BosT{Oz!<8deX0Krs5~M z;4eO*PuFpKAu)Gb9V&<`ODI|xV5W{cRX}QUQh0gAPw?HrCfpFej89=Ix-6du+D8Td zhO`%wj59rg?4gxDMlSbK1oE(=c@YN#DIHq{E~TIXDpiUm$%=k~;a*+8E{Xzm?;oby zK%oM+IsI65?=tt3ci=^WS2)Ve;6KZ`&xEVE#5roKmbJPUnM$A}Q`C|;r~UVR|MEbf zT2l*;y*!>bB4x*(cS|&hLbM5&G@4j!2$k2ziye4_aFsGmmz}@Ax^pts$vC`036s*D z<}%ICUHoPWp3$$T=$w+CCw}BfV9S;k^$Je{hP;(I2 
zG+cHwvX`qcAsPIKFNMKIjh6c!qm6GnV3ll~124lmURfj!V1Uu9A5N`Z7wF0f%P^rFVl{KI+8zd%*?W>U;e`N!zNG~1 z_~oR^*)mwbE77GB$p-@u%B&WML)bp#e=gv~-m8i_MLB>yT<*EPEFmT(zi{nZCBkuz zfpOCo%$gBn`J5ta{_DMOKKqkh?BRFx%Fs`*a62^+O+gt}F$_JrX@f7!_$#`P4R=M6 zi|)eu(=R0C5DA(7`IjI}_ada;y<^>{WJX|=;SDGM4m5^QFMtn989EU5lxjgq%YpaY zZL*j4_N^@Y$!B~Tf6H;1v#QrFJRu7~Qq03@gk6%0XmSJ28$p20i6klK=fZpTEcp6j z5meo86m06SxQV2Hm2qvgSak29>% zKn>Zh-wYAsJGysw3HMEzyZ@Hxq_Ta%4anIJ;-lEI9e~XhVE%kU7&qETuJz^umXK@t zy3EvrWlABKpMEqtArle;Ib@)&puj~LBy3|vh9;#0yO&nKw4Xfr z87S2DdPj
4rnZcmMq_zrO9q$L_+YsdrZFm-$Eug^?tUS?a_ol!L~lPqyt)syLE ziQR`G2(%v6xif0FX$nbk#1TL35pGPPAq63Bz zYD&R%)u8v@2Vy=P$=^SRm_*iTNEf}==LeA{YZykkdxreGiRG_AJy|)Pe=a}q3&lPS z1~|+e{zP&H8Bmjp{C07dad^v#q!1P-tN?&P^WSj3T)uxF%NNfbWJFJcf(~kfkv7KF zH?pS~#*qi^#=xvyhb}Fab?+1DgBXSAof-lIRK$G2^v)GDTwo^dAcrRGU=$@dV)xqX z-BEHR$$pdn_q||? zKuLap?BCBDs6vmEWZ>}K`0pcA$LYccyudv9@ZZ06;0M&7R`ePF`~4@T@B`0M>Wakw zT_2est|2Gqm6s%gjEYI=e_VinD|`gPdcvaOQLrG}yYya2)i?#q6Q=+Et%JH6qs1Ge zReRT~+|B^`{)$Br$A9a23lel2d!|0~zxDkJKR{ojcqZoG_2oQ+LgR_(s^FDTu$vhG zpAsu@yMuP%#Vem0=*YJKG;@WCluzrg#>#C2>pCUB_C&$OrG}|rHPFmKg?66rs@J_Y z>bN^4RS+^F0+-Teg$?h!WhH|yMM$Hi1bLyA_nSH=&vs^a&Gvl)C06oWegQ~q#fkx5 zdw$hFcUH!c-%HsTR?0T)wD4>c(Ru(FAMG6{PR`*%xaaoT@lEnF6XM?@N3Rv}Z%|mf z*8J>8q(+}EaO(htmtrZ9T-s}9H#2w?XD|D=_~pYW;w3;rNE$i1^&PoXgJ5#-*w7_@ z=h4@|E&mO+o>oY_(drQ!jSUT|hZhN*f`N=r29UNJh{@VRV)LuOv)=QcTiHF~HFp6< zd#*8uUtVaxI3t~v3~k@1+gsp4RNeb}g#}UgfXeq^0gOseeQC_8`TR zNqXksg!*ravnDO%D`IPUFE71sEPn`dlSR)lVt&ootFACqxbIrM(y}O>rS^9E4^r16 zltXl=N4l%D8s3?qa1OV$q$pagYt$q-OlNMG-?vzuJ4ua2BQ&0{;1SS{e$T{Zx36_wW zcA|Zn@{a0B42o;K!oyv+vAeJevEW#vFtr8Cy4ijD|?LesJlT36HPN^|d1h z$6>G($zUo)5oaA|xx5SGzP`*j-d5gLh6&D#U|MD{JE8>C(ssA=*|YJm2%L>)&&l%Q zLE?`N_g+8;p4Wk#2l*p!DCTa%cxSO@4Fc#eR4@xdJm50v3_WmNx#NcxB)Rtka};xY z!#L$81O2knlMv;o>y+l3lcg5kgKzX@rWvpiH6}b7DTVWx zlcq7jq6|R-7*+{O7o@K%eREprPuUuAB8$rg95*r#`=AtN)m5|26u$t`w$ON|0P|eG zFA16%qvMd7N2QZ5B@+AXBB3GtbJjl&?!F&9sV38yKUt@HeRhI|DP#Of=i$Y{U+^F@&rEb8_Ta6h3|Y_l2%Z3~uLQy-!)LGzU*vS) zTt|m&%Pz?~11wwNS#%lBQJ8g+t_mcLBW`u47!DzAD3u3$k1aUlBBLoVE#e8=*K*F)xPX6aQiROfZQUEKxGxEP1ym(+nkUz zy(t3$8pydFD6Hx#3rOvaY)UyiHC2yBDO;0Zw&01CQ;WZ$$`(mMedn?*6JcrP z0p*jDkC9GDrcC|vnmEESXLUjF^2g0PJNQQJ{EJS}7gyo-3KZY+%}i3g(^oh!mdFP*<< zVc+qQ2~H<z;(rfvG@$8k;(6%k! 
zebzFM?E;`}Q-z2So=`tVO^zYQQekj;+FN?WDfs1BzOeL(Y+Q3sWCg|8DB> z+O3s>UREJ4`W3g@U-qen^^%yiNXkAM57a>LPDeN}flkc5L?BqEI3UZW1DUnQKp%Nq9?%?guK9BxfH{jvmhs+}$c4Cw*vyjn)}kAtIZ&>mAo6GH6*Cu>3*l!NlY#%J&9us7t&c@=d{R;``avI2}VV zw|0@TImxGA*eH#xKI$aq6*NpE1qYt`udK_}N7}etbMx)jl5B3M*KSp@KSuJN@BP{V zKRPTIyCcoT*q?}w*2igcZw{5B05BBt2N+!hf6g@lYM}qxh%Y+VM(Z}P+wmJ?y zzsg6V%b@k#4SYe@xO|0I$83ZW<~8i`+94<9r@0~)gbxKVU&4?E3kw};Yq3_sV2ol5 z_3#AkkJG6a7w2aDF{C71Lh5JHvtgtp$8O2_P-amcd zPYTuO$J*kS&TeGuWE{n)CBQ!MPKx7XU@N4 z2ThzYzCI2|G&!pFuE8%#IZ`lk6I*@JH!h(@YeI!6gk5Dv=1S`m< zUtesA>D!om*W_wR@3C2ec_fRU-?I5~7fyuUe~qT3E2zB|VsVGv?`9^NClOJqp$bLa z(otXQmu$u3pEjwJFk!^9Z7(zWGjB6U1t2N^rvP<2b=fTG6qXgvMR3PtS z*Cv>d-z6L2>8Y{~oY#DY37OyjEL;XNpuA5qF!jxy(~6FlYksEOd-z4;3LFAcmCB|g zA|b^|(l4=+D+|rcsJ&Q0d&_>pvE)+*zLdo~@q@%vC+|6LPE&2~^9UNm)P3Vs#Yh-< zE6z+ecj{ZgzugtH8;YyKe@5-&UpkYPhMr@aEkWnqaOT zp1W9f*Je4YSQ%Z~HZf(}sk{8Q6S^DRyRNyJTudQV83NRdvQ`LAxQ*F2WXC+d{DPY3SBs&Qy|Um2%6VB&ox1h~`R*sLzUf;>=xIOO zn2kHN-y_L~`Q95o_GLGLbLn*qUrK*T<{siB38D%U^t@*FD|(rq#+CI(jU@i!6Z z;ojQ6ax1|wuv?Q^`3X)AtHi?-?EU%i+WsDJii~8D&zCRF(0muY8$1vgSO3Sz_gK3@ z9<92D5ZhBWrBDZxPL4A?Hc_zv@hYf&q>msfO{+EtuBt616&)mhvYRKe!iZh8s&{yj zqSAnS6``^$<V6*IsaNFP6ijlZqd zpe$YWrZiNUenf!;Jow*I91JRJcfh+#$n4tcOb|A={6O@O5z^Qa?P8sm=m+oK zm7cRlXTp^W2=#u!^?@5COUFsPUZf7B2G*Ds?{+2eVelb3#Z4}UR1_K?vH>C4H)5rh z(w+jRh`-a}m$)HHBHTd1L~F~7|NiZhOX*oEAaE?a^bAVs&G7s%`%KV_Bnm@n-^bA1 zmTQ$wl0}^*=x>)r7O>&Rj-QHkotEW`cZi~!;<}k0r4XwjhHIivAllNmr)3J1my_qm zETIQRmnG~^L*g52C2c>anny5^;^XRBWzgiD6jC3Upx+G>jI_k#_!DQ-71DwmIbCzQ zZH3$~%J*P#*hoF6cL73&+H;hzPE(HWSP;g@U4@=o}bTT%@9%%0+TR)+kmJ`rSO;cT~z$;-< zsN<+*u8yHa)G3J28+jktPsB*XGoi+Y$aCHVWp)lkq}#e zRp7K%x;LZLL6j1k>l))%UZ@!?Uq1K~6=kd0=UdZ3B)ee`9vdhf(SU7S8S(Xk8%BEOE% z{hx!_Xxb*;NSq|tRPxpyX(ZWybI19v$&6b3=|{A8+%~`0E^|(tw#hCL#BB7b9ODn5 zP^1{7nb)pAeha@pvZvk>CqIh~EW7QZ=1;uMQ`?c`EX}|;@*}=0azOP)SxkZYF&A}C zoF79&&~!?*rY~DDb7IZ@3kMj?pP97nh(2=G#)~`}qZzDOzVi8@%4;SGc1!MRg0G2T zTA{!uuf^RS!^0l*Sj9~xVj2m}zFyz3Q^vl;46kyoudb@webH#xs9Eb(7u1+kKa2Y* 
zI%a>Fx9QPL5{}ruS*vhLcA1R7b(N@hmHiXDdxp(Uc!FCXEf*O%&)5eH&lMWJ8!3@s1{vk= zVEHm)rqp=761(4~c}(oL-)C3-{HkcfWWt(R3L)Mz?-^c~M2~smEC2dh(G*wcCFL9y zV-^+pY$oQFFOgZFk+}dpL4=?l5eA-4t!6*qk4sUYH#c)3yJMd$}B-Y&&s)Q*)iP zGhyI)pl`@P@Rp!WViZ-DT?)@rTTQ5}y|=G=Gm@}huxBw`U{WZSpRmQzD{nqm&ozDf z5VOnbCwohdmgL~M>LbBnsR!oxsKg|ASr{TkiEkC$JQ_QC!X-9qcDBz z<8Y$J_)4toEH4RP-nuJ4VWAOUUlroRxx~F&?vcfv&Gf`waY<_|W*f)7yk9AUZK)=L zr=c={{}bU)L(is?3ihYdF3IiTXMBOT9gIhBR@i2!0rRNQHv||yPfjKp0 z!tXs>uJxT(==+|kZE)Om=6A;T>`%|;@~vMNVM46uPL0O=gA4_DA;77r|3m21apxe+ z8woy~`itPH=U}0%7YWwyJNy@6j8qOn|3QDr;^e0@k}Ut1sf*+_XFDSl?IHX z1^51g8gIgHywm!355x>w$pAf^)&3^)4+s?@fe(nzVe+E>@1k(punFN9elhmEbon3H-anKy024 ztn9PAtKd?y_XEtt@Btd{qF$5zcTx24A7mtjosr6R805r_ObJ20e%#dn$l8w&Uns^s z@cdAFAt4iQ*TL)zNON*>GRt7*H*a;EB>d_Dt~WXa5H+-%1{8JV^!*NW`UuPzj4RwM z(B$RZbITfo)RzEx3?Q@(;VcN)_IOUsG_E*=y@%j@aTuOgsesmm^%-+MU$A@7cc>Z-Vz=oqRz)(YKCztY%#`&AK6q~-u!bpQ@>^BhiTtIbw z%)X0#7C#S*;HGEza=@B;7+^83Ef*cpg|O#0ucOo0b}rfO-z11HU+cXIW4R{O~Yr?jObn5-_+<6m(c9-bTA>smPmr~CtKGVZo z(T062?)a>zfD&oH(P72x6GoE*=tL0&ef$9!bR7;+sJ!_> z5;HjBZx>7){vB~^(OCkLmwNM}_Q7#_`wwR%>5+xNYV`eHUod0Pa`>?0^7i3#1{ydc zMY!qJO|aNoM<#h-c>4X>^bV+hzh-0tK;IrkUh6I%O!SB4d3-r>Y5N`FVbJWj*2pd97sCrq3TZ5JwapQ7DR$j}`Q{N8WA1*Ai!YuU1zHtDF9Ct& z=HjR8Uxq(01}JW)xElYaX_!dV6-N}Zsyb<9$(`yxN}(DnK8<-0MtF2cN!&0Zx}p7J zzmsI?`@DGuF&mlc+$umcs>+$+Kzc;#!0AM;ad zK;9Z|qug-t7d%8X<$g*}Gv>c`Y72q49kDAv$U-7K1b%X#;~)-qR)XQZoBbp@r4+$t zF4|4=JENjE_|h-_n%2pt2yvyE13AhRiU03=s)V;tVyLw;!4CK@D?+aBRrMEM>Dzj!&WAVA%2XZ8l8j0gbmSvaoH6U#a(Eq+~>_QH7<&svB4wKyd)dhnqEb|{Buf!9b|tjg5`#+Fm6)vWefsi!e$Vqh??3Qf*IeRi z=FGY8bMK!mk5)A)APxTk=8+`sJ))a4Bm834X~PyIo%gm zc&X!p_}y{saUr^ZjJq5DO=CE)sVt)|oQ<80UPeZ|?-Y^RYlFVoVJwv-i){%Qc7Ju3?L7hJlgD9D*&F1L5~ntNj5_Fm&P`tMd-n0uol^VwFx8!CrWcogj8SF*H1P=x{a(;1{|uA@ze`q_vAL&u z$OX7xm;B0*zsZJlbH{UMrd0GjduOss&qOhjXY zYZj}0e2f{xB~Cz9HVfx0iD*deOtPaf)g~H{%cyqwbI*BJxZaKeg;FIrK_85_YQ?)o zhqfp?+amOl;(tYw-f{={!ZLh$q!8jIrIPT6yx`gGQV`9+kiA%#m9Cec24^-yH~v8R zxyq*#4~fBj=XX!o%Cb(n6J1MY3Ouxsc 
z>0fBk#GL!=;vtL4Y9%q8nmJ(hd+DcX8g!7e(JVh?(Q`kI$&;r%nx3~{u8$v&OtwC% z&-HtzQKf}I%^JU`d85;Z9)b9D>qh7i(fbF$c>iP3KW#3p0~s6>c>nyWY`~(}lPABf z?dCw%y=LQN!zmy(V~~)xGZ*CETzI2JmL>(=+w^S$UQ&_SO z<|nRGc*rs{F9Gh7i}nG-VXgf|BVRbt3%!9*M~%d;oY@^b@9eUSGQBdrN91?S&BQ?J zaYcLV@`r1m#4r91(7q}_V^W78s#Cm_#3o*&2;M*dgO*%?H&zQf_IJHPFs~$ZBG%CT z&BKCuQdk^TCD{iUt2|#I@kH^&_D~WdSY)eEQ6{c2-alnWvUa)g3=F1d!d?;e}M86N{|tjXw`X2&_4jv}xtsh%u@~ z(*WWj0LEq8I|Z()vPm2?(hb)#d`jJ-*}XOeVlS!Eujny~wczftB_ZQf{!so95AK!a zxjDEQ=f3NEBoRD_AI`y1qfe$q_=hY%QKSfyx@sd1e1)Xq>wy#+z`W@>%R;Q%TmTX5 zQ)O`Ot*KqNrh?Yg!yZuoD#38~AO+E`Qj2~n!(m8v6P7}WPg!JQt*QVgo}y{3c~nzE zehPcYRu`jZ)9m{J9bd;}lSL%qn?qa|F(zZ+U7fzXD*0FkRS`5I)0y}!mXtfdCL-P2 zYKvCJhK0^W2F@6ik#xsXH=Jd!*Gq>-E%v{fEIf_y*ED=~V>;>8;H?GbM|>73U|pFq zz105svJyDJKQ*mo@qP)cZLU*e^K+SmBr>TP%X`({yNzOk`Hnty6xHc2S3G6pAe-db zz~qHiYtONsw?>=h$HZ7tqfAM5Z)&sc-DM?Hpz6F!c>gpK;~` zF-_kF6dyw(M*P{$u!ZEBO$Ws7EOsP{;A7lFa~xmp;bFuf))?CT=hsAK9f{c8*d#X7 zC8qVR^>-SI8qRCYJCw0Lr%Mj@Sod_zxm|6Hbe*>bhud*9WhrLzhKVUIC?fPhXmEBu zk@}?(>=q`1nbrhlTNoK%Wkh(+w@Ut&LL%BQS#l`7wHx|P3~I);>GHOmxz~Z;oFZ6M zkGL$?#SB>(aw%b<&BuY$w*+6^BvLu|Olr|~5U|&Go9jg8bFtjcIPad(zYdp{u^+tnUwx6dsTH6CAZ}Y+#&&dkhmMm(o2MY&bS#c?N0* z(docf{>i!+vDyuw!xxBWh|i3j#ZM;c+trDZl&?N+Zez-rG^ruiAj}v0$)v{@gI>AuV-ZS_s?5{7B$S+}HYK~D4;G(w(X z7h}svMJeOuYUCJ&r5(MvUy&Qe{EdlsyxnS=*^!nmqYe0g^=d~StMs%USy?4Cf9B=8 zp@VVV(blmxBrM6geWb&|{SRD^t6ueY*&nHi_sC`d>zPJEdujO}nQ?4i4DrMzasOe~ zQ2DzB_LwiL6lVkX(&-928nx8QL}F=-`L{F9E-%K1I_-zYChE$)uOfDWSxryg5l_XB z`!Gt`UG_+{&8Ju|?m?9=N8akx!t%yxb+N$Ij78<7;wUf||HMtRJY}ckVUkw9`gUgi5^_ORMFp{YV2Q8Md^Jwf#OWw- zgpImz&2$G+ZQf0SH{#OBGoHiZPHdQF4&)Hu*wqsC)Xt-LT+D+vi1yI464M=uZt%vi z8_Zb`Eji0ANmqzI?+?JwuPFvz+Fy_8wCkm7*erGvZ14uA1P5XxrQT&=cRhSPVy@-q zfjk9XeooD(b*V7`Wa@>+;TVjurXqGtt9wK?@YJQ!91BvYGB_ zjxTzoacXbrZW`{^`i`vQo?v{EZM8uEP8XC`c7iK$rf;sa;No8Cjeku%IeVlv@S$o2 z2}j%M+wf3S*)DhaBDHuoOus7J{|o9-K|fzHhQSx({ELxckd$^ol|Ae9#KnHm z4fmHCdaN)_xISSYO>`-pcuVM{M@m+_T#_pr5gx7QkqF_1t3` zS?mRcNA#By_r%L#eY|IrD;HwKP*RAtg4Xw9!8_J)l;eY4>w(EK@5^sKB+{4hsL)d| 
z*xpdy*G6k29b%)Q3-{59ydf5FJ)z}SyoZ|6s_Sdj(l}IesD+^o9*31Xa`V@M@EKuy zt~GotAwc@I$@kp|uHlwNu2f^6f;mh)wz^Etl{AjQv}@iGjXBKUy3-zq*xZ;&3x0YV_b} zn@v*Gp|Y-qx-^wVEU)b>ssOVKuT;R&s>8~hV|B_$+GmGpNK+ZQ=V^lKZ6Zl>`p9S0 zye|>8q9w>ut4d??C}`!Ib|InCUlK_R5`N*A^*E&-`74>1AegatI#<7<>b*4$rh7U^ zob3ksc3o;15?9uKrx6kWcj4ncC`lvP^QZICv190zb!z53U?d0?rWNmK<#OQ>!WCd` zeQdB5n(=YU>dG!=EM3e~*dJ}LCIRdFMskrhFQBL7mt$DqKiH9OJqrh9cg~#(h?Gn6 zK#G;;7`Ect@Ta;~b4a4FOnN+BEGW@tT3n+EOz z7KsZ->wn9*>ZV=wPJpv)jzM~cIW-dro8JGLcYdV(#eAfmyFI(QaCzhgwe8Era+Y!y z@g(Z)4FW%D4Kh3Z;cULtz444`fiy&$JIE|R%;t9p=NLjTdE`@f@W2( zHlvYy0y<4iGuA&baQeY+EWLfD6XuvxY&QN?Y;~QA4RP$pUT_Sob67uFY?Swhy3;*p zuNo&8OhNgahfE-OHFuiL2!q+B({|uZ!JeGg7UI5?&t58fu}BZ~F#!=If_v%Myi;TZ zUxb8blb#6c>)NGB5q6Acb- z65V{weGuq9*iw-j_e>u>R2FIKm-x6ZlRd_j(r)mL;QqUYtzVUdzrtw(02^ zHpsjT@_L^<{UKQ|(z$jC;UuxawDyCw>%KCdyRnTvXN`$0C#vhY^<;!X2)$u4d*kkf z!N#kb93?s8W}7s@sI|P6?8KWYtvW853%dhu^5nR<6?Aj-^Jm3-O_I^ZL<0N!4|G449pdzZdtGUqjnCwJ>ZD|5@l23jl3BZX* z?Ncv)pn<<{&5>i~mFGM@K0c5H>wByfi2FA)$@VYQeD3v z>Qy^jnvZPB3~vkU`$-VpeDE%v{_>i$^v6}}<-n0DqnSfre;xhwE#Xx8kk_B5&4ouF zJ4);*IChbnyK7(VpEhk`BhS8(i0~VC;f03N(b|+#U2gSG@%v zxIsV-vgfk786WQJkg}htfDtJ7$?60?sN9C&=)%Ty@J0I8yN^Nu!LcFO3n0S(i#`~| zfK?pIKmFFOEzo5EI_%g($OV2kf1#VLZGYt~2w(q8{~oFix;)*l(lj%c!knY&>#_-W z+Attxzglbj^Z0kv7Kni+J3uF19M>Nm*!rBLA-GW^ZSyzV*bEwiRw##T__mP~8}I=6 zuRkl<#(&<*fm7L)kRqLJ3@QyQ+)agUa&KQD1-v2uOEih;pR%O!g2LVZ@ae^gZEOZe za>J(?KfJd7j^PJyNS;*M2Z99tmgP|zc*CxiQUFc-tq%id@D1{c7f)Mlz2UAUxM`q$ z^9FH(&|Q%Cvhf?@f!)S13Gl|J&I`Ps78c!R(K|~)`tjY$%3+s4Y!S2e`6H*eeOY$1?;3%7MU z`6Q_ApsSQ*wwmpKUi>fsLvg!w?+0ula{ujiV9*oCd7U=@Uuy0y)MSrdGoRUNaR2Rg z;852x=zo#+zto%&v^OeROdH(BLjUu2SO9pGPb&NW;6WbE#ydAg9zI+AU*$~FCIdZ& z@>I{ArZ*4sk~fn7Hf|t{xd+rv56of~7k+SVwHTBr85k{d0YOi1KL9h7iq(N6UjPVh zIZjJ`#BDb#G9cN4oN#Sm@!Dq)+8Du>@v+n?%m6?pzpmv@n||N!+i3Dp6y&A|gtg*M zPu)5MguTjWY3D%SaX1VjTwXob-iEe~aE1tw;IRtq0Qlq%O*$OLz_;hsrk)K4sUmbZPwfNuw(;T22e^KzPJ5`XW$bN zRp&q8w+hw12lS8UUmp%`x88oJ5iW`Ku5Wc1>L%zUJvRS#anoqfo-UvHVYhXG7(BQD 
zW_JI+)7t|X2^z>qB^Fyv+~64KEH(lk{H?Y>p#(nhc1EYtVe1p`z>J97~{@W-&mnl9qTQ*9JFUi8$_(x-G2l5XV48!X+`lS z65N+_E`0jwy9OEYL+T-5KlSC_N`e2ibl{8S@U`E~RdQg0#=c%&?y7y3*&nE?ApA0& z$^Y5~HcQ|~4kCW}fFyDRAQ2wD50A)w^2qkik;sQ^erNqIe$n!PRFbk*w`v-o$O1^O zC+GD6aPBXj2i}(EdB5NrKw#qyMK29Op<9s41x-*Nxd?0=oJ#pKcx5&rE^9`V{it5IUs0-1{=<%GBF?ZRRtlpSs1qT>JSNLW9qLs(PXm zrM~(TKw*QOS!$n4pH%|vF~J$A?EF62YZZHkbE*3jM{$eZYC3tO^c?UiQx9DW)cKa_ zx~uh;eh`4xmjFKVSDO>;BP{YRLxoT zhjBOoGUTwrnRHl4OJ>lI`x(>S`kT*yd5ouwIJ^py9#5SLyzsO`7I1oNpO;6O9&vQM z0OX$jA+$jE&S^m9Pw+362b}z{0U*EdhTOYDO?z0a)MXc+queY&3#&r-haN(q<$=x7 z#{l4D_k1OA;g?2epf7Ky^LHqFs}@Llih-cjnBMui1_17}?k_<%c|r095=enop{DOG z$PFmT^%GFJJhjbE1Xl0MSGror^!p8OjfDpQayON5F9F=ir!LKLPEWc@5d_UazFA8k zit~l5*tPExQ0!=qa|?;CNTh4cfm9T;EX;2|YfbhTsb>=aM)xBtMt1vzLehze2mtkq zR{<9;VHKB%8Gz2uW`e|*=Sy#QFlcP!5pT!Dn(Q8#N{vVSNd9(T>Z-SA?^UuLpfjV zC}@xR6DGJ|`YGqv!yp+(u!naMGUA-QeO7}{!GUAsG0>SOoa#3}hC-GZJ7@v?HdnI( z*!R?Sh^-xA4{6tg;@)zZ5JAE05!s%3A-#WK*1Esbex3ip+$v_+1!x?eAb{|s!l}m9 zt1;RPEF@3+4B#{8DP(0~3^ZJhb;ZAX0$Irj6+y@t=qh@GbeEziPNA_l@rv2b8pyf{ zii^{hN(i3^LbCygGosg|$@;osbN%APemef>aVS)k^Ec?-Gn|Tz%xY)|vIdq+T^}vb zU5dJ4g*erD!wIb8EFuR>jMHSfM{)sm`IePq#@K2YuwYJ{-LXu$)ArCgirLjV8*iz( z8=|!60{ToBO67nR^>yd|Hafqv>6zUv-ARlp-^3UmXT@U@*!iDLXTybrV2j>>W?an9 zEz-rr-2wUY7f!3~$dr+k`UYaes-alcUdJGBx&12YSiVji%WL2OFuRgCAeI9R2b@J> zTSjDo++n2ceqeOx3zuJ6&c<~UtjQU~j0Q{(Frb`ODmsA2W!3`yk{8Hst0ptEz942U z>k{-O%c}1{vg-y6)~Vb513sEaJl@CMd9;ju`kc`g*O!_t6>rcYHK)kYLB zcu`n4Z5xe7^{}?Ee6cl7L^zb;+quu7tsA%i3npHJ_n&FG6vn0tY)18zgdQ64K}FdPIZedv|iT?3hb?TLolpFUMR5qfrs}S}nOP z_<#?Xs_JFS641rk_Z|Lt;v)iu%6cwG)dn+qF3v!fSl4)|EuUES<3YAIIS-e$ymyH> z^`ttWkhi%|5{r}013ra^jy|{mpX?t9L$JuQpOH)DO{3Q{J(G~fq9Y`a<^kSP6(Hu+ zUVG_1;V?-(E@eYp4!OgfZiI8ZGBfCZbBf+a92sqHh~X~xju+sZKbAS*F(5k-hZ-x7 z42j9?q%QGc0fzs!>o;r_Th4u@8e>Nz zX=9F|ZNB?ONX!a)40V%W%sGMKr;SA<>^ujME{V(CE^i$LGsyq;r_-MYpK|ERPqxSdUUh*e}n0Y&q}sZ&@6n^@+4pm;rGiE|Z5XE*}^`O2Fk zC;Nc&*U5F!SGDd@OOVQ{wOzQzhzp!p&uiE&;g=Bo_0Zw4L4K#&DME<+WQ-7&(C~r0 
zl9h`)%|nLYkyLMDawWef(SA)ah1A*#aN4BUU+Lh4!*)dpjNf<*(V2G7{(=&p&c-X= zq4$Hb`I&h(FHr4c_h6+4%^&HmwZ#j2LP3L5d~xdjm*!#+h_?l5yu?&qljnf7l3O%( zEWo4J7h47Rh!ac&wk|BpM%hvcnvHK3aqL7pm3Ko@KW;xutv8f*KFgHD)`K}?GkI3d z`}zr=0H0Y(^w7-ccb4GL_W;-A8CH)z2A5jA5XM|}Z8$30gSW(87a{w1>U}bg; zZ{X#b)d(`VCRKY-(riZMrx9OJ9WfIh5=EEP)`JsXwwAc^q3*JIo#l{Z5@K?5aB(e< z?myOGFPQ~HG<4efDQ>_9_}6kro^un_Sah!UAEH|&b);~3X?iz#yhRR=))sEIq`yli}{Xt+V2i`d8%IaX_0lw2g*zh$Tqk|&FS1vIq$V=16h@& zp8TzQJ?TYa*vVz}Z7u*6)0uC&w+qyym$up;#-0yNUzqz^OVltVn~21=ewu!xXFgAj zzO3B5$@m|1oWx4zi;z2xp*-wJ#vw~D>Mb_bHFI`XW^r!)>xzl_Si^6MaMqA${YE?E zA?lp77hdT{HXzOc#Oambp;*ULz&IfwIP-$=F~NJ(Yr1g=r0Bknf^iCyicxa%SbX*y zWQ>=AQQ0fSt7Qeduz{{!O$H$!Uf&RN&oq>Ubs<+-7>z3OUxU?w^U`%)&URC(0f@tfd@a~XwlGCp!=T;oC#+12v-u39(1fIA<=1lRLB}kr(5*oM8&SaA_ zXu-cdp3oEvTAsWIPmS1=paOnj$eAFDJI=`W1$0#p7}YU^;9+$dF;Qw2t}_AD7Fy#T*9DZAm$1-2(CwLoOwJVq@?4dC%%uVh3uP#ZVunv^1pLCe6WH zd&7!`Po!TP!S1y)xGY)Gle&CIujB?U zv7yz?tc;jfzU<~o+?>By#Mj%e8?3Sv)R^#7V=V*9T>WEK)>=)~xYK|h0xk(xJsYx$ zsoaITvDB0*HE#6eLPg+Q`E%*MR()EuK|hW!+?Q};1J+T%7aOEX4tz?3;>Ya6nW*JVpIu&YA^baO=HxmIy3pu<7$kH zoYdj+#W(|{8f!u^k)&*3EUunxNbHt|Nvf*f8VE@@bxG{dt^ubmpDndv3Nww-)M#gA zmSC?5{mK{IefDt>%^c++(HN&^W0m8=in$ z=f@_9we6WwA9Ke$TALDFma~R%wUkc6#z7IPa$}!lne5BFYok$U-l1?3r@rh3VtfN7 z6UIDCp4MKvbvG;+DK8X`m&%l@NWRbUEF<)ewY(b=7l4N)l@JZ0;I2&0c`pu}oflOVd>MAmKUx=R*HS|zN9$U8bxm1SJaGWLaZ8JTeiErLF#Bs46O?T}_b+O%Y z&5-sHAxKpW%P%2rAO`sd4e@ce<~Sb4E!3;Wo_ECs`&=)gKF6{RojoRU9N2fL;pcMmsxORbDP zjbcZ3eo;Hs6_j;Fqe@#H57t6~-LAXKI{2GejF3ytjk0Qf@dl(lo~1h}mt;9I_jsmb zvyHzAsi!x3>1T^bDcT4MafiM?5aGGTNil(+hkH@3c;qZdpr~wtn9?lZg5YjX@{tc}@#n~W#y5TB}#!?PD6^SPtHv&Y8!RvtdVz55_gMUq3LN8j^Fx*M6 zotF!NemB8JeO^k}I6hk8ESNQqWAde!MxX z0waujABH+q&N9NP&*4}veKLh==j?#{5SYjODY5%?;FoAXAfn$Wy&)c66z)WtYhf5l zk2$U6sl?8;*WA}X!u_Yi~Tfen1!LL-a{p` zm?x~2B25Y9?g3)1QHEU>pN02_aT>?c!K0&R>`4mv-f%1S{QVcvu(p)}uVyaJeNiwu z81tN{qzJQz@~%&dJ>BW67f+Zrb5F1|MI^g>z-jOUlYQfDw@r{RdoInr1m^3%C})z) zqF~&t_HvqsH7yp*fr7lUnC6b4Y5P1tYf@xx6AkEzhdsPps3Fe#@l&H}E{u%1`PL-Y 
z{*KG@ENvVU(ud7@#XDMS^aRP5!D`zyBQt?V2L?YgT&c)``z=l;;ReIHb=CUHDdf#` zgsdp<&7;eBq876YdD*qUGtmqVKQO&-hGs0~RaTH>kgG05#1qJ$W%3h%n*6IV(V(DJ z3@K+a=6%i7LlPzz3b%lv4U588O_-=BoTUy%u34@*;aoiOPYYB&qS`y^2j5jD> zQ`3rNnPj5Dp!C%6T5F;MYat28-$YPnCTJ$aIO!*%N7dwfjK@NrNVaC^vZIFCD6S1V z*=0m0UUh%tn@>T;N{sK%0a5T@7>&hD+466dQM<7=6es=jBwuNFB|aMI!+sGCPVI-m zp)aM4Q&b4Ah^jQ)XI5!Y>Y50SM|xenKhhR43QydQ&~18T-(jTFCBFN8J8ZUL2m-SFZ8l#uK{3 z<_y=o+zogb6Iq`z!{(64#tlEY#yXoFXEv=9%3hVdgv)8-&`B^b$bY4!$lP4LV1cAZ z@{mobU$`l7L%o20!C(yv+aRzbOEyT4GY)17m3s{%u&6MLqZ*hS5?`F<&=apm(p2@t zLZWiJ-FTRT1qb8TWwD-cRM>S+%-a49$1Eb+VbX5EuB~1zIbtNLpWM)BN7@(4!FNcJ z#U0KfRz`@SB}xm2ck-?2wC*D42ZuOGKA>Jd&nTUGuxGm4s8wE15N^L~1czyzIOcsF zVk*6zs<`W=0uy4_P@AYbs#1fvhd5pU7`1HuV<8rXlDLO%SR3U-EXEO)7wQ-)JFo{D z{-9yvV=y&-d1_t08DVjN#Xlg{fyYPg8>|WrF{@V@Uzw_DvcDPYu759qMl-|dFiCY>LQ5|iYrx8B?ka9bngmIgI9kv-RPUF6M}iUwC9#bO$YG1>8^WG8ao!3Vfy zYNtk@RA5W5?|yHYvWb&1Q#ajuCG6}Dl!GnVNT3HqU^umK>o=Pdiv0`oJ z-=qkvu)bw(h}meHa4l9L8|m1oQ&j~6N2~EW{^*1v>xEq-gPQa1a9N5WG*TmELS;rG zFp7%N5})|Pl!qV0bqqe~K8QO409RF+!e5sTkusE&O?wj;Y-N1FsPd;Wuwd@aZP)x1 z0SeKUg)L?GZtNx7z8cxqWKynWbUr0Pg@BPAXOimX5N8<@>~g;da|-<9N@bktDU&^~ z*2*$Y3pYzLPwMsxDdQfz@w9WAxfLCaXloYq5TT4<$v8tB@rw<}u7&J*TrX9mkYdl( zpo}=gQNbMC_0qN7=kD4G>%*+)cXkxb*Q&Voyc<|d;y2C_pIl&RN<7?xb~Vj9Jvvdcic~s?A&JRz}+Hllp0hX|+4)jwV*9Hl4 z_wy@jI}l8=e5f7_VXbfPPc5Oq7#X~bjfoAhBX3!)tb!4W!uGr8HResGAkw+U!stPP zVX?kcM=l>6(bJIhm1#(=?UzO~C;N;n2g&e($N*O7JF(bF`xkpbe8<}_=hFnN4SAyinLTgHv|gG0tXupJDXlU0o9#$ zIuHE=&r0>ONhV>yASUGe-V9uVMcSa1xq`7~kxsZnbzTpn)Wq2Ce0;nZb2}mN5fRfL7X|l&h|RdV2No} zyZMxM`+C_LRrzq-yY+`pB+>gInnSPaxza7L-VNeJMA0wb+WI?99^917;KB>?Y*(~2f~CNAHEj_@b{5(eI+l=#h=)fH;Mv8 z%yGdU!1hmo^V#fvfi1vU-W`5NfHM?iFF}|fWNFw7JZ`7n-M`^XJ+_5=8z5ArYyfK? 
z@;n4KsgUjbCxE|saS5wG17T%N0mD~4AKO=aGoRm~@Lw;?gKIAb*4a(K2AZ2#i>>;)T>&uPx7dLyFX7VxRXG$HicjE$upy zq_Tdw-}3fTo`F|D!@-m(lDXbK}R7N+7V!oVgt|TMo?ePJ!H^;~37_x?hJN4do2b zFZl%tmPL>infF9?dgc63{HN`YNubSXr17gfO^pjvT(*EN_Dm= z0B}Be85$9Q$N}IW;l;F3y1E#MDG30`OYIw;a269F$`(_7%Gs_&6b}akra>S-x(I@d z%xa_O?fXp+AKaoIG+Htp-hZ?BMGrgDMC@t-q|!R{14;(s64x7wMg7>(uZSMlioXfyl z-a&~7koNTbPB?JJs66xb?z671^` zN1)z5RKv#HS#p4?VNnT~e?aom{S=JI_48BS7 zajbROvdAiA7Hb0Z^YaNju>Y&PGh?yOa;wVO3AoD?m_`T{##*yT@PGkt)I9*5e?QbG zA%QLWmjvc!47t>*>$r7OTH}RJK}z&y=AfOp9bQ258t~pq<9Vr_$i8!Fkn?;1Fo%9V z-aqXF96OcN$X2{iD)ayBny~YmHp`H7KSLelcLfCjU(jL@lyQ)9x>{muWQ~*qLjug6 zd1wkq^j(3K@1;~80O;NLCD*P;)Mr*@|f5&sT8 z{_s2eoaQf&T;AMzyh%Lp`;yo%;klFBqo*xI!{QG*z5I8Gzlx)Z?S+h8PXjlw0N_Si z`(f@CyO3Mw-2#NpvXUJUT|@w9s=nHL03Ev_yaE? zj>3g2*VGGxfEm~f_@98<%?gZYWGRn`PC)U#v+oKb%sj750e9c=!ip_oPW}gI6t5KP zTZg{dKnzv(EeJT*&Yu4c+$0|tP-uXh;0;1j%K=JG1MwG>V<*3`Fvsza@QEm*c!T{# zM~cQ9)DtZcy?1x7=Kt&h4tmkF(p}qE-$I^0K7ksa!5y|T9bMrMvaj(()>X(3V8%gw zH>&5`D?~UDJ3(Fqo{-R=OGLBhvt)K3#9z4(u2U~&Eu#mfo}EHlPXqU_9{Qj8)JB1P zlqFnD_ZsA4p6lBx1R_^(f?x73y;7(G@ac(AojvmD>O?M-!YVKZcd7;u z|MDQKcrbA~rkk4qNI4C;Am(}Ujyb54p85X=e~O%gLVCq8fJD$9C`Bwxk})q+vH7Z_lN=Z3IUs*Vd0=7)RvAMJW7AOaC8?Spc+X1 zm(7YOn1J8ix$pW<9bV48$baYFOVApK+xgKhXk3*CE?rA_ zyD}mfGUo^G9^cTS*S_DG6&A2_>3Lu!EV!5|L9)X`SxS3QAPDveRSS6Xej`+pY4mko;BfY?TY^ZGz+-do{m zUERi%jpqGckwePg0MArIm8HKTA2KN341OXx5`YJ>Fi=9N_ejWcB_Lb9Sm!|R0DhvZ z)ieosBUnI88$|>8_`3&|Sz|_UqiA66*#oS|zpC3DO(y26{L3~?5(ba!Qq$o|XuLBp z<0D?B0w>cdcIJ&JCLbtm08Dprmav8)BH&ZCszyMLpuTi~)IHr()!+D$Ku=5}CA;W{ z0}IQKA%5_?G(f^R52>GZfd_F_pJUMTagcNEB6qZGLBi>I5pNzzm2`13_dT18KWSIFVs&Fx&;JUPL_@fFg}ih7HY1^RBR-!xT84?I6EGf!U6(^ZZZf1*d*a4w&=8Sj+JW53AorTMNb*10?vM8ApG>+dXU{W}xWRwp@mdVo&_3fF6hb{>P zPl2F3A_x-e5=I>RXDM$3g-Nxj7)oP{hZ5cR=PPp{mjNh%&K+9}*0V~_zwkl^)YtF5 z_ZO73p+Nf!V&PQ!FJs&X3T$NJb3TDt2hd$U_j$XY#Bcm1pc86Pkn z@A$u~{R~Su#CB943HI}(*81Prm>s%0Mfj)gS)lz2Qxb;{8@Q$+@UU)}0raj4hzkYT zrqoM}emG)X31Rc@I}483!Og1R`cVBIGa>$HCs(ye=0rt z=yAzY;14AHTC(GkW#RnCIK_K8`eC8kg%HV>EW%gxyQSjz>T7~l$v%gVPl`r{c3hcr 
zwa}RnD~WozFIl4Jsk_Bvu%dpq6m=YLI(SKRi#Ce~LkfG8cB$a?6e6{?4H(bkRhjyo zNTvG=@KTLY?H~uV0`TPub6L=cI8y_X_2&-<^PxpWZRsW1seF?}v8yvY$AF?BNgRGM z1@f}3+{^aG3QuL?0mGy*FpGyWFPs@Vm%kH3Ru;?06IS(?x|r*(j|FW;51;>j@D^$D z5$+(OKrmV9rJ2cDNFF`oS?AZ4Rh5Dxz&EA4PNp)kC6SofZ16tBdyz-nJg9m9W(3HA zP0QTyM>~!(eDk5rNDjfpGwwAUL~mAlX=gu1{5H}Ld#N#c{4*fbNFSePd&g)F?B;1Af5^Pi#gkHwi!Cq72d5VK#B2Q z3(+ad42OSsM}Kv5_odk(t$~+8)$D8_@Hhn!F0Wq4eeg#G#sq_zT@LKSK5B79#DMga zvm!FGyf~OAZ$6RU8Q}VG#*BKD#`HFF_lXikV3VH)W*+v0Yu80}0({=6V5qZ9ogC5J zqPvbfuzWj`WlBPT*R&OhLVo$F?fp9~0$$U1EMk^%qR2*KXk1bykvaqy#^_v`{qP25 zr(`TA(G5ahrlO1ib$!tZ`rZlXdxxrAJ7!Fv@BJ7qPyb;s^`>&^KpQWpswC1T8oVK) zs;ZWVPFJO%#@d^v2Pz)#mU_iw!PKlQZ&#LYLc2Be_EIqmc(VpV9vgVzMqdiGf-O@C z8xcGNCe~8s6_A9H@5>3e*^l)ojL%07letWx>XfDj!TC9ZN01(AR5J=pT>!mU2FRR{ z4?o~1tpaN$8TVIds=y~p;Z#+zkXR{o&_e9PpK9yL5VMFr3u6bb0<)zr;92kN zX4;4bvA9qhXn(ETpa_B&=FhhL@d4!ouWrz^=5uc5mR?RFGHJk?1))@5u?o;MT>i{A z@wkKKEM(yyf0oqG{P%gI(?L5~V&lapl7NR)F#2_xf)tt?Y=dN*qIm?@xC&+Kz=q)I z`97sz=V&?qJPSn`&_ZnMRMMZPBAD&4`E_7W?A2;6FiX8W?hJgdcJb$c7?5x|HK=l- z%foof5}?bkajUt%D!|11mMr8G!Y_N$!Akum*a@CDkb<5QczHrkA}-*1aaw>NCytlS zFhW=zy1oCoSVvrH2KuNL=%~B+-MVt@!3J0vB>292&zbr6rYNvgCN?`0Du9m;)(6bo z3I^6zwlzM^`xK0Z;5k`Ch1TL%K@?)~V*x)|=!d~G=_$=(pFIg;LktTl!u-pIk5E?0jgeaCGJ|fP8#nxG(WKJxn z8i8|htN(P#f!g`w9kWxDfQRCf0FnK`SaYlt3p;-j@XtR#?`F5>fknolCp>@eoNkZ6 zHj&oD5uMN#N%SaTbuSw}!ZebagKlVA#yi#pe!F=~>u*)EHOs@;kt)|eJ&hhXjW=i+ zP4@v^(7qw`WtW1DhaE5dgABr|D@(2Lf+_vte&3F#C5>@r)VH9)YQFem7nqH~AH(A( zqi7qwPfqPDCDN&6N+1$1BLe8jQ0lt}%1-3(V<5MK_7@YEq^5U;LPV!Xf8$~05j(m- z1U^rfMjKf=()VP)(5?rZu#|o0kLxBY9KLJRb%rTD1!0OahCYeu435b|ml@$v9o!o+ znhG>jU?$$#aNnLB5XWw;lc4CFF$z=)wgsli?Mh!K{uy_6VDgDXOh2}(_bi2Ax*v77 zE@U;JQX2?bvWH#Irr81% zLLGp=SO6x*b65L8O6!%qoaYH}Y2q$Wx(8!!9-)Vz-qXvhDRT#GOD0dn_}zs3x9q`A z#u7xQfCx#f#I+dC{=@A#S{2uks7yu^kG?557Fw%&ZWj_x65gR)WHEWCcA!X_j)GqZ zoZx-Ak}mz|Lkp(v$*ux0RMbd{=w3VeDnq$+2TQ}+&l~KlfTo=SmO_SL0(fAe`FFhd z&+@Y&+Y-h5DpHl5>J@=yVur$z2Sr417J2eBrvIhRSS0TnX5kMwX}k~e*WZCWByu)f 
zmfcdC34Xz@&IV6(3gCrl)cNsq8YRkiW@}tj+`Z~ZB+}_55=jCEgT1}RJ%mBm7G3!K z;wjMF47B`JaM1bGXVj4V zl?p5}FE=@Z@zMIZhRX%=TEoiZ?%eaZ`__4mr^VU%<>_fB(!PQ|h$4_Euv%ueOcSat zjfr_3bOU!lz^9D!m+$TYIv`V5%oecU6v$EsHaq1j9C>gy9Xq5C zvjCqwU|udX3SRiD6oLvj8HL!Idja7ylAPrX_J+&-SCOB5q%en}?5re!xAB1Jvb=6^ zyOBCSyG2A$pfq7gseju)7ZbpJ6YzHJ$@;iA!UM|B;M|YL=HfWKfnuD7dbrA^s28R| zJm$OmM-;3`V?>Hb(!Ti0b;(##H%i^F%tS?o>=DH4stNtxzE|?O05L>5F#Ww6;Rl{1 z>AY3f?)$ty`(eD!b98*kPog8Y0!bCfs zcN$Dtthqsm4z=u=Vwsl*1-9B^9yb}F8C^`tq9RQ^uEF#-%qU!B!^12#x6^OM$bLjw zA;~p%J;bj|eTsn#+)o*dwuJsO3h)zRb~lttQqwnS;$!NGGdMHQ>m|SZ8wE;3T;lw*OFqb z=WX>FXxm$JT8sM?+h#EE5}2N|XE0bCPSBId{TjQPNr!Bvg1xTs&tmmpuwA*HKq zDH%y^&uSymwr?LEt$kXxVoxXn%wigVpVIPL>90xvGx9#aY1549YpCL%h9FPH5LV+= zUf$Y%W79DX^PBdc%{23UUdi}Qe9!oOZ$@l6Gj?h`ESO&te8caZG+QF}VKggS2_)=z+fgY0$OL?qTOKdQM0udT~{i76pGt6EVEE$Q0^Uv+#U zL2#Tw!SS|P*zUvb5FGixf6buCX(M_pyd{U131^5^?qCCVz13(zNzc2#Z*VfY8JF}w z&uqAyBBZyXs`9kle5kGk1v=Geg!vnvd+04cQ}MfWN-mvsn)F;`$|T)JEwpo$iggkV z62wkD#_`kB6AMv%23XA8yOpWi zGTBm6u?FO3lnV4Do z!?`SW6Hy~_F3*9c(s@oWe!}W+xVsTOIxKtxg*!eBXT*|ri7K8tEz56r3eckWU&@5> zyY4r(epLDuQacXI?m2qIqW(i8NAU(yWSVRPz*|)Iz?`}tvLG{_)6-xS+AseT&}dcX zAIzlw&->*Vu~uXAy_!1+P?HFYMG-|Zlq@$tY~;#?5>mBD7A%O=Lh~i}3{!C>Bmz79 z@VvF2y)rsM^|9j@SP|`5|4;T)>iYQeIu}qG zWGZ(CS7g^UXOLh8wbkZFbu*GCdF-%d753QMJB$O9C248n0Fjvw{=WSRHUiRHh95cr z6MkgbJN0*W_Pc9iBu>H~2Sd}S)W{pyr5kPk3-UUwP31SA1TG$(v84x%wxZ?k+-J4W z2Ko5Prd8TE7a8aS9vT!ClscBxJatN}OLOB&pqTThksByWAf_aJ8TLLKbnPPQbB4-BVsV4M zfdw3Z{gs&9#)12 ztKGnrKZ|ym{li9R-d?Aa5ppvnD<_QgMCzc~CWSEdpoS!GXz{ahE7I6)Z(TNTZ9dE) z@z(Mx@61-mYW|5CTi0ky{7H?0=~x3Ye2xY>Unl6=W|zlMOh=uk8i!ctg&{@bzrRM7 zy$glF;O|;0qqn>Oq##}dm!c$V5I}qP;OO@$l{bx0h;$$vd~2_HH}uL_N{D!VGfEzQ z9K6ZowOh;Tz+Yt$v98_7QfTj`=LL1;1X#aoFKjxIVyT;r)!XuYEcgXb^S%igSq@jC zUTAogTdO=fB!>Wj927LapY>Irzl^qWr@=R{3JgNoRSNX=!2^bqn;BU14_k3|5mbwM zsULxldjENet@_`lK1W=hpES;Q7+)o)?`Mz@=xiT3$4wC30f5fn5dbYocYw5e5~lM^ zfQ4Yhdr55Qncy2&I*&f+4VXOG_rmhp9O!wm{o(5+pZ*oqNR4!fHpAC961~pLh6S@! 
zJ4LGeYsO+~=%em{rLb_#D)s}K8hkaZyP!a`3Kfd-p6dzv%i~~&ov0P1FLBBy7lQF3 zfZ@$nG{3OrQ(rKYoG07nnHFM*b1;jw{e|1#ELg(C#POj+J7tMu8cuK!_&5bT!u$%$ zu0fUX7*fZ&+l$!OGO47~o<}5qmIIPaIGd@%L$J;R!MYdG2A6F}5c+#=ZC~~H)KYzb z)GO?ofR?-iot`=%;g!Iodp8K6O0G@QXh90p3GVQXZ5n*hcvx&+4Zlq zgQ{N`ZoPMbyn_g<5mbfn;UidqFhE3SXn%{rWHD_#aem%fVBa}FdN7b%|uxD z_TC{k7Cv<`P@$i?Cp*T}QM6nRcU%hFvSawsiLZ%obXp*KEh#k$?pa*CAjjnItEe4o z6gpu42gAJs42S6k-uAloz_4q8L8GA>2&Rvr?6A@0RpUQn+8Bu=Pu;vixpDUqroE2< z*@2I(KCg;dCt=uI$`oxNcb(F%FMtR}?%RW(>7!f5$<2_p5URE~b)&Yzh%|r69Ht(S z=N+ka-Yr>V5NP`A7v!=HJhqhNxOtsqHJP6cGf(i-pv_l;-zj8mp5+60C(kw@nueQ* z|2?z)6u<46?s`^p%C)>^z2$eGJ&Sx>t?GNIrLmV!%52KI6oBxDk)SoV`03F{nyqS0{dGb^$sKbv^l=%yHKaqoaTOGhA ze4u6g&{HWYl;;&~P6#djyV?MVWW4`wfm+7$>Hu#z&%yvnaASBl-!XK>Ply{YONHIn zZ2Pk=Ior)6uugwVFvv64RkzLp9Vk?wq|$4^ouqs(@JYD8)-+BgR1*sL1L8 zR#+fr-aT2vm98-Hc4;bdNCrKV|h6kZ4LYLs%BATdfzCNzN9yz z$uO6s*KObB9ErdCqVPiFuCl+Z02HPiJ=R+;#Hg?PG@B*3$5m>{v6~&LmY082dUxwI`CkZn z#B^bA#JJ1?WEgsuZ8qWdR#vK!z(kZ6fS>1JrW-P@mogH+Kr;dKfxy9q)1c*7LBLU0gZ<)Gyik| z@|-#~b5laS9@B`T?WyEeKub$Ll8(ss`XcyC{$~36Z`BdTUh`DwxIikkzlMDThHNr2 zs;nj|Kr-68Wu+m6yz?!NrKRa<@xUy7w=O9M8vGQZnAZ=amLDQ@uVg@E-lft+6cHp6 z=4x>C`gZU|AkU^c^fs*ZVL<5-5)8++ zI9$dm-Ve2~HkuOO>gd==z*UQ@DP4jy=NrTtksK7{1j~Kn!6KPjECe6{rEQtSR)7>m zz-1Bq6A;e_Ly6G26ZGoepnjmku~R<54%4M}SDffRPn0HqcuxQAtpdq8dM*5rAXCPZ z5Sa=P?0w(O8wNr3%LG*DVFRWSt>nD8`H(TzCN4qGm@5fSz<_thoI%` z6A)khBH;1&3)OcmZn54xNNoNYpc1UqvIi$1AS8sxlSxD(dm>)NHWm4i3PHQL3xm5V z!4SJ4408^}+=<8FNn8&|UfR9m6+Fux4|tY`R+T2~S&u6r5L1k829qh=O&>BTD_zaeRZw77n z{oXi_c+ifM1}`Sp2*uN}{;(qWRYQ@hQ$4tnzyu1$YU_ab_@VEnRd*vMBNa}QXb|>6 zvk9+jOw|{6fBp=wsatQ%SPEA7RoJlah=fI68Qb2#c}SIDI@FV$lP*}=u)mTVcXt)A z*5VB$$m8qq>MyS+oMQ^n7QTnv$V&SXI+n3&~Gd{of_RdgAEsx>%ZyWB517|Uw zvFJ*e3@k5>cY}^3Tq=nGV5kTtOK8{c&r_F?Ab*C_<%=O`ZF?k;qzeVwZM5c%4ezD2 z;?WY03_9j0D7q}NQqeqm_(g{WeLWoCV&ZLQ+jJ5xy#7+yrVvA6f>}(3YtCHG#vYVP z+p|~2MqvrVo)!`1YVV4U->I|GoQqQ=$Vny4)u(nW0nkdbE{CLC9&jiU}0OV7HO8Wq`YPZ2%-v-%5^DK@&K zLoy&8lM*`X;#Bivt#|%z;0iu=2u}kK| 
z(dyG+(0dEI_4hL@_z#WhK&$*}l!N>Gvx=B}10F^;r>qjTAESGv#LgQjU#p-5Uk>%7 z9)MiYs~P--Q9gXvR_HDdq*zO68)_~+_~q>Ar=Dx-d*qCt9|f}R{X6g#l!z%WOiB$p znS65}dY5HD9}ptpC7?iMg~QCU3Fduj1Cn@7EXpf_gQwJ>gKINslMG3K?c+kU-g zzRHv|8H6eVEwD{imH9{!NT`gY9Gx4iV0`X zqi?zzlKDmBV3T$SU8<7FOZNea&vi2Kn4znrr4NQ7uT40fsn$Db?CmhdJoc9 z0j3&(qr4CXL^Y=_op$~kBer;~MwtF=qam2$V8jhYh^dU&>)on$x1^0jf)Z!JKxznX zjiO^tyOu!6I{HG_2CVVDH?-j_^&()UNO)nqtMlPZG>8JG7dDK1(V17R$Typ-HLY-a zch2~YzZsI&U$3xx)rBL?x+}ZxJ&Wu$&?o@Q3;=M?N-tO_+`tYXkM=|MaD(xl^H~pJ z-RW+8UR?;1ple&cvbv#?F)fXaILp@%4FLyMMEA|pouj&}2jHv_pDR&;ml-;N7c0v* z$3kGcOl=399JvG@$&zzjerE`Y;~(_gNUTBL_mm}#a8ldN5mvpbqDK23g!^~luiPsN zc(rzhgE1K&*1%4LzAvE*-jp(nsEAC>bRufFmv{(Hw-*6>1gn1jbw(-<~6jNFKZ zh!NDo>1k)cheBDgxxsiLY&7Rx<)1#CoZDz3({P)vKszt^CXr4Vh>2awt<8aB=ZeMc zem>mD<5Gwmj@JRxlYM)$c6{ir{rVleAFhyOHQ;@jLM>CX@Is-vfea!CQy23e4Xytj z7Pjox&E}I?X`dDzY&uG=<(`%$m`C?KWH~PziPK9jWYjw`y6fXkG)dG=-9qJEDO3zE z5TN)5KeVab1r?y5Dd@ojg8<&I6Y7+4`U$y+CJ%1cf^7 zAsZYaj{*p>o19HOe&@|BoSYp6`rk$ybuw&$(vdwUzMU-B7a3@(QI{Qnmlh$ zE^t3(0j4()sJbfqdG7@5r>$1Lj94J(`mzHsoaL#0>8r8fTJlDM^|FU#i4CdXdF8J% zdB?h~%!{8d}(-uyB`SG8Jnq+kTT)YZvlna5muc({+FV?XZX5x_9FPq$z#-CbZ(bhy_l zFa?x@d9dFZ#afdSi8vE2Dm9}4tF&aN^ZMD}zfi%Cs)~^Gx&o*di^nu1NKS zpn^_Je0(D@#s;Sw*~}zjz|g|xOc2d7lZ0T%;uEF|F={@NtcKXT@%D&ROEX4!TQFg= z-0hzt7-)0^EH*4<5((^)!{#v|3A#wUqNQ=*8F9P{SjmXnqa;u-;33?XK*8ktxY8}d z{fL?qK{xbP!{W>JiJ!_fJP!v5=+D=P#&aGTZz+(mhekR?#0e2AVzir`3Gx*5IKK-SDpXUJ&pklpcxS&;^OA_j4tn20+gV}eXUMBX9 zw_*LsMA`p;jt^*B84gW(uXz@_!^m)g->Br;yCqB2m6~=U&OBw2X@+V_-lNOr@NJu( zyu2PRO^)idE|dk~1gc1NzKJvLXZ{nh-#;h4j)?5jTcH=gU_+o@UK0`w=N!tczcGn} z*ENfl(d^5^3$Em&3a*C&=afC;>$F;|{0w}-j*)tGA_?^BoJ)RX22p(tfzT#4=U#&1 zeNl>6w+P2K?L<*oj|=VI;ZqmRJbWfQ8SE{-^Q`k#UH>5OXEh5?{;4X*WH-V+BTM*pl|g*X>!aG zeqV`^C_>IQhrJMkVL`KM(1MmpjxUE|4d6jF`N3gJb--$cV4gqQ;_%|)>aU5-W-D)| zT)wUck~$!}p)Ha_jzGQ&Ls1j-D7QLuDbn450CpFe0CD9mEb=WZWAEdc2CJ@k>|D$} zE}_0Cb7SE{pyCq(xg=7(*qUAS;&Gz?QdK~~nJ%~z9RetBI zCdmObAY^SUZlzh?%(_+pY|a5dpW0Vd?nav3eKHM 
zNbZ|qN5>ujzf6GVQQPz01!0@popEZUhni`>c>fDdmgE4f^o-jEuzLC3X7ksnzMQiR z(4G!hC0+m{nB#dd$~hfK6+;QKLx_f62Tww&H`Sm4T%B4}=Jleq8Czu(L4EY*a=}M+}K1k(3_NooU_0-X@E&^-#%U6 zJ8(#z2Dsjap0O>b5!~4EsPoD>4X|S@*zGTPd@if!!R-diLra&Eb<#3Hfy1r%Fie|K zgc$Z}3+O!^u!b5&MBt!3S8gvvoBA8u182Qn5N`rK#@DZq%@ z9F;INf2SI3;Lw#XL%u@jt@$>7Wj@47O)B-dASl`5vgGlr$g?|Zl`x`_r?6n+ilTFp z%|xb*KrzGWrG^C0%KlVL@Kms=hk4nfP(LKD%}ULS=qnmCmUD=*)Q>$9d*7ela>K>8&ybmftZ72)&3(M%O zt61}wRQ;D<7?cL^wwpq~*s^pU=$+@aHGTpW?{j|Tf|*Cm-Aai5h&Xs>h2Tkm=|h#M zM4?6P&5*%(hK{_bJ1uPxu#6z4ME&TwTQna+%w2HSBKQxioHmNJmnK&eAimu~5jpY0 zbmVtb6|srvVBsx^OfV#ko#;?#p~B6WWFHh4Mz6i@hQM4(?oNemzbHKmn@)q&{fl-##~|y^lkNYq{&*tmkB-;jJ=mTLsrc4qUgZl=xgUMH@~$B9scNF{>urjZZdNUN!o9q6Qx`KD>p z51>-$G*Fm?1vy^Ay%H$7aV9F})8^-2-Ef1q9~-&|Ki}+kEpX*43OFMVtM5@3Z}_pEAvdGKi?0rw_x-qDz;LY=8*vdA@&pAUq{7EI9r~P6(i80&i?RFRi!S)m(xi{vPI8 z768@I0sUnSpNL^&+-$;tp0eGJpm)mPEAN^jqLLQ6D@|Tb&l3RjSO(^7+R%Z?-D7jc z`ZukjZbuegj_SR>B4o$(TV^`Mb=k6C(2Y-btb${xFPHB^&hfy@8}^f_40~-M5NG0J z+&ODP(8H543W<`0cd3>3#T(&1Z7QBXSaQI0^_O)m0A)5jNLyeJLUDo=WHr*Gv86D` zc<=oMU^kUHELSbUNOY*i&xITGSq)|pyFVg-5%}_ehVM$Vsf=lc`fpC2gsVz%7A(vT zODEPfLDo&)B5u6d4!(ITCc^4@L3*0f#p@0x40!|8zUE->K z=bY*-Kqh}YDaO?Y4e=-9)NZfe;5VNzmtBt?1+e%YVll^d;ynyJ(D75 z9P%_c3mWpPkePB=V{>erKg-o-%T@i6eq)q?K9roA+rII;;gF5HAj)qM7=WNTQi8!A zgD#MiaG>5Hzh;6a$J#z;-hnJfHv%BD*Ux#}sZ1=K?*&>X=&Xw$EXy}gd&$6wLmSx& zni6v@?^UMt*z4P$K5lC08^Vv}8El-COz~@Q7PykMRjuZtVA^%ndMEyLT>ba-L*N#u zU;S!>^ZKir%|Cd7cgiav-Mc~RCzLPE1SYg(fQ``ZZ;&QH#xz}0urL!t#d0zN_mOcOYByz>Ghgs5Wrf)a(S-H2t~ zm2Of8%r+gQvBH;2I_Cfqb$;vD7ZHJTyS1aY_2gv3y2QSfL(?q{kKf^6=GlFmG%fRH z#cZ>EmtY10jEYPQUEF>Cd9Hqqy3pNa&732hXfbwBx7Ou`y=1FaId5|JhgPej=u*9M_EPdY6HCJqH zvRHK`$~0-5Ip?R_JG}lSh{$ukQ29JnJKLnqtP}G8mEP5*g4x9naY7K^$+jVu;IH10 z{IE7+@#I1x@X?a7NV>bQeu-Z*0eusHBO9~J!d4=~8mFvq7PvRllrO`?bKqR~bvm8y zXu$Q(gCZRk6W1RlwVVda2atY&vOK?I=G&%eu&Ny$!NhLrJYscKTHmkp@O@qoM9v@= z$WG@D7VrOTf_Pr%dC}dRu=;2EAq^H@*5h$aR=+-9V#KD1EUfzpoB2ibqO-ssc`=AD z-tdo8%#tH+4u9yo`xrX7l2Fn&zm#N1vOa{^0bVg{KU_zETWRuQXOy952y+>l7?jJ} 
zg{JNIG$i{{h!VOGPLVBHv=S6*$ogRbR+vZUOjdeSW`!NdUI(QWGOTm*)eDbH5+5~e zn5TUj)goVHx2&0li>d3)^_gQDm`djYYC>#4Vc^CU@Jz_DrucBhr`3T^UK}Yp2`hw? z3ESJ9W9n-+hbYrj?#!kOif90V!^^O?03ZFLy(=UCZ$e5@R7gx|Sr`*ifP$;#U~Q*c zvDSWg4b!(GYVmO4R}fs;y0j5zS%&OVU<%QIUM)Rx1E_ami<*zWDul1Mx{@<<*BPMr zCXcxy$V`P%%9Xf_CIgh`ApfvEt{gG%B+U>fTsgu$^$@wrb7M!I>XsLO2wXL;H^&R$ zWQ`Z(!RI|$_CrG*KnaD+*on+B-wKg|2&A#6n*GoGLq% z;Qy(wvJ8}0m4eT;4@O(3{i#$xw=Ln_8)YODWhg%2xtxY(hE^$wxrYL7iiDBnV2P>& zrQL4j5wViJ98d1qr4X)?*M!uYMGCK%mp|_~z6P=&6=lKJ!pSrvE#`G52b#m=EjL$w z=(+Z!r8{6jC|Yu(vf~fl!CKpT1AgQ)$HC2h2jr{BmzUP}Z(V3G6z7P`UoU1H_Ic;z z(9#3Y*7M=zW%uz}x=W5LK!f=zj_t^y6$q$XCyowk>|=16C&O%$!@ci;mm2p)k@Zr4 z;3jv#e;mC2yvqmS6`v!`n;oA6gty@mlz4n(+Ja2t4#!V4ecS?&uoUdJi27{&r9|gK zj{R~Ph?JCj;B-0(9ujn@NA`80#zx5aYS0FK0#&?r85D1T!5!d8sI#&GQDz51sYlJM z&ut=M;halQ5Zr+dQ#WAT_qnj*T#2Sk_^ICrXz#pK*Si52sc6H2;A8+Hiy2(p4tT*5 ztqiu5c76`ng-XS*N+Cy_f_bz7gtmSCO}ozB(7>6%G>YJ0Wts(*G)MRk0s#&FDCuAE zK;J&g!p}4uRjt5W)GCRC`9irFDh;FImt`=?XfYBZ&Z}kW%Q-TLST3 zaEurm+P+V>xX#KNJqfguRLyPAMK6RWAuun3N?Rszf7Kd#S^-+LcK@+x)v4U;m^RZl zm+&!C12TYgs=o37W1fU27lo-I&EYh#Y%91 z;9Z&Yd7#g>ybE?tH>$Q3=i~P96>wK%$P`U^t#qJr3xPEEexJCsblz#YG`X|CJ*WZ2 z8mo}4HxAA*Y6eq?O0lT2JWeG%7+F<63%VlcsXVMvfkjmu1T5fm#b1Ev7zzEs7JGnV zB?eKEc@b#+8W0Yhp}9|eMb+|0wEKM;g9(r>9LRzQg}s(w--y4^+$&$3RrG1en!e&$ z_o9_5rBPa56D!P(eTN49@gNu*LNSti>HaAZ0((&|*Aar5Tc!BkVH~*!K${$!0Bis8 zZYSg_Kdc(FMHNA`jlSAX+39IgD|H7<;T>i(nTw2@775@p5TaUvttNtG#FPI1;l*U9 zx@=s`_px40U=~bc`mAemB9z{+r$qogu|5&4Uzi5}Bb4}FIp1vL)AmOwVa17=q|x2s znhI?m7C$&PRufpaXv6dBvuiStPk1P_3&^ONZ&%zmfhu}Z^SGZF*dx`cK{vK81yWGc zVn-ab-&`LRmlODKDitOUA>B-LGg8ej6HxbE^m48Y)<^{Eo)Vz1I>7!%y?np*22vZA zu#8ByEf03x$YmJ{3S@i+d<223y6M0QWnsda!bsab6yZe;XM%wU+X?Mx2He&r!10<; zPhZbKoPlUc8IZfvF!>@91Q3PcabWK!K*s6-_`YC`Bt8>_g%B7$Km*I-mC(g&cEi`w z0shc@oEt#f)Z)uQ*Q)S=9vA-e#m1o8>)Q78=cG)zYgT=KHI!3zK_U3%C{pd+$Cn7X z1=JRIB_uYAmX}E%SK;vQ&h>vnquwLUyq5${UeQ!A&w9#;CRHEbZu$Eod>t=cvZXxfk<5xCJqt3rzJl{B@wTheHUCSyyArO>h<3|7fbPo0 zEnnn#G!63MEBLO=g5$!@0zWgQ@v+KgRGJY#Wv*!Wx1Q#&!2TB0Ij^BqxTk}*y6Y-_ 
zxQK6PvMt!F6hB+b;P^j?KtgahgxHKSt^4n;9RO>IfX*-29Q&XvZTPkYuEV$3-Q1iy zDhv1pyDk0S2ziJQ$v1Z+?Y0}yf>u|>?hehzU6Cdur;aW-*mX*PrjU{b0RuvY_*4%3 zrO-n2cQ9)dT5O&amx;jY?M1ZAHj&y?PdXD;f*U5#?=__&D=nseX2utQ%?+Vt3|J^1 zR66JI6$;R&E0dH-&S0i;7|cz|5=9>nc4Eq*5CZXyLR~6i4aC>Hhf>BGRs+}9ry(Ct zM`>;IqK3w7xD?R;e^$5xkn3$nR{~#A_eFz3O56Ec;zdK?Am2gaXSvz=cKsUmV28S- z-S64x-=oHbN32|8a{`V#)33$vSze`4M0a_(;E>sFrG*-jR#!lxqX%AO!%Va4tPwky%KJg71?xlql?*>tV!`WHTtqke@uXJwKnmlodI?&5P=tm@k?)f|H$ z>Xw*~FX>IYWV=UV={>?s!`KgH?H5-&i2O2%a2A%4lwY9d>RP|eKeC%cb2t_q zT=?ijhO@xne&f&r&BjVM`qxpRw~y-97Et4+IIdquq{+vhL_caU-+J*<(V1{Xq4}$lbiY_xfdUOe{m5AE?F( zRd@dMo*24!&OZq3pU@MhCqHk^K8T}B9LpNATtUU%XSV63DqqD4KJrDJPRP%l&oa01US4+a!)<)dHV9SG5Zm(c$L(%`J#s>U zV%%St0c?;0jcT9D;;_nyAU&I|D{#HD&mp~un?^tB4%;ql7qIY7Q#-z`19JW;AHbQk z!H}8~^*w;dA~2XI&;I(p8YJpD&;n)&L1lGFgS;-#Sv+UILDC_J5YSW$Xgj_e%Dyr) z`vkeypbkRKIKUY#5X^A>;SUzxUuT3C-mZeeajPQo@%@Qm_IP~0s`zFnB3XL+y>_9n zOnSOJeF%ex**wG_R+$+G=4D*?HLPBkE1zMSxj<(?j_;ghe2aL|kHzF{-}Z3!_H4-D z^t+6S#TaLzBt;2o+)M60jB(u^w^N`rv0ESAOh-KcaKhdqJ^u?zRnI}LkgHeY*8$z* z3hL5Z5i&*XacSrdU7M6I1t3IpK#SqiMke5BB>N(K)Bpp;QWD@6HITOX>qml&(z>wm z4`Pd;STm+U5p>zV`|*F=(O9oYjui{Di@h2nf;qealZdM)S9%$n#*`Dyl9<7m#H?dZ z89~C7*O(H4ZQpZp73Bu=m!0-3U}EkHvhkl9q#{>Sj6(|c@u|4UPEmHS(thnqTc$lkFI3n@1{Xnbr)!O!>ci`{dkxz z^XQ&{NidomM9}Vy5-LK1Rlr=)cMX2rZ};?vwBfrkDO6UNAWd%gw4fmauVfF6(=;=z z>~D4i>x4vYz1=pm_Q%X|X5pH9&MG#N?;kl0zB`)XX~sG;{=klY+Eehz4i;;dg(%iU zgv`VE5m!{Pl`JTcX&8xt_ClII9;|nl;sIj)>+Os|pUH-bMi6X}>(%kuk393f0+@Bu z4#SqTz&gDTT4+0<^?)*>5FkvW3?S~O4_C4^U zAZiN<5c7gX(m9SiRywcl`aPIiww&SxG;=eL4kneu&M=pN)&;is!xm~2gKh<}tQOm9 zdo?lcMa-B8F^ISaclZM6SuQ#F;9dOZ6b1fqj5o8rpu$?qOYEYVJ*MU0f{grdXAgnH z$1&~#hufKT%@H!2p3@BSPeA&O-7=Z%KLtI-#2}GA7`wL2dc3yl3I-}Q=jj*Gvs@9r9;!Ew_U+keeCn215R3SenERxPKy73c6R>j~OX z4s6EQr1#~fZ3gIh67~!v;jYM%K`6IPozj&`Y4iQ=El3Gvf=^^*35&nW)j~52#-=EA zQlbeKUn{@}a}f&W1PA-{B#Z;LOD*5Qm4^v;g86rYFtyD07`BAK-fu4=@Ic`}dfM8< zA2!2Uzj4X==^Y1nZ4TY#8B;c~r!yV~gE=|xb6s@rLgjaBI?L3kZ;+yK81;5czRcCU 
zVvL0+MG;1gF>Sxq0#rQ&p!sdpdcivFQ#w-h@$d3mO<3+|=Fj|xYWd!fm~NFvvG9FR z^%O&}8-A4IS`S*288XDVLe+T`+CJy`LOK2(R4HV8N)ZTdB$NIMrwbliLutUau9Aw#fsvZ2`CYtn~M`!!vqxWLX1K&^adNwWAh* zX|C;m8nP)@%+-v<(|UhyFoe;(%zB)h|r1;^rJr6(R-g1q7^?A9&Yyc>($fWXN{&mOq+b>2gD7yeHeVN;aBw3?YFtAHE@${&7NWMir)HV?M!H78C9>&l`0E6lv^6<6d47 zE)Y2etldW?k{%pY%uARf$bsZK1EnYB;6aH?a*EQ>8e9ahebfs!_18-Ln@bgEI72qT zK%!rK0xD-@jYACb6wOOOq}@2~qIQ>&&`d?Aqai)WT4mIyJhTx z4~VxgmHYd9jt#_=D=~E00^k*xFh3E@<a24pOvp{e!JwC z0rc@ahT((I?%@y{f&vuKW5N^2Fg6G+WI&zh!uLL$*bW#G&YxesTiNOg9u!efEj3~M zYsxIx1|#7hV4qVS@*Utx`TY?V%>-b^J0KZqg2#1&Eo4l`UPZCzitOs`f7b>L2T!1T z0sOAqr2s10Ofl&*b z&5C8MZO&8te)cE01N2}_-$OwwXF;M6MQ~BRlCF77-#!?WxfCAE$?537W01fg;f>{^ z2}LjwcK9|}XjiCVMSj7YP$-h_4#JlA0fRF?V)R($*P+8F+*^uWBeRUFpl6G1R9 zu^3!0>t4|8Q{mK0Q!tJr=(yNANrF_5IQgrW&xnsq0v0 zTR&j{3mw?iuC&wkX*gbTc)<^d1YbXSXfVxj!*|QtLGpEh*$I5_&m8>;5SQOa5_I8S zq4+#HGD9)zkLg|kErDF$8&+yH@W#&0nR*TTv_0-{=4D<=`MMz)<`Tb!l!lrId+(Xp zHoZ$Ko%!*T!-&ElT(ntohElx zpmAdzU;$bU9N`8v)^sp5{3Lj?O~WO2{`j)~*G?GE^%n9lm{l}njK|IQB|{tG4-XizV5i z`REVg=*UjErK!+Ehe9G!cjv3vtdyIW@@b`E3efqAjD0a1K(ZNO6)AmXF#SJnV(y64grVW)N{Dis?O z(@}01?W#HVVl50x`tqV(u2H$K4W7&UNHHWmUVrkXL;EORPW+RJb7uRUj}nX!X*Gyc zACw?cKA=+ynV72roxKdNCX4Lg9i20LSyu2g*n38CDpudX$Kf-I3lL;_*f*|`Y?aP6 zfqB2hpyp=ki$(l2ZMDvw$4O!-3S8LLg29ul1VM)hs>6?U4(VKQJ(QUTq$GxzB4Ww$p#+{l?1*v z&M_(Um@;kU68o zmKhj(pr@2N)xYa{(u6aEYhMS$nt~}%;d|U|a>m`FQ$WD)Jp?=ST(U-|X<=@M6yD-X z*oTYEi>Rqe=6NpO{RlAS!)wbhn|kU^8aF6N=>%amVgm$^MU^e@g|_yjA|Y=~v2DN? 
zZf+g)D5NMGP-D**GQ!*;quy!*jW4-*yDRFV5KiUL9dn>nWJMD+ct71DLl-a<>l{4~ z>Oe#XNM(8WMk(_6FR^pj)KXyIS0=tt(~98bnj8mRV&oRa;T5G0c+P5gU^l|tV2nr2 z9ccYZh~iDL1TBK(8($rpg+|*!AhjRH5uybsScXb=?rzLo>^>nlyF7kq*jhXq-@Uvs zKrrnMSn*yuv%N7QR|;SXQ~{^jX*=q`H=;g}B-3=rie?@ygq~fsVsBQuiujv<6MFuS zzxgblA>R@Xk~_4{Y}bo1NFTS{>JQz#?w75(_R^-zrR|3r0qkqjEXb<1LAM9hrN|un zG*|3V^A82#OdhLc?wP|%a}X`Q0PWuHkR6}Vg{1V^kyGSiJtqe13?WTSYC*%sMUC6= z=akvu-GaQxR;hTm5A}Tc@fouI8Qx7MC?5+0>}lNlVm+dq;k`OpGGqp}Wl(I*$#U5K zk&Vk`KAC&idmCxR1I%IUQjb@|UX>(zF<7hMXo%iO2sV0)A{tmn?SeY^Q2qPnF5nQe zzGA9zG-^bDK7ONFZfHPr(><)CfX8f@q0sjXD1hIGs~o-g_4z72X{PrM7?2?&woEJ6 zDO*IqIiA?7BYq*HAkRVoHsteK;jXa!-Eq>dlc|ZHm8_dM4NX%yv;Jp&T70_&O1aV8zvsOV3-%e(JuLd2tUFPc)L+dMrKI_spHC zi8g+^16Je*h=ERbadnK;Ky?SVR!5ymCMMLR%+1Lqd!=ZCIk;}B<`Xog4u!GOTAY$> z<4-j**JzBBaT3t*a*5d|Ublp}aZzZC7`d7_ZX5PaxmbxERTdz>V_%X|HIC7rc3V@6 z5zsvgO;yLjg)$9f`_w<|qdlIFb8{$TW!ZOUT7SOP_!0BTLMu8pm~~RTI)k!BnDijy zEyY>Hi9v-yYh&~QzWX*`ZcJC8&%gDgHmR*U`!gv*u62I1 zCd&e|2WQZy#^A2Nko?GuUyB=*b5~5snVe^Q&u~%d@mCY(J{C)MICIAD{K0%Dj%!PI z_EIhOFh!q#XG1fCbt4FzPYYVbJeM#sMRUz#Ct>bEVxYz#)}bHH0xyExa#oNwl3RNn z#u$vOx%)p+$mwgwYZF=JZsn|zT;l;WOL*#i0Mj1Z>S-sd=Sr34RPdN-csW99pcAj9 z)SRH@#9){P?F=_w<7-#q^WX0(MoR|+j1jgw}kCH$*ne=q_)ehz)C#c#w@ag`S(Yn0h!Y-8Qixh%e~L?CVYqVka+Q7|8ClV!;ve0a7!oZ~pOm1EZQlYBJd(OwHIXFiJ> z1nGrOB?K@d1c-Y!751bAG^>qaWq(fXXtT%`RVF^>9Gn+EPK(KvX)VkTsE8ON zjN+{o>GRWd2ZW4LT0iFoJY+`k8Ym(u^%vE=ngdzeGS#Tu<6?VKbFYS-7oa_jh!9B* zeZbh6Z%EvEH2`;L~12r_Q! 
zVpa<#^-5Ul8jY!nFa}AN$_&fI>Qp|MFT02$OY@-eo)#>Ix+%dS;zKIkRWV)DF)v$O z5jV&v1fRRJ29=6+fK)^^A`QPvQe~LTOScOP7tmLD^Q&>KYZ)}`v$)xr9SZ50*;{P5 zj;fy|Wq-GJh)^VGV_`*tg*a02LW4dqXPhr~QRlS~rK%vd%Kj>Q$Xm!|dxZz>z1Xx8 zSG-WYKQKvaf~z4`ydc$%IgsrV-JbZ`f783{cOa=c>0cDnU@Bt`?wEKwOtmE8Hky|4 z-)LZ!l8IfG24xbN6pFj3)=eU@+s^n_ct2oXNQg9T0 zRyLz{3X`riFwI#GGlNbKRU^TM z>@M-v(J~CXXJJ?qDIGKB!ixDRW=@K7bjVyh9hMAEmw(I1rP>}(a|MZ81;Yd}D`b}# zq(5ewWN@?8gai$8Y0_M0(Np)Skp+;^aw~SJofCLV-6N*+d`gLAb!^$*{x-rQ;Itn*Z z@@geAKW#S{H(A6F>68ge%jVwFS><|np3<{DpNVnXmThH=Q94Y7vRbgtqRoVK8f&Rx z{f{i`5%b7x;kns5d%KAiFBRYDjI{+2{e}05Duz>hXq(^cBWg*;S^->(j_IeKQB8f$ zQDnE+bsqk_I*%vtJ9Ip`X*YoyTQQ|TDj~!gbMv)4&ed8pR<-?+7FyKkro~WhsYp}SYVeA6=i({F79O4BN*+JalDn($T1sc0RzUi+p zPpdzSMFcgzHZh_` zd4kA1o{?p#rL@R#XVxdp-&dgi;2+ukCD0=(RvfqR*sLSfsqqH3c-e=2{CNS>nL&KT zPpW-O4aS?wcd&XD11QXiU5OXqB%>eE%yhxgG>()hTYM2yCpCK}tdMm=JkAb}X-X&l zzvjODAF40@-;BXvFcOW7ea%v1d5i2@RNj$f2^qVJ$S&I$gR!rLvP3EgjS|_7E!nkL zixDBatXc9o)4RUk$LBxz^iy-4d(S!d-gE9b&)4($d~NHzVYbRmW{F&3fd@(OkNZ!D zudo@y1p(PPih~Ssbv!E`UH=A=piVJOFr<)GJH2T_C>y z=ef)4Z4mPiz4iIcE{ELY3YOhcZd4H%+W}upGzcKaEPkmq*MHh{jE06M=$x*WDR4NF z0u?5hY3S6d2JiHzEDBGx_kE$Zy>>M1^`Ewf!@+37&U#9PgAJ7SYDA1XAozR(tk?`- zAb$buzC?lc{pF2QKrr48WbuI9I2bu$o;Uj#6E_>TDlWTI0EpjAhJIb70;{CYE0 zu?_r2zJZ(>HLg@@3)St5s>ImO1cC4yAs}Sr^m8(re`Cd>kO8pg0fWm4z)n;qw(1pW zYj0pTPR%}WjrR6$lPZw+={hj9p7{jyrN9in!W>Tqy9Y|o%EF%6#Gm2YPX2;_z7cWJ2r zfS}|sBUDkVKo~UpNt1tyGq|0$Tm@I7_Ppm1*x}?5B6HFyE4hZA{Vy;@APdPq0nw1S zC!q8~6YrOXnd{1ntvOC>fA%FgX6v-#helh62Vs+3Dj}T&69Ij>a}R!m)qb^rQ7r=i zb+80V2GmNQU^kG7k80}%Sm_RD@Brpm5aOJ=02YwbAnJK0LjrJY;~D9xc3&WHag$fx zu>)D0h-9MbovG*{phm*mp9A&Rg3xQ@d~K@Y{W~z8k^AAO`i~r_s`TVTkmkn*SR&j# zVa=a+aDgvxT>OVezct1!Htlh8YnPd+h*|O`eM1*8b`k*jHjK-$fbI!ha|WwFs+WGf zGYFXfp!yo)4*|q_Cb~I$3fMZPW?Z{-j_TKoZWi;z_YR&{ zF#7m>FrcBp8hIEaPWmOSRsl35+A0!!Pri%HRZWC`cn2M}t77zh8-gJ#w zr6d@ym$opMTd}r48|LwH^0f$sYSbBw&tXe|h6gV6cGmg2G}piCSU*uXa?`RTr%S*duWIEfPkD| zpz4P&N4236Y-Rzd@Lij8im^QljMcf5} zjNh8i{`T1GYb*GKVw+4gE;c=pbNQh(2=ke9kt}dGX@A%>1XrJol^xeg27c_(hM5>G 
z;F&5#b%{vq0aVnF!K;UJg?m;mUG=t4V^l*?w-gz>Uf!h*a{m$9 zmAQKoWSc5*5AVSPCCU6sUdUD!Red>o4D1Cd_|X0`8x_kpp7}4^R9VUxcgXbgz`x8# zvqY2U2@sLW{A0{l!8XH9YHoUsgbRq}D0`pnA_34p$G=$;c1r|A;I}U0A)|08U5U=E zi3XWr3++Hm5_yJwl)r#slu;WwirCh~L>NR>3+|#z1dVMi7XjgDQy#vfD*tsa*pP*+N<{w z8eE-^E(ZyBpVV3#61`StLe5*6Xtce10JhuTX`;@-_aU+d_CLa5kVz1EP6M+wW7%5~ z7RCb+|K-ocTMm=_igq|czb6P`@UF_*;j1P5u3{KtL6(Ua0ckC>A% zKnIc;nffys=LA007E$-O9~fc>?|+aB^4-B}m47}_c;(YX=kW(TdrMhC&9H9H^)uE_-GO&@05o`L_+J_TNm8iZM-Yjt~!ynGCj z%85(Px7;WNouGaxZ7jaN0<+fz5H|UEg{INL0Tu{6;V*(2@2NL0u?O?wZ7)@#STZHY zRcWzGmZJa^PBQorpM{eBIKGcG+{?2d050x>h$Hi7854urirD>@=vO_w8K{`RX$Is>MS3~S^kRC z#29Rx4?1W1EX`o?p#>}oi_?=E2@*$zAK7_$Hy+QdK8d{UYAM%w`aFw(iRV)P?+$BY&XB>sm@qs zK0;J77yX>d9JA{zFkhK#XPuGtd=B!ru~k?j&-L>GDMo)N6NKR>_|jbXB7yO5uSc5n zn;Y|2oEYUfz3Mz3W5hvsI~dE9zTyf;Vaq z>CWQkS)H~)bb9ENx=2we=}Z$g@sxV-+Hu<%fan)++>qu3{9v`imyq79htwoVda6Du_>I<7lI(w~zaffWUCLb!eiME#4 z`01Lmn4W~Z1@;&%NA}zkl{k%LhCRHWd-H{J^N3{FDMhAV9Y{pe@tJhYR4WWd`@CF* z#*Q|gJ(H262E<{$AAmWch+mA!Rd^$x^bUy#-Cxtsd6+MQn9$$gUxZ?h(EDW)n}U(< zgcx3WFGMBy5ScBzPOJh*=?7^YZ$k$cLiG(ZezO}7CZeFqV8eUz|;PxkNce32}m(SJ6XMTC$p zN*6F0De~5FH6b{{t2SEx3%ZwW_NXQ?d%3Jw{YtU-N_Eu@=bIIxRZKJwX?FiFg82UL2zVXcd;VZva z3~&Y*{em*9Ju?!r+7NofoF1k=+dsu0T=?&$>xq(9#PxT9K8;(SB8?o)kVeDWN`(vP zd)SB9He*t!E*uQ_58Uo+V%+|!>r2+PU0Vk<59gIernlZDSW10@p#FV{=9%@=JO+wF zvpG6OZ)EwE25iVQD^oK}T;!>pod7fQs&uR;STb7r5lfgAqZ=9ZS250aQ?yI$8|TDl zdEdTr>Iz)Bbq;&_zHKZ0b~(rJ^+n`CxbLUFP<#C&QABMywqe3AQR9yLDRh&88gfD- z#i2yLFEY{(b{lj3s`$g<^kW)lRYqE_!0A6;g+&?jI<}s8>mt`Y z{n$AQ0s$Z{U0fPgcFD(P3T{#@B_!i(@`WpKIuTc6bp*w0@Mz_}IuK-{>N#HO(u|T^ zY!$RyTh`xyQYsnc(9*nCu#GmE{*<5^pXGB52q2XArRr@pxOG>jKXMQ#4e?kCO);># z?1nvPFu_AwW`PVwzUBPEvVN1$+4!@%Jprk`=V*j525XLnSsw8he0}I>9a=K+jr;`n zlf|}`1Ru@zqUu&6dyJ4e<|-<924C$q{wAY;O5S^ZdfJgoRtVj#!XDJU^DXN@Qvgp6 zjbFbK&;@Q3TKT5x7{uIrkn#JVY1?s+gKwTwm)PfOoDZB_3^v8BhbooXZMR=C!h5pa zdaGPDdMiuWU2cCK=0%^oD8ky#Ip|SIfU7^PXvo2>8ySjWx(LQbbjLsUi6wt&;!CwR zvc`&k;--g+1R&m0_$@sbkA(;leH!WVVYCOrG$hes9nxy=Zs)scpD8;_Sb!nF5_uFS 
zF7CtWRDyljw`&@0{nhdNPj!dO0bmWUO0M0h=S09YVRt*laCEitIURhN;;5o7Mb@@0 zTI-XeNi7SiEWxGj4!tw>44qhZT_My)jve;%2nx#QhK=NXJ0lWGvKe_fBl`gFj=hEA z(1yt3NLckZ8SZ3O=UKo&`LZu(PUJnIh2@3`p`;^Bmbv4bBw@4(R|WKbrf_kh^DSLD z<9PLZL(5`|$2^{@25q@L83*i_L@wLbZsaPr&}4G)htT#Kb6E2PjJ9zsu05-)eje?` zD$E{XU+Ui0nC;Qdpnv)N@%LuP(k+SR(fod^3Qks12dx~%u|}W$dWK1~A<}%pT;VPj z*B!%_3+!0^uitXFt1xg%cY0{ueveR&58p1#Y)R{AY0~l0WnoniuSW+52#no|f=FXu zG~w7E@zHDAKHF6?il_6Yy)6_n5M7CZ#zO=hMr0U?xdxZLX`ac;EzvFI+RK~~8EdkZ zAmc8z!%c5AKI@2d=h}txN70sxi^36F;5|WBc&&)BzbV3E>&3FYgO%f6HtQ}cM-y;g zuOuqC*wWLCiZpwn=Krem9)iFa>+9Ml)H zc;-cSOxHr)V9;dWY*)>XFTXTZS*K%-mx3~mfHX+0h>-2t!(+Ron23hc)lUkS*5JAr zRFX}uDnmFJNF77=aSHB&np@~HKMih|F?B^&<;PX^GodGJV6@CCPmS|G#&yRI#{QBb zLbWhL(jN`lp|6h}O3G~l?fjLa-y_ZeB(3g&DBVuISj*y(Dr=1d+#Y{*J&ihO#DE5s( zvi}?tJ^K2MS|MRvhlqpYr6?`li%wQC92M-vH5-h61UR9)slvtAlB_uvU+Tw-av5N< zPi%>+5anqd#^^5D$&ep%#+=6#^%b*Kc4Ckw_$DM676>h=?OR8}W$_R`QmFY%#?rNH z=fwJdEY37;^7VPcM%bY2q1G*GWw(Wt?*?$&p697@u64c;VF*opxfF_4W>sV@FD2*R z;l6&iyy;oxX1B{5?$fSD8f)2&A-Cdat)d=9zqlQFQ{DUD8BoFU)P?Kot=GSX_tm(d zC`>6XiTRao9z{oevu=*-2;>`KR(oG=IM%$A?&tda@v(-I;cDc+t*j20H|Z3ky7S7p z>3yTLg0|9Sx678yPcR)tTEfzayz333;q~M}M|~7OYfX87Vu>kt#8Q<=1wMQWPWOga z|AG7r-#%Q;&BT%|w5q2S?cmQe<2l|V3cVM|9K=oQc0>qT=hT0PYjBi($KC^(3+_pl zd$qp=lpPTzbZ;+18_S7@aHe}*i>6q9R-W&rnd4$6Mb>xZ*I&eF65nrd>f)q#FIM{F zc=c^WaIQE3PHkL0ab8@+_%rvoZp5v%>=xgO5B~p(8^WPq$q2D z->b>##LRIZ9AyPK*uA6R&340};Rl|q?pLpHKd>`)K~)@hqLN%0g`dXOhqWRv1v!lH z-GS?CFroF*<0IlZio>dW*wozUXag=UCU8JAa$WbW$TXERAM143^zK=&JfM~1cYLN4 zKz!4h+}{VMe||sa*{mvf@Q$I2Oi&#<+C&b9d+@j}Cke04lBz}8NVQ460WZtIW zMuRtbEn3v2E|$Wa(y=!dMHaJX{%F_fK4G_feD?DWsfQ>LkmKIWwebrbSQ(Args?#q zL%Es5CDwZ_FV7rP?cwbsCm|isP90{>grJ1zy%;q)uA>gZ*st>Tc;zQH3?|3DxwCTZ zhPW4YJ6)$*mOo5%Vgxc>%_~n*CsgSOO<07Jai>}YJ4zU3T$hqoL{g9$@MR{oU1GO8 zTM45nj3Fwwa-G+ePxDI(2d(`vDW{$I4JVFM+}vQybEejH%ATdRD_YGNTD zjQ-(G78qApqFspIC3XnDaO?c~W$E2)*=Tl4&l6>@^(W*_uS!ZjP3tVV$n;z<<3oGa zHo~P2?H6{Tywz38GtG=kSYc}>4AUTru=df3Cj2b1`^ zkL@;xPvO3D>-jBsol~HNL=SD7s;5|#vRSn4@kU8czlW*PjLo?1OK`oJyqa#QDFUhf 
zQH3_t%>7(Z>5<&$GK`y9%7p1L54MqAIzisg$HC;~)?-{K%#2i0Q+tpdYEZMMm32`( zE#kY+*9qCjFMsYfXjca%glJ-rcFyL+E&sfQpeSR3=QOrclYzEFs)eVPYVToQF7C0_ z+t$bBTKa^HH1sU1sN_g-b(a}wq-rY_bzb?RI-}MXOXtCKG{-;w$^i3I9BcmOPU)?M%K3}?7z0q=p zHXLg#zR?xnS!xvXt@T{WteYR_)+ob}f5MKI`;3nl{FvU(>&uefH+?)!$Na_`U-w;~ zk;u?`-#|NSYQve2QWKa^+v#aMJfj>UfB0O1E$rvQJAIA4PuutHh_Cnd7rxf@DesK8 zDfQGo-Q#N?($*)qmM^GJ{IW@5_I?b@SHx@On4gM1tov%Y9!>f9b#eW^ckYV_#KAc4 zhlaj;A=`0@*K-^1fC2pGj>G-aHP*1zaGBJ>AniKoaJ@EpU7N4?kK0#*IR*+8z185b zZ~;$CYFTN=%PWHd{c0PYS&4VCn3euIr}+*wEdfx%T?Z^TD-#TMUopc?6-SegF3a{U zLb<*>AmPxxXzG3B0XxnqVAk7b(i5F6VI<+%i_>v7!E1s7=5$~*m^+EA^ZmY-01`Gx zooqKL*=E204vxB^K@lE)ybrZ9PypX_Q4mSSK8=#fVS)a47KRTLnJVu}`+Jcsc5q-% zX+Ch4dV(f$_yTy57Qc~ZA4gDY1;ygef>yiE{lNbB6BhnO8yvWmV19>(dIBhe{Qx>X zya6vf`0pBU=uK*o`?!npf2%2jgM~v`hvv*be;X@JEh0ql`hDwuSMAD9#3ujz{a@-u zjgUkvNt`xk2=S-wBq7kW5_pFx$KQI#z!{_^;q+g0zq<=sf08;3h5`yt|E>5WOSmua zKQ6P`Tb!G7y>tCkYV6;R!NOk)tO7{n9WZJ;+y_a}G=Rc?HsFqzmzN&^tzRqTp8eZc zSoo$OiIq%gi#wKwMx*!b4(a%KK=Cfbd{v04G??U_WvdG<4<>~&yNkMq@uF71?Z4r zObUOy1R8~k&@5neFz)()_xJxd6kM@PiF&hlXl6J6*WtfE;AHc8V9_d=SaO}eJ(mPQ>-v$R>8rfHJah0$T$ zM_dq5R7PanM@JEG!vz;yaYPiwWl)Au26b@34Tbmo+6#9PndS4opZA{zZht2y=R7&* z`#k5I=XuVP^XPXTy(e`{slXRfYa7%|OBY%5lCz2xbe0Af` zQ|s3;>m^caDNuI#_t78KJ~|9Te-{jUlbwn->gzkzkqak^52HH~ICb$QiLa=(9&YdJ z1Jm1m#(#bMQ)g^9wcTfQ@hVdFX?Q~GZw^mf%JMde8%=gk&I=usw*fE)Tp_1(ICTA{*JVuAnCu(5&p*ixluhUifqiMMNj5F3>^!T;xn5I7d z9)B7*i??VhtUJ1^d*K9th{8`)55V(y#V17muT(1X?%>HQ4q($A{Iz#ht506pb)T^2 zBoy_1fIeY+C#{*U!=gSJO#@X2#m{`A9!x?u0!TiY$Q?+CjR1^KNXm4a=_Cx4a1=Z- z!Tky>Wci8Ow}BoeYyMOW4&l- zhK{}v&c|hDA*GZP9+varAmolT*+S7>;Qw?QLdabRaxwfDW%ujY;@L?~{k^X<{d~{k*E8 z0^qc0Q3u=4o97L`)oVtBaF`dal_^yL~AiPUvKE|;1=e`n5^mXyyea2(*zrWb* z<37M{ef;i&?8tUd?815m+eU&wu;=X!&GoP&9 zLcsUHISa-ps?oINk~pC9)PSX&0o|o22F^C9NxL|Vv|?_q6mv1pRHs#n!WybY*X@i3c@i{fK$VsRsUB5rP*ks5o2x9^5*=^@u4cQ#@La7aBK+e1fFGd` z8nI5AqL3NNw`o8QTzd(wC;v~E*vsT++IrQ_qW*kAP%d`ny%=Ql)?1dJXc;kH6fzct z+C4X1-VYWnZ>!Ew?yk3IxM);cM)%&E@#8IQ_Z}-`FNw%SbSkeW7v0at;ND)`Pu7Xj 
zyykVB#XYj#61u3lj-!F(vdg)}o9#Z=36=FEynJ)N+Zn2$P@Y<^MA=%gV<{k!XV!12 zs_KZ%#`<$+{rR9OqhfPGaR1ESL4^PjY|(vCt)I!`D$oxUt}!SVt3J=Lc)c|kG>e@~ zzt}-E8f`2H0`3MqsbuqPE$U18Vc!G&E{cwt1`@aTV!Z{o2kV@#Su9?&dSTVXlEtgV;Ri64TODhC-$q|f}=cv zM1sdl;a)HIIKe&viZT`o@)-L7qIuH=j0&+fsAnk$5T%>z+!Kc2C!FMLS*lr&69jpn z0vOBf>OkQ{L}f-%C8K4u8Scy7>a>?$aDGwj$;vJWK4FD@RsTn+#|Fp#Eu1dKF4Ifu ztzNp);-}SSo|U`uQ0pcLUsy2C>Zg-PllXy}$q#$!_Mp}KlaxDPYFKulI7on zwFIor^PcDOW(ga<1RJQ||(;jrz z$8Fk~$}=!}(zvZiqTf$r1Eq@%76ktr=|8aLi@SV!y@h)pw0r@h3I%csMyIeK2&QBv z3ub|QzFMKEn(8eiq_CKYmzDBWy3?b8BvMpCHSjd*^rU^m63q0EU;8s45+S}?+*$nCB^wcT&z$spLYkvA*I;)Nu!8UDx04*IFZuqJl5cR%HZ?RfM=*d zOcy9#qs$&%iFs_G3KD&r3ju026ciWRR)QL0q83OH5x@A z0W427!aOr!o0Qne7ptt=5TQIDv+kfd%2#@9LbX~8g84cNSkQv<4Wz(P?I7P86`GAs zFW*%QTHGz>I~Y!Bp68u` zIbIN?D;SRqERPl@C6=-sx-}}bV#lT>yJW?#P4`EodF(l~F)6c+kw;rdxkLG5dc>Ap zYCNSU?Q(_+=5#!%EC}+02~MMIMQKba8Xr_9RK(K|TY(yJlc7h|Mx3Y&*REQFna5PA zYPvC>GYzCx8lV)@8Pzl@U1t=#7RMQVL69*9bvDMPjBVFt3WphgQV$2&CNsqZxDVJrdjh>i?*wUmi6&Z;Y+TwyBn}Gk?q}Ue$>y(i=6S)yP zP@AP;sld98<}hEj*h#gCB~^x!S03$@$%VZ$r1c3T-Xmt8KDk@kGp ziVGs&spUJ}f&^x$q?;?~R2RsaTd?hJKd%UdfbXKiK2b;zsUd1k0kfrGXfzA3+AAhQ zs#t9I7A-V~W>l=Td(hAx6x(fCNnEkmSLKX2VvFWTX^VcQ=wnJE29sja=+~3bDq^Gl zWEd$N#jE9E(&V^_I!fl%1wl?|Yupg!IEm6+F~e|rtQUqj#knKh$rBJawGB2QAueqj zP9B0eXf!A^^Lf4)4_umId4U>obb;rk_HaRP)a2^CF)>TCQimV$W-zO0d_1!1bj{`Q zh83GaT*{Mnt0|JD^2AP??NX^Va1^>Ll~mQq&3ax5%;&?Il35TeO$T0))=H(!$Q#cL zwM-!+VKz+4WqyRxcCp+V_{z-H${IDM%n@342V-{@2<4a$YG&w{aU?*RaiPN4lLf)P zIWsFYHDqWsUy*v@c$OJfhP`QvCg@6_PO~OySI|*pH({;&qb$J!jLr6Zq?cdI-E;%%|Oz%xjd_l zu{67nIheVCxNifcz^i}tIkJtJC;QmtvHTP-cAm)iKW6=(v* z=9YF!3$F>84&}AELSu@u#daqzx+v$itwPf%RJe9hYe{H*+=k_L!)XegTG%NUI%r2N zbs1DvJ7cGtLp8R8m!w4tvBDtga%gXc+DfD zscCv4Q<8?h-Y*b2Db5+a4m9skok1r}FXzLakPKVtqS-S#X0uyXdr{tMr0Y@-3hf3k z&g8n|2;FW|)-_k?DouGh^n|Y7k|Ad#q|L5UPy9yORTQJJB-kvD4JFcoR(k3waBk8` z&!T=~81{;3ygw{Wm2{TvCwQb6VNs>Sc<3<$wdE&{#Ko!|&U~+$S5b66vd2Jv`h2DG{k9LEnCA_X*kuZ310TxlCEon=#3pc7PGxfL>qL097yw_(Jtgh 
z5|%gI60{%~d!%JxB8>O)+@Ngc=}{pVsA;;&mz#sA$#k$fF$9atfNxpDwq6)xz538A z7g0>j4snRj^F!aP5~V8U$_s*~L6l`aD4TP-;*28FD%PvB5t3N4S%Wd$wI+m68$r5F zDUH6*bnO--j!V2~d*ddC*J^fFY4fFK*%6wZ1wjdyNU^`RC6Ru%DdZ}71ZlOBP%NR? zNK4Hj=VsCnte2s@hm8drgH(g)z<~#43^EVTdT`UrO(VPlhjX}Zz%UG9>lUWnPnR63 z*jnn{2HdCk#!|=54;nzNj*#;Gc8kOmIR9;t@87f}?*I+12 zU@5ZB z>&p1Sg&PzNdJ7;?dAc#GfKoJg0Z`f-G{^&p02*K#TM`^pMc@Z0@FJfyI&6_mv!>c( z+f~0f%qQKXVG8+n$PKHBY?8D%8$_(rn2RlYT%PqZ0$po2Je#P`YeQ{0AIe;dL8d%k z8dXd5a3Bv$0J?D#>4ddTIUme0SYXk53z4OD2SFE2G|91F7h*13YAzJiV@Bu!Xi*8}b7=8OZEx)aT+tl@qaMjV^Rb zrW&#H5+gE$q0(&6x@HedB>9Z6^xM?*5<5t=wjB=^T`pO2p=fHhR%mC)aeW@o20j7_ z^I@rJht*aa7eq$`wyNKk3aBojA~4Ohk<#^p{?e>Kr~UL}82r`lGfoLCvA8L_{#5m3 z!m!76+gB8CTz1D}VipEtD1wzGm#G+t2}QH(a>kHUu8obe1*tc7JXe^PMj2<`EQMXv z#JlOqNDn;C?ZL8GC}yjAT$B`}HJ`Uwo}P89_=2FxPa=0dkKmZXOOTT5xONCvOiK*w z_TmXm9t zG#+`Sad%qs9D$@_yxz}ngCRroOkv8Ht|(MT^>Vjc6lLC1>Y|2f%}&qYtqM(PX0jkC z`*;to_sK-4OiZ|w>)KI8Ryva`=3*w#_0;*iVGtHm>yPzWO>m2(BIva!A-hm<wHsF(gJBtYtGj zOly>~s+lXyThqn@@4Y_@f&l*g^SYkKik>zYm+5&m8BzIZv#I85C8|`*;|cD;z#c2t zbHS)NM$5w>-RKkysEi0i8p_rd1er0EE(?qa8HDH83h6=*n?#TUcjG!t#tLpsx>1Et z`p&ddu}e9cHZ3CWx--Qq353s10W-UH(t~}Iaux)kcD(_140Sm)5PX38##|g2*`QLe zA*VQwFxl>fk~7Q!kLI*5_EZlQIgHGVB&Sazy~94BVob9Os2vy4YjPqz_hJIFw;G3zQ}^m~;;K!Sk?S&2eqfLR~iM+Ps&{ zxExE=_*|eQOt#YG^(x|%&3Vw^pf)2k^rS{t>ugg%=NW;Z2&*IO<&H*?=v zs4GwU>G>oQ+J&VY;#SCeE*)`1$KiW%SE-e(PPd*;I&*S@B08eA61qJp6|3tUjtnVV z#IRnc;Y`WNT(6gdbO+afFO1Q<_M*!{L+VV4S={24sx?-|u`p4sI%U($6c}bGg*sa&Zw(XS~1GzYyHshJh$4g5}Zq}%3D9vOb!dy3v z0-*`BP;&qUaaqn6TS!hMScXsxq2|^Ya~L{adx~j-PT08-J{e$>de!B0Ey%@7i#{A{ zc4&>++-NeB=y=-c>Q%H8*N2=`)0~84tzye960u+ zxDwb^vK}f!W~t6~3JIsqOVwgVsL6COkTP>rqJam6$J<K2HpAofnnT67p zqPAGrzF)Flzd znI@fd`H6m_qA(XNTblRFb6D1kr4gKytmR07W4c7)Gef}y%gf}m(xf^d)K10$W?Yia zRw7=2EHI6Y)s7H2mRv_C%`(}HYZ8^4*88nV2C+(2qf@NUmws#rHQK6Pg37L8P6kf)VAx9)Hi`i0eoJVYVkuUC(hN}QVuDo zY!DIAAh`5^CyP8mN(7yg=e}njG?_~?IA%zkg&kibq`=O&^A^^GY{HHe;3-h(B~$u6 zXKa9g55s|LZ7q?)XG|zfnC1ONuIIAQ^-BwFpWVhy-QO6r~vbacQ zx+c0f&~R`C^pwoGNN3P^KNVdA6q(uH#xNn7aWRX 
zDnvV(!ZiVx^=?R^l@?6V{r*C*GrCWN0n4PrWIP?0fWH-3L3O)HF__DRSu`uRn3m$z zY^zcgi!5U>+}NnOa%|Fk*3M<;TrG`_`%uRyFU2N++Jk(wTCFm5InC5rhDSKi))pD21t*wgfeI-1(>aUfAvwKhVP)Eyfy&dYJuy7b zosLm<0=$^c+|C9Z(6XskzpAR8%UKH6^IbObo!n3t@Yy_FR-kTZI7q8)1iZzP?9%5@ED~Xw zXo|I*ug zycV^~#&UANU2&##^B87|LbHU|nMxs02V`BeSWRZ!q@J7Nl^N+x@x};P-abb&=BQHi zM^$v%lpTI1bkgZckr!EUw&*g|w#AIr7Mt@)qtK?o@Bo{qGrlk}8+lnz>kXm8D4d_K z#iIhO*-eRpf!xGkRiu+3%6j!MTSL4-oPowmrCrFi5>A>dOjLBYZT+0=;+6V@!x)|ePqW5q(gpzpTn)!@1As1a9qXIdM=FJJm z!wr0j#lRPeI;Ne6(*h5aPJIe=&NSG8gp*D-mI{uq4hX)PflENQ@$r1oa|n##A|7ck zb!;%;^K~MC>MbV?oK8Z(rAI|?rp+i40h$4xA%k+MrXb@kovD?jrYesVZzA(1F*bT$ zC9I{b!93frX+67K=#8)kOb2Vu5MVskRSk@p846c~R@ex$GM~-1yVGLSw;2p)3hFH3 zi~T@sH`)LW63YN}yxjF2)y1>I=rlJ(N7KKwvK)ten3v+nh z!~t{4I#8BMm?VLTcP=U24C?k42x_- z^Cctp3Ck0O110((5PhU67Dbd9VdY6ckNA3$&&LB~Z1vKGr60T39e15!U+g+TK|->6 zzF=9B-Au62fHnixW9q8!;>9^WqA0u!iOs({lVz7Kf&*qZKk0^Hp;D?8Xj`42OpPB8 zsyNrO#i;_;54J&-_#!ShJ$^s|#oTF?s{7Sn%Ap2^90>tc+=y~4Icz41G;-Z$F@rL5 zt{PRNvOk)2vC#}J4}z9qha$iRaT%~zqc8uPrCuDNK!B4cQTCKY?JH-Yav1toi6R=zJ&Ddza8_s1>)78qDgsYjE)h8r zY${TvD5Fum(G`%Teu&Dh;&E1k9Ovp#&nBzb9HWKK%%89W(GV@jgxxtLxgn8uO+^Z5 zsqQxms5$J`<&K&R05YQ8t%Xi0y_{`~(3RY51ufP&u3Qr#E+h3NPN2mRr-|eY_6neV z4xs=yFbpKpG17grtRzEA!>2M=9u%mk70vuCUTG~S2b-|UR4>Vuh+M%N^CIp{DC#@_YfTr0YUE1oo+b_vp@DTgRMGVQ6wZOYNMME5vS4A?b1Ir1qwS>HBYRUBN&u=L zHiyDhKPU=rGh9rtNFV|W9PDd^s-y?L(VYx>62epxI%_aQgCc<`(Sf>kESIY4%pDuN|3KAD;g?688G}SSp zhbBKGn_al5_kmTH=jRsIu1zUPGN$ul5gWL}u3lWK#4;~&B@m8YwsoFQ&lS<8D_jXd zM!@(mO`9~l{w%7D%Y?xhqa2boYlV(fp5QfVq*h`Af_ftqV&_=CqAg?Rm~`J`>(wMD z2!0MM<-)v7CIK)P>3JCVqgu7TSf$Xij*X2}W1F<>yMyyf~uMxgrWT zHF>D_!4_9_B2#vO4$3WL98jDASk{yGniY&vYxz}|MXMc!jev**xHPGRL6eA(Ce}u@ zLMMZHac=}F;$C^;Mse)KGiRFVP3J1q6bi+9Cg#9y@^TAFsOqNDMwCnrndBYO=ZT>; z>*!&f7c9r^cY36O76WBcv}Sl8<1@`XlAYstKN?l0N?jWv6Fb-JHOHMkLM-JFFXy`; zj5ag#3Tex9>4xE}RxK@giOchmnJ#FA8{wwjb|W_I0F57~O=$r8sL&{wLPE%_=418B zFtnGu6|-7{CnY8~&E|y}g?de`3LI8Ik!zPm0dN!WQQyk(ajVW}z-I|jWch8zN=n&w zhOp|uppNIBQ|$89QOhhZS_roZt~mn{GoH(eI6{IUX6k)m29dxwS1-vx?D@{H-=_09 
z$GI&pK_baf+0Y$W_ zYObY3m0lMbd+kiSh;_T2wp}j6ZISF^s_q%tE+kLt$OM@p_2N>U6O&?1D`%k0808qY z*C$Y=L}HQ_1{s$j!x~G@q=wX}%n}QwNxd_zYICi~VXQWFvp^)Ev^w;8u3*jQ%Ng6s zVI`F(hhX(@szc18nt0oaDzyv_Jc^KNYK>4U*MdTSuGDg!p*4vIW=<=bAS#rHhj@i! z6`;k2EwFYjqX>u+FyXaqE{#E?Kj>LiC2JdaGN}|>BrKG;aTjSpfszTDq}?VhE?=D$ z#+G5GF|s;FY@BJx8Z=!F7*0eb$D=29Pbvy&hSd9VeP{uFH^`Lxkj&sb z(kIi#Y+V{^-f|16pN^fOQy8~Plm|3eFmStmNpov-GFH$mZV^U8y7t^P*roz}lXX{x zSfJ~;kflg|n80&e#!U~z#N+aEUGBH~x{}eHe7WiLIyq5+LsgqfQq`zAc6~mgIFE;Y zIWdKzFzp&%XOwoO$Ze5PSn)yx8cE%8v!XW2)Oaa26H6?_dQAvBHDL7!uETMz%@9K- z2c|U;_8{UTYhDBX$Ph9(FiO(HX0cQO$~J4Y`BA5@gD7{f{YXz62(lQAbM_2^7t~;Ed<^SDN}8dL#SKhqWKtwjV>Wo;4U7>tj(1=Z6KKx9C99-)h4JX zr|o2p;mrax8m3`jy3cwL%`!1$FI5q_G6H^=P?+IpJu}b-c6TJmW=kJ8!lYv8T^{nv zJYEtJdr-F_4y(5+g`yq~TIet_IIXDndT^!dFtcfeUSQZ{5UesSgE-H6$yTa`CSuwB zERx0hV{w>3g%}NaE*G?X#+yO4WRULJdMBGL%^^iqYIsMR4JxG?m-Ghg5<{O)3;k*o z4veW`a&(p{!PO29Hc5JjTIHN!N9EjEt96mL0R+e0Y!G~?jW6IszF%fi?lR_o0uWDO--cdsF10u zO7zL{$m~2=WkRxrKsV=uEEB3`kUf zI*j4_2HDd+$R#R7gi0tZEyGq-W0(V8-7x1mbt7pLeK`}!9u5y+6&7(+0uDl|K@L5# z40q)nNOpwIEf{QY4je(uwX9qVs}Pb;E~A|-Xr5rmEMTwmy4KCuy|P1X&|+n}C60(*&ESi~@Dk+!zuxvReeN;F!4#k2#11>Pew)!%{Q`-`c>f zsep)c7_=CH_?&^jOH0av3W`Rebs;fmC(dAcrjXHFHaEnEz>g^5fR|L+ff3(_A;AFt zF?LAEkl%qqGHL>EpV0}YLODuUVc0tOuhAj+k93Z;?)$}R{B5C=gZ3>Gs+q6uW9HE#xU z(4q?9fR4ey=h1~*h5%X?Ax02!sFH<(TnAQ30#@)oRF{Yr)Q)Iwn28qzHDC_U%}fs$ z@)%f$5?LD?aM?G2>%5Zd!@$?d45B^~wcx34@Gwv`433D(p)Mpj1IRxI0cl{0nF5ws zI>zBX0wtk?8Tkn8aY?ffG_FQ~H4cO?LQ`1cg>e+X5^z+amIo$36*e*8@DqSVKbHv) zY7;`iU}BfRlwi?9;M+7*q(L%1{&hFWLtE3eVeX5Br|{*3y;eN zX66KB>>*_`>IA^yh`<(EM$)7jhkcA=pd4=W$#O=8Coq{L@U$NV8Kao91-!b{v12fh zA`svUc1f}kuEJekiU==J4JWi~3?z-90+h*iGa*N!J+kj0*=W)d8U|7z3Ug=(aU-}Z z3jv6=YAp?)B%DKNj%)SfIgez9h{52h7-goFaa5S-l{qF#Q{cBwz$Re{9+jaShrmdS zm}Jl_S_b15H#lHop+p-pmvLX69t>inI-R)N?@uD!N_rW?Y}dIAurksfTprbHNrI6I z;}99tjDWtRg9W-0V0|Pv#)FYz*IGjq}?f<581+y$QY zzFz6TMX*{5ONA2%bggYr(`YEAv*nBvzULdL@yT3MVXgGqWDf04|P)P}D?ng4aNCC?cemVM&0Mn#^qH*7t^fdv?nDEkN znk8FCE`Z^5WB2ZTUx_Y10X_I5(f`eN$m`zt7>RtGjq_5-Ogm^UNNQvC}lQ 
zU545x`xu$tq2zF62c{|Ljy|5%cYp%{FfxZIT0fgzH1WXd`&zk|G&54C>tEpSB@Im3 z{}t)|s{aM)dSHT+5EN&>GSexbk27h61om<+lP@rN6wRR+MdCSx$)LaoK`+1C1zrvZeTf;z;gYI;lHT6uR-e{w=W-Sf5{S3 zS3xKVf$dP#Ty6)}$Iu-lz;A~JPJPH``)L9s9;3cr^(Ue}`ag4J*}5mF|3t3|{wiaZ zWDa`1kHlk~+mAE*eQLf}YB_TPjx(6;_h22q^>56Z`uM=y#~y$~1O8Xvpf65f*rllJ z!nJ3j03lfi7@W%RSl+%S@B0RQSztfe_}>lJzo`D#-w_XW@5zoPSdv^%yhIqg632?`g(`_nwA^nNtKl(Y*pMw_a7uoq|n z9Kzz5{Xhw3+fjVy`gsxul-gT+3C(^dtt`ZQnG4Qw0cXg-j~^oQ7n1+DWd6UI*w?N9 zH>n;(YxRJe2Wck`{q_6@LHhWfS>*>KVy?!*`@%?4`()G0F zdKxIR-ezBOwe|GpK>DT8R0rve6_9>$Wa=?U-wD!ZIaAjJ>EDC&@vg2+QmIWxtmlWi zIs|D1q>l(%A`Q|Pr&5O;G1xEN-!C2N320|3MUVU$u&##T_Vd*9w}UhE$nD@WdPfh# z9RfHTPzf|}a-Zuf-Yk_`_IbTl>ezK`w*!y~G)E9S(mMhCk6%yv@bOYlo>=GYvc#_& z3!bf=_|<)R`|9q?8(o)5kq-fEUcE1`|Hf46=D$y+PP%(v-dX>cN*#T9Ds|J>_v_Dz z>+-VSnS!RS?pz1-@$0`W@Oa6;p4jhwJJ;X$ICk5a<#BM^*FjZB4mf6S`ve@#sBhnK zun_;BEAH2<{n~LMILuE6b}e{S2S}Oe4FJ3Mx+k`q-e2SJ|FPPBZCFn(KBa0R&C zFfj+)*RyG{wOySC(Rvz;iS?h&sl!r7r;bmZoO*id%+z_Q9jSCGo4P1e|%xsduN|pZZYh=F}%rpGkco^_A4W zroNT>Uh1CIeW?dh52k*bdU$PZZR^^RYsamfyms2!*=x{RdW~G$v&ODf*2J~m+H=+h zYu?&)?Rjf|xAuy)*RH*3?QLuCUi;wMN7p{H_NBFNtbKRwM{D=5{c7!x8#Zn@V#Dzp zp0?qf4e$nX1GS;Np|#;z8wMN38|E7>-|(spS8up(!}~VewBa)wzOvz48}8Zgz=nr5 zrZyhF@x+a%Z``qw+(>V%ZuDq)4Mj^wCQu3zPaf~n;zWs$mSzApSl^^ylXSR zS=v0<9B+Q{=BqZpWAlxhKfn1~oA2BF+bvtRoVexOE!i#HmfjX?%X7E9a?4w`d|=C` zw|ryEy;~mIx^?TxTcNGHx7N2RTf?m{-ulL^@7wyRt#@p_Z|m<5IsA~*4oM$UI7B|g zJLE-&Ty@BM4*BFEcO3GQLmoc#m_yG#bl0KvL$yO^hhA~$^@o1!(61kQ-=Pm5w(YQU z58HiM>oDuE7an%?Vedce^M`%^uwNg3_~B<9o;$p8xOw>B9)9)V|8n>j5C75Oe>mdU zBQ7|CKH`~2Opkcg5$`QcAI`Z8|e)h;89{KyDjyr1S zQT$QbQI{Qc%~3ZUb;nV^IQr0|&pw(u`frY&AAQx)Hyr)-qkneHp~swa41J7p%nOdW z=9rHjbJsBsZ98rox~;y=-S*0D@7eaHZ9hGB>#^q?%O0y8d-<_%KlZc7-h14}=?IsTZ6o zp8DKV-+t;>PkrcVPk&nRY5vpR__WVH?PpIv@#&X7-Fo_!Pygi8??3Ih)ApQZo_6JF zpE~V7x1X?`+U{(>di&?LKY047r*o%IPk-C#x1av-8RwqSKI27aeBg{9oO#%pof@?1LI^6oV|-Yo)pMel3&bi&gFBLg)<6+!g~u37V+Y9i=XB;b7k%|++FJ@dF^TY*3QwLXLjD*`CXUkzP5Xh1WV7C zzS29TXZAiKZ#x;+V(d0vYy5Pud+@r! 
z{liO!R}Ft=(&n4YUs{}Xt@S&*Zokuh)ag1Oa1U{x<9^gT*0a6Ojh;SAM&Ixu|Hb}2 z<6Yy|j~@)m!8;~vlV?pn5*`nfsjp7YoBsXuN70_>n&|g&C%z%smUzimXBW&~Hv7rE zF#o6LZhEfz+*_Y_`tx4+ydOQEdj8v9u=axf3vRva%*!sn?7qJ({_VS7c-RZ=7k=$U z$ctY4qTl>o_wPP-`O_}{+sp6$`{Lif_r*uOIC$}0FS+O?*S>VqOU;+w{<6%=-uSXd zUar0T%dbFQan&pS^h))WU;YR5AFlp~M_*;U>h>#$E8hC*EwA=o{q5IW@|t(OcH3*` zuf6wm)z{s0<@PIIe&ugo|LoU)=_>51Yu|9#8>Vl#=jzJUA9>?hZ+!I||MaHeo4$2T z{+bWG`P4VR?9IP>i}sc~-g@a<-+%4Xu6@O|fBZ-DAHVaq;@fVz?wsqcdi$2QM{oa+ z>pR!q_D|$Lz55-fyyF$``13oxci!_Z@m-&LclO=ye$T1zx#GPW-aCEo{qK9``~LNx znScJs`**zmZ67%C1F!g()W1ysf<$=f~gqiPJxE%`GR~a>XYP`{d=HeDqWEPd)T$ z|I3@wwYSU;g|TzfkzXXK$l!`_vbAfAJGv+V!Q6ewqC8 z&0itDa?@Awuip4I{A)MfPTYRe*Rx;$$iL?P^<&?-_#2r=gJXVh)eleo;dTFZ{=a?T z9^#%`e#HFft3Pi3_%dw`p@V8`D2fiANl^H?xT-B`is<2i!)_Y z+NXgou=eQpQkMhs;(vT?*mrGw{MuU^9McV+f`5-c_@5>o{d8*EAxDCBcH`Q%)P`+q z8@H`J`nk2uK)nI0`GWeo;sUNi4?BFz)&$e~-dZrXepcy=8Y%DHz!m&y6Gw8S&e+= z?Ccv(l3w~n{uD(Fusgrix$4_5x$9jYIk`{2?)B$%ul}j|!sa)>!{Fy`1MgkixMAa_ zO`DH6eCttLw}JD?H*VXs`SdO5v0JIeafc}5Gmd{@I)CP&uMzvfhi(Wze8QB-zU_lA zdToZDoONPEUe{D#_gM^o@H5}I^^U)Ld+YME+kgL*lfKDZ`MEpU8$Vy@ocyEf|6PCm zKiy~iQak5c=RSIO>WGctjoUVEOI?!s)TbZo>+0_cffWKP1Xc*F5Lh9wLSTi!3V{^@ zD+E>utPofsutH#kzzTsC0xJYo2&@oTA+SPVg}@4d6#^>+RtT&RSRt@NV1>X6ffWKP z1Xc*F5Lh9wLSTi!3V{^@D+E>utPofsutH#kzzTsC0xJYo2&@oTA+SPVg}@4d6#^>+ zRtT&RSRt@NV1>X6ffWKP1Xc*F5Lh9wLSTi!3V{^@D+E>utPofsutH#kzzTsC0xJYo z2&@oTA+SPVg}@4d6#^>+RtT&RSRt@NV1>X6ffWKP1Xc*F5Lh9wLSTi!3V{^@D+E>u ztPofsutH#kzzTsC0xJYo2&@oTA+SQ=|7Qet-Fr=ZT4Q?sV9#wouYKf!-#_CY&JLdW zy*H0vb^b|()(z5^gCG7JIsc?DzwyLBL(l%+e_ip&-EY0+ciZm1UGw-@@a=6^fy3m>On_6O&oUtja9Ki~G@+1;Q2@EsF3aNqvE&oK{t^p%ZmcRkApzxi|f z`pO4yx$9lKUvtA<_XgLTfAyb@(!XDO=WYM-=lW&!r@SurhJWAv%kt;Xj=ue=Pt8u- z^~D=@eedJyi+}SI=XUJPBk|66+ZVlVd{pMWU%p(GZ-3nlFSzfc`D;IZ!CiN}{=3iX ze(~SF`i8T&Ie*%DK1rMlq-xXbX%c1!be)I5Yx_Zg8FZ*Zc#4p@?Suy+17k%KyFaGLTAHDImU;O^z z_r2k_&xLMyxq0s+nP;h=<-c{sFLpow=SMuBc=-8`JmUr5JVAYEu=7*Dxa{z+o%)7f z-TuJOo_*PGFS^y!Z~4t1_k8{5aB@F+?Z^XzEALAAD{1wdLRZcK+O39!=f< 
zTVm^Jf5dWMebZ$Z{4Vu`5=bhf<@&5G9T{LU&~_#wcZQp6DbF zWeE}oKt-|Q0M3*UK?NZSY6yagG9zBj`SafA_C3FQ`gwk5xc>qB`}X&-*Ra=q*Si)m z&27;82!`c|6Bx3ZW6w`kRFTTD_!`d0-aT2CD`G8RgTL!F2s_FA%p^YWce`8w5=+1a z)aFIm!mFdhxw|ROkN{N03%O8n7A4J#(jOnPLjCC3)WMp|1U>VJHzZ)#ooUohMJ9OK z@=q)o)l?fi)hh-=hDDQ%=npI54OTw4D)z0z5+ls!WX+bsg+7p>ud(G(ly_P)svGK= zdKXk-_TJNTOadq=DuATmxiXa6Tlg6AkwOcS^WolR5jZJ|XuFvID4&>|p-|ffrZ+ok z*42(+Pry5k*<)F2U}A;PEo3AB>x@}eBX=BLn-8wRI{H2#O`}vJ~kIdwgo(pK3_ZU^Ib<^kJ3t+ zxa4nVf;)YQ=E$^AzZj}(sa--2DAIxKq~naU7H1f1(ueQ-I_*6QE4w#4Oa_vqfy29S zkTA1_wO+SonV!3>p*pyGO^d=+_NA=`g?H~WOqGF*btgozuyZR)S^e8gRF6Op%cH08Yj9l&lD|cia#-QXaHN?BA+}O3^29%Ie4haCgPEKAg#RFWZkZk z`_DV^9p|$OKe>!fGEHu=4qU;#N_N-E$vvWIancQQ~g#)Lq28qNj*oRzgVL zuq$15QA6poRQq^$W=d53eWg8H!H9Q9qSt&w6&e}?;Q4cK4DoR4^D}`o$7U-Z_j|ee z9}29#!Y0Z(%8r?2W1dq3^JhP=LFMb7^L(n>-mF^9Ca zRDdg-X(0vJCHQAB{quXxqb@gG-eweRRR)N~155cSfn}sdLveZlDwvQD(Krr}5A?`W z6L7U!R&}7~Q$Mti%nduxdryTN=&9X72YSDdVg$f6ogJIVHLw_U_krFKJNu!XE=i15 zyWf1EM|*vscZX96S2(Z^^t7{!2YTC2jkp z$le|>Q8dy-nX?n+we|__;#_Ss;T{&wr{m)HSru@(7vKtTN3?5wpvQm{;i?Bhxa|0c z!UMg(4z?ZWZ8qpWd8YyC(GuPJvQYFtAHWrDZ<~Bl7I*qJY}p^N<1=Xz8zdG z4AmUdxjuul4QkFibg*lbwrq>~0whhOJE%IK9m-;*$Nb3eInK3Fb6GBCS3iWr6BuGk zAOQ`AY^5Yi!ozdCIK_8L;O)|@oO9D&iDZOY8<446dv6O#5{}cjCnoP z(l}$>pIgDr-g_pg>u6M8({^;aX-zU^MCYL08Ocm@g)&x|M;wfNI_zwP|44euB|8$r zf`M0zIGDw&hPUO+<%o>O397uo0xJO`1?EQzcTPQ4h1;f7jZ6&iQx$5hsia?Pk|A?l z*pHCvpCm>5>|%#b9)=0O5-3b?FEu5k@AeM#0u_aj?W4Chho4*+LSzg@#P5T5Q~;6A zZIg>ja}(LBThiyAy}A|mrOkVu5s{%oXU%MlFt9@ggDh|X+<9m@pp3amAx~6ol1aGE740#@2C@vZ@Pmw<4o1#qlH=2=dqqp#I`abhoPU1N1=ev<5p+GFERrogXCd zl=eptT8%uHa56_XF66}1Tx2iPlhSLw3@{0*S*Z1CudD;Tv>zYCn_@i*KQK4_%77D5 z6H_brGOfW;u5(oGY`Mb3V({!`foEp5+Lm`{+VZl_(p-5mLJ8bLX+;lgrHZr^fAg^q zjaJILsl^9+SD3Eb@Pc+3#qf04KoAM1-on^5hbP`yV5GRcF4^w^=aysIFHI5EU3(SW zP}S6@{2?#+nZV1vu&og!#b=#|K?DpXTI_KWn5`wR${21T*Z?79SO15M67Gpy1=x{& zozzE54GDa88HEvW&M3dlw!t+Qdr(?DUaq=^#QuJH@ULVQrChMn5p*&HGQ1u|`jsx~ zxGIj{!}y%~61_iHF1XkD(K}j#dW~S%5Ng>mHY_4K)${BkG#g^yQPvDf%JmNDKK{Z{ 
zc+5$F&BIF%^m-&oMhzVYdM2c2tAp{aL#vGZT<=BIUhc>f*fsc9ny%pF`$Sgx_J!v~ z!}IMP)E0FROPUgmOJv(6?f|SOrM+)3DS6FVQgv4H@FnzynNO}TI)op;?p;!Z=RRRU zl0q4v&(xrmV-B>w?$`lEQZv}8ruu2o9ru{9eE>VjV3|MSpwhU)2|m&0!Wuf)F6u#G z1-4|Cton)<4OZ;Li9_fu0B76HD7)yy=sxI&ee09k#LwiXrYij>VxGr?7p2y~P#WOF zWI4l_It9Vz>(?o-_Vg`cG8qprwY%O%Y+c4fIqz!)k4HXyh zZD~R+Bbrp+yZ{%y7XGYESr#At#PaP%mitU)REA_@1o?%K*-~R2#@uAJ&H)5>SW#L^ zL5$OslkvQqCLPEFMmTIC<_q03W<&pf4*&mSKVE;Va}kU?6=r0Ehn?(>D$;MOKy^-b zK7L#RSw2R)^j-ulCvL0E`KWqn>31SNSfej)EubZR1or8IkOUFZG$7 z9cwt}Lw4*dHk?gG#|zH~m$Mp(615PXDM&ev%NeLnK4y=!U|M_;T(n)g&i1+xv?a)) zIo8n56+_g*?%tXBoQX|g_6{a3SXSaGZCaXBJr!+ICjxoLe<;Y_N%@u?HIv3{J`+Ir znB38^tYk7OnrvkZcnIxB!g<(UdmsaaE|?MMHuFECE)L8h$3=<`3*D|?0!hk2yzT1P zqAZ%Vzdp>(U0JB}hixI#`>PN1@&#@fywt2 z@9yVDN}Jgou4EU;g^m!?j3=Pp3184vg(ZE5Xd)E;M&+oCL`F8uE$8KbYgXdlF{#WH ztej2e-M7wjIvci?#0)0Ogo$hnw#Oi}@S2!VNDI-+UHSMs}u;(y2- zDm4LP%Lh}p!d4^cRcS_!s){PSi~^@xzZwc`_3_@DkHBL4NkbSblexFrQtb6!Me(k? zHt$EOsA%TlQ7ge_hv(2}=^%V0u**}m>duW;4|z-{U%dYyXi;XD2g7vbz}X&F3qUus z^96{*GN;CHNu&GY+~b5V0!XW#5GIoRcl94#!`@(huFl83&-PrKE< zN%0Ha(>OLosNDvxQa#>Es6EYP(G!xqaX_B&&$9${nwGx_xLO=Ki^C*e)lVAl>b%b* zSL~JaUU6D6yFu1Pp7&1@$(um)m;hm6|U`E_w_-Awe!^PvucMfPW0E z2pCZ38RQp_ZXStt1mF0mHbbkJa49$<#^=R2H#Rld+itwV)7*}`7~H5(F;IiRteOqd zO<1HW^|J77L_#lNO$5zD=Vmfn+!IN^lV^;r0IwxgZVtvuKS^i`kU7L768%CkD*UtS zx--DnhtWgUcISI4CxBF1npw)%!lo1}!ZOYB!?U0y$c4Ptuc@0grIhuY+o1-ENdU;56QxdTr&;4b7x>VLE6VV0ch zB!u2x(nQo{j;2~?;hN0L#+8<)qborb)`n_QDNS@noe+Mj2a{o$rm4>-XH7>~q%6-2 zu)O+XKy{G*-6Zi?@r4c5z{2a8#KEv3vyas8C2qlWs<;WjbPvfBKn(8;eAm;2!Vrlq z?p42!u>G5e^G1}Mai4Axwx3MxZFLS~xmRwSLea;zB~&gf~j@|NLn zq7!JpCD4thLI;P>h?VqFnX*;HOLzIEM3V>j`#GW`EjZqeSPqr>-Lj`k)BK`= ztJ=Y1VJvtB;f4FeoiKs^seTA%C&eomlaS^7^}0!C56*}2U5kJ12hckeE6kMZLw2^8 z*bK)s!f=y*rcQTE#b_Gl(aZd<^u6H@BVz44a2{QOIwS+KkM z1;cJU<|Hu7kbV-ie5(^a-}4&WNo?&^7djDbS)#@%k!ypi8px6Zu8PdHqY zImu^5HK_RP=fazj@;sprlQGl|dl1#SG>9n_?{||^HCg(0RZ}?D3X3^qdV+q!%FvtW zf67Z<&R1IJC{4%(V^U^hja?3ti)qR2pZn3}@0#TP(oD;gFRBZ$J?>;A 
z@V^JGMmJmgM2}66^?}VzMONqO%b(vORpdjB3P@W=@GNx36k#Cg9;fr(k|#Z^s0i^Y zR^8&eI&fz!spH0Wb%O<gIPR$QFB`8hc>JRoVpYAG`BOE&ms z0p8L~^hHW+>&Qq&QaEF#{USlPC-2F9a2*w5R3-I_`(!c-m6P;=!Z3F;IrLz(b0&aHJNm~Be1B83i-M6xx!qHnJ!3M7>L0xoKyhER>)b>_fv953WcygoSZATkMAJ$sn+Q{^A>+n)5qEL3c99>HMg6 za!!Roj>_eUyW!quN}uH9tjm{MQ)uR#~&|j5fdjw5GuvJ+Z?0R?dw%&ozZz-WLP!&tHZ|&`+pps6#|qDFneClmx!3$sQQ&O(X< z-mka7E1<-Nu>$4s3YJl9{MN7*LsrvB?&Wo zcfTVWo!H8Z7$tJ7`_#pcs*&(;FGQz&Ld6zz`hXphAICH+2&D9 z3zbovoy$^dr5kP&IQ{S|rv?hKmOrmn(`8Ir&=93-!_FrdC(mpD<~0ZPnmZTpes@E3 zpN;yIh#Kp-FqFd@-FG1X^67imp^i^gJx>?e-W|&;owWK2Cl>o#o)dOt0(iSqa6{v= z=Cu5Y@_M%K!gs$g<%B4mmsi&#K%7m?N_^Iq9#b_83}WVuc1j0?){E(ka#<>bQ(_fE z!^vpoQ0xbhH2vZ3#^$mPES(iuD|+ezrqUtM^<9724xLNf5$xen`a⁣@E0anBFx7 zaA-=Tm@NnSA$7-%dcc$2pwS`Aoz18Zc4t(mo=aPc>SrHZHZIQlR>v*Myw*V7E2o*; z$|tR~FPp)BXrs%mLLWM#V(+QvhQPV4vGEQrkNE>%v3a`zU=eAcI1Vt2PYuN%KF~{j zG1E2*^-kHHJ3g|Nr7rdZuVHvpvnWen$_m=kgITKhHz>h>lS?wU5@g)ldlciLY4^OY zY_s4UKX`#%8en*>-_)e(`HERSCI~d{AXOj-Mf*QK7rN%^5@_IX_gSu}ktZJ1q!wV` za3mfT1NO;p_*A4&5{JRG(pySTs>^ta>EnI;?iH4ZC+Ujr!G2td`$GjE#DBOmN2^%4 z*RO`pYF@0llwsOK8*YruvpaL zI?;>D@P|4I(8|ne$;T8BbC$m>L_A@MWZA9545rf!Rm)r?{D)L3bL$~&e|KaW^^G_o zh@?S^X331i0BFii6v}1++-pLtI_Da&ZWevlvhDqnauz9L^s0oJXsc69dA#AR@3TT) zM_1%R!>I||#)%`TH)sw)N0NK15eWoW4|Op-voco*aB%zB7*EWrL)BvEpgKv6$*PIj{j37sb?#APd=P}!y?ug(yT_{sxVPV-cO1W7ixVRt8 zXnwUj>Zen5KVR6oq|4}g!>0x`CKV-k2AMs_ddGv8Mt(#S!o1Hx;3uDCc z(?d6vKx4)*ps{jKZ2}bp2F3)=nYwSSdH)*o=X_;f3U^85Y6RA%b}|#HW&qAT-%Dbr zAgzE4=-tOs=fPeZYaBOWihvlOQGOJdj@JqAQRaMa0|3sj;C>c&B!d&7m_j}TCiLF# z(fVS^RbbJ%|VhG`Hot!#f`sUk@=EZ*NY$`R~1N7q0^Zi zWk%VmM2)<0tD7)A5?{3mazrivQ>t>tGabz&hOX%ChqR<|II_8S(K~{`PH3bHoaf%{g+JO zSKZQh4g5`C3Ze*X$H759PU#;uht@qdc`}^3*E|3=yS2X1!yasmIeDwRAKPep)4)k9a1S%unS3%(GRAU}{3K?!%4RJ+8q|(G zOT58X#~amZJy9jOj^!(PtQ*06>fY8KEO%tt0g=hc$toa{BW_kt++9xDs}a~g{8al@ z1|#uON-C$j$^SBQ>L=w^lzUJ~ar~-v9lQ_07+oDxK(cqU)!@=OfIYyF**XEAILvn* zh;3*QMh$(wQ&qHfB~(Y&nXh#bxneXogS*h_I$Lqt)ajQ&M$D;DSeY!8AcGunNvBSspq= zUD%j4RR)um*n6$}*GTV)=>&C5Y_uI7?+ySy$3C&a8)w^7=KXGC;BV&hmbK~^RQE=2 
zr$}|t4&HZ3wjbd)^@DrW7k^(p{^!vz5AmEQp(xX@gJS+l2i#2G46y8Yeqlj43#tKi z_DdMX2{$8$e_8cdot1}BL@~{tmgynJ#;8DPiq}y44b?-*+6O`#DSw&y%YDp*4os;( z(a^D;you{h-Y}6&=b1b5v81l)Y}*4pv38kOpQv_B7HVyGckBh?XLok5!}^sVL1_CJ zb+UhcQs`(JuMqB8TP=0hEk*-3&$<8M`NrYzhN|Y-JdpeLM@vbf1{IHqzaXRf=;x_?bmLHWK|J5#J&>AO!wjLT}?YwIwqBEbX{Vn}1pe~jc z=W}o+)-T=-J@=1`i*p8j!Zm4Ucb-^v?uGQCcCt_wF|E5$$x?=#VF74x%&Tdw`G0v$ zC98n^8gWl%G^3?Qy{6quwc8Lujq*2>Q?OwVOHv4F=~+f!u<)rDckD&~3a~yuW0=e6 zdA)^O5))Xp_hq4y#O1Y~;PU1~`Cw~PFb?gHNop)RcHChTb}4~UXwtQgOM2Fco}RvT z&HMQ@^~O{Att8)$V34Av2>GCl5xA<|3J^UvnmB@6W&7amYsGn zK3W|#c<jjW~Z8(c-^qIAaJaZ??KdfA0<^4P$pyak&~>W9;bK$NjwIOC1o%Q zFn=QPoi&X)%W1BX4K}r;E1F68xRe2mM)|Fc*6KYssYpVgN(Go=He8rt!#Mmi9x z{AJqmxX-_WP)R!T)WMlFQct_`9z$BYbQWyiG^+5+Yu+!aZipYVft}ZIHO~|;ZD8Hk zmur>>-#!4R0Jr5NUfp7DAf6C@>r}S!L-+Wi1vg_~r@{Dp$NVFFIZ`}#@i?!3*T3SS40={Rt=-RXpZ-kp@z z^d7`cUP&4=Fe5ziQn2S-#Aw0c%h`2GpyK7yEk^3lHwt~*9c($D;bfP#eEU+~)i!aI z6m1T-cK5}OZS;Shz(yC~+ncH6PM=9ay45N5S`4gNi?F*z%(?{GcjJh1v~Gj(NA{K= zXFca4D;w~E7ri4SBX=^re`RPPd0mt`RUa}|@ z)t1-Ysun(rPwyz-)E-)5U$qvHjSK&h*5G`e5jK=IZMR>VxerK;x=eI;5(j-;<=PKk1znSih^J68$ljfKbx@|Y!|LKVtaLgVZ z42odMdW9H~!W!uFCOS&{U2wdAmBBXRH7F~ierHogEl^!ycKz7U-e+iwStW+EQKYpb zsDX7n@o~~Ax=^^E@XeKxf{XjcP*mZ#ia34Et9Ev(bVojVhm+XSXkzCVkt4(+fj4S? 
z-KPjO1!xmYk5Id>W7pxvZffkEhM0kOg6$@_9s`5BW(gOz%=3heS+>7T16MsBz2tOp zPAYzNphtR9A_7LI65UD-FgpJEDdt(ukxMCF$K>pWcptPmo$FO#w}*vn=N*5%c;CT4 zA5T4@sH>y&tLwY-Wr)*~S)D}y!pWH9vvOz})JCIWY&qQnql02>UG_WN1UFDE)>u-s z*L~UFPon&9weORj++w1-@NYR;cAf&;*$vOz)2W_igmD+kb4xpekA;6A8qL3@Hg;Q@ z8hIux4x>U0ESyuZ-g40AUbxrBV%V>u6zQsHSPnJo=J&RPJ8a136VQI-#(T=u{3=9( zu(_Rg{L4)jPRNTDxjk{UdyOTImvkE)TBvE%>QZs&-*6( zg;cMAtF(PB?08vqO$k1SBm^bNf(bpd!lNXXzuf*t_bQ64X%q>aNX_}()Y_Ej1Tu<)}o(pM4G@& zSP3EeRne5^o{-326GWYuT>5fW1?I%WN5m8e9bFb-0TZsKaUV5Nm11Vp>}(wH{9=;n z{bN9+QEhCGl!n=7!Cq#eGOs6*Q`(It4)l&AcXKl3*Ap{Hjf)iLr}CzRSEqma-wN!% z{J`1v>8%>nCj;lSHZM0k+d!Sfz%c2Bsn-6q>PT)4Z{zC6;of}2rP(aYFIS-Gs7EkN z1AgkbQ{~1@7KNy`{@U{!WstOPp89xjMX|V$weD}Z#oBV>{+8LgG!};G1{Ia$<42w; zq$M`FIz%8{Rksv{Ssx*?y*gRa|m*v-8yS~E*hBN zdAXG}W536*eu^F1&BVMOnK`dWBSlRrxo4In;?xPH{-w1%HnC4V8#pPCu)RJGb(OD) z2kUUl$&z%Z2xB4*i|&Xfx=s$^+Xl%y^^hZ-6!HUgzEOblk+|NSSW0AQTj(~%o@VXC z@GJ{``lOzn0yP?7u*H%X+tpnRCQlcL%4b><6Ja$~X6dN^d%ZIgFHcpbL<}bKMNv2x zHZF_Ze7I74<-2m*2pw#7b1np(ZyW^KHt zQjZ4dGMfVuDB(>dz*#0X$N-yb)mi4Bw+p6J<1z{6QE|c-#~`C73Egg}4t9 z@_Qk5TLbyw&OB;&D_0`=aXV{w+j*gt3*dOC7cf~{P{rdYtydW)YGn}2A}33 z2uWy7R7tY?GlqFo+GguarH2LF{$IX{qWed`2Hwx5EJeLtdpj_&>Ab6SZSgjh<2bn*mcn$lTS1e*=ExLp_K(ih5%h3i;e8uP3fwE@N_cq?o$<-76(L&l|& zr0|frqh0YNYu3r|nKVYCg@vwgMxY~l#p%+Vfe-Ho&*S3et81sh$4!sxZ)$*l`@fg= zf8sm!cdfjRm2Z4A{D%@9=dR~@{3$x)R(ZR?$E&;*0}hd$P0T3mwk!<0Vp#_VkPa*U znw~k@%RM*9$36g`DT`NsPG=o~K~K+%&{G1+CpB5+!iupT*wkFZKykQ2j1R3k6UrGW z0be8TA^6=4yYIloNUn(Y7g0X)xV%J~97$-B?tZ_xH-l_JhE2KHM_Y!kRkn*!A;duZ zI_thcgZx|+-1$iy__&y)gZ2C4NO{*3^MMC?mjN01l3fd8v}-in_R+`tf&?BNt@5@5 zkF|Bp^S!bFQ#I+?>%KLT1%7OxV2w9rZ4iVEXT-#&R5T^sZG53=P&o)fM!4iDCAhDx zX@3hIo+ycSQ@%;o41IF6Qs|pwSKDCDiy+6tGjg%Z%QNe5cSswQfX?kVe)LdO!WOo1 z-^~yF6^e1i=tVR7mw%TE{I*CDFKJ$<{7@6F0hEhoN$~tEED*pO$YuvJ}p~}@YFz}Lh+Jj?gjoY<2E)QFZ{2|$q+Ipji#Yv0K-RJyR zw%jf(Op)wF5j?|qayos+!Rc+1%I+A(Qx$!-Q>(N#*x`6n`e2CL6N*N)|D+2;{`=Xf zo8#GH`XT6RHn=N(i$a}G#t|eBoKA(TsD?##$VY7|8tza2vXO$ZtxKuwMq?uE>XG$6 
z`-J9Be)DTGp?TFN!W(aM?BcRZ`U-osqhL^o{FE2I)O`~3s-V*z?N@M7ZEODV-zCof zoHzf|A2d5k!FKnsGC3Q1dYLj2ARI3B^2}AY$#Vx9o&UDDh<+IvP_Xzwd+u+%s^pkQ zn3%C)9Da!xg}}>9`a_m)LO)O^5bc0=vTewZBbvW1h0tKT1C;Ek7f5A9a_izmrG;ZH z_v@9BY+)pR``)ZBql;6=&*0y}htkTh>pS5VI8>J3AUGytEKXjugSJ9#SWe69v>Cq1 z6jvy)DmY_(I0Q{;R=p6zPp|eZ2^im&IyzF}>RZ|=*$xR75VQyEMPnd##l^`ApBDNDf z7Mxv4TjU6hmkIbLB6B0gDX_%`5S2R?d%W!e^DqBi(SE{?Hqn$Q|N6c~Vx(`XhNd_1 zLb{Xch#$3A!_r{wd}R6S$Y-MLfXU4__6}()hF9{AWW32LD-?WD?RcQ~Gwe}rl+bkc z0hD4;?4-3?qY%fG4eNGLWa^Dwe=zTqMC0e+2{P^?Ep0{fMII!WQTo02HH-y}tTRH` zv{!^;hD<>+Uq0CS41NxE%AKwN{!TLDd!YC5&>jB%7eEo5ogg_{%Gn5HL4T#f2WcM|n>+V{hPk`(4@CD+?pS(R}y%+f>3}+V=zCh~T*So&ZGRI(#*L+?F^{U+zpCq9{QaZj(H0v zm}GkzC;@w@IbCc#Ijg_cu|u(kkhZMC6wnekk*+Wfe3E;!1e~Mj6FLA<)wHuVa_G;? z)MZ$5PmDO<=0>X&!8+eX}hC;y6Z#k`bP-VRK88&Q>G!G5KhTe26(4l z_W3YwUf;)~Q4s^}ZOTtmqEw5h)Y0@U8VBJXdp<~b-%*}Pyh(FJu+{La{+M}q)t1kG z;?h!sfE-V-;Y)a&75dR#)3WA_nNa3Au+pWQ9QiG8Tl#TI9Z$-u>ggKYF)H?AMkPPd zF%6FBO5o$*Cqb-x{>kcdvzpH}#U4JAXX~iHLNyl@?QB>22$L65AsTwBtEjYIyyN>J zNMRO7H8{#VF__B-yYbQvhdSBZ;@gDJPW65K*)B|b-P3PR+?UfaQ6mmuG=fNua296R=1y~r%w9g3}yrv2VnyZz;AhAx+Hwy1#r_p;`k_(@&x>yflw z^_QtPno`jbv)?lKS1s!E{$yRTn4SWzEB1wlL<}on`V3B*ab5aQigQOePS#uAcbdQ4 zeGk{*1Z*K8btR|m^-PIb?9L*gMrgm2mc(bhM+=*~H}KUv*93sj9_3aO*^RSoa_E8F z`_K{#NZQR2JV9bBNzMb5u5tt}X`6Y2c9}sxHt(bVBVt@Q0W=p52oYcH$Vm1X>)&;4 z+e)4rt3%wG)k0gc2+>wE`M}a6fIykReZy{XdXl-Y(C0%VM&Q>Xz~H^BeBhhB!|!)F zO%Y}fq4X7h$;8M-^F zh3)Kd+~Sza2$8p%Pr3fUAwAtGtR~N-G&%q_0<_SkWMw6*`@(AaYC=G8YdgF18gb0T z#KcfqP+wYF`G#w-uEyxQ-Lc}6+!GJJ$0Yqw@r;3c`i#+OcpZGK>0c_3j?v7U92+p; zB}h)qz-+ezBN0SSEaUSx+BUEZp(wl^suD6YZRW*p`HRCamueaT^*?gW^38;MYmIv zu9td8ITuN&SyMxVGBm~~RmdliseZ;xdb5tPd&va1kPM9zecGe&CpEV%vT4L|;WGE5!-A zlc{1GgEQojI#+HpjKY9@T=R|s;gegbsRPkpuo}HRSF3m8n!D~yvsps)?z;Btht`!_5ATry#Fch|@Gh9^kYqdoVUXEbR|C#WK#J-R{C(fqToXr}B@k zNxRR+uxy%y}OJo7u}vFI{}fbu4`q4ZGGJ!2sVaLX(ll zSM0@AGK;uO#EB;`PvBYoZw7)o9(LqE(VFuQ^sdMH9*;?h*hh8uDFPbb!5#y<1~%M+ z5Z0r=;Oz1oOd~lnnb6%lI*!X;y{}*uSaJ$m8_QaPG{8G;ZkhTEq+-VmGttq1?YiJ_pW?3@ 
z@M2QNHe57R8r9Igh8w0(h!cwF0^ko>mWPJQ!dWerb}N1KQf7o%?;}z8G{INJ?z?h( zY+^|NCgauqw|n4sf!;X?eyN%#Ngj|@k2Ma}FNx3Ua2XJGVaVSi2@u&+^mL*WIcG(L z5VK7;m-dB^sFAhTa@v5PbAV!6E+x9m6AVNqm11LX>0kfD1oa<%qGM3W3CZ#*f|6Bb z9<`AFL!_E`Ny^P5=NddrjOFk8jq`c4VnV`H*M{_go4R;CQkI*zLF+RR1qTg}PGkCp zFPZ;(pjR1ls~~-u2|V;r9Mv4z7&%b7ihq8BmN9sd*-UV4H`GGR7SV#!X>q&PW=DA!e2}{uW zlJyNCC7HC#*7Y99IWiiot`i!D_ihB*a=#J+Ye!D(dZA7(CWm&Nl^6cxIl{?las2|N zUs}ZIwVx)t$o&WVY|J`H{Q;yrUHkoKzYwYk=mN$GR^|kAeMyQ#7Ux#kccFcPu@Mb@K9@hJ5qKh;sK{R) zS~l{;ALu<|hfE=l=7w0f=e7D?6WpVQNB2ipKE>Ibd|JJ}qbXHCTc#I~*C!t4lxAWC zNtoB$l;?qubfK}p{$g({#ehHdihrB8e^a85A@hugsN{Br^2~_zQsL=Een3{|X0*um zI2z|(e^0u^=o-Y)S$%)qs_;~t%8Ga2YX?RHy=A-cr`2_HUsSS_%_F$QRQN2-ZJJE? z-0QSdB9t=E5v8-mTFG=e1k$%1Dw-zM4!QAkwf5GprDRf8tFD)T`Eb(?wX3yXyQCF}9 zp=qrBF6#4QFI@{WMb()HG2j1PvHe#W{0%v$%5l{sbBr*+L#34hhkHS%n4BdE;Pxb5 zVQA|4nz68_T4Y6$X0=YA9d)!mAH+>dZSC)A28Pp0N1m0vl_v9y7Jg;1jTpI4Va~=4^bXEWDv0-xkh4smI zuo%Ffveb9qE|dC_s?uo+MLksg73!2)b}w+llI5Q7B+1VA>pKg1X6{{};v&E17H?9! z5$K4_yF!2tvAT)qHruX3I19mHPrF_j7ney)V+L80#xN3FQtChMbUGqgewH8MC*sJV zxEZ3glITg|CcrIYyA~3-4WKNk5ZRY@8)g4n<~l!)f5(O zJwE5l>DORYB_9$ctd1Z9Nns+(vdX}mfN9k^@Qilb!mxwp)aGRD030^ zdfQ~dr5A<3Tm`y+NsO}{D+9iS*AC3jMBEj1fNvH&8IDRf&QDTX_skMbEtPi^R63n#@9vK1+jCy)rVm}o zUeNw&L_Yg*qSP8yMh&0kS#+A-i!1HKzja`~=hf68?8YW4=Znv9b5swh&=#IXcA$6o zIR@9lK>~c#EgZ8icg!m83Yb&jqtoW+{IT#owv2QsY_qkbhzhAy>i+1?f->LE}^Y zC;?eqmj+FH9R|Dz|04@wP3Bo&+{y?mP|lE$!M4dY_@ZKKQr0EDRSuHZim6*fHz`S+`pEeSHE2v z($P)eMG~GRZ$sI-9wg)PTa1);2xbWRxDH`;L{{ci#ckWV)`)9%qlv=*V_dkiG?h zVGt1MEzHQ+WtC<759ge3G9%HG?8NXcO6EnB6G0|7$_A%qYWNTg;Ul0XU(LJ>m< z36KB*yqUe$E$5zl?mg$M-~tMqB=qUaHvQrYSmq<=Mnm zXO*|hqB2Qm%kqGx$R+C|nXw%oRDK`iw=p)J{kI)j``eyfpV(15=Y9wr%xS_I zU6s2{wd`Au^p9U&eIqIw5+6mpCS4>DDF!RvFBj6=>GRsSl9e`*E$8rliL(EHbJ_3I zR6rR>D?rIv3D?scF@U`}4YMpXHy}7c{wzAGeh?<>rGG}d%kdpX9@V^T5+EBZ7C?M} zK@3}RBQ(0&bLi^??IJ}fkDoIb*$Z8~rrdZ4t&}NlgID^F%3JY+WMNO-pZ~+1_PaBD zCA*@@%%66Xr0*7z!GA=aeP7r~b*K?M*)9rA@%i06+W%e9==A&Wn1uv;uw|W*vjGJ9 
z7kBR)gcygEM9W#3^p|YHE-@oj*rvXwKpy0g%GF%W`9Ho4qAsabjYemIJhyBrEBjDQ zKyRmro^65s%FG9qC6YzrqChOMR{();dq$M^ToA`&c{W4wbX4tuwBwXA|FUf-LCyQz z@lR*$B9)2zXdBu2ZMnO&rNA@v`t@n10J)~@G`NR;l z#xzyS=uHDee_+U)>XkP(Ipxh=(A?nlaeCBK;?y`6Ps&U=M_ zal4_G=2+gF0AP4g5jU)HPv5t4=xyy(vqC)h>$k}S=V=yWN9)mj6bQB2Xg1( z#_Nn*a}w4YAmx(yD0EcsNT4y#HG;8)3#=LkCub14K8Dr*amG|NNUvR#BeM@C>*m0f zB`OPi96HfD^rGjx|2j&Q5?)A(O7ep<-{w7tXhnv#3jpGZ^%LvYB^ar z^4A-=4s5AYnlNPXoM53bkzR27$ z-5g|GRvS@LXl&%+HZN$!wroLMOap46mO6oNc2Swl+p1Dmx?%CCRqLAfBLPQiS|@qD z1_3HgEL=YAKE?#dOBN88O`{IvmB5T(`XE6oV1K*sDAnv4_paFY7uT{?>W0>RX}ope zQYx5stb6`E6JYKWE9le7{%pp$PbUfr)lc9XKEeHgLf-p!5y44au1gXZmRAZR>6i|c_23zzZ*v_(nS>{KD@Fh z#5n?iFl$;L73VN4PG^NdK%&BAc~fE}W&la}B-Jbs*H-e$cGqJ>d#&QZ6FC_&U7N+tR4cDHWq%^_^}PA=N%P`HR-g*hDZD7yS;%;@D zTT~SA3LYP27FadnIqX%W01t{R#ap`_qqyM^NX8hxV&{Vj)>3f1#q&~MO?T?qk7W6O zh+&UimZnR`$Mv*oG`l$Y6}5%)#zvF=R;}Hx{vCuu>}wi1Wznn-IEGtc4-@;-F6YZ& zH`JSA0-a2mV9;0Gy-Ia#J$jfm?3$h-AIbD;0ftX0GV4NEr!zd6M*2o^BB52lXjerA z6Km;tvm?yxX^_7WSxmWZU-z1wp0JFr91Mwnn>PWTUK(`CERhHsJ1Z($yiqn|<-@Ax ziltE)kDKcAV=%+CECGXjGw?>(GO?(xHXEF?3SS71H$VOuBa~S?Cx)EOLzqqYTg=o( zB8)`4`Q;Ygn`E1fKAel8s34?iTFYJ3#m(>%IRb4nyDtH41xCdV=Q&rINe`LUjgAwg zmR$kc)@wvufda69(krH|#b&o68-w*#fe z1vzzDw4*~Ldbe9K7)YL~)p~84Zkx6!-~m%srAJN5SBBb-EXpirMfa2QeQ~H$)S1YF z2Nx#IXgOioK$0G-PHS+;&o+ejSCkg);z|7k1{I_hv$3T}ufr8F)apk#CnY1C(K?yy zPtNpPtGk@&E$+>w^$Ihq1;WZRm&|g8^oce4@Z~24UTVAc>p&Xc$uU2S8|s>{1yBA; z5e)Dhkj2wpEa{}bh6~(`#Oh1V*-XEXWGBdp7IL{6k8-E-kl(7&mCI<4(kJMfE{5V9 zH>bKi%#0$iuUi=F@C?(bsJZTV1=@S<;r23Ms-o51b30+8ocXMQ6EfZ>2kV4-Q^oCkGQZJ+lKoXD=I#PmR{bV9L^D;GYN2mS%ZZR3yVjq*QP^SIgdZY*zbBE`5& z;gy8BC2;qXIr~11w@tHaxc8da)@~P{(nFg_5ev6AA?|w)ZpN<5f}w$$X}pvu%^S@a zBo55zECh4(JhAgkV@zU#TEo-P{DXHH2MZHcvn2Lw+wGS{xjq@F_jU9R2`6tZs*?0@ zQmAYF^0ppnw{o7o(dVK6Mdpn2o^JmV=os;v970;G zno)<`dtnd(6$n7=0w_o@UDy*FoOWTfIz@Eh4~3i*IczPf>6M6-+9~Tw3D#wt0QKdb z%ytUM^xH@N!X z!GqjSQ_W9(G%p=^6RN*m$j|;m%09Z=IcG6Y8(#dw(2T9zt$Z^vEyqc)SsPhCK&0vJ z$l?iAo?Szs{2I*L zdkO}_`aD=cw=fd+WZef9f>U|9DAc_Nty&gc=u3d 
z`n#}4-o)0NV-(~I1?5*c3z?F5D5Yy!JNI%>RxiNKaE>7Kg!E9_+O4DpG0dUK>Qu|t zU;_C3m7nd@(w!(Biow$j-C;N0?T~s22h4_2O9;w+2Z%EKX086U!l|3D{wVhd6uX(mYohqMgy!L zRP+k2cBIF70?qGiI!*Ru+V#J6irA!B%%ZHLV`k;dI?LGJ!8$Cm53~7j+#lM>7I%`& z7S|)6^I&q|k$Ot{KvQF9k6uG6B`t~C_MyH&?qfPJqZd%)ykO8j}J1Vegeq&>-B59Fpd2lS( zGe3EQR9LJua31c>O01us)wu-BOy4M#=%g+R#yh-H!c~)GZSd5#14L2Ldv-WK4%n0E z*_A-Wcl=r^#ajeUycxWPk<93C;FUDzbV0hy2Nl&`+G|NAJQwkh&74#+W#-8|eeN|o z4m=I0sHnW3OF5QfRSTACP~&!J*ycuIm|5CPRi$?B{R=DS(&*rb*&7R1>G1Dac6P?+ zKd6jjW9(#(!1lVRZd8fL!&vP&8hS`jidpzk8Maj$H!b} zsSK;s6By2SzT|6OtX7b2WMXB-K>K#OEd140i*J5(t|1xkY+7bO-NIL*8{eSuVW-yp zj3VNH+{1tS5q=o&O@5-IZ?K>|u2Ya>s z?tMt7&K4VKjMWULvBgm4f=zo#%MZ*-Y?-&=rF(>P#?Wf|7FEV4sLoi*q^n;OG?t8KPa6m}8?y$$5*HNm2yG#4KnFZQn!LpsVTzK0XPwI0pW~jlW3DWa z$`<}kn&p2GC9q$G6w&(=wk%<3_0>S=3`^7Xw<=bepa#ZJjgV z1gsSl>%gxryB1z$j0Oc1f2LLQA7|-*q>)a%+@@{+LhK+8JD*>xuJUHKzFD`Ir^}a4 zP7SHs5>A^4ppL9&LYke)*D=aAvazL9q@(}&jxwV&`+Q;OYrI&=hluJH2|G7|ljM5u zxd>v~zLQPH4d>`?o}eS!_1<%+NDtKLiCvO1v_Z;)(gz)!K=}53I3{t`1OONucsRZy z3zZ1A@Mv>fOe)4q^To%t&q47UQD?VKDqbgVUrY~H5|ZQ{lxo=@h4c(X!KaI3I( z=E}L%z~L8DKSWL|yGStJ>Sn?B3^gadZwUKxi~}WeZcU3V8RiT#GmnYQ{2Ln`f4|0U z{(dzpaiK;LAUCnDXGMa)xv5tIGXiqEGA}QWX=LQs-@lnxgf-pVJ+sS+=^;(t&J2;} zXSGdiyEU+As`u7C2SE^gh!U8?0x6q_P22e-^C?uLja^U-;4g$OiFBR>yo<(%lm`Mu3qLZKWvxMl4q>aEned@DBpB- zc9(TE84Q=!!d!pUhWqE5mdos!%ZKf45l+|gkfzn1TA}>8Qs&+oKTB{(yr@jK3kH5>G z6g80|Z{DC~&u$1chOQLSQFA~JqmPs!`-N8(*>FQczq$NxKe{yz2qpX9_0Azgb|!P2 z+BWU^oz^P=s7TABmdqn%CQJ1NvCN@Uvz_HZXWsKz{q_{FdKE$&?; zQ4t+Ay&xcJ2dqoBB^=%0KvQ)ZN@os|5n7g~1PF~N5ki)hR>w5~U5Qbg#a00v;_G6j zhH17^!hDrmmm;i^1_0FJaVh6sPDPRH68%#w!)3B#TtYEV(``iO-20rv3YUX5Cg(T} zt1srJYzPoFn!x};lZALjD|r_4eJjlRbY4PA+&kJLeF2j7j$j27iq57JbSuOOd)3MK zP7SzKBgdwDWKNm2s9x;2f^F1BIY88ht;iOermCqTgMtM!_f%cYP2?eZ0CKGascM~5 zP@k6V5@Bj}68)(;^#4DW%-=`tr|d^!ww}^D_s^t_4xXhC!onMOTyjV8{!cSPPWIbX z*qI#G-0eZUQ2?gq1N;SvU!XBOz?VZ&%pWO;FCd?6+irhPc;>Vc+PHIvP_go5Ye)(Q zK}8Q~NZ5{SoE`-HaNH6#jXvXT-qCvdHr+4FvuLId#(UeQH(Y7DJxtqr803Ol8NDqG zgPO{^Jtw}`AZN(DghUN>*!-IoWVEhg921cn3(u)r< 
zGx1C+NSiZt@Bcd}NJn4a;FZDjp1;=C|t&IWoR7Y8mB}_ z9_9s`FA)&=(ERk5VoPu5sRXyG@`j!W5Y@=|8ssGpCmbZ z>h`~7o)^!Ij?8Ko#UbAi0z=4+^x8P_*t}_8Y2|mWRypRG^uN_kFK~84SU-rx(Nn7! z&5>wb2sHJ0z(ZDiekiKmA7Ee5@o?l8?O|L-GGE~>pW7TC0mhB{2dQl{6yS1*`NZDOL9Zx*eV6Z*!a6%iT2?KOQ`L`~f1WNyOUHXRX+%mHapzXwa+6jS zjyNc5cIqjXJ&n9CsTba!NYyZLoE^A50YRCY#87<67yG7rQn@l)3@JDM+E((i>leLx znv#v2{FV*!(^nASMf+Z#L87YleD!|#FaZd-HbnGW0>362pXNn{O+n?=yfLV#hy(AS z57v$pjNYd?=`Sc(*EeIS#=owpFjYO;N?PsxqZS2yNH|r5^;J6?AUxUVoF-JllmaG9 zgP8jG2g}Ef9r8s3piIPSsx4lwVP=cR{F*FgMnz?ejEvzd?k@uV_*-~mLp6WKOD4|U zxoA;v<3HK4YJkRJd75RsA%}mF|OojU@LX7zf6#ypna7&7HR{#5L(A6Yl7{O z{KM~NOgZXE2?$GZu)TB`ZaJ)yfr2u&O8N?4!cd&LI z0WTdGmiS9y5+B9_KIfbA2Nml##wS2){r&x((MxaDYh65~hXr&kvHA1Y_bH&Trl%{d zl)+&FiYyF86c$FwSOp?Ot(3^}dkJpz3D8u_zybuOV`ShwgXVFiF1Ty6lJ>|f1R7>0aw55slV=S&)RmZs zSZ#owd5BCrj7i~SB~{9Swq8aRmLs&foskhxqlf=dakgYC#s5o~M{GJwUy}qRVHGy6|cvLFdcji1I`CJ(t8ZT(SMzsYU0|K)ZwvB2G3m zupNvwVW^vaqizizIrx}+uyrrLymMPGH@EKP;_!3csIuEUzNr%=@;<`$tCUc&F$3^p z5=%kVVr}6JaCK<0`89NxH67-@G|=W*!rFA+U`Cb6ys>R%%sd960a1lKN-bFd0zIy-Be(95cwTI@XAC0gl73&QDGcrYWj#DP^aWf-9q@KO_8eQ+hc&Q;xn zI$uCK`zGISd=-6*RuE(yfCpoujEq~HE*BBs{wBWottMGk_C<>8o=ftZ(#42WV*}j4 zj<7W;RbIVUJ9K09ruZ6%%8K6D%2p&!OTXM7+p96lK4a=p8CWjjAcYowzSAz1T)^X0 z?_nIm|I^iU-u`V`QSpQU6^h`rp>!QDx8Lg7XfTEN+`%k5Waa*zy0}1eqPmoQk&XfI zll%-g5bx3!L{x;vUos!23#UJ*obJ6MH5V?GpHJ2XsSD%xn}k0{`5XJQoDb-Iv%t%K zG62%@J1?)+7+&q1o_S5TvQ6HoKMSPcAiwY*XBVW%AM^6=baVx@hF)Mt&cJ%Q!;*;= z5AaJ=ND(D3SGU_&+pSQ92s)VBlnPhSt0*QUW|zUrEUgub)Fm4%cWlillOIW!E~R}( z4EXUk^1)e$H7n`M)K_CHf}kla;Tw(wLQFsEVkVMRhK`t*tQ3|{yy#!-6o>BTAHOHP zc7baA$GZ!M?XmZ23nIS&EnNKO%iYCYy#Hh!yG|iGqPbHz@I!6%RRwSllZ=1v=>`N3 z$l5w9^6;CTpXaR$Y7o^EK7VS$Z>!FS`Y9V;?sQfCtfNG?$LKxWGr4RT+-@{*nbM}o9$oJU_wfKo%U*RdIWZuxtzpT4G` zEOy}LLa(eUkFSbrVT+P?IaA9aQI{2Omev{4S)GPqKGS=Iv&?b658o9$ELAv#%v@Vm zCTR!yNF94*kSC&Y7ixE+!4Uam|8F_pKtUbb2;u9uP21$gjQN3p4=M*8xA-U05K!c_ z%*-n0X1v|jZ)SpzKhY;Y@r%ocGIvmUi(4nhwaee7fPZ+m!j~R@6fJ$`Id+(Z=z#=k 
zu6EM~sU%=UW;ND^kljl?FAJs$4MJaAcL7L-0k0PcY2^|lLAVFJzm;%>d4?1$J&>>ywyhf4sDg6ndBB&B`J93ku9R^ z$e)7?hwUC8W8 zdD^;S*~RR*TFY|Gi036DLT6Q2I1|BNf!oW|;Oa`AJF`Hh@&1U

8~CpW4dy2fta zZJ^hTTUGVLGD-cKX|PlGmaWZu#8x>75+_-WwFn1!Z?06ud;3Prrv&Ao3<2ziTsn2mXmvH_6?Wt_9-h&$*sC_t`Uyg?#T~J*^}CbM#}!WWuEf1?iR} zbo2hzJwp20scq*Be}oSft(|@4YEicv@{v;A`5p|Mne_;qZkE|p zfnKegKOetrCX!0tp-qKNKasLooL$clO0FmO{v3AyAT&^MJ+Pu;Yr1JYAR>Yfn@MkB z=xn?qt@qgS%3s`?Z(Hb8cBq0rbJ*=c=@@}%gFaMB?p1030Om-q)8Xe1<&ebIkm;=4 z*D21p+Qise^U0!x{wsVaaSp9Skm-(RrT&1 z37*Sd)7CwLfa!t%S`jx_A03<~VVVZ@XC2eEVuF3*Me^CBU}rJJ%)&-KksSr)Lfq4b^x=RS0L!g+T%L+AM>gHglPF)gZ{#;t<| zIN((&_~B+B#3!JS*mA|0oRpvGj8m(*c|U!5DIL7hn_WF!?W11p4l~hZLnb7;96{x} zKgV*h6{}WOR|71GpNXfY$*tzJFsWzeYH-(@=W8H8E=w^{?^COEW=x4WVzMWD`G)$F9o|aqSDU;%S%1G!wPzOa;@~DP#suK<6f8c!jR}c24_pd*H zK@8x~7Mwq*oF7TiP)ay&o@Fay9lYnyiN<>G1A?+tr*_nrsYkz%c*sN7hr8wz{jaUB z8j;U2#|632z^$~M<>;--2vY~>q0{g6(xb%QZ+m4%pn?I6?FKqhKSvWZor#iRL=U8_ zz=y9azdHGsKYv`)|C7Ptx?VU3-S#PT3TeCyF2)H0HePeq z0YBaMQt)r=yJ;Yq+-MO3{0#(B3*jBMPh*x^tbb5>ryXmTyh+2aMdP?Z73o1Y9DUC& zduom9TR#|<#er)<>y*#fUefvYKkaBK<3OzwGPt3Vhr&n^jfQ5z^TVb#$p)LfX-Zta z*+%m`@}HdGf8{6NTS&vU^~*JW`C&IRS^bX~jNG*Ujg)^TrMu zTUzr#O>+6Iby)2R;3S(2r;N^V;?+3;P1)B8P zOx66b*QNP2OYQO>$@#Cg3iEHfh^|05-M%Cw5)_PT=s);Cnm0 zsT%*P3b}@}qGnu^%8y1~qt_)bW1?m8{Uevxfx0dc@FNhCq8!2w{wy1kSkKW(qWQ>2 zI+xuMJ9MCDnx7dQWD1*H*@yG8O2n zBf(Bq@i0F_)BHmdviQ2x60k=iv3-83dpcg^?LxxW8>X6mGy2n5 zM`5Hpv!wMfFS{utd8z|XqSefGahNtzFn5+#mPG-b;LOb=36Hr4ML8}%?G`vCq8A$t z@}^;_#{g^)zF9Bii8FR{q~9?AnX48B)jL$?GUofrk@9p6PPNMxcPKorm1mS02C$gj zW6ikfYA1)2_aH$*l$ic!K^Z<%g(38Hp>*@be#pQ&b zx?=YfIg}yDzr8xV3_`4449jH#za&3n>GRk$R^->z$)PJ9C66}^e=KVG_m% zmGD;%se%KNf!DFz#@rvbjbBIsr6y5Jx+|?_IGolm_ugMwP-yqH5^aW=t={vHgg+2t z+8erWd_@p`!y?RmP`L_k)6%*mj*HV`Lzw49i8qp#CyYN6p14Fs)sOff0|;LBB=mO* zQ|s&~#ft4-QIOB7gtNAZeCl==fsi=%mDaS(+*?0jG;;~;+AatoGt1hhmGpL{&^?yj zY|S0r6yvB9^^q$Axk&=P<=Mub3!Yosw&N}xXwD})1>w8^%n)VTJR++bx-O_qmI#T3 z&W>AD!@*3l|24v~LGRw7#eqH21Yeri_Dino>1>s3FULtfSsRySDr6Y{(;; zJWVL(`c0=4A9HQxUQ#4Y7=eP;0Z2JZ6(htK&K%mskb8|4c@G!RFRxu}IxVQleXuGE zhLpZbbp{nLw>RDE671&s*EaCpbJHH$-L%M*KRe8v5bW;Dar7KY2 z0RK0ofIrD{)kFXI^H$bEk%f24XTp^oy+P6a@_XXesE}HlT{~QF$6EJN6@zE-_};zh 
z=VPta(J2&fZoCzAAry`K%=!YomU8ifitOn!hSYp_U`Y&^Y4qHh!^&_X>vh<3geZnd z(L0h{+3N(Vhg3xp_H@M-8}Ker!3@ybAjhTwzt$LbBy~qWDzm(_xLAC-MydpGJg0! zo#wydwMRMX0R7(YoVy!wk(2w*(ou`g*%z2sTq9+LRh#HfNNwap-ZKn;{KB7JjV%Oi z-&k-lZWx$$$8T;FX0>np68dC!Y{y_3+0T)yJ9G{Y3o7&2B4aV4q9bi$qi?ZQD`}0M zd3|HEFtO+WqfWBwYVLJ^*f6D2sFAP_(XHF6UbaKBFPG^_D3WV}w4CrRDQgf@x4H?a zS?|*O_8le~FI(ko)L=fS^p9oT!_ue2Z+Z6)zS0JVWt=O7Xmyk<1LGi+oNG!AQwua? z84U;ZdH06U3?C%~MPyNuI6-)2V$3Kt9nw~qgv1lOe9E~e8OHH&Ou9_KVGZj^i26|x zIfH)Lb!6jQ+6R>fu0=kYf;+{m-m}S6{*OPnmtzu-(qE7{&ObYJG&Q80LYVhaUPEt7 zg?baEO!lk0(LoB3UJ^OH36c3rhM3RD%v_2WOJd;@aZod@vw$!rn{<1?!F1nm+9(U> zco7=6Uz6g0u%R-h)#F{Cu?~09P0_W@}R&u(c(_OkpDix`J>K zqd(o?HQPdg%BXg->q>C}$o)o1Q-h|Oyw_vQ6Q!h1iHZUTR-!d3Wh08n>i)|S%3|&F z{kGwqU%jttm414*Ex#@#UP)`PR7}=;LGn>!x{N@xIf$Q%T+PRWNMXrC@pbV()sZUW zD}zg2z`;jWa&nsL$INu^LITOwSa? z^wTGc3hTzblh2a?;*FLC}v|Sbn zEt<8UZH5ow(_H`ZR0?_!Zvr0?HHD(&HP{pQ4B%UTmuAQDR+sCm??mQPgD5!E0Xo+8 z95ELKnDJ5YzyWAg=LzF_oEb4;L0E1PN^~N?#{A8BL=*3YVN_|PQ@OGO(YIzHNQp4Y zg82WfirLYk1nC7Raj>rpc!w;Mz&v{Xu4~)df0rv(vyl=8dAD?-v&-VUO$%pHtFLR5 z>CVh?T8dT$QUSe#WXnb9 zVGXLm3`Qh|EAvHGg+;=tuFR@_ku6Fd z(pHwEza{Ym+GO$5dZTxM9KxQX=qOt{FX&7Yk=nMnl$yb|UNdL}YTw&b@4{8~vFbnn zcjq|^=J|QLPTTn`Vpin5zrSRRDo72_4jL&ea>k|`tBp~wir7qK)kmOy?v)l-ypf8WSi639}Eq0NK)pz(`MD{v)%eloN3 zLB+De3~v@u4tCOoz&*EIs;Fi4isiJ4H|R#3=9V{h{E928jkeUY>w+D0LeWe)^gm|C zGcWA3ph)l+8am|3?)*?Cr@l7f^3OCps64(liw+~Q*Wum%PINr&W{d$2A0Z-^;wL>MPu znt6twt`z&(VA*~x8x<-`OS+s1$1AuProl)nJF%ndzuW6y@!!yQc6e+A`N{5^w>1I; zPT^g>@=TEIpB6w#SU88Z_cxdh-)piiF>!5zo3^FOu$_-c9;p@{VGt`#o=I@kq}G1) zo)%5$TF)8N10zRy!I|Fc$chK+K)^S^(K~!gBF(n15gV3g-wxc>wmsNbI}ICOEpX~# zG79L8XZPPVG+I+wMeMLJy|Y2stx*>dmyp3DF<}=(N`8`If9RMMA%QEnc42`?#TU%l zGfNwygC;tf=`PJNL6F-O9p1`j+X&FK3IF(UP1>D33i92*;zR%AA2Kq$$RAVmCTwFz z61#dAopgU()*|UPEmuU$>~xQ|lCGlz5>|5xzjp6Th@@`=Y(@$)A6_|LS4ZvTF0bN( zSijf}**(3vftc`W)pb+vTip*e?cqZ-L(9lHR-NeBEY67mXh@WS3yy8<@kiRvSsWJ@ z&jJ3|8JH=gRogMm^U_{Vv3RR2uL8}yte^RZ-4bZ*zsG2wA77KtHH}%=!y)V4{*890 zY7W@0fD*f$RE2A>W*RO~VLm^X5Yl+I-QlJsIOxV^t(rN@oR2XzwQhPpp8YKIb;ftg 
zzqiOGrN{a$F6-C=p>|q>=v%;(#-5J*UIg{NYqG$q6jsw7i|4C8C+jS%`JhrXxZQJo zW1!Kdw{D82ULiMw&awmt1#W(ndh$c|5b34;LW&FGO+es@!e@Jpx7Mtp z=?*3F2~g&9inHj7?PVoI{0DbH2L_tav~$tr+f~;*>P}>P-wR0@(;&Wlm^j0B-1~XE zS)Qq>WFY|2Gw4B4d22sr;orF)d@fye%D0h|DuipoR%go%Z{QVE>q{) zB-yzCd78SE7qo?<#okIV z?e~XQbr)9wqIvOXs<%=#=(Tj(Rm*nCZ2eY+%&poGV;Qhp)jt4HHr{Iy!fCPgnMCu- zeubA^nEYsLtG}^RB-JEg(U$O+6rYipe;TH{zZP;&Gb%B_+ z`_`?_41rbHA=;yyo}R63s8#e6))_%?elA?@>|6GZpbbJvctxS;99sHk1S=oXgecC! zL(dL~pIkXBSQM^fEnYn*#;h3FyDPsT+*?h_~B-+QU(hG zoY<_yG~X5;KW=IHHg9_^iWAkzkM{u4Jv@Ut^YZ;b^}oUzk+B|g#D7a3;Y%%wo{s}% zt#hsVI+NQMFW;lh(buwD7IKnv-mefcKk4%SpfYP=L0K(XG2}){%KTEsKCDP~ zd59sTF&_X@vvfbGOqVyOr|(;+qwokMy{E4}+7p4c&XRwMFz_sI&FH`6*CjvEkn@!< zaC}CnOhb`^NIIkVc_jy;xv5YPn~@#$LFGQyx>8boBP!GP#eGc2|LW@hs*ex(ZVT2Q zg=%a$3?(~zK+SRzwl&PkeUdw{a6cx^5(nqjWlaa>`7nGTfF)06ah<67?9oH6wImG{Jy z)UR+#+}R3%W5frg0(9wKSRTZ}{j0jLL&D~?jRh|1eME4>Zg4?!2Kh>4w^Cyg{XVa^ zEps8mY1yk?QjzLAmg;4L>|QbRETyIG)>7Yt-_9wnFOZ`ha1%LNhi|8})sv^W;~}FG z>TqEvN@8`13Dw;J?IMzd#{urZ>Es?~%dOAX4Enp?Z|j|zy&(nSpF_Y#Jwj64bGzW2 z5H#$^QkWzZbJsks(5pF6?J+`E_G*kDotFU+24eRfzS<01@?|1@jg9)kNy^hxz;WAKh{;5{bR zTHx-46NfkW#(Iy|S%5=Ar8FnnHuI#gmH3=Cu+9LWH=P*TI-I|93CDv3tIo+g`b6Qe ziE{Lz9yj;U?1!P5-CkBB^F8pKKBAgDFM(`JE0Shgc1_3cU;Z06BJ>4&IW;45n7!B7 z9C@$!R9OEQYf&&Z$w_Q5{G8f>akktrUsD^?>i#jgmxXr+3$!*CQke%oSQ2%`-z26#&Egx{3pS5(b!;Op zzP*Lh)UZ8qowC?Aadk6ieCA^AbodsT{v6r6V!azp|7&M;c$*hkd#Ozdr>8rs5n@c#>#_mRo{wB1Q` zg>HIcZI~9FjMRKD%jTZ+D#jbU(1A|b6J^9 zqhO<1?iC&2go8h2*+sxUwatc^Rp-O5?`*zhrv8U4a^4=TP=)x-fD4Z@gX*W3M z5|toJUpV)!WBS|w-CmzGe^B|cyuZ;RC64z&rDoqn@{HAOuU6%dCFt*%+AJNT3q?o2 zG#jq^;qBDD$efxruTUG)Vo?(X)QAh%-(2xHM+W5>d{D7+aB?es`A&{lHP|~ovmljh zxBGZCE*avt)H=4Ha&&ox6cPj1&*SUMjPE+>SHbds^ZNbwUP?2mCU-jt85Q!JaIX@`7}#A+ai|}JVx4U zDzk3LwrLWdf;3&-0zROW6*r+yN|GK_c>7#ap!W<#+OE1YFt_OKf^K@C^irl1y)*NT zR0!Z)?iPMkdFE#9i+}lS|Es+7DL9jqUZ{7d=h!NM`DW#5+e)2RYt#1|;?AV|Ax_3< zP?T_!Q=pgJ%#194)sd2-eP=pBnc}dyi4S8gm;@e>_ATGqAqkC7au7Ay*$3b#akZGa z$Aa46pTV$>u=Cz7omeKou6p{c)*IM@QWgv0k@y-!D9?ICIjl;Vo6mdDk!9UhZ5)c4 
zXRwEh{x6I^ZI>j+j@2hU_!h0({y_yP{5S834*b$Lni0I%-ZfU8=09gzwAmoQ#-Gyd~SVRO&EdFbxq<9o9k4E-e8LLe9p80TwfpmV;=* zwrHiKCL_8KTo1AO1BpQM5flsfMSdH#l?x*q0FP9g2UBC%zJzmaVejgWp+8p$U^G4D zR;75F$b9B=?G|k1A}}nqKDmPk)A;HqbVxQrA)uI56ZR|%coa4Y19lEFDCYX}9ipwY z-0AHkgm-9d>T#f#zHds{lN!6vPw-RqP+5bturstp6IXm45xm@!o)g{@(+;z>1?ajI zczUSC$6Lm-sRIL4{NYnuQ?k=qez5_#7)wIbXtffxc50%yAlm&#-2+vWp>$?CfekHV zC<)1wx+{T|l|n0}9AhT1SINJmRB7>3k^`ER3mK2GIgh5t&X=ORXAsO*o*6sg&f_Z+fLq8p^o0#2Nmtzyg{Y3jC$Uk z!VP>#&KM@F(ce}Gyp9hH>Suy@!-RImMNSMic{_Okui^X2ybz(3Ndu6>CO@e7eJrG$wASRE-iu#1$77+sUFvks9}vHgTyDG%kRMViOnQo=5@;B5^^Z2u4&8 zLGzx@%zZ!i=XqwHnZEzL_q_bW;hgJ_-}znF#W~mY`z}q?*7+|AmX=})bG}rrY|wU` zH;{%2E9OeNlsAA7Me0ku#$O(tW($sz&y<&YsBCzya~@uAHo)}sJV$CL3Jq(Sho7hu zoD56q0R{1(r_u|=Z(d%#RR7~Y2=d?Q^iO8<>!Nvv-(cgB*(R+?z;N@(S_7Vi7(#^2 zbNrQEv`IS$2{R(6J+Pp;1>Tq9Ob%}NoD5IUHVh?3wc)X^$S-gNZx4Oyj_q_>-t%yF z!JuOMv}yO+Hv+|WW-YTp^pAcyto^v(tVk{&s6vnf#Peyv4+(PLmHM% zEtnE@p6*0>$kA+n_upPZe^T71_{B9f^(S4>6rg#Rko*E?6~WaGZR*Oq>w)7)5QxFA zcj&R6x@@flknUXS#O5X-i5!KJUXCr&HHbdzSoGujJ;R{UI_UN{_9}2o_z~;nkCMm_ zO&Z6bDF7<*q>;IOJO_i?sx`1G87e;5>&wr7vhzC;nm90!po<{xckoDLn*s!w+ob+N z0IkRjbe<06S(kJv@J!Q0vrGY;ybRIp;1f2x4{&!JCr|LoH5t@-ZMzA;g9JNdzwf5Z z7E|}bO&;aM)bMCGPSIk12d*7=N8b~xS~g4Ft{j_}#%u`}g)%PZ4AQ|z$GS8pCC<+y z1ptLDc_fjOIFSZyOAGd$H+H#G6WrXHCqDZ)PGL?6P-9me7|`l$8&)|vI4bWte)o3< z=Lc`}@|zB}F;QDkKK5k#-p}9fvD-b8T(+^mK63(K#7mfGvkNPGW9CHjgEpI6>U-C^ zs>iy-Vx35~A$xO!Yzr9@KIV5{-tnDt>@RMMeZS|}SIBy;rTmDpF#1Ed4#JK(I;VrB z$Ii*4dQ_{_xMXCsG^I4NQdm58FAZDqDys};IkO;k_ZA}~{p`0+c92#CkwcpLd&3)t z27VcPIz(|i=^xOZTt_I($`2Fi$B$liR>j|$Id|=wLSwhxCRe+J(5ac)Ib@V@pIcu; z)aYNq;<>=MtSssan|T-$z}j5VUuy{e5P-HbMfs`!U5`{H^&lh>^(7uv_XelfE9;! 
zv4R-qw;O3f9@x;{e;QXPlp49u#EvfdpJ5D7(X)8jwQRqm$8za{CoM?2Hu~Q1a z5`;=5V>oF8OVUL^hKOF(Gj)l1tS9wW4?IPP$oF7G=euayKEfGlAyl}*Z=Z{QKZpO{ojpumJv=qr^%1-Bk)RU* zfW4Rv_W4EW>LT>DJsq>TlYX2nk~DrB%1I-Im2AD(AT>f5A73jZ!`$YvKk-i$ zrz!k&cur2KbQgDY27*xAd~76b+yptnhFI54`j=My@Fu=9SC~0xd#ZLhkuP~CJj;B{ zfnM2$dg==;4?aNOZlgm}P_&!LdX6KFJ*6mu?Yvs?K!k2|o1&eniz}v&P_o@0RFK}x z3Wmre^z|m{NaoiXHTqiDs>&R}G`rCklCF%Xcduk|L8!4~{4Wx@1NRDRA`{oz`jP-% z3fs~-BDkXXbh{|^pt?$m;aB#9Z?*yY`Zkp=ncORcn~6z7%-BEtwl3KJl~~%fsM9o( z!VOAl(iA3Cr4FOow?oW&xB6*@h)#jT0{fbl{r&e@2Z|d9ZDx#%&n{gFq143|nFzm#XB2B?C^PAo0$)UGycH&_(n z)j?I4yxq5S@T0NDS2%^mjH``Z(rue{(`WsN4vaEiIlI8%CF>L~>3U2hLr2?Xt4xmb zw2Y~FqSJ)J=QrIe1yIS>&A!O{WCXO3bX7sX>g|s1LY2GVJ2{ zoKpH3FW$iSv2xHy~m zeQD&hmu}2aRe*n~zv9tVUN=5JEcv{!6?L>G{L8`SLSwNT1r=nEQX+0UBRpP}0{njS zTP15*Sz=AyiK44u-_7jtFqk>%S1+)KkotApcYn=nWa;|MoQ*9W@_WHkwI)O-`S5Cx zr}qz-z`pitHa{glF^i5|)Dx(Vff03VUIy^DHs9|lyUFhz-nD+n-ngiEH1!YBTA#cIs z8c=QWXZ_yq4i4GLcd+nui&bh>eqmJc24Nd9KlCiEshKst<`g3hb8cQ7YwKKEC3KE+ zh@9C*=4bioW;;*G$(7Azf^g6NFSh|!iR;W(f`+cjyDN_3|NEc+@4@{WTs5{;x}CLXqEff(kh$&gLS&cskJ`SpTzvnL24-Fo$jbng)YF4~wD;6yYJX%271!LXO z;0|=Xc0tYeI0DEkeH?V1R=Lx~T9{*`L(T6;I$+mRkM|Kg)4&6?#s#QX8>Jas+uWG5 zDJ1TCOGrVw+OT$Egz6i>DJQ8#`{H|$80btlU`K;f$L1`O3 zN&gNIt4Bs|d-9YFMi(uKA6K${t+}QZI#fLpi&ztD7MpBT8f$9!XEEJ=wo+KOv^o<%^o6aU>e&86MI)XIbrQz}9du!-Qkp6Agj)`u()R+UBf@>96CR zubZQfblPkW`J(*D(`3e^li&BDAw`~}I?qfXG_Aot#)H9$DcS*gaSZx=AnPHa#JeV?n%~{zB3R?=R$2Hk%NT^#Wo&QnXi>@iu&lCKa#B~56+@HF)NesCqrW++ zEa!TZE;#I1v2-v6+h>xfWjyGW%)(dE(ZBm&+5KSN%s{>MPSHa;Ko7Z;tk@$SsW7iE z#`hqjMa-4p!YuxF5i)99j1QPfh+Y9tza20A>}jKwv7JhK`9Tf%0(1Ibx7+BLz_XaJ zlgISaNsKEO#!*H_+u*9IN*sRIHSV2)`8c6~KU+oRt-anVb;1}XS2gDrWiFWqQv=1$ zX4P1-;h6dt#L_|!A8g;%Rhq0RYI@Umb=?C^83<6YLJ9|E;UePv@u35ga!2n0m!#W$ z35`Os!#a5##yNF(T4zc16M5)m?Bl4j1Lo%G5p*dsy2WToY}``3Tscitx25SXk&SRu zZ)}q1>os;0I+~^xnzz&)^J9x!zQpKn8_%g4J{#{`{JC;&%F_Hfvi0(GdfzPZF2OsI zQgnjEjP|wlcBP))3-Cy82%nUkYD+h~9)HVi$1GxEI@Y98Ty|@E^e!|eVC$P6 
z-Xwm&Pw(8Ycr^;-vvY8xn{x;+?^^BqcC4!`6~lXf`cT6%d9#wm5peF|D;UKmnyF~jclmKd0yuGYMYHhT+?SPJUo9md#F7!*+oH{+c zNV4#3*i7?6uzN9a^Ih_`>I)jwwq!Gxb*}mO^jiB(FHk~O0ynh2@cFYHQ(hsdVWV|j zM@A7;{dslnK_5ImgeZ+y#v=-nQyq9;16$ zH|d}FUlZ@29POQgXyg7A^iDh?)Mc#d+sKcfm^-z;*L$LnVw4svb_qYw=}z<&-M{u^ zxN85&srg8MikX|AnbvEV|DaVx;`=>KxJvG8&=vaVP|X>2?py?1k|d9L2P;QZ?|PS; z7`uDmE=jaJJbFdgyfJSCplMUwHvBECOy5vxypux=DQx5E3qN~ZD;P>9^SdJFO?ixu zgD9w}Imcv^QL6$|a0T3dR1*&i^erSCyLIw;)S6lV2yk%;sF4{sKX`HlaO82)lc>}G z@K0{^fBfZ0fpUMUCFJz(6^1QgS$=eQX4)ZYGx!tM%qRUtM)X;(rHz1)hBD#!&Q*pt zCo@d2Lxtp+pw`CLtG)$yLJvrkr#DnP`!_d?AFmY z`?2b~$)d4jG9;@pV__mJM6|kryR?30(s@WUQQRaYFHk{Mw&59o-aI zc=}2JB`+`hPibRfXvDT2k1F+A8Cej_XZyk0s#z?-0tr43JXzWrDO+^1-jSn`)*N|_ zS)rT&mFmmbv-}YUN=rCU@8I~y zbCAJ8e1_ACvw-BT7PruK7&mAp+%I`^6RqB~{%X-d9qfJZGwx(zc$BVknvS~N6}nm8 zg+)jk6t>BWs-qkRf@;OBDJuSo~wj5 zdK9r|BWU6#m$&LN)+8zAGEfB{kBapAue9!cBg?(fZ|ADVj~|6S-*CEc|81Nhv2%Xv zQC!oDxE&i{C(YIp#jk9*B@tR8ACtMkQHxZR179H!ZB#fKLGka;37=_AJ`a%+}H%Io_hB#dNf#a$wM>q5(iNKMwr% z>23JEasmX+v-9R;;A4tIv#;7xdRcMsjnNi3za*T%#PD}`Q>%ZNnZ&&AhM1Xev8jXXY}4^89N9;g0D2 zom7GNsm~8}MHTm)jeeC`@dUOqF~tPRRk5V8)w<=X*59DTpHA#vE7L}U2wqhFxZTTv zxe&jkU4+?G^GT8Wkx)+XK;6Kz{%*;3QE0lct?uUY?Qu<2%Jw)V2WvR?_U$wuDF#L} zT+Y;;qtdL$4eQDnUHDnHu4c9GYeIBt-K531E;BTzvi!E8w~fn%Q#%e`=UNr%1CcK|a+4s;G2KA5Eo%AAV(I z=a!7@w<v;F^_t8}L7p!CWTEkzw5euR=m?fUXR8m9Y?1{DW-Q{Ye*f9@CUcov*kJ&kyv| zk6=tg=MgrHgZBa1GGruF6hsHP`6z1qYNepiw%K;|DTv;cz9;<9+VhEoPs3S&1E1a{ zxQ522af1tnQK6VxETv+K%LAeP2p9+YZg_bsCVcic^~c_6J>a7wlo<-Pf_+}w-FOvO zc`25Pt>=|H87oiukWy+dDfx}8aBxDiXWePuwLX3z{!+t}=M(c^L%N_^qrSc_rJLQH z-4`2XZDv!I1L@h%Bi4POtu+rVW7aRiMU5Rwb07bT$y$T<2J`Vr6M~LRI=UkD>$3Ns z+uD-7Y0NJ^&zk7-#;7FDVFg(nwv-h3vWAs`)7i+ljXlkBFn-uBvOb0W#tn*8C|@<_ z4JsAJ&zmDh`DF>7lyV#6HX*_|%xXvfAjTsF+}}?F5M~X3`qAwAj*s;H9)HJfP>Lcq zJ)1csj{|&7GZ6O-B1^skfRiOlBvmB0jc?!%K1+sGU@5?xa#Y;)*_5)>ST4X@Cp_5Y zPh+Wo5Air9lXTIwzDy+i?mtuO0Pj?1yj?xn?gG7;yb6n*uAfOszoyrpdT&lISbgD~ zphK{${g9hB#gBI$;U|(2Z^SD%7XY6O675_Ib95!<-R9nsy1kP@sMQCern-f9*ASwt 
zN++escu8~b*8D)!H75T8Nkp$Q{xi=@Rs1XcJ6zU;uqGyj_j44!&6Y0DZ#0a62EFeu z#l+Y_|17u>5>XrzaKe|-M30qg<(fmLDt}%{DGms|`6eDwQ;)qOV0F`nUb_i~aD1Nl z$>Y8*gB=~GB56Av=}$N7n5{H%JcMt+FaeAyLhx{+-r0&0>tp5NMHvgYcUww;*lgAE zL)d&ZIEg?gGcev{f8hbF7i`O5lImxVnO@2+5wui$8>#+5Dn(2}E}RQUV>%Lvhamgr z6gA*iHKX>wwD~4gIJ%}4H4O@o;i2H}c-;cJ{r36WY~dBG%|=s%!t~j!O$Cl36Pmym zr*7s}T1ewP*XQwJhC|W${=~jaS7~ww<2Jn&6N5}DcF8CCLOqgp8XcD0cH)Aee6w@Z z3xE5Ubu;UQJU_;r;}7MUOgILuEG5>tG#Y-eN*6*2m=c-`@B5uba+gXUzwE69~hrqY~HSI4cUe7j={DBl`TIG zhC=HZG6#KnnwXX_ebUjkeQnjk~v$JI_n$w-TEn~usVYmxvp6kLvXEK!!&PbANvM)>i-s-^O*7C?pw zNmYP@M_Ebv5nU_{c-Y>=qoC6h={_Dwt!Z%ZM1U{$Jq!aZT+TYK-DH*PB!sqskBT(| zesymFRFHtemQEP9>ERaW+h50SLz5_Y)I6z5YfHt2K$fEE84pUPS_CEY9Kmp<_lIIr zG--lgWo_&>KL?&zU-@K^vWaKp(E@iVcvu+Z5jta_ebcXe5z`@&cmBsrDce6=nv8~X zn_40n(RsR)S+rK)yUOncT)pTOdEibb<83z~C%o^0VI9>P@cVn10WZGz!+*}9fBt=J zN$sP~t0gw8e^>*>m7_{Jx%egE#V$OU@#U=lbnFTVcX|{6k`5-_p*oJ3on}vOL1@sjaLoeH?_| zozW#k$pcjOu%NBE$%SgA1y0-d74h){#<}WiIsZJ?|IsZz2!af-p1UQbf3Ia)!ddhM z_V@;LU%j|xu@-6{NJoF4)Q4D?dgdSap&}wMP-GW`)|z*AZl#o-^V`@lb^^I}-%PI# zX^WJ0^*QlP_u>a0w9aYJ1#oJg7S;|yZ}ZOa)c1bmmL?wF>1l)YUrQWePWiU#y-Rt& z#~!x3YtDKdiG|ZI{P>%4r2FI@ttUjZ-o#9yeVxdx6H649jp+uWfGuUqtRS`5+!UNU zS1MbXay%}&ThHxZLGTE5uZM>j&|@QK(>B|=foZMFO;Ks~ljT(#(%}w7oNMthtr48f zqEoN(t8dA*PF!lfrOOx9;IkVwkpah}QDcUJ3Z{#Gb|c+{!!02iIJnM*tm|s%YDg%x z#yXK&FSLcVilFirj=W z?PHdWHjHCGu$r&%?>b|YX?!YC4)8$YV9q!pQwcU; zF?yKi(?CIkvjHK;3cuqac^pNFDiv7=p2n`P5I*fM%>6-rz|Z0je@o^EuXV3pWR?@eMx3 zZwJuct&9dHy#m`kIfWBHScp%`@i*t6eb~|Kxmpb)=p+$r+(%$nDiOCbAOo@~=z%t> zs%R`5+17+_x7v11t3wgm= zz1-U?(~i*owd?G4G$x64tOW1GEdlO+OmuU=rmK!jwjc!6nGGg@>!;)SISl;p?hT0z z`z^`ajnzTP%#>gF`M>d+kAJ6*VS1YbXy#A)wsxeAm!gDDVhJiTIoGkW_7Wr6J~<@x zYbi=d!p)tLm`2ES_i9ZIzUJYfj=UrFQ5rO)pWD>W5F(_QRAFX@1f$*mW9$HPAW&9N z?V&z2(nhvTy9f&cR~3v{R{JH0+D6FjDvP?8rmDtuxK8CDM7??_62&Q11B}HuxQh*o z1J|8O>xG@z=GzB;a`@Mwb8s7EaKw1COSCYi`<4j^qO{I3jn)n}8n<+1=N%+@Z#9fH z!R?QP2C^9N?Q`BY9>0D|N%h@{aCAI`x>V13a5q%DV@GedOzH?4ciNeIK39P$j*n?5 
z%QLTGE>g2+`5wKSs$}Mzwc)b~+4#%xq>?0YaV)z~S{CPszC=~y?5md?SbYOEEx78- z4#Q{@@WriLjbA6X{rI~EJS1dnnt^Qu1I_$-f%`~ReUX^21Tn@j)ktMML7~VKvsIG&dU#R>tt0g66az-{k-?9{~OcbMGTg{T3#CJ6~ zhDR#d==`l?|C!GtL*?ZeOaiN3!KJLj`~@D%*BiuzR| zGl|p?%_MHaCZ>yH3*R^%$Me_Jn2V{kd5uBTO!gSjnlc1*M#}))Bjs{lwf=tlsi3iV zq#<@B#sWIV^@Of)3@6<#7gl4BW-3No)*BoWfBdh(ZEtpPA*i8InS64wQ#-wNc53L} zs&z$icD$#J$2)n4v%K7bgKL6)pN~|KBSJ+Jcjh3Rsa=Ctv-J~k z(iAdBdv+xI_o?;yx^5##_lsM74VItJBa$8QMT@$tFm(9TY-eB*X~~)b^)De}+%JMM zh-JFn>-@~9j%+%$?C?jTRc&sMUihPml_;y=mv?Uy#L1;elJY_t0geXyHY6Y((jfKt zbME?|TlA7|szBgZE(J@1m8zlwp9MxCJ^j#*2$#!DkD zmh26XwbOMgDh49NF`x34LP!5<)7^w#TnGqS&IcY!AgykwKWY7RKH|GQdrrd_4v3nX zT$~-s;XUoXi{1n3wnd&tMF{l`2TEwI`-nB#x&fQ)x|?-CCgr?n|{`?#dgMCEVjC z^&G?>sGu8+wWf#>RBEHw?8wy||84Ict~%z5+!thENS2K24;?ERX~i-H@s(0>zAU)0 z2m73^%MWJ?S8XPfTjh*Oj?hBh1E)+b-W*-H9N80InHRq)rkypKqB{)U0OnB>Bcg>D z$8ZY5^&I_b<*Tqpr<5g)z;M=I9H*@Sx9LoB*soshcl1ZfP3KHn7u$r^5*tcw60o83 zS75gTpkgBnDt`W71L&V0XmwzaicKftJDm$1e`%k+CoFFaR296VipczMU%8i$mdI@8sn%-X>4{1k^F}cD^e>${Hdx z-D4iuzfu*hM7POLRgaaMdQ!sHR|mXD<^iTM`2P>iH#c7|j-{l}=r0iSclb~jw28N~ zG&h0R*8(W=$cp!IW8$rXKJ7DQE1(Bmjz9jTRf|3D!cW|&FAu9cu=C+YISqaJ zRSvqcSd-Qm)eGB=i;l}raKH^Y%L1_-V>eQh(~Z?Mf%fh-!x4gw`1j-KZ)T^1((A8~ zoN^|I|MOq_pZ7A*B7LIYry%rWj!z-%M86ea!R*Ea^pL!Nl}Sl=1Ld1*kTx>YF=-dR zhJ0^!{{T)q$qV$LYlany9FU6UN))kH{2)7a=(qqu!IM^8jJ5riz`R|=;Q8@UFFB=# zu@zTCP741dV35512jvT80qt?uK+G?agX=C;aRCb^Y2cMr#dQvKgp^1X9#q!T*2s$^ z#DMH@nMeASF6Z3Ixw#4v?EM}h?KG9`QT z$Pa{*I8euOr?@+{+8m{FiXM0u(>(Wz#glsjzA^n(YA-WZO+NlgXdvUu9{q^&WKF*; zqkh|#)Asy3^3`gW%(UBLq@x-?3BF3@>NF_u)4?zbk0UHBH_eyVT9?hTj*p~dm5qm) znBQ0UrN#ymTU0``VvnM%t(1mp(SZ31IVrbF6&RK$p|)yRX()A+4`l$hBAhzwCr|Ex zv#v&Xl{Nk_1CA!r1fl%y7`dtUEg2g1#FDWes$GN_Es6>3e-%DDbvFFd7T z3*PVX!fHIt&abr-p;w3G%7YvDI3D0B4Z9!tn(uS9Wc{Mh`eCG55!~FZ*K~6e)b7|X z=a1CEpNZKY86>u$4NW?{u5KHg$z!|qiYvaq6rx?u1v6hDY=9l5 za>wJax~Zl0mLx{7@7O-IMJ&}pkb9JG59}zV+RVkj7W?3b67Ya2a2sF>%;6%KznTKa z&5g7UU9DN0ZU&4a@g`u-OTZLZ)7%=U=u{f$d8Ar|4g^ggq9RDWeGdVnV89QU1w2kZ 
zA$;@@b3k3Ddh~N_8#ZZy_SvXx{~fp_T)cuB)2w3L5b!z=e0H}XK`53V-<)!9jhSqY z`l^Pn*kBm96s$%>iUdvKU}ozsp^+`knWPJ^CSRYi2UfnQfKKj22w-`j({OC0EvvM2 zoHBM_-z~i7K6#Vp(h*;QfVD_-5;}R@D_gI|Eqif;hHeqt3ow}2hEfwb{e$x1*6?bD z4tCd3lo1q*uSNU}7^!FjI9Gu!m9yYBY~~HlFP!jA3`zi*3ZPf61*+9kXj*#wYyN|s zqw4@q&j={FIG2`f2vMB(ds3;@8$-<`WPO^ru1X(aZ^3DutMy2cXmDPWY@3cQr-#Wl zvU;9>+xH;d&`NzWOW`pu<3Q!qHjj<0;$~wJ?S9;AS2O@w!qh%U#@(^Cmg3rLA1Bwx z`!+;6ek${B1J<2)!zI-_05<{z_jRoesrFiwe#4(y6rQo(N{;6Ux&M2-ljIwwVZBYiV~`Q%-Jn&oF~2>uFSuj%mG)ApE$Lt;DjPObF{3 z>?3-i+zI})WkwzZfsH7C(%bqed)x;Pck_#q7Y{;)^-@G+R4|1(DUmqGBOfxfIG|wb zsZl2?L6))#tm-$zo$4OzWc6~Ow)z>SSKc-1BzWv-*v-BsNXZ%-4fzW~(UHkc@&34{ zoxc-b-P}jCIGJ)o>hj`V-4F8XYcb;}i6{=IaW^^q*%t1ZMjhE~K@@+)e@ub!r)zmS z?&$C62)cVB`-aoHp1T@#RQECG+SX@z0Aau~#^mT&Rd+MXla>JvY5i)vi!J7;lHC!G ze)|Q@Ud?O3t8WK-=?bTSSj)~|&RTvvX0cGw-E8l*UXdXE=DB^7#k}eOKjhLN-Af3< zK2I(;-X($^;j~HNz}VUBdtUa(V8bIiFf&I$4nD|BWnn0ct~D6QJ(*eO7+ z!hcB@u;J4PWWChTm47z{$dU^B6yRY%8DzkLrzoY6U6iGqNE}TIb=dB|P0fFL$UHvV zBm8^;XP`*_!-~aHdicv7O4h)JJi=i-Wy+RDIUe6^jsO=lNGb5*cY@bSH>(<>^Bvc^ z$*@4S_mbAi^I5F>tRwfM#HYh?MSZDqFKpqgiKUD3DeVih+=+KwKj z@nX)r-0-o2YwYZpf$rYwL)pIgrvd(tZs7F2Z12>|wbf_dMymTK^M09l*GO17SY?}U zH2&IXnA&lwYyZcs?u9vB4+cf5Anof-HE=O#ys+sl&m~JPI7ItHt&wrwq7-*j zIsZI5Z*EEQ?Q%)Og&8(8P*?-W`P~qS=$*%^#Wfs<8=g`&awdE&A!eK76j`Eww$|7_ zR={*VzG5b-e7|S=n?autGkE%lFUDdaw_{0CYnm@DSB3}NpFaND=hL9S-Nc6-kn<2{ z0c3`pYIbpp?czM;@~5yNL+|d1Q3)y-K2BfDk=$hE-+Uvu%k&%Fq;)53=~}<#KYgCl zojFa5ABk-m?JjD1usgfyC*2VV*NE90G9E-HQ5HknTqE2)Mi`bzbZ~1?!}E>A52@Lh zKp65*JHZpEF=Eymb$L~qHMx#;>=U`N7Nqz4^se5e0Cj2>;rxqkI)g^VWUPc-;0N=^ z=z0+-Go!&qUvWWvG*7e;T-TA*eETjT#wONkJNjeqrP*&oCt1&WTHOMhqXg~!R&?eo zi7sy?=D_}FdG64Dy{CwVS|_`Pns6c9Lkyp%;bk`|Eg?LfoKhR{xxI@|-a;~MKGlOe z)&7-ofsJ+!R_4IIjFAG z?;~*q#j7`;71q@N_91#NReX)nG{vdWD{A4P-WZy>%UD+NKr0zX2dMLpdTI=tS`vkg zn4A^W8B`&W!VrY9K$J#$EshK!^K9IrjzTWPAdB<6( z5Zcx}LK22tnlkG54*N*^L$-&jEQ}6yV z;Px*L4fXxfH!wOnzt*Ovj!gs@>_`_ZK8a4la7 z*~oSE*!j{Hzr$KQWz#J(+`qKkV>C}ggA)m@zL->+ZW1-#iXOgn#=5jB5Afo#sG$ve 
z&ubE&BjcNnckUAvQ#w(vWE;+T^UREM3odMQTK{D)T{b>(6nlT%-wF2(?TTQYQ{%2| zE2wWIH+*p~(z97jQs>P~uo26KWXD?r%#xh)5aZupWS{c5R9}n@P)AoL;j3B2x_N7A zF87$-(`7%B--x}P?u!BI9u7U(x{A7%)p%0wCoFQ{%dGGwQQr?s#Oag;wu+~WePS=Es-Im9|Dqlzb_~Cg;W>dL~IAf~+ z%Qf|`6KaS?MBa-Euc}N-TpmE%=I^>n8$o%c<0Xm|wXluy5Kv%`vw?rHe(lMV|1Fq* zv!BK7RFZhtKgdn~XG^Jr7&c*;g}&Wdj-MmUgR#=^t=Mttc*C*A}9=iia!{ zdX=j>h5`4-0>8%nsh;-BU+zE;grL5@l>~N{Dxgi{OI97~5$aqVb-14!MR6dbtcOSL zjX|Fb)1jJsA>U_3CL==_c3?`((Sn96;}oo%j~G=X5TLCr45O1qHgfYtpr_eK$wq;_ zJPxH6u)r=YEy=_~a!So-SkZLE(LR5`R3Z$3NdntsfD`YpyU<^{uYa>%$domJic(r< za}{QPoI7TEabs%kNo;*#<9zTY`tSxI$eoS4(VAbk3@-jjE3l7bB8mw;QeWslH>Gk5 zdcS87Iv^inV;qs+bY1NU+WjeziDmM5;Nv>S2(7T(X6%Y9ezWY`QrR5$Ebg3s+E2^W zm^VzXy|gB)H(?mM#pPy1f0el|Z${yYS0m)mK@#s5-ud6q!+*c&2aRT#Sh}ui4a_oK zYP=L5kG@ddF;tE)w7-^2M%t=D-w?Zd2MW)A$u--XoP4Hl)oqaIJJn?zh@-5ZUQUnp zj@TxQp@_vpgj|sVorbQ{by74sx~?E{jiYun4;+(>x+)aIEqwW-*1rrr|SDkn zEb@-u@96j^Bs;?((Y3b1@@{_2A2aOdC+JHDwh=MUtjLB}t~I+a?&oA}q>p3P`XVZ@ zb0eN|wAuM3Oq?}9$rrd%T~_bE{Gu(X5z}tyY0ZYF`S3IZ*s-euzmeXRjiGY?C4}L6 z%Ay>oZMs;IcgQB!_f`DiFTo#Kix)9$q#<4L$H;+qq`{2%xVsG0jMiYXsTW`-_VMMy zUu%8^HF`V_w+%p|!79%HzF}i6dIKey9LyIM%1Pxz`uwQyhm_P=;qMEalIPHwvvOqQ zEtKt?*SavOCeZsr%|I_BX4LiCl8u$(TGDorVv>3~@6(;i2F(F_cYb=o_BNLCbh-Fv z1{S>-mmOToJ{$gikL_Id)^OgfQoPhn<=`dMsXMic;^xHZzb5)DRu15JZ_M8BDXm9X zh=58_MjOsE64c8{pEJ?8r83uz`h{fu(=Y#e=lr9VFte?7RDsHDcC0!{8F0LXioNk% z<=B9-KQ6sC-*5rl>0&CtP_9u4cYw%O+QHK?tQRvUtCM}beXYrc z#>NIK=fmg1C+;*(cjGoEFAK_T5dFV$GB2A03*v21%9_wboP9KM5@D{Pp*1|bkqH#? 
zEuGKk_o*ojfg@z|qd+YmbLPY*%Jr*(71Q3uPT(JRnQK{*;N>%HAx@)^m*-`O1=RBU zJ+DbkH{EEsMg(@6W(a|7FYofUe|VG9iP8J0WhM^xj*9lMGp@T=(#$;t8+U*~60v_G zlcfv%5h+5{t!)aP#xVjB04t(Lr}h57p6(CBnFl(}GfJ44BvG#W>LU|)|UlyxMLX8HYB$#dSO;4m?%pJ6thJCQ4rs?DpIF(8;y-Ld8UAbjAe z+`DWyIGpd_>am|VFZyfne_S0=4B8m76n|vh-mKz(j=QG7H9*PYXmmnzetKg{*V&*$ ziWtO`h}G(|O%;n-Ki%CL=R{6Fpi8>L_gaiYFh8jo5u2@Dpd!cHM<21P^v{}flydZw z%Ga7#>XDO)%vm_6#Hz848SNX@bi+~JMeN10!*A_E1is;4q^KQIz*el6GlYv{Kdj ztNIwUAA;Y+{p)@C4_kf!R1Evfs(b=O=?^Ml##PVP&GcgJD^44IZQhmMgx1W3&3vr- zX|?k0#7cHcb@(~lJT$Pst&RlG-*^x!OxfUmC;b=?g^lp{zt*m#f_G~f{)8hZGdb4c z?p;VS&TB>(oi0ShrEhf>HmR%M?}Qf>5-b$jbyTx3|9`Gy5`$ zxyLhoJ5%4eYtm`k?XxJeBJJ#8d}Z2ZQzXhZ;1>MJ_@>um3AP|-&GIYcitUyxr$*Y^ z4GcYBmx-6fV}!@YRtW(nyz=u^F$bv4UYur4IW#$cC_go2rx0;M&dFj+5OrSpq~Xh+pCYl)0Cu zgKLGSc+*PxCBT7+3>fY@Q%mmo;UZEYlP=MzCnt6<$#cDTIv2HJe&77mj1x)xKAqWq zzb7cf_*q+G@zly{(C5!m-78ehs#uIjF@AtglQ@pyZx2w^G^WRz(d6yi1$%ie-0QfR5 z74v@2dTPb?(h7ShZ<4kIv#(#G3 zUr*8gpVV$^Dy+yaU+45(_o>?JmMVC2%A)4bXm1V_eU%lTpr1cPx22+*`l)ubFZzI4 zwKGI~88)Pd*d@mzeH?`mOWk$qZp^B>H6v#tVkD5?W8mLd5|_dXnyl0PQmlj23GWl2 zR*#go+mAFIxD9ccbB0;!@Al0Rtz8pZ0IUY;iQ9LMM=e`y zJ40y1&N)giSJt)Al(n2N<$_^Ns|U6WYo6Om+8t$UYK?qBc3f86r-tQlGX zH$(knN-(v$t8hqq{;-L7V=fPS+6`@4;#vV8uuRh>ylZSqgDo4yCC<$vjerxzo!kdwH+9 zi8j*Sr@4jc7<&_m+SqsdcmH~7Ow6|0QbxRz<`-j?7dp88^75bB4^^G)jYod{jU1&H zJ!E@C9|TEePokv6@7_||>0i}gLv>z1=I%rp6E||wZmS7Pt4Ein$G2nX^3i{Eb2_f_QSO|KN<|Oy$9%CPQ0ElCKQtto%aQ7x)O{M9&sFvMY zRhH5gr6^-7EfgXkA~S*BmVz>+MP-H*AjB9#1cWdqRa%P3kV57$RWd}11cX4s6jBN# zG6YJ{1d>32FbE+`8A(VyvCh5coPEyi+U}?O-d%my!^4yC|5?22{T7S0zW@8)m*I>{ z=mrXAQxj}Ymi&r%v+1QqQJKrfX&2W}hKbEDhFBXTR`HtOz7@t$k8IoC*fJ%G(lp&1 zQNC{!GQW<43FVVqx2Z%1k$Q2(wYAOnjmTphu+%V(jRj;7g-lo%=OXEtsa`BB_QE{p z2J`a6)^L0SnqK1_6oDIMMF3_w{#||~+u&(0W24h45jd4^P|=FlHLz8Yye zlnnmMQbogKAu^(4WImWw0w?ZN=dVi;$Q~NvrR}6tQl3LhSa(kkCO_yswZMKXsx>uX zimJzKk5O0cVr=1kvHtb)ck9-Q%Ysmf&(N7LNQ_(jho zj($Es372@8fU40*ddKbm-i*a3RRB8?Ne^RbA4=Z*@N~4HMwcR2cvdRH_w3Q!s$=!eW;gY39?}2%Q2qr*z8xLVfW?PwLMM;jChX5s 
z7gBkxz`f%NPoiT+t@@|O#GO|~1q%od9b};rK)|D5UzVbdr`}rH&YFlUEvaxyly8cL zXDU*FGXqxYo)2Suhf(Fds%8T;<}*}R7K%kE2;DmTpWtE4Ws|?TSgEmD{idsXM>}i_ z^=Dg7IazJ^d^Tqrx~;c^bbOA&cDRRhW#%(EL4M^i=W1Xtl1Zl8YPF@M^B^aHP*tkF zL3}AV0~)2>E{Tnm$wk8O%%S#Wy%VoG=;gYj*;EJq>>iHI0w6e}#s zq2`mbDLMB$u1>)XABh)l5b(a%HReI{Tr%G@R>FR_>+On&*zI>~D-<7RC*sDHnh{u@ zv{V(Lnk==!oD z(W#`Cn&kfkn}2`m?#Xlul4?|{b#=Xmt8NOvn+W;?TZw(6Ky|8g1``4(aIvazy;|IZ zCUI9WBqoy$nxH8NQ0W?DUJ?qopz@?&2qBGCX||1>Q0?XS-xRZ>b6aWp9`ItXw2sY@ zDy7Be69*P1Z~Xc1qxk)ge7cGBc0Ao}kP{po49&Y=Ny`ffz>nsHeZp8fiMMcMzu-ik z^7XCt6kIP&tzHIQVqjX@YQe^DrR+I6U)*vd2cC6zI+St|YcyS^bcvGW_9)ZB>e3U* z^%I+cvY^yQ#w%O%7(t)aNa!i;Z$6k0`{LjY*b-Qx^#{JUC3wx=B1~v*`Mh1?LiCX< zMuNJY|8gr@5yrPq4_4l9mtPe(HN9Qwn2rFmIr>7(yz$Vp34vA2+0ib2-Nb?9ox`|} zAhWsCT^5slvXs9ykpZkNV9Qt zazYLr?1gK?Hp1&1Xq{@`O3K8;c<|qT%^dB?6i&mbokMqp-Y*wZk3wW9P^_~XeLIRZ zAL*FVW8HslyIinFoi|jBJvh?zk$^RZ!9%pbFE({EZpZ6*@5`jwCwobjj&KhcQn%wn z+cz05u+$4kEraCF=gq80ZVY~b9Y%vrr@yIQqIF+6PAy z)OxxfelW$9yUu0UJ84gnMl%Z09;{FjCr*EXGxDr}pB3853-VgmdYNq^sD*~6>s_P- zxSzXk?h7IZ`h1?H?gIxfRhc_oh`qsy;|NNBRs?!k!3d0?8|;+xfa7(Q)(jiBnr`qG z2Du8o$vm<@$hrr8KcEe@Q3aHDo*P$}xPJ2<=iA7IF#jcr|d0&jYJkBQ8K-_(=6-2>b7Ki(F1Cm%9e6$ z>?=|sD~qRMq+ z4B{3on>zYgcq65mne1>uRNC0Ad2abQ5@ffYH5{jlqsjEhmKT)v@Wo2!$}_5uFA?%CYj;5Cz#VP_b{UBsUQEqS^*ecKan*U``bNi4#~C zI{R^{+T_o|v1*2O`*me3EK!*1wyG|+L;mEGZ$QMP;61c%{`ozYe|-D@!wDzcDYf=j z7-VS%{8m^HN+&fZvN(&)K{jbx(~XbrVOJgNtuAV8RbF&o3_s9R*zAF&$* za%lLm4=3J&AZrqn?WEsLjZY>Y6%i^Y^)IZ7?U=~#s+}4EKPMR+NB%yk(vXdVZm4)ysi1R3+=FY2K$_whGYoUVuc=`e#*v-~Mmd%bIj&ja? 
zbU~$0zA`bL{g0PD{dy?6_cCv7lF}Nom*Y(nOHz?v)xKHq?W^DanmRfe5e~wa4q9cK zkG)|jp#F@4Ci$tEt<>ytZ?*h$KeoUB-bo(l05qV4`Ih0(qliuzYNtKoWJs(M(1g~g~7igtF|;O+J(2D%$Yl8JKe5X<$bA!Z^ZoX*6SnF?P0 zqy+bG0szyXOPGxOKlU_@`gGF9C9U$h0Fp(=$3K5C8`^UWOHg z#o<9fm&R-xV^R?n;VTw26*_z&eFqfqt-C^1oBq0qn2^y@@PkWbRVarvjJfS=9ISI` zQm`z#XcLs&oivPk&;vZ+kVNgo8dS3{vE=E^mFIc_h909j=}BnmA&@%F8c5lB=CIsX`ImxnVxOJlpHk&1)ta&U zG%=W^#NZ=rRXllW_PKYN5;tj@O9qf(db%N!m2$lz*cxynDK=Cn_8~x2eU{tPF~qd| zg!cI$TvvypQZgRa$p(W8BU5d`twZEWOmWI}WhBD*)XvhWcf0CZQXWkTY!nCT%yP$? zz3OxmXzsN85chtl`WE@biNjsR=APTI^3(hxiP~Ri5gdT&wJ4hJ`TBzi|ILZ(7Dvy; z&8s|;HmS3pdF0OnmR@(-^*UP@{b7N)#6f*@hMseJ-q-(eqno<))n4AqltLAz87Udv zxh~J_r*`O1&fHdTFyPG|D*I&5sJ|k_AtASCB9U{@0zDG_%(aY*ctB2bn8zi^0Rn`6 z7~|Y;qWQfKW@(grTZV>gf)>4hvN@g??AfD{f z9Xgnn;U7H;@g~}D43Ss1kItzgQvKqstcCEcuw@6-h|S~%q-lKfT*=N;%;|)p-77^J z-I9IAXs&(6%`+n7D`7m;BJ>N|2|FO5nzWdMPuAJz>^=v)s zmlmuV_<7>iHfrDa7UyNigNx9_V~2*OMv6`3KE;4(mXC7*w!En%*EJawqM#tGaBi{i z<_jYJQCqFvkaNq}VqJ&`(%ACM`30NGZfVDb8Y~`#$>6ybE|Vm`t}<)+TTVvJ$-Mn;*_nSJL6h$Brk~nkm_7gT9^Y4^%K;P^CWV)(jye(t24fk zpOK4L+|x?5((bp%>EyWF^;{#X?G}=mw5E(AZk8SBX&0p<(blBQ!E<#TD4UQWmzAh@ zyNaA5PWx3~+w)<({D-sj=Vb%DvkTl4p?`uvZutqu2V>Bj1^>DOiDxL^Qd9STHIR4& zJ~!4{(`-JoV(uzh;P1C-FnwH8tv%L>TGaSH*Xoy^^{L@)uu^y4M00U#Il3#lq`}gq zF)P%mSq}y?)zoxo>FIwitrE>OC(*VZ><4h}37i^i|h>C&Dbh3w(HL;1GW{4tdVmfelybe!>HtCSuhm5@E|6+v@_@dfA4d>9@7(X8DT279_~ zDwqrLeZxA$S|=}~^=A8`5LQs(Lg#V!cHI!CkvzQ**| zZiOPzM!H34#KHx)SxGA+S?v8UeOtDVI-jD4IK^YHS&~TNHYc=K>o2RTiqpaJP673R z5(%SV^TCGN$erlSt78itFM4_&XV>%HwyG=+D zEed6!aPH|ASn7u*DBsY z2D(ZR2A+!3tZa%ww)BtQF+Jv|UcHbEUc)UY^<|b(V#)SdS5p6TxLGJ+Y&;+4m6D_^ zXPv8zXza1n)CM!5EHe5qPDGS`0erJC;KF_g{-OJV_5VQnkG(i@cIWGMy=34^+VKUy zXjgwFn1>19@(8fXhUz#1mi{u6W}r>)WV{)0Gc*htv^oiNXNyTODDtu*|1PM;wp8UU zGTx^Xph(LAwDnuA*Et2{^bL!1^P3y`x=Pd&;WOFGWsRa(we(tn&?jqF*WCDG$+DgiYxoZOC#NIVNfq%Vn%$F#FUC4A$oe4KZlp=k(D!!0XYWH} zdUYEJ8f@EKdM(*pzY)}yzo->5F>=4W_&D6~2*94(afVQQ)HkmSt;*c)lB!&rfogR2F-Z!Z<>d?(e| z;tutUhS`*Z9?g@wE;A<=2XtPdpNC9^174|x-yc5ot 
z?S%uWSJa*>awihvpyS+1nLuZ~dmbm7C(37d(GF@SIZk6p2_dR@yVshBe3-K+sV5}I`-OJi}-R1 zqd%A8ZSu2NhKM}nlV4}9w$&EIP+QeYi*v2Y)}Qhym*BCeQ*s_@7GkxL)T0T{jhNhw zzd>(rtLD>+KLZM`YW_d~Sy4s?qFT@F0BqG1c7PCKbt2*X& zT-e9+J_Q|@sQI;kbQAttq0UOw2nY34`TSYyFngbE56#MQp|mWfg&AU9X+7Q>Fy4DT zJ8{i_l&#R+9J3(w!cn;nqv*D`Mt@p+k8bx~ZXQazNIHk#0ve778M<9y3P0lG#1aZSVJce@&n8EL%49M^Fv)i;DWpehBnvTma^n`DAW ziu!i5stl@cb>J%BTO(&#!Z%hhf{!SJ+px1ciExj%=l}M>l>Fwz-9PQ?V_qGqb+&b~ zo$-r3`E9AXKc}5(%ldj`3KrTsmDswa@Iz$f-)gN5D~^EEEvRBs`Iz7@!aSkconxwp zEcLSvO5Q}7F0jW~{3A5N*6QXkkRMfIs&=XZa)Ea1^F1$lHu>5?Yi-@M&dG$Kn;2}+ z2*v%YGV9WBeiPd7eehe#OKF`ci*n3qsbFsIsxjmh0L|2E%eds~k|4Rt;_^epY7M@D%muFLfH(z9frEv=s0dtPmf`k|@2K-atGTAeT@eo`(>`i#u3J*Jd&|cAz`1gVwf1#Q zfR3te-Hd+WXWaBw_0@>!QRY}{8^hjCuMml(c(1RqoBI|{snkUqF3~7Ponkw9(e(Mw zi^L+ycnzqepS0uHPGkU@dG=3S_`rJ~i;W?UBYg{JXxT7WzUowjg|gbr@5%h$f6j@r z@8+G1fatDXwVNR^!MM~ID=nE9H7%){Kahk& z!J1+)SMWS}PH2W#CKU8^4EiiV8@FG7z4|_$!3*qt-bYw3SQ0fiM^g`p@z7-hMw5j? zJ(AivK%Qd3JL!*^WGt20O~*szYRy+)$1J@wyhhzCbq>BK45tohUqF7GuJ!!C7efDY z$oc<%FS}p$c!}0DzVC5BU|gdO!EY0}SA}a7Qi{@K^~K17E;zFO%_{HISSu;@I$?m2 zAA=GXo4JDCM(I~cy<=$WJqF#kp-Vh_0C6$g1m?IbL}-`g(N}{`G2Ga%rI8!4XeWxS@vQr#qW1n{aj$ zKSttwvHtv(CjMog)|5NXF$ej7v{h?nOSVmp;Hzn6IWK6zhDVqBwW$w8Kuw0|mUJvCNZy&+IHU z4`7tg)mA;)Br_@regwbMgomG;9O2GUo9`&RDvZzHLs)R<u$9(c zyI@42u&a{Xhs-h6HSN@&>Z2`@PD5ucE(i6E!KDGLLD$yW<4(t1^`F&5T3i5~N*CyD zzcq;2nzUHr3W%{S^d_FGS0NUr2~Tf{prOrsCI&Peyqo`I^?KT1TazqR1X4@lqGYe7 z$?&NUxcm6TpFWf9-VtiL2bSl74i`jn~jZ=*7e z14VlQmdl@db4xcm zL4G=|zbB^dpHnve(ek@pkBrYNF`sQ!4@?z(aT4%u*NM3hoj$8tI2%cIIm%>K#w%>r z+?@&ilc4z>gfdWptUC+=1&&K^fkpXJZ2v+ke4L|Id1i+G60ENKh-0rQASDtn!y|m! 
zWWG3Z-w^lE0&7cGz{9V5et}`p`UYfbonFNo%A2Ie9~$W2>Aa6h3<_zvW3*`*WdYgr z>2(5uv87?K-FPROVS`(^)wGnT6~wBX7-{ax!{oHgt9qKV>p!y_XIxVrS1T)2gL|GR zoaaQ$SX3L@$lT+P34F_>Ms|Wj>Ip&!mgEHooJfl>7&h2n7m{(Cms^AT}2?#)tB<|0dE{S*-_3N`{O{<7(Z7$cCBp@{COTv1if*^y!J%GzH_h1t@Z| zml*A{L>T8Db(`X^qKO1`LJ$};!fSN|cbS5=F(=zuf!r4xOzUtYTIdLA`fhlc=kO9; zOn$ekcOhBd9K2)B)%Qnl`IU{Xzk0Xpn)%Mt8&7`)`Gv(!ox|tBhQkLj>r+U*=w$`_ zDdihMi%C=urxEcw2sB%MI;+%Dl+l@HS`e!^qi%+(m$Vdjw&D%v0x93tT%z|hA6+e1vyIwD-5AYjVr-RZ^SQS~SVe#k{>;XI3@w9eAQ}5&j z*v^|uaNzs^+UO?c6ImHGPwk3Wmu9GIOlY>+|2i)2;LRHP!0$(xDO zg)#i(EfR=fpq~h5Y~6ESA$RryZ z@mim_5kDx2B(*m*@*;{lBDnLI=Eu%>Gc61Y`N>FI%FX{<@$@~Y|GCX;t4wee1Jv#g)DF7|=4k~U#k@MDpe)~x-%5*NTl~yfI}<@4UuBAE&?R!^)Pwv2G( zYPI^=8qgwp%&=H!S_g4ga2H7+>$HwB%Q=M%94_)7sA34p+hD{QpIPW#))xwNE1=wo3 zzGv}JYUB`v800xTJfc<+iHZWNC7=RIH~kZ!fecgshd9@~L$iNUjrPhvgn{4j92e(M zBXgv=_rXLP=a_>rfK|_7r!JeV*tpF&*x-n(jW^)!2z@720`~XTtd01R(W=gMQ<7Td zdmnc^W4)$(Tx_^C+{bwDk(=9W2?&qCd2HWeh|5AxK-Bqfb?SRF^MSW>vTU|+&aI^9 zE158bdNBtPoaigm$Q)8v)8rL174?DhQ}NAYG1T67ZRqY=n{-Jrj_p~1Tz1EhK_2?q z(nXi6>8EEDi$vTKcT*-P_d3_Oq~A`uo&zZ0L=5>a>lwaqI=nQ+IoXBiKy;t$>puf- zy$XtPOvlB`Vp%O)j$q55KQIJ(*jJU+gyn<$75&9A>Z)W4IN7b)V$HyD@D0UVGNAFB zEf2fbl_xbW&5s7Di)I3SnhPPI@i(~$bc{ejY3=ZSJ+-V814MIRFe~jozt{(|I@G>$ z(9f%Ol`3!30(M*y8#EqCdRUEsiB};4UMP$W4 zm_m*OA2ZF@6_r#K;XIcu#qC{!s)KHk<>)E5av0`t^E5zAjv}I8m))X-A5qWA?hU5lV!gBv&Io{7Uy;aap~a{ zDuW%>V=qr5SRO$?c~x7@)%<*!dhP#V=|GSEOO=xpRG^nvx8i({pL=<7>f7$7F0G3+ zOT#$3q$}hos_RmE>tII?mKU75ckyE9jn%|sMCprt-5WWiXq!7 zkS^c#^774Eg+Z4~Xt{ZsX&tab&cXf-X+>jcZIyc1L%neo5_vb`(A{lm@diHPg-Zye z(ln4thCv?Dh`y?g^3B*82PXi~N=RxEc2HHZ{dA1S9o4AmzxAFm1;N;5BOw#a=F@SF zmrYoeu<5qCwwgM?TVEof1;%)c4^1J-FWTc5gS&y%Y3VkpUot2tuFqL8sl_4#s%aOS zzz-O8L|cOeVYHaceKB zIL^$?-eG1bOV%xuEKW=R)pcI1vqZHq)zFJ7y=kqk5r_!d70t#>l^0o&w7a`kJ+qTo z8Dm|y=FM3+Lw|Sc9KhK%*1BqYb@RSQcxKs3CXPg03T$g?)8J+=icq3^h*5aV z?afg9*G<+xLk|=A2TU^3ANA-%@M-1g!xtbDSg^gB18Y(MF!n@%ehjvGw@XhN(v&>8 z-Z>oJP2XUpvqng?rXC|P`r{h6>n4hKyJ%I5oAIso`@E#1yQdSFVX*_$j>|4ltSvO<;h0E}(wJB4%c-ad>)`yOGRW 
zou>hyHy@U(#h=DxE#v+|-oRmULQVt@s@0LJlRslC<=Lm%KJUinzy#>X2InIc@m2{C>hoi zT2s7mWlkgtDgQ+u^!C|txZh~I3Uo=b_hQyL(#LVr#{n5%&P;BTOn||a8GoK*>lK-u z1TpO7`(u?=S#e5T{U@$M(l*6IT3pJBCNNRN%u19MW+)>zkk-;J^gFC7kAZ#JCfxn= zh@ND$!_sY0=yUsqTBs(7S1P5~+n^hCPdO3p-1$&$_F}l7R!R+y_gL)&&rjOt4RSYP z4tW>OZxpWFh~%wyBF?_swTA(WKqw(gFj)~MsHFlM$f>k~m@r!KP>>VGrm`4-7VhQ* zr<*QJ!fi3A{_#s?@CKc%?FexzJ*~akJ8xsru;zS{95nQ9*J+7OF8~`n(VVtFb?)|P zZw|7w#+N&l;C|PR@axLTZrAle+uqb};|@8E9%I^&ZO4q`H#>B-##t^a+NUtR)`*zc z1C4N81gK%iLAtlSNLg4Ap{ytb3kqK<*7k`KQ|d!Ot=k%xlTVjz>H2!aNb;Pg>Qei_ zwa9G8^5i9u0ok>zAAAYR=SGh(^Pf-Yq^Kg|{rij3Cm{ymV2O-(N$oc!694r1;Gxz3 zgT<8cUf`b`Q%Ag&v(qkjMp};P-piYAj8c3LrV8xV_smY%HuGNEQ@$K%I|elHsAP&qoj%; zf%p?WhJA`kUOIw_+vfa5%iBAV3G`$J1Jh5tv;&=1-+;d<9c#u7f8`1PNaZ5m&tqfH zIFRb@B(~U=PRQ&5`7Fm^r*bY>ZpP>a_IsWo#!u)<51JrkA}yZhYw={JQ7txyMF6L;^JSF0*R zLek5L*aGJ1KJQ?{p{1dl5>|SiURrBzA^L#&Zk*%w*THXg?K(u>irnmYjWMj$B+LXi zXkMo(;JM+E(GC&qE*WEMOZVd3eTu6aM=5CSs8yM&yp*+CGraQiAQ`CuTK%|e6^6$k}wPI@^ON2$<>m;!SGc+QPH7~2M z(0gnr!13|OBV#bnqME*8ud{1l=Ubi-&EP_bxRkvNhpY2%Q^l?9`KDq-dlPM7{IW1| z45}$$w9gfkU|^A_Z;nlT{r4gNYm6ACE;RKABxlYTqr^5%ATzWbV>43%@(&=ykm6$` zT0x3!=VHGmbS=cQ$1gK8zor%nbpHx`N`)bo^B2lrKNJxdjcS28t*zVJwgz{mhDQo> zhksAdL+4b>MftB%KKkYZ z$@*x~;E`Ki&koQy&6B9!0?ue2&6n+bIK}Pf(MG_Hs=0w+!_~mb(c{f zvZw7Q)R@OaS)^e7s!jWnhpW0zE!_dC4;vXXN*8K&ZAyFr1J+yHbZE%CUGN*E@0hW< z&SXna_mby{KYjo7U{(?1nd{9Fx~3* zneMWD(kC~14MJWvpQfF)Y0xRvR+Ju9^{`viHd@*8;+H8GSNycNz_0^38!Hr1+Y z4_NaGo$QblXtXcsG1$NHQmu&bjH>7&-A4GqW0E}O7hA?x15t)2Q+OoGrB=6cp!P7W zt$=1X8;r$wU`+Ybv9;=Fq@jCTfKknbzympy+RD0lCsbJ8Pi+3#AJvb75`CoEP@D)K z5lCYc;sK{b=-w?3!lh5pF=Ak5wHY*gR2Vi8!051y#Dx~4pS&r-2pZNC+BngO0GrgG z=(cuEw?YZLAm1n>>n>;YYoclN?!3lw0k?7D>3h=kJua*<+5E7k` z9xkL1x-Y1K@pU-6tFZFfL=^XagIXfh&zw_fsP>zv)lHvXhdyI1aN#Xi4-Or4yfN@t z@@TA}#;Yln6?LKw9}a_h_FcFzJc6z}9bmQ4sB2xmNPg0Wb5$!~)?83KQmBRK_0NuL z%+h!sZkudg)DZ(?CbVTIPf(E#H(J;C2w%vagq{t%g}4(EQ9BWk0~ud99mRi~QkfCL4MEGjYLDQ+)(OPJ&mzVH3yA3dl;9MyQ|guqMv~AvD4!IE}FK?_bhjBAj8Gf9147N zCPm3E*^FXIy{9|917yc~$}e~|2O>*xp#Y1*!@Q@rR)9ck8IOMh2=kV 
zI$D4bjt`=?_`1EVP#u@6`lEgu?kLE1k#or8`30PN+-C>Ut&d7$T>n-VL+yheOC`6B zVJ!7)m}6+Sj>e7tB&?|yZibaG21c~+jC_K-Q!4W%QClzc?Kt^WMtB8egh@nFmxNVW zgI@@4UbrAlJB_*}S9HJ9zl321YGZ>)G4!s)X!iXE-yioU;)A{miblnT>e;Z1s;Q{2Jkv3 zc}>NUWn2TTu7RScJLo6HSv>TUr-Vz?4`u5kSCx@d4fdfkDrub^WcHmk$JDBr#m^n; z%FF*OB)z9+wcYO_Jh97>fW=P6nszzri~{`?jVrV6%<(rrsv?yRm}`J9mQum+%M>#& z+!`_O5FQ)fwxe?JaS16ifl)|x0TKe-d;8rk5xmhWs+&S1VJRNl`jZo`EA*Urwlq3vy>=T$6F%f(Y0ik6O|zgtzlx$^3k{)#4~I z6QlDr8EfOfx2f!Mr^EkFC`pSKvRUd48K5YB)T7gQh|CRg5zUPkl@H4_ncxc3j!iSRLYZ>=Lv_xs zr0+s%r(&v>YTq&uQd?e-t+>e8fwnE*{#JFF_imTuIIC1NFHPd-7Y=wN&`89hegeax zpLD1$cvAwNNpk>}m1p%`qFiv)T32V{$IZPli!!`5+c#pfNzbf!`B4Ji3CS%>@wO}N z3a@@#@GI8y+ZFLeBzJ>w9rQxuf2Zs>-gj~5M&}MBHC;#SE32pu z5J{Bv1Po9dvXiBGpP#np!P3M;{M7V;mGNZ|a|@XO)d7Lr6#_2L4%5w)AnK?A+)cgfeVKQaosua>@5GpH)BAf4 znxUqb9p+opj||izGy6>;4fZE~1LwcU&>zFqPm3sxCAPEa2KtnYNLaH+WGl9z_Swyr z5U1|7n$oR+FMZ9I7D?v0}6`ZfQ@x-tT z@GD}ycs3dMa~Bh5&-Io(xTjN)AiWV=iPc<4acc7KwY^HD6=sb!BlU})5s&8M;B)gV z96WrUO-kWZ#~mtkoSs+S7Obc(W&dp$83@}=@tQ!KAObHNxewod4c91N!l`);fC!XlkMpaI%T~Ukw3I0 z2BUxKpbw`^_O6<@vTn>@MaSuUUfbXs(b_N9I+NJGfhB1{)LHSvl|Kb`#lHgmSoQ2S zk|g$7Qd&*$#{y+Ttus+6Q!E+lCTh{YM7`(W7;;lUEVF)uW zM!f12bfw0Q^l9BSm(^jkCUNA=-?p3+h8xJHw&9Sg|Gie`xYgFN zRo&17n^(DIlM7#IMNaD8dn#9W1TaQBRldNRu`zzce$p*vLo?ZN=W1p&WZ58k)*vXa z(Dc*m=FV<*o)xs=jQ#D8=+C0hz!CONDHmW?VKu{j)Uqgg7OiHau%x62FD_0F!D1DI zp37;{-dsxXLhYhUcP)jq8ugO$#qob1tlxT{ffM?o4-t z20Mj_ZJ%cchj^b{9hhiKNn)*fl)40+k`kMn(J_dV%SRcF8J(&5S*t2GSL$br%IJ$_ zdv;|Lmz(kr8%ZAVEj6K#h1vuLqXd^w65l@U5P{Bm7g0wq^)>j%>Yko z4+U^_J1Sz@w_*YndChruzLE zw=2Bacjs(4&f_XLsWTu%=VzaqW-x?8yFElZY8y+Dw5){JPr$6jl z{z&vBl{6kgd0G)_;bP71Fy?th1O-J@L4!M&8L#f0ju_H2GRci+*Lc--lRsNS&G*bw z_H+=-0ZQZuVa&?M2r5qIFE*{>zsMzP34MM7{dd@gh6_L*>TcTiNz3Iw6Hpm99n0_1 zzyl7(&1VIJE|Wcec{x$`-KhzA;q=vxja(S~Q>x9_uwnf22*de?8Hb#C%ldnixqi3M z`5xkpJ4X*+4M}NVs+YjC~1UY zQ(yWM`)lvK4)_X>L&+@7%!|1~&dCp7;6&q6zb8k{3%oUpf9PSaIcs*w^sw=gX`cT2 zU2a%ousx)yNJRQ>+42bfNdmt9X4crNbKMu8g#VdzoVIR@H7o3 zN+f;MW=l&;E6vaO(!|++7w2_#HSkWPfNP;3eJ3)wv^mtBQ4tTz=}zKwct}JQV2@i1 
z6kaf`Xf--wo(b%?o7Uy$mFBHhC|hgQ?qBo4ZBdJwD+W&i&C95p>VG+11nNNpNG|x& zSX|_QrHjU1b@B&e6O>-&=bw|%h7a^*G{yeB5nLP>SJIw)sw#at2o)cwy1oubYsDqB zsy@yveOL)d`)DJm9_iej%uCl{9Nn0RhKQtML9l6vCtz z8|B%b^}^Re7iz&Mumqzw$+5P>f0x`On~d{>M0XfDu+m?x;uFT&)b=;HQDRw6!8gD1 zW)if0E!ppW$3m!!3_`PseW{|d=+X5R(r%TKD9V; zij~UG?C9?>xHQV}9hKVwZ0W7MWXg4tY=@Fsw#mL+G6vRn^s`xeWySMY$RVFl|LyL* zR2k)P@!hVHgENgG)KTJXHt2q1TJu`l6UZauDN9hMmF6lL%zCQ*OWucj5ATL*^sm3D zPt-`1XqD^s`rVR#@pi{^q)+$GwJO{@}!6Me2m+eU$S0# z5r4he!kwc$uEM*0!g4gYY}cKyTWoo2jZ_AlTYVOaJmQEd=8TzXYL3|u@zoyGmi@oP zFcnKC<(&v?CcHJ1mIaD|(pas?aouI{d}i*m?yV}H_J4OO{%gMZqb0=z_-SoU^9shX zJ*x4ICGpDhWp93?*-un~jlYCI3;_+r@jmh3yP1N(j$Va%4(E${*g;^%^$u-Uri&Y} z;Y$9#lW)1EkC8?$>UHZdoM_9F(ba7&g+ORkg*gerHM8*{2PzQc2pUSKBnqGsSlwsx zRQZVM=zrtxJ>%Ls)3k928`)_C$YgXEp9YX3^HILgiJ3IFc}A9 zOp!6Y2u)&29EuD=5&|S_ibN+MBR~?uAV8MYkN^n~;P+&9XaD=#o!!a%?VUd#ko27M z;W_tn&V8P{T-P#=THW(VxOW%Q;g(qF}l8YNp2|14DOHMo#<8$mRJ~t`}<&=MPA>Zs?v@8y*gm`qM zb!~M|?*knWkJ zB2u%1n`a3i4Jf-j+i-n9Bo;s0%z(Pg;+Q`a|JyVDhwe4d5-$(=XgzDYe&*>+oVjV& zheKZ{sN}`QM?nvN!(=0l5@o`qS)0)o1NbSk1p(2dp5?RIM^5&QHwgmO?m=>8FE+jz z+SM7$dXA>q+qzW3aaVER0$6cW8Phiflq$tX>btm{1^Dj$^XMC;bi>9iOFFDKO#}#= z(01Y!YMKxapX;m5N!uxyd)qtaJC+V0;RJr78@=uw!O$KpPU3}0(UZq{+Lj4u8p%g( z#m*Mr-J&J{rTg!6?LShx0!|(m*aGs0I1RUs5`g8dw1xkGJ$Z)hzb*Iegu*S15APncg?=C-g|#b~p@9BPveh)yY_8mF8n3c)V3kX^*pa@vWTLHSgqP zuTlTbR6=9==2GnEr53Fc@c~=t!d^&;N{($4ZtK+AQ-X-KMY?BNpn!a{IphhLcCDJT zai-a;;oO`w$nV^^>})5+{0t7B0J_d_#aX(j`WF&9_-6p2*T;{FdrxE37NAl*-2wph zGGn7wY#sXshr}*`)O!uIG`W;m4E}DNMS5A7-210E_LE!u={6h!T6%Qt9_#xiu%6n( z%zKn@APX1jh~*xnK;~E`V=>7I@J|J&Iw#L4ZP=?QT|%p-Pah3}a+Y~Xe7cW-=e47X zg{x}`SG;4UbX^v6ywa%-2cPxqH(g0=iGXQ={^hR*``wz4h0{+0%rRtwa8h6Cdg1gh z>pD8@f{;QDn>IO;ULN`3&@bDKpHAIeH3C(~dUbU}buZUN9EVzyic?tO;Z@roD%fkk>M3x1(jR<1L73ud~R-@8zna@X2*iT%1X% zzdz&Mmal|OPEh}vy1Bi*D@u0lx=npOh>tGa%+^J98{j&9e7ZeQLJFI<-WlQu3$o)j z5a=zkkZ^q?L(q8pDggsMFM@p3PCGTFC6Fyw6=5 zjfCheD2dnegjoM+C>ezwl1XSM%QNV0SUf7cRL(3gUYlj0ZYmC1(|F zV*laL89YsHGuN@6+j@ZkZzzf>Mr<>Ek1=(~5Aoe8u)ZbwWS%a`RIjvb5qh5TqGPf= 
z^yq+lo6xZ3-e3NAL;X`61@whheF}SwMqL3t6Iw_gUQ=2)v&7Q1PjM(aS6uWLc9uJu zY9B%wAEC14Js!`NmSfr=7nIPVyrUirmyT8#pOz7SP^p;ISPatsaEN!(HYunrI5EFu zFQ^k{F*{s+2K!_;NybTh&sRRoW-8QAkd{&~Z*9*ubZw8rxF8xGsP?;5Jo4UuJ%j9j zj__;ig1=1Kn%RiOuGOIaEJM1&-UmM7=8NL-_9-uUnKP9uf_5=DBL>7_x*a$V1)-WF zu9GjwiScn6ODx36_e((d7)MKwn)ggn@F^^-|Z zHZ?ZJ(gYeXpyZ5qr*ljyqitg|C>I^|K6yG7;cKy5slnK~UGgG!?5PoNbjn!5dDZw6 zTTRb3F4Ezs=;g@K z1&LXY5{}uGwe07d5aq{c-JV#u57kZwFA0CxveEQ;v1R(T;XrIyXz8M9ueA>Sox4GZ z5kkXW&pSmUJVCg&2>%ym)Ho)}24m)gM$Qg8fZ34M4j{yhhMIqyVZ}wv!X2Akf_q)B z@8@?VQfg0+t1@QfSG$9>;xqJV|b9#76*KGR*C-YwN+QWK>!{0|Gm__t-YgPI^7Y}fIZhdp?>7%R* zYao7~e-NVJ{gP`EO92gFqrs5_=J(h@+;!}|(F+|kKQtyv%#m|$K?cJL^qu0@h8ySm zAL?-d~AFBidTbv;)Cs; z?K-|sK`p7pYIA@``PU5{7c--1g-hh==G7Pjtq+GX%+B{ckVU0S&c5xmPt4i8RC?Ja zr5Q9lR#JZ);s5538>$=U7buhjrnrY?Hq2bDoEMtF!pTm(T55fWvR)1QZ$+^GSl|El zrNfA8`!*3#+)GlgWN+L7?H#6R_r48=LCtl`FQpwxk|e30PjJw?S&Yj>Lk9_$#+#wn zy&1Hq*523NY7z>US*1Jv7JX4dt?R_zT ze}8&?G2~HlxeQ~G$y=BWt{p*k;tR_r?@K=%BHu(VUTygX^|7pEtnAf)o8v#>z(>Ws zB}e|O);W3U?Ezxzpz2#j!d?N~PcimDxXYrgIgoG1Y|*So3OXll09%KG_pK0_#AiNo zVIlHgvOuIeTZF|auE3OZ(md^5uWflSb%eMe<{7}oeRf}B%5^r;WJ1fWHe0QN zwzwV7KjEpT#0cb;p5bhWu*ZE(;!0@tGDPY_ePE@h0+#K6s|^q4=Izg>sR8szoWapS zGW4(PNWy#Id;T@Ce-F%`*+36P@}Mk}$AYQBhlb zO2^P>aJQuebhXJ+EtdgQMgqf;7m<14$nR2rXc}Hsf^3nG_^HvI<|%itw>|O|=vNS* z4?5Zm{&e$FtNLB2wZiNk0{rPbJDeAoZxCq{kuWJ4FaIGRgb&~?ZY{%6k2JFw!#24jK8qzSnYR7gBp5 z4OL_Mri5$%W)l^ow5%ngLfwR|GI!sdQ?DNExOK5D&Ehg|J={#HQDEis$v?z`ILAhw zqq5Aud0E#2DrH?R5FTd+X7`LC^Q>CBguNc*AjzaV;t_dpuA3#ojWI042t-yaBw%() zd`WO-I0UD9ziNfn%sRt|chI_v@txj+Iq&|eehuT<(Nuo1_T(y* zpCgO*&g@)$TfeUeTd#4>c^YE9w9la(oR{icWy)edenH$a&v0QZRTPhoPtU)i63v^s z`jzO5KO7ArryO0KTc*Bq{+FG>O$oyx?ClPM{=Gzy232^@qOwUnd`;m7Yc!}BQbGTC zkF*>Ky}sHK^~FGlViSWA*;5qIQ$26F0oxrYe4*vlP=m%4!tX0O1?}q zp6>?x<;AnPMU}BaaO6tZmSnAwKKXSr$!>4QWLWWr9)!%)qE+E6T43Fys?nl0ejWcl z0Xi_i9gH61bnCS{Tk*OlO%tYZgry{9fJDd0=j>oe@iWdu@#);z@%y#Kf1zblj;D&= z`rQuSa!2jCXx0D}#jw&bd%|+3Qa5X^dsM9+zRj~*`Ibs0Vbaf#5=7FNK5Fkr>gSJx 
z)WVE!cI3UEY!2QMxW=wl&30KP)-;c$z^qLBCJjaG61_i0NNX{2Hhrk3SO%+XaPTY@NKS^HN8fA8XsJ1)96=Vyz6Uoan$^R>rEl~T=c?ud*7l0s5hq5UTlu< zH08x7q=8jbUb617BYO_Koo-ii66KR+M%M~)4p)-O&BNN+G7vimNUmpakfrq|5`2Tc z!B$kW(xA!L{F5H9C5q}c@eRf9!5sIV`@^`=&sV$m5Gm)(z;mE1Wz+;IT!F z(%+N9f}87eA8MVQ!qRa^Z;j73yVT1lt-E={3ZBVYv&3@pHP9?v!MY`ct-GPEX-`bl zE9jHOlh4vQzDJm7GC>r37~Ti#z-0Rn@9*^#&z=V?8clb&vh8<$`-<~@M$F@XY_orY zGZI~kIZhAsOm>gpol=EtAJWo(~5H*p9mB9Jw~?P>D+Y=Z5YFV%I=$;I_6 zX8HS!FPt|^oTpl&_&nrBNR}e7SQNl(CF_@WZA!O z4J9mVz4PClc|A^QB?G{+h3MFoz&3=@)uE*Fi&1YwO3s0{18iCm5b%Wz9;cg;{Nc*~{O!c1OA>4o+*lMZJ)3AGB^STTd|KDi%Q|T)XNB1G_x(x2;o$`&XfKx37DBM7sv9g&V>aI$dg}Zl#g2;{w@+hMf+K?wQ zxNI35z>4LcT5nGjSP^h2Yj8_jh*UFlGzJQpR;I+joVX%kN1Tack(EuI~O>P5d ze1sitWb52F@W$>0Y`36k+dMwW&!BG=oi@i#2pzF&Relme*s$(CmMN((ip1}BIyz@w zi3LZ7FVw-8R_96KLNMT>eH?Y@ENxzdLTv+&DE?Ruz^t5KuF>F{zt(S-q64kS=9-L6 zJUpO^X&8mo8WQegJTmh^_1v3}stYMY#rX6tSU8)WVi+14H9MaF#s1a58Pav^xd!#y z8nB(Wb^h}UH*b_g_R=xQDk893En( z@SP6OnAgt1CeEelz=sL5O^(FNJ6V2iavbP*�^!8wqK*wpWJ-mF_)>@L`lz9^@k> z>5cqHPEO`=KD$?${+rF=?3Iq#wwn%DAH;5w_wa?K4e4vmzWNmh+NsfI;>`p?ci}82 z#z3rH-?h`ma5>YshzvFekTZ+FsT9DiUJS3L^b^sws`p?vJ<*u+Z`=pre7yu`0nGj&of);x~zvQ{C2U) z0JM#>ldMEpZOXVCfpM`$p`Wr=hJi{pe*N(b`=Ck@6+PY#x7nx;)8oDr6zkS1A-XYH z3ye1G_akP{jQ{kfUrnkKwvB#kTSkG!n*^HcI7F3fcp)t_N8={iEqed7h1IBW$(Oly zI4Kk4Na!&mJ>4y{{TlOyxzL>bhq^+tK``jOUyDoSu6rArmm%whHKa9GRi|QdyGFT< zeXLvKWmA@#$Zwj%n-81?IzU5ua!y=|H$1yf6d%#hTNH-#e<0wTN-UX4FAt5Icmx0D9SW%?qPI2hSeNgQMKlvzWK=>8kRq-{V_n z*hFJiZ%eNdS0AuDyP@Qumppsa>;2;&XrI70i?Po4=UL8MG|DvAMw)My6xuq<{5!il~Bi}I{4vFZ=$6Z z>(*7|NJE!Tx`P3A78{EPFCWC2GN}UtRNT?87P{l|&jB#;dgH4Zv9?+T9E6ydW?r83 zj1J|3xjbCtH+P0^$q&;4eG8SFeGW_t6x{< z(4o}46|D(x+_qAREmzFmotT(}g@4Z74|~NeKkiiymb=l^4S9OY+bJd<k92UMve!VTu$+jtB9^{3qcat;G*tdgk(da@OE@aZk#dJ&vo5 zGX>~Rf)sy|$uNv#APr$zu&~4}uP?TL%dG$3Kj-L^1MgnF=~eBo^V0SiUDqa4UP-rt zlUB~;YbEXO?3vp-*kxo)%1qt9B_>PMIZ+=DeKOH~jXgxlZF{{X=f0iz7S}r^65NYp zH}IysvuvupMA$BFBz>AB2({tCPH|PMbuwM-7$)k2)n4!_eK4kV0i!4&xUA37H#);D 
z#8&d@D9<$4&W;^Z=g9CkdsW6ZgcZC+HSf4)ftv$*QcNeXDw#9(DUpcHHU5k6G}n$xBzkeI5Cn zm%dZF4QuJbO0#O=@yRjYwQlVBNLLY-VO*aw zzh>_2`z)2^3|`+4#py0ZEwBt;*aNM(rzA0^E<(sv-2?=Wwl?LANO>0DxaVdkfcPi; z2&a!%qbBPzT!IN}rj1jr;*3R)8?r_7xaP%&E0G5Co}05<@nE{54kWkbCdp8*B_Twb z!$uP#edfhN!R0Pqr(FYO{9R!8r*2!-!7T~R3D9G|l*WE#jn}H3^DQkf@~7T3`SZ@! zPSTpmsouqSr{D&I1_8JVC_HUe&Qrx?C!^M?F^Hw+*j>=#m2L&p^6Vv%!a|pw_Ck-c zXM8~I&0eAWah)amj!F@1qp}wUi{(%8@e+B~VUJN~DOo7LrLHvTV()MAw~WOFm=|AW zQr_QUg?d%C@BF-zy5a;dGTZ&U(~!7bHk{KRs#k#0EAOvkT>}Vdn=RO!O2yg<?U*l&>^;~slzlWF$Bp$fA->(|`gEk+leP-5Vd zX^$0m%&t41c{foZ-f7g(|S2v$RjJ%QufjpcGELFe7m?`XVbRTGP~|H@}po6@imZL-<6^xZ}e(i zLlDbEW}H&!q@tcmAEw=mTdk0LirYbEYlQtW?Fz8`60=ABd2n&RXT;;cdU;f=$XdlKqdSG|mThlNwdE=Li2b6As*cCHOf*Y~eHJ9+| z1xY`%yB$FIW268Q$<{>6uCr4vUX6?hB8Crkm$!cPxmXD&+Ec_l0vgAvj;W?3WYl)P zT&~M#;?V1o(E9ktj9GGn@%5`%gfG6bQ19GO%yvszi2KvwAkamt>Ja*ps@{Ah=F`WF z$Vh$t$Z!qRUIP-9AbF;uzoAr#@WSfwIcI{)wcgd@$A^gi(hviL;hT11cYcfq@xl2C z5_!zg9)V!+5d3{7d|7gzzc2u`8|)Wgfx|4iQJuqNbdjfgC3J9gmgC&rk81~Tk^A?EYlE1Yzt$tmk* zgFpV^?3aJ@u>bJ$@SBjc+l_O&&@|&2j@{N^^-ETQXsZbXgq$o7qr=(jN%erc9h5 zJ4KC`f5E_6oLa$FpQj9(S}e+GaEasd8rFLCmwRi_{U%8B%_4>Z^CO;-2>`SiVLvZ^ zv&s8Ny79tcqpqv>hL`#w!Aav6%i5vA0U%P5PI9BBP38u;D0b#aRIE*=>-F&Iyq9rdrom>O%|(n*>wX@TnKt9?8u zt#;8W>>rFVS($%JJKdb2stddqJhwWOk&zj#Q{dLre`vjbiW5#6O{UtO5+10I?k?6s zE|JVXL41-HI^;IkfAZa$KqdyYfc-~YB*2@y{(^8j1Y9f< zhe9VG%wvhGa!`KS`YP4~;Hi)zQ&0_rW}lGSgF+Vxgt#b*k0B=Ji4wqg^MG|c?37yW z?s8(pLz(%KpXs{#$N^2NczO$rrdjVKu8{pkZPd<12QB`r*wMJPjC@p#I|~k;XgiKz zicr(_rYz>$RZxNR%SP;T_*WT$TxZDUY#2wWm*cfBSSMCM9rhQ~3Jtj70)~mhteI*0 zgm#6=f`}lukbN|uesR!<5 zQSPl>Y0qZ4+<8g0BO&XCkvx zR2FyL+X zg8{Lw)?o<14}YS+zZ5U8h7suIdZ9o_XuYifKz@$ed);MiKnU0d^A-taqqLGm)b>(9 zZy8Higm-dcEkrgIlJcaoEdUic4*2MBlR$=kB%@>ODpb|d(n3|USK~5xuy8(}Y4W7a z@>eLWd%&+yT2)3QQe$n8yGP-rjnCP39r-aOpfkiUsa6(WnCT?5FiXK$&x8)Oc_-j- zq0i%MwEUEI2Oqyf@IsUO4NJ%=y3`ZPdzd|J*e2rp!Y#?g+r2N4kMsu7rrMbt=_y4z z0PGw5aAmi zp{F^JSAXABe{WP%<-kc#`;PP$Envr->69hx<_&w<^oF$z%!9odEbMMz0v@-#mz>;g 
z`qe=&=SgQn2D{IkL&|ogx^*dU(z95kz5U8p5qMbiLU8gw#d^_Znto!{$lyD} zg+Khgnf?=weRR?#`M}Jw(#tp>5O@ZoKT_#1}z{d)gRub{q z^-~34J4Rbc#C7V85-9)65$jEW5stp~7)ixIFPrUwBQghf)m|kLrCI8FSTejo85#3N9nb_HDa}*Ahn~H1~M8>CV zxFXBu6`E@E#7Ja1t&o}SvzAyBzc}4{$7F(m?q>TvUi;B%uq@pz6?K%EQ3>z~f2Pc$ zb{y>iz5RQa|GPcxBCa4(?(HS;-`H`5b z!j{mVZ|<0%l@2iF6^|bmL`EX@4Oo`K99g8vDX4d8_UVHpml)B`F~rmDgk@LbA(WdS z0V_b{5fbg`xRdWD`fG$M4=Bdv1z(OxZr@#@_NJ6;AkuWUZliS1VO4XM418s!w{@#WE{juNKeK_>7t5*C5c37aj*w&FW zD($9?iOfxf{!z=mYfYH=sud6OoTOlr%<(bLJuiyeX`F2``TRA$(0fb}o7TC}?7BnU z5<6>e%GmCQt49}mRxgJ!cVPAMxkMR{lUPy3EP@+y(xJ6esT*z=pE5bc$?m%WlWqcp zSE^SHW5>F%(wyrV!q2T>NOy-S(1tT*Puudt&i1M1pZj#$A|G;%YEj^9aF$wIQU|%7 zezWA;p0P23bL1^(f1NA+|0gLPU~w!3dpQ(q{xUJMf5iL6)IRa+G`%4kd(e0?@uZq{ z25iAr1gIx$ev1!RZOeJiW0)?op#}g+iJ{i<)V_u4vzur(vLIdQ@?0LYo5Zf{S>U~T z+YH9Hv?xHIRx~&jTQcTt=+>{xGCFG1EAf(N;X1qHa~}@9`*6r&ua`#mHbA6RUvc*g z*n@@sNOIux`qWxIej*6EErneA`?t=2AI+?}$R3Z4G^7Ds_}{&JHQZMH5S^VVHj7k? zG@T0_932>Gb4-!8$6;CdBKG&s?Yyl3_cgUKny1D+^D?Rh{^zYKCgE`v6Q}6~JJsyz z3A7ncgX?!;DjO* z0eFe?xgC7d^>K;CX&}N&zj7sYY_k2_RY^4l{aM;!__Nf4rN%Eu2j4MAO*~i&gD*%$ z-&L}2e$PWy_uECfZ`%8l{REmc$IG>&HK;ATN-S<6xL^SJ;~tuJj|G39@-r)%)#Gq) zJ_cUj)Fz*GNfn&xk#lJbo8AHkvDLQf*E6mFDSMgzK8a;-XxEA#uX6B)XBfL!b>;Vi%Ea^JJ$H0hLUMl6EWdCdTgDvsSJTC%!I~(n?GO&`nNXI+I(;KjDo$ zvo2`uQI%q_$Rt6<*B2%=`&#K$@h`wMJy%S!*)jtVFCS07Q6Cj=$&3mpX!fu~Gl8Oj zG|s^OYuz(xqg$vMmlTH2x3&vglb#G#-p-}4*+3DYqcZ=ju~2yi4Zr_*yAoD>almWU zMd?Ez1lPX~TS^WNFKUrA6c?ZG*x+Q6_^Ln@2!M%n$!=mBm_dYZoDiGB3L_1-E&YURtW|oyp~(Ov3PAUjpHHe6SRPUq6BP?>W4xubPxlVA zAZqk$WTO`I&^cz8&y)ireV~8*#Gl^%>9?-w$C!gJvi?vA<|frza*0(MN)Gj15M&v$ z4TmY?6&ZPSCjWXF462YXX&02=-4YWSrFa#-ZD5nGAa!eu_Q>YM6X{OX&KJwwa%JJ} zW^F~X4)1<7Z$6QRUj$KMh3U6oBqAaG?l{aebuewXO39;R{fSOrfR_-paLnTZ6VC-$ z@o_{8*WQALqizC15bQ6Val^3oh~j zrP(=ZYZ2QdH%sfmcDqUuJO2jT-n56J*)ATD|E;ln3#Qe>|qQC)w(aG~>NnVJ&1!}47 z^7YgCIFM6KjzL*PvVT(x7Ba2xDj1u*_P4Cu>7zRtI}_b zbM<`F8d;?6!Ds@9jq+83T8?~DuRPP8{bTmfsNv;;{ZgZ3ATkzXR}m{4*_%1Kx|@=! 
zrN!OS=&LVOznim^~MNw2;KYyf#1X8O#3EceZttoG~@4rXkR~ zBirmyhSvseR`y&R!%{c?mnr|V9s2#9jUS}nsW>}j<$$*$p8-L_mt978+UM8p!sm(F z2V+ZF(?a_rw^l;x!%TMO)gc!}AIJLMz;)C~xtl+{9O~96kfm&>xa6mFtUJ@Hz;sH~ zdE-t-)M&@#bSJS$cVqy9N@ysKaR@=u|6GdpdEXmGOL+h^Zgcrq=Z@wHXQy~NbuNmJ zq0+F~fP8RTS)5-phe}S-)1P4@^AW_0i*4y_Q3x*mjUZOkL}?*@!iP=`<0(-Wu$5f7(XA`>sz|59lSfta4e(wELkT zN^pkL*6foS=i!IqeOjLOEPN=e*^PJn08kk*a5-Cx%Q3?j!0bd%384OznSA}!}wEtg~;Z0nrYF&jMgcEM=Cd`akN zP%(ik&5wBj6^CXQS_~enU9J~VMtm(GT7F=#I3BDGk(ghlgzR748Z9fpMvExwAEUnT zi|d_KYn?qp0FO72aFE9GlV9Sus>Tm=?Q4o2rDSES>r~>mdun}bUCuR<{trY%Y^L zY#)H(a~JR?Oq1^DC)?Q`!$t>948LG;cEGi_rppE`EC5u8&8U zfNOA{lED!}#(_b^^fzq}jAFl?=yv=)g6FYjMkz_om%B#up6q6iy#=-;SKLEqnb&*!`lJ5vo2{wR z^1~uLl_0n?oJtkIX!dG3VY!4U$)=y zLcw{>>5lp!n0Pd57y~CROZQN-@9T?#|#mxS#C2H9IG5!%{^DwkI;x zlj7LdZayF)itdJzuv|0-6|RbpC7Q8kVB`)`=0Qlt`)PJlFJp0PpNXEdCw(|{Edjig zHVTEzM{Iz?%F>}{TU2AZzp2I5xy3l_Ks^1S^5;l-N^1~CXJt-t$FzH%c5OV^z!@yR zF-^*-we;8?V{S~v^`3R2N*l*oPw8fGb6|0WBlN(6pG51{3d!C0R*TZ)-=i<3MX9jn zU#+m==86x8fNoC059t_kwBNbF>-ezudiCfJhxmTxdwGjBkzCZm1jQpze=gxlrmyy9 zb@*s#*0T-k^$y>p?@Qkz${&hh*hM?938!sxn~r&aBBZ#!$u-E0G9Jb+qon-hKQE@8 z`8|17&?fH$TdoJ)s5PE-Qb&PjjS@!fYZbw?#6V?YUDKzWIibv=O;D71Lq3nw91#gh z>rAOPTZ}_^e-d#JU8JBOJy?vw@c``HLCg8XF=809YjUk}+6e~X`wJkC-!H?7N;69s zOmePV_e8w`C`A5*;d-2}cRO|R`wO-*;~ksuKE_6qi&Mu&Z)Rvh>!i%5_8l~O5Cfb4 zTHJ-j+9sQ%7zhFj&ch>yq#}fKgl4oLw6Vy;Zqs}dxl$b_b`6RjgiOyY6XvZe`Aa=) zF52K5=^(yCFrB%fbH5PH7qo<%)8r9JwQY_yr3VuilMe>luVfXkm%5T9cN1#qYgz}d zg{{FuJ3PBp$78Gz_tCV-MaPUGThOts#$J=KjycZI=rG#gx|>xA`PaN&y}|XHWx0Lp zHLkI7sdnCnp${YaMK4Yd=A1A=={nH9_bmm=>V)(pbYxPcn`mt#WhfTjvv`jJ<9x&T zt9GaY(1eLQa-aL37vmp%KRnO*msGELPDa|f{`4a)%?o|BxrGDkIVZ;2`7yLl@Rpdc z|0G^Fpra1@X;63M!aML=u*p z1PS*s*^g@9Rpt^rAg(HB;2B1vbu0VNnXCiJn)HG4?6j%8J=6=VYqtlTlsD;>LLSTC zzX8)SzZ1BsJAkn1ry4iE?c2)m`vlM?nlHB(OTo}O-@bQbH1@4GG35*r(Bp19<#gM z{)0+WD4Pjw1Yg!=6%W$}H-9JV-S~{UE1g%xq}Z)1cshaL6&vX^fZM;7^r%;&u~|u{ zsrgfk8Eq)TYqsxQ!($zdDyzctLcgE1j8U)cawneQZEeodWKH$Yo0<6BFRGw_`R%Kx 
z&24qGZA;Iph;}^kbK2xZL1Elq$XlCiVm2J;PJRk)^`kFp!x|cm)Xi+*)LIr{e}Hxpl$G@W7JffE)?XHs8 z?U63U@?9f0I~Y@2o}NClx~D_|3H@0kM}}(K@J-7uK(3-`sL&$(c3VPQCu+U5SJZP6 zzOdQ*k`W#jAP`3dfqeO~3qOPJx10IiA!S__@|C|Fp7ELD&XN0LYB2RIVU0TQ8i7_% zlI8<5X2>m$H2~wg@9N=82e?oUge#A-A8ObB96YtCDgw1lpEAJw4mqO++0xQM7Q zlW(@4%U0j&RZKAiT}{1qoMLoLeL=WR0P|K3DEWM{{cZf_mczKa0U6b@x-(V!jvD&P z(uR@2>8KxJly0_SZf5%LM{l;qM~S`FkG}V)jwC4#a_Q%ckTkCq}7Ls zE?*#T4bx7{vtt#-D?FcZku>xo!$^j;&dpEcJ8(7vsl}ESzZZ~PLi3iAH@a7eXOoqw zd(CA}2KH5(0VX$t0L(oTleL^xjWvO8x~2T-!$NIQ_hk@W7NTX}O6#g3-v7nl?a zQL7h?$qK_Xo{n7(M?y{2*#~54cc={u|G=AP>%To84#^X=INa;WWG!W$xrY3C13IE- zP5Ry*^DLM=+b>iXs0AlQdKpyv^yj<(s zRxbg)ksQgX-gW0M*7XEQogWU05LD$k#vt@QE4Dl$UNWpKH@@B z5YKi%KC43F?TDz(*b}&ZBnfv9ZH&?}7>9c9tLgx)Us2bZJ;%N-#m$RJCpgZ9BW0uA zqp2+>AvmZfARD;_(Y2gvAPy*~y~nrTO6f-)O>X<>OG2`apZtbH&i*d4orqtFYjaq7 zAa*P-ismGx+J1sBO3*s;{^ml$WuqdrqpwjpNUdjgZ)8_Fvb(XEvI<^)D-@Nm)O83Q zTMed}3?x3cYM&ctp+4L|xjuMl5PNo6j@GS}yUezy`m=n>_6RUj%ohllus({m#GSpq zoz^&>p%1IO{*?pn?f-=-|KC6LcdHVMJ@@xMwtHD?2hJ&GP2z3d1!{wDc}8yZc$&&b zYk^8XfQ$#Zk-HjFkU5$~lfg$i_A|>uOj+A$s-(Q)%$f0Ae&~yZNxljqM#;rrPD@cj zA;sEbd!`e|06Y@qK8i{!(|3*V7`!|UE!Hs&$sr*U3dOB@<|l?{gER~RnXXzoTxtBx zjI!o~&vL6+fz0rD#fHg%*}{5CFA0Mpk2V$B{()1+V4?UlXQ+ck1M2x(#=rycgArL7joT{Wns z&d$&mWgjon_mlGgGP|eZ5rHo|dmZ(sf+sU2AEnIl%&6ii*VP8hO9&HTmMlYsCN!UzU?6ppI&2Z;d$eDTLci$Ii|sRTXeT`t{62Lkufi z{F_)nZxE3I2{a(ar9%6{MXYY*_Nbd7ng!_12Nvj;%(48&%|U&H1LJvRPgZmnntn z>!T=W`CA~b7ga^o98ij$o%qcvYSp5xu+9VI4u3APOxrtCVxv&30DFUg zuU!u}Y@%^1$-84la*d#@0*}^IL$|A43~L;ENO`hq!Y}u;>zM8Yy~4G=+xw8vdBFvQ z$tGfVhJDI&VWv9&ZE}KZ^yO70V_81~KS1%VycgI%^3viniwknYRi?ZAn<0!QMKY zE3xI=@q)~p;L_+!exEQxxSQY#?#rJ`32OdERA>-K!-> zl2SAP0ZFe&>y2oagjpBS%8zrMT}yL!F))MWdvv)1Wu8Om6j%Tt!@8r%aF`sED665w zvG!3D2=@x!K_wFN(%x564v%Fu?v}acN^lA7O9hcubuecBi&!?$&tX+yd}UH}FfW>* zbDIW|Dg9BjxZ8U{jxl!YTvX$5A-)wSsw{|19~;B#DvMU^y#~aji#sV=l~RHSQ@#)K zoWQt$@36(O``2G@f#3S4qC7hi55UJp;#+`}SSbJ^^ZT{EL=ecpWBKl~NPX+tA{7bh zS}5kN*0LGJ^g)e^dC!yOX`0Nj-O36ZEIlFr0)m;&Nb4TV7i5`#vTu^$Hy^b4(Z0*B 
ztpSjoA>4jldXYnJ!vzNJ%|+Q2Z;!-vuAn05OFV#*_=CSDJ0JCsa(^d8W*IT-fw3dQg=&ef*B|J-D#vNOll?FG~dt_c9%X@B0Q44i!@`VO@pROA5zV= zJbHpmj_i-u&sfdj2?eBmNepUtO0yVbWwn!yj`f5BunsLND+@Xj{+~IL272CU;cm+1 zImNXz<1MW#PP9}W_u;55Y}tapWMQ<|s*aL-HUH;0>DiY2&QVt6piF^Y>Cs4H3EUx0|oL2;~kl;JHT!T zzNHvh9AN2#=S}BUAEvv`ghea-z@sHo7gU$KCRQ_pcz6%%EezW$?87dx+fQh0v}@uw zt~_*YQxLwvcm=nz-VasOjLkw@Fz@2nh#AMVO&w;#SVP{O>WF9Gyj=88m(X9XR5tG5 zpMKay&UMP%vmg-CL$e`!jRva7rLk_8@2z(EGhOOK8jtrUZcV8`PX>lxmtmVk!DUB- zd`F9Sj=THn46R&S>BF29(qsM15iFU~-GAAya+>A(#|zXcU*J;ha&!PT-ntgG46D!d zyFEBEQ}&(>eBGzvC30w%t69U{sAQIv%)@t74HCzGzzu0*Vh2b3#im%pr#D+m zQ}!GCL>W+gCa~cwlZ@=FAPFsoc13Ze?c~TQl`W8pWY4Z|*mM7u@re*D)UtKI&OuhU z$<+;Cc5rcZ#Fi@E1uEuCXjjxB8--T6czfk)vQpC7HzVDwXl{5n28RAvG_Ii@Xs9e2 z(M3Hi*%>khzY_0P^dtKA7}sC3HN35KLMzH{q{@~J-_3^vy#O-eu9V{idS2&m!~EJB zuqV^)-D^H-hgl`B31EwiAC`dhst%`A-o;Un$-rlb@y!nULaX^iwn#@7|?-%x?< zghla7b@AO3fBnnrtw}2G^>?SDV@}OfWm@zcecBdp_f6+uUA*w(Y{aB$aY)Hl z))nlAb*@3js3Yq1p-TUU7jf#l7RbKVg_FGRORlP5J4K)_X zzuAzq3dG_KHd;9W>#OC`Y|-O4u7!Z0WRF4Gh6z+AW(oDF zLC~0K6Z7D6KxDN`czCL(Pe9r0c+A(ofA@X*L&l#$MjO0@)ot%>nYl$0v4=+DpDD9y zH%9F-rZ{BOZ(I7`7g>b#mRR1-k2>FV;irY*V(1C~C94L779I5`&28TH8yMAa1~!$B zDXnnvYkxrCXWf`MpNOIS}kn-9+6t_+ms_E1D#Z}HNI)0%(zSlwUKaimTQV? 
z;CvUGzgL;XXv#-d810ZKpz zKnMVmL9DHxM+%r|4aZqVK+B09AY}yj0|XQoE@y=G8fh*W3DzW3*8GX)4AZ|E}9FhHx zxMBMJ>K`k^|08aA3g9aaFz9>#BW}3ZBKRL12-ra|wt#VYc5;VlK@6-c7tq%Ub@a+V;Hp>o;O_x+B6`gNj5VP+7i1sKVr^?1~@ER2n62C4k0W>bveeTzu6d%v3@_dJ|dP}f?#>K0-bO6ePSuNR~ZZWUd zJ%2oC)2~hU+=ZPAG057HRQWvqFTZenBPsm`$Q5Fsj;4wF<+bbk{j)x&Co0R#FZwgCS19rw)mIo5_ThY0|}EE;}JV z`WUOY+04hiMYpWp-OV#WyY49v3=mLW11KN_T1&;3%O4N2-qdz0>A!o#$R|PSW9nln zpSiLQal9~zqSed$0?$ChlHSHec$8UOu}yHD0qEO1!pd?KJe|-@I_!QWpA%|pJ}DIT zCe~P3+>BHfDT^c{e^Dh2fU_tv$-INAbRewI0IH}SDGh(izVgP=tu0NF&BzHiHt`JB z8I$>tG!%0-7+VXigURMHiQOFh3Ay{HFtQK-%Ez>V~X1vO&ZVQ|h(`g!V z+v^(D$51d`8DB?E5*-g4n2bFobDxi5BMF0~1XCv`6Cnr7X~I!c6G#s39cJ7eJ||tt zLA5MNhge<Qkr3u+tyeHlZ!O)qndhAKd>16@K#E)tQ6&{p;ViaZ6isz&RFgm8O!z-U;m6mU^4EBf17ScB$NaCop6qXh8kTx{Mz{y-Qr$ z-(v8(|4Nl*cyt?_X=!1NmB(2d+YOctV&JKJ*ln7Jh*TAY>6$R*)!^kgA#=i z(`e;_4!DT0uoImfp||@Izo;kAB4kP|^OM?|WZ|LNdyBWm^5cjT$Ki($;tB&JLtY?? zN2<(6>NnV1h(*8Pg(b7F;{ez9^mzac<=};hT zl@G<8Rt*_01QsRijcnw^Bx%R3u2;GwMOIAH!`C}QaY+aB1edOKgs=usxH&>HNyEv) zytf zg~#u^9IQZOqaT9aDs>koISr43c!>)+RX(s_=MTF&Bm#CDrr-Lz$Z)9-IPreM5|}Zy zQbk5a=wn4e)F12GPH^cSL@_$6=qq&;=$2E$uLatiq*u3P)QgNp+2G&E23T42LWIsiM7 z5(+z%>iTR8$JD^ia4ko-ovyuf6#Nj=yqK&oe9JB|XA>{EZ-jN%aCi15k4aWW$1gs5#mZ4wCsxAJrT<&_^|gU%4pYqnaZpq88X& z1w8ZundJyJy?aTTSuJ-?fT##IBDAkfetRaupSIEOC%7nx z8lAU$L%wg5;SYj4(A|*9WDhcM+9)aw76C}VGIS*{A(P&?=I2C>#j;@$ffFXjTYOyN z7Y|%JGu%d;24S)WVkv z$XBIT3_Q*O}=xy_U`%bM_10O{M1 z7i_9H&6*xMeNbFLe3$cX@Zl&P z`;U2-o!dSsvFFyiJm@yCik15hxci-M4tII8Q-L}BF}gAV@>k*S*zW&pi~ht^?V~=r zWjTlb!QTwJ&zrPjlpE8KJK+KlH-hNZa7G3AHFIWLr7<2GCals{UsFB0|D010*1j_Z z?jh;=9O=DUtV~vx%Z$Evz+@3B5h}feqEOZ=MLBj*q=kV+v%ZraIdTMYsXnO3*Bs)B zWVWJK$7rV1jhfiU*XQFVJX2;JRofNDD{}GDM9`o(P<)GU2Xt+-jK|a2*jTOIzB}VU zIa{`vDGFM8-Mis`Xg(xE!X~_MK4t&_*d7*Mj{OtGZs^9}N0=bHl)YC~Xs!58=Z`E6r zg@npNHg`Jmx&GfSC12^B3X)!!>Kxx#N|ybBuG>$r`BDX|VUOBbrtj13J-pty>amK) zOMd30qTT~aLqTVkfE#~xbdx}{iVeQB9YQV_&5szS1psp_ZF>0dUJ>!xREkm7up-LI z_r~bt#_H&lvx#}BGZfABE(P*}eVAlkgYWZS7x9nHwQ=CO(dfj@)M^mMH!HtoJSH({ 
z37VX`6NLk3!G}#gqwoH5&^!TGnXa-!#*H!KH@Dxgq5)>V0X@_M1uCzGBKDrcH7dW^Y+f-H{8%%vxB8t= z)g9*7Q(T6)U`^1qR2dRn*n5>)m|Hs+fG3rJbcSIwfyFZDDLCgozac&uxw7=iKRUJM zhizt(WvjC#W2RLPwDhvTG?6Ln@Qw}Bbg=UAK@%%I&&Q7*i3sWq3a2psN~V%`KVGWl z7a@&w@~9omnl_LJs_B+^uCD3-qMX zLn%=cI+uEr8+68_q{`1!i0b&#Hk;{cKiaSxJSkK;p|}D6vbCW(Gt$gIbRd2kyt@}N zpF-_5XwV`s9Zt;;r3#n@CHXKYZ|q=m1pm|=EQwq#ovrk$aT0QGcKCH*y_%x|-dq9B zY%J}3hHI5?FS{z@HUD(tG$@llx!ldYJ^WNLgXmJ#6F~1H!WO5wqd$#nn~jOzk(K+dPp{iVyt(? zs{h5ljnx zowjy!Kc1V#`OYw$eds+?!v!Kaw^{;%A~>OV@<{uFZN-sITzJRmTE`s`a8#jXgNFq1 zaHlC=QQ^szI}Zkmr*BTiA8M?Gly(J`<^wCbLExYrphIP6KP=JA2*@V^1Tk23BTC0MXgiq|$dU%(ksyE5 zmGM>FFRA%@8H4MfP5WULAan{YDBq|QC!af~w%{iKN1{5>d4cARHvo3k%)_!poo8(6 z?va6wIdHQTPXwlA&7vMt?yhHQ$ETlGNZTSHGlIuf^_1&-T} zs5O9T2(#7^&DLsG1EYGs-G<@O7IRqOy^E1VT?d>tj- z^jgx!t1z+BzwMiF+ZS8nn%IS)ep2a^ZPSP%Q3yFefdEgAzRU=wA(b*8wQP#^JlOU4 z)a5!W_)S^4l9-LsT>$7o5f;mcH(@9 zK2gp^GIU$?_O0xSuH2pGx74yXRUdXeN`xSm3(ihd-|tCiXqj{NEfPz`gvZ+tx7@k? zXEF%w%kfJrySR3&x77ApcCVzehz-Ny+r07IbFYrY>nEwQd0_Nh8(3piUj%3f$}wAVAgBrJnV4OkD4^c{Jw||5 zn>=wjlAn=s^A^e<7eN51M%r**Tg6W2|pxcjGwmR`SY(H z4_qsOqW7svTPE+>E3=i|OLVumc?peQ6~g@ajTV^9ntnGOJmoZ4QXT$Apck7Uhxicf zp)tIqksGO(2NZY4e&VMPsy^%5zJw;0x=cbrBaj~A6uNJ9(9=>P@X4({IIH6)>OwMk z^s!|Pt)FY*Jlv08+=M%5O#3xY_s@mk(o2HLVV7Pfh~OB1vlEiO(bM^zDDyMi&QVO} zRxS_pGtw|@QI;+Zm06b6dUg38w5iAoi0dt8f4E2fJsSOAY98*n>%6|nmZRsK{+1a@Cg=ESJ&t}6 ze=<=K7nhqnX@LGE9-a;*NZ!FBV{UWfnkxK-&egWB?A8_H6P!Hro4u^_&ExNxtr<^J z^@ae%!bZWuP>+z}y%13v#SR^GE3}S*Dk*K6nx326Bxplv)a3l6& zUxJN7}~P-b$O)_5e`F4*~T zuRpw>WnoTYu}lnINW8w+q=1N70UJ0;y#KTJ9T6(4bJK8i2B$cX0N+psk>N|xb^Jot6D zM*>3R`Zt!daV2EJqQ|_^0<&C-Mo)+31=|_0$C^*Y+h*6x;sSvrX(~hlv+@bdE2>|7 zBP>@@7){X^CNpwi_I%Uh+&5mm>eG)U0!cU%WFT}MbP#;+vwc*e7Db3XawHjr)De_F7WDTD`}m&KF4mYm4p%P-7$m$UFO@z8b(gehBV!c59I^yZfJTpDf95;oJgsoE!551Iq=P|lG_WxfvMcy%IFB4 z8_mPLB%e#uRJc}tCQLiOH$Dwv!<-Q&r}r96=yWmxsl$G8k+yqqGs0suihyO_V?`a+ zBkI9=$6PkZU1046gL-q19@codJl7Anp;UO!WR--%S+&{qRX8sCRsErxj5#jSVZ@*|2WqJ*$%(qU?< 
zvTjZBD-}<0Vt?3m97z?mSh#*VYpL6dr-@BUfdI2j=410xxs|;R+_bHy|&YIHhFTWVBwyB-kQ%B_efzAdyR}LlNL_Y zTL^F|L+sP{>bwYQ`_5%nbYq6m6;@LyR4X_tlkQsSwa`cYHr4b5hlCkH*!!|HCmkp}%oBH0CtGVaSuYxZP6p0Dd zX41NFGebuxc7!q=S@thC>wb?1`otEv0NZSmUzl^v=Qcp)7v&j~zf7c~zm$b(p|e_+ zy!OSS>h9kqzAkC)nCFBsrra^zwuMyqTMKT>(LoR3lEiM|DaJ}&yQN1*uR#+O1=BPi zTexeSErD%9&yR7V!D!L>u1*i2)>h!$kPfu*9@zmO6^gQ)xv($uKw*x^bM0YXiQ=3J zpsuk)Z7?TUkZXUUol$>HY2RF7L*NdrMrDzAZmu?>pU7yRwhX&v}dtM z!5T#VO->&%rd;)fhlbiK5=hQC^%Hi&sh6_9WG zZBGwi_8IiTw`5U#bK(lV=)L1?eLqfOr#0&w?0ff#AOun%jyt_&u2JNVMregTe;f0{ zJmA%YcsUN{#%=O@a_Ec|;Dpj$2`T$#aOE%w9t;W{F(nPeR>TYOtwkmtgVPgKi8VCn z<29qT_3;WK``z^Q`U$AJfE|Ga;F{;5K|Rr$AJb)>kLcfhmHxNiw8{Tv3o~+sHI5_j zmHSOr16w&F!e*c&x8)fuE5D5uyFOJ4L0146*Fhpm%Wkg)qwkEc>8E0gGuLVxgZ$qp z*l?tiIl~U+T*e?XlJS-vUI-7r#Ae{;9e}q2mf4=3^(=vjb4nXqJBZQtkr@rRIMe<$eBvpXi@fR@qZjwAZ9{K#^p(Bc zN>+1*^ri0Ol}Y&02&Vz}jRowTfq~`Abx=A}Q?X<<(~`RL7%nofA#`xg zoo|N1Ev@{1l!4D~sxOSZPO6%KQpMIkOmFLrESX|wE5u_huAe_->h_mW*9AmmGJO)| z{anWD5W0j8uZIyWqEkS$fL|xC??5{@h>rW~D!^~!!(7Xf0OR+KR(jF>j2C6?OH?ya ztU(1o^x2}(9yH0x>?d}-v#obw(LwwQ`(j1CyLTDmQJd8~^HE1%C~!FZGh<+x(z=^U zG#b3#N90bdz;vNTE!fQ<+!8(t3hKMleKPU-Z#ZH=Em`M@LOrhK;a{)wrV%7ygZ1nULNT!I1a?-Y{7r+qE<&9@s1}eg&%f>&L<)ZL&3c&vm9{!x%Cgb7;-2i zz1Q}zwjd_B#h`DwA^){~sr09N5h2)pxjGiXbubQA;{Mmtgh|6%1-*_VLFKlk7%+|c z+-eZtiqYobiJK|%;EzQA z_IS{zJ+UC?dfJa_8w38>L0*#2eWy^&PcG0XhAA3ozBIeEbC1QRZn=s^Q7vb94 zU;v6!7I0kxQN1AEFS1PRpH@6Ia;<*C&D`_1b*Ldo3saucyHXQ98A+sV4Yk8eAnU6| z`w}aon7Y>vshGb?{qt63dw)V;NrOXkwrN|Vi5#{j%K2;}h6T5G91ugJx5O#~oo ztkwYkveyJEE;&BKk{=hW8c12&2KhCP*hj1XwTJ$1`gEVqtX9r8bz@xD59>IgTg`8& zPVr3HA}4ZdbXG6wtgl%daUV2Ab!%o$tSo)bAcMUR1rLTkaLG-mjuVA1H6}SNypdHL zxVrNd*N@sntK!maCrph-(8R}ML)tG>CIb72G_#;^kZDbuu*$S+$5qm@%(U*C zSwVD+lH{V0HYw{)IVGc4e(T}HK<)aK%#qf=wVq0|Emc%5n|tw(ZkdJZbML2&1jocL z3~tgYb#dfi?p@YTHE>z5euzf`xv;XShV36d)Wx9}qQz?>SWRx42w*c{Z_GMi+t@q8 zq5&{j+6un=cy-K+AX^g92<16~p+gBlW5PPi%`qJmebjJXf&qykV0xSlE5qa%#>LD? 
zR^Byzx&RlGA_mMUw9hR)-5P$gw*O=Kwk@Z+wfMl^x<9ER!)AXW25nuTlaL-=_;?Hu zlFlPKj;WhnGJVF4XfaRCSLED6kWrtXy4N#)n0mURV0#uAzr7I(+Ze~ymYQFFv*-FZ zDCl$e>cBhMt5%1j^m#k`!7WQ_(yy;yH{4E)(l$Amq)K5opPb(~@M*C;dBpx+t17Yx zrM}zwIxqT9v9PfXcZ^#GVkc-V<2(5^&IRkK$$Dd{(?Af<9m*oZm?7kh>f_+y>)2uX zC!?%tuE-y%0AmvD)uMsmz|G00#(|1C<|qkNp|ZnFyebR3lV+l%zk7=eX-#<*!AU>< zyudAlT=sL@s$whD`#IxiQXpkD4^V+N{f()o#}28gqM#n5kVn@f5Pq|ZU-x_25WN=k zV%WkCLRPy=IgkCNPE#$;Wt6oQ$%=Rx={QVtAAM63?#${wcSAY?T%_Zj*?FC`RmKGH zG>Q0d7p2+e<{J8N`O3Mq-2QJy(_4P4{`)d?S+G4qF;^=SG2>46GL=7n*afLsQpeDx z?}yV*t83yiUp-S@d3$#u(B=QA(YGtJ!p%DGiGA%;WMTdH&&{uQ#emFf$Hgb zQc$z1h@5F0>|#4$vPM<3OH)x$%N;}I+jZn9|9)>gz>^%+cw1knNEb>R+D2Wdb51=CmH zs*D2{VExD{ZI`~Wp!dA~vfFFx`RH#)7ZX{H!H@bD^kX^+O30@LHW&owrf_!T^4>>V zNjRDRdXPWhn6WcNZ_({LB{i!`v#ynBqd2}-+)ZjjePvqLut6`bwX~QLRSsB5A5L*I zN)_N3CGl`v`a)hSI6o`c&zS$HD@87nRG>|$M^+x*!9f4^+l#@&=QX;d#Z0Lew#Y~eM&i-RcZ8D*yqaX(1xSr zmq*_i5G!NW1XEnIe5~Ngv9gc zhp{P`Mhou1{2^pS)R*McRE(hf5FtWJEjuuY<6ojB?dYzX&BSqNvnV79QJc1Zz3C93 z{x)Aae>=!#6raaO_MyCsna2cNLo^bBNWn?sfLrK-Rxs#L1?9rhi**WWp-*n6DY*50 zueYt6;rVS(vQAsP908Cl7=3uB!-uA{P0jqVU|fqSzysBI7k%{9 zC@C(;yxtup6Cls?N-Z^>AYKF4SL0?ux=$x!!qc z;4(&GpJUEsk7hi0wf6i-`uBoF0B=s01?nbK)=mFG5&A72^WUZnW89!mPV1g+L0e@d zH{GU@u`uNafin^7!q{c_7=wJmy>#Ui$pojJOlgo%SDKsjIH4^^N$&Xk$9}YCkq9!F z)ucxA%26IA18{8|c8TaHwM{Ub%=53lEglPo$sVlMo@2Y|aA=r~*2ErmG273s$Y`cb zjX0TZeJkIxq5ahPS(pANDd^2^rNg+*^vcMWBwA6Qh7MXkkDO+d?yT_#PQamG^JKny z+PtJ|_I9UBU2+p^Tc(D`g>#|U8xdf~fzi^-BCzOBYYGxbM{hA|#BTRMu=}vUs4}6g zV-so}pz7TpmFhRSdZi7^cRpv#kKfW9maAb*xUgOU)g5^>9WkUS&4FRGyoqob+%;OSrvr0~S=1iBs!t$Uo zYthnsbw=aPs?1JRX4fVqjUS6hqGV~7$*o~4W42DHS<3>L;jIaYe<;+&~C(fnG&tqho@#_T^qQacEO3LYOo@7cfMT8Wtl{XuxcBK2-~xB z;*BzK6PKuCQ94$%d4m(|{frZuC!0+~!Y3kzx>izv$V$ZCYr`+hc?;XY!JntPcWBrfw7U%BK;t`XqtJ6yF?+U57)54i0FI*H2sjI1M6085*~F2 z8MpOmuQY8+6_EfVfTK=M^9^b5`;4k;Gp0|C?8MMgBIJn+cK3-1@v818DPfj-drJD5 z9i5Sn6NX#K=mU)bH-!lr){4!L_`pBo9N4{_3!#%F4sV7GAW$jZGUhG|T{C-=&Rgn=ZT`U;R-%L>=4K+9Myr-PrQoxh|&* z5vpOg>Q3_Puf5QR}$m6+WmNbwG*MWuJlW_6zn$_ge1>N2x>6xoo$QQl%kBY~V 
zI`H)L+m0Zd)vfCAh}s?|!Km+H`DmJ^u`u)Hhh0oaX~`r^!L`hP>#TBJsM&f+Tg+NWm-=*CqRDUKo1c3}t$wu5YjmOC>%DbiY=V@pzlJB@$KnLfeJS{kJ#26@T~u^HON-y}_PEME`Xg_j`&a+kHXt)`CX1F@zEE$w0PjmimC*K`Bz z6y1f@js(!zhj=*bjq8S$>cRtFz&0YQeO>1U`5kh#);l&;W{dHvk7JRmQrF6{*9SBw zLaJ@54|5EEP<+N5s{GqXF6sx@Bh%pn>lYqF3}|$h#0~#e_f>^|0aY$QA0_oTE&S}H z)#$Jf&L>9fu8H$v8b0W^Uo9aC->>52Feq_LSMSCR!@Y0xKhkvp>@*;5=T+%O@){~+|c>+bg$Kv+&iIn zjf2ZyiZDtv*55Jwuxsy)rop`E6I)k=jj5^QQ_I_YYc_i;p7NP2u}x^DHi6NCX9mF2 zbIceX{f8U9+@+1mZ7l*noLLD&=-zBb1a66Mm1Ew1k1paD9AMJZd|Z*Nt{SjF=evFd z?}7g{*f%fUP2i4Rax0olzFrrB?1|UX%b-U_<`&$HW+!`MHhHb`COf?LPDO`X zS9AgL*I|huv5yg~@nILjs2P)0v^=^h+&(Tmt$?(APw?ZxUnQxuft!wsJ=W&xD9weT<`t|0E?{6a_dYgXMjNwrYzbqy{& z<~G;pK0Ka&{KSV{<4w3(2O5N^7@ynR&l5-V`O4!ZD}0Thoj|K;BKq^?GQC9HG-7#} zot|q5qX62#X0dB1OiOF)VaSS;oU=JCd%R>@72K-4P*BxsE7@*@r@B7kjK7ygdY{^| zmEG3x&+qSa(|FG9A8lv8or$SCHpbTbX{V#qC2Ms^ybzUQyIr6{49hznqO)Fy=gy6> ztw`PWNNo!9KJMm_rb`s=9ZR48%%E^UT!hFn8HjU9n>4QjwkW2_)FYrAKTOEi(AQwb zkNp;U&Js+40iR_s)qs9-X%!}^syq0+N;TP1HYsHZ$V<`5XNZ0+R z{K%!ep{2}GI}zXPE4E@m^R-;BQRMJlP$id=(a-%3g7wMs>x>_S*5m4NjVYMFq)0l3 zy|og5Ow4gMORjPN{k)y(Ghx>{zVtmJGYzH0&q}OUoY=h}_PbOVgf2@-9JVZa-T5h| z;foNGXtK&ZSh^QE1-B_luRNR~F#}KeTfk{Vj0$bFlg#;3zM!&Kw# z3A=92g|O33~w2D^`h63@wO>|Xo_*4x#Gk}9sU*7h&_Q3Bl{rZ2ts(<@W?*4_l zE{~Hd=ZL(yq=}qrpLXuTE8(dWg!UVc7Kb`9PT}Wp*qPk2Vo(2igc>y`KDIM_{(Y*t zBtIf(l-$s+4yVGF4jgrYkQ|&*?w-+Ewm#JB4;5|(Z=-4!>usPlwk&1Qu z1&AdJTggv4x#QAKE^3?N+>exPjEsp`K}isi(fXeJm3Oilt$g8aw?et?L|^;R+Vphq z!u|&K!IqPkLAt$Ny{$quc+I76(=#{*!~M`n*PSEH9@nrnb4g-825Qs&x=o=a$= zt=cuq9Wk%dT`-L(Lxc2T`6lz2$|(b1yxRF;*Db;As?VcFHL;E8AKNDT@2usSIhAZ2 zML@Pc?AkiO3G;MbCxH$mI-c8EKsdZmHcW+7Ug87~EUn&TJRId(JxkIr(p7#3j{~2CZY^C# zpr4AP9a)d+ zj*Gt-b%Yix=}P#lM#AT(qF!q5{J9RM$pT zU|t^hW@Ca0sjk+^F2hNDz9h?X-Y>iGQGIo+ZFEAqLEQ&o|o4kyTjkDH2Vp0q>ca4Z&5G8qg7_UaI#Nh8?zr20>HST^zp4zD-BNpUt+ zRapiUW{xv#xjE*-L~753%JMJ)cj&+GbFVT%_jBfCT6?mnEEV}I;r!oa1})#h+%@kH z=5qBWQ!kx0DIz*}4_VpiM7q1ZD0`a+6^q<^FB>luC8QadJ4;WTOG$_lh+yf3`VZKq z-ZaNi;})uosHI5H<4si}{b!V!ch_jeJcxex5jFGEhUJc`zs4Z``{(#S>6L$O18;ap 
zipfKxWnZfoWzgip(MoD5QoTKw3Np_9CFOyCLu&&k!JCRXC@f2-EpDaGPq4*XK>En` z%dM#?!=Md*!pdgS+IC2wwfDjw(*x{W_4Z{l+Pr<+P_7lM6b&aRY9znuA~R)6hVIK$ zT&$5AF=W4F#3>F}2M#$+)F$~VeD(H(_9yCp*!3`=mb$)9eLmmiN%JqXcW5)6({>oj~+2xoXztF{S$=m_*0S^1oitZ2~rO z4fw?xuWlzJnehSeoi^ZCL3i$O8Xz9nDJEm_$5YkC&FNQ+2u|<5nf)v;LOzT~~-FeT|A1ZIDf=UzNB-h@=gzVz;e#sO}DOlToiE!?$4W zg{ZX>O6u;+H+_}JK=oogzTn4&G#o~l9wh(*ik8Qb%(R47#bu3_oACed{fkky>)v+F_A8#z}@U?5F{$fX+G=IkoJ^JsWz%8a5=Z}4OVi<{5$xwgV{?S;hKijw@q z{Ui&bPw$L9yBHeeaiXN|sL@VGdZPdBRQY>4^!&3bY0X}Zpk7>oU3t84EYxPxzBP^J z`eMVUE6|qNP9{GZlZL=I)-nB6{>_>64nO+TMYO-#j(#jXOCLqa{aa$Dr(mvkXaK1? zOGw^0_atiPn`7M9uNK+ql{iN6H?D!xl>%U{x|Cgzu1)h@#ZQ)@CzmoA6YEVxiKOd?Z~TZp!)=k;?Z?3W>1{Z zYo&1=s}T~5@sZLfK<_*xjEe>h-Ja<#8=B}fcFCZ}MyU>zRriSCzD0HE>x>+jTB>Nl z@Z3GN8suf0)9*52sOn&;TGp4<9+!2k%;La}RG~}e z5@y}_!>;&@pM1iU7#sWOS^K<%O&oE0JGdv^mIqF_gsHb;xrPrXf7o?KWgJ~ARj+pu ziL;R_6YEy~`aPM6)@yaU8$otyU%V$p@E1Sq((KYhFUgM|nqAQ!ro4%-2}B=a*=$VD z>y!1;`@gy5u36mZIl?cF4on4bl*Y`2t#$E*WJV(=bYuv^W-|{W9lo2vw0`ylGiy-= zoR}FiYT~$s)6U*SF9d|*K11!QMCylKbPC`xnL>Fpa_w2Mf`~vo8jX(O=4zh|zTaef za{9jcG%jkq%S~Gnd@mYsqt+dFPrcmOikhR$^;ttd;uV1qQ+Wj@c7zak?wsEde7@O% z#clfEcP(cY(+}5ZDABoXm=qwto3Pywu@3iW@9d>H8+DjRdeh!VhAdS1Zu+SZYZmJ5 zxDC66+rkqA{HUwNjiCdgkwKj%b;%sl05goRiknX1w;L#a*eTWW?p1KcuY!h^m8!}QYbbG8ZqJPH6K6W_8s{|YCn@|vU9|x zX>i-Za(w81XTg~}**uuBNqTj|`ozl(g}w3nS}Aqarl9cePS1V~^NF74Y!H&U*X5gm zQ5^%C(%dy>X7@6!(a#d^n2ArdE01+lksKEuCuZ7?v%o364ioy*^6DP*E9>k(58|>3 zjQ@+hH;-y6%lF0Ut}3l6OKFQz6a-pnp%ej;c}lvg6qG5YmJDGkL5L7S3`hcmKvi3c z$dCeLp1NczB_T2;ga9dIh>TGp36KPYFc?Du1Tv72_)c}dbzk>=>#ck5`~CB~>zO~Y z&PmQ$d*}1rXJ_y4@c9f;OPYtP%5cY;z*miTVNqLO03<5M6-n?|nG<=QTR zBl8(6DrVy2f43I+WN`QPJDqZlXGTlUjP`Tst3U4mgD-k6Qt98XlpJPn2dnyKfnsQ; zdVKc$FV`Ht|9AKQzjta<0}v&H4YY^R4xo7BYmcXg2S(eFriLfRN3iih^oVU(toZRz zZ_#fl0QfmK>e;27PDE@JL3B8)w6s(XP(IW&n2h>U2<%P0piO{UZ<7Q|OGrM%%Sgnu zq=7eAaZkta6mx83lT-DbhoSf>;(8o4+<-0Iu{5+Lt-&8t-d!RjyN zYb1u=;(RsB3VNVzE&b8(C{a`}A`0U!Q<@@VLhLd|Z*MC}?oey+Qw^#073y)s7bNo8 zx)_72m<wzh1Ayv 
z5I@Nm$1~PE!m|#_Vj5p;Kq9o}RYt8vJ4xmxp~QaEppa(!=0VZi@tJ}j(-#Xn;x$^^ zje zlqYe-Hv88{Ifrl=g;dKSYs>({r+0pO!UXnGztZM zllo@`%s{zdJX?Jbdxa&7A%*YFqtXYF&)?~UX`oW@C*65Hu?(OdFPRjW0K(mk;FD-7 z$~x?po8P5OxB>509f;v+qu zx-_*qooUg`hq*ECB&6E}8xPM?z(n6U(jKy=#QK@NA~n?u)hH$!T0f99b5{xSh)dAKsMK!OxD(9QL*$6)r0f&2;3x`Y=MRqr0 zQ{jY)sT=U4e=cKPD$P>(3^=RfEgsIrDBQ^ct6pOt3`G(V_GE#6 z=$($~t^`RCPa;RNusFe6r*D;Skk;VT__7Eh=Ac@?r$Myl*WGA66kos@J_Z zGvIj2a(ZOXo`nweI_B2l(#;&#G1n}-abajr>qJW(0I!jKUQ6@WD&BS;=%&5 zdznt26+LhnI1krb$tj6>v-kG0sGA>$3Xt0t$N~Kmqe%Wv7?73!QXKY?vi#OstYA*8 zYe#i>YhQJ;i1<3xwn<=5i7(tH5^(d!S9i%Jjjp$4sshfld?L=hpx!;|F_knKI+}{{}<3<&3vpN4^oHh(SHHq16@Wu*^kC4<&H|Ua#4|qm6 z<f`B>(__+o9SNo%2bj6h+w4*!9UCaGWP7wuq=ywXPbN2P(dvj*!!TlzVU?-F{mr z&^#!Rklx^OB?A1JCP9G=-L=~dlckB7xyj+LTwP|j<*XJPiiZBFD(o(x*KH}EP*i-S zSbz3Vqw-d`U)R>d4TQaz71D}n=cCA;(d#&1(}WMsrv)Es zVaU4F*V=-g<^ZHQ^GT<|Ay7M}El)GbO95Hmk{XJ&p-CK6ja3TA*Pdf3b`i1wdZk$u zFT*6T)&E5ai9-dy=sA`K z3izcX|0yf%dG2uxI49v)?4lHOl1wlaH$=Bt>si@oP9;1f2H~Sp@6OFtN~c!TCWZXX z%7;GiILA`rqMJp@7ou!?@b%)7I(6*Zl)QbJtMazC74$U4Fhw3_U#_ulkBNn>0XhL* zgz8wWWoqTD*tzDfp~O)89dBz#?WdS9v`6%lb^ujB3Forxa07 zO$Nw|g$8W0z0g{Cn75!zgAgi_gNk^@y^S-y4+bJ3|ct=??w z8g+A|A}#p>gS;h3$(sv`x3e3 zphL53OvT~egB2ewQa8rbrdBRK$^jo!g(Ej@Jw6^J8W#h>9QY){fIZ*d4fkfSg1yJr zWG}z}mx=U~(cQ$BR_ooS?H=wC1qs^!S~FExmN500chAbLi&n6}Fg3fC8yPXyDkVbe zmW2j9>JgQ?-pk*3SH8r66ic9#_6wx$=?f7sU)Xvwu@1XBs^{X_YL<2)IQ%7a7We1b zvGZddXSTrS(=0s0`)y1@+V6bxEbd=>@h@Haf9k}gq~x~#`K_JM5;tL3{mLZWI!Txy z>0ICy1k@`&A60}luliO57pNbz*NM1{I%1R>{&u)?Lbh)8`3a}^;KJGS$!SLHH0W*V zwlu8kworC^BZ*sWk~NnxA*ZEvY3W9m+uCa|?$k>4}-|NU!R21 zRxY@GZMF~AAyWAPq(f+I9*@JQRHSC3Y^LfEVf@oXWwJ$&Ii!_KdyMWfZ z0n|lCdZm~oLQ!-5lJw*8CGJHOac;BujDhzz3^_+Tcw~}8lgU++MbR{q%8W8UmzGfa zZHeWT8vbhIw%_Vzx3XhK zW8ErBXA0ask}Dr?0WNgA5p zoRO98;_^ObL2jgd$qgrD%LWey0yH( znKa43KcXQ)VQTv0ZA}eo7gJ9OoBw%Rw5AI8=FEp=LWMbg@GVWOdMh}DG2r-H38U&p{*s_8|?pUGd7CKNLo>ERCj zX7oG0pb~xa(cVv3ucaIkJ+wVo9VBX$hwH!7`4&)#o%`UO&UE8D9r2%lJ(hADtOa78 z3dTE~XYv7Y?_m1!`?--97y<_ZMpl`i0k0q9)X3-Q2zIMC@GbASTI1W43L2M|np 
zry~PUEYDDR7YWkMju|+~O-okZ=~&Q93>|8Vu!cXkD?1gfr#_yPagR{20q0iiD@xqq zh6z3BZ*pV2CayJh^~6Ia!^eEZPPDHPM`TqeQVhoiuqUD&0IoFr(Fa4lE}nM^!%K1- zi`{t5m&jGO`vP;*N!c#x1Wi2RuCnI#N3`UJ(>pOd=YXI0eZj2O-?IQ!r#PA(!M)nF z3?H5G{#3vIl0OgT_J9jWCXJ+5cf7StFQ}Gx5S?6M4~S;4(Zf7g`vBIBbr9lEJ#)Xg z_cCLJD;*+G{&U(+|I?RlB&RMjgxCt0gH_b`zy0XQ|8mJ#i1;sFH>OGdfvJ_ zoKLWDO}aa?93``o^G9tcmnO-==yQa6FSYO@``jB0xGr-_bj1|*OwKjTEsqQ}s|L)2ySNyEVsqdnDcObUx5Uig)Ks zbYsfXdQGx7F)3*oQ7fBOcEZN%*)P(cGa9SaYvt)QOBYRg$TjUy-jaZxhaYq(?wp#r zU1A43EMEnP(3RO;>R^Mi_8!9)%CVZYn$wH2A5rjxBFZ*Awu4~0P7q4!T0#}f@Ugu0 zaYcG~Ob5pobhsY|3bo@vax)$AlyOpyu+i7`3@AGlmkj}NtbDtGJLt(`zQWR2S*TZ8^sxKtO(D-!khC+^oyK6 zKB9BN5eC%ba98A*T^AFBs4&V=9#-spqVG}Wn%t{==$jO zs+bC7u`*#YBN7Oc*M`Rza%LCT9fCogNJIlbKj@|oPG+5qkkvGkv+lyXU+hM>A;htW z6intWD^h^BqX*sKzIH2+a4NS~7$>e!UD>Gkm8IpaUcEC-9Gr{_vq0R9_(Oe@f%+EF z;;2jr7Jj-u$5p1mp1As?DfEoY6Ca$S%=^2<*dBTyRv4U*sehb#vLTwojMI8ycM9?w|r~9 zh!q^Sl^&VmSI%6qwsAhuqMSMF7jk)F)7~?EX)SD3?9G(+vNy?`1}Luk>KdLBR#YbU z*9NB#zHWFE@h|2v7i>0ZVY~o!xp@6;&WERWfVm%5#oTHhRiJ^Dz}G-nt@NGFooLON zo&RO%&F}x^=JzN6M+Cr!8NtoG@1oz5l}Cbo!x)FPhx>zD23x9r01_tS{aaDCx1vi z#!C-88tKVw7#heFc?An*LNXNf>orr`ayCrmyY*7Ew~@XtR}xnw3F9^7+wo^3F;LGo zWBCbA1Q1*c9j_U z8Fy}WIJ&nmjjytO%Xl`8z-$g;9DGeB*qvs>*)7)S9mn!pFm}=d|1yx>P@z*(^L;Vj zxEOyKI=u+IM<&HDa}e~-ZV)$;ItpuIb7q0AG@ zJ(2JxEDU~zKvnGZWqfc?N)R`nk2$^O9Q|bO07P$99~nl`C1GJdU_t)1fzXk*AnfCl zJ!U(tK$R!tZi~SAlO-(7IA39@r~nyEg3b4@ysSVT)B`~mheJ^Ah&W z;jp@wzhejo8yPa%XjqcZp}c(k?pn(2)Kh#n^9t19`2^RG*%^)o(f3s-pE0gh;aIjQ z9r)u1iS@kAaq{S2o+r7wA5cIl@`+p_#_1@uhujpImEfPS;Q(C|`P-gNM3KIFN{wAJ zb@D3uO=QzvUeVJGtkv<@mQlO|)AGct;k=irVdUJ1DS)jgb}T6ii2rQF>~^-k9*%Qv z0wO}c(Ld?w8UH3>8_E5Ar}^uZf3s2zeu%VFg9?|xDfB02|IoC$rK!e%Cur9E8Hz?% zTOFz0U!gerGU!6gqkp^S{lR~Tzy|}yA-f4m)asrmTI-*xEQfYX{?z=ZmZYAdI*OIW ztP2Eje`cV1uk%Yy!>YB(Ug0zUjke`7V$pT4kRhr`AP1QmDWX5A{z(EaY}r8!?a!5Lc>Aj9#BnAha#q+3wm4b zKNqVOoi5v{Rnjr^;CDKQmbFLF2h4FO*K@owWz$TxQh!5u`nx7Bvdb|xC>zM-UFlRGS=I8T~m$l0qiP?n1i*JPF_77$_% zDoQ8K5CuI4SE-sk%j7TwGcG!JZqB~uZlPE7eZ_^Y$YJni;zE5?&;;{s`h?Igs$DcO 
zFVaS`Lqxt`vS0%{Oru#;uH2GVRCVSCCzLWY_?&vDfEDr0cAIAg=^(~AjGgs z$pS*u_{K~3tITIWdV&LVO#C1Iry0*&Qv%!DbMC7_4>q`wRXFf!o$Ug;^=Lmcw;RK9HLJ;^c`ym3x}6wNS$z z&3TM^V$U(TOE6UWh3qq7o&$2q8SOe>bZ+xpEo3GkhuP|WejV?-16i&fT6V029cPU@ zA#Hr}gV+`ozq#2@r0$%X+*VAzN<5~!pJ#~dN(_z*&M~^3J=7MO7G0ccc}WHT&Cw3J znQnQ+*={z`^di?8~bgg_c?xdK0dLE+NmZvb^uu$YZaa|6tT6zc=KZ2hm3zR7D*2+ zR735gx8hDeY^Au`+r*Yj8)x?y_q(th<}Ejt>$F2kaj-`(-<;GjI2Q*Lsn2nJwP#Y~ zWS~%e@b7gmpK?-uRt_X{XJ5@q*L8Oq7!2c>^wwd@uPG<_Bxm7x-ue0j3}KN!K0Lab zW)>9CtQI9q_;WuUIPUFlFJFNA+lnSLCJ)W`P&SmzZ_%YY5;GUS)i16hdkMEEqb&|` zC3g2&-d7KJ`;3#SS%Ny(niqL`KW)Whk(T7($D$2!vyZI|Zd*{UUPK0NJUD+kctv*z zQl7pac4M@+D-w6-nRv-#NFRav7*du5V!oYIr+Kl;V~C*xFP2yN_38HxEE@2OMy|A~ zk;u1gCf>c@Y0PAI_D8nv#5pcEbaA6MUaMcckHKa~-R=9y7vGn@{*TT4{)&G_-~%b_^>>I{tumU=8Q)?W z*jQqQrh^S8q7TPgD*gGpaor*JLt66jsl2A6&rpOY1C@U@zWLY=d8Vn8J2$;G`4W(c zx9^B{8DK|V%E}S}>?L5w`g8s<1t(sW0&0YT0QoiK!H2t(yXZ7SX+lNywO0U<+84B| zF3v#?WPUhla9Fhe-o)->C;pPrg(9r3Z*G<@`yqwV+6mAYde4KKgkk~{Shd%(bs_2c zhP)XIUJupxL=3HcNYajvc3-#@WYQJ86K`ztG42u-ZD*|`?WQHnKfvu zK4V)S0m@0l&$~QvU#WQpt`0jqihPc7hhwR!C82IjFU8fxEl*br(k#R5yOu_;k1EG= zE+$$Xgc<5jQk{=Ze>*ix6YWc(^qe|9L$8~649iiq93i{bbqey0}>pFse6c z`DNCnl}^ASflZrtv^TI!>C0J@^>FAPNy5*xKc9W4LokHQVGyi0;O)70I=?1p*Y&=- z@JBPJf41TKtAT$k(x6!Wc|;i1zx7(Igk*Se)(^C z2O#@aY==w8j#Ar>tr@>pr}m|wAA9*1z$Y^*kU>2sl~|+9<8FeyiKzT}o>Q zK^`ID7%r&_z^gYq` z!$HFXT2{|DI>{9|P9+k6j8lW^dU4F#&HUE0UDkAj;cZmR{{9POvewbJb&=vtR zKpo@G<**V)3ZuY`Y60j1$NYoF^Q3Y<;Ppq@fVz}gzj^nk1lM8$pZXc{h}uQ#Bj#2v z;G;trO`El;yR}}EQ;%=l2&P=}o-C>qm~f$2o# zCh~0?yUcL8Q*YdS6dfz3b#doJ8bIrw`xn`9v$OxHVe$T=e?j1bUwg0bMk<3>9rpOp zRvX$=LfFd{=vy>o=2x8k;wt0|inmj{b~ZN7(*k!l6UKJ9g*hyTiqhYXIBnO|J?_F; z?%a{VUan2$jV&hzWM!3+M?qA106VE>EbKM|4iVhwb84N)*TcEk99UlS<4C$7fG@kS zi`5W|%XukLqu4KvN9M-^6{o#dG6jZ9oGXyXQMdCe2J0uC&Y#RLG(Ol}-1LLuaLwV8 zldgg6$f_#Xk6WdJ2nKt_0~FR|rJ27t!ycWAt}X=9s35d*o6X)val)-*-9nRO#wsrk z>5^0?Wn&C+uxI3kSbvv!H&aTYN^fG51uz-iXja1oOXSime|J7?xbWg;X}J&c?x7dr z10`Z@{ycxNXLzdT?o>#@E6G<-Q2zq&PG;Qdc9WMM8s(_k-8XX>chYNUxy$SGoX4mA 
z+ldxIGu)n;Sq;B6&$}bV2!HZvDSiUcCSdyYsv|odnZKzr1Ipdm2EiGF(hz?nO^Qw9 zroGc44trn8xbo6~s8Mz*uTOWIt)f#(rroY02JJ!wnIe}vzgQuYs4io3!G9a&$5I=e zmkmabvI#fB7AGfO2yHgD5JSl`Gl!*-pRhY8udNEdkcIzsSu~J(^;tYB9#i2yrJzkS zl6!6~6x7nQ7NTj{NWH(rf#{JnXKZ1 z6l%Yl3%^LLt#7bwTx|Zd>0$oAjGgx<|49V)N_!$E&Yz0cR0J#-lC6jIiO-fyu?8|g zL+4eF@bnc4=)y0&AL()C242;`e5>(S)QK(uM8htARRHNb=rQ0Rln;!73lF-R+2hql zk!tI|S5y2aTYrDqdj#Gi@E(Ep2)sw&Jp%6$c#pt)1l}X?9)b4=yhq?Y0`C!ckHC8b z-XriHf%gc!N8mjI?-6*9zSO$qK1-eP(cAunl8wVPUp*CQ=YN z{bTbSJzX~A!LM*Y47EP2hkDr5!=Wm0^5(pyDEmtr9^|VQL)#vt7~M_z z{GCqk*f~q~w{q1`i77y*M}&mlWvIY!=dH8Xj|IRn#-cs>ptz8D#mU=Mw?#e0 zsY1}|*EL=0a5&OTR4^S$@8agz*SxA=^o+Z3hM9)0HD;xu?n~yL@}a26Q&;tf*vKCe zyUT|T^&A~3cIthn1IKWWM)K<6LX|(zfyd!~jWyc}It^knD5=&h6`;~-No_#-|BQG% zFJFwG!efhCR+B+~b777t?Yv-qwu4)tc5!lj&mYBq{%>vm|8nQ|=5&|s?B4(wvO-&y z!1?ajUny(?_qleU|9asF?eKE;24S=HI48tW@2A9@3pZ4$Uz;V{3=uOBBk3K842b4{ z3^sKtTkiN#@7!8LG-z`<$2G8Y6W;?PyVJ|vJ8}w<(=B+dhr>X9=iW4R4)sK8x?>+V_;9s5yqHBu zj+r7-s(a+=`*s|rF*5?$OD})=9cF>0ssm5lQidU*lCc&_14q%Q$qt0Ms*~P{ zHRRrW-B;F^+&#COaG{wQD8pVCMOjXY^})2G(Qc841;^(1T)Ho^Z@APEd8yrl*w}~h zL@FGPGW#Sgu>1Iqz$)u-Ze;rf(Y6o!1P*`2o<;vJ51jw)h5(D@rd8Sf4eG1YW z@$%JzE;Sf2q6mg>jft!(Cwjb8B1d*dO6d%*tvX_MyJ+0ELOHG4@v|SL2S26WJ;j%f zOT))fDee7p8g&Wy=(K*-EkRuh2K$z=ET~|^Gm*BFwonTBVwUpT2jhtzMKZl9ej_}J zzWzSpwdUYBM zoai3#d-jN*tK$@VB~KM~!N`)mz; zm=NO($1|yPU-QNIgcOB78ysE`Ne~_+JmNU=af7@Rb94Ld@oKY-!-xhaxu!B{vS{~E z=EI7UcyO;-nExJs2lN3ykHhhYFI0BBjd^oyT3gPwG<(QFehwGPzLMo+>;HTSsG&W1 z^a@Zz2L&HRMULl9C;C(RQ^yeiu_e^ZuC%neP|wKByslps3@y%{jJ5`Z6mJ8Sbfpis zc9VCtDN&1Nim&_C^q-_)vudgvBrYo||{X2;@MC;`&ZprJb6vCfZLHB@MQ9?+`qK05vfIU=_`~O~XBh8V z9K9;{Z*QD8!t=D13XrJXl@7->y0+7cw<&?`5a<%)rrO!&MsqQv;J55-vy;}!cv#-5 zaxIN?sNmdM>re}ztS%AB7iMwNUoK2xLnkr})&F-RRyDzxth7Y1%9^W{WvmObGD@p5Q2D~dL~8t>?OOx!yk zQ<7iPHXA3E+R9yz?b2H5MHN+5MF|qWurOcY2*xVpVWYD@Q0xaKtRpF0@!6~Jc7{)I zNYaDJhXmmB$csey!Gq9&+xLN&@Z;selhV0_sCMqj#-OsQ=;*ZQ=+rT0k1`zgdT(Ts zBNCl)K2hsFi*uf!nunnE4~;g&XBTaT7Kx)-pRTGX1V41sc3aX686F%ISoQ~6c1gsQ 
zj#U0=!bA8Ag(F<9NV!E-Cw7ssK(*FhDyhWm*sJWxmuuBM@K`1^tWCfEk)`*AM<*u) zR3L|1ueh=(YF*Ie`kEh=);vj? z#<+ffhr@j=F<^mSQO+^HSBuaY6^?A?@oWi2IpPq7HW*VE*L!hWDzf7r25p5(nY3-w z&P{2h{tzz}HGG4@Z=DwhBT9(Gw($(aqn6daf|q(F$Dp98%^U6Gx;{PU2j{wRWbV8f z=rxxOIvH7agY;um9rgsQUSzO?X#5fSwBlP^2+ss6wC!FkTud^HvH`{e5*Df9yq zZFoMe=4qz??GE#OnAN5wF5yf!k?C0N{+$0KVI#~wtFR)PV##AvPj>oSv1ZaATT1KS z41u9N0bJBEq8Y!Qado}ooz9W1MaIn>EH2WImP2lEjj_4ezljWD=cc?xVuIUUBh5qz zn_~?r%0N&y)C_?GzZU7;l&9>rxSuRL%!qu-i?fxh({}mI9C$@h%gmbGIbe%yXml@W zXK7`1b!BNn;ujqs?K=}fLoe0478XUFW_J+7?mYdjucObmnf;JKW?Uqm3VgusAjCk~ zEp`4Kzm2&2_j64jeLANYO&KnTp0TvGwRIUAla-ZKx7mWu?IN2tbac+>DphF@5Z%7g z8i-)sb>7bywBz{PzVhmIe9Ji12>GG7@~vk;pL6er){wQ0GwX7_mO36&bp&!)3%VvM z%Io4;d;-F_@8hPH2q(f63r`0WbmVuvvEy)K+|UReiK38eVp(~;h@p?wSM`aTlU=Lr zLyOy(o^xmgILri-vCE`)Ux|Pa7a^bWenat0haN_Li8-92S%4QpBFSK&{ZBJao*CC(^UL_rE zLxg*s198_R;&r2W9@aZ&$=Q9CToKwEBF8tpd{8W9^ujLQ7unX5mYn0Qvj#VJd?)tz znxuQP^T7e-kwGfcCx3KhO3x^Vh0N}LK~?TJO_Y))vt(Xlx$T3MW%`t-PV!<#sLOMo z3?$k_g?o+251?;E>V|uHeJl6BjLvvY8Z${sNm!Q~`su?E=FDE+B$>tCsp^im^~X&O z=M|Fj@~4P9NT{oR4!hHKnZJyWK9*=h5=+xud~o(1`KH`=I>qM4dt8;cj}{M zPz*CH=)tp=OwXkHt_j-)>rdVWAC;RPVXqRqyjv9f9}3rMdaHXW7S_DR`WW>bt^d9T zV9BzsM7Yt$LKw#pZWzNEg4zWj%Mx%s<#r->H@kjH7!@TAwqZ*_Zp zOeLb%TN^|!saO=}B-$F_de%=CfIqx32R)HqnwwtYT{ty=`P@+P67k^nbuLG3Klr(7 z7V=zoxyd`^%Ycz`#O7H+pr(4gzB^;Q;aiKNlMlzI8BJ&6*GJBkbQP$d_;u6+!wS?+ zGQ>!IrQ@_Mrpj@nbza)CzH?frhH9iJ1`T3XkB4Um*y+()TOrz*pc zYdwGAWE8GHFVXA48^aiGDPp%zz3j%cV2)brOrgaWuRXig&s;dh!i#LqnY-H8bgk?; ze=4}Gv5hrz5#Ed0PD#}to@5QIt~&AN3pW%_(n<56-B`)lsGi#9at|-&MNxB?f@>Y- z%0k*Gp2ShUkngn2wkgd@2%d^I0RVMjUjA;gss@4G7rq2}&@KAWqk{soE=)49pF|s< zSxrbU7%MrnoNSa*MjU9nbag)7(EaXw9Q;dlJv7*JLZUG=8YL6AhG)Q=cP#>jmZ2Ms0)>u?R2!L+{39kWy8s2vnmd`~uP5?-*G7Xj{Sf2R}UW^9vI zpL}RpJdSRwPs@8v-R*~cIWr`x4ll|exf;k}t%bC^%?rD;LwNY}cmYd;E$>{7)K<4p zPpAI^f2%**NYIvW%mlP#qh+UV94zUtrimR7)Ru8d^OH8`)uwg=N^gs0$FB;-O~o;e ziGEMNYejNo_pV#H`ca8=yj{T>H}U#2^it{nn%w>hdG0vgL%FZjUA@)&5sg+St}#bb zHISjU4bqh%u4muRMD@hfl4|tf!}#gvZI4D7EG|8^g`Sn)(~%#0nHp=G^L*#{HXiX7 
z*|fb(CD>Guu~;Ka8T=-@%+VNW#XKZ<7LjnFZ5$y-hW0$ZSw0()M7e4ix;WJV|?WA~`*@f@3KN>v*0#;eR3f3N>8ia1C+etEaSRo}k%1cw3Mq zgRv$cEzYT&g>|$%C!l82^#QJ@H+2kdtUsn6SI{MvL_>thAHfK_q z(WMv4icjYRJtnMDHLv?q?@6c1EwZDttL3Zauksta0-Rt+1+91CP&H0ke+mLsH)FSN zqvNg2Cwn2?QkJj6W@N&JE92s-PLyr~S`*8>0nO#+9=rQXTo+ zb59ve+SkWG?8C_D3+~P`Sf{J?N6{6Qc=O`U6G?1`8S_PPN14c0kya|JiIZiOzJ1!v zTrsZ^CG@zRINuYFTLsJze-xqN1&;Mc?Pu+5W39e7jSxZ^` z+EusFWquFGc5OE&xFvT~f2&>IcFS%TVq*2GN2$Qj_J>yfG8yiHu`rmd5KKafnf{ua z?{r+m2fzy(Hb_efx5;mRolo*0(!OKM7gk@3#M?i1Nm=ab|3rfSh8EtQVL!6Eg}lu^ zyNCycf=byIzPQ4IS6TAd@2#Hgw%N8*A$zB z>h0zUuKU%zoM_FPKT)K4lbSx_>E3%1>F-SG z#NU=UNiKA!Ongz#3khY0C1hhrkz7T9aAMmuHSD2c7T^NOgKXd2P}Dcxlkg|bl;cXr zpEd-RDSTLo_ae-2EkRPH5V=(QOYTWMBJ_}+)os+O@zDuGLZh;o*1V}KNvMJ==Oh+5#x4WBI$bTo5TyANj}1( z^oBen#80!xX>Rp11iV7)A>a`--&iSNMG6- zd_=BvBE}`J+-5DVRGnSOYmv9k6^Ryw)Vzh)>I_}R3MJ6t6)P}%_5_i z{mZ(`HiDA9)#;goew8bsA~Izk8@0o5eNLUi;r7Yu)8}2of1&?CS?#`a_>d+(a{2lj zD{r}n7#y3R!l<0pTcr<>?iT?%5Qo`lGqgH6cV!iVKqPlKOgHjl!xN2F?M!#k`o8wE zey%!>)?GbTX1)-qGBdq2d{brCsqIMb&fU@MSW6GApbzg&5ZpHTVoa9f#K5a=Zp39D ztZVcRKy0mzRjuEg9jPL1&pPBepape_UD0HCdF4Y1goUIl<)Xt{FkRtGqGzhe* zw~nXz+lbBVf#N~&&5bPx1(KNl6vMQ&g~mt1C^f@e&cZvL1v5lR_>)bOuZ*3+XO`XO zF%`opH7Dn#63^ButGJZC=DanGx?BCQvEC|LjHWuL>|Ct}LxM|Aep@f-oO8WDdlkQk zRyxo=L)D`e>QMp$w#n-P+~lvviHQSt!$I*!X{EbNX*d{b%;(6Vz+eSLVUk~Z6P zjQ~&FBRhU%@{FoOn(>DDWLmmz*+SaSw4vk+b zD;^PNT%%FQh>*+Wn6%rsl|d`WgC*b4+yN%cY>%tu(E7TWm6>z)inn1f}rL!hX zFVFnUT6NTL0;ZroJ4HV-BFGK-;7t>B;&3yQ>%}gjcwZ@ck)9XE9b~AyM--M#4eJFf zV*Gfc;QH+A!6T|MkFKck&W$nxTyS{n{CQ+JbstQUOES{iA@)vKHg@eGV<>Ogr5cvf z&foI7gI#@54VY{UY@LGZFmJqFt%ME{&B z<{p_&Jy0yl4Y3T&<(8a^e9IrI5~)?x_aB{Ji5*gM!-nL76#jY$?)F_2LBS0OWbi73{;)yv|(rvFk#vG)8E+tMh3C} z3%WsGXvHw-tdd=EdRaIv(h9{Bro`MCk8e8eRQwiH~;`s~h-48s#+8s2lT&{!dG(SG||EmK#loxWN*tnp}eSORnr_ zh7QcO_3(uHxqz^GR*yE|FS zZt#jtlMmhgwsMrF{bY1K5;bpx)h&Q&CK!K}HWlR(!B^wSL1EM&9{6EM3HOa8jFuoF zMFu;at$Ql9)ZD$XyS7W%I3w6FzZE~8#^fc{$m)>~I$C(1^gto;Csc`ae6RTAz`15@0lBOT|RKLwcm`|7uAl3L*#qPp$UE$F6d 
zFd&V#`;*(6^Z+&t%yQgL|5CPsYH|5T6g`}yw35n(LI=KQ|LlcYu5Y!g5426(ylS=5Cb{>$1RU(z zR#tIcU{;wvl9X$sA#~xK5e5SqCsE!v=A$W@maDvM_~;Jj)KVI!7@1ICL0djX!L%)< zWuwZ#UJz?IjTaRiM?QKL(DY4^%}mf`SHU@p$_f2QeD(r)+MzPqUio z^hA@1kIXFf0E%45rQgB?lh;*5qW1-%t$r5U3?Ljy_VJpge^HYkZYAPP*uDr$Uq^Qf zNTksI$;}MY*4m7i2`8VEYeR!=v-xfDlKv_bErYhs^C-b2&-b`JfDMI-4n5vFDYY0% zETtHU-ZUJ;g5poi*rHJN4r+Yd1T*M?SjueM>)WHc0QzjG>VyC}4ry$=Di3YKCm88kz7VEB4g-u0CI(BXg#D`~O9pPcq0J?j`q{UTP*ApAhzJ@5t!slM^K znfI0Nf#zNyH`ADmQRTw&5qZL?^cB*`f$?8QDVMxD`kzyur8dEirCg!c!Ow)|HuXN# zxLZvEja@Mp>h`CFb&E-{!jLLjdpkt*$kA{FuIeb)emJykAfwz^BKWn1Y6EK?rIj{9 zUX|87YbpPj^36Yf-v9OcXB2HDy-ZOaK5xZTHTrE|m6Ehf_s0Lh-kZlYd9`b!w7#uv zwOWgc%G8QN5di_2hjdp31%V366d)A@gb*V05FmZ)fD9o;WRfAROhpKhF^ox-Asj|&_kCU0EuV2; zSoqN1MOhJpd0i{}9(DA`AAScKFDBsPtyRI!6kLslZ{kVl z{f|N=xVu2GuVK6f2=)obCa-3!Qm_V=J^da!z9n0a(I-Pb<$tsiB^rkqBk)6M1g~4; z?iW@hvt|1N^1)5D+U!H1m@c>8H?IeV=46C zFja&io+b(iEylLZQ)s1*n6iZx`Rdx4F=&Gu*iXM|--Dim*~8r(QK{QK+=cV@?5O}r*1 zs+k=yJHBDOw6YZ9Q{F9}v!Zea6S&{qDAPLys1eXfC;|`2H2bMy6wP9 zpsRN+bKQgtc`-lT8kldARQ~5%@StC&d#XkrlzbaxZm0RqO*XeYPaIFQ^C}B&sf;=v zNwF_|)r{l0g_ZbUM}mD$$g{&=G}4X_*r!(0cE-bu?1)5uGoO!}SjdU= zhaO+CEzi0+sD$yI_Uca$LxARqTN+W8@{N6dgEY)nIV<>fn7>VNy|tnoR#cz+dHAU-=H=sm{h zHY6GH)C##epOIsR=A*jw;#-HRjrm-iptjDug4g_JqqG3V6Dl?hdVb@Cf=X?A`G9D? 
zt8z{Q-cT0h#@pCPYG>5Kn$nJvhFDc`ea})ZhdoAJ2*Xq!3Kyrlyrnx7OUpYrh670z zew1NaiJ^e@Q7wOcvr5d!lcl6#gQO>U+)KZ%bZHvvFVV2xIxK(mt-trM=F!E zzR(hr<*ISG3;E4)Z2jHhw$Rf-fR(#B+E;pr^?9N?!b@!I(+2a+#MYZMT;Ib1u>o+9 z_U(bRe|=6Thm}`TN>etUHV%(HT&EWTLVGE2Sc+;+xCWvyDNr}75Jtf?{A%FmLl@xK z-12al{*}$thg@ujByKjS_;HR#t~76_f34j$Y-_n#`uvM}3O(9QIe8aDdi4lNzk=j^ zZ&L!h3N-ei5|qW{i@3(y1+$R1Md)+46gosI@|f}kjELrkr@h<1V;L)7%NL=#Mt@Ub zeEZjN{0Hi$Oi8XXj1i7Z?FPLsw|(P2LX_DFG26F5iE*C@jHSsV$j>mG?=E_LaDVUD ziQY>s`DkMmTg;c18>2>~;#9V-@O;$k&g0SiF7IEkLCo&ZC(^==WqMz)dJIb-?ZYSMYW&OmCJ@0&h_^_4J zqX_eanwfB&Z%C6*l*s27hMWsTj?vZOY(Ebahd)d^%H36gAiN)r`m}4W_(d7}Znat5 zBJc&6RKM=zO!TLj>^BcnvWF&*&B`nIE!9en)l%Bjv*3g7RMn(H-@}e~KLI-N;ih($ z+C2zuhg6_AT$E=s|28e}6~f&$XhSiLZJb~sjEuV;85%fK7Rp={;7n^0m9{w|@~CJo zhu+Yxs%)!k;}YgK?c&iEqT3yF%f%v&>6C?i+VBDFN7+@y!VmrqBHF^f@>s$B5-hliwmk&SbkxLX?n|)c_Buntx93RoM<l2GR!CXJGE0yMV+?l!bWGvckM7rAuE#bv%EdU{T<=65Kg*7XLpuCwd8YZ zkp*|kq`If**}y%o;HoM3^;@l%lb3`t&l9%w(?)At_Iyuaa?R_onu94*M-gqP_#P7R z%zW@%Rh%O$?d6aEVW>Zdgp;5v8D`kdg!{j{e`S4^wg8{7XzAzC3b`$5f4uc)g{OPh z&CGf8LloLa*T4sw8*82xDGnzlzH|H%kKqntK^4ft2LD}^lNd_lFs(n9W|zWGil!=K z@m1lXj>o)le1p^Ax=_elE6Kh+b>`7d@U%f>;ziD(An8Uk&9#jKddqhg*HQVV?n~+I z+qDms<4ec>R`Bhg-8}dndDeY3ZG_m__^a}_$-vqnZJ8a1Se{7iexa7KsIVKBs~_fu zsI0%3-uu2WON<4u3h7J0<~HD=^3x=KHH2)dNaR7@*U$jNrd!1L5qSwnRFDixqBV5= z`1$gmt^DJE_5nDu;?32x)gsYoi?VL2_oy652-h+hzN=#HDN6hn?Ez%EDXZP8Q%>t# z`Rc?4prr%MbbTT}cG+7})T=x$kOHx_YubX^SIByvJT|6~XiR*A-Q3!goQGaDs3ZLT zm%bfS$cwBk907U4+7Wq3f*%3{cZgK&x zxurUEa%jJ{>*koQ=6LHi%nHTz;qh97Uro7HoUA4JOaa-BP9do0w=YE(1vw_5|XGIxXR$9tfxI-zLTAdCdq4s)YqQQen0-@_Wud1pRl*_E86`?bgq4Z zC@!{`BhxYaxFj{Ws>3|mase|`mu@DzUMhXI61>QB{rvswTgtO|;=@cUT3MvCLh3js z!c`)T2sKhBAuFL`a`B!yvB(HG>aLPn2WnZDA=la?^QN|apig*as!7%6J-5?n%-+tq z+4vPx3BKhN^<#U(LdN=3fIeg{5UOPZwbw;4zhc^G{UDhPV16$(v4<05K`;lcfe2Sx_FZBL$ARD2A(!8q5Z~oO!L%#Mr>| z_)PSP!eYp0+FzS&v-=H=;HLBQBb(f(jV+C~&&O%g6@AEjG_F?Y-Vz1IrU}|+ZE~*B zhcQ)O%fnP1jxVZzk7uumR%w4ocEB?SJ31O-14mZmGVFu?w6bUmxsa^K1iXOynqhAt 
z2gk;1hDX;ElabX5JeUi^nY2x5A=7Hzn~Ic@BUE8@Qf}U-kFK;2|9RUdrzvQhr z6RQ%Xn@hwQS3FPYYTT=$e8feT@zZ&D-*^@Eq5t8@{%1da4Bk~SVUEehjW>(J6rQyy z`Vwy-l~=`-GNP8t{kv8*kQUzBebH0#n`73u7k&VKUX394L@uul;kJztaznPjT=@jV zcK>Nt#hWEwT`^3DAS=PlMJC^e^0T*R+K+$fkb#TjdwS<#bkwgoMs7!XS3!Z`-V-A? zP_(#p_*a`A3nLz_Rf{g|mB!A}sfAPCLUN`;YQ^>y^gS(kT#`FOA4^jeDJ{&=ugtV> zjyk36s+eh(0?@o+mV$Nf7r*y!_L}+#g;Z)bpf4RGVzN%H&?F7Aj@1sr**{Zd4W@x zo(Ik`*-#k1B>-zUNug+cB>ql1)4Nad!7Putugm^ko2Bi->7AvQOn3!D!6(X8NstO8QDrokz%r=+V{x__QsVWZ?*#8m!oO2Y zM&Z@2jJEJ1c?91Qm!jLjr|k4fF73BbdbcOSp!%gzN0rZRHUvxXJB5S>srgW((1)NG z%ez%B9K9-w&Hp?!X0b7^LBZPJPx6Bvu}(i$%hPd1t;6Hop`xgRo!Au?-fSxt@f>&{ zj5LOe1ZKuMwtYFNm7dIZ`*Z$(Q0~55C!g%}7MV|jW=^uFCHwZ00phnCMWXocHvK`h zC7}Da6=T0>n*V9cAln=h)}cu|PFDzwXELsXvyr621C!;d%&Kiia?Y;~GC5;3NT1~mP@HT2z3 z%$Un=4Bsht$b5yL62xJqeG4F{|8OS!2VT?vxtqY}@;Xov(QQ|ytP#ki`?sD)KyuqR z1BygG@qNu2@S2=>Lm&bf7895gNkGRFf{J%3K1 z@clEIEoZGQyVk121UGwb;&?O6%L}^#d9a>9QeCo$3HQ6hHDhTcd8j_JGi|Or2ri|* zIa~zLSOv6FSyAMoSmTg-e-vKwJQ$A8WSeNImR=++{XoLj`jk%Cr7Sn9F#<%dgkF=Z z7|ZQVxa!uHNAOZ0KtXv~L$JiVpLpEX+OXEf|A>!R77bx63t4#U^E;a1wX=c4Zv%7& zOq-i*WEpdU?t_s%IS{b7HT-;y`MRT!73@-SOPc>MD>&H8YO8x!)y2VX~k$Ziu_Zf>r(*EwzaZbu3+f_+q`lj1@)Ytsl3jAu>- zBuw{kE&WK%39~x75p4U@)avlYuKClt!3J+-+FdfVOSJ9zxeH)r$&MZVN^*SP>PD zA4v~G-rv6w?J*bjzSJ-Jvir;`8p~~f4CwU_{ou%eKOtORhUrJF=#^w=QaX~p?Dd;m z_s?zLRGk=l_?t(Q=(|pjhI(hIvqNk3MXo#9<^7%2?Vz_-=Qsj%i&;5mDji6)r0Afd@NHZ9c5h1th zLex{jljRpnwz9_RPY!pG&Mc;8#P=sZx*t1`?5#ex8lHSuec5kp!9wJOr!p%n(sA&* z**Ve#HSz-Aa6~PVfj{tbxq&mez{g#CDAF31tL2_OnH@XGHKFRB@4)#N^F7{R;VzWD z@M(OuSE#Pt+2^ruC)>th?W={Pj_MlV|3Lmhz*h>WjOz`a-J+BJ%)z4)0paPbXfln{ zK|M*Fa>Aq-WBGb1j-5L~OyG>piL5vMn3yB94Pkz_ZF6H^K@c<=AES6remOTt?DnIW zY-6^~)@Ra;1ubQq@Q{=~evXXCYBHH%8{*RyZ_pbiWmJs5$rhKx8tdt}mXKWImV+xSMWDaQc zMLPH0k@t|D?Wf9Pp!o-PtM+{i{PYKi%>hfLTWS`N!A?Yc%^-S0)^(JvVq-x%C^yQ3 zx+0RA_bN4aRnDTqKZ>`U{Y!5?Y=so)@kQ&*?E)(CZPCxeyDCyEVx!VqaT5qCYO_vR zQg>C}MbRXa+MDVR_Z8iI{1tHZ`BHH!)Ff7U+1AY9bUt>$_;D|)mB}@4to$^I8Rw2x1MGQ(1ROKr 
z_%Xwyz@qGMWt4x$l&mp4@zjPP+kYvCy?M3|jJe0Q1F1P3wFX;0;S4!G-Wo@f-qW{)gEugx?jD+XE0o1Fc<^ zG;BB#bcde8j>ldL_o9zO*#VGc#Xq8xRpwJ3Tyv zZ9v>qgjtab>NUAhTu#&|%WzBt*J)uTb?u0|%clFTm7DV^aFf+mnSKDw;fShHU+$8` z2okOCX8du$;3`s2Gjyt!5SK}|-?q7o2npH*bS>02^<_4Gj4LaUAEMngtgv&zOk&@T z+3O7@Ww?p$Qq*Mnb?vOrTaAcL3+Poq#KEAgH#ELz=Cxwp@t)`OBw2Hv1b1{r*T^&{ z?S~zkPV$r6ORX@Uv?jA{%evfsaj*KojurmB@u)X6!u?nP3FT?iJfw^Rf-vQpRgI37L6hhLc3P%IQ1>|ah zK4hJwM9He7l=bcp^R*MJqg-g(&JuyaDwAsL8Kz77d4(TM*L6jXTPYGcdRkcEWo&J1 zVT-12XUj6KtEmBxgQtM!%DY#F??VFy`9xOidf~imspUv+%MQ5!Q+@t8ht?7WsG2NB z%>4Gz)siZ z$dTWE@Cn8kd8j;^ul{Pg`!LSrqTRmHT>TeY_2~Gi$+^*$o`}gC3iG+d3UXriH#J%;5~L~rwQ0Z8_2f7nIs{G$-=X@1Syh4Qq#{w1zGx&cdn7;feK z{nHTe{Gv5MuhR;Jw|^C764e1)J+;zTsv)V%*JSz4x9pIbDiz$LdVM9UgV zHMzDy3rjteoz`&kR=0drzycm7|B=4^O@g^h@GliXiPQjrt!& zht>MZ=O-5e!(Pm9al>O~>b#}uKIBCk5Cp!if5)qB2gj*{lRf8swzdOWm{w59_#iE| zX&eS(7+)8k4x^cgP^6h-iffnnq7grto71BnMOm`RYC}3hw@BfB7IX_952;Gp>wsCE zyZ}@pI;ULi%8*vtr(nK9Zfv(J;~7fsZvpI#oo&6XNKog~eKqyp?|r>jv;@RL^c=1r zOAyROIHV4@leeqlm#QWuYLhJWp?sO*6k}KA(5}jp!PcL;N;mc{`Y)9Ksa~rq5)Y(t zdih?+0ipaf`5k`}#9S-kw_i=yBE3y% zyM%Cw*cQ-J<`bD{|3a^eAOjk44-{-fLJc2je6Q!??ZD4(pu+6+v=|I0y0;wC9G;cd2?}^(o|WXy5mcW6FijS|B?!&jK)3WhkghT+ zAps`q^eyQu@eV^87#p9Hb0Nb3)+uHW>G^c+Pl1D5ok0WH@T`stUaha32cUlaSD-#9 zz+VG^c6F{kUOz%U5SoVxD!h8X+t{H0&ue+M2{qw(#00Pr*j3S!KS+jl_=w9QY z#2$~5#0x_EUx#kH>CL2Mckmlx4exBDrlZ@y8XM=NY5tn=u?Z&C;p=FK-^I-rTD?-w zcgjyIL@O_EFZVP>a7@w>$w`=jVj7Dg!1k7y%IzR6A?5fv&GCgq^l}<@6HmQ13UA7? 
zmoB&GnQ6^a$Kk(1nZP@y7Wjw_uqyoP#|UGzMEuzn;heQ*IJKy~?C^_d9BIM7OcSG% z(}YkfUXrb!qV=LJ6QBhz~UDrZzEMvJN4*+zUb`+$IaNIbhxg4VU1C@ zzbXq0Q^(z*Z2er`_Rjc73!iISYL8E5T@(8VNLltgi}fcwZm}40-FajI^k%`5CHi9E zt-bb)T%4aDE0%iT{>A;Urs)!9hH_q^z}*7;?R3Vbs^BvDt+_Vw@7_iAhS zkej>3Z4|B;jwd2@dX_uEaex)akKRR7C~&jos&rTVmFnQ?ps#zZ7zR0u!Z$5T?cxO4 zU3po2)*wMb*6|gMvCTL|5hkP`_QvP4B{@I!^4zu>Zaei?-mXfn2f-sg_fhk*9l{fJ zxMMSrKwg@NdEo?Ac^GEO} zy>?ZQ3+ETKqvINn|80HU2E6ky_fo#Ibg)_RKE>;)pc=~DL35%ZZwT6myK<$&YTs4N z?DFg;2VP4$W>1*t+L=P65?5Liqc#twpY34Jr+2NsIwGrs#Jdc~?LE`~Z0t5oT(he( zJedbs4HSvN)b~)u86*YskrGGZjgR=r0e6fep4dlRZo+=mSR>nO;lEHm0H8NxiQ5j_ zF0vJ1xNfd9veIwus?4pm0>t(@S@$nab;zj+PcbmvBaS@(`QQnlt&cW|N0}09zJYuc zFjfar6t2iBv~=n%aYn4P?sS0^G2c-{;zW6&dDXFL<_~}A-Dh3tUTT|8eITvq%9M?0 zuEaVcos;EXexBZoo=HPT4X_LAAa}U4k6edd->Qb5@-NU|=i;!LO4PhZn%A1?b*Bp3 zlsBriKA8+y{9{bq`Y96rs_3HL*uhDuS#u*XY?CLWxXVm`U0TW8^XPn3ggi!EJwk^p zQ(qDXnOfiP+sOs=Uo^a>)OO|9!}y|Kw5JRww?o~TLbkZ>mhq2&>FYmP)pK;sc1_pM zI@b8JRO1!$BmS4O(dp97K+95}(QDsOR*$bW*i6E*ZM;ffc;%B8xRLJ4hca>T6B!zd z&?2Y1JGfUStGj#k`pmOiFh)_f=uya4#c@*i0vS0V27i3OY0V~NRQvEKcyRKT!b=QM zYm)`*t))ywSG0%;qd@~ElE6_0NR0;-wml$ujdLMO<(|*Wjhrc$&ki@6m>S0zK03-$IDDM19`>^3nxpr5$1kg%xy%rY zAujgEX;6m$Lu8?$yM=u&L&suU&oy}ahRKO~`0#MWY!moe79;dxs6B4Bd~`u_ap{8l z!s)nns3PZVZiU>xO1J{d-PicGXop9TSw1SnJizeR ziHz#W+D109W3wq9ZN>e2WLS zKbaywt^Oi5zH0%Fw`9O$?u>VjhPQ1ZTsIN!zHQiW0PSxd8X6ilPw(Ib_vLoHq_htB z>~U#M>%2a#Q*{~f>kef9zilZ$`Wks!JJVMHJcQ)6wcMVF__uZ~VWPmRTjds-4n?1= z(Co4kun7~jYNsw%S>a74dtDISaVUfP_)ZNXxG=VH>azuLcKV*`sVULK%&!>#9(>Gcm-C{pv$#Q%5GcVAW+n~kg zI(%agT1rziHYM|?v7uM@I{4vRV%FiSTqFLZK@WQDb$2lOEsyav*$xqGo0y(E8QnGq zWEs{t4{hCP0(YG^w8%bGjE4IqAM z0KhVOLZx2`>)ZB{f9bNanKf#gd#SWF{E`0kW0^s`GK=rIzG^J`pz+nKGsCE)*i zJN#F2Pb|Im5#e$mwv#+9e>Q?Y(3Vpdm1r0b2^isYOit1CX{dKzH>=aauT4>fZ(KoD zS-?4K9`{i|@VI8LYAya6BdS#qpeHyBM6rXAZ^}1h4hm3ju%CGV>OaUD)FW3y_KrI? 
zyDB9J&?5YY^WHmANWaugDG@}n82@zpBgLsX7uVCNZ%xBT7+qnbzU)UWl4BUr=w(4M5MoG4>&&b^c2?ibqin_3?N$T8 zZU8h{&q+r-3ext-dy0sTA;3RjbNMt9?TaM+xvv7=lEk%K2jj%G!UYuWJx(iS^hrXP z>3Bira?rX-@aJClKNN@oL5EW-Wf>Mp^IH19wj{(=Z6};OA>Jtn*NzYIbL{X=PsyB1 zFS+Nj&Z1_4e(L)jXcyzxMk1Im|ROt^k02zuS3dz@D^Cj`AT2r3ObTS zhM~iK6%Urj7b8|kHk31>P^snsufkOnH{U(J6ED7j`@KOq8emu$yw0DRGjEOerCs{v zO>}^6Jt`VyRGS+;eu${*S^3A^_}P>+-O^is4?Q8a8TYD(glX>&!1a;v>In^V_yY-x zryMU<>%YSVa7D?(QGC8eZe%w~2_5I(8HPf1UFPF~HUIiNfY@(=ULTwEEfn?vJj!GD ze!zx5$^+<=rwx2^^Nt*;x(xUX<1#4Gm~Z#iF`#_Fvo==$Bi6{acgYUt+PLfw$zK=x zBlGTBo+ykJ*kW~AhlIrN7jw7oI3}2G#cnn0MF0$_rn0ksUKIr~Vueza#k{>NrgvV5 zu}y6o&OU2sPOy90>Q_`e@G%kZ+f} zj}iMYrhqGV&00a6?obc(UDCE<`qyN@PuxaMi&s$~2L1MQ&r z;arEKW5Ev@H7Vhp8wxt#KMt%H6+)b=Xt9EsO=(>dA@q}Za&TH>`%hwEB*JfuhefWEX=hPtzU4%}Ks&89tpP|lKn=+89hfYCN^DVBeJ3CE5UBkhCwz&s> zT9%=okotL6Yr30&Kvb-ckg+W>$K{MEZZwQte*g7bWQNAR-+>6WberVxL5%G;yat!j z!0=H$l(3|1`=#hc3$k$g;8@DSP=#HBbs-C?s^x7&Jsf~WqpnR9hM$ux2#RLh;1tOB z_yy6pQ(yVO^WqEa)kY=^?bG*DOBHmbVH^k3nJk<$4OdNmu+1@=pA!~+l<{y*fja|5 ztXC}$!%01|wu-z{lZ8!wY#aLrGYwDoX3&2Uq7^9Px$dguV@?ytdN;T5xM`~NDu7sU zIG})_S*1_gBxy<%9GQU}4)*((0N4LuI`&%oPnLfKejrvFAV--U#0l@)$e#l3>b&>P z2LW<#X8-tt}QE2LvP1URKNfAdr4WoO{R4?>s&aLI#2HHrC#F%M6Assa!ZrZbKy+c?C> zF|ZYYh%g!}pUW31x|HW`xxD>rTmMY;4?ZE9W6nK8qobbGe#S;UA_lLxM@J-_D2Kmj zXb462P1v5B+!qFHFF(9ht`i1HWTLB@(jCShk8M`9u$Z|}m| zVP%biqi_^r@~C)vE(lL}Q;k24^G^!Z@nah4M@-&K`<67<16fG$%1-=vwdF)5X_5Lm z*3SX`*=41Tw}xKg(KJ&xXTWn-T=F_#EB^ z_*}QfZ~vlQ3@PWn*%_IWc~8y08W)WRK~wYK7NoGGb(6xK;_^Yi$=rh$S(NyklDd=$ z>xF>KcWVX!N5;K;dqW&f4(bXoJ)b_Pd$Br9Aj&M^VRiM!)yjCp0m4r_?Z$gH^`zWp zR(W{eJ!Lt)3}#_J+6_~_UU=c#$S&ZYmvthPw|csxD&|5qOk<2?`d2+}J_tVLIhiia zW)Ny)sa!Y7vKe0kD|lQwcEyAewr9kHI<{DuTQxS6EMS6}T(Ozcn2z3)2GfjdhH{kM3${X{mR zc6Ro^#p8!w&9b)vu8LaZ0v@c=td;T-vVZd>KQ%D5JIV$Aq|+p101-T#D<}u} zyDb2L2{2PFs;X}#VXyATKcd0L4f5fCavA;F0S2RqO@F^E=$6U$6z``cISJOW0r^lu z!8OL3ahJnZV_#virMT{hAK%uQYbDm7(Ni*wUgYU5W=FC{boDvfY`j;I zcBlDV2f%$T4bOPXULjH zW})Qg9o0vDwy3p4pL_WHb+S-;OEmVW 
z@Iv!)i!gY~fr(91Z({&t-ax1Ah#PzS#~Dha*Su^M^yQ+nCPl5eg9{t#a8xqjJ-kv#lRqFMk;+ZqH(AEwk@|=16t0FQ1}Cj1XS^^JV#8{jhpd`x}Hf29ig#O7P;FHvbA8(OM%9 z3Ij$KIyo#mcI@DK#56l$v++1+3sZ}B_9hqe9_n8uKSkM}PVL^Pz5UMVE^M`}VKQpN zG~9;_vc=Z~gjJ>0ZL{HX*V$%ExCzDhNg1`E=lYlwo4)m&DJ~&Yv2Lds>|ht?M#&x1pmz8?mwl*Y_#j%J!!?=}nB( z(elK0${LAPLx>D$YO5x6GWcrin=(t>B1LuoALsnaODd)oqnyr=X14Bm%c2eRzA{Vv z@!!`jmH&UE@Rx`2muhnC7}3y%_}W-gwtuLCI*PiOY8e1dakC9JnlM@`-?kG6;0MIU zn$kCbIz)`-1N#aoxbxukyMd$K&OW5>Z#E##y=GivZ^T;gY;PN6dz|7L6Y^|q3jR}< z8*DJ(qU=OH)ins7Tw|l?rs(~`jar&bz#k}mwyd}9Q!j)z77i!N{o|!DP~EBdl*`RC z{QTi`GGEp!J22$1NCN`PtG)L-J70FAgS^*&V?9G0t+BZOnAhWH+w`F^KTBk&kw8=zEI z2eoBUz5;fNwBO-9qkbcb zvvJ6HChc0ErQ-f<*;E+(#-LTv$=|!1Xq?!vPZ^y)zIf+L+J_GupBc+(s3+r500DeO zCIXaOtVvZ|2i9AJ(*wFlJ2q-7GRd&Un&3YNJ63J5s2Mg$Daj%iWcF^FR~tb6PA8Wh z0wG2=O?dJIgFOUF0kFuF0NZvz(*;lz&MYZ@Tmsp*l+TmB-#iw{tJvW)_oEXJKRF(f z0g#Gq0vAYLaf5NU4~Q}0*(nPq)@u%w09@DVq^_EIhqVbRUKkQH(2IH>A$cO1;~IMy zcWi84$6>NI^tXeDF+9uC_hRt4i35x<&s)sOKN;f*LEpu4erl#pZ927U5>H5?ma?Lf zJIFJ&x#!I`$abc8&{X%i2IU#(k>uu|_F4WJmjBBv`?u=<+AHG+l-|*KIp}#xX_Kt< zrG-UXs&Ty-a(Yc8-*km$QSBP0F|)j}xwMoPPJ5`xO^aSfXNMMun8V!%?o|U|o?ak0 zBc$px0!CkQ+*CD|S2{HrhJ#~KMx+UwTL*7YN%Z~3W@Xql`QK4LUE zp*z)UI9zI=YU4EJa71ga`0<@vvvyO^S-GrKs3i>Ktl6)~i=IV=Y!)yRs86F}6H8B~ zUGp&QWEYi#7+8HjCABu{a3<}ZozdidAbK<=1pE8wPq{Z`Xhu_OTQ(>*2Bpbe!@Z0U z#wrjfDm{yXHwsJLg(3Z#l{vt`d?R1JFs;>z8PyYxse831ec8=+E z5QlNMktl-QFZ~3nkWpZR)c>6JLo7P=sxFs0Do;?5nPxloVf`22IHm z)W=BSe8qdw3#I=;(d(!dUIIo!@j(%$s@7qaCu@o~&4tvX4CUnc(J#4&B%HumN*Q7M zn5BvGTo=>$jid`2!QLpn61?t>9&#U9LkB^|XOeJB+{yR|ke5{|wO3+~I{Sj2!Tm0V z-MBv%j?_EFL8d<`Iy2bGsn!R_VWFI|ajc6oySFi2h>z5KX% zYl@3Ur5c)O#!=0-_djZ}k|A$HotkCY$;hoF0R^<#1o}=35zN^gpNN_RW4VhYqw`iYi$oH`hEZw&GL6g5v!O2lu24#%Tmxk$Z3=bYaA@t*L+Mq86c z5?#qhYo5cnv)hmnyY7=u8k*xf>=$RB85+J6tr8Qap3gc6+Sme|KG|7O2xd=WiagyD zY2sjVlJ7;ZQ5wRI@wV2dW&!kr{9@1GuFBx(Fj6KH7VWAymot^az2|pTw!-kcDvI)! 
zU6r@~pdCynawk`&1Q{!61<{zFarRd6^8u{D@MO;yC?h)24s0)*_m0PJ1~mDH2ae3# z;b%+f-K{Nh;q6RUH!~!@(V(cuUshqExw$65x+bEv88|u&L+Cf zm8s&AQDra4w+i$Ti3Nk_V`R~_U9CGU&27$PA`aK?u;5MCJ_{fQ?O*wpCMXgRnOlUZ z=+0Lrg5aE%pO9Sxpt(v+*6rV&S8Xr6U5ahfr!=ErB=UkzQBpdp zafVKjbRX`()9eW51B8L>*lP*^Eh2)F32ZmJ_QzyhLz_7a4{{;3!a6G!anKsdX;%kp zDAK|5t0hvq8Fc+C8-|%&mKrJ1$!jDtIdsCqRrOD3D1!O&@fVXH_F<)iNH2P_(xNYs zVo|A{R|T8Wi$|r12o9sSin>fP9DYeHkVeDBtV&TJdr#`fJs5D60)^lB!fA$3}0zXgJoIr=+=r0xdZ(Ep?zS5^Hkkm$VHS|ck{-VQf8hz-SD8%8XPC$5 zaA_Nuue&U9Q9cjZMV#&2xwm$12{0D<$xvs4El2q`?CX9g+lETctL5y3YJh41JR05P z_**N7;56Uj$1=ibY$?FFLucg9vo}OtSf>4}4c*|h-uc_byDF*UxtOk2yO|*e9|!Y} znJCZT)wSGYkc&}=hjLQrmR;z9uTyUp(QwCzoz--A7eJ+@>e50mrKy%kxltXfC>n*} zdLCi+wI;u3LoC-UIxp<3vv|*+fpJTd^SpI(+`E>PhqGdkcEOx`fWw3zQ=}DzanB$F zG=0S^keWbS9+v{$RT%*zm3j%E`8$Xogi-R@hgB(gw_Rk2#3a1VKmg;2JD)%X*u@LsfX5-n_%|~|91nfPe zp*(a&wCO!}Fv^^<5YRO$#W-4j*<$C4)E^qg=SB6`>4Tr@TX`xXZan0t=cV9Y_tT0w zxo_vmc45Yv-J}5XDTw*Y{_EFwGBe3(PdLJbDVqB5RQS+F%ypRo=2)%Zv{bpj`d;i- zLNO4EDCsEG(0DxJBk8cVh~XIzniX_9PdM6feY;Q49|rpkw(VdI+;=W$%*Mj_6Pg2t zX!BGkpha)r8x&WZ__Wv5%Q!b^8uMu;1$0Zw)O+_2*~|}=A>X_ zsg%7S*xqqefQ?Z0U!TaM+}UEEXAzRpU5t}S%hk%4Y}XBb>RsL%49_al?XZsSe6bOg z7`5C+C${Mc$%Phuko1*IXseom?VH>e)6H0~>2~YmbqGL^q2fo+H_G307uU81fRAMZ z*Spc?aRe#=#D3MnrnEoCWOt$r%$@~n_Pn{T`E%_|qojhprgyuE(4#dnljA!fGmHgGh%^0I%kQcm3L_gc2!nwd(#kW=^z7WWVaLiNmw|E)iZHE z(Bx#yM1fdV(lF)*_B!2U(Rhx9#U6jQfST2Cx)a$`dN+sO=)7KJ|NJyXD1mAMdhlBf z*V$FBHB(2bDk?|q{%@wt{{r!k!V?j?XUy$vSEVyVAQHB&n(qvY_ze+fGaG zUnhe+y45pF8re^1v@_{$`ejqN?(G{%nbOsZ!go1QBzcWoo%Mv6O|`$W3<(LD7f6#k zQydPz5_8?F-@V4T*anYV^!%jN9&Qal-0L!Gxt_w2z~KYGD|6vKmb{bJ49}lfJa<&E zE-Sjw1_AXyIsm9+!0RW z{>6{$KR(iyuYOYAO=uQ2cc&!4(yG~A<(4-A1S(Wrhbb`hwBW$|Xq?Va0=8)|Jx5U; z1du4wT_&2a_jofkc@2{1#%U{p64XUZhL$hYr$vn}?^`Jk4j=H+!GXED>5T;g0T%&Y zT)e2SQb;Ty7EqhNHq_2h+C!W4Lrpq^p_n=KvdlXki3YYWNBWkf>RuZoGq;_=*b?U_ zDCj2k>{ZeQS~?2nT9+G~OxIYw-nH}08=8A@eaamujyDE=4#>x&OCqOARmP}OG(zWY>E#JFIl5#6x zbXO%a61hc`>om7Q3N2$;3{ec^kaQG==Cmn}XSXJMYxZ)h*1-a)L>~6^5-mon^`QfX 
z>n*^B$PCb%AjjH>z-Q5lsi7cZV5dcK4CxKI^PU)yB9LlqKrZx@hi%KF4tNW#;Sdf5 zz?Y1hljg*Ci$KzZbt=6Gi$i(_1=Hg1lE^9F!C918U;yJJJ)dXj*h?t0QLHm1AJGNw#d88(Zg~Ps)o+-i=yl;e}fK zAhTsUCm!JV^qI-V00F{!(&cA6%%Q^%0>2$q)vlGlcsBuUn?QG~qk+TJ`W&2#PlO?P*j zwi(=MX2;#wXsuD>h$HrG1{23!ZJba^G-?!);H(IA?+nH`5a(ImHV#b%i6SB*&^SPx zw-`YMAqr{?f(nkHSE-FL!waNMG?V@U;6jJ@PDpnI(==kFuUE7alz{S)L@6k&*ny_ z7bR~OAZ0V$xvp_zlf{Yb-GWAo>l*Q=h( z6QwIRv;3K}ym}U;67FWdr3V<5a(-G;1$M(6w_RHLVe_PlQ!6C0=zaT+&PR_^Mwv6* z*sYm1&i3ki`S|M{&e%Uh37z%G-J+PK?kyzwe%t<&_wdJD_SGcgyrh1Lt~R8sqfB;I zwvuE`4he0%7{V>hZ}HB9y#o<70)K}CyigTZM1;G>Y2IKvu*9pxVD60))}}Bg70E3J2oax#x3D|qtWL>@&ULJ>e#qV zBMf6#3NFidT!Zr3C@|M-<#d!}7W2-Y1$b>Xo0!Vqw?MvK-`UTu+*AY4 zLNTpW_ZR3KU`&2K1aJ1^jgmA^fr#UvJcHiJx`@peu>dgZE2p!G>8;s!bR#2yjV^G_ zQDh-gQO>igLKqBiC&1Z{moh41kDOI_2ts@30Pk0M{mkB<_Wr)4gl@4#VisxRESx97UY zTmw+v7Gdh$zf z!Xo&O;r#i%UG(1N(OsU>G;Y|*5RV*o22ncHii^Tp$Q2hOjz#2qGGb|?Fqys4uDrjV zreE>+ZtT)^p2Ia+VA9L9^J$pImI@0I<-GQu7P^t8neN)1dmIKyT$dyX95@a>82wS% z>0#=yeO_o6c_7XglI3jaa=Kb+F<#+Y=5= zqf7AOjxNr!{E*G>}`kVN&1N50ur3WSDynW&B9_mdzaWEZy?Qrf0L%lhYcy+$E=zmKCJ`;n1T2>R^{(Jlwujf;fL4 z0!_KgvoDCecsYZ)CqEv0I%(C(fH2B3nK8$Gjm}W|AdE!s6hvwn#_FEM41=Nc_}Q+r z-PAnIQclky+4I(f`DiXRCbRbGNTRRx0qh2k(s-_%KZ*ST)LFgSbsT2jqB{~0MeFHD zEe}6UNvm>s@b|tGPU!;YevK*$C!gU9jGjUNl(Lt8*+G=Jo6|Bzx#6ie!x{9g>K&%) zy=)v#qTxzLEg~LS0*7^79-e3}^P8A^yXnCz;5m+V%8$Pre4K3&VUE66=j<2!%9)LXUGv? 
zdg$l2om>@jb#Z<(x`C}VR*jfu`Nmkn0c||sSX2n5>p*zbgBf#w#puy6DQSCAeyaJa z_8Py3*RuoMyVE#lS4Gt2v!<{|X*!=R?!bqTG(?_~q4QT-V&J?U)z~pUS-E%~LUkPa zk?4MdS4LV1w12(*;gDN@pv7=PdQIr~!==6w{nQmBT|uerml!?MbZgn|IXB zAAwu`e<+?g{Lyk9acVB;?*^pUZNHBIQwd3kNA$Q;j^V<&FAL1&gy&VS)0|hc*kuRi zp5e}xklZ#7_uO<%pO z@7*iiWy;RaQ3qZBxpGiS;3m~&?bxCO&xl_oEz;2Dj31sam9O#IsiE9wgcbnd)l7)i z<)_@oe#^Ri(02l!6Q`_Pz;0AaW>C!pgw5^BjqTmkT-lfX>Ge%%hP^4{LvUiVMkoNS z-J)}P$V&fRmZak1F?UOaCHmahUm6vrZ(Im2$@z_0Wi;Jn!r0lgXH+jF5gm0iD-73Y z9_m>%8y5T8bOfyJR&@3R4yCM*GB|PDDIX4jS~FxMmTw8aFARZ{<=lB&XDt;j9W?2A ztd)3NNo0MC{WJ2p=RIeSVGG7Psps6#Y6j_VE7s9CUEk8z97ZgS>IAyzJXac(W(rN|UyTF2$D2tLQ*Wa8ev*F+%z#;?Lmj%K zFIfHIP+usv?YJ9|e2ShQ*IF{YfH_l71g6YS%h+A2TL3UW7%Vk?T$>M66|Q|EfbN!S zJ{1s!}yBvP>@HQ`)o2rTtN$Su=x(P{FWouQWgLJ{!k;8F*|LK#4AP%plKM%Ck5AYAbu}vfNiFxyC5L+BksFlf_x)t zU*^Hu0veH$iWs7DOq0gH{!3!a@^wn@RFWt*mq+iRaThHPj>3)Z_T|FY7Kd2G zOYDKfp;LqvU3MAKZVkClRj1`v-v|enu z3k3e0)&adQ=0zb+xXZfz?@Pn~e04?5iT_%EVpoC!9q!I8LfVfZ{T~K zu0+3;x%^#_A%nVfy3`UQkMq{mZkhWDJUc7ysg2*SVf?G;epDy;(xgd!*aVf~pdtl- zIOI5d>J?zB^cj0EVnd$zWlU|;heNPDi|jb33kai~ZW(sx8-mz%?Vm0Us7cVwfUYi`Yc5GK4N?B;9|TJ?;uLe1dU*@<>lD1j!N5He)?BTpYM2groXe+X7 zOM5iFBddVhBR@Dj6wfoz0tcK1shhWc|px z<=jva5f;qzA4>xucI|Pew0~1123_huAmvbBj zI?Q2%P6Eo6&*)HZK9wEheE#7OuT5nH=r9rIfkcEy9Cqtf+J{40i*e5nhc?&lV_L@n z1+gPb^D5m+d$A3e)Ie6EFu7^}B}&=9-ts#8=KEuf^t3pGd^@44>RoeL(e_q;>wMVQ zD=Csc(+X`^Nt7l?PQCCSu$soeO?@2wlWEYvAWV1>jv7*WB9i-JXmkIm-CZs zchj?Av}1^{#ym=$z`0Hwgv50|Y>)bkDgnF(rS7KkB8_k4@>~k2BVbWV!Sd;>1H17v+JUwpUsgMwB$gM__UoBW zdk^IjmBHC(bABj6sCanME&Ug%?!Dpn{?<)kKWMTqq>JS#sOYBDGx?2C-)05vz$|W# zX>so1UEQO*JwhW>zv%ml_Nc*mdBNCGJK$01E|t78X*QF!HUp_rD&iP|aptZpUi&!* zB{&%QI`3Cz2nK{=%L$20m0lyPuE;BOU`kbZ4fTTRM5eC<2md~q#ytD^?DC>Ih}W4g zQ#TF7n4|nW)%PNn_u{r>WSNr_b83-!ES0Cp{-LKaWm{k}CU_diYNz~S+El_jPt|*G zbT^5-2ylv_uDjR0vi1C?!rE>EaVh&7$LZ0ng0~BDd#7E9Phx82-T8#nnLyk7$<~Sh z-rRmz53B;IyaZTRo&Ww;0d^v=c>DpFZ{Mk@h8ixEG`0Z2V6vgLeVvKvyUG|lhK;{X zR3*j4J{NE$0O#V)%CqtR`ahSMLmuKfBNOe#eKayR!G%*lJG4ww%c}Ohnj8@V1l^-N 
zfcH0GZ@Z>nb8bKVCPq$U*Z45@k7c*S->0T6zT>{3raxy{)i(ik+1ba(XM8e3?>=1; z$MOv)b(p3mB~D3Ky)m)JlI|8n!b7>)6FKRz=(M%i+bSiEGB=vqq#Kt&-9gu3MzPVE z#4PWtBb)w+G2CnSQLMQ#T|eNr^eRL16rPWu)4EF>l$d;J`??G&TS@F;btVY-$}i^V zUkrh)JDyG62_w5VVnlw__nrcV0pR9ZG8vXZvy}ElNEN^JdoRvO2y&!A`*E~wLa2w2 zMOW?Kg^8ucHre76+FX6xj>r930S2t%Y?RE01>*DW9n>e5s~74$SGbX)Z;S>9Uyr4p78pg47>btuRxf{}C!5RCBr0k0?iluetY>D!<)-wq&o zfDFy<37`)@VQ}Gs^Hq-r{c*h>#TL#>XU;|B+D}4HOv`Bk)ff_n3c)BCtPs92su`Crh#+aM{pn!*0mK|^(vGL zoK`Xp>R^t}kQ43IDJqZl$iRQD!<4A^hOdXY>sFu)gVsVR?T0oX1#>RrC`cdoLPcKG zLGYxmfzq^W!QUm)i0$69m*&Kh>j67rdR~l8EzF&#GqF6UfBce=-vF6ea#YI@U5e~V zY8i=s=AIJ1V|#)y66(OKEHDeJsLU-gcp+oyF)3+vQ#p#NIZZPqK8P^To|X!f3>DKv4w%6~H$) zch3#_mo>&gd1b%QGckJQa;+EW*qA(B4-5ecqr#jv1ZTq1aJr(3CzRNCm5VRvbnr?=F|*R$|;B+@u*|Am~ASWFxy13pnhw;Iib_uUv{JAP()59N&dW$Z{y zommc|a4!iWZZtayH)hBBm>a}I>iYo7%V@To*dw)(yy|E`w}>V}Iec4dqaQQI+deY9 zg7Jy?(-s)mWq;i~cp)v69hb{a%pwhr6`tUe83oR>jQFtU^$}A_y$#|l_8RKQwVK?l zQ-t}fHiEg+i)rz58LxG*&u2M#QIc`2@fXuU{MQN)a0JCapyn+U*4qQs##v)U_WW>r z9a~xRJtVI-EHzzkwN2ibU8%;5o0O}Yy5uJ%q{qzTgZrIrk|K;_{YN5Jt{y)s4`n)Z zBT^c{X;}0EAr@vg1U4djS^0svIg*ifxeVs%`(txAy~gO>a1WPU!HBep)9pv<_WN~S zh%EusAG_W>fR*NYu7DW{1NJ2y+wd${H#bGilsjVx2MAb(c+Vx{ia;;nxYJyZE!j1G zHo*$ghIp1ph}?D$lL#3}<#5QjwAPdDZuV+t47n3d=SXtn4axK@VN@u^6r;sKgXl0g z`op1nid`)haigj>V=0aP;z>d5aGu?m9{LAb&1}k3ZcIwFt7+?8%WG-Ae$yq5emS1q zS!K=P$D_N{%Z5er%3yqs%F=i`Eb8WH*I6@(kpEa2rUwDL4ei!1hriBP+6m8UZzRaC z%YebyI=$fgH=*yQLk!H8f=3Xe?9PGC7-l zZ>;>Oc;u?D5qfYj?;1S8aS0h!HR+dEFA8&vBBTx%7xBZxt!#zaXAzORLf+bOQ8(-a z%AGihhXA~f;f%Hnp|bx}&yD=J|Pq>F;VGTY!A zf|)Z-J_G1(zlSTW2)^PZ%r@`uvrBUk?p9y#>tnC>$H8X7xx|Z~&MoihXIG_djf^B3 zk98f!sV`LtsXnnkq-uXdeKC(}*|dmQG$;ULUnj(H?Ftg_-UbN#$&KhXYT~WVN;kaH zNO>^3)#~dP%UIeedx8h=Bx9{=1lBn)y@UX$fq5~_uE4$~)O9pqIEycKEgempxxczT zPCo9)_Y+8W%lTsaYVBw5o*GUm`9r?S`fI3?*G$E#CIn!YF6lS|;i=bKf8z``cWNyi z@UgVlQ}gq4xD|*)Lhlp+i4QGCQ&;fK;rP=Z4qeNzG%lpXm{?F}Zx5iOdSf;lB6v7{ z=%l+>gwkMOTsi4Jgqq`J;G>nI3I5Cphsr1F3wD|ETIbuqh;Vs3*E}RvJO=2j2w@u>X3poNA`URv!@Yj9F#vcko>v*icT${Z&KvfUo60u 
zWaG$ol>i1>b`QmZ{3^c}%bqem=V4W%%=Rw)J%W6sfnPlk)h2+&LQ;ke|u-yRL)X-Qk6_GOpPteHMBifx4>6^AU78K|+sJgvl z^k0{Ivicj3U?D|7;QFs^zb$3ChCuO?kFWb4>w!%+V#)@&^}l}f-l)N_F)N^_0FPPM zFu@iG3!B+CG&5vl^31Zn_|uz{1Rl*~s=#&X@y1&eiAt~0Vb6my$Sb;|ktu^vK~!f2 z0cBsk0v@brsh)ZE6R;fo?_MOF)J$oV-j6-&{?-G6xJX(J0 zR0+LFWKwVWj}|j+iN%9s!!(3p|HU-vJs!9=Y*bMK|nbj8lE$$2N#2PS?&fUDzX@ zZ~y|CW{w(<4#&dL1sMlfnmcHHvf@&IrgGN(+ernmQ~9Jc`1z7;!E^B@e}o7Bm)BUc z@<(l~utih--spY)*8A+g14)}P!5$x_>Hayr)o5}A|LUc7>2JrYiQfUg;7|J>4m}+^ zbIJA33gW{{-#gJFZYL*P%CGik`710Bdu`h9yL?F^HIoe$?EU9zjDX@lzG66BjE-Np z?>*j2g2Ck{R&#+zprSm#`KSD?udKjOS+M-r)B^a8GN=Ia1OP2-lGQD?EgOvg<-hxX z{^4PxMOSj(efX#2WvU1-&Mx*0H_8u`JecXb9M0L)&3E~6i&jAv0uNdC)8}|ATFNl> zt3Sjw2m^MKD|^RCuOF^b(DZpq$-v|C;o@!l7u?dk&`I#S+zJk|OCY~2Kz1{%;eh`e zzatQchy7K8rFOxVAA{w0-Z>q>H3##*4?p2JMMkC#bU^CsPQ|3?X?pbxhe$QwySy!pVu+4 zGz`J?Azd<8+^=^(?{bS@73sorqETx*KTLXC?=dj7om{e!ujAkW!<;6~ke^=u47aHu$c3waf8TG|f3zukl#T?jPkMzVOH{x#eDvS(%XC&S#l{kCNK$2@- zvOLD0hf_jh75f71ywu#<_PmvgPsd0`+ylhDVF@o%Lnx~+vTkC+tw+_ext@okj=$ZW zzKz$plpThDyk|VT8)XpVn!3(CMt?p?y5O2Oi#@kisE}I+k_74I^d90E4A=oTizNq)JgP~-Qxb>>@FuR1$bI{!N?3qTP^?U|@h{l^}EqAgGw zzf1`|dOoxW@qOL!h6C4yFX@xeHXGe#5?sAisyw%G#?W}nZ_?qPv zfkCY9(2QtkeBPCGC(XXe@_BSg3Cw2k4(zPMwvQl#@|#uVcyXe1^LcF7nq~iRP35X9 z7`8DoULercP^^=VpRD64=b{e?~s zMQ=QwDrj{xCB{@<*pM@tF8q*OzXfH`gysBncoQ$%TD~=K!pLU!n4ElOWuLda2g43?6VqZI?M>})LWg`cjfe}A0Ijff3 zMNo=xX$tUlH$gf9N=`~60lPs8EneQzDLXHqCnABE72qZrC-Yd^Z1VJVEjrWq3t%W0 zTv=ao@}%UXJZIYkW(~Mdeg5SJb+JTxb^!gjdo3i>db$SeL?TuOk?2;)l34>R_R}}d zLu7;vCJbqB-tmw`tiyvy9R z$2FTI`f_1{9P*s=Ak{3r~Gl}y8}6) zKGNl8EfL9>EAD|_^prcT6qfN-At{Bd!uC;m4L2qFm_S$#A4QIml0|65!4sxfl{;2IT4?-{3EcC4e`N!Q%&-25^l|7b zQr0cu3y|{bZb{qpIqmonpC(=;`-lAYG(De!{RI6>8ABFoC+xyp(T1zzbVVx@>@e@yG5}#Wi04xnxl1wW-99 zGtT0@N5#fU&tB?PKhutF9xv?a3pelBdX$IXI0CQWWDT2tu{awfb~RwbW%moBHydS% zC1rDulpeAj)E-E$-qz@IHq>uS83O(UpqJ0R2%~p(cs#uOs*vpFI?*r!Geqz#IrEez zHz4rzWWSD)y#cs>*@gueQ5end)_I@Nca*JbW7k|6=>_#psnlRu^VX#hX*dv!*~VTh z`;CkDbJ4l=OT|Z$*@RgjqxI@;03xRJfVn2PSu_KEj%wew4m&s)N9-NlXc)7ocSU)J 
zYG5#_W(>k#A^lE$^G3)T(XpzIsWlw_^a~8#f>^4T&Pd{z!sWTLeR-$sPDRJelBf(Fjk0og-r6VBw3Fne%im5h z9BSN}`ViqwoHMR2p4ZC&Wq2t8)dFbgJtqFrt6Ut$f4w*ggI?MlZHkGNRmrT%rOexZ z3^+w=ozm;ImEm@ga5FY7t-?R{ARfv9GnZX)c~MK_+@jL74BfiN4xjQyU;7u;_0$Bx z$J`s9hhp-*x=}~kfs^`<@5X|o!tc{+4^i4K*#{d3+asl%q_u*;=W6vS8?@iGCRKxG?WmuJFdyXbRUXKXg-bu}gp6ze* z6ci9FA-$<57G_R2Rt1QY?{LWNq@fd~B?=+TwJzQbY#SIL z;}LL-VR7r}ZZ*41@P2&gFhN@)86HgRv|psh8j@WYIW_iiP=n+<)qLdR3+{&797mWN z*uDpdRI`3#JQr@aiVfp{^z6zydF5u+j=y@;im-b%)XJUs@YOGpyFUTF^FJ; z_>&&XCJdd9TFwtyK4(WXpQ<%UUaqUT^whJh&;fB;f<>VUJl8q_ZD&ojFb-wz8NPL3 zPCu~QOWw+x!kJ(u=BX$9!~l>o(S3Bg8YQc-Dz{mg41JqVEr?ou*N4hsY5Y3vo6YGQ zy5ab-BiXO-U9x(CndrUbE{SbcReu8(Ymg5VVDt`V8-9Y=O?8-g^M+%Wnd;K z;{IIyVWWxFOPI2UB*jjkIdqE{h;NKtZpJ^yKAs%_)&HuhN)OqOg>!=DBXeVW&Lr?}=f%}H; zh5C6qi(~$1Lq6iH`AJOi3k{`|y~@%UyI1eAq5hx#t!oSxxfq{49SP<=|F7>7S64-iGgmI02eDoXIq5(1S7X2E zW%;Ex^^H8PLFXo~aKup{qYDE={-veno|m=qO-&npC(gxCwcx1rL)de{8xIVt=F zpz*>9l2;T3ZaA0&A5iu1Kd+D=^#~f2jI^{C->;o*14BF}?p5NGxJHIXC5TJxIi$9A z@9-pyTCKmIA0CY}-nA?o!)HFPo8^f&jt|S^hIV#zA{-G`fQWf#y|vSJsh85kvP>d8 z509W>aP_t(lkUi@uPRIorRZ@*=NZHwkJ_4hrj9#{%W z1<{?ZUG0H!C5E2fyiJD1340)#Wdfw1Zq{`!BYFgrsT4*MygkqY0)R^6M_?xbYh1t# z;&s^B>HnNP*t`lrx0|#jdq#Sw$^IJ`h4{d+sWGnac&$-# zYrIzgb#4s$nby1iI{EEW0AjP(u{*Ib=?=O!)Kf0Wo-BPEP6tXt&#%bSmMQJ?B{)ws z+w%Ha&U}Jn4Bc{IhqRV0L8OSB&UF3A)%{w784S7%K%-^?w! 
zm$^TjrHRSCSyqGf;VwTAEu@L|Zn6ETdmwOwnJh-m?3dPjCgF`nLsLhB{ltCu}r&Cnv))i`_Tg|=7LH$l^>CBoUHn2b{eRD?-1Xu+@7`e zHw8*6<0fUe-zC{lC1t|{1cMn>?oLT{3?|39Act(Md+S~Vyo&c@<3)?W^a~4t%Q1E& zLW_R&SZVXh=Rupvdqxw~_kji-;-|~I-kM+6n>_#Uf~2=z0?O1wHk(Mj-jAiA6ZeE)uW?sIM(=m)tG4``8f}MjvsU z(l0NdvF!A0ew`BI-sgzb<%|QmO6W*Ds@*{k&Ep%N4R7rwduq3a`ComJ zrRBTlE$o32&-(h;1=$*A05YTLl4D##RZ>K@xpzt~g$RC4@Mh}OjfrFbp&^PqBLz_f zfPDc#!9m4LHe5y9ep{4XuJzKj0D~^9uyBqZC`N4(MGtG@Wb%CS%CGo_RcY9JUt=0) zM|o5hKx$;LC5GqzQhE_`E~I=JW>OA&clh-gRcguM)GUen;aJ^>+CXz{l6Ja4k)6EP zJx=}fi0{o@95^8`FRD^526ruRARBcx+sdunxlr&3wy2S%i{|%aqNh5%LwuOXFb2^s{A3!{pJu2==zPSSbpkj){{j6ywGMFPb2nIi&SXOT7C0Gj?&UO#U!VH|u(7 zqlI72d{@0#a`}GG8NsLHPXtjQs-y zVg3lntVaS(#f2FVHstKT%)%1q;TGT9Jxl^ubPGGgE!qcaq%8KN%W>6AVCJ_`)o(@f zlZGk;DR?(C1#fR(Rn=EcyF4;R_H!uDrvgrVT`uma8@u_FJDHu-J~%wjRNg{}bH?0? zq2i9Ey!+?Jk}Hc6j$e`1cHa~y$_dUq{0lF@r8>p$>)Gi+L4fX*v3~1KzMv}jcglFi zBRXXpI2jMr%ClQ^V-}_#31PHTc-2SBJ~8G&l2NAks?x2f#0el72}>H^te;9x1611JqbDPTv=ZoJGcT;0#<%;BeF7;3{qkH#ta^ z4AiAublqPaVfL=o!%C_M10%zMGoGLpy*deY2ea?_QoRe z=(a!Ytiu0t147KTy5@3tB0@|$JX7O2iHr{f>Gc$ynM))YI*eA_sI%w6p4Nv2Z{aVR z6GmQXSJsXv(|0C&7L_BCT)fG*vU6>m?y{W~NgR*2J<{VZ@D4uo<>E$waug^b*Egui zdsI?JU)I_r$s0+#&nKz2%UO;;z9w7}Ei>X^pl?MioJyYB)f*HggPqJQh_9rjK75{A z_2``TE;!T$%C8r7CZzVYZykXZVpf>Sm8D4noSGb5b5{_W%FPlZGIuK}xr2F@Y=GAN z2JF?IJ^}Gj-4C(Y6FTq0w6j*!r#}AL%DweU z{A$0(#FXIM2GpG$$E@i8ekj9zRHqoPv;W1!!Okia?{6@pr1_{@S~QCZyEQR&cH_FG#6a}QA*i9 zc@3gTG+tODogJv}U~Fwzti4p+NveM@x--t3O0wCDi7`m&u~JxE2YnUmlcT@tQd}BE zX87H-u;RKubNALbSJK>nNP=`}{$bZ4>3|&XGw4alx2N`xH*D6#_DFi-Y^lw`*O@C@ zoGw~ir{E_7QgeP~slcj!Vp9|~?1O%Tv&V`rhWkQBoDg6sBmz1{^D|!RA*0HGLWXY^ z=?uKF6p}xv_RmCxizMc@(JzPU%={n~tz)Sl4*gaa7JHU`pRS(`J=XcELvoovj61HM z0$4d7z^%FFo}7u`Cvj-@GO}kBKDKsIsjzaM-2u30`0Uw9TidPSLfYC-|FJkaxgT8; z4!354@zq;9m!!3Nl^d&FvF|dWe-R~gEgN%}ddnJYwje~XuOa8-3q@sdYm_9LO=6tp zTNy3F_~~-jevb#JQ?TYp*iuUS@CG&ugHWE>U}35Qeez2wJRMS3o~7u_v;ySUJSt$c zMbIovSX;aQNDSDR*t?u8I)d_;!eC_W8l%UWHp(>kyA7Z4%575va>xvaihAkT$`pYQ 
zJ55E-l9-A#;Pb*?sAit*p{=2;pu?@==bA@LW;I<0rv}4w%y~RXas)xexZQ2uzk|T}?DIswI(Cr3J=cdvs25f!ya((08)q zld{71Z)?-Z-4!)RX1`1ed0>DuSnTnG<#lmaH(JafbiI&Zs;O5Uuu}VXv((FEu?hB#n0LkW!CW{yvP{e z1gB#aW*tD~W8tv3z_o~oxS1CiCBr57-OQDpBzRwC7iO=7{r(@l^#A+~sSO$GTKi-{ zSW%-qV4gKv2y?+HGK0PmuePEM(r|?v@-$y@I(3}k>Dl>`(k*nMwhnWd)8A8)GsZyZ z5v2>pIBy_(zh35cd7cx@o3UB!2rw`=6hv%4?ME$c2oo>M9d_mt;im)(jU<%cBS6mB z0no5c+6=Ww-A6D!%jxBFMCtYIs^Bai?QFLtMN(x$B7OOO4CFGcZ+yiZ8@Sbi`W%K% zPP;`34P|tW3^6`qgUocZzS{(GneG!T?=BI0W)DGsmA8=C&e@eOg6c*Ejk*1=VUf`{{& zhV#-5q<)~&ATaeVxD^=+LeXV})DDfY*!haDP9Yu_QNO$L4j%hsDi7?Ss)o1_b`~xPEUSBrcE-z4s zkFvYhta75QjeOmBVIis8ap#tz1SfP?9w-f8Z0qRRh6xMxD{wL2FyR2^0ur>o*OB2- zsr>vN-ek0iGNzD%I9NYO#-8gc6b5#CxK`3HfFR$jtHm^qcH4jWdmrg#&i^JjO zM%`2iNcR^f5pq_%cu zLbV-RjqXryor(h^L)HdGmC*b^YC1<1MF3*kWeB<>MLj3t^CLJy-zui&AMUYFO?T+h z8|i<`ZrexfG-}SFGnY)ggwu;?-8=7{sDEWm>Gf!+GTl%%Kh6bm!ml4|SX@Ry%JWb2N72l0L{S8fe4q+><2 zds)P1SvS?rR8955;=x@hMqZfGv7ylm+{41~nc#$!%KGxAFJkhp%EAYtjWo^to^wlr z2(Dn?DnDHlV$W)+qEvfFX(hKRHkcbjeSc8fGLtoGB8c@i zJj-d7`WG=GtbX6@r1XFa(81TO>+auA@RS?-p`$~y2oEL!l{~{~lNz;;pANe|Uf94h z&n|7Q0SDuB3c3bC;}w4AcB?E-5LSM1SJh37?^q&mql)%rK$byv_Gfi)!p)!xn64%Q zn&9!>fAyHQ91+20z>E#$!t8uh=UOl;MkQW`|Y{^`pTKcV=VJ7{=<5+bfeRcEJdr}eYs(>kkK}nOiPbU1AR3n6}Dhs(n97&relUmALXMW zSYC&N2>n#V*&|<08RK$tBB%1KAGO>sa5LPdCfr9?2!S9~+^^#sqMN;O@+t^`zybg@qS)E6;zk!w2rgo3N+ALn{9*q8b_H zv1aI9nG|Gc?vuoe1P+0@f0X!}0Knd@+`={s2j=;nz1J#DpResGtq|kjk-k;f8-8al zZB@`#;8yYVn|aA%oKyd0!uJDoN1>`f8nqNoYzb`pxuY=6t&)3gv(a^LpOwfH~Da=>xT)%yI+AD{y#R$QywnEpfYgtflujpV>w(+$fZtnNjmnOwhCJ*yd7==;;?(4^|7%y|4xYLBnSNbAfNsr7nAYYOB@ zj~6bw_dc$33By6hFVZ}V*;MzN*EtNU1N|P$A!{WNd*zE1QH0g)la4R1YT6Uq18t*8 zOzQ=~i1+m>3*G*A$0b{u%$m^$l+LysBaH%WfmI-R*=6`9-k>hV+p~jtB@-*%DbtAM zIz{1&({*iCM{^hN+)jdPuVt{yx5m@oXh+va(U&uF&QaV`(Qw&IB%IG<(bdgsQEKSQ zaueOXYuievO=9hnLTQV_yTreJF!Du<*oL$ zFiXnn(H18S+7cw~|QgvzEI0O#?ndg>ZF4x6*Yg^PHu_!^Numn0iE0?RI-EGI5b4l6-7)j+0}FO4Zq{9n?JC1HO3?T`RB%%{n^N(zno|7J zf413wy?<3RhOg84Ec+sjyzs*!2&JLDY%I$Ix30`RZbl%kklT)wHi2#@*Hqa^T)~$I 
zbGR&ABE*30+ITYv8m#6OlPAO_tRO~ME7&(}qBpH^6VF&{$@2B|txFSR;PUXzv&1Ir z+ti1#x$k)4Gw%TcbB6%20p1&Ax2?Y%@heBq*uWN>BT9_oH`D__*}&dA)g+MHjBsgQ z{&HjZg(^`hpzse&>)QS)CsU|DLoW9(GD^HUY51>7H<}&qhH@ChsYR45WIg~W)^1Sy zDWl$Tnoe_=a|ytjK`$>l=~8z9D=zCvRo$6gakNh5!W>$|IwqSC22qG)HEnK=D2MY}}4}L-be23>xC?1ols_J3~v_|}Yvqk!&p+Abpu{F}+)Tx6O(z+=X zw9~F`=}bFM%`yi?X9PT118vna8x1 zp$H*{kOT;UDno!kC?E-tNeBTUBq2;;_T8T8p5Eu)=l$L1{k{92y~6V>_S)aI*V=2Z z?`JTaZ3U$rgi0MuQ!p@Gb7^e41q*mV3l1&$~VVMiBh3HZD^|OCG21nHw_-h%7$oC|S zyvd-9;GQk>cf~w}AFItqxZEhco0jhK03FdX6~$I>OQMqEF1m$GUPl=P`drGc3B1n0 zAM1{1Fj=xzy%R?~M=UxGtEl{nx~-ZONuBPU%~3QvlqK$?2EQ}}V5 zfoyp=`WmdQf74z+fi;o4^)2X=~6dz?gFj`(8ASGnCbIhc#L*A;Q15lIE=M8Nyk z3wvgDq@}*w$Rj(N4PYAiyHed)vk?(W?aiS&Dyev)tAOa@9Mlvn-wiwSR+R#~PkJ=N zf*sI7cDQ@3oV#m~^Pr18F+nZL*bvT!3*9jgmZ6ryS5g*N5@4x4=z4hC*1vv`6B5%S7h+I+zeYFYTc&cdDK(I3?2 zdM|e=TK{*sq^_!$?$MDXGXG|WS`u= zkL}VtKwz=(UH{f(|72?2-(6B=lYPB?B*fe7Z1&mrfbnDOGOl)po=tiFJ+<>~YK&tD zhj}cXY`jjkvI59&0aZWlO;6NBt4bbSg;_bDJLeP)o%)DaT#3&^W^`Ik3s#!`+E0;$ z-kH6Apxd<^(syd#nz?ytgQgHAMH-~*Kkgfj(gE1S@YqJt=HSH8kn2H9xiDw%QEJW7 z@n@?7X#ij}PA$UYf>Bd2@iwE*wU>CGml*3ybobM|@p`;+O6mXWi+#*S02o>>eW$6c^|BkgV#;``Ys}uLI^vOS$LGMKz_j zw`#H*-W=++v0I_rF{7wmr0IQo-;)D?Du!_^=<&|cMcgmE6Z9lM{lL|vx?(exO)09B zaga8yKG(Piyq&q4x)Grn^PFf4lnYv=k6PHO=u4^W@S=2klON%4SxYT;q{GDJwV2UY zoAcqH{$or7C&}L7XZEyPYhKoq0)P)+VNkguqnfF*Ry(ap{S%0FbgVtPG5Q;5I9m(u zRX~Wa+u>-L4t`mA{(U#=rR`3QhjK5YxaSGHSVy)_;-k3v?jllJiZN1qnmmS4z7?h{ zDanjaLJxEG+nJ2XxlU7|$+h~_q22~SwGy{e_agOj_*UdL^7phy8@Yb}vD{FGu$H9z zEyac`q@n+G`2}~={1o}3(=lg44}c%?(zLiZ7i$@YYvuYEhzabr_xp;J%}TUc`kpDg9h)^qVg@Kv4+0xL`7R93-YjYGbMla|xx;>DSnTtla#(z?asUt7`R zVUC`vqRMU>alu5_;C!k}Z#CMwbZi6HOU)mnHX3ex#QPEm(vkQ#hkoP&#CJm{g{0EE zRuCDP=<=vIp}jbF2`*~$#K`-4>Cgl7Rh>z^kK|Xm!JxA1JFmOz8@;0e#Of&H9igay zp+*Pky|2wfnJG@f_>6&*LP-a6I;XK`pY;jEGYx+Pt5o0F@7)=?dN#C?F`Qp1=T{_| zcqsRD8-gF+8rpQ^}r$w6UqaM~|=D+t&Z5@W~|Mdhot_`&*3&9z6o zovG(x;}}W>I>eo&kNbXnj)#EPNx}!LdBSl5!K2kc8Fx2NcpvrPnLIuTy9v0^vF2Gc zPMW!TlSkZoAj$rm(>Yj*vRS&6?`HJ=GGgGB?v)oQT9HXO3vtEa@8V1`!*!oYI>_*h 
zn$eh*pFii-@H_T+)W$xPA}FW9W-(CEl^RDwShjuM$LKozTLNa3_39NG1;e9<5y-jT zjN_*H=Vjs=V0U^^-702<000<)mxUI(b4p2vC}`U;88NxmZwVMZ7%OnSy4Y&Wp}u(a z+O}Cd;MnnFq83xA>G%GuaIZ3&*?;gAp~jwi(0l$E$~+PG(UgA?_!w7IZRR6n!H@6eBe}yCm{FM^3!RSj%7keLSk{N!4+F`X z1Fbc+>cx!xcs?R}4+*_G%iGB0PlE(q13u{=gQJqmhrHd!B5rxL`I$)0Nb}@v8P;=X zb}*o})NKIkx3@K%UO?LJGOE7b-*!As6+zRDz+vUs^hUX$&7QSy9T2M1r{sgWwX}L( zbyLKbe2j6L7yR^`?&1J4!b1W~4{(xmsq7QS(MRCyCDlM{hZGhV&1o6Q26EU|>#SGy z#E1Vm>OXrT&XOGM_s-Spr*%jLh4SE+K^f3YK#RcVI9tHlKZ(Iw5{ouIn;qKc77MW@-|cz!P9$?U6^{VXOmf;o`yeXIxl%joU(n0IDG zC6#9vjBdoSH(ocncy{UW#!4Lbk0i!MtactAR4xZFs55EfH={RpmQ>U2qMUSBZ+&yU zq}8}4U*xj*f^guB(B@o}6L6R_yDJG3G{F%~{+BB^a9LRAI)K4+hWIm=0 zKzt2H;J{X_Anlet!tfy0W)Vn%io2-AxTq%^hLRBW{1$--G*RwKd|^3v8FSQ7Car zJZM=hUHk?RYFHb)po0FYgTI-Q7J|^CNl)^&pDc^g%g857xHX-!jN!)-eWjLgg@(pQ zqap1m4`rGrNHERw6z(JrRBvQ|Qo?aoD$Q#!vnBz;+-b65=wOrnh&n_ zj6Lc!?B2WZG*t^=SPmE$mX4YD=Kw~vcOL?X>a*LF2Df2_#U~Fr(W!uC;m}C)*~7x{ zgR9shTLUOqp93#}v|87k$VJS$yT43HE^6OBVZ(j7Kiq|YPW6jX%JU{zQNu6fuEwYf zK3~=;Qa!rdLDw*IF5xyCpV{2l=T8exqi4Y>4;Mk&OCgdC zd`qh_^>_}UAK5U@`XOSbXWzaqaB*FnjrVD;^2WM^p2f)0!2ujcxvBWX2mKW@5q=ZX z6&Ruw{XyX1GfK-cA{f^)-EH%6p-E5<2=uslbF6C6r@6XOYkcHK#g$!~0zOJjcjV6M zL@rGrjQQSFQOo^stSWSJfhuM)gD;N;sR$Se%R@XFs@Dwz4Shve--!<~Ox%h2a~9 zWt~D9CMTIaH5t1lx9L1Zos4_M)JE4oq7XN=$>@IM_QmnGxdh2bD_$p(K}1fp47F;s z-}w!AEK);%>KI%l_IB*00qu^3>xAVUI z`r`ttJMm{P98BLL?sCV7p8MCH$2u!s#PJKJJ3}*zk7J!9Ji)fDjea0dl?Lv7w6k3G zP>y)@%3NzkRi4vvRVpDY41lTCQup-40RQwJ;-DWRIZ76KW=*F)S(Omi?>3Rm?p>1F zr5z?;3}~#on3LO`6+-u-O9JlJ#ONe%<%wW z6k0}?0aia{ye1cg3+}2GuDYMXquQlre+gagicw1M)R-J%Up%<1J zfzOAYIDGi9+R&=yT0;D)5mH}ELrag}6x0^~)%~;DfbN2EU}x*4X#RXH5AGhxk9<22 zJ%P~)m?!viqQ|Y&)q(lY;mR6%T-V6Z^^v&IV`vCa>fl62r=4?dZV>f%&HX&SZ-h>n zz^Y3xSE{B;3J18c=!CeS_I3Ox!}k>t!UJ}<+NS1s%cdR8)R#ohLY;64^b+s_N=K`g z8^OO5e+*faPtVA~e(}*WXzX+#{XzGi)%BF8`ay`V|-UGFC;Ek(;)8!DJTQC0{@< zyl9Nv)D!6IGFfzYE~Vm1);HZ6IdR9G5k7^xo(MxTaC_C#i)I2060!{T+|?O3)9yGC zt^~Gaa1U{|-NXgU;fNcZX`HpF5e(SeRDq0czk3~oY_+eN6|hxh{761jT33c04#yN! 
z#{~MlZoG;)0V1P3-cG3hI1H^`LHo12^bS1QKen~e=$_pdl;Z)3;fsAW+X1xZj7T0O z>qT6RcW90MwCs%T$;q*=3ZriLTC5k;U$~ZDA?)oH>6`Oka`F%^4{K5`TGf)h+lSb* znEj+8zKeI7hgAJZ)Q;;g&>C-Ojx)Og^E`d-M%_ExopGpNU)f39BQ+iD;v2V2pFfC*7-MiI$W#sSFXK3 z0MXK7Dlg;7eq%vS=Jy|>T#V0iph`VA&W@|ysj!z3s+mF*ZarYqafXh=b zEN2|sDVd@;{*Y`>xa>G|+BBWGMi0_<8Mjg(M~V;Jw$NEAy+a33(D+E*vND9EFOLsK z_u8EQ324Cw*#`lRP7T2~XNt;BW=ughwUDu^20%*Mq_AF`W}v0^tZg^qy=g(H>gGW^ zr6&D%G9ECWWFqF|48BxZPnz;^o(?seg^x>_!Qc}NT~HYDTk+OV+!nd~^1&&>IdI|y zcfxbczXF#EKq(;yVY(l|03i#C}j>6^E%l~ch|4sCo$fK9+EH=AKMqYZg#fYS`>=z zphF6(%|aeL4Uh2sy6Bqp(nh6?cWr0h&b_4ti@lu5Gz5G;a45~AJ;i40T)GJe9fLa) z!!fa06S+R}86U-|PTXAyCbaB|Y+GwDjLdSG=NANSF4Ht_ZQk}x^$E{(N3bFI9KA@c z_HQcxdN3U+D@s!i+k>b`oi$UYQi^6D2Zg(C&{U-$I47^ zj&_aiPcj;UoJO7kCR4?2ug645b>;f{xk3jIOa+f%#CseU-U5TjWV&DB>COrBZT6kx z)EJd%_WD-Q*py7)CC36LCzg$hPoE9+y2Y~hc9?@;LxO#y`RTv)m)g3LHQ=?R zw54n<++82g-`wolt>u|zTo9>TGV5_aAn#w_7A zFpHbg%iI{onA1e1(0s2*_v*(Ic5us7qH7%hUX5G38~*=nyK z(7>jbMzZKcs4IJ>z$z4+bv5 zil-pPRgSu|ZK^g3;NmE)+wy4lF5B_&2&1EQjj!tFv=)fKd}xh8w1VHAl5CCto-#^*5Yj0nYzEt>U*LLa z#R@aRH+`*he5DCEeDr>Q2nQyUCTd-DvuU2TqCs1ha!k~P&Jon zLX^LB(k;uHy&ZcOAGO+t2|gs%W`t&;)-%QO@-U? zAk`3vRLbgDL^Wp`cIkGB>D%p|e;)Qn)g@^e-^X`auB=y&C5n}vG)%Iku`*}is z7Zec;GeEkh8EwStVT?R}mbL<>I2Ci=OyFXVyw6Yrl(aZB>i}XC5g{i%@ z;QwIfuXnefDsTCF5Z=D{yocb$A4t`HULj`4&A(Yl+LI?S5dl5@m044|s9u{5?ur?> z9AUja@NyM_`mVIn!EcPz)wg^w?e=v*`s>AYB9f&#VZ1bA{Kz~b>*3UL?I^&@wRYlF z6yl?laG6J(qG?H}@exk16J@$dKTds9?^My^menStE&qJt42>^_o=j_rJ;kgO2Izro_l8jJtBqN66r8v}!*AWmKzv?|a4joXzLz)?$^-_=~tPBYCOe z-Q8ppyV}I^nexE+>nqFYf%XWs7^X@ekM6ymr^OB05^s(lXp(k#Bjp0jxeH|qvo6EX z;|}*+_Z<72z0a;@;A5@*?I-P%R#3VL90%iN<=H|6_nauc@;caLcv>n8r8!QXa>(o! 
zDVX%q9IQt5IqzEV6V*h>Yr$Unm(8zaLd{ZB$kpj`g3$YXODSOCwQxKpCiBtUK$(%J zc~F3{g;k3qjtiO+$EDa^5T$pN_4F*pCO0&~HVLV{^AWWag0s@5v1jqC5gV+Z)5O}i zmF=-Mw2O%+ht}NLdDJbj*K#ZU)~gqS>5$%B?BhO!FSA4vCaCLV`|euBLn%|C_V2$+NrM@|oC>iOXlB&1kJ;PhijgGzKjUPAFPWfegLf5hG zrQU0~)HB7ohZ|uve#ANEk@GY+pnM2z^qDYX>Qb)ecG#yt_y6l~6!m$Z+coMDs-&Tx z?i9nELv5rXE;;Ql9w35#P*XrYA35=3T(E$iSiNfZ=8(x~&7vO8_#U_AGUz&R)VS?0C0hmprH}ZJ`L@ty( zOK?i;pf_q|xwMbY+=hrCoM(w7->rq#z1W2u`D(H8VmlAp!mNNPL=H{$1YGg}}! zUg!Fd5z*w-k||wT3onae5(8?ame0ISluRe=T^E&3dC}p0pLd|8R$~+RDW6em+W5x` zLp|A_4;@usRtqCa*++-l4yN;p8t%pExenRH6}}cY> zI_#y(X(rqbYm4kzHkq50-HGVV?IJL9^W6Mad`TaO_;O3R>a=aZLeP{s0FBvf85%k` zU96XY2fF=eSfXuA0`|vc`C+~;yq&>pQv}8j>tCKlXX!QXdUEwgNYA==v-DnfTZQI1 zg58a@7YL0NDeQ?sH;n?=sLB=*rz_&|gxm$?b*$EpyvXNe`P=@hzA(7aDqLIY?lN8T zi)ldS!(!W9k-U{~pywVDQ>!@QCd`*pp#EHu-T?0XobXgP6$Z8Kv}M)&nt-xi7CKLN zKrnRE4M|He4o{MGSt0ds)jy zt*54T(-b;NX9hlppOn))@hsU=lt^_eNm03Z@UG9E zCknEJuvjI9QhsPzgTWoP&8_&uMvU4G9tyl#4mtXLI_r zZo+80ZY7^!SetP0$SJEtW`O%Hp?nQC`LY_cx?k=;d8cva0WQbA?POPEy7`+!rkMQK zMkg%#dXRKx+>IRW*~fXO&1GBu#)KzGTBdZ4P`hp9I-Hgtm|~zv+j%gA>Qn?X=aVZE znw(Bn!t|Rv7WL4JzSFIG{*Jz!LFV>}wex_rI%;%D_4{OTQBqKbYo6bUf;rn_d`ahU$b3RQrKU!!2*R;>_G47*=`%3Nx4H?MJIR_T@6u znuKsb@3?Ls#tkTgtU{}$0TifLy1t{TVeh9!_PauXZGYn`wqw~-52SF#pQuCHew?5R zn0&1&@s*33hF;&xH0`3c2%RS+?M&e6G9YO4=^w|XI^CL>?Atk zRG!MaQOf+GY~#-$z(mCz<(G(PbDM_7Xl*59v;BAcyiw&%t~hSUS93y-OK1zqIeT=hn+&Q*x!ps8;7)DsW-!yK^HigIYW zw0XjS@zL@Pe$<;o9&K*04Bc8fpu8`1fY?+mB@)2Ud75iYRN{tf%}4)pSNw0qu@1nG zM5`*f`C8#*=&NGfg!a(o3406qs+@M#uZM(xK6H=xqo@k} zaoVRvD&iJ4ifE8_pEKz1mn!-W5hsg77?5h9?{|}SwK+B;Usd#!SLLxQ{ zkOumiTA&W#?tbnv#MbUGPNj7Ekbm0*c~f)+vgR;> zbAH}a=|c(Ez;moJU6lrDu(o*c<`6M6h@o3Cl+6w%byqsWp-9W5fFf9c&jia?dc#Km z>{!RIH;e{Tpe>sx2S!rv4s`Li6OiWP?kR6q2qs8`GsD71U^qc(|79c*_2y6l3hSDC z(}^3^M0t)2496u=IMHi*tjKZ?w4uQ-4v?t_|LCT}%Sxe3fASA&w+6PVb`$#S&2Tns zmTP~qeB9B!V|+QTM!_7JfKzBn^2RpRXy=8j{KpE|&R9#FE$=wam^{E-^^Zf?oUXSX z$TBs_*Oh{-8Ri-gUkvHnl47n}8ZVYILSv8?s-+O$Xlf&qYIwN>U#p4VlI#^ZS`nIC 
zoc$K4dfn92otr(JFjSY`bCTB>K=dypeb@y5rI&_#Fr{6Q4oPGb;yi#w%9r${uct%> zz?Hb^Kbnaal=E`%HJO)H`52d`TQsin7#G)yJ{#h}Qlyfaj@^_}LWF!hlboL9*W@V6 zsb{M0dWoT92F?*{EjWx+`?~)MC&Il42Ck^es@>4PvyqNTRGpE9lrID&klNRZ!dx7$ z47PT@5OzaK5uF7L9EvCF=Ps`xcBTir2WU`pwY{j@`*f7S=RUQ&cnUqMd3$J|CV)8Y zswKeF%{=TItQpWLi)D(bZcM5tkk1b+0zjmZz*rrKwZ~N2t67;^EjM4MFpT{1PedAe z8UbT~8|hX0`QPn-`>*5ZaK9cjWmJs&4g*PV*}FeaZ7JEd7$S9V`Tk-Az8-go}j zpsM`)aivk=f~m+a{A4Z$7dD14hl^{d0S^g)v`I4IOsrZy=iwMRqVL2*r(w)jD&#JDJvgR74bJd9hk4LvJH<5_s zfYL`?|Fyugc-ny9kRm_yYw4O5YmhSCl~vbxg4u~nv{_JGHgP6SzeQU$=J)OS^Q=A zqF65(pcQ)bt_~YoaP(BndcGnhlFAF<-N^$(-!;T?L-LCRMxSlnUA!@7XwpP2NpbW2 zTe9h&@q34N;2b~H`L2ke%f53y$)0cC>M&ju=GGhLKFShTjJvlo3(NR5@G+Tr*f>wV z@0^Z&-McH!W)1@@&F=Z}SMn`}PzN?s8+6*(VUg=;%5{NuBTQT7ZOYrRAXByplMCTQ z?0TYwp+ye=yy&57ElFlVRER=(WMVoh{bEx~GxoI4Vnu2vPC{-+w2h0C3PwcuX`4b|35NE_o*H3Sk!;Ec%pX>$P3rin91h97iutIOD@Jo<8_`IGz7iG(ER_-G`C;99V zfz%ds75OaYnY78a@O^h}S?c&-@^4&umXthRT?2!bYJdCW^DmMbWfy}85m9=7P5XYe zLUu>xO~@|}>lrE9+8Rn)l{Yi9F^bR9hwGis#ra^B)kA?iG`A70zz5mzjJtHE4uW=n z<(Rg$z;ekm+fPTfTyaqV>F2n?XKtm=G(M(56ttE&wLda>hDb&q{!vgfOf#8x%w}*n zd#0RF_=9Nqt%2^te&4;1aSC!up6>0XxIxH>k7^9uA+p>FSto1e*{14tY!W~H{qHZ= zp82H7;d&<^cDw6G%TdIX3$YUfE33~3`d8C3%;~^)YW{FOX!XFBQ9Hiw68au462NVa zojc26??3<7%A|j50DyGm3Mt zT5&e1m`30VEd9K8(qlk@RBjmT&+~ zlL&VJsqed`2*?sMV8Ap>GV9Kg*zTF1%_2aeMTyJd_s4HuH%V3nzK3bVN+KQDgz*e7qU9Mi-UR#hcJD$f^9!@gZl|O$*OpY z{l}_ZOtI1^KIIf@X-%m6XT2beFOzhE+oOG}GWg*Ik}X4Y*X{eUz+oz>Q~j=1c#9-9@>xfty0uU z{btH@tBG9}E$Ab`hnVHMM%`;)Ia>yTLmkAb>o+RHix?S`{9WpH-KWcl-{*t!@oVf` zm8Qi85 zM-T2~{lL39kfo;Wr|00ubQ>e7YYz<#ZB38c0Aso!OsGIrP?Dj0@YusSWP4lsV;25pC$BkZNXj zvr#+^RIE*VAoX`&919`k7~I%3SWqmuKdL*qX3Q_>4K+KXnug-k$i)%VPS?fSGD$_1 z^IfTfw2_UxL9=RKSK-RNXw^~Gz*4UKGrxTxtJIX4ajkan?@f89?9HJ$qe-4x`f9zX zeXo35E3q=Bda0>R%vn8ufvXjIkJAAyfL{!t1+Z;M3#^oo!DVB8H@9NUM$LKSudwL` zdunQ0lI{m1xh6WfS8Uc2iz0Y1MDtAb&@S}VOm6_U+tcaXNB20MGF8=2NaGm1;RmzK zU`*br20%C)h4!AA_`o=J;QMBx$vhS zUjbqsT?TvKcZ7NH zM#pqe$qw6|1sBo-r(cNosMO^p)aBx4zR5+8DLxJP@rK!RTxC}=A6vV5iw^KpS74$v 
zy<>slc?25zf;6o)+JMj;#+iDSJamBi#FxWzZF;?q>LlSf4@B9%hyjfc!XX%QT5#`4RR8d zY`W~Ibu0>26iY(95b!2TwGnMxd35)T?F8gAjw6@4?f@_^L2+BBNeZf*xR!?9kH5M- z?<@?8OUc}Xc##~eEK-mDu6Vx(}u<4M@4+fjt zQt$MUwE1+0L7B9$V8PhShmbaxu%|HL?+F#H+k7rR!BN)dvE2;m+qMjZ!_ZF}*B_3V zXkP^mBRMmQB2G%C>Rh<)Tze+5#zap&urE))UZ%vKUYckB{Eunlzw6=uUs>)060m8< zq_W|KS_fnD;QD3={()2xI9R76_FeGAJ1zulwa{E|C%dTiRM2$x+JpNk$GBndg@ySn zBceo1vNWmOXx!Y@@y($LdAyxok9bwS*2C;&fg_N#M(L*Nv~1I&FVD)qw4(HK*=&uX z2C6SNTYRg zafN%t39#_1tX9X__CV93vSlgE5z=fL zPft5F27*O@mi{6V*PDsczIj7x0Sh^~?Mbct&(8Uezb3!4>ONP@ghqB3uZzwWale<7 zNli)K9`Q~U4Fb$$vSz)Gtx@pY%zF9N!`KC?8pbDo&uiD zHa;*uXvn*@mYA4zA~u0y|Col_O5*BSznJ8a6Q=wDA>Qy|_0-YV#i={G0xMo6?~y5m zHBaTDat-ewl)lp`%8@x?JF~YN$qp~~3y!%RvUalt&CF1MY;5;}TLyv+%uKK#6AxI; z6P^Z_mAq>+c2}zTP^IJD{Omsi_qWnVqpQR;sif(4;^xZ8iis{w}b5WFCrsg1U8$Mcw_gXTW=YEm}CNi9NgL^a6r0nRze_X)KFaehMQ=6>%HYVs7rntMH0{H%Y7{gBB6XRC3=C-pr!}Km{nu*@v7-@`xiwXyzrirDc zRrjKsD*h0;Gt8zg`Mu}qJTr@Z)b~qmq8*KtJuP!ZQam+uZ3qlt-F}UASmiW-^UqrE zdI8Xl?5TLKY$dKA9&$!8y8#VL(4$IhV?yfrS&KV-Uwq^f?vR#7 z+MZW;DN~z^EZZV(L=mFN;HzHFAo^AG*C@#9g@3l_-_F#dvz35I6gPcae-0aRd{QZV zXCjsT`PwL-@1Eoxv8-%V=6g>+Z#2FQ9IDwh{C z<#ZxU+=1Ce4}_vq0U~(@?9mo%9(StAqr4IZn(BswHGQ-syO{&UCBn5N+|UfK?-V{# zbpDsMy754A_nQ^^OJ2=^M_>OAh~e(ix6Ji-=VfQG zVQ&u2r-g0@y*bp@Q{yoG)m@;{yT$1zyXMvTp>y14U40_GFAuK+`V>BA{C2q1gR=v) z;-B=#O-I(Ey+xbSt@>N$fBm-*{MTYiK1=_S!{Ha$U#C=BKXyouJ&ddHuFt9E1Xd?( zBdvu#+?PzTlR*xz?L*;TItKT_NqR$RutdUM1JzwaM;=I56WWn|9zlv=El;JOM$|5`i9>Lb%bR$j+5L{~A2^c1 z>+9Apt1nM)f59Cip$uP=k--Yq!eDxMs%{ID1tr*rehK$xBb&!AcU$Sh^jNx-Edbq1 zE^bQ8vsnvim7R)FLn~g&VI9ppm}@2NU5(85?1@*>@IfqKxL4{fqWVkjxw?oKAqw%q?o~Il}DLd>uq5 z3F5!yHui|;+4xJ09Q12pHDZ!}Duk3|NsG5=JAJ+C zEk=bDJj^RR8TvLSXyUE z9g)+~Ovu>ijCBi4V;y7R9G>d!XMb1z_SydDarjq9QD-}@4?&PPj|iQ_o?Jm<_ew)? 
zt4NOITy9Inl3u*LeTQ&;K1uN-eAU?9&i_=AB1$4EfKStm{YDFtkWnh|uewaC%QGJ_gVg{lFacJ*oz*_8~BUf*;nrFsJBt|HmRq$tn#QLvL3sLnT%IC zz}ELIf6u&O3H}^}k+ld@bvBuk)uHQ0uMi&=cggR_Yp5UHy~Z7}_?Q z(|+~p^$)38J|yj%M`BNMq783mjbY`q#3Y6R3TbOfF6k`}kbe7cPw2Y2-``{Voizias){`SFHtWTAPU;*osXq2^sxsQlD09N0!4C7uh7#=gZPr8`)dKNmJ z19PXm+&C!q&dV}Rb^C+|nBuv)bNq~V9GpSKX7fStsmq=jp5N8*7!7A~5w;ocDZE^55WM(_Z--5d=dp@&fJRt)G4an)S|4Zw`qc zydrd+NprzJKEreQFl2i5>8UgM!6++Di>WyIV=iZ|^~lp9`E@^wpNHfEdcQg6rTD`R z4??0DQ~%hadmj&!S=_ zY~;6zw7V+ocuaXFfdfnHT5jCWWr7XadPj~N(Z8O&omX65UYzHcywzILx+#g5upYnV z?fMXm-6q^at070=BTXIG|BjWHVE~|f4`5c=rsno`)Rwjk^<2H`F~t6{xavD$$*y@~ z{%5^$+{di9%-*Zt@ZRobT}4NAV_m_YH8m`Di5af17&%5sNr{iHuY$BCWxP%*u>kS&gp(?+-J%C4S95w>Jp z|9+3q`hoE-y%U{3VD^H)Nkb=rjb&=TiM+Ysyj4Q>TD+;a4`t@hx&3TL$3dWmJ3@;( z6O27x5qrkpZz}qw!Q!=+BwTb7G3-t<9Z62oK{(#;)`x0pZ-+y9XdYoTl8Q$fx+%4mH^@0hno(D)j+Flcs`s~UerNH-Cuz^* zBgb~1WTS{zRvY-h8i4nbQu$rFG*a0FfQW)=rbM;*G8jrJQ`oWR1y8TbO5L<#`Pr5h za{obc^nS;&mqbrM!!dW9pM*-0J&EgG$bo%3X8DSx4)WNt^zaa~a-lUXB`qG~=aISG z8ai7j*Skrz_ei6uKaOdu-T*8xJEuds3L)r-J|ox0-d_wa_iin1A6*3eoI)OOHb;Wy z#4&BTW!n4R$cCb9moKw5En7B z(A4=5<^6N;Vm>uxiD!PY2#zUQHIXps&o$KNiI8iSfY3$T%5d$Z%mNJ4RSMUNn|GNx z3a@h=xr5>Kvc=OR7=aXm$)6#>tB1{Ni*nLTfiEqVyXDFsM#wv%CrU%h)pTBo^o8qM zf0kK00kQ?cW8!;nzAoF^((D`ExD~0~vO--uB z+PrG8ZqVCAdPH?1Rn_D~4|QLeE6pW)bmT0;jWqR--9&0;Ho<#hnu>bA#~M{_-Cd}! 
z`c+21d3-BZZ-T`yCWgY3longuO)pP}as3Gxv)D}pYD!?I;G);mTqm6$45-pZ%GZwf z0Sw@R_$HTU1JepydS%XH=+I;=^>jj8y3x6f0qit)>~f8Po6qGMW0Vv4L~jz{z(AX^ zbMDlwQ>XL>CEuBCys78ldl|5l2eLx?o6x7a2o<_=bAESt-d!SyY5^oh-|iWoF2!q| zO!Bh(LzoWDI|HH=clC}POvT}(9a^{8Zw~!lKJuz99ZfE5esib_SoWKQ3a*54izxa3 zs_C>X3C&@oOBQg!^0grG*yXCa{xathk;OGc`k& zrWJ9#a{1dAlJf|koP>1GT~+ydcc-0jS0;@aQ1TuQoGcw{iyQK^3hUUw8^-!}nhYgA z5=OTNo{OlJt7$;_X*S{qBIb6d_zX7`4i~#K$K9_6I+rm+vT08JE7JB39R;5 z_HW9}{T;zbVerpYma(hMVl(5CEqbg{-IfQqgwNG>g0oVG<0Hx=81PF5ZFE?H%nM?q zF*wdDV!*!L3o76)3-e`-2Lu2DbZ+$0Dm{Yq$LA4<-8<8Qvtzn-06AerUc3m{(=1?c z>`m&16B>6L-bXXzH@-Cl0h_H8W|r=jRXCz76Ufboz~1g&iX*q~*qws<98@}Lb0KVV zUP^Z)C*q*LEh~ptB^~PzSKhLgxC3Yp;7PbDRFwd{Y6~^yquDk!d??t>-VcFn{%6LNdw0X4_|c8}De> zJV%FoLTIps)pG-Tm=jXt_{$8>kS@Jq3}WBaedH(+ON<9mcM2H_xB#Aoqn6ktM<;IS zc-#bkkk5%2iJ_3A#?rcU486??$IhiBx!jiRI?|r4Y}J}pS$9I15(%{`X6MO`8ce4f zH>92MYTV=PMELF(=@%8d_+bo_gVdSSy%d-9AH~>atM(>#R3P`TFnFTule8;7o0PsG*#*X&hXi$Iu55!wI zsdf-O9z{-oeh|#+!fwFgX{<%RYDWI+1(;Zd7@Y3Y?MQ)_TencfiA4s0Y3+cz9(pAB z+__d|&vx8c;qK?&e8K1t3VI&lUNW#Sz%jnD=#j_`J#WIjflG%#`o^s$=H^)uKLO$Y z<8iF|z`QeA#9wUV{*{qI-zjy1>O@UffkzzP98!#2$*bIOwEZrOz5GP`&7ot{&?(_J zVo0@|EesixU`!`#jBnzr9Y@RHwh(Y&>;J>rn?N;{rG39v+g)~*CA38eC{qb7lp-J^ zGbCN*fFeVeluQ9q5&3=5F!GJj42aIAR$BugCT@4 zB>_V49{YXo+wXVpTJQSqx9(XhaMofa*=Mu&KKne+|M~sBJYbXIbP&KZkXpV#xVrFE z8*P*=JCAezE-E>EFmU^Tn20q@{C!HIfaMKQ1u7!Aj&1S7@by-oD4NYHqd#38y!elQ zH&6eYjMDkyv+1@x&w7~>9|ZURja78P$WUtROe`JtG$`}=ayh{0FVo?0Y%3?Gb7ULJ zOiH5W-Hqe#UX{BX^(>o+D$fMyH3UtRJ#T4*G$)2!jvYe4J{9O!TTZOcanJny2vL zoUW=41PJq!nwEo2jFOSp)DG%>6o zv`Io9fgN<^@4<;V$J9D26lb9+dy6>`S|F-92Z#mQx(z?~+eIEgIEHcs6u;7hieDHq zi5DCF5mO)7JI;-p{;^f=hXP6Bs@JzdD*0KKwvo06-%hoD^Y5Pfc=k!7kF;s$NhVlF zTxPDqD!vI>XxBB;ir>}f&!h(--yj#Ya6nC=6a<|9+U2+Zc<<-J?2r0CE$17LeZ?8x zZ-t(sdeLe_SQqv<7vAc-%d6a|N95!yALZRVR6aR^i(DcsBOD3SzP|H@QEZ+h?Y*dV zbL%fEPMmBZb75nhmVCi}eEEyYUA@ZzWj)OHarQj!b6!eVfh%eAdh>zX?e%XOG4Q3~ zE{%Qwx~Nf9GR~jMWEZXkLgJOR$>!IafJv9Bxo7HIaNqN+JZ7bj>9v(7gQ5kp% 
zW6?OH41nl?phNR`&KCg~F5~wlD&wZo;KLA)LDve@QY29^aOc8TLM@d|O0D&aT6LgGk z@Gfy*5o);hJ~yQaV}*rp0KRnF+q^#svmZjQ9Iv+Xk*_plD!Lu&iqXG$ z8hD7UAQeEV(BvI>_q(bzm@)r!+wR%i;?TxTOUO*LE_ z=UCT|g3l2}QKjZkaY9C_pwO8Q3+QxkeQ$$Wicgqyk$$Q!*>bYj_gCHb@f7zRciWBc z=sNZr23smPGc3oi%Iy(jUZPJAGSEfI_1*-jcK>*3*`qkvPo`SKHbjd1)w}RGC&Lf| zl2uPx_QgF-Qn#n4wr8QvETF-^3X>wvFI*g=4n>sbqv6Pt!`#)aAG(57E9B^KSm_K6 zWRl`(AM$Br_afNlSS+!n+j%ul4S-*luva^LqCO77{eQ?lbMEb?Ap(x7!Myo}*|;|)7rC~D)q|Xa zf*m`WEMZXR$4S>Ki%J`}9EHLSnZXeiH_hl#XM1V4{AS;9P<%BYh?&3>HxUO_L+6@| zt#9qwvtlubTbo?Z*&(H}wNIWWwqy0>N)niV0H++gw)@YV@7$s9+?lA8&!URQcK`dl z`LFXT+|S_C4x9N4O2|y2n%A?a{8|}(J1%;hQL;g<)NI8L8493l37w-(ECR@Vw0jhL zdSOY6NQ^=%Sw{Am{SLJ_U)f^XiszwfhWrY$G$@uQu)SVxAJQ9hJxdkLq>3JMC3 zmN|t}(i#~f-n1!W+X`WVYpL<+6q$M+WY#jFPKs~U+PcUahb~|Jq~$OF)wchg6MboE zX>o3Gj)T3fHo_0$nv~Shk@$)Qa`C?JtB`Ayk!6SpV(cXRxbM|}X<6DEfYG}))kz3O zRPca$@z!C^#ryZ=fG*3@3px}|ZWZzl0#t{%S4SUWtIc#(a}E{e-){Y>JjZeu9G{9i zIh$!b7py~0-&$P4zq^I;>z*%R8q3mC4fRZ6->?XU4}wG%fe7!@nSm3b0IybXXQ==c z$~0q5_m}#gq2!!|pt9@0?)4K69`IHXE9qvdbG{PS)V3%~LnsVlo8k~+t;|mM8lzTUZEgEE^!Op^#usiQMht zt3|=+c1zil)RZ)kAQx~CgpO3eCLZ8!>M*5_=HVQ9+kqccnIoyCVR&Br3d%XN@PeK- zF{Je+o1AwCFZdBA>g(xw)lV=EV*zAbvPr7q6%evIas@DUTK}nYGETgFI#Cls@#jN? z8b)(0pA5W6oG{>5Y+UQG+;$JUsFENs{01o4KX=aO;{75(MD+UpO7=p(_J0@xaY0)r za-5d0>PAxZRTnZ! 
zM_U+e)L{om82zpVEogKWliW1CYtTIBH3V;*>B`*gEf4&I@z!Liz6Io!N`kmkle(+t zthI<5tDGvshv;+0&Fbh%zwAfLo4W4Awcd_qo}v6$*4C+a&3+TnomoP`q2{f%tq$0O zSq5U3++1=f_3eUtLd5w_)3CLmyzcm+gp@j4^hkUpvITPPyloT0DYTMr6@@Q~)oc7; zFBc7Y33aE#rJC1?PE!~t7VY|n$t4-x>K{=fwEV|Q-Fr*QW0xiQ2%X(5ggvl9y}Z36 zX-b-Bg2ePU1=34G+7s{i4KxLVSi-R5G>4*rT2X63@*#mWFZrOO-&?tXqhJVYPB`p1 z^32Jg=~WfjmVUiBGv-&xlU>Y_<1wRQpB*&6_V2AyIab46__7DWm|s>AtzUr88~KU_C6u9D{n9 zZH?>tWy~}r-S)-vQ9b0f>iBfIb630}8*4i3Q)3wJp9G_n%r`xbwM@BRA7SJ4d|A%{ zD2Rb_h@8MP=$R}#h)>5@Wi?Q;@#V7mYoOxlza4*T83`P=FMX-EKdN4qrL#G@CVhTJ z#dzBHx{y(^&uS7+ zCxc~ysX;FKeiZlHjD~OsrkZHrnGnpki$Jw+v2Lnbv*D&Ji2v$M@DJXX`X{u3vs3o* z+0v1y5~*=KSZZf(0BD+wF0JCoH>z0`qssEN?#rVz@YnOT91La7i(ZkK8j)X`6sgX> zz)R_#0xGW-6sg+CqR1gynCQSg#1??GoNaO+z1nuOYYHgo-b@7VGw7S{r$}AKpX3{! zOnCrf7nMKYmbFD&l7W|{)y4Z~%~(>9(pv+0CbC{Q*8HzoMs5L6K)~Y0k6AGl^!;a7 znU(OpT}5>h%$7s4QT&oUe|Bv9D*g^W9eH%V2907s%BZPoW)Ym??s9TS7SO%LI@NUa z1i!naE&lg3{ghO8d;+tK{;XttwJQfYnn;}K38;o2OrW&-sftQGRK;2DB0Z;)IVX&y zKr}xYr%Ut*U*9gzE9=_8N_*GPlc@w^YTfsJFV23CH4e0h7k6o{KBk9qc;bk9$KKX; zd@{cLMTn-M^eGX%UH0^pCXHh=#JMRPPo1ogW5pq?ngirDpLav`saiB#*$nsThFIip z#4+97@~?}eLMbjWs{}gM2C}}mb0(MmX!G%wXXFpc$hG`O(_PK>=M?fskCsfTpTu6k zg7|%!(_`Vo)cnjJ?%GE+|FPxf!?yUu1xi^(HC1M_p;0>es+CTj;4|>}0ZRT>CLFBK zaprr-pm_(qoyjS(3ii-~_ll}+mp3Duo*QWH=8&6TU_ij=3lR)E9MFn-iWY<$ale`- z8(bbb^&Cab#Ne^uO{T$o76h~#45C(`%tjKTBgSP;%6C&#UqID|EzTYpayL9Fp?acf z>MORNOmsh2*MnAT!I_slLBKyvT6T|q}M)|=^NHe((z)Ac0`Gs3XUz=-Y!QKIb0US%ehJ7;rBO*o9 zEdhsBYE993EH? 
z^ox6{PjCBxQP6d66Q)i}=@mgM#Ryq#@8@-_5lvfKRT_aN$ z#hdh5_7P^9QOTxas``r>LwG6LBJC}JNV=Lm3WQ3IWh-iz!O?T{Gi^^Y2T6@Nsmyj+ z@Q5%o4+tP1y*TiEAw6Mp{&pUlA&Rojyq$*+%d02wWcS>$KP{XJ#Da#>E}w%pJ)bSY z-NV~Fk#$c=CK++($0AE!(2M^($%F^>&_JxY{3I;J)#0puNM`W#@$9nJC%(aWPKr3P zy`vR^9!NOF=Qa9kjGq{Q-mgbFqgGdIE8;8rshs%YRL?;5Bk^R=%Ru+7VMirO|3Fvc ztXmcIJ4&j@=r0P}ZCv8j>g%vbI<>{9xud@}|4`$!C${0U3qV!6EZkA=GQZs?J%gMP zu&5Y5Y>Ou|tw{MBb=Hgk+`NaPlT$`@-YPJPzzMA+aKJqJ=rU5 z;MDKM(>mv-j5EBOdE>-1D+gq9Big|B>QB)e>SE+X!gIIXU_;7RnoKTr0V(zkCaD1J zr%Ap_^d=GRg-9P=QY_J*RNik)G6`igXIYXd7s*Uo9#1s0orrdzt8Z`8>z#5UzCSX& z^F`@F)o^e$Sg+V*iikdoq`wAE{=*t$%XOX@XOk4 z1AQq?4t`|?mGgi_fuA2>7Um7m23R%|Cu5`GPQXOfpVxu^@o_@#?eJJ=)}lL|@Qd4} zPw7@9O8Po6ahHxyceR<{;wN4YCb6k$1_g`i48w(NQrl5qR(W~+cIFGcv=?+J`8i8U zquJ$}QAvLNR7inm=CDsef5%jb2Rd^jnF36Ofhg1};JH{(vcxVD)OVZNeyKdr!Uh7< z9$q(Nm%FUjq~GU42n0VyOQCq6oz*7eUb$Nh9h$Ca3tp0b7F?oIwH=3U7o0`P;~G4N zar@)NVglB_dJC{cLwKM!@ELU=EWDJcr5?(fNbkNt){KhI_^59zMskYlG;BLSa_xOo zBs2*Zjj_fjCsplEW1^R@i_YDW!(1|ikc0gUKiZWsShC3BNw^PBuyI`!pjl$*ol{a0su0&W>l54 z4g-RLF9YYM3;?$~A%GJF;7Tt*bHK(tV@Ri9E`hVDPv}7?EOPJn{JNjWqx8drd_(HJ zHLoRl3HkF$nr-^{3lX)&Bvl&bODdN6W(qd4(#(cLHGV<3l-CJ_>0%Qo=mgOABf~>#Qx-&pK7j*qSKWah9QKXurb(A8>6&@E#1-%V5`Bcwb z@aBPgYTa$E+i4Co&3BXW*(;EBx36}uHQzq>RiNc5`>8s_s|ZQEi!QG4(iIdnTs*v# zpB40FpEs|ivBix=P?g&%n&5sG*e&eFzF*kxt_*Px06f}(Ci_or!nyRYk@)S$)+w?X zz{9LQjkPuv;T5d5&Vd%8d=_Wdz6DcVx?6rLGzp>I7sp)4u(Lk? 
z@NHc^FVZ6%QO(S#vuQ6~9$P=>mm&9t?EiVgghr*dZ4Gz2Kynb#Fnj1z%hHt(d-VJ2 zyt!1Lp8?D;=$kAwDvJ^L-AauhBU}0TsL?6KD7+;9wI>)GMs$nR`Y9A$reAo2s;LUr z!iqT9{?qP|qb8+9PttaID>AhEM{i$MZwY>(OM^0vG8uWE@cf)qc&$n{k9Q}xWmWsJ z`$w{>&Q!IA+k$>5P=Hv9<&hq@&4M6J4GTMxksrf+fmnYN3bW=1ntC^A7Zy!*R7{$&ls#uDo-7HrJC@QZ{HXaKPCNvFPsjpw$ z7GmEIHCgNA52L+SP0yW^@hx-8t)p$k9v27-uJ`YS7LHCHw^V|bEdfA*`ml$1;-+2X z;eP<%_WvU|>H>JsW&n3}C-HW6pxq=VXvmXV89n#zHudjf4lT~9Jkf4YTu^kryU5I@ zruAJGRL(GNOCut{^oTcHU~Q;RVnFm7Q)J2L`tQ!QMKc({3b<(3-!oTkK}bS3=(3l3PDI34N0@)ot$F$H7N1-)jGQx+~xgykDOw& z$$=NyKJ4=&&zy5d$OCFh_D{@hZURezOKV!JSL+>~y=dCwED{t2Bj;KmR@m;4B zlPAMilTYAS>FRFx1Q#3c!4@|y(3hQQzdTuLe^JhEy0(pGc*o0|GXkqQ*la751|dmB zr41ZW^|+^<0X7_;QP^0EG4%_r*D)-Daat+ngY)tWCi`>Z4za;gvt)$s4swOFcDIp^ zjdSYN6EnPQq2a0&;~n&`;kBS&k}!^^&v+`z3-3x=P6SOvhuEmRugR1EIyD;(iqxdE zt)|5p9n9c*tz%6$W$FfN4}TaG@z^Thh3xHs43|Aqm5)MKRcg*7Ha6CTDLY5BT~U|? z^SxXBd4_&)xdr3 zo@lOZLlEek@GmCL#X(X><-i99g< zLe#lFrz;s+C{9u{^8kdl{8w$kaB&>d$})R0;)_s@mE|q!=Y0BEF8HFB$uCm&3e6tn zj#LPtAavi(0sMhzyBg=m9>HF=q1p_GrsD?jFbq%Z6>?)WWG(|D3zhj^sL!JEnvR=K z;1V{2ovSME$L1{**LzFX4orCl%r5)4d~6E89iO=nQeFZ&$~XNk!vbPayt;#);&cwb zR)04?6q9staAM9!0*>hnBgO)5D2F%VNZtn(UtpdIJ9j1!O+YZ9FAFn;Ia5oE^|CdC z8GZv8@7^v%xH8@?q-v11Q(kQ}P=GMlxgdQeZs0-nt{r9_&9<7b#hu*;X(yIcVbK$x3q*-uVw-HC zfp1t-$kePl6!04ctS?&! 
zuXs338_i0XcGC;*y`Xbl2H#@$r{$2$Myh^Wj6l2CvmO#lB8|?B@YlVDTpPlJPxp__ zX0k}_OKm5L>ZaGz*0v^6DP~F(f?6XS#p%N!%Gg*h*vyY@E5R_)!U!}i-+KmgQi+eU z+)O=uY8p2<;&>0lw3~MT|NO8t+YDgLrFHPn+SG0xl5z|tBFWK9DVh5t^M)IusmGH- zQ0q=>o$Uw7UliQX9tX7c`B&=VE{+lk;@q|dcON?VxoGp)y(@t?juitTotRk z{zAu@TI~~BEcb<-Q9=qUSm0C)OZqHyaF``EZHK-XTTGpV{yHyT!Oy~8r7EXmAmpTq z)A&hZrr~Vhqa3H*+RXKYot#A7Ld>KnA;-X)lkycltiLrlX`13WRYeHqKM+R4AC@|= zOIyQB6ZSK_-4?Xg$8k9;>!=C4IebF0Qz$j;!Ia%WXHuUZyD--BHfh-JTag)bW8Z&cqJ(%|2pSh370el7@K6J6qVH1 zC#deg@QfB<@1Y{&$S^<-!KgfO+YZ(mT-&9rmr|RMS@jY*E zI;U4|9vS%A)*HJCg3__m+CSD5IHa|UlxH_RV|28o#hUGfW?Vt6!oD6YP2YH5- z=T#Z+meGJXK&)B$;=atxvQBatDF zJ*%d>+|-CsiJ=q$8e$Q{07y~O6XZm2bauIW_t_774tMv;YasQ%uh^uK75Jmk*kp>C z!EiEJt@XJaU0%2JU?toCo5hVT%|3HUJMZPdJ;N!NY2)zv!5y7p-Fu|$5AU|*(F#fU zDXG-`5|=!CE0{+b03Fm6K#|r$9?1Q;`>EmKkp|?&3-#+8PUwz&r1pY(64B85u8ZB@ z&YTuCC7nY9C=7&9B^J8VyOhmoeK-iD~ zkpeMso7mo2f)HAP9ZRCo(G$TBn@)6?RcK=dmR8qH6B8hX_imnFonvRtvNOY-f#}mP zf2>k&*gEW&?hBsptwC0dyM9G>;>H4eJ>V~^d3oGunCQzby`WiO#?T})4Kr$9FH$~ol%$;(XM!+2+PD;HSo()H&8w-Pg$yWyTN-P>{3kfB_aMra}b zl%rXE{$bDG!H-PaS4<_H6<6DR7C-#g#NYFYRux{?tckhP{6?N?Qpe()7&W9`ecJna ze2H?n%AkpETnP+tWlYQ*loo!$P{9f+hs%6}+wUzTQzjHj`qcGiJG{q8J6dS#%l8;* zgZrJ2wqnIbAJwvpts$(FS8J}5{so}`y}vI%T?IY7`$_kuu3yv#JkYCuCXxo|&r;!N?YiAkAPeV%KcOO}r(&y@F=H-VGl)`#pjx7qTmoqbChgHQN z1Z{{nBr_O(XYi@Dt~+bI+BLtL=S1&&`OTXycYYb~p~-Y0dyA9v6Olh9!#@|O?rzC$ z0Ro0#j!Yh!D~2|<;D*rBrJ{)m#cCf1Ypq3k-UK!i>VXOf1juPl2@B0n z_9}t^LI*dwKj=PV!I}wu$h_j8$zGk=B*>pa=tP;hzp%!{sc~tE?n$D}r@&;NfA@D< z^9-cOtiNL#EDDFdqyuKN^|+Qs%(6v&VC7V5EIk{FWWB4D`!{ot(6Z z42Vyng3fonS)x)A7(dSt?~Z}i?AGj-mTq$S?FC0Uv&~D@oDn5o{-O-`x^Q>CQpd

~)fy$2_Ea(7Qh1au|m04*^WAx5bsa7Q4~wJ9B?{VC0(PWS9!W6Mu` z-QCwo4b-=YXE()gxc;v(3t>Zy6I}Wiq1tO@i^wv6|sf z6t_l&Bz!;1a9-^eRw{hin_rZ+lCT-uhp3xI5;dYNVyrM*@o?$}^-cbow;% zb7OuCJ{HN+cqV$jc19mC|FGvQk;7G+lW8XM^-R9b-!?r`v>sJ>&58gn%)* ztCdLw{yTu993Gq>E=*L@b$B~9`nILGux+u`J9mWgR0p&^2wE;f(G}D z7hC_>SjL=G-Kkp|zP*MTcdZy8%XrK5`vuzcJb8Pmp|6z0p5wl0^z%FaliJmL_o2FZ z2uqT8b{pdZ*E)Aum$seBv2V;}#TG^{gs6&tubuxJXI5!*3qX8zC1Z1HjrZJf9W<2u zd{JBsK|SLyqn+&50CIJ&+$=GwyR7ZlN%1*a1W_uJFBP3^2}i?%x3_)-tm`%w1o(+* zhd>awwlFO$I>!)|@K{9WO9;%Kf*ijoOON4G#ouosXA=iz(@H#IC{w5q$^3}p18Y#_ z`WExkR?<}d@5UdSYIJuebSGeUU_+U_Rs33oyNnWf4yIT@Vy>U%Ze)Jgb0;QjeBHbL zkFvzF3TA0Cz2X)=bCkTjbCXFwlIfeWZ~Fq{@@~*_y$zRf&p)**)}nj)u@`8)GY|hc zq+J><+UoEL>=&nVe(`Pp8yda5lPISp3)boRJElP&_UtQ4>0-Z0s1A|$Z&L<>AD;c6 zI`97@V(P zO@%)-%rSqLX-ZoWuMTQY4pFbgeL83VZ94#}*xtrX7)a`<_iVa5Pa1XgM|)`*g(=+$ z1`3KF7!I2905}zg;4UL!_<4X+5l3||o(=eJ`xaf)2Y2Eb#>*hu$5h$n&20bX=;I#m zqZ>&73&7xf%aHy6^3R*%Ri#yhHl-HJKA9nIb8t-6VV!l(>wO_x*>(si=5#bTW^H~T5qk%mwLgL)VcUSzhc>2d%w6a>f(GK z!s=sD6Gti`x6R=#lme za+}ksxE$tGQq5#n@zIA9X|#41Mw`?LEWE#RztHku$I+K{ynFxg58fRg2Us`b&>=gl-Q!NV!M{q3 zK30sE95joK>)CEYz|RAOEWotLD>kTU8YdMk6ql)0UM+R2OAbaJ^lR#guV%%)xi#>b zC1-(DSrKTu7;y=@%mDzlrn0N!#{_1wyv-OSkMo$%FA?FP)zz$n9O_X%fA5^nn zjSGp?wuT8T+GF~@AQbnLNzF&PlZOj4VcS*QwG)D(Hzv3kapuh(+xqje0es9rqUR%8(8G28?xf2+zdrTxUl%THS=8VPAs zAgcpp8!=c-F+=WX6s+PUKtl018cjXx5rEvDoWbJ+ME=Q^7^eTNG z_h5C$;k^QgsXdM-g{B{wM80o*1*Ro}=|_UpSMJ{(T906ZyQwyuM2+W^FOZAEqAIpJ zM6Dd*z(YPsRaH-vo)*$0>v^zyF4~i!D+=i=Qd@lJ_OsCY#cz|?p|g9Q+RUNeW)|X# zvK=)HN0junTgYw2c70gI63ac~NWjoLZFl6x>Gtm|uIySfuZ^8)s~o*rSW}heUDlMy z;ST`L8-nCzicT}>UE@MpqGtKK)`#JUhh1t4(o^oQ zT@P|`bDSEK4^0r*dqUga805Fm!+5M85+c4l{&D2(y}nQ$^YJ$J-;bm4Irfa_Z(iy} zy_k{okORN3BJDdVxwc3FChxcYe%}20q!@HkIWEWdGouY#*UWs565)zd#3bEBc-E6} z`b~x76Cl)jvO9D)&a>f7_In}9i=uQ{;2jvf(JY5&)Cdre(b5;fKDk@_gE&s9*_r0d z^UrSX1m>2SKUW#?@UHaENqQUp1Z>&sb`|@frDc+z&!!g+pPwGgv9HswB9cfpW0S;S7QFcD(JU4@uQ#GnWaDFR42O9sw!(u+5i#p^c110rFRxfcHhdF zFkWl+PZ849CrA62^21!BLjWjXdyV8Q1Kb-Ws`klBc1PyM!_S4E3NnLx778A|ab^(C 
zuky?g>TLU)QLC9(#+4+@iKQ&>xlv)ppK-H@rs%C`=VC8mtm6#dgKyW=%rna(SLWqa zl5U6v4Ae^x?EQG)X7_9pYAjzq$ab`Q_r4RS$Ax#Q?}X&vJopP@2JW1`IZl~7aiP3c z*R6f1Dti_dO#WWqos*0i8H}CR=BIT^lC~5s12v$_F#{PmM0-XR?y)l$cy|Bp=8_r$ z@fD>!FfNDh31{jf(u-E9Gw zxsRQ8`g2s^-4GzLTQ(m4)am29I+QciomZauJJo8VO_6kPPj-o9@$7>Sd&1R{K6jV3 zjlJ73wB3XO?XEcI(21s%e}B>c$tgMPCca$Soq zQ0LX6BgwHK>#XW@S!-|^-^TiKkf|SVCxO#FTZ$BVCN>VQ0|-!JQ3)7seQ75=M7~Pe z1-z;S{)d0bfX^XsWi(B7abD{wB$rV`wAhHYcW~6ROo_#G7R1ELdj8JOt6%IFAVb3X zH|IfqZR6Ci0G0e9t+XUbeO0Sj&NSZO@DlkOHGsgYROps%beMvtbjjS4=PQfzIypD) zug?R~r+C0O?SYgX4r@9ddUP_fXCMu1hpn7L&QH$PmssTcUg(avT^s{9;EB8u@qjPM z78&Pp>OcC+IO1>_ z=B=uUeq}?IQp9I#4u1mFri3t>U3R+#KWUx03FdBcj>2H4Zm#Opid(Xb&x|5hcn!;Z#rBaC3W zv%uz{yt-?o&A{_$LB;)n7wmi(6_*IdFhB*j^TTc4GfUic2R8BPgx+1jwOI?anw^r+ zsQz7E$UfhUe(FRCn%s6&$+4!m|RgfjG=TVZD@kesMIVo4NhjJvM5{sajD3p zU?z3F_58ZXTH}RoCN@hB#EBde+^r^BWvz;AtKan`qWJ8%v`+E$VgTWN0Q3aNzGZCB zsQmQ(vM%i)Z~xV?>801-WRrFG{%Xe^)jTJ;dM5uWWPYxFR+Ili;fIIe+Jjo(N{f{@v#%@A!;vwAa zmJQUYTZ*uL7Fk&bSNsAC4Cl95y$~l->G_Eo+UZzvR0>%c&rpE0;XEg&jbWl`I$^po zx;LC>nhx--e8vuPEzYk1vAucl*E{yBK7)+d0OISYpXB!3iU9AfP^Ie*Tum_$LFH@BYKp%#@FW;$-4 zebULv{aN)a&hPv92kc|Srmm9Zx>24Y+)9*ClYzT_L1+cJ@YAyL+vYNghID$Q*+VBe z$s%sRhPcj}kiYuyO2>a|aevpvX{F^@%6EHbM9T&vn?*dEO9PuYo3S}r+O{3VR9EF@ z;vI{5<{nyQD3?JmU8sN0f7sFr_vMdnX2Tu;GTnWyJz^|VAcmRG{x+S zmbNr^-r!vQ>2<_H@GPe9$w~T-E&N1sz$bMlSme8>2&Y`DJQ6Z9FZ@7l(#1o$$&Zz| zz(6TchZ^E2;txqZ@5C3U9ZA2CnGH*Zio(Zb^;3@c?E3fB=6T7bgX)-6_qa$F+l~;( z8~`Itb>ACS`zXMl=lovR`CqHF+5TD1!@5f|VBh5Io#m-Z@WK_4J{}+ZgVb0eLb}kt z+&&Uz$0AQ{MhMpe&Oa}6BsfY#Swvj-tYYHs(DPv(XvutD8mQb-@UWopYEKM&pS}5@+UP2)?D!XS&YP8g?37 z;s6xUZy6{HTj1(|*Z|zUMTZ1u%-ONfiS=uOrMZ7hLFVG=rCh-$@ zb@%*$B4oZ@Z3Nu>4rc$xSu$q-(j5%YXTg{`$l= z%4EkOV|5;Qma}@N3j$FW6(q+s6g~Nc{rpORSsng0z^lE+TEWYSv|OiG%>7ACU4;Qe zCw+ zZ})CIk|--q)0_(d@sKMnhVsKiIKTx}w8p!3Va~L&yRY2<8#0X~2R%lo=xMVO&;sb< z8X6j)hMMr91B>V8#k~TkbyMqMD+3-kC8qoSDW(w~%q=7XvMDv+xj%oA zOLVu9ZIxiERMauZRC_)+)}XJT;T|;X%(Jxa+d9%;_x)PUO^}3Gi?ZsJ}AWM 
zFc-#lhVxMni3faTfo0bSKSUz_S{Cz}uR$TqjZXmf?5ytrq4YO{&Kp1oji-97c^nN3 zm4A6R$_zkRTL}K|t)M~E>YbHqvqtsUGm%Z>+nWvuwjQrqnqk14|2 zTJ*JO3h|VZFSZN8)EO_f@>43jQjJcES&bfB_qKYMiZ9sKP~9AIhqv<&I%;l0)=eO2 z8*~K`a9ERTvNGo>)ls*C#|AuM&voC-d%lQ&jm#1S{2>mS=cry|?;MC@dE>s6M0?j~ z$G(?63!w(Y{0--E(`NC|4?PPzP{Uvax6{)C!cSoR z{nfy$x`D`x&~w=_I^PYa4CBoMY>8*EM&Mx9Np7Q3h;426q5&%bUh8hM(FB=P+qu>s z-D=Q<@lq_W_qs@erjt-dLO^c!2h2Rq>#A(cKqD{ufDnVz(>vg=C136Su#=huXRe+F z7ODh~+!$iL{sCptp}eZ9mubvka{Z@tjPjRtH=C18m<|y;G@(j_Fw`OI3BvMivCU(P z)1cZ+8JoTB*{N;64P(!9*m2c@pAZz1IFil4a-}6hhQ|%gNd}nwK?18(r1p; zyu6_b*iv*0C9V~Y}KeFbd9gp;)`CUd|$5Yi5&2 z{YsDY!;hQ?NM3%!0`rq8_>(|5>A@9(lT+)kaylJT{7L_h(*XG-HU0QpdpCv%52Wx( zBW)fRC{;4OrRad)|BsV&77_ykTd=LkzI`TEuCA_@qt@o;$7-tF!!p8Lt>P?9Ow5Pl zGI!5wd5zcp%8H%LvSmw`03JWHq4n-z{pjm4LpA9;<+Wd2ZmD$}+ z&KAcg7&LNu2iYw%@ao2b$#47QaU5o}+IwvUR5in%^d9hsyYKK!Q%!QpT14XEz`|Sp zj(&muqXcK99%sX+dOn&yHAAMDW~m;;8+3MM^E0Ejl?FG|Qf^8FvM;AdqM!9OH(K76 zDNA%@(f;UoET6cU(M0@qx zA`)N!B$Gj>WN*$<BA02LyiaM2jUkWGH4TZWMSvy zS{H$tQ+(rG<$4U28BxI(2FjzO2}`Zs&)*jOt48jt2Rrp$`+2wQ<9fW78l~UOpQF0{cP_!inuWQJUf6*(h zIgt_HNqxmRXa@T01=4^{40k4N6vD&gaNowEGd_R+xOPSHRs)yNqIQFyuR7fNz0- z@8s?K#oa5E9>f_r0xvyA#wB(~j@Y)c?*i8Ejk1%wP%05irFLbFGWTT|6+b>LZH-^% zMI?hGIE?tFWY_l}oASSV{NWj{JPkD|jdDnzd(&BEan_2g#ior?&QMdor3xm6C`gKLh`KlxdnQ!T}q2dKTISDl^u4ab-lTP z695R|@Lj%IF#fy6DX-dh?@}YK#$B3BL_Ex{o^GzjL?w`M_@n2~vn+1`ZPgb)1I!m7 zcBwT%xg`Xlf=F<#9?3pNiy9mqkDG<`zv^WXKMzhzI-QgV2t2@HzCScs0JU$Q@{pw| zGzLmRxFTS*d7K9*_2gzu$w31r(1Oz3bB=+6jG3if@GN*N313M=A%Q1UoPv8Fc zPtl9a6HIErhKI*Zt+4ac5iDX2(#fDOF$ooyTLSrkJ#_DU?Qvm+^Y$!69npk&s9|%- zs+RT;Y|ZrZ*;;ohIy@%dKZ`O{WHWL^9%(I<9|<5xj|9>N@SKdyEm{6w1^y`ygs@BM z&2+cr-pJ=~=cWXTTTaeZfYc*UKAqr*oQ@garaa0P@L2HOWMnvz<~o0irXU*1NW z>CWMmMAQWgSyp<0%pI#F$6ewA9t$>rC~6z#!`kTf^gn%$zq(Y(&A9?CRAv9?mBrhw ze7m+cZA7W>Jc+JH#*>sxv!>tVMML#bK4YS_DDMbclDwWZtU=?W=mVr}`DPe);dlg- zG8QvcZgu&&>XCkJ`?+T_{ik8BlP(sy6In`J3Tcg1){h;r$P@}D4n9!heS@uhr$-(T z+AD(6xKS(hi;AbyV#&zR65yo{H3f{-Xn3pz{9k#JH>8rBo)mlkeebb4oSxa%1~q1vNfHLIFY 
zLzz4K^u~uh1kc>J-7DkjKzQ_K7wf~GGgn^7XW3BGs*d4vBdGt2xHk<;>sd(!m(xK#jAg?439?6*P#5h)Uvw z#(_p7C@4e`jbTu6zzOqTW}m(H`M=kDo$I{k%ey`xA6%@Pdp$hsdG7l++^T|_xFJAC zNDEr1<0+MiW#j0@BZQJ!vV{Lb4X{hx@GZon8c=5bDo(%yZa_>jdk!A4DV~L$Qy!)% zrASaBx~x*Ko1%=mjYyF;&=_`!wwzD?*q#nQ-#rEyU?d(_n+uB4x{Qr2W)bKe;X@~* z)!JsYjx4D5HhL_f=f30SiuH zVwn&361@+pi z1sS}0Y@WS4ChbUyXTY*l*W1LzWZCIx(URAUUYF%ZYgz-z%l$>V6Cn%x~&Nm|=lsh~_#~9}uBstjAA++gaAmOwX;n4#r@$}_Jq+A(4 zs-ODRg9>{HciHtz2P&{Q`ExSPL=CKj#Cy`x{A#k%uWG-hA;1a;9*E@EM^~qx_RcFb zO#!wT+J&8h|F-F!u-!C7Z1{ZZS@)C~r+mt$*UC)B=3-FZA z1~Nb1uM76l+kdn%avFV^ynmX|VRjHcu z3x?fj`jh=YJRhZUdJ=yQL9`ht>Qfyfpn^D@Nn&f}?GdyjuoB$4C922$(t9Hv>5xES z%n~3NI8dJQzENA%j&u`M#g+88uHiDLL-_LMR95a*!a}~YG+};gjh%JaUxkplHGF4X z*u!U%XW8>-7s|L7+tdG}a}!oe7Wh-=Ek5=k33oc5?mnz@X^s#<1|$jpj)`gpjc^?FRI(_>FfHXbA+^1SKTq~JSG?n79NP8~; z3`iFZth#J@v^Nux3axDgc>9Zv<7Hp|Uk`)-8uLCw$s{%Bu7z2DG8-gBws5|f<{PlS zl{n&K@((z9X5jDKwUpa9`lzGL}Gk z-IZ~~7+@O3LI6x9We+^*Yb-dsp-4PtO~5Ere)Uexr#u!B6~}2LB%*X3(3TvqS}YM6VOpEEchi4nT|alfV2 z=2(5(a@SLaSJ)!L0o+4PNTPJlO!{fR*XVdQzG4wdWu*7o)^x=6onL8-9}j_OGxhQ z%_OsB;w}MOs@x6->T*u1wgrfbyypB;4*ano_~@?rR6_4`y<|5IT?7kNoOH*Q0VB~1 z9+|}1$F4y2`aRnO-;#t(uQ*?efO%8M?KgA&M4F^ub2+th!oLnQh95B-UYs{Q`c`ES zbe~5`-A;u_CcT1-dYUtv5BQ7*#X;k=Ge=@ZLT@~j@5^K0SNUu>u=~mEe*VI2KO#ktPb9mYbTX^5m>nO}& za+7`P(c++(Di+~6BSFY{{Z0n!EB1+V-1HtIwWSsyPa!~OYrUqSIu{C=(TCOp z!z`U|bc83vE-{Hl`#5t0gi(XtJN4oNLu5TSVTkkQPwnAUPhwKF@ya-D+j_v=N4Md% z$aB4Yx>taqR3j#nPS!ibyO(R1&E*e=N;@JG1P7hFz}M-|hsIC|0St(| zk{X(Y+jpye{0C8`b<`(m+V1AJr{|_;`Go*e_X-Gb$Ua+b=K3o280gK^4?tHr<+k8< z6{MKdQy0NMPp-QIcuoPFo$f(*Ps*#$8r){u#Qj=z4zGm@IS?$+6@o=naZYB4zaAIh5W`YJ8IRa?zfl8ESE5*`N zK8EL3SIJ1%Wwo&rF&H``G;Mn}f?`f@!qO4yC=n(XnOyo_!}4dOKYqe%TI76BIZ9J_ z@U#$szzJ#&Uu&X=)Kp89UTq`|kFa83sDr|pWNI**P$Xsc)5o``KJ@zV!6;Q`OE z{n%=yS6cy`X?97d(sh5P| zh`^gwe^~>usH$4?rS;K>B?pZAt=fz-tm%328h59J*j?+T(==#(!YAO~ha7!V`yBTW zX5oB+o}_9eWcXyQSzEJZ44ag=zOaR@MK8CbW(M~y%61}y##hoV<{b0=>iKJaAGvOX z?#^r}s$F~Xw^YRDqt$^w)D=%B1r2S9=9EoJ%yz0}YX@Z={tfboUHk9>`p$Wf)3sM6UA8uPsAr{k~Ya537~rGkvqd@6_Zn 
zA^374+S=JsiLq&J+8WBO^`QP@4Ba$H=J^>{ux)dG7v`A4jgLb;dhpyl&bsTf8(@;i zc9)z&!+D03P-;@%LeLM|$>@Kuw*dXM@DF?TaD4sm2UTgvG zP*MzH=N^=hfIg{M3Q5&+#;~QUZ_5hYQ9!QtT}?ipo<&(^_L-+8Jh(j#U(G;Hhld7M zBcqLvn*F#ayX_C4=?h5~{EASR`XQin>eVts zbLrDI1#QRxz9bX>IA$X6N8!o0bP1*nq~iUpg35`u zT()`jY`Yoi4ydR$_NP|ycSX)uU+9>^jKGA>TOX;f&LBEfEMrQKVIbIO#@8t?a4egm zx$4J};L&9m&1O-y{)eIU(HgM-G7`lOtrN0hP}aOWWv~nF`K#q!@RR2+`3gFQTz`{f z5MJv;dO^d1ZZ(dF!_w=lQ5O;|CM%2qW4CJkp!IEdZ=_P`m>Za6yK_vQZxL9ur!A9b z-M)x%2dv*h!nr_)A+xl?D9|=83gj0}CpT2CKjhLf36N1hDB2}p55>g|OIXZgvEjLQ z&Gg}Y(co3ct{>;bI-%Y`9PYl{jCIJ^{k5-a94#ZKMmpc>|9fm+;W_ktoL8ow@L>ky`SeDeVaZaMP-Ct3RN43~m^lr_q^ zaz-XL0tvsW_Yk@HpzJ+P$}@*#?W{NNHNFI!aX{6mry)J%Nb491iO&XjwoyG<+UA{O z1Z~uXB6$EcV#;_wnhdN}L-%B=oW3PVg`1XkXWjYhrKP16;o;bDxBw(Cc<|t1H^6NS9fjqpzw)m`=?+oop)@GUd^x1MuUcZTM*;U;vjcRUOBji}~j4Yv4 zo&-`D8auQdN4MpT+#u@@gY#x>E7mQ%9&!k%{vX<4hJk*~jxCD;TzZYGJESNRpgeQ~ zP+dS~tQd4ypoF(cxkKL%Z+$(nL!P%WT5i^vpLmEJUsDY`NNdO3 zyq$l!EoXhH@QZfdiBF4(k&)qKvz$cKAJEeWfLi!dSMJmf)>$wSy2U4_c5FTfwbNAv!HZ;Y9ItEo$Itp$bhKT0?8^10q7P*tV$SD@zQBC!kA{oTJoH8^q@!1nfL240Y>z&`Ue2FW%;+|Zm z3DSjDSEvpv-)nTD8vQImZJgEzI}d%&aMLo0)^3Ao+}Fe!M{V~7!}JA~Udp!5SgICJ zA6mEZNxsO@!lozTkIr63Ax5BbFt5BJ!Gtm==eS?hUqPc}*@KxrTuHy*+IMM+Mdx>m z!f4Cb#3Fza(6^Gz(z8G&*Bo~Q^jQOwq?@+O+$K^yIYf{3aK?663my#4e1VUqHl!nT zuXIe5F4JlS09|Q2OptwR{NDf4li=f#-ETKCM3oWf;kud~%$v6Gg>5Asgv>glc z&efZJbbR5SN2q2-_+y{>}160WPFV%4wuc-hkwPmk`k(j`DbwOu|9 zVY7O%W?CRk`%kf9Sd$IW@-}H?W%yzyNg*=b!8fj{t{gR&cuw_uPtF0bqSF=PVv0IRUKv3zLn&jE)*0=heHd^;zcn6()afv7 z4nb&>k{=)Q`>o9*x$@;KI!<@CUO0hRT$$dH$8UurC0jLse{Z+Yq({7+kTMUx@n0Zsx)%~5`H@84@CJHe8XvZ`@P1exO_{!bBq>PV(%6w zPm)-Bvhda#D)iQx#WxJFevt<;J`{5l{OmO1S^bsZXQbFVMfMY^&J`R%vHo7;`wkdG zEi~Iq+`0d$cxl@N)lU4m_}aP$dX)H$djhw@#FP~2<=TRiRBnyWG$DWf^SsmUgD%4e z>8hvK3EY0q#L8LOf0waO62OmYSDYoVN#0-IYutc0_{<(wSE5D3OwrWX{`R}OmA7{f zW~k2Iiyw>l_vaXHFA>C`!NblfBJz(4M83t5Oaj1lo75i(aD+W=kw{~y?&a7@Kg?kh z=uq=FvF0!5=w6{=)O2PZ<8%sit>Rom9q~+Xaf%srqhg`YZxix*C3O7FsP)Qe&sO(a zSuR~w3syRD0_~F}Wj#AnIvGebFkhv?EH!^hlWJ8{VUSx5Ea-wWA}58Lhs0o86QIy; 
zPS18vFs*xRv8{d{OYQ5pb~2{P?jp55(N>2Bh+eFYg!?>W9I$He&AWdS&2NEa zBLk+prerzcTcukU+po?mXD3onRKeZAVCs>fBHGlViBr*zBu$>>FFm(mS3`9=J6tyS zKrZ^8i3^3yDfB07m_(ErDC?_KBVd2nq}p6)Xr{cWtS2mqdkz~epP{69u`NoZZe?47 zii(CK(qXF~`xGLY?JzgEs{pWYBc9;TLko=Mtis}ywUeKYJX+00T)j0F{Ci5FNXIgt z$xuLlC9iQy>2|1x$Fayj=je1-4nWxQZ4Zx^nlpOQyy&}Q50^3K^`GHUR$TM7q16?4 zH3(#R?b*?!Y5f&atEg%1h*(|0n>400oy~bWaE)VInNj@ig#LqG9nE_BuRlbdsnc6Z zKWUEvrxaA1N#DvhqTbAJ$0eu~LQ%A}o)91r%=Q1Ic61h+`A#`iq}VdsbJ@SLml8K( z;9Xmj1!QrtLQNkFU^HR*e$8LGC=JwwdUk_v}xzKy$=jd_zED@J-lp!3#_&2^Xei3Pd38| zopM6S9oahmGSB)Le|SYY={Q+wm!q;Vj^3|Vv0&l7E^0t0AGNqJ<-GC^*#N6B4-KF7 zT7@M39VmY9mZnVSw&9MJB`KDb$TVu)=AX{8HL$aUvO5$e;IJh=PIF2@GH{F@r(`_qlBroyLTbDfe~#Qo@K)7e^y zqN8vfwiPqk65^mLf8pi<*lkfKhKqaXIca0DVcT|eD*zhgU(-5Gfas>Y5XDVSvXnLk zsYxD%bLTS`iqFM2d&P6x5qNfO*(>|t<7V9QgXcbk=e&4yW|B}I>tu9jx*U@@U>9x5 zzjbFrbWEJmHU&K6aIF)t`xY#pno()wZJ9MxUq;BIM+_0-7_qT{2?n?RsBz!&&Xq2k zlLPQBK*DlkZ<|4{cT8FyIZs=}3<6uaOG9J?Aj5lh8@0Zv3Rr07k8IlLm>&Ur(~*$T zk>ixTcKI(Z0YR`C=5oK-@k^f0V#BZI!OYS)T$G_Sdt*#L%fawS_oB~2*sWPKH_bp$ zQZ7ghk)K=~37up-p~*>SYoF!x2T;9%GKI)#Cj*3Hho_v#H{m_l_E#!fJ>uqaDR(A( zUug@(B}B+@y8!C!jEl~`1_YE(eYWCS@AXRp%89GI12QSgH+{o(mfs!=P=8q0Aj^ed+N9V z9cTUGT89_VCD8cv&pUVFo7wpw8Y@C|{nQBWFUYAUcOUvb ztyAvP0-q5iK@3?JfpXUUeo?K`ssJP-_oD&-RAMS`?6S-{>6u=m3cjeAlT;d z_}oU1p<6DA1l$FXTzhIMsC%iT|-e;nfeJ5mje8oY#-k3WsqST;@@Y?K8k^G()I zM|Co^W0bXG8VRb39D#XH0kdA>lc<~3#&(d3lMwFNRT}P9aqhC`+Q4~YI;i`SXN5`V zrf2wZtVsyqg13&;-RJ~iyZIeahOO)};{M1?_yy%4c7yO+Y7zprbvxeOiq3uLv|>V@`UZArVa*cjhbK>hEt#f>}XP@MoSCtwkCwMiFf)LB4#Rx05<3Gm*9K3mS<7xe}VD@kvC#nAT;f7x} z?RqGfEJLoobLAnhb7dFk;>7r~oKT%q#X#zh$mDzo=I>AfXNnB=zp(CnNqvo}Il}J-u$OJTORgebw{jst{>(;|JmkQj$Bh z!Cf`VR?X{FntEB!S>f21X5MSiBdk}RRX+dERQyK)D^K+@Y}kW2j>W{g05Hv!=-lv` z*<^?Q_|3Dc!dR@{AA@7@Q`yY3)eNa^uLm=LxzQzp5^Tr23djS8&khM9HVPXWC@4=1y8U z)cN9cUvbY!6{Lm8%56k&CG^s3bNJggAL0X}8~hrlWHf$|1=AtAMKB){v1VTpY;q-S z0MW?lqkm9epMf$92^EAww6<)5{#hU+_C_`W!Y#li>g9BdMyCoDt39+Hw_sKKV?Z}n z%NpQ$YC^cj|5es&OIj>i>v>3xY%X~Kl+wdf{#nzTY02opiPPQd0^>S5E}my--#U3q 
z(NddzY}1>~wDl3>Wfo6C0SE>w{H%I{mwPMl7r;|HH+2i=b+68=WRYl@vG<5Ictiyw z4Wl5T{wj#|S4{}##rE5oc%3eSDSkMH|}^ZPnxh%bn7Yv5cxM3 z(Ht0jp-{-^wC2G>X@+Uq8~g6< zz&s|~`+uyN4~8iZdhlg7&QfTw!>GNQmUW|ldh!!*)y(bKPsi7&O`=YsFVKutSby^* z1Nvy-39bxAFpTErx>!c6la(^H<%=T6CaF79>J4|_CPs$xocfeca5UT0_aPa0d4%mF70 z#js4_*KX%8-q=vSsn0Jy%^vUZ!fwl*LJBguqY4t_Rn_7zyN6bf{%BxBS`a=EpLs!% z^}d|LijnZo-)lI?IxdGFed_EoL@ya{X&k@Z8DJBdne(Zj3*bq=>ZL`9$Y$Snkhfk4 zJDL)GJr@?uyMmZ=1(|}xQ)?hdPI!DlU~1^nV1Cw zlTA~v%py^J`(xt(sl5w80`)y^_#$HN>W6Wt@v%78=@7FpMy@(05Nz=)U26Du(CE}6 zi_{>r_!EF@AsX$c=H-CXtS>!Xh@G5<0koqVYt_k^gNihV)C@pHkx;IKc$CjQM>E$Z z0dj|8&1oy=g-k4Ux<-6(>9#a?t&B5fRka~C)RKqNTgGM=tOXtxVJJV)b;{6~ggaR0 zaK8U}KQl9}x%S&m9~!i|PEHPWxw z9h_jScazrEzE~X!;Wpu`5eqPgSp5w%61~c!tKu4ZYV9Iim~Qeo2w!b4t|w*)0WD$6 zQgef@j-E|WBQQS}Cf{m)(xbUH_EYX=dOrIPh`#kGk>7dhvq5;CK% zw|^SyLP~Nu`bMWHQqb<|`bFhv?rczD=ytJ1V1ZbA&6~DrrObTc61~G%?-&U&HbE7t ze4Omni!p?_fKvQ;7zs0T{CG;|rzy4V+bUdaK*IL9p}?3`>?t4p_Zonqdf>ZoJn=%KBBEFXWF9D3f8JQ zbz57QlTtsUs#+!rpDVBI?uw=sJBx8;9Sc1W`8sziVMFlS*12`*$Te(xObm)`JQQrt zEm`u$p+NcHq#ZV~9NRkTa^s!Xf0w0??TP(9o5a`#1@brC?;ze&<-T2xL-c&mqT*e; zum$IJ7WYZlcIkKa5ww4Oj5_IW2X7Mm6KNT@$ql-rHkMY} z4XtD2-mk2p6NL?0Fr63IGHc==$qrxIS@wjrNv*Vi=P~cvmZ@X6)M&Pj*Tu2oW@W_$ zoAzv2>W?fbj6@D(LJZW@?8B@e>n8K8cUMIzl4> z=v_R}**|XlHsddMi9Z0n;lU|H{A9jYlXx=Aha`GyQ?}FYDRpUm-or*thU1;!Vmb1 z!97i}5&)6v$FVpauq-$f!QF_~(k4GP*;CHxUM(qEuyQ(a#fV_^Q)!Ysrw`k>G@2WU zcG8IoAmj?(9WZL)N4BDEEQ9*{B|FZB#c^l@zX;HKjhdO4^jKT9@mf3Y{1_uBwF_Id zBX&u>{bHf^_lafk;%Cayu=eJHn(eC(vUPUMf@T3iej6faxWmJYIIui}|J^zNy~c5_ zx<_!1)>7F#bpL#5mz*#vuw?}qz>V+_da!h(esV_uy(h}M2~ zj&ES?6y>W~F>tpZK%z(a9<%rOvHEd_Ol=ezlhpVduo5{^e5|~e?TPE^Y2Rcr?-VhwX zB$s(OQlixda z%vqbg6F%um1s1n)!q0YlV{q@>j}~<#?v@L2`-iqAMION7vJ~*EJ>fOkRoYtn?cV?Y z`}nAo$jKemi&pX^+~5-yBV%N|SH%3tgFYSzzC>xbrHr&$qT-8WxgI zLbiPVu<4HyneZfpW~;E#AzKx1LDAp880)vRiPBZu|BYS8Fw1}K={mNQbqn7W)S=5+ zzjgTO74CE3!j|F3LdO&f%@YOji2xdyRI*E6kkHE~W=q=ySG7 zuIVK82N(wgyzKd2qa1e}H{b-wF}pPs^0uzN*Z93-zxoIK45nyjGj;dTBJSRMjhEtJ 
zwa2ypY)$^_qW^lca=5g^3lN__hz(U7{>F|RDdHtGXbp_NJI9Mww{3cz;Z(GHeLmwq zTDzLp-k~_& zHC=Q1eETY~x8xxkdhzdyC=5#As69`ZLY#Y%8*a9>&2jYxqmmzN9Zy)g@9jk82a1^6 z4#dQLw-pVshL{N~HF?M`9LOpuZ84z_v?v2P3)!uI?E)Zcs*&Da`T|nV@C#a0-Y>?0 zyp_Cia>n5zy*a4ic}TXtrBO5c=Yz(|7%|Bg7@&}N?mCf%ap#YMK&Q556SwRAaD+!9 zNHml4h}X6)M{wg+E{(>W##eKHmGY9N_tNum(Ou8a2ASZ}n94TkFo*#xe@hj2lT_L> zlbBD|2NK$+A{$(c_QM)F+1d5N_7oXw-T2r@V+K zap41ZY8;ambb*KX>VpS6ASdI1k_=M>Fd3xo;9TyXR8_IYcT(d$Z-ehKCu*jRT3YGi*l@ukj4|@jF8PIl`q)x*^gd=2l5? zdCK)hd4x~V+~d;i`|%|^q$KBuX-rdTBgWBcLDRbZ{;x5N@BTGvZg?sCW*}{ql%>&% zmy^b=qjAIh8QE~Tj_MXFl!Vl53 z1)!Mmx-N01&dQntYR)el?J0IT9&&EI7r_k)4KntxFup2TIY?)tGIW>#voUbu>4rH3 zf}>tl2P{d+roNL|_kduWKAs6z8E0+!h`}b^Rgz*h_QwUX;xtNBY5sCH!a*-bc6vnU z@w@MwqcqO2ve?-5(wtQrPNyE1@_&T&G?Og4Ucs@?)e-`+Cad9lWEn47yf64 z91tE?eZqFa=@JI5@o@kCfaUe1^Fzrw{{P(a9S6-pXJJ9h(wQ1wbZ_&q_Zl1DC;pkt z`Q$D6#*+y36q;Y+8``&RYN@+s8j;e$cs>!(mkrX{eF}njxipN0xZIcJY-Y395|(q zQA&Kq8SJYV*i2?H{Ay$66EA%jcD^^sNJ#sC$eRVJl>$Q!!Zm~+QjKu+Jr1-5fU*xQ z=eU{wg9i^5G<7c=xZR&`?G`qG0B~r7gTvH4>g3=C0Lw3alPcP~BN}nJO;XR)$@1;Y zwP@dECyt%WFpkk9-tl=Dvl9@GN^}V9_gfgJM02fW6V#})%rx}`l-DUBA-LHJxgv^T8lH?W-*sva1x#K)jPmizX+AFbDKHwNWSg*GvfhU{c z8bjSZi~5~X3rEfqydG*de^@_gylwclIjI$<)An$3-Z!T6^{QV~(asOQ*#9MV@&Dr{2Ry}0v<#$&4>156x)^%ISj)>qi@@@y$Dnk8 z(h6*6MhdD=N+GNw8UiqJ+-uGOy}1t9cxLH{BSZ`P<<1pko0Sv=e>5d!-%5Nb91#~* zYS$uj)Rvc*3?xh&+45<3%t*0dW%v7Zy+oUdg!H!TZv*S=d5vETG6*iflh+8uQ2Wg* z(fi==l2t)2XT9OtLSIzOVE0@TJ6CXomeYQ|Y_?>3h*sJY=wmz*?THZR>8_O1z<^EF zq)(MQ?jAe3@4{Tf)ZM*t{a+5i&cSCFS{V)S406W-)`#`M zG3B&6bv3`|*2QdRpN--~A@bE0>-mRM4QrN0yAKIh@>xlV_$d(SzRO=` z6YEMlW_6aCo(|79&a$CoGSmjd__b(ydEqm{3!Yt9~;kaxJXZe|C9b%Npm zS)1DBm=Un9ipb@yg~tPu@?VUyJEQk+A)hOL5j*W4*{G1m+fTVHVh8lyzxdjS&)VSEe?E$+xCShkPMYystVF}K`cy*-ivG+;w z8DxHc-am7E;1|bq3S5tcVe9D*k+9gbH!`O7X@B~D-^c*kkd5#E3l?+jqj6P-=gVp3 z?b2qq7|gZOQf@f>6FUIwtwW5g%P4)&V96whUd36gySi+*uSo=zeI~u;&YH*5buOlHO)`8U_geyvN*+eDRZbp z>2AkYWSk(S85qNU*W>*08F7^zXULfyB(mf@g?DT4-ZlQm$JtZLCj_^~ol2zxW!^53 z_(LKmdIb?!Xr|*C*zk;TXx*m(2{)t^)Dfi+-$^I%dXu_fRldO1G+-^vfBd`~p=WqB 
zj6S-PE6@gGcj+Q%{w29`EYXHF~+F^6!AN{Gm%{g1(+54@jOiOWszO4dZZriUbP zJw?tiz`ZeI9km%$eKKvW0xdQA{S4fntI0$fCRK##6+MALTy2~IZ#T(T*H@M&^M`jj z6)od*!com$*?L~>FCLU(rRWlekZ1*xl_)uo8scL|+$2*=dnVK^imns8Tix*$F7&8Z z11tjjR~r4h|0XRTRd~%l-Qh6&ROerV$8@BxH$Q%_0k5F1)T>kp7ZwA-wWZ*nZ>b0W zW^f*I?>|3B|H&_Rb3-QrnYl}b9b&y<#1I25IDEO+wn3L0rmzvndsF!VUA&0366=N! zLfEj8GdZcibhPDNSXVFEOtZWv2CW1N#fL^8@MoIcCmMyooKcvALJ9$A-v{f1?=`~u z`0_*t`H(fnF>#=^{|+E}2~+CVBXx?8`Thm z=9&nylhnwwLmcwc8OB$YR}96^qC!PmPy;#3KQwLEq7`;g8aD3@A95L5G(Wj zTD=GsW=evZRG8;aaGMs$4IzKhjrw6PK_6|#-F>$1pPVAkHL>V|J?mkm6MrOxaD5G< z3~xRif7w0I!`feTHYPSQu)qRZ9`;%1&1wYX=4uzgLS!EIhW50wD9d-OG|TsueNUYg zP}}IGSaD5BU~6)8K=q7CzAhlg1n^8s*1M^7o0A(f`kUg!ko85cxAo#ArVx{pt~j5b z5gyB1^ZLQ}C>okeWyCsfh!G&Gr*UBXwp4fEbK#ZK2+L=M{leOQ`e0D|ZMhzr9Y2?q zlG!yw3ZvD^qphb-dQ7^|Pr%(V73X(mN55HEeoWb~4v-y_JNw#MSeM;(V0sl9cWHa+ zg>0Ewktw@u$nF*DM+=W%|79-))b|=QQi=Z7{L9^!%b3JxTY+(X9S1GG?}>??9i1>7 zdI!}rm?B~IkxlD@%K3(7JthN0KjkS&NP1NOzuMa@Wv^zYBd-L#8Z{BjFyF1bfwer>y>|im?c`l*PCAPG^&cXFgbNQFj2K0q+ zFQXIK)MAd$ap=9hz<}eB)`U}5S4vg`gN|4?T-3{1@?_2kgzy2?L;=H0x88|;enKy( zK9U@eKCr1p%g8#_IriJId|~T?W`X1wkN(Ei!jG{zR2y|S3Dq)}C@4=l6Ix#;0{Y%3 zgWy+lP*&uc5HkP^bIHAhDOU(`CM2^q^oOW3VnFH)uz&&q#%|LF@J{IrmK9=LW%(u(MxI=(nvOYARMpw+PFG$A$ z<=-eKwZ%rCJMUlbhRCJvS{_O9u2kf4`SFAyvOGybJJz?CA_r)d{l%u8T)DHsh}yEN zxowfMDdY-k@iB6b7Oq54nB5RIKQ9p$wmEs``(#ATd7{xC1O6>Z&9y13(@|d*TGcNq zb|+=dlT(RsbQDFlVNhw;5lFAhGX&s5x_kf)3J@5s5`-&9NFFs`k1q%MeLbW+*3w~l zgc=)JoAZJbg+-AUY=0R!cgZqUnZISkr%QEFPu9F7q+N7t(q0+wE&S-(BLz5k_ zgoOLgl=Gk`>y<>E8V;Csscr4r*}9UQ4z#oB=Q*0qwQ(t0R2o_jo=Ymd}iJB`oj51d4c z%oCJ?KbxFi9zlL7gI}7v_aV%Gy@0+1Y;cglA-HBNXvg%;;R{4lkxalBy#R`#`jdYv z{$0d9d}7y^*e1TDbfvuk&fBj{Gt)cZQ2D3C@ZXQNPizDSuQ=WJz}KRQUWehMPDgTn z#=PTxMFlrteDaJU!vb6uwq6!gJ%Nmf77Sk$bu`83mEs3Qv?X6te`!QC_X(49eQOy=sFjRvAB`L;4>f3OkfUHNQ3F-u=~=_=NBBlg2LX; zuhv^*y-or)5~g^MUd8CM@xg6iK8R)xzzjl7B4&D^@nypYXwsvXMKBCuM$tS~*G!*V zc3}_IO66L|>cF%r{-~kWJZYoz?{&^DU+iTjP3>8W?BGk|Q=Fo@5|~qwXV&afoof%9 zhSCHmw4L?R+SXJQ>(G>}O?^Kt=q=EDe{C8o?;o+M4uf1@npB_#=FJ{+D<`Z_R)7}A 
zB2Wr(?dAib99F;%6yOboA3tsq9-p=>?kAjx2_Xs0YmNgtW&rO7@SGy;$>K=QzOtMK zPpj}72D~IE-c*@WuyjE9VsvD#SN&ET85xf|DhLT6Z7t@6LE8({;|gwH@CNWCs2oeT zk;1z%wWrSme67Er8@bc>Xm0;; zSP|!060@`|fMDav=m`_}RViSndiPv&a9mg}5swlc!`H|9eqApfwC}z5k5pizT>6@@ z7xB>yxG8`7P7CkoD9Y~W*Hf|v7rmg`R|{vNWzB-Xoy=WHwt3no=LdLjVAYn6gpEsN z)(*oysTtW~ipb{cmOb_-&=OPLZt^Z5Vg8e1z6l@7LfzlaUVr!l8{mUF?GemDV4yvA zx~0Q=M=@*kmyV#gVR5BzC&zywI_S-mUN6-+PPgn(!>jPy?CZsO4F!tShQCNQZK4#` zX$yhnXIIQZK;&$UX2xTDG5E^5^MkF>M!7%Pv?aDX%I*MdB+X>;L@39f?h*6mLH4oM zOCBfZOICy{XG~d4x=oVGFv>GL^j5$|l9`iZOC2D?Bcshj1q;n(3z}7Nbn54OIoW4A z?Pykn2A>fd;GS=+^NQkv5glVOX(xN*OX#5HZN>M@A!@@|M8bH{(*V)H@K9fRi_3b- z0N{Kp^$QwMPQpL#QP_3Bot@}fOXhmX8j9X+!9%_naBJx>3Af_0W_w!*)t>k?_HW zp=*>c2vz@7PuW+xGx}Zwz*N517yx5&>1|J+IhpLz9N_97#H`#~@u?iO?kw)!y!xH& zQE=JK4`|GT$H4}c61P|HHRQwupiibxxxjM5$xjU8{xpu$>LjFP;rs+JCF8wDT5C++ zN3`$$F)}qYbYAbe5(l>nadANx)z>TAy?zC+Ckt@)i@3xa7HeHw$@v@BMw2f;8ol}3 z-YFl27QTc zpMMX3v;(O0nsaP`9J)l}S_z-L+q}Emv3*hf!^E!I*XZ58RpY|lj|r+TU((*HVQ==% z`*;Bn&rG|hZXD1|S&>lPtRn>0b! 
z31r5`e?XMQs4(sIUC83pj(&HC)3)+_6F0zT$a{|}iYVD*gziDh0J$Z5=Mu_oPEpHU zZgd&I6y3va59euW0-JaMp*i3V&H!icf|mv2v_m!~JR}wSNRIV@f3tY#I#N!xc-M#w zl`d>CU7k@F)00G>+`lx9M%UWHof)Fu;$UFp+who94QIUB_Tc%nF0RjXFA(bgY7!*nZ zBqTxz7()`mkPtHXY}(Uv`aCaw|Nr@2PcE-yzqz^BUiWt2!&>X}Ma1lnlJNnM#zE*t z`-Y&F8v=*NulBhOePv_!mt%Jxg&2YtHOZ9jkTF1OL5A^~%~b84qm;B^`z3W*B|um= z3EdN`7LAsMM>hhkKF9%U4-eNv9*exg^p(PZsZH{9dkTFn02v!|ebm18`W|hkNQj?{ z?LS7wRFj>bWB3~V7~>zAF!nJxroF8I{|cPM_VrUQ(lNh=cdB-^*0HCL8CSO*4n-BM z&0~dqZr@ZNGy3u``~Gi!=h7b!3VKq-6>BGu&q9v($i0lK!)~4VY~$sdwYC=NRGF}6 zo$>}-W%}hfq#4+hC9*UOl(^C5_Gc@z(JS+k38j(i1ht_8WFH@sG_4jik95Azf6uG+)h0g8PpeOC4PDMp2eKJjRkt>d19=QC7w*B6Hwu+#$ZvqT zIt1m71YtLBbA6@vzVompetwfX88}koM2aEmI*7vityyD_1M$rpo2)Y1t~@^J5No?f1KY_lqEVj+xLX`^IKH?%fqKWfF%w?8AsKfgjUOav{8O@*>bM-8 z(%w`#&kA%21!Cf77|$FEje&nK?VZ8jXWd$Kj)*u<^DFWzXn;^+XG`PCQuUWasGS76 zy^Q!IXMq;&`{~fKV#1Ab(*P=v;J?jQ>5p{n;~ZaXsfNSOc{${AiL7pgf$jje4K9ME zM7nMQ#~8P^S4)3xVloRdDf_{DXCoz?-W8H|W7Hd@CN0;kjtqSor}&gqS}DLKjt(2< zKTqu`rbh)0^yOd!1NZjTPmj&`_#Rd5J06B{wUAfHpDn54coxrZ=aq)>3pihR>b3BL zO6xACM%$wNi%MY(eGTiTxp9KDxo^S!-44(6(^Jxj8C%kenrQAIgqSzbKA#pd&**U~ z=za`KSiw(w%p9{R`qTLuRw)_t3`h!=i@CYEG;L$%eQ_!XX{h1frEu`?P+AqtdyDO` zHWPs)(~we63o-#}^l6oldW>a^iESmME`&Fxn{DWptyfa)M2gpD;IyfwEQmn~ENBxh zjf1a7;-^M(;TOw|X;#|!8C-zW^HtA26nnDp<1vGzyv3B`qqoX zEoSvxMmjaxNV?TXLw_?D`?sR7?m0*7ZW%A4)MHnVbT6x3mvJ<@BNcbo%*9ai*lFpF z&{<-lep9(AS)l>jKs$;|r6Bv|9n7lF<@sxAA~6Q&Usly!nRtwuAO5C1G7MkX6djS1Q%d73Zxw z3kk^>YYHYuxK-+5QyG(!^!R4=MbFvem>0>a*v4q;?8ig&bI&e1x5~WtZ0HiC6+b3w zTTTbQz&~HuRBj%F_7swhEN@6R`U?l2kd1L+RhtgE9A5KE5PPDi7mr0$6plN!W@4<- zoeiznp927-XljK=)x@gDE)6Fj149GBWBvSL7F zWi|~b0<7zqAg?|1d_8iqXpkATA}tu*iWsX2+VZ+@haDM5Ba%zqllv}r(@YK!Z}@GG z#qMctS5i7nzANuxOt8(^m-mJFdR!1<%!pH}>Kfr{c$Bsonwvh^P-v34_ z(@bLPB`;Wk%Kr~rUF0D3dEu?SqZ4$^|T2QJl4v!6trB0v4x18~&j`hFw zG;AX3%n-lFkwQmi>7I_&du!+%ocV13K#%mFVwmki*FVhIT;u z(ibp)_98xeMq-nFpyk|LFE}fx*a~KoZn-Yo9TC&i$)>l7%=*y|(RnKZr?9=ovwB8r z2AE_?3BA+fXT!Ef+g|$LB!ZAZfSM7Fa}HRGT~{yMWM6X-Zy|tJnZ4-Uj_ezL;*S7j 
zG&uN&c~yV#P=D~h-W-2+2CQBzEz0nVt&7GAXfL=2wPW36HC;VI#wj(yl98d0QgNzl zs}NqKV)V4v7j+@qt;n+ZQea;UI*%?!a^?__XZZEUfKEr6_7=r}YHI>pRj71;%yPzA3wj>N|FBa7Ao~I&a86kWG5w>_l> zf8E{x=QnMHzI2s(ni+kTJk~#XbKQPeHM$g8nrL%-qZ1cm#uDbDg83+6eUPJ@W=HH* zmEni<2Iq;$4vwfPDlxt-crhxU>gofO*QEd}GfdRX#Qy!l;RUhMc zUEr!bW;Bo2a$XX3>=i@k11pT%&%M}xKO%HmQicE7>0Th8oz`mopg0Ey*Pr4?ZKZ4C zVg$l0h(~Ba(Dao|oBxc(hg`Xy-0}@1*Rz(!OTX9BvoMSArr#L*)!xOI&;%`v8^Lxi znu8L93_mGlA?{p@aBiK7M$F(!|4DT4F??JY=*O_7{s<%#&GH45SVqok2?xRSi*@1H z$^bYH*)gJZC7YDxtJMFPIbvlA8+E)CmES{Rb|>c!acg6`T5c>@_;30hL$NQe(9_b| z=g~TAI?g37B{M#8R;#+*3ezEd{<5c1D4b1F-J34UFnoj{7#`jeOJ4q3dU?7Tx56mDBOwH7`x&~6GcaOv+# zL6y)qir)Uzym;yJKP-b_Uhkg&mYQ9r;k{`4C@nNmnYE15LnIKjXZ@rL@neDF+m(Ld zH?9Te7=J&-T7o?Gk5IX$1Ny&q*SGx1ND%88VhOSst^RTCO!~>?oIbBmiM(Ytg6d?; ztWR9Ya^b=gr8$;-X)G_3Cglg&WTw1%x1;vbbGRw1YZR>9sKAy~cioqBg7u-IzIVN&+SOTj)3KMt7A%njJg>(RJwBM%2%V1~4Z=07rId&HKcjOmF9?|c zbOm*l^iP@VX_;#UfH(3yizdvs9hQU69*>E#nL-qr3Fq9C1-9f`52!03@$)8wvFj63 z**fzYut;$0hwbOu00OxNL8Cd>x(-Vq*~x+PSodV^0DRsv9o=_ZxH{7P1x&c0y4PMW z3sU3_328pf<}<9-E@fTOE0XJ&!Jao$)#(M5t||2!8NZ{>PGT>oL@O7JUg^$G*6uY^ zo5Hd4K)>Z}$KHpi{DC)STRx%OgwWvz3+ieLv#k+PpORdlP9|c?0Z3)=E_H zY}`%%A%f`{zu<(L=|JMW#M4hBJ4bQ^-uG1Sn>fn&PC>pRiMX<5%j~G>qL@Ckiq)#x zpf<0$hcN}3^??bg!wZ=cYljxwGHd2OSADYlf+NPylD1&2%z(%rjO+t#u(wocVR}&4 zF@dn&-9x+Bd)(){zwT4Uo5vCw;(?cj+gTB8LgP4ME~BIvYMNM(_EsB z%H6iW-OFugW&1_|n*U+XOLq-{U3FC$kbAhjt9G``ar$Fd{iWKy&zUfOR3+i>&9t*U zOq~ld&RAhVpfeJ4GwSB3?L@wusYu^ft9cd5A2Ujdg=*AhdbzTm)&`()K-C!D>cxGetN5aw0)^OXUtaXbXs86&E?C|M5_IUaqPlU?1C`#HLTGR9`s^(_(cay zbI)g!G?^%n5FLj(%TkT~mI!^XQ(c)tpiX5ih#V!(uFSi zQylJQ=28qRQBlYV<0i8bN5~@xGZV;Rtnl2q1R&PXq-kZj|J19c`Lpw9aou+IGi7%k zd3%jHRxgzxwMbS3Ge6w2zQdi=w%8@Q3gJkyX>PR3-F5XBUkmfb*1|4YtFDY2J=x{% z9&kinDlc79r7Wp=&6^|(&o=*v#L|o9W(BWssEy9m;eNu5QQA{FfFm6-v1x7Q4O*-0 z*9BqI4hA`;v{LK{WRxfMaYX4mrS>07gI2M_{KtVaZtBmZ@W0&u@p;tc5S=iI`HQz` z+ho+wc3Y$4t%zR~Kd8-}u0s{+1J=G)sj^2vy?(e`F)QybY;|LzzV5>whJKUl<;xu> 
zaE$f%EgZ`0`vui8-YGfpNc^ZQT+<09ERY0`o}f0j6SZpmB=jWPl?QkIH`dxQ8~@KJ^OxagSV{V#^OqPVy$^>VLeXDFovErb`W3k z%BE!e%{e{r$%sSLiO=jwzaW^|Ik%@wS_x! zcjh{&vAw%@F4Q=j3Wwz)YJi$PL7H5sPm36V@miA4aW3#b#Yc0OY=Z|!L@DH@P58jY zwhBy_6j#IF7V2@D^K|Pd$TR4T@N$Dv{S&g%zj_#eU`3 z7L^FpwHMek!ZZyYKaB5QqK*{O?x#`qm?6F-A6WkQb#KTjk1tpBsptim{`&Q@8Lzh4 zFoVucCccEAweIr|mu<^{&p#*zdAo?34qRh+eqOXUxmFCSnZD+lS7rh;7ml9&rF_uY zx+;30-68Omlt!;;u13R60z4?d`F}etV^KrPF|PX5iee^^U+-+nUr+zlCl%SUQc%7z zlurqrp-URXpHLanN`?1Hk#^br&$oMIK>KGa%mo7oy(By9vc|UAgTEsk&jj6rvkJ`7 zJGgScjgIK<*1LjRLZ?XeLpA0r_0RBCxYO55iN!6OQyt|(vsynosjSJ~i&%dW_8mEP zCGg}^Wgz^k;dJlZX0gu^;dk5J{O5D{P8xeDGzxXqk@}*CtSg1DiEQtj8L-MREUXiR z?5`gq@a+admeZEq3Iq@@S2_rgU*!jE9hRT%E!<7ZxHWMyGIBbcD6^f7w(wV4w|LD7 zCf|3uLPosZZERg0#`OVz5?bZ;*4_QsvoCZufqp*_Qx0(CXI4#>G+-URCspNvuEK0h zyrxWUs?T(|K?*r<=(5cuWr20o!QVO8{$uHmkKNS%fk)iPb#u8tYv#u_rYeZiGg{=l zs_U7p3uy+rAIEnDq@Nk$37xilfLb;KxGBUd!3o~G9(j!x!W5Bzr!FG8r+tj2SNhva z>wFP?hpumI8maE- zp5e?l)*o!!^PU)!qw$eUoi0bx$$Rd&vqE$oe_bZk&1@s8K zV&31!+MZ9h%1#VCk~(UA4_HihUvmnaPBrDDkFtxq3V5NEo#7AJhm(q7H+%gtaFc36 zcupu(%e2q4^OSl(7&XgbCEGoO>B7_GID@>01pSmPqlAaPQXfp1H^`tbc?==}Sw6S( z$%72Eqd(zpp-lUeMK%4FttI9T)Q!z8oOvY#V-Voq!+22^z~1uoE@Or#aID1Z~`87RkOqm6lOS|}0Pv}7Zrt^hMF{~%B<+m{Zo zY4$n42EG1kaP~C|*>B#4+?jI#y%04F%*_TrPdESsPF7LyZpItyc*>WaiML~-mqJ>d zav2tUy4_}xgp~gn%xaB}#eMk#8P=jGC7(%4(};|+4Z>I!N&xvS=J9puOlT7*z!%br zjC7?21{l_T|6;W)X@6^*?CDq*q6MFWzgeGt37OCb{(k0!jlk48= zDT_3_6mcLCqG8~MJ}x{3ixg~zCK2R+qp@7QZM77k44k+G76TQ zzGRk~p4{Z+zhzc9>Maq4#It(2I#URN&F zPdb_pWHN04*r1RJo0gH9h%nT<9gjG%r6h63kq#@wDbF^5co32z}qs(df$ z6-MkLg40{8kMmEgtB%b|Jaxy}woNM#53%*2v)e$o13W)eMhBG-1h>$kRdTZ$f*x@6 zk#<_7$cq_fgRKZmc_@0$%Bujpk}#;b(X;jIb+_;Scwlxv=LJ<_R&p(0$ZJnBLvJSI zKYb}W<6^{yj<&7Dv(Vq*U(ARSr-_D#hYpnp6OYpvrmj9McTFKKwmDp47sc>SCf731 z_IT%ryIm6l*L-8}W{tfMI2aaSYT(}OkiD7MZqdmKFd_@zrp2e<8Lxa1Ku{=nKC4EcB+vB(P<*0$UCaB@=oVlrja5(Zh`g+na z;5q7Cu7g|*$=Zr+T*pj!nl{;3`qNsNeWFt?n?kx5v4e)s?r)xX<&jV_|Ij-aC{7cJ z=Ei27`a2d0FdpqmT0VBLACTdt1o;>SJHo9%3{2_8oNaIX;{^l*890bHB~>it>ka-` 
z0Kv@ed=XK@nO=n0<#7P7xIwB)R|!&c3Z{TOTF8y&;7RcihSOa<(bXeE;`o zM}x2E>x>reMDFAmPaKVSUl}-#VvmJImV`(w?HT5ap;Jv>Igy8(&(Syftw`y*%XT7t zFV*J@e7pvsD+OjbVSZDve|^HhEs$g7x{=v(`jg9-um3q(J9ZAMBj4>9q!yrg*lutS z`^uDl9y*|$pl*!eP`~N~Eva*2!N0|+Hxe&ec$^Sy-_)+h2Dyc+)xe2_bPRaY_viZk z1gW4%pDcoZhyrKMc~H(%>V$ZC(7PR!V7nLJ&HT9CX#4YD>fspCBx{l_YMQcSLmxG^ z?=UOrr}eZ-N|KMc%X$rDZ|l9vxx-pKzadL3gaCr1 zI65FlY+0jGj?rf@^rg}ptaB=@nYl49QRQ2>g^OJ>Pa%M(J#NsewrD3ZJWMYt4Mg0`v!{f>uqw(YLp;52*%N~CjKtk!Ox|NPXrqqIB=Zq z9xdJ=TGm(>m)6OnW-uzoy8R#0ky&9O-HK?F0P0*zpUcEa7fKyW>ScN`2P!&TyK){7 zC_ZK#eqsGS{*N={IMFkK`Sn?+QpW6A#tdg($t83Y>b_2M3-qUt)Hi%H)V5g?9CRwy ziI8h_BzDcvRL1iP&pMqynCfg@S0(a%J+JU05@%nHENlYM=@wf12;y*da%5EDIQRrR zVA!XLP9Ha{?BqVz|F=dsZXUKsK`&eUdz?`7ddI2wz0W9H15-VzMm4tzZ zJC}w1+MY&$z7rD%tp&U)*yQzcN{dJMKT-;7Msb zv+wv85MTPpb254cko3(l4M6{l@l=;bTZ)}PuZhX6oBql0d;vbUfEn5XTjx7n9U0@c znI|D1a&m&%H?a+;0`xu`F8>hSN+7e0ny8z9A+WtX*fEZdTL{; z{r7yBm;d4o-nYYH?{+{;#=-n(P65kzM6h)8a(-#R8C7-5M95QGAP}{U+iwG?bKL4B zf79aKe8#_pZxBN>sPlh->9wjfb*b#zHs`(Rbc*n|EIhEujvemvasw`{m8-7N5BvGiD`NHvA9t} zuB6+kx|h<(6x))@nD#=?O5g8u_7op3-J6?uo&?~~t;U==9dfWU0pIt+DLh-3={=ZOx!lZ%6eCFQlLF?;}P3 zF7pSli_KFr)kn|FbGcW~a;zy(-3J>|5{85Ug8TsfV#t7-9wsqSaX*JDMeiO8ID)`d$S z(y#W7k_|3izM7NdOQZZ!uuh_T?jglO<|I$qzWd%L@MRuji&i6*RbIW-OJ<>|DH3`@ zj&-P>{5NWBVu36Rm&VNOx9t~iwyS54Ik6)#CskSnS1~Sc)!XC$!R)&o2h8V5ZG60G zr>4b;tC$a#Q5rLy?l7X$cz!O8Te{DTT=Rvg>(HT1f2ZpircbNWpLP_xj#u!9T7SWg zUz(X@CPCmyH77RMQMS5{dHaPk;SEtJM9nKwmkZG~vf;6gE4r}TH8!R!V?Qu?eNa&h zGvL8aw_&f$H&7tsZJmVAzWDP={JpBu%?9HiwPitA?T||wY zS$E)Y_V3riztzSqa03>}cQPW{qA#B!CXU`YHB7lU83wMTj$?UhDJS*W;5;K|&T^Ck zv(hsovo4290gYli8#=MX-UHszpS*I@BE+FoO9M$)`bvmdruoPmjm9+_V5_{nA?NsX2t?Q-sF{4E4 z7$Lfa(6$_0p+`HI9qmuv>uJ+_Y?z|FMKma{Y6QVk zir&Jk=%oPxW^=bjVXv!yhNVn|r9!8Z#O8AiCpWMQ>21YUV8({!| zzGN9*7yya~U9f5LH4%Z6&`Dm6jtzk$X8>7DoIOxZEwE{=iP-)?{oj@!sJ6Up$$ijZ z55Q2IAMe)#@`L|CV4@gTq(5O0Ms{)d|m}_^ng6_lHO;ctr6%LpC5tr 
zzTJ77J_bUXef8U;x3}9^x2+J=o{qqa@T5vZttIg-M);s{o&d9>FN^RJ&>f!qAhVa14rxDcxHu1z(O`vQd!6%*=PwIb2?`qh9Y#TlR5XDa_@>w7L>+D`R#M^E))2VNnL1S%XW1Go0GD5E6w{OUvOSJ?G2JtdIo zPr9-JEFyrG{{W6w9wsTda!s|r7`O~hYoLs~sIIO+1n;9YS;Qrpc)bv*+^zQ2k-fd; z_VwR7``hJvm%y|=MA#iV$S~gA#--b_wjyzNw5TaeT+8(RK1K!ognL*!J>85Zj-1v_ zX0FS8zz8q|ZfcNFuFeUW%AW0Isg|B}dNRY?ro*skb}R?vhnZ0^J{im4I+tvWpGoUk z4e`j(>+?bPL=2X^5^VxI-wl)*0H(iE9vJ&aF3rww5Cda9*{B~@WX6M_tIxLd%6TDI zbEF@zNMF^26xM7mdPW=UWN_)NB-5{W#f7J2MC0g7H9kf@ADEw=#ppcKUgA!YWcXjN zF6-M`loCnNgxpvDiZiy;p>3>yH0w?4hwynLg2p}HeI%_ciy%v_d?5g zd~QY1p~WCyYOf&zM;jhmSW^ z@$4C6vQdj)lgXDi2{#v9 zcuDq^@oDIaz&%cXhRt3~r9D4;ZTs0xs?BCQC2Hr-ZcM5jHzE#LogqfXmVq2_9goFo zgW4Wx@F-_By$^TU7x!kf)}VK28tTe|#ugP3ogSB|ni}-p?oPXnDounMa`dPvwfm`X zT!F+O2N~bsc!g25FjDY)f38ueqpX3jzn%(zew#Tu$CD~;daP%oPn{2&^#k-Tq^IjW zaKdq8kMLiC{BQM{1ZrmR9(sl2hBIl|4iXSE3^&(Pv^5ErlbB~l7ZIQH0te@32vEaE zu{e>ZbFa-p*ddzXv5*<(1KE*_;@?5aC9=C1bhg`}K#dA~Gpzm+whQ%=p$Cs-X? z;XdS2fd~<>wzI0Q*Z6+UCYaQBZLWQb-}?MQJyHn3t6G$Gz_hgmWr^FgguiV&37DoA z(Exa8flC9gj&Tg;5GSHEE>duw)4=RvC*~Ff8^i&~j1Gg>KpIpk>5Iae{Dkj-R{u+4 zNHNjhv_5&C`(?16qaXudU`!+ zNq9Kd@4vNl9B(jHs|vBTy?V@J`xgIS9}OL4HrfL1fF8u>z1#7+&<+my7Tus80dli8 zv0O!>6`S&s9VjOxDEgekAkt)s2cODEQb~;z42EHUTM4Oty2U6YJmz1q_ZmCU>q|H}UI zDX(rLn5pzxu*KK^zNhz|xRgNFa(IRELNQw6mhW_W%R}XpIJQNh0;LHJqf6>1tKeRO zT9H(u``)YVW=+1_-O3j-EU&T!44@Uifs7wkeG6ca{A{pYB4+JNG@tx#$9Fsx+vx7Y zuigJxxcs}f|M}uQ9Ro`s$8PtCfe03^Lev5ie?D4zUkF|bK+P}f{!V#gV4~DPC-?+u zoWJlX@V9n$I06r4k!Pk_p4zLDy=wAQsPC+SM*=DHay{&xcRSpL;Jfpx*?&?!fB58wZ`Xs6#FTyF2ZEmv{F&#kS&BD_+8^`~yvCojX7r_a z9{km{;%Om)F^+Anc-#$f7-j#MYRXKXEO@s=GXnDZj$DaQOnG#tP{>U)ibVV=9u^$$kdZ0}6|F$l>bh6l}KKR>ec9p{BqK=P%c z!tsR6z$ZkvuB->K6^Dm5I!@=9v8ws=Z*EIpR>AyIoj1ZoDo)goF8t44n+~QL6BeIA z#w1;$J?{L5BEsC*eF;|w_GAhgLdn`+8yuFjwWqtBk{mugtjZY}lGH9gOARqz@v+## zy?3Cb=<`23q9KXpk!(aE);J0(SzSmHnHx z(aLbpzK{t7akP&PAO#M|V%MY&N0eE~?RIv4ZH5lhXHrsZ!=h)~otw*eu9x$1kbfzS zCv^f!<1s*WqtNotqQ(iJE)6JR1da#lDajI!nIP+kM{_{vzbYCrQNx?o`!DU`EH~vgr5-W9 z7nk^)$gpM6OfQvfD0eYlvSM-yUgpun;c+7TEzF2lI 
z!(_1|Q~R@sxNC`GFWw@H`0>!8_K5}CmFpf+*ClctafPV8w_>(4ipcWzL< zigKzEmRb~T^20s_bnbi$CQS~r+Bir1=N2b0u(5gAxk7HTG52D|@c0~ye~ z{i^%h=YO%0%Tjs)3cq3+u=Tr#?$FMEZ_5>3tJkWW#BTN-htN2xy)S^!c&$jQ*XP%7 zzx!KXUoOe7p@)q#IV=XiqDdD5o0WAK`df!WIRl{#x#Yd5sV?SHe&EP%Rn$Mz-){N; z7ZT1&r2G+S9z(4ZpJ(6x zknTNyBe{iWfvl+)YM-hF&YZ;frpc^NR9#M|nZGj*fc z%WQ2%n-$!@!DXe+u@Zgf*U?#atE(?V^ZfUZlTN8%Th1eyu9WH%eWW##O?l>Y5HUsk zvoXA1B9~eDmpiUy=;4E6zzu^pANq1&=NvpWvuPu7x*>)d?xkhxRA6I-Jm=B9YqW4KnyaY(v6ee3x79HRMlcCd5gl zN`SqiP1mXpK#HjAIWYA@lBdC)MN$^p)08Mh;$+5olG{SbjgbOQD>b*RYm<5x83bpq zK^vbtluLt>6DK9zZh#Ve1HI^h>dK*%i*kKsdi=>m`{)K{Eal6NHtV{!%X^A!L1pQ&Ey>6?-fvyTt#QT|hQXfQXVas_XKDoPO>k1ugObGgSDP=@HOe6S9Ct~vAd>)Am0v#wt zYuCApWvKDg$gBnrP+6NmbDi68=4*}#udKEVAx*=VtOmc^@fFXZ>)E0x#lnQOoU-fP z4n%2UH!)U<%xUusp@L8F(DaVPg(b-Pq?Mh>Hg`_yx5+j}+<{TA;|ujqo^Ci9U!Uyr zw92RTf$ZElYa?z<`|M&HHvzf80(eaL$kv+lwMYO^)Bt&JB<<@zhu#D6yp|f_TMi1i zm7-z$l4eT{`HuE`+$fz=o)epz>q~6SGgTVbMcSvHJtjUeJx4RVj!*?eHa1t9e9IFY zD07wfY)lwbb{F%eB!|1oH%91+^fI5YvHN*C(YDxg*Qi3s_y(;$zDpprOb4apzzyngT}ENSUtZBFef_6tRyUR3J63lr6uw|Yys<`p z9KM1+5@VUFnbkA$CWu^fTCpD;3xgIe*Ww;Ufs4$??;fIgKh5 z>tDbgdKd_#+~hxf{Pmx%`d>vmG3Z?9UJcuM!Y?sR_;k0K3iPaw>Mp2!D!n^=^aWdZ z9XcEP={-*3uF?Lnc+)Q@IKmCtDm9p2gtVQtC!6&C{f(v>k1$r>upm;`Y#!PkP z#qm_woTZhG(*ZJ^ubyeGXdS6A7Hk$^h3>{4`%16<%iRAux%vYk6Tw~@EJ#=TVek2EwNMu^OqkHC`QX3!e}CM|sS9gv>X^1w(fmT{4l-B?RuKt>`B{4(6pg%C(It%{A z9W2;2AuDIbZ)vD(DJw@Pq`sGrFMO2yZ6Q9vLhu^^MNa&rpKYr65<|Kx)IHgafG_gcVx zSb>y#=d>^Dog{R{yb0_?MKW&>5-l1R9UNq<(YG6L_3w5lk2m-?(r3xd+31MUJtsKb zlg!PvdN(;`11Jj%RhI?Ap7U0dmaMg(H;<#}AD?rfvePy1w>_N4%9KUI>N>aHn_E|> z!ThA#&K|skIhzuQO}BPMw>xBZ*qtdo*aFl1Y88?>jXu?ZK@C*&B|RiJ3cETywU zJV(>KSoU$OX?Fo%_bH)Ya=J0Q?!?%7JD0)M4&oZLzOz^%p|RdBMqQ&gsQQL)PQwBtn+-7H?MMg0Ns3_BWyZ; zQ(x_JnZ6uX0}?!418EjUSmz(jLzE|=ZRe9;l68U!ZjYLpnSK-l&@#0E(!rNM~6B(#on8NX9MaOJ5Re6iF`hYNggB`h&|6oND z0b1Ap;k#|qnk=|?UaRn5r1cE0?kRp-SxnbAnweNt!cr2QHj&+2wiie*@{UeW_H~q_ zPiNPNYH%hWnp6D!_qvazH@xY4l=4(^bkBMl`ElREYLZ6m$cj51MR&>tTd;8xz1$Rj 
z+k>L<^}!OD{i{wVO`+eSq97$_GGQu)9XQafH3BfO&@ojD{5k6y^2wLgE`yK$@YnX& z9qbFD{IV=V0UgKAQ#IY*?Re}ZeKH3KIsu1GnTxmZ0q8m*Lmp&6xm4P@_2`d=Iz4H? zDU${Ieaav{+u(KK?v%O>=9XA&v4Jm8gWLUl!_jAxv5O;k@d2dCuf<>eyR$zob^=?b zBJpDSVxf#iP(Pa>A5hl4+d&uuLb=x7vZSvumw}Y5vf7nr>fa5|e|~7Y^`3q;aJIK9 z;{i>2D6FiZ?^MUZ6S*_$TVu-5&+t64`u+m??u2a5^}4G^zy4cy2jY~C0RN#+EP|B) zsxwmH*k;R`@{=A-DIlsXzI?X!5wMAoaGoO+tzJG?FxySdVm5!e9y&H?aH37K>7$7G z^$e{Z>LAW^!z^@ZZ5SypI0}JyMVtpjHvnHnOdcd`xt?0UUr%IzSiEV&TMG!AwhHd* zhpwC#?%x>tMoFdY|EEsVo58!TQ|y8k$=|P5FYcN@ zv!4RJG_uLq1+d*JQ%9Zb#SO`bU4ztCx1?3G>8Ejw^6SwJ+bTl)RB)(2rOBp~zd>^bYh zH8;0iWN+N5{7FKk?hQ|z-Czg%{+`@SE$}p}K#l+`cfILl;Xt9Oa6Hm#4Huo%p}A!x z4d|s(5>bQ5Fo-L`e0*I@aafCyMGw!(&Ap6)nvOP(dBEjV^xeUL5lgBcozWU*_W2AZDc9kS`lUK->cW`*;``FA@$ z32Q-z`Dv}5U5!}d62TANriKO5)u&?0iihlqk9fNw8XZ1kA0LC8hc3nGBQfRF2G6GI zNZrjNXnz!^vo)K4!ue9M>fRRENlS<^TIyUyC^8<^r~fkYB#{}}lI(FCUef`*z&u6+ zM$5xILK6&Z1=yrjP*hs@?WnK3GM-R0jVk!v94DAuw_8jv`Ko}E!JQRUATpAJqAmOd7PW`Sdv>1vztX>-uSw+VwRzny?TZSW6 zRq6YyTsnRqE@u4op9g?0WDQhIuZl^V#$U(R{6G>VjoCEzQuDXiglg_gDX5T`MtmA| za%!-kE=z)ohvt`}3PeF$x1|BZM1b^Caqz7+S~^yv_fwZRL^!sR3@-qm=2tDyQuvsC zI>S_~O<*LT7H#NHr{j*%bGhHYmg{qQw^x+Gj*2e&I5^NxS;tQ02T%jONLA;C`#gW@ z{xCU^4thVC*^`D8m|hyrt(Z(o6Bn`j@0(9FIq1T&GWJ~>No?afG!udp?TO3Rxvo4; z6Fn>}bbNsykyltQ_D}Z{83Z`Uk~n(LLH;zb2I2?f0C-|cY1)wk6(#b}2^jOjW-%hq zxWH)Z+?vpQYAO-)QP%ybja5~B;l#zkCqNJmi@)%2fWQNA`=ce~hm+Y)|8UrQFGH@# za_a^#y2vpK(E#BXUG-KJJ|by4MP4VgviR}EDMymds|>{H!D@ig)QD3z)&BkhgA4Nt zn6O^w5@etslVlpxfsoTT7UV~CQ;H-tj>V>jV-uud)TKf2d1%$9eeP>s%u1S(xQJF; z2q%)~jhVOTX|3wFGrlX$=NBAQW2PMO8JA;IH{kN!U$CNxirI#s%9FT?rI=D=7-8zh zkmcR*$EBrt4tW4`&gU}z&H7~1G(35>K;I$Eb$&`H@z%HZ?Ri=q+ESl>u>PFOtB(aE zrRH2~*JYu>0c(vlz`Q?$&R~V(fS0Z&_}0Yw<<1$@ zg93t{%Vd{rl|2!kio{zdwsaZPwIw@caC=Oz{NO4`LM~Y}+0$*SiDHpDP`ZAlma{khb`Y07s!@~ql&Aa-kkkq}Hgb~Wg9#S9@xM7n2jRr1~+bySle` zXc_OD+((!Kzz}8BT=WQ4L%DYK`wc<;_uF^gwaTqGK19d^u=zos z+w&o9w(+cYT~WC>pZY=fcyI? 
z_aV5F5F~m!8HfI`?`|Yh*3sOl^h)}U&2^lTMMAw~2HcC-3d~20G2>k^bECw!b^ujE zYDq_rBu*qrUA0NEV>^1Kc24UziveaTEk2OA&NvLOrp+rUubPV6yjTA8^xx-1#$2Ez zMZ#2GOa)f*AGLOeN!1kzqDU#D2Vi?+Hk5BF@2;_jOYqai{e12Ze!pz@q@!Q#Z6mxX z|1zl-An+1_> zvXb(Wq?)$H1V%N{8J*aFuomCeGE2ZbIslsVKE@%zvc`KttwbIHXv7L{0o#oTS`rcd zUg6oVJ8UB9TN{)Qipg7*1Leg?J!LJx`Uc~W(`Vd6fCb`(B+v{HE+a|9dv3hD0r=}t zyLwA`HHEtNdJ-grdHn;={apF$Nnkrr3p$;EpuK}^07$3j(MqBEw`c#MXSF@V|7lyp zmPrk#9={NtY8g3Q->sXSSUm^6-?JPDwb(F0NHYp}e8&;(R%W^HIQ2+g=5GV$tuR() zC~s*3oEd3ANhVL0-o2==vv-bj%B68W)1c!;D4kZHO{?SDBsM7d-L_d{b1UwBa@1_6 z=GIJmVoGZ2A>)pdhV9!9YD!}@%+s!JKpjO!?M1=e*f{7b&E@o9SL8qdJpU_~_hv>BBaq#7N&23aVzv|TTH~-Dt{HLgV70ugnt3=G58R`vf(2Hn- zTb$x2>PDi1%CxKrb8CEN&A9sGTRs;znZf}{Blpa{p}z|{|6av`S1SfkShdzkY{_q% zwkA)?o1^6;b~z~ZpyGI2D~4Ae8xFrjg^n){uO_JBVIy3dP&IWb&1DLYh)2;v!+H~n zYE~Ol7WF*RM^VL>1~D>hSMKHbkYD~9+Xq-!KJ0_L4*OP!O(!_Ah_ED$nCnLQmqTIP znU`^;AO6fW70^2yswW2@gddHhjS{4^&CQMK%Ezcoov_e;aY)28>(twEL8y-K4szr~ z4hYsqQI~m0<1*PNnO0?F4e~vn_X5RCc8wCItj=fG9``oNJh*K?#f(#uO-YyENOU5T zPzT3b!0xdh#UIi*;nSa4<`x+mmAa>6z@u?@LA{ISP%Q!T>{QuNuE!{0zgAN6J@3dTO$Z7x1hH z1l6ez2XuY2?SRUc@$x6BS=g_IAW*6oZMn0mw$93e+H9S#V^`ySYF?3vwbwt~JwE!Z zuIA~hRX?K`y2=iEf=`uIy+S;~1d%i(w zrb4Rk>g?E9{9tOSrf#_$eam=(-!etXw}`bS`v=T~c#JONe5&6N%DNNpg7FuU!Gz36 zUbRhf%F;Sz-F*q898-kNC*kU$>CeOQV(t7K97@<+;uh^1Sit}%4Y$;7Nuqn8Hxs2H zO!Pa-HAHg~e4ZVQ$t;-wr)I0qW2Pv*BM&p}l|pEou9uVQ!^4uO)ZzGr zZ44?c8`tLGMLE>oS(XYOGqmZ;p+P?_EDPjm8%DYiMNM&aZwsZ$^}7(lso}HeV>a8S z_qZvI)=xDmtx~ngMW`eV|5>r~OxOIt=(8+i(yb-)Eeiv#BWl6ofDtL`y^7G^p@dpk z=dmJPpZ>B}&5IfpiKDR*z5GzQMNyG7=ttjMaM-iNnkF9PdN=5%ff}er?eR3?)ZmS6 zTYt4fk#Y*!On92y>5JWTDSKBv4^xz zIgJAH34jQ3zjDs+@-D7{vNWEt?uO=kkq4CTY*^~b^{_48d%S9Xr#$`B(qqf{q^I4a zIedoZZ+_yCnb=6Z@v9t#ZIjd^1&l2Ei_!UqzcV^}`slwKf05tu(^Lw;=nO0D^(YI( z)il>lo?hi@;5{<|I40NTtVy90J!u325ISd8R-tP8O+k}+uKA9bZHJBSYyscFA^m*F z6yr1C8zQK6-vH`@zqN%+7*TS+eHuW_Uo?llNfX2Sj_)}B{7--X>v^XO>?@xv9}*R2 zZmk`XZR-s)wEKyC$C(t5xH?Jyj*xUE)6HC#xYinM8D-v#^TIb@Qur&IYGq>^_;;u- zE9yC0C;!TTaam+$2EdDNDJwtJ-b3}(TQ{500js^myR1>g<$`&$&C#j5oQtWkTf|Yw 
zN1>`9iyTeBksZ<4p6+`;(%B_bQsJ|O5%qVduWl){4Pw-H&fB`j5tPLQT+p7oXX{LW z3d`(zOB`~#Ni+uKn#`NQCl4FNWwujv%UQ`@uf=w;T-m|WGIYrDXj~4&L@O+GK&Zd^ zUS*gII)jR*Ny#Cb8b_}LINkX0!I}WxoeaF0^ZF%=MjSi+%pavl;aBrazPr}z8EIck zH48zs_V75Ud`dj7R1Bc9ap+geRwMd(3*tOLalDz6@DjEf{^!Omd+l)=G#s|&NuZnX8%EJuDUgMBkq_L~$vya>fV z_FYrf$Lp)(LXxyfUrTJ0M=qjjH+q%)PZctWU!+O5neVLl{^W2txuQz-q%g}phgiq; z8qMcrGg3cDP8=`Qb)*xj^*S^ME>~AqAcKrZh8yAG+|$%0{phRx+IHhs%cfBLMfxHY z|6b*5JcChHKd%r*jx>p4jcnZ7UHT9B4;c8wUcN3Gwr9tLLUacBg`L2 zLH4nK4qqpF^Q$NctrK%llb~ki;??Eml+SAko2@xz*rIrmo8MVrXBKChlUu7Su83d& z;-!3U(?lR8Ei|jsYc%h^YlDs&pb&mUm!I%QEo0b}=g*BaY0Gw}9lvUDK9&XsfNiG=h-1>U4@z8K%|8Ou zeaX6aL-1R*%R&!q$QCb@&yCd9y#QHTZCOXO`qpi^$x`Ft}#t-ueaV`Dr zQ**3&+U$HkoC)27Fbk!PsONurrn2KemoTsy|Qc6ZqjN$tM z<9KF!I3SPb>_Yv#hw|E#zMsi5C)i>iRz7wD61-9BI@|}uq@SL$J^?KLKZ92u_AXs5 zMZvq0()J99ka(lnsap@`6c=E3$V7Hn^rm}f#cDNN?+a;C+$OH|Tx_+JP>DRgoQV2X z4mBanNAXrIn5CY97tI;kyhNIBV4r=1sqh^~uyT$EFF-{0_GgV@1&;zJ7?Tu?rK+KC zQ2~CW(Jp-IaLHCuqS(OEhceJhvORPl0!U;S*t+|BFn~=0jBDXD4)uQP%8>Vd-hn#G zZ9u69a(91Y*kpdO-{~fo9FQ}_?4A4uFfVT$X3`pQB5E&ySIe^G00bCMPQ+!ML9XWw zrIgusu;P4cHGYksXEs+lyMV&JuSDv4U%LIPqj&DH_11M?CY_HjL@kY9jIEDwOo$Q` zvI8mYMVUvApy)~I(tvH8+?qI*KZLIWVOx z`1sXw2=}-fgd`np+6*Im%Ycas@yiygCIQ<)stM0HW-hxELt6(yyWn)2&8e%s+BTc- z`T?3CB}`D3rhaWbV7Rrq9P*Y2PQ`B7Ow@fh?Ao72L(i8tI49}W^@N%+e2>Ip@!qh^iNZ!ombSi@-yk)lP z`%L==;D->qU&LqvLlbA(`-owpb0wY0w-5ot?^V#RNh!Vnf%2&_65Hi_>}cuo=tB`b zZv)TuI!XOO^k!O`L=9ha?z3w*7Uh_7)DX>HLw%jo9opm_53jE?PY`(KtQqW^eoy$5 z^?eP$e;!nJx}lFF7gJu6e-;|qLWE$MJDMm_-OV}lMe89BYeoU{wh!%DGYyR(5O!Nn z?$yWScsVFSr*mt)w&}lhQd{DYw(75@C-_4BoT$ToCnS$u8n6lVywffn!SEdOTE>J2 zunql{iv2h9)x|=v1G>ItF(%9n?VquJxlqbGkL|+H`Toy=VSgPn>)rb@DFrC##%Mbc z7!E;Q*CnyGpl8eMhDhBH7FBrOuJbaS-zaOqG4I8ElI9fhD4bugQDTJ}NTwHq76RXR zZjfJt6Iz=mz#tPOo)w>?ZhCD>Q2jQrLl0v3xgQf#`=!E}N$dQ;HmBXfC*D!g+>kRH zB%*ZpiumdqegOuXlbS_#i~6qscm8OfWJy@`S$?N}D!F&Cf)@rn?!LJO5plOWc^6r% zkd_y?AV+t)+VBga=Otwuxaqx$qf2T@#3@8)5DBJdWJklsL0{xxSfgwK!X~A<);F1u z)oO1yR~xCSXCODH-hZtq3_i1=`N#I}*oU*Q&`jbMZ6(KKUqNUX6=RRCEi*ayGj{6T 
zX4L2cJ@-Mi<79ns=e$1*=K0+y01<;ZCcTstGUq0xz*lknS?EG}Obn`gxnL_#4)#7r zcBzqZryb{(BaV-)WaeAR&A0Uzg7+PMT{raS5dS#u0Jc1b64t@%PO^C*8F!gGeIIpY zo7xQ+QgMxp^ov6cnl^H2>oUOgsrKmS&$lxlA2WgKb|hht_1(g``_2AZUV@iPnN236 zuF?YIh!%=*Hz^nhW!-*m(EjMCCT$ZJ;m#2K@B_92Ku#w(gbCxY78mcX6{j3Ao$tPe z?OxS~J=s?>zxXv(tA7?C(woH!YZKqA5WJC|24DW&*+;wP0l>SJ(iWYc0-7!OvEF?n z0(~V_D18Cx5xp!}|5$N!0=)x(K)kHI*{k{awxdtU>hDz~x7R4L;u)B*V=PSh5c|#n zfwmDqGP=H%1Go#C9cIM~#ZT=0=BUTtjs5NVWlXw{+t<1z&v0T?)&*M9n*433X zlCyKN(Pfbl(`3}X`O z8`^4f#^Gj*WYBAhzD6%9ap9d9qCTMZG*%vp*{+b=ag|J|oug1xKNgpN8^_3LFR;B4 zr8A57i`=S)G*YZh^l3{;s-LGBF~T=W(>}ZpCo4V zq)t+33Cq=+)`S*5q6^*4Q*zr&`uX&G1iMx9CRd|t>t`QdxX;yu7AN=Rdo7}WPV2S& zw6DVs@|CDv<{!ow%fjA_Gl4H&od(2cyAH&sOoJ$i!x2;ljd@~=ZeA@+K|?)0>`whT z_2ru6NNeJWloRSUNn$NlSpc{#kXm4jbq*{;78A3CbO^MWP*g9PzzZm8C#Xx4d3aex zkQg%`_r~kD`5(79v(z8gz1JS#0;6^2=8N}|Ui5WnqJ?cyS=xL1hNc1>qc$>I!wJNo zPhDN>jyf{W8&efp_SDwm1>p#1oNvIfYPO1wz?n+f8CU_KvSa-qP#Uf4A#>KqlicDo z@Lbd}!}-Yk;bhJ|X=>9}j_F2HEyVri!Ibs2^<6V4NYwJnfuT)%gRZA5 z7N`0uYQ3)%m@Zn|Ef&s%q=aftb<~eMub6j~`G4X*o~I~SGhBPGGI`0FsmKzRu-)-k zbUD4glRbpsMBW?|yBpgTJ3JWb!vzjXU$gD1nqHI~pnfhp>3Sqq(Rrai5nI)aA5(k1 zx2$NYtd(WkzZUV9zqgAIJ{z47;>!+*G5&5yl$|F@sUMw={Azspl+m|wm5}&-6`_u< zxz&}&a#`fbj)ut;D~`-NYkZY-@Sc2{vGc^2+!s~Aip!$pT^#I5s!|uZaQH13A zI)15vL;IG! 
zAIIP})I0W_2ojKD^pCqgtACBD2X~4o^@?tJ?bL+su01#DvMS2{>bHoR$Vgv(WB3_| z9%^ESU$R@vYHt)hBt9~5g_ZYIhS!FH1*;$p^G9|PJxI)XY;Q#@z{5Q`dG+Yyi9f!# zDk{ewOWflXNuby0e%mB^X65J$d*?)#e?#7P~^^daf`=4C{eH?@kRUT@xQQy@e6 zm38-0;5^UK0xhW}2`11fU1zZ#wgt^4g&}xgX5hq+8D}WVfPj$^Qc90ox$PqPX|59j zc{W>X!-yo5=4^PIZxl-9Ewzx#l+hE;6M1a;nnr!Ot-4}qq5gZ(0Rt@W2*je0UG)ns(pQ`+)%PL zoX-(xq1x!C;hWu|6-uIB;&Ab4E=0ov3UaoB>||AaF2ajs*mh!(JE`abQ(V#jfbE zypN(n-fp|H-S$2cbT|_g=FH?{u&mQ!-ymw7X@9O^yw%!7lyCjv$bqq=pG{>xnHIa9 z&a+3_+h2QW>lCA9yu`80(Ye(seUe&|Vx zWa+5)^KCQaNI*1}g@V`uqOso+{TCqHqOqm$=mUZc?6zp^^-b|PjibS_fM~2EegB3c z&y@^_#y+M`AD@Kgr?>;6vF4co0el^5Pav$`o{N0<5k;!qiiun+hfZ7YPS#4=Z)}^{ zZvUYOk+=S;{oLt3A>L>UPp4!i_?~^9w8wP*ko$4I71eAU4o@DcF$pqVcF!NR4E1s& zpYm zl1?u(Z(uXmh&G|g7==_GTVqaXeKOn17i)Oj%WPh^Fgg+(0M>E3biRmZN~@rHit0-I zu|nI!wnaQ^Q`c!>Umf%|G9qH>@GpP)dvy~Ck-O}G5ZT%f2$3_}XSkPMVVAyZM31@X zZ{212b?_9Y=Ej(Dejnt9c2rj5=$pek!3l!b^3ZOcxQX_5gl3qk!~X z#m1G!Xo4c}lS!82A`SwJHGocd{Q~74m}2hwHRbRRYNjQv%}v-xx}oI$I@IAP&}Cx6 zFNw1XZZQp#PPvMe8v{9L=ILFo0seov`9HWi<+c8}e*R5oXi^~kO`pu!@6a49I@H{) zW_SE!qrH|8wmatPBq1#3POWvDHD~&^J<@-7F7QA}^5O*pj4Y5nD!C zPE=3%9KS*CQG`8I4>b=)Hfq+ANfxkv*6Wwcv7;@-)>;tZLH-(brvBkt<5klYz(I{S zat~Q~X?M5Z0@r51t8fiUF=nMm@yDEF2gSySQ+i)1SYT~NxS=+CJ`p4VBTEuD8wx%`L5EgpEK&U97k{@-g|~?we2Ma=XtdHj?;8=Or19x!(EUDm^WK@ zC&~}4_9!cZgNpAfzuyM!?ut$Xf~k{b>Wa0^G@0;mK$%#vYXMzMME9=Qf;E&ylju)- zWo?9|q@O(gkZ0kV93{DGAL3M}iq8lp34T@F&cWpr*AA_OuTGR}bSK!Q?He}ot@t%N zLDvwr`#OPpoK;%$nJWQj;6r+98=cbK+2TjLSERf&A7A20^Wb8^c5Y>XT}O<}%$DyJ z(ND+uEPizux|=kwU(FItM58oa=@>vIM)d4RN3!T5ul0~d#N5@ZzOBg$8ACsQlg9}0 zE{I@}qMwxw67`SMw-?m(PC_yuMwOL~Iea$MA*rwk^TwO6iHhx~M$st$Uvp}Kp*ZAyXN=${pVf&$LX!=0{>KADa3Om@x7 z4YZNca|YJ=6i)VU=asYV(2a<<~JL;6$dHuEJob9cz z#)8vlQ-~WEQ&mZj+Tau*=nGQv#k|0ZEwy&WBWSN}^yM6_d z5c|odi8N^d{TY`3?P=E?x3)X9QD%WKEJkQSW*TQHE0)KcP!EB!*~ljJ%{5RbVqeBQ zM;S7J&U+y%txY+u(7o}8mEVtFej~3#FM=qMYt%rw>+=2qWhQkAUj|^|^|#6=_9=fP zSCCBJLe^UMU-x+Y&+VSg8nbBNUM}|lO!^oq#BDg(=q1G`eEgy>Wi|0Utbi;H$yV-$ zvhW9g`{;Lg(cjGdd3oqF$)W8Bg0EN;p=HAMWYjYB9b}JutVwB->Epvtn30s~1>*Qv 
zrs3}#4S)9g*BML(ahOU^(pXfv(c`ITzY85BlrgRqPl*s@>or5Q# z>WFUN@A^Qe7(1u)J5z?v!nb!pzyQhl(2(rN0pb=@9%fCo@A#EZ{#lE;w{>QpRLdv< zb2uSnw|!>+vZKYbsL=7$5>M1-`Hq_Y+cjE~%LY@^1N+KI1#t;8QaS^VhO|R=JF^Ah{Y-ZE)y8oe+{~ttWBD}XsxqS^n$RO*%7jy zFQW7`p>L&&=xuW%XMovaSFN-b*9N`0s(iKtR_#mX-H7rz_Tx1x?7la%p-q#rFBQCk zuQJjjSSAbeEL_Cg9II~a6n?}#(j`IP!0k8Nj86nB9BtRKDd&;dwn<~!2K{xFiIcE0TsE1ZrMa-W+ z*3PR~TkQg4ssy@L3#LOEjHtc;>`A71h85u0#5+2jJf%^e*n_?p=K26BuT}L=zK%oE z1-`jKj*fYi_B&Hck3G0RZw1`h@s9n4%{k#ize`)}E?5|%i8YOVU1Th4xplPPs4;yLnFdKgnF~$5JosLgw~xYKx^hyMtb!P^K|xy>{AS)dFSY7r*^--1D85BS8`>? ztAwK$FkdHQpDz?*??mi6kG(pakdofyKvN4FQbY-zAO%dflo;ei)SJjS+Pl)rw9Q!T z8!vw!{$HaU8C>(}9_W==PqSY(*t#{_>%J)}wA6RpmVHcbV~i-sjtn3j_`Cx^MZ`ta zzLKa6V)O^6HC+P`n1X3K`wTB@5-PT+Q(XGQpO=a7leG`UHZwk_&@{auM0H^Gs5c1R z9$q767Dwm^8JS*t-N$;uSx&t$@9j;53#G3*sszS)Jq;1;iW=3*cr9ig7H0?uO^!Ax zkZ|?}hF=qTZVNd#B;ihthpH`)%Cr?#AzVzFCNg@OZ6eXLB_?F7At-+6j^UdSekF3u zQjWimg>GLOcD&YZpIvyd)HWHq3eCwNOnyQtVQ|IkDE*d zKI00!S!Y8ypb4w+5#Z7$w%z=reO}&q4pQh)MqV@omP@O|Qv2h{>WcI6klcp!bs()TUr_jHl#2w$Aq-Ze#&$Vu#(OjArFm{ltP_Kr|!jdN7nN z;%(JjF}d+CTmPfep$6r*9aq55B+f#kJ9UWxOC;Do>B!n@LPYP0zVDgWkq!shb8ZbA znUaMV7u}`~gV9`EP;?W%uEx_*j&dM9v`w3*roDh)B-%wFr$^T7$>}Yf;ZYk&*Mqw~ zFtM?tdPlxett&-O*~$_UGff2s8Aj+mN1CzS$zIn%h(r} z8$0i&_3wQ$XP2P9F9DU8F|}ULHGa;qc842g6Ram@rGoD=!^|r~3|q4+;EwdB+%FXW zP-H8!la12ao?aU6c$;*>6M--75 zz6WN@8C?os=28wF0hgTCa6KOI-_6~B2-(}+tkm~KhwS=I-o?M*O*oPD!?+@dA;PKLLt z3yCT?aIA<+4-_<3d(u3)zNj~qlH`eAD}mF`^17l`V11K;!vP3UPz_(?eaG|D1Ao5( zGBJe__AVD!GZw0vo1=s@tqO5_+C+}QxhZ&0V_f@_g2x_xSL)B%$C6#gAVCp*r*WZB zFXH0Bv@~&>duNmi>_$azt;Vj(HxX&jCn&vqsh$U<#tH=qh|UEluW&eqNL>TSfO9vV zjVBv@a~&|Q-Os_wyJDf@K{`m-nR!2fy#XpxhOvOEnwQI4Flk7rTwg=5#Hd;LIPjNk zXFmXSCdu~s#;jhM*K+~8Gs9a^jX%XrH=izRaXM~CYqUZ=PRY=mxvT>$w%B>qHb$Gbz~o0qB)F||S1gKzS;?VF8&d~h!bDpx#Lbu#u?bWx zs^g~SQL?~Tu-DngGOC%G{x7i_+Yao^C#$f`bV|2K?>?Oi7}SVkY9wJ|B0~*NH~OT< zxHy#sJ_te$xGYCr-(ykURN8@jQ_Yoij_pWY68nhVhoF{U5=%n+`u1dj+qf&HZZQt8 zs}}W8*;~m=rKq2yu>ECs8jCVd3G5`e4GACYx&4c&pSq7I-nRXF`>>m>S6nFQw2?1% 
z=Sc!bsp%bx=E|2{1%?LKY$y+f{(($jE5+K3KFSK>$(pdF(a!QbU}wXGR5Y2)o0tf& zn-p7WU9^LAMXUxgR+0$P*w^>PbV$ZTDj|;MVjBgSo+XeQs~%L8Kg|21{LvP?2a;!! zhFy+4{a)n&X~$yY;%-F+sd~#zb=Lc;V53^v$wBwS#LMXc=kotmF7l_lDnaCRZ8^1l zR4Iu^kJ%(V!U9xINqoq{6O`bc+7fJIJy*FOGI{%2#fLkAK0D$tJrw2QGy>i1tF5>& z@g!ey6rdVmUjqnF=OC$*M>Pg1a%wK1$aPC>?IyY+b@;a-;NsX%5@10+0s%xq%PCux zG`!nZLi*lv9wLQmJP zQSD~ikV6{)q?)Kp8P%E>=t}~#o3SB+@%JhT5Yf@ZJO4cT{og(P+x1KGfWkq99)KM^ z4T%z?U)ai8HbDs+ooN8lde5?pLz)=$-O{ua;{8t6Q7(}ok@jN>yi0kppi{=B?&w&P zpe#Ra=q%<{W8wEF;T7UWxkVK|<>7OZip|B`hp%gY@vN=>o8dn$K};xQ-)j&se6xDFEKl6-ce)3y9#3Z9oEoV_5e5kDgxBdI~vIAKP{#d z#X@$XjO36l8937XK@-0YgOpBOFDlKN2 zKlt3Re0+dUB8GZDtN}tfR6xfMn~ezoEwwai#T0E}beHYpZI|pk930g%w9xMx;@ry5 z`5JEO?M4p%S{N3uiV&fWDV@3xurJ0orxtq~0mFb_H1Jd)ju5cn&YR~;&NhDY@1Nt) z*GZ3u^~aU|N$%9e_=K8ayDY&pG&m;Dkl)7=L643Hc;3;?DT+7{wHn#QYWbG`@>{iv zz~gAw5)1M^_PU1Rd45Zsv%)==nNO4dYGM* zByDGgxv`1&%5zRmq3*6WbP5w$ljij_M2`OJh33M?E8K@P(4{j+|Aln?V=_z!=h_P2 zSs!?>a(4G-|J{N(!W)X78#l~TjS&Lt&XjmnR6E6yoWM&I77Pl5dUZg5vz>}E66qZ^qOv?4`Nr>rDL`3!Ze<{#XTul;aMBn2m`77c1aVAt7g?*V zB4x8GJieKHuE?-@Mtsy^knTv;=ns(pHWh$f6i9wtV`FeY9e{Ub!ll#{t*?@e& zpo2Sr`$XoGfQr3E#kax0t!0UB=YPRrV7MKS8?%b(D{+@A{2m`rWx6B!1#*@<1zF~Gl8V94}N_6)YWlL_p|zf~3x6{=PsQ(0tNvY55i zUVbshJ+z@4G(Boq{gwuO>_FDMQuxQj zZXZ9?r)K{)0QpKMe)NYrIP8>NS)Rdhw>&_oLDDp1;`mE9a0B$&0a$V zoQeKojyd>Bm7BjPY19|W;^dc~6g=N{;eXmbyNvR%x3qIX&O(1qEJVAq|1l$9uafNS zI#2^8V83Af7!v_X0|Zd;bxNfjC&~7tB3lN}d#_UO4Sh!!{5IGnw!9kDng61VDQsZC zK$t9Ms_32q!xF8AiX^pyv%*5ZQ}A|9Yca)%(|?yHSo1PDF9)XtS1Fz?f5zSf1+MOW&fp*~co7@$2dDk{r(z|pnW0{91y5H60aqPFF1_47 za%EU=$|AwSesiL|I(W_3!l%3&JV~o+gk@yC$)GGZOa^RXgKGCJZ$&^YcMl?35#%jI zbMn=DVhKXs6ms^_Y3!pQ+YP$`Iv>~fJ+X=7gNLSd zz79up!8#5GFRk-^D-ETi50i2hTC%<)N;26W4hEL`R8e9IB8zi4&MlGQNrn(YM=<1r z-uRR{Eq;}eh(3*I4v*{5XM59)JmsdQ#vZ)GH~yO+=RZuu!-%V1#0V^?Y%n>0#;I`~ zg!L|){PuLKZG0P)GT}wEmZnthj_$DO%fnY1+DK88HL#7WNg6M5zi-H(c5QQ+eenn& z)l5h4&Bl-E%yn*jpsUzBMI}n0`a-R^!YlIWZ~o@2>cP~ku&+9{B}Hf%$>D84GXM_59(gzDvwKj4`3UGn^xPzv- 
z$Uv@PLjewHZ)lHp2+`1D`y`NLl<^6XU}4oJ(N9)Zehj0NbGp-|Bl!%4==N|^7_*XM zw)d0r)`YFuz<2d_&DiB$^c8Fy|BIFFiPPNJTBJ8iLntHnmv*~>C)sYXLeI@N;mGsT zIQ+8v?$On@E_crED6)-{H!N1s1Oy&_*xDgg-1qeydJTO(GnWl!b@%wKL+K5>LxQoy{DrH!zQ#0E-o#`9=(&Az?x zz?_SzZ4yfl+!iZ#Wpy-mHCEA`COo`T_7TdDYMyGoO1ZWo+S)p0oE+-Gbc;F3+tN1o{Usi6Euu%Otsk-iuKkV#m#|-txW}okUWFU0~OfFQk=Z z!}kr_2SsmiwA_Am9Eg4p>P34b&{^b*b%AVEF76Pcu-4nJavboC&WZ(8KMsoxea$S7e3+rz%q`MDQkxh>^4f~=gE%J?OrCghO ziNilm(Dx*bbT{B;*imqCgx9N?r8=K#=}I23pk`b`Sy4MZb;2~di8i5@HHCbHR_(#Q zxm_wr=r_wK=&g2S0y?zk5`heu&jq*iZBu_VYUEm~@@g_vb;~kb*L4E_IUUw2C=1k-*>`p>_2u1yRp{>Y>AbDQ2n+t~k7(7*Ev_Bd<^Gord~ z%7h7ndw~#ao+7=&SKUMh$%cWV;H6y~>^!A*i8y*iH}2&2GU9-*+zrTSIz*v!{DA#z zt<cLz0f!oiw{d|?O`eaSSNZz_w!Vw>tbkKjAdU?*AV5!URio;!jN1U+QSKlDF$e|G3p?3JNAo zsrjT7Z>+43USFh#c*Bf+bsei9zbg6<6X8(R;z^;EOcKbpio`Wg-(u~-_vxLR55BnA z_RY3Oe{UasC(o1*C*oE)8AUxrYnlM`kHtHOGeaBzvYx{hnos6D@18pl5*(d8IS!Zd zJadBiq<%`rIF$$f)2hku27!5o#1oU#@K9I=~_7@5&SnENNWT3$52h6BA1SGA2EP9Gbl zO3BmhtOe7EkRSr~GsI>^57OFZu87tbFop?tQ_IVq*T^}PqA57lo-TAQBSd=j*VA-~ z)(U=R^obPk9qGc<+`fJ(9hHlGC{B6IudYNpeywe9y8$F|RoggkTf+EFg=^;q5&K6| z0|yT_jmDJ)y0ks@Tx=bL%`#I+rNumaUq7HNRpQ>6dR6j5^3`faV7*d{Ex*;Wb@woz zrhh~7A=;J@+d7`#(LlTf|5?!Z>v*!APs(M{BoW8*pS+DUSgs2^-$eWka?J0h^v*4P z%FM|qpUTvP31fX^)n9vKU8}ZR7TBYm71<5s@bnsvJbzYno+g@9xv@nc< z?Wf_;p1PEF;yqtWj@d#91(a=THMK7u@|O6}YAazarVaj3aAG+;I_031IJ&W!)ekR(rAx38`y7Ch2B*dUY^3In(1Z=1o#_z|g`HGHT4Eb_0 z!0_TU$b9kRD%$?~^MsY@;06sGe`OXbGO3a4C!ay+fqBC{a4Uq31R&VCFLfwCVly0@ z!N6arOUgNRANQ|>#7OnpWo5`zt*m$nwDe$eDfK5H;u&E|7%8&0UlZylXMF|#N5i6*QCv zrSzuir4fY@H3PmB84(dfE!YW8@Q1tP*tn6}FPT-q%_>u#Kfh%+) zMmPXXiJ_wt3)pvVsIGj7%6xDT$5v2U3+0dD13n5D4XOa_#Mw zJZdY4)I?<*T+kJ}K0ZG4!x_NhuGlSRZ(R)bNSo^IxzVwxR!r<>$bpxY65l(;<8y}> z&(@HfXpI;A?mS@*;@h59205arW9bQET@v|u<9(ydw zgD5^N{QK;Tup#J}>EpwSa*_%3*?3qCf0Q1Nw>9xC2{XGh&K&G|*_xRhVQEBn6S!*N z+;Vf_UhX2=Msg{0kGo(kL%G1yKI+wK5boY=tF#XeK(%m9dSsAo zzfWh&<6}-e>pW)jS>IU+i%>sr&q})$5m!yrI)EG;7O6Kxj^=LMZj2H70NF;LC$~|_ zrh$}ogJHjHweS+e9ozo3=RhsjJ*P7PIUs%d@y 
zuQ~eH>3;>(p+nR`GHen-IT4$%sUQ5rd!qoj>5z}*8b2%6ujRr_KP`AXi0R>ihw@Ra>^N5p`!xmm>Ha z1k{$L^`aiVlGoPYPF(%^o4+v%!)JT;(I?N;q*IfEt%+{t}U5=g~tf#I=qn`D&4W_!hYe(xU z13yb+8kY$izGYA7xrSx=>8;LGf|XNIcRC2rKMil)#RVOGEzSL5Wi=W5(F~%rbgjHn zRYA&*-LrT2(Tdp0A;JrL8#^34zZAE?KY=U|^wMhZb2c$Xx5pum=1F$x80{PjYJk-6 zfp!Q7I$Oe|-M!Y$iiX?uy&+A6^c2RzJ{f9&dX4yZ^r@ak+J`BURW5C}t7*}_tpGBZ zi`v_lWjYFHr)Ba|J{3~lR`HXSvA=Y3)@wZ;>p^_|CZ-po2WZ%lY|!`a8#@=!zsGd1 z3!0~vW76`mUK#Y^nc&fJ^VS|~u^3MsgO!AA)Q>APo&j3`4;_2ARJ&2-ot6@feUn3o z$^)I}0}-?@g&D#hS4V5Ye=@TVgeU~>hlLL3_NamHbahDjxV)O zdn>BMb_=-d3?*7?hM+4F&4e??s=PjAMW%py!>WKEh(jK+)EjX+ z4P5%8NVh3T{cuWc3u*k>8tS(hl^AVf;I)>gE_m^({M?&#GcK@K6JMu-UTp5X66*%> zy!s{)(3u<(Mja_-s<72TuQjvm{i4L``Mll~n*(Iz?1G_Z901mO4Wuu0_$x8aDae&s zQXA&DttvjgR3GZ(i3y{oc|D>WJ4PG=_|gEzv`GX^z3ZkN)hYxd?9yrc*rCQuS@?zQ za9(*>`tnFf%x{002tbg_Cnl6`4+Oa$zV53JfAZdBW|FRB#m_cc8`Us` z>CUU&DC+ku9%?Y#uj%gO-j{pUl5o2|y416U)DpdUteLm0{8_FDU-ZR&4F2WM_8$&K zkZ%qy>GohsWS}ZHP&y}oRxH;OQH+3s*yXV$P>pgAe|3YV{8ci_{JyZ3mU^nNR_{Gy zE~7x@Cq)X3=Ub)L0f!uD3U7mGtS=ykjwNF+77c}cl$uSHW7Fe`8ixa?Rz9d5mJZ5+ z0#WQwHl(1hX^vq-BKUHCaXk5Llmp zD{Ex*WvD9(cNJJ>1bEC(0__zvB!fI_I4?oCr-4%vm4&ZId28vrwAX}aj}^lVNLS}i z2|%2_z(Sz|TdHe%{{7Q`w>g|7bwr(Z(3Rd;{GcYm*N>uP_8-)Kb?5MRtVqhA9!V7{ zt+1;{AmrY<7l~(H0?+N*R73)F;F1HXd2*WU*|txpDA4#qynE;tUzBUOE5bgWAS+8Lan%+TMP7 zOY;8KqGZ6u-6kp{RG(fmzyW*>l})w(WhYNtRcVdiz(*_2p<$-qfU3cm`92IyFe<$K z0`kAN?*H{~TX|C|D;I9yo#b;Vj(?fbndWwDO+}t9cyg#W{!P3m=q0x_5?o0!+(6Pp zL9iuJ;WM(J0+l{Nq0=Rx!?tk&Ns=A>FFvTf{-GzJarFJVw}tG8qf1Bzn!7F}SCG@Y zw74Vz%nzt%3idmY1P@wstX!)2ZIOB{(|)fXvyJt5Rmba2+~*{F51`VF9UiNsI$X28 z94qsa$sa3GZ(Oi3>;+5h{@M!) zGx-Apk(8VGJOVHlc0_y9`WDdi<5#9@`eBNlh(*2$yc@3jy0wrHZt`R~1yZQouJ!Z! 
zk-o2|*P>_$9!%$mZb2|qJPPYNO>sXGQ2HHKv{n^@>W5U)c+~2=hg!?K$$rbWd(WSg_n7n^+|gat}8o`Zw;48#8ZUHe>AR!KDJ&~oOAL`wa;E5&D+eQ#@Z!VV(a9Z2X**i zoN?1&I*lD=Zpp&U^Fc-tBo+`eTn-Ii5(8qBr1d1Nt$Oa3)Q$0hIo6}vDnS3~#Y|GE zM_oHKH-ylcyO*)Y$URxeG<3a8*zbQi{~iwNd+$E;j}1ff*xi7Z_}Qi?CUHF|EE*`Q z1^9G|i~pc(Pi4%v>?=~U+4uQe+p{0zazOpfUSCeedO|v_G*#brrC8~XU0)=nYO`zf z*GuJr2Em?lz_!+_oSn#jJ+nj7W8!2lJ9*UydJn=#T)@!haY z&5CeF-L&6|XWf_|zi#f(oq-3Nv=C>>QLT21P@5ZnMc&wG z+fcflFlm}ZL)yD!y6Q&HKcULw@M-rys9m+>(4}XnqU|!R7FB$YylX1-ts3mhKaBk2 zW2Z+7fD|xIUs3iaq2zgOp@Er7t7%$!5~jYA;Ae2%|Q!g21jTaoSS0}>TLvf{yP62eqz-l$T4AW4M2UH^#i`<9VyfNHMk3TV3U z4t20qrK=<_!&=4ZkOui6@XLcJGbU3}-%23omXWz+i4t>#B}*$vk)1(m$iAH_ks?8p zV%y}DHkx|;{4lGU3b8D=SmQ6bm=?jTCaozJYlx@`IdP3Uyt>KT1VnNpO}0#mUFhy1 zpog%Jj{?78OtFrc^QC;luX|4~5A46_d7}RyR>zvbyqB8ac7!oEj#|ExA zDYxr(ME1+g?%^3?62LBCJt(}I}&IiZhWszr{~*Knjl#9*gBmzj%6ckC!B(dRbL>#9GAa$c$<%;PD`~&W5@e`o#rCNc z34^nd?w}{w$o~$BHYTCd$H{|Mo9+QsXNKFSS-DNE{830|bxFe7<^biZ6$aJg=Gs_- z0vwHnu$+^JS5r{+&2hNP?tgK2+czt{Xz9-hD^d$SM@Z@p9YN-pE}jF9*8UPuhjqud z8^B>Jv`H03m#e1zzxtqdV{69#f0ZYEnlTqL((oqbDi;+CrcTQi8UeyK45~}sR zIWJne7|_xr4F`we{=4|bm&Q|Ad}Z|6!74*w&23(J(=r*`CgM~Gp#BCX?ZwqxkJL`9 z($vcYEFU4aN9#yS)1IhPe`r#F&}?b{*r_vKV4(ZP{J~d!D(%xugZFQ|KhAFaOHZhY zMx8dV&E=xuOW^k)Z8@wl)|6e~09S=q%&=w2_m-Qjr`O7<|P23RY(J6CM~2nW&GZZCAhpZEMeWd zV9jMT+DTMx+2JaY6(IZgVTRV1m*ZcKd89@d>E*B5_#u#j%W_9dUJEK&17f3 z=!xMKiH(QDs2&f1Hc>uHY#6HgJ3ptcd3 z2 z0rsk#M_uv(CN_KxU>v-`g@EhIvz)C8 zFm;>Sa9pR7t#l!Ik)ZmivT_rhlUy&6oWg0M-rp`|v6budAFrZ=J%X%fQNR6L7BiE&fw?8_wsy&2Ks3`8W&eRu29;^&HbNuL{MieeTATe!TFkP?|g; z6xt4I^um%FriTX~S+ydo*9`DMRKaH?rt+~MZQTS&!Ex=PY(SSAGE^70Wmdo4>SyEg zo0TypzH8*3?2nIzH6&N8?W}L1AEfE=AU`hfq{I~`d9RuD=$SutgkWHrMX%Z3k7Ib z+Em^9S8M-g)r0ZEoYCNLME>mc*|S$DSRspOY5jTS!YBR=a?88c%mJWy(~VHKmnllG z-@w*Bhp=3|AHqaGhchyM*W}V|eN62daMY46hfaVNaH1+4OZI}sryg+SQ1zL@I>t%* z=saC_qWH%3XO+GGa`fNdIu_DHsvs2r5JN)%5tCOYHz%k+e^3ilma2$C5M8_!PC-3s zBdv$=B@gP;Qor~4b?ebH6a^|KfGj*b<6bve-g8--AM>ci(DQpzcu&k+L)etj31;J+ zn(wkgyv6}F1uV}6^Fgf~h$Wd*`VZW@H=0{fA)RS5H{T4eRT?thi~3~obYLyorwBiN 
z{gPk(Z~yjgTfT(hhzBAqe)DYoz-Vv5%2D~wh<(p&2Xv#`CI%DlxH2!@iAX9++nrGE z;GhTczf7!hfWGIQMB7D6D(?0|l{fMD^Un#ewo+Zakv6#3)gZ8?1LiX;$qq}B)m(@R2fr9dZM__)DNbN02o4hz2zg1p^opF!4{G}7h+rbwxXQ1 z$&Nuk?&%0X3xtFbm@ZX0(f-}c|IW=eK(Z`zMkF(x-4`+;%T6@2_IVuO;x}$>WhR)s zd!Pzcf4%Oi5QS%l@y{Gnn!KGP^g*-A5P z8vbR=+czkocwMFwlzjX4Tf|D5WQsqdvV8XA+KDCen!M&YWdzvjWFp=sKMut6E3Ady zoU0t0s$H2X2y3$OSvVXHzMrXrJA8q0cleGLzdD8}FQ4?Lay1HCuk2U}goBagM(SzK z+%F=0*Fwt@GP5in4~yV!dd>0}Yr)jUT+{wTg4zwLsnE}1eaOC`qG29i5-xb1_InHI zn=R-5ybg3qIns3eU4%XWOhVePo$y~PB{UY`S5ll`s4Q4h2VPk|+^C)b%3nrx2{u96 z&^-<>x~y)RnYe}efy+-JXOa;m3cSmU$+pxx9BT{F!nKkR9j%UzIIa6gmxjPPt;E6T zu+u?$e>?w-n{ozQ6I6+MX6N^=8JNrq3X$9=8)TM)GJ4;WthgYoO{Zc)0u*s^aptE@ z=jJ$+{KEXyWlwK!k8w_SPdCjbkb+e5>+>0-_<;OSLsudY9%K?X0-Nj;I$dWved}yH zoVG}u0RARmuW`9>Z}l9*pV>Gu3;`oT%@!%fF z$-$9;!rb-^{4dpKn*SJv|FmwCE(EKBl%1+L@@~cF!Z}x3C0w>LLmTlR2~{4Z6gAb| zWmMT3Qb_VkRMYDIbEMU_jiys-y4Z0t*&$qUc@Ok2xca^n&c3<0aI$cP>^#V)7RP(^b z#O3<@2v_8UD$DMz{bm#8$ART|&!X5>Im;iBfJ*|BsTcSTM_D(|GzGe&bWTp$J$kjjD z5%d-Es~Ca$#(+(cvhUa4=I<{7Hn%N7a`@s{>Ng0Do*oKRS68Jke9(0J+5cUR{Y9p{8Hs}@2zGRF!-B_mLzrrt<-UJM>LM> zS6mBVdkL2t>S#02oHzT2jBeZ;e*GoBM(gkx{Daz2k2BS7!za^0>e|NUO2vd@^JAAv zZon3d>9^-6 zv-J*j`GhAyaL}vfsz*9HEY#Iz&p4xMYK|kosyW|Z@66F#%#MhVZTKGz>=*s3SN{3UK+W4pF)3BbHQ;#uB4On` z=)?!LrS%C^|M$wN-}T8?vIo2N_jU~C!#mkSYLiw(;;h|dE#RBf}q^aV@Y{vs@aU=|~F~^oT{C@a2QL)Qt zO_0-oE@VhA1vVmgm3Is-UyIMbR5|unU;lDniukXeg`G@c~{M!(<3dRUPpcqPyE9A+K zH~{kzSo7T0j#MM;qVImql%8fc3x^!UlzAgxS=cr4tJ{tXCx2jFmYPo_+-K@Di)F_w ziWR%!gbVM$v%}r~KP-UX7boB~2dj#&prs!-XZW{kT2;AT_Cr|ul5>VghrkqQOewhw?J?m}@WX0a2{%H5OOS*!OE2<=Rq@)$>`+27?1G$MJS1$C z*X>f^Iuyo^tr0e;)rdYJfOK|wr^&mvr1)j+HhQh}F^*h}097Ur(C)o^oy6b8>}VOZ z$Tb`o4-NvToaJXqHX#jfnxl*OJ!|_`BtE_aWAlqSRR&keh}=TXk5)ez1>Z%Lkttd}4Kb*gO7_qe9?H{&VcZo~Sc zY4;t3sk5&tkh!L4*oKr+l9+`$)fUwiWK~$3P%_sZA&Ib{fP4ZlzhWbY_tyf(Kh6Lh zT84(-SLDy$sGH0(7+Jh#kesYiK6TXfcTmT zoPTZ$t!63P>edLgQ8rT*DY9d4C!-UUfqgEJG~o2pnoUD^8VU#N85a}>>1dV5lSet| zf5#8ERd8mdif$2V?pe(TH8zzI_(ARcy6TBJs#c4}PP!M%r;FH_-(ry~Ct9Ds9r_J$ 
z^yJmeR{$-N!-DjNV4nn!`^-ml7xi5cin#~7V%NkI0m3t=cQz<^=96O&)&G}LhCjOa zf25!^#23m`mx6%hdILD+3QCp})er(y<4QbgaYfZLhBbJ`85z_DC=5}WeZ@{!qqh9~ z=k*`Ld4us?`=og*Aco5g6eaz}Pqa#N+0X5&m(ie$$qg+)e0t*k%tTe+QpL7-9b4qsi1X#F=P*b% z-5@+Yk0&UjHLL=lBIfB5-DB)(`nA-Nj9Z-R_iv`tA1wPY6KZ3E5pp}FWf?V^n2uQh zN~E*T^IZT$(}V`+S{<3~$k99_EK7|jas0Ke*g#QcULMr@2a10CBu3jYDqdHut(HId zW%JuOu{~<%v4D`q%^Zs{7S?K!qVoJI1)_tc-XwZt-YXjQ8En~lXmlDHVuu3NlOgC|g z$Cgk3(4WIT(~~po)*CV0(hd6C=6hl3!=!=5Rskn$&3ZmICWk*suE(*%2UmjSwX+K$ zA#6gSr*=nlU=DIo5gyLUJ`@6OIZ|8eJD7%OlO64o2^M$k9iKqBJ5X*jN*_)#ZGT`n zZQSqc80F3vl_eXQ!HDLMM#_@c+^?le*szfI_O#A zZO|XdFVYB43b$L1c|9M98=ou;>9gDZ(q=E?@fzE$spaszfCP@poSnPwYWW1FtD$os z-*}APuseB^d33k;wcX$SZ1^5ZBBSmKJoiPg(kUc5aoW<1t&l1`qL6sPP_3JOz7*3%{s0BU;hjqcV-tkTKp+)w1ft)TqJ`Lu&| zM+uWs`S@hrhymfhKT51NnVj&Z=v9TsQ>f+k z$Pt0VCjgywVSl zbSqUtW>J~*a0)?roJ-62K`G9TW``F1of;Vv+7*q*kppK3@*c$I09zQ_SioC? zuC9p*tABRzHYZvy=|ya_B%f+4IcVGpbZ(|nw_#fTcjqa+a0BQFf?;(!ZC8LYJlzvf+0ddpCQK?-~b?OHH3-%88x6+Pqk{ z-)eqmZk^7%@wlrzp%{Hp7Jgtp5fI1ql|4NW>dW}%7T^qVbOt@S|99!WS;_giaOaVV zp2Z;JG1;WsP}NaX#K@6qagMSLgyJ0`w^E16NwY`&T?Ms}gy;T#RUu&A%0ePGWo(MV zw&vl)M=GLm0-avisjds9$S%`{dzlq^Rzqh>Hy&b4+X`RJ!xbLb>U7F!r*v!l98bh6 z22D>{Bd`BQ(F5L^*5>ey6L`U#|3^;7!9`JSolT^B%Q2XzTl+O8WyKE zOdj-4=)HKz~rXuutWRlJ=aa5tX1Mz~6Nsc>!A2 zgrN%4dQ7r265xi`)MRm06*gib4 zJsSfdS`bI_$r}T?^DV$u3uxvd0P@)hkj8=(v-wwL9w0fuQ#7ipWL(TGgs7T7sP%xP zy|aro7q|ZXzqq!|KpyG*gh8`+N!^C5n^7HB_T*os2Vv^3WC<-2#LAr^x;6&KB`zuW$I8Dfe5T&V9R^cR2OVTebNH_Itd*?oE7?55QiD0!8%Sn=r*j-zzcT! 
z;7ZJ~l;vTL@M?D4EXzMaNAXxO50WI-d`+qxLHyy+UhwP9opwvvBRN*12!q0S_WAs; z=~~~u-efO4UamiG%RH5x?VY>Z&UOiVBK2|}3F6;604|+w)~;NUPO(Qr3HTg$X+*}6 zba+oB6ZC0|U0}sZdcmnE!u9?VgpuonSn1l)`<*T19b;}c8A&$)&q(z76cfl0Zi!Qu{D;7pWjjRhb?h1z_RVn`5t|b2P7)`EUsojl zqNi;=LN+-DjV6s)4*FuQHw>5#3KG>UV-5kvkiiUDATe6599v0L$NOW zn?vIUi|df-j&vj#HR^8VjI{corWxoWPAOyDOR{=s*9OC;wv;yvHVBDkeYsWPcJ|@8 zrJKgi&KhK99(~N^nx-=qUCZ~1GnQlwV(`krh0e~HrX%`8Q}$$IVwehoHCcpPJxCMe z#12sR?|)ODjB%1{7>`382MlK)7$Q`tMMv*fpBouOG zug}9dt4p?>Byd)XqE&8q*!V_e#L$+??2w)_gPR;R+Vvym;# zz~4H{AO089-v0SQ%u~*ntGD~gKjsD9!7bdcI2UO$sF_#iWW3axTR83MZTR(o1?3PM z7vkF0C`m=;N98(8yvJme3GxKbvVF2FlndDtL3tHJ)wWFxQI6-BC*ck5CJq7WoVMxK zuulthimtM^EbpQ@<#W>O8kfu8|4?;ASd@QF*B|*<1Z0=L3>a9*lK%YaWxM%mijGmr?XRH23|F5-bu7tEz~8^Y z@RD&G>CH=S3pDP#scRHuOEDcZ)^l=6qw>x>z=yAVW?V>73Gg0J22h!3A~JhvG}#)o ztARnb1rzSP5cC=6UKYf!nRw5&f;(T0#;1|$dWoW{W^d{vW&Rt8c_0C~D%#iD?0(6z zUy+ZFb*1|e#ol$|{Cj*Rlhql*D_RwXQOo1ZI^nd#ZH+uHiqDb3R&%H6V$9QH>!;t& zEf_og_OHG;EKqr2`f^=V{dTk<*WK=5ceBn;9R)P7e8_fH30Db*-j`&TvaGJQELRL(S;MOa9k7ZKuc@hz9NbhDahTUw`FGi0-1^}+;NSl* z&&FF8|4-M>UiSNqBc&Icxrp0=na#hi?eT7RA-@eD&vD){e5t8}@?-sI73KE zWadQ6C#pw3*SM-i4Z_=|V&!dMWsX}g`&~~!(Td;DTZ-|)sv1Xz_ZxNfr6R=I3>|ae zWmqkE58tBE3s5~mtBc$d3NF)*I_k$D^wzhN?Gwh&otQ7pSSuY@qMCXn17KDCEEBi3 zcN?kB{h6DeUp+T%_aW&9$?x^RpsxO4Ydz7RGWVR)Z6cM-gYW0~>JwV0qMwpWTsA3e z!%c8W1oyxukx1ZIvJMb99>$-@S4^fD6H`5{(=hz&PIN##DE~eOdW0{)oa_Kk-DphH zITd~Pj~c4op(<Mp8w^K5wVzV0I5Yx80e-^Mm* z&v%)Z|2lNrtL=YYQPl|Ju{NV=5-)xFOnKsHYl?tWYSdUY*;Yz2VRek~&NMr2228B! 
zeB0RFaOcO&7^ZN8wwoXn?6VlV**F}UO zWAyQbst7+!NbP(|4(Jy5^x9ST`vBY|ZK+ z9Aw*q#?JWa#sU-$b$a>W32N)RgQv0Zo#2>L_}>|`;1pTtvboYhTHH_@veea(9X$91 zcGlsbVC9^h`p$?|VL)5Ch(Vc(sFHw-vyD^4T_=$GpCZ4YYYwy+Ka%|9z@L(<*5~m( z%NuoU$AKfMB6@C6o@fFAgGEzD?sp^`wjR2*)la5@n}lRm-fKkUGRMFwRmxqEU_rl4 z+uMqr?YTw@jprv9#`{^&`s0x6t4#o!Eb1oyGEgYo5Mj8COK+?)q>HlQ8#c%fYBLTF z6G6rq8ROvCiem^Xt?^oBLGI`k*n@+ve5qYdg+plJFZRcq+I0Bk3{42U#Lxr9Ugg)) z-`E&O4i2CH^*}f0w=IYMLn_2Qf2SQXook_dP&3!YsCXMC4^bCj8ygu)$OKYJcF+Cx zgW3}fm3E=zP{GM>o!@U+_}7)V5)&m%pj;YfWi1X^`&UrrOYM8WcVYsnO?GOHhhF!d zYW2=9<3*078>&iFIqOZ9QmgbHy5J^H7`O8Ba;qpix*$(;aMVOY?@@EJLJHd?wx?iT z7@el~FIs?a0cIRbr(p_ia;Yj*rl+_(gF`J2Zum`d76BBf!poc@$mN-8E9`czG(uz6 zPZCaTIks&d${{cjPEeg_d(zd>CC|p}`|X|f1H97MM`-t()za&q$IS=-7 zNW>$#-JpZ5ZaXaxgB@!Y+3^FCf|I?S-B1{CD$;ucJ#};Kc+BW%!^;-@V$pTi5k|h4 z%OS*_$~KYL1!%mV5TrBP>+N#M4VR&$*%oao=g0tIKD*##A@P~t5OkjH5&T9s8_Wzt zw0avVUak4A$J?VcE;8lmRQCdzqeabrbrJ7Dpguz1z>^YruRbAdYka;!?RPvkA$&0F z)49wTkJ7`O`$+=x+ESv0pBd_t$tSfgNv(TS%hAYCXbwpz@yklV7 ze3lO;9DULgIL#Z)Klq>)_ZGKs-tn8Q+4ARgI1@34vcQ{xSdNEP>Z?1Sc%fIs(E`}J z;ty&$G2gk3!1y}>=$MfgJN9YHasRC|`NzQV>vJ#VmmsMjkbe8>1%4<3IJbLkoy3;? 
z-4OtMYJ;x22+$(>7+m%LYuTjQ*V@v{s8c!R0Jw>grm~cN+KPYEsk{(J;|DElW*oZb zNCT>wn>hVN059~UkrPIs37GqzCj$R$B_JjefOwufVycc%u7R<_n;}1TE{XS|toU;8 zXy&1td{qw#S{`eE#87@si`l3-B}#-J*G#=)uwxb#6)8$wo8|}?Ay3O`Q-@6#X2U}Q zsqVGTP0H{PN zVZn2Ll-#@+;+<~S&mTiqC*M``qtVbuuz(c9$i5*lJ^6uCNB`-|&{Y3p$bMerL_;Mr zIQY1np3?cK#c01}Eue#VFb9qI?{jxgn*j>eE4#AKQ@=($d%t3WuzMHyY}j54pNhYh z{dZvuYmwc;mBkoWy`xlBb#zq&rPSF~!y()WLN&{u1`T~t#xp`Q!~81eHbRoj)`{ze zywT*;ki`lKDGi@Ooqg$#v}C|r8E8w7uHoa@>4~_Yu<+yh1_y6sA3HrX;ZguKcSv_w z*sS9UYVB0!2em2OrLfK%VpZ&1txL%<;_%X-+ZC~8cIx7zf}g_wI)cCwzd1Y0o#t{F z3|hE``oaFmE%mUMgxu_0AR?9uNFIc0B}I*s{>bJZ>dMDK-z&Lv1gY-i)zZ*LpymW> zZaxHPhV}J{?&WH%F>z4K#tv^izv9(`>DLw_0@_?6C3^vZRZ~w898ih- zptft3dL&_$IhE>^SQNslTM>9X?uR2P+O@e|4H%8msZA=(N_-?f2M7qK#q~Y^;k|+- z4AHYhH^i{@A*qR7SfdkjvOpX;*;iJ)ZNj2=eX-IQyu4UW2CgdU_Eyd z98T*j+_8+Vdj6=XiVyOL-hV@JswXzA6LTc=RV3$&O^+~rgT*4AX)V|B*4D^PI}~=# z^~)A8tIfT!g{IT?H4nPHmsy0k)13DW8)x##5?l~5ETImkmR02|q-Sqtf zODz$@9(5~wV+iKDr9I${dqu?x$lr^KCCy|3jV<fITeN>Dkw=-<3fR zcM-Fmo$x-L^$fCAj9&$C-(zq(mB4iO<$sRD?*&$|T>d8D*7mcYF7w8hQas(8ack&< znzE5sx3RR>BJGn)M6HLzYi%AouSJSK!~N>q(A77#7Ja9PIa>u^44%aDVhY~=C_9@L zZ>r9(bA)=W$N1c3g+53e9=tnlIf@7H)ep3>9(r!0taXhX~U(J}EaZs2E9a5TY%j#|)cuo7X%)Lg#P{smQqe%0t=dz*is~{&g zPc6TML^sz$V2{vX?^FO3GxWwxkeOLjxLioJ>vE4bO9d95@d9;h$JctRRcGv)PEXrxsm;t@MYyZ3DU9fM%%1k*7eU+i@y z%`TQF`88lLH0Bs$5~hdwI1FW^4Fl~^rt;7LNEIJwtl8n(A%_=W?OsLOWFfV(DW;(F z)q4Gq<>*?kx1w`9;NSeS&;P^s#s6Cv-v7_X-q)&|1cDHGwXKu*Ab7x{CBDK4ZIonu zw(ode@rXjh)I{+l#w;5a8p1T%D2K$OZV>OJp9S=1yIem9euyU3E}TGUFJKF@=hHby8k^I`a96K2M;Q)i zxxLAX zxG?eCWH^U>;%ukGNgokH)~?EiS1%0ZgqRlIU9jkHT=yCWw#WmHBO!su95;TV`T=@d zr>+6v{zij3%5%lDj*#W_s>G=!Rac;W^iRV>s1l;*d&@W~Rku^~7Hvlf}X<}yXp@KUA#)@Mi`=Dl-%H2&zif1ij6$HNa zk!#TKMg}2fCGBRqr$f!n`BYv2?)hwdzwteJ%8nxuHte7E>WO}5m#@~Iu#mAh z5NKiAAS|xS-a59ciHNQ3>*F231?!JessX_xD0>7K7ILR24y>aWf_^kx?*pI?5u=p0 zV<6Jjn}J2Cdk28bK4(QMG%eVhzIJh<$wKA^j@iex3W!>#O?w4gzuwD^;hQ8>93SXe zGCI_vanszUVt8-;T@!m~QWyG@B|@#|9HS-04?eg?UmJNX+r>&6B$t5)zk_e`CH-%m 
ziF8O;cHKnSv5{A95mt@7=&lP{h4-1w1n$q&XQ_AER*+{6?;Ah(=~cw0?|kY6!qLRT z)??mYul91oTxM~p|L>cRv!k|pZ*M;j`D}hl$_k7hqQGhkIOR#XX1}jinBA5O8v=nT zc`bYg(>sxWvn9a8o2n6-SD1Gq%}f{_Kn7iSO{{j9Dj)1ff<&#zNfkY-%}6^aI<|n# z(YNidHd4oyh=FkE=ES-`-7f=_#U%yM^lm>G_vt53hT@ELJ+L0(QvjJ8AV`JTL8vej z>3%MR(FZU>{_SB-mkj;m`{X2(gj%|eY8?d0JPd1bOuXlrB#4rFtHj=n6A$tA#Wq*O zLwTYq`?slg*kzbsDJ#8jy4f(QXC*GYgkKwi!>%XSIGVd#&lEk!LyYx;YIeVC&Zcc^ zk8X~}ZtiK)95QI{H`9$A7O^KLki~_mFBksk6gBhbihNM9~t{~E~85Um@mMuA!$Z?CTTo2qRKZZB%b52ex&#YPFbmA(~-*eyb+p>5^mL@Zu5Ot9Pu`$w^U zg>V+U&T(e!dK=Gm<)-`Ko5RqW0E6NK_y2Hn@BOum7th%h7Se4)9M^spg)pQnA5DHt zJLGz>L7K69>PF)DrCP47KP3ZG9z%y^2n4{|k@*+D_yNZf1*EgVbX$ z_T=?I0m8onI(qk{%7hK(CTU~`=FJ!!n{W*;)rzy#=~hgAlg7H((t>G# zoZ|~;lzAuREe%=56GRX3kqB2nIXHqO%1jMK-(FE>JeMCqNgGO8U3)s8+B&a8eQ{yK zl}?Cge0x@`xj86_;kf^^f{LFc2jFEo$jN@#dUnM{sVLO0nWXzYQ>CIe?xSC+K z<<~#1ecDp`{DRx&zTYQv+tPAjVR@YmX#6{buGZ<19FOrywr?SR(HeG`0QR#oOglZ- z>SlVn37d=Ss9XbdZCv)b3P3K(W6Oj7X@fh%#dU_v;e(Sg`VH1irKmIK*XPQE&(wO2 zVM~k)WFc5TUN#((AWVPK0m%ml43CBASYwC80j02P7I2Gg6Myoss#@PLh!W8Ta3x5)#Kwl~q~KaL-K{x-qYc zTzCt?H-n1B5Zy7LNXlS8UBJ}_Aa~ar`&jx>B~DI&sL?3nQSdQ+N%=9o3*S)FO4t$& zQ%v!!K_D}&A3No{iZjB`4iOIE-+cByENpSBmr!H9-vKapZ}aSkuX`O(cKD4%OsKdm&p-v~gSjI$G*31LMP#VJzWLNn+t;R)_!&!848OyW8NnoX0 z@#+9(*Jc9kRU_TY(GJ)h@7xj_+;oXgsvgWQg|;+(T@68X(#58Da=+Pp?%0IEgrvc( z{@p)D_@AHKiV6pO*8SJjl?~IFX^>j#7g8YA6vk$%c40Q>=l(|VR=rFyU29Y>Xc|1a zir;!F03!s-+=jLTDR7J`&h>>-AEN|HnfbY@xk@?%$Z!XsW-*GnxWucKzx~t7*4Ib3 zK#!U2`>bX`H3R}a-E9K%St}p%GGmy$kJQ(~qO1__U=q8}YNU^8*=acZUi-`oMH-#D zc)1WFvw0cX`5vZ+{wl(q@}`xTO84(#5cJ<-FEU71BFzJMRI&SEw&+hVLS zcy2gG;BFLz@zB|_ya>s>YiXZsRVs}ogxSEQkaHnHz1hEBIVN^A!4P9+#>lC8vci+x zfIf;*EDn0eb&2nKxxNtRj(nsjtO#i|^T1AbvGR(tQ=R(7vF7`y)y@1n21yr2Uw@u4 zHv_PY7)9&#+njQyP91AWPu|jj;L^sXu&Tk#^F*iHen}8qs8D}oKnTkFnIl`%w zFO~Rw<#N0+6VqFJWydn1HnA8#h4=5+ykh*+K;Z67E85Fm4;hy@YHr}AR6iMzG%uE6 zXPgFyeMet^(c!;nc5AD5uBg63Ua&Ex>pmJoFiwNDY0gPbiDhn-%^2!)C7m0LSi5T4BaTaP`%gv(}3K)ID^Wa7<%{fUpmoez`u+ZD3XU zoMZy(y5O!;%fUo|r5v0(!>FSEKfJwpSW{>IH|$JjYOM=Z6qR+90;LFu$PUTWswiv4 
z%95~DB18xwvV}dgbwOlFk$nkik)@P`$Pz*bBq~eTDMS(=8#`hMm;eb75}#u`&u`|r zuJ@mL-s^qOA15K<afc&~K>4HBWSqbv0*0a6Y$flW z?sS~)Ez>qCm4NzY_ky0=>YbU8j+_|2Xy0(7$Id$MO9A>>DpQ`|jn*VK(99+f2e(Tm z{XPR0lZ4kXE!C-aH$;OHNBlRf(9a8tl5YXxwO7q2KJKBU|6~Km>oLE^GtmNljV5npNfQ`;Z_zd_ zkUD6&vB_zN2xWm8x2T$*TEg0F!n=T-o%JXUpqp$nkExAqCG&Pz%@@0tk?X$wsVS!Z zwvxn9&`9znr+6@_WZjyJt`Hb|s%2MLQ8@lLK)$<%vew4=Yu>27AS=h`Qp)XQa3AU) z4mK^sfWDkt#3R7ms$HS74t6d_x(14l(+Ka1f_i#(7jG1xgeB&|zv?W&de*}7qx?%3 zBssSpx?IgCX6G2*D&4&)PYv^Q)1y0Ct%AK0R_18SPo|=wLFanl*0$xvTat3SXKrms z#W|m9dQnVQyqXP{i|u&FC2&(pMIfkWp^#D~f-=YM76;;I=a(I_6FFrh$#S2Yj#|Ku zD?A`+U2B*%zNGfa8qeM1Fs~D*a>yu1JG-pA>}z|f=)#7T71iOi^_du>Pj65Twza($ zZMaGk`;zP)DJ7HgVfyA$^yLuZX6yWI#I(t^!Ml&MXvvTMoEL)(=Jxf zt#|Pu^<6O*ZjO6CtHVh*9WNt5zPJ?X*|VXqIoAQdxLhmZ8&=@I zM+5oUx@<#GdU?rw)rTsC>hW{QSXi9uZglZ4Cyo~=cQ18X5Asw+5eoTj>?mE#f4vu=PvXh* zuo|whBCwj1wTyNB$@mmX7Hn$Sq5Nb`Q^M~rdN7JSzb;9|0S_4FR`q($rFl<5B~O@s zW0(n(>VGeG9XULOD|wNG`d&IRE;jS4nc=6tY?<=EUm&G_RiN$UU3Lrb#dDRfCAo-2 zoQ45wf-hOC8Xm_3>&m z!mTF^u;>hKwA6N&Dd4>kQj~nhBoqFm86jX*dF!i)fX&ZH?omsNLwzb=(B&PyzU6!^ zk3ljV@ym)pEhFt=Y|w!zURHDq&t4D^9h0cLp{}{LdN74qz6z7mKAkQYm~1}euSdiP zPSiJTYDqb70=s@s^R$pDYjq>v3LutJttZZIB}*3@&Y_DlJq~xK*<=Pdvdc?4T3Qj_ z{~-Muu!8SQ6rn0jJ#3CAp7%;YxW|=2^lR~-E9=7=au;iwk7r!HaOwO%fzjL-xFLy_ zHgUe&?w?W_Ne{O)NHO0p*(72QyzFNv#@~*N_A&4U8HC2@)ir-IlcuE?mS*-%mMOPk zL$jh{h22!c7&lrWw|Rb(u8+Un&dhwxN!1foM=R)1yDAh-9 z^%^{JTJ(Ac2vFcS%0`1+9(QToA6#1kzJFUe^s|l?lhy`b&w z!Msz0;{ee-wU$$mA7;+IHt@Wb2xhH#?miJIKaT+`zMOeBrTWQP;>1ebqXW)0lxDFa zOQpz8Ibmh+>xT;bAKU+hKVmKTWoo7^AVn9b%WVvucZka`Uhnr(S&*tgiUf{*cNrF?kH+dmX>l{M;tEcDid*3?^|FQKKblfb;|hT z$D4}m$JzR`3#yTbJn)pO{YC3j|E{a$v7G@GA}~0uHN|qzXh!W4-blF0off_XLJw`; z&qcop*=hzVZ+yr6WvL5!1~~Blofg@@Oa6a)H_xMGNa)EuiRSy4AQ8IG-HAErsGc2X zy{YNlU9nJ4_+8M%nw1?pHEb)&T63?yu;C`VfDjVoGBz@A=LF!9`;j|2CZ_MAll@;0 zryCf#!uoT@k^-Ao$;hJ?n_cr%?hIeN-oj&>w+>z>)`K(vy!~aRy81H{;N}JX)DHZ8 z1r+V`&q!ljS&v2Z%AkE-KHRGSZ~hkJoLdy1fm{pB&Vu^L+PCKg6G)Rto~bh+xS!qb zJ#of3$!1f1f(~Yr+}c#K=^f*K2xEWpBgULfab;s^uI5mWDKX|YI~0DzKL$l6T>k4i 
zhCMp1(}PbRB#7rywvphXC(}DdQ|ybS(3*-QAi{V2oR@X&LnmnuHYn7};%pq|oXXgH zdGM~C^}j7>j@;;`f+|)cR$I#Wpj#co7_?R7rNuJ5%T{}`IN;%+4H1woy>u`CSztYG zW;g4CYQdcfm4U)IQpH;o{y$gAL|YxV0OvZbN48D$GB+ z70S1y3jGH5mbF@!!x=%a8e)Ga!eq~f9GVinSUih_);*q>X z=;9znb=__uR)&nQU+&F0O=Xm@an6T+RSGa}rlZG3?N&Ro9OU;X9b6-gc;n!S_R;;W z!Fv)3B08pjXs{Di_Aln8!t0bIph0^JgW-XAg8V+T|RIgTy_M{UHG_Uf_qlV)KIOE?|aQJF0 z-MQZYbhLAWRq4zom=c4qpXW)N{p4YJh!%X;B0mHhT81=|yv@^nAd3)Tq(WBhG_y5h z&uXv`+?>OZk5n9^y8QO^yFaS%-(I;teh`#(Qpytm>T@}xfClXD@HKFi%`T9+CW*lq zevLE0f%+Xl0~{GtWVvem<5}z}@5;+30 z2LDYM$iQs%d*?o^xBkV?g{(AoNord6l-wB>mK$nYm4z_On~A}|`s$iR_hn}~yvn`- zJyCn0qqEkfmve4NQE4&t(*|q$gSGR%c%z)OjRRv~p}B7web?GpnD#pczEdB+7o^`j zo3?_heODets}HO#K9TeNACa@5Oevh}fW0_ycV73$r^mMq0)1XwNy$qT7MAgFkgm z&CiD<8<-q@2CSB>z|}-Xwf^bB(JBZu3Oh;}wjhY*zFXy|=j=TSC{z8u#btA?Zb{vN ze&%=OW}+7Ao;I7~mxX2-SOLc{nkRE?ZV3bVu>irrRV&xua0Se#%t7jBEfhh_rS->LyNFz)e|r>RhmKS0M?qO@}= z=x#}>Z+n~BCK|;!%eoDlVa7i?-igKO_W)f1(~9K~;7skBOL6sc%hEl}@lO&@@+iG+ zU_EMH9?FN5ZF-5a?h)VIy!NW7X@RK%3BwnfqNshf_`4_#uzS zqt`BksVB$bO@mFa+ManQi~}DB@szBgc$9%-kwxAxrqShUBUUF3>LRjtrYB9a*euJ5 zg~H)KmAwBtnU;w!k+p#%(PA)Z>Us6OX=hlKa zyIemqF;)Ls)Nj27a$dCogi+-=o*AP9m^)E>f;UhJ^;buet#X=Mh7)mgS5MKsNK`~x zpeX3mq3%bU>eP8e-?A7v+|otGW~K+pr>_qI^Is#H zSMV9=u4BtbHu7#A6s+!~8_PCtdSP-IDEBeyw zX*S|!T2X9}G%Q*o~>x7u|o+z<}-N00<6 zEbT?2LEozjR0sdG((vceYnMPzY2ib}-SND7Sd#PGBl7W4FWVgD~ds|B&NCC-cW#FD78#DT;xI}?~B>xiRGO=h5K^?e!;>wnq&_R)Zw3ys=n#SbD#Nb^w}`Bj z$lB3an;^zP8?})1j7Cn#+^Uu05cG80>1`iz$nB-sMaaWF{|>i$bt=s0q+w6r2)_@2 zlo+^-ldHcamK_P8L%$n^-gGjxA|ur`h?TLW(?-`bGO_{HGSBO-)6*0}V4nK*>_3%2 zyA{DfbsL!P|7S|XqSAKEf5)7=my*Mjo@}*6-IN>zLry7leaMeMq01(^eDeUdB>Tw4 zXsVhiZ+xh8^CdfYamfWtG-c-#{nA5{(gYRHVouB?>t*q!DOD@Pdo0MfVQl7NzkP5Q zHW06Fx^k@|4}NPXrrh~2Ex2{dwjK)CxpkvzI(348KyYPN8Temrkn9;>c=)wbJ7#qM z_U?-)om)l)Kj`j?140d1`JtCC;7TLfY#@OR-sx8kxbV&97uMlTRP^wuYda6ovRPfe zYq#sL&9A<<5!ix)$5F>k_i)Xfb&u#m!}&PPJN;{9Ysr(dW3re9QdZ2`Xkh&5Pz7kL z>z!l^+Y$1(%#(#Z3E>FU^M);TFA{#M=ivaL~nzserX8;7!|EDQaO6)*bO`nUW}^ 
zgrj5iuFTC_e>gczMna;(L)RlFMExd6{D*~e=W^1?b|witb6({_2W$Y-oYEKoP{nqVQ~fy zyR=}i%i?P$jkhSZkN9NHQ%5YLx}vwE)@+Fatv{X~F2MTzs#osH*b^lgy9RP@WW1JB zuakf+V^Vt($fi1mtpt&}`g%q-q-HNRG4Z#Jye@jFRA0lkts_0}u1U=*5@f1>5Mnkw zN2#q`+SVX=c@>RW^6~vCZAe2qtjJc-8aZb3qPML)&yP8buD;s27Qvcev-?! z%68J%yXH;K4}l0t{d%z+Dm@?}!n*|;-y6bPjSgEa_ao*V4_jrS%8s;ej|LrhptqHo zUE#opM?Yq1EVXU;D1N(we82AjkOFUcc&8=&9Z(7F`X4tiKI{}u2HN~@fGTta;-#Ho zUXjG?YXS|bb=XLMo~E7JT82nRSI~#DbRHSda<_mrMJBMcQ|5F1w?ma)srt`S8w@?iq8xvkt61sxxohk6GiEH=A)iC0yY zdrANsovU`*mT@nNmmNxKjFsKnX2!>@nNB&I{D`);{yYSK(6;0xDr7K2diNFWu;DE@ z+UEJ1_t09Zz&fBFga&=*bbLv9XlsAWH23X7|I3iFka@YKj*XTdWr{s^G$=cBPj{~L zGF!6zWba%tF-$zkpKV?T9n>@U5fLB`HoLh|3MYd9=zjUF~Furio*)}5(A~^$bgq~9hP5(26fx%eLj_H zoa&f-lz`-BtqXDG2pqt;Y z(om1FMt}G?-`t6(?HM8AEbL@W^t6)FTRD`bd{Q{PV-Z0_Zaa({rk_8 z^qLQ+`)rT)TBpUlOo}-Oi;j;!ov@W}u~U0DG?-swWU!>3wJ2lX)|=f;w%qj{!>!y| z3K5_Niw5LjNe6qBvJDUC*N1zZY_vq)7ppvT4p)adPCida6kAFxryZvZO-@N*j{xS+ zTW4bd7k^(6?{^?>=!``2h%zW8^nilD9+Q<0V-z8*oSR}}PlyZx=YQ~W}}3_i8Fz~pC8 zsPaqT7W97Kv+~*qan#Pv-tp`uW=*RCU@M1vF+5UY@{oj0Z1$4mF&u=8x7jt}mqh|g zPm=xK~pFgw+*#2AJpRxD*p2Y9X@2IQ% z)`7PHDGiKqXXUMTL88B`_UC?@ClkohVP~qG1&N$aXCG+DACh@zk;EHaKVoA*QWM8xxkslivn-Hgbm0EYc( z_1&51@*tgI$<%fZ>|;~TNor3-VpIqrNY^bs+td|+kA{B;u+oJ{DlbBQoR2h*NG?Zb zdQX%9TI_5?L;LPm67cb$T($jcnc9e-J63sbc7<(D#qAgi;mMVZN>Siof3`kMmuX+6 zkNr)A2`SR_@){tG{SN2L6$la)YRmcU;F9M8yh!Y@=D->2@S`Y61fjW(<^mt7-wQ|0 zr?NkJ_av1#V#^A`27z2$NS>*o)6!h*%rJ zo|x@OE*70uBsMSC<_c0}4@U;q*I-Tsgab>yv)cRJU8e;M5e^j6=aJN4@H0^{*61+S z99iNwrQSEW?ETs#7qi@S{NZ$x^Bd;vT@!92S6M*LLU(w_Yn7KmW7<1ml8rm#RxCd$ zjNqf;1n&nPkGljbYilSP@Q99Cy0BAeC54gP66R9#NpBHhZ1%WS$+)iNpWf*J9anGO z;RPm2s<*iR#7g#IYx|tP)E89v@U`W~dJGT`wnjb<_%$LDwh%~;S66uUH4Kz0oicit zUk`P2^FuBVlBu|*)mU3|=DeV`Ho+L@0Zdj=GaBAG~w2J}j#`4O6L97v}ZRL7KD`-C)>f8=DTblNiTN9Kw z59GaZM+P!WidT?pe%Knvws}R=`Nm_1D7Jr2`9Jm_4XFSoQ9rXVZ4plgzY=^qshgJu z!8NO%M+uqd+d<2!>}c?d7}XZ~y1Dz8BH)<6@G9)y?;A3@{C;0b=WUhZ&17vV;Jmzp zUm^-6DLJD60=5Q?e-(vUBPLaR=WYLIVO8df?(-iz>xoAmNJZBhWN#EFGw;MQ90r-q 
z_I{1Sw(iRG&u4dNT0+m2!Q1@_g+fcsVJ8*2ky)>3k?jufKR~#CjTNGqRx6Lzjdqrp zF*W-98#gY%9)Srj>DW>>hFy8*uhq%8Pr}U45$x?1?n1GVPgui``vBxSP_t zYRGvk+lySH|NKLx3~6%*mp;O$L~l^<ES+R54r7One}`ldq7-{q&u~Zdr@*Dhqo`2O7#i^Cpj3couJ3t4(4912_3Z5;Yyh9 zTrQ!P6K9oCQqv4Rc84nuOSW zD>|Bhz0>V>H*Y+3@pZ-3EvBBB{?l?ta+Cc=-u$?#{%O?E!kFlrmfB%N=0sg!~{e=Zp%`GK$K}6eM!M3ySWaiRLQ!LxpGXgegnYr)x9oj5& z(DQsAKS`1Hyw^BE;=G1u&DMVG%?=qSrt;1KH#M{`HaI4%=$;^&96ieM!$%AajWk<+ z!RQb~QV&naX{$wpw0@KoDX|Q~S}&jXqxRpO|8tNWjA|6Hl33@6IA+Y&Qc7I8 zus*R?XW2jK!a_)lu0xJhOYd~B-*DsUuC=YHnBHAGYm*@4UUDAAoL>;IKMu_~AqXHd z#Gq)r`r`$333{hq$Ed$B&($416)sw;?m*nlboPyD8*z^5mfG4mle$vhGAaA zBG*PUqO%SSqc6|9Tw3IXkLW(nM+8~x*2wl-ti)rYQ`1MT(o_yH5tvj%lOpZzpd(4$ zTTeo}!b1q2zM0*gz6B4^>vk<`9_q;9RmO`*i=GxqDfiVTzNRRHptK;wCYk=|@bjO2 z^RIoV!A8LzyQBO4zKCu3w&x4ohagV|FhdwW!c}(Eugq^T2)eF&L~vqn#zFz&1#r|! zqrL0k&F|muTc`yTq@Mo9xDLD~f%Z^!8UK0ANht+4a|Ss0)~`CofJ zNA3AL{y4tHDaJL9L6t;e2;)q9L(iH?sB-#}Rz>1#?jgZfKh%;x-SzT|p4D3Wy5LiQ zYfzu@1&iHW*$Jf8Ywtwv2wiPQBvY{R!kHc8EY?w#8mf~R1=&#Ob-=h@n6mEeualDw z3g#O}2JSpQh~}r@t+F`sP1d@}KYj{wdhd^Y4o+kaLvq ze4ANetNnWamuUxAHD_ULTYKI0`6R`?zJWesTbJH39Ja@TxD3fQhL&jQM2;x?lY!;& zW#C%APPoBOO~;Q5VCH4G8IXWG3`Q8~6|$pwg?a0pbTST z*8-|zr%>&8#_fHaU`)Q=v0M#oYW-7qkMF~W7ytQ34Vy;0d`1AelZk(PFY25n#gWqc zg&>FVaAVMRhbDlHS@hE)t)DNZhx#<{*xU;C@G5_P4oYj9xa8Gv$ef#X9oRV!=G4JP&8$lCmUFHOy*Rl$WdrE`#FOIY9$_80r?`RqC=8tQq zTRjd280Qg5_k%(!tb6o`kIQaKt+KzM$A7xS12@^?b(e8z0~Z}x52J?z&DOJ|baV16 zzC~;q^@S2TXlU$T5*Ue0K^OwVG!e@!Z|yS{duEZG07=h4=D0!)M8g@ zA+tzFWbcl0y$or^#=e4zvrHTI!AgPA`i}h{Rh9oej(^0CCiVH)NJBJ5O01=0@&x0 zDzfZj03}dlv0=m6s6+FV6tZ`%=dJu*@UDY(o6$)N(N(~*L0HYmYqcRqsW?%zaY}3B z_-iglH&2rjbJw{+v?Vg#<9hu5a)FdCu)OR}??`B8J4x&erp{Om%1_w+|-Tm}FG5f?XEFu=mp=dvzTVVshls>Mxo+ zSgV4G)j-|y{qQJ-hfO+jHuGcqdBRAk`?`hZLce_~p$L%wW3x-%Vmt7r{TomkiW;#K zuSLV$5?kY(#z-z~Acnu}+;rykY60huNqdllF~U(>rs6n7ohNVw=p@XUp`_84?WJ>d z(P(jKj1p`(*U^3tUwu0i?AfrnRpk`6p1=6z&CP;aM!POF7?QG_9Mo}-tsmDuy(UY9 z3j@fK(5|g5K3udqel|73S-RD?Kp%WMhXR)|i!>li7CTq8>HltOrv89SR 
zDQi*@jC07{cRsX!Ke^+`2D1YLN@n`-a5gQ9EJrMPLT2MHzR+~0+X4Le>$8+5U|HfH z%;t4P%1~O$8UpP-dyW|TZRa~V7bmhIn-c_CvUDQdSm>HdVPA;n5{ZZMb5h*D82>JNnxZR!G1DIun@= zo!*-qZ_Mr!QZPNG>RUy>3zVDSHu%FeWyeUD0Z|^XWLv_GtkdPivsM=#v*vCyip=ap zR;7@l7OvQAW7c8twtU3IXjybrVgbF@{{%h-*i%S9 zrag)7Q4nZ3G3dvNct;gvUnTCJIK_}T@F7D!o|ho0B1g4LmZZZ(g#b5dl|)Q)mh z8m*$76_L2cNv^$EhBGpQh9P^GKfj7U8C7(xya)%sHr`U@S#-kK-Q=hm)d63!M!%G( z3qQ9o$Tsm^af`$EGlM>5zMU3WmwEaWK+j;m|FgfwXp+qh0Hksdb%3mc9J0|%<6+JL zMkjAaeB?f=hQat3CJ)n8$D|6?iicR(HowcAM#v*Q;z>(y*6#pj)g7sK?=Q!Xtt(5C zC`I@=Nuq;K+n!>`{%Ssxy5TIm=iGF$KWUk8`3eFX<>DhTul=C8Ku@f6 zj!w5`)-Iv85$f0Rr^$x!9>-6@x&QpvV2)TyL0tdIy;^jk_UbEMtPY9=DYJY~?RN*- z`C^MUEugxna?2yvs!`+k+ScQM7pl)osTR3GxHy*?%;rf1ag#~(4)d)^1leJz-t`P{x0;{pD+L-?OKRN{O6D=sgmn6YgGuI zaAV@}N~RUV5R`rddr`I?q3lg2T+V-^E;4cAcG6U{LFRT^F}aT zr<;A=M#46{Adv;;v#jHBSvEjj|9T|b`DHXe6qH{}2 zGP3IOdRh0V(0Y@=vf2%E=%kYyP_+i8nT{ge2nsPc``HwxUVbv{3Odnfz5HVAwx3~K zRocrtqKisGPu&{9?Jdpbp`JNLEpZH6_j;=~Z1x`g^`ufueFem(k!EpNr|drO=&t!< z>S_6A$HwM^YL_<~)+f(|SBTiFY;yLOUr#U!-tY5K>d*s5??rL=GJz9(U18grN-6}K zoa=!Za(ngn=8EUw)ust=^KqoN;Ba{e;HBA?<^Pb;9Wq{GCHFQ=p@P@Z6wgx+D)drDfL9S}G`I52c^wKe%%N4YYG2{x34qA}=@m4%R{ zrNsr0rK{e7-dFlP7W)TomM;PTI3nWP-LCW_+{4z2Bp+R&4#4CIHi57ks}Vo~4)BNi zV;k4JB&WNMxaC>*hn19+03cU@mb^Wr2-(r|nfYT-VlZS8CW@yG(9Yvf5J@N2e3_^C zNn}eoHV-NLmWHG7#nRgu4>J+MFZ>!2Z~f%~o#=0xgI|Y4ug*puE3nT=tC&?lAMoS# zz-@pGG}_m^%o;jU03b74OJqYUqS~t zIV(#la&_R>!T<>p3ICPK&hhT+xWbd~I+r734&0VDY_4h_wA@7}LTIFhScXIZmoz6bt=#Pyxz+4)RW840R``cb-cW2+wV8&0b2& z+Z~$Yrpnw@&OSfi-69<(>KvMW!M47XSw7nD(kUEmB?j z(EU%?%D)Pd+Vp+QGP*t!U*qdQ!F3#`N92t(jXOAjboUER_`c1yrTl`yc z-E}W=?e5(vL55herVJJ9)C6EO0+;7j>=RayvQKazoY6Dw^+Sa{)BZIOl~saxh{-vH zI#*gzII#c}_fez$eMIgr;)Vvn?N`G?oi@Sma1Ev%#Ulyw3s$@$zmnk7pOK@7r~C@I z7pqoI_uj$+!X`lzWARtHy7JaUoeIC=o+4OV)lfn=^n}PBp{}{&WR8iFN55$RA*w(-Ap)+l~qyBL$zrkV(L5@6bvQVTkodis0& zy80LyMZq$&KvPlMlrPn~7N)OtwtF#rD^nKPYjaM}9Bl18%(>DJ4ON3}qn4TZnEf%C2TDv^v%_vCqU#*^F>L`{hO4pOrLHVa>1%mtxgE8F<|=>TjdADeqF#f0}LYIMoT 
zv~8GXk|2{>?3mGxEPp=RILLsEpj#(TaAqJxP|b+g*m%6I(@&|VszGN)6tfg_$7Pt#7K0)mer$jWH3z>Cb?*lO)cws$TgzH?K|jp>FX z);d$jSna&IM!mnRM$*NR7NjNq?U96Z*gbxWag5Kvw)y8Q>4@F)2SjeC%j{Z3c=T{?7Oi^1Spl}KC%ox z>GKPB-=!N-DSh9&eKzS6Z9JWr@_OnD?m^N{N5d`vx7kln&FAayYPQd)gHD}K{ZCH-$i~cDr0YDw#`yfjQFRG!EdnYky+V=(n zi>-Uhe`J;O{M96MtID`jyqA^2*YG~n=@b@&okxlMlCFMB5&2yWO}Zg97peVf-P0Ej z`clS;47y&nZemFz!i)T?XE8_$-BF-k=YVJDyq>9?v=w=tVdUhVssSDMTy6Fs z7w<;(dcLTs#WBudU@1RDMBj5jRJdlvIa<~iRlgC?8sIvj6io>2z-*8p_~w_YQCy^j zNx0>sz-9dlZSv~qwx4<)4opKR&(cgmg-HfKQfcrIdCp>KVsx|?$t+-kj3u8ESKUOp zBC1$_4_hL5Ul_SgDmks7yap|<9~`#R7OG)6kQpX1%9%E-DS8SMCbs603Qg22FLITC zs`vpza`2|gGUy*S-U3E6x`$;RYqopp9WhObK^`Fy7s3t&s71|)h+iXmP5J$d$X+ue zn4}YF9C~q!AlqF{w*Cc5W&PHcc1^GHCNGtApMB&bwX%}gl~eI!zM?!B!ZYt;_vIlj z0zmDDrxsrj$ch_IT~qtop=$^Hlo5_1*0AC&Qvf9nYNb5~i0=roN*v;-CO;|8-r#$n zYX9CWzt~xL%^Tlq+DT4FZ<-ItHtNx@acu(@*+AOyN*r1?Tsj^^V(7Aatrvr){H~H~ z-)x?g!l@1Gi_geXw&B_H>&6@*_9y;)$)05Z>`lJL7D{_=OYJFPq+JN8RHoiP!4OrR zF)Ez_%KX2VyR+o(;lU-k*u9pxGzDi0!BFa^y*5N`-H%s{j2&@GP?fQ^kC^ipeQw#8 z`T`~GKQ9uzSi;>HX~Txbjp|QIbQ53f9H}pm=(^WY4<_pv0>r8k1Q`0fmpB>F80~!_ zqPK~HR&Pe@KBpeIxv!;X6V|JBu$Q?V?n8)xh7Aud%L@* zGUk7GePuC&3M)cV`oS;0^6$Iym3+N;Jse*?eGl9N^?F z!t}lZJcGDsu;1NZ2HRP={>@q%4QtXiF)`n+7Koqo7o!t=+h|!y$p&8prnhStdj;01 zas-me=tQd8bqX!u{l1Tq-RsEwg>8q98}>u6tmdE}O35onpUPiGHmYAAM_tHl?M#}B z5!c;sKfmy@6Tnjklkg`PAy%Bx>v(o|pc6~yjIlilx1PbLYgG?%X&x@3Rru&3WJlK$Nf#%9%o{0?t9W9nvCG8HODS^0q^;Ky_Zi`{>wQQN%CAySLya{aba}<0qn$Zsj|{cK9vBaGVwpMa zu1_7cJQ}dcWX$@g=+50-%nhNArU+e|{$j5|=(4bQdg}ES1?oq|YC5xv%s7d>c7)f1 z`rVr=1JPT`mTIt%Ida!ti5?Bhq6wzEj_TU*BWsM_VAC7>!SzS=T<7yR^uemj9JMoP z^%~}ih=~+WWw<9c4IpsQL(&BDDu!yd91yfTc@n@j?)s3yJAfk{ppA(wQVA76lQn=O z#N4~I&2;T4)h+t5=>ft;Y4#v_8OvhPWfp>g5ov(xU-I}zra z<2e}NCMt;k>14=dp@+aZ$vjgO|s064(T0TRpK0$g_*siXiz zAa>I`V6o$5kiq+Xd!F7vJzk^&@1PZR@As_;gvtQm%c~f${6{fq&lULQ$M^eY_0*9& z{@c`0Z{teIeTFq4=OMqqjZE5bHSwy3ZPo?bCU+_?=-Yb-z8z@oq~~u|b*6a)ayKJ( 
zGZXOic(sE$sTFR?<)w`Zdj@j=rBF}ishqV%DRrnSRgO*5UQ>GmJD@Mz@%-wcRn4n3;%-XN!M(013DQDRQd6-r?2CV$6{Dog0QDLXt?yk=EK=^p;*}A26fnr?z(~)c zIRO542fPd20NMcp2IC4)b1doozQ3i3;Bj)*=N*G8mA$8`)fs5V*FcW&9Ss4PBktjq zK!Q-92Y5@3ikfnWI%)&ufj76mh~>^N$II#iGSGqBSvF)W}bU z=I5H+m8`bM*okdRWnC0ji3BEZy!`#X5TN{K#B*RwLVy_~2BeaJIsODlge!W|cJYAD zDWGe#v9Vx(N;r%|<7isG!09+^V`ws8-ZNhgx{3>AAMq=)zR!k~j@Ao~t751e| z0q{|g>hM-I4B}mpzgb&bod(%e;ej95@DEd3i}wC^srY}F>Oca{LKC5`_0%c00@VYW zpD8rEn&00)qdocvjph31N2nT`8O*m~3$Wg3eEYYuQLd)L-gy zCqdt-oKk30qBf@Y&VFXgo&pdHv)=EIeq@dEi_v-1W!O@k?frylLcU+K$c0vrH<_%^ z)yQ2h^5$lBcqXWFuOVr6<&n_n49{OTrNhj&q*WAV#h)Ri+}#i}^L_)Y+-4i@#Ny?!9!U2PQ3W)Pe)YmBc1T_*f6!~_Wx*a~4X!J>VD(Fw}W_CJ=$FSP8)yAux#kGsVj$Xg){O@@yr4Uym>kX z9FMJ6e}tLG5+$?J{zY@9^V#YODj%|pvuYVpG(g9+=WRd$>|)AeYtqL7)gwLUUx@l> zkrOfAQiC}Z61=G_815N<_$#z3acgj~mW2;!)y2dgU!~RSTA#NM6U4v?$@LdBA{IB{cb7z)j!Yw%iMJ?;vwzY>c`+dSp^5P38vugRX^Q=w zvK4)=F*WRj_3ogQ^p^STbo9DiK)F<>`g#fJObfB;yC))hkGb{~cVY``Sb{j!!CGXN z8J1S#!;*@)I(*0+HzZ5DYI|nT0y+fvs*lbsV09dum&UEgUlu%wjQH$BfBi)p1W9Sy z)|!2QhO7zJ1D%?dOW)qP6mWyxOqonA@aH9Yli91Wkc0Aj+l2vdS>rs)R8qY<5xknc zv4S6TCJ5?B9F?RlIJ4^xYi}d2fs0tg4>Y*tY7G0zl!6kX9`UwQ&s)8_GG=Lc;aVY| zv*%$x62f-MgKSm-wt=P2TcredD&ht?hN7 z6a^K8t5m2WATr6Ad!3NEf-(h&fT&lmZ=IM1_)sasWOBa0ZAYs zK?s8wk`Tx|`8%||!}D9ude`&3f4t|9b2e*n*8XPilasx_-_HP^?#>IvNuMNEhzaI6 zm}^13&%s9hV}Qil-9`c@NHj4X6~ha7a@x-3CtzU}s#}CW9VBz41H-bq-r$MZq zz18rRCcy@*e{kLYGTAk?eYUhLC#3W{lQrRb8L)w3;2(55^avy+GX_vp#?|VB)jnkg z_93q^P7xwttH%PRHfb=8*+l9Ho;n;u|WmyIyciGT-9q8A2z9ILXK ztxqN6J!R9Yj%;|c5k9G8GlWvi4qx9=*jcS2iYmg?I$JlKy>2!|jtWY)wZEf@vJPuf zQ*CT?T94a$=|@~EV>hK@%nxtWN{uNN>{E!~b9W>?&Fh}ysds%L8SLV;%hG-`n)IaMJ!8=%xQ1MZXxYkSbPR7vvbi;%al;d^jAXM5@c~T909cXoXk&fA zUv!QOR7IrR=xJtb3VV&=0dh3mmH66FmqSrsD>aLKzsh|INDVhd9HX4iedafEJ}>hW zpi8zZNQY=kvjrGkp-nD(Bnc%7B;?oK=Q_#BORNS>uy&YA%5vM`dQ&Rz4f0$>=;FJ| zULQ0($MuQm&T=3z??Ruoe{}?f;&V#E7#M9m4F<&TR==VCgBwlZOzbShC-D+D+ZVFF z=*Tn8ayTidv^Q1<<)lpp1h;C{{cH@r^7E+)R;S6Ohm0BIVEFL8RA;$$?RdBYe`e6| 
zRY)R$pU~I`PqCI>nV7Bg{B(!a9KGtnXuOcXLsZVr=P4e&QYW}EV`pc8Y=$cV=CUv} z_ImNh(*g3R_s=|M{i|Le@`v!-JGwc%Wt_Q{%40D&0Tojc(>)z=Z3|<4xJ*QkKdnKl zBD!5ks$9J1za<|$qMC3E)(d1+Hy)c-*-CSk*NeN3#hL1KPh!&{Uma=RF~?UVCr4Ay zPKPp!Zife=@4;_3uT}UYhaHpOZlNwu+)zS2uf&U~byYvhB8YNS=9Zjm{WT2+UL~7n zeN9HaTJRpfXH_*n@R%1%X&PpdR#ZMLF2TWOc!9~xX}R=e?o z*zj3U4UHd_&X|qkHL45`mYs)j+Y48B-!u3d!X4_i?zVVzeI z4||5>&E)~c9puCFOFdAt3rXcn)5ytdz01c1c92Kmp)LUt zmaINb*hZ}<8+usmj`=g7DAyS$94)~HR?-O(MkUb;Pa5KY5=BepXkiiT^h6xs&r9jW zKaZl|>q77kv-_dyy6U>`pc_CfA_KCdMD9N5!12-7phL`rC>2OC@YZ3VAlX3d&r_!H zx&)%rX`H0vVZ-q+U~TiE%QS5BRroMnnI zCvgi$Kojj+mHMOI1K`xt=8lIZ!H`eaU*QTT>)Rg0Y>1QYY^qlKhCNzwq_rLDV)o3j zF{gH@nHIJe*6`!#b&Ltyd)R_jb>$!Px1$rlobb=st^~>oc$5lHv~vUd(-_5{-7NE% z@X$;;E(yZN9d3&F!mABi7o23B?mp3%ToOtoXMo3Sj8UhIHNakiGE5^tQjeqodE+Lx z@2WalSd-bEFKEWu!Wt1XD zLUBC%C6PO#8rp+D?D3=S-7ug@l{Op>53SI%PD@STOVEME z)SxuzR@;m-b)0o9@h3ggGK+Pwq)h>@@F3xb{Zx}^`M#Ocv%8K?|Ml3LhqwwVAytxA zsKdZU!nMK*`xHb)hJV4>+lGK*ehJ7Z)f5 z+2Y-u#+~b`*Nk}u8@X`ZL204-fscGyBldAl|Eu{=|L!gvE&Xh1HAK;o>Ww@(#tE2X zC0q^Pzv_}67kit9y3SrtTdaezKVN&x1eW(*gquo5K^VDkwXl5`A#N9ENtgnSpy^Q1 z0O{;B;l~RiPIp2O*5*@2=JU=|5b;{vGQ7sdd-!HCy*nhC}jLS4~^WU~5^T z7{rnvG#gEsV9rLr>6A0YT*9Ry&}nxs*1xl=NjqdKH7K3u}`y z8sbD;mcdNLd_;4g4b(QEq?WBg^();K)w3jqLsQDf`uW!K0}ZEJM#(%r)nwArfY2my z^At7EHIBIfr3(%9vqK1PM=QA+3po^o`qNw=d`Jc@Dtit*_pRwx)@W zR_Kh$g|_jAm}=)*P7^sfm(^EPGKgPZ|N6@zK2}VApgv@8n z5OSIrw?khY7Sw{SQOvM!AWu*iGo5?g7Y+NoQETVj#<(d`dA)xbAOFl{f_`c(ZG8Lf zFtj?xpL+YQ?z3cU%#~QUIKCPR*`LZKz{~z^yve*t0d74>^YhU-qqFZTn=lzf5T--8`Ra@nKKy zlYa=;SDg6ZiKijxVD4&W7`i~u57w>r%fHZ;=X_Ex^kh1Mtqo8{_6NOFvP}>N!slO7$=+AJc zxM@{~yV-yUE=-(q=gl!t$z*QTr45lTEFl=wt;q}m<;OjWMZIfIcX)vnm8f}hMjGBU zbPZ&hItj%Wt7tOV_&Shv8Pg>ztfBE%@?gmLl7tuv5jR2_JO>s28@0l$%$zs*1LKm4 z`e|F32(zku;{>VcX97s}JA595v;+Qm+o6MSo~=-O;#vr#IARvL_d%_G=`l$+5tW(k zr`c(chW5i^@7Y!Tg_Xm4wcoR4A=7Bp7)MJ-Dxd#&x3x1M@g%7mnz~7 z-^+mnFf)O56!Ewz(`e&`1XNI6stTVHSLwmYOVy(byd<2iZDRfY&h5b}huHp&C9KmW zqOf{KS?k%ySeZ-_h4z)mlQBl1zYmRZc_Z@XyG?6LDXirZi&;G?&scJbmq`tt2excH;;8%hs5Z 
zTsf+Hz|eD~;;6046Mrv`bPj#yGWAE+Hj_CTL4>Lc#!vGtWj#XKYv>Af(rMWGu_!)a zhx=VZgxa`xaQzUU4Jn{n7InHHe(D})r(hH>zRQ(M4X`xw|AqxdWhZ+V91l3R%HhE zGSAL~^HM22aDiAnX>A&cn;hycsjRLpOgFc-KBHjAsXT&xE^{G6L&Uh7Pu6XGyoxem zkQ;TsAt91>kr03O0Z{^-;T znR`?=Z6B~FWaOxp<%<-84j{3&Rt_J`?7NJNWB-XxL(1 zsmr&yT*~ZoIn>{)2rK*U?RI(wy4O0kZJ=Rb^xi3b$N{+jL+U$9TCY5+XrO~!yqE)~ zM7)3WC@OG!sO68v#!&|167!QgDVS9zBz+6xVAkb5(!?QKzHB~ReraEhi1BQGWD^ib z(IkfEz>+J`2SkU%T<>-sf8d^D)^xYw=9(f;Hsg6+c@7)CEzMxi=?V4wQX4qo^B%ai z@nez+TT0Tr1toMTl(-)F`hjC>(vPAKdtxVz@y!*G6IMOYB{!xGOpscSIF(_!6QIur zOzjq+zpoSb#t3*nWwKdJSx@!P@~oDwobg!WGZblUX#awzv) z*f-OKA zqiwcgJ#XTJvks@Pv3Rxw5VkSdt4}%ZNkF0-j$@9Xv8`o z$F0pX(v%m(i3=N$Iad1x)aTQ-R!A*&Rz@uU5Mwc>rxw}Ede~ll1?S;Na{qbJ#iKjzB37Ha(G@?@;o{7V%4lT*=)Zz`Y;J;ZI%D}0_qw1t+r z%pzhus*=c9jKG%YvQTW!Okf8gcz)W@&8)$nEooYW?x;;0k(On0I0__cjDkQ`m3pq7 zNFi2{Dx%=|piZI+@DBz=xpise>EQPih(}TFvjFeusv@Bf%l7o70);4`f-pgv4mR}M z*3JSl*niXy0#wZWy)N*PshaN)Kpqz^`Ee^NLuZlu9nfaIQByQ73 z&5j#4ZS*fF`ZzGXwTxg_uqbu8D@AUUFO%ECjtSYHOSv&4YLn5)5$2Ft9n%__(PzqM zeOhn~%g*uDnxC)X=VZ2b*o0MQkKIY1I+u|{)p{ZsRZJ8pwY%{kr>vh9mK)Wb$r)tx zazl)1F*-^Um0TF(vYwY==sr1Hb8tnO%H=G$Ur>PE>wxS*>tnto&JD=3fA%BqFFh06 zgpsC=aOr_XWi<_WPI+d6jKacoWT>z+xCKj@-pb5)krJJaXu+y>etl7_EZA_WXQG!R z4yaeL3-F#^|KXg&2nQCUFgC_2qnfeO>a%lva^d((q_ug^WJg|NHf?6*kuv|nbdhT9 z#g5UfuD|^DUfMnBK~QlA7Bv>%Nj;C!*1zjCJhlL{*{jERzio1RQ$QzL!7K9{Va=nh z-!KLysOu~+F?K!Vny08XF{Q)2LpqoNzG4T3c_d#2y5=}stWP#P>Sq`^pEb+rz8{U$2 zkBXR)u?og^wB+ct;~Qzq5(;7G3HH%f1U0yyv@e8w@!IbD9i}!d&@fvB3XXH|HGSK| zy419y@GRUo4=sAm{Eh%*u@dPzMsI3iVE-dxXu#l&ydhVLd2X1?^pnEzyFJ+GIC<6H zikF43v8Wx0W|WP-D{I)foYs!Ya7!7)IhvVkd{vt6z@#JbWa4~qr&SpgFy*xc7*7-f z(B2Pwibg9#AOAj5&rmC6d~`ZaQUv%#Z(#Pf*kFS;%o0>rpotwjRi66nyep`uwVNXW zt!yw|w}+f(W>we}cKRJVa;_P>P3c()SjNJa*M^>;na{i8iM%^<)pQ4WWPH>y*5Qi!bPM5n{KxYBe1SMzr67fTe=Ry1cKE`ASiSTr%zK* z?6fPlev59eoB+r`6a$JRcD`^AYe13Y^u^WgpNf7{B$?h-!2%RXGNHcPvlzf$_%bEb z`O#LX*j27!W=qZvQMgIzQgy!L1Qx|g!Zh>Qm?#XL@M||u+q{xa?#ngylf#{?^ZUSa zvK+s;;%EEaQaD@~uFl1$w56fze2#8?23mIX{mt!|me|(5oleWL1^!G>&v@4afi8`~ 
z_l!4yX?F*~l8kGW23Ee&vQ}m$@o--dnJKynRI@uC4cDgsEE~KwZLdFRiJ~+Bl~SP0 zXYzL@D`ZKk&4PTFI6t(#9Ua9;Ipa36Ru@c;X zCj%%yRresTuBUBogH1!?6#$9`P6^fB(G9BiLxqLR)gl|Z@DdFtaH>MIRovC9R?E)MjL_bY{_RnpNNXdUkZ3P&PWQ|S?yG#~ zbAwRWKD<7t^FYDeAZR@)$LVM)rLr2{mJIhm%Uef|iwb8wU?c}c}qCw~?8+t3^ z{F4)v^R~vpfzYEPhh>0s1mFg+Exy|Oq%+zQ<6z_o2qxXlT{I5veLH#qOS2cyKw$iE znrfeyvA;sYk*5{w`E9qZFvrQLWja#|(~uR`l9#f+S|Ob$X)Snik=dzFXQU9Aky*F0 ze2lr{nHWRMdzXp7kc3JP92(T9GXy)zz$Hy#8)tl;Yyx%uB1kF1b5tBXvud)n)0+@^TU3?ptn{;2IyWv~0h`^>p58i|zgwQn+EHGO2~Y&b0AJHG3c zr$pu1yV9(r9XF<{|HXs~W0!TvRz|R8jHa^O%Ur0MT6uL7+x|j;>WZ^#MP4OoXA2h6 zepu*i^=?3a*7s>_W?FQzMsc`dp9K_Vi!FX9KAOwXnCvmROzH}R{s5{6Rd zlvvhU-BqCEUAtIN%B)s2%Z_f~`P^>g546=)oG@>K7qo`7&Z+`-aP`{InZQE>x(>Dz zUFb@oevkZtF)FinI0!=;Se`j-A$Ie2?m;l&%7<68|9bBCmjdu+_^ed#)83=-42v!8 zzeh@&>(dYv?s%1NN|IIgnOXQ|LWwk(ge)5Ty!i=^ zIK9pkw0NPakwf$mHl%hg#|;M4m(9p}_%;vx@DX@=#CVXc+noi7>0>^&`arBl7gXIQ z(~y%}*-ASS%WYg;vG%Eup3H!)Kg{;Z#95LLQyfar7efnzT7Cth`>32csr93;!fvOr zJi248SVU^D&m*>S9r45WyJo;y?ey@Ayojw$Fs?R=UVUUOb!VtG=g(fH<(2h+0u1ea z2%K%`dya#*ARgMbxSAvO{p8Rbw+2Fp^y-3}kEKiwyz~Q@qA0W`^lu;b_{e?IVcq9f zUpixt63fQ3GYiZ?m9+@U4v<_bu07#@mevD=1Oz1%=wu-WtZh|S2E^!m>mkdd>Z7wU z3EF2Fj_dwKM@GzsshdJ7uJvw<{!z-4%Fe+Xbud{+HzR-aF>Nt)XlbBhZWUWB+4dsT zu^5p72JNVyLv9QE&{m7a@o-*bcoSQ*X&k;Q9A^OfI3P`bJ$;4uwCNW6Tu=5~Qp53= zKi)pBmikWUP-&kHR-OZu8GbS2@)aXuJ8M6}BW(~e+oMWW(r(BWxBIz_F!Z+^ z-j`k2@)fUhb&7n#W;B?h2{R3&1$M&J3OBmbV)G*WTzeDZKay#$Tgc|I7H7z z1uo4+WEs)L|D;VQt@cT%%Y%f)8V_1VO&5p)n}dV1oRUhy&WsdgO<_*Ec=f)Q$-D!j zkyjg%eGSka?^rQ8p4yyvuS9Vw4DiSiNJ)P>zaM$7Cy=_+6TNIK2&*N(ABQO$Mplr; zzCO$IO&ulo6+kWo`EKL}VMv~n&xctK)jqoxfKf*LX-%W94A|6k3fO0S1tMQjr=8kV z-9oBygCPTb)`CjbB%45I7VE@EY?a4UT9ejeRBy}u`5f>2deBmdNm(4uNxMJ&IjV#i5bj6v!RS%lMhCYr!b0V zVzBKy8SSV>XL1mtYb^8gxNY-Q$coko`Y3JU?KTn@f1&r4R_wOKY5(XewuySl{bYvc z3tDV`cOT1k;%|A;Ux}u>zX=b*fc!nYlwQ?1M=()cuq;?eB1LZ+4FUN3*s690_vDoH z#%NbN;GqM6l**mDPTE>FSiawzBL0(yqbsuNF1U8O@W?YWZh2_-K#hnAm0jmD_gnn- zt@-Vw)db|6uRNs(xmLd=Kmjy0uFKUoJ$bsb`)p&v@M@+_;6o?thb$LCZ+rAiQ&KAA 
zHNCdZ_TBa-zy&T&q=vb~Nfm-K=@FgJ*jmxIXMR}SzUrWaYSbSWRYzECOt&$BW#UEy z^K^Q^OqB3PoB%}BFJLvaiNzoGIMAr$e-BSvJV=Dt|3z^3IGVU17e6^h>vl{tEYZ{r7S}j%;B> z>+#E`-A)5Qd;l$2fjo&E1)yM;)qC_#*{f|c&h z-~AEkzc2Pc&Pzeug=-n~dIED2xP;IALe(v|R>GE%D9+qu8UR#GjaeI~^0e-V1ixUv z-Mu$5i*McBKbsMm;+%K?3vg>PIl60Z;(P_yr!zgz64RTZoM~#j#+aDsJmVy~S|n(w zJMCi6${39p?G(zpTf!>gI^6fYyjHhD7gZfhT_71jrlq>DHW6p=Tk>KXg;jH_vh2zI zw)FG=j`aVg(suT~q$4t{MXI^pSwPah@jB({yd9L@WMsQCC=N>)%r4eH;{tNIUiH4j zVYZg!nuuy(v#)8%u2`GhdyOeRoreI$aiG|4_hz{P$y4G0I@mkN5oqXD6V1Uv_7wS;&$%e!T!%$x*j{8eIDgItxr2WFN&`=u-{vMS&uxr>UTuqwd#;M zW@fyQUtPx6Ss2yYxZnOOW**$F=2=&kl_WBL?=sFVXdV@yXM^7h;(J7l4O(gfBc$3S z&Q)3M-2P2_ftjO4kM}7H;z>VoLek_>M#WVR{U^;G917tf%F&K|f5Uvq zj3WwYwX7;UJ7mAGRoLD01TwST}5czVWB{KL#wg!DcdPxiL06(Z?=O5N{r3;?VW;0z58cT`<(J( z^g@A13}D}!W;kF*L?~l~(Ubb({I(v1K>%!8P@NzT9c%@JQc^!{^PBr`#?S<|rMHDc zf|DOgsjHJ}-KZD%vmRGisBhz}3&1)LC192ggLwntnDn|c50!=9tBNJl*SS;s4u zc=DqcpCJ+uQO`P>*;A8l$=9Pa)&@pmVgu4PY$2h9VShz%VGz>=a7B%X(P$WaREQ`@ zgIH8#&M%}0NUrb~G?2F?+i@}a%w(3tIWMC;FwEzRGyjb7e`zT><;#Dr^0NoaS2>Gp zcv`j!;6T%~;}x?l%7H?s*B|!0(*-U0lL%i(FV-*qvya_tirT8DG&+?8R5(MGZ|1Ch zN3(=tnjZVJN%4E2^zkj|sSb8aXR)LBCf4cw@9(A0$hOA#N>QO_&mbXb!g2j3mhRVz zoEKvJ=a&7)@X_L#CriTH``*+N@fZiF%m-^EAIFQpJ>B+&8X5k^&ZS#rnEF;TLBhM1 zF@JrlGpkq^8V=NWthX^&_IqgdVe7mH_8OBhTIm|(;21mGg-Q|NwguUiE?+V={9Y8E zO}WCZo>Qz4OD#2N8Y#4|7;$kFvw-&lZAHEh<8L>_VZ8h0mh366@e8JE6Vh6ZUxOaEh$K zukC7~1=`n->f@g|Im5^Tsuhtt%wkTyUCDr*2By=XFDDM8dk4lwz4*0rV?W?6PYalX ziaH7jB|nr!wvfkPkzgms8X{LtL9RMHW7zAZCoxC6?O+bc_L?8|#01X8`O?8o_iF|T zb@+N0k76<)O!2d^|HJQlsLp)%}#|({LVy zx8vA+MK&1s0Bv;pbMM7C%UVBaw*aiy6#OJQG=E^7XQE4BMuqExnb4qkPAJ{1lN@9e z6k_Ob&TXOypm*0*E=JFAGmt6m6&~DBO?X6IVm0nDa+sD_ro6+{Z))pHReJpPlK*F@ zM}J2Al{8KrjZyt$QQ1U12f$GtLqs8*Y9)ZQrO{Wkg;}z}734N@x#QcpBKL6MzW;Zs z?*9z(Kd&aMr~p! 
z20I#>)Y_L1KG6T_)Yz~LBM9J+F6ACxG}tbnU)Sa!XpsZ zF+V50QIt~LWM44+W1^IG-EBa5ApRC5!?r)HHdDhYw?KVKIxK2-dEQ_0du#~fVnR>{ zHR8RM!aUtsKD>wkWVqp0Q$aC(> zM%uc$UQk0rvN_e{^_0mfVO68G(SXO=!lrrGx|Id7CUK3qGwHh9iCsnGDVhU9OZ0%>?Gw{K@gSbr26I{FMj0#<~$BF5k^;J}7TU@jXfLnYF_fy$!6oy7!ym{*lV= zR(7yqb@=Q%kEAeE&v-5Ts`ntSP`ik_Q?ujVw#hV%-LSqD4J+&+lyXp+)rW=h2ijMQ z`vABv&%e>CsXxG&K*rSTpW=@vFV+T^uda&yb?^gwJ|u!@<1>r-z9uLoE%uX?KKq{T=Eca>Uw!BpnX@Ssq7JO zO#LZtdHA03i#0&?01&$mwHpEWW%XNF*Dx)ZWaMfH$KRm&1sJgk5xH8Cn6y)$YQUu3;xz{pWerfhj{tpZ3ci`BZhO4_ z?cx8Z+;C-|f|1bOqJTG*w@iTBi30@nSaC9(C`5V;O2;I7^hJ6-bYC!R@5lk+)IF@qqv)*0;N zv_p=L%&3386qOUaL8lnf)!|wCR;WHp?E6b;86?^iL;n1-vWyjDn`u@Z(;!+G`o8lT z3H~j}|2(OkNu@t0GbMjp^C&b)#wz}NFWO4x1E4-H~7Z!iC z0gyuUvsIsDiN6%HTz1t?{&|FRq?$;}W}qw{n&t`6fLz~80U}8_Q%~XpNWF0HoDw$3 zUNVr6U&_b7Oeo=a0r!183CgaoXYek_S*hUGJVtW1eU|sWL%E2`(ex_j`~9PYFjL_( zyYsIBUpbJ=^lBzD?C^%{nza?SMqxF1ZF_|q=u4$*pu&hvPD_$P?Cb&lwld$aOPrd> z4tAVUilWScHA?UEQm?oFjPiec)&0v3>(}6X>46QM-YTw}POc6o`l`ncMy}-?J#G^L zj+2*Kzi8s(wM-~q&b@DWdZ91uIq6JDRMbU_k{J?A`{W=~0H#!J3GBKX84iuW-qvN% zfkORy@+IR|&XtDa%OFiYB{sY%l^+n{X=(hpWFVc?cARBmtdB~6L8e?Z1mdMe|jQa~sy&4mVbJDn_pt1oDBiF3c=^$+p zAAK}s`#w8%bfBa@xclTyT;b<|4RtZ=)26<1YfAp~03-8h6|l5wX2n1OajbDO>cOonKrs^qmi?shuD=U%LK z?V(p^QElAzk_aHbOKvbVIFkW?pPZdc<9jhFe9>SazB;1)NwwZ!TWUg1p!CM+f2-U6FU8tF zxdtI$WOQVB3u>Q6e_6zZGm=eRzRV3vTD9|j;$JsfRCn5cq8M}45ii8HTL9UHL#8|P zFz}jF$)mE;y!u!A?@*mfmHJy_lhpI*;GD}&4fG&~&Ulsl&dSpr*gpqs_o`1{&AFY` zB$?fbH?$SMZjC28Ttbzfn_Lc|)4z{NPc0P?y4SBo&#S))Gt zyHSvCC*30y{safkQWrrVn?l(Q_hIG@xf``G+e-(82ZiK01qN&Dw9w+TRKB4%UmBJm zi2MF8yB$--H6kj3Vf;pW44u@p5}nrlJsdla z_5&12V-OjtQ!&!})J4g4pbjzE%Nwrxg(VzZserg=@#e(IV76oZ70V-%4!(FZeP7na z)zPl<>7t>zCCVB}1}>n!dLxh;OUoMSdKOg^-SpU1f&B4g^VxZ0QIdA!hHh$^BB(v{ zWGBh+u%c5n9$`;+P9ttCh)pJ?$gKJvF0(wKD8G~M6m(Lbe9Cs*p}o0{?RYI$dFtpz zuOC?ekP@&*_xcS;=>kv^egjf|*L|(Dlw#Zel$YgTKCLYaJh^CmYl@saX@MNGW{&p>bpkr84Z!hf5Y7wRP z%&ak%YnO(3J0Ws2a{;Z&b#mrTy*}FUUoioV@JG^*KkTuF7?Wi8kfIg=uw7RWV_(7rEKvdE4&)|Lch0NUyyQ#YhjaH-{FBXgq~GJF|^LIdtEwE~2%& 
z@rtW@(J9l$qnBJ=tKLt8FeBs{fe!gI5tLO{o}5!i`!tg@MOA6zSfe)+F%|4ZYeZva3R<9;QD^iA(^?u(Wfzxth4 z6CpRsIBn!;iUWGkTJf>S${p)`kyDM-&l^d925E>}i1wbooBQlIB1&Qp6oy9$37qp3 z;|nR01u7)0y)2dsfYJ-4&+@Cx%>5|_A@Mv{amk#Rr(IlVwm!MFzLAS>cQBN55RcfK zkq38vt@`WlA=&y4q4aDUN21Z|LFx>9m|juh`Yk9ol5oV@v9f1VoBt%vGUMK*N;}`W zXDb!yVUd(@l8Jf~f4k`wqNvX3*eWrPsT|&zOpKC`wuv?A@n{`fT7YFa&E6DsDiP&) zwymt*#cd-pAhaQ;aV;epRLwfix39l2+%g^nsP1y{fI{y?ag*HOq}P@9Phn%IcyhRyEi-z-U?LVyWB-m1LF(l`=OEj6lP%Cu*=JQ z$!3*nODA+b`sWy2&)Cvlz2E-On;u8cTVq05 zwC;j}f+Pl7^DMoRPQ?aQ8q@HRvt2I9CJPnMt_?l`_!P41QsPE_Vd2++eKDZvG!3{E z1DZ~k)5DYx0Sh8r(+Gp44nmnwie(=ltaT%b$@t0;iWqF&ju#Dr1_}2Cc`(?s zrQ5gJclnmIO}8Kf36c}K?~_mdJkS(qf7TxSEiUKLqnr@U&Cn2*A7K}CIr!zS&(f2h z>&?SQJ4U0V9N0Rso`SCrr?gD~rp;_~bMvM#VHx}M-vTH_*TU7WH$`FMgkeYP(uP03x&Gfw^=^1t9*P@^nlXAT z>V(2}aeYLQQgyYvH*N9QimHx&EOYIu*1^p&BLvSml>Ndw_Vpba;TYxc(EQ3oTW(#z zL_NuQZ-XB|>Dv3gYNXs_t(`^iK7KML-?$nI71QQaJN{+t)q#B5{)8ubrVGlJTTBrJ zN?jU@Hgnp9*PSQNYD(hu+%ce_Uk@k!0TS6iM>;Y7H6C)d_v*S!a+rtf(|4Q&{kU=X z#f7K@NDzPI)foGXa^U&o-~4?N1cHh>HBW{RBhAR;vf{U=eqDN)mn7}npJ^DBxMJ;} zCU4j&hjOPgf4RiAGR=Cg3#@Rk_4|--&8SjeiBkz(1J}R^2{6pmI;&r-1WU z_kgbPgeL}&nDm%s9dv5iN6PaHANE+?QGVDHP0B9vlI#xLe>qaC$3fBy+mnh`Fa40K`99yX9Jc@l z67-4g0~_BdG@Hn8-@R~rRrhS({ziaBb;s@MmlxP(GMt%dO53RMzoY#uq55%Nj#cls zsd=M5aB!8Ur|?rn8J+OC=Se|=zuy!QalOtcX)~730V+%EfQ#y=-5L%>`YDmK&BtTv z!;MB_LM*F4mlW7Kby)%wOS-w^8)ISbNxPl1{fU+B$C1R+zlK2Zp95X)3sV_(+1yDc zm)I9_aAC(iuM1WAorQT_YknnvSp%Pyu>2}R!MK5uhNDebFUwk(ot5$p2CC;Wr91^5 zY6uq%_Kkm6(Nncn1Za=WNRDqVy%Z_eZNxlhu-}OjSlo$WhQ|R;LUW)GWzPBn zo^ltkg6+RUb~K}yxXwZGrGU63vN|IJ-q9PXSG`||skzo`mu{L*^DLf{goM`p z91?!?wW3IIJ@pGdaiZ}cI4Au@nrT54FGNU9|IRX*FHtSG$VzY$vFWdQn;Ul#@%yj& z#Qh3@(Fbqbj!W@B`Yk@K9UJ;uugsAwWIv$hwne3rKf6##J^q+)16|T6`Lqqow2}#O zi<$z;DdyffwBGN)i@H@i4u#z_@fzqy#5Qsjt}cQBCPNI|K?dO=Y2)Ej8nuC6dw=x* z8`F=37HJNPhK$IR^Vpb*tI+TGMoktEUj!J4#?gkJyb)$v+NuUo(b)O!P4BVZZoIWQ71 z(j(JzqMcgmAwg-cXIP6<3lZjdl(P5;Ruolx0Z#zaE1(4u&yn(>hyry8^4hamKcvX_Tn(7(?EZ*IgKo7I$ZZN+yj!X 
z`LHsf+S0gI-Y{xr>)8^LVelo1dW>er-7z!M!1tL604_ghzgyWddD!~0fBO%=ACv>LBcCa_L^68ZhVLIyTS-K(wM z5%K(*HOg^$3H;(Ds$vIz)L)d&PQQdz+2lp{N=B+(L|41cRd292i-6_3%m+QWijL20 zz$$Bs3pC6p;2TuN96SLaO<%wVe5!!gE;KPRgC zN!HMry{SK%leco7UJnYhN^hj4g>E%3T1=GIxY-z(4H)NJr?qYb5i~~`L3mW)4u){V zNH32!ny^!ZPN12re3_Oc!x4893aR0o-2NT)ReVktZrni~?FH@0s;^A*rW3Kn7 znCj5Cd!1W~y3(7eYOXc?6LXOJrGSI=RYqtuhzPJsgo!%qpqHY_qlIN$hc)W${@Zym z=-A@YSYL>mm0K|;6!Q6_%J4KNU(*O2bK5SPb3x&6&c!UbC8BAF=M;m$tS(#Aj*P_h zR7MyxGiJ1_wTjI@CpxMVS_a)tkROF5saL$PDor5i)!=5sGfz)bJnXr1oksDw5&jo% zH0*7Ylkwf`2Mu>cNx;Tv_$~q8c=T?tDZ~MJtat_!6BHT{aB@mR!>o3Y6z%zFr6Xuz zGd1d{mAvC&wefODLiHD@49}KOKfc(u?s3=K-SG(4=R2<1=`g?Fp22b#h(Vr5xw;3& z?qXbc&GuII>-WPpl1hyIL^B)A?mTKrLMJFd&iWhx&M#8_fpqpfd`~S+G->UJ>fv4> z4({`)%;?S#4qq!1^?SURIRO^tb&yf!ns`Ziyn?;O=f*M}Xe+f8BrYHHqWwJnt3&^s z5#ah!rP@km|9J^@M9*?zW$=3IoWqqF1?giv;d8H)(!ezF#}zf-On7x4i@2NfM(k1m zxMPuLrY$frp)1-?21wFm8r)1}MJ{B%B|t{n{T5VetsU5F)J2x>!W;PH!RlKHe-(M#!}{3|BX%_pwicix=j-iTB!+v7c~({p zV6z{w7binQDXXXBZq|mW`H?9_l_1sQd|%CIDX00Mg@leUbW9ZkV)>}7pfm{#-wyoT zQ!*d7uJYiKcNZRPCvM+icolG|x*!XcRCh6Xsr_U8`<;OL~BADHzPH`3Afxaj7CSxX2qfK2P)UHQo`x;%`jOXO7dm zRPpc;xY#yRyFzn7gTu>*K7XGv8EbiO;$;2k)3FtsVie^MoS~CS@KhzZM^Cs%8uv%v^Nf*v8K@`2es!nEo z)zIG7Fk1wTVH}%yT$?JzuHD^&m?)_|<7Vn5dGekFDmZ$ZMfW*riHe5k-O0olx*uJy zw{zvCi*pCAQD7aCLSYD|#ZzP?$vF1Vo@@AzKO(VTTush)Fj~uU+0M`6<(t9^wzy7_ zC0nggJ``*Hr3KWnS$P^FqR(3E>>W?%VL{_Z42-@%1Y$#cV|0sWS~O>_|_~;UY{8)kj%7 z9PE;#W?juILG>^`vvA35R~V+WVh%7|J8#Eh-D~5<=D+%tIMP+~c+(uYAg)-aC)50!GrvEZV=H( zJbs^}f&R4N8hYkc9ESs)(M{{WEWogI-JxG*5xccYHmfeZjX4cW-zd^#pcCu`qc0}AN$rtu_YN|^Q8 z{ULg%Fg3!Ql$ZR8h9QTW2~Jadw+fHLOy49A5ILCB&ogfdvA}jQ1+^M1TRDBtm~EZ~ zN4cSigXM#zcP|fMjtv$*o^i7P90^=t`>Wg2LDgIamC+HXk=C#50TlOfDy9so!O0#Z zlqv#7#H6DDv&7L#q7h^mzGJ(bOf9?+TYs~8WX1LuKbFoXRx400em29^U@Zu9HP`|f zFQEitIn4+~)9At3d6rkX>XzeOxtxN^qbq#T_XRSgAuLbRKt}AM85(|@#lMc@9-@pQ+b;cKUZ#Mk7}Kq?R32o zS%fRvKt)sBxH+qOYrNaRmV!1Dr% znoe~gdnX|p_X?!&HS4lA3HkuD&jAF7bC0)eaOK)))qzkNjhPdTB?$LIv#WEeHPbw2 
zkHi%If0Vs>SX1Y^H*B}NwROO4sUUL;6^aOm$P8g02T;bNRE9955FtVck$DX4ZU;n$ z6q(2D%2Xu*10*4g%8wCZTPZs>O*8M!s zdY=2fe}gJ*vC-q9-+zPDKKZM`xKsOvc+j;+Qp2X!CNC88Q%{VOD}qy@aX^~-MB7cS~tzr7#oh--b`GHM|010Uj1#T_kn( z)oho%Y>0S=jW2r4-?Lruj$ikXw$5+9w5jRUFkklANx-t-q*?Db9`L_M1^7$tFnj*W!8ggNceV^XZyn6v(+?kUAuYzF zMD927IeL~&A0P!-pP6jA{JEq5=@wB-Lyu^{PkFMNsLO&#TsDsH+vM#r!Gg7x&}_Q5 zqpB8k1tF(BLSa-JI@79;v>YK``u|sX`J?s!tMYPd$_xQv)8z#Q{5r*XIhrCR@D87} z4d#a&mETSq<6{B@v*i}0^XJeN4s@P@Eg{^=(xCJ0f zR8~5O$QsGWr}>dx{$n*WUss9WRMu^*05y#QRV_IpnPJ1Cwe`5q=pWO5Z<|Qn3O0Y( z;Q0ePoZ8YmRz%oEapjpWS+W0)>2U!5cy>P}?Bv)0$HaI(92Wn!&J+Kt%;XE^#@W`a z`!}7Q7QlnRW!8k$22n*5%E;qjoI&W@d<*b;HINmPf#NCR=p+)3d^T<0-ZCvi2CZ#Q zQ~p9_odefN;lV>3FKdGUHd-{&x?O|#-CgicD+YNG{epXY!sQAC-zQ)@D~UgA7ad$S zNi8yO6V4$J<2Db?-E}O;X}1fbO+EbZ5U^WmQ{?x*9crYatSdUx*tBPcea*q{zUPHb zNB!2A10mHrj{$?~^%97MBAagBUFSH(#&52PS91O=guDw$zdd&T-!J@seYd0w6oeG6 zFDM|7^-G$;eCw4u#-&{$q&T||^FVuc?427&D0 zL7PLHJ(LK~k>9PNMs|KXgm;0yD9JGa#8i&he-zf{_w0R2nU_IAu56~Lcm~!976>Qn zhqH~XB`J0B_`!1mna_Rg`4%0paOU9GKvLYiIUSxNXo}+ugk@(bvX|P_7}6KB-3J+2 zUDPpu!x}HiqCo~Wi7lv_4j}+ltgcB~0|Gn(wn$xlmf8P+S1S6Ph^h!8a$zA|vV*v+ z1i%dmGv!u%N^G2Q+tu~3)9t#NR(5!#FePKYul7VyudB99KjpI&y*2*wV${-|%Um~$ zFpF#C%Yer#;M*KHUWzljRvb>tO?*^S*e@(WJ_Yn(v4*@GoFzVMG84tjo(ibnEY_>c6j({)*g1_hN2BN|aqdnux_w3+j9zj!Ovh zF4JLizj&3L?$>*L(}8uqF27s*1S%YzLd3B?My-s$!$kzX>0gWK;Q(!Pf7Qvi2Ru@I zDs#66(KkXlf3u0CQ$xYja2A>7PZyv;YiM1sVDmL^gx1NpltH_N%n!5ox?#pX^>AOn zay7^7?4FsvqkC|$x0$+&_MfvLSr^jw#hhPQlDe`BnfYUTq9hU0DL1k8VV{MOT+u>iF<2Sw?@wHS^i+awXc3bL)UDG0_9 zwWo*MLuz8DvBLJitwO`IS+8Od`Q((W2kt8pK&>+rf2WVGsx1Xz3XuVkd6&Khp6A3R z&}a6bfWwMLvHP*`JXwv!mFMem@ntG`lia?KPD%&R4d_S}(gRPBb~i^I+2AyWLwr-4 z;HDm7sDs4xf-L$ZCSmzC()PGkN^jcMn!RmsY&v|J>NPXQ$EutS4Ncq|YdjGyAigk- zA#d|4*%_7GA;45)FCfL~9si05aHD@iBFyuxPqSWeSHq*duv6|!tEd<$5gr4)u?kr{ zGykkQ^JOP|*OL@4EA31;g$8ubOJjN0$o7l<;i52q+4j=w`H*>7O^S-hEAAKr76H15;C;(tQKW~4Db%`zS{R&U1JYaryfZN z%NmSdOs#4d#^FWV;9~8O5nB7HKmfH=^|&IH6RaiZ&4*G);9^o*wCn8-f8(4#!>xC0 
z2GgGg{_;Oz_|q@H9kR+fy}rOC|4Et%-u7Y$yr!o84|A5UGS2JkN1SqJ-rdZSD7=F0IJZ@>vyMl;mMztL4W<9fc^e!Ji&KbJUnLU z+F=v=aO~%rx1dY@GSIaHhF~`~Z_{Drw8;&~@JAo3l8_tQmaRuK&AQ*z723MM&ke1F z3av*<-IYHo`_~PwnU>t=uhi0i(e`qqpXM)$MC(NPrSt)fde=YUV`XdVA>VgJ7B~c+7Qdtif zAj#<-Y?vCmn$H$3<(Pc-c@-`rskU6Q(swn>tD^pSA}6%rRZ98vMxVv!jgzRp5?O~} z7n!3aS3*791x&rj?Q7y*#-I_(tnL+gL(N2go$HcUS2Ml;`r-YUEcD;D9oVH<{vB=s zw3z%}Rr`2|%kfLd@ib&Lac*~3zzhtT|M9`HJ(TmoJ!QIj5rCTx*3Yy7K0dxf^Ghu3 zNSa^#b>_X5$IMO{Vr4Y8QuJywZfr@#%CBqQFs%7_bxQZG0bH+tdiP`S_S&Kq^HP`p z+p3mguA_|l#DzYUq1kJ{z_pqUIRZD&p4NC;w;xAW+c7G6E(pmCH}i)Q`B}NaDcmr% z@1X{*5$mY857xeO|J5#B&O&6AuxmgPk@-lRy~~EHjmgkXSJJ?F0yC>BKFqkv)Y=g^ z?^QCQjwfbW8gb_9!;DH>hPmOy68$ms83K$G>{b#a8&R1wG-_qO0H)9<8JJN!TuO$k zW>%>vb~39VOKo@qu+4nuAvHUeUhc?xG)o(w8hOy*F;8tv{b8PpWBb1>n@=CY^v8K5 z?thzgz)SyV@(Cvm7aTP!*0P59YEKEBnP$i&Wy)KGbA-A2;$2UJiiIMt5H;0LU0!~+ zAG^>ki+(|sdk&XaJazri=eI|A?|+}o|GbWd9R&2BsDKwMo(p7ye+syexKj(THR2e6 z0_WoLN9zrlX4nrQy}rC&^SV0TcjV7mJGpF+E+DUH{b65wf3=9CCfJdt8@}wUl5^uuw<^KILIiL8CP()sA+Yv6?IV(iFv*Ae5I@O_e1I@ z&F-({Xk|TU@6L{z#_DH12i}}C)qp~v5$WqO#fyUZxjurF!q@CWdzon&fRcBH2cSgR zzMj6ToY;&F-IN@mEwdqXs<@m=g<@KwAr@)74XFr9iM~~1i)&-8OLz9@9P4~~;zk4~ zKZgec=e%#w4lhP@{PlmL0Q{b2?N#<6$2U%XlDt3I@Twm6jku;GX`b%rkGOTW!CgYEe+QCL|@XrNgGy#P`F&CgAlT^j1OA)7q%rwptegf*2sl z{XPDlCl$%mP>SP-v}PH56%d)NkkTKJ&azL9MHD8+CNd?Piqt)zaJArV4*GzdRPkNP z-hb2mhe!M4Z8vp^zo_<6Cwp8%uZ`YVy4%t+x5IOz+s;7uVAe+A`8k~cn?>`RFIeg~ zy~99g&2tN|7^zU@d3vYbWm3(+yl)AWjpGGggvkQg`T#`zRujw2Ft9i#7Kjy(Px&jjA7rV%_K(N`gZnz9_?e? 
z=7)jCZFj8F&PSg6=eIJ;knKIw#f@(6?#OD)ZLEnds;JXRdj&+l*3a_E@jDs*Fw?+E zO=_Aj%1LZG>s8U^L?!ng>$csWX6sCH)H2~Yw$9zF?bE@`i0Bf%KFj>Gh$1PIgnO2h zJm0Pk_+#tVBF!FE5rd38{dpXK2TBAMT$Fi`Q2on)w2$Y$8cb67=H$$W9)=goG5O`I zB`;|-eC~4V>VFtAHGZ5orV>ATIB)UmU1 z(?{T`*!PS3*D9YdJCPRCnaC3hNXJrq+4mb zs3X`TK-GGg6L$2#azAId?XK$Z6c?pNS=FE5_&*7H=c-X zG4&u&rzCr;iQ8oI6n|kOb$)7z8(Sq25JO0Hm(Vtw*3m$-5Z_kzUw&kd04@61l&m-a zN$0BJ?BwAFV17!sm`m4o`F< zO*Abvjpwlf=p8<{V4X)Dj7om6=;(j35DsVnw#xG@yd2DGPT;)WbphD&N$)7YtEp0` z4iwBBM+VDr$e^Yr;}UHzUqG7WOr;-bIbZy1{e*Ri@}yYUK<&&i4(i=NE=hZt@m4Lx zmaP>X?42V2P+_2=xF3?tTPLhh=3zls0unB$5U~jB<<`{>TLp2yQ1}S$ZMIA$Ew30) zw|FpW6j(2g@5z3qanz-{(Wq?WB`D;V?0j!gN!2?0s+SNh$}-|v{~b2W+iLkpbxgn8 zqNcjizr?|{YYTQmNxA~C-lMRV^ndW+oi;i+Th^a;#GQniQ2(JRP)cujZgHId#vE|` zGM}<3cn-WZxM%|KIwDq*_Oe5QWLnQPsU>=?S?Q*CnWQG6b+G?iE324Ez!a%8nwD$9 zGk*!gJ+CgY0t89Tg8+43^~MpvnnjvBmH_$sk4r`W;}(qHFn;o-cCCcLmzAB8KaOukH|IOT2E^m*hAzi4&QA9m)+c7sYuzs(dn4 zWgQ$iik2}M$#u@Afi=EG)o`mc*W-iwn8SN_-o?o3T1W7EZKCVo(cUh>et#=7 z@+=dOF35SW{yhg|+ASW&^|b#K2C&P}vH>*2iF1GJ((t*+>YJFFgP!%{x=ynh`s43|$eQaP$)FcBPQ~*5ieue!s}%c-y(wur z$21WrPvY~KeGX+OfN3!&?Zv-Lt%w7@z1CauU^v*k1Wv|Y<(*hDbe>^EC_1iQy+e)R zOa!Mt4}>r5x4e<1T4$T&$XXS%+gGDQx>twZZeSu?u}Xxx_GGm6(j!j9D`0||pv8T{ zP>j9OT^JYe5M7*jta+E!?Q|?Yd&c!2)RFmnD6}6GHQ7%<6E`M0&r__VO~3~a(-`w8 z2UL5&%T*@x#AI9`ZG2ltk6@S;^=>{O(-HMOJ2*&=y2r`Ri%p1AsXkl6gF=bN=S)82 z-?~Q`%sqbze)G$Jw${(3DuZ^yd<{QfwEFT`5#y_S7uK_HGo7ENm@@;6ei4Q0t4X}; zYR+9AnAD3}#jO^8;eWFcDKawfU?%N!48A3E?PV^@^LqL9?40k`FKmdf(tSbnvv>h6 zCo&`M*|Y&}*m~W)z0$&XuDcFx15l?wsaN2nkyvo2>(yWlOz1(Gp8wvHzX7oTc8&I( zx&yr>lQj9GY)y8g9Fnjk7Ts3PVPPFhq1UJf(}x{3g&wHbz=SDyAEh8#cs+zlKB&6w z1r+U&z0u>lm|@u?9VK3@`#r!5VN{XG&dj#@F@+PVzlEpKPB!oVWN8(_sTuGWH<5yW z!J;k=hZ<(LyKK9&zU*@Bt@!{v#s9uODdUaPfs>*2-ox~pOw9|C_nR>>$n?z8=1jZguDz4xfmYwTu{Ch=TX*9GSU#7U9z)^h@01&!7n{?p52Z!vjdj)4T%7=X@(qb2xzJK^uZqjbo6|-|3YMr_p(XB!k||= z`1QKl<9X>GlCtni(DJc7(F`hk8{ygSVdy|W4Jfs2MZQ8RV z>~@0C+7M`KbRyuz>z7V!cHBP|M4b4RQqQ}jy6L)d3~Eo_N1tt(rF9Wq+v4xk_%2S{ zu^Z-@|6#d4-5 
z*@6no6XD=%=g%=qTY8yYy}NEsCGfEf(x_~M8!=};rQKN}bFg#hxU=m$^Q607dIy~l zy89_rz>y8C=9Dz0I+3^cZKxhB$Y%4~{qd=D3go{w=s!f||MT`L>y7(x>f^z$yEFA~ zjHdx!E~cCLm3R_vGrNzq$WL%?WtyYTdpZNU06|O9y#_&X7f46 z=?>|cfeol7!>B@kxQM>jG*^s3QsWTOqo=HKEr=4Fj8xyL?2q?uMwF=CfKP4jOp}}_ zM0$72f)eEjqn^^hq>i+E2+$`fjJKznkbyT@bjwHWmzTtiGQyG9?g+-$6kB>QdHY(*)*TEpV~MZm=WVB^ zhT+={daL;~TN#CCqA;I?w-rC(X-??Fml_RQQZr!l`wg~q@~BwRU0)@>$}TqOZ0AFF zv)(SV8o_V=al@_u4?#456^W;=OvtO-tVVX6>V3yjtBA=x01P)dor7E9LrW~9K%cex z;qWDxm$X4wh6a{Xy9{BJw&o4+*303F@Kp6mUu}sN@XX$^o|% zmcjbUw^szA7@N8TT>AjP$%82%^1Ec$%hJI!Lbbz9g=Y(&@f{Tr)7G}c^H2yNscUDJ z34fzhOUA-;67lrGlDd+>Tco(Du4gU16aA{rcNVFZ3G!w@R`ZP{s4pRsH~iqH8_Y?Y77DY@kv(mTiR~?Z(tF|G zgY?M}16R=NO3dky>q=5 zE)iV1rv6A|rd@b5h4RoWnAiA9c}|)OUkhlhlsR()N7?fSj_aM==_acviIP|)vbGAu zU7TF%$61A>bqdC;o4~Cz2k*w_7jUs0M;h6@u*JsED2R|WZEnQDvOKT42ZPJ7X`N{h zk1v5Ib%Oo83#myzJ--iqi%R^_9BnHKi~e~rB%9umK&6f^z`&ILu34Y2aPeKcUJfCX zv_Z&q=u#jV0H%b4r?oag>q}0W1P3+oYWtR=$S%4BbVD`?#ZKNDPzi{oRESR43^utk z#o1l6XNByn>oe}m!BN1B!K>^fCTa!?hx>LIM&fdOrD)wXpBAA--J)z2{vFJM_88w0HdeS1OhGx+&3wAzqhF(k<-=ImW@rG3P@ym zP|}v7YCG6`3G^5c<8>)+JnvaJ`@@oAT{ntzfFuojCGX?RP8Vj^8MWf;)T{qJT89qp zIe7z0{z3R@Q9J?`Bb#-|pp~EA^(Q7?uoiDbtVD1D`T>*a<}!CV4A7iAr$WfJh&2V7 zI6`wxK>$v8SBz`5vZ>sSh%h!Ns9dYi!`~^42kM$GVi#lAn?DU^N!|F*GJ8q8em~uk zi|d=$r{Y;o$0|horUJpejT;@m2H0B3C>J|1x<|uE_6N%osYZZ+Iquxf{#@MzVbkK* zB^4>MWx)aDitGU@7LBmcka|el0P_5Z6eu$bkMfrldwsrUqi5M88BGZbW5p5e8kD#A{7oH9!7W+&{msG*=+R zodUnxW#f`q@JCmpdxUqR{QZSdX$tZV0$LbR{_@g(6&1&OMOg)VT9J;Ww1@PqxlIVj(U_pQ*{<}p#X#M;4% zw$qcG{=hl@4uZK%iyC5TFlmD~<|&2b%ZWYc z>-#A?Jm!Fz@=8bQ%)oDl^b8Z%x7X0AB-Xw}trKUnrWNnwWm{kT6rq0VFRGRf)%{L6 z%;?F;(yyBL{jH$EU!XpKeXx!~fyil4ALNvL-CG#pT!J9N&9B-n0An z$1e^{yN@X9+f5KeT5a^^U^X$H9~c4O!ZJt+3GL%tTl32CW#LaGEsr|Q1i33RCG{Al zySF8sG<&ZmL)j5UOKURQD1=d%fGb-BG83SR*jDB`q4}ge;zo^{otl?wvZN{cdKJK& zz@OqWZNI$ygUGA7sTeHBgQ%Yt*kD>|zaRU1e&J3F6ar^k`tE++WrQTyon>hc&m zn+>h+xr33^xRn^Bc6mqrXNtkIOdqGQ@zf;0_<6vVlv|0$ zi;lsCZLu^F01e~>XG~k6El`#qMNa{OH5C1=iB)K254SxNLk%TU;xxYdx3R<8s 
ztIu$^VN{wLHJ##`%b#n`xs%NOwxw$sg{*SCWhY785)gQEkrLXp99b8HvwLhD5@nx0mL9uPgjQ%BvKo^WKHErIT*)M zNh7n2ro|u=s*QhSF@9lN?*LU8)e@8&@vHW%mRWYCAu(VyiR>) zHO+hQROjogxejhpn{%Mhu3rTD$TZF&%!~m($2$8=mvTl)NpiWg$xII*->jW72$}Fd z`HA!Q?-YJZ2JVW5w%)uNyzsy%u5Tj#;ZX+Vw?i+L`e6aatDG85lg|aAuP2r>vtESX z+U!}zejZ2w?*=!Su52>fZ`pxx8JNk`jWogqWUAkmDN;8x_I5#VY@+_DyTcw?zLqz} z>_~-8KcIf*C{z%VZQt2THH%c4$e`>gfHp#Am!xAX`XhSW@$1I8JAeDv#|CiS{%o&Y zfxt=zhqLF!yUSKq%109cY5_Ak1!D|ap-GJRTTxY{X8#xgO9<8-0ete|Ln;S!Ksrb}ihRmD{&p>W{iq&F*Y!wOX&NtgJ9d z!~j+PZ!xFCK1)FUFikDSKd>R==_qRx2EE&!`U3NX6C zX?wEh`g$r$iOA7^3Awaxpqc85}+6HU&{XgElhsZ6)xES z!xTAWk9K`u>)DlNXC(~FY#b5P8eznRnYyGc!nwOV7 zht|x?>6*YVFf0kQqIdnUp_wjkgt~NO!lM1@AUd0_Sl_n%NeVF^niFMk(yQ${rZ`e`Np*v^*zd-K!$A6 z(WAzt_#*}_KRPp`zoNRTBRaY$81+N|;5^{r`-oXyLu>Gwe$uIq7@pOCy1hWJG5BWk zmMV+~G~2t<6Qa;eK34jKt`bnHeP<;;J$LO`F3GQV(LahU@IoK!7mIyI{(d)k9+nr6 zGJi75TuZ9Pra{BQ`IH25#(G1XzOW*0YFQC)JuPj$UMqK+Om(&H+0cJKe`i4w zJjtA>vAw}mUJOenv;bswps$Vx3hTNW;@PIn*n&fL1wG+n2uihE@wmN z#e-Bh6?1Nzw9vx0jjN#)NoMJmk@%Sip#`f^DP!-OI)rk?b>7dHw_NHY6{@!Nr1H2R zz-LP&O1Z5n>%9SVaOtjDyGZoOxQrjY`d`l}mb!2&V);J7Xbx?@>OE?vXQZ+87GX$e z&Pr-8YszH3Ez;>W&%W~a>h*Lu-1?M3<(O*+HkoQIA{hB3avXQtiU-caGuKuH~(h=6L@c^>UY+k6?)O@?)VDdFb;I4lA z*{|@#RGqr|?;A|rbs$()&=`&OrZgPP&{#ADNPHibHd8qkc5sy3s78YaeKG(SjAbxN z(WrKS>jbK*%w?(Rata8;q^9=j=|2OlOUMuJMZG6C8=|^)QzTNhP}9PrxJ+@Yq|`9?mqDkn2Pv8( zoLJ>CNngLs<53Eej92y<(Rv8aiQ7WH{q}N~Tjy8x#ZFFt+sW)tAGGl5Hi{%*#^=2l zY}A=06XSso1h_hAR@hY8=e`%6W%7ajQE8BaaIh;2Sv|RSCuCUAMSi{8pkw=vpRzC+ zc9=>G(h2P{_b7WH63Lx(&sQFFj-uO%WvY*TISN;Jma#PR2lw8gw#D`qn0g`5DBwtjlA{)-{U z>4KPU-_Uypman=~^7SjLeN2aw90#naHumq@+kXWX-fTB`-RyQ53htT0_W*3*_Wu)I zv67zsUT|@HZ$lX0znmBxQq?IK2q9rm9d~NL)6z+j1}mB{`@W48nXj)$F!ErOJz*Ms zz(p9k{@EcIm+-IU1>Bs9-##Kfz2y2Psz=7d+tav*7wl&*4ZlrohXjqV5?{S$p&#b-%3@1&8E?e1pdVG`g#8N)?GN1X+{9#1uPd@`25z3&a=t!iB;2ds_tn|LsNyy^ zw`;w-X+B90`yZ}lFv^u5STuTL{Y#dnUT(6kd#Zo|;`b4v;apTr?1EKXa$Oby%W@v^ z;(Qu@I+MhhS`*zMW&)6m(_w=;$Gt|}2hTck^y}v;t1}I^a|I3XAO0P#go7ij`OPk6 
zRLzglmgCDV_i1Fc+ZVb*+#h~OsjYb$cd^xD;sd;^CpLuk=nns=|1DWCpzVQeUGwm7 zYp+^S#Le>)BrdOeIk9O4rzLl2n6p+9jhUjS5-@9lnL)q)Q6RF-9Jmr6+Dz5(;jqHjP*-O}WSy0S?LJiK zu17~1^r)Y5Pl|?f5B`DJcYNQm=c7&neG4PNz;^@}qqX3SD04a3zdO0kn2aMgSG#pu z?w=Gn*V-K^!Ejj`q z;@`B{;4tA^v2X6Jua7hA*#kp-vVC7mdrG=y&JFg^$O<^*oInNF^3@=P3)jv8`q~3O zuk5tUSZEHKuG9@s(;;ZEf`=$KQClh2=LCP|Rqvf;xr~A8Y#e-6N8SN=XL>%83~<({FhSfDZqjV~mVYZ{*E{ zDdX?FE}u>GhnJUEvyJ~-mCdC@oe}=uBb3l~Y{&r;l^AxG6{7B5H%4xE5>)$qGtw>V zm_e@FWuc2>_+gM!`ZppGmXp_G+P1!eioNbW$h!Q?x#wk=28>B&KA%RD+WGg#_Mdm3_#2ER^6AL={Qyee{KtM>VhHc&Bs&E? zUU=cW&&EXRw5F{lF@t+Ef>t@Ta+}-9UGLafrW9ibAFOv=wD7Tg>0$=W`C7kQl@sUQ_s2Rqa1)o@P8TX>(BjE1*H~_r1siXSoY}Na= zq#YQ^+ReK{3$v+PD73PvoA(DeL|DMZvZ7z&)@h)5Hrh(-0{F|8Uk7Ltc(!1Jd$}`& z^CbPkRchcU*1IEzXKZM?2m()12z}<2xUX{m5Y4}wQfjBwb#7a}@&rdIC2UD3b7vo8D>ih7WOScM5!4q$7Kvz+o9GMC3`}bt$ zV)Ww;^TByS6@##Ft-+Wvy;;LGCtM<(baW}REE3@M88kCwLi^PeUlnk{NP z+wdE>Z?slot<8rQ&M~Z{(z58^chZ`bH17N&3H5q%xlPRoN*u%3dZzhlob%oW=fJ^} zJ3`Q@nXx&Mo$jMe4Qh1IfY25=0im@B&1Qjn*5(m=j^c?cORo6~di-2Ha(+qPyS>A{ z)D)-lPPg!d{mTK-i3pm~rq{Z_1g7$vlm)4@Dc?6XoPWU>Y^C|WK*{^1PVao6gLZFY zmIZRs1el#%n+Ekjh`m(VYDf8_?LGO&L~ZIPIybDkgAk>sg0!CU>`z0!OC zhhh1@zXi-v%sKsW%Qei!l$lwd1x~;>S{!m5;DsG0IO{>VR|^N&Z@|uRdmo4Myw+;1 zkq4l4yu32Q_bT%WOZInDi~*f_45%t9dTya40!2!dxZ`

nu7yOZN;J;QaJ%Dw!?pQ}1oQnngycuW%ay{kT#Twk|v`w}aPUhJb1 zb~6MS%+er}W>;tZ$GbnCk!1~R?DtM_4NWuv@g*;HB(fwEGMNsYb67=2%lw{!QVkkV z{MX(cA}col(Qi<8KWG5sqovB&|4LD;zhkhG)LZXBpubJf^Pyksow>6x#EvG}Yk~XQ zNEasoyqL9A$;3u33fL8EW~tw3Q3i>5ng6B38H2QSh+ml#U;ziKyZx3yTLeT$iAD^&3hoFLD%90CD(msC!7JpGmH!-Trz2R$imuG|^7mU77-1WNt;>Q5 zLoCc+bx-ljHj&^54LW4k;iSrRvh5are+S2r6vWPd``L7x7`)9ZlX#5BV&k~SO9Iq*x=@v&12rF`UmMGX$)g_Y$29B9xf zQQ+pVE5&SViba0DoE;+3^DUe=lyf2%7PWPH1~TU}ogF4V`fBVYD^nGAU)1}XLj%JG zs|XVmGI;2q$GR}$M{aBOPZ;0K7;+lVD}|hX5N3moyLe5;(GDICl=$1}6q!Qo8} zIO|C-&tw_>XXnjtZ01w-!RslVjY_j-uW9{_lC3ptW)!P(tJ<8m$fFwM9vrD$*clpL(pKd`Rf@25%K znJ$9W%_3imIFRKEwQE#aS8oXzs@>RT&ukS`Wg8CHEc{1;ErQBpYUJ{AlRI`j;3;0F# zVrkQxV3r$6o?VgjmrzqKGUt;@759uGjWZ?PmPgvQvR2IiG>Q)!M9CM*Y~tFA2hhH*Ir@~%+hyG z0=kY|StZUaFb_y5Po2`9{II(?4CpF!+wKs($V(n}i64CivVcs{ zlA5(Vc{lnmjd9Mb)~}iwOpxJCri|=QO{hVI&?U$a8@}8=#|jS9@ce7a#{U}DPrq-za6?h zVQF_HG9j#^DbnoceS`WogbFh~&#g#EhbnV^>~H@&5kn{mua?}c20ETuazGBp6m87K zPhWqWFKmHBidLV`*8tt^%FPHv$tebTE!0tEv~2U&>0m93~a9pTd3!AJo@v+n+9&H~ISglH{hCE=TG95p2s*wTJ3Co_tv9=j9H&2!e{n}L zv+2Emdv;&-Qi%fa6dua-H?}xL!3U@^G1sDUG#EvD@)-G%*cF8{mt}DR zcD;+zEmDP)G0Wv^>e$Q({qJKR{Zk_LdrgELJ%(q2XcU(w^sy zPC-_Fe3dJBSp9b6^wLT!QOmE%Y%PxVd}>gttL9tEw+}7y@vEAD&yKHzuZ-2$dPIzR z9P}tHZJMBKT_YeBipWo|?nN{^EzgG~;Y@=FI1o@w0+Nb{=slgiyL*@o6}uj`+_nO; zO2%=B332hqtXSKiO~DzEVmu}Gqj~5UKjbq9e-u==uxTB;4#&v6LzaFVc$xLvq1|-$ zR}-Tdu>AdRR+o&8oR#=^g=n*8(k*nVgb9DtMNQia9q|OzGJc+yo7pU+o*r1li?@@A zneH>pZmFCb^;Se7qaW9Qq3C_jI@*3rJki><=emg(;}YVa6~26zhcqNKd*@<$>cy!7 z8Rt4XGJcz;3OyCA4T5n3u)f{@$geCef!voX-+0RNIsH;&Grr#QeNKq;n1O@ zpJc6O*(O?^$yBdShw@2ZO$hWaBrb4~XIDZp3xx?66Z&!lz3{ThdGOr(rOP6#-S&7w zZ;GrFG;hB2F*2iCC?=d)T$t$DETLDonZ2_(7QNPjii6*O=Tfe{^t!@)$>{0m^=(Ep zJi2YW^l@8AqIK5WAo`82DezM6LF+Dz{63YOnfs7x_dYQUk-;>EA)|$e2-i_?ay6&X z5AyArENZ6Bmxw-egtye^SDM)7-n6)9R)lm&Juk>SjtVf~JHu|*_3<`j>3%XOTV)$TW0gZJAX>EY=ks$JC3|yPJwSvAB zwextVI%atj`FLp^a1u@;q*RKUo>`uXoihub1Zvmy!MnHDl6#Rp?^lKDQ{c`s;+@LZ zLE@@Pripn&%MC_ZRbf}A-YBL=$13oc0ydExD{Fm@d= 
zS7zkApvK`OdXL%M!afg-f^T1pj{~Y|79M5NkX+dBxUnBc18UaWJYi$;<&QC-<7Hh; zp#SREB!=+32!K{W;MJ+%hs)jOlj};GQkIY!vYU`boQhip!IuoP0{Z-i6_=A(MfS~D(i)m#At%0hr(OH!JxGgruoJ(!` z?NIE+g!pjqLP4Swy^_z!aU)jU+vcoi9;`Kb`Jvfiz{w%!PE=)8GqY#SI(9lZ9mun` zz0zT>B3Ew}rZ<793bB1nnUvDYjk|Z`(_v(jg|C=4dvJfpu5(+KuiAa{{SFCSsh6iAB)kf#9oNYk^H>7oFU@(%NWC4F!BN zT+5BIrxDIdWOF?{vq4HIDk~}m&Pb~0U*@j=VOAYFd{sHF`3DAZl}ZS;IUa&+#I=XH~AwZ zBg1dLQq_(pk!`bZMiYojMCaL+2t+Q(4DQv9vr@O|f(Ks>D+wvXH8icNG9Pcmf#zY> zkH{(005GAczXCX;1v1d9Z0V?|q*j9pVP2`zQf>@< z{n^n>&(|j}?fS^Rck&zcIao}a%?Tr=>iD#-So0H$&YL6}ZdjTQy<3j&6WW?{N|zHT zx8JyZCHEh|ynxD9u>&Qk2)GM*zSem2Y!{=H`-7rC-&D8%7dv6++=AHMC+JC2$Y13$}O-x)TaiTSD zF~(hCrcE_*otVZAl`2uAh{k;dr%e-+xKLx<7p9F%jUYi3R7C0qai!4+3Mf%rf(Y(_ zB6#l1^IYdY^PCswoHzehzG1m8FM-efyMN2~`&rCR7^@7@?A#2ZEM{khhen9Wh9(Ak z-7I@^J|XKeC3gLyugCjJEtE4O--eABU0LWzc=8$xI0Sh4{s`3k(2Zx6EwRDyj0zwC ze77R^gn0i0rTHwiB#Q-dZRt99P8%^xh#2phqh{^RPR_OX;~@1wH*$F7SwV~ks{`L) zSUq1MbiCr_U;4PRvi4tAfy^J>Nzj?c?L%;#v95$er*%WsS36d!IG`&nKZ#X^E`Ybj z@rUtXMU#mH!j#p}=DDf?C07(bL`|Bjb~#ZsM@hR!j-Bo`qt=d=n_O}`Bkp%N{cP3I zAE5UDAKJ1XpZh~Y6l-35fuJ)s5tpc`DaN#`=!|Te`tVV_*y2bnd0AkqBx*#UV<2GPz9UuTi5XP4vmSG8xS}zTb zjcwR%X=(y|qokPkPdSF1&75i6=~iZ{9blwreTNmTV4EMQ{YD;3IJ}ip4meM{$K>wb zW*w=CqbHd_LLwy>unTkstIiDrR`JfjAGjyb7Ow!_)Sx1KAJpEy(zbt6p1kbCzf#tK z>a3y}C=$X;JTGHm&eMd)gYk0>zj`D#L<)r-EX*GC6a$MYj@vp6<)^02n+FS5XeMn~ zxP&#kXnSDTg9I3eh~*)A(iw6$pfAZKxSt2o5Rwmn%w4>=5^$u}aw6?WZCAe!Ff#<` z*s61BO+Y?0$xwM-dZhLhW%Nkx+IH??IaoRH)0*ls$}LSkJug*7vVuo!27(u&ZJyv3?Fk~m3mMN)Mcc$;7@3#fT4Dg z;ltYMsLrh)TLTca{q35imj2(Of3K63!oy20Wt&MFx+e%q*ZI2ky+sKM0Sb{8Yn6A) z_jfWf%{qq{Hsl$IriCdeOoX0PsYdbT1x%gnl~U!j0Q#FES2+MJ#UJ|YV~*5Z4uFRy ziHS-MwxFrL5Q7)(G0Jm*RDIU>SMY(ol}w&1ceh3h_P2!&^l!I}f$HB4KOn2`+`=Wl z_PwTVEy>-9&y~5s{?BmW$+f1{=;bFSHzdHgPfwyL-{*PH$bRJ?I3KBLbR4NIVBk09 zJ2}MlUcrV-+|*qVdh#H5NHO|z0+RpcpzcVGtompcf=~GUAVsExHi$Nw$a9MMdE?d9 z{AJ9E@`dF6WKnCc54ad@S(O1~m4gkVs^mW1DQpq?HU6d*z8V%Bv%w^b_A-;W3MbGW zngL!86N~IxQ}7?tAB>X3Ok==kH}wuy(4Sl%iP}x<0=Zqo1velk8z{-OF}`z!aG}TM 
z#?I{aT(Eb`s6=e7<>WSyiB)R0o-K^-x2-_u42TW#v%a^{bPl;`0C-*Gt2$~ZfGr7r zNdd%7QEiXZkYXVCL@hN}p~~q2uD1*RNbLemIR#wXmnwkDp3Ti=1gHjzMPNW%`gH6t z_)cWlmcTQOSYvjdDE6lEex=(s7$p&>HjyOlw$%eyD}U%-jQfX)2Y*w3FijuQPLpMTcJ9g1E<) zH6(AO)t1i$B(^xPt~NaR+VhQW4ztC{qoV;H`_pbAGScI~uScID{CwBwk6@5RQ&JSE zJ2R%8drg^JGe6m#n+n`6uw89x0>YvkU7x*b=?&&BRH_Dk0)k+0!0mhxvB_4+L%G0B zu)UBgXCOZV{E|c2fGihRuyzg@a?iWURWH&jfXRAL4%`d^h3AnP zZU>k=w~7ydS>d0Q`+Ih|%8Mg_pjy_IyETVaX#)YnNv_e0kC(=I!M@4MR^NOVrC0?F ztsOSN+=$6JGo_xevb1$_?%A?0*YIZ#cjf7p?>m)$QDN9Xa4K&lA#+D1)9Hsl^Q2f*-R}=BY`}%*$DnP_$LLsV5xtHY;s8?^0>u4IGDooR8M=Sa2L{$xK~TV_`u!`b%!awR!ER7 z=K1vlCAt?}-s%^<(^EE2*o~ml$gw~ew-7`=^l@r}7NfJthxBzzXns(CW|oS)Het_p z)FeYLA5?TIihM`1J$S31qElh0K#7jJ+Ob@- zW!H~iUYH$AnOdjcn9RKty24mqMb{5*ht$QDO6lge8S)E=oL+uH`KB>^2>lRTbfl)a zW1Exrj}*J6s$}IgF@IpaZfOJ)wLKQc)rMtjN57!Y2RCU5bmM~hF}`NLFW5_Nbu`++ z^cPgO{nkC^8(xr|3*WXprhoZyJ|fLp=4Au)T+3^cw|4xH?aXS6Xx?nkiN?@MZdog$ zZn7)AbaS){WwD4X(M%u&q3=wVVj4{Et}y@cZ@WP~_u6~vA9M5q68f^O9&GvaEkF)) zJLVG&5R49&rUHpI^-ZmL+-`P{mpCf**NLm8rb%y_Z}^oqi9DP^y45S$=}(8o?&(q| z7-fBk#1-yxuUyw$LJ;DE{>xM_f z+;*k|$cF>ZFJ^4Q@nr9D6JwT{)`IxGg)o-uv-b_s&y+qE~=UuCt~rLPiZ{voG#-I3MbxkoY*E z5SrSZ6|n08GOT^8$cxs?`=E!DKsvKIN*+hsIUeAPSXfmHBwh*`e;Cx)J5a8kvDNCq zDN}l0^L$7N_%FNF4|dxRLK)|01hH3jYU_3o)!>G_mZ`UqaZ<;0Glhj9JUp0If&wPF z=CiC;z`+Zk_K-x`t3s1SW7gk6vUhc99N1XxZV{NhBFy)o3v z{fqu;NYLJqS{`!R%bYU>G{!`Qx0-j)`M2o!N)pqREu{lj_cQ8BcMnK-pVOsgI{s}9 zcPFW@cP!r7M`u2-meiRDZf!M_M`dHhAR&WTAj9=O#sb!|PHigtsR`+Fdo4z{dz<;7 zE#NH+RY3}>_d;5m!ryHSRgcvn%tPk4B?gu6#(`+W^39aV`LqqY{w5po&!^}9ij?sc_}+B9)vKh+MGkx8-1eOY9sGCQU+op>tkw||>r-oHGbHxB&){9z7**125P`Hkcs z;_)WdE6R-06b@w&NEEX=(X+ATx!h!()sL5My&A4*U0&kQf8$1Z+?K6o9r!<`OhRig zK~%Fw%PZVhD+-uFv2N%g*L<>gYV>}xmTp0?yN5q}_+jzF18ny5$*|XwAkw0>53`sI zeSF_z5NH7IZbO1ZHkuG7Y3=9z1o`Uzo&YyqwDDrM3S;J^_+0zSdGgvL&l4}Lx57>7 zS2?MmRm+h3CIGwKgJJk}F->{VH)yhV&27Z>%KrLpKyQn8Q{u`fH)pQVXt+lKiNVYy zShE*~V0+vr;vSQxhlHI@>KTdZ45A#}ka?;mm96bng>*oGVldD3mj32$yvV~mNJtBT zRwj&=BZulUUq3M9r&b0UVR)(#j;d 
zYv*J~jljbn3+Yzyi3;taI!I5KEBYLR-VIHzssQxTCD+@B+8}ct-{3%+T9%^)3;SX4 zL|K>#V26#FD=I;NDkraEkv7(v8$r+k=+G1;b9^rP(hj68b-C;i13sCCDCvfYcVJ%ABJ|RV6ER1WX;nI zmRh88>2pOpmg%RA+B2J;|9V5YKN4@BA&RNVxPUTXYY=xJS(%CcDMI`02|&zOC#`GN z;!>Y;=UVQ+y;mI@(M~U;3GUFuZ9b$5HoiO%n|;B67Q6DkKGTvr&g^T9F5$ZJ%X|wgaMNJRZU43Ysh=qvk^P>(p#U6t#qJZXVG9aT2s&gQB#;+VHB`!Fv&frI^ zCeB#-+R2eNd+rviZHpdTnle_IhLdU@fxPVZnX{C zNr~?$3WJ5rWL=Az1RK<8op1_!9Teg68+P?iEuhedoLva z$uB%`L1IR<(cXM0WPrliT z8pv82dRnp&d#8RJ5hV4F3jZSA89`^dN+~_*LtOyHJd6nOht1m78k@YUNMyTis(n=2 ztF2P+;+GtfAMRT(L)FI@Q5M20iCx+{3og}KX3$r_S;x%a@2{q3I+`gVapHvAAzfl> zIe`kO_RkV@U<>r+7IqTht@e2C_>tNv;p(~W-6ORrdXy?#f63EV^VI=`j zy1pued);Qgg+P|8IHe4vK!|RD^kygK*GN9Sh5kqVP>biaA+nY}tm#HU*lfGX1z9E1 zvcnw${=WCD6JOp-By7ICM_6ABUswzx7v?|CFO0WK+#(Rx>1X0#GsI86`s{(}S^E2W z$4!sfq^0qvv&3Nbc(ykV?ls1VO05K>Og64j(by5Fe@8>avqw*^#c1kZxZrZ({DEQK zf76pb`i%A$3}6IYEdd`ocDHr3%6h(udPj@BU2Zb=(4qn|IGIMx zH%v@A6j~jrIUpigdmLZznYs8|OGa#L=ro6>FpzlhfpgJ4c_UXhY%X2*Pnb0pu~uk|b!?p3U3L&|(W-kA zHX1wihY6Zav9apEdS=!RaR;^(+!dkBmoB7@E9}D|>2dQNNlzb)1^`LBH%oy;wcb)xbGD%8 z4#Ng;RswjB6&0Hmm^+L%=JG<|67?%#Q{M#!EUNB(jZSGCFJ>Dv#>rZgC z{-%$L@*Wwr+xUA@;Yt{&XrfIlR0iLbb~&WoTi7$oD~IGJ+&}s4e%Agz=eybO#8x4= z^2J5$Eklc%P@N5Y)p1o4zXMh!mNV1BM;7(?c+z%?gWg+u(%Mf zfEyUY``Nzbz6nov0`X9b_Q~Q~DE>Rp22f=#4p~GB-qhy5>3PCi{?i)eMBYY5tJ}A* zgDIm>3%a^RZR6<~)E-4XNr_rFyYW?0Va)BaG+I_#UUnR;M6B)`KD-_UHbh_y?xKI0 z_w<^4yn`qpc${qc#o3{$6!K_5?l3s=}Wod~QdI%AH9x!S`?T`Dmh zu@i5@ToN?kU6EUXwMN3ae8^!K%07p_`sJc>d0XrI7gLEiJ2&Hc22DCCsOI>A= zUIjW*75{dw$&D5PYgsov{_bYmmF;9Iswj0VuwQC%z}bJazqi=Jh|dQ9u2>p<6^SD( zE#n}t#!F037FXT?8w@E{L2Fvx}ztWyt^D2oE~Pd_y~L%6s3atrYl-O||D;t_;U zm|qyd3yyPRIJ6a%ItRGNWYrNT~{q=v3{tq|BwpHSJBCL`3-6FzPH%E^eH5G(G!>vxhTA7Tl z$F=vX^HDa$lE>wRY5o#Ot=YN$+wb{ZQm({CY=2gk4dEJyPK&a_hyWLus0B>gfH#V! 
zwKdDwHcw#e6v>DSim)gGY>QkYBC-G?SOjdSnmJZJh<1{z#8mcodrZT*ZH>?NrIO4V zt%@LGI*1v(E&~eF!f6+#rTvvF_qJ{Q8W3wX+tinwg=|yLlF^QmKSA9`$YbnNqUgAV zxBEQ#y(~=Iz%uW<$|nn9VB zvDx~Es4qCtI{|e>EyLoN!z>{SapuvF-%?^*YlC!}3{bnx`OgN<|Gtv{udg(&?ti>M%Qx^B8MSPi z{M^m%5JnrUSgP|&5_>$hscxptvEJOsgG?kCpAXvH$>jHLF3 zR!|AzPiMBZwqmxl>OvL)UWhR{L>^yVYbfc7VHqg0d!gd1Z=7%#f zt8~e;YX?Z5ZXJ6vFV!pM1y$4LbR!qAWwWp5vKhvXBt~*V4(Cm4Xpd4elmzPKr#)Xm zekdO2P59_0cg`+hhDntKARn!2>Coost7u6A-Dt`1AH4D5wK*Ubjo!+~cE~yE(YfDm zk9GQmJuC8mPO#-zTHnw5RvB}B!)jnk%A7WHzP0-x>-@NXg#xW%H4q=Y)nxo9UO0Xl ziEE2&M`mX+e`keYe%2QdD@px|(0TA3|06Z^ojv+>987fu)gB|Y8srbV`_H{1kMx*r ztpB>0-BME8C$~$#E5dRgyx@1CvV916E0(gO)iH5EB*<^`JVMKE_d@y|?e*%e#)TXNEEk`m6V?4=qQpodmg?H3G&>++ z&ds?J#{Ee^YW713Y3jy6=TMnRAnWRO7)PWJ^F0fL6xkQgj?p&~Fkdjxb!Dv>Xw`YP z<$j;qiReVoQ%QWb+2yxX_gUU#&}V~PXakeqG6RfC6`4{=SYKCE71Ad%e!yZd8wUGfvr8nBHwzKb6-#n)y+1;0i|mjXV_T6AgaoE+j}1cRY;?L5D65h=q0F0< z(=BW&(H9FA7PJKGGHT3kDAV5Sic9{@^qeoZFAPysoSESj{bY)$Kv%jNKg3O6Uw8hT zd8Ut2m^Ob6ev;4yNp8Z|w}{Idi7UN#sDF5SA^IoD{3lZe*7sAVSiq`k zL-RRHA#`^5w&?ORB>WKU$@*zY4+auO84{6^ zsgYx+XgSFSW7}|K;mO=&>`YX^i>y_X3%q7L}v1e@TF>f!7BLY zRg~%{T7aU6%#p^k(PCw3V+s{68-H_60kX`ZYYc=oAn<_|2$a_EMbm$~!iDo>`MgQ~`yl1iMUp&?o68b3#At!`w+G>1ATM$wjiQtGKV~|OWz@twc(fZYGK)DGJlaZH{elkYJa8VQ)&_oOZ zZp91MpkeeKT8ZY}Dvm9-<@E|f@L+*l0>RNhLPRTfP(|l5jjX_iKL~9P^M19Xm{+r} zSgl{_rkGiITBv)^dR(r*3i0;u<(UQrsHd9+`6p)G*b%JZo48&fR%x&yUXS{hPgv>a zEb_BGu3h^EmVSD8>w-%vFMRxIbkooidZGOs=&_XPT*jTSWC_S5k#!Lv&c=ovG-Lw> zI*BDzGecbESLt6Kew#BpILqtk2bvy3b$@HDZ`e)VcjJ>fQ4@d|X&pE2)AVAWZqe*f z*W2D0rv&0OkhwPy4AfJ@8&|}OJo3l940{x3p>fm|{Lw}a7jVlM8t|8Zpp?e0s~jM_ zd{H)^?3e3jeFrV`{p&7!5jkeHB3R!4cp0xV4abE(NNU%&?MtgZM1Kr37pKlxf{`E8-=xg{>qkGTw#fQCm#8qn7I|=XrM1LBOG`gu8ZhE>@{Z=~ zrxu3SU|t=m4d9=F4Re~1lo3I7{F|*2$fwmc0aS;&78Cn`2fucqpyBstyVt*jQrU+^ z)0L119Dq8=8LOV&9UP%TQ!M6X|KwktuRWM!gmYlb{K5^|U)GsaQh+cxwV-KGf=E(3 z>2mnxp?PP_Ammtj z56RhAe+Rgp)`D)!Ai8}^K!~hW9+!a0||ghFFr2QG=c56ThS3M#18fbZGJZwPU0tf-091hoNoW7 z#ai-yNfQ02x9=_&<)rU_oa~(0=g?%dg|3S9k$jUDYC#~N2a?)aJq$PiU&x5ixi}N7 
z_^iHJ52=@uOAYqFg)7;ARn$}iGE0_XHGrgOd7poZ2qpn=jSI>x_que<^Co*B;G*MJ%5`gx#(c0I{ zgv0QhA5*+}kEGB?`m4CXL7?Ne;jr=XOq#}N0tD*Uc2GlW6^?ZF!~}AlJ%{$}J~It+ zXMtb%NoiyDzvwRqGe4Nq19_3AyfV{IbE9fgHv78dj#Pr-IuWBa{d-%V~$jM8P!8P?V zanYM$3b-kr5wgd(T47&w^6_8xa4!@}Z#R*afCxwxkOm@BT%tRw_SWa$)GtelAO(WN zLAXu#+;?0C?GPbcPlFg3mMw{w1!@THABj@Q1l(u8Xu5kNs`;g-yv-)!}y9! zeaCq5NNpnnwlK9;4H*aMxMx>Y)PkJEP3{m)v;B&o*C8cAZLP+W1w&(qzsG`B%i`C$ zrO~l^B{hK z3}5foAAta?llrz&vhT-SmlN^fau?$%^?9O>7t5APLpcfwGk08?0bu2(dVk*4r&xY4Fj@XfU|T?MED1FK`6(tKkV!F&g_Ks zGM48r?^rZ&yBww6MOoFp3g>;|-F_ORyS1jYl& z@x;v(c`~pMcyPA@{i5Xhfo4~wyd<1?X?IVBytBCAk=hCZMd!1KZ8*qoQ+-0@%(iwY z`~r%$_nHuoQJj+H?^Z4sB%V`TB8@f$;jL^RV}2phgJBBc!OSC`xGb z9%{>wlb5;&T`U%)<=JbSbh-RfVmZtptk){j|FBr&{=%Br+HWUN5o@g(5Msg7Jf?XtMSCpAWTp0{k<8;F(v(r1;|)| z$3ooxN?fv|FHB+{f{QMOf$NfTYRMOi)V(>Zq<%s%?0;R^tvCM)pp%_KdhCr(`v%x# zDmE&35A)mC-g9<7fg| z!r(79BI;EnV>8L4>>JIM4*$L$0p6-ll0hH$DGa|Z8OqjpxaE^Qv=ezFDzP`3k*fFn z>+&!AmpeyIe0v5a*!*XRxo0s!TztdDVARO;Y@!R(>bDIbGc=nzFk?PM(|``Xq{cJXJf|Q-;2%WKbv%{9e$%FZQWftuz2i9>R$`#?L-2RbJug6Ju`1!MgDu$jP*sm^7HPG%fDiHP6RnLS=vPK6J^yOs= z=t;ZHOv@9u?oke(e{Od^Ub7W!!h92zAoj`ixYG6I(qusVExyx8ZVw~-K5HW|yX7Nk zZ66P?s))?2B$*{7({?f5+0GpfqjYbyejB5z_e{an9maIb zW3n_$&J1`N-1Xn_>i>qj28Q@%jU3p~$AjA{;!J(CFXj;k=*%J9XetiDYX#k=kXju4 zX$?r%7*!rWp^GQJl7ndVTpVqz11OnJz6Eft&kye~&=xtB@H6=C2xD;D{z0n=kj1Mt z`g*Gs+&J1570ABQ5sXNqTv%~gjbC5)ks8}pKP8%#gNXrF{%uKL+0&-9iIiiV1e+DY*8jK-CPMW7E<>_WXt~v;{l2uo@n+x7&ornkn=#r^l1^ z&aBz<3^tO9^NfdH=MsQW)%U|l8DN-91mrY1`YYy{FPTg8gjiL>FX8&g>P_laCijb1 zNXlo&ze_{8rw*Ao`pnyFh}B(R4iQD^#!HA>sy_o=dHd1Y3i{!B z1-r$me+-<lioFx0Ze4ajyj1az@Yt8QnBbAHy3!e4!^xZcz!lm^Xvg46VKnpsCNVEidM#6B zS7I=WvFm-864Vk)7DODms~&7ERo7uUmdg_fr(a8B_~6>lA|2qqw1lSCrjOjQF-I z*uSI9l^N{UzS{s7`Bd8rEhM@T62O&-#Ve_RBWj zuCE^wz%5%Q3P16Ly}T(SA>R|q0eFBpmGl0^)V&t^wiEHmWwgIWRT{IuL3yU>) zR%;fd^iZ{9!LYM@)l=~8EAYeezBHBLDUf-BP0R*+xT~+Vmw5_Q4!gfBBefiM616)q^g2GGW{~7kNMSFeY7R84-#P72Gr1 z{qg6BzvJ8Pm6LXrW;gtJKjSkKF!+ihH)3erM*fT!qv%wbvsX$53$*84AqDJ03*q`# 
zcdp4O&}+^b3D%B@@yU$!onTk~Mj~3(jCt-GnYuARqniZzk7w{7)S9*(eAdvJJ7{5? zi(9xU47_*~5=F z+9scCiz3TrEn=ihGG{c6a;m5wqXEl1qmvEfAb|Y{>PbT0%m18{{`KyI->ITSeo>hB zJAm6b|JU8}VChQt*=>4%*LY`LQzB^!uuo3%6YzeJxY{2k7|l86dVXJ0m{zy%_jLCe zH5oLqr`-$Ccae&=Ow`u}Ogq+V@_3ft&cZ(LCRoEf16lK|J*BAj{F@}+iIWomt%c71 z<4RSGLDsb~6pNGDvd4<}d%%*m;KR4%J4~C%*M|&}%ROG$o+32tkMpk0zgZ*Sl)@d~ zWlrxWXYVwOOieypk}h07nBt0p{cUvHz}FM7A%6%+m{Ww=)NvSvV&5)Fv}&J>rZ9Yu z)Y>cAt-Zr<4jKPcHuYB0PM0UP4Ff!itwe^*5yV%WR!5G?iJ3nSh>9zxTnAn&Ut@`; zeylTWqvE}2s}X>+8}Fa$?3!-8_){TVXA*h~`3b!P-Eh%k(KLLOAWQeX2xZzQ0?pa^dkd$?bD6^qp}+SneN&nh(0bhsA^$ z{>OY%{qVG1`(6X7ofOavG+98=dFV8Nl}E` zoXg5RWF&xBe596kz$GMvkfWF%tdQg9YAiUw>2D)CX?b|0_yOf!(MOSv6M&Au!>DTE8}xoILPQt$aXEq-a4{%%d;d= zKXspOFQrzFI2eXPe*01UmDnmW*3s95y!Iqp;pkJTBoBIRsz_&9*YF0w&(5`85^Tfj6-itJS5PPQ z7l1?ozEb_NHXbKB$+o2?)-UUmUkPLDO1tvP4m4~Il;K!LQCXuY zMuR9H+sWTYt=f!1D(1zcT^S5Vgsfrg`?0=LT6fU~y&vPYm|K1F6`@(TlHn@gZ49gX zKDcxzz=YK0LdbY#6C*T4S$(u{_7Hlz{Qh;n-U@DZb%wtmE;1)FpK8>^%2cnMD0 zV~NCk$ly0tn+LCKE1e4mSEvYui}k~-Yi_lv92cu!kflT3NeQOq$U4_9T!Za__0*`A z;WsNlC2leyONKoVG&`^iKV9qnp#OmV2;O}CZDRaKL%TI!qr)cwkG=q1?|=5OMeCzN zZlPHqw*l^PYx=YOiMOki>Yr#9yzDvt`4Q+AF+TYYvuu8zJPZ%cVp2Qovh~aLf3WL8 z=>^(NZ#Fvlm!e;EJVLKKQhrVP$NS0jL|IP3CVG%&v1I|L28@o!ZN^(@)(1~dZW+8A4z&tAxbSfBLV8*qC4l;K5z6fv@V(d;&1rUqQLZPxyP76>GjKPxpm808Cv zK{o-ud%X?Y$3!oDdGlUQ{k7Cy1j#GNIAx2Cz^H#IV&&TGx)sLs?i#hp{Hw(YJzU@J zN;81}yb0>Y*PQ4sYqjnA4N?h+~lO#Z!f`^1Q%EOr5?yQ{NdpD6u39@AL z+-Zi^0r3oa{V(Jp<`3v1tJBhhiQDySpU(4Fo#3pt)Ep+w-j1SW^WlZD@ZGkpjE36y zJe+x)`5j1uBIxGpTV@8bjo>bg>yuRbzSP?m-dWlOJ-qSy|Ho5M@$@pRMVJR$933#Ioun4_07uZh0)Sxmufhk2fp4 zzJQ%|yZj^UPhVXHYMy}F3Fdp9Zrub_XZ!#|Y0adlsMI~T6*mJmP zBywIqaqIxykDPLPF=*N>0@Ypdp#P78&5HNYSYHV{)mM9#t|VGXDq(vBBL0q4L1SqVSvoNXH$6bzSrmUUk!J3p)Ukgzh^AK8AW z$3Yq9mG~4tHCoFb%xq~a!$sUQt`aIbs;Fffu=`C@a3V>8Yzzbakr;p-e8rquPu`Z$ z>6T~I)h#Gpj+vSEO=ltG_$xKb!G6zhv2uyMF0%2Ygq&%b4=*ADwii!W9auP!=MZ5Q zSoEIRq93VX6zwOR-SS5~ovObEtRNswP5r279vMA|5p+?xah=R!=${Y6g2TXhY4p_x zavB+}>{`-}ITzA5E4y8t+_V|$+RU1Jmq<%Zz7p-*L~L~Joa=vuHLS$fA?vsao$JtR 
z_F!#fMpg8(P+lSEe>39+aN~RSu#)y|KM-p^A*MC<`i774?C4YZe(M%vdB8BXI_MCd z-2@iFxSKuE1IEQT<9D5bfmu8k?=*^*HWbfZoaz7Vw|)X#TjQk0nK$MUixCks4=876 zI$0KUK=^0<N|vH7C2e$SeOzHU(H zE!XR0oN>83RO zjo~0W&6=G`M5IMfoLf&BV@D*>Gkms!xkluuB_4dB;qCRF}L zm&KrDUvD;Dwq*^p z)B$~ZwFjO#;KGMaVL&I_4%T z(`L=AW5X`IpTWGNf&8|V{`NWu%1ULK*2|0@bqQOq^A))5fW%`hHAd0jrxOoekQ;(| zr8ql-(ZuEzUYKJ*)1?X85Gk%A0K#eS<=YiEVk(ESeTX@v%IW8n{NGU@{xpnWe26h-nD1?S<%B?agJWS;s`=Lj zBZKFg>QGj+^M}z_mNN3px0r3V9Tc62rC3MVmjbTUb1NjM5k)Dx=j~=-G2?GYJ?U8a z?QRY7Vz8q5T)+SdP-cCd`RPrUMsD3q{8f=QH+(b{IZe_Z5?k+bo^94fIKi=G)cy3j zleyJ*W}};kUS(L-*T8n8H)LR_H!Ef!(15$VjoC?3B(B*kQE7 zI7LCS<`kAit|Qd@hJveo|E8F7-Q__e?;Da3Pg$70FB&=yZ43-RC)MQV(~7!+u=5D74olZIac{wp2es zx!;aFBbLmMddo~>mK=4wyfQ_n2X3I_V66EVG*Wz5sC62Py?h@x@*hL?|L2T zCwDR)x|oHO<6vRLNn@jy1!ODNqG#WFk}VsLh~A4bysy&R+W-y?4!}v;b{BK_kyI7B z3J9V3hiSfwCV>QEC98qX1Ny_YwG8~(-{-qZo(rn>lH`!aY{I1z-RN^8uG&a4m zq*kG5B>*+_rX9;=9`0hgvz1|sOwTufI1ZSUg>;ppiyOk;&Q*JAX~S^+FHSdy%!y4U%mB<6C&PIxVH zXA^h5@?bGFV>Dd;d(NGx<;5jIjAHa=wBe=n4fvk2BWuMtwRz-9(V1_h-KXkutPzv8 z1@H)E^?WHFH^QE5=gfewpBOVR_h*z?X_QG@i{Wv69v_Rnjas8(YE*td4gnmXfLH?(@Lk4aegJZh17O0GdFhK9@)|{ zFu^FLRB&D6Y1MIdJeYgO5GB5(qXbwIp&j7)f@Y{kM{dp`J4?2v&Nk5>PgWG@`CqRf zovKY-7sgHyu2M;RjUmq}YAw7ft$=XH&xakLUfD)J42LLOi<~8JmQ@*|ECSXaVPz9M zmTlbe_{ZnZTU`I=9dY8r6>1PmzZ53U)@^k#1k(>}=?WBl&_Q;IAl+*?@MpZ97ZJru zohjp0Nmh1leYsFEy(h^i5=adG*O8jm5B!a^AX|25Z%$`)SZyjJc7keze{JwC(LMw< zfHVk|eCpWJSQS=E^OgL9+&REoY#SDApw({`n^T!Tg3Ns_m$qB^BkbLT(3mS%@KL^N zOG{B;CKPgg0d6lqeQ;q1PsbY!y1NiTT;e$yn!ws?u& zIoCk|N4rtaZ>4Pdjb?)Ez-+*aOPZFcCISXz};G+wP*U9!)FTeBO> z$L~&wA&ph3m>fziJ}zy3B!UY9!=Ai$5_z3%aoQE(;QCqa(4XHa&X@~=h=qkwED+%H zkY*l8h(Cj!A;x||jvcS|7oLT5#Q%%)04NZIZXadyFRmKjs1BR5u>rWl%z`mR6tYdS zc!6*l8xxotu>2pH3;%P3{U3gRB2>!o+rww2ZhW~`#9zoV=o9ruw|2c)s=4(voM970 z7_51o#8a9E-U(j~U;0X$kVII8n`vF(Sp{{)zdwgiJ9xY!wdmw()RGuEk3c)O4{Hxq zpY9=J<-WZZ5I0~8sO8CuBI8$zaN5)@l9y} z409s|%JZn+Tlz6^IUk8NHSKB}L!*7;_S0{MM$=Osp;NA<#6q$(XSHG~NCy|9`>(LC zA20= zamNT8BXEqsF#^X393yayz%c^H2pl7DjKDDh#|RuFaE!n)0>=m(BXEqsF#^X393yay 
zz%c^H2pl7DjKDDh#|RuFaE!n)0>=m(BXEqsF#^X393yayz%c^H2pl7DjKDDh#|RuF zaE!n)0>=m(BXEqsF#^X393yayz%c^H2pl7DjKDDh#|RuFaE!qJ)eaqy0000W0Qx_% zHMJwq2^uh9z<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 bV8DO@0|pEjFkrxd0RsjM7%*VKz}rAGIgoyw literal 0 HcmV?d00001 diff --git a/docs/images/logo.png b/docs/images/logo.png deleted file mode 100644 index e08bfe934cf1a216ed7bdeea833db0474acc5cc2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 29555 zcmeFZby$>d@HfgXE3il--Q6vnigY(fw;(N@3rKf&2#A0nrF3_L2m;bA-CgHdzrXi= z&vl(Y&-wGHxZw5d+%t2}d?wcjRb?4;RAN*F1O#+B**EG42;gM|1W*qO67VmpXVM_x z1@u8(<~72%G1492hmV%Ja#l)82+Y9uC^n9j8LY({)vTo&1@&K=$Sgi8jk?|s0i!x7=SV5LS!k4!Ef6Y zC`u0+pGUvHA7tm$^}yzxU#;b3r6uMq_$swYrg<4exUJ{Q9*=+4z?yy)WP{~7oAICa z*W>{@20o$Q2?$VES&2PlErpBXW*QU=f#^L^eRKz&l5Vix+@~fqB|lU}asrtr8n2x9 z{z6YFdd;7Ss5~s z(nvklE|t@GI@_)CQ}_^_>;rIQyaXZSop0Ey6mB^fM4meTBDh&jDR1@Ij^ZOI61nT| z5_(>XQOX-PEz^|o&a=FZ8xI?Y#{T>BRsJ9>1tR#}OMsV9j;*$0aN*=WDoSwtdH+F8 z5G;M!c|A|?3HJw;j@(HF#3+dC(Ze~reXyqYX$_4DbWtrv8{cs%Wus;kO+R;{aIoGc~7zABggz z7_pqp6AkUqu{+osLF|KJ%IYI6)`Z8;ycxLM%ZDVW+yJ2(am74#qXGIC8_U6EGqP82n&J?Us8eFkJEL^Q_G|H1>x$4r=M#04h(R^V+GtQr`O0jORj?< z@OOxlOA;66xrd4F^!1BwvkdBd71<@hamCyKh2qT7KDgINAOp7Mfe@Mn*xGL_Mn$}I z9`SDZa0aTDqWMhGducE^v_ynGQ@lT$_X#78EBo`ec(2EE28 zKtk14RA(LmrD4A-nq+V_V28je5MYD-%C##fOFTE=)5$6vM4Xsn!efw1QRJ5#PL@`6I89~q@X1&vAy2P zyw;<4`ad5QWZ`zsMA<=Nk1CX)L)!bmw1c(RnXVDHYViqBl07Rw+Lag>dnoAfgWlx?Ua=9Wqh)#& zT)S&3fOhZi!if_lOd#KlT-1f{8BjO`F%cKb5OEb>(PH6)rUMkBZenkC7G}E2f?R;B zlckggOtY|Pv`IEU8uC`>%gX@i@If(4&0wu07zJ2C?JtAX+5*eN3oD|30FMj!s`D=T z?rSg=Hf+@o+_K1XJR@Ah%$N%4V#xO<0IMpSv3g#Y=ph1E=n03T{?tUd{C&uG|1>dm z{W~Ae=Kf?uB4!EjD=gIq!r+hXqeN;!FgZB*zLAyCno>;aYZUJ{8A(N9vC@KLhblL~4;@Vr0!}yuC`au6E`$>oqb%BZTeT1Y z`qw~jmGr&8*wZ3Hi293vKo1A#5>KXVNC6!Hk7b|Te`OrI2k`&N)FXi=N5F=t%VWLD zGtMX4hl)U70XL@vu>k6+aN^Qtzz$ z6KPf5!QMZQc{mULNTthj>bP5$0EQU?8#-HiNnt@ReyVJsSwId~QdK5`7D>3fDcb8KUD+nT7GCwxj^(I^vkuhJ= 
z(d6iaXy(0;x#UXPW|MPbDG>sA?n0S3%c;UBY}#SS|8+@=!Csa#aOv1Hs{(fcp-dUPLlggNN62lQY;uOPE*!Wo48U^wKM|Bj(nJ4Ftrl{A zUcuy^^W@8fm??Ek$x8Ja|M2HrmhElIDIU8h|Hy06m5wxzkqJut7uAN{{wv4U;oD$}PwUZL8C)GM7%`E)cnB zqXBXs-3X_Z#6TajNW-;B)9zENp3%m;fTfwaC~vPK1BsX%nX+VnWQht89lHxaKRABT zG1STi0lZT|qKW8#g*|an{ZUr}ya0l=^2--26v12^22-XuVY#00PtC=zrptww zR)Yb$Q@ACHe$VG5aKfRWRuc#-M@|p_EZ0sv&^%~``RCi_Aswf}TuDVh? zgzvUy;1W69t~O5gDCS|G<9Px+&RtC@SSHAZxMRjr|s)^tuCMc^i3tNaEtnL5Z4h^RG6fW6h-?fT=5+(FuA_z6ytOuNq0}rSh2JtRe&BdcDP}h#oBqpAjRBi^NHiD3 zIhIAJUOK~esQJ>rKZ36VZba>I2|$;=s$Tt=I{W>u;u-Y^dTPisM4pTShKWS+9>t8O ztG_H(F@W3Kz|#lx)&ahrRD>=iY3c>YFKte`R0)iv*9R`N(TD3&-WO3SJh?Yn1IrxY zabkLtZ!Y|KY?l1>Y`N#<0W%{FG;k_ljDX1IlYt7Y>*xE>2dSzSAE}ogz7F_*=^s9J zhaO(LCD_|MrEHpTTNNpj@qK4?t5pY#qz89JWI5vKH6P_SNLBRxAqu~9TB5Z)aiEe1 z_MAt{fV6Or{#hl}`NrZGeS)2c`r@5m>}gp+(lN@$-LupHfhy?485y7H&^UV_>IW-# zlDvu3N`BRif#*_vM&tqA^HK)ETHKh~$*QwZo%w+T??=`0?4Exax;tPB|037%8;QNJ zbG+$7QA?|0Ppnz5U8xWZ>ujSrfOW(Dw+70qYwLBDs3mcmJ zQcob?i#lH{a62wqex}O*<|YTDn>oEcq>$&cyF`Z_r6}_6h!$siWTFPZ%wdo}KvKVm z0p*XEh=3Hw2_S4;Y17G@o^V15Th8Yxc+&TAetHVJy;=HL{UA~t>VNYpVQce1zn%zf zTe8ag^YRYbVn_fG9Jh%t@p&0Op%O2fekk_Ek;Mi8g=trjkB$$*N%d004yx%+Ah=JO zP;rG}+>4nT*6CL{5cRa{EsZmSy97fHa(sMMW)Y=R?5do2lOr3BMpR|E>bO3bhb>gy~ z4vnLtyYlQ&hHkTFr+XJb*b*`s@~l401ONt_0ivrb13on*oIVt|Km@8y@I)2aTnryEMtDvQ)LW)V+9MmPLjG7fk*k()dQzqp^=YAkFZE^BQE z1t*5YQmOQTBUmeT3>iDNd@qT?R*Ek#s_d86EG?kIh6n~ZJW@{a*Y}xKKH;e>?D+Sdy8k?oEQ&%=Bby>NeY=ym!MBk_7H3rkIY_wsQ zOUT2mhy|c#bD&qp;tiGzJqVP?)*N|30zO=r3421-xUnb7mLFB3#(dJ-D66hmr8-ok zH!Wgtz`r@Bb4*-}Cz#nA(-Z?szw=Ft6MIh|_y#bVoRY(WnreDgQ;g^z{(72+9TB<@ z*c!*rQgWMRHg?zFY*Dcm&AJA@nqD9J*)njk-kd8ofv)1*ZKTbIq$g}Shr?Mzpp{_Rs&wYwGv!wA7z68~r-naA+iy?7)|@9B^moHre4)81kq8A`fKn4!rb zKzOe)vHo02li%dC`zN^vsga84b7b_MQYHlfihUS?6Y^zic48X6A(xK zcQr>Jj3Bz~{FlMQ9oNIiv3-zCcMhTZ?qU00-lVY{x5UYPSO{E~2O#waAaDq8cem0` z(19K<;$x~C#yZ$}xFs7Pd^eE;_&&Fl-MlS^aaUm+*J+)Jw3MNJ{BiK8t_*d8>3;ES zNP>u9mF$P!ET<1Cn~Xc&>3o8Iu;9;TuQ|4S;?_-s7=Wb--MeT4ful5l62T)fOjxc1 
zG0?!_J!WG%ogx^quG>sV@?qw9e0k#dQYDUVhCbga>qo$;Fg$hsP6t$XY}k+VARmCw zgN$eTW^p(zOXG`dr}wR$yOK6@bSAF2-&`{m53PsgI-UbwV8qCU33Zk3AOz}#6R6K} zsD8LzYkm7$F&~``3W7V9VRc&7WLRf5!(ttCQxpljoJ9aI{EQPYQD}*sUKvU%M_|W< zBh?uG^S#+(->3ny3+mBfdj)QBsH-w1LzZi9pbtD`!*kSBpFDfaP?(w``=};9c+n@) zPI%II`VcIXJ=a4aoVuG0<0A3+e?Fgy9*q5l={LlnySXOIjmzf#jYa3h<)&^BqyiA!e zhL)>vH`|9J>0+Dpa4=;)@3}W}=g&Aa4<;`UQ?mK*zSRR7^|PsF{y0@s6T|d8XFyNJ zA__Rh?t)*RyyM(o6(C7mV^dU&162**@s_62#M z=eO~FNMCvjH5--}kLy7<%&-OdhWac~f&{pzd4yFJfEQjsua%~qjSf~ao}nV5{@hDhe@!j0Hyt6)eifk zt)E|8y9=w-zuun&7z6+m8J2A1AS1{e0IkA3esVkG_XF70xn}B)!-rS;n zC%D}zytD)^4D>5ZO)PcsIhiy|LI@9yO$c4369ugwrz|Fj$*BIyy$U*Oa@0rLjWQlv#Cgm(hfkC*OTW2sjby7g0NO;|& zW?jQ%NvZn8s%`&si+d`2k@>4%`9kY`?f$9R@UVo0IHTm}^2C41{ySfWqQVj@5h$VV zlJ5Hbfv0VndPSMfJinbFS;FOnZWljj`pPfO@)dA#Q}FKAExQ$>Bt!nK_FuQEAYMq* z-LWr6;kFODg#Ys#fYk}?S}=|*ypZZnZ6XaB^ubz9$$1wCBj~aAVR!YFogUXl*h_wnl%+P{Z>W_vZaI1;- zeIOR~+|2q*CGMQMA5(&@h7BOh0HCN?G+q{Ywm{1~@%|kon*GI1v108j9)8b{N@iM2 z)cIXuCokL>vR;EtfmS*hlGe}&Q1(CV%d4IF784te_1!|LKAjkjkq>Jx?usN-Al3c= z(&akf?0Fn>e;h`6i9FV(43c1ms;>SKx|eOiN67N5$`UN(9Bsk+aDd4NeA$s4N{a;n zO+P`XLb*k-j@NI#T~^b!B5S3G01+|2X+kmM?zhnVb@0Em4d7!D2_+yAXgf_e})+Gdug+Q?HWLGQs^ulA@}fxbw*AmDC1Gn;l!j1YR!b5{IJ;}?;3sD^1Uigllr zSnANUX17|4&CRu;Da6Bcz|0f_ zMKX_jyuxR2AA$Ed_b)~D$wdNvf(Yay4p7SNUO1yW&3|W#an!dMaeUPw5Wvy`s%7jd z|F4s=E}Zo+rX42Hk$o}oHC${JOhQ#<&u~gc!6WR@^n9(2{SqoN3itD}Xhyd@2hcE}fkk+G(_X2x0$}w-YjC?>L>1g)s%tuw>GdiCysw8DPbqVV0 zX+*jj(2(+WoN7yfN1e*$$d_#P?Q!}*FYuP1$7?C^9gC$sAcY(q(2r5~iS_PJgk^AJ zYGCX}Rco><$ydN~&aex7?S5HFzPY6}89OQ?PACkZ;ojBhMR%-UG!}caF(2?^XK6SU ze1@#Da=Rj>P7Uz)7Tzg&KVD;KNt^$q(>GMB@gr{ncShqeg=TI1UHdO=Bku3-MGfPw z{lB>7#0hrM04?VjmP3?EFudU>O9D>^_tfgX+(R-mD?_j5ncAtDW;(|66$l1n!+PiS zTkzHE2VKzLmj+V9^r>Kmo{54^r#x~2HY_bctvKV#%luZ<0vW%139)IrXWUrB5V@>o7f zz6viu18D5SY2>t7DHPIH`L>ZD-ufqNS=D@FaKss`pXFbSc@T3_Z`pcS+99FVN3VB` z(l3a>wZBHxv1Qu6`Ydna1t$T|W?RLqqi85icAw#YdSnkOt1*-Ux)&UezbJzJQKkvG z`;6!E73_ni`LFJ(14@rX*V|bYZh`(>qX5#cQBL@r07eF=)V(D=3(|tCyE}sAK@TZ* 
zg0iL`7(a$n)Um%^H-5cCcjw6BGW9Z{?C{Zo&6xFcQ2%kLMiUA-mmfPY1dHL*1){-$ zb#=`8S2vx>?shb7Pzn;WN2szllCYwz5uIP>zTWNTX< z{yjtJvj0q@%ImvccIt3YikuB#a(Yz26|snpx~GQ0>I|ZfleSJZb3eURIPurf>OK?E zqTMBuTo7!EYlI-Oa&<>6f4@ddxO<0q+q>PKZS)?fzG9zFS>*s{vILx|cHz#>=PyKi zG5?9!TS~8TC8d#Y={7q6_3%8n|6ban`_-f6Z1R3}pzIYZ$xR&}yyST5iJO>w@a|37!N|TXeW9dGqSVN*82Nk`~FUD$|DB$_v|B2RM zN)vyfE8Y>C{tOkc$y{l;N_E{&L>}SM$;~l&H17opg=UJFcb{_onnxUROt;m(q8$bP zmYQKYzU`)dx@mXgJCUJ;nGCgdZ`6BgFQsROe=yLNv zys-Q?`p{KEU{QjrE?YBdQz<(_3&#n#mDr_v9E4~rX%Q{3#-6-JTvPJ# zKZCA1x%ay#)WMj&!*K|{WXeVsd?IH@VwPW!KOnzap|R0x`;k$zaz^=44GDk=cK{~F z9F}W0v2iDEsYHwFwxqafYegV@5XAMziWQY&lvM%6?Pt z;l1~{=wDbp&TbYuU!>MW-=d;_Fcs0|lv0tnkg=L=Z|#F>eyya^}_*pc;~_(OrDLd*jf*Pp{h;!DI$5QyD5ebLU47 z(!o>&OqEuZI1pheIJFo-AxRbL4=5({>$lR=LJ*ZB?r>G zob3X(mo0@m6F+R#+XUgb6yk+`?lZ>6XfrfGTL|(RgBec1w$xKPYg%Nih9Y7gg)(&X#OR@^+w<6%1 z+Cb6t<%jj$w^>|H@th}AB;!Yc%gAE6P6p1OBR?1-TC4?)Jv7lW)9NwGi&n|-PB3J**SMv(1qSp7@cBY;YfZq?irhcO4}>-ytb4GEOF$|bMlAh7!x3#wZjB}p~Scyr)e zDSJrcdW5FZQH1I}xAc&baQpnuKV5KdwZD02=mCC-D`7wyBR-`Y3IWsRZ(}a1Hffp4 zKd+U8nK2DL2YIMvKUAA-)ZKXvk}2m}=0!9FzXtlkq&La7G7>qzMvKDOYgu|0{t*Ha zNXMh{^d!h@n*3i^3y5F8i=bc%&_`QEW_RFszq!nztXbXqsda!+bAR@C z*HS45yP;Pxd$?XT2YXEn!1hGI{@IDBp2bUW0+Vhdajc|fZbF;nwWtoO=BWKhDcO4B zSORP+^a#DnQh4h~Wr=n2`&3#c>kA~1->cNRebZeEJBx{)Dp3NzVqPE0VZXOy@-oQOd;amc`E?^oH+4 z+mkIWC4zT9)4-{U&Tv5|Vgbu>mu(1y49Nhi9Ue)4F0lQnQ6x01kbZu6_RfFp`&m7Rh^ z2-SJX1Al%Lo_f$?y#!*@E8Nc_IE)DhT`IRU8>V7Cc6tSj_;Eq7(svb7jrTq;8W1ak z5m^ys1=e6q*!bAdG?s;_dM(#5cF4<7K8KLD_GI%Ojj+BTP$l`CsZ_2=6)KQ5`Y_ za$|EQZHq?ZamGnrL|ZdzP*nX%#!UQ)r}<{Cw`!a3qk{k zKZ*=d29DsU`l+X~i;gIIH*wXX%{l=_QUeHUc*&msZ{NeS;|XQ{&VqQwu+|I%p1N;3 z+qhzIUZ|L1$o^O2Qh>(X)x|D>F+n*L7&555`jnyTpRoOnR}_DM6*S#nRDD88yp$md z28f5_0Rsv1X)?`Fh!BGb{n%ZOz?N{q|Qc zObq!1v4m83F370`#vGeUmS?gZxv-%hlLA*h>B{kr5&JvU5a12rfDMPCq!q|f+5uy( zxEv|wqt__c9aVb;K-(58(1zRj@WsN4fTU`WP)u2(gf>tdjmfLj=9JO_(=*KE>;+Ur z->WIUM06UE3gkP{@K$tx;)#ye(Aa-4jsyhuVT2s5M2QK=d9*2Sjoe{eIEaGeyWJ2? 
zpas^1%A3)jR+9^NNuM}=0H!m*3|y7ofmfYy3WO7Hm1CmHTUjc7=+LrIh&F#b@|R1p z;9f$S@JQEx7mEu}H=o4T42A?z1MHA7;o%(zDdgo_sAE8TQei_Ey6lg%-`*0H`ftcj$r7*wfw7 zrm2BO&%1{e2le@Fe$EaRHZA<&B#9Q3za$0TEx;K6-g8IF9xzMU{PzGm%~n06wDIr2 zLiTUSw!lxKkH1x1B}+u04Uh%Sr2A8ik>qE2xF~S?v z!E9%$R7MVD?UjTZv}N&2MlJcRhRBNoe7GU6!~rlEu%H_W!I49#{gfTFGs_-C(88C1 zo1c(AFs}rJ0&;+zU%%4qpiob0V2JxiUh$BaaHU%m&x9U_ks|Q( znt=779_9lEKg&05y*7W7QQ!;^eDjS`F_Jp42n&X1ZgG1|VaB-?GLfNyL;rcbrud6c zzo{x+F^&a%7ccM}YvA)2Cg1>gQTL7Dr())nkB#jTx%2lKFSdSPS}d+WI_#PLUAT5A zT>*9^WkyhCDcc|;Gy8pLr85$6e&7pYF z$uBagoV@%2Yw{>A66i;T4ihS}f9@v>AJa1Jsq%$E?r}IT#=pJsJ?O1*+58L!|?9}m?PNgw1og0)1P)rha8pWo_#q;Hy(12yE0Gkv=6i`WhsjR zN9zA>UV;zoMsi>xZwx;PsMe1^ zibp#o2V+6o_}4m*K2nWIsrV*XP#r;%a~+SWfQSbly#CjVp@(l?WN>@1Tglvn%04&i z+Kj8})ajGr-%a2KEAt>56qhT^kpeLsn|N6S70|gaHYYBH7l!opmBnV{E6%PjuTj1* zo=YW$2+^wcdbJ5#bi>^!g05q86L;QCGaIrdB{5E@O@laEt=s5Nnp*K>+w#4TFM_tJ zz=r4Stba+l=ui`Aqrf0S@GTld&IKUjSjmGa&wt<_)Qaw=zU+@kc>9#7%l!+IT^uv2 z7v~pmgJdTHz|8;NfuO~r0G1zR97`+8gZb|vTuqo_UZW8zS3Sl_pNH=axQl?e z?j9d-9!=dM(itN*zp7aI2h<`&w##R@@t8`Rm`Kt87x+r z72>tKZpb`hpJ$x9MNN4NCK3t5N696{Z2b%@!%})UICui4H_yXt1z;j01L$Ejo_Vg= zmj7pD>&ch{#FgmPxTm#ffiBskNFbgpMc4dCugR&G7|JM0wncH zoNZr)-&0+|D^dv@cx*7MRI>)e=g03HxRI<+C>vTl#v^xe7We4=eZdnZ^ZA)0vJoTU z{F(?f*azgh5SOX2#c>qgTQ+Cyk7mTMN>!{Qb2HT7#}s`eu&X6aTMqB4{jcFT5+E`M z%HQ)(`r)qg&yfCNa5C42sqHGiZ!lCuAK^H^yrzrubzB=AFK&SxBt68LIlU!{yw2;frC?33j9PbqI10Qqhrs<6-dPq0Jxo_A z6*bQ(zvM?*S{lWQtQP$KhctBG>iQlBZQQ|fMtwRE<1jK7Sn(zMdlM^gs@*v|Cd@H7 z)LR+Cdl&W_^N?Qjjx?-^KBzZ#)0!%3q^R*>_!}}Fycrw_19XK!$k+=gjy6t$>f>ks z*<1CbJ!D50@zQvIPT!NooiNo{F-}>ZF3O4!kA}MAok#rAK#+0ZeM77*#?5hU|7Ef6(iLDb4eoj@ie~LK)43_GxQU^D~jH2wZ2YO2f$1kxOa_(Mmyqv z$wHt+b7w}u_-jXCp=H7gzOn|`#(#e(=OCIrf1mka$h)(Y09&p9MEU>!{{LJ9-x*B) zIIiLUk5R*y7D>+hxLg{-iC7DUk8i$jxE0_0-1i!Eq|yiM74EHXfGbRzeb4kuZ*P~` zqKF7l!$w@~1}F0>v}B0`m)L3@VLF*NZ8QP~Gjg$@C_QvXjy_nG`l|7eLnQ8F@YC5P89WvG%4k;HyW?U4ZJgHrem5r-|PM zXhv+%ql89)%%BP%)LL*zIJimt`;X@R*ELT^_d3om{4H7{i1mPr!PP5q47A08j=eKo zKF?mAAX{dh 
z&r)BW9eDj7#yDWpY8C$|Hcjqoo4U2dHfqD)x=zJWYmp8c6S4E-`r)Zy#@u2$Av4-A z=i@m{V#nUP7S18}?KUpnTf%4Xwq!%Uk8`6;l*j?d1}FoIa0lpGK~wFbApY35*0DMC zcNteb8;5Dxf}=~v?wgt9;aNG1DUCC3ztc;F+%zjMf7~pnPoJSApPP9$<7vuCS>}vq z>ar?8PDO7$`CCBJy@d(ntZypBNw>9mkv`({v@AApB2nwO#TjYOaLi^h{abIB&0l~J zx~m_Oqm=kpQ0Or|+>)W2-&nLLQ$VXdzhX~Bq0{_`cS}7l6MPe(@8L+QK1=i-$h}q)5Xh4k`Nfv{I7_B> zqJL)jyco6nj~W!dB?ijTRFrD9J1yk=`ulSQzY2yOH+1-;#pXXMu038SaxRs_}C)*{#LZL!2z+9 zOT7OGdYsuy`ACG=Ee{DH|@wMCj|cPJseKHRQ^cj?K>j^%7A}-*8nIV z>-=Q;xlPhSB7oI1DJzr{SMd{m?NpQOpjJqRwplUpG*#d%j;d#yeIm2yGG9x28XVot zlwqa}T>F$c{;CyImyIY$S4>=*l@;>y>mXARKBM36$N@lV`GU&CV12&p(P~x>JAC2a zj@lr6x-@dDTq$71vD5gd@snsNvtQxh>UjHF`crWmR??w9!t+FlP!z(wQi+|Lv5D2r zV!)@1@&J5DKj%99CS6JRfn2-ykzex+Q0+qbOq@dxbs4NjdJ z9}rFbH{8*u>FB((xARt6a#t_S<}2N{xG<^v?kO%2g-2KFxNow6k{SgxUxmAA#~{V% z@-N$=O$9jQ_Q)~Pts~*?1Vw=6(;OQS! z-S57cXn&mv`Hq&{XN20do*b0z8@zJt?_U9J_iUKk2%sP)jKqI~Xvz#=!92EU87aCE zCG<)Sr0MhZ`**wdeT&Cus%Vak?{*!H=~Y^42wv;0fQ|+bCVZ;*ghAiKsNs8#cA@%f zQ|opNdV-v@@M6dg3rP36{QF!^%uOeA)yz^@-T)u1X+RpDlf9p|{bm3QOny$kS6)4{ zwplMsGn`w6#MiTZMi?(R6P+d6pJ$Ou9T<({l$KVnXC203V0A8GTH8&#@~wg@E$)wQ zk@xe#j91swm{)Zw&UroxrwaJ#zYgD$T0JD^*aK0RW;YtLkKZzF+d^=RTg|PAJHr$2%;x$M~G^n_4)= zjUVpWhNP(<`Y`>P|EXu5E0w!!a6j++Y0Y@7b0iOQ{P6A_T{d+7GCM^!3-- zcPZy-7C&pOpRdYvYNy%T)M=n^5xOTHAkr`V{_?Oe)6|4Ir_)Z=xNs$Ly4TS6IEjw!n% zoG+j?UA>i&xl+>qG~N0P6EDx40?nC+0Fxl#C9Z>~T~5nAHWWy^u}!2K2B`!hRE`Z( zc5l*qH>}%+Ybl}SQWDmrqJ;bf!Osb7Q!1S7Fzh_4jdI!pFp>Flb&k)745kQH&-|Rv zu`?!)Jq*TZ&Ia&Bv{M#6LYEHZ%O(guk@O}eTtwO*I%RQXnW>fTkR2Yq3;X$Hvs((i zjXc!8Tbb}|QH{g3iL6%M9@cqa95UmGNxIyOpG{%Q$M|uSs8&o2Titr8_tE1xLfn^G zEaohhH5GXz8EHmVPD_5R(O4e&Z#35(WzmMq`NLS-=6B+~Lg(n{)JJop8NDsL7HHd2 zrAUQ%OW665THIbKNhqphBNEcdEWgwJgPfRw*kV*|p@|b_e04!14b`^9fZp&dyT6S= zVWo3rLIozibl>XjBdf)|TBi1W@BMWh=FG-=Jtx$4TEX;p4sLtxX#n z=UAREyeqR99Hqj-s!v==C;3k7wxy_*^c0C&SQKRhnb6u+ivAL$T!!rN<2)~=*fW}z z%iL97pVYIRO&Ldy#vxqi?6}ZZ)#w6u&Hbf(pZ4wlF2CO_)j4t?yGsP^PUrwTTl2C5lDH@>h zEgE#1Qz4TFZGf%p{FYJQCN*#xJ5x)Pl9f1*EOf`x;Kk@)j_CL~UrDC^;`5$d*~_cW 
zq`H6-b1bCjR=kUV!V-+lrFRy7oH?lDH))pl`$t_vXP5LHo%XmKKSUXT;_pSumtB-;f#|RR= zxbF7bXLs{T#}j7p?T?l3)~BuRbMo-{>TbetCk95Ec#fUf+b-;Cg*)=ntIK^xzX_fF zMI5;ntn|V*n5d8$HIA=uSUo`Hs;QXzy%40D;in`i>Hot3NYL0GkYQ2EeRY#L3}2sB z-s#2ELv+@>>*8MeyGLfYs;oCP->}Gn=?rp$G-+~bP@{IHHjHHUkaX)OettNCymd5W zr+QX!?7QC8z+*H_7W;e%gEI=t$g~b7Ll^dZf%@M4ZgaKEcsNn!*GrVo7j8a>{0)XV z;a|NPwj1_J8^>=>|L{lVW4!wK-VfQ7J^)SLsKiB6r6Y^zs_&0w5cVoDQu|--^zQ0yxxPQd#*B`Oa_@FCb87n@ zpSSYW4ClW0TR&IOE7B=%%{MSr4~{Gomz41cj)mThTD?$;cu!Ulm+U;vzT>U6t^68y zAKj7lEe)o{q zD<*`uaU%Va%ivf)s3AmECJW^5HBI3U!K)Op++at5^d)J+yZoe?+)E4X>`rflS zvN<>NVZ`}xrbC0>LwSX}8|PiLhxwnFn>sncG4~oi_ExhIe-x>=lz3apqygVNge-vi zWl=Lfx7ISPf}9mi)jEha)-bquKZPu21eBI#rT5`N9Ft=TOrSGsNA=C z+`nH?fL$Id4_K28%(`jvVcKgbXZGgt$0hLxmsNj0Tq+N(#>QFPoYjLeQ>;@BOY%vg zLVjZf7Ju~rHWa_H$p;7&p|&$rw2uDr@F=}bVNvc`B-$oVA^=wqlvXngmUB7EN^N#G zI5jM^8ka9nBG3doW9NiZt#Yi>yN3zO{!ZEZ{3x*DENbKlyJq?0#Pg^pN|K7>JTy&2-n(Q46ix-~RlCjRNQt6GK**h5yIJlTMIY%2)FETIk3&|wseVjvf zOmko!5m2+TRChfL=p~S+`O$C~6OypodA9!*$uZ=3>#GLMk8ckB`w4tLeb+mwJ0Ekn=v1HQv9Fm`!IImjbDrn@$kED(cy}pAH8+MzWo`9K^0xFn=ATu$1C@j#`xpx0(jwm<5SHUZbm^&WGlt?}z66 z#WU}ZDwRUXxZ#>j9YoYE(uom#gOsiixgTMnQ8xY{i-KebZRvT~twVcj%eq0NVeI76 z`#R&C@zuKU$F?aVbB|!IjWHR!1Wu#LU&04lLhLr zsYlZxN&S1Pp9<2B(HWoxWK0i>fQ)YQpsSI(F{}LXn@x>Sm*=co_jP-fXrVcj`(GKl zpjPYqZQy?Xt*ux~Vx4puA+u?9??Z74uD+G8zq*1%I)~ZXdzed0uA}N3E>BsqUZ8jJ z8YI4^dMCA=NsZz)* z&S|ZuP_?xqE&H&N)A=OxF*`Ynsx?B6 z62mFaA9PK>F)JbxB%qdxUM)%)4J97;$)7u>S3?lbE?lF(Hxb(U{#C#;r*2`mnVblq zGOtVW_mjCH=t#5&Y6J&`qMqfD&vsh-Rsl}PdVGT^blN$mYvC+QmMk8-pEZVmtC~L| zU^evTXRPqD5z-6J9M=)561NcB(;-o~2N91n%moni-v~NH8jpC)*R2uW8-d7_QQa`! 
zjVjMV$(`y`%TPUzvp2K4YsjGUvh9xvE!Bz_jZdr80-CP=A6py1NLM$L3*0C(8Qq*p zf1g&)xLMy_;o2Crc$rc>>g}uxaOiKw(bmo*`B5l&;RH}5Yg)2jJtPj*9t**j6oP4B z6n9ybRH!C2EON~4T-JQ7^~^~>Mh;sB9I`D1lY~q92hfk(j&w12Q7`I<={w_gbqph; z&RdV~4I-k>FWe7!J^}mC7`k&OGl^*rp9r)=qEfJz(3EvS)s~nGRR=BZeePwQE8X7v zxBQt4-gAGIR!<(oE-rY8FND6)St~>DQhfi6`+c>kwl!1Ljc z($V(=-7EC(&6x9;y@G8e2ajv*&i1x^Dg?^}o7xW$H0#JDaYFkA>CCjLl}~1re`;AC z`BKMo#HhUl;?LRMS4>62=o|^StqtYeQIkDBItdBpnar?evnuF})Ub3fSkjm%qO%X^ ztfTi=apI@$lo+PFZ4Nh>h#_>-HeBm?P(7QTOE=vn_u9Sa*XAIvv4!nalkH48G(*|; zi&Rqt-7bT#xm!3>j8&|QHkgG>SI$E~{1rK|IrgeIM%K;q2O4@zUfblX600YD3)0{( z#CC`fnae4))}pdqqy5>I#W8jM64uaguaeDWPFZri{+77qs14j3QXXl?kEYqWzARy- z(r%<<9O9V25w%+r+e}?2gd$(+m!W zf(0c1ulCOJp~?6C`vM<9DFdWKMMS!Dgn)pwN_R=iMsCCi0g*JUWDLUMd<@g`CE&Vd}+bIQ)@bou|%UZ??}h!%j#J@?P^I1KivP% z+d8dORbvSB6uwkw>N?G2pu_KUmro#Lw8eieQ8Hr^=qK(ScZpS?@Y%IAm20J37fk*_ zQ3R3Z{mRqjx?A2o;Z0HPjZoreXC*F2mA#VO?Fj`9k(qDD+Qr|Azq6w5!|88Ly|{73 zw%9_Z$N(DLd)$?^ulnh%Y@))J=(0F%b3&EcJ6kZ@k5z8i6kw-{43r|Z>=!F#l@qGa zE{-|)(!A$okJ>%CVtG*Mt``gs*kdUZ-MNXj;#!_>ri$kakJ=7C^5c;Gx0o7xprkj9 zjx>P;J;M(67TUyj2n+L%T^q|yR+>`%Ci-Y(=i)VaL0#)wMy{D2kXAhLD#kkpOFZ%~ zCIs-OC;fJ*kn-RTX>Q~kgJ)wO#;7`w!b${vm^1_gnlE9YDOlrTafCVYD81cRLNWpl zv;R@;IeRJSp|CpoyT!Tf1|OoAgCx1U** z=^-h|*RZ#7DlOPeazirW{!#e6Q~zzpM#Veq32dgLRTgmS522j{2U5^@Ui4IF$8-&I}X@sM1!;D}DjgMFdkOpDR zsM%~Z^u9ihR&|ge+~BIa6sj_I-1=}aM3c82J(9v}Q*3BPA5$N)N1oO~iv&-UUFyr; z!D5l@B>gPXHq*^_Zf)n2!#lwwee&kyXbG3PvejJE8h$XTE%XEh5jJZNGHbHZUyS%H zLw>iVozfPasyvgADwJGe5m8;g%I8F;$!4uS6nD5AA58wh3a18bwl3;FTdmg zMOVsBX*JI+GPuiOq$y_0BLGVirx9+mdG>46Y67sNrYFQI3i4)Q2|>M>Ie!cKfbHC>wd|NA6U-YO*- z_D5|3<^4*DRa2sEc7lzXspftaPataCG{q6;iUR3H{bP=O3sIwbQ2U|zZk?9UUCq~L zI&DFVHN4#cdm77(^6XVy{lE2Up?iC59l5bY(sHsJXIa{U(w+}RZWA_%T^)O#D#=23 z{|z4s3=pi*&;7FHZXX1dMEI1X5Ae1&>APdv0lGh!9r7+&vzq$9IQTZ5C!m@68xEg> zCvIi2ME&|L4;;?b&lpWwpt-b6i@ZYJ6UD##8MrsDYBbL`%d)RV)(VeU^^J^VfGjmC z6P)R#yV5blBlAy)W@#GT&tEBc6q%+_z7OnM888*371FmHt7n! 
z9sYhjp7UTgCg^?F%N6D?wrjYXy!*m8wmX@+t|O4K*t2QP(vFDbz__YfHj3r@)DPae z_-RYRvbeb@U%1NM+e%t}7{Tk_9^AYWyXP6} z(gX{q^6wQE+8BL*{`}Nyuu7IV<_%w?Al&f&P6?@<*ivn2Nlzxql%?y^MH2PlAu;W( zgh}m$rfPTf^t04n^MGBV7As_ffTNkU1Nu!?m~urlV?UK1H}??=)Z64T`q=>~WRSV$ zg;t0(c0&5he=Kjx^>d8#4SeVoZ`Hm5`HA6^!w55BXkJBZeIbxbu`M;C2m^WYau@Pe zadFHj~C8wNXfyi9s$8%qzYBi7GJRrqm!KUIbfK)=hE1Dl?Bh z0%u2;6z%W&cJ#8DAt6~^I<1_~HWobnaQcM%{4$-|M_0qq;DxKLMr`@8AKy>W_(kP& z$p=drOP{jB8nJy(R``N??F@SBj!=SQoZ@C~-A;epr(?RQbj4HqnkFu&w|qZd>AZeg zBPOg$WG3yog%e0=XMSF*lpyA~IIUk=>@&1^68wEg1AcK}{sq80$gwj}b44_JkNB~t zqZyr7AP;D`YrUgZVAh7n&egZTQzD-FD7lqm)rKowwTemYa&}i}JKy0d4l+mRLx(J9 zWeRV+8*xoILGMobL{WQ6`CD)=TgqYqOFt?{_kAYtzDy~-6Bl!W-6}ut?bTG9QOaQy z&&c8qe!eZ8F>O~d@JDFAxqW7-$nG}Q=H?gWIsXKbw%uE{Nk5*u|Dn}#QovFaoDOn8 zFE>-~qn2;t!BR3F(>$M8*Q?R{*Q7SQ(z{oqk+^+J8ZbX>5P5lg9Ed+iU`C)R731%V zZ5R*&V+};@nTPuitu8-=oipop>ddg__fD^l7tp9zHR2>V}nw)LO}FGz2m)O=&@VHrnfHnI182N53OmLpVcReZAf zK0_oERX#}!{;x3`>#LtKO{cx)zWb2Zl5&}DM#P5g;-`I^v>@q*AxlmRs(1UVeo1N$v}DmDGmAs5L4+)9U;{ycSde%#h3LJr zy`OZ!nFeWudv>qY^2`TWtmdz@pS6uS2W{aBPig>rC2@OhJz$a0|9sxnmlAeebdyK* zcCvPHIcWgRy;9biwEQ|7Un`c@AM6#r8l=YUj8&;I)R=cK7xx8I6=vON1wz4_`9pOZ zYLbiWQWRetXB?iBbD&9PpN$e%bnASOoPH`hMmOkj^C{8nw01mG!~RLzj{m)}lB(ql zU%AZV#S_~**)m*oq|O0?Q}SHM>vOG%=3WP35v&`sp9&O_(0R;XM0l-I-Cf|h;`9$y z>G+dGePad;0UvZrAW10_zOx?C*n71Gu`7Ory%vsrcfjj!0AUVky<)`VshJ^el);|e46gX3&rrNNyptsPEfX?vg?d}Gh!EKIh@)J9uEUI`^2D%5jahOfLg zZ@tAs{Bs-h6Qiq)lh6Ag|7SmEe~1G8F`8p{PMZ6gu%DAF-!F5W8H0>mS39?|?{%1L z7qwOnuRWW6zPr#fctS;}vnFMGk9KIs)A}LK2zlY|JO9vyU$U>E!)Ozs4*{oh?Dieh z2g|=M%xNUqN)*HHHcx5w9G|B_3hOrmWwCPk8bPX%oP)OyW-1fc8Nd_b1@4~4J?i24 zuU!gqJcc*ZAF|iE`|5<9uC!{)ZaVaB-DjvV`_s)3tbDBngzWT{T#dHI&Js)iUpdvK z-cpSjZJR;h#M!9cPCo~TH$EsHl_8{K3L269-$k32pPeQ9P#(O+8)3dfgV;rHBCIJv zMvxqQ;<>dSZ_K#H?=r>a)qwKv06{6ehlFUFO!1M3`+x8{^S`}#aM~jx16|=?4}~HF z8{d90wv4$dt7B>qMWA78Y5W}lK1aV5%=n5%W0N-F{uu#meSo>eLr-o~OT>`KB3y{9 zNqX*b^xaRj5J2^Z$|UFskipz!LdkS*YNZ83t?n<`EQ}$vMQ9LesGV&06BM(s_DOMb z%s9|J!@C+3@gdW97-c`t`9CsrF0F;HFAVg@VoVI$eh+fXVkD>&BUjx3uF85T8a|GU 
zw$(_16@W#~0bP$M=XPa2HhN9C;a?V)lC-Dq{?}0P93?wx*GKI4hPkF$7zoxtKWlA7LI^4j+#-0`(4XlmR;omUg1WQ*u`@{jy zZ|f7;?TK2~hUd@tAmq{licTm!=E@Q{Vh>-7613@HE)^k(yly`AdE?I@xdCt(3@qv< zudYVk{s}e)5WFc)E=Rf8q?9yQAZ<;EDWie(1c5FAk*l3@_F) z$(O2dX6ynH^-#mBW!JKRMSd+SGc;(w=fLXc0vyzQs0-7i@2FxQQkGtQE%FQ!JwjnM z1%9pD5(a(cb)wV(uO6Qf3!S^OkH|g4?>ug+Jy_Q^^8KyVJ2+xH~mABy2q9o;*`8&FZEZ-GH-GQQy%uRfT6`W+aoo>@!HLbQvE5}n+v)l zO(h{mZb|M$R_D&e!_;sJ%4MlW(Sqr@slq3}*7xSnV<&q>NDafqW9lsVgDEpk4nB+D z!%}^Msi{{%$ID%#LSci_iA;-?Hv}(n(ECDNNSJ^eZFS14e+@0;C3t&m`up%420pi4 z;g+)Yvst@uc$oywyg>=C-G4&0g8%A8IJgGhp2Ug`)a#sZ`V=vrxUJ^vD_BltVO5}} z(m@YLm6R`u<5xfDz{lRo7LJ;qw`d^Gnn4*=3+ z@2iv~y1D(tM|3p2nhttYpdytKyUr`UU~EY^)~tcPF(j&^j>}?=arzuW2>V+JGXO3l zhJXx5MW{ehx=-8y1EOw=+-jJfh59`7qb=2Gr1dAvI@7}Q;eClG3+->@v;@l$~JGDCOII7_ciWgop^~^Oz1e>zlf2fw*B=T)#1qXcdS6!D`$^H;p z2AJtq>aYKj&ESX5JEJq1rb_d_*LQxsl8#?r{gRM=i9d2@p6u~bC}gGiZu(_T=abIW zw^L)YPb@yBW*xM_MMtdeo2?$0)?0@15C?(U#}ylrGABZ9)ckc2hqQ`q8-`sM7<>yn zkSo6m_*zX9V916~U8?T>SG$dOIh$;E)N z4`Se-dd`kE+zpPc<-HaPlUch=w=I^O?RQnnkM_g7>^fjz9-hlz7WSR%GsFFq{k@$* z8&6iDELn8I-#-S}9IxVL9NNg6q^PDpJ-PoYHI=?#$YuVUF9RlqzGTXH#NP=jE0i)% zlz#Gi|J=T)iJ|ug4xg22KTjpb;uZ(-uoXN=>9!+gS%!Q!wylDk&Y zbFu0u$H!4skl=g+eywpNNbjW@d3e=Wh4-b)H>cmqm8J-uO}r;5$Rm$vf)1*Q*385| zMP!$}{*8PN04h#@<+~r@0^NxfwmW=!{$G65qM<*d+!#+VLx#dXXOFNjVptz5-Z_kr z4tWXV+wE~{#f`fiEO+lWL-0Ilc;%^`=T7Zw*2cN7NL(yKH#V11fAcJ`j3vG26okT` zL({_S9BB?ddwlR15h;tIqfX_+s@`)W$ccDL@J;E>M$Cbs)kt3?34(sHKdwC5#G&sF z@(gA+D))*o>S%ms*EpK7D#Y=UC$mp!+s@A~Eo`q#tB>-t@L|KxN3E8C@OtxThE-5XC&03En`uF}l$`M~}Ibbws6mb!2%m84e^i1x(ne4Q8HpTJwqGr98 zr`v|oUPkWqmW_N}{(|Z^!U>T7#S-l z5A0mC+RNgIAhghYgCFNx zdWiR0tB^} za%FMywhLc!Lms)EbZLPZ;)51v<7VmMOR8LcgBAUi*4%zj*g^F51??3#0-n1Ms=I{O z?aM#9>U;Y@9HbF+v?m4D%!DA0CcR*e>;$Yc)0F-Gz8QSqAmQoA>Spa$_s*3-ah*%y z_ZoNaCGDL1wy!rg3l|rx19I>)?KyG8@BI)@azOA$ujZE66?vVZYIo+u(~ro#c&Lx- z0*y8oowRyoeeH9?V#}7Hh?!Wp-!FQ<{d(hOY)*Kap1a-NDol&ZZ~xm7RhoCL0v!G+ z^_`PuNr`izJ_JA`84M>AvvYa3VP3A`PaR(qVEr2Xls6M3?1(xxS>Qoo?sB;GZH>M# 
zYLSe;EBU6UPoMu~?X?8fn3cOoBkZd2dhA;hCGa&(zkgK$OU{8(HXWpm{N(8+otGI$ zxf-o%t?QjC`={^)gcQ)f+y%d_J8NhOP<*re$Ya{3acsP?QKzh<%0K55zHMn=qMcs3 ziqSLort2UqmCzKzQR%D@Atw1>uY3zhZ=5;DHCYx3$0gKnyFnqq#HmP#chIO2MWWU5 zHNrYXc=5&El-TAl^>(ft{Gy5dOUDHofr1FQFy@iLlMfV+c~ko!`WD!=va`q%Ia+!@ z_(do9GDFagCbkerPpGHR1ABCru?x-I9aIOGfC|M2NTp}pL9?XK>}TXdB8 z#EuxGyRCi5ZaJ%actRO}>*xXVeBm7wr)GLLk{S zzVQY-&$FzcAlt^SLofpL5*00(2#(rBuvTfUvHdL@)w8=-0p7wk^_JTP*%>*Fs$^PB5 z&Ww)@NRg1gh}l=4t4hjD>um>q5 z6GWilA+#vT5@q%zXtOW-HREa6GkkRURJ+X-;zPuCyHQCiEe$gZRp7z7<*ROKl6y8n zXH>k8z{lDw%#q_d$|5jbD~6X5PubExo$-EcLK~`C;k+{OJdLH_@Jk>2Fo!fO_#3jq zw7ih2THly|TN$cQ z)b(O^h#-!FQ8uAmMZK{?tH3r6wjt-MO%AGoVv$v#J6TL1FPOA>s7OBki)VPYFZ+d~ zQhW&?sg}?Cn$alC#(ESf832+Wx z(K@z>GDQ^`-4b7=LNzSAiOn2;v4q%eauXI*TZPB6ejIE57dYPc=rGINLREa;sN%9( ztb)Ns2i}>j&RKq1nQeViYjJbOlh;ALTD?^BQ!ONw9>fexZVQ`ZKRp>cR;fjbw`O0B z*M7up2uXcfH8E1nTb)lSmajiY>U5ML?^eq^nV;4h_taEJzlpIGK-iSM%-G=2ajvpQzB$3qu-Ek7Vl!+ zNFL*8zltQJSs1JO72ez(O`xE8%{v_>2w@g&PSQd5r0C-bt9uAi<|rVvDT|B3$LeHt zLB5{bIlD<^zc+3=01B+T2@b%r+o`=Ly8}J1Y)?&o?E2;fgEomo1Vbl#)Z^Ymvh+KT zk|#8Nq(4-lgc)kTyXtnrk$l&vrgQC7m}21ko2rx-VH*0S(3&<_|5`cf(S$-|Vx#qZ za%6h|AZW9JKA?ROjaY~MavxSN@sjsDj?nnTjD=hRVpS`^=<`>6Ty zibJ;ub@>+h@;2-F7G>oavQN7!t*>sIy<-2$B+I;Be%8izk~tjK-80|mHNOb0Ys>fh zEAWM-GWdxw6@>FgN!W^aQI#`p1qcVOEoArss0tM8CiwA)M-}~STWn;F(`b1^x>bSAgErlhiGPmD-*3^*YFUvf0w2_@riauT zdl#tkPxN%kPM4BxTG^I59;h+OEI(E5=REVT-%{{K)iUO~1FWLv8VJn)0{#9idxFw-e}nKUw*b?d<|%`KUK6i$J=Ym6 z&b_WDtzG|IJWA=x(vXCiT)SBDGdiPb{jv)VAYv<1)7vg*`GRLa$87*J%)rP>X8Axd zKklxKrL{M0=xSPK?pAW*<4zKWq_~6EWaknz!;o9^ZwJH$PuhW|TtNGl7WPZ9nJz zE?*{%vo8aBSp_~v(?Ft<19e@Pwq79DaXY)kw=v68?*jQwY^}ca_WHO5%=J11fS$Yn zq8aSDcy6b0c1>}aQD*-dU8Xns`vnfJ%@><@=LazSN7uLR1viCJUhi6+3}CXcJ)3Nk zzPX;>7E#UyL+Tj`>o9<30HRHyRluNoZ*S<5CpE6`eHUBQ=3l9Vz+kN_PDK8{`qju` zWE04*@k9-v)Am(+cJ08DU3usMf6O&p;(YV?FAzBCrpr=^zUDV=&bO%2$(%rp0v5Oy zxu{W1Fh|=tP+Ee^5`9wMte=WE8tOYCROe*Bco@a$?7kI{2;v<)wLTG1?BuR;Wn3A_j^Sk8 z%&UKDgu@;M2V7kM0W!tP{o^A&qNP!sk$=-31cbE4;nr8#8P8qx9m@~?c 
zLc#dflZihjJKfhwt9eLV2E(`j^-cDJa$$i=YI+u&4y8FG+2{u%KQk@Cd)+F_Mx>4m zu~(_id>S4h3C}jlMB;2V7EV0=RDD2cTV0D2@5ZMh6u_n#L$0Y*u#?gR6_h5&80XO( zf26bb`;uI(D%QWofw(AjQ(uifhntjH%O)ZI^kBbOMt}9SiYE%d;J07PtROr8Xwmaw zAL?{=fWDf^M=U<5WpAgkw@k7^-#4h@3oINHw0uOSDxg^|M{mBIW=S!&^TfN(hA;JY zUTjE>WiwBHSv`jZQhvgu(^r(>dX3pF_VVE9U#I4RI$8bej!YC|1+}@7d>t=!@+A8< z@#n%H*EeR(G5)3U(0r+qRBzAN=iiFnGU-G)a_iAQQutgVb$DP~v2a z2kOcnwR!l5AwYDJ?j0^i!sdWuU`PHYLJV>kTD;q1m61xEmA5io3C*VY0y~AEnS;lx zWNt|FlumQ6^!24I_X2?c%^H2B%kdvGFVb+IVF&7GSyGB`=_7ERQjg}@(qO@pAI)s; zJ>1HEZRyo}RlkKQj(otU;kCWBpirx@7rcwPE=QdL4|F?{kDF4nQ#K6PW39eQMOCi; zCl0KZ%?g%W(8<#!!&L{_{m7+L%b3%v@ZU|Dw8`BiAtL_o-ovM_L+-%j|S m3jU{p|9=%MjXbG#LRI$bY diff --git a/docs/images/perf.png b/docs/images/perf.png new file mode 100644 index 0000000000000000000000000000000000000000..2fc633fdb814dfae1e9418c7b5524f0dd07baa71 GIT binary patch literal 60887 zcmeFZXIN8R7cB}1(gajM5JXU_fPhr#AR@gey@T{Fy>|rxY0?Cw_uhM#CcP*0-a{2a zNk}-G_qBcBIp^N{OV=j(rwz|Zw({(ZiYhWhi}!0*z~e!UwbEA7Uw*QgcO zALBSUlmMQv-$`mXprG8Nz5YR!dP8%Ff+B(<^-@&X6?JzW^Q-dYbk9+cZme$XW30z7 zMDCEk8p&%9!La1S-XIPu?<6hL@0sc)=c0SueNX((4Ojug>CJ8i=PVrb+gSk&a((<) z94QI>l^m5W&Zf>m%PvBx^B$WE4#Ul7{QdSD)0-(u#;}uZlB8*=C{`RaEGl1=pAYW| z)gR`AIJH$Q9{v6um9NN4eeHM_o&A!B8oc?M)fQfslLa6C<8v{O%j{NP%{Dkeb_!Gq zxw8m#aPBqU5kWykzx~g{N31-8X5;>Q8g;%^`PB0{a^&y+{lmX+Nh9*Zz`ZvGgF?2& z;#WFtetzouk4?U?5eTT*fwd48#{Ab2{~Y_DSKo6{P`5pI#<>3;&7W}rW03Xrb^PQ{ z{MVuXJ_8g#Pn_G4L%A&0)PD{9*R=d|piXow$sPuE(cgpmzo+oOn2P^XE<`~O2v`b! 
zd#X!~2Q8J>vo+K5{Cmnff>sMn(=As>k3y~5lF7W+nG-PM;xx!~gHxeO{+q4>)j_1K z9-pf9mU{JT&FM0OfbBw)!NmOq>B57vQjN;)xTjhs!k32u)fSU|WZYI;F0tm@rHxeq zO7*UlOXZUVD!45J+wN{9DkK%g{c-OhQceok#C;bTMVtMnrb4@|xpGO|)4)C|)@^oI zZFcXyl8-Lsxj5}WTG3PXSpHo6lB~B+uk5NLttlTNG&l~9yqYRbV?9%!S}mrN!n#CX z&ky@t!SxFDT3f1Z=J{4lZ`e`@2&E#nl{axy`0I3M;I&4*ceSc5Vyl61PYiSPA_gju zRv4uU5MyheSs$6{iL29D+j-L$%~!p&7w*w#@g^zSsbDNBMLRTFVNtcYdR{^_tTmCP zaRuS(S)0beYR984l37&D2a)BI${%K6}`3hE{PNQ=$sPZCVcw=$gI9|tuNwuiV zX1-xsMNKF6-GU>SyeMX;Zpj<;GtN=*`&l6=)%vGT?o>2>cD06e*@enlwtJVC=C;6+ zY+KBaH-{sj$I$6Chs`19{rXMK)$TAamD-{$WR-luumRFR=37^mYr19Y6%ziGluAwS zN0h|F@&pdkM6}WthQpGUV%T^ut&VVI6XH;IZ=<5M7>*6MRXAuJP%ddjx}PmHl0?~V zmw*qzpOMar0Z!;RGF0%Yu5S(AB{%&jbVU-HThPIXnzd}S7Dn%cCSj6N5*BSz+*gIo z_c=_am`a=O7Hpp_c(A4`<=#>@#x-KZ`Na5rx1Zd$X<1E)h0^m-6kINa|EloE&Sc?U z3w*!Q2LaK05;0iy8k{JH4FU_&VP;OoRj@e90q^zG@q$ir+pTn3$w$@L9L||DVK~!( z!I%Nc;u;-&`fe-1^j2FV>Da=U`8z@Laoe7iCWH2y(GeB%(Vr4c5u6oSUXgo zqILf)*Mesr3Nx%e>A|cm5C0kHjY+VOi)7jg;EXHbIo9z>reSAT*Ye3{c;Zeduq!)Q z+-RnT)dUgCqNsz}GVyerx}ym5WPytYLKcSY5A4m8wP)f85HP$^A>iHb^{Y-kXM(jyse++wa292`TS_z1}?tU^CX9?rdTL*f!Q+Lv zMo$oh^OwB_NJ$CdbMH%6rH!&aj$wh-imW6q%MIP23sANUN#l|~?p^=Y-kxpb#z4X< zcdgYlyIZY?*Nd#YInHb%r1TB?}z z(Z*}9@PV(ha`GlNA>Ee}Ey}x&&6~+Cx*DvNop$^OO_lw=V0ejqZH1Js>oNv7U!_OK z$4arrGo1)8lf(6JDJFir%6vR#cKYMbneTck()t{R%z=<7qzLfFJ;*?gndc^(%s0BY z1mjD)E25~i&qYy7G~}`?A*z&L`e<$Y}1Q3yC0 zMeuHXdEbEiz~?ao#2@&DoJ~Dg*JGOU>Rdvr#j_swCKDl5l5bV_n$Y=D0O8E77DW^& za7r#oocr$@mQ|dmjY&wD&w4D*al1{2nIB}Nex9~K<{#h^Bu^sI~+Lk}1sBMh#~_9Ne8O zR1;DVo&AaK%9620Si@UiC&0RJrV1Hc;qb*PA#S5&f|#BB=f*wqsX~I*cctCn<7A

6SUS0o8u7^P!7FDDIYT)koYfPm`gGM$ii@H#5*wfGo624fAKHYCTXplme zY3%Ywz`6<;(h~E}cg@0lPRCP`oJx~ZLOMax#hTT-m&d~q#uVp!+geN8&!(a}@#lAzVgJK3Lad|9R@ zYC$=rayP^$gl#~@csvyp?G4+_Fu>#*)z=82oAcPtj-N~4IiqynN-sC@cPMALAyCgC zxks<%bA>oF6w{^w%XlZ3dOt_(8JX)+`-->$`A?k`p!!G}~1Nii(Eg2qhIG0$=})zXwGIAgs|r+YYG2Jt1j ze^OW_g&grZrBpm=mwFWw`?W9wzcR2wanx$qotZW1?unYSlaP#CsS0?bR4rj@8hxSP zU9bI&QRk+N#_Y!PUr5;3+pg;*6zZ4^%V2bOf2M+Mf}5L2NH_UxW&@t9IqU-LHAVZ6 zXuFKkHTeJ}&;s)aPUdF}i<9M0*KZwvOYzv>7NUHjrL|||{({}LsdZTU4A@qhIV=H*a=TDDJQhgK@`ZK>m(S-fT2zx2#EP~|Rm2m1YO)g)Qx$KYVav{dk2lbze#(HRUR`c#c80VL z%XWC73x~dM9{y*k_bl+K-WH*fKr@ou#QJPW#Z}Jjx!WEwF;ZlJHzU6Cqc~tYq~Gswtwn zMIz#1Nr?YOU!+Q;w&4@VojOL9f-JtKPHtmm__HFeS2}F)jF;=oJm~yc5)@9#-I=lX zGj?t3^N!EjF)2IInsx?iY!`)AkJjfC2CEAkif*|R53-jq6JKFvRET&Vh3dte+20KZ zQ$F>~3HCna+&`}z79xjtbOI14zVee5p2rG(y=>h_O1-U3UfClJpswx$FdHIJ*j7u~2SuAsqoPzUZl*xmRnH+F4BSj`avdKx#9X*q86 zLFf+sykJ3!P{jG?9iih%g3|BEngv_Ar7tCBMTcx3kj|vEJ5-A5Xy{qE5X6$jZ3YIv z8td7a!{rVc?P&J9oKMowoEfvjs*4_x$7mEk%TQ56wW1UO9rb-aw0=Jru9_>`lh3fU z7qHG3ugk=+)02ivu8!{ex4H^lRBq>dN!2IZ)zn+SbyHD?N3Z>rq(8? 
zK5<7ypIhBCO&R{#6aliDOWqMU;kl0EsCc=dT_ zrqW;6T{PhUVcCQxo}|92#e|KjxHo;ASeWMP-N$cYtzhGPx~)nvw^sUj7Cl$h=7ofB z1#(+_Y}G%tFM6v_Yc(P`AXqT1tdO-)NU0lz*b5`)X|QX!;iyqMwm6jd>BemaEL5~H z;!Exkw-Wt#^6@;V7~Eb`P!f;UVOB1ypwucwicWnI&FBw#d1Ch-*mqDSS>zL>w-|o$ z{GHj=wAFT$%axCm+e~4RA=hZdM$~g4;bKdwfs)$X4AD+P zGl}ZANH95cy!3=`QmA9!jJT0xSU7!>3-z%39{H%;Fk0=^C(@{S(x=w# zB<_kCq69r53l|fQsGwDF&FEPn8c|8p1_NmLif@$-A7}158kPtqU-{#*yh|Xk>4;e5 zd>Nc7@~ln=0@iU!-Yf5U*qbzO>6CU^%~`eaju$|luR1S(AfcOh z0>Kqz@mLMxX;{|FQAoAHkz}CO_E|F<&yfp0y6AI%NRhj&d%}@WV)jD8ILC+)#(Oz` z;2mEP64;S`O`8xuO_TD0Vs`BDQWY=46D3~IUPO!$@AY0d3;11wDWOM*m95T|USZVr zJrYcMq|%FzFz@^jf{}vK*n+i6;p839cI!lVmtJb%tjxX-=E11&G^xNsDV+O+Z_?}M z!Usy|=~Uziy-z=F92w{}_VWZB#GWkQu2|YTjG820&g9K+h({}ZA!;JYX#k4s?#5y{ zNM@ALL^a2#P2%E~QfZ-XCp(ku4k>z$#&&`7d z2n*O;Iqvwt=gLDV(9!-V0)x&LY&oc(T*3gKHFeoSUXDE8-pQRQN$2By@PTjr>dfbA z^D>6`A8dVfIeay>otv8S?h89N=frz|P{2(*>8bbv4hYlQismE;07N!Em#dU}1U}~h zUj15fI%RpDb$EL86fA-`&8 z^E-U>+j#=0h;)UkQm5^)EWG01QTAc z?OzRJUXDidUuEtoEnCyfK~`0KxjLu)*$np2XgtoRYz<&*=?{>`Af*xb%Q4W9jA=S6a_W3qi% z@CLo5au1hPo1p8$lKa{IypD+GEa!B32zx_xWgo)QsfoA_Ps!e^cqfaANmpY4=N=Qh z0a%L$EVPAu?wtpw!tjKCDlj}Rl_|_t{+jO#ky>K)xd#@!d9nOBfgrQWc0H*##`<0;aGiyul$|J8=HTAyDdo0%s^X)`b+TqshTc=5 z+Wl@ys5)gvsZ}%EcCqEj)yKR=E$KAHL(Q`}?E8VLNWm)SnCpX^*m;PAAGlCFlQJSKQt{Og2WKxoAB(-l!7Lsx zqA3|GnWx$(-vV)FVnfVa^dL+ublTnFyS59{IRHths>9?mQ60J7)JTlreHjKGqih{NoKT+0vW;K%`SuyLY#-M#Hk!q@mM*A7L==yS{1bY1kVX4 zHoZ{lc7(tWNi{W5GmK2HxFae0)O#{NO%kZ z)OkI0E@Ne~WFF%rc%obm8?xhX+8<-^aQ66jlj1265hVz?Rp4f9^%4qN?1v(SRc@<` z5*p($>SrC(U)d@9uz}WUVAPc;DL!)L?x|Ho*dZuFfrEYYd(YeUM10q?FeQT*Tb*E zt#nRf%}9VtdomisJehFI!@B+E$s=ts|2(tjcfDK*v^EBlHY-|H^x7T0P{tIA$ol2Q zQ5UTEbN1Uf7yBHF$>_vdkG)>MszZD1uKk))NSX017M}6&xqNd)mJ&u*$K?PVt`8Lb zSVaE58+c#%oGfU=+pyDnReC6DQaon8U($%l;fFS zLlcHkmwV?Hn0+-t_#C9Zup{@m%n8=udN0ZLP5>ZY&aMiP9}%TK2&Q|oqmSIZKpLj? 
zx$2($I4=k&g#@_o6)hqMIEJT$kw*kq>;~Icc@&pEWz2gtoPD7gL_i^WUSq?GcnfWv z>;WHe%I$hVifbF_A44%2putf7Uiw%w&kaY!Nyxr}%q(VKb;+j{*=(1C1)B;X1isp$ zio)levqU6mUfXH3g3vZTc*GZz((_$k4r2qQybu5SY}+gJ+YFf^x@cpLDKahOz(Zmg z?HL-5<_?p}YkHP1wAo8v=?)}SPDtRmtW6_cBhZM>u!n*(C)x5DXkqzii}^Fs)pE%R z^eL{7Yh)7C4MO}Yg$gxAr%QAiO$214-5HjOh6;y^KtV>S^@Rb|3LUuFOo?o^!-8kB zwfuXh`a#Nu#->Uq{K|UyA^RQxXGhBp3UGo3-3VLMPK*%F5dasIr*q-EG-RB?rVJzG zYWYoPC-c?fKB28B<;dFkyp-~;dNwfZImGfY@H+H7z}h;?rQ0LtvH40Kt$Za6Icp3_ zJr)sN+D!2nvK~jK2zoR~RV)A;a1w`UIPL-iY>nT>+j==n?hYzQe>o4DV@Y^Jx9pcj_8Gu zxgUO(VX=!YWMW9(afjB6BfPKpIqJgzPzt15Z%o%P6twj_-(^G+ha6(xpS>S)K*MCU zXHJu_hsHa4rOB#IMExK<*najr)jX-Bg)${mtyDKhO6CN0ETX!I;A7uc(9|Fd<#O6I zwVoikz2>BSlEg>%N>j#|c-B4{jjE&+~ta(mz$zlKs9T%Q7vo3&Du z1JIAj%WW_~;9Q}gU zR{QLWEvV;*qZW8D28p#5*)$_UH#5#tF|R5k=W=Qwo_#s5@3mH}du|HOSKpjH6h1}@ z%*5|&BcJQ>s5HyMFORMzLBBC{B1ces%IT=#4p{-?^Zaxlf2q9(E^~G{7q>k}$0c|c zTr+Va1)vj3Ppt15Gpg`BqrPUJ+TqINK!Z)q0|mQ z&9Iu4vWW+GQAd^t6E}&$x;`wjp8BaH>|}bMd6V91j(|PL5GqH0jJ?#LnuqJfA**1s zIG=)^!VD8xVO|e%wkqSi)plH}gi^O%vO!)2+X0=a>jw?|2wQdA}`PLntL zSsDifb-&EHf$CIXh1ve(t%yBa!(`K9$V11kNWv$@3?45N9^nk+Wgh%YKZ>JhFrG9i zEU22R6B_hDUObsjP&!v$$v#NYPu>aXZ>V^)R4W1%qK*+19RYKgbHa*b<{()M)?^7g+;*zi~e3#N(j3 zw(}~i?{f=K(`!56>~3{59w_lx&rERc2EuxJ5#|(WxpEVFpo8afFN&_ly+#ju+XFBg zuOe8RPsXMwb-hwyYOOGp;NGSQOx4pg{|zB^2LK;5GVjYL^G$b==zYCybh`ON1~H_v zsN1?ZEPUm8)??kun&f=>jQI{jAqrw;cfD^GJYjlUa58VPHXAxz(b_QgAUzcqVWTX} zbv{jN1%XlIUTG0&Pi_`39n6(k$JFXhAuU;8vj)T29@5YR#o0=8G3XOORJ%Zf36fms zT&U9AN!Qb5&yr~rmkZ2ktMs0R;pYp#5;ZVJDL5`_y=!4lcook&Im+tpRHuZIS*lYfz3?j!lm+Ri9XeH#eWp0eS6@pTGDh?CX5Jx z*ns%p(fs3-iz}g^1*d*%^YP@tav8YD+xT4usH|tb;(F@c_Q&;XryTOV3x?m%q{xvM zC;!ny(sKD?*Q6k+WQ^v zH->ltv6q;ORx?s&F(iL18?ldXA7ra8ZEp)mly3{DEfG-UzW61}hsdVct#=X!&VY#5 zFz@|^WiY?OsQ2^T4(AJQ`|1fc3b7!3r{iL?DIsH7I3TC=sRp`q1mfu&@{RLyR7Y<= zKmFrZQT{42>6yZ4waI%y_RTrP>Fa?2oclBXu(wA`T+4W2d5+42+i5m)@}~0M`KkKj z-*%5(S2XX-^?aq=hI&O#@5+`K@!d1?m3I9$&FH&;AYF(L`C=F@73w@?5Y$D7suuVI zK!^J~ujrDV(MDO#)(m14#b!TXPTZX$$h!U*M>d5Zh1@jvX->*veWAKYrji 
zB1%j5Vkn2ATKtcXB+t{Y$wab{I==|VDP?jR+U1fmX|B4dF@3vbJV9>L4Y2Bz1QZNa zqIL9fpC+5im1S5F$bGvzfc1LQt^3pc^6=W$oH7|F_f#LG8_!Y3@Cl3MCa)F0L1~!# zpLNnjKa#IRAAgdTg(ruShut(R1h?W}ixk_f8ketpo%w3|E3d=uH(XXnjj_a!iW-BA z(#|9AmlOJIy^qPkNU)xd^|dy;w~=J)9Bq5GLeg3Wq|6@ZI#*K=djXej^$du4n~a2S z{x(^{P_7r5$y)tpN+pS|Yddris-X`kjVvO?4uDoKRDzu>Gpq8M9t&JkwwK3(@n^ZP zq5mL@%sH0btY`Bcs0X}E{~B!c;lv|sWbQZ$?Pjz(TTfo54%ls_2Q|oUNKkrB++)?< zMH_DzPj;T^#RydpMmQD%gW26ctogTN697J?_3r4F(-}*-UXs%0)5K)=oqT6txoHC2 z%kH%n{sEU{CSK3`zNLiW8^~Py%Is>Ib$l>VaUaTxAn9*YTvb!*Ev)`H^dfN=dD(YRX?8uoy9 z%vcW$T)3K2OK^$Jn-0LM&$?AMC!OcosWXLLiyj7Y^e6RukhJ?fOX-M<1IMjyTsDZCC9tN zBss@L4gUzcrl|(Y5nvj6cgNSsn*}O_L0$OcZlI%WzG1Bzt6--T#Fr_)Qocd=_iQ?w z7h^K4PVixYV_E*wDMLhdoO&EcKKRDe@2y3pn2$zQ@)iMu7E*f+2VU%osqM(!YwP7w z?ty~!4%okO#Yi?#d7x@D#{c!15WxBsHMOr5wHyau=MtOI3##Bdi7lBZ(U}dCinnQ9 z+~;7tv}{%O<>QsyJ9`4p1l%Y{<38h@Rj9f_vpZD!L9PTPfabt=XRAe%O)(0I+xEwE z75ezR4Rf45DFA`;_m}OOpZr^|?`1iezJE(aV1|Gwcbkc7lLtwart7ysQqF+Ep`Og}vXy zy~kX3hx*mjShkGeY@te_$qh{WD%2Y{J8M1^ZxVd3X%P;m{ol-sugG&hX*@}v%cGvt z@6-|Mv5YE3Kz#=)*5XkL`~i5OtfJ}U>@~)6Q@!1HH~H zX-pRIn+%WwUmFKKSX}%DI>!6&;rP8EQm-%y_BvkQPu!~e8L}K=L?8GsrH-$!e~W-N~V~8E#QhX)?k{|MBOffUSd}k zA4C{lb2vZOsYp8Z<#Fm&RlKfyFCZ7jy)y^=P?doDV`50~Z0`M@`w_q6 zXsX8}=N?GpvWzQZU4#p6UU`AuqPJoj2;cQ7P z@UmilC9<9IQnv)BsnFR`zZS zfD@*&<8>{hyT5Q-Cz7)$#3KM2GVe}a<}Q%YoguKC7TD9;xfNKyQm%Y8;GEcO z_TF79`g2&KVv%eUNlHLL1XUQzTpMF_+}2}rda$Z50Wrlgna`<$Q2zO&n+KKo|v8AioV8 zW1bejzfJi)eD7ls14=VczShYhrMf)91S$XW)c?$*-wILQgl`p{WIYlFv5pWteWx)9 zEV-)wHnZ89V)DWg=e?O{PL$8_Z9?}x6-^Xrj5puXQlPMnwSqJT{P7P^`L;RJ0Dy|t zFK?^&g&t>cdPqe&*Gmce*TOpD(Y}RijV=7xi+^H4h-TWG8)KinfE}}2SlVL$UFe1( z^73N;NQ9CZByVh;XEj~X^>o~;zecwPI-VTA_iJHa)07#$FwixXKmUBhX=kFm#S7le zay)C>>I8AOc&1_wY{c0n=RM8Z0T+ZaxQ+YwJo&8vi}_hr&!_u_z_lzgcMS_=ZJBCt z+gnNqzsCFUYtdh(ZN-mh*NDpejSb}|5EqM1!vvk| zm&nMaa)XYWE*C0=IY{-TRJu99kXsmk0NbbW6#JbB87%yy`y*B9H7I5+CJQv|yF)5? 
zoM+7GudS{ZJvXjhM1U{1;#y`;X!khTy5nJ}TBN?rE;6ACRLQ%5mdjk7LMNw;JzG*r z$#0IY`uTt#Q*I3XcYs1cbDp)Xtxw-SnMi}pv$kGVT<_RxD^y}dPHEi=POfsPu3)X~ znd34LzvjG!TO3;;o5#hg+ns-T+lit@S`6dzl^!vQlmTMpJ~J61Y_E1v{`o42p69av z=u91?q_M#(u}ZPsBo-(DyDGAXCmssASIsU|nkrZVX)MoOh=UIZRW1B?{#u^58T3(& z1;BZ)JIFZAmgsJS6)C)Tw}3@6z*$li@>xWA3?mD;vKA7wi_m$gzqSDYkz(T>70d4d z6sjSm*GZHhpuL~=F_qR$H**D?0pXFN0d1sRqb))RW|qWm90DNW{y=;R3krT`PGdf1 z?Ya@pdnavvH|FgOAQvv&voPfIztNc$D;l6nr8NV#u6_G>z#$C)y|6HPK_H07)Gpru z0~f>@?)CXq%T8|7#|PE}E;$O}EYT*vHlAMwNnX`0^Iqh>OKT+(0dGuyPNc4isnKpC zmy)qZqW#YA!VaR`3Y3@t9B1R{4B{Jg-&KsDa^9bt4&$FM(cxFY)frY{5%cnSAoF`) zp`zE2_{B1*b+b$kH;w^mwpI@Ac{r^$c0Qi?PTRN@aq7_T33j1h{XgkEAawXw$Zi=|zn zT>-9!Jzz^!uZHsv$=%zhXHtH3444|{u>r=5@}2@=2t-Ou?zxctnq>T1x=;h%9Uq`d znrmW~zPCN!Sj*472<>MT!TlMJ6ixgRI0pAy7p+!aR@OT_W#_N(cQJ`l|4Q-Bce)yE zVKvXrbAYdW$jg4FW%L#|)F@GNt#r}E38HCqse{0n+dUZ{JYC>N@Pdk-H6F`{8dYl= zeZ&YISh33KO^D4R+Xhb=$0tzOOTt#Cm2RV{0KNmHiP7iJEdt+EZy#Wi`27q%>%@xm zJnW0_1)(w1YrUiL1?>EfI|!#zJQ5KbJooNz7B-m%Q??pORjtB@_(!dya_a%8b&g!) 
zaymSyqb8$KSRnB%4u>vY*+ilG254!pqM!;+*8+)eGOEqG;{`;l6-W;(k<4^*B2z%# z(y?5toLQ0dS6W*X&AN4;o#qEGD^m1ekY}+9s5$EqWGxl1``PN6sk|RGEGXG z<17?0`U31BgA?pYqrA~{SA(pYZwPL<>^Vb zT{`&XL(5{~`z1L5tbGzxe;jtQJ>GLd+^bm*c)uHhy8yr7woAPFMx%EyVFl_>fcAqv zZFx`mF#@acd}RyF()hC0T=LU*%BuO>_h&MeO8vWV@J*F;y?LLJ7ISl*yn=v6Rb$+W zxqZ$bk)ePIKeYQ+v8Pab5vj!}vm5wNF9-FMF7>kx;tO)>ZkG1|4MpVdq_HL{YoM9{>GxDg zfva(YOs_+>!y%90pkIRWf-6*3GM@AkD5XSau%v-eH4bQ)gj1tCgAs!SfJjh6TIsm=n{ z#re2F7rB;x-bSMpgAN{fwmxpz0%8kYNV8wg=^v?>*S3`!=w3O70JTz8_sQcfRZ znr{Uqi|(MOOGqgYU!&zsK9I2j)<&${XL_$0jhZ=H9`O?G54 zL%Xj+WnY;3TpCs_e2;2x{u`V{Hv7U-v}(H}WJnal!IIkJZ0;?NJ4T?44?ZgP_Y>ap zs``sLe|`7Ds`9D2KB(14z)(wp6*+uGLzjYnVL{#UB7S!Dm~=`RV5Z!k}j}mR_O7=qZ+&Q~FX0Fuvjugt|V>KEyi6byky9uX{RR z!;p4ZFqhk3jERUlDuy||0yjVMeCvFyB?qkozPG9OLF;B^AqPh0q$V4jk2F%~=p_e6 zoY!degwmw3WiL4glOqr+Qnk>4XwWMYDqG+sW8bGu7L zl}xXmDE@OJp%IR}4{~oBCK2q;JvwBpyhVC@Px7GN)KT@YJz$VX84z8dDo^0NQ+L!% zrxm6$mL>dAEsQ^)Q>w_$Ki2xI5Aq|8-)8j5e!OA(LT267cJdLqdg4y&xUj z@T;cdp@ayWp&ON+Zf|w)3hidhkE{z-6U;MyEhT2YUqT#LQ zE(-v*v-49e+c5bx(#3C@53eaI)L6QoUz&!3!8ue}5#q%QVQy7*lQ9l8<38RVA6)Pp zx0=W^s|oD>uB6YWaskm>B?Qc>;fqL+)hu@;wBc( zKnexL-J!E50q`o_%vURG{=M+=W_%MqdV=(L&rhiW+Ox#^`1jse|&!uEKUhkD2)0FoW zBKVBFv{y46VB|JQV>W(H6~H8L+i;RUXXDcdZsjz7jLF~a?IF%=TlZ*26!iXm-xNrZ zybnErmjgzc-d%Sr9$kn|uYEMj+VcDv2hH=$^ZTY>5SN=Rbbn*mZ`cf^#Wa}Er?Dq# zEjuK7FX3Y?7-3Hl{d#dXy1x<3i6-LAYs@-!4n zMVr-B?OYvPy$T{dsy=8O-kozm6n18JXQ;AE*pGIGdF;hD#e)B*)qm%F&^Cjx;=I59 z(!SCgaaq`>aWi130nIqYtlJK+2T+GpZw3q9qKRJ%*0}TzgT~wB5$FZ|&3zeQaN4DH zYk|&3TNoHFcM6dz+cFptUQeDKO6R zrWSsB*kNMZOzcCUJuXNa-12Gsi zbRvyahzhaaILfPb>5NB_!K>siUYhE~&G#g8QOGVsFO;~q5HAq?)}BIl z^SWPoh`}7|C$7Me7D@QlZe?gJ&ZPACIliu_&S3;eX0MiJb%>ru14Jdq#oko6!(-0V z;Zhy+*WUX6!IuCZUQ=TOwajJ>Xg9=DIqN3RkC5`_d$;J72^G2$(A8Vjgzp^{k5)VH z4fszF7pl^NKgw%v(aS5h&Yo3s9jzG0#}*C8HDLh_(zr}vKNT~PY#hL>YT|P)c6s;Y ze1T&)W`H3a%<@bftod<4P1nTt@Bqsg=@^TIZfUUav7do#GS<1cR1PZXE2najAx3jd3XGO?-i$ zqUn$501IJP%F@<{$sJv_Cjg3_dDYt}vZT`ouYZpED|JQspoZD;xRy=VSdTiG)9tJ2 
z_S!JuI(=6f97w(@~u-fd@_6l*1gks8EVeA+x)RFFL#@uX{Zr>#j}W&w&Cz7y?A6O;BwfFbpq$j zuX_QAq~8DXc0XZ3o+4@(p$kak=vG8HMJ8cM zp3;MwIk(&dI^F!&)O@1SQ>))58n2LMY#NoZxr{)!o{p=5XGBHjWH~`xk_~&(GwKF$ za9;53*&43#I+qqRwD_QhKT7}7MX4fn^{3e@X=MhG=Mt51PY1(=W_CZz3=ri370V#L zz=*7v=D9s|GWh=RObIe~L+Evydb_#cC<${b(rINpBr&T@uhq;4KA|*-&&QC6C^C$p zC_aW)3=5f%g?Er(48>NZ>sO@UHz7{9oOT?1Sz!+VW)-P&BEX~ughK~dIDZBNkNj1{ z8CUGqQLgZX8uKxY;`I#-Yyoz=))p{G{cyzjm03~I870%aJUHZHRzmr{v=0?7?#G9b zCjr`}Nf_D1Q=p?T>B6@hCULykb7pScN5p~gAwc84ZOi=w5zD}ReeOWMlRh)mQNtiI zzL(irJe4=_*~VQx$rj}`9#MLe&gUMubG}ZP@AEwc+_m`i!)dC{ANiIU`#yn2XvT9T z^dJjWDROK=?lg-scYuP$uUN0u+X>(|$;N_MFhb{l70~BsJahWa2(3>FoQ3OA*AZBg9|+Da3&A$y?_Bv7A9~P$mR0J}CxUcqZRY;!8FxrL%c^-aJEfe@cTpV6mkxd{hFcZQq60`pK@_Cjp z=HK~-CQZnAb7+)Tn&>;;X;8HuLL)V5uC=u$hM5fc!f(k5lHp}(EKE;AQMgn__fUmaq_v9X2 z%pa_jB4!qm_wf9g)Jt9x7<&>%p7QbPz4FdrEM*S+v{=F0asEl=F(sPTtB1rR?Da(- z1sK^(haOmx_Ig^7^m-QD|2jag$ZI9&BUuAQ*!{ZgL>dbE=}iCbxI%}XWU=O`-M+s> zUS^R5bmD4+I;59&Hn(qQGPE|$$^@geCxR!`2rGu6Z%gQOYRk~SUA~HO=e2a$Vs3_| zl8Ho_wY3_GRs^Ha=@(sbj-pIU&z}X>b+D(Z_U(2RTK%*YPe(j)KD^@rDz%MjOPaQ6 z6b~57j|jI>q15Q`-g*u-jONHgl=r>#hE8sc2f8QP?#*nSPvS_%t4{3eA@AZzL$uvL zzm_5=>y}p#2gv(|4~+-hjr$E-LU@BDU(ZJ<({o8;N7AJTi4AYj6`k`jJJ2bbi#c0; zx~w*iL|VL-u7CDJEgE2m1yyCX=cykXb}@X}ld1kraMAW%fAhx4R9ghCRdWUCtf%}J zzwwwzu}uB`!=18?LS{{(y_ZaNMG4>dofy4b@r?aFue?q=MD5x5T$9OG6|5O$UW&R% z%VUsMQ0IQ-{N0pCRdxf5s56qH&kPU}T-KiWhS7efUJZVUBqhACl+98G8=?xwiAMoF zFw{5Vd`K?XKO9OzPiNfL>#HO%wX`oBoS zzfvu2n9J{32VQvkK?L}e`Vda`Mj0$%m+-$ytnAB#;g z+xhLXK{tky3&rM1LDLg1m`8uyHosZyf7uTzOFw^HNuS#Z#ts)h)}wgcccvA%#5NUYhwG_Bb}wIwD`4~R=DLwd?R@p(D(|A%7`(#gK#8EJH} zJ-$DT3@6G`9VH!4kgkDN6sK;_?f#+o|LX)KSrr8lB_Z>f*w{dSznII|l2IU%aR*#_ zpz38(38W4nx!4y1E%rDnwm^ToGxS>x1mN@5-`4B5`)hNGk^iX%vax`U_M8~9-9Hu! 
z-~xx-X83Yz`==mjvNu~>3n&sXbuN^*bsL?FFP0q2KBLVr0;;kdAah-uUb0(xfuH}) z$^YjGSKMRxcxxMIL0nkyJeG4g%CE?*&IITwo6j8p2@Wv(<3-mkqb)7P>gC_AMPA^J zA(gYtJP^J+FsMAS&~9|F|3$Z8!R z1Q}>k=$?fZ=XOjSCyf4cy@UT!27Yre|E{FSYLRSH zNr1MWrj!4=#RBw`6V&cZj_Zv9?lNn@+FcIF8Pg-_V#R=Z@5_z$UxgnZ7!hO!{%B!?66~?O3ruO8fgb8bQ3^fw-?>IrtVOj zH;0n<0Fz8Ppt3Y{0K)xd-~Y3=?h1Hid=HZV$$n2OVfB8gR_zd{TF3tp7`a z^|Mvx(H;fw01Fg5o>RQ`#&D{wp68MHkBuoyInsp;@z5#DR(9J3fh1msoWTT+o@)VU z!)1G>x-e!*Bu}MeBA>(ykBC%2y0%g2n~^5gkQFftyCL%+$t_ z|A{bDqv*gbur?~v2>5r|0MnMv?kR!5p-AKZVDGJ?s@%J_QAII8!2nT7w?PSrl;lDY zQM!>ugLFxEsvw|%O1HqGS#*~Qg0yslO1B`5z&CFbx6kvw-ydg;GsYR`pFI{s_F}F3 z{@pXKd0p2W3o`_kpz#XO8!qjSce+R(x5px35oYP#RbD4m0^@wS>Y8AFc6S*#uHDEq z{SrkqmsJYBtgaHO^ECbM$5W90n3xhxZ{KkBu3XZcwV(BZ(3R8aB?_5^z+EHIgs885 zAQXme^@Z{W5S?knd*7=V8!((GFkT#~H3>D|dU(L@_tP&KfDmgIjYku9z3pwH?ozv) zK1mpFR*@@~wqEd{!@J)FkABrN#E7RaU$5n#f`)u&uLC{5A9YDfoFhF_mZr2Ezc6Z0 zM#Z|~RGl4C6dV>#q?<2WjY?L9u$m_5bE*?Y*>~mhGIn5wS=<2zNE>2@MaUDgzYVkV z%efz$?)vTwKtW0xQZ+%;)IXp6oQ2^U?gLMmBYDJgJ%%LLC*u72gB^~qC(Nd7rmAh# zIm#~s&7*$~9DLRc4%b6^DFN(rHBw1O3H6eB7yE$-1E5i?qf!y=?>FBciv#X0$q4VK z?AphNC(|IvA7&LIGkZvCfxfPb`%)ntt&OI=Qodb(>Q1ly47f3o!GASQhf#JaDBj48K&W~p3wulA)437Xt7NpGXh~Q z!gUL~MC}OX62_p0G+85vrY(chtG?X5fy|4GDb=|8HnKbxGqWK*@#Oxbo)Ja!_vnUq zJ2OmBUnsXR?#;c^??0@NEJJGUcp~ig$wkr?@%#H{Ksez^YugXsN&Hs?ELnk_EV`^keQy`1O%M0Y@@=1$l_KU{m(g2-U+WqS z<{M4^5x{+h%)0x)Hz}vMMRxe+COe3^g0s`tzl@h4B6jO~qv=E=Rw2?ak&`dt>f_IR ze;+y(b#08=^|xw!wjO%3Q417nmT~T`RP2b~Uu#BOpe9>njz*dRks4he#E`=`4rx)d zCfB&HGkx{0s4Hurf(hUOq`MhWit*apiA87=pzG0i?fJ?8myLa1rw@Kh(A@5LJNa z0{&lB?52Y=*|89cbm2*36%(GwiIrgkohM~m@Dvk}KF|dwxL zlhka!Q`uG8O6u%A`nCmvcA8&TyjN&-lDNs0zKz-%#2)BJkWt?xQJ1cq-QgV}_!V}3 z{@eFT#BKIQjQZ7hXtE7Nf-YwP$*M%?09&#!6p*cabcLF4@eu8qL%bx5%chsi^A0FF zYPA0^(mHDG!#)VLHJUkgn<9Kb*cN56Iau$Gi*E!fCX+O-3Y8a0N6409I*EgTcqGw9*ePt6N=4YZynY?e_VmK zuSl^JNG>1UyMZ~F&8$^@7dSm>E<4+&@}sE#*Ln%Kh=%fWh-FhMFU*W*3Wt4tRMwem z$WAJ)(tmHB==GdBo)E88~ZJFd}AK0(V^t_xxo{~J`ziDKD+)%eP zTsk%?UaKito7eEv(l5UOzR6g2oMC&?N1X20`@pD77oxd!xK2aJ@sy}b65q%^e-s5j 
zCQv_<7PL%(T=#0eu74T8-GPGFL|ULs9;6i?CV*H01P^*^0T?8k0m8+eJJlhgY7HF7 zftwyszaEj37?w z*ZVRn7Qi@FR>4hU0g{}y5~{7J$MauT;@6M&ebXY` zd2uP!W9a=ocf8wO8EQkb62Zwc5BmPqf|WclUow8}j>~D_^^@j9MI*_yrg=~~(NAho z%oHwS?Ng&)%m}#0Zg}5L4KN|6O>80T9_CQZGm!h@Ho83BRpP`)l~&BRM#iI8`~2dZ zHXQi=>|FbD-ijkiC|hx`Wc&LNemgdA@MR4=mVQQ4#PkkKyU0tXkmksnn7-doTPT&) zbg&;v$5D?Zgh9DH(NbyL#nGXXtM^R*h=JHcm?%gDp)-IvIk%Px$3E-DnEE5pNHVIQ zyfNDSu`*Zhb6_0Q!$+b>PDm^d^?@eX{Z(3o0pm_YvQ+|B>7O_yegciCu* zF80DFenP=+6oQa`P5Sk;`BU{S>p!KPt^ZFQ9P41z zwPm|9_#Y___cHVdu0v#uztjG&tMKg${eS3Eq>!{V-v8r{s^>$gvH9Tvq0z@TkMa?U zen(0m^0amqS&ZlMd>L!Z0sqflP+!u#6n@y))Q?^bZgz z*bF-_YbcaYQ%5}6C0i7{_{yJLaM{|N<@W*N&VB`HYX)la0w8!QLV%hLkP*+)^5v`s zf7ju=s&=Eq4p6n9d`y%izefop5zS|f6tjp?Bp@?+0PXaGJ43*+{wABPhd#r9RF&XE z_h(0%8HXpED&MGmoe~s%onT#Xx)O?Vy)I>or}(lipm4|s)6^0t#Ee%g;#x~OrB%C> z^-7O!jql|9mk$Qa>lyOFIWrtubSsFyiz+B*fXR1t1UT23zekz%+o_ zM_rR|D7be*#k%LMF;eO%jr|9TIZHekES@X@RYe=l5NMq{!G`$Z)(>q-Va>rU0&VyI zxn#`bJ%soetLT7=N;5e1&Jj7H%+6!vwc3E&WM))s_g?hVQ-fO?jhOoWJR5JR5kUu( zG1pq4!|8>j7#5>cW^aXW9h4^%+Wu2+VikrIl@#Y9SoNPj1y?ErRROynLY3y|D}YS$ z?Vc9pQ}122VWY?e{dlBi@HPSvEW;70&c{_i=jwe_g}c_t@I?h>YqBdvd&?lzMU<>o zZ=_lEnQiUfQBc3kxVL~%CodIST$dvM)c0C!4Ix{RcXfpfKNpO7V*gz;!v%Oki5gT% zh`QWf^{5=0qw4gHJA_--6=^CZ`ATv(9t)RGm%RA=JH*5!;nF!BnanJLyo{XOQ6P+6TkLxe_NYZ`5N`9 z2Tq0+5+Y9-(zC6m<$>1kWOJhYH&#HDhv2iG$2(^R>l)w&dweT{J@xeb0M#lni_=Bw z)4@+~eZDdK1e_o0cXdfzV`ln&L=n38(;Y_u!V2l(5!QMWLEk&!`L3x0J7tXbF5CKfKdWLv71k>Dk6`#jub+>?DVpP}RXm|`lzXjkHmk=)PP^aT$5 zTogsSB&qPC9@H6i^-#Tr_hA!{I04c8D-`6jUz$-iOsjpSDCB^+nCxQBzlBS3rZtQQ z1DHDV0DuBLUer-~NI2sKq4f&li>s&`Il6)r*$i#kbV!juYYJr*hyzN_BjNx<9-*Pr z(U*k$SrMZ>fbgEb^1U{v;_u3LOY(eidEC7+8oS{p?s?0TPF*Q-^^6r?%?*djo$mNk z7Fa?^Y^RjJwfyec`B@n5dHFUCfiKc z4jmw*FoJ=dm6mX{-B{?H*IP(~y^vx|VGx>1DlV}#3;`%~YNNbh%&}H?Yh-$a?N@cf zzi^o79>4u$D@GjsB4aJqO4}|;i`Tf96)2jXL)wT65b(<)f}BeNju4wg(#IbRA|ntj z>c-b~zR#0i?4t|&o)M_%-N?=QOQ?=HJwcFatdY*qJCNkqZHApg(~k8ILiz>Wvd2@1 zIm^k!A@WqkphmE6y+hptknoH9wtuD1Z?P7zu0Ckg)!MqP%W2vaE3J|&VIFB}t#}5H 
z9c+|)py1a9m3CrZK12{w5P+m6GA9EdVSrTqdfE>6WKh=EcgeJ6ih>#E}pDbAMKfMmsoxr~Ku0%y+i8W&x;p+BnD{eze{Ul6FbWzU6lsnG;#BH7Y%d znCFNd%CC4-XHEAP>u8 zD47^#xEe-HK^h8)NCyxR^XrqAH4oFr{hD`=kcfPt?KoLe$`C=H8bYs+G#(bQ`-1_etEvKvW)l)ZJZjYLN@!_Uwqg|hjdzrj^}*~ZfFvft_X->*h)K{anervDqb{aL&G2&*wf&@o{4 zWRd==z5n$KR2)$5TMXacHJkX?AHe`+j1l+oPuk0`8~CecD1tdsB&iU3cccEtpQ90k zPf5%F$58c;UwHQv8ixLBt$(Z|`St!t)S`t?DY93S|JF-QdvluS@jfM}Jbd*hOaX z|9wA+G{P8i}`VoE}6pp*U{2zZV;2&s=h^VZ8KS=-j)&ED1M2AJ2s0kXf zl@G(+%vZTafY&13g>mJ75=|qO@c%BFzK$DD>GIX=o82}s;=gNMdoehm&>lBU=*ejf zkzmHBv#IjwVMo8ipvU?EphQvq)=Ifcv)m4_ zvP4E%uDsr5=e!>u!gIGw7GCDvu~=>B4~-m%RLpPMZW_;V=qsA#w$QZ!qize)>k|;x zy1{=o4Jv|1G1M@w1*8!HR{8`M;4p7b>?!|wM%*@mL^;L9D3hg5@)qZd=KrV*dRISpAQP0 zShugolTI+iQ;UV&L)5~Er+Ypi(Yoy_^XR_hDDPGa*qe}W@aWll!hOs`dHcVDcs(bD z1I!cCr_L^D7Ys-aWmGW%IuJvL$er?soOKc-q_63fCfB4lYZ!A=j0_sF0H?SijP;Jm zmKSw{@3nt88&CiJ+r#-67n2pqq*Fsvd_f?@Pveh*-i^ay@7w3Zdf za8$m)Urz_&?McF@Y!_Df^SVv$9oa@|G4zRsD&`WJ2!;*kwj6zZM1`sUn>i528_ z_lRzH^~d?r^|{)cOZa}lJ;{2LDl`R+emy`+S&0$#c9y%^=(<{l%c*Ar3x2JpG9 zw8oCB?*pGtu>s24RsZeIBtLO(jN&bMk{>W0Ze~}V{y1nM+;I%gHF-IqUQ9J5mh?jJ ztp^7%=*YlhNr&7z86OG7xsx-fmG=h)F6JawMsS|m*1J5{Q7=!D70ET1988nylG|nn zIKV~}Z+goa-MiC6Ta00o`FUUaEsVi!jQUX6fSG5WWU7gRv$Rkipdr)V-9nldZylq# z5wq6UFPIh}l(SA5_t~chir+UZq2J5-AGv{;@t7t)arrPI>g)S?3l9gL&8m|UqBE#m zS7#WE&OSUTo{6Xd3t|ZNek8`SFyKZDc>894f_cW49z@-*nep}-4jLiGTbU@2QEJK6 z)Qq%eBBQOZte(pc4HPELnm@^y?*(_`%m?WMAf>jO$*PUW!(4lE3mcbrtC`ObDCqrD zPU{##8dI19bQbODTSR?*Js&T)G^S!f28Exi@)AO_C zDz@{|`92-8D6>3ObodmNqzdlTWz~plOGk0-Hczr>o+oYJ;FPyKn)>zr2e*|<=S{^p z64lRbjdSIF@+FpH)5;ruTtm)=jqp66)GFj7>{BW(on$%vS0&VS3{8S!;oM4@j2;4*+ z9kQ=S^RCB-;@eYm+0)C0pE)F($shNguUC}gV_0xPU<@|eom$Fr$=G4(3cU%peadwS zj>6U^CwA(oL95`N{JyXvUv0x8>Lb0=XxI)CYsg|*Jz+9Ob8Udfy5Cvg>kX=dZb?4F zWb*HFQoi~<(`A~t6wyRDmt*BV{lOXtGx>fZ1?{f~9!fAF0(>zui5w!irJcNOCJCmT z{v91496fRxJ$rJ(#hQ1$8>o80PicdEY4fGA%UY>dzf*dUGwCmcRQG)E^8(r=?7=2N zVP9_(_@&oj&m7At#ym8);o zW5a8oX8a6pzoB6r?=<=~wYSObLM9Fv!W5ZGw-|NZ=xN96$unCo#JZcVgj{6+sc{D%7!RsuCUH|m#zA=_*rU0 
zmT!z#$%n4eBLlNhw-mE46brUBZVf%BrQ!t(Spb3F67%MrW&+w&#e4)0pTw}i46|m{ zAww8#)x|1LC#E(0HWuJlJzN#LUv8&#$?x+f75_Q?O2BzD-+IB6pwj{# zM;54`8JFF!F6tzlX1=WDX3=!#KwL-dsFq*o)?%=fh!(DsA8PQjWlF-Sck|_j`SpxF zD$-gH^H(0bU$}fZG>TUM4tU(e1psbLf*F_s=)Vuan%rjDlhdVFbdazrhZxC z8XuT?=BvNPDf#40KZCUVJC_7^s7KK#ZC%md)$K?me<1#C`dJU*wS(-8Sb1^-GiE?$ zC?9sBO087tOVSGld&SMt)k=~^Bk6lO@%x(cobUQ+oWV=N&26=M7<=mZLQDX~Nvq_$ z4WnUskMQHr5H(iz>USxkV`&;+o%ahd)Qk?%u2_52lU@)eqDRqap|3Hs+|{_!&!RY9 zP{Gz`Gem>Aedwkq1}`YJ+;Cvt$HB5=IZeN_&@AcoF}EoON&je+*$MmXn8FjSrb)hr z48hV%oEe7Z1q06|E+x6UcXRanO9viCh9YkLxJ#@r-CL$+Hesl4L0M$8#&=h^x3zq3 zhYUBwhkm+WtQr~1Dzvou)=ay^xX11!gIPG;YDVvk&80R8rx>oT4GpCuzfreEg_t_r z_|;xRrf72k&UltU3P#gOKyFkFS}C_sC18edeRAo!aapD+cDd62Nd0Z~hL8~tB1>PV zqC1!MdRmnUgLOED$Qtd}G;*e~_1JyaaZ3fH)Hx}h5Pb3Ds+)Pj@;v!^z>NzO{)1*h zEL%i!8_VEhOHNKFWbzMHdgmZ^pTtnC{Re&vPW|Url_5bTWIg49c`I=seNFLp#fIuM zK9^u7BJsE8z#m>uz{icZ60f_SZd@(nFyV!Zi}|kg4ZrZd){4>Lp@^w7*q^2MPrl4CBKe%>ADOyLOt*Fz_*jMV;q{)qkn1-x29f!L2 znvXnYW1K6Tz#%)GM4TJ07q3S+%`s^+Jr-*gI)7nyes&!zkqPPff`pmPAhZpaShhXNHHkB&s22pL9B^w86%KF>yh-_TO%r zFi|&{&e}RQ5^nA@OE&x_Hbl0KYNi%fHUM2 z%u{EZIx^C)*Wgrphjx-Y9dr4KSe1-XmCWI)lr^cY6l$qYAJ_2-rHabCBy&hM>Xp3G#nr6_mfD4-EO()zEu(=w!luh# zdkx3TnJ#vDqq1TMuKVCyQ+H_|K^m}_C%6cAS*CovX5ggK*=P<^)t9_9s61gHQsFbS zJ+T(f8LRVl7f-QBpH~?BZ3D)y3iHXP>EMF1kr5ONgqROLq9LMZOp3JY;*wc>Tpyr-?)B@EYK?N(B@baK7&-?t4gg$%Vp19S5Y%{VGKn#OR? 
ztTzIA6u#pmYPm`GBSkIjSH+v;bRReK21gKRF;d6QUt<=f&x}xQFGi;)1)2!dygU`B z8x=aECuN7-*&J$!WT3lG-fBtHy}CIhbiDxe^;~EHx=+NEH`4it=y}N~oGw+v1cD>W z+pwspofj0{jQTFD64wm}?$PSej_cBe@PO94OHIg%=)-No+iGTy6yHr^ohFJUJ#)eG zw!bSiBW%Dz!aVF7@6XMg`1C~U!=q-=3)IC;krG^!7r24&jZnMi20uz*o5#9l0X!C+ zQQIm^x;N#j{6*ltL)^AF)b>slCn|np8;j@|s%-zR@prXyR&Go3O@g_FcOq_;p3FT) zCUW_*=E&DCjqQhH(b^{6>elbC>(mLQ6A*Q9hEE?J9JHgh8og^6Q-#yP@XjkFgoM&8 z(x|r~bb+(h40iyWjYSz^p^lzDh8eY;#L%GZ zlQPsTyRaH{Ms-9MWv!PItr#ZG%C;1)khfY*gvq@s5ewJjtoZgo*NMfzFf$?A_6!V6 z2Vj9+xy2g<+)bR9)cqvZKJGz3)qO=q>X_2Skzhzo3C33me_x zWyxOO&lpj+S!gQ^+_t*43@!x}FQc|wX0|t%MU7Zd6i>F7Ga6ThPtOt_1buvkOP;TLwfzxr746JliV|m{j!?H=9jGlT9uw#7IMHP^ z-jtjk8z_WM#|Q{118CwP&(o`BPXE30lJ$2*b0O=Jz<{`(bMA}k-buiV*U!_?QIShq z8p*$R;m0o5e_7Je6=su*;=cD|ubr%n+r)|m8}FJl1^W(l{2QxIn-%lR@iR0+%IuLC z*VcViKIn5K3ZEiR(D6qYOX=!Q8Kt|piw0d&E8S9rPp~&48T%*YLlJBwb7d3dzAK&Q3^lSg%uB7e9=0U`k*Y)lP1dt zHsp~kpRrxeBVk`sM&uF2ZNt7e)% zT5!q3IH8L-IhfWc4T4PG>Cy}(nI2I<(>)EPFX~*SU%>x;mospc+pgi^1YV8_dS+mJ zlI!?oMym8C!oWoK0A-w3wC?+u38CR3XHwi39a5Ut4Jo--?lO1@^UPa@1nVaaE8xrI zy%{uQx+wC*V8BWtK#j{Ol7hD(S3D_k6t96e_kwUn6Dtnq;Ur9%v#^p$ajod&)%IHT zjmFqau~BCN4#}n5H&4#!)DiadD;zbYjTpV@maMaBn?cgds`SaZKMq^yK!T}lO!i~eEr<83q46dNR1O{)+yAZOS z{s){|4YXBcuW)U>1M~65(`7q8c(**Qz}Di$enH$g0p3}qF6%pe_{(R$eVhRF2hd#R zz~;h@Jr<7V!%*Onh%ywh`BHBDdPmu5ps^+aHi}stdsXs3?q5Z4Gh{G<5t3H3xH3t zs5s-mb9yV4p?wG+Zz!mfcWbFF9pE{H-@lsKhZmfl?CWrUhIREy$R=eX(ejq?*Wdiy zcZP@}d23NV?gc4V`wS*HcAqokO)IV!QTPhv2dB)oC4EtRPcC{2lg0pxB1lB)ph2)& zlyarjCoOXBONngx&Q1$`atD1DPIZ>qW2_C5m>vkvO1M%=I*eI$T*d0k;Dc9#&w+F9y}Z#q#r)@jv*c|S*35L^h@pq#dhJ_w0!NgfVm2i7hDVu+q5Q`wdl38q<@;Wm$!EjlDU=T3mVB40NPd<&?U&oVrzhzgy9@@kU@>=_U28MTnAAnQJv2{_J7T)=asasyjD}Ztp(tcXNDkp+=|A#BdD#Va>O(-JYv6%a;%|k74$MtAnJl z!pWw;@Lr-}*mko^gE(JLKMQ6|=@R`w4LEI>mT9lNJ}C7Hy(|lq3)qS5S6ykkI+2o$ z3mu3M3S9v~n1H-tteL~%BW@0-#f@1S2@{^#$A(t-&I?@%j&?A)%H~;ZHFJ&%OE600 zZ`yWAG5VLBAchP#z&q~2c1CR$t99fDYq6c^v=HMFbVZ0*)=$3#h(ci?&cP-yDXUWC zyJyoYgwcBDdgGL?4pBa>af!ZvBVg(uXsU77k8jN^$-h4ros*xub&sx&)TxP6heTP@ 
zHo4U%>5D)xX-hD;n=HLZE|eGCF6^)i_m;Mdus0xj-!q*hv$R~2IH7P6 z*}H2ofz|?o)3kZF_Pj?a|MUVVJ#zM5{_0?I-;w$bnXl(OL%?RQhpp>jQL;csC{GU< zamKNdCX2+;uIi<#@oC+!BTQ6qA|T!_Y;sCDsi6b{bP*wX_{&+Zz)?4C#R{xQH;HCHc8z^!qxE*3EO|QX7c6 z#R(zawrj*SsiYTi^j&Vy{ts|8VB3SpKr1}>Dxt*9>pi)uy&^m!xKTdz!X;VtjBh*M znS`24O;h*9^5;4W(@nZ9J)g=U`}tVxy6St}3@-53`5G)~nkD@{?ti@Ew>kv_zDPA* z-(RlXyWarbH0c%$5mQ@y-oE^A#QF}!7~v6rRbcf3_S?72Qc&GnOz;7**|eHI<)Y_9 z!jumyKI?$CqyY;OF~~@4c-kL&YV&VB_3x+HA>t8EH;^le)CM7HlJeMQ z&I12C-)#7n1B~5~*I$-%(n;8(47V~VkRsq;!{YBhy3OFyperB_( zu=1r=5n~`;vx5d-<^yK{aCf?42Q?y$u?7W;;%u)JPZ?s*2nriSzB2oLE;U*_AQ^q` zS6=9{Bt;gy*KPZpCWPi*7Ro}ooRs5Wl=X9_#Rbfz?WeVR>zJc21cFVAIGNk zKIVEtE;;N|F92=A)l_>H-KM3!y5EDAf4%vSA31^k&Ck`=A`a{Z=Z#5f%78&lABGch z2)98=s4G>W`C$4<{Qld(Cr^XE#~Ef1ZP_UtAhc9NA^7CN*45t*^8cES`GLJjcZ#igfQhqgCq5|7ohT4UjUKs-GSmqgeAa$EUy&6Y)H~3N3A3y zAoGtl%1!vxUGqB?!hG&^eNOfDd`YE|Il6z{pzaH9Z=MZ^^(xQNqdvddhwNDoATH=L z;YKXoWyT)8GJ`>k%$TPaNSHQTjHc6pb}hSHx&7bI)yO5uNm}mqtdBrp(E^5F0kHKp z;-Z1Ir~(Q(ug?0gGmF9qk)n%sN&&lUp03vJ9Qw-u?l*Y%{gq@XKc>JR_WVVPpwGSbK1nexIvdd=QP%ld}&+vn~mtV*m=eshZw>G*e z^1-I=1Iw3 z@(B+a8yEr<>Qh8xTS|Kbn-x{hZLqR2tz%lQj}44c9IhUt*kbRqQpf>Cs!6ia-wOtUYi~C~p@Rj(xv;7N4azx3Nv<;I z{N*?H-(dW|3Jo`&Qz5*40J$RL2D|6jXAuzU^buYzQlib2Z$9tq==LVo$tXRs)o-5) z#sFcnC8h@Ci0aqP;4(s^{E%Qtx>1Ws8m=_rnyH1`VDq0-6qVopcKda;ovWK)qT{H{ z6r;l2Tfhtr?B|n}W;^PY`DAj=u*9x|+Z?%LW=~EtU?cwMU_@IBrI~*~Qds$YvY*_i z-;aAh+^bh^h^1$qG`>y?CH71v^v(^#h? 
zR39A9yKvK72@HJOpJ|wjv#yj3g-+k2KdV;IG}9s!XOJrVo)O%B~Vku?40;*@eGPu+Az_||6u^ZI=U zF4PQb8TW-PPwRq_bW6V<%GV=|Z?*qkV<3HF6CXEcdRCTJqc1ljCt${Pj4@IDbXv#- zOWv<{PKAzL{&yeuD`g9x`#fM%8)(16YS_id8OfL5$X?5NPP^u%XV`3^mihL@;E=}1 z@CbSQ8CzHT2}u?V?zdIHayNmiMQnHO+L6DqOVVB({8JYsvJ2I74XkFv(nxVyg@<9u zxK1Aa4K9OPD7kCx&)>SRuU(iW*77{UHIBgpAt?3+(*e)FNmX`?S6rMI_lI*x`0AdO zp_rBg2uDk&WJZ(S(bjz9nNY(e(Z{V0{nkq6+o){~@Q3*hNygwE@)sz^D?LPpvpceX zJ9_QM+}^h;CiW#sEl`s;v!$1IE|`x7>I_$hCch8}Y?JWIFEG<$E!&*anL7Op`9!kJ zdjXlqB6(Q(S1g1_@bN*=h0|@{RLZ^0s8+zpiOm8gkV!cwrzvoQz`-eDx`PEaV>cRf z2s7#;uz1oxNP+nd|Fy>NC*j8{{-ekA2H;U1a)~BuX3e*^GB@eVuU)vp4z^|*o{y48 zJ_7=pKfwN-doRn^U<1IOTJrWm7k98ZQj_4<~ zy$Mzm7q6)aP}_LQ@!Z0Ckdu?5x?NvmVofrV!Xu((>e@o-mm3To$nzD5NS!dM1O&eq zg6(k=J9!$GR3b*^`RilheXzYuAE-4<<`1To4=Tc%xKcGl^M>rXHwHF+K9unNO_ov1!eqvR&&6T7PO;6Tgr%4l%yY?t-R)2kPR zW5F7)r)>4orLA{hx?dj}5W#Wpwx!rZ&bh-u0jJ)BQZ~O!RVP@@1QbC@+H+YXECXb* z$b%?%+m)%%Vd#pra#^Ogu+x<8lJ&P6YnTv<#L^y0 z`=DgsllgjP7gqmxN2we`<_c2pXe&lQsk%5J>O{5#^(E#SB7$3~9Tux|wZ(ObptX>?&($DDzik^MzL$UYyv%yN z+Vj_X^BxCO!jpQ&n>_`_z#oBF;Wj%JkI4L$a3S zG>_8>@Kj88s!rlFZapvi>>fJhx!7-^17rUXZ}-Lp`4(6ipBF^HcFUJFoSi}R8{k8y z0{IV9v7hWmclaA9vk1mE#Qi@(GFUa{zBaM2(*Sr_h;d#cS3|;e^^Af&> zMsN1#2vOW=0{`u)jwB;+L; zEj()ViGiuI^pgjrqrY-WQ0Sz-mi;Pw$wchEND00>zJ{XY8IM{MB?4LnIFkg*jr`_}KcomRNvJPD7;CCcNiYL1EWVP`I*3uR9?-tt?yL8@HiZV?!rA40%99AI3rUVWCR z;qIJ?$>sXYtpqGvx#9V#`U3?6MYvW5MV42{Rj~cNX&BjVD|H>$eBka&=whxN<(?6Y~~D-O7w(Q*A>kN z?ec}wxAD>2p6f@B(~#_a%Z|7BIk<2uL9B5pu6giCmsIugGVAp_f(-F?6~XaUiy^KK zK?&7?VH++tq?jyq2W07Mq#fG}$ZXQYnl07?=gjVGCW?kW&!)!3xWLr9#BqmLdd%>K z>9OV0P6#Z1#B;Hti=+Xl}&4*k_MPr|jI#Xyvye)=TisR;K+vI!th^#0J()kWWn{x%QjGzRTBn&UC~NiY+O@IeM7g} z>ZWJPhkHVG!`|WyKfU$jemJ9^9mco+hS?-RzNC;BnG64; zV0k{cH|`5qP&;i|Y5FYB7glnW?BV(*K8exW8$5%l4y+o&oV6JXw0Hv=utA_!rUg%P z+L;Ek(2k@#)GwnQUetT$R*iCP^p6_$YIu^LxO!4qmjv$?y{*%;`s=4UrF^Ecuq3?* z_325U!=pyl_{pC^==BBsaL6nxh@ayVCsv zli=7Q7`Q}bjIMhUEC1!oK|*#5h8MHRiLnMRE(xxit85%~SiF-Xff57DTNK7RHe_}| zA9BU5SRyM&1r=i4N_?)FCGtxifdQZ5LgYWaJo^&~qEU0}9W7b^RDdiWF80+qcmbzA 
zr3~07MFatH6agg3gB3r(FQYnOS zZcUat>Og$bFv6?Oa5<_ZN)Q+|HkJCD-p-FXENg&2DXN?9;!J!&{xIhhE4H1?fIMB@ z%HBlZ3LEQtERe%KY+8^j+`dk&tV3Q&yKF#Gt>(tAPAVn^Pt;N$uewD1(+w#}MQzX} zvLl06KG@JRbmbY8(4 zB)>iVr&~|GhgSShmxAeCBSg$5PpTZeuG5wn?)3TkM>fNfe95f-7B9R7t;>^ZcqSoh zRzBS2u(yJHwZDq->pNA4&?0KXpX(%G*D~jJu+;WJM0{C7pzX5A5EIyRKH&XJVbFPt z?i@-C?!>+ThFaj104ClZ51o1saHAiBg`bFcN|GxV4e|!~@WM5|<^<`h}aauVNU{WT@)rzRgY~q5+?L=QpNLcAPlRxD{ubUN)QWrTFA$TRD1& z$ahA*$!x6*Yq29sLy{}((p6$U!7A8x?mpT4h~yW6WVMQN%Eo-vi90;NfOFZ!7(ABb zjpIq_TsTP`oLxiV5aPUqTEq!%Q;?wa*vViqPAr|)?WO)AeC~aU+?4Pc6WMs##|8ZI zQ$9vM()4o`6-7dFacsG2UDwMbK7N#b4GZQj?Hec z(mm_~su{H6@#yLFn}1>Ut58Hr#u#Dzst1+LJTgl! z>G;XK(VX&C%Bww*8CcR|e;H%~SG}SlYI4~8pCd{{(*S*}3MsT-%y>1r^kla% zw_gxf=xD47EeliqYEdcm;ubMu$(La+T5gg2fM{f@&_Z7~^x~_dnuYwK0AckBNd%0P zcbezrz44OJt6hwi!{YkBa}`t!PMWbmF~II?2$1$iw=~OmJ{HcJC=b@YoToMW^2RHr zp&c&EE8!eWXhB*w0-nszQ*lB<>4|wa^>4SkX_ofeY8i+a<3?6yQCK_)4WbWxT!-2| zjv=~Z^Ehbnr--vok!pqVI_ErX<4I*KelTXhlgaN)u0lu4w+V^4iz zu@nFB+YBK0;PTxNdhVIb2h%6xtmKz()K7fNJmkCRNl0i*BES*w2m>S6Pw%Ko@Z9 zpSN)(PGd#H>G!y7uh;5V|L8tqZXn~vB?NhzwhSI1#>~E#D5bgUIEe|~2x<;<@vmQA zhB({@i=AeHE}screVb%n7RuwN>Tr2)?u3j;HY+d+{x#5qV@l(>^i1fMU4T^#rHc`x zP;8IuUMVy|UX-;&%q4HcYXT64$6QojC*t&?+}kqBbUNdNSO+qqLtD)ry5{3loTLuh zG(VLmDgR0_^00V_e}BRzh3Fu*3A&tgrKMth|zLbp4 z7Tw4|Wd$ufMo*re-XYXZXtA=4!z^)LN@u<-h4VGx$*k4E7S`{iLU^3sz^YKHbRyE1 z_of^ZRc)(MVx+;KuK(%6am1LQ>-8d;(g%BY?DX7<>FnZzAL9-ZH5M2e5B-PnLQ1JC znk;qM{N1_Pa*>{`RT93Llj2t_o_gRB2eUK7z~HrH``(W*QK&CFJuC19-07WfM6O9P zgm)~c-|l&-EnJf%PDp%BfkGd%C3#F4wGc}%6Yap5LOE8k92AOH^E6+qO|GwWJemPQ zvB|{7Ja6x@D5v~Tk#l!`lm=i>35z?IU?gI}*GDlYUfQ|YvUM`|{>K}^CTR}{g%eF_ z;Io4^?5}~h9Gd_7a1Es&`b6rNH~MYQ_1OYE1kxi2`MPIfePRR1C#mB)-{M`M?|9_? 
z#z2Lfsn1v!05_@nk&-##0|~=cM6U9gYY`R=c^AByys8PWQQ@)EFMCK^R<(H}3ydmU z0`kvy2K%d-wZ@O+qw`rokT7Do5rVd&KSa*OChPg6SK`aqBwOeYcW5_rWCQ6^LWF|A zs)QUn`N};dCNhz~=PYh?^f8GNu;PV_$BOTomPe?$@ain-s0ZWRtB&=duj^yz{erJY zx?fe`haFjh=;@~i#{6>dLrF>op4O=f%tkn*V!K>!w+18O_)DD z5UR!CPa894SOmK+bG>HHEAAbEG@V`_1`r+Td_5c6x|LiDQ_ZFh6$By)Q<|{9hD>orxj720X3l)Jnl69EAggsGE`fMDebTR@Q?fN_6gV9 zZ{t*jNhpH~GCu%q(@Hg|>6)>sRfinky9oV*Il9gHuxYjF)aASWca;;x$q~C;a_6;= zUUr}%qW>kD{*UL2irYHVlYVBCzu%ltAMOW4 zQAri_s+|GJ%_!(OLK+U+)1zHMQz?}|A%N}zN*^%eF>oG`(5!nlOk)D($~Iso0vzJ{ zPJErgblKbSkEY7yo+jAQYY@IO@U_u9aTN5s5`o!w7x04dnFM1V8`%aGb)6>f28fu< zqWO3Idc#%>)(dv*8$qrHmZ;zI4B0b4@)z&>-v47G`se#B{XL5Qu{AXXc}nt8Gp={V zCW+q= z;0by>;g_{Dxe?d@S+GW}(tt9%ZLNi0yC+wW-R|xHpHFCUK7>CR!V!VpUoDGcuqwR? zXp0{pX&2{|4I2#Tr|2pSfe-~_WF$-+(5(jOz_R zB)~k_pa7Bh<0~`({`pY>M;GDX1~za*1x_LOzUlGV;hcRR|Gs=TzXHTEmIUYn2<5C3 zXMGkE`}a2bKY!iD;Ae#sRSaz01LwfGFJ%G3c_6OY?A5v46XL)@yD&Ng_|eyYv3R2c z?qaYKqPI=#fjflw0z~df0HX)(j?^+#47_n%E&a!UIqa3$hwmboDt8u>!uku^6ZIT< zgqnhs>2(Rz70r72H;N271R|xJ5VEk2lgV2O@0=);3`@Ip5xD~5<;#1k?4r-@H7H}&i>hx z-^>G9)YxEg2I<*iF}^<=!k zZmF?!k*j(i&B7H@ct-$`;G_+s%yrioa)ckrSL+;J?-s)pP&aM>6fguFs^-$hEeXT= zRcY9ahnY-QOpgWQ55*gsT1}sO>(iK;e~#NzOQ$xXa+f&vc$2mbD|o8;&zcty#!u_#`?@mr|5=X}`ky+UIqXYoHdtM3 zeq3{&L#u(_005-02xLr!*Q5vD zAbg`HU^HdG8FwmIQCn0ng*|c&U3VEC*UqV1>GH@msWH@ zokomU-|_K+&QZtj=g3M}_jol|K2a6z*~*th?^NVLO6?*-clM`%$^|9DYFN4rt^g%H zRypf#&9@w*_8zFByc=v_$K5aP}KV^c0D$xnLwweXM$MvNO z;oX7hdO;V(%glb1klS4`G`!9S0zmN=R_L|=mCM^3P|MFAj3EfQbS2q(>T;9_NzGo` zn901{%-PrqI20c7P$=lec2H1{)$(6_M~}?0n>24P-rjgpV2iOHVrqQJCjf1j(GQqy zP%CmDjNDfBVALsV0|7uC^62maq%LnqFwTa50%d~jC8>6o5y%#gPbMABekPzGjND<^ z0sZ0$z(Y|$T5%mX1sv=nosFFjfE43hZ$H-vB%NwP-kqe+#{ocLz7zO8SU;b2%PRuJ zz!+rvc!Otj8h@^{`NoifZ3#!m%kRSU002&i!M@;yB>*hbwReEANVy&&kT zDO%#f=N!MVc8&wspFAuT$W2E9B~1?qjv=oXNc5Wr788N4xJmubYwe%)j6dG+1Gp!X zb;Xo`bQR#c_%5<0(P_6Ur|h6s0APk{bpHjA;*S7>ZC_*)U?ULC<;VjwwJsm2BKZe_ z2DC~BuBK`B3#mh~4XvTg$aP=8^hl3N5w3f-wF$C{Dj;(>(HwxN#Ner$z@p*%h 
zPz2e}vgO|-;Ga+PjuX-beU)qX`}OTEzrq#d5j`+@hJ%{Q{Za;T`KzQ)0xf;<{|5&{ zc*e>p8~15CP@DeEp#>5AJ;-$dLAelpoT2WIZ~pIpTD`i0WO>L7oQ;*9fuvvpSf<#5 z2pFv84OAx|Fj(sQ0i-S^p%d-DoF`x5brk7|Z`0o!4h;bjZ}mM$y}&kw$CQc`sVTUX zD}Xj`6xiN2O@PR)&1*1vsa^cr>BN#o8$QAtli{Z}sA zBODYfPhodp+Lfw=w0m#F*}=8 zJ1%#N_9XnuZmw!*oHy=Kv zR6}N$IJ(M9r`B6PGmbc8?wxY#S!t_QL}zdTh1ZV!%HZXirm?T0*<%VhV+4ik>l#ZGHT` zwSC2J@H+I85V8=k(kmcVJYe&h^KFxO`K54J$DM&>X}`c@t3qqtSx!4Nj`#kRQi9)3 z;=hM|PZm1Ne9mi+&}Ex|j_lcDIF~NChKd055U6^{k0Wk@oxev=Tr9*-iM}*e`|Vn$ z7lU5rrJ(20Q?=Ds=(2(Aq`=6k9N5nc0Y%(ex2wT8&40O{K+cbnJtL=E_so{>cq#g< z4`kM$>~_!fW&f|&ZTcp7LgeXG0!^vyt?2)HicKk%>lis(IseO51C9u-42X6zqKyA7 zl!F*$ou=mf*?AV8`({j_RLpwv-6sKEH3@(om|Z4?4@>)9fNKt@4zgu%fV3vm(|QdwIcAuv=aHvC z9#{VvV3YN!Y`HCeet>jpNMjvb!n^v@WgWbOFzR($iXvdAI)rgc91Ljj>05ZWFj|LTtjlR@Z+Mf`ZR%Y@yBs0&Ezpm;Flk9pwJ z9)e34&xZ5w)$((z{`1783qaWv1*m(Xx$&06dey-i(Xu%xu2_eRSAL~DONqiO7wG$& z_kKI?Snpi@fHLylvjWe-#BL>_vnBIRm{~#RuY%w26zg3wd0vYSg|Zilq0gC1G6)F} z(iv!Fs~iP(fPcp%%!xvh<`Un=^6U!=XaBF=CPiJFKgAC$S(TyFVsIZ|2q+1@2#Wt&@BpT<8@9c8M5F3eKV+ zv8z%{;ne}L^1Z`i$H;dQpVM6_H7qS8|79SD=WSA9*@A{gxbQV|4Z+Wl6{6t9snbw= z@gPdJ>CSSz5g;Mf42=BjYf)+-DR3Uy9fZ7`J3wNh2#s$+%)(r0bTrP zYzK4~N+Ak4&}hO+)EX4s1psJX8yB;#%VdzF;Lwb=mJ{waRH(%Qv62$D0(r;G)4WCx zEPg=!Gs@={viG@OD+svwa)2e3LbbdIQs;h=m&JYuiy_-$Fi_ro(DjaJ>&q$#V*!F- z$T6;Mj$FXMq)CXB*ZBjq|I^M3vyVe=b{Bwq6Fzt(+%eR>BJkRweO$g_+gVLnsP>2R zTiy<^+%|A6T|;9ZfUnxYjz@gETZdwz3JSdj1zFUnOBK*P;Jt9ESrmWfLFCTcZx!MD&3%J@r96xA#(S9K3Rnl#TrK*Nu{+h+ngL{(~vJUPm~ z@49g5-TaWNxTc?q$)?K%9stLTqx6}acthRKQre|;W7d2C%0&K7Zq5m`K){Ab?+#M) zf$1_w(fj}?D$f-B!Gc!Ym>t-dMxTcG_U9d0(67=vWd5@jfEHYSz6+snULKmMb-gg#qSZ*Bf6!JN0ikkfi~MzO$Xiv!Z^& zu=D)wxuzy?-}P2Sr2x|=rI{UwmW_(9|ETF?W*aOsU%15zV3I-=-y^PB*DOp)hH-bG z+eT=%Td5!b?mDSApBfi66VNs1m2{YbKvl&z=dOfQ1A53~RdJdjw@@AF3oM#IVzsv} zEEz$fqf%EidU6a=lQrFa3MJ|cJ3&j}NHdAe?8^g#EO&(qC^ZGUZ>xH?nsA)a#nFt@ zz4)dBKXVu39H#-7f^Wb#&K}q6DxJ4JNN>&WQjkSxFlB>)Ohip+m+7l}R`{->RQ2{} zBc&}*AjZ!X-T{YZYwl{IUHK`B@Bp@= z=Tcmt8*SgDFr%RfgPFnJ+?N9z$9zg7dgMQeO7u%>!CEz?|EGOL9*{9F;ltdqx3X1(_NH 
zYiuZUZPzuP)>1M+(HC^Z%H8*(4aEx6ys{tZb7~@sh{aLlZXxqJP$Ad;{5*8S+Z!UK zVM-RZkEfj<(9H2Z^j1U6v>o+E%T z1&$PZ#Jk7+y?SxsBYHSPbqU^AtBiX}iLivh&_+gYu!-6jTdd1Xp9TXApSQQA)$_!y zTHeLb(@*{YNc!zgm*}j5s>Z`>y4RbW)ugwt#RBGig)+c!z;jLkU-H0c+=Y~08J{1r zcm># z1h;^vEo)JU8*haEUVkBKwQh33NPdWJj}>1`(*1PLQp8CX#mCtf>k?_v?nWC8g)ypD zG9`y0#kduZn89j5neIm6nwf2GpYcq##=O3S5<{CD^O!&KHZP6WDE<9FK^#6{!3eor zj(|O0Bem{CV&3ON75R8e^azjSR3`y%5{Yngk8ta=HxVq2kw@nKg2^!_24}ZvQ@ZC{ zUYteqAmj&K4DiwjcfHtBjgZ}mmRWp`LubI&)46>QAPk>g-$3+xtOENdXtuL7Cga5U zD`S&@j78s23*pmZ<#bZx67)L_6L?>_!Z<7=E~0J{ok5!pZ@!9oPxsraW1?pOwlAbg zf{DJWdjSH~p^-6ex;wCpyKU6=$@@+@?|OE$DsRJ}$sE0`SY{^=e7Wk~8`uvgfpdKr zLsi*eIBgJiHz=VW$>wL6*4#n$5S!N`ZO(TwDwa4_g(EunQifpZ6qUUG7!piQxY9B# zRwJ%Z9||;NH}UgPvs8luGPgj<+u;4i*IAj4|Hjx`)neM`jw%u@58XB_|3Y0B1^w6B zSPva8v^xYLO`02wYFv!YpDY1RQ{G0GDLS@<*Yg+?Bc^x(boQ$NV*RC#AbE9V$t^M5 zC$dUe>C;2f6m#StmY5VZ!JcN-P&b(f9GJ(WDtFn9t+zMy`T{poAN=1V$ZOn7Tp!mbY2@a z!7tBFYRG(w;jvbfC?t1Im0{edR4&CecomWu66?IOYIioH>UeJtDg+1P&>&b|xOM)GlWeVJ=bN?ZTTDt;;1;T(d$%KCanm z=^j3uWZ1%S7WRw_H>OW(Dj2__R&xOM{xGnzr0*Ik7K9g23rVmE(G8?m@DD-8wrV$+ z%$)bkP3B2b%10nuF}=_1Yjqv);4If4{i=$46SrOiONUFrnaW8m^gK0x?X1|EVk)!g zb**Mu1KxqS-+5)hNBJ4&LGwa8$eN?~+JuW??Jc-Yv0HaV@%~M&*9%JLPn^PP3>POz zjB;Umu>5!T%Oec@EYB3K=e!qWYyCm6QY8%d{pbQ2(R1csCVu7c*%&s6s`>+_hX%{2 zcb0BSAW?g8USdZ#*t){P$M9RYm zV1xts+&n@lTP0Z1DqcCvU*fclmten&=`D z1?t0CdyN*$3V`UW0kzC=dN*Wtb9Jyb-vL}|aVz~+!@QI?6*xsaP|MT@^*FCRKn(%) zhx1&f>OR;jU(yqWonnyff7 zt(}#F0s}!@{B7L%s_a-sF-H#a06eSt;~)vRv6?IsLBN5B5^tDUGLN}(c4`fu$CQbs zQSAyl*V{2G?!pN!_XYftXq4+#QXZOFj@_c5I^o=G=3s zy4*nJyaEf|@<-ArL|ckAJ!J^4k-s&{d*oXagG-ta>42f9-Oi~AW8ss3jfwDVEt=lz0W6#U+d zk4axmV9++_zEMd1vcvmC)*=0o3qL`TE&Ys$t51M69SYJ&2(OKji$=_l)_wgk@67pl zy9aGA29k?>78fz|`vH1h82?;nli-m#TR!(SJw&4^P_nO7^SUj)C3 z(s>}|5DbR5F%(y~zQQE8b2k>3D2jG5VKx-4jp*mq?QwRJAf+k@NcQkH#_@i6Xj?5= zHltoV<}auhYsAwK&^PszWW7g@F9pV7IJr1vK1zPR6L|N9uX{yp&3dv`cPm z>Y!K)XHEy2M2SIHJJE_M+n1%=Z{e*q;6|4P*&4NB&5G{)8|EYpV_lXc>;GT15S#6)}cI(21WwdO}&Q|yHP 
z{`yI;SuOjRt7U}Jn6rbAhwNg^*JG7y|8_5+bD{=1E#8gA?Wr1FGVkMFA^ZuEjnKX6 zamCzKMPsIN%zZ)-)e7d`99jv{+}p^+y1@SnQ1fH+C#n0? z3Q(81HB=(L11X2l0R3(&2Ft|=8->BZs$ukD-l4(i=h#ff;Tk*IMBeQi zJ$I^^0UwZExe5%b-=#W?H+p&aY~_!X*`c@FADHTA#IU(sl}wwcwS$-Y4l1sjmH2N! zeFM6xB)5t|{Y6|i&&0^2nqT=S_|2D)C);|wPMDM;pGpiDl~kh@->HHOBk}@)DskFf z;qA$0MVFZRgOY!;h`)Vz^h~$N${RSk0PtD+j0b4{x12#4mJ=>?su~ zXxvFqdCH=uvT+mjp`2dKp@syVI`C|IizaDpC_Rw5jzLyUsGF>JRLnUeN>TzG~_ z|ADUioDkr)c~>I$WJC|R3b+-%`0P4RLF_+Hi7Vl{y2MNg93;Vn{i@oa-zFeZm1I8A*g(8}&NXFgbkEoYH<5EK*ox#OkAszC>(b z$qAw7zNDz!8-U2xg4H?hym79d&Hl;zPQajvd|=dJ1|-|T&iLqx3_Gex^DlI(>0LAS zFJML&8+x*VV=^t#5YPqGW&8kIdToJ(C%mvCB}7#sfc-vt<}_$$1q+)a{)#mdmsq`n zoV~nj;g0Ywgq6?AfhD9Oq0`;PPLC-pi)heN@YzaJO#r~25+}fTN*=_{#&;1dI8?0; zPA6uEninH6X7`=733x+>IC6W17bUju8~ChqDJ-~j8|&Bji6Mv@l!ik6pR5b{yR z1T@iE)x3`|ga^nYB4?Wr(?hF1M|>)}xD}b2lX$T}VFQ3>`ipZXwcii2gb#q;$_AnW z05^(b_nPy;n&#!VvOMji0>BdD(dp^EUz ztX%S0hS2W>#3g}5{p9*5fWMl!R*j2-$y7FS+j+j(FC&C}BeY?Vc#USMPNQ(K-(A74 z0hpT8n35{Ql>d_?0LwFBH7UovVUsCRzGnwT@wnP5x^J-LnVokez9<)(F84tltv(D- zaLb=0IbLt^`l|e(zT6c2=OHM!lm%DcI(dFD5?NW;BZYZoj>#b1cF`RJwlr!_4}ctz zaqIEYnfpe%TI6?Fttt`58SPln^c{;^%TPrzW|g2wzLDEv8gH<|w=v|ZttGM<#iBcg zkYj0BI?Ndp=ShJR(N&e}4#9MZKnJyEMRTe*Oxhtq2px%l6NLlLh*6TD!w}eJ`PXOj;PpB3< zkK*YOUw)HI?Lcl5M-3jTr%RR=m(Kx8K!hGw(1EjWFHgh4=ETsoRt=-hZ0jRX0J6jw z=2ctYi?`+8Xd<5vbnXU#VnPx#QeUuHIEBKOO_%+Rzz44CwETP7L~ho~5} zj$~>vQNty8?-Zqx9eu6v9*^U#9jqKN)@!lw645Li0R2q-ZgQ`U{5(YZcW^7Mz9^>L zDY%t+XGdTy{e`Nkrv>?hozU1~U&Z-rDYEq~TpRUOeb9;9IR}d{{$4;GrN*0%A=rKs z*H!tN^M%O9ur!+!~?11{w_8{GDZ=w`T0BW(RBwYuHBh5wg zDQ*Dm_4k(I=Cc#%*Z;|WkPUJI+i{6dw|sGxVZo>e5u ztSyuJrdJE~4Xlc5w@9>2j01lpaXVx}e76P)`4eqz%mD7*mZ{rjqm83nHbQ!Q9RkEv z7VdKR3Sv$7&UwyoDN`Ln5R)!aLwggwD)03Nb%{E7dpfF1C$=~RpWkNUD_0;aas3pqi!buFN@O$q7PTVp^Y5Dp{TV)!$yW#V zbp*ERch5TUfjH;^eoIm7Gu`uJh{!twq^uTx|+Tei0j2`vN6b+kcdR+f``tRQXqqUif}@`nAI ztewn=0gAZr&!6-Rv>h&I_yy*EA*q^2w^(h)JFR{Dg&4Cn0m=Hx>5VG$qAH}?0wVg9oa0;`19p|IE1$|78<(k;WHR@?qj17 zPL&~aPHlF}m#x&gKND3gtcH&=6!yK2DCH1r%w7JrbuYA&TtJF0ZGU<(1KzVBtfSL8 
zR9q8dtwx58g>>o1FIO!6N8Omh>ZJ4iyA;U(m!I$;UzBypVS<8?->$yPS0ebG-4q#p zVjxDZ3fllch_XE2e#u6~?NRxPU~VFDQ762stcA>2%L$A@n#OeudQ`oCeH4oj6m1rh z-nTr0*;t_?qj{y;?5oC1kwD=xvYM}63B9R03Q~9|?tpxF=9}Bu#1d@mXR=s%Yz&&_ z2G@t5AG;K#?S%^SpkYN;6ciMYNaWx6`Qm=Juh)ZlhPQ)VkD|Q$D4*p-S&mU*26L#U zUgFJ$Gtj!62Ao|OzK5y8mGjE)U}`gf8c$rC>ek!o-v`J-d!b!%ccni{Z-Xc2x(|dH zYC_|&ty@j?7}XU|S_C@ZSTGBCyuou52RqH@ye3OQ`@vfmTy5t{;O=3UY+ISX`Z2!h z2$w;#8i?vTSvD0uhtX9Q4}#oA-#R=UQ}C$S3q+@4Aen>=74u_1M}G&?V@JqpW(wG^ z8o)D6IJ!f|3OiahSopqIXH9{<@r63Lz%?Yi67$#AY@;5=k8Kk0^gKd$H$c8|aFz|i zK*i{bvWGhTC(xca#@EBGDK6Jix9-nug6C6<#yrJC36^?IDEFktSd3zse?Rg4;%F4^ zkmx>N>yv*j&D*E`u#v)e{A;Q>oNulMVd!HTOHV`d0WPqfAn(VJnm5=q)CfjXg?hEl zmPK+kpzom%(0tQCcfEmstt2)*^%-Og{2jC(_7~;>3qV`RJb%*|dt$A`=M1j3@RFj@ z!KfI#KyWAVRBv9Ydv^nvnFfy-vwq7E3#`40$^YT0XNy1T_gF1M2QWWVS~*LiA;nEhgbT1Jo29SnfHMg5VX z83UJbTTojWcOM`VZANZ^;YQ1?7jB)MZPD>==A+fxV=zW14GsOjkgg9BK6RG<83lfZ zgD_$B9Z~1mC(#O?+O=Ryv;%Uzg4Q)=KzeKk7)N$r5?JkC+~oZ5KHTvH%rEb>S@6Sv z;_G=8a24IQJp{{oeD&cU`I(8AC1SPd4B4O=F~;mRG+e|V1GDl~!Sf@Nix<+6zmS&E zW3=c3?;!=Qnpg&{T1bP85g}ulwBHC)%^F~?=7!>0whbD=vs6SZmi+!qp3PuRyh1IX zpv;ORp^r88f$zKO|(@&cHDSA#5k1_K48A5Q@U&@-!1)^NWAt0B|SC6L51 zNUV7=c6Y^iC{O9on4MDlkzbYNFj_DW$zyWY&zpw7Xa<8ibmIdk%>_LmL#nr^IMP5W z@&iXaAV;Kn7EHj1rIwwB{iCRWXoNQV$(P52OJI!l9OQfYFE9o*Uq9QLtZ8|1ZA#0w zM|!?r43mjp3jk8=FaE;tXE%feWCts~nwVk=$a#GGFIFOzr0X4yNgbVzgX%hx!v5v2 z`!)Z(0%Y&`DhJ~;r0N9?g7ujnlw>=O?4Mxa;~&~i#nr9AxV|N8Xi&V$ZD{d+-~*9U zO@49g4Xg*}r~67QkFYKXBD<@xaqvDZvf6=Rq}OAu{N>f6rW9Is0puH{CBZti%W--L z?mkV;E<7dOgvUTEQE*@KW7>E_ke`kuk1K~GY0$Hvv;gg(N#G~>ZPxq5mW3HRt8Y4uP1ErlF%EXe#>IjmfTrK zTH;>+$nyE#Z5)DavxTdH1gn&oUL)UUqE2P?bWqU7Sf9=d3uTCYb+cYTg^6F%njWV_Eg*KZE+u%k3!sItdtW84}EMc!-PRWXCh6PUuA*{_n`3P>?FuvW0X zYNIiKmZcNQK7R`yFaTPz0x+h}SZeiB$idsFV-F18v!%l@QT5YW5xZIX`T;sxk_y$m z?rMic^k}32k;kpA;wu)DkhB5V+nb`6I4{8ODkngO;=3;f%q>b*Y|t7Q8w26%LwRuo zwl-LBv=Xr}%QmwB^U(I2PAGQ+p0sVa)-@kxM=Lrq9t5+tI01=O9fSi3VX;YUek@8~ zdxg|qMp>}bhc4|p8e3zU>q78%AY7<2sHvoOw+ASdnn4B?Q=CVRZC^xl$DbpFypnc( 
zxp;$KM^J=Y$dQ;**L6S6iHLOTlqfVw-7TG7H12YuQ98@C$DMGB{!EkFCr_KOTVJ&Z z4Gq(jS9nM&j`peC-{*VAQOv08s*X|q{nYiWweN>XG-Fw~YBZSlrJfTV1(JzS4}Y18;gr_nW-OGi zx3QE$@kwYU9k`9T3f0;YYh97b0Tj@J6Zs4L7PQC-npD@;GjO18`Ng2Z9w5+I8+bab zj{-DGufJTmJvQ}1`nY|;)~wz2XNb5}M>D@OFdglkUq_-<3aSWJK}n0YA8X+M<6 zW{!AvpDDG_RbcQrnfn^$_Dcbm4fW>$|0*8(Bdw`0V|CX31<8c7Hz5DkMDmPS>L z-thBDxkC9sdkIVAwpiEQ4t9U!){^Uu8X6k2J<;W0dQ#2@igH=mZML(03_lonOdS|H zZHkge)k~;1FLKuI5M**(kSt1sTYU8oLraJ3ukrhK>O=KX_h2XPZg4_YBKvaCWdU>i z;?-`3l7q(t3-Dfqoxhwx>!bM;LC2*o(WwPWq37TAStydE{j{b3H-YOWEN z`>1=0K3NXbZb2J#74BHn)g%1s5bA3mcPFAFq7=d$Vi+Yd24M`!i#|2Fz4w=i>MQSd zFB#&y(u)qV1Z11#Ly8Om;uHaYg~mVp_Cov1y_g2vZ#Y^P3rA}MS)_-2l14m-(@vxd zEI70?boNUKP4y_vmDj6J z%9qnzz9XSL=oReI>BpmiHl@y;QyS+RIQT&$%Xryxgyj2Sp^bYZO?zRHVL8i0_+n{l zZXk%=o_pxlPJn}(nPn-;EwQhGSiNZQP+ix`K!jeZ*Px79n2Hh$-_dOZU+e(~>tsRg zn4!mUWLp*ohJyVu7E`7%`$@1#{eUF#@jaE6Xwm>S0$`{^%wZLC$ZkfSfd!1p?Z^d@ zv6-#hdO)dRfru1DbZKE^JlHS@sbU|)vBFE9;C{IxwG$RHuta0WWoEjc`1F?Re8*S* zv@FX-%Yz1LBn+RH2E3*`t6XP}2pkVyJCGOXgU38UCbr>W!VH^h85^sxHa@j!=BvPh zm++w3&bU|Y3JPIndg zFWT8zznD2vl+oGQzp9uIHxUgXI4swA^U_h4K1=oRvskUFo+2FYXXO86d>qJSleuq%eCZB_Rq z*%sn;X7|KkXMU%hkM2Jp^@0P{rA6-hCi=m4D)r9SPWL&Ts>NNJuoO=Z$_S}I{juZ37=#3 zb9toOz>uLUS7$h9$D8k^*?UDkSgrTJ?iAb$yM_E<@vqS5xBKD+iE*ok_VTtmhf-a~ zWFW*R0#zkCVnVRZ8E+tz7{jM~L(0|BxjBJaP_H3V?|hvLo2@o&ywH`&DP`aR)&P5sXT-zO z)1V|!M!AU`QQsu1dJy#zJBay5+?`icxsuO~nhwB?-_kqwRoD{=T@#9NJxpRxJz$1#!p|v?paG-)i!s?&_|%V?WyXM6Hy%*>17m zp61X&I176Xqh!2V>Th~{UHql#cbPsw} zT(vQKI^yZxO>y0SCe(NYOsi>CP1PxoFvdiD z62KmHQJO~q{@e0kd^x@N9-FX56P^xsI>cjXf!g&4u2W_22)0BLPbtI!1>mdQbhSfckrO_NOwc>a&~2cw zegE{y@!ql>(51iJ-o?bTCKzbj_@ucq4Tk>we^DDV*DT%MZnuo4Lpx$=ICyhtCTT;h zj}aO|4cTgt81CV1_@2D>!2QTY#QSV>t^HB=-7T=SWOe6dycJJd6Vc0##{NL}^N-_q z4a;^>vDfj@%y-kYSrNJ|C>Bo1LEzM%c}io}A7u%-4&f}IF1o3WMG1#Hcz)qy?$}0c z-2c)pn#z|GF?TMGh?Nzlmhpbw0>S;X2cT}sVXf)zLk|r;cCYeRZM6-XWj*!gG-+ES-vP6?Kyp*BO$CLU z-yhp?mwP$QE`URvOB@ciR~dB6FW%;z_9Fsww?XqF?TT>FJx}CNPDdt2+D@0HU=Px! 
z51_e#IAvU^220KtpR8wsJgFb`&$hvt+8_1!o1lq9mE8heGzz2fE9p08Ks3GM&wc!k zc#G#rSCqeU5sT9uH+G3c2r-}R`@C-R%MZVl`g=D(|c#WboBEEd1y&sb-o3Vj5$>|a8k zv|&orXE%bXrz|v(fufk!61xSPTe>gP+j9kA(F}Fbg0cR2KrE7SJ#GyU9mf!TGhZr8 zOd)$WSIsQwn@A4_jfPcz)0o}Xvl;1HH8jL0|LC|yvT|&5GBL^8&HBlDc9O8f zIzXXq5OOl(rHOqpA9UnTS&j3st^R7!^h*6p%&k}4;mv(s!2}BA79$0*_%V=ctWRXF z!(GmO;G(rMMeLBsSy)i8XHSeJG5rg3|sbLSks$iNKGxFZ&#eDLkQ{0Hc48 zQHJgNHH?8LZlb+7EL=}K%dLY_`?^1x3^)u#ybO!fSu{T2u{D*ddSSSQnZ`?vCUlhB8}ajGfF2U4Wp| zL&u3F3ZJE$G0W#Y*zcvu@X_Xz+@C*I&*7X zXJSB1?B&GD>B_0eHe{|2u0BBiK`mG69Y12fV4i)UVpra<9x2a@$EQ^P{^-zn(S&X> zGllAjC*x{$!eI@LY7^rvef!bf^3_0xf?*~fF)gFR(+!#ERP+gz{Gxa6eMstcp9d^)3_^-@`8vHAR7>A+x~ z+k=PLk3%DH<*Ylei>X*uV=-j$*kErS)3uB44GMu~W3(Gl+X8z_!skF&@(-<88AB1T zbH^PRXd1spvoI)(mLAm{2!sjGvN9CTwe=+oU#wLvfB5Iij4=GKU&ac3>cYq89i4u7 zvB;ktsb8}S(4|ZtxE)0?W05S0i}QQ3{99Jzmn#7RbdsjWhShI>EsT;x=m)lQPTEIp zTG}0ryuIR$z9S0wvAe_`0f40-=OJhf$J5jdLY0g!TFh%O7&YU4h9A)cezdfZ=Vzi8;y!Wgf}c+^^bG+ z&nNiz-!an7ft%WO&x-UKJ>7yOXp?`td)Ud5Pq)I>?blX@R%%r*1wy}C><>*Dbf_YT zU?4cJ0V|xtMe57H)fs*(V)J<$O&L}FbNyfQT3m82e_0pm9hCd00`。`PPOTrainer` 接受 `N*sample_per_episode` 个训练样本进行训练。 -3. `relay` : 这种形式生成的总训练样本数也是动态的,但是不同于 `dynamic`,`relay` 可以从历史 episode 中读取 prompt 数据。 +`StreamDataset` 接收 `Env` rollout 产生的数据,并重组 batch 提供给 Alignment 训练模块 `Trainer`。目前我们支持三种形式的 `StreamDataset`: +1. `fixed` :这种形式生成的总训练样本数是由配置 `sample_per_episode` 指定的。`Env` 接收 `sample_per_episode` 个 prompts,生成 `sample_per_episode` 个训练样本。`Trainer` 接受 `sample_per_episode` 个训练样本进行训练。 +2. `dynamic` : 这种形式生成的总训练样本数是动态判断的。`Env` 接收 `sample_per_episode` 个 prompts,生成 `N*sample_per_episode` 个训练样本,这里 `N>0`。`Trainer` 接受 `N*sample_per_episode` 个训练样本进行训练。 YAML 配置 >>>>>>>>> .. 
code-block:: yaml - rlhf: - # one of ["fixed", "dynamic", "relay"] + runtime: + # one of ["fixed", "dynamic"] stream_data_loader_type: fixed - # max number of relay episodes, if `max_relay_episode` is set to -1, then relay all episodes - max_relay_episode = 1 + #: max number of relay episodes, if `max_relay_episode` is set to -1, then relay all episodes + #: if `max_relay_episode` is set to 0, then relay is disabled + max_relay_episode: int = 0 + #: relay after n episodes + relay_episode_offset: int = 0 .. csv-table:: :header: "参数名", "类型", "注释" - "stream_data_loader_type", "str", "指定类型,默认是 fixed,必须是以下三种类型之一,['fixed', 'dynamic', 'relay']" - "max_relay_episode", "int", "如果是 relay 类型,指定 relay 的最近的 max_relay_episode 个 episode,超过 max_relay_episode,会淘汰最老的 episode 数据。如果 max_relay_episode 设为 -1,则不会淘汰,记录每个 episode 的历史数据。" + "stream_data_loader_type", "str", "指定类型,默认是 fixed,必须是以下三种类型之一,['fixed', 'dynamic']" + "max_relay_episode", "int", "指定 relay 的最近的 max_relay_episode 个 episode,超过 max_relay_episode,会淘汰最老的 episode 数据。如果 max_relay_episode 设为 -1,则不会淘汰,记录每个 episode 的历史数据。如果 max_relay_episode 设为 0,则不会开启 relay。" + "relay_episode_offset", "int", "指定从第relay_episode_offset+1个episode开始relay,记录episode 的历史数据。默认为0。" diff --git a/docs/zh/api/config.rst b/docs/zh/api/config.rst index f148aa94..ce4b348d 100644 --- a/docs/zh/api/config.rst +++ b/docs/zh/api/config.rst @@ -1,46 +1,24 @@ Config ====== -RuntimeEnvConfig ----------------- - .. autoclass:: chatlearn.utils.arguments.RuntimeEnvConfig :members: :undoc-members: -RLHFConfig ----------- - -.. autoclass:: chatlearn.utils.arguments.RLHFConfig +.. autoclass:: chatlearn.utils.arguments.RuntimeConfig :members: :undoc-members: -Model Config ------------- - -ModelConfig ->>>>>>>>>>> - .. autoclass:: chatlearn.utils.arguments.ModelConfig :members: :undoc-members: -.. _batch-generation-config: - -BatchGenerationConfig ->>>>>>>>>>>>>>>>>>>>> - .. autoclass:: chatlearn.utils.arguments.BatchGenerationConfig :members: :undoc-members: -.. 
_lora-config: - -LoraConfig ->>>>>>>>>>>> - .. autoclass:: chatlearn.utils.arguments.LoraConfig :members: :undoc-members: diff --git a/docs/zh/api/engine.rst b/docs/zh/api/engine.rst index 05ee8314..6463ae66 100644 --- a/docs/zh/api/engine.rst +++ b/docs/zh/api/engine.rst @@ -1,6 +1,14 @@ Engine ====== +.. autoclass:: chatlearn.DPOEngine + :members: + :undoc-members: + +.. autoclass:: chatlearn.OnlineDPOEngine + :members: + :undoc-members: + .. autoclass:: chatlearn.RLHFEngine :members: :undoc-members: diff --git a/docs/zh/api/module.rst b/docs/zh/api/module.rst index f39b9589..fe21368f 100644 --- a/docs/zh/api/module.rst +++ b/docs/zh/api/module.rst @@ -1,16 +1,16 @@ RLHF Module =========== -.. autoclass:: chatlearn.models.rlhf_module.RLHFModule +.. autoclass:: chatlearn.models.base_module.BaseModule :members: :undoc-members: -.. autoclass:: chatlearn.models.torch_module.RLHFTorchModule +.. autoclass:: chatlearn.models.torch_module.TorchModule :members: :undoc-members: :show-inheritance: -.. autoclass:: chatlearn.models.megatron_module.RLHFMegatronModule +.. autoclass:: chatlearn.models.megatron_module.MegatronModule :members: :undoc-members: :show-inheritance: diff --git a/docs/zh/chatlearn.md b/docs/zh/chatlearn.md index 65145b53..86fb8994 100644 --- a/docs/zh/chatlearn.md +++ b/docs/zh/chatlearn.md @@ -1,6 +1,6 @@ # ChatLearn -ChatLearn 是一个灵活易用,支持大规模 RLHF 的高效训练框架。 +ChatLearn 是一个灵活、易用、高效的大规模 Alignment 训练框架。 ## 概述 @@ -10,30 +10,30 @@ ChatGPT 是由 OpenAI 开发的基于大型语言模型 (Large Language Model, L 不同于传统的深度学习训练,只涉及到单个模型的迭代和优化,以 RLHF 为代表的训练范式涉及到多个大模型的计算和数据交互,这对于构建构建一个易用、高效的训练系统提出了诸多的挑战。 1. **编程接口**: 如何设计一个通用且灵活的编程接口,让用户能专注于单个模型的建模,同时,又可以灵活地控制模型间的交互。 -2. **分布式加速引擎**: 随着模型规模的增大,用户会选择一些分布式计算和加速的 backend,比如 Megatron-LM、DeepSpeed 等,如何结合这些加速 backend 来实现高效的多模型计算框架。 +2. **分布式加速引擎**: 随着模型规模的增大,用户会选择一些分布式训练和推理加速的 backend,比如 Megatron-LM、DeepSpeed、vLLM 等,如何结合这些加速 backend 来实现高效的多模型计算框架。 3. 
**并行策略**: 多个模型可能各有各的计算特点,比如仅推理的模型和训练的模型在显存和计算上的特性都不同,每个模型最佳的并行策略也可能不同。因此,框架应该允许不同的模型配置不同的并行策略以发挥整体的最佳性能。 4. **资源分配**: 如何灵活地给多个模型分配资源来实现高效的并发调度和执行。 -为了解决上述问题,我们提出了一个新的 RLHF 模型训练框架 ChatLearn。ChatLearn 通过对模型计算逻辑的抽象,解耦了模型和计算 backend、分布式策略的绑定,提供灵活的资源调度机制,可以支持灵活的资源分配和并行调度策略。ChatLearn的优点总结如下: +为了解决上述问题,我们提出了一个新的 Alignment 训练框架 ChatLearn。ChatLearn 通过对模型计算逻辑的抽象,解耦了模型和计算 backend、分布式策略的绑定,提供灵活的资源调度机制,可以支持灵活的资源分配和并行调度策略。ChatLearn 的优点总结如下: 1. **易用的编程接口**: ChatLearn提供通用的编程抽象,用户只需要封装几个函数即可完成模型构造。用户只需要专注于单模型的编程,系统负责资源调度、数据流传输、控制流传输、分布式执行等。 -2. **多种分布式加速引擎**: 用户可以使用不同的计算 backend 进行模型建模,如 Megatron-LM、DeepSpeed 等。 -3. **Hybrid 并行策略**: ChatLearn 支持各种并行策略组合:Data Parallel/Tensor Parallel/Sequence Parallel/Pipeline Parallel/ZeRO 及其组合。 -4. **灵活的资源分配**: ChatLearn 支持灵活的资源调度机制,支持各模型的资源独占或复用,通过系统调度策略支持高效的串行/并行执行。 -5. **高性能**: 相较于当前的 SOTA 系统,ChatLearn 在 7B 到 30 B 规模提升 48%-82%。同时,ChatLearn 支持更大规模的 RLHF 训练 (175B Policy + 175B Reward)。 +2. **高可扩展的训练方式**: ChatLearn 提供 RLHF、DPO、OnlineDPO、GRPO 等 Alignment 训练,同时也支持用户自定义 model 的执行 flow,使定制化训练流程变得非常便捷。 +3. **多种分布式加速引擎**: 用户可以使用不同的计算 backend 进行模型建模,如 Megatron-LM、DeepSpeed、vLLM 等。用户也可以组合使用不同的 backend,如用 Megatron-LM 来进行加速训练,用 vLLM 来加速推理。 +4. **灵活的并行策略和资源分配**: ChatLearn 支持不同模型配置不同的并行策略,可以结合各模型计算、显存、通信的特点来制定不同的并行策略。同时 ChatLearn 支持灵活的资源调度机制,支持各模型的资源独占或复用,通过系统调度策略支持高效的串行/并行执行和高效的显存共享。 +5. 
**高性能**: 相较于当前的 SOTA 系统,ChatLearn 在 7B+7B (Policy+Reward) 规模性能提升52%,70B+70B 规模性能提升 137%。同时,ChatLearn 支持更大规模的 Alignment 训练,例如:300B+300B。 ## 技术架构 -![arch](../images/arch.jpg) +![arch](../images/arch.png) -**API:** 为了支持不同的计算 backend(例如 Megatron-LM、DeepSpeed 等),ChatLearn 抽象了一个通用的编程接口 `RLHFModule`,用户通过继承`RLHFModule`,实现基础的计算函数(如 `forward_step`、`train_step` 等),即可完成对不同计算 `backend` 的封装。同时,ChatLearn 通过 yaml 文件的形式为 RLHF 训练,以及不同的模型配置不同的超参数、并行策略等,来实现灵活的模型和并行策略配置。 +**API:** ChatLearn提供了RLHF、DPO、OnlineDPO、GRPO 等 Alignment 训练,同时也支持用户自定义 model 的执行 flow,来实现自定义的训练流程。同时ChatLearn提供Module的抽象,用户通过继承MegatronModule、DeepSpeedModule、VLLMModule 完成对不同计算backend的封装。ChatLearn 通过 yaml 文件的形式为 Alignment 训练,以及不同的模型配置不同的超参数、并行策略等,来实现灵活的模型和并行策略配置。 -**Scheduler:** 随着大语言模型的训练规模的增大(如 175B 规模的模型),单机已经无法容纳这么大的模型训练,需要跨机的分布式计算。ChatLearn 提出了 `DistActor` 的抽象,来表示分布式模型。`DistActor` 建立在 Ray actor 的状态管理和 worker 间的隔离性,可以很方便地管理不同模型的参数和状态。同时,`DistActor` 解决了 Ray actor 不能跨机的限制,以支持跨机的分布式模型。通过 `DistActor`,ChatLearn 可以支持任意规模的模型推理和训练。同时,ChatLearn Scheduler 通过划分集群 Resource Group 和调度策略,实现硬件感知的亲和性调度,即优先将同一个分布式模型调度到同一节点的 GPU 资源上。ChatLearn 也支持灵活的资源分配,即支持模型间的资源复用和资源独占,在给定资源数的情况下,实现训练效率的最大化。 +**Scheduler:** ChatLearn 提出了 DistActor 的抽象来支持模型的分布式训练或推理。DistActor 继承了 Ray actor 的状态管理和 worker 间的隔离性,同时突破了 Ray actor 不能跨机的限制。通过 DistActor,ChatLearn 可以支持任意规模的模型推理和训练。同时,ChatLearn Scheduler 通过划分集群 Resource Group 和调度策略,实现硬件感知的亲和性调度。ChatLearn 也支持灵活的资源分配,支持模型间的资源复用、独占或部分复用等策略,在给定资源数的情况下,实现训练效率的最大化。 -**Executor:** ChatLearn Executor 将 RLHF 训练流程划分为两个主要的模块,`Environment` 和 `Trainer`。`Environment` 负责推理模块模型和数据的并发执行和管理,`Trainer` 负责相应的训练模块。这两个模块负责模型数据流和控制流的管理。模型间的数据传递通过 Ray 的 object store 进行传输,模型间的参数传递通过 NCCL Collective OP 进行传输。 +**Executor:** ChatLearn Executor 将 Alignment 训练流程划分为三个主要的模块,`Environment`、 `Trainer`和 `Evaluator`。`Environment` 负责推理模块模型和数据的并发执行和管理,`Trainer` 负责相应的训练模块,`Evaluator` 负责模型效果评估。Executor 还负责数据传输、参数同步。 **Backend:** 得益于 ChatLearn 良好的编程接口抽象,用户通过简单的封装即可接入各种不同 backend 进行计算优化和算法优化。 -**Optimization**: ChatLearn 也支持各种显存优化和计算加速,通过开发 LoRA,大大减少了 
optimizer states 的显存开销,用于提升 batch size 来提升整体的计算效率。ChatLearn 也在持续优化 Policy 模型的 batch generation 过程,通过 input sequence 排序等方式来减少部分 padding 无效计算,提升整体的性能。 +**Optimization:** ChatLearn 也支持各种计算、显存、通信优化,通过各种并行策略组合来加速训练,通过 paged attention 和 continuous batching 等来加速推理,通过 EMS(Efficient Memory Sharing) 技术来高效复用显存,减少总资源需求,通过分组广播技术来支持 Training 和 Inference 模型间高效参数同步,等等。 ## 快速开始 @@ -41,41 +41,26 @@ ChatGPT 是由 OpenAI 开发的基于大型语言模型 (Large Language Model, L 请参考 [文档](https://chatlearn.readthedocs.io/zh/latest/) 快速开始. 1. [环境和代码准备](installation.md) -2. [基于 LLaMA/LLaMA2 模型的端到端训练教程](tutorial/tutorial_llama2.md) -3. [基于 BLOOM 模型的端到端训练教程](tutorial/tutorial_bloom.md) +2. [基于 Llama/Llama2 模型的端到端训练教程](tutorial/tutorial_llama2.md) -## 支持的模型 - -当前 ChatLearn 框架支持任意规模的 GPT/LLaMA 模型 RLHF 训练。 - -| 模型类型 | -|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| GPT (GPT 系列各种规模的模型) | -| LLaMA (`lmsys/vicuna-13b-v1.3`, `decapoda-research/llama-7b-hf`, `decapoda-research/llama-13b-hf`, `decapoda-research/llama-30b-hf`, `decapoda-research/llama-65b-hf`, etc.) 
| -| LLaMA2 (`meta-llama/Llama-2-7b-hf`, `meta-llama/Llama-2-13b-hf`, `meta-llama/Llama-2-70b-hf`) | -| Baichuan (`baichuan-inc/Baichuan-7B`, `baichuan-inc/Baichuan-13B-Base`) | -| BLOOM (`bigscience/bloom-1b1`, `bigscience/bloom-7b1`, `bigscience/bloom`) ## 性能评估 -我们比较了不同参数量规模模型的 RLHF 训练吞吐量,我们采取 N+N 的模型配置,即 Policy 模型和 Reward 模型采用相同大小的参数量。测试基于 A800-80GB GPU 进行,单节点配置 8 卡 GPU,节点间采用 800Gb RDMA 互联。我们和 DeepSpeed-Chat 对比了从 7B 到 66B 的模型配置,关闭/开启 LoRA 后的性能对比,ChatLearn 在不同规模有 48% 到 82% 的加速,在更大的规模下,在 30B+30B,32GPUs 的配置下,不开启 LoRA 的情况下,DeepSpeed-chat 出现 OOM,在 66B+66B,32GPUs 的配置下,DeepSpeed-Chat 无论是否开启 LoRA 均会出现 OOM,ChatLearn 在相同机器规模下,可以支持更大的模型配置训练。在 seq_len=2048 时,DeepSpeed-Chat 出现了 kernel error。 - -![Compare PAI-ChatLearn with DeepSpeed-Chat](../images/gpt-perf-cmp.png) - -同时,我们评估了在更大规模以及不同 sequence length 配置下的性能。下图分别为 66B+66B,175B+175B 的 RLHF 训练性能。 +我们比较了不同参数量规模模型的 RLHF 训练吞吐量,我们采取 N+N 的模型配置,即 Policy 模型和 Reward 模型采用相同大小的参数量。我们和 DeepSpeed-Chat、OpenRLHF 对比了 7B 和 70B 的模型配置,在 8 GPUs 7B+7B 规模,有 115% 的加速,在 32 GPUs 70B+70B 规模,有 208% 的加速。规模越大,加速效果越明显。同时ChatLearn还能支持更大规模的 Alignment 训练,例如:300B+300B 规模。 -![PAI-ChatLearn 66B 175B](../images/gpt-perf-66-175.png) +![compare perf](../images/perf.png) -注:当前的性能 benchmark 均基于 GPT 系列模型。 +注:DeepSpeed-Chat和OpenRLHF性能已经优化过。 ## Roadmap ChatLearn 接下来会支持以下特性: -- 支持更多的模型; -- 支持 vLLM 等高效推理引擎; -- 接入 DeepSpeed 作为训练 backend; -- 自动并行策略调优; -- 支持更多的 RL 算法; +- [ ] 支持Megatron-Core格式模型; +- [ ] 支持MoE模型Alignment训练; +- [ ] 接入 DeepSpeed 作为训练 backend; +- [ ] 支持更多的模型; +- [ ] 性能优化; +- [ ] 支持更多的 Alignment 算法;

我们欢迎社区小伙伴参与进来合作开发。 @@ -84,3 +69,4 @@ ChatLearn 接下来会支持以下特性: 1. Megatron-LM: https://github.com/NVIDIA/Megatron-LM 2. DeepSpeed-Chat: https://github.com/microsoft/DeepSpeedExamples/tree/master/applications/DeepSpeed-Chat +3. OpenRLHF: https://github.com/OpenRLHF/OpenRLHF diff --git a/docs/zh/conf.py b/docs/zh/conf.py index 708af275..53d199d3 100644 --- a/docs/zh/conf.py +++ b/docs/zh/conf.py @@ -19,7 +19,7 @@ # -- Project information ----------------------------------------------------- project = u'ChatLearn' -copyright = u'2023, Alibaba Cloud' +copyright = u'2024, Alibaba Cloud' author = u'Alibaba Cloud' sys.path.insert(0, os.path.abspath("../../")) diff --git a/docs/zh/config_yaml.md b/docs/zh/config_yaml.md index db483517..b7324354 100644 --- a/docs/zh/config_yaml.md +++ b/docs/zh/config_yaml.md @@ -7,7 +7,7 @@ RLHF 的训练配置包括三部分 1. runtime_env: 运行环境配置 2. models: 模型配置。每一个模型都可以单独配置模型参数。通过`model_name`来区分不同的模型。这里`model_name`对应主文件中定义模型时传入的`model_name`。 -3. rlhf: RLHF 训练配置 +3. runtime: 训练配置 以下为一个训练配置的示例。具体的配置项含义可以参考 [Config API 文档](api/config.rst). 
@@ -31,42 +31,42 @@ runtime_env: models: policy: model_config_file: policy_inference.yaml - num_device: 8 + num_gpu: 8 trainable: False reference: model_config_file: reference.yaml - num_device: 8 + num_gpu: 8 trainable: False generation_batch_size: ${ref_generation_batch_size:4} reward: model_config_file: reward_inference.yaml - num_device: 8 + num_gpu: 8 trainable: False value: model_config_file: old_value_inference.yaml - num_device: 8 + num_gpu: 8 trainable: False ppo_policy: model_config_file: ppo_policy.yaml - num_device: 8 + num_gpu: 8 trainable: True ppo_value: model_config_file: ppo_value.yaml - num_device: ${num_device:16} + num_gpu: ${num_gpu} trainable: True -rlhf: +runtime: colocation: - policy,ppo_policy,reward,reference,value,ppo_value generation_batch_size: ${generation_batch_size:4} train_micro_batch_size: 2 train_global_batch_size: ${train_global_batch_size:512} - num_ppo_episode: 100 + num_episode: 100 sample_per_episode: ${sample_per_episode:1024} num_training_epoch: 1 save_episode_interval: ${save_episode_interval:100} diff --git a/docs/zh/faq.md b/docs/zh/faq.md index 4331cb13..bbe2d7b3 100644 --- a/docs/zh/faq.md +++ b/docs/zh/faq.md @@ -20,9 +20,9 @@ RuntimeError: Error(s) in loading state_dict for VocabParallelEmbedding: 参考 [配置文件](config_yaml.md)。 -## 如何开启 optimizer offload +## 如何开启 `Efficient memory sharing` 功能来减少显存开销 -参考文档 [offload](tutorial/offload.md)。 +参考文档 [Efficient memory sharing](tutorial/ems.md)。 ## Megatron 模型转换并行策略 @@ -67,6 +67,6 @@ python train_rlhf.py -c configs/llama2/rlhf.yaml 2>&1 | tee -a ${LOG_DIR}/log_${ Megatron在load_checkpoint的时候会检查lr是否变化,需要设置 Megatron 模型参数 `override_opt_param_scheduler` 为True 来绕开检查。 -## 如何指定ppo阶段模型保存的频率 +## 如何指定训练时模型保存的频率 -rlhf.yaml 里配置 `save_episode_interval`。 \ No newline at end of file +rlhf.yaml 里配置 `save_episode_interval`。 diff --git a/docs/zh/index.rst b/docs/zh/index.rst index 01add732..d8d20d2b 100644 --- a/docs/zh/index.rst +++ b/docs/zh/index.rst @@ -4,7 +4,7 @@ ChatLearn Documentation .. 
toctree:: :maxdepth: 1 - :caption: ChatLearn: 大规模 RLHF 高效训练框架 + :caption: ChatLearn: 大规模 Alignment 高效训练框架 chatlearn @@ -22,15 +22,16 @@ ChatLearn Documentation .. toctree:: :maxdepth: 1 - :caption: Tutorial + :caption: 使用教程 tutorial/data tutorial/run tutorial/tutorial_llama2 - tutorial/tutorial_bloom + tutorial/tutorial_qwen + tutorial/evaluator tutorial/continue_train tutorial/custom_model_flow - tutorial/offload + tutorial/ems tutorial/profile | @@ -38,22 +39,29 @@ ChatLearn Documentation .. toctree:: :maxdepth: 1 - :caption: 常见问题 + :caption: 编程接口 - faq + programming/rlhf + programming/dpo + programming/online_dpo + programming/vllm + config_yaml + advanced + +.. toctree:: + :maxdepth: 1 + :caption: API 文档 + + api/index | | - .. toctree:: :maxdepth: 1 - :caption: API Documentation + :caption: 常见问题 - programming - config_yaml - advanced - api/index + faq | | diff --git a/docs/zh/installation.md b/docs/zh/installation.md index 06f6b9a3..a2bcb9bc 100644 --- a/docs/zh/installation.md +++ b/docs/zh/installation.md @@ -2,11 +2,11 @@ 1. 镜像准备 -推荐参考 `https://github.com/alibaba/ChatLearn/tree/master/docker/ngc/Dockerfile.ngc23.09` 准备镜像。 -如果在 PAI DLC 环境上训练,推荐使用我们准备好的镜像: +可以参考 `https://github.com/alibaba/ChatLearn/tree/master/docker/torch/Dockerfile.torch2.3.0` 准备镜像。 +如果在 PAI DLC/DSW 环境上训练,推荐使用我们准备好的镜像: ```bash -registry.cn-wulanchabu.aliyuncs.com/pai-dlc/pytorch-training:2.1.0-gpu-py3.10-cu12.2-ngc23.09-ubuntu22.04 +registry.cn-wulanchabu.aliyuncs.com/pai-dlc/pytorch-training:2.4.0-gpu-py3.10-cu12.5-ngc24.06-ubuntu22.04 ``` 2. 代码准备: 用户需要下载 `ChatLearn` 框架代码。 @@ -16,9 +16,10 @@ registry.cn-wulanchabu.aliyuncs.com/pai-dlc/pytorch-training:2.1.0-gpu-py3.10-cu git clone https://github.com/alibaba/ChatLearn.git ``` -3. 如果您需要运行基于 Megatron-LM 框架的 RLHF 训练程序,您也需要下载 `Megatron-LM` 代码。 +3. 
如果您需要运行基于 Megatron-LM 框架的 alignment 训练程序,您也需要下载 `Megatron-LM` 代码。 ``` # 下载 Megatron-LM git clone https://github.com/NVIDIA/Megatron-LM.git +git checkout 5161b1689 ``` diff --git a/docs/zh/programming/dpo.md b/docs/zh/programming/dpo.md new file mode 100644 index 00000000..7168ad59 --- /dev/null +++ b/docs/zh/programming/dpo.md @@ -0,0 +1,123 @@ +# DPO + +本章节将介绍 ChatLearn 的编程接口,我们会从主文件开始介绍如何构造 `DPOEngine`,然后再介绍如何编写模型。 + +## 训练主文件 +以下为用户的训练主文件的范例。 + +1. 调用`chatlearn.init()`初始化 dpo 的运行环境。 +2. 定义训练所需的模型。其中每个模型需要定义一个唯一的`model_name`。在配置模型参数的时候,不同模型的配置通过`model_name`来区分。详见[训练配置文件](../config_yaml)。 +3. 定义 engine [DPOEngine](../api/engine.rst)。 +4. 设置训练数据集。 +5. 调用`engine.learn`开启 DPO 的训练。 + + +```python +from examples.megatron.models import PolicyReference +from examples.megatron.models import PolicyTrainer + +import chatlearn +from chatlearn import DPOEngine + +# init +chatlearn.init() + +# define models +reference_model = PolicyReference("reference") +ppo_policy_model = PolicyTrainer("ppo_policy") + +# define engine +engine = DPOEngine(reference_model, + ppo_policy_model) + +# set dataset +train_prompts = ["test"] * 4096 +engine.set_dataset(train_prompts) + +# start dpo training +engine.learn() +``` + + +## 模型定义 + +用户的模型需要继承`BaseModule`或其子类,`TorchModule`为通用的 Torch 模型的封装,`MegatronModule`为 Megatron 模型的封装。如果用户的 DPO 建模是基于 Megatron-LM,可以直接继承`MegatronModule`完成模型的建模。以继承`MegatronModule`为例,下述两段代码展现了 reference 模型的建模和 policy trainer 模型建模的例子: +1. 对于 reference 模型,用户需要实现`setup`和`forward_step`方法。在`setup`中,完成模型的定义,参数初始化,全局参数定义等工作。在`forward_step`中,实现模型一次前向所需的逻辑。 +2. 对于 policy trainer 模型,用户需要实现`setup`和`train_step`方法。在`train_step`中,实现训练一个 step 所需的逻辑。 +3. 除此之外,PolicyReference 模型需要实现`build_dataset`方法,完成 prompt 数据集的构建。 + +更多 API 信息参考[RLHF Module API](../api/module.rst). + +```python +from chatlearn import MegatronModule + + +class PolicyReference(MegatronModule): + + def __init__(self, name): + """ + Args: + name: model name + """ + + def setup(self): + """ + 1. 
define model, self.model = xxx + 2. init global variables, etc. + 3. for training model, define optimizer, self.optimizer = xxx + 4. init model parameters + """ + pass + + def forward_step(self, data, iteration=0): + """ + Perform forward step for one batch + Args: + data: one batch for forward_step, type is dict + iteration: iteration id for current step + Returns: + k/v dict + """ + pass + + def build_dataset(self, train_prompts, is_eval=False): + """ + Build prompt dataset. The implementation of build_dataset is exclusive to PolicyInference, whereas other models are not required to adopt it. + + Args: + train_prompts: prompts provided by DPOEngine.set_dataset(train_prompts) + is_eval: eval mode + Returns: + torch.utils.data.Dataset with user-defined collate_fn (see `Dataset`) + """ + pass +``` + +```python +from chatlearn import MegatronModule + + +class PolicyTrainer(MegatronModule): + + def setup(self): + """ + 1. define model, self.model = xxx + 2. init global variables, etc. + 3. for training model, define optimizer, self.optimizer = xxx + 4. init model parameters + """ + pass + + def train_step(self, data, iteration): + """ + Perform train_step for one batch, including a list of micro-batches + Args: + data: one global batch for train_step, type is a list of dict, each dict is a micro-batch + iteration: iteration id for current step + """ + pass +``` + +## Dataset + +DPO Dataset定义和RLHF一致,可参考[RLHF Programming Dataset](rlhf.md#dataset)章节。 diff --git a/docs/zh/programming/online_dpo.md b/docs/zh/programming/online_dpo.md new file mode 100644 index 00000000..1f5dde08 --- /dev/null +++ b/docs/zh/programming/online_dpo.md @@ -0,0 +1,54 @@ +# OnlineDPO + +本章节将介绍 ChatLearn 的编程接口,我们会从主文件开始介绍如何构造 `OnlineDPOEngine`,然后再介绍如何编写模型。 + +## 训练主文件 +以下为用户的训练主文件的范例。 + +1. 调用`chatlearn.init()`初始化 online_dpo 的运行环境。 +2. 定义训练所需的模型。其中每个模型需要定义一个唯一的`model_name`。在配置模型参数的时候,不同模型的配置通过`model_name`来区分。详见[训练配置文件](../config_yaml)。 +3. 定义 engine [OnlineDPOEngine](../api/engine.rst)。 +4. 
设置训练数据集。 +5. 调用`engine.learn`开启 OnlineDPO 的训练。 + + +```python +from examples.megatron.models import PolicyInference +from examples.megatron.models import PolicyReference +from examples.megatron.models import PolicyTrainer +from examples.megatron.models import RewardInference + +import chatlearn +from chatlearn import OnlineDPOEngine + +# init +chatlearn.init() + +# define models +policy_model = PolicyInference("policy") +reference_model = PolicyReference("reference") +reward_model = RewardInference("reward") +ppo_policy_model = PolicyTrainer("ppo_policy") + +# define engine +engine = OnlineDPOEngine(policy_model, + reference_model, + reward_model, + ppo_policy_model) + +# set dataset +train_prompts = ["test"] * 4096 +engine.set_dataset(train_prompts) + +# start online_dpo training +engine.learn() +``` + + +## 模型定义 + +OnlineDPO训练模型定义和RLHF一致,可参考[RLHF Programming模型定义](rlhf.md#模型定义)章节。 + +## Dataset定义 + +OnlineDPO Dataset定义和RLHF一致,可参考[RLHF Programming Dataset](rlhf.md#dataset)章节。 diff --git a/docs/zh/programming.md b/docs/zh/programming/rlhf.md similarity index 76% rename from docs/zh/programming.md rename to docs/zh/programming/rlhf.md index 040d90ea..6b7c75ad 100644 --- a/docs/zh/programming.md +++ b/docs/zh/programming/rlhf.md @@ -1,4 +1,4 @@ -# 编程接口 +# RLHF 本章节将介绍 ChatLearn 的编程接口,我们会从主文件开始介绍如何构造 `RLHFEngine`,然后再介绍如何编写模型。 @@ -6,19 +6,19 @@ 以下为用户的训练主文件的范例。 1. 调用`chatlearn.init()`初始化 rlhf 的运行环境。 -2. 定义训练所需的模型。其中每个模型需要定义一个唯一的`model_name`。在配置模型参数的时候,不同模型的配置通过`model_name`来区分。详见[训练配置文件](config_yaml)。 -3. 定义 engine [RLHFEngine](api/engine.rst)。 +2. 定义训练所需的模型。其中每个模型需要定义一个唯一的`model_name`。在配置模型参数的时候,不同模型的配置通过`model_name`来区分。详见[训练配置文件](../config_yaml)。 +3. 定义 engine [RLHFEngine](../api/engine.rst)。 4. 设置训练数据集。 5. 
调用`engine.learn`开启 RLHF 的训练。 ```python -from models import PolicyInference -from models import PolicyReference -from models import PolicyTrainer -from models import RewardInference -from models import ValueInference -from models import ValueTrainer +from examples.megatron.models import PolicyInference +from examples.megatron.models import PolicyReference +from examples.megatron.models import PolicyTrainer +from examples.megatron.models import RewardInference +from examples.megatron.models import ValueInference +from examples.megatron.models import ValueTrainer import chatlearn from chatlearn import RLHFEngine @@ -53,19 +53,20 @@ engine.learn() ## 模型定义 -![image.png](../images/class.jpg) +![image.png](../../images/class.png) -用户的模型需要继承`RLHFModule`或其子类,`RLHFTorchModule`为通用的 Torch 模型的封装,`RLHFMegatronModule`为 Megatron 模型的封装。如果用户的 RLHF 建模是基于 Megatron-LM,可以直接继承`RLHFMegatronModule`完成模型的建模。以继承`RLHFMegatronModule`为例,下述两段代码展现了 inference 模型的建模和 training 模型建模的例子: +用户的模型需要继承`BaseModule`或其子类,`TorchModule`为通用的 Torch 模型的封装,`MegatronModule`为 Megatron 模型的封装,`DeepSpeedModule`为 DeepSpeed 模型的封装,`VLLMModule`为 vLLM 模型的封装。如果要使用`VLLMModule`来进行generation,可以参考:[vLLM generation](vllm.md)。如果用户的 RLHF 建模是基于 Megatron-LM,可以直接继承`MegatronModule`完成模型的建模。下述两段代码展现了 inference 模型的建模和 training 模型建模的例子: 1. 对于 inference 模型,用户需要实现`setup`和`forward_step`方法。在`setup`中,完成模型的定义,参数初始化,全局参数定义等工作。在`forward_step`中,实现模型一次前向所需的逻辑。 2. 对于 training 模型,用户需要实现`setup`和`train_step`方法。在`train_step`中,实现训练一个 step 所需的逻辑。 3. 除此之外,PolicyInference 模型需要实现`build_dataset`方法,完成 prompt 数据集的构建。 -更多 API 信息参考[RLHF Module API](api/module.rst). +更多 API 信息参考[RLHF Module API](../api/module.rst). ```python -from chatlearn import RLHFMegatronModule +from chatlearn import MegatronModule -class PolicyInference(RLHFMegatronModule): + +class PolicyInference(MegatronModule): def __init__(self, name): """ @@ -73,7 +74,6 @@ class PolicyInference(RLHFMegatronModule): name: model name """ - def setup(self): """ 1. 
define model, self.model = xxx @@ -82,7 +82,6 @@ class PolicyInference(RLHFMegatronModule): 4. init model parameters """ pass - def forward_step(self, data, iteration=0): """ @@ -95,13 +94,13 @@ class PolicyInference(RLHFMegatronModule): """ pass - - def build_dataset(self, train_prompts): + def build_dataset(self, train_prompts, is_eval=False): """ Build prompt dataset. The implementation of build_dataset is exclusive to PolicyInference, whereas other models are not required to adopt it. Args: train_prompts: prompts provided by RLHFEngine.set_dataset(train_prompts) + is_eval: eval mode Returns: torch.utils.data.Dataset with user-defined collate_fn (see `Dataset`) """ @@ -109,10 +108,10 @@ class PolicyInference(RLHFMegatronModule): ``` ```python -from chatlearn import RLHFMegatronModule +from chatlearn import MegatronModule -class PolicyTrainer(RLHFMegatronModule): +class PolicyTrainer(MegatronModule): def setup(self): """ @@ -123,13 +122,12 @@ class PolicyTrainer(RLHFMegatronModule): """ pass - - def train_step(self, data, train_info): + def train_step(self, data, iteration): """ Perform train_step for one batch, including a list of micro-batches Args: data: one global batch for train_step, type is a list of dict, each dict is a micro-batch - train_info: includes training information, e.g., "iteration" + iteration: iteration id for current step """ pass ``` diff --git a/docs/zh/programming/vllm.md b/docs/zh/programming/vllm.md new file mode 100644 index 00000000..22548ee0 --- /dev/null +++ b/docs/zh/programming/vllm.md @@ -0,0 +1,185 @@ +# vLLM + +ChatLearn中支持vLLM进行跨机分布式推理,支持vllm和training backend之间的参数自动同步,同时支持training和generation复用相同资源和显存。 + +目前,ChatLearn 中Policy generation模型使用vLLM backend来节约显存占用,加速大模型推理任务, + +## 模型定义 + +类似于继承`MegatronModule`实现[PolicyInference模型](../../../examples/megatron/models/old_policy_inference.py),PolicyInference模型若想基于vLLM后端完成generation,需要继承`VLLMModule`父类,实现以下关键模块: +- model_provider:模型定义函数。 +- 
setup:调用model_provider定义模型,可根据需要决定是否load_checkpoint等。 +- build_dataset:调用vLLM tokenizer处理数据,生成prompt dataset。 +- eval_forward:Evaluation任务中调用完成分布式推理。 +- forward_step:RLHF/OnlineDPO训练任务中调用完成分布式推理。 +- _add_request:将待处理数据作为vLLM scheduler输入,以供后续使用vLLM批调度数据功能,完成continuous batching generation; +- decode_internal:根据实际需要,将vLLM输出的generation结果解析为相应格式。 + +代码结构参考如下: + +```python +from chatlearn import VLLMModule +from chatlearn.utils.vllm_utils import get_model, print_rank_0 + + +class VLLMPolicyInference(VLLMModule): + """Policy vLLM Inference""" + + def setup(self): + pass + + def build_dataset(self, train_prompts, is_eval=False): + pass + + def model_provider(self): + """Build the model.""" + pass + + def eval_forward(self, data, iteration=0): + pass + + def _add_request(self, data): + pass + + def forward_step(self, data, iteration=0): + pass + + def decode_internal(self, batched_outputs): + pass +``` + +示例可参考[vllm_policy_inference.py](../../../examples/megatron/models/vllm_policy_inference.py),补充说明build_dataset、_add_request、forward_step、decode_internal如下: + +- build_dataset:调用tokenizer处理只需要返回prompt_ids、prompt str,其中build_dataset的[VLLMPromptPipeline](../../../examples/megatron/data/prompt_dataset.py#141)具体逻辑如下: +```python +class VLLMPromptPipeline(PromptPipeline): + def __init__(self, prompts: List[str], max_prompt_length: int, tokenizer=None): + + for p in prompts: + assert len(p) > 0, "Got empty prompt" + assert max_prompt_length > 0, \ + "Prompt length for RLHF/OnlineDPO/GRPO trainer must be an integer greater than 0" + + # tokenizer prompts of self-defined format + # only return prompt str and prompt ids + self.prompts = [(prompt, tokenizer.encode(prompt[:max_prompt_length])) for prompt in prompts] + self.prompts_ids = [] + for prompt, prompt_ids in self.prompts: + p = {"input_ids": prompt_ids, "prompt": prompt} + self.prompts_ids.extend([copy.deepcopy(p)]) + # set tokenizer + self.tokenizer = tokenizer + +class VLLMPolicyInference(VLLMModule): + def 
build_dataset(self, train_prompts, is_eval=False): + max_prompt_length = ( + self.model_args.get("seq_length") - self.model_args.get("max_new_tokens") + ) + # TODO: read from files + prompts_dataset = VLLMPromptPipeline( + train_prompts, max_prompt_length, self.tokenizer.tokenizer) + + return prompts_dataset +``` + +- _add_request:将处理好的(input_ids, prompt)数据对输入给vLLM scheduler +```python + def _add_request(self, data, is_eval=False): + return self._add_request_internal(data["prompt"], data["input_ids"], is_eval=is_eval) +``` + +- forward_step:参数中data为vLLM scheduler调度的批数据,格式固定,调用vLLM的execute_step完成分布式推理 + +```python + def _forward_step(self, data, iteration, eval_mode): + assert iteration >= 0 + assert isinstance(eval_mode, bool) + seq_group_metadata_list = data["seq_group_metadata_list"] + blocks_to_swap_in = data["blocks_to_swap_in"] + blocks_to_swap_out = data["blocks_to_swap_out"] + blocks_to_copy = data["blocks_to_copy"] + + outputs = self.execute_step( + seq_group_metadata_list, blocks_to_swap_in, blocks_to_swap_out, blocks_to_copy) + + return outputs + + def forward_step(self, data, iteration=0): + return self._forward_step(data, iteration, eval_mode=False) +``` + +- decode_internal:可参考[examples](../../../examples/megatron/models/vllm_policy_inference.py#L119)实现。参数batched_outputs格式为List[RequestOutput],其中[RequestOutput](https://github.com/vllm-project/vllm/blob/v0.5.1/vllm/outputs.py#L67)包含以下重要attributes: + +| 属性 |类型| 含义 | +|:------:|:-----:|:-----:| +| request_id | int| prompt request编号 | +| prompt | string| prompt token string | +| prompt_token_ids| List(int) |prompt ids list | +| prompt_logprobs | List(Dict(float)) |每个prompt token对应的logprob value | +| outputs | List(CompletionOutput)| 详见下表| + +其中vLLM CompletionOutput类包含属性: + +| 属性 |类型| 含义 | +|:------:|:-----:|:-----:| +| index | int | response编号,用以区分同一prompt的不同回答 | +| text | string | response token string | +| token_ids | List(int) | 生成的response token ids list | +| cumulative_logprob | float | 
生成response的所有tokens的logprobs累计值求和 | +| logprobs | List(Dict(float)) | 生成的response中每个token对应的logprobs| + + +## 模型配置 + +可以直接修改 `rlhf.yaml`中policy模型的 `model_config_file` 配置,例如: + +```yaml +policy: + model_config_file: vllm_policy_inference.yaml + ... +``` +也可以参考示例 [llama2模型配置](../../../examples/megatron/configs/llama2/vllm_rlhf.yaml)。 + +## 超参配置 + +vLLM超参可分为五部分: +- sampling params:采样超参,具体含义如下表 + +| 属性 |类型| 含义 | +|:------:|:-----:|:-----:| +| n | int| 每个prompt输出response的个数 | +| ignore_eos | bool | 控制某个prompt在生成eos tokens时是否结束生成 | +| top_p | float | Float that controls the cumulative probability of the top tokens to consider | +| top_k | int |Integer that controls the number of top tokens to consider. Set to -1 to consider all tokens. | +| temperature | float | Float that controls the randomness of the sampling. Lower values make the model more deterministic, while higher values make the model more random. Zero means greedy sampling.| +| use_beam_search | bool | Whether to use beam search instead of sampling. 
| eval_temperature | float | 和temperature一样,但在Evaluation场景使用。|
+| eval_top_k | int | 和top_k一样,但在Evaluation场景使用。|
+| eval_top_p | float | 和top_p一样,但在Evaluation场景使用。|
+| stop_token_list | string| stop token string, separated by semicolon.|
+| new_token_limit | bool | 是否限制生成tokens数|
+| prompt_logprobs | int | Prompt token计算logprobs,默认为了节省显存,设置为None,即不进行logprobs计算|
+
+
+- scheduler config:数据样本批调度配置超参
+
+| 属性 |类型| 含义 |
+|:------:|:-----:|:-----:|
+| max_num_batched_tokens | int| 批数据的tokens数的上限,建议设置为batch_size*(max_seq_len-max_prompt_length) |
+| max_paddings | int| 批数据中padding tokens数的上限 |
+
+
+- cache config:生成vLLM cache blocks的配置,与显存/内存使用有关
+
+
+| 属性 |类型| 含义 |
+|:------:|:-----:|:-----:|
+| block_size | int | gpu blocks size,默认为16(即每个block缓存16个token的KV),可根据具体模型的activation size推导 |
+| gpu_memory_utilization | float | 设置推理过程中所有进程的显存使用上限占比,范围(0, 1.0],在显存充足时,上限越高越好 |
+| swap_space | int | 在GPU显存不足时,和CPU换入换出的内存大小,单位GB |
+| sliding_window | int | 默认为None,vLLM暂不支持设置。 |
+
+- tokenizer:vLLM tokenizer读取目录,可参考[Llama2-7B-hf](https://huggingface.co/meta-llama/Llama-2-7b)
+- 其他:includes指定模型结构等其余参数;
+
+可以参考 [vLLM超参配置](../../../examples/megatron/configs/llama2/vllm_policy_inference.yaml)。
diff --git a/docs/zh/tutorial/continue_train.md b/docs/zh/tutorial/continue_train.md
index fe29c463..23e3e11a 100644
--- a/docs/zh/tutorial/continue_train.md
+++ b/docs/zh/tutorial/continue_train.md
@@ -1,11 +1,11 @@
 # 续跑和容错
-RLHF 任务涉及到多模型的计算和交互,随着模型规模的增大和计算资源的增加,由于依赖的软件栈和硬件环境都有可能出现偶发异常,会导致任务停止运行。
+Alignment 任务涉及到多模型的计算和交互,随着模型规模的增大和计算资源的增加,由于依赖的软件栈和硬件环境都有可能出现偶发异常,会导致任务停止运行。
 为了保障被中断的任务可以恢复状态进行自动续跑,ChatLearn提供了续跑的功能,结合 PAI-DLC 的 AIMaster,可以实现自动错误检测和续跑功能。
 
-## 配置 ChatLearn RLHF 续跑
+## 配置 ChatLearn 续跑
 
-RLHF 任务的续跑需要考虑以下几点:
+任务的续跑需要考虑以下几点:
 1. 数据进度的记录和恢复; 对于数据状态的记录,用户需要在训练配置主文件如 `rlhf.yaml` 中配置 `data_checkpoint_path`。
 如果 `data_checkpoint_path` 不为空,则 ChatLearn 会记录当前的数据进度,并在每次 `save_checkpoint` 的同时存储 data checkpoint。
 2. 
训练状态比如 episode、iteration 等信息的恢复;当用户配置了 `data_checkpoint_path`,同时文件夹中存在对应的 data checkpoint,ChatLearn 会自动恢复训练状态到当前最新的checkpoint状态。 @@ -24,7 +24,7 @@ if self.resume_training: self.args.finetune = False ``` -更多详情可以参考 `examples/megatron/step3_rlhf/run_scripts/llama2/run_7b_7b.sh` 。 +更多详情可以参考 `examples/megatron/scripts/train_rlhf_llama.sh` 。 如果用户在程序中配置了 `data_checkpoint_path` ,但是不想打开续跑功能,则也可以通过配置 `enable_resume_training: False` 来关闭此功能。 diff --git a/docs/zh/tutorial/custom_model_flow.md b/docs/zh/tutorial/custom_model_flow.md index 446069bc..2a5fbef4 100644 --- a/docs/zh/tutorial/custom_model_flow.md +++ b/docs/zh/tutorial/custom_model_flow.md @@ -1,4 +1,4 @@ -# ChatLearn 支持自定义推理和训练流程 +# 自定义流程 如果用户想定制自定义的推理和训练模型流程,可以通过使用 `Engine` 类来实现自定义。 用户可以传入 environment (Environment), trainer (Trainer) 和 evaluator (Evaluator) 来初始化 Engine。(这些组件可以为None) @@ -10,8 +10,7 @@ ## 如何自定义模型流程 -Environment, Trainer 和 Evaluator 提供了 set_flow 方法,来定制模型的计算 flow。以下例子定义了 RLHF 中的environment flow。 -注意:当前模型调用的函数必须为 forward_step / train_step / 用户注册的 `eval_func_name` 。 +将自定义 flow 函数传入 Environment, Trainer 和 Evaluator 的构造方法,来定制模型的计算 flow。以下例子定义了 RLHF 中的environment flow。 模型的输入个数为1个或多个,输出个数为0个或1个。任何和模型无关的调用将会被忽略。 ```python @@ -34,14 +33,14 @@ def env_flow(batch): ```python from chatlearn import Engine, Environment, Trainer -from chatlearn import RLHFModule +from chatlearn import BaseModule + class CustomEngine(Engine): def __init__(self, - reference: RLHFModule, - policy_trainer: RLHFModule): - + reference: BaseModule, + policy_trainer: BaseModule): def env_flow(batch): ref_out = reference.forward_step(batch) return ref_out @@ -49,8 +48,8 @@ class CustomEngine(Engine): def trainer_flow(batch): policy_trainer.train_step(batch) - env = Environment([reference]).set_flow(env_flow) - trainer = Trainer([policy_trainer]).set_flow(trainer_flow) + env = Environment(env_flow) + trainer = Trainer(trainer_flow) super().__init__(env, trainer) ``` 在这个例子中,我们定义了2个模型的 CustomEngine,其中 environment 只有一个 reference 
模型,trainer只有一个policy_trainer 模型。 @@ -70,7 +69,10 @@ engine.learn() ```python reference = PolicyReference("reference") ppo_policy = PolicyTrainer("policy_trainer") -evaluator = Evaluator([policy, reward]).set_dataset(val_prompts) +def eval_flow(batch): + r0 = reference.eval_step(batch) + return r0 +evaluator = Evaluator(eval_flow).set_dataset(val_prompts) engine = CustomEngine(reference, ppo_policy) \ .set_evaluator(evaluator) \ .set_dataset(train_prompts) @@ -100,17 +102,20 @@ def trainer_flow(batch): ppo_policy.train_step(batch) ppo_value.train_step(batch) -env = Environment([policy, value, reference, reward]) \ - .set_flow(env_flow) +def eval_flow(batch): + r0 = policy.eval_step(batch) + r1 = reward.eval_step(r0) + return r1 -trainer = Trainer([ppo_policy, ppo_value]) \ - .set_flow(trainer_flow) +env = Environment(env_flow) -evaluator = Evaluator([policy, reward]).set_dataset(val_prompts) +trainer = Trainer(trainer_flow) + +evaluator = Evaluator(eval_flow).set_dataset(val_prompts) engine = Engine(env, trainer, evaluator) \ .set_parameter_sync(ppo_policy, policy) \ .set_parameter_sync(ppo_value, value) \ .set_dataset(train_prompts) engine.learn() -``` \ No newline at end of file +``` diff --git a/docs/zh/tutorial/data.md b/docs/zh/tutorial/data.md index e5092bd4..cff5274e 100644 --- a/docs/zh/tutorial/data.md +++ b/docs/zh/tutorial/data.md @@ -1,35 +1,35 @@ # 数据准备 -本文档介绍三阶段 SFT, Reward 和 RLHF 的数据准备流程。 +本文档介绍不同阶段 SFT, Reward,RLHF,DPO, OnlineDPO 和 GRPO 的数据准备流程。 **以下是这个 Tutorial 脚本中使用的通用环境变量集合:** | ENV | 含义 | | --- | --- | | `CHATLEARN` | ChatLearn 代码仓库 clone 存放的位置 [https://github.com/alibaba/ChatLearn.git](https://github.com/alibaba/ChatLearn.git) | -| `DATASET_ROOT` | 存放SFT/Reward/RLHF训练数据集合的根目录 | +| `DATASET_ROOT` | 存放SFT/Reward/RLHF/DPO/OnlineDPO/GRPO训练数据集合的根目录 | ## 准备 SFT 训练数据 将 SFT 数据的问题 - 回复配对的样本,整理到一个 jsonl 文件中,其中 jsonl 文件中每一行为一条 SFT 数据,形式为如下的 Python 字典格式: -```json +``` {'query': 问题,'response': 回复} ``` 以 Anthropic 的 helpful&harmless 的数据为例,使用如下代码,会存一个 
`$DATASET_ROOT/sft/train.jsonl`. ```bash -cd ${CHATLEARN}/examples/megatron/step1_sft/ +cd ${CHATLEARN}/examples/megatron/ DATASET_ROOT=$path_to_dataset_root -python prepare_data.py $DATASET_ROOT +python data/prepare_data_sft.py $DATASET_ROOT ``` ## 准备 Reward 训练数据 1. 首先准备问题 - 不同回复配对的样本,整理到一个 jsonl 文件中,其中 jsonl 文件中每一行为一条 Reward 模型训练数据,形式为如下的 Python 字典格式: -```json +``` {'query': 问题,'response': [回复 1, 回复 2, .....], 'score': [score1, score2, .....]} ``` @@ -38,23 +38,61 @@ python prepare_data.py $DATASET_ROOT 2. 以 Anthropic 的 helpful&harmless 的数据为例,使用如下代码,会存一个 `$DATASET_ROOT/rm/train.jsonl` 和 `$DATASET_ROOT/rm/dev.jsonl`. ```bash -cd ${CHATLEARN}/examples/megatron/step2_reward/ +cd ${CHATLEARN}/examples/megatron/ DATASET_ROOT=path-to-dataset-root -python prepare_data.py $DATASET_ROOT +python data/prepare_data_reward.py $DATASET_ROOT ``` -## 准备 RLHF 训练数据 +## 准备 Alignment 训练数据 + +ChatLearn中支持多种Alignment的训练模式:RLHF, DPO, OnlineDPO, GRPO + +其中RLHF/OnlineDPO/GRPO数据格式相同。 + + +### RLHF/OnlineDPO/GRPO 1. 首先准备一个需要被探索的指令数据集,整理到一个 jsonl 文件中,其中 jsonl 文件中每一行为一条指令,格式为 -```json +``` {"prompt": 问题} ``` -2. 以 Anthropic 的 helpful&harmless 的数据为例,使用如下代码,会存一个`$DATASET_ROOT/rlhf/train.jsonl` 和`$DATASET_ROOT/rlhf/dev.jsonl`: +2. 以 Anthropic 的 helpful&harmless 的数据为例,使用如下代码,会存一个`$DATASET_ROOT/alignment/train.jsonl` 和`$DATASET_ROOT/alignment/dev.jsonl`: ```bash -cd ${CHATLEARN}/examples/megatron/step3_rlhf/ +cd ${CHATLEARN}/examples/megatron/ DATASET_ROOT=path-to-dataset-root -python prepare_data.py $DATASET_ROOT +python data/prepare_data_alignment.py $DATASET_ROOT ``` + +### DPO + +准备一个需要被探索的指令数据集,整理到一个 jsonl 文件中,其中 jsonl 文件中每一行为一条指令,格式为:prompt+chosen+rejected,例如: + +``` +{"prompt": 问题, "chosen": 正偏好回答, "rejected": 负偏好回答} +``` + +其中prompt字段内容分为两种场景: +1. 单轮对话:仅包括单轮对话的`问题`; +2. 
多轮对话:包含前几轮对话的问答及最后一轮的提问。 + +开源Anthropic的helpful&harmless的数据满足DPO训练需求,可参考RLHF章节下载相应训练数据。 + +### Math + +首先,准备一个要训练的数学数据集,并将其组织成一个 JSON 文件。JSON 文件中的每一行应该表示一个样本,格式如下: + +``` +{"eval_func": "math_rule", "prompt": prompt, "answer": answer} +``` + +以 `openai/gsm8k` 数据为例,使用以下代码将数据集存储在 `$DATASET_ROOT/math/train.jsonl` 中: + +``` +cd ${CHATLEARN}/examples/megatron/ +DATASET_ROOT=path-to-dataset-root +python data/prepare_data_math.py $DATASET_ROOT +``` + diff --git a/docs/zh/tutorial/ems.md b/docs/zh/tutorial/ems.md new file mode 100644 index 00000000..dab68054 --- /dev/null +++ b/docs/zh/tutorial/ems.md @@ -0,0 +1,32 @@ +# 高效显存复用(EMS) + +ChatLearn 中提供高效显存复用 (Efficient Memory Sharing, EMS) 功能来大幅减少训练过程中的显存占用。 +EMS 功能可以充分利用有限资源来训练更大规模的模型,也可以利用节约的显存来调整模型的并行策略或者增大 batch size,从而提升整体的训练效率。 + +ChatLearn 中多个模型共享相同的资源进行训练或推理时,打开 EMS 功能,可以让这些模型按序共享使用显存: +- 每个模型初始化完成后,将常驻显存的各类 tensor/buffer(包括 weight, grad buffer, optim states 等)卸载到内存或者直接释放,清空该模型占用的显存; +- 某个模型训练或推理前,先从内存中加载或者重建 tensor/buffer,然后进行训练或推理; +- 训练或推理完成后,将常驻显存的 tensor/buffer 卸载到内存或者直接释放,再次清空该模型占用的显存。 + +重复如上流程,多个模型间按序共享使用显存,最大化显存利用效率。 + +## 功能用法 +用户通过配置每个模型的 `free_memory` (bool 类型, 默认为 False) 参数来指定是否开启 EMS 功能。 +可以直接修改 `rlhf.yaml` 中每个模型的 `free_memory` 配置,例如打开 policy 模型的 EMS 功能: + +```yaml +policy: + model_config_file: old_policy_inference.yaml + ... 
+ free_memory: ${free_memory_policy:True} +``` + +用户也可以在训练脚本中通过配置环境变量来启动 EMS 功能: +- policy 模型:`export free_memory_policy=True` +- reference 模型:`export free_memory_reference=True` +- reward 模型:`export free_memory_reward=True` +- value 模型:`export free_memory_value=True` +- ppo_policy 模型:`export free_memory_ppo_policy=True` +- ppo_value 模型:`export free_memory_ppo_value=True` + +完整示例可以参考 [llama2 配置](../../../examples/megatron/configs/llama2/rlhf.yaml)。 diff --git a/docs/zh/tutorial/evaluator.md b/docs/zh/tutorial/evaluator.md new file mode 100644 index 00000000..44c2aba6 --- /dev/null +++ b/docs/zh/tutorial/evaluator.md @@ -0,0 +1,17 @@ +# Evaluator + +本文档将介绍如何进行模型评估。用户可以使用 `EvalEngine` 单独对模型进行评估,也可以在训练 Engine 里配置 evaluator 在训练的过程中进行评估。 + +```python +def eval_flow(batch): + p = policy.forward_step(batch) + r = reward.eval_step(p) + r1 = reward2.eval_step(p) + return r, r1 +evaluator = Evaluator(eval_flow) +evaluator.set_dataset(prompts) +results = evaluator.eval() +``` +在上述例子中,我们构建了一个三个模型的评估flow,用户可以自定义 evaluation 的执行 flow。 +evaluator.eval 返回的结果是一个 dict 类型,key 是 model_name,value 是一个 list,包含 batch 的计算结果。 +在上述例子中,eval 返回的结果为 {"reward": [batch0, batch1, batch2], "reward2": [batch0, batch1, batch2]} diff --git a/docs/zh/tutorial/offload.md b/docs/zh/tutorial/offload.md deleted file mode 100644 index b6733526..00000000 --- a/docs/zh/tutorial/offload.md +++ /dev/null @@ -1,21 +0,0 @@ -# Offload - -随着模型规模变大,为了充分利用有限资源达到最佳训练性能,我们可以借助 Offload 的技术来减少训练过程中的显存占用,来增大 batch size 以提升整体的训练效率。 -目前 ChatLearn 中支持了 Optimizer State Offload,未来我们会支持更多参数的 Offload。 - -## Optimizer State Offload -用户可以配置模型的 `offload_optimizer_states` (bool, 默认为 False) 参数来指定是否开启 Optimizer State Offload 。 -如果 `offload_optimizer_states == True`, 将在模型执行前将 Optimizer State onload 到 GPU,并在模型执行完成 将 Optimizer State offload - 到 CPU。 - -以下这个例子中,我们将对 `ppo_policy` 这个模型开启 Optimizer State Offload 。 - -```yaml - ppo_policy: - model_config_file: ppo_policy.yaml - num_device: 8 - trainable: True - offload_optimizer_states: True 
-``` - -完整示例可以参考 [llama2 配置](../../../examples/megatron/step3_rlhf/configs/llama2/rlhf.yaml)。 diff --git a/docs/zh/tutorial/profile.md b/docs/zh/tutorial/profile.md index 04ed8a7d..5e986fb9 100644 --- a/docs/zh/tutorial/profile.md +++ b/docs/zh/tutorial/profile.md @@ -19,7 +19,7 @@ profiler_dir: path_to_profile_dir 用户可以在系统的主配置文件中配置 rlhf 配置 `nsys: True` 来开启 nsys 的 profiler。 ```yaml -rlhf: +runtime: nsys: True ``` diff --git a/docs/zh/tutorial/run.md b/docs/zh/tutorial/run.md index 982b5fa1..05c3da87 100644 --- a/docs/zh/tutorial/run.md +++ b/docs/zh/tutorial/run.md @@ -13,7 +13,7 @@ ![image.png](../../images/dlc_2.jpg) -**对于 RLHF 训练任务,您需要填写高级配置`customPortList=30000-30050,createSvcForAllWorkers=true`。** +**对于 RLHF/DPO/OnlineDPO/GRPO 训练任务,您需要填写高级配置`customPortList=30000-30050,createSvcForAllWorkers=true`。** ## 其他环境分布式执行 @@ -28,6 +28,6 @@ export GPUS_PER_NODE=8 export RANK=xx ``` -# reference +## reference 1. 阿里云机器学习 PAI-DLC:[https://www.aliyun.com/activity/bigdata/pai-dlc](https://www.aliyun.com/activity/bigdata/pai-dlc) diff --git a/docs/zh/tutorial/tutorial_bloom.md b/docs/zh/tutorial/tutorial_bloom.md deleted file mode 100644 index 6faa2ba3..00000000 --- a/docs/zh/tutorial/tutorial_bloom.md +++ /dev/null @@ -1,162 +0,0 @@ -# 基于 Bloom 模型的端到端训练教程 - -本文档介绍基于 ChatLearn, Megatron-LM 框架和 Bloom 模型的训练流程。包含三阶段的训练:SFT, Reward 和 RLHF 训练。 - - -**以下是这个 Tutorial 脚本中使用的通用环境变量集合:** - -| ENV | 含义 | -| --- | --- | -| `CHATLEARN` | ChatLearn 代码仓库 clone 存放的位置 [https://github.com/alibaba/ChatLearn.git](https://github.com/alibaba/ChatLearn.git) | -| `MEGATRON` | Megatron-LM-ChatLearn 代码仓库 clone 存放的位置 [https://github.com/alibaba/Megatron-LM-ChatLearn.git](https://github.com/alibaba/Megatron-LM-ChatLearn.git) | -| `DATASET_ROOT` | 存放SFT/Reward/RLHF训练数据集合的根目录 | -| `TOKENIZER_PATH` | Tokenizer 使用的 vocab_file 所在的文件夹 | - - -## Setup: 镜像、代码、数据准备 - -### 镜像 - -推荐参考 `https://github.com/alibaba/ChatLearn/tree/master/docker/ngc/Dockerfile.ngc22.10` 准备镜像。 -如果在 PAI DLC 环境上训练,推荐使用我们准备好的镜像: - -```bash 
-registry.cn-wulanchabu.aliyuncs.com/pai-dlc/pytorch-training:1.13.0-gpu-py3.8-cu11.8-ngc22.10-ubuntu20.04 -``` - -### 代码 - -在这个示例中,我们需要下载以下相关代码。 - -```bash -# 下载为支持 Bloom 模型的 Megatron-LM-ChatLearn -git clone -b v0.1.0 https://github.com/alibaba/Megatron-LM-ChatLearn.git -# 下载ChatLearn代码 -git clone -b v0.1.0 https://github.com/alibaba/ChatLearn.git -``` - -### 数据 - -请参考 [三阶段数据](data.md) 准备好您的训练数据。 - - -## Step1: SFT - -SFT 指的是使用有标注的对话数据来微调预训练语言模型的过程。在这个示例中,我们需要下载预训练的模型,然后开始一个简单的 SFT 训练示例。 - -### 下载和转化预训练模型 - -若使用来自于 HuggingFace transformers 的模型,首先需要下载预训练 checkpoint,比如 HuggingFace Hub 中的 Bloom 模型:`bigscience/bloom-7b1`,或是本地保存好的 SFT 模型; -然后使用如下代码,将 HuggingFace transformers 模型转化为 Megatron-LM 模型格式;在这个例子中,我们会将模型转换成 `TP (tensor_model_parallel_size)=8,PP (pipeline_model_parallel_size)=1` 的 checkpoint, 模型会存放在`MEGATRON_BLOOM_CKPT_PATH`中。 - -```bash -MEGATRON=path-to-megatron -cd $MEGATRON - -bash examples/pai/tools/convert_transformers_megatron_bloom.sh \ -$MEGATRON \ -path-to-transformer-model \ -path-to-megatron-model \ -8 \ -1 \ -false -``` - -### 开启 SFT 训练 - -下面的脚本是一个 SFT 的训练样例。其中 `DATASET_PATH` 为 SFT 训练集路径,比如`$DATASET_ROOT/sft/train.jsonl`,在这个例子中,我们假设 tokenizer 存放的路径和模型 checkpoint 存放的路径相同。 - -```bash -export CHATLEARN=path-to-chatlearn -export MEGATRON=path-to-megatron-lm-chatlearn -cd ${CHATLEARN}/examples/megatron/step1_sft/ - -LOAD_PATH=$MEGATRON_BLOOM_CKPT_PATH \ -TOKENIZER_PATH=$MEGATRON_BLOOM_CKPT_PATH \ -DATASET_PATH=$DATASET_ROOT/sft/ \ -bash bloom_sft.sh -``` - -训练 log 和训练完成的模型默认会存放在`${CHATLEARN}/output/step1_sft`中,具体的定义详见`${CHATLEARN}/examples/megatron/step1_sft/bloom_sft.sh`脚本。 - -7B SFT 训练需要 8 A100-80GB/A800-80GB/H800-80GB GPU 卡的资源。分布式执行所需的环境变量和配置参考 [分布式执行](run.md)。 - -## Step2: Reward 模型训练 - -Reward 模型指的是在 RLHF 中作为人类评价的代理,对模型产生的问题回复进行实时评价打分的模型,Reward 模型输入问题以及模型回复,可以产生一个标量表示模型回复的质量。 - - -### 开启 Reward 模型训练 - -依据 InstructGPT[1],Reward 模型训练基于 SFT 训练产生的模型 checkpoint 初始化,训练代码如下: - -```bash -export CHATLEARN=path-to-chatlearn -export 
MEGATRON=path-to-megatron-lm-chatlearn -cd ${CHATLEARN}/examples/megatron/step2_reward/ - -LOAD_PATH=path-to-sft-ckpt \ -TOKENIZER_PATH=$MEGATRON_BLOOM_CKPT_PATH \ -DATASET_PATH=$DATASET_ROOT/rm/ \ -bash bloom_reward.sh -``` - -训练 log 和训练完成的模型默认会存放在`${CHATLEARN}/output/step2_reward`中,具体的定义详见`${CHATLEARN}/examples/megatron/step2_reward/bloom_reward.sh`脚本。 -分布式执行所需的环境变量和配置参考 [分布式执行](run.md)。 - -## Step3: RLHF 训练 -RLHF 指的是在一个只有指令的数据集上尝试不同的回复然后吸取 Reward 模型给不同回复的 reward 的监督信号的过程。 - -### 开启 RLHF 训练 - -[阿里云 PAI DLC](https://www.aliyun.com/activity/bigdata/pai-dlc)[2]可以非常便捷高效地支持 RLHF 任务的训练。以下是一个 Bloom-7B 的 Policy 和 7B 的 Reward 模型的训练脚本。在这个例子中,用户需要设置 `POLICY_LOAD` 为 SFT 产出的 checkpoint 路径,Policy 模型和 Reference 模型将以 SFT 的 checkpoint 初始化。`REWARD_LOAD` 为 Reward 训练产出的 checkpoint 路径,同时,用户可以指定 load checkpoint 对应的 iteration 数。Reward 模型和 Value 模型将以 Reward 模型的权重作初始化。`VOCAB_FILE` 为 `BloomTokenizer` 所需文件所在的文件夹路径。 - -```bash -export CHATLEARN=path-to-chatlearn -export MEGATRON=path-to-megatron-lm-chatlearn -export DATASET_PATH=$DATASET_ROOT/rlhf/train.jsonl - -cd ${CHATLEARN}/examples/megatron/step3_rlhf - -export exp_name=any_experiment_name_you_like - -POLICY_LOAD=path-to-sft-ckpt \ -REWARD_LOAD=path-to-trained-rm-checkpoint \ -REWARD_LOAD_ITERATION=1000 \ -VOCAB_FILE=path-to-vocab-file \ -bash run_scripts/bloom/run_7b1_7b1.sh -``` - -在我们的训练脚本里,7B Policy + 7B Reward 的 RLHF 训练资源需要 8 A100-80GB/A800-80GB/H800-80GB GPU 卡的资源。 - -分布式执行所需的环境变量和配置参考 [分布式执行](run.md)。 -**注意对于 RLHF 任务,如果在 PAI DLC 上运行,您需要填写高级配置`customPortList=30000-30050,createSvcForAllWorkers=true`。** - - -### 效果评估 -首先,我们可以通过 ChatLearn 的模型转换工具将 Megatron-LM 格式的模型转换为 HuggingFace transformers 模型格式。 - -```bash -MEGATRON=path-to-megatron-lm-chatlearn -cd $MEGATRON - -bash examples/pai/tools/convert_transformers_megatron_bloom.sh \ -$MEGATRON \ -ckpt-to-rlhf-policy-ckpt \ -path-to-transformers-ckpt-path \ -1 \ -1 \ -true -``` - -我们在 MT-Bench 上使用 GPT-4 API 测评了 Bloom 在 HH 数据集上 SFT 后和 RLHF 后的效果,可以看到相比于 SFT 后的模型,RLHF 提升了模型的平均表现。且在 
Extraction、Math、Reasoning、STEM、Writing 项上均有所提升。我们这里的性能提升来自于开源 HH 数据集训练的 Reward 模型,使用用户自己定制的 Reward 模型有助于取得更好的效果。 - -| Model | Coding | Extraction | Humanities | Math | Reasoning | Roleplay | STEM | Writing | Avg | -| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | -| bloom_sft | 1.45 | 1.1 | 3.35 | 1.45 | 2.6 | 3.1 | 2.65 | 1.4 | 2.27 | -| bloom_rlhf | 1.4 | **1.4** | 3.05 | **1.5** | **2.65** | 3.05 | **3.05** | **1.6** | **2.35** | - -## Reference - -1. Training language models to follow instructions with human feedback,[https://arxiv.org/abs/2203.02155](https://arxiv.org/abs/2203.02155) - diff --git a/docs/zh/tutorial/tutorial_llama2.md b/docs/zh/tutorial/tutorial_llama2.md index 3c5edafd..2efeee68 100644 --- a/docs/zh/tutorial/tutorial_llama2.md +++ b/docs/zh/tutorial/tutorial_llama2.md @@ -1,6 +1,9 @@ -# 基于 LLaMA 模型的端到端训练教程 +# 基于 Llama 模型的端到端训练教程 -本文档介绍基于 ChatLearn, Megatron-LM 框架和 LLaMA/LLaMA2 模型的训练流程。包含三阶段的训练:SFT, Reward 和 RLHF 训练。 +本文档介绍基于 ChatLearn, Megatron-LM 框架和 Llama/Llama2 模型进行 alignment 的训练流程。支持RLHF、DPO、OnlineDPO、GRPO 多种训练模式: +1. RLHF(Reinforcement Learning from Human Feedback):包括三阶段的训练(SFT, Reward 和 RLHF 训练); +2. DPO(Direct Preference Optimization):包括两阶段的训练(SFT 和 DPO 训练); +3. OnlineDPO/GRPO:介于 DPO 和 RLHF 之间,使用 Policy + Reward 模型来自动生成数据并进行打分,再进行DPO训练,包括三阶段的训练(SFT, Reward 和 DPO 训练). 
**以下是这个 Tutorial 脚本中使用的通用环境变量集合:** @@ -9,100 +12,81 @@ | --- |-------------------------------------------------------------------------------------------------------------------------------| | `CHATLEARN` | ChatLearn 代码仓库 clone 存放的位置 [https://github.com/alibaba/ChatLearn.git](https://github.com/alibaba/ChatLearn.git) | | `MEGATRON` | Megatron-LM 代码仓库 clone 存放的位置 [https://github.com/NVIDIA/Megatron-LM.git](https://github.com/NVIDIA/Megatron-LM.git) | -| `DATASET_ROOT` | 存放SFT/Reward/RLHF训练数据集合的根目录 | +| `DATASET_ROOT` | 存放SFT/Reward/RLHF/DPO/OnlineDPO/GRPO训练数据集合的根目录 | | `TOKENIZER_MODEL` | Tokenizer 使用的 tokenizer_model 所在的路径 | ## Setup: 镜像、代码、数据准备 -### 镜像 -推荐参考 `https://github.com/alibaba/ChatLearn/tree/master/docker/ngc/Dockerfile.ngc23.09` 准备镜像。 -如果在 PAI DLC 环境上训练,推荐使用我们准备好的镜像: +### 镜像和代码 -```bash -registry.cn-wulanchabu.aliyuncs.com/pai-dlc/pytorch-training:2.1.0-gpu-py3.10-cu12.2-ngc23.09-ubuntu22.04 -``` - -### 代码 - -在这个示例中,我们需要下载以下相关代码。 - -```bash -# 下载为支持Megatron-LM -git clone https://github.com/NVIDIA/Megatron-LM.git -git checkout 954a65b04 -# 下载ChatLearn代码 -git clone https://github.com/alibaba/ChatLearn.git -``` +请参考 [镜像和代码准备](../installation.md)。 ### 数据 -请参考 [三阶段数据](data.md) 准备好您的训练数据。 +请参考 [各阶段数据](data.md) 准备好您的训练数据。 -## Step1: SFT +## Step: SFT SFT 指的是使用有标注的对话数据来微调预训练语言模型的过程。在这个示例中,我们需要下载预训练的模型,然后开始一个简单的 SFT 训练示例。 ### 下载和转化预训练模型 -若使用来自于 HuggingFace transformers 的模型,首先需要下载预训练 checkpoint,比如 HuggingFace Hub 中的 LLaMA2 模型:`meta-llama/Llama-2-7b-hf`,或是本地保存好的 SFT 模型; +若使用来自于 HuggingFace transformers 的模型,首先需要下载预训练 checkpoint,比如 HuggingFace Hub 中的 Llama2 模型:`meta-llama/Llama-2-7b-hf`,或是本地保存好的 SFT 模型; 然后使用如下代码,将 HuggingFace transformers 模型转化为 Megatron-LM 模型格式; -1. 对于7B的模型,我们会将模型转换成 `TP (tensor_model_parallel_size)=4,PP (pipeline_model_parallel_size)=1` 的 checkpoint, 模型会存放在`MEGATRON_LLAMA_CKPT_PATH`中。 -2. 对于13B的模型,我们会将模型转化成 `TP=8,PP=1` 的 checkpoint。 -3. 对于70B的模型,我们会将模型转化成 `TP=8,PP=4` 的 checkpoint。 +1. 
对于llama2-7B的模型,我们会将模型转换成 `TP (tensor_model_parallel_size)=4,PP (pipeline_model_parallel_size)=1` 的 checkpoint, 模型会存放在`MEGATRON_LLAMA_CKPT_PATH`中。 +2. 对于llama2-13B的模型,我们会将模型转化成 `TP=8,PP=1` 的 checkpoint。 +3. 对于llama2-70B的模型,我们会将模型转化成 `TP=8,PP=4` 的 checkpoint。 ```bash -MEGATRON=path-to-megatron -cd $MEGATRON - -HF_FORMAT_DIR=path-to-hf-model -TOKENIZER_MODEL=$HF_FORMAT_DIR/tokenizer.model -MEGATRON_FORMAT_DIR=path-to-meg-model - -python tools/checkpoint/util.py \ - --model-type GPT \ - --loader llama2_hf \ - --saver megatron \ - --target-tensor-parallel-size $TP \ - --target-pipeline-parallel-size $PP \ - --load-dir ${HF_FORMAT_DIR} \ - --save-dir ${MEGATRON_FORMAT_DIR} \ - --tokenizer-model ${TOKENIZER_MODEL} +export MEGATRON=path-to-megatron-lm +export CHATLEARN=path-to-chatlearn + +cd ${CHATLEARN}/examples/megatron/sft/ + +TP=num_of_tp \ +PP=num_of_pp \ +LOAD_PATH=path-to-hf-model \ +TOKENIZER_MODEL=$LOAD_PATH/tokenizer.model \ +SAVE_PATH=path-to-megatron-model \ +bash scripts/convert_hf_to_megatron.sh ``` ### 开启 SFT 训练 下面的脚本是一个 SFT 的训练样例。其中 `DATASET_PATH` 为 SFT 训练集路径,比如`$DATASET_ROOT/sft/train.jsonl`。 -其中 `MODEL_SIZE` 为脚本中指定模型大小的环境变量,可以为 `7B`/`13B`/`70B`。 +其中 `MODEL_SIZE` 为脚本中指定模型大小的环境变量,可以为 `llama2-7B`/`llama2-13B`/`llama2-70B`。 ```bash export CHATLEARN=path-to-chatlearn -export MEGATRON=path-to-megatron-lm-extension -cd ${CHATLEARN}/examples/megatron/step1_sft/ +export MEGATRON=path-to-megatron-lm +cd ${CHATLEARN}/examples/megatron/sft/ MODEL_SIZE=$MODEL_SIZE \ LOAD_PATH=$MEGATRON_LLAMA2_CKPT_PATH \ TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ DATASET_PATH=$DATASET_ROOT/sft/ \ -bash llama2_sft.sh +bash scripts/llama2_sft.sh ``` -训练 log 和训练完成的模型默认会存放在`${CHATLEARN}/output/step1_sft`中,具体的定义详见`${CHATLEARN}/examples/megatron/step1_sft/llama2_sft.sh`脚本。 +训练 log 和训练完成的模型默认会存放在`${CHATLEARN}/output/sft`中,可以通过 CHECKPOINT_PATH 来指定模型保存路径,具体的定义详见`${CHATLEARN}/examples/megatron/sft/scripts/llama2_sft.sh`脚本。 在我们的训练脚本里,资源需求 (假设资源为 A100-80GB/A800-80GB/H800-80GB GPU) 如下: -1. 
7B SFT: 8 GPU
-2. 13B SFT: 8 GPU
-3. 70B SFT: 4*8 GPU
+1. llama2-7B SFT: 8 GPU
+2. llama2-13B SFT: 8 GPU
+3. llama2-70B SFT: 4*8 GPU
 
 分布式执行所需的环境变量和配置参考 [分布式执行](run.md)。
 
-## Step2: Reward 模型训练
+## Step: Reward 模型训练
 
 Reward 模型指的是在 RLHF 中作为人类评价的代理,对模型产生的问题回复进行实时评价打分的模型,Reward 模型输入问题以及模型回复,可以产生一个标量表示模型回复的质量。
 
+**注**:DPO训练模式不需要训练Reward模型。
+
 
 ### 开启 Reward 模型训练
 
 依据 InstructGPT[1],Reward 模型训练基于 SFT 训练产生的模型 checkpoint 初始化,训练代码如下:
@@ -110,27 +94,31 @@
 ```bash
 export CHATLEARN=path-to-chatlearn
 export MEGATRON=path-to-megatron-lm-extension
-cd ${CHATLEARN}/examples/megatron/step2_reward/
+cd ${CHATLEARN}/examples/megatron/
 
 LOAD_PATH=path-to-sft-ckpt \
 TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \
 DATASET_PATH=$DATASET_ROOT/rm/ \
-bash llama2_reward.sh
+bash scripts/train_reward_llama.sh
 ```
 
-训练 log 和训练完成的模型默认会存放在`${CHATLEARN}/output/step2_reward`中,具体的定义详见`${CHATLEARN}/examples/megatron/step2_reward/llama2_reward.sh`脚本。
+训练 log 和训练完成的模型默认会存放在`${CHATLEARN}/output/reward`中,具体的定义详见`${CHATLEARN}/examples/megatron/scripts/train_reward_llama.sh`脚本。
 
 相同规模的 Reward 模型训练所需的资源需求和 SFT 是一样的。
 
 分布式执行所需的环境变量和配置参考 [分布式执行](run.md)。
 
-## Step3: RLHF 训练
-RLHF 指的是在一个只有指令的数据集上尝试不同的回复然后吸取 Reward 模型给不同回复的 reward 的监督信号的过程。
+## Step: Alignment 训练
+ChatLearn 支持多种 Alignment 训练模式:RLHF、DPO、OnlineDPO、GRPO
+
+### 开启 Alignment 训练
 
-### 开启 RLHF 训练
+以下是一个Llama2-7B规模模型训练的使用范例。
 
-以下是一个 LLaMA2-7B 的 Policy 和 7B 的 Reward 模型的训练脚本。
+#### RLHF
+
+以下是一个 Llama2-7B 的 Policy 和 7B 的 Reward 模型的训练脚本。
 在这个例子中,用户需要设置 `POLICY_LOAD` 为 SFT 产出的 checkpoint 路径,Policy 模型和 Reference 模型将以 SFT 的 checkpoint 初始化。
 `REWARD_LOAD` 为 Reward 训练产出的 checkpoint 路径,同时,用户可以指定 load checkpoint 对应的 iteration 数。
 Reward 模型和 Value 模型将以 Reward 模型的权重作初始化。`TOKENIZER_MODEL` 为 `LlamaTokenizer` 所需文件 `tokenizer.model` 所在的文件夹路径。
@@ -138,26 +126,88 @@ Reward 模型和 Value 模型将以 Reward 模型的权重作初始化。`TOKENI
 ```bash
 export CHATLEARN=path-to-chatlearn
 export MEGATRON=path-to-megatron-lm
-export DATASET_PATH=$DATASET_ROOT/rlhf/train.jsonl
+export 
DATASET_PATH=$DATASET_ROOT/alignment/train.jsonl + +cd ${CHATLEARN}/examples/megatron/ + +export model_size=llama2-7B + +POLICY_LOAD=path-to-sft-ckpt \ +REWARD_LOAD=path-to-trained-rm-checkpoint \ +REWARD_LOAD_ITERATION=1000 \ +TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ +bash scripts/train_rlhf_llama.sh +``` + +#### OnlineDPO/GRPO + +OnlineDPO/GRPO训练流程和RLHF比较类似,只是不需要Value模型,以下是一个 Llama2-7B 的 Policy 和 7B 的 Reward 模型的训练脚本。 + +```bash +export CHATLEARN=path-to-chatlearn +export MEGATRON=path-to-megatron-lm +export DATASET_PATH=$DATASET_ROOT/alignment/train.jsonl + +cd ${CHATLEARN}/examples/megatron/ + +export model_size=llama2-7B + +POLICY_LOAD=path-to-sft-ckpt \ +REWARD_LOAD=path-to-trained-rm-checkpoint \ +REWARD_LOAD_ITERATION=1000 \ +TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ +bash scripts/train_online_dpo_llama.sh +``` + +#### DPO +以下是一个 Llama2-7B 的 Policy模型的训练脚本。 +在这个例子中,用户需要设置 `POLICY_LOAD` 为 SFT 产出的 checkpoint 路径,Policy 模型和 Reference 模型将以 SFT 的 checkpoint 初始化。 +`TOKENIZER_MODEL` 为 `LlamaTokenizer` 所需文件 `tokenizer.model` 所在的文件夹路径。 + +```bash +export CHATLEARN=path-to-chatlearn +export MEGATRON=path-to-megatron-lm +export DATASET_PATH=$DATASET_ROOT/alignment/train.jsonl -cd ${CHATLEARN}/examples/megatron/step3_rlhf +cd ${CHATLEARN}/examples/megatron/ -export exp_name=any_experiment_name_you_like +export model_size=llama2-7B + +POLICY_LOAD=path-to-sft-ckpt \ +TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ +bash scripts/train_dpo_llama.sh +``` + +#### GRPO Math + +如果用户需要训练一个 GRPO Math 模型,需要先参考 [Math data](data.md#Math) 准备好数学数据集。以下为一个 Llama2-7B 的模型训练范例。 + +``` +export CHATLEARN=path-to-chatlearn +export MEGATRON=path-to-megatron-lm +export DATASET_PATH=$DATASET_ROOT/math/train.jsonl + +cd ${CHATLEARN}/examples/megatron/ + +export model_size=llama2-7B POLICY_LOAD=path-to-sft-ckpt \ REWARD_LOAD=path-to-trained-rm-checkpoint \ REWARD_LOAD_ITERATION=1000 \ TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ -bash run_scripts/llama2/run_7b_7b.sh +bash scripts/train_grpo_math_llama.sh 
``` -如果您需要训练 13B / 70B 的模型,只需要将上述训练脚本中的 `run_7b_7b.sh` 替换成 `run_13b_13b.sh` / `run_70b_70b.sh`。 + +### 更大规模参数模型范例 + +如果您需要训练 llama2-13B / llama2-70B 的模型,只需要将上述训练脚本中的 `export model_size=llama2-7B` 替换成 `export model_size=llama2-13B` / `export model_size=llama2-70B`。 您也可以根据自己的需求修改模型配置和其他参数。 在我们的训练脚本里,资源需求 (假设资源为 A100-80GB/A800-80GB/H800-80GB GPU) 如下: -1. 7B RLHF: 8 GPU -2. 13B RLHF: 2*8 GPU -3. 70B RLHF: 4*8 GPU +1. llama2-7B RLHF: 8 GPU +2. llama2-13B RLHF: 2*8 GPU +3. llama2-70B RLHF: 4*8 GPU 分布式执行所需的环境变量和配置参考 [分布式执行](run.md)。 **注意对于 RLHF 任务,如果在 PAI DLC 上运行,您需要填写高级配置`customPortList=30000-30050,createSvcForAllWorkers=true`。** @@ -168,29 +218,41 @@ bash run_scripts/llama2/run_7b_7b.sh 首先,我们可以通过 ChatLearn 的模型转换工具将 Megatron-LM 格式的模型转换为 HuggingFace transformers 模型格式。 ```bash -cd $CHATLEARN -python chatlearn/tools/megatron_to_hf.py \ - --load_path ${dir-to-megatron-model} \ - --save_path ${save-dir} \ - --target_params_dtype bf16 \ - --vocab_dir ${dir-of-vocab-file} \ - --megatron_path ${dir-to-megatron} +export CHATLEARN=path-to-chatlearn +export MEGATRON=path-to-megatron-lm + +cd $CHATLEARN/examples/megatron/alignment + +LOAD_PATH=path-to-megatron-model \ +SAVE_PATH=path-to-hf-model \ +VOCAB_PATH=path-to-vocab \ +target_params_dtype=bf16 \ +bash scripts/convert_megatron_to_hf.sh ``` -- `load_path` 为需要转化的Megatron checkpoint所在的文件夹,要求 checkpoint 并行策略为 `TP=1, PP=1`。 -- `save_dir` 为转化后的 HF Transformer 模型所在的文件夹。 +- `load_path` 为需要转化的Megatron checkpoint所在的文件夹。 +- `save_path` 为转化后的 HF Transformer 模型所在的文件夹。 - `target_params_dtype` 为转化模型的数据类型。 -- `vocab_dir` 为 `tokenizer.model` 等文件所在的文件夹。 -- `megatron_path` 为 Megatron-LM 所在的文件夹。 +- `vocab_path` 为 `tokenizer.model` 等文件所在的文件夹。 + +我们在 MT-Bench 上使用 GPT-4 API 测评了 Llama2-7B 在 HH 数据集上 SFT 后和 RLHF 后的效果,可以看到相比于 SFT 后的模型,RLHF 提升了模型的平均表现。其中RLHF在 Humanities、Math、Roleplay、Reasoning、Writing 项上有显著的提升。我们这里的性能提升来自于开源 HH 数据集训练的 Reward 模型,使用用户自己定制的 Reward 模型有助于取得更好的效果。 + + +| Metric | llama_sft | llama_rlhf | llama_dpo | llama_onlinedpo | 
+|-------------|-----------|------------|-----------|------------------| +| Coding | 2.05 | **1.65** | **2.17** | **1.75** | +| Extraction | 4.40 | **4.0** | **4.35** | **3.70** | +| Humanities | 5.85 | **7.17** | **6.70** | **7.52** | +| Math | 1.15 | **1.70** | **1.25** | **1.05** | +| Reasoning | 3.15 | **3.30** | **3.15** | **2.00** | +| Roleplay | 4.75 | **5.50** | **5.65** | **6.10** | +| STEM | 6.05 | **5.75** | **6.77** | **7.10** | +| Writing | 4.55 | **4.75** | **4.8** | **5.30** | +| Avg | 3.94 | **4.22** | **4.33** | **4.31** | -我们在 MT-Bench 上使用 GPT-4 API 测评了 LLaMA-13B 在 HH 数据集上 SFT 后和 RLHF 后的效果,可以看到相比于 SFT 后的模型,RLHF 提升了模型的平均表现。 -且在 Humanities、Math、Roleplay、STEM、Writing 项上有显著的提升。我们这里的性能提升来自于开源 HH 数据集训练的 Reward 模型,使用用户自己定制的 Reward 模型有助于取得更好的效果。 -| Model | Coding | Extraction | Humanities | Math | Reasoning | Roleplay | STEM | Writing | Avg | -| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | -| llama_sft | 1.6 | 2.7 | 4.2 | 1.1 | 2.85 | 3.35 | 4.55 | 2.95 | 2.90 | -| llama_rlhf | **1.75** | **3.45** | **4.75** | **1.55** | **3.5** | **5.85** | **5.0** | **5.0** | **3.85** | ## Reference 1. 
Training language models to follow instructions with human feedback,[https://arxiv.org/abs/2203.02155](https://arxiv.org/abs/2203.02155) + diff --git a/docs/zh/tutorial/tutorial_qwen.md b/docs/zh/tutorial/tutorial_qwen.md new file mode 100644 index 00000000..2fc71f25 --- /dev/null +++ b/docs/zh/tutorial/tutorial_qwen.md @@ -0,0 +1,46 @@ +# 基于 Qwen 模型的端到端训练教程 + +本文档介绍基于 ChatLearn, DeepSpeed 框架和 Qwen 模型进行 DPO 训练。 + +**以下是这个 Tutorial 脚本中使用的通用环境变量集合:** + +| ENV | 含义 | +| --- |-------------------------------------------------------------------------------------------------------------------------------| +| `CHATLEARN` | ChatLearn 代码仓库 clone 存放的位置 [https://github.com/alibaba/ChatLearn.git](https://github.com/alibaba/ChatLearn.git) | +| `DATASET_ROOT` | 存放训练数据集合的根目录 | + + +## Setup: 镜像、代码、数据准备 + +### 镜像和代码 + +请参考 [镜像和代码准备](../installation.md)。 + +### 数据 + +qwen2 要求的数据格式为chatml + +``` +{"type": "chatml", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Tell me something about large language models."}, {"role": "assistant", "content": "Large language models are a type of language model that is trained on a large corpus of text data. They are capable of generating human-like text and are used in a variety of natural language processing tasks..."}], "source": "unknown"} +``` +通过以下脚本可以将 `Dahoas/full-hh-rlhf` 转换为 chatml 格式的数据, 并存储 `$DATASET_ROOT/alignment/train.jsonl` 文件. 
+ +```bash +cd ${CHATLEARN}/examples/huggingface/ +DATASET_ROOT=path-to-dataset-root +python data/preprocess_data_chatml.py $DATASET_ROOT +``` + + +### DPO + +以下是一个 Qwen2-7B 的 DPO 训练范例。 +在这个例子中,用户需要设置 `policy_model_path` 为 初始化模型 checkpoint 路径,Policy 模型和 Reference 模型将以这个 checkpoint 初始化。 + +``` +export CHATLEARN=path-to-chatlearn +export DATASET_PATH=$DATASET_ROOT/alignment/train.jsonl +export policy_model_path=path-to-qwen2-ckpt +cd ${CHATLEARN}/examples/huggingface/ +bash scripts/train_dpo_qwen.sh +``` diff --git a/examples/megatron/dataset/__init__.py b/examples/__init__.py similarity index 100% rename from examples/megatron/dataset/__init__.py rename to examples/__init__.py diff --git a/examples/huggingface/configs/qwen2/base.yaml b/examples/huggingface/configs/qwen2/base.yaml new file mode 100644 index 00000000..58bc04d2 --- /dev/null +++ b/examples/huggingface/configs/qwen2/base.yaml @@ -0,0 +1,3 @@ +bf16: True +flash_attn: True +seed: 8888 \ No newline at end of file diff --git a/examples/huggingface/configs/qwen2/dpo.yaml b/examples/huggingface/configs/qwen2/dpo.yaml new file mode 100644 index 00000000..af4cc4f1 --- /dev/null +++ b/examples/huggingface/configs/qwen2/dpo.yaml @@ -0,0 +1,37 @@ +runtime_env: + platform: DLC + excludes: + - "*pt" + - "logs" + - "tensorboards" + - ".nfs*" + + +models: + + reference: + model_config_file: reference.yaml + num_device: ${num_device:1} + zero_size: ${zero_size:4} + gpu_per_process: 1 + trainable: False + + policy_trainer: + model_config_file: policy_trainer.yaml + num_device: ${num_device:1} + zero_size: ${zero_size:4} + gpu_per_process: 1 + trainable: True + +runtime: + colocation: + - reference,policy_trainer + generation_batch_size: ${generation_batch_size:8} + data_path: ${data_path} + eval_data_path: ${eval_data_path} + output_dir: ${output_dir} + exp_name: ${exp_name} + sample_per_episode: ${sample_per_episode:1024} + train_micro_batch_size: ${train_micro_batch_size:4} + train_global_batch_size: 
${train_global_batch_size:256} + save_episode_interval: ${save_episode_interval:100} diff --git a/examples/huggingface/configs/qwen2/policy_trainer.yaml b/examples/huggingface/configs/qwen2/policy_trainer.yaml new file mode 100644 index 00000000..a972cc0c --- /dev/null +++ b/examples/huggingface/configs/qwen2/policy_trainer.yaml @@ -0,0 +1,10 @@ +includes: + - base.yaml +normalize_reward: True +zero_stage: 3 +# rename to model_path +pretrain_or_model: ${policy_model_path} +learning_rate: 5e-6 +l2: 0.0 +learning_rate: 5e-7 +gradient_checkpointing: True \ No newline at end of file diff --git a/examples/huggingface/configs/qwen2/reference.yaml b/examples/huggingface/configs/qwen2/reference.yaml new file mode 100644 index 00000000..af162cce --- /dev/null +++ b/examples/huggingface/configs/qwen2/reference.yaml @@ -0,0 +1,17 @@ +includes: + - base.yaml +zero_stage: 3 +pretrain_or_model: ${model_path} +bf16: True +max_samples: 80000 +seed: 8888 +prompt_max_len: 1024 + +reward_data: ${reward_dataset_path} +reward_data_probs: 0.8,0.2 +max_len: 1024 +input_template: "\n\nHuman: {}\n\nAssistant:" +chosen_key: chosen +rejected_key: rejected +prompt_key: messages +apply_chat_template: True diff --git a/examples/huggingface/data/preprocess_data_chatml.py b/examples/huggingface/data/preprocess_data_chatml.py new file mode 100644 index 00000000..7bc16606 --- /dev/null +++ b/examples/huggingface/data/preprocess_data_chatml.py @@ -0,0 +1,94 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""" +{ + "type": "chatml", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Tell me something about large language models." + }, + { + "role": "assistant", + "content": "Large language models are a type of language model that is trained on a large corpus of text data. They are capable of generating human-like text and are used in a variety of natural language processing tasks..." + } + ], + "source": "unknown" +} +""" + +import json +import os +import sys + +from datasets import load_dataset +from tqdm import tqdm +import re + +task_type = 'alignment' +prefix = os.path.join(sys.argv[1], task_type) + +def parse_dialogue(dialogue): + items = [{"role": "system", "content": "You are a helpful assistant."}] + # Define the patterns for Human and Assistant + pattern = r'\n\n(Human|Assistant): (.*?)(?=\n\n(Human|Assistant):|\Z)' + + # Find all matches + matches = re.findall(pattern, dialogue, re.DOTALL) + + for role, statement, _ in matches: + if role == "Human": + role = "user" + elif role == "Assistant": + role = "assistant" + else: + raise RuntimeError(f'unknown role {role}') + items.append({"role": role, "content": statement.strip()}) + + return items + +while True: + try: + alignment_data = load_dataset('Dahoas/full-hh-rlhf') + break + except Exception as e: + print(e) + continue + + +if not os.path.exists(prefix): + os.makedirs(prefix) + +for split in ['train', 'test']: + if split not in alignment_data: + continue + data = alignment_data[split] + chatml_item = {} + with open(f'{prefix}/{split}.jsonl', 'w') as f: + for item in data: + chatml_item["type"] = "chatml" + input_text = item['prompt'] + # Parse and classify the messages + chatml_item["messages"] = parse_dialogue(input_text) + if task_type == 'sft': 
+ chatml_item["messages"].append({"role": "assistant", "content": item['response'].strip()}) + else: + chatml_item["chosen"] = [{"role": "assistant", "content": item['chosen']}] + chatml_item["rejected"] = [{"role": "assistant", "content": item['rejected']}] + f.write(json.dumps(chatml_item) + '\n') diff --git a/examples/huggingface/data/reward_dataset.py b/examples/huggingface/data/reward_dataset.py new file mode 100644 index 00000000..a19f9b9f --- /dev/null +++ b/examples/huggingface/data/reward_dataset.py @@ -0,0 +1,194 @@ +import torch +from torch.utils.data import Dataset +from tqdm import tqdm +import torch.nn.functional as F +from collections import defaultdict + +def exist_and_not_none(d, key): + return key in d and d[key] is not None + +def zero_pad_sequences(sequences, side: str = "left", value=0): + assert side in ("left", "right") + max_len = max(seq.size(-1) for seq in sequences) + padded_sequences = [] + for seq in sequences: + pad_len = max_len - seq.size(-1) + padding = (pad_len, 0) if side == "left" else (0, pad_len) + padded_sequences.append(F.pad(seq, padding, value=value)) + return torch.stack(padded_sequences, dim=0) + + +def preprocess_data( + data, + input_template=None, + prompt_key=None, + chosen_key="chosen", + rejected_key="rejected", + apply_chat_template=None, + is_dpo=False, +) -> str: + if apply_chat_template: + if prompt_key: + prompt = apply_chat_template(data[prompt_key], tokenize=False, add_generation_prompt=True) + chosen = apply_chat_template(data[prompt_key] + data[chosen_key], tokenize=False)[len(prompt) :] + rejected = apply_chat_template(data[prompt_key] + data[rejected_key], tokenize=False)[len(prompt) :] + else: + prompt = "" + chosen = apply_chat_template(data[chosen_key], tokenize=False) + rejected = apply_chat_template(data[rejected_key], tokenize=False) + + if is_dpo: + prompt = apply_chat_template(data[chosen_key][:-1], tokenize=False, add_generation_prompt=True) + chosen = chosen[len(prompt) :] + rejected = 
rejected[len(prompt) :] + else: + if prompt_key: + prompt = data[prompt_key] + start_str = input_template.split("{}")[0] + end_str = input_template.split("{}")[1] + if input_template: + if not (prompt.startswith(start_str) and prompt.endswith(end_str)): + prompt = input_template.format(prompt) + else: + prompt = "" + chosen = data[chosen_key] + rejected = data[rejected_key] + + # margin loss + margin = data["margin"] if exist_and_not_none(data, "margin") else 0 + + return prompt, chosen, rejected, margin + +class RewardDataset(Dataset): + """ + Dataset for reward model + + Args: + dataset: dataset for reward model + self.tokenizer: self.tokenizer for reward model + self.max_length: max length of input + """ + + def __init__( + self, + dataset, + args, + tokenizer, + max_length: int, + input_template=None, + is_dpo=False, + ) -> None: + super().__init__() + self.is_dpo = is_dpo + self.args = args + + self.prompts = [] + self.chosens = [] + self.rejects = [] + if self.is_dpo: + self.prompt_ids_lens = [] + else: + self.margins = [] + + self.tokenizer = tokenizer + self.max_length = max_length + self.is_dpo = is_dpo + + prompt_key = getattr(self.args, "prompt_key", None) + chosen_key = getattr(self.args, "chosen_key", None) + rejected_key = getattr(self.args, "rejected_key", None) + apply_chat_template = getattr(self.args, "apply_chat_template", False) + if apply_chat_template: + apply_chat_template = self.tokenizer.apply_chat_template + tokenizer_chat_template = getattr(self.args, "tokenizer_chat_template", None) + if tokenizer_chat_template: + self.tokenizer.chat_template = tokenizer_chat_template + + for data in tqdm(dataset, desc="Tokenizing"): + prompt, chosen, reject, margin = preprocess_data( + data, input_template, prompt_key, chosen_key, rejected_key, apply_chat_template, self.is_dpo + ) + if self.is_dpo: + prompt_token = self.tokenizer( + prompt, + max_length=self.max_length, + padding=False, + truncation=True, + return_tensors="pt", + ) + prompt_ids_len = 
prompt_token["attention_mask"].int().sum().item() + # filter the sample whose length is greater than max_length (2 for answer length) + if prompt_ids_len >= self.max_length - 2: + continue + else: + self.prompt_ids_lens.append(prompt_ids_len) + else: + self.margins.append(margin) + + self.prompts.append(prompt) + self.chosens.append(chosen) + self.rejects.append(reject) + + def __len__(self): + length = len(self.chosens) + return length + + def __getitem__(self, idx): + prompt, chosen, reject = self.prompts[idx], self.chosens[idx], self.rejects[idx] + if self.is_dpo: + extra = self.prompt_ids_lens[idx] + else: + extra = self.margins[idx] + + chosen = (prompt + chosen).rstrip("\n") + if not chosen.endswith(self.tokenizer.eos_token): + chosen += " " + self.tokenizer.eos_token + chosen_token = self.tokenizer( + chosen, + max_length=self.max_length, + padding=False, + truncation=True, + return_tensors="pt", + ) + + reject = (prompt + reject).rstrip("\n") + if not reject.endswith(self.tokenizer.eos_token): + reject += " " + self.tokenizer.eos_token + reject_token = self.tokenizer( + reject, + max_length=self.max_length, + padding=False, + truncation=True, + return_tensors="pt", + ) + + # to avoid EOS_token truncation + chosen_token["input_ids"][0][-1] = self.tokenizer.eos_token_id + reject_token["input_ids"][0][-1] = self.tokenizer.eos_token_id + chosen_token["attention_mask"][0][-1] = True + reject_token["attention_mask"][0][-1] = True + + return { + "chosen_input_ids": chosen_token["input_ids"], + "chosen_attention_mask": chosen_token["attention_mask"], + "reject_input_ids": reject_token["input_ids"], + "reject_attention_mask": reject_token["attention_mask"], + "extra": extra, + } + + def collate_fn(self, item_dict): + batches = defaultdict(list) + for data in item_dict: + for key, value in data.items(): + batches[key].append(value) + + if self.is_dpo: + padding_side = "right" + else: + padding_side = "left" + for key, data in batches.items(): + if 
key.endswith("input_ids"): + batches[key] = zero_pad_sequences(data, side=padding_side, value=self.tokenizer.pad_token_id) + elif key.endswith("mask"): + batches[key] = zero_pad_sequences(data, side=padding_side) + return batches diff --git a/examples/huggingface/entry/train_dpo.py b/examples/huggingface/entry/train_dpo.py new file mode 100644 index 00000000..e4806b94 --- /dev/null +++ b/examples/huggingface/entry/train_dpo.py @@ -0,0 +1,44 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""entry file for training dpo"""
+
+import random
+
+from examples.huggingface.models.dpo.policy_trainer import PolicyTrainer
+from examples.huggingface.models.dpo.reference_model import ReferenceModel
+
+import chatlearn
+from chatlearn.models.deepspeed.deepspeed_utils import get_tokenizer
+from chatlearn import DPOEngine
+from models.utils import blending_datasets
+
+
+if __name__ == "__main__":
+    chatlearn.init()
+    args = chatlearn.get_args()
+    reference = ReferenceModel("reference")
+    policy = PolicyTrainer("policy_trainer")
+
+    engine = DPOEngine(reference, policy)
+
+    # prepare datasets
+    prompts_data = blending_datasets(
+        reference.model_args['reward_data'],
+        reference.model_args['reward_data_probs'],
+        reference.model_args['seed'],
+        return_eval=False,
+    )
+    engine.set_dataset(prompts_data)
+    engine.learn()
diff --git a/examples/huggingface/models/dpo/__init__.py b/examples/huggingface/models/dpo/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/examples/huggingface/models/dpo/policy_trainer.py b/examples/huggingface/models/dpo/policy_trainer.py
new file mode 100644
index 00000000..372fd1c5
--- /dev/null
+++ b/examples/huggingface/models/dpo/policy_trainer.py
@@ -0,0 +1,99 @@
+from typing import Tuple
+import torch
+from chatlearn import DeepSpeedModule
+import torch.nn as nn
+from .utils import DPOModel
+import torch.nn.functional as F
+
+class DPOLoss(nn.Module):
+    """
+    DPO Loss
+    """
+
+    def __init__(self, beta: float, label_smoothing: float = 0.0, ipo: bool = False) -> None:
+        super().__init__()
+        self.beta = beta
+        self.label_smoothing = label_smoothing
+        self.ipo = ipo
+
+    def forward(
+        self,
+        policy_chosen_logps: torch.Tensor,
+        policy_rejected_logps: torch.Tensor,
+        reference_chosen_logps: torch.Tensor,
+        reference_rejected_logps: torch.Tensor,
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        pi_logratios = policy_chosen_logps - policy_rejected_logps
+        ref_logratios = reference_chosen_logps - reference_rejected_logps
+        logits = pi_logratios - ref_logratios
+
+        if self.ipo:
+            losses = (logits - 1 / (2 * self.beta)) ** 2  # Eq. 17 of https://arxiv.org/pdf/2310.12036v2.pdf
+        else:
+            # Eq. 3 https://ericmitchell.ai/cdpo.pdf; label_smoothing=0 gives original DPO (Eq. 7 of https://arxiv.org/pdf/2305.18290.pdf)
+            losses = (
+                -F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing)
+                - F.logsigmoid(-self.beta * logits) * self.label_smoothing
+            )
+
+        loss = losses.mean()
+        chosen_rewards = self.beta * (policy_chosen_logps - reference_chosen_logps).detach()
+        rejected_rewards = self.beta * (policy_rejected_logps - reference_rejected_logps).detach()
+
+        return loss, chosen_rewards, rejected_rewards
+
+class PolicyTrainer(DPOModel):
+
+    def setup(self):
+        super().setup()
+        self.beta = 0.01
+        self.label_smoothing = 0
+        self.ipo = False
+        self.loss_fn = DPOLoss(self.beta, self.label_smoothing, self.ipo)
+        self.aux_loss = False
+        self.nll_loss = False
+        self.aux_loss_coef = 0
+        self.nll_loss_coef = 0
+        self.acc_mean = 0
+        self.loss_mean = 0
+
+    def train_step(self, data_list, iteration):
+        self.model.train() # reset model state
+        for data in data_list:
+            chosen_ids = data["chosen_input_ids"].squeeze(1).cuda()
+            c_mask = data["chosen_attention_mask"].squeeze(1).cuda()
+            reject_ids = data["reject_input_ids"].squeeze(1).cuda()
+            r_mask = data["reject_attention_mask"].squeeze(1).cuda()
+            reference_chosen_logps = data["reference_chosen_logps"].cuda()
+            reference_rejected_logps = data["reference_rejected_logps"].cuda()
+            prompt_id_lens = data["extra"]
+
+            chosen_logps, rejected_logps, aux_loss, nll_loss = self.concatenated_forward(
+                chosen_ids, c_mask, reject_ids, r_mask, prompt_id_lens
+            )
+            preference_loss, chosen_reward, reject_reward = self.loss_fn(
+                chosen_logps, rejected_logps, reference_chosen_logps, reference_rejected_logps
+            )
+            if not self.aux_loss:
+                aux_loss = 0
+            if not self.nll_loss:
+                nll_loss = 0
+
+            loss = preference_loss + aux_loss * self.aux_loss_coef + nll_loss * self.nll_loss_coef
+            self.model.backward(loss)
+            self.model.step()
+            acc = (chosen_reward > reject_reward).float().mean().item()
+            self.acc_mean = self.acc_mean * 0.9 + 0.1 * acc
+            self.loss_mean = self.loss_mean * 0.9 + 0.1 * preference_loss.item()
+            # dpo logs
+            logs_dict = {
+                "loss": preference_loss.item(),
+                "acc": acc,
+                "chosen_reward": chosen_reward.mean().item(),
+                "reject_reward": reject_reward.mean().item(),
+                "loss_mean": self.loss_mean,
+                "acc_mean": self.acc_mean,
+            }
+            if self.nll_loss:
+                logs_dict["nll_loss"] = nll_loss.item()
+            self._logger.info('\t'.join(f"{key}: {value}" for key, value in logs_dict.items()))
diff --git a/examples/huggingface/models/dpo/reference_model.py b/examples/huggingface/models/dpo/reference_model.py
new file mode 100644
index 00000000..2568112e
--- /dev/null
+++ b/examples/huggingface/models/dpo/reference_model.py
@@ -0,0 +1,27 @@
+import torch
+from typing import Optional
+
+from chatlearn import DeepSpeedModule
+import torch.nn.functional as F
+from data.reward_dataset import RewardDataset
+from .utils import DPOModel
+
+
+class ReferenceModel(DPOModel):
+
+    def forward_step(self, data, iteration=0):
+        chosen_ids = data["chosen_input_ids"].squeeze(1).cuda()
+        c_mask = data["chosen_attention_mask"].squeeze(1).cuda()
+        reject_ids = data["reject_input_ids"].squeeze(1).cuda()
+        r_mask = data["reject_attention_mask"].squeeze(1).cuda()
+        prompt_id_lens = data["extra"]
+        with torch.no_grad():
+            reference_chosen_logps, reference_rejected_logps, _, _ = self.concatenated_forward(
+                chosen_ids, c_mask, reject_ids, r_mask, prompt_id_lens
+            )
+        data.update({"reference_chosen_logps": reference_chosen_logps, "reference_rejected_logps": reference_rejected_logps})
+        return data
+
+    def build_dataset(self, data, is_eval=False):
+        reward_dataset = RewardDataset(data, self.args, self.tokenizer, self.args.max_len, input_template=self.args.input_template, is_dpo=True)
+        return reward_dataset
diff --git a/examples/huggingface/models/dpo/utils.py b/examples/huggingface/models/dpo/utils.py
new file mode 100644
index 00000000..83323784
--- /dev/null
+++ b/examples/huggingface/models/dpo/utils.py
@@ -0,0 +1,119 @@
+import torch
+from typing import Optional
+
+from chatlearn import DeepSpeedModule
+import torch.nn.functional as F
+from data.reward_dataset import RewardDataset
+
+def log_probs_from_logits(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
+    log_probs = F.log_softmax(logits, dim=-1)
+    log_probs_labels = log_probs.gather(dim=-1, index=labels.unsqueeze(-1))
+    return log_probs_labels.squeeze(-1)
+
+def concatenated_inputs(chosen_ids, c_mask, reject_ids, r_mask, prompt_id_lens, tokenizer):
+    """Concatenate the chosen and rejected inputs into a single tensor.
+
+    Args:
+        batch: A batch of data. Must contain the keys 'chosen_input_ids' and 'rejected_input_ids', which are tensors of shape (batch_size, sequence_length).
+
+    Returns:
+        A dictionary containing the concatenated inputs under the key 'concatenated_input_ids'.
+ """ + + def pad_to_length(tensor, length, pad_value, dim=-1): + if tensor.size(dim) >= length: + return tensor + else: + pad_size = list(tensor.shape) + pad_size[dim] = length - tensor.size(dim) + return torch.cat( + [tensor, pad_value * torch.ones(*pad_size, dtype=tensor.dtype, device=tensor.device)], dim=dim + ) + + max_length = max(chosen_ids.shape[1], reject_ids.shape[1]) + inputs_ids = torch.cat( + ( + pad_to_length(chosen_ids, max_length, tokenizer.pad_token_id), + pad_to_length(reject_ids, max_length, tokenizer.pad_token_id), + ), + dim=0, + ) + max_length = max(c_mask.shape[1], r_mask.shape[1]) + att_masks = torch.cat((pad_to_length(c_mask, max_length, 0), pad_to_length(r_mask, max_length, 0)), dim=0) + return inputs_ids, att_masks, prompt_id_lens * 2 + +def _get_batch_logps( + logits: torch.FloatTensor, + labels: torch.LongTensor, + attention_mask, + prompt_id_lens, + average_log_prob: bool = False, +) -> torch.FloatTensor: + """Compute the log probabilities of the given labels under the given logits. + + Args: + logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size) + labels: Labels for which to compute the log probabilities. Label tokens with a value of -100 are ignored. Shape: (batch_size, sequence_length) + average_log_prob: If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the log probabilities of the (non-masked) tokens. + + Returns: + A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the given logits. 
+ """ + assert average_log_prob == False + assert logits.shape[:-1] == labels.shape + + labels = labels[:, 1:].clone() + logits = logits[:, :-1, :] + + loss_masks = attention_mask.clone().bool() + # mask prompts + for mask, source_len in zip(loss_masks, prompt_id_lens): + mask[:source_len] = False + loss_masks = loss_masks[:, 1:] + + # dummy token; we'll ignore the losses on these tokens later + labels[loss_masks == False] = 0 + per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2) + + logprobs_sums = (per_token_logps * loss_masks).sum(-1) + logprobs_means = (per_token_logps * loss_masks).sum(-1) / loss_masks.sum(-1) + return logprobs_sums, logprobs_means + +class DPOModel(DeepSpeedModule): + + def forward( + self, + sequences: torch.LongTensor, + num_actions: int = None, + attention_mask: Optional[torch.Tensor] = None, + return_output=False, + ) -> torch.Tensor: + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + output = self.model(sequences, attention_mask=attention_mask, position_ids=position_ids) + log_probs = log_probs_from_logits(output["logits"][:, :-1, :], sequences[:, 1:]) + + if return_output: + return output if num_actions is None else (log_probs[:, -num_actions:], output) + else: + return log_probs[:, -num_actions:] + + + def concatenated_forward( + self, chosen_ids, c_mask, reject_ids, r_mask, prompt_id_lens): + """Run the given model on the given batch of inputs, concatenating the chosen and rejected inputs together. + + We do this to avoid doing two forward passes, because it's faster for FSDP. 
+ """ + input_ids, att_masks, prompt_id_lens = concatenated_inputs( + chosen_ids, c_mask, reject_ids, r_mask, prompt_id_lens, self.tokenizer + ) + output = self.forward(input_ids, attention_mask=att_masks, return_output=True) + all_logits = output["logits"] + all_logps_sum, all_logps_mean = _get_batch_logps( + all_logits, input_ids, att_masks, prompt_id_lens, average_log_prob=False + ) + chosen_logps = all_logps_sum[: chosen_ids.shape[0]] + rejected_logps = all_logps_sum[chosen_ids.shape[0] :] + aux_loss = output.aux_loss if "aux_loss" in output else [] + return chosen_logps, rejected_logps, aux_loss, -all_logps_mean[: chosen_ids.shape[0]].mean() diff --git a/examples/huggingface/models/utils.py b/examples/huggingface/models/utils.py new file mode 100644 index 00000000..bcb6aec4 --- /dev/null +++ b/examples/huggingface/models/utils.py @@ -0,0 +1,417 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""The code is modified from https://github.com/OpenRLHF/OpenRLHF""" + +from typing import Optional, Tuple +import os +from pathlib import Path + +import torch +from datasets import Dataset, interleave_datasets, load_dataset +from transformers import AutoConfig, AutoModel +import torch.nn as nn +from transformers.deepspeed import HfDeepSpeedConfig + +def blending_datasets( + datasets, + probabilities, + seed=42, + max_count=5000000, + return_eval=True, + stopping_strategy="first_exhausted", +): + datasets = datasets.split(",") + if len(datasets) > 1: + probabilities = list(map(float, probabilities.split(","))) + assert len(probabilities) == len(datasets) + else: + probabilities = [1.0] + + train_data_list = [] + eval_data_list = [] + for i, dataset in enumerate(datasets): + dataset = dataset.strip() + dataset_subfold_list = dataset.split("@") + if os.path.isdir(dataset): + data = load_dataset(dataset) + # local dir with python script or common local file + elif os.path.isdir(os.path.join(os.getcwd(), dataset)) or dataset.endswith( + (".json", ".jsonl", ".csv", ".parquet", ".txt") + ): + if dataset.endswith((".json", ".jsonl", ".csv", ".parquet", ".txt")): + files = dataset + data_type = os.path.splitext(files)[1][1:] + else: + path = Path(dataset) + script = [str(file.resolve()) for file in Path(path).rglob("*.py")] + extensions = ("*.json", "*.jsonl", "*.csv", "*.parquet", "*.txt") + files = [str(file) for ext in extensions for file in Path(path).rglob(ext)] + print(f"script: {script}") + print(f"files: {files}") + # For dir, follow python script or first file type + data_type = script[0] if len(script) == 1 else os.path.splitext(files[0])[1][1:] + # reformat data type + if data_type in ["json", "jsonl"]: + data_type = "json" + elif data_type == "txt": + data_type = "text" + elif data_type.endswith(".py"): + # load local dir with python script + files = None + if 
data_type.endswith(".py"): + print(f"load {dataset} with script {data_type}") + else: + print(f"load {files} from {dataset}") + data = load_dataset(data_type, data_files=files) + elif len(dataset_subfold_list) == 2: + dataset = dataset_subfold_list[0] + subfold = dataset_subfold_list[1] + data = load_dataset(dataset, data_dir=subfold.strip()) + elif len(dataset_subfold_list) == 1: + dataset = dataset_subfold_list[0] + data = load_dataset(dataset) + else: + raise Exception(f"Dataset Name {dataset}: Format error") + + if "train" in data: + train_data_list.append(data["train"].select(range(min(max_count, len(data["train"]))))) + else: + train_data_list.append(data.select(range(min(max_count, len(data))))) # train will contains eval? TODO + + if return_eval: + max_count01 = int(max_count * 0.1) + if "test" in data: + eval_data = data["test"].select(range(min(max_count01, len(data["test"])))) + elif "validation" in data: + eval_data = data["validation"].select(range(min(max_count01, len(data["validation"])))) + elif "train" in data: + eval_data = data["train"].select(range(min(max_count01, int(len(data["train"]) * 0.01)))) + else: + eval_data = data.select(range(min(int(max_count01), int(len(data) * 0.01)))) + eval_data_list.append(eval_data) + + train_dataset = interleave_datasets( + train_data_list, + probabilities=probabilities, + seed=seed, + stopping_strategy=stopping_strategy, + ) + if return_eval: + eval_dataset = interleave_datasets( + eval_data_list, + probabilities=probabilities, + seed=seed, + stopping_strategy=stopping_strategy, + ) + return train_dataset, eval_dataset + else: + return train_dataset + + +# Construct transformer with a value head for sequence classification. 
+def get_llm_for_sequence_regression( + model_name_or_path: str, + model_type: str, + *, + bf16=True, + load_in_4bit=False, + lora_rank=0, + lora_alpha=16, + target_modules=None, + lora_dropout=0, + normalize_reward=False, + use_flash_attention_2=False, + ds_config: dict = None, + init_value_head: bool = False, + head_prefix="value_head", + device_map=None, + **kwargs, +) -> nn.Module: + """Get transformer with a sequence classification head on top (linear layer). + + Args: + model_name_or_path (str): Path to pretrained model. + model_type (str): Either "reward" or "critic. + bf16 (bool, optional): Whether enable bfloat16. Defaults to True. + normalize_reward (bool, optional): Whether normalize reward. Defaults to False. + use_flash_attention_2 (bool, optional): Whether use Flash Attention 2.0. Defaults to False. + ds_config (dict, optional): Deepspeed config, used to automatically splitting the model onto + multiple gpus during from_pretrained when ZeRO-3 enabled. Defaults to None. + + Returns: + nn.Module: pretrained transformer model. + """ + assert ( + model_type == "critic" or model_type == "reward" + ), f"invalid model_type: {model_type}, should be critic or reward." 
+ # Note: dschf is defined in function scope to avoid global effects + # https://huggingface.co/docs/transformers/main_classes/deepspeed#nontrainer-deepspeed-integration + if ds_config is not None and ds_config["zero_optimization"]["stage"] == 3: + dschf = HfDeepSpeedConfig(ds_config) + else: + dschf = None + + config = AutoConfig.from_pretrained(model_name_or_path, trust_remote_code=True) + config.normalize_reward = normalize_reward + config._attn_implementation = "flash_attention_2" if use_flash_attention_2 else "eager" + + try: + base_class = AutoModel._model_mapping[type(config)] + base_pretrained_class = base_class.__base__ + if model_type == "reward": + cls_class = _get_reward_model(base_pretrained_class, base_class, head_prefix) + else: + cls_class = _get_critic_model(base_pretrained_class, base_class, head_prefix) + except Exception as e: + print("Failed to load from AutoModel, construct from modelling file.") + module_file, causal_model_name = config.auto_map["AutoModelForCausalLM"].split(".") + + # special case + if causal_model_name == "QWenLMHeadModel": + auto_model_name = "QWenModel" + pretrained_model_name = "QWenPreTrainedModel" + elif causal_model_name == "InternLMForCausalLM": + auto_model_name = "InternLMModel" + pretrained_model_name = "InternLMPreTrainedModel" + else: + if "AutoModel" not in config.auto_map: + auto_model_name = causal_model_name.split("For")[0] + "Model" + else: + auto_model_name = config.auto_map["AutoModel"].split(".")[1] + pretrained_model_name = causal_model_name.split("For")[0] + "PreTrainedModel" + + logger.info(f"BASE_MODEL_CLASS: {auto_model_name}, PRETRAINED_MODEL_CLASS: {pretrained_model_name}") + + base_pretrained_class = get_class_from_dynamic_module( + f"{module_file}.{pretrained_model_name}", model_name_or_path + ) + base_class = get_class_from_dynamic_module(f"{module_file}.{auto_model_name}", model_name_or_path) + if model_type == "reward": + cls_class = _get_reward_model(base_pretrained_class, base_class, 
head_prefix) + else: + cls_class = _get_critic_model(base_pretrained_class, base_class, head_prefix) + + model = cls_class.from_pretrained( + model_name_or_path, + config=config, + trust_remote_code=True, + torch_dtype=torch.bfloat16 if bf16 else "auto", + device_map=device_map, + **kwargs, + ) + + # MoE - balancing loss + model_config = model.config.to_dict() + if "output_router_logits" in model_config: + print("[MoE] set output_router_logits as True") + model.config.output_router_logits = True + + # https://github.com/huggingface/transformers/issues/26877 + model.config.use_cache = False + + # NOTE: For reward model training only, initialize value_head manually + # because deepspeed.zero.Init() will not initialize them. + # TODO: Find a better way to clarify reward model training. + if init_value_head: + if dschf is not None: + logger.info("initialize value_head for ZeRO-3 reward model training.") + with deepspeed.zero.GatheredParameters([model.value_head.weight], modifier_rank=0): + if torch.distributed.get_rank() == 0: + model.value_head.weight.data.normal_(mean=0.0, std=1 / (config.hidden_size + 1)) + else: + model.value_head.weight.data.normal_(mean=0.0, std=1 / (config.hidden_size + 1)) + + return model + + +def _get_reward_model(base_pretrained_model, base_llm_model, head_prefix="value_head"): + class RewardModel(base_pretrained_model): + supports_gradient_checkpointing = True + + def __init__(self, config: AutoConfig): + super().__init__(config) + setattr(self, self.base_model_prefix, base_llm_model(config)) + + self.head_prefix = head_prefix + setattr(self, head_prefix, nn.Linear(config.hidden_size, 1, bias=False)) + + # mean std + self.normalize_reward = config.normalize_reward + self.register_buffer("mean", torch.zeros(1), persistent=False) + self.register_buffer("std", torch.ones(1), persistent=False) + + # load mean/std from config.json + if hasattr(config, "mean"): + self.mean[0] = config.mean + self.std[0] = config.std + + def forward( + self, + 
input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + return_output=False, + ) -> torch.Tensor: + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + outputs = getattr(self, self.base_model_prefix)( + input_ids, attention_mask=attention_mask, position_ids=position_ids + ) + last_hidden_states = outputs["last_hidden_state"] + values = getattr(self, self.head_prefix)(last_hidden_states).squeeze(-1) + + # left padding in training mode + if self.training: + reward = values[:, -1] + else: + eos_indices = attention_mask.size(1) - 1 - attention_mask.long().fliplr().argmax(dim=1, keepdim=True) + reward = values.gather(dim=1, index=eos_indices).squeeze(1) + + # normalize reward in eval mode + if self.normalize_reward: + reward = (reward - self.mean) / self.std + if return_output: + return reward, outputs + else: + return reward + + return RewardModel + + +def _get_critic_model(base_pretrained_model, base_llm_model, head_prefix="value_head"): + class CriticModel(base_pretrained_model): + supports_gradient_checkpointing = True + + def __init__(self, config: AutoConfig): + super().__init__(config) + setattr(self, self.base_model_prefix, base_llm_model(config)) + + self.head_prefix = head_prefix + setattr(self, head_prefix, nn.Linear(config.hidden_size, 1, bias=False)) + + # mean std + self.normalize_reward = config.normalize_reward + self.register_buffer("mean", torch.zeros(1), persistent=False) + self.register_buffer("std", torch.ones(1), persistent=False) + + # load mean/std from config.json + if hasattr(config, "mean"): + self.mean[0] = config.mean + self.std[0] = config.std + + def forward( + self, + input_ids: torch.LongTensor = None, + action_mask: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + return_output=False, + ) -> torch.Tensor: + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + outputs = 
getattr(self, self.base_model_prefix)( + input_ids, attention_mask=attention_mask, position_ids=position_ids + ) + last_hidden_states = outputs["last_hidden_state"] + values = getattr(self, self.head_prefix)(last_hidden_states).squeeze(-1)[:, :-1] + num_actions = action_mask.size(1) + + # normalize reward + if self.normalize_reward: + values = (values - self.mean) / self.std + + if return_output: + return outputs if num_actions is None else (values[:, -num_actions:], outputs) + else: + return values[:, -num_actions:] + + return CriticModel + +def masked_mean(tensor: torch.Tensor, mask: torch.Tensor, dim: int = None) -> torch.Tensor: + if dim is not None: + return (tensor * mask).sum(axis=dim) / mask.sum(axis=dim) + else: + return (tensor * mask).sum() / mask.sum() + +class ValueLoss(nn.Module): + """ + Value Loss for PPO + """ + + def __init__(self, clip_eps: float = None) -> None: + super().__init__() + self.clip_eps = clip_eps + + def forward( + self, + values: torch.Tensor, + old_values: torch.Tensor, + returns: torch.Tensor, + action_mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + if self.clip_eps is not None: + values_clipped = old_values + (values - old_values).clamp(-self.clip_eps, self.clip_eps) + surr1 = (values_clipped - returns) ** 2 + surr2 = (values - returns) ** 2 + loss = torch.max(surr1, surr2) + else: + loss = (values - returns) ** 2 + + loss = masked_mean(loss, action_mask, dim=-1).mean() + return 0.5 * loss + +@torch.no_grad() +def get_advantages_and_returns( + values: torch.Tensor, + rewards: torch.Tensor, + action_mask: torch.Tensor, + gamma: float, + lambd: float, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Function that computes advantages and returns from rewards and values. + Calculated as in the original PPO paper: https://arxiv.org/abs/1707.06347 + Note that rewards may include a KL divergence loss term. + + Advantages looks like this: + Adv1 = R1 + γ * λ * R2 + γ^2 * λ^2 * R3 + ... 
+ - V1 + γ * (1 - λ) V2 + γ^2 * λ * (1 - λ) V3 + ... + + Returns looks like this: + Ret1 = R1 + γ * λ * R2 + γ^2 * λ^2 * R3 + ... + + γ * (1 - λ) V2 + γ^2 * λ * (1 - λ) V3 + ... + + Input: + - values: Tensor of shape (batch_size, response_size) + - rewards: Tensor of shape (batch_size, response_size) + + Output: + - advantages: Tensor of shape (batch_size, response_size) + - returns: Tensor of shape (batch_size, response_size) + """ + lastgaelam = 0 + advantages_reversed = [] + response_length = rewards.size(1) + + # Mask invalid responses + values = action_mask * values + rewards = action_mask * rewards + + for t in reversed(range(response_length)): + nextvalues = values[:, t + 1] if t < response_length - 1 else 0.0 + delta = rewards[:, t] + gamma * nextvalues - values[:, t] + lastgaelam = delta + gamma * lambd * lastgaelam + advantages_reversed.append(lastgaelam) + advantages = torch.stack(advantages_reversed[::-1], dim=1) + returns = advantages + values + return advantages.detach(), returns + diff --git a/examples/megatron/step3_rlhf/run_scripts/base_env.sh b/examples/huggingface/scripts/base_env.sh similarity index 58% rename from examples/megatron/step3_rlhf/run_scripts/base_env.sh rename to examples/huggingface/scripts/base_env.sh index 8eb12ef5..9eceb622 100644 --- a/examples/megatron/step3_rlhf/run_scripts/base_env.sh +++ b/examples/huggingface/scripts/base_env.sh @@ -1,3 +1,5 @@ +#!/bin/bash + ray stop export CUDA_DEVICE_MAX_CONNECTIONS=1 @@ -8,34 +10,27 @@ export NCCL_DEBUG=WARN [ -z "$GPUS_PER_NODE" ] && export GPUS_PER_NODE=8 [ -z "$RANK" ] && export RANK=0 if [ -z "${CUSTOM_PORTS}" ]; then - ports="30000" - for i in $(seq 30001 30050); do + set +x + ports="30010" + for i in $(seq 30011 30050); do ports="${ports};${i}" done + set -x export CUSTOM_PORTS=$ports [ -z "$LOCAL_MASTER_ADDR" ] && export LOCAL_MASTER_ADDR=$MASTER_ADDR echo LOCAL_MASTER_ADDR=$MASTER_ADDR fi - -if [ -z "${MEGATRON}" ]; then - echo "please set Megatron path" -fi if [ -z 
"$CHATLEARN" ]; then - echo "please set CHATLEARN path" + echo "please set CHATLEARN path" + exit 1 fi if [ -z "$DATASET_PATH" ]; then - echo "please set DATASET_PATH" + echo "please set DATASET_PATH" + exit 1 fi - rm core* -rm ${MEGATRON}/megatron/fused_kernels/${build_path}/lock - -export PYTHONPATH=${MEGATRON}:${CHATLEARN}:${CHATLEARN}/examples/megatron:${PYTHONPATH} - -echo set PYTHONPATH $PYTHONPATH - -cd ${CHATLEARN}/examples/megatron/step3_rlhf +export PYTHONPATH=${CHATLEARN}:${CHATLEARN}/examples/huggingface:${PYTHONPATH} export num_device=$(($WORLD_SIZE * $GPUS_PER_NODE)) diff --git a/examples/huggingface/scripts/train_dpo_qwen.sh b/examples/huggingface/scripts/train_dpo_qwen.sh new file mode 100644 index 00000000..fb9e66ec --- /dev/null +++ b/examples/huggingface/scripts/train_dpo_qwen.sh @@ -0,0 +1,24 @@ +#!/bin/bash +set -x + +[ -z "$CHATLEARN" ] && export CHATLEARN=${ROOT}/rlhf +[ -z "$ZERO_SIZE" ] && export ZERO_SIZE=8 +[ -z "$LOAD" ] && export LOAD=path-to-ckpt +[ -z "$DATASET_PATH" ] && export DATASET_PATH=path-to-dataset-json +[ -z "$exp_name" ] && export exp_name=$(date +%F)-dpo +[ -z "$output_dir" ] && export output_dir=${CHATLEARN}/output/ + +output_dir=${output_dir}/${exp_name} +mkdir -p $output_dir/ +log_file=$output_dir/log_${RANK}.log + +cd $CHATLEARN/examples/huggingface/ + +source scripts/base_env.sh + +generation_batch_size=4 \ +num_device=$ZERO_SIZE \ +zero_size=$ZERO_SIZE \ +reward_dataset_path=$DATASET_PATH \ +model_path=$LOAD \ +python entry/train_dpo.py -c configs/qwen2/dpo.yaml 2>&1 | tee ${log_file}.log ; exit ${PIPESTATUS[0]} diff --git a/examples/megatron/step3_rlhf/configs/gpt/base.yaml b/examples/megatron/configs/gpt/base.yaml similarity index 85% rename from examples/megatron/step3_rlhf/configs/gpt/base.yaml rename to examples/megatron/configs/gpt/base.yaml index 9890e590..ab94a153 100644 --- a/examples/megatron/step3_rlhf/configs/gpt/base.yaml +++ b/examples/megatron/configs/gpt/base.yaml @@ -6,17 +6,12 @@ bf16: True 
seq_length: ${max_seq_len} fix_kl_coef: ${fix_kl_coef:True} -log_dir: ${log_dir} - -exp_name: ${exp_name:test} -tensorboard_dir: ${tensorboard_dir} loss_on_prompts: ${loss_on_prompts:False} numerical_stable: ${policy_numerical_stable:False} build_path: ${build_path:build} - init_kl_coef: 0.02 target: 6 horizon: 10000 @@ -36,6 +31,7 @@ lm_coef: 1 clipped_value_only: True use_flash_attn: True num_inference_per_prompt: ${num_inference_per_prompt:1} +train_to_compare_num_responses: ${train_to_compare_num_responses:1} max_tokens_to_oom: 9999999999 log_entropy: False log_interval: 0 @@ -49,3 +45,7 @@ attention_dropout: 0 hidden_dropout: 0 distributed_timeout_minutes: 30 adaptive_parallel_strategy_on_checkpoint: True + +trainer_engine: ${trainer_engine:rlhf} +attention_softmax_in_fp32: True +transformer_impl: local diff --git a/examples/megatron/step3_rlhf/configs/gpt/base_inference.yaml b/examples/megatron/configs/gpt/base_inference.yaml similarity index 100% rename from examples/megatron/step3_rlhf/configs/gpt/base_inference.yaml rename to examples/megatron/configs/gpt/base_inference.yaml diff --git a/examples/megatron/step3_rlhf/configs/gpt/base_train.yaml b/examples/megatron/configs/gpt/base_train.yaml similarity index 89% rename from examples/megatron/step3_rlhf/configs/gpt/base_train.yaml rename to examples/megatron/configs/gpt/base_train.yaml index 142b5f4e..24d44938 100644 --- a/examples/megatron/step3_rlhf/configs/gpt/base_train.yaml +++ b/examples/megatron/configs/gpt/base_train.yaml @@ -7,4 +7,3 @@ clip_grad: 0.5 log_interval: 0 save_interval: 500 log_num_zeros_in_grad: True -save: ${save_dir} diff --git a/examples/megatron/step3_rlhf/configs/gpt/old_policy_inference.yaml b/examples/megatron/configs/gpt/old_policy_inference.yaml similarity index 86% rename from examples/megatron/step3_rlhf/configs/gpt/old_policy_inference.yaml rename to examples/megatron/configs/gpt/old_policy_inference.yaml index fbb1a0da..96677da6 100644 --- 
a/examples/megatron/step3_rlhf/configs/gpt/old_policy_inference.yaml +++ b/examples/megatron/configs/gpt/old_policy_inference.yaml @@ -12,4 +12,3 @@ eval_top_p: 0 pipeline_model_parallel_size: ${policy_pp:1} -use_attn_acc: ${use_attn_acc:False} diff --git a/examples/megatron/step3_rlhf/configs/gpt/old_value_inference.yaml b/examples/megatron/configs/gpt/old_value_inference.yaml similarity index 100% rename from examples/megatron/step3_rlhf/configs/gpt/old_value_inference.yaml rename to examples/megatron/configs/gpt/old_value_inference.yaml diff --git a/examples/megatron/step3_rlhf/configs/gpt/policy_shared.yaml b/examples/megatron/configs/gpt/policy_shared.yaml similarity index 100% rename from examples/megatron/step3_rlhf/configs/gpt/policy_shared.yaml rename to examples/megatron/configs/gpt/policy_shared.yaml diff --git a/examples/megatron/step3_rlhf/configs/gpt/ppo_policy.yaml b/examples/megatron/configs/gpt/ppo_policy.yaml similarity index 100% rename from examples/megatron/step3_rlhf/configs/gpt/ppo_policy.yaml rename to examples/megatron/configs/gpt/ppo_policy.yaml diff --git a/examples/megatron/step3_rlhf/configs/gpt/ppo_value.yaml b/examples/megatron/configs/gpt/ppo_value.yaml similarity index 100% rename from examples/megatron/step3_rlhf/configs/gpt/ppo_value.yaml rename to examples/megatron/configs/gpt/ppo_value.yaml diff --git a/examples/megatron/step3_rlhf/configs/gpt/reference.yaml b/examples/megatron/configs/gpt/reference.yaml similarity index 100% rename from examples/megatron/step3_rlhf/configs/gpt/reference.yaml rename to examples/megatron/configs/gpt/reference.yaml diff --git a/examples/megatron/step3_rlhf/configs/gpt/reward_inference.yaml b/examples/megatron/configs/gpt/reward_inference.yaml similarity index 60% rename from examples/megatron/step3_rlhf/configs/gpt/reward_inference.yaml rename to examples/megatron/configs/gpt/reward_inference.yaml index cecef649..298a0b45 100644 --- a/examples/megatron/step3_rlhf/configs/gpt/reward_inference.yaml 
+++ b/examples/megatron/configs/gpt/reward_inference.yaml @@ -3,10 +3,8 @@ includes: - reward_shared.yaml tokenizer_type: GPT2BPETokenizer -ppo_tokenizer_type: GPT2BPETokenizer -ppo_vocab_file: tokenizer.json reward_bias: 0 save_inference: False save_inference_interval: 100 -pipeline_model_parallel_size: ${reward_pp:1} \ No newline at end of file +pipeline_model_parallel_size: ${reward_pp:1} diff --git a/examples/megatron/step3_rlhf/configs/gpt/reward_shared.yaml b/examples/megatron/configs/gpt/reward_shared.yaml similarity index 84% rename from examples/megatron/step3_rlhf/configs/gpt/reward_shared.yaml rename to examples/megatron/configs/gpt/reward_shared.yaml index 565d5a03..744e34cc 100644 --- a/examples/megatron/step3_rlhf/configs/gpt/reward_shared.yaml +++ b/examples/megatron/configs/gpt/reward_shared.yaml @@ -3,7 +3,7 @@ load_iteration: ${reward_load_iteration} num_layers: ${reward_num_layers} hidden_size: ${reward_hidden_size} num_attention_heads: ${reward_num_attention_heads} -use_distributed_optimizer: ${reward_use_distributed_optimizer:True} +use_distributed_optimizer: ${reward_use_distributed_optimizer:False} tensor_model_parallel_size: ${reward_tp} pipeline_model_parallel_size: 1 seq_length: ${max_seq_len} diff --git a/examples/megatron/step3_rlhf/configs/gpt/rlhf.yaml b/examples/megatron/configs/gpt/rlhf.yaml similarity index 76% rename from examples/megatron/step3_rlhf/configs/gpt/rlhf.yaml rename to examples/megatron/configs/gpt/rlhf.yaml index 9fb1967f..f6e87f85 100644 --- a/examples/megatron/step3_rlhf/configs/gpt/rlhf.yaml +++ b/examples/megatron/configs/gpt/rlhf.yaml @@ -12,35 +12,39 @@ runtime_env: models: policy: model_config_file: old_policy_inference.yaml - num_device: ${num_device_policy} + num_gpu: ${num_gpu_policy} gpu_per_process: 1 trainable: False generation_batch_size: ${policy_generation_batch_size:180} batch_generation: ranking: ${batch_generation_ranking:False} min_prompt_length: ${batch_generation_min_prompt_length:0} - + 
free_memory: ${free_memory_policy:False} + reference: model_config_file: reference.yaml - num_device: ${num_device_ref} + num_gpu: ${num_gpu_ref} trainable: False generation_batch_size: ${ref_generation_bs:16} + free_memory: ${free_memory_reference:False} reward: model_config_file: reward_inference.yaml - num_device: ${num_device_reward} + num_gpu: ${num_gpu_reward} generation_batch_size: ${reward_generation_bs:16} trainable: False - + free_memory: ${free_memory_reward:False} + value: model_config_file: old_value_inference.yaml - num_device: ${num_device_value} + num_gpu: ${num_gpu_value} generation_batch_size: ${value_generation_bs:16} trainable: False - + free_memory: ${free_memory_value:False} + ppo_policy: model_config_file: ppo_policy.yaml - num_device: ${num_device_ppo_policy} + num_gpu: ${num_gpu_ppo_policy} trainable: True lora: enable_lora: ${enable_lora_policy:False} @@ -48,29 +52,27 @@ models: lora_layer: ColumnParallelLinear,LinearLayer,RowParallelLinear column_only_qkv: False lora_dropout: 0.05 - offload_optimizer_states: ${offload_optimizer_states:False} - + free_memory: ${free_memory_ppo_policy:False} ppo_value: model_config_file: ppo_value.yaml - num_device: ${num_device_ppo_value} + num_gpu: ${num_gpu_ppo_value} gpu_per_process: 1 trainable: True - lora: enable_lora: ${enable_lora_value:False} lora_dim: 64 lora_layer: ColumnParallelLinear,LinearLayer,RowParallelLinear column_only_qkv: False lora_dropout: 0.05 - offload_optimizer_states: ${offload_optimizer_states:False} + free_memory: ${free_memory_ppo_value:False} -rlhf: +runtime: colocation: - policy,reference,reward,value,ppo_policy,ppo_value generation_batch_size: ${generation_batch_size:16} train_micro_batch_size: ${train_micro_batch_size:16} train_global_batch_size: ${train_global_batch_size:64} - num_ppo_episode: ${num_ppo_episode:5} + num_episode: ${num_episode:5} sample_per_episode: ${sample_per_episode:1024} num_training_epoch: 1 save_episode_interval: ${save_episode_interval:1000} @@ 
-80,6 +82,9 @@ rlhf: eval_episode_interval: ${eval_episode_interval:0} mini_data: ${mini_data:/path/to/mini.txt} eval_data_path: ${eval_data_path:/path/to/eval_data} - eval_output_dir: ${eval_output_dir} + data_checkpoint_path: ${data_checkpoint_path} + output_dir: ${output_dir} eval_data_num_limit: 20 nsys: False + free_sync_collective_group: ${free_sync_collective_group:False} + exp_name: ${exp_name:chatlearn} diff --git a/examples/megatron/step3_rlhf/configs/gpt/test_policy.yaml b/examples/megatron/configs/gpt/test_policy.yaml similarity index 85% rename from examples/megatron/step3_rlhf/configs/gpt/test_policy.yaml rename to examples/megatron/configs/gpt/test_policy.yaml index f6ad3c1d..30e170ae 100644 --- a/examples/megatron/step3_rlhf/configs/gpt/test_policy.yaml +++ b/examples/megatron/configs/gpt/test_policy.yaml @@ -8,15 +8,15 @@ runtime_env: models: policy: model_config_file: old_policy_inference.yaml - num_device: ${num_device:8} + num_gpu: ${num_gpu:8} trainable: False batch_generation: ranking: ${batch_generation_ranking:False} min_prompt_length: ${batch_generation_min_prompt_length:0} -rlhf: +runtime: generation_batch_size: ${generation_batch_size:4} query_key: ${query_key:query} data_path: ${data_path:/path/to/data} eval_data_path: ${eval_data_path:/path/to/eval_data} - eval_output_dir: ${eval_output_dir:/path/to/eval_dir} + output_dir: ${output_dir:/path/to/output_dir} profiler_dir: ${profiler_dir} diff --git a/examples/megatron/step3_rlhf/configs/llama2/base.yaml b/examples/megatron/configs/llama2/base.yaml similarity index 77% rename from examples/megatron/step3_rlhf/configs/llama2/base.yaml rename to examples/megatron/configs/llama2/base.yaml index c50680d4..176492e0 100644 --- a/examples/megatron/step3_rlhf/configs/llama2/base.yaml +++ b/examples/megatron/configs/llama2/base.yaml @@ -11,17 +11,23 @@ use_checkpoint_args: False group_query_attention: ${group_query_attention:False} add_bias_linear: False swiglu: True - +attention_softmax_in_fp32: 
True +transformer_impl: local bf16: True +trainer_engine: ${trainer_engine:rlhf} +init_shuffle_prompts: ${init_shuffle_prompts:0} +# dpo loss +use_ipo: ${use_ipo:False} +dpo_weight: ${dpo_weight:0.1} + +train_to_compare_num_responses: ${train_to_compare_num_responses:1} +num_inference_per_prompt: ${num_inference_per_prompt:1} tokenizer_model: ${tokenizer_model} max_position_embeddings: ${max_position_embedding:4096} -seq_length: 1024 +seq_length: ${seq_length:1024} fix_kl_coef: ${fix_kl_coef:True} -log_dir: ${log_dir} -exp_name: ${exp_name:test} -tensorboard_dir: ${tensorboard_dir} loss_on_prompts: ${loss_on_prompts:False} numerical_stable: True @@ -39,7 +45,7 @@ scale_reward: "None" cliprange_reward: 100 -max_new_tokens: 512 +max_new_tokens: ${max_new_tokens:512} ngram_coef: ${ngram_coef:1} @@ -48,11 +54,9 @@ math_coef: ${math_coef:0} raw_reward_coeff: ${raw_reward_coeff:1} clipped_value_only: ${clipped_value_only:1} -num_inference_per_prompt: ${num_inference_per_prompt:1} finetune: True -save: ${save_dir} save_interval: 1000 gradient_accumulation_fusion: 0 max_tokens_to_oom: 99999999 @@ -63,6 +67,6 @@ use_flash_attn: 1 do_math_eval: 0 log_entropy: False adaptive_parallel_strategy_on_checkpoint: True -log_interval: 1 +log_interval: ${log_interval:10} distributed_timeout_minutes: 30 make_vocab_size_divisible_by: 32 diff --git a/examples/megatron/step3_rlhf/configs/llama2/base_inference.yaml b/examples/megatron/configs/llama2/base_inference.yaml similarity index 100% rename from examples/megatron/step3_rlhf/configs/llama2/base_inference.yaml rename to examples/megatron/configs/llama2/base_inference.yaml diff --git a/examples/megatron/step3_rlhf/configs/llama2/base_train.yaml b/examples/megatron/configs/llama2/base_train.yaml similarity index 80% rename from examples/megatron/step3_rlhf/configs/llama2/base_train.yaml rename to examples/megatron/configs/llama2/base_train.yaml index fda46278..6897a20f 100644 --- 
a/examples/megatron/step3_rlhf/configs/llama2/base_train.yaml +++ b/examples/megatron/configs/llama2/base_train.yaml @@ -5,7 +5,6 @@ includes: distributed_backend: nccl train_iters: 12000 -clip_grad: 0.5 -log_interval: 1 +clip_grad: ${clip_grad:0.5} log_num_zeros_in_grad: True sequence_parallel: True diff --git a/examples/megatron/configs/llama2/data.yaml b/examples/megatron/configs/llama2/data.yaml new file mode 100644 index 00000000..1ef5e597 --- /dev/null +++ b/examples/megatron/configs/llama2/data.yaml @@ -0,0 +1 @@ +prompt_key: ${prompt_key} \ No newline at end of file diff --git a/examples/megatron/configs/llama2/dpo.yaml b/examples/megatron/configs/llama2/dpo.yaml new file mode 100644 index 00000000..f6c751fe --- /dev/null +++ b/examples/megatron/configs/llama2/dpo.yaml @@ -0,0 +1,46 @@ +runtime_env: + platform: DLC + excludes: + - "*pt" + - "logs" + - "tensorboards" + - ".nfs*" + + +models: + reference: + model_config_file: reference.yaml + num_gpu: ${num_gpu_ref:16} + trainable: False + generation_batch_size: ${ref_generation_batch_size:4} + free_memory: ${free_memory_reference:False} + + ppo_policy: + model_config_file: ppo_policy.yaml + num_gpu: ${num_gpu_ppo_policy:16} + trainable: True + lora: + enable_lora: ${enable_lora_policy:False} + lora_dim: 64 + lora_layer: ColumnParallelLinear,LinearLayer,RowParallelLinear + column_only_qkv: False + lora_dropout: 0.05 + free_memory: ${free_memory_ppo_policy:False} + +runtime: + colocation: + - ppo_policy,reference + train_micro_batch_size: ${train_micro_batch_size:2} + train_global_batch_size: ${train_global_batch_size:512} + num_episode: ${num_episode:100} + sample_per_episode: ${sample_per_episode:1024} + num_training_epoch: 1 + save_episode_interval: ${save_episode_interval:100} + data_path: ${data_path} + training_data_num_limit: ${training_data_num_limit:-1} + eval_data_num_limit: ${eval_data_num_limit:128} + eval_episode_interval: ${eval_episode_interval:100} + data_checkpoint_path: 
${data_checkpoint_path} + output_dir: ${output_dir} + free_sync_collective_group: ${free_sync_collective_group:False} + exp_name: ${exp_name:chatlearn} diff --git a/examples/megatron/configs/llama2/eval.yaml b/examples/megatron/configs/llama2/eval.yaml new file mode 100644 index 00000000..b95b327d --- /dev/null +++ b/examples/megatron/configs/llama2/eval.yaml @@ -0,0 +1,37 @@ +runtime_env: + platform: DLC + excludes: + - "*pt" + - "logs" + - "tensorboards" + - ".nfs*" + + +models: + policy: + model_config_file: old_policy_inference.yaml + num_gpu: ${num_gpu_policy:16} + trainable: False + batch_generation: + ranking: ${batch_generation_ranking:False} + min_prompt_length: ${batch_generation_min_prompt_length:0} + free_memory: ${free_memory_policy:False} + + reward: + model_config_file: reward_inference.yaml + num_gpu: ${num_gpu_reward:16} + trainable: False + free_memory: ${free_memory_reward:False} + +runtime: + colocation: + - policy,reward + generation_batch_size: ${generation_batch_size:4} + save_episode_interval: ${save_episode_interval:100} + eval_data_path: ${eval_data_path} + eval_data_num_limit: ${eval_data_num_limit:128} + eval_episode_interval: ${eval_episode_interval:100} + data_checkpoint_path: ${data_checkpoint_path} + output_dir: ${output_dir} + free_sync_collective_group: ${free_sync_collective_group:False} + exp_name: ${exp_name:chatlearn} diff --git a/examples/megatron/configs/llama2/eval_vllm.yaml b/examples/megatron/configs/llama2/eval_vllm.yaml new file mode 100644 index 00000000..7b18da4c --- /dev/null +++ b/examples/megatron/configs/llama2/eval_vllm.yaml @@ -0,0 +1,37 @@ +runtime_env: + platform: DLC + excludes: + - "*pt" + - "logs" + - "tensorboards" + - ".nfs*" + + +models: + policy: + model_config_file: vllm_policy_inference.yaml + num_gpu: ${num_gpu_policy:16} + trainable: False + batch_generation: + ranking: ${batch_generation_ranking:False} + min_prompt_length: ${batch_generation_min_prompt_length:0} + free_memory: 
${free_memory_policy:False} + + reward: + model_config_file: reward_inference.yaml + num_gpu: ${num_gpu_reward:16} + trainable: False + free_memory: ${free_memory_reward:False} + +runtime: + colocation: + - policy,reward + generation_batch_size: ${generation_batch_size:4} + save_episode_interval: ${save_episode_interval:100} + eval_data_path: ${eval_data_path} + eval_data_num_limit: ${eval_data_num_limit:128} + eval_episode_interval: ${eval_episode_interval:100} + data_checkpoint_path: ${data_checkpoint_path} + output_dir: ${output_dir} + free_sync_collective_group: ${free_sync_collective_group:False} + exp_name: ${exp_name:chatlearn} diff --git a/examples/megatron/configs/llama2/grpo_math_vllm.yaml b/examples/megatron/configs/llama2/grpo_math_vllm.yaml new file mode 100644 index 00000000..322f11cd --- /dev/null +++ b/examples/megatron/configs/llama2/grpo_math_vllm.yaml @@ -0,0 +1,67 @@ +runtime_env: + platform: DLC + excludes: + - "*pt" + - "logs" + - "tensorboards" + - ".nfs*" + + +models: + policy: + model_config_file: vllm_policy_inference.yaml + num_gpu: ${num_gpu_policy:16} + trainable: False + batch_generation: + ranking: ${batch_generation_ranking:False} + min_prompt_length: ${batch_generation_min_prompt_length:0} + free_memory: ${free_memory_policy:False} + + reference: + model_config_file: reference.yaml + num_gpu: ${num_gpu_ref:16} + trainable: False + generation_batch_size: ${ref_generation_batch_size:4} + free_memory: ${free_memory_reference:False} + + reward: + model_config_file: reward_inference.yaml + num_gpu: ${num_gpu_reward:16} + trainable: False + free_memory: ${free_memory_reward:False} + + math_reward: + model_config_file: math_reward.yaml + num_cpu: ${num_cpu_math:2} + trainable: False + + ppo_policy: + model_config_file: ppo_policy.yaml + num_gpu: ${num_gpu_ppo_policy:16} + trainable: True + lora: + enable_lora: ${enable_lora_policy:False} + lora_dim: 64 + lora_layer: ColumnParallelLinear,LinearLayer,RowParallelLinear + column_only_qkv: 
False + lora_dropout: 0.05 + free_memory: ${free_memory_ppo_policy:False} + +runtime: + colocation: + - policy,ppo_policy,reward,reference + generation_batch_size: ${generation_batch_size:4} + train_micro_batch_size: ${train_micro_batch_size:2} + train_global_batch_size: ${train_global_batch_size:512} + num_episode: ${num_episode:100} + sample_per_episode: ${sample_per_episode:1024} + num_training_epoch: 1 + save_episode_interval: ${save_episode_interval:100} + data_path: ${data_path} + training_data_num_limit: ${training_data_num_limit:-1} + eval_data_num_limit: ${eval_data_num_limit:128} + eval_episode_interval: ${eval_episode_interval:100} + data_checkpoint_path: ${data_checkpoint_path} + output_dir: ${output_dir} + max_relay_episode: 1 + exp_name: ${exp_name:chatlearn} diff --git a/examples/megatron/configs/llama2/math_reward.yaml b/examples/megatron/configs/llama2/math_reward.yaml new file mode 100644 index 00000000..ba273418 --- /dev/null +++ b/examples/megatron/configs/llama2/math_reward.yaml @@ -0,0 +1,5 @@ +includes: + - base_inference.yaml + - reward_shared.yaml + +math_reward_strategy: merge diff --git a/examples/megatron/step3_rlhf/configs/llama2/old_policy_inference.yaml b/examples/megatron/configs/llama2/old_policy_inference.yaml similarity index 84% rename from examples/megatron/step3_rlhf/configs/llama2/old_policy_inference.yaml rename to examples/megatron/configs/llama2/old_policy_inference.yaml index bf5c76a2..d17eb4ab 100644 --- a/examples/megatron/step3_rlhf/configs/llama2/old_policy_inference.yaml +++ b/examples/megatron/configs/llama2/old_policy_inference.yaml @@ -8,6 +8,5 @@ top_k: ${policy_top_k:0} temperature: ${policy_temperature:1.0} eval_temperature: 0.01 -use_attn_acc: ${use_attn_acc:False} eval_top_k: 1 eval_top_p: 0 diff --git a/examples/megatron/step3_rlhf/configs/llama2/old_value_inference.yaml b/examples/megatron/configs/llama2/old_value_inference.yaml similarity index 100% rename from 
examples/megatron/step3_rlhf/configs/llama2/old_value_inference.yaml rename to examples/megatron/configs/llama2/old_value_inference.yaml diff --git a/examples/megatron/step3_rlhf/configs/bloom/rlhf.yaml b/examples/megatron/configs/llama2/online_dpo.yaml similarity index 62% rename from examples/megatron/step3_rlhf/configs/bloom/rlhf.yaml rename to examples/megatron/configs/llama2/online_dpo.yaml index bcd45a86..d818c10f 100644 --- a/examples/megatron/step3_rlhf/configs/bloom/rlhf.yaml +++ b/examples/megatron/configs/llama2/online_dpo.yaml @@ -10,31 +10,29 @@ runtime_env: models: policy: model_config_file: old_policy_inference.yaml - num_device: ${num_device:8} + num_gpu: ${num_gpu_policy:16} trainable: False batch_generation: ranking: ${batch_generation_ranking:False} min_prompt_length: ${batch_generation_min_prompt_length:0} + free_memory: ${free_memory_policy:False} reference: model_config_file: reference.yaml - num_device: ${num_device:8} + num_gpu: ${num_gpu_ref:16} trainable: False generation_batch_size: ${ref_generation_batch_size:4} + free_memory: ${free_memory_reference:False} reward: model_config_file: reward_inference.yaml - num_device: ${num_device:8} - trainable: False - - value: - model_config_file: old_value_inference.yaml - num_device: ${num_device:8} + num_gpu: ${num_gpu_reward:16} trainable: False + free_memory: ${free_memory_reward:False} ppo_policy: model_config_file: ppo_policy.yaml - num_device: ${num_device:8} + num_gpu: ${num_gpu_ppo_policy:16} trainable: True lora: enable_lora: ${enable_lora_policy:False} @@ -42,25 +40,15 @@ models: lora_layer: ColumnParallelLinear,LinearLayer,RowParallelLinear column_only_qkv: False lora_dropout: 0.05 + free_memory: ${free_memory_ppo_policy:False} - ppo_value: - model_config_file: ppo_value.yaml - num_device: ${num_device:8} - trainable: True - lora: - enable_lora: ${enable_lora_value:False} - lora_dim: 64 - lora_layer: ColumnParallelLinear,LinearLayer,RowParallelLinear - column_only_qkv: False - 
lora_dropout: 0.05 - -rlhf: +runtime: colocation: - - policy,ppo_policy,reward,reference,value,ppo_value - generation_batch_size: ${generation_batch_size:8} - train_micro_batch_size: 2 + - policy,ppo_policy,reward,reference + generation_batch_size: ${generation_batch_size:4} + train_micro_batch_size: ${train_micro_batch_size:2} train_global_batch_size: ${train_global_batch_size:512} - num_ppo_episode: 5000 + num_episode: ${num_episode:100} sample_per_episode: ${sample_per_episode:1024} num_training_epoch: 1 save_episode_interval: ${save_episode_interval:100} @@ -69,4 +57,6 @@ rlhf: eval_data_num_limit: ${eval_data_num_limit:128} eval_episode_interval: ${eval_episode_interval:100} data_checkpoint_path: ${data_checkpoint_path} - eval_output_dir: ${eval_output_dir} + output_dir: ${output_dir} + free_sync_collective_group: ${free_sync_collective_group:False} + exp_name: ${exp_name:chatlearn} diff --git a/examples/megatron/configs/llama2/online_dpo_vllm.yaml b/examples/megatron/configs/llama2/online_dpo_vllm.yaml new file mode 100644 index 00000000..8e9a52f7 --- /dev/null +++ b/examples/megatron/configs/llama2/online_dpo_vllm.yaml @@ -0,0 +1,61 @@ +runtime_env: + platform: DLC + excludes: + - "*pt" + - "logs" + - "tensorboards" + - ".nfs*" + + +models: + policy: + model_config_file: vllm_policy_inference.yaml + num_gpu: ${num_gpu_policy:16} + trainable: False + batch_generation: + ranking: ${batch_generation_ranking:False} + min_prompt_length: ${batch_generation_min_prompt_length:0} + free_memory: ${free_memory_policy:False} + + reference: + model_config_file: reference.yaml + num_gpu: ${num_gpu_ref:16} + trainable: False + generation_batch_size: ${ref_generation_batch_size:4} + free_memory: ${free_memory_reference:False} + + reward: + model_config_file: reward_inference.yaml + num_gpu: ${num_gpu_reward:16} + trainable: False + free_memory: ${free_memory_reward:False} + + ppo_policy: + model_config_file: ppo_policy.yaml + num_gpu: ${num_gpu_ppo_policy:16} + trainable: 
True + lora: + enable_lora: ${enable_lora_policy:False} + lora_dim: 64 + lora_layer: ColumnParallelLinear,LinearLayer,RowParallelLinear + column_only_qkv: False + lora_dropout: 0.05 + free_memory: ${free_memory_ppo_policy:False} + +runtime: + colocation: + - policy,ppo_policy,reward,reference + generation_batch_size: ${generation_batch_size:4} + train_micro_batch_size: ${train_micro_batch_size:2} + train_global_batch_size: ${train_global_batch_size:512} + num_episode: ${num_episode:100} + sample_per_episode: ${sample_per_episode:1024} + num_training_epoch: 1 + save_episode_interval: ${save_episode_interval:100} + data_path: ${data_path} + training_data_num_limit: ${training_data_num_limit:-1} + eval_data_num_limit: ${eval_data_num_limit:128} + eval_episode_interval: ${eval_episode_interval:100} + data_checkpoint_path: ${data_checkpoint_path} + output_dir: ${output_dir} + exp_name: ${exp_name:chatlearn} diff --git a/examples/megatron/step3_rlhf/configs/llama2/policy_shared.yaml b/examples/megatron/configs/llama2/policy_shared.yaml similarity index 88% rename from examples/megatron/step3_rlhf/configs/llama2/policy_shared.yaml rename to examples/megatron/configs/llama2/policy_shared.yaml index e222e555..e5cb6df3 100644 --- a/examples/megatron/step3_rlhf/configs/llama2/policy_shared.yaml +++ b/examples/megatron/configs/llama2/policy_shared.yaml @@ -1,4 +1,5 @@ load: ${policy_inference_load} +load_iteration: ${policy_load_iteration} num_layers: ${policy_num_layers} hidden_size: ${policy_hidden_size} num_attention_heads: ${policy_num_attention_heads} diff --git a/examples/megatron/step3_rlhf/configs/llama2/ppo_policy.yaml b/examples/megatron/configs/llama2/ppo_policy.yaml similarity index 70% rename from examples/megatron/step3_rlhf/configs/llama2/ppo_policy.yaml rename to examples/megatron/configs/llama2/ppo_policy.yaml index 0e361422..32a72538 100644 --- a/examples/megatron/step3_rlhf/configs/llama2/ppo_policy.yaml +++ b/examples/megatron/configs/llama2/ppo_policy.yaml 
@@ -10,6 +10,12 @@ adam_beta2: 0.95 num_workers: 8 init_method_std: 0.006 +# dropout +attention_dropout: ${attention_dropout:0.1} +hidden_dropout: ${hidden_dropout:0.1} +retro_encoder_hidden_dropout: ${retro_encoder_hidden_dropout:0.1} +retro_encoder_attention_dropout: ${retro_encoder_attention_dropout:0.1} + recompute_granularity: selective log_num_zeros_in_grad: True @@ -26,7 +32,7 @@ min_lr: ${policy_min_lr:1e-9} lr_decay_style: ${policy_lr_decay_style:linear} weight_decay: 0.01 pipeline_model_parallel_size: ${ppo_policy_pp:1} -sequence_parallel: True +sequence_parallel: ${sequence_parallel:True} recompute_activations: ${policy_recompute_activations:False} recompute_granularity: ${policy_recompute_granularity:None} diff --git a/examples/megatron/step3_rlhf/configs/llama2/ppo_value.yaml b/examples/megatron/configs/llama2/ppo_value.yaml similarity index 88% rename from examples/megatron/step3_rlhf/configs/llama2/ppo_value.yaml rename to examples/megatron/configs/llama2/ppo_value.yaml index 4f2100db..f37153d6 100644 --- a/examples/megatron/step3_rlhf/configs/llama2/ppo_value.yaml +++ b/examples/megatron/configs/llama2/ppo_value.yaml @@ -9,7 +9,6 @@ lr: ${value_lr:5e-6} min_lr: ${value_min_lr:5e-7} lr_decay_style: ${value_lr_decay_style:linear} weight_decay: 0.01 -log_interval: 1 use_checkpoint_opt_param_scheduler: False adam_beta1: 0.9 @@ -26,4 +25,4 @@ no_load_scheduler: True sequence_parallel: True recompute_activations: ${value_recompute_activations:False} -recompute_granularity: ${value_recompute_granularity:None} \ No newline at end of file +recompute_granularity: ${value_recompute_granularity:None} diff --git a/examples/megatron/step3_rlhf/configs/llama2/reference.yaml b/examples/megatron/configs/llama2/reference.yaml similarity index 100% rename from examples/megatron/step3_rlhf/configs/llama2/reference.yaml rename to examples/megatron/configs/llama2/reference.yaml diff --git a/examples/megatron/step3_rlhf/configs/llama2/reward_inference.yaml 
b/examples/megatron/configs/llama2/reward_inference.yaml similarity index 100% rename from examples/megatron/step3_rlhf/configs/llama2/reward_inference.yaml rename to examples/megatron/configs/llama2/reward_inference.yaml diff --git a/examples/megatron/step3_rlhf/configs/llama2/reward_shared.yaml b/examples/megatron/configs/llama2/reward_shared.yaml similarity index 89% rename from examples/megatron/step3_rlhf/configs/llama2/reward_shared.yaml rename to examples/megatron/configs/llama2/reward_shared.yaml index 877516ce..50c110bc 100644 --- a/examples/megatron/step3_rlhf/configs/llama2/reward_shared.yaml +++ b/examples/megatron/configs/llama2/reward_shared.yaml @@ -10,5 +10,5 @@ ffn_hidden_size: ${reward_ffn_hidden_size} tensor_model_parallel_size: ${reward_tp:8} num_query_groups: ${reward_num_query_groups} -save_inference: True -save_inference_interval: 10 \ No newline at end of file +save_inference: False +save_inference_interval: 10 diff --git a/examples/megatron/step3_rlhf/configs/llama2/rlhf.yaml b/examples/megatron/configs/llama2/rlhf.yaml similarity index 71% rename from examples/megatron/step3_rlhf/configs/llama2/rlhf.yaml rename to examples/megatron/configs/llama2/rlhf.yaml index 1d172962..5a693881 100644 --- a/examples/megatron/step3_rlhf/configs/llama2/rlhf.yaml +++ b/examples/megatron/configs/llama2/rlhf.yaml @@ -10,31 +10,35 @@ runtime_env: models: policy: model_config_file: old_policy_inference.yaml - num_device: ${num_device_policy:16} + num_gpu: ${num_gpu_policy:16} trainable: False batch_generation: ranking: ${batch_generation_ranking:False} min_prompt_length: ${batch_generation_min_prompt_length:0} + free_memory: ${free_memory_policy:False} reference: model_config_file: reference.yaml - num_device: ${num_device_ref:16} + num_gpu: ${num_gpu_ref:16} trainable: False generation_batch_size: ${ref_generation_batch_size:4} + free_memory: ${free_memory_reference:False} reward: model_config_file: reward_inference.yaml - num_device: ${num_device_reward:16} 
+ num_gpu: ${num_gpu_reward:16} trainable: False + free_memory: ${free_memory_reward:False} value: model_config_file: old_value_inference.yaml - num_device: ${num_device_value:16} + num_gpu: ${num_gpu_value:16} trainable: False + free_memory: ${free_memory_value:False} ppo_policy: model_config_file: ppo_policy.yaml - num_device: ${num_device_ppo_policy:16} + num_gpu: ${num_gpu_ppo_policy:16} trainable: True lora: enable_lora: ${enable_lora_policy:False} @@ -42,11 +46,11 @@ models: lora_layer: ColumnParallelLinear,LinearLayer,RowParallelLinear column_only_qkv: False lora_dropout: 0.05 - offload_optimizer_states: ${offload_optimizer_states:False} + free_memory: ${free_memory_ppo_policy:False} ppo_value: model_config_file: ppo_value.yaml - num_device: ${num_device_ppo_value:16} + num_gpu: ${num_gpu_ppo_value:16} trainable: True lora: enable_lora: ${enable_lora_value:False} @@ -54,21 +58,24 @@ models: lora_layer: ColumnParallelLinear,LinearLayer,RowParallelLinear column_only_qkv: False lora_dropout: 0.05 - offload_optimizer_states: ${offload_optimizer_states:False} + free_memory: ${free_memory_ppo_value:False} -rlhf: +runtime: colocation: - policy,ppo_policy,reward,reference,value,ppo_value generation_batch_size: ${generation_batch_size:4} train_micro_batch_size: ${train_micro_batch_size:2} train_global_batch_size: ${train_global_batch_size:512} - num_ppo_episode: ${num_ppo_episode:5000} + num_episode: ${num_episode:100} sample_per_episode: ${sample_per_episode:1024} num_training_epoch: 1 save_episode_interval: ${save_episode_interval:100} data_path: ${data_path} + eval_data_path: ${eval_data_path} training_data_num_limit: ${training_data_num_limit:-1} eval_data_num_limit: ${eval_data_num_limit:128} eval_episode_interval: ${eval_episode_interval:100} data_checkpoint_path: ${data_checkpoint_path} - eval_output_dir: ${eval_output_dir} + free_sync_collective_group: ${free_sync_collective_group:False} + exp_name: ${exp_name:chatlearn} + output_dir: ${output_dir} diff --git 
a/examples/megatron/step3_rlhf/configs/bloom/test_policy.yaml b/examples/megatron/configs/llama2/test_policy.yaml similarity index 82% rename from examples/megatron/step3_rlhf/configs/bloom/test_policy.yaml rename to examples/megatron/configs/llama2/test_policy.yaml index 086d18ce..aa828c62 100644 --- a/examples/megatron/step3_rlhf/configs/bloom/test_policy.yaml +++ b/examples/megatron/configs/llama2/test_policy.yaml @@ -10,15 +10,16 @@ runtime_env: models: policy: model_config_file: old_policy_inference.yaml - num_device: ${num_device:1} + num_gpu: ${num_gpu:1} gpu_per_process: 1 trainable: False batch_generation: ranking: ${batch_generation_ranking:False} min_prompt_length: ${batch_generation_min_prompt_length:0} -rlhf: +runtime: generation_batch_size: ${generation_batch_size:4} data_path: ${data_path} eval_data_path: ${eval_data_path} - eval_output_dir: ${eval_output_dir} + output_dir: ${output_dir} + exp_name: ${exp_name:chatlearn} diff --git a/examples/megatron/step3_rlhf/configs/llama2/test_reward.yaml b/examples/megatron/configs/llama2/test_reward.yaml similarity index 70% rename from examples/megatron/step3_rlhf/configs/llama2/test_reward.yaml rename to examples/megatron/configs/llama2/test_reward.yaml index 5a7ca532..f8aeea25 100644 --- a/examples/megatron/step3_rlhf/configs/llama2/test_reward.yaml +++ b/examples/megatron/configs/llama2/test_reward.yaml @@ -10,9 +10,10 @@ runtime_env: models: reward: model_config_file: reward_inference.yaml - num_device: ${reward_device:1} + num_gpu: ${reward_device:1} gpu_per_process: 1 trainable: False -rlhf: +runtime: generation_batch_size: ${generation_batch_size:4} - eval_data_path: ${eval_data_path} \ No newline at end of file + eval_data_path: ${eval_data_path} + exp_name: ${exp_name:chatlearn} \ No newline at end of file diff --git a/examples/megatron/step3_rlhf/configs/llama2/test_vllm_policy.yaml b/examples/megatron/configs/llama2/test_vllm_policy.yaml similarity index 82% rename from 
examples/megatron/step3_rlhf/configs/llama2/test_vllm_policy.yaml rename to examples/megatron/configs/llama2/test_vllm_policy.yaml index 58581cfc..f492b726 100644 --- a/examples/megatron/step3_rlhf/configs/llama2/test_vllm_policy.yaml +++ b/examples/megatron/configs/llama2/test_vllm_policy.yaml @@ -10,7 +10,7 @@ runtime_env: models: policy: model_config_file: vllm_policy_inference.yaml - num_device: ${num_device:1} + num_gpu: ${num_gpu:1} gpu_per_process: 1 trainable: False batch_generation: @@ -18,8 +18,9 @@ models: min_prompt_length: ${batch_generation_min_prompt_length:0} -rlhf: +runtime: generation_batch_size: ${generation_batch_size:4} data_path: ${data_path} eval_data_path: ${eval_data_path} - eval_output_dir: ${eval_output_dir} + output_dir: ${output_dir} + exp_name: ${exp_name:chatlearn} diff --git a/examples/megatron/configs/llama2/vllm_policy_inference.yaml b/examples/megatron/configs/llama2/vllm_policy_inference.yaml new file mode 100644 index 00000000..99d7800d --- /dev/null +++ b/examples/megatron/configs/llama2/vllm_policy_inference.yaml @@ -0,0 +1,36 @@ +includes: + - base_inference.yaml + - policy_shared.yaml + - data.yaml + + +# sampling params +n: 1 +ignore_eos: ${policy_ignore_eos:False} +top_p: ${policy_top_p:0.9} +top_k: ${policy_top_k:-1} +temperature: ${policy_temperature:1.0} +use_beam_search: ${policy_use_beam_search:False} + +eval_temperature: ${eval_temperature:1.0} +eval_top_k: ${eval_top_k:-1} +eval_top_p: ${eval_top_p:0.9} + + +# sample config +# stop_token_list: stop token string list, not token ids. +stop_token_list: ${stop_token_list:;
} +new_token_limit: ${new_token_limit:True} +prompt_logprobs: ${prompt_logprobs:None} + +# scheduler config +max_num_batched_tokens: ${max_num_batched_tokens:32768} +max_paddings: ${max_paddings:512} + +# cache config +block_size: ${cache_block_size:16} +gpu_memory_utilization: ${gpu_memory_utilization:0.5} +swap_space: ${swap_space:4} +sliding_window: ${sliding_window:None} + +tokenizer: ${tokenizer_load} diff --git a/examples/megatron/step3_rlhf/configs/llama2/vllm_rlhf.yaml b/examples/megatron/configs/llama2/vllm_rlhf.yaml similarity index 70% rename from examples/megatron/step3_rlhf/configs/llama2/vllm_rlhf.yaml rename to examples/megatron/configs/llama2/vllm_rlhf.yaml index 851e5dae..19b1bff4 100644 --- a/examples/megatron/step3_rlhf/configs/llama2/vllm_rlhf.yaml +++ b/examples/megatron/configs/llama2/vllm_rlhf.yaml @@ -10,31 +10,37 @@ runtime_env: models: policy: model_config_file: vllm_policy_inference.yaml - num_device: ${num_device_policy:16} + num_gpu: ${num_gpu_policy:16} trainable: False batch_generation: ranking: ${batch_generation_ranking:False} min_prompt_length: ${batch_generation_min_prompt_length:0} + free_memory: ${free_memory_policy:False} reference: model_config_file: reference.yaml - num_device: ${num_device_ref:16} + num_gpu: ${num_gpu_ref:16} trainable: False generation_batch_size: ${ref_generation_batch_size:4} + free_memory: ${free_memory_reference:False} reward: model_config_file: reward_inference.yaml - num_device: ${num_device_reward:16} + num_gpu: ${num_gpu_reward:16} trainable: False + generation_batch_size: ${reward_generation_batch_size:4} + free_memory: ${free_memory_reward:False} value: model_config_file: old_value_inference.yaml - num_device: ${num_device_value:16} + num_gpu: ${num_gpu_value:16} trainable: False + generation_batch_size: ${value_generation_batch_size:4} + free_memory: ${free_memory_value:False} ppo_policy: model_config_file: ppo_policy.yaml - num_device: ${num_device_ppo_policy:16} + num_gpu: 
${num_gpu_ppo_policy:16} trainable: True lora: enable_lora: ${enable_lora_policy:False} @@ -42,10 +48,11 @@ models: lora_layer: ColumnParallelLinear,LinearLayer,RowParallelLinear column_only_qkv: False lora_dropout: 0.05 + free_memory: ${free_memory_ppo_policy:False} ppo_value: model_config_file: ppo_value.yaml - num_device: ${num_device_ppo_value:16} + num_gpu: ${num_gpu_ppo_value:16} trainable: True lora: enable_lora: ${enable_lora_value:False} @@ -53,20 +60,23 @@ models: lora_layer: ColumnParallelLinear,LinearLayer,RowParallelLinear column_only_qkv: False lora_dropout: 0.05 + free_memory: ${free_memory_ppo_value:False} -rlhf: +runtime: colocation: - policy,ppo_policy,reward,reference,value,ppo_value generation_batch_size: ${generation_batch_size:4} train_micro_batch_size: ${train_micro_batch_size:2} train_global_batch_size: ${train_global_batch_size:512} - num_ppo_episode: 5000 + num_episode: ${num_episode:100} sample_per_episode: ${sample_per_episode:1024} num_training_epoch: 1 save_episode_interval: ${save_episode_interval:100} data_path: ${data_path} + eval_data_path: ${eval_data_path} training_data_num_limit: ${training_data_num_limit:-1} eval_data_num_limit: ${eval_data_num_limit:128} eval_episode_interval: ${eval_episode_interval:100} data_checkpoint_path: ${data_checkpoint_path} - eval_output_dir: ${eval_output_dir} + output_dir: ${output_dir} + exp_name: ${exp_name:chatlearn} diff --git a/examples/megatron/data/__init__.py b/examples/megatron/data/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/examples/megatron/step3_rlhf/prepare_data.py b/examples/megatron/data/prepare_data_alignment.py similarity index 79% rename from examples/megatron/step3_rlhf/prepare_data.py rename to examples/megatron/data/prepare_data_alignment.py index 26571fe7..c83b735a 100644 --- a/examples/megatron/step3_rlhf/prepare_data.py +++ b/examples/megatron/data/prepare_data_alignment.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. 
All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""prepare data for rlhf""" +"""prepare data for alignment""" import json import os @@ -23,20 +23,20 @@ while True: try: - rlhf_data = load_dataset('Dahoas/full-hh-rlhf') + alignment_data = load_dataset('Dahoas/full-hh-rlhf') break except Exception as e: print(e) continue -prefix = os.path.join(sys.argv[1], 'rlhf') +prefix = os.path.join(sys.argv[1], 'alignment') if not os.path.exists(prefix): os.makedirs(prefix) with open(os.path.join(prefix, 'train.jsonl'), 'w', encoding="utf-8") as f: - for item in tqdm(rlhf_data['train']): + for item in tqdm(alignment_data['train']): item.pop('chosen') f.write(json.dumps(item) + '\n') with open(os.path.join(prefix, 'dev.jsonl'), 'w', encoding="utf-8") as f: - for item in tqdm(rlhf_data['test']): + for item in tqdm(alignment_data['test']): item.pop('chosen') f.write(json.dumps(item) + '\n') diff --git a/examples/megatron/data/prepare_data_math.py b/examples/megatron/data/prepare_data_math.py new file mode 100644 index 00000000..b3240918 --- /dev/null +++ b/examples/megatron/data/prepare_data_math.py @@ -0,0 +1,32 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""prepare data for math""" +import os +import sys +import json +from datasets import load_dataset +from tqdm import tqdm +dataset = load_dataset('openai/gsm8k', 'main') + +prefix = os.path.join(sys.argv[1], 'math') +if not os.path.exists(prefix): + os.makedirs(prefix) + +for tag in dataset: + with open(os.path.join(prefix, f'{tag}.jsonl'), 'w', encoding="utf-8") as f: + for item in tqdm(dataset[tag]): + prompt = f"\n\nHuman: {item['question']}\n\nAssistant: " + new_item = {"eval_func": "math_rule", "prompt": prompt, 'answer': item['answer']} + f.write(json.dumps(new_item) + '\n') diff --git a/examples/megatron/step2_reward/prepare_data.py b/examples/megatron/data/prepare_data_reward.py similarity index 95% rename from examples/megatron/step2_reward/prepare_data.py rename to examples/megatron/data/prepare_data_reward.py index 41116078..c95ca1f2 100644 --- a/examples/megatron/step2_reward/prepare_data.py +++ b/examples/megatron/data/prepare_data_reward.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/examples/megatron/step1_sft/prepare_data.py b/examples/megatron/data/prepare_data_sft.py similarity index 94% rename from examples/megatron/step1_sft/prepare_data.py rename to examples/megatron/data/prepare_data_sft.py index 46e0ce35..910255fe 100644 --- a/examples/megatron/step1_sft/prepare_data.py +++ b/examples/megatron/data/prepare_data_sft.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/megatron/data/prompt_dataset.py b/examples/megatron/data/prompt_dataset.py new file mode 100644 index 00000000..56480d3d --- /dev/null +++ b/examples/megatron/data/prompt_dataset.py @@ -0,0 +1,170 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""prompt dataset""" + +import copy +from collections import defaultdict +from typing import List + +import torch +from megatron.training import get_args +from torch.utils.data import Dataset +import torch.nn.functional as F +from examples.megatron.models.utils import get_eos_id + + +def zero_pad_sequences(sequences, side: str = "right", value=0, pad_to_seq_length=False): + assert side in ("left", "right") + if pad_to_seq_length: # pad to args.seq_length + args = get_args() + max_len = args.seq_length + else: # pad to the max sequence length of the current batch + max_len = max(seq.size(-1) for seq in sequences) + padded_sequences = [] + for seq in sequences: + pad_len = max_len - seq.size(-1) + padding = (pad_len, 0) if side == "left" else (0, pad_len) + padded_sequences.append(F.pad(seq, padding, value=value)) + return torch.stack(padded_sequences, dim=0) + + +class PromptPipeline(Dataset): + """ + a dataset of list of no padded prompt tensors + truncted to max_prompt_length from right + """ + + def __init__(self, prompts: List[str], max_prompt_length: int, tokenizer=None): + super().__init__() + + for p in prompts: + assert len(p) > 0, "Got empty prompt" + assert max_prompt_length > 0, \ + "Prompt length for RLHF/OnlineDPO/GRPO trainer must be an integer greater than 0" + + if len(prompts[0]) == 3: + prompt_encodings = [tokenizer.tokenize(prompt)[:max_prompt_length] for prompt, _, _ in prompts] + else: + prompt_encodings = [tokenizer.tokenize(prompt)[:max_prompt_length] for prompt in prompts] + prompt_id_tensors = [torch.tensor(p_encoding, dtype=torch.long) for p_encoding in prompt_encodings] + + # dup dataset if num_inference_per_prompt + self.data = [] + prompts = [{"input_ids": prompt_tensor} for prompt_tensor in prompt_id_tensors] + for p in prompts: + dup = [copy.deepcopy(p) for i in range(get_args().num_inference_per_prompt)] + self.data.extend(dup) + + self.tokenizer = 
tokenizer + + def __getitem__(self, ix: int): + return self.data[ix] + + def __len__(self) -> int: + return len(self.data) + + def collate_fn(self, samples): + collate_dict = defaultdict(list) + + # Loop over the samples and append each tensor value to the corresponding list + for sample in samples: + for key in sample.keys(): + collate_dict[key].append(sample[key]) + + # Return the collate_dict + return collate_dict + + +class DPOPromptPipeline(PromptPipeline): + """ + a dataset of list of no padded prompt tensors + truncted to max_prompt_length from right + """ + + def __init__(self, prompts: List[str], max_seq_length: int, tokenizer=None):# pylint: disable=super-init-not-called + + self.data = [] + for prompt, chosen, rejected in prompts: + chosen = prompt + chosen + rejected = prompt + rejected + chosen_token = tokenizer.tokenize(chosen)[:max_seq_length] + reject_token = tokenizer.tokenize(rejected)[:max_seq_length] + chosen_token[-1] = get_eos_id(tokenizer) + reject_token[-1] = get_eos_id(tokenizer) + prompt_id_len = len(tokenizer.tokenize(prompt)) + # has at least one token from positive/negative responses + if prompt_id_len >= max_seq_length: + continue + chosen_token = torch.tensor(chosen_token, dtype=torch.long) + chosen_mask = torch.ones((1, chosen_token.shape[-1])) + reject_token = torch.tensor(reject_token, dtype=torch.long) + reject_mask = torch.ones((1, reject_token.shape[-1])) + sample = (chosen_token, chosen_mask, reject_token, reject_mask, prompt_id_len) + self.data.append(sample) + self.tokenizer = tokenizer + + def collate_fn(self, samples): + chosen_ids = [] + chosen_masks = [] + reject_ids = [] + rejects_masks = [] + prompt_id_lens = [] + for chosen_id, chosen_mask, reject_id, reject_mask, prompt_id_len in samples: + chosen_ids.append(chosen_id) + chosen_masks.append(chosen_mask) + reject_ids.append(reject_id) + rejects_masks.append(reject_mask) + prompt_id_lens.append(prompt_id_len) + + chosen_ids = zero_pad_sequences(chosen_ids, 
value=get_eos_id(self.tokenizer), pad_to_seq_length=True) + chosen_masks = zero_pad_sequences(chosen_masks, pad_to_seq_length=True) + reject_ids = zero_pad_sequences(reject_ids, value=get_eos_id(self.tokenizer), pad_to_seq_length=True) + rejects_masks = zero_pad_sequences(rejects_masks, pad_to_seq_length=True) + return { + "chosen": chosen_ids, + "chosen_mask": chosen_masks, + "rejected": reject_ids, + "rejected_mask": rejects_masks, + "prompt_id_lens": torch.tensor(prompt_id_lens, dtype=torch.long) + } + + +class VLLMPromptPipeline(PromptPipeline): + """ + a dataset of list of no padded prompt tensors + truncted to max_prompt_length from right + """ + + def __init__(self, prompts: List, max_prompt_length: int, tokenizer=None, prompt_key=None):# pylint: disable=super-init-not-called + for p in prompts: + assert len(p) > 0, "Got empty prompt" + assert max_prompt_length > 0, \ + "Prompt length for RLHF/OnlineDPO trainer must be an integer greater than 0" + if prompt_key is None: + if len(prompts[0]) == 3: + self.prompts = [(prompt, tokenizer.encode(prompt)[:max_prompt_length]) for prompt, _, _ in prompts] + else: + self.prompts = [(prompt, tokenizer.encode(prompt)[:max_prompt_length]) for prompt in prompts] + self.data = [] + for prompt, prompt_ids in self.prompts: + p = {"input_ids": prompt_ids, "prompt": prompt} + self.data.extend([copy.deepcopy(p)]) + else: + for prompt in prompts: + prompt["input_ids"] = tokenizer.encode(prompt[prompt_key])[:max_prompt_length] + if 'prompt' != prompt_key: + prompt['prompt'] = prompt[prompt_key] + self.data = prompts + self.tokenizer = tokenizer diff --git a/examples/megatron/dataset/reward_dataset.py b/examples/megatron/data/reward_dataset.py similarity index 98% rename from examples/megatron/dataset/reward_dataset.py rename to examples/megatron/data/reward_dataset.py index 2d107e19..3bf2cffb 100644 --- a/examples/megatron/dataset/reward_dataset.py +++ b/examples/megatron/data/reward_dataset.py @@ -1,4 +1,4 @@ -# Copyright 2023 
Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ import numpy as np import torch -from megatron import get_args +from megatron.training import get_args from torch.utils.data import Dataset diff --git a/examples/megatron/dataset/sft_dataset.py b/examples/megatron/data/sft_dataset.py similarity index 96% rename from examples/megatron/dataset/sft_dataset.py rename to examples/megatron/data/sft_dataset.py index 12aaca3f..abc65c5d 100644 --- a/examples/megatron/dataset/sft_dataset.py +++ b/examples/megatron/data/sft_dataset.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,8 +18,8 @@ import numpy as np import torch -from megatron import get_tokenizer -from megatron import print_rank_0 +from megatron.training import get_tokenizer +from megatron.training import print_rank_0 def build_train_valid_test_datasets(data_prefix, seq_length): diff --git a/examples/megatron/dataset/prompt_dataset.py b/examples/megatron/dataset/prompt_dataset.py deleted file mode 100644 index 23934622..00000000 --- a/examples/megatron/dataset/prompt_dataset.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""prompt dataset""" - -import copy -from collections import defaultdict -from typing import List - -import torch -from megatron import get_args -from torch.utils.data import Dataset - - -class PromptPipeline(Dataset): - """ - a dataset of list of no padded prompt tensors - truncted to max_prompt_length from right - """ - - def __init__(self, prompts: List[str], max_prompt_length: int, tokenizer=None): - super().__init__() - - for p in prompts: - assert len(p) > 0, "Got empty prompt" - assert max_prompt_length > 0, \ - "Prompt length for PPO trainer must be an integer greater than 0" - - prompt_encodings = [tokenizer.tokenize(prompt)[:max_prompt_length] for prompt in prompts] - prompt_id_tensors = [torch.tensor(p_encoding, dtype=torch.long) for p_encoding in prompt_encodings] - - # dup dataset if num_inference_per_prompt - self.prompts_ids = [] - prompts = [{"input_ids": prompt_tensor} for prompt_tensor in prompt_id_tensors] - for p in prompts: - dup = [copy.deepcopy(p) for i in range(get_args().num_inference_per_prompt)] - self.prompts_ids.extend(dup) - - self.tokenizer = tokenizer - - def __getitem__(self, ix: int): - return self.prompts_ids[ix] - - def __len__(self) -> int: - return len(self.prompts_ids) - - def collate_fn(self, samples): - collate_dict = defaultdict(list) - - # Loop over the samples and append each tensor value to the corresponding list - for sample in samples: - for key in sample.keys(): - collate_dict[key].append(sample[key]) - - # Return the 
collate_dict - return collate_dict - - -class VLLMPromptPipeline(PromptPipeline): - """ - a dataset of list of no padded prompt tensors - truncted to max_prompt_length from right - """ - - def __init__(self, prompts: List[str], max_prompt_length: int, tokenizer=None):# pylint: disable=super-init-not-called - - for p in prompts: - assert len(p) > 0, "Got empty prompt" - assert max_prompt_length > 0, \ - "Prompt length for PPO trainer must be an integer greater than 0" - - self.prompts = [(prompt, tokenizer.encode(prompt)[:max_prompt_length]) for prompt in prompts] - self.prompts_ids = [] - for prompt, prompt_ids in self.prompts: - p = {"input_ids": prompt_ids, "prompt": prompt} - self.prompts_ids.extend([copy.deepcopy(p)]) - self.tokenizer = tokenizer diff --git a/examples/megatron/entry/train_dpo.py b/examples/megatron/entry/train_dpo.py new file mode 100644 index 00000000..19e93541 --- /dev/null +++ b/examples/megatron/entry/train_dpo.py @@ -0,0 +1,40 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""entry file for training dpo""" + +import random + +from examples.megatron.models import PolicyReference, PolicyTrainer +from examples.megatron.models.train_helper import get_prompts +import chatlearn +from chatlearn import DPOEngine + + +if __name__ == "__main__": + chatlearn.init() + args = chatlearn.get_args() + reference_model = PolicyReference("reference") + policy_trainer = PolicyTrainer("ppo_policy") + + engine = DPOEngine(reference_model, policy_trainer) + + all_prompts = get_prompts(args.runtime_args.data_path, num_limit=args.runtime_args._args_dict['training_data_num_limit']) + random.seed(reference_model.model_args["seed"]) + num_train = len(all_prompts) + random.shuffle(all_prompts) + train_prompts = all_prompts[:num_train] + + engine.set_dataset(train_prompts) + engine.learn() diff --git a/examples/megatron/entry/train_grpo_math.py b/examples/megatron/entry/train_grpo_math.py new file mode 100644 index 00000000..94102160 --- /dev/null +++ b/examples/megatron/entry/train_grpo_math.py @@ -0,0 +1,97 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""entry file for training online dpo""" + +import os +import random +from collections import defaultdict +import numpy + +from examples.megatron.models import PolicyReference, PolicyTrainer, RewardInference +from examples.megatron.models.reward_math import MathReward +from examples.megatron.models.train_helper import eval_post_process, get_prompts + +import chatlearn +from chatlearn import GRPOMathEngine + + +# pylint: disable=invalid-envvar-default,bad-exception-cause,ungrouped-imports +if os.getenv("ENABLE_VLLM", False): + try: + from examples.megatron.models import VLLMPolicyInference as PolicyModel + except Exception as e: + raise RuntimeError("Cannot import vllm, please set vllm python path or install vllm first.") from e +else: + from examples.megatron.models import PolicyInference as PolicyModel + +# GRPO advantage计算relay_sample_fn +def grpo_rw_relay_sample_fn(episode_relay_buffers): + buffers = episode_relay_buffers[-1].buffer + queryids2samples = defaultdict(list) + for s in buffers: + queryids2samples[str(s["no_padded_query_ids"].cpu().tolist())].append(s) + + math_reward_strategy = math_reward_model.model_args['math_reward_strategy'] + + res_buffers = [] + for _, l in queryids2samples.items(): + if math_reward_strategy == 'sparse_only': + assert "math_rewards" in l[0], l[0].keys() + rewards = [each["math_rewards"] for each in l] + elif math_reward_strategy == 'dense_only': + assert "rm_rewards" in l[0], l[0].keys() + rewards = [each["action_rewards"] for each in l] + elif math_reward_strategy == 'merge': + assert "rm_rewards" in l[0] and "math_rewards" in l[0], l[0].keys() + rewards = [each["math_rewards"] + each["rm_rewards"] for each in l] + mean = numpy.mean(rewards) + std = numpy.std(rewards) + for i, li in enumerate(l): + li['final_rewards'] = rewards[i] + li['advantages'] = ((rewards[i] - mean) / (std + 1e-5)) + res_buffers.extend(l) + assert len(buffers) == 
args.runtime_args.sample_per_episode + return res_buffers + +if __name__ == "__main__": + chatlearn.init() + args = chatlearn.get_args() + reference_model = PolicyReference("reference") + policy_trainer = PolicyTrainer("ppo_policy") + + policy_model = PolicyModel("policy") + reward_model = RewardInference("reward") + math_reward_model = MathReward("math_reward") + + engine = GRPOMathEngine(policy_model, reference_model, reward_model, math_reward_model, policy_trainer) + + all_prompts = get_prompts(args.runtime_args.data_path, num_limit=args.runtime_args._args_dict['training_data_num_limit']) + random.seed(reference_model.model_args["seed"]) + split_ratio = 0.9 if args.runtime_args.eval_episode_interval > 0 else 1 + num_train = int(len(all_prompts) * split_ratio) + random.shuffle(all_prompts) + train_prompts = all_prompts[:num_train] + + if args.runtime_args.eval_episode_interval > 0: + val_prompts = all_prompts[num_train:] + eval_num_limit = args.runtime_args.get('eval_data_num_limit') + if eval_num_limit: + eval_num_limit = min(eval_num_limit, len(val_prompts)) + val_prompts = val_prompts[:eval_num_limit] + engine.evaluator.set_dataset(val_prompts) \ + .set_post_process_func(eval_post_process) + engine.set_dataset(train_prompts) + engine.set_relay_sample_fn(grpo_rw_relay_sample_fn) + engine.learn() diff --git a/examples/megatron/entry/train_online_dpo.py b/examples/megatron/entry/train_online_dpo.py new file mode 100644 index 00000000..ebd93aa9 --- /dev/null +++ b/examples/megatron/entry/train_online_dpo.py @@ -0,0 +1,72 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""entry file for training online dpo""" + +import random +import os + +from examples.megatron.models import PolicyReference, PolicyTrainer, RewardInference +from examples.megatron.models.train_helper import eval_post_process, get_prompts + +import chatlearn +from chatlearn import Evaluator +from chatlearn import OnlineDPOEngine + +# pylint: disable=invalid-envvar-default,bad-exception-cause,ungrouped-imports +if os.getenv("ENABLE_VLLM", False): + try: + from examples.megatron.models import VLLMPolicyInference as PolicyModel + except Exception as e: + raise RuntimeError("Cannot import vllm, please set vllm python path or install vllm first.") from e +else: + from examples.megatron.models import PolicyInference as PolicyModel + + +if __name__ == "__main__": + chatlearn.init() + args = chatlearn.get_args() + reference_model = PolicyReference("reference") + policy_trainer = PolicyTrainer("ppo_policy") + + policy_model = PolicyModel("policy") + reward_model = RewardInference("reward") + + engine = OnlineDPOEngine(policy_model, reference_model, reward_model, policy_trainer) + + all_prompts = get_prompts(args.runtime_args.data_path, num_limit=args.runtime_args._args_dict['training_data_num_limit']) + random.seed(reference_model.model_args["seed"]) + split_ratio = 0.9 if args.runtime_args.eval_episode_interval > 0 else 1 + num_train = int(len(all_prompts) * split_ratio) + random.shuffle(all_prompts) + train_prompts = all_prompts[:num_train] + + if 
args.runtime_args.eval_episode_interval > 0: + val_prompts = all_prompts[num_train:] + eval_num_limit = args.runtime_args.get('eval_data_num_limit') + if eval_num_limit: + eval_num_limit = min(eval_num_limit, len(val_prompts)) + val_prompts = val_prompts[:eval_num_limit] + + def eval_flow(batch): + r0 = policy_model.eval_forward(batch) + r1 = reward_model.eval_forward(r0) + return r1 + + evaluator = Evaluator(eval_flow) \ + .set_dataset(val_prompts) \ + .set_post_process_func(eval_post_process) + engine.set_evaluator(evaluator) + engine.set_dataset(train_prompts) + engine.learn() diff --git a/examples/megatron/step2_reward/finetune_reward.py b/examples/megatron/entry/train_reward.py similarity index 92% rename from examples/megatron/step2_reward/finetune_reward.py rename to examples/megatron/entry/train_reward.py index efec0a57..9809a9c6 100644 --- a/examples/megatron/step2_reward/finetune_reward.py +++ b/examples/megatron/entry/train_reward.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,19 +17,20 @@ from functools import partial import torch -from dataset.reward_dataset import build_train_valid_test_datasets_for_rm -from megatron import get_args -from megatron import get_timers -from megatron import get_tokenizer -from megatron import print_rank_0 +from megatron.training import get_args +from megatron.training import get_timers +from megatron.training import get_tokenizer +from megatron.training import print_rank_0 from megatron.core import parallel_state from megatron.core import tensor_parallel from megatron.core.enums import ModelType from megatron.core.pipeline_parallel import schedules from megatron.training import pretrain -from megatron.utils import average_losses_across_data_parallel_group -from megatron.utils import get_ltor_masks_and_position_ids -from models.reward_model import model_provider +from megatron.training.utils import average_losses_across_data_parallel_group +from megatron.training.utils import get_ltor_masks_and_position_ids + +from examples.megatron.data.reward_dataset import build_train_valid_test_datasets_for_rm +from examples.megatron.models.reward_model import model_provider def get_tensor_shapes_reward( # pylint: disable=unused-argument diff --git a/examples/megatron/entry/train_rlhf.py b/examples/megatron/entry/train_rlhf.py new file mode 100644 index 00000000..dc31111a --- /dev/null +++ b/examples/megatron/entry/train_rlhf.py @@ -0,0 +1,72 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""entry file for training RLHF""" + +import random +import os + +from examples.megatron.models import PolicyReference, PolicyTrainer, RewardInference, ValueInference, ValueTrainer +from examples.megatron.models.train_helper import eval_post_process, get_prompts + +import chatlearn +from chatlearn import Evaluator +from chatlearn import RLHFEngine + +# pylint: disable=invalid-envvar-default,bad-exception-cause,ungrouped-imports +if os.getenv("ENABLE_VLLM", False): + try: + from examples.megatron.models import VLLMPolicyInference as PolicyModel + except Exception as e: + raise RuntimeError("Cannot import vllm, please set vllm python path or install vllm first.") from e +else: + from examples.megatron.models import PolicyInference as PolicyModel + + +if __name__ == "__main__": + chatlearn.init() + args = chatlearn.get_args() + reference_model = PolicyReference("reference") + policy_trainer = PolicyTrainer("ppo_policy") + + policy_model = PolicyModel("policy") + reward_model = RewardInference("reward") + + value_model = ValueInference("value") + value_trainer = ValueTrainer("ppo_value") + engine = RLHFEngine(policy_model, reference_model, reward_model, value_model, policy_trainer, value_trainer) + + all_prompts = get_prompts(args.runtime_args.data_path, num_limit=args.runtime_args._args_dict['training_data_num_limit']) + random.seed(reference_model.model_args["seed"]) + split_ratio = 0.9 if args.runtime_args.eval_episode_interval > 0 else 1 + num_train = int(len(all_prompts) * split_ratio) + random.shuffle(all_prompts) + train_prompts = all_prompts[:num_train] + + if args.runtime_args.eval_episode_interval > 0: + val_prompts = all_prompts[num_train:] + eval_num_limit = args.runtime_args.get('eval_data_num_limit') + def eval_flow(batch): + r0 = policy_model.eval_forward(batch) + r1 = 
reward_model.eval_forward(r0) + return r1 + if eval_num_limit: + eval_num_limit = min(eval_num_limit, len(val_prompts)) + val_prompts = val_prompts[:eval_num_limit] + evaluator = Evaluator(eval_flow) \ + .set_dataset(val_prompts) \ + .set_post_process_func(eval_post_process) + engine.set_evaluator(evaluator) + engine.set_dataset(train_prompts) + engine.learn() diff --git a/examples/megatron/step1_sft/finetune_sft.py b/examples/megatron/entry/train_sft.py similarity index 84% rename from examples/megatron/step1_sft/finetune_sft.py rename to examples/megatron/entry/train_sft.py index 030b7687..d8c653ce 100644 --- a/examples/megatron/step1_sft/finetune_sft.py +++ b/examples/megatron/entry/train_sft.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,19 +17,19 @@ from functools import partial import torch -from megatron import get_args -from megatron import get_timers -from megatron import get_tokenizer -from megatron import print_rank_0 +from megatron.training import get_args +from megatron.training import get_timers +from megatron.training import get_tokenizer +from megatron.training import print_rank_0 from megatron.core import tensor_parallel from megatron.core.enums import ModelType -from megatron.model import GPTModel +from megatron.legacy.model import GPTModel from megatron.training import pretrain -from megatron.utils import average_losses_across_data_parallel_group -from megatron.utils import get_ltor_masks_and_position_ids +from megatron.training.utils import average_losses_across_data_parallel_group +from megatron.training.utils import get_ltor_masks_and_position_ids -from dataset.sft_dataset import build_train_valid_test_datasets -from models.utils import has_config_in_args +from 
examples.megatron.data.sft_dataset import build_train_valid_test_datasets +from examples.megatron.models.utils import has_config_in_args def model_provider(pre_process=True, post_process=True): @@ -37,7 +37,7 @@ def model_provider(pre_process=True, post_process=True): print_rank_0('building GPT model ...') if has_config_in_args(GPTModel): - from megatron.arguments import core_transformer_config_from_args # pylint: disable=import-outside-toplevel + from megatron.training.arguments import core_transformer_config_from_args # pylint: disable=import-outside-toplevel args = get_args() config = core_transformer_config_from_args(args) model = GPTModel( diff --git a/examples/megatron/models/__init__.py b/examples/megatron/models/__init__.py index 83f983f3..f61f346f 100644 --- a/examples/megatron/models/__init__.py +++ b/examples/megatron/models/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/megatron/models/base_trainer.py b/examples/megatron/models/base_trainer.py index 6686815a..5151ee72 100644 --- a/examples/megatron/models/base_trainer.py +++ b/examples/megatron/models/base_trainer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -14,26 +14,32 @@ # ============================================================================== """Training utilities.""" +import dataclasses import torch -from megatron import get_args -from megatron import get_timers -from megatron import get_tokenizer -from megatron import print_rank_0 +from megatron.training import get_args +from megatron.training import get_timers +from megatron.training import get_tokenizer +from megatron.training import get_num_microbatches +from megatron.training import print_rank_0 from megatron.core.enums import ModelType try: from megatron.core.utils import get_model_config except ImportError: get_model_config = None -from megatron.optimizer import get_megatron_optimizer -from megatron.training import get_optimizer_param_scheduler, get_model -from megatron.utils import unwrap_model -from megatron.training import train_step as megatron_train_step - -from chatlearn import RLHFMegatronModule +from megatron.core.optimizer import get_megatron_optimizer, OptimizerConfig +from megatron.training import get_model +from megatron.training.training import get_optimizer_param_scheduler +from megatron.training.utils import unwrap_model +from megatron.training.training import train_step as megatron_train_step +from megatron.core import mpu +from megatron.core.pipeline_parallel import get_forward_backward_func + +from chatlearn import MegatronModule from chatlearn.utils.megatron_utils import load_checkpoint +from .constants import TrainerEngine -class BaseTrainer(RLHFMegatronModule): +class BaseTrainer(MegatronModule): """Base Trainer""" def setup(self): @@ -47,7 +53,7 @@ def setup(self): self.model_type = ModelType.encoder_or_decoder self.tokenizer = get_tokenizer() - self.args.save = f"{self.args.save}/{self.name}/" + self.args.save = f"{self.runtime_args.output_dir}/save_model/{self.name}/" if self.resume_training: self.args.load = get_args().save @@ -92,12 +98,18 @@ def setup_model_and_optimizer(self, model_provider_func, 
torch.distributed.barrier() unwrapped_model = unwrap_model(model) + kwargs = {} + for f in dataclasses.fields(OptimizerConfig): + if hasattr(args, f.name): + kwargs[f.name] = getattr(args, f.name) + config = OptimizerConfig(**kwargs) + config.timers = get_timers() if args.load and args.no_load_optim: optimizer = None opt_param_scheduler = None else: - optimizer = get_megatron_optimizer(model, no_wd_decay_cond, + optimizer = get_megatron_optimizer(config, model, no_wd_decay_cond, scale_lr_cond, lr_mult) opt_param_scheduler = get_optimizer_param_scheduler(optimizer) @@ -107,7 +119,7 @@ def setup_model_and_optimizer(self, model_provider_func, args.iteration = load_checkpoint(model, optimizer, opt_param_scheduler, strict=strict) timers('load-checkpoint').stop(barrier=True) if args.no_load_optim: - optimizer = get_megatron_optimizer(model, no_wd_decay_cond, + optimizer = get_megatron_optimizer(config, model, no_wd_decay_cond, scale_lr_cond, lr_mult) opt_param_scheduler = get_optimizer_param_scheduler(optimizer) opt_param_scheduler.step(args.iteration * args.global_batch_size) @@ -123,21 +135,109 @@ def setup_model_and_optimizer(self, model_provider_func, if args.fp16: optimizer.reload_model_params() if optimizer is None: - optimizer = get_megatron_optimizer(model, no_wd_decay_cond, + optimizer = get_megatron_optimizer(config, model, no_wd_decay_cond, scale_lr_cond, lr_mult) opt_param_scheduler = get_optimizer_param_scheduler(optimizer) return model, optimizer, opt_param_scheduler - def train_step(self, data, train_info): - iteration = train_info["iteration"] + def dpo_train_step(self, forward_step_func, data_iterator, + model, optimizer, opt_param_scheduler, config): + # Code below is migrated from 'train_step' function of Megatron-LM:2ca5cb09 + args = get_args() + timers = get_timers() + + # Set grad to zero. + for model_chunk in model: + model_chunk.zero_grad_buffer() + optimizer.zero_grad() + + # Forward pass. 
+ forward_backward_func = get_forward_backward_func() + micro_batch_size = args.micro_batch_size + if config.pipeline_model_parallel_size > 1: + micro_batch_size *= 2 + + losses_reduced = forward_backward_func( + forward_step_func=forward_step_func, + data_iterator=data_iterator, + model=model, + num_microbatches=get_num_microbatches(), + seq_length=args.seq_length, + micro_batch_size=micro_batch_size, + decoder_seq_length=args.decoder_seq_length, + forward_only=False) + + # Empty unused memory. + if args.empty_unused_memory_level >= 1: + torch.cuda.empty_cache() + + # Vision gradients. + if getattr(args, 'vision_pretraining', False) and args.vision_pretraining_type == "dino": + unwrapped_model = unwrap_model(model[0]) + unwrapped_model.cancel_gradients_last_layer(args.curr_iteration) + + # Update parameters. + timers('optimizer', log_level=1).start(barrier=args.barrier_with_L1_time) + update_successful, grad_norm, num_zeros_in_grad = optimizer.step() + timers('optimizer').stop() + + # Vision momentum. + if getattr(args, 'vision_pretraining', False) and args.vision_pretraining_type == "dino": + unwrapped_model = unwrap_model(model[0]) + unwrapped_model.update_momentum(args.curr_iteration) + + # Update learning rate. + if update_successful: + increment = get_num_microbatches() * \ + args.micro_batch_size * \ + args.data_parallel_size + opt_param_scheduler.step(increment=increment) + skipped_iter = 0 + else: + skipped_iter = 1 + + # Empty unused memory. + if args.empty_unused_memory_level >= 2: + torch.cuda.empty_cache() + + if mpu.is_pipeline_last_stage(ignore_virtual=True): + # Average loss across microbatches. + loss_reduced = {} + for key in losses_reduced[0].keys(): + numerator = 0 + denominator = 0 + for x in losses_reduced: + val = x[key] + # there is one dict per microbatch. in new reporting, we average + # over the total number of tokens across the global batch. 
+ if isinstance(val, tuple) or isinstance(val, list): # pylint: disable=consider-merging-isinstance + numerator += val[0] + denominator += val[1] + else: + # legacy behavior. we average over the number of microbatches, + # and so the denominator is 1. + numerator += val + denominator += 1 + loss_reduced[key] = numerator / denominator + return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad + return {}, skipped_iter, grad_norm, num_zeros_in_grad + + + def train_step(self, data, iteration=None): + assert isinstance(data, list) data_iterator = iter(data) if self.config is not None: kwargs = {"config": self.config} else: kwargs = {} - _, skipped_iter, grad_norm, num_zeros_in_grad = megatron_train_step(self._forward_step, data_iterator, - self.model, self.optimizer, - self.opt_param_scheduler, **kwargs) + if self.args.trainer_engine == TrainerEngine.DPO: + _, skipped_iter, grad_norm, num_zeros_in_grad = self.dpo_train_step(self._forward_step, data_iterator, + self.model, self.optimizer, + self.opt_param_scheduler, **kwargs) + else: + _, skipped_iter, grad_norm, num_zeros_in_grad = megatron_train_step(self._forward_step, data_iterator, + self.model, self.optimizer, + self.opt_param_scheduler, **kwargs) self.post_update_stuffs({}, skipped_iter, grad_norm, num_zeros_in_grad, iteration) diff --git a/examples/megatron/models/constants_ppo.py b/examples/megatron/models/constants.py similarity index 90% rename from examples/megatron/models/constants_ppo.py rename to examples/megatron/models/constants.py index d5026f53..833668ae 100644 --- a/examples/megatron/models/constants_ppo.py +++ b/examples/megatron/models/constants.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -14,6 +14,7 @@ # ============================================================================== """constant""" +from enum import Enum from typing import Dict, Tuple import torch @@ -57,7 +58,7 @@ def select_actions_from_right_padded(ts, action_starts, response_size, pad_value return torch.stack(res, dim=0).to(torch.cuda.current_device()) -def get_ltor_masks_and_position_ids(data): +def get_ltor_masks_and_position_ids_rlhf(data): """Build masks and position id for left to right model.""" # Extract batch size and sequence length. @@ -133,3 +134,15 @@ def get_running_stats(running_dict: Dict[str, RunningMoments]): def reset_running_stats(running_dict: Dict[str, RunningMoments]): for _, running in running_dict.items(): running.reset() + + +class TrainerEngine(str, Enum): + """trainer engine. + 1. dpo: reference, policy_trainer + 2. online_dpo: policy, reward, reference, policy_trainer + 3. rlhf: policy, value, reward, reference, policy_trainer, value_trainer + """ + DPO = "dpo" + RLHF = "rlhf" + ONLINE_DPO = "online_dpo" + GRPO = "grpo" diff --git a/examples/megatron/models/forward_step.py b/examples/megatron/models/forward_step.py index 344dbce5..0a772c05 100644 --- a/examples/megatron/models/forward_step.py +++ b/examples/megatron/models/forward_step.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -14,15 +14,19 @@ # ============================================================================== """Forward step utilities.""" +import copy + import torch -from megatron import get_args +from megatron.training import get_args from megatron.core import mpu -from megatron.text_generation.communication import send_to_next_pipeline_rank, recv_from_prev_pipeline_rank_ -from megatron.text_generation.forward_step import _allocate_recv_buffer +from megatron.inference.text_generation.communication import send_to_next_pipeline_rank, recv_from_prev_pipeline_rank_ +from megatron.inference.text_generation.forward_step import _allocate_recv_buffer + +from .constants import TrainerEngine -def forward_step_helper(model, tokens, position_ids, attention_mask, pooling_sequence_index=None, pooling=False): +def forward_step_helper(model, tokens, position_ids, attention_mask, pooling_sequence_index=None, pooling=False, inference_config=None): # Pipelining case. args = get_args() if args.pipeline_model_parallel_size > 1: @@ -36,17 +40,19 @@ def forward_step_helper(model, tokens, position_ids, attention_mask, pooling_seq attention_mask, micro_batch_size, pooling_sequence_index=pooling_sequence_index, - pooling=pooling) + pooling=pooling, + inference_config=inference_config) return _no_pipelining_forward_step(model, tokens, position_ids, attention_mask, - pooling_sequence_index=pooling_sequence_index) + pooling_sequence_index=pooling_sequence_index, + inference_config=inference_config) def _forward_step_helper(model, tokens, position_ids, attention_mask, - recv_buffer=None, pooling_sequence_index=None): + recv_buffer=None, pooling_sequence_index=None, inference_config=None): """Single forward step. Update the allocate memory flag so only the first time the memory is allocated.""" batch_size = tokens.size(0) @@ -60,9 +66,9 @@ def _forward_step_helper(model, tokens, position_ids, attention_mask, # Forward pass through the model. 
model.set_input_tensor(recv_buffer) if pooling_sequence_index is not None: - output_tensor = model(tokens, position_ids, attention_mask, pooling_sequence_index=pooling_sequence_index) + output_tensor = model(tokens, position_ids, attention_mask, pooling_sequence_index=pooling_sequence_index, inference_config=inference_config) else: - output_tensor = model(tokens, position_ids, attention_mask) + output_tensor = model(tokens, position_ids, attention_mask, inference_config=inference_config) # Send output to the next stage. send_to_next_pipeline_rank(output_tensor) @@ -71,12 +77,12 @@ def _forward_step_helper(model, tokens, position_ids, attention_mask, def _no_pipelining_forward_step(model, tokens, position_ids, attention_mask, - recv_buffer=None, pooling_sequence_index=None): + recv_buffer=None, pooling_sequence_index=None, inference_config=None): """If recv_buffer is none, we will allocate one on the fly.""" # Run a simple forward pass. output_tensor = _forward_step_helper(model, tokens, position_ids, attention_mask, recv_buffer=recv_buffer, - pooling_sequence_index=pooling_sequence_index) + pooling_sequence_index=pooling_sequence_index, inference_config=inference_config) logits = None if mpu.is_pipeline_last_stage(): @@ -86,7 +92,7 @@ def _no_pipelining_forward_step(model, tokens, position_ids, attention_mask, def _with_pipelining_forward_step(model, tokens, position_ids, attention_mask, - micro_batch_size, pooling_sequence_index=None, pooling=False): + micro_batch_size, pooling_sequence_index=None, pooling=False, inference_config=None): """No interleaving is supported.""" sequence_length = tokens.size(1) batch_size = tokens.size(0) @@ -103,8 +109,19 @@ def _with_pipelining_forward_step(model, tokens, position_ids, attention_mask, args = get_args() if pooling: logits = None + elif inference_config is not None and "DPO_labels" in inference_config: + if get_args().trainer_engine == TrainerEngine.DPO: # dpo + logits = torch.empty( + (batch_size), dtype=torch.float32, 
device=torch.cuda.current_device() + ) + else: # online dpo + logits = torch.empty( + (batch_size, sequence_length), + dtype=torch.float32, device=torch.cuda.current_device()) else: - if args.parallel_output: + parallel_output = inference_config["parallel_output"] if inference_config is not None and \ + "parallel_output" in inference_config else args.parallel_output + if parallel_output: padded_vocab_size = args.padded_vocab_size // args.tensor_model_parallel_size else: padded_vocab_size = args.padded_vocab_size @@ -114,7 +131,9 @@ def _with_pipelining_forward_step(model, tokens, position_ids, attention_mask, # Preallocate recv buffer. recv_buffer = _allocate_recv_buffer(micro_batch_size, sequence_length) - + inference_config_master = None + if inference_config is not None and "DPO_labels" in inference_config: + inference_config_master = copy.deepcopy(inference_config) for micro_batch_index in range(num_micro_batches): # Slice among the batch dimenion. start = micro_batch_index * micro_batch_size @@ -130,9 +149,13 @@ def _with_pipelining_forward_step(model, tokens, position_ids, attention_mask, # Run a simple forward pass. if this_micro_batch_size != micro_batch_size: recv_buffer = None + if inference_config_master is not None and "DPO_labels" in inference_config_master: + for key, value in inference_config_master.items(): + inference_config[key] = value[start:end, ...] output = _forward_step_helper(model, tokens2use, position_ids2use, attention_mask, recv_buffer=recv_buffer, - pooling_sequence_index=pooling_sequence_index2use) + pooling_sequence_index=pooling_sequence_index2use, + inference_config=inference_config) # Copy logits. 
if mpu.is_pipeline_last_stage(): diff --git a/examples/megatron/models/old_policy_inference.py b/examples/megatron/models/old_policy_inference.py index e4964fef..f105fb33 100644 --- a/examples/megatron/models/old_policy_inference.py +++ b/examples/megatron/models/old_policy_inference.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,26 +18,27 @@ import numpy as np import torch import torch.nn.functional as F -from dataset.prompt_dataset import PromptPipeline -from megatron import arguments -from megatron import get_args, get_tokenizer -from megatron import print_rank_0 + from megatron.core import mpu -from megatron.global_vars import get_tensorboard_writer -from megatron.text_generation.communication import broadcast_float_list, \ +from megatron.training import arguments +from megatron.training import get_args, get_tokenizer +from megatron.training import print_rank_0 +from megatron.training.global_vars import get_tensorboard_writer +from megatron.inference.text_generation.communication import broadcast_float_list, \ broadcast_int_list, broadcast_tensor -from megatron.text_generation.generation import generate_tokens_probs_and_return_on_first_stage +from megatron.inference.text_generation.generation import generate_tokens_probs_and_return_on_first_stage +from megatron.inference.text_generation.forward_step import ForwardStep from megatron.training import get_model -from models.policy_model import PolicyModel -from chatlearn import RLHFMegatronModule +from chatlearn import MegatronModule from chatlearn.utils import to_device from chatlearn.utils.megatron_utils import load_checkpoint -from .utils import tensorboard_scalar_dict, get_loss_mask -from .utils import get_eos_id +from examples.megatron.data.prompt_dataset 
import PromptPipeline +from .policy_model import PolicyModel +from .utils import tensorboard_scalar_dict, get_loss_mask, get_eos_id -class PolicyInference(RLHFMegatronModule): +class PolicyInference(MegatronModule): """Policy Megatron Inference""" def add_extra_args(self, parser): @@ -81,19 +82,13 @@ def setup(self): # init num get_args().entropy_num = 0 get_args().latest_entropies = [] - return 'ok' def build_dataset(self, train_prompts, is_eval=False): - ''' - framework source: dataset = self.build_dataset(data) - :param train_prompts: all train prompts used in this training run?? - :return: - a torch.utils.data.Dataset object for prompts_loader of all prompts, and - ''' args = get_args() max_prompt_length = ( args.seq_length - args.max_new_tokens ) + # TODO: read from files prompts_dataset = PromptPipeline( train_prompts, max_prompt_length, get_tokenizer() @@ -277,7 +272,7 @@ def generate(self, model, # Main inference function. # Note that the outputs are available on the first stage. res = generate_tokens_probs_and_return_on_first_stage( - model, prompts_ids, context_length_tensor, + model, ForwardStep, prompts_ids, context_length_tensor, return_output_log_probs=return_output_log_probs, top_k=top_k_sampling, top_p=top_p_sampling, @@ -336,8 +331,8 @@ def eval_forward(self, data): def _forward_step(self, data, iteration, eval_mode: bool): ''' - RLHF calling - rlhf framework source: policy_output = self.policy.forward_step(query) + ChatLearn calling + chatlearn framework source: policy_output = self.policy.forward_step(query) :param data: entire global batch?? micro_batch? :return: data using current microbatch diff --git a/examples/megatron/models/old_value_inference.py b/examples/megatron/models/old_value_inference.py index 2a3eeb4e..acfd75d6 100644 --- a/examples/megatron/models/old_value_inference.py +++ b/examples/megatron/models/old_value_inference.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. 
+# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,19 +15,19 @@ """old value inference""" import torch -from megatron import get_args, get_tokenizer -from megatron import print_rank_0 +from megatron.training import get_args, get_tokenizer +from megatron.training import print_rank_0 from megatron.core import mpu from megatron.training import get_model -from models.value_model import ValueModel -from chatlearn import RLHFMegatronModule +from chatlearn import MegatronModule from chatlearn.utils import to_device -from .constants_ppo import get_ltor_masks_and_position_ids +from .value_model import ValueModel +from .constants import get_ltor_masks_and_position_ids_rlhf from .forward_step import forward_step_helper -class ValueInference(RLHFMegatronModule): +class ValueInference(MegatronModule): """ValueInference""" def setup(self): @@ -41,7 +41,6 @@ def setup(self): assert len(model) == 1, "Above condition should have caught this" self.model = model[0] self.model.eval() - return 'ok' def model_provider(self, pre_process=True, post_process=True): """Build the model.""" @@ -60,7 +59,7 @@ def forward_step(self, data, iteration=None): # Run infernece # ============= with torch.no_grad(): - attention_mask, position_ids = get_ltor_masks_and_position_ids(all_tokens) + attention_mask, position_ids = get_ltor_masks_and_position_ids_rlhf(all_tokens) # logits will be meanigful only in the last pipeline stage. output_values = forward_step_helper(self.model, all_tokens, position_ids, attention_mask, pooling=True) diff --git a/examples/megatron/models/policy_model.py b/examples/megatron/models/policy_model.py index 59424f64..aae8da5a 100644 --- a/examples/megatron/models/policy_model.py +++ b/examples/megatron/models/policy_model.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. 
+# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,16 +14,19 @@ # ============================================================================== """policy model""" -from megatron import get_args -from megatron.global_vars import get_tokenizer -from megatron.model.gpt_model import GPTModel -from megatron.model.language_model import parallel_lm_logits +import torch + +from megatron.training import get_args +from megatron.core import tensor_parallel +from megatron.training.global_vars import get_tokenizer +from megatron.legacy.model.gpt_model import GPTModel +from megatron.legacy.model.language_model import parallel_lm_logits from chatlearn.models.megatron.ops.policy_gradient import tensor_decomp_pg_loss -from .constants_ppo import select_actions_from_right_padded -from .utils import get_advantages_and_returns -from .utils import has_config_in_args -from .utils import get_eos_id +from .utils import get_advantages_and_returns, has_config_in_args, get_eos_id +from .constants import TrainerEngine +from .constants import select_actions_from_right_padded + class PolicyModel(GPTModel): @@ -38,7 +41,7 @@ def __init__(self, self.args = get_args() if has_config_in_args(GPTModel): # new API - from megatron.arguments import core_transformer_config_from_args # pylint: disable=import-outside-toplevel + from megatron.training.arguments import core_transformer_config_from_args # pylint: disable=import-outside-toplevel config = core_transformer_config_from_args(self.args) super().__init__(config, num_tokentypes, parallel_output, pre_process, post_process) else: @@ -55,21 +58,119 @@ def forward_lm(self, input_ids, position_ids, attention_mask, inference_params=N return lm_output def forward(self, all_token_ids, all_position_ids, all_token_attention_mask, training_inputs=None, - inference_params=None): + inference_params=None, 
inference_config=None): hiddens = self.forward_lm(all_token_ids, all_position_ids, all_token_attention_mask, inference_params=inference_params) # [b, s, v] # note in middle pipeline, this all_token_logits is just a hidden if self.post_process: # is last pipeline stage, if inference return the last logits. if training, return the loss + use_parallel_output = inference_config["parallel_output"] if inference_config is not None and \ + "parallel_output" in inference_config else self.parallel_output + + if inference_config is not None and "DPO_labels" in inference_config: + assert get_args().trainer_engine in [TrainerEngine.DPO.value, TrainerEngine.ONLINE_DPO.value] + if training_inputs is None: + training_inputs = {} + training_inputs["labels"] = inference_config["DPO_labels"] + if get_args().trainer_engine == TrainerEngine.DPO: + assert "prompt_id_lens" in inference_config + assert "orig_mask" in inference_config + training_inputs["prompt_id_lens"] = inference_config["prompt_id_lens"] + all_token_attention_mask = inference_config["orig_mask"] + use_parallel_output = False return self.post_language_model_processing( hiddens, training_inputs, self.language_model.output_layer.weight if self.untie_embeddings_and_output_weights else self.shared_embedding_or_output_weight(), - self.parallel_output) + use_parallel_output, + attention_mask=all_token_attention_mask) else: return hiddens + def post_process_rlhf(self, training_inputs, all_token_logits): + old_logprobs = training_inputs['action_logprobs'] # [b, responses size] + old_values = training_inputs['action_values'] # [b, responses size] + old_rewards = training_inputs['action_rewards'] # [b, responses size] + response_length = old_rewards.shape[1] + + all_token_ids = training_inputs["all_token_ids_right_padded"] + + # For a proper positional encoding in case of left padding + advantages, returns = get_advantages_and_returns(self.args, + old_values, old_rewards, response_length + ) + assert advantages.size(1) == 
returns.size(1) == response_length + # start = query_tensors.shape[1] - 1 for left padded + # end = action_start + response_length + # Note the token logits to get loss is only the actions. query doesn't have loss. + + # all_token_ids = [pad, q1, q2, q3, a1, a2, a3, pad, pad] + # [pad, q1, q2, q3, a1, a2, a3, a4, a5] + # start = 4-1 = 3, end = 3 + 5 = 8 + # action_loss_mask = notpad(q3, a1, a2, a3, pad,]), notpad([q3, a1, a2, a3, a4], ) + # action_token_logits = logits(q3, a1, a2, a3, pad), logits(q3, a1, a2, a3, a4) + # action_ids = [a1, a2, a3, pad, pad], [a1, a2, a3, a4, a5] + + action_loss_mask = select_actions_from_right_padded(ts=training_inputs["all_token_loss_mask"], + action_starts=training_inputs["action_starts"] - 1, + # because align iwth logits index + response_size=response_length, + pad_value=0, dim=-1).contiguous() + + # because we want the logits from the previous token + # because it's -1 at top and then action -1 it hsould remain in bound + action_token_logits = select_actions_from_right_padded(ts=all_token_logits[:, :-1, :], + action_starts=training_inputs["action_starts"] - 1, + response_size=response_length, + pad_value=1.0, dim=-2).contiguous() + action_ids = select_actions_from_right_padded(ts=all_token_ids, + action_starts=training_inputs["action_starts"], + response_size=response_length, + pad_value=get_eos_id(self.tokenizer), dim=-1).contiguous() + + loss = tensor_decomp_pg_loss(self.args, + action_token_logits=action_token_logits, # [b,response size] + action_ids=action_ids, # [b, response size] + action_loss_mask=action_loss_mask, # [b, response size] + old_logprobs=old_logprobs, # [b, response size] + advantages=advantages, # [b, response size] + stats=self.stats) # [b, response_size] remove last logit because it's EOS + + self.approx_kl = self.stats["policy/approx_kl"] # Update kl controller stats + return loss.contiguous() # [b,response_size] + + + def post_process_dpo(self, logits, training_inputs, attention_mask, 
average_log_prob=False): + assert "labels" in training_inputs and training_inputs['labels'] is not None + labels = training_inputs['labels'] + prompt_id_lens = training_inputs['prompt_id_lens'] + assert logits.shape[:-1] == labels.shape, \ + f"Mismatch tensor shape between logits.shape[:-1] ({logits.shape[:-1]}) and labels.shape ({labels.shape})" + loss_masks = attention_mask.clone().bool() + loss_masks = loss_masks.squeeze(1) + for mask, source_len in zip(loss_masks, prompt_id_lens): + mask[:source_len] = False + labels[loss_masks == False] = 0 # pylint: disable=singleton-comparison + + loss_masks = loss_masks[:, 1:] + logits = logits[:, 1:, :] + labels = labels[:, 1:] + + per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2) + + if average_log_prob: + return (per_token_logps * loss_masks).sum(-1) / loss_masks.sum(-1) + else: + return (per_token_logps * loss_masks).sum(-1) + + def post_process_online_dpo(self, sbv_all_token_logits, training_inputs): + assert "labels" in training_inputs and training_inputs['labels'] is not None + CE_loss = self.cross_entropy_loss(sbv_all_token_logits, + training_inputs['labels'], + self.args.fp16_lm_cross_entropy) + return CE_loss + def post_language_model_processing(self, hiddens, training_inputs, logit_weights, - parallel_output): + parallel_output, attention_mask=None): # is last pipeline stage, if inference return the last logits. if training, return the loss @@ -80,60 +181,110 @@ def post_language_model_processing(self, hiddens, training_inputs, logit_weights hiddens, logit_weights, parallel_output) + + sbv_all_token_logits = all_token_logits all_token_logits = all_token_logits.transpose(0, 1).contiguous() if inference_only: # [s b h] => [b s h] # TODO do we need to transpose???? 
+ if self.args.trainer_engine == TrainerEngine.DPO: + return self.post_process_dpo(all_token_logits, training_inputs, attention_mask) return all_token_logits else: - old_logprobs = training_inputs['action_logprobs'] # [b, responses size] - old_values = training_inputs['action_values'] # [b, responses size] - old_rewards = training_inputs['action_rewards'] # [b, responses size] - response_length = old_rewards.shape[1] - - all_token_ids = training_inputs["all_token_ids_right_padded"] - - # For a proper positional encoding in case of left padding - advantages, returns = get_advantages_and_returns(self.args, - old_values, old_rewards, response_length - ) - assert advantages.size(1) == returns.size(1) == response_length - # start = query_tensors.shape[1] - 1 for left padded - # end = action_start + response_length - # Note the token logits to get loss is only the actions. query doesn't have loss. - - # all_token_ids = [pad, q1, q2, q3, a1, a2, a3, pad, pad] - # [pad, q1, q2, q3, a1, a2, a3, a4, a5] - # start = 4-1 = 3, end = 3 + 5 = 8 - # action_loss_mask = notpad(q3, a1, a2, a3, pad,]), notpad([q3, a1, a2, a3, a4], ) - # action_token_logits = logits(q3, a1, a2, a3, pad), logits(q3, a1, a2, a3, a4) - # action_ids = [a1, a2, a3, pad, pad], [a1, a2, a3, a4, a5] - - action_loss_mask = select_actions_from_right_padded(ts=training_inputs["all_token_loss_mask"], - action_starts=training_inputs["action_starts"] - 1, - # because align iwth logits index - response_size=response_length, - pad_value=0, dim=-1).contiguous() - - # because we want the logits from the previous token - # because it's -1 at top and then action -1 it hsould remain in bound - action_token_logits = select_actions_from_right_padded(ts=all_token_logits[:, :-1, :], - action_starts=training_inputs["action_starts"] - 1, - response_size=response_length, - pad_value=1.0, dim=-2).contiguous() - action_ids = select_actions_from_right_padded(ts=all_token_ids, - action_starts=training_inputs["action_starts"], - 
response_size=response_length, - pad_value=get_eos_id(self.tokenizer), dim=-1).contiguous() - - loss = tensor_decomp_pg_loss(self.args, - action_token_logits=action_token_logits, # [b,response size] - action_ids=action_ids, # [b, response size] - action_loss_mask=action_loss_mask, # [b, response size] - old_logprobs=old_logprobs, # [b, response size] - advantages=advantages, # [b, response size] - stats=self.stats) # [b, response_size] remove last logit because it's EOS - - self.approx_kl = self.stats["policy/approx_kl"] # Update kl controller stats - return loss.contiguous() # [b,response_size] + if self.args.trainer_engine == TrainerEngine.DPO: + return self.post_process_dpo(all_token_logits, training_inputs, attention_mask) + elif self.args.trainer_engine == TrainerEngine.RLHF: + return self.post_process_rlhf(training_inputs, all_token_logits) + elif self.args.trainer_engine == TrainerEngine.ONLINE_DPO: + return self.post_process_online_dpo(sbv_all_token_logits, training_inputs) + elif self.args.trainer_engine == TrainerEngine.GRPO: + return self.post_process_grpo(all_token_logits, sbv_all_token_logits, training_inputs) + + def post_process_grpo(self, all_token_logits, sbv_all_token_logits, training_inputs): + all_token_ids = training_inputs["all_token_ids_right_padded"] + adv_scores = torch.FloatTensor(training_inputs['advantages']) + + old_logprobs = training_inputs['action_logprobs'] #[b, responses size] + response_length = old_logprobs.shape[1] + action_loss_mask = select_actions_from_right_padded(ts=training_inputs["all_token_loss_mask"], + action_starts=training_inputs["action_starts"] - 1, # because align iwth logits index + response_size=response_length, + pad_value=0, dim=-1).contiguous() + + assert action_loss_mask.size(0) == len(adv_scores) + + self.stats["policy/adv_mean"] = adv_scores.mean() + self.stats["policy/adv_std"] = adv_scores.std() + + adv = [] + for i, adv_score in enumerate(adv_scores): + adv.append(adv_score * 
action_loss_mask[i].float()) + advantages = torch.stack(adv) + assert advantages.size(0) == action_loss_mask.size(0) + assert advantages.size(1) == action_loss_mask.size(1) == response_length + + # because we want the logits from the previous token + # because it's -1 at top and then action -1 it hsould remain in bound [seem not true here] + action_token_logits = select_actions_from_right_padded(ts=all_token_logits, + action_starts=training_inputs["action_starts"]-1, + response_size=response_length, + pad_value=1.0, dim=-2).contiguous() + action_ids = select_actions_from_right_padded(ts=all_token_ids, + action_starts=training_inputs["action_starts"], + response_size=response_length, + pad_value=self.tokenizer.eod, dim=-1).contiguous() + + loss = tensor_decomp_pg_loss(self.args, + action_token_logits=action_token_logits, # [b,response size] + action_ids=action_ids, # [b, response size] + action_loss_mask=action_loss_mask, # [b, response size] + old_logprobs=old_logprobs, # [b, response size] + advantages=advantages, # [b, response size] + stats=self.stats) # [b, response_size] remove last logit because it's EOS + assert not torch.isnan(loss).any(), "pg loss is nan" + #### KL Loss #### + forward_logprob = self.cross_entropy_loss(sbv_all_token_logits, + training_inputs["labels"], + get_args().fp16_lm_cross_entropy) * -1 + ref_logprobs = training_inputs['ref_logprobs'] + + action_forward_logprobs = select_actions_from_right_padded(ts=forward_logprob, + action_starts=training_inputs["action_starts"]-1, # because align iwth logits index + response_size=response_length, + pad_value=0, dim=-1).contiguous() + action_ref_logprobs = select_actions_from_right_padded(ts=ref_logprobs, + action_starts=training_inputs["action_starts"]-1, # because align iwth logits index + response_size=response_length, + pad_value=0, dim=-1).contiguous() + assert action_forward_logprobs.size(-1) == action_ref_logprobs.size(-1) == loss.size(-1) + + if get_args().numerical_stable: + logprob_diff = 
torch.clamp(action_ref_logprobs - action_forward_logprobs, min=-1e5, max=1e5) + log_ratio = (logprob_diff) * action_loss_mask + # numerical approximate an exponential for numerical stability + ratio = 1 + log_ratio + torch.square(log_ratio) / 2 + else: + logprob_diff = action_ref_logprobs - action_forward_logprobs + log_ratio = (logprob_diff) * action_loss_mask + ratio = torch.exp(log_ratio) + kl_loss = (ratio - log_ratio - 1).contiguous() + assert not torch.isnan(loss).any(), "kl loss is nan" + self.approx_kl = self.stats["policy/approx_kl"] # Update kl controller stats + return loss.contiguous(), kl_loss.contiguous() # [b,response_size] + + def cross_entropy_loss(self, sbv_all_token_logits, labels, fp16_lm_cross_entropy): + #all_token_logits is [s,b,vp] + labels = labels.transpose(0, 1).contiguous() #[s,b] + # if flash_cross_entropy is not None: + # loss = flash_cross_entropy(output.flatten(0, 1), labels.flatten()).view(*labels.size()) + + if fp16_lm_cross_entropy: + assert sbv_all_token_logits.dtype == sbv_all_token_logits.half + loss = tensor_parallel.vocab_parallel_cross_entropy(sbv_all_token_logits, labels) + else: + loss = tensor_parallel.vocab_parallel_cross_entropy(sbv_all_token_logits.float(), labels) + + # [s b] => [b, s] + loss = loss.transpose(0, 1).contiguous() + return loss diff --git a/examples/megatron/models/policy_trainer.py b/examples/megatron/models/policy_trainer.py index e2d38945..d5a97e69 100644 --- a/examples/megatron/models/policy_trainer.py +++ b/examples/megatron/models/policy_trainer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -18,19 +18,23 @@ import numpy as np import torch -from megatron import get_num_microbatches -from megatron import get_tokenizer -from megatron import print_rank_0 +import torch.nn.functional as F + +from megatron.training import get_args +from megatron.training import get_num_microbatches +from megatron.training import get_tokenizer +from megatron.training import print_rank_0 from megatron.core import mpu -from megatron.utils import average_losses_across_data_parallel_group -from megatron.utils import calc_params_l2_norm -from models.policy_model import PolicyModel -from models.utils import training_log +from megatron.training.utils import average_losses_across_data_parallel_group +from megatron.training.utils import calc_params_l2_norm +from megatron.training.utils import get_ltor_masks_and_position_ids from chatlearn.utils import to_device +from .policy_model import PolicyModel +from .utils import training_log, get_eos_id, get_padding_length, pad_to_length from .base_trainer import BaseTrainer -from .constants_ppo import select_actions_from_right_padded, get_ltor_masks_and_position_ids, pad_to_max_len -from .utils import get_eos_id +from .constants import TrainerEngine +from .constants import select_actions_from_right_padded, get_ltor_masks_and_position_ids_rlhf, pad_to_max_len class AdaptiveKLController: @@ -74,6 +78,8 @@ def setup(self): self.kl_ctl = AdaptiveKLController( self.args.init_kl_coef, self.args.target, self.args.horizon ) + self.loss_mean = 0.0 + self.acc_mean = 0.0 def model_provider(self, pre_process=True, post_process=True): """Build the model.""" @@ -91,24 +97,32 @@ def model_provider(self, pre_process=True, post_process=True): model = convert_layer_to_lora(model) return model - def get_batch(self, batch_data): - """Generate a batch - "all_token_ids_right_padded": torch.tensor([[p,p,5,6,7], [p,p,p,8,9]], dtype=torch.long, device=device), - "action_start_indices": torch.tensor([[10,100,p,p,p], [11,p,p,p,p]], dtype=torch.long, device=device), - 
"action_logprobs": torch.randn([bs, 5], dtype=torch.float32, device=device), - "action_values": torch.randn([bs, 5], dtype=torch.float32, device=device), - "action_rewards": torch.randn([bs, 5], dtype=torch.float32, device=device), - "loss_mask" - """ - args = self.args - data_b = next(batch_data) + def get_dpo_batch(self, data_b): + # TODO: move to ChatLearn framework later. add pad to max length config + chosen_ids = to_device("cuda", data_b["chosen"]) + rejected_ids = to_device("cuda", data_b["rejected"]) + chosen_mask = to_device("cuda", data_b["chosen_mask"]) + rejected_mask = to_device("cuda", data_b["rejected_mask"]) + prompt_id_lens = to_device("cuda", data_b["prompt_id_lens"]) - # TODO: move to RLHF framework later. add pad to max length config - all_token_ids_right_padded = pad_to_max_len(data_b["all_token_ids_right_padded"], args.seq_length, + inputs = { + "reference_chosen_logps": data_b["reference_chosen_logps"], + "reference_rejected_logps": data_b["reference_rejected_logps"], + "chosen": chosen_ids, + "chosen_mask": chosen_mask, + "rejected": rejected_ids, + "rejected_mask": rejected_mask, + "prompt_id_lens": prompt_id_lens + } + return inputs + + def get_rlhf_batch(self, data_b): + # TODO: move to ChatLearn framework later. 
add pad to max length config + all_token_ids_right_padded = pad_to_max_len(data_b["all_token_ids_right_padded"], self.args.seq_length, pad_value=get_eos_id(get_tokenizer())) - all_token_loss_mask = pad_to_max_len(data_b["loss_mask"], args.seq_length, pad_value=0) + all_token_loss_mask = pad_to_max_len(data_b["loss_mask"], self.args.seq_length, pad_value=0) - all_token_attention_mask, all_token_position_ids = get_ltor_masks_and_position_ids( + all_token_attention_mask, all_token_position_ids = get_ltor_masks_and_position_ids_rlhf( all_token_ids_right_padded) inputs = { @@ -122,16 +136,330 @@ def get_batch(self, batch_data): "action_values": data_b["action_values"].float(), "action_rewards": data_b["action_rewards"].float(), } + return inputs + + def get_online_dpo_batch(self, data_b): + ref_logprobs = data_b["ref_logprobs"].float() + no_padded_query_ids = data_b["no_padded_query_ids"].long() + prompt_dicts = data_b["str_prompts"] + old_logprobs = data_b["old_logprobs"].float() + + tokens_ = data_b["all_token_ids_right_padded"].long() + labels = tokens_[:, 1:].contiguous() + tokens = tokens_[:, :-1].contiguous() + + if tokens.size(0) < self.args.train_to_compare_num_responses: + num_queries = 1 + num_responses = torch.tensor([tokens.size(0)], dtype=torch.long) + else: + assert tokens.size( + 0) % self.args.train_to_compare_num_responses == 0, "need to %0 to process all response of a query" + + num_queries = int(tokens.size(0) / self.args.train_to_compare_num_responses) + + num_responses = torch.tensor([self.args.train_to_compare_num_responses] * num_queries, dtype=torch.long) # nq * 1 + + for start in range(0, len(tokens), self.args.train_to_compare_num_responses): + + should_be_same = no_padded_query_ids[start:min(start + self.args.train_to_compare_num_responses, len(tokens))] + assert (should_be_same == should_be_same[0]).all(), f"{should_be_same}, {should_be_same[0]}" + assert (should_be_same == should_be_same[0]).all(), \ + f"{should_be_same}, 
{prompt_dicts[start:min(start + self.args.train_to_compare_num_responses, len(tokens))]}" + + rw_scores = torch.sum(data_b["action_rewards"], dim=-1).view(num_queries, -1) # nq * nr + + # TODO tianhang move to sayang's framework later. add pad to max length config + tokens = pad_to_max_len(tokens, self.args.seq_length, pad_value=get_eos_id(get_tokenizer())) + labels = pad_to_max_len(labels, self.args.seq_length, pad_value=get_eos_id(get_tokenizer())) + all_token_loss_mask = pad_to_max_len(data_b["loss_mask"], self.args.seq_length, pad_value=0) + action_starts = data_b['action_start_indices'] + # don't count loss on the prompt tokens (only count loss on the last prompt tokens) since last rpompt token gives the firs taction + # thus loss_mask[0: action start - 1] = 0 + for i in range(all_token_loss_mask.size(0)): + all_token_loss_mask[i, 0:action_starts[i]-1] = 0 + + # sft is [1:] because we want loss on the last prompt token to generate the first action + # because sft loss mask = 0 if in prompt else 1. thus [1:] means last prompt token = 1 before that is all 0 + # however, our loss mask is different and calculated above. Which is 1 for each all_token, + # first stop token + 1: is 0. Then 0: action start ind - 1 is 0. thus last prompt = 1. action = 1. + # but because last input tokne is gone, we don't need last loss mask also + loss_mask = all_token_loss_mask.long() + loss_mask[:, -1] = 0 + + # Get the masks and position ids. 
+ attention_mask, _, position_ids = get_ltor_masks_and_position_ids( + tokens, + get_tokenizer().eod, + self.args.reset_position_ids, + self.args.reset_attention_mask, + self.args.eod_mask_loss, + ) + + inputs = { + "all_token_ids_right_padded": tokens, # padded to seqlen for seqparallel + "all_token_attention_mask": attention_mask, + "all_token_position_ids": position_ids, + "loss_mask": loss_mask, + "rw_scores":rw_scores, + "labels":labels, # this is to get parallel version of -logprob using the NLL loss + "num_responses":num_responses, + "ref_logprobs":ref_logprobs, #b, before pad tokens.size(1) - 1 + "old_logprobs": old_logprobs # b, before pad tokens.size(1) - 1 + } + assert old_logprobs.size() == ref_logprobs.size(), f"{old_logprobs.size()} == {ref_logprobs.size()}" + + assert tokens.size() == loss_mask.size(), f"tokens size: {tokens.size()}, loss_mask size: {loss_mask.size()}" + assert tokens.size(1) == self.args.seq_length, f"{tokens.size(1)} == {self.args.seq_length}" + return inputs + + def get_grpo_batch(self, data_b): + self._logger.info("get grpo batch") + args = self.args + ref_logprobs = data_b["ref_logprobs"].float() + old_logprobs = data_b["old_logprobs"].float() + # prompt_dicts = data_b["prompt_dicts"] + + tokens_ = data_b["all_token_ids_right_padded"].long() + labels = tokens_[:, 1:].contiguous() + tokens = tokens_[:, :-1].contiguous() + + if tokens.size(0) < args.train_to_compare_num_responses: + num_queries = 1 + num_responses = torch.tensor([tokens.size(0)], dtype=torch.long) + else: + assert tokens.size( + 0) % get_args().train_to_compare_num_responses == 0, "need to %0 to process all response of a query" + num_queries = int(tokens.size(0) / args.train_to_compare_num_responses) + + num_responses = torch.tensor([args.train_to_compare_num_responses] * num_queries, dtype=torch.long) # nq * 1 + + advantages = data_b["advantages"] + tokens = pad_to_max_len(tokens, args.seq_length, + pad_value=get_tokenizer().eod) + labels = pad_to_max_len(labels, 
args.seq_length, + pad_value=get_tokenizer().eod) + all_token_loss_mask = pad_to_max_len(data_b["loss_mask"], args.seq_length, pad_value=0) + action_starts = data_b['action_start_indices'] + + # don't count loss on the prompt tokens (only count loss on the last prompt tokens) since last rpompt token gives the firs taction + # thus loss_mask[0: action start - 1] = 0 + # seem not affect the pg loss + for i in range(all_token_loss_mask.size(0)): + all_token_loss_mask[i, 0:action_starts[i]-1] = 0 + + # sft is [1:] because we want loss on the last prompt token to generate the first action + # because sft loss mask = 0 if in prompt else 1. thus [1:] means last prompt token = 1 before that is all 0 + # however, our loss mask is different and calculated above. Which is 1 for each all_token, + # first stop token + 1: is 0. Then 0: action start ind - 1 is 0. thus last prompt = 1. action = 1. + # but because last input tokne is gone, we don't need last loss mask also + loss_mask = all_token_loss_mask.long() + loss_mask[:, -1] = 0 #? + + # Get the masks and position ids. 
+ attention_mask, _, position_ids = get_ltor_masks_and_position_ids( + tokens, + get_tokenizer().eod, + args.reset_position_ids, + args.reset_attention_mask, + args.eod_mask_loss, + ) + + + inputs = { + "all_token_ids_right_padded": tokens, # padded to seqlen for seqparallel + "all_token_attention_mask": attention_mask, + "all_token_position_ids": position_ids, + "all_token_loss_mask": loss_mask, + "advantages":advantages, + "action_starts": data_b['action_start_indices'], + "action_logprobs" : data_b["action_logprobs"].float(), #response size + "num_responses":num_responses, + "labels": labels, + "ref_logprobs":ref_logprobs, #b, before pad tokens.size(1) - 1 + "old_logprobs": old_logprobs # b, before pad tokens.size(1) - 1 + } + assert old_logprobs.size() == ref_logprobs.size(), f"{old_logprobs.size()} == {ref_logprobs.size()}" + assert tokens.size() == all_token_loss_mask.size(), f"tokens size: {tokens.size()}, loss_mask size: {all_token_loss_mask.size()}" + assert tokens.size(1) == args.seq_length, f"{tokens.size(1)} == {args.seq_length}" + return inputs + + + def get_batch(self, batch_data): + """Generate a batch + "all_token_ids_right_padded": torch.tensor([[p,p,5,6,7], [p,p,p,8,9]], dtype=torch.long, device=device), + "action_start_indices": torch.tensor([[10,100,p,p,p], [11,p,p,p,p]], dtype=torch.long, device=device), + "action_logprobs": torch.randn([bs, 5], dtype=torch.float32, device=device), + "action_values": torch.randn([bs, 5], dtype=torch.float32, device=device), + "action_rewards": torch.randn([bs, 5], dtype=torch.float32, device=device), + "loss_mask" + """ + args = self.args + data_b = next(batch_data) + assert isinstance(data_b, dict), data_b + + if args.trainer_engine == TrainerEngine.DPO: + inputs = self.get_dpo_batch(data_b) + elif args.trainer_engine == TrainerEngine.RLHF: + inputs = self.get_rlhf_batch(data_b) + elif args.trainer_engine == TrainerEngine.ONLINE_DPO: + inputs = self.get_online_dpo_batch(data_b) + elif args.trainer_engine == 
TrainerEngine.GRPO: + inputs = self.get_grpo_batch(data_b) + else: + raise RuntimeError(f"Error trainer_engine {args.trainer_engine}, \ + expect one of {list(TrainerEngine)}.") for k, v in inputs.items(): inputs[k] = to_device("cuda", v) return inputs - def aggregate_loss_func(self, inputs, losses): # [b, s] - # losses = losses.float() - # b = losses.size(0) - # loss = torch.sum(losses.view(-1)) / b + def dpo_loss_fn(self, policy_chosen_logps, policy_rejected_logps, reference_chosen_logps, reference_rejected_logps): + pi_logratios = policy_chosen_logps - policy_rejected_logps + ref_logratios = reference_chosen_logps - reference_rejected_logps + logits = pi_logratios - ref_logratios + + use_ipo = self.model_args.get("use_ipo", False) + if use_ipo: + losses = (logits - 1 / (2 * self.args.dpo_weight)) ** 2 # Eq. 17 of https://arxiv.org/pdf/2310.12036v2.pdf + else: + # Eq. 3 https://ericmitchell.ai/cdpo.pdf; label_smoothing=0 gives original DPO (Eq. 7 of https://arxiv.org/pdf/2305.18290.pdf) + label_smoothing = self.model_args.get("label_smoothing", 0.0) + losses = ( + -F.logsigmoid(self.args.dpo_weight * logits) * (1 - label_smoothing) + - F.logsigmoid(-self.args.dpo_weight * logits) * label_smoothing + ) + + loss = losses.mean() + chosen_rewards = self.args.dpo_weight * (policy_chosen_logps - reference_chosen_logps).detach() + rejected_rewards = self.args.dpo_weight * (policy_rejected_logps - reference_rejected_logps).detach() + + return loss, chosen_rewards, rejected_rewards + + def get_dpo_loss(self, inputs, losses): + chosen_ids = inputs["chosen"].squeeze(1) + reference_chosen_logps, reference_rejected_logps = inputs["reference_chosen_logps"], inputs["reference_rejected_logps"] + chosen_logps, rejected_logps = losses[:chosen_ids.shape[0]], losses[chosen_ids.shape[0]:] + preference_loss, chosen_reward, reject_reward = self.dpo_loss_fn( + chosen_logps, rejected_logps, reference_chosen_logps, reference_rejected_logps) + + loss = preference_loss + accuracy = 
(chosen_reward > reject_reward).float().mean().item() + averaged_loss = average_losses_across_data_parallel_group([loss]) + self.loss_mean = 0.9 * self.loss_mean + 0.1 * loss.item() + self.acc_mean = 0.9 * self.acc_mean + 0.1 * accuracy + self.stats["accuracy"] = self.acc_mean + self.stats["dpo_loss"] = self.loss_mean + + return loss, {'policy lm avg loss': averaged_loss[0], 'policy lm loss': loss} + + def get_online_dpo_loss(self, inputs, losses): + loss_mask, rw_scores, num_responses, ref_logprobs, old_logprobs = inputs["loss_mask"], inputs["rw_scores"], \ + inputs["num_responses"], inputs["ref_logprobs"], inputs["old_logprobs"] + assert old_logprobs.size() == ref_logprobs.size(), f"{old_logprobs.size()} == {ref_logprobs.size()}" + + args = get_args() + + logprobs = -losses.float() # this loss is nll which is -logprob + + reference_logprobs = ref_logprobs.float() + reference_logprobs = pad_to_max_len(reference_logprobs, args.seq_length, pad_value=0) + + old_all_logprobs = old_logprobs.float() + old_all_logprobs = pad_to_max_len(old_all_logprobs, args.seq_length, pad_value=0) + + assert loss_mask.size(1) == args.seq_length, f"{loss_mask.size()}" + + logprobs = logprobs * loss_mask + old_all_logprobs = old_all_logprobs * loss_mask + reference_logprobs = reference_logprobs * loss_mask + + clamp_dpo_logprobs = self.model_args.get("clamp_dpo_logprobs", True) + if clamp_dpo_logprobs: + clamp_dpo_logprobs_min = self.model_args.get("clamp_dpo_logprobs_min", -1e10) + clamp_dpo_logprobs_max = self.model_args.get("clamp_dpo_logprobs_max", 0) + old_all_logprobs = torch.clamp(old_all_logprobs, min=clamp_dpo_logprobs_min, max=clamp_dpo_logprobs_max) + logprobs = torch.clamp(logprobs, min=clamp_dpo_logprobs_min, max=clamp_dpo_logprobs_max) + reference_logprobs = torch.clamp(reference_logprobs, min=clamp_dpo_logprobs_min, max=clamp_dpo_logprobs_max) + + + length = loss_mask.sum(-1) + + scores = logprobs.sum(-1) / length + reference_scores = reference_logprobs.sum(-1) / length + 
+ idx = 0 + loss = 0 + avg_dpo_loss = 0 + + for i, n in enumerate(num_responses): + diff = (scores[idx:idx + n] - reference_scores[idx:idx + n]).unsqueeze(0) - ( + scores[idx:idx + n] - reference_scores[idx:idx + n]).unsqueeze(-1) # b * b + rw_score = rw_scores[i].squeeze(0) + rw_diff = rw_score.unsqueeze(0) - rw_score.unsqueeze(-1) # b * b + + a = torch.lt(rw_diff, 0).numel() + b = rw_diff.numel() + negative_indices = torch.nonzero(torch.lt(rw_diff, 0)) + c = len(negative_indices) + batch_size = rw_diff.shape[0] + num_zeros = batch_size * (batch_size - 1) / 2 - c + self.stats['rw_diff_lt_0'] = a + self.stats['rw_diff'] = b + self.stats['negative_indices'] = c + self.stats['negative_indices_ratio'] = float(c/a) + self.stats['num_zeros'] = num_zeros + self.stats['zero_ratio'] = float(num_zeros/a) + + if len(negative_indices) == 0: + continue + diff_transformed = diff[negative_indices[:, 0], negative_indices[:, 1]] * -1 + + use_ipo = self.model_args.get("use_ipo", False) + if use_ipo: + dpo_loss = ((diff_transformed - 1 / (2 * args.dpo_weight)) ** 2).mean() + else: + dpo_loss = -F.logsigmoid(args.dpo_weight * diff_transformed).mean() + if not torch.isnan(dpo_loss): + avg_dpo_loss += dpo_loss + loss += dpo_loss + idx += n + if loss == 0.0: + loss = torch.tensor(0.0, device=logprobs.device, requires_grad=True) + if avg_dpo_loss == 0.0: + avg_dpo_loss = torch.tensor(0.0, device=logprobs.device) + loss = loss / (len(num_responses) + 1e-5) + avg_dpo_loss = avg_dpo_loss / (len(num_responses) + 1e-5) + + averaged_loss = average_losses_across_data_parallel_group([loss, avg_dpo_loss]) + return loss, {'lm_loss': averaged_loss[0], 'dpo_loss': averaged_loss[1]} + + def get_grpo_loss(self, inputs, losses): + ppo_losses, kl_losses = losses #[b, response_size] + + old_logprobs = inputs['action_logprobs'] #[b, responses size] + response_length = old_logprobs.shape[1] + # Note the token logits to get loss is only the actions. query doesn't have loss. 
+ action_loss_mask = select_actions_from_right_padded(ts=inputs["all_token_loss_mask"], + action_starts=inputs["action_starts"] - 1, + # because aligned with logits index + response_size=response_length, + pad_value=0, + dim=-1).contiguous() + + # action_loss_mask = all_token_loss_mask[:, start:end].contiguous() #[ b, response_size] + action_loss_mask = action_loss_mask.view(-1).float() + ppo_loss = torch.sum(ppo_losses.view(-1) * action_loss_mask) / action_loss_mask.sum() + kl_loss = torch.sum(kl_losses.view(-1) * action_loss_mask) / action_loss_mask.sum() + loss = ppo_loss + kl_loss * get_args().dpo_weight + + # Reduce loss for logging. + averaged_loss = average_losses_across_data_parallel_group([loss, kl_loss]) + self.stats["policy/pg_loss"] = averaged_loss[0] + self.stats["policy/kl_loss"] = averaged_loss[1] + return loss, {'policy pg loss': averaged_loss[0], 'policy kl loss': averaged_loss[1]} + + def get_rlhf_loss(self, inputs, losses): losses = losses.float() # [b, response_size] old_rewards = inputs['action_rewards'] # [b, responses size] @@ -153,13 +481,79 @@ def aggregate_loss_func(self, inputs, losses): # [b, s] self.stats["policy_loss"] = averaged_loss[0] return loss, {'policy lm loss': averaged_loss[0]} + def aggregate_loss_func(self, inputs, losses): # [b, s] + if self.args.trainer_engine == TrainerEngine.DPO: + return self.get_dpo_loss(inputs, losses) + elif self.args.trainer_engine == TrainerEngine.ONLINE_DPO: + return self.get_online_dpo_loss(inputs, losses) + elif self.args.trainer_engine == TrainerEngine.RLHF: + return self.get_rlhf_loss(inputs, losses) + elif self.args.trainer_engine == TrainerEngine.GRPO: + return self.get_grpo_loss(inputs, losses) + else: + raise RuntimeError(f"unknown trainer engine {self.args.trainer_engine}, expect one of {list(TrainerEngine)}") + + def concatenated_inputs(self, chosen_ids, c_mask, reject_ids, r_mask): + """Concatenate the chosen and rejected inputs into a single tensor. + + Args: + batch: A batch of data.
Must contain the keys 'chosen_input_ids' and 'rejected_input_ids', + which are tensors of shape (batch_size, sequence_length). + + Returns: + A dictionary containing the concatenated inputs under the key 'concatenated_input_ids'. + """ + pad_value = get_eos_id(get_tokenizer()) + max_length = max(chosen_ids.shape[1], reject_ids.shape[1]) + + tp_size = self.tensor_model_parallel_size() + sp_enabled = self.megatron_args.sequence_parallel + + max_length = get_padding_length(sp_enabled, tp_size, max_length) + + inputs_ids = torch.cat( + ( + pad_to_length(chosen_ids, max_length, pad_value), + pad_to_length(reject_ids, max_length, pad_value), + ), + dim=0, + ) + + max_length = max(c_mask.shape[1], r_mask.shape[1]) + max_length = get_padding_length(sp_enabled, tp_size, max_length) + + att_masks = torch.cat((pad_to_length(c_mask, max_length, 0), pad_to_length(r_mask, max_length, 0)), dim=0) + return inputs_ids, att_masks + def _forward_step(self, batch_data, model): """Forward step.""" inputs = self.get_batch(batch_data) - losses = model.forward(all_token_ids=inputs["all_token_ids_right_padded"], - all_position_ids=inputs["all_token_position_ids"], - all_token_attention_mask=inputs["all_token_attention_mask"], - training_inputs=inputs) + if self.args.trainer_engine == TrainerEngine.DPO: + chosen_ids = inputs["chosen"].squeeze(1) + rejected_ids = inputs["rejected"].squeeze(1) + chosen_mask = inputs["chosen_mask"].squeeze(1) + rejected_mask = inputs["rejected_mask"].squeeze(1) + prompt_id_lens = inputs["prompt_id_lens"] + prompt_id_lens = torch.cat([prompt_id_lens, prompt_id_lens], dim=0) + + inputs_, attn_masks = self.concatenated_inputs(chosen_ids, chosen_mask, rejected_ids, rejected_mask) + + dpo_labels = inputs_.clone() + tokens_ = inputs_[:, :] + attention_mask, position_ids = get_ltor_masks_and_position_ids_rlhf(tokens_) + losses = model.forward(all_token_ids=tokens_, + all_position_ids=position_ids, + all_token_attention_mask=attention_mask, + training_inputs=inputs, + 
inference_config={"DPO_labels":dpo_labels, "prompt_id_lens": prompt_id_lens, "orig_mask": attn_masks}) + elif self.args.trainer_engine in [TrainerEngine.ONLINE_DPO, TrainerEngine.RLHF, TrainerEngine.GRPO]: + losses = model.forward(all_token_ids=inputs["all_token_ids_right_padded"], + all_position_ids=inputs["all_token_position_ids"], + all_token_attention_mask=inputs["all_token_attention_mask"], + training_inputs=inputs) + else: + raise RuntimeError(f"Error trainer_engine {self.args.trainer_engine}, expect one of {list(TrainerEngine)}.") + return losses, partial(self.aggregate_loss_func, inputs) # will call loss_func(loss_mask, output_tensor) to get loss @@ -167,11 +561,11 @@ def _forward_step(self, batch_data, model): def post_update_stuffs(self, loss_dict, skipped_iter, grad_norm, num_zeros_in_grad, iteration): - # only last rank give kl coef. if torch.distributed.get_rank() == ( torch.distributed.get_world_size() - 1): - self.kl_ctl.update(self.stats["policy/approx_kl"], n_steps=self.args.global_batch_size) + if self.args.trainer_engine == TrainerEngine.RLHF: + self.kl_ctl.update(self.stats["policy/approx_kl"], n_steps=self.args.global_batch_size) if not self.args.fix_kl_coef: self.put("kl_coef", self.kl_ctl.value) @@ -185,6 +579,7 @@ def post_update_stuffs(self, loss_dict, skipped_iter, params_norm = None if self.args.log_params_norm: params_norm = calc_params_l2_norm(self.model) + if self.args.log_interval > 0 and iteration % self.args.log_interval == 0: training_log(loss_dict, {}, self.optimizer.param_groups[0]['lr'], diff --git a/examples/megatron/models/reference.py b/examples/megatron/models/reference.py index ac97806d..59974c91 100644 --- a/examples/megatron/models/reference.py +++ b/examples/megatron/models/reference.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,16 +16,19 @@ import torch import torch.nn.functional as F -from megatron import get_args, get_tokenizer -from megatron import print_rank_0 +from megatron.training import get_args, get_tokenizer +from megatron.training import print_rank_0 from megatron.core import mpu from megatron.core.tensor_parallel.utils import VocabUtility from megatron.training import get_model -from models.policy_model import PolicyModel from chatlearn.utils import to_device from chatlearn.utils.megatron_utils import load_checkpoint -from .constants_ppo import get_ltor_masks_and_position_ids +from examples.megatron.data.prompt_dataset import DPOPromptPipeline +from .policy_model import PolicyModel +from .utils import get_eos_id, get_padding_length, pad_to_length +from .constants import get_ltor_masks_and_position_ids_rlhf +from .constants import TrainerEngine from .forward_step import forward_step_helper from .old_policy_inference import PolicyInference @@ -68,9 +71,60 @@ def setup(self): # init num get_args().entropy_num = 0 get_args().latest_entropies = [] - return 'ok' - def score_and_return_on_last_stage(self, tokens): + def build_dataset(self, train_prompts, is_eval=False): + args = get_args() + if args.trainer_engine == TrainerEngine.DPO: + # TODO: read from files + return DPOPromptPipeline( + train_prompts, args.seq_length, get_tokenizer() + ) + return super().build_dataset(train_prompts, is_eval) + + def score_reference_dpo(self, tokens, prompt_id_lens=None, orig_mask=None): + if get_args().trainer_engine == TrainerEngine.DPO: + dpo_labels = tokens[:, :] + tokens_ = tokens[:,:] + else: + dpo_labels = tokens[:, 1:] + tokens_ = tokens[:,:-1] + attention_mask, position_ids = get_ltor_masks_and_position_ids_rlhf(tokens_) + + ref_nll = forward_step_helper( + self.model, tokens_, position_ids, attention_mask, pooling=False, + 
inference_config={"DPO_labels":dpo_labels, "prompt_id_lens": prompt_id_lens, "orig_mask": orig_mask}) + return ref_nll + + def concatenated_inputs(self, chosen_ids, c_mask, reject_ids, r_mask): + """Concatenate the chosen and rejected inputs into a single tensor. + + Args: + batch: A batch of data. Must contain the keys 'chosen_input_ids' and 'rejected_input_ids', + which are tensors of shape (batch_size, sequence_length). + + Returns: + A dictionary containing the concatenated inputs under the key 'concatenated_input_ids'. + """ + max_length = max(chosen_ids.shape[1], reject_ids.shape[1]) + pad_value = get_eos_id(get_tokenizer()) + tp_size = self.tensor_model_parallel_size() + sp_enabled = self.megatron_args.sequence_parallel + + max_length = get_padding_length(sp_enabled, tp_size, max_length) + + inputs_ids = torch.cat( + ( + pad_to_length(chosen_ids, max_length, pad_value), + pad_to_length(reject_ids, max_length, pad_value), + ), + dim=0, + ) + max_length = max(c_mask.shape[1], r_mask.shape[1]) + max_length = get_padding_length(sp_enabled, tp_size, max_length) + att_masks = torch.cat((pad_to_length(c_mask, max_length, 0), pad_to_length(r_mask, max_length, 0)), dim=0) + return inputs_ids, att_masks + + def score_and_return_on_last_stage(self, data): """Function for just scoring. 
Arguments: tokens: prompt tokens extended to be of size [b, max_prompt_length] @@ -87,94 +141,136 @@ def score_and_return_on_last_stage(self, tokens): # Run infernece # ============= with torch.no_grad(): - attention_mask, position_ids = get_ltor_masks_and_position_ids(tokens) + if get_args().trainer_engine == TrainerEngine.DPO: + chosen_ids = to_device("cuda", data["chosen"]).squeeze(1) + rejected_ids = to_device("cuda", data["rejected"]).squeeze(1) + chosen_mask = to_device("cuda", data["chosen_mask"]).squeeze(1) + rejected_mask = to_device("cuda", data["rejected_mask"]).squeeze(1) + prompt_id_lens = to_device("cuda", data["prompt_id_lens"]) + prompt_id_lens = torch.cat([prompt_id_lens, prompt_id_lens], dim=0) - # logits will be meanigful only in the last pipeline stage. - logits = forward_step_helper(self.model, tokens, position_ids, attention_mask) + all_tokens, attn_masks = self.concatenated_inputs(chosen_ids, chosen_mask, rejected_ids, rejected_mask) + ref_nll = self.score_reference_dpo(all_tokens, prompt_id_lens, attn_masks) + if mpu.is_pipeline_last_stage(): + output_log_probs = [ref_nll[:chosen_ids.shape[0]], ref_nll[chosen_ids.shape[0]:]] - if not self._parallel_output: + elif get_args().trainer_engine == TrainerEngine.ONLINE_DPO: + ref_nll = self.score_reference_dpo(to_device("cuda", data["all_tokens"])) if mpu.is_pipeline_last_stage(): - # Always the last stage should have an output. - assert logits is not None - assert logits.size(1) == tokens.size(1), "head(hidden(token))" - log_probs = F.log_softmax(logits, dim=2) - - # Pick the tokens that we need to get the log - # probabilities for. Note that next input token is - # the token which we selected in the current logits, - # so shift by 1. 
- indices = torch.unsqueeze(tokens[:, 1:], 2) - output_log_probs = torch.gather(log_probs, 2, indices).squeeze(2) + output_log_probs = -ref_nll # pylint: disable=invalid-unary-operand-type + elif get_args().trainer_engine in (TrainerEngine.RLHF, TrainerEngine.GRPO): + tokens = to_device("cuda", data["all_tokens"]) + + attention_mask, position_ids = get_ltor_masks_and_position_ids_rlhf(tokens) + + # logits will be meaningful only in the last pipeline stage. + logits = forward_step_helper(self.model, tokens, position_ids, attention_mask) + + if not self._parallel_output: + if mpu.is_pipeline_last_stage(): + # Always the last stage should have an output. + assert logits is not None + assert logits.size(1) == tokens.size(1), "head(hidden(token))" + log_probs = F.log_softmax(logits, dim=2) + + # Pick the tokens that we need to get the log + # probabilities for. Note that next input token is + # the token which we selected in the current logits, + # so shift by 1. + indices = torch.unsqueeze(tokens[:, 1:], 2) + output_log_probs = torch.gather(log_probs, 2, indices).squeeze(2) + else: + if mpu.is_pipeline_last_stage(): + vocab_parallel_logits = logits + logits_max = torch.max(vocab_parallel_logits, dim=-1)[0] + torch.distributed.all_reduce(logits_max, + op=torch.distributed.ReduceOp.MAX, + group=mpu.get_tensor_model_parallel_group()) + logits.sub_(logits_max.unsqueeze(dim=-1)) + # Get the partition's vocab indices + get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size + partition_vocab_size = vocab_parallel_logits.size()[-1] + rank = mpu.get_tensor_model_parallel_rank() + world_size = mpu.get_tensor_model_parallel_world_size() + vocab_start_index, vocab_end_index = get_vocab_range( + partition_vocab_size, rank, world_size) + + indices = torch.unsqueeze(tokens, 2) + + # Create a mask of valid vocab ids (1 means it needs to be masked). 
+ target_mask = (indices < vocab_start_index) | ( + indices >= vocab_end_index) # [b,s] 1 for not in range action, 0 for in range + + masked_actionids = indices - vocab_start_index # [b,s] + # Pick the tokens that we need to get the log + # probabilities for. Note that next input token is + # the token which we selected in the current logits, + # so shift by 1. + masked_actionids[:, 0, :] = 0 + masked_actionids[target_mask] = 0 # [b,s] + logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size) # [n vp] + masked_actionids_1d = masked_actionids.view( + -1) # [n] 0 for not in vocab range, target id -start for in range + arange_1d = torch.arange(start=0, end=logits_2d.size()[0], + device=logits_2d.device) + predicted_logits_1d = logits_2d[ + arange_1d, masked_actionids_1d] # [n] in range target logit, not in range logits[0] + predicted_logits_1d = predicted_logits_1d.clone().contiguous() + action_logits = predicted_logits_1d.view_as(indices) + action_logits[target_mask] = 0.0 # [b s] 0 for not in range, logit for in range + # All reduce is needed to get the chunks from other GPUs. + torch.distributed.all_reduce(action_logits, + op=torch.distributed.ReduceOp.SUM, + group=mpu.get_tensor_model_parallel_group()) + # Sum of exponential of logits along vocab dimension across all GPUs. 
+ exp_logits = vocab_parallel_logits # [ b, s, vp ] + torch.exp(vocab_parallel_logits, out=exp_logits) + sum_exp_logits = exp_logits.sum(dim=-1) + torch.distributed.all_reduce(sum_exp_logits, + op=torch.distributed.ReduceOp.SUM, + group=mpu.get_tensor_model_parallel_group()) + log_probs = action_logits.squeeze(2) - torch.log( + sum_exp_logits + 1e-10) # log ( exp(l) / sum(exp(li) + + # shift by 1 + output_log_probs = log_probs[:, 1:] + output_log_probs = output_log_probs.contiguous() + + assert not torch.isnan(output_log_probs).any(), f"just out ref_logprobs {output_log_probs}" + assert output_log_probs.size(1) == tokens.size(1) - 1, "all token logprob except first one [1:]" else: - if mpu.is_pipeline_last_stage(): - vocab_parallel_logits = logits - logits_max = torch.max(vocab_parallel_logits, dim=-1)[0] - torch.distributed.all_reduce(logits_max, - op=torch.distributed.ReduceOp.MAX, - group=mpu.get_tensor_model_parallel_group()) - logits.sub_(logits_max.unsqueeze(dim=-1)) - # Get the partition's vocab indecies - get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size - partition_vocab_size = vocab_parallel_logits.size()[-1] - rank = mpu.get_tensor_model_parallel_rank() - world_size = mpu.get_tensor_model_parallel_world_size() - vocab_start_index, vocab_end_index = get_vocab_range( - partition_vocab_size, rank, world_size) - - indices = torch.unsqueeze(tokens, 2) - - # Create a mask of valid vocab ids (1 means it needs to be masked). - target_mask = (indices < vocab_start_index) | ( - indices >= vocab_end_index) # [b,s] 1 for not in range action, 0 for in range - - masked_actionids = indices - vocab_start_index # [b,s] - # Pick the tokens that we need to get the log - # probabilities for. Note that next input token is - # the token which we selected in the current logits, - # so shift by 1. 
- masked_actionids[:, 0, :] = 0 - masked_actionids[target_mask] = 0 # [b,s] - logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size) # [n vp] - masked_actionids_1d = masked_actionids.view( - -1) # [n] 0 for not in vocab range, target id -start for in range - arange_1d = torch.arange(start=0, end=logits_2d.size()[0], - device=logits_2d.device) - predicted_logits_1d = logits_2d[ - arange_1d, masked_actionids_1d] # [n] in range target logit, not in range logits[0] - predicted_logits_1d = predicted_logits_1d.clone().contiguous() - action_logits = predicted_logits_1d.view_as(indices) - action_logits[target_mask] = 0.0 # [b s] 0 for not in range, logit for in range - # All reduce is needed to get the chunks from other GPUs. - torch.distributed.all_reduce(action_logits, - op=torch.distributed.ReduceOp.SUM, - group=mpu.get_tensor_model_parallel_group()) - # Sum of exponential of logits along vocab dimension across all GPUs. - exp_logits = vocab_parallel_logits # [ b, s, vp ] - torch.exp(vocab_parallel_logits, out=exp_logits) - sum_exp_logits = exp_logits.sum(dim=-1) - torch.distributed.all_reduce(sum_exp_logits, - op=torch.distributed.ReduceOp.SUM, - group=mpu.get_tensor_model_parallel_group()) - log_probs = action_logits.squeeze(2) - torch.log( - sum_exp_logits + 1e-10) # log ( exp(l) / sum(exp(li) - - # shift by 1 - output_log_probs = log_probs[:, 1:] - output_log_probs = output_log_probs.contiguous() - - assert not torch.isnan(output_log_probs).any(), f"just out ref_logprobs {output_log_probs}" - assert output_log_probs.size(1) == tokens.size(1) - 1, "all token logprob except first one [1:]" + raise RuntimeError(f"unexpected trainer_engine {get_args().trainer_engine}, expect one of {list(TrainerEngine)}") return output_log_probs def forward_step(self, data, iteration=None): ''' - RLHF calling - rlhf framework source: ref_output = self.reference.forward_step(policy_output[0]) + ChatLearn calling + chatlearn framework source: ref_output = 
self.reference.forward_step(policy_output[0]) :param data: global batch??? micro_batch? :return: ''' - all_tokens = to_device("cuda", data["all_tokens"]) - ref_logprobs = self.score_and_return_on_last_stage(all_tokens) - return {"ref_logprobs": ref_logprobs} + if get_args().trainer_engine == TrainerEngine.DPO: + ref_logprobs = self.score_and_return_on_last_stage(data) + ref_out_dict = { + "chosen": data["chosen"], + "chosen_mask": data["chosen_mask"], + "rejected": data["rejected"], + "rejected_mask": data["rejected_mask"], + "prompt_id_lens": data["prompt_id_lens"], + } + if mpu.is_pipeline_last_stage(): # for the last pipeline stage, ref_logprobs is a list + ref_out_dict.update({ + "reference_chosen_logps": ref_logprobs[0], + "reference_rejected_logps": ref_logprobs[1] + }) + else: # for non-last pipeline stage, ref_logprobs is None + ref_out_dict.update({ + "reference_chosen_logps": None, + "reference_rejected_logps": None + }) + return ref_out_dict + else: + ref_logprobs = self.score_and_return_on_last_stage(data) + return {"ref_logprobs": ref_logprobs} diff --git a/examples/megatron/models/reward_inference.py b/examples/megatron/models/reward_inference.py index 23443de1..b672bbfa 100644 --- a/examples/megatron/models/reward_inference.py +++ b/examples/megatron/models/reward_inference.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -22,21 +22,21 @@ from time import time import torch -from megatron import get_args -from megatron import get_tokenizer from megatron.core import mpu -from megatron.global_vars import get_tensorboard_writer +from megatron.training import get_args from megatron.training import get_model -from megatron.utils import get_ltor_masks_and_position_ids -from models.reward_model import batch_padded_tokenize_data, model_provider +from megatron.training import get_tokenizer +from megatron.training.global_vars import get_tensorboard_writer +from megatron.training.utils import get_ltor_masks_and_position_ids -from chatlearn import RLHFMegatronModule +import chatlearn +from chatlearn import MegatronModule from chatlearn.utils import to_device from chatlearn.utils.megatron_utils import load_checkpoint -from .constants_ppo import RunningMoments, get_running_stats, reset_running_stats +from .reward_model import batch_padded_tokenize_data, model_provider +from .utils import tensorboard_scalar_dict, get_eos_id +from .constants import RunningMoments, get_running_stats, reset_running_stats from .forward_step import forward_step_helper -from .utils import tensorboard_scalar_dict -from .utils import get_eos_id ANS_RE = re.compile(r"#### (\-?[0-9\.\,]+)") INVALID_ANS = "[invalid]" @@ -52,8 +52,8 @@ def dump_jsonl_chinese(res, file_path, mode="w"): def save_list_str(list_strs, iteration): - args = get_args() - inference_output_path = f"{args.log_dir}/{args.exp_name}/inference_outputs_{iteration}.json" + args = chatlearn.get_args().runtime_args + inference_output_path = f"{args.output_dir}/inference_outputs/inference_outputs_{iteration}.json" Path(inference_output_path).parent.mkdir(parents=True, exist_ok=True) res = [] @@ -63,7 +63,7 @@ def save_list_str(list_strs, iteration): dump_jsonl_chinese(res, inference_output_path, mode="a") -class RewardInference(RLHFMegatronModule): +class RewardInference(MegatronModule): """RewardInference""" def setup(self): @@ -95,8 +95,6 @@ def setup(self): 
self.add_padding_config("action_logprobs", 0.0) self.add_padding_config("action_values", 0.0) self.add_padding_config("action_rewards", 0.0) - return 'ok' - def normalized_and_clip(self, scores): if self.model_args['scale_reward'] == "running": @@ -217,7 +215,7 @@ def get_all_rewards(self, action_starts, action_ends, loss_mask, all_tokens_righ scores = self.args.raw_reward_coeff * scores.view(-1, 1) else: # we only need last rank results, so return None for other rank - return + return None, None else: scores = torch.zeros(n, 1, device=loss_mask.device) if self.args.ngram_coef > 0: @@ -260,6 +258,12 @@ def get_all_rewards(self, action_starts, action_ends, loss_mask, all_tokens_righ self.stats["rewards/klrewards_min"] = kl_rw_sums.min() self.per_episode_metrics["rewards/klrewards"].update(kl_rw_sums) + action_lengths = [] + for action_token in action_tokens: + action_lengths.append(action_token.size(0)) + action_lengths = torch.tensor(action_lengths, dtype=torch.float32, device=torch.cuda.current_device()) + self.per_episode_metrics["action_lengths"].update(action_lengths) + if self.args.lm_coef > 0: lm_reward = self.args.lm_coef * ref_logprobs @@ -305,7 +309,8 @@ def get_all_rewards(self, action_starts, action_ends, loss_mask, all_tokens_righ self.per_episode_metrics["rewards/all_rw_sum"].update(all_rw_means) self.stats["rewards/all_rw_sum_max"] = all_rw_means.max() self.stats["rewards/all_rw_sum_min"] = all_rw_means.min() - return all_rewards + + return all_rewards, scores.view(-1).cpu().tolist() def forward_step(self, data, iteration=None): ''' @@ -330,7 +335,7 @@ def forward_step(self, data, iteration=None): # last rank save inference output save_list_str(list_strs, iteration) - old_value = data["old_values"] + old_value = data["old_values"] if "old_values" in data else None ref_logprobs = data["ref_logprobs"] logprobs = data["logprobs"] @@ -351,15 +356,17 @@ def forward_step(self, data, iteration=None): 1), f"{ref_logprobs.size(1)}, 
{all_tokens_right_padded.size(1)} " assert logprobs.size(1) + 1 == all_tokens_right_padded.size( 1), f"{logprobs.size(1)}, {all_tokens_right_padded.size(1)} " - assert old_value.size(1) == all_tokens_right_padded.size( - 1), f"{old_value.size(1)}, {all_tokens_right_padded.size(1)} " + if old_value is not None: + assert old_value.size(1) == all_tokens_right_padded.size( + 1), f"{old_value.size(1)}, {all_tokens_right_padded.size(1)} " n = all_tokens_right_padded.shape[0] # if ends with a eos_token also pad, it doesn't change. # if stopped due to len limit, discard last token to align with rewards. # because reward is r(s,a) which is a state action pair starts from state, # thus the last unstopped token has no reward assigned and thus need to discard - values = old_value[:, :-1] + if old_value is not None: + values = old_value[:, :-1] if self.args.loss_on_prompts: # because first token has no prob and serve as the first token to attend to so no loss @@ -378,11 +385,14 @@ def forward_step(self, data, iteration=None): # eg [ pad, q1, q2, q3, a1, a2, a3, pad, pad] -> ends[i] = 4 # eg [ pad, q1, q2, q3, a1, a2, a3] -> [ pad, q1, q2, q3, a1, a2] ends[i] = 3 # all values = value(hidden(q3, a1, a2, a3)). 
- all_values = [values[ix, starts[ix] - 1: ends[ix] - 1] for ix in range(n)] # we want states + if old_value is not None: + all_values = [values[ix, starts[ix] - 1: ends[ix] - 1] for ix in range(n)] # we want states + else: + all_values = None action_tokens = [all_tokens_right_padded[ix, starts[ix]: ends[ix]] for ix in range(n)] - all_rewards = self.get_all_rewards(starts, ends, loss_mask, all_tokens_right_padded, logprobs, + all_rewards, rm_rewards_list = self.get_all_rewards(starts, ends, loss_mask, all_tokens_right_padded, logprobs, ref_logprobs, kl_coef, action_tokens, list_strs) # [ pad, q1, q2, q3, a1, a2, a3], logprobs= logprob[ q1, q2, q3, a1, a2, a3] @@ -392,16 +402,24 @@ def forward_step(self, data, iteration=None): if self.args.log_interval > 0 and mpu.is_pipeline_last_stage(): for i in range(n): # for each traj, num states == num actions - assert all_logprobs[i].size(0) == all_values[i].size(0) == all_rewards[i].size(0), \ - f"all_rewards[i].size() {all_rewards[i].size(0)} all_values[i].size(0) {all_values[i].size(0)}" \ - f"all_logprobs[i].size(0) {all_logprobs[i].size(0)}" + + assert all_logprobs[i].size(0) == all_rewards[i].size(0), \ + f"all_rewards[i].size() {all_rewards[i].size(0)} all_logprobs[i].size(0) {all_logprobs[i].size(0)}" + if old_value is not None: + assert all_logprobs[i].size(0) == all_values[i].size(0), \ + f"all_logprobs[i].size() {all_logprobs[i].size(0)} all_values[i].size(0) {all_values[i].size(0)}" if self.args.log_interval > 0 and iteration % self.args.log_interval == 0 and mpu.is_pipeline_last_stage(): self.log_each_step(iteration) - return {"all_token_ids_right_padded": all_tokens_right_padded, "action_start_indices": starts, - "action_logprobs": all_logprobs, - "action_values": all_values, "action_rewards": all_rewards, "loss_mask": loss_mask} + res_dict = {"all_token_ids_right_padded": all_tokens_right_padded, "action_start_indices": starts, + "action_logprobs": all_logprobs, "action_rewards": all_rewards, "loss_mask": 
loss_mask, + "ref_logprobs": ref_logprobs, "old_logprobs": logprobs, "no_padded_query_ids": no_padded_query_ids, + "str_prompts":str_prompts, "rm_rewards": rm_rewards_list} + if all_values is not None: + res_dict["action_values"]= all_values + + return res_dict def log_each_step(self, iteration): writer = get_tensorboard_writer() @@ -457,23 +475,30 @@ def eval_forward(self, policy_res: dict): reward_model_scores = torch.tensor(math_rewards, device="cuda") else: - reward_model_scores = self.get_raw_reward(all_tokens_right_padded, ends).view(-1, 1) - self.per_episode_metrics["eval_rewards/reward_model_scores"].update(reward_model_scores) + reward_model_scores = self.get_raw_reward(all_tokens_right_padded, ends) + + if mpu.is_pipeline_last_stage(): + reward_model_scores = reward_model_scores.view(-1, 1) - reward_checkpoint = self.args.load - reward_checkpoint_load_iteration = self.args.load_iteration + self.per_episode_metrics["eval_rewards/reward_model_scores"].update(reward_model_scores) - output = [] - rewards_output = [] - for str_prompt, str_output, reward in zip(str_prompts, str_outputs, reward_model_scores): - rw = reward.cpu().item() - rewards_output.append(rw) + reward_checkpoint = self.args.load + reward_checkpoint_load_iteration = self.args.load_iteration - score_dict = {reward_checkpoint: {reward_checkpoint_load_iteration: [rw]}} - j = {"query": str_prompt, "responses": [str_output], "eval_score_dict": score_dict} - output.append(j) + output = [] + rewards_output = [] + for str_prompt, str_output, reward in zip(str_prompts, str_outputs, reward_model_scores): + rw = reward.cpu().item() + rewards_output.append(rw) - return {"eval_jsonl": output, "rewards": rewards_output} + score_dict = {reward_checkpoint: {reward_checkpoint_load_iteration: [rw]}} + j = {"query": str_prompt, "responses": [str_output], "eval_score_dict": score_dict} + output.append(j) + + return {"eval_jsonl": output, "rewards": rewards_output} + else: + # we only need first rank results, so 
return None for other rank + return def forward_step_pipeline(self, list_strs=None, all_tokens_right_padded=None, ends=None): self.model.eval() diff --git a/examples/megatron/models/reward_math.py b/examples/megatron/models/reward_math.py new file mode 100644 index 00000000..ee437800 --- /dev/null +++ b/examples/megatron/models/reward_math.py @@ -0,0 +1,109 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""reward math model""" +from collections import defaultdict +import numpy as np +import torch +from torch.utils.tensorboard import SummaryWriter +from chatlearn import BaseModule +from .utils import tensorboard_scalar_dict +from .constants import RunningMoments, get_running_stats, reset_running_stats +from .rm_sys.math_rule_rm import MathRuleRM + +class MathReward(BaseModule): + """Math reward""" + + def setup(self): + self.math_rule_rm = MathRuleRM() + self.stats = {} + self.running = RunningMoments() + self.per_episode_metrics = defaultdict(RunningMoments) + tensorboard_dir = f"{self.runtime_args.output_dir}/tensorboard" + self.tensorboard_writer = SummaryWriter(log_dir=tensorboard_dir) + + def forward_step(self, data, iteration=0): + answers = data['answer'] + str_outputs = data["str_outputs"] + eval_funcs = data["eval_func"] + list_strs = list(zip(answers, str_outputs, eval_funcs)) + rewards = 
self.get_math_rule_reward(list_strs, is_eval=False) + return {"math_rewards": rewards} + + def get_math_rule_reward(self, list_strs, is_eval): + + # Math reward + reorder_list_strs, reorder_idx = [], [] + for idx, (answer, str_output, eval_func) in enumerate(list_strs): + if eval_func == 'math_rule': + reorder_list_strs.append((answer, str_output)) + reorder_idx.append(idx) + reorder_rewards, success = self.math_rule_rm(reorder_list_strs) + + if is_eval: + self.stats["eval_rewards/math_rule_reward_mean"] = np.mean(reorder_rewards) + self.stats["eval_rewards/math_rule_parsing_rate"] = np.mean(success) + else: + self.stats["rewards/math_rule_reward_mean"] = np.mean(reorder_rewards) + self.stats["rewards/math_rule_parsing_rate"] = np.mean(success) + scores = [0] * len(list_strs) + for idx, r in zip(reorder_idx, reorder_rewards): + scores[idx] = r + + if is_eval: + return scores + else: + self.per_episode_metrics["rewards/math_reward_model_scores"].update(torch.FloatTensor(scores)) + return scores + + def eval_forward(self, policy_res: dict): + prompt_dicts = policy_res["prompt_dicts"] + str_outputs = policy_res["str_outputs"] + + list_strs = [[prompt_dict, str_output] for prompt_dict, str_output in zip(prompt_dicts, str_outputs)] + + reward_model_scores = self.get_math_rule_reward(list_strs, is_eval=True) + reward_model_scores = torch.FloatTensor(reward_model_scores) + reward_model_scores = reward_model_scores.view(-1, 1) + + self.per_episode_metrics["eval_rewards/math_reward_model_scores"].update(reward_model_scores) + + reward_checkpoint = self.model_args['load'] + reward_checkpoint_load_iteration = self.model_args['load_iteration'] + + output = [] + rewards_output = [] + for prompt_dict, str_output, reward in zip(prompt_dicts, str_outputs, reward_model_scores): + rw = reward.cpu().item() + rewards_output.append(rw) + + score_dict = {reward_checkpoint: {reward_checkpoint_load_iteration: [rw]}} + j = {"query": prompt_dict, "responses": [str_output], 
"eval_score_dict": score_dict} + output.append(j) + self.log_each_step() + + return {"eval_jsonl": output, "rewards": rewards_output, "type": ["math_rule"] * len(rewards_output)} + + def log_each_step(self): + stats_episode = self.stats + stats_episode.update(get_running_stats(self.per_episode_metrics)) + + stats_episode["exp_scores/running_math_mean"] = self.running.mean + stats_episode["exp_scores/running_math_std"] = self.running.std + + print(f"score only/running_math_mean {self.running.mean}", flush=True) + tensorboard_scalar_dict(self.tensorboard_writer, prefix=f"rewards_each/replica_id{self.replica_id}", + global_step=self._iteration, + scalar_dict=stats_episode) + reset_running_stats(self.per_episode_metrics) diff --git a/examples/megatron/models/reward_model.py b/examples/megatron/models/reward_model.py index 45dfe388..a88fd3a2 100644 --- a/examples/megatron/models/reward_model.py +++ b/examples/megatron/models/reward_model.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,13 +15,14 @@ """reward model""" import torch -from dataset.reward_dataset import preprocess -from megatron import get_args -from megatron import print_rank_0 +from megatron.training import get_args +from megatron.training import print_rank_0 from megatron.core import tensor_parallel -from megatron.model import GPTModel -from megatron.model.module import MegatronModule -from megatron.model.utils import get_linear_layer +from megatron.legacy.model import GPTModel +from megatron.legacy.model.module import MegatronModule +from megatron.legacy.model.utils import get_linear_layer + +from examples.megatron.data.reward_dataset import preprocess from .utils import has_config_in_args @@ -93,7 +94,7 @@ def __init__(self, args = get_args() if has_config_in_args(GPTModel): # new API - from megatron.arguments import core_transformer_config_from_args # pylint: disable=import-outside-toplevel + from megatron.training.arguments import core_transformer_config_from_args # pylint: disable=import-outside-toplevel config = core_transformer_config_from_args(args) super().__init__(config, num_tokentypes, parallel_output, pre_process, post_process) else: @@ -111,6 +112,7 @@ def forward(self, input_ids=None, position_ids=None, attention_mask=None, labels=None, tokentype_ids=None, inference_params=None, # pylint: disable=unused-argument pooling_sequence_index=None, list_strs=None, # pylint: disable=unused-argument + inference_config=None ): lm_output = self.language_model( input_ids, @@ -121,7 +123,14 @@ def forward(self, input_ids=None, position_ids=None, attention_mask=None, ret_attn_mask, inference_params=inference_params) if self.post_process: - assert labels is None, "assume labels is None in reawrd model" + if inference_config is not None and "batch_encode" in inference_config: + print('GPTreward model batch encoding, give the transformers encodings') + if get_args().sequence_parallel: + lm_output = tensor_parallel.gather_from_sequence_parallel_region( + lm_output, # [s, b, h] + 
tensor_parallel_output_grad=False) + return lm_output + assert labels is None, "assume labels is None in reward model" return self.pooler_head(lm_output, pooling_sequence_index) # [b x score_dim] return lm_output @@ -140,7 +149,7 @@ def load_state_dict(self, state_dict, strict=True): """Customized load.""" super().load_state_dict(state_dict, strict) if self._pooler_head_key in state_dict: - # for rlhf training + # for alignment training print_rank_0("load reward model pooler_head success") self.pooler_head.load_state_dict(state_dict[self._pooler_head_key], strict=strict) elif self.post_process: diff --git a/examples/megatron/models/rm_sys/__init__.py b/examples/megatron/models/rm_sys/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/examples/megatron/models/rm_sys/math_rule_rm.py b/examples/megatron/models/rm_sys/math_rule_rm.py new file mode 100644 index 00000000..96427997 --- /dev/null +++ b/examples/megatron/models/rm_sys/math_rule_rm.py @@ -0,0 +1,65 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""MathRuleRM""" + +import timeout_decorator +from tqdm import tqdm + +from .math_utils.grader import math_equal_process +from .math_utils.parser import extract_answer_custom, strip_string + + +@timeout_decorator.timeout(3) +def math_equal_timeout(param): + return math_equal_process(param) + + +class MathRuleRM: + """math rule reward model""" + + def __init__( + self, + timeout=1, + ): + self.timeout = timeout + + def __call__(self, data): + params = [] + for idx, (answer, str_output) in enumerate(data): + pred = extract_answer_custom( + str_output, + use_last_number=True, + use_choice=False + ) + pred = strip_string(pred) + params.append([idx, pred, answer]) + + scores = [] + pbar = tqdm(total=len(params)) + for param in params: + try: + result = math_equal_timeout(param) + except timeout_decorator.timeout_decorator.TimeoutError: + result = False + scores.append(result) + pbar.update(1) + + rewards = [0 for _ in range(len(data))] + extract_success = [1 for _ in range(len(data))] + for (idx, pred, answer), score in zip(params, scores): + rewards[idx] = float(score) + if not pred: + extract_success[idx] = 0 + return rewards, extract_success diff --git a/examples/megatron/models/rm_sys/math_utils/__init__.py b/examples/megatron/models/rm_sys/math_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/examples/megatron/models/rm_sys/math_utils/grader.py b/examples/megatron/models/rm_sys/math_utils/grader.py new file mode 100644 index 00000000..960ff465 --- /dev/null +++ b/examples/megatron/models/rm_sys/math_utils/grader.py @@ -0,0 +1,339 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""" +This logic is modified from: +- https://github.com/hendrycks/math/blob/main/modeling/math_equivalence.py +- https://github.com/microsoft/ProphetNet/tree/master/CRITIC +- https://github.com/openai/prm800k +- https://github.com/microsoft/ToRA/blob/main/src/eval/grader.py +- https://github.com/deepseek-ai/DeepSeek-Math/blob/main/evaluation/eval/eval_utils.py +""" + +import multiprocessing +import re +from math import isclose +from typing import Union +from latex2sympy2 import latex2sympy +from sympy import simplify, N +from sympy.parsing.latex import parse_latex +from sympy.parsing.sympy_parser import parse_expr +import regex + +from .parser import choice_answer_clean + + +# pylint: disable=bare-except +def parse_digits(num): + num = regex.sub(",", "", str(num)) + try: + return float(num) + except: + if num.endswith("%"): + num = num[:-1] + if num.endswith("\\"): + num = num[:-1] + try: + return float(num) / 100 + except: + pass + return None + + +def is_digit(num): + # paired with parse_digits + return parse_digits(num) is not None + + +def str_to_pmatrix(input_str): + input_str = input_str.strip() + matrix_str = re.findall(r"\{.*,.*\}", input_str) + pmatrix_list = [] + + for m in matrix_str: + m = m.strip("{}") + pmatrix = r"\begin{pmatrix}" + m.replace(",", "\\") + r"\end{pmatrix}" + pmatrix_list.append(pmatrix) + + return ", ".join(pmatrix_list) + + +def math_equal( + prediction: Union[bool, float, str], + reference: Union[float, str], + include_percentage: bool = True, 
+ is_close: bool = True, + timeout: bool = False, +) -> bool: + """ + Exact match of math if and only if: + 1. numerical equal: both can convert to float and are equal + 2. symbolic equal: both can convert to sympy expression and are equal + """ + if prediction is None or reference is None: + return False + if str(prediction.strip().lower()) == str(reference.strip().lower()): + return True + if ( + reference in ["A", "B", "C", "D", "E"] + and choice_answer_clean(prediction) == reference + ): + return True + + try: # 1. numerical equal # pylint: disable=too-many-nested-blocks + if is_digit(prediction) and is_digit(reference): + prediction = parse_digits(prediction) + reference = parse_digits(reference) + # number questions + if include_percentage: + gt_result = [reference / 100, reference, reference * 100] + else: + gt_result = [reference] + for item in gt_result: + try: + if is_close: + if numeric_equal(prediction, item): + return True + else: + if item == prediction: + return True + except Exception: + continue + return False + except: + pass + + if not prediction and prediction not in [0, False]: + return False + + # 2. symbolic equal + reference = str(reference).strip() + prediction = str(prediction).strip() + + ## pmatrix (amps) + if "pmatrix" in prediction and "pmatrix" not in reference: + reference = str_to_pmatrix(reference) + + ## deal with [], (), {} + pred_str, ref_str = prediction, reference + if ( + prediction.startswith("[") + and prediction.endswith("]") + and not reference.startswith("(") + ) or ( + prediction.startswith("(") + and prediction.endswith(")") + and not reference.startswith("[") + ): + pred_str = pred_str.strip("[]()") + ref_str = ref_str.strip("[]()") + for s in ["{", "}", "(", ")"]: + ref_str = ref_str.replace(s, "") + pred_str = pred_str.replace(s, "") + if pred_str.lower() == ref_str.lower(): + return True + + ## [a, b] vs. 
[c, d], return a==c and b==d + if ( + regex.match(r"(\(|\[).+(\)|\])", prediction) is not None + and regex.match(r"(\(|\[).+(\)|\])", reference) is not None + ): + pred_parts = prediction[1:-1].split(",") + ref_parts = reference[1:-1].split(",") + if len(pred_parts) == len(ref_parts): + if all( + math_equal( + pred_parts[i], ref_parts[i], include_percentage, is_close + ) + for i in range(len(pred_parts)) + ): + return True + if ( + ( + prediction.startswith("\\begin{pmatrix}") + or prediction.startswith("\\begin{bmatrix}") + ) + and ( + prediction.endswith("\\end{pmatrix}") + or prediction.endswith("\\end{bmatrix}") + ) + and ( + reference.startswith("\\begin{pmatrix}") + or reference.startswith("\\begin{bmatrix}") + ) + and ( + reference.endswith("\\end{pmatrix}") or reference.endswith("\\end{bmatrix}") + ) + ): + pred_lines = [ + line.strip() + for line in prediction[ + len("\\begin{pmatrix}"): -len("\\end{pmatrix}") + ].split("\\\\") + if line.strip() + ] + ref_lines = [ + line.strip() + for line in reference[ + len("\\begin{pmatrix}"): -len("\\end{pmatrix}") + ].split("\\\\") + if line.strip() + ] + matched = True + if len(pred_lines) == len(ref_lines): + for pred_line, ref_line in zip(pred_lines, ref_lines): + pred_parts = pred_line.split("&") + ref_parts = ref_line.split("&") + if len(pred_parts) == len(ref_parts): + if not all( + math_equal( + pred_parts[i], + ref_parts[i], + include_percentage, + is_close, + ) + for i in range(len(pred_parts)) + ): + matched = False + break + else: + matched = False + if not matched: + break + else: + matched = False + if matched: + return True + + if prediction.count("=") == 1 and reference.count("=") == 1: + pred = prediction.split("=") + pred = f"{pred[0].strip()} - ({pred[1].strip()})" + ref = reference.split("=") + ref = f"{ref[0].strip()} - ({ref[1].strip()})" + if symbolic_equal(pred, ref) or symbolic_equal(f"-({pred})", ref): + return True + elif ( + prediction.count("=") == 1 + and 
len(prediction.split("=")[0].strip()) <= 2 + and "=" not in reference + ): + if math_equal( + prediction.split("=")[1], reference, include_percentage, is_close + ): + return True + elif ( + reference.count("=") == 1 + and len(reference.split("=")[0].strip()) <= 2 + and "=" not in prediction + ): + if math_equal( + prediction, reference.split("=")[1], include_percentage, is_close + ): + return True + + # symbolic equal with sympy + if timeout: + if call_with_timeout(symbolic_equal_process, prediction, reference): + return True + else: + if symbolic_equal(prediction, reference): + return True + + return False + + +def math_equal_process(param): + return math_equal(param[-2], param[-1]) + + +def numeric_equal(prediction: float, reference: float): + # Note that relative tolerance has significant impact + # on the result of the synthesized GSM-Hard dataset + return isclose(reference, prediction, rel_tol=1e-4) + + +def symbolic_equal(a, b): + def _parse(s): + for f in [parse_latex, parse_expr, latex2sympy]: + try: + return f(s.replace("\\\\", "\\")) + except: + try: + return f(s) + except: + pass + return s + + a = _parse(a) + b = _parse(b) + + # direct equal + try: + if str(a) == str(b) or a == b: + return True + except: + pass + + # simplify equal + try: + if a.equals(b) or simplify(a - b) == 0: + return True + except: + pass + # equation equal + try: + if (abs(a.lhs - a.rhs)).equals(abs(b.lhs - b.rhs)): + return True + except: + pass + + try: + if numeric_equal(float(N(a)), float(N(b))): + return True + except: + pass + + # matrix + try: + # if a and b are matrix + if a.shape == b.shape: + _a = a.applyfunc(lambda x: round(x, 3)) + _b = b.applyfunc(lambda x: round(x, 3)) + if _a.equals(_b): + return True + except: + pass + + return False + + +def symbolic_equal_process(a, b, output_queue): + result = symbolic_equal(a, b) + output_queue.put(result) + + +def call_with_timeout(func, *args, timeout=1, **kwargs): + output_queue = multiprocessing.Queue() + process_args = 
args + (output_queue,) + process = multiprocessing.Process(target=func, args=process_args, kwargs=kwargs) + process.start() + process.join(timeout) + + if process.is_alive(): + process.terminate() + process.join() + return False + + return output_queue.get() +# pylint: enable=bare-except diff --git a/examples/megatron/models/rm_sys/math_utils/parser.py b/examples/megatron/models/rm_sys/math_utils/parser.py new file mode 100644 index 00000000..e176a1cd --- /dev/null +++ b/examples/megatron/models/rm_sys/math_utils/parser.py @@ -0,0 +1,752 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""parser.""" + +import re +from typing import Any, Dict +import regex +from word2number import w2n + + +# pylint: disable=anomalous-backslash-in-string,bare-except +def _fix_fracs(string): + substrs = string.split("\\frac") + new_str = substrs[0] + if len(substrs) > 1: + substrs = substrs[1:] + for substr in substrs: + new_str += "\\frac" + if len(substr) > 0 and substr[0] == "{": + new_str += substr + else: + try: + assert len(substr) >= 2 + except: + return string + a = substr[0] + b = substr[1] + if b != "{": + if len(substr) > 2: + post_substr = substr[2:] + new_str += "{" + a + "}{" + b + "}" + post_substr + else: + new_str += "{" + a + "}{" + b + "}" + else: + if len(substr) > 2: + post_substr = substr[2:] + new_str += "{" + a + "}" + b + post_substr + else: + new_str += "{" + a + "}" + b + string = new_str + return string + + +def _fix_a_slash_b(string): + if len(string.split("/")) != 2: + return string + a = string.split("/")[0] + b = string.split("/")[1] + try: + if "sqrt" not in a: + a = int(a) + if "sqrt" not in b: + b = int(b) + assert string == "{}/{}".format(a, b) + new_string = "\\frac{" + str(a) + "}{" + str(b) + "}" + return new_string + except: + return string + + +def _fix_sqrt(string): + _string = re.sub(r"\\sqrt(\w+)", r"\\sqrt{\1}", string) + return _string + + +def convert_word_number(text: str) -> str: + try: + text = str(w2n.word_to_num(text)) + except: + pass + return text + + +# units mainly from MathQA +unit_texts = [ + "east", + "degree", + "mph", + "kmph", + "ft", + "m sqaure", + " m east", + "sq m", + "deg", + "mile", + "q .", + "monkey", + "prime", + "ratio", + "profit of rs", + "rd", + "o", + "gm", + "p . 
m", + "lb", + "tile", + "per", + "dm", + "lt", + "gain", + "ab", + "way", + "west", + "a .", + "b .", + "c .", + "d .", + "e .", + "f .", + "g .", + "h .", + "t", + "a", + "h", + "no change", + "men", + "soldier", + "pie", + "bc", + "excess", + "st", + "inches", + "noon", + "percent", + "by", + "gal", + "kmh", + "c", + "acre", + "rise", + "a . m", + "th", + "π r 2", + "sq", + "mark", + "l", + "toy", + "coin", + "sq . m", + "gallon", + "° f", + "profit", + "minw", + "yr", + "women", + "feet", + "am", + "pm", + "hr", + "cu cm", + "square", + "v â € ™", + "are", + "rupee", + "rounds", + "cubic", + "cc", + "mtr", + "s", + "ohm", + "number", + "kmph", + "day", + "hour", + "minute", + "min", + "second", + "man", + "woman", + "sec", + "cube", + "mt", + "sq inch", + "mp", + "∏ cm ³", + "hectare", + "more", + "sec", + "unit", + "cu . m", + "cm 2", + "rs .", + "rs", + "kg", + "g", + "month", + "km", + "m", + "cm", + "mm", + "apple", + "liter", + "loss", + "yard", + "pure", + "year", + "increase", + "decrease", + "d", + "less", + "Surface", + "litre", + "pi sq m", + "s .", + "metre", + "meter", + "inch", +] + +unit_texts.extend([t + "s" for t in unit_texts]) + + +def strip_string(string, skip_unit=False): + string = str(string).strip() + # linebreaks + string = string.replace("\n", "") + + # right "." 
+ string = string.rstrip(".") + + # remove inverse spaces + # replace \\ with \ + string = string.replace("\\!", "") + # string = string.replace("\\ ", "") + # string = string.replace("\\\\", "\\") + + # matrix + string = re.sub(r"\\begin\{array\}\{.*?\}", r"\\begin{pmatrix}", string) + string = re.sub(r"\\end\{array\}", r"\\end{pmatrix}", string) + string = string.replace("bmatrix", "pmatrix") + + # replace tfrac and dfrac with frac + string = string.replace("tfrac", "frac") + string = string.replace("dfrac", "frac") + string = ( + string.replace("\\neq", "\\ne") + .replace("\\leq", "\\le") + .replace("\\geq", "\\ge") + ) + + # remove \left and \right + string = string.replace("\\left", "") + string = string.replace("\\right", "") + string = string.replace("\\{", "{") + string = string.replace("\\}", "}") + + # Remove unit: miles, dollars if after is not none + _string = re.sub(r"\\text{.*?}$", "", string).strip() + if _string not in ('', string): + string = _string + + if not skip_unit: + # Remove unit: texts + for _ in range(2): + for unit_text in unit_texts: + # use regex, the prefix should be either the start of the string or a non-alphanumeric character + # the suffix should be either the end of the string or a non-alphanumeric character + _string = re.sub(r"(^|\W)" + unit_text + r"($|\W)", r"\1\2", string) + if _string != "": + string = _string + + # Remove circ (degrees) + string = string.replace("^{\\circ}", "") + string = string.replace("^\\circ", "") + + # remove dollar signs + string = string.replace("\\$", "") + string = string.replace("$", "") + string = string.replace("\\(", "").replace("\\)", "") + + # convert word number to digit + string = convert_word_number(string) + + # replace "\\text{...}" to "..." 
+ string = re.sub(r"\\text\{(.*?)\}", r"\1", string) + for key in ["x=", "y=", "z=", "x\\in", "y\\in", "z\\in", "x\\to", "y\\to", "z\\to"]: + string = string.replace(key, "") + string = string.replace("\\emptyset", r"{}") + string = string.replace("(-\\infty,\\infty)", "\\mathbb{R}") + + # remove percentage + string = string.replace("\\%", "") + string = string.replace("\%", "") # pylint: disable=anomalous-backslash-in-string + string = string.replace("%", "") + + # " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string + string = string.replace(" .", " 0.") + string = string.replace("{.", "{0.") + + # cdot + # string = string.replace("\\cdot", "") + if ( + string.startswith("{") + and string.endswith("}") + and string.isalnum() + or string.startswith("(") + and string.endswith(")") + and string.isalnum() + or string.startswith("[") + and string.endswith("]") + and string.isalnum() + ): + string = string[1:-1] + + # inf + string = string.replace("infinity", "\\infty") + if "\\infty" not in string: + string = string.replace("inf", "\\infty") + string = string.replace("+\\inity", "\\infty") + + # and + string = string.replace("and", "") + string = string.replace("\\mathbf", "") + + # use regex to remove \mbox{...} + string = re.sub(r"\\mbox{.*?}", "", string) + + # quote + string.replace("'", "") + string.replace('"', "") + + # i, j + if "j" in string and "i" not in string: + string = string.replace("j", "i") + + # replace a.000b where b is not number or b is end, with ab, use regex + string = re.sub(r"(\d+)\.0*([^\d])", r"\1\2", string) + string = re.sub(r"(\d+)\.0*$", r"\1", string) + + # if empty, return empty string + if len(string) == 0: + return string + if string[0] == ".": + string = "0" + string + + # to consider: get rid of e.g. 
"k = " or "q = " at beginning + if len(string.split("=")) == 2: + if len(string.split("=")[0]) <= 2: + string = string.split("=")[1] + + string = _fix_sqrt(string) + string = string.replace(" ", "") + + # \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1). Also does a/b --> \\frac{a}{b} + string = _fix_fracs(string) + + # NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y + string = _fix_a_slash_b(string) + + return string + + +def extract_multi_choice_answer(pred_str): + # TODO: SFT models + if "Problem:" in pred_str: + pred_str = pred_str.split("Problem:", 1)[0] + pred_str = pred_str.replace("choice is", "answer is") + patt = regex.search(r"answer is \(?(?P[abcde])\)?", pred_str.lower()) + if patt is not None: + return patt.group("ans").upper() + return "placeholder" + + +direct_answer_trigger_for_fewshot = ("choice is", "answer is") + + +def choice_answer_clean(pred: str): + pred = pred.strip("\n") + + # Determine if this is ICL, if so, use \n\n to split the first chunk. + ICL = False + for trigger in direct_answer_trigger_for_fewshot: + if pred.count(trigger) > 1: + ICL = True + if ICL: + pred = pred.split("\n\n")[0] + + # Split the trigger to find the answer. + preds = re.split("|".join(direct_answer_trigger_for_fewshot), pred) + if len(preds) > 1: + answer_flag = True + pred = preds[-1] + else: + answer_flag = False + + pred = pred.strip("\n").rstrip(".").rstrip("/").strip(" ").lstrip(":") + + # Clean the answer based on the dataset + tmp = re.findall(r"\b(A|B|C|D|E)\b", pred.upper()) + if tmp: + pred = tmp + else: + pred = [pred.strip().strip(".")] + + if len(pred) == 0: + pred = "" + else: + if answer_flag: + # choose the first element in list ... + pred = pred[0] + else: + # choose the last e + pred = pred[-1] + + # Remove the period at the end, again! 
+ pred = pred.rstrip(".").rstrip("/") + + return pred + + +def extract_answer_custom(pred_str, use_last_number=True, use_choice=False): + pred_str = pred_str.replace("\u043a\u0438", "") + + if "final answer is $" in pred_str and "$. I hope" in pred_str: + # minerva_math + tmp = pred_str.split("final answer is $", 1)[1] + pred = tmp.split("$. I hope", 1)[0].strip() + elif "boxed" in pred_str: + ans = pred_str.split("boxed")[-1] + if len(ans) == 0: + return "" + elif ans[0] == "{": + stack = 1 + a = "" + for c in ans[1:]: + if c == "{": + stack += 1 + a += c + elif c == "}": + stack -= 1 + if stack == 0: + break + a += c + else: + a += c + else: + a = ans.split("$")[0].strip() + pred = a + elif "he answer is" in pred_str: + pred = pred_str.split("he answer is")[-1].strip() + elif "final answer is" in pred_str: + pred = pred_str.split("final answer is")[-1].strip() + # elif extract_program_output(pred_str) != "": + # fall back to program + # pred = extract_program_output(pred_str) + else: # use the last number + if use_last_number: + pattern = "-?\d*\.?\d+" + pred = re.findall(pattern, pred_str.replace(",", "")) + if len(pred) >= 1: + pred = pred[-1] + else: + pred = "" + else: + pred = "" + + # choice answer + if use_choice: + tmp = re.findall(r"\b(A|B|C|D|E)\b", pred.upper()) + if tmp: + pred = tmp[-1] + else: + pred = pred.strip().strip(".") + + # multiple line + # pred = pred.split("\n")[0] + pred = re.sub(r"\n\s*", "", pred) + if pred != "" and pred[0] == ":": + pred = pred[1:] + if pred != "" and pred[-1] == ".": + pred = pred[:-1] + if pred != "" and pred[-1] == "/": + pred = pred[:-1] + pred = strip_string(pred, skip_unit=False) + return pred + + +def extract_answer(pred_str, data_name, use_last_number=True): + pred_str = pred_str.replace("\u043a\u0438", "") + if data_name in ["mmlu_stem", "sat_math", "aqua"]: + # TODO check multiple choice + return choice_answer_clean(pred_str) + + if "final answer is $" in pred_str and "$. 
I hope" in pred_str: + # minerva_math + tmp = pred_str.split("final answer is $", 1)[1] + pred = tmp.split("$. I hope", 1)[0].strip() + elif "boxed" in pred_str: + ans = pred_str.split("boxed")[-1] + if len(ans) == 0: + return "" + elif ans[0] == "{": + stack = 1 + a = "" + for c in ans[1:]: + if c == "{": + stack += 1 + a += c + elif c == "}": + stack -= 1 + if stack == 0: + break + a += c + else: + a += c + else: + a = ans.split("$")[0].strip() + pred = a + elif "he answer is" in pred_str: + pred = pred_str.split("he answer is")[-1].strip() + elif "final answer is" in pred_str: + pred = pred_str.split("final answer is")[-1].strip() + # elif extract_program_output(pred_str) != "": + # fall back to program + # pred = extract_program_output(pred_str) + else: # use the last number + if use_last_number: + pattern = "-?\d*\.?\d+" + pred = re.findall(pattern, pred_str.replace(",", "")) + if len(pred) >= 1: + pred = pred[-1] + else: + pred = "" + else: + pred = "" + + # choice answer + if ( + data_name in ["sat_math", "aqua", "arc", "gpqa", "mathqa"] + or "mmlu" in data_name + ): + tmp = re.findall(r"\b(A|B|C|D|E)\b", pred.upper()) + if tmp: + pred = tmp[-1] + else: + pred = pred.strip().strip(".") + + # multiple line + # pred = pred.split("\n")[0] + pred = re.sub(r"\n\s*", "", pred) + if pred != "" and pred[0] == ":": + pred = pred[1:] + if pred != "" and pred[-1] == ".": + pred = pred[:-1] + if pred != "" and pred[-1] == "/": + pred = pred[:-1] + pred = strip_string(pred, skip_unit=data_name in ["carp_en", "minerva_math"]) + return pred + + +STRIP_EXCEPTIONS = ["carp_en", "minerva_math"] + + +def parse_ground_truth(example: Dict[str, Any], data_name): + if "gt_cot" in example and "gt" in example: + # if data_name in ["math", "math-oai", "minerva_math", "amps", "hungarian_exam"]: + if data_name in ["math", "math-oai", "amps", "hungarian_exam"]: + gt_ans = extract_answer(example["gt_cot"], data_name) + elif data_name in STRIP_EXCEPTIONS: + gt_ans = example["gt"] + else: 
+ gt_ans = strip_string(example["gt"]) + return example["gt_cot"], gt_ans + + # parse ground truth + if data_name in ["math", "math-oai", "minerva_math", "amps", "hungarian_exam"]: + gt_cot = example["solution"] + gt_ans = extract_answer(gt_cot, data_name) + # elif data_name in ["mathqa"]: + # gt_cot = example["solution"] + # gt_ans = example["answer"] + elif data_name in ["mathqa"]: + gt_cot = example["rationale"] + gt_ans = example["correct"].upper() + assert gt_ans in ["A", "B", "C", "D", "E"] + elif data_name == "gsm8k": + gt_cot, gt_ans = example["answer"].split("####") + elif data_name == "gsm-hard": + gt_cot, gt_ans = example["code"], example["target"] + elif data_name == "svamp": + gt_cot, gt_ans = example["Equation"], example["Answer"] + elif data_name == "asdiv": + gt_cot = example["formula"] + gt_ans = re.sub(r"\(.*?\)", "", example["answer"]) + elif data_name == "mawps": + gt_cot, gt_ans = None, example["target"] + elif data_name == "tabmwp": + gt_cot = example["solution"] + gt_ans = example["answer"] + if example["ans_type"] in ["integer_number", "decimal_number"]: + if "/" in gt_ans: + gt_ans = int(gt_ans.split("/")[0]) / int(gt_ans.split("/")[1]) + elif "," in gt_ans: + gt_ans = float(gt_ans.replace(",", "")) + elif "%" in gt_ans: + gt_ans = float(gt_ans.split("%")[0]) / 100 + else: + gt_ans = float(gt_ans) + elif data_name == "bbh": + gt_cot, gt_ans = None, example["answer"].replace("(", "").replace(")", "") + elif data_name in ["theorem-qa", "math_collection", "arc"] or "gpqa" in data_name: + gt_cot, gt_ans = None, example["answer"] + elif data_name == "carp_en": + gt_cot, gt_ans = example["steps"], example["answer"] + elif data_name == "tal_sc_en": + gt_cot, gt_ans = example["answer_analysis"][0].strip() + elif data_name == "mmlu_stem": + abcd = "ABCD" + gt_cot, gt_ans = None, abcd[example["answer"]] + elif data_name == "sat_math": + gt_cot, gt_ans = None, example["Answer"] + elif data_name == "aqua": + gt_cot, gt_ans = None, example["correct"] + 
elif data_name in ["gaokao2023en", "college_math"]: + gt_cot, gt_ans = None, example["answer"].strip("$") + elif data_name == "olympiadbench": + gt_cot, gt_ans = None, example["final_answer"][0].strip("$") + else: + raise NotImplementedError(f"`{data_name}`") + # post process + gt_cot = str(gt_cot).strip() + if data_name not in STRIP_EXCEPTIONS: + gt_ans = strip_string(gt_ans, skip_unit=data_name == "carp_en") + else: + gt_ans = ( + gt_ans.replace("\\neq", "\\ne") + .replace("\\leq", "\\le") + .replace("\\geq", "\\ge") + ) + return gt_cot, gt_ans + + +def parse_question(example, data_name): + question = "" + if data_name == "asdiv": + question = f"{example['body'].strip()} {example['question'].strip()}" + elif data_name == "svamp": + body = example["Body"].strip() + if not body.endswith("."): + body = body + "." + question = f'{body} {example["Question"].strip()}' + elif data_name == "tabmwp": + title_str = ( + f'regarding "{example["table_title"]}" ' if example["table_title"] else "" + ) + question = f"Read the following table {title_str}and answer a question:\n" + question += f'{example["table"]}\n{example["question"]}' + if example["choices"]: + question += ( + f' Please select from the following options: {example["choices"]}' + ) + elif data_name == "theorem-qa": + question = ( + f"{example['question'].strip()}\nTheorem: {example['theorem_def'].strip()}" + ) + elif data_name == "carp_en": + question = example["content"] + elif data_name == "tal_sc_en": + pass + elif data_name == "mmlu_stem": + options = example["choices"] + assert len(options) == 4 + for i, (label, option) in enumerate(zip("ABCD", options)): + options[i] = f"({label}) {str(option).strip()}" + options = " ".join(options) + # question = f"{example['question'].strip()}\nWhat of the following is the right choice? 
Explain your answer.\n{options}" + question = f"{example['question'].strip()}\nAnswer Choices: {options}" + elif data_name == "sat_math": + options = example["options"].strip() + assert "A" == options[0] + options = "(" + options + for ch in "BCD": + if f" {ch}) " in options: + options = regex.sub(f" {ch}\) ", f" ({ch}) ", options) + # question = f"{example['question'].strip()}\nWhat of the following is the right choice? Explain your answer.\n{options.strip()}" + question = f"{example['question'].strip()}\nAnswer Choices: {options}" + elif data_name == "mathqa": + example["problem"] = example["problem"][0].upper() + example["problem"][1:] + options = example["options"].strip() + if options[0] == "[": + options = eval(options) # pylint: disable=eval-used + options = " ".join(options) + assert "a" == options[0], options + for ch in "abcde": + if f"{ch} ) " in options: + options = regex.sub(f"{ch} \) {ch} \) ", f"{ch} ) ", options) + options = regex.sub(f"{ch} \) ", f"({ch.upper()}) ", options) + # options = options.replace(" , ", ", ") + # # question = f"{example['problem'].strip()}\nWhat of the following is the right choice? 
Explain your answer.\n{options.strip()}" + # question = f"{example['problem'].strip()}\nAnswer Choices: {options}" + # TODO temporary + options = options.replace(" , ", ", ").replace(", (", "\n(") + question = f"{example['problem'].strip()}\n{options}" + elif "gpqa" in data_name: + options = example["options"] + for i, (label, option) in enumerate(zip("ABCD", options)): + options[i] = f"({label}) {str(option).strip()}" + if example["question"][-1] == "\n": + question = example["question"] + "Answer Choices: " + " ".join(options) + else: + question = example["question"] + "\nAnswer Choices: " + " ".join(options) + elif "aqua" in data_name: + options = example["options"] + choice = "(" + "(".join(options) + choice = choice.replace("(", " (").replace(")", ") ").strip() + choice = "Answer Choices: " + choice + question = example["question"].strip() + choice + else: + for key in ["question", "problem", "Question", "input"]: + if key in example: + question = example[key] + break + # assert question != "" + # Yes or No question + _, gt_ans = parse_ground_truth(example, data_name) + gt_lower = gt_ans.lower() + if gt_lower in ["true", "false"]: + question += " (True or False)" + if gt_lower in ["yes", "no"]: + question += " (Yes or No)" + return question.strip() + + +def run_execute(executor, result, prompt_type, data_name, execute=False): + if not result or result == "error": + return None, None + report = None + + if "program_only" in prompt_type: + prediction = extract_program_output(result) + elif prompt_type in ["pot", "pal"] and execute: + code = extract_program(result) + prediction, report = executor.apply(code) + else: + prediction = extract_answer(result, data_name) + + # prediction = strip_string(prediction, skip_unit=data_name == "carp_en") + prediction = strip_string(prediction, skip_unit=data_name in STRIP_EXCEPTIONS) + return prediction, report +# pylint: enable=anomalous-backslash-in-string,bare-except diff --git a/examples/megatron/models/train_helper.py 
b/examples/megatron/models/train_helper.py new file mode 100644 index 00000000..7f89a1b5 --- /dev/null +++ b/examples/megatron/models/train_helper.py @@ -0,0 +1,94 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""training helper""" + +import numpy +import torch + +from torch.utils.tensorboard import SummaryWriter + +import chatlearn +from .utils import write_jsonl, read_jsonl, tensorboard_scalar_dict, listdict_to_dictlist + +def eval_post_process(results, eval_info): + """ + Post process function for model evaluation results. + + Args + ---- + post_process_func + + This function accept two arguments. + 1. results: a list of evaluation results + 2. 
eval_info: a dict meta that contains "train_iteration" and "episode_iteration" + """ + args = chatlearn.get_args().runtime_args + results = results["reward"] + results = listdict_to_dictlist(results) + if args.get('eval_data_num_limit') > 0: + assert len(results['rewards']) == args.get('eval_data_num_limit') + tensorboard_dir = f"{args.output_dir}/tensorboard" + writer = SummaryWriter( + log_dir=tensorboard_dir, + max_queue=99999) + + eval_reward_stats = {"eval_reward_mean": numpy.mean(results['rewards'])} + train_iteration = eval_info["train_iteration"] + + if torch.distributed.is_initialized(): + if torch.distributed.get_rank() == ( + torch.distributed.get_world_size() - 1): + tensorboard_scalar_dict(writer, prefix="eval_reward_each/", + global_step=train_iteration, + scalar_dict=eval_reward_stats) + + else: + tensorboard_scalar_dict(writer, prefix="eval_reward_each/", + global_step=train_iteration, + scalar_dict=eval_reward_stats) + print(f"eval reward stats: {eval_reward_stats} iter: {train_iteration}") + save_fp = f"{args.output_dir}/eval/{train_iteration}/eval_json_res.json" # pylint: disable=line-too-long + write_jsonl(results["eval_jsonl"], save_fp) + + +def get_prompts(fp, num_limit=-1): + prompts_jsons = read_jsonl(fp) + + if "text" in prompts_jsons[0]: + prompts = [p["text"] for p in prompts_jsons] + patten = '\n\nAssistant: ' + prompts = [prompt[:prompt.find(patten) + len(patten)] for prompt in prompts] + if num_limit != -1: + prompts = prompts[:num_limit] + return prompts + elif 'prompt' in prompts_jsons[0]: + prompts = [] + for p in prompts_jsons: + if "response" in p: + prompts.append((p["prompt"], p["response"], p["rejected"])) + elif 'eval_func' in p: + # math + prompts.append(p) + else: + prompts.append(p["prompt"]) + if num_limit != -1: + prompts = prompts[:num_limit] + return prompts + else: + prompts = [p["query"] for p in prompts_jsons] + if num_limit != -1: + prompts = prompts[:num_limit] + formatted_prompts = [f"\n\nHuman: {p}\n\nAssistant: 
" for p in prompts] + return formatted_prompts diff --git a/examples/megatron/models/utils.py b/examples/megatron/models/utils.py index b03dfd81..6c9e73d4 100644 --- a/examples/megatron/models/utils.py +++ b/examples/megatron/models/utils.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,7 @@ # ============================================================================== """utils""" +import copy import json import inspect import math @@ -28,10 +29,10 @@ import torch import torch.distributed as dist import torch.nn.functional as F -from megatron import print_rank_last, is_last_rank, get_num_microbatches, get_args, get_timers +from megatron.training import print_rank_last, is_last_rank, get_num_microbatches, get_args, get_timers from megatron.core import mpu -from megatron.global_vars import get_tensorboard_writer -from megatron.training import print_datetime +from megatron.training.global_vars import get_tensorboard_writer +from megatron.training.training import print_datetime from torchtyping import TensorType @@ -325,7 +326,7 @@ def listdict_to_dictlist(ld, list_extend=True): :param ld: :return: ''' - res = ld[0] + res = copy.deepcopy(ld[0]) for res_key, v in res.items(): if list_extend and isinstance(res[res_key], list): continue @@ -423,3 +424,23 @@ def has_config_in_args(func): def get_eos_id(tokenizer): return tokenizer.eos_id if hasattr(tokenizer, 'eos_id') else tokenizer.eod_id + + +def pad_to_length(tensor, length, pad_value, dim=-1): + if tensor.size(dim) >= length: + return tensor + else: + pad_size = list(tensor.shape) + pad_size[dim] = length - tensor.size(dim) + return torch.cat( + [tensor, pad_value * torch.ones(*pad_size, dtype=tensor.dtype, device=tensor.device)], dim=dim + ) + + +def 
get_padding_length(enable_sequence_parallel, tensor_parallel_size, max_length): + if not enable_sequence_parallel: + padding_len = 0 + else: + mod_value = max_length % tensor_parallel_size + padding_len = (tensor_parallel_size - mod_value) if mod_value else 0 + return max_length + padding_len diff --git a/examples/megatron/models/value_model.py b/examples/megatron/models/value_model.py index 13d4698d..f31c61b3 100644 --- a/examples/megatron/models/value_model.py +++ b/examples/megatron/models/value_model.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,12 +17,12 @@ import torch import torch.distributed as dist from megatron.core import mpu -from megatron.global_vars import get_args -from megatron.global_vars import get_tokenizer +from megatron.training.global_vars import get_args +from megatron.training.global_vars import get_tokenizer -from .constants_ppo import select_actions_from_right_padded -from .reward_model import RewardModel from .utils import get_advantages_and_returns +from .constants import select_actions_from_right_padded +from .reward_model import RewardModel class ValueModel(RewardModel): @@ -44,7 +44,7 @@ def __init__(self, # pylint: disable=arguments-differ def forward(self, all_token_ids, all_position_ids, all_token_attention_mask, training_inputs=None, - inference_params=None, ret_input_ids=None, ret_position_ids=None, ret_attn_mask=None): + inference_params=None, ret_input_ids=None, ret_position_ids=None, ret_attn_mask=None, inference_config=None): lm_output = self.language_model( all_token_ids, all_position_ids, diff --git a/examples/megatron/models/value_trainer.py b/examples/megatron/models/value_trainer.py index 1a178811..fbe73d6f 100644 --- a/examples/megatron/models/value_trainer.py +++ 
b/examples/megatron/models/value_trainer.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,21 +17,20 @@ from functools import partial import torch -from megatron import get_num_microbatches -from megatron import get_timers -from megatron import get_tokenizer -from megatron import print_rank_0 from megatron.core import mpu -from megatron.global_vars import get_tensorboard_writer -from megatron.utils import average_losses_across_data_parallel_group -from megatron.utils import calc_params_l2_norm -from models.value_model import ValueModel +from megatron.training import get_num_microbatches +from megatron.training import get_timers +from megatron.training import get_tokenizer +from megatron.training import print_rank_0 +from megatron.training.global_vars import get_tensorboard_writer +from megatron.training.utils import average_losses_across_data_parallel_group +from megatron.training.utils import calc_params_l2_norm from chatlearn.utils import to_device +from .value_model import ValueModel +from .utils import tensorboard_scalar_dict, training_log, get_eos_id from .base_trainer import BaseTrainer -from .constants_ppo import get_ltor_masks_and_position_ids, select_actions_from_right_padded, pad_to_max_len -from .utils import tensorboard_scalar_dict, training_log -from .utils import get_eos_id +from .constants import get_ltor_masks_and_position_ids_rlhf, select_actions_from_right_padded, pad_to_max_len class ValueTrainer(BaseTrainer): @@ -73,7 +72,7 @@ def get_batch(self, batch_data): # to stop and most likely it shouldn't stop. it's just maxed out. 
all_token_loss_mask = pad_to_max_len(data_b["loss_mask"], args.seq_length, pad_value=0) - all_token_attention_mask, all_token_position_ids = get_ltor_masks_and_position_ids( + all_token_attention_mask, all_token_position_ids = get_ltor_masks_and_position_ids_rlhf( all_token_ids_right_padded) inputs = { diff --git a/examples/megatron/models/vllm_policy_inference.py b/examples/megatron/models/vllm_policy_inference.py index 3ba5ab01..c579d19c 100644 --- a/examples/megatron/models/vllm_policy_inference.py +++ b/examples/megatron/models/vllm_policy_inference.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,103 +14,81 @@ # ============================================================================== """vllm policy inference""" +import copy + import torch import torch.nn.functional as F -from dataset.prompt_dataset import VLLMPromptPipeline -from models.vllm_policy_model import VLLMPolicyModel - -from vllm.transformers_utils.tokenizer import get_tokenizer +from chatlearn import VLLMModule +from examples.megatron.data.prompt_dataset import VLLMPromptPipeline -from chatlearn import RLHFVLLMModule -from chatlearn.utils.vllm_utils import get_model, print_rank_0 from .utils import get_loss_mask -class VLLMPolicyInference(RLHFVLLMModule): +class VLLMPolicyInference(VLLMModule): """Policy vLLM Inference""" - def setup(self): - # Set up model and load checkpoint - self.tokenizer = get_tokenizer( - self.model_args.get("tokenizer"), - tokenizer_mode="auto", - trust_remote_code=False, - tokenizer_revision=None, - revision=None - ) - model = [get_model(self.model_provider, self.model_args, wrap_with_ddp=False)] - - assert len(model) == 1, "Above condition should have caught this" - self.model = model[0] - - return 'ok' - def 
build_dataset(self, train_prompts, is_eval=False): - ''' - framework source: dataset = self.build_dataset(data) - :param train_prompts: all train prompts used in this training run?? - :return: - a torch.utils.data.Dataset object for prompts_loader of all prompts, and - ''' + if is_eval: + duplicated_train_prompts = train_prompts + else: + if self.model_args["init_shuffle_prompts"] == 2: + # this is to approximate n epochs and by pass the chatlearn epoch which currently hangs + # append epochs and shuffle epoch by epoch and attach them together + # and now num_inference_per_prompt is number of epochs + duplicated_train_prompts = [] + for i in range(self.model_args["num_inference_per_prompt"]): + train_prompts_cp = copy.deepcopy(train_prompts) + random.shuffle(train_prompts_cp) + duplicated_train_prompts.extend(train_prompts_cp) + elif self.model_args["init_shuffle_prompts"] == 0: + # otherwise, it's a huge epoch + duplicated_train_prompts = [] + for p in train_prompts: + duplicated_train_prompts.extend([p for i in range(self.model_args["num_inference_per_prompt"])]) + else: + raise Exception(f"unsupported init_shuffle_prompts {init_shuffle_prompts}, expect 0 or 2.") + max_prompt_length = ( self.model_args.get("seq_length") - self.model_args.get("max_new_tokens") ) + prompt_key = self.model_args.get("prompt_key") # TODO: read from files prompts_dataset = VLLMPromptPipeline( - train_prompts, max_prompt_length, self.tokenizer) + duplicated_train_prompts, max_prompt_length, self.tokenizer.tokenizer, prompt_key) return prompts_dataset - def model_provider(self): - """Build the model.""" - print_rank_0('building vLLM model ...') - model = VLLMPolicyModel(self.model_config, self.model_args) - - return model - - def eval_forward(self, data): - return self._forward_step(data, 0, eval_mode=True) + def eval_forward(self, data, iteration=0): + return self._forward_step(data, iteration, eval_mode=True) - def _forward_step(self, data, iteration, eval_mode: bool): + def 
_forward_step(self, data, iteration, eval_mode): ''' - RLHF calling - rlhf framework source: policy_output = self.policy.forward_step(query) - :param data: entire global batch?? micro_batch? + ChatLearn calling + chatlearn framework source: policy_output = self.policy.forward_step(query) + :param data: micro_batch :return: - data using current microbatch + data using current micro_batch {"all_tokens": tokens, "str_samples": str_samples, "str_prompts": str_prompts, "str_outputs": str_outputs, "logprobs": all_log_probs, "no_padded_query_ids": no_padded_query_ids} ''' assert iteration >= 0 - assert eval_mode, "Expect eval mode is True for vllm policy model." - return self.model( - data["input_ids"], - data["positions"], - kv_caches=data["kv_caches"], - input_metadata=data["input_metadata"], - cache_events=data["cache_events"] - ) - - def _add_request(self, data): - return self._add_request_internal(data["prompt"], data["input_ids"]) + assert isinstance(eval_mode, bool) + outputs = self.execute_step(data) - def forward_step(self, data, iteration=0): # pylint: disable=unused-argumen - seq_group_metadata_list = data["seq_group_metadata_list"] - blocks_to_swap_in = data["blocks_to_swap_in"] - blocks_to_swap_out = data["blocks_to_swap_out"] - blocks_to_copy = data["blocks_to_copy"] + return outputs - outputs = self.execute_step( - seq_group_metadata_list, blocks_to_swap_in, blocks_to_swap_out, blocks_to_copy) + def _add_request(self, data, is_eval=False): # pylint: disable=arguments-differ + return self._add_request_internal(data["prompt"], data["input_ids"], is_eval=is_eval) - return outputs + def forward_step(self, data, iteration=0): # pylint: disable=unused-argument + return self._forward_step(data, iteration, eval_mode=False) def decode_internal(self, batched_outputs): ''' - RLHF calling - rlhf framework source: policy_output = self.policy.forward_step(query) + ChatLearn calling + chatlearn framework source: policy_output = self.policy.forward_step(query) :param 
batched_outputs: batched_outputs :return: data using current microbatch @@ -118,62 +96,64 @@ def decode_internal(self, batched_outputs): "str_prompts": str_prompts, "str_outputs": str_outputs, "logprobs": all_log_probs, "no_padded_query_ids": no_padded_query_ids} ''' + max_tokens_length = self.model_args.get("seq_length") no_padded_query_ids = [] - outputs_tokens = [] + all_tokens = [] str_outputs = [] str_prompts = [] - str_samples = [] logprobs = [] - max_prompt_len = 0 - max_new_tokens = 0 for output in batched_outputs: - max_prompt_len = max(max_prompt_len, len(output.prompt_token_ids)) - str_prompts.append(output.prompt) - str_outputs.append(output.outputs[0].text) - str_samples.append(str_prompts[-1] + str_outputs[-1]) - no_padded_query_ids.append(torch.tensor(output.prompt_token_ids)) - max_new_tokens = max(max_new_tokens, len(output.outputs[0].token_ids)) - outputs_tokens.append(torch.tensor(output.outputs[0].token_ids)) - logprobs.append(torch.tensor([probs[output.outputs[0].token_ids[idx]] for idx, probs in enumerate(output.outputs[0].logprobs)])) - - prompts_tokens = [ + num_responses_per_prompt = len(output.outputs) + for res_idx in range(num_responses_per_prompt): + str_prompts.append(output.prompt) + str_outputs.append(output.outputs[res_idx].text) + no_padded_query_ids.append(torch.tensor(output.prompt_token_ids)) + + output_logprobs = [] + for idx, probs in enumerate(output.outputs[res_idx].logprobs): + prob = probs[output.outputs[res_idx].token_ids[idx]] + if isinstance(prob, float): + output_logprobs.append(prob) + else: + output_logprobs.append(prob.logprob) + logprob = torch.tensor(output_logprobs) + if output.prompt_logprobs is not None: + prompt_logprobs = [] + for idx, prompt_token_id in enumerate(output.prompt_token_ids): + if idx == 0: + continue + prompt_logprobs.append(output.prompt_logprobs[idx][prompt_token_id]) + else: + prompt_logprobs = [0.0 for _ in range(len(output.prompt_token_ids) - 1)] + output_tokens = 
list(output.outputs[res_idx].token_ids) + all_tokens.append(torch.tensor(output.prompt_token_ids + output_tokens)) + prompt_logprobs = torch.tensor(prompt_logprobs) + logprob = torch.cat([prompt_logprobs, logprob]) + logprobs.append(logprob) + + all_tokens = [ F.pad( - prompts_token, - (0, max_prompt_len - prompts_token.shape[0]), - value=self.tokenizer.eos_token_id, # just pad_token_id + all_token, + (0, max_tokens_length - all_token.shape[0]), + value=self.tokenizer.tokenizer.eos_token_id, # just pad_token_id ) - for prompts_token in no_padded_query_ids + for all_token in all_tokens ] - prompts_tokens_tensor = torch.vstack(prompts_tokens).to(torch.cuda.current_device()) - - outputs_tokens = [ - F.pad( - output_token, - (0, max_new_tokens - output_token.shape[0]), - value=self.tokenizer.eos_token_id, # just pad_token_id - ) - for output_token in outputs_tokens - ] - output_tokens_tensor = torch.vstack(outputs_tokens).to(torch.cuda.current_device()) + all_tokens = torch.vstack(all_tokens) logprobs = [ F.pad( logprob, - (0, max_new_tokens - logprob.shape[0]), + (0, max_tokens_length - logprob.shape[0] - 1), value=0.0 ) for logprob in logprobs ] - logprobs = torch.vstack(logprobs).to(torch.cuda.current_device()) - logprobs_left_padding = torch.zeros( - [logprobs.size(0), logprobs.size(1) - 1], dtype=logprobs.dtype, layout=logprobs.layout, device=logprobs.device) - logprobs = torch.cat([logprobs_left_padding, logprobs], dim=1) - - all_tokens = torch.cat([prompts_tokens_tensor, output_tokens_tensor], dim=1) + logprobs = torch.vstack(logprobs) prompt_sizes = torch.tensor([len(q) for q in no_padded_query_ids], device=all_tokens.device) - loss_mask = get_loss_mask(all_tokens, self.tokenizer.eos_token_id, prompt_sizes) - + loss_mask = get_loss_mask(all_tokens, self.tokenizer.tokenizer.eos_token_id, prompt_sizes) + loss_mask = loss_mask.to("cpu") return {"all_tokens": all_tokens, "str_outputs": str_outputs, "str_prompts": str_prompts, "no_padded_query_ids": 
no_padded_query_ids, "logprobs": logprobs, "loss_mask": loss_mask} diff --git a/examples/megatron/models/vllm_policy_model.py b/examples/megatron/models/vllm_policy_model.py deleted file mode 100644 index becdd032..00000000 --- a/examples/megatron/models/vllm_policy_model.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""vllm policy model""" - -import torch -from torch import nn - -from vllm.model_executor.models.llama import LlamaForCausalLM -from vllm.model_executor.models.qwen import QWenLMHeadModel -from vllm.model_executor.model_loader import _get_model_architecture - -from chatlearn.utils.vllm_utils import ( - convert_lamma_state_dict_from_megatron_to_vllm, - convert_qwen_state_dict_from_megatron_to_vllm, - load_checkpoint -) - - -class VLLMPolicyModel(nn.Module): - """VLLMPolicyModel""" - - def __init__(self, config, model_args): - super().__init__() - self.config = config - self.model_args = model_args - self.model_class = _get_model_architecture(config.hf_config) - self.model = self.model_class(config.hf_config) - - def load_weights(self): - torch.distributed.barrier() - load_checkpoint(self, None, None) - - def load_state_dict(self, state_dict, strict=True): # pylint: disable=arguments-differ - if isinstance(self.model, LlamaForCausalLM): - convert_state_dict_internal = 
convert_lamma_state_dict_from_megatron_to_vllm - elif isinstance(self.model, QWenLMHeadModel): - convert_state_dict_internal = convert_qwen_state_dict_from_megatron_to_vllm - else: - raise RuntimeError(f"Unsupported model for vllm backend. \ - support LlamaForCausalLM only, while {self.model_class}") - - state_dict = convert_state_dict_internal(self.model_args, self.config.hf_config) - super().load_state_dict(state_dict, strict=strict) - - def forward(self, *args, **kwargs): - return self.model(*args, **kwargs) diff --git a/examples/megatron/scripts/base_env.sh b/examples/megatron/scripts/base_env.sh new file mode 100644 index 00000000..ac29d04f --- /dev/null +++ b/examples/megatron/scripts/base_env.sh @@ -0,0 +1,128 @@ +#!/bin/bash + +ray stop + +export CUDA_DEVICE_MAX_CONNECTIONS=1 +export NCCL_NVLS_ENABLE=0 +export NCCL_DEBUG=WARN + +[ -z "$MASTER_ADDR" ] && export MASTER_ADDR=localhost +[ -z "$WORLD_SIZE" ] && export WORLD_SIZE=1 +[ -z "$GPUS_PER_NODE" ] && export GPUS_PER_NODE=8 +[ -z "$RANK" ] && export RANK=0 +if [ -z "${CUSTOM_PORTS}" ]; then + set +x + ports="30000" + for i in $(seq 30001 30050); do + ports="${ports};${i}" + done + set -x + export CUSTOM_PORTS=$ports + [ -z "$LOCAL_MASTER_ADDR" ] && export LOCAL_MASTER_ADDR=$MASTER_ADDR + echo LOCAL_MASTER_ADDR=$MASTER_ADDR +fi + +if [ -z "${MEGATRON}" ]; then + echo "please set Megatron path" + exit 1 +fi +if [ -z "$CHATLEARN" ]; then + echo "please set CHATLEARN path" + exit 1 +fi +if [ -z "$DATASET_PATH" ]; then + echo "please set DATASET_PATH" + exit 1 +fi +if [ -z "model_size" ]; then + echo "please set model_size" + exit 1 +fi + +rm core* +rm ${MEGATRON}/megatron/fused_kernels/${build_path}/lock + +export PYTHONPATH=${MEGATRON}:${CHATLEARN}:${PYTHONPATH} +export num_gpu=$(($WORLD_SIZE * $GPUS_PER_NODE)) + +[ -z "$num_gpu_policy" ] && export num_gpu_policy=$num_gpu +[ -z "$num_gpu_ref" ] && export num_gpu_ref=$num_gpu +[ -z "$num_gpu_reward" ] && export num_gpu_reward=$num_gpu +[ -z "$num_gpu_value" ] 
&& export num_gpu_value=$num_gpu +[ -z "$num_gpu_ppo_policy" ] && export num_gpu_ppo_policy=$num_gpu +[ -z "$num_gpu_ppo_value" ] && export num_gpu_ppo_value=$num_gpu + +if [[ "$model_size" == "gpt-30B" ]]; then + export policy_num_layers=48 + export policy_hidden_size=7168 + export policy_num_attention_heads=56 + export reward_num_layers=48 + export reward_hidden_size=7168 + export reward_num_attention_heads=56 +elif [[ "$model_size" == "gpt-13B" ]]; then + export policy_num_layers=40 + export policy_hidden_size=5120 + export policy_num_attention_heads=40 + export reward_num_layers=40 + export reward_hidden_size=5120 + export reward_num_attention_heads=40 +elif [[ "$model_size" == "gpt-7B" ]]; then + export policy_num_layers=32 + export policy_hidden_size=4096 + export policy_num_attention_heads=32 + export reward_num_layers=32 + export reward_hidden_size=4096 + export reward_num_attention_heads=32 +elif [[ "$model_size" == "gpt-66B" ]]; then + export policy_num_layers=64 + export policy_hidden_size=9216 + export policy_num_attention_heads=72 + export reward_num_layers=64 + export reward_hidden_size=9216 + export reward_num_attention_heads=72 +elif [[ "$model_size" == "gpt-175B" ]]; then + export policy_num_layers=96 + export policy_hidden_size=12288 + export policy_num_attention_heads=96 + export reward_num_layers=96 + export reward_hidden_size=12288 + export reward_num_attention_heads=96 +elif [[ "$model_size" == "llama2-7B" ]]; then + export policy_num_layers=32 + export policy_hidden_size=4096 + export policy_num_attention_heads=32 + export policy_num_query_groups=32 + export policy_ffn_hidden_size=11008 + export reward_num_layers=32 + export reward_hidden_size=4096 + export reward_num_query_groups=32 + export reward_num_attention_heads=32 + export reward_ffn_hidden_size=11008 + export max_position_embedding=2048 +elif [[ "$model_size" == "llama2-13B" ]]; then + export policy_num_layers=40 + export policy_hidden_size=5120 + export policy_num_attention_heads=40 
+ export policy_ffn_hidden_size=13824 + export policy_num_query_groups=40 + export reward_num_layers=40 + export reward_hidden_size=5120 + export reward_num_attention_heads=40 + export reward_ffn_hidden_size=13824 + export reward_num_query_groups=40 +elif [[ "$model_size" == "llama2-70B" ]]; then + export policy_num_layers=80 + export policy_hidden_size=8192 + export policy_num_attention_heads=64 + export policy_ffn_hidden_size=28672 + export policy_num_query_groups=8 + export reward_num_layers=80 + export reward_hidden_size=8192 + export reward_num_attention_heads=64 + export reward_ffn_hidden_size=28672 + export reward_num_query_groups=8 + export group_query_attention=True +else + echo "unsupported model_size ${model_size}, please set your own model config" + exit 1 +fi diff --git a/examples/megatron/scripts/convert_hf_to_megatron.sh b/examples/megatron/scripts/convert_hf_to_megatron.sh new file mode 100644 index 00000000..83b1a240 --- /dev/null +++ b/examples/megatron/scripts/convert_hf_to_megatron.sh @@ -0,0 +1,59 @@ +#!/bin/bash +# Convert LLaMA2 model from huggingface format to megatron format. 
+ +set -x + +# model config +# can be `gpt_llama' for GPT or Llama, or `mixtral' for Mixtral +model=${MODEL:-'gpt_llama'} + +# parallel config +tp=${TP:-1} +pp=${PP:-1} +ep=${EP:-1} + +# checkpoint & tokenizer config +megatron=${MEGATRON} +load_dir=${LOAD_PATH} +save_dir=${SAVE_PATH} +tokenizer_model=${TOKENIZER_MODEL} +model_size=${MODEL_SIZE:-llama2-7B} + +export CUDA_DEVICE_MAX_CONNECTIONS=1 + +set +x + +# convert +START_TIME=$SECONDS + +if [[ ${model} == 'gpt_llama' ]]; then + cd ${megatron} + python tools/checkpoint/convert.py \ + --model-type GPT \ + --loader llama_mistral \ + --checkpoint-type hf \ + --model-size ${model_size} \ + --saver megatron \ + --target-tensor-parallel-size ${tp} \ + --target-pipeline-parallel-size ${pp} \ + --load-dir ${load_dir} \ + --save-dir ${save_dir} \ + --tokenizer-model ${tokenizer_model} +elif [[ ${model} == 'mixtral' ]]; then + # Mixtral can only be converted to mcore models. + # Require Megatron-LM commit id >= c7a1f82. + cd ${megatron} + python tools/checkpoint/convert.py \ + --model-type GPT \ + --loader loader_mixtral_hf \ + --saver mcore \ + --target-tensor-parallel-size ${tp} \ + --target-pipeline-parallel-size ${pp} \ + --target-expert-parallel-size ${ep} \ + --load-dir ${load_dir} \ + --save-dir ${save_dir} \ + --tokenizer-model ${tokenizer_model} +fi + +ELAPSED_TIME=$(($SECONDS - $START_TIME)) +echo "Conversion is done, using time: $(($ELAPSED_TIME / 60)) min $(($ELAPSED_TIME % 60)) sec" diff --git a/examples/megatron/scripts/convert_megatron_to_hf.sh b/examples/megatron/scripts/convert_megatron_to_hf.sh new file mode 100644 index 00000000..a95326dc --- /dev/null +++ b/examples/megatron/scripts/convert_megatron_to_hf.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# Convert LLaMA model from megatron format to huggingface format. 
+set -x + +# config +chatlearn=${CHATLEARN} +megatron=${MEGATRON} +load_path=${LOAD_PATH} +save_path=${SAVE_PATH} +vocab_path=${VOCAB_PATH} +target_params_dtype=${target_params_dtype:-bf16} +temp_path=${save_path}/temp + +set +x + +# convert parallel strategy +START_TIME=$SECONDS + +cd ${megatron} +python tools/checkpoint/convert.py \ + --model-type GPT \ + --loader megatron \ + --saver megatron \ + --target-tensor-parallel-size 1 \ + --target-pipeline-parallel-size 1 \ + --load-dir ${load_path} \ + --save-dir ${temp_path} \ + --megatron-path ${megatron} + +# convert to hf format +cd ${chatlearn} +python chatlearn/tools/megatron_to_hf.py \ + --load_path ${temp_path} \ + --save_path ${save_path} \ + --target_params_dtype ${target_params_dtype} \ + --vocab_dir ${vocab_path} \ + --megatron_path ${megatron} + +# clear temp path +rm -r $temp_path +ELAPSED_TIME=$(($SECONDS - $START_TIME)) +echo "Conversion is done, using time: $(($ELAPSED_TIME / 60)) min $(($ELAPSED_TIME % 60)) sec" diff --git a/examples/megatron/scripts/train_dpo_llama.sh b/examples/megatron/scripts/train_dpo_llama.sh new file mode 100644 index 00000000..05ed3cff --- /dev/null +++ b/examples/megatron/scripts/train_dpo_llama.sh @@ -0,0 +1,46 @@ +#!/bin/bash +set -x + +[ -z "$model_size" ] && export model_size=llama2-7B + +# Get the directory of the current script +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +source ${DIR}/base_env.sh + +export trainer_engine=dpo + +# clip +export clip_grad=5.0 + +# disable dropout +export attention_dropout=0.0 +export hidden_dropout=0.0 +export retro_encoder_hidden_dropout=0.0 +export retro_encoder_attention_dropout=0.0 + +export policy_tp=8 +export ppo_policy_pp=1 +export train_global_batch_size=128 +export ref_generation_batch_size=16 +export train_micro_batch_size=8 + +configs=$CHATLEARN/examples/megatron/configs/llama2/dpo.yaml + +[ -z "$exp_name" ] && export exp_name=$(date +%F)-${model_size}-${trainer_engine} +[ -z "$output_dir" ] && export
output_dir=${CHATLEARN}/output/ +[ -z "$sample_per_episode" ] && sample_per_episode=1024 + +output_dir=${output_dir}/${exp_name} +export data_checkpoint_path=${output_dir}/data_checkpoint +mkdir -p $output_dir +log_file=${output_dir}/log_${RANK}.log + +policy_inference_load=${POLICY_LOAD} \ +tokenizer_model=${TOKENIZER_MODEL} \ +num_gpu=${num_gpu} \ +data_path=${DATASET_PATH} \ +sample_per_episode=${sample_per_episode} \ +python entry/train_dpo.py -c $configs 2>&1 | tee -a ${log_file} ; exit ${PIPESTATUS[0]} + + diff --git a/examples/megatron/scripts/train_grpo_math_llama.sh b/examples/megatron/scripts/train_grpo_math_llama.sh new file mode 100644 index 00000000..91ffa59f --- /dev/null +++ b/examples/megatron/scripts/train_grpo_math_llama.sh @@ -0,0 +1,53 @@ +#!/bin/bash +set -x + +[ -z "$model_size" ] && export model_size=llama2-7B + +# Get the directory of the current script +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +source ${DIR}/base_env.sh + +export trainer_engine=online_dpo +model_name=llama + +export train_to_compare_num_responses=8 +export num_inference_per_prompt=8 +export ENABLE_VLLM=True +if [ -z "$tokenizer_load" ];then + echo "please set path to hf tokenizer for vllm backend, download from huggingface source." 
+ exit 1 +fi +configs=$CHATLEARN/examples/megatron/configs/llama2/grpo_math_vllm.yaml + +[ -z "$exp_name" ] && export exp_name=$(date +%F)-${model_size}-${trainer_engine} +[ -z "$output_dir" ] && export output_dir=${CHATLEARN}/output/ +[ -z "$sample_per_episode" ] && sample_per_episode=1024 +[ -z "$tokenizer_load" ] && export tokenizer_load=path-to-hf-tokenizer-for-vllm-backend +output_dir=${output_dir}/${exp_name} +export data_checkpoint_path=${output_dir}/data_checkpoint +mkdir -p $output_dir +log_file=${output_dir}/log_${RANK}.log + +export prompt_key=prompt + +export policy_tp=8 +export ppo_policy_pp=1 +export reward_tp=8 +export ppo_value_pp=1 + +trainer_engine=grpo \ +policy_inference_load=${POLICY_LOAD} \ +reward_load_iteration=${REWARD_LOAD_ITERATION} \ +reward_load=${REWARD_LOAD} \ +tokenizer_model=${TOKENIZER_MODEL} \ +num_gpu=${num_gpu} \ +data_path=${DATASET_PATH} \ +eval_data_path=${EVAL_DATASET_PATH} \ +sample_per_episode=${sample_per_episode} \ +train_global_batch_size=128 \ +generation_batch_size=128 \ +ref_generation_batch_size=16 \ +train_micro_batch_size=8 \ +python entry/train_grpo_math.py -c $configs 2>&1 | tee -a ${log_file} ; exit ${PIPESTATUS[0]} + diff --git a/examples/megatron/scripts/train_online_dpo_llama.sh b/examples/megatron/scripts/train_online_dpo_llama.sh new file mode 100644 index 00000000..0e99a1ee --- /dev/null +++ b/examples/megatron/scripts/train_online_dpo_llama.sh @@ -0,0 +1,78 @@ +#!/bin/bash +set -x + +[ -z "$model_size" ] && export model_size=llama2-7B + +# Get the directory of the current script +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +source ${DIR}/base_env.sh + +# megatron or vllm +backend=${1:-vllm} + +if [[ "$backend" != "megatron" ]] && [[ "$backend" != "vllm" ]]; then + echo "ERROR: expect megatron or vllm backend, while "$backend + exit 1 +fi + + +export trainer_engine=online_dpo +model_name=llama + +export train_to_compare_num_responses=8 +export num_inference_per_prompt=8 + + +if [[ "$model_size" 
== "llama2-7B" ]]; then + export policy_tp=8 + export ppo_policy_pp=1 + export reward_tp=8 + export ppo_value_pp=1 + export train_global_batch_size=128 + export generation_batch_size=128 + export ref_generation_batch_size=16 + export train_micro_batch_size=8 + export gpu_memory_utilization=0.9 +elif [[ "$model_size" == "llama2-13B" ]]; then + export policy_tp=8 + export ppo_policy_pp=2 + export reward_tp=8 + export ppo_value_pp=2 + export train_global_batch_size=128 + export generation_batch_size=64 + export ref_generation_batch_size=16 + export train_micro_batch_size=8 +fi + +if [[ "$backend" == "megatron" ]]; then + configs=$CHATLEARN/examples/megatron/configs/llama2/online_dpo.yaml +else + export ENABLE_VLLM=True + if [ -z "$tokenizer_load" ];then + echo "please set path to hf tokenizer for vllm backend, download from huggingface source." + exit 1 + fi + configs=$CHATLEARN/examples/megatron/configs/llama2/online_dpo_vllm.yaml +fi + +[ -z "$exp_name" ] && export exp_name=$(date +%F)-${model_size}-${trainer_engine} +[ -z "$output_dir" ] && export output_dir=${CHATLEARN}/output/ +[ -z "$sample_per_episode" ] && sample_per_episode=1024 + +output_dir=$output_dir/$exp_name +export data_checkpoint_path=${output_dir}/data_checkpoint + +mkdir -p ${output_dir} +log_file=${output_dir}/log_${RANK}.log + +policy_inference_load=${POLICY_LOAD} \ +reward_load_iteration=${REWARD_LOAD_ITERATION} \ +reward_load=${REWARD_LOAD} \ +tokenizer_model=${TOKENIZER_MODEL} \ +num_gpu=${num_gpu} \ +data_path=${DATASET_PATH} \ +eval_data_path=${EVAL_DATASET_PATH} \ +sample_per_episode=${sample_per_episode} \ +python entry/train_online_dpo.py -c $configs 2>&1 | tee -a ${log_file} ; exit ${PIPESTATUS[0]} + diff --git a/examples/megatron/step2_reward/llama2_reward.sh b/examples/megatron/scripts/train_reward_llama.sh similarity index 82% rename from examples/megatron/step2_reward/llama2_reward.sh rename to examples/megatron/scripts/train_reward_llama.sh index 46d3606b..472eb18e 100644 --- 
a/examples/megatron/step2_reward/llama2_reward.sh +++ b/examples/megatron/scripts/train_reward_llama.sh @@ -1,4 +1,5 @@ #!/bin/bash +set -x [ -z "$MASTER_ADDR" ] && export MASTER_ADDR=localhost [ -z "$WORLD_SIZE" ] && export WORLD_SIZE=1 @@ -17,34 +18,29 @@ export PYTHONPATH=${PYTHONPATH}:${MEGATRON}:${CHATLEARN}:${CHATLEARN}/examples/megatron -DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE \ - --nnodes $WORLD_SIZE \ +DISTRIBUTED_ARGS="--nproc_per_node ${GPUS_PER_NODE} \ + --nnodes ${WORLD_SIZE} \ --node_rank ${RANK} \ --master_addr ${MASTER_ADDR} \ --master_port ${MASTER_PORT}" -echo $DISTRIBUTED_ARGS -[ -z "$MODEL_SIZE" ] && export MODEL_SIZE=7B - -if [ $MODEL_SIZE = 7B ]; then +[ -z "$MODEL_SIZE" ] && export MODEL_SIZE=llama2-7B +if [ $MODEL_SIZE = llama2-7B ]; then NUM_LAYERS=32 HIDDEN_SIZE=4096 NUM_ATTN_HEADS=32 INTERMEDIATE_SIZE=11008 tp=4 pp=1 - -elif [ $MODEL_SIZE = 13B ]; then - +elif [ $MODEL_SIZE = llama2-13B ]; then NUM_LAYERS=40 HIDDEN_SIZE=5120 NUM_ATTN_HEADS=40 INTERMEDIATE_SIZE=13824 tp=8 pp=1 - -elif [ $MODEL_SIZE = 70B ]; then +elif [ $MODEL_SIZE = llama2-70B ]; then NUM_LAYERS=80 HIDDEN_SIZE=8192 NUM_ATTN_HEADS=64 @@ -70,8 +66,7 @@ NNODES=$WORLD_SIZE dp=$(($WORLD_SIZE * $GPUS_PER_NODE / $tp / $pp)) gbs=$(($gbs * $dp)) -CHECKPOINT_PATH=${CHATLEARN}/output/step2_reward/llama2reward_hh_$(date +%F)_gpt_${MODEL_SIZE}_${NNODES}w${GPUS_PER_NODE}g_tp${tp}_pp${pp}_mb${mb}_seqlen${seq_len} - +[ -z "$CHECKPOINT_PATH" ] && CHECKPOINT_PATH=${CHATLEARN}/output/reward/reward_hh_$(date +%F)_gpt_${MODEL_SIZE}_${NNODES}w${GPUS_PER_NODE}g_tp${tp}_pp${pp}_mb${mb}_seqlen${seq_len} MODEL_ARGS=" @@ -82,27 +77,29 @@ MODEL_ARGS=" --use-checkpoint-args \ --bf16 \ --untie-embeddings-and-output-weights \ +--disable-bias-linear \ --use-rotary-position-embeddings \ --normalization RMSNorm \ --no-position-embedding \ --no-masked-softmax-fusion \ ---no-query-key-layer-scaling " +--transformer-impl local \ +--attention-softmax-in-fp32 " mkdir -p $CHECKPOINT_PATH -echo 
$PARALLEL_ARGS - - log_file=$CHECKPOINT_PATH/stderr_$NODE_RANK.log export CUDA_DEVICE_MAX_CONNECTIONS=1 +cd ${CHATLEARN}/examples/megatron/reward + torchrun $DISTRIBUTED_ARGS \ - finetune_reward.py \ + entry/train_reward.py \ --tensor-model-parallel-size $tp \ --pipeline-model-parallel-size $pp \ --num-layers ${NUM_LAYERS} \ --hidden-size ${HIDDEN_SIZE} \ + --ffn-hidden-size ${INTERMEDIATE_SIZE} \ --num-attention-heads ${NUM_ATTN_HEADS} \ --seq-length $seq_len \ --micro-batch-size $mb \ @@ -133,6 +130,7 @@ torchrun $DISTRIBUTED_ARGS \ --num-workers 8 \ --no-load-rng \ --no-load-optim \ + --swiglu \ --log-timers-to-tensorboard \ --log-batch-size-to-tensorboard \ --log-validation-ppl-to-tensorboard \ diff --git a/examples/megatron/scripts/train_rlhf_gpt.sh b/examples/megatron/scripts/train_rlhf_gpt.sh new file mode 100644 index 00000000..9947ba45 --- /dev/null +++ b/examples/megatron/scripts/train_rlhf_gpt.sh @@ -0,0 +1,137 @@ +#!/bin/bash +set -x + + +[ -z "$model_size" ] && export model_size=gpt-7B + +# Get the directory of the current script +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +source ${DIR}/base_env.sh + + +[ -z "$max_new_tokens" ] && export max_new_tokens=512 +[ -z "$exp_name" ] && export exp_name=$(date +%F)-${model_size}-${trainer_engine} +[ -z "$output_dir" ] && export output_dir=${CHATLEARN}/output/ +[ -z "$DATA_DIR" ] && DATA_DIR=${output_dir}/gpt/ +output_dir=${output_dir}/${exp_name} +export data_checkpoint_path=${output_dir}/data_checkpoint + +mkdir -p $output_dir + +if [[ ! 
-f "${DATA_DIR}/gpt2-vocab.json" ]]; then + wget -P $DATA_DIR https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt + wget -P $DATA_DIR https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json +fi + +export max_seq_len=$(( max_new_tokens*2 )) +export lora=False + +# parallel strategy and batch size, please adjust them accordingly +if [[ "$model_size" == "gpt-7B" ]]; then + [ -z "$policy_tp" ] && export policy_tp=8 + [ -z "$ppo_policy_pp" ] && export ppo_policy_pp=1 + [ -z "$reward_tp" ] && export reward_tp=8 + [ -z "$ppo_value_pp" ] && export ppo_value_pp=1 + export batch_generation_min_prompt_length=32 + export num_gpu_ref=8 + export num_gpu_value=8 + [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=256 + [ -z "$ref_generation_bs" ] && export ref_generation_bs=32 + [ -z "$value_generation_bs" ] && export value_generation_bs=32 + [ -z "$reward_generation_bs" ] && export reward_generation_bs=32 + [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=8 + [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 +elif [[ "$model_size" == "gpt-13B" ]]; then + [ -z "$policy_tp" ] && export policy_tp=8 + [ -z "$ppo_policy_pp" ] && export ppo_policy_pp=2 + [ -z "$reward_tp" ] && export reward_tp=8 + [ -z "$ppo_value_pp" ] && export ppo_value_pp=2 + export batch_generation_min_prompt_length=32 + export num_gpu_ref=8 + export num_gpu_value=8 + [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=180 + [ -z "$ref_generation_bs" ] && export ref_generation_bs=64 + [ -z "$value_generation_bs" ] && export value_generation_bs=64 + [ -z "$reward_generation_bs" ] && export reward_generation_bs=64 + [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=8 + [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 +elif [[ "$model_size" == "gpt-30B" ]]; then + [ -z "$policy_tp" ] && export policy_tp=8 + [ -z "$ppo_policy_pp" ] && export ppo_policy_pp=4 + 
[ -z "$reward_tp" ] && export reward_tp=8 + [ -z "$ppo_value_pp" ] && export ppo_value_pp=4 + export num_gpu_ref=16 + export num_gpu_value=16 + export ref_pp=2 + export reward_pp=2 + export batch_generation_min_prompt_length=32 + export free_memory_ppo_value=True + export free_memory_ppo_policy=True + [ -z "$inference_batch_times_seqlen_threshold" ] && export inference_batch_times_seqlen_threshold=4096 + [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=64 + [ -z "$ref_generation_bs" ] && export ref_generation_bs=64 + [ -z "$value_generation_bs" ] && export value_generation_bs=32 + [ -z "$reward_generation_bs" ] && export reward_generation_bs=64 + [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=4 + [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 +elif [[ "$model_size" == "gpt-66B" ]]; then + export lora=True + [ -z "$policy_tp" ] && export policy_tp=8 + [ -z "$ppo_policy_pp" ] && export ppo_policy_pp=4 + [ -z "$reward_tp" ] && export reward_tp=8 + [ -z "$ppo_value_pp" ] && export ppo_value_pp=4 + export policy_recompute_activations=True + export policy_recompute_granularity=full + export value_recompute_activations=True + export value_recompute_granularity=full + export free_memory_ppo_value=True + export free_memory_ppo_policy=True + export batch_generation_min_prompt_length=32 + export num_gpu_ref=16 + export num_gpu_reward=32 + export num_gpu_value=16 + export ref_pp=2 + export reward_pp=2 + export inference_batch_times_seqlen_threshold=16384 + [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=64 + [ -z "$ref_generation_bs" ] && export ref_generation_bs=64 + [ -z "$value_generation_bs" ] && export value_generation_bs=32 + [ -z "$reward_generation_bs" ] && export reward_generation_bs=64 + [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=4 + [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 +elif [[ "$model_size" == "gpt-175B" 
]]; then + [ -z "$policy_tp" ] && export policy_tp=16 + [ -z "$ppo_policy_pp" ] && export ppo_policy_pp=8 + [ -z "$reward_tp" ] && export reward_tp=16 + [ -z "$ppo_value_pp" ] && export ppo_value_pp=8 + export free_memory_ppo_value=True + export free_memory_ppo_policy=True + export policy_recompute_activations=True + export policy_recompute_granularity=full + export value_recompute_activations=True + export value_recompute_granularity=full + export batch_generation_min_prompt_length=32 + export num_gpu_ref=64 + export num_gpu_reward=128 + export num_gpu_value=64 + export ref_pp=4 + export reward_pp=4 + export value_pp=4 + export inference_batch_times_seqlen_threshold=4096 + [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=64 + [ -z "$ref_generation_bs" ] && export ref_generation_bs=64 + [ -z "$value_generation_bs" ] && export value_generation_bs=64 + [ -z "$reward_generation_bs" ] && export reward_generation_bs=64 + [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=2 + [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 +fi +config_dir=${CHATLEARN}/examples/megatron/configs/ + +data_path=${DATASET_PATH} \ +vocab_file=${DATA_DIR}/gpt2-vocab.json \ +merge_file=${DATA_DIR}/gpt2-merges.txt \ +enable_lora_value=${lora} \ +enable_lora_policy=${lora} \ +tensorboard_dir=${TENSORBOARD_DIR} \ +python entry/train_rlhf.py -c ${config_dir}/gpt/rlhf.yaml 2>&1 | tee ${output_dir}/log_${RANK}.log ; exit ${PIPESTATUS[0]} \ No newline at end of file diff --git a/examples/megatron/scripts/train_rlhf_llama.sh b/examples/megatron/scripts/train_rlhf_llama.sh new file mode 100644 index 00000000..89eebc9f --- /dev/null +++ b/examples/megatron/scripts/train_rlhf_llama.sh @@ -0,0 +1,110 @@ +#!/bin/bash +set -x + +[ -z "$model_size" ] && export model_size=llama2-7B + +# Get the directory of the current script +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +source ${DIR}/base_env.sh + +# megatron or vllm 
+backend=${1:-vllm} +if [[ "$backend" != "megatron" ]] && [[ "$backend" != "vllm" ]]; then + echo "ERROR: expect megatron or vllm backend, while "$backend + exit 1 +fi + + +config_dir=${CHATLEARN}/examples/megatron/configs/ + +if [[ "$backend" == "megatron" ]]; then + configs=${config_dir}/llama2/rlhf.yaml +else + export ENABLE_VLLM=True + if [ -z "$tokenizer_load" ];then + echo "please set path to hf tokenizer for vllm backend, download from huggingface source." + exit 1 + fi + configs=${config_dir}/llama2/vllm_rlhf.yaml +fi + +export trainer_engine=rlhf + +[ -z "$exp_name" ] && export exp_name=$(date +%F)-${model_size}-${trainer_engine} +[ -z "$output_dir" ] && export output_dir=${CHATLEARN}/output/ +[ -z "$sample_per_episode" ] && sample_per_episode=1024 +[ -z "$tokenizer_load" ] && export tokenizer_load=path-to-hf-tokenizer-for-vllm-backend + +output_dir=${output_dir}/${exp_name} +export data_checkpoint_path=${output_dir}/data_checkpoint + + + +if [[ "$model_size" == "llama2-7B" ]]; then + export policy_tp=4 + export ppo_policy_pp=1 + export reward_tp=4 + export ppo_value_pp=1 + export train_global_batch_size=128 + export generation_batch_size=512 + export ref_generation_batch_size=64 + export value_generation_batch_size=64 + export reward_generation_batch_size=64 + export train_micro_batch_size=16 + export max_num_batched_tokens=65536 + export gpu_memory_utilization=0.9 + export num_gpu_ref=4 + export num_gpu_value=4 + export num_gpu_ppo_policy=4 + export num_gpu_ppo_value=4 + export free_memory_reward=True + export free_memory_ppo_policy=True + export free_memory_ppo_value=True +elif [[ "$model_size" == "llama2-13B" ]]; then + export policy_tp=8 + export ppo_policy_pp=2 + export reward_tp=8 + export ppo_value_pp=2 + export train_global_batch_size=128 + export generation_batch_size=64 + export ref_generation_batch_size=16 +elif [[ "$model_size" == "llama2-70B" ]]; then + export policy_tp=8 + export ppo_policy_pp=4 + export reward_tp=8 + export ppo_value_pp=4 + 
export num_gpu_ref=16 + export num_gpu_reward=32 + export num_gpu_value=16 + export train_global_batch_size=128 + export generation_batch_size=256 + export reward_generation_batch_size=32 + export ref_generation_batch_size=32 + export value_generation_batch_size=32 + export train_micro_batch_size=4 + export max_num_batched_tokens=65536 + export gpu_memory_utilization=0.75 + export free_memory_policy=True + export free_memory_reference=True + export free_memory_reward=True + export free_memory_value=True + export free_memory_ppo_policy=True + export free_memory_ppo_value=True +fi + +mkdir -p ${output_dir} +log_file=${output_dir}/log_${RANK}.log +echo $log_file + +policy_inference_load=${POLICY_LOAD} \ +reward_load_iteration=${REWARD_LOAD_ITERATION} \ +reward_load=${REWARD_LOAD} \ +tokenizer_model=${TOKENIZER_MODEL} \ +num_gpu=${num_gpu} \ +data_path=${DATASET_PATH} \ +eval_data_path=${EVAL_DATASET_PATH} \ +sample_per_episode=${sample_per_episode} \ +python entry/train_rlhf.py -c $configs 2>&1 | tee -a ${log_file} ; exit ${PIPESTATUS[0]} + + diff --git a/examples/megatron/step1_sft/llama2_sft.sh b/examples/megatron/scripts/train_sft_llama.sh similarity index 84% rename from examples/megatron/step1_sft/llama2_sft.sh rename to examples/megatron/scripts/train_sft_llama.sh index 42ac1da4..2914d922 100644 --- a/examples/megatron/step1_sft/llama2_sft.sh +++ b/examples/megatron/scripts/train_sft_llama.sh @@ -1,4 +1,5 @@ #!/bin/bash +set -x [ -z "$MASTER_ADDR" ] && export MASTER_ADDR=localhost [ -z "$WORLD_SIZE" ] && export WORLD_SIZE=1 @@ -6,14 +7,12 @@ [ -z "$RANK" ] && export RANK=0 [ -z "$MASTER_PORT" ] && export MASTER_PORT=12456 -DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE \ - --nnodes $WORLD_SIZE \ +DISTRIBUTED_ARGS="--nproc_per_node ${GPUS_PER_NODE} \ + --nnodes ${WORLD_SIZE} \ --node_rank ${RANK} \ --master_addr ${MASTER_ADDR} \ --master_port ${MASTER_PORT}" -echo $DISTRIBUTED_ARGS - # check the path [[ -z "${MEGATRON}" ]] && { echo "MEGATRON path is not set"; 
exit 1; } [[ -z "${CHATLEARN}" ]] && { echo "CHATLEARN path is not set"; exit 1; } @@ -23,29 +22,24 @@ echo $DISTRIBUTED_ARGS export PYTHONPATH=${PYTHONPATH}:${MEGATRON}:${CHATLEARN}/examples/megatron:${CHATLEARN} -echo $PYTHONPATH - -[ -z "$MODEL_SIZE" ] && export MODEL_SIZE=13B -if [ $MODEL_SIZE = 7B ]; then +[ -z "$MODEL_SIZE" ] && export MODEL_SIZE=llama2-7B +if [ $MODEL_SIZE = llama2-7B ]; then NUM_LAYERS=32 HIDDEN_SIZE=4096 NUM_ATTN_HEADS=32 INTERMEDIATE_SIZE=11008 tp=4 pp=1 - -elif [ $MODEL_SIZE = 13B ]; then - +elif [ $MODEL_SIZE = llama2-13B ]; then NUM_LAYERS=40 HIDDEN_SIZE=5120 NUM_ATTN_HEADS=40 INTERMEDIATE_SIZE=13824 tp=8 pp=1 - -elif [ $MODEL_SIZE = 70B ]; then +elif [ $MODEL_SIZE = llama2-70B ]; then NUM_LAYERS=80 HIDDEN_SIZE=8192 NUM_ATTN_HEADS=64 @@ -69,12 +63,11 @@ NODE_RANK=$RANK NNODES=$WORLD_SIZE - dp=$(($WORLD_SIZE * $GPUS_PER_NODE / $tp / $pp)) gbs=$(($gbs * $dp)) -[ -z "$CHECKPOINT_PATH" ] && CHECKPOINT_PATH=${CHATLEARN}/output/step1_sft/llama2_hh_sft_$(date +%F)_gpt_${MODEL_SIZE}_${NNODES}w${GPUS_PER_NODE}g_tp${tp}_pp${pp}_mb${mb}_seqlen${seq_len} +[ -z "$CHECKPOINT_PATH" ] && CHECKPOINT_PATH=${CHATLEARN}/output/sft/hh_sft_$(date +%F)_gpt_${MODEL_SIZE}_${NNODES}w${GPUS_PER_NODE}g_tp${tp}_pp${pp}_mb${mb}_seqlen${seq_len} mkdir -p $CHECKPOINT_PATH @@ -86,22 +79,27 @@ MODEL_ARGS=" --use-checkpoint-args \ --bf16 \ --untie-embeddings-and-output-weights \ +--disable-bias-linear \ --use-rotary-position-embeddings \ --normalization RMSNorm \ --no-position-embedding \ --no-masked-softmax-fusion \ ---no-query-key-layer-scaling " +--transformer-impl local \ +--attention-softmax-in-fp32 " log_file=$CHECKPOINT_PATH/stderr_$NODE_RANK.log export CUDA_DEVICE_MAX_CONNECTIONS=1 +cd ${CHATLEARN}/examples/megatron/sft + torchrun $DISTRIBUTED_ARGS \ - finetune_sft.py \ + entry/train_sft.py \ --tensor-model-parallel-size $tp \ --pipeline-model-parallel-size $pp \ --num-layers ${NUM_LAYERS} \ --hidden-size ${HIDDEN_SIZE} \ + --ffn-hidden-size ${INTERMEDIATE_SIZE} 
\ --num-attention-heads ${NUM_ATTN_HEADS} \ --seq-length $seq_len \ --micro-batch-size $mb \ @@ -130,6 +128,7 @@ torchrun $DISTRIBUTED_ARGS \ --num-workers 8 \ --no-load-rng \ --no-load-optim \ + --swiglu \ --log-timers-to-tensorboard \ --log-batch-size-to-tensorboard \ --log-validation-ppl-to-tensorboard \ diff --git a/examples/megatron/step1_sft/bloom_sft.sh b/examples/megatron/step1_sft/bloom_sft.sh deleted file mode 100644 index 0bf85cca..00000000 --- a/examples/megatron/step1_sft/bloom_sft.sh +++ /dev/null @@ -1,134 +0,0 @@ -#!/bin/bash - -[ -z "$MASTER_ADDR" ] && export MASTER_ADDR=localhost -[ -z "$WORLD_SIZE" ] && export WORLD_SIZE=1 -[ -z "$GPUS_PER_NODE" ] && export GPUS_PER_NODE=4 -[ -z "$RANK" ] && export RANK=0 -[ -z "$MASTER_PORT" ] && export MASTER_PORT=12456 - -DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE \ - --nnodes $WORLD_SIZE \ - --node_rank ${RANK} \ - --master_addr ${MASTER_ADDR} \ - --master_port ${MASTER_PORT}" - -echo $DISTRIBUTED_ARGS - -# check the path -[[ -z "${MEGATRON}" ]] && { echo "MEGATRON path is not set"; exit 1; } -[[ -z "${CHATLEARN}" ]] && { echo "CHATLEARN path is not set"; exit 1; } -[[ -z "${LOAD_PATH}" ]] && { echo "LOAD_PATH is not set"; exit 1; } -[[ -z "${TOKENIZER_PATH}" ]] && { echo "TOKENIZER_PATH is not set"; exit 1; } -[[ -z "${DATASET_PATH}" ]] && { echo "DATASET_PATH is not set"; exit 1; } - - -export PYTHONPATH=${PYTHONPATH}:${MEGATRON}:${CHATLEARN}/examples/megatron - - -[ -z "$MODEL_SIZE" ] && export MODEL_SIZE=7B1 - -if [ $MODEL_SIZE = 1B1 ]; then - - NUM_LAYERS=24 - HIDDEN_SIZE=1536 - NUM_ATTN_HEADS=16 - INTERMEDIATE_SIZE=6144 - tp=4 - pp=1 - -elif [ $MODEL_SIZE = 1B7 ]; then - - NUM_LAYERS=24 - HIDDEN_SIZE=2048 - NUM_ATTN_HEADS=16 - INTERMEDIATE_SIZE=8192 - tp=4 - pp=1 - -elif [ $MODEL_SIZE = 7B1 ]; then - - NUM_LAYERS=30 - HIDDEN_SIZE=4096 - NUM_ATTN_HEADS=32 - INTERMEDIATE_SIZE=16384 - tp=8 - pp=1 - -fi - -mb=8 -gbs=$((mb * 8)) -seq_len=2048 - -DIR=$(pwd) -DATETIME=$(date 
+'date_%y-%m-%d_time_%H-%M-%S') -mkdir -p $DIR/logs - -NODE_RANK=$RANK -NNODES=$WORLD_SIZE - - - -dp=$(($WORLD_SIZE * $GPUS_PER_NODE / $tp / $pp)) -gbs=$(($gbs * $dp)) - - -[ -z "$CHECKPOINT_PATH" ] && CHECKPOINT_PATH=${CHATLEARN}/output/step1_sft/bloom_hh_sft_$(date +%F)_gpt_${MODEL_SIZE}_${NNODES}w${GPUS_PER_NODE}g_tp${tp}_pp${pp}_mb${mb}_seqlen${seq_len} - -mkdir -p $CHECKPOINT_PATH - - -MODEL_ARGS="--no-position-embedding --untie-embeddings-and-output-weights --use-alibi-position-embeddings --tokenizer-type AutoTokenizer --embed-layernorm" - -log_file=$CHECKPOINT_PATH/stderr_$NODE_RANK.log - -export CUDA_DEVICE_MAX_CONNECTIONS=1 - -python -m torch.distributed.launch $DISTRIBUTED_ARGS \ - finetune_sft.py \ - --tensor-model-parallel-size $tp \ - --pipeline-model-parallel-size $pp \ - --num-layers ${NUM_LAYERS} \ - --hidden-size ${HIDDEN_SIZE} \ - --num-attention-heads ${NUM_ATTN_HEADS} \ - --seq-length $seq_len \ - --max-position-embeddings 2048 \ - --micro-batch-size $mb \ - --global-batch-size $gbs \ - --train-iters 1000 --exit-interval 100000 \ - --lr-decay-iters 1000 \ - --lr-warmup-iters 40 \ - --lr 2.0e-5 \ - --min-lr 6.0e-12 \ - --lr-decay-style cosine \ - --log-interval 1 \ - --eval-iters 10 \ - --eval-interval 1000 \ - --data-path $DATASET_PATH/train.jsonl $DATASET_PATH/train.jsonl $DATASET_PATH/train.jsonl \ - --save-interval 100000 \ - --save $CHECKPOINT_PATH \ - --load $LOAD_PATH \ - --tensorboard-log-interval 100 \ - --split 98,2,0 \ - --clip-grad 1.0 \ - --weight-decay 0. 
\ - --adam-beta1 0.9 \ - --adam-beta2 0.999 \ - --init-method-std 0.006 \ - --tensorboard-dir $CHECKPOINT_PATH \ - --num-workers 4 \ - --vocab-file $TOKENIZER_PATH \ - --make-vocab-size-divisible-by 128 \ - --ffn-hidden-size $INTERMEDIATE_SIZE \ - --no-load-args \ - --no-load-rng \ - --no-load-optim \ - --log-timers-to-tensorboard \ - --log-batch-size-to-tensorboard \ - --log-validation-ppl-to-tensorboard \ - --dataloader-type cyclic \ - --bf16 \ - --use-distributed-optimizer \ - --adaptive-parallel-strategy-on-checkpoint \ - --sequence-parallel \ - $MODEL_ARGS 2>&1 | tee -a ${log_file} ; exit ${PIPESTATUS[0]} diff --git a/examples/megatron/step2_reward/bloom_reward.sh b/examples/megatron/step2_reward/bloom_reward.sh deleted file mode 100644 index 2c576951..00000000 --- a/examples/megatron/step2_reward/bloom_reward.sh +++ /dev/null @@ -1,135 +0,0 @@ -#!/bin/bash - -[ -z "$MASTER_ADDR" ] && export MASTER_ADDR=localhost -[ -z "$WORLD_SIZE" ] && export WORLD_SIZE=1 -[ -z "$GPUS_PER_NODE" ] && export GPUS_PER_NODE=4 -[ -z "$RANK" ] && export RANK=0 -[ -z "$MASTER_PORT" ] && export MASTER_PORT=12355 - -pip install sentencepiece - -# check the path -[[ -z "${MEGATRON}" ]] && { echo "MEGATRON path is not set"; exit 1; } -[[ -z "${CHATLEARN}" ]] && { echo "CHATLEARN path is not set"; exit 1; } -[[ -z "${LOAD_PATH}" ]] && { echo "LOAD_PATH is not set"; exit 1; } -[[ -z "${TOKENIZER_PATH}" ]] && { echo "TOKENIZER_PATH is not set"; exit 1; } -[[ -z "${DATASET_PATH}" ]] && { echo "DATASET_PATH is not set"; exit 1; } - - -export PYTHONPATH=${PYTHONPATH}:${MEGATRON}:${CHATLEARN}:${CHATLEARN}/examples/megatron - -DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE \ - --nnodes $WORLD_SIZE \ - --node_rank ${RANK} \ - --master_addr ${MASTER_ADDR} \ - --master_port ${MASTER_PORT}" - -echo $DISTRIBUTED_ARGS -[ -z "$MODEL_SIZE" ] && export MODEL_SIZE=7B1 - -if [ $MODEL_SIZE = 1B1 ]; then - - NUM_LAYERS=24 - HIDDEN_SIZE=1536 - NUM_ATTN_HEADS=16 - INTERMEDIATE_SIZE=6144 - tp=4 - pp=1 - 
-elif [ $MODEL_SIZE = 1B7 ]; then - - NUM_LAYERS=24 - HIDDEN_SIZE=2048 - NUM_ATTN_HEADS=16 - INTERMEDIATE_SIZE=8192 - tp=4 - pp=1 - -elif [ $MODEL_SIZE = 7B1 ]; then - - NUM_LAYERS=30 - HIDDEN_SIZE=4096 - NUM_ATTN_HEADS=32 - INTERMEDIATE_SIZE=16384 - tp=8 - pp=1 - -fi - -mb=4 -gbs=$((mb * 16)) -seq_len=2048 - -DIR=$(pwd) -DATETIME=$(date +'date_%y-%m-%d_time_%H-%M-%S') -mkdir -p $DIR/logs - -NODE_RANK=$RANK -NNODES=$WORLD_SIZE - - -dp=$(($WORLD_SIZE * $GPUS_PER_NODE / $tp / $pp)) -gbs=$(($gbs * $dp)) - -CHECKPOINT_PATH=${CHATLEARN}/output/step2_reward/bloomsft_hh_rm_$(date +%F)_gpt_${MODEL_SIZE}_${NNODES}w${GPUS_PER_NODE}g_tp${tp}_pp${pp}_mb${mb}_seqlen${seq_len} - -MODEL_ARGS="--no-position-embedding --untie-embeddings-and-output-weights --use-alibi-position-embeddings --tokenizer-type AutoTokenizer --embed-layernorm" - -mkdir -p $CHECKPOINT_PATH - -echo $PARALLEL_ARGS - - -log_file=$CHECKPOINT_PATH/stderr_$NODE_RANK.log - -export CUDA_DEVICE_MAX_CONNECTIONS=1 - -python -m torch.distributed.launch $DISTRIBUTED_ARGS \ - finetune_reward.py \ - --tensor-model-parallel-size $tp \ - --pipeline-model-parallel-size $pp \ - --num-layers ${NUM_LAYERS} \ - --hidden-size ${HIDDEN_SIZE} \ - --num-attention-heads ${NUM_ATTN_HEADS} \ - --seq-length $seq_len \ - --max-position-embeddings 2048 \ - --micro-batch-size $mb \ - --global-batch-size $gbs \ - --train-iters 3600 --exit-interval 100000 \ - --lr-decay-iters 3600 \ - --lr-warmup-iters 300 \ - --lr 1.0e-5 \ - --min-lr 6.0e-12 \ - --max-response 2 \ - --select-max-response 'firstk' \ - --lr-decay-style cosine \ - --log-interval 1 \ - --eval-iters 20 \ - --eval-interval 1000 \ - --data-path $DATASET_PATH/train.jsonl $DATASET_PATH/dev.jsonl $DATASET_PATH/dev.jsonl \ - --save-interval 1000 \ - --save $CHECKPOINT_PATH \ - --load $LOAD_PATH \ - --tensorboard-log-interval 100 \ - --split 98,2,0 \ - --clip-grad 1.0 \ - --weight-decay 0.1 \ - --adam-beta1 0.9 \ - --adam-beta2 0.95 \ - --init-method-std 0.006 \ - --tensorboard-dir 
$CHECKPOINT_PATH \ - --num-workers 4 \ - --vocab-file $TOKENIZER_PATH \ - --make-vocab-size-divisible-by 128 \ - --ffn-hidden-size $INTERMEDIATE_SIZE \ - --no-load-args \ - --no-load-rng \ - --no-load-optim \ - --log-timers-to-tensorboard \ - --log-batch-size-to-tensorboard \ - --log-validation-ppl-to-tensorboard \ - --dataloader-type cyclic \ - --bf16 \ - --use-distributed-optimizer \ - --sequence-parallel \ - $MODEL_ARGS 2>&1 | tee -a ${log_file} ; exit ${PIPESTATUS[0]} diff --git a/examples/megatron/step3_rlhf/.gitignore b/examples/megatron/step3_rlhf/.gitignore deleted file mode 100644 index 98d8a5a6..00000000 --- a/examples/megatron/step3_rlhf/.gitignore +++ /dev/null @@ -1 +0,0 @@ -logs diff --git a/examples/megatron/step3_rlhf/configs/bloom/base.yaml b/examples/megatron/step3_rlhf/configs/bloom/base.yaml deleted file mode 100644 index c49ec1a7..00000000 --- a/examples/megatron/step3_rlhf/configs/bloom/base.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# bloom config -add_position_embedding: False -use_alibi_position_embeddings: True -embed_layernorm: True -untie_embeddings_and_output_weights: True -tokenizer_type: AutoTokenizer -vocab_file: ${vocab_file} -max_position_embeddings: 2048 -bf16: True -seq_length: 1536 -fix_kl_coef: ${fix_kl_coef:True} -log_dir: ${log_dir} -exp_name: ${exp_name:test} -tensorboard_dir: ${tensorboard_dir} -loss_on_prompts: ${loss_on_prompts:False} -numerical_stable: False - -build_path: ${build_path:build} - - -init_kl_coef: ${init_kl_coef:0.02} -target: 6 -horizon: 10000 -gamma: 1 -lam: 0.95 -cliprange: 0.2 -cliprange_value: ${cliprange_value:0.1} -scale_reward: "None" - -cliprange_reward: 100 - -max_new_tokens: 512 - - -ngram_coef: ${ngram_coef:1} -lm_coef: ${lm_coef:0} -math_coef: ${math_coef:0} -raw_reward_coeff: ${raw_reward_coeff:1} - -clipped_value_only: ${clipped_value_only:1} -num_inference_per_prompt: ${num_inference_per_prompt:1} - - -save: ${save_dir} -save_interval: 1000 -gradient_accumulation_fusion: 0 -max_tokens_to_oom: 
99999999 - - -hysteresis: 2 -use_flash_attn: 0 -make_ffn_dim_multiple_of: 256 -pos_emb: alibi -make_vocab_size_divisible_by: 128 -do_math_eval: 0 -log_entropy: False -adaptive_parallel_strategy_on_checkpoint: True -log_interval: 1 -distributed_timeout_minutes: 30 diff --git a/examples/megatron/step3_rlhf/configs/bloom/base_inference.yaml b/examples/megatron/step3_rlhf/configs/bloom/base_inference.yaml deleted file mode 100644 index d9395c67..00000000 --- a/examples/megatron/step3_rlhf/configs/bloom/base_inference.yaml +++ /dev/null @@ -1,15 +0,0 @@ -includes: - - base.yaml - - -temperature: 1.0 -seed: 42 -no_load_optim: True -no_load_rng: True -no_load_args: True -no_load_scheduler: True -log_num_zeros_in_grad: True -attention_dropout: 0.0 -hidden_dropout: 0.0 -retro_encoder_attention_dropout: 0.0 -retro_encoder_hidden_dropout: 0.0 diff --git a/examples/megatron/step3_rlhf/configs/bloom/base_train.yaml b/examples/megatron/step3_rlhf/configs/bloom/base_train.yaml deleted file mode 100644 index db3666e2..00000000 --- a/examples/megatron/step3_rlhf/configs/bloom/base_train.yaml +++ /dev/null @@ -1,10 +0,0 @@ -includes: - - base.yaml - - -distributed_backend: nccl -train_iters: 12000 - -clip_grad: 0.5 -log_interval: 1 -log_num_zeros_in_grad: True diff --git a/examples/megatron/step3_rlhf/configs/bloom/eval.yaml b/examples/megatron/step3_rlhf/configs/bloom/eval.yaml deleted file mode 100644 index e1c5c55f..00000000 --- a/examples/megatron/step3_rlhf/configs/bloom/eval.yaml +++ /dev/null @@ -1,27 +0,0 @@ -runtime_env: - platform: DLC - excludes: - - "*pt" - - "logs" - - "tensorboards" - - ".nfs*" - - -models: - policy: - model_config_file: old_policy_inference.yaml - num_device: ${num_device:8} - gpu_per_process: 1 - trainable: False - - reward: - model_config_file: reward_inference.yaml - num_device: ${num_device:8} - gpu_per_process: 1 - trainable: False - -rlhf: - colocation: - - policy,reward - generation_batch_size: ${generation_batch_size:4} - data_path: \ No 
newline at end of file diff --git a/examples/megatron/step3_rlhf/configs/bloom/old_policy_inference.yaml b/examples/megatron/step3_rlhf/configs/bloom/old_policy_inference.yaml deleted file mode 100644 index bf5c76a2..00000000 --- a/examples/megatron/step3_rlhf/configs/bloom/old_policy_inference.yaml +++ /dev/null @@ -1,13 +0,0 @@ -includes: - - base_inference.yaml - - policy_shared.yaml - - -top_p: ${policy_top_p:0.9} -top_k: ${policy_top_k:0} -temperature: ${policy_temperature:1.0} - -eval_temperature: 0.01 -use_attn_acc: ${use_attn_acc:False} -eval_top_k: 1 -eval_top_p: 0 diff --git a/examples/megatron/step3_rlhf/configs/bloom/old_value_inference.yaml b/examples/megatron/step3_rlhf/configs/bloom/old_value_inference.yaml deleted file mode 100644 index ba2e2bdd..00000000 --- a/examples/megatron/step3_rlhf/configs/bloom/old_value_inference.yaml +++ /dev/null @@ -1,3 +0,0 @@ -includes: - - base_inference.yaml - - reward_shared.yaml diff --git a/examples/megatron/step3_rlhf/configs/bloom/policy_shared.yaml b/examples/megatron/step3_rlhf/configs/bloom/policy_shared.yaml deleted file mode 100644 index 89cdca11..00000000 --- a/examples/megatron/step3_rlhf/configs/bloom/policy_shared.yaml +++ /dev/null @@ -1,8 +0,0 @@ -load: ${policy_inference_load} -num_layers: ${policy_num_layers} -hidden_size: ${policy_hidden_size} -num_attention_heads: ${policy_num_attention_heads} -ffn_hidden_size: ${policy_ffn_hidden_size} -tensor_model_parallel_size: ${policy_tp:8} -use_distributed_optimizer: True - diff --git a/examples/megatron/step3_rlhf/configs/bloom/ppo_policy.yaml b/examples/megatron/step3_rlhf/configs/bloom/ppo_policy.yaml deleted file mode 100644 index 0274ab1b..00000000 --- a/examples/megatron/step3_rlhf/configs/bloom/ppo_policy.yaml +++ /dev/null @@ -1,29 +0,0 @@ -includes: - - base_train.yaml - - policy_shared.yaml - - -bf16: True -use_checkpoint_opt_param_scheduler: False -adam_beta1: 0.9 -adam_beta2: 0.95 -num_workers: 8 -init_method_std: 0.006 - 
-recompute_granularity: selective -sequence_parallel: True - -log_num_zeros_in_grad: True -no_load_optim: True -no_load_rng: True -no_load_args: True -no_load_scheduler: True - - -lr_decay_iters: 12000 -lr_warmup_iters: 100 -lr: ${policy_lr:2.4e-7} -min_lr: ${policy_min_lr:1e-9} -lr_decay_style: ${policy_lr_decay_style:linear} -weight_decay: 0.01 -pipeline_model_parallel_size: ${ppo_policy_pp:1} \ No newline at end of file diff --git a/examples/megatron/step3_rlhf/configs/bloom/ppo_value.yaml b/examples/megatron/step3_rlhf/configs/bloom/ppo_value.yaml deleted file mode 100644 index b21c9a78..00000000 --- a/examples/megatron/step3_rlhf/configs/bloom/ppo_value.yaml +++ /dev/null @@ -1,27 +0,0 @@ -includes: - - base_train.yaml - - reward_shared.yaml - -pipeline_model_parallel_size: ${ppo_value_pp:1} -lr_decay_iters: 12000 -lr_warmup_iters: 100 -lr: ${value_lr:5e-6} -min_lr: ${value_min_lr:5e-7} -lr_decay_style: ${value_lr_decay_style:linear} -weight_decay: 0.01 -log_interval: 1 - -use_checkpoint_opt_param_scheduler: False -adam_beta1: 0.9 -adam_beta2: 0.95 -num_workers: 8 -init_method_std: 0.006 - -recompute_granularity: selective -sequence_parallel: True - -no_load_optim: True -no_load_rng: True -no_load_args: True -no_load_scheduler: True -dummy: 0 diff --git a/examples/megatron/step3_rlhf/configs/bloom/reference.yaml b/examples/megatron/step3_rlhf/configs/bloom/reference.yaml deleted file mode 100644 index e9a74955..00000000 --- a/examples/megatron/step3_rlhf/configs/bloom/reference.yaml +++ /dev/null @@ -1,5 +0,0 @@ -includes: - - base_inference.yaml - - policy_shared.yaml - -parallel_output: True diff --git a/examples/megatron/step3_rlhf/configs/bloom/reward_inference.yaml b/examples/megatron/step3_rlhf/configs/bloom/reward_inference.yaml deleted file mode 100644 index 9defcfaf..00000000 --- a/examples/megatron/step3_rlhf/configs/bloom/reward_inference.yaml +++ /dev/null @@ -1,6 +0,0 @@ -includes: - - base_inference.yaml - - reward_shared.yaml - -tokenizer_type: 
AutoTokenizer -reward_bias: 0 diff --git a/examples/megatron/step3_rlhf/configs/bloom/reward_shared.yaml b/examples/megatron/step3_rlhf/configs/bloom/reward_shared.yaml deleted file mode 100644 index e16da308..00000000 --- a/examples/megatron/step3_rlhf/configs/bloom/reward_shared.yaml +++ /dev/null @@ -1,12 +0,0 @@ -load: ${reward_load} -load_iteration: ${reward_load_iteration} -use_distributed_optimizer: True - -num_layers: ${reward_num_layers} -hidden_size: ${reward_hidden_size} -num_attention_heads: ${reward_num_attention_heads} -ffn_hidden_size: ${reward_ffn_hidden_size} -tensor_model_parallel_size: ${reward_tp:8} - -save_inference: True -save_inference_interval: 10 \ No newline at end of file diff --git a/examples/megatron/step3_rlhf/configs/bloom/test_reward.yaml b/examples/megatron/step3_rlhf/configs/bloom/test_reward.yaml deleted file mode 100644 index 5a7ca532..00000000 --- a/examples/megatron/step3_rlhf/configs/bloom/test_reward.yaml +++ /dev/null @@ -1,18 +0,0 @@ -runtime_env: - platform: DLC - excludes: - - "*pt" - - "logs" - - "tensorboards" - - ".nfs*" - - -models: - reward: - model_config_file: reward_inference.yaml - num_device: ${reward_device:1} - gpu_per_process: 1 - trainable: False -rlhf: - generation_batch_size: ${generation_batch_size:4} - eval_data_path: ${eval_data_path} \ No newline at end of file diff --git a/examples/megatron/step3_rlhf/configs/llama2/eval.yaml b/examples/megatron/step3_rlhf/configs/llama2/eval.yaml deleted file mode 100644 index e1c5c55f..00000000 --- a/examples/megatron/step3_rlhf/configs/llama2/eval.yaml +++ /dev/null @@ -1,27 +0,0 @@ -runtime_env: - platform: DLC - excludes: - - "*pt" - - "logs" - - "tensorboards" - - ".nfs*" - - -models: - policy: - model_config_file: old_policy_inference.yaml - num_device: ${num_device:8} - gpu_per_process: 1 - trainable: False - - reward: - model_config_file: reward_inference.yaml - num_device: ${num_device:8} - gpu_per_process: 1 - trainable: False - -rlhf: - colocation: - - 
policy,reward - generation_batch_size: ${generation_batch_size:4} - data_path: \ No newline at end of file diff --git a/examples/megatron/step3_rlhf/configs/llama2/test_policy.yaml b/examples/megatron/step3_rlhf/configs/llama2/test_policy.yaml deleted file mode 100644 index 086d18ce..00000000 --- a/examples/megatron/step3_rlhf/configs/llama2/test_policy.yaml +++ /dev/null @@ -1,24 +0,0 @@ -runtime_env: - platform: DLC - excludes: - - "*pt" - - "logs" - - "tensorboards" - - ".nfs*" - - -models: - policy: - model_config_file: old_policy_inference.yaml - num_device: ${num_device:1} - gpu_per_process: 1 - trainable: False - batch_generation: - ranking: ${batch_generation_ranking:False} - min_prompt_length: ${batch_generation_min_prompt_length:0} - -rlhf: - generation_batch_size: ${generation_batch_size:4} - data_path: ${data_path} - eval_data_path: ${eval_data_path} - eval_output_dir: ${eval_output_dir} diff --git a/examples/megatron/step3_rlhf/configs/llama2/vllm_policy_inference.yaml b/examples/megatron/step3_rlhf/configs/llama2/vllm_policy_inference.yaml deleted file mode 100644 index 0803e238..00000000 --- a/examples/megatron/step3_rlhf/configs/llama2/vllm_policy_inference.yaml +++ /dev/null @@ -1,37 +0,0 @@ -includes: - - base_inference.yaml - - policy_shared.yaml - - -vllm_micro_batch_size: ${vllm_micro_batch_size:-1} - -# sampling params -n: 1 -ignore_eos: ${policy_ignore_eos:False} -top_p: ${policy_top_p:1.0} -top_k: ${policy_top_k:1} -temperature: ${policy_temperature:1.0} -use_beam_search: ${policy_use_beam_search:False} - -eval_temperature: 0.01 -use_attn_acc: ${use_attn_acc:False} -eval_top_k: 1 -eval_top_p: 0 - - -# sample config -# stop_token_list: stop token list -stop_token_list: {stop_token_list:None} -new_token_limit: {new_token_limit:None} - -# scheduler config -max_num_batched_tokens: 32768 -max_paddings: 512 - -# cache config -block_size: 16 -gpu_memory_utilization: 0.9 -swap_space : 4 -sliding_window: None - -tokenizer: ${tokenizer_load} \ No 
newline at end of file diff --git a/examples/megatron/step3_rlhf/run_scripts/bloom/base_env.sh b/examples/megatron/step3_rlhf/run_scripts/bloom/base_env.sh deleted file mode 100644 index aef05886..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/bloom/base_env.sh +++ /dev/null @@ -1,34 +0,0 @@ -source run_scripts/base_env.sh -export PYTHONPATH=${PYTHONPATH}:${CHATLEARN}/examples/megatron/step2_reward - -echo $PYTHONPATH - - -if [[ "$model_size" == "1B1" ]]; then - export policy_num_layers=24 - export policy_hidden_size=1536 - export policy_num_attention_heads=16 - export policy_ffn_hidden_size=6144 - export reward_num_layers=24 - export reward_hidden_size=1536 - export reward_num_attention_heads=16 - export reward_ffn_hidden_size=6144 -elif [[ "$model_size" == "1B7" ]]; then - export policy_num_layers=24 - export policy_hidden_size=2048 - export policy_num_attention_heads=16 - export policy_ffn_hidden_size=8192 - export reward_num_layers=24 - export reward_hidden_size=2048 - export reward_num_attention_heads=16 - export reward_ffn_hidden_size=8192 -elif [[ "$model_size" == "7B1" ]]; then - export policy_num_layers=30 - export policy_hidden_size=4096 - export policy_num_attention_heads=32 - export policy_ffn_hidden_size=16384 - export reward_num_layers=30 - export reward_hidden_size=4096 - export reward_num_attention_heads=32 - export reward_ffn_hidden_size=16384 -fi diff --git a/examples/megatron/step3_rlhf/run_scripts/bloom/run_1b1_1b1.sh b/examples/megatron/step3_rlhf/run_scripts/bloom/run_1b1_1b1.sh deleted file mode 100644 index 6919522d..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/bloom/run_1b1_1b1.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -set -x - -export model_size=1B1 -export policy_tp=4 -export ppo_policy_pp=1 -export reward_tp=4 -export ppo_value_pp=1 - -source run_scripts/bloom/base_env.sh - -cd ${CHATLEARN}/examples/megatron/step3_rlhf - -if [ -z "${exp_name}" ]; then - export exp_name=$(date 
+%F)_bloom-rlhf-${model_size}-${model_size} -fi - -[ -z "$OUTPUT_DIR" ] && OUTPUT_DIR=${CHATLEARN}/output/step3_rlhf/ -[ -z "$LOG_DIR" ] && LOG_DIR=${OUTPUT_DIR}/logs/${exp_name} -[ -z "$TENSORBOARD_DIR" ] && TENSORBOARD_DIR=${OUTPUT_DIR}/tensorboard/${exp_name} -[ -z "$SAVE_DIR" ] && SAVE_DIR=${OUTPUT_DIR}/save_model/${exp_name} - -mkdir -p ${LOG_DIR} - -policy_inference_load=${POLICY_LOAD} \ -reward_load_iteration=${REWARD_LOAD_ITERATION} \ -reward_load=${REWARD_LOAD} \ -vocab_file=${VOCAB_FILE} \ -num_device=${num_device} \ -log_dir=${LOG_DIR} \ -tensorboard_dir=${TENSORBOARD_DIR} \ -save_dir=${SAVE_DIR} \ -data_path=${DATASET_PATH} \ -sample_per_episode=1024 \ -train_global_batch_size=128 \ -generation_batch_size=64 \ -ref_generation_batch_size=16 \ -python train_rlhf.py -c configs/bloom/rlhf.yaml 2>&1 | tee -a ${LOG_DIR}/log_${RANK}.txt ; exit ${PIPESTATUS[0]} - diff --git a/examples/megatron/step3_rlhf/run_scripts/bloom/run_1b7_1b7.sh b/examples/megatron/step3_rlhf/run_scripts/bloom/run_1b7_1b7.sh deleted file mode 100644 index 90ba32c0..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/bloom/run_1b7_1b7.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -set -x - -export model_size=1B7 -export policy_tp=4 -export ppo_policy_pp=1 -export reward_tp=4 -export ppo_value_pp=1 - -source run_scripts/bloom/base_env.sh - -cd ${CHATLEARN}/examples/megatron/step3_rlhf - -if [ -z "${exp_name}" ]; then - export exp_name=$(date +%F)_bloom-rlhf-${model_size}-${model_size} -fi - -[ -z "$OUTPUT_DIR" ] && OUTPUT_DIR=${CHATLEARN}/output/step3_rlhf/ -[ -z "$LOG_DIR" ] && LOG_DIR=${OUTPUT_DIR}/logs/${exp_name} -[ -z "$TENSORBOARD_DIR" ] && TENSORBOARD_DIR=${OUTPUT_DIR}/tensorboard/${exp_name} -[ -z "$SAVE_DIR" ] && SAVE_DIR=${OUTPUT_DIR}/save_model/${exp_name} - -mkdir -p ${LOG_DIR} - -policy_inference_load=${POLICY_LOAD} \ -reward_load_iteration=${REWARD_LOAD_ITERATION} \ -reward_load=${REWARD_LOAD} \ -vocab_file=${VOCAB_FILE} \ -num_device=${num_device} \ 
-log_dir=${LOG_DIR} \ -tensorboard_dir=${TENSORBOARD_DIR} \ -save_dir=${SAVE_DIR} \ -data_path=${DATASET_PATH} \ -sample_per_episode=1024 \ -train_global_batch_size=128 \ -generation_batch_size=64 \ -ref_generation_batch_size=16 \ -python train_rlhf.py -c configs/bloom/rlhf.yaml 2>&1 | tee -a ${LOG_DIR}/log_${RANK}.txt ; exit ${PIPESTATUS[0]} - diff --git a/examples/megatron/step3_rlhf/run_scripts/bloom/run_7b1_7b1.sh b/examples/megatron/step3_rlhf/run_scripts/bloom/run_7b1_7b1.sh deleted file mode 100644 index 1d0aa9ea..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/bloom/run_7b1_7b1.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -set -x - -export model_size=7B1 -export policy_tp=8 -export ppo_policy_pp=1 -export reward_tp=8 -export ppo_value_pp=1 - -source run_scripts/bloom/base_env.sh - -cd ${CHATLEARN}/examples/megatron/step3_rlhf - -if [ -z "${exp_name}" ]; then - export exp_name=$(date +%F)_bloom-rlhf-${model_size}-${model_size} -fi - -[ -z "$OUTPUT_DIR" ] && OUTPUT_DIR=${CHATLEARN}/output/step3_rlhf/ -[ -z "$LOG_DIR" ] && LOG_DIR=${OUTPUT_DIR}/logs/${exp_name} -[ -z "$TENSORBOARD_DIR" ] && TENSORBOARD_DIR=${OUTPUT_DIR}/tensorboard/${exp_name} -[ -z "$SAVE_DIR" ] && SAVE_DIR=${OUTPUT_DIR}/save_model/${exp_name} - -mkdir -p ${LOG_DIR} - -policy_inference_load=${POLICY_LOAD} \ -reward_load_iteration=${REWARD_LOAD_ITERATION} \ -reward_load=${REWARD_LOAD} \ -vocab_file=${VOCAB_FILE} \ -num_device=${num_device} \ -log_dir=${LOG_DIR} \ -tensorboard_dir=${TENSORBOARD_DIR} \ -save_dir=${SAVE_DIR} \ -data_path=${DATASET_PATH} \ -sample_per_episode=1024 \ -train_global_batch_size=128 \ -generation_batch_size=64 \ -ref_generation_batch_size=16 \ -python train_rlhf.py -c configs/bloom/rlhf.yaml 2>&1 | tee -a ${LOG_DIR}/log_${RANK}.txt ; exit ${PIPESTATUS[0]} - diff --git a/examples/megatron/step3_rlhf/run_scripts/gpt/base_env.sh b/examples/megatron/step3_rlhf/run_scripts/gpt/base_env.sh deleted file mode 100644 index 18f51028..00000000 --- 
a/examples/megatron/step3_rlhf/run_scripts/gpt/base_env.sh +++ /dev/null @@ -1,56 +0,0 @@ -source run_scripts/base_env.sh - -[ -z "$num_device_policy" ] && export num_device_policy=$num_device -[ -z "$num_device_ref" ] && export num_device_ref=$num_device -[ -z "$num_device_reward" ] && export num_device_reward=$num_device -[ -z "$num_device_value" ] && export num_device_value=$num_device -[ -z "$num_device_ppo_policy" ] && export num_device_ppo_policy=$num_device -[ -z "$num_device_ppo_value" ] && export num_device_ppo_value=$num_device - -[ -z "$sample_per_episode" ] && export sample_per_episode=2048 -[ -z "$use_attn_acc" ] && export use_attn_acc=True -[ -z "$batch_generation_ranking" ] && export batch_generation_ranking=True - -if [[ "$model_size" == "small" ]]; then - export policy_num_layers=8 - export policy_hidden_size=1024 - export policy_num_attention_heads=16 - export reward_num_layers=8 - export reward_hidden_size=1024 - export reward_num_attention_heads=16 -elif [[ "$model_size" == "30B" ]]; then - export policy_num_layers=48 - export policy_hidden_size=7168 - export policy_num_attention_heads=56 - export reward_num_layers=48 - export reward_hidden_size=7168 - export reward_num_attention_heads=56 -elif [[ "$model_size" == "13B" ]]; then - export policy_num_layers=40 - export policy_hidden_size=5120 - export policy_num_attention_heads=40 - export reward_num_layers=40 - export reward_hidden_size=5120 - export reward_num_attention_heads=40 -elif [[ "$model_size" == "7B" ]]; then - export policy_num_layers=32 - export policy_hidden_size=4096 - export policy_num_attention_heads=32 - export reward_num_layers=32 - export reward_hidden_size=4096 - export reward_num_attention_heads=32 -elif [[ "$model_size" == "66B" ]]; then - export policy_num_layers=64 - export policy_hidden_size=9216 - export policy_num_attention_heads=72 - export reward_num_layers=64 - export reward_hidden_size=9216 - export reward_num_attention_heads=72 -elif [[ "$model_size" == "175B" ]]; 
then - export policy_num_layers=96 - export policy_hidden_size=12288 - export policy_num_attention_heads=96 - export reward_num_layers=96 - export reward_hidden_size=12288 - export reward_num_attention_heads=96 -fi diff --git a/examples/megatron/step3_rlhf/run_scripts/gpt/benchmark.sh b/examples/megatron/step3_rlhf/run_scripts/gpt/benchmark.sh deleted file mode 100644 index 80688d2f..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/gpt/benchmark.sh +++ /dev/null @@ -1,20 +0,0 @@ -[ -z "$DATA_DIR" ] && DATA_DIR=${CHATLEARN}/output/gpt/ -[ -z "$LOG_DIR" ] && LOG_DIR=${DATA_DIR}/logs -mkdir -p $LOG_DIR - -label=$(date +%F)_rlhf_${model_size}_${max_new_tokens}_${num_device}g_lora-${lora}-policy-tp${policy_tp}_pp${ppo_policy_pp}_reward-tp${reward_tp}-pp${ppo_value_pp}_bs${policy_generation_batch_size}_${ref_generation_bs}_${reward_generatiob_bs}_${value_generation_bs}_${train_micro_batch_size}_${train_global_batch_size}_device_${num_device_ref}_${num_device_reward}_${num_device_value}_ranking_${batch_generation_ranking}_min-${min_prompt_length}_${sample_per_episode}_thred${inference_batch_times_seqlen_threshold}_refpp${ref_pp}_rewardpp${reward_pp}_gc_${policy_recompute_granularity} -if [[ ! -f "${DATA_DIR}/rm_static_train.jsonl" ]]; then - wget -P $DATA_DIR http://odps-release.cn-hangzhou.oss.aliyun-inc.com/torchacc/accbench/datasets/opensource/rlhf/rm_static_train.jsonl -fi -if [[ ! 
-f "${DATA_DIR}/gpt2-vocab.json" ]]; then - wget -P $DATA_DIR https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt - wget -P $DATA_DIR https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json -fi -# use_eod_token_for_early_termination=False is for benchmark only, should set to True in real run -use_eod_token_for_early_termination=False \ -enable_lora_value=${lora} \ -enable_lora_policy=${lora} \ -data_path=${DATA_DIR}/rm_static_train.jsonl \ -vocab_file=${DATA_DIR}/gpt2-vocab.json \ -merge_file=${DATA_DIR}/gpt2-merges.txt \ -python train_rlhf.py -c configs/gpt/rlhf.yaml 2>&1 | tee ${LOG_DIR}/${label}_${RANK}.log ; exit ${PIPESTATUS[0]} diff --git a/examples/megatron/step3_rlhf/run_scripts/gpt/run_13b_13b_16g.sh b/examples/megatron/step3_rlhf/run_scripts/gpt/run_13b_13b_16g.sh deleted file mode 100644 index e909c274..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/gpt/run_13b_13b_16g.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - -set -x - - -export model_size=13B -source run_scripts/gpt/base_env.sh - -export max_new_tokens=${1} -export lora=${2} -export max_seq_len=$(( max_new_tokens*2 )) - - -if [[ "$lora" == "True" ]]; then - [ -z "$policy_tp" ] && export policy_tp=4 - [ -z "$ppo_policy_pp" ] && export ppo_policy_pp=4 - [ -z "$reward_tp" ] && export reward_tp=4 - [ -z "$ppo_value_pp" ] && export ppo_value_pp=4 - if [[ "$max_new_tokens" == "512" ]]; then - export batch_generation_min_prompt_length=64 - export num_device_ref=8 - export num_device_value=8 - export ref_pp=2 - export reward_pp=2 - export inference_batch_times_seqlen_threshold=4096 - [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=128 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=128 - [ -z "$value_generation_bs" ] && export value_generation_bs=64 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=128 - [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=8 - [ -z "$train_global_batch_size" ] && 
export train_global_batch_size=512 - fi -else - [ -z "$policy_tp" ] && export policy_tp=8 - [ -z "$ppo_policy_pp" ] && export ppo_policy_pp=2 - [ -z "$reward_tp" ] && export reward_tp=8 - [ -z "$ppo_value_pp" ] && export ppo_value_pp=2 - if [[ "$max_new_tokens" == "512" ]]; then - export batch_generation_min_prompt_length=32 - export num_device_ref=8 - export num_device_value=8 - [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=206 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=64 - [ -z "$value_generation_bs" ] && export value_generation_bs=64 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=64 - [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=8 - [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 - fi -fi - -bash run_scripts/gpt/benchmark.sh - diff --git a/examples/megatron/step3_rlhf/run_scripts/gpt/run_175b_175b_128g.sh b/examples/megatron/step3_rlhf/run_scripts/gpt/run_175b_175b_128g.sh deleted file mode 100644 index 8e74cfa1..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/gpt/run_175b_175b_128g.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash - -set -x - - -export model_size=175B -source run_scripts/gpt/base_env.sh - -export max_new_tokens=${1} -export lora=${2} -export max_seq_len=$(( max_new_tokens*2 )) - - -[ -z "$policy_tp" ] && export policy_tp=16 -[ -z "$ppo_policy_pp" ] && export ppo_policy_pp=8 -[ -z "$reward_tp" ] && export reward_tp=8 -[ -z "$ppo_value_pp" ] && export ppo_value_pp=8 -if [[ "$lora" == "True" ]]; then - if [[ "$max_new_tokens" == "512" ]]; then - export policy_recompute_activations=True - export policy_recompute_granularity=full - export value_recompute_activations=True - export value_recompute_granularity=full - export batch_generation_min_prompt_length=32 - export num_device_ref=64 - export num_device_reward=128 - export num_device_value=64 - export ref_pp=4 - export reward_pp=4 - export value_pp=4 - export 
inference_batch_times_seqlen_threshold=4096 - [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=64 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=64 - [ -z "$value_generation_bs" ] && export value_generation_bs=64 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=64 - [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=2 - [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 - elif [[ "$max_new_tokens" == "1024" ]]; then - export policy_recompute_activations=True - export policy_recompute_granularity=full - export value_recompute_activations=True - export value_recompute_granularity=full - export batch_generation_min_prompt_length=32 - export num_device_ref=64 - export num_device_reward=128 - export num_device_value=64 - export ref_pp=4 - export reward_pp=4 - export value_pp=4 - export inference_batch_times_seqlen_threshold=4096 - [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=32 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=32 - [ -z "$value_generation_bs" ] && export value_generation_bs=32 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=32 - [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=2 - [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 - fi -fi - -bash run_scripts/gpt/benchmark.sh - diff --git a/examples/megatron/step3_rlhf/run_scripts/gpt/run_30b_30b_32g.sh b/examples/megatron/step3_rlhf/run_scripts/gpt/run_30b_30b_32g.sh deleted file mode 100644 index bac779c3..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/gpt/run_30b_30b_32g.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash - -set -x - -export model_size=30B -source run_scripts/gpt/base_env.sh -export max_new_tokens=${1} -export lora=${2} -export max_seq_len=$(( max_new_tokens*2 )) - - -if [[ "$lora" == "True" ]]; then - [ -z "$policy_tp" ] && export policy_tp=8 - [ -z "$ppo_policy_pp" ] && export ppo_policy_pp=2 - 
[ -z "$reward_tp" ] && export reward_tp=8 - [ -z "$ppo_value_pp" ] && export ppo_value_pp=2 - if [[ "$max_new_tokens" == "512" ]]; then - export num_device_ref=16 - export num_device_value=16 - export ref_pp=2 - export reward_pp=2 - export num_device_ppo_policy=16 - export num_device_ppo_value=16 - export batch_generation_min_prompt_length=32 - [ -z "$inference_batch_times_seqlen_threshold" ] && export inference_batch_times_seqlen_threshold=4096 - [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=256 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=256 - [ -z "$value_generation_bs" ] && export value_generation_bs=64 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=256 - [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=8 - [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 - fi -else - [ -z "$policy_tp" ] && export policy_tp=8 - [ -z "$ppo_policy_pp" ] && export ppo_policy_pp=4 - [ -z "$reward_tp" ] && export reward_tp=8 - [ -z "$ppo_value_pp" ] && export ppo_value_pp=4 - if [[ "$max_new_tokens" == "512" ]]; then - export num_device_ref=16 - export num_device_value=16 - export ref_pp=2 - export reward_pp=2 - export batch_generation_min_prompt_length=32 - [ -z "$inference_batch_times_seqlen_threshold" ] && export inference_batch_times_seqlen_threshold=4096 - [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=86 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=86 - [ -z "$value_generation_bs" ] && export value_generation_bs=32 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=86 - [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=4 - [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 - fi -fi - -bash run_scripts/gpt/benchmark.sh - diff --git a/examples/megatron/step3_rlhf/run_scripts/gpt/run_66b_66b_128g.sh b/examples/megatron/step3_rlhf/run_scripts/gpt/run_66b_66b_128g.sh deleted file mode 
100644 index 8b379212..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/gpt/run_66b_66b_128g.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -set -x - - -export model_size=66B -source run_scripts/gpt/base_env.sh - -export max_new_tokens=${1} -export lora=${2} -export max_seq_len=$(( max_new_tokens*2 )) - - -[ -z "$policy_tp" ] && export policy_tp=8 -[ -z "$ppo_policy_pp" ] && export ppo_policy_pp=16 -[ -z "$reward_tp" ] && export reward_tp=8 -[ -z "$ppo_value_pp" ] && export ppo_value_pp=16 -if [[ "$lora" == "True" ]]; then - if [[ "$max_new_tokens" == "512" ]]; then - export batch_generation_min_prompt_length=32 - export num_device_ref=64 - export num_device_reward=128 - export num_device_value=64 - export ref_pp=4 - export reward_pp=8 - export value_pp=4 - export inference_batch_times_seqlen_threshold=4096 - [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=128 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=128 - [ -z "$value_generation_bs" ] && export value_generation_bs=128 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=128 - [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=4 - [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 - elif [[ "$max_new_tokens" == "1024" ]]; then - export batch_generation_min_prompt_length=32 - export num_device_ref=64 - export num_device_reward=128 - export num_device_value=64 - export ref_pp=4 - export reward_pp=8 - export value_pp=4 - export inference_batch_times_seqlen_threshold=4096 - [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=64 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=64 - [ -z "$value_generation_bs" ] && export value_generation_bs=64 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=64 - [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=4 - [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 - fi -fi - -bash 
run_scripts/gpt/benchmark.sh - diff --git a/examples/megatron/step3_rlhf/run_scripts/gpt/run_66b_66b_32g.sh b/examples/megatron/step3_rlhf/run_scripts/gpt/run_66b_66b_32g.sh deleted file mode 100644 index 1c1d32c8..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/gpt/run_66b_66b_32g.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -set -x - - -export model_size=66B -source run_scripts/gpt/base_env.sh - -export max_new_tokens=${1} -export lora=${2} -export max_seq_len=$(( max_new_tokens*2 )) - - -[ -z "$policy_tp" ] && export policy_tp=8 -[ -z "$ppo_policy_pp" ] && export ppo_policy_pp=4 -[ -z "$reward_tp" ] && export reward_tp=8 -[ -z "$ppo_value_pp" ] && export ppo_value_pp=4 -if [[ "$lora" == "True" ]]; then - if [[ "$max_new_tokens" == "512" ]]; then - export policy_recompute_activations=True - export policy_recompute_granularity=full - export value_recompute_activations=True - export value_recompute_granularity=full - export batch_generation_min_prompt_length=32 - export num_device_ref=16 - export num_device_reward=32 - export num_device_value=16 - export ref_pp=2 - export reward_pp=2 - export inference_batch_times_seqlen_threshold=16384 - [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=64 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=64 - [ -z "$value_generation_bs" ] && export value_generation_bs=32 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=64 - [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=4 - [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 - fi -fi - -bash run_scripts/gpt/benchmark.sh - diff --git a/examples/megatron/step3_rlhf/run_scripts/gpt/run_66b_66b_64g.sh b/examples/megatron/step3_rlhf/run_scripts/gpt/run_66b_66b_64g.sh deleted file mode 100644 index d32aed82..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/gpt/run_66b_66b_64g.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash - -set -x - - -export model_size=66B -source 
run_scripts/gpt/base_env.sh - -export max_new_tokens=${1} -export lora=${2} -export max_seq_len=$(( max_new_tokens*2 )) - - -[ -z "$policy_tp" ] && export policy_tp=8 -[ -z "$ppo_policy_pp" ] && export ppo_policy_pp=8 -[ -z "$reward_tp" ] && export reward_tp=8 -[ -z "$ppo_value_pp" ] && export ppo_value_pp=8 -if [[ "$lora" == "True" ]]; then - if [[ "$max_new_tokens" == "512" ]]; then - export policy_recompute_activations=True - export policy_recompute_granularity=full - export value_recompute_activations=True - export value_recompute_granularity=full - export batch_generation_min_prompt_length=32 - export num_device_ref=32 - export num_device_reward=64 - export num_device_value=32 - export ref_pp=4 - export reward_pp=8 - export value_pp=4 - export inference_batch_times_seqlen_threshold=4096 - [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=128 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=128 - [ -z "$value_generation_bs" ] && export value_generation_bs=128 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=128 - [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=4 - [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 - elif [[ "$max_new_tokens" == "1024" ]]; then - export policy_recompute_activations=True - export policy_recompute_granularity=full - export value_recompute_activations=True - export value_recompute_granularity=full - export batch_generation_min_prompt_length=32 - export num_device_ref=32 - export num_device_reward=64 - export num_device_value=32 - export ref_pp=4 - export reward_pp=8 - export value_pp=4 - export inference_batch_times_seqlen_threshold=4096 - [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=64 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=64 - [ -z "$value_generation_bs" ] && export value_generation_bs=64 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=64 - [ -z "$train_micro_batch_size" ] 
&& export train_micro_batch_size=2 - [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 - fi -fi - -bash run_scripts/gpt/benchmark.sh - diff --git a/examples/megatron/step3_rlhf/run_scripts/gpt/run_7b_7b_16g.sh b/examples/megatron/step3_rlhf/run_scripts/gpt/run_7b_7b_16g.sh deleted file mode 100644 index 3194b451..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/gpt/run_7b_7b_16g.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -set -x - -export model_size=7B -export sample_per_episode=4096 -source run_scripts/gpt/base_env.sh - -export max_new_tokens=${1} -export lora=${2} -export max_seq_len=$(( max_new_tokens*2 )) - -if [[ "$lora" == "True" ]]; then - [ -z "$policy_tp" ] && export policy_tp=4 - [ -z "$ppo_policy_pp" ] && export ppo_policy_pp=1 - [ -z "$reward_tp" ] && export reward_tp=4 - [ -z "$ppo_value_pp" ] && export ppo_value_pp=1 - if [[ "$max_new_tokens" == "512" ]]; then - export batch_generation_min_prompt_length=64 - export num_device_ref=8 - export num_device_value=8 - export num_device_ppo_policy=8 - export num_device_ppo_value=8 - export reward_use_distributed_optimizer=False - [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=256 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=64 - [ -z "$value_generation_bs" ] && export value_generation_bs=64 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=64 - [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=16 - [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 - fi -else - [ -z "$policy_tp" ] && export policy_tp=4 - [ -z "$ppo_policy_pp" ] && export ppo_policy_pp=1 - [ -z "$reward_tp" ] && export reward_tp=4 - [ -z "$ppo_value_pp" ] && export ppo_value_pp=1 - if [[ "$max_new_tokens" == "512" ]]; then - export batch_generation_min_prompt_length=32 - export num_device_ref=8 - export num_device_value=8 - export num_device_ppo_policy=8 - export num_device_ppo_value=8 - [ -z 
"$policy_generation_batch_size" ] && export policy_generation_batch_size=128 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=32 - [ -z "$value_generation_bs" ] && export value_generation_bs=32 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=32 - [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=8 - [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 - fi -fi - -bash run_scripts/gpt/benchmark.sh - diff --git a/examples/megatron/step3_rlhf/run_scripts/gpt/run_7b_7b_32g.sh b/examples/megatron/step3_rlhf/run_scripts/gpt/run_7b_7b_32g.sh deleted file mode 100644 index 8d85fd53..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/gpt/run_7b_7b_32g.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -set -x - -export model_size=7B -export sample_per_episode=8192 -source run_scripts/gpt/base_env.sh - -export max_new_tokens=${1} -export lora=${2} -export max_seq_len=$(( max_new_tokens*2 )) - -if [[ "$lora" == "True" ]]; then - [ -z "$policy_tp" ] && export policy_tp=4 - [ -z "$ppo_policy_pp" ] && export ppo_policy_pp=1 - [ -z "$reward_tp" ] && export reward_tp=4 - [ -z "$ppo_value_pp" ] && export ppo_value_pp=1 - if [[ "$max_new_tokens" == "512" ]]; then - export batch_generation_min_prompt_length=64 - export num_device_ref=16 - export num_device_value=16 - export num_device_ppo_policy=16 - export num_device_ppo_value=16 - export reward_use_distributed_optimizer=False - [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=256 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=64 - [ -z "$value_generation_bs" ] && export value_generation_bs=64 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=64 - [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=16 - [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 - fi -else - [ -z "$policy_tp" ] && export policy_tp=4 - [ -z "$ppo_policy_pp" ] && export ppo_policy_pp=1 - [ -z "$reward_tp" ] && 
export reward_tp=4 - [ -z "$ppo_value_pp" ] && export ppo_value_pp=1 - if [[ "$max_new_tokens" == "512" ]]; then - export batch_generation_min_prompt_length=32 - export num_device_ref=16 - export num_device_value=16 - export num_device_ppo_policy=16 - export num_device_ppo_value=16 - [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=128 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=32 - [ -z "$value_generation_bs" ] && export value_generation_bs=32 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=32 - [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=8 - [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 - fi -fi - -bash run_scripts/gpt/benchmark.sh - diff --git a/examples/megatron/step3_rlhf/run_scripts/gpt/run_7b_7b_8g.sh b/examples/megatron/step3_rlhf/run_scripts/gpt/run_7b_7b_8g.sh deleted file mode 100644 index d3743f6d..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/gpt/run_7b_7b_8g.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash - -set -x - -export model_size=7B -source run_scripts/gpt/base_env.sh - -export max_new_tokens=${1} -export lora=${2} -export max_seq_len=$(( max_new_tokens*2 )) - -if [[ "$lora" == "True" ]]; then - [ -z "$policy_tp" ] && export policy_tp=4 - [ -z "$ppo_policy_pp" ] && export ppo_policy_pp=1 - [ -z "$reward_tp" ] && export reward_tp=4 - [ -z "$ppo_value_pp" ] && export ppo_value_pp=1 - if [[ "$max_new_tokens" == "512" ]]; then - export batch_generation_min_prompt_length=64 - export num_device_ref=4 - export num_device_value=4 - export reward_use_distributed_optimizer=False - [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=256 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=64 - [ -z "$value_generation_bs" ] && export value_generation_bs=64 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=64 - [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=16 - [ -z 
"$train_global_batch_size" ] && export train_global_batch_size=512 - fi -else - [ -z "$policy_tp" ] && export policy_tp=4 - [ -z "$ppo_policy_pp" ] && export ppo_policy_pp=2 - [ -z "$reward_tp" ] && export reward_tp=4 - [ -z "$ppo_value_pp" ] && export ppo_value_pp=2 - if [[ "$max_new_tokens" == "512" ]]; then - export batch_generation_min_prompt_length=32 - export num_device_ref=4 - export num_device_value=4 - [ -z "$policy_generation_batch_size" ] && export policy_generation_batch_size=128 - [ -z "$ref_generation_bs" ] && export ref_generation_bs=32 - [ -z "$value_generation_bs" ] && export value_generation_bs=32 - [ -z "$reward_generation_bs" ] && export reward_generation_bs=32 - [ -z "$train_micro_batch_size" ] && export train_micro_batch_size=8 - [ -z "$train_global_batch_size" ] && export train_global_batch_size=512 - fi -fi - -bash run_scripts/gpt/benchmark.sh - diff --git a/examples/megatron/step3_rlhf/run_scripts/llama2/base_env.sh b/examples/megatron/step3_rlhf/run_scripts/llama2/base_env.sh deleted file mode 100644 index 1b3dcde0..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/llama2/base_env.sh +++ /dev/null @@ -1,46 +0,0 @@ -source run_scripts/base_env.sh -[ -z "$num_device_policy" ] && export num_device_policy=$num_device -[ -z "$num_device_ref" ] && export num_device_ref=$num_device -[ -z "$num_device_reward" ] && export num_device_reward=$num_device -[ -z "$num_device_value" ] && export num_device_value=$num_device -[ -z "$num_device_ppo_policy" ] && export num_device_ppo_policy=$num_device -[ -z "$num_device_ppo_value" ] && export num_device_ppo_value=$num_device - -export PYTHONPATH=${PYTHONPATH}:${CHATLEARN}/examples/megatron/step2_reward - -if [[ "$model_size" == "7B" ]]; then - export policy_num_layers=32 - export policy_hidden_size=4096 - export policy_num_attention_heads=32 - export policy_num_query_groups=32 - export policy_ffn_hidden_size=11008 - export reward_num_layers=32 - export reward_hidden_size=4096 - export 
reward_num_query_groups=32 - export reward_num_attention_heads=32 - export reward_ffn_hidden_size=11008 - export max_position_embedding=2048 -elif [[ "$model_size" == "13B" ]]; then - export policy_num_layers=40 - export policy_hidden_size=5120 - export policy_num_attention_heads=40 - export policy_ffn_hidden_size=13824 - export policy_num_query_groups=40 - export reward_num_layers=40 - export reward_hidden_size=5120 - export reward_num_attention_heads=40 - export reward_ffn_hidden_size=13824 - export reward_num_query_groups=40 -elif [[ "$model_size" == "70B" ]]; then - export policy_num_layers=80 - export policy_hidden_size=8192 - export policy_num_attention_heads=64 - export policy_ffn_hidden_size=28672 - export policy_num_query_groups=8 - export reward_num_layers=80 - export reward_hidden_size=8192 - export reward_num_attention_heads=64 - export reward_ffn_hidden_size=28672 - export reward_num_query_groups=8 - export group_query_attention=True -fi diff --git a/examples/megatron/step3_rlhf/run_scripts/llama2/run_13b_13b.sh b/examples/megatron/step3_rlhf/run_scripts/llama2/run_13b_13b.sh deleted file mode 100644 index 5a1b6568..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/llama2/run_13b_13b.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -set -x - -export model_size=13B -export policy_tp=8 -export ppo_policy_pp=2 -export reward_tp=8 -export ppo_value_pp=2 -source run_scripts/llama2/base_env.sh - - -cd ${CHATLEARN}/examples/megatron/step3_rlhf -if [ -z "${exp_name}" ]; then - export exp_name=$(date +%F)_llama-rlhf-${model_size}-${model_size} -fi - -[ -z "$OUTPUT_DIR" ] && OUTPUT_DIR=${CHATLEARN}/output/step3_rlhf/ -[ -z "$LOG_DIR" ] && LOG_DIR=${OUTPUT_DIR}/logs/${exp_name} -[ -z "$TENSORBOARD_DIR" ] && TENSORBOARD_DIR=${OUTPUT_DIR}/tensorboard/${exp_name} -[ -z "$SAVE_DIR" ] && SAVE_DIR=${OUTPUT_DIR}/save_model/${exp_name} - -mkdir -p ${LOG_DIR} - -policy_inference_load=${POLICY_LOAD} \ -reward_load_iteration=${REWARD_LOAD_ITERATION} \ 
-reward_load=${REWARD_LOAD} \ -tokenizer_model=${TOKENIZER_MODEL} \ -num_device=${num_device} \ -log_dir=${LOG_DIR} \ -tensorboard_dir=${TENSORBOARD_DIR} \ -save_dir=${SAVE_DIR} \ -data_path=${DATASET_PATH} \ -sample_per_episode=1024 \ -train_global_batch_size=128 \ -generation_batch_size=64 \ -ref_generation_batch_size=16 \ -python train_rlhf.py -c configs/llama2/rlhf.yaml 2>&1 | tee -a ${LOG_DIR}/log_${RANK}.txt ; exit ${PIPESTATUS[0]} - diff --git a/examples/megatron/step3_rlhf/run_scripts/llama2/run_70b_70b.sh b/examples/megatron/step3_rlhf/run_scripts/llama2/run_70b_70b.sh deleted file mode 100644 index 60a590be..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/llama2/run_70b_70b.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -set -x - -export model_size=70B -export policy_tp=8 -export ppo_policy_pp=4 -export reward_tp=8 -export ppo_value_pp=4 - - -source run_scripts/llama2/base_env.sh - - -cd ${CHATLEARN}/examples/megatron/step3_rlhf -if [ -z "${exp_name}" ]; then - export exp_name=$(date +%F)_llama-rlhf-${model_size}-${model_size} -fi - -[ -z "$OUTPUT_DIR" ] && OUTPUT_DIR=${CHATLEARN}/output/step3_rlhf/ -[ -z "$LOG_DIR" ] && LOG_DIR=${OUTPUT_DIR}/logs/${exp_name} -[ -z "$TENSORBOARD_DIR" ] && TENSORBOARD_DIR=${OUTPUT_DIR}/tensorboard/${exp_name} -[ -z "$SAVE_DIR" ] && SAVE_DIR=${OUTPUT_DIR}/save_model/${exp_name} -[ -z "$sample_per_episode" ] && sample_per_episode=1024 - -mkdir -p ${LOG_DIR} - -export policy_recompute_activations=True -export policy_recompute_granularity=full -export value_recompute_activations=True -export value_recompute_granularity=full -export batch_generation_min_prompt_length=32 -export num_device_ref=16 -export num_device_reward=32 -export num_device_value=16 -export ref_pp=2 -export reward_pp=2 -export inference_batch_times_seqlen_threshold=16384 - -enable_lora_policy=True \ -enable_lora_value=True \ -policy_inference_load=${POLICY_LOAD} \ -reward_load_iteration=${REWARD_LOAD_ITERATION} \ -reward_load=${REWARD_LOAD} \ 
-tokenizer_model=${TOKENIZER_MODEL} \ -num_device=${num_device} \ -log_dir=${LOG_DIR} \ -tensorboard_dir=${TENSORBOARD_DIR} \ -save_dir=${SAVE_DIR} \ -data_path=${DATASET_PATH} \ -sample_per_episode=${sample_per_episode} \ -train_global_batch_size=128 \ -generation_batch_size=64 \ -ref_generation_batch_size=16 \ -python train_rlhf.py -c configs/llama2/rlhf.yaml 2>&1 | tee -a ${LOG_DIR}/log_${RANK}.txt ; exit ${PIPESTATUS[0]} - diff --git a/examples/megatron/step3_rlhf/run_scripts/llama2/run_7b_7b.sh b/examples/megatron/step3_rlhf/run_scripts/llama2/run_7b_7b.sh deleted file mode 100644 index 33f213a3..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/llama2/run_7b_7b.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash -set -x - -export model_size=7B -export policy_tp=8 -export ppo_policy_pp=1 -export reward_tp=8 -export ppo_value_pp=1 - -source run_scripts/llama2/base_env.sh - -cd ${CHATLEARN}/examples/megatron/step3_rlhf - -if [ -z "${exp_name}" ]; then - export exp_name=$(date +%F)_llama-rlhf-${model_size}-${model_size} -fi - -[ -z "$OUTPUT_DIR" ] && OUTPUT_DIR=${CHATLEARN}/output/step3_rlhf/ -[ -z "$sample_per_episode" ] && sample_per_episode=1024 -[ -z "$LOG_DIR" ] && LOG_DIR=${OUTPUT_DIR}/logs/${exp_name} -[ -z "$TENSORBOARD_DIR" ] && TENSORBOARD_DIR=${OUTPUT_DIR}/tensorboard/${exp_name} -[ -z "$SAVE_DIR" ] && SAVE_DIR=${OUTPUT_DIR}/save_model/${exp_name} - -export data_checkpoint_path=${OUTPUT_DIR}/save_model/${exp_name}/data_checkpoint - -mkdir -p ${LOG_DIR} - -policy_inference_load=${POLICY_LOAD} \ -reward_load_iteration=${REWARD_LOAD_ITERATION} \ -reward_load=${REWARD_LOAD} \ -tokenizer_model=${TOKENIZER_MODEL} \ -num_device=${num_device} \ -log_dir=${LOG_DIR} \ -tensorboard_dir=${TENSORBOARD_DIR} \ -save_dir=${SAVE_DIR} \ -data_path=${DATASET_PATH} \ -sample_per_episode=${sample_per_episode} \ -train_global_batch_size=128 \ -generation_batch_size=64 \ -ref_generation_batch_size=16 \ -python train_rlhf.py -c configs/llama2/rlhf.yaml 2>&1 | tee -a 
${LOG_DIR}/log_${RANK}.txt ; exit ${PIPESTATUS[0]} - diff --git a/examples/megatron/step3_rlhf/run_scripts/llama2/run_7b_7b_vllm.sh b/examples/megatron/step3_rlhf/run_scripts/llama2/run_7b_7b_vllm.sh deleted file mode 100644 index 6420c722..00000000 --- a/examples/megatron/step3_rlhf/run_scripts/llama2/run_7b_7b_vllm.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -set -x - -export model_size=7B -export policy_tp=8 -export ppo_policy_pp=1 -export reward_tp=8 -export ppo_value_pp=1 - -source run_scripts/llama2/base_env.sh - -cd ${CHATLEARN}/examples/megatron/step3_rlhf - -if [ -z "${exp_name}" ]; then - export exp_name=$(date +%F)_llama-rlhf-${model_size}-${model_size} -fi - -[ -z "$OUTPUT_DIR" ] && OUTPUT_DIR=${CHATLEARN}/output/step3_rlhf/ -[ -z "$LOG_DIR" ] && LOG_DIR=${OUTPUT_DIR}/logs/${exp_name} -[ -z "$TENSORBOARD_DIR" ] && TENSORBOARD_DIR=${OUTPUT_DIR}/tensorboard/${exp_name} -[ -z "$SAVE_DIR" ] && SAVE_DIR=${OUTPUT_DIR}/save_model/${exp_name} - -mkdir -p ${LOG_DIR} - -policy_inference_load=${POLICY_LOAD} \ -reward_load_iteration=${REWARD_LOAD_ITERATION} \ -reward_load=${REWARD_LOAD} \ -tokenizer_model=${TOKENIZER_MODEL} \ -num_device=${num_device} \ -log_dir=${LOG_DIR} \ -tensorboard_dir=${TENSORBOARD_DIR} \ -save_dir=${SAVE_DIR} \ -data_path=${DATASET_PATH} \ -sample_per_episode=1024 \ -train_global_batch_size=128 \ -generation_batch_size=64 \ -ref_generation_batch_size=16 \ -python train_vllm_rlhf.py -c configs/llama2/vllm_rlhf.yaml 2>&1 | tee -a ${LOG_DIR}/log_vllm_${RANK}.txt ; exit ${PIPESTATUS[0]} - diff --git a/examples/megatron/step3_rlhf/tests/run_policy_generation.sh b/examples/megatron/step3_rlhf/tests/run_policy_generation.sh deleted file mode 100644 index 5e5e6972..00000000 --- a/examples/megatron/step3_rlhf/tests/run_policy_generation.sh +++ /dev/null @@ -1,37 +0,0 @@ -[ -z "$MEGATRON" ] && export MEGATRON=path-to-megatron -[ -z "$CHATLEARN" ] && export CHATLEARN=path-to-chatlearn -[ -z "$TP" ] && export TP=4 -[ -z "$model_type" ] && export 
model_type=llama -[ -z "$VOCAB_FILE" ] && export VOCAB_FILE=path-to-tokenizer -[ -z "$LOAD" ] && export LOAD=path-to-ckpt -[ -z "$DATASET_PATH" ] && export DATASET_PATH=path-to-dataset-json -[ -z "$model_size" ] && export model_size=13B -OUTPUT=$CHATLEARN/output/tests/ -mkdir -p $OUTPUT - -if [[ "$model_type" == "gpt" ]]; then - configs=configs/gpt/test_policy.yaml - export vocab_file=$VOCAB_FILE - export merge_file=$MERGE_FILE - export max_new_tokens=512 - export max_seq_len=1024 -elif [[ "$model_type" == "llama2" ]]; then - configs=configs/llama2/test_policy.yaml - export tokenizer_model=$VOCAB_FILE -else - echo "unexpected model_type $model_type." - exit 1 -fi - -source run_scripts/$model_type/base_env.sh - -export exp_name=run_test_${model_size}_tp${TP}_meg_$model_type -export batch_generation_min_prompt_length=32 - -generation_batch_size=64 \ -num_device=$TP \ -policy_tp=$TP \ -eval_data_path=$DATASET_PATH \ -policy_inference_load=$LOAD \ -eval_output_dir=$OUTPUT \ -python tests/test_policy_generation.py -c $configs 2>&1 | tee ${OUTPUT}/${exp_name}.log ; exit ${PIPESTATUS[0]} diff --git a/examples/megatron/step3_rlhf/tests/run_vllm_policy_generation.sh b/examples/megatron/step3_rlhf/tests/run_vllm_policy_generation.sh deleted file mode 100644 index 9cfdcaeb..00000000 --- a/examples/megatron/step3_rlhf/tests/run_vllm_policy_generation.sh +++ /dev/null @@ -1,38 +0,0 @@ -[ -z "$MEGATRON" ] && export MEGATRON=path-to-megatron -[ -z "$CHATLEARN" ] && export CHATLEARN=path-to-chatlearn -[ -z "$TP" ] && export TP=4 -[ -z "$model_type" ] && export model_type=llama -[ -z "$VOCAB_FILE" ] && export VOCAB_FILE=path-to-tokenizer -[ -z "$LOAD" ] && export LOAD=path-to-ckpt -[ -z "$DATASET_PATH" ] && export DATASET_PATH=path-to-dataset-json -[ -z "$model_size" ] && export model_size=13B -OUTPUT=$CHATLEARN/output/tests/ -mkdir -p $OUTPUT - -if [[ "$model_type" == "gpt" ]]; then - configs=configs/gpt/test_policy.yaml - export vocab_file=$VOCAB_FILE - export 
merge_file=$MERGE_FILE - export max_new_tokens=512 - export max_seq_len=1024 -elif [[ "$model_type" == "llama2" ]]; then - configs=configs/llama2/test_vllm_policy.yaml - export tokenizer_model=$VOCAB_FILE -else - echo "unexpected model_type $model_type." - exit 1 -fi - -source run_scripts/$model_type/base_env.sh - -export exp_name=run_test_${model_size}_tp${TP}_vllm_$model_type -export batch_generation_min_prompt_length=32 - -vllm_micro_batch_size=256 \ -generation_batch_size=128 \ -num_device=$TP \ -policy_tp=$TP \ -eval_data_path=$DATASET_PATH \ -policy_inference_load=$LOAD \ -eval_output_dir=$OUTPUT \ -python tests/test_vllm_policy_generation.py -c $configs 2>&1 | tee ${OUTPUT}/${exp_name}.log ; exit ${PIPESTATUS[0]} diff --git a/examples/megatron/step3_rlhf/tests/test_vllm_policy_generation.py b/examples/megatron/step3_rlhf/tests/test_vllm_policy_generation.py deleted file mode 100644 index c995349e..00000000 --- a/examples/megatron/step3_rlhf/tests/test_vllm_policy_generation.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""test vllm policy generation""" - -from models.vllm_policy_inference import VLLMPolicyInference -from models.utils import write_jsonl -from tqdm import tqdm -from train_rlhf import get_prompts - -import chatlearn -from chatlearn import EvalEngine - -chatlearn.init() - -policy = VLLMPolicyInference("policy") -policy.register_eval_func("forward_step") -engine = EvalEngine(policy) - -args = chatlearn.get_args() -k = {"math_coef": 0} -train_prompts = get_prompts(args.rlhf_args.get("eval_data_path"), num_limit=512, ) - -policy_checkpoint = policy.model_args["load"] -load_iteration = policy.model_args.get("load_iteration", 0) -exp_name = policy.model_args["exp_name"] -eval_dir = args.rlhf_args._args_dict["eval_output_dir"] - -engine.set_dataset(train_prompts) -results = engine.eval() -output = [] - -for res in tqdm(results, total=len(results)): - print(res['str_outputs']) - str_prompts = res["str_prompts"] - str_outputs = res["str_outputs"] - for str_prompt, str_output in zip(str_prompts, str_outputs): - j = {"query": str_prompt, "responses": [str_output]} - output.append(j) - -policy_inference_fp = f"{eval_dir}/{load_iteration}/{exp_name}/inference_json.json" -print(policy_inference_fp) -print(f"inference finished: got jsons number: {len(output)}") -write_jsonl(output, policy_inference_fp) - -engine.logging_summary() diff --git a/examples/megatron/step3_rlhf/train_rlhf.py b/examples/megatron/step3_rlhf/train_rlhf.py deleted file mode 100644 index c3b722bb..00000000 --- a/examples/megatron/step3_rlhf/train_rlhf.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""entry file""" - -import random - -import numpy -import torch -from models import PolicyInference -from models import PolicyReference -from models import PolicyTrainer -from models import RewardInference -from models import ValueInference -from models import ValueTrainer -from models.utils import write_jsonl, read_jsonl, tensorboard_scalar_dict, listdict_to_dictlist -from torch.utils.tensorboard import SummaryWriter - -import chatlearn -from chatlearn import Evaluator -from chatlearn import RLHFEngine - - -def get_prompts(fp, num_limit=-1): - prompts_jsons = read_jsonl(fp) - - if "text" in prompts_jsons[0]: - prompts = [p["text"] for p in prompts_jsons] - patten = '\n\nAssistant: ' - prompts = [prompt[:prompt.find(patten) + len(patten)] for prompt in prompts] - if num_limit != -1: - prompts = prompts[:num_limit] - return prompts - elif 'prompt' in prompts_jsons[0]: - prompts = [p["prompt"] for p in prompts_jsons] - if num_limit != -1: - prompts = prompts[:num_limit] - return prompts - else: - prompts = [p["query"] for p in prompts_jsons] - if num_limit != -1: - prompts = prompts[:num_limit] - formatted_prompts = [f"\n\nHuman: {p}\n\nAssistant: " for p in prompts] - return formatted_prompts - - -if __name__ == "__main__": - chatlearn.init() - args = chatlearn.get_args() - policy_model = PolicyInference("policy") - value_model = ValueInference("value") - reference_model = PolicyReference("reference") - reward_model = RewardInference("reward") - ppo_policy_model = 
PolicyTrainer("ppo_policy") - ppo_value_model = ValueTrainer("ppo_value") - if args.rlhf_args.eval_episode_interval > 0: - policy_model.register_eval_func("eval_forward") - reward_model.register_eval_func("eval_forward") - engine = RLHFEngine(policy_model, reference_model, reward_model, value_model, ppo_policy_model, ppo_value_model) - all_prompts = get_prompts(args.rlhf_args.data_path, num_limit=args.rlhf_args._args_dict['training_data_num_limit']) - random.seed(policy_model.model_args["seed"]) - split_ratio = 0.9 if args.rlhf_args.eval_episode_interval > 0 else 1 - num_train = int(len(all_prompts) * split_ratio) - random.shuffle(all_prompts) - train_prompts = all_prompts[:num_train] - policy_checkpoint = policy_model.model_args.get("load", 0) - exp_name = policy_model.model_args["exp_name"] - - - def eval_post_process(results, eval_info): - results = listdict_to_dictlist(results) - writer = SummaryWriter( - log_dir=args.models["policy"].args_dict['tensorboard_dir'], - max_queue=99999) - - eval_reward_stats = {"eval_reward_mean": numpy.mean(results['rewards'])} - train_iteration = eval_info["train_iteration"] - - if torch.distributed.is_initialized(): - if torch.distributed.get_rank() == ( - torch.distributed.get_world_size() - 1): - tensorboard_scalar_dict(writer, prefix="eval_reward_each/", - global_step=train_iteration, - scalar_dict=eval_reward_stats) - - else: - tensorboard_scalar_dict(writer, prefix="eval_reward_each/", - global_step=train_iteration, - scalar_dict=eval_reward_stats) - - save_fp = f"{args.rlhf_args._args_dict['eval_output_dir']}/{exp_name}/{train_iteration}/eval_json_res.json" - write_jsonl(results["eval_jsonl"], save_fp) - - - if args.rlhf_args.eval_episode_interval > 0: - val_prompts = all_prompts[num_train:] - eval_num_limit = args.rlhf_args.get('eval_data_num_limit') - if eval_num_limit: - eval_num_limit = min(eval_num_limit, len(val_prompts)) - val_prompts = val_prompts[:eval_num_limit] - evaluator = Evaluator([policy_model, 
reward_model]) \ - .set_dataset(val_prompts) \ - .set_post_process_func(eval_post_process) - engine.set_evaluator(evaluator) - engine.set_dataset(train_prompts) - engine.learn() diff --git a/examples/megatron/step3_rlhf/train_vllm_rlhf.py b/examples/megatron/step3_rlhf/train_vllm_rlhf.py deleted file mode 100644 index 901809d0..00000000 --- a/examples/megatron/step3_rlhf/train_vllm_rlhf.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""entry file""" - -import random - -import numpy -import torch -from models import VLLMPolicyInference -from models import PolicyReference -from models import PolicyTrainer -from models import RewardInference -from models import ValueInference -from models import ValueTrainer -from models.utils import write_jsonl, read_jsonl, tensorboard_scalar_dict, listdict_to_dictlist -from torch.utils.tensorboard import SummaryWriter - -import chatlearn -from chatlearn import Evaluator -from chatlearn import RLHFEngine - - -def get_prompts(fp, num_limit=-1): - prompts_jsons = read_jsonl(fp) - - if "text" in prompts_jsons[0]: - prompts = [p["text"] for p in prompts_jsons] - patten = '\n\nAssistant: ' - prompts = [prompt[:prompt.find(patten) + len(patten)] for prompt in prompts] - if num_limit != -1: - prompts = prompts[:num_limit] - return prompts - elif 'prompt' in prompts_jsons[0]: - prompts = [p["prompt"] for p in prompts_jsons] - if num_limit != -1: - prompts = prompts[:num_limit] - return prompts - else: - prompts = [p["query"] for p in prompts_jsons] - if num_limit != -1: - prompts = prompts[:num_limit] - formatted_prompts = [f"\n\nHuman: {p}\n\nAssistant: " for p in prompts] - return formatted_prompts - - -if __name__ == "__main__": - chatlearn.init() - args = chatlearn.get_args() - if VLLMPolicyInference is None: - print("Cannot import vllm, please set vllm python path or install vllm first.") - policy_model = VLLMPolicyInference("policy") - value_model = ValueInference("value") - reference_model = PolicyReference("reference") - reward_model = RewardInference("reward") - ppo_policy_model = PolicyTrainer("ppo_policy") - ppo_value_model = ValueTrainer("ppo_value") - if args.rlhf_args.eval_episode_interval > 0: - policy_model.register_eval_func("eval_forward") - reward_model.register_eval_func("eval_forward") - engine = RLHFEngine(policy_model, reference_model, reward_model, value_model, 
ppo_policy_model, ppo_value_model) - all_prompts = get_prompts(args.rlhf_args.data_path, num_limit=args.rlhf_args._args_dict['training_data_num_limit']) - random.seed(policy_model.model_args["seed"]) - split_ratio = 0.9 if args.rlhf_args.eval_episode_interval > 0 else 1 - num_train = int(len(all_prompts) * split_ratio) - random.shuffle(all_prompts) - train_prompts = all_prompts[:num_train] - policy_checkpoint = policy_model.model_args.get("load", 0) - exp_name = policy_model.model_args["exp_name"] - - - def eval_post_process(results, eval_info): - results = listdict_to_dictlist(results) - writer = SummaryWriter( - log_dir=args.models["policy"].args_dict['tensorboard_dir'], - max_queue=99999) - - eval_reward_stats = {"eval_reward_mean": numpy.mean(results['rewards'])} - train_iteration = eval_info["train_iteration"] - - if torch.distributed.is_initialized(): - if torch.distributed.get_rank() == ( - torch.distributed.get_world_size() - 1): - tensorboard_scalar_dict(writer, prefix="eval_reward_each/", - global_step=train_iteration, - scalar_dict=eval_reward_stats) - - else: - tensorboard_scalar_dict(writer, prefix="eval_reward_each/", - global_step=train_iteration, - scalar_dict=eval_reward_stats) - - save_fp = f"{args.rlhf_args._args_dict['eval_output_dir']}/{exp_name}/{train_iteration}/eval_json_res.json" - write_jsonl(results["eval_jsonl"], save_fp) - - - if args.rlhf_args.eval_episode_interval > 0: - val_prompts = all_prompts[num_train:] - eval_num_limit = args.rlhf_args.get('eval_data_num_limit') - if eval_num_limit: - eval_num_limit = min(eval_num_limit, len(val_prompts)) - val_prompts = val_prompts[:eval_num_limit] - evaluator = Evaluator([policy_model, reward_model]) \ - .set_dataset(val_prompts) \ - .set_post_process_func(eval_post_process) - engine.set_evaluator(evaluator) - engine.set_dataset(train_prompts) - engine.learn() diff --git a/examples/megatron/tests/get_eval_reward.py b/examples/megatron/tests/get_eval_reward.py new file mode 100644 index 
00000000..3cfa0709 --- /dev/null +++ b/examples/megatron/tests/get_eval_reward.py @@ -0,0 +1,66 @@ +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""eval examples.""" + +import os + +# pylint: disable=invalid-envvar-default,bad-exception-cause,ungrouped-imports,missing-module-docstring,wrong-import-position +if os.getenv("ENABLE_VLLM", False): + try: + from examples.megatron.models.vllm_policy_inference import VLLMPolicyInference as PolicyModel + except Exception as e: + raise RuntimeError("Cannot import vllm, please set vllm python path or install vllm first.") from e +else: + from examples.megatron.models.old_policy_inference import PolicyInference as PolicyModel + +from examples.megatron.models.reward_inference import RewardInference +from examples.megatron.models.train_helper import eval_post_process, get_prompts + +import chatlearn +from chatlearn import EvalEngine + +chatlearn.init() + +args = chatlearn.get_args() + +policy = PolicyModel("policy") +exp_name = args.runtime_args.exp_name +reward_inference = RewardInference("reward") + +val_prompts = get_prompts(args.runtime_args.get("eval_data_path"), num_limit=args.runtime_args.get("eval_data_num_limit"), ) + +def eval_flow(batch): + r0 = policy.eval_forward(batch) + r1 = reward_inference.eval_forward(r0) + return r1 + +engine = EvalEngine(eval_flow) 
+engine.set_dataset(val_prompts).set_post_process_func(eval_post_process) + +load_iteration = args.models['policy'].args_dict['load_iteration'] +if not load_iteration: + load_iteration = 1 +results = engine.eval(train_iteration=load_iteration) + +engine.logging_summary() + +# validate all prompts are processed +res_prompts = [] + +for data in results['reward']: + for eval_jsonl in data['eval_jsonl']: + prompt = eval_jsonl['query'] + res_prompts.append(prompt) +assert len(res_prompts) == len(val_prompts), f"{len(res_prompts)} vs {len(val_prompts)}" diff --git a/examples/megatron/tests/get_eval_reward.sh b/examples/megatron/tests/get_eval_reward.sh new file mode 100644 index 00000000..671ac566 --- /dev/null +++ b/examples/megatron/tests/get_eval_reward.sh @@ -0,0 +1,45 @@ +#!/bin/bash +set -x + +export model_size=llama2-7B +export policy_tp=4 +export reward_tp=4 + +cd ${CHATLEARN}/examples/megatron/aligment +source scripts/base_env.sh + +if [ -z "${exp_name}" ]; then + export exp_name=$(date +%F)-eval-${model_size}-${model_size} +fi + +# megatron or vllm +backend=${1:-megatron} + +scripts=tests/get_eval_reward.py +if [[ $backend == "vllm" ]];then + export ENABLE_VLLM=True + export PYTHONPATH=$PYTHONPATH:$CKPT_ROOT/$MODEL + configs=configs/llama2/eval_vllm.yaml +else + configs=configs/llama2/eval.yaml +fi + +export eval_data_path=$DATA_ROOT/dev.jsonl + +[ -z "$exp_name" ] && export exp_name=$(date +%F)-${model_size}-${trainer_engine} +[ -z "$output_dir" ] && export output_dir=${CHATLEARN}/output/ +[ -z "$sample_per_episode" ] && sample_per_episode=1024 +[ -z "$tokenizer_load" ] && export tokenizer_load=path-to-hf-tokenizer-for-vllm-backend +output_dir=${output_dir}/${exp_name} +mkdir -p ${output_dir}/ +log_file=${output_dir}/log_${RANK}.log + +policy_inference_load=${POLICY_LOAD} \ +policy_load_iteration=${POLICY_LOAD_ITERATION} \ +load_iteration=${REWARD_LOAD_ITERATION} \ +reward_load_iteration=${REWARD_LOAD_ITERATION} \ +reward_load=${REWARD_LOAD} \ 
+tokenizer_model=${TOKENIZER_MODEL} \ +num_gpu=${num_gpu} \ +eval_data_path=${DATASET_PATH} \ +python $scripts -c $configs 2>&1 | tee -a ${log_file} ; exit ${PIPESTATUS[0]} diff --git a/examples/megatron/tests/run_policy_generation.sh b/examples/megatron/tests/run_policy_generation.sh new file mode 100644 index 00000000..11214269 --- /dev/null +++ b/examples/megatron/tests/run_policy_generation.sh @@ -0,0 +1,61 @@ +#!/bin/bash +set -x + +[ -z "$MEGATRON" ] && export MEGATRON=path-to-megatron +[ -z "$CHATLEARN" ] && export CHATLEARN=path-to-chatlearn +[ -z "$TP" ] && export TP=4 +[ -z "$VOCAB_FILE" ] && export VOCAB_FILE=path-to-tokenizer +[ -z "$LOAD" ] && export LOAD=path-to-ckpt +[ -z "$DATASET_PATH" ] && export DATASET_PATH=path-to-dataset-json +[ -z "$model_size" ] && export model_size=llama2-13B + +cd $CHATLEARN/examples/megatron + +# megatron or vllm +backend=${1:-megatron} + +if [[ "$backend" != "megatron" ]] && [[ "$backend" != "vllm" ]]; then + echo "ERROR: expect megatron or vllm backend, while "$backend + exit 1 +fi + +if [[ $model_size == "gpt"* ]]; then + if [[ "$backend" != "megatron" ]]; then + echo "ERROR: gpt model support megatron backend for now." + exit 1 + fi + configs=configs/gpt/test_policy.yaml + export vocab_file=$VOCAB_FILE + export merge_file=$MERGE_FILE + export max_new_tokens=512 + export max_seq_len=1024 +elif [[ $model_size == "llama2"* ]]; then + if [[ "$backend" == "megatron" ]]; then + configs=configs/llama2/test_policy.yaml + else + export ENABLE_VLLM=True + configs=configs/llama2/test_vllm_policy.yaml + fi + export tokenizer_model=$VOCAB_FILE +else + echo "unexpected model_type $model_size." 
+ exit 1 +fi + +source scripts/base_env.sh + +[ -z "$exp_name" ] && export exp_name=$(date +%F)-${model_size}-${trainer_engine} +[ -z "$output_dir" ] && export output_dir=${CHATLEARN}/output/ + +output_dir=${output_dir}/${exp_name} +mkdir -p ${output_dir}/ +log_file=${output_dir}/log_${RANK}.log + +export batch_generation_min_prompt_length=32 + +generation_batch_size=64 \ +num_gpu=$TP \ +policy_tp=$TP \ +eval_data_path=$DATASET_PATH \ +policy_inference_load=$LOAD \ +python tests/test_policy_generation.py -c $configs 2>&1 | tee ${log_file}.log ; exit ${PIPESTATUS[0]} diff --git a/examples/megatron/step3_rlhf/tests/test_policy_generation.py b/examples/megatron/tests/test_policy_generation.py similarity index 59% rename from examples/megatron/step3_rlhf/tests/test_policy_generation.py rename to examples/megatron/tests/test_policy_generation.py index d3eae206..d7093574 100644 --- a/examples/megatron/step3_rlhf/tests/test_policy_generation.py +++ b/examples/megatron/tests/test_policy_generation.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -14,31 +14,47 @@ # ============================================================================== """test policy generation""" -from models.old_policy_inference import PolicyInference -from models.utils import write_jsonl +import os from tqdm import tqdm -from train_rlhf import get_prompts +from examples.megatron.models.utils import write_jsonl +from examples.megatron.models.train_helper import get_prompts import chatlearn from chatlearn import EvalEngine +# pylint: disable=invalid-envvar-default,bad-exception-cause,ungrouped-imports +if os.getenv("ENABLE_VLLM", False): + try: + from examples.megatron.models.vllm_policy_inference import VLLMPolicyInference as PolicyModel + except Exception as e: + raise RuntimeError("Cannot import vllm, please set vllm python path or install vllm first.") from e +else: + from examples.megatron.models.old_policy_inference import PolicyInference as PolicyModel + + + chatlearn.init() -policy = PolicyInference("policy") -policy.register_eval_func("forward_step") -engine = EvalEngine(policy) + +model_name = "policy" +policy = PolicyModel(model_name) +def eval_flow(batch): + r0 = policy.eval_forward(batch) + return r0 + +engine = EvalEngine(eval_flow) args = chatlearn.get_args() k = {"math_coef": 0} -train_prompts = get_prompts(args.rlhf_args.get("eval_data_path"), num_limit=1024, ) +train_prompts = get_prompts(args.runtime_args.get("eval_data_path"), num_limit=15) policy_checkpoint = policy.model_args["load"] load_iteration = policy.model_args.get("load_iteration", 0) -exp_name = policy.model_args["exp_name"] -eval_dir = args.rlhf_args._args_dict["eval_output_dir"] +exp_name = args.runtime_args.exp_name +eval_dir = os.path.join(args.runtime_args.output_dir, "eval") engine.set_dataset(train_prompts) -results = engine.eval() +results = engine.eval()[model_name] output = [] for res in tqdm(results, total=len(results)): diff --git a/examples/megatron/step3_rlhf/tests/test_reward.sh b/examples/megatron/tests/test_reward.sh similarity 
index 74% rename from examples/megatron/step3_rlhf/tests/test_reward.sh rename to examples/megatron/tests/test_reward.sh index 0d0f3026..5ff109a3 100644 --- a/examples/megatron/step3_rlhf/tests/test_reward.sh +++ b/examples/megatron/tests/test_reward.sh @@ -1,12 +1,16 @@ +#!/bin/bash +set -x + [ -z "$MEGATRON" ] && export MEGATRON=path-to-megatron [ -z "$CHATLEARN" ] && export CHATLEARN=path-to-chatlearn [ -z "$VOCAB_FILE" ] && export VOCAB_FILE=path-to-tokenizer [ -z "$LOAD" ] && export LOAD=path-to-ckpt -[ -z "REWARD_LOAD_ITERATION" ] && export REWARD_LOAD_ITERATION=1000 +[ -z "$REWARD_LOAD_ITERATION" ] && export REWARD_LOAD_ITERATION=1000 [ -z "$DATASET_PATH" ] && export DATASET_PATH=path-to-dataset-json -export model_size=13B -source run_scripts/llama2/base_env.sh +cd $CHATLEARN/examples/megatron +export model_size=llama2-13B +source scripts/base_env.sh eval_data_path=$DATASET_PATH \ tokenizer_model=$VOCAB_FILE \ diff --git a/examples/megatron/step3_rlhf/tests/test_reward_forward.py b/examples/megatron/tests/test_reward_forward.py similarity index 90% rename from examples/megatron/step3_rlhf/tests/test_reward_forward.py rename to examples/megatron/tests/test_reward_forward.py index c53e8c89..d533f3b5 100644 --- a/examples/megatron/step3_rlhf/tests/test_reward_forward.py +++ b/examples/megatron/tests/test_reward_forward.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -18,9 +18,9 @@ import time from collections import defaultdict -from models.reward_inference import RewardInference from tqdm import tqdm +from examples.megatron.models.reward_inference import RewardInference import chatlearn from chatlearn import Engine @@ -50,7 +50,7 @@ def get_labled_list_strs(fp): args = chatlearn.get_args() -batches = get_labled_list_strs(args.rlhf_args.get("eval_data_path")) +batches = get_labled_list_strs(args.runtime_args.get("eval_data_path")) start_time = time.time() diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..b2b714c0 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,21 @@ +ray[default]==2.32.0 +transformers==4.42.0 +pynvml==11.4.1 +deepspeed==0.14.4 +vllm==0.5.1 +jsonlines +torchtyping +tensorboard +cupy +# math related +word2number +timeout-decorator +latex2sympy2==1.9.0 + +# install apex if you needed +# git clone https://github.com/NVIDIA/apex +# cd apex +# pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" ./ + +# install transformer engine if you needed +# git+https://github.com/NVIDIA/TransformerEngine.git@v1.2.1 diff --git a/setup.py b/setup.py index 92f8ad45..c494e2c5 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/configs/exp.yaml b/tests/configs/exp.yaml index 065fb71c..11520a18 100644 --- a/tests/configs/exp.yaml +++ b/tests/configs/exp.yaml @@ -1,20 +1,20 @@ models: policy: model_config_file: model.yaml - num_device: 1 + num_gpu: 1 gpu_per_process: 1 trainable: False reference: model_config_file: model.yaml - num_device: 1 + num_gpu: 1 gpu_per_process: 1 trainable: False -rlhf: +runtime: num_rollout_worker: 1 - num_ppo_iteration: 5000 + num_iteration: 5000 sample_per_episode: 1000 num_training_epoch: ${num_training_epoch:3} unknown_args: "test_unknown" diff --git a/tests/configs/rlhf.yaml b/tests/configs/rlhf.yaml index 8ec62e91..d5f73d17 100644 --- a/tests/configs/rlhf.yaml +++ b/tests/configs/rlhf.yaml @@ -9,27 +9,27 @@ runtime_env: models: policy: - num_device: 1 + num_gpu: 1 gpu_per_process: 1 trainable: False reference: - num_device: 1 + num_gpu: 1 gpu_per_process: 1 trainable: False reward: - num_device: 1 + num_gpu: 1 gpu_per_process: 1 trainable: False value: - num_device: 1 + num_gpu: 1 gpu_per_process: 1 trainable: False ppo_policy: - num_device: 1 + num_gpu: 1 gpu_per_process: 1 trainable: True lora: @@ -40,16 +40,16 @@ models: lora_dropout: 0.05 ppo_value: - num_device: 1 + num_gpu: 1 gpu_per_process: 1 trainable: True -rlhf: +runtime: debug: True generation_batch_size: ${batch_size:4} train_micro_batch_size: 5 train_global_batch_size: 10 - num_ppo_episode: 2 + num_episode: 2 sample_per_episode: 16 num_training_epoch: 1 save_episode_interval: 200 diff --git a/tests/configs/rlhf2.yaml b/tests/configs/rlhf2.yaml new file mode 100644 index 00000000..3cfd84c1 --- /dev/null +++ b/tests/configs/rlhf2.yaml @@ -0,0 +1,50 @@ +runtime_env: + platform: DLC + excludes: + - "*pt" + - "logs" + - "tensorboards" + - ".nfs*" + + +models: + policy: + num_gpu: 1 + gpu_per_process: 1 + trainable: False + + reference: + num_gpu: 1 + gpu_per_process: 1 + trainable: False + + reward: + num_gpu: 1 + gpu_per_process: 1 + trainable: False + + value: + num_gpu: 1 + 
gpu_per_process: 1 + trainable: True + + ppo_policy: + num_gpu: 1 + gpu_per_process: 1 + trainable: True + lora: + enable_lora: ${enable_lora_policy:False} + lora_dim: 64 + lora_layer: ColumnParallelLinear,LinearLayer,RowParallelLinear + column_only_qkv: False + lora_dropout: 0.05 + +runtime: + debug: True + generation_batch_size: ${batch_size:4} + train_micro_batch_size: 5 + train_global_batch_size: 10 + num_episode: 2 + sample_per_episode: 16 + num_training_epoch: 1 + save_episode_interval: 200 diff --git a/tests/configs/rlhf_cpu.yaml b/tests/configs/rlhf_cpu.yaml new file mode 100644 index 00000000..e5c0b409 --- /dev/null +++ b/tests/configs/rlhf_cpu.yaml @@ -0,0 +1,55 @@ +runtime_env: + platform: DLC + excludes: + - "*pt" + - "logs" + - "tensorboards" + - ".nfs*" + + +models: + policy: + num_gpu: 1 + gpu_per_process: 1 + trainable: False + + reference: + num_gpu: 1 + gpu_per_process: 1 + trainable: False + + reward: + num_cpu: 2 + cpu_per_process: 1 + trainable: False + + value: + num_gpu: 1 + gpu_per_process: 1 + trainable: False + + ppo_policy: + num_gpu: 1 + gpu_per_process: 1 + trainable: True + lora: + enable_lora: ${enable_lora_policy:False} + lora_dim: 64 + lora_layer: ColumnParallelLinear,LinearLayer,RowParallelLinear + column_only_qkv: False + lora_dropout: 0.05 + + ppo_value: + num_gpu: 1 + gpu_per_process: 1 + trainable: True + +runtime: + debug: True + generation_batch_size: ${batch_size:4} + train_micro_batch_size: 5 + train_global_batch_size: 10 + num_episode: 2 + sample_per_episode: 16 + num_training_epoch: 1 + save_episode_interval: 200 diff --git a/tests/configs/test_eval.yaml b/tests/configs/test_eval.yaml index 008b35a5..d476a810 100644 --- a/tests/configs/test_eval.yaml +++ b/tests/configs/test_eval.yaml @@ -9,9 +9,9 @@ runtime_env: models: policy: - num_device: 2 + num_gpu: 2 trainable: False -rlhf: +runtime: debug: True generation_batch_size: 4 diff --git a/tests/configs/test_eval2.yaml b/tests/configs/test_eval2.yaml new file mode 100644 
index 00000000..eb89e5aa --- /dev/null +++ b/tests/configs/test_eval2.yaml @@ -0,0 +1,25 @@ +runtime_env: + platform: DLC + excludes: + - "*pt" + - "logs" + - "tensorboards" + - ".nfs*" + + +models: + policy: + num_gpu: 2 + trainable: False + + reward: + num_gpu: 2 + trainable: False + + reward2: + num_cpu: 2 + trainable: False + +runtime: + debug: True + generation_batch_size: 4 diff --git a/tests/launch_helper.sh b/tests/launch_helper.sh deleted file mode 100644 index b2fa9e25..00000000 --- a/tests/launch_helper.sh +++ /dev/null @@ -1,25 +0,0 @@ -set -x -nnodes=1 -nproc_per_node=8 -node_rank=0 -master_addr="127.0.0.1" -master_port="9001" -world_size=8 -script_and_args=$1 -cmd_args="--nproc_per_node=$nproc_per_node \ - --nnodes=$nnodes \ - --node_rank=$node_rank \ - --master_addr=$master_addr \ - --master_port=$master_port $script_and_args \ -" - -export PYTHONPATH=../ - -END=$nproc_per_node -world_size=$(( nproc_per_node * nnodes )) -for ((i=0;i 6: + break + expect_0 = [ + [0, 1, 2, 0, 1, 2, 0, 1], + [1, 2, 0, 1, 2, 0, 1, 2], + [2, 0, 1, 2, 0, 1, 2, 0], + [0, 1, 2, 0, 1, 2, 0, 1], + [1, 2, 0, 1, 2, 0, 1, 2], + [2, 0, 1, 2, 0, 1, 2, 0], + [0, 1, 2, 0, 1, 2, 0, 1] + ] + + expect_1 = [ + [2, 0, 1, 2, 0, 1, 2, 0], + [0, 1, 2, 0, 1, 2, 0, 1], + [1, 2, 0, 1, 2, 0, 1, 2], + [2, 0, 1, 2, 0, 1, 2, 0], + [0, 1, 2, 0, 1, 2, 0, 1], + [1, 2, 0, 1, 2, 0, 1, 2], + [2, 0, 1, 2, 0, 1, 2, 0] + ] + + self.assertEqual(res, [expect_0, expect_1]) + + + def test_circle_episode_data(self): + num_replicas = 2 + samplers = [EpisodeDataSampler( + total_samples=25, + consumed_samples=0, + micro_batch_size=8, + data_parallel_rank=i, + data_parallel_size=num_replicas, + sample_per_episode=32 + ) for i in range(num_replicas)] + + res = [[] for _ in range(num_replicas)] + for idx, sampler in enumerate(samplers): + for indices in sampler: + res[idx].append(indices) + if len(res[idx]) > 6: + break + expect_0 = [ + [0, 1, 2, 3, 4, 5, 6, 7], + [16, 17, 18, 19, 20, 21, 22, 23], + [7, 8, 9, 10, 11, 
12, 13, 14], + [23, 24, 0, 1, 2, 3, 4, 5], + [14, 15, 16, 17, 18, 19, 20, 21], + [5, 6, 7, 8, 9, 10, 11, 12], + [21, 22, 23, 24, 0, 1, 2, 3] + ] + expect_1 = [ + [8, 9, 10, 11, 12, 13, 14, 15], + [24, 0, 1, 2, 3, 4, 5, 6], + [15, 16, 17, 18, 19, 20, 21, 22], + [6, 7, 8, 9, 10, 11, 12, 13], + [22, 23, 24, 0, 1, 2, 3, 4], + [13, 14, 15, 16, 17, 18, 19, 20], + [4, 5, 6, 7, 8, 9, 10, 11] + ] + for idx, ele in enumerate(res): + print(f"res_{idx}: {ele}") + + self.assertEqual(res, [expect_0, expect_1]) + +# pylint: enable=missing-class-docstring + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_utils.py b/tests/test_utils.py index 46bbc6e8..6faeb5c5 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,4 +1,4 @@ -# Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved. +# Copyright 2024 Alibaba Group Holding Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -56,7 +56,7 @@ def func4(): self.assertEqual(res, 2) - def test_get(self): + def _test_get(self): ray.init() value = ray.put(1) data = (value, {1:1}) diff --git a/tests/utils.py b/tests/utils.py index 22c9c9ba..0c560f2b 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -2,7 +2,7 @@ from torch.utils.data import Dataset -from chatlearn import RLHFTorchModule +from chatlearn import TorchModule class CustomDataset(Dataset): @@ -17,7 +17,7 @@ def __getitem__(self, idx): return {"query": self.data[idx]} -class PolicyModel(RLHFTorchModule): +class PolicyModel(TorchModule): def setup(self): time.sleep(0.05) @@ -29,12 +29,12 @@ def forward_step(self, data, iteration): data["policy_out"] = query return data - def build_dataset(self, prompts): + def build_dataset(self, prompts, is_eval=False): dataset = CustomDataset(prompts) return dataset -class ReferenceModel(RLHFTorchModule): +class ReferenceModel(TorchModule): def forward_step(self, data, iteration): print("reference forward =========", flush=True) @@ -44,7 +44,7 @@ def forward_step(self, data, iteration): return data -class RewardModel(RLHFTorchModule): +class RewardModel(TorchModule): def forward_step(self, data, iteration): print("reward forward =========", flush=True) @@ -53,7 +53,7 @@ def forward_step(self, data, iteration): return data -class ValueModel(RLHFTorchModule): +class ValueModel(TorchModule): def forward_step(self, data, iteration): print("value forward =========", flush=True) @@ -62,18 +62,18 @@ def forward_step(self, data, iteration): return data -class PPOPolicy(RLHFTorchModule): +class PPOPolicy(TorchModule): - def train_step(self, data, train_info): + def train_step(self, data, iteration): print("ppo policy train_step =========", flush=True) num_mb = len(data) time.sleep(0.1) return num_mb -class PPOValue(RLHFTorchModule): +class PPOValue(TorchModule): - def train_step(self, data, train_info): + def train_step(self, data, iteration): print("ppo value train_step =========", flush=True) num_mb = 
len(data) time.sleep(0.1) From 9d19e148ac0f7f7e480cd941d699e30b57694800 Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 13:42:40 +0800 Subject: [PATCH 02/19] update docs --- docs/en/.readthedocs.yaml | 32 +++++++++++++++++++++++++++++ docs/en/tutorial/tutorial_llama2.md | 6 +++--- docs/zh/.readthedocs.yaml | 32 +++++++++++++++++++++++++++++ docs/zh/tutorial/tutorial_llama2.md | 6 +++--- 4 files changed, 70 insertions(+), 6 deletions(-) create mode 100644 docs/en/.readthedocs.yaml create mode 100644 docs/zh/.readthedocs.yaml diff --git a/docs/en/.readthedocs.yaml b/docs/en/.readthedocs.yaml new file mode 100644 index 00000000..dcef78b5 --- /dev/null +++ b/docs/en/.readthedocs.yaml @@ -0,0 +1,32 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the OS, Python version and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.10" + # You can also specify other tool versions: + # nodejs: "19" + # rust: "1.64" + # golang: "1.19" + +# Build documentation in the "docs/" directory with Sphinx +sphinx: + configuration: docs/en/conf.py + +# Optionally build your docs in additional formats such as PDF and ePub +formats: + - pdf + - epub + +# Optional but recommended, declare the Python requirements required +# to build your documentation +# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html +python: + install: + - requirements: docs/requirements.txt diff --git a/docs/en/tutorial/tutorial_llama2.md b/docs/en/tutorial/tutorial_llama2.md index 8c5cf157..8ff6c262 100644 --- a/docs/en/tutorial/tutorial_llama2.md +++ b/docs/en/tutorial/tutorial_llama2.md @@ -25,7 +25,7 @@ Please refer to [Environment and Code Setup](../installation.md). Please refer to [3-stage data](data.md) to prepare your training data. 
-## Step: SFT +## SFT SFT refers to the process of fine-tuning a pre-trained language model using annotated dialogue data. In this example, we need to download the pre-trained model, and then start a simple SFT training demonstration. @@ -83,7 +83,7 @@ In our training script, the resource requirements (assuming the resources are A1 For the environment variables and configurations required for distributed execution, please refer to [Distributed Execution](run.md). -## Step: Reward Model Training +## Reward Model Training The Reward model refers to the model that serves as a proxy for human evaluation in RLHF. It provides real-time evaluation and scoring of the model's generated question responses. @@ -113,7 +113,7 @@ The resource requirements for training a reward model of the same scale are the For the environment variables and configurations required for distributed execution, please refer to [Distributed Execution](run.md). -## Step: Alignment Training +## Alignment Training ChatLearn supports multiple alignments: RLHF, DPO, OnlineDPO, GRPO diff --git a/docs/zh/.readthedocs.yaml b/docs/zh/.readthedocs.yaml new file mode 100644 index 00000000..39ce3915 --- /dev/null +++ b/docs/zh/.readthedocs.yaml @@ -0,0 +1,32 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the OS, Python version and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.10" + # You can also specify other tool versions: + # nodejs: "19" + # rust: "1.64" + # golang: "1.19" + +# Build documentation in the "docs/" directory with Sphinx +sphinx: + configuration: docs/zh/conf.py + +# Optionally build your docs in additional formats such as PDF and ePub +formats: + - pdf + - epub + +# Optional but recommended, declare the Python requirements required +# to build your documentation +# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 
+python: + install: + - requirements: docs/requirements.txt diff --git a/docs/zh/tutorial/tutorial_llama2.md b/docs/zh/tutorial/tutorial_llama2.md index 2efeee68..40eef0bb 100644 --- a/docs/zh/tutorial/tutorial_llama2.md +++ b/docs/zh/tutorial/tutorial_llama2.md @@ -27,7 +27,7 @@ 请参考 [各阶段数据](data.md) 准备好您的训练数据。 -## Step: SFT +## SFT SFT 指的是使用有标注的对话数据来微调预训练语言模型的过程。在这个示例中,我们需要下载预训练的模型,然后开始一个简单的 SFT 训练示例。 @@ -81,7 +81,7 @@ bash scripts/llama2_sft.sh 分布式执行所需的环境变量和配置参考 [分布式执行](run.md)。 -## Step: Reward 模型训练 +## Reward 模型训练 Reward 模型指的是在 RLHF 中作为人类评价的代理,对模型产生的问题回复进行实时评价打分的模型,Reward 模型输入问题以及模型回复,可以产生一个标量表示模型回复的质量。 @@ -108,7 +108,7 @@ bash scripts/train_reward_llama.sh 分布式执行所需的环境变量和配置参考 [分布式执行](run.md)。 -## Step: Alignment 训练 +## Alignment 训练 ChatLearn 支持多种 Alignment 训练模式:RLHF、DPO、OnlineDPO、GRP、GRPO From 14538b9253b46e19dd1569147a8482b62e7ff915 Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 14:16:09 +0800 Subject: [PATCH 03/19] add req --- docs/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/requirements.txt b/docs/requirements.txt index 172de8d3..44cd0e16 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -5,3 +5,4 @@ sphinx-markdown-tables myst-parser sphinx-markdown-builder sphinx_markdown_checkbox +ray From 27b8574e1f6f81cf88f19cf19f2ab10e5d282c15 Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 16:10:47 +0800 Subject: [PATCH 04/19] add mock import --- docs/zh/conf.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/docs/zh/conf.py b/docs/zh/conf.py index 53d199d3..d8231c68 100644 --- a/docs/zh/conf.py +++ b/docs/zh/conf.py @@ -24,6 +24,19 @@ sys.path.insert(0, os.path.abspath("../../")) +from unittest import mock + +# 使用unittest.mock来mock模块 +imports = ['torch', 'cupy.cuda', 'pynvml', 'ray', 'ray.util', 'ray.util.collective.collective_group.nccl_util', + 'ray.util.scheduling_strategies', 'pynvml', 'ray.util.state', 'ray._private', 'ray._private.utils', + 
'ray._private.ray_logging', 'ray._private.worker', 'ray.util.collective', 'ray.util.collective.collective_group', + "ray.util.collective.collective_group.base_collective_group", 'ray.util.collective.collective_group.nccl_collective_group', + 'torch.utils.data', 'torch._utils', 'transformers', 'transformers.integrations', 'transformers.trainer', 'deepspeed', + 'deepspeed.ops.adam', 'deepspeed.runtime.zero.partition_parameters', 'torch.distributed', 'torch.nn', 'torch.nn.utils.rnn', 'ray.util.queue', + 'ray.experimental.state.api', 'torch.cuda', 'ray.util.placement_group', "cupy.cuda.nccl"] + +for key in imports: + sys.modules[key] = mock.MagicMock() import chatlearn from chatlearn.utils import arguments From adc5a4720aede8e74b42d52cb35223c332f8579b Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 16:11:09 +0800 Subject: [PATCH 05/19] add mock import --- docs/requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index 44cd0e16..172de8d3 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -5,4 +5,3 @@ sphinx-markdown-tables myst-parser sphinx-markdown-builder sphinx_markdown_checkbox -ray From 605bbd2abc9063c914787adeb88941a20f134e9d Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 16:23:16 +0800 Subject: [PATCH 06/19] update --- docs/en/conf.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/docs/en/conf.py b/docs/en/conf.py index 81ec42d6..b273b259 100644 --- a/docs/en/conf.py +++ b/docs/en/conf.py @@ -24,6 +24,19 @@ sys.path.insert(0, os.path.abspath("../../")) +from unittest import mock + +# 使用unittest.mock来mock模块 +imports = ['torch', 'cupy.cuda', 'pynvml', 'ray', 'ray.util', 'ray.util.collective.collective_group.nccl_util', + 'ray.util.scheduling_strategies', 'pynvml', 'ray.util.state', 'ray._private', 'ray._private.utils', + 'ray._private.ray_logging', 'ray._private.worker', 'ray.util.collective', 
'ray.util.collective.collective_group', + "ray.util.collective.collective_group.base_collective_group", 'ray.util.collective.collective_group.nccl_collective_group', + 'torch.utils.data', 'torch._utils', 'transformers', 'transformers.integrations', 'transformers.trainer', 'deepspeed', + 'deepspeed.ops.adam', 'deepspeed.runtime.zero.partition_parameters', 'torch.distributed', 'torch.nn', 'torch.nn.utils.rnn', 'ray.util.queue', + 'ray.experimental.state.api', 'torch.cuda', 'ray.util.placement_group', "cupy.cuda.nccl"] + +for key in imports: + sys.modules[key] = mock.MagicMock() import chatlearn from chatlearn.utils import arguments From 6b4c6b40ff38e2bde4ecf21762fc626748da0545 Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 16:35:46 +0800 Subject: [PATCH 07/19] update --- docs/en/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/conf.py b/docs/en/conf.py index b273b259..106bd5df 100644 --- a/docs/en/conf.py +++ b/docs/en/conf.py @@ -33,7 +33,7 @@ "ray.util.collective.collective_group.base_collective_group", 'ray.util.collective.collective_group.nccl_collective_group', 'torch.utils.data', 'torch._utils', 'transformers', 'transformers.integrations', 'transformers.trainer', 'deepspeed', 'deepspeed.ops.adam', 'deepspeed.runtime.zero.partition_parameters', 'torch.distributed', 'torch.nn', 'torch.nn.utils.rnn', 'ray.util.queue', - 'ray.experimental.state.api', 'torch.cuda', 'ray.util.placement_group', "cupy.cuda.nccl"] + 'ray.experimental.state.api', 'torch.cuda', 'ray.util.placement_group', "cupy.cuda.nccl", 'tqdm'] for key in imports: sys.modules[key] = mock.MagicMock() From 22f139e901ee66a334c81b4a1c4a3eb28f49e7f1 Mon Sep 17 00:00:00 2001 From: Xianyan Jia Date: Wed, 28 Aug 2024 16:45:29 +0800 Subject: [PATCH 08/19] Create pylint.yml --- .github/workflows/pylint.yml | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 .github/workflows/pylint.yml diff --git 
a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml new file mode 100644 index 00000000..0715d21f --- /dev/null +++ b/.github/workflows/pylint.yml @@ -0,0 +1,27 @@ +name: Pylint + +on: + pull_request: + push: + branches: + - main + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.8", "3.9", "3.10"] + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pylint==2.16.1 + - name: Analysing the code with pylint + run: | + make lint From 342663fd7fc36b6405865330fe86ae5d14114332 Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 16:47:41 +0800 Subject: [PATCH 09/19] add workflow --- docs/zh/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/conf.py b/docs/zh/conf.py index d8231c68..2fe022ec 100644 --- a/docs/zh/conf.py +++ b/docs/zh/conf.py @@ -33,7 +33,7 @@ "ray.util.collective.collective_group.base_collective_group", 'ray.util.collective.collective_group.nccl_collective_group', 'torch.utils.data', 'torch._utils', 'transformers', 'transformers.integrations', 'transformers.trainer', 'deepspeed', 'deepspeed.ops.adam', 'deepspeed.runtime.zero.partition_parameters', 'torch.distributed', 'torch.nn', 'torch.nn.utils.rnn', 'ray.util.queue', - 'ray.experimental.state.api', 'torch.cuda', 'ray.util.placement_group', "cupy.cuda.nccl"] + 'ray.experimental.state.api', 'torch.cuda', 'ray.util.placement_group', "cupy.cuda.nccl", 'numpy'] for key in imports: sys.modules[key] = mock.MagicMock() From 31b8bdf4615e4a56ddc1cc0b281d15256bb9d478 Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 16:50:26 +0800 Subject: [PATCH 10/19] fix lint --- chatlearn/models/vllm/vllm_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/chatlearn/models/vllm/vllm_model.py b/chatlearn/models/vllm/vllm_model.py index 6b66e5d8..19582354 100644 --- a/chatlearn/models/vllm/vllm_model.py +++ b/chatlearn/models/vllm/vllm_model.py @@ -49,7 +49,7 @@ def load_weights(self): load_checkpoint(self, None, None) torch.distributed.barrier() - def load_state_dict(self, state_dict, strict=True, assign=False): + def load_state_dict(self, state_dict, strict=True, assign=False): # pylint: disable=unused-argument qwen_version = None if isinstance(self.model, LlamaForCausalLM): convert_state_dict_internal = convert_llama_state_dict_from_megatron_to_vllm From 67d05375e5e7fbfb89fdf65cdc50c7f308d4728c Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 16:53:30 +0800 Subject: [PATCH 11/19] update --- docs/en/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/conf.py b/docs/en/conf.py index 106bd5df..6b0fd5b4 100644 --- a/docs/en/conf.py +++ b/docs/en/conf.py @@ -33,7 +33,7 @@ "ray.util.collective.collective_group.base_collective_group", 'ray.util.collective.collective_group.nccl_collective_group', 'torch.utils.data', 'torch._utils', 'transformers', 'transformers.integrations', 'transformers.trainer', 'deepspeed', 'deepspeed.ops.adam', 'deepspeed.runtime.zero.partition_parameters', 'torch.distributed', 'torch.nn', 'torch.nn.utils.rnn', 'ray.util.queue', - 'ray.experimental.state.api', 'torch.cuda', 'ray.util.placement_group', "cupy.cuda.nccl", 'tqdm'] + 'ray.experimental.state.api', 'torch.cuda', 'ray.util.placement_group', "cupy.cuda.nccl", 'tqdm', 'numpy'] for key in imports: sys.modules[key] = mock.MagicMock() From ca6d9c794d60e93da52a210c510b215b494e9d33 Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 16:55:43 +0800 Subject: [PATCH 12/19] update --- chatlearn/models/deepspeed_module.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/chatlearn/models/deepspeed_module.py b/chatlearn/models/deepspeed_module.py index 
b8f1061a..7f3aecfa 100644 --- a/chatlearn/models/deepspeed_module.py +++ b/chatlearn/models/deepspeed_module.py @@ -19,6 +19,7 @@ import math import os import random +import deepspeed import numpy as np import torch from torch import distributed as dist @@ -31,9 +32,6 @@ from .deepspeed.deepspeed_utils import save_hf_format, save_zero_three_model from .torch_module import TorchModule -if importlib.util.find_spec("deepspeed"): - import deepspeed - class DeepSpeedModule(TorchModule): """DeepSpeedModule is the class for models accelerated with DeepSpeed. From 046bc643f4562046cfaaaeba0d75acb2714a1c1b Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 16:58:40 +0800 Subject: [PATCH 13/19] update --- chatlearn/models/deepspeed_module.py | 1 - 1 file changed, 1 deletion(-) diff --git a/chatlearn/models/deepspeed_module.py b/chatlearn/models/deepspeed_module.py index 7f3aecfa..4e3b9af8 100644 --- a/chatlearn/models/deepspeed_module.py +++ b/chatlearn/models/deepspeed_module.py @@ -15,7 +15,6 @@ """DeepSpeed module""" from datetime import timedelta -import importlib import math import os import random From cf259cbdc3ea893c17cb3483cd0a497c83d628af Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 17:07:05 +0800 Subject: [PATCH 14/19] update --- docs/en/tutorial/tutorial_llama2.md | 3 --- docs/zh/conf.py | 2 +- docs/zh/tutorial/tutorial_llama2.md | 3 --- 3 files changed, 1 insertion(+), 7 deletions(-) diff --git a/docs/en/tutorial/tutorial_llama2.md b/docs/en/tutorial/tutorial_llama2.md index c25827a3..8ff6c262 100644 --- a/docs/en/tutorial/tutorial_llama2.md +++ b/docs/en/tutorial/tutorial_llama2.md @@ -26,7 +26,6 @@ Please refer to [Environment and Code Setup](../installation.md). Please refer to [3-stage data](data.md) to prepare your training data. ## SFT -======= SFT refers to the process of fine-tuning a pre-trained language model using annotated dialogue data. 
In this example, we need to download the pre-trained model, and then start a simple SFT training demonstration. @@ -85,7 +84,6 @@ In our training script, the resource requirements (assuming the resources are A1 For the environment variables and configurations required for distributed execution, please refer to [Distributed Execution](run.md). ## Reward Model Training -========================= The Reward model refers to the model that serves as a proxy for human evaluation in RLHF. It provides real-time evaluation and scoring of the model's generated question responses. @@ -116,7 +114,6 @@ The resource requirements for training a reward model of the same scale are the For the environment variables and configurations required for distributed execution, please refer to [Distributed Execution](run.md). ## Alignment Training -====================== ChatLearn supports multiple alignments: RLHF, DPO, OnlineDPO, GRPO diff --git a/docs/zh/conf.py b/docs/zh/conf.py index 2fe022ec..df7be765 100644 --- a/docs/zh/conf.py +++ b/docs/zh/conf.py @@ -33,7 +33,7 @@ "ray.util.collective.collective_group.base_collective_group", 'ray.util.collective.collective_group.nccl_collective_group', 'torch.utils.data', 'torch._utils', 'transformers', 'transformers.integrations', 'transformers.trainer', 'deepspeed', 'deepspeed.ops.adam', 'deepspeed.runtime.zero.partition_parameters', 'torch.distributed', 'torch.nn', 'torch.nn.utils.rnn', 'ray.util.queue', - 'ray.experimental.state.api', 'torch.cuda', 'ray.util.placement_group', "cupy.cuda.nccl", 'numpy'] + 'ray.experimental.state.api', 'torch.cuda', 'ray.util.placement_group', "cupy.cuda.nccl", 'tqdm', 'numpy'] for key in imports: sys.modules[key] = mock.MagicMock() diff --git a/docs/zh/tutorial/tutorial_llama2.md b/docs/zh/tutorial/tutorial_llama2.md index a22f5878..d71f43de 100644 --- a/docs/zh/tutorial/tutorial_llama2.md +++ b/docs/zh/tutorial/tutorial_llama2.md @@ -28,7 +28,6 @@ ## SFT -======= SFT 
指的是使用有标注的对话数据来微调预训练语言模型的过程。在这个示例中,我们需要下载预训练的模型,然后开始一个简单的 SFT 训练示例。 @@ -83,7 +82,6 @@ bash scripts/llama2_sft.sh 分布式执行所需的环境变量和配置参考 [分布式执行](run.md)。 ## Reward 模型训练 -================== Reward 模型指的是在 RLHF 中作为人类评价的代理,对模型产生的问题回复进行实时评价打分的模型,Reward 模型输入问题以及模型回复,可以产生一个标量表示模型回复的质量。 @@ -111,7 +109,6 @@ bash scripts/train_reward_llama.sh 分布式执行所需的环境变量和配置参考 [分布式执行](run.md)。 ## Alignment 训练 -================== ChatLearn 支持多种 Alignment 训练模式:RLHF、DPO、OnlineDPO、GRP、GRPO From ffce375aaf54e6f27bbd029266a9b6197663f82a Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 17:52:09 +0800 Subject: [PATCH 15/19] refine docs --- README_CN.md | 4 +-- docs/en/chatlearn.md | 2 +- docs/en/tutorial/tutorial_llama2.md | 25 ++++++++------- docs/zh/chatlearn.md | 2 +- docs/zh/index.rst | 6 ++-- docs/zh/tutorial/tutorial_llama2.md | 31 ++++++++++--------- .../scripts/convert_hf_to_megatron.sh | 2 +- .../megatron/scripts/train_reward_llama.sh | 10 +++--- examples/megatron/scripts/train_sft_llama.sh | 10 +++--- 9 files changed, 47 insertions(+), 45 deletions(-) diff --git a/README_CN.md b/README_CN.md index 3a9bbd7e..2510f87a 100644 --- a/README_CN.md +++ b/README_CN.md @@ -1,5 +1,5 @@ -[![docs](https://img.shields.io/badge/docs-latest-brightgreen.svg)](https://chatlearn.readthedocs.io/zh/latest/) +[![docs](https://img.shields.io/badge/docs-latest-brightgreen.svg)](https://chatlearn.readthedocs.io/zh-cn/latest/) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/alibaba/ChatLearn/blob/main/LICENSE)

@@ -35,7 +35,7 @@ ChatLearn的特点如下: # 快速开始 -请参考 [文档](https://chatlearn.readthedocs.io/zh/latest/) 快速开始. +请参考 [文档](https://chatlearn.readthedocs.io/zh-cn/latest/) 快速开始. 1. [环境和代码准备](docs/zh/installation.md) 2. [基于 LLaMA/LLaMA2 模型的端到端训练教程](docs/zh/tutorial/tutorial_llama2.md) diff --git a/docs/en/chatlearn.md b/docs/en/chatlearn.md index 0a8b71fb..548c9997 100644 --- a/docs/en/chatlearn.md +++ b/docs/en/chatlearn.md @@ -42,7 +42,7 @@ By providing a comprehensive and efficient framework, ChatLearn empowers researc ## Quick Start -Please refer to the [Documentation](https://chatlearn.readthedocs.io/zh/latest/) for a quick start guide. +Please refer to the [Documentation](https://chatlearn.readthedocs.io/en/latest/) for a quick start guide. 1. [Environment and Code Setup](installation.md) 2. [End-to-End Training Tutorial with Llama/Llama2 Model](tutorial/tutorial_llama2.md) diff --git a/docs/en/tutorial/tutorial_llama2.md b/docs/en/tutorial/tutorial_llama2.md index 8ff6c262..3432d58e 100644 --- a/docs/en/tutorial/tutorial_llama2.md +++ b/docs/en/tutorial/tutorial_llama2.md @@ -1,6 +1,6 @@ # End-to-end Training Tutorial with Llama Model -This document provides instructions for end-to-end training using the ChatLearn, Megatron-LM framework, and the Llama/Llama2 model. ChatLearn supports three training policies as follows: +This document provides instructions for end-to-end training using the ChatLearn, Megatron-LM and vLLM framework, and the Llama/Llama2 model. ChatLearn supports three training policies as follows: 1. RLHF(Reinforcement Learning from Human Feedback): which includes three stages of training: SFT, Reward, and RLHF training. 2. Direct Preference Optimization(DPO): which includes two stages of training: SFT and DPO training. 3. OnlineDPO/GRPO: which fall in between RLHF and DPO, includes three stages of training: SFT, Reward, and DPO training. 
@@ -59,14 +59,15 @@ bash scripts/convert_hf_to_megatron.sh ### Start SFT Training The script below is an example of SFT training. The `DATASET_PATH` is the path to the SFT training set, such as `$DATASET_ROOT/sft/train.jsonl`. -The `MODEL_SIZE` is an environment variable specified in the script to indicate the size of the model, which can be `llama2-7B`, `llama2-13B`, or `llama2-70B`. +The `model_size` is an environment variable specified in the script to indicate the size of the model, which can be `llama2-7B`, `llama2-13B`, or `llama2-70B`. ```bash export CHATLEARN=path-to-chatlearn export MEGATRON=path-to-megatron-lm cd ${CHATLEARN}/examples/megatron/ -MODEL_SIZE=$MODEL_SIZE \ +export model_size=llama2-7B + LOAD_PATH=$MEGATRON_LLAMA2_CKPT_PATH \ TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ DATASET_PATH=$DATASET_ROOT/sft/ \ @@ -74,9 +75,9 @@ bash scripts/train_sft_llama.sh ``` The training logs and the completed models will be stored in `${CHATLEARN}/output/sft` by default. -For specific definitions, please refer to the script `${CHATLEARN}/2024-08-21/rlhf/examples/megatron/scripts/train_sft_llama.sh`. +For specific definitions, please refer to the script `${CHATLEARN}/examples/megatron/scripts/train_sft_llama.sh`. -In our training script, the resource requirements (assuming the resources are A100-80GB/A800-80GB/H800-80GB GPUs) are as follows: +In our training script, the resource requirements (assuming the resources are A100-80GB/A800-80GB GPUs) are as follows: 1. llama2-7B SFT: 8 GPUs 2. llama2-13B SFT: 8 GPUs 3. 
llama2-70B SFT: 4*8 GPUs @@ -97,7 +98,7 @@ Based on InstructGPT[1], the Reward model training is initialized with the SFT m ```bash export CHATLEARN=path-to-chatlearn -export MEGATRON=path-to-megatron-lm-extension +export MEGATRON=path-to-megatron-lm cd ${CHATLEARN}/examples/megatron/ LOAD_PATH=path-to-sft-ckpt \ @@ -128,7 +129,7 @@ In this example, the user needs to set `POLICY_LOAD` to the checkpoint path gene The Policy and Reference models will be initialized with the SFT checkpoint. `REWARD_LOAD` should be set to the checkpoint path generated by the Reward training, and the user can specify the iteration number for the loaded checkpoint. The Reward and Value models will be initialized with the weights of the Reward model. -`TOKENIZER_MODEL` should be set to the folder path where the `tokenizer.model` for LlamaTokenizer is located. +`TOKENIZER_MODEL` should be set to the folder path where the `tokenizer.model` for Llama2Tokenizer is located. ```bash export CHATLEARN=path-to-chatlearn @@ -140,7 +141,7 @@ cd ${CHATLEARN}/examples/megatron/ export model_size=llama2-7B POLICY_LOAD=path-to-sft-ckpt \ -REWARD_LOAD=path-to-trained-rm-checkpoint \ +REWARD_LOAD=path-to-rm-ckpt \ REWARD_LOAD_ITERATION=1000 \ TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ bash run_scripts/train_rlhf_llama.sh @@ -160,7 +161,7 @@ cd ${CHATLEARN}/examples/megatron/ export model_size=llama2-7B POLICY_LOAD=path-to-sft-ckpt \ -REWARD_LOAD=path-to-trained-rm-checkpoint \ +REWARD_LOAD=path-to-rm-ckpt \ REWARD_LOAD_ITERATION=1000 \ TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ bash scripts/train_online_dpo_llama.sh @@ -172,7 +173,7 @@ bash scripts/train_online_dpo_llama.sh Here is a training script for Llama2-7B Policy and 7B Reward models. In this example, the user needs to set `POLICY_LOAD` to the checkpoint path generated by SFT. The Policy and Reference models will be initialized with the SFT checkpoint. 
-`TOKENIZER_MODEL` should be set to the folder path where the `tokenizer.model` for LlamaTokenizer is located. +`TOKENIZER_MODEL` should be set to the folder path where the `tokenizer.model` for Llama2Tokenizer is located. ```bash export CHATLEARN=path-to-chatlearn @@ -201,7 +202,7 @@ cd ${CHATLEARN}/examples/megatron/ export model_size=llama2-7B POLICY_LOAD=path-to-sft-ckpt \ -REWARD_LOAD=path-to-trained-rm-checkpoint \ +REWARD_LOAD=path-to-rm-ckpt \ REWARD_LOAD_ITERATION=1000 \ TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ bash scripts/train_grpo_math_llama.sh @@ -212,7 +213,7 @@ bash scripts/train_grpo_math_llama.sh If you need to train a llama2-13B / llama2-70B model, simply change `export model_size=llama2-7B` with `export model_size=llama2-13B` / `export model_size=llama2-70B`. You can also modify the model configuration and other parameters according to your needs. -In our training script, the resource requirements (assuming the resources are A100-80GB / A800-80GB / H800-80GB GPUs) are as follows: +In our training script, the resource requirements (assuming the resources are A100-80GB / A800-80GB GPUs) are as follows: 1. llama2-7B RLHF: 8 GPUs 2. llama2-13B RLHF: 2*8 GPUs diff --git a/docs/zh/chatlearn.md b/docs/zh/chatlearn.md index 86fb8994..3e0bb1f7 100644 --- a/docs/zh/chatlearn.md +++ b/docs/zh/chatlearn.md @@ -38,7 +38,7 @@ ChatGPT 是由 OpenAI 开发的基于大型语言模型 (Large Language Model, L ## 快速开始 -请参考 [文档](https://chatlearn.readthedocs.io/zh/latest/) 快速开始. +请参考 [文档](https://chatlearn.readthedocs.io/zh-cn/latest/) 快速开始. 1. [环境和代码准备](installation.md) 2. [基于 Llama/Llama2 模型的端到端训练教程](tutorial/tutorial_llama2.md) diff --git a/docs/zh/index.rst b/docs/zh/index.rst index d8d20d2b..b32b8260 100644 --- a/docs/zh/index.rst +++ b/docs/zh/index.rst @@ -1,12 +1,12 @@ -ChatLearn Documentation +ChatLearn 使用文档 ======================= .. 
toctree:: :maxdepth: 1 - :caption: ChatLearn: 大规模 Alignment 高效训练框架 + :caption: ChatLearn: 灵活、易用、高效的大规模 Alignmant 训练框架 - chatlearn + chatlearn 简介 | | diff --git a/docs/zh/tutorial/tutorial_llama2.md b/docs/zh/tutorial/tutorial_llama2.md index d71f43de..3b08669d 100644 --- a/docs/zh/tutorial/tutorial_llama2.md +++ b/docs/zh/tutorial/tutorial_llama2.md @@ -1,6 +1,6 @@ # 基于 Llama 模型的端到端训练教程 -本文档介绍基于 ChatLearn, Megatron-LM 框架和 Llama/Llama2 模型进行 alignment 的训练流程。支持RLHF、DPO、OnlineDPO、GRPO 多种训练模式: +本文档介绍基于 ChatLearn, Megatron-LM 和 vLLM 框架和 Llama/Llama2 模型进行 alignment 的训练流程。支持RLHF、DPO、OnlineDPO、GRPO 多种训练模式: 1. RLHF(Reinforcement Learning from Human Feedback):包括三阶段的训练(SFT, Reward 和 RLHF 训练); 2. DPO(Direct Preference Optimization):包括两阶段的训练(SFT 和 DPO 训练); 3. OnlineDPO/GRPO:介于 DPO 和 RLHF 之间,使用 Policy + Reward 模型来自动生成数据并进行打分,再进行DPO训练,包括三阶段的训练(SFT, Reward 和 DPO 训练). @@ -44,7 +44,7 @@ SFT 指的是使用有标注的对话数据来微调预训练语言模型的过 export MEGATRON=path-to-megatron-lm export CHATLEARN=path-to-chatlearn -cd ${CHATLEARN}/examples/megatron/sft/ +cd ${CHATLEARN}/examples/megatron/ TP=num_of_tp \ PP=num_of_pp \ @@ -58,23 +58,24 @@ bash scripts/convert_hf_to_megatron.sh ### 开启 SFT 训练 下面的脚本是一个 SFT 的训练样例。其中 `DATASET_PATH` 为 SFT 训练集路径,比如`$DATASET_ROOT/sft/train.jsonl`。 -其中 `MODEL_SIZE` 为脚本中指定模型大小的环境变量,可以为 `llama2-7B`/`llama2-13B`/`llama2-70B`。 +其中 `model_size` 为脚本中指定模型大小的环境变量,可以为 `llama2-7B`/`llama2-13B`/`llama2-70B`。 ```bash export CHATLEARN=path-to-chatlearn export MEGATRON=path-to-megatron-lm -cd ${CHATLEARN}/examples/megatron/sft/ +cd ${CHATLEARN}/examples/megatron/ + +export model_size=llama2-7B -MODEL_SIZE=$MODEL_SIZE \ LOAD_PATH=$MEGATRON_LLAMA2_CKPT_PATH \ TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ DATASET_PATH=$DATASET_ROOT/sft/ \ bash scripts/llama2_sft.sh ``` -训练 log 和训练完成的模型默认会存放在`${CHATLEARN}/output/sft`中,可以通过 CHECKPOINT_PATH 来指定模型保存路径,具体的定义详见`${CHATLEARN}/examples/megatron/sft/scripts/llama2_sft.sh`脚本。 +训练 log 和训练完成的模型默认会存放在`${CHATLEARN}/output/sft`中,可以通过 CHECKPOINT_PATH 
来指定模型保存路径,具体的定义详见`${CHATLEARN}/examples/megatron/scripts/train_sft_llama.sh`脚本。 -在我们的训练脚本里,资源需求 (假设资源为 A100-80GB/A800-80GB/H800-80GB GPU) 如下: +在我们的训练脚本里,资源需求 (假设资源为 A100-80GB/A800-80GB GPU) 如下: 1. llama2-7B SFT: 8 GPU 2. llama2-13B SFT: 8 GPU 3. llama2-70B SFT: 4*8 GPU @@ -93,7 +94,7 @@ Reward 模型指的是在 RLHF 中作为人类评价的代理,对模型产生 ```bash export CHATLEARN=path-to-chatlearn -export MEGATRON=path-to-megatron-lm-extension +export MEGATRON=path-to-megatron-lm cd ${CHATLEARN}/examples/megatron/ LOAD_PATH=path-to-sft-ckpt \ @@ -122,7 +123,7 @@ ChatLearn 支持多种 Alignment 训练模式:RLHF、DPO、OnlineDPO、GRP、G 以下是一个 Llama2-7B 的 Policy 和 7B 的 Reward 模型的训练脚本。 在这个例子中,用户需要设置 `POLICY_LOAD` 为 SFT 产出的 checkpoint 路径,Policy 模型和 Reference 模型将以 SFT 的 checkpoint 初始化。 `REWARD_LOAD` 为 Reward 训练产出的 checkpoint 路径,同时,用户可以指定 load checkpoint 对应的 iteration 数。 -Reward 模型和 Value 模型将以 Reward 模型的权重作初始化。`TOKENIZER_MODEL` 为 `LlamaTokenizer` 所需文件 `tokenizer.model` 所在的文件夹路径。 +Reward 模型和 Value 模型将以 Reward 模型的权重作初始化。`TOKENIZER_MODEL` 为 `Llama2Tokenizer` 所需文件 `tokenizer.model` 所在的文件夹路径。 ```bash export CHATLEARN=path-to-chatlearn @@ -134,7 +135,7 @@ cd ${CHATLEARN}/examples/megatron/ export model_size=llama2-7B POLICY_LOAD=path-to-sft-ckpt \ -REWARD_LOAD=path-to-trained-rm-checkpoint \ +REWARD_LOAD=path-to-rm-ckpt \ REWARD_LOAD_ITERATION=1000 \ TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ bash scripts/train_rlhf_llama.sh @@ -154,7 +155,7 @@ cd ${CHATLEARN}/examples/megatron/ export model_size=llama2-7B POLICY_LOAD=path-to-sft-ckpt \ -REWARD_LOAD=path-to-trained-rm-checkpoint \ +REWARD_LOAD=path-to-rm-ckpt \ REWARD_LOAD_ITERATION=1000 \ TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ bash scripts/train_online_dpo_llama.sh @@ -163,7 +164,7 @@ bash scripts/train_online_dpo_llama.sh #### DPO 以下是一个 Llama2-7B 的 Policy模型的训练脚本。 在这个例子中,用户需要设置 `POLICY_LOAD` 为 SFT 产出的 checkpoint 路径,Policy 模型和 Reference 模型将以 SFT 的 checkpoint 初始化。 -`TOKENIZER_MODEL` 为 `LlamaTokenizer` 所需文件 `tokenizer.model` 所在的文件夹路径。 +`TOKENIZER_MODEL` 为 `Llama2Tokenizer` 所需文件 
`tokenizer.model` 所在的文件夹路径。 ```bash export CHATLEARN=path-to-chatlearn @@ -193,7 +194,7 @@ cd ${CHATLEARN}/examples/megatron/ export model_size=llama2-7B POLICY_LOAD=path-to-sft-ckpt \ -REWARD_LOAD=path-to-trained-rm-checkpoint \ +REWARD_LOAD=path-to-rm-ckpt \ REWARD_LOAD_ITERATION=1000 \ TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ bash scripts/train_grpo_math_llama.sh @@ -205,7 +206,7 @@ bash scripts/train_grpo_math_llama.sh 如果您需要训练 llama2-13B / llama2-70B 的模型,只需要将上述训练脚本中的 `export model_size=llama2-7B` 替换成 `export model_size=llama2-13B` / `export model_size=llama2-70B`。 您也可以根据自己的需求修改模型配置和其他参数。 -在我们的训练脚本里,资源需求 (假设资源为 A100-80GB/A800-80GB/H800-80GB GPU) 如下: +在我们的训练脚本里,资源需求 (假设资源为 A100-80GB/A800-80GB GPU) 如下: 1. llama2-7B RLHF: 8 GPU 2. llama2-13B RLHF: 2*8 GPU 3. llama2-70B RLHF: 4*8 GPU @@ -222,7 +223,7 @@ bash scripts/train_grpo_math_llama.sh export CHATLEARN=path-to-chatlearn export MEGATRON=path-to-megatron-lm -cd $CHATLEARN/examples/megatron/alignment +cd $CHATLEARN/examples/megatron/ LOAD_PATH=path-to-megatron-model \ SAVE_PATH=path-to-hf-model \ diff --git a/examples/megatron/scripts/convert_hf_to_megatron.sh b/examples/megatron/scripts/convert_hf_to_megatron.sh index 83b1a240..f02bacfd 100644 --- a/examples/megatron/scripts/convert_hf_to_megatron.sh +++ b/examples/megatron/scripts/convert_hf_to_megatron.sh @@ -17,7 +17,7 @@ megatron=${MEGATRON} load_dir=${LOAD_PATH} save_dir=${SAVE_PATH} tokenizer_model=${TOKENIZER_MODEL} -model_size=${MODEL_SIZE:-llama2-7B} +model_size=${model_size:-llama2-7B} export CUDA_DEVICE_MAX_CONNECTIONS=1 diff --git a/examples/megatron/scripts/train_reward_llama.sh b/examples/megatron/scripts/train_reward_llama.sh index 472eb18e..bf3fb8e4 100644 --- a/examples/megatron/scripts/train_reward_llama.sh +++ b/examples/megatron/scripts/train_reward_llama.sh @@ -24,23 +24,23 @@ DISTRIBUTED_ARGS="--nproc_per_node ${GPUS_PER_NODE} \ --master_addr ${MASTER_ADDR} \ --master_port ${MASTER_PORT}" -[ -z "$MODEL_SIZE" ] && export MODEL_SIZE=llama2-7B 
+[ -z "$model_size" ] && export model_size=llama2-7B -if [ $MODEL_SIZE = llama2-7B ]; then +if [ $model_size = llama2-7B ]; then NUM_LAYERS=32 HIDDEN_SIZE=4096 NUM_ATTN_HEADS=32 INTERMEDIATE_SIZE=11008 tp=4 pp=1 -elif [ $MODEL_SIZE = llama2-13B ]; then +elif [ $model_size = llama2-13B ]; then NUM_LAYERS=40 HIDDEN_SIZE=5120 NUM_ATTN_HEADS=40 INTERMEDIATE_SIZE=13824 tp=8 pp=1 -elif [ $MODEL_SIZE = llama2-70B ]; then +elif [ $model_size = llama2-70B ]; then NUM_LAYERS=80 HIDDEN_SIZE=8192 NUM_ATTN_HEADS=64 @@ -66,7 +66,7 @@ NNODES=$WORLD_SIZE dp=$(($WORLD_SIZE * $GPUS_PER_NODE / $tp / $pp)) gbs=$(($gbs * $dp)) -[ -z "$CHECKPOINT_PATH" ] && CHECKPOINT_PATH=${CHATLEARN}/output/reward/reward_hh_$(date +%F)_gpt_${MODEL_SIZE}_${NNODES}w${GPUS_PER_NODE}g_tp${tp}_pp${pp}_mb${mb}_seqlen${seq_len} +[ -z "$CHECKPOINT_PATH" ] && CHECKPOINT_PATH=${CHATLEARN}/output/reward/reward_hh_$(date +%F)_gpt_${model_size}_${NNODES}w${GPUS_PER_NODE}g_tp${tp}_pp${pp}_mb${mb}_seqlen${seq_len} MODEL_ARGS=" diff --git a/examples/megatron/scripts/train_sft_llama.sh b/examples/megatron/scripts/train_sft_llama.sh index 2914d922..97333d6d 100644 --- a/examples/megatron/scripts/train_sft_llama.sh +++ b/examples/megatron/scripts/train_sft_llama.sh @@ -23,23 +23,23 @@ DISTRIBUTED_ARGS="--nproc_per_node ${GPUS_PER_NODE} \ export PYTHONPATH=${PYTHONPATH}:${MEGATRON}:${CHATLEARN}/examples/megatron:${CHATLEARN} -[ -z "$MODEL_SIZE" ] && export MODEL_SIZE=llama2-7B +[ -z "$model_size" ] && export model_size=llama2-7B -if [ $MODEL_SIZE = llama2-7B ]; then +if [ $model_size = llama2-7B ]; then NUM_LAYERS=32 HIDDEN_SIZE=4096 NUM_ATTN_HEADS=32 INTERMEDIATE_SIZE=11008 tp=4 pp=1 -elif [ $MODEL_SIZE = llama2-13B ]; then +elif [ $model_size = llama2-13B ]; then NUM_LAYERS=40 HIDDEN_SIZE=5120 NUM_ATTN_HEADS=40 INTERMEDIATE_SIZE=13824 tp=8 pp=1 -elif [ $MODEL_SIZE = llama2-70B ]; then +elif [ $model_size = llama2-70B ]; then NUM_LAYERS=80 HIDDEN_SIZE=8192 NUM_ATTN_HEADS=64 @@ -67,7 +67,7 @@ dp=$(($WORLD_SIZE * 
$GPUS_PER_NODE / $tp / $pp)) gbs=$(($gbs * $dp)) -[ -z "$CHECKPOINT_PATH" ] && CHECKPOINT_PATH=${CHATLEARN}/output/sft/hh_sft_$(date +%F)_gpt_${MODEL_SIZE}_${NNODES}w${GPUS_PER_NODE}g_tp${tp}_pp${pp}_mb${mb}_seqlen${seq_len} +[ -z "$CHECKPOINT_PATH" ] && CHECKPOINT_PATH=${CHATLEARN}/output/sft/hh_sft_$(date +%F)_gpt_${model_size}_${NNODES}w${GPUS_PER_NODE}g_tp${tp}_pp${pp}_mb${mb}_seqlen${seq_len} mkdir -p $CHECKPOINT_PATH From 53e98d5141574e06ee673241f11ffc7ec9ccbaca Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 18:42:38 +0800 Subject: [PATCH 16/19] refine doc --- docs/en/chatlearn.md | 4 ++-- docs/en/conf.py | 32 +++++++++++++++++--------------- docs/en/index.rst | 17 +---------------- docs/zh/chatlearn.md | 4 +--- docs/zh/conf.py | 30 +++++++++++++++++------------- docs/zh/index.rst | 17 +++-------------- 6 files changed, 41 insertions(+), 63 deletions(-) diff --git a/docs/en/chatlearn.md b/docs/en/chatlearn.md index 548c9997..8803f502 100644 --- a/docs/en/chatlearn.md +++ b/docs/en/chatlearn.md @@ -1,6 +1,6 @@ -# ChatLearn +# ChatLearn: A flexible and efficient training framework for large-scale alignment -ChatLearn is an efficient training framework that supports large-scale alignment. It aims to provide a flexible and user-friendly platform for alignment training based on Large Language Models (LLMs) such as ChatGPT. +ChatLearn aims to provide a flexible and user-friendly platform for alignment training based on Large Language Models (LLMs) such as ChatGPT. 
## Introduction diff --git a/docs/en/conf.py b/docs/en/conf.py index 6b0fd5b4..722b1a98 100644 --- a/docs/en/conf.py +++ b/docs/en/conf.py @@ -25,21 +25,23 @@ sys.path.insert(0, os.path.abspath("../../")) from unittest import mock - -# 使用unittest.mock来mock模块 -imports = ['torch', 'cupy.cuda', 'pynvml', 'ray', 'ray.util', 'ray.util.collective.collective_group.nccl_util', - 'ray.util.scheduling_strategies', 'pynvml', 'ray.util.state', 'ray._private', 'ray._private.utils', - 'ray._private.ray_logging', 'ray._private.worker', 'ray.util.collective', 'ray.util.collective.collective_group', - "ray.util.collective.collective_group.base_collective_group", 'ray.util.collective.collective_group.nccl_collective_group', - 'torch.utils.data', 'torch._utils', 'transformers', 'transformers.integrations', 'transformers.trainer', 'deepspeed', - 'deepspeed.ops.adam', 'deepspeed.runtime.zero.partition_parameters', 'torch.distributed', 'torch.nn', 'torch.nn.utils.rnn', 'ray.util.queue', - 'ray.experimental.state.api', 'torch.cuda', 'ray.util.placement_group', "cupy.cuda.nccl", 'tqdm', 'numpy'] - -for key in imports: - sys.modules[key] = mock.MagicMock() - -import chatlearn -from chatlearn.utils import arguments +try: + import chatlearn + from chatlearn.utils import arguments +except ImportError: + imports = ['torch', 'cupy.cuda', 'pynvml', 'ray', 'ray.util', 'ray.util.collective.collective_group.nccl_util', + 'ray.util.scheduling_strategies', 'pynvml', 'ray.util.state', 'ray._private', 'ray._private.utils', + 'ray._private.ray_logging', 'ray._private.worker', 'ray.util.collective', 'ray.util.collective.collective_group', + "ray.util.collective.collective_group.base_collective_group", 'ray.util.collective.collective_group.nccl_collective_group', + 'torch.utils.data', 'torch._utils', 'transformers', 'transformers.integrations', 'transformers.trainer', 'deepspeed', + 'deepspeed.ops.adam', 'deepspeed.runtime.zero.partition_parameters', 'torch.distributed', 'torch.nn', 'torch.nn.utils.rnn', 
'ray.util.queue', + 'ray.experimental.state.api', 'torch.cuda', 'ray.util.placement_group', "cupy.cuda.nccl", 'tqdm', 'numpy'] + + for key in imports: + sys.modules[key] = mock.MagicMock() + + import chatlearn + from chatlearn.utils import arguments from importlib.machinery import SourceFileLoader version = SourceFileLoader("chatlearn.version", "../../chatlearn/utils/version.py") \ diff --git a/docs/en/index.rst b/docs/en/index.rst index d75ff869..bf0721a1 100644 --- a/docs/en/index.rst +++ b/docs/en/index.rst @@ -4,12 +4,10 @@ ChatLearn Documentation .. toctree:: :maxdepth: 1 - :caption: ChatLearn: An Efficient Training Framework for Large-Scale Alignment + :caption: Introduction chatlearn -| -| .. toctree:: :maxdepth: 1 @@ -17,9 +15,6 @@ ChatLearn Documentation installation -| -| - .. toctree:: :maxdepth: 1 :caption: Tutorial @@ -34,10 +29,6 @@ ChatLearn Documentation tutorial/ems tutorial/profile -| -| - - .. toctree:: :maxdepth: 1 :caption: Programming @@ -55,14 +46,8 @@ ChatLearn Documentation api/index -| -| - .. 
toctree:: :maxdepth: 1 :caption: FAQ faq - -| -| \ No newline at end of file diff --git a/docs/zh/chatlearn.md b/docs/zh/chatlearn.md index 3e0bb1f7..ef1af744 100644 --- a/docs/zh/chatlearn.md +++ b/docs/zh/chatlearn.md @@ -1,6 +1,4 @@ -# ChatLearn - -ChatLearn 是一个灵活、易用、高效的大规模 Alignment 训练框架。 +# ChatLearn: 灵活、易用、高效的大规模 Alignmant 训练框架 ## 概述 diff --git a/docs/zh/conf.py b/docs/zh/conf.py index df7be765..99980359 100644 --- a/docs/zh/conf.py +++ b/docs/zh/conf.py @@ -27,19 +27,23 @@ from unittest import mock # 使用unittest.mock来mock模块 -imports = ['torch', 'cupy.cuda', 'pynvml', 'ray', 'ray.util', 'ray.util.collective.collective_group.nccl_util', - 'ray.util.scheduling_strategies', 'pynvml', 'ray.util.state', 'ray._private', 'ray._private.utils', - 'ray._private.ray_logging', 'ray._private.worker', 'ray.util.collective', 'ray.util.collective.collective_group', - "ray.util.collective.collective_group.base_collective_group", 'ray.util.collective.collective_group.nccl_collective_group', - 'torch.utils.data', 'torch._utils', 'transformers', 'transformers.integrations', 'transformers.trainer', 'deepspeed', - 'deepspeed.ops.adam', 'deepspeed.runtime.zero.partition_parameters', 'torch.distributed', 'torch.nn', 'torch.nn.utils.rnn', 'ray.util.queue', - 'ray.experimental.state.api', 'torch.cuda', 'ray.util.placement_group', "cupy.cuda.nccl", 'tqdm', 'numpy'] - -for key in imports: - sys.modules[key] = mock.MagicMock() - -import chatlearn -from chatlearn.utils import arguments +try: + import chatlearn + from chatlearn.utils import arguments +except ImportError: + imports = ['torch', 'cupy.cuda', 'pynvml', 'ray', 'ray.util', 'ray.util.collective.collective_group.nccl_util', + 'ray.util.scheduling_strategies', 'pynvml', 'ray.util.state', 'ray._private', 'ray._private.utils', + 'ray._private.ray_logging', 'ray._private.worker', 'ray.util.collective', 'ray.util.collective.collective_group', + "ray.util.collective.collective_group.base_collective_group", 
'ray.util.collective.collective_group.nccl_collective_group', + 'torch.utils.data', 'torch._utils', 'transformers', 'transformers.integrations', 'transformers.trainer', 'deepspeed', + 'deepspeed.ops.adam', 'deepspeed.runtime.zero.partition_parameters', 'torch.distributed', 'torch.nn', 'torch.nn.utils.rnn', 'ray.util.queue', + 'ray.experimental.state.api', 'torch.cuda', 'ray.util.placement_group', "cupy.cuda.nccl", 'tqdm', 'numpy'] + + for key in imports: + sys.modules[key] = mock.MagicMock() + + import chatlearn + from chatlearn.utils import arguments from importlib.machinery import SourceFileLoader version = SourceFileLoader("chatlearn.version", "../../chatlearn/utils/version.py") \ diff --git a/docs/zh/index.rst b/docs/zh/index.rst index b32b8260..a1e2c699 100644 --- a/docs/zh/index.rst +++ b/docs/zh/index.rst @@ -4,12 +4,10 @@ ChatLearn 使用文档 .. toctree:: :maxdepth: 1 - :caption: ChatLearn: 灵活、易用、高效的大规模 Alignmant 训练框架 + :caption: 简介 - chatlearn 简介 + chatlearn -| -| .. toctree:: :maxdepth: 1 @@ -17,8 +15,6 @@ ChatLearn 使用文档 installation -| -| .. toctree:: :maxdepth: 1 @@ -34,8 +30,6 @@ ChatLearn 使用文档 tutorial/ems tutorial/profile -| -| .. toctree:: :maxdepth: 1 @@ -54,14 +48,9 @@ ChatLearn 使用文档 api/index -| -| .. 
toctree:: :maxdepth: 1 :caption: 常见问题 - faq - -| -| + faq \ No newline at end of file From ab86b677a36c38d6f9fbb3aae7fdb0ba2414f225 Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 18:48:57 +0800 Subject: [PATCH 17/19] update --- docs/zh/tutorial/tutorial_llama2.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/tutorial/tutorial_llama2.md b/docs/zh/tutorial/tutorial_llama2.md index 3b08669d..0aec7e68 100644 --- a/docs/zh/tutorial/tutorial_llama2.md +++ b/docs/zh/tutorial/tutorial_llama2.md @@ -70,7 +70,7 @@ export model_size=llama2-7B LOAD_PATH=$MEGATRON_LLAMA2_CKPT_PATH \ TOKENIZER_MODEL=$LLAMA2_TOKENIZER_MODEL \ DATASET_PATH=$DATASET_ROOT/sft/ \ -bash scripts/llama2_sft.sh +bash scripts/train_sft_llama.sh ``` 训练 log 和训练完成的模型默认会存放在`${CHATLEARN}/output/sft`中,可以通过 CHECKPOINT_PATH 来指定模型保存路径,具体的定义详见`${CHATLEARN}/examples/megatron/scripts/train_sft_llama.sh`脚本。 From 029f00a309366f03084b54aa23d7352c6f05a328 Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 18:55:02 +0800 Subject: [PATCH 18/19] update --- docs/en/tutorial/run.md | 3 --- docs/en/tutorial/tutorial_llama2.md | 1 - docs/zh/tutorial/run.md | 2 -- docs/zh/tutorial/tutorial_llama2.md | 1 - 4 files changed, 7 deletions(-) diff --git a/docs/en/tutorial/run.md b/docs/en/tutorial/run.md index c6f33bc2..ccb9ba5b 100644 --- a/docs/en/tutorial/run.md +++ b/docs/en/tutorial/run.md @@ -15,9 +15,6 @@ Select the job type as `PyTorch` and paste the command into the `Execution Comma -For RLHF, DPO, OnlineDPO, GRPO training task, you need set the advanced setting as `customPortList=30000-30050,createSvcForAllWorkers=true`. 
- - ## Non-PAI-DLC environment If you want to submit distributed training in a non-PAI-DLC environment, diff --git a/docs/en/tutorial/tutorial_llama2.md b/docs/en/tutorial/tutorial_llama2.md index 3432d58e..766735f9 100644 --- a/docs/en/tutorial/tutorial_llama2.md +++ b/docs/en/tutorial/tutorial_llama2.md @@ -221,7 +221,6 @@ In our training script, the resource requirements (assuming the resources are A1 For the environment variables and configurations required for distributed execution, please refer to [Distributed Execution](run.md). -Note that for RLHF tasks, if you are running on PAI DLC, you need to fill in the advanced configuration `customPortList=30000-30050,createSvcForAllWorkers=true`. ### Evaluation diff --git a/docs/zh/tutorial/run.md b/docs/zh/tutorial/run.md index 05c3da87..776bd917 100644 --- a/docs/zh/tutorial/run.md +++ b/docs/zh/tutorial/run.md @@ -13,8 +13,6 @@ ![image.png](../../images/dlc_2.jpg) -**对于 RLHF/DPO/OnlineDPO/GRPO 训练任务,您需要填写高级配置`customPortList=30000-30050,createSvcForAllWorkers=true`。** - ## 其他环境分布式执行 diff --git a/docs/zh/tutorial/tutorial_llama2.md b/docs/zh/tutorial/tutorial_llama2.md index 0aec7e68..bfb77c0e 100644 --- a/docs/zh/tutorial/tutorial_llama2.md +++ b/docs/zh/tutorial/tutorial_llama2.md @@ -212,7 +212,6 @@ bash scripts/train_grpo_math_llama.sh 3. 
llama2-70B RLHF: 4*8 GPU 分布式执行所需的环境变量和配置参考 [分布式执行](run.md)。 -**注意对于 RLHF 任务,如果在 PAI DLC 上运行,您需要填写高级配置`customPortList=30000-30050,createSvcForAllWorkers=true`。** ### 效果评估 From 7e82c6309cf12004108a691da1f010af0465468e Mon Sep 17 00:00:00 2001 From: "xianyan.xianyanjia" Date: Wed, 28 Aug 2024 19:06:39 +0800 Subject: [PATCH 19/19] update link --- docs/en/programming/vllm.md | 12 ++++++------ docs/en/tutorial/ems.md | 2 +- docs/zh/programming/vllm.md | 12 ++++++------ docs/zh/tutorial/ems.md | 2 +- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/en/programming/vllm.md b/docs/en/programming/vllm.md index 20aa941d..bf1285ea 100644 --- a/docs/en/programming/vllm.md +++ b/docs/en/programming/vllm.md @@ -6,7 +6,7 @@ For now, we enable vLLM to accelerate policy generation. ## Model Definition -Similar to inheriting `MegatronModule` for implementing [PolicyInference Model](../../../examples/megatron/models/old_policy_inference.py), the vLLM backend can be enabled by inheriting `VLLMModule` class and implementing the following key modules: +Similar to inheriting `MegatronModule` for implementing [PolicyInference Model](https://github.com/alibaba/ChatLearn/blob/main/examples/megatron/models/old_policy_inference.py), the vLLM backend can be enabled by inheriting `VLLMModule` class and implementing the following key modules: - model_provider: model definition function. - setup: call model_provider to define model. Optionly, call `load_checkpoint` or others. - build_dataset: Preprocess train/eval dataset with vLLM tokenizer. 
@@ -48,9 +48,9 @@ class VLLMPolicyInference(VLLMModule): pass ``` -You can refer to[vllm_policy_inference.py](../../../examples/megatron/models/vllm_policy_inference.py), in which build_dataset/_add_request/forward_step/decode_internal clarified as following: +You can refer to[vllm_policy_inference.py](https://github.com/alibaba/ChatLearn/blob/main/examples/megatron/models/vllm_policy_inference.py), in which build_dataset/_add_request/forward_step/decode_internal clarified as following: -- build_dataset: Use `tokenizer`, you only need to return prompt_ids and prompt string. In `build_dataset`, [VLLMPromptPipeline](../../../examples/megatron/data/prompt_dataset.py#141) shows as following: +- build_dataset: Use `tokenizer`, you only need to return prompt_ids and prompt string. In `build_dataset`, [VLLMPromptPipeline](https://github.com/alibaba/ChatLearn/blob/main/examples/megatron/data/prompt_dataset.py#141) shows as following: ```python class VLLMPromptPipeline(PromptPipeline): def __init__(self, prompts: List[str], max_prompt_length: int, tokenizer=None): @@ -108,7 +108,7 @@ class VLLMPolicyInference(VLLMModule): return self._forward_step(data, iteration, eval_mode=False) ``` -- decode_internal: Refer to [examples](../../../examples/megatron/models/vllm_policy_inference.py#L119) for more details. Format of param `batched_outputs` is List[RequestOutput], in which [RequestOutput](https://github.com/vllm-project/vllm/blob/v0.5.1/vllm/outputs.py#L67)includes the following key attributes: +- decode_internal: Refer to [examples](https://github.com/alibaba/ChatLearn/blob/main/examples/megatron/models/vllm_policy_inference.py#L119) for more details. Format of param `batched_outputs` is List[RequestOutput], in which [RequestOutput](https://github.com/vllm-project/vllm/blob/v0.5.1/vllm/outputs.py#L67)includes the following key attributes: | Attibute |Type| Comment | |:------:|:-----:|:-----:| @@ -140,7 +140,7 @@ policy: ... 
``` -Or you can refer to [llama2 model yaml](../../../examples/megatron/configs/llama2/vllm_rlhf.yaml). +Or you can refer to [llama2 model yaml](https://github.com/alibaba/ChatLearn/blob/main/examples/megatron/configs/llama2/vllm_rlhf.yaml). ## hyperparameter configuration yaml @@ -186,4 +186,4 @@ Hyperparameter for vLLM can be divied into 5 parts: - Others: `includes` specifies model structure. -You can refer to [vLLM Hyperparameter Configuration](../../../examples/megatron/configs/llama2/vllm_policy_inference.yaml) for details. +You can refer to [vLLM Hyperparameter Configuration](https://github.com/alibaba/ChatLearn/blob/main/examples/megatron/configs/llama2/vllm_policy_inference.yaml) for details. diff --git a/docs/en/tutorial/ems.md b/docs/en/tutorial/ems.md index 7deeff21..da7b8d9c 100644 --- a/docs/en/tutorial/ems.md +++ b/docs/en/tutorial/ems.md @@ -26,4 +26,4 @@ Alternatively, it can also be configured in the training script using environmen - PPO policy model: `export free_memory_ppo_policy=True` - PPO value model: `export free_memory_ppo_value=True` -A complete example can be found in the [llama2 configuration](../../../examples/megatron/configs/llama2/rlhf.yaml). \ No newline at end of file +A complete example can be found in the [llama2 configuration](https://github.com/alibaba/ChatLearn/blob/main/examples/megatron/configs/llama2/rlhf.yaml). 
\ No newline at end of file diff --git a/docs/zh/programming/vllm.md b/docs/zh/programming/vllm.md index 22548ee0..b2fec2b2 100644 --- a/docs/zh/programming/vllm.md +++ b/docs/zh/programming/vllm.md @@ -6,7 +6,7 @@ ChatLearn中支持vLLM进行跨机分布式推理,支持vllm和training backen ## 模型定义 -类似于继承`MegatronModule`实现[PolicyInference模型](../../../examples/megatron/models/old_policy_inference.py),PolicyInference模型若想基于vLLM后端完成generation,需要继承`VLLMModule`父类,实现以下关键模块: +类似于继承`MegatronModule`实现[PolicyInference模型](https://github.com/alibaba/ChatLearn/blob/main/examples/megatron/models/old_policy_inference.py),PolicyInference模型若想基于vLLM后端完成generation,需要继承`VLLMModule`父类,实现以下关键模块: - model_provider:模型定义函数。 - setup:调用model_provider定义模型,可根据需要决定是否load_checkpoint等。 - build_dataset:调用vLLM tokenizer处理数据,生成prompt dataset。 @@ -48,9 +48,9 @@ class VLLMPolicyInference(VLLMModule): pass ``` -示例可参考[vllm_policy_inference.py](../../../examples/megatron/models/vllm_policy_inference.py),补充说明build_dataset、_add_request、forward_step、decode_internal如下: +示例可参考[vllm_policy_inference.py](https://github.com/alibaba/ChatLearn/blob/main/examples/megatron/models/vllm_policy_inference.py),补充说明build_dataset、_add_request、forward_step、decode_internal如下: -- build_dataset:调用tokenizer处理只需要返回prompt_ids、prompt str,其中build_dataset的[VLLMPromptPipeline](../../../examples/megatron/data/prompt_dataset.py#141)具体逻辑如下: +- build_dataset:调用tokenizer处理只需要返回prompt_ids、prompt str,其中build_dataset的[VLLMPromptPipeline](https://github.com/alibaba/ChatLearn/blob/main/examples/megatron/data/prompt_dataset.py#141)具体逻辑如下: ```python class VLLMPromptPipeline(PromptPipeline): def __init__(self, prompts: List[str], max_prompt_length: int, tokenizer=None): @@ -108,7 +108,7 @@ class VLLMPolicyInference(VLLMModule): return self._forward_step(data, iteration, eval_mode=False) ``` -- 
decode_internal:可参考[examples](../../../examples/megatron/models/vllm_policy_inference.py#L119)实现。参数batched_outputs格式为List[RequestOutput],其中[RequestOutput](https://github.com/vllm-project/vllm/blob/v0.5.1/vllm/outputs.py#L67)包含以下重要attributes: +- decode_internal:可参考[examples](https://github.com/alibaba/ChatLearn/blob/main/examples/megatron/models/vllm_policy_inference.py#L119)实现。参数batched_outputs格式为List[RequestOutput],其中[RequestOutput](https://github.com/vllm-project/vllm/blob/v0.5.1/vllm/outputs.py#L67)包含以下重要attributes: | 属性 |类型| 含义 | |:------:|:-----:|:-----:| @@ -138,7 +138,7 @@ policy: model_config_file: vllm_policy_inference.yaml ... ``` -也可以参考示例 [llama2模型配置](../../../examples/megatron/configs/llama2/vllm_rlhf.yaml)。 +也可以参考示例 [llama2模型配置](https://github.com/alibaba/ChatLearn/blob/main/examples/megatron/configs/llama2/vllm_rlhf.yaml)。 ## 超参配置 @@ -182,4 +182,4 @@ vLLM超参可分为五部分: - tokenizer:vLLM tokenizer读取目录,可参考[LLama2-7B-hf](https://huggingface.co/meta-llama/Llama-2-7b) - 其他:includes指定模型结构等其余参数; -可以参考 [vLLM超参配置](../../../examples/megatron/configs/llama2/vllm_policy_inference.yaml)。 +可以参考 [vLLM超参配置](https://github.com/alibaba/ChatLearn/blob/main/examples/megatron/configs/llama2/vllm_policy_inference.yaml)。 diff --git a/docs/zh/tutorial/ems.md b/docs/zh/tutorial/ems.md index dab68054..4cd552b8 100644 --- a/docs/zh/tutorial/ems.md +++ b/docs/zh/tutorial/ems.md @@ -29,4 +29,4 @@ policy: - ppo_policy 模型:`export free_memory_ppo_policy=True` - ppo_value 模型:`export free_memory_ppo_value=True` -完整示例可以参考 [llama2 配置](../../../examples/megatron/configs/llama2/rlhf.yaml)。 +完整示例可以参考 [llama2 配置](https://github.com/alibaba/ChatLearn/blob/main/examples/megatron/configs/llama2/rlhf.yaml)。