From abd21fc99f1d35e2081e4c01dc09c71a86bf3c5a Mon Sep 17 00:00:00 2001 From: John Smith <67539080+kingsidelee@users.noreply.github.com> Date: Wed, 25 Oct 2023 01:48:45 +0800 Subject: [PATCH 001/206] cmake : add missed dependencies (#3763) --- examples/main-cmake-pkg/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/main-cmake-pkg/CMakeLists.txt b/examples/main-cmake-pkg/CMakeLists.txt index 90813188460e08..cb00edbbbe3749 100644 --- a/examples/main-cmake-pkg/CMakeLists.txt +++ b/examples/main-cmake-pkg/CMakeLists.txt @@ -16,6 +16,8 @@ add_library(common OBJECT ${_common_path}/console.cpp ${_common_path}/grammar-parser.h ${_common_path}/grammar-parser.cpp + ${_common_path}/sampling.h + ${_common_path}/sampling.cpp ) # WARNING: because build-info.h is auto-generated, it will only From b2f7e04bd312eaf97eee0523aa09d950d585626b Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 24 Oct 2023 21:51:20 +0300 Subject: [PATCH 002/206] sync : ggml (conv ops + cuda MSVC fixes) (#3765) ggml-ci --- ggml-cuda.cu | 10 +- ggml.c | 442 +++++++++++++++++++++++++++++++++++++++++---------- ggml.h | 15 +- 3 files changed, 371 insertions(+), 96 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index db053e3b8a9d81..d1e874b6c778af 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -5664,10 +5664,10 @@ void ggml_init_cublas() { GGML_ASSERT(g_device_count <= GGML_CUDA_MAX_DEVICES); int64_t total_vram = 0; fprintf(stderr, "%s: found %d " GGML_CUDA_NAME " devices:\n", __func__, g_device_count); - for (int64_t id = 0; id < g_device_count; ++id) { + for (int id = 0; id < g_device_count; ++id) { cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, id)); - fprintf(stderr, " Device %ld: %s, compute capability %d.%d\n", id, prop.name, prop.major, prop.minor); + fprintf(stderr, " Device %d: %s, compute capability %d.%d\n", id, prop.name, prop.major, prop.minor); g_tensor_split[id] = total_vram; total_vram += prop.totalGlobalMem; @@ -5677,15 +5677,15 @@ void ggml_init_cublas() { g_compute_capabilities[id] = 100*prop.major + 10*prop.minor; #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) } - for (int64_t id = 0; id < g_device_count; ++id) { + for (int id = 0; id < g_device_count; ++id) { g_tensor_split[id] /= total_vram; } - for (int64_t id = 0; id < g_device_count; ++id) { + for (int id = 0; id < g_device_count; ++id) { CUDA_CHECK(ggml_cuda_set_device(id)); // create cuda streams - for (int64_t is = 0; is < MAX_STREAMS; ++is) { + for (int is = 0; is < MAX_STREAMS; ++is) { CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStreams[id][is], cudaStreamNonBlocking)); } diff --git a/ggml.c b/ggml.c index 17f0ce48775923..6f66bab051cea4 100644 --- a/ggml.c +++ b/ggml.c @@ -571,7 +571,6 @@ int64_t ggml_cycles_per_ms(void) { #define ggml_perf_cycles_per_ms() 0 #endif - // // cache line // @@ -1828,7 +1827,6 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) { return type_traits[type]; } - // // simd mappings // @@ -4057,16 +4055,17 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "ALIBI", "CLAMP", "CONV_1D", + "CONV_1D_STAGE_0", + "CONV_1D_STAGE_1", "CONV_TRANSPOSE_1D", "CONV_2D", + "CONV_2D_STAGE_0", + "CONV_2D_STAGE_1", "CONV_TRANSPOSE_2D", "POOL_1D", "POOL_2D", "UPSCALE", - "CONV_1D_STAGE_0", - "CONV_1D_STAGE_1", - "FLASH_ATTN", "FLASH_FF", "FLASH_ATTN_BACK", @@ -4092,7 +4091,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "CROSS_ENTROPY_LOSS_BACK", }; -static_assert(GGML_OP_COUNT == 71, "GGML_OP_COUNT != 71"); +static_assert(GGML_OP_COUNT 
== 73, "GGML_OP_COUNT != 73"); static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "none", @@ -4143,16 +4142,17 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "alibi(x)", "clamp(x)", "conv_1d(x)", + "conv_1d_stage_0(x)", + "conv_1d_stage_1(x)", "conv_transpose_1d(x)", "conv_2d(x)", + "conv_2d_stage_0(x)", + "conv_2d_stage_1(x)", "conv_transpose_2d(x)", "pool_1d(x)", "pool_2d(x)", "upscale(x)", - "conv_1d_stage_0(x)", - "conv_1d_stage_1(x)", - "flash_attn(x)", "flash_ff(x)", "flash_attn_back(x)", @@ -4178,7 +4178,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "cross_entropy_loss_back(x,y)", }; -static_assert(GGML_OP_COUNT == 71, "GGML_OP_COUNT != 71"); +static_assert(GGML_OP_COUNT == 73, "GGML_OP_COUNT != 73"); static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2"); @@ -4209,8 +4209,10 @@ static void ggml_setup_op_has_task_pass(void) { p[GGML_OP_CONV_1D ] = true; p[GGML_OP_CONV_1D_STAGE_0 ] = true; p[GGML_OP_CONV_1D_STAGE_1 ] = true; - p[GGML_OP_CONV_2D ] = true; p[GGML_OP_CONV_TRANSPOSE_1D ] = true; + p[GGML_OP_CONV_2D ] = true; + p[GGML_OP_CONV_2D_STAGE_0 ] = true; + p[GGML_OP_CONV_2D_STAGE_1 ] = true; p[GGML_OP_CONV_TRANSPOSE_2D ] = true; p[GGML_OP_FLASH_ATTN_BACK ] = true; p[GGML_OP_CROSS_ENTROPY_LOSS ] = true; @@ -5954,7 +5956,6 @@ struct ggml_tensor * ggml_sqrt_inplace( return ggml_sqrt_impl(ctx, a, true); } - // ggml_log static struct ggml_tensor * ggml_log_impl( @@ -6008,7 +6009,6 @@ struct ggml_tensor * ggml_sum( return result; } - // ggml_sum_rows struct ggml_tensor * ggml_sum_rows( @@ -6640,7 +6640,6 @@ struct ggml_tensor * ggml_set_2d_inplace( return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false); } - // ggml_cpy static struct ggml_tensor * ggml_cpy_impl( @@ -6720,7 +6719,6 @@ struct ggml_tensor * ggml_cont_inplace( return ggml_cont_impl(ctx, a, true); } - // make contiguous, with new shape GGML_API struct ggml_tensor * ggml_cont_1d( struct ggml_context * ctx, @@ -7173,7 +7171,6 @@ struct ggml_tensor * ggml_diag( return result; } - // ggml_diag_mask_inf static struct ggml_tensor * ggml_diag_mask_inf_impl( @@ -7285,7 +7282,6 @@ struct ggml_tensor * ggml_soft_max_inplace( return ggml_soft_max_impl(ctx, a, true); } - // ggml_soft_max_back static struct ggml_tensor * ggml_soft_max_back_impl( @@ -7702,7 +7698,11 @@ GGML_API struct ggml_tensor * ggml_conv_transpose_1d( // ggml_conv_2d -struct ggml_tensor * ggml_conv_2d( +// im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] +// a: [OC,IC, KH, KW] +// b: [N, IC, IH, IW] +// result: [N, OH, OW, IC*KH*KW] +static struct ggml_tensor * ggml_conv_2d_stage_0( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, @@ -7721,17 +7721,21 @@ struct ggml_tensor * ggml_conv_2d( is_node = true; } + const int64_t OH = ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1); + const int64_t OW = ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0); + const int64_t ne[4] = { - ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0), - ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1), - a->ne[3], b->ne[3], + a->ne[2] * a->ne[1] * a->ne[0], + OW, + OH, + b->ne[3], }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); + struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 4, ne); int32_t params[] = { s0, s1, p0, p1, d0, d1 }; ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_CONV_2D; + result->op = GGML_OP_CONV_2D_STAGE_0; result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; @@ -7740,8 +7744,61 @@ struct ggml_tensor * ggml_conv_2d( } -// ggml_conv_2d_sk_p0 +// gemm: [N, OC, OH, OW] = [OC, IC * KH * KW] x [N*OH*OW, IC * KH * KW] +// a: [OC, IC, KH, KW] +// b: [N, OH, OW, IC * KH * KW] +// result: [N, OC, OH, OW] +static struct ggml_tensor * ggml_conv_2d_stage_1( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + + bool is_node = false; + if (a->grad || b->grad) { + GGML_ASSERT(false); // TODO: implement backward + is_node = true; + } + + const int64_t ne[4] = { + b->ne[1], + b->ne[2], + a->ne[3], + b->ne[3], + }; + struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); + + result->op = GGML_OP_CONV_2D_STAGE_1; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src[0] = a; + result->src[1] = b; + + return result; + +} + +// a: [OC,IC, KH, KW] +// b: [N, IC, IH, IW] +// result: [N, OC, OH, OW] +struct ggml_tensor * ggml_conv_2d( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + int s0, + int s1, + int p0, + int p1, + int d0, + int d1) { + + struct ggml_tensor * result = ggml_conv_2d_stage_0(ctx, a, b, s0, s1, p0, p1, d0, d1); // [N, OH, OW, IC * KH * KW] + result = ggml_conv_2d_stage_1(ctx, a, result); + + return result; + +} + +// ggml_conv_2d_sk_p0 struct ggml_tensor * ggml_conv_2d_sk_p0( struct ggml_context * ctx, struct ggml_tensor * a, @@ -8180,7 +8237,6 @@ static struct ggml_tensor * ggml_add_rel_pos_impl( return result; } - struct ggml_tensor * ggml_add_rel_pos( struct ggml_context * ctx, struct ggml_tensor * a, @@ -8625,8 +8681,6 @@ struct ggml_tensor * ggml_map_custom3_inplace( return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, true); } - - // ggml_cross_entropy_loss struct ggml_tensor * ggml_cross_entropy_loss( @@ -9828,7 +9882,6 @@ static void ggml_compute_forward_add1( } } - // ggml_compute_forward_acc static void ggml_compute_forward_acc_f32( @@ -9968,7 +10021,6 @@ static void ggml_compute_forward_sub_f32( const int i2 = (ir - i3*ne2*ne1)/ne1; const int i1 = (ir - i3*ne2*ne1 - i2*ne1); - #ifdef GGML_USE_ACCELERATE vDSP_vsub( (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1, @@ -10149,7 +10201,6 @@ static void ggml_compute_forward_div_f32( const int i2 = (ir - i3*ne2*ne1)/ne1; const int i1 = (ir - i3*ne2*ne1 - i2*ne1); - #ifdef GGML_USE_ACCELERATE UNUSED(ggml_vec_div_f32); @@ -10287,7 +10338,6 @@ static void ggml_compute_forward_sqrt( } } - // ggml_compute_forward_log static void ggml_compute_forward_log_f32( @@ -12120,7 +12170,6 @@ static void ggml_compute_forward_out_prod_f32( } } - //int64_t t1 = ggml_perf_time_us(); //static int64_t acc = 0; //acc += t1 - t0; @@ -12316,7 +12365,6 @@ static void ggml_compute_forward_scale_f32( const size_t nb1 = dst->nb[1]; - for (int i1 = ir0; i1 < ir1; i1++) { if (dst->data != src0->data) { // src0 is same shape as dst => same indices @@ -12714,7 +12762,6 @@ static void ggml_compute_forward_get_rows_back_f32( } } - static void ggml_compute_forward_get_rows_back( const struct ggml_compute_params * params, const struct ggml_tensor * src0, @@ -13997,6 +14044,7 @@ static void ggml_compute_forward_conv_1d_f32( } } +// TODO: reuse ggml_mul_mat or implement ggml_im2col and remove stage_0 and stage_1 static void gemm_f16_out_f32(int64_t m, int64_t n, int64_t k, ggml_fp16_t * A, ggml_fp16_t * B, @@ -14298,6 +14346,9 @@ static void ggml_compute_forward_conv_transpose_1d_f16_f32( } } + // need to zero dst 
since we are accumulating into it + memset(dst->data, 0, ggml_nbytes(dst)); + return; } @@ -14370,7 +14421,7 @@ static void ggml_compute_forward_conv_transpose_1d_f32( const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01); float * dst_data = wdata + i01*ne00*ne02; for (int64_t i00 = 0; i00 < ne00; i00++) { - dst_data[i01*ne00*ne02 + i00*ne02 + i02] = src[i00]; + dst_data[i00*ne02 + i02] = src[i00]; } } } @@ -14389,6 +14440,9 @@ static void ggml_compute_forward_conv_transpose_1d_f32( } } + // need to zero dst since we are accumulating into it + memset(dst->data, 0, ggml_nbytes(dst)); + return; } @@ -14450,28 +14504,190 @@ static void ggml_compute_forward_conv_transpose_1d( // ggml_compute_forward_conv_2d -static void ggml_compute_forward_conv_2d_f16_f32( +// src0: kernel [OC, IC, KH, KW] +// src1: image [N, IC, IH, IW] +// dst: result [N, OH, OW, IC*KH*KW] +static void ggml_compute_forward_conv_2d_stage_0_f32( const struct ggml_compute_params * params, const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) { GGML_ASSERT(src0->type == GGML_TYPE_F16); GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F16); + + int64_t t0 = ggml_perf_time_us(); + UNUSED(t0); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int64_t N = ne13; + const int64_t IC = ne12; + const int64_t IH = ne11; + const int64_t IW = ne10; + + // const int64_t OC = ne03; + // const int64_t IC = ne02; + const int64_t KH = ne01; + const int64_t KW = ne00; + + const int64_t OH = ne2; + const int64_t OW = ne1; + + const int ith = params->ith; + const int nth = params->nth; + + const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; + const int32_t s1 = ((const int32_t*)(dst->op_params))[1]; + const int32_t p0 = ((const int32_t*)(dst->op_params))[2]; + const int32_t p1 = ((const int32_t*)(dst->op_params))[3]; + const int32_t d0 = ((const int32_t*)(dst->op_params))[4]; + const int32_t d1 = ((const int32_t*)(dst->op_params))[5]; + + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb10 == sizeof(float)); + + if (params->type == GGML_TASK_INIT) { + memset(dst->data, 0, ggml_nbytes(dst)); + return; + } + + if (params->type == GGML_TASK_FINALIZE) { + return; + } + + // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; + + for (int64_t in = 0; in < N; in++) { + for (int64_t ioh = 0; ioh < OH; ioh++) { + for (int64_t iow = 0; iow < OW; iow++) { + for (int64_t iic = ith; iic < IC; iic+=nth) { + + // micro kernel + ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW] + const float * const src_data = (float *)((char *) src1->data + in*nb13 + iic*nb12); // [IH, IW] + + for (int64_t ikh = 0; ikh < KH; ikh++) { + for (int64_t ikw = 0; ikw < KW; ikw++) { + const int64_t iiw = iow*s0 + ikw*d0 - p0; + const int64_t iih = ioh*s1 + ikh*d1 - p1; + + if (!(iih < 0 || iih >= IH || iiw < 0 || iiw >= IW)) { + dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]); + } + } + } + } + } + } + } + } +} + +// gemm: [N, OC, OH, OW] = [OC, IC * KH * KW] x [N*OH*OW, IC * KH * KW] +// src0: [OC, IC, KH, KW] +// src1: [N, OH, OW, IC * KH * KW] +// result: [N, OC, OH, OW] +static void ggml_compute_forward_conv_2d_stage_1_f16( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + const struct ggml_tensor * src1, + struct ggml_tensor * dst) { + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F16); 
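    // illustrative shapes only, assuming OC=128, IC=64, KH=KW=3, IH=IW=56, stride 1,
    // padding 1, dilation 1: stage 0 (im2col) has turned the image [N, 64, 56, 56]
    // into src1 = [N, 56, 56, 64*3*3] = [N, 56, 56, 576]; this stage multiplies it by
    // the kernel viewed as a [128, 576] matrix (one gemm per batch element),
    // producing dst = [N, 128, 56, 56]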
GGML_ASSERT( dst->type == GGML_TYPE_F32); int64_t t0 = ggml_perf_time_us(); UNUSED(t0); + if (params->type == GGML_TASK_INIT) { + return; + } + + if (params->type == GGML_TASK_FINALIZE) { + return; + } + GGML_TENSOR_BINARY_OP_LOCALS; + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb10 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb0 == sizeof(float)); + + const int N = ne13; + const int OH = ne12; + const int OW = ne11; + + const int OC = ne03; + const int IC = ne02; + const int KH = ne01; + const int KW = ne00; + const int ith = params->ith; const int nth = params->nth; - const int nk0 = ne00; - const int nk1 = ne01; + int64_t m = OC; + int64_t n = OH * OW; + int64_t k = IC * KH * KW; + + // [N, OC, OH, OW] = [OC, IC * KH * KW] x [N*OH*OW, IC * KH * KW] + for (int i = 0; i < N; i++) { + ggml_fp16_t * A = (ggml_fp16_t *)src0->data; // [m, k] + ggml_fp16_t * B = (ggml_fp16_t *)src1->data + i * m * k; // [n, k] + float * C = (float *)dst->data + i * m * n; // [m, n] + + gemm_f16_out_f32(m, n, k, A, B, C, ith, nth); + } +} + +static void ggml_compute_forward_conv_2d_f16_f32( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + const struct ggml_tensor * src1, + struct ggml_tensor * dst) { + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + int64_t t0 = ggml_perf_time_us(); + UNUSED(t0); + + GGML_TENSOR_BINARY_OP_LOCALS + + // src1: image [N, IC, IH, IW] + // src0: kernel [OC, IC, KH, KW] + // dst: result [N, OC, OH, OW] + // ne12: IC + // ne0: OW + // ne1: OH + // nk0: KW + // nk1: KH + // ne13: N + + const int N = ne13; + const int IC = ne12; + const int IH = ne11; + const int IW = ne10; + + const int OC = ne03; + // const int IC = ne02; + const int KH = ne01; + const int KW = ne00; + + const int OH = ne1; + const int OW = ne0; + + const int ith = params->ith; + const int nth = params->nth; + + // const int nk0 = ne00; + // const int nk1 = ne01; // size of the convolution row - the kernel size unrolled across all channels - const int ew0 = nk0*nk1*ne02; + // const int ew0 = nk0*nk1*ne02; + // ew0: IC*KH*KW const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; const int32_t s1 = ((const int32_t*)(dst->op_params))[1]; @@ -14487,24 +14703,27 @@ static void ggml_compute_forward_conv_2d_f16_f32( memset(params->wdata, 0, params->wsize); // prepare source data (src1) + // im2col: [N, IC, IH, IW] => [N*OH*OW, IC*KH*KW] + { ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - for (int i13 = 0; i13 < ne13; i13++) { - for (int i12 = 0; i12 < ne12; i12++) { - const float * const src = (float *)((char *) src1->data + i13*nb13 + i12*nb12); - ggml_fp16_t * dst_data = wdata + i13*(ne1*ne0*ew0); - - for (int i1 = 0; i1 < ne1; i1++) { - for (int i0 = 0; i0 < ne0; i0++) { - for (int ik1 = 0; ik1 < nk1; ik1++) { - for (int ik0 = 0; ik0 < nk0; ik0++) { - const int idx0 = i0*s0 + ik0*d0 - p0; - const int idx1 = i1*s1 + ik1*d1 - p1; - - if (!(idx1 < 0 || idx1 >= ne11 || idx0 < 0 || idx0 >= ne10)) { - dst_data[(i1*ne0 + i0)*ew0 + i12*(nk0*nk1) + ik1*nk0 + ik0] = - GGML_FP32_TO_FP16(src[idx1*ne10 + idx0]); + for (int in = 0; in < N; in++) { + for (int iic = 0; iic < IC; iic++) { + for (int ioh = 0; ioh < OH; ioh++) { + for (int iow = 0; iow < OW; iow++) { + + // micro kernel + ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW] + const float * const src_data = (float *)((char *) src1->data + in*nb13 + iic*nb12); // [IH, IW] + + for (int ikh = 0; ikh 
< KH; ikh++) { + for (int ikw = 0; ikw < KW; ikw++) { + const int iiw = iow*s0 + ikw*d0 - p0; + const int iih = ioh*s1 + ikh*d1 - p1; + + if (!(iih < 0 || iih >= IH || iiw < 0 || iiw >= IW)) { + dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]); } } } @@ -14521,30 +14740,22 @@ static void ggml_compute_forward_conv_2d_f16_f32( return; } - // total patches in dst - const int np = ne2; - - // patches per thread - const int dp = (np + nth - 1)/nth; + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + // wdata: [N*OH*OW, IC*KH*KW] + // dst: result [N, OC, OH, OW] + // src0: kernel [OC, IC, KH, KW] - // patch range for this thread - const int ip0 = dp*ith; - const int ip1 = MIN(ip0 + dp, np); + int64_t m = OC; + int64_t n = OH * OW; + int64_t k = IC * KH * KW; - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + // [N, OC, OH, OW] = [OC, IC * KH * KW] x [N*OH*OW, IC * KH * KW] + for (int i = 0; i < N; i++) { + ggml_fp16_t * A = (ggml_fp16_t *)src0->data; // [m, k] + ggml_fp16_t * B = (ggml_fp16_t *)wdata + i * m * k; // [n, k] + float * C = (float *)dst->data + i * m * n; // [m * k] - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = ip0; i2 < ip1; i2++) { - float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2); - - for (int i1 = 0; i1 < ne1; ++i1) { - for (int i0 = 0; i0 < ne0; ++i0) { - ggml_vec_dot_f16(ew0, dst_data + i1*ne0 + i0, - (ggml_fp16_t *) ((char *) src0->data + i2*nb03), - (ggml_fp16_t *) wdata + i3*nb3 + (i1*ne0 + i0)*ew0); - } - } - } + gemm_f16_out_f32(m, n, k, A, B, C, ith, nth); } } @@ -14570,6 +14781,48 @@ static void ggml_compute_forward_conv_2d( } } +static void ggml_compute_forward_conv_2d_stage_0( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + const struct ggml_tensor * src1, + struct ggml_tensor * dst) { + switch (src0->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_conv_2d_stage_0_f32(params, src0, src1, dst); + } break; + case GGML_TYPE_F32: + { + GGML_ASSERT(false); + } break; + default: + { + GGML_ASSERT(false); + } break; + } +} + +static void ggml_compute_forward_conv_2d_stage_1( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + const struct ggml_tensor * src1, + struct ggml_tensor * dst) { + switch (src0->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_conv_2d_stage_1_f16(params, src0, src1, dst); + } break; + case GGML_TYPE_F32: + { + GGML_ASSERT(false); + } break; + default: + { + GGML_ASSERT(false); + } break; + } +} + // ggml_compute_forward_conv_transpose_2d static void ggml_compute_forward_conv_transpose_2d( @@ -14628,6 +14881,8 @@ static void ggml_compute_forward_conv_transpose_2d( } } + memset(dst->data, 0, ggml_nbytes(dst)); + return; } @@ -16126,7 +16381,6 @@ static void ggml_compute_forward_add_rel_pos_f32( const int ip0 = dp*ith; const int ip1 = MIN(ip0 + dp, np); - for (int64_t i13 = ip0; i13 < ip1; ++i13) { for (int64_t i12 = 0; i12 < ne12; ++i12) { for (int64_t i11 = 0; i11 < ne11; ++i11) { @@ -16193,7 +16447,6 @@ static void ggml_compute_forward_map_unary_f32( } } - static void ggml_compute_forward_map_unary( const struct ggml_compute_params * params, const struct ggml_tensor * src0, @@ -16241,7 +16494,6 @@ static void ggml_compute_forward_map_binary_f32( } } - static void ggml_compute_forward_map_binary( const struct ggml_compute_params * params, const struct ggml_tensor * src0, @@ -16293,7 +16545,6 @@ static void ggml_compute_forward_map_custom2_f32( fun(dst, a, b); } - // 
ggml_compute_forward_map_custom3 static void ggml_compute_forward_map_custom3_f32( @@ -16568,7 +16819,6 @@ static void ggml_compute_forward_cross_entropy_loss_back_f32( ggml_vec_sub_f32(nc, ds0, ds0, s1); ggml_vec_scale_f32(nc, ds0, d[0] / (float) nr); - #ifndef NDEBUG for (int i = 0; i < nc; ++i) { assert(!isnan(ds0[i])); @@ -16596,7 +16846,6 @@ static void ggml_compute_forward_cross_entropy_loss_back( } } - ///////////////////////////////// static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) { @@ -16808,6 +17057,14 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_conv_2d(params, tensor->src[0], tensor->src[1], tensor); } break; + case GGML_OP_CONV_2D_STAGE_0: + { + ggml_compute_forward_conv_2d_stage_0(params, tensor->src[0], tensor->src[1], tensor); + } break; + case GGML_OP_CONV_2D_STAGE_1: + { + ggml_compute_forward_conv_2d_stage_1(params, tensor->src[0], tensor->src[1], tensor); + } break; case GGML_OP_CONV_TRANSPOSE_2D: { ggml_compute_forward_conv_transpose_2d(params, tensor->src[0], tensor->src[1], tensor); @@ -17737,11 +17994,19 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor { GGML_ASSERT(false); // TODO: not implemented } break; + case GGML_OP_CONV_TRANSPOSE_1D: + { + GGML_ASSERT(false); // TODO: not implemented + } break; case GGML_OP_CONV_2D: { GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_CONV_TRANSPOSE_1D: + case GGML_OP_CONV_2D_STAGE_0: + { + GGML_ASSERT(false); // TODO: not implemented + } break; + case GGML_OP_CONV_2D_STAGE_1: { GGML_ASSERT(false); // TODO: not implemented } break; @@ -18670,6 +18935,7 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { const int64_t ne0 = node->ne[0]; const int64_t ne1 = node->ne[1]; const int64_t ne2 = node->ne[2]; + const int64_t ne3 = node->ne[3]; const int64_t nk = ne00*ne01; const int64_t ew0 = nk * ne02; @@ -18680,7 +18946,8 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { if (node->src[0]->type == GGML_TYPE_F16 && node->src[1]->type == GGML_TYPE_F32) { - cur = sizeof(ggml_fp16_t)*(ne0*ne1*ew0); + // im2col: [N*OH*OW, IC*KH*KW] + cur = sizeof(ggml_fp16_t)*(ne3*ne0*ne1*ew0); } else if (node->src[0]->type == GGML_TYPE_F32 && node->src[1]->type == GGML_TYPE_F32) { cur = sizeof(float)* (ne10*ne11*ne12); @@ -18690,6 +18957,14 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { work_size = MAX(work_size, cur); } break; + case GGML_OP_CONV_2D_STAGE_0: + { + n_tasks = n_threads; + } break; + case GGML_OP_CONV_2D_STAGE_1: + { + n_tasks = n_threads; + } break; case GGML_OP_CONV_TRANSPOSE_2D: { n_tasks = n_threads; @@ -19878,7 +20153,6 @@ static enum ggml_opt_result ggml_opt_adam( opt->loss_after = fx; - // check convergence if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) { GGML_PRINT_DEBUG("converged\n"); diff --git a/ggml.h b/ggml.h index 16aaf169ee8fda..08bff5511c2254 100644 --- a/ggml.h +++ b/ggml.h @@ -401,15 +401,16 @@ extern "C" { GGML_OP_ALIBI, GGML_OP_CLAMP, GGML_OP_CONV_1D, - GGML_OP_CONV_2D, + GGML_OP_CONV_1D_STAGE_0, // internal + GGML_OP_CONV_1D_STAGE_1, // internal GGML_OP_CONV_TRANSPOSE_1D, + GGML_OP_CONV_2D, + GGML_OP_CONV_2D_STAGE_0, // internal + GGML_OP_CONV_2D_STAGE_1, // internal GGML_OP_CONV_TRANSPOSE_2D, GGML_OP_POOL_1D, GGML_OP_POOL_2D, - GGML_OP_CONV_1D_STAGE_0, // internal - GGML_OP_CONV_1D_STAGE_1, // internal - GGML_OP_UPSCALE, // nearest interpolate 
GGML_OP_FLASH_ATTN, @@ -1020,9 +1021,9 @@ extern "C" { struct ggml_tensor * b, float eps); - // A: n columns, m rows - // B: n columns, p rows (i.e. we transpose it internally) - // result is m columns, p rows + // A: k columns, n rows => [ne03, ne02, n, k] + // B: k columns, m rows (i.e. we transpose it internally) => [ne03 * x, ne02 * y, m, k] + // result is n columns, m rows => [ne03 * x, ne02 * y, m, n] GGML_API struct ggml_tensor * ggml_mul_mat( struct ggml_context * ctx, struct ggml_tensor * a, From 1717521cdb976a2219888b0e5cba36e210eee9df Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 24 Oct 2023 23:08:20 +0300 Subject: [PATCH 003/206] server : do not block system prompt update (#3767) * server : do not block system prompt update * server : update state machine logic to process system prompts * server : minor --- examples/server/server.cpp | 57 +++++++++++++------------------------- 1 file changed, 20 insertions(+), 37 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 693f9b7735e493..f52a928c874468 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -454,7 +454,7 @@ struct llama_client_slot } void release() { - if (state == PROCESSING) + if (state == IDLE || state == PROCESSING) { t_token_generation = (ggml_time_us() - t_start_genereration) / 1e3; command = RELEASE; @@ -754,6 +754,7 @@ struct llama_server_context } slot->params.antiprompt.clear(); + const auto &stop = data.find("stop"); if (stop != data.end() && stop->is_array()) { @@ -867,7 +868,7 @@ struct llama_server_context kv_cache_clear(); - for (int32_t i = 0; i < batch.n_tokens; ++i) + for (int i = 0; i < (int) system_tokens.size(); ++i) { llama_batch_add(batch, system_tokens[i], i, { 0 }, false); } @@ -894,16 +895,8 @@ struct llama_server_context { slot.release(); } - wait_all_are_idle(); - all_slots_are_idle = true; - // wait until system prompt load system_need_update = true; - while (system_need_update) - { - std::this_thread::sleep_for(std::chrono::milliseconds(5)); - } - // system prompt loaded, continue } void process_system_prompt_data(const json &sys_props) { @@ -915,26 +908,6 @@ struct llama_server_context { notify_system_prompt_changed(); } - else - { - system_need_update = true; - } - } - - void wait_all_are_idle() { - bool wait = true; - while (wait) - { - wait = false; - for (auto &slot : slots) - { - if (!slot.available()) - { - wait = true; - break; - } - } - } } static size_t find_stopping_strings(const std::string &text, const size_t last_token_size, @@ -965,7 +938,6 @@ struct llama_server_context slot.has_next_token = false; } stop_pos = pos; - } } @@ -1444,7 +1416,7 @@ struct llama_server_context process_tasks(); // update the system prompt wait until all slots are idle state - if (system_need_update) + if (system_need_update && all_slots_are_idle) { LOG_TEE("updating system prompt\n"); update_system_prompt(); @@ -1498,7 +1470,7 @@ struct llama_server_context for (auto & slot : slots) { // release the slot - if (slot.state == PROCESSING && slot.command == RELEASE) + if (slot.command == RELEASE) { slot.state = IDLE; slot.command = NONE; @@ -1509,7 +1481,7 @@ struct llama_server_context continue; } - if (slot.state == IDLE || slot.command == RELEASE) + if (slot.state == IDLE) { continue; } @@ -1530,6 +1502,17 @@ struct llama_server_context { for (auto & slot : slots) { + const bool has_prompt = slot.prompt.is_array() || (slot.prompt.is_string() && !slot.prompt.get().empty()); + + // empty prompt passed -> release the slot and send 
empty response + if (slot.state == IDLE && slot.command == LOAD_PROMPT && !has_prompt) + { + slot.release(); + slot.print_timings(); + send_final_response(slot); + continue; + } + // need process the prompt if (slot.state == IDLE && slot.command == LOAD_PROMPT) { @@ -1749,8 +1732,8 @@ struct llama_server_context if (!process_token(result, slot)) { slot.release(); - send_final_response(slot); slot.print_timings(); + send_final_response(slot); } slot.i_batch = -1; @@ -2285,7 +2268,7 @@ int main(int argc, char **argv) if (!json_value(data, "stream", false)) { std::string completion_text; task_result result = llama.next_result(task_id); - if(!result.error && result.stop) { + if (!result.error && result.stop) { res.set_content(result.result_json.dump(-1, ' ', false, json::error_handler_t::replace), "application/json"); } else @@ -2312,7 +2295,7 @@ int main(int argc, char **argv) { return false; } - if(result.stop) { + if (result.stop) { break; } } else { From ad939626577cd25b462e8026cc543efb71528472 Mon Sep 17 00:00:00 2001 From: cebtenzzre Date: Tue, 24 Oct 2023 16:10:43 -0400 Subject: [PATCH 004/206] server : add parameter -tb N, --threads-batch N (#3584) (#3768) Co-authored-by: Michael Coppola Co-authored-by: Michael Coppola --- examples/server/server.cpp | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index f52a928c874468..b4c4d0a20ad0a7 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1749,15 +1749,16 @@ static void server_print_usage(const char *argv0, const gpt_params ¶ms, printf("usage: %s [options]\n", argv0); printf("\n"); printf("options:\n"); - printf(" -h, --help show this help message and exit\n"); - printf(" -v, --verbose verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled"); - printf(" -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); - printf(" -c N, --ctx-size N size of the prompt context (default: %d)\n", params.n_ctx); - printf(" --rope-freq-base N RoPE base frequency (default: loaded from model)\n"); - printf(" --rope-freq-scale N RoPE frequency scaling factor (default: loaded from model)\n"); - printf(" -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch); - printf(" --memory-f32 use f32 instead of f16 for memory key+value (default: disabled)\n"); - printf(" not recommended: doubles context memory required and no measurable increase in quality\n"); + printf(" -h, --help show this help message and exit\n"); + printf(" -v, --verbose verbose output (default: %s)\n", server_verbose ? 
"enabled" : "disabled"); + printf(" -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); + printf(" -tb N, --threads-batch N number of threads to use during batch and prompt processing (default: same as --threads)\n"); + printf(" -c N, --ctx-size N size of the prompt context (default: %d)\n", params.n_ctx); + printf(" --rope-freq-base N RoPE base frequency (default: loaded from model)\n"); + printf(" --rope-freq-scale N RoPE frequency scaling factor (default: loaded from model)\n"); + printf(" -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch); + printf(" --memory-f32 use f32 instead of f16 for memory key+value (default: disabled)\n"); + printf(" not recommended: doubles context memory required and no measurable increase in quality\n"); if (llama_mlock_supported()) { printf(" --mlock force system to keep model in RAM rather than swapping or compressing\n"); @@ -1907,6 +1908,15 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, } params.n_threads = std::stoi(argv[i]); } + else if (arg == "--threads-batch" || arg == "-tb") + { + if (++i >= argc) + { + invalid_param = true; + break; + } + params.n_threads_batch = std::stoi(argv[i]); + } else if (arg == "-b" || arg == "--batch-size") { if (++i >= argc) From cc448774866e6479c750bd7c135cd8f92cedee67 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 25 Oct 2023 10:09:16 +0300 Subject: [PATCH 005/206] log : disable pid in log filenames --- common/log.h | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/common/log.h b/common/log.h index 70e7e4ca215e30..d2c864ceab7be6 100644 --- a/common/log.h +++ b/common/log.h @@ -97,22 +97,23 @@ #define LOG_TEE_TARGET stderr #endif +// NOTE: currently disabled as it produces too many log files // Utility to obtain "pid" like unique process id and use it when creating log files. -inline std::string log_get_pid() -{ - static std::string pid; - if (pid.empty()) - { - // std::this_thread::get_id() is the most portable way of obtaining a "process id" - // it's not the same as "pid" but is unique enough to solve multiple instances - // trying to write to the same log. - std::stringstream ss; - ss << std::this_thread::get_id(); - pid = ss.str(); - } - - return pid; -} +//inline std::string log_get_pid() +//{ +// static std::string pid; +// if (pid.empty()) +// { +// // std::this_thread::get_id() is the most portable way of obtaining a "process id" +// // it's not the same as "pid" but is unique enough to solve multiple instances +// // trying to write to the same log. +// std::stringstream ss; +// ss << std::this_thread::get_id(); +// pid = ss.str(); +// } +// +// return pid; +//} // Utility function for generating log file names with unique id based on thread id. 
// invocation with log_filename_generator( "llama", "log" ) creates a string "llama..log" @@ -126,8 +127,8 @@ inline std::string log_filename_generator_impl(const std::string & log_file_base std::stringstream buf; buf << log_file_basename; - buf << "."; - buf << log_get_pid(); + //buf << "."; + //buf << log_get_pid(); buf << "."; buf << log_file_extension; From 6961c4bd0b5176e10ab03b35394f1e9eab761792 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 25 Oct 2023 10:26:27 +0300 Subject: [PATCH 006/206] batched-bench : print params at start --- examples/batched-bench/batched-bench.cpp | 4 ++++ ggml-cuda.cu | 12 ++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/examples/batched-bench/batched-bench.cpp b/examples/batched-bench/batched-bench.cpp index c552eaa738becf..43f9c971d18465 100644 --- a/examples/batched-bench/batched-bench.cpp +++ b/examples/batched-bench/batched-bench.cpp @@ -154,6 +154,10 @@ int main(int argc, char ** argv) { } } + LOG_TEE("\n"); + LOG_TEE("%s: n_kv_max = %d, is_pp_shared = %d, n_gpu_layers = %d, mmq = %d\n", __func__, n_kv_max, is_pp_shared, n_gpu_layers, mmq); + LOG_TEE("\n"); + LOG_TEE("|%6s | %6s | %4s | %6s | %8s | %8s | %8s | %8s | %8s | %8s |\n", "PP", "TG", "B", "N_KV", "T_PP s", "S_PP t/s", "T_TG s", "S_TG t/s", "T s", "S t/s"); LOG_TEE("|%6s-|-%6s-|-%4s-|-%6s-|-%8s-|-%8s-|-%8s-|-%8s-|-%8s-|-%8s-|\n", "------", "------", "----", "------", "--------", "--------", "--------", "--------", "--------", "--------"); diff --git a/ggml-cuda.cu b/ggml-cuda.cu index d1e874b6c778af..ba0cd5a7d3f1eb 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -6254,16 +6254,15 @@ inline void ggml_cuda_op_mul_mat_cublas( const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, const int64_t src1_padded_row_size, const cudaStream_t & stream) { - GGML_ASSERT(src0_dd_i != nullptr); + GGML_ASSERT(src0_dd_i != nullptr); GGML_ASSERT(src1_ddf_i != nullptr); - GGML_ASSERT(dst_dd_i != nullptr); - + GGML_ASSERT(dst_dd_i != nullptr); const int64_t ne00 = src0->ne[0]; - const int64_t ne10 = src1->ne[0]; const int64_t ne0 = dst->ne[0]; + const int64_t row_diff = row_high - row_low; int id; @@ -7223,12 +7222,13 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1 //printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name); if (all_on_device && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) { - // KQ + // KQ single-batch ggml_cuda_mul_mat_vec_p021(src0, src1, dst); } else if (all_on_device && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) { - // KQV + // KQV single-batch ggml_cuda_mul_mat_vec_nc(src0, src1, dst); } else if (all_on_device && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) { + // KQ + KQV multi-batch ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst); } else if (src0->type == GGML_TYPE_F32) { ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, false); From 34b2a5e1ee4fe6295fb4420eb91131d743694c65 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 26 Oct 2023 22:53:37 +0300 Subject: [PATCH 007/206] server : do not release slot on image input (#3798) --- examples/server/server.cpp | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index b4c4d0a20ad0a7..5b7e4139de551e 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1502,7 +1502,7 @@ struct llama_server_context { for (auto & slot : slots) { - const bool has_prompt = slot.prompt.is_array() || (slot.prompt.is_string() && !slot.prompt.get().empty()); + const bool has_prompt = slot.prompt.is_array() || (slot.prompt.is_string() && !slot.prompt.get().empty()) || !slot.images.empty(); // empty prompt passed -> release the slot and send empty response if (slot.state == IDLE && slot.command == LOAD_PROMPT && !has_prompt) From 2f9ec7e271220a78fe27c9e6ccbcc0dda31cda0f Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 27 Oct 2023 17:01:23 +0300 Subject: [PATCH 008/206] cuda : improve text-generation and batched decoding performance (#3776) * cuda : prints wip * cuda : new cublas gemm branch for multi-batch quantized src0 * cuda : add F32 sgemm branch * cuda : fine-tune >= VOLTA params + use MMQ only for small batches * cuda : remove duplicated cuBLAS GEMM code * cuda : add CUDA_USE_TENSOR_CORES and GGML_CUDA_FORCE_MMQ macros * build : add compile option to force use of MMQ kernels --- CMakeLists.txt | 7 +++ Makefile | 3 ++ ggml-cuda.cu | 130 +++++++++++++++++++++++++++++++++++++++++++------ llama.cpp | 2 - llama.h | 2 +- 5 files changed, 125 insertions(+), 19 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 202f260491d393..d9fc86237b15cc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -82,6 +82,7 @@ set(LLAMA_BLAS_VENDOR "Generic" CACHE STRING "llama: BLAS library vendor") option(LLAMA_CUBLAS "llama: use CUDA" OFF) #option(LLAMA_CUDA_CUBLAS "llama: use cuBLAS for prompt processing" OFF) option(LLAMA_CUDA_FORCE_DMMV "llama: use dmmv instead of mmvq CUDA kernels" OFF) +option(LLAMA_CUDA_FORCE_MMQ "llama: use mmq kernels instead of cuBLAS" OFF) set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels") set(LLAMA_CUDA_MMV_Y "1" CACHE STRING "llama: y block size for mmv CUDA kernels") option(LLAMA_CUDA_F16 "llama: use 16 bit floats for some calculations" OFF) @@ -305,6 +306,9 @@ if (LLAMA_CUBLAS) if (LLAMA_CUDA_FORCE_DMMV) add_compile_definitions(GGML_CUDA_FORCE_DMMV) endif() + if (LLAMA_CUDA_FORCE_MMQ) + add_compile_definitions(GGML_CUDA_FORCE_MMQ) + endif() add_compile_definitions(GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X}) add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y}) if (DEFINED LLAMA_CUDA_DMMV_Y) @@ -405,6 +409,9 @@ if (LLAMA_HIPBLAS) if (LLAMA_CUDA_FORCE_DMMV) target_compile_definitions(ggml-rocm PRIVATE GGML_CUDA_FORCE_DMMV) endif() + if (LLAMA_CUDA_FORCE_MMQ) + target_compile_definitions(ggml-rocm PRIVATE GGML_CUDA_FORCE_MMQ) + endif() target_compile_definitions(ggml-rocm PRIVATE GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X}) target_compile_definitions(ggml-rocm PRIVATE GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y}) target_compile_definitions(ggml-rocm PRIVATE K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER}) diff --git a/Makefile b/Makefile index 80179631f95a5b..68069f9ff331e9 100644 --- a/Makefile +++ b/Makefile @@ -397,6 +397,9 @@ endif # CUDA_DOCKER_ARCH ifdef LLAMA_CUDA_FORCE_DMMV NVCCFLAGS += -DGGML_CUDA_FORCE_DMMV endif # LLAMA_CUDA_FORCE_DMMV +ifdef LLAMA_CUDA_FORCE_MMQ + NVCCFLAGS += -DGGML_CUDA_FORCE_MMQ +endif # LLAMA_CUDA_FORCE_MMQ ifdef LLAMA_CUDA_DMMV_X NVCCFLAGS += -DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X) else diff --git a/ggml-cuda.cu b/ggml-cuda.cu index ba0cd5a7d3f1eb..1ba951f688d82c 100644 --- a/ggml-cuda.cu 
+++ b/ggml-cuda.cu @@ -87,6 +87,24 @@ #define CC_OFFSET_AMD 1000000 #define CC_RDNA2 (CC_OFFSET_AMD + 1030) +// define this if you want to always fallback to MMQ kernels and not use cuBLAS for matrix multiplication +// on modern hardware, using cuBLAS is recommended as it utilizes F16 tensor cores which are very performant +// for large computational tasks. the drawback is that this requires some extra amount of VRAM: +// - 7B quantum model: +100-200 MB +// - 13B quantum model: +200-400 MB +// +//#define GGML_CUDA_FORCE_MMQ + +// TODO: improve this to be correct for more hardware +// for example, currently fails for GeForce GTX 1660 which is TURING arch (> VOLTA) but does not have tensor cores +// probably other such cases, and not sure what happens on AMD hardware +#if !defined(GGML_CUDA_FORCE_MMQ) +#define CUDA_USE_TENSOR_CORES +#endif + +// max batch size to use MMQ kernels when tensor cores are available +#define MMQ_MAX_BATCH_SIZE 32 + #if defined(GGML_USE_HIPBLAS) #define __CUDA_ARCH__ 1300 @@ -470,7 +488,6 @@ static int g_device_count = -1; static int g_main_device = 0; static int g_compute_capabilities[GGML_CUDA_MAX_DEVICES]; static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0}; -static bool g_mul_mat_q = true; static void * g_scratch_buffer = nullptr; static size_t g_scratch_size = 0; // disabled by default @@ -3554,9 +3571,15 @@ static __device__ __forceinline__ void mul_mat_q( #define MMQ_X_Q4_0_RDNA1 64 #define MMQ_Y_Q4_0_RDNA1 64 #define NWARPS_Q4_0_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q4_0_AMPERE 4 +#define MMQ_Y_Q4_0_AMPERE 32 +#define NWARPS_Q4_0_AMPERE 4 +#else #define MMQ_X_Q4_0_AMPERE 64 #define MMQ_Y_Q4_0_AMPERE 128 #define NWARPS_Q4_0_AMPERE 4 +#endif #define MMQ_X_Q4_0_PASCAL 64 #define MMQ_Y_Q4_0_PASCAL 64 #define NWARPS_Q4_0_PASCAL 8 @@ -3615,9 +3638,15 @@ template static __global__ void #define MMQ_X_Q4_1_RDNA1 64 #define MMQ_Y_Q4_1_RDNA1 64 #define NWARPS_Q4_1_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q4_1_AMPERE 4 +#define MMQ_Y_Q4_1_AMPERE 32 +#define NWARPS_Q4_1_AMPERE 4 +#else #define MMQ_X_Q4_1_AMPERE 64 #define MMQ_Y_Q4_1_AMPERE 128 #define NWARPS_Q4_1_AMPERE 4 +#endif #define MMQ_X_Q4_1_PASCAL 64 #define MMQ_Y_Q4_1_PASCAL 64 #define NWARPS_Q4_1_PASCAL 8 @@ -3678,9 +3707,15 @@ template static __global__ void #define MMQ_X_Q5_0_RDNA1 64 #define MMQ_Y_Q5_0_RDNA1 64 #define NWARPS_Q5_0_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q5_0_AMPERE 4 +#define MMQ_Y_Q5_0_AMPERE 32 +#define NWARPS_Q5_0_AMPERE 4 +#else #define MMQ_X_Q5_0_AMPERE 128 #define MMQ_Y_Q5_0_AMPERE 64 #define NWARPS_Q5_0_AMPERE 4 +#endif #define MMQ_X_Q5_0_PASCAL 64 #define MMQ_Y_Q5_0_PASCAL 64 #define NWARPS_Q5_0_PASCAL 8 @@ -3739,9 +3774,15 @@ template static __global__ void #define MMQ_X_Q5_1_RDNA1 64 #define MMQ_Y_Q5_1_RDNA1 64 #define NWARPS_Q5_1_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q5_1_AMPERE 4 +#define MMQ_Y_Q5_1_AMPERE 32 +#define NWARPS_Q5_1_AMPERE 4 +#else #define MMQ_X_Q5_1_AMPERE 128 #define MMQ_Y_Q5_1_AMPERE 64 #define NWARPS_Q5_1_AMPERE 4 +#endif #define MMQ_X_Q5_1_PASCAL 64 #define MMQ_Y_Q5_1_PASCAL 64 #define NWARPS_Q5_1_PASCAL 8 @@ -3800,9 +3841,15 @@ mul_mat_q5_1( #define MMQ_X_Q8_0_RDNA1 64 #define MMQ_Y_Q8_0_RDNA1 64 #define NWARPS_Q8_0_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q8_0_AMPERE 4 +#define MMQ_Y_Q8_0_AMPERE 32 +#define NWARPS_Q8_0_AMPERE 4 +#else #define MMQ_X_Q8_0_AMPERE 128 #define MMQ_Y_Q8_0_AMPERE 64 #define NWARPS_Q8_0_AMPERE 4 +#endif #define MMQ_X_Q8_0_PASCAL 64 
#define MMQ_Y_Q8_0_PASCAL 64 #define NWARPS_Q8_0_PASCAL 8 @@ -3861,9 +3908,15 @@ template static __global__ void #define MMQ_X_Q2_K_RDNA1 128 #define MMQ_Y_Q2_K_RDNA1 32 #define NWARPS_Q2_K_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q2_K_AMPERE 4 +#define MMQ_Y_Q2_K_AMPERE 32 +#define NWARPS_Q2_K_AMPERE 4 +#else #define MMQ_X_Q2_K_AMPERE 64 #define MMQ_Y_Q2_K_AMPERE 128 #define NWARPS_Q2_K_AMPERE 4 +#endif #define MMQ_X_Q2_K_PASCAL 64 #define MMQ_Y_Q2_K_PASCAL 64 #define NWARPS_Q2_K_PASCAL 8 @@ -3922,9 +3975,15 @@ mul_mat_q2_K( #define MMQ_X_Q3_K_RDNA1 32 #define MMQ_Y_Q3_K_RDNA1 128 #define NWARPS_Q3_K_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q3_K_AMPERE 4 +#define MMQ_Y_Q3_K_AMPERE 32 +#define NWARPS_Q3_K_AMPERE 4 +#else #define MMQ_X_Q3_K_AMPERE 128 #define MMQ_Y_Q3_K_AMPERE 128 #define NWARPS_Q3_K_AMPERE 4 +#endif #define MMQ_X_Q3_K_PASCAL 64 #define MMQ_Y_Q3_K_PASCAL 64 #define NWARPS_Q3_K_PASCAL 8 @@ -3985,9 +4044,15 @@ template static __global__ void #define MMQ_X_Q4_K_RDNA1 32 #define MMQ_Y_Q4_K_RDNA1 64 #define NWARPS_Q4_K_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q4_K_AMPERE 4 +#define MMQ_Y_Q4_K_AMPERE 32 +#define NWARPS_Q4_K_AMPERE 4 +#else #define MMQ_X_Q4_K_AMPERE 64 #define MMQ_Y_Q4_K_AMPERE 128 #define NWARPS_Q4_K_AMPERE 4 +#endif #define MMQ_X_Q4_K_PASCAL 64 #define MMQ_Y_Q4_K_PASCAL 64 #define NWARPS_Q4_K_PASCAL 8 @@ -4048,9 +4113,15 @@ template static __global__ void #define MMQ_X_Q5_K_RDNA1 32 #define MMQ_Y_Q5_K_RDNA1 64 #define NWARPS_Q5_K_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q5_K_AMPERE 4 +#define MMQ_Y_Q5_K_AMPERE 32 +#define NWARPS_Q5_K_AMPERE 4 +#else #define MMQ_X_Q5_K_AMPERE 64 #define MMQ_Y_Q5_K_AMPERE 128 #define NWARPS_Q5_K_AMPERE 4 +#endif #define MMQ_X_Q5_K_PASCAL 64 #define MMQ_Y_Q5_K_PASCAL 64 #define NWARPS_Q5_K_PASCAL 8 @@ -4109,9 +4180,15 @@ mul_mat_q5_K( #define MMQ_X_Q6_K_RDNA1 32 #define MMQ_Y_Q6_K_RDNA1 64 #define NWARPS_Q6_K_RDNA1 8 +#if defined(CUDA_USE_TENSOR_CORES) +#define MMQ_X_Q6_K_AMPERE 4 +#define MMQ_Y_Q6_K_AMPERE 32 +#define NWARPS_Q6_K_AMPERE 4 +#else #define MMQ_X_Q6_K_AMPERE 64 #define MMQ_Y_Q6_K_AMPERE 64 #define NWARPS_Q6_K_AMPERE 4 +#endif #define MMQ_X_Q6_K_PASCAL 64 #define MMQ_Y_Q6_K_PASCAL 64 #define NWARPS_Q6_K_PASCAL 8 @@ -5663,6 +5740,16 @@ void ggml_init_cublas() { CUDA_CHECK(cudaGetDeviceCount(&g_device_count)); GGML_ASSERT(g_device_count <= GGML_CUDA_MAX_DEVICES); int64_t total_vram = 0; +#if defined(GGML_CUDA_FORCE_MMQ) + fprintf(stderr, "%s: GGML_CUDA_FORCE_MMQ: yes\n", __func__); +#else + fprintf(stderr, "%s: GGML_CUDA_FORCE_MMQ: no\n", __func__); +#endif +#if defined(CUDA_USE_TENSOR_CORES) + fprintf(stderr, "%s: CUDA_USE_TENSOR_CORES: yes\n", __func__); +#else + fprintf(stderr, "%s: CUDA_USE_TENSOR_CORES: no\n", __func__); +#endif fprintf(stderr, "%s: found %d " GGML_CUDA_NAME " devices:\n", __func__, g_device_count); for (int id = 0; id < g_device_count; ++id) { cudaDeviceProp prop; @@ -6347,7 +6434,7 @@ inline void ggml_cuda_op_mul_mat_cublas( cublasSgemm(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N, row_diff, src1_ncols, ne10, &alpha, src0_ddf_i, ne00, - src1_ddf_i, ne10, + src1_ddf_i, ne10, &beta, dst_dd_i, ldc)); if (src0_as != 0) { @@ -7048,9 +7135,10 @@ static void ggml_cuda_mul_mat_vec_nc(const ggml_tensor * src0, const ggml_tensor ggml_mul_mat_vec_nc_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, row_stride_x, ne02, ne12, channel_stride_x, main_stream); } -static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, 
const ggml_tensor * src1, ggml_tensor * dst){ +static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(!ggml_is_transposed(src0)); GGML_ASSERT(!ggml_is_transposed(src1)); + GGML_ASSERT(src0->backend != GGML_BACKEND_GPU_SPLIT); GGML_ASSERT(src0->type == GGML_TYPE_F16); GGML_ASSERT(src1->type == GGML_TYPE_F32); @@ -7202,17 +7290,24 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const } static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { - bool all_on_device = (src0->backend == GGML_BACKEND_GPU || src0->backend == GGML_BACKEND_GPU_SPLIT) && - src1->backend == GGML_BACKEND_GPU && dst->backend == GGML_BACKEND_GPU; + const bool all_on_device = + (src0->backend == GGML_BACKEND_GPU) && + (src1->backend == GGML_BACKEND_GPU) && + ( dst->backend == GGML_BACKEND_GPU); int64_t min_compute_capability = INT_MAX; for (int64_t id = 0; id < g_device_count; ++id) { - if (min_compute_capability > g_compute_capabilities[id] - && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) { + if (min_compute_capability > g_compute_capabilities[id] && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) { min_compute_capability = g_compute_capabilities[id]; } } +#ifdef CUDA_USE_TENSOR_CORES + const bool use_tensor_cores = true; +#else + const bool use_tensor_cores = false; +#endif + // debug helpers //printf("src0: %8d %8d %8d %8d\n", src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3]); //printf(" %8d %8d %8d %8d\n", src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3]); @@ -7221,20 +7316,19 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1 //printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name); //printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name); - if (all_on_device && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) { + if (all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) { // KQ single-batch ggml_cuda_mul_mat_vec_p021(src0, src1, dst); - } else if (all_on_device && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) { + } else if (all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) { // KQV single-batch ggml_cuda_mul_mat_vec_nc(src0, src1, dst); - } else if (all_on_device && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) { + } else if (all_on_device && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) { // KQ + KQV multi-batch ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst); } else if (src0->type == GGML_TYPE_F32) { ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, false); } else if (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16) { if (src1->ne[1] == 1 && src0->ne[0] % GGML_CUDA_DMMV_X == 0) { - #ifdef GGML_CUDA_FORCE_DMMV const bool use_mul_mat_vec_q = false; #else 
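    // (GGML_CUDA_FORCE_DMMV, the CMake/Makefile option added above, forces the older
    // dequantize-then-mul_mat_vec kernels; otherwise the quantized mul_mat_vec_q
    // path is preferred where the hardware supports it)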
@@ -7247,7 +7341,15 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1 ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_dequantize_mul_mat_vec, false); } } else { - if (g_mul_mat_q && ggml_is_quantized(src0->type) && min_compute_capability >= MIN_CC_DP4A) { + bool use_mul_mat_q = min_compute_capability >= MIN_CC_DP4A && ggml_is_quantized(src0->type); + + // when tensor cores are available, use them for large batch size + // ref: https://github.com/ggerganov/llama.cpp/pull/3776 + if (use_tensor_cores && min_compute_capability >= CC_VOLTA && src1->ne[1] > MMQ_MAX_BATCH_SIZE) { + use_mul_mat_q = false; + } + + if (use_mul_mat_q) { ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_q, true); } else { ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, false); @@ -7601,10 +7703,6 @@ void ggml_cuda_set_main_device(const int main_device) { } } -void ggml_cuda_set_mul_mat_q(const bool mul_mat_q) { - g_mul_mat_q = mul_mat_q; -} - void ggml_cuda_set_scratch_size(const size_t scratch_size) { // this is a hack to not completely break llama.cpp when using multiple models or contexts simultaneously // it still won't always work as expected, but it's better than nothing diff --git a/llama.cpp b/llama.cpp index 61f30c3982f184..cc8669b0e9e23d 100644 --- a/llama.cpp +++ b/llama.cpp @@ -5959,8 +5959,6 @@ static int llama_decode_internal( } } - ggml_cuda_set_mul_mat_q(cparams.mul_mat_q); - // HACK: ggml-alloc may change the tensor backend when reusing a parent, so force output to be on the CPU here if needed if (!lctx.embedding.empty()) { embeddings->backend = GGML_BACKEND_CPU; diff --git a/llama.h b/llama.h index 2f2fee0e2ff9f3..beac9a0cedd76c 100644 --- a/llama.h +++ b/llama.h @@ -178,7 +178,7 @@ extern "C" { float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model // Keep the booleans together to avoid misalignment during copy-by-value. 
- bool mul_mat_q; // if true, use experimental mul_mat_q kernels + bool mul_mat_q; // if true, use experimental mul_mat_q kernels (DEPRECATED - always true) bool f16_kv; // use fp16 for KV cache, fp32 otherwise bool logits_all; // the llama_eval() call computes all logits, not just the last one bool embedding; // embedding mode only From c8d6a1f34ab6f1b6bd468d256e535a61f98f114c Mon Sep 17 00:00:00 2001 From: Thibault Terrasson Date: Fri, 27 Oct 2023 16:37:41 +0200 Subject: [PATCH 009/206] simple : fix batch handling (#3803) --- examples/simple/simple.cpp | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp index f376c050994d19..374aef6f16189c 100644 --- a/examples/simple/simple.cpp +++ b/examples/simple/simple.cpp @@ -95,13 +95,8 @@ int main(int argc, char ** argv) { llama_batch batch = llama_batch_init(512, 0, 1); // evaluate the initial prompt - batch.n_tokens = tokens_list.size(); - - for (int32_t i = 0; i < batch.n_tokens; i++) { - batch.token[i] = tokens_list[i]; - batch.pos[i] = i; - batch.seq_id[i] = 0; - batch.logits[i] = false; + for (size_t i = 0; i < tokens_list.size(); i++) { + llama_batch_add(batch, tokens_list[i], i, { 0 }, false); } // llama_decode will output logits only for the last token of the prompt @@ -148,15 +143,10 @@ int main(int argc, char ** argv) { fflush(stdout); // prepare the next batch - batch.n_tokens = 0; + llama_batch_clear(batch); // push this new token for next evaluation - batch.token [batch.n_tokens] = new_token_id; - batch.pos [batch.n_tokens] = n_cur; - batch.seq_id[batch.n_tokens] = 0; - batch.logits[batch.n_tokens] = true; - - batch.n_tokens += 1; + llama_batch_add(batch, new_token_id, n_cur, { 0 }, true); n_decode += 1; } From 6d459cbfbe5a011dfca94f9550527a504b6f9aa1 Mon Sep 17 00:00:00 2001 From: cebtenzzre Date: Fri, 27 Oct 2023 17:33:53 -0400 Subject: [PATCH 010/206] llama : correctly report GGUFv3 format (#3818) --- llama.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/llama.cpp b/llama.cpp index cc8669b0e9e23d..408533d8a62f0b 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1578,12 +1578,14 @@ static void llama_kv_cache_seq_shift( enum llama_fver { GGUF_FILE_VERSION_V1 = 1, GGUF_FILE_VERSION_V2 = 2, + GGUF_FILE_VERSION_V3 = 3, }; static const char * llama_file_version_name(llama_fver version) { switch (version) { case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)"; - case GGUF_FILE_VERSION_V2: return "GGUF V2 (latest)"; + case GGUF_FILE_VERSION_V2: return "GGUF V2"; + case GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)"; } return "unknown"; From 41aee4df821854f37d90a45281f03b6db8d27de2 Mon Sep 17 00:00:00 2001 From: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com> Date: Fri, 27 Oct 2023 15:40:07 -0600 Subject: [PATCH 011/206] speculative : ensure draft and target model vocab matches (#3812) * speculative: Ensure draft and target model vocab matches * Tolerate small differences when checking dft vs tgt vocab --- examples/speculative/speculative.cpp | 33 +++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp index 92ad27e8e423cc..f921b78455a72c 100644 --- a/examples/speculative/speculative.cpp +++ b/examples/speculative/speculative.cpp @@ -8,6 +8,9 @@ #include #include +#define SPEC_VOCAB_MAX_SIZE_DIFFERENCE 100 +#define SPEC_VOCAB_CHECK_START_TOKEN_ID 5 + struct seq_draft { bool active = false; bool 
drafting = false; @@ -64,6 +67,33 @@ int main(int argc, char ** argv) { params.n_gpu_layers = params.n_gpu_layers_draft; std::tie(model_dft, ctx_dft) = llama_init_from_gpt_params(params); + { + const int n_vocab_tgt = llama_n_vocab(model_tgt); + const int n_vocab_dft = llama_n_vocab(model_dft); + const int vocab_diff = n_vocab_tgt > n_vocab_dft + ? n_vocab_tgt - n_vocab_dft + : n_vocab_dft - n_vocab_tgt; + + if (vocab_diff > SPEC_VOCAB_MAX_SIZE_DIFFERENCE) { + fprintf(stderr, "%s: error: draft model vocab must closely match target model to use speculation but ", __func__); + fprintf(stderr, "target vocab size %d does not match draft vocab size %d - difference %d, max allowed %d\n", + n_vocab_tgt, llama_n_vocab(model_dft), vocab_diff, SPEC_VOCAB_MAX_SIZE_DIFFERENCE); + return 1; + } + + for (int i = SPEC_VOCAB_CHECK_START_TOKEN_ID; i < std::min(n_vocab_tgt, n_vocab_dft); ++i) { + const char * token_text_tgt = llama_token_get_text(model_tgt, i); + const char * token_text_dft = llama_token_get_text(model_dft, i); + if (std::strcmp(token_text_tgt, token_text_dft) != 0) { + fprintf(stderr, "%s: error: draft model vocab must match target model to use speculation but ", __func__); + fprintf(stderr, "token %d content differs - target '%s', draft '%s'\n", i, + llama_token_to_piece(ctx_tgt, i).c_str(), + llama_token_to_piece(ctx_dft, i).c_str()); + return 1; + } + } + } + // tokenize the prompt std::vector inp; inp = ::llama_tokenize(ctx_tgt, params.prompt, true); @@ -227,6 +257,7 @@ int main(int argc, char ** argv) { llama_batch_add (batch_dft, id, n_past_dft, { 0 }, true); llama_kv_cache_seq_rm(ctx_dft, 0, n_past_dft, -1); + // LOG("dft batch: %s\n", LOG_BATCH_TOSTR_PRETTY(ctx_dft, batch_dft).c_str()); llama_decode (ctx_dft, batch_dft); ++n_past_dft; @@ -370,7 +401,7 @@ int main(int argc, char ** argv) { llama_kv_cache_seq_cp(ctx_tgt, 0, s, -1, -1); } - //LOG("target batch: %s\n", LOG_BATCH_TOSTR_PRETTY(ctx_tgt, batch_tgt)); + // LOG("target batch: %s\n", LOG_BATCH_TOSTR_PRETTY(ctx_tgt, batch_tgt).c_str()); llama_decode(ctx_tgt, batch_tgt); ++n_past_tgt; } From fdee152e4eebb78c191df0b074857111d7f2aba7 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 28 Oct 2023 12:06:08 +0300 Subject: [PATCH 012/206] starcoder : add GPU offloading (#3827) * starcoder : do not GPU split 1D bias tensors * starcoder : offload layers to GPU ggml-ci --- llama.cpp | 106 +++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 85 insertions(+), 21 deletions(-) diff --git a/llama.cpp b/llama.cpp index 408533d8a62f0b..6caa58960cf3c3 100644 --- a/llama.cpp +++ b/llama.cpp @@ -2695,8 +2695,8 @@ static void llm_load_tensors( } break; case LLM_ARCH_STARCODER: { - model.tok_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); - model.pos_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train}, GGML_BACKEND_CPU); + model.tok_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.pos_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train}, GGML_BACKEND_CPU); // output { @@ -2747,19 +2747,19 @@ static void llm_load_tensors( layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend); layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split); - layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, 
"bias", i), {n_embd + 2*n_embd_gqa}, backend_split); + layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend); layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); - layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend_split); + layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend); layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend); layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split); - layer.b2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend_split); + layer.b2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend); layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); - layer.b3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend_split); + layer.b3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend); if (backend == GGML_BACKEND_GPU) { vram_weights += @@ -4616,6 +4616,8 @@ static struct ggml_cgraph * llm_build_starcoder( const float norm_eps = hparams.f_norm_eps; + const int n_gpu_layers = model.n_gpu_layers; + const int32_t n_tokens = batch.n_tokens; const int32_t n_kv = ggml_allocr_is_measure(lctx.alloc) ? n_ctx : kv_self.n; const int32_t kv_head = ggml_allocr_is_measure(lctx.alloc) ? n_ctx - n_tokens : kv_self.head; @@ -4660,6 +4662,27 @@ static struct ggml_cgraph * llm_build_starcoder( } } + const int i_gpu_start = n_layer - n_gpu_layers; + (void) i_gpu_start; + + // offload functions set the tensor output backend to GPU + // tensors are GPU-accelerated if any input or the output has been offloaded + offload_func_t offload_func_nr = llama_nop; // nr = non-repeating + offload_func_t offload_func_kq = llama_nop; + offload_func_t offload_func_v = llama_nop; + +#ifdef GGML_USE_CUBLAS + if (n_gpu_layers > n_layer) { + offload_func_nr = ggml_cuda_assign_buffers_no_alloc; + } + if (n_gpu_layers > n_layer + 1) { + offload_func_v = ggml_cuda_assign_buffers_no_alloc; + } + if (n_gpu_layers > n_layer + 2) { + offload_func_kq = ggml_cuda_assign_buffers_no_alloc; + } +#endif // GGML_USE_CUBLAS + { // Compute position embeddings. 
struct ggml_tensor * inp_positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); @@ -4685,6 +4708,7 @@ static struct ggml_cgraph * llm_build_starcoder( // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); ggml_set_name(KQ_mask, "KQ_mask"); + offload_func_kq(KQ_mask); ggml_allocr_alloc(lctx.alloc, KQ_mask); if (!ggml_allocr_is_measure(lctx.alloc)) { float * data = (float *) KQ_mask->data; @@ -4708,44 +4732,67 @@ static struct ggml_cgraph * llm_build_starcoder( ggml_set_name(inpL, "inpL"); for (int il = 0; il < n_layer; ++il) { + offload_func_t offload_func = llama_nop; + +#ifdef GGML_USE_CUBLAS + if (il >= i_gpu_start) { + offload_func = ggml_cuda_assign_buffers_no_alloc; + } +#endif // GGML_USE_CUBLAS + { // Norm cur = ggml_norm(ctx0, inpL, norm_eps); + offload_func(cur); + cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].attn_norm), model.layers[il].attn_norm_b); + offload_func(cur); } { // Self Attention - cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wqkv, cur), model.layers[il].bqkv); + cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); + offload_func_kq(cur); - struct ggml_tensor * tmpq = ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*n_embd); - struct ggml_tensor * tmpk = ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], sizeof(float)*n_embd); - struct ggml_tensor * tmpv = ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], sizeof(float)*(n_embd + n_embd_gqa)); + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + offload_func_kq(cur); - struct ggml_tensor * Qcur = tmpq; + struct ggml_tensor * tmpq = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + struct ggml_tensor * tmpk = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + struct ggml_tensor * tmpv = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + + ggml_set_name(tmpq, "tmpq"); + ggml_set_name(tmpk, "tmpk"); + ggml_set_name(tmpv, "tmpv"); + + offload_func_kq(tmpq); + offload_func_kq(tmpk); + offload_func_v (tmpv); + + struct ggml_tensor * Qcur = ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens); struct ggml_tensor * Kcur = tmpk; { - struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, ggml_cont(ctx0, tmpv), n_embd_gqa, n_tokens)); + struct ggml_tensor * Vcur = ggml_transpose(ctx0, tmpv); + offload_func_v(Vcur); ggml_set_name(Vcur, "Vcur"); struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, n_tokens*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + kv_head)); + offload_func_kq(k); ggml_set_name(k, "k"); struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, n_tokens, n_embd_gqa, ( n_ctx)*ggml_element_size(kv_self.v), (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + kv_head*ggml_element_size(kv_self.v)); + offload_func_v(v); + ggml_set_name(v, "v"); ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k)); ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v)); } - struct ggml_tensor * Q = - ggml_permute(ctx0, - ggml_cpy(ctx0, - Qcur, - ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd_head, n_head, n_tokens)), - 0, 2, 1, 3); + struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); + offload_func_kq(Q); ggml_set_name(Q, "Q"); struct ggml_tensor * K = @@ -4754,23 +4801,28 @@ static struct ggml_cgraph * llm_build_starcoder( 
ggml_element_size(kv_self.k)*n_embd_gqa, ggml_element_size(kv_self.k)*n_embd_head, ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il); + offload_func_kq(K); ggml_set_name(K, "K"); // K * Q struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); + offload_func_kq(KQ); ggml_set_name(KQ, "KQ"); // KQ_scaled = KQ / sqrt(n_embd_head) // KQ_scaled shape [n_past + n_tokens, n_tokens, n_head, 1] struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale); + offload_func_kq(KQ_scaled); ggml_set_name(KQ_scaled, "KQ_scaled"); // KQ_masked = mask_past(KQ_scaled) struct ggml_tensor * KQ_masked = ggml_add(ctx0, KQ_scaled, KQ_mask); + offload_func_kq(KQ_masked); ggml_set_name(KQ_masked, "KQ_masked"); // KQ = soft_max(KQ_masked) struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked); + offload_func_v(KQ_soft_max); ggml_set_name(KQ_soft_max, "KQ_soft_max"); // split cached V into n_head heads @@ -4783,22 +4835,25 @@ static struct ggml_cgraph * llm_build_starcoder( ggml_set_name(V, "V"); struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max); + offload_func_v(KQV); ggml_set_name(KQV, "KQV"); - // KQV_merged = KQV.permute(0, 2, 1, 3) struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); + offload_func_v(KQV_merged); ggml_set_name(KQV_merged, "KQV_merged"); - // cur = KQV_merged.contiguous().view(n_embd, n_tokens) cur = ggml_cont_2d(ctx0, KQV_merged, n_embd, n_tokens); + offload_func_v(cur); ggml_set_name(cur, "KQV_merged_contiguous"); } // Projection cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wo, cur), model.layers[il].bo); + offload_func(cur); // Add the input cur = ggml_add(ctx0, cur, inpL); + offload_func(cur); struct ggml_tensor * inpFF = cur; @@ -4807,27 +4862,36 @@ static struct ggml_cgraph * llm_build_starcoder( // Norm { cur = ggml_norm(ctx0, inpFF, norm_eps); + offload_func_nr(cur); + cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ffn_norm), model.layers[il].ffn_norm_b); + offload_func_nr(cur); } cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].w3, cur), model.layers[il].b3); + offload_func(cur); // GELU activation cur = ggml_gelu(ctx0, cur); + offload_func(cur); // Projection cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].w2, cur), model.layers[il].b2); + offload_func(cur); } inpL = ggml_add(ctx0, cur, inpFF); + } // Output Norm { cur = ggml_norm(ctx0, inpL, norm_eps); + offload_func_nr(cur); + cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.output_norm), model.output_norm_b); + ggml_set_name(cur, "result_norm"); } - ggml_set_name(cur, "result_norm"); cur = ggml_mul_mat(ctx0, model.output, cur); ggml_set_name(cur, "result_output"); From 177461104b454163473dced2a5038f4e016cdb7e Mon Sep 17 00:00:00 2001 From: Henk Poley Date: Sat, 28 Oct 2023 12:16:33 +0200 Subject: [PATCH 013/206] common : print that one line of the syntax help *also* to standard output (#3823) --- common/common.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/common.cpp b/common/common.cpp index 44bb76618f5f8b..c0d4924e2d4a53 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -743,7 +743,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { #endif // GGML_USE_CUBLAS #endif printf(" --verbose-prompt print prompt before generation\n"); - fprintf(stderr, " --simple-io use basic IO for better compatibility in subprocesses and limited consoles\n"); + printf(" --simple-io use basic IO for better compatibility in subprocesses and limited consoles\n"); printf(" --lora FNAME apply LoRA 
adapter (implies --no-mmap)\n"); printf(" --lora-scaled FNAME S apply LoRA adapter with user defined scaling S (implies --no-mmap)\n"); printf(" --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n"); From ee1a0ec9cb367ba41d138134795cbbbe93d2bf1c Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 28 Oct 2023 14:23:11 +0300 Subject: [PATCH 014/206] llama : add option for greedy sampling with probs (#3813) * llama : add option for greedy sampling with probs * llama : add comment about llama_sample_token_greedy() missing probs * sampling : temp == 0.0 -> no probs, temp < 0.0 -> probs --- common/common.cpp | 1 + common/sampling.cpp | 8 ++++++-- examples/speculative/speculative.cpp | 2 +- llama.h | 1 + 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index c0d4924e2d4a53..f81f4d354bc017 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -224,6 +224,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { break; } sparams.temp = std::stof(argv[i]); + sparams.temp = std::max(sparams.temp, 0.0f); } else if (arg == "--tfs") { if (++i >= argc) { invalid_param = true; diff --git a/common/sampling.cpp b/common/sampling.cpp index 5258d4e8263693..c4996c9857d8ac 100644 --- a/common/sampling.cpp +++ b/common/sampling.cpp @@ -167,8 +167,12 @@ llama_token llama_sampling_sample( llama_sample_grammar(ctx_main, &cur_p, ctx_sampling->grammar); } - if (temp <= 0) { - // greedy sampling + if (temp < 0.0) { + // greedy sampling, with probs + llama_sample_softmax(ctx_main, &cur_p); + id = cur_p.data[0].id; + } else if (temp == 0.0) { + // greedy sampling, no probs id = llama_sample_token_greedy(ctx_main, &cur_p); } else { if (mirostat == 1) { diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp index f921b78455a72c..323c74652c9a69 100644 --- a/examples/speculative/speculative.cpp +++ b/examples/speculative/speculative.cpp @@ -148,7 +148,7 @@ int main(int argc, char ** argv) { std::vector drafts(n_seq_dft); params.sparams.grammar.clear(); // the draft samplers will copy the target sampler's grammar - params.sparams.temp = std::max(0.01f, params.sparams.temp); + params.sparams.temp = -1.0f; // force greedy sampling with probs for the draft model for (int s = 0; s < n_seq_dft; ++s) { drafts[s].ctx_sampling = llama_sampling_init(params.sparams); diff --git a/llama.h b/llama.h index beac9a0cedd76c..d901dcd9116d3d 100644 --- a/llama.h +++ b/llama.h @@ -658,6 +658,7 @@ extern "C" { float * mu); /// @details Selects the token with the highest probability. + /// Does not compute the token probabilities. Use llama_sample_softmax() instead. 
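A minimal sketch of the temperature convention this patch introduces (the helper below is purely illustrative and not part of the series; only the behaviour of common/sampling.cpp shown above is assumed):

    // temp  > 0.0f -> regular temperature-based sampling
    // temp == 0.0f -> greedy via llama_sample_token_greedy(); candidate probabilities are not computed
    // temp  < 0.0f -> llama_sample_softmax() runs first and the top token is taken, so probabilities stay populated
    //                 (speculative.cpp above sets temp = -1.0f for the draft model for exactly this reason)
    static bool greedy_sampling_keeps_probs(float temp) {
        return temp < 0.0f;
    }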
LLAMA_API llama_token llama_sample_token_greedy( struct llama_context * ctx, llama_token_data_array * candidates); From bd6d9e205982b34e0ba2c3b22bbf31a1ef1a1bb5 Mon Sep 17 00:00:00 2001 From: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com> Date: Sat, 28 Oct 2023 05:54:24 -0600 Subject: [PATCH 015/206] llama : allow quantizing k-quants to fall back when tensor size incompatible (#3747) * Allow quantizing k-quants to fall back when tensor size incompatible * quantizing: Add warning when tensors were incompatible with k-quants Clean up k-quants state passing a bit --- llama.cpp | 108 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 65 insertions(+), 43 deletions(-) diff --git a/llama.cpp b/llama.cpp index 6caa58960cf3c3..3d431ee7bf5260 100644 --- a/llama.cpp +++ b/llama.cpp @@ -8049,6 +8049,24 @@ struct no_init { no_init() { /* do nothing */ } }; +struct quantize_state_internal { + const llama_model & model; + const llama_model_quantize_params * params; +#ifdef GGML_USE_K_QUANTS + int n_attention_wv = 0; + int n_feed_forward_w2 = 0; + int i_attention_wv = 0; + int i_feed_forward_w2 = 0; + + int n_k_quantized = 0; + int n_fallback = 0; +#endif + quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params) + : model(model) + , params(params) + {} +}; + static void llama_convert_tensor_internal( struct ggml_tensor * tensor, std::vector> & output, std::vector & workers, const size_t nelements, const int nthread @@ -8109,12 +8127,13 @@ static void llama_convert_tensor_internal( #ifdef GGML_USE_K_QUANTS static ggml_type get_k_quant_type( - ggml_type new_type, const ggml_tensor * tensor, const llama_model & model, llama_ftype ftype, int * i_attention_wv, - int n_attention_wv, int * i_feed_forward_w2, int n_feed_forward_w2 + quantize_state_internal & qs, + ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype ) { const std::string name = ggml_get_name(tensor); // TODO: avoid hardcoded tensor names - use the TN_* constants - const auto tn = LLM_TN(model.arch); + const llm_arch arch = qs.model.arch; + const auto tn = LLM_TN(arch); auto use_more_bits = [](int i_layer, int num_layers) -> bool { return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2; @@ -8122,7 +8141,7 @@ static ggml_type get_k_quant_type( if (name == tn(LLM_TENSOR_OUTPUT, "weight")) { int nx = tensor->ne[0]; - if (model.arch == LLM_ARCH_FALCON || nx % QK_K != 0) { + if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) { new_type = GGML_TYPE_Q8_0; } else if (new_type != GGML_TYPE_Q8_0) { @@ -8131,46 +8150,46 @@ static ggml_type get_k_quant_type( } else if (name.find("attn_v.weight") != std::string::npos) { if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K; else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) { - new_type = *i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K; + new_type = qs.i_attention_wv < 2 ? 
GGML_TYPE_Q5_K : GGML_TYPE_Q4_K; } else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K; else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) && - use_more_bits(*i_attention_wv, n_attention_wv)) new_type = GGML_TYPE_Q6_K; - else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && *i_attention_wv < 4) new_type = GGML_TYPE_Q5_K; + use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K; + else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K; else if (QK_K == 64 && (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S) && - (*i_attention_wv < n_attention_wv/8 || *i_attention_wv >= 7*n_attention_wv/8)) new_type = GGML_TYPE_Q6_K; - if (model.type == MODEL_70B) { + (qs.i_attention_wv < qs.n_attention_wv/8 || qs.i_attention_wv >= 7*qs.n_attention_wv/8)) new_type = GGML_TYPE_Q6_K; + if (qs.model.type == MODEL_70B) { // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with // nearly negligible increase in model size by quantizing this tensor with more bits: if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K; } - ++*i_attention_wv; + ++qs.i_attention_wv; } else if (name.find("ffn_down.weight") != std::string::npos) { if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K; else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) { - new_type = *i_feed_forward_w2 < 2 ? GGML_TYPE_Q5_K - : model.arch != LLM_ARCH_FALCON || use_more_bits(*i_feed_forward_w2, n_feed_forward_w2) ? GGML_TYPE_Q4_K + new_type = qs.i_feed_forward_w2 < 2 ? GGML_TYPE_Q5_K + : arch != LLM_ARCH_FALCON || use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2) ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K; } else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) { - new_type = model.arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K; + new_type = arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K; } else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) { - if (model.arch == LLM_ARCH_FALCON) { - new_type = *i_feed_forward_w2 < 2 ? GGML_TYPE_Q6_K : - use_more_bits(*i_feed_forward_w2, n_feed_forward_w2) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K; + if (arch == LLM_ARCH_FALCON) { + new_type = qs.i_feed_forward_w2 < 2 ? GGML_TYPE_Q6_K : + use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2) ? 
GGML_TYPE_Q5_K : GGML_TYPE_Q4_K; } else { - if (use_more_bits(*i_feed_forward_w2, n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K; + if (use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K; } } - else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(*i_feed_forward_w2, n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K; - else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && model.arch != LLM_ARCH_FALCON && *i_feed_forward_w2 < 4) { + else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K; + else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && arch != LLM_ARCH_FALCON && qs.i_feed_forward_w2 < 4) { new_type = GGML_TYPE_Q5_K; } - ++*i_feed_forward_w2; + ++qs.i_feed_forward_w2; } else if (name.find("attn_output.weight") != std::string::npos) { - if (model.arch != LLM_ARCH_FALCON) { + if (arch != LLM_ARCH_FALCON) { if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K ) new_type = GGML_TYPE_Q3_K; else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) new_type = GGML_TYPE_Q4_K; else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K; @@ -8197,20 +8216,23 @@ static ggml_type get_k_quant_type( int nx = tensor->ne[0]; int ny = tensor->ne[1]; if (nx % QK_K != 0) { - LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for k-quants\n", __func__, nx, ny, QK_K); + LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type)); convert_incompatible_tensor = true; + } else { + ++qs.n_k_quantized; } } if (convert_incompatible_tensor) { - if (name == tn(LLM_TENSOR_OUTPUT, "weight")) { - new_type = GGML_TYPE_F16; //fall back to F16 instead of just failing. - LLAMA_LOG_WARN("F16 will be used for this tensor instead.\n"); - } else if (name == tn(LLM_TENSOR_TOKEN_EMBD, "weight")) { - new_type = GGML_TYPE_Q4_0; //fall back to Q4_0 instead of just failing. 
- LLAMA_LOG_WARN("Q4_0 will be used for this tensor instead.\n"); - } else { - throw std::runtime_error("Unsupported tensor size encountered\n"); + switch (new_type) { + case GGML_TYPE_Q2_K: new_type = GGML_TYPE_Q4_0; break; + case GGML_TYPE_Q3_K: new_type = GGML_TYPE_Q4_1; break; + case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break; + case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break; + case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break; + default: throw std::runtime_error("\nUnsupported tensor size encountered\n"); } + LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type)); + ++qs.n_fallback; } return new_type; @@ -8268,6 +8290,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s llm_load_arch(ml, model); llm_load_hparams(ml, model); + struct quantize_state_internal qs(model, params); + if (params->only_copy) { ftype = model.ftype; } @@ -8281,9 +8305,6 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s gguf_set_val_u32(ctx_out, "general.file_type", ftype); #ifdef GGML_USE_K_QUANTS - int n_attention_wv = 0; - int n_feed_forward_w2 = 0; - for (int i = 0; i < ml.n_tensors; ++i) { struct ggml_tensor * meta = ml.get_tensor_meta(i); @@ -8291,19 +8312,16 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s // TODO: avoid hardcoded tensor names - use the TN_* constants if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos) { - ++n_attention_wv; + ++qs.n_attention_wv; } else if (name.find("ffn_down.weight") != std::string::npos) { - ++n_feed_forward_w2; + ++qs.n_feed_forward_w2; } } - if (n_attention_wv != n_feed_forward_w2 || (uint32_t)n_attention_wv != model.hparams.n_layer) { + if (qs.n_attention_wv != qs.n_feed_forward_w2 || (uint32_t)qs.n_attention_wv != model.hparams.n_layer) { LLAMA_LOG_WARN("%s ============ Strange model: n_attention_wv = %d, n_feed_forward_w2 = %d, hparams.n_layer = %d\n", - __func__, n_attention_wv, n_feed_forward_w2, model.hparams.n_layer); + __func__, qs.n_attention_wv, qs.n_feed_forward_w2, model.hparams.n_layer); } - - int i_attention_wv = 0; - int i_feed_forward_w2 = 0; #endif size_t total_size_org = 0; @@ -8370,9 +8388,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s if (quantize) { new_type = quantized_type; #ifdef GGML_USE_K_QUANTS - new_type = get_k_quant_type( - new_type, tensor, model, ftype, &i_attention_wv, n_attention_wv, &i_feed_forward_w2, n_feed_forward_w2 - ); + new_type = get_k_quant_type(qs, new_type, tensor, ftype); #endif // If we've decided to quantize to the same type the tensor is already // in then there's nothing to do. 
@@ -8498,6 +8514,12 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s LLAMA_LOG_INFO("\n"); } } +#ifdef GGML_USE_K_QUANTS + if (qs.n_fallback > 0) { + LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) incompatible with k-quants and required fallback quantization\n", + __func__, qs.n_fallback, qs.n_k_quantized + qs.n_fallback); + } +#endif } static int llama_apply_lora_from_file_internal( From 8a2f2fea2914aaa3f4b2f82800c7de15f15bdb09 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 28 Oct 2023 15:25:15 +0300 Subject: [PATCH 016/206] convert : ignore tokens if their IDs are within [0, vocab_size) (#3831) --- convert.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/convert.py b/convert.py index 0680f71ea73e89..bfbfab283f6ae7 100755 --- a/convert.py +++ b/convert.py @@ -366,16 +366,19 @@ def __init__(self, fname_tokenizer: Path, fname_added_tokens: Path | None) -> No added_tokens = {} vocab_size: int = self.sentencepiece_tokenizer.vocab_size() - expected_ids = list(range(vocab_size, vocab_size + len(added_tokens))) - actual_ids = sorted(added_tokens.values()) - if expected_ids != actual_ids: - raise Exception(f"Expected added token IDs to be sequential and start at {vocab_size}; got {actual_ids}") - items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1]) - self.added_tokens_list = [text for (text, idx) in items] - self.vocab_size_base: int = vocab_size - self.vocab_size: int = self.vocab_size_base + len(self.added_tokens_list) - self.fname_tokenizer = fname_tokenizer + new_tokens = {id: piece for piece, id in added_tokens.items() if id >= vocab_size} + expected_new_ids = list(range(vocab_size, vocab_size + len(new_tokens))) + actual_new_ids = sorted(new_tokens.keys()) + + if expected_new_ids != actual_new_ids: + raise ValueError(f"Expected new token IDs {expected_new_ids} to be sequential; got {actual_new_ids}") + + # Token pieces that were added to the base vocabulary. + self.added_tokens_list = [new_tokens[id] for id in actual_new_ids] + self.vocab_size_base = vocab_size + self.vocab_size = self.vocab_size_base + len(self.added_tokens_list) + self.fname_tokenizer = fname_tokenizer self.fname_added_tokens = fname_added_tokens def sentencepiece_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]: From ba231e8a6dd8ad82acfe0e4d492ff7cef6b3f0a1 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 28 Oct 2023 15:25:33 +0300 Subject: [PATCH 017/206] issues : change label from bug to bug-unconfirmed (#3748) --- .github/ISSUE_TEMPLATE/bug.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md index d7879b232b54dd..c003fe7c136274 100644 --- a/.github/ISSUE_TEMPLATE/bug.md +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -1,7 +1,7 @@ --- name: Bug template about: Used to report bugs in llama.cpp -labels: ["bug"] +labels: ["bug-unconfirmed"] assignees: '' --- From 82a6646e0221216c41edcdf99f5a44bb051391f5 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Sat, 28 Oct 2023 15:43:01 +0300 Subject: [PATCH 018/206] metal : try cwd for ggml-metal.metal if bundle lookup fails (#3793) * Try cwd for ggml-metal if bundle lookup fails When building with `-DBUILD_SHARED_LIBS=ON -DLLAMA_METAL=ON -DLLAMA_BUILD_SERVER=ON`, `server` would fail to load `ggml-metal.metal` because `[bundle pathForResource:...]` returns `nil`. In that case, fall back to `ggml-metal.metal` in the cwd instead of passing `null` as a path. 
Follows up on #1782 * Update ggml-metal.m --------- Co-authored-by: Georgi Gerganov --- ggml-metal.m | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ggml-metal.m b/ggml-metal.m index c1901dca75269d..2380c431001408 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -210,6 +210,10 @@ static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){ GGML_METAL_LOG_INFO("%s: default.metallib not found, loading from source\n", __func__); NSString * sourcePath = [bundle pathForResource:@"ggml-metal" ofType:@"metal"]; + if (sourcePath == nil) { + GGML_METAL_LOG_WARN("%s: error: could not use bundle path to find ggml-metal.metal, falling back to trying cwd\n", __func__); + sourcePath = @"ggml-metal.metal"; + } GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [sourcePath UTF8String]); NSString * src = [NSString stringWithContentsOfFile:sourcePath encoding:NSUTF8StringEncoding error:&error]; if (error) { From ff3bad83e29e3009010cbc923bebd769055eaa7f Mon Sep 17 00:00:00 2001 From: Erik Scholz Date: Sat, 28 Oct 2023 16:41:07 +0200 Subject: [PATCH 019/206] flake : update flake.lock for newer transformers version + provide extra dev shell (#3797) * flake : update flake.lock for newer transformers version + provide extra dev shell with torch and transformers (for most convert-xxx.py scripts) --- flake.lock | 6 +++--- flake.nix | 7 +++++++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/flake.lock b/flake.lock index a7777d05d5c0cd..070f0e1613fc30 100644 --- a/flake.lock +++ b/flake.lock @@ -20,11 +20,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1692913444, - "narHash": "sha256-1SvMQm2DwofNxXVtNWWtIcTh7GctEVrS/Xel/mdc6iY=", + "lastModified": 1698134075, + "narHash": "sha256-foCD+nuKzfh49bIoiCBur4+Fx1nozo+4C/6k8BYk4sg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "18324978d632ffc55ef1d928e81630c620f4f447", + "rev": "8efd5d1e283604f75a808a20e6cde0ef313d07d4", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index cfc4776a469381..fa34394b2f0593 100644 --- a/flake.nix +++ b/flake.nix @@ -51,6 +51,9 @@ }; llama-python = pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece ]); + # TODO(Green-Sky): find a better way to opt-into the heavy ml python runtime + llama-python-extra = + pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece torchWithoutCuda transformers ]); postPatch = '' substituteInPlace ./ggml-metal.m \ --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";" @@ -126,5 +129,9 @@ buildInputs = [ llama-python ]; packages = nativeBuildInputs ++ osSpecific; }; + devShells.extra = pkgs.mkShell { + buildInputs = [ llama-python-extra ]; + packages = nativeBuildInputs ++ osSpecific; + }; }); } From d69d777c02b9ac405a95f3cbfba219a990caefff Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 29 Oct 2023 18:32:28 +0200 Subject: [PATCH 020/206] ggml : quantization refactoring (#3833) * ggml : factor all quantization code in ggml-quants ggml-ci * ggml-quants : fix Zig and Swift builds + quantize tool ggml-ci * quantize : --pure option for disabling k-quant mixtures --------- Co-authored-by: cebtenzzre --- CMakeLists.txt | 12 +- Makefile | 18 +- Package.swift | 3 +- build.zig | 21 +- examples/quantize/quantize.cpp | 9 +- k_quants.c => ggml-quants.c | 4310 ++++++++++++++++++++++++-------- k_quants.h => ggml-quants.h | 103 +- ggml.c | 3641 +++++---------------------- ggml.h | 7 + llama.cpp | 34 +- llama.h | 1 + 11 files changed, 4073 insertions(+), 4086 deletions(-) rename k_quants.c => 
ggml-quants.c (71%) rename k_quants.h => ggml-quants.h (63%) diff --git a/CMakeLists.txt b/CMakeLists.txt index d9fc86237b15cc..3659279e2d7d09 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -94,7 +94,6 @@ option(LLAMA_CLBLAST "llama: use CLBlast" option(LLAMA_METAL "llama: use Metal" ${LLAMA_METAL_DEFAULT}) option(LLAMA_METAL_NDEBUG "llama: disable Metal debugging" OFF) option(LLAMA_MPI "llama: use MPI" OFF) -option(LLAMA_K_QUANTS "llama: use k-quants" ON) option(LLAMA_QKK_64 "llama: use super-block size of 64 for k-quants" OFF) option(LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE}) @@ -278,13 +277,8 @@ if (LLAMA_BLAS) endif() endif() -if (LLAMA_K_QUANTS) - set(GGML_HEADERS_EXTRA k_quants.h) - set(GGML_SOURCES_EXTRA k_quants.c) - add_compile_definitions(GGML_USE_K_QUANTS) - if (LLAMA_QKK_64) - add_compile_definitions(GGML_QKK_64) - endif() +if (LLAMA_QKK_64) + add_compile_definitions(GGML_QKK_64) endif() if (LLAMA_CUBLAS) @@ -673,6 +667,8 @@ add_library(ggml OBJECT ggml-alloc.h ggml-backend.c ggml-backend.h + ggml-quants.c + ggml-quants.h ${GGML_SOURCES_CUDA} ${GGML_HEADERS_CUDA} ${GGML_SOURCES_OPENCL} ${GGML_HEADERS_OPENCL} ${GGML_SOURCES_METAL} ${GGML_HEADERS_METAL} diff --git a/Makefile b/Makefile index 68069f9ff331e9..2cecc2216c87b6 100644 --- a/Makefile +++ b/Makefile @@ -342,13 +342,9 @@ else MK_CXXFLAGS += -march=rv64gcv -mabi=lp64d endif -ifndef LLAMA_NO_K_QUANTS - MK_CPPFLAGS += -DGGML_USE_K_QUANTS - OBJS += k_quants.o ifdef LLAMA_QKK_64 MK_CPPFLAGS += -DGGML_QKK_64 endif -endif ifndef LLAMA_NO_ACCELERATE # Mac OS - include Accelerate framework. @@ -365,7 +361,7 @@ ifdef LLAMA_MPI MK_CPPFLAGS += -DGGML_USE_MPI MK_CFLAGS += -Wno-cast-qual MK_CXXFLAGS += -Wno-cast-qual - OBJS += ggml-mpi.o + OBJS += ggml-mpi.o endif # LLAMA_MPI ifdef LLAMA_OPENBLAS @@ -382,7 +378,7 @@ endif # LLAMA_BLIS ifdef LLAMA_CUBLAS MK_CPPFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include MK_LDFLAGS += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib - OBJS += ggml-cuda.o + OBJS += ggml-cuda.o NVCCFLAGS = --forward-unknown-to-host-compiler -use_fast_math ifdef LLAMA_CUDA_NVCC NVCC = $(LLAMA_CUDA_NVCC) @@ -497,11 +493,6 @@ ggml-mpi.o: ggml-mpi.c ggml-mpi.h $(CC) $(CFLAGS) -c $< -o $@ endif # LLAMA_MPI -ifndef LLAMA_NO_K_QUANTS -k_quants.o: k_quants.c k_quants.h - $(CC) $(CFLAGS) -c $< -o $@ -endif # LLAMA_NO_K_QUANTS - # combine build flags with cmdline overrides override CFLAGS := $(MK_CPPFLAGS) $(CPPFLAGS) $(MK_CFLAGS) $(CFLAGS) override CXXFLAGS := $(MK_CPPFLAGS) $(CPPFLAGS) $(MK_CXXFLAGS) $(CXXFLAGS) @@ -542,7 +533,10 @@ ggml-alloc.o: ggml-alloc.c ggml.h ggml-alloc.h ggml-backend.o: ggml-backend.c ggml.h ggml-backend.h $(CC) $(CFLAGS) -c $< -o $@ -OBJS += ggml-alloc.o ggml-backend.o +ggml-quants.o: ggml-quants.c ggml.h ggml-quants.h + $(CC) $(CFLAGS) -c $< -o $@ + +OBJS += ggml-alloc.o ggml-backend.o ggml-quants.o llama.o: llama.cpp ggml.h ggml-alloc.h ggml-backend.h ggml-cuda.h ggml-metal.h llama.h $(CXX) $(CXXFLAGS) -c $< -o $@ diff --git a/Package.swift b/Package.swift index 4ab055b19da2e5..5b3bd72cafe196 100644 --- a/Package.swift +++ b/Package.swift @@ -42,13 +42,12 @@ let package = Package( "llama.cpp", "ggml-alloc.c", "ggml-backend.c", - "k_quants.c", + "ggml-quants.c", ] + additionalSources, resources: resources, publicHeadersPath: "spm-headers", cSettings: [ .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]), - 
.define("GGML_USE_K_QUANTS"), .define("GGML_USE_ACCELERATE") // NOTE: NEW_LAPACK will required iOS version 16.4+ // We should consider add this in the future when we drop support for iOS 14 diff --git a/build.zig b/build.zig index dcfa3dd6b97643..9b58b74ca858b6 100644 --- a/build.zig +++ b/build.zig @@ -116,15 +116,10 @@ pub fn build(b: *std.build.Builder) !void { var make = try Maker.init(b); make.enable_lto = b.option(bool, "lto", "Enable LTO optimization, (default: false)") orelse false; - if (b.option(bool, "k-quants", "Enable K-quants, (default: true)") orelse true) { - try make.addFlag("-DGGML_USE_K_QUANTS"); - const k_quants = make.obj("k_quants", "k_quants.c"); - try make.objs.append(k_quants); - } - const ggml = make.obj("ggml", "ggml.c"); const ggml_alloc = make.obj("ggml-alloc", "ggml-alloc.c"); const ggml_backend = make.obj("ggml-backend", "ggml-backend.c"); + const ggml_quants = make.obj("ggml-quants", "ggml-quants.c"); const llama = make.obj("llama", "llama.cpp"); const common = make.obj("common", "common/common.cpp"); const console = make.obj("console", "common/console.cpp"); @@ -133,14 +128,14 @@ pub fn build(b: *std.build.Builder) !void { const train = make.obj("train", "common/train.cpp"); const clip = make.obj("clip", "examples/llava/clip.cpp"); - _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, llama, common, sampling, console, grammar_parser }); - _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, llama, common }); - _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, llama, common }); - _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, llama, common }); - _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, llama, common, train }); - _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, llama, common, train }); + _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, sampling, console, grammar_parser }); + _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common }); + _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common }); + _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common }); + _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, train }); + _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, train }); - const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, llama, common, sampling, grammar_parser, clip }); + const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, sampling, grammar_parser, clip }); if (server.target.isWindows()) { server.linkSystemLibrary("ws2_32"); } diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp index c7dd0d894634cb..be0b2fe1eb963f 100644 --- a/examples/quantize/quantize.cpp +++ b/examples/quantize/quantize.cpp @@ -18,7 +18,6 @@ static const std::vector 
QUANT_OPTIONS = { { "Q4_1", LLAMA_FTYPE_MOSTLY_Q4_1, " 3.90G, +0.1585 ppl @ LLaMA-v1-7B", }, { "Q5_0", LLAMA_FTYPE_MOSTLY_Q5_0, " 4.33G, +0.0683 ppl @ LLaMA-v1-7B", }, { "Q5_1", LLAMA_FTYPE_MOSTLY_Q5_1, " 4.70G, +0.0349 ppl @ LLaMA-v1-7B", }, -#ifdef GGML_USE_K_QUANTS { "Q2_K", LLAMA_FTYPE_MOSTLY_Q2_K, " 2.63G, +0.6717 ppl @ LLaMA-v1-7B", }, { "Q3_K", LLAMA_FTYPE_MOSTLY_Q3_K_M, "alias for Q3_K_M" }, { "Q3_K_S", LLAMA_FTYPE_MOSTLY_Q3_K_S, " 2.75G, +0.5551 ppl @ LLaMA-v1-7B", }, @@ -31,7 +30,6 @@ static const std::vector QUANT_OPTIONS = { { "Q5_K_S", LLAMA_FTYPE_MOSTLY_Q5_K_S, " 4.33G, +0.0400 ppl @ LLaMA-v1-7B", }, { "Q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M, " 4.45G, +0.0122 ppl @ LLaMA-v1-7B", }, { "Q6_K", LLAMA_FTYPE_MOSTLY_Q6_K, " 5.15G, -0.0008 ppl @ LLaMA-v1-7B", }, -#endif { "Q8_0", LLAMA_FTYPE_MOSTLY_Q8_0, " 6.70G, +0.0004 ppl @ LLaMA-v1-7B", }, { "F16", LLAMA_FTYPE_MOSTLY_F16, "13.00G @ 7B", }, { "F32", LLAMA_FTYPE_ALL_F32, "26.00G @ 7B", }, @@ -70,13 +68,14 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp } // usage: -// ./quantize [--allow-requantize] [--leave-output-tensor] models/llama/ggml-model.gguf [models/llama/ggml-model-quant.gguf] type [nthreads] +// ./quantize [--allow-requantize] [--leave-output-tensor] [--pure] models/llama/ggml-model.gguf [models/llama/ggml-model-quant.gguf] type [nthreads] // [[noreturn]] static void usage(const char * executable) { - printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable); + printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable); printf(" --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n"); printf(" --leave-output-tensor: Will leave output.weight un(re)quantized. 
Increases model size but may also increase quality, especially when requantizing\n"); + printf(" --pure: Disable k-quant mixtures and quantize all tensors to the same type\n"); printf("\nAllowed quantization types:\n"); for (auto & it : QUANT_OPTIONS) { if (it.name != "COPY") { @@ -103,6 +102,8 @@ int main(int argc, char ** argv) { params.quantize_output_tensor = false; } else if (strcmp(argv[arg_idx], "--allow-requantize") == 0) { params.allow_requantize = true; + } else if (strcmp(argv[arg_idx], "--pure") == 0) { + params.pure = true; } else { usage(argv[0]); } diff --git a/k_quants.c b/ggml-quants.c similarity index 71% rename from k_quants.c rename to ggml-quants.c index 801941fbee075a..fd4ee1be64befa 100644 --- a/k_quants.c +++ b/ggml-quants.c @@ -1,9 +1,10 @@ -#include "k_quants.h" +#include "ggml-quants.h" #include "ggml.h" #include #include #include +#include #ifdef __ARM_NEON @@ -65,1251 +66,3478 @@ inline static int32_t vaddvq_s32(int32x4_t v) { #define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1) -// -// 2-6 bit quantization in super-blocks -// +#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) +// multiply int8_t, add results pairwise twice +static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) { + // Get absolute values of x vectors + const __m128i ax = _mm_sign_epi8(x, x); + // Sign the values of the y vectors + const __m128i sy = _mm_sign_epi8(y, x); + // Perform multiplication and create 16-bit values + const __m128i dot = _mm_maddubs_epi16(ax, sy); + const __m128i ones = _mm_set1_epi16(1); + return _mm_madd_epi16(ones, dot); +} -// -// ===================== Helper functions -// -static inline int nearest_int(float fval) { - assert(fval <= 4194303.f); - float val = fval + 12582912.f; - int i; memcpy(&i, &val, sizeof(int)); - return (i & 0x007fffff) - 0x00400000; +#if __AVX__ || __AVX2__ || __AVX512F__ +// horizontally add 8 floats +static inline float hsum_float_8(const __m256 x) { + __m128 res = _mm256_extractf128_ps(x, 1); + res = _mm_add_ps(res, _mm256_castps256_ps128(x)); + res = _mm_add_ps(res, _mm_movehl_ps(res, res)); + res = _mm_add_ss(res, _mm_movehdup_ps(res)); + return _mm_cvtss_f32(res); } -static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, int rmse_type) { - float max = 0; - float amax = 0; - for (int i = 0; i < n; ++i) { - float ax = fabsf(x[i]); - if (ax > amax) { amax = ax; max = x[i]; } - } - if (amax < 1e-30f) { // all zero - for (int i = 0; i < n; ++i) { - L[i] = 0; +// horizontally add 8 int32_t +static inline int hsum_i32_8(const __m256i a) { + const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1)); + const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128); + const __m128i sum64 = _mm_add_epi32(hi64, sum128); + const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1)); + return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32)); +} + +// horizontally add 4 int32_t +static inline int hsum_i32_4(const __m128i a) { + const __m128i hi64 = _mm_unpackhi_epi64(a, a); + const __m128i sum64 = _mm_add_epi32(hi64, a); + const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1)); + return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32)); +} + +#if defined(__AVX2__) || defined(__AVX512F__) +// spread 32 bits to 32 bytes { 0x00, 0xFF } +static inline __m256i bytes_from_bits_32(const uint8_t * x) { + uint32_t x32; + memcpy(&x32, x, sizeof(uint32_t)); + const __m256i shuf_mask = 
_mm256_set_epi64x( + 0x0303030303030303, 0x0202020202020202, + 0x0101010101010101, 0x0000000000000000); + __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask); + const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe); + bytes = _mm256_or_si256(bytes, bit_mask); + return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1)); +} + +// Unpack 32 4-bit fields into 32 bytes +// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval +static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) +{ + const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi); + const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp); + const __m256i lowMask = _mm256_set1_epi8( 0xF ); + return _mm256_and_si256(lowMask, bytes); +} + +// add int16_t pairwise and return as float vector +static inline __m256 sum_i16_pairs_float(const __m256i x) { + const __m256i ones = _mm256_set1_epi16(1); + const __m256i summed_pairs = _mm256_madd_epi16(ones, x); + return _mm256_cvtepi32_ps(summed_pairs); +} + +static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { +#if __AVXVNNI__ + const __m256i zero = _mm256_setzero_si256(); + const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy); + return _mm256_cvtepi32_ps(summed_pairs); +#else + // Perform multiplication and create 16-bit values + const __m256i dot = _mm256_maddubs_epi16(ax, sy); + return sum_i16_pairs_float(dot); +#endif +} + +// multiply int8_t, add results pairwise twice and return as float vector +static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { +#if __AVXVNNIINT8__ + const __m256i zero = _mm256_setzero_si256(); + const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y); + return _mm256_cvtepi32_ps(summed_pairs); +#else + // Get absolute values of x vectors + const __m256i ax = _mm256_sign_epi8(x, x); + // Sign the values of the y vectors + const __m256i sy = _mm256_sign_epi8(y, x); + return mul_sum_us8_pairs_float(ax, sy); +#endif +} + +static inline __m128i packNibbles( __m256i bytes ) +{ + // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh +#if __AVX512F__ + const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000 + bytes = _mm256_or_si256(bytes, bytes_srli_4); // 0000_abcd_abcd_efgh + return _mm256_cvtepi16_epi8(bytes); // abcd_efgh +#else + const __m256i lowByte = _mm256_set1_epi16( 0xFF ); + __m256i high = _mm256_andnot_si256( lowByte, bytes ); + __m256i low = _mm256_and_si256( lowByte, bytes ); + high = _mm256_srli_epi16( high, 4 ); + bytes = _mm256_or_si256( low, high ); + + // Compress uint16_t lanes into bytes + __m128i r0 = _mm256_castsi256_si128( bytes ); + __m128i r1 = _mm256_extracti128_si256( bytes, 1 ); + return _mm_packus_epi16( r0, r1 ); +#endif +} +#elif defined(__AVX__) +// spread 32 bits to 32 bytes { 0x00, 0xFF } +static inline __m256i bytes_from_bits_32(const uint8_t * x) { + uint32_t x32; + memcpy(&x32, x, sizeof(uint32_t)); + const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000); + const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202); + __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl); + __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh); + const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe); + bytesl = _mm_or_si128(bytesl, bit_mask); + bytesh = _mm_or_si128(bytesh, bit_mask); + bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1)); + bytesh = _mm_cmpeq_epi8(bytesh, 
_mm_set1_epi64x(-1)); + return MM256_SET_M128I(bytesh, bytesl); +} + +// Unpack 32 4-bit fields into 32 bytes +// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval +static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) +{ + // Load 16 bytes from memory + __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi); + __m128i tmph = _mm_srli_epi16(tmpl, 4); + const __m128i lowMask = _mm_set1_epi8(0xF); + tmpl = _mm_and_si128(lowMask, tmpl); + tmph = _mm_and_si128(lowMask, tmph); + return MM256_SET_M128I(tmph, tmpl); +} + +// add int16_t pairwise and return as float vector +static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) { + const __m128i ones = _mm_set1_epi16(1); + const __m128i summed_pairsl = _mm_madd_epi16(ones, xl); + const __m128i summed_pairsh = _mm_madd_epi16(ones, xh); + const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl); + return _mm256_cvtepi32_ps(summed_pairs); +} + +static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { + const __m128i axl = _mm256_castsi256_si128(ax); + const __m128i axh = _mm256_extractf128_si256(ax, 1); + const __m128i syl = _mm256_castsi256_si128(sy); + const __m128i syh = _mm256_extractf128_si256(sy, 1); + // Perform multiplication and create 16-bit values + const __m128i dotl = _mm_maddubs_epi16(axl, syl); + const __m128i doth = _mm_maddubs_epi16(axh, syh); + return sum_i16_pairs_float(doth, dotl); +} + +// multiply int8_t, add results pairwise twice and return as float vector +static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { + const __m128i xl = _mm256_castsi256_si128(x); + const __m128i xh = _mm256_extractf128_si256(x, 1); + const __m128i yl = _mm256_castsi256_si128(y); + const __m128i yh = _mm256_extractf128_si256(y, 1); + // Get absolute values of x vectors + const __m128i axl = _mm_sign_epi8(xl, xl); + const __m128i axh = _mm_sign_epi8(xh, xh); + // Sign the values of the y vectors + const __m128i syl = _mm_sign_epi8(yl, xl); + const __m128i syh = _mm_sign_epi8(yh, xh); + // Perform multiplication and create 16-bit values + const __m128i dotl = _mm_maddubs_epi16(axl, syl); + const __m128i doth = _mm_maddubs_epi16(axh, syh); + return sum_i16_pairs_float(doth, dotl); +} + +static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 ) +{ + // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh + const __m128i lowByte = _mm_set1_epi16( 0xFF ); + __m128i high = _mm_andnot_si128( lowByte, bytes1 ); + __m128i low = _mm_and_si128( lowByte, bytes1 ); + high = _mm_srli_epi16( high, 4 ); + bytes1 = _mm_or_si128( low, high ); + high = _mm_andnot_si128( lowByte, bytes2 ); + low = _mm_and_si128( lowByte, bytes2 ); + high = _mm_srli_epi16( high, 4 ); + bytes2 = _mm_or_si128( low, high ); + + return _mm_packus_epi16( bytes1, bytes2); +} +#endif +#elif defined(__SSSE3__) +// horizontally add 4x4 floats +static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) { + __m128 res_0 =_mm_hadd_ps(a, b); + __m128 res_1 =_mm_hadd_ps(c, d); + __m128 res =_mm_hadd_ps(res_0, res_1); + res =_mm_hadd_ps(res, res); + res =_mm_hadd_ps(res, res); + + return _mm_cvtss_f32(res); +} +#endif // __AVX__ || __AVX2__ || __AVX512F__ +#endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) + +#if defined(__ARM_NEON) + +#if !defined(__aarch64__) + +inline static int32_t vaddvq_s32(int32x4_t v) { + return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + 
vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3); +} + +inline static float vaddvq_f32(float32x4_t v) { + return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3); +} + +inline static float vmaxvq_f32(float32x4_t v) { + return + MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)), + MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3))); +} + +inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) { + int32x4_t res; + + res[0] = roundf(vgetq_lane_f32(v, 0)); + res[1] = roundf(vgetq_lane_f32(v, 1)); + res[2] = roundf(vgetq_lane_f32(v, 2)); + res[3] = roundf(vgetq_lane_f32(v, 3)); + + return res; +} + +#endif +#endif + +#if defined(__ARM_NEON) || defined(__wasm_simd128__) +#define B1(c,s,n) 0x ## n ## c , 0x ## n ## s +#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s) +#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s) +#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s) +#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s) +#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s) +#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s) +#define B8(c,s ) B7(c,s, c), B7(c,s, s) + +// precomputed tables for expanding 8bits to 8 bytes: +static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4 +static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4 +#endif + +// reference implementation for deterministic creation of model files +void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) { + static const int qk = QK4_0; + + assert(k % qk == 0); + + const int nb = k / qk; + + for (int i = 0; i < nb; i++) { + float amax = 0.0f; // absolute max + float max = 0.0f; + + for (int j = 0; j < qk; j++) { + const float v = x[i*qk + j]; + if (amax < fabsf(v)) { + amax = fabsf(v); + max = v; + } } - return 0.f; - } - float iscale = -nmax / max; - if (rmse_type == 0) { - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale * x[i]); - L[i] = nmax + MAX(-nmax, MIN(nmax-1, l)); + + const float d = max / -8; + const float id = d ? 1.0f/d : 0.0f; + + y[i].d = ggml_fp32_to_fp16(d); + + for (int j = 0; j < qk/2; ++j) { + const float x0 = x[i*qk + 0 + j]*id; + const float x1 = x[i*qk + qk/2 + j]*id; + + const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f)); + const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f)); + + y[i].qs[j] = xi0; + y[i].qs[j] |= xi1 << 4; } - return 1/iscale; - } - bool return_early = false; - if (rmse_type < 0) { - rmse_type = -rmse_type; - return_early = true; - } - int weight_type = rmse_type%2; - float sumlx = 0; - float suml2 = 0; - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale * x[i]); - l = MAX(-nmax, MIN(nmax-1, l)); - L[i] = l + nmax; - float w = weight_type == 1 ? x[i] * x[i] : 1; - sumlx += w*x[i]*l; - suml2 += w*l*l; } - float scale = sumlx/suml2; - if (return_early) return suml2 > 0 ? 
0.5f*(scale + 1/iscale) : 1/iscale; - float best = scale * sumlx; - for (int is = -9; is <= 9; ++is) { - if (is == 0) { - continue; +} + +void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) { + quantize_row_q4_0_reference(x, y, k); +} + +void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) { + const int qk = QK4_1; + + assert(k % qk == 0); + + const int nb = k / qk; + + for (int i = 0; i < nb; i++) { + float min = FLT_MAX; + float max = -FLT_MAX; + + for (int j = 0; j < qk; j++) { + const float v = x[i*qk + j]; + + if (v < min) min = v; + if (v > max) max = v; } - iscale = -(nmax + 0.1f*is) / max; - sumlx = suml2 = 0; - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale * x[i]); - l = MAX(-nmax, MIN(nmax-1, l)); - float w = weight_type == 1 ? x[i] * x[i] : 1; - sumlx += w*x[i]*l; - suml2 += w*l*l; + + const float d = (max - min) / ((1 << 4) - 1); + const float id = d ? 1.0f/d : 0.0f; + + y[i].d = ggml_fp32_to_fp16(d); + y[i].m = ggml_fp32_to_fp16(min); + + for (int j = 0; j < qk/2; ++j) { + const float x0 = (x[i*qk + 0 + j] - min)*id; + const float x1 = (x[i*qk + qk/2 + j] - min)*id; + + const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f)); + const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f)); + + y[i].qs[j] = xi0; + y[i].qs[j] |= xi1 << 4; } - if (suml2 > 0 && sumlx*sumlx > best*suml2) { - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale * x[i]); - L[i] = nmax + MAX(-nmax, MIN(nmax-1, l)); + } +} + +void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) { + quantize_row_q4_1_reference(x, y, k); +} + +void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) { + static const int qk = QK5_0; + + assert(k % qk == 0); + + const int nb = k / qk; + + for (int i = 0; i < nb; i++) { + float amax = 0.0f; // absolute max + float max = 0.0f; + + for (int j = 0; j < qk; j++) { + const float v = x[i*qk + j]; + if (amax < fabsf(v)) { + amax = fabsf(v); + max = v; } - scale = sumlx/suml2; best = scale*sumlx; } + + const float d = max / -16; + const float id = d ? 1.0f/d : 0.0f; + + y[i].d = ggml_fp32_to_fp16(d); + + uint32_t qh = 0; + + for (int j = 0; j < qk/2; ++j) { + const float x0 = x[i*qk + 0 + j]*id; + const float x1 = x[i*qk + qk/2 + j]*id; + + const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f)); + const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f)); + + y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4); + + // get the 5-th bit and store it in qh at the right position + qh |= ((xi0 & 0x10u) >> 4) << (j + 0); + qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2); + } + + memcpy(&y[i].qh, &qh, sizeof(qh)); } - return scale; } -static float make_q3_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, bool do_rmse) { - float max = 0; - float amax = 0; - for (int i = 0; i < n; ++i) { - float ax = fabsf(x[i]); - if (ax > amax) { amax = ax; max = x[i]; } +void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) { + quantize_row_q5_0_reference(x, y, k); +} + +void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) { + const int qk = QK5_1; + + assert(k % qk == 0); + + const int nb = k / qk; + + for (int i = 0; i < nb; i++) { + float min = FLT_MAX; + float max = -FLT_MAX; + + for (int j = 0; j < qk; j++) { + const float v = x[i*qk + j]; + + if (v < min) min = v; + if (v > max) max = v; + } + + const float d = (max - min) / ((1 << 5) - 1); + const float id = d ? 
1.0f/d : 0.0f; + + y[i].d = ggml_fp32_to_fp16(d); + y[i].m = ggml_fp32_to_fp16(min); + + uint32_t qh = 0; + + for (int j = 0; j < qk/2; ++j) { + const float x0 = (x[i*qk + 0 + j] - min)*id; + const float x1 = (x[i*qk + qk/2 + j] - min)*id; + + const uint8_t xi0 = (uint8_t)(x0 + 0.5f); + const uint8_t xi1 = (uint8_t)(x1 + 0.5f); + + y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4); + + // get the 5-th bit and store it in qh at the right position + qh |= ((xi0 & 0x10u) >> 4) << (j + 0); + qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2); + } + + memcpy(&y[i].qh, &qh, sizeof(y[i].qh)); } - if (!amax) { // all zero - for (int i = 0; i < n; ++i) { L[i] = 0; } - return 0.f; +} + +void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) { + quantize_row_q5_1_reference(x, y, k); +} + +// reference implementation for deterministic creation of model files +void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) { + assert(k % QK8_0 == 0); + const int nb = k / QK8_0; + + for (int i = 0; i < nb; i++) { + float amax = 0.0f; // absolute max + + for (int j = 0; j < QK8_0; j++) { + const float v = x[i*QK8_0 + j]; + amax = MAX(amax, fabsf(v)); + } + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + + y[i].d = ggml_fp32_to_fp16(d); + + for (int j = 0; j < QK8_0; ++j) { + const float x0 = x[i*QK8_0 + j]*id; + + y[i].qs[j] = roundf(x0); + } } - float iscale = -nmax / max; - if (do_rmse) { - float sumlx = 0; - float suml2 = 0; - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale * x[i]); - l = MAX(-nmax, MIN(nmax-1, l)); - L[i] = l; - float w = x[i]*x[i]; - sumlx += w*x[i]*l; - suml2 += w*l*l; +} + +void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) { + assert(QK8_0 == 32); + assert(k % QK8_0 == 0); + const int nb = k / QK8_0; + + block_q8_0 * restrict y = vy; + +#if defined(__ARM_NEON) + for (int i = 0; i < nb; i++) { + float32x4_t srcv [8]; + float32x4_t asrcv[8]; + float32x4_t amaxv[8]; + + for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]); + + for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]); + + const float amax = vmaxvq_f32(amaxv[0]); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 
1.0f/d : 0.0f; + + y[i].d = ggml_fp32_to_fp16(d); + + for (int j = 0; j < 8; j++) { + const float32x4_t v = vmulq_n_f32(srcv[j], id); + const int32x4_t vi = vcvtnq_s32_f32(v); + + y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0); + y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1); + y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2); + y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3); } - for (int itry = 0; itry < 5; ++itry) { - int n_changed = 0; - for (int i = 0; i < n; ++i) { - float w = x[i]*x[i]; - float slx = sumlx - w*x[i]*L[i]; - if (slx > 0) { - float sl2 = suml2 - w*L[i]*L[i]; - int new_l = nearest_int(x[i] * sl2 / slx); - new_l = MAX(-nmax, MIN(nmax-1, new_l)); - if (new_l != L[i]) { - slx += w*x[i]*new_l; - sl2 += w*new_l*new_l; - if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) { - L[i] = new_l; sumlx = slx; suml2 = sl2; - ++n_changed; - } - } - } + } +#elif defined(__wasm_simd128__) + for (int i = 0; i < nb; i++) { + v128_t srcv [8]; + v128_t asrcv[8]; + v128_t amaxv[8]; + + for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]); + + for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]); + + const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0), + wasm_f32x4_extract_lane(amaxv[0], 1)), + MAX(wasm_f32x4_extract_lane(amaxv[0], 2), + wasm_f32x4_extract_lane(amaxv[0], 3))); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + + y[i].d = ggml_fp32_to_fp16(d); + + for (int j = 0; j < 8; j++) { + const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); + const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v); + + y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0); + y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1); + y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2); + y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3); + } + } +#elif defined(__AVX2__) || defined(__AVX__) + for (int i = 0; i < nb; i++) { + // Load elements into 4 AVX vectors + __m256 v0 = _mm256_loadu_ps( x ); + __m256 v1 = _mm256_loadu_ps( x + 8 ); + __m256 v2 = _mm256_loadu_ps( x + 16 ); + __m256 v3 = _mm256_loadu_ps( x + 24 ); + x += 32; + + // Compute max(abs(e)) for the block + const __m256 signBit = _mm256_set1_ps( -0.0f ); + __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); + + __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); + max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); + max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); + const float maxScalar = _mm_cvtss_f32( max4 ); + + // Quantize these floats + const float d = maxScalar / 127.f; + y[i].d = ggml_fp32_to_fp16(d); + const float id = ( maxScalar != 0.0f ) ? 
127.f / maxScalar : 0.0f; + const __m256 mul = _mm256_set1_ps( id ); + + // Apply the multiplier + v0 = _mm256_mul_ps( v0, mul ); + v1 = _mm256_mul_ps( v1, mul ); + v2 = _mm256_mul_ps( v2, mul ); + v3 = _mm256_mul_ps( v3, mul ); + + // Round to nearest integer + v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); + v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); + v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); + v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); + + // Convert floats to integers + __m256i i0 = _mm256_cvtps_epi32( v0 ); + __m256i i1 = _mm256_cvtps_epi32( v1 ); + __m256i i2 = _mm256_cvtps_epi32( v2 ); + __m256i i3 = _mm256_cvtps_epi32( v3 ); + +#if defined(__AVX2__) + // Convert int32 to int16 + i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 + i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 + // Convert int16 to int8 + i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 + + // We got our precious signed bytes, but the order is now wrong + // These AVX2 pack instructions process 16-byte pieces independently + // The following instruction is fixing the order + const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); + i0 = _mm256_permutevar8x32_epi32( i0, perm ); + + _mm256_storeu_si256((__m256i *)y[i].qs, i0); +#else + // Since we don't have in AVX some necessary functions, + // we split the registers in half and call AVX2 analogs from SSE + __m128i ni0 = _mm256_castsi256_si128( i0 ); + __m128i ni1 = _mm256_extractf128_si256( i0, 1); + __m128i ni2 = _mm256_castsi256_si128( i1 ); + __m128i ni3 = _mm256_extractf128_si256( i1, 1); + __m128i ni4 = _mm256_castsi256_si128( i2 ); + __m128i ni5 = _mm256_extractf128_si256( i2, 1); + __m128i ni6 = _mm256_castsi256_si128( i3 ); + __m128i ni7 = _mm256_extractf128_si256( i3, 1); + + // Convert int32 to int16 + ni0 = _mm_packs_epi32( ni0, ni1 ); + ni2 = _mm_packs_epi32( ni2, ni3 ); + ni4 = _mm_packs_epi32( ni4, ni5 ); + ni6 = _mm_packs_epi32( ni6, ni7 ); + // Convert int16 to int8 + ni0 = _mm_packs_epi16( ni0, ni2 ); + ni4 = _mm_packs_epi16( ni4, ni6 ); + + _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); + _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4); +#endif + } +#elif defined(__riscv_v_intrinsic) + + size_t vl = __riscv_vsetvl_e32m4(QK8_0); + + for (int i = 0; i < nb; i++) { + // load elements + vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_0, vl); + + vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl); + vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0f, vl); + vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl); + float amax = __riscv_vfmv_f_s_f32m1_f32(vmax); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 
1.0f/d : 0.0f; + + y[i].d = ggml_fp32_to_fp16(d); + + vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl); + + // convert to integer + vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl); + vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl); + + // store result + __riscv_vse8_v_i8m1(y[i].qs , vs, vl); + } +#else + // scalar + quantize_row_q8_0_reference(x, y, k); +#endif +} + +// reference implementation for deterministic creation of model files +void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) { + assert(QK8_1 == 32); + assert(k % QK8_1 == 0); + const int nb = k / QK8_1; + + for (int i = 0; i < nb; i++) { + float amax = 0.0f; // absolute max + + for (int j = 0; j < QK8_1; j++) { + const float v = x[i*QK8_1 + j]; + amax = MAX(amax, fabsf(v)); + } + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + + y[i].d = d; + + int sum = 0; + + for (int j = 0; j < QK8_1/2; ++j) { + const float v0 = x[i*QK8_1 + j]*id; + const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id; + + y[i].qs[ j] = roundf(v0); + y[i].qs[QK8_1/2 + j] = roundf(v1); + + sum += y[i].qs[ j]; + sum += y[i].qs[QK8_1/2 + j]; + } + + y[i].s = sum*d; + } +} + +void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) { + assert(k % QK8_1 == 0); + const int nb = k / QK8_1; + + block_q8_1 * restrict y = vy; + +#if defined(__ARM_NEON) + for (int i = 0; i < nb; i++) { + float32x4_t srcv [8]; + float32x4_t asrcv[8]; + float32x4_t amaxv[8]; + + for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]); + + for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]); + + const float amax = vmaxvq_f32(amaxv[0]); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + + y[i].d = d; + + int32x4_t accv = vdupq_n_s32(0); + + for (int j = 0; j < 8; j++) { + const float32x4_t v = vmulq_n_f32(srcv[j], id); + const int32x4_t vi = vcvtnq_s32_f32(v); + + y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0); + y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1); + y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2); + y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3); + + accv = vaddq_s32(accv, vi); + } + + y[i].s = d * vaddvq_s32(accv); + } +#elif defined(__wasm_simd128__) + for (int i = 0; i < nb; i++) { + v128_t srcv [8]; + v128_t asrcv[8]; + v128_t amaxv[8]; + + for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]); + + for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]); + + const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0), + wasm_f32x4_extract_lane(amaxv[0], 1)), + MAX(wasm_f32x4_extract_lane(amaxv[0], 2), + wasm_f32x4_extract_lane(amaxv[0], 3))); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 
1.0f/d : 0.0f; + + y[i].d = d; + + v128_t accv = wasm_i32x4_splat(0); + + for (int j = 0; j < 8; j++) { + const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); + const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v); + + y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0); + y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1); + y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2); + y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3); + + accv = wasm_i32x4_add(accv, vi); + } + + y[i].s = d * (wasm_i32x4_extract_lane(accv, 0) + + wasm_i32x4_extract_lane(accv, 1) + + wasm_i32x4_extract_lane(accv, 2) + + wasm_i32x4_extract_lane(accv, 3)); + } +#elif defined(__AVX2__) || defined(__AVX__) + for (int i = 0; i < nb; i++) { + // Load elements into 4 AVX vectors + __m256 v0 = _mm256_loadu_ps( x ); + __m256 v1 = _mm256_loadu_ps( x + 8 ); + __m256 v2 = _mm256_loadu_ps( x + 16 ); + __m256 v3 = _mm256_loadu_ps( x + 24 ); + x += 32; + + // Compute max(abs(e)) for the block + const __m256 signBit = _mm256_set1_ps( -0.0f ); + __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); + + __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); + max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); + max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); + const float maxScalar = _mm_cvtss_f32( max4 ); + + // Quantize these floats + const float d = maxScalar / 127.f; + y[i].d = d; + const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f; + const __m256 mul = _mm256_set1_ps( id ); + + // Apply the multiplier + v0 = _mm256_mul_ps( v0, mul ); + v1 = _mm256_mul_ps( v1, mul ); + v2 = _mm256_mul_ps( v2, mul ); + v3 = _mm256_mul_ps( v3, mul ); + + // Round to nearest integer + v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); + v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); + v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); + v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); + + // Convert floats to integers + __m256i i0 = _mm256_cvtps_epi32( v0 ); + __m256i i1 = _mm256_cvtps_epi32( v1 ); + __m256i i2 = _mm256_cvtps_epi32( v2 ); + __m256i i3 = _mm256_cvtps_epi32( v3 ); + +#if defined(__AVX2__) + // Compute the sum of the quants and set y[i].s + y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3))); + + // Convert int32 to int16 + i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 + i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 + // Convert int16 to int8 + i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 + + // We got our precious signed bytes, but the order is now wrong + // These AVX2 pack instructions process 16-byte pieces independently + // The following instruction is fixing the order + const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); + i0 = _mm256_permutevar8x32_epi32( i0, perm ); + + _mm256_storeu_si256((__m256i *)y[i].qs, i0); +#else + // Since we don't have in AVX some necessary functions, + // we split the registers in half and call AVX2 analogs from SSE + __m128i ni0 = _mm256_castsi256_si128( i0 ); + __m128i ni1 = _mm256_extractf128_si256( i0, 1); + __m128i ni2 = _mm256_castsi256_si128( i1 ); + __m128i ni3 = 
_mm256_extractf128_si256( i1, 1); + __m128i ni4 = _mm256_castsi256_si128( i2 ); + __m128i ni5 = _mm256_extractf128_si256( i2, 1); + __m128i ni6 = _mm256_castsi256_si128( i3 ); + __m128i ni7 = _mm256_extractf128_si256( i3, 1); + + // Compute the sum of the quants and set y[i].s + const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3)); + const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7)); + y[i].s = d * hsum_i32_4(_mm_add_epi32(s0, s1)); + + // Convert int32 to int16 + ni0 = _mm_packs_epi32( ni0, ni1 ); + ni2 = _mm_packs_epi32( ni2, ni3 ); + ni4 = _mm_packs_epi32( ni4, ni5 ); + ni6 = _mm_packs_epi32( ni6, ni7 ); + // Convert int16 to int8 + ni0 = _mm_packs_epi16( ni0, ni2 ); + ni4 = _mm_packs_epi16( ni4, ni6 ); + + _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); + _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4); +#endif + } +#elif defined(__riscv_v_intrinsic) + + size_t vl = __riscv_vsetvl_e32m4(QK8_1); + + for (int i = 0; i < nb; i++) { + // load elements + vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_1, vl); + + vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl); + vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0, vl); + vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl); + float amax = __riscv_vfmv_f_s_f32m1_f32(vmax); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + + y[i].d = d; + + vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl); + + // convert to integer + vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl); + vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl); + + // store result + __riscv_vse8_v_i8m1(y[i].qs , vs, vl); + + // compute sum for y[i].s + vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl); + vint16m1_t vwrs = __riscv_vwredsum_vs_i8m1_i16m1(vs, tmp2, vl); + + // set y[i].s + int sum = __riscv_vmv_x_s_i16m1_i16(vwrs); + y[i].s = sum*d; + } +#else + // scalar + quantize_row_q8_1_reference(x, y, k); +#endif +} + +void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) { + static const int qk = QK4_0; + + assert(k % qk == 0); + + const int nb = k / qk; + + for (int i = 0; i < nb; i++) { + const float d = ggml_fp16_to_fp32(x[i].d); + + for (int j = 0; j < qk/2; ++j) { + const int x0 = (x[i].qs[j] & 0x0F) - 8; + const int x1 = (x[i].qs[j] >> 4) - 8; + + y[i*qk + j + 0 ] = x0*d; + y[i*qk + j + qk/2] = x1*d; + } + } +} + +void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) { + static const int qk = QK4_1; + + assert(k % qk == 0); + + const int nb = k / qk; + + for (int i = 0; i < nb; i++) { + const float d = ggml_fp16_to_fp32(x[i].d); + const float m = ggml_fp16_to_fp32(x[i].m); + + for (int j = 0; j < qk/2; ++j) { + const int x0 = (x[i].qs[j] & 0x0F); + const int x1 = (x[i].qs[j] >> 4); + + y[i*qk + j + 0 ] = x0*d + m; + y[i*qk + j + qk/2] = x1*d + m; + } + } +} + +void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) { + static const int qk = QK5_0; + + assert(k % qk == 0); + + const int nb = k / qk; + + for (int i = 0; i < nb; i++) { + const float d = ggml_fp16_to_fp32(x[i].d); + + uint32_t qh; + memcpy(&qh, x[i].qh, sizeof(qh)); + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; + const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; + + const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16; + const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16; + + y[i*qk + j + 0 ] = x0*d; + y[i*qk + j + qk/2] = x1*d; + } + } +} + +void dequantize_row_q5_1(const 
block_q5_1 * restrict x, float * restrict y, int k) { + static const int qk = QK5_1; + + assert(k % qk == 0); + + const int nb = k / qk; + + for (int i = 0; i < nb; i++) { + const float d = ggml_fp16_to_fp32(x[i].d); + const float m = ggml_fp16_to_fp32(x[i].m); + + uint32_t qh; + memcpy(&qh, x[i].qh, sizeof(qh)); + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; + const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; + + const int x0 = (x[i].qs[j] & 0x0F) | xh_0; + const int x1 = (x[i].qs[j] >> 4) | xh_1; + + y[i*qk + j + 0 ] = x0*d + m; + y[i*qk + j + qk/2] = x1*d + m; + } + } +} + +void dequantize_row_q8_0(const block_q8_0 * restrict x, float * restrict y, int k) { + static const int qk = QK8_0; + + assert(k % qk == 0); + + const int nb = k / qk; + + for (int i = 0; i < nb; i++) { + const float d = ggml_fp16_to_fp32(x[i].d); + + for (int j = 0; j < qk; ++j) { + y[i*qk + j] = x[i].qs[j]*d; + } + } +} + +// +// 2-6 bit quantization in super-blocks +// + +// +// ===================== Helper functions +// +static inline int nearest_int(float fval) { + assert(fval <= 4194303.f); + float val = fval + 12582912.f; + int i; memcpy(&i, &val, sizeof(int)); + return (i & 0x007fffff) - 0x00400000; +} + +static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, int rmse_type) { + float max = 0; + float amax = 0; + for (int i = 0; i < n; ++i) { + float ax = fabsf(x[i]); + if (ax > amax) { amax = ax; max = x[i]; } + } + if (amax < 1e-30f) { // all zero + for (int i = 0; i < n; ++i) { + L[i] = 0; + } + return 0.f; + } + float iscale = -nmax / max; + if (rmse_type == 0) { + for (int i = 0; i < n; ++i) { + int l = nearest_int(iscale * x[i]); + L[i] = nmax + MAX(-nmax, MIN(nmax-1, l)); + } + return 1/iscale; + } + bool return_early = false; + if (rmse_type < 0) { + rmse_type = -rmse_type; + return_early = true; + } + int weight_type = rmse_type%2; + float sumlx = 0; + float suml2 = 0; + for (int i = 0; i < n; ++i) { + int l = nearest_int(iscale * x[i]); + l = MAX(-nmax, MIN(nmax-1, l)); + L[i] = l + nmax; + float w = weight_type == 1 ? x[i] * x[i] : 1; + sumlx += w*x[i]*l; + suml2 += w*l*l; + } + float scale = sumlx/suml2; + if (return_early) return suml2 > 0 ? 0.5f*(scale + 1/iscale) : 1/iscale; + float best = scale * sumlx; + for (int is = -9; is <= 9; ++is) { + if (is == 0) { + continue; + } + iscale = -(nmax + 0.1f*is) / max; + sumlx = suml2 = 0; + for (int i = 0; i < n; ++i) { + int l = nearest_int(iscale * x[i]); + l = MAX(-nmax, MIN(nmax-1, l)); + float w = weight_type == 1 ? 
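/*
 * Editor's note, not part of this patch: why the nearest_int helper above
 * works. 12582912.0f is 1.5f * 2^23; for |fval| below about 2^22 (the assert
 * guards the positive side), fval + 12582912.0f lies in [2^23, 2^24), where
 * adjacent floats are exactly 1.0 apart, so the addition itself rounds fval to
 * the nearest integer (ties to even). In that range the stored mantissa equals
 * 0x400000 + round(fval), which the mask-and-subtract recovers. A tiny
 * self-contained check against lroundf (skipping exact .5 ties, where the two
 * rounding rules may legitimately differ):
 */
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>

static int nearest_int_sketch(float fval) {
    assert(fval <= 4194303.f);
    float val = fval + 12582912.f;
    int i; memcpy(&i, &val, sizeof(int));
    return (i & 0x007fffff) - 0x00400000;
}

int main(void) {
    for (float v = -1000.25f; v < 1000.0f; v += 7.75f) {
        if (fabsf(v - floorf(v) - 0.5f) > 1e-6f) {       // skip .5 ties
            assert(nearest_int_sketch(v) == (int)lroundf(v));
        }
    }
    printf("nearest_int sketch agrees with lroundf away from ties\n");
    return 0;
}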
x[i] * x[i] : 1; + sumlx += w*x[i]*l; + suml2 += w*l*l; + } + if (suml2 > 0 && sumlx*sumlx > best*suml2) { + for (int i = 0; i < n; ++i) { + int l = nearest_int(iscale * x[i]); + L[i] = nmax + MAX(-nmax, MIN(nmax-1, l)); + } + scale = sumlx/suml2; best = scale*sumlx; + } + } + return scale; +} + +static float make_q3_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, bool do_rmse) { + float max = 0; + float amax = 0; + for (int i = 0; i < n; ++i) { + float ax = fabsf(x[i]); + if (ax > amax) { amax = ax; max = x[i]; } + } + if (!amax) { // all zero + for (int i = 0; i < n; ++i) { L[i] = 0; } + return 0.f; + } + float iscale = -nmax / max; + if (do_rmse) { + float sumlx = 0; + float suml2 = 0; + for (int i = 0; i < n; ++i) { + int l = nearest_int(iscale * x[i]); + l = MAX(-nmax, MIN(nmax-1, l)); + L[i] = l; + float w = x[i]*x[i]; + sumlx += w*x[i]*l; + suml2 += w*l*l; + } + for (int itry = 0; itry < 5; ++itry) { + int n_changed = 0; + for (int i = 0; i < n; ++i) { + float w = x[i]*x[i]; + float slx = sumlx - w*x[i]*L[i]; + if (slx > 0) { + float sl2 = suml2 - w*L[i]*L[i]; + int new_l = nearest_int(x[i] * sl2 / slx); + new_l = MAX(-nmax, MIN(nmax-1, new_l)); + if (new_l != L[i]) { + slx += w*x[i]*new_l; + sl2 += w*new_l*new_l; + if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) { + L[i] = new_l; sumlx = slx; suml2 = sl2; + ++n_changed; + } + } + } + } + if (!n_changed) { + break; + } + } + for (int i = 0; i < n; ++i) { + L[i] += nmax; + } + return sumlx / suml2; + } + for (int i = 0; i < n; ++i) { + int l = nearest_int(iscale * x[i]); + l = MAX(-nmax, MIN(nmax-1, l)); + L[i] = l + nmax; + } + return 1/iscale; +} + +static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min, + int ntry, float alpha) { + float min = x[0]; + float max = x[0]; + for (int i = 1; i < n; ++i) { + if (x[i] < min) min = x[i]; + if (x[i] > max) max = x[i]; + } + if (max == min) { + for (int i = 0; i < n; ++i) L[i] = 0; + *the_min = 0; + return 0.f; + } + if (min > 0) min = 0; + float iscale = nmax/(max - min); + float scale = 1/iscale; + for (int itry = 0; itry < ntry; ++itry) { + float sumlx = 0; int suml2 = 0; + bool did_change = false; + for (int i = 0; i < n; ++i) { + int l = nearest_int(iscale*(x[i] - min)); + l = MAX(0, MIN(nmax, l)); + if (l != L[i]) { + L[i] = l; + did_change = true; + } + sumlx += (x[i] - min)*l; + suml2 += l*l; + } + scale = sumlx/suml2; + float sum = 0; + for (int i = 0; i < n; ++i) { + sum += x[i] - scale*L[i]; + } + min = alpha*min + (1 - alpha)*sum/n; + if (min > 0) min = 0; + iscale = 1/scale; + if (!did_change) break; + } + *the_min = -min; + return scale; +} + +static float make_qkx2_quants(int n, int nmax, const float * restrict x, const float * restrict weights, + uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux, + float rmin, float rdelta, int nstep, bool use_mad) { + float min = x[0]; + float max = x[0]; + float sum_w = weights[0]; + float sum_x = sum_w * x[0]; + for (int i = 1; i < n; ++i) { + if (x[i] < min) min = x[i]; + if (x[i] > max) max = x[i]; + float w = weights[i]; + sum_w += w; + sum_x += w * x[i]; + } + if (min > 0) min = 0; + if (max == min) { + for (int i = 0; i < n; ++i) L[i] = 0; + *the_min = -min; + return 0.f; + } + float iscale = nmax/(max - min); + float scale = 1/iscale; + float best_mad = 0; + for (int i = 0; i < n; ++i) { + int l = nearest_int(iscale*(x[i] - min)); + L[i] = MAX(0, MIN(nmax, l)); + float diff = scale * L[i] + min - x[i]; + diff = use_mad ? 
fabsf(diff) : diff * diff; + float w = weights[i]; + best_mad += w * diff; + } + if (nstep < 1) { + *the_min = -min; + return scale; + } + for (int is = 0; is <= nstep; ++is) { + iscale = (rmin + rdelta*is + nmax)/(max - min); + float sum_l = 0, sum_l2 = 0, sum_xl = 0; + for (int i = 0; i < n; ++i) { + int l = nearest_int(iscale*(x[i] - min)); + l = MAX(0, MIN(nmax, l)); + Laux[i] = l; + float w = weights[i]; + sum_l += w*l; + sum_l2 += w*l*l; + sum_xl += w*l*x[i]; + } + float D = sum_w * sum_l2 - sum_l * sum_l; + if (D > 0) { + float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D; + float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D; + if (this_min > 0) { + this_min = 0; + this_scale = sum_xl / sum_l2; + } + float mad = 0; + for (int i = 0; i < n; ++i) { + float diff = this_scale * Laux[i] + this_min - x[i]; + diff = use_mad ? fabsf(diff) : diff * diff; + float w = weights[i]; + mad += w * diff; + } + if (mad < best_mad) { + for (int i = 0; i < n; ++i) { + L[i] = Laux[i]; + } + best_mad = mad; + scale = this_scale; + min = this_min; + } + } + } + *the_min = -min; + return scale; +} + +#if QK_K == 256 +static inline void get_scale_min_k4(int j, const uint8_t * restrict q, uint8_t * restrict d, uint8_t * restrict m) { + if (j < 4) { + *d = q[j] & 63; *m = q[j + 4] & 63; + } else { + *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4); + *m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4); + } +} +#endif + +//========================- 2-bit (de)-quantization + +void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k) { + assert(k % QK_K == 0); + const int nb = k / QK_K; + + uint8_t L[QK_K]; + uint8_t Laux[16]; + float weights[16]; + float mins[QK_K/16]; + float scales[QK_K/16]; + + const float q4scale = 15.f; + + for (int i = 0; i < nb; i++) { + float max_scale = 0; // as we are deducting the min, scales are always positive + float max_min = 0; + for (int j = 0; j < QK_K/16; ++j) { + for (int l = 0; l < 16; ++l) weights[l] = fabsf(x[16*j + l]); + scales[j] = make_qkx2_quants(16, 3, x + 16*j, weights, L + 16*j, &mins[j], Laux, -0.5f, 0.1f, 15, true); + float scale = scales[j]; + if (scale > max_scale) { + max_scale = scale; + } + float min = mins[j]; + if (min > max_min) { + max_min = min; + } + } + + if (max_scale > 0) { + float iscale = q4scale/max_scale; + for (int j = 0; j < QK_K/16; ++j) { + int l = nearest_int(iscale*scales[j]); + y[i].scales[j] = l; + } + y[i].d = ggml_fp32_to_fp16(max_scale/q4scale); + } else { + for (int j = 0; j < QK_K/16; ++j) y[i].scales[j] = 0; + y[i].d = ggml_fp32_to_fp16(0.f); + } + if (max_min > 0) { + float iscale = q4scale/max_min; + for (int j = 0; j < QK_K/16; ++j) { + int l = nearest_int(iscale*mins[j]); + y[i].scales[j] |= (l << 4); + } + y[i].dmin = ggml_fp32_to_fp16(max_min/q4scale); + } else { + y[i].dmin = ggml_fp32_to_fp16(0.f); + } + for (int j = 0; j < QK_K/16; ++j) { + const float d = ggml_fp16_to_fp32(y[i].d) * (y[i].scales[j] & 0xF); + if (!d) continue; + const float dm = ggml_fp16_to_fp32(y[i].dmin) * (y[i].scales[j] >> 4); + for (int ii = 0; ii < 16; ++ii) { + int l = nearest_int((x[16*j + ii] + dm)/d); + l = MAX(0, MIN(3, l)); + L[16*j + ii] = l; + } + } + +#if QK_K == 256 + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) { + y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6); + } + } +#else + for (int l = 0; l < 16; ++l) { + y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6); + } +#endif + + x += QK_K; + + } +} + +void 
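/*
 * Editor's note, not part of this patch: the per-entry reconstruction implied
 * by quantize_row_q2_K_reference above. Each 16-value sub-block j of a q2_K
 * super-block gets one packed byte sc = scales[j] (low nibble: 4-bit scale
 * index, high nibble: 4-bit min index), and the super-block carries two fp16
 * factors d and dmin, so a 2-bit code q dequantizes to
 *   d*(sc & 0xF)*q - dmin*(sc >> 4),
 * the dl/ml arithmetic of the dequantization routine that follows. Plain-float
 * sketch of that formula:
 */
#include <stdint.h>

static inline float q2_K_entry_sketch(float d, float dmin, uint8_t sc, int q /* 0..3 */) {
    const float dl = d    * (sc & 0xF);   // effective sub-block scale
    const float ml = dmin * (sc >> 4);    // effective sub-block offset
    return dl * (float)q - ml;
}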
dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k) { + assert(k % QK_K == 0); + const int nb = k / QK_K; + + for (int i = 0; i < nb; i++) { + + const float d = ggml_fp16_to_fp32(x[i].d); + const float min = ggml_fp16_to_fp32(x[i].dmin); + + const uint8_t * q = x[i].qs; + +#if QK_K == 256 + int is = 0; + float dl, ml; + for (int n = 0; n < QK_K; n += 128) { + int shift = 0; + for (int j = 0; j < 4; ++j) { + + uint8_t sc = x[i].scales[is++]; + dl = d * (sc & 0xF); ml = min * (sc >> 4); + for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml; + + sc = x[i].scales[is++]; + dl = d * (sc & 0xF); ml = min * (sc >> 4); + for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml; + + shift += 2; + } + q += 32; + } +#else + float dl1 = d * (x[i].scales[0] & 0xF), ml1 = min * (x[i].scales[0] >> 4); + float dl2 = d * (x[i].scales[1] & 0xF), ml2 = min * (x[i].scales[1] >> 4); + float dl3 = d * (x[i].scales[2] & 0xF), ml3 = min * (x[i].scales[2] >> 4); + float dl4 = d * (x[i].scales[3] & 0xF), ml4 = min * (x[i].scales[3] >> 4); + for (int l = 0; l < 16; ++l) { + y[l+ 0] = dl1 * ((int8_t)((q[l] >> 0) & 3)) - ml1; + y[l+16] = dl2 * ((int8_t)((q[l] >> 2) & 3)) - ml2; + y[l+32] = dl3 * ((int8_t)((q[l] >> 4) & 3)) - ml3; + y[l+48] = dl4 * ((int8_t)((q[l] >> 6) & 3)) - ml4; + } + y += QK_K; +#endif + } +} + +void quantize_row_q2_K(const float * restrict x, void * restrict vy, int k) { + quantize_row_q2_K_reference(x, vy, k); +} + +size_t ggml_quantize_q2_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) { + (void)hist; // TODO: collect histograms + + for (int j = 0; j < n; j += k) { + block_q2_K * restrict y = (block_q2_K *)dst + j/QK_K; + quantize_row_q2_K_reference(src + j, y, k); + } + return (n/QK_K*sizeof(block_q2_K)); +} + +//========================= 3-bit (de)-quantization + +void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k) { + assert(k % QK_K == 0); + const int nb = k / QK_K; + + int8_t L[QK_K]; + float scales[QK_K / 16]; + + for (int i = 0; i < nb; i++) { + + float max_scale = 0; + float amax = 0; + for (int j = 0; j < QK_K/16; ++j) { + scales[j] = make_q3_quants(16, 4, x + 16*j, L + 16*j, true); + float scale = fabsf(scales[j]); + if (scale > amax) { + amax = scale; max_scale = scales[j]; + } + } + +#if QK_K == 256 + memset(y[i].scales, 0, 12); + if (max_scale) { + float iscale = -32.f/max_scale; + for (int j = 0; j < QK_K/16; ++j) { + int8_t l = nearest_int(iscale*scales[j]); + l = MAX(-32, MIN(31, l)) + 32; + if (j < 8) { + y[i].scales[j] = l & 0xF; + } else { + y[i].scales[j-8] |= ((l & 0xF) << 4); + } + l >>= 4; + y[i].scales[j%4 + 8] |= (l << (2*(j/4))); + } + y[i].d = ggml_fp32_to_fp16(1/iscale); + } else { + y[i].d = ggml_fp32_to_fp16(0.f); + } + + int8_t sc; + for (int j = 0; j < QK_K/16; ++j) { + sc = j < 8 ? 
y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4; + sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32; + float d = ggml_fp16_to_fp32(y[i].d) * sc; + if (!d) { + continue; + } + for (int ii = 0; ii < 16; ++ii) { + int l = nearest_int(x[16*j + ii]/d); + l = MAX(-4, MIN(3, l)); + L[16*j + ii] = l + 4; + } + } +#else + if (max_scale) { + float iscale = -8.f/max_scale; + for (int j = 0; j < QK_K/16; j+=2) { + int l1 = nearest_int(iscale*scales[j]); + l1 = 8 + MAX(-8, MIN(7, l1)); + int l2 = nearest_int(iscale*scales[j+1]); + l2 = 8 + MAX(-8, MIN(7, l2)); + y[i].scales[j/2] = l1 | (l2 << 4); + } + y[i].d = ggml_fp32_to_fp16(1/iscale); + } else { + for (int j = 0; j < QK_K/16; j+=2) { + y[i].scales[j/2] = 0; + } + y[i].d = ggml_fp32_to_fp16(0.f); + } + for (int j = 0; j < QK_K/16; ++j) { + int s = j%2 == 0 ? y[i].scales[j/2] & 0xF : y[i].scales[j/2] >> 4; + float d = ggml_fp16_to_fp32(y[i].d) * (s - 8); + if (!d) { + continue; + } + for (int ii = 0; ii < 16; ++ii) { + int l = nearest_int(x[16*j + ii]/d); + l = MAX(-4, MIN(3, l)); + L[16*j + ii] = l + 4; + } + } +#endif + + memset(y[i].hmask, 0, QK_K/8); + // We put the high-bit for the 1st 8 quants into bit 0, the next 8 into bit 1, etc. + int m = 0; + uint8_t hm = 1; + for (int j = 0; j < QK_K; ++j) { + if (L[j] > 3) { + y[i].hmask[m] |= hm; + L[j] -= 4; + } + if (++m == QK_K/8) { + m = 0; hm <<= 1; + } + } +#if QK_K == 256 + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) { + y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6); + } + } +#else + for (int l = 0; l < 16; ++l) { + y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6); + } +#endif + + x += QK_K; + } +} + +#if QK_K == 256 +void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) { + assert(k % QK_K == 0); + const int nb = k / QK_K; + + const uint32_t kmask1 = 0x03030303; + const uint32_t kmask2 = 0x0f0f0f0f; + + uint32_t aux[4]; + const int8_t * scales = (const int8_t*)aux; + + for (int i = 0; i < nb; i++) { + + const float d_all = ggml_fp16_to_fp32(x[i].d); + + const uint8_t * restrict q = x[i].qs; + const uint8_t * restrict hm = x[i].hmask; + uint8_t m = 1; + + memcpy(aux, x[i].scales, 12); + uint32_t tmp = aux[2]; + aux[2] = ((aux[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); + aux[3] = ((aux[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); + aux[0] = (aux[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); + aux[1] = (aux[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); + + int is = 0; + float dl; + for (int n = 0; n < QK_K; n += 128) { + int shift = 0; + for (int j = 0; j < 4; ++j) { + + dl = d_all * (scales[is++] - 32); + for (int l = 0; l < 16; ++l) { + *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4)); + } + + dl = d_all * (scales[is++] - 32); + for (int l = 0; l < 16; ++l) { + *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 
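/*
 * Editor's note, not part of this patch: how the q3_K high bit written by the
 * hmask loop above is undone. For QK_K == 256, element j's third bit is stored
 * in bit (j/32) of hmask[j % 32]; the signed value is then recovered from the
 * 2-bit field in qs as
 *   code2 - (high_bit_set ? 0 : 4)
 * which is the "- ((hm[l] & m) ? 0 : 4)" term in dequantize_row_q3_K.
 */
static inline int q3_K_signed_code_sketch(int code2 /* 0..3 */, int high_bit_set) {
    return code2 - (high_bit_set ? 0 : 4);   // result in [-4, 3]
}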
0 : 4)); + } + + shift += 2; + m <<= 1; + } + q += 32; + } + + } +} +#else +void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) { + assert(k % QK_K == 0); + assert(QK_K == 64); + const int nb = k / QK_K; + + for (int i = 0; i < nb; i++) { + + const float d_all = ggml_fp16_to_fp32(x[i].d); + + const uint8_t * restrict q = x[i].qs; + const uint8_t * restrict hm = x[i].hmask; + + const float d1 = d_all * ((x[i].scales[0] & 0xF) - 8); + const float d2 = d_all * ((x[i].scales[0] >> 4) - 8); + const float d3 = d_all * ((x[i].scales[1] & 0xF) - 8); + const float d4 = d_all * ((x[i].scales[1] >> 4) - 8); + + for (int l=0; l<8; ++l) { + uint8_t h = hm[l]; + y[l+ 0] = d1 * ((int8_t)((q[l+0] >> 0) & 3) - ((h & 0x01) ? 0 : 4)); + y[l+ 8] = d1 * ((int8_t)((q[l+8] >> 0) & 3) - ((h & 0x02) ? 0 : 4)); + y[l+16] = d2 * ((int8_t)((q[l+0] >> 2) & 3) - ((h & 0x04) ? 0 : 4)); + y[l+24] = d2 * ((int8_t)((q[l+8] >> 2) & 3) - ((h & 0x08) ? 0 : 4)); + y[l+32] = d3 * ((int8_t)((q[l+0] >> 4) & 3) - ((h & 0x10) ? 0 : 4)); + y[l+40] = d3 * ((int8_t)((q[l+8] >> 4) & 3) - ((h & 0x20) ? 0 : 4)); + y[l+48] = d4 * ((int8_t)((q[l+0] >> 6) & 3) - ((h & 0x40) ? 0 : 4)); + y[l+56] = d4 * ((int8_t)((q[l+8] >> 6) & 3) - ((h & 0x80) ? 0 : 4)); + } + y += QK_K; + } +} +#endif + +void quantize_row_q3_K(const float * restrict x, void * restrict vy, int k) { + quantize_row_q3_K_reference(x, vy, k); +} + +size_t ggml_quantize_q3_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) { + (void)hist; // TODO: collect histograms + + for (int j = 0; j < n; j += k) { + block_q3_K * restrict y = (block_q3_K *)dst + j/QK_K; + quantize_row_q3_K_reference(src + j, y, k); + } + return (n/QK_K*sizeof(block_q3_K)); +} + +// ====================== 4-bit (de)-quantization + +void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k) { + assert(k % QK_K == 0); + const int nb = k / QK_K; + + uint8_t L[QK_K]; + uint8_t Laux[32]; + float weights[32]; + float mins[QK_K/32]; + float scales[QK_K/32]; + + for (int i = 0; i < nb; i++) { + + float max_scale = 0; // as we are deducting the min, scales are always positive + float max_min = 0; + for (int j = 0; j < QK_K/32; ++j) { + //scales[j] = make_qkx1_quants(32, 15, x + 32*j, L + 32*j, &mins[j], 9, 0.5f); + float sum_x2 = 0; + for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l]; + float av_x = sqrtf(sum_x2/32); + for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]); + scales[j] = make_qkx2_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -1.f, 0.1f, 20, false); + float scale = scales[j]; + if (scale > max_scale) { + max_scale = scale; + } + float min = mins[j]; + if (min > max_min) { + max_min = min; + } + } + +#if QK_K == 256 + float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f; + float inv_min = max_min > 0 ? 
63.f/max_min : 0.f; + for (int j = 0; j < QK_K/32; ++j) { + uint8_t ls = nearest_int(inv_scale*scales[j]); + uint8_t lm = nearest_int(inv_min*mins[j]); + ls = MIN(63, ls); + lm = MIN(63, lm); + if (j < 4) { + y[i].scales[j] = ls; + y[i].scales[j+4] = lm; + } else { + y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4); + y[i].scales[j-4] |= ((ls >> 4) << 6); + y[i].scales[j-0] |= ((lm >> 4) << 6); + } + } + y[i].d = ggml_fp32_to_fp16(max_scale/63.f); + y[i].dmin = ggml_fp32_to_fp16(max_min/63.f); + + uint8_t sc, m; + for (int j = 0; j < QK_K/32; ++j) { + get_scale_min_k4(j, y[i].scales, &sc, &m); + const float d = ggml_fp16_to_fp32(y[i].d) * sc; + if (!d) continue; + const float dm = ggml_fp16_to_fp32(y[i].dmin) * m; + for (int ii = 0; ii < 32; ++ii) { + int l = nearest_int((x[32*j + ii] + dm)/d); + l = MAX(0, MIN(15, l)); + L[32*j + ii] = l; + } + } +#else + const float s_factor = 15.f; + float inv_scale = max_scale > 0 ? s_factor/max_scale : 0.f; + float inv_min = max_min > 0 ? s_factor/max_min : 0.f; + int d1 = nearest_int(inv_scale*scales[0]); + int m1 = nearest_int(inv_min*mins[0]); + int d2 = nearest_int(inv_scale*scales[1]); + int m2 = nearest_int(inv_min*mins[1]); + y[i].scales[0] = d1 | (m1 << 4); + y[i].scales[1] = d2 | (m2 << 4); + y[i].d[0] = ggml_fp32_to_fp16(max_scale/s_factor); + y[i].d[1] = ggml_fp32_to_fp16(max_min/s_factor); + + float sumlx = 0; + int suml2 = 0; + for (int j = 0; j < QK_K/32; ++j) { + const uint8_t sd = y[i].scales[j] & 0xF; + const uint8_t sm = y[i].scales[j] >> 4; + const float d = ggml_fp16_to_fp32(y[i].d[0]) * sd; + if (!d) continue; + const float m = ggml_fp16_to_fp32(y[i].d[1]) * sm; + for (int ii = 0; ii < 32; ++ii) { + int l = nearest_int((x[32*j + ii] + m)/d); + l = MAX(0, MIN(15, l)); + L[32*j + ii] = l; + sumlx += (x[32*j + ii] + m)*l*sd; + suml2 += l*l*sd*sd; + } + } + if (suml2) { + y[i].d[0] = ggml_fp32_to_fp16(sumlx/suml2); + } +#endif + uint8_t * q = y[i].qs; + for (int j = 0; j < QK_K; j += 64) { + for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4); + q += 32; + } + + x += QK_K; + + } +} + +void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k) { + assert(k % QK_K == 0); + const int nb = k / QK_K; + + for (int i = 0; i < nb; i++) { + + const uint8_t * q = x[i].qs; + +#if QK_K == 256 + + const float d = ggml_fp16_to_fp32(x[i].d); + const float min = ggml_fp16_to_fp32(x[i].dmin); + + int is = 0; + uint8_t sc, m; + for (int j = 0; j < QK_K; j += 64) { + get_scale_min_k4(is + 0, x[i].scales, &sc, &m); + const float d1 = d * sc; const float m1 = min * m; + get_scale_min_k4(is + 1, x[i].scales, &sc, &m); + const float d2 = d * sc; const float m2 = min * m; + for (int l = 0; l < 32; ++l) *y++ = d1 * (q[l] & 0xF) - m1; + for (int l = 0; l < 32; ++l) *y++ = d2 * (q[l] >> 4) - m2; + q += 32; is += 2; + } +#else + const float dall = ggml_fp16_to_fp32(x[i].d[0]); + const float mall = ggml_fp16_to_fp32(x[i].d[1]); + const float d1 = dall * (x[i].scales[0] & 0xF), m1 = mall * (x[i].scales[0] >> 4); + const float d2 = dall * (x[i].scales[1] & 0xF), m2 = mall * (x[i].scales[1] >> 4); + for (int l = 0; l < 32; ++l) { + y[l+ 0] = d1 * (q[l] & 0xF) - m1; + y[l+32] = d2 * (q[l] >> 4) - m2; + } + y += QK_K; +#endif + + } +} + +void quantize_row_q4_K(const float * restrict x, void * restrict vy, int k) { + assert(k % QK_K == 0); + block_q4_K * restrict y = vy; + quantize_row_q4_K_reference(x, y, k); +} + +size_t ggml_quantize_q4_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) 
{ + assert(k % QK_K == 0); + (void)hist; // TODO: collect histograms + + for (int j = 0; j < n; j += k) { + block_q4_K * restrict y = (block_q4_K *)dst + j/QK_K; + quantize_row_q4_K_reference(src + j, y, k); + } + return (n/QK_K*sizeof(block_q4_K)); +} + +// ====================== 5-bit (de)-quantization + +void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k) { + assert(k % QK_K == 0); + const int nb = k / QK_K; + +#if QK_K == 256 + uint8_t L[QK_K]; + float mins[QK_K/32]; + float scales[QK_K/32]; + float weights[32]; + uint8_t Laux[32]; +#else + int8_t L[QK_K]; + float scales[QK_K/16]; +#endif + + for (int i = 0; i < nb; i++) { + +#if QK_K == 256 + + float max_scale = 0; // as we are deducting the min, scales are always positive + float max_min = 0; + for (int j = 0; j < QK_K/32; ++j) { + //scales[j] = make_qkx1_quants(32, 31, x + 32*j, L + 32*j, &mins[j], 9, 0.5f); + float sum_x2 = 0; + for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l]; + float av_x = sqrtf(sum_x2/32); + for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]); + scales[j] = make_qkx2_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.5f, 0.1f, 15, false); + float scale = scales[j]; + if (scale > max_scale) { + max_scale = scale; + } + float min = mins[j]; + if (min > max_min) { + max_min = min; + } + } + + float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f; + float inv_min = max_min > 0 ? 63.f/max_min : 0.f; + for (int j = 0; j < QK_K/32; ++j) { + uint8_t ls = nearest_int(inv_scale*scales[j]); + uint8_t lm = nearest_int(inv_min*mins[j]); + ls = MIN(63, ls); + lm = MIN(63, lm); + if (j < 4) { + y[i].scales[j] = ls; + y[i].scales[j+4] = lm; + } else { + y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4); + y[i].scales[j-4] |= ((ls >> 4) << 6); + y[i].scales[j-0] |= ((lm >> 4) << 6); } - if (!n_changed) { - break; + } + y[i].d = ggml_fp32_to_fp16(max_scale/63.f); + y[i].dmin = ggml_fp32_to_fp16(max_min/63.f); + + uint8_t sc, m; + for (int j = 0; j < QK_K/32; ++j) { + get_scale_min_k4(j, y[i].scales, &sc, &m); + const float d = ggml_fp16_to_fp32(y[i].d) * sc; + if (!d) continue; + const float dm = ggml_fp16_to_fp32(y[i].dmin) * m; + for (int ii = 0; ii < 32; ++ii) { + int l = nearest_int((x[32*j + ii] + dm)/d); + l = MAX(0, MIN(31, l)); + L[32*j + ii] = l; + } + } + + uint8_t * restrict qh = y[i].qh; + uint8_t * restrict ql = y[i].qs; + memset(qh, 0, QK_K/8); + + uint8_t m1 = 1, m2 = 2; + for (int n = 0; n < QK_K; n += 64) { + for (int j = 0; j < 32; ++j) { + int l1 = L[n + j]; + if (l1 > 15) { + l1 -= 16; qh[j] |= m1; + } + int l2 = L[n + j + 32]; + if (l2 > 15) { + l2 -= 16; qh[j] |= m2; + } + ql[j] = l1 | (l2 << 4); + } + m1 <<= 2; m2 <<= 2; + ql += 32; + } +#else + float max_scale = 0, amax = 0; + for (int j = 0; j < QK_K/16; ++j) { + scales[j] = make_qx_quants(16, 16, x + 16*j, L + 16*j, 1); + float abs_scale = fabsf(scales[j]); + if (abs_scale > amax) { + amax = abs_scale; + max_scale = scales[j]; + } + } + + float iscale = -128.f/max_scale; + for (int j = 0; j < QK_K/16; ++j) { + int l = nearest_int(iscale*scales[j]); + y[i].scales[j] = MAX(-128, MIN(127, l)); + } + y[i].d = ggml_fp32_to_fp16(1/iscale); + + for (int j = 0; j < QK_K/16; ++j) { + const float d = ggml_fp16_to_fp32(y[i].d) * y[i].scales[j]; + if (!d) continue; + for (int ii = 0; ii < 16; ++ii) { + int l = nearest_int(x[16*j + ii]/d); + l = MAX(-16, MIN(15, l)); + L[16*j + ii] = l + 16; + } + } + + uint8_t * restrict qh = y[i].qh; + uint8_t * restrict ql = y[i].qs; + memset(qh, 
0, QK_K/8); + + for (int j = 0; j < 32; ++j) { + int jm = j%8; + int is = j/8; + int l1 = L[j]; + if (l1 > 15) { + l1 -= 16; qh[jm] |= (1 << is); + } + int l2 = L[j + 32]; + if (l2 > 15) { + l2 -= 16; qh[jm] |= (1 << (4 + is)); + } + ql[j] = l1 | (l2 << 4); + } +#endif + + x += QK_K; + + } +} + +void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k) { + assert(k % QK_K == 0); + const int nb = k / QK_K; + + for (int i = 0; i < nb; i++) { + + const uint8_t * ql = x[i].qs; + const uint8_t * qh = x[i].qh; + +#if QK_K == 256 + + const float d = ggml_fp16_to_fp32(x[i].d); + const float min = ggml_fp16_to_fp32(x[i].dmin); + + int is = 0; + uint8_t sc, m; + uint8_t u1 = 1, u2 = 2; + for (int j = 0; j < QK_K; j += 64) { + get_scale_min_k4(is + 0, x[i].scales, &sc, &m); + const float d1 = d * sc; const float m1 = min * m; + get_scale_min_k4(is + 1, x[i].scales, &sc, &m); + const float d2 = d * sc; const float m2 = min * m; + for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 16 : 0)) - m1; + for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 16 : 0)) - m2; + ql += 32; is += 2; + u1 <<= 2; u2 <<= 2; + } +#else + float d = ggml_fp16_to_fp32(x[i].d); + const int8_t * restrict s = x[i].scales; + for (int l = 0; l < 8; ++l) { + y[l+ 0] = d * s[0] * ((ql[l+ 0] & 0xF) - (qh[l] & 0x01 ? 0 : 16)); + y[l+ 8] = d * s[0] * ((ql[l+ 8] & 0xF) - (qh[l] & 0x02 ? 0 : 16)); + y[l+16] = d * s[1] * ((ql[l+16] & 0xF) - (qh[l] & 0x04 ? 0 : 16)); + y[l+24] = d * s[1] * ((ql[l+24] & 0xF) - (qh[l] & 0x08 ? 0 : 16)); + y[l+32] = d * s[2] * ((ql[l+ 0] >> 4) - (qh[l] & 0x10 ? 0 : 16)); + y[l+40] = d * s[2] * ((ql[l+ 8] >> 4) - (qh[l] & 0x20 ? 0 : 16)); + y[l+48] = d * s[3] * ((ql[l+16] >> 4) - (qh[l] & 0x40 ? 0 : 16)); + y[l+56] = d * s[3] * ((ql[l+24] >> 4) - (qh[l] & 0x80 ? 
0 : 16)); + } + y += QK_K; +#endif + } +} + +void quantize_row_q5_K(const float * restrict x, void * restrict vy, int k) { + assert(k % QK_K == 0); + block_q5_K * restrict y = vy; + quantize_row_q5_K_reference(x, y, k); +} + +size_t ggml_quantize_q5_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) { + assert(k % QK_K == 0); + (void)hist; // TODO: collect histograms + + for (int j = 0; j < n; j += k) { + block_q5_K * restrict y = (block_q5_K *)dst + j/QK_K; + quantize_row_q5_K_reference(src + j, y, k); + } + return (n/QK_K*sizeof(block_q5_K)); +} + +// ====================== 6-bit (de)-quantization + +void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k) { + assert(k % QK_K == 0); + const int nb = k / QK_K; + + int8_t L[QK_K]; + float scales[QK_K/16]; + + for (int i = 0; i < nb; i++) { + + float max_scale = 0; + float max_abs_scale = 0; + + for (int ib = 0; ib < QK_K/16; ++ib) { + + const float scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1); + scales[ib] = scale; + + const float abs_scale = fabsf(scale); + if (abs_scale > max_abs_scale) { + max_abs_scale = abs_scale; + max_scale = scale; + } + + } + + if (!max_abs_scale) { + memset(&y[i], 0, sizeof(block_q6_K)); + y[i].d = ggml_fp32_to_fp16(0.f); + x += QK_K; + continue; + } + + float iscale = -128.f/max_scale; + y[i].d = ggml_fp32_to_fp16(1/iscale); + for (int ib = 0; ib < QK_K/16; ++ib) { + y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib])); + } + + for (int j = 0; j < QK_K/16; ++j) { + float d = ggml_fp16_to_fp32(y[i].d) * y[i].scales[j]; + if (!d) { + continue; + } + for (int ii = 0; ii < 16; ++ii) { + int l = nearest_int(x[16*j + ii]/d); + l = MAX(-32, MIN(31, l)); + L[16*j + ii] = l + 32; + } + } + + uint8_t * restrict ql = y[i].ql; + uint8_t * restrict qh = y[i].qh; +#if QK_K == 256 + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) { + const uint8_t q1 = L[j + l + 0] & 0xF; + const uint8_t q2 = L[j + l + 32] & 0xF; + const uint8_t q3 = L[j + l + 64] & 0xF; + const uint8_t q4 = L[j + l + 96] & 0xF; + ql[l+ 0] = q1 | (q3 << 4); + ql[l+32] = q2 | (q4 << 4); + qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6); + } + ql += 64; + qh += 32; + } +#else + for (int l = 0; l < 32; ++l) { + const uint8_t q1 = L[l + 0] & 0xF; + const uint8_t q2 = L[l + 32] & 0xF; + ql[l] = q1 | (q2 << 4); + } + for (int l = 0; l < 16; ++l) { + qh[l] = (L[l] >> 4) | ((L[l + 16] >> 4) << 2) | ((L[l + 32] >> 4) << 4) | ((L[l + 48] >> 4) << 6); + } +#endif + + x += QK_K; + + } +} + +void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k) { + assert(k % QK_K == 0); + const int nb = k / QK_K; + + for (int i = 0; i < nb; i++) { + + const float d = ggml_fp16_to_fp32(x[i].d); + + const uint8_t * restrict ql = x[i].ql; + const uint8_t * restrict qh = x[i].qh; + const int8_t * restrict sc = x[i].scales; + +#if QK_K == 256 + for (int n = 0; n < QK_K; n += 128) { + for (int l = 0; l < 32; ++l) { + int is = l/16; + const int8_t q1 = (int8_t)((ql[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; + const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; + const int8_t q3 = (int8_t)((ql[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; + const int8_t q4 = (int8_t)((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; + y[l + 0] = d * sc[is + 0] * q1; + y[l + 32] = d * sc[is + 2] * q2; + y[l + 64] = d * sc[is + 4] * q3; + y[l + 96] = d * sc[is + 6] * q4; } 
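/*
 * Editor's note, not part of this patch: summary of the q6_K reconstruction in
 * the loop above. The quantizer splits each 6-bit code into a low nibble in ql
 * and a 2-bit field in qh; the dequantizer glues them back together and
 * re-centers by 32, then applies the per-16-value int8 sub-block scale and the
 * fp16 super-scale d:
 *   q = (low_nibble | (two_high_bits << 4)) - 32    // signed code in [-32, 31]
 *   y = d * scales[sub_block] * q
 */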
+ y += 128; + ql += 64; + qh += 32; + sc += 8; } - for (int i = 0; i < n; ++i) { - L[i] += nmax; +#else + for (int l = 0; l < 16; ++l) { + const int8_t q1 = (int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; + const int8_t q2 = (int8_t)((ql[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; + const int8_t q3 = (int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; + const int8_t q4 = (int8_t)((ql[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; + y[l+ 0] = d * sc[0] * q1; + y[l+16] = d * sc[1] * q2; + y[l+32] = d * sc[2] * q3; + y[l+48] = d * sc[3] * q4; } - return sumlx / suml2; - } - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale * x[i]); - l = MAX(-nmax, MIN(nmax-1, l)); - L[i] = l + nmax; + y += 64; +#endif + } - return 1/iscale; } -static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min, - int ntry, float alpha) { - float min = x[0]; - float max = x[0]; - for (int i = 1; i < n; ++i) { - if (x[i] < min) min = x[i]; - if (x[i] > max) max = x[i]; - } - if (max == min) { - for (int i = 0; i < n; ++i) L[i] = 0; - *the_min = 0; - return 0.f; +void quantize_row_q6_K(const float * restrict x, void * restrict vy, int k) { + assert(k % QK_K == 0); + block_q6_K * restrict y = vy; + quantize_row_q6_K_reference(x, y, k); +} + +size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist) { + assert(k % QK_K == 0); + (void)hist; // TODO: collect histograms + + for (int j = 0; j < n; j += k) { + block_q6_K * restrict y = (block_q6_K *)dst + j/QK_K; + quantize_row_q6_K_reference(src + j, y, k); } - if (min > 0) min = 0; - float iscale = nmax/(max - min); - float scale = 1/iscale; - for (int itry = 0; itry < ntry; ++itry) { - float sumlx = 0; int suml2 = 0; - bool did_change = false; - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale*(x[i] - min)); - l = MAX(0, MIN(nmax, l)); - if (l != L[i]) { - L[i] = l; - did_change = true; + return (n/QK_K*sizeof(block_q6_K)); +} + +//===================================== Q8_K ============================================== + +void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k) { + assert(k % QK_K == 0); + const int nb = k / QK_K; + + for (int i = 0; i < nb; i++) { + + float max = 0; + float amax = 0; + for (int j = 0; j < QK_K; ++j) { + float ax = fabsf(x[j]); + if (ax > amax) { + amax = ax; max = x[j]; } - sumlx += (x[i] - min)*l; - suml2 += l*l; } - scale = sumlx/suml2; - float sum = 0; - for (int i = 0; i < n; ++i) { - sum += x[i] - scale*L[i]; + if (!amax) { + y[i].d = 0; + memset(y[i].qs, 0, QK_K); + x += QK_K; + continue; } - min = alpha*min + (1 - alpha)*sum/n; - if (min > 0) min = 0; - iscale = 1/scale; - if (!did_change) break; - } - *the_min = -min; - return scale; -} - -static float make_qkx2_quants(int n, int nmax, const float * restrict x, const float * restrict weights, - uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux, - float rmin, float rdelta, int nstep, bool use_mad) { - float min = x[0]; - float max = x[0]; - float sum_w = weights[0]; - float sum_x = sum_w * x[0]; - for (int i = 1; i < n; ++i) { - if (x[i] < min) min = x[i]; - if (x[i] > max) max = x[i]; - float w = weights[i]; - sum_w += w; - sum_x += w * x[i]; - } - if (min > 0) min = 0; - if (max == min) { - for (int i = 0; i < n; ++i) L[i] = 0; - *the_min = -min; - return 0.f; - } - float iscale = nmax/(max - min); - float scale = 1/iscale; - float best_mad = 0; - for (int i = 0; i < n; ++i) { - int l = 
nearest_int(iscale*(x[i] - min)); - L[i] = MAX(0, MIN(nmax, l)); - float diff = scale * L[i] + min - x[i]; - diff = use_mad ? fabsf(diff) : diff * diff; - float w = weights[i]; - best_mad += w * diff; - } - if (nstep < 1) { - *the_min = -min; - return scale; - } - for (int is = 0; is <= nstep; ++is) { - iscale = (rmin + rdelta*is + nmax)/(max - min); - float sum_l = 0, sum_l2 = 0, sum_xl = 0; - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale*(x[i] - min)); - l = MAX(0, MIN(nmax, l)); - Laux[i] = l; - float w = weights[i]; - sum_l += w*l; - sum_l2 += w*l*l; - sum_xl += w*l*x[i]; + const float iscale = -128.f/max; + for (int j = 0; j < QK_K; ++j) { + int v = nearest_int(iscale*x[j]); + y[i].qs[j] = MIN(127, v); } - float D = sum_w * sum_l2 - sum_l * sum_l; - if (D > 0) { - float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D; - float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D; - if (this_min > 0) { - this_min = 0; - this_scale = sum_xl / sum_l2; - } - float mad = 0; - for (int i = 0; i < n; ++i) { - float diff = this_scale * Laux[i] + this_min - x[i]; - diff = use_mad ? fabsf(diff) : diff * diff; - float w = weights[i]; - mad += w * diff; - } - if (mad < best_mad) { - for (int i = 0; i < n; ++i) { - L[i] = Laux[i]; - } - best_mad = mad; - scale = this_scale; - min = this_min; + for (int j = 0; j < QK_K/16; ++j) { + int sum = 0; + for (int ii = 0; ii < 16; ++ii) { + sum += y[i].qs[j*16 + ii]; } + y[i].bsums[j] = sum; } + y[i].d = 1/iscale; + x += QK_K; } - *the_min = -min; - return scale; } -#if QK_K == 256 -static inline void get_scale_min_k4(int j, const uint8_t * restrict q, uint8_t * restrict d, uint8_t * restrict m) { - if (j < 4) { - *d = q[j] & 63; *m = q[j + 4] & 63; - } else { - *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4); - *m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4); +void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k) { + assert(k % QK_K == 0); + const int nb = k / QK_K; + + for (int i = 0; i < nb; i++) { + for (int j = 0; j < QK_K; ++j) { + *y++ = x[i].d * x[i].qs[j]; + } } } + +void quantize_row_q8_K(const float * restrict x, void * restrict y, int k) { + quantize_row_q8_K_reference(x, y, k); +} + +//===================================== Dot ptoducts ================================= + +// +// Helper functions +// +#if __AVX__ || __AVX2__ || __AVX512F__ + +// shuffles to pick the required scales in dot products +static inline __m256i get_scale_shuffle_q3k(int i) { + static const uint8_t k_shuffle[128] = { + 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, + 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, + 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, + 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15, + }; + return _mm256_loadu_si256((const __m256i*)k_shuffle + i); +} +static inline __m256i get_scale_shuffle_k4(int i) { + static const uint8_t k_shuffle[256] = { + 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, + 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, + 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, + 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, + 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, + 
10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, + 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, + 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15 + }; + return _mm256_loadu_si256((const __m256i*)k_shuffle + i); +} +static inline __m128i get_scale_shuffle(int i) { + static const uint8_t k_shuffle[128] = { + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, + 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, + 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11, + 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13, + 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15 + }; + return _mm_loadu_si128((const __m128i*)k_shuffle + i); +} #endif -//========================- 2-bit (de)-quantization +void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + + const block_q4_0 * restrict x = vx; + const block_q8_0 * restrict y = vy; -void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; +#if defined(__ARM_NEON) + float32x4_t sumv0 = vdupq_n_f32(0.0f); + float32x4_t sumv1 = vdupq_n_f32(0.0f); - uint8_t L[QK_K]; - uint8_t Laux[16]; - float weights[16]; - float mins[QK_K/16]; - float scales[QK_K/16]; + assert(nb % 2 == 0); // TODO: handle odd nb - const float q4scale = 15.f; + for (int i = 0; i < nb; i += 2) { + const block_q4_0 * restrict x0 = &x[i + 0]; + const block_q4_0 * restrict x1 = &x[i + 1]; + const block_q8_0 * restrict y0 = &y[i + 0]; + const block_q8_0 * restrict y1 = &y[i + 1]; - for (int i = 0; i < nb; i++) { - float max_scale = 0; // as we are deducting the min, scales are always positive - float max_min = 0; - for (int j = 0; j < QK_K/16; ++j) { - for (int l = 0; l < 16; ++l) weights[l] = fabsf(x[16*j + l]); - scales[j] = make_qkx2_quants(16, 3, x + 16*j, weights, L + 16*j, &mins[j], Laux, -0.5f, 0.1f, 15, true); - float scale = scales[j]; - if (scale > max_scale) { - max_scale = scale; - } - float min = mins[j]; - if (min > max_min) { - max_min = min; - } - } + const uint8x16_t m4b = vdupq_n_u8(0x0F); + const int8x16_t s8b = vdupq_n_s8(0x8); - if (max_scale > 0) { - float iscale = q4scale/max_scale; - for (int j = 0; j < QK_K/16; ++j) { - int l = nearest_int(iscale*scales[j]); - y[i].scales[j] = l; - } - y[i].d = ggml_fp32_to_fp16(max_scale/q4scale); - } else { - for (int j = 0; j < QK_K/16; ++j) y[i].scales[j] = 0; - y[i].d = ggml_fp32_to_fp16(0.f); - } - if (max_min > 0) { - float iscale = q4scale/max_min; - for (int j = 0; j < QK_K/16; ++j) { - int l = nearest_int(iscale*mins[j]); - y[i].scales[j] |= (l << 4); - } - y[i].dmin = ggml_fp32_to_fp16(max_min/q4scale); - } else { - y[i].dmin = ggml_fp32_to_fp16(0.f); - } - for (int j = 0; j < QK_K/16; ++j) { - const float d = ggml_fp16_to_fp32(y[i].d) * (y[i].scales[j] & 0xF); - if (!d) continue; - const float dm = ggml_fp16_to_fp32(y[i].dmin) * (y[i].scales[j] >> 4); - for (int ii = 0; ii < 16; ++ii) { - int l = nearest_int((x[16*j + ii] + dm)/d); - l = MAX(0, MIN(3, l)); - L[16*j + ii] = l; - } - } + const uint8x16_t v0_0 = vld1q_u8(x0->qs); + const uint8x16_t v0_1 = vld1q_u8(x1->qs); -#if QK_K == 256 - for (int j = 0; j < QK_K; j += 128) { - 
for (int l = 0; l < 32; ++l) { - y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6); - } - } + // 4-bit -> 8-bit + const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); + const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); + const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); + const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); + + // sub 8 + const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b); + const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b); + const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b); + const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b); + + // load y + const int8x16_t v1_0l = vld1q_s8(y0->qs); + const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); + const int8x16_t v1_1l = vld1q_s8(y1->qs); + const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); + +#if defined(__ARM_FEATURE_DOTPROD) + // dot product into int32x4_t + const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h); + const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), ggml_fp16_to_fp32(x0->d)*ggml_fp16_to_fp32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), ggml_fp16_to_fp32(x1->d)*ggml_fp16_to_fp32(y1->d)); #else - for (int l = 0; l < 16; ++l) { - y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6); - } + const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0l)); + const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l)); + const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0h)); + const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0h)); + + const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1l)); + const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1l)); + const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1h)); + const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1h)); + + const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); + const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); + const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); + const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), ggml_fp16_to_fp32(x0->d)*ggml_fp16_to_fp32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), ggml_fp16_to_fp32(x1->d)*ggml_fp16_to_fp32(y1->d)); #endif + } - x += QK_K; + *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); +#elif defined(__AVX2__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + + // Main loop + for (int i = 0; i < nb; ++i) { + /* Compute combined scale for the block */ + const __m256 d = _mm256_set1_ps( ggml_fp16_to_fp32(x[i].d) * ggml_fp16_to_fp32(y[i].d) ); + + __m256i bx = bytes_from_nibbles_32(x[i].qs); + // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. 
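+        // Q4_0 stores unsigned nibbles with an implicit -8 offset, i.e. each block contributes
+        // d_x * d_y * sum_j (qx[j] - 8) * qy[j], so subtract 8 to recover the signed quants before the int8 dot product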
+ const __m256i off = _mm256_set1_epi8( 8 ); + bx = _mm256_sub_epi8( bx, off ); + + __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); + + const __m256 q = mul_sum_i8_pairs_float(bx, by); + + /* Multiply q with scale and accumulate */ + acc = _mm256_fmadd_ps( d, q, acc ); } -} -void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; + *s = hsum_float_8(acc); +#elif defined(__AVX__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); - for (int i = 0; i < nb; i++) { + // Main loop + for (int i = 0; i < nb; ++i) { + // Compute combined scale for the block + const __m256 d = _mm256_set1_ps( ggml_fp16_to_fp32(x[i].d) * ggml_fp16_to_fp32(y[i].d) ); - const float d = ggml_fp16_to_fp32(x[i].d); - const float min = ggml_fp16_to_fp32(x[i].dmin); + const __m128i lowMask = _mm_set1_epi8(0xF); + const __m128i off = _mm_set1_epi8(8); - const uint8_t * q = x[i].qs; + const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs); -#if QK_K == 256 - int is = 0; - float dl, ml; - for (int n = 0; n < QK_K; n += 128) { - int shift = 0; - for (int j = 0; j < 4; ++j) { + __m128i bx = _mm_and_si128(lowMask, tmp); + __m128i by = _mm_loadu_si128((const __m128i *)y[i].qs); + bx = _mm_sub_epi8(bx, off); + const __m128i i32_0 = mul_sum_i8_pairs(bx, by); - uint8_t sc = x[i].scales[is++]; - dl = d * (sc & 0xF); ml = min * (sc >> 4); - for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml; + bx = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4)); + by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16)); + bx = _mm_sub_epi8(bx, off); + const __m128i i32_1 = mul_sum_i8_pairs(bx, by); - sc = x[i].scales[is++]; - dl = d * (sc & 0xF); ml = min * (sc >> 4); - for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml; + // Convert int32_t to float + __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1)); - shift += 2; - } - q += 32; - } -#else - float dl1 = d * (x[i].scales[0] & 0xF), ml1 = min * (x[i].scales[0] >> 4); - float dl2 = d * (x[i].scales[1] & 0xF), ml2 = min * (x[i].scales[1] >> 4); - float dl3 = d * (x[i].scales[2] & 0xF), ml3 = min * (x[i].scales[2] >> 4); - float dl4 = d * (x[i].scales[3] & 0xF), ml4 = min * (x[i].scales[3] >> 4); - for (int l = 0; l < 16; ++l) { - y[l+ 0] = dl1 * ((int8_t)((q[l] >> 0) & 3)) - ml1; - y[l+16] = dl2 * ((int8_t)((q[l] >> 2) & 3)) - ml2; - y[l+32] = dl3 * ((int8_t)((q[l] >> 4) & 3)) - ml3; - y[l+48] = dl4 * ((int8_t)((q[l] >> 6) & 3)) - ml4; - } - y += QK_K; -#endif + // Apply the scale, and accumulate + acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc); } -} -void quantize_row_q2_K(const float * restrict x, void * restrict vy, int k) { - quantize_row_q2_K_reference(x, vy, k); -} + *s = hsum_float_8(acc); +#elif defined(__SSSE3__) + // set constants + const __m128i lowMask = _mm_set1_epi8(0xF); + const __m128i off = _mm_set1_epi8(8); + + // Initialize accumulator with zeros + __m128 acc_0 = _mm_setzero_ps(); + __m128 acc_1 = _mm_setzero_ps(); + __m128 acc_2 = _mm_setzero_ps(); + __m128 acc_3 = _mm_setzero_ps(); + + // First round without accumulation + { + _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0); + _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0); + + // Compute combined scale for the block 0 and 1 + const __m128 d_0_1 = _mm_set1_ps( ggml_fp16_to_fp32(x[0].d) * ggml_fp16_to_fp32(y[0].d) ); + + const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs); + + __m128i bx_0 = _mm_and_si128(lowMask, 
tmp_0_1); + __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs); + bx_0 = _mm_sub_epi8(bx_0, off); + const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0); + + __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4)); + __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16)); + bx_1 = _mm_sub_epi8(bx_1, off); + const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1); + + _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0); + _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0); + + // Compute combined scale for the block 2 and 3 + const __m128 d_2_3 = _mm_set1_ps( ggml_fp16_to_fp32(x[1].d) * ggml_fp16_to_fp32(y[1].d) ); + + const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs); + + __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3); + __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs); + bx_2 = _mm_sub_epi8(bx_2, off); + const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2); + + __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4)); + __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16)); + bx_3 = _mm_sub_epi8(bx_3, off); + const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3); + + // Convert int32_t to float + __m128 p0 = _mm_cvtepi32_ps(i32_0); + __m128 p1 = _mm_cvtepi32_ps(i32_1); + __m128 p2 = _mm_cvtepi32_ps(i32_2); + __m128 p3 = _mm_cvtepi32_ps(i32_3); + + // Apply the scale + acc_0 = _mm_mul_ps( d_0_1, p0 ); + acc_1 = _mm_mul_ps( d_0_1, p1 ); + acc_2 = _mm_mul_ps( d_2_3, p2 ); + acc_3 = _mm_mul_ps( d_2_3, p3 ); + } -size_t ggml_quantize_q2_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) { - (void)hist; // TODO: collect histograms + assert(nb % 2 == 0); // TODO: handle odd nb - for (int j = 0; j < n; j += k) { - block_q2_K * restrict y = (block_q2_K *)dst + j/QK_K; - quantize_row_q2_K_reference(src + j, y, k); - } - return (n/QK_K*sizeof(block_q2_K)); -} + // Main loop + for (int i = 2; i < nb; i+=2) { + _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0); + _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0); -//========================= 3-bit (de)-quantization + // Compute combined scale for the block 0 and 1 + const __m128 d_0_1 = _mm_set1_ps( ggml_fp16_to_fp32(x[i].d) * ggml_fp16_to_fp32(y[i].d) ); -void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; + const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs); - int8_t L[QK_K]; - float scales[QK_K / 16]; + __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1); + __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs); + bx_0 = _mm_sub_epi8(bx_0, off); + const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0); + + __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4)); + __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16)); + bx_1 = _mm_sub_epi8(bx_1, off); + const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1); + + _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0); + _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0); + + // Compute combined scale for the block 2 and 3 + const __m128 d_2_3 = _mm_set1_ps( ggml_fp16_to_fp32(x[i + 1].d) * ggml_fp16_to_fp32(y[i + 1].d) ); + + const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs); + + __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3); + __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs); + bx_2 = _mm_sub_epi8(bx_2, off); + const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2); + + __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4)); + 
__m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16)); + bx_3 = _mm_sub_epi8(bx_3, off); + const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3); + + // Convert int32_t to float + __m128 p0 = _mm_cvtepi32_ps(i32_0); + __m128 p1 = _mm_cvtepi32_ps(i32_1); + __m128 p2 = _mm_cvtepi32_ps(i32_2); + __m128 p3 = _mm_cvtepi32_ps(i32_3); + + // Apply the scale + __m128 p0_d = _mm_mul_ps( d_0_1, p0 ); + __m128 p1_d = _mm_mul_ps( d_0_1, p1 ); + __m128 p2_d = _mm_mul_ps( d_2_3, p2 ); + __m128 p3_d = _mm_mul_ps( d_2_3, p3 ); + + // Acummulate + acc_0 = _mm_add_ps(p0_d, acc_0); + acc_1 = _mm_add_ps(p1_d, acc_1); + acc_2 = _mm_add_ps(p2_d, acc_2); + acc_3 = _mm_add_ps(p3_d, acc_3); + } + + *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3); +#elif defined(__riscv_v_intrinsic) + float sumf = 0.0; + + size_t vl = __riscv_vsetvl_e8m1(qk/2); for (int i = 0; i < nb; i++) { + // load elements + vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl); - float max_scale = 0; - float amax = 0; - for (int j = 0; j < QK_K/16; ++j) { - scales[j] = make_q3_quants(16, 4, x + 16*j, L + 16*j, true); - float scale = fabsf(scales[j]); - if (scale > amax) { - amax = scale; max_scale = scales[j]; - } - } + vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl); + vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl); -#if QK_K == 256 - memset(y[i].scales, 0, 12); - if (max_scale) { - float iscale = -32.f/max_scale; - for (int j = 0; j < QK_K/16; ++j) { - int8_t l = nearest_int(iscale*scales[j]); - l = MAX(-32, MIN(31, l)) + 32; - if (j < 8) { - y[i].scales[j] = l & 0xF; - } else { - y[i].scales[j-8] |= ((l & 0xF) << 4); - } - l >>= 4; - y[i].scales[j%4 + 8] |= (l << (2*(j/4))); - } - y[i].d = ggml_fp32_to_fp16(1/iscale); - } else { - y[i].d = ggml_fp32_to_fp16(0.f); - } + // mask and store lower part of x, and then upper part + vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl); + vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl); - int8_t sc; - for (int j = 0; j < QK_K/16; ++j) { - sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4; - sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32; - float d = ggml_fp16_to_fp32(y[i].d) * sc; - if (!d) { - continue; - } - for (int ii = 0; ii < 16; ++ii) { - int l = nearest_int(x[16*j + ii]/d); - l = MAX(-4, MIN(3, l)); - L[16*j + ii] = l + 4; - } - } -#else - if (max_scale) { - float iscale = -8.f/max_scale; - for (int j = 0; j < QK_K/16; j+=2) { - int l1 = nearest_int(iscale*scales[j]); - l1 = 8 + MAX(-8, MIN(7, l1)); - int l2 = nearest_int(iscale*scales[j+1]); - l2 = 8 + MAX(-8, MIN(7, l2)); - y[i].scales[j/2] = l1 | (l2 << 4); - } - y[i].d = ggml_fp32_to_fp16(1/iscale); - } else { - for (int j = 0; j < QK_K/16; j+=2) { - y[i].scales[j/2] = 0; - } - y[i].d = ggml_fp32_to_fp16(0.f); - } - for (int j = 0; j < QK_K/16; ++j) { - int s = j%2 == 0 ? y[i].scales[j/2] & 0xF : y[i].scales[j/2] >> 4; - float d = ggml_fp16_to_fp32(y[i].d) * (s - 8); - if (!d) { - continue; - } - for (int ii = 0; ii < 16; ++ii) { - int l = nearest_int(x[16*j + ii]/d); - l = MAX(-4, MIN(3, l)); - L[16*j + ii] = l + 4; - } - } -#endif + vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a); + vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l); - memset(y[i].hmask, 0, QK_K/8); - // We put the high-bit for the 1st 8 quants into bit 0, the next 8 into bit 1, etc. 
- int m = 0; - uint8_t hm = 1; - for (int j = 0; j < QK_K; ++j) { - if (L[j] > 3) { - y[i].hmask[m] |= hm; - L[j] -= 4; - } - if (++m == QK_K/8) { - m = 0; hm <<= 1; - } - } -#if QK_K == 256 - for (int j = 0; j < QK_K; j += 128) { - for (int l = 0; l < 32; ++l) { - y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6); - } - } + // subtract offset + vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 8, vl); + vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 8, vl); + + vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl); + vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl); + + vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); + + vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl); + vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl); + + int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); + + sumf += sumi*ggml_fp16_to_fp32(x[i].d)*ggml_fp16_to_fp32(y[i].d); + } + + *s = sumf; #else - for (int l = 0; l < 16; ++l) { - y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6); + // scalar + float sumf = 0.0; + + for (int i = 0; i < nb; i++) { + int sumi = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[i].qs[j] & 0x0F) - 8; + const int v1 = (x[i].qs[j] >> 4) - 8; + + sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]); } -#endif - x += QK_K; + sumf += sumi*ggml_fp16_to_fp32(x[i].d)*ggml_fp16_to_fp32(y[i].d); } + + *s = sumf; +#endif } -#if QK_K == 256 -void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; +void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { + const int qk = QK8_1; + const int nb = n / qk; - const uint32_t kmask1 = 0x03030303; - const uint32_t kmask2 = 0x0f0f0f0f; + assert(n % qk == 0); - uint32_t aux[4]; - const int8_t * scales = (const int8_t*)aux; + const block_q4_1 * restrict x = vx; + const block_q8_1 * restrict y = vy; - for (int i = 0; i < nb; i++) { + // TODO: add WASM SIMD +#if defined(__ARM_NEON) + float32x4_t sumv0 = vdupq_n_f32(0.0f); + float32x4_t sumv1 = vdupq_n_f32(0.0f); - const float d_all = ggml_fp16_to_fp32(x[i].d); + float summs = 0; - const uint8_t * restrict q = x[i].qs; - const uint8_t * restrict hm = x[i].hmask; - uint8_t m = 1; + assert(nb % 2 == 0); // TODO: handle odd nb - memcpy(aux, x[i].scales, 12); - uint32_t tmp = aux[2]; - aux[2] = ((aux[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); - aux[3] = ((aux[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); - aux[0] = (aux[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); - aux[1] = (aux[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); + for (int i = 0; i < nb; i += 2) { + const block_q4_1 * restrict x0 = &x[i + 0]; + const block_q4_1 * restrict x1 = &x[i + 1]; + const block_q8_1 * restrict y0 = &y[i + 0]; + const block_q8_1 * restrict y1 = &y[i + 1]; - int is = 0; - float dl; - for (int n = 0; n < QK_K; n += 128) { - int shift = 0; - for (int j = 0; j < 4; ++j) { + summs += ggml_fp16_to_fp32(x0->m) * y0->s + ggml_fp16_to_fp32(x1->m) * y1->s; - dl = d_all * (scales[is++] - 32); - for (int l = 0; l < 16; ++l) { - *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4)); - } + const uint8x16_t m4b = vdupq_n_u8(0x0F); - dl = d_all * (scales[is++] - 32); - for (int l = 0; l < 16; ++l) { - *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 
0 : 4)); - } + const uint8x16_t v0_0 = vld1q_u8(x0->qs); + const uint8x16_t v0_1 = vld1q_u8(x1->qs); - shift += 2; - m <<= 1; - } - q += 32; - } + // 4-bit -> 8-bit + const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); + const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); + const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); + const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); + // load y + const int8x16_t v1_0l = vld1q_s8(y0->qs); + const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); + const int8x16_t v1_1l = vld1q_s8(y1->qs); + const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); + +#if defined(__ARM_FEATURE_DOTPROD) + // dot product into int32x4_t + const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h); + const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), ggml_fp16_to_fp32(x0->d)*y0->d); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), ggml_fp16_to_fp32(x1->d)*y1->d); +#else + const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0l), vget_low_s8 (v1_0l)); + const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0l), vget_high_s8(v1_0l)); + const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0h), vget_low_s8 (v1_0h)); + const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0h), vget_high_s8(v1_0h)); + + const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1l), vget_low_s8 (v1_1l)); + const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1l), vget_high_s8(v1_1l)); + const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1h), vget_low_s8 (v1_1h)); + const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1h), vget_high_s8(v1_1h)); + + const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); + const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); + const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); + const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), ggml_fp16_to_fp32(x0->d)*y0->d); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), ggml_fp16_to_fp32(x1->d)*y1->d); +#endif } -} + + *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs; +#elif defined(__AVX2__) || defined(__AVX__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + + float summs = 0; + + // Main loop + for (int i = 0; i < nb; ++i) { + const float d0 = ggml_fp16_to_fp32(x[i].d); + const float d1 = y[i].d; + + summs += ggml_fp16_to_fp32(x[i].m) * y[i].s; + + const __m256 d0v = _mm256_set1_ps( d0 ); + const __m256 d1v = _mm256_set1_ps( d1 ); + + // Compute combined scales + const __m256 d0d1 = _mm256_mul_ps( d0v, d1v ); + + // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes + const __m256i bx = bytes_from_nibbles_32(x[i].qs); + const __m256i by = _mm256_loadu_si256( (const __m256i *)y[i].qs ); + + const __m256 xy = mul_sum_us8_pairs_float(bx, by); + + // Accumulate d0*d1*x*y +#if defined(__AVX2__) + acc = _mm256_fmadd_ps( d0d1, xy, acc ); #else -void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) { - assert(k % QK_K == 0); - assert(QK_K == 64); - const int nb = k / QK_K; + acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc ); +#endif + } + + *s = hsum_float_8(acc) + summs; +#elif defined(__riscv_v_intrinsic) + float sumf = 0.0; + + size_t vl = __riscv_vsetvl_e8m1(qk/2); for (int i = 0; i < nb; i++) { + // load elements + vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl); - const float 
d_all = ggml_fp16_to_fp32(x[i].d); + vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl); + vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl); - const uint8_t * restrict q = x[i].qs; - const uint8_t * restrict hm = x[i].hmask; + // mask and store lower part of x, and then upper part + vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl); + vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl); - const float d1 = d_all * ((x[i].scales[0] & 0xF) - 8); - const float d2 = d_all * ((x[i].scales[0] >> 4) - 8); - const float d3 = d_all * ((x[i].scales[1] & 0xF) - 8); - const float d4 = d_all * ((x[i].scales[1] >> 4) - 8); + vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a); + vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l); - for (int l=0; l<8; ++l) { - uint8_t h = hm[l]; - y[l+ 0] = d1 * ((int8_t)((q[l+0] >> 0) & 3) - ((h & 0x01) ? 0 : 4)); - y[l+ 8] = d1 * ((int8_t)((q[l+8] >> 0) & 3) - ((h & 0x02) ? 0 : 4)); - y[l+16] = d2 * ((int8_t)((q[l+0] >> 2) & 3) - ((h & 0x04) ? 0 : 4)); - y[l+24] = d2 * ((int8_t)((q[l+8] >> 2) & 3) - ((h & 0x08) ? 0 : 4)); - y[l+32] = d3 * ((int8_t)((q[l+0] >> 4) & 3) - ((h & 0x10) ? 0 : 4)); - y[l+40] = d3 * ((int8_t)((q[l+8] >> 4) & 3) - ((h & 0x20) ? 0 : 4)); - y[l+48] = d4 * ((int8_t)((q[l+0] >> 6) & 3) - ((h & 0x40) ? 0 : 4)); - y[l+56] = d4 * ((int8_t)((q[l+8] >> 6) & 3) - ((h & 0x80) ? 0 : 4)); + vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl); + vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl); + + vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); + + vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl); + vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl); + + int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); + + sumf += (ggml_fp16_to_fp32(x[i].d)*y[i].d)*sumi + ggml_fp16_to_fp32(x[i].m)*y[i].s; + } + + *s = sumf; +#else + // scalar + float sumf = 0.0; + + for (int i = 0; i < nb; i++) { + int sumi = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[i].qs[j] & 0x0F); + const int v1 = (x[i].qs[j] >> 4); + + sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]); } - y += QK_K; + + sumf += (ggml_fp16_to_fp32(x[i].d)*y[i].d)*sumi + ggml_fp16_to_fp32(x[i].m)*y[i].s; } + + *s = sumf; +#endif } + +void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(qk == QK5_0); + + const block_q5_0 * restrict x = vx; + const block_q8_0 * restrict y = vy; + +#if defined(__ARM_NEON) + float32x4_t sumv0 = vdupq_n_f32(0.0f); + float32x4_t sumv1 = vdupq_n_f32(0.0f); + + uint32_t qh0; + uint32_t qh1; + + uint64_t tmp0[4]; + uint64_t tmp1[4]; + + assert(nb % 2 == 0); // TODO: handle odd nb + + for (int i = 0; i < nb; i += 2) { + const block_q5_0 * restrict x0 = &x[i]; + const block_q5_0 * restrict x1 = &x[i + 1]; + const block_q8_0 * restrict y0 = &y[i]; + const block_q8_0 * restrict y1 = &y[i + 1]; + + const uint8x16_t m4b = vdupq_n_u8(0x0F); + + // extract the 5th bit via lookup table ((!b) << 4) + memcpy(&qh0, x0->qh, sizeof(qh0)); + memcpy(&qh1, x1->qh, sizeof(qh1)); + + tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF]; + tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF]; + tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF]; + tmp0[3] = table_b2b_1[(qh0 >> 24) ]; + + tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF]; + tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF]; + tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF]; + tmp1[3] = table_b2b_1[(qh1 >> 24) ]; + + const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 
0)); + const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2)); + const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0)); + const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2)); + + const uint8x16_t v0_0 = vld1q_u8(x0->qs); + const uint8x16_t v0_1 = vld1q_u8(x1->qs); + + // 4-bit -> 8-bit + int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); + int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); + int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); + int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); + + // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero) + const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0); + const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0); + const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1); + const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1); + + // load y + const int8x16_t v1_0l = vld1q_s8(y0->qs); + const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); + const int8x16_t v1_1l = vld1q_s8(y1->qs); + const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); + +#if defined(__ARM_FEATURE_DOTPROD) + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( + vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), + vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), ggml_fp16_to_fp32(x0->d)*ggml_fp16_to_fp32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( + vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), + vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), ggml_fp16_to_fp32(x1->d)*ggml_fp16_to_fp32(y1->d)); +#else + const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l)); + const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l)); + const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h)); + const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h)); + + const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l)); + const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l)); + const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h)); + const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h)); + + const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); + const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); + const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); + const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), ggml_fp16_to_fp32(x0->d)*ggml_fp16_to_fp32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), ggml_fp16_to_fp32(x1->d)*ggml_fp16_to_fp32(y1->d)); #endif + } -void quantize_row_q3_K(const float * restrict x, void * restrict vy, int k) { - quantize_row_q3_K_reference(x, vy, k); -} + *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); +#elif defined(__wasm_simd128__) + v128_t sumv = wasm_f32x4_splat(0.0f); -size_t ggml_quantize_q3_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) { - (void)hist; // TODO: collect histograms + uint32_t qh; + uint64_t tmp[4]; - for (int j = 0; j < n; j += k) { - block_q3_K * restrict y = (block_q3_K *)dst + j/QK_K; - quantize_row_q3_K_reference(src + j, y, k); + // TODO: check if unrolling this is better + for (int i = 0; i < nb; ++i) { + const block_q5_0 * restrict x0 = &x[i]; + const block_q8_0 * restrict y0 = &y[i]; + + const v128_t m4b = wasm_i8x16_splat(0x0F); + + // extract the 5th bit + memcpy(&qh, x0->qh, sizeof(qh)); + + tmp[0] = table_b2b_1[(qh >> 0) & 0xFF]; + tmp[1] = table_b2b_1[(qh >> 8) & 
0xFF]; + tmp[2] = table_b2b_1[(qh >> 16) & 0xFF]; + tmp[3] = table_b2b_1[(qh >> 24) ]; + + const v128_t qhl = wasm_v128_load(tmp + 0); + const v128_t qhh = wasm_v128_load(tmp + 2); + + const v128_t v0 = wasm_v128_load(x0->qs); + + // 4-bit -> 8-bit + const v128_t v0l = wasm_v128_and (v0, m4b); + const v128_t v0h = wasm_u8x16_shr(v0, 4); + + // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero) + const v128_t v0lf = wasm_i8x16_sub(v0l, qhl); + const v128_t v0hf = wasm_i8x16_sub(v0h, qhh); + + // load y + const v128_t v1l = wasm_v128_load(y0->qs); + const v128_t v1h = wasm_v128_load(y0->qs + 16); + + // int8x16 -> int16x8 + const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf); + const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf); + const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf); + const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf); + + const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l); + const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l); + const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h); + const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h); + + // dot product + sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4( + wasm_i32x4_add( + wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll), + wasm_i32x4_dot_i16x8(v0lfh, v1lh)), + wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), + wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), + wasm_f32x4_splat(ggml_fp16_to_fp32(x0->d) * ggml_fp16_to_fp32(y0->d)))); } - return (n/QK_K*sizeof(block_q3_K)); -} -// ====================== 4-bit (de)-quantization + *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + + wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3); +#elif defined(__AVX2__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); -void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; + // Main loop + for (int i = 0; i < nb; i++) { + /* Compute combined scale for the block */ + const __m256 d = _mm256_set1_ps(ggml_fp16_to_fp32(x[i].d) * ggml_fp16_to_fp32(y[i].d)); - uint8_t L[QK_K]; - uint8_t Laux[32]; - float weights[32]; - float mins[QK_K/32]; - float scales[QK_K/32]; + __m256i bx = bytes_from_nibbles_32(x[i].qs); + __m256i bxhi = bytes_from_bits_32(x[i].qh); + bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0)); + bx = _mm256_or_si256(bx, bxhi); + + __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); + + const __m256 q = mul_sum_i8_pairs_float(bx, by); + + /* Multiply q with scale and accumulate */ + acc = _mm256_fmadd_ps(d, q, acc); + } + + *s = hsum_float_8(acc); +#elif defined(__AVX__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + __m128i mask = _mm_set1_epi8((char)0xF0); + // Main loop for (int i = 0; i < nb; i++) { + /* Compute combined scale for the block */ + const __m256 d = _mm256_set1_ps(ggml_fp16_to_fp32(x[i].d) * ggml_fp16_to_fp32(y[i].d)); + + __m256i bx = bytes_from_nibbles_32(x[i].qs); + const __m256i bxhi = bytes_from_bits_32(x[i].qh); + __m128i bxhil = _mm256_castsi256_si128(bxhi); + __m128i bxhih = _mm256_extractf128_si256(bxhi, 1); + bxhil = _mm_andnot_si128(bxhil, mask); + bxhih = _mm_andnot_si128(bxhih, mask); + __m128i bxl = _mm256_castsi256_si128(bx); + __m128i bxh = _mm256_extractf128_si256(bx, 1); + bxl = _mm_or_si128(bxl, bxhil); + bxh = _mm_or_si128(bxh, bxhih); + bx = MM256_SET_M128I(bxh, bxl); + + const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); + + 
const __m256 q = mul_sum_i8_pairs_float(bx, by); + + /* Multiply q with scale and accumulate */ + acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc); + } - float max_scale = 0; // as we are deducting the min, scales are always positive - float max_min = 0; - for (int j = 0; j < QK_K/32; ++j) { - //scales[j] = make_qkx1_quants(32, 15, x + 32*j, L + 32*j, &mins[j], 9, 0.5f); - float sum_x2 = 0; - for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l]; - float av_x = sqrtf(sum_x2/32); - for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]); - scales[j] = make_qkx2_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -1.f, 0.1f, 20, false); - float scale = scales[j]; - if (scale > max_scale) { - max_scale = scale; - } - float min = mins[j]; - if (min > max_min) { - max_min = min; - } - } + *s = hsum_float_8(acc); +#elif defined(__riscv_v_intrinsic) + float sumf = 0.0; -#if QK_K == 256 - float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f; - float inv_min = max_min > 0 ? 63.f/max_min : 0.f; - for (int j = 0; j < QK_K/32; ++j) { - uint8_t ls = nearest_int(inv_scale*scales[j]); - uint8_t lm = nearest_int(inv_min*mins[j]); - ls = MIN(63, ls); - lm = MIN(63, lm); - if (j < 4) { - y[i].scales[j] = ls; - y[i].scales[j+4] = lm; - } else { - y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4); - y[i].scales[j-4] |= ((ls >> 4) << 6); - y[i].scales[j-0] |= ((lm >> 4) << 6); - } - } - y[i].d = ggml_fp32_to_fp16(max_scale/63.f); - y[i].dmin = ggml_fp32_to_fp16(max_min/63.f); + uint32_t qh; - uint8_t sc, m; - for (int j = 0; j < QK_K/32; ++j) { - get_scale_min_k4(j, y[i].scales, &sc, &m); - const float d = ggml_fp16_to_fp32(y[i].d) * sc; - if (!d) continue; - const float dm = ggml_fp16_to_fp32(y[i].dmin) * m; - for (int ii = 0; ii < 32; ++ii) { - int l = nearest_int((x[32*j + ii] + dm)/d); - l = MAX(0, MIN(15, l)); - L[32*j + ii] = l; - } - } -#else - const float s_factor = 15.f; - float inv_scale = max_scale > 0 ? s_factor/max_scale : 0.f; - float inv_min = max_min > 0 ? 
s_factor/max_min : 0.f; - int d1 = nearest_int(inv_scale*scales[0]); - int m1 = nearest_int(inv_min*mins[0]); - int d2 = nearest_int(inv_scale*scales[1]); - int m2 = nearest_int(inv_min*mins[1]); - y[i].scales[0] = d1 | (m1 << 4); - y[i].scales[1] = d2 | (m2 << 4); - y[i].d[0] = ggml_fp32_to_fp16(max_scale/s_factor); - y[i].d[1] = ggml_fp32_to_fp16(max_min/s_factor); + size_t vl = __riscv_vsetvl_e8m1(qk/2); - float sumlx = 0; - int suml2 = 0; - for (int j = 0; j < QK_K/32; ++j) { - const uint8_t sd = y[i].scales[j] & 0xF; - const uint8_t sm = y[i].scales[j] >> 4; - const float d = ggml_fp16_to_fp32(y[i].d[0]) * sd; - if (!d) continue; - const float m = ggml_fp16_to_fp32(y[i].d[1]) * sm; - for (int ii = 0; ii < 32; ++ii) { - int l = nearest_int((x[32*j + ii] + m)/d); - l = MAX(0, MIN(15, l)); - L[32*j + ii] = l; - sumlx += (x[32*j + ii] + m)*l*sd; - suml2 += l*l*sd*sd; - } - } - if (suml2) { - y[i].d[0] = ggml_fp32_to_fp16(sumlx/suml2); - } -#endif - uint8_t * q = y[i].qs; - for (int j = 0; j < QK_K; j += 64) { - for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4); - q += 32; - } + // These tempory registers are for masking and shift operations + vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl); + vuint32m2_t vt_2 = __riscv_vsll_vv_u32m2(__riscv_vmv_v_x_u32m2(1, vl), vt_1, vl); + + vuint32m2_t vt_3 = __riscv_vsll_vx_u32m2(vt_2, 16, vl); + vuint32m2_t vt_4 = __riscv_vadd_vx_u32m2(vt_1, 12, vl); + + for (int i = 0; i < nb; i++) { + memcpy(&qh, x[i].qh, sizeof(uint32_t)); + + // ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; + vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(vt_2, qh, vl); + vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(xha_0, vt_1, vl); + vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl); + + // ((qh & (1u << (j + 16))) >> (j + 12)); + vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(vt_3, qh, vl); + vuint32m2_t xhl_1 = __riscv_vsrl_vv_u32m2(xha_1, vt_4, vl); + + // narrowing + vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xhl_0, vl); + vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl); + + vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xhl_1, vl); + vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl); + + // load + vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl); + + vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl); + vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl); + + vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl); + vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl); + + vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl); + vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl); + + vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a); + vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l); - x += QK_K; + vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 16, vl); + vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 16, vl); + + vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl); + vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl); + + vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); + + vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl); + vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl); + int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); + + sumf += (ggml_fp16_to_fp32(x[i].d)*ggml_fp16_to_fp32(y[i].d)) * sumi; } -} -void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; + *s = sumf; +#else + // scalar + float sumf = 0.0; for (int i = 0; i < nb; i++) { + uint32_t qh; + memcpy(&qh, x[i].qh, sizeof(qh)); - const 
uint8_t * q = x[i].qs; + int sumi = 0; -#if QK_K == 256 + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; + const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12)); - const float d = ggml_fp16_to_fp32(x[i].d); - const float min = ggml_fp16_to_fp32(x[i].dmin); + const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16; + const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16; - int is = 0; - uint8_t sc, m; - for (int j = 0; j < QK_K; j += 64) { - get_scale_min_k4(is + 0, x[i].scales, &sc, &m); - const float d1 = d * sc; const float m1 = min * m; - get_scale_min_k4(is + 1, x[i].scales, &sc, &m); - const float d2 = d * sc; const float m2 = min * m; - for (int l = 0; l < 32; ++l) *y++ = d1 * (q[l] & 0xF) - m1; - for (int l = 0; l < 32; ++l) *y++ = d2 * (q[l] >> 4) - m2; - q += 32; is += 2; - } -#else - const float dall = ggml_fp16_to_fp32(x[i].d[0]); - const float mall = ggml_fp16_to_fp32(x[i].d[1]); - const float d1 = dall * (x[i].scales[0] & 0xF), m1 = mall * (x[i].scales[0] >> 4); - const float d2 = dall * (x[i].scales[1] & 0xF), m2 = mall * (x[i].scales[1] >> 4); - for (int l = 0; l < 32; ++l) { - y[l+ 0] = d1 * (q[l] & 0xF) - m1; - y[l+32] = d2 * (q[l] >> 4) - m2; + sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]); } - y += QK_K; -#endif + sumf += (ggml_fp16_to_fp32(x[i].d)*ggml_fp16_to_fp32(y[i].d)) * sumi; } -} -void quantize_row_q4_K(const float * restrict x, void * restrict vy, int k) { - assert(k % QK_K == 0); - block_q4_K * restrict y = vy; - quantize_row_q4_K_reference(x, y, k); + *s = sumf; +#endif } -size_t ggml_quantize_q4_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) { - assert(k % QK_K == 0); - (void)hist; // TODO: collect histograms +void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { + const int qk = QK8_1; + const int nb = n / qk; - for (int j = 0; j < n; j += k) { - block_q4_K * restrict y = (block_q4_K *)dst + j/QK_K; - quantize_row_q4_K_reference(src + j, y, k); - } - return (n/QK_K*sizeof(block_q4_K)); -} + assert(n % qk == 0); + assert(qk == QK5_1); -// ====================== 5-bit (de)-quantization + const block_q5_1 * restrict x = vx; + const block_q8_1 * restrict y = vy; -void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; +#if defined(__ARM_NEON) + float32x4_t sumv0 = vdupq_n_f32(0.0f); + float32x4_t sumv1 = vdupq_n_f32(0.0f); -#if QK_K == 256 - uint8_t L[QK_K]; - float mins[QK_K/32]; - float scales[QK_K/32]; - float weights[32]; - uint8_t Laux[32]; + float summs0 = 0.0f; + float summs1 = 0.0f; + + uint32_t qh0; + uint32_t qh1; + + uint64_t tmp0[4]; + uint64_t tmp1[4]; + + assert(nb % 2 == 0); // TODO: handle odd nb + + for (int i = 0; i < nb; i += 2) { + const block_q5_1 * restrict x0 = &x[i]; + const block_q5_1 * restrict x1 = &x[i + 1]; + const block_q8_1 * restrict y0 = &y[i]; + const block_q8_1 * restrict y1 = &y[i + 1]; + + const uint8x16_t m4b = vdupq_n_u8(0x0F); + + summs0 += ggml_fp16_to_fp32(x0->m) * y0->s; + summs1 += ggml_fp16_to_fp32(x1->m) * y1->s; + + // extract the 5th bit via lookup table ((b) << 4) + memcpy(&qh0, x0->qh, sizeof(qh0)); + memcpy(&qh1, x1->qh, sizeof(qh1)); + + tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF]; + tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF]; + tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF]; + tmp0[3] = table_b2b_0[(qh0 >> 24) ]; + + tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF]; + tmp1[1] = 
table_b2b_0[(qh1 >> 8) & 0xFF]; + tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF]; + tmp1[3] = table_b2b_0[(qh1 >> 24) ]; + + const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0)); + const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2)); + const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0)); + const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2)); + + const uint8x16_t v0_0 = vld1q_u8(x0->qs); + const uint8x16_t v0_1 = vld1q_u8(x1->qs); + + // 4-bit -> 8-bit + const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); + const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); + const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); + const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); + + // add high bit + const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0); + const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0); + const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1); + const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1); + + // load y + const int8x16_t v1_0l = vld1q_s8(y0->qs); + const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); + const int8x16_t v1_1l = vld1q_s8(y1->qs); + const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); + +#if defined(__ARM_FEATURE_DOTPROD) + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( + vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), + vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), ggml_fp16_to_fp32(x0->d)*y0->d); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( + vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), + vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), ggml_fp16_to_fp32(x1->d)*y1->d); #else - int8_t L[QK_K]; - float scales[QK_K/16]; + const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l)); + const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l)); + const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h)); + const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h)); + + const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l)); + const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l)); + const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h)); + const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h)); + + const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); + const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); + const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); + const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), ggml_fp16_to_fp32(x0->d)*y0->d); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), ggml_fp16_to_fp32(x1->d)*y1->d); #endif + } - for (int i = 0; i < nb; i++) { + *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1; +#elif defined(__wasm_simd128__) + v128_t sumv = wasm_f32x4_splat(0.0f); -#if QK_K == 256 + float summs = 0.0f; - float max_scale = 0; // as we are deducting the min, scales are always positive - float max_min = 0; - for (int j = 0; j < QK_K/32; ++j) { - //scales[j] = make_qkx1_quants(32, 31, x + 32*j, L + 32*j, &mins[j], 9, 0.5f); - float sum_x2 = 0; - for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l]; - float av_x = sqrtf(sum_x2/32); - for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]); - scales[j] = make_qkx2_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.5f, 0.1f, 15, false); - float scale = scales[j]; - if (scale > max_scale) { - max_scale = scale; - 
} - float min = mins[j]; - if (min > max_min) { - max_min = min; - } - } + uint32_t qh; + uint64_t tmp[4]; - float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f; - float inv_min = max_min > 0 ? 63.f/max_min : 0.f; - for (int j = 0; j < QK_K/32; ++j) { - uint8_t ls = nearest_int(inv_scale*scales[j]); - uint8_t lm = nearest_int(inv_min*mins[j]); - ls = MIN(63, ls); - lm = MIN(63, lm); - if (j < 4) { - y[i].scales[j] = ls; - y[i].scales[j+4] = lm; - } else { - y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4); - y[i].scales[j-4] |= ((ls >> 4) << 6); - y[i].scales[j-0] |= ((lm >> 4) << 6); - } - } - y[i].d = ggml_fp32_to_fp16(max_scale/63.f); - y[i].dmin = ggml_fp32_to_fp16(max_min/63.f); + // TODO: check if unrolling this is better + for (int i = 0; i < nb; ++i) { + const block_q5_1 * restrict x0 = &x[i]; + const block_q8_1 * restrict y0 = &y[i]; - uint8_t sc, m; - for (int j = 0; j < QK_K/32; ++j) { - get_scale_min_k4(j, y[i].scales, &sc, &m); - const float d = ggml_fp16_to_fp32(y[i].d) * sc; - if (!d) continue; - const float dm = ggml_fp16_to_fp32(y[i].dmin) * m; - for (int ii = 0; ii < 32; ++ii) { - int l = nearest_int((x[32*j + ii] + dm)/d); - l = MAX(0, MIN(31, l)); - L[32*j + ii] = l; - } - } + summs += ggml_fp16_to_fp32(x0->m) * y0->s; - uint8_t * restrict qh = y[i].qh; - uint8_t * restrict ql = y[i].qs; - memset(qh, 0, QK_K/8); + const v128_t m4b = wasm_i8x16_splat(0x0F); - uint8_t m1 = 1, m2 = 2; - for (int n = 0; n < QK_K; n += 64) { - for (int j = 0; j < 32; ++j) { - int l1 = L[n + j]; - if (l1 > 15) { - l1 -= 16; qh[j] |= m1; - } - int l2 = L[n + j + 32]; - if (l2 > 15) { - l2 -= 16; qh[j] |= m2; - } - ql[j] = l1 | (l2 << 4); - } - m1 <<= 2; m2 <<= 2; - ql += 32; - } -#else - float max_scale = 0, amax = 0; - for (int j = 0; j < QK_K/16; ++j) { - scales[j] = make_qx_quants(16, 16, x + 16*j, L + 16*j, 1); - float abs_scale = fabsf(scales[j]); - if (abs_scale > amax) { - amax = abs_scale; - max_scale = scales[j]; - } - } + // extract the 5th bit + memcpy(&qh, x0->qh, sizeof(qh)); - float iscale = -128.f/max_scale; - for (int j = 0; j < QK_K/16; ++j) { - int l = nearest_int(iscale*scales[j]); - y[i].scales[j] = MAX(-128, MIN(127, l)); - } - y[i].d = ggml_fp32_to_fp16(1/iscale); + tmp[0] = table_b2b_0[(qh >> 0) & 0xFF]; + tmp[1] = table_b2b_0[(qh >> 8) & 0xFF]; + tmp[2] = table_b2b_0[(qh >> 16) & 0xFF]; + tmp[3] = table_b2b_0[(qh >> 24) ]; - for (int j = 0; j < QK_K/16; ++j) { - const float d = ggml_fp16_to_fp32(y[i].d) * y[i].scales[j]; - if (!d) continue; - for (int ii = 0; ii < 16; ++ii) { - int l = nearest_int(x[16*j + ii]/d); - l = MAX(-16, MIN(15, l)); - L[16*j + ii] = l + 16; - } - } + const v128_t qhl = wasm_v128_load(tmp + 0); + const v128_t qhh = wasm_v128_load(tmp + 2); - uint8_t * restrict qh = y[i].qh; - uint8_t * restrict ql = y[i].qs; - memset(qh, 0, QK_K/8); + const v128_t v0 = wasm_v128_load(x0->qs); - for (int j = 0; j < 32; ++j) { - int jm = j%8; - int is = j/8; - int l1 = L[j]; - if (l1 > 15) { - l1 -= 16; qh[jm] |= (1 << is); - } - int l2 = L[j + 32]; - if (l2 > 15) { - l2 -= 16; qh[jm] |= (1 << (4 + is)); - } - ql[j] = l1 | (l2 << 4); - } -#endif + // 4-bit -> 8-bit + const v128_t v0l = wasm_v128_and (v0, m4b); + const v128_t v0h = wasm_u8x16_shr(v0, 4); - x += QK_K; + // add high bit + const v128_t v0lf = wasm_v128_or(v0l, qhl); + const v128_t v0hf = wasm_v128_or(v0h, qhh); + + // load y + const v128_t v1l = wasm_v128_load(y0->qs); + const v128_t v1h = wasm_v128_load(y0->qs + 16); + // int8x16 -> int16x8 + const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 
(v0lf); + const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf); + const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf); + const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf); + + const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l); + const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l); + const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h); + const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h); + + // dot product + sumv = wasm_f32x4_add(sumv, + wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add( + wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll), + wasm_i32x4_dot_i16x8(v0lfh, v1lh)), + wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), + wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), + wasm_f32x4_splat(ggml_fp16_to_fp32(x0->d) * y0->d))); } -} -void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; + *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + + wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs; +#elif defined(__AVX2__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + + float summs = 0.0f; + // Main loop for (int i = 0; i < nb; i++) { + const __m256 dx = _mm256_set1_ps(ggml_fp16_to_fp32(x[i].d)); - const uint8_t * ql = x[i].qs; - const uint8_t * qh = x[i].qh; + summs += ggml_fp16_to_fp32(x[i].m) * y[i].s; -#if QK_K == 256 + __m256i bx = bytes_from_nibbles_32(x[i].qs); + __m256i bxhi = bytes_from_bits_32(x[i].qh); + bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10)); + bx = _mm256_or_si256(bx, bxhi); - const float d = ggml_fp16_to_fp32(x[i].d); - const float min = ggml_fp16_to_fp32(x[i].dmin); + const __m256 dy = _mm256_set1_ps(y[i].d); + const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); - int is = 0; - uint8_t sc, m; - uint8_t u1 = 1, u2 = 2; - for (int j = 0; j < QK_K; j += 64) { - get_scale_min_k4(is + 0, x[i].scales, &sc, &m); - const float d1 = d * sc; const float m1 = min * m; - get_scale_min_k4(is + 1, x[i].scales, &sc, &m); - const float d2 = d * sc; const float m2 = min * m; - for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 16 : 0)) - m1; - for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 16 : 0)) - m2; - ql += 32; is += 2; - u1 <<= 2; u2 <<= 2; - } -#else - float d = ggml_fp16_to_fp32(x[i].d); - const int8_t * restrict s = x[i].scales; - for (int l = 0; l < 8; ++l) { - y[l+ 0] = d * s[0] * ((ql[l+ 0] & 0xF) - (qh[l] & 0x01 ? 0 : 16)); - y[l+ 8] = d * s[0] * ((ql[l+ 8] & 0xF) - (qh[l] & 0x02 ? 0 : 16)); - y[l+16] = d * s[1] * ((ql[l+16] & 0xF) - (qh[l] & 0x04 ? 0 : 16)); - y[l+24] = d * s[1] * ((ql[l+24] & 0xF) - (qh[l] & 0x08 ? 0 : 16)); - y[l+32] = d * s[2] * ((ql[l+ 0] >> 4) - (qh[l] & 0x10 ? 0 : 16)); - y[l+40] = d * s[2] * ((ql[l+ 8] >> 4) - (qh[l] & 0x20 ? 0 : 16)); - y[l+48] = d * s[3] * ((ql[l+16] >> 4) - (qh[l] & 0x40 ? 0 : 16)); - y[l+56] = d * s[3] * ((ql[l+24] >> 4) - (qh[l] & 0x80 ? 
0 : 16)); - } - y += QK_K; -#endif + const __m256 q = mul_sum_us8_pairs_float(bx, by); + + acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc); } -} -void quantize_row_q5_K(const float * restrict x, void * restrict vy, int k) { - assert(k % QK_K == 0); - block_q5_K * restrict y = vy; - quantize_row_q5_K_reference(x, y, k); -} + *s = hsum_float_8(acc) + summs; +#elif defined(__AVX__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + __m128i mask = _mm_set1_epi8(0x10); -size_t ggml_quantize_q5_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) { - assert(k % QK_K == 0); - (void)hist; // TODO: collect histograms + float summs = 0.0f; - for (int j = 0; j < n; j += k) { - block_q5_K * restrict y = (block_q5_K *)dst + j/QK_K; - quantize_row_q5_K_reference(src + j, y, k); + // Main loop + for (int i = 0; i < nb; i++) { + const __m256 dx = _mm256_set1_ps(ggml_fp16_to_fp32(x[i].d)); + + summs += ggml_fp16_to_fp32(x[i].m) * y[i].s; + + __m256i bx = bytes_from_nibbles_32(x[i].qs); + const __m256i bxhi = bytes_from_bits_32(x[i].qh); + __m128i bxhil = _mm256_castsi256_si128(bxhi); + __m128i bxhih = _mm256_extractf128_si256(bxhi, 1); + bxhil = _mm_and_si128(bxhil, mask); + bxhih = _mm_and_si128(bxhih, mask); + __m128i bxl = _mm256_castsi256_si128(bx); + __m128i bxh = _mm256_extractf128_si256(bx, 1); + bxl = _mm_or_si128(bxl, bxhil); + bxh = _mm_or_si128(bxh, bxhih); + bx = MM256_SET_M128I(bxh, bxl); + + const __m256 dy = _mm256_set1_ps(y[i].d); + const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); + + const __m256 q = mul_sum_us8_pairs_float(bx, by); + + acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc); } - return (n/QK_K*sizeof(block_q5_K)); -} -// ====================== 6-bit (de)-quantization + *s = hsum_float_8(acc) + summs; +#elif defined(__riscv_v_intrinsic) + float sumf = 0.0; -void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; + uint32_t qh; - int8_t L[QK_K]; - float scales[QK_K/16]; + size_t vl = __riscv_vsetvl_e8m1(qk/2); + + // temporary registers for shift operations + vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl); + vuint32m2_t vt_2 = __riscv_vadd_vx_u32m2(vt_1, 12, vl); for (int i = 0; i < nb; i++) { + memcpy(&qh, x[i].qh, sizeof(uint32_t)); - float max_scale = 0; - float max_abs_scale = 0; + // load qh + vuint32m2_t vqh = __riscv_vmv_v_x_u32m2(qh, vl); - for (int ib = 0; ib < QK_K/16; ++ib) { + // ((qh >> (j + 0)) << 4) & 0x10; + vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(vqh, vt_1, vl); + vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl); + vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(xhl_0, 0x10, vl); - const float scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1); - scales[ib] = scale; + // ((qh >> (j + 12)) ) & 0x10; + vuint32m2_t xhr_1 = __riscv_vsrl_vv_u32m2(vqh, vt_2, vl); + vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(xhr_1, 0x10, vl); - const float abs_scale = fabsf(scale); - if (abs_scale > max_abs_scale) { - max_abs_scale = abs_scale; - max_scale = scale; - } + // narrowing + vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xha_0, vl); + vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl); - } + vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xha_1, vl); + vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl); - if (!max_abs_scale) { - memset(&y[i], 0, sizeof(block_q6_K)); - y[i].d = ggml_fp32_to_fp16(0.f); - x += QK_K; - continue; - } + // load + vuint8mf2_t tx = 
__riscv_vle8_v_u8mf2(x[i].qs, vl); - float iscale = -128.f/max_scale; - y[i].d = ggml_fp32_to_fp16(1/iscale); - for (int ib = 0; ib < QK_K/16; ++ib) { - y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib])); - } + vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl); + vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl); - for (int j = 0; j < QK_K/16; ++j) { - float d = ggml_fp16_to_fp32(y[i].d) * y[i].scales[j]; - if (!d) { - continue; - } - for (int ii = 0; ii < 16; ++ii) { - int l = nearest_int(x[16*j + ii]/d); - l = MAX(-32, MIN(31, l)); - L[16*j + ii] = l + 32; - } - } + vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl); + vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl); - uint8_t * restrict ql = y[i].ql; - uint8_t * restrict qh = y[i].qh; -#if QK_K == 256 - for (int j = 0; j < QK_K; j += 128) { - for (int l = 0; l < 32; ++l) { - const uint8_t q1 = L[j + l + 0] & 0xF; - const uint8_t q2 = L[j + l + 32] & 0xF; - const uint8_t q3 = L[j + l + 64] & 0xF; - const uint8_t q4 = L[j + l + 96] & 0xF; - ql[l+ 0] = q1 | (q3 << 4); - ql[l+32] = q2 | (q4 << 4); - qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6); - } - ql += 64; - qh += 32; - } -#else - for (int l = 0; l < 32; ++l) { - const uint8_t q1 = L[l + 0] & 0xF; - const uint8_t q2 = L[l + 32] & 0xF; - ql[l] = q1 | (q2 << 4); - } - for (int l = 0; l < 16; ++l) { - qh[l] = (L[l] >> 4) | ((L[l + 16] >> 4) << 2) | ((L[l + 32] >> 4) << 4) | ((L[l + 48] >> 4) << 6); - } -#endif + vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl); + vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl); - x += QK_K; + vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a); + vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l); + + vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl); + vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl); + + vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); + + vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl); + vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl); + int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); + + sumf += (ggml_fp16_to_fp32(x[i].d)*y[i].d)*sumi + ggml_fp16_to_fp32(x[i].m)*y[i].s; } -} -void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; + *s = sumf; +#else + // scalar + float sumf = 0.0; for (int i = 0; i < nb; i++) { + uint32_t qh; + memcpy(&qh, x[i].qh, sizeof(qh)); - const float d = ggml_fp16_to_fp32(x[i].d); + int sumi = 0; - const uint8_t * restrict ql = x[i].ql; - const uint8_t * restrict qh = x[i].qh; - const int8_t * restrict sc = x[i].scales; + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; + const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; -#if QK_K == 256 - for (int n = 0; n < QK_K; n += 128) { - for (int l = 0; l < 32; ++l) { - int is = l/16; - const int8_t q1 = (int8_t)((ql[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; - const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; - const int8_t q3 = (int8_t)((ql[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; - const int8_t q4 = (int8_t)((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; - y[l + 0] = d * sc[is + 0] * q1; - y[l + 32] = d * sc[is + 2] * q2; - y[l + 64] = d * sc[is + 4] * q3; - y[l + 96] = d * sc[is + 6] * q4; - } - y += 128; - ql += 64; - qh += 32; - sc += 8; - } -#else - for (int l = 0; l < 16; ++l) { - const int8_t q1 = (int8_t)((ql[l+ 0] & 0xF) | 
(((qh[l] >> 0) & 3) << 4)) - 32; - const int8_t q2 = (int8_t)((ql[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; - const int8_t q3 = (int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; - const int8_t q4 = (int8_t)((ql[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32; - y[l+ 0] = d * sc[0] * q1; - y[l+16] = d * sc[1] * q2; - y[l+32] = d * sc[2] * q3; - y[l+48] = d * sc[3] * q4; + const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0; + const int32_t x1 = (x[i].qs[j] >> 4) | xh_1; + + sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]); } - y += 64; -#endif + sumf += (ggml_fp16_to_fp32(x[i].d)*y[i].d)*sumi + ggml_fp16_to_fp32(x[i].m)*y[i].s; } -} -void quantize_row_q6_K(const float * restrict x, void * restrict vy, int k) { - assert(k % QK_K == 0); - block_q6_K * restrict y = vy; - quantize_row_q6_K_reference(x, y, k); + *s = sumf; +#endif } -size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist) { - assert(k % QK_K == 0); - (void)hist; // TODO: collect histograms +void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { + const int qk = QK8_0; + const int nb = n / qk; - for (int j = 0; j < n; j += k) { - block_q6_K * restrict y = (block_q6_K *)dst + j/QK_K; - quantize_row_q6_K_reference(src + j, y, k); + assert(n % qk == 0); + + const block_q8_0 * restrict x = vx; + const block_q8_0 * restrict y = vy; + +#if defined(__ARM_NEON) + float32x4_t sumv0 = vdupq_n_f32(0.0f); + float32x4_t sumv1 = vdupq_n_f32(0.0f); + + assert(nb % 2 == 0); // TODO: handle odd nb + + for (int i = 0; i < nb; i += 2) { + const block_q8_0 * restrict x0 = &x[i + 0]; + const block_q8_0 * restrict x1 = &x[i + 1]; + const block_q8_0 * restrict y0 = &y[i + 0]; + const block_q8_0 * restrict y1 = &y[i + 1]; + + const int8x16_t x0_0 = vld1q_s8(x0->qs); + const int8x16_t x0_1 = vld1q_s8(x0->qs + 16); + const int8x16_t x1_0 = vld1q_s8(x1->qs); + const int8x16_t x1_1 = vld1q_s8(x1->qs + 16); + + // load y + const int8x16_t y0_0 = vld1q_s8(y0->qs); + const int8x16_t y0_1 = vld1q_s8(y0->qs + 16); + const int8x16_t y1_0 = vld1q_s8(y1->qs); + const int8x16_t y1_1 = vld1q_s8(y1->qs + 16); + +#if defined(__ARM_FEATURE_DOTPROD) + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( + vdotq_s32(vdupq_n_s32(0), x0_0, y0_0), + vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), ggml_fp16_to_fp32(x0->d)*ggml_fp16_to_fp32(y0->d)); + + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( + vdotq_s32(vdupq_n_s32(0), x1_0, y1_0), + vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), ggml_fp16_to_fp32(x1->d)*ggml_fp16_to_fp32(y1->d)); + +#else + const int16x8_t p0_0 = vmull_s8(vget_low_s8 (x0_0), vget_low_s8 (y0_0)); + const int16x8_t p0_1 = vmull_s8(vget_high_s8(x0_0), vget_high_s8(y0_0)); + const int16x8_t p0_2 = vmull_s8(vget_low_s8 (x0_1), vget_low_s8 (y0_1)); + const int16x8_t p0_3 = vmull_s8(vget_high_s8(x0_1), vget_high_s8(y0_1)); + + const int16x8_t p1_0 = vmull_s8(vget_low_s8 (x1_0), vget_low_s8 (y1_0)); + const int16x8_t p1_1 = vmull_s8(vget_high_s8(x1_0), vget_high_s8(y1_0)); + const int16x8_t p1_2 = vmull_s8(vget_low_s8 (x1_1), vget_low_s8 (y1_1)); + const int16x8_t p1_3 = vmull_s8(vget_high_s8(x1_1), vget_high_s8(y1_1)); + + const int32x4_t p0 = vaddq_s32(vpaddlq_s16(p0_0), vpaddlq_s16(p0_1)); + const int32x4_t p1 = vaddq_s32(vpaddlq_s16(p0_2), vpaddlq_s16(p0_3)); + const int32x4_t p2 = vaddq_s32(vpaddlq_s16(p1_0), vpaddlq_s16(p1_1)); + const int32x4_t p3 = vaddq_s32(vpaddlq_s16(p1_2), vpaddlq_s16(p1_3)); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), 
ggml_fp16_to_fp32(x0->d)*ggml_fp16_to_fp32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(p2, p3)), ggml_fp16_to_fp32(x1->d)*ggml_fp16_to_fp32(y1->d)); +#endif } - return (n/QK_K*sizeof(block_q6_K)); -} -//===================================== Q8_K ============================================== + *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); +#elif defined(__AVX2__) || defined(__AVX__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); -void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; + // Main loop + for (int i = 0; i < nb; ++i) { + // Compute combined scale for the block + const __m256 d = _mm256_set1_ps(ggml_fp16_to_fp32(x[i].d) * ggml_fp16_to_fp32(y[i].d)); + __m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs); + __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); - for (int i = 0; i < nb; i++) { + const __m256 q = mul_sum_i8_pairs_float(bx, by); - float max = 0; - float amax = 0; - for (int j = 0; j < QK_K; ++j) { - float ax = fabsf(x[j]); - if (ax > amax) { - amax = ax; max = x[j]; - } - } - if (!amax) { - y[i].d = 0; - memset(y[i].qs, 0, QK_K); - x += QK_K; - continue; - } - const float iscale = -128.f/max; - for (int j = 0; j < QK_K; ++j) { - int v = nearest_int(iscale*x[j]); - y[i].qs[j] = MIN(127, v); - } - for (int j = 0; j < QK_K/16; ++j) { - int sum = 0; - for (int ii = 0; ii < 16; ++ii) { - sum += y[i].qs[j*16 + ii]; - } - y[i].bsums[j] = sum; - } - y[i].d = 1/iscale; - x += QK_K; + // Multiply q with scale and accumulate +#if defined(__AVX2__) + acc = _mm256_fmadd_ps( d, q, acc ); +#else + acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc ); +#endif } -} -void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k) { - assert(k % QK_K == 0); - const int nb = k / QK_K; + *s = hsum_float_8(acc); +#elif defined(__riscv_v_intrinsic) + float sumf = 0.0; + size_t vl = __riscv_vsetvl_e8m1(qk); for (int i = 0; i < nb; i++) { - for (int j = 0; j < QK_K; ++j) { - *y++ = x[i].d * x[i].qs[j]; - } + // load elements + vint8m1_t bx = __riscv_vle8_v_i8m1(x[i].qs, vl); + vint8m1_t by = __riscv_vle8_v_i8m1(y[i].qs, vl); + + vint16m2_t vw_mul = __riscv_vwmul_vv_i16m2(bx, by, vl); + + vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl); + vint32m1_t v_sum = __riscv_vwredsum_vs_i16m2_i32m1(vw_mul, v_zero, vl); + + int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum); + + sumf += sumi*(ggml_fp16_to_fp32(x[i].d)*ggml_fp16_to_fp32(y[i].d)); } -} -void quantize_row_q8_K(const float * restrict x, void * restrict y, int k) { - quantize_row_q8_K_reference(x, y, k); -} + *s = sumf; +#else + // scalar + float sumf = 0.0; -//===================================== Dot ptoducts ================================= + for (int i = 0; i < nb; i++) { + int sumi = 0; -// -// Helper functions -// -#if __AVX__ || __AVX2__ || __AVX512F__ + for (int j = 0; j < qk; j++) { + sumi += x[i].qs[j]*y[i].qs[j]; + } -// horizontally add 8 floats -static inline float hsum_float_8(const __m256 x) { - __m128 res = _mm256_extractf128_ps(x, 1); - res = _mm_add_ps(res, _mm256_castps256_ps128(x)); - res = _mm_add_ps(res, _mm_movehl_ps(res, res)); - res = _mm_add_ss(res, _mm_movehdup_ps(res)); - return _mm_cvtss_f32(res); -} + sumf += sumi*(ggml_fp16_to_fp32(x[i].d)*ggml_fp16_to_fp32(y[i].d)); + } -// shuffles to pick the required scales in dot products -static inline __m256i get_scale_shuffle_q3k(int i) { - static const uint8_t k_shuffle[128] = { - 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 
0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, - 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, - 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, - 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15, - }; - return _mm256_loadu_si256((const __m256i*)k_shuffle + i); -} -static inline __m256i get_scale_shuffle_k4(int i) { - static const uint8_t k_shuffle[256] = { - 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, - 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, - 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, - 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, - 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, - 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, - 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, - 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15 - }; - return _mm256_loadu_si256((const __m256i*)k_shuffle + i); -} -static inline __m128i get_scale_shuffle(int i) { - static const uint8_t k_shuffle[128] = { - 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, - 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, - 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, - 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, - 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11, - 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13, - 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15 - }; - return _mm_loadu_si128((const __m128i*)k_shuffle + i); -} + *s = sumf; #endif +} #if QK_K == 256 void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { diff --git a/k_quants.h b/ggml-quants.h similarity index 63% rename from k_quants.h rename to ggml-quants.h index 9de089e7a47195..d88f99e331f1dd 100644 --- a/k_quants.h +++ b/ggml-quants.h @@ -1,20 +1,14 @@ #pragma once +// This is a private API for quantization and dequantization +// Should not be used directly, use ggml.h instead + #include "ggml.h" #include #include #include -// Super-block size -#ifdef GGML_QKK_64 -#define QK_K 64 -#define K_SCALE_SIZE 4 -#else -#define QK_K 256 -#define K_SCALE_SIZE 12 -#endif - #ifndef static_assert #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L) #define static_assert(cond, msg) _Static_assert(cond, msg) @@ -23,10 +17,66 @@ #endif #endif +#define QK4_0 32 +typedef struct { + ggml_fp16_t d; // delta + uint8_t qs[QK4_0 / 2]; // nibbles / quants +} block_q4_0; +static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding"); + +#define QK4_1 32 +typedef struct { + ggml_fp16_t d; // delta + ggml_fp16_t m; // min + uint8_t qs[QK4_1 / 2]; // nibbles / quants +} block_q4_1; +static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_fp16_t) + QK4_1 / 2, "wrong q4_1 block size/padding"); + +#define QK5_0 32 +typedef struct { + ggml_fp16_t d; // delta + uint8_t qh[4]; // 5-th bit of quants + uint8_t qs[QK5_0 / 2]; // nibbles / quants +} block_q5_0; +static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block 
size/padding"); + +#define QK5_1 32 +typedef struct { + ggml_fp16_t d; // delta + ggml_fp16_t m; // min + uint8_t qh[4]; // 5-th bit of quants + uint8_t qs[QK5_1 / 2]; // nibbles / quants +} block_q5_1; +static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding"); + +#define QK8_0 32 +typedef struct { + ggml_fp16_t d; // delta + int8_t qs[QK8_0]; // quants +} block_q8_0; +static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding"); + +#define QK8_1 32 +typedef struct { + float d; // delta + float s; // d * sum(qs[i]) + int8_t qs[QK8_1]; // quants +} block_q8_1; +static_assert(sizeof(block_q8_1) == 2*sizeof(float) + QK8_1, "wrong q8_1 block size/padding"); + // // Super-block quantization structures // +// Super-block size +#ifdef GGML_QKK_64 +#define QK_K 64 +#define K_SCALE_SIZE 4 +#else +#define QK_K 256 +#define K_SCALE_SIZE 12 +#endif + // 2-bit quantization // weight is represented as x = a * q + b // 16 blocks of 16 elements each @@ -127,6 +177,13 @@ static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_ // Quantization +void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k); +void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k); +void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k); +void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k); +void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k); +void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k); + void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k); void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k); void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k); @@ -134,6 +191,13 @@ void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k); void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k); +void quantize_row_q4_0(const float * restrict x, void * restrict y, int k); +void quantize_row_q4_1(const float * restrict x, void * restrict y, int k); +void quantize_row_q5_0(const float * restrict x, void * restrict y, int k); +void quantize_row_q5_1(const float * restrict x, void * restrict y, int k); +void quantize_row_q8_0(const float * restrict x, void * restrict y, int k); +void quantize_row_q8_1(const float * restrict x, void * restrict y, int k); + void quantize_row_q2_K(const float * restrict x, void * restrict y, int k); void quantize_row_q3_K(const float * restrict x, void * restrict y, int k); void quantize_row_q4_K(const float * restrict x, void * restrict y, int k); @@ -142,6 +206,13 @@ void quantize_row_q6_K(const float * restrict x, void * restrict y, int k); void quantize_row_q8_K(const float * restrict x, void * restrict y, int k); // Dequantization +void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k); +void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k); +void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k); +void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k); +void dequantize_row_q8_0(const 
block_q8_0 * restrict x, float * restrict y, int k); +//void dequantize_row_q8_1(const block_q8_1 * restrict x, float * restrict y, int k); + void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k); void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k); void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k); @@ -150,16 +221,14 @@ void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k); // Dot product +void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy); +void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, const void * restrict vx, const void * restrict vy); +void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy); +void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, const void * restrict vx, const void * restrict vy); +void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy); + void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy); void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy); void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy); void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy); void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy); - -// Quantization with histogram collection -size_t ggml_quantize_q2_K(const float * src, void * dst, int n, int k, int64_t * hist); -size_t ggml_quantize_q3_K(const float * src, void * dst, int n, int k, int64_t * hist); -size_t ggml_quantize_q4_K(const float * src, void * dst, int n, int k, int64_t * hist); -size_t ggml_quantize_q5_K(const float * src, void * dst, int n, int k, int64_t * hist); -size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist); - diff --git a/ggml.c b/ggml.c index 6f66bab051cea4..95f72c35e8f205 100644 --- a/ggml.c +++ b/ggml.c @@ -1,10 +1,7 @@ #define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnigns on Windows #include "ggml.h" - -#ifdef GGML_USE_K_QUANTS -#include "k_quants.h" -#endif +#include "ggml-quants.h" #if defined(_MSC_VER) || defined(__MINGW32__) #include // using malloc.h with MSC/MINGW @@ -443,21 +440,6 @@ static ggml_fp16_t table_exp_f16[1 << 16]; // precomputed f32 table for f16 (256 KB) static float table_f32_f16[1 << 16]; -#if defined(__ARM_NEON) || defined(__wasm_simd128__) -#define B1(c,s,n) 0x ## n ## c , 0x ## n ## s -#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s) -#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s) -#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s) -#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s) -#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s) -#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s) -#define B8(c,s ) B7(c,s, c), B7(c,s, s) - -// precomputed tables for expanding 8bits to 8 bytes: -static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4 -static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4 -#endif - // On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32, // so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON. 
// This is also true for POWER9. @@ -587,3071 +569,794 @@ int64_t ggml_cycles_per_ms(void) { static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float); -// -// quantization -// - -#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1) - -#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) -// multiply int8_t, add results pairwise twice -static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) { - // Get absolute values of x vectors - const __m128i ax = _mm_sign_epi8(x, x); - // Sign the values of the y vectors - const __m128i sy = _mm_sign_epi8(y, x); - // Perform multiplication and create 16-bit values - const __m128i dot = _mm_maddubs_epi16(ax, sy); - const __m128i ones = _mm_set1_epi16(1); - return _mm_madd_epi16(ones, dot); -} - -#if __AVX__ || __AVX2__ || __AVX512F__ -// horizontally add 8 floats -static inline float hsum_float_8(const __m256 x) { - __m128 res = _mm256_extractf128_ps(x, 1); - res = _mm_add_ps(res, _mm256_castps256_ps128(x)); - res = _mm_add_ps(res, _mm_movehl_ps(res, res)); - res = _mm_add_ss(res, _mm_movehdup_ps(res)); - return _mm_cvtss_f32(res); -} - -// horizontally add 8 int32_t -static inline int hsum_i32_8(const __m256i a) { - const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1)); - const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128); - const __m128i sum64 = _mm_add_epi32(hi64, sum128); - const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1)); - return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32)); -} - -// horizontally add 4 int32_t -static inline int hsum_i32_4(const __m128i a) { - const __m128i hi64 = _mm_unpackhi_epi64(a, a); - const __m128i sum64 = _mm_add_epi32(hi64, a); - const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1)); - return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32)); -} - -#if defined(__AVX2__) || defined(__AVX512F__) -// spread 32 bits to 32 bytes { 0x00, 0xFF } -static inline __m256i bytes_from_bits_32(const uint8_t * x) { - uint32_t x32; - memcpy(&x32, x, sizeof(uint32_t)); - const __m256i shuf_mask = _mm256_set_epi64x( - 0x0303030303030303, 0x0202020202020202, - 0x0101010101010101, 0x0000000000000000); - __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask); - const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe); - bytes = _mm256_or_si256(bytes, bit_mask); - return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1)); -} - -// Unpack 32 4-bit fields into 32 bytes -// The output vector contains 32 bytes, each one in [ 0 .. 
15 ] interval -static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) -{ - const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi); - const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp); - const __m256i lowMask = _mm256_set1_epi8( 0xF ); - return _mm256_and_si256(lowMask, bytes); -} - -// add int16_t pairwise and return as float vector -static inline __m256 sum_i16_pairs_float(const __m256i x) { - const __m256i ones = _mm256_set1_epi16(1); - const __m256i summed_pairs = _mm256_madd_epi16(ones, x); - return _mm256_cvtepi32_ps(summed_pairs); -} - -static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { -#if __AVXVNNI__ - const __m256i zero = _mm256_setzero_si256(); - const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy); - return _mm256_cvtepi32_ps(summed_pairs); -#else - // Perform multiplication and create 16-bit values - const __m256i dot = _mm256_maddubs_epi16(ax, sy); - return sum_i16_pairs_float(dot); -#endif -} - -// multiply int8_t, add results pairwise twice and return as float vector -static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { -#if __AVXVNNIINT8__ - const __m256i zero = _mm256_setzero_si256(); - const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y); - return _mm256_cvtepi32_ps(summed_pairs); -#else - // Get absolute values of x vectors - const __m256i ax = _mm256_sign_epi8(x, x); - // Sign the values of the y vectors - const __m256i sy = _mm256_sign_epi8(y, x); - return mul_sum_us8_pairs_float(ax, sy); -#endif -} +static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y); +static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y); -static inline __m128i packNibbles( __m256i bytes ) -{ - // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh -#if __AVX512F__ - const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000 - bytes = _mm256_or_si256(bytes, bytes_srli_4); // 0000_abcd_abcd_efgh - return _mm256_cvtepi16_epi8(bytes); // abcd_efgh -#else - const __m256i lowByte = _mm256_set1_epi16( 0xFF ); - __m256i high = _mm256_andnot_si256( lowByte, bytes ); - __m256i low = _mm256_and_si256( lowByte, bytes ); - high = _mm256_srli_epi16( high, 4 ); - bytes = _mm256_or_si256( low, high ); - - // Compress uint16_t lanes into bytes - __m128i r0 = _mm256_castsi256_si128( bytes ); - __m128i r1 = _mm256_extracti128_si256( bytes, 1 ); - return _mm_packus_epi16( r0, r1 ); -#endif -} -#elif defined(__AVX__) -// spread 32 bits to 32 bytes { 0x00, 0xFF } -static inline __m256i bytes_from_bits_32(const uint8_t * x) { - uint32_t x32; - memcpy(&x32, x, sizeof(uint32_t)); - const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000); - const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202); - __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl); - __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh); - const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe); - bytesl = _mm_or_si128(bytesl, bit_mask); - bytesh = _mm_or_si128(bytesh, bit_mask); - bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1)); - bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1)); - return MM256_SET_M128I(bytesh, bytesl); -} - -// Unpack 32 4-bit fields into 32 bytes -// The output vector contains 32 bytes, each one in [ 0 .. 
15 ] interval -static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) -{ - // Load 16 bytes from memory - __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi); - __m128i tmph = _mm_srli_epi16(tmpl, 4); - const __m128i lowMask = _mm_set1_epi8(0xF); - tmpl = _mm_and_si128(lowMask, tmpl); - tmph = _mm_and_si128(lowMask, tmph); - return MM256_SET_M128I(tmph, tmpl); -} - -// add int16_t pairwise and return as float vector -static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) { - const __m128i ones = _mm_set1_epi16(1); - const __m128i summed_pairsl = _mm_madd_epi16(ones, xl); - const __m128i summed_pairsh = _mm_madd_epi16(ones, xh); - const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl); - return _mm256_cvtepi32_ps(summed_pairs); -} - -static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { - const __m128i axl = _mm256_castsi256_si128(ax); - const __m128i axh = _mm256_extractf128_si256(ax, 1); - const __m128i syl = _mm256_castsi256_si128(sy); - const __m128i syh = _mm256_extractf128_si256(sy, 1); - // Perform multiplication and create 16-bit values - const __m128i dotl = _mm_maddubs_epi16(axl, syl); - const __m128i doth = _mm_maddubs_epi16(axh, syh); - return sum_i16_pairs_float(doth, dotl); -} - -// multiply int8_t, add results pairwise twice and return as float vector -static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { - const __m128i xl = _mm256_castsi256_si128(x); - const __m128i xh = _mm256_extractf128_si256(x, 1); - const __m128i yl = _mm256_castsi256_si128(y); - const __m128i yh = _mm256_extractf128_si256(y, 1); - // Get absolute values of x vectors - const __m128i axl = _mm_sign_epi8(xl, xl); - const __m128i axh = _mm_sign_epi8(xh, xh); - // Sign the values of the y vectors - const __m128i syl = _mm_sign_epi8(yl, xl); - const __m128i syh = _mm_sign_epi8(yh, xh); - // Perform multiplication and create 16-bit values - const __m128i dotl = _mm_maddubs_epi16(axl, syl); - const __m128i doth = _mm_maddubs_epi16(axh, syh); - return sum_i16_pairs_float(doth, dotl); -} - -static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 ) -{ - // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh - const __m128i lowByte = _mm_set1_epi16( 0xFF ); - __m128i high = _mm_andnot_si128( lowByte, bytes1 ); - __m128i low = _mm_and_si128( lowByte, bytes1 ); - high = _mm_srli_epi16( high, 4 ); - bytes1 = _mm_or_si128( low, high ); - high = _mm_andnot_si128( lowByte, bytes2 ); - low = _mm_and_si128( lowByte, bytes2 ); - high = _mm_srli_epi16( high, 4 ); - bytes2 = _mm_or_si128( low, high ); - - return _mm_packus_epi16( bytes1, bytes2); -} -#endif -#elif defined(__SSSE3__) -// horizontally add 4x4 floats -static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) { - __m128 res_0 =_mm_hadd_ps(a, b); - __m128 res_1 =_mm_hadd_ps(c, d); - __m128 res =_mm_hadd_ps(res_0, res_1); - res =_mm_hadd_ps(res, res); - res =_mm_hadd_ps(res, res); +static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = { + [GGML_TYPE_I8] = { + .type_name = "i8", + .blck_size = 1, + .type_size = sizeof(int8_t), + .is_quantized = false, + }, + [GGML_TYPE_I16] = { + .type_name = "i16", + .blck_size = 1, + .type_size = sizeof(int16_t), + .is_quantized = false, + }, + [GGML_TYPE_I32] = { + .type_name = "i32", + .blck_size = 1, + .type_size = sizeof(int32_t), + .is_quantized = false, + }, + [GGML_TYPE_F32] = { + .type_name = "f32", + .blck_size = 1, + 
.type_size = sizeof(float), + .is_quantized = false, + .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32, + .vec_dot_type = GGML_TYPE_F32, + }, + [GGML_TYPE_F16] = { + .type_name = "f16", + .blck_size = 1, + .type_size = sizeof(ggml_fp16_t), + .is_quantized = false, + .to_float = (ggml_to_float_t) ggml_fp16_to_fp32_row, + .from_float = (ggml_from_float_t) ggml_fp32_to_fp16_row, + .from_float_reference = (ggml_from_float_t) ggml_fp32_to_fp16_row, + .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f16, + .vec_dot_type = GGML_TYPE_F16, + }, + [GGML_TYPE_Q4_0] = { + .type_name = "q4_0", + .blck_size = QK4_0, + .type_size = sizeof(block_q4_0), + .is_quantized = true, + .to_float = (ggml_to_float_t) dequantize_row_q4_0, + .from_float = quantize_row_q4_0, + .from_float_reference = (ggml_from_float_t) quantize_row_q4_0_reference, + .vec_dot = ggml_vec_dot_q4_0_q8_0, + .vec_dot_type = GGML_TYPE_Q8_0, + }, + [GGML_TYPE_Q4_1] = { + .type_name = "q4_1", + .blck_size = QK4_1, + .type_size = sizeof(block_q4_1), + .is_quantized = true, + .to_float = (ggml_to_float_t) dequantize_row_q4_1, + .from_float = quantize_row_q4_1, + .from_float_reference = (ggml_from_float_t) quantize_row_q4_1_reference, + .vec_dot = ggml_vec_dot_q4_1_q8_1, + .vec_dot_type = GGML_TYPE_Q8_1, + }, + [GGML_TYPE_Q5_0] = { + .type_name = "q5_0", + .blck_size = QK5_0, + .type_size = sizeof(block_q5_0), + .is_quantized = true, + .to_float = (ggml_to_float_t) dequantize_row_q5_0, + .from_float = quantize_row_q5_0, + .from_float_reference = (ggml_from_float_t) quantize_row_q5_0_reference, + .vec_dot = ggml_vec_dot_q5_0_q8_0, + .vec_dot_type = GGML_TYPE_Q8_0, + }, + [GGML_TYPE_Q5_1] = { + .type_name = "q5_1", + .blck_size = QK5_1, + .type_size = sizeof(block_q5_1), + .is_quantized = true, + .to_float = (ggml_to_float_t) dequantize_row_q5_1, + .from_float = quantize_row_q5_1, + .from_float_reference = (ggml_from_float_t) quantize_row_q5_1_reference, + .vec_dot = ggml_vec_dot_q5_1_q8_1, + .vec_dot_type = GGML_TYPE_Q8_1, + }, + [GGML_TYPE_Q8_0] = { + .type_name = "q8_0", + .blck_size = QK8_0, + .type_size = sizeof(block_q8_0), + .is_quantized = true, + .to_float = (ggml_to_float_t) dequantize_row_q8_0, + .from_float = quantize_row_q8_0, + .from_float_reference = (ggml_from_float_t) quantize_row_q8_0_reference, + .vec_dot = ggml_vec_dot_q8_0_q8_0, + .vec_dot_type = GGML_TYPE_Q8_0, + }, + [GGML_TYPE_Q8_1] = { + .type_name = "q8_1", + .blck_size = QK8_1, + .type_size = sizeof(block_q8_1), + .is_quantized = true, + .from_float = quantize_row_q8_1, + .from_float_reference = (ggml_from_float_t) quantize_row_q8_1_reference, + .vec_dot_type = GGML_TYPE_Q8_1, + }, + [GGML_TYPE_Q2_K] = { + .type_name = "q2_K", + .blck_size = QK_K, + .type_size = sizeof(block_q2_K), + .is_quantized = true, + .to_float = (ggml_to_float_t) dequantize_row_q2_K, + .from_float = quantize_row_q2_K, + .from_float_reference = (ggml_from_float_t) quantize_row_q2_K_reference, + .vec_dot = ggml_vec_dot_q2_K_q8_K, + .vec_dot_type = GGML_TYPE_Q8_K, + }, + [GGML_TYPE_Q3_K] = { + .type_name = "q3_K", + .blck_size = QK_K, + .type_size = sizeof(block_q3_K), + .is_quantized = true, + .to_float = (ggml_to_float_t) dequantize_row_q3_K, + .from_float = quantize_row_q3_K, + .from_float_reference = (ggml_from_float_t) quantize_row_q3_K_reference, + .vec_dot = ggml_vec_dot_q3_K_q8_K, + .vec_dot_type = GGML_TYPE_Q8_K, + }, + [GGML_TYPE_Q4_K] = { + .type_name = "q4_K", + .blck_size = QK_K, + .type_size = sizeof(block_q4_K), + .is_quantized = true, + .to_float = (ggml_to_float_t) dequantize_row_q4_K, + 
.from_float = quantize_row_q4_K, + .from_float_reference = (ggml_from_float_t) quantize_row_q4_K_reference, + .vec_dot = ggml_vec_dot_q4_K_q8_K, + .vec_dot_type = GGML_TYPE_Q8_K, + }, + [GGML_TYPE_Q5_K] = { + .type_name = "q5_K", + .blck_size = QK_K, + .type_size = sizeof(block_q5_K), + .is_quantized = true, + .to_float = (ggml_to_float_t) dequantize_row_q5_K, + .from_float = quantize_row_q5_K, + .from_float_reference = (ggml_from_float_t) quantize_row_q5_K_reference, + .vec_dot = ggml_vec_dot_q5_K_q8_K, + .vec_dot_type = GGML_TYPE_Q8_K, + }, + [GGML_TYPE_Q6_K] = { + .type_name = "q6_K", + .blck_size = QK_K, + .type_size = sizeof(block_q6_K), + .is_quantized = true, + .to_float = (ggml_to_float_t) dequantize_row_q6_K, + .from_float = quantize_row_q6_K, + .from_float_reference = (ggml_from_float_t) quantize_row_q6_K_reference, + .vec_dot = ggml_vec_dot_q6_K_q8_K, + .vec_dot_type = GGML_TYPE_Q8_K, + }, + [GGML_TYPE_Q8_K] = { + .type_name = "q8_K", + .blck_size = QK_K, + .type_size = sizeof(block_q8_K), + .is_quantized = true, + .from_float = quantize_row_q8_K, + } +}; - return _mm_cvtss_f32(res); +// For internal test use +ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) { + GGML_ASSERT(type < GGML_TYPE_COUNT); + return type_traits[type]; } -#endif // __AVX__ || __AVX2__ || __AVX512F__ -#endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) - -#if defined(__ARM_NEON) -#if !defined(__aarch64__) +// +// simd mappings +// -inline static int32_t vaddvq_s32(int32x4_t v) { - return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3); -} +// we define a common set of C macros which map to specific intrinsics based on the current architecture +// we then implement the fundamental computation operations below using only these macros +// adding support for new architectures requires to define the corresponding SIMD macros +// +// GGML_F32_STEP / GGML_F16_STEP +// number of elements to process in a single step +// +// GGML_F32_EPR / GGML_F16_EPR +// number of elements to fit in a single register +// -inline static float vaddvq_f32(float32x4_t v) { - return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3); -} +#if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA) -inline static float vmaxvq_f32(float32x4_t v) { - return - MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)), - MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3))); -} +#define GGML_SIMD -inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) { - int32x4_t res; +// F32 NEON - res[0] = roundf(vgetq_lane_f32(v, 0)); - res[1] = roundf(vgetq_lane_f32(v, 1)); - res[2] = roundf(vgetq_lane_f32(v, 2)); - res[3] = roundf(vgetq_lane_f32(v, 3)); +#define GGML_F32_STEP 16 +#define GGML_F32_EPR 4 - return res; +#define GGML_F32x4 float32x4_t +#define GGML_F32x4_ZERO vdupq_n_f32(0.0f) +#define GGML_F32x4_SET1(x) vdupq_n_f32(x) +#define GGML_F32x4_LOAD vld1q_f32 +#define GGML_F32x4_STORE vst1q_f32 +#define GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c) +#define GGML_F32x4_ADD vaddq_f32 +#define GGML_F32x4_MUL vmulq_f32 +#define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x) +#define GGML_F32x4_REDUCE(res, x) \ +{ \ + int offset = GGML_F32_ARR >> 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = vaddq_f32(x[i], x[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = vaddq_f32(x[i], x[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = vaddq_f32(x[i], x[offset+i]); \ + 
} \ + res = GGML_F32x4_REDUCE_ONE(x[0]); \ } -#endif -#endif - -#define QK4_0 32 -typedef struct { - ggml_fp16_t d; // delta - uint8_t qs[QK4_0 / 2]; // nibbles / quants -} block_q4_0; -static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding"); - -#define QK4_1 32 -typedef struct { - ggml_fp16_t d; // delta - ggml_fp16_t m; // min - uint8_t qs[QK4_1 / 2]; // nibbles / quants -} block_q4_1; -static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_fp16_t) + QK4_1 / 2, "wrong q4_1 block size/padding"); - -#define QK5_0 32 -typedef struct { - ggml_fp16_t d; // delta - uint8_t qh[4]; // 5-th bit of quants - uint8_t qs[QK5_0 / 2]; // nibbles / quants -} block_q5_0; -static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding"); - -#define QK5_1 32 -typedef struct { - ggml_fp16_t d; // delta - ggml_fp16_t m; // min - uint8_t qh[4]; // 5-th bit of quants - uint8_t qs[QK5_1 / 2]; // nibbles / quants -} block_q5_1; -static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding"); - -#define QK8_0 32 -typedef struct { - ggml_fp16_t d; // delta - int8_t qs[QK8_0]; // quants -} block_q8_0; -static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding"); - -#define QK8_1 32 -typedef struct { - float d; // delta - float s; // d * sum(qs[i]) - int8_t qs[QK8_1]; // quants -} block_q8_1; -static_assert(sizeof(block_q8_1) == 2*sizeof(float) + QK8_1, "wrong q8_1 block size/padding"); - -// reference implementation for deterministic creation of model files -static void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) { - static const int qk = QK4_0; - - assert(k % qk == 0); - - const int nb = k / qk; - - for (int i = 0; i < nb; i++) { - float amax = 0.0f; // absolute max - float max = 0.0f; - - for (int j = 0; j < qk; j++) { - const float v = x[i*qk + j]; - if (amax < fabsf(v)) { - amax = fabsf(v); - max = v; - } - } +#define GGML_F32_VEC GGML_F32x4 +#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO +#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 +#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD +#define GGML_F32_VEC_STORE GGML_F32x4_STORE +#define GGML_F32_VEC_FMA GGML_F32x4_FMA +#define GGML_F32_VEC_ADD GGML_F32x4_ADD +#define GGML_F32_VEC_MUL GGML_F32x4_MUL +#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE - const float d = max / -8; - const float id = d ? 1.0f/d : 0.0f; +// F16 NEON - y[i].d = GGML_FP32_TO_FP16(d); +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + #define GGML_F16_STEP 32 + #define GGML_F16_EPR 8 - for (int j = 0; j < qk/2; ++j) { - const float x0 = x[i*qk + 0 + j]*id; - const float x1 = x[i*qk + qk/2 + j]*id; - - const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f)); - const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f)); - - y[i].qs[j] = xi0; - y[i].qs[j] |= xi1 << 4; - } - } -} - -static void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) { - quantize_row_q4_0_reference(x, y, k); -} - -static void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) { - const int qk = QK4_1; - - assert(k % qk == 0); - - const int nb = k / qk; - - for (int i = 0; i < nb; i++) { - float min = FLT_MAX; - float max = -FLT_MAX; - - for (int j = 0; j < qk; j++) { - const float v = x[i*qk + j]; - - if (v < min) min = v; - if (v > max) max = v; - } - - const float d = (max - min) / ((1 << 4) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - y[i].m = GGML_FP32_TO_FP16(min); - - for (int j = 0; j < qk/2; ++j) { - const float x0 = (x[i*qk + 0 + j] - min)*id; - const float x1 = (x[i*qk + qk/2 + j] - min)*id; - - const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f)); - const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f)); - - y[i].qs[j] = xi0; - y[i].qs[j] |= xi1 << 4; - } - } -} - -static void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) { - quantize_row_q4_1_reference(x, y, k); -} - -static void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) { - static const int qk = QK5_0; - - assert(k % qk == 0); - - const int nb = k / qk; - - for (int i = 0; i < nb; i++) { - float amax = 0.0f; // absolute max - float max = 0.0f; - - for (int j = 0; j < qk; j++) { - const float v = x[i*qk + j]; - if (amax < fabsf(v)) { - amax = fabsf(v); - max = v; - } - } - - const float d = max / -16; - const float id = d ? 1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - uint32_t qh = 0; - - for (int j = 0; j < qk/2; ++j) { - const float x0 = x[i*qk + 0 + j]*id; - const float x1 = x[i*qk + qk/2 + j]*id; - - const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f)); - const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f)); - - y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4); - - // get the 5-th bit and store it in qh at the right position - qh |= ((xi0 & 0x10u) >> 4) << (j + 0); - qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2); - } - - memcpy(&y[i].qh, &qh, sizeof(qh)); - } -} - -static void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) { - quantize_row_q5_0_reference(x, y, k); -} - -static void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) { - const int qk = QK5_1; - - assert(k % qk == 0); - - const int nb = k / qk; - - for (int i = 0; i < nb; i++) { - float min = FLT_MAX; - float max = -FLT_MAX; - - for (int j = 0; j < qk; j++) { - const float v = x[i*qk + j]; - - if (v < min) min = v; - if (v > max) max = v; - } - - const float d = (max - min) / ((1 << 5) - 1); - const float id = d ? 1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - y[i].m = GGML_FP32_TO_FP16(min); - - uint32_t qh = 0; - - for (int j = 0; j < qk/2; ++j) { - const float x0 = (x[i*qk + 0 + j] - min)*id; - const float x1 = (x[i*qk + qk/2 + j] - min)*id; - - const uint8_t xi0 = (uint8_t)(x0 + 0.5f); - const uint8_t xi1 = (uint8_t)(x1 + 0.5f); - - y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4); - - // get the 5-th bit and store it in qh at the right position - qh |= ((xi0 & 0x10u) >> 4) << (j + 0); - qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2); - } - - memcpy(&y[i].qh, &qh, sizeof(y[i].qh)); - } -} - -static void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) { - quantize_row_q5_1_reference(x, y, k); -} - -// reference implementation for deterministic creation of model files -static void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) { - assert(k % QK8_0 == 0); - const int nb = k / QK8_0; - - for (int i = 0; i < nb; i++) { - float amax = 0.0f; // absolute max - - for (int j = 0; j < QK8_0; j++) { - const float v = x[i*QK8_0 + j]; - amax = MAX(amax, fabsf(v)); - } - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - for (int j = 0; j < QK8_0; ++j) { - const float x0 = x[i*QK8_0 + j]*id; - - y[i].qs[j] = roundf(x0); - } - } -} - -static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) { - assert(QK8_0 == 32); - assert(k % QK8_0 == 0); - const int nb = k / QK8_0; - - block_q8_0 * restrict y = vy; - -#if defined(__ARM_NEON) - for (int i = 0; i < nb; i++) { - float32x4_t srcv [8]; - float32x4_t asrcv[8]; - float32x4_t amaxv[8]; - - for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]); - - for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]); - - const float amax = vmaxvq_f32(amaxv[0]); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - for (int j = 0; j < 8; j++) { - const float32x4_t v = vmulq_n_f32(srcv[j], id); - const int32x4_t vi = vcvtnq_s32_f32(v); - - y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0); - y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1); - y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2); - y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3); - } - } -#elif defined(__wasm_simd128__) - for (int i = 0; i < nb; i++) { - v128_t srcv [8]; - v128_t asrcv[8]; - v128_t amaxv[8]; - - for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]); - - for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]); - - const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0), - wasm_f32x4_extract_lane(amaxv[0], 1)), - MAX(wasm_f32x4_extract_lane(amaxv[0], 2), - wasm_f32x4_extract_lane(amaxv[0], 3))); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - for (int j = 0; j < 8; j++) { - const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); - const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v); - - y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0); - y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1); - y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2); - y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3); - } - } -#elif defined(__AVX2__) || defined(__AVX__) - for (int i = 0; i < nb; i++) { - // Load elements into 4 AVX vectors - __m256 v0 = _mm256_loadu_ps( x ); - __m256 v1 = _mm256_loadu_ps( x + 8 ); - __m256 v2 = _mm256_loadu_ps( x + 16 ); - __m256 v3 = _mm256_loadu_ps( x + 24 ); - x += 32; - - // Compute max(abs(e)) for the block - const __m256 signBit = _mm256_set1_ps( -0.0f ); - __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); - - __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); - max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); - max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); - const float maxScalar = _mm_cvtss_f32( max4 ); - - // Quantize these floats - const float d = maxScalar / 127.f; - y[i].d = GGML_FP32_TO_FP16(d); - const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f; - const __m256 mul = _mm256_set1_ps( id ); - - // Apply the multiplier - v0 = _mm256_mul_ps( v0, mul ); - v1 = _mm256_mul_ps( v1, mul ); - v2 = _mm256_mul_ps( v2, mul ); - v3 = _mm256_mul_ps( v3, mul ); - - // Round to nearest integer - v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); - v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); - v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); - v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); - - // Convert floats to integers - __m256i i0 = _mm256_cvtps_epi32( v0 ); - __m256i i1 = _mm256_cvtps_epi32( v1 ); - __m256i i2 = _mm256_cvtps_epi32( v2 ); - __m256i i3 = _mm256_cvtps_epi32( v3 ); - -#if defined(__AVX2__) - // Convert int32 to int16 - i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 - i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 - // Convert int16 to int8 - i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 - - // We got our precious signed bytes, but the order is now wrong - // These AVX2 pack instructions process 16-byte pieces independently - // The following instruction is fixing the order - const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); - i0 = _mm256_permutevar8x32_epi32( i0, perm ); - - _mm256_storeu_si256((__m256i *)y[i].qs, i0); -#else - // Since we don't have in AVX some necessary functions, - // we split the registers in half and call AVX2 analogs from SSE - __m128i ni0 = _mm256_castsi256_si128( i0 ); - __m128i ni1 = _mm256_extractf128_si256( i0, 1); - __m128i ni2 = _mm256_castsi256_si128( i1 ); - __m128i ni3 = _mm256_extractf128_si256( i1, 1); - __m128i ni4 = _mm256_castsi256_si128( i2 ); - __m128i ni5 = _mm256_extractf128_si256( i2, 1); - __m128i ni6 = _mm256_castsi256_si128( i3 ); - __m128i ni7 = _mm256_extractf128_si256( i3, 1); - - // Convert int32 to int16 - ni0 = _mm_packs_epi32( ni0, ni1 ); - ni2 = _mm_packs_epi32( ni2, ni3 ); - ni4 = _mm_packs_epi32( ni4, 
ni5 ); - ni6 = _mm_packs_epi32( ni6, ni7 ); - // Convert int16 to int8 - ni0 = _mm_packs_epi16( ni0, ni2 ); - ni4 = _mm_packs_epi16( ni4, ni6 ); - - _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); - _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4); -#endif - } -#elif defined(__riscv_v_intrinsic) - - size_t vl = __riscv_vsetvl_e32m4(QK8_0); - - for (int i = 0; i < nb; i++) { - // load elements - vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_0, vl); - - vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl); - vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0f, vl); - vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl); - float amax = __riscv_vfmv_f_s_f32m1_f32(vmax); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl); - - // convert to integer - vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl); - vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl); - - // store result - __riscv_vse8_v_i8m1(y[i].qs , vs, vl); - } -#else - // scalar - quantize_row_q8_0_reference(x, y, k); -#endif -} - -// reference implementation for deterministic creation of model files -static void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) { - assert(QK8_1 == 32); - assert(k % QK8_1 == 0); - const int nb = k / QK8_1; - - for (int i = 0; i < nb; i++) { - float amax = 0.0f; // absolute max - - for (int j = 0; j < QK8_1; j++) { - const float v = x[i*QK8_1 + j]; - amax = MAX(amax, fabsf(v)); - } - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 1.0f/d : 0.0f; - - y[i].d = d; - - int sum = 0; - - for (int j = 0; j < QK8_1/2; ++j) { - const float v0 = x[i*QK8_1 + j]*id; - const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id; - - y[i].qs[ j] = roundf(v0); - y[i].qs[QK8_1/2 + j] = roundf(v1); - - sum += y[i].qs[ j]; - sum += y[i].qs[QK8_1/2 + j]; - } - - y[i].s = sum*d; - } -} - -static void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) { - assert(k % QK8_1 == 0); - const int nb = k / QK8_1; - - block_q8_1 * restrict y = vy; - -#if defined(__ARM_NEON) - for (int i = 0; i < nb; i++) { - float32x4_t srcv [8]; - float32x4_t asrcv[8]; - float32x4_t amaxv[8]; - - for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]); - - for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]); - - const float amax = vmaxvq_f32(amaxv[0]); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - y[i].d = d; - - int32x4_t accv = vdupq_n_s32(0); - - for (int j = 0; j < 8; j++) { - const float32x4_t v = vmulq_n_f32(srcv[j], id); - const int32x4_t vi = vcvtnq_s32_f32(v); - - y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0); - y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1); - y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2); - y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3); - - accv = vaddq_s32(accv, vi); - } - - y[i].s = d * vaddvq_s32(accv); - } -#elif defined(__wasm_simd128__) - for (int i = 0; i < nb; i++) { - v128_t srcv [8]; - v128_t asrcv[8]; - v128_t amaxv[8]; - - for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]); - - for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]); - - const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0), - wasm_f32x4_extract_lane(amaxv[0], 1)), - MAX(wasm_f32x4_extract_lane(amaxv[0], 2), - wasm_f32x4_extract_lane(amaxv[0], 3))); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 1.0f/d : 0.0f; - - y[i].d = d; - - v128_t accv = wasm_i32x4_splat(0); - - for (int j = 0; j < 8; j++) { - const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); - const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v); - - y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0); - y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1); - y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2); - y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3); - - accv = wasm_i32x4_add(accv, vi); - } - - y[i].s = d * (wasm_i32x4_extract_lane(accv, 0) + - wasm_i32x4_extract_lane(accv, 1) + - wasm_i32x4_extract_lane(accv, 2) + - wasm_i32x4_extract_lane(accv, 3)); - } -#elif defined(__AVX2__) || defined(__AVX__) - for (int i = 0; i < nb; i++) { - // Load elements into 4 AVX vectors - __m256 v0 = _mm256_loadu_ps( x ); - __m256 v1 = _mm256_loadu_ps( x + 8 ); - __m256 v2 = _mm256_loadu_ps( x + 16 ); - __m256 v3 = _mm256_loadu_ps( x + 24 ); - x += 32; - - // Compute max(abs(e)) for the block - const __m256 signBit = _mm256_set1_ps( -0.0f ); - __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); - - __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); - max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); - max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); - const float maxScalar = _mm_cvtss_f32( max4 ); - - // Quantize these floats - const float d = maxScalar / 127.f; - y[i].d = d; - const float id = ( maxScalar != 0.0f ) ? 
127.f / maxScalar : 0.0f; - const __m256 mul = _mm256_set1_ps( id ); - - // Apply the multiplier - v0 = _mm256_mul_ps( v0, mul ); - v1 = _mm256_mul_ps( v1, mul ); - v2 = _mm256_mul_ps( v2, mul ); - v3 = _mm256_mul_ps( v3, mul ); - - // Round to nearest integer - v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); - v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); - v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); - v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); - - // Convert floats to integers - __m256i i0 = _mm256_cvtps_epi32( v0 ); - __m256i i1 = _mm256_cvtps_epi32( v1 ); - __m256i i2 = _mm256_cvtps_epi32( v2 ); - __m256i i3 = _mm256_cvtps_epi32( v3 ); - -#if defined(__AVX2__) - // Compute the sum of the quants and set y[i].s - y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3))); - - // Convert int32 to int16 - i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 - i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 - // Convert int16 to int8 - i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 - - // We got our precious signed bytes, but the order is now wrong - // These AVX2 pack instructions process 16-byte pieces independently - // The following instruction is fixing the order - const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); - i0 = _mm256_permutevar8x32_epi32( i0, perm ); - - _mm256_storeu_si256((__m256i *)y[i].qs, i0); -#else - // Since we don't have in AVX some necessary functions, - // we split the registers in half and call AVX2 analogs from SSE - __m128i ni0 = _mm256_castsi256_si128( i0 ); - __m128i ni1 = _mm256_extractf128_si256( i0, 1); - __m128i ni2 = _mm256_castsi256_si128( i1 ); - __m128i ni3 = _mm256_extractf128_si256( i1, 1); - __m128i ni4 = _mm256_castsi256_si128( i2 ); - __m128i ni5 = _mm256_extractf128_si256( i2, 1); - __m128i ni6 = _mm256_castsi256_si128( i3 ); - __m128i ni7 = _mm256_extractf128_si256( i3, 1); - - // Compute the sum of the quants and set y[i].s - const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3)); - const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7)); - y[i].s = d * hsum_i32_4(_mm_add_epi32(s0, s1)); - - // Convert int32 to int16 - ni0 = _mm_packs_epi32( ni0, ni1 ); - ni2 = _mm_packs_epi32( ni2, ni3 ); - ni4 = _mm_packs_epi32( ni4, ni5 ); - ni6 = _mm_packs_epi32( ni6, ni7 ); - // Convert int16 to int8 - ni0 = _mm_packs_epi16( ni0, ni2 ); - ni4 = _mm_packs_epi16( ni4, ni6 ); - - _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); - _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4); -#endif - } -#elif defined(__riscv_v_intrinsic) - - size_t vl = __riscv_vsetvl_e32m4(QK8_1); - - for (int i = 0; i < nb; i++) { - // load elements - vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_1, vl); - - vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl); - vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0, vl); - vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl); - float amax = __riscv_vfmv_f_s_f32m1_f32(vmax); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - y[i].d = d; - - vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl); - - // convert to integer - vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl); - vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl); - - // store result - __riscv_vse8_v_i8m1(y[i].qs , vs, vl); - - // compute sum for y[i].s - vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl); - vint16m1_t vwrs = __riscv_vwredsum_vs_i8m1_i16m1(vs, tmp2, vl); - - // set y[i].s - int sum = __riscv_vmv_x_s_i16m1_i16(vwrs); - y[i].s = sum*d; - } -#else - // scalar - quantize_row_q8_1_reference(x, y, k); -#endif -} - -static void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) { - static const int qk = QK4_0; - - assert(k % qk == 0); - - const int nb = k / qk; - - for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d); - - for (int j = 0; j < qk/2; ++j) { - const int x0 = (x[i].qs[j] & 0x0F) - 8; - const int x1 = (x[i].qs[j] >> 4) - 8; - - y[i*qk + j + 0 ] = x0*d; - y[i*qk + j + qk/2] = x1*d; - } - } -} - -static void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) { - static const int qk = QK4_1; - - assert(k % qk == 0); - - const int nb = k / qk; - - for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d); - const float m = GGML_FP16_TO_FP32(x[i].m); - - for (int j = 0; j < qk/2; ++j) { - const int x0 = (x[i].qs[j] & 0x0F); - const int x1 = (x[i].qs[j] >> 4); - - y[i*qk + j + 0 ] = x0*d + m; - y[i*qk + j + qk/2] = x1*d + m; - } - } -} - -static void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) { - static const int qk = QK5_0; - - assert(k % qk == 0); - - const int nb = k / qk; - - for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d); - - uint32_t qh; - memcpy(&qh, x[i].qh, sizeof(qh)); - - for (int j = 0; j < qk/2; ++j) { - const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; - const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; - - const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16; - const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16; - - y[i*qk + j + 0 ] = x0*d; - y[i*qk + j + qk/2] = x1*d; - } - } -} - -static void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k) { - static const int qk = QK5_1; - - assert(k % qk == 0); - - const int nb = k / qk; - - for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d); - const float m = GGML_FP16_TO_FP32(x[i].m); - - uint32_t qh; - memcpy(&qh, x[i].qh, sizeof(qh)); - - for (int j = 0; j < qk/2; ++j) { - const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; - const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; - - const int x0 = (x[i].qs[j] & 0x0F) | xh_0; - const int x1 = (x[i].qs[j] >> 4) | xh_1; - - y[i*qk + j + 0 ] = x0*d + m; - y[i*qk + j + qk/2] = x1*d + m; - } - } -} - -static void dequantize_row_q8_0(const void * restrict vx, float * restrict y, int k) { - static const int qk = QK8_0; - - assert(k % qk == 0); - - const int nb = k / qk; - - const block_q8_0 * restrict x = vx; - - for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d); - - for (int j = 0; j < qk; ++j) { - y[i*qk + j] = x[i].qs[j]*d; - } - } -} - -static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y); -static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y); -static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); -static 
void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); -static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); -static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); -static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); - -static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = { - [GGML_TYPE_I8] = { - .type_name = "i8", - .blck_size = 1, - .type_size = sizeof(int8_t), - .is_quantized = false, - }, - [GGML_TYPE_I16] = { - .type_name = "i16", - .blck_size = 1, - .type_size = sizeof(int16_t), - .is_quantized = false, - }, - [GGML_TYPE_I32] = { - .type_name = "i32", - .blck_size = 1, - .type_size = sizeof(int32_t), - .is_quantized = false, - }, - [GGML_TYPE_F32] = { - .type_name = "f32", - .blck_size = 1, - .type_size = sizeof(float), - .is_quantized = false, - .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32, - .vec_dot_type = GGML_TYPE_F32, - }, - [GGML_TYPE_F16] = { - .type_name = "f16", - .blck_size = 1, - .type_size = sizeof(ggml_fp16_t), - .is_quantized = false, - .to_float = (ggml_to_float_t) ggml_fp16_to_fp32_row, - .from_float = (ggml_from_float_t) ggml_fp32_to_fp16_row, - .from_float_reference = (ggml_from_float_t) ggml_fp32_to_fp16_row, - .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f16, - .vec_dot_type = GGML_TYPE_F16, - }, - [GGML_TYPE_Q4_0] = { - .type_name = "q4_0", - .blck_size = QK4_0, - .type_size = sizeof(block_q4_0), - .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q4_0, - .from_float = quantize_row_q4_0, - .from_float_reference = (ggml_from_float_t) quantize_row_q4_0_reference, - .vec_dot = ggml_vec_dot_q4_0_q8_0, - .vec_dot_type = GGML_TYPE_Q8_0, - }, - [GGML_TYPE_Q4_1] = { - .type_name = "q4_1", - .blck_size = QK4_1, - .type_size = sizeof(block_q4_1), - .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q4_1, - .from_float = quantize_row_q4_1, - .from_float_reference = (ggml_from_float_t) quantize_row_q4_1_reference, - .vec_dot = ggml_vec_dot_q4_1_q8_1, - .vec_dot_type = GGML_TYPE_Q8_1, - }, - [GGML_TYPE_Q5_0] = { - .type_name = "q5_0", - .blck_size = QK5_0, - .type_size = sizeof(block_q5_0), - .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q5_0, - .from_float = quantize_row_q5_0, - .from_float_reference = (ggml_from_float_t) quantize_row_q5_0_reference, - .vec_dot = ggml_vec_dot_q5_0_q8_0, - .vec_dot_type = GGML_TYPE_Q8_0, - }, - [GGML_TYPE_Q5_1] = { - .type_name = "q5_1", - .blck_size = QK5_1, - .type_size = sizeof(block_q5_1), - .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q5_1, - .from_float = quantize_row_q5_1, - .from_float_reference = (ggml_from_float_t) quantize_row_q5_1_reference, - .vec_dot = ggml_vec_dot_q5_1_q8_1, - .vec_dot_type = GGML_TYPE_Q8_1, - }, - [GGML_TYPE_Q8_0] = { - .type_name = "q8_0", - .blck_size = QK8_0, - .type_size = sizeof(block_q8_0), - .is_quantized = true, - .to_float = dequantize_row_q8_0, - .from_float = quantize_row_q8_0, - .from_float_reference = (ggml_from_float_t) quantize_row_q8_0_reference, - .vec_dot = ggml_vec_dot_q8_0_q8_0, - .vec_dot_type = GGML_TYPE_Q8_0, - }, - [GGML_TYPE_Q8_1] = { - .type_name = "q8_1", - .blck_size = QK8_1, - .type_size = sizeof(block_q8_1), - .is_quantized = true, - .from_float = quantize_row_q8_1, - .from_float_reference = (ggml_from_float_t) 
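A hedged usage sketch for the traits table being defined here: callers normally go through ggml_internal_get_type_traits() (defined right after the table) rather than naming the per-type functions, and the row length must be a multiple of blck_size.

//   const ggml_type_traits_t traits = ggml_internal_get_type_traits(GGML_TYPE_Q4_0);
//   float dst[256];                        // 256 % traits.blck_size == 0
//   traits.to_float(src_q4_0, dst, 256);   // dequantize one row of 256 values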
quantize_row_q8_1_reference, - .vec_dot_type = GGML_TYPE_Q8_1, - }, -#ifdef GGML_USE_K_QUANTS - [GGML_TYPE_Q2_K] = { - .type_name = "q2_K", - .blck_size = QK_K, - .type_size = sizeof(block_q2_K), - .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q2_K, - .from_float = quantize_row_q2_K, - .from_float_reference = (ggml_from_float_t) quantize_row_q2_K_reference, - .vec_dot = ggml_vec_dot_q2_K_q8_K, - .vec_dot_type = GGML_TYPE_Q8_K, - }, - [GGML_TYPE_Q3_K] = { - .type_name = "q3_K", - .blck_size = QK_K, - .type_size = sizeof(block_q3_K), - .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q3_K, - .from_float = quantize_row_q3_K, - .from_float_reference = (ggml_from_float_t) quantize_row_q3_K_reference, - .vec_dot = ggml_vec_dot_q3_K_q8_K, - .vec_dot_type = GGML_TYPE_Q8_K, - }, - [GGML_TYPE_Q4_K] = { - .type_name = "q4_K", - .blck_size = QK_K, - .type_size = sizeof(block_q4_K), - .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q4_K, - .from_float = quantize_row_q4_K, - .from_float_reference = (ggml_from_float_t) quantize_row_q4_K_reference, - .vec_dot = ggml_vec_dot_q4_K_q8_K, - .vec_dot_type = GGML_TYPE_Q8_K, - }, - [GGML_TYPE_Q5_K] = { - .type_name = "q5_K", - .blck_size = QK_K, - .type_size = sizeof(block_q5_K), - .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q5_K, - .from_float = quantize_row_q5_K, - .from_float_reference = (ggml_from_float_t) quantize_row_q5_K_reference, - .vec_dot = ggml_vec_dot_q5_K_q8_K, - .vec_dot_type = GGML_TYPE_Q8_K, - }, - [GGML_TYPE_Q6_K] = { - .type_name = "q6_K", - .blck_size = QK_K, - .type_size = sizeof(block_q6_K), - .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_q6_K, - .from_float = quantize_row_q6_K, - .from_float_reference = (ggml_from_float_t) quantize_row_q6_K_reference, - .vec_dot = ggml_vec_dot_q6_K_q8_K, - .vec_dot_type = GGML_TYPE_Q8_K, - }, - [GGML_TYPE_Q8_K] = { - .type_name = "q8_K", - .blck_size = QK_K, - .type_size = sizeof(block_q8_K), - .is_quantized = true, - .from_float = quantize_row_q8_K, - } -#endif -}; - -// For internal test use -ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) { - GGML_ASSERT(type < GGML_TYPE_COUNT); - return type_traits[type]; -} - -// -// simd mappings -// - -// we define a common set of C macros which map to specific intrinsics based on the current architecture -// we then implement the fundamental computation operations below using only these macros -// adding support for new architectures requires to define the corresponding SIMD macros -// -// GGML_F32_STEP / GGML_F16_STEP -// number of elements to process in a single step -// -// GGML_F32_EPR / GGML_F16_EPR -// number of elements to fit in a single register -// - -#if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA) - -#define GGML_SIMD - -// F32 NEON - -#define GGML_F32_STEP 16 -#define GGML_F32_EPR 4 - -#define GGML_F32x4 float32x4_t -#define GGML_F32x4_ZERO vdupq_n_f32(0.0f) -#define GGML_F32x4_SET1(x) vdupq_n_f32(x) -#define GGML_F32x4_LOAD vld1q_f32 -#define GGML_F32x4_STORE vst1q_f32 -#define GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c) -#define GGML_F32x4_ADD vaddq_f32 -#define GGML_F32x4_MUL vmulq_f32 -#define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x) -#define GGML_F32x4_REDUCE(res, x) \ -{ \ - int offset = GGML_F32_ARR >> 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = vaddq_f32(x[i], x[offset+i]); \ - } \ - offset >>= 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = vaddq_f32(x[i], x[offset+i]); \ - } \ - offset >>= 1; \ 
- for (int i = 0; i < offset; ++i) { \ - x[i] = vaddq_f32(x[i], x[offset+i]); \ - } \ - res = GGML_F32x4_REDUCE_ONE(x[0]); \ -} - -#define GGML_F32_VEC GGML_F32x4 -#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD -#define GGML_F32_VEC_STORE GGML_F32x4_STORE -#define GGML_F32_VEC_FMA GGML_F32x4_FMA -#define GGML_F32_VEC_ADD GGML_F32x4_ADD -#define GGML_F32_VEC_MUL GGML_F32x4_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE - -// F16 NEON - -#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) - #define GGML_F16_STEP 32 - #define GGML_F16_EPR 8 - - #define GGML_F16x8 float16x8_t - #define GGML_F16x8_ZERO vdupq_n_f16(0.0f) - #define GGML_F16x8_SET1(x) vdupq_n_f16(x) - #define GGML_F16x8_LOAD vld1q_f16 - #define GGML_F16x8_STORE vst1q_f16 - #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c) - #define GGML_F16x8_ADD vaddq_f16 - #define GGML_F16x8_MUL vmulq_f16 - #define GGML_F16x8_REDUCE(res, x) \ - do { \ - int offset = GGML_F16_ARR >> 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = vaddq_f16(x[i], x[offset+i]); \ - } \ - offset >>= 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = vaddq_f16(x[i], x[offset+i]); \ - } \ - offset >>= 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = vaddq_f16(x[i], x[offset+i]); \ - } \ - const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \ - const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \ - res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \ - } while (0) - - #define GGML_F16_VEC GGML_F16x8 - #define GGML_F16_VEC_ZERO GGML_F16x8_ZERO - #define GGML_F16_VEC_SET1 GGML_F16x8_SET1 - #define GGML_F16_VEC_LOAD(p, i) GGML_F16x8_LOAD(p) - #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i]) - #define GGML_F16_VEC_FMA GGML_F16x8_FMA - #define GGML_F16_VEC_ADD GGML_F16x8_ADD - #define GGML_F16_VEC_MUL GGML_F16x8_MUL - #define GGML_F16_VEC_REDUCE GGML_F16x8_REDUCE -#else - // if FP16 vector arithmetic is not supported, we use FP32 instead - // and take advantage of the vcvt_ functions to convert to/from FP16 - - #define GGML_F16_STEP 16 - #define GGML_F16_EPR 4 - - #define GGML_F32Cx4 float32x4_t - #define GGML_F32Cx4_ZERO vdupq_n_f32(0.0f) - #define GGML_F32Cx4_SET1(x) vdupq_n_f32(x) - #define GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16(x)) - #define GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y)) - #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c) - #define GGML_F32Cx4_ADD vaddq_f32 - #define GGML_F32Cx4_MUL vmulq_f32 - #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE - - #define GGML_F16_VEC GGML_F32Cx4 - #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO - #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 - #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) - #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i]) - #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA - #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD - #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL - #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE -#endif - -#elif defined(__AVX__) - -#define GGML_SIMD - -// F32 AVX - -#define GGML_F32_STEP 32 -#define GGML_F32_EPR 8 - -#define GGML_F32x8 __m256 -#define GGML_F32x8_ZERO _mm256_setzero_ps() -#define GGML_F32x8_SET1(x) _mm256_set1_ps(x) -#define GGML_F32x8_LOAD _mm256_loadu_ps -#define GGML_F32x8_STORE _mm256_storeu_ps -#if defined(__FMA__) - #define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a) -#else - #define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a) -#endif -#define GGML_F32x8_ADD _mm256_add_ps -#define GGML_F32x8_MUL _mm256_mul_ps -#define 
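All of the *_REDUCE macros above implement the same pairwise tree reduction over the GGML_F32_ARR (or GGML_F16_ARR) accumulator registers, followed by one horizontal add; a plain-C sketch with 4 accumulators of 4 lanes each:

static float sketch_reduce(float acc[4][4]) {
    // fold the upper half of the accumulators into the lower half: 4 -> 2 -> 1
    for (int offset = 4 >> 1; offset > 0; offset >>= 1) {
        for (int i = 0; i < offset; ++i) {
            for (int l = 0; l < 4; ++l) {
                acc[i][l] += acc[offset + i][l];
            }
        }
    }
    // horizontal add of the surviving register
    float res = 0.0f;
    for (int l = 0; l < 4; ++l) {
        res += acc[0][l];
    }
    return res;
}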
GGML_F32x8_REDUCE(res, x) \ -do { \ - int offset = GGML_F32_ARR >> 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = _mm256_add_ps(x[i], x[offset+i]); \ - } \ - offset >>= 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = _mm256_add_ps(x[i], x[offset+i]); \ - } \ - offset >>= 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = _mm256_add_ps(x[i], x[offset+i]); \ - } \ - const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \ - _mm256_extractf128_ps(x[0], 1)); \ - const __m128 t1 = _mm_hadd_ps(t0, t0); \ - res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \ -} while (0) -// TODO: is this optimal ? - -#define GGML_F32_VEC GGML_F32x8 -#define GGML_F32_VEC_ZERO GGML_F32x8_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x8_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x8_LOAD -#define GGML_F32_VEC_STORE GGML_F32x8_STORE -#define GGML_F32_VEC_FMA GGML_F32x8_FMA -#define GGML_F32_VEC_ADD GGML_F32x8_ADD -#define GGML_F32_VEC_MUL GGML_F32x8_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE - -// F16 AVX - -#define GGML_F16_STEP 32 -#define GGML_F16_EPR 8 - -// F16 arithmetic is not supported by AVX, so we use F32 instead - -#define GGML_F32Cx8 __m256 -#define GGML_F32Cx8_ZERO _mm256_setzero_ps() -#define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x) - -#if defined(__F16C__) -// the _mm256_cvt intrinsics require F16C -#define GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x))) -#define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0)) -#else -static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) { - float tmp[8]; - - for (int i = 0; i < 8; i++) { - tmp[i] = GGML_FP16_TO_FP32(x[i]); - } - - return _mm256_loadu_ps(tmp); -} -static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) { - float arr[8]; - - _mm256_storeu_ps(arr, y); - - for (int i = 0; i < 8; i++) - x[i] = GGML_FP32_TO_FP16(arr[i]); -} -#define GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x) -#define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y) -#endif - -#define GGML_F32Cx8_FMA GGML_F32x8_FMA -#define GGML_F32Cx8_ADD _mm256_add_ps -#define GGML_F32Cx8_MUL _mm256_mul_ps -#define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE - -#define GGML_F16_VEC GGML_F32Cx8 -#define GGML_F16_VEC_ZERO GGML_F32Cx8_ZERO -#define GGML_F16_VEC_SET1 GGML_F32Cx8_SET1 -#define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx8_LOAD(p) -#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i]) -#define GGML_F16_VEC_FMA GGML_F32Cx8_FMA -#define GGML_F16_VEC_ADD GGML_F32Cx8_ADD -#define GGML_F16_VEC_MUL GGML_F32Cx8_MUL -#define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE - -#elif defined(__POWER9_VECTOR__) - -#define GGML_SIMD - -// F32 POWER9 - -#define GGML_F32_STEP 32 -#define GGML_F32_EPR 4 - -#define GGML_F32x4 vector float -#define GGML_F32x4_ZERO 0.0f -#define GGML_F32x4_SET1 vec_splats -#define GGML_F32x4_LOAD(p) vec_xl(0, p) -#define GGML_F32x4_STORE(p, r) vec_xst(r, 0, p) -#define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a) -#define GGML_F32x4_ADD vec_add -#define GGML_F32x4_MUL vec_mul -#define GGML_F32x4_REDUCE(res, x) \ -{ \ - int offset = GGML_F32_ARR >> 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = vec_add(x[i], x[offset+i]); \ - } \ - offset >>= 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = vec_add(x[i], x[offset+i]); \ - } \ - offset >>= 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = vec_add(x[i], x[offset+i]); \ - } \ - res = vec_extract(x[0], 0) + \ - vec_extract(x[0], 1) + \ - vec_extract(x[0], 2) + \ - vec_extract(x[0], 3); \ -} - -#define GGML_F32_VEC GGML_F32x4 -#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO -#define 
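One detail of the FMA macros above that is easy to misread: the ggml argument order is (accumulator, x, y), so the macro computes acc + x*y and the intrinsic operands are reordered to match.

// GGML_F32x8_FMA(acc, x, y) == acc + x*y
//   FMA path     : _mm256_fmadd_ps(x, y, acc)
//   fallback path: _mm256_add_ps(_mm256_mul_ps(x, y), acc)   // same value, one extra rounding step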
GGML_F32_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD -#define GGML_F32_VEC_STORE GGML_F32x4_STORE -#define GGML_F32_VEC_FMA GGML_F32x4_FMA -#define GGML_F32_VEC_ADD GGML_F32x4_ADD -#define GGML_F32_VEC_MUL GGML_F32x4_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE - -// F16 POWER9 -#define GGML_F16_STEP GGML_F32_STEP -#define GGML_F16_EPR GGML_F32_EPR -#define GGML_F16_VEC GGML_F32x4 -#define GGML_F16_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F16_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F16_VEC_FMA GGML_F32x4_FMA -#define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE -// Use vec_xl, not vec_ld, in case the load address is not aligned. -#define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? \ - vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \ - vec_extract_fp32_from_shortl(vec_xl(0, p)) -#define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i] -#define GGML_F16_VEC_STORE(p, r, i) \ - if (i & 0x1) \ - vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)], \ - r[i - GGML_ENDIAN_BYTE(0)]), \ - 0, p - GGML_F16_EPR) - -#elif defined(__wasm_simd128__) - -#define GGML_SIMD - -// F32 WASM - -#define GGML_F32_STEP 16 -#define GGML_F32_EPR 4 - -#define GGML_F32x4 v128_t -#define GGML_F32x4_ZERO wasm_f32x4_splat(0.0f) -#define GGML_F32x4_SET1(x) wasm_f32x4_splat(x) -#define GGML_F32x4_LOAD wasm_v128_load -#define GGML_F32x4_STORE wasm_v128_store -#define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a) -#define GGML_F32x4_ADD wasm_f32x4_add -#define GGML_F32x4_MUL wasm_f32x4_mul -#define GGML_F32x4_REDUCE(res, x) \ -{ \ - int offset = GGML_F32_ARR >> 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ - } \ - offset >>= 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ - } \ - offset >>= 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ - } \ - res = wasm_f32x4_extract_lane(x[0], 0) + \ - wasm_f32x4_extract_lane(x[0], 1) + \ - wasm_f32x4_extract_lane(x[0], 2) + \ - wasm_f32x4_extract_lane(x[0], 3); \ -} - -#define GGML_F32_VEC GGML_F32x4 -#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD -#define GGML_F32_VEC_STORE GGML_F32x4_STORE -#define GGML_F32_VEC_FMA GGML_F32x4_FMA -#define GGML_F32_VEC_ADD GGML_F32x4_ADD -#define GGML_F32_VEC_MUL GGML_F32x4_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE - -// F16 WASM - -#define GGML_F16_STEP 16 -#define GGML_F16_EPR 4 - -inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) { - float tmp[4]; - - tmp[0] = GGML_FP16_TO_FP32(p[0]); - tmp[1] = GGML_FP16_TO_FP32(p[1]); - tmp[2] = GGML_FP16_TO_FP32(p[2]); - tmp[3] = GGML_FP16_TO_FP32(p[3]); - - return wasm_v128_load(tmp); -} - -inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) { - float tmp[4]; - - wasm_v128_store(tmp, x); - - p[0] = GGML_FP32_TO_FP16(tmp[0]); - p[1] = GGML_FP32_TO_FP16(tmp[1]); - p[2] = GGML_FP32_TO_FP16(tmp[2]); - p[3] = GGML_FP32_TO_FP16(tmp[3]); -} - -#define GGML_F16x4 v128_t -#define GGML_F16x4_ZERO wasm_f32x4_splat(0.0f) -#define GGML_F16x4_SET1(x) wasm_f32x4_splat(x) -#define GGML_F16x4_LOAD(x) __wasm_f16x4_load(x) -#define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y) -#define GGML_F16x4_FMA GGML_F32x4_FMA -#define GGML_F16x4_ADD wasm_f32x4_add -#define GGML_F16x4_MUL wasm_f32x4_mul -#define GGML_F16x4_REDUCE(res, x) \ -{ \ - int offset = GGML_F16_ARR >> 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = wasm_f32x4_add(x[i], 
x[offset+i]); \ - } \ - offset >>= 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ - } \ - offset >>= 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ - } \ - res = wasm_f32x4_extract_lane(x[0], 0) + \ - wasm_f32x4_extract_lane(x[0], 1) + \ - wasm_f32x4_extract_lane(x[0], 2) + \ - wasm_f32x4_extract_lane(x[0], 3); \ -} - -#define GGML_F16_VEC GGML_F16x4 -#define GGML_F16_VEC_ZERO GGML_F16x4_ZERO -#define GGML_F16_VEC_SET1 GGML_F16x4_SET1 -#define GGML_F16_VEC_LOAD(p, i) GGML_F16x4_LOAD(p) -#define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i]) -#define GGML_F16_VEC_FMA GGML_F16x4_FMA -#define GGML_F16_VEC_ADD GGML_F16x4_ADD -#define GGML_F16_VEC_MUL GGML_F16x4_MUL -#define GGML_F16_VEC_REDUCE GGML_F16x4_REDUCE - -#elif defined(__SSE3__) - -#define GGML_SIMD - -// F32 SSE - -#define GGML_F32_STEP 32 -#define GGML_F32_EPR 4 - -#define GGML_F32x4 __m128 -#define GGML_F32x4_ZERO _mm_setzero_ps() -#define GGML_F32x4_SET1(x) _mm_set1_ps(x) -#define GGML_F32x4_LOAD _mm_loadu_ps -#define GGML_F32x4_STORE _mm_storeu_ps -#if defined(__FMA__) - // TODO: Does this work? - #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a) -#else - #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a) -#endif -#define GGML_F32x4_ADD _mm_add_ps -#define GGML_F32x4_MUL _mm_mul_ps -#define GGML_F32x4_REDUCE(res, x) \ -{ \ - int offset = GGML_F32_ARR >> 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = _mm_add_ps(x[i], x[offset+i]); \ - } \ - offset >>= 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = _mm_add_ps(x[i], x[offset+i]); \ - } \ - offset >>= 1; \ - for (int i = 0; i < offset; ++i) { \ - x[i] = _mm_add_ps(x[i], x[offset+i]); \ - } \ - const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \ - res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \ -} -// TODO: is this optimal ? 
- -#define GGML_F32_VEC GGML_F32x4 -#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD -#define GGML_F32_VEC_STORE GGML_F32x4_STORE -#define GGML_F32_VEC_FMA GGML_F32x4_FMA -#define GGML_F32_VEC_ADD GGML_F32x4_ADD -#define GGML_F32_VEC_MUL GGML_F32x4_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE - -// F16 SSE - -#define GGML_F16_STEP 32 -#define GGML_F16_EPR 4 - -static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) { - float tmp[4]; - - tmp[0] = GGML_FP16_TO_FP32(x[0]); - tmp[1] = GGML_FP16_TO_FP32(x[1]); - tmp[2] = GGML_FP16_TO_FP32(x[2]); - tmp[3] = GGML_FP16_TO_FP32(x[3]); - - return _mm_loadu_ps(tmp); -} - -static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) { - float arr[4]; - - _mm_storeu_ps(arr, y); - - x[0] = GGML_FP32_TO_FP16(arr[0]); - x[1] = GGML_FP32_TO_FP16(arr[1]); - x[2] = GGML_FP32_TO_FP16(arr[2]); - x[3] = GGML_FP32_TO_FP16(arr[3]); -} - -#define GGML_F32Cx4 __m128 -#define GGML_F32Cx4_ZERO _mm_setzero_ps() -#define GGML_F32Cx4_SET1(x) _mm_set1_ps(x) -#define GGML_F32Cx4_LOAD(x) __sse_f16x4_load(x) -#define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y) -#define GGML_F32Cx4_FMA GGML_F32x4_FMA -#define GGML_F32Cx4_ADD _mm_add_ps -#define GGML_F32Cx4_MUL _mm_mul_ps -#define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE - -#define GGML_F16_VEC GGML_F32Cx4 -#define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO -#define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 -#define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) -#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i]) -#define GGML_F16_VEC_FMA GGML_F32Cx4_FMA -#define GGML_F16_VEC_ADD GGML_F32Cx4_ADD -#define GGML_F16_VEC_MUL GGML_F32Cx4_MUL -#define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE - -#endif - -// GGML_F32_ARR / GGML_F16_ARR -// number of registers to use per step -#ifdef GGML_SIMD -#define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR) -#define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR) -#endif - -// -// fundamental operations -// - -inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; } - -inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; } - -inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } - -inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } - -inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; } -inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; } -inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; } -inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; } -inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; } -inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; } -inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; } -inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; } 
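The constants relate as GGML_F32_ARR = GGML_F32_STEP / GGML_F32_EPR, and the SIMD kernels below all share the same loop shape; a plain-scalar sketch with the AVX values (STEP = 32, EPR = 8, ARR = 4):

static void sketch_vec_acc(const int n, float * y, const float * x) {
    const int np = (n & ~(32 - 1));        // largest multiple of GGML_F32_STEP
    for (int i = 0; i < np; i += 32) {     // one GGML_F32_STEP per iteration
        for (int j = 0; j < 4; ++j) {      // GGML_F32_ARR registers per step
            for (int l = 0; l < 8; ++l) {  // GGML_F32_EPR lanes per register
                y[i + j*8 + l] += x[i + j*8 + l];
            }
        }
    }
    for (int i = np; i < n; ++i) {         // scalar leftovers
        y[i] += x[i];
    }
}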
-inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; } -inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; } - -static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) { -#ifdef GGML_SIMD - float sumf = 0.0f; - const int np = (n & ~(GGML_F32_STEP - 1)); - - GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; - - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); - - sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]); - } - } - - // reduce sum0..sum3 to sum0 - GGML_F32_VEC_REDUCE(sumf, sum); - - // leftovers - for (int i = np; i < n; ++i) { - sumf += x[i]*y[i]; - } -#else - // scalar - ggml_float sumf = 0.0; - for (int i = 0; i < n; ++i) { - sumf += (ggml_float)(x[i]*y[i]); - } -#endif - - *s = sumf; -} - -static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) { - ggml_float sumf = 0.0; - -#if defined(GGML_SIMD) - const int np = (n & ~(GGML_F16_STEP - 1)); - - GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO }; - - GGML_F16_VEC ax[GGML_F16_ARR]; - GGML_F16_VEC ay[GGML_F16_ARR]; - - for (int i = 0; i < np; i += GGML_F16_STEP) { - for (int j = 0; j < GGML_F16_ARR; j++) { - ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j); - ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j); - - sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]); - } - } - - // reduce sum0..sum3 to sum0 - GGML_F16_VEC_REDUCE(sumf, sum); - - // leftovers - for (int i = np; i < n; ++i) { - sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i])); - } -#else - for (int i = 0; i < n; ++i) { - sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i])); - } -#endif - - *s = sumf; -} - -static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { - const int qk = QK8_0; - const int nb = n / qk; - - assert(n % qk == 0); - - const block_q4_0 * restrict x = vx; - const block_q8_0 * restrict y = vy; - -#if defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb - for (int i = 0; i < nb; i += 2) { - const block_q4_0 * restrict x0 = &x[i + 0]; - const block_q4_0 * restrict x1 = &x[i + 1]; - const block_q8_0 * restrict y0 = &y[i + 0]; - const block_q8_0 * restrict y1 = &y[i + 1]; - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - const int8x16_t s8b = vdupq_n_s8(0x8); - - const uint8x16_t v0_0 = vld1q_u8(x0->qs); - const uint8x16_t v0_1 = vld1q_u8(x1->qs); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // sub 8 - const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b); - const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b); - const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b); - const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 
16); - const int8x16_t v1_1l = vld1q_s8(y1->qs); - const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); - -#if defined(__ARM_FEATURE_DOTPROD) - // dot product into int32x4_t - const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h); - const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); -#else - const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0l)); - const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l)); - const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0h)); - const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0h)); - - const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1l)); - const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1l)); - const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1h)); - const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1h)); - - const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); - const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); - const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); - const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); -#endif - } - - *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); -#elif defined(__AVX2__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - // Main loop - for (int i = 0; i < nb; ++i) { - /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) ); - - __m256i bx = bytes_from_nibbles_32(x[i].qs); - - // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. 
- const __m256i off = _mm256_set1_epi8( 8 ); - bx = _mm256_sub_epi8( bx, off ); - - __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); - - const __m256 q = mul_sum_i8_pairs_float(bx, by); - - /* Multiply q with scale and accumulate */ - acc = _mm256_fmadd_ps( d, q, acc ); - } - - *s = hsum_float_8(acc); -#elif defined(__AVX__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - // Main loop - for (int i = 0; i < nb; ++i) { - // Compute combined scale for the block - const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) ); - - const __m128i lowMask = _mm_set1_epi8(0xF); - const __m128i off = _mm_set1_epi8(8); - - const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs); - - __m128i bx = _mm_and_si128(lowMask, tmp); - __m128i by = _mm_loadu_si128((const __m128i *)y[i].qs); - bx = _mm_sub_epi8(bx, off); - const __m128i i32_0 = mul_sum_i8_pairs(bx, by); - - bx = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4)); - by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16)); - bx = _mm_sub_epi8(bx, off); - const __m128i i32_1 = mul_sum_i8_pairs(bx, by); - - // Convert int32_t to float - __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1)); - - // Apply the scale, and accumulate - acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc); - } - - *s = hsum_float_8(acc); -#elif defined(__SSSE3__) - // set constants - const __m128i lowMask = _mm_set1_epi8(0xF); - const __m128i off = _mm_set1_epi8(8); - - // Initialize accumulator with zeros - __m128 acc_0 = _mm_setzero_ps(); - __m128 acc_1 = _mm_setzero_ps(); - __m128 acc_2 = _mm_setzero_ps(); - __m128 acc_3 = _mm_setzero_ps(); - - // First round without accumulation - { - _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0); - _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0); - - // Compute combined scale for the block 0 and 1 - const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) ); - - const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs); - - __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1); - __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs); - bx_0 = _mm_sub_epi8(bx_0, off); - const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0); - - __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4)); - __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16)); - bx_1 = _mm_sub_epi8(bx_1, off); - const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1); - - _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0); - _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0); - - // Compute combined scale for the block 2 and 3 - const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) ); - - const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs); - - __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3); - __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs); - bx_2 = _mm_sub_epi8(bx_2, off); - const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2); - - __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4)); - __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16)); - bx_3 = _mm_sub_epi8(bx_3, off); - const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3); - - // Convert int32_t to float - __m128 p0 = _mm_cvtepi32_ps(i32_0); - __m128 p1 = _mm_cvtepi32_ps(i32_1); - __m128 p2 = _mm_cvtepi32_ps(i32_2); - __m128 p3 = _mm_cvtepi32_ps(i32_3); - - // Apply the scale - acc_0 = _mm_mul_ps( d_0_1, p0 ); - acc_1 = _mm_mul_ps( d_0_1, p1 ); - acc_2 = _mm_mul_ps( d_2_3, p2 ); - acc_3 = _mm_mul_ps( 
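A plain-C sketch of the byte order that bytes_from_nibbles_32() is assumed to produce in the AVX paths above, inferred from the scalar fallback further down (low nibbles fill elements 0..15, high nibbles elements 16..31):

#include <stdint.h>

static void sketch_bytes_from_nibbles_32(const uint8_t qs[16], uint8_t out[32]) {
    for (int j = 0; j < 16; ++j) {
        out[j]      = qs[j] & 0x0F;   // elements 0..15
        out[j + 16] = qs[j] >> 4;     // elements 16..31
    }
    // the q4_0 kernels then subtract 8 from every byte to recenter [0,15] onto [-8,7]
}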
d_2_3, p3 ); - } - - // Main loop - GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb - for (int i = 2; i < nb; i+=2) { - _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0); - _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0); - - // Compute combined scale for the block 0 and 1 - const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) ); - - const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs); - - __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1); - __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs); - bx_0 = _mm_sub_epi8(bx_0, off); - const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0); - - __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4)); - __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16)); - bx_1 = _mm_sub_epi8(bx_1, off); - const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1); - - _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0); - _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0); - - // Compute combined scale for the block 2 and 3 - const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) ); - - const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs); - - __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3); - __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs); - bx_2 = _mm_sub_epi8(bx_2, off); - const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2); - - __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4)); - __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16)); - bx_3 = _mm_sub_epi8(bx_3, off); - const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3); - - // Convert int32_t to float - __m128 p0 = _mm_cvtepi32_ps(i32_0); - __m128 p1 = _mm_cvtepi32_ps(i32_1); - __m128 p2 = _mm_cvtepi32_ps(i32_2); - __m128 p3 = _mm_cvtepi32_ps(i32_3); - - // Apply the scale - __m128 p0_d = _mm_mul_ps( d_0_1, p0 ); - __m128 p1_d = _mm_mul_ps( d_0_1, p1 ); - __m128 p2_d = _mm_mul_ps( d_2_3, p2 ); - __m128 p3_d = _mm_mul_ps( d_2_3, p3 ); - - // Acummulate - acc_0 = _mm_add_ps(p0_d, acc_0); - acc_1 = _mm_add_ps(p1_d, acc_1); - acc_2 = _mm_add_ps(p2_d, acc_2); - acc_3 = _mm_add_ps(p3_d, acc_3); - } - - *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3); -#elif defined(__riscv_v_intrinsic) - float sumf = 0.0; - - size_t vl = __riscv_vsetvl_e8m1(qk/2); - - for (int i = 0; i < nb; i++) { - // load elements - vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl); - - vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl); - vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl); - - // mask and store lower part of x, and then upper part - vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl); - vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl); - - vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a); - vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l); - - // subtract offset - vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 8, vl); - vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 8, vl); - - vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl); - vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl); - - vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); - - vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl); - vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl); - - int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); - - sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d); - } - - *s = sumf; -#else - // scalar - float sumf = 0.0; - - for (int i = 0; i < nb; i++) { - int sumi = 0; - - for 
(int j = 0; j < qk/2; ++j) { - const int v0 = (x[i].qs[j] & 0x0F) - 8; - const int v1 = (x[i].qs[j] >> 4) - 8; - - sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]); - } - - sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d); - } - - *s = sumf; -#endif -} - -static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { - const int qk = QK8_1; - const int nb = n / qk; - - assert(n % qk == 0); - - const block_q4_1 * restrict x = vx; - const block_q8_1 * restrict y = vy; - - // TODO: add WASM SIMD -#if defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - float summs = 0; - - GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb - for (int i = 0; i < nb; i += 2) { - const block_q4_1 * restrict x0 = &x[i + 0]; - const block_q4_1 * restrict x1 = &x[i + 1]; - const block_q8_1 * restrict y0 = &y[i + 0]; - const block_q8_1 * restrict y1 = &y[i + 1]; - - summs += GGML_FP16_TO_FP32(x0->m) * y0->s + GGML_FP16_TO_FP32(x1->m) * y1->s; - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - - const uint8x16_t v0_0 = vld1q_u8(x0->qs); - const uint8x16_t v0_1 = vld1q_u8(x1->qs); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); - const int8x16_t v1_1l = vld1q_s8(y1->qs); - const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); - -#if defined(__ARM_FEATURE_DOTPROD) - // dot product into int32x4_t - const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h); - const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d); -#else - const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0l), vget_low_s8 (v1_0l)); - const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0l), vget_high_s8(v1_0l)); - const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0h), vget_low_s8 (v1_0h)); - const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0h), vget_high_s8(v1_0h)); - - const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1l), vget_low_s8 (v1_1l)); - const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1l), vget_high_s8(v1_1l)); - const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1h), vget_low_s8 (v1_1h)); - const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1h), vget_high_s8(v1_1h)); - - const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); - const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); - const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); - const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d); -#endif - } - - *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs; -#elif defined(__AVX2__) || defined(__AVX__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - float summs = 0; - - // Main loop - for (int i = 0; i < nb; ++i) { - const float d0 = GGML_FP16_TO_FP32(x[i].d); - const float d1 = 
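A restatement of why the scalar q4_0 x q8_0 fallback above can apply both scales once per block (d_x and d_y denote the two block scales, q_j and p_j the stored quants):

//   x_j = d_x*(q_j - 8)   (Q4_0),   y_j = d_y*p_j   (Q8_0)
//
//   sum_j x_j*y_j = d_x*d_y * sum_j (q_j - 8)*p_j = d_x*d_y * sumi
//
// which is exactly the "sumf += sumi*d_x*d_y" accumulation used above.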
y[i].d; - - summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s; - - const __m256 d0v = _mm256_set1_ps( d0 ); - const __m256 d1v = _mm256_set1_ps( d1 ); - - // Compute combined scales - const __m256 d0d1 = _mm256_mul_ps( d0v, d1v ); - - // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes - const __m256i bx = bytes_from_nibbles_32(x[i].qs); - const __m256i by = _mm256_loadu_si256( (const __m256i *)y[i].qs ); - - const __m256 xy = mul_sum_us8_pairs_float(bx, by); - - // Accumulate d0*d1*x*y -#if defined(__AVX2__) - acc = _mm256_fmadd_ps( d0d1, xy, acc ); -#else - acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc ); -#endif - } - - *s = hsum_float_8(acc) + summs; -#elif defined(__riscv_v_intrinsic) - float sumf = 0.0; - - size_t vl = __riscv_vsetvl_e8m1(qk/2); - - for (int i = 0; i < nb; i++) { - // load elements - vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl); - - vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl); - vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl); - - // mask and store lower part of x, and then upper part - vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl); - vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl); - - vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a); - vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l); - - vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl); - vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl); - - vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); - - vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl); - vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl); - - int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); - - sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s; - } + #define GGML_F16x8 float16x8_t + #define GGML_F16x8_ZERO vdupq_n_f16(0.0f) + #define GGML_F16x8_SET1(x) vdupq_n_f16(x) + #define GGML_F16x8_LOAD vld1q_f16 + #define GGML_F16x8_STORE vst1q_f16 + #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c) + #define GGML_F16x8_ADD vaddq_f16 + #define GGML_F16x8_MUL vmulq_f16 + #define GGML_F16x8_REDUCE(res, x) \ + do { \ + int offset = GGML_F16_ARR >> 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = vaddq_f16(x[i], x[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = vaddq_f16(x[i], x[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = vaddq_f16(x[i], x[offset+i]); \ + } \ + const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \ + const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \ + res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \ + } while (0) - *s = sumf; + #define GGML_F16_VEC GGML_F16x8 + #define GGML_F16_VEC_ZERO GGML_F16x8_ZERO + #define GGML_F16_VEC_SET1 GGML_F16x8_SET1 + #define GGML_F16_VEC_LOAD(p, i) GGML_F16x8_LOAD(p) + #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i]) + #define GGML_F16_VEC_FMA GGML_F16x8_FMA + #define GGML_F16_VEC_ADD GGML_F16x8_ADD + #define GGML_F16_VEC_MUL GGML_F16x8_MUL + #define GGML_F16_VEC_REDUCE GGML_F16x8_REDUCE #else - // scalar - float sumf = 0.0; - - for (int i = 0; i < nb; i++) { - int sumi = 0; - - for (int j = 0; j < qk/2; ++j) { - const int v0 = (x[i].qs[j] & 0x0F); - const int v1 = (x[i].qs[j] >> 4); + // if FP16 vector arithmetic is not supported, we use FP32 instead + // and take advantage of the vcvt_ functions to convert to/from FP16 - sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]); - } + #define GGML_F16_STEP 16 + #define GGML_F16_EPR 4 - sumf += 
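Why the q4_1 x q8_1 kernels can handle the per-block minimum with the single 'summs' term seen above (d_x and m are the Q4_1 scale and min, d_y the Q8_1 scale, and y->s = d_y * sum_j p_j as computed during quantization):

//   x_j = d_x*q_j + m   (Q4_1, q_j in [0,15]),   y_j = d_y*p_j   (Q8_1)
//
//   sum_j x_j*y_j = d_x*d_y * sum_j q_j*p_j  +  m * d_y * sum_j p_j
//                 = d_x*d_y * sumi           +  m * y->s
//
// so each block contributes d_x*d_y*sumi to the main accumulator and m*y->s to summs.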
(GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s; - } + #define GGML_F32Cx4 float32x4_t + #define GGML_F32Cx4_ZERO vdupq_n_f32(0.0f) + #define GGML_F32Cx4_SET1(x) vdupq_n_f32(x) + #define GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16(x)) + #define GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y)) + #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c) + #define GGML_F32Cx4_ADD vaddq_f32 + #define GGML_F32Cx4_MUL vmulq_f32 + #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE - *s = sumf; + #define GGML_F16_VEC GGML_F32Cx4 + #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO + #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 + #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) + #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i]) + #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA + #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD + #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL + #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE #endif -} -static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { - const int qk = QK8_0; - const int nb = n / qk; +#elif defined(__AVX__) - assert(n % qk == 0); - assert(qk == QK5_0); +#define GGML_SIMD - const block_q5_0 * restrict x = vx; - const block_q8_0 * restrict y = vy; +// F32 AVX -#if defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - uint32_t qh0; - uint32_t qh1; - - uint64_t tmp0[4]; - uint64_t tmp1[4]; - - GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb - for (int i = 0; i < nb; i += 2) { - const block_q5_0 * restrict x0 = &x[i]; - const block_q5_0 * restrict x1 = &x[i + 1]; - const block_q8_0 * restrict y0 = &y[i]; - const block_q8_0 * restrict y1 = &y[i + 1]; - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - - // extract the 5th bit via lookup table ((!b) << 4) - memcpy(&qh0, x0->qh, sizeof(qh0)); - memcpy(&qh1, x1->qh, sizeof(qh1)); - - tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF]; - tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF]; - tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF]; - tmp0[3] = table_b2b_1[(qh0 >> 24) ]; - - tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF]; - tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF]; - tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF]; - tmp1[3] = table_b2b_1[(qh1 >> 24) ]; - - const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0)); - const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2)); - const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0)); - const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2)); - - const uint8x16_t v0_0 = vld1q_u8(x0->qs); - const uint8x16_t v0_1 = vld1q_u8(x1->qs); - - // 4-bit -> 8-bit - int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero) - const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0); - const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0); - const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1); - const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); - const int8x16_t v1_1l = vld1q_s8(y1->qs); - const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); - -#if defined(__ARM_FEATURE_DOTPROD) - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( - vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), - vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), 
GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( - vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), - vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); +#define GGML_F32_STEP 32 +#define GGML_F32_EPR 8 + +#define GGML_F32x8 __m256 +#define GGML_F32x8_ZERO _mm256_setzero_ps() +#define GGML_F32x8_SET1(x) _mm256_set1_ps(x) +#define GGML_F32x8_LOAD _mm256_loadu_ps +#define GGML_F32x8_STORE _mm256_storeu_ps +#if defined(__FMA__) + #define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a) #else - const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l)); - const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l)); - const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h)); - const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h)); - - const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l)); - const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l)); - const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h)); - const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h)); - - const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); - const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); - const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); - const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); + #define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a) #endif - } - - *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); -#elif defined(__wasm_simd128__) - v128_t sumv = wasm_f32x4_splat(0.0f); - - uint32_t qh; - uint64_t tmp[4]; - - // TODO: check if unrolling this is better - for (int i = 0; i < nb; ++i) { - const block_q5_0 * restrict x0 = &x[i]; - const block_q8_0 * restrict y0 = &y[i]; - - const v128_t m4b = wasm_i8x16_splat(0x0F); - - // extract the 5th bit - memcpy(&qh, x0->qh, sizeof(qh)); - - tmp[0] = table_b2b_1[(qh >> 0) & 0xFF]; - tmp[1] = table_b2b_1[(qh >> 8) & 0xFF]; - tmp[2] = table_b2b_1[(qh >> 16) & 0xFF]; - tmp[3] = table_b2b_1[(qh >> 24) ]; - - const v128_t qhl = wasm_v128_load(tmp + 0); - const v128_t qhh = wasm_v128_load(tmp + 2); - - const v128_t v0 = wasm_v128_load(x0->qs); - - // 4-bit -> 8-bit - const v128_t v0l = wasm_v128_and (v0, m4b); - const v128_t v0h = wasm_u8x16_shr(v0, 4); - - // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero) - const v128_t v0lf = wasm_i8x16_sub(v0l, qhl); - const v128_t v0hf = wasm_i8x16_sub(v0h, qhh); - - // load y - const v128_t v1l = wasm_v128_load(y0->qs); - const v128_t v1h = wasm_v128_load(y0->qs + 16); - - // int8x16 -> int16x8 - const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf); - const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf); - const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf); - const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf); - - const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l); - const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l); - const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h); - const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h); - - // dot product - sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4( - wasm_i32x4_add( 
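The "((!b) << 4)" lookup used by the q5_0 kernels above earns a short justification: a Q5_0 element is ((low 4 bits) | (high bit << 4)) - 16, and subtracting the table value reproduces that with a single vector subtract.

// For one element, with q = low 4 bits and b = the 5th bit:
//   t = (!b) << 4          ->  t = 16 if b == 0,  t = 0 if b == 1
//   q - t = q - 16         if b == 0   ==  (q |  0) - 16
//   q - t = q              if b == 1   ==  (q | 16) - 16      (since q < 16)
// i.e. q - t == (q | (b << 4)) - 16 in both cases.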
- wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll), - wasm_i32x4_dot_i16x8(v0lfh, v1lh)), - wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), - wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), - wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d)))); - } - - *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + - wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3); -#elif defined(__AVX2__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - // Main loop - for (int i = 0; i < nb; i++) { - /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d)); - - __m256i bx = bytes_from_nibbles_32(x[i].qs); - __m256i bxhi = bytes_from_bits_32(x[i].qh); - bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0)); - bx = _mm256_or_si256(bx, bxhi); - - __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); - - const __m256 q = mul_sum_i8_pairs_float(bx, by); +#define GGML_F32x8_ADD _mm256_add_ps +#define GGML_F32x8_MUL _mm256_mul_ps +#define GGML_F32x8_REDUCE(res, x) \ +do { \ + int offset = GGML_F32_ARR >> 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = _mm256_add_ps(x[i], x[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = _mm256_add_ps(x[i], x[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = _mm256_add_ps(x[i], x[offset+i]); \ + } \ + const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \ + _mm256_extractf128_ps(x[0], 1)); \ + const __m128 t1 = _mm_hadd_ps(t0, t0); \ + res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \ +} while (0) +// TODO: is this optimal ? - /* Multiply q with scale and accumulate */ - acc = _mm256_fmadd_ps(d, q, acc); - } +#define GGML_F32_VEC GGML_F32x8 +#define GGML_F32_VEC_ZERO GGML_F32x8_ZERO +#define GGML_F32_VEC_SET1 GGML_F32x8_SET1 +#define GGML_F32_VEC_LOAD GGML_F32x8_LOAD +#define GGML_F32_VEC_STORE GGML_F32x8_STORE +#define GGML_F32_VEC_FMA GGML_F32x8_FMA +#define GGML_F32_VEC_ADD GGML_F32x8_ADD +#define GGML_F32_VEC_MUL GGML_F32x8_MUL +#define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE - *s = hsum_float_8(acc); -#elif defined(__AVX__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - __m128i mask = _mm_set1_epi8((char)0xF0); +// F16 AVX - // Main loop - for (int i = 0; i < nb; i++) { - /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d)); +#define GGML_F16_STEP 32 +#define GGML_F16_EPR 8 - __m256i bx = bytes_from_nibbles_32(x[i].qs); - const __m256i bxhi = bytes_from_bits_32(x[i].qh); - __m128i bxhil = _mm256_castsi256_si128(bxhi); - __m128i bxhih = _mm256_extractf128_si256(bxhi, 1); - bxhil = _mm_andnot_si128(bxhil, mask); - bxhih = _mm_andnot_si128(bxhih, mask); - __m128i bxl = _mm256_castsi256_si128(bx); - __m128i bxh = _mm256_extractf128_si256(bx, 1); - bxl = _mm_or_si128(bxl, bxhil); - bxh = _mm_or_si128(bxh, bxhih); - bx = MM256_SET_M128I(bxh, bxl); +// F16 arithmetic is not supported by AVX, so we use F32 instead - const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); +#define GGML_F32Cx8 __m256 +#define GGML_F32Cx8_ZERO _mm256_setzero_ps() +#define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x) - const __m256 q = mul_sum_i8_pairs_float(bx, by); +#if defined(__F16C__) +// the _mm256_cvt intrinsics require F16C +#define GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x))) +#define GGML_F32Cx8_STORE(x, y) 
_mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0)) +#else +static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) { + float tmp[8]; - /* Multiply q with scale and accumulate */ - acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc); + for (int i = 0; i < 8; i++) { + tmp[i] = GGML_FP16_TO_FP32(x[i]); } - *s = hsum_float_8(acc); -#elif defined(__riscv_v_intrinsic) - float sumf = 0.0; - - uint32_t qh; - - size_t vl = __riscv_vsetvl_e8m1(qk/2); - - // These tempory registers are for masking and shift operations - vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl); - vuint32m2_t vt_2 = __riscv_vsll_vv_u32m2(__riscv_vmv_v_x_u32m2(1, vl), vt_1, vl); - - vuint32m2_t vt_3 = __riscv_vsll_vx_u32m2(vt_2, 16, vl); - vuint32m2_t vt_4 = __riscv_vadd_vx_u32m2(vt_1, 12, vl); - - for (int i = 0; i < nb; i++) { - memcpy(&qh, x[i].qh, sizeof(uint32_t)); - - // ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; - vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(vt_2, qh, vl); - vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(xha_0, vt_1, vl); - vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl); - - // ((qh & (1u << (j + 16))) >> (j + 12)); - vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(vt_3, qh, vl); - vuint32m2_t xhl_1 = __riscv_vsrl_vv_u32m2(xha_1, vt_4, vl); - - // narrowing - vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xhl_0, vl); - vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl); - - vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xhl_1, vl); - vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl); - - // load - vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl); - - vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl); - vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl); - - vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl); - vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl); - - vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl); - vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl); - - vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a); - vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l); - - vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 16, vl); - vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 16, vl); - - vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl); - vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl); - - vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); - - vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl); - vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl); - - int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); + return _mm256_loadu_ps(tmp); +} +static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) { + float arr[8]; - sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi; - } + _mm256_storeu_ps(arr, y); - *s = sumf; -#else - // scalar - float sumf = 0.0; + for (int i = 0; i < 8; i++) + x[i] = GGML_FP32_TO_FP16(arr[i]); +} +#define GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x) +#define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y) +#endif - for (int i = 0; i < nb; i++) { - uint32_t qh; - memcpy(&qh, x[i].qh, sizeof(qh)); +#define GGML_F32Cx8_FMA GGML_F32x8_FMA +#define GGML_F32Cx8_ADD _mm256_add_ps +#define GGML_F32Cx8_MUL _mm256_mul_ps +#define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE - int sumi = 0; +#define GGML_F16_VEC GGML_F32Cx8 +#define GGML_F16_VEC_ZERO GGML_F32Cx8_ZERO +#define GGML_F16_VEC_SET1 GGML_F32Cx8_SET1 +#define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx8_LOAD(p) +#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i]) +#define GGML_F16_VEC_FMA GGML_F32Cx8_FMA +#define GGML_F16_VEC_ADD 
GGML_F32Cx8_ADD +#define GGML_F16_VEC_MUL GGML_F32Cx8_MUL +#define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE - for (int j = 0; j < qk/2; ++j) { - const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; - const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12)); +#elif defined(__POWER9_VECTOR__) - const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16; - const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16; +#define GGML_SIMD - sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]); - } +// F32 POWER9 - sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi; - } +#define GGML_F32_STEP 32 +#define GGML_F32_EPR 4 - *s = sumf; -#endif +#define GGML_F32x4 vector float +#define GGML_F32x4_ZERO 0.0f +#define GGML_F32x4_SET1 vec_splats +#define GGML_F32x4_LOAD(p) vec_xl(0, p) +#define GGML_F32x4_STORE(p, r) vec_xst(r, 0, p) +#define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a) +#define GGML_F32x4_ADD vec_add +#define GGML_F32x4_MUL vec_mul +#define GGML_F32x4_REDUCE(res, x) \ +{ \ + int offset = GGML_F32_ARR >> 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = vec_add(x[i], x[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = vec_add(x[i], x[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = vec_add(x[i], x[offset+i]); \ + } \ + res = vec_extract(x[0], 0) + \ + vec_extract(x[0], 1) + \ + vec_extract(x[0], 2) + \ + vec_extract(x[0], 3); \ } -static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { - const int qk = QK8_1; - const int nb = n / qk; - - assert(n % qk == 0); - assert(qk == QK5_1); - - const block_q5_1 * restrict x = vx; - const block_q8_1 * restrict y = vy; +#define GGML_F32_VEC GGML_F32x4 +#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO +#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 +#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD +#define GGML_F32_VEC_STORE GGML_F32x4_STORE +#define GGML_F32_VEC_FMA GGML_F32x4_FMA +#define GGML_F32_VEC_ADD GGML_F32x4_ADD +#define GGML_F32_VEC_MUL GGML_F32x4_MUL +#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE -#if defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - float summs0 = 0.0f; - float summs1 = 0.0f; - - uint32_t qh0; - uint32_t qh1; - - uint64_t tmp0[4]; - uint64_t tmp1[4]; - - GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb - for (int i = 0; i < nb; i += 2) { - const block_q5_1 * restrict x0 = &x[i]; - const block_q5_1 * restrict x1 = &x[i + 1]; - const block_q8_1 * restrict y0 = &y[i]; - const block_q8_1 * restrict y1 = &y[i + 1]; - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - - summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s; - summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s; - - // extract the 5th bit via lookup table ((b) << 4) - memcpy(&qh0, x0->qh, sizeof(qh0)); - memcpy(&qh1, x1->qh, sizeof(qh1)); - - tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF]; - tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF]; - tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF]; - tmp0[3] = table_b2b_0[(qh0 >> 24) ]; - - tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF]; - tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF]; - tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF]; - tmp1[3] = table_b2b_0[(qh1 >> 24) ]; - - const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0)); - const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2)); - const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0)); - const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2)); - - const uint8x16_t v0_0 = vld1q_u8(x0->qs); - const uint8x16_t v0_1 = 
vld1q_u8(x1->qs); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // add high bit - const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0); - const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0); - const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1); - const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); - const int8x16_t v1_1l = vld1q_s8(y1->qs); - const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); - -#if defined(__ARM_FEATURE_DOTPROD) - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( - vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), - vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( - vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), - vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d); -#else - const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l)); - const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l)); - const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h)); - const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h)); - - const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l)); - const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l)); - const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h)); - const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h)); - - const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); - const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); - const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); - const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d); -#endif - } +// F16 POWER9 +#define GGML_F16_STEP GGML_F32_STEP +#define GGML_F16_EPR GGML_F32_EPR +#define GGML_F16_VEC GGML_F32x4 +#define GGML_F16_VEC_ZERO GGML_F32x4_ZERO +#define GGML_F16_VEC_SET1 GGML_F32x4_SET1 +#define GGML_F16_VEC_FMA GGML_F32x4_FMA +#define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE +// Use vec_xl, not vec_ld, in case the load address is not aligned. +#define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? 
\ + vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \ + vec_extract_fp32_from_shortl(vec_xl(0, p)) +#define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i] +#define GGML_F16_VEC_STORE(p, r, i) \ + if (i & 0x1) \ + vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)], \ + r[i - GGML_ENDIAN_BYTE(0)]), \ + 0, p - GGML_F16_EPR) - *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1; #elif defined(__wasm_simd128__) - v128_t sumv = wasm_f32x4_splat(0.0f); - - float summs = 0.0f; - - uint32_t qh; - uint64_t tmp[4]; - - // TODO: check if unrolling this is better - for (int i = 0; i < nb; ++i) { - const block_q5_1 * restrict x0 = &x[i]; - const block_q8_1 * restrict y0 = &y[i]; - summs += GGML_FP16_TO_FP32(x0->m) * y0->s; - - const v128_t m4b = wasm_i8x16_splat(0x0F); - - // extract the 5th bit - memcpy(&qh, x0->qh, sizeof(qh)); +#define GGML_SIMD - tmp[0] = table_b2b_0[(qh >> 0) & 0xFF]; - tmp[1] = table_b2b_0[(qh >> 8) & 0xFF]; - tmp[2] = table_b2b_0[(qh >> 16) & 0xFF]; - tmp[3] = table_b2b_0[(qh >> 24) ]; +// F32 WASM - const v128_t qhl = wasm_v128_load(tmp + 0); - const v128_t qhh = wasm_v128_load(tmp + 2); +#define GGML_F32_STEP 16 +#define GGML_F32_EPR 4 - const v128_t v0 = wasm_v128_load(x0->qs); +#define GGML_F32x4 v128_t +#define GGML_F32x4_ZERO wasm_f32x4_splat(0.0f) +#define GGML_F32x4_SET1(x) wasm_f32x4_splat(x) +#define GGML_F32x4_LOAD wasm_v128_load +#define GGML_F32x4_STORE wasm_v128_store +#define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a) +#define GGML_F32x4_ADD wasm_f32x4_add +#define GGML_F32x4_MUL wasm_f32x4_mul +#define GGML_F32x4_REDUCE(res, x) \ +{ \ + int offset = GGML_F32_ARR >> 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ + } \ + res = wasm_f32x4_extract_lane(x[0], 0) + \ + wasm_f32x4_extract_lane(x[0], 1) + \ + wasm_f32x4_extract_lane(x[0], 2) + \ + wasm_f32x4_extract_lane(x[0], 3); \ +} - // 4-bit -> 8-bit - const v128_t v0l = wasm_v128_and (v0, m4b); - const v128_t v0h = wasm_u8x16_shr(v0, 4); +#define GGML_F32_VEC GGML_F32x4 +#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO +#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 +#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD +#define GGML_F32_VEC_STORE GGML_F32x4_STORE +#define GGML_F32_VEC_FMA GGML_F32x4_FMA +#define GGML_F32_VEC_ADD GGML_F32x4_ADD +#define GGML_F32_VEC_MUL GGML_F32x4_MUL +#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE - // add high bit - const v128_t v0lf = wasm_v128_or(v0l, qhl); - const v128_t v0hf = wasm_v128_or(v0h, qhh); +// F16 WASM - // load y - const v128_t v1l = wasm_v128_load(y0->qs); - const v128_t v1h = wasm_v128_load(y0->qs + 16); +#define GGML_F16_STEP 16 +#define GGML_F16_EPR 4 - // int8x16 -> int16x8 - const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf); - const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf); - const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf); - const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf); +inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) { + float tmp[4]; - const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l); - const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l); - const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h); - const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h); + tmp[0] = GGML_FP16_TO_FP32(p[0]); + tmp[1] = 
GGML_FP16_TO_FP32(p[1]); + tmp[2] = GGML_FP16_TO_FP32(p[2]); + tmp[3] = GGML_FP16_TO_FP32(p[3]); - // dot product - sumv = wasm_f32x4_add(sumv, - wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add( - wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll), - wasm_i32x4_dot_i16x8(v0lfh, v1lh)), - wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), - wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), - wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * y0->d))); - } + return wasm_v128_load(tmp); +} - *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + - wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs; -#elif defined(__AVX2__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); +inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) { + float tmp[4]; - float summs = 0.0f; + wasm_v128_store(tmp, x); - // Main loop - for (int i = 0; i < nb; i++) { - const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d)); + p[0] = GGML_FP32_TO_FP16(tmp[0]); + p[1] = GGML_FP32_TO_FP16(tmp[1]); + p[2] = GGML_FP32_TO_FP16(tmp[2]); + p[3] = GGML_FP32_TO_FP16(tmp[3]); +} - summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s; +#define GGML_F16x4 v128_t +#define GGML_F16x4_ZERO wasm_f32x4_splat(0.0f) +#define GGML_F16x4_SET1(x) wasm_f32x4_splat(x) +#define GGML_F16x4_LOAD(x) __wasm_f16x4_load(x) +#define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y) +#define GGML_F16x4_FMA GGML_F32x4_FMA +#define GGML_F16x4_ADD wasm_f32x4_add +#define GGML_F16x4_MUL wasm_f32x4_mul +#define GGML_F16x4_REDUCE(res, x) \ +{ \ + int offset = GGML_F16_ARR >> 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = wasm_f32x4_add(x[i], x[offset+i]); \ + } \ + res = wasm_f32x4_extract_lane(x[0], 0) + \ + wasm_f32x4_extract_lane(x[0], 1) + \ + wasm_f32x4_extract_lane(x[0], 2) + \ + wasm_f32x4_extract_lane(x[0], 3); \ +} - __m256i bx = bytes_from_nibbles_32(x[i].qs); - __m256i bxhi = bytes_from_bits_32(x[i].qh); - bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10)); - bx = _mm256_or_si256(bx, bxhi); +#define GGML_F16_VEC GGML_F16x4 +#define GGML_F16_VEC_ZERO GGML_F16x4_ZERO +#define GGML_F16_VEC_SET1 GGML_F16x4_SET1 +#define GGML_F16_VEC_LOAD(p, i) GGML_F16x4_LOAD(p) +#define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i]) +#define GGML_F16_VEC_FMA GGML_F16x4_FMA +#define GGML_F16_VEC_ADD GGML_F16x4_ADD +#define GGML_F16_VEC_MUL GGML_F16x4_MUL +#define GGML_F16_VEC_REDUCE GGML_F16x4_REDUCE - const __m256 dy = _mm256_set1_ps(y[i].d); - const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); +#elif defined(__SSE3__) - const __m256 q = mul_sum_us8_pairs_float(bx, by); +#define GGML_SIMD - acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc); - } +// F32 SSE - *s = hsum_float_8(acc) + summs; -#elif defined(__AVX__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - __m128i mask = _mm_set1_epi8(0x10); +#define GGML_F32_STEP 32 +#define GGML_F32_EPR 4 - float summs = 0.0f; +#define GGML_F32x4 __m128 +#define GGML_F32x4_ZERO _mm_setzero_ps() +#define GGML_F32x4_SET1(x) _mm_set1_ps(x) +#define GGML_F32x4_LOAD _mm_loadu_ps +#define GGML_F32x4_STORE _mm_storeu_ps +#if defined(__FMA__) + // TODO: Does this work? 
+ #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a) +#else + #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a) +#endif +#define GGML_F32x4_ADD _mm_add_ps +#define GGML_F32x4_MUL _mm_mul_ps +#define GGML_F32x4_REDUCE(res, x) \ +{ \ + int offset = GGML_F32_ARR >> 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = _mm_add_ps(x[i], x[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = _mm_add_ps(x[i], x[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + x[i] = _mm_add_ps(x[i], x[offset+i]); \ + } \ + const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \ + res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \ +} +// TODO: is this optimal ? - // Main loop - for (int i = 0; i < nb; i++) { - const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d)); +#define GGML_F32_VEC GGML_F32x4 +#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO +#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 +#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD +#define GGML_F32_VEC_STORE GGML_F32x4_STORE +#define GGML_F32_VEC_FMA GGML_F32x4_FMA +#define GGML_F32_VEC_ADD GGML_F32x4_ADD +#define GGML_F32_VEC_MUL GGML_F32x4_MUL +#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE - summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s; +// F16 SSE - __m256i bx = bytes_from_nibbles_32(x[i].qs); - const __m256i bxhi = bytes_from_bits_32(x[i].qh); - __m128i bxhil = _mm256_castsi256_si128(bxhi); - __m128i bxhih = _mm256_extractf128_si256(bxhi, 1); - bxhil = _mm_and_si128(bxhil, mask); - bxhih = _mm_and_si128(bxhih, mask); - __m128i bxl = _mm256_castsi256_si128(bx); - __m128i bxh = _mm256_extractf128_si256(bx, 1); - bxl = _mm_or_si128(bxl, bxhil); - bxh = _mm_or_si128(bxh, bxhih); - bx = MM256_SET_M128I(bxh, bxl); +#define GGML_F16_STEP 32 +#define GGML_F16_EPR 4 - const __m256 dy = _mm256_set1_ps(y[i].d); - const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); +static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) { + float tmp[4]; - const __m256 q = mul_sum_us8_pairs_float(bx, by); + tmp[0] = GGML_FP16_TO_FP32(x[0]); + tmp[1] = GGML_FP16_TO_FP32(x[1]); + tmp[2] = GGML_FP16_TO_FP32(x[2]); + tmp[3] = GGML_FP16_TO_FP32(x[3]); - acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc); - } + return _mm_loadu_ps(tmp); +} - *s = hsum_float_8(acc) + summs; -#elif defined(__riscv_v_intrinsic) - float sumf = 0.0; +static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) { + float arr[4]; - uint32_t qh; + _mm_storeu_ps(arr, y); - size_t vl = __riscv_vsetvl_e8m1(qk/2); + x[0] = GGML_FP32_TO_FP16(arr[0]); + x[1] = GGML_FP32_TO_FP16(arr[1]); + x[2] = GGML_FP32_TO_FP16(arr[2]); + x[3] = GGML_FP32_TO_FP16(arr[3]); +} - // temporary registers for shift operations - vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl); - vuint32m2_t vt_2 = __riscv_vadd_vx_u32m2(vt_1, 12, vl); +#define GGML_F32Cx4 __m128 +#define GGML_F32Cx4_ZERO _mm_setzero_ps() +#define GGML_F32Cx4_SET1(x) _mm_set1_ps(x) +#define GGML_F32Cx4_LOAD(x) __sse_f16x4_load(x) +#define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y) +#define GGML_F32Cx4_FMA GGML_F32x4_FMA +#define GGML_F32Cx4_ADD _mm_add_ps +#define GGML_F32Cx4_MUL _mm_mul_ps +#define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE - for (int i = 0; i < nb; i++) { - memcpy(&qh, x[i].qh, sizeof(uint32_t)); +#define GGML_F16_VEC GGML_F32Cx4 +#define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO +#define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 +#define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) +#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i]) +#define GGML_F16_VEC_FMA GGML_F32Cx4_FMA +#define 
GGML_F16_VEC_ADD GGML_F32Cx4_ADD +#define GGML_F16_VEC_MUL GGML_F32Cx4_MUL +#define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE - // load qh - vuint32m2_t vqh = __riscv_vmv_v_x_u32m2(qh, vl); +#endif - // ((qh >> (j + 0)) << 4) & 0x10; - vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(vqh, vt_1, vl); - vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl); - vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(xhl_0, 0x10, vl); +// GGML_F32_ARR / GGML_F16_ARR +// number of registers to use per step +#ifdef GGML_SIMD +#define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR) +#define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR) +#endif - // ((qh >> (j + 12)) ) & 0x10; - vuint32m2_t xhr_1 = __riscv_vsrl_vv_u32m2(vqh, vt_2, vl); - vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(xhr_1, 0x10, vl); +// +// fundamental operations +// - // narrowing - vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xha_0, vl); - vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl); +inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; } - vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xha_1, vl); - vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl); +inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; } - // load - vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl); +inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } - vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl); - vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl); +inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } - vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl); - vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl); +inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; } +inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; } +inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; } +inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; } +inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; } +inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; } +inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; } +inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; } +inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; } +inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; } - vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl); - vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl); +static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) { +#ifdef GGML_SIMD + float sumf = 0.0f; + const int np = (n & ~(GGML_F32_STEP - 1)); - vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a); - 
vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l); + GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl); - vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl); + GGML_F32_VEC ax[GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; - vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); + for (int i = 0; i < np; i += GGML_F32_STEP) { + for (int j = 0; j < GGML_F32_ARR; j++) { + ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); + ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); - vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl); - vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl); + sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]); + } + } - int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); + // reduce sum0..sum3 to sum0 + GGML_F32_VEC_REDUCE(sumf, sum); - sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s; + // leftovers + for (int i = np; i < n; ++i) { + sumf += x[i]*y[i]; } - - *s = sumf; #else // scalar - float sumf = 0.0; - - for (int i = 0; i < nb; i++) { - uint32_t qh; - memcpy(&qh, x[i].qh, sizeof(qh)); - - int sumi = 0; - - for (int j = 0; j < qk/2; ++j) { - const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; - const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; - - const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0; - const int32_t x1 = (x[i].qs[j] >> 4) | xh_1; - - sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]); - } - - sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s; + ggml_float sumf = 0.0; + for (int i = 0; i < n; ++i) { + sumf += (ggml_float)(x[i]*y[i]); } +#endif *s = sumf; -#endif } -static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { - const int qk = QK8_0; - const int nb = n / qk; - - assert(n % qk == 0); - - const block_q8_0 * restrict x = vx; - const block_q8_0 * restrict y = vy; - -#if defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb - for (int i = 0; i < nb; i += 2) { - const block_q8_0 * restrict x0 = &x[i + 0]; - const block_q8_0 * restrict x1 = &x[i + 1]; - const block_q8_0 * restrict y0 = &y[i + 0]; - const block_q8_0 * restrict y1 = &y[i + 1]; - - const int8x16_t x0_0 = vld1q_s8(x0->qs); - const int8x16_t x0_1 = vld1q_s8(x0->qs + 16); - const int8x16_t x1_0 = vld1q_s8(x1->qs); - const int8x16_t x1_1 = vld1q_s8(x1->qs + 16); - - // load y - const int8x16_t y0_0 = vld1q_s8(y0->qs); - const int8x16_t y0_1 = vld1q_s8(y0->qs + 16); - const int8x16_t y1_0 = vld1q_s8(y1->qs); - const int8x16_t y1_1 = vld1q_s8(y1->qs + 16); - -#if defined(__ARM_FEATURE_DOTPROD) - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( - vdotq_s32(vdupq_n_s32(0), x0_0, y0_0), - vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( - vdotq_s32(vdupq_n_s32(0), x1_0, y1_0), - vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); +static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) { + ggml_float sumf = 0.0; -#else - const int16x8_t p0_0 = vmull_s8(vget_low_s8 (x0_0), vget_low_s8 (y0_0)); - const int16x8_t p0_1 = vmull_s8(vget_high_s8(x0_0), vget_high_s8(y0_0)); - const int16x8_t p0_2 = vmull_s8(vget_low_s8 (x0_1), vget_low_s8 (y0_1)); - const int16x8_t p0_3 = 
vmull_s8(vget_high_s8(x0_1), vget_high_s8(y0_1)); - - const int16x8_t p1_0 = vmull_s8(vget_low_s8 (x1_0), vget_low_s8 (y1_0)); - const int16x8_t p1_1 = vmull_s8(vget_high_s8(x1_0), vget_high_s8(y1_0)); - const int16x8_t p1_2 = vmull_s8(vget_low_s8 (x1_1), vget_low_s8 (y1_1)); - const int16x8_t p1_3 = vmull_s8(vget_high_s8(x1_1), vget_high_s8(y1_1)); - - const int32x4_t p0 = vaddq_s32(vpaddlq_s16(p0_0), vpaddlq_s16(p0_1)); - const int32x4_t p1 = vaddq_s32(vpaddlq_s16(p0_2), vpaddlq_s16(p0_3)); - const int32x4_t p2 = vaddq_s32(vpaddlq_s16(p1_0), vpaddlq_s16(p1_1)); - const int32x4_t p3 = vaddq_s32(vpaddlq_s16(p1_2), vpaddlq_s16(p1_3)); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(p2, p3)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); -#endif - } +#if defined(GGML_SIMD) + const int np = (n & ~(GGML_F16_STEP - 1)); - *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); -#elif defined(__AVX2__) || defined(__AVX__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); + GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO }; - // Main loop - for (int i = 0; i < nb; ++i) { - // Compute combined scale for the block - const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d)); - __m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs); - __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); + GGML_F16_VEC ax[GGML_F16_ARR]; + GGML_F16_VEC ay[GGML_F16_ARR]; - const __m256 q = mul_sum_i8_pairs_float(bx, by); + for (int i = 0; i < np; i += GGML_F16_STEP) { + for (int j = 0; j < GGML_F16_ARR; j++) { + ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j); + ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j); - // Multiply q with scale and accumulate -#if defined(__AVX2__) - acc = _mm256_fmadd_ps( d, q, acc ); -#else - acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc ); -#endif + sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]); + } } - *s = hsum_float_8(acc); -#elif defined(__riscv_v_intrinsic) - float sumf = 0.0; - size_t vl = __riscv_vsetvl_e8m1(qk); - - for (int i = 0; i < nb; i++) { - // load elements - vint8m1_t bx = __riscv_vle8_v_i8m1(x[i].qs, vl); - vint8m1_t by = __riscv_vle8_v_i8m1(y[i].qs, vl); - - vint16m2_t vw_mul = __riscv_vwmul_vv_i16m2(bx, by, vl); - - vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl); - vint32m1_t v_sum = __riscv_vwredsum_vs_i16m2_i32m1(vw_mul, v_zero, vl); - - int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum); + // reduce sum0..sum3 to sum0 + GGML_F16_VEC_REDUCE(sumf, sum); - sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)); + // leftovers + for (int i = np; i < n; ++i) { + sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i])); } - - *s = sumf; #else - // scalar - float sumf = 0.0; - - for (int i = 0; i < nb; i++) { - int sumi = 0; - - for (int j = 0; j < qk; j++) { - sumi += x[i].qs[j]*y[i].qs[j]; - } - - sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)); + for (int i = 0; i < n; ++i) { + sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i])); } +#endif *s = sumf; -#endif } // compute GGML_VEC_DOT_UNROLL dot products at once @@ -21001,7 +18706,6 @@ size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, i block_q8_0 * block = (block_q8_0*)dst + start / QK8_0; result = ggml_quantize_q8_0(src + start, block, n, n, hist); } break; -#ifdef GGML_USE_K_QUANTS case GGML_TYPE_Q2_K: { GGML_ASSERT(start % QK_K == 0); @@ 
-21032,7 +18736,6 @@ size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, i block_q6_K * block = (block_q6_K*)dst + start / QK_K; result = ggml_quantize_q6_K(src + start, block, n, n, hist); } break; -#endif case GGML_TYPE_F16: { int elemsize = sizeof(ggml_fp16_t); diff --git a/ggml.h b/ggml.h index 08bff5511c2254..8c954904e5a007 100644 --- a/ggml.h +++ b/ggml.h @@ -1930,12 +1930,19 @@ extern "C" { // quantization // + // TODO: these would probably get removed in favor of the more general ggml_quantize_chunk GGML_API size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist); GGML_API size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist); GGML_API size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist); GGML_API size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist); GGML_API size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist); + GGML_API size_t ggml_quantize_q2_K(const float * src, void * dst, int n, int k, int64_t * hist); + GGML_API size_t ggml_quantize_q3_K(const float * src, void * dst, int n, int k, int64_t * hist); + GGML_API size_t ggml_quantize_q4_K(const float * src, void * dst, int n, int k, int64_t * hist); + GGML_API size_t ggml_quantize_q5_K(const float * src, void * dst, int n, int k, int64_t * hist); + GGML_API size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist); + GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist); // diff --git a/llama.cpp b/llama.cpp index 3d431ee7bf5260..1d1db8fc97fa38 100644 --- a/llama.cpp +++ b/llama.cpp @@ -19,13 +19,11 @@ #ifdef GGML_USE_MPI # include "ggml-mpi.h" #endif -#ifdef GGML_USE_K_QUANTS -# ifndef QK_K -# ifdef GGML_QKK_64 -# define QK_K 64 -# else -# define QK_K 256 -# endif +#ifndef QK_K +# ifdef GGML_QKK_64 +# define QK_K 64 +# else +# define QK_K 256 # endif #endif @@ -8052,7 +8050,7 @@ struct no_init { struct quantize_state_internal { const llama_model & model; const llama_model_quantize_params * params; -#ifdef GGML_USE_K_QUANTS + int n_attention_wv = 0; int n_feed_forward_w2 = 0; int i_attention_wv = 0; @@ -8060,7 +8058,7 @@ struct quantize_state_internal { int n_k_quantized = 0; int n_fallback = 0; -#endif + quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params) : model(model) , params(params) @@ -8125,7 +8123,6 @@ static void llama_convert_tensor_internal( workers.clear(); } -#ifdef GGML_USE_K_QUANTS static ggml_type get_k_quant_type( quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype @@ -8237,7 +8234,6 @@ static ggml_type get_k_quant_type( return new_type; } -#endif static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) { ggml_type quantized_type; @@ -8252,7 +8248,6 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s case LLAMA_FTYPE_MOSTLY_F16: quantized_type = GGML_TYPE_F16; break; case LLAMA_FTYPE_ALL_F32: quantized_type = GGML_TYPE_F32; break; -#ifdef GGML_USE_K_QUANTS // K-quants case LLAMA_FTYPE_MOSTLY_Q2_K: quantized_type = GGML_TYPE_Q2_K; break; case LLAMA_FTYPE_MOSTLY_Q3_K_S: @@ -8263,7 +8258,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s case LLAMA_FTYPE_MOSTLY_Q5_K_S: case 
LLAMA_FTYPE_MOSTLY_Q5_K_M: quantized_type = GGML_TYPE_Q5_K; break; case LLAMA_FTYPE_MOSTLY_Q6_K: quantized_type = GGML_TYPE_Q6_K; break; -#endif + default: throw std::runtime_error(format("invalid output file type %d\n", ftype)); } @@ -8304,7 +8299,6 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION); gguf_set_val_u32(ctx_out, "general.file_type", ftype); -#ifdef GGML_USE_K_QUANTS for (int i = 0; i < ml.n_tensors; ++i) { struct ggml_tensor * meta = ml.get_tensor_meta(i); @@ -8322,7 +8316,6 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s LLAMA_LOG_WARN("%s ============ Strange model: n_attention_wv = %d, n_feed_forward_w2 = %d, hparams.n_layer = %d\n", __func__, qs.n_attention_wv, qs.n_feed_forward_w2, model.hparams.n_layer); } -#endif size_t total_size_org = 0; size_t total_size_new = 0; @@ -8387,9 +8380,10 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s if (quantize) { new_type = quantized_type; -#ifdef GGML_USE_K_QUANTS - new_type = get_k_quant_type(qs, new_type, tensor, ftype); -#endif + if (!params->pure) { + new_type = get_k_quant_type(qs, new_type, tensor, ftype); + } + // If we've decided to quantize to the same type the tensor is already // in then there's nothing to do. quantize = tensor->type != new_type; @@ -8514,12 +8508,11 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s LLAMA_LOG_INFO("\n"); } } -#ifdef GGML_USE_K_QUANTS + if (qs.n_fallback > 0) { LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) incompatible with k-quants and required fallback quantization\n", __func__, qs.n_fallback, qs.n_k_quantized + qs.n_fallback); } -#endif } static int llama_apply_lora_from_file_internal( @@ -8844,6 +8837,7 @@ struct llama_model_quantize_params llama_model_quantize_default_params() { /*.allow_requantize =*/ false, /*.quantize_output_tensor =*/ true, /*.only_copy =*/ false, + /*.pure =*/ false, }; return result; diff --git a/llama.h b/llama.h index d901dcd9116d3d..6927bd6010dd7a 100644 --- a/llama.h +++ b/llama.h @@ -191,6 +191,7 @@ extern "C" { bool allow_requantize; // allow quantizing non-f32/f16 tensors bool quantize_output_tensor; // quantize output.weight bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored + bool pure; // disable k-quant mixtures and quantize all tensors to the same type } llama_model_quantize_params; // grammar types From 71a09da301705b9c5ad4ca3cf3fbd966dd3f1ec5 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 29 Oct 2023 18:32:51 +0200 Subject: [PATCH 021/206] llama : fix kv shift bug (#3835) ggml-ci --- llama.cpp | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/llama.cpp b/llama.cpp index 1d1db8fc97fa38..d8510a5cf01254 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1552,14 +1552,14 @@ static void llama_kv_cache_seq_shift( for (uint32_t i = 0; i < cache.size; ++i) { if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) { - cache.cells[i].pos += delta; + cache.has_shift = true; + cache.cells[i].pos += delta; + cache.cells[i].delta += delta; + if (cache.cells[i].pos < 0) { cache.cells[i].pos = -1; cache.cells[i].seq_id.clear(); if (new_head == cache.size) new_head = i; - } else { - cache.has_shift = true; - cache.cells[i].delta = delta; } } } @@ -6073,11 +6073,20 @@ static int llama_decode_internal( #endif // update 
the kv ring buffer - lctx.kv_self.has_shift = false; - lctx.kv_self.head += n_tokens; - // Ensure kv cache head points to a valid index. - if (lctx.kv_self.head >= lctx.kv_self.size) { - lctx.kv_self.head = 0; + { + if (kv_self.has_shift) { + kv_self.has_shift = false; + for (uint32_t i = 0; i < kv_self.size; ++i) { + kv_self.cells[i].delta = 0; + } + } + + kv_self.head += n_tokens; + + // Ensure kv cache head points to a valid index. + if (kv_self.head >= kv_self.size) { + kv_self.head = 0; + } } #ifdef GGML_PERF From 2046eb4345e62c4575b3cdc0115a51db89f3fb70 Mon Sep 17 00:00:00 2001 From: cebtenzzre Date: Sun, 29 Oct 2023 12:33:47 -0400 Subject: [PATCH 022/206] make : remove unnecessary dependency on build-info.h (#3842) --- Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 2cecc2216c87b6..c53c1e7260fef4 100644 --- a/Makefile +++ b/Makefile @@ -541,10 +541,10 @@ OBJS += ggml-alloc.o ggml-backend.o ggml-quants.o llama.o: llama.cpp ggml.h ggml-alloc.h ggml-backend.h ggml-cuda.h ggml-metal.h llama.h $(CXX) $(CXXFLAGS) -c $< -o $@ -COMMON_H_DEPS = common/common.h common/sampling.h build-info.h common/log.h -COMMON_DEPS = $(COMMON_H_DEPS) common.o sampling.o grammar-parser.o +COMMON_H_DEPS = common/common.h common/sampling.h common/log.h +COMMON_DEPS = common.o sampling.o grammar-parser.o -common.o: common/common.cpp $(COMMON_H_DEPS) +common.o: common/common.cpp build-info.h $(COMMON_H_DEPS) $(CXX) $(CXXFLAGS) -c $< -o $@ sampling.o: common/sampling.cpp $(COMMON_H_DEPS) From 6e08281e588bbba1a5d180290a94a43f167f3a1a Mon Sep 17 00:00:00 2001 From: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com> Date: Sun, 29 Oct 2023 11:31:40 -0600 Subject: [PATCH 023/206] Extend llama_kv_cache_seq_rm to allow matching any sequence (#3843) * Extend llama_kv_cache_seq_rm to allow matichng any sequence * Replace llama_kv_cache_tokens_rm with llama_kv_cache_clear Use llama_kv_cache_clear for cache clearing Change calls to llama_kv_cache_tokens_rm that want to delete by position to use llama_kv_cache_seq_rm functionality --- common/common.cpp | 2 +- examples/batched-bench/batched-bench.cpp | 2 +- examples/llama-bench/llama-bench.cpp | 4 ++-- examples/main/main.cpp | 2 +- examples/perplexity/perplexity.cpp | 6 ++--- examples/server/server.cpp | 2 +- llama.cpp | 29 ++++++++++++------------ llama.h | 15 +++++------- 8 files changed, 30 insertions(+), 32 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index f81f4d354bc017..c187128d6ede3d 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -889,7 +889,7 @@ std::tuple llama_init_from_gpt_par std::vector tmp = { llama_token_bos(model), llama_token_eos(model), }; llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, 0)); - llama_kv_cache_tokens_rm(lctx, -1, -1); + llama_kv_cache_clear(lctx); llama_reset_timings(lctx); } diff --git a/examples/batched-bench/batched-bench.cpp b/examples/batched-bench/batched-bench.cpp index 43f9c971d18465..533c55c17aad17 100644 --- a/examples/batched-bench/batched-bench.cpp +++ b/examples/batched-bench/batched-bench.cpp @@ -185,7 +185,7 @@ int main(int argc, char ** argv) { const auto t_pp_start = ggml_time_us(); - llama_kv_cache_tokens_rm(ctx, -1, -1); + llama_kv_cache_clear(ctx); if (!decode_helper(ctx, batch, ctx_params.n_batch)) { LOG_TEE("%s: llama_decode() failed\n", __func__); diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp index 20767d555b206d..780398184d2215 
100644 --- a/examples/llama-bench/llama-bench.cpp +++ b/examples/llama-bench/llama-bench.cpp @@ -1037,7 +1037,7 @@ int main(int argc, char ** argv) { test t(inst, lmodel, ctx); - llama_kv_cache_tokens_rm(ctx, -1, -1); + llama_kv_cache_clear(ctx); // warmup run if (t.n_prompt > 0) { @@ -1048,7 +1048,7 @@ int main(int argc, char ** argv) { } for (int i = 0; i < params.reps; i++) { - llama_kv_cache_tokens_rm(ctx, -1, -1); + llama_kv_cache_clear(ctx); uint64_t t_start = get_time_ns(); if (t.n_prompt > 0) { diff --git a/examples/main/main.cpp b/examples/main/main.cpp index 3d9f670b9da7f2..8a43b6ab878a5f 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -298,7 +298,7 @@ int main(int argc, char ** argv) { } // remove any "future" tokens that we might have inherited from the previous session - llama_kv_cache_tokens_rm(ctx, n_matching_session_tokens, -1); + llama_kv_cache_seq_rm(ctx, -1, n_matching_session_tokens, -1); } LOGLN( diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp index 3c2542e8c105e7..bd2c73d87875fe 100644 --- a/examples/perplexity/perplexity.cpp +++ b/examples/perplexity/perplexity.cpp @@ -210,7 +210,7 @@ static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params & const auto t_start = std::chrono::high_resolution_clock::now(); // clear the KV cache - llama_kv_cache_tokens_rm(ctx, -1, -1); + llama_kv_cache_clear(ctx); for (int j = 0; j < num_batches; ++j) { const int batch_start = start + j * n_batch; @@ -339,7 +339,7 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par const auto t_start = std::chrono::high_resolution_clock::now(); // clear the KV cache - llama_kv_cache_tokens_rm(ctx, -1, -1); + llama_kv_cache_clear(ctx); for (int j = 0; j < num_batches; ++j) { const int batch_start = start + j * n_batch; @@ -573,7 +573,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) { } // clear the KV cache - llama_kv_cache_tokens_rm(ctx, -1, -1); + llama_kv_cache_clear(ctx); auto logits = hellaswag_evaluate_tokens(ctx, query_embd, 0, params.n_batch, n_vocab); if (logits.empty()) { diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 5b7e4139de551e..c163c7f8ec0dd1 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -857,7 +857,7 @@ struct llama_server_context void kv_cache_clear() { // clear the entire KV cache - llama_kv_cache_tokens_rm(ctx, -1, -1); + llama_kv_cache_clear(ctx); clean_kv_cache = false; } diff --git a/llama.cpp b/llama.cpp index d8510a5cf01254..a4340d5277b09c 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1466,17 +1466,12 @@ static int32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) { return 0; } -static void llama_kv_cache_tokens_rm(struct llama_kv_cache & cache, int32_t c0, int32_t c1) { - if (c0 < 0) c0 = 0; - if (c1 < 0) c1 = cache.size; - - for (int32_t i = c0; i < c1; ++i) { +static void llama_kv_cache_clear(struct llama_kv_cache & cache) { + for (int32_t i = 0; i < cache.size; ++i) { cache.cells[i].pos = -1; cache.cells[i].seq_id.clear(); } - - // Searching for a free slot can start here since we know it will be empty. 
- cache.head = uint32_t(c0); + cache.head = 0; } static void llama_kv_cache_seq_rm( @@ -1490,8 +1485,14 @@ static void llama_kv_cache_seq_rm( if (p1 < 0) p1 = std::numeric_limits::max(); for (uint32_t i = 0; i < cache.size; ++i) { - if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) { - cache.cells[i].seq_id.erase(seq_id); + if (cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) { + if (seq_id < 0) { + cache.cells[i].seq_id.clear(); + } else if (cache.cells[i].has_seq_id(seq_id)) { + cache.cells[i].seq_id.erase(seq_id); + } else { + continue; + } if (cache.cells[i].seq_id.empty()) { cache.cells[i].pos = -1; if (new_head == cache.size) new_head = i; @@ -9207,8 +9208,8 @@ int llama_get_kv_cache_token_count(const struct llama_context * ctx) { return ctx->kv_self.head; } -void llama_kv_cache_tokens_rm(struct llama_context * ctx, int32_t c0, int32_t c1) { - llama_kv_cache_tokens_rm(ctx->kv_self, c0, c1); +void llama_kv_cache_clear(struct llama_context * ctx) { + llama_kv_cache_clear(ctx->kv_self); } void llama_kv_cache_seq_rm(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1) { @@ -9654,7 +9655,7 @@ int llama_eval( llama_token * tokens, int32_t n_tokens, int n_past) { - llama_kv_cache_tokens_rm(ctx->kv_self, n_past, -1); + llama_kv_cache_seq_rm(ctx->kv_self, -1, n_past, -1); const int ret = llama_decode_internal(*ctx, llama_batch_get_one(tokens, n_tokens, n_past, 0)); if (ret < 0) { @@ -9669,7 +9670,7 @@ int llama_eval_embd( float * embd, int32_t n_tokens, int n_past) { - llama_kv_cache_tokens_rm(ctx->kv_self, n_past, -1); + llama_kv_cache_seq_rm(ctx->kv_self, -1, n_past, -1); llama_batch batch = { n_tokens, nullptr, embd, nullptr, nullptr, nullptr, nullptr, n_past, 1, 0, }; diff --git a/llama.h b/llama.h index 6927bd6010dd7a..d727dbd9fd915d 100644 --- a/llama.h +++ b/llama.h @@ -334,17 +334,14 @@ extern "C" { LLAMA_API DEPRECATED(int llama_get_kv_cache_token_count(const struct llama_context * ctx), "avoid using this, it will be removed in the future, instead - count the tokens in user code"); - // Remove all tokens data of cells in [c0, c1) - // c0 < 0 : [0, c1] - // c1 < 0 : [c0, inf) - LLAMA_API void llama_kv_cache_tokens_rm( - struct llama_context * ctx, - int32_t c0, - int32_t c1); + // Clear the KV cache + LLAMA_API void llama_kv_cache_clear( + struct llama_context * ctx); // Removes all tokens that belong to the specified sequence and have positions in [p0, p1) - // p0 < 0 : [0, p1] - // p1 < 0 : [p0, inf) + // seq_id < 0 : match any sequence + // p0 < 0 : [0, p1] + // p1 < 0 : [p0, inf) LLAMA_API void llama_kv_cache_seq_rm( struct llama_context * ctx, llama_seq_id seq_id, From 207b51900e15cc7f89763a3bb1c565fe11cbb45d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 30 Oct 2023 19:19:15 +0200 Subject: [PATCH 024/206] ggml : move FP16 <-> FP32 code to ggml-impl.h (#3861) * ggml : move FP16 <-> FP32 stuff to ggml-impl.h ggml-ci * tests : fix ARM build * ggml : explicitly initialize deprecated type traits * ggml : add math.h to ggml-impl.h * ggml : remove duplicate static assert macros * ggml : prefix lookup tables with ggml_ ggml-ci * ggml-impl : move extern "C" to start of file --- ggml-impl.h | 237 ++++++++++++++++++++++++ ggml-quants.c | 350 ++++++++++++++++++------------------ ggml-quants.h | 14 +- ggml.c | 282 +++++------------------------ llama.cpp | 2 +- tests/test-double-float.cpp | 2 +- tests/test-quantize-fns.cpp | 7 + 7 files changed, 470 insertions(+), 424 deletions(-) create mode 100644 ggml-impl.h 
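As a quick illustration of what the relocated conversion helpers look like from a caller's side, here is a minimal sketch. It is not part of the patch itself; it only uses the ggml_fp16_t type and the GGML_COMPUTE_FP32_TO_FP16 / GGML_COMPUTE_FP16_TO_FP32 macros that the new ggml-impl.h below defines, and it assumes the ggml headers are on the include path:

    // Illustrative sketch, not part of this commit: round-trip a float through
    // the fp16 helpers that ggml-impl.h now provides.  The GGML_COMPUTE_* macros
    // convert directly and do not depend on the ggml_table_f32_f16 lookup table
    // that ggml_init() populates.
    #include <stdio.h>
    #include "ggml-impl.h"

    int main(void) {
        const float x = 0.15625f;                       // exactly representable in fp16
        const ggml_fp16_t h = GGML_COMPUTE_FP32_TO_FP16(x);
        const float y = GGML_COMPUTE_FP16_TO_FP32(h);
        printf("%f -> %f after an fp32 -> fp16 -> fp32 round trip\n", x, y);
        return 0;
    }
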
diff --git a/ggml-impl.h b/ggml-impl.h new file mode 100644 index 00000000000000..5ec18a50c8da57 --- /dev/null +++ b/ggml-impl.h @@ -0,0 +1,237 @@ +#pragma once + +#include "ggml.h" + +// GGML internal header + +#include +#include +#include +#include // memcpy +#include // fabsf + +#ifdef __cplusplus +extern "C" { +#endif + +// static_assert should be a #define, but if it's not, +// fall back to the _Static_assert C11 keyword. +// if C99 - static_assert is noop +// ref: https://stackoverflow.com/a/53923785/4039976 +#ifndef static_assert +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L) +#define static_assert(cond, msg) _Static_assert(cond, msg) +#else +#define static_assert(cond, msg) struct global_scope_noop_trick +#endif +#endif + +// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512 +#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__)) +#ifndef __FMA__ +#define __FMA__ +#endif +#ifndef __F16C__ +#define __F16C__ +#endif +#ifndef __SSE3__ +#define __SSE3__ +#endif +#endif + +#undef MIN +#undef MAX + +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define MAX(a, b) ((a) > (b) ? (a) : (b)) + +// 16-bit float +// on Arm, we use __fp16 +// on x86, we use uint16_t +#if defined(__ARM_NEON) && !defined(_MSC_VER) + +// if YCM cannot find , make a symbolic link to it, for example: +// +// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/ +// +#include + +#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x)) +#define GGML_COMPUTE_FP32_TO_FP16(x) (x) + +#define GGML_FP16_TO_FP32(x) ((float) (x)) +#define GGML_FP32_TO_FP16(x) (x) + +#else + +#ifdef __wasm_simd128__ +#include +#else +#ifdef __POWER9_VECTOR__ +#include +#undef bool +#define bool _Bool +#else +#if defined(_MSC_VER) || defined(__MINGW32__) +#include +#else +#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__) +#if !defined(__riscv) +#include +#endif +#endif +#endif +#endif +#endif + +#ifdef __riscv_v_intrinsic +#include +#endif + +#ifdef __F16C__ + +#ifdef _MSC_VER +#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x))) +#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0) +#else +#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x) +#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0) +#endif + +#elif defined(__POWER9_VECTOR__) + +#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) +#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) +/* the inline asm below is about 12% faster than the lookup method */ +#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x) +#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) + +static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { + register float f; + register double d; + __asm__( + "mtfprd %0,%2\n" + "xscvhpdp %0,%0\n" + "frsp %1,%0\n" : + /* temp */ "=d"(d), + /* out */ "=f"(f): + /* in */ "r"(h)); + return f; +} + +static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { + register double d; + register ggml_fp16_t r; + __asm__( /* xscvdphp can work on double or single precision */ + "xscvdphp %0,%2\n" + "mffprd %1,%0\n" : + /* temp */ "=d"(d), + /* out */ "=r"(r): + /* in */ "f"(f)); + return r; +} + +#else + +// FP16 <-> FP32 +// ref: https://github.com/Maratyszcza/FP16 + +static inline float fp32_from_bits(uint32_t w) { + union { + uint32_t as_bits; + float as_value; + } fp32; + fp32.as_bits = w; + return fp32.as_value; 
+} + +static inline uint32_t fp32_to_bits(float f) { + union { + float as_value; + uint32_t as_bits; + } fp32; + fp32.as_value = f; + return fp32.as_bits; +} + +static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { + const uint32_t w = (uint32_t) h << 16; + const uint32_t sign = w & UINT32_C(0x80000000); + const uint32_t two_w = w + w; + + const uint32_t exp_offset = UINT32_C(0xE0) << 23; +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__) + const float exp_scale = 0x1.0p-112f; +#else + const float exp_scale = fp32_from_bits(UINT32_C(0x7800000)); +#endif + const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale; + + const uint32_t magic_mask = UINT32_C(126) << 23; + const float magic_bias = 0.5f; + const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias; + + const uint32_t denormalized_cutoff = UINT32_C(1) << 27; + const uint32_t result = sign | + (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value)); + return fp32_from_bits(result); +} + +static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__) + const float scale_to_inf = 0x1.0p+112f; + const float scale_to_zero = 0x1.0p-110f; +#else + const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000)); + const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000)); +#endif + float base = (fabsf(f) * scale_to_inf) * scale_to_zero; + + const uint32_t w = fp32_to_bits(f); + const uint32_t shl1_w = w + w; + const uint32_t sign = w & UINT32_C(0x80000000); + uint32_t bias = shl1_w & UINT32_C(0xFF000000); + if (bias < UINT32_C(0x71000000)) { + bias = UINT32_C(0x71000000); + } + + base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base; + const uint32_t bits = fp32_to_bits(base); + const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00); + const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF); + const uint32_t nonsign = exp_bits + mantissa_bits; + return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign); +} + +#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) +#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) + +#endif // __F16C__ + +#endif // __ARM_NEON + +// precomputed f32 table for f16 (256 KB) +// defined in ggml.c, initialized in ggml_init() +extern float ggml_table_f32_f16[1 << 16]; + +// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32, +// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON. +// This is also true for POWER9. +#if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16) + +inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) { + uint16_t s; + memcpy(&s, &f, sizeof(uint16_t)); + return ggml_table_f32_f16[s]; +} + +#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x) +#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) + +#endif + + // TODO: backend v2 PR + +#ifdef __cplusplus +} +#endif diff --git a/ggml-quants.c b/ggml-quants.c index fd4ee1be64befa..72159446738e30 100644 --- a/ggml-quants.c +++ b/ggml-quants.c @@ -1,5 +1,5 @@ #include "ggml-quants.h" -#include "ggml.h" +#include "ggml-impl.h" #include #include @@ -352,7 +352,7 @@ void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict const float d = max / -8; const float id = d ? 
1.0f/d : 0.0f; - y[i].d = ggml_fp32_to_fp16(d); + y[i].d = GGML_FP32_TO_FP16(d); for (int j = 0; j < qk/2; ++j) { const float x0 = x[i*qk + 0 + j]*id; @@ -392,8 +392,8 @@ void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict const float d = (max - min) / ((1 << 4) - 1); const float id = d ? 1.0f/d : 0.0f; - y[i].d = ggml_fp32_to_fp16(d); - y[i].m = ggml_fp32_to_fp16(min); + y[i].d = GGML_FP32_TO_FP16(d); + y[i].m = GGML_FP32_TO_FP16(min); for (int j = 0; j < qk/2; ++j) { const float x0 = (x[i*qk + 0 + j] - min)*id; @@ -434,7 +434,7 @@ void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict const float d = max / -16; const float id = d ? 1.0f/d : 0.0f; - y[i].d = ggml_fp32_to_fp16(d); + y[i].d = GGML_FP32_TO_FP16(d); uint32_t qh = 0; @@ -481,8 +481,8 @@ void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict const float d = (max - min) / ((1 << 5) - 1); const float id = d ? 1.0f/d : 0.0f; - y[i].d = ggml_fp32_to_fp16(d); - y[i].m = ggml_fp32_to_fp16(min); + y[i].d = GGML_FP32_TO_FP16(d); + y[i].m = GGML_FP32_TO_FP16(min); uint32_t qh = 0; @@ -524,7 +524,7 @@ void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f/d : 0.0f; - y[i].d = ggml_fp32_to_fp16(d); + y[i].d = GGML_FP32_TO_FP16(d); for (int j = 0; j < QK8_0; ++j) { const float x0 = x[i*QK8_0 + j]*id; @@ -559,7 +559,7 @@ void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) { const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f/d : 0.0f; - y[i].d = ggml_fp32_to_fp16(d); + y[i].d = GGML_FP32_TO_FP16(d); for (int j = 0; j < 8; j++) { const float32x4_t v = vmulq_n_f32(srcv[j], id); @@ -592,7 +592,7 @@ void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) { const float d = amax / ((1 << 7) - 1); const float id = d ? 1.0f/d : 0.0f; - y[i].d = ggml_fp32_to_fp16(d); + y[i].d = GGML_FP32_TO_FP16(d); for (int j = 0; j < 8; j++) { const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); @@ -627,7 +627,7 @@ void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) { // Quantize these floats const float d = maxScalar / 127.f; - y[i].d = ggml_fp32_to_fp16(d); + y[i].d = GGML_FP32_TO_FP16(d); const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f; const __m256 mul = _mm256_set1_ps( id ); @@ -704,7 +704,7 @@ void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) { const float d = amax / ((1 << 7) - 1); const float id = d ? 
1.0f/d : 0.0f; - y[i].d = ggml_fp32_to_fp16(d); + y[i].d = GGML_FP32_TO_FP16(d); vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl); @@ -982,7 +982,7 @@ void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int const int nb = k / qk; for (int i = 0; i < nb; i++) { - const float d = ggml_fp16_to_fp32(x[i].d); + const float d = GGML_FP16_TO_FP32(x[i].d); for (int j = 0; j < qk/2; ++j) { const int x0 = (x[i].qs[j] & 0x0F) - 8; @@ -1002,8 +1002,8 @@ void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int const int nb = k / qk; for (int i = 0; i < nb; i++) { - const float d = ggml_fp16_to_fp32(x[i].d); - const float m = ggml_fp16_to_fp32(x[i].m); + const float d = GGML_FP16_TO_FP32(x[i].d); + const float m = GGML_FP16_TO_FP32(x[i].m); for (int j = 0; j < qk/2; ++j) { const int x0 = (x[i].qs[j] & 0x0F); @@ -1023,7 +1023,7 @@ void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int const int nb = k / qk; for (int i = 0; i < nb; i++) { - const float d = ggml_fp16_to_fp32(x[i].d); + const float d = GGML_FP16_TO_FP32(x[i].d); uint32_t qh; memcpy(&qh, x[i].qh, sizeof(qh)); @@ -1049,8 +1049,8 @@ void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int const int nb = k / qk; for (int i = 0; i < nb; i++) { - const float d = ggml_fp16_to_fp32(x[i].d); - const float m = ggml_fp16_to_fp32(x[i].m); + const float d = GGML_FP16_TO_FP32(x[i].d); + const float m = GGML_FP16_TO_FP32(x[i].m); uint32_t qh; memcpy(&qh, x[i].qh, sizeof(qh)); @@ -1076,7 +1076,7 @@ void dequantize_row_q8_0(const block_q8_0 * restrict x, float * restrict y, int const int nb = k / qk; for (int i = 0; i < nb; i++) { - const float d = ggml_fp16_to_fp32(x[i].d); + const float d = GGML_FP16_TO_FP32(x[i].d); for (int j = 0; j < qk; ++j) { y[i*qk + j] = x[i].qs[j]*d; @@ -1387,10 +1387,10 @@ void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict int l = nearest_int(iscale*scales[j]); y[i].scales[j] = l; } - y[i].d = ggml_fp32_to_fp16(max_scale/q4scale); + y[i].d = GGML_FP32_TO_FP16(max_scale/q4scale); } else { for (int j = 0; j < QK_K/16; ++j) y[i].scales[j] = 0; - y[i].d = ggml_fp32_to_fp16(0.f); + y[i].d = GGML_FP32_TO_FP16(0.f); } if (max_min > 0) { float iscale = q4scale/max_min; @@ -1398,14 +1398,14 @@ void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict int l = nearest_int(iscale*mins[j]); y[i].scales[j] |= (l << 4); } - y[i].dmin = ggml_fp32_to_fp16(max_min/q4scale); + y[i].dmin = GGML_FP32_TO_FP16(max_min/q4scale); } else { - y[i].dmin = ggml_fp32_to_fp16(0.f); + y[i].dmin = GGML_FP32_TO_FP16(0.f); } for (int j = 0; j < QK_K/16; ++j) { - const float d = ggml_fp16_to_fp32(y[i].d) * (y[i].scales[j] & 0xF); + const float d = GGML_FP16_TO_FP32(y[i].d) * (y[i].scales[j] & 0xF); if (!d) continue; - const float dm = ggml_fp16_to_fp32(y[i].dmin) * (y[i].scales[j] >> 4); + const float dm = GGML_FP16_TO_FP32(y[i].dmin) * (y[i].scales[j] >> 4); for (int ii = 0; ii < 16; ++ii) { int l = nearest_int((x[16*j + ii] + dm)/d); l = MAX(0, MIN(3, l)); @@ -1436,8 +1436,8 @@ void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int for (int i = 0; i < nb; i++) { - const float d = ggml_fp16_to_fp32(x[i].d); - const float min = ggml_fp16_to_fp32(x[i].dmin); + const float d = GGML_FP16_TO_FP32(x[i].d); + const float min = GGML_FP16_TO_FP32(x[i].dmin); const uint8_t * q = x[i].qs; @@ -1526,16 +1526,16 @@ void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict l >>= 4; 
y[i].scales[j%4 + 8] |= (l << (2*(j/4))); } - y[i].d = ggml_fp32_to_fp16(1/iscale); + y[i].d = GGML_FP32_TO_FP16(1/iscale); } else { - y[i].d = ggml_fp32_to_fp16(0.f); + y[i].d = GGML_FP32_TO_FP16(0.f); } int8_t sc; for (int j = 0; j < QK_K/16; ++j) { sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4; sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32; - float d = ggml_fp16_to_fp32(y[i].d) * sc; + float d = GGML_FP16_TO_FP32(y[i].d) * sc; if (!d) { continue; } @@ -1555,16 +1555,16 @@ void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict l2 = 8 + MAX(-8, MIN(7, l2)); y[i].scales[j/2] = l1 | (l2 << 4); } - y[i].d = ggml_fp32_to_fp16(1/iscale); + y[i].d = GGML_FP32_TO_FP16(1/iscale); } else { for (int j = 0; j < QK_K/16; j+=2) { y[i].scales[j/2] = 0; } - y[i].d = ggml_fp32_to_fp16(0.f); + y[i].d = GGML_FP32_TO_FP16(0.f); } for (int j = 0; j < QK_K/16; ++j) { int s = j%2 == 0 ? y[i].scales[j/2] & 0xF : y[i].scales[j/2] >> 4; - float d = ggml_fp16_to_fp32(y[i].d) * (s - 8); + float d = GGML_FP16_TO_FP32(y[i].d) * (s - 8); if (!d) { continue; } @@ -1618,7 +1618,7 @@ void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int for (int i = 0; i < nb; i++) { - const float d_all = ggml_fp16_to_fp32(x[i].d); + const float d_all = GGML_FP16_TO_FP32(x[i].d); const uint8_t * restrict q = x[i].qs; const uint8_t * restrict hm = x[i].hmask; @@ -1663,7 +1663,7 @@ void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int for (int i = 0; i < nb; i++) { - const float d_all = ggml_fp16_to_fp32(x[i].d); + const float d_all = GGML_FP16_TO_FP32(x[i].d); const uint8_t * restrict q = x[i].qs; const uint8_t * restrict hm = x[i].hmask; @@ -1753,15 +1753,15 @@ void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y[i].scales[j-0] |= ((lm >> 4) << 6); } } - y[i].d = ggml_fp32_to_fp16(max_scale/63.f); - y[i].dmin = ggml_fp32_to_fp16(max_min/63.f); + y[i].d = GGML_FP32_TO_FP16(max_scale/63.f); + y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f); uint8_t sc, m; for (int j = 0; j < QK_K/32; ++j) { get_scale_min_k4(j, y[i].scales, &sc, &m); - const float d = ggml_fp16_to_fp32(y[i].d) * sc; + const float d = GGML_FP16_TO_FP32(y[i].d) * sc; if (!d) continue; - const float dm = ggml_fp16_to_fp32(y[i].dmin) * m; + const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m; for (int ii = 0; ii < 32; ++ii) { int l = nearest_int((x[32*j + ii] + dm)/d); l = MAX(0, MIN(15, l)); @@ -1778,17 +1778,17 @@ void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict int m2 = nearest_int(inv_min*mins[1]); y[i].scales[0] = d1 | (m1 << 4); y[i].scales[1] = d2 | (m2 << 4); - y[i].d[0] = ggml_fp32_to_fp16(max_scale/s_factor); - y[i].d[1] = ggml_fp32_to_fp16(max_min/s_factor); + y[i].d[0] = GGML_FP32_TO_FP16(max_scale/s_factor); + y[i].d[1] = GGML_FP32_TO_FP16(max_min/s_factor); float sumlx = 0; int suml2 = 0; for (int j = 0; j < QK_K/32; ++j) { const uint8_t sd = y[i].scales[j] & 0xF; const uint8_t sm = y[i].scales[j] >> 4; - const float d = ggml_fp16_to_fp32(y[i].d[0]) * sd; + const float d = GGML_FP16_TO_FP32(y[i].d[0]) * sd; if (!d) continue; - const float m = ggml_fp16_to_fp32(y[i].d[1]) * sm; + const float m = GGML_FP16_TO_FP32(y[i].d[1]) * sm; for (int ii = 0; ii < 32; ++ii) { int l = nearest_int((x[32*j + ii] + m)/d); l = MAX(0, MIN(15, l)); @@ -1798,7 +1798,7 @@ void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict } } if (suml2) { - y[i].d[0] = ggml_fp32_to_fp16(sumlx/suml2); + 
y[i].d[0] = GGML_FP32_TO_FP16(sumlx/suml2); } #endif uint8_t * q = y[i].qs; @@ -1822,8 +1822,8 @@ void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int #if QK_K == 256 - const float d = ggml_fp16_to_fp32(x[i].d); - const float min = ggml_fp16_to_fp32(x[i].dmin); + const float d = GGML_FP16_TO_FP32(x[i].d); + const float min = GGML_FP16_TO_FP32(x[i].dmin); int is = 0; uint8_t sc, m; @@ -1837,8 +1837,8 @@ void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int q += 32; is += 2; } #else - const float dall = ggml_fp16_to_fp32(x[i].d[0]); - const float mall = ggml_fp16_to_fp32(x[i].d[1]); + const float dall = GGML_FP16_TO_FP32(x[i].d[0]); + const float mall = GGML_FP16_TO_FP32(x[i].d[1]); const float d1 = dall * (x[i].scales[0] & 0xF), m1 = mall * (x[i].scales[0] >> 4); const float d2 = dall * (x[i].scales[1] & 0xF), m2 = mall * (x[i].scales[1] >> 4); for (int l = 0; l < 32; ++l) { @@ -1924,15 +1924,15 @@ void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y[i].scales[j-0] |= ((lm >> 4) << 6); } } - y[i].d = ggml_fp32_to_fp16(max_scale/63.f); - y[i].dmin = ggml_fp32_to_fp16(max_min/63.f); + y[i].d = GGML_FP32_TO_FP16(max_scale/63.f); + y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f); uint8_t sc, m; for (int j = 0; j < QK_K/32; ++j) { get_scale_min_k4(j, y[i].scales, &sc, &m); - const float d = ggml_fp16_to_fp32(y[i].d) * sc; + const float d = GGML_FP16_TO_FP32(y[i].d) * sc; if (!d) continue; - const float dm = ggml_fp16_to_fp32(y[i].dmin) * m; + const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m; for (int ii = 0; ii < 32; ++ii) { int l = nearest_int((x[32*j + ii] + dm)/d); l = MAX(0, MIN(31, l)); @@ -1976,10 +1976,10 @@ void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict int l = nearest_int(iscale*scales[j]); y[i].scales[j] = MAX(-128, MIN(127, l)); } - y[i].d = ggml_fp32_to_fp16(1/iscale); + y[i].d = GGML_FP32_TO_FP16(1/iscale); for (int j = 0; j < QK_K/16; ++j) { - const float d = ggml_fp16_to_fp32(y[i].d) * y[i].scales[j]; + const float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j]; if (!d) continue; for (int ii = 0; ii < 16; ++ii) { int l = nearest_int(x[16*j + ii]/d); @@ -2023,8 +2023,8 @@ void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int #if QK_K == 256 - const float d = ggml_fp16_to_fp32(x[i].d); - const float min = ggml_fp16_to_fp32(x[i].dmin); + const float d = GGML_FP16_TO_FP32(x[i].d); + const float min = GGML_FP16_TO_FP32(x[i].dmin); int is = 0; uint8_t sc, m; @@ -2040,7 +2040,7 @@ void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int u1 <<= 2; u2 <<= 2; } #else - float d = ggml_fp16_to_fp32(x[i].d); + float d = GGML_FP16_TO_FP32(x[i].d); const int8_t * restrict s = x[i].scales; for (int l = 0; l < 8; ++l) { y[l+ 0] = d * s[0] * ((ql[l+ 0] & 0xF) - (qh[l] & 0x01 ? 
0 : 16)); @@ -2103,19 +2103,19 @@ void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict if (!max_abs_scale) { memset(&y[i], 0, sizeof(block_q6_K)); - y[i].d = ggml_fp32_to_fp16(0.f); + y[i].d = GGML_FP32_TO_FP16(0.f); x += QK_K; continue; } float iscale = -128.f/max_scale; - y[i].d = ggml_fp32_to_fp16(1/iscale); + y[i].d = GGML_FP32_TO_FP16(1/iscale); for (int ib = 0; ib < QK_K/16; ++ib) { y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib])); } for (int j = 0; j < QK_K/16; ++j) { - float d = ggml_fp16_to_fp32(y[i].d) * y[i].scales[j]; + float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j]; if (!d) { continue; } @@ -2164,7 +2164,7 @@ void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int for (int i = 0; i < nb; i++) { - const float d = ggml_fp16_to_fp32(x[i].d); + const float d = GGML_FP16_TO_FP32(x[i].d); const uint8_t * restrict ql = x[i].ql; const uint8_t * restrict qh = x[i].qh; @@ -2371,8 +2371,8 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h); const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h); - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), ggml_fp16_to_fp32(x0->d)*ggml_fp16_to_fp32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), ggml_fp16_to_fp32(x1->d)*ggml_fp16_to_fp32(y1->d)); + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); #else const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0l)); const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l)); @@ -2389,8 +2389,8 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), ggml_fp16_to_fp32(x0->d)*ggml_fp16_to_fp32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), ggml_fp16_to_fp32(x1->d)*ggml_fp16_to_fp32(y1->d)); + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); #endif } @@ -2402,7 +2402,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, // Main loop for (int i = 0; i < nb; ++i) { /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps( ggml_fp16_to_fp32(x[i].d) * ggml_fp16_to_fp32(y[i].d) ); + const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) ); __m256i bx = bytes_from_nibbles_32(x[i].qs); @@ -2426,7 +2426,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, // Main loop for (int i = 0; i < nb; ++i) { // Compute combined scale for the block - const __m256 d = _mm256_set1_ps( ggml_fp16_to_fp32(x[i].d) * ggml_fp16_to_fp32(y[i].d) ); + const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) ); const __m128i lowMask = _mm_set1_epi8(0xF); const __m128i off = _mm_set1_epi8(8); @@ -2468,7 +2468,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0); // 
Compute combined scale for the block 0 and 1 - const __m128 d_0_1 = _mm_set1_ps( ggml_fp16_to_fp32(x[0].d) * ggml_fp16_to_fp32(y[0].d) ); + const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) ); const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs); @@ -2486,7 +2486,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0); // Compute combined scale for the block 2 and 3 - const __m128 d_2_3 = _mm_set1_ps( ggml_fp16_to_fp32(x[1].d) * ggml_fp16_to_fp32(y[1].d) ); + const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) ); const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs); @@ -2521,7 +2521,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0); // Compute combined scale for the block 0 and 1 - const __m128 d_0_1 = _mm_set1_ps( ggml_fp16_to_fp32(x[i].d) * ggml_fp16_to_fp32(y[i].d) ); + const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) ); const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs); @@ -2539,7 +2539,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0); // Compute combined scale for the block 2 and 3 - const __m128 d_2_3 = _mm_set1_ps( ggml_fp16_to_fp32(x[i + 1].d) * ggml_fp16_to_fp32(y[i + 1].d) ); + const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) ); const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs); @@ -2606,7 +2606,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); - sumf += sumi*ggml_fp16_to_fp32(x[i].d)*ggml_fp16_to_fp32(y[i].d); + sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d); } *s = sumf; @@ -2624,7 +2624,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]); } - sumf += sumi*ggml_fp16_to_fp32(x[i].d)*ggml_fp16_to_fp32(y[i].d); + sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d); } *s = sumf; @@ -2655,7 +2655,7 @@ void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restri const block_q8_1 * restrict y0 = &y[i + 0]; const block_q8_1 * restrict y1 = &y[i + 1]; - summs += ggml_fp16_to_fp32(x0->m) * y0->s + ggml_fp16_to_fp32(x1->m) * y1->s; + summs += GGML_FP16_TO_FP32(x0->m) * y0->s + GGML_FP16_TO_FP32(x1->m) * y1->s; const uint8x16_t m4b = vdupq_n_u8(0x0F); @@ -2679,8 +2679,8 @@ void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restri const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h); const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h); - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), ggml_fp16_to_fp32(x0->d)*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), ggml_fp16_to_fp32(x1->d)*y1->d); + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d); #else const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0l), vget_low_s8 (v1_0l)); const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0l), vget_high_s8(v1_0l)); @@ -2697,8 +2697,8 @@ void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * 
restri const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), ggml_fp16_to_fp32(x0->d)*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), ggml_fp16_to_fp32(x1->d)*y1->d); + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d); #endif } @@ -2711,10 +2711,10 @@ void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restri // Main loop for (int i = 0; i < nb; ++i) { - const float d0 = ggml_fp16_to_fp32(x[i].d); + const float d0 = GGML_FP16_TO_FP32(x[i].d); const float d1 = y[i].d; - summs += ggml_fp16_to_fp32(x[i].m) * y[i].s; + summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s; const __m256 d0v = _mm256_set1_ps( d0 ); const __m256 d1v = _mm256_set1_ps( d1 ); @@ -2766,7 +2766,7 @@ void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restri int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); - sumf += (ggml_fp16_to_fp32(x[i].d)*y[i].d)*sumi + ggml_fp16_to_fp32(x[i].m)*y[i].s; + sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s; } *s = sumf; @@ -2784,7 +2784,7 @@ void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restri sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]); } - sumf += (ggml_fp16_to_fp32(x[i].d)*y[i].d)*sumi + ggml_fp16_to_fp32(x[i].m)*y[i].s; + sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s; } *s = sumf; @@ -2864,10 +2864,10 @@ void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restri #if defined(__ARM_FEATURE_DOTPROD) sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), - vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), ggml_fp16_to_fp32(x0->d)*ggml_fp16_to_fp32(y0->d)); + vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), - vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), ggml_fp16_to_fp32(x1->d)*ggml_fp16_to_fp32(y1->d)); + vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); #else const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l)); const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l)); @@ -2884,8 +2884,8 @@ void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restri const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), ggml_fp16_to_fp32(x0->d)*ggml_fp16_to_fp32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), ggml_fp16_to_fp32(x1->d)*ggml_fp16_to_fp32(y1->d)); + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); #endif } @@ -2946,7 +2946,7 @@ void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restri wasm_i32x4_dot_i16x8(v0lfh, v1lh)), wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), - wasm_f32x4_splat(ggml_fp16_to_fp32(x0->d) * 
ggml_fp16_to_fp32(y0->d)))); + wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d)))); } *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + @@ -2958,7 +2958,7 @@ void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restri // Main loop for (int i = 0; i < nb; i++) { /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps(ggml_fp16_to_fp32(x[i].d) * ggml_fp16_to_fp32(y[i].d)); + const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d)); __m256i bx = bytes_from_nibbles_32(x[i].qs); __m256i bxhi = bytes_from_bits_32(x[i].qh); @@ -2982,7 +2982,7 @@ void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restri // Main loop for (int i = 0; i < nb; i++) { /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps(ggml_fp16_to_fp32(x[i].d) * ggml_fp16_to_fp32(y[i].d)); + const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d)); __m256i bx = bytes_from_nibbles_32(x[i].qs); const __m256i bxhi = bytes_from_bits_32(x[i].qh); @@ -3066,7 +3066,7 @@ void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restri int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); - sumf += (ggml_fp16_to_fp32(x[i].d)*ggml_fp16_to_fp32(y[i].d)) * sumi; + sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi; } *s = sumf; @@ -3090,7 +3090,7 @@ void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restri sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]); } - sumf += (ggml_fp16_to_fp32(x[i].d)*ggml_fp16_to_fp32(y[i].d)) * sumi; + sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi; } *s = sumf; @@ -3130,8 +3130,8 @@ void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restri const uint8x16_t m4b = vdupq_n_u8(0x0F); - summs0 += ggml_fp16_to_fp32(x0->m) * y0->s; - summs1 += ggml_fp16_to_fp32(x1->m) * y1->s; + summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s; + summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s; // extract the 5th bit via lookup table ((b) << 4) memcpy(&qh0, x0->qh, sizeof(qh0)); @@ -3176,10 +3176,10 @@ void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restri #if defined(__ARM_FEATURE_DOTPROD) sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), - vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), ggml_fp16_to_fp32(x0->d)*y0->d); + vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), - vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), ggml_fp16_to_fp32(x1->d)*y1->d); + vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d); #else const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l)); const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l)); @@ -3196,8 +3196,8 @@ void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restri const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), ggml_fp16_to_fp32(x0->d)*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), ggml_fp16_to_fp32(x1->d)*y1->d); + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), 
GGML_FP16_TO_FP32(x1->d)*y1->d); #endif } @@ -3215,7 +3215,7 @@ void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restri const block_q5_1 * restrict x0 = &x[i]; const block_q8_1 * restrict y0 = &y[i]; - summs += ggml_fp16_to_fp32(x0->m) * y0->s; + summs += GGML_FP16_TO_FP32(x0->m) * y0->s; const v128_t m4b = wasm_i8x16_splat(0x0F); @@ -3262,7 +3262,7 @@ void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restri wasm_i32x4_dot_i16x8(v0lfh, v1lh)), wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), - wasm_f32x4_splat(ggml_fp16_to_fp32(x0->d) * y0->d))); + wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * y0->d))); } *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + @@ -3275,9 +3275,9 @@ void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restri // Main loop for (int i = 0; i < nb; i++) { - const __m256 dx = _mm256_set1_ps(ggml_fp16_to_fp32(x[i].d)); + const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d)); - summs += ggml_fp16_to_fp32(x[i].m) * y[i].s; + summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s; __m256i bx = bytes_from_nibbles_32(x[i].qs); __m256i bxhi = bytes_from_bits_32(x[i].qh); @@ -3302,9 +3302,9 @@ void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restri // Main loop for (int i = 0; i < nb; i++) { - const __m256 dx = _mm256_set1_ps(ggml_fp16_to_fp32(x[i].d)); + const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d)); - summs += ggml_fp16_to_fp32(x[i].m) * y[i].s; + summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s; __m256i bx = bytes_from_nibbles_32(x[i].qs); const __m256i bxhi = bytes_from_bits_32(x[i].qh); @@ -3385,7 +3385,7 @@ void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restri int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); - sumf += (ggml_fp16_to_fp32(x[i].d)*y[i].d)*sumi + ggml_fp16_to_fp32(x[i].m)*y[i].s; + sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s; } *s = sumf; @@ -3409,7 +3409,7 @@ void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restri sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]); } - sumf += (ggml_fp16_to_fp32(x[i].d)*y[i].d)*sumi + ggml_fp16_to_fp32(x[i].m)*y[i].s; + sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s; } *s = sumf; @@ -3451,11 +3451,11 @@ void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restri #if defined(__ARM_FEATURE_DOTPROD) sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), x0_0, y0_0), - vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), ggml_fp16_to_fp32(x0->d)*ggml_fp16_to_fp32(y0->d)); + vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( vdotq_s32(vdupq_n_s32(0), x1_0, y1_0), - vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), ggml_fp16_to_fp32(x1->d)*ggml_fp16_to_fp32(y1->d)); + vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); #else const int16x8_t p0_0 = vmull_s8(vget_low_s8 (x0_0), vget_low_s8 (y0_0)); @@ -3473,8 +3473,8 @@ void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restri const int32x4_t p2 = vaddq_s32(vpaddlq_s16(p1_0), vpaddlq_s16(p1_1)); const int32x4_t p3 = vaddq_s32(vpaddlq_s16(p1_2), vpaddlq_s16(p1_3)); - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), ggml_fp16_to_fp32(x0->d)*ggml_fp16_to_fp32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, 
vcvtq_f32_s32(vaddq_s32(p2, p3)), ggml_fp16_to_fp32(x1->d)*ggml_fp16_to_fp32(y1->d)); + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(p2, p3)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); #endif } @@ -3486,7 +3486,7 @@ void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restri // Main loop for (int i = 0; i < nb; ++i) { // Compute combined scale for the block - const __m256 d = _mm256_set1_ps(ggml_fp16_to_fp32(x[i].d) * ggml_fp16_to_fp32(y[i].d)); + const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d)); __m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs); __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); @@ -3517,7 +3517,7 @@ void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restri int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum); - sumf += sumi*(ggml_fp16_to_fp32(x[i].d)*ggml_fp16_to_fp32(y[i].d)); + sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)); } *s = sumf; @@ -3532,7 +3532,7 @@ void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restri sumi += x[i].qs[j]*y[i].qs[j]; } - sumf += sumi*(ggml_fp16_to_fp32(x[i].d)*ggml_fp16_to_fp32(y[i].d)); + sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)); } *s = sumf; @@ -3562,8 +3562,8 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); - const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); const uint8_t * restrict q2 = x[i].qs; const int8_t * restrict q8 = y[i].qs; @@ -3641,8 +3641,8 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); - const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); const uint8_t * restrict q2 = x[i].qs; const int8_t * restrict q8 = y[i].qs; @@ -3708,8 +3708,8 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float dall = y[i].d * ggml_fp16_to_fp32(x[i].d); - const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin); + const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); const uint8_t * restrict q2 = x[i].qs; const int8_t * restrict q8 = y[i].qs; @@ -3816,8 +3816,8 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri const int8_t * q8 = y[i].qs; const uint8_t * sc = x[i].scales; - const float dall = y[i].d * ggml_fp16_to_fp32(x[i].d); - const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin); + const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); size_t vl = 16; @@ -3903,8 +3903,8 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri summs += y[i].bsums[j] * (sc[j] >> 4); } - const float dall = y[i].d * ggml_fp16_to_fp32(x[i].d); - const float dmin = y[i].d * ggml_fp16_to_fp32(x[i].dmin); + const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); int isum = 0; int is = 0; @@ 
-4021,8 +4021,8 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); - const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); const uint8_t * restrict q2 = x[i].qs; const int8_t * restrict q8 = y[i].qs; @@ -4073,8 +4073,8 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); - const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); const uint8_t * restrict q2 = x[i].qs; const int8_t * restrict q8 = y[i].qs; @@ -4188,8 +4188,8 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri summs += y[i].bsums[j] * (sc[j] >> 4); } - const float dall = y[i].d * ggml_fp16_to_fp32(x[i].d); - const float dmin = y[i].d * ggml_fp16_to_fp32(x[i].dmin); + const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); isum[0] = isum[1] = isum[2] = isum[3] = 0; for (int l = 0; l < 16; ++l) { @@ -4242,7 +4242,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); const uint8_t * restrict q3 = x[i].qs; const uint8_t * restrict qh = x[i].hmask; @@ -4350,7 +4350,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); const uint8_t * restrict q3 = x[i].qs; const int8_t * restrict q8 = y[i].qs; @@ -4455,7 +4455,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); const uint8_t * restrict q3 = x[i].qs; const int8_t * restrict q8 = y[i].qs; @@ -4676,7 +4676,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri } - const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d; + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; sumf += d*sum_t; @@ -4741,7 +4741,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; q8 += 8; a += 8; } - const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d; + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -4843,7 +4843,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); const uint8_t * restrict q3 = x[i].qs; const int8_t * restrict q8 = y[i].qs; @@ -4914,7 +4914,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); const uint8_t * restrict q3 = x[i].qs; const int8_t * restrict q8 = 
y[i].qs; @@ -5099,7 +5099,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri q8 += 8; a += 8; for (int l = 0; l < 8; ++l) aux32[l] += scales[j] * aux16[l]; } - const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d; + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -5139,8 +5139,8 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); - const float dmin = y[i].d * ggml_fp16_to_fp32(x[i].dmin); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); @@ -5222,8 +5222,8 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); - const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); @@ -5288,8 +5288,8 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); - const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); const uint8_t * restrict q4 = x[i].qs; const int8_t * restrict q8 = y[i].qs; @@ -5371,8 +5371,8 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri size_t vl = 8; - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); - const float dmin = y[i].d * ggml_fp16_to_fp32(x[i].dmin); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl); vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl); @@ -5482,9 +5482,9 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d; + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = ggml_fp16_to_fp32(x[i].dmin) * y[i].d; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -5586,8 +5586,8 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = ggml_fp16_to_fp32(x[i].d[0]) * y[i].d; - const float m = ggml_fp16_to_fp32(x[i].d[1]) * y[i].d; + const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d; + const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d; const __m256 vd = _mm256_set1_ps(d); const uint16_t * a = (const uint16_t *)x[i].scales; @@ -5632,8 +5632,8 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = ggml_fp16_to_fp32(x[i].d[0]) * y[i].d; - const float m = ggml_fp16_to_fp32(x[i].d[1]) * y[i].d; + const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d; + const float m = 
GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d; const __m256 vd = _mm256_set1_ps(d); const uint16_t * a = (const uint16_t *)x[i].scales; @@ -5689,8 +5689,8 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri s16[0] = b[0] & 0x0f0f; s16[1] = (b[0] >> 4) & 0x0f0f; - sumf -= y[i].d * ggml_fp16_to_fp32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3])); - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d[0]); + sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3])); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]); size_t vl = 32; @@ -5739,9 +5739,9 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri s16[0] = b[0] & 0x0f0f; s16[1] = (b[0] >> 4) & 0x0f0f; - sumf -= y[i].d * ggml_fp16_to_fp32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3])); + sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3])); - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d[0]); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]); for (int j = 0; j < QK_K/32; ++j) { for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l]; @@ -5789,8 +5789,8 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); - const float dmin = y[i].d * ggml_fp16_to_fp32(x[i].dmin); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); @@ -5878,8 +5878,8 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri const int8_t * restrict q8 = y[i].qs; #if QK_K == 256 - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); - const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); memcpy(utmp, x[i].scales, 12); utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); @@ -5960,8 +5960,8 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); - const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); const uint8_t * restrict q5 = x[i].qs; const int8_t * restrict q8 = y[i].qs; @@ -6065,8 +6065,8 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri const uint8_t * restrict hm = x[i].qh; const int8_t * restrict q8 = y[i].qs; - const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d; - const float dmin = ggml_fp16_to_fp32(x[i].dmin) * y[i].d; + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl); vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl); @@ -6188,9 +6188,9 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d; + const float d = GGML_FP16_TO_FP32(x[i].d) * 
y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - const float dmin = ggml_fp16_to_fp32(x[i].dmin) * y[i].d; + const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d; sumf -= dmin * sumi; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -6288,7 +6288,7 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri const uint8_t * restrict q5 = x[i].qs; const int8_t * restrict q8 = y[i].qs; - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); @@ -6334,7 +6334,7 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri const uint8_t * restrict q5 = x[i].qs; const int8_t * restrict q8 = y[i].qs; - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); @@ -6471,7 +6471,7 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri for (int l = 0; l < 8; ++l) a[8*is + l] -= (hm[l] & m ? 0 : 16); } - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); const int8_t * restrict sc = x[i].scales; for (int j = 0; j < QK_K/16; ++j) { @@ -6514,7 +6514,7 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d_all = ggml_fp16_to_fp32(x[i].d); + const float d_all = GGML_FP16_TO_FP32(x[i].d); const uint8_t * restrict q6 = x[i].ql; const uint8_t * restrict qh = x[i].qh; @@ -6646,7 +6646,7 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); const uint8_t * restrict q4 = x[i].ql; const uint8_t * restrict qh = x[i].qh; @@ -6726,7 +6726,7 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); const uint8_t * restrict q4 = x[i].ql; const uint8_t * restrict qh = x[i].qh; @@ -6838,7 +6838,7 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri float sumf = 0; for (int i = 0; i < nb; ++i) { - const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d; + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; const uint8_t * restrict q6 = x[i].ql; const uint8_t * restrict qh = x[i].qh; @@ -6955,7 +6955,7 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d; + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; @@ -7053,7 +7053,7 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); const uint8_t * restrict q4 = x[i].ql; const uint8_t * restrict qh = x[i].qh; @@ -7110,7 +7110,7 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - const float d = y[i].d * ggml_fp16_to_fp32(x[i].d); + const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); const 
uint8_t * restrict q4 = x[i].ql; const uint8_t * restrict qh = x[i].qh; @@ -7269,7 +7269,7 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; q8 += 8; a += 8; } - const float d = ggml_fp16_to_fp32(x[i].d) * y[i].d; + const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; } for (int l = 0; l < 8; ++l) sumf += sums[l]; diff --git a/ggml-quants.h b/ggml-quants.h index d88f99e331f1dd..70c12c27465e80 100644 --- a/ggml-quants.h +++ b/ggml-quants.h @@ -1,22 +1,12 @@ #pragma once -// This is a private API for quantization and dequantization -// Should not be used directly, use ggml.h instead +#include "ggml-impl.h" -#include "ggml.h" +// GGML internal header #include -#include #include -#ifndef static_assert -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L) -#define static_assert(cond, msg) _Static_assert(cond, msg) -#else -#define static_assert(cond, msg) struct global_scope_noop_trick -#endif -#endif - #define QK4_0 32 typedef struct { ggml_fp16_t d; // delta diff --git a/ggml.c b/ggml.c index 95f72c35e8f205..84407b1224226f 100644 --- a/ggml.c +++ b/ggml.c @@ -1,6 +1,6 @@ #define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnigns on Windows -#include "ggml.h" +#include "ggml-impl.h" #include "ggml-quants.h" #if defined(_MSC_VER) || defined(__MINGW32__) @@ -27,18 +27,6 @@ #include #endif -// static_assert should be a #define, but if it's not, -// fall back to the _Static_assert C11 keyword. -// if C99 - static_assert is noop -// ref: https://stackoverflow.com/a/53923785/4039976 -#ifndef static_assert -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L) -#define static_assert(cond, msg) _Static_assert(cond, msg) -#else -#define static_assert(cond, msg) struct global_scope_noop_trick -#endif -#endif - #if defined(_MSC_VER) // disable "possible loss of data" to avoid hundreds of casts // we should just be careful :) @@ -106,23 +94,11 @@ typedef void * thread_ret_t; #include #endif + #ifdef GGML_USE_CPU_HBM #include #endif -// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512 -#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__)) -#ifndef __FMA__ -#define __FMA__ -#endif -#ifndef __F16C__ -#define __F16C__ -#endif -#ifndef __SSE3__ -#define __SSE3__ -#endif -#endif - /*#define GGML_PERF*/ #define GGML_DEBUG 0 #define GGML_GELU_FP16 @@ -248,213 +224,27 @@ inline static void * ggml_aligned_malloc(size_t size) { #include "ggml-opencl.h" #endif -#undef MIN -#undef MAX -#define MIN(a, b) ((a) < (b) ? (a) : (b)) -#define MAX(a, b) ((a) > (b) ? 
(a) : (b)) - // floating point type used to accumulate sums typedef double ggml_float; -// 16-bit float -// on Arm, we use __fp16 -// on x86, we use uint16_t -#if defined(__ARM_NEON) && !defined(_MSC_VER) - -// if YCM cannot find , make a symbolic link to it, for example: -// -// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/ -// -#include - -#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x)) -#define GGML_COMPUTE_FP32_TO_FP16(x) (x) - -#define GGML_FP16_TO_FP32(x) ((float) (x)) -#define GGML_FP32_TO_FP16(x) (x) - -#else - -#ifdef __wasm_simd128__ -#include -#else -#ifdef __POWER9_VECTOR__ -#include -#undef bool -#define bool _Bool -#else -#if defined(_MSC_VER) || defined(__MINGW32__) -#include -#else -#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__) -#if !defined(__riscv) -#include -#endif -#endif -#endif -#endif -#endif - -#ifdef __riscv_v_intrinsic -#include -#endif - -#ifdef __F16C__ - -#ifdef _MSC_VER -#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x))) -#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0) -#else -#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x) -#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0) -#endif - -#elif defined(__POWER9_VECTOR__) - -#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) -#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) -/* the inline asm below is about 12% faster than the lookup method */ -#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x) -#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) - -static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { - register float f; - register double d; - __asm__( - "mtfprd %0,%2\n" - "xscvhpdp %0,%0\n" - "frsp %1,%0\n" : - /* temp */ "=d"(d), - /* out */ "=f"(f): - /* in */ "r"(h)); - return f; -} - -static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { - register double d; - register ggml_fp16_t r; - __asm__( /* xscvdphp can work on double or single precision */ - "xscvdphp %0,%2\n" - "mffprd %1,%0\n" : - /* temp */ "=d"(d), - /* out */ "=r"(r): - /* in */ "f"(f)); - return r; -} - -#else - -// FP16 <-> FP32 -// ref: https://github.com/Maratyszcza/FP16 - -static inline float fp32_from_bits(uint32_t w) { - union { - uint32_t as_bits; - float as_value; - } fp32; - fp32.as_bits = w; - return fp32.as_value; -} - -static inline uint32_t fp32_to_bits(float f) { - union { - float as_value; - uint32_t as_bits; - } fp32; - fp32.as_value = f; - return fp32.as_bits; -} - -static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { - const uint32_t w = (uint32_t) h << 16; - const uint32_t sign = w & UINT32_C(0x80000000); - const uint32_t two_w = w + w; - - const uint32_t exp_offset = UINT32_C(0xE0) << 23; -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__) - const float exp_scale = 0x1.0p-112f; -#else - const float exp_scale = fp32_from_bits(UINT32_C(0x7800000)); -#endif - const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale; - - const uint32_t magic_mask = UINT32_C(126) << 23; - const float magic_bias = 0.5f; - const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias; - - const uint32_t denormalized_cutoff = UINT32_C(1) << 27; - const uint32_t result = sign | - (two_w < denormalized_cutoff ? 
fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value)); - return fp32_from_bits(result); -} - -static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__) - const float scale_to_inf = 0x1.0p+112f; - const float scale_to_zero = 0x1.0p-110f; -#else - const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000)); - const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000)); -#endif - float base = (fabsf(f) * scale_to_inf) * scale_to_zero; - - const uint32_t w = fp32_to_bits(f); - const uint32_t shl1_w = w + w; - const uint32_t sign = w & UINT32_C(0x80000000); - uint32_t bias = shl1_w & UINT32_C(0xFF000000); - if (bias < UINT32_C(0x71000000)) { - bias = UINT32_C(0x71000000); - } - - base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base; - const uint32_t bits = fp32_to_bits(base); - const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00); - const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF); - const uint32_t nonsign = exp_bits + mantissa_bits; - return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign); -} - -#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) -#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) - -#endif // __F16C__ - -#endif // __ARM_NEON - // // global data // // precomputed gelu table for f16 (128 KB) -static ggml_fp16_t table_gelu_f16[1 << 16]; +static ggml_fp16_t ggml_table_gelu_f16[1 << 16]; // precomputed quick gelu table for f16 (128 KB) -static ggml_fp16_t table_gelu_quick_f16[1 << 16]; +static ggml_fp16_t ggml_table_gelu_quick_f16[1 << 16]; // precomputed silu table for f16 (128 KB) -static ggml_fp16_t table_silu_f16[1 << 16]; +static ggml_fp16_t ggml_table_silu_f16[1 << 16]; // precomputed exp table for f16 (128 KB) -static ggml_fp16_t table_exp_f16[1 << 16]; - -// precomputed f32 table for f16 (256 KB) -static float table_f32_f16[1 << 16]; +static ggml_fp16_t ggml_table_exp_f16[1 << 16]; -// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32, -// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON. -// This is also true for POWER9. 
-#if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16) - -inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) { - uint16_t s; - memcpy(&s, &f, sizeof(uint16_t)); - return table_f32_f16[s]; -} - -#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x) -#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) - -#endif +// precomputed f32 table for f16 (256 KB) (ggml-impl.h) +float ggml_table_f32_f16[1 << 16]; // note: do not use these inside ggml.c // these are meant to be used via the ggml.h API @@ -632,6 +422,28 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = { .vec_dot = ggml_vec_dot_q4_1_q8_1, .vec_dot_type = GGML_TYPE_Q8_1, }, + [4] = { // GGML_TYPE_Q4_2 + .type_name = "DEPRECATED", + .blck_size = 0, + .type_size = 0, + .is_quantized = false, + .to_float = NULL, + .from_float = NULL, + .from_float_reference = NULL, + .vec_dot = NULL, + .vec_dot_type = GGML_TYPE_COUNT, + }, + [5] = { // GGML_TYPE_Q4_3 + .type_name = "DEPRECATED", + .blck_size = 0, + .type_size = 0, + .is_quantized = false, + .to_float = NULL, + .from_float = NULL, + .from_float_reference = NULL, + .vec_dot = NULL, + .vec_dot_type = GGML_TYPE_COUNT, + }, [GGML_TYPE_Q5_0] = { .type_name = "q5_0", .blck_size = QK5_0, @@ -1551,7 +1363,7 @@ inline static float ggml_gelu_f32(float x) { inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { const uint16_t * i16 = (const uint16_t *) x; for (int i = 0; i < n; ++i) { - y[i] = table_gelu_f16[i16[i]]; + y[i] = ggml_table_gelu_f16[i16[i]]; } } @@ -1561,7 +1373,7 @@ inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) { ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); memcpy(&t, &fp16, sizeof(uint16_t)); - y[i] = GGML_FP16_TO_FP32(table_gelu_f16[t]); + y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_f16[t]); } } #else @@ -1579,7 +1391,7 @@ inline static float ggml_gelu_quick_f32(float x) { //inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { // const uint16_t * i16 = (const uint16_t *) x; // for (int i = 0; i < n; ++i) { -// y[i] = table_gelu_quick_f16[i16[i]]; +// y[i] = ggml_table_gelu_quick_f16[i16[i]]; // } //} @@ -1589,7 +1401,7 @@ inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * for (int i = 0; i < n; ++i) { ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); memcpy(&t, &fp16, sizeof(uint16_t)); - y[i] = GGML_FP16_TO_FP32(table_gelu_quick_f16[t]); + y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]); } } #else @@ -1608,7 +1420,7 @@ inline static float ggml_silu_f32(float x) { //inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { // const uint16_t * i16 = (const uint16_t *) x; // for (int i = 0; i < n; ++i) { -// y[i] = table_silu_f16[i16[i]]; +// y[i] = ggml_table_silu_f16[i16[i]]; // } //} @@ -1618,7 +1430,7 @@ inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) { ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); memcpy(&t, &fp16, sizeof(uint16_t)); - y[i] = GGML_FP16_TO_FP32(table_silu_f16[t]); + y[i] = GGML_FP16_TO_FP32(ggml_table_silu_f16[t]); } } #else @@ -2334,11 +2146,11 @@ struct ggml_context * ggml_init(struct ggml_init_params params) { for (int i = 0; i < (1 << 16); ++i) { uint16_t ui = i; memcpy(&ii, &ui, sizeof(ii)); - const float f = table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii); - table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f)); - table_gelu_quick_f16[i] = 
GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f)); - table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f)); - table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f)); + const float f = ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii); + ggml_table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f)); + ggml_table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f)); + ggml_table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f)); + ggml_table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f)); } const uint64_t t_end = ggml_time_us(); UNUSED(t_end); @@ -10701,7 +10513,7 @@ static void ggml_compute_forward_soft_max_f32( // const float val = (sp[i] == -INFINITY) ? 0.0 : exp(sp[i] - max); ggml_fp16_t s = GGML_FP32_TO_FP16(sp[i] - max); memcpy(&scvt, &s, sizeof(scvt)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]); + const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]); sum += (ggml_float)val; dp[i] = val; } @@ -12990,7 +12802,7 @@ static void ggml_compute_forward_flash_attn_f32( #else ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max); memcpy(&scvt[j], &s, sizeof(uint16_t)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); + const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]); #endif sump[j] += (ggml_float)val; SS[j] = val; @@ -13192,7 +13004,7 @@ static void ggml_compute_forward_flash_attn_f16( } else { ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max); memcpy(&scvt[j], &s, sizeof(uint16_t)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); + const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]); sump[j] += (ggml_float)val; SS[j] = val; } @@ -13643,7 +13455,7 @@ static void ggml_compute_forward_flash_attn_back_f32( #else ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max); memcpy(&scvt[j], &s, sizeof(uint16_t)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); + const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]); #endif sump[j] += (ggml_float)val; SW[j] = val; @@ -14393,7 +14205,7 @@ static void ggml_compute_forward_cross_entropy_loss_f32( #else ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max); memcpy(&scvt, &s, sizeof(scvt)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]); + const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]); #endif sum += (ggml_float)val; st[i] = val; @@ -14507,7 +14319,7 @@ static void ggml_compute_forward_cross_entropy_loss_back_f32( #else ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max); memcpy(&scvt, &s, sizeof(scvt)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]); + const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]); #endif sum += (ggml_float)val; ds0[i] = val; diff --git a/llama.cpp b/llama.cpp index a4340d5277b09c..e599917a81eb1d 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1467,7 +1467,7 @@ static int32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) { } static void llama_kv_cache_clear(struct llama_kv_cache & cache) { - for (int32_t i = 0; i < cache.size; ++i) { + for (int32_t i = 0; i < (int32_t) cache.size; ++i) { cache.cells[i].pos = -1; cache.cells[i].seq_id.clear(); } diff --git a/tests/test-double-float.cpp b/tests/test-double-float.cpp index afd7bf77fcb552..753dae911b0cb3 100644 --- a/tests/test-double-float.cpp +++ b/tests/test-double-float.cpp @@ -4,7 +4,7 @@ #undef NDEBUG #include -#if !defined(__riscv) && !defined(__s390__) +#if !defined(__riscv) && !defined(__s390__) && !defined(__ARM_NEON) #include #endif #include diff --git a/tests/test-quantize-fns.cpp b/tests/test-quantize-fns.cpp index 884af40548fb79..a2459a2867c5c0 
100644 --- a/tests/test-quantize-fns.cpp +++ b/tests/test-quantize-fns.cpp @@ -129,6 +129,13 @@ int main(int argc, char * argv[]) { ggml_type type = (ggml_type) i; ggml_type_traits_t qfns = ggml_internal_get_type_traits(type); + // deprecated - skip + if (qfns.blck_size == 0) { + continue; + } + + printf("Testing %s\n", ggml_type_name((ggml_type) i)); + if (qfns.from_float && qfns.to_float) { const float total_error = total_quantization_error(qfns, test_size, test_data.data()); const float max_quantization_error = From 07178c98e1b61a5e2af39d347add12e7eb9e08e1 Mon Sep 17 00:00:00 2001 From: Tungsten842 <886724vf@anonaddy.me> Date: Tue, 31 Oct 2023 18:24:03 +0100 Subject: [PATCH 025/206] flake.nix: fix for rocm 5.7 (#3853) --- flake.lock | 12 ++++++------ flake.nix | 10 ++++++---- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/flake.lock b/flake.lock index 070f0e1613fc30..0455f65617a2dc 100644 --- a/flake.lock +++ b/flake.lock @@ -5,11 +5,11 @@ "systems": "systems" }, "locked": { - "lastModified": 1692799911, - "narHash": "sha256-3eihraek4qL744EvQXsK1Ha6C3CR7nnT8X2qWap4RNk=", + "lastModified": 1694529238, + "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=", "owner": "numtide", "repo": "flake-utils", - "rev": "f9e7cf818399d17d347f847525c5a5a8032e4e44", + "rev": "ff7b65b44d01cf9ba6a71320833626af21126384", "type": "github" }, "original": { @@ -20,11 +20,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1698134075, - "narHash": "sha256-foCD+nuKzfh49bIoiCBur4+Fx1nozo+4C/6k8BYk4sg=", + "lastModified": 1698318101, + "narHash": "sha256-gUihHt3yPD7bVqg+k/UVHgngyaJ3DMEBchbymBMvK1E=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "8efd5d1e283604f75a808a20e6cde0ef313d07d4", + "rev": "63678e9f3d3afecfeafa0acead6239cdb447574c", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index fa34394b2f0593..4cf28d5c11c0fd 100644 --- a/flake.nix +++ b/flake.nix @@ -11,8 +11,7 @@ meta.mainProgram = "llama"; inherit (pkgs.stdenv) isAarch32 isAarch64 isDarwin; buildInputs = with pkgs; [ openmpi ]; - osSpecific = with pkgs; buildInputs ++ - ( + osSpecific = with pkgs; buildInputs ++ ( if isAarch64 && isDarwin then with pkgs.darwin.apple_sdk_11_0.frameworks; [ Accelerate @@ -96,12 +95,15 @@ }; packages.rocm = pkgs.stdenv.mkDerivation { inherit name src meta postPatch nativeBuildInputs postInstall; - buildInputs = with pkgs; buildInputs ++ [ hip hipblas rocblas ]; + buildInputs = with pkgs.rocmPackages; buildInputs ++ [ clr hipblas rocblas ]; cmakeFlags = cmakeFlags ++ [ "-DLLAMA_HIPBLAS=1" "-DCMAKE_C_COMPILER=hipcc" "-DCMAKE_CXX_COMPILER=hipcc" - "-DCMAKE_POSITION_INDEPENDENT_CODE=ON" + # Build all targets supported by rocBLAS. When updating search for TARGET_LIST_ROCM + # in github.com/ROCmSoftwarePlatform/rocBLAS/blob/develop/CMakeLists.txt + # and select the line that matches the current nixpkgs version of rocBLAS. + "-DAMDGPU_TARGETS=gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102" ]; }; apps.llama-server = { From 238657db2364cfb728c694470a4a81702afea760 Mon Sep 17 00:00:00 2001 From: kalomaze <66376113+kalomaze@users.noreply.github.com> Date: Tue, 31 Oct 2023 14:44:49 -0500 Subject: [PATCH 026/206] samplers : Min-P sampler implementation [alternative to Top P/Top K] (#3841) * Introduce the new Min-P sampler by @kalomaze The Min-P sampling method was designed as an alternative to Top-P, and aims to ensure a balance of quality and variety. 
The parameter *p* represents the minimum probability for a token to be considered, relative to the probability of the most likely token. * Min-P enabled and set to 0.05 default --------- Co-authored-by: Georgi Gerganov Co-authored-by: cebtenzzre --- common/common.cpp | 8 ++++++++ common/sampling.cpp | 6 ++++-- common/sampling.h | 1 + examples/main/README.md | 8 ++++++++ llama.cpp | 26 ++++++++++++++++++++++++++ llama.h | 7 +++++++ 6 files changed, 54 insertions(+), 2 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index c187128d6ede3d..dc4865e80b1544 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -218,6 +218,12 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { break; } sparams.top_p = std::stof(argv[i]); + } else if (arg == "--min-p") { + if (++i >= argc) { + invalid_param = true; + break; + } + sparams.min_p = std::stof(argv[i]); } else if (arg == "--temp") { if (++i >= argc) { invalid_param = true; @@ -679,6 +685,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch); printf(" --top-k N top-k sampling (default: %d, 0 = disabled)\n", sparams.top_k); printf(" --top-p N top-p sampling (default: %.1f, 1.0 = disabled)\n", (double)sparams.top_p); + printf(" --min-p N min-p sampling (default: %.1f, 0.0 = disabled)\n", (double)sparams.min_p); printf(" --tfs N tail free sampling, parameter z (default: %.1f, 1.0 = disabled)\n", (double)sparams.tfs_z); printf(" --typical N locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)\n", (double)sparams.typical_p); printf(" --repeat-last-n N last n tokens to consider for penalize (default: %d, 0 = disabled, -1 = ctx_size)\n", sparams.penalty_last_n); @@ -1275,6 +1282,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l fprintf(stream, "threads: %d # default: %d\n", params.n_threads, std::thread::hardware_concurrency()); fprintf(stream, "top_k: %d # default: 40\n", sparams.top_k); fprintf(stream, "top_p: %f # default: 0.95\n", sparams.top_p); + fprintf(stream, "min_p: %f # default: 0.0\n", sparams.min_p); fprintf(stream, "typical_p: %f # default: 1.0\n", sparams.typical_p); fprintf(stream, "verbose_prompt: %s # default: false\n", params.verbose_prompt ? "true" : "false"); } diff --git a/common/sampling.cpp b/common/sampling.cpp index c4996c9857d8ac..673d67a6d5380e 100644 --- a/common/sampling.cpp +++ b/common/sampling.cpp @@ -89,10 +89,10 @@ std::string llama_sampling_print(const llama_sampling_params & params) { snprintf(result, sizeof(result), "\trepeat_last_n = %d, repeat_penalty = %.3f, frequency_penalty = %.3f, presence_penalty = %.3f\n" - "\ttop_k = %d, tfs_z = %.3f, top_p = %.3f, typical_p = %.3f, temp = %.3f\n" + "\ttop_k = %d, tfs_z = %.3f, top_p = %.3f, min_p = %.3f, typical_p = %.3f, temp = %.3f\n" "\tmirostat = %d, mirostat_lr = %.3f, mirostat_ent = %.3f", params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present, - params.top_k, params.tfs_z, params.top_p, params.typical_p, params.temp, + params.top_k, params.tfs_z, params.top_p, params.min_p, params.typical_p, params.temp, params.mirostat, params.mirostat_eta, params.mirostat_tau); return std::string(result); @@ -110,6 +110,7 @@ llama_token llama_sampling_sample( const float temp = params.temp; const int32_t top_k = params.top_k <= 0 ? 
n_vocab : params.top_k; const float top_p = params.top_p; + const float min_p = params.min_p; const float tfs_z = params.tfs_z; const float typical_p = params.typical_p; const int32_t penalty_last_n = params.penalty_last_n < 0 ? params.n_prev : params.penalty_last_n; @@ -190,6 +191,7 @@ llama_token llama_sampling_sample( llama_sample_tail_free(ctx_main, &cur_p, tfs_z, min_keep); llama_sample_typical (ctx_main, &cur_p, typical_p, min_keep); llama_sample_top_p (ctx_main, &cur_p, top_p, min_keep); + llama_sample_min_p (ctx_main, &cur_p, min_p, min_keep); llama_sample_temp (ctx_main, &cur_p, temp); id = llama_sample_token(ctx_main, &cur_p); diff --git a/common/sampling.h b/common/sampling.h index 62ea6d4cfb7e5e..7c9b8dcf23bcbf 100644 --- a/common/sampling.h +++ b/common/sampling.h @@ -14,6 +14,7 @@ typedef struct llama_sampling_params { int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens. int32_t top_k = 40; // <= 0 to use vocab size float top_p = 0.95f; // 1.0 = disabled + float min_p = 0.05f; // 0.0 = disabled float tfs_z = 1.00f; // 1.0 = disabled float typical_p = 1.00f; // 1.0 = disabled float temp = 0.80f; // 1.0 = disabled diff --git a/examples/main/README.md b/examples/main/README.md index a9561c383c0cba..a3428b48763d0b 100644 --- a/examples/main/README.md +++ b/examples/main/README.md @@ -208,6 +208,14 @@ Top-p sampling, also known as nucleus sampling, is another text generation metho Example usage: `--top-p 0.95` +### Min P Sampling + +- `--min-p N`: Sets a minimum base probability threshold for token selection (default: 0.05). + +The Min-P sampling method was designed as an alternative to Top-P, and aims to ensure a balance of quality and variety. The parameter *p* represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with *p*=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. + +Example usage: `--min-p 0.05` + ### Tail Free Sampling (TFS) - `--tfs N`: Enable tail free sampling with parameter z (default: 1.0, 1.0 = disabled). 
diff --git a/llama.cpp b/llama.cpp index e599917a81eb1d..7ee5892989f0a9 100644 --- a/llama.cpp +++ b/llama.cpp @@ -7368,6 +7368,32 @@ void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * can } } +void llama_sample_min_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) { + if (p <= 0.0f || !candidates->size) { + return; + } + + llama_sample_softmax(ctx, candidates); + + const int64_t t_start_sample_us = ggml_time_us(); + + float scale = candidates->data[0].p; // scale by max prob + size_t i = 1; // first token always matches + + for (; i < candidates->size; ++i) { + if (candidates->data[i].p < p * scale && i >= min_keep) { + break; // prob too small + } + } + + // Resize the output vector to keep only the matching tokens + candidates->size = i; + + if (ctx) { + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; + } +} + void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep) { if (z >= 1.0f || candidates->size <= 2) { return; diff --git a/llama.h b/llama.h index d727dbd9fd915d..75fe391ef2e733 100644 --- a/llama.h +++ b/llama.h @@ -598,6 +598,13 @@ extern "C" { float p, size_t min_keep); + /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841 + LLAMA_API void llama_sample_min_p( + struct llama_context * ctx, + llama_token_data_array * candidates, + float p, + size_t min_keep); + /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/. LLAMA_API void llama_sample_tail_free( struct llama_context * ctx, From 71e3718abdb2771b50c9606d3a7569623a0b0afe Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 1 Nov 2023 08:04:02 +0200 Subject: [PATCH 027/206] llama : refactor graph build code (#3837) * llama : factor out ggml-alloc from graph graph build functions ggml-ci * metal : disable kernel load log * llama : factor out tensor offloading outside the build call (wip) ggml-ci * llama : offload rest of the models ggml-ci * llama : update offload log messages to print node index * llama : comments * llama : support offloading result_norm + comments * llama : factor graph input into a function * llama : do tensor offload only with CUDA * llama : fix res_norm offloading * llama : try to optimize offloading code * llama : fix non-CUDA build * llama : try to fix build * llama : move refact in correct place + optimize graph input * llama : refactor tensor offloading as callback * llama : add layer index to all tensor names * llama : add functional header * llama : comment ggml-ci * llama : remove obsolete map for layer counting * llama : add llm_build helper functions (#3848) * llama : add llm_build_norm helper function ggml-ci * llama : add llm_build_ffn helper function (#3849) ggml-ci * llama : add llm_build_k_shift helper ggml-ci * llama : fix offloading after recent changes * llama : add llm_build_kv_store helper ggml-ci * llama : remove obsolete offload names * llama : fix llm_build_k_shift to use n_head_kv instead of n_head * llama : simplify falcon Q, K, V computation * llama : remove obsolete comments in build graphs * llama : add llm_build_kqv helper ggml-ci * llama : minor * llama : add LLAMA_OFFLOAD_DEBUG + fix starcoder offloading * llama : fix input allocation logic * llama : update offload functions for KQ tensors * llama : normalize tensor names ggml-ci * llama : enable warning about not offloaded tensors * llama : remove extra ; + deduplicate gate_b logic * llama : add 
llm_build_inp_embd helper --- ggml-metal.m | 11 +- ggml.h | 2 +- llama.cpp | 3741 ++++++++++++++++++++------------------------------ 3 files changed, 1520 insertions(+), 2234 deletions(-) diff --git a/ggml-metal.m b/ggml-metal.m index 2380c431001408..bc881395a7aadc 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -238,14 +238,17 @@ static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){ // load kernels { NSError * error = nil; -#define GGML_METAL_ADD_KERNEL(name) \ - ctx->function_##name = [ctx->library newFunctionWithName:@"kernel_"#name]; \ - ctx->pipeline_##name = [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:&error]; \ + + /* GGML_METAL_LOG_INFO("%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name, \ (int) ctx->pipeline_##name.maxTotalThreadsPerThreadgroup, \ (int) ctx->pipeline_##name.threadExecutionWidth); \ + */ +#define GGML_METAL_ADD_KERNEL(name) \ + ctx->function_##name = [ctx->library newFunctionWithName:@"kernel_"#name]; \ + ctx->pipeline_##name = [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:&error]; \ if (error) { \ - GGML_METAL_LOG_ERROR("%s: error: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \ + GGML_METAL_LOG_ERROR("%s: error: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \ return NULL; \ } diff --git a/ggml.h b/ggml.h index 8c954904e5a007..9d16c5a72fda0e 100644 --- a/ggml.h +++ b/ggml.h @@ -709,7 +709,7 @@ extern "C" { // Context tensor enumeration and lookup GGML_API struct ggml_tensor * ggml_get_first_tensor(struct ggml_context * ctx); GGML_API struct ggml_tensor * ggml_get_next_tensor (struct ggml_context * ctx, struct ggml_tensor * tensor); - GGML_API struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name); + GGML_API struct ggml_tensor * ggml_get_tensor (struct ggml_context * ctx, const char * name); GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor); GGML_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value); diff --git a/llama.cpp b/llama.cpp index 7ee5892989f0a9..ead1d421d243dc 100644 --- a/llama.cpp +++ b/llama.cpp @@ -60,7 +60,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -69,11 +71,10 @@ #include #include #include +#include #include #include #include -#include -#include #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data @@ -969,7 +970,7 @@ struct llama_mlock { typedef void (*offload_func_t)(struct ggml_tensor * tensor); -static void llama_nop(struct ggml_tensor * tensor) { // don't offload by default +static void ggml_offload_nop(struct ggml_tensor * tensor) { (void) tensor; } @@ -1113,13 +1114,13 @@ struct llama_layer { struct ggml_tensor * ffn_norm_b; // ff - struct ggml_tensor * w1; // ffn_gate - struct ggml_tensor * w2; // ffn_down - struct ggml_tensor * w3; // ffn_up + struct ggml_tensor * ffn_gate; // w1 + struct ggml_tensor * ffn_down; // w2 + struct ggml_tensor * ffn_up; // w3 // ff bias - struct ggml_tensor * b2; // ffn_down - struct ggml_tensor * b3; // ffn_up + struct ggml_tensor * ffn_down_b; // b2 + struct ggml_tensor * ffn_up_b; // b3 }; struct llama_kv_cell { @@ -1225,8 +1226,8 @@ struct llama_model { llama_hparams hparams = {}; llama_vocab vocab; - struct ggml_tensor * tok_embeddings; - struct ggml_tensor * pos_embeddings; + struct ggml_tensor * tok_embd; + struct ggml_tensor * pos_embd; struct 
ggml_tensor * tok_norm; struct ggml_tensor * tok_norm_b; @@ -2482,7 +2483,7 @@ static void llm_load_tensors( case LLM_ARCH_LLAMA: case LLM_ARCH_REFACT: { - model.tok_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); // output { @@ -2536,21 +2537,21 @@ static void llm_load_tensors( layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); - layer.w1 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split); - layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); - layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); + layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); if (backend == GGML_BACKEND_GPU) { vram_weights += - ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) + - ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) + - ggml_nbytes(layer.w1) + ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3); + ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) + + ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) + + ggml_nbytes(layer.ffn_gate) + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up); } } } break; case LLM_ARCH_BAICHUAN: { - model.tok_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); { ggml_backend_type backend_norm; ggml_backend_type backend_output; @@ -2602,15 +2603,15 @@ static void llm_load_tensors( layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); - layer.w1 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split); - layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); - layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); + layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); if (backend == GGML_BACKEND_GPU) { vram_weights += - ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) + - ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) + - ggml_nbytes(layer.w1) + ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3); + ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) + + ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) + + ggml_nbytes(layer.ffn_gate) + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up); } } } break; @@ -2618,7 +2619,7 @@ static void llm_load_tensors( { // TODO: CPU-only for now - model.tok_embeddings = ml.create_tensor(ctx, 
tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); // output { @@ -2681,21 +2682,21 @@ static void llm_load_tensors( layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split); layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); - layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); - layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); + layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); if (backend == GGML_BACKEND_GPU) { vram_weights += ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) + ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.wo) + - ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3); + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up); } } } break; case LLM_ARCH_STARCODER: { - model.tok_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); - model.pos_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.pos_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train}, GGML_BACKEND_CPU); // output { @@ -2754,11 +2755,11 @@ static void llm_load_tensors( layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend); - layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split); - layer.b2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend); + layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split); + layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend); - layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); - layer.b3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend); + layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend); if (backend == GGML_BACKEND_GPU) { vram_weights += @@ -2766,14 +2767,14 @@ static void llm_load_tensors( ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.bqkv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.bo) + ggml_nbytes(layer.ffn_norm) + ggml_nbytes(layer.ffn_norm_b) + - ggml_nbytes(layer.w2) + ggml_nbytes(layer.b2) + - ggml_nbytes(layer.w3) + ggml_nbytes(layer.b3); + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_down_b) + + ggml_nbytes(layer.ffn_up) + ggml_nbytes(layer.ffn_up_b); } } } break; case LLM_ARCH_PERSIMMON: { - model.tok_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); { 
ggml_backend_type backend_norm; @@ -2814,31 +2815,31 @@ static void llm_load_tensors( const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; auto & layer = model.layers[i]; - layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); - layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend); - layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split); - layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend_split); - layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); - layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend_split); - layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split); - layer.b2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend_split); - layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); - layer.b3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend_split); - layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); - layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend); + layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); + layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend); + layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split); + layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend_split); + layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); + layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend_split); + layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split); + layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend_split); + layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend_split); + layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); + layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend); layer.attn_q_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {64}, backend); - layer.attn_q_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {64}, backend); + layer.attn_q_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {64}, backend); layer.attn_k_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {64}, backend); - layer.attn_k_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {64}, backend); + layer.attn_k_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {64}, backend); } } break; case LLM_ARCH_BLOOM: { // TODO: CPU-only for now - model.tok_embeddings = ml.create_tensor(ctx, 
tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); - model.tok_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, GGML_BACKEND_CPU); - model.tok_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, GGML_BACKEND_CPU); + model.tok_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, GGML_BACKEND_CPU); // output { @@ -2897,11 +2898,11 @@ static void llm_load_tensors( layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend); - layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split); - layer.b2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend_split); + layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split); + layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend_split); - layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); - layer.b3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend_split); + layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend_split); if (backend == GGML_BACKEND_GPU) { vram_weights += @@ -2909,14 +2910,14 @@ static void llm_load_tensors( ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.bqkv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.bo) + ggml_nbytes(layer.ffn_norm) + ggml_nbytes(layer.ffn_norm_b) + - ggml_nbytes(layer.w3) + ggml_nbytes(layer.b3) + - ggml_nbytes(layer.w2) + ggml_nbytes(layer.b2); + ggml_nbytes(layer.ffn_up) + ggml_nbytes(layer.ffn_up_b) + + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_down_b); } } } break; case LLM_ARCH_MPT: { - model.tok_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); // output { @@ -2967,8 +2968,8 @@ static void llm_load_tensors( layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); - layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); - layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); + layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); if (backend == GGML_BACKEND_GPU) { vram_weights += @@ -2976,8 +2977,8 @@ static void llm_load_tensors( ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) + - ggml_nbytes(layer.w2) + - ggml_nbytes(layer.w3); + ggml_nbytes(layer.ffn_down) + + ggml_nbytes(layer.ffn_up); } } } break; @@ -3007,10 +3008,10 @@ static void llm_load_tensors( #ifdef GGML_USE_CUBLAS const int max_backend_supported_layers = hparams.n_layer + 3; - const int max_offloadable_layers = 
hparams.n_layer + 3; -#elif defined(GGML_USE_CLBLAST) + const int max_offloadable_layers = hparams.n_layer + 3; +#elif GGML_USE_CLBLAST const int max_backend_supported_layers = hparams.n_layer + 1; - const int max_offloadable_layers = hparams.n_layer + 1; + const int max_offloadable_layers = hparams.n_layer + 1; #endif // GGML_USE_CUBLAS LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers); @@ -3089,9 +3090,359 @@ static bool llama_model_load( return true; } +using llm_build_cb = std::function; + +enum llm_rope_type { + LLM_ROPE, + LLM_ROPE_NEOX, + LLM_ROPE_GLM, +}; + +static struct ggml_tensor * llm_build_inp_embd( + struct ggml_context * ctx, + const llama_batch & batch, + struct ggml_tensor * tok_embd, + int64_t n_embd, + int32_t n_tokens, + const llm_build_cb & cb) { + struct ggml_tensor * inpL; + + if (batch.token) { + struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_tokens); + cb(inp_tokens, "inp_tokens", -1); + + inpL = ggml_get_rows(ctx, tok_embd, inp_tokens); + } else { +#ifdef GGML_USE_MPI + GGML_ASSERT(false && "not implemented"); +#endif + + inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_tokens); + } + + return inpL; +} + +// Persimmon: n_rot = n_embd_head/2 +// Other: n_rot = n_embd_head +static void llm_build_k_shift( + const llama_context & lctx, + struct ggml_context * ctx, + struct ggml_cgraph * graph, + int64_t n_rot, + llm_rope_type type, + const llm_build_cb & cb) { + const auto & model = lctx.model; + const auto & kv_self = lctx.kv_self; + const auto & cparams = lctx.cparams; + + const auto & hparams = model.hparams; + + const int64_t n_layer = hparams.n_layer; + const int64_t n_head_kv = hparams.n_head_kv; + const int64_t n_embd_gqa = hparams.n_embd_gqa(); + const int64_t n_embd_head = hparams.n_embd_head(); + + const int64_t n_ctx = lctx.cparams.n_ctx; + + const float freq_base = cparams.rope_freq_base; + const float freq_scale = cparams.rope_freq_scale; + + GGML_ASSERT(n_embd_head % n_rot == 0); + + struct ggml_tensor * K_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_ctx); + cb(K_shift, "K_shift", -1); + + int rope_type = 0; + + switch (type) { + case LLM_ROPE: rope_type = 0; break; + case LLM_ROPE_NEOX: rope_type = 2; break; + case LLM_ROPE_GLM: rope_type = 4; break; + } + + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * tmp = + // we rotate only the first n_rot dimensions + ggml_rope_custom_inplace(ctx, + ggml_view_3d(ctx, kv_self.k, + n_rot, n_head_kv, n_ctx, + ggml_element_size(kv_self.k)*n_embd_head, + ggml_element_size(kv_self.k)*n_embd_gqa, + ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il), + K_shift, n_rot, rope_type, 0, freq_base, freq_scale); + cb(tmp, "K_shifted", il); + ggml_build_forward_expand(graph, tmp); + } +} + +static void llm_build_kv_store( + const llama_context & lctx, + struct ggml_context * ctx, + struct ggml_cgraph * graph, + struct ggml_tensor * k_cur, + struct ggml_tensor * v_cur, + int32_t n_tokens, + int32_t kv_head, + const llm_build_cb & cb, + int64_t il) { + const auto & model = lctx.model; + const auto & kv_self = lctx.kv_self; + const auto & cparams = lctx.cparams; + + const auto & hparams = model.hparams; + + const int64_t n_ctx = cparams.n_ctx; + const int64_t n_embd_gqa = hparams.n_embd_gqa(); + + // compute the transposed [n_tokens, n_embd] V matrix + struct ggml_tensor * v_cur_t = ggml_transpose(ctx, ggml_reshape_2d(ctx, v_cur, n_embd_gqa, n_tokens)); + //struct ggml_tensor * v_cur_t = 
ggml_transpose(ctx, v_cur); // TODO: reshape above is likely not needed + cb(v_cur_t, "v_cur_t", il); + + struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv_self.k, n_tokens*n_embd_gqa, + (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + kv_head)); + cb(k_cache_view, "k_cache_view", il); + + struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, kv_self.v, n_tokens, n_embd_gqa, + ( n_ctx)*ggml_element_size(kv_self.v), + (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + kv_head*ggml_element_size(kv_self.v)); + cb(v_cache_view, "v_cache_view", il); + + // important: storing RoPE-ed version of K in the KV cache! + ggml_build_forward_expand(graph, ggml_cpy(ctx, k_cur, k_cache_view)); + ggml_build_forward_expand(graph, ggml_cpy(ctx, v_cur_t, v_cache_view)); +} + +enum llm_norm_type { + LLM_NORM, + LLM_NORM_RMS, +}; + +static struct ggml_tensor * llm_build_norm( + struct ggml_context * ctx, + struct ggml_tensor * cur, + struct ggml_tensor * mw, + struct ggml_tensor * mb, + llm_norm_type type, + float eps, + const llm_build_cb & cb, + int il) { + switch (type) { + case LLM_NORM: cur = ggml_norm (ctx, cur, eps); break; + case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, eps); break; + } + + if (mw || mb) { + cb(cur, "norm", il); + } + + if (mw) { + cur = ggml_mul(ctx, cur, mw); + if (mb) { + cb(cur, "norm_w", il); + } + } + + if (mb) { + cur = ggml_add(ctx, cur, mb); + } + + return cur; +} + +enum llm_ffn_op_type { + LLM_FFN_SILU, + LLM_FFN_GELU, + LLM_FFN_RELU, + LLM_FFN_RELU_SQR, +}; + +enum llm_ffn_gate_type { + LLM_FFN_SEQ, + LLM_FFN_PAR, // ffn_gate is parallel to ffn_up +}; + +static struct ggml_tensor * llm_build_ffn( + struct ggml_context * ctx, + struct ggml_tensor * cur, + struct ggml_tensor * up, + struct ggml_tensor * up_b, + struct ggml_tensor * gate, + struct ggml_tensor * gate_b, + struct ggml_tensor * down, + struct ggml_tensor * down_b, + llm_ffn_op_type type_op, + llm_ffn_gate_type type_gate, + const llm_build_cb & cb, + int il) { + struct ggml_tensor * tmp = ggml_mul_mat(ctx, up, cur); + cb(tmp, "ffn_up", il); + + if (up_b) { + tmp = ggml_add(ctx, tmp, up_b); + cb(tmp, "ffn_up_b", il); + } + + if (gate) { + switch (type_gate) { + case LLM_FFN_SEQ: + { + cur = ggml_mul_mat(ctx, gate, tmp); + cb(cur, "ffn_gate", il); + } break; + case LLM_FFN_PAR: + { + cur = ggml_mul_mat(ctx, gate, cur); + cb(cur, "ffn_gate", il); + } break; + } + + if (gate_b) { + cur = ggml_add(ctx, cur, gate_b); + cb(cur, "ffn_gate_b", il); + } + } else { + cur = tmp; + } + + switch (type_op) { + case LLM_FFN_SILU: + { + cur = ggml_silu(ctx, cur); + cb(cur, "ffn_silu", il); + } break; + case LLM_FFN_GELU: + { + cur = ggml_gelu(ctx, cur); + cb(cur, "ffn_gelu", il); + } break; + case LLM_FFN_RELU: + { + cur = ggml_relu(ctx, cur); + cb(cur, "ffn_relu", il); + } break; + case LLM_FFN_RELU_SQR: + { + cur = ggml_relu(ctx, cur); + cb(cur, "ffn_relu", il); + + cur = ggml_sqr(ctx, cur); + cb(cur, "ffn_sqr(relu)", il); + } break; + } + + if (type_gate == LLM_FFN_PAR) { + cur = ggml_mul(ctx, cur, tmp); + cb(cur, "ffn_gate_par", il); + } + + cur = ggml_mul_mat(ctx, down, cur); + if (down_b) { + cb(cur, "ffn_down", il); + } + + if (down_b) { + cur = ggml_add(ctx, cur, down_b); + } + + return cur; +} + +// if max_alibi_bias > 0 then apply ALiBi +static struct ggml_tensor * llm_build_kqv( + const llama_context & lctx, + struct ggml_context * ctx, + struct ggml_tensor * cur, + struct ggml_tensor * wo, + struct ggml_tensor * wo_b, + struct ggml_tensor * q_cur, + struct ggml_tensor * kq_scale, + struct ggml_tensor * 
kq_mask, + int32_t n_tokens, + int32_t n_kv, + float alibi_bias_max, + const llm_build_cb & cb, + int il) { + const auto & model = lctx.model; + const auto & kv_self = lctx.kv_self; + const auto & cparams = lctx.cparams; + + const auto & hparams = model.hparams; + + const int64_t n_ctx = cparams.n_ctx; + const int64_t n_embd = hparams.n_embd; + const int64_t n_head = hparams.n_head; + const int64_t n_head_kv = hparams.n_head_kv; + const int64_t n_embd_head = hparams.n_embd_head(); + const int64_t n_embd_gqa = hparams.n_embd_gqa(); + + struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3); + cb(q, "q", il); + + struct ggml_tensor * k = + ggml_view_3d(ctx, kv_self.k, + n_embd_head, n_kv, n_head_kv, + ggml_element_size(kv_self.k)*n_embd_gqa, + ggml_element_size(kv_self.k)*n_embd_head, + ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il); + cb(k, "k", il); + + struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q); + cb(kq, "kq", il); + + kq = ggml_scale(ctx, kq, kq_scale); + cb(kq, "kq_scaled", il); + + if (alibi_bias_max > 0.0f) { + // TODO: n_head or n_head_kv + // TODO: K-shift is likely not working + // TODO: change to ggml_add + kq = ggml_alibi(ctx, kq, /*n_past*/ 0, n_head, alibi_bias_max); + cb(kq, "kq_scaled_alibi", il); + } + + kq = ggml_add(ctx, kq, kq_mask); + cb(kq, "kq_masked", il); + + kq = ggml_soft_max(ctx, kq); + cb(kq, "kq_soft_max", il); + + // split cached v into n_head heads + struct ggml_tensor * v = + ggml_view_3d(ctx, kv_self.v, + n_kv, n_embd_head, n_head_kv, + ggml_element_size(kv_self.v)*n_ctx, + ggml_element_size(kv_self.v)*n_ctx*n_embd_head, + ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il); + cb(v, "v", il); + + struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq); + cb(kqv, "kqv", il); + + struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3); + cb(kqv_merged, "kqv_merged", il); + + cur = ggml_cont_2d(ctx, kqv_merged, n_embd, n_tokens); + cb(cur, "kqv_merged_cont", il); + + cur = ggml_mul_mat(ctx, wo, cur); + if (wo_b) { + cb(cur, "kqv_wo", il); + } + + if (wo_b) { + cur = ggml_add(ctx, cur, wo_b); + } + + return cur; +} + static struct ggml_cgraph * llm_build_llama( - llama_context & lctx, - const llama_batch & batch) { + llama_context & lctx, + const llama_batch & batch, + const llm_build_cb & cb, + bool worst_case) { const auto & model = lctx.model; const auto & hparams = model.hparams; const auto & cparams = lctx.cparams; @@ -3106,7 +3457,6 @@ static struct ggml_cgraph * llm_build_llama( const int64_t n_head = hparams.n_head; const int64_t n_head_kv = hparams.n_head_kv; const int64_t n_embd_head = hparams.n_embd_head(); - const int64_t n_embd_gqa = hparams.n_embd_gqa(); GGML_ASSERT(n_embd_head == hparams.n_rot); @@ -3114,13 +3464,11 @@ static struct ggml_cgraph * llm_build_llama( const float freq_scale = cparams.rope_freq_scale; const float norm_rms_eps = hparams.f_norm_rms_eps; - const int n_gpu_layers = model.n_gpu_layers; - const int32_t n_tokens = batch.n_tokens; - const int32_t n_kv = ggml_allocr_is_measure(lctx.alloc) ? n_ctx : kv_self.n; - const int32_t kv_head = ggml_allocr_is_measure(lctx.alloc) ? n_ctx - n_tokens : kv_self.head; + const int32_t n_kv = worst_case ? n_ctx : kv_self.n; + const int32_t kv_head = worst_case ? 
n_ctx - n_tokens : kv_self.head; - const bool do_rope_shift = ggml_allocr_is_measure(lctx.alloc) || kv_self.has_shift; + const bool do_rope_shift = worst_case || kv_self.has_shift; //printf("n_kv = %d\n", n_kv); @@ -3139,314 +3487,81 @@ static struct ggml_cgraph * llm_build_llama( struct ggml_tensor * cur; struct ggml_tensor * inpL; - if (batch.token) { - struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - - ggml_allocr_alloc(lctx.alloc, inp_tokens); - if (!ggml_allocr_is_measure(lctx.alloc)) { - memcpy(inp_tokens->data, batch.token, n_tokens*ggml_element_size(inp_tokens)); - } - ggml_set_name(inp_tokens, "inp_tokens"); - - inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens); - } else { -#ifdef GGML_USE_MPI - GGML_ASSERT(false && "not implemented"); -#endif - - inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_tokens); - - ggml_allocr_alloc(lctx.alloc, inpL); - if (!ggml_allocr_is_measure(lctx.alloc)) { - memcpy(inpL->data, batch.embd, n_tokens * n_embd * ggml_element_size(inpL)); - } - } - - const int i_gpu_start = n_layer - n_gpu_layers; - (void) i_gpu_start; - - // offload functions set the tensor output backend to GPU - // tensors are GPU-accelerated if any input or the output has been offloaded - offload_func_t offload_func_nr = llama_nop; // nr = non-repeating - offload_func_t offload_func_kq = llama_nop; - offload_func_t offload_func_v = llama_nop; + inpL = llm_build_inp_embd(ctx0, batch, model.tok_embd, n_embd, n_tokens, cb); + cb(inpL, "inp_embd", -1); -#ifdef GGML_USE_CUBLAS - if (n_gpu_layers > n_layer) { - offload_func_nr = ggml_cuda_assign_buffers_no_alloc; - } - if (n_gpu_layers > n_layer + 1) { - offload_func_v = ggml_cuda_assign_buffers_no_alloc; - } - if (n_gpu_layers > n_layer + 2) { - offload_func_kq = ggml_cuda_assign_buffers_no_alloc; - } -#endif // GGML_USE_CUBLAS + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + cb(inp_pos, "inp_pos", -1); // KQ_scale struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)"); - ggml_allocr_alloc(lctx.alloc, KQ_scale); - if (!ggml_allocr_is_measure(lctx.alloc)) { - ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd_head))); - } + cb(KQ_scale, "KQ_scale", -1); // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); - offload_func_kq(KQ_mask); - ggml_set_name(KQ_mask, "KQ_mask"); - ggml_allocr_alloc(lctx.alloc, KQ_mask); - if (!ggml_allocr_is_measure(lctx.alloc)) { - float * data = (float *) KQ_mask->data; - memset(data, 0, ggml_nbytes(KQ_mask)); - - for (int h = 0; h < 1; ++h) { - for (int j = 0; j < n_tokens; ++j) { - const llama_pos pos = batch.pos[j]; - const llama_seq_id seq_id = batch.seq_id[j][0]; - - for (int i = 0; i < n_kv; ++i) { - if (!kv_self.cells[i].has_seq_id(seq_id) || kv_self.cells[i].pos > pos) { - data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY; - } - } - } - } - } - - // KQ_pos - contains the positions - struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - offload_func_kq(KQ_pos); - ggml_set_name(KQ_pos, "KQ_pos"); - ggml_allocr_alloc(lctx.alloc, KQ_pos); - if (!ggml_allocr_is_measure(lctx.alloc)) { - int * data = (int *) KQ_pos->data; - for (int i = 0; i < n_tokens; ++i) { - data[i] = batch.pos[i]; - } - } + cb(KQ_mask, "KQ_mask", -1); // shift the entire K-cache if needed if (do_rope_shift) { - 
struct ggml_tensor * K_shift = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_ctx); - offload_func_kq(K_shift); - ggml_set_name(K_shift, "K_shift"); - ggml_allocr_alloc(lctx.alloc, K_shift); - if (!ggml_allocr_is_measure(lctx.alloc)) { - int * data = (int *) K_shift->data; - for (int i = 0; i < n_ctx; ++i) { - data[i] = kv_self.cells[i].delta; - } - } - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * tmp = - ggml_rope_custom_inplace(ctx0, - ggml_view_3d(ctx0, kv_self.k, - n_embd_head, n_head_kv, n_ctx, - ggml_element_size(kv_self.k)*n_embd_head, - ggml_element_size(kv_self.k)*n_embd_gqa, - ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il), - K_shift, n_embd_head, 0, 0, freq_base, freq_scale); - offload_func_kq(tmp); - ggml_build_forward_expand(gf, tmp); - } + llm_build_k_shift(lctx, ctx0, gf, n_embd_head, LLM_ROPE, cb); } for (int il = 0; il < n_layer; ++il) { - ggml_format_name(inpL, "layer_inp_%d", il); - - offload_func_t offload_func = llama_nop; - -#ifdef GGML_USE_CUBLAS - if (il >= i_gpu_start) { - offload_func = ggml_cuda_assign_buffers_no_alloc; - } -#endif // GGML_USE_CUBLAS - struct ggml_tensor * inpSA = inpL; // norm - { - cur = ggml_rms_norm(ctx0, inpL, norm_rms_eps); - offload_func(cur); - ggml_set_name(cur, "rms_norm_0"); - - // cur = cur*attn_norm(broadcasted) - cur = ggml_mul(ctx0, cur, model.layers[il].attn_norm); - offload_func(cur); - ggml_set_name(cur, "attention_norm_0"); - } + cur = llm_build_norm(ctx0, inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, norm_rms_eps, cb, il); + cb(cur, "attn_norm", il); // self-attention { // compute Q and K and RoPE them - struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, model.layers[il].wk, cur); - offload_func_kq(tmpk); - ggml_set_name(tmpk, "tmpk"); - - struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, model.layers[il].wq, cur); - offload_func_kq(tmpq); - ggml_set_name(tmpq, "tmpq"); - - struct ggml_tensor * Kcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, n_tokens), KQ_pos, n_embd_head, 0, 0, freq_base, freq_scale); - offload_func_kq(Kcur); - ggml_set_name(Kcur, "Kcur"); - - struct ggml_tensor * Qcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens), KQ_pos, n_embd_head, 0, 0, freq_base, freq_scale); - offload_func_kq(Qcur); - ggml_set_name(Qcur, "Qcur"); - - // store key and value to memory - { - // compute the transposed [n_tokens, n_embd] V matrix - - struct ggml_tensor * tmpv = ggml_mul_mat(ctx0, model.layers[il].wv, cur); - offload_func_v(tmpv); - ggml_set_name(tmpv, "tmpv"); - - struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd_gqa, n_tokens)); - offload_func_v(Vcur); - ggml_set_name(Vcur, "Vcur"); - - struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, n_tokens*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + kv_head)); - offload_func_kq(k); - ggml_set_name(k, "k"); - - struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, n_tokens, n_embd_gqa, - ( n_ctx)*ggml_element_size(kv_self.v), - (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + kv_head*ggml_element_size(kv_self.v)); - offload_func_v(v); - ggml_set_name(v, "v"); + struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); - // important: storing RoPE-ed version of K in the KV cache! 
- ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k)); - ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v)); - } + struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); - struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); - offload_func_kq(Q); - ggml_set_name(Q, "Q"); + struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); - struct ggml_tensor * K = - ggml_view_3d(ctx0, kv_self.k, - n_embd_head, n_kv, n_head_kv, - ggml_element_size(kv_self.k)*n_embd_gqa, - ggml_element_size(kv_self.k)*n_embd_head, - ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il); - offload_func_kq(K); - ggml_set_name(K, "K"); - - // K * Q - struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); - offload_func_kq(KQ); - ggml_set_name(KQ, "KQ"); - - // KQ_scaled = KQ / sqrt(n_embd_head) - // KQ_scaled shape [n_kv, n_tokens, n_head, 1] - struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, KQ_scale); - offload_func_kq(KQ_scaled); - ggml_set_name(KQ_scaled, "KQ_scaled"); - - // KQ_masked = mask_past(KQ_scaled) - struct ggml_tensor * KQ_masked = ggml_add(ctx0, KQ_scaled, KQ_mask); - offload_func_kq(KQ_masked); - ggml_set_name(KQ_masked, "KQ_masked"); - - // KQ = soft_max(KQ_masked) - struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked); - offload_func_v(KQ_soft_max); - ggml_set_name(KQ_soft_max, "KQ_soft_max"); - - // split cached V into n_head heads - struct ggml_tensor * V = - ggml_view_3d(ctx0, kv_self.v, - n_kv, n_embd_head, n_head_kv, - ggml_element_size(kv_self.v)*n_ctx, - ggml_element_size(kv_self.v)*n_ctx*n_embd_head, - ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il); - offload_func_v(V); - ggml_set_name(V, "V"); - -#if 1 - struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max); - offload_func_v(KQV); - ggml_set_name(KQV, "KQV"); -#else - // make V contiguous in memory to speed up the matmul, however we waste time on the copy - // on M1 this is faster for the perplexity computation, but ~5% slower for the single-token generation - // is there a better way? 
- struct ggml_tensor * V_cont = ggml_cpy(ctx0, V, ggml_new_tensor_3d(ctx0, kv_self.v->type, n_ctx, n_embd_head, n_head)); - struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_cont, KQ_soft_max); -#endif + Qcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, n_embd_head, 0, 0, freq_base, freq_scale); + cb(Qcur, "Qcur", il); - // KQV_merged = KQV.permute(0, 2, 1, 3) - struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); - offload_func_v(KQV_merged); - ggml_set_name(KQV_merged, "KQV_merged"); + Kcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, n_embd_head, 0, 0, freq_base, freq_scale); + cb(Kcur, "Kcur", il); - // cur = KQV_merged.contiguous().view(n_embd, n_tokens) - cur = ggml_cont_2d(ctx0, KQV_merged, n_embd, n_tokens); - offload_func_v(cur); - ggml_set_name(cur, "KQV_merged_contiguous"); + llm_build_kv_store(lctx, ctx0, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); - // projection (no bias) - cur = ggml_mul_mat(ctx0, - model.layers[il].wo, - cur); - offload_func(cur); - ggml_set_name(cur, "result_wo"); + cur = llm_build_kqv(lctx, ctx0, cur, + model.layers[il].wo, NULL, + Qcur, KQ_scale, KQ_mask, n_tokens, n_kv, -1.0f, cb, il); + cb(cur, "kqv_out", il); } - struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA); - offload_func(inpFF); - ggml_set_name(inpFF, "inpFF"); + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); // feed-forward network { - // norm - { - cur = ggml_rms_norm(ctx0, inpFF, norm_rms_eps); - offload_func(cur); - ggml_set_name(cur, "rms_norm_1"); - - // cur = cur*ffn_norm(broadcasted) - cur = ggml_mul(ctx0, cur, model.layers[il].ffn_norm); - offload_func(cur); - ggml_set_name(cur, "ffn_norm"); - } - - struct ggml_tensor * tmp = ggml_mul_mat(ctx0, - model.layers[il].w3, - cur); - offload_func(tmp); - ggml_set_name(tmp, "result_w3"); + cur = llm_build_norm(ctx0, ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, norm_rms_eps, cb, il); + cb(cur, "ffn_norm", il); - cur = ggml_mul_mat(ctx0, - model.layers[il].w1, - cur); - offload_func(cur); - ggml_set_name(cur, "result_w1"); - - // SILU activation - cur = ggml_silu(ctx0, cur); - offload_func(cur); - ggml_set_name(cur, "silu"); - - cur = ggml_mul(ctx0, cur, tmp); - offload_func(cur); - ggml_set_name(cur, "silu_x_result_w3"); - - cur = ggml_mul_mat(ctx0, - model.layers[il].w2, - cur); - offload_func(cur); - ggml_set_name(cur, "result_w2"); + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, NULL, + model.layers[il].ffn_gate, NULL, + model.layers[il].ffn_down, NULL, + LLM_FFN_SILU, LLM_FFN_PAR, cb, il); + cb(cur, "ffn_out", il); } - cur = ggml_add(ctx0, cur, inpFF); - offload_func(cur); - ggml_set_name(cur, "inpFF_+_result_w2"); + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "l_out", il); // input for next layer inpL = cur; @@ -3454,21 +3569,14 @@ static struct ggml_cgraph * llm_build_llama( cur = inpL; - // norm - { - cur = ggml_rms_norm(ctx0, cur, norm_rms_eps); - offload_func_nr(cur); - ggml_set_name(cur, "rms_norm_2"); - - // cur = cur*norm(broadcasted) - cur = ggml_mul(ctx0, cur, model.output_norm); - // offload_func_nr(cur); // TODO CPU + GPU mirrored backend - ggml_set_name(cur, "result_norm"); - } + cur = llm_build_norm(ctx0, cur, + model.output_norm, NULL, + LLM_NORM_RMS, norm_rms_eps, cb, -1); + cb(cur, "result_norm", -1); // lm_head cur = ggml_mul_mat(ctx0, model.output, cur); - ggml_set_name(cur, "result_output"); + cb(cur, "result_output", -1); 
ggml_build_forward_expand(gf, cur); @@ -3479,7 +3587,9 @@ static struct ggml_cgraph * llm_build_llama( static struct ggml_cgraph * llm_build_baichaun( llama_context & lctx, - const llama_batch & batch) { + const llama_batch & batch, + const llm_build_cb & cb, + bool worst_case) { const auto & model = lctx.model; const auto & hparams = model.hparams; const auto & cparams = lctx.cparams; @@ -3494,7 +3604,6 @@ static struct ggml_cgraph * llm_build_baichaun( const int64_t n_head = hparams.n_head; const int64_t n_head_kv = hparams.n_head_kv; const int64_t n_embd_head = hparams.n_embd_head(); - const int64_t n_embd_gqa = hparams.n_embd_gqa(); GGML_ASSERT(n_embd_head == hparams.n_rot); @@ -3502,13 +3611,11 @@ static struct ggml_cgraph * llm_build_baichaun( const float freq_scale = cparams.rope_freq_scale; const float norm_rms_eps = hparams.f_norm_rms_eps; - const int n_gpu_layers = model.n_gpu_layers; - const int32_t n_tokens = batch.n_tokens; - const int32_t n_kv = ggml_allocr_is_measure(lctx.alloc) ? n_ctx : kv_self.n; - const int32_t kv_head = ggml_allocr_is_measure(lctx.alloc) ? n_ctx - n_tokens : kv_self.head; + const int32_t n_kv = worst_case ? n_ctx : kv_self.n; + const int32_t kv_head = worst_case ? n_ctx - n_tokens : kv_self.head; - const bool do_rope_shift = ggml_allocr_is_measure(lctx.alloc) || kv_self.has_shift; + const bool do_rope_shift = worst_case || kv_self.has_shift; auto & buf_compute = lctx.buf_compute; @@ -3525,331 +3632,91 @@ static struct ggml_cgraph * llm_build_baichaun( struct ggml_tensor * cur; struct ggml_tensor * inpL; - if (batch.token) { - struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - - ggml_allocr_alloc(lctx.alloc, inp_tokens); - if (!ggml_allocr_is_measure(lctx.alloc)) { - memcpy(inp_tokens->data, batch.token, n_tokens*ggml_element_size(inp_tokens)); - } - ggml_set_name(inp_tokens, "inp_tokens"); - - inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens); - } else { -#ifdef GGML_USE_MPI - GGML_ASSERT(false && "not implemented"); -#endif - - inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_tokens); - - ggml_allocr_alloc(lctx.alloc, inpL); - if (!ggml_allocr_is_measure(lctx.alloc)) { - memcpy(inpL->data, batch.embd, n_tokens * n_embd * ggml_element_size(inpL)); - } - } - - const int i_gpu_start = n_layer - n_gpu_layers; - (void) i_gpu_start; - - // offload functions set the tensor output backend to GPU - // tensors are GPU-accelerated if any input or the output has been offloaded - offload_func_t offload_func_nr = llama_nop; // nr = non-repeating - offload_func_t offload_func_kq = llama_nop; - offload_func_t offload_func_v = llama_nop; + inpL = llm_build_inp_embd(ctx0, batch, model.tok_embd, n_embd, n_tokens, cb); + cb(inpL, "inp_embd", -1); -#ifdef GGML_USE_CUBLAS - if (n_gpu_layers > n_layer) { - offload_func_nr = ggml_cuda_assign_buffers_no_alloc; - } - if (n_gpu_layers > n_layer + 1) { - offload_func_v = ggml_cuda_assign_buffers_no_alloc; - } - if (n_gpu_layers > n_layer + 2) { - offload_func_kq = ggml_cuda_assign_buffers_no_alloc; - } -#endif // GGML_USE_CUBLAS + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + cb(inp_pos, "inp_pos", -1); // KQ_scale struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)"); - ggml_allocr_alloc(lctx.alloc, KQ_scale); - if (!ggml_allocr_is_measure(lctx.alloc)) { - ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head)); - } + 
cb(KQ_scale, "KQ_scale", -1); // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); - offload_func_kq(KQ_mask); - ggml_set_name(KQ_mask, "KQ_mask"); - ggml_allocr_alloc(lctx.alloc, KQ_mask); - if (!ggml_allocr_is_measure(lctx.alloc)) { - float * data = (float *) KQ_mask->data; - memset(data, 0, ggml_nbytes(KQ_mask)); - - for (int h = 0; h < 1; ++h) { - for (int j = 0; j < n_tokens; ++j) { - const llama_pos pos = batch.pos[j]; - const llama_seq_id seq_id = batch.seq_id[j][0]; - - for (int i = 0; i < n_kv; ++i) { - if (!kv_self.cells[i].has_seq_id(seq_id) || kv_self.cells[i].pos > pos) { - data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY; - } - } - } - } - } - - // KQ_pos - contains the positions - struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - offload_func_kq(KQ_pos); - ggml_set_name(KQ_pos, "KQ_pos"); - ggml_allocr_alloc(lctx.alloc, KQ_pos); - if (!ggml_allocr_is_measure(lctx.alloc)) { - int * data = (int *) KQ_pos->data; - for (int i = 0; i < n_tokens; ++i) { - data[i] = batch.pos[i]; - } - } + cb(KQ_mask, "KQ_mask", -1); // shift the entire K-cache if needed if (do_rope_shift) { - struct ggml_tensor * K_shift = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_ctx); - offload_func_kq(K_shift); - ggml_set_name(K_shift, "K_shift"); - ggml_allocr_alloc(lctx.alloc, K_shift); - if (!ggml_allocr_is_measure(lctx.alloc)) { - int * data = (int *) K_shift->data; - for (int i = 0; i < n_ctx; ++i) { - data[i] = kv_self.cells[i].delta; - } - } - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * tmp = - ggml_rope_custom_inplace(ctx0, - ggml_view_3d(ctx0, kv_self.k, - n_embd_head, n_head_kv, n_ctx, - ggml_element_size(kv_self.k)*n_embd_head, - ggml_element_size(kv_self.k)*n_embd_gqa, - ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il), - K_shift, n_embd_head, 0, 0, freq_base, freq_scale); - offload_func_kq(tmp); - ggml_build_forward_expand(gf, tmp); - } + llm_build_k_shift(lctx, ctx0, gf, n_embd_head, LLM_ROPE, cb); } for (int il = 0; il < n_layer; ++il) { - ggml_format_name(inpL, "layer_inp_%d", il); - - offload_func_t offload_func = llama_nop; - -#ifdef GGML_USE_CUBLAS - if (il >= i_gpu_start) { - offload_func = ggml_cuda_assign_buffers_no_alloc; - } -#endif // GGML_USE_CUBLAS - struct ggml_tensor * inpSA = inpL; - // norm - { - cur = ggml_rms_norm(ctx0, inpL, norm_rms_eps); - offload_func(cur); - ggml_set_name(cur, "rms_norm_0"); - - // cur = cur*attn_norm(broadcasted) - cur = ggml_mul(ctx0, cur, model.layers[il].attn_norm); - offload_func(cur); - ggml_set_name(cur, "attention_norm_0"); - } + cur = llm_build_norm(ctx0, inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, norm_rms_eps, cb, il); + cb(cur, "attn_norm", il); // self-attention { - // compute Q and K and RoPE them - struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, model.layers[il].wk, cur); - offload_func_kq(tmpk); - ggml_set_name(tmpk, "tmpk"); + struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); - struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, model.layers[il].wq, cur); - offload_func_kq(tmpq); - ggml_set_name(tmpq, "tmpq"); + struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); - struct ggml_tensor * Kcur; - struct ggml_tensor * Qcur; switch (model.type) { case MODEL_7B: - Kcur = 
ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, n_tokens), KQ_pos, n_embd_head, 0, 0, freq_base, freq_scale); - Qcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens), KQ_pos, n_embd_head, 0, 0, freq_base, freq_scale); + Qcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, n_embd_head, 0, 0, freq_base, freq_scale); + Kcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, n_embd_head, 0, 0, freq_base, freq_scale); break; case MODEL_13B: - Kcur = ggml_reshape_3d(ctx0, tmpk, n_embd/n_head, n_head, n_tokens); - Qcur = ggml_reshape_3d(ctx0, tmpq, n_embd/n_head, n_head, n_tokens); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd/n_head, n_head, n_tokens); break; default: GGML_ASSERT(false); } + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); - offload_func_kq(Kcur); - ggml_set_name(Kcur, "Kcur"); + llm_build_kv_store(lctx, ctx0, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); - offload_func_kq(Qcur); - ggml_set_name(Qcur, "Qcur"); + // apply ALiBi for 13B model + const float alibi_bias_max = model.type == MODEL_13B ? 8.0f : -1.0f; - // store key and value to memory - { - // compute the transposed [n_tokens, n_embd] V matrix + cur = llm_build_kqv(lctx, ctx0, cur, + model.layers[il].wo, NULL, + Qcur, KQ_scale, KQ_mask, n_tokens, n_kv, alibi_bias_max, cb, il); + cb(cur, "kqv_out", il); + } - struct ggml_tensor * tmpv = ggml_mul_mat(ctx0, model.layers[il].wv, cur); - offload_func_v(tmpv); - ggml_set_name(tmpv, "tmpv"); - - struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd_gqa, n_tokens)); - offload_func_v(Vcur); - ggml_set_name(Vcur, "Vcur"); - - struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, n_tokens*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + kv_head)); - offload_func_kq(k); - ggml_set_name(k, "k"); - - struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, n_tokens, n_embd_gqa, - ( n_ctx)*ggml_element_size(kv_self.v), - (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + kv_head*ggml_element_size(kv_self.v)); - offload_func_v(v); - ggml_set_name(v, "v"); - - // important: storing RoPE-ed version of K in the KV cache! 
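// [editor's note — illustrative sketch, not part of the patch]
// For the 13B Baichuan variant the new path above passes alibi_bias_max = 8.0f
// into llm_build_kqv instead of applying RoPE. A minimal sketch of the standard
// ALiBi slope recipe that such a max bias corresponds to; the exact ggml_alibi
// kernel may differ in detail (e.g. for head counts that are not powers of two),
// and the helper name below is illustrative only.

#include <math.h>

// slope for head h (0-based) out of n_head heads, given the maximum bias
static float alibi_slope(int head, int n_head, float max_bias) {
    // geometric sequence: slope_h = 2^(-max_bias * (h + 1) / n_head)
    return powf(2.0f, -max_bias * (float) (head + 1) / (float) n_head);
}

// the bias added to the score of query position i attending to key position j
// is alibi_slope(h, n_head, 8.0f) * (j - i), applied to KQ before the softmax,
// so more distant keys are penalized more strongly; the non-positive value
// (-1.0f) used for the 7B path signals that the ALiBi term is disabled.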
- ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k)); - ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v)); - } - - struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); - offload_func_kq(Q); - ggml_set_name(Q, "Q"); - - struct ggml_tensor * K = - ggml_view_3d(ctx0, kv_self.k, - n_embd_head, n_kv, n_head_kv, - ggml_element_size(kv_self.k)*n_embd_gqa, - ggml_element_size(kv_self.k)*n_embd_head, - ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il); - offload_func_kq(K); - ggml_set_name(K, "K"); - - // K * Q - struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); - offload_func_kq(KQ); - ggml_set_name(KQ, "KQ"); - - // KQ_scaled = KQ / sqrt(n_embd_head) - // KQ_scaled shape [n_past + n_tokens, n_tokens, n_head, 1] - struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, KQ_scale); - offload_func_kq(KQ_scaled); - ggml_set_name(KQ_scaled, "KQ_scaled"); - - struct ggml_tensor * KQ_masked; - struct ggml_tensor * KQ_scaled_alibi; - - switch (model.type) { - case MODEL_7B: - KQ_masked = ggml_add(ctx0, KQ_scaled, KQ_mask); - break; - case MODEL_13B: - // TODO: replace with ggml_add() - KQ_scaled_alibi = ggml_alibi(ctx0, KQ_scaled, /*n_past*/ 0, n_head, 8); - ggml_set_name(KQ_scaled_alibi, "KQ_scaled_alibi"); - KQ_masked = ggml_add(ctx0, KQ_scaled_alibi, KQ_mask); - break; - default: - GGML_ASSERT(false); - } - - // KQ = soft_max(KQ_masked) - struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked); - offload_func_v(KQ_soft_max); - ggml_set_name(KQ_soft_max, "KQ_soft_max"); - - // split cached V into n_head heads - struct ggml_tensor * V = - ggml_view_3d(ctx0, kv_self.v, - n_kv, n_embd_head, n_head_kv, - ggml_element_size(kv_self.v)*n_ctx, - ggml_element_size(kv_self.v)*n_ctx*n_embd_head, - ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il); - offload_func_v(V); - ggml_set_name(V, "V"); - - struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max); - offload_func_v(KQV); - ggml_set_name(KQV, "KQV"); - - // KQV_merged = KQV.permute(0, 2, 1, 3) - struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); - offload_func_v(KQV_merged); - ggml_set_name(KQV_merged, "KQV_merged"); - - // cur = KQV_merged.contiguous().view(n_embd, n_tokens) - cur = ggml_cont_2d(ctx0, KQV_merged, n_embd, n_tokens); - offload_func_v(cur); - ggml_set_name(cur, "KQV_merged_contiguous"); - - // projection (no bias) - cur = ggml_mul_mat(ctx0, - model.layers[il].wo, - cur); - offload_func(cur); - ggml_set_name(cur, "result_wo"); - } - - struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA); - offload_func(inpFF); - ggml_set_name(inpFF, "inpFF"); + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); // feed-forward network { - // norm - { - cur = ggml_rms_norm(ctx0, inpFF, norm_rms_eps); - offload_func(cur); - ggml_set_name(cur, "rms_norm_1"); - - // cur = cur*ffn_norm(broadcasted) - cur = ggml_mul(ctx0, cur, model.layers[il].ffn_norm); - offload_func(cur); - ggml_set_name(cur, "ffn_norm"); - } + cur = llm_build_norm(ctx0, ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, norm_rms_eps, cb, il); + cb(cur, "ffn_norm", il); - struct ggml_tensor * tmp = ggml_mul_mat(ctx0, - model.layers[il].w3, - cur); - offload_func(tmp); - ggml_set_name(tmp, "result_w3"); - - cur = ggml_mul_mat(ctx0, - model.layers[il].w1, - cur); - offload_func(cur); - ggml_set_name(cur, "result_w1"); - - // SILU activation - cur = ggml_silu(ctx0, cur); - offload_func(cur); - ggml_set_name(cur, "silu"); - - cur = ggml_mul(ctx0, cur, tmp); - offload_func(cur); - ggml_set_name(cur, 
"silu_x_result_w3"); - - cur = ggml_mul_mat(ctx0, - model.layers[il].w2, - cur); - offload_func(cur); - ggml_set_name(cur, "result_w2"); + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, NULL, + model.layers[il].ffn_gate, NULL, + model.layers[il].ffn_down, NULL, + LLM_FFN_SILU, LLM_FFN_PAR, cb, il); + cb(cur, "ffn_out", il); } - cur = ggml_add(ctx0, cur, inpFF); - offload_func(cur); - ggml_set_name(cur, "inpFF_+_result_w2"); + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "l_out", il); // input for next layer inpL = cur; @@ -3857,21 +3724,14 @@ static struct ggml_cgraph * llm_build_baichaun( cur = inpL; - // norm - { - cur = ggml_rms_norm(ctx0, cur, norm_rms_eps); - offload_func_nr(cur); - ggml_set_name(cur, "rms_norm_2"); - - // cur = cur*norm(broadcasted) - cur = ggml_mul(ctx0, cur, model.output_norm); - // offload_func_nr(cur); // TODO CPU + GPU mirrored backend - ggml_set_name(cur, "result_norm"); - } + cur = llm_build_norm(ctx0, cur, + model.output_norm, NULL, + LLM_NORM_RMS, norm_rms_eps, cb, -1); + cb(cur, "result_norm", -1); // lm_head cur = ggml_mul_mat(ctx0, model.output, cur); - ggml_set_name(cur, "result_output"); + cb(cur, "result_output", -1); ggml_build_forward_expand(gf, cur); @@ -3880,9 +3740,11 @@ static struct ggml_cgraph * llm_build_baichaun( return gf; } -static struct ggml_cgraph * llm_build_refact( +static struct ggml_cgraph * llm_build_falcon( llama_context & lctx, - const llama_batch & batch) { + const llama_batch & batch, + const llm_build_cb & cb, + bool worst_case) { const auto & model = lctx.model; const auto & hparams = model.hparams; const auto & cparams = lctx.cparams; @@ -3899,15 +3761,20 @@ static struct ggml_cgraph * llm_build_refact( const int64_t n_embd_head = hparams.n_embd_head(); const int64_t n_embd_gqa = hparams.n_embd_gqa(); - const float norm_rms_eps = hparams.f_norm_rms_eps; + GGML_ASSERT(n_embd_head == hparams.n_rot); - const int n_gpu_layers = model.n_gpu_layers; + const float freq_base = cparams.rope_freq_base; + const float freq_scale = cparams.rope_freq_scale; + const float norm_eps = hparams.f_norm_eps; const int32_t n_tokens = batch.n_tokens; - const int32_t n_kv = ggml_allocr_is_measure(lctx.alloc) ? n_ctx : kv_self.n; - const int32_t kv_head = ggml_allocr_is_measure(lctx.alloc) ? n_ctx - n_tokens : kv_self.head; + const int32_t n_kv = worst_case ? n_ctx : kv_self.n; + const int32_t kv_head = worst_case ? 
n_ctx - n_tokens : kv_self.head; - // printf("n_kv = %d\n", n_kv); + const bool do_rope_shift = worst_case || kv_self.has_shift; + + //printf("kv_head = %d, n_kv = %d, n_tokens = %d, n_ctx = %d, is_measure = %d, has_shift = %d\n", + // kv_head, n_kv, n_tokens, n_ctx, ggml_allocr_is_measure(lctx.alloc), kv_self.has_shift); auto & buf_compute = lctx.buf_compute; @@ -3924,277 +3791,94 @@ static struct ggml_cgraph * llm_build_refact( struct ggml_tensor * cur; struct ggml_tensor * inpL; - if (batch.token) { - struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - - ggml_allocr_alloc(lctx.alloc, inp_tokens); - if (!ggml_allocr_is_measure(lctx.alloc)) { - memcpy(inp_tokens->data, batch.token, n_tokens*ggml_element_size(inp_tokens)); - } - ggml_set_name(inp_tokens, "inp_tokens"); - - inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens); - } else { -#ifdef GGML_USE_MPI - GGML_ASSERT(false && "not implemented"); -#endif - - inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_tokens); - - ggml_allocr_alloc(lctx.alloc, inpL); - if (!ggml_allocr_is_measure(lctx.alloc)) { - memcpy(inpL->data, batch.embd, n_tokens * n_embd * ggml_element_size(inpL)); - } - } - - const int i_gpu_start = n_layer - n_gpu_layers; - (void) i_gpu_start; + inpL = llm_build_inp_embd(ctx0, batch, model.tok_embd, n_embd, n_tokens, cb); + cb(inpL, "inp_embd", -1); - // offload functions set the tensor output backend to GPU - // tensors are GPU-accelerated if any input or the output has been offloaded - offload_func_t offload_func_nr = llama_nop; // nr = non-repeating - offload_func_t offload_func_kq = llama_nop; - offload_func_t offload_func_v = llama_nop; - -#ifdef GGML_USE_CUBLAS - if (n_gpu_layers > n_layer) { - offload_func_nr = ggml_cuda_assign_buffers_no_alloc; - } - if (n_gpu_layers > n_layer + 1) { - offload_func_v = ggml_cuda_assign_buffers_no_alloc; - } - if (n_gpu_layers > n_layer + 2) { - offload_func_kq = ggml_cuda_assign_buffers_no_alloc; - } -#endif // GGML_USE_CUBLAS + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + cb(inp_pos, "inp_pos", -1); // KQ_scale struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)"); - ggml_allocr_alloc(lctx.alloc, KQ_scale); - if (!ggml_allocr_is_measure(lctx.alloc)) { - ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd_head))); - } + cb(KQ_scale, "KQ_scale", -1); // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); - offload_func_kq(KQ_mask); - ggml_set_name(KQ_mask, "KQ_mask"); - ggml_allocr_alloc(lctx.alloc, KQ_mask); - if (!ggml_allocr_is_measure(lctx.alloc)) { - float * data = (float *) KQ_mask->data; - memset(data, 0, ggml_nbytes(KQ_mask)); - - for (int h = 0; h < 1; ++h) { - for (int j = 0; j < n_tokens; ++j) { - const llama_pos pos = batch.pos[j]; - const llama_seq_id seq_id = batch.seq_id[j][0]; - - for (int i = 0; i < n_kv; ++i) { - if (!kv_self.cells[i].has_seq_id(seq_id) || kv_self.cells[i].pos > pos) { - data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY; - } - } - } - } + cb(KQ_mask, "KQ_mask", -1); + + // shift the entire K-cache if needed + if (do_rope_shift) { + llm_build_k_shift(lctx, ctx0, gf, n_embd_head, LLM_ROPE_NEOX, cb); } for (int il = 0; il < n_layer; ++il) { - ggml_format_name(inpL, "layer_inp_%d", il); - - offload_func_t offload_func = llama_nop; - -#ifdef GGML_USE_CUBLAS - if 
(il >= i_gpu_start) { - offload_func = ggml_cuda_assign_buffers_no_alloc; - } -#endif // GGML_USE_CUBLAS - - struct ggml_tensor * inpSA = inpL; - - // norm - { - cur = ggml_rms_norm(ctx0, inpL, norm_rms_eps); - offload_func(cur); - ggml_set_name(cur, "rms_norm_0"); + struct ggml_tensor * attn_norm; - // cur = cur*attn_norm(broadcasted) - cur = ggml_mul(ctx0, cur, model.layers[il].attn_norm); - offload_func(cur); - ggml_set_name(cur, "attention_norm_0"); - } + attn_norm = llm_build_norm(ctx0, inpL, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, norm_eps, cb, il); + cb(attn_norm, "attn_norm", il); // self-attention { - // compute Q and K - struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, model.layers[il].wk, cur); - offload_func_kq(tmpk); - ggml_set_name(tmpk, "tmpk"); - - struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, model.layers[il].wq, cur); - offload_func_kq(tmpq); - ggml_set_name(tmpq, "tmpq"); - - struct ggml_tensor * Kcur = ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, n_tokens); - offload_func_kq(Kcur); - ggml_set_name(Kcur, "Kcur"); - - struct ggml_tensor * Qcur = ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens); - offload_func_kq(Qcur); - ggml_set_name(Qcur, "Qcur"); - - // store key and value to memory - { - // compute the transposed [n_tokens, n_embd] V matrix - - struct ggml_tensor * tmpv = ggml_mul_mat(ctx0, model.layers[il].wv, cur); - offload_func_v(tmpv); - ggml_set_name(tmpv, "tmpv"); - - struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd_gqa, n_tokens)); - offload_func_v(Vcur); - ggml_set_name(Vcur, "Vcur"); + if (model.layers[il].attn_norm_2) { + // Falcon-40B + cur = llm_build_norm(ctx0, attn_norm, + model.layers[il].attn_norm_2, + model.layers[il].attn_norm_2_b, + LLM_NORM, norm_eps, cb, il); + cb(cur, "attn_norm_2", il); + } else { + cur = attn_norm; + } - struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, n_tokens*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + kv_head)); - offload_func_kq(k); - ggml_set_name(k, "k"); + cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); - struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, n_tokens, n_embd_gqa, - ( n_ctx)*ggml_element_size(kv_self.v), - (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + kv_head*ggml_element_size(kv_self.v)); - offload_func_v(v); - ggml_set_name(v, "v"); + struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k)); - ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v)); - } + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); - struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); - offload_func_kq(Q); - ggml_set_name(Q, "Q"); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - struct ggml_tensor * K = - ggml_view_3d(ctx0, kv_self.k, - n_embd_head, n_kv, n_head_kv, - ggml_element_size(kv_self.k)*n_embd_gqa, - ggml_element_size(kv_self.k)*n_embd_head, - ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il); - offload_func_kq(K); - ggml_set_name(K, "K"); - - // K * Q - 
struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); - offload_func_kq(KQ); - ggml_set_name(KQ, "KQ"); - - // KQ_scaled = KQ / sqrt(n_embd_head) - // KQ_scaled shape [n_kv, n_tokens, n_head, 1] - struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, KQ_scale); - offload_func_kq(KQ_scaled); - ggml_set_name(KQ_scaled, "KQ_scaled"); - - // KQ_masked = mask_past(KQ_scaled) - struct ggml_tensor * KQ_scaled_alibi = ggml_alibi(ctx0, KQ_scaled, /*n_past*/ 0, n_head, 8); - ggml_set_name(KQ_scaled_alibi, "KQ_scaled_alibi"); - - struct ggml_tensor * KQ_masked = ggml_add(ctx0, KQ_scaled_alibi, KQ_mask); - offload_func_kq(KQ_masked); - ggml_set_name(KQ_masked, "KQ_masked"); - - // KQ = soft_max(KQ_masked) - struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked); - offload_func_v(KQ_soft_max); - ggml_set_name(KQ_soft_max, "KQ_soft_max"); - - // split cached V into n_head heads - struct ggml_tensor * V = - ggml_view_3d(ctx0, kv_self.v, - n_kv, n_embd_head, n_head_kv, - ggml_element_size(kv_self.v)*n_ctx, - ggml_element_size(kv_self.v)*n_ctx*n_embd_head, - ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il); - offload_func_v(V); - ggml_set_name(V, "V"); - -#if 1 - struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max); - offload_func_v(KQV); - ggml_set_name(KQV, "KQV"); -#else - // make V contiguous in memory to speed up the matmul, however we waste time on the copy - // on M1 this is faster for the perplexity computation, but ~5% slower for the single-token generation - // is there a better way? - struct ggml_tensor * V_cont = ggml_cpy(ctx0, V, ggml_new_tensor_3d(ctx0, kv_self.v->type, n_ctx, n_embd_head, n_head)); - struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_cont, KQ_soft_max); -#endif + // using mode = 2 for neox mode + Qcur = ggml_rope_custom(ctx0, Qcur, inp_pos, n_embd_head, 2, 0, freq_base, freq_scale); + cb(Qcur, "Qcur", il); - // KQV_merged = KQV.permute(0, 2, 1, 3) - struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); - offload_func_v(KQV_merged); - ggml_set_name(KQV_merged, "KQV_merged"); + Kcur = ggml_rope_custom(ctx0, Kcur, inp_pos, n_embd_head, 2, 0, freq_base, freq_scale); + cb(Kcur, "Kcur", il); - // cur = KQV_merged.contiguous().view(n_embd, n_tokens) - cur = ggml_cont_2d(ctx0, KQV_merged, n_embd, n_tokens); - offload_func_v(cur); - ggml_set_name(cur, "KQV_merged_contiguous"); + llm_build_kv_store(lctx, ctx0, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); - // projection (no bias) - cur = ggml_mul_mat(ctx0, - model.layers[il].wo, - cur); - offload_func(cur); - ggml_set_name(cur, "result_wo"); + cur = llm_build_kqv(lctx, ctx0, attn_norm, + model.layers[il].wo, NULL, + Qcur, KQ_scale, KQ_mask, n_tokens, n_kv, -1.0f, cb, il); + cb(cur, "kqv_out", il); } - struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA); - offload_func(inpFF); - ggml_set_name(inpFF, "inpFF"); + struct ggml_tensor * ffn_inp = cur; - // feed-forward network + // feed forward { - // norm - { - cur = ggml_rms_norm(ctx0, inpFF, norm_rms_eps); - offload_func(cur); - ggml_set_name(cur, "rms_norm_1"); - - // cur = cur*ffn_norm(broadcasted) - cur = ggml_mul(ctx0, cur, model.layers[il].ffn_norm); - offload_func(cur); - ggml_set_name(cur, "ffn_norm"); - } - - struct ggml_tensor * tmp = ggml_mul_mat(ctx0, - model.layers[il].w3, - cur); - offload_func(tmp); - ggml_set_name(tmp, "result_w3"); - - cur = ggml_mul_mat(ctx0, - model.layers[il].w1, - cur); - offload_func(cur); - ggml_set_name(cur, "result_w1"); - - // SILU activation - cur = ggml_silu(ctx0, cur); - offload_func(cur); - 
ggml_set_name(cur, "silu"); - - cur = ggml_mul(ctx0, cur, tmp); - offload_func(cur); - ggml_set_name(cur, "silu_x_result_w3"); - - cur = ggml_mul_mat(ctx0, - model.layers[il].w2, - cur); - offload_func(cur); - ggml_set_name(cur, "result_w2"); + cur = llm_build_ffn(ctx0, attn_norm, // !! use the attn norm, not the result + model.layers[il].ffn_up, NULL, + NULL, NULL, + model.layers[il].ffn_down, NULL, + LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); + cb(cur, "ffn_out", il); } - cur = ggml_add(ctx0, cur, inpFF); - offload_func(cur); - ggml_set_name(cur, "inpFF_+_result_w2"); + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "l_out", il); + + cur = ggml_add(ctx0, cur, inpL); + cb(cur, "l_out", il); // input for next layer inpL = cur; @@ -4203,20 +3887,14 @@ static struct ggml_cgraph * llm_build_refact( cur = inpL; // norm - { - cur = ggml_rms_norm(ctx0, cur, norm_rms_eps); - offload_func_nr(cur); - ggml_set_name(cur, "rms_norm_2"); - - // cur = cur*norm(broadcasted) - cur = ggml_mul(ctx0, cur, model.output_norm); - // offload_func_nr(cur); // TODO CPU + GPU mirrored backend - ggml_set_name(cur, "result_norm"); - } + cur = llm_build_norm(ctx0, cur, + model.output_norm, + model.output_norm_b, + LLM_NORM, norm_eps, cb, -1); + cb(cur, "result_norm", -1); - // lm_head cur = ggml_mul_mat(ctx0, model.output, cur); - ggml_set_name(cur, "result_output"); + cb(cur, "result_output", -1); ggml_build_forward_expand(gf, cur); @@ -4225,9 +3903,11 @@ static struct ggml_cgraph * llm_build_refact( return gf; } -static struct ggml_cgraph * llm_build_falcon( +static struct ggml_cgraph * llm_build_starcoder( llama_context & lctx, - const llama_batch & batch) { + const llama_batch & batch, + const llm_build_cb & cb, + bool worst_case) { const auto & model = lctx.model; const auto & hparams = model.hparams; const auto & cparams = lctx.cparams; @@ -4240,26 +3920,16 @@ static struct ggml_cgraph * llm_build_falcon( const int64_t n_layer = hparams.n_layer; const int64_t n_ctx = cparams.n_ctx; const int64_t n_head = hparams.n_head; - const int64_t n_head_kv = hparams.n_head_kv; const int64_t n_embd_head = hparams.n_embd_head(); const int64_t n_embd_gqa = hparams.n_embd_gqa(); GGML_ASSERT(n_embd_head == hparams.n_rot); - const float freq_base = cparams.rope_freq_base; - const float freq_scale = cparams.rope_freq_scale; - const float norm_eps = hparams.f_norm_eps; - - const int n_gpu_layers = model.n_gpu_layers; + const float norm_eps = hparams.f_norm_eps; const int32_t n_tokens = batch.n_tokens; - const int32_t n_kv = ggml_allocr_is_measure(lctx.alloc) ? n_ctx : kv_self.n; - const int32_t kv_head = ggml_allocr_is_measure(lctx.alloc) ? n_ctx - n_tokens : kv_self.head; - - const bool do_rope_shift = ggml_allocr_is_measure(lctx.alloc) || kv_self.has_shift; - - //printf("kv_head = %d, n_kv = %d, n_tokens = %d, n_ctx = %d, is_measure = %d, has_shift = %d\n", - // kv_head, n_kv, n_tokens, n_ctx, ggml_allocr_is_measure(lctx.alloc), kv_self.has_shift); + const int32_t n_kv = worst_case ? n_ctx : kv_self.n; + const int32_t kv_head = worst_case ? 
n_ctx - n_tokens : kv_self.head; auto & buf_compute = lctx.buf_compute; @@ -4274,352 +3944,133 @@ static struct ggml_cgraph * llm_build_falcon( ggml_cgraph * gf = ggml_new_graph(ctx0); struct ggml_tensor * cur; + struct ggml_tensor * pos; struct ggml_tensor * inpL; - if (batch.token) { - struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - - ggml_allocr_alloc(lctx.alloc, inp_tokens); - if (!ggml_allocr_is_measure(lctx.alloc)) { - memcpy(inp_tokens->data, batch.token, n_tokens*ggml_element_size(inp_tokens)); - } - ggml_set_name(inp_tokens, "inp_tokens"); - - inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens); - } else { -#ifdef GGML_USE_MPI - GGML_ASSERT(false && "not implemented"); -#endif - - inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_tokens); + inpL = llm_build_inp_embd(ctx0, batch, model.tok_embd, n_embd, n_tokens, cb); + cb(inpL, "inp_embd", -1); - ggml_allocr_alloc(lctx.alloc, inpL); - if (!ggml_allocr_is_measure(lctx.alloc)) { - memcpy(inpL->data, batch.embd, n_tokens * n_embd * ggml_element_size(inpL)); - } - } - - const int i_gpu_start = n_layer - n_gpu_layers; - (void) i_gpu_start; - - // offload functions set the tensor output backend to GPU - // tensors are GPU-accelerated if any input or the output has been offloaded - offload_func_t offload_func_nr = llama_nop; // nr = non-repeating - offload_func_t offload_func_kq = llama_nop; - offload_func_t offload_func_v = llama_nop; - -#ifdef GGML_USE_CUBLAS - if (n_gpu_layers > n_layer) { - offload_func_nr = ggml_cuda_assign_buffers_no_alloc; - } - if (n_gpu_layers > n_layer + 1) { - offload_func_v = ggml_cuda_assign_buffers_no_alloc; - } - if (n_gpu_layers > n_layer + 2) { - offload_func_kq = ggml_cuda_assign_buffers_no_alloc; - } -#endif // GGML_USE_CUBLAS + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + cb(inp_pos, "inp_pos", -1); // KQ_scale struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)"); - ggml_allocr_alloc(lctx.alloc, KQ_scale); - if (!ggml_allocr_is_measure(lctx.alloc)) { - ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head)); - } + cb(KQ_scale, "KQ_scale", -1); // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); - offload_func_kq(KQ_mask); - ggml_set_name(KQ_mask, "KQ_mask"); - ggml_allocr_alloc(lctx.alloc, KQ_mask); - if (!ggml_allocr_is_measure(lctx.alloc)) { - float * data = (float *) KQ_mask->data; - memset(data, 0, ggml_nbytes(KQ_mask)); - - for (int h = 0; h < 1; ++h) { - for (int j = 0; j < n_tokens; ++j) { - const llama_pos pos = batch.pos[j]; - const llama_seq_id seq_id = batch.seq_id[j][0]; - - for (int i = 0; i < n_kv; ++i) { - if (!kv_self.cells[i].has_seq_id(seq_id) || kv_self.cells[i].pos > pos) { - data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY; - } - } - } - } - } + cb(KQ_mask, "KQ_mask", -1); - // KQ_pos - contains the positions - struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - offload_func_kq(KQ_pos); - ggml_set_name(KQ_pos, "KQ_pos"); - ggml_allocr_alloc(lctx.alloc, KQ_pos); - if (!ggml_allocr_is_measure(lctx.alloc)) { - int * data = (int *) KQ_pos->data; - for (int i = 0; i < n_tokens; ++i) { - data[i] = batch.pos[i]; - } - } - - // shift the entire K-cache if needed - if (do_rope_shift) { - struct ggml_tensor * K_shift = ggml_new_tensor_1d(ctx0, 
GGML_TYPE_I32, n_ctx); - offload_func_kq(K_shift); - ggml_set_name(K_shift, "K_shift"); - ggml_allocr_alloc(lctx.alloc, K_shift); - if (!ggml_allocr_is_measure(lctx.alloc)) { - int * data = (int *) K_shift->data; - for (int i = 0; i < n_ctx; ++i) { - data[i] = kv_self.cells[i].delta; - } - } + pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos); + cb(pos, "pos_embd", -1); - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * tmp = - ggml_rope_custom_inplace(ctx0, - ggml_view_3d(ctx0, kv_self.k, - n_embd_head, n_head_kv, n_ctx, - ggml_element_size(kv_self.k)*n_embd_head, - ggml_element_size(kv_self.k)*n_embd_gqa, - ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il), - K_shift, n_embd_head, 2, 0, freq_base, freq_scale); - offload_func_kq(tmp); - ggml_build_forward_expand(gf, tmp); - } - } + inpL = ggml_add(ctx0, inpL, pos); + cb(inpL, "inpL", -1); for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * attn_norm; - - offload_func_t offload_func = llama_nop; - -#ifdef GGML_USE_CUBLAS - if (il >= i_gpu_start) { - offload_func = ggml_cuda_assign_buffers_no_alloc; - } -#endif // GGML_USE_CUBLAS + cur = llm_build_norm(ctx0, inpL, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, norm_eps, cb, il); + cb(cur, "attn_norm", il); // self-attention - // TODO: refactor into common function (shared with LLaMA) { - attn_norm = ggml_norm(ctx0, inpL, norm_eps); - offload_func(attn_norm); - - attn_norm = ggml_add(ctx0, - ggml_mul(ctx0, attn_norm, model.layers[il].attn_norm), - model.layers[il].attn_norm_b); - offload_func(attn_norm->src[0]); - offload_func(attn_norm); - - if (model.layers[il].attn_norm_2) { // Falcon-40B - cur = ggml_norm(ctx0, inpL, norm_eps); - offload_func(cur); - - cur = ggml_add(ctx0, - ggml_mul(ctx0, cur, model.layers[il].attn_norm_2), - model.layers[il].attn_norm_2_b); - offload_func(cur->src[0]); - offload_func(cur); - } else { // Falcon 7B - cur = attn_norm; - } - - // compute QKV - cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); - offload_func_kq(cur); - - // Note that the strides for Kcur, Vcur are set up so that the - // resulting views are misaligned with the tensor's storage - // (by applying the K/V offset we shift the tensor's original - // view to stick out behind the viewed QKV tensor's allocated - // memory, so to say). This is ok because no actual accesses - // happen to that out-of-range memory, but it can require some - // trickery when trying to accurately dump these views for - // debugging. 
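// [editor's note — illustrative sketch, not part of the patch]
// Both the old and the new Falcon paths carve Q, K and V out of one fused wqkv
// projection. Per token that projection lays out n_head query heads, then
// n_head_kv key heads, then n_head_kv value heads, each n_embd_head wide, which
// is where the view offsets below come from. A hedged restatement of those byte
// offsets (wsize is the element size of the projection's type; the helper name
// is illustrative only):

#include <stddef.h>

static void fused_qkv_offsets(size_t wsize, int n_embd_head, int n_head, int n_head_kv,
                              size_t * q_off, size_t * k_off, size_t * v_off) {
    *q_off = 0;                                                            // queries first
    *k_off = wsize * (size_t) n_embd_head * (size_t) n_head;               // then keys
    *v_off = wsize * (size_t) n_embd_head * (size_t) (n_head + n_head_kv); // then values
}

// the per-token row stride of the fused tensor is
// wsize * n_embd_head * (n_head + 2*n_head_kv).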
- - const size_t wsize = ggml_type_size(cur->type); - - // TODO: these 2 ggml_conts are technically not needed, but we add them until CUDA support for - // non-contiguous views is added for the rope operator - struct ggml_tensor * tmpq = ggml_cont(ctx0, ggml_view_3d( - ctx0, cur, n_embd_head, n_head, n_tokens, - wsize * n_embd_head, - wsize * n_embd_head * (n_head + 2 * n_head_kv), - 0)); - offload_func_kq(tmpq); - - struct ggml_tensor * tmpk = ggml_cont(ctx0, ggml_view_3d( - ctx0, cur, n_embd_head, n_head_kv, n_tokens, - wsize * n_embd_head, - wsize * n_embd_head * (n_head + 2 * n_head_kv), - wsize * n_embd_head * n_head)); - offload_func_kq(tmpk); - - struct ggml_tensor * tmpv = ggml_view_3d( - ctx0, cur, n_embd_head, n_head_kv, n_tokens, - wsize * n_embd_head, - wsize * n_embd_head * (n_head + 2 * n_head_kv), - wsize * n_embd_head * (n_head + n_head_kv)); - offload_func_v(tmpv); - - // using mode = 2 for neox mode - struct ggml_tensor * Qcur = ggml_rope_custom(ctx0, tmpq, KQ_pos, n_embd_head, 2, 0, freq_base, freq_scale); - offload_func_kq(Qcur); - struct ggml_tensor * Kcur = ggml_rope_custom(ctx0, tmpk, KQ_pos, n_embd_head, 2, 0, freq_base, freq_scale); - offload_func_kq(Kcur); - - { - struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, ggml_cont(ctx0, tmpv), n_embd_gqa, n_tokens)); - offload_func_v(Vcur); - offload_func_v(Vcur->src[0]->src[0]); - ggml_set_name(Vcur, "Vcur"); - - struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, n_tokens*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + kv_head)); - offload_func_kq(k); - ggml_set_name(k, "k"); - - struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, n_tokens, n_embd_gqa, - ( n_ctx)*ggml_element_size(kv_self.v), - (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + kv_head*ggml_element_size(kv_self.v)); - offload_func_v(v); - - ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k)); - ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v)); - } + cb(cur, "wqkv", il); - struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); - offload_func_kq(Q); - ggml_set_name(Q, "Q"); - - struct ggml_tensor * K = - ggml_view_3d(ctx0, kv_self.k, - n_embd_head, n_kv, n_head_kv, - ggml_element_size(kv_self.k)*n_embd_gqa, - ggml_element_size(kv_self.k)*n_embd_head, - ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il); - offload_func_kq(K); - ggml_set_name(K, "K"); - - struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); - offload_func_kq(KQ); - ggml_set_name(KQ, "KQ"); - - struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, KQ_scale); - offload_func_kq(KQ_scaled); - ggml_set_name(KQ_scaled, "KQ_scaled"); - - struct ggml_tensor * KQ_masked = ggml_add(ctx0, KQ_scaled, KQ_mask); - offload_func_kq(KQ_masked); - ggml_set_name(KQ_masked, "KQ_masked"); - - struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked); - offload_func_v(KQ_soft_max); - ggml_set_name(KQ_soft_max, "KQ_soft_max"); + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); - struct ggml_tensor * V = - ggml_view_3d(ctx0, kv_self.v, - n_kv, n_embd_head, n_head_kv, - ggml_element_size(kv_self.v)*n_ctx, - ggml_element_size(kv_self.v)*n_ctx*n_embd_head, - ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il); - offload_func_v(V); - ggml_set_name(V, "V"); + struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + struct 
ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max); - offload_func_v(KQV); - ggml_set_name(KQV, "KQV"); + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); - struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); - offload_func_v(KQV_merged); - ggml_set_name(KQV_merged, "KQV_merged"); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - cur = ggml_cont_2d(ctx0, KQV_merged, n_embd, n_tokens); - offload_func_v(cur); - ggml_set_name(cur, "KQV_merged_contiguous"); + llm_build_kv_store(lctx, ctx0, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); - cur = ggml_mul_mat(ctx0, model.layers[il].wo, cur); - offload_func(cur); - ggml_set_name(cur, "result_wo"); + cur = llm_build_kqv(lctx, ctx0, cur, + model.layers[il].wo, model.layers[il].bo, + Qcur, KQ_scale, KQ_mask, n_tokens, n_kv, -1.0f, cb, il); + cb(cur, "kqv_out", il); } - struct ggml_tensor * attn_out = cur; + // add the input + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); - // feed forward + // FF { - struct ggml_tensor * inpFF = attn_norm; + cur = llm_build_norm(ctx0, ffn_inp, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, norm_eps, cb, il); + cb(cur, "ffn_norm", il); - cur = ggml_mul_mat(ctx0, model.layers[il].w3, inpFF); - offload_func(cur); - - cur = ggml_gelu(ctx0, cur); - offload_func(cur); - cur = ggml_mul_mat(ctx0, model.layers[il].w2, cur); - offload_func(cur); + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, + NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, + LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); + cb(cur, "ffn_out", il); } - cur = ggml_add(ctx0, cur, attn_out); - offload_func(cur); - cur = ggml_add(ctx0, cur, inpL); - offload_func(cur); - - // input for next layer - inpL = cur; + inpL = ggml_add(ctx0, cur, ffn_inp); + cb(inpL, "l_out", il); } - cur = inpL; - - // norm - { - cur = ggml_norm(ctx0, cur, norm_eps); - offload_func_nr(cur); - - cur = ggml_add(ctx0, - ggml_mul(ctx0, cur, model.output_norm), - model.output_norm_b); - ggml_set_name(cur, "result_norm"); - } + cur = llm_build_norm(ctx0, inpL, + model.output_norm, + model.output_norm_b, + LLM_NORM, norm_eps, cb, -1); + cb(cur, "result_norm", -1); cur = ggml_mul_mat(ctx0, model.output, cur); - ggml_set_name(cur, "result_output"); + cb(cur, "result_output", -1); ggml_build_forward_expand(gf, cur); - ggml_free(ctx0); return gf; } -static struct ggml_cgraph * llm_build_starcoder( +static struct ggml_cgraph * llm_build_persimmon( llama_context & lctx, - const llama_batch & batch) { - const auto & model = lctx.model; + const llama_batch & batch, + const llm_build_cb & cb, + bool worst_case) { + const auto & model = lctx.model; const auto & hparams = model.hparams; - const auto & cparams = lctx.cparams; const auto & kv_self = lctx.kv_self; GGML_ASSERT(!!kv_self.ctx); + const auto & cparams = lctx.cparams; + const int64_t n_embd = hparams.n_embd; const int64_t n_layer = hparams.n_layer; const int64_t n_ctx = cparams.n_ctx; - const int64_t n_head = hparams.n_head; const int64_t n_head_kv = hparams.n_head_kv; + const int64_t n_head = hparams.n_head; const int64_t n_embd_head = hparams.n_embd_head(); - const int64_t n_embd_gqa = hparams.n_embd_gqa(); - - GGML_ASSERT(n_embd_head == hparams.n_rot); + const int64_t n_rot = n_embd_head / 2; - const float norm_eps = 
hparams.f_norm_eps; + const float freq_base = cparams.rope_freq_base; + const float freq_scale = cparams.rope_freq_scale; + const float norm_eps = hparams.f_norm_eps; - const int n_gpu_layers = model.n_gpu_layers; + const int32_t n_tokens = batch.n_tokens; + const int32_t n_kv = worst_case ? n_ctx : kv_self.n; + const int32_t kv_head = worst_case ? n_ctx - n_tokens : kv_self.head; - const int32_t n_tokens = batch.n_tokens; - const int32_t n_kv = ggml_allocr_is_measure(lctx.alloc) ? n_ctx : kv_self.n; - const int32_t kv_head = ggml_allocr_is_measure(lctx.alloc) ? n_ctx - n_tokens : kv_self.head; + const bool do_rope_shift = worst_case || kv_self.has_shift; auto & buf_compute = lctx.buf_compute; @@ -4634,313 +4085,376 @@ static struct ggml_cgraph * llm_build_starcoder( ggml_cgraph * gf = ggml_new_graph(ctx0); struct ggml_tensor * cur; - struct ggml_tensor * token; - struct ggml_tensor * position; struct ggml_tensor * inpL; - if (batch.token) { - struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - - ggml_allocr_alloc(lctx.alloc, inp_tokens); - if (!ggml_allocr_is_measure(lctx.alloc)) { - memcpy(inp_tokens->data, batch.token, n_tokens*ggml_element_size(inp_tokens)); - } - ggml_set_name(inp_tokens, "inp_tokens"); - - token = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens); - } else { -#ifdef GGML_USE_MPI - GGML_ASSERT(false && "not implemented"); -#endif - - token = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_tokens); - - ggml_allocr_alloc(lctx.alloc, token); - if (!ggml_allocr_is_measure(lctx.alloc)) { - memcpy(token->data, batch.embd, n_tokens * n_embd * ggml_element_size(token)); - } - } - - const int i_gpu_start = n_layer - n_gpu_layers; - (void) i_gpu_start; - - // offload functions set the tensor output backend to GPU - // tensors are GPU-accelerated if any input or the output has been offloaded - offload_func_t offload_func_nr = llama_nop; // nr = non-repeating - offload_func_t offload_func_kq = llama_nop; - offload_func_t offload_func_v = llama_nop; - -#ifdef GGML_USE_CUBLAS - if (n_gpu_layers > n_layer) { - offload_func_nr = ggml_cuda_assign_buffers_no_alloc; - } - if (n_gpu_layers > n_layer + 1) { - offload_func_v = ggml_cuda_assign_buffers_no_alloc; - } - if (n_gpu_layers > n_layer + 2) { - offload_func_kq = ggml_cuda_assign_buffers_no_alloc; - } -#endif // GGML_USE_CUBLAS - - { - // Compute position embeddings. 
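// [editor's note — illustrative sketch, not part of the patch]
// StarCoder uses learned absolute position embeddings (GPT-2 style) rather than
// RoPE: for every token a row of the position table is looked up and added to
// the token embedding before the first layer. In plain C the per-token work done
// by the ggml_get_rows + ggml_add pair amounts to the following (pos_embd is the
// learned [n_ctx x n_embd] table; all names are illustrative only):

#include <stddef.h>

static void add_pos_embd(float * inpL, const float * pos_embd,
                         const int * pos, int n_tokens, int n_embd) {
    for (int t = 0; t < n_tokens; ++t) {
        const float * row = pos_embd + (size_t) pos[t] * (size_t) n_embd; // row for this position
        for (int i = 0; i < n_embd; ++i) {
            inpL[(size_t) t * (size_t) n_embd + i] += row[i];
        }
    }
}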
- struct ggml_tensor * inp_positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - ggml_allocr_alloc(lctx.alloc, inp_positions); - if (!ggml_allocr_is_measure(lctx.alloc)) { - for (int i = 0; i < n_tokens; ++i) { - ((int32_t *) inp_positions->data)[i] = batch.pos[i]; - } - } - ggml_set_name(inp_positions, "inp_positions"); + inpL = llm_build_inp_embd(ctx0, batch, model.tok_embd, n_embd, n_tokens, cb); + cb(inpL, "imp_embd", -1); - position = ggml_get_rows(ctx0, model.pos_embeddings, inp_positions); - } + struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + cb(inp_pos, "inp_pos", -1); // KQ_scale struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)"); - ggml_allocr_alloc(lctx.alloc, KQ_scale); - if (!ggml_allocr_is_measure(lctx.alloc)) { - ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head)); - } + cb(KQ_scale, "KQ_scale", -1); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); - ggml_set_name(KQ_mask, "KQ_mask"); - offload_func_kq(KQ_mask); - ggml_allocr_alloc(lctx.alloc, KQ_mask); - if (!ggml_allocr_is_measure(lctx.alloc)) { - float * data = (float *) KQ_mask->data; - memset(data, 0, ggml_nbytes(KQ_mask)); - - for (int h = 0; h < 1; ++h) { - for (int j = 0; j < n_tokens; ++j) { - const llama_pos pos = batch.pos[j]; - const llama_seq_id seq_id = batch.seq_id[j][0]; - - for (int i = 0; i < n_kv; ++i) { - if (!kv_self.cells[i].has_seq_id(seq_id) || kv_self.cells[i].pos > pos) { - data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY; - } - } - } - } - } + cb(KQ_mask, "KQ_mask", -1); - inpL = ggml_add(ctx0, token, position); - ggml_set_name(inpL, "inpL"); + if (do_rope_shift) { + llm_build_k_shift(lctx, ctx0, gf, n_rot, LLM_ROPE_NEOX, cb); + } for (int il = 0; il < n_layer; ++il) { - offload_func_t offload_func = llama_nop; + struct ggml_tensor * residual = inpL; -#ifdef GGML_USE_CUBLAS - if (il >= i_gpu_start) { - offload_func = ggml_cuda_assign_buffers_no_alloc; - } -#endif // GGML_USE_CUBLAS + cur = llm_build_norm(ctx0, inpL, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, norm_eps, cb, il); + cb(cur, "attn_norm", il); + // self attention { - // Norm - cur = ggml_norm(ctx0, inpL, norm_eps); - offload_func(cur); + cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); + + // split qkv + GGML_ASSERT(n_head_kv == n_head); + + struct ggml_tensor * tmpqkv = ggml_reshape_4d(ctx0, cur, n_embd_head, 3, n_head, n_tokens); + cb(tmpqkv, "tmpqkv", il); + + struct ggml_tensor * tmpqkv_perm = ggml_cont(ctx0, ggml_permute(ctx0, tmpqkv, 0, 3, 1, 2)); + cb(tmpqkv_perm, "tmpqkv", il); + + struct ggml_tensor * tmpq = ggml_view_3d( + ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens, + ggml_element_size(tmpqkv_perm) * n_embd_head, + ggml_element_size(tmpqkv_perm) * n_embd_head * n_head, + 0 + ); + cb(tmpq, "tmpq", il); + + struct ggml_tensor * tmpk = ggml_view_3d( + ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens, + ggml_element_size(tmpqkv_perm) * n_embd_head, + ggml_element_size(tmpqkv_perm) * n_embd_head * n_head, + ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens + ); + cb(tmpk, "tmpk", il); + + // Q/K Layernorm + tmpq = llm_build_norm(ctx0, tmpq, + model.layers[il].attn_q_norm, + model.layers[il].attn_q_norm_b, + LLM_NORM, norm_eps, cb, il); + cb(tmpq, 
"tmpq", il); + + tmpk = llm_build_norm(ctx0, tmpk, + model.layers[il].attn_k_norm, + model.layers[il].attn_k_norm_b, + LLM_NORM, norm_eps, cb, il); + cb(tmpk, "tmpk", il); + + // RoPE the first n_rot of q/k, pass the other half, and concat. + struct ggml_tensor * qrot = ggml_view_3d( + ctx0, tmpq, n_rot, n_head, n_tokens, + ggml_element_size(tmpq) * n_embd_head, + ggml_element_size(tmpq) * n_embd_head * n_head, + 0 + ); + cb(qrot, "qrot", il); + + struct ggml_tensor * krot = ggml_view_3d( + ctx0, tmpk, n_rot, n_head, n_tokens, + ggml_element_size(tmpk) * n_embd_head, + ggml_element_size(tmpk) * n_embd_head * n_head, + 0 + ); + cb(krot, "krot", il); + + // get the second half of tmpq, e.g tmpq[n_rot:, :, :] + struct ggml_tensor * qpass = ggml_view_3d( + ctx0, tmpq, n_rot, n_head, n_tokens, + ggml_element_size(tmpq) * n_embd_head, + ggml_element_size(tmpq) * n_embd_head * n_head, + ggml_element_size(tmpq) * n_rot + ); + cb(qpass, "qpass", il); + + struct ggml_tensor * kpass = ggml_view_3d( + ctx0, tmpk, n_rot, n_head, n_tokens, + ggml_element_size(tmpk) * n_embd_head, + ggml_element_size(tmpk) * n_embd_head * n_head, + ggml_element_size(tmpk) * n_rot + ); + cb(kpass, "kpass", il); + + struct ggml_tensor * qrotated = ggml_rope_custom( + ctx0, qrot, inp_pos, n_rot, 2, 0, freq_base, freq_scale + ); + cb(qrotated, "qrotated", il); + + struct ggml_tensor * krotated = ggml_rope_custom( + ctx0, krot, inp_pos, n_rot, 2, 0, freq_base, freq_scale + ); + cb(krotated, "krotated", il); + + // ggml currently only supports concatenation on dim=2 + // so we need to permute qrot, qpass, concat, then permute back. + qrotated = ggml_cont(ctx0, ggml_permute(ctx0, qrotated, 2, 1, 0, 3)); + cb(qrotated, "qrotated", il); + + krotated = ggml_cont(ctx0, ggml_permute(ctx0, krotated, 2, 1, 0, 3)); + cb(krotated, "krotated", il); + + qpass = ggml_cont(ctx0, ggml_permute(ctx0, qpass, 2, 1, 0, 3)); + cb(qpass, "qpass", il); + + kpass = ggml_cont(ctx0, ggml_permute(ctx0, kpass, 2, 1, 0, 3)); + cb(kpass, "kpass", il); + + struct ggml_tensor * Qcur = ggml_concat(ctx0, qrotated, qpass); + cb(Qcur, "Qcur", il); - cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].attn_norm), model.layers[il].attn_norm_b); - offload_func(cur); + struct ggml_tensor * Kcur = ggml_concat(ctx0, krotated, kpass); + cb(Kcur, "Kcur", il); + + struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 1, 2, 0, 3)); + cb(Q, "Q", il); + + Kcur = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 2, 1, 0, 3)); + cb(Kcur, "Kcur", il); + + struct ggml_tensor * Vcur = ggml_view_3d( + ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens, + ggml_element_size(tmpqkv_perm) * n_embd_head, + ggml_element_size(tmpqkv_perm) * n_embd_head * n_head, + ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens * 2 + ); + cb(Vcur, "Vcur", il); + + llm_build_kv_store(lctx, ctx0, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); + + // TODO: not tested, could be broken + cur = llm_build_kqv(lctx, ctx0, Q, + model.layers[il].wo, model.layers[il].bo, + Q, KQ_scale, KQ_mask, n_tokens, n_kv, -1.0f, cb, il); + cb(cur, "kqv_out", il); } + struct ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network { - // Self Attention - cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); - offload_func_kq(cur); + cur = llm_build_norm(ctx0, ffn_inp, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, norm_eps, cb, il); + cb(cur, "ffn_norm", il); - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - 
offload_func_kq(cur); + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, + NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, + LLM_FFN_RELU_SQR, LLM_FFN_SEQ, cb, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "l_out", il); - struct ggml_tensor * tmpq = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - struct ggml_tensor * tmpk = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - struct ggml_tensor * tmpv = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + inpL = cur; + } - ggml_set_name(tmpq, "tmpq"); - ggml_set_name(tmpk, "tmpk"); - ggml_set_name(tmpv, "tmpv"); + cur = inpL; - offload_func_kq(tmpq); - offload_func_kq(tmpk); - offload_func_v (tmpv); + cur = llm_build_norm(ctx0, cur, + model.output_norm, + model.output_norm_b, + LLM_NORM, norm_eps, cb, -1); + cb(cur, "result_norm", -1); - struct ggml_tensor * Qcur = ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens); - struct ggml_tensor * Kcur = tmpk; + cur = ggml_mul_mat(ctx0, model.output, cur); + cb(cur, "result_output", -1); - { - struct ggml_tensor * Vcur = ggml_transpose(ctx0, tmpv); - offload_func_v(Vcur); - ggml_set_name(Vcur, "Vcur"); - - struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, n_tokens*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + kv_head)); - offload_func_kq(k); - ggml_set_name(k, "k"); - - struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, n_tokens, n_embd_gqa, - ( n_ctx)*ggml_element_size(kv_self.v), - (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + kv_head*ggml_element_size(kv_self.v)); - offload_func_v(v); - ggml_set_name(v, "v"); - - ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k)); - ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v)); - } + ggml_build_forward_expand(gf, cur); - struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); - offload_func_kq(Q); - ggml_set_name(Q, "Q"); + ggml_free(ctx0); - struct ggml_tensor * K = - ggml_view_3d(ctx0, kv_self.k, - n_embd_head, n_kv, n_head_kv, - ggml_element_size(kv_self.k)*n_embd_gqa, - ggml_element_size(kv_self.k)*n_embd_head, - ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il); - offload_func_kq(K); - ggml_set_name(K, "K"); - - // K * Q - struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); - offload_func_kq(KQ); - ggml_set_name(KQ, "KQ"); - - // KQ_scaled = KQ / sqrt(n_embd_head) - // KQ_scaled shape [n_past + n_tokens, n_tokens, n_head, 1] - struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale); - offload_func_kq(KQ_scaled); - ggml_set_name(KQ_scaled, "KQ_scaled"); - - // KQ_masked = mask_past(KQ_scaled) - struct ggml_tensor * KQ_masked = ggml_add(ctx0, KQ_scaled, KQ_mask); - offload_func_kq(KQ_masked); - ggml_set_name(KQ_masked, "KQ_masked"); - - // KQ = soft_max(KQ_masked) - struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked); - offload_func_v(KQ_soft_max); - ggml_set_name(KQ_soft_max, "KQ_soft_max"); - - // split cached V into n_head heads - struct ggml_tensor * V = - ggml_view_3d(ctx0, kv_self.v, - n_kv, n_embd_head, n_head_kv, - ggml_element_size(kv_self.v)*n_ctx, - ggml_element_size(kv_self.v)*n_ctx*n_embd_head, - ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il); - ggml_set_name(V, "V"); - - struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max); - offload_func_v(KQV); - ggml_set_name(KQV, 
"KQV"); - - struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); - offload_func_v(KQV_merged); - ggml_set_name(KQV_merged, "KQV_merged"); - - cur = ggml_cont_2d(ctx0, KQV_merged, n_embd, n_tokens); - offload_func_v(cur); - ggml_set_name(cur, "KQV_merged_contiguous"); - } - - // Projection - cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wo, cur), model.layers[il].bo); - offload_func(cur); + return gf; +} - // Add the input - cur = ggml_add(ctx0, cur, inpL); - offload_func(cur); +static struct ggml_cgraph * llm_build_refact( + llama_context & lctx, + const llama_batch & batch, + const llm_build_cb & cb, + bool worst_case) { + const auto & model = lctx.model; + const auto & hparams = model.hparams; + const auto & cparams = lctx.cparams; - struct ggml_tensor * inpFF = cur; + const auto & kv_self = lctx.kv_self; - // FF + GGML_ASSERT(!!kv_self.ctx); + + const int64_t n_embd = hparams.n_embd; + const int64_t n_layer = hparams.n_layer; + const int64_t n_ctx = cparams.n_ctx; + const int64_t n_head = hparams.n_head; + const int64_t n_head_kv = hparams.n_head_kv; + const int64_t n_embd_head = hparams.n_embd_head(); + + const float norm_rms_eps = hparams.f_norm_rms_eps; + + const int32_t n_tokens = batch.n_tokens; + const int32_t n_kv = worst_case ? n_ctx : kv_self.n; + const int32_t kv_head = worst_case ? n_ctx - n_tokens : kv_self.head; + + auto & buf_compute = lctx.buf_compute; + + struct ggml_init_params params = { + /*.mem_size =*/ buf_compute.size, + /*.mem_buffer =*/ buf_compute.data, + /*.no_alloc =*/ true, + }; + + struct ggml_context * ctx0 = ggml_init(params); + + ggml_cgraph * gf = ggml_new_graph(ctx0); + + struct ggml_tensor * cur; + struct ggml_tensor * inpL; + + inpL = llm_build_inp_embd(ctx0, batch, model.tok_embd, n_embd, n_tokens, cb); + cb(inpL, "inp_embd", -1); + + // KQ_scale + struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); + cb(KQ_scale, "KQ_scale", -1); + + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); + cb(KQ_mask, "KQ_mask", -1); + + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * inpSA = inpL; + + cur = llm_build_norm(ctx0, inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, norm_rms_eps, cb, il); + cb(cur, "attn_norm", il); + + // self-attention { - // Norm - { - cur = ggml_norm(ctx0, inpFF, norm_eps); - offload_func_nr(cur); + struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); - cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ffn_norm), model.layers[il].ffn_norm_b); - offload_func_nr(cur); - } + struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); - cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].w3, cur), model.layers[il].b3); - offload_func(cur); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + cb(Kcur, "Kcur", il); - // GELU activation - cur = ggml_gelu(ctx0, cur); - offload_func(cur); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + cb(Qcur, "Qcur", il); - // Projection - cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].w2, cur), model.layers[il].b2); - offload_func(cur); + llm_build_kv_store(lctx, ctx0, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); + + cur = llm_build_kqv(lctx, ctx0, Qcur, + model.layers[il].wo, NULL, + 
Qcur, KQ_scale, KQ_mask, n_tokens, n_kv, 8.0f, cb, il); + cb(cur, "kqv_out", il); } - inpL = ggml_add(ctx0, cur, inpFF); + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); - } + // feed-forward network + { + cur = llm_build_norm(ctx0, ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, norm_rms_eps, cb, il); + cb(cur, "ffn_norm", il); - // Output Norm - { - cur = ggml_norm(ctx0, inpL, norm_eps); - offload_func_nr(cur); + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, NULL, + model.layers[il].ffn_gate, NULL, + model.layers[il].ffn_down, NULL, + LLM_FFN_SILU, LLM_FFN_PAR, cb, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "l_out", il); - cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.output_norm), model.output_norm_b); - ggml_set_name(cur, "result_norm"); + // input for next layer + inpL = cur; } + cur = inpL; + + cur = llm_build_norm(ctx0, cur, + model.output_norm, NULL, + LLM_NORM_RMS, norm_rms_eps, cb, -1); + cb(cur, "result_norm", -1); + + // lm_head cur = ggml_mul_mat(ctx0, model.output, cur); - ggml_set_name(cur, "result_output"); + cb(cur, "result_output", -1); ggml_build_forward_expand(gf, cur); + ggml_free(ctx0); return gf; } -static struct ggml_cgraph * llm_build_persimmon( +static struct ggml_cgraph * llm_build_bloom( llama_context & lctx, - const llama_batch & batch) { - const auto & model = lctx.model; + const llama_batch & batch, + const llm_build_cb & cb, + bool worst_case) { + const auto & model = lctx.model; const auto & hparams = model.hparams; + const auto & cparams = lctx.cparams; const auto & kv_self = lctx.kv_self; GGML_ASSERT(!!kv_self.ctx); - const auto & cparams = lctx.cparams; const int64_t n_embd = hparams.n_embd; const int64_t n_layer = hparams.n_layer; const int64_t n_ctx = cparams.n_ctx; - const int64_t n_head_kv = hparams.n_head_kv; const int64_t n_head = hparams.n_head; const int64_t n_embd_head = hparams.n_embd_head(); const int64_t n_embd_gqa = hparams.n_embd_gqa(); - const size_t n_rot = n_embd_head / 2; - - const float freq_base = cparams.rope_freq_base; - const float freq_scale = cparams.rope_freq_scale; - const float norm_eps = hparams.f_norm_eps; - - const int n_gpu_layers = model.n_gpu_layers; + GGML_ASSERT(n_embd_head == hparams.n_rot); - const int32_t n_tokens = batch.n_tokens; - const int32_t n_kv = ggml_allocr_is_measure(lctx.alloc) ? n_ctx : kv_self.n; - const int32_t kv_head = ggml_allocr_is_measure(lctx.alloc) ? n_ctx - n_tokens : kv_self.head; + const float norm_eps = hparams.f_norm_eps; - const bool do_rope_shift = ggml_allocr_is_measure(lctx.alloc) || kv_self.has_shift; + const int32_t n_tokens = batch.n_tokens; + const int32_t n_kv = worst_case ? n_ctx : kv_self.n; + const int32_t kv_head = worst_case ? 
n_ctx - n_tokens : kv_self.head; auto & buf_compute = lctx.buf_compute; + struct ggml_init_params params = { /*.mem_size =*/ buf_compute.size, /*.mem_buffer =*/ buf_compute.data, - /*.no_alloc =*/ true, + /*.no_alloc =*/ false, }; + params.no_alloc = true; + struct ggml_context * ctx0 = ggml_init(params); ggml_cgraph * gf = ggml_new_graph(ctx0); @@ -4948,359 +4462,101 @@ static struct ggml_cgraph * llm_build_persimmon( struct ggml_tensor * cur; struct ggml_tensor * inpL; - if (batch.token) { - struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + inpL = llm_build_inp_embd(ctx0, batch, model.tok_embd, n_embd, n_tokens, cb); + cb(inpL, "inp_embd", -1); - ggml_allocr_alloc(lctx.alloc, inp_tokens); - if (!ggml_allocr_is_measure(lctx.alloc)) { - memcpy(inp_tokens->data, batch.token, n_tokens*ggml_element_size(inp_tokens)); - } - ggml_set_name(inp_tokens, "inp_tokens"); - inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens); - } else { - inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_tokens); - ggml_allocr_alloc(lctx.alloc, inpL); - if (!ggml_allocr_is_measure(lctx.alloc)) { - memcpy(inpL->data, batch.embd, n_tokens * n_embd * ggml_element_size(inpL)); - } - } - const int i_gpu_start = n_layer - n_gpu_layers; - (void) i_gpu_start; - offload_func_t offload_func_nr = llama_nop; // nr = non-repeating - offload_func_t offload_func_kq = llama_nop; - offload_func_t offload_func_v = llama_nop; // KQ_scale struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - ggml_allocr_alloc(lctx.alloc, KQ_scale); - if (!ggml_allocr_is_measure(lctx.alloc)) { - ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd_head))); - } - ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)"); + cb(KQ_scale, "KQ_scale", -1); + + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); - offload_func_kq(KQ_mask); - ggml_set_name(KQ_mask, "KQ_mask"); - ggml_allocr_alloc(lctx.alloc, KQ_mask); - - if (!ggml_allocr_is_measure(lctx.alloc)) { - float * data = (float *) KQ_mask->data; - memset(data, 0, ggml_nbytes(KQ_mask)); - for (int h = 0; h < 1; ++h) { - for (int j = 0; j < n_tokens; ++j) { - const llama_pos pos = batch.pos[j]; - const llama_seq_id seq_id = batch.seq_id[j][0]; - for (int i = 0; i < n_kv; ++i) { - if (!kv_self.cells[i].has_seq_id(seq_id) || kv_self.cells[i].pos > pos) { - data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY; - } - } - } - } - } + cb(KQ_mask, "KQ_mask", -1); - struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - offload_func_kq(KQ_pos); - ggml_set_name(KQ_pos, "KQ_pos"); - ggml_allocr_alloc(lctx.alloc, KQ_pos); - if (!ggml_allocr_is_measure(lctx.alloc)) { - int * data = (int *) KQ_pos->data; - for (int i = 0; i < n_tokens; ++i) { - data[i] = batch.pos[i]; - } - } - if (do_rope_shift) { - struct ggml_tensor * K_shift = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_ctx); - offload_func_kq(K_shift); - ggml_set_name(K_shift, "K_shift"); - ggml_allocr_alloc(lctx.alloc, K_shift); - if (!ggml_allocr_is_measure(lctx.alloc)) { - int * data = (int *) K_shift->data; - for (int i = 0; i < n_ctx; ++i) { - data[i] = kv_self.cells[i].delta; - } - } - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * tmp = - // we rotate only the first n_rot dimensions. 
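// [editor's note — illustrative sketch, not part of the patch]
// Keys are stored in the cache already RoPE-ed at their original positions, so
// when cells are moved (kv_self.has_shift) the cached K has to be re-rotated by
// each cell's position delta. Rotations compose additively, which is why applying
// RoPE with the per-cell delta as the "position" is sufficient; this is what the
// removed per-architecture loops did and what llm_build_k_shift now factors out.
// A minimal sketch of shifting one already-rotated (even, odd) pair by an extra
// angle delta * theta (names are illustrative only):

#include <math.h>

static void rope_shift_pair(float * x0, float * x1, int delta, float theta) {
    const float a = (float) delta * theta;   // additional rotation angle
    const float c = cosf(a), s = sinf(a);
    const float v0 = *x0, v1 = *x1;
    *x0 = v0 * c - v1 * s;                   // standard 2-D rotation
    *x1 = v0 * s + v1 * c;
}

// llm_build_k_shift applies this kind of extra rotation in place to every cached K head.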
- ggml_rope_custom_inplace(ctx0, - ggml_view_3d(ctx0, kv_self.k, - n_rot, n_head, n_ctx, - ggml_element_size(kv_self.k)*n_embd_gqa, - ggml_element_size(kv_self.k)*n_embd_head, - ggml_element_size(kv_self.k)*(n_embd_head*n_ctx*il) - ), - K_shift, n_rot, 2, 0, freq_base, freq_scale); - offload_func_kq(tmp); - ggml_build_forward_expand(gf, tmp); - } - } - for (int il=0; il < n_layer; ++il) { - struct ggml_tensor * residual = inpL; - offload_func_t offload_func = llama_nop; - { - cur = ggml_norm(ctx0, inpL, norm_eps); - offload_func(cur); - cur = ggml_mul(ctx0, cur, model.layers[il].attn_norm); - offload_func(cur); - cur = ggml_add(ctx0, cur, model.layers[il].attn_norm_b); - offload_func(cur); - ggml_format_name(cur, "input_layernorm_%d", il); - } - // self attention + inpL = llm_build_norm(ctx0, inpL, + model.tok_norm, + model.tok_norm_b, + LLM_NORM, norm_eps, cb, -1); + cb(inpL, "inp_norm", -1); + + for (int il = 0; il < n_layer; ++il) { + cur = llm_build_norm(ctx0, inpL, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, norm_eps, cb, il); + cb(cur, "attn_norm", il); + + // self-attention { cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); - offload_func_kq(cur); - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - offload_func_kq(cur); + cb(cur, "wqkv", il); - // split qkv - GGML_ASSERT(n_head_kv == n_head); - ggml_set_name(cur, format("qkv_%d", il).c_str()); - struct ggml_tensor * tmpqkv = ggml_reshape_4d(ctx0, cur, n_embd_head, 3, n_head, n_tokens); - offload_func_kq(tmpqkv); - struct ggml_tensor * tmpqkv_perm = ggml_cont(ctx0, ggml_permute(ctx0, tmpqkv, 0, 3, 1, 2)); - offload_func_kq(tmpqkv_perm); - ggml_format_name(tmpqkv_perm, "tmpqkv_perm_%d", il); - struct ggml_tensor * tmpq = ggml_view_3d( - ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens, - ggml_element_size(tmpqkv_perm) * n_embd_head, - ggml_element_size(tmpqkv_perm) * n_embd_head * n_head, - 0 - ); - offload_func_kq(tmpq); - struct ggml_tensor * tmpk = ggml_view_3d( - ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens, - ggml_element_size(tmpqkv_perm) * n_embd_head, - ggml_element_size(tmpqkv_perm) * n_embd_head * n_head, - ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens - ); - offload_func_kq(tmpk); - // Q/K Layernorm - tmpq = ggml_norm(ctx0, tmpq, norm_eps); - offload_func_kq(tmpq); - tmpq = ggml_mul(ctx0, tmpq, model.layers[il].attn_q_norm); - offload_func_kq(tmpq); - tmpq = ggml_add(ctx0, tmpq, model.layers[il].attn_q_norm_b); - offload_func_kq(tmpq); - - tmpk = ggml_norm(ctx0, tmpk, norm_eps); - offload_func_v(tmpk); - tmpk = ggml_mul(ctx0, tmpk, model.layers[il].attn_k_norm); - offload_func_v(tmpk); - tmpk = ggml_add(ctx0, tmpk, model.layers[il].attn_k_norm_b); - offload_func_v(tmpk); + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); - // RoPE the first n_rot of q/k, pass the other half, and concat. 
- struct ggml_tensor * qrot = ggml_view_3d( - ctx0, tmpq, n_rot, n_head, n_tokens, - ggml_element_size(tmpq) * n_embd_head, - ggml_element_size(tmpq) * n_embd_head * n_head, - 0 - ); - offload_func_kq(qrot); - ggml_format_name(qrot, "qrot_%d", il); - struct ggml_tensor * krot = ggml_view_3d( - ctx0, tmpk, n_rot, n_head, n_tokens, - ggml_element_size(tmpk) * n_embd_head, - ggml_element_size(tmpk) * n_embd_head * n_head, - 0 - ); - offload_func_kq(krot); - ggml_format_name(krot, "krot_%d", il); + struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - // get the second half of tmpq, e.g tmpq[n_rot:, :, :] - struct ggml_tensor * qpass = ggml_view_3d( - ctx0, tmpq, n_rot, n_head, n_tokens, - ggml_element_size(tmpq) * n_embd_head, - ggml_element_size(tmpq) * n_embd_head * n_head, - ggml_element_size(tmpq) * n_rot - ); - offload_func_kq(qpass); - ggml_format_name(qpass, "qpass_%d", il); - struct ggml_tensor * kpass = ggml_view_3d( - ctx0, tmpk, n_rot, n_head, n_tokens, - ggml_element_size(tmpk) * n_embd_head, - ggml_element_size(tmpk) * n_embd_head * n_head, - ggml_element_size(tmpk) * n_rot - ); - offload_func_kq(kpass); - ggml_format_name(kpass, "kpass_%d", il); + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); - struct ggml_tensor * qrotated = ggml_rope_custom( - ctx0, qrot, KQ_pos, n_rot, 2, 0, freq_base, freq_scale - ); - offload_func_kq(qrotated); - struct ggml_tensor * krotated = ggml_rope_custom( - ctx0, krot, KQ_pos, n_rot, 2, 0, freq_base, freq_scale - ); - offload_func_kq(krotated); - // ggml currently only supports concatenation on dim=2 - // so we need to permute qrot, qpass, concat, then permute back. 
- qrotated = ggml_cont(ctx0, ggml_permute(ctx0, qrotated, 2, 1, 0, 3)); - offload_func_kq(qrotated); - krotated = ggml_cont(ctx0, ggml_permute(ctx0, krotated, 2, 1, 0, 3)); - offload_func_kq(krotated); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - qpass = ggml_cont(ctx0, ggml_permute(ctx0, qpass, 2, 1, 0, 3)); - offload_func_kq(qpass); - kpass = ggml_cont(ctx0, ggml_permute(ctx0, kpass, 2, 1, 0, 3)); - offload_func_kq(kpass); + llm_build_kv_store(lctx, ctx0, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); - struct ggml_tensor * Qcur = ggml_concat(ctx0, qrotated, qpass); - offload_func_kq(Qcur); - struct ggml_tensor * Kcur = ggml_concat(ctx0, krotated, kpass); - offload_func_kq(Kcur); + cur = llm_build_kqv(lctx, ctx0, Qcur, + model.layers[il].wo, model.layers[il].bo, + Qcur, KQ_scale, KQ_mask, n_tokens, n_kv, 8.0f, cb, il); + cb(cur, "kqv_out", il); + } - struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 1, 2, 0, 3)); - offload_func_kq(Q); + // Add the input + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); - Kcur = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 2, 1, 0, 3)); - offload_func_kq(Kcur); - { - struct ggml_tensor * tmpv = ggml_view_3d( - ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens, - ggml_element_size(tmpqkv_perm) * n_embd_head, - ggml_element_size(tmpqkv_perm) * n_embd_head * n_head, - ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens * 2 - ); - offload_func_v(tmpv); - // store K, V in cache - struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd_gqa, n_tokens)); - offload_func_v(Vcur); - ggml_set_name(Vcur, "Vcur"); - - struct ggml_tensor * k = ggml_view_1d( - ctx0, kv_self.k, n_tokens*n_embd_gqa, - (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + kv_head) - ); - offload_func_kq(k); - ggml_set_name(k, "k"); - - struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, n_tokens, n_embd_gqa, - ( n_ctx)*ggml_element_size(kv_self.v), - (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + kv_head*ggml_element_size(kv_self.v)); - offload_func_v(v); - ggml_set_name(v, "v"); - - // important: storing RoPE-ed version of K in the KV cache! 
- ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k)); - ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v)); - } - struct ggml_tensor * K = ggml_view_3d(ctx0, kv_self.k, - n_embd_head, n_kv, n_head_kv, - ggml_element_size(kv_self.k)*n_embd_gqa, - ggml_element_size(kv_self.k)*n_embd_head, - ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il); - - offload_func_kq(K); - ggml_format_name(K, "K_%d", il); - - struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); - offload_func_kq(KQ); - ggml_set_name(KQ, "KQ"); - - struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, KQ_scale); - offload_func_kq(KQ_scaled); - ggml_set_name(KQ_scaled, "KQ_scaled"); - - struct ggml_tensor * KQ_masked = ggml_add(ctx0, KQ_scaled, KQ_mask); - offload_func_kq(KQ_masked); - ggml_set_name(KQ_masked, "KQ_masked"); - - struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked); - offload_func_kq(KQ_soft_max); - ggml_set_name(KQ_soft_max, "KQ_soft_max"); - - struct ggml_tensor * V = - ggml_view_3d(ctx0, kv_self.v, - n_kv, n_embd_head, n_head_kv, - ggml_element_size(kv_self.v)*n_ctx, - ggml_element_size(kv_self.v)*n_ctx*n_embd_head, - ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il); - offload_func_v(V); - ggml_set_name(V, "V"); - - struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max); - offload_func_v(KQV); - ggml_set_name(KQV, "KQV"); - - struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); - offload_func_v(KQV_merged); - ggml_set_name(KQV_merged, "KQV_merged"); - - cur = ggml_cont_2d(ctx0, KQV_merged, n_embd, n_tokens); - offload_func_v(cur); - ggml_set_name(cur, "KQV_merged_contiguous"); - - cur = ggml_mul_mat(ctx0, model.layers[il].wo, cur); - offload_func(cur); - cur = ggml_add(ctx0, cur, model.layers[il].bo); - offload_func(cur); - ggml_set_name(cur, "result_wo"); - } - - struct ggml_tensor * inpFF = ggml_add(ctx0, residual, cur); - offload_func(inpFF); - ggml_set_name(inpFF, "inpFF"); + // FF { - // MLP - { - // Norm - cur = ggml_norm(ctx0, inpFF, norm_eps); - offload_func(cur); - cur = ggml_add(ctx0, - ggml_mul(ctx0, cur, model.layers[il].ffn_norm), - model.layers[il].ffn_norm_b - ); - ggml_set_name(cur, "ffn_norm"); - offload_func(cur); - } - cur = ggml_mul_mat(ctx0, model.layers[il].w3, cur); - offload_func(cur); - - cur = ggml_add(ctx0, cur, model.layers[il].b3); - offload_func(cur); - ggml_set_name(cur, "result_ffn_up"); - - cur = ggml_sqr(ctx0, ggml_relu(ctx0, cur)); - ggml_set_name(cur, "result_ffn_act"); - offload_func(cur); - offload_func(cur->src[0]); - - cur = ggml_mul_mat(ctx0, model.layers[il].w2, cur); - offload_func(cur); - cur = ggml_add(ctx0, - cur, - model.layers[il].b2); - offload_func(cur); - ggml_set_name(cur, "outFF"); - } - cur = ggml_add(ctx0, cur, inpFF); - offload_func(cur); - ggml_set_name(cur, "inpFF_+_outFF"); - inpL = cur; - } - cur = inpL; - { - cur = ggml_norm(ctx0, cur, norm_eps); - offload_func_nr(cur); - cur = ggml_mul(ctx0, cur, model.output_norm); - offload_func_nr(cur); + cur = llm_build_norm(ctx0, ffn_inp, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, norm_eps, cb, il); + cb(cur, "ffn_norm", il); - cur = ggml_add(ctx0, cur, model.output_norm_b); - // offload_func_nr(cur); + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, + NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, + LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); + cb(cur, "ffn_out", il); + } - ggml_set_name(cur, "result_norm"); + inpL = ggml_add(ctx0, cur, ffn_inp); + cb(inpL, "l_out", il); } + + cur = 
llm_build_norm(ctx0, inpL, + model.output_norm, + model.output_norm_b, + LLM_NORM, norm_eps, cb, -1); + cb(cur, "result_norm", -1); + cur = ggml_mul_mat(ctx0, model.output, cur); - ggml_set_name(cur, "result_output"); + cb(cur, "result_output", -1); + ggml_build_forward_expand(gf, cur); + ggml_free(ctx0); + return gf; } -static struct ggml_cgraph * llm_build_bloom( +static struct ggml_cgraph * llm_build_mpt( llama_context & lctx, - const llama_batch & batch) { + const llama_batch & batch, + const llm_build_cb & cb, + bool worst_case) { const auto & model = lctx.model; const auto & hparams = model.hparams; const auto & cparams = lctx.cparams; @@ -5313,17 +4569,16 @@ static struct ggml_cgraph * llm_build_bloom( const int64_t n_layer = hparams.n_layer; const int64_t n_ctx = cparams.n_ctx; const int64_t n_head = hparams.n_head; - const int64_t n_head_kv = hparams.n_head_kv; const int64_t n_embd_head = hparams.n_embd_head(); const int64_t n_embd_gqa = hparams.n_embd_gqa(); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - const float norm_eps = hparams.f_norm_eps; + const float norm_eps = hparams.f_norm_eps; + const float clamp_kqv = hparams.f_clamp_kqv; + const float max_alibi_bias = hparams.f_max_alibi_bias; const int32_t n_tokens = batch.n_tokens; - const int32_t n_kv = ggml_allocr_is_measure(lctx.alloc) ? n_ctx : kv_self.n; - const int32_t kv_head = ggml_allocr_is_measure(lctx.alloc) ? n_ctx - n_tokens : kv_self.head; + const int32_t n_kv = worst_case ? n_ctx : kv_self.n; + const int32_t kv_head = worst_case ? n_ctx - n_tokens : kv_self.head; auto & buf_compute = lctx.buf_compute; @@ -5340,567 +4595,591 @@ static struct ggml_cgraph * llm_build_bloom( ggml_cgraph * gf = ggml_new_graph(ctx0); struct ggml_tensor * cur; - struct ggml_tensor * token; struct ggml_tensor * inpL; - if (batch.token) { - struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - - ggml_allocr_alloc(lctx.alloc, inp_tokens); - if (!ggml_allocr_is_measure(lctx.alloc)) { - memcpy(inp_tokens->data, batch.token, n_tokens*ggml_element_size(inp_tokens)); - } - ggml_set_name(inp_tokens, "inp_tokens"); - - token = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens); - } else { -#ifdef GGML_USE_MPI - GGML_ASSERT(false && "not implemented"); -#endif - - token = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_tokens); - - ggml_allocr_alloc(lctx.alloc, token); - if (!ggml_allocr_is_measure(lctx.alloc)) { - memcpy(token->data, batch.embd, n_tokens * n_embd * ggml_element_size(token)); - } - } + inpL = llm_build_inp_embd(ctx0, batch, model.tok_embd, n_embd, n_tokens, cb); + cb(inpL, "inp_embd", -1); // KQ_scale struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)"); - ggml_allocr_alloc(lctx.alloc, KQ_scale); - if (!ggml_allocr_is_measure(lctx.alloc)) { - ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head)); - } + cb(KQ_scale, "KQ_scale", -1); // KQ_mask (mask for 1 head, it will be broadcasted to all heads) struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); - ggml_set_name(KQ_mask, "KQ_mask"); - ggml_allocr_alloc(lctx.alloc, KQ_mask); - if (!ggml_allocr_is_measure(lctx.alloc)) { - float * data = (float *) KQ_mask->data; - memset(data, 0, ggml_nbytes(KQ_mask)); - - for (int h = 0; h < 1; ++h) { - for (int j = 0; j < n_tokens; ++j) { - const llama_pos pos = batch.pos[j]; - const llama_seq_id seq_id = batch.seq_id[j][0]; - - for (int i = 0; i < n_kv; ++i) { - if 
(!kv_self.cells[i].has_seq_id(seq_id) || kv_self.cells[i].pos > pos) { - data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY; - } - } - } - } - } + cb(KQ_mask, "KQ_mask", -1); - // norm - { - inpL = ggml_norm(ctx0, token, norm_eps); - inpL = ggml_add(ctx0, ggml_mul(ctx0, inpL, model.tok_norm), model.tok_norm_b); - } + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * attn_norm; - ggml_set_name(inpL, "inpL"); + attn_norm = llm_build_norm(ctx0, inpL, + model.layers[il].attn_norm, + NULL, + LLM_NORM, norm_eps, cb, il); + cb(attn_norm, "attn_norm", il); - for (int il = 0; il < n_layer; ++il) { + // self-attention { - // Norm - cur = ggml_norm(ctx0, inpL, norm_eps); - cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].attn_norm), model.layers[il].attn_norm_b); + cur = attn_norm; + + cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + if (clamp_kqv > 0.0f) { + cur = ggml_clamp(ctx0, cur, -clamp_kqv, clamp_kqv); + cb(cur, "wqkv_clamped", il); + } + + struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + + llm_build_kv_store(lctx, ctx0, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); + + cur = llm_build_kqv(lctx, ctx0, Qcur, + model.layers[il].wo, NULL, + Qcur, KQ_scale, KQ_mask, n_tokens, n_kv, max_alibi_bias, cb, il); + cb(cur, "kqv_out", il); } - { - // Self Attention - cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wqkv, cur), model.layers[il].bqkv); + // Add the input + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); - struct ggml_tensor * tmpq = ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*n_embd); - struct ggml_tensor * tmpk = ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], sizeof(float)*n_embd); - struct ggml_tensor * tmpv = ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], sizeof(float)*(n_embd + n_embd_gqa)); + // feed forward + { + cur = llm_build_norm(ctx0, ffn_inp, + model.layers[il].ffn_norm, + NULL, + LLM_NORM, norm_eps, cb, il); + cb(cur, "ffn_norm", il); - struct ggml_tensor * Qcur = tmpq; - struct ggml_tensor * Kcur = tmpk; + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, NULL, + NULL, NULL, + model.layers[il].ffn_down, NULL, + LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); + cb(cur, "ffn_out", il); + } - // store key and value to memory - { - struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, ggml_cont(ctx0, tmpv), n_embd_gqa, n_tokens)); - ggml_set_name(Vcur, "Vcur"); + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "l_out", il); - struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, n_tokens*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + kv_head)); - ggml_set_name(k, "k"); + // input for next layer + inpL = cur; + } - struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, n_tokens, n_embd_gqa, - ( n_ctx)*ggml_element_size(kv_self.v), - (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + kv_head*ggml_element_size(kv_self.v)); + cur = inpL; - ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k)); - 
ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v)); - } + cur = llm_build_norm(ctx0, cur, + model.output_norm, + NULL, + LLM_NORM, norm_eps, cb, -1); + cb(cur, "result_norm", -1); - struct ggml_tensor * Q = - ggml_permute(ctx0, - ggml_cpy(ctx0, - Qcur, - ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd_head, n_head, n_tokens)), - 0, 2, 1, 3); - ggml_set_name(Q, "Q"); - - struct ggml_tensor * K = - ggml_view_3d(ctx0, kv_self.k, - n_embd_head, n_kv, n_head_kv, - ggml_element_size(kv_self.k)*n_embd_gqa, - ggml_element_size(kv_self.k)*n_embd_head, - ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il); - ggml_set_name(K, "K"); + cur = ggml_mul_mat(ctx0, model.output, cur); + cb(cur, "result_output", -1); - // K * Q - struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); - ggml_set_name(KQ, "KQ"); + ggml_build_forward_expand(gf, cur); - // KQ_scaled = KQ / sqrt(n_embd_head) - // KQ_scaled shape [n_past + n_tokens, n_tokens, n_head, 1] - struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale); - ggml_set_name(KQ_scaled, "KQ_scaled"); + ggml_free(ctx0); - struct ggml_tensor * KQ_scaled_alibi = ggml_alibi(ctx0, KQ_scaled, /*n_past*/ kv_head, n_head, 8); - ggml_set_name(KQ_scaled_alibi, "KQ_scaled_alibi"); + return gf; +} - // KQ_masked = mask_past(KQ_scaled) - struct ggml_tensor * KQ_masked = ggml_add(ctx0, KQ_scaled_alibi, KQ_mask); - ggml_set_name(KQ_masked, "KQ_masked"); +// +// tensor offloading helpers +// +// TODO: will be removed with backend v2 + +enum llm_offload_func_e { + OFFLOAD_FUNC_NOP, + OFFLOAD_FUNC, + OFFLOAD_FUNC_KQ, + OFFLOAD_FUNC_V, + OFFLOAD_FUNC_NR, + OFFLOAD_FUNC_EMB, + OFFLOAD_FUNC_OUT, +}; - // KQ = soft_max(KQ_masked) - struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked); - ggml_set_name(KQ_soft_max, "KQ_soft_max"); +// TODO: will be removed with backend v2 +struct llm_offload_trie { + struct node { + ~node() { + for (int i = 0; i < 256; ++i) { + if (children[i]) { + delete children[i]; + } + } + } - // split cached V into n_head heads - struct ggml_tensor * V = - ggml_view_3d(ctx0, kv_self.v, - n_kv, n_embd_head, n_head_kv, - ggml_element_size(kv_self.v)*n_ctx, - ggml_element_size(kv_self.v)*n_ctx*n_embd_head, - ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il); - ggml_set_name(V, "V"); + node * children[256] = { nullptr }; + llm_offload_func_e func = OFFLOAD_FUNC_NOP; + }; - struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max); - ggml_set_name(KQV, "KQV"); + llm_offload_trie() { + root = new node; + } - // KQV_merged = KQV.permute(0, 2, 1, 3) - struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); - ggml_set_name(KQV_merged, "KQV_merged"); + llm_offload_trie(const std::unordered_map & map) { + root = new node; - // cur = KQV_merged.contiguous().view(n_embd, n_tokens) - cur = ggml_cont_2d(ctx0, KQV_merged, n_embd, n_tokens); - ggml_set_name(cur, "KQV_merged_contiguous"); + for (const auto & kv : map) { + add(kv.first, kv.second); } + } - // Projection - cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wo, cur), model.layers[il].bo); + ~llm_offload_trie() { + delete root; + } - // Add the input - cur = ggml_add(ctx0, cur, inpL); + void add(const char * name, llm_offload_func_e func) { + node * cur = root; - struct ggml_tensor * inpFF = cur; + for (int i = 0; ; ++i) { + const uint8_t c = name[i]; - // FF - { - // Norm - { - cur = ggml_norm(ctx0, inpFF, norm_eps); - cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ffn_norm), model.layers[il].ffn_norm_b); + if (!c) { + break; } - cur = 
ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].w3, cur), model.layers[il].b3); - - // GELU activation - cur = ggml_gelu(ctx0, cur); + if (!cur->children[c]) { + cur->children[c] = new node; + } - // Projection - cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].w2, cur), model.layers[il].b2); + cur = cur->children[c]; } - inpL = ggml_add(ctx0, cur, inpFF); + cur->func = func; } - // Output Norm - { - cur = ggml_norm(ctx0, inpL, norm_eps); - cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.output_norm), model.output_norm_b); - } - ggml_set_name(cur, "result_norm"); + llm_offload_func_e find(const char * name) const { + const node * cur = root; - cur = ggml_mul_mat(ctx0, model.output, cur); - ggml_set_name(cur, "result_output"); + for (int i = 0; ; ++i) { + const uint8_t c = name[i]; - ggml_build_forward_expand(gf, cur); + if (!c) { + break; + } - ggml_free(ctx0); + if (!cur->children[c]) { + return OFFLOAD_FUNC_NOP; + } - return gf; -} + cur = cur->children[c]; + } -static struct ggml_cgraph * llm_build_mpt( - llama_context & lctx, - const llama_batch & batch) { - const auto & model = lctx.model; - const auto & hparams = model.hparams; - const auto & cparams = lctx.cparams; + return cur->func; + } - const auto & kv_self = lctx.kv_self; + node * root = nullptr; +}; - GGML_ASSERT(!!kv_self.ctx); +// TODO: will be removed with backend v2 +static const std::unordered_map k_offload_map = { + //{ "inp_tokens", OFFLOAD_FUNC_NR }, // TODO: missing K-quants get_rows kernel + //{ "inp_embd", OFFLOAD_FUNC_NR }, // TODO: missing K-quants get_rows kernel + { "pos_embd", OFFLOAD_FUNC_NR }, + + { "inp_pos", OFFLOAD_FUNC_KQ }, // this is often used for KQ ops (e.g. rope) + { "KQ_scale", OFFLOAD_FUNC_KQ }, + { "KQ_mask", OFFLOAD_FUNC_KQ }, + { "K_shift", OFFLOAD_FUNC_KQ }, + { "K_shifted", OFFLOAD_FUNC_KQ }, + + { "inp_norm", OFFLOAD_FUNC_NR }, + { "inp_norm_w", OFFLOAD_FUNC_NR }, + { "inp_norm_wb", OFFLOAD_FUNC_NR }, + + { "norm", OFFLOAD_FUNC }, + { "norm_w", OFFLOAD_FUNC }, + { "norm_wb", OFFLOAD_FUNC }, + + { "attn_norm", OFFLOAD_FUNC }, + { "attn_norm_2", OFFLOAD_FUNC }, + + { "wqkv", OFFLOAD_FUNC_KQ }, + { "bqkv", OFFLOAD_FUNC_KQ }, + { "wqkv_clamped", OFFLOAD_FUNC_KQ }, + + { "tmpk", OFFLOAD_FUNC_KQ }, + { "tmpq", OFFLOAD_FUNC_KQ }, + { "tmpv", OFFLOAD_FUNC_V }, + { "Kcur", OFFLOAD_FUNC_KQ }, + { "Qcur", OFFLOAD_FUNC_KQ }, + { "Vcur", OFFLOAD_FUNC_V }, + + { "krot", OFFLOAD_FUNC_KQ }, + { "qrot", OFFLOAD_FUNC_KQ }, + { "kpass", OFFLOAD_FUNC_KQ }, + { "qpass", OFFLOAD_FUNC_KQ }, + { "krotated", OFFLOAD_FUNC_KQ }, + { "qrotated", OFFLOAD_FUNC_KQ }, + + { "q", OFFLOAD_FUNC_KQ }, + { "k", OFFLOAD_FUNC_KQ }, + { "kq", OFFLOAD_FUNC_KQ }, + { "kq_scaled", OFFLOAD_FUNC_KQ }, + { "kq_scaled_alibi", OFFLOAD_FUNC_KQ }, + { "kq_masked", OFFLOAD_FUNC_KQ }, + { "kq_soft_max", OFFLOAD_FUNC_V }, + { "v", OFFLOAD_FUNC_V }, + { "kqv", OFFLOAD_FUNC_V }, + { "kqv_merged", OFFLOAD_FUNC_V }, + { "kqv_merged_cont", OFFLOAD_FUNC_V }, + { "kqv_wo", OFFLOAD_FUNC_V }, + { "kqv_out", OFFLOAD_FUNC_V }, + + { "ffn_inp", OFFLOAD_FUNC }, + { "ffn_norm", OFFLOAD_FUNC }, + + { "ffn_up", OFFLOAD_FUNC }, + { "ffn_up_b", OFFLOAD_FUNC }, + { "ffn_gate", OFFLOAD_FUNC }, + { "ffn_gate_b", OFFLOAD_FUNC }, + { "ffn_gate_par", OFFLOAD_FUNC }, + { "ffn_down", OFFLOAD_FUNC }, + { "ffn_down_b", OFFLOAD_FUNC }, + { "ffn_out", OFFLOAD_FUNC }, + + { "ffn_silu", OFFLOAD_FUNC }, + { "ffn_gelu", OFFLOAD_FUNC }, + { "ffn_relu", OFFLOAD_FUNC }, + { "ffn_sqr(relu)", OFFLOAD_FUNC }, + + { "l_out", OFFLOAD_FUNC }, + + { "result_norm", 
OFFLOAD_FUNC_EMB }, + { "result_output", OFFLOAD_FUNC_OUT }, +}; - const int64_t n_embd = hparams.n_embd; - const int64_t n_layer = hparams.n_layer; - const int64_t n_ctx = cparams.n_ctx; - const int64_t n_head = hparams.n_head; - const int64_t n_head_kv = hparams.n_head_kv; - const int64_t n_embd_head = hparams.n_embd_head(); - const int64_t n_embd_gqa = hparams.n_embd_gqa(); +static llm_offload_trie k_offload_func_trie(k_offload_map); - const float norm_eps = hparams.f_norm_eps; - const float clamp_kqv = hparams.f_clamp_kqv; - const float max_alibi_bias = hparams.f_max_alibi_bias; +static struct ggml_cgraph * llama_build_graph( + llama_context & lctx, + const llama_batch & batch) { + const auto & model = lctx.model; - const int n_gpu_layers = model.n_gpu_layers; + // check if we should build the worst-case graph (for memory measurement) + const bool worst_case = ggml_allocr_is_measure(lctx.alloc); - const int32_t n_tokens = batch.n_tokens; - const int32_t n_kv = ggml_allocr_is_measure(lctx.alloc) ? n_ctx : kv_self.n; - const int32_t kv_head = ggml_allocr_is_measure(lctx.alloc) ? n_ctx - n_tokens : kv_self.head; + // keep track of the input that has already been allocated + bool alloc_inp_tokens = false; + bool alloc_inp_embd = false; + bool alloc_inp_pos = false; + bool alloc_inp_KQ_scale = false; + bool alloc_inp_KQ_mask = false; + bool alloc_inp_K_shift = false; - auto & buf_compute = lctx.buf_compute; +#ifdef GGML_USE_CUBLAS + const bool do_offload = true; +#else + const bool do_offload = true; // TODO: set to false after finishing refactoring +#endif - struct ggml_init_params params = { - /*.mem_size =*/ buf_compute.size, - /*.mem_buffer =*/ buf_compute.data, - /*.no_alloc =*/ false, - }; + int n_non_view = 0; // number of non-view tensors that have been processed by the callback - params.no_alloc = true; + // this callback allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.) 
+ // TODO: will be removed with backend v2 + llm_build_cb cb = [&](struct ggml_tensor * cur, const char * name, int il) { + if (il >= 0) { + ggml_format_name(cur, "%s-%d", name, il); + } else { + ggml_set_name(cur, name); + } - struct ggml_context * ctx0 = ggml_init(params); + // + // allocate input tensors and set input data + // + // TODO: will be removed with backend v2 - ggml_cgraph * gf = ggml_new_graph(ctx0); + if (!alloc_inp_tokens && strcmp(name, "inp_tokens") == 0) { + ggml_allocr_alloc(lctx.alloc, cur); - struct ggml_tensor * cur; - struct ggml_tensor * inpL; + if (!ggml_allocr_is_measure(lctx.alloc) && batch.token) { + const int64_t n_tokens = cur->ne[0]; - //int warmup = 0; - if (batch.token) { - struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + memcpy(cur->data, batch.token, n_tokens*ggml_element_size(cur)); + } - ggml_allocr_alloc(lctx.alloc, inp_tokens); - if (!ggml_allocr_is_measure(lctx.alloc)) { - memcpy(inp_tokens->data, batch.token, n_tokens*ggml_element_size(inp_tokens)); - //warmup = ((uint32_t*) inp_tokens->data)[0] == 0; + alloc_inp_tokens = true; } - ggml_set_name(inp_tokens, "inp_tokens"); + if (!alloc_inp_embd && strcmp(name, "inp_embd") == 0) { + ggml_allocr_alloc(lctx.alloc, cur); - inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens); - } else { -#ifdef GGML_USE_MPI - GGML_ASSERT(false && "not implemented"); -#endif + if (!ggml_allocr_is_measure(lctx.alloc) && batch.embd) { + const int64_t n_embd = cur->ne[0]; + const int64_t n_tokens = cur->ne[1]; - inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_tokens); + memcpy(cur->data, batch.embd, n_tokens*n_embd*ggml_element_size(cur)); + } - ggml_allocr_alloc(lctx.alloc, inpL); - if (!ggml_allocr_is_measure(lctx.alloc)) { - memcpy(inpL->data, batch.embd, n_tokens * n_embd * ggml_element_size(inpL)); + alloc_inp_embd = true; } - } - const int i_gpu_start = n_layer - n_gpu_layers; - (void) i_gpu_start; + if (!alloc_inp_pos && strcmp(name, "inp_pos") == 0) { + ggml_allocr_alloc(lctx.alloc, cur); - // offload functions set the tensor output backend to GPU - // tensors are GPU-accelerated if any input or the output has been offloaded - offload_func_t offload_func_nr = llama_nop; // nr = non-repeating - offload_func_t offload_func_kq = llama_nop; - offload_func_t offload_func_v = llama_nop; - -#ifdef GGML_USE_CUBLAS - if (n_gpu_layers > n_layer) { - offload_func_nr = ggml_cuda_assign_buffers_no_alloc; - } - if (n_gpu_layers > n_layer + 1) { - offload_func_v = ggml_cuda_assign_buffers_no_alloc; - } - if (n_gpu_layers > n_layer + 2) { - offload_func_kq = ggml_cuda_assign_buffers_no_alloc; - } -#endif // GGML_USE_CUBLAS + if (!ggml_allocr_is_measure(lctx.alloc) && batch.pos) { + const int64_t n_tokens = cur->ne[0]; - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)"); - ggml_allocr_alloc(lctx.alloc, KQ_scale); - if (!ggml_allocr_is_measure(lctx.alloc)) { - ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head)); - } + int32_t * data = (int32_t *) cur->data; - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); - offload_func_kq(KQ_mask); - ggml_set_name(KQ_mask, "KQ_mask"); - ggml_allocr_alloc(lctx.alloc, KQ_mask); - if (!ggml_allocr_is_measure(lctx.alloc)) { - float * data = (float *) KQ_mask->data; - memset(data, 0, ggml_nbytes(KQ_mask)); - - for (int h = 0; h < 1; ++h) { - for 
(int j = 0; j < n_tokens; ++j) { - const llama_pos pos = batch.pos[j]; - const llama_seq_id seq_id = batch.seq_id[j][0]; - - for (int i = 0; i < n_kv; ++i) { - if (!kv_self.cells[i].has_seq_id(seq_id) || kv_self.cells[i].pos > pos) { - data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY; - } + for (int i = 0; i < n_tokens; ++i) { + data[i] = batch.pos[i]; } } + + alloc_inp_pos = true; } - } - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * attn_norm; + if (!alloc_inp_KQ_scale && strcmp(name, "KQ_scale") == 0) { + ggml_allocr_alloc(lctx.alloc, cur); - offload_func_t offload_func = llama_nop; + if (!ggml_allocr_is_measure(lctx.alloc)) { + const int64_t n_embd_head = model.hparams.n_embd_head(); + ggml_set_f32(cur, 1.0f/sqrtf(float(n_embd_head))); + } -#ifdef GGML_USE_CUBLAS - if (il >= i_gpu_start) { - offload_func = ggml_cuda_assign_buffers_no_alloc; + alloc_inp_KQ_scale = true; } -#endif // GGML_USE_CUBLAS - - // self-attention - // TODO: refactor into common function (shared with LLaMA) - { - attn_norm = ggml_norm(ctx0, inpL, norm_eps); - offload_func(attn_norm); - attn_norm = ggml_mul(ctx0, attn_norm, model.layers[il].attn_norm); - offload_func(attn_norm); + if (!alloc_inp_KQ_mask && strcmp(name, "KQ_mask") == 0) { + ggml_allocr_alloc(lctx.alloc, cur); - if (1) { - cur = attn_norm; - } + if (!ggml_allocr_is_measure(lctx.alloc)) { + const int64_t n_kv = cur->ne[0]; + const int64_t n_tokens = cur->ne[1]; - // compute QKV + float * data = (float *) cur->data; + memset(data, 0, ggml_nbytes(cur)); - cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); - offload_func_kq(cur); + for (int h = 0; h < 1; ++h) { + for (int j = 0; j < n_tokens; ++j) { + const llama_pos pos = batch.pos[j]; + const llama_seq_id seq_id = batch.seq_id[j][0]; - if (clamp_kqv > 0.0f) { - cur = ggml_clamp(ctx0, cur, -clamp_kqv, clamp_kqv); - offload_func_kq(cur); + for (int i = 0; i < n_kv; ++i) { + if (!lctx.kv_self.cells[i].has_seq_id(seq_id) || lctx.kv_self.cells[i].pos > pos) { + data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY; + } + } + } + } } - const size_t wsize = ggml_type_size(cur->type); - - struct ggml_tensor * Qcur = ggml_view_3d( - ctx0, cur, n_embd_head, n_head, n_tokens, - wsize * n_embd_head, - wsize * n_embd_head * (n_head + 2 * n_head_kv), - 0); - offload_func_kq(Qcur); + alloc_inp_KQ_mask = true; + } - struct ggml_tensor * Kcur = ggml_view_3d( - ctx0, cur, n_embd_head, n_head_kv, n_tokens, - wsize * n_embd_head, - wsize * n_embd_head * (n_head + 2 * n_head_kv), - wsize * n_embd_head * n_head); - offload_func_kq(Kcur); + if (!alloc_inp_K_shift && strcmp(name, "K_shift") == 0) { + ggml_allocr_alloc(lctx.alloc, cur); - struct ggml_tensor * tmpv = ggml_view_3d( - ctx0, cur, n_embd_head, n_head_kv, n_tokens, - wsize * n_embd_head, - wsize * n_embd_head * (n_head + 2 * n_head_kv), - wsize * n_embd_head * (n_head + n_head_kv)); - offload_func_kq(Kcur); + if (!ggml_allocr_is_measure(lctx.alloc)) { + const int64_t n_ctx = cur->ne[0]; - ggml_set_name(Qcur, "Qcur"); - ggml_set_name(Kcur, "Kcur"); + int32_t * data = (int32_t *) cur->data; - { - struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, ggml_cont(ctx0, tmpv), n_embd_gqa, n_tokens)); - offload_func_v(Vcur); - offload_func_v(Vcur->src[0]->src[0]); - ggml_set_name(Vcur, "Vcur"); - - struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, n_tokens*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + kv_head)); - offload_func_kq(k); - ggml_set_name(k, "k"); - - struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, 
n_tokens, n_embd_gqa, - ( n_ctx)*ggml_element_size(kv_self.v), - (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + kv_head*ggml_element_size(kv_self.v)); - offload_func_v(v); - - ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k)); - ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v)); + for (int i = 0; i < n_ctx; ++i) { + data[i] = lctx.kv_self.cells[i].delta; + } } - struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); - offload_func_kq(Q); - ggml_set_name(Q, "Q"); + alloc_inp_K_shift = true; + } - struct ggml_tensor * K = - ggml_view_3d(ctx0, kv_self.k, - n_embd_head, n_kv, n_head_kv, - ggml_element_size(kv_self.k)*n_embd_gqa, - ggml_element_size(kv_self.k)*n_embd_head, - ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il); - offload_func_kq(K); - ggml_set_name(K, "K"); - - struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); - offload_func_kq(KQ); - ggml_set_name(KQ, "KQ"); - - struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, KQ_scale); - offload_func_kq(KQ_scaled); - ggml_set_name(KQ_scaled, "KQ_scaled"); - - // TODO: replace with ggml_add() - struct ggml_tensor * KQ_scaled_alibi = - ggml_alibi(ctx0, KQ_scaled, 0, n_head, max_alibi_bias); - offload_func_kq(KQ_scaled_alibi); - ggml_set_name(KQ_scaled_alibi, "KQ_scaled_alibi"); - - struct ggml_tensor * KQ_masked = ggml_add(ctx0, KQ_scaled_alibi, KQ_mask); - offload_func_kq(KQ_masked); - ggml_set_name(KQ_masked, "KQ_masked"); - - struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked); - offload_func_v(KQ_soft_max); - ggml_set_name(KQ_soft_max, "KQ_soft_max"); - - struct ggml_tensor * V = - ggml_view_3d(ctx0, kv_self.v, - n_kv, n_embd_head, n_head_kv, - ggml_element_size(kv_self.v)*n_ctx, - ggml_element_size(kv_self.v)*n_ctx*n_embd_head, - ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il); - offload_func_v(V); - ggml_set_name(V, "V"); - - struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max); - offload_func_v(KQV); - ggml_set_name(KQV, "KQV"); - - struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); - offload_func_v(KQV_merged); - ggml_set_name(KQV_merged, "KQV_merged"); - - cur = ggml_cont_2d(ctx0, KQV_merged, n_embd, n_tokens); - offload_func_v(cur); - ggml_set_name(cur, "KQV_merged_contiguous"); - - cur = ggml_mul_mat(ctx0, model.layers[il].wo, cur); - offload_func(cur); - ggml_set_name(cur, "result_wo"); + // view tensors are not processed further + if (cur->view_src != nullptr) { + return; } - // Add the input - cur = ggml_add(ctx0, cur, inpL); - offload_func(cur); + if (cur->op != GGML_OP_NONE) { + n_non_view++; + } - struct ggml_tensor * attn_out = cur; + // + // offload layers + // + // TODO: will be removed with backend v2 - // feed forward - { - // Norm - { - cur = ggml_norm(ctx0, attn_out, norm_eps); - offload_func(cur); +//#define LLAMA_OFFLOAD_DEBUG - cur = ggml_mul(ctx0, cur, model.layers[il].ffn_norm); - offload_func(cur); - } + if (!do_offload) { + return; + } - cur = ggml_mul_mat(ctx0, model.layers[il].w3, cur); - offload_func(cur); + const int n_layer = model.hparams.n_layer; - cur = ggml_gelu(ctx0, cur); - offload_func(cur); - cur = ggml_mul_mat(ctx0, model.layers[il].w2, cur); - offload_func(cur); - } + const int n_gpu_layers = model.n_gpu_layers; + const int i_gpu_start = n_layer - n_gpu_layers; - cur = ggml_add(ctx0, cur, attn_out); - offload_func(cur); - // input for next layer - inpL = cur; - } + // should we offload the final norm? 
yes if we are not computing embeddings + const bool offload_emb = lctx.embedding.empty(); - cur = inpL; + static const std::unordered_map> k_offload_func_name = { + { OFFLOAD_FUNC_NOP, "CPU" }, + { OFFLOAD_FUNC_OUT, "CPU" }, +#ifdef GGML_USE_CUBLAS + { OFFLOAD_FUNC, "GPU (CUDA)" }, + { OFFLOAD_FUNC_KQ, "GPU (CUDA) KQ" }, + { OFFLOAD_FUNC_V, "GPU (CUDA) V" }, + { OFFLOAD_FUNC_NR, "GPU (CUDA) NR" }, + { OFFLOAD_FUNC_EMB, "GPU (CUDA) EMB" }, +#else + { OFFLOAD_FUNC, "CPU" }, + { OFFLOAD_FUNC_KQ, "CPU" }, + { OFFLOAD_FUNC_V, "CPU" }, + { OFFLOAD_FUNC_NR, "CPU" }, + { OFFLOAD_FUNC_EMB, "CPU" }, +#endif // GGML_USE_CUBLAS + }; - // norm - { - cur = ggml_norm(ctx0, cur, norm_eps); - offload_func_nr(cur); + // check the global map for what offload function to use for this tensor + llm_offload_func_e func_e = k_offload_func_trie.find(name); - cur = ggml_mul(ctx0, cur, model.output_norm); - ggml_set_name(cur, "result_norm"); - } + if (func_e == OFFLOAD_FUNC_NOP) { +#ifdef LLAMA_OFFLOAD_DEBUG + // if a tensor hasn't been offloaded, we warn the user + if (worst_case) { + LLAMA_LOG_WARN("%s: %32s: not offloaded (ref: %s)\n", __func__, + cur->name, "https://github.com/ggerganov/llama.cpp/pull/3837"); + } +#endif - cur = ggml_mul_mat(ctx0, model.output, cur); - ggml_set_name(cur, "result_output"); + return; + } - ggml_build_forward_expand(gf, cur); + // count the number of layers and respect the provided n_gpu_layers + switch (func_e) { + case OFFLOAD_FUNC_NOP: + case OFFLOAD_FUNC_OUT: + break; + case OFFLOAD_FUNC: + if (n_gpu_layers < n_layer) { + if (il < i_gpu_start) { + func_e = OFFLOAD_FUNC_NOP; + } + } + break; + case OFFLOAD_FUNC_NR: + if (n_gpu_layers <= n_layer + 0) { + func_e = OFFLOAD_FUNC_NOP; + } + break; + case OFFLOAD_FUNC_V: + if (n_gpu_layers <= n_layer + 1) { + func_e = OFFLOAD_FUNC_NOP; + } + break; + case OFFLOAD_FUNC_KQ: + if (n_gpu_layers <= n_layer + 2) { + func_e = OFFLOAD_FUNC_NOP; + } + break; + case OFFLOAD_FUNC_EMB: + if (!offload_emb || n_gpu_layers < n_layer) { + func_e = OFFLOAD_FUNC_NOP; + } + break; + default: GGML_ASSERT(false); + } - ggml_free(ctx0); + offload_func_t func = ggml_offload_nop; - return gf; -} + // this is needed for compatibility with Metal for example +#ifdef GGML_USE_CUBLAS + static offload_func_t ggml_offload_gpu = ggml_cuda_assign_buffers_no_alloc; +#else + static offload_func_t ggml_offload_gpu = ggml_offload_nop; +#endif -static struct ggml_cgraph * llama_build_graph( - llama_context & lctx, - const llama_batch & batch) { - const auto & model = lctx.model; + switch (func_e) { + case OFFLOAD_FUNC_NOP: + case OFFLOAD_FUNC_OUT: func = ggml_offload_nop; break; + case OFFLOAD_FUNC: + case OFFLOAD_FUNC_KQ: + case OFFLOAD_FUNC_V: + case OFFLOAD_FUNC_NR: + case OFFLOAD_FUNC_EMB: func = ggml_offload_gpu; break; + default: GGML_ASSERT(false); + } + + // apply offload function to the tensor + func(cur); + +#ifdef LLAMA_OFFLOAD_DEBUG + if (worst_case) { + LLAMA_LOG_INFO("%s: %32s: %s\n", __func__, cur->name, k_offload_func_name.at(func_e).c_str()); + } +#endif + }; struct ggml_cgraph * result = NULL; switch (model.arch) { case LLM_ARCH_LLAMA: { - result = llm_build_llama(lctx, batch); + result = llm_build_llama(lctx, batch, cb, worst_case); } break; case LLM_ARCH_BAICHUAN: { - result = llm_build_baichaun(lctx, batch); + result = llm_build_baichaun(lctx, batch, cb, worst_case); } break; case LLM_ARCH_FALCON: { - result = llm_build_falcon(lctx, batch); + result = llm_build_falcon(lctx, batch, cb, worst_case); } break; case LLM_ARCH_STARCODER: { - result = 
llm_build_starcoder(lctx, batch); + result = llm_build_starcoder(lctx, batch, cb, worst_case); } break; case LLM_ARCH_PERSIMMON: { - result = llm_build_persimmon(lctx, batch); + result = llm_build_persimmon(lctx, batch, cb, worst_case); } break; case LLM_ARCH_REFACT: { - result = llm_build_refact(lctx, batch); + result = llm_build_refact(lctx, batch, cb, worst_case); } break; case LLM_ARCH_BLOOM: { - result = llm_build_bloom(lctx, batch); + result = llm_build_bloom(lctx, batch, cb, worst_case); } break; case LLM_ARCH_MPT: { - result = llm_build_mpt(lctx, batch); + result = llm_build_mpt(lctx, batch, cb, worst_case); } break; default: GGML_ASSERT(false); } + if (worst_case) { + int n_non_view_total = 0; + + for (int i = 0; i < result->n_nodes; ++i) { + if (result->nodes[i]->view_src == nullptr) { + n_non_view_total++; + } + } + + LLAMA_LOG_INFO("%s: non-view tensors processed: %d/%d\n", __func__, n_non_view, n_non_view_total); + + if (n_non_view != n_non_view_total) { + LLAMA_LOG_WARN("%s: ****************************************************************\n", __func__); + LLAMA_LOG_WARN("%s: not all non-view tensors have been processed with a callback\n", __func__); + LLAMA_LOG_WARN("%s: this can indicate an inefficiency in the graph implementation\n", __func__); + LLAMA_LOG_WARN("%s: build with LLAMA_OFFLOAD_DEBUG for more info\n", __func__); + LLAMA_LOG_WARN("%s: ref: https://github.com/ggerganov/llama.cpp/pull/3837\n", __func__); + LLAMA_LOG_WARN("%s: ****************************************************************\n", __func__); + } + } + return result; } @@ -6043,11 +5322,13 @@ static int llama_decode_internal( } // If all tensors can be run on the GPU then using more than 1 thread is detrimental. - const bool full_offload_supported = model.arch == LLM_ARCH_LLAMA || + const bool full_offload_supported = + model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_BAICHUAN || - model.arch == LLM_ARCH_FALCON || - model.arch == LLM_ARCH_REFACT || + model.arch == LLM_ARCH_FALCON || + model.arch == LLM_ARCH_REFACT || model.arch == LLM_ARCH_MPT; + const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3; if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) { n_threads = 1; @@ -6102,6 +5383,8 @@ static int llama_decode_internal( //} // extract logits + // TODO: do not compute and extract logits if only embeddings are needed + // need to update the graphs to skip "result_output" { auto & logits_out = lctx.logits; @@ -8713,8 +7996,8 @@ static int llama_apply_lora_from_file_internal( ggml_tensor * dest_t = model_tensors[base_name]; - offload_func_t offload_func = llama_nop; - offload_func_t offload_func_force_inplace = llama_nop; + offload_func_t offload_func = ggml_offload_nop; + offload_func_t offload_func_force_inplace = ggml_offload_nop; #ifdef GGML_USE_CUBLAS if (dest_t->backend == GGML_BACKEND_GPU || dest_t->backend == GGML_BACKEND_GPU_SPLIT) { From ca190bca8e844d171020d6147687e71472d71734 Mon Sep 17 00:00:00 2001 From: Adrian Hesketh Date: Wed, 1 Nov 2023 09:28:28 +0000 Subject: [PATCH 028/206] server : re-enable completion and embedded at the same time (#3876) --- .gitignore | 1 + examples/server/server.cpp | 16 ++++++++++------ 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index 545c2872632234..5d7c5479ef67ae 100644 --- a/.gitignore +++ b/.gitignore @@ -15,6 +15,7 @@ .DS_Store .build/ .cache/ +.ccls-cache/ .direnv/ .envrc .swiftpm diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 
c163c7f8ec0dd1..47ae0d55856cf8 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -149,6 +149,7 @@ struct task_server { task_type type; json data; bool infill_mode = false; + bool embedding_mode = false; }; struct task_result { @@ -371,6 +372,7 @@ struct llama_client_slot std::vector generated_token_probs; bool infill = false; + bool embedding = false; bool has_next_token = true; bool truncated = false; bool stopped_eos = false; @@ -1244,13 +1246,14 @@ struct llama_server_context queue_results.push_back(res); } - int request_completion(json data, bool infill) + int request_completion(json data, bool infill, bool embedding) { std::lock_guard lock(mutex_tasks); task_server task; task.id = id_gen++; task.data = data; task.infill_mode = infill; + task.embedding_mode = embedding; task.type = COMPLETION_TASK; queue_tasks.push_back(task); return task.id; @@ -1376,7 +1379,7 @@ struct llama_server_context { LOG_TEE("slot unavailable\n"); // send error result - send_error(task.id, "slot unavaliable"); + send_error(task.id, "slot unavailable"); return; } @@ -1388,6 +1391,7 @@ struct llama_server_context slot->reset(); slot->infill = task.infill_mode; + slot->embedding = task.embedding_mode; slot->task_id = task.id; if (!launch_slot_with_data(slot, task.data)) @@ -1695,7 +1699,7 @@ struct llama_server_context } // prompt evaluated for embedding - if (params.embedding) + if (slot.embedding) { send_embedding(slot); slot.release(); @@ -2274,7 +2278,7 @@ int main(int argc, char **argv) svr.Post("/completion", [&llama](const httplib::Request &req, httplib::Response &res) { json data = json::parse(req.body); - const int task_id = llama.request_completion(data, false); + const int task_id = llama.request_completion(data, false, false); if (!json_value(data, "stream", false)) { std::string completion_text; task_result result = llama.next_result(task_id); @@ -2329,7 +2333,7 @@ int main(int argc, char **argv) svr.Post("/infill", [&llama](const httplib::Request &req, httplib::Response &res) { json data = json::parse(req.body); - const int task_id = llama.request_completion(data, true); + const int task_id = llama.request_completion(data, true, false); if (!json_value(data, "stream", false)) { std::string completion_text; task_result result = llama.next_result(task_id); @@ -2433,7 +2437,7 @@ int main(int argc, char **argv) { prompt = ""; } - const int task_id = llama.request_completion({ {"prompt", prompt}, { "n_predict", 0} }, false); + const int task_id = llama.request_completion({ {"prompt", prompt}, { "n_predict", 0} }, false, true); task_result result = llama.next_result(task_id); return res.set_content(result.result_json.dump(), "application/json"); }); From f0e209324a7f663225791897877bf610f1af152d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 1 Nov 2023 11:29:07 +0200 Subject: [PATCH 029/206] scripts : add server-llm.sh (#3868) * scripts : add deploy-server.sh * scripts : rename to server-llm.sh * scripts : working curl pipe --- scripts/server-llm.sh | 391 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 391 insertions(+) create mode 100644 scripts/server-llm.sh diff --git a/scripts/server-llm.sh b/scripts/server-llm.sh new file mode 100644 index 00000000000000..7bf0929bb8ca11 --- /dev/null +++ b/scripts/server-llm.sh @@ -0,0 +1,391 @@ +#!/bin/bash +# +# Helper script for deploying llama.cpp server with a single Bash command +# +# - Works on Linux and macOS +# - Supports: CPU, CUDA, Metal, OpenCL +# - Can run all GGUF models from HuggingFace +# - Can serve 
requests in parallel +# - Always builds latest llama.cpp from GitHub +# +# Limitations +# +# - Chat templates are poorly supported (base models recommended) +# - Might be unstable! +# +# Usage: +# ./server-llm.sh [--port] [--repo] [--wtype] [--backend] [--gpu-id] [--n-parallel] [--n-kv] [--verbose] +# +# --port: port number, default is 8888 +# --repo: path to a repo containing GGUF model files +# --wtype: weights type (f16, q8_0, q4_0, q4_1), default is user-input +# --backend: cpu, cuda, metal, opencl, depends on the OS +# --gpu-id: gpu id, default is 0 +# --n-parallel: number of parallel requests, default is 8 +# --n-kv: KV cache size, default is 4096 +# --verbose: verbose output +# +# Example: +# +# bash -c "$(curl -s https://ggml.ai/server-llm.sh)" +# + +set -e + +# required utils: curl, git, make +if ! command -v curl &> /dev/null; then + printf "[-] curl not found\n" + exit 1 +fi +if ! command -v git &> /dev/null; then + printf "[-] git not found\n" + exit 1 +fi +if ! command -v make &> /dev/null; then + printf "[-] make not found\n" + exit 1 +fi + +# parse arguments +port=8888 +repo="" +wtype="" +backend="cpu" + +# if macOS, use metal backend by default +if [[ "$OSTYPE" == "darwin"* ]]; then + backend="metal" +elif command -v nvcc &> /dev/null; then + backend="cuda" +fi + +gpu_id=0 +n_parallel=8 +n_kv=4096 +verbose=0 + +function print_usage { + printf "Usage:\n" + printf " ./server-llm.sh [--port] [--repo] [--wtype] [--backend] [--gpu-id] [--n-parallel] [--n-kv] [--verbose]\n\n" + printf " --port: port number, default is 8888\n" + printf " --repo: path to a repo containing GGUF model files\n" + printf " --wtype: weights type (f16, q8_0, q4_0, q4_1), default is user-input\n" + printf " --backend: cpu, cuda, metal, opencl, depends on the OS\n" + printf " --gpu-id: gpu id, default is 0\n" + printf " --n-parallel: number of parallel requests, default is 8\n" + printf " --n-kv: KV cache size, default is 4096\n" + printf " --verbose: verbose output\n\n" + printf "Example:\n\n" + printf ' bash -c "$(curl -s https://ggml.ai/server-llm.sh)"\n\n' +} + +while [[ $# -gt 0 ]]; do + key="$1" + case $key in + --port) + port="$2" + shift + shift + ;; + --repo) + repo="$2" + shift + shift + ;; + --wtype) + wtype="$2" + shift + shift + ;; + --backend) + backend="$2" + shift + shift + ;; + --gpu-id) + gpu_id="$2" + shift + shift + ;; + --n-parallel) + n_parallel="$2" + shift + shift + ;; + --n-kv) + n_kv="$2" + shift + shift + ;; + --verbose) + verbose=1 + shift + ;; + --help) + print_usage + exit 0 + ;; + *) + echo "Unknown argument: $key" + print_usage + exit 1 + ;; + esac +done + +# available weights types +wtypes=("F16" "Q8_0" "Q4_0" "Q4_1" "Q5_0" "Q5_1" "Q6_K" "Q5_K_M" "Q5_K_S" "Q4_K_M" "Q4_K_S" "Q3_K_L" "Q3_K_M" "Q3_K_S" "Q2_K") + +wfiles=() +for wt in "${wtypes[@]}"; do + wfiles+=("") +done + +# sample repos +repos=( + "https://huggingface.co/TheBloke/Llama-2-7B-GGUF" + "https://huggingface.co/TheBloke/Llama-2-13B-GGUF" + "https://huggingface.co/TheBloke/Llama-2-70B-GGUF" + "https://huggingface.co/TheBloke/CodeLlama-7B-GGUF" + "https://huggingface.co/TheBloke/CodeLlama-13B-GGUF" + "https://huggingface.co/TheBloke/CodeLlama-34B-GGUF" + "https://huggingface.co/TheBloke/Mistral-7B-v0.1-GGUF" + "https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF" + "https://huggingface.co/TheBloke/OpenHermes-2-Mistral-7B-GGUF" + "https://huggingface.co/TheBloke/CausalLM-7B-GGUF" +) + +printf "\n" +printf "[I] This is a helper script for deploying llama.cpp's server on this machine.\n\n" +printf " Based on the 
options that follow, the script might download a model file\n" +printf " from the internet, which can be a few GBs in size. The script will also\n" +printf " build the latest llama.cpp source code from GitHub, which can be unstable.\n" +printf "\n" +printf " Upon success, an HTTP server will be started and it will serve the selected\n" +printf " model using llama.cpp for demonstration purposes.\n" +printf "\n" +printf " Please note:\n" +printf "\n" +printf " - All new data will be stored in the current folder\n" +printf " - The server will be listening on all network interfaces\n" +printf " - The server will run with default settings which are not always optimal\n" +printf " - Do not judge the quality of a model based on the results from this script\n" +printf " - Do not use this script to benchmark llama.cpp\n" +printf " - Do not use this script in production\n" +printf " - This script is only for demonstration purposes\n" +printf "\n" +printf " If you don't know what you are doing, please press Ctrl-C to abort now\n" +printf "\n" +printf " Press Enter to continue ...\n\n" + +read + +if [[ -z "$repo" ]]; then + printf "[+] No repo provided from the command line\n" + printf " Please select a number from the list below or enter an URL:\n\n" + + is=0 + for r in "${repos[@]}"; do + printf " %2d) %s\n" $is "$r" + is=$((is+1)) + done + + # ask for repo until index of sample repo is provided or an URL + while [[ -z "$repo" ]]; do + printf "\n Or choose one from: https://huggingface.co/models?sort=trending&search=gguf\n\n" + read -p "[+] Select repo: " repo + + # check if the input is a number + if [[ "$repo" =~ ^[0-9]+$ ]]; then + if [[ "$repo" -ge 0 && "$repo" -lt ${#repos[@]} ]]; then + repo="${repos[$repo]}" + else + printf "[-] Invalid repo index: %s\n" "$repo" + repo="" + fi + elif [[ "$repo" =~ ^https?:// ]]; then + repo="$repo" + else + printf "[-] Invalid repo URL: %s\n" "$repo" + repo="" + fi + done +fi + +# remove suffix +repo=$(echo "$repo" | sed -E 's/\/tree\/main$//g') + +printf "[+] Checking for GGUF model files in %s\n" "$repo" + +# find GGUF files in the source +# TODO: better logic +model_tree="${repo%/}/tree/main" +model_files=$(curl -s "$model_tree" | grep -i "\\.gguf" | sed -E 's/.*(.*)<\/span><\/a>/\1/g') + +# list all files in the provided git repo +printf "[+] Model files:\n\n" +for file in $model_files; do + # determine iw by grepping the filename with wtypes + iw=-1 + is=0 + for wt in "${wtypes[@]}"; do + # uppercase + ufile=$(echo "$file" | tr '[:lower:]' '[:upper:]') + if [[ "$ufile" =~ "$wt" ]]; then + iw=$is + break + fi + is=$((is+1)) + done + + if [[ $iw -eq -1 ]]; then + continue + fi + + wfiles[$iw]="$file" + + have=" " + if [[ -f "$file" ]]; then + have="*" + fi + + printf " %2d) %s %s\n" $iw "$have" "$file" +done + +# ask for weights type until provided and available +while [[ -z "$wtype" ]]; do + printf "\n" + read -p "[+] Select weight type: " wtype + wfile="${wfiles[$wtype]}" + + if [[ -z "$wfile" ]]; then + printf "[-] Invalid weight type: %s\n" "$wtype" + wtype="" + fi +done + +printf "[+] Selected weight type: %s (%s)\n" "$wtype" "$wfile" + +url="${repo%/}/resolve/main/$wfile" + +# check file if the model has been downloaded before +chk="$wfile.chk" + +# check if we should download the file +# - if $wfile does not exist +# - if $wfile exists but $chk does not exist +# - if $wfile exists and $chk exists but $wfile is newer than $chk +# TODO: better logic using git lfs info + +do_download=0 + +if [[ ! -f "$wfile" ]]; then + do_download=1 +elif [[ ! 
-f "$chk" ]]; then + do_download=1 +elif [[ "$wfile" -nt "$chk" ]]; then + do_download=1 +fi + +if [[ $do_download -eq 1 ]]; then + printf "[+] Downloading weights from %s\n" "$url" + + # download the weights file + curl -o "$wfile" -# -L "$url" + + # create a check file if successful + if [[ $? -eq 0 ]]; then + printf "[+] Creating check file %s\n" "$chk" + touch "$chk" + fi +else + printf "[+] Using cached weights %s\n" "$wfile" +fi + +# get latest llama.cpp and build + +printf "[+] Downloading latest llama.cpp\n" + +llama_cpp_dir="__llama_cpp_port_${port}__" + +if [[ -d "$llama_cpp_dir" && ! -f "$llama_cpp_dir/__ggml_script__" ]]; then + # if the dir exists and there isn't a file "__ggml_script__" in it, abort + printf "[-] Directory %s already exists\n" "$llama_cpp_dir" + printf "[-] Please remove it and try again\n" + exit 1 +elif [[ -d "$llama_cpp_dir" ]]; then + printf "[+] Directory %s already exists\n" "$llama_cpp_dir" + printf "[+] Using cached llama.cpp\n" + + cd "$llama_cpp_dir" + git reset --hard + git fetch + git checkout origin/master + + cd .. +else + printf "[+] Cloning llama.cpp\n" + + git clone https://github.com/ggerganov/llama.cpp "$llama_cpp_dir" +fi + +# mark that that the directory is made by this script +touch "$llama_cpp_dir/__ggml_script__" + +if [[ $verbose -eq 1 ]]; then + set -x +fi + +# build +cd "$llama_cpp_dir" + +make clean + +log="--silent" +if [[ $verbose -eq 1 ]]; then + log="" +fi + +if [[ "$backend" == "cuda" ]]; then + printf "[+] Building with CUDA backend\n" + LLAMA_CUBLAS=1 make -j server $log +elif [[ "$backend" == "cpu" ]]; then + printf "[+] Building with CPU backend\n" + make -j server $log +elif [[ "$backend" == "metal" ]]; then + printf "[+] Building with Metal backend\n" + make -j server $log +elif [[ "$backend" == "opencl" ]]; then + printf "[+] Building with OpenCL backend\n" + LLAMA_CLBLAST=1 make -j server $log +else + printf "[-] Unknown backend: %s\n" "$backend" + exit 1 +fi + +# run the server + +printf "[+] Running server\n" + +args="" +if [[ "$backend" == "cuda" ]]; then + export CUDA_VISIBLE_DEVICES=$gpu_id + args="-ngl 999" +elif [[ "$backend" == "cpu" ]]; then + args="-ngl 0" +elif [[ "$backend" == "metal" ]]; then + args="-ngl 999" +elif [[ "$backend" == "opencl" ]]; then + args="-ngl 999" +else + printf "[-] Unknown backend: %s\n" "$backend" + exit 1 +fi + +if [[ $verbose -eq 1 ]]; then + args="$args --verbose" +fi + +./server -m "../$wfile" --host 0.0.0.0 --port "$port" -c $n_kv -np "$n_parallel" $args + +exit 0 From 73bdcb395ef9a997d9c02950c7cd4249546162cd Mon Sep 17 00:00:00 2001 From: Andrew Godfrey Date: Wed, 1 Nov 2023 04:49:04 -0700 Subject: [PATCH 030/206] finetune : add -ngl parameter (#3762) * Add '-ngl' support to finetune.cpp * Add fprintf in ggml_cuda_op_add When I tried CUDA offloading during finetuning following the readme, I got an assert here. 
This probably isn't an important case because inference later gives a warning saying you should use f16 or f32 instead when using lora * Add 'finetune.sh', which currently fails when using GPU "error: operator (): Finetuning on tensors with type 'f16' is not yet supported" * tweak finetune.sh * Suppress some warnings in ggml.c * Add f16 implementation to ggml_compute_forward_add_f16_f32 * Add an f16 case to ggml_add_cast_impl and llama_build_lora_finetune_graphs * finetune.sh: Edit comments * Add "add_f16_f32_f32_cuda" * Tweak an error message * finetune.sh: Add an optional LLAMA_MODEL_DIR variable * finetune.sh: Add an optional LLAMA_TRAINING_DIR variable * train : minor * tabs to spaces --------- Co-authored-by: Georgi Gerganov Co-authored-by: cebtenzzre --- common/train.cpp | 2 ++ common/train.h | 1 + examples/finetune/finetune.cpp | 14 ++++++++- examples/finetune/finetune.sh | 34 ++++++++++++++++++++++ ggml-cuda.cu | 17 +++++++++++ ggml-quants.c | 2 ++ ggml.c | 53 ++++++++++++++++++++++++---------- llama.cpp | 2 +- 8 files changed, 108 insertions(+), 17 deletions(-) create mode 100644 examples/finetune/finetune.sh diff --git a/common/train.cpp b/common/train.cpp index 3cce5da269637e..bc15b7a03c0cd4 100644 --- a/common/train.cpp +++ b/common/train.cpp @@ -1045,6 +1045,7 @@ struct train_params_common get_default_train_params_common() { params.n_batch = 8; params.n_gradient_accumulation = 1; params.n_epochs = -1; + params.n_gpu_layers = 0; params.custom_n_ctx = false; @@ -1080,6 +1081,7 @@ struct train_params_common get_default_train_params_common() { params.adam_beta2 = 0.999f; params.adam_gclip = 1.0f; params.adam_eps_f = 0.0f; + return params; } diff --git a/common/train.h b/common/train.h index 42fa704b897ae5..d86c93cc4f1472 100644 --- a/common/train.h +++ b/common/train.h @@ -44,6 +44,7 @@ struct train_params_common { int n_batch; int n_gradient_accumulation; int n_epochs; + int n_gpu_layers; bool custom_n_ctx; diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp index 35824cd2d786a7..60c7faa797028a 100644 --- a/examples/finetune/finetune.cpp +++ b/examples/finetune/finetune.cpp @@ -652,7 +652,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs( GGML_ASSERT(tokens_input->type == GGML_TYPE_I32); auto add_to_f32 = [] (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) { - if (ggml_is_quantized(a->type)) { + if (ggml_is_quantized(a->type) || a->type == GGML_TYPE_F16) { return ggml_add_cast(ctx, a, b, GGML_TYPE_F32); } else if (a->type == GGML_TYPE_F32) { return ggml_add(ctx, a, b); @@ -1459,6 +1459,17 @@ static bool train_params_parse(int argc, char ** argv, struct train_params * par } params->n_rank_w3 = std::stoi(argv[i]); params->custom_n_rank_w3 = true; + } else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers") { + if (++i >= argc) { + invalid_param = true; + break; + } +#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD + params->common.n_gpu_layers = std::stoi(argv[i]); +#else + fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n"); + fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n"); +#endif } else { fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); train_print_usage(argc, argv, &default_params); @@ -1545,6 +1556,7 @@ int main(int argc, char ** argv) { srand(params.common.seed); struct llama_model_params llama_mparams = llama_model_default_params(); + llama_mparams.n_gpu_layers = params.common.n_gpu_layers; 
llama_mparams.vocab_only = false; printf("%s: model base = '%s'\n", __func__, params.fn_model_base); diff --git a/examples/finetune/finetune.sh b/examples/finetune/finetune.sh new file mode 100644 index 00000000000000..079bfa1139d5b5 --- /dev/null +++ b/examples/finetune/finetune.sh @@ -0,0 +1,34 @@ +#!/bin/bash +cd `dirname $0` +cd ../.. + +EXE="./finetune" + +if [[ ! $LLAMA_MODEL_DIR ]]; then LLAMA_MODEL_DIR="./models"; fi +if [[ ! $LLAMA_TRAINING_DIR ]]; then LLAMA_TRAINING_DIR="."; fi + +# MODEL="$LLAMA_MODEL_DIR/openllama-3b-v2-q8_0.gguf" # This is the model the readme uses. +MODEL="$LLAMA_MODEL_DIR/openllama-3b-v2.gguf" # An f16 model. Note in this case with "-g", you get an f32-format .BIN file that isn't yet supported if you use it with "main --lora" with GPU inferencing. + +while getopts "dg" opt; do + case $opt in + d) + DEBUGGER="gdb --args" + ;; + g) + EXE="./build/bin/Release/finetune" + GPUARG="--gpu-layers 25" + ;; + esac +done + +$DEBUGGER $EXE \ + --model-base $MODEL \ + $GPUARG \ + --checkpoint-in chk-ol3b-shakespeare-LATEST.gguf \ + --checkpoint-out chk-ol3b-shakespeare-ITERATION.gguf \ + --lora-out lora-ol3b-shakespeare-ITERATION.bin \ + --train-data "$LLAMA_TRAINING_DIR\shakespeare.txt" \ + --save-every 10 \ + --threads 10 --adam-iter 30 --batch 4 --ctx 64 \ + --use-checkpointing diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 1ba951f688d82c..4e6e7cd94892b1 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -513,6 +513,15 @@ static __global__ void add_f16_f32_f16(const half * x, const float * y, half * d dst[i] = __hadd(x[i], __float2half(y[i])); } +static __global__ void add_f16_f32_f32(const half * x, const float * y, float * dst, const int k) { + const int i = blockDim.x*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + dst[i] = __half2float(x[i]) + y[i]; +} + static __global__ void mul_f32(const float * x, const float * y, float * dst, const int kx, const int ky) { const int i = blockDim.x*blockIdx.x + threadIdx.x; @@ -4693,6 +4702,11 @@ static void add_f16_f32_f16_cuda(const half * x, const float * y, half * dst, co add_f16_f32_f16<<>>(x, y, dst, k); } +static void add_f16_f32_f32_cuda(const half * x, const float * y, float * dst, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_ADD_BLOCK_SIZE - 1) / CUDA_ADD_BLOCK_SIZE; + add_f16_f32_f32<<>>(x, y, dst, k); +} + static void mul_f32_cuda(const float * x, const float * y, float * dst, const int kx, const int ky, cudaStream_t stream) { const int num_blocks = (kx + CUDA_MUL_BLOCK_SIZE - 1) / CUDA_MUL_BLOCK_SIZE; mul_f32<<>>(x, y, dst, kx, ky); @@ -5996,7 +6010,10 @@ inline void ggml_cuda_op_add( add_f32_cuda(src0_dd, src1_dd, dst_dd, ggml_nelements(src0), ne10*ne11, main_stream); } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) { add_f16_f32_f16_cuda((const half *) src0_dd, src1_dd, (half *) dst_dd, ggml_nelements(src0), main_stream); + } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) { + add_f16_f32_f32_cuda((const half *) src0_dd, src1_dd, dst_dd, ggml_nelements(src0), main_stream); } else { + fprintf(stderr, "src0->type: %d dst->type: %d\n", src0->type, dst->type); GGML_ASSERT(false); } diff --git a/ggml-quants.c b/ggml-quants.c index 72159446738e30..255c89b6a7ab84 100644 --- a/ggml-quants.c +++ b/ggml-quants.c @@ -716,6 +716,7 @@ void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) { __riscv_vse8_v_i8m1(y[i].qs , vs, vl); } #else + UNUSED(nb); // scalar quantize_row_q8_0_reference(x, y, k); #endif @@ -969,6 +970,7 @@ void 
quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) { y[i].s = sum*d; } #else + UNUSED(nb); // scalar quantize_row_q8_1_reference(x, y, k); #endif diff --git a/ggml.c b/ggml.c index 84407b1224226f..80d682255328c3 100644 --- a/ggml.c +++ b/ggml.c @@ -3153,7 +3153,7 @@ static struct ggml_tensor * ggml_add_cast_impl( // TODO: support less-strict constraint // GGML_ASSERT(ggml_can_repeat(b, a)); GGML_ASSERT(ggml_can_repeat_rows(b, a)); - GGML_ASSERT(ggml_is_quantized(a->type)); // currently only supported for quantized input + GGML_ASSERT(ggml_is_quantized(a->type) || a->type == GGML_TYPE_F16); // currently only supported for quantized input and f16 bool is_node = false; @@ -6927,9 +6927,15 @@ static void ggml_compute_forward_add_f16_f32( GGML_ASSERT(src0->type == GGML_TYPE_F16); GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F16); - GGML_ASSERT( nb0 == sizeof(ggml_fp16_t)); + if (dst->type == GGML_TYPE_F32) { + GGML_ASSERT( nb0 == sizeof(float)); + } + else { + GGML_ASSERT(dst->type == GGML_TYPE_F16); + GGML_ASSERT( nb0 == sizeof(ggml_fp16_t)); + } + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); // rows per thread @@ -6940,18 +6946,35 @@ static void ggml_compute_forward_add_f16_f32( const int ir1 = MIN(ir0 + dr, nr); if (nb10 == sizeof(float)) { - for (int ir = ir0; ir < ir1; ++ir) { - // src0, src1 and dst are same shape => same indices - const int i3 = ir/(ne2*ne1); - const int i2 = (ir - i3*ne2*ne1)/ne1; - const int i1 = (ir - i3*ne2*ne1 - i2*ne1); - - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1); - ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); - float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11); - - for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]); + if (dst->type == GGML_TYPE_F16) { + for (int ir = ir0; ir < ir1; ++ir) { + // src0, src1 and dst are same shape => same indices + const int i3 = ir/(ne2*ne1); + const int i2 = (ir - i3*ne2*ne1)/ne1; + const int i1 = (ir - i3*ne2*ne1 - i2*ne1); + + ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1); + ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); + float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11); + + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]); + } + } + } else { + for (int ir = ir0; ir < ir1; ++ir) { + // src0, src1 and dst are same shape => same indices + const int i3 = ir/(ne2*ne1); + const int i2 = (ir - i3*ne2*ne1)/ne1; + const int i1 = (ir - i3*ne2*ne1 - i2*ne1); + + float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1); + ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); + float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11); + + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]; + } } } } diff --git a/llama.cpp b/llama.cpp index ead1d421d243dc..42cedc7a1cd592 100644 --- a/llama.cpp +++ b/llama.cpp @@ -8003,7 +8003,7 @@ static int llama_apply_lora_from_file_internal( if (dest_t->backend == GGML_BACKEND_GPU || dest_t->backend == GGML_BACKEND_GPU_SPLIT) { if (dest_t->type != GGML_TYPE_F16) { throw std::runtime_error(format( - "%s: error: the simultaneous use of LoRAs and GPU acceleration is only 
supported for f16 models", __func__)); + "%s: error: the simultaneous use of LoRAs and GPU acceleration is only supported for f16 models. dest_t->type: %d", __func__, dest_t->type)); } offload_func = ggml_cuda_assign_buffers; offload_func_force_inplace = ggml_cuda_assign_buffers_force_inplace; From 9a3b4f6c86503c9cfc049d4d0fdeafef12806f5e Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 1 Nov 2023 13:50:45 +0200 Subject: [PATCH 031/206] ggml : fix UNUSED macro (#3762) --- ggml-quants.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ggml-quants.c b/ggml-quants.c index 255c89b6a7ab84..740be6dc5c7981 100644 --- a/ggml-quants.c +++ b/ggml-quants.c @@ -716,7 +716,7 @@ void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) { __riscv_vse8_v_i8m1(y[i].qs , vs, vl); } #else - UNUSED(nb); + GGML_UNUSED(nb); // scalar quantize_row_q8_0_reference(x, y, k); #endif @@ -970,7 +970,7 @@ void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) { y[i].s = sum*d; } #else - UNUSED(nb); + GGML_UNUSED(nb); // scalar quantize_row_q8_1_reference(x, y, k); #endif From e75dfdd31b6a3dfa0627ba4ac3bb4b36e9db588e Mon Sep 17 00:00:00 2001 From: l3utterfly Date: Wed, 1 Nov 2023 21:40:43 +0800 Subject: [PATCH 032/206] sampling : null grammar field after reset (#3885) --- common/sampling.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/common/sampling.cpp b/common/sampling.cpp index 673d67a6d5380e..1317024c2c11cf 100644 --- a/common/sampling.cpp +++ b/common/sampling.cpp @@ -39,6 +39,7 @@ void llama_sampling_free(struct llama_sampling_context * ctx) { void llama_sampling_reset(llama_sampling_context * ctx) { if (ctx->grammar != NULL) { llama_grammar_free(ctx->grammar); + ctx->grammar = NULL; } if (!ctx->parsed_grammar.rules.empty()) { From a2758d08e44ce3624d233af4d23c6843e2e735b5 Mon Sep 17 00:00:00 2001 From: staviq Date: Wed, 1 Nov 2023 15:18:27 +0100 Subject: [PATCH 033/206] log : make generating separate log files optional (#3787) * impl --log-new, --log-append * Update common/log.h Co-authored-by: cebtenzzre * Update common/log.h Co-authored-by: cebtenzzre * Apply suggestions from code review Co-authored-by: cebtenzzre --------- Co-authored-by: cebtenzzre --- common/log.h | 122 ++++++++++++++++++++++++++++++++++----------------- 1 file changed, 82 insertions(+), 40 deletions(-) diff --git a/common/log.h b/common/log.h index d2c864ceab7be6..c0e814861e0c69 100644 --- a/common/log.h +++ b/common/log.h @@ -97,38 +97,56 @@ #define LOG_TEE_TARGET stderr #endif -// NOTE: currently disabled as it produces too many log files +// Utility for synchronizing log configuration state +// since std::optional was introduced only in c++17 +enum LogTriState +{ + LogTriStateSame, + LogTriStateFalse, + LogTriStateTrue +}; + // Utility to obtain "pid" like unique process id and use it when creating log files. -//inline std::string log_get_pid() -//{ -// static std::string pid; -// if (pid.empty()) -// { -// // std::this_thread::get_id() is the most portable way of obtaining a "process id" -// // it's not the same as "pid" but is unique enough to solve multiple instances -// // trying to write to the same log. 
-// std::stringstream ss; -// ss << std::this_thread::get_id(); -// pid = ss.str(); -// } -// -// return pid; -//} +inline std::string log_get_pid() +{ + static std::string pid; + if (pid.empty()) + { + // std::this_thread::get_id() is the most portable way of obtaining a "process id" + // it's not the same as "pid" but is unique enough to solve multiple instances + // trying to write to the same log. + std::stringstream ss; + ss << std::this_thread::get_id(); + pid = ss.str(); + } + + return pid; +} // Utility function for generating log file names with unique id based on thread id. // invocation with log_filename_generator( "llama", "log" ) creates a string "llama..log" // where the number is a runtime id of the current thread. -#define log_filename_generator(log_file_basename, log_file_extension) log_filename_generator_impl(log_file_basename, log_file_extension) +#define log_filename_generator(log_file_basename, log_file_extension) log_filename_generator_impl(LogTriStateSame, log_file_basename, log_file_extension) // INTERNAL, DO NOT USE -inline std::string log_filename_generator_impl(const std::string & log_file_basename, const std::string & log_file_extension) +inline std::string log_filename_generator_impl(LogTriState multilog, const std::string & log_file_basename, const std::string & log_file_extension) { + static bool _multilog = false; + + if (multilog != LogTriStateSame) + { + _multilog = multilog == LogTriStateTrue; + } + std::stringstream buf; buf << log_file_basename; - //buf << "."; - //buf << log_get_pid(); + if (_multilog) + { + buf << "."; + buf << log_get_pid(); + } buf << "."; buf << log_file_extension; @@ -213,15 +231,6 @@ inline std::string log_filename_generator_impl(const std::string & log_file_base #define LOG_TEE_FLF_VAL ,"" #endif -// Utility for synchronizing log configuration state -// since std::optional was introduced only in c++17 -enum LogTriState -{ - LogTriStateSame, - LogTriStateFalse, - LogTriStateTrue -}; - // INTERNAL, DO NOT USE // USE LOG() INSTEAD // @@ -315,16 +324,23 @@ enum LogTriState #endif // INTERNAL, DO NOT USE -inline FILE *log_handler1_impl(bool change = false, LogTriState disable = LogTriStateSame, const std::string & filename = LOG_DEFAULT_FILE_NAME, FILE *target = nullptr) +inline FILE *log_handler1_impl(bool change = false, LogTriState append = LogTriStateSame, LogTriState disable = LogTriStateSame, const std::string & filename = LOG_DEFAULT_FILE_NAME, FILE *target = nullptr) { - static bool _initialized{false}; - static bool _disabled{(filename.empty() && target == nullptr)}; + static bool _initialized = false; + static bool _append = false; + static bool _disabled = filename.empty() && target == nullptr; static std::string log_current_filename{filename}; static FILE *log_current_target{target}; static FILE *logfile = nullptr; if (change) { + if (append != LogTriStateSame) + { + _append = append == LogTriStateTrue; + return logfile; + } + if (disable == LogTriStateTrue) { // Disable primary target @@ -377,7 +393,7 @@ inline FILE *log_handler1_impl(bool change = false, LogTriState disable = LogTri } } - logfile = fopen(filename.c_str(), "w"); + logfile = fopen(filename.c_str(), _append ? 
"a" : "w"); } if (!logfile) @@ -398,9 +414,9 @@ inline FILE *log_handler1_impl(bool change = false, LogTriState disable = LogTri } // INTERNAL, DO NOT USE -inline FILE *log_handler2_impl(bool change = false, LogTriState disable = LogTriStateSame, FILE *target = nullptr, const std::string & filename = LOG_DEFAULT_FILE_NAME) +inline FILE *log_handler2_impl(bool change = false, LogTriState append = LogTriStateSame, LogTriState disable = LogTriStateSame, FILE *target = nullptr, const std::string & filename = LOG_DEFAULT_FILE_NAME) { - return log_handler1_impl(change, disable, filename, target); + return log_handler1_impl(change, append, disable, filename, target); } // Disables logs entirely at runtime. @@ -411,7 +427,7 @@ inline FILE *log_handler2_impl(bool change = false, LogTriState disable = LogTri // INTERNAL, DO NOT USE inline FILE *log_disable_impl() { - return log_handler1_impl(true, LogTriStateTrue); + return log_handler1_impl(true, LogTriStateSame, LogTriStateTrue); } // Enables logs at runtime. @@ -420,19 +436,31 @@ inline FILE *log_disable_impl() // INTERNAL, DO NOT USE inline FILE *log_enable_impl() { - return log_handler1_impl(true, LogTriStateFalse); + return log_handler1_impl(true, LogTriStateSame, LogTriStateFalse); } // Sets target fir logs, either by a file name or FILE* pointer (stdout, stderr, or any valid FILE*) #define log_set_target(target) log_set_target_impl(target) // INTERNAL, DO NOT USE -inline FILE *log_set_target_impl(const std::string & filename) { return log_handler1_impl(true, LogTriStateSame, filename); } -inline FILE *log_set_target_impl(FILE *target) { return log_handler2_impl(true, LogTriStateSame, target); } +inline FILE *log_set_target_impl(const std::string & filename) { return log_handler1_impl(true, LogTriStateSame, LogTriStateSame, filename); } +inline FILE *log_set_target_impl(FILE *target) { return log_handler2_impl(true, LogTriStateSame, LogTriStateSame, target); } // INTERNAL, DO NOT USE inline FILE *log_handler() { return log_handler1_impl(); } +// Enable or disable creating separate log files for each run. +// can ONLY be invoked BEFORE first log use. +#define log_multilog(enable) log_filename_generator_impl((enable) ? LogTriStateTrue : LogTriStateFalse, "", "") +// Enable or disable append mode for log file. +// can ONLY be invoked BEFORE first log use. +#define log_append(enable) log_append_impl(enable) +// INTERNAL, DO NOT USE +inline FILE *log_append_impl(bool enable) +{ + return log_handler1_impl(true, enable ? LogTriStateTrue : LogTriStateFalse, LogTriStateSame); +} + inline void log_test() { log_disable(); @@ -494,6 +522,18 @@ inline bool log_param_single_parse(const std::string & param) return true; } + if (param == "--log-new") + { + log_multilog(true); + return true; + } + + if (param == "--log-append") + { + log_append(true); + return true; + } + return false; } @@ -523,7 +563,9 @@ inline void log_print_usage() printf(" --log-disable Disable trace logs\n"); printf(" --log-enable Enable trace logs\n"); printf(" --log-file Specify a log filename (without extension)\n"); - printf(" Log file will be tagged with unique ID and written as \"..log\"\n"); /* */ + printf(" --log-new Create a separate new log file on start. 
" + "Each log file will have unique name: \"..log\"\n"); + printf(" --log-append Don't truncate the old log file.\n"); } #define log_dump_cmdline(argc, argv) log_dump_cmdline_impl(argc, argv) From 0e40806c1cb3bdf9955ed807ffbe212be85b4c67 Mon Sep 17 00:00:00 2001 From: bandoti <141645996+bandoti@users.noreply.github.com> Date: Wed, 1 Nov 2023 14:42:01 -0300 Subject: [PATCH 034/206] common : allow caller to handle help/argument exceptions (#3715) * Allow caller to handle help/argument exceptions * Prepend newline to usage output * Add new gpt_params_parse_ex function to hide arg-parse impl * Fix issue blocking success case * exit instead of returning false * Update common/common.h Co-authored-by: Georgi Gerganov * Update common/common.cpp Co-authored-by: Georgi Gerganov --------- Co-authored-by: Georgi Gerganov --- common/common.cpp | 41 ++++++++++++++++++++++++++--------------- common/common.h | 2 ++ 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index dc4865e80b1544..89be4126185db5 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -103,9 +103,24 @@ void process_escapes(std::string& input) { } bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { + bool result = true; + try { + if (!gpt_params_parse_ex(argc, argv, params)) { + gpt_print_usage(argc, argv, gpt_params()); + exit(0); + } + } + catch (const std::invalid_argument& ex) { + fprintf(stderr, ex.what()); + gpt_print_usage(argc, argv, gpt_params()); + exit(1); + } + return result; +} + +bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { bool invalid_param = false; std::string arg; - gpt_params default_params; const std::string arg_prefix = "--"; llama_sampling_params & sparams = params.sparams; @@ -554,11 +569,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { break; } } else if (arg == "-h" || arg == "--help") { - gpt_print_usage(argc, argv, default_params); -#ifndef LOG_DISABLE_LOGS - log_print_usage(); -#endif // LOG_DISABLE_LOGS - exit(0); + return false; + } else if (arg == "--random-prompt") { params.random_prompt = true; } else if (arg == "--in-prefix-bos") { @@ -617,22 +629,17 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { // End of Parse args for logging parameters #endif // LOG_DISABLE_LOGS } else { - fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); - gpt_print_usage(argc, argv, default_params); - exit(1); + throw std::invalid_argument("error: unknown argument: " + arg); } } if (invalid_param) { - fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str()); - gpt_print_usage(argc, argv, default_params); - exit(1); + throw std::invalid_argument("error: invalid parameter for argument: " + arg); } if (params.prompt_cache_all && (params.interactive || params.interactive_first || params.instruct)) { - fprintf(stderr, "error: --prompt-cache-all not supported in interactive mode yet\n"); - gpt_print_usage(argc, argv, default_params); - exit(1); + + throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n"); } if (params.escape) { @@ -651,6 +658,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { const llama_sampling_params & sparams = params.sparams; + printf("\n"); printf("usage: %s [options]\n", argv[0]); printf("\n"); printf("options:\n"); @@ -762,6 +770,9 @@ void gpt_print_usage(int /*argc*/, char ** argv, const 
gpt_params & params) { printf(" -ld LOGDIR, --logdir LOGDIR\n"); printf(" path under which to save YAML logs (no logging if unset)\n"); printf("\n"); +#ifndef LOG_DISABLE_LOGS + log_print_usage(); +#endif // LOG_DISABLE_LOGS } std::string get_system_info(const gpt_params & params) { diff --git a/common/common.h b/common/common.h index 84523a4fbf460a..343b272177c7ec 100644 --- a/common/common.h +++ b/common/common.h @@ -110,6 +110,8 @@ struct gpt_params { std::string image = ""; // path to an image file }; +bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params); + bool gpt_params_parse(int argc, char ** argv, gpt_params & params); void gpt_print_usage(int argc, char ** argv, const gpt_params & params); From 50337961a678fce4081554b24e56e86b67660163 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 1 Nov 2023 20:11:02 +0200 Subject: [PATCH 035/206] llm : add llm_build_context (#3881) * llm : add llm_build_context * llm : deduce norm eps based on type + explict max_alibi_bias, clamp_kqv * llm : restore the non-graph llm_build_ functional API ggml-ci * llm : cleanup + comments --- llama.cpp | 1986 +++++++++++++++++++++++------------------------------ 1 file changed, 866 insertions(+), 1120 deletions(-) diff --git a/llama.cpp b/llama.cpp index 42cedc7a1cd592..d0c4ef10151828 100644 --- a/llama.cpp +++ b/llama.cpp @@ -3090,6 +3090,10 @@ static bool llama_model_load( return true; } +// +// llm_build +// + using llm_build_cb = std::function; enum llm_rope_type { @@ -3098,17 +3102,35 @@ enum llm_rope_type { LLM_ROPE_GLM, }; +enum llm_ffn_op_type { + LLM_FFN_SILU, + LLM_FFN_GELU, + LLM_FFN_RELU, + LLM_FFN_RELU_SQR, +}; + +enum llm_ffn_gate_type { + LLM_FFN_SEQ, + LLM_FFN_PAR, // ffn_gate is parallel to ffn_up +}; + +enum llm_norm_type { + LLM_NORM, + LLM_NORM_RMS, +}; + static struct ggml_tensor * llm_build_inp_embd( struct ggml_context * ctx, + const llama_hparams & hparams, const llama_batch & batch, struct ggml_tensor * tok_embd, - int64_t n_embd, - int32_t n_tokens, const llm_build_cb & cb) { + const int64_t n_embd = hparams.n_embd; + struct ggml_tensor * inpL; if (batch.token) { - struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_tokens); + struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, batch.n_tokens); cb(inp_tokens, "inp_tokens", -1); inpL = ggml_get_rows(ctx, tok_embd, inp_tokens); @@ -3117,7 +3139,7 @@ static struct ggml_tensor * llm_build_inp_embd( GGML_ASSERT(false && "not implemented"); #endif - inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_tokens); + inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, batch.n_tokens); } return inpL; @@ -3126,28 +3148,21 @@ static struct ggml_tensor * llm_build_inp_embd( // Persimmon: n_rot = n_embd_head/2 // Other: n_rot = n_embd_head static void llm_build_k_shift( - const llama_context & lctx, - struct ggml_context * ctx, - struct ggml_cgraph * graph, - int64_t n_rot, - llm_rope_type type, - const llm_build_cb & cb) { - const auto & model = lctx.model; - const auto & kv_self = lctx.kv_self; - const auto & cparams = lctx.cparams; - - const auto & hparams = model.hparams; - + struct ggml_context * ctx, + const llama_hparams & hparams, + const llama_kv_cache & kv, + struct ggml_cgraph * graph, + llm_rope_type type, + int64_t n_ctx, + int64_t n_rot, + float freq_base, + float freq_scale, + const llm_build_cb & cb) { const int64_t n_layer = hparams.n_layer; const int64_t n_head_kv = hparams.n_head_kv; const int64_t n_embd_gqa = hparams.n_embd_gqa(); const int64_t n_embd_head 
= hparams.n_embd_head(); - const int64_t n_ctx = lctx.cparams.n_ctx; - - const float freq_base = cparams.rope_freq_base; - const float freq_scale = cparams.rope_freq_scale; - GGML_ASSERT(n_embd_head % n_rot == 0); struct ggml_tensor * K_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_ctx); @@ -3165,11 +3180,11 @@ static void llm_build_k_shift( struct ggml_tensor * tmp = // we rotate only the first n_rot dimensions ggml_rope_custom_inplace(ctx, - ggml_view_3d(ctx, kv_self.k, + ggml_view_3d(ctx, kv.k, n_rot, n_head_kv, n_ctx, - ggml_element_size(kv_self.k)*n_embd_head, - ggml_element_size(kv_self.k)*n_embd_gqa, - ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il), + ggml_element_size(kv.k)*n_embd_head, + ggml_element_size(kv.k)*n_embd_gqa, + ggml_element_size(kv.k)*n_embd_gqa*n_ctx*il), K_shift, n_rot, rope_type, 0, freq_base, freq_scale); cb(tmp, "K_shifted", il); ggml_build_forward_expand(graph, tmp); @@ -3177,22 +3192,17 @@ static void llm_build_k_shift( } static void llm_build_kv_store( - const llama_context & lctx, struct ggml_context * ctx, + const llama_hparams & hparams, + const llama_kv_cache & kv, struct ggml_cgraph * graph, struct ggml_tensor * k_cur, struct ggml_tensor * v_cur, + int64_t n_ctx, int32_t n_tokens, int32_t kv_head, const llm_build_cb & cb, int64_t il) { - const auto & model = lctx.model; - const auto & kv_self = lctx.kv_self; - const auto & cparams = lctx.cparams; - - const auto & hparams = model.hparams; - - const int64_t n_ctx = cparams.n_ctx; const int64_t n_embd_gqa = hparams.n_embd_gqa(); // compute the transposed [n_tokens, n_embd] V matrix @@ -3200,13 +3210,13 @@ static void llm_build_kv_store( //struct ggml_tensor * v_cur_t = ggml_transpose(ctx, v_cur); // TODO: reshape above is likely not needed cb(v_cur_t, "v_cur_t", il); - struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv_self.k, n_tokens*n_embd_gqa, - (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + kv_head)); + struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv.k, n_tokens*n_embd_gqa, + (ggml_element_size(kv.k)*n_embd_gqa)*(il*n_ctx + kv_head)); cb(k_cache_view, "k_cache_view", il); - struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, kv_self.v, n_tokens, n_embd_gqa, - ( n_ctx)*ggml_element_size(kv_self.v), - (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + kv_head*ggml_element_size(kv_self.v)); + struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, kv.v, n_tokens, n_embd_gqa, + ( n_ctx)*ggml_element_size(kv.v), + (il*n_ctx)*ggml_element_size(kv.v)*n_embd_gqa + kv_head*ggml_element_size(kv.v)); cb(v_cache_view, "v_cache_view", il); // important: storing RoPE-ed version of K in the KV cache! 
@@ -3214,23 +3224,18 @@ static void llm_build_kv_store( ggml_build_forward_expand(graph, ggml_cpy(ctx, v_cur_t, v_cache_view)); } -enum llm_norm_type { - LLM_NORM, - LLM_NORM_RMS, -}; - static struct ggml_tensor * llm_build_norm( struct ggml_context * ctx, struct ggml_tensor * cur, + const llama_hparams & hparams, struct ggml_tensor * mw, struct ggml_tensor * mb, llm_norm_type type, - float eps, const llm_build_cb & cb, int il) { switch (type) { - case LLM_NORM: cur = ggml_norm (ctx, cur, eps); break; - case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, eps); break; + case LLM_NORM: cur = ggml_norm (ctx, cur, hparams.f_norm_eps); break; + case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hparams.f_norm_rms_eps); break; } if (mw || mb) { @@ -3251,18 +3256,6 @@ static struct ggml_tensor * llm_build_norm( return cur; } -enum llm_ffn_op_type { - LLM_FFN_SILU, - LLM_FFN_GELU, - LLM_FFN_RELU, - LLM_FFN_RELU_SQR, -}; - -enum llm_ffn_gate_type { - LLM_FFN_SEQ, - LLM_FFN_PAR, // ffn_gate is parallel to ffn_up -}; - static struct ggml_tensor * llm_build_ffn( struct ggml_context * ctx, struct ggml_tensor * cur, @@ -3351,26 +3344,21 @@ static struct ggml_tensor * llm_build_ffn( // if max_alibi_bias > 0 then apply ALiBi static struct ggml_tensor * llm_build_kqv( - const llama_context & lctx, struct ggml_context * ctx, struct ggml_tensor * cur, + const llama_hparams & hparams, + const llama_kv_cache & kv, struct ggml_tensor * wo, struct ggml_tensor * wo_b, struct ggml_tensor * q_cur, struct ggml_tensor * kq_scale, struct ggml_tensor * kq_mask, + int64_t n_ctx, int32_t n_tokens, int32_t n_kv, - float alibi_bias_max, + float max_alibi_bias, const llm_build_cb & cb, - int il) { - const auto & model = lctx.model; - const auto & kv_self = lctx.kv_self; - const auto & cparams = lctx.cparams; - - const auto & hparams = model.hparams; - - const int64_t n_ctx = cparams.n_ctx; + int il) { const int64_t n_embd = hparams.n_embd; const int64_t n_head = hparams.n_head; const int64_t n_head_kv = hparams.n_head_kv; @@ -3381,11 +3369,11 @@ static struct ggml_tensor * llm_build_kqv( cb(q, "q", il); struct ggml_tensor * k = - ggml_view_3d(ctx, kv_self.k, + ggml_view_3d(ctx, kv.k, n_embd_head, n_kv, n_head_kv, - ggml_element_size(kv_self.k)*n_embd_gqa, - ggml_element_size(kv_self.k)*n_embd_head, - ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il); + ggml_element_size(kv.k)*n_embd_gqa, + ggml_element_size(kv.k)*n_embd_head, + ggml_element_size(kv.k)*n_embd_gqa*n_ctx*il); cb(k, "k", il); struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q); @@ -3394,11 +3382,11 @@ static struct ggml_tensor * llm_build_kqv( kq = ggml_scale(ctx, kq, kq_scale); cb(kq, "kq_scaled", il); - if (alibi_bias_max > 0.0f) { + if (max_alibi_bias > 0.0f) { // TODO: n_head or n_head_kv // TODO: K-shift is likely not working // TODO: change to ggml_add - kq = ggml_alibi(ctx, kq, /*n_past*/ 0, n_head, alibi_bias_max); + kq = ggml_alibi(ctx, kq, /*n_past*/ 0, n_head, max_alibi_bias); cb(kq, "kq_scaled_alibi", il); } @@ -3410,11 +3398,11 @@ static struct ggml_tensor * llm_build_kqv( // split cached v into n_head heads struct ggml_tensor * v = - ggml_view_3d(ctx, kv_self.v, + ggml_view_3d(ctx, kv.v, n_kv, n_embd_head, n_head_kv, - ggml_element_size(kv_self.v)*n_ctx, - ggml_element_size(kv_self.v)*n_ctx*n_embd_head, - ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il); + ggml_element_size(kv.v)*n_ctx, + ggml_element_size(kv.v)*n_ctx*n_embd_head, + ggml_element_size(kv.v)*n_ctx*n_embd_gqa*il); cb(v, "v", il); struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq); @@ 
-3438,1259 +3426,1011 @@ static struct ggml_tensor * llm_build_kqv( return cur; } -static struct ggml_cgraph * llm_build_llama( - llama_context & lctx, - const llama_batch & batch, - const llm_build_cb & cb, - bool worst_case) { - const auto & model = lctx.model; - const auto & hparams = model.hparams; - const auto & cparams = lctx.cparams; - - const auto & kv_self = lctx.kv_self; +struct llm_build_context { + const llama_model & model; + const llama_hparams & hparams; + const llama_cparams & cparams; + const llama_batch & batch; + const llama_kv_cache & kv_self; - GGML_ASSERT(!!kv_self.ctx); + const int64_t n_embd; + const int64_t n_layer; + const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train) + const int64_t n_head; + const int64_t n_head_kv; + const int64_t n_embd_head; + const int64_t n_embd_gqa; - const int64_t n_embd = hparams.n_embd; - const int64_t n_layer = hparams.n_layer; - const int64_t n_ctx = cparams.n_ctx; - const int64_t n_head = hparams.n_head; - const int64_t n_head_kv = hparams.n_head_kv; - const int64_t n_embd_head = hparams.n_embd_head(); + const float freq_base; + const float freq_scale; + const float norm_eps; + const float norm_rms_eps; - GGML_ASSERT(n_embd_head == hparams.n_rot); + const int32_t n_tokens; + const int32_t n_kv; // size of KV cache to consider (n_kv <= n_ctx) + const int32_t kv_head; // index of where we store new KV data in the cache - const float freq_base = cparams.rope_freq_base; - const float freq_scale = cparams.rope_freq_scale; - const float norm_rms_eps = hparams.f_norm_rms_eps; + const bool do_rope_shift; - const int32_t n_tokens = batch.n_tokens; - const int32_t n_kv = worst_case ? n_ctx : kv_self.n; - const int32_t kv_head = worst_case ? n_ctx - n_tokens : kv_self.head; + const llm_build_cb & cb; - const bool do_rope_shift = worst_case || kv_self.has_shift; + llama_buffer & buf_compute; - //printf("n_kv = %d\n", n_kv); + struct ggml_context * ctx0 = nullptr; - auto & buf_compute = lctx.buf_compute; - - struct ggml_init_params params = { - /*.mem_size =*/ buf_compute.size, - /*.mem_buffer =*/ buf_compute.data, - /*.no_alloc =*/ true, - }; - - struct ggml_context * ctx0 = ggml_init(params); - - ggml_cgraph * gf = ggml_new_graph(ctx0); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, batch, model.tok_embd, n_embd, n_tokens, cb); - cb(inpL, "inp_embd", -1); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - cb(inp_pos, "inp_pos", -1); - - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); - cb(KQ_mask, "KQ_mask", -1); + // TODO: consider making the entire interface noexcept + llm_build_context( + llama_context & lctx, + const llama_batch & batch, + const llm_build_cb & cb, + bool worst_case) : + model (lctx.model), + hparams (model.hparams), + cparams (lctx.cparams), + batch (batch), + kv_self (lctx.kv_self), + n_embd (hparams.n_embd), + n_layer (hparams.n_layer), + n_ctx (cparams.n_ctx), + n_head (hparams.n_head), + n_head_kv (hparams.n_head_kv), + n_embd_head (hparams.n_embd_head()), + n_embd_gqa (hparams.n_embd_gqa()), + freq_base (cparams.rope_freq_base), + freq_scale (cparams.rope_freq_scale), + norm_eps (hparams.f_norm_eps), + norm_rms_eps 
(hparams.f_norm_rms_eps), + n_tokens (batch.n_tokens), + n_kv (worst_case ? n_ctx : kv_self.n), + kv_head (worst_case ? n_ctx - n_tokens : kv_self.head), + do_rope_shift (worst_case || kv_self.has_shift), + cb (cb), + buf_compute (lctx.buf_compute) { + GGML_ASSERT(!!kv_self.ctx); + + // all initializations should be done in init() + } + + void init() { + struct ggml_init_params params = { + /*.mem_size =*/ buf_compute.size, + /*.mem_buffer =*/ buf_compute.data, + /*.no_alloc =*/ true, + }; - // shift the entire K-cache if needed - if (do_rope_shift) { - llm_build_k_shift(lctx, ctx0, gf, n_embd_head, LLM_ROPE, cb); + ctx0 = ggml_init(params); } - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - // norm - cur = llm_build_norm(ctx0, inpL, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, norm_rms_eps, cb, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - - struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - - struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - - Qcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, n_embd_head, 0, 0, freq_base, freq_scale); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, n_embd_head, 0, 0, freq_base, freq_scale); - cb(Kcur, "Kcur", il); - - llm_build_kv_store(lctx, ctx0, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); - - cur = llm_build_kqv(lctx, ctx0, cur, - model.layers[il].wo, NULL, - Qcur, KQ_scale, KQ_mask, n_tokens, n_kv, -1.0f, cb, il); - cb(cur, "kqv_out", il); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - { - cur = llm_build_norm(ctx0, ffn_inp, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, norm_rms_eps, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, cur, - model.layers[il].ffn_up, NULL, - model.layers[il].ffn_gate, NULL, - model.layers[il].ffn_down, NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); + void free() { + if (ctx0) { + ggml_free(ctx0); + ctx0 = nullptr; } - - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; } - cur = inpL; - - cur = llm_build_norm(ctx0, cur, - model.output_norm, NULL, - LLM_NORM_RMS, norm_rms_eps, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = ggml_mul_mat(ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - ggml_free(ctx0); - - return gf; -} - -static struct ggml_cgraph * llm_build_baichaun( - llama_context & lctx, - const llama_batch & batch, - const llm_build_cb & cb, - bool worst_case) { - const auto & model = lctx.model; - const auto & hparams = model.hparams; - const auto & cparams = lctx.cparams; - - const auto & kv_self = lctx.kv_self; - - GGML_ASSERT(!!kv_self.ctx); - - const int64_t n_embd = hparams.n_embd; - const int64_t n_layer = hparams.n_layer; - const int64_t n_ctx = cparams.n_ctx; - const int64_t n_head = hparams.n_head; - const int64_t n_head_kv = hparams.n_head_kv; - const int64_t n_embd_head = hparams.n_embd_head(); - - GGML_ASSERT(n_embd_head == hparams.n_rot); - - const float freq_base = cparams.rope_freq_base; - const float freq_scale = cparams.rope_freq_scale; - const 
float norm_rms_eps = hparams.f_norm_rms_eps; + struct ggml_cgraph * build_llama() { + struct ggml_cgraph * gf = ggml_new_graph(ctx0); - const int32_t n_tokens = batch.n_tokens; - const int32_t n_kv = worst_case ? n_ctx : kv_self.n; - const int32_t kv_head = worst_case ? n_ctx - n_tokens : kv_self.head; + GGML_ASSERT(n_embd_head == hparams.n_rot); - const bool do_rope_shift = worst_case || kv_self.has_shift; + struct ggml_tensor * cur; + struct ggml_tensor * inpL; - auto & buf_compute = lctx.buf_compute; + inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb); + cb(inpL, "inp_embd", -1); - struct ggml_init_params params = { - /*.mem_size =*/ buf_compute.size, - /*.mem_buffer =*/ buf_compute.data, - /*.no_alloc =*/ true, - }; + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + cb(inp_pos, "inp_pos", -1); - struct ggml_context * ctx0 = ggml_init(params); + // KQ_scale + struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); + cb(KQ_scale, "KQ_scale", -1); - ggml_cgraph * gf = ggml_new_graph(ctx0); + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); + cb(KQ_mask, "KQ_mask", -1); - struct ggml_tensor * cur; - struct ggml_tensor * inpL; + // shift the entire K-cache if needed + if (do_rope_shift) { + llm_build_k_shift(ctx0, hparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb); + } - inpL = llm_build_inp_embd(ctx0, batch, model.tok_embd, n_embd, n_tokens, cb); - cb(inpL, "inp_embd", -1); + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * inpSA = inpL; - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - cb(inp_pos, "inp_pos", -1); + // norm + cur = llm_build_norm(ctx0, inpL, hparams, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "attn_norm", il); - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); + // self-attention + { + // compute Q and K and RoPE them + struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); - cb(KQ_mask, "KQ_mask", -1); + struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); - // shift the entire K-cache if needed - if (do_rope_shift) { - llm_build_k_shift(lctx, ctx0, gf, n_embd_head, LLM_ROPE, cb); - } + struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; + Qcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, n_embd_head, 0, 0, freq_base, freq_scale); + cb(Qcur, "Qcur", il); - cur = llm_build_norm(ctx0, inpL, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, norm_rms_eps, cb, il); - cb(cur, "attn_norm", il); + Kcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, n_embd_head, 0, 0, freq_base, freq_scale); + cb(Kcur, "Kcur", il); - // self-attention - { - struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); + llm_build_kv_store(ctx0, hparams, kv_self, gf, 
Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); + cur = llm_build_kqv(ctx0, cur, hparams, kv_self, + model.layers[il].wo, NULL, + Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il); + cb(cur, "kqv_out", il); + } - struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); - switch (model.type) { - case MODEL_7B: - Qcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, n_embd_head, 0, 0, freq_base, freq_scale); - Kcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, n_embd_head, 0, 0, freq_base, freq_scale); - break; - case MODEL_13B: - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd/n_head, n_head, n_tokens); - break; - default: - GGML_ASSERT(false); + // feed-forward network + { + cur = llm_build_norm(ctx0, ffn_inp, hparams, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "ffn_norm", il); + + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, NULL, + model.layers[il].ffn_gate, NULL, + model.layers[il].ffn_down, NULL, + LLM_FFN_SILU, LLM_FFN_PAR, cb, il); + cb(cur, "ffn_out", il); } - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - llm_build_kv_store(lctx, ctx0, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "l_out", il); - // apply ALiBi for 13B model - const float alibi_bias_max = model.type == MODEL_13B ? 8.0f : -1.0f; - - cur = llm_build_kqv(lctx, ctx0, cur, - model.layers[il].wo, NULL, - Qcur, KQ_scale, KQ_mask, n_tokens, n_kv, alibi_bias_max, cb, il); - cb(cur, "kqv_out", il); + // input for next layer + inpL = cur; } - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); + cur = inpL; - // feed-forward network - { - cur = llm_build_norm(ctx0, ffn_inp, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, norm_rms_eps, cb, il); - cb(cur, "ffn_norm", il); + cur = llm_build_norm(ctx0, cur, hparams, + model.output_norm, NULL, + LLM_NORM_RMS, cb, -1); + cb(cur, "result_norm", -1); - cur = llm_build_ffn(ctx0, cur, - model.layers[il].ffn_up, NULL, - model.layers[il].ffn_gate, NULL, - model.layers[il].ffn_down, NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } + // lm_head + cur = ggml_mul_mat(ctx0, model.output, cur); + cb(cur, "result_output", -1); - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "l_out", il); + ggml_build_forward_expand(gf, cur); - // input for next layer - inpL = cur; + return gf; } - cur = inpL; + struct ggml_cgraph * build_baichuan() { + struct ggml_cgraph * gf = ggml_new_graph(ctx0); - cur = llm_build_norm(ctx0, cur, - model.output_norm, NULL, - LLM_NORM_RMS, norm_rms_eps, cb, -1); - cb(cur, "result_norm", -1); + struct ggml_tensor * cur; + struct ggml_tensor * inpL; - // lm_head - cur = ggml_mul_mat(ctx0, model.output, cur); - cb(cur, "result_output", -1); + inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb); + cb(inpL, "inp_embd", -1); - ggml_build_forward_expand(gf, cur); + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + cb(inp_pos, "inp_pos", -1); - ggml_free(ctx0); + // KQ_scale + struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); 
+ cb(KQ_scale, "KQ_scale", -1); - return gf; -} - -static struct ggml_cgraph * llm_build_falcon( - llama_context & lctx, - const llama_batch & batch, - const llm_build_cb & cb, - bool worst_case) { - const auto & model = lctx.model; - const auto & hparams = model.hparams; - const auto & cparams = lctx.cparams; - - const auto & kv_self = lctx.kv_self; - - GGML_ASSERT(!!kv_self.ctx); - - const int64_t n_embd = hparams.n_embd; - const int64_t n_layer = hparams.n_layer; - const int64_t n_ctx = cparams.n_ctx; - const int64_t n_head = hparams.n_head; - const int64_t n_head_kv = hparams.n_head_kv; - const int64_t n_embd_head = hparams.n_embd_head(); - const int64_t n_embd_gqa = hparams.n_embd_gqa(); - - GGML_ASSERT(n_embd_head == hparams.n_rot); + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); + cb(KQ_mask, "KQ_mask", -1); - const float freq_base = cparams.rope_freq_base; - const float freq_scale = cparams.rope_freq_scale; - const float norm_eps = hparams.f_norm_eps; - - const int32_t n_tokens = batch.n_tokens; - const int32_t n_kv = worst_case ? n_ctx : kv_self.n; - const int32_t kv_head = worst_case ? n_ctx - n_tokens : kv_self.head; - - const bool do_rope_shift = worst_case || kv_self.has_shift; - - //printf("kv_head = %d, n_kv = %d, n_tokens = %d, n_ctx = %d, is_measure = %d, has_shift = %d\n", - // kv_head, n_kv, n_tokens, n_ctx, ggml_allocr_is_measure(lctx.alloc), kv_self.has_shift); - - auto & buf_compute = lctx.buf_compute; - - struct ggml_init_params params = { - /*.mem_size =*/ buf_compute.size, - /*.mem_buffer =*/ buf_compute.data, - /*.no_alloc =*/ true, - }; - - struct ggml_context * ctx0 = ggml_init(params); - - ggml_cgraph * gf = ggml_new_graph(ctx0); + // shift the entire K-cache if needed + if (do_rope_shift) { + llm_build_k_shift(ctx0, hparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb); + } - struct ggml_tensor * cur; - struct ggml_tensor * inpL; + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * inpSA = inpL; - inpL = llm_build_inp_embd(ctx0, batch, model.tok_embd, n_embd, n_tokens, cb); - cb(inpL, "inp_embd", -1); + cur = llm_build_norm(ctx0, inpL, hparams, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "attn_norm", il); - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - cb(inp_pos, "inp_pos", -1); + // self-attention + { + struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); + struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); - cb(KQ_mask, "KQ_mask", -1); + struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); - // shift the entire K-cache if needed - if (do_rope_shift) { - llm_build_k_shift(lctx, ctx0, gf, n_embd_head, LLM_ROPE_NEOX, cb); - } + switch (model.type) { + case MODEL_7B: + Qcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, n_embd_head, 0, 0, freq_base, freq_scale); + Kcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, 
n_tokens), inp_pos, n_embd_head, 0, 0, freq_base, freq_scale); + break; + case MODEL_13B: + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd/n_head, n_head, n_tokens); + break; + default: + GGML_ASSERT(false); + } + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * attn_norm; + llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - attn_norm = llm_build_norm(ctx0, inpL, - model.layers[il].attn_norm, - model.layers[il].attn_norm_b, - LLM_NORM, norm_eps, cb, il); - cb(attn_norm, "attn_norm", il); + // apply ALiBi for 13B model + const float max_alibi_bias = model.type == MODEL_13B ? 8.0f : -1.0f; - // self-attention - { - if (model.layers[il].attn_norm_2) { - // Falcon-40B - cur = llm_build_norm(ctx0, attn_norm, - model.layers[il].attn_norm_2, - model.layers[il].attn_norm_2_b, - LLM_NORM, norm_eps, cb, il); - cb(cur, "attn_norm_2", il); - } else { - cur = attn_norm; + cur = llm_build_kqv(ctx0, cur, hparams, kv_self, + model.layers[il].wo, NULL, + Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, max_alibi_bias, cb, il); + cb(cur, "kqv_out", il); } - cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - - struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - - // using mode = 2 for neox mode - Qcur = ggml_rope_custom(ctx0, Qcur, inp_pos, n_embd_head, 2, 0, freq_base, freq_scale); - cb(Qcur, "Qcur", il); - - Kcur = ggml_rope_custom(ctx0, Kcur, inp_pos, n_embd_head, 2, 0, freq_base, freq_scale); - cb(Kcur, "Kcur", il); + // feed-forward network + { + cur = llm_build_norm(ctx0, ffn_inp, hparams, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "ffn_norm", il); + + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, NULL, + model.layers[il].ffn_gate, NULL, + model.layers[il].ffn_down, NULL, + LLM_FFN_SILU, LLM_FFN_PAR, cb, il); + cb(cur, "ffn_out", il); + } - llm_build_kv_store(lctx, ctx0, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "l_out", il); - cur = llm_build_kqv(lctx, ctx0, attn_norm, - model.layers[il].wo, NULL, - Qcur, KQ_scale, KQ_mask, n_tokens, n_kv, -1.0f, cb, il); - cb(cur, "kqv_out", il); + // input for next layer + inpL = cur; } - struct ggml_tensor * ffn_inp = cur; + cur = inpL; - // feed forward - { - cur = llm_build_ffn(ctx0, attn_norm, // !! 
use the attn norm, not the result - model.layers[il].ffn_up, NULL, - NULL, NULL, - model.layers[il].ffn_down, NULL, - LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); - cb(cur, "ffn_out", il); - } + cur = llm_build_norm(ctx0, cur, hparams, + model.output_norm, NULL, + LLM_NORM_RMS, cb, -1); + cb(cur, "result_norm", -1); - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "l_out", il); + // lm_head + cur = ggml_mul_mat(ctx0, model.output, cur); + cb(cur, "result_output", -1); - cur = ggml_add(ctx0, cur, inpL); - cb(cur, "l_out", il); + ggml_build_forward_expand(gf, cur); - // input for next layer - inpL = cur; + return gf; } - cur = inpL; - - // norm - cur = llm_build_norm(ctx0, cur, - model.output_norm, - model.output_norm_b, - LLM_NORM, norm_eps, cb, -1); - cb(cur, "result_norm", -1); - - cur = ggml_mul_mat(ctx0, model.output, cur); - cb(cur, "result_output", -1); + struct ggml_cgraph * build_falcon() { + struct ggml_cgraph * gf = ggml_new_graph(ctx0); - ggml_build_forward_expand(gf, cur); + struct ggml_tensor * cur; + struct ggml_tensor * inpL; - ggml_free(ctx0); + inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb); + cb(inpL, "inp_embd", -1); - return gf; -} - -static struct ggml_cgraph * llm_build_starcoder( - llama_context & lctx, - const llama_batch & batch, - const llm_build_cb & cb, - bool worst_case) { - const auto & model = lctx.model; - const auto & hparams = model.hparams; - const auto & cparams = lctx.cparams; - - const auto & kv_self = lctx.kv_self; - - GGML_ASSERT(!!kv_self.ctx); - - const int64_t n_embd = hparams.n_embd; - const int64_t n_layer = hparams.n_layer; - const int64_t n_ctx = cparams.n_ctx; - const int64_t n_head = hparams.n_head; - const int64_t n_embd_head = hparams.n_embd_head(); - const int64_t n_embd_gqa = hparams.n_embd_gqa(); - - GGML_ASSERT(n_embd_head == hparams.n_rot); - - const float norm_eps = hparams.f_norm_eps; + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + cb(inp_pos, "inp_pos", -1); - const int32_t n_tokens = batch.n_tokens; - const int32_t n_kv = worst_case ? n_ctx : kv_self.n; - const int32_t kv_head = worst_case ? 
n_ctx - n_tokens : kv_self.head; + // KQ_scale + struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); + cb(KQ_scale, "KQ_scale", -1); - auto & buf_compute = lctx.buf_compute; + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); + cb(KQ_mask, "KQ_mask", -1); - struct ggml_init_params params = { - /*.mem_size =*/ buf_compute.size, - /*.mem_buffer =*/ buf_compute.data, - /*.no_alloc =*/ true, - }; - - struct ggml_context * ctx0 = ggml_init(params); + // shift the entire K-cache if needed + if (do_rope_shift) { + llm_build_k_shift(ctx0, hparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb); + } - ggml_cgraph * gf = ggml_new_graph(ctx0); + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * attn_norm; - struct ggml_tensor * cur; - struct ggml_tensor * pos; - struct ggml_tensor * inpL; + attn_norm = llm_build_norm(ctx0, inpL, hparams, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, cb, il); + cb(attn_norm, "attn_norm", il); - inpL = llm_build_inp_embd(ctx0, batch, model.tok_embd, n_embd, n_tokens, cb); - cb(inpL, "inp_embd", -1); + // self-attention + { + if (model.layers[il].attn_norm_2) { + // Falcon-40B + cur = llm_build_norm(ctx0, attn_norm, hparams, + model.layers[il].attn_norm_2, + model.layers[il].attn_norm_2_b, + LLM_NORM, cb, il); + cb(cur, "attn_norm_2", il); + } else { + cur = attn_norm; + } - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - cb(inp_pos, "inp_pos", -1); + cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); + struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); - cb(KQ_mask, "KQ_mask", -1); + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); - pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos); - cb(pos, "pos_embd", -1); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - inpL = ggml_add(ctx0, inpL, pos); - cb(inpL, "inpL", -1); + // using mode = 2 for neox mode + Qcur = ggml_rope_custom(ctx0, Qcur, inp_pos, n_embd_head, 2, 0, freq_base, freq_scale); + cb(Qcur, "Qcur", il); - for (int il = 0; il < n_layer; ++il) { - cur = llm_build_norm(ctx0, inpL, - model.layers[il].attn_norm, - model.layers[il].attn_norm_b, - LLM_NORM, norm_eps, cb, il); - cb(cur, "attn_norm", il); + Kcur = ggml_rope_custom(ctx0, Kcur, inp_pos, n_embd_head, 2, 0, freq_base, freq_scale); + cb(Kcur, "Kcur", il); - // self-attention - { - cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); + llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - cb(cur, "bqkv", il); + 
cur = llm_build_kqv(ctx0, attn_norm, hparams, kv_self, + model.layers[il].wo, NULL, + Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il); + cb(cur, "kqv_out", il); + } - struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + struct ggml_tensor * ffn_inp = cur; - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); + // feed forward + { + cur = llm_build_ffn(ctx0, attn_norm, // !! use the attn norm, not the result + model.layers[il].ffn_up, NULL, + NULL, NULL, + model.layers[il].ffn_down, NULL, + LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); + cb(cur, "ffn_out", il); + } - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "l_out", il); - llm_build_kv_store(lctx, ctx0, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); + cur = ggml_add(ctx0, cur, inpL); + cb(cur, "l_out", il); - cur = llm_build_kqv(lctx, ctx0, cur, - model.layers[il].wo, model.layers[il].bo, - Qcur, KQ_scale, KQ_mask, n_tokens, n_kv, -1.0f, cb, il); - cb(cur, "kqv_out", il); + // input for next layer + inpL = cur; } - // add the input - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); - cb(ffn_inp, "ffn_inp", il); + cur = inpL; - // FF - { - cur = llm_build_norm(ctx0, ffn_inp, - model.layers[il].ffn_norm, - model.layers[il].ffn_norm_b, - LLM_NORM, norm_eps, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, - NULL, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, - LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); - cb(cur, "ffn_out", il); - } + // norm + cur = llm_build_norm(ctx0, cur, hparams, + model.output_norm, + model.output_norm_b, + LLM_NORM, cb, -1); + cb(cur, "result_norm", -1); - inpL = ggml_add(ctx0, cur, ffn_inp); - cb(inpL, "l_out", il); - } + cur = ggml_mul_mat(ctx0, model.output, cur); + cb(cur, "result_output", -1); - cur = llm_build_norm(ctx0, inpL, - model.output_norm, - model.output_norm_b, - LLM_NORM, norm_eps, cb, -1); - cb(cur, "result_norm", -1); + ggml_build_forward_expand(gf, cur); - cur = ggml_mul_mat(ctx0, model.output, cur); - cb(cur, "result_output", -1); + return gf; + } - ggml_build_forward_expand(gf, cur); - ggml_free(ctx0); + struct ggml_cgraph * build_starcoder() { + struct ggml_cgraph * gf = ggml_new_graph(ctx0); - return gf; -} + struct ggml_tensor * cur; + struct ggml_tensor * pos; + struct ggml_tensor * inpL; -static struct ggml_cgraph * llm_build_persimmon( - llama_context & lctx, - const llama_batch & batch, - const llm_build_cb & cb, - bool worst_case) { - const auto & model = lctx.model; - const auto & hparams = model.hparams; + inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb); + cb(inpL, "inp_embd", -1); - const auto & kv_self = lctx.kv_self; + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + cb(inp_pos, "inp_pos", -1); - GGML_ASSERT(!!kv_self.ctx); + // KQ_scale + struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); + cb(KQ_scale, "KQ_scale", -1); - const auto & cparams = lctx.cparams; + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + struct 
ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); + cb(KQ_mask, "KQ_mask", -1); - const int64_t n_embd = hparams.n_embd; - const int64_t n_layer = hparams.n_layer; - const int64_t n_ctx = cparams.n_ctx; - const int64_t n_head_kv = hparams.n_head_kv; - const int64_t n_head = hparams.n_head; - const int64_t n_embd_head = hparams.n_embd_head(); - const int64_t n_rot = n_embd_head / 2; + pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos); + cb(pos, "pos_embd", -1); - const float freq_base = cparams.rope_freq_base; - const float freq_scale = cparams.rope_freq_scale; - const float norm_eps = hparams.f_norm_eps; + inpL = ggml_add(ctx0, inpL, pos); + cb(inpL, "inpL", -1); - const int32_t n_tokens = batch.n_tokens; - const int32_t n_kv = worst_case ? n_ctx : kv_self.n; - const int32_t kv_head = worst_case ? n_ctx - n_tokens : kv_self.head; + for (int il = 0; il < n_layer; ++il) { + cur = llm_build_norm(ctx0, inpL, hparams, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, cb, il); + cb(cur, "attn_norm", il); - const bool do_rope_shift = worst_case || kv_self.has_shift; + // self-attention + { + cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); - auto & buf_compute = lctx.buf_compute; + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); - struct ggml_init_params params = { - /*.mem_size =*/ buf_compute.size, - /*.mem_buffer =*/ buf_compute.data, - /*.no_alloc =*/ true, - }; + struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - struct ggml_context * ctx0 = ggml_init(params); + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); - ggml_cgraph * gf = ggml_new_graph(ctx0); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - struct ggml_tensor * cur; - struct ggml_tensor * inpL; + llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - inpL = llm_build_inp_embd(ctx0, batch, model.tok_embd, n_embd, n_tokens, cb); - cb(inpL, "imp_embd", -1); + cur = llm_build_kqv(ctx0, cur, hparams, kv_self, + model.layers[il].wo, model.layers[il].bo, + Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il); + cb(cur, "kqv_out", il); + } - struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - cb(inp_pos, "inp_pos", -1); + // add the input + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); + // FF + { + cur = llm_build_norm(ctx0, ffn_inp, hparams, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, cb, il); + cb(cur, "ffn_norm", il); + + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, + NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, + LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); + cb(cur, "ffn_out", il); + } - struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); - cb(KQ_mask, "KQ_mask", -1); + inpL = ggml_add(ctx0, cur, ffn_inp); + cb(inpL, "l_out", il); + } - if (do_rope_shift) { - 
llm_build_k_shift(lctx, ctx0, gf, n_rot, LLM_ROPE_NEOX, cb); - } + cur = llm_build_norm(ctx0, inpL, hparams, + model.output_norm, + model.output_norm_b, + LLM_NORM, cb, -1); + cb(cur, "result_norm", -1); - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * residual = inpL; + cur = ggml_mul_mat(ctx0, model.output, cur); + cb(cur, "result_output", -1); - cur = llm_build_norm(ctx0, inpL, - model.layers[il].attn_norm, - model.layers[il].attn_norm_b, - LLM_NORM, norm_eps, cb, il); - cb(cur, "attn_norm", il); + ggml_build_forward_expand(gf, cur); - // self attention - { - cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - cb(cur, "bqkv", il); - - // split qkv - GGML_ASSERT(n_head_kv == n_head); - - struct ggml_tensor * tmpqkv = ggml_reshape_4d(ctx0, cur, n_embd_head, 3, n_head, n_tokens); - cb(tmpqkv, "tmpqkv", il); - - struct ggml_tensor * tmpqkv_perm = ggml_cont(ctx0, ggml_permute(ctx0, tmpqkv, 0, 3, 1, 2)); - cb(tmpqkv_perm, "tmpqkv", il); - - struct ggml_tensor * tmpq = ggml_view_3d( - ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens, - ggml_element_size(tmpqkv_perm) * n_embd_head, - ggml_element_size(tmpqkv_perm) * n_embd_head * n_head, - 0 - ); - cb(tmpq, "tmpq", il); - - struct ggml_tensor * tmpk = ggml_view_3d( - ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens, - ggml_element_size(tmpqkv_perm) * n_embd_head, - ggml_element_size(tmpqkv_perm) * n_embd_head * n_head, - ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens - ); - cb(tmpk, "tmpk", il); - - // Q/K Layernorm - tmpq = llm_build_norm(ctx0, tmpq, - model.layers[il].attn_q_norm, - model.layers[il].attn_q_norm_b, - LLM_NORM, norm_eps, cb, il); - cb(tmpq, "tmpq", il); - - tmpk = llm_build_norm(ctx0, tmpk, - model.layers[il].attn_k_norm, - model.layers[il].attn_k_norm_b, - LLM_NORM, norm_eps, cb, il); - cb(tmpk, "tmpk", il); - - // RoPE the first n_rot of q/k, pass the other half, and concat. 
- struct ggml_tensor * qrot = ggml_view_3d( - ctx0, tmpq, n_rot, n_head, n_tokens, - ggml_element_size(tmpq) * n_embd_head, - ggml_element_size(tmpq) * n_embd_head * n_head, - 0 - ); - cb(qrot, "qrot", il); + return gf; + } - struct ggml_tensor * krot = ggml_view_3d( - ctx0, tmpk, n_rot, n_head, n_tokens, - ggml_element_size(tmpk) * n_embd_head, - ggml_element_size(tmpk) * n_embd_head * n_head, - 0 - ); - cb(krot, "krot", il); - - // get the second half of tmpq, e.g tmpq[n_rot:, :, :] - struct ggml_tensor * qpass = ggml_view_3d( - ctx0, tmpq, n_rot, n_head, n_tokens, - ggml_element_size(tmpq) * n_embd_head, - ggml_element_size(tmpq) * n_embd_head * n_head, - ggml_element_size(tmpq) * n_rot - ); - cb(qpass, "qpass", il); + struct ggml_cgraph * build_persimmon() { + struct ggml_cgraph * gf = ggml_new_graph(ctx0); - struct ggml_tensor * kpass = ggml_view_3d( - ctx0, tmpk, n_rot, n_head, n_tokens, - ggml_element_size(tmpk) * n_embd_head, - ggml_element_size(tmpk) * n_embd_head * n_head, - ggml_element_size(tmpk) * n_rot - ); - cb(kpass, "kpass", il); + const int64_t n_rot = n_embd_head / 2; - struct ggml_tensor * qrotated = ggml_rope_custom( - ctx0, qrot, inp_pos, n_rot, 2, 0, freq_base, freq_scale - ); - cb(qrotated, "qrotated", il); + struct ggml_tensor * cur; + struct ggml_tensor * inpL; - struct ggml_tensor * krotated = ggml_rope_custom( - ctx0, krot, inp_pos, n_rot, 2, 0, freq_base, freq_scale - ); - cb(krotated, "krotated", il); + inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb); + cb(inpL, "imp_embd", -1); - // ggml currently only supports concatenation on dim=2 - // so we need to permute qrot, qpass, concat, then permute back. - qrotated = ggml_cont(ctx0, ggml_permute(ctx0, qrotated, 2, 1, 0, 3)); - cb(qrotated, "qrotated", il); + struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + cb(inp_pos, "inp_pos", -1); - krotated = ggml_cont(ctx0, ggml_permute(ctx0, krotated, 2, 1, 0, 3)); - cb(krotated, "krotated", il); + // KQ_scale + struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); + cb(KQ_scale, "KQ_scale", -1); - qpass = ggml_cont(ctx0, ggml_permute(ctx0, qpass, 2, 1, 0, 3)); - cb(qpass, "qpass", il); + struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); + cb(KQ_mask, "KQ_mask", -1); - kpass = ggml_cont(ctx0, ggml_permute(ctx0, kpass, 2, 1, 0, 3)); - cb(kpass, "kpass", il); + if (do_rope_shift) { + llm_build_k_shift(ctx0, hparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb); + } - struct ggml_tensor * Qcur = ggml_concat(ctx0, qrotated, qpass); - cb(Qcur, "Qcur", il); + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * residual = inpL; - struct ggml_tensor * Kcur = ggml_concat(ctx0, krotated, kpass); - cb(Kcur, "Kcur", il); + cur = llm_build_norm(ctx0, inpL, hparams, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, cb, il); + cb(cur, "attn_norm", il); - struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 1, 2, 0, 3)); - cb(Q, "Q", il); + // self attention + { + cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); + + // split qkv + GGML_ASSERT(n_head_kv == n_head); + + struct ggml_tensor * tmpqkv = ggml_reshape_4d(ctx0, cur, n_embd_head, 3, n_head, n_tokens); + cb(tmpqkv, "tmpqkv", il); + + struct ggml_tensor * tmpqkv_perm = ggml_cont(ctx0, ggml_permute(ctx0, tmpqkv, 0, 3, 1, 2)); + 
cb(tmpqkv_perm, "tmpqkv", il); + + struct ggml_tensor * tmpq = ggml_view_3d( + ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens, + ggml_element_size(tmpqkv_perm) * n_embd_head, + ggml_element_size(tmpqkv_perm) * n_embd_head * n_head, + 0 + ); + cb(tmpq, "tmpq", il); + + struct ggml_tensor * tmpk = ggml_view_3d( + ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens, + ggml_element_size(tmpqkv_perm) * n_embd_head, + ggml_element_size(tmpqkv_perm) * n_embd_head * n_head, + ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens + ); + cb(tmpk, "tmpk", il); + + // Q/K Layernorm + tmpq = llm_build_norm(ctx0, tmpq, hparams, + model.layers[il].attn_q_norm, + model.layers[il].attn_q_norm_b, + LLM_NORM, cb, il); + cb(tmpq, "tmpq", il); + + tmpk = llm_build_norm(ctx0, tmpk, hparams, + model.layers[il].attn_k_norm, + model.layers[il].attn_k_norm_b, + LLM_NORM, cb, il); + cb(tmpk, "tmpk", il); + + // RoPE the first n_rot of q/k, pass the other half, and concat. + struct ggml_tensor * qrot = ggml_view_3d( + ctx0, tmpq, n_rot, n_head, n_tokens, + ggml_element_size(tmpq) * n_embd_head, + ggml_element_size(tmpq) * n_embd_head * n_head, + 0 + ); + cb(qrot, "qrot", il); + + struct ggml_tensor * krot = ggml_view_3d( + ctx0, tmpk, n_rot, n_head, n_tokens, + ggml_element_size(tmpk) * n_embd_head, + ggml_element_size(tmpk) * n_embd_head * n_head, + 0 + ); + cb(krot, "krot", il); + + // get the second half of tmpq, e.g tmpq[n_rot:, :, :] + struct ggml_tensor * qpass = ggml_view_3d( + ctx0, tmpq, n_rot, n_head, n_tokens, + ggml_element_size(tmpq) * n_embd_head, + ggml_element_size(tmpq) * n_embd_head * n_head, + ggml_element_size(tmpq) * n_rot + ); + cb(qpass, "qpass", il); + + struct ggml_tensor * kpass = ggml_view_3d( + ctx0, tmpk, n_rot, n_head, n_tokens, + ggml_element_size(tmpk) * n_embd_head, + ggml_element_size(tmpk) * n_embd_head * n_head, + ggml_element_size(tmpk) * n_rot + ); + cb(kpass, "kpass", il); + + struct ggml_tensor * qrotated = ggml_rope_custom( + ctx0, qrot, inp_pos, n_rot, 2, 0, freq_base, freq_scale + ); + cb(qrotated, "qrotated", il); + + struct ggml_tensor * krotated = ggml_rope_custom( + ctx0, krot, inp_pos, n_rot, 2, 0, freq_base, freq_scale + ); + cb(krotated, "krotated", il); + + // ggml currently only supports concatenation on dim=2 + // so we need to permute qrot, qpass, concat, then permute back. 
+ qrotated = ggml_cont(ctx0, ggml_permute(ctx0, qrotated, 2, 1, 0, 3)); + cb(qrotated, "qrotated", il); + + krotated = ggml_cont(ctx0, ggml_permute(ctx0, krotated, 2, 1, 0, 3)); + cb(krotated, "krotated", il); + + qpass = ggml_cont(ctx0, ggml_permute(ctx0, qpass, 2, 1, 0, 3)); + cb(qpass, "qpass", il); + + kpass = ggml_cont(ctx0, ggml_permute(ctx0, kpass, 2, 1, 0, 3)); + cb(kpass, "kpass", il); + + struct ggml_tensor * Qcur = ggml_concat(ctx0, qrotated, qpass); + cb(Qcur, "Qcur", il); + + struct ggml_tensor * Kcur = ggml_concat(ctx0, krotated, kpass); + cb(Kcur, "Kcur", il); + + struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 1, 2, 0, 3)); + cb(Q, "Q", il); + + Kcur = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 2, 1, 0, 3)); + cb(Kcur, "Kcur", il); + + struct ggml_tensor * Vcur = ggml_view_3d( + ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens, + ggml_element_size(tmpqkv_perm) * n_embd_head, + ggml_element_size(tmpqkv_perm) * n_embd_head * n_head, + ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens * 2 + ); + cb(Vcur, "Vcur", il); + + llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); + + // TODO: not tested, could be broken + cur = llm_build_kqv(ctx0, Q, hparams, kv_self, + model.layers[il].wo, model.layers[il].bo, + Q, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il); + cb(cur, "kqv_out", il); + } - Kcur = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 2, 1, 0, 3)); - cb(Kcur, "Kcur", il); + struct ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur); + cb(ffn_inp, "ffn_inp", il); - struct ggml_tensor * Vcur = ggml_view_3d( - ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens, - ggml_element_size(tmpqkv_perm) * n_embd_head, - ggml_element_size(tmpqkv_perm) * n_embd_head * n_head, - ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens * 2 - ); - cb(Vcur, "Vcur", il); + // feed-forward network + { + cur = llm_build_norm(ctx0, ffn_inp, hparams, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, cb, il); + cb(cur, "ffn_norm", il); + + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, + NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, + LLM_FFN_RELU_SQR, LLM_FFN_SEQ, cb, il); + cb(cur, "ffn_out", il); + } - llm_build_kv_store(lctx, ctx0, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "l_out", il); - // TODO: not tested, could be broken - cur = llm_build_kqv(lctx, ctx0, Q, - model.layers[il].wo, model.layers[il].bo, - Q, KQ_scale, KQ_mask, n_tokens, n_kv, -1.0f, cb, il); - cb(cur, "kqv_out", il); + inpL = cur; } - struct ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur); - cb(ffn_inp, "ffn_inp", il); + cur = inpL; - // feed-forward network - { - cur = llm_build_norm(ctx0, ffn_inp, - model.layers[il].ffn_norm, - model.layers[il].ffn_norm_b, - LLM_NORM, norm_eps, cb, il); - cb(cur, "ffn_norm", il); + cur = llm_build_norm(ctx0, cur, hparams, + model.output_norm, + model.output_norm_b, + LLM_NORM, cb, -1); + cb(cur, "result_norm", -1); - cur = llm_build_ffn(ctx0, cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, - NULL, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, - LLM_FFN_RELU_SQR, LLM_FFN_SEQ, cb, il); - cb(cur, "ffn_out", il); - } + cur = ggml_mul_mat(ctx0, model.output, cur); + cb(cur, "result_output", -1); - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "l_out", il); + ggml_build_forward_expand(gf, cur); - inpL = cur; + return gf; } - cur = inpL; - - cur = 
llm_build_norm(ctx0, cur, - model.output_norm, - model.output_norm_b, - LLM_NORM, norm_eps, cb, -1); - cb(cur, "result_norm", -1); - - cur = ggml_mul_mat(ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - ggml_free(ctx0); - - return gf; -} - -static struct ggml_cgraph * llm_build_refact( - llama_context & lctx, - const llama_batch & batch, - const llm_build_cb & cb, - bool worst_case) { - const auto & model = lctx.model; - const auto & hparams = model.hparams; - const auto & cparams = lctx.cparams; - - const auto & kv_self = lctx.kv_self; - - GGML_ASSERT(!!kv_self.ctx); - - const int64_t n_embd = hparams.n_embd; - const int64_t n_layer = hparams.n_layer; - const int64_t n_ctx = cparams.n_ctx; - const int64_t n_head = hparams.n_head; - const int64_t n_head_kv = hparams.n_head_kv; - const int64_t n_embd_head = hparams.n_embd_head(); - - const float norm_rms_eps = hparams.f_norm_rms_eps; - - const int32_t n_tokens = batch.n_tokens; - const int32_t n_kv = worst_case ? n_ctx : kv_self.n; - const int32_t kv_head = worst_case ? n_ctx - n_tokens : kv_self.head; + struct ggml_cgraph * build_refact() { + struct ggml_cgraph * gf = ggml_new_graph(ctx0); - auto & buf_compute = lctx.buf_compute; + struct ggml_tensor * cur; + struct ggml_tensor * inpL; - struct ggml_init_params params = { - /*.mem_size =*/ buf_compute.size, - /*.mem_buffer =*/ buf_compute.data, - /*.no_alloc =*/ true, - }; + inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb); + cb(inpL, "inp_embd", -1); - struct ggml_context * ctx0 = ggml_init(params); + // KQ_scale + struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); + cb(KQ_scale, "KQ_scale", -1); - ggml_cgraph * gf = ggml_new_graph(ctx0); + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); + cb(KQ_mask, "KQ_mask", -1); - struct ggml_tensor * cur; - struct ggml_tensor * inpL; + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * inpSA = inpL; - inpL = llm_build_inp_embd(ctx0, batch, model.tok_embd, n_embd, n_tokens, cb); - cb(inpL, "inp_embd", -1); + cur = llm_build_norm(ctx0, inpL, hparams, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "attn_norm", il); - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); + // self-attention + { + struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); - cb(KQ_mask, "KQ_mask", -1); + struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; + struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); - cur = llm_build_norm(ctx0, inpL, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, norm_rms_eps, cb, il); - cb(cur, "attn_norm", il); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + cb(Kcur, "Kcur", il); - // self-attention - { - struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + cb(Qcur, "Qcur", il); - struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, 
model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); + llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); + cur = llm_build_kqv(ctx0, Qcur, hparams, kv_self, + model.layers[il].wo, NULL, + Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, cb, il); + cb(cur, "kqv_out", il); + } - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - cb(Kcur, "Kcur", il); + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - cb(Qcur, "Qcur", il); + // feed-forward network + { + cur = llm_build_norm(ctx0, ffn_inp, hparams, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "ffn_norm", il); + + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, NULL, + model.layers[il].ffn_gate, NULL, + model.layers[il].ffn_down, NULL, + LLM_FFN_SILU, LLM_FFN_PAR, cb, il); + cb(cur, "ffn_out", il); + } - llm_build_kv_store(lctx, ctx0, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "l_out", il); - cur = llm_build_kqv(lctx, ctx0, Qcur, - model.layers[il].wo, NULL, - Qcur, KQ_scale, KQ_mask, n_tokens, n_kv, 8.0f, cb, il); - cb(cur, "kqv_out", il); + // input for next layer + inpL = cur; } - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); + cur = inpL; - // feed-forward network - { - cur = llm_build_norm(ctx0, ffn_inp, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, norm_rms_eps, cb, il); - cb(cur, "ffn_norm", il); + cur = llm_build_norm(ctx0, cur, hparams, + model.output_norm, NULL, + LLM_NORM_RMS, cb, -1); + cb(cur, "result_norm", -1); - cur = llm_build_ffn(ctx0, cur, - model.layers[il].ffn_up, NULL, - model.layers[il].ffn_gate, NULL, - model.layers[il].ffn_down, NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } + // lm_head + cur = ggml_mul_mat(ctx0, model.output, cur); + cb(cur, "result_output", -1); - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "l_out", il); + ggml_build_forward_expand(gf, cur); - // input for next layer - inpL = cur; + return gf; } - cur = inpL; - - cur = llm_build_norm(ctx0, cur, - model.output_norm, NULL, - LLM_NORM_RMS, norm_rms_eps, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head - cur = ggml_mul_mat(ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - ggml_free(ctx0); - - return gf; -} - -static struct ggml_cgraph * llm_build_bloom( - llama_context & lctx, - const llama_batch & batch, - const llm_build_cb & cb, - bool worst_case) { - const auto & model = lctx.model; - const auto & hparams = model.hparams; - const auto & cparams = lctx.cparams; - - const auto & kv_self = lctx.kv_self; - - GGML_ASSERT(!!kv_self.ctx); - - const int64_t n_embd = hparams.n_embd; - const int64_t n_layer = hparams.n_layer; - const int64_t n_ctx = cparams.n_ctx; - const int64_t n_head = hparams.n_head; - const int64_t n_embd_head = hparams.n_embd_head(); - const int64_t n_embd_gqa = hparams.n_embd_gqa(); - - GGML_ASSERT(n_embd_head == hparams.n_rot); - - const float norm_eps = hparams.f_norm_eps; - - const int32_t n_tokens = batch.n_tokens; - const int32_t n_kv = worst_case ? n_ctx : kv_self.n; - const int32_t kv_head = worst_case ? 
n_ctx - n_tokens : kv_self.head; - - auto & buf_compute = lctx.buf_compute; - - struct ggml_init_params params = { - /*.mem_size =*/ buf_compute.size, - /*.mem_buffer =*/ buf_compute.data, - /*.no_alloc =*/ false, - }; + struct ggml_cgraph * build_bloom() { + struct ggml_cgraph * gf = ggml_new_graph(ctx0); - params.no_alloc = true; + struct ggml_tensor * cur; + struct ggml_tensor * inpL; - struct ggml_context * ctx0 = ggml_init(params); + inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb); + cb(inpL, "inp_embd", -1); - ggml_cgraph * gf = ggml_new_graph(ctx0); + // KQ_scale + struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); + cb(KQ_scale, "KQ_scale", -1); - struct ggml_tensor * cur; - struct ggml_tensor * inpL; + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); + cb(KQ_mask, "KQ_mask", -1); - inpL = llm_build_inp_embd(ctx0, batch, model.tok_embd, n_embd, n_tokens, cb); - cb(inpL, "inp_embd", -1); + inpL = llm_build_norm(ctx0, inpL, hparams, + model.tok_norm, + model.tok_norm_b, + LLM_NORM, cb, -1); + cb(inpL, "inp_norm", -1); - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); + for (int il = 0; il < n_layer; ++il) { + cur = llm_build_norm(ctx0, inpL, hparams, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, cb, il); + cb(cur, "attn_norm", il); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); - cb(KQ_mask, "KQ_mask", -1); + // self-attention + { + cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); - inpL = llm_build_norm(ctx0, inpL, - model.tok_norm, - model.tok_norm_b, - LLM_NORM, norm_eps, cb, -1); - cb(inpL, "inp_norm", -1); + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); - for (int il = 0; il < n_layer; ++il) { - cur = llm_build_norm(ctx0, inpL, - model.layers[il].attn_norm, - model.layers[il].attn_norm_b, - LLM_NORM, norm_eps, cb, il); - cb(cur, "attn_norm", il); + struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - // self-attention - { - cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - cb(cur, "bqkv", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); + cur = llm_build_kqv(ctx0, Qcur, hparams, 
kv_self, + model.layers[il].wo, model.layers[il].bo, + Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, cb, il); + cb(cur, "kqv_out", il); + } - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + // Add the input + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); - llm_build_kv_store(lctx, ctx0, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); + // FF + { + cur = llm_build_norm(ctx0, ffn_inp, hparams, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, cb, il); + cb(cur, "ffn_norm", il); + + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, + NULL, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, + LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); + cb(cur, "ffn_out", il); + } - cur = llm_build_kqv(lctx, ctx0, Qcur, - model.layers[il].wo, model.layers[il].bo, - Qcur, KQ_scale, KQ_mask, n_tokens, n_kv, 8.0f, cb, il); - cb(cur, "kqv_out", il); + inpL = ggml_add(ctx0, cur, ffn_inp); + cb(inpL, "l_out", il); } - // Add the input - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); - cb(ffn_inp, "ffn_inp", il); + cur = llm_build_norm(ctx0, inpL, hparams, + model.output_norm, + model.output_norm_b, + LLM_NORM, cb, -1); + cb(cur, "result_norm", -1); - // FF - { - cur = llm_build_norm(ctx0, ffn_inp, - model.layers[il].ffn_norm, - model.layers[il].ffn_norm_b, - LLM_NORM, norm_eps, cb, il); - cb(cur, "ffn_norm", il); + cur = ggml_mul_mat(ctx0, model.output, cur); + cb(cur, "result_output", -1); - cur = llm_build_ffn(ctx0, cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, - NULL, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, - LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); - cb(cur, "ffn_out", il); - } + ggml_build_forward_expand(gf, cur); - inpL = ggml_add(ctx0, cur, ffn_inp); - cb(inpL, "l_out", il); + return gf; } - cur = llm_build_norm(ctx0, inpL, - model.output_norm, - model.output_norm_b, - LLM_NORM, norm_eps, cb, -1); - cb(cur, "result_norm", -1); + struct ggml_cgraph * build_mpt() { + struct ggml_cgraph * gf = ggml_new_graph(ctx0); - cur = ggml_mul_mat(ctx0, model.output, cur); - cb(cur, "result_output", -1); + struct ggml_tensor * cur; + struct ggml_tensor * inpL; - ggml_build_forward_expand(gf, cur); + inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb); + cb(inpL, "inp_embd", -1); - ggml_free(ctx0); + // KQ_scale + struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); + cb(KQ_scale, "KQ_scale", -1); - return gf; -} + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); + cb(KQ_mask, "KQ_mask", -1); -static struct ggml_cgraph * llm_build_mpt( - llama_context & lctx, - const llama_batch & batch, - const llm_build_cb & cb, - bool worst_case) { - const auto & model = lctx.model; - const auto & hparams = model.hparams; - const auto & cparams = lctx.cparams; - - const auto & kv_self = lctx.kv_self; - - GGML_ASSERT(!!kv_self.ctx); - - const int64_t n_embd = hparams.n_embd; - const int64_t n_layer = hparams.n_layer; - const int64_t n_ctx = cparams.n_ctx; - const int64_t n_head = hparams.n_head; - const int64_t n_embd_head = hparams.n_embd_head(); - const int64_t n_embd_gqa = hparams.n_embd_gqa(); - - const float norm_eps = hparams.f_norm_eps; - const float clamp_kqv = hparams.f_clamp_kqv; - const float max_alibi_bias = hparams.f_max_alibi_bias; - - const int32_t n_tokens = batch.n_tokens; - const int32_t n_kv = worst_case 
? n_ctx : kv_self.n; - const int32_t kv_head = worst_case ? n_ctx - n_tokens : kv_self.head; - - auto & buf_compute = lctx.buf_compute; - - struct ggml_init_params params = { - /*.mem_size =*/ buf_compute.size, - /*.mem_buffer =*/ buf_compute.data, - /*.no_alloc =*/ false, - }; + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * attn_norm; - params.no_alloc = true; - - struct ggml_context * ctx0 = ggml_init(params); - - ggml_cgraph * gf = ggml_new_graph(ctx0); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; + attn_norm = llm_build_norm(ctx0, inpL, hparams, + model.layers[il].attn_norm, + NULL, + LLM_NORM, cb, il); + cb(attn_norm, "attn_norm", il); - inpL = llm_build_inp_embd(ctx0, batch, model.tok_embd, n_embd, n_tokens, cb); - cb(inpL, "inp_embd", -1); + // self-attention + { + cur = attn_norm; - // KQ_scale - struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); - cb(KQ_scale, "KQ_scale", -1); + cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); - cb(KQ_mask, "KQ_mask", -1); + if (hparams.f_clamp_kqv > 0.0f) { + cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv); + cb(cur, "wqkv_clamped", il); + } - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * attn_norm; + struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - attn_norm = llm_build_norm(ctx0, inpL, - model.layers[il].attn_norm, - NULL, - LLM_NORM, norm_eps, cb, il); - cb(attn_norm, "attn_norm", il); + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); - // self-attention - { - cur = attn_norm; + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); + llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - if (clamp_kqv > 0.0f) { - cur = ggml_clamp(ctx0, cur, -clamp_kqv, clamp_kqv); - cb(cur, "wqkv_clamped", il); + cur = llm_build_kqv(ctx0, Qcur, hparams, kv_self, + model.layers[il].wo, NULL, + Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, hparams.f_max_alibi_bias, cb, il); + cb(cur, "kqv_out", il); } - struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); + // Add the input + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); + cb(ffn_inp, "ffn_inp", il); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + // feed forward + { + cur = llm_build_norm(ctx0, ffn_inp, hparams, + model.layers[il].ffn_norm, + NULL, + LLM_NORM, cb, il); + cb(cur, "ffn_norm", il); + + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, NULL, + NULL, NULL, + 
model.layers[il].ffn_down, NULL, + LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); + cb(cur, "ffn_out", il); + } - llm_build_kv_store(lctx, ctx0, gf, Kcur, Vcur, n_tokens, kv_head, cb, il); + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "l_out", il); - cur = llm_build_kqv(lctx, ctx0, Qcur, - model.layers[il].wo, NULL, - Qcur, KQ_scale, KQ_mask, n_tokens, n_kv, max_alibi_bias, cb, il); - cb(cur, "kqv_out", il); + // input for next layer + inpL = cur; } - // Add the input - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); - cb(ffn_inp, "ffn_inp", il); + cur = inpL; - // feed forward - { - cur = llm_build_norm(ctx0, ffn_inp, - model.layers[il].ffn_norm, - NULL, - LLM_NORM, norm_eps, cb, il); - cb(cur, "ffn_norm", il); + cur = llm_build_norm(ctx0, cur, hparams, + model.output_norm, + NULL, + LLM_NORM, cb, -1); + cb(cur, "result_norm", -1); - cur = llm_build_ffn(ctx0, cur, - model.layers[il].ffn_up, NULL, - NULL, NULL, - model.layers[il].ffn_down, NULL, - LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); - cb(cur, "ffn_out", il); - } + cur = ggml_mul_mat(ctx0, model.output, cur); + cb(cur, "result_output", -1); - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "l_out", il); + ggml_build_forward_expand(gf, cur); - // input for next layer - inpL = cur; + return gf; } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, - model.output_norm, - NULL, - LLM_NORM, norm_eps, cb, -1); - cb(cur, "result_norm", -1); - - cur = ggml_mul_mat(ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - ggml_free(ctx0); - - return gf; -} +}; // // tensor offloading helpers @@ -5122,43 +4862,49 @@ static struct ggml_cgraph * llama_build_graph( struct ggml_cgraph * result = NULL; + struct llm_build_context llm(lctx, batch, cb, worst_case); + + llm.init(); + switch (model.arch) { case LLM_ARCH_LLAMA: { - result = llm_build_llama(lctx, batch, cb, worst_case); + result = llm.build_llama(); } break; case LLM_ARCH_BAICHUAN: { - result = llm_build_baichaun(lctx, batch, cb, worst_case); + result = llm.build_baichuan(); } break; case LLM_ARCH_FALCON: { - result = llm_build_falcon(lctx, batch, cb, worst_case); + result = llm.build_falcon(); } break; case LLM_ARCH_STARCODER: { - result = llm_build_starcoder(lctx, batch, cb, worst_case); + result = llm.build_starcoder(); } break; case LLM_ARCH_PERSIMMON: { - result = llm_build_persimmon(lctx, batch, cb, worst_case); + result = llm.build_persimmon(); } break; case LLM_ARCH_REFACT: { - result = llm_build_refact(lctx, batch, cb, worst_case); + result = llm.build_refact(); } break; case LLM_ARCH_BLOOM: { - result = llm_build_bloom(lctx, batch, cb, worst_case); + result = llm.build_bloom(); } break; case LLM_ARCH_MPT: { - result = llm_build_mpt(lctx, batch, cb, worst_case); + result = llm.build_mpt(); } break; default: GGML_ASSERT(false); } + llm.free(); + if (worst_case) { int n_non_view_total = 0; From ff8f9a88da0018972dfdf6fe64b5c8992caabd9c Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 1 Nov 2023 21:15:55 +0200 Subject: [PATCH 036/206] common : minor (#3715) --- common/common.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 89be4126185db5..7a48e9d11e8593 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -110,8 +110,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { exit(0); } } - catch (const std::invalid_argument& ex) { - fprintf(stderr, ex.what()); + catch (const std::invalid_argument & ex) { + fprintf(stderr, "%s\n", ex.what()); 
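        // ex.what() is printed through a "%s" format string rather than being passed
        // as the format string itself: if the message ever contained a '%' conversion
        // specifier, the latter would be undefined behavior. The explicit "\n" also
        // keeps the error on its own line before the usage text that follows.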
gpt_print_usage(argc, argv, gpt_params()); exit(1); } From e16b9fa4baa8a09c6619b116159830e898050942 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 1 Nov 2023 21:25:00 +0200 Subject: [PATCH 037/206] metal : multi-simd softmax (#3710) ggml-ci --- ggml-metal.m | 9 +++- ggml-metal.metal | 129 +++++++++++++++++++++++++++++++++++++---------- 2 files changed, 108 insertions(+), 30 deletions(-) diff --git a/ggml-metal.m b/ggml-metal.m index bc881395a7aadc..1f034150788e26 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -1001,11 +1001,15 @@ void ggml_metal_graph_compute( } break; case GGML_OP_SOFT_MAX: { - const int nth = MIN(32, ne00); + int nth = 32; // SIMD width if (ne00%4 == 0) { [encoder setComputePipelineState:ctx->pipeline_soft_max_4]; } else { + do { + nth *= 2; + } while (nth <= ne00 && nth <= 1024); + nth /= 2; [encoder setComputePipelineState:ctx->pipeline_soft_max]; } [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; @@ -1013,8 +1017,9 @@ void ggml_metal_graph_compute( [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2]; [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3]; [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4]; + [encoder setThreadgroupMemoryLength:nth/32*sizeof(float) atIndex:0]; - [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; + [encoder dispatchThreadgroups:MTLSizeMake(ne01*ne02*ne03, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; case GGML_OP_DIAG_MASK_INF: { diff --git a/ggml-metal.metal b/ggml-metal.metal index f4b460564453c5..f3152778ae48c3 100644 --- a/ggml-metal.metal +++ b/ggml-metal.metal @@ -184,36 +184,73 @@ kernel void kernel_soft_max( constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t i03 = tgpig[2]; - const int64_t i02 = tgpig[1]; - const int64_t i01 = tgpig[0]; + threadgroup float * buf [[threadgroup(0)]], + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint sgitg[[simdgroup_index_in_threadgroup]], + uint tiisg[[thread_index_in_simdgroup]], + uint ntg[[threads_per_threadgroup]]) { + const int64_t i03 = (tgpig) / (ne02*ne01); + const int64_t i02 = (tgpig - i03*ne02*ne01) / ne01; + const int64_t i01 = (tgpig - i03*ne02*ne01 - i02*ne01); device const float * psrc0 = src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; device float * pdst = dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; // parallel max - float lmax = tpitg[0] < ne00 ? psrc0[tpitg[0]] : -INFINITY; - for (int i00 = tpitg[0] + ntg[0]; i00 < ne00; i00 += ntg[0]) { + float lmax = tpitg < ne00 ? psrc0[tpitg] : -INFINITY; + + for (int i00 = tpitg + ntg; i00 < ne00; i00 += ntg) { lmax = MAX(lmax, psrc0[i00]); } - const float max = simd_max(lmax); + + float max = simd_max(lmax); + if (tiisg == 0) { + buf[sgitg] = max; + } + + threadgroup_barrier(mem_flags::mem_threadgroup); + + // broadcast, simd group number is ntg / 32 + for (uint i = ntg / 32 / 2; i > 0; i /= 2) { + if (tpitg < i) { + buf[tpitg] = MAX(buf[tpitg], buf[tpitg + i]); + } + } + + threadgroup_barrier(mem_flags::mem_threadgroup); + + max = buf[0]; // parallel sum float lsum = 0.0f; - for (int i00 = tpitg[0]; i00 < ne00; i00 += ntg[0]) { + for (int i00 = tpitg; i00 < ne00; i00 += ntg) { const float exp_psrc0 = exp(psrc0[i00] - max); lsum += exp_psrc0; // Remember the result of exp here. 
exp is expensive, so we really do not - // whish to compute it twice. + // wish to compute it twice. pdst[i00] = exp_psrc0; } - const float sum = simd_sum(lsum); + float sum = simd_sum(lsum); + if (tiisg == 0) { + buf[sgitg] = sum; + } + + threadgroup_barrier(mem_flags::mem_threadgroup); + + // broadcast, simd group number is ntg / 32 + for (uint i = ntg / 32 / 2; i > 0; i /= 2) { + if (tpitg < i) { + buf[tpitg] += buf[tpitg + i]; + } + } + + threadgroup_barrier(mem_flags::mem_threadgroup); + + sum = buf[0]; - for (int i00 = tpitg[0]; i00 < ne00; i00 += ntg[0]) { + for (int i00 = tpitg; i00 < ne00; i00 += ntg) { pdst[i00] /= sum; } } @@ -224,37 +261,73 @@ kernel void kernel_soft_max_4( constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t i03 = tgpig[2]; - const int64_t i02 = tgpig[1]; - const int64_t i01 = tgpig[0]; + threadgroup float * buf [[threadgroup(0)]], + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint sgitg[[simdgroup_index_in_threadgroup]], + uint tiisg[[thread_index_in_simdgroup]], + uint ntg[[threads_per_threadgroup]]) { + const int64_t i03 = (tgpig) / (ne02*ne01); + const int64_t i02 = (tgpig - i03*ne02*ne01) / ne01; + const int64_t i01 = (tgpig - i03*ne02*ne01 - i02*ne01); device const float4 * psrc4 = (device const float4 *)(src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00); device float4 * pdst4 = (device float4 *)(dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00); // parallel max - float4 lmax4 = tpitg[0] < ne00/4 ? psrc4[tpitg[0]] : -INFINITY; - for (int i00 = tpitg[0] + ntg[0]; i00 < ne00/4; i00 += ntg[0]) { + float4 lmax4 = tpitg < ne00/4 ? 
psrc4[tpitg] : -INFINITY; + + for (int i00 = tpitg + ntg; i00 < ne00/4; i00 += ntg) { lmax4 = fmax(lmax4, psrc4[i00]); } - float lmax = MAX(MAX(lmax4[0], lmax4[1]), MAX(lmax4[2], lmax4[3])); - const float max = simd_max(lmax); + const float lmax = MAX(MAX(lmax4[0], lmax4[1]), MAX(lmax4[2], lmax4[3])); + float max = simd_max(lmax); + if (tiisg == 0) { + buf[sgitg] = max; + } + + threadgroup_barrier(mem_flags::mem_threadgroup); + + // broadcast, simd group number is ntg / 32 + for (uint i = ntg / 32 / 2; i > 0; i /= 2) { + if (tpitg < i) { + buf[tpitg] = MAX(buf[tpitg], buf[tpitg + i]); + } + } + + threadgroup_barrier(mem_flags::mem_threadgroup); + + max = buf[0]; // parallel sum float4 lsum4 = 0.0f; - for (int i00 = tpitg[0]; i00 < ne00/4; i00 += ntg[0]) { + for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { const float4 exp_psrc4 = exp(psrc4[i00] - max); lsum4 += exp_psrc4; pdst4[i00] = exp_psrc4; } - float lsum = lsum4[0] + lsum4[1] + lsum4[2] + lsum4[3]; - const float sum = simd_sum(lsum); + const float lsum = lsum4[0] + lsum4[1] + lsum4[2] + lsum4[3]; + float sum = simd_sum(lsum); + if (tiisg == 0) { + buf[sgitg] = sum; + } + + threadgroup_barrier(mem_flags::mem_threadgroup); + + // broadcast, simd group number is ntg / 32 + for (uint i = ntg / 32 / 2; i > 0; i /= 2) { + if (tpitg < i) { + buf[tpitg] += buf[tpitg + i]; + } + } + + threadgroup_barrier(mem_flags::mem_threadgroup); + + sum = buf[0]; - for (int i00 = tpitg[0]; i00 < ne00/4; i00 += ntg[0]) { + for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) { pdst4[i00] /= sum; } } @@ -274,7 +347,7 @@ kernel void kernel_diag_mask_inf( dst[i02*ne01*ne00 + i01*ne00 + i00] = -INFINITY; } else { dst[i02*ne01*ne00 + i01*ne00 + i00] = src0[i02*ne01*ne00 + i01*ne00 + i00]; - } + } } kernel void kernel_diag_mask_inf_8( From 523e49b11174368cd73460fa5eae7b39d856f300 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 1 Nov 2023 23:00:50 +0200 Subject: [PATCH 038/206] llm : fix falcon norm after refactoring (#3837) --- llama.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llama.cpp b/llama.cpp index d0c4ef10151828..17cf364bb20eac 100644 --- a/llama.cpp +++ b/llama.cpp @@ -3763,7 +3763,7 @@ struct llm_build_context { { if (model.layers[il].attn_norm_2) { // Falcon-40B - cur = llm_build_norm(ctx0, attn_norm, hparams, + cur = llm_build_norm(ctx0, inpL, hparams, model.layers[il].attn_norm_2, model.layers[il].attn_norm_2_b, LLM_NORM, cb, il); From c43c2da8afacaddfe51c09b21dbd9922cd0ea46b Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 1 Nov 2023 23:08:30 +0200 Subject: [PATCH 039/206] llm : fix llm_build_kqv taking unused tensor (benign, #3837) --- llama.cpp | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/llama.cpp b/llama.cpp index 17cf364bb20eac..1c6d482f8fe1bc 100644 --- a/llama.cpp +++ b/llama.cpp @@ -3345,7 +3345,6 @@ static struct ggml_tensor * llm_build_ffn( // if max_alibi_bias > 0 then apply ALiBi static struct ggml_tensor * llm_build_kqv( struct ggml_context * ctx, - struct ggml_tensor * cur, const llama_hparams & hparams, const llama_kv_cache & kv, struct ggml_tensor * wo, @@ -3411,7 +3410,7 @@ static struct ggml_tensor * llm_build_kqv( struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3); cb(kqv_merged, "kqv_merged", il); - cur = ggml_cont_2d(ctx, kqv_merged, n_embd, n_tokens); + struct ggml_tensor * cur = ggml_cont_2d(ctx, kqv_merged, n_embd, n_tokens); cb(cur, "kqv_merged_cont", il); cur = ggml_mul_mat(ctx, wo, cur); @@ -3565,7 +3564,7 @@ struct 
llm_build_context { llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - cur = llm_build_kqv(ctx0, cur, hparams, kv_self, + cur = llm_build_kqv(ctx0, hparams, kv_self, model.layers[il].wo, NULL, Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il); cb(cur, "kqv_out", il); @@ -3677,7 +3676,7 @@ struct llm_build_context { // apply ALiBi for 13B model const float max_alibi_bias = model.type == MODEL_13B ? 8.0f : -1.0f; - cur = llm_build_kqv(ctx0, cur, hparams, kv_self, + cur = llm_build_kqv(ctx0, hparams, kv_self, model.layers[il].wo, NULL, Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, max_alibi_bias, cb, il); cb(cur, "kqv_out", il); @@ -3795,7 +3794,7 @@ struct llm_build_context { llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - cur = llm_build_kqv(ctx0, attn_norm, hparams, kv_self, + cur = llm_build_kqv(ctx0, hparams, kv_self, model.layers[il].wo, NULL, Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il); cb(cur, "kqv_out", il); @@ -3895,7 +3894,7 @@ struct llm_build_context { llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - cur = llm_build_kqv(ctx0, cur, hparams, kv_self, + cur = llm_build_kqv(ctx0, hparams, kv_self, model.layers[il].wo, model.layers[il].bo, Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il); cb(cur, "kqv_out", il); @@ -4100,7 +4099,7 @@ struct llm_build_context { llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); // TODO: not tested, could be broken - cur = llm_build_kqv(ctx0, Q, hparams, kv_self, + cur = llm_build_kqv(ctx0, hparams, kv_self, model.layers[il].wo, model.layers[il].bo, Q, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il); cb(cur, "kqv_out", il); @@ -4191,7 +4190,7 @@ struct llm_build_context { llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - cur = llm_build_kqv(ctx0, Qcur, hparams, kv_self, + cur = llm_build_kqv(ctx0, hparams, kv_self, model.layers[il].wo, NULL, Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, cb, il); cb(cur, "kqv_out", il); @@ -4288,7 +4287,7 @@ struct llm_build_context { llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - cur = llm_build_kqv(ctx0, Qcur, hparams, kv_self, + cur = llm_build_kqv(ctx0, hparams, kv_self, model.layers[il].wo, model.layers[il].bo, Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, cb, il); cb(cur, "kqv_out", il); @@ -4382,7 +4381,7 @@ struct llm_build_context { llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); - cur = llm_build_kqv(ctx0, Qcur, hparams, kv_self, + cur = llm_build_kqv(ctx0, hparams, kv_self, model.layers[il].wo, NULL, Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, hparams.f_max_alibi_bias, cb, il); cb(cur, "kqv_out", il); From 898aeca90a9bb992f506234cf3b8b7f7fa28a1df Mon Sep 17 00:00:00 2001 From: cebtenzzre Date: Wed, 1 Nov 2023 18:04:33 -0400 Subject: [PATCH 040/206] llama : implement YaRN RoPE scaling (#2268) Co-authored-by: cebtenzzre Co-authored-by: Jeffrey Quesnelle --- common/common.cpp | 79 +++++- common/common.h | 7 + convert-baichuan-hf-to-gguf.py | 3 +- convert.py | 97 ++++--- examples/finetune/finetune.cpp | 5 +- examples/server/server.cpp | 59 ++++- .../train-text-from-scratch.cpp | 6 +- ggml-cuda.cu | 153 ++++++++--- ggml-metal.m | 24 +- ggml-metal.metal | 61 ++++- ggml.c | 241 +++++++++++++----- ggml.h | 20 +- 
gguf-py/gguf/gguf.py | 29 ++- llama.cpp | 220 ++++++++++++---- llama.h | 18 +- 15 files changed, 764 insertions(+), 258 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 7a48e9d11e8593..b182ffaaef48ec 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -219,12 +219,52 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { break; } params.rope_freq_scale = std::stof(argv[i]); + } else if (arg == "--rope-scaling") { + if (++i >= argc) { + invalid_param = true; + break; + } + std::string value(argv[i]); + /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_NONE; } + else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_LINEAR; } + else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_YARN; } + else { invalid_param = true; break; } } else if (arg == "--rope-scale") { if (++i >= argc) { invalid_param = true; break; } params.rope_freq_scale = 1.0f/std::stof(argv[i]); + } else if (arg == "--yarn-orig-ctx") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.yarn_orig_ctx = std::stoi(argv[i]); + } else if (arg == "--yarn-ext-factor") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.yarn_ext_factor = std::stof(argv[i]); + } else if (arg == "--yarn-attn-factor") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.yarn_attn_factor = std::stof(argv[i]); + } else if (arg == "--yarn-beta-fast") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.yarn_beta_fast = std::stof(argv[i]); + } else if (arg == "--yarn-beta-slow") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.yarn_beta_slow = std::stof(argv[i]); } else if (arg == "--memory-f32") { params.memory_f16 = false; } else if (arg == "--top-p") { @@ -716,9 +756,16 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" --cfg-negative-prompt-file FNAME\n"); printf(" negative prompt file to use for guidance. 
(default: empty)\n"); printf(" --cfg-scale N strength of guidance (default: %f, 1.0 = disable)\n", sparams.cfg_scale); - printf(" --rope-scale N RoPE context linear scaling factor, inverse of --rope-freq-scale\n"); + printf(" --rope-scaling {none,linear,yarn}\n"); + printf(" RoPE frequency scaling method, defaults to linear unless specified by the model\n"); + printf(" --rope-scale N RoPE context scaling factor, expands context by a factor of N\n"); printf(" --rope-freq-base N RoPE base frequency, used by NTK-aware scaling (default: loaded from model)\n"); - printf(" --rope-freq-scale N RoPE frequency linear scaling factor (default: loaded from model)\n"); + printf(" --rope-freq-scale N RoPE frequency scaling factor, expands context by a factor of 1/N\n"); + printf(" --yarn-orig-ctx N YaRN: original context size of model (default: 0 = model training context size)\n"); + printf(" --yarn-ext-factor N YaRN: extrapolation mix factor (default: 1.0, 0.0 = full interpolation)\n"); + printf(" --yarn-attn-factor N YaRN: scale sqrt(t) or attention magnitude (default: 1.0)\n"); + printf(" --yarn-beta-slow N YaRN: high correction dim or alpha (default: %.1f)\n", params.yarn_beta_slow); + printf(" --yarn-beta-fast N YaRN: low correction dim or beta (default: %.1f)\n", params.yarn_beta_fast); printf(" --ignore-eos ignore end of stream token and continue generating (implies --logit-bias 2-inf)\n"); printf(" --no-penalize-nl do not penalize newline token\n"); printf(" --memory-f32 use f32 instead of f16 for memory key+value (default: disabled)\n"); @@ -826,17 +873,23 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params) { auto cparams = llama_context_default_params(); - cparams.n_ctx = params.n_ctx; - cparams.n_batch = params.n_batch; - cparams.n_threads = params.n_threads; - cparams.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch; - cparams.mul_mat_q = params.mul_mat_q; - cparams.seed = params.seed; - cparams.f16_kv = params.memory_f16; - cparams.logits_all = params.logits_all; - cparams.embedding = params.embedding; - cparams.rope_freq_base = params.rope_freq_base; - cparams.rope_freq_scale = params.rope_freq_scale; + cparams.n_ctx = params.n_ctx; + cparams.n_batch = params.n_batch; + cparams.n_threads = params.n_threads; + cparams.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch; + cparams.mul_mat_q = params.mul_mat_q; + cparams.seed = params.seed; + cparams.f16_kv = params.memory_f16; + cparams.logits_all = params.logits_all; + cparams.embedding = params.embedding; + cparams.rope_scaling_type = params.rope_scaling_type; + cparams.rope_freq_base = params.rope_freq_base; + cparams.rope_freq_scale = params.rope_freq_scale; + cparams.yarn_ext_factor = params.yarn_ext_factor; + cparams.yarn_attn_factor = params.yarn_attn_factor; + cparams.yarn_beta_fast = params.yarn_beta_fast; + cparams.yarn_beta_slow = params.yarn_beta_slow; + cparams.yarn_orig_ctx = params.yarn_orig_ctx; return cparams; } diff --git a/common/common.h b/common/common.h index 343b272177c7ec..7be69f925bc2be 100644 --- a/common/common.h +++ b/common/common.h @@ -9,6 +9,7 @@ #define LOG_NO_FILE_LINE_FUNCTION #include "log.h" +#include #include #include #include @@ -54,6 +55,12 @@ struct gpt_params { int32_t n_beams = 0; // if non-zero then use beam search of given width. 
float rope_freq_base = 0.0f; // RoPE base frequency float rope_freq_scale = 0.0f; // RoPE frequency scaling factor + float yarn_ext_factor = NAN; // YaRN extrapolation mix factor + float yarn_attn_factor = 1.0f; // YaRN magnitude scaling factor + float yarn_beta_fast = 32.0f;// YaRN low correction dim + float yarn_beta_slow = 1.0f; // YaRN high correction dim + int32_t yarn_orig_ctx = 0; // YaRN original context length + int8_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED; // // sampling parameters struct llama_sampling_params sparams; diff --git a/convert-baichuan-hf-to-gguf.py b/convert-baichuan-hf-to-gguf.py index 5ee99be73134e6..67ccbe99f132af 100755 --- a/convert-baichuan-hf-to-gguf.py +++ b/convert-baichuan-hf-to-gguf.py @@ -163,7 +163,8 @@ def parse_args() -> argparse.Namespace: if "rope_scaling" in hparams and hparams["rope_scaling"] != None and "factor" in hparams["rope_scaling"]: if "type" in hparams["rope_scaling"]: if hparams["rope_scaling"]["type"] == "linear": - gguf_writer.add_rope_scale_linear(hparams["rope_scaling"]["factor"]) + gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) + gguf_writer.add_rope_scaling_factor(hparams["rope_scaling"]["factor"]) # TOKENIZATION diff --git a/convert.py b/convert.py index bfbfab283f6ae7..9110f15806c6bc 100755 --- a/convert.py +++ b/convert.py @@ -151,8 +151,11 @@ class Params: n_head_kv: int f_norm_eps: float + rope_scaling_type: gguf.RopeScalingType | None = None f_rope_freq_base: float | None = None f_rope_scale: float | None = None + n_orig_ctx: int | None = None + rope_finetuned: bool | None = None ftype: GGMLFileType | None = None @@ -198,20 +201,20 @@ def guessed(model: LazyModel) -> Params: def loadHFTransformerJson(model: LazyModel, config_path: Path) -> Params: config = json.load(open(config_path)) - n_vocab = config["vocab_size"] - n_embd = config["hidden_size"] - n_layer = config["num_hidden_layers"] - n_ff = config["intermediate_size"] - n_head = config["num_attention_heads"] - n_head_kv = config["num_key_value_heads"] if "num_key_value_heads" in config else n_head - f_norm_eps = config["rms_norm_eps"] - f_rope_freq_base = config["rope_theta"] if "rope_theta" in config else None - + rope_scaling_type = f_rope_scale = n_orig_ctx = rope_finetuned = None rope_scaling = config.get("rope_scaling") - if isinstance(rope_scaling, dict) and rope_scaling.get("type") == "linear": - f_rope_scale = config["rope_scaling"].get("factor") - else: - f_rope_scale = None + + if rope_scaling is not None and (typ := rope_scaling.get("type")): + rope_factor = rope_scaling.get("factor") + f_rope_scale = rope_factor + if typ == "linear": + rope_scaling_type = gguf.RopeScalingType.LINEAR + elif typ == "yarn": + rope_scaling_type = gguf.RopeScalingType.YARN + n_orig_ctx = rope_scaling['original_max_position_embeddings'] + rope_finetuned = rope_scaling['finetuned'] + else: + raise NotImplementedError(f'Unknown rope scaling type: {typ}') if "max_sequence_length" in config: n_ctx = config["max_sequence_length"] @@ -222,16 +225,19 @@ def loadHFTransformerJson(model: LazyModel, config_path: Path) -> Params: "Suggestion: provide 'config.json' of the model in the same directory containing model files.") return Params( - n_vocab = n_vocab, - n_embd = n_embd, - n_layer = n_layer, - n_ctx = n_ctx, - n_ff = n_ff, - n_head = n_head, - n_head_kv = n_head_kv, - f_norm_eps = f_norm_eps, - f_rope_freq_base = f_rope_freq_base, - f_rope_scale = f_rope_scale, + n_vocab = config["vocab_size"], + n_embd = config["hidden_size"], + n_layer = 
config["num_hidden_layers"], + n_ctx = n_ctx, + n_ff = config["intermediate_size"], + n_head = (n_head := config["num_attention_heads"]), + n_head_kv = config.get("num_key_value_heads", n_head), + f_norm_eps = config["rms_norm_eps"], + f_rope_freq_base = config.get("rope_theta"), + rope_scaling_type = rope_scaling_type, + f_rope_scale = f_rope_scale, + n_orig_ctx = n_orig_ctx, + rope_finetuned = rope_finetuned, ) # LLaMA v2 70B params.json @@ -240,17 +246,8 @@ def loadHFTransformerJson(model: LazyModel, config_path: Path) -> Params: def loadOriginalParamsJson(model: LazyModel, config_path: Path) -> Params: config = json.load(open(config_path)) - n_vocab = config["vocab_size"] if "vocab_size" in config else -1 - n_embd = config["dim"] - n_layer = config["n_layers"] - n_ff = -1 - n_head = config["n_heads"] - n_head_kv = config["n_kv_heads"] if "n_kv_heads" in config else n_head - f_norm_eps = config["norm_eps"] - f_rope_freq_base = config["rope_theta"] if "rope_theta" in config else None - # hack to determine LLaMA v1 vs v2 vs CodeLlama - if f_rope_freq_base == 1000000: + if config.get("rope_theta") == 1000000: # CodeLlama n_ctx = 16384 elif config["norm_eps"] == 1e-05: @@ -260,22 +257,16 @@ def loadOriginalParamsJson(model: LazyModel, config_path: Path) -> Params: # LLaMA v1 n_ctx = 2048 - if n_vocab == -1: - n_vocab = model["tok_embeddings.weight"].shape[0] - - if n_ff == -1: - n_ff = model["layers.0.feed_forward.w1.weight"].shape[0] - return Params( - n_vocab = n_vocab, - n_embd = n_embd, - n_layer = n_layer, + n_vocab = config.get("vocab_size", model["tok_embeddings.weight"].shape[0]), + n_embd = config["dim"], + n_layer = config["n_layers"], n_ctx = n_ctx, - n_ff = n_ff, - n_head = n_head, - n_head_kv = n_head_kv, - f_norm_eps = f_norm_eps, - f_rope_freq_base = f_rope_freq_base, + n_ff = model["layers.0.feed_forward.w1.weight"].shape[0], + n_head = (n_head := config["n_heads"]), + n_head_kv = config.get("n_kv_heads", n_head), + f_norm_eps = config["norm_eps"], + f_rope_freq_base = config.get("rope_theta"), ) @staticmethod @@ -831,8 +822,16 @@ def add_meta_arch(self, params: Params) -> None: if params.f_rope_freq_base is not None: self.gguf.add_rope_freq_base(params.f_rope_freq_base) - if params.f_rope_scale is not None: - self.gguf.add_rope_scale_linear(params.f_rope_scale) + if params.rope_scaling_type: + assert params.f_rope_scale is not None + self.gguf.add_rope_scaling_type(params.rope_scaling_type) + self.gguf.add_rope_scaling_factor(params.f_rope_scale) + + if params.n_orig_ctx is not None: + self.gguf.add_rope_scaling_orig_ctx_len(params.n_orig_ctx) + + if params.rope_finetuned is not None: + self.gguf.add_rope_scaling_finetuned(params.rope_finetuned) if params.ftype is not None: self.gguf.add_file_type(params.ftype) diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp index 60c7faa797028a..649a3b7c1941e5 100644 --- a/examples/finetune/finetune.cpp +++ b/examples/finetune/finetune.cpp @@ -642,8 +642,9 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs( const int rope_mode = 0; return ggml_rope_custom(ctx, - t, KQ_pos, n_rot, rope_mode, n_ctx, - rope_freq_base, rope_freq_scale); + t, KQ_pos, n_rot, rope_mode, n_ctx, 0, + rope_freq_base, rope_freq_scale, 0.0f, 0.0f, 0.0f, 0.0f + ); }; set_name(tokens_input, "tokens_input"); diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 47ae0d55856cf8..84b04d5a0493a6 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1755,12 +1755,18 @@ static void 
server_print_usage(const char *argv0, const gpt_params ¶ms, printf("options:\n"); printf(" -h, --help show this help message and exit\n"); printf(" -v, --verbose verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled"); - printf(" -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); + printf(" -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); printf(" -tb N, --threads-batch N number of threads to use during batch and prompt processing (default: same as --threads)\n"); - printf(" -c N, --ctx-size N size of the prompt context (default: %d)\n", params.n_ctx); + printf(" -c N, --ctx-size N size of the prompt context (default: %d)\n", params.n_ctx); + printf(" --rope-scaling {none,linear,yarn}\n"); + printf(" RoPE frequency scaling method, defaults to linear unless specified by the model\n"); printf(" --rope-freq-base N RoPE base frequency (default: loaded from model)\n"); - printf(" --rope-freq-scale N RoPE frequency scaling factor (default: loaded from model)\n"); - printf(" -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch); + printf(" --rope-freq-scale N RoPE frequency scaling factor, expands context by a factor of 1/N\n"); + printf(" --yarn-ext-factor N YaRN: extrapolation mix factor (default: 1.0, 0.0 = full interpolation)\n"); + printf(" --yarn-attn-factor N YaRN: scale sqrt(t) or attention magnitude (default: 1.0)\n"); + printf(" --yarn-beta-slow N YaRN: high correction dim or alpha (default: %.1f)\n", params.yarn_beta_slow); + printf(" --yarn-beta-fast N YaRN: low correction dim or beta (default: %.1f)\n", params.yarn_beta_fast); + printf(" -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch); printf(" --memory-f32 use f32 instead of f16 for memory key+value (default: disabled)\n"); printf(" not recommended: doubles context memory required and no measurable increase in quality\n"); if (llama_mlock_supported()) @@ -1881,6 +1887,19 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, } params.n_ctx = std::stoi(argv[i]); } + else if (arg == "--rope-scaling") + { + if (++i >= argc) + { + invalid_param = true; + break; + } + std::string value(argv[i]); + /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_NONE; } + else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_LINEAR; } + else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_YARN; } + else { invalid_param = true; break; } + } else if (arg == "--rope-freq-base") { if (++i >= argc) @@ -1899,6 +1918,38 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, } params.rope_freq_scale = std::stof(argv[i]); } + else if (arg == "--yarn-ext-factor") + { + if (++i >= argc) { + invalid_param = true; + break; + } + params.yarn_ext_factor = std::stof(argv[i]); + } + else if (arg == "--yarn-attn-factor") + { + if (++i >= argc) { + invalid_param = true; + break; + } + params.yarn_attn_factor = std::stof(argv[i]); + } + else if (arg == "--yarn-beta-fast") + { + if (++i >= argc) { + invalid_param = true; + break; + } + params.yarn_beta_fast = std::stof(argv[i]); + } + else if (arg == "--yarn-beta-slow") + { + if (++i >= argc) { + invalid_param = true; + break; + } + params.yarn_beta_slow = std::stof(argv[i]); + } else if (arg == "--memory-f32" || arg == "--memory_f32") { params.memory_f16 = false; diff --git 
a/examples/train-text-from-scratch/train-text-from-scratch.cpp b/examples/train-text-from-scratch/train-text-from-scratch.cpp index 1ce6cef29cfd06..2a257e63215e3c 100644 --- a/examples/train-text-from-scratch/train-text-from-scratch.cpp +++ b/examples/train-text-from-scratch/train-text-from-scratch.cpp @@ -349,9 +349,9 @@ static struct ggml_tensor * llama_build_train_graphs( // not capturing these, to silcence warnings const int rope_mode = 0; - return ggml_rope_custom(ctx, - t, KQ_pos, n_rot, rope_mode, n_ctx, - rope_freq_base, rope_freq_scale); + return ggml_rope_custom( + ctx, t, KQ_pos, n_rot, rope_mode, n_ctx, 0, rope_freq_base, rope_freq_scale, 0.0f, 1.0f, 0.0f, 0.0f + ); }; set_name(tokens_input, "tokens_input"); diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 4e6e7cd94892b1..12ee10e3d9bdcd 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -4493,11 +4493,41 @@ static __global__ void cpy_f32_f16(const char * cx, char * cdst, const int ne, cpy_1(cx + x_offset, cdst + dst_offset); } -// rope == RoPE == rotary positional embedding +static __device__ float rope_yarn_ramp(const float low, const float high, const int i0) { + const float y = (i0 / 2 - low) / max(0.001f, high - low); + return 1.0f - min(1.0f, max(0.0f, y)); +} + +struct rope_corr_dims { + float v[4]; +}; + +// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn +// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. +static __device__ void rope_yarn( + float theta_extrap, float freq_scale, rope_corr_dims corr_dims, int64_t i0, float ext_factor, float mscale, + float * cos_theta, float * sin_theta +) { + // Get n-d rotational scaling corrected for extrapolation + float theta_interp = freq_scale * theta_extrap; + float theta = theta_interp; + if (ext_factor != 0.0f) { + float ramp_mix = rope_yarn_ramp(corr_dims.v[0], corr_dims.v[1], i0) * ext_factor; + theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; + // Get n-d magnitude scaling corrected for interpolation + mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale); + } + *cos_theta = cosf(theta) * mscale; + *sin_theta = sinf(theta) * mscale; +} + +// rope == RoPE == rotary positional embedding template -static __global__ void rope(const T * x, T * dst, const int ncols, const int32_t * pos, const float freq_scale, - const int p_delta_rows, const float theta_scale) { +static __global__ void rope( + const T * x, T * dst, int ncols, const int32_t * pos, float freq_scale, int p_delta_rows, float freq_base, + float ext_factor, float attn_factor, rope_corr_dims corr_dims +) { const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y); if (col >= ncols) { @@ -4509,10 +4539,10 @@ static __global__ void rope(const T * x, T * dst, const int ncols, const int32_t const int i2 = row/p_delta_rows; const int p = has_pos ? 
pos[i2] : 0; - const float p0 = p*freq_scale; - const float theta = p0*powf(theta_scale, col/2); - const float sin_theta = sinf(theta); - const float cos_theta = cosf(theta); + const float theta_base = p*powf(freq_base, -col/ncols); + + float cos_theta, sin_theta; + rope_yarn(theta_base, freq_scale, corr_dims, col, ext_factor, attn_factor, &cos_theta, &sin_theta); const float x0 = x[i + 0]; const float x1 = x[i + 1]; @@ -4522,8 +4552,10 @@ static __global__ void rope(const T * x, T * dst, const int ncols, const int32_t } template -static __global__ void rope_neox(const T * x, T * dst, const int ncols, const int32_t * pos, const float freq_scale, - const int p_delta_rows, const float theta_scale) { +static __global__ void rope_neox( + const T * x, T * dst, int ncols, const int32_t * pos, float freq_scale, int p_delta_rows, float freq_base, + float ext_factor, float attn_factor, rope_corr_dims corr_dims +) { const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y); if (col >= ncols) { @@ -4534,11 +4566,14 @@ static __global__ void rope_neox(const T * x, T * dst, const int ncols, const in const int i = row*ncols + col/2; const int i2 = row/p_delta_rows; + // simplified from `(row * ncols + col) * (-1 / ncols)` + const float cur_rot = -col/ncols - row; + const int p = has_pos ? pos[i2] : 0; - const float p0 = p*freq_scale; - const float theta = p0*powf(theta_scale, col/2); - const float sin_theta = sinf(theta); - const float cos_theta = cosf(theta); + const float theta_base = p*powf(freq_base, cur_rot); + + float cos_theta, sin_theta; + rope_yarn(theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, &cos_theta, &sin_theta); const float x0 = x[i + 0]; const float x1 = x[i + ncols/2]; @@ -4547,8 +4582,10 @@ static __global__ void rope_neox(const T * x, T * dst, const int ncols, const in dst[i + ncols/2] = x0*sin_theta + x1*cos_theta; } -static __global__ void rope_glm_f32(const float * x, float * dst, const int ncols, const int32_t * pos, const float freq_scale, - const int p_delta_rows, const float theta_scale, const int n_ctx) { +static __global__ void rope_glm_f32( + const float * x, float * dst, int ncols, const int32_t * pos, float freq_scale, int p_delta_rows, float freq_base, + int n_ctx +) { const int col = blockDim.x*blockIdx.x + threadIdx.x; const int half_n_dims = ncols/4; @@ -4560,7 +4597,7 @@ static __global__ void rope_glm_f32(const float * x, float * dst, const int ncol const int i = row*ncols + col; const int i2 = row/p_delta_rows; - const float col_theta_scale = powf(theta_scale, col); + const float col_theta_scale = powf(freq_base, -2.0f*col/ncols); // FIXME: this is likely wrong const int p = pos != nullptr ? 
pos[i2] : 0; @@ -5584,40 +5621,54 @@ static void clamp_f32_cuda(const float * x, float * dst, const float min, const } template -static void rope_cuda(const T * x, T * dst, const int ncols, const int nrows, const int32_t * pos, const float freq_scale, - const int p_delta_rows, const float theta_scale, cudaStream_t stream) { +static void rope_cuda( + const T * x, T * dst, int ncols, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows, + float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, cudaStream_t stream +) { GGML_ASSERT(ncols % 2 == 0); const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1); const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); const dim3 block_nums(nrows, num_blocks_x, 1); if (pos == nullptr) { - rope<<>>(x, dst, ncols, pos, freq_scale, p_delta_rows, theta_scale); + rope<<>>( + x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims + ); } else { - rope<<>>(x, dst, ncols, pos, freq_scale, p_delta_rows, theta_scale); + rope<<>>( + x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims + ); } } template -static void rope_neox_cuda(const T * x, T * dst, const int ncols, const int nrows, const int32_t * pos, const float freq_scale, - const int p_delta_rows, const float theta_scale, cudaStream_t stream) { +static void rope_neox_cuda( + const T * x, T * dst, int ncols, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows, + float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, cudaStream_t stream +) { GGML_ASSERT(ncols % 2 == 0); const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1); const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); const dim3 block_nums(nrows, num_blocks_x, 1); if (pos == nullptr) { - rope_neox<<>>(x, dst, ncols, pos, freq_scale, p_delta_rows, theta_scale); + rope_neox<<>>( + x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims + ); } else { - rope_neox<<>>(x, dst, ncols, pos, freq_scale, p_delta_rows, theta_scale); + rope_neox<<>>( + x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims + ); } } -static void rope_glm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const int32_t * pos, const float freq_scale, - const int p_delta_rows, const float theta_scale, const int n_ctx, cudaStream_t stream) { +static void rope_glm_f32_cuda( + const float * x, float * dst, int ncols, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows, + float freq_base, int n_ctx, cudaStream_t stream +) { GGML_ASSERT(ncols % 4 == 0); const dim3 block_dims(CUDA_ROPE_BLOCK_SIZE/4, 1, 1); const int num_blocks_x = (ncols + CUDA_ROPE_BLOCK_SIZE - 1) / CUDA_ROPE_BLOCK_SIZE; const dim3 block_nums(num_blocks_x, nrows, 1); - rope_glm_f32<<>>(x, dst, ncols, pos, freq_scale, p_delta_rows, theta_scale, n_ctx); + rope_glm_f32<<>>(x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, n_ctx); } static void alibi_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, @@ -6477,17 +6528,20 @@ inline void ggml_cuda_op_rope( const int64_t ne2 = dst->ne[2]; const int64_t nrows = ggml_nrows(src0); - //const int n_past = ((int32_t *) dst->op_params)[0]; - const int n_dims = ((int32_t *) dst->op_params)[1]; - const int mode = ((int32_t *) dst->op_params)[2]; - const int n_ctx = ((int32_t *) dst->op_params)[3]; - // RoPE alteration for extended context - 
- float freq_base, freq_scale; - memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float)); - memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float)); + //const int n_past = ((int32_t *) dst->op_params)[0]; + const int n_dims = ((int32_t *) dst->op_params)[1]; + const int mode = ((int32_t *) dst->op_params)[2]; + const int n_ctx = ((int32_t *) dst->op_params)[3]; + const int n_orig_ctx = ((int32_t *) dst->op_params)[4]; - const float theta_scale = powf(freq_base, -2.0f/n_dims); + // RoPE alteration for extended context + float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; + memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); + memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); + memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float)); + memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); + memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); + memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); const int32_t * pos = nullptr; if ((mode & 1) == 0) { @@ -6499,24 +6553,39 @@ inline void ggml_cuda_op_rope( const bool is_neox = mode & 2; const bool is_glm = mode & 4; + rope_corr_dims corr_dims; + ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims.v); + // compute if (is_glm) { GGML_ASSERT(false); - rope_glm_f32_cuda(src0_dd, dst_dd, ne00, nrows, pos, freq_scale, ne01, theta_scale, n_ctx, main_stream); + rope_glm_f32_cuda(src0_dd, dst_dd, ne00, nrows, pos, freq_scale, ne01, freq_base, n_ctx, main_stream); } else if (is_neox) { GGML_ASSERT(ne00 == n_dims && "ne00 != n_dims is not implemented for CUDA yet"); if (src0->type == GGML_TYPE_F32) { - rope_neox_cuda((const float *)src0_dd, (float *)dst_dd, ne00, nrows, pos, freq_scale, ne01, theta_scale, main_stream); + rope_neox_cuda( + (const float *)src0_dd, (float *)dst_dd, ne00, nrows, pos, freq_scale, ne01, freq_base, ext_factor, + attn_factor, corr_dims, main_stream + ); } else if (src0->type == GGML_TYPE_F16) { - rope_neox_cuda((const half *)src0_dd, (half *)dst_dd, ne00, nrows, pos, freq_scale, ne01, theta_scale, main_stream); + rope_neox_cuda( + (const half *)src0_dd, (half *)dst_dd, ne00, nrows, pos, freq_scale, ne01, freq_base, ext_factor, + attn_factor, corr_dims, main_stream + ); } else { GGML_ASSERT(false); } } else { if (src0->type == GGML_TYPE_F32) { - rope_cuda((const float *)src0_dd, (float *)dst_dd, ne00, nrows, pos, freq_scale, ne01, theta_scale, main_stream); + rope_cuda( + (const float *)src0_dd, (float *)dst_dd, ne00, nrows, pos, freq_scale, ne01, freq_base, ext_factor, + attn_factor, corr_dims, main_stream + ); } else if (src0->type == GGML_TYPE_F16) { - rope_cuda((const half *)src0_dd, (half *)dst_dd, ne00, nrows, pos, freq_scale, ne01, theta_scale, main_stream); + rope_cuda( + (const half *)src0_dd, (half *)dst_dd, ne00, nrows, pos, freq_scale, ne01, freq_base, ext_factor, + attn_factor, corr_dims, main_stream + ); } else { GGML_ASSERT(false); } diff --git a/ggml-metal.m b/ggml-metal.m index 1f034150788e26..611d5e173681eb 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -1400,14 +1400,18 @@ void ggml_metal_graph_compute( const int nth = MIN(1024, ne00); - const int n_past = ((int32_t *) dst->op_params)[0]; - const int n_dims = ((int32_t *) dst->op_params)[1]; - const int mode = ((int32_t *) dst->op_params)[2]; - - float freq_base; - float freq_scale; - memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float)); - memcpy(&freq_scale, (int32_t *) 
dst->op_params + 5, sizeof(float)); + const int n_past = ((int32_t *) dst->op_params)[0]; + const int n_dims = ((int32_t *) dst->op_params)[1]; + const int mode = ((int32_t *) dst->op_params)[2]; + const int n_orig_ctx = ((int32_t *) dst->op_params)[3]; + + float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; + memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); + memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); + memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float)); + memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); + memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); + memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); switch (src0->type) { case GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_rope_f32]; break; @@ -1439,6 +1443,10 @@ void ggml_metal_graph_compute( [encoder setBytes:&mode length:sizeof( int) atIndex:21]; [encoder setBytes:&freq_base length:sizeof(float) atIndex:22]; [encoder setBytes:&freq_scale length:sizeof(float) atIndex:23]; + [encoder setBytes:&ext_factor length:sizeof(float) atIndex:24]; + [encoder setBytes:&attn_factor length:sizeof(float) atIndex:25]; + [encoder setBytes:&beta_fast length:sizeof(float) atIndex:26]; + [encoder setBytes:&beta_slow length:sizeof(float) atIndex:27]; [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; diff --git a/ggml-metal.metal b/ggml-metal.metal index f3152778ae48c3..471d7d390f8138 100644 --- a/ggml-metal.metal +++ b/ggml-metal.metal @@ -1061,6 +1061,45 @@ kernel void kernel_alibi_f32( } } +static float rope_yarn_ramp(const float low, const float high, const int i0) { + const float y = (i0 / 2 - low) / max(0.001f, high - low); + return 1.0f - min(1.0f, max(0.0f, y)); +} + +// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn +// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. 
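The Metal port that follows repeats the rope_yarn helper just added for CUDA above (and mirrored once more in ggml.c below). Restated compactly, with s = freq_scale, low/high the two correction dims and i0 the rotated dimension index, the helper computes:

    ramp(i0) = 1 - clamp((i0/2 - low) / (high - low), 0, 1)
    gamma    = ext_factor * ramp(i0)
    theta    = (1 - gamma) * s * theta_extrap + gamma * theta_extrap
    mscale   = attn_factor * (1 + 0.1 * ln(1/s))      (applied only when ext_factor != 0)
    outputs  = cos(theta) * mscale,  sin(theta) * mscale

so ext_factor = 0 degenerates to plain linear scaling: theta = s * theta_extrap with mscale = attn_factor.
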
+static void rope_yarn( + float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale, + float * cos_theta, float * sin_theta +) { + // Get n-d rotational scaling corrected for extrapolation + float theta_interp = freq_scale * theta_extrap; + float theta = theta_interp; + if (ext_factor != 0.0f) { + ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; + theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; + + // Get n-d magnitude scaling corrected for interpolation + mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale); + } + *cos_theta = cosf(theta) * mscale; + *sin_theta = sinf(theta) * mscale; +} + +// Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get +// `corr_fac(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))` +static float rope_yarn_corr_factor(int n_dims, int n_orig_ctx, float n_rot, float base) { + return n_dims * log(n_orig_ctx / (n_rot * 2 * M_PI_F)) / (2 * log(base)); +} + +static void rope_yarn_corr_dims( + int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2] +) { + // start and end correction dims + dims[0] = max(0.0f, floor(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_fast, freq_base))); + dims[1] = min(n_dims - 1.0f, ceil(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_slow, freq_base))); +} + typedef void (rope_t)( device const void * src0, device const int32_t * src1, @@ -1116,6 +1155,10 @@ kernel void kernel_rope( constant int & mode, constant float & freq_base, constant float & freq_scale, + constant float & ext_factor, + constant float & attn_factor, + constant float & beta_fast, + constant float & beta_slow, uint tiitg[[thread_index_in_threadgroup]], uint3 tptg[[threads_per_threadgroup]], uint3 tgpig[[threadgroup_position_in_grid]]) { @@ -1125,19 +1168,22 @@ kernel void kernel_rope( const bool is_neox = mode & 2; + float corr_dims[2]; + rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims); + device const int32_t * pos = src1; const int64_t p = pos[i2]; - const float theta_0 = freq_scale * (float)p; + const float theta_0 = (float)p; const float inv_ndims = -1.f/n_dims; if (!is_neox) { for (int64_t i0 = 2*tiitg; i0 < ne0; i0 += 2*tptg.x) { const float theta = theta_0 * pow(freq_base, inv_ndims*i0); - const float cos_theta = cos(theta); - const float sin_theta = sin(theta); + float cos_theta, sin_theta; + rope_yarn(theta, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta); device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); @@ -1152,9 +1198,12 @@ kernel void kernel_rope( for (int64_t ib = 0; ib < ne0/n_dims; ++ib) { for (int64_t ic = 2*tiitg; ic < n_dims; ic += 2*tptg.x) { - const float theta = theta_0 * pow(freq_base, inv_ndims*ic - ib); - const float cos_theta = cos(theta); - const float sin_theta = sin(theta); + // simplified from `(ib * n_dims + ic) * inv_ndims` + const float cur_rot = inv_ndims*ic - ib; + + const float theta = theta_0 * pow(freq_base, cur_rot); + float cos_theta, sin_theta; + rope_yarn(theta, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, &cos_theta, &sin_theta); const int64_t i0 = ib*n_dims + ic/2; diff --git a/ggml.c b/ggml.c index 80d682255328c3..2c7fe476b176d5 100644 --- a/ggml.c +++ b/ggml.c @@ -1,4 +1,5 @@ #define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" 
warnigns on Windows +#define _USE_MATH_DEFINES // For M_PI on MSVC #include "ggml-impl.h" #include "ggml-quants.h" @@ -4845,8 +4846,13 @@ static struct ggml_tensor * ggml_rope_impl( int n_dims, int mode, int n_ctx, + int n_orig_ctx, float freq_base, float freq_scale, + float ext_factor, + float attn_factor, + float beta_fast, + float beta_slow, float xpos_base, bool xpos_down, bool inplace) { @@ -4862,11 +4868,15 @@ static struct ggml_tensor * ggml_rope_impl( struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - int32_t params[8] = { /*n_past*/ 0, n_dims, mode, n_ctx }; - memcpy(params + 4, &freq_base, sizeof(float)); - memcpy(params + 5, &freq_scale, sizeof(float)); - memcpy(params + 6, &xpos_base, sizeof(float)); - memcpy(params + 7, &xpos_down, sizeof(bool)); + int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx }; + memcpy(params + 5, &freq_base, sizeof(float)); + memcpy(params + 6, &freq_scale, sizeof(float)); + memcpy(params + 7, &ext_factor, sizeof(float)); + memcpy(params + 8, &attn_factor, sizeof(float)); + memcpy(params + 9, &beta_fast, sizeof(float)); + memcpy(params + 10, &beta_slow, sizeof(float)); + memcpy(params + 11, &xpos_base, sizeof(float)); + memcpy(params + 12, &xpos_down, sizeof(bool)); ggml_set_op_params(result, params, sizeof(params)); result->op = GGML_OP_ROPE; @@ -4884,7 +4894,9 @@ struct ggml_tensor * ggml_rope( int n_dims, int mode, int n_ctx) { - return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, false); + return ggml_rope_impl( + ctx, a, b, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, false + ); } struct ggml_tensor * ggml_rope_inplace( @@ -4894,7 +4906,9 @@ struct ggml_tensor * ggml_rope_inplace( int n_dims, int mode, int n_ctx) { - return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, true); + return ggml_rope_impl( + ctx, a, b, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, true + ); } struct ggml_tensor * ggml_rope_custom( @@ -4904,9 +4918,17 @@ struct ggml_tensor * ggml_rope_custom( int n_dims, int mode, int n_ctx, + int n_orig_ctx, float freq_base, - float freq_scale) { - return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, false); + float freq_scale, + float ext_factor, + float attn_factor, + float beta_fast, + float beta_slow) { + return ggml_rope_impl( + ctx, a, b, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, false + ); } struct ggml_tensor * ggml_rope_custom_inplace( @@ -4916,9 +4938,17 @@ struct ggml_tensor * ggml_rope_custom_inplace( int n_dims, int mode, int n_ctx, + int n_orig_ctx, float freq_base, - float freq_scale) { - return ggml_rope_impl(ctx, a, b, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, true); + float freq_scale, + float ext_factor, + float attn_factor, + float beta_fast, + float beta_slow) { + return ggml_rope_impl( + ctx, a, b, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, true + ); } struct ggml_tensor * ggml_rope_xpos_inplace( @@ -4928,7 +4958,7 @@ struct ggml_tensor * ggml_rope_xpos_inplace( int n_dims, float base, bool down) { - return ggml_rope_impl(ctx, a, b, n_dims, 0, 0, 10000.0f, 1.0f, base, down, true); + return ggml_rope_impl(ctx, a, b, n_dims, 0, 0, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, base, down, true); } // ggml_rope_back @@ -10901,6 +10931,45 @@ 
static void ggml_compute_forward_clamp( // ggml_compute_forward_rope +static float rope_yarn_ramp(const float low, const float high, const int i0) { + const float y = (i0 / 2 - low) / MAX(0.001f, high - low); + return 1 - MIN(1, MAX(0, y)); +} + +// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn +// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. +static void rope_yarn( + float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale, + float * cos_theta, float * sin_theta +) { + // Get n-d rotational scaling corrected for extrapolation + float theta_interp = freq_scale * theta_extrap; + float theta = theta_interp; + if (ext_factor != 0.0f) { + float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; + theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; + + // Get n-d magnitude scaling corrected for interpolation + mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale); + } + *cos_theta = cosf(theta) * mscale; + *sin_theta = sinf(theta) * mscale; +} + +// Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get +// `corr_dim(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))` +static float ggml_rope_yarn_corr_dim(int n_dims, int n_orig_ctx, float n_rot, float base) { + return n_dims * logf(n_orig_ctx / (n_rot * 2 * (float)M_PI)) / (2 * logf(base)); +} + +void ggml_rope_yarn_corr_dims( + int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2] +) { + // start and end correction dims + dims[0] = MAX(0, floorf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_fast, freq_base))); + dims[1] = MIN(n_dims - 1, ceilf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_slow, freq_base))); +} + static void ggml_compute_forward_rope_f32( const struct ggml_compute_params * params, const struct ggml_tensor * src0, @@ -10910,21 +10979,26 @@ static void ggml_compute_forward_rope_f32( return; } - float freq_base; - float freq_scale; + float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; // these two only relevant for xPos RoPE: float xpos_base; bool xpos_down; - //const int n_past = ((int32_t *) dst->op_params)[0]; - const int n_dims = ((int32_t *) dst->op_params)[1]; - const int mode = ((int32_t *) dst->op_params)[2]; - const int n_ctx = ((int32_t *) dst->op_params)[3]; - memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float)); - memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float)); - memcpy(&xpos_base, (int32_t *) dst->op_params + 6, sizeof(float)); - memcpy(&xpos_down, (int32_t *) dst->op_params + 7, sizeof(bool)); + //const int n_past = ((int32_t *) dst->op_params)[0]; + const int n_dims = ((int32_t *) dst->op_params)[1]; + const int mode = ((int32_t *) dst->op_params)[2]; + const int n_ctx = ((int32_t *) dst->op_params)[3]; + const int n_orig_ctx = ((int32_t *) dst->op_params)[4]; + + memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); + memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); + memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float)); + memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); + memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); + memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); + memcpy(&xpos_base, (int32_t *) dst->op_params + 11, sizeof(float)); + memcpy(&xpos_down, (int32_t *) dst->op_params + 12, sizeof(bool)); 
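Since the same op_params unpacking now recurs in the CUDA, Metal and CPU paths, a sketch of the slot layout implied by the memcpy offsets above may help (13 4-byte slots, which is also why a later hunk raises GGML_MAX_OP_PARAMS from 32 to 64):

    // rope op_params layout after this patch:
    //   [0] n_past     [1] n_dims      [2] mode        [3] n_ctx       [4] n_orig_ctx
    //   [5] freq_base  [6] freq_scale  [7] ext_factor  [8] attn_factor
    //   [9] beta_fast  [10] beta_slow  [11] xpos_base  [12] xpos_down (bool)
    // float slots are written and read back bit-for-bit via memcpy, e.g.:
    float ext_factor;
    memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
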
GGML_TENSOR_UNARY_OP_LOCALS @@ -10952,6 +11026,9 @@ static void ggml_compute_forward_rope_f32( int ir = 0; const float theta_scale = powf(freq_base, -2.0f/n_dims); + const float inv_ndims = -1.f/n_dims; + float corr_dims[2]; + ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims); const bool is_neox = mode & 2; const bool is_glm = mode & 4; @@ -10965,18 +11042,18 @@ static void ggml_compute_forward_rope_f32( if (ir++ < ir0) continue; if (ir > ir1) break; - float theta = freq_scale * (float)p; + float theta_base = (float)p; if (is_glm) { - theta = MIN(p, n_ctx - 2); + theta_base = MIN(p, n_ctx - 2); float block_theta = MAX(p - (n_ctx - 2), 0); for (int64_t i0 = 0; i0 < ne0 / 4; i0++) { - const float cos_theta = cosf(theta); - const float sin_theta = sinf(theta); + const float cos_theta = cosf(theta_base); + const float sin_theta = sinf(theta_base); const float cos_block_theta = cosf(block_theta); const float sin_block_theta = sinf(block_theta); - theta *= theta_scale; + theta_base *= theta_scale; block_theta *= theta_scale; const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); @@ -10994,13 +11071,16 @@ static void ggml_compute_forward_rope_f32( } } else if (!is_neox) { for (int64_t i0 = 0; i0 < ne0; i0 += 2) { - const float cos_theta = cosf(theta); - const float sin_theta = sinf(theta); + float cos_theta, sin_theta; + rope_yarn( + theta_base, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta + ); + // zeta scaling for xPos only: float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f; if (xpos_down) zeta = 1.0f / zeta; - theta *= theta_scale; + theta_base *= theta_scale; const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); @@ -11014,12 +11094,19 @@ static void ggml_compute_forward_rope_f32( } else { // TODO: this might be wrong for ne0 != n_dims - need double check // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28 + theta_base *= freq_scale; for (int64_t ib = 0; ib < ne0/n_dims; ++ib) { for (int64_t ic = 0; ic < n_dims; ic += 2) { - const float cos_theta = cosf(theta); - const float sin_theta = sinf(theta); + // simplified from `(ib * n_dims + ic) * inv_ndims` + float cur_rot = inv_ndims * ic - ib; + + float cos_theta, sin_theta; + rope_yarn( + theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, + &cos_theta, &sin_theta + ); - theta *= theta_scale; + theta_base *= theta_scale; const int64_t i0 = ib*n_dims + ic/2; @@ -11048,15 +11135,19 @@ static void ggml_compute_forward_rope_f16( return; } - float freq_base; - float freq_scale; + float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; - //const int n_past = ((int32_t *) dst->op_params)[0]; - const int n_dims = ((int32_t *) dst->op_params)[1]; - const int mode = ((int32_t *) dst->op_params)[2]; - const int n_ctx = ((int32_t *) dst->op_params)[3]; - memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float)); - memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float)); + //const int n_past = ((int32_t *) dst->op_params)[0]; + const int n_dims = ((int32_t *) dst->op_params)[1]; + const int mode = ((int32_t *) dst->op_params)[2]; + const int n_ctx = ((int32_t *) dst->op_params)[3]; + const int n_orig_ctx = ((int32_t *) dst->op_params)[4]; + 
memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); + memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); + memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float)); + memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); + memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); + memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); GGML_TENSOR_UNARY_OP_LOCALS @@ -11084,6 +11175,9 @@ static void ggml_compute_forward_rope_f16( int ir = 0; const float theta_scale = powf(freq_base, -2.0f/n_dims); + const float inv_ndims = -1.f/n_dims; + float corr_dims[2]; + ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims); const bool is_neox = mode & 2; const bool is_glm = mode & 4; @@ -11097,18 +11191,18 @@ static void ggml_compute_forward_rope_f16( if (ir++ < ir0) continue; if (ir > ir1) break; - float theta = freq_scale * (float)p; + float theta_base = (float)p; if (is_glm) { - theta = MIN(p, n_ctx - 2); + theta_base = MIN(p, n_ctx - 2); float block_theta = MAX(p - (n_ctx - 2), 0); for (int64_t i0 = 0; i0 < ne0 / 4; i0++) { - const float cos_theta = cosf(theta); - const float sin_theta = sinf(theta); + const float cos_theta = cosf(theta_base); + const float sin_theta = sinf(theta_base); const float cos_block_theta = cosf(block_theta); const float sin_block_theta = sinf(block_theta); - theta *= theta_scale; + theta_base *= theta_scale; block_theta *= theta_scale; const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); @@ -11126,10 +11220,12 @@ static void ggml_compute_forward_rope_f16( } } else if (!is_neox) { for (int64_t i0 = 0; i0 < ne0; i0 += 2) { - const float cos_theta = cosf(theta); - const float sin_theta = sinf(theta); + float cos_theta, sin_theta; + rope_yarn( + theta_base, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta + ); - theta *= theta_scale; + theta_base *= theta_scale; const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); @@ -11143,12 +11239,19 @@ static void ggml_compute_forward_rope_f16( } else { // TODO: this might be wrong for ne0 != n_dims - need double check // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28 + theta_base *= freq_scale; for (int64_t ib = 0; ib < ne0/n_dims; ++ib) { for (int64_t ic = 0; ic < n_dims; ic += 2) { - const float cos_theta = cosf(theta); - const float sin_theta = sinf(theta); + // simplified from `(ib * n_dims + ic) * inv_ndims` + float cur_rot = inv_ndims * ic - ib; - theta *= theta_scale; + float cos_theta, sin_theta; + rope_yarn( + theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, + &cos_theta, &sin_theta + ); + + theta_base *= theta_scale; const int64_t i0 = ib*n_dims + ic/2; @@ -11256,17 +11359,18 @@ static void ggml_compute_forward_rope_back_f32( if (ir++ < ir0) continue; if (ir > ir1) break; - float theta = freq_scale * (float)p; + float theta_base = freq_scale * (float)p; if (!is_neox) { for (int64_t i0 = 0; i0 < ne0; i0 += 2) { - const float cos_theta = cosf(theta); - const float sin_theta = sinf(theta); + const float cos_theta = cosf(theta_base); + const float sin_theta = sinf(theta_base); + // zeta scaling for xPos only: float zeta = xpos_base != 0.0f ? 
powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f; if (xpos_down) zeta = 1.0f / zeta; - theta *= theta_scale; + theta_base *= theta_scale; const float * const dy = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); float * dx = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); @@ -11280,10 +11384,10 @@ static void ggml_compute_forward_rope_back_f32( } else { for (int64_t ib = 0; ib < ne0/n_dims; ++ib) { for (int64_t ic = 0; ic < n_dims; ic += 2) { - const float cos_theta = cosf(theta); - const float sin_theta = sinf(theta); + const float cos_theta = cosf(theta_base); + const float sin_theta = sinf(theta_base); - theta *= theta_scale; + theta_base *= theta_scale; const int64_t i0 = ib*n_dims + ic/2; @@ -11356,14 +11460,14 @@ static void ggml_compute_forward_rope_back_f16( if (ir++ < ir0) continue; if (ir > ir1) break; - float theta = (float)p; + float theta_base = (float)p; if (!is_neox) { for (int64_t i0 = 0; i0 < ne0; i0 += 2) { - const float cos_theta = cosf(theta); - const float sin_theta = sinf(theta); + const float cos_theta = cosf(theta_base); + const float sin_theta = sinf(theta_base); - theta *= theta_scale; + theta_base *= theta_scale; const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); ggml_fp16_t * dx = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); @@ -11377,10 +11481,10 @@ static void ggml_compute_forward_rope_back_f16( } else { for (int64_t ib = 0; ib < ne0/n_dims; ++ib) { for (int64_t ic = 0; ic < n_dims; ic += 2) { - const float cos_theta = cosf(theta); - const float sin_theta = sinf(theta); + const float cos_theta = cosf(theta_base); + const float sin_theta = sinf(theta_base); - theta *= theta_scale; + theta_base *= theta_scale; const int64_t i0 = ib*n_dims + ic/2; @@ -15505,9 +15609,14 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor src1, n_dims, mode, + 0, n_ctx, freq_base, freq_scale, + 0.0f, + 1.0f, + 0.0f, + 0.0f, xpos_base, xpos_down, false), diff --git a/ggml.h b/ggml.h index 9d16c5a72fda0e..70eb25a6bf3afc 100644 --- a/ggml.h +++ b/ggml.h @@ -219,7 +219,7 @@ #define GGML_MAX_CONTEXTS 64 #define GGML_MAX_SRC 6 #define GGML_MAX_NAME 64 -#define GGML_MAX_OP_PARAMS 32 +#define GGML_MAX_OP_PARAMS 64 #define GGML_DEFAULT_N_THREADS 4 #if UINTPTR_MAX == 0xFFFFFFFF @@ -1326,8 +1326,13 @@ extern "C" { int n_dims, int mode, int n_ctx, + int n_orig_ctx, float freq_base, - float freq_scale); + float freq_scale, + float ext_factor, + float attn_factor, + float beta_fast, + float beta_slow); // in-place, returns view(a) GGML_API struct ggml_tensor * ggml_rope_custom_inplace( @@ -1337,8 +1342,17 @@ extern "C" { int n_dims, int mode, int n_ctx, + int n_orig_ctx, float freq_base, - float freq_scale); + float freq_scale, + float ext_factor, + float attn_factor, + float beta_fast, + float beta_slow); + + // compute correction dims for YaRN RoPE scaling + void ggml_rope_yarn_corr_dims( + int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]); // xPos RoPE, in-place, returns view(a) GGML_API struct ggml_tensor * ggml_rope_xpos_inplace( diff --git a/gguf-py/gguf/gguf.py b/gguf-py/gguf/gguf.py index 6b7d654294a3e0..727b4e55495a76 100644 --- a/gguf-py/gguf/gguf.py +++ b/gguf-py/gguf/gguf.py @@ -7,7 +7,7 @@ import struct import sys import tempfile -from enum import IntEnum, auto +from enum import Enum, IntEnum, auto from io import BufferedWriter from pathlib import Path from typing import 
IO, Any, BinaryIO, Callable, Sequence @@ -53,9 +53,12 @@ KEY_ATTENTION_LAYERNORM_RMS_EPS = "{arch}.attention.layer_norm_rms_epsilon" # RoPE -KEY_ROPE_DIMENSION_COUNT = "{arch}.rope.dimension_count" -KEY_ROPE_FREQ_BASE = "{arch}.rope.freq_base" -KEY_ROPE_SCALE_LINEAR = "{arch}.rope.scale_linear" +KEY_ROPE_DIMENSION_COUNT = "{arch}.rope.dimension_count" +KEY_ROPE_FREQ_BASE = "{arch}.rope.freq_base" +KEY_ROPE_SCALING_TYPE = "{arch}.rope.scaling.type" +KEY_ROPE_SCALING_FACTOR = "{arch}.rope.scaling.factor" +KEY_ROPE_SCALING_ORIG_CTX_LEN = "{arch}.rope.scaling.original_context_length" +KEY_ROPE_SCALING_FINETUNED = "{arch}.rope.scaling.finetuned" # tokenization KEY_TOKENIZER_MODEL = "tokenizer.ggml.model" @@ -577,6 +580,11 @@ class TokenType(IntEnum): UNUSED = 5 BYTE = 6 +class RopeScalingType(Enum): + NONE = 'none' + LINEAR = 'linear' + YARN = 'yarn' + # # implementation # @@ -948,8 +956,17 @@ def add_rope_dimension_count(self, count: int): def add_rope_freq_base(self, value: float): self.add_float32(KEY_ROPE_FREQ_BASE.format(arch=self.arch), value) - def add_rope_scale_linear(self, value: float): - self.add_float32(KEY_ROPE_SCALE_LINEAR.format(arch=self.arch), value) + def add_rope_scaling_type(self, value: RopeScalingType): + self.add_string(KEY_ROPE_SCALING_TYPE.format(arch=self.arch), value.value) + + def add_rope_scaling_factor(self, value: float): + self.add_float32(KEY_ROPE_SCALING_FACTOR.format(arch=self.arch), value) + + def add_rope_scaling_orig_ctx_len(self, value: int): + self.add_uint32(KEY_ROPE_SCALING_ORIG_CTX_LEN.format(arch=self.arch), value) + + def add_rope_scaling_finetuned(self, value: bool): + self.add_bool(KEY_ROPE_SCALING_FINETUNED.format(arch=self.arch), value) def add_tokenizer_model(self, model: str): self.add_string(KEY_TOKENIZER_MODEL, model) diff --git a/llama.cpp b/llama.cpp index 1c6d482f8fe1bc..685882c201921d 100644 --- a/llama.cpp +++ b/llama.cpp @@ -54,6 +54,7 @@ #include #include #include +#include #include #include #include @@ -235,6 +236,10 @@ enum llm_kv { LLM_KV_ROPE_DIMENSION_COUNT, LLM_KV_ROPE_FREQ_BASE, LLM_KV_ROPE_SCALE_LINEAR, + LLM_KV_ROPE_SCALING_TYPE, + LLM_KV_ROPE_SCALING_FACTOR, + LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, + LLM_KV_ROPE_SCALING_FINETUNED, LLM_KV_TOKENIZER_MODEL, LLM_KV_TOKENIZER_LIST, @@ -276,9 +281,13 @@ static std::map LLM_KV_NAMES = { { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" }, { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" }, - { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" }, - { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" }, - { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" }, + { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" }, + { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" }, + { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" }, + { LLM_KV_ROPE_SCALING_TYPE, "%s.rope.scaling.type" }, + { LLM_KV_ROPE_SCALING_FACTOR, "%s.rope.scaling.factor" }, + { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, "%s.rope.scaling.original_context_length" }, + { LLM_KV_ROPE_SCALING_FINETUNED, "%s.rope.scaling.finetuned" }, { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" }, { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" }, @@ -552,6 +561,22 @@ do { \ } \ } while (0) +static std::map LLAMA_ROPE_SCALING_TYPES = { + { LLAMA_ROPE_SCALING_NONE, "none" }, + { LLAMA_ROPE_SCALING_LINEAR, "linear" }, + { LLAMA_ROPE_SCALING_YARN, "yarn" }, +}; + +static int8_t llama_rope_scaling_type_from_string(const std::string & name) { + for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) { + if (kv.second == 
name) { + return kv.first; + } + } + + return LLAMA_ROPE_SCALING_UNSPECIFIED; +} + // // ggml helpers // @@ -1035,8 +1060,11 @@ struct llama_hparams { float f_norm_eps; float f_norm_rms_eps; - float rope_freq_base_train; - float rope_freq_scale_train; + float rope_freq_base_train; + float rope_freq_scale_train; + uint32_t n_yarn_orig_ctx; + int8_t rope_scaling_type_train : 3; + bool rope_finetuned : 1; float f_clamp_kqv; float f_max_alibi_bias; @@ -1051,6 +1079,8 @@ struct llama_hparams { if (this->n_layer != other.n_layer) return true; if (this->n_rot != other.n_rot) return true; if (this->n_ff != other.n_ff) return true; + if (this->rope_finetuned != other.rope_finetuned) return true; + if (this->n_yarn_orig_ctx != other.n_yarn_orig_ctx) return true; const float EPSILON = 1e-9; @@ -1081,8 +1111,16 @@ struct llama_cparams { uint32_t n_threads; // number of threads to use for generation uint32_t n_threads_batch; // number of threads to use for batch processing - float rope_freq_base; - float rope_freq_scale; + float rope_freq_base; + float rope_freq_scale; + + uint32_t n_yarn_orig_ctx; + // These hyperparameters are not exposed in GGUF, because all + // existing YaRN models use the same values for them. + float yarn_ext_factor; + float yarn_attn_factor; + float yarn_beta_fast; + float yarn_beta_slow; bool mul_mat_q; }; @@ -2014,14 +2052,30 @@ static void llm_load_hparams( hparams.n_head_kv = hparams.n_head; GGUF_GET_KEY(ctx, hparams.n_head_kv, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_ATTENTION_HEAD_COUNT_KV)); + hparams.rope_finetuned = false; + GGUF_GET_KEY(ctx, hparams.rope_finetuned, gguf_get_val_bool, GGUF_TYPE_BOOL, false, + kv(LLM_KV_ROPE_SCALING_FINETUNED)); + + hparams.n_yarn_orig_ctx = hparams.n_ctx_train; + GGUF_GET_KEY(ctx, hparams.n_yarn_orig_ctx, gguf_get_val_u32, GGUF_TYPE_UINT32, false, + kv(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN)); + // rope_freq_base (optional) hparams.rope_freq_base_train = 10000.0f; GGUF_GET_KEY(ctx, hparams.rope_freq_base_train, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_FREQ_BASE)); + std::string rope_scaling("linear"); + GGUF_GET_KEY(ctx, rope_scaling, gguf_get_val_str, GGUF_TYPE_STRING, false, kv(LLM_KV_ROPE_SCALING_TYPE)); + hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling); + GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_UNSPECIFIED); + // rope_freq_scale (inverse of the kv) is optional - float ropescale = 1.0f; - GGUF_GET_KEY(ctx, ropescale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_SCALE_LINEAR)); - hparams.rope_freq_scale_train = 1.0f/ropescale; + float ropescale = 0.0f; + GGUF_GET_KEY(ctx, ropescale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_SCALING_FACTOR)); + if (ropescale == 0.0f) { // try the old key name + GGUF_GET_KEY(ctx, ropescale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_SCALE_LINEAR)); + } + hparams.rope_freq_scale_train = ropescale == 0.0f ? 
1.0f : 1.0f/ropescale; // sanity check for n_rot (optional) { @@ -2371,6 +2425,8 @@ static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) { const auto & hparams = model.hparams; const auto & vocab = model.vocab; + const auto rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train); + // hparams LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml.fver)); LLAMA_LOG_INFO("%s: arch = %s\n", __func__, LLM_ARCH_NAMES.at(model.arch).c_str()); @@ -2389,8 +2445,11 @@ static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) { LLAMA_LOG_INFO("%s: f_clamp_kqv = %.1e\n", __func__, hparams.f_clamp_kqv); LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n", __func__, hparams.f_max_alibi_bias); LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff); + LLAMA_LOG_INFO("%s: rope scaling = %s\n", __func__, rope_scaling_type.c_str()); LLAMA_LOG_INFO("%s: freq_base_train = %.1f\n", __func__, hparams.rope_freq_base_train); LLAMA_LOG_INFO("%s: freq_scale_train = %g\n", __func__, hparams.rope_freq_scale_train); + LLAMA_LOG_INFO("%s: n_yarn_orig_ctx = %u\n", __func__, hparams.n_yarn_orig_ctx); + LLAMA_LOG_INFO("%s: rope_finetuned = %s\n", __func__, hparams.rope_finetuned ? "yes" : "unknown"); LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type)); LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model.ftype).c_str()); LLAMA_LOG_INFO("%s: model params = %.2f B\n", __func__, ml.n_elements*1e-9); @@ -3047,21 +3106,11 @@ static void llm_load_tensors( model.t_load_us = ggml_time_us() - model.t_start_us; } -static bool llama_model_load( - const std::string & fname, - llama_model & model, - int n_gpu_layers, - int main_gpu, - const float * tensor_split, - bool use_mmap, - bool use_mlock, - bool vocab_only, - llama_progress_callback progress_callback, - void *progress_callback_user_data) { +static bool llama_model_load(const std::string & fname, llama_model & model, const llama_model_params & params) { try { - llama_model_loader ml(fname, use_mmap); + llama_model_loader ml(fname, params.use_mmap); - model.hparams.vocab_only = vocab_only; + model.hparams.vocab_only = params.vocab_only; llm_load_arch (ml, model); llm_load_hparams(ml, model); @@ -3073,15 +3122,15 @@ static bool llama_model_load( throw std::runtime_error("vocab size mismatch"); } - if (vocab_only) { + if (params.vocab_only) { LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__); return true; } llm_load_tensors( - ml, model, n_gpu_layers, - main_gpu, tensor_split, - use_mlock, progress_callback, progress_callback_user_data); + ml, model, params.n_gpu_layers, params.main_gpu, params.tensor_split, params.use_mlock, + params.progress_callback, params.progress_callback_user_data + ); } catch (const std::exception & err) { LLAMA_LOG_ERROR("error loading model: %s\n", err.what()); return false; @@ -3150,6 +3199,7 @@ static struct ggml_tensor * llm_build_inp_embd( static void llm_build_k_shift( struct ggml_context * ctx, const llama_hparams & hparams, + const llama_cparams & cparams, const llama_kv_cache & kv, struct ggml_cgraph * graph, llm_rope_type type, @@ -3162,6 +3212,11 @@ static void llm_build_k_shift( const int64_t n_head_kv = hparams.n_head_kv; const int64_t n_embd_gqa = hparams.n_embd_gqa(); const int64_t n_embd_head = hparams.n_embd_head(); + const int32_t n_orig_ctx = cparams.n_yarn_orig_ctx; + const float ext_factor = cparams.yarn_ext_factor; + const float attn_factor = cparams.yarn_attn_factor; + const 
float beta_fast = cparams.yarn_beta_fast; + const float beta_slow = cparams.yarn_beta_slow; GGML_ASSERT(n_embd_head % n_rot == 0); @@ -3185,7 +3240,8 @@ static void llm_build_k_shift( ggml_element_size(kv.k)*n_embd_head, ggml_element_size(kv.k)*n_embd_gqa, ggml_element_size(kv.k)*n_embd_gqa*n_ctx*il), - K_shift, n_rot, rope_type, 0, freq_base, freq_scale); + K_shift, n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow); cb(tmp, "K_shifted", il); ggml_build_forward_expand(graph, tmp); } @@ -3442,12 +3498,17 @@ struct llm_build_context { const float freq_base; const float freq_scale; + const float ext_factor; + const float attn_factor; + const float beta_fast; + const float beta_slow; const float norm_eps; const float norm_rms_eps; const int32_t n_tokens; const int32_t n_kv; // size of KV cache to consider (n_kv <= n_ctx) const int32_t kv_head; // index of where we store new KV data in the cache + const int32_t n_orig_ctx; const bool do_rope_shift; @@ -3477,11 +3538,16 @@ struct llm_build_context { n_embd_gqa (hparams.n_embd_gqa()), freq_base (cparams.rope_freq_base), freq_scale (cparams.rope_freq_scale), + ext_factor (cparams.yarn_ext_factor), + attn_factor (cparams.yarn_attn_factor), + beta_fast (cparams.yarn_beta_fast), + beta_slow (cparams.yarn_beta_slow), norm_eps (hparams.f_norm_eps), norm_rms_eps (hparams.f_norm_rms_eps), n_tokens (batch.n_tokens), n_kv (worst_case ? n_ctx : kv_self.n), kv_head (worst_case ? n_ctx - n_tokens : kv_self.head), + n_orig_ctx (cparams.n_yarn_orig_ctx), do_rope_shift (worst_case || kv_self.has_shift), cb (cb), buf_compute (lctx.buf_compute) { @@ -3532,7 +3598,7 @@ struct llm_build_context { // shift the entire K-cache if needed if (do_rope_shift) { - llm_build_k_shift(ctx0, hparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb); + llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb); } for (int il = 0; il < n_layer; ++il) { @@ -3556,10 +3622,18 @@ struct llm_build_context { struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur); cb(Vcur, "Vcur", il); - Qcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, n_embd_head, 0, 0, freq_base, freq_scale); + Qcur = ggml_rope_custom( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, + n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); cb(Qcur, "Qcur", il); - Kcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, n_embd_head, 0, 0, freq_base, freq_scale); + Kcur = ggml_rope_custom( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, + n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); cb(Kcur, "Kcur", il); llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); @@ -3634,7 +3708,7 @@ struct llm_build_context { // shift the entire K-cache if needed if (do_rope_shift) { - llm_build_k_shift(ctx0, hparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb); + llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb); } for (int il = 0; il < n_layer; ++il) { @@ -3658,8 +3732,16 @@ struct llm_build_context { switch (model.type) { case MODEL_7B: - Qcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, Qcur, 
n_embd_head, n_head, n_tokens), inp_pos, n_embd_head, 0, 0, freq_base, freq_scale); - Kcur = ggml_rope_custom(ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, n_embd_head, 0, 0, freq_base, freq_scale); + Qcur = ggml_rope_custom( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, + n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + Kcur = ggml_rope_custom( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, + n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); break; case MODEL_13B: Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, n_tokens); @@ -3746,7 +3828,7 @@ struct llm_build_context { // shift the entire K-cache if needed if (do_rope_shift) { - llm_build_k_shift(ctx0, hparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb); + llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb); } for (int il = 0; il < n_layer; ++il) { @@ -3786,10 +3868,16 @@ struct llm_build_context { Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); // using mode = 2 for neox mode - Qcur = ggml_rope_custom(ctx0, Qcur, inp_pos, n_embd_head, 2, 0, freq_base, freq_scale); + Qcur = ggml_rope_custom( + ctx0, Qcur, inp_pos, n_embd_head, 2, 0, n_orig_ctx, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); cb(Qcur, "Qcur", il); - Kcur = ggml_rope_custom(ctx0, Kcur, inp_pos, n_embd_head, 2, 0, freq_base, freq_scale); + Kcur = ggml_rope_custom( + ctx0, Kcur, inp_pos, n_embd_head, 2, 0, n_orig_ctx, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); cb(Kcur, "Kcur", il); llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); @@ -3960,7 +4048,7 @@ struct llm_build_context { cb(KQ_mask, "KQ_mask", -1); if (do_rope_shift) { - llm_build_k_shift(ctx0, hparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb); + llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb); } for (int il = 0; il < n_layer; ++il) { @@ -4053,13 +4141,15 @@ struct llm_build_context { cb(kpass, "kpass", il); struct ggml_tensor * qrotated = ggml_rope_custom( - ctx0, qrot, inp_pos, n_rot, 2, 0, freq_base, freq_scale - ); + ctx0, qrot, inp_pos, n_rot, 2, 0, n_orig_ctx, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); cb(qrotated, "qrotated", il); struct ggml_tensor * krotated = ggml_rope_custom( - ctx0, krot, inp_pos, n_rot, 2, 0, freq_base, freq_scale - ); + ctx0, krot, inp_pos, n_rot, 2, 0, n_orig_ctx, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); cb(krotated, "krotated", il); // ggml currently only supports concatenation on dim=2 @@ -7883,8 +7973,13 @@ struct llama_context_params llama_context_default_params() { /*.n_batch =*/ 512, /*.n_threads =*/ GGML_DEFAULT_N_THREADS, // TODO: better default /*.n_threads_batch =*/ GGML_DEFAULT_N_THREADS, + /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_UNSPECIFIED, /*.rope_freq_base =*/ 0.0f, /*.rope_freq_scale =*/ 0.0f, + /*.yarn_ext_factor =*/ NAN, + /*.yarn_attn_factor =*/ 1.0f, + /*.yarn_beta_fast =*/ 32.0f, + /*.yarn_beta_slow =*/ 1.0f, /*.mul_mat_q =*/ true, /*.f16_kv =*/ true, /*.logits_all =*/ false, @@ -7971,10 +8066,7 @@ struct llama_model * llama_load_model_from_file( }; } - 
if (!llama_model_load(path_model, *model, params.n_gpu_layers, - params.main_gpu, params.tensor_split, - params.use_mmap, params.use_mlock, params.vocab_only, - params.progress_callback, params.progress_callback_user_data)) { + if (!llama_model_load(path_model, *model, params)) { LLAMA_LOG_ERROR("%s: failed to load model\n", __func__); delete model; return nullptr; @@ -8000,13 +8092,35 @@ struct llama_context * llama_new_context_with_model( const auto & hparams = model->hparams; auto & cparams = ctx->cparams; - cparams.n_batch = params.n_batch; - cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx; - cparams.rope_freq_base = params.rope_freq_base == 0 ? hparams.rope_freq_base_train : params.rope_freq_base; - cparams.rope_freq_scale = params.rope_freq_scale == 0 ? hparams.rope_freq_scale_train : params.rope_freq_scale; - cparams.n_threads = params.n_threads; - cparams.n_threads_batch = params.n_threads_batch; - cparams.mul_mat_q = params.mul_mat_q; + cparams.n_batch = params.n_batch; + cparams.n_threads = params.n_threads; + cparams.n_threads_batch = params.n_threads_batch; + cparams.yarn_ext_factor = params.yarn_ext_factor; + cparams.yarn_attn_factor = params.yarn_attn_factor; + cparams.yarn_beta_fast = params.yarn_beta_fast; + cparams.yarn_beta_slow = params.yarn_beta_slow; + cparams.mul_mat_q = params.mul_mat_q; + + cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx; + cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base; + cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale; + + cparams.n_yarn_orig_ctx = params.yarn_orig_ctx != 0 ? params.yarn_orig_ctx : + hparams.n_yarn_orig_ctx != 0 ? hparams.n_yarn_orig_ctx : + hparams.n_ctx_train; + + auto rope_scaling_type = params.rope_scaling_type; + if (rope_scaling_type == LLAMA_ROPE_SCALING_UNSPECIFIED) { + rope_scaling_type = hparams.rope_scaling_type_train; + } + + if (rope_scaling_type == LLAMA_ROPE_SCALING_NONE) { + cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none + } + + if (std::isnan(cparams.yarn_ext_factor)) { // NaN indicates 'not set' + cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_YARN ? 
1.0f : 0.0f; + } if (params.seed == LLAMA_DEFAULT_SEED) { params.seed = time(NULL); diff --git a/llama.h b/llama.h index 75fe391ef2e733..3f1becd7616885 100644 --- a/llama.h +++ b/llama.h @@ -106,6 +106,14 @@ extern "C" { LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file }; + enum llama_rope_scaling_type { + LLAMA_ROPE_SCALING_UNSPECIFIED = -1, + LLAMA_ROPE_SCALING_NONE = 0, + LLAMA_ROPE_SCALING_LINEAR = 1, + LLAMA_ROPE_SCALING_YARN = 2, + LLAMA_ROPE_SCALING_MAX_VALUE = LLAMA_ROPE_SCALING_YARN, + }; + typedef struct llama_token_data { llama_token id; // token id float logit; // log-odds of the token @@ -172,10 +180,16 @@ extern "C" { uint32_t n_batch; // prompt processing maximum batch size uint32_t n_threads; // number of threads to use for generation uint32_t n_threads_batch; // number of threads to use for batch processing + int8_t rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type` // ref: https://github.com/ggerganov/llama.cpp/pull/2054 - float rope_freq_base; // RoPE base frequency, 0 = from model - float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model + float rope_freq_base; // RoPE base frequency, 0 = from model + float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model + float yarn_ext_factor; // YaRN extrapolation mix factor, NaN = from model + float yarn_attn_factor; // YaRN magnitude scaling factor + float yarn_beta_fast; // YaRN low correction dim + float yarn_beta_slow; // YaRN high correction dim + uint32_t yarn_orig_ctx; // YaRN original context size // Keep the booleans together to avoid misalignment during copy-by-value. bool mul_mat_q; // if true, use experimental mul_mat_q kernels (DEPRECATED - always true) From d02e98cde035d91ed8032ab943d1d504fe9da394 Mon Sep 17 00:00:00 2001 From: slaren Date: Wed, 1 Nov 2023 23:10:09 +0100 Subject: [PATCH 041/206] ggml-cuda : compute ptrs for cublasGemmBatchedEx in a kernel (#3891) * ggml-cuda : compute ptrs for cublasGemmBatchedEx in a kernel * fix warnings --- ggml-cuda.cu | 80 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 46 insertions(+), 34 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 12ee10e3d9bdcd..61cd1747cac4fc 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -6696,8 +6696,10 @@ inline void ggml_cuda_op_clamp( GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); - const float min = ((float *) dst->op_params)[0]; - const float max = ((float *) dst->op_params)[1]; + float min; + float max; + memcpy(&min, dst->op_params, sizeof(float)); + memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); clamp_f32_cuda(src0_dd, dst_dd, min, max, ggml_nelements(src0), main_stream); CUDA_CHECK(cudaGetLastError()); @@ -7221,6 +7223,30 @@ static void ggml_cuda_mul_mat_vec_nc(const ggml_tensor * src0, const ggml_tensor ggml_mul_mat_vec_nc_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, row_stride_x, ne02, ne12, channel_stride_x, main_stream); } +__global__ void k_compute_batched_ptrs( + const half * src0_as_f16, const half * src1_as_f16, half * dst_f16, + void ** ptrs, + int ne12, int ne13, + int ne23, + int nb02, int nb03, + int nb12, int nb13, + int nb2, int nb3, + int r2, int r3) { + int i13 = blockIdx.x * blockDim.x + threadIdx.x; + int i12 = blockIdx.y * blockDim.y + threadIdx.y; + + if (i13 >= ne13 || i12 >= ne12) { + return; + } + + int i03 = i13 / r3; + int i02 = i12 / r2; + + ptrs[0*ne23 + i12 + i13*ne12] = (char *) src0_as_f16 + i02*nb02 + i03*nb03; + ptrs[1*ne23 + i12 + i13*ne12] = (char 
*) src1_as_f16 + i12*nb12/2 + i13*nb13/2; + ptrs[2*ne23 + i12 + i13*ne12] = (char *) dst_f16 + i12* nb2/2 + i13* nb3/2; +} + static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { GGML_ASSERT(!ggml_is_transposed(src0)); GGML_ASSERT(!ggml_is_transposed(src1)); @@ -7322,49 +7348,35 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } else { // use cublasGemmBatchedEx - // TODO: https://github.com/ggerganov/llama.cpp/pull/3749#discussion_r1369997000 const int ne23 = ne12*ne13; - // TODO: avoid this alloc - void ** ptrs = (void **) malloc(3*ne23*sizeof(void *)); - - for (int i13 = 0; i13 < ne13; ++i13) { - for (int i12 = 0; i12 < ne12; ++i12) { - int i03 = i13 / r3; - int i02 = i12 / r2; - - ptrs[0*ne23 + i12 + i13*ne12] = (char *) src0_as_f16 + i02*src0->nb[2] + i03*src0->nb[3]; - ptrs[1*ne23 + i12 + i13*ne12] = (char *) src1_as_f16 + i12*src1->nb[2]/2 + i13*src1->nb[3]/2; - ptrs[2*ne23 + i12 + i13*ne12] = (char *) dst_f16 + i12* dst->nb[2]/2 + i13* dst->nb[3]/2; - } - } - - // allocate device memory for pointers void ** ptrs_as = nullptr; - CUDA_CHECK(cudaMalloc(&ptrs_as, 3*ne23*sizeof(void *))); - - // TODO: this does not work for some reason -- not sure why? - //size_t ptrs_s = 0; - //ptrs_as = (void **) ggml_cuda_pool_malloc(3*ne23*sizeof(void *), &ptrs_s); - - // copy pointers to device - CUDA_CHECK(cudaMemcpy(ptrs_as, ptrs, 3*ne23*sizeof(void *), cudaMemcpyHostToDevice)); - - free(ptrs); + size_t ptrs_s = 0; + ptrs_as = (void **) ggml_cuda_pool_malloc(3*ne23*sizeof(void *), &ptrs_s); + + dim3 block_dims(ne13, ne12); + k_compute_batched_ptrs<<<1, block_dims, 0, main_stream>>>( + src0_as_f16, src1_as_f16, dst_f16, + ptrs_as, + ne12, ne13, + ne23, + nb02, nb03, + nb12, nb13, + dst->nb[2], dst->nb[3], + r2, r3); + CUDA_CHECK(cudaGetLastError()); CUBLAS_CHECK( cublasGemmBatchedEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, - &alpha_f16, (const void **) (ptrs_as + 0*ne23), CUDA_R_16F, nb01/sizeof(half), - (const void **) (ptrs_as + 1*ne23), CUDA_R_16F, nb11/sizeof(float), - &beta_f16, ( void **) (ptrs_as + 2*ne23), CUDA_R_16F, ne01, + &alpha_f16, (const void * const *) (ptrs_as + 0*ne23), CUDA_R_16F, nb01/sizeof(half), + (const void * const *) (ptrs_as + 1*ne23), CUDA_R_16F, nb11/sizeof(float), + &beta_f16, ( void ** ) (ptrs_as + 2*ne23), CUDA_R_16F, ne01, ne23, CUBLAS_COMPUTE_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); - // free device memory for pointers - CUDA_CHECK(cudaFree(ptrs_as)); - //ggml_cuda_pool_free(ptrs_as, ptrs_s); + ggml_cuda_pool_free(ptrs_as, ptrs_s); } #endif From 0eb332a10f3f14a3746c391bf80ff5e7bdf29d5d Mon Sep 17 00:00:00 2001 From: cebtenzzre Date: Wed, 1 Nov 2023 19:29:14 -0400 Subject: [PATCH 042/206] llama : fix llama_context_default_params after #2268 (#3893) --- llama.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/llama.cpp b/llama.cpp index 685882c201921d..32d7d23de6afb1 100644 --- a/llama.cpp +++ b/llama.cpp @@ -7980,6 +7980,7 @@ struct llama_context_params llama_context_default_params() { /*.yarn_attn_factor =*/ 1.0f, /*.yarn_beta_fast =*/ 32.0f, /*.yarn_beta_slow =*/ 1.0f, + /*.yarn_orig_ctx =*/ 0, /*.mul_mat_q =*/ true, /*.f16_kv =*/ true, /*.logits_all =*/ false, From 2fffa0d61fa10e4b466e78cabcc6a4e16717b580 Mon Sep 17 00:00:00 2001 From: cebtenzzre Date: Thu, 2 Nov 2023 01:49:44 -0400 Subject: [PATCH 043/206] cuda : fix RoPE after #2268 (#3897) --- ggml-cuda.cu | 6 +++--- 1 file changed, 3 insertions(+), 3 
deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 61cd1747cac4fc..57a528ede23ed2 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -4539,7 +4539,7 @@ static __global__ void rope( const int i2 = row/p_delta_rows; const int p = has_pos ? pos[i2] : 0; - const float theta_base = p*powf(freq_base, -col/ncols); + const float theta_base = p*powf(freq_base, -float(col)/ncols); float cos_theta, sin_theta; rope_yarn(theta_base, freq_scale, corr_dims, col, ext_factor, attn_factor, &cos_theta, &sin_theta); @@ -4566,8 +4566,8 @@ static __global__ void rope_neox( const int i = row*ncols + col/2; const int i2 = row/p_delta_rows; - // simplified from `(row * ncols + col) * (-1 / ncols)` - const float cur_rot = -col/ncols - row; + // simplified from `(ib * ncols + col) * (-1 / ncols)`, where ib is assumed to be zero + const float cur_rot = -float(col)/ncols; const int p = has_pos ? pos[i2] : 0; const float theta_base = p*powf(freq_base, cur_rot); From 183b3fac6c28e65d23ac0230c1dd6fb84bf0154d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 2 Nov 2023 08:33:37 +0200 Subject: [PATCH 044/206] metal : fix build errors and kernel sig after #2268 (#3898) --- ggml-metal.m | 57 ++++++++++++++++++++++++------------------------ ggml-metal.metal | 16 +++++++++----- 2 files changed, 40 insertions(+), 33 deletions(-) diff --git a/ggml-metal.m b/ggml-metal.m index 611d5e173681eb..b33a3cb8fd0128 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -1419,34 +1419,35 @@ void ggml_metal_graph_compute( default: GGML_ASSERT(false); }; - [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; - [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; - [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; - [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:3]; - [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:4]; - [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:5]; - [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:6]; - [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:7]; - [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:8]; - [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:9]; - [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:10]; - [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:11]; - [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:12]; - [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:13]; - [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:14]; - [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:15]; - [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:16]; - [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:17]; - [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:18]; - [encoder setBytes:&n_past length:sizeof( int) atIndex:19]; - [encoder setBytes:&n_dims length:sizeof( int) atIndex:20]; - [encoder setBytes:&mode length:sizeof( int) atIndex:21]; - [encoder setBytes:&freq_base length:sizeof(float) atIndex:22]; - [encoder setBytes:&freq_scale length:sizeof(float) atIndex:23]; - [encoder setBytes:&ext_factor length:sizeof(float) atIndex:24]; - [encoder setBytes:&attn_factor length:sizeof(float) atIndex:25]; - [encoder setBytes:&beta_fast length:sizeof(float) atIndex:26]; - [encoder setBytes:&beta_slow length:sizeof(float) atIndex:27]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; + [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:3]; + [encoder setBytes:&ne01 length:sizeof( 
int64_t) atIndex:4]; + [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:5]; + [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:6]; + [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:7]; + [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:8]; + [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:9]; + [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:10]; + [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:11]; + [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:12]; + [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:13]; + [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:14]; + [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:15]; + [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:16]; + [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:17]; + [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:18]; + [encoder setBytes:&n_past length:sizeof( int) atIndex:19]; + [encoder setBytes:&n_dims length:sizeof( int) atIndex:20]; + [encoder setBytes:&mode length:sizeof( int) atIndex:21]; + [encoder setBytes:&n_orig_ctx length:sizeof( int) atIndex:22]; + [encoder setBytes:&freq_base length:sizeof( float) atIndex:23]; + [encoder setBytes:&freq_scale length:sizeof( float) atIndex:24]; + [encoder setBytes:&ext_factor length:sizeof( float) atIndex:25]; + [encoder setBytes:&attn_factor length:sizeof( float) atIndex:26]; + [encoder setBytes:&beta_fast length:sizeof( float) atIndex:27]; + [encoder setBytes:&beta_slow length:sizeof( float) atIndex:28]; [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; diff --git a/ggml-metal.metal b/ggml-metal.metal index 471d7d390f8138..7c35f23a7612fd 100644 --- a/ggml-metal.metal +++ b/ggml-metal.metal @@ -1070,20 +1070,20 @@ static float rope_yarn_ramp(const float low, const float high, const int i0) { // MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. 
static void rope_yarn( float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale, - float * cos_theta, float * sin_theta + thread float * cos_theta, thread float * sin_theta ) { // Get n-d rotational scaling corrected for extrapolation float theta_interp = freq_scale * theta_extrap; float theta = theta_interp; if (ext_factor != 0.0f) { - ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; + float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; // Get n-d magnitude scaling corrected for interpolation - mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale); + mscale *= 1.0f + 0.1f * log(1.0f / freq_scale); } - *cos_theta = cosf(theta) * mscale; - *sin_theta = sinf(theta) * mscale; + *cos_theta = cos(theta) * mscale; + *sin_theta = sin(theta) * mscale; } // Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get @@ -1123,8 +1123,13 @@ typedef void (rope_t)( constant int & n_past, constant int & n_dims, constant int & mode, + constant int & n_orig_ctx, constant float & freq_base, constant float & freq_scale, + constant float & ext_factor, + constant float & attn_factor, + constant float & beta_fast, + constant float & beta_slow, uint tiitg[[thread_index_in_threadgroup]], uint3 tptg[[threads_per_threadgroup]], uint3 tgpig[[threadgroup_position_in_grid]]); @@ -1153,6 +1158,7 @@ kernel void kernel_rope( constant int & n_past, constant int & n_dims, constant int & mode, + constant int & n_orig_ctx, constant float & freq_base, constant float & freq_scale, constant float & ext_factor, From 4d719a6d4e74b9a98e75f826f865f3153717d54b Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 2 Nov 2023 08:35:10 +0200 Subject: [PATCH 045/206] cuda : check if this fixes Pascal card regression (#3882) --- ggml-cuda.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 57a528ede23ed2..e4629512611b6c 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -7420,7 +7420,7 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1 } else if (all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) { // KQV single-batch ggml_cuda_mul_mat_vec_nc(src0, src1, dst); - } else if (all_on_device && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) { + } else if (all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) { // KQ + KQV multi-batch ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst); } else if (src0->type == GGML_TYPE_F32) { From b12fa0d1c13596869c512f49a526b979c94787cc Mon Sep 17 00:00:00 2001 From: cebtenzzre Date: Thu, 2 Nov 2023 02:50:16 -0400 Subject: [PATCH 046/206] build : link against build info instead of compiling against it (#3879) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * cmake : fix build when .git does not exist * cmake : simplify BUILD_INFO target * cmake : add missing dependencies on BUILD_INFO * build : link against build info instead of compiling against it * zig : make build info a .cpp source instead of a header Co-authored-by: Matheus C. França * cmake : revert change to CMP0115 --------- Co-authored-by: Matheus C. 
França --- .gitignore | 2 +- CMakeLists.txt | 33 --------- Makefile | 71 ++++++++++---------- build.zig | 38 +++++------ common/CMakeLists.txt | 42 +++++++++++- common/build-info.cpp.in | 4 ++ common/common.cpp | 5 +- common/common.h | 12 +++- examples/benchmark/CMakeLists.txt | 5 +- examples/benchmark/benchmark-matmult.cpp | 1 - examples/embedding/CMakeLists.txt | 3 - examples/embedding/embedding.cpp | 1 - examples/infill/CMakeLists.txt | 3 - examples/infill/infill.cpp | 5 +- examples/llama-bench/CMakeLists.txt | 3 - examples/llama-bench/llama-bench.cpp | 5 +- examples/llava/CMakeLists.txt | 6 -- examples/main/CMakeLists.txt | 3 - examples/main/main.cpp | 5 +- examples/parallel/CMakeLists.txt | 3 - examples/parallel/parallel.cpp | 2 - examples/perplexity/CMakeLists.txt | 3 - examples/perplexity/perplexity.cpp | 1 - examples/quantize-stats/CMakeLists.txt | 2 +- examples/quantize-stats/quantize-stats.cpp | 1 - examples/quantize/CMakeLists.txt | 5 +- examples/quantize/quantize.cpp | 1 - examples/save-load-state/CMakeLists.txt | 3 - examples/save-load-state/save-load-state.cpp | 1 - examples/server/CMakeLists.txt | 3 - examples/server/server.cpp | 5 +- examples/speculative/CMakeLists.txt | 3 - examples/speculative/speculative.cpp | 2 - scripts/build-info.cmake | 30 +++++---- scripts/build-info.h.in | 9 --- scripts/build-info.sh | 13 ++-- 36 files changed, 143 insertions(+), 191 deletions(-) create mode 100644 common/build-info.cpp.in delete mode 100644 scripts/build-info.h.in diff --git a/.gitignore b/.gitignore index 5d7c5479ef67ae..50cbd0b47cae36 100644 --- a/.gitignore +++ b/.gitignore @@ -65,7 +65,7 @@ models-mnt /parallel /train-text-from-scratch /vdot -build-info.h +/common/build-info.cpp arm_neon.h compile_commands.json CMakeSettings.json diff --git a/CMakeLists.txt b/CMakeLists.txt index 3659279e2d7d09..611ed3f4d2a64d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -100,39 +100,6 @@ option(LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALO option(LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE}) option(LLAMA_BUILD_SERVER "llama: build server example" ON) -# -# Build info header -# - -# Generate initial build-info.h -include(${CMAKE_CURRENT_SOURCE_DIR}/scripts/build-info.cmake) - -if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git") - set(GIT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/.git") - - # Is git submodule - if(NOT IS_DIRECTORY "${GIT_DIR}") - file(READ ${GIT_DIR} REAL_GIT_DIR_LINK) - string(REGEX REPLACE "gitdir: (.*)\n$" "\\1" REAL_GIT_DIR ${REAL_GIT_DIR_LINK}) - set(GIT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/${REAL_GIT_DIR}") - endif() - - # Add a custom target for build-info.h - add_custom_target(BUILD_INFO ALL DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/build-info.h") - - # Add a custom command to rebuild build-info.h when .git/index changes - add_custom_command( - OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/build-info.h" - COMMENT "Generating build details from Git" - COMMAND ${CMAKE_COMMAND} -DMSVC=${MSVC} -DCMAKE_C_COMPILER_VERSION=${CMAKE_C_COMPILER_VERSION} -DCMAKE_C_COMPILER_ID=${CMAKE_C_COMPILER_ID} -DCMAKE_VS_PLATFORM_NAME=${CMAKE_VS_PLATFORM_NAME} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -P "${CMAKE_CURRENT_SOURCE_DIR}/scripts/build-info.cmake" - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - DEPENDS "${GIT_DIR}/index" - VERBATIM - ) -else() - message(WARNING "Git repository not found; to enable automatic generation of build info, make sure Git is installed and the project is a Git repository.") -endif() - # # Compile flags # diff --git a/Makefile b/Makefile index 
c53c1e7260fef4..300c1e6c7e1276 100644 --- a/Makefile +++ b/Makefile @@ -542,9 +542,9 @@ llama.o: llama.cpp ggml.h ggml-alloc.h ggml-backend.h ggml-cuda.h ggml-metal.h l $(CXX) $(CXXFLAGS) -c $< -o $@ COMMON_H_DEPS = common/common.h common/sampling.h common/log.h -COMMON_DEPS = common.o sampling.o grammar-parser.o +COMMON_DEPS = common.o sampling.o grammar-parser.o build-info.o -common.o: common/common.cpp build-info.h $(COMMON_H_DEPS) +common.o: common/common.cpp $(COMMON_H_DEPS) $(CXX) $(CXXFLAGS) -c $< -o $@ sampling.o: common/sampling.cpp $(COMMON_H_DEPS) @@ -563,46 +563,46 @@ libllama.so: llama.o ggml.o $(OBJS) $(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS) clean: - rm -vrf *.o tests/*.o *.so *.dll benchmark-matmult build-info.h *.dot $(COV_TARGETS) $(BUILD_TARGETS) $(TEST_TARGETS) + rm -vrf *.o tests/*.o *.so *.dll benchmark-matmult common/build-info.cpp *.dot $(COV_TARGETS) $(BUILD_TARGETS) $(TEST_TARGETS) # # Examples # -main: examples/main/main.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) console.o grammar-parser.o $(OBJS) +main: examples/main/main.cpp ggml.o llama.o $(COMMON_DEPS) console.o grammar-parser.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) @echo @echo '==== Run ./main -h for help. ====' @echo -infill: examples/infill/infill.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) console.o grammar-parser.o $(OBJS) +infill: examples/infill/infill.cpp ggml.o llama.o $(COMMON_DEPS) console.o grammar-parser.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -simple: examples/simple/simple.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +simple: examples/simple/simple.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -batched: examples/batched/batched.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +batched: examples/batched/batched.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -batched-bench: examples/batched-bench/batched-bench.cpp build-info.h ggml.o llama.o common.o $(OBJS) +batched-bench: examples/batched-bench/batched-bench.cpp build-info.o ggml.o llama.o common.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -quantize: examples/quantize/quantize.cpp build-info.h ggml.o llama.o $(OBJS) +quantize: examples/quantize/quantize.cpp build-info.o ggml.o llama.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -quantize-stats: examples/quantize-stats/quantize-stats.cpp build-info.h ggml.o llama.o $(OBJS) +quantize-stats: examples/quantize-stats/quantize-stats.cpp build-info.o ggml.o llama.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -perplexity: examples/perplexity/perplexity.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +perplexity: examples/perplexity/perplexity.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -embedding: examples/embedding/embedding.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +embedding: examples/embedding/embedding.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -save-load-state: examples/save-load-state/save-load-state.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +save-load-state: examples/save-load-state/save-load-state.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -server: examples/server/server.cpp examples/server/httplib.h examples/server/json.hpp 
examples/server/index.html.hpp examples/server/index.js.hpp examples/server/completion.js.hpp examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h build-info.h ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS) +server: examples/server/server.cpp examples/server/httplib.h examples/server/json.hpp examples/server/index.html.hpp examples/server/index.js.hpp examples/server/completion.js.hpp examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS) $(CXX) $(CXXFLAGS) -Iexamples/server $(filter-out %.h,$(filter-out %.hpp,$^)) -o $@ $(LDFLAGS) $(LWINSOCK2) -Wno-cast-qual gguf: examples/gguf/gguf.cpp ggml.o llama.o $(OBJS) @@ -614,7 +614,7 @@ train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratc convert-llama2c-to-ggml: examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp ggml.o llama.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -llama-bench: examples/llama-bench/llama-bench.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +llama-bench: examples/llama-bench/llama-bench.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) llava: examples/llava/llava.cpp examples/llava/llava-utils.h examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) @@ -623,19 +623,19 @@ llava: examples/llava/llava.cpp examples/llava/llava-utils.h examples/llava/clip baby-llama: examples/baby-llama/baby-llama.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -beam-search: examples/beam-search/beam-search.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +beam-search: examples/beam-search/beam-search.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -finetune: examples/finetune/finetune.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS) +finetune: examples/finetune/finetune.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -export-lora: examples/export-lora/export-lora.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +export-lora: examples/export-lora/export-lora.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -speculative: examples/speculative/speculative.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS) +speculative: examples/speculative/speculative.cpp ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -parallel: examples/parallel/parallel.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +parallel: examples/parallel/parallel.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) ifdef LLAMA_METAL @@ -648,7 +648,7 @@ swift: examples/batched.swift (cd examples/batched.swift; make build) endif -build-info.h: $(wildcard .git/index) scripts/build-info.sh +common/build-info.cpp: $(wildcard .git/index) scripts/build-info.sh @sh scripts/build-info.sh $(CC) > $@.tmp @if ! 
cmp -s $@.tmp $@; then \ mv $@.tmp $@; \ @@ -656,13 +656,16 @@ build-info.h: $(wildcard .git/index) scripts/build-info.sh rm $@.tmp; \ fi +build-info.o: common/build-info.cpp + $(CXX) $(CXXFLAGS) -c $(filter-out %.h,$^) -o $@ + # # Tests # tests: $(TEST_TARGETS) -benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.h ggml.o $(OBJS) +benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.o ggml.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) run-benchmark-matmult: benchmark-matmult @@ -676,40 +679,40 @@ vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS) q8dot: pocs/vdot/q8dot.cpp ggml.o $(OBJS) $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS) -tests/test-llama-grammar: tests/test-llama-grammar.cpp build-info.h ggml.o $(COMMON_DEPS) grammar-parser.o $(OBJS) +tests/test-llama-grammar: tests/test-llama-grammar.cpp ggml.o $(COMMON_DEPS) grammar-parser.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -tests/test-grammar-parser: tests/test-grammar-parser.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS) +tests/test-grammar-parser: tests/test-grammar-parser.cpp ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -tests/test-double-float: tests/test-double-float.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +tests/test-double-float: tests/test-double-float.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -tests/test-grad0: tests/test-grad0.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +tests/test-grad0: tests/test-grad0.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -tests/test-opt: tests/test-opt.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +tests/test-opt: tests/test-opt.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -tests/test-quantize-fns: tests/test-quantize-fns.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +tests/test-quantize-fns: tests/test-quantize-fns.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -tests/test-quantize-perf: tests/test-quantize-perf.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +tests/test-quantize-perf: tests/test-quantize-perf.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -tests/test-sampling: tests/test-sampling.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +tests/test-sampling: tests/test-sampling.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -tests/test-tokenizer-0-falcon: tests/test-tokenizer-0-falcon.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +tests/test-tokenizer-0-falcon: tests/test-tokenizer-0-falcon.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -tests/test-tokenizer-0-llama: tests/test-tokenizer-0-llama.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +tests/test-tokenizer-0-llama: tests/test-tokenizer-0-llama.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -tests/test-tokenizer-1-bpe: tests/test-tokenizer-1-bpe.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +tests/test-tokenizer-1-bpe: tests/test-tokenizer-1-bpe.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -tests/test-tokenizer-1-llama: 
tests/test-tokenizer-1-llama.cpp build-info.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +tests/test-tokenizer-1-llama: tests/test-tokenizer-1-llama.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) tests/test-c.o: tests/test-c.c llama.h diff --git a/build.zig b/build.zig index 9b58b74ca858b6..699738f3dd5092 100644 --- a/build.zig +++ b/build.zig @@ -10,7 +10,6 @@ const Maker = struct { builder: *std.build.Builder, target: CrossTarget, optimize: Mode, - config_header: *ConfigHeader, enable_lto: bool, include_dirs: ArrayList([]const u8), @@ -41,26 +40,24 @@ const Maker = struct { const commit_hash = try std.ChildProcess.exec( .{ .allocator = builder.allocator, .argv = &.{ "git", "rev-parse", "HEAD" } }, ); - const config_header = builder.addConfigHeader( - .{ .style = .blank, .include_path = "build-info.h" }, - .{ - .BUILD_NUMBER = 0, - .BUILD_COMMIT = commit_hash.stdout[0 .. commit_hash.stdout.len - 1], // omit newline - .BUILD_COMPILER = builder.fmt("Zig {s}", .{zig_version}), - .BUILD_TARGET = try target.allocDescription(builder.allocator), - }, - ); + try std.fs.cwd().writeFile("common/build-info.cpp", builder.fmt( + \\int LLAMA_BUILD_NUMBER = {}; + \\char const *LLAMA_COMMIT = "{s}"; + \\char const *LLAMA_COMPILER = "Zig {s}"; + \\char const *LLAMA_BUILD_TARGET = "{s}"; + \\ + , .{ 0, commit_hash.stdout[0 .. commit_hash.stdout.len - 1], zig_version, try target.allocDescription(builder.allocator) })); var m = Maker{ .builder = builder, .target = target, .optimize = builder.standardOptimizeOption(.{}), - .config_header = config_header, .enable_lto = false, .include_dirs = ArrayList([]const u8).init(builder.allocator), .cflags = ArrayList([]const u8).init(builder.allocator), .cxxflags = ArrayList([]const u8).init(builder.allocator), .objs = ArrayList(*Compile).init(builder.allocator), }; + try m.addCFlag("-std=c11"); try m.addCxxFlag("-std=c++11"); try m.addProjectInclude(&.{}); @@ -72,7 +69,7 @@ const Maker = struct { const o = m.builder.addObject(.{ .name = name, .target = m.target, .optimize = m.optimize }); if (o.target.getAbi() != .msvc) o.defineCMacro("_GNU_SOURCE", null); - o.addConfigHeader(m.config_header); + if (std.mem.endsWith(u8, src, ".c")) { o.addCSourceFiles(&.{src}, m.cflags.items); o.linkLibC(); @@ -85,7 +82,6 @@ const Maker = struct { o.linkLibCpp(); } } - o.addConfigHeader(m.config_header); for (m.include_dirs.items) |i| o.addIncludePath(.{ .path = i }); o.want_lto = m.enable_lto; return o; @@ -105,7 +101,6 @@ const Maker = struct { // linkLibCpp already add (libc++ + libunwind + libc) e.linkLibCpp(); } - e.addConfigHeader(m.config_header); m.builder.installArtifact(e); e.want_lto = m.enable_lto; return e; @@ -121,6 +116,7 @@ pub fn build(b: *std.build.Builder) !void { const ggml_backend = make.obj("ggml-backend", "ggml-backend.c"); const ggml_quants = make.obj("ggml-quants", "ggml-quants.c"); const llama = make.obj("llama", "llama.cpp"); + const buildinfo = make.obj("common", "common/build-info.cpp"); const common = make.obj("common", "common/common.cpp"); const console = make.obj("console", "common/console.cpp"); const sampling = make.obj("sampling", "common/sampling.cpp"); @@ -128,14 +124,14 @@ pub fn build(b: *std.build.Builder) !void { const train = make.obj("train", "common/train.cpp"); const clip = make.obj("clip", "examples/llava/clip.cpp"); - _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, sampling, console, grammar_parser }); - _ = make.exe("quantize", 
"examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common }); - _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common }); - _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common }); - _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, train }); - _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, train }); + _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, sampling, console, grammar_parser }); + _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo }); + _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo }); + _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo }); + _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, train }); + _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, train }); - const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, sampling, grammar_parser, clip }); + const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, sampling, grammar_parser, clip }); if (server.target.isWindows()) { server.linkSystemLibrary("ws2_32"); } diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index fbb0ff0952ac7a..0150114e3bd2ce 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -1,8 +1,46 @@ # common + +# Build info header +# + +if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/../.git") + set(GIT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../.git") + + # Is git submodule + if(NOT IS_DIRECTORY "${GIT_DIR}") + file(READ ${GIT_DIR} REAL_GIT_DIR_LINK) + string(REGEX REPLACE "gitdir: (.*)\n$" "\\1" REAL_GIT_DIR ${REAL_GIT_DIR_LINK}) + set(GIT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/${REAL_GIT_DIR}") + endif() + + set(GIT_INDEX "${GIT_DIR}/index") +else() + message(WARNING "Git repository not found; to enable automatic generation of build info, make sure Git is installed and the project is a Git repository.") + set(GIT_INDEX "") +endif() + +# Add a custom command to rebuild build-info.cpp when .git/index changes +add_custom_command( + OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/build-info.cpp" + COMMENT "Generating build details from Git" + COMMAND ${CMAKE_COMMAND} -DMSVC=${MSVC} -DCMAKE_C_COMPILER_VERSION=${CMAKE_C_COMPILER_VERSION} + -DCMAKE_C_COMPILER_ID=${CMAKE_C_COMPILER_ID} -DCMAKE_VS_PLATFORM_NAME=${CMAKE_VS_PLATFORM_NAME} + -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -P "${CMAKE_CURRENT_SOURCE_DIR}/../scripts/build-info.cmake" + WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/.." 
+ DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/build-info.cpp.in" ${GIT_INDEX} + VERBATIM +) +set(TARGET build_info) +add_library(${TARGET} OBJECT build-info.cpp) +if (BUILD_SHARED_LIBS) + set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON) +endif() + + set(TARGET common) -add_library(${TARGET} OBJECT +add_library(${TARGET} STATIC common.h common.cpp sampling.h @@ -21,4 +59,4 @@ endif() target_include_directories(${TARGET} PUBLIC .) target_compile_features(${TARGET} PUBLIC cxx_std_11) -target_link_libraries(${TARGET} PRIVATE llama) +target_link_libraries(${TARGET} PRIVATE llama build_info) diff --git a/common/build-info.cpp.in b/common/build-info.cpp.in new file mode 100644 index 00000000000000..0b945aa68fff3e --- /dev/null +++ b/common/build-info.cpp.in @@ -0,0 +1,4 @@ +int LLAMA_BUILD_NUMBER = @BUILD_NUMBER@; +char const *LLAMA_COMMIT = "@BUILD_COMMIT@"; +char const *LLAMA_COMPILER = "@BUILD_COMPILER@"; +char const *LLAMA_BUILD_TARGET = "@BUILD_TARGET@"; diff --git a/common/common.cpp b/common/common.cpp index b182ffaaef48ec..e938dee165d9da 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -1,5 +1,4 @@ #include "common.h" -#include "build-info.h" #include "llama.h" #include @@ -1199,8 +1198,8 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l const std::string & timestamp, const std::vector & prompt_tokens, const char * model_desc) { const llama_sampling_params & sparams = params.sparams; - fprintf(stream, "build_commit: %s\n", BUILD_COMMIT); - fprintf(stream, "build_number: %d\n", BUILD_NUMBER); + fprintf(stream, "build_commit: %s\n", LLAMA_COMMIT); + fprintf(stream, "build_number: %d\n", LLAMA_BUILD_NUMBER); fprintf(stream, "cpu_has_arm_fma: %s\n", ggml_cpu_has_arm_fma() ? "true" : "false"); fprintf(stream, "cpu_has_avx: %s\n", ggml_cpu_has_avx() ? "true" : "false"); fprintf(stream, "cpu_has_avx2: %s\n", ggml_cpu_has_avx2() ? "true" : "false"); diff --git a/common/common.h b/common/common.h index 7be69f925bc2be..72a49b8901f26e 100644 --- a/common/common.h +++ b/common/common.h @@ -26,11 +26,17 @@ #define die(msg) do { fputs("error: " msg "\n", stderr); exit(1); } while (0) #define die_fmt(fmt, ...) 
do { fprintf(stderr, "error: " fmt "\n", __VA_ARGS__); exit(1); } while (0) -#define print_build_info() do { \ - fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT); \ - fprintf(stderr, "%s: built with %s for %s\n", __func__, BUILD_COMPILER, BUILD_TARGET); \ +#define print_build_info() do { \ + fprintf(stderr, "%s: build = %d (%s)\n", __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT); \ + fprintf(stderr, "%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET); \ } while(0) +// build info +extern int LLAMA_BUILD_NUMBER; +extern char const *LLAMA_COMMIT; +extern char const *LLAMA_COMPILER; +extern char const *LLAMA_BUILD_TARGET; + // // CLI argument parsing // diff --git a/examples/benchmark/CMakeLists.txt b/examples/benchmark/CMakeLists.txt index 14916d83134633..2bb47bab5a868b 100644 --- a/examples/benchmark/CMakeLists.txt +++ b/examples/benchmark/CMakeLists.txt @@ -1,9 +1,6 @@ set(TARGET benchmark) add_executable(${TARGET} benchmark-matmult.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE llama build_info ${CMAKE_THREAD_LIBS_INIT}) target_include_directories(${TARGET} PRIVATE ../../common) target_compile_features(${TARGET} PRIVATE cxx_std_11) -if(TARGET BUILD_INFO) - add_dependencies(${TARGET} BUILD_INFO) -endif() diff --git a/examples/benchmark/benchmark-matmult.cpp b/examples/benchmark/benchmark-matmult.cpp index f1c382aa9b9557..76e3f57ccce8e0 100644 --- a/examples/benchmark/benchmark-matmult.cpp +++ b/examples/benchmark/benchmark-matmult.cpp @@ -1,4 +1,3 @@ -#include "build-info.h" #include "common.h" #include "ggml.h" diff --git a/examples/embedding/CMakeLists.txt b/examples/embedding/CMakeLists.txt index 0c752c7bbb59f8..8ffc33868401f8 100644 --- a/examples/embedding/CMakeLists.txt +++ b/examples/embedding/CMakeLists.txt @@ -3,6 +3,3 @@ add_executable(${TARGET} embedding.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) -if(TARGET BUILD_INFO) - add_dependencies(${TARGET} BUILD_INFO) -endif() diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp index 14075609ebfd91..3295cd2400ac3d 100644 --- a/examples/embedding/embedding.cpp +++ b/examples/embedding/embedding.cpp @@ -1,4 +1,3 @@ -#include "build-info.h" #include "common.h" #include "llama.h" diff --git a/examples/infill/CMakeLists.txt b/examples/infill/CMakeLists.txt index 57d01cb0b5a8fd..e4e8028da09da6 100644 --- a/examples/infill/CMakeLists.txt +++ b/examples/infill/CMakeLists.txt @@ -3,6 +3,3 @@ add_executable(${TARGET} infill.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) -if(TARGET BUILD_INFO) - add_dependencies(${TARGET} BUILD_INFO) -endif() diff --git a/examples/infill/infill.cpp b/examples/infill/infill.cpp index 9c52b7bbad1dba..62f5ce3c16a322 100644 --- a/examples/infill/infill.cpp +++ b/examples/infill/infill.cpp @@ -2,7 +2,6 @@ #include "console.h" #include "llama.h" -#include "build-info.h" #include "grammar-parser.h" #include @@ -184,8 +183,8 @@ int main(int argc, char ** argv) { LOG_TEE("%s: warning: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale); } - LOG_TEE("%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT); - LOG_TEE("%s: built with %s for %s\n", __func__, 
BUILD_COMPILER, BUILD_TARGET); + LOG_TEE("%s: build = %d (%s)\n", __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT); + LOG_TEE("%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET); if (params.seed == LLAMA_DEFAULT_SEED) { params.seed = time(NULL); diff --git a/examples/llama-bench/CMakeLists.txt b/examples/llama-bench/CMakeLists.txt index 7e395afd05f755..5bdbea4e28187e 100644 --- a/examples/llama-bench/CMakeLists.txt +++ b/examples/llama-bench/CMakeLists.txt @@ -3,6 +3,3 @@ add_executable(${TARGET} llama-bench.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) -if(TARGET BUILD_INFO) - add_dependencies(${TARGET} BUILD_INFO) -endif() diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp index 780398184d2215..9bd82d565834a9 100644 --- a/examples/llama-bench/llama-bench.cpp +++ b/examples/llama-bench/llama-bench.cpp @@ -19,7 +19,6 @@ #include "ggml.h" #include "llama.h" #include "common.h" -#include "build-info.h" #include "ggml-cuda.h" // utils @@ -641,8 +640,8 @@ struct test { } }; -const std::string test::build_commit = BUILD_COMMIT; -const int test::build_number = BUILD_NUMBER; +const std::string test::build_commit = LLAMA_COMMIT; +const int test::build_number = LLAMA_BUILD_NUMBER; const bool test::cuda = !!ggml_cpu_has_cublas(); const bool test::opencl = !!ggml_cpu_has_clblast(); const bool test::metal = !!ggml_cpu_has_metal(); diff --git a/examples/llava/CMakeLists.txt b/examples/llava/CMakeLists.txt index 2d7979ecd89527..03d32c26efaddd 100644 --- a/examples/llava/CMakeLists.txt +++ b/examples/llava/CMakeLists.txt @@ -5,9 +5,6 @@ target_link_libraries(${TARGET} PRIVATE common ggml ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) if (NOT MSVC) target_compile_options(${TARGET} PRIVATE -Wno-cast-qual) # stb_image.h - endif() -if(TARGET BUILD_INFO) - add_dependencies(${TARGET} BUILD_INFO) endif() set(TARGET llava) @@ -15,6 +12,3 @@ add_executable(${TARGET} llava.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama clip ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) -if(TARGET BUILD_INFO) - add_dependencies(${TARGET} BUILD_INFO) -endif() diff --git a/examples/main/CMakeLists.txt b/examples/main/CMakeLists.txt index cc188894804bab..d532980b76da83 100644 --- a/examples/main/CMakeLists.txt +++ b/examples/main/CMakeLists.txt @@ -3,6 +3,3 @@ add_executable(${TARGET} main.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) -if(TARGET BUILD_INFO) - add_dependencies(${TARGET} BUILD_INFO) -endif() diff --git a/examples/main/main.cpp b/examples/main/main.cpp index 8a43b6ab878a5f..8d985c82ac21a9 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -2,7 +2,6 @@ #include "console.h" #include "llama.h" -#include "build-info.h" #include #include @@ -153,8 +152,8 @@ int main(int argc, char ** argv) { LOG_TEE("%s: warning: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale); } - LOG_TEE("%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT); - LOG_TEE("%s: built with %s for %s\n", __func__, BUILD_COMPILER, BUILD_TARGET); + LOG_TEE("%s: build = %d (%s)\n", __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT); + LOG_TEE("%s: built with %s for %s\n", __func__, LLAMA_COMPILER, 
LLAMA_BUILD_TARGET); if (params.seed == LLAMA_DEFAULT_SEED) { params.seed = time(NULL); diff --git a/examples/parallel/CMakeLists.txt b/examples/parallel/CMakeLists.txt index 0bbf89eaefce6f..319535a6e9054d 100644 --- a/examples/parallel/CMakeLists.txt +++ b/examples/parallel/CMakeLists.txt @@ -3,6 +3,3 @@ add_executable(${TARGET} parallel.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) -if(TARGET BUILD_INFO) - add_dependencies(${TARGET} BUILD_INFO) -endif() diff --git a/examples/parallel/parallel.cpp b/examples/parallel/parallel.cpp index 9a0b9c183d1070..a78df305f415c3 100644 --- a/examples/parallel/parallel.cpp +++ b/examples/parallel/parallel.cpp @@ -1,8 +1,6 @@ // A basic application simulating a server with multiple clients. // The clients submite requests to the server and they are processed in parallel. -#include "build-info.h" - #include "common.h" #include "llama.h" diff --git a/examples/perplexity/CMakeLists.txt b/examples/perplexity/CMakeLists.txt index af00b4e1650162..3c76d3221416b7 100644 --- a/examples/perplexity/CMakeLists.txt +++ b/examples/perplexity/CMakeLists.txt @@ -3,6 +3,3 @@ add_executable(${TARGET} perplexity.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) -if(TARGET BUILD_INFO) - add_dependencies(${TARGET} BUILD_INFO) -endif() diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp index bd2c73d87875fe..de60c5227f7c17 100644 --- a/examples/perplexity/perplexity.cpp +++ b/examples/perplexity/perplexity.cpp @@ -1,4 +1,3 @@ -#include "build-info.h" #include "common.h" #include "llama.h" diff --git a/examples/quantize-stats/CMakeLists.txt b/examples/quantize-stats/CMakeLists.txt index db182e2633f1fb..e31cf5e3809c1e 100644 --- a/examples/quantize-stats/CMakeLists.txt +++ b/examples/quantize-stats/CMakeLists.txt @@ -1,6 +1,6 @@ set(TARGET quantize-stats) add_executable(${TARGET} quantize-stats.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE llama build_info ${CMAKE_THREAD_LIBS_INIT}) target_include_directories(${TARGET} PRIVATE ../../common) target_compile_features(${TARGET} PRIVATE cxx_std_11) diff --git a/examples/quantize-stats/quantize-stats.cpp b/examples/quantize-stats/quantize-stats.cpp index dd76b1ceef134d..2712824774ae74 100644 --- a/examples/quantize-stats/quantize-stats.cpp +++ b/examples/quantize-stats/quantize-stats.cpp @@ -1,5 +1,4 @@ #define LLAMA_API_INTERNAL -#include "build-info.h" #include "common.h" #include "ggml.h" #include "llama.h" diff --git a/examples/quantize/CMakeLists.txt b/examples/quantize/CMakeLists.txt index 4a8eed544cb04a..6f374a2bd3b467 100644 --- a/examples/quantize/CMakeLists.txt +++ b/examples/quantize/CMakeLists.txt @@ -1,9 +1,6 @@ set(TARGET quantize) add_executable(${TARGET} quantize.cpp) install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE llama build_info ${CMAKE_THREAD_LIBS_INIT}) target_include_directories(${TARGET} PRIVATE ../../common) target_compile_features(${TARGET} PRIVATE cxx_std_11) -if(TARGET BUILD_INFO) - add_dependencies(${TARGET} BUILD_INFO) -endif() diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp index 
be0b2fe1eb963f..d27ea5e9132fdc 100644 --- a/examples/quantize/quantize.cpp +++ b/examples/quantize/quantize.cpp @@ -1,4 +1,3 @@ -#include "build-info.h" #include "common.h" #include "llama.h" diff --git a/examples/save-load-state/CMakeLists.txt b/examples/save-load-state/CMakeLists.txt index eadd13cdf7930a..cc6ed8554a6e34 100644 --- a/examples/save-load-state/CMakeLists.txt +++ b/examples/save-load-state/CMakeLists.txt @@ -3,6 +3,3 @@ add_executable(${TARGET} save-load-state.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) -if(TARGET BUILD_INFO) - add_dependencies(${TARGET} BUILD_INFO) -endif() diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp index 38d05f4d328e7a..48d80111010dfa 100644 --- a/examples/save-load-state/save-load-state.cpp +++ b/examples/save-load-state/save-load-state.cpp @@ -1,4 +1,3 @@ -#include "build-info.h" #include "common.h" #include "llama.h" diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt index a23ddcc550274f..1f0d26f7776894 100644 --- a/examples/server/CMakeLists.txt +++ b/examples/server/CMakeLists.txt @@ -11,6 +11,3 @@ if (WIN32) TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) endif() target_compile_features(${TARGET} PRIVATE cxx_std_11) -if(TARGET BUILD_INFO) - add_dependencies(${TARGET} BUILD_INFO) -endif() diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 84b04d5a0493a6..fd755327a511db 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1,6 +1,5 @@ #include "common.h" #include "llama.h" -#include "build-info.h" #include "grammar-parser.h" #include "../llava/clip.h" @@ -2264,8 +2263,8 @@ int main(int argc, char **argv) llama_backend_init(params.numa); - LOG_INFO("build info", {{"build", BUILD_NUMBER}, - {"commit", BUILD_COMMIT}}); + LOG_INFO("build info", {{"build", LLAMA_BUILD_NUMBER}, + {"commit", LLAMA_COMMIT}}); LOG_INFO("system info", { {"n_threads", params.n_threads}, diff --git a/examples/speculative/CMakeLists.txt b/examples/speculative/CMakeLists.txt index 6c5c9456e62344..810f3c46ac4aab 100644 --- a/examples/speculative/CMakeLists.txt +++ b/examples/speculative/CMakeLists.txt @@ -3,6 +3,3 @@ add_executable(${TARGET} speculative.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) target_compile_features(${TARGET} PRIVATE cxx_std_11) -if(TARGET BUILD_INFO) - add_dependencies(${TARGET} BUILD_INFO) -endif() diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp index 323c74652c9a69..798684f66678e2 100644 --- a/examples/speculative/speculative.cpp +++ b/examples/speculative/speculative.cpp @@ -1,5 +1,3 @@ -#include "build-info.h" - #include "common.h" #include "llama.h" diff --git a/scripts/build-info.cmake b/scripts/build-info.cmake index c86ab43790c753..73853dfa47f41e 100644 --- a/scripts/build-info.cmake +++ b/scripts/build-info.cmake @@ -1,5 +1,5 @@ -set(TEMPLATE_FILE "${CMAKE_CURRENT_SOURCE_DIR}/scripts/build-info.h.in") -set(HEADER_FILE "${CMAKE_CURRENT_SOURCE_DIR}/build-info.h") +set(TEMPLATE_FILE "${CMAKE_CURRENT_SOURCE_DIR}/common/build-info.cpp.in") +set(OUTPUT_FILE "${CMAKE_CURRENT_SOURCE_DIR}/common/build-info.cpp") set(BUILD_NUMBER 0) set(BUILD_COMMIT "unknown") set(BUILD_COMPILER "unknown") @@ -24,15 +24,21 @@ if(Git_FOUND) WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE HEAD 
     OUTPUT_STRIP_TRAILING_WHITESPACE
+    RESULT_VARIABLE RES
   )
+  if (RES EQUAL 0)
+    set(BUILD_COMMIT ${HEAD})
+  endif()
   execute_process(
     COMMAND ${GIT_EXECUTABLE} rev-list --count HEAD
     WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
     OUTPUT_VARIABLE COUNT
     OUTPUT_STRIP_TRAILING_WHITESPACE
+    RESULT_VARIABLE RES
   )
-  set(BUILD_COMMIT ${HEAD})
-  set(BUILD_NUMBER ${COUNT})
+  if (RES EQUAL 0)
+    set(BUILD_NUMBER ${COUNT})
+  endif()
 endif()
 
 if(MSVC)
@@ -53,22 +59,22 @@ else()
   set(BUILD_TARGET ${OUT})
 endif()
 
-# Only write the header if it's changed to prevent unnecessary recompilation
-if(EXISTS ${HEADER_FILE})
-  file(READ ${HEADER_FILE} CONTENTS)
-  string(REGEX MATCH "BUILD_COMMIT \"([^\"]*)\"" _ ${CONTENTS})
+# Only write the build info if it changed
+if(EXISTS ${OUTPUT_FILE})
+  file(READ ${OUTPUT_FILE} CONTENTS)
+  string(REGEX MATCH "LLAMA_COMMIT = \"([^\"]*)\";" _ ${CONTENTS})
   set(OLD_COMMIT ${CMAKE_MATCH_1})
-  string(REGEX MATCH "BUILD_COMPILER \"([^\"]*)\"" _ ${CONTENTS})
+  string(REGEX MATCH "LLAMA_COMPILER = \"([^\"]*)\";" _ ${CONTENTS})
   set(OLD_COMPILER ${CMAKE_MATCH_1})
-  string(REGEX MATCH "BUILD_TARGET \"([^\"]*)\"" _ ${CONTENTS})
+  string(REGEX MATCH "LLAMA_BUILD_TARGET = \"([^\"]*)\";" _ ${CONTENTS})
   set(OLD_TARGET ${CMAKE_MATCH_1})
   if (
     NOT OLD_COMMIT STREQUAL BUILD_COMMIT OR
     NOT OLD_COMPILER STREQUAL BUILD_COMPILER OR
     NOT OLD_TARGET STREQUAL BUILD_TARGET
   )
-    configure_file(${TEMPLATE_FILE} ${HEADER_FILE})
+    configure_file(${TEMPLATE_FILE} ${OUTPUT_FILE})
   endif()
 else()
-  configure_file(${TEMPLATE_FILE} ${HEADER_FILE})
+  configure_file(${TEMPLATE_FILE} ${OUTPUT_FILE})
 endif()
diff --git a/scripts/build-info.h.in b/scripts/build-info.h.in
deleted file mode 100644
index e996faef039748..00000000000000
--- a/scripts/build-info.h.in
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef BUILD_INFO_H
-#define BUILD_INFO_H
-
-#define BUILD_NUMBER @BUILD_NUMBER@
-#define BUILD_COMMIT "@BUILD_COMMIT@"
-#define BUILD_COMPILER "@BUILD_COMPILER@"
-#define BUILD_TARGET "@BUILD_TARGET@"
-
-#endif // BUILD_INFO_H
diff --git a/scripts/build-info.sh b/scripts/build-info.sh
index 3c8b1fb850eef0..32682afbdbd520 100755
--- a/scripts/build-info.sh
+++ b/scripts/build-info.sh
@@ -24,12 +24,7 @@ if out=$($CC -dumpmachine); then
   build_target=$out
 fi
 
-echo "#ifndef BUILD_INFO_H"
-echo "#define BUILD_INFO_H"
-echo
-echo "#define BUILD_NUMBER $build_number"
-echo "#define BUILD_COMMIT \"$build_commit\""
-echo "#define BUILD_COMPILER \"$build_compiler\""
-echo "#define BUILD_TARGET \"$build_target\""
-echo
-echo "#endif // BUILD_INFO_H"
+echo "int LLAMA_BUILD_NUMBER = ${build_number};"
+echo "char const *LLAMA_COMMIT = \"${build_commit}\";"
+echo "char const *LLAMA_COMPILER = \"${build_compiler}\";"
+echo "char const *LLAMA_BUILD_TARGET = \"${build_target}\";"

From 1efae9b7dca2a5cc5aa21c1997b538022964ea19 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Thu, 2 Nov 2023 09:54:18 +0200
Subject: [PATCH 047/206] llm : prevent from 1-D tensors being GPU split (#3697)

---
 llama.cpp | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 32d7d23de6afb1..bb60044b4707f7 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1837,6 +1837,12 @@ struct llama_model_loader {
             throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
         }
 
+        if (backend == GGML_BACKEND_GPU_SPLIT) {
+            if (ne.size() == 1) {
+                throw std::runtime_error(format("%s: 1-dimensional tensor '%s' cannot be split on the GPU", __func__, name.c_str()));
+            }
+        }
+
         {
             bool is_ok = true;
             for (size_t i = 0; i < ne.size(); ++i) {
@@ -2817,8 +2823,8 @@ static void llm_load_tensors(
             layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
             layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
 
-            layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
-            layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
+            layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+            layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
 
             if (backend == GGML_BACKEND_GPU) {
                 vram_weights +=
@@ -2877,13 +2883,13 @@ static void llm_load_tensors(
             layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
             layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
             layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
-            layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend_split);
+            layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
             layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
-            layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend_split);
+            layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
             layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
-            layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend_split);
+            layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
             layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
-            layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend_split);
+            layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
             layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
             layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
             layer.attn_q_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {64}, backend);
@@ -2949,19 +2955,19 @@ static void llm_load_tensors(
             layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
 
             layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
-            layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend_split);
+            layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
 
             layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
-            layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend_split);
+            layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
 
             layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
             layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
 
             layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
-            layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend_split);
+            layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
 
-            layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
-            layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend_split);
+            layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+            layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
 
             if (backend == GGML_BACKEND_GPU) {
                 vram_weights +=

From 2756c4fbffab097736d5116007872d86456a544a Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Thu, 2 Nov 2023 11:20:21 +0200
Subject: [PATCH 048/206] gguf : remove special-case code for GGUFv1 (#3901)

ggml-ci
---
 ggml.c                       | 58 +++--------------------------------
 models/ggml-vocab-llama.gguf | Bin 595423 -> 723676 bytes
 2 files changed, 5 insertions(+), 53 deletions(-)

diff --git a/ggml.c b/ggml.c
index 2c7fe476b176d5..d5a49d8e4f3148 100644
--- a/ggml.c
+++ b/ggml.c
@@ -18811,8 +18811,7 @@ static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset)
     return n == size;
 }
 
-// NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
-static bool gguf_fread_str_cur(FILE * file, struct gguf_str * p, size_t * offset) {
+static bool gguf_fread_str(FILE * file, struct gguf_str * p, size_t * offset) {
     p->n = 0;
     p->data = NULL;
 
@@ -18824,19 +18823,6 @@ static bool gguf_fread_str_cur(FILE * file, struct gguf_str * p, size_t * offset
     return ok;
 }
 
-static bool gguf_fread_str_v1(FILE * file, struct gguf_str * p, size_t * offset) {
-    p->n = 0;
-    p->data = NULL;
-
-    bool ok = true;
-
-    uint32_t n = 0;
-    ok = ok && gguf_fread_el(file, &n, sizeof(n), offset); p->data = calloc(n + 1, 1); p->n = n;
-    ok = ok && gguf_fread_el(file, p->data, p->n, offset);
-
-    return ok;
-}
-
 struct gguf_context * gguf_init_empty(void) {
     struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
 
@@ -18895,21 +18881,8 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
         ctx->data = NULL;
 
         ok = ok && gguf_fread_el(file, &ctx->header.version, sizeof(ctx->header.version), &offset);
-
-        if (ctx->header.version == 1) {
-            // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
-            uint32_t n_tensors = 0;
-            uint32_t n_kv = 0;
-
-            ok = ok && gguf_fread_el(file, &n_tensors, sizeof(n_tensors), &offset);
-            ok = ok && gguf_fread_el(file, &n_kv, sizeof(n_kv), &offset);
-
-            ctx->header.n_tensors = n_tensors;
-            ctx->header.n_kv = n_kv;
-        } else {
-            ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset);
-            ok = ok && gguf_fread_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv), &offset);
-        }
+        ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset);
+        ok = ok && gguf_fread_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv), &offset);
 
         if (!ok) {
             fprintf(stderr, "%s: failed to read header\n", __func__);
@@ -18919,12 +18892,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
         }
     }
 
-    // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
-    bool (* gguf_fread_str)(FILE *, struct gguf_str *, size_t *) = gguf_fread_str_cur;
-    if (ctx->header.version == 1) {
-        gguf_fread_str = gguf_fread_str_v1;
-    }
-
     // read the kv pairs
     {
         ctx->kv = malloc(ctx->header.n_kv * sizeof(struct gguf_kv));
@@ -18955,15 +18922,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
                 case GGUF_TYPE_ARRAY:
                     {
                         ok = ok && gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset);
-
-                        if (ctx->header.version == 1) {
-                            // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
-                            uint32_t n = 0;
-                            ok = ok && gguf_fread_el(file, &n, sizeof(n), &offset);
-                            kv->value.arr.n = n;
-                        } else {
-                            ok = ok && gguf_fread_el(file, &kv->value.arr.n, sizeof(kv->value.arr.n), &offset);
-                        }
+                        ok = ok && gguf_fread_el(file, &kv->value.arr.n, sizeof(kv->value.arr.n), &offset);
 
                         switch (kv->value.arr.type) {
                             case GGUF_TYPE_UINT8:
@@ -19022,14 +18981,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
         ok = ok && gguf_fread_str(file, &info->name, &offset);
         ok = ok && gguf_fread_el (file, &info->n_dims, sizeof(info->n_dims), &offset);
         for (uint32_t j = 0; j < info->n_dims; ++j) {
-            if (ctx->header.version == 1) {
-                // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
-                uint32_t t = 0;
-                ok = ok && gguf_fread_el(file, &t, sizeof(t), &offset);
-                info->ne[j] = t;
-            } else {
-                ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset);
-            }
+            ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset);
         }
         ok = ok && gguf_fread_el (file, &info->type, sizeof(info->type), &offset);
         ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset);
diff --git a/models/ggml-vocab-llama.gguf b/models/ggml-vocab-llama.gguf
index 63bfaf672f382c0f5bbcffe54736e2698ef3ac55..549eed8c53f438a61f1b00c9bd3b7d02325f2479 100644
GIT binary patch
literal 723676
[binary vocab data omitted]
zqB4f`ITNGIKbU{w;~$i&$Mu7mA>~nKjC{%87#Dh4%zx-;m%~oL$J@uqT?d!1rqDPf zRcLrfuHc1}%u@IzAv;>De49;TUmy%D@525uMQdn+N9H_68Fi>h?r=cQzLwUfUmpu& ztf&zQW#Z*zQrENC2GuYO#E}u&$7i8KcO%g!B*H8?oXR-WW(WQnR(8-rHd}8RVASd1 z`A^QiC4dh@GuHp1lM80AIE~!;}cma1fo;0 z9EV0wCC}Pq8vdHPEib+?ixI0rJrVUxxpmZ+cZOy7XxMxHxeTs_{h6zHTO^21V@qaixX{%rO*R>6kv?w8o1f$c}1x zZZ;u(&sV2x3l`?)@d4^%Rca=pLUc;iIxdE(jKSzrPf5Rb6Hr%+5 zgSRX~0yM7AQ2LA5x=19a%&d()=@1s_3}BjxI?HZ`we>Ln9DqsK3F+W34ni|VE_|5A zR?+vfxBj{RP$sr5IXUq;8oM+*|%C5>IA0 z&OMi{5aq{l7!5+7d;SptW+!L~WaUK0tcJ;+XvZ+c9od?n``qVbsQC{PaBAwd;^6P6 z@X-8+>EI{r)Zd&WeGqZ;sepqd3in|$p6BnS^Bn{pkr%p{%mQ6T~a3p3A#R8aTFs?>!F;9 zlb#*kPO#EkhXR&qTk^H&PgL|y!X!fKDrmo6hdq1xzUsk;v2>S~z95q4H)RsE(>T5- z6FG;!G`r7soaphxiJ=r+jU8VPGu*7{iE1h1{`zzbu>{>c@iyNHpo?xe4`xX{KS&OWc9ZmeuG6EaWk>cNveM_9W#)Ef zN%pdU-EDsZ1b0;l>GVPpp$LzZKW^bp6qH9PW`?gM6pCs;9R=eu7WeL5A;T0;Krrhl zX*QpneN&#=uaYmY7$5p7RGp$pL_a6T{(U+JZa~3p{!iu0)qp06q?v63how$?uHWnq z|6yEoWSl~mv|A^S*h!otV^a7163fOSlmj8{a296IQ5j)QWOeB9HS$4T z=%z>|o`^cOI0J8}5;qy8*>$hOQu-Ad*8kgokY*t;pqa?GY8onBIZ4b>_PZjFMvdYi zl%4;X4AD)|Cd7G)XxIC~Bt{6AhYv@3J$S=C&M{Us;-9Nwwi+ozpcv$i&SME+cEX{= zLHDT-h5i^gR>^*xb$X(tVg@A9F#kBIGW9ruC)l$i<%OfL5YS8#a~`P_Ks`(rA$2Z7 z#LWn>eK~w4<8t5Gu`0t~y0#;foJ><>X1@x>dZ5zbs8&TCHc3)*T&Mg398_vD!d;be zYr9lAQdV)U-wk8yf-dD`ruI`kQXWapoT=K`)6DAL&aD3FxKOlV4ricJhn^m5{MLBL zDOe|ky`P~nNvRr{-djY3vYnU#zjSC)<%zHi-N}M!MJH+6C&7Q5KMvh&92;U% zWy7LL#zRSH-Hn*~%T;+Sw8axCT-8=;SbRX9#Hx~siZ(t8-z0rv`#$L)JGc6h0Dwpr zu}}@n-RN&&S_v4#VD9wkx#t9yxQb~V8Yiv_VTg8_^x)Hb%D3ZL zXfzyt0#qlI`UfABKhOWzXPfnlP%jF+bpr3W)YIf}SnCo@N`16fMI>#Q>lO1KCH6Bp~m;^AEo&Oi-L@u+bX z-m+d0=j^vK=SEA9lrnkZt$I(j?)keyQOe?+caZk}agu4m=z{n;<)z)E+KqAt{mCzY`o|=aP%YCEa7KH2Jtxh5KrK>@SSM(R%h( z4JXR#)X6#0Db+xl;`o*wNkWiX+29`Th*_jRo_%Bfq1X=%{^Efi7o+f85H)r(Arj_) zDs&LnGKRPFj%Xsh&@JQzQ9o)O^@XT#9H;!0SywmOdaW!x4jp43ZPV8ojp1F{K!uqd zPT{+6%8Cvez0ZHTu;iel*2Kq&daT%`@;VBn1Vpc2^MQ&m6gdBqW=&`?d0AE@!Z!`k zb4?CRJbyy6b{Qv_SQ`*_Efj&`mLT0!3Ohg>*JSBL<2cB)Pi!!MVLyB(ZI$Jdlqi^M z2eYBNE9O6T>Xj+-->)M=QS(_)o1~Cwk@P!IOWv--@F!e))yE!G%HRDeMYVVp_j~2+ zKyJ>r{iUiCw4Q%*_HBOzhK#rZPDWYdx^1CJr^|-!+vS;_fE}Xrvi%0dp|UM!wfs23 zSL;&)-M5?==*ANH9hNcgWeu3j-*;?IzZ+)&cFqS5{QE1#GAkU=uj{w!D>MtFuA4Ga94YN3#s6`MdrKfE@*Wb{mV|A7t8o9kS|J zT`?qE;QP=Av#4VV{ITm))z}YYE20NMrr?lDPT2ZbQARD$MOM3H%xNgZVTHqX8qYE> z!l(`}Vv+gq+ePxM^P6B&=(-t>geD`01IFA?_eV$dGxNg8+M#%d&})>E6jw+w-_140q~xsiNeRKkpOGE@c~bU6 z0YMH0FIU}^?K_Li*ju_CC-@13Gd6-Ar}u&$`!n5^MAI^ZECPgtw@o_m5toU{i15T-J`SxaPk+4`vgt51S$beN;R=GYPFoz! z!X&8C{=?~Chpw|f7TvnM-8gFm8)RWftYT0p)hzQ;hX2`He=0{Jia7E_*{MeggY(>; z>PwQAK^~>dy$%P;kofL$+hmetGuzg26Mxnq&g*{StbUzJJYIM~rb8DF|IM93sDfH` zpJ_?>%TY-#B~5tVSaont|Fv4pxPr#IabXLEpzMcPFjbcB^s+Rpk&+J;$LE?R&m3{! 
z*zpz;w156snX$#73aMRotB|krAD_ML42y)>4Q?dF@3_DA1$kor`_6(DCW1ebOswQ5 z>z22LG1xCzHpu*6_&5jSLc?s=Rg@p39MLysM2#G`ei>Pp^;mUdB(zW(FM}>aw+y3U-MCPBQ5q?IDO#F()h^Zff{Y6t(IBgI z(YvSj1Pi>S*|Xi0boFvly*p)-U?uw>xnMq6|4XBiG)yOJnT~@jxRUK+)bH@^p|lkT zu|om8I3PRU@h!sGUkTOZ81I=(IREEPH>lxM?o=s_ZbUaCe}uFY|5#;4_I9e197co` zD_Ql~$t(quuwP;uSq2BZBb%g)#^Cc5RUAVo!OpZGI!!*QRng~Q@16+z`QKPVu#8P3 zr33kWQle+YiAo?S1$a>xCbH`LnrPbet#T@#gaMB>BM@SF`XjT*bR^|VssHEx#6kF& zI3pbSx*JjejOBpG>+pd;RF31-qhuQ4k{93`-Pu?72rsg1)P<)F5#yjd5Wa~EOJ5Mu zO^T%D&;L4QJEPJ@*b7PXwgXmAV+*6(1iZyKbd!shea-{58QQVrb+TqoQmhdns$dZ~ zb&~c=ILqgUi)E#|ncK5lClAiMXKy(e166bmsv}v=@^;AfH$vvkf0$gXSTUORwH$D) zr1}a@)6kf9)?(IRAY3?(3JbmyX4+OuAX!QNb`;LcWq(X*hYXN1SPGKGLvWgI7iJZ< zaaxc&Cqmf!(ra*FIb~<5cIqT$CGO!Abm0c{PQM?$>YA-&?l^efF*>?0^&qA6Ou`p7n<#g5 zGF57#(ODBYK5#Kij*xOc)czxla81Wa(YbWd-z*2Jx(%>K{Fl>r6p(A>v9LL6kd&0r zz|Uz4>#1;(4M1ohB+gzh>=DTHE6#O|k`O7i`>MVZu@0M=06?|RkA2On{g5ZsB*K#I zIGV>rqy|=5IC7AT-g=w`_bq2N6+0elutkzg34{oA^M58^w)AvDfpS)}_>vH#nCZ%Q z$@laz$cIh*f>`qb_C)(rztzQugilULf%F$R4}&{Ll@AX!uf!v`8Z_85PO5r4BJal; z5sqhSDM&g%6IK@lS1Z-P+QiG_EWL2qf$3ZPNx<)jo9mV>Ze^q_)MP7b%+h`s)%# znbiFzr^u%P=lQ>sFZ&v|B?yfVFIXzow{nd5I|%d?E;w(iX_+Pk>W70*B*H2ud>`$u z2{DH0?IPO~gll}c3K0{pzZqWkoh+|%AZ4cb_&$8_KPcY?{Nq2_bjY=_Dh(q7Bo6-0 zb^r90ha5s;-{Jgeyz)+-jwO{i9hj*`wG#2WnA^!=Imk)Q)ww^pmt5|%j*N&|9ZU4L zKe;TwV0lI1ugJ7XB^7k$M0Qv)kWw7*^j;i`xtOhmm;s zAxDtsx0V&G}dI+DZ|6Abn_Mgx%1W2)zh>%vA9O zq~=A}+u{K}wB@JuPei3KS1ZisGZ5sRl!E*sWZ=8z^qtrSAk!x9;J?t0)zNS5_+*x= zp6P`fYQ$7IFO3qjF!qw5c9XB? zizquCigq4m!tbonDx67V70MnnzqERKQ_3Ne|DqFw&!Xv7)6Ao6YC#wcHlHsij2z1F z?y^&yObD5OpDLYr;YgIb-7Jp7&KFovF9m(mo#aPJfFZeDo*N*+{5jK6Qao&=AuZH- zU$j&wRV3o;8DQBq{_tJ|)u7g1Abu;9IdpbCVFyVUZPLyhG~WK%TmRBWz3>J3J65Kz zGfyqYPOJO6Z3e^&{4vwI^GVgZH>1**1=oep3Vkjz@<>*BSAIIe9U@;3>N?hM{)2R) zuk6V~gsMa7X_vYeE%i?xw30^I#gisQ(Lt<+wd@s?bkjuSWujrhhvu_$B2<0q#GRmp zW^WBFJmrs(h?`|x!O0n8y`0Rz-CZBe9 zJq%Td)ZTg|U}D3d*{a03FO)%Dl%lnVggjKVMs@fQ?uP55d_zXGWaW?r}8WdHPI-SG(VA32|OBi4d&|Mb2LI~G}%Wk^AQut+t;jjGXx`JV`S=4ye! 
zvfj-h>HxB<6e9da!Iq>ze8+izmQ+&+bVZ5VLkP*})sRwbAH6e7$Y(b`Mjr0GMUc8 z&+Nm>&$O>IZlv+}(Nyu4*H9^D1WD|055~oBLfL)H*TN4{m7TE~r&-ERL z>FgV4-x2Cz$~hK;VkBFYM0L&mJxYGRQBMy#>;~3a(BFr1}H=RM4608ERk>mK#b#Fu-Ocvhc9pcQ z(d1NZphT&Pa|1&16B%=uI?Fupnv-D~fy63R0C({1J0amH(~e}JsP;%bJ(GcMsaNWt zhmm6tqM{a(6=1fr_{cG7Au_}LOhf*LHeZA&W?Wn%+iLXdqA*g*cAxy5hcgvvU-8gW3)5S^~UYzWMxb|#v!B3 z`x*PHfy5W+juOUV_~B~n+;OT@M;MVZay85{H33A+sZxd^8zh{Pc2QAC=$bOUFvtjo z$01gY^>94>o)gbRffSzLC=A$4W%MsRHvdC^r%{DJ@->+xVLU=02{u$m#R16x(^^mA zmTDeXX0kB*_h(?t)V8+(luwP9z(1aTFT^5Ku`2#UyN-&Vakvh#QBn?XW0DKlgEDoC z#(WCRtwJjn(a?B8BhGuQd4ym1taBiotyMLUH`Ap_I!cv%y>44WvguO!cRig5EG`clX2yO*4i48&X~{=u6NuKN6c9WB;{Fse!N9xKb@ zyuT7|7`kgEbowC$tJpthznv1ah(b9yU)q#yNp6+@eJ56@lNDc1RNPV=(X2qq=Ov3U=IvzAT_qr90F+18U#6)7Fx1*UOEb2*E9sRm4$LB z+7NUa#**)5XN~0hWN^Rg74$V#{_IT{h>iaCTMTnJQdNL7W z&eYWiQ$AJSvB@3Yql)Ygp_u7rJ}`Of;Zs@*4+o*4Z_YpR_nZ7dd4K*dGQSZ=xw8pr zB~lGRK13+D{aQ&gDPytkuijYKZPlEA4hDEnLUZQ$Nx3@wau|D?NO=SVRO!Rhfi1MW z4iP^PJ|ImlYy5!s#{ob4os?*C_jTupwlrf(H>6OdmmeHBzhmE6x{trkw)XKWTqlAh zd)^-^4`PFYeywLD9`)D|QUtY6n=L<%k(Ql9*Arqg$l&y4%=sV4%tPrb{*5dXYr|Ul zgnrhJ5<^Cq%nsBuP49~!+!!TG=DK)E=*{_)k}!GD!LU7X_I3Zp_}j^a&bMy5ao~jq zeC*j2cfZq&+r4EP^BJn-u7(lfA>5#qXy`@qo~3ze-#JUTGxN!3DG!3nfoeB%w9uVp z5ZrxwPpLDM6w{LA499>mu7$9`ixNC!ZPOZOgP&7tx6ey_ghZ>^KuPMD!r=f*3(EGm zNOfL|ez?vZ^lym6qO!BdH`pL_cB{UXxw}P3oT_o(`h=(NC{xoL*D(NB%Mm*W;^Syw zrg-oBvHxiUol?{A@97W=e}ia=ix_xAD_KWNCDP2?SEHRPe*TdAB<)0Nf!2x>qeGBY z6^3e(rTP){^(XhFab1-PX_l70;8;zwu7dq4g7qA`>Ds5$dm$?jgP{#VF$*I2Y*zl~ zv=E8nRKSP6T!o#C5Qr# zZ!GWyw9}6r`hIy^B(gPTmaletI1YH7VG;jkt&ZoP$lfU;YpLoa6}BlKc+X#AV(bEW zY5ra3NZW9Y`^w=$2lbEm!JO`tyM{O``XoX5V4T1yqQ*SCQROomR+?l>3zyhYH{#Qs zh^|PnODpqM&=Jq^u%zdO-yf5>v4pN_b{O=Z48n@)}U| z7IJEa;~{8+yVkc|!*CzYKXTb|5cgKB?Ku`e(@qs+WK;ybN1=h?>H}P#fyC^PzVufT zZlz>iy|y?jL+fXnZW(3lxGux0y|fgUUm0_y>?77Fh4H>vhV5Go)gezyH#BpHjli7d z9#W*EXU@$sY_$tZmmSd;pMOLYd20?~iLXMRYpLz>MwVV;Wd}YAg{l6rotPFBd@+cA zHZswlnj8kVRjH^Mf#eTd$y(PTw;V}UK6XVrYnl8~9&fcCQo$Ic5dE&hVk(!l<9EjB z22&e5q)P5K6^OpKT2^-KFAT!>**MG*3BujYc7NB=s8hZsJ&c5`R658GXi57F7$?-o zr8ni3tt@$}%L%BP2JE2P6Wnb=TK*6M#z_Eie(9I| z@BQ*faK^z)5~+fka?uSCC)~rY3eJSeLX^>U|BPN29v2r$7~45v&Z96z0Z z zjx{22yy+UqqfT%>#wpSS8a2FN;Wdg69r*_$BGqK_3+Xnl`RL?WiH1t&KOw_`i33N9 zyi%tK;l2AVyD%3^I6!CNrKWrYG<^gTVg2LjJJY{^?gd@6z)1gB=BMjPa)VmIf;zk> z=?>M64Tp?HCSfQftcCQ0a^yV+7xNq;qaf6cJz>%=W+Z9KP9Pp#OUZ(hI%QGNYRK`~ z&AiP;c$=-EPTG}y%pzY$j#xFqtmxAWB3YxPST-#^{TEQ`E8~+A5eM8XbKeA?6ix3U zhn2o@4g~pQ>R)TvKF*0MzG*<9aYL&#b0x!5J<6#jC6k)f5rUn84|E+NFCOK6j=wUo~aZeke$}sJZQpFFJ;vM6V9b%<`*fW2SZx+!?E_zY^g5*S9p@i z9Ld4-#YyHFs!6kPVDX#znnAy&%|((_FYr&~@&prA-;6~lMwR$%9Dl!LB>Q~X^?1gD$saB2_8SlO{j1i?i$J)AJHnMY1@I#^C zM3z4g5RmQ=44;{A@11?qW$db+W|oxfKTM7V&xv{H{Fm$dqCqt}6{{i?BkV7bP`5$? 
zgYP9$wo_u~?0~8d$GhssNBB7YI=$nv5j$A*fMHf+d(%ppg&i;g%>D1vo(#zUak5&O zzYhxMP%9Ou@~$Q3$S{_(fxr?DE%8r)#S|wW)NfI&Gl)`EOYH(?~h& zkagof0M?s&@^5DH`4?Xh5XoTDK8twHe2}KCm3tf+kz>#Fe`Jd<7QQ*M>YOfg##Uyy zncsrD7HWgV8+-(Yr8AO$Ycj$<4n7Hmq8P`^xFteAHJEz$Wm*>Z=jY)y$jk!b`)V`n zma%glSkj!#MQO9ng&IPMic^PNk*tqc$2u+r+ok#+0-V8^_*jG$=&YEf>6vp;_^MBZ zeny!0WLH=OGF`oe8#|KZ(Zqd0CUr+od`ru60HKN_(#T$)7cIkD3i)GqzpNc+%O-Kl zVS@Vm<*{&*WgTK|`|o&b50cSCFxCg1yb+q}adxvHH5~p7DceFRZ14nAD_P8o?44M^ zS@L}X+$7zQOER+_Os)H_^5~|d2;QUI-ce|zz8+iG>PX*eWxa?@B@}?kg-n%AoT_D4 zL>@|A))z?YvuS_jBp9c|lNhWzNRe;2sANR!)J7EetD|06UQjOGLrWoy^9H)Jsu+J8 zcZ$urRU8_vWgoFfsoAK@NrO83$0^sxnoauNZ7IB+rh3td3W+go{lT?e|8zY(Nc|>B zLCDG3l#!R#30Z&W6@GROX84u7D_yOjJAMk`V{KKkE1e&C(SbRPf-T0g96DvkNlBTA zKY0?{4Q^568$DWJD)LJrvi6~EoL)j5N-ScPNQswSZ*jJ)7v^L{{+z#x>VbH6xR&(# zm6BRFe~dFpE_>)jDiVlt;^hU{mW}Fig5R%1J{03k&g=5+4yY6 zHBbBE@q2!~r?t;_fUd{kl=Zr|GJq6f=pc+7Rf=Ik=`jhmsB`sSX>AkB$h^0LgZR=- z@v>QV``hWiaa8b?C9gsKb9|fvP#Y^w2pok%Ftb%07P6$Z)DnS^d2tZ%LZNXHa5rfr zGUiQ=Fn`j#1xd!50_?L{x`@2QMTjawhPgs}9u% zsI07;Vj_PTXlLQ$#(ftDb1~DNgw_X3A*73}I;}B?W^zs=04xjE$pC=mG=ho6O#2P2 zB!$JYF3kEDx^#fuA#KVz1uwhSLXW5>T8Gi^uUl#Zdr97)<>AzTWEQk6_RvPBqN5zMX+!`yYhh;euRroyrouk!9;=oTW zZ+=1?;0LGG2cr`mQ$xbb)3{ zOm4DNKFC?aJX8V}YWhXm-vPsNx^RMvTAn&bW zVWI%;olvZ+vh$Bps;F@mk{#NCpdF z?PhA_ugmS-JBwOFAIkI)$CLMdBc$oVNR-7a`ms@VuV0gQieMiY+WD+#Hw{628iHyc z3!Zc;X$a*@SN6lGy0o+uFa028m(eyGeOA)$Ywzz%L-)yT&2OMpyBM>aEXZK}T{j7u`{3VYKY zP&|{P^!ovx#=-M5t1EM7kT9P;JoIF6y1X=hQcI z!#`jC*;~=A+}g*>3={*s?a(tJrnaoWW<=rn$r__-563|!I`DtUW+pV*599y!$iy-g zUWQ-M%64=43tXS$pJRTIOcdhCVGL5*oJ=vw7_;lQ8W(d?B-nU9F+r2mNA9&Zokn&% z2_g1UHjq6pxS8OJbIiI;`Uquv96Ncn-1qt5?bJG`DN5IUz*RfG!&a^evl36jc2!fL zQB{&(W+}?dXD30jT40sGgOyXS0S|+K}xu70~20xmZjP=QYo?MA+ z^*JM~n9Qf&*p*FY^}`t(ZiRGx@v|b6tTE`KyjzGSbu{Q0DeN^sRdRUn9(>Dkw`5K$ zBTF}tG&v$ERRK>x@W8eT87hh#gM&EPNyGGsK>P}QVO3^Mqa%AO1jmIJpLbs5jY}5= zu0i83@n5IbEzgl%IJ|wmSt|>S`&ZKDu-251_md4W8bKIewdz*v4^r8`Dsz*EL79+` zIAZPbtT?{ccl`%Iwg{elwOcNH_+qFo?H=g(ZlqtCcGcnL>v8bINWBH@=kO2El{3kj z4rn+v$nDNGQaCInED+=+QPF4KT+15Dad%O$>ir2q7aqFcm|R+VDAvKQwDFmZyK6lY z5b{5CGPQQ)u}P6BSeCvUqZ~A@1D~MLwd5RJ6qjTN!D3|aW?WB>eUYNZWwN4a*;{A9zoBIlJMuSlS1-Z~J{Rh5B;2sh*; ze4;Gmo1b;8_%i=Lob*>z=CpLLYL|N3->R=th{gEfZ!hjx>_CJW86=f6+9$J(m=!o{&E48|O1zbyoPiPT}C_MKT2 zOwWSXM*%F(et(z?YVa}mqM@_IzG8}-DFF|C-5;_m^&G<49sl%-w!Eb^nLk}g78T7B zHG_pjI+#bWuyi$l&tgPA_6@DAYuXCROIw)t&MGIVYM@=CAp2mZM*3$9NefwN?U-&i zq_RTuyCH$}pLqQtCmMfWO~%8*2`XB3<4UliP=WF#;rOj&Op~4>T9t(ZaI!fzy`-2~ zjZtckgWH5Vh6D+#OpSB5$smV!9bxMvEs)Irc%G6|)0Gx0XsAx_yI=5kH)!Z5oNJZ% zsdvX_t$t^VN(h0-U+_WLCu1Q`lWAiuWMp8PG0}q45vO0JgR&Xs6z;WY;K<c9)54 z|G)bug5;FyYx%7Vrxcq$7n6X*ita8iHC}twUAr&Ae`ak zKF3+|{8^veIQm4Q`h=F@RoupeyhbRta znd*|?=d@dCqeIReQ-6i3m%?%?$^DlVZfpw6rjr%6YsgYk61hh*(B4&HoRd=bO_ATD z(4nP|Jlon12TGZW`sVMYesG&jQlo4Yq7PYoWSh8;Q=Ksszhzb4xF9S{#=}TsL3XG| z3A7Ml3Sg?9K+d8X2_z6C^^xVamSCKIrTL6sEcEnWVHGe1eRa`tXP8>fP^549N19mq zLol@6ROP==F(T`73|{H5YO*4df2fm=UIt0Yp=CeJdfSOv6lR#(mt4h|mu1eb(?La7 zV`4um@iDw<<9jjNB!69XzSAiqh&QnW={5_ zQH;Idd_j4NN>D57kDsJRBuSAUW)(4`A`Y&mD1Z*l{+{Jw$@(CpV}H}>HaWR><>Ewa z=O4>E^FK|VQvj|e3S)82@_AUICxW+B7{Dw zL{%rFuqi+Gsm;)pWwL8$>aOb9L&`^!I&rP#FLn;I@(<2fU&GBh!hd)e1dYOHsnEGzIbOEkW=3=Y`~?*{NV9HYN<2avPxQ)pU;qhC~2% zj5_3lXClLfS06bHdS0yJpAH+7$||MyG$_-*%hl z1WAm=sHg^$SD*K@_|hc@oj<3$8S7Lj1@$Ceh4;^2-P?8){`Myi`CP2MYFnHdFr$OT z6kA1{?|a&3<~l4aOvUBRR%_rdB0IPeSII;(gH$6|9UwCum?||ETKbA3$HiUATTW9I zwZ^UOqk|8GRv;u_tpkF5McU*(2$fjm!dOQeSuf;ric>N8gAY3OwzeHP{)~gj!9Pi! 
zndh@?Xs4pm-8Yjl}~g>YL5 zMABfKRjm}+rK*s8M28Rwm4#1sN{OF96>k0XxU(CV!pr=ZEZ*4n=IeL zj8~Cqidd~?!@{i}Isr&*=6R+HaLAJbd-7$ zcIC3Fkkn;Hohr3Vdc@=Y61)&z+7qT$XFH}}tn4C1Dpao%RSqeI8iz zHFt%)xDEmLf%n{9~7>mgw!A!pZzeb|X09JR$c>wGi1` z{pgb7Ajq?JEHER)>Qw_f!DKD$B5j9+9?=^8A_2?iCc(81Cm{$;ra{O;S8bmVmPjIU zxRXsoyT%kirDvG)hMax-r}x5B$}HT-6ow2A`UpQ607-apq&t}WkbyZH1P{@ZLanRo zv3*WIPUSO`H!_h0J!I7gzO&OD>eIgn;ZMP%;EWwm>0T(kzkv+JRj2ti1TUu=2XNR; z_B!97nlBLHo%oC>j&#@BytYn+sta>DL-9UFr*Htt6y~oIVChJ!@;b;fF^uLIPvhA56Tv=7`vRTdAG+aN8c9khw1!`gyzTIP$1^B6nL*Mfip(0+567My_wjQ`_{c z@^$2?8yk)aUzVj8#IPfCTRs8Ks5nHfr(U~--IL+R;V3kmdaCEb0V#a`%pDcrH+w+} z8lU96eE&4K5!`mawCSsXlSmV2w{Rpla3`7v zjlRW2uEK$qjFo9PM_Cx5+w?Q`Ih(lZd;Sg3xR1*@zRE6uD*Chn5+*=yXX2@wN(C6^ zW~C0YbbTUqG(LRYS4Md75hosRXDw2Ra)rD3k+>2Paq5%;k}%<~C5N6>Y&%PxUwtA} zf)6Rp6?|G2U6=1xArh9+0}nl*G)kAty%2#9&c#gA+sd?)?Uc@G7Y_ztggB@M(d9E^ zBe6%ne8YhToW*YPKR_t!+n8L%{6Q`p8J6lu?DQ!(c5?6_#nj5;k|My5l_r!ey5^=z zcz(wc`ANDtG^_UcfB#66=rOudL$mroT?qH}SixH#HO0(;GNljGtUh?)wO51yrmfDx z{^bUSa7*9cAxv<=r$;*7I`-6;^^%44YMhe)kj>c%v6@_8=S}yWYtFqe?}|FM zXy1tN5$^uY-;{O0;}!=FDLh0hgweJ#N9!)pNB@y+3Q2ro2u9Q-b?Tbs&7p|js_wCi zS>ME)1q|Hw+EA4^du&cbUMd1;P<(g(o{OSMM1APF;^Wz>=S;mYzWt^8mZQ;x7vt)~ zU%cR3dWPpy&cK_E7cq>!wNNvkyX$X2f;W6rK681Q|3auxWpQ&=sINwocHs|FPOqo% zr6u7r%>MiWG(seyU^a~kd`Kq0w(FGO3w;UIoj~Xu!y(R@tFfXy`s+!L<95&BSz67d zh3B!=zS(TAeDk}n!(T3#RApM?T-RW(XhF!Z21sqN6P3U|}WBB@)bvSJpq z=u>QtVV5wc!?AX4Pe{=;Xo{|fgzl1B%B`hN%lVH|*ly9aTULHoqu5#%y?I@n#$;ig zTj}!ZcAWEA&8Z~=@#e4j)ac6 z3-5?#A7I1=`_P6+tjlG7&N~cm(~c_ZN9m?UYk7HJ z!q(|u3liPzmawMtk^D-dR{6G!!Fcy>v|(v>n@lqQgZaC%84>5BsLtX= zDlTN?wZ`RE^#7OCKALC#t^W!7i&TQJ%z6CA&ZbZ`zo*K|st zu*}FKMI-<+D`SjzS7Mt#3@y9W+nk3w;ty}W^f*l7A zG!G(Q1UVJ9DL=GQz7N77dG5KWSJr|K(M5##Ed(3zb-5YdVco+g>~mT)I#v(qfoPZU zNWU-i46WN{xB56#Z-j>pzjdM{Zov0s0f_pCiU2BiGyp?IhR_@ji*sy|a&B&=J_y!M zP}zb6DtVpcK>;2)T1S2l^Bz}msI4>^{_-sOF0h z%(Mv3*;bFj1_D4{8P|uAxD8)5)t@d59=t2lHn-A-Vge>~hpW^@MxL!uVN$3IgyO&K zMhap}SP4GQ>asHe!)|G${0=FhJ}4b`aP0awo&-Z@0`dXEvkDWCIPbq$WJgACDP!TW zPPZM5Vt}nCF6sl|@rfh-^qt66{pr`V*48#Zr>=TBu`uYN;H%5E<2YioJY>snNQWCR z3u@#N`m31S%TPgm%PN?y-&i%-8+!8Wp2bM6oHJR=w%u<-!Q%&M&f%&i45KoI_^HOcPL0 z7r{Vfly6tY8d;^USti~a7IqTUsh*wV zG_gjtC~gx@$415oVPPloN7IV)Iybr%Ijb3)I09sE7FuJxh*o8-KC=GhMJKkZRhtNM zqo0QV-PpbIIoj{phbMf*P@P5>?_nb*{KW5H0j@T^bb<@%5)kc_R=~*0 zio=E{vAV_~muPF?yM2323}ldAB$fNpacrh;IjyPk=%kjL}*#Y8w^|4b%isiR6F*FY$Shd1w6|`)@csirY?lCbb@^C_G?^)F!jXe@m9Ze*F-{qn+8>sgicy4JUm#UQXXi3Veh0X7{r zb#VyxjDK_wF zuaS%GE58bZX3`SE&NexzJ4nP!9@0ud8?KB9U2V?X`$PqMCCM8VXQFu!JG zxY=z;B86y!D0Rp5!|?s}!izrB{U-7-dCTc?NrzE23$9V4Ba7Db*vwr%>5i^WvI-%= ztCxK0qmb=IsbgcKrN$ZA-H_%8$F>q0?&^IE<_%5G+|$1d6K-NT;js?1Ea|F&YbOXj z<%L>jzhx~moiNQsu>OUpX4Z&#E$LvylEbI|z-4C}+IssoC@?m9!7D(Nj`a<@%G5dP zxu3oDJAz}^dK>v~h?T@ABnCJ!e^YxkA6LoE$n9V0-PCdkM{{yuH5#Y`ypuYJ7i~2< zNZa+1hSCLQTpUzqm*gg(|z=0~=Oe+bTM8 z_@P=2Cw8WbZfc#bP``4AH0f%N%ifxZE6z%-M&OE6O$?^{Re#}JbUH3>*^nDNz;iDp zPC`VrvaI6Cmzvz<_-v6c+4a^N@i`locb z&xU$D*wIGB$C-7SOk`FqoLQ4}moOzTDJA_lvU$M&?CU~MVz3{|a)GJuc&1Cv172Gm zXKB5CE7z5Qwz2b_c4Q;OgfMD0%p;FH;)EfI`D>=x$o!-{`20<#srpm;yD1cc4<*joio$9iuE`$Ak|69F0v~~3%2%DH zcVF?pT8h5_z@2d+WGO6A4aM|he~ZsiV-FdwsDbiQb~DyBule3`TG#3wlk==Pm_e@=FT$U$~yyil4P0>b)i4*?Dn$KN#nl^du=v$&FI);R2O8 zhSO-J^s4rP=tjcx`NbtN;QXJaCc@Q3?>$*9vuoIjUhGpkYWj-7X4zhtq<$A19U;(` zPd0+z0Q+DZhrvV(x~(EQlWoLB+H+ISJxkYkIE>5KRY1b@os}h&?QmPdef3!Z)bDY^DkU z)N)6XXsweDa?sN%`BLfrA;hu1ha6VU5va(KbY%$(!o7{;{~$FhS&giSg@09`-AX3Y zjUD-WJ&MdgprTW_4Bu!jKTd52%noy?NOXzKdis5J8ooq~z;VWZCM=K$RxqTLO;evFFX@UNF-HbO zCCOUGT4XFx+9`uItu~l#;}~Lr*RgA?Fj}P%?Hj&1|B2(wB%5^>g1Oz~qM!{pfw|8; z?2wbB0`ca>OAiR6PPn5f)Bd@Kj)0uw&hvXzEy-8v9YVx4HKeMWcPz*!4_i`{FJ^HV 
z`A}w%htHTcvJUi$WcM2d!dwK5&qp;gxBmK=lf!t|?(_6JdU(7Ea4mXSWY}}dflyI4 z5#`ek28}Az1=zN0hJ~-GLxM(0=;MCU$7$f6oQsy%=<{LuV2=uQHn}lCmV&YXG;H|; zdt_XNLeEJVVr=O(9~4wa=@O*tkl(I)bxg)5BOTCoO13`KaqHG0dXd^KlQNxqpoUhR z=^KrtL%Z(!L)jJ9(k8EW+ew{>#yvIg7u~lVhl)nYmHHzP=x~l6;k3~lE29mHyj=x* zE-Ve2E(%oKCR`Dr^tmu6aJmYSG5V&dT@^)V{v?#84ndadCHjFnS8G%$JA9&r*w@>)az%`;RxT0%VKuB!>I1MK1)l4q)yr4waOL39r_s?^n2Y|6 z5f4+#p1YT#pQingYSvKoL*2OW$cuu?K7EsMPsn(!RD#es)Z=Bb1yjtDDJs3A3CiP# z*HxuacWUCo<8-D;O=44vj8WE}xtK(NnOD|T{8&MO(~``-6u#lX0jYu6T4$MKb`i9* zIuBCs3!i|M34WO-{&tE0q!u zUsG0T-SOcrJg2RQimXh5^3)Iy*5q5()_OJ}f_yU0Zzwd+a-h)d{D6R<+Ft*SU9sedt z`eT2|f}U#d-8l|!AqxD7K)h39kj3G~WY?4Z#eH->@vZDOA2OfxtnA@L3bHZ8bI>13$d}x{$|m)MWP;M_jSv zKbtobP2SO?4Y3cuNn|H1Z8p23LigTq;^Jdes}H9@a^eC70mZ1TA-W(nIae@Z)ymm# znllRGi#x(pV|qFKDvZ3bvEgu1qH~~0WENC+bm%nT?0FI<1Y1BA=G7_!XJ&KDQtZ_l zx9Be15JE^~$W*gkTtD;KDHM~@imb%TJPf#w;*_TnS)P?LN07adoC>tnQbCRg9!J^! z&0uZn_mm>^nTz#aL2z@=h7*s`SFSrR@Wr}VI{0K>$YWm&qLxDybqJ1d1X)CM{;>>a zNkp>8I%r_Y8Mhtjy3Kp<`DB$jgypTmu71d+aApvXdMC=>81_kFXg+L}r6%P@4p#()Y71y=(@Kq=ZrRab`T=50=xuk=CkmcSDmDDc7kcaL zZHIwCw~GZR9no;6ycD-T@TG_(i|vKqJ!1E>iEgn8dpU2>NRH&V_9-Ao7=+diRme!| zq^N1D!WR4d=QYv7iojHBFK=^j>bZe)T9{)I^1sh*qpSgmuHt#OX;OAJJjdbxXK`GrcS0}k}*&G0idJMn8Jb5#V=j`#)dJ;+jupmu_%M}U+e)e2Vqe)^0wO$+S|)A+ zn!U9hAtpg`y44-*)RHo+2EExXRiA-w+z zsUG230eCWN+zO8Sg{6mt)=lSdq>_&XI(I)SUv||L+s6_S80LSTHL$g?Pr5biPN|2e z^}9R_4le#)GH|<`Bi@nTA4r!bTc;5Ryt<=(p>EX?$G1u--$D#V*@VAx`c6!`cisP3 z%rroT7Dz3O%Bd^qmU)1)16}B4K^42~+P4EIZm`1QreRkG27c?7Q~|9E4|u^>FX zCLCjRhwy|iHTisY!U{KDp}aA!qeSifw*2-I(Ky|Xt82bCwpB(Ua@gh8?9hSUoyHpS z+$0&-d@I6RY-G~ZRj_6SoJRb5BOE}4WYWDKTrE2k4^0y&Ym%eo)vsI>K=+F{Ocs~e zD~If)q#sK<*iQxn?qKczL)w_*hcPO{FDJ`m>5}BFSs$>bd%dp=-QLyS8l~E3q`NQV z-3vnh<`D6vGg8t=+_40Rv~J`?eMg>q32WbhUCXW=8IAAgIm_|v<5g(R!T!v(eRbPM z_M&3@ah`Q8=cNQO>N?w#UwI`UA-9SDN;L0^KfI!a~Vj_(IH}wU(dG-MiW| z)lx)=h4_r8f0Z57osfatMId_us%4^fcg^m7@CjP$0>_Il3eaZ8WDS@{I3i><5R#T4 z?=$V3UtsFgVMW%faGh9OLZVWB?MkzrLKp5rQ$>xOH_LMLbvXSCmBR$x#`@ik7!AUX zQcn4KhfOCOo?7LN<#LP`;cz#5E4rSJOT4}Q-r4VGjz3Fz%t9DwbFin7kXFW=ukwd0 zr8UP8gh^ge_MOWVB5&SnQn7Sv^gjIZjbs|DrV*+?d|TZ_2E zFY-!m5n2-#TqMK9wnMA$tAQ{oR6chyi`M$6mt7%J)4HNFO(kdMf)Y?XbJh6-{)Xda zDOs-2j7oai$k^82$|P4D_$1@W&qbRcw>9sU(UwvEgv_tJ=1hputWuBOTJjdAvYt8{ z8mqBxleF`b<=QOo>-uhyco}Vv(qCVtYOJom^$>g2>Ai3=Tb`qao%Y3-1qaH^iej4x_iA3mb@U4wQi6?CrC+d|@~q-=BXf(EJTxAQpd@0b9PX*r6MTmg(xb^4KzjQui@sV(ikY2YD`V8ba7(M6GJ59BmCOlkaMG9OCmm-SY5Pm%O#Y%P2R_e>w+>9%f zpykgSGoN5`LIQDUFio~*xMdl1SFM*TDG|XO5=qA+p#Vk3*JW&B_U5*0A>RC*l+N+O zgShz`-BNZP*v0}Wk-%kWH5zw0Z`$rhjK$F9+3`mgGJ*;o<8PqViClao*y z2to;kuo$!bK)V+WMbP^~!ArO6ZrhDW0;?r9C!uTE2$8PanW+exFr&P;2rD>_r{C@A z=ClS*^6Ylgcv*lqSFgqb=Km^lh+~nfsNE@5`PxPh`%oq~P5Y8X9F!R(H}gbTk}p&U zfxN1!2dVjs)m=@~m8ms(*eV&nm>tC!=(zWN^Uw$&D?y}`j`Cm!8D`nGyo;dRCz%MK zLa-VL=dEx%i``c07@pri;L?o@P+8`iDP3v>o!`&L*IskbnEZ||qBU|TBCmaOLx8+t zindQf=SUx7)#`WsOXKtPFJmj~_cv%iD}Zr!%eUd_J^yC?=u^7RBGLRrWSm(#V4oNi z44!{M!0ce{X#0Zm29N2aJT64+DlvmQniY zA#W2_3Kft`0w3|6#;p6;zVa0oQR=ca$GDChmA&ke>&Yx!E97}5zYYAHf*8mF$dSAQ zS2Hi*E}K&pEKfSl749p|WgZU6tQ&KK#A=+Qd?DiILz+;^v{q<_F&BemJ=sav9fzh@#L-Qx_`Ls^j92ya z>L)N+_?BcZ;BoAbHX}<DZ$ZyE%I2*Ht@gR))buipCyfsd;^D<5}0rK=lhBqC$m7=^vA|9 zix}?fmmPT`mOj89g;gRTSL~u(;WA5fN`5ZT!wuy4VQ{817Uz$@wutZ}RDBN3jbVth zf1EX=#34(YZsB$7lE6ARu#%uxS~GP2dpXnWXG{l7Ne6*hIC;p`cMwkCLw}LWBK+el zPuYWyNN*GU#i9`AZI^M0`bjgBv+tz4rYV04aa#0(OxEgLl`ncx`fBfPIDB?Zn}5bM zjFe?WcqmUGHO*DaE9tUgE+-HYT1ty33$(xnWm?$Dd=16q zcxD$D2nTQ4uJiRJqrvAVVv~U3$W3l7c%0s^4#|oZ?^r>jHJ_64kMGNpc!^WTB`-G* z9i%>TfZj@1_29nb&Ln>ljexWh|HhMFN_Whai>r|xhhZJ8#D>ePsHR7{KgazrLZ+8y 
z{m@$&zG-2n{wlkI#IEx8w@=>>k|Q}joJY1FFTWglVW)ou+|CT97zhT(DYNZ?rHAB) zL8@K!Vp8EDbEKDuj2M=PmgY#!zu_RFcngB;?l7ZKCk~V;(0Pdk`+KL~52Y4le(h!F z2n!~w*VeA2!N8SmG^6Pd7YK+zK)oaZFH`8ZB46UF-a-t!tG9p?Q}!PQHeBj}pa^%P zW-irk0_;l42!H4Sf1JP1|4*UBXf}zguSJTVuslWo&HrE+e;UmE4ZyR_R=E{DjO84zF0w1Mp=}sriZ*>`HHqkHwl^6 zw4EjBw@8spWl+_Dnrr1jRD#>(Aeb>+?5@FH+{svparHp z6;?W)YHpwRg@F~3AF>R1BJ~KzN9&JL;DT}C!|zT{vNUolj4HwjPiQs^O3$NjFvbj; zjZ8@9OXvf8<5}#e%MbcgO}`ai0cO?Bg}FJLDBs{4jK26JE}MPq5FWoJ;xTIP(0}M~8f`jnWuZ8i_y(3h&0HcGCd=dCmqgl-ccJbb1?724?pY!Rv5GysdhVg#fB*wF=SbY z3WBh+&cz8(5^}%opaAH`wxEwXrh3$(O+Z!;fk#KyaDcvmt}P=>5kd~gTIxQ{M0+pG zi`Ir8sGLMbFj}^TPSH4zzvH}j93;uFtQn~98TYaJ`FA<#6VBFR)@jK-0`Q5Cy|*L4 z69W!|ax>aF032dP9wUt-Amzv1tfk0==!orBCju1?KG(*}{>M9~?+7tkLmF{pH`VRV zgcwXQu7l}G6Aa+5D*9%oI;UrJPF;GH@|YtzHmAQz4ByxL7QVfgSzk%Pl9`VX`eZw*8NHOUj&D=BBhVUCqFbN;t;#OM0iwp?gBj(=>=}qcEerk78pbP@+hA8C=?`7@= zYv569h{-Y+9FK8vBy?{os34~Q+D`h2NeBm&BS~vFQ-kAzPK+{~RV79#ImGOW%FPh6i)_@iqy>O&ZGOD3s zhWd2=BN=LCaf^Vw-sG(M(Dyalyp}wirYmOY@D{9%r3VR&ULh8@d~&{Jm7I1tBC4&H z&Dx+@Wmg|>hOf(_8H&J;OrYswY)?|;;#ubBKyWPJ2;E*UXQdSMxQAu@Q4Y0GQjbV# z5W-mZ(5UiR`!#=qTNfhdoM{OgXPWH^?F)S8l#vN&^udD=sQu8C1m{aq_hj0emQooH z!U*#ZeCjK(pZ~&$w`QuqVO41!nKOUnK_HZ>@Xj_B06wgxrHEjjkgF!brEv*sD#R8s zecD0_$W8puv01wKkPL=Q4qp) zOBa0dy)yK8n@zU@h4Ezg`hzr4m5I4eHuS4OR@OwpToND`mO`6abiELukjRB@fO8{R zXQ71PL#H_{$G+oa!1b2Gd7L@QySphe3PMB`BA?2VrhNo-w&ct4x|T31cyX+?V^%XH zkguTXYs83L2RPp;zaH7o_4vM4Ufo5Tc_%5^UcxrFIKa~dWb{i@k#&<0cK{D~6K&{i zXPNf=TSq6!w;YKL!X~?AFcd=_hSUC9Yimu=#D#e;R3JFmSK^rc?4N7tv`!KTVGbul zPhDkA1S4x))ExUh%GulVPb34!q`RoMNQ;I0G9rD37GKt60yF@HISex&blQGgSqtyGB94f}FD*Qn8AG=&b*!-B7SWH(O#uSXYa@DDTWKE?CHXj|QoK{tM zy>QotR6PA_y&`FG4TDiwP$v!C(X2|1@RQ<0w`~87Mx*i)5pn;vkQlNU^yk$aL(5pf z^_A(t-SJFh4xCFwP5#i_?HfK|5Z&GN+0cwWOyU@yb*KS!3Xh%Bx2TX)jjo5ODGH1j zVh1`T!;w@nSq=+P#!_xdGA2@58IKB(NCpN@x;It&-J%AnBwROB)luR3PP*Xf^jOr%aL{(i}R~o|d)eG_)VuWnkP*zPC3(ImD3o!a- z8C9BgY?TgKXSS=!z*@2NU7>v^P5zKjmR@c}I1J7^5yxSKA}WbnIKeHqa;<4P zKGp2V>-*Y^25$y;zd0zU-$N0QCA%dZoZy5i?q1=`RrMCEW@TVa#Mrrs5~A3l6S4$# zr_|P}*}=wOz(4Jv+(0MOM=5WcgxGsgQ?a?khr~r+gxN3zDq$G^D?K=T;2FuIy6K_u zQCOX2D%1x>(otox+x3*91ZV9Hy}@nK4Jh1E)qy6F?BNP_9Idr(rZ#WZdJoeIZEI!y zEutD6T@^q5n$wgp^&dDs*R8jMaBLHkOI4LO#_DZdn~Z!6dQ*iz_v{;I-}3RStx?7U zq-gp(P%4g%qtxlCUq!NKa}SpqW>f3KDLC0}Z<%?GQ!KtW0apxeCl_l#*L&IYr0#K1 z`+;7$z)2G_l;@WtGwbx@h%O-Wf4+X4V1v)IaGeR`qh`mpzP7gKY_xLyny-I1>`MGr z)+ATnE=S3TwN7zOvn%1Ga)@&?cmkl=1-^n*xvniz$Q(F+=Z+j-f}j4;>G$OqD(3x7 z%Pm}lnuXj@C#Bt>sHD{J5wV%H-D=v;=gT%r_q_qZD|*v3y0(*HEi}vKa)ZPkD_IL7 zs-QHFw7I&j%36m28lqHpd~@HFpvpPpMBoHfHGDKvlf-IMOfPnZZWE0OHObOK9ro*C zvF^1*4{WpmsHQ7w+LO~6YFNPt&IkdHv;qG{M`d=9f8#zk@RAilc}VpRmJhG<%zuCW zu7fuY@Vb@HZ5_WXo`3djQFT!-28)DpnP&OtTG%zoTRNHDMj~80o@E{9uU+4BNWQ35 zqp-VOvf$%EtCGfE@in8for;xJwv3OlXEGw5qrBbGWjuUPb~Yj?p<9I%nqn^5h|0=<#?7FU?X=t_9R>w#ORKk7=$&x z)g%q$Cvakb#%`)~`^)eFtVuk{TrSzYlJh^wuDjaJHxR%>yTC#e+^o9O zo`VJ$D?N&exD-5?&-73S^QI;IIdAYV##(ytxK7FiE=#ACG0^-+{*9L|rp~&SdM6~N zM^s59YFv%~%Vc~4Bsv(UYCUF{sGsrjB`1BpwOpCr30_2_z>707@$MX*|$AIOc zH72_Z!5wRIIS*}QRU5|{SoWiRHk3kYhYyh|7++X^`Dt9Wgcru4r2T#b_4IhP}Q$pS5!(zq!N zyMFQEhn-Tl(!HM?t@wm}U;y48*V?MVT9%Nn;hc8&IiUhhg-m&GdMP*up;kq_p|!lJ z@{F!)Co&98(bIur(HaL6x0OG}My9h~>cHE$Vl8g>a3Z%!i5IjN`hEmDa*t$>h@-6YGMwVng$f*cS)6@2t6^;Q_Zr$lyHcDRgv(615X%c_(6S4 zLN*Q%8gXC4H&#IxGR*P;hlBL<8W7- zfsrucvg9-=>E5^^FhK3BCeHFYU=j8Ew@;+31~AOQ9zK>Phfv0&4^39<@p4jCd9_bk zyg$5pej$g`rc6eVC4nV2T!~x(3W`PD!H3w~=%Jg^rgt7;r}WiTL{Qk*d>WrFg-kyv z-*C`?biE)^P&N{#OysEXGg&ffFAcNcKN+)dUpDvFIAkqvZ)IoN1Q$X0bBKYYD?_sm zKfBrO4l6ybm5G4d-7mlSs?!lisH6GFW&gCR`S-NRMKw}^M1M#%Lw09+#*%xZ@`#J2 ze5#**9CCx|GTngOF(ucI?5^!d^CVNCyc 
zCCxZG3`9n|z9a8n)l?27S)`;{(|kt-zpVmCGb0}XziAz>+;@Q`Zn58u5hMe(n>xA{ z@PaRBuWE}Df8MqPUlZAtg5qRU1%Hr=L03WbWH+~X=B3zc5Zb9n847+!k=5)LyhOUM z?b2y;?l|+dS~9t{Y?Bjv#TGI3_92j0hjI6nB_|O^K?<%xex!tW9DraSB4Xe=semXx z-+lefSA|U*wI3g}V1Q~56e2trFy)fammg|4BxI4N=*A8y`99?dwydCOH=g^d;EtAG z4u|Fc$Jd)RM|NHJp8W;<&_hH&t3;Vq5?OqwMADdnDojSAswAsu@nj+a93YVg$^j^V zDhEYMo}?>Uw-xjt-ta zz1Lpjf1L`o=*tS>6tV{}voe~lUHZ}h&S-U!q3Pn+)fO$Ie7K(_D+DNIh1={udzzeMWlMag zC3W-foxXrjsrmbWl$IPLK&7+bWJEMYh6kW8h2hVzf_zn)90a`It;2ADcosL1`NYF^ zLCj*@0TyF$@u?4#`*!l4a!&+XPKhkpDeK`AnLNs%X;eRxm0-&lD4}mPQ#({PY&s-x zd!JJ@zP;dtDX&1@Cl)Q{$-C{>!{8^6p#o;caBkD)snSC87?OZ z1-aB}1el@Enht9cWp&uZ3ZD353w~vqulr6p|NMvk&&gXF!k93zeMM-rQ_2OS@$Rmi zOHR`ChLm|f@v@J$!o4PY;dneSBy^$QBK|1(azkO7$;joBmt`3f=~%@XCAtuIjSrNf zhkujZ*O(bJcEXCNLSW&pP@L#Jr=(4i(nBBVrm_VpPTn|23P&xq7kujrR5#c9) zFqh(S@GZf_D!)ALWV;Ojt$k?E-}gL!Y^d z!m`9b%>nQm-<2kbC~48|BW~9mrrAYT?d1!V7?rU29b{+y#+q}Hg^M%-za zf18s=S20)>z*`pGW|>OfnavvGlpaFk4TR$jgk8Wo$&9whBHv+rx0!EYJ~7Q#NH%~| z$q2`NyLEl>UwT32W;g|A;ggR!DpAw+VPJK&p|K1bnYRxokLLiv_!=*y=t)xI+qc5F!jo#m$ZaLgcbR zd-5n8)MSd;kqprXEih)XvZz%0*+rO`n|(Eq!do;(f16TuP62ZR`4E{1yCEY9eEF$3 z^vLfjT^iP_Fdt)~Wlg5LS-z@aK4Dwg8W-j`fEtw(eRFM9iMp?L#da4>$8YGPBI&-` zIk60Te|R?wp0Hg7kcEzgwoX8j&ycx)@koAbwB*Oj54~-?PeSo!4w^p!hDF9cd~BY0 z$>r(6IHE*|K#>Nmh==t1mFMLdV21Dbs`a_-rM-Rz2V)*^Ie%}1f6IpJBzuH^Xexe@ zM!=PCE_peNc8*!wq3D|l$7z)1l`R9Kn)LFT!KNRNMIF@B;%G4{uemaG(<&iNl=Rh-I5l2nedo{U9 z3x7E@Ro(xhC%<1a#k(Iut)x@|Nz0cixhXPDK!SN_& zPIVCO8h?!`v~n$Ae$5uQ2&Vc`1TZd9NJDdt>|PBN$`z0)gf6y=y$_7{^?5Mt6+~-4 zkjQu;-(!F*AcmHt9t`6G15QPsd1OA{txttL6q}}SZwzZWth)P{F)L(CJa9fxy4mfv z<=4|sKkq}ebR21W5u3=4hyYic8yA#(PCB)J4z#0^r-heWUTk%PcQA%_FPuWBT!TK% zg;l37sXXST_6DXUSmfZCfs4$m0@u&Vj@8HiTL#L?BR;i)z%sXV5&OVj_`@sz!=Eiu zsn0t0-;9-s6p)4fitNxAlx@!v4k4c#KT$&*rzxeZd+&qsv6Af+bV{E!^6Xz&=XMU#;oINGCA797qXSe)E?C}cCLhXUo=yHuEqu6t9!JXhL4gZ73ns6x5Gg#e z)@by59H3wjxE_2u*dq!X*YSQKplqx=QAn&}>x}|J6rVo-oG)}LC0wL2 z9TG0$OyD>eZW~_Xi+|2p_sNWlR7zGcZpF)gC<1saSIawvpcns?4J4Z)P!X9*tgCMY zUc8%w=lk8TcUGn)YjdAneB@-Sxfxa1sP<@I!&j_GcLtH8k3SNzXA1WkvoKp-G|aqX zm+~2oa@iEg{u05c<0HLhsd9-qj3GPOx7_fV1SZY-5`TC)y`!v9#nXNvBIG z9K#35m}bA_FZ1JoZRtINf+D-l28$OtP6%R^HfvGhMYtm+oP71!tAW1o{PW)YdQKIw zQ4|)RITa7#ZT+D{fg2rLE#JxfqKHe8if(0S{V^8e|4CVRbK%RZ0dtSbiuO1ha#Go!He zS!|aN=E}6>$M427{(yOGmKygx>|MGUspaEy;N>AUM%|njzc8PivoU4O*M zzbg3G2HJ^l09c9YpEHz}!vW@F?r26S=4B#K{Lxzc>&DaG*cyZqk{XZ%$3%Ac9>{<> z;$SY!q7R>mCa}MF7hn34&+*FCFD>;d1d6x}iJr#tEst+CXsGHqLv^+oEZ^aS$$Q?T z0U-FFsT$**&qN(M+Lo5vSgA8FW_1d5+N1h`yh|J(^-W|YO?`_)%&`>Kc$kw#QVIx& zT0X)#lrX0Rn+i`%9QB&uP9OY1)~2kaJiNtw-nz1)$=r%&3 zBw9q8n1Oe6f?ges-CLWyshZs$+0;InHH1mwT?Wq0e8LWMr~rr~TQZ+H25gIfk^u}; zFdRE?lDsow1on3=*os+y)Kk&ocI< zZrcZYNVOUNH@-ZzlU)2iJ|>GPGBL!PFqV^jc+7F!4^_hiV3}`-(%zQwNDHEtccw}6 zINF6@>`&bqvce#Va+mq+ffDh}VCyJJ#yG2doIKgf!Q+#Eugby$q+d#p0QTc#iWAg1 zkBi){jvVxLGk}rybD$l#^)exI8<>eF1Z4)oL4M=nH%Jg*Azr+X_QCHLetYKh0+29C|eF-MV`^H}xi=)A}o*UmfU4GiRxCIdm!3%QuNd zM))F3-ro}ff(?)TN=-FQ8<@0loadA96kB0KA;lg>Wxd%+`Sj^n1yQKt-CRXeep;kW zOt+NL&cA|o3xpH-IGa6<8Qr9KPS}DlXsIpFiYgrk_~kXNBWIkK@>O!OEI0)5IaEn{ zl`@l^VcQ_0YvO`rQo zH-Ng&HMkhxwoq%4`aAvw&O!g@S?64~wIyj1NniK?xmhEph54uHWKQ(0%>|zW52+I? 
zlT2B{ALqdA=0p)a5Z(H^xoEgUombzpw002Nnjc$TF5+omufKweGsl+k%EhNLm8&e& zY;>C~O2{uG$~q0;XEKMi>OeLwr5=hRmdhA>{jkv`jJHjdY_?nQ68^f`#0_r0*cLwI zBE@cVT2(QQDerED_07U(vfzeIDWsWRrDxZqE=g(!MOQ{|%5CsUJon!(^g30PEhHz$ zF)@7hts6JgP{oZtSFOSC%IY3>BVUI}UcBMsC58=m(Qa@Juv>!p zUj9Q)*j?@#rht!i@=?LmiaQ^>S{1aXFN~>vsJSu^o!{{UD;!qe5TR;Je#}n>J)FGd zpDu5&X^O&DbCv_1J%Q))@8#TEfU8Tnn=N@Tq{^qaiZr6Ox+SFNQscjTO;*fOrqJcW z^SoVyneQao&U>HzZ5Cu-wR$vpQ;+vc&&b`+uEKurtC1qB9t+$;bwTkh5>97xy)uNg z95h$~9yD|oOn3(#!YgN>Q{9-lMGoZPPYcb-{$5TBVFo^O_C;PETQEwd2tBOwr==Z7n+$s)GnP-rx!c|y>Wu8R_L@mgNE5zi*-rviXI+W*Jd4QE6IK#YL@+EPMqflviW4^2T+yd%SHFOhMrNYK2RgLyW89@%cqs)#^w;7-Mq2U$q z6s?A$tjuPk?!p$CquHF4RND`P?o`qP!IYYK-?UA4}0{6&;fSd$Adsb97 zWBuSep$QWO5#Jw>r!QxRiy{n8^oIoIM|kp9|4c?=+A|qs@IfJAvJYwMQBnu-a(E*q zf5`hO%{;b=C1MdcqQ%7czJO2V>>;U%Xk|6G67ILG0(@tWG;5g^4)5?OFslvq=5RCY zsNHXo`$QNj!L9F@70bE8wr;q!9Nlf!R^N*34~!iCiw?_J2TYU5@)5RRrM2RQ;AQHm zkk)H&NDCC%l^LS)bx}~AsufIA&RXB(0)KWm%C2a`wvqR!I9V$v+CjX`ht~Lv-QvDn zY-|&G^UcOkR~DqXA>wExTqjq(jF|d4cF*}qMlq3x$A*8ItslfSis)#EjRnvsrK1e~ z%iU2_;Xs!5hRte#Mdstn9}69+c_E8S#k0D1kg=2|@2R=n(`uFX8N!%P0^6)p*`PaR zR}x|}^=0}whgSEWa!T?ex%lF1cAT0Jt%`Iu4fLx>=-&^1v{tyEPU4bj&B^Rg2;c~# z2(qT%$v?}2*;T4K5>m_1;>LV_@Gc*}S?!`*{TuRkae~s+OjIARc2ii4KQ&*7T9Jm9*BQPaUZ-3{$npG~eBWnd zI!2Ds8K3&!*{kDzK%fyF(NPN>hUtRo&xf@KQ=U`8^dTQ0eO`R%5AI-IQ?cOzhd6<> z`x5hZbt*Y*-N0Q%EGwcqaAA^qAjc%(702OAH5t?ZxYy*lGvc{QY9*R&ED#Kb8<}vfuu#`@)7Plck<=a*Ety)n85G(Kg>SRFSUODq*x>?kX^&KJ;rWG;C-=~nvCf3cQ8*~_oJCg3cpNPOBd7V5Jn zfpzkh9LSwB%qMk6w!0O$Gpn`l5t-zdul|!B7YBOp6QjX;3z4QNLrzexWesFnBoirI z54byHFEFoN@E&{whdE;b$1`5Nq60HE`zGQ;)Xte4-@Cg_?EwdRYYqD@EM5X}qLD2>FpB$+RtONK<3F^TX@k zkxh;?n00PiBi2Z`{}PqL$P?SoX28Gr3!xoOs?okuVASZnhBzOVkI6?N7Mj0Ms5ak? z3IJ5+*48@XxT2Ev!57KgAL6rS)F@EhzT5&+xMa9=FYLyf5!lH47ynnG?#*W2eEE+V z+I!p^tC$aN2REzGjykwSRBf^&9%JJ0fzlHsQj*LPASVO_Jq@ui7JYOcm)66lrK24y zjH17raa)(160F?0bw~sVv!}|wiIdfFW7XV?F`B{wAe)b2jLF+~XM)d`xoi@cKLKe)Eke^{j{vDDYHXOlvwO9UR8QvTah)Yh=GV(-lUQo z7{sDw z!p{Y9!0Shi#m+~kgZ^2Migg~3n` zzV=L{P8}}vA>^7`HHQdp1&`8rQ+r*I2XvoFrUx1aU4$%OD}(RRBV=gMx! 
z%b)n@Qp&6IoRQ*tEEb-@7K%;ibJh8|>=*PZL;sf@x`M?#=D$J3%`9u+KZxE={a+8i zCOd&hLOzhOFXU>6Gk4|hFTM1V>=$wnQ}snT>gJC2H_VJN#!$!2EKQUtYBOeXm+u$x zVEYii8kPW5^C}0qSOAR--b{hn-a4pvRpgqy8H=;M>;q$Q0EnS#Vo!^kV}>fsl>h+w zx05&3@Qv@HaC=()D4)VK3SMW_!D!~Y*bvpd&8-Z7Vp@jEp4`28`6@6PgnWcdd0z&B z4s~S{z}Jg9e{*O$s^dO!{mC!;*ASs!<*ecJI*ueyo_P7bT;gyqtlBAHMd*&Zi?WnH zoi9Go$QlZVb)fQO+&YztWNQHPfd@{*VP{7!% z6*n|c<5|dNeuV!GA~NCOQ}<=4W{n&v)AZ>np`MQq^ieM;GzYgZwJ(ciK zS}4)z=e`vc#yAFYX6N_}o}wQnTYZ?g>BziRCGHN(r)7Ysx~qadDekZ^Ax8@(DJyLj zpBQSzbJzVGlisA(Zrhug>RkQao^2gu*?gGU7`lzx4Gh5>B}^?sd;sXk5e!iCx0iuD z0V3MF7w`BnV9K3=Ix#nh?KOX>BV=4 z0}>Om(=v$TKpR$eSh`q6f(*&OGdvOf7lRhoPfN%3)k89_di$8%rSkqkNZyp|^209hU4lNM<))FFCF#!S)xL)P++d0Vbshdt0nx)83q@D8f z^wfvWQ4R|+_9=zL!fOLg4Mjs?)k4;q&^+SM;j<5(2~%q3cQ`STykGcKU8E6!`B_9={Y!T%Pk-Y^vB*_1;ehjm9$*=|1l zv~=*=la$=nWYY!W0m60z`_cbc#2f?u15`aK@26nMJnw1P9;KyrC-yX z{ioroAe6-BMQDhq##Q4aaww+9@+3k{2zh%eL}ZxKR&CoXaJhu0(Y!C zrIne-2n$M)^)MI4F-EZ%mOJ9Ym1A-+iojo-3*oikyXLqE;jy$6pA0ka6sS>F4<{c7 z>om)(a3O<}P`Die!?-hiV-C9!w6g!0?eb2aeJMQQnXhWm_ml3q+__UI!6`V46PJ;+ zh*$3xcz+h+--N^Li${~UecLoDU)jyj*9&yJTW1BIN4_`+(Z40R;`{!aPO&Z2x>eAg zMjrC})yMvm>2<2MjC^exIN@gn%*SABbTN5L^Lj?7AsbYCVYK75ex=dx6kr@Q7R)Jk zViG(UUzWVkI8;)#u%PS(wKfa9M+|LA2Z}qRmF@61HTzlc#X%Rp_(V<$$Z5e$=>RcI zh}i}>CqhPgMh1}sEUYoZ8?GSEN|=Wk;iMtFs#*1+o0$S~jX7FLdRdO`(+J?i%pk|} z$`Po-;R3g`e+nd*@L&`Oz2@h0)t=*B-+1?0(mKL)Wp|;_2ScnL13+H_kVc3j%K>yNY5>Pk58#Iy`;jkk+eV<% zgJIBY@+f;>X74ypdMelcW}}?cJ7QBX8}t>#rfja3AyoPfXz+OFyNZZZJ-Hy0$#-?1 zKmj4kaaq1?@*icT5)e}27FbRR6y7HY)TIJpSrgLHZgr9~ps)d`u+2u1d#{<4$Pyx-?@Z{n6-OdzCmIDuC_G`Y922KFj!_vbVNqk%etLB& z{%>`MRiZ@ z$y>40dGyE6IT=usvp4n%zg?(%74KCf*I=r4rauE>skcNtO>T)kD0~zHXb2KIj{F%S zH@yMU>cW>Y#_A~~J3&r%LQu}k_Yu)kM4S5(-~XQf-6AG0KK0W+=k@^~9lBGl1o^@Z zZ@5)}h^1Iie>79Lr7096W8)gvLjll`-Ve)_t|-!9_MAhPlU9C=iaruQw z9u;3U>JG6uR<6u@uTuL?Z$^UmdKGKjUw zCn1>QLu}%zp%=EKw9LNV36tQ9nip#G#JDQ0E|ZYz^G-$hf^H0H4&LkS+DZwTss z%5FX(|NkhacU3dPUuApxWM~5byNr}YrIfG;Y;Mc>B|HWlM%fAoY?@<%O^z+?CzsHe z$it%#3J2!o1*Gk;%i%H)s`K7y5%e-!KqfEFiAVw~O=-DoR-e`QKwBcYM#urLAZgW? zmyXCB_4Rw^zW-|}w8&E{y6t~{`MM|%M0;6D666Ob;bwviZ~Ct8(0bA^*p2x4ayUMJ z$1@>g7*tx&^DORY>dE<>*2hs_kk7AdhSJ1J5zz_$-Y9fm{Ms5va7uuLw@QYN&utu> zhP~HlAe$kt8j_(<5!c?R5H!H@@e`DSI|~OFF~0@!OS{pl1aCkQWYr-^wsgv6RU$x4Oqx1whp` zvJn95%_s!ZKo5(A_ko#XS(@BH;uel)$|OjxKG%8}R)%E1`i)cXOt&>?E;%EKWopgK z@)Peh4iH>>*rBGaKexkW9u4pBu+i{k^G+GQfXfAZy|E~afO}qk&GdCc+XZ^@8*jH) z1t1HJtYb-Ya-i|3v<&!gnD==zD-K+0K4>gK0P1JYFYoPH)~+|X+ATc%Jw;|VlV5>l zZ=P9in1(ulAjMK0BNX@yxXqjlGutag<^X}#7KZL`E@W*B(LmPr%537jK_SdDm#_J6 z>mA6+xLWEAFm&U_O>Z4xNjC0U7dw0yR?WN=pJUYX#W~^jFDeXKB}1nce)LymKT>m1 zGf@o09C=$_Ah%Xlrp)KJKZfUFc&- zQEJ>c1n0pCGt*j-!Emu)PUxyGJk!QzHQp_@+Y*A4_r{=RdSNSS&B))^x*BWF1HgV$hb(jbS-Hb6{&fn{c=`8!hS!7NN{^VZ+ zRcG?6epiG){0f+^P$+!8ZxJc(|28;WZ{Jwm#fL&{%s?4I9pW^Ra>9?*y6q$G_TxUb z2S6qG6J!+ci5s#I|17D~pBej#Q5e9~Z)E&dO280sHBxqHakXJ`OzZ&jzU)!J8Dd8n z@JU=4llQ)ORZb-L2N|^SD#PE7_@{5YAhbyFQa(mr+t2Z725;6xaz#qFmLALkG-x!4 z42Bv6+Rk-YXcV|B*};7)DNNjw6LT)7i*u=P#Z=*Z-HA9dTEX-zl%T}xx}Pl`K5^5D zP8)&qy1w9Ba(P20d6>_ALp3Apiar)jfS>YR=xZA#xG%y*I3riKyzfS{GZ@Gg)pCSP zSLKj=aApFLfox=4`Q+q~y0X14+K6{TySdQi=yS|gl-8aM zfH^IB_tX=j6Ikez;RtiWWbv#f75g$EN_Qup>=!bb1tU5M88#-PvQi2`>5+;JLA@-h z6KEETun=f9hiN_eqFM{5^&yK)mX?J=`|Qa+^M7`ZBdLV}-BwuasV2vk^X_X*BsR?4M8? 
zQ^-Vgy(~GlUSVR!qKyJ_bdj%nveBmbROu}r2EMZ_Ys_rV_}?u@BTSR0Qlt6qobWV5 zbqG-hz$M+kW-o=W>2Qdv4z<6Xk-8RxjUTlel{4{YWm$&g2T?g|?DT3`MQlgN)TjJz&HBMvcYh7fThostlpZ`rbp3F)?nx=?rW<6FW4s0yVnbppi-?8nVP zoQap8@U>?cch+UoZB;6|SS@hbgLm4fh9QSNfBnYSrFV)W=@~SyL%8j#$-g6V_)1I< zm6@RYFN}gP0!;bw1%Fv=f=#5{eeH|gBIDv0(YJnzKp60696aJ%*#x4i;TbhdZ?4Mh zV)}N_!$9a%--1C!fB>fcP_L?US`CSr(aw(5Ju^dYDRM*|mJZA15i)NNRJdK}1?3P` z4lOU=*ZpX?qnvxDr)P|)*kH{wTiZTsMguP*vtDM+y^{>Z#x%TbME=mLNG4EqY&bS! zun*WAXN3%_l@2=xVO2(AwHjTocn39tm~XVMxs~L6?LZLa{L3Mc8kCC*7)N^Bk~$(5J_T!*s<&oU>d9810exOxAksWRd~G$XAJhG$W|Z#hlLQLnrHLkL)nJIaVJ7@ zY6EPQXO`e4KRZN)`s^*t8RKlg%FJ&`C*aRZ{Gny2n;^Ap6 z6CmHs#%U4@;8+=fgz|uI$Yns<%`{CQ1R{2S7HRO02HRptvso@fhSYoPUN%YqSL;)% zN$OFv9Chp2AuvFLrpI~k=Ma23jnKl6mIk=zSx2r)YYI7?h(9L8H?n}dMTo@{m5lLH;rD1En z=Hs2!Ae4Cu{1mH7bXA*|&}Q&AfjGC~FvLvIp2CPgX8*H>;SJ?DgI z7dh3bHdXNd8;Y}UptUIfK6{AuwXeyKxg}rh+Ae1`LQxndZdg9UC|-D|j%!7>1X*k} z4$sTH1_4b~tG0@n32uu6AuEn3%*_#%c4s$)3C^0^B@Re?Ttw_)$41rEsFoyjPlM2X za*Ths-t{I^9a{7B_|h2=ithj`l8bp?b{j>}dU~Z%!VHx~h`6LIalPt5$U^f*d+l(0 zdJi@jBB^{d`~;B#YKY`vmn0tVWOSDIcJTuhA10|V>;$xAai5u!Z)|P*6m1{KoRY}q z&%SuIicsw8;PW2})n*2dRY+!_G7plM0^IPKFFR`}9i1t}QYK-pY)^)4|4Ke z$RPk<^(tW$07?=to{`PSdJ0u&8F!3K$wndwFzcLrA0=&e)B&?-E;b}Zf6jzgT28*I zf37pS{r4^WthtP8fznDYZe7Dx}c@L51xc zNDp`(Bsa1-4&~^q$ON~*2rC-nD?bL}Uq(qXv?^Ix6#}N23YwH<^Dz?!CKI->iO28X zHQ>l)fw9RpVQIp=qY47axyn^ZO^>*pg?`l38ORQ?N_NutZbKD#8IL7n6H8M7n^a{~ zMOrpFbyt^rz>cv~x(Z6o;Vlpwj~PN7OAM$=HY#O=m7aDsD`nXUd{9=oQvo^`(*6`B zRaRuXTLniUhg*fMC$3)$UR`$sqH3izyY9;bw1$$&f_VKAJ|8(_tK$f2nP%Nj~S10`_f7RVkuI2SBPHUw&@u|6ZSvs7-8=8ZqwtvgU<0XLhnob8-c ziT_3x{n!xb2?z^LfRkV^_+i=-O7?odk&(}hl@_5k@awS7FDl;PRTW$%-%M7C@XA&` zrMU)6+8!ZBHY-_(A;l4urGOKxzH=>@vbm*xhIU3@hLkVA>^utKi^(r!l|b0>6OIrc zdviQYd0@T;x{3D$q<@kj=)@l<-w`K~G`wFp!@Pg2M z>!~GIUO*GH@{qDl({yzo@nLl?V=+m?Sie)&>;;B{>AxY7rCWs%h7 z7nTWR5dUChK^kiYL>kqh+7itn=i40YRyIEP9MdKXCI`inpRA>?rdT`3l|rOE7uV4@U!= zpcQMy4+Gf>B2--ZGEu{dmm#3U>E0tnNj6KTS04`_t6E|C9`Gwwm)Vf)aXkh0fXZ+V8A_mfTx8Gm9b#3i`*!NkQF{Om9bfs{mX|r9(js|Cj&SbE0q==5Gj^D z?6i8F%A7(5c;Fd?m2JRHx2IO}sa-;jPKa0@xZS`1eSlzq=`EI5MdC-r`~OYwwzC-54=RXf3iZE+ir<^4Io zDeE`dmvJVO{U;Zn2}cD|bb2`zj$Z2ToEG8Vb6FK*aDk<{H0M(qCuf)wYh?IyzN(Y| zh=1Pvi{Oa6T*Jb{4U9blw7#Hs9^s901zF}PSXrZ8-~++uC4J7PTgdzDoONW%NFbKE!QT)99ts?Q0#lRPG1Q zw@Te}#4vYq9?JPhl^k}4@ye{+_3MzHT|kmqtY#?O+6FCxu1V6tp-OuCbkrfMs0Zp!lr_H-B~%=3`xC-3h@nBu(!Y1*UC zi@@oZuSGva+A0xvL2tjT_n zaOsy%=#gwomNVRos5h`QKgD19y8r9thyJhceNSg;NA@v|cy3e1Slq?lF>_;s9tmGuSxWT${#1RhkE#;h8h2>UVpSXoEY<`N`qOBOrD zi)08HQlvu$9(1FO?#1se6=>vJP(oJ7D`G1XH%+akJcqJXvxJ1zU@j?GK66ZyMNr`q zt`cFuh8V6t2~3goHRL*r=<&ut2+kFN(Q3o)k9#Q8U3Sf#kc9bBtL8Q{3NDJz1gE7F zpBHDFaOsg0fWmT1W-FyhS#R|MTw5 zsu03b_Pm8QLX$o!19vIN^qK5s^tswx=ojZY<^Q|OJBgV3U$-Q3h1M`{}tKr9TL?sqx7`i=)wc5jfmmS z$I#81;ZSPJqr>jSxq1vt2dj4w5I==XkSQRNWWz5guh<`aaNCPAID=B^U=xpp>Q~2d z+00$#(HL5a+)+5l2CAT5VPg2^LYUJl!lXbri9a<%tSr{WFS9RwB`Srm5O*VFgdczS z!5e;H`L(d+ja|EOSUG8L(ZGkiuj)Y& zX3-jllFN|5Z;N&lT7!QDRQRX9%GFTq0S7?b=~aY;0~~?~5?3e@Y!hF*Z)=s2spuQq z`15~^OOx?z<~pN;uBvV+h>OZL=U${8pT%$p7AoNS z5VHPOPOL@sbEn1YG&8d$1KbotyM_9fd3sO<{WZr=-z`i_8wl6Fz+1u;3QfnxZBZ|B z$iDcc4}f?>_+bpLWrBiGhiF#Z%HG9EzS~w>+rDu(G&|r>Q&{j>2|pU5X%i})X=P&U zZXG5H`hK`M0n@HS0RQ*m|BwU9c0OtcZ^JlW+vwg4B3?AT4T0xmY?XGvW2@pAkM zUkhF%wwLcHwj00&A<9~<&@ixAx?iUEPcL7QDWU6k6$^MsSW|3nO1#LBVFpHD`O9t> z!}cKXpszjj1xCk+`)_4?!KYv%eb_1y8m?l9{VR@y!h!)L*g4D+>uPn>&KBYarRkIi z$q?32)VxKCcBoi~6h;(`%-+zfL%2-3cIXXX`f2nhpRdszC*?a!s@GMGO?dXn!A!Bi=w)BG`2;7bA%#;#ZsfRm)Y*lmUm-e_$XDr5E7WYM6;o&HRdO|x_x9&SUi#C zbw%%i>>zZ@k!mtRb05UUN6L!PTTr|e{&B)Ihf(2qm&&qwk35VMY^&|GH@;+ez$W5L 
zhalz^;iQPqp*@VE3&?QoDeszW$zp%RP4dmPRY!by?y>*mEZnUJL=X9%1e=^(fRL}I z@G^n_g%#EJ(K$Ryy1ku!&Ic{4R?0riB)g!P`pEZ-owNfc ziqpl?)%}PZS>BF+a{8Dy9ynY!wOl3xigUBbJW0lg#))7HUU2zCTvqSu1~y$Si3GBr zD6mwdc)}ZEf9&Q2kU8crUu{{vQ2D)KZe{ts2>?(;xXvxgXw7ybFE$23DVq)C3Y-h_ z0-(o*fsnhUJ{oJNrp0?^8zpg48T_Z-HJ_JLelX4;ZqsndV!r^L5VG;zH5pSX8W3b< zH3$Xp8J&SjiG=P8YPqXwX~If`(yrf@rAqK2>r^a{u~=J+26ZNPGlv`^gT%!PK(lXS zhIW*Vux!z!`}LyL`qwyO&I9;HX|0A5bZR*{L$qPg;bavu=C2v zv5-z|qfQ^%nTw<+a$rK=vInM(d-vm%ujl)~PMg{AWhz|1Y6HBRfYpWTGZHiJC}<{E z*@-!5vUCG97Fwx{KdEM)dAAH|?X2~ut3KIXJU{+#?o_o~lNGO*C}Ga3vI7sr*~``? zYU#*SEO@}VK3z`JIm&1#LccTb!sQM_cD6Adw4G>tywX=N^fmmJ{$AbIbxD_5zSL*S z8@V%iTiV=e)YFVz$HIzzMLWHw2)Nn_8+ppZQTy<5{lLFU7*n!Z=nMuWuKQ%;wycXm z4h(fIK{qsHJu%rMl$fhP=!%}A*Etr9k7QPd#k&;B46O>WHguuM`(ZcfV>ghUabR(w zz8kD8qqM!5^;7D&A!zy;o@@CRob)(Eh8gQETX2>|5ld@#i!HjaV>L{Kt*0WG2@1@6 z=|{|sf%ax|D@gW2o=r3x9}Q8HYTY2~WFWRg-0;6P#XPxGBI4UJe?r?~>s|l0&w(F! z+J`ag^(F6z`0}xj@*CfXzjC@}mE3(B z{6(J#phBRX$VBi3AM0{$V0!hJFMndm%Ki&-6H6n%2w;eVW_e@&gBKT9eD0I~z+#$X za<6L1`{KO#guiFD0fr`RoW&(EkxzLaG<8y+$Q@{4!sOZL^T{I_<<8`-P8y}`QwkYjs*_K;163Jb zdRiW>W~-o01Fo0h1SY=;VQT6S&9N%mnz{r&e~QSF`5DNH{Irk)>2xQTfuU2B=~%AI zH!i>LWKHR!P9!T~?GMmN_ta=iusoa0-$d996J8h81f|_-x9yE!;2g)5LNZbTy#|gF zUNGO!*?cy6I|fm;(b2py*sR z61e^ye~o;JP(&H~PU*up@O#82>39~s~X0k8972-7m;*a;4UGBydiCf1%ca-bLK;$eKO6n&cSca zXR%0waXaoU7epY0R%|`D7E=af|`ek+L3iIp4c8>+`-TEn?rfJv`QsdO(5m&BS;&9{Azej_^S2 z|Kiv|4bd53Y1~&~9XsDHYQHy0lxTrlHcNp~kp~Uh1=~Letv=r*E`V&}lVnWuIcv$x zIR(~+xsECH>+8bX9XvHYhounU)@J1u3UsOYbE@}cwVXx{BfQ}9hbFYiXxeAt0tTE% z*b#VLM2b7nsTtx^qP-&+S@067-}+ni=l>X#GU3|?zOT|1hh$ez^yH%)-)9(HI=E5j zS|o+b1K^#6>?u;a5aKLI*{pXm9|%jFh7S+;@yS~;>_ct4HP+D@yg*!zE!28%!CV8% z&6f$B^k)ocCtlzUecu zTH*0^BNhY`Nqk$HbIP@I zAgeo!rQ7s+-6|sNIX%v~ zZ-;I{ELqrS2NAYeHBwkl32j zuo_Js4MRX84@hQ+z#k?K`rYCR^^Jl{l59W!#lzc`G9u0s-sRxvJimC9F=ZW{QHFO_ z0@NF1^(764&sXb^>yrgUV+m%kBTJ|YZ;wJfGJ~IUs2WqB(|o}|-DIlxL{-%WbI81% zz6jQ&A!G_m7$)FzKPlV@^32#p6IZ4ozN(LdFuja}w@oOHPnur1th`S<(k1{(yQ%7q z^6oz?>SkbEQw9LOuFw49{ma+BlwO;cPNKC6_oBBzB$9G>4E+5=S-#!bld$*tqHnu{mJ}+Pb@*wa_>P<_$ZVIvzO^Q7qa=cG}cothX9rct& zD2?%%gIJYP{vDkf82JSlCh$*0xG|(m@_=X_NFI$O^P@m=OZ*^V(b5vEG_zQ-&q)5$bfgd-GRWW4hiDEQo>H98uUNZU-evr^?7( zJkeMjst2&AGH3ansZAH}$WCRSTJade{#GM+*NlKZ$o4qQ4g1|XI%XzYdP{Di#tLIS zs`oUo3NXNuxYfWiQGE6^1}!ttmlX*Zgo8i%#(X(>^>zZ8lW)zW9(#qNZ0uQix?GKq zgOFp6zA#=wr2UW^4ofJ3?JB}EIV2bzoEo6H%L#EjMQQQ&IMn~qev!<4T%E=866R<^ zv=Hp#r$vPqgw5di2^KmmT9$LJAHpTVdX!`+t-3fZ>?Fe*RuK?mmK7NQ;#sWuVRIt% z#yZ=p)Zz|^L%fOu-Vd_X!}EV1bPv_vHd~t~1k?`If%@Ik*qkiuaW#26W;rI&HkOo! zC@;RG=H9Y>EQdGM&p8e$h4&|a6Ew20snUL4@5eP_oZv8c1s^6=Bjlxky#CH`bVk`iZ{4DrV%1PlpYpY- zfdqgl?Z$T|I25XG(&e(Hn4rQK2Xo!5I-)Gim~qfbPRrQQ)=}tGrNRR@b$k0*4Lm8w z@u6n=I?;hCZlN(<{#6k)x`$gMqdb8uS642uZ!G!@X~Xi`vUgw&geUL7Qb{d3&*{|c z?G*}ps)sQQ@5{zLA2O}ZR$+&F;il-HXj4~yO8KK@5)j@@adkN_ELU)qeJpOi^P!j0 zm@7o+Iqo0!0r2y8XTa+4+1Q{+xeZP;v$`sa!={1_vgKqB?NZfia^Oo6A^~M`$}A8? z5&58tsID@PeO6q?Fd|7yr#aglsQV2=5>j_)0O|S&QxH=a_G=-W>89``j;rtYxgrk8 zLe*8bI>+SUW5)L0Xol@~0YTV_u+`1*!pvC#44KcI0C;h~y=F1ImjN}Ea=D!RhR5LD zEzo{25XMY?o0FQD(ASFOIKVtp_@=i*&X|t0+Oen|8B&cfCp(&NF_QRZpADZr8%Oye zsWh1yJqmEdScm)F;tHs_=zT0!%E#w~G&pMHx+0h$0bcmK09OCR-`%vl2H6K@Zrbti zJ{FdO+lh@4p`R1Quz|%b#XIwr3PGLPN@>aF?-*E3ci;n>&q(spZV_ByuyfovG`>`6#xREzXRb{Cpu5xL zG?OVG3xm;@?#r8&<6DUFgsQ3rZLBi}3|JastIvvnLTQM?Xvj#uLynAb3N3L*Mi!O9 z3P+R7r34f-pK=CCC$KOWC4PYvndg19kyyhBQ|}#OiS(fnp5R2L19FS}n~LJMVVFpZ zB!iso_LSA%E-bVbmZL0u7nUbSV22=u$u_&%XYQhKsfi z6Ga!rFuSs*#sB;m^DY($90EC_BpHa~V_vwGQGr_vTie1D?B>LF?<1`ab*1nntG%!T zpx9WIjCrn0r)CN>@$HYmpxQ?QfD3Vo!Pj62*|{ZekV+=5&HrrnMkhkDO? 
zgjV`t5It3$oE!D~zCMF8sw&w!fYNq5?X@$I}_bfhIjIyIhkK%m=}~xF6ZU zV=}dT3M9TTzleIj{A)<|L)nGbu3hsspja-=#HPY_?()Q9OLiQwo^V1ZJJ}Gi*YNOW z6sMJZyWq`J&?`11{M<*#(A(W?l1;nwBk3-MtY}?GhX+u?WT$L&JwzSGwLd@JwEohu_nxCB>8`kK5IIGm(NltJTW zxra$f&zoVmp=Sa)tI!y~_O(cym^@M~+75RX(?AD!-FMAlqbGVuyxTtBcVDz>XD56_ zlYdcmV$qNVo4S8J>d!K)`L3Ggxy$gThETA|3CzBy|M%zWHJ_%K$^l>%0aq_1^;OEs zI`!DJ$b@ot0L)&1Gi_GEe7W@MIGFce_9p0(0)4*C@TwRlgSW`%L%Z^1FK{tn)YJ6? znb%Li%#u@3_#|R`n2$BOXC;CIT6UdN z5L*fXBO?W|8DL^VPVmmw@2sv05zu!nBv=(e3nKF=0D-O}t5wCtT4a00$7JCxDh`fL z8Dwl7=CxMfuaQGfr5|q`7Kl{hOKAE7E&Cv<&bjCbQJPIR#a37KvQ?8!mHnThzj$jv z96Plb6#FZ)O-2!d=2@1c*K!Tnt}4A|L9(ETLIvDyYVDbrr|6lS2VaHDpI*M||E7w2 zTqca@js}Y`JgX)FF`QDovD>J{%}U1taoyzFb2hC2GhRh!360_y04-U3c7Hn2PIo@U zO&=96`AN)pVYryQkCDia-@})LkVsR-n`W<8A4AJZ1fyyCcEN|1>-?D*9~&JFs=$at zB1M83%ebexhMm~cun_e*0pD!k@!k_iD_Ihryww&F5MEFCECvXnz)_V20K!t`$vS3C zU$ly%y#8+x*$OkX$&}tXH2hX6U;P2NCS}-Xo|d#?9~gC=_>tg!jgIVx<9Oyg7~$sX$w-n#sp{5b(uZWm(Bqv7}Bk zUiI|C<1-rZ!zZ1V^t^u-2@$>KtQ%I6sqw1-!$nef5Pl1rp4KQPJaZc1pw3-HSn7{C zfg5Nh6huQGA&dHQ-*^}oCT(vvs`Sw_*(}2>8((|=k9-bbO-W4m`FX$x6=OUWz~)0>yfayXvT*fylSjzVvHgfX3TdH6 z3DMngGz{H!Wqn)3IfI?aS2jn%yeqO>PJ)drgH|Q`Z;5*_rU?msCd012MKDK>31z%y z@EKt*%r(BAS3kjV6Y7IZS(R$5yJuSGUg(+;?MNjh1yM>fxzG9bIQOl6dVt~ z^SM~$T6Bi666xJ6lPntinz%Pv3?MZ+hrWT*EHb`QN=FR|k%#-up4!;vxbKQ|#F-sT z9U>r1!|AQ)hy0N`R(9;U7MwO1Lf-RD0NjILYV+awq3gYD3!}tJ%oJrZI205v?te>w zl^ceB4e_p{&P1f#WSwy;T3}VQhm=@^g;V>+Qi_FGi0E++4~jb$3%EhAMztws@X|G( z*lGkKPu`sT!e>6;)zWB$NEZqFsEAH3BJ5PIS?v}m>?lTqkys;)DZr$VTME{j@VRAU zOx=0DG15iD1e`7m|ITe6{`$IXWSs6kF5*zLe$@@bKHvV~m46Yc0qT{RuPEQVcr=O3Hfi+2pO@CkLPZql|8C@|NrXB59^zy*^Do-9}W7yPbNpJ09yWFyN=DXe~x6;FUz(uYJ?fxZ~Y9#2F@`c;9IqG3f$YKzDg zpI1shi3(~k!CaeZBPEMFF}6U&^M7CbUB2J&gDBIYZX|KPGK*GYQNg|?lIJZssC@uT z`wxHl-+kTY3oRP-hln7-0`J|Uwuar?MEx;RlQg$F)~K^YMdfMe{S4VIKpEwoW4mB< zuaV25Czs@A?PIgihgeI3HdbCWp1Yy95@ul$-h&eFPJ8Y3|Zc<>=tvj(bd%Sm%i;yA!&!s%y@85 zj+6}F_|nr)3-2Jv`nR%GF?@UVT_-XeC}DZV{>nNPC_cHAPG(nT9U3h z=!O`MO_%Z+Jy7#5ivkR4C{;}5##3_ZEtyY0iAcU}d1Gc8-|QXUDdekU@{f{|v&v6V zbbY#)e|`Cybd%&`I}7j2enBB5_-+(oxLb3=*9*jrYKI(YmF#hr@5Ogte@S*X<_d8v zGMQVITEWrvcash7d{=`{Bx1uafmkSGnzv% zRU`3;9RA5$1xgEuqL_?g70exB(ulVJMT-)btJ%IfY7Dd8Uxau|v9|oc`dAeCf$xxf zs2Ydig6REEa2#>W1t0;$j6F)5gi8t){wK*p{7G{6P9|^4mc|cC6uJE2s6o&P#q)6T zmh8kWGf>OBd)X*16q;r5} zhnWf77j{D!ZYoojP3OzhuoC`?%R`6FIYfk(=l7bPrYQF(A4$L8!nhIp^RXNV!$r}V zqS4#jrRiI6BcG;8S_7*i;k$#P~@N^mF9?}ip#s%(%+Mo1{5jIJA!F4v-w+xC|=5YtT(@p;B>05S@f^} zNPa8t12$~=32HHKB?KvhtU}MdS(gNfS6K z3bg3zyAiv*xh0*m6m|qjsS%7oP+bG7^|4yP&s)1n-C1!$>*(&8(?PpSmHFFzl1K_)T7j|p!t zX7!L?cWaT~h+*cMMC_2)XS?C3#*nMB={HxtDZMTt+sbDrMR>s082fL5C}TN>&O*9K z7Sa;l)DW8)AsiB&P0pjGK3@Dzb^{kLV>Z(J?KPtjV(`@e^sY?g@_(Cy%x})|FyuDu zh%yf;WsFP{MpTY*?1^3#xIZn6(U!w$!M5yN89Ymek|f)U2<*aIfKq5c{>6H4ktXG1 zGG3GcPqswna&SB6HPCKlLk5$QI4YLK!EW!df}5y`1WAl#2^ zv&joELu#A|1+WW8Y$l1459D2g&F-p;$?0q!40+Guj5DGQ>(R4PK|Tn6lTkWU!H=I- za@9BYmKlP=S2>64w~esiOO01iI5%uI74nf8Ebz5c?avlu*Abe{)uA2Jjoq@|^mEr< z_77jwVEH*}obBnFljf*$2cy@sxW%?%M&4?3RG_D&&|Y744yc|P0KW!(_2EI48oW{~ z?Bp{<0X-4CrK%kciX`Qr{e-at&1euDY}v4P;{|<-Q1>tubn;$UrBgICpF_X< zU3qfxFS&F&g)V$DLrhn`hJ0W3^Z*j)8TRv~mb|vsXtWhWyyD){A6^+%4^O!*bvx~i zxff)6fk{|Iq~5E023jV&25?y{x!E9c?cr0-zQI&(dIG3v^_5_(=mRF`NE<2K7E88& z$c$GOgI-M_Vyw)_WM_*iN~R2D-HQgm5tRK(T!h`O@|P(Bf4D7?&t%%e5xFfgn$6&j zGZ|VId0yI^I#HCX;$SK;7+_Zn(N~ygrqFM8z#)uDUVlNBtA)!!ey(!FWLS6fS(UVBA-68ZdpRJaud^+tDOzy-DP)Q0X29FPBq!$_?8eS}17|iD1bU zqWGK4pK?-tD3gB)CQ2J~n|EY;AiQVn|JL@F^o>%tD?fqp(FL0c;8WY7g;-?;p&Bg} zXsABaQXp?4TZYl4W5?cJ_?8UGxPE8=(5_UvFMfXcny@H1eX2gMCms2-ueYY*saX!1 zQnJ$k*<=M)vd0}vz#luKfAXQs9JUbrlF7EtRQoY64R9Id{kf59YRL&7H0qY&gnyg? 
z8K3>Op945pE_0dHDrpC_7Copb##Naf46F(pR!bE(tHVnw=6}lPRv;0&2tCCE0)ASdE}Hglv3fGL-5h>kb}P7`>}F_%kkIc0kAC>IC@;(@R$b_@h%USM zCpcr&y{1lNgujuDnvDzW-sLK)>+lOu6sHbXtRA9!1`;le7U2@c9RF1AX~7kPVX%CeLTeEl-hpd1%S^G6FmJ(U@%=>Jc3V4}9W=e7V;v za$KjD4&RgJnF{-%1w+IV!lLxzY;F3`>5YpTzTOF61(K$v!rEK|b*LrHx6ATOZ$ecrKKgnxV263ou68BAgrl z-e_yFtWMg=Cpq2YIH&GUKZ>KmQbi^aE##7yvrQAA7d*ICbs7y{!Ixlw_p-3HndO5$ zK(!BwYE)!G3=l|Sbnl(2T`JYX)ip4KdSQ`R!NKo@V?5on*kP54G(xHtE@UfGBX6%3 zR|kzFS3YcC@8XrHOndTkNv1h%G&bziyR|9bgJIoKGDbe7n? zvH(h(3pqElrcR{O#!NUe!bQuQ5mIn+4QN@Gg4Cx}LfywOh$!@X`=Q9wA7W2PGgAiP zia46xHS87`wrkZvqgN%G zUIB3+ir?X>&#@CZo$GMvV)M)15Bb#Eh`Xb-5E(lmr_RT3l4S@C ziv(|lxB^jIodTWn(f-*FvzN2{dJs%3(e zH*^OL_#!hn4Cj^RyII@ZeIV0ZW~&(}qa3mYtwTRwfc2SEaYtQ|)(rr7gy{Kn z=<8q{XNzC!%8Cg0s)AmW3zlvXp^mgZ3`_!VZh^UdBvo9{);_vW{I-O#7-AA0Bi{jx z0n;kz#J*v+7G|~=WDSSi6TRMMQTGbecLdksRR8nkYu;4S>wVvEe`|BWAJA!Wa}gpK zQZmo3eaHFklP~$_?P|YYi0n8O%^2cM&S9&G`H>psdyRtMLPlVViikrb9=5Ufaq;Qp zPyD-T5GV8UZ%kN}CA`(>4T;+h)1lH}hEpY-rgA$zqq!yT?ecFgJ{CG#NUL0lZ6pjh zFFpw$QEMC_{yXi+X@^{!fYiodu_6%ID66|luwo9t{L?$3G#SGB?E(ib*7dO{&H_gb_8Tk>^d(Jn>6vRWZZveHxT;to6u{82 zDD4r_;z%>$u&8Q?*`DE9WcLfLSXCq66a4HnrK5b*7@U`rZJ9`1;mxQQXaphjO}CV6 z@gp=D+2RH<1Ixt9ARW$O&(>LOleX%Vp5K%g4O<~Rgp72ZoQahC=zGy(w=O>Pq1slE zp`h}IXTPl?16k23Z?)^fa=mj#A%&?0*t?O=bD^C}F~mdIITuizTO#w<(`BzP8wYgs0`IG+4MxB)}~VabVt#XXce`a0+y z#2M^pSm`(={lu%u1pAS{di}{~q(5{|RYH~!V?xpdcSUx~E-`J4)yGor^#T@Rw+iE_ z`F_n^;;*$OncMO|h)MiEGcvV$icJ(ZCzK41P&bWyvk-83x`Po`NM+LBIFjSW7}R|C znS`WpO9>Uj2Z$f~4Q~{)2&p-0u+J_)z1`Q$LhN0q*wa<+HgD1Vm$KUy3Q&2}jUW2M z6oD8_cHBoCchV_X+n2H>EM~J@X{%Mw)&60&s?EJ|Oo!<11R9JUp^I#G*%= zx=KuI8zkgl+Frb1Lz>B*q$oDGmQ?9EfQB3Rgo<%4(w%=oy8O@Uh4 z33C*u6(7UptG?zpul{oy^XZfKy|3TDd(%$~e>a=uAm56_cF85fDaM%im`#nIbL|e? zv?B6f{6pd)VaKNZ>QGH$sjh>vaiUXYFa{2ijgVlwM$o1$Z$x7p7T{|a|2>Hm3X|Ax z^b(;yoRA&(@PPV3=0VMHsxexM6IEd;#r}vJONKYccFAts$@@7|8;w)Q)+cLXFG)_a z0Gr{upr6x9WSfg$rqv_(P;H&DAqyBW`cuqjKpkOfWAI1xh#0*-Q< z+nBt492m6o8G*(=CNgd$`Nfcor1g4eGKY@|NL6`8fC9La-V9mgRQB@Dc8MwSi9s5~ z6t!mJ$F4>y1LI&spq92%@VIh!vQLEjCjiHcj0rNRcTDw}kS1-b7+dinl3)#noo^(D zwSs>Bz$Z5|ciUGM$x`@92?2?x5C4VC4R4CVM zJd0{RWFG-+qf&JjJ=O3I3d7|QF`c?;7c8jQ=SpA8m1@3g1Xl#-<>}?$`!GKL$55jA z`!(gP23TB_Za0-`kYe||BbE-=n=(*BX-!}gxzrp1BQp-l-;4$G$HIKZ0q3$iK>^_KVlr9TnHg|)CKPlbTs}O>?gKjvP>T}iU zJ7;C6jb$nZZ<$K0LK7Bq$7Effo^iA?-sc#xDOdrs==KldP;YJAQN;(%r2r+h3p)p- z;D=LlDF_(s=A0}`)AwJLgJwF~Jo^0SP8CL< z(d-f6zb{*b1^LWU24zRj>|yD0BJwS$MDTUFtjUic=+9&vA35StkgIo?-((Z_78-<{?N^CTAFj*?MZ{E8t3c}M&c9x5E05n7EHnHAZ2$DvU%u`;&040G|jeMPU_0Pty;5Ux2_-*h0t|%jf}yT~e6A7UoyB zec*&X2X5=ywzHX38l(`|(JA73v|TtR?Z_uQ&l(}}g+M=U$Jyz3!>|s3vus6}G_^^5 z*WKA#@Tsu1TQT&40!3aa98J}eR z&9)OZWt?hG3jP9u%}8+Q?- zxU$$-MFfMT)#V6-D>tD+IA5h`_=@7ajvJ30evh6s=D_Xx&gBo_8?OrwnEJor^ePf? 
z7t{ec2l8o=w8Y*|wTrw*1F{ox?LZx;y03Fxjc0(bL`)2~KrRWzGYk<#8~Lt)PCW}#kRg*AS+Z{ndoFVN`=sf6 z1Bo(N-hELz`;)i)-?(j6-=KuG2LB-jSYnI%a_6WFZye4Wz>lcG2ptNp#PKo`94Yg| z(KXVHN0XzdU7|vY!F>t|=f`2*a|22dGkEQZmwi>KmLnuW$SgB6XNc|Z2EfDAj%>zU z*`DpQFCw>vqEM#e%YGvpqNw3a#Y3lXvVamV=0lmiv*ddS!72x?+S-z-B|;E?dF&>} zlUOoz ziicgC;`)cf(d50td)@u6EC8JFC?}~~I$2#h(q{-ik&$gp6Pl!j`G%yV*c-{#){Qjc z^*c`NwfRT@CVf9PCJU$$V+5F(Um%?N4{t>HC^aC|e?j7cg(&(a-0EUbRGDp=tVVwb z<+tjP(fPGjE5BQk0J`brff2&TJsN~gK$Xm_O<3h2-;}uOV6lpMPM#BX1Yc~+6B3H0 zA{8O2vMb-0fi6!J>InX2=ms&GY8akC&<-AlQYy?6E0`+^ z-D>uOh43ws01|l!BwxHb_KwYD5lq+K9D;lz^t-no3yrhl^^6G4y8wP}$U1Cx8#ua1 znPk*hRW&snQNu!6I*Uavxpz0k^KgREftA^m_U_gLvlJ-}x47NqgE`!vzSAB)d`g}y zQA|?CN^mY>w08{CWEk=`CS$Dyy}5rD;t0c<-*D>jC9fCN&CqEIwn$6eS!Cxtg*1e6e z2Xa&N6h?;xWk34nAn<>$tS@eOTezpViF{d1zz+!uX(o&2jK@L}&t*tt3PuZ^^y}Fsk-m&BJ#Y=5LFJ_2hiJx&xlV#9eOcWnVvcgy5DuGqP~!Z z>{|jmM3{?fK;h!=q`~;VH;^IxYkHYH(o;pGexc^gVLEX#4ZHht)G<5ZFDMO2gj8|= z`wFIrE5tF`Yc1qe=RyWu@OWC#b`>`;+XHi;_dOc){`c&hz4$w(9lwR zwR4Y+W0`fY-WA4r@!`}|B8yHnR>KA1;6hYoTs^G*vf#(7g5@%nbLrq2d z29msa@vjML`<=_bH|g0m+XQcXm3PnI1Kbh|L5R;5C|c(Hu83eZKFW1y&Wa4)HrJM( zi`k|qRc6rTDdSt(+F@gb}F>&w3tNygHNSV&WxRLDO97=cyu zZN!%rCavP%%uW&Qf{Ag{ubxrnj z2^D%eYIhKF>L>fLSSZYp#N12Q6c+P+!HD6l!Cd*-8Ah@U;*km|#4boc&>mD4LK)8G z@c+0vng&w)__mS$&+7v@bY31qkAuN{XNCES!MMeOLCX3`yW3g+rv507Wm%WI7%j6_ zO%rT@obJHy?Gh@>Dv)3o_j7OQv}(9R-DK}L!O5a5wh_hOMN9|6LxE6`22}#Xz027b zdQ79iE`Kb8$CKKt&xJr>PTJO$S75k+LCGJu*9c)(MvZMFWz);rV`D9X5FVL$@4U7d zL>(GR+bp9)`38&7wc*$*BXQ4+^0`cu4I#s^*n!4XcQalS*R z5IFuOp|jl)IYrs~ffpeH9PdAT!e3+(=C~^0Ls}ToJ5eti6To90Y`!tZBh+WLm1ksW z3nTvIy^vV|Paj7V0Lc4r8oyPpe@XTr*MF*WO`?s{j2w;`7soq?T3Ib5^~-y(*ASsJv!Kd_2t zG7jv;-XcInapaiX9R*d8%J zK?bip)m*Op$;p!v`g|Ct0gaP(>Eqt6W29mAv|{BiY){jWHRThygYtP(LVqTR=ALhR(cO$V?}- z3hoU-YC*R(t1p`BSNmM-WOe(4SZMB^&}VTH5~C(>l08r>fQ=bru&lmk5rV#o7_0rC zE8E|aE84a^E_sNel)X)%%>9t2Q{IH{Kx$bDz#9W4KF^==3$Pcb4aUG2bmC0E_HE94 zYnZ2fM7Nqa=Jo_0BP?fH$=ur{VIx>I@7__X8}mRg^sNVyNmjH#SyXeX3xLUSh=S{3Ca@QW*K zc&27$_?!iye;^y$l4d#80UX9yp7^$qdUK<-Eh{vaO*s~`l99NsUwcO6kSQ)DVz%@I z^dE=502>0Y3LCtt>c^;=(j8$k{%iA$>q>7P6RK3kHPmh2*cM-6_BQDrPavkBA~t|9 z$4!|kp#!0R4bFX~AjnSev#O(6Ept>s%N!d#gB}iuu*psSBdm4PL`FpgnTEt9XJPOX zdU`tY@A$DY98@_rr{8?>^NvbrN#Vh%S0K};{jlwL}?(M~=3 z9#L6PkH_xVD!|CevI1|Xx*UXW=3*N_48u(Qh(7tGCY-jA$7HM6O=e**VGO{;1TKsW z-vUShtd7IU@anL5eV07Q2sPWdqkDC=z!&_p*y4q$iN@}BmANJ?a=G+L<1jyw?325M z9LNhvQqD1&tJz_RQ4Q;(e2J?jBr`fDbK7?}ziy59gtd|u2F6vg8cG9;;FUX7@l4Bl z;QSzDP+qnC5LwVI3jp#@zw@2zG67OdNF`OaIZq<-UJM-9g@Iwp7cJ2I<)Su0wurbe z5GNv1WHgUO6WtCkO+ERAyf&zZ5!(w%X01}M^m~GGQ%2u|kr-fKd_Y($iiA(|>?r2I zvn3+FM1s0r-P8L=1Ey z#1Fr3E-QH!NhC>*RPm~XvKm+EoJj%BE~^f@h#cNc3zxwB@c*&(Zq1Qh$DQx_0`qv{ zJiA4?yCt&uE{fC?fNB8U02)G}yD5>Bxa$I-fI@AkEZCA_TKB7D_36rnJK%7&W80iq*B-c zUOO3)owtIqpB2b8%t=*7t4HKOCP#~# zVXWV`gqXs+>b7}S7_M*JynhnKn~O1SBmO%32TiS#>jEJEXf%V#QXx(<6Kp!Na43K# zB0G7LKF&#!3=jGuJdpx%0hUP?KQ&CE{K{e=(;sO}R0?%7q@j}#;~(|{SF*a;6l#Js zMnAkq)f8zNm3H=?yo}r$`dUJssSTiE7zzIvJNL0&A;RG(F^A0LGX~3+7vezg(1QR> zCMRXy1^~npvsrr+U@^l3VR5lX+?&$cB`{sx zrc~C#o2K_1miPCB*i#NmGkgj~wk+%;dkyMbnbbBAY`gC#GX}$cs$6OdjW)*385G4} z6WMND=wfD@_ylXyd%lE;#D%oT`!JDHxRt{u{jZ4;T5k4a<%o?Y_)u0e6$lRz?%~60 z@(kcN%PW7NLEirj-CV#67D5`I+YRnS1-gptoic2HeH50}!zAMx~h-khv#`yOmUUO|x2#g$=NUo943-@NNYw=h` zjxI%NRC1h09rgOELf;}epb)v0t(pCklfo)V5ek6v6W3mp;hokg;zIx4@g>H^ACWT+OBb1XLc(IE#Rr824`L;9CtQq%OzW85!))#mc5yfg^ zo8+qJ9bXG7l#uok#ZKhs3RoTa35@bJ`FS||Bo53rur^r$&iN<<=^+` z(!DG-yNj1xwvQrC7G_YunNuAaHpQn;@OUZdn>9%FlY`ku7$V~0#2y$1D8vm7GbL9f z0-zzn^*~pBt@CMi5}yHE1Hk)8ev;=QlwO`vW>4rV)SCTEgs1G!-VDgYMm`jzASBmn*ihLq^jJT{^uVeT3Ia+| 
zDfDm}gP6Smn$>T7-bbRDr0lDcNI8JQ2boy3gd9#NiAx8vmV(kyo_c#$;Lr~ z#UfX@C9XmLlr@0mJa#%MgLbGy*%*+SCK#sW0P)3U^rVJR;hGym9$L0{r#wIn5opbK zSH2)q8={g~#asPT4>^u#3m>VOy1An(b{-IgWV*O1tie#jZ83iDb3VJwt_>sr0WR;43nD?iHln%4hx*Gp5+O3-Ud(Y9JFiAiVqT(lkYDhTIr383ji_3Cy%V*|9tQu0+3nl<))hQ z203hui2Ix<^9-Ap_rtpB&4jr*?&brKbUrT}MFkw4asKGCEe4XidE_K_aL4Uhlzf9+TXKP+^4= zf|GoDTNa<;!&@@*dRT(!xHsJK<+4B^p%HF=jm+zZ?T4?%pTFjtv|m7s&n<;5No?hE zG4DtoO=90Zc4p9Gc@>ox;g76!#l-%81!VwA^*@8A^B3{2IeofGX&WASrv+CRVKKq+O&Rc2A!nOr5}KDAGLC!xo-fZlkFzhv zosFjN4fc>+f1F&|{>rF8zgys!(-3j#s)-yDQmvGX_Cxiu!q-D&igKwv(u}I+2$x{6 z97hA8=|*5#U_pEeb8*0*8)xI_0?~7pJW)-Wz3`dr7dA(mj0m%8TB>f(~*8 zo4)sqti=e{Tx;Ta`ju?gw=qDSNS4b*6C87>gSNIzUV)3#7 zvfjr|f+F)!@Tj6mHtLUu^ zYi=p{RkJ=wdS=TFTTUr#M0)_Wj?)8Ypg?Me`@*nTv~^?ZXqG;RV|% ztFRDE3rPjndbYjw53`$w@9*^sz!2n-mehN@i9X?M#iVF9M?HJ(`cydw8MefuLhFmz zFv4!c@B3BI>Fl2iJn}t-Ii$oC?}|`M;0O@vVr0;k`m%3V;SeOct*F63b{Qwg{_Hnc z1e{QxeOb1g!WtBKa)L16j?Qe>vK>>VJs!UZj*FX+nD9OW?G42ZJHzK zSzRDpLc@!o6bUfPz8l`Ca}pyWioI1fMVZMwR=p^DwqWK!Un<&xxhO$*(O-OROd%`c z))s-8$)})$rhaiO^+T6I6&-~x0}`6s(OaaydyO?8YzyCl(8VfeB{EnSMy_qm7{}fo zi(~I5W(9SWRO8t@+Nc$#m!&k6MXFQpgP0NmC&Sz5DK?*K(&Xxq{a@+zf%#>iaxY`S zR|zP$tIrs+3;@SQQ3V|@8W5E*#8qRJyQl^cJ_2LzzPcw_#Xzu(^pnLQ-9EizLqk0nMcL^^|l2#^3X0mmta15f(O zpsrkb(mP>XjWaBYa&M*b_Bj%{6)rncQu+2IRLCD>auhqF76S{LltmjN8lB3HU;L%V`S}u&g`VE!jAG;t_4?u$7q9t?3}k8cwqFD* zYBwk5tvd8xyaZ%gR&N(JfWRH{PIFyuDudaX!H84feVH4sQvPnLNse_+Vk}aRzs}G$ zdf{0%h08>r`8RTs@pM#O0E9u@mQ>a}F}@F#xqtiw9p?mVhu}wjCiJyPNfg~@hR+2O zt#As9*nbi=glazNMM22fn{gT1l_CUq54=g1!nha-eG20fL>Gq~8Am=t_$Td>oDhRf zZa24_Z{d#$-Fdp(j*FY|6%lW@3(u4hA2rTozp$0zfWp_3ldCL+{A$@vb8#zD2?d{E z;nr9sB{c(p`*QF14t*Q&?q!7LdKFqEEv)w^B$F+~brchc?_A}z=baGjP7p4TAu5m_8;pw6hOA;dsXk;)xR32yJZV%8H200_v88B5Ho;SG~n zDGS(Xg+fJuts3`~{X`KMqism%Ph7itLr%M)6GEt=wI*D`-@cPzP&D~|QyX!dK_s~%EnSgWo-6J-HVqx75^!o-L2=YQ^8sWDaK<_(D2CIZa2x?sUt5yztK2UBS{O0 z%U15b2ROmMvwW8lm~vQ9izO#TF6zsvwy;2a4YDB!E$pzb(>*k^EKai{RF^-{8>)S5 zS7vrB5i%9%QmlF2)f`rLuiraya{LCOu5{ako75^mQPKlH$V4e~5mQ9CFc{0l-}?q4 zE01EgS8da*VpYsy_7+xbH7prA!Z?PXOd4zMo*t>?q%%>E|2<^beqf(?eI?A;*upps z-V*y{r5itVYNA4>?~Sb<&I?~!es1RcSytdeo(!wdBgrPE_u(cP#SpipQ6PMghRZ6) zdoy#1*Vp^v`xif#^-eh8Q2T+BTQjHIBk$;!g@P_4Y_lzt_YdM$TU^}mJqF!=s$>s`jy(YKDT+Aau&F`*Aq~Fx zUwj$JB@{v&s_-YM^gofguGg9tX3=urQWZ{j=5{{>-yEB5JAVGD%xZ4vge-mK%A$i? 
zy-4_G+Pg2>Fhm5#ya@rz6kd8phIzB_sa(mh?8D>GpJ-wS5w!5#%yh;=o*$cS~LJ&Al53y!yv`UbZQ+ZmQCU&g*x%K z5#i@+hcM<#yCHrU={CVSIfe2k+gh`Gc%Mm3^=25Bc`}eJry*IAoISsb9a>!VM5-YW zxc|qOrlvd?0+?(|Yw5O7{R(BM723zub(u-oTVNRH|5$q1xxM2IEp0p1&fdxiM)>v)-U>goJ-%RVmWSf;Pi(?qKah!p5H5qEf|37;_PW5WFYlXHf&LKg zl8c|_>MLcpNMkQDu)Rh1XT%yf&Sz5ULQ9_>xFUS?~~ zN2I|y{vh_M4k&&pOPVO3KmHUh3}=2HsHYl} zSiNJ2LNQ?&HI3W)S>d>*05@M2i>i&4yWI~W=%3|80+vENJTXaND$Xd^qLR{-6YoTJ zP^84+R3J$hKuIRtA0i3$mBVn2STvZroOgQdkzSCK!?3tnis0BM&K?dZpQ{CtKNgd< zQ5_3!GRHV}zObhy4OYg+Nw6ph`JzsFzlz+DL}e)cbiM_cr+f>U@n`3s`ik_s0eC>| zAE;e`>&#-<;zEOp79c}!ho8;t)lD2GMWo2+l5H7NQ9j8zg&R6haX0Of*TRXCTf2w% zRXHZXYDom}L%OLceldYSXu41dTNm4qisP!#LZ0oQ2{2_oJ_414yD&ZQ5i;4sI+RBu zgnUnV&FCYVAa8)4+f<3Z3baFAc_*LW@NjZ>U&fb;XNIoruE3T}I@B93%ghRfbnDp$|dZ?{3HZqOB z&2wZe-%Wmjql%M^fhv?B9udg~Diqc!uLGY8TP$Vg3frY6`tnui?<*f8A}4^g#8F^4 z_9l_I?Lu+ddFff_Hf=4&xo2i&xdibbb8-5dh&m@Xov&4cJR07JB|4AUk?$wGJ46d; zgy*Gsc62H>KI(@cDe#2L-A9{rw2Yt!Ck+g|NRVTWG0v9S=nn~w_W5mn!~eCm>BDEG zCI1HL=>TK1=)T`PUIt(iCT4!4g&-r^z~n@mm)?X|Ihux@FoblBpKnS2r)fJ$ubmM( zH~zvGju&6N;q2{6P+ca&vRYaTa!cotH( zp)TOeE-CXKj_FHZaj1tU^q%PwNgRwdjwL?>D97HZ<)u{DE?1)sG!P*jH30R%V zmh}A~mkQd;A`cfB#3u`fD=&wxSG#p!7EH1p0p5)1F`t_N)8$B64eU`Qc}IZmsHDpE zhJy{`ho)?}-)mdSNufcEhByZDRPc`kUmSk^3twmi4TVNpiqOL2cK#`euhovwt&U}y zAWw3}d3<@;W#xitK47lH@QO}$gcRZADg!mF;0bk(Tw4<&>Wj?v zsZ1{fBTe0AMV%CSozOOKHtw!%%WQzw6fk~1)4SsQ9}Hrcw>cdITwc=AQHgMF_1ex_ zj8C4w=PalB4cTu}|1&w+J95}@Q5B~tU7rAO3TCv~!{6@n+Q~OGR{w}gJ?l*$SL_hG zwKpQHxJt&M2F%_je~pahzE#Xw%=QZiw)6RGPC!WdL07FhORP|hG6+6eveHGq9M(v# zBTFZp*sDF+MZ%jIX5C((`{? zj%i_d%m$9fWmKFQtA1oAPydJ-cd_k2dgHdG(uF4ZNwTgvNStzxd*M$o@V^Z4CVa|} zCqk^J*ahkn`9mvZSeB0xEVg9W^64N-#m$?fZ1iZk zQ4a}~#3nV?uva8+!b^gcTl~KtZ)EfYFPHA9M1{5}BPTUls{Q3xg(cAmtRCA(q=%hA zlulo|{#Zj-I7d!RPe0*AV4453d`T4oVNLp4=dSb@Ew*1T6?lG-m?X9>P|AV?eWh&;3fF}>eSKW@ zG}29C)z)uFr6EMoM2*erTgn~!Z}Pa20IyD>t<^gl!Ye3WImQv?X%)Wk*ndL2mzgOt zU6=-Z{(oV&Kbg)~6P=a}}qmT@Qc&eKYL#SUu`tZqErs$CJlFcj*$=8Gk7I4t|S`zQb{Q>j* zvF|N)D^dQuNdmjGu*b*_r&efDPO{10CN%ozymN1L)qh#}=ZvI5PW5(?swxfbTI&@E zVS^j1oDUEM7xg4qKpJCQ701DQ=p7E;jn!L(hlWCFNj4$>GQxiXY z7B;7hn6?OfChU~CJ#DLm2@)|_!RESe_pl!cnp&+5A*Dsq2ZTIuIzXO~P8W(yAD4c$ zu%eNMys%ghg%{h(h!fAviNmm0%&Drf@<$rq_U2^e~#$UsSku3h!if3ufrPs#Qr*uB1z zvIXTJrT%ayOd^|@dSrl@Jr$C-+ixr30iq+zakdY#u&L&UuMg|>Ge1i_+rie&!pg`j z1}JO(TzkX*G$7WiEP%Ik0A54{5xbnY6m6Si&F+|A^h9b@RoyV=wDv-%;RwXP&EDuV zHBTk)zZfsKu8fuBQ|+n#!Tfg_Lu5o1Cy*a^Cc0ACEzPbUo++{d_|$ZhXYa%nV`+?p zlOUyi!gBsZ7%A5ZzAqGfJz-1J1Ld7Wh7``Ab-ago=T-aeB5UQ)5JtX16qbrZY!X2) zvd&%0gsfmD;T%D$8i4NIi42A;iE2nu9X0y$JXIHMj3x?R2Y}=xuT)Y0% zbJCot%fV{;V4G!tlzPI;Aur?g%&E5eRY&&4lu$z>`5b0%ndQlUF>Z7;X^Gg1tclv? 
zk&U(-^`GX|PlTq4MvuBXvUBAB!jTa+s1;<6d~G%?@r{$XT}g6~%@LUju6twiyzV5- z*Ia{_hPLEB%x-38a}dOW^I9g#6^Zd$ZV@}&vI<~&eL&TtS4016Y-~H-)`QvmYB+)R zlx3OAp@>t4J6clTsINq*zj_d;x)p~E-=TL7s7E6mVeqj#b4+kC+bdU{Iea;4t~AU& zX19NVQV6&aWw1*42)efSnOmT`1eVI+H9t(<+<$sibz zSWaHF)DKXyp{e}=CWS(vhmvu>`3~_}!-9YOZGKn#&b$%Uf(R0{hb#YM`7%kI^1DTx zylxdLV*yw)%}LEqDXEucR9xV8^Y(@{fj=lFA|hEKOtbW(^!rW3PAOeK3~q6<(G@-~?v*_rcBnmjG4J662WB-`5EaR|8Ng#~U;0fBt^Edt@*STW#@Nu1VNwP|lxZLEAOHJb^FM!|L71O_EsyhqH`B+C zd{e~ZY*eVx4DY|_33<+YfxiTgyh+TmjK`9#NZ=7^^7}_) zwdCWFj|CAXf8FY)g4i82yNkaTF0fEj=zxbs8>&}u4`&Jckm)Bl- z(P@<64Ga8{9U{*kcdj{j{2k^zp4NRi3}8ojgQ(?fBITv6en8>*h5v>5$CmOJKv{-< zaPlL8q6`n+&$okDZ->amk0s5tJBR$o4ZK<9Fte{c^nGawh>ZqN{&JGQPB(7L10dfr zgz6+B@GVIC;=8_%n>T&T(;{Kr7WPkBBxI`4#y3K4LQ4&!z;*tUr6z;JUINv`3uf99 zBEk>Px)=xbHadR2S!;0IX;p!gS{zZyF`{>$Dw|3s#Fb2|H#m4%bxw7}f!xksi5a1Xs-S5tBeP*${UCVtH_fc=}8 z3EwTF!x}%L@)_gt5Hs{D{gX*!7zwWVivHD%j&KN@SHTEj1Fl(Y)!yk*ozvqz`rD}% zr=zTn@>v{va@NQuy0KEMI z+KH45&t7@jU!e(N8GV-3Bs*^bus@KF3@eR@kWtBe3Hi=bvS;i!*R#aW9rrYFYE0?8 zIyed6gKZGvvSU=7tDD3Z6SXOWNRfLifZ77qshE`A+hkJ+_D&C$Qorzva^%pjifAv) zemRS%Em*F8^H}bqna)|IKq_$-6E}i&@6g*&<;yDg3z6WSBF$RnV;o2 z=S3ADt{Zrh3PAHh3~B(zsu_N3!*&;@5-@56w)l2i+2kzi+zKcn)BgD42eKK$dYzpMHIE<{>sjy!W`i7z2SBmzy2+5?7 zfo=S$qh>3P1}8~kb)e6=7cgCbvRg>saX}=Cr~^n$qc~O+du6Q#6ptyIX+gx;bOmvJ zlsCZ5!Z>OUW?9?@aXW8?p^zRM%ASc8lFN)bZr!*8Hkw=VXo0ZE6M@3zAnVK!4H`EJ zz3r)|l;c`tl~dj9f5kP-cqR|xmnG-PfEs(Y7as1#wTAD#MVhKPI?4ZB%nC$m40;cs zX5nY^M0I)CZDf{mxgraiy5~wlHXXQ_V~Wu)Ro>nJT!8d(eoW-4<5Y z#d0z*^Ng=BZ(RA7Orm8eDdaa+teh~4mvKJ&skc~(Y<#2^&Td4MA`L6G5o43@=h-_7 ztv1hqVy~&zK=6()(IzN=Sg$WV+pH3x)j*bQ2aRAJ68VcM@1Rb+H6t@9S7vt)yxN)Y z0*3-93ADLBs6)ZV>DhYpskcg@8B5h72a(d3(@}6KWMLA;`P+Dv;Wfcn8B*(1zqK@! zM_MNgCbzRAL!>}%8jJ4&EsV#3ujSXq@?spn{}~*H2Nv8Wi_^tniN#3%a-Xcq*t}#W z-L#}WcG}pl3eEVBKlmFRn%OAjSObRrLk=t(R2Uhg?R$v#y%6DO``TB0pGVkBR1%IS`gbDfV+xeVW~v7Ma}t$K2M=Mf0xH)W+R)nLJ+J z43A0kFh$XM*efsidNxy4b>ClzBrK?Wgop;&$8`eB@did(-2`FiAS50>v;4n8y_U_1 zYgE<{aX2uT!`T}$?A8Q6C~uOfB`75sfRSG<=UErLQEf z+)fTht)>~Dj!h;fr=7it87)SOyU16LiJlF$D*POZtBN}8r;61Z_8y3?WJUCLk$3ae zXBL^_)RW`1k5f@5u}k96Xz!?}x#Uv$K7Td&sz|{S+e(puct%Y~PXD_>@Q*X%{=TA2 zFjpyXs2bRyEyuf@*aTGL;`YrTm6_aZls>0&2#80~FERlQ@c0IeF~bsLV4OCoxsH8!5QdtZrsvL41%yxDTcJ!5>88;1hy%ecS{K zgpC2CLC%b03J8c@|N5}wyMX}lKt+bSGd+^a-6+)kc)rx^%xEgsKC^kqC`pVE<3R~h znV=ccAIp=y^d#bqU_^M>K(G5Inr^LU513v~AHh(p{)y^oi}Pe4KEJhQqt)~?$Lo09 z`w?&Q)n|R&mNOX`fqz9nU_K`)kDQ5MzRK8V!of_Ft)2={Kx!34@39$vCJBW0KKXRO zsrF@s5m8RWM?fDL48)t3X3P7(cKwL4T9rd78U&6hqUVy}gR&9T@ISzlXxus?2&VPOWnVc2evK=*PpZ_&7 z;Oj+cche^fwB@sBF&=%-e~|8R7m-iY_7R#l z1`)=;!Y&aDC-!{Pujtj4Io zZ(_&{r3D9d6Bo4KtoRN_RC*o9-8K(tj5+cuJ~;2yZl>M9WZcWSp6(9K@EG%8GpZmA zAgXZ;uoxc(8)2X~`>`Jwev5SP1_VIC%7(Z@*rj3Ibla_Pu~02Z`?!zD4&wvfdpujl z7I|3IN{&L#{G8b^A~V*YdV42Cu$hvl$`*jLLyncNAm40Z(kv2pQ>i=hS{?Lb!N$F) z>0~Qe6r44&btgw%m>Ek%gtox~Wc%q9kE#ses6GC`&=H%kdfeu zD8M;H8j)9~x&*kj6Um2~@p)_7VHWX|F*MPOe57Pfio4!aC8rQxTgWvT_ABiv>6Kv| z;LR0Mg?NKj55$izc<5Hzg*XZ zs`+X|gB~eB?;X-UPY;OmiPXpxHf)cNgt@sTK#$ezC&8N!Q~ZEXbCK+|hwn)m?SVs<1%g{LER}-I_Hyw(-v@yAKb83riz5Nv z-L?Ptq}Uw=DS_#%V{~+Q{+BZSF7+rePGq1KFD#CtF@9MFJVnT#|E-}? 
zd`!GRb`_;`^N!P^3LKb{gHKHj9~rlQ=PXAo;38FdvW<<=dm+%nva|RSBM;Bs_h*ct zbN2Z!RM-0$7RZ_%SyKY!gvu+hR#S66*f!z?9A)g zr)8#_#6pvcPYy(tf)NFDNatU8!8eiBeb+F(LKoNl z`KF=7iHytQujWn){}jYQanIkk)Coe2n{{NoeBmjd4ESVO zE2}P;{2nrgTPR@Og^fAXZ}5l=AoEiIFUYU{syzuS0$~JU>S89wBR=*QQ*ssAQ}mjY zNJ;kekY&u;#l#^R*~Y;7b2MIstLPO+8CY^`IZ?PzY+AB;W1OYF)w55uD|0&8R{NA@Z_{yDZx{rfGXj+Wm;1MkjV;d-&qR07mw?9s9(s2bECeD99_?yd z+>j~VUuM*i_3RBE{Y=&qlM@0AD!E#sn*HI4I(Ko+2xo0>ZQa?D-<1KY<@at~4TAPq zXe^U4M}Ce!e_e(H>~!LKgIzvm^fb}j$k30+O_g!)S=vi!fGiiGUjydNh9Lu^I*kId z3{Gu&{rT^mdKge+O}GeRJ-(-MHPj4l_r5Pbd=RU;%zTN1rWvP6#2?+#(M}O@Hpmf` zyClbwX5eHwCUP#CWj}>2DAq~Ybfb!FV&v985E$<&evqE!6Zb*${YH)`n(lx8x-Sw| zeH9@Qtb=6$#0*moF*MQ^;p?)Lvv>TF37F@<_epUkF}~#7)@@uMG6PcB5Vh!kXyMXL z_yTY1?&eZsyD6K5Q4#y>KuuF877kG04l!L95?ei_CTjKorHelm2zLIxc4yg#V79tc zp3(ons;zFcnp@j`wl03i{ESwA>Yq~q%~z6`oN%I$WiMYSR*6D+5Gs&Q5*|x)zF_o6 z`jlhURSY>CjDzP7LS?{Vycc~_jzW)13S#m>^HmosCdGxMAG@~0IXZijS`Y|X!r|sg zopGSaTHNBE?J2@NDVp*Tl2VlQ#eFT@iqAh| zp(yz39KcM}xMXC8SRBkTLrvHXKD9Mm8*7W(o`0X|PlWD?IBx z`lT!!jgrlj#rr^x5xj1PBcW0Y!2-#c;;RS5*8e&CD1v?Ib)^WHI;uP2b0#NAkS*2+ zawTTJi{%Fo8m8DoX>&BKG_3U`JT>!$_uk8CFX?@8PgkHb`#^ZmwHyAUcS6E0*+H^1 z^fZ+gyRai`1kmR)$iO>AGV;3)!nx_cSA6EIvwExGzuQWD@Eal4X)3 zWoUc&4Kxo2HS{dpApjg`ZtzdyHG{}oG3LuP-9dV|AsKE`GFPvg@rt;Vx^F| zlQb{nf|zPq{?MjnLD;1%bO0)H-eNXzCres>^shcLK>6fkIDcOjA_a29msV5O*ALZ8 z(xxTkMb6wsv*52Rua?-+kWQ6N?^|V9>1q{eI*YFLLB^_g%$}oi_)Vk(`Eqfq$(`)8 zpzjmpbR|$IHqJmRk|>?*MOY#*5>w~XvidSvN?M;R{Fi#U{)-;QRXFtRXk_(-XMoha zjT?Xb?mx&mwxzcZuRM}b~p!c?e0Y!AcF6W^9$?pL8%Zm;>W zJm5bc3vt=IDsYlsy%0-o7isoVMFCv~ln$Bry?T%VZJ?SlVFZgH`;t;4!Q;WVt>jqN zeHQJ$4BH0tTWrdy0uuhdoK^Uc#ZQhx4Pc>>jpDHKIn}=gimhB?cR&mq!TEwfK_AGX zjSAs>ycfX?l=Ya(vA;|ufrKG{v}$!$Ql`kx+a!w`GOzD>!vaBV06wwHs1a#~B2Naz zBSVJ}(wNjGkZrWX>~2m#*LN>7nhONYgfyB-w?SO*`z?cpL!w{xAq+ z+?x~uKPD-Z3Bz`B9-wDm)Z!Z5*{^+qn6j}QD)K$}lT#wv%|u6(&~eH}Ho0+R>un%Y z%jfG2FpG1&<4HwVIOl*T_9IG>bmK_?%zg$`)9{nx2+^L$C>XL07Ae{lM?dOCJ_Zd= zlqF@AQ5^=WV2BIZzhm2%Q33qgXgWulhih4OLZ?gW+=3&QR=@R%KWZRJ6yctWPk{Sk zp+X0giCDORe*w6Yth7RC2o(ga6`W+;Bj&Ulth04`hyzJjFi4i18Z7C(@~{iDP!Zn@ z_=iV@K>>Gi+(i2YA4Ig2riiya3^l~!#C;Oxy(_$8K$vBbNT<89y}l;wXo5RsXtz$R zgYC#cP#%HK#cc||8*u6asqw9-bbxsy^Yp9NuWR`yQ6*`f>RPhQmY5$?P*^p2VTV$R zPJqchY>7rAnMTqkCZjWdgAdkcZ(9_cuL6G&3gp-9hY<^gCnoB_ndt#dmUxP6A>r*l ziyLPhEh>ALxacFr2$p&TGuaf9FlFNm$ZUCp$DpY7l=5d_VD=;0P#4_MAS|;C#)4fs zq5ZHMT-MH49{nuTvPW-(I>$$Mo4yoH6OF^lKvu()o6=c{RhkVEB3*g-D}3AuBJo+V zGD)iN!}{nivw?sbB`FKVf-6N*yEK3?X9_8uDBiz_}ff){_6^W754 zr|~Wre9YwTE6*a_tV3+JD>w&R5)K-$#3|M??}Kj^=h*;#+N}6UZMrQWDT-pG9M`M9 zTo1qX$~9+hB>4x75WQOkUzj>S%m#zOFK&Yp1X$`^c`%y>NeG$P>cEO@g#fSVSmw-dAKW|@{Zz{4>7h^XIAr8#^I@E)sb6d-!=Eg&8xGRBluq8(JU z#)_|(+ey~y9zOhb2n~#V`EE8j{+k)!WPdDkxViX+miDUP$$+vy|Gv*^P))Tyb5qmO zK9;Cug)5G*I=NEUtyo|vJWF@Izd35ikHtz$E{x0EcO(Os@nYMq&ftdrJ=0q+Fbm{_ z=~((GA2nBn8b^~=MLs5{g8bT>z2U!-N8(3LtJY1!n`F$fLgUHt>>b}l-ok^M9f17a z?FHtVU#$T000F~`OCmg_X+Mt6<<{ zPKV~BmP$R>u3h)Fr&>ZGb;2A|{xWeja=O^-reVf9Ht;Ja&kQ6v!^ zgW!|VE|70KSQ>g>XiW8PSUPC~YeE(GxBXV^X|flf*!i~90v*nz$YiG5Qv|^}0+Tsc zguz3eg=&f6GrMTk*<#uMd$YHFK=6o+!LU`uzpYL^>W#*&&dm#{%1WdJGv2QGDK1`> zBSWz`Lm&A2P3)O8<)ICV3rBJu}naMXj~r^GhIDq41hI+Xn+-I7Drj2S)@#(h_1 z%#@HMblqNeiseYS03%JA*JMlCVHH}YN3ff&N*DxQn^2&rp=2!S2Z>Q6!O!3tse+_& zxS5iV`HyAf_v6$Lj})%ess_jMT1WM~^S{Z_YA4lRcuFx{Wro6>6gsxq57CeHLM$61 z=lJ>f{IB%9?{RDRw)5_~MG5%aj56gJ5Zz%|Pv^xKwtdl{o2-fVIU(}h`CqG*E>Va|rQ61_d~KF; z)kO}zE-4@#AN~@SG$;OVe8UGcCsS{zDug?W8pLtq4NS=!DKvTstxdd9B6cn!KQ$CFk1%Gifmvn zKKY{1Knn|&nqP>}$IGtHg0&v1P-yiuMfOWXY+pqFu-FR|E{CbB&2c%Bc{`+cdzlJQ zwV3wKT)~z>S<C%2o8M;ec{ZNk)Q=rikgs zvcXZ3p&g~nCb+Jb_G&+{`W~H!&77M0AHRtvvV>+g;OZ>T8}K?LRM(Y 
zC}08#hN0_m9vf^%2AEjoh%|%VJ_UnSEnYFJs6WGt7EGJjuN8TSuo0q2$ub)iq=X;Y z!enSeJ%+eK>$nnW@qUqp3`qm=lvX47p7O=QcFi&3KkW`d78VY01ahaq^w|wNRogpC zEjA!CCCLH;;Td&6-^h7FiX*#F$QMW?*u`WG=ZVg^j(sr9L?PTu7=V5M@t^p<@)p!0 zzqYH86^WJ717zsWB6b?>bQ*VQcbe^KmM8Z7_cHi#GWo^L#Z|2+UI??;wu%U%%{yD${@U!LfuuRPjP~&M`n&nK zn@#89M;EWl(T2XW184TpE}B~SIT`UvA${)kq6i7L$}m9!I@U|2(~FC8bF1m&Ahbqr zNou|4G_|b+vBj|HGot6SDwyo}NG-@1*ahLin`2-APAuC5?Jd*~WXqcKgrq(Ul9$G~ z>yuM;RzZr{e#3PyKq-tEp)&R`p@)+ARGAO_(JwOe3g8J&V;>mf+~m%3$~_$@j>k43 zhcfai)-=62&!qVIm^$N55p)Txb$vCq;nocXt=6*>Bk-Z5Qc z*uczd`8a&UfNp&ajVsCuW6i=aqA8oLJOk@I)G&P#d>YHJ)X=gf3)Nn1I@Bq->+$*) z3bxrn4HD(!VM;!pO<0rvyUpe3Q%U^a0Og>Y>;m$4xDQ@;N=wyOLuzO8Ko<(ZgX(7Z zVpK_8-nc6VXbTNSslRckW`nO|e-$>`;+iT7tG9^Z@}baqV{J{E2GE+hJx~ArUwj59 z{m*|P&9W>&wdD_6n5vFd>jH$=%_$D=YA#4qKhNKnL9Axtpi^^?_vJ2MoQuMchYw~4 z{Hyf6ntYV9$4x2YM_766;6gi+lp8YAJns*{@YMVp4h#4{^zoLco^|%YPt&0C^RH|I)oB?~%dn z)^V@wkjYhMNe%5u;eW*~nxa$W9WvH}i<4V_fpK6q(iOg4MPwcQ5gjbwn}}P~Bqm1{ z%2r{xSfM!0B&z+u+p}{wtQ$$ZxA`qisa&8D0KusoGLwFkDFeU`~o~?sB zBB_OR186?`PPVX2nW^4a{^Gw~{6y9RC@7}4e7A7oxY*@|R6rPp>AbBz2S%ReW>p|@o$SA;70Jgz($AQ~Qm5cih(wYqWhj<>^4OfdfI`~$;tORtnkhi|G) z0!20jZu`toKMK$&K+Rz-`tYUMnwZOr@5#DjJ-49f0=0LEj%A0^y^%Es21yEu=#luP zSSTJbSDyuwz*RC=Q z5GhFmSj^6t>w*B0prV#g0!Ef&t&?P)4a+4iT)yh;SE^lEQ5sYfREi%JwI?`Q;mlno zr2w{RUkS4UU;JFw|5j}ql^K&6ii=7&GzM0=C}<5h$KcCR5s7?th4w&XhMDGlpV*79 zKIa3t_`i*^{FJi$A>H({M{f4!o+i43SBG3hZCykG(aJ2Y!|GbHPBJZ~K~nDwADQRn zk{-54{9@={3Idh={^FUaY&GpaK-<9`VS9A`W<^-`S1_j`)=goWC*T%<&~dPC-9 z1q(FAl2Ker1TmiH)_92B%%l&mJJ zQjxEB?%y1#D-u3wi5{g*J5f3H8*W_TZ`8{*d$hdWst!pQEbF~Qo*;RSdEn>ge~=~+ zoeat6RF8LaFXSv@H2m*e$~iS2X)xko{J;#wzDe!gnH(wdv2Yth;nKh9L;X-gFdgJf zFvtgtSDK>onHt(l3{^mT;95Kchg5kRqF!#zTA(LLF4X$pQeST zY@snjA#K?8j>6u@Q?ePQf)w6I!tKc(lVqHuqGn&KKORS(45}f3P$4K#natPnKgcR2 zgm-96<7tL8VIsx)@1w-z1L7oShMavm$j73wwYbvQk{f}uiMKCV+c>3iIuA)hWzGO! 
zat3skYXMMCJ+m?6AzW4As+hFvqBtJqz>Lwnq37Tu+V9!vQ~%W0Lg;c!xx|Q+ryAG^4?B$43~zZJIC&%%0_`(JuE|7B4S(-L_aJIQ+EN|<2)dZZHKJ_eS>;(=LV z-#q@ZNQP!QEL?K%hi*vyj2wwCSN0~PBCJ`+05(L7^Ix=ludu?S?Baoti7d=M zE!8umpsapP_5xz~%oS%wY>g~Fp=!*=I~=9l_5LOomUbnTCd{ei-!RC7Y2Y=7sFb_O z8RV@H$|1+gL>=w|4Pi+VbXZr(Mc0qGZKkZ zox-qFSfU(nV&Q6%A3i3-r%I97AiOtdI&*RC@F`8JtO4;p@{2e^={A)=vaF(dcN1sT z36S;^U-N#L)ma4i856(wx$~*!rm$7QI_uD0rn_OAB@j~=?0zrjm!#=0M&&4ubQp@j zIH(bFtJ;?xGvg|@ekFxP1U{i5MB8>%n0j-TbAQb+NeR5_e_$|xlt67dog7V-E)o1e z`Rj^43ZUcd-f0Lg02zZta{sDT?Fc82uPX4f+1o-#_j|nqSxiv2hp`vW-S8JzF;;Wo z%3{E608-?5KxT2ZsCZ2j8CGl=HwoRH6Jq5GLA+z>YK5n46TA;xM?P#922FG2k9XqA zdZ19oBvV*6qQLGltfT$|xg~)!`J{Me*iau7<>=z@B7I#3Q~SE{a$`GQeQP(QFCMIO zH%Oz0r{nMVo0JY!F+JrB{+>VNQ0skx!#h>a@$H}lXW^}|A>42I#~1%KXDPNPok*kp z#T&Am1WV&&JpI3NG-gU4|1pbXGOFbrLt6D&J}{Q#bT~Z>xHTP&iwvq z>ZnkXNMW8eL@NvdY*Q(XgIHPAuMe3LkXdl7$lRIpoRMKJuykhqP#2zs)~Oi6;Xr(s z1D*;!hFQ-RrcyipJO!E!Sx%TmB0ZK)wpG;1h1-yKU=Z<(YnYm=5g9zdwn*P5iI!n1 z=Kubw)6x0goxdv_8Uq4CxSdH~6Qr?h&$UwNPM=I;YxZd;Y7EZaj8vUG!kjTwGzD@d zXgH)SZ`VH8MX)mmiD1u=ohSAit5ev3`KCM4k3et3lTF;08RI`ecav6ZQdna}PDvT_uA8`(XBR zE_ZbMcTKkSc4&10Sj&O1wB#%|sB&~>B{faUl$XqtPY@bw>W7M!Ca1tRvz_5+JPiOQ zkz7Tj`}s#+uh#c}Gd3LOWJg2r03KsFHNRd=D5w0xutdu;9a1(QD`;nKHW5O&*H|Op zwNn?vUX9G|H(}R$ZEkPfZF~hvEpaQMR?c{$c0`e_;rqDo**{B01z%K^IMeg%kz;Gh#1WPXxEn+Je zrhrp_--Keh0?a7Sykal75d!9j+7k=35@9CyNggLh#jbZ8C9?Lt2b zN9@Zgc1KgixgFyLkl}GO2bR-JlvUT;MH9lUyZdHS_j%IJ=*e|vND1Fu)$sI4ec|9j$p`uJ!_s}Q9^kL`%+38mvJ|k<3 zUC{78vb|>A3zdVaE^Pf7otCAG%q@s1g~1lx0VeapM<45OD@NR8{Ex9RC^<&cDmsQEfsIS%FCS|QMvz-%Cd5(uV)w-zz>}Sk1Zm^t1N-N# zq=b2DzE8OwAsJs=0|TZ!&i)75L+L4ZM8Op5Kp)fF#pIkp8|T3 zH0|Pa!f1{Igh&(3L7qK7t*mIoWZ3Vd&T}{Wtnpk#Q1#s<--w$aQ{(!dy&q+4W^WzG zD$v?i9oAVyL)G+2o}dS4jRbcQ@7F3EiWc7bq3FwqXGupk^^7!tmK-ONebFVC(5Jpc zjkq6tEl8*6o5V@@aZ!vJ*X57w3JVGfhKU}KwSrWHP4*3&(=Z32CSXB7Qd5I=lVj1C zib)d#+Rx7a;7>2*rnoO#rkJDRY`1$_iSPWsvoJUmiYcR-h4$SalhNN7c=mqq(>BW{AD#cqR3^E146;LZ zKomizH3XVCnT*UJJ4!R;z*5TK43%6MS)>!ov0elMAt?fv2x45v-5?{m40{*R(xA#d zrS?==+08p!PC?vK7#jRFOyIuA>*)9*w=tE5LKKK?(O}(fBpuP~`jsb~H2547LI#p@ zvBitqwPe5M#pNn8^;xgb+o5m=4vw+^6nG%5)H!vf}b-&=0PI77U6s|uv@(LuCzq5v`M^TByGRQ)x;u4v}lBemn(U4-*R zQwcBBp(@k!iDhLDV-}TcX9zWQ)$l>G;Oo`yX@<(OF2@<|+MoSPpj~~0CXy>Fgv z)nT$T-RV>$4*DFOp}oo46VQgFp@GPt^Yi9mAS)!|4%qpqJa;$5D7v*0DOzOE%oXNY zQSET!GK0S#`y=&~SyrZwGW-7Um=v|e0$I8^r-g>4^Y$OM(Lqw&G;<~ufzWM6;u|u* z?=OBNi(qLrIh($Svo}nM_Fv4c&e;kQYxqD0#VAX>v!MX}+c{F*e*v!}uH2a!VAi(q z2TMC)-b!DaD_4BL3?Lz{e89V@P)&8zkJC4MU!#=I|7D)eLk78$3ZM8mmTJqz9^>eZ z0uTk6r+{s&FM*dL4W_qbMnb&#*>A|%u2t^_FsnceQVt|+m0xcl4`jxP9|7a=flPOE zift~3B(&?Tk&?xj-^&+*g&}Pml6a^#m+9Gk4ZrlHbhlDlaA0Asy-fdEkbjeixVVo` z_SD|F49D}PuYTJ}51U3eh>{__@QP1&Tx+nzB6o%=Qy;+MEOh|!DMfPHBTJts^PXq5 z>P^m6hzEfivJ-u7XdE6kzmeJiQz9JQ7P@>5Sy3;71N;uY52OBmq!pt=4^-(Bx-$yV zXsg+Bs-XW(%iN>fV8IfmJH7G3avgdLzlc=jzz z-=+WfWte$PgO_bGi$f7C*zWD3dH59AnZ|cC4dFOB#SE3<%XLsa`b&S^AXw+0_!9~W z1&gI2lFj5KyjC+MU=Oea-1xE-jj5^R;i0~MRaV?W?=@7xd^}}^If}XM+5-InQs zOPLgiIE3v%++K~QpWv2V_ChS1)pQVn_&5IjUwvU8e)Vae?arY0K!#}^E|XWhMPmuE ztmdgY(!Hm=-TpY|qae$%FKNloG1{C4XcVfAOFF5NS$dOV_zOkhgq+ zW-5yf;0Rl|`8bP?+6ho^Hv%O?B&)@BkbR!=`A}wesN+(!fLAuEOHb zc+gjq6+6j2=%*36#MC}t87Z zoNM~pC?vvLMH2EILzon%E=$^v2I`B0M`y3rWYnzYzc~r_dyhU9${{EE#2okGs-p8h ze)s>CNm*8)Fl1R!zCS}IvQ)WIH-#?|&N5sd;UlzPNR#+W-)o9j2_pcNWG)n^5zF5c zQVLRQkVtfbhwhHLW6x7NN zSOT&|V>y4r#F>0hWLmUoj#F9dOXi=`;8X?LHi_+VJr2vv^X9)1)q^qL=>&TQgQF(H z9c0QjRdS{4xtEGv@Kxq{CPRTmqo2;*ei_|KexSDHRO$3Y^>a9*aTk zlCq*GG7BAok&`W&5M#Heo4LpM*mTWW$ zC~irGbE%S7UU|`HWKorI+p}Ib$-~o!$xGGe(Q6I-HAq)GOGTN7~I^FVvnpgR@#4)U% zku8g#k|yzZYecgx&G|<~%r1UR!(b^JF=T+uwaRz;z5W=Ntv_o`+vGfvky)_5idQ%b 
zy1UxcLaoMEcE+ljg+Gy-Kgba^PxH%hGC0T?1Hv*G@~`bHNk+X$Qfj-~5A5;a^rWss zncu$eUyS-04$`OAPZ$nwk_f?9aI2Y$k;@bbD9O??JrP`La=N^6NWnoJIzsA^m3Q$~ zn7Njb&0sVyN5+n|VO9oOS<7ta5w#qm>?Hp(m zJ5*?YXV6#EUdlJ1AR2V*(7;}LS~|l=QXs~#Z^eu4a4wF=K|G05=x|B4Gzx13$L-^S zOyNhC(=}-2i~Q$XN!j%{H!nRI1t{5_c1`UqVJ}s>n0;A!p*09&D*(tM#T3~^Ecjnm z-B^?CwHa18&59Hya~}N6sfYF#sE+5|F{iRPQ40O`NqqUs*I$vo3?RW4%h;){1D7x8 z#!w&0U+rCxnuU`f&2aq<@Yy;mL47HJ1UPQ6a$xC*>%xG6C;jduLJrAS7)CcHWH=FT zjh{E3^{)9bF0z#_8q7|{-!cI;XyUo6{^1@?)J$2LQsgW7gus1 z(hXqZ%`uL(oP81YSRjz7nvgBehS9A-i3L$ngk;IvvHA-|1_Eqp8Y+BQZb2pPEz6H> z?sQ)um_q?JS|@j+5C=Fsm4d%H)zko&GUxAnH8vZ8MP-Utzu@EK$I_?-RfU___GHq_ z;k0PC2P!Avbqa(SjTm~&Wx!|SbO9985Nw2Oi{_J!om2;pK2~4TpDBfpax$+^w1(ub_z940*T7^W)wIeqk!5RTTpBjHnQ0pnpnb)93qGl z5gQN~VNgFKl@P|cG9Mi=4*v*1bu6Mqm(Sh^70VV!r?*a8<)K~%$*F+A>$}H=4Cqw& zIC9lbu=M;?mgEpv%}7NQlYi{CkVPiSV;QC0WoCjFBMr;>W&o_qSHJoBjdohbcg&bQ8x} z)@cAQgyO)fjb)h_x0Jadd>TC7!0{(GtxL;3>6l~{?^tSzen)S=E*X2|4Xdt#@DWyy zMa*f^s{J%@awm8Lo}suZ7{&UgOs-u=Su=7r_-d}euYEbNy%tKsC5W1XBy%OMkxkS) z8uBJwO@@8R&%UP1~D?8VpSgrJz@vhF~5IDcWvsJ zHeZU~Ko1$Q$b2CKMuZ|L$vdT!PCcIee?B~xR2?#_V{o5BoQE3KZ*+tcKFp`9BtseCxfjP!1X=d6Vs)k%^G5rb}sXp$+O> z;FWelLizF~vVGMUL$e&Kgb8MMK#6%@#C`a>G~4PAIMUK{3X{p2cyCBP{rH%>GX9O= z(v`c&3^It%uM(M%+^9Z0fI>MFF)#eMNZF8pvIt~8FDl{shp85*46H!`A#%g`t;V!( zB357)ww!=IyZ&%bFJkWQ?^2-oV_9xnXzoi?1N8voJCH^~=o8WZ#?^UiMs^x?q?CWL~<%GyAERDjvo}TO_ z>8IHQ85pU%;h#a~kmEzs0hEK_d${;1XiR(XV;NY3*xK3K-bV(9`W730kuH78_~U19`^sPZFq@c| z#+@ShHaQf$4W4aJN1EGy^*AfC@JNJ{Wtcq=n4D_rq=UlAyF=pv*c6JdDf@Bh8u z9~=M~VG*;*%og`T5AW=muIF~*M&yw zvdvg8U#sL+mT%@X`CvIVPNZBuB#iT447GiB;y#zE+ihB?mg5wC(8@Hbv$Jr(RAlov zw3oMLj=~~k+0rOPDzlsWdl=yf4-wL(P@Sle3@Q~IIYipZ!Qzz@NFSDUDi;&wL zq%%1hjZNYAZ$zOZDcwS6h(cu5TPxRF8DZ=5b0y zEi=JjOjx&f%nRnD^5)=xh)lx1bJST*0+$SQKE6#3ijxUrEiybTVDs^3bhLFxhCx37 ze*JL$#S?ybwtLU}U?6a;{xVY1E(P_QQkHKNQlVt)Ujt!Bg&R9IND6XqSsGkC#o^6f zyQ9GMq(s%bz6yq#owOo#kWc*D_3Jl$a%gvj8G!5OuxEk6KC9;9ZJ9>}Jj<6NUiGVl zg~GC6E&};tff>p8VJjx?{=t{;D`7Mw7f^6Y;$4!xN?HbutzHH4c<4fr&6Tqgaz=Zk z^(;xj#!8Gs*ZbDDeD)S(6lO?Dc3@WWd0&0_r0-^nN}*Mxe<$iM!~shPtx~r6j;-34 ze~E-62!rZK=olprc*=A zwQ`R!bMZwp=Srs86xb2AGa@-GGEwssb0-c76N|H|m#%%`x^!7OdnRqqBQxa>V=8tJmC1l^utO*)jFy)A@8I7cwoef6Cz zQx$!;uPEat(nvSpZg-{={E;nCYgNFxE(pJrwzVh;(`gjofbmIWia8`icRu?^pXE9q%zo$5&wPyh0_vOV#`4=LM4V&_X6{(I zWXwRwFpd&#=iDu|5gFYOw?nt$iIpb?C!CAAm@ClM)4$#_3|Xj6vp2)#*n&VO6m(Qc zYIpWFn1~!kpEMeh0L0#5jkhKD6H$VJmZrs78o?T5UUOdx{rSik{eG>vu_zDJ`IF50 zOLh*tzpC&>Ey8Mj3vj7KBrXjv6q)RkHxST(_lzA7YncB^=73Lx8jcn3cUT8Bl6Jpx z%?X7cw1MR)*7oYN&k6Zn{FMcL+D-`F15FLZiw3Y7sX6(?VA4hOn3=4Vx3U)afmP(# zp8^e%9u^4kmY=r4K2JJa&aw;HWOHid!w1fcQ%-af=93#zWCsh>#-4tyM z?x|#mwh=*W`+DlPs-xokTmtZ9(T|9AD%JrLf_ILCW;X=I=dL^>w0Zt1X9T~?x>rxT)Atzu`C!?GhS$eha`F<>x2VohjMQ{VCK7@4KiIwI@q)7Jn*e0@K`F2pCMAn&oo~Pp#I>3W< zAO|1DgJd0xwT;MI=yyoG>hnulMWInnkV={VAR+>4+#}$xblxk|xP>uLtH_;9o1_*I zxy%d+IW_3!vhNBjeWH^~xkqh5`~@!JQ8=j%@al!>mMG1jrJ9@=#f6PGjQAGv0{L!$ zqlgHgep|2X7QQ|qB#u4naj0*=TK=od1@|=yy>HHQ*RBg09rpGMhb~)bK~nxZ1xjHb>R#Jio=?*Q&6_V;L#%VHEgQ1&Zuie2yhVhD2;@*q#Xa z!FS@bHuRQ|#5Py5f|yCu0dL6_HPIJ#bxRNqagI2zxCKvbC2d!l#N=7PC6+Pi=Gadt z8b`Cdjx%o*er+bc1s5)2n6`RAaI*K8Yx-0FJQ)BVB>S&!NUzPW)7B@D(4X|-^BObD}*@dA%9 zFpK=<5>4x85DTenETLukF7XK5u8Ng!96(^fow*27j@$DOg*D78jbhT0c9)<_UsZ-| zLA84E0uok6(m`$r9rN1tm!I^(j_R;ZfBsqk9BFVpU>Rhpg@XK)!?o~siB*bs_j`L< z&BzGS)5QxHgc99dD;(^y`a;}}T*Jj)j0NS&m9EU?zSK(Q*67EXZfq^d_S8qf4JYAT zy-$)(Sx3Cz3jf4ESV(}t)SMI8F(?2TEQ~iqV$ZQ?f;Z7}H?T6u23c~((3z}epMT2w z9g{c0djZJGVExwuN3p<&Y#X}UpITt$c^nX;`vP=OALxCCS5?{H^W+4S-n(+gw%{&) z;HpQj#(>8Mvp1Xq7;wtooNpQ#@II)6g1kvdIXEi7NtQZRh`Y<-Lx(FeMxA671HGAk 
zt949ChKAK_Pl)JJxB1i;26H+RlFzX|9OEI@(?RY}g%Tx$S+?Hc-9k6>6GEzQZabL|JU=xa{f>ld0Peh(~$ym3W{sVhUwwCdPI^c#3_IWHfw`4t@ zU%e+sZPh}AbGQSciiR!oeW0xs%9Y519ZvTjO8{(=7V)z_#j7t}e@1$%rXk)F?V3^N z%fYPZgQU1V)=3>MlHEEi!racGBVT{w2^qgR|6nrSJ1C%^2$ij2>Yn0)k_n{CKwbg) zLW1r>_anwPAkEc0!DlQ@!bF$0hj|?7wK`>+lE06+dh7S4V zYff4VT3?>Av~sLv11TWnepp~HBI{0J58i-nZ?yYB@52Sow7m80Cw&tse4{`$sA7UxCaMGhQ*WD=Uer7z1ZPm7lTx@i1ps~Hclb}p zWZMMsxE3zful}mWgcS%~%7`P!g0(O-DBTPwAT3K#;el2~!ZdV_ zB=@Skgo&xFX<#KaQWeOYaftwWpC9&rl~hs^SmY&H50X`Uc=4)|pQVya*SVj%gzJ<& zB6qEV3sLBClZ4`gMS}}Jj9v3w9@z+3AE7G)4Z)6?8N91tV0njaPhz#gT{ad{;F&zq=MY`(}s2TSd^yen zD`laGgD6mmmhr+YRwP02QS!~b!p4s?6sWYGsO?c8(5ON9m<)`eD>(j2uCsNESXtj; z1Ux-vFn+V$Am{qptkfr_#0#|~(2OrMaXF)K8!=w1?J~&QXu)MlFPC+4ym3CT;3IF0 zlQU9#V9PRHFbC=UHYxgJPB*^&ZD|Q_?|0;9V>y&$2?J*nyR)qM^9j1^joKuiQ_smw4tP%Ic7_Zgoj0gw6Y-;8S2cP< zz@YGWOQTpCh7jJOCB!B3fko|1Xr#b0SzNS*!~1HRKv=riAQK-JinbMg4{eamcs7z% zj4|*Bf$(J5i#$;P&O^5ZhpY}DqIp+f z)?)fHMW6*lC1~H-JEq7+3Dk$r`R>!}Eh007ype~+o0~R0 zU3{HBNSI`T zf?=SjiAC}|U=OLblB_&+u{B(peBL?#IxV1x6~WRL4wR8wyl}e?L3MkN_$A+e9s#qg z08aS0`3^2Dwk{(MQZ~g!bbuc~4tDRsrJusrti~93EH5-I0F;>QP%O-uSV;c;;Ys27 zH`lEX2kH0!XYAdYBfG9U&-ns=ndq2#m?zRyv&trm@6^RkB(7DsH4;@NiA9qKi31=D zi9AqVfCAX;kVw%Uw{9(4L0gt*tg`8r${mWP#EU836yan=%+mz$*7NF5GQYJGYwiE8 zo|zc799@abv(MgZuf6uV{MTBOT+$S;vCsUKK?q zI93iVjvlM%J03==ihWns3f!+xxvzvySJjWBt@RF-1Oh)rELBqoTXy$CRbhP8bj)4( z1GW+#dS5ORpb1+;^F84TWQZDQ00XU+o1g;Bt4{K*nVv-@NG`5-vj<>>2xu-cGV=+B z)1h~o#H(D=n4PYS&vG-LF=RHnAQe96ot#jLEB zw2#TGR-02HJ>D!m!fnDnK^C%_3gr7f%%2g3INf`dNSMZ23xJ0xHF%P*LuR_r7K$CA@)BAoe8il0vtOf}Eziv2o%mu3hzNk>DA#iS#ap2a zW2wi6870CWOWLF##}{Ar?WN{Y$Z%e(K+&T91&IZKA)V6<{1w+RH%-1DDT$Lg%>+5h zv_F-qsK`E1AuvYl?SmK)#sTS_k|2DZ|Hse1c+)8kNyxhrRqWtwH(P$erkE@{^>T_dWKLK_aiRfm%$e?l6`y0nZWu3K1nAg zC;P3Aa5UjN?8VUW$4_X2sd!-_bXuv8tg_-5Zc>xlSoRT(8wj`3we@6Yx$m zw3^^fKz_@!bSD#Up=sub=h(rS4E5FB&=hr&9f?>gq;}0BjJSowMLR2X50apN=BvZU zLS{Ap_o+}3-IU$-i88Z6b)c@^xY7FIrcYglo=Et)1E|smsZ+PxaH)8FFYsp+zjD5_ zE21#h=d915^MA4m!0h@0m>uN~&RDxxmDtkO59{5_zq?q~QMaBcFd)%qaLH4jbSbJkF7CQgoQ;ZbkHUq6UsHk=K}-6GbQ zP#Yh~N~x3F9OtZe-nQLf=w*NX^u68QzT%oJ+b2XKiCr>p+;^w4>7DKYI<(KL6MFoJ zGA%`E6HWATfxXQ}xh8o^U(w!QY>V6!b*3=#H z(Z{)fZBm1ObX|_EQ39@U=W_HnMMXl~Y@= z=Y!65rwFE=CM<1#EBfyOp2U=Eznw$rtYxluG5m_9T|s%(9g zj4P-0pS<>b_&FH7Q{VyX!9EjQ;UU9h06Tzet`{*=}t+-P#AFf)^WXmwX|vkKjh( z-IPn<8$xI-Hs{&vU;aJ+`<|$&%w^mVHuEvsCRCf^P+5I#1FicJ9!`!$qNWmiAEH#@ zVeuFA#$UYY0}?dmip))@FaGA@T{#bUXw8t0d(&Jo;UlBp!%$V6i-#D+=Ig+p{FUV zmO}+_0EJnxV7yUIz2(z(rHqTl)5HFh0q)W@>JSH;&p&~OfF(L2qdz%}Zx!T8YgZn&LXBVz3vGFY>FNHp13 z!dyGO`k=UM=_gh=_(x5|0%r+g@-VKn^Vke9ce-~oymUUV&7ch?VXO!(+N{!YtLSC1 zRVnm#j%aqSILZzFiLdzj_3K~ue|M?M?2{t+k+}3d6NI+f-Q%Pb&n#zMz)BP-JvkMU(5f8VsvEPb)?Xyeow%LtTJ$$J2i* z^ajEUy|~jYVk)P82SrpbLKG|DT0R%lUh`qb>Z~$ugdv&vLPyTw7S{wyf}HS$rIiTp zO+Q5|@o{;yJ)8uT2a$;K?>oYmPyexy&?=G_q85_K*Gs9dxKELX=bG>V^T~7_ndCOP zD+3F622|+jKXlfy4ofnLqN&AVaosOV($s_~iI315Pv7x>Q&l*;SQ61FU`W#I$jd$Ni})`0%hNCbJ#qS!NAyQezvpxSrq3vcTXC=D0_ zOE?m)H57unRc~)=U=ok5ak=0Ez&>Ry0K9#8!=VyY_+{ihtL-ezwA+L$n6}M$oBTVZ zE~q1UlD?Q3uW8gL$uYbuo10Fk0T!uFeKW`8c&Klc-2A5_ONAHX)k_9Ou#_7{agR$| zem%VWv{)o51ta@RdI#B0fmQ22yn?H83H$c;>8kAi1|$?=E2eqxrF(Lefg6PO#+RId z&@(VojZ@<-wnW6JxUf(HX0BeJ2tOH|ncGv%tz9+PL5x~go45jbVmWz!6_q^;YlN7@ zc1n7z@Fbl(lhe-0z1IM#bP9GOr1ShU&a~;WF@)P^_-4MBtL-B60*l35PR|0A$tMIV zI1|jj|K)#}{m0+bBw?^XwfRZo8N0KKlemKmKqk-SXK6Iwc6Mac%|?1f<`hDkKqa_s zp1i{8nN4*0*TTXUPLGR%Hcg>WVNn%r(rrr(H|B4rv*@^|{qMFDUPOl8v^+Zhx%Db4 zy1d>Y=S>(2SDKzIq<@hC!pbQrC%-2$8L*A&hzQY46biWrWW#2KtB+BwcPrsjCp$Dm zrv-%VT;CoK)%Sr2w@eAw4yv3Lf?WLkLZdc&#L}~ugn>uj68>JW4!`+Y<$UaJ5;>H_vLw2uR5GNRUmjB6f1Ux(H|$S01r?g&Qr3=Bsk=n 
zPdngdxfooAkhav1s)P4l_g7y*A9c3$UI(!)n?oSj?E1K1jA;&aYg>ae%GSyHEL=ig zI8g(xY6~Eb{ird@R%^{a%_>v+toW|7fU-=2kg?@#scV*EA?8tW!FXrI*J}M#b*E{P z5T_P#OAF;9U*TqiZFnUnt~LzCz99D`AM%u#W+gTJU1&kMIhd8>%mp4u?l#PIT}v?(IpKf?0+l+j9>;~e%b^LPxS>Iv@Q7_3ucfpWOLY4I8MdghEx3A zbiycfD35hOapbZxxc(73P8yt=84AUc)+eZ#F8SDapZFnp5K8(wVl$_``?8`Xf^p8e z=&IOq<{k6D@B(7Ylz5*gRuf(HnKuA3ssWS`< zR*ASrKvdmyTMJ7vr;}9gNzYRIpy$eInseG(3h#ud=;vfavz0D?RES5IXn{Q>E-idO1Gjh5*g`*o1bm~=w!kOtxnt3CLUZceZe&HC-QHsI%pJo!ge-L+WWqp zob^fe~} z1>T*e{C)a2@}bsRqR&42WmzIZvwJ9F(%eajhNxa5CMg+#h2tDxV7ej?VwpscSTvg* z;s`*L7RK72oT>F~F=EJN!Y2ew0=R?-WF@V>x)AA{CRMfpt)Ry{1tvqy<>dOcMr@2N zXPbg^J3Z>lIume*=*`Fp&4A|NWMA7;V)+8G1ez*@{mN555bf7OwP(_UNhk}ieMvZf zvXdnQ3BlZhPSd3A>CI#%*NSiqN^Swt!y-`pr`eRbm?Z&Tc`dOdt?9XubvNdU{v6iu zHua-@MtrrNi*kCHLP(w z(!$@6eG2r%W)de61*}s2(&u7o_Zlb#tRdhPk z9*p{${Gi+G%Fjpj1K_nj7(Nj_(T>>CV{UK38RrYuXRGYupZxuikdZYaln0sW%73~ejDo~M?$TM zBg|#>jxDjg>cpu{GNW?~XWak z5s|NNUIr5KoC{49pH&JZO@A@{pa^(kV)y%v^9r}jRRs>AmcisthLpFS*MJQ>c8slMTKfv+ZmoEB?|>$w=!ji;}F+2_X(xu~fF5pkbgoHa(O zgfEC$3=T_Fgxw#q0;)U?!h8otB=gv!kgptG(t&C!Zsk@i)IA6u9a-~bc@~eQy7Ld} z9Z)7t0H(gu_I={RT&2!y>vA)R8)N#g8}1i6(rYvh=apulQ7c$>I9*&MR~BW=F>lRt zo@FjC?-Zxl3YQo1>Gt-bENh-fx>I)gtL)_7ftCI5y9Dt>E>LKcBOw$wtvz;N<}$6$ zguU-do`FB+xpaPzc5|vUQF0CLP&SK5)j$>4HL4IXKaF=0lt5edEwFUFafrnBYTK7W z!IrRH&PL%%qX=0XGbp&1^^ru+2R<&JZWs23Ig(HoKpqH9w+S!#y1J8)2>7=r<8$Hm zt8648El@WaH~HJrjTr^VOs(QNdfR!9R$9ZbQ|y|^J~5e#b$papa>JRC;Ird8kn?~O z(U63(m8Q;J{&fydvU=`2&4HRho_t1+e@2xNpFBU_ne6$CCK>fjI+3N5$)v1!4K)&U z*556P$q{0 z=L4slMOka~fvYoSRcg;)RvwpKOP#a^#sWz)lBzr=;R71F{s*1El zS@nYioe6180+6v{=vrzq$@gpZZZ18!22>Qgyyy)232nUJ&xQN3@`zUXvX@G_T>e>d zT>kpdCU#!x$f&5!z(e7QU?^Pphz z#9k6)!Uok%U6SlGp52a<$_H+_6t2dj;BT8gqUX`E;z3Mrq85by3BNQ^h)S!I;657z zT^8mCq}#OlI!0AlEF=ga4^Quz$tdx-Ll3y*A71DGDdR-a7bF)XmsdqDAPeO^r9K;9Ee@86+Uesf#J|0yIbCDm;{W6 zh(_Mol%rs$9vGzWKC?+1!V)?doZu${=HnrT_ct_(hVAq_p~s`4i?JnUua9kYhiV z-@^}9rX_fSnt}*Bf2G-v7a#g3#+qN~$itmxZ%nKuW{B(-49~2RTE56wN)`lG@z&>f zB}9v0z&=k;LcV}euwa8zmP!SqxR)?D)NQ;ovBF_KG*hWt_uThm8-2-H-iPhLUE1|^wA#hheJvUAIskHr#M zl@}P;u!^=inAyK4qjpM7%wnj$-EdMf@M6<1^1H}MFVd(^qnrrGht9y4-|+)vRdD>@ zXbEvyFsP}B{#EZNr(CCs@4t1@B`5Ola>I-JlUOl%1UGKT8c7}n?<9Y)(C;`L9!4Mw zl;>Cg*C8wjs6NhQXJPBk!j?$H)Hp7S*1GIm1VSm&qZxjH(&0P6M{*y1GG>9Mf-v%R zS*iwMsh~1kD6+Bt@;fIKC2S&(npf&R?$l)n%+8{# z!cuRj<_Q6o++LHq38AC%=o+4HSBEMSy&hivZ6{Apa?*?e8@No=lpSD%sv9VMqve@i(S!$f$?Y56`C`7A~PK z^_Juprh)dR?}gPa-6j2o1SKUWeN<0>EhHnyi4b?#|7S-5>bImknf6JIvLK|no2dbv zNte7Y3jv;jrYUbV4bA7X-_z~^oX~zx4L!rsCOppN_i`By*rvGWp|oC07DSS{R@Rx+ zU$?ljGh@jMfu1O24=yjkIn_nPT1*a zY9U_s2a&BuH`y^~NZw(YUpTzFg*OTd5#d7il`AgK{-G}|c~%sZWT|0vj5?DsEdvnE z<2?3`a@t{kGv~W@hd7~0b<2|Qnd>+FD3+z;+gw&dRR=b4>8TJ&g&t%t>A0@$8f(c3 z1BQ@dc?s|0PncnXX!BJv$@xhD?8l9WH*@@G7Ds#I_WHKSlR07_8Zx$Q5}rwJ1OHjd z@x_s-2hy@8cNKSUyB`KM6G9~9aL!!4hfd|Q=UeLv@7zT(!MKX1A9AhB-q?N*G1^o! 
zrGhXuI3<^h}Q)6+sK3 zOfp=^v6S8|dwo2iAyVMTY&H|gvZr!PjxQuV9#4QgZK-sHBc__{TbM{_gwBzmpuxX* zANBj)dh(z*{bg=uG?Qb6a66(#4Ej`v#+mEfnZ9#Ok5K0Zb5oiX>C6Yr3Wtr8^ZGr_ z)SWN7vE~Yj{w&l0J}x&=8r(#nYgv+Yb>FNgu0pz;9KJRRGT=gQlPnpP~ z8r)G?cIx+^dO>)T(!AkqAPF>>9RKM@Ed5=PgbM?!x=2w-y5{(MQ9453WAe{F?O&Xu z-HMPMO)xQW-^!aWe{k`!A2LWhlWVy>1^vmL%&V<$xUgFcwPqqcBe9C@g!&HJL^K7Z zwW}>9jNTU?$V8}h5Eda9`RBd_<9naNPA-HdNg?!|>DA$BV$E+R=$2O%P8 zi>oc!rKR31OTu{+F*rQyP|QW^uQA~l?)mxPLx<}+G9~t-Z6n2Wsx$F05hF-QPi89* zQ}886kQr>b3258D{N_112$nI>JCV6^GLF6sQlku?S_x#?9+TalqE*o9+7b z*MuGR!LrJWD{AByiuc9OF5h-~KmMtfjkfA490(vmvO=!#S`jQai=IeG?-KzX;bjR| z#m)plr=rdJGJ*=qDCdfsK<^KfID{ovrJt5-FE}rs@t-r+ufe&%05R*g*K85dHZ zn2lr$XDLVK$cUp!W~&J2S;=66D>6p6gaUDy7YB7q`Oj8}pI!~|?St$+TDYnz z)PrJ0J6)cz+g1mfi^^Hq{z4wKC0mx!f3R_~+bulhMH_BP^3@vtIUwrjT-2ufP%f;* z8|I;Xqh&Bq1Qah|ak5;cjRy|U{OeE*569E@e3STceE57$I^pyG&;JSU(PgSgEhNu) z7?7Fg{`K2GmCc>m2Mu-oLub8TYn@X&=CZG?o2{=4;aC{uOXS^FyCZmclv?9D-B`$- zp1LY$)stj2I-y}0${(n4%3*fCzW1_B#E|MvHT1iqxXv1CRml)@5WkxE5ZwpPw@B)g zh;H=8SXR>eCKvm zJXB0TCO2;Y1m#j-vnR5t)XOQl^{mmNkLCGH$kY#r5^I?Q1B$sQks$tv>t9X{Y6rRp zS<|`B*VBLWHGgBWEP$XnMJ%BQiu>1$c5L1DWC2!|puE%SF$S7XaQbK*j!2$PK8}AD zPB<)GJ=)lp%p$v*{G?b7P8IdIfmFj&ln4WCq>c zhYETKvr+envV;&ee-+WTmgqKwhq7~`fl?Xq(7+XST99|iexupni5dt_uGxe(brK1 zWrxVF@sThkiX4gc`jmd@^4sx_9_&t2$VV)fXnkR3Fx(+??o)H5ITj*aJWL{u25|lY zq#wvKXF=w@(WeA;uPYBxA!1A~6u^Hn^cD>;`l>G=NpouR`PoP#@>lusS?&CnPp&)J z4Mps)hlmYIczJc6VELVE;^23%Htu)PM-Qfs-zfd*P2AT@2s>Jd@c-S zJps4@owWP~e~?iB;ywIvzcvS_NNIX`B`1(f5ja4Rls=b=;D)xD##I}vhrcIqbbO1x zSycV9C@IbfXDRNV*Zgg`dy;$5kK1HS9T)#VzDNcE;Atc1psS@c8`4gevWIqL-3FWh z>jtaPm=bWlerz5Sy~;2Nj>v9L^KOe&Ws(_I_qLF99!-+|T*R!!lVjkxtAzzf4p9!| zK@JMMQ*Ok_k_uRmjJ|0F5dn{9o0Lg!huX}CJ}x{wg3f*n;EU(W){#b{R#rN`1B)A5 zYyJrJll&2MQ=;KA$5hfqn>WmwR-EV1!xulG`iAcpuWSPHYbro-QUX3h z2Kn*{!);7;hGC0Nb@ph3 zC(U`rs+#&+7Qt^oB-w!@JX@1!GIUa)BiG3?ECM&f`hKH_%R&TJwSWx!0*XQRZ0>Y9 z59kXPa(q9lgwuD9)cXUrIG7pD;^I?Z>Ydi=?FCsBSLg0p?*tPRiC zX}p0VRt5dCpA+bVGXW2^+ddCI99L&ggg+>s+dn%6xGlfBGYcFFv(q;P|A~+pH=}lr zGRKtxHB~Ryb|WXno*18U3pz^y?~Mx(j9H`nfAJ4shAey+eG}34_w^jkkc7gs=zVO9 zA+G%e0D08|k#us^q-SVD86wqMBS@2*TQi_~nK#(02w7OQIbn~zBfZdQ%8)ST-Ehir z>BU)GOSlw&*t}NR#l?1wa@fr9ix|Jo4IbI^&-}RF!n6H} zaf7q(!Z0{_`k^9;`qqCzM()6+{3Yb;Q(yHJfbK>yaJrLwqrMWlIQF^{KQLDZr*&p? 
z3U68aGFMQZSENL$FJfRk`SEdGsk_yR`R3lo^!kh#4t~^L^mKw;%+&h&36oQXhE+K= zuD|}A5O)tRuh!bYuZ(LaIPAau;{W=t@J}k{kunlwhz>WRRwBCt7E=~tW#IhS4kxHZ zav;;f3-khaa)b~~tGayCpGAY&i;{(gqsUji1#HY)EhDeutio!Rd8{rhZu?k}>GasQ z;)U0QR={|uuHavN?cd5ex%(df*gxarHK=pkxyIDyqjna-7pB0AmigFo-4A|k!JvFK z@CVo>3x6a>oF%@)E|Rsi0oIZ3<7-1BuxPOe`qZ#h9Y|PmVRiI-d!ZAN;g?b3)H!h; znH)y+(0VY-(1cmZL&8WCen&uBG#VEm2hRzxy09K7Bm~3bxW7}rp}%N!7Wx{?Tmo^W z2KlXN(cT5bLUt?j^VP|YQ-vdklgw&Y`-$zHpM(B!8u4pfict=okNK|S!?QPD@EKtg z%F>cJ^7dac*b?f6d_=b?wyV}REzS9y4uL}Nt1?-o+)jV>-IaOa#F;hf9D7U@H+GyE z<8}-ue`noCXsvC^tmc}b-Uds=D0B8vGI&${C1+_ywHqYLQj#;}n`xG;F9Jv>;2VF4 znv2Ke>xFBLK^of8lqvL&e=0(DmI_*wx*y@Ck(J|?I;b?WHvu4$dFhkjHOEB?TW>%F zY34Omu;5)>C&)9+7ec*72*q)*;Jwi7W>e`@%%@qDielqcxZR>2jc*ar?p&Ic=v_Aa zneDBba+6NZ$)MTT?UdR z`?cRwcPa-K9oxi)to)pylzcP^E^I+IB9cb_FspszGh+qeoY^8+fGjHPe(G)|7zz7* z`KG*u8Zin3;--W>d{8J){9%@`SqRUhFz36Qj(A173_2er zZ&U=*xyfk=UZ@_cLbh0knRbwC;!#mZX?lp_;H$lAU5*TA_gSaU$?L zV&@`GD3VHf`@Oh#ct`wu`m3Yx16j_ksvh(>G#{=UE;6I0kB%blM#VGd|E-02dH=1f zAy-mZjhXHBClM^=qoDxM1PdDh7JT#a&o4f3dh)YZU-9=e-N-30rPkP0R$6sY8Mu&q zH+huyb*l6nlfMSaPe=~73T;jXIl=@Tj3-gjs|SQzO|f=^KgHhwJhgtXq0WxorXCG6 zR%2123vE1NK~M=-Fv;oP`}*3Qjt|Y>HJ-RloirhFHHcdXh?%U1$fsv58rVS|J&&Dh|!DVE`FV%nqVo1C1jt}5R6iq9XPA!QqV zBE9K<@qbo_*~38pqYyIaRH3%;g6-ap$PBEJK*i z2D2CU#)uk%MGqNV*+VmjgtpZ93RW=rM(&EdcAHxthh`K#F`$r;vFf7(yr@b2XkNd2( zu0_%C$Q?_>9$Wf*xg>Cpl7+slq)EBZ!|!GJ)M}I;ST1jL)0Gng4>q&pQ4m;QKTjaDq2YZ%yAF+t=gjtW3HZa`>LK_ypx3@2+wJLan?w?y2ClSdx|=0@%U<@?B?j<{#e4 zSKE7!!%m)l4C*6}h`Ifa)m*e>LFnm(p^~8sOf22Nx)~BU;0Uz&$UWF_=y8Y0x=^6L z=M#gRPyv>cKcG3Xa$)K%B7x8a*f|@P!5@ld49C zAyBP)gM4+rK2^ush;Va(9Kr7fuxu;ZpS#^DU+YVTBO)R{t#*F6MhfQuGLm z$RmfnzB$Kzp_Ir})xQf`veN=0Ji3Cu7s#O2#>R<j`&jg}m|vAC?oNN@pE@azNW5*N z8(B$h#GaD{Ng88vqvy(7MB?#M>)KJ|d|;;BuOIkjv@^z@ywQ^(F^q{U-KW73nHFwEaN z9>_=371jE&x5$4ibGnW*;sH}7ZZTr~mcA)lZfe#HyX$M1J}mM)TC-LGzO^_=-@f>* zYz<+qeyx*FKu`ZUlRs+0%Xj1H=lm^S=06$5>gnaSbc05zh`K?iuafssDNv+*FkGD) zXuUgq$DFkhnIj*bawBq{Qn31yqKW{9jL*3_;(;^GS^NvU8{*-}r3qLb(NRnikX(wn0hRvx3 z2;Ec+$v2PU@d5pPK_dlbAVhvP4);dW2myob8=}-tlohHBCzjPN6S^BOgz~)Kji?F+ zceu~{$Tpu*k7`~1xB>rYnV?4o%42a_@XUV%^(@#QpiNV}q&CE9?~Aw2op?V^-(T<;d5 z^cEMVY@TJMS;;f`6HnvqE;JZ(z*kYV$1IKzh}Km!1P;O&+e>fO1S8U@F^)6tQWtMr z-A|PJ>|qDO*s5PG<7f8uB&{`!$eB3P40+n_v7Npe@!AS*>?-q84+s0FVB+3j*4rMn z%{TI7qELys`egc{X^kQb2oRhUQOJI-nF|mBIqHirIntcU-W#Yab&zo@a&y^tlM)99 z5l`X4=dX!Y0|z{ju}gy(2#N@miA^7&+N+bs4EDM=)Vc~ldgaDW>T?SBW)Z`w;0!qP z(#BP{nZjhOJ-$52;0mU^lN&tnsh^ck7zuBVA1e&o)DFaXgWm#f(6?BEK@+Ct?iK1* zeou%=IltZy|JzlA9Q0eCWYy1r4D0*2;7r4N6M~HBxeslH)?g8#{7Frbivl5L@_3SK z<$zzV3LUcp_9jCkXxSYzef-83Z*}#;N=XN0qAe7a9Zg=b&ty6nuQ;+421!eGZMp?& ziqz|(NX2jhsLfZn#Z^YzKPS!@*NnVx!!k`nFhL;DBL33}I8GmhZf?-XcIj-*4PDHs5n;^~4a6#ovt+U9 zBO&IgRZ1$7!|qV1fFmj=1YvGUf?+yUx?#ux`t-BAMsv@&WqC0yqBW^@sXul^r6fN9 z`~nw1Za`>V*=1T>Gv=TdTHhiD;PUCK&s-C-&&0X~xaH@&P3iYSp%W&cq1g*fmH z%^hSGcfiGhZt~s7hoEAFib=9f6rho{jVBbL6rs0B0L7pgaVbB3BR5~bBY?CCiDCsM zovP`Qlqq6v_x9t$fA#uv&N93AWiFe!#W-~Sl4djlVm+#~uG_4DZeP`0Y^Tz{iM@Cj z$AuuJI&RQ$blbZC3v?S$%4PaD4QdeA%<*y5M_`(#XW?eUnIYgkHVNH+q|mro+U^H1 zCY?Ivb(`4UD%uRW2|&mBroFKylOgCyHh?@$X_1gqOo;HL>>zrN)rrn=TwFDAo~NJo z!S~2eCPaiC5GW&Mozi2w)K@yJjw2Y|RKn)yKiq7{)rcQaf4j)|$1*cZ<+_}Ju@p+Qoi@dfUIGsO&?jQ=!MP?AS+qKA^-%3EP-m>qYsSFM z!sfTFG6URS_jU;g_)Eye9*h-vb&R_wY+_z|UrUo=4^ngf@+}d5un;38mxPNMG}D5U z=DFXUeWt(23Jx2Au_U{LvXGoAvY`pFmD;rlNdY1bBfE|pGR5O^K>e)jdkRHR(}faA zM>A$B94Xiy&h8~@3LsWk$O$a2+QAjVCR8yf#=t%GiP{oSAhOwHr~+Bdsk-=hp0g!+NAK;te$H~ zYSJL%QrRO9Syn*p=z;Qaiw^gN&NmC?g{f%}18YMg6S_+&sq^YIV!WhG`(@M%<%}7d zAJLb}r2kK?A7!;`M#V`!uMYG&8C6|%>{B!OP?u42Z$|Kj_vGMEJN0|=3B3?u;ogG) 
zHlWVfxeEfKXkLY!1OlRMkodW}hrl24O4*N=iN3hR9{=U!q(CyQj3W0cg&8SoUqrW3 zMTfXVP!>!e`wEse<%r=3f7?Lcmp_vI;CLjxpf^s~lqKs8$7Z2*b;UJy&t}G5wTmV1h_^0(woS`2!iXrQeWvq$;kK z5j;9@YV;Lpx`<*)YLG&WPEEcmqa1_7k93ggyz+rwx$Y9Awe=05Fp}K31I&*|^EK`f zC!n4BChP)Y?5aqBYtrh6W6OpP!g3z`I8@fF!$+hb5m6)ml*pkgXm8wEnU~32{PX48 zLO&CkMSYRWKfV0E{Cu_E3k>heFGqiG%5Q()#R9BuYQ6?sjma)PmtlVg_*h+hs|A$oHYKrq zS?tuYwcGOzfLX$|*3hCRyh^vD`lapt<#+y#_ZgH%M5c)tSIAVY@Xfwql;wp0$ug#t z2|R(rKWajA3Cf6XUt?J0d5h}N0s$vFin!lpT0%=1}T&&g`@M|m5G zO?s{D*9SP{$6-?Rjv_#I{q@)G$v)og9rkE3hj-H7%xFki8C#u^ID~vo{BEi3=Zste ze;&UcZ0^6hJwV(EZz0Sn#qI}ndt1ITS5Thv!StbUV6p~3$$bsXH=)BW7sW3o{Q~qu zfjXQt5t20BE7U|*@zTrBi5_)KqI)WQQmD++zlAmR;9819xE|UwzIF;Eg;5O{GzS$V z9AZ6g@W#<3j&XOtCoI<`qeEUZz_%ML&rPDvdMc4;Bdj`&);XRs>pRlm-i)iez5R{N z1%F=>={mfo>q-|Cn^EC|1mTdG z5ap1dwk+9H_@Um?gBq$h5xI)-hx;d({T#$+EN;rNFF7giw_V zD5|3o`l4a3^D}|7FecCxK9{AE$40(Ffl3(Ascr~(%$cI;hHToZsR&WzmyBRyh$tg= z52k-ll`(}x&-y>RXd-Df3strU0D~TP%LkF+5XgAFGA0(aCUp)L?k4HYoPWOBi@(d& zW~^<&#b^GFW&6t(ZdNBgr;fn@{X>co2)iMUsje!2pn}KS7a#i1r;mu($yT5~Yjc=& z2_3Ln>O>dnF18*2^T(5usBju%@)rJbj_qeco|PoICu2Fx;3S;LZ^~7ImEmj=tI@+eRjO4s0bF$tl2_HTfHMEEykD>KxEC`CL|pBf08wU?0FY;?G2ex z8`NxW%YN?_xs*8c$%_^W7~Y?cE3r6#o>qIS~fJXtyFtPHKwV$1Y#~P0my1mz%@$v&bgm^EWHCjLlLpTSoZ5 zL!_NfN!fKx>ON+b{KAaslzk>MwtcjcL(cfT>1J~<7-%XlWq)Qp;V~j}Ci>;ia(*o3 z1r+&YU0ZTeZ)27I<&_L)tyMpkKP62Xn^68l$fieXa2!Ca;vePMGZP}CLXH4;N`{J~ z0tGT2aG3KQV#!)(_(zrshlKBvk24Ul{sp7U^aat&Zm(?HU2l#+XMyWa$pIe(@SSRl zg$0djjxAXf;^$l$AWn$SIRH*0f4F$a7KQL622)-bsB^gwo2{saYf5w&C$`}ugcXRB zCMQ(L6{PYdzC2&xbs>LB^_72q>>4+#cCCa9iQsI7I)w z@55MTj_`%m&nRF8WNwo7_CMwlk6=({Ai3mPS8+Cs#Fpu!L9J+OfJX9;QFr>w-1=!9 zxbuDyT!7#H<>&Hyb5~q^rl-=fAN=!eSivCAvOTDq8va4sH$SVhO4AG9+9_s%kHcGc zDU_jA$YDZ6GX5!l^tETAlCo7Wx%{?s2oDd7-U_RgK|N(oH!(T=Jg;&%;(edxgMNrN248A4kktZ|r?0N>r#fUZ_NWG#?(4!I zjMVdQ&EB_*Mve-`49(ns8Ei15U_cmw>Eu#*?c!Y@VFl4-O$|k6P_F_2a|sSY^N@SK zJAH4L)JHuh7FPP)@&}xxEcD(p@=Qw8?uB-}Ze>1YH;yTEEaR=10HwB4o;u?_p~~?W zeBgom=OZox8L}-N18ha03)~$AT;Sp%qDP%Kb6C-S4k^5&xIz4SnDm+RUVQ#}Wr%;o zr=iTZTB$l<$S-E&4_xJc6kaFrSR8yIf$0}^M_j%kB%KQwIrD)L!Ii@2Vrj&8Ofv6VA_9*pY%Yu$4wFCj%5xlS}$-W z`QdQ--YJ2zTuuh6n6tqT%~=t6C9!7u%dUdjI^6@h#sH@A>v?_K|J7Oa$$=sYt(=&0 zE|0TGzD!1^(9_3!e!{M1YC~U92te6^|45Y;gvk~#Ol47BTs0u~3!kc`h|5nX<;97E z);`;N&-!dP*%1HhT7=-CuSWDI$Ck3>=a-ahQxM}kvsoLaJIEC8R=F-cUjtRm5jVt1 zaTBEy2tI}DuZUbpPNFioaWElNTE(Tz*tFVq=Ho^vydKL2m<F_<(bVFdBHU7h&3;*+78hXL5+58PBA`{dtIce|GRXzHkmN1p54j~+0^Z6l(p3TLH{)UX@wFG^duEaDI^kC={8`$C z?7?<(corq0a0hFsCY7p_Aq^I6H;gkm_qTSjwqgp1#`3I9KQ!N7D&-+>UGJ7RL1lw>Urje5g&q^q4eS+rMygZFslR{+*9i(bY zP7VX;2FX#j6eE6hlnI>6!5QJ+DO^s3Dns9~@nEN0ei7rS}U-mvch*!h=*nxrp0t99H34!KWXFHCqltd3GIc zi)g)sgn3)ucwDEZr@k%{ErmHQ-pTq6O)vWD*hwZ4I*^>4lp`?xWf3Q(&>GkgS6vYYKa5zd>UTy_1>!e#B(0(YEY8iZ`}<=nKX_ zsuH9!$ffTB8=*PsM-tk7%k7B+j8P^#n`(*awf_gN`lfOc#tl8neFfL9U%&ok-xHD{ zWzom<#g#*gxTaf(aMNiMJwRFMLFuI@4UzQ)z1dMS*kw42ZBA5{Tc!6C$7CG^cyo`49Q6dYZCGt!}a7D2tT1Z>P%@DynV+~<^d_L~gdj41VxsVHe2BGG^ z-U4(#3GKDp*EFPaG*$TD6{}aD6_VgX`&OJPUb(m?OPIs__ZQf7`l?CdL$Rc(lZX;q z%_n)-Y&-eb!3iVBf0xQvX3!MjJp_01xH%Jpc2o#$Yd~Otyn%pfTDi>KDl{XeXk~wS zue=E4=78+mGD~KwMB9?v8LI&J8n-+Hq7&~&y@I(&*PDJCr@v1YjYy5O)ohF(C*U1L z_Q%EFm^JTIHIp!xU&1H_DjiU2^J&?cbsl~B<>&lJwcFuNjaIP+(iT(Ui!r+^hP(6vAG8S>#)r9k!bTKl`VtrC#R2!K*?8T0HqaC7yk z*p`N0Xv_&vfnpX&9kbe@@DZEs%V01(5!q3aHa3BdePLI3KdlSon1EIX>+8Eggnbh3 zo=s^#h10r#d90RrflP$al(=_96W#Wmtst~L<_wyvhWGHQ_ycUib_ zQK~?I(k~N0!3;3PB7vjAa4yPUZFP;oe8+Oiku0mZs zP|K36#vI_Gu#nT7!N*h{a^B+Ow&BtJYYdf!u!+0|p;Hg2>EXYy`T__H`32WDD5#?6 zySbD=EEE+x@ogVlp)Yy$<`5T&Z~q@W?F8ng=V_N^OcA}(Zj)SYe)>y|Lv13|X;-g) zh%P?0B1`2DZa9k@d8un(WHp^Mb$D$z)|Y&({ItaZm9-+0%n%A$NWS@};W^{yl+u|T 
z>CwYgn6ZI-Tr@s5S2+ZV3%oPgxsS+|SMeg;AN@1un2lU11T&b^U`oA@W;4Y}B#(oI zNzzZqy-ghA36T+q6b7Khqwl~BQtod2bnq;b`7<+}Eav5N>fE05^8)ln)tgPytmV}= zDPE=fbMX_a7wTJr7>AQlLS#9F>w?PQD0^`$P`{w78A7x+*8G!18QcUHpEzUv+pj8F zETY*Por!Eobw6;3>oe^TmjuUvkLu4;dyo`)QS|Qeq^OZIU*~S5+grC6e5ufR7pHeo zBosKYS*)`7CGq99Zg0CJYiJLrY9cfKT?zmZ2=X;b`i0#5W*~7KCkePF`vN5RNc~l~ z*!lg0F?O}-_M`j4^cPsUUvoTm5ds!XBMy|#{;dI>=N{ZB7)0Lo$>>q4J140Th z`8KhdxdngaLIR}8aEq226sa>vFcOkP7dMe;o{M}(7+wZ__^*VM(~mIqP4f*_x&QcK zF3!RnR%`95n0oP+1+vK%bs2WvQ@sN}W)$?H89MdkT#P*RtTPcWFfX}G>Z-C~*%7Wy zm~j}mPvi2`VmthR#25nV!;mdv$o*XrG&_h8&Y1OIQiR@~F|dY8NOUb?hx51wd~xF{ zw-^g!ke4ktQB!$o-5H9%CxuFO?Bd57I$A*yIMnNqhSb>51*Z6f)`G$zr|3uKxX?Rs zWqo~N%jH$l(`Q*;^4{I)qx;jJ``xn0_d*zW;$6fwA*jo5%d0r`l#d(U5C19anLdmQ zmroVFe`K+!9|ee(I_b4u<|FGH7|Ts41A}K3XBD*Tj#`+M$KA*6;zxj&u1)k-=h8N8 zVA&v;TEl^e>ON6kz|^k{86xxDsbn!r=#m|&;7C-5G(;R}iF&6}!&{plHuxij92FgK zqQVCTPB;RI&!5!+<2Ej#U_-Dm{Hf5sD?L5s1zdb2ryE7m%A!FZk-p#-fl8|*V4Y&T zE=^V~vqebx^u4dhr-Le#bp)j9tkkODddf`^@Z9;3%zj@3lgTq`85kcpC6E?Hwa8I8 zt*FxkN!3Gi^2N_YC=h2aE{$K|b5~C@rc+Wgd<}50#AQ3%TPy2J{vGq2eth5Ol=^r9 zr0~5bOfgQVY(GW(yk}ZE#)eFYeq5pWgD|$RwI3t|P)JemRAsZOuDVl{n6?wH}H6lNPbd5dUs{v`c9;u^C` z$Yr%zicy;o_FLg=`5c)hbMaeYBbYLQmt|8dJ~Ez=g6=PS{q<|=4&4H~B1EH>IWkfX zxoHTL`SYijH#*zS<}d=+htF$Nm?sy%$Qx<8CRT!5B+w97SY_gYz`2UgW@kqTzs-yF z=)d%$zlyUeD@+gw?4<}9UH?Pv25n7G!nbP3)C5OR7YUxpnL!3Uzi%={CXt#U%gfxj z4}u&JL38;;s&H|HNvsVy%&NC0+VYD8(V_}Bw`Lsh=JboVoZ?5vwJO9He|ixBJD86e z@W(Q}-oOk|v@3tQDq}(myKz^&^4c}uNanU&hf{6a(@vr|*oT$GA0A>Zhwk$$k%8k*vUc@O2_z09)I4`c+O3HnbZyPZvlw?NCrwOlU`|S zROVnYdSCAlG*i`rm0}fT*D^TUq7)|rfV@RIvC8}wCaM*V8Z|{{1*=|0%?6H7T|t!pr3tD1{tuSeT)x_zv(9&x22b({GuZ zVJq2LJ*!m4Sk;JB?n|y+L}lD0l!6Hf&ePnfpEyOysf0#g#N^R7H7fjje!hrUXBtr7^_CGeRfq+q>+%FSCPLXEafh20h;0*y${@GL1HGN05F z+wXWXtqcZdwyhI!`VMe`5V>d<%82GN@gFkR^-8}yZXRo94E7Sa(9Seyq=1RzwiwL? z`hh;-DphwT7Oa&CZ-S$byZE4f;5^%IC_w59VKOsMaX#MXXBybFRY-=go)h_F^DH9 z*n&MlL)X<^_4e@Brv-8uY`K zG+GGLapw?Fqk5(bZHi(C&oBZ2PMPghCA^psKR~XkmxSHhBh!2a>+ogpvz;6vyDEd^ z6yyo+jIlB}{vTuBadwP9_0&`Tdj?}eD-DxSiG)Ap(vju}HGq)J94L$x`U{A|c57}m za*-L5uxk^pCN%setYi+VY8#+F!SV$E^QgVasmIJ!3p;G?M~%+uFFD0Rk%eI-SyHUn zxu5YmTh4uaI5oAt;BS&|2VMDjb>J_@MH%XUa~MP75y*AYaUht{=f{^fpna-(LzQ&o zpAefR6^S3f26Nk8lI)>N6vZA_RGWio2bj;XD- zOd&q$*G^BLkY|Ik0N?$3m%lh&`qO_3!drcTTwsM)As2<1cjkolU37Ldbsq*abF|?C zcA%|s05nd7*+7~aW+0?uzo@X>B@{o7y+1orU5Mz>iC^h!TmEL;03kWOPpTU^UFoSp z$+%E9nXS6T>>d>6J8UAa?3ovR{Qw~go33R&BgT!-=P>XN;6ekGW2GN_0Lb~$^FqK` zjlvM}y(+-t{BnDuV31j`%PMvG!X~G|g|^ig6T#dRWOCAUaH9+5X}EwEk|Ecy#ZVn) zfFj_W1UH^@a$I%`(V~>XH+lK)e?o$pNJYbMq`Ut`Da&xHqbY{-?@=jf>i_9<7n~CP! 
zbG2dj_~t{)o5g*Oc5Jl~G*7@}q`5%UOM92pN1p^^jJ{_a% zYC;{Q%Q2z+SnW-mkIAx-hsdT;E(Uv%6NN3`Q%TH$&Al2ZcNTtBs?rrTM8W-lgysem zveM;IIPu~y&A%#t*s^U`jS)XB@`^*{BA9r!?k}M#>!PD!FUykJQyDX*{3p#n(RwCCdtl`O7Kj92fM$gX6H!l0`-kR?H@^lveWn?ZIRfAflqDmP=^HOO^V)OT+rEL5Ku}?K)HacT{%e~D9;P!s}R4WOWM}!dJm8cL?qa4Jc_*6o4p-flzc+4JJxw zpv>kbMB_^HweDBv=PsV^t1p`LsK;;kD_1QRD|d~JX^8`}A~Fe>RMEnz1EXf{Fw_yz z9GpW=TQvWknj-|eFb#Y*H~v`Ge|P$+%CWsd)}@I(!zD zMN*Y`OT3r0P+uXXkceo!g?QL6{9Or?gc6$yg7}hn`OD9_a7*54E$&@Cj*P4A#_B!D z3tGDr63{K7#^!28$D4K*6L8cm12?BVe>m#jp{X#2F}b1e5u*CzP;^q0e6Fh z$tG6x;U?;XuZn?A4^+U<8-uAr!{_iUai|4aRM#}k;|SeR7Po0NC4CBaNUOs8=O1~~cw$bd$A@D6Wzs2mba0&b)z3<$_F3_7v)skucVAn28`lY?wjqNAjtc zoX^n%vRXF3k*dAPR#~2%31zo`SRXS028uhbF0RoWwN8$Ih8k&ZWaa71#vEx%QebbX%adU0 zreDxP=_eE7k7Wooa|U0gYVW>@>>+B6b!U{MKjZO-F5Ee)2pv&$)YuK zJi5K(DwKj<()J7SxKl?FDxRKi`HR;mt+#?#U53U%B%jC{7ml@rd4Ck9OqqC2j_c1| zb5hg>xC< zrmwyDqA%t0f4%(i#V5j?_&O;r3C)URE&^fTIpn>V8F7M{6Huv$Gx4$A3o8LTG;`ol zbw4K#@jL4$?sVfL=!0$`&M9a#)mj%|SN6lXXEn_Rj#4w2gaxOsz`UQ!*~Xqq**0C3 zp8W1@`@JmZ0At*V)_WY8PKY$=NkL8)&|fF$!A=vWkpA1@7-1dfxgg0Qz65`wS5N;= zB*T>!pf!dT?BZ+Do!z&G!B%nvk56~zf=Bt(;R09X8#GzyjeNbhD6FwnouTiB#4!x) z<)8Y-o5V?|o0N7VlcX`TB{{3!*qy#l(?frf*CeCWz>S56v0aG#?h`3*vyJhC!62_a zFWb>Q&kC2VgB+DOe3pCGQ!z&As{_?+b|`;mWub(Is|E55{nS!2H7wPcq+3DAMa)oD z#X&EeDNN`N=sKD1A!fIKoE)PBXH}0M11+>zk#Hp$43tSfsEVcuRRP-vJEN`wtYECk zN8t*jOrM_{(^#->L(#Xu5y4$qiiuY&8J{8Ls;zjhlLvYAS<$@gqzP~qvro`AKN{7( z657w3u#1^<5F=b96;qL!ROZnvszaV1kh>8C%d)p0qN;3jW04o8_T7X`a3Qs&g~e zb}G{3t@BErL8eFDtWUfJl4O7tYzyLw|@S=&xo?Pdz1^X1;+L zw~JDgf`Ybh` zUgtb@ScT2PZI}z5v!(cN{8x1@*f@++&HaYU)926HmaFJx$S3@c&P|J38*9$!XfEh? zmp}5){1Qo#J4^HVE-k4PTVd)np<;g%&VG_rkOuLxn`;_U<)C4XN=_-N8NIvLItJ8&!&@ewWuu88CO*p#LRnruH`ee z955Na!O7MSx5ZO8oU3_gbHksRzMp-X#9Kxs*znzix%%9ZPyWzJQ&dGreoj)wga}DW zQf1HAFzHWxL%1TjmJsNgO~t)upk-hOa<0kstVvkxg-u~;8kaxvSPKP@z$Jy+glHWG$l(3jfC-~I;!iKXA{^6cB99G{wzJ8W;2Uyk zyKs3Z3opNR&&RlN;|2cbbb-M^bQ((57f5%t@G;}B4^)c*qDAAgc;S$n1x3fnUEL41 zqNG9-2(52)d~ED#Rk?`&k))u1x$>JirC?4&^5_=-omrcg-xvB?{|NNjB6kdB$WXm@ z07EN+T;~*+bvl@9zPw{GZ_xF%wj6?_3d|{DPW7>>i|vePlWoM!0Bp`D&-<3htM-pv zJ*+maMv9<^BA$M!>5b+=)#c7A`<%J(GJ~UvHU+z%nI`gqB4K8r3+q`<5z;O@4zmalexjoy#X$j9+Q4(TNnyTDtcRoVuBr`o5NReb+hvRB-nB?GQh1+X%{MmF3&kk9<$5u z`ql#3E$bcN^vk8hQGk#bT(rdnSfWWNKBadFU`sl_`|wN-oS=N_=L#mfJK;2_EW=SI zO^t?p-8EnYxJ$+p35lERHXJLXGi1hXvsDGXsGkh1_Ps^T8`Il1rt)9E`X%E64S zL53S2TQ0tznseJk7w8*(jb^1Xvqf#)S#??6u~{)%KM!2Ev+^S%z*9ygOYq+?OQbBN zxaBwK@OX=lD)B8FKE@m|C{6BR)Q{ivQ5GFXb+lUET3GNEE^KW% zs}L)A)ADQl;ZeP>CG?roUQbiOuCkU4qbj2HFu}rPet79s<$xD=QSC}V-0flHC9|T{ z3Z#W^8=ptju`rJ*$LK9jmf)YO$EWnR^U3dVHwi(VW98PIB~-ByGS@XCRPqyarqp{# zE6OCLEcNu=;<$CmSt`I?DH0&ONQBjN((ES1uRww zn@a9`fAZIC$GM+LVWHvSrj9-0gkU1!-PD%#c3J*6*uE`b=P=?N&{W0tZ z{1y>K73U1Q{(t8X%!o19tM4aWxbr@i$;Se0={sL!>+epRg6-%?L7!aP>|C zsYsAn-7H+M23PS%ncwQlx)Tqan0Y6K(afuXhrhT==Bl%7f?0c^%OCnFFsWpR(wEEd zGu(s!fe6to@yK7vFA(NOvce_t(YBfdZFDlA3X%BTMAaFA}yDbb(cPt357A4cl{!_ef@l-D^%Qb|wdr0zv4x8UKU)8)+R4WKdCD z{O1HSQo&6jBqlE=VC*OiqM_9ucj_t z7iBf`bscd}9h|6lKn)&*9{_<)cLslq zC8F{bRZ$34#-7u6mKTXUvE&@n`a)Pz?^<&)_9_3>)niTNT7reLyGQi{dHGE%sVocK z0~;@T2th!+%JMx~GAvs61CVo3;cls~?yCgTDUvm56{+mCMYu-}&*sDs(=N`T&b{mY z;p_-#g74i`T9Q23!x$T==)H@&GBye3ssYZrP|+EA1r@3qMCPki{|ivzosI}p1N;#r z#6y+!;G{(UR$fov0qMkUmAO;|Q0*+I@=brmj;P;#js$9<@dDBH{fq=)s@wrO^SgB) zcGF#+XIr}A$K9{@#O}6QWmX)!Xz`Wu50hU*n4z`XjpT90eh177|2LNNThXl4H|k=JMl%i@Vpjlp|53n%!q8Wf1n9iqYU{eZP9 zl+5blni3F4X8QLzmyPjF*H*tSl__AHqJQRGRA!e2v~eNl1PT&f;%Xwt(Ui^)a}l_& zzNV?tTryo%)i;4$7w~NyD|VkU23DkV#+6=zvE~eY`5!Mn^P7_k+0O=R!f~y6;6w;# zWiZT%Q)Ea|LN8$MJhui9`}_Eak}x3`$=`R%HSYgLkBe&*fq^|Kq*+U$URNB}B0^y8 
z&V*%d0c{S)ApTx`^$vSbAE=MiHihKSZ0r(Fm{fYlwFcp-Tr28pC)WLqOK%oN(Xhev zQBgP(7CT03QpjlsM}>q}D0Y|iHU`-|h-!w1MW=!GRSkKOA?d(yIRPNoV#P$4flh`v zrFdhK?#{~~|8add#lYF6p7F9l9r8Z`b;{14b3OXw%#rFK{&T#hpbaevL%#1f;mo4e zYIaF&bIfS$KNNg334?3M;Ys^ zt|I1Sr`j0FX+k=7l(01U(C>>`Lv_>ymS)0E?ODwLzs;E0v(W+=uZphV9F;}JT9`HE z>VvxVr>74==J^N9CRXJ8hzv8IB$kKX6K2O2Jd6X!*^3z&FhHAlT>N#q=Wt}nK3HBm z@=K_+iU9C!8HH=D>SF_s)TI3jQ_mn{ipu7)M3`{1!Vvhke3+M>lN)TlrZsgc)aT2x zLewt#nrum|lu_B=m=Klay>#QojhE$6Cb3|eFrhgoCRAA3khM+!vFIc_F&~piIlZ=Gg(sFk0Lj0QCU<_DQkG7g38nR1o9DfOslkiVt@Zi#kT4F5%{FZ=~YjiNt zI6(WzAk{k)N;A}CbW&1hd=Hq_9=;x-u<15ds=07ub}3I9+s1N%V%s$5El zbCgUmFcg70Lk4N7Xn zyHUssC7Ud&%neY8MiJVw!A?8Z@CTPay?j$t0qjFODh2kPcm)%YQ{GF<6cKU0s#asIU;qRaFmgJ=zv-*QTqLmUgB#LI$YGvwSxUdM zJ2G$GnJIgI23O`6e9qA3_-vnz6S1<%E}zc&=Z{fTh5~3na*8ok^5$~oi!$XjJ)F9h6pf} zA2F&uoq+jL6cMmfNB!$(zU)h8m2=^V0&|WvCj2sw2X|x_u}_O?dUu zdr>8UOv?r@Q$Gtm##h8!zxY1c7P9!GvvK&yQDulN`4op0oG!nt2j2>fk%5kj5^%hH zGKkyDzJ@U4zMgLa<35_T( z!4A1aDD4b#lb%KXb8dR}#b=#HR=F43K`%^{p4woTwI&r<{aF4i8?aXQcg$0w0RlX* zqy#6{6;iIA6>h^7)Y^zy5bKu(k+o>Ou3a!d5{k6>tnVHu{&xYK#;z#dyVg>4)>2+x zJ^gTRPj&zS4j9mfN4w>EbmH>nOk8AS0)kz>El+RIyR#R{^W7C6$1H+EQE%t^u`Amv z8|zMN*0VSLa92$yyi3qu0VG=>#$*L2+!FpGoy?@Wx}2DS%!4aTuB$d@n7SjK)Ls0+w_msy8UM9jJ76ZKB4)I zHUH;i6vamd)q%=E`u|wuCwW%&MS{@s| zfOId@30B6TzAd0lCk7fK9-hnZJHuG%>2M)inO`R1-=WME-n4_Tw4P7@$p+9BnU1`w z{G=HA)de7;o-iaDRuOIkiG!lPERGp$#S2j&D0IkJh3WXP?D@CCVdZ_Sf;n=ncx*xx5U}% z@~z9a{oho$P>aq4Y;NL~;6RFgrM`$T&Y;msiU5NP(pfg5#Rz<7(r#mx6|mSf+L{L0 zcC31NQFs)CGq{9R)6)Xx!4U#`tvCllMP&NcPnsO-%_@k>>ac0t|c z6sJ@_TO>14*yI1ccK>F@5p@s33(xw@S;|@!7jJ449qB(EuCq|Y_7W<K*6pxD5HN)aMqAhZQpXO2_ z6LW0K$-;qOpj(GDqctT**0eVL)Dmpuy@OoLgf&wdqUh$pJ|mNInYlsu3ie3c%&y%J zi~8`KXCOrxH#6v|8OImejdp%vv2}ZOTMX=DJmRqZ@qvatR&iqE4H10-uk(H?P3N1w zaK|Z4KNz9(?s*Z@GzY&+3YNSQa;m+x;DkxQ3q^6J;=cYZPYtH;$fxs4{qLQA6JlaQ z5UN0`TY;#aUN{NSo$7W9>m)cWSA#ETWj*@H)ZRBwetyfkbFxpq=v{ETcqoN8aPDOT z4iiJq>c~9Zp(C+$sxHfRT!ESpU2?wL$mv40Cj^0Cw~4Am6wzCl<$_VwPIe6p)VQ&T zs!Z;FMcGaNoXQ^I?G&y}j>~-w9?(U5SDlc3;0!_!@+xR~IQ=`PhjBKCQRWX%W7x?S zmiNGssjGS*nKmkU#F_A`&=)|)+XVJhB%Ak%DCS#@08C^kCX<;K-2U=4L^PS zWAP$c@DCQTY$BrBvQ4=5jFWu24SN#y=8O04$uEftSW#zP3z>HEgH%=^OU~=BUz6Wb z;yn^<330qC^1C)7APmP4PaZM|!qY+O?^FPNNT3JZ*CvV=pM<%Q^(-AKAd zO~aL2sL|$18S16i$~i#^{Dk@JVu0aVP)b2e1~)t`+&;T!EI!`IBLuPPaInc4(ryB4 zcy)@%;eKuo0q6Q(07hHS^hk#kd+fxT?1t-r=#ue<|-%CKUZ>YH>$yL6wwES!;`uba-M1JHkzSl zq6Tg91E10>MG`!WO=%ijNWwACYn`D)9jq&PVMQ^y{b-K3u{EEYTJZKZ4i`%=@^$kk z%O8%IFo2E-r zpO(F$o#K@yi54qQraiJA9)3o#QILCTw;rBWM=nUtL+F{mi3947v3jy?6wPD7q@xJJ z(pXmI8bK0OqS`naCgQW8;BVq z^PrCjcabwqdm|T}qskM21E&zUwcMnU7g{ctKXv0@zx@-DF57I8`7#?dQZIgDoa>`8 zz=^5>uwv0D^x|*r2Eo(tTMPsvN(}CewFv-Yym0G7t-?cy$PZo6z@Nuqynj zf#PSfP8-T|)B*uSE}Kj6*xb}4r@P2SFLu)KidZX`aY|V8pEbLx$9=QXLBW`$B+z_u zW5%jyd7W?rrmKrD^aAS}86hVYeAL8cV}dTE2#$yEGZE=-*Xl3?-5O7*_= zY@|G&4`lIpO7UfwdgCi*e>J7|7TRQ``_kgH={Bxfa6v}os90Q+*sX60Ynfk(gX{04NuG-~<}>!XY(AgHe1q5{49}!cM(2 z36%hM(`SW7WFTY)7A{&CXaj6#lwzjX-`NS7ScD%3TT8%2DyTWd+`-Gd!|?I zQq*3~+z1dWxHH)*U=N1v&TTZNpBAZ>>|+QfaE%mpP<7^GE0cBkhH&lOfP+n77ZsZG zSV&Pd1oU-*d%?USETi;IA%17V{vL(y z#W=5i$xj~sZ~0|4F`%|ezSCZW!SAljZ!ZfAS}_(~F0I{rMbTC&L>3iIwwuTn*g~;~ zN5jxzdK!qnYb_(KNU@M+3-^6dYTJwh<8xzLmzytq$yoyRcBn&QXrOx#cz5EOX`CA+ zK$QErMmnEv4}tajU^EUsQM&{l>eaE6{F-~2z19OkEGT>h0iVYmCe$YV+{$bVNof{S z>zZl5Koryk4evRilB!RNk7Hfo8JtsFFMIj@#eceZUp|te8pG4Ia+VC|-!~^f`=-D0 z15O3QaBknNYi&)oQULlE3`!=;qBM{-iSS{W=0s{CegfV<1f`=P`yGKH-mw!>5i3-kYX=}c$W@fBPpZ9~V|1|?0fs1x?llyN6I(@-CfyU7M7rejNTUi zH(PJgB-<)k2x}9Vt6D{00DtMMYxB1_~-B;A|t>o zg8)Km2^>TXkSIz;f<+nyS)m#Ph=Cvgnzd(cyCZhpTm6Cl3;mqyan5~jR>LS+didw} zyTiF>e9t)+JTyx7i4!M9fx3-xeNe4!C@EkE@W 
zxoa4l&_1a98;#W17e%zKH2ShA+#j%PmsF44eB7tVOX%IMsCJ4A6kTax^q3ydzAuBH z(D;s8BY|^!;^yPN0t=_B8AVzJghs*rU`a1Ne$$t45l)th(vE*S*!Uu8OS;7t$c+DED+^rOE5xz%cKf$7dZ#^V|mY#HP9)F?9|5U=e%1% zcEZZ9uXd^@=<`8D6LRyFn6Qtlxb&R=nQ7KHWhX9wLg+|qk=?&>Q~0W@y0Vx19dcph zf^}01>r$NWQD4)~5a)rizLn0T9WG_$(GRcQkoP5hj5C(4A^F{!x%(Owkz@r)I0xsUE;>2b1!1h&hby3b`8 z-&xW!LNl@|&5A2DLoiw3ABs+mm#Q-&>`r!tFWTZ}MWo{!CSl}IsP>!FZ{}|wM@pz= z5EYRH5Y!}Bhps{c$Y3?_x%i#Y$(?O~Kc4_g`-wjMv>jzF=I?}i&q4uEAYc>WJj-xI z`FL9O?g{N&ho(<8AHhsm^GF`W#Rl`zER^w9;c5-)lWU-K6~`G+@T2>|Uuf%sNxH+rZ1{@6z(0?iib)DXjSOZGSzzWz4);*;)zTs4OdY(gc}}%U z|9JvBD;$WBClhyC#J7X$a`~wfZPiv&9V-Th7W~~ek&a_wYw;#J?sil;qF3>?Uem@xQ8GUxg#~=Mp+}Lc$(&_SUXgj@f0egMg4%{ z`Zl9KK+Refgi>~vCxwH{m%qKc<4eU7(#Rxj_^b?d%b(=4|MS5c(c5*X7j7MdzRcg$ z!lHqYaShD2P^s9ET7b3rFlhuphfm#^h$VOc>jki;l3^Ppl+XUj8()ofwJ5lli>~Kv zaqpAfggR0<-%$xwf7qOi8-Am|`KP|tVWrT}$$93I1lBFXMi#u22*J0XCG0_YPI!Fm z6Wa<)lh3HLMm?A(@*8MJa}g-jIUz^EiG*Jf_mX!4W<@3R+3wBseTd_6=uG@_Hk{Fv zAOO?gQ-levcbfB`!?$EAlo{9KC21z=5i%O-7r4(A&EKUKm@F5|p%?Pc%26K1w_r}l zzy3gb)4?}$f6<^}-g5+2Oeg`8w({G+9(>W{^~j`&r^aPpl$AOSuC;m(=bCKZ-9#6D z=r)dy5fysniN?MG_$x>KKN-G*KJJ9+YunB6xvoJ?k_l$TeB_LRFW;r_;HW{8N|nG0q3FxqXvQ zAN<(=*)Iyu*9%AKS#{hgo2XdaT`(5dA*Q@jn&0@a&;Q|Tvxm`2*ET9gtyZW`IH+>^ zT5qJu4rrTE{Dy6`2%|-$WoLJK{VD<$JVJC_c20 z%L_=xDcu=n5BHkBTeMyEfA*^jc~OH55?bm72cw=%?Dp=RegDy{pGBL2FST+tafnXz%XZMnfRnChR%@<7C0usfzE+lnRZ%FkamNN_+C^qm}Ftce`?i5 zW!@{Rbqy^|D=JqOO8YJ3cz850jzefCKki>%hM&s1exgTRtJ$*u2D6N17;r(0B`pw& zIfVwaWwWDMMwdTCRDe?vV)W9M8C^*~UX`2FEF+pI91@S=A{WlL*$V8qK2g8xf2~d% zx>-l#`OjMFh`Ri1bkR33OWxPnyp4=ZxK}?aN<|l4UWiqZ0zJ?e-v*@~wi|)eYQP)( z%;uk2g9X24-+b|gkP_&8tL){E($|qNhHo|ACG$f@zMn-iBQABYt-Qco@T9vzd{M;X zK@+hT!#Gfo27Pb5jr;6w@Z!O#Sh7ZBijx1G3KA++;3TQ=Tm&8xcR@aLN$;1C$ z5tt(+fjXHH9Jrygz1zDeV*A^0&)hrD5OD=mb8l>IwpAJ0HeaOdz4?h2ctk(I)E=g< zR%yZ`CeW>jXMGubZp*(cfA2FL={_x%<CRW)5T)ofa z;dfdWiY~scDri2tu)_NtB?kg+RUz6ezFc58=6~5vrHW#J)+qQdihkH;h}>?UXjtqq z&;_5@lEp`aZ-9fs!yLF2f~P2q{yha{Iz&Z?0r*g0nF>7wH=pW_8@;I{03rv-ofT1T zoTQ9tShmk^&7~oSnyOrbsdh%!Y=kOv^RcMe$f0d3qLNyB;YZ}VZ@wfWTCACS5_WT^ ztwuG9EhS^s3qDD?m0T*nkT}hW&iS@5{B4UKx8t-eva4rF)s}J8lTHDdXce@(Asb?> z_J8Db*3Koc@=1$)!^v;lIkRF!GLu!N-^k|7qPSTG3|JD4o>0mmk7!B5kODHF)7b#E z)VVS1r2Zjog;=WZ#Gg>0 zn!9I{3qO&aJKIaE`#u4(1h85nQ}52^fqADaRotJqPITF-w3-q{52mf0g>G$03((Z% zFU|1xQ#nw-+EJ%cDRpr=>ub5C`V8kpFss_m@~W&FkihsHML1&@*eG8RJK84wQ3z-Y zt`r)z`DTRPi25i0sEm%#8O==LaZP{`sow-gNX|a_YXTq}g%ki!5+dmIPbKSvD_Y6x#NUxZ7#kG0IOD1Y~LYWlq}2LF9stWSLD!6LRo zICQKz6Lm{Bu0pI@i0SGdGsvLR3jh&eM&iB7L5NBd+>#Y+Y}aM;X!JyR8#dQv>7aA$ zD{8}M{P>xpgTq&zBH6&0CT#9ECHek*pFK$r$(wssCx5vm{uUL z4PfKbLN+TNd3jNBCJdVt9MZCP)(%>W5L};zoHH3vjkYvRV6L0{@w&-H_};sf;=oVl z?`FJkN}&%^l@a<&zIVw#Oyo!IR{a}WrMnhIc-RTE@`amz9J}MT-1}M^Mqs|jbbDkx zGzj1b@`nvm4dbdaoo9RD#$O>oBSB;$Kp0=b>5TF+`vps8GBpq2iKU^;izCwOPwsppx;U-7ME zvrNu4=D+eKA3u0QeVOpNDKI8(opWUb(@aISwF}7bhM~d*37MAa??Qft-Hr5+k^{_T zSfnhFU|c|2pzw$Qko9Oq3Vo^kmL7k8G<;gaM=11#?~MY~ev}Yj3c31{j&V5zDVzOW z*t$)m#$nC?rVnul5z0o00*}ijX^OJB_)t+YL*GXU&1J*OubA`7Enkj-9&+VF97H%V z?9N<$*Nn zYv?zkB>v*wQE|)gwiIo>Aew$hU32D5Gz^t6Sqd%~h#+_p9%*S|!`udCf5vw2o?HoI zX9;$s{ypkDTF*#@l{7nDVje;dgN9Xg^JSrWSk|U*qTL#xP-?gJ0f91cIoau10lw-r z)cJ88LdRp|uedHK;}qAkHl4y;;Us9MY*?W}rqTo<;JN*&Ri__J1FJ+PL|sTr0~aoo zn7wr{^}oSHQ#T5$HJa*WRkREmrQwkZU3O$AmQQo-MEQV%c zIZXrVD;$M9j$5x$vghigcL(FHX6By?E zHy*z!tS#jxTS*($-xkRazH$7Xt)FRm4U3m&W@-x_Ym}VI^5{F+EoY#3nV`MRuA01H zn-F6Z-Ko>JLcJJGgX^&%d7jSUjWFQoH(RudxlIYJ1WgiZCN@KpWI6lo-aVHuhbMaR zD2RR53D+8H+gu4!#wIm!ssRx*$blTm+^kbfP*18p@VP7=xu43YID1{L)mLSnsREQv zXVIci20&u;t-r10ajwngKe2YFDSau^5p%zMD_7vbzyYR#;p=q9_jO9w#Y{+b6pldp 
zi*_j=8XD7gtiDl`tprXhpZYmJW6*hdLaL<7=2Wzg+M2Rt+6$oz%LrsE$$CwfvFrqS zVD#`c5o*_NdsTVO&Nkn=FU{%%`Knpguw=E9#$j2J4-M!CF3i{@sH@dAe1jY^OV3xA$9(g2dN00_LW^A;FXh^-Vr%^%4MUbgYDlNU6yx;{mn7x|@ z8)-D@cZ$RJ6-3^$oF3}=A0HJl2)Z?2|d8s_eh^Fae_ zVVK~(RCFy87gyAv0oSNXKy%hc*2`@)kFsHHgi%n1cZ`6K(;Oaat}@D?Srt&3OxC#n z<)^aYD@5=8RE}nv;6>WXiDDWN9#+!kgY5GpN}c^}n@Lso*&Wb*uhHJ4t)tJ*IyL)? z=I{2M%>Ljp|6^wuARm%bWj~wrGY!p(G5~TS`-`y0K3be__;f~QtJ4Ryka2Rz`e7zu zlKffA_zaIR$+rrOMiasSMKkk(GDXI}{3%=Xu`lU~r+!b?v3CfpL_S-0 z^?_;xsV@lK8NctZBxFzkkLaR&05p?+9R7yY{kk0HD&;Z#8**)QUcWG=Cy$QEdeD#H zSA5eF(R}LXHp3An4=#bm@Nttr+I$ppZ}$A&_-NeXMp1|YFbxW}njL$!gAIS3_27}$ z?UyyLy0GccD;b&m_Ch|*rcqlC2j~A!+D&m-AR|Q4wh(g0;291ExvST$m6Tl#QIY0s zP9V9Zv`;c0s&IUq|UoFp?S}bD`zQA89v~6ISwiyV1KPOWh^v?2_=6Y{nrl{=O5vG&D zqvfGwk`+WlOxQZxX7GttxSNdd%My8#5!(3Nd5`Ztj2WHF=R`2Et-jK_S%Cg1bG*p= z+&7&xBSlxp!Z#E^HXkUuHt=!z>DHc)QyDxh{J(r$|~Jn}!KBuddMm~SuPkz&fGhxngjn~2w!Wn%q+Q$ z$|XMpljj1xVh>H(kO|T3(~_e^*)-G0{LfcE%0LdP{=Phj8wwGivB`Q5%W|4P&VG9t zklM}Kl5o+xxbkynhd))+T9cx0GAJageth!Y2wlkc-QIC{;%#RUEAs8L(u#*~x5;bi z$-Oh|h;;~ua)F3jy(@Bpz3t4jZ9LWULzkxveWnmAh2a;IU!Bo9Tv>7*?E$me z*T)mkAGIp4Uh@sGuA~DcnKeQ?zEG-8C5txR%>ZlrLQn?Im1V3zfM^o>0DcX5`)K~D zWgDspJji|%Mn}pN=^KsfFuTu)N2s`2dUx3T$t(i=<5?t2tRED#O>&C4+GV?VTlUCa z0dtY>wy9UGl4Ju1X%!-D^T?oqvNGbG1|1C2w29IS@bYunp0(sVSFL{=T~^*Xgz1LI z8bkyR>4paXwr2NG&3t=D4NC77J;I;IqYvs?R-GEwTcpZ=#JrYKeB|TStkrWPbr`NA zo&x|~p-pvkg`nJv7%a+c_- z)*I9hk1Xt@fcXme6*;PZB?MxnBzo|6d4g&+N}7naVJVZ`!WyJ9}WvqedtbWShGv zfN_R9g`!-ts=HX5^1kP=qm^jt=O@(a^kJ;ZsK1LK<1lwY;mvorv8ra=0XYr3^5=P? z-cSf0fpI0S%YV5lQq<}2g;MelzP+_S*_2uD5#se*P}48E7>=?WJX39zbEC#c%>$y@ z6ICea>(^Gb=wCuDisTTM0m9Ld{PL<}v{O~%dqXa+c@&(0`6X_yFD`#0KRP6xE;hje zIh?*wVRaqW@=a*=A|f?^d!m{VE}0m|ikMd85Usp7ftzIeXs#3Y$`+L1!agnhEFYrG z1p@emugbMVx*A3+18liBL|2_aR`CwXdkjw~w{QuAk~onpaG_;i{@O1KzWAQbjMxil zsB}5wA-!Sxh039}dW)$VCaw*e8*$kO{bOXrxo{UR02v{W^Em z-$sQ>Uly-1P!VWES$7{{;196BBv1oLC>j7;&-g{r$5cbu_nwEBt^1w)m~#1Mg!@t| za(UjyGaQ-YU;m2eOaM4iy;=B(42cl$gdE}(_;mClPG%WpgFw$|_iLpmf;tm&bSVUg zb|CKo`%*a2tY4MvN_YNIxG1bRyXsz832s7p8#J`r$qgfclws5tM~mFt2_tJ@zYrB=et7Sg!SpCgp|y+AZsndnh0%}(N?sj z;1Ab{ZL%mbjScLl?AfyLCF7M2d%j!y__i}KE!Pg5uEs|i>fEZLTK>swY5^G^fW_Y} zytbQG2`S^kSGW+qYw*C_R-RY@48UJUc`#Xm8AyYp!Um>hW!!vXA{JKvLYPTCb)F37 z#jLnzmWc1Ph}u^0=Tt(iXyldBnh+0itO!0;DJHXgguG$B$7PV?Sp{2K6!N-%KKX*z%W|F8TbefS^5&377jrq_0B#93TZJa%f3dR6-8hV{*q9_lp zRT>LxpTS|}t#?fCPKcBUOx@Yvk^Q*VjX88xbN&m{=XzT&BYzeD=8XLlQ?KVh%sOp- zM&We>102rlYC^X!CnYkmVittTd@OP<0s8o-wVriwk;5^7;cOgTZomAr^LXZP%e@4? zi5s~Fg$WQkWax!kpM|IArB`0jmP??}4L{;)#FrlT>97eP)%+sz%HIzhE=3oZS`}e* zi?)ZQWG(xYhHnojhc7g5u9~5O1xd)7&RYFQF@c3DA6(&G6Qyl{8Y${0_hdBx;GudG z%(oQ_CMXm{WR^;HAtbgG#Lp6UunK+seB5r`)|U_*Qr=2l0j_y@V3kDPAaSnhudm+p zZ_S%)jw9brLb^poA*hgTlLW(F0mI$LouTITDHvr9{94O^$)Q=xP7a$pYqICud*Tnm zA4r`>!4`xmaVYsZr9lvl^wCDsXok1BwWSYQaxaP8zD_o+LCoUxe-?=1wU8%)HI*I7 zS)g}&ec~BbOIGP+WByS$v`gk0MF01H{qO!i-$v@q6h&~>Hn&#g{J5HO;jg1_s0Plj zw@IjK6t%X{PKVVQm(t%reT>;X*-79mY@P25`L=$=!O18s+%tPmUe4j0q62UAmtU`3 z#_kP|x$e5V=JNXN>P_JZ$Y}(A=!e^gZH)8CD?w*nw2c!^*qxouD8zxa5vVXr?^XXR z{tzP`nM;5E(P?L@Wz-0pgYPgx`VIfzvOj<0YwwXh;>&^p+C&fiBbnSl@**LcZjEw2 zwIkKNslci^KIY^w5hjzWCtryVZ3X+K;x56Tk>^DD6 zx+Mx7x5m`}mBYBMR$K{cw@wd>-r+Psmy7x?`3IxiAO5-kRLEZ<0az6R z%t`|K;}jq>EStiTu>f$YQuRpofh3W!;vcT74;1;?C&VvO#9QtS3xKQIyEU@=ycH|X z$|{z06gy+HNchC9Z_!4bL4P1d4^KhIL@r}Qz6*nc#DoK)13@h%eglT)pmVg6VWrBbw6fsz_WVV^*yp;tD?yWt+=t z23PX)3Q>Shnv(J2>#@W-EBWn&3%DN80|HDY1|PQoq^5E0)ao_=2|L`h7)PjtRJF?q zL2PDtN|N_pt(!Upm#lq6EZMNGel1%$CG&YmOkQ_Dd^!Ua@TPE?ope^MhNfzr`_ea* zVlaa!YDMJ(HEAr^ZsJrl7*<9Xs4agpeWj*k>{h`oZoKf6b2Lkb%cU4)zt=EMXxZs! 
zZaR@qmEi|fkwEfapz#l#OBv;o)Z-_`$96?6JmDH-n+@=5z*OX+9-#kIIXya$QR|>E zDv8TKic&WyW6rYRU5BK`=usKjkm08U2`N!{O{Tj9-<6p_GdZflkT@bx!sP)%OgYMz zuHr;RNh(feDwk|ny%An_%VXrQO6$jTciDZKJAxt$1%XKr*L4CT`B5G{?U9UKx)(_O z^>Z`7jQ{SqT|}*|`U*)rP9?T?(4!RZMHt09H!2O>)$za-%|b^BbR)9@lSJ(orDxnC zQE}{v``^@)CVO+x$$*&DlMod)SkwHcp_edA;z(M6jMGwDqdHd+<-7b_Gce?^&BNo= z&uGXFiy9G|P!hPSRaKfGS$dXJs{KGr*G2Xb6LluQV9UYF`EdVR^42k-P+2lO)7&-h zx!U&Zl6;A30j_x#yqnh;Aox9bP=X2K}U8p;?vD!AZ1dh#^x(sH4cyfr%kOtS0`Sfc09Uutgu2JAuMmDd9Xy>%H03eDBEopYX`px=5T`mH4HNDUL-aPff zQ^LHG&y}LzS8q8-SkP-x8FZ336bt5s-W8>-Cciy6s*R>Cn%)qSz2lO1GWmQMQgqgr&0cPU+Z=> zNi*!I^Zu3lPx=n2ITRMxkN zt_-vmAq!7RDs(LuIoY#xyptVUs!qaX-lD8+Hvk|nzbuc{n5mGZ`LK+gU;Vj^UNbEQ zZC?KL;Coz;VP8#bq9B^L0#qB-Sk5eQpG68mlyUWlhs@xti^83<-u>dte;G>47B|eV zC_OWXjSESpE>qzn<75HE$&|NnZJ@fxNuVkzccR$SSYcpxGABwqWF?RiTzWD_4cx{m zaZ}1%QOha;NevRB?jn}BKt`ze-lvX#hT}5-XDOrC>8grxMpBQH?g5k#tHo1L>OquA zoT+JPMm(ya>_^>FVIU*tg(HpJSqVl<)t2W$YVgLwvgI14pq$d(^X0W?R#}6&psicB z#aSU`r1~N3(B=yLXa}deRzF=>ctX2%+-Y)mZX>iKTXX&{EwO|+xbqU-<<)490C}9o z9(*rDFe%fL%WG00S~1|s)@k7kQY+XjU<7MoKBQRsm~?4#_JFw`_#+-R_^o*te&dgl zKJNeg=8LZg!vd^7q+4%hG~_BNd-{_Tbt4g6rCeNrX57OvEGYE4agZGBX{Xz>ykVaK zN{Zhx_NhIH&^-BossL|QBsQwV1nncoLHhSVA@vx}l@<9bsFLdgh4`CS_n(tD-zvNj zr`FWTAMBhc=ed95tG_2?0sqpfD+kH_C`#oPjRyL;3=yRQ#@bNFidZZP>d77f>NfXh zxbvPr4^Wr+U*yD4B+qcRy)qD!Q>C;K-${8C+(4?~? z$~91WjnxZBw+@#`5V((D2V6tSS*}Z`gdL1;G4J$j5Lqd#WAC=lwFZW=3TDVxFWqoX z_H10Ht=f`5&lAJ>FMI}7yHd4~_&vILxUiwMsiJ;vfjjkauD)-PS^qSxPDr{DK7)Zv z!lC@ZtimE(HYkF!crHU@Wr~N;8COxLwZdvRDk4Y7S%1A1sm)tO{VTZ!IFMB`p@~ia zLAmtU1kU!Hkgd5exSmBx8?;kw1~bb2R~7@Q`yN8+k=Y zhn2Rq#1U@9FVdS*iniEQo4Z!pp90*u-0h@4|GcPewY;TT{MA2Qe&~d-yzT!wSP>`( zjQ_gA5K>u=3P_OHyi^5KP!+}Y%~K@AKRIK&{3KY>MYiVoWCd(}r(h8g`r{hF1m&%p z=BQE zU+_iBF^nJ!nO?m?j%g?zRJ{%I#0gBK+i$}wzb`*x2^1Uk{UA-TXfsXg*l_sGa`7rl za%#yvT{PfM+L1^ta7YIKQlp)Flws3^oM>y6H+mXZLE{6g9hG-em7tLrW~2sSV3?Gjg(oy^UoA3fgMDHCjGV;RAL-oM z?x2F@Y6>YSCHYo;x2z#hw!09bMd>;SLJ#R={`R<4X6-EFb+EKsXLD)$_}PfCjU2Cu zOLPm3fuRlb1{znk7?nYD)fS(0k`qt47~DA=#k}pz^fcKe5V*=VP{DbTZ=1DM5Hh65 zi^08oFBlDwzkkASUN#>w5n3pJ25(xFZt(XTR00X-L!4rUp2P)P#qy_Km@}&9Ro+sy zJ)_B(2j+-Y5l6roRLuOD{_`*YXD7y+FFq-|Pr~w|Gqdb|3cZSmB=T3Z6yjvaPm+xx ziG-8SGbfw8H|dIqJ77K&ZWUavuP|l0FInEF42AV&o?~Uo37z3v#$xGS6BqfxUuaFI ztM4H)0ZhnRlD8VavQs=3V4f(0A>(4O_0^p?`T5V)=SCu=-q2bzc-b+y zz8JcgL38p9F22;(Ok{?61W6XbBc>0MTa0;fp{3MuRlaF7#Pz_p=d(rBh4E>e=khmx z?Dr1+85~4f#J)*AKXubt1)L&ggS9^nE5&Lf`H-iP?T7XjDG_&9{6z#3RoPAnM!lAk z94LYRp>D68DcNvx#Erp`Vm0cc3{SG|9mu2$;yFH}FMmaLw{dNn$)6U=`{l3wo;{oH zna_99fAEHr8w`)`ZNdSW8Zk!jQ(+1=E;Rf}m_|cv@Yeae7*u)eN{O5Mi18ZPG8Z{Q zVswjGQTXGackB~xR%YLdh)Hb3kZ#dCbqNOQt6YO)UIr8xiTOEgx*|?#M}_(5tJQ zhC)U5JPUmb6_|n){-zX2lGRWf7((@(Vg(m%b&oG1BMBB3(GkMhsz;vBKadG6(H}u* z2XpL0^z0wV8CHuqnWT@>#fQ5d0iVf&r1OCc|_bet{g zBUFS%4HZgD6W?cMzi*BbIr19q7Xbjt8xfJ1Dw#R)BakhV3|#;R>lvW;bn_N<;%A-pxOLKv$4cLFaIle9kYLNZ(hCWudsZf zj85T&4}Yvq`U|iuD(>rmZ0A2IdaqXFqXzY^(+2sdWP+h}%Z^Y$4}DMnkQ`X@2NX*L zxSudNwL?n#8a7qQ)Li$}CAs=be~t;8EH(VH+_4TuCg2%k{zA`q{(>Y@CkBajHCpE3 zFpV(deHr^)B|O!ggd`X6KHZx%2C9V6@g8hg!y1%~1sf)FBKJ8MnYmK<`V(0jE|FFr zSF!Aq1-gpvrHhIsMuASA3DDs$p`s5^VpaC)aMVgO9Tp{e=WiQO&hPBrZrw*?8UJipf=&gSYApUpt0>_i5c8(Pp6kc$Uzgub21puKPWNq(Jb){V=Y za3ol(gOwH@fx}2+#)h%{D1}O+uUmGFo($2CIH@rN!`CV{_8YVCu|0hI!T01)Om8uu zx)VF=J1C%T#O?LOR`FdNX{>d`c@%ZVH_C>sTc2y$%N%EJBWZ2fpeymC0fL z9R=56vXr_mK(Te49RxA_$a#WYGg0FV2JPcQnK>;=h{wo%kRlUld_5E96Bm!QC1|ZI zpNh343HX9B*>&8`By=9eW!AGOILYTlxdfCH;T^Db@EpwDN1XI?9vSscDU~EU1E=hT z+w~xtL!<~Fz%5zfY46zY(zCmAm}D3QkmOj(522$=haLd(R2=tJTSbQiQkHqo#uKYW zR``q2B5>r&bkT$}7?uq<>D`0J`mp>VnNc6h^bYZH_)c`^Z$~kgNo9NF^m)-x7a3EB 
zBVYc;SHEmMRaBs$t%-%?pk$dwo&2{#VP8XARg11Zf5+e5o&Q|6p|WzH%uHGH{J&>C zA(hmyh*S1br&dX!ov{h+2IaV%nC<>E9v%yS;fv2aCL6Rw^C;QjJJyWHf6G|QrkED( zx1T2;meu$(8`uyRC^C6756(xjAcV*KU7QkX5?t5C#P1jvgI9Iw&#Ns_8pC_m1H|v3 z^(Kjs!*Qdl8ubDUQ{ci=Zzey%L8Dupgmb=D*JfsyKYYZ-U+%Shf3Bib@IyoK7WJrsN>Wo&numK!BZcC)5oW@@jTys2MDy zh3neiiEao|Hg6h1mA?KGc!P8ud-25^zC-N)enWjx;P(m$SF)h7kmLu=xe84*UOl}U z#e2(yUH}4Z<;o<9jv|WDL#X&7thOl}cdGL5?+l7%K7PZK`gt(v`i4A}wEd6M|CfCy z!3S*7G&Y#)?TDBN2E6b}(Hz(+bXW?g`np$XIGLqBuO{bPP5#=jKs4A!hsI>kpj;ip zfY)-`)z}H*2^!>MW8c*cnmMfh>&JRG07v(oBOp24|O;wFdgRwRcfQy8n z2c4rvD-MV^5}3kY4(|~ohe}3PMe7lW4sGAbozqH)v=gCT2Ml_m0iee8gpObs;^Fh5 zJb6R?o0hb!b84KOXJ5#|^c?6M3Hl(d@ys|2VBSQH?BK z=n{2rMP8hQZ=_1{DAs(Sv+S2Z7lBIT&W-rvU_4i^`)D_xkr^EVy(6~j%)fqT+fS0A z*2?U=)fK;4me3YyL&>p86}&>VT)}OHmogkT(_9{i!mptc5ZDcrLX7i?6D0_lM+7f0 zi_2fB1fc`Wgd}#TT>=&Qs!`BKY2C=Matm(f1juAf?UN#@2 z#mis&PoH`0Ykwr1@}wFl+n!kC9_7=^MMsVmfW));JNm$&h>w$|Tlm=^d@7zeWu!ey z5eixNCJKcrg9jkLLp|UQW1ALx3Lx_=CXf<1!?J~1FoU{6?-r-E6hwS4@f)7nYJ_!7 z_Pk}fWBCe=)~gFtP}wGK)?2=Va5shMfgqs93O!}wU~80s^D9XxmiNohV9G#_`B)8{ zj{d00+#GY;+48>wjU=ArlNohEj<+<7V>KGgft-0girjlilKojO?Y}|6iURyVji8hR zDRZ0A;`UIr{{2cZDHOW1Y}p=6!RJ3~4;3P}UsdV?zFbt-{I$=Woj++DhYmu1ipiK5 zi#k_M7klH%^09(h12K?!vc8XVX>mqfqKIHr0l*w7YHjaa*B@Q92_9NY$pN&X}TNJ&W0b629CcD#Q2;7X=E7d)Eg+ z5)5_z88up+Kv{3SDjXz~VVXu%hOaR@@@8-s&8YV!z^AK__IGmy;)T|0?th;kP=pYP z#JQCsCe#cpw0XdqC%ty;ICGes(1+O9$L9k+#(MdD+5mtfdyV}-`x_?fuRm{`1acjY zkyEcxZNQe*1A5h8dj6E-mfLrPxh4RrM^uXw{*QIZ7ZQUo6(2lJk9U?F3af zvx2zERFb2>An;GrhajiLKiOcF<`#ZqW|%)y)cuhq3SSn@W%e7!(qwsn;We)M;Cu78 z3%nU2%S96tpR&d9#D?ImlXQsiFT6&(WTWI%Kbt5`W5Pwx=N)(}o+#f~qG37vYYzz| zdY#3Uky5fv6&2;k^yb9;_n243zK;pm0aOaAv=O%Zm{q4}a{B-m^O6ve0c@kiK7Nm83cO zYtx;5A8i)v+<$GIe!~xF7$>W<;9t^GDrfkc*Dl3>WlttAgZC)#RE7s?AQAi?l8qO3 z73H$`Ia1ow9=^gvQ-YHy$iI&{#y{O4W-{js~m0U)ac_#N*=gcQX zcR(e=l-l4s2w=)|65oOYq7&WuA=HV34E1hc!`@HPO=o&}#8O+vBNv`|ozN z-T*$JQV7>zLGU{#duFi*`C1D|Kj-wPu(9rJZmr10OJ0}XnQHFqr#ns4T@~14#ecpJ z47m?{N|uH@VH!=MngLto!TMdF2s{?xiO4&uX@IPfK>;I0L#6qr$}Uc`!ORXRcRD7r zBV+ba)aP$IRrCI{@dXzGy}?Q1YhRZ)VgV|Q3J2RXE(kx~sG3eG$v^l$9$i(n&#JEK z3C0{{QG>y539b=i`#AzU zx*r9KnNg%{F5Qeir!wx790`EDzOG{GdZKjdxZVag!x zR0#kbI-6<8tMD2B@gMubW&^XUoPMYho$7oScz&qDN&`QN;l`=!w+{!GCBJLYr&WrH zX}#zc*)skXHXN-Lp%?Kv%Pb7zI-z?(*V(O+pBVUx{q@snOito@)2vF0xUA)tdgmvh zH^c8UFrR=it{*43-Vs11;d7X~hKdOWj&#P`uYz)GcircK&puR-`8yd`g#IuNCeDhN zct0FRUUMD2)9VOVD~pnB7A{t+EMdBWFft@j0%C^iYCBa!l{%8F#J>B+J#d@!@Qk}DLuszGYtgv61`>R{ZSmoX<_k_T&$9%qw}JRwhNHTzo3+>MBB2q zUrNp!)Z7^8slevwGM>{Z82Es2vcH#)7mc$jGbp^BGS7WPVh;#&^csOjTQ!DM!QIdD z!T0*%5$h$eUnoo@(pXItbCUUmNB9i|xVbru27Lo-9xM~X=ba;N>6%CAmuaK!oggyg zd0e*>-L1QpH=@DgqfS@%YFwQoa(xx#P6sG*X7y|}`NABBWo@y4&1L$#UEwEa40Qhy zdOZXhVBHAf+HhbI=UE&xJWcDY91t>-VgeS^<7OplG{2i~B)?t5X($_D8eb*MaWjAj z@{pNQ;X7swyBQRAkWp80Mahb`F>>bbic1>4#bgjhqSulo*hpV|6nY6C-oFkG$Qvt$Y)^}^z2v8ui8O|?I;>CMdxde0Yx4xnZrEXzb* z&hZ-@=fp0S2fSD4iPn3>Rl&IGEnDwRH;U%=d!6x2)_X*9>ph^1Vkuf1_J#8x+~Y@$ z@I9~$^UPuM`sv@AxXHwKc0AW+<{*ky@9YWv_Hd~QDOvpsBGdjC(?hMvVNv-!sT?wd z`R9dLD6%A9f&^CD?eYY2DjK0n-c<1BO%o?hrivBjGwI!S{K&ybo$O|z7t8a|0d=hC9` zAv9;mdLu5wm z?YQBR7hQQ`sqQ&9?E$2qE+6B50H zrFk zG9w^=&+qaJU-k)akjpIZu**QKK`xP6N?fh?sK{&TKE!Gka~jn518Z%P$KX+e)#Ic4HeoK4e9E-Uw&FSm6^p3!^uH)qpnxd5eO1GR!Mi2E zrhWKO4_}*qaOR}VYwNSf+yTZc)3p+Pgw`TTcJ-H6Z~4M@_X8|L6r-$wjrrcz1W~S! 
zRW18nVj_WFgTM6xpI#5&0eg@G4dnJKo!6|9if`EJusy2a{=O{iuXfmH=5n%=y8K-B z2{WFiXtMH+#)W)s&7-CRGBgX zXV!WKOuX_HIV@DKi~+bX8`!L-?A@oqj|#%Q1{t@Zq@>)aK^Sf6?5}I)I2}<-winhZ z0=%p!h?9g`ckH4pWM`(f6!2`dy^e4)L@^8E5RK4E2}4j>Dab4#6wRW3@9GCaL1di~ zyBG`ew`y#rR!B&ib&%Gn%H_4w<6}R4p4g}2Ns*?1bYwLz{K$CX0I-)eAaX%OF)9tK z<2=ajAKa6khNW39hI z{C$4)y8PL?ho#oSSYl4wB$gESKS@p~&cCU(gxsSUw(dEq&j*1~2!Y8MN2; z1hSoUHnUt@t8LX+l}^UhA(ZH(H|kV?No6PSl@inTw~|0T1k$VDQyHZbf|g0%8Iio6 zL%!q2c_Fk6`BJrnO0FgI`{uF`YQ>ZVh5%{GyOQP*-5?-*=*`K1`mQo4m~UA9)f*g% zBB!9^5ltj~pOytS#mvvpQ`8f=+f3O589WQICUEQJ=lUu`K^pFZE@`r_lTOD%=l-fE zZ`^oFzMa1Vv}B}mnyFFeFG4a*hdiudd?t0&8stT7H+E;N4!JJCSDib>77)pl0dxYH zzXL#7csNTnq=w-IWnSp3<1*PlA(brKJc5$eGd8Pgt4mHBOu03Cq=+orf*+le(%b?5 zglP4hx$%Ge_NJWia&Lyu%};f+7Eq=OYIysfZ#<@{8nr=W$&qOQ~j_uBUf9CE-r8S%J`)@ z>lL-PD~a6xYM9j9ayC{auduT4n@|(9tKl%Lr0=I_XdLkT^OG6oAbkPxbT|dow$@U1j;WA56x^*U(cs_C*OXi zLl36xFn>Dz_a)gUR?sLwTHH}0`dV^nhWUWS&g6oC%pam$1fSznz0}EwvB;73u!pBw zwFshP!UdN)r}K|Ua1?t2I*$hGIF1 zA6m|OoXSQW^vo6%v-$Bch2VjI~!r{kRsYPdY zv@Z+pAsmV{o{6=g3cVj&e%9GNlmo|oWoeg1B_^}ha(c?@&a<*xY##zZ_8g7xRZb{X za+4c^ZfSBe?4)i-xAJb|e5j=z>qL=;M@uu>XJ=l~Gsm1z{x;R6ohR&p5qA=O;r-J+*9>an~zaK2^!5Cs~ei zb8+bQC8xMY6$m7!`~}OQ3AdJmS+9nR+SjtFtMt(!Zz9Ag{gHA&Y}Gs?AjEvi`>ShC zm0yqIcN9zN>syRTpqdq3O70BJDv&d`>XiA~vzp6~a;|{+as8EpoLIzDM4t`#tONB9 z4|8`39wGm*kc@W?svA}Y4>Zm~?pgkmMt&FR!of>>{U`^q@R1pBko`1gMr!Tgv|^;d z6^_F4ybVKr7BE3RKNn$3KAuE-kqYep38~6(+ zn5{|BZ$TBwzmZfrxdU{S>-wS)Ckxd8%SK3P*f|3+sW@h9(cu^8i5G9&kcUyuP8+#W z@$&A@+UB~yfJFl$B;F39)Y&J&L1I3>hdi;4GcGU&d`^yiLmB(I1K=kl#Ekq8JalK% zQmAAeDNdaojXZL@Dm=`@`E6BWF`~3YT4+oBvOb#>@}$KQR50Eq^9ZV!w|DMYVUY0J zFa})R`o?}P3%#>LMK1YlxrQpXQdtN2-b#Xp6SH;Rmt92-Tl9BxYT4YaD@!#cJArwM zs3mOy3LOWQn-u;iQ-VCCodthGEctn%r(hKL>7$T@w$LiFptd@ZVb(Ya(QFE2TtF&t zWKSo1RQ1OXeiZMqB&K+)xv`7-BWwJnIulj~p#udN#C>`E##1rLk_DVr+H|McIa1Ny z1kRvQTUvDpnc;sI>j&Wyi*I+LB46!}UsPOoxjrBZtQGactoeh`%%TEjgnoD&URo9^ zvd9P#7ITAm3kWByU(&GDj*((0P@?oBl#j3{gh!on6p zMp6yRPs#>pXfTdgmzB%TwX&$f<7YvZyJ8Wr7du4le8XtiQiOV&jR_T7#lGv9rzI&J zDo;+x_ULGL`rQFoT)6<@{>5K1@y*;n(jL->kLuXkSd>-N}J zf6s>{&0lxs@o&E%+?G_kzHeLmkX)sZHqUkf(Ah*qN8j{4VSAgQubT*z;JTHV6ArPB zwy#|A4CT2%OdU`mVM;ES&w;UtH~O2*gQrk8YblE0=Ggst6K_|q%7Vzi)PvYJG`9Mj z4Qz)#(Cz7I7_!C~x+y-g=SK~i3x!;&pfweRWLjYC)I)2t z%5996uQQ9=0(rUo>*QDY1Wsp;j~}?Aa~x>0`3FcHwZ~fB88OSk&j1Uu^k4sRech*I z9vU(pSJV0IDp+n7#A`-@Ue{4V;Zf%Dk-5Uc-lA21oRmj5-iF zi!FO40lJ(p@i7z~6b{GQAqO=_Q8c7y7yUS>BS|Y`ABxwlMnV=C&Ya$CThTjp4KZ7s6$ruufJAiu_vc<{90A=mqLSzBR)TXe9B2{vwk-O;Gd5kS>I($$3WwUS$ zS1oX9rl0uBc%on6w$ZJ@m0KeOc?72J>!CbKmuelNkdYv5usI>5MWI#972n_fgCB`< z(wkHue@ETkM6=IsRrCL%J{5ou` z_))NQV5%=i%T05KXCZ%dM2tsv9w98g84wh*1Bwm>?l^8SrS+AyGqX77N-V5{>^s1Y`2`bGP4 zuR&rsyW%h30q67QS8rYZM!jyd$;8EpZ_EjYaX=9lhLj=gtbeyA17H5c7_y%-Rm$e? z`au$~Mz!#bGY9rL-8Ot0j1SV=dMo*=xwa8VpAFBo_7juCttlkCO|B~UqQC%|lYkn5 zMb6^(BL=mlkrwTV>fqO5Yd%Y61r)AmDzr8K$d_k&Xh@pm(EGx#%%s(m>2(cGZuNEi z>vpA>!R)e--fDK$;|GuXf@C$RD9)EJ_ZUjbVn97UR=2eTZT@FxQF3` zWyrd9-it|&qZ{Cms+Pj@s1RXTWi|6DrZfpQX@@x)H_i&;>8e_Jhy{d~U0tfirm_4A zyMoH-N+70DXOp4Pr_Q6y$`GtpM9&GxtU;k^27jc#5!foYggiogA~|FWwYrfMA3yU4 zKOSb1u7UJ?B%cVvv{Jp=%^rp_x#%|@e#f`t&gLdEM+HNZs!G*J){^q9ODruur(Q@? 
zd?ZCr#bY!sb^7j5Hr0ole{@Lk{ZwY!dCS+e6X&E)LBk-dqt2nFzXe=^v!rIvg%8F@ zE?bWt;ejPXo`#Z=ithOVAXmcBnN2fnmobJj7`_u;j}p-aBlrE``z5%wShm;t9^DCw z?hEuF;fMLecS}Q>9@z+dtKng4ReCDxM1}pTKKb~Q z&Sq!BGa|<_UourNkNs;FcDtY)a}S2T&83dY{nlR1$A9|kFUg#Sv`+Q2W4z7DM2enT zJVm93Ibr68a{sMD`i+4iW~WVPJ^k2nQ`6$|tW<-7|fNK-P($7R>`^SuZ#Sg`5Q z;#vT-tPY=cU0)(<7H=A7JT#LB?E`$um%ZGO>t~oGfTt?q*GT4)6W^Xu7fipuO$Tn_ zqcFW5+M2(5a#p0NbdghX4Xzt(qzkGBqM7HHpUJ2-N>j6UPBOEjIKlnZ?0j>X2(jKRTIJQ~TNvM{Wa}N1n`qxH8HTtROxSRRejfOVhYQg!k9mSe&aV4^fA=~6 zlM`U1L|ndYR!z~j?KJbvCiJN*wn`80%P^+A00sz;8mn5Lf99J`-3ZJ||HOBD)ljF| zBV;@@?3FyS)`^^Uen6#wN4#wm-fkst8`ZA-+H3?N^o}DA$*)_Q3Th3I{j3I-RtEq< z7&Ylrm@=SR)0(8aesD+>bkNxUo{xjLUBO*b=N`)QxU3Bu4 z2j)~pDVF&!i*o)o;xnX5g@b|@_2-{yUjO`!aoiNXO)kHG^evrH?Ksyk-L&(<2zEw| zc@)LiOfgk%r-#U;s_z$LY>>z3Kls*tH(<9K=hzrzM;cf$EZk?FdCWJ4 zU#=F2Ty*tNLKCLKnr*0rYfv;;R!PTP#;()=G@ok7k!Ghe&`cKt!ujlYxix>S?iC)^ zt!-J>5(!)~*Um5;g%5tz*0Q%at$Ptv*oUqSFSb~|?;fy}Dnu~uE^96zD=`oaq#jh! zYd-v;5*Qg^s?)f?Gt2JH10tm_vqxvNC)7G63A`-rJH$o$;76)0tymSl!v3!!mcbMWU_Ct^N?P`j$TZQ;w4Kd|GX(|$NH||mLK`Z z-8m!+Cr_EvGVd1NhqIO{sw>y&60(V_;Fo1g^RR2DD(Xo^pf272OwQ1{UC0-O$OWE{udRY~4YIP8p z5Ru*`Kxl=f5ILcda_|8YZP0N&&UrLbCTxg)+iaOB38>R?I6gV+tx8iot)I^Xyrl6> z--ah|{GJSO`7`4${=!>aMLkF>omWuuD*l63_>~qS1iTofCVco_QF$ySn++pXoC$*S z?+jDW$+zF`Vm;F~omG8)h(C+RX2HaxxAFAI6l+tow4!4zRaipc?h;Hh0qg_v`-Dn) zuM`f}Ec3fUj7YQjPvke88JSVSgh}5g;BHV({}sOJZZ$jVI+w+pmLm_mcDq51Z-3bQ zH|oAZJP#EYX^vA1>Y)!rMcJ>v@U=hnbtfln0-yRVuv8wV=r6cS^VBBw+f-Isr477M z+*c6#{vb00T2;h!trA>yqSuY-s$8`C3lrsLke~(W!^^~==F}ihEY!AZ2^=j@tk;Ck zoyTo6{39-LQ^=e;fRs$LSEAAChYrE-ZdH;0ytY+a_ut?hgColtt-y}9??h!tVMBi> zLy=GrGuk10ml^2*APE&jGyk(3=HbV+I|n{gZBa0lK}pzU2!a@MQ7pFS&n$3M6)kaw;O;|MWAc%h?Pc7uI)G$rRs(Nn%h;|J?%9|Fn>~<**-Cn&xix zS_`mb%9$_q1Z05aIvXNAs>T23>;GhZmp_Gdd@OHUPGFyVvR$KIku1Exwq+pSStlqB zSB7LA6JnlWwL|i<2{^ndj+(I(>>C>QX(;e}mbll^0^i^bO+nN6i`9!UVVS`|bZiPk z2t2J6{3d86$7^mGJzhiukU^*I+OswPC&fGcq9UEbt!Li(qQ@O(?NoI+BEVSHstn>| zqaT?cy3wkYFkHj}{1shLl+H)V*cir+^7y~M`Y*D8WpJ_s`HX&*F|Z2F~e!OPT z9zSqOeuWpz-dwbH$Egm0M(!l(F$Sactui2BTo``6iZUtFBC%>5?Bs+zVeX_a;^BAb zzR-!LWxxN591@x>+FeoLjD|W?*UOCoco~hBqf*G`^D(7qZ~l(IgyPTUI+vmAAZbj) z^W|GhGvP3ep-KU6sQDWd{6|Q0W{D}PL#ISYS~nEfwd2OAzXLl!PlQT{U{t57cW$Ss z%RrwxgV&<}gaX8Z=YJNqBBz2oIY_o;lah_+r;U65`YqUi|NG&0S{Kd$u0ctr9if2> znUdSY+4xVeaenE%=%T2h)}DgI@_nc3OIBzoCpqK~LOA#}ti-nbWzqvKF)nr?MamnR zZD*e9!k~o(Vn~VK8vPtlK;tABBOA5T8z>)G_||Z>${O!l2TpZ~?KGk<4B`?^bf5tv z(>6_xCLtnk#{#4ZFn7$iVg7RhbWVPUJ?lXf;XJD-GRifJpRH|euBhg0C0oI$pgo6m zLHEiJbR!B~(sfvJ{;yN5=|7S${KE3cC6`ZW^FjpRa)xq=o6% zo{$X#@ZXsRHMYnFsmA|H4k|%J#_i19GaJ+l0P5+=)K}0s)NhWbsb{!5C#?r>1jeC- z&(!~XL`H0RFHNzg$?qbPTP2{Pl z5^7-w6b5&U+})O)VPzO){!$9P`Gm3DnpNm8i#qc@Av~8BJ8KOZ(%ogN{ z-V~exWK$KsBE&xcd#8udY!Jo~+3&o`U(8_E$XM=&odnN|rOGqgHErO~?i~A~a9RL` z^^I8j6My3tfON_y(sHV zW$~E70f<(il-B&+lZVYV+*6T6>ZV39JF!ZBTxG5T&udmD5tyA?D zGE?=vMlA|%i0m*?YrhZR5OT#Zz?^LJRSK zo>){_eTfc-a46YCv=x*EYvt{4XM@_E-aQwfd`v|+h)ENH@%JN;DlOZ!4CaIjS5OZU zOAnoVgjhK-fSjYt8!W|6-hU3*+24fKR>)&&3p5nb1|G}eutCBH`!!8kqr-8L3x@~R zWSqQg6R{_E`qr#{q$YVbt9(WsL){9E5O5_3BB!-Fybk(0yByE7=otpdl)$nOjs=Nu zdxn#saW68Cl0EEiPH9fgmAaZvI2l16Q&nQ|yv$G{WS*D`WmV8E(o$q2wy_g+QeYCP z`>X-arA9LfuW)J*&PDi7kc59#)LAw)f^JLl0(e^-6S;K}0AB#Tm4{%hkVPqT-=%@; zN%W?H?}~i^F+tNwX~}v=&vIXU-)?^8-(USJA!tM@Au=YIVa_avRMWAZJW4PH@sXff zsS{!$I*6Mub2EG)s+L}~t7gC#S}jF;5J-Rz;_;4?Ud|nP>q+UBHKTU*X7OaB9nuL8 z3)-wRC4jq@@`$(C$!&GJCd2Hf90y^4$P<6^+1kE;K_MMt6>|QBFMb7>Av`(oDuSl6 zi);l-WRtw1h>u=$4&}U;H}dedzrBLm*b3o8_`Tl8AN6rjvIO&@ad=N|7BEnYx)t{q zTG0YnYz2!_}re|MZ(=J@)@FbMz&l&#c>js@9hI4MVUAfqk)fyV3Y0JZ8Rksacb~Jt 
zkEI!dr@g`aZDm`no?RNhB1~vSgN7RolJk6I(-UQ@QVUZjKYm!lTTjPEp`C40pg>a$2zv2e54gZ7$QwFS$w~Ee~T7NozN&P&6LvLnuK%tH@xa|pV!s*3S6)8 zVvLi}=r9f?iwnEy*Z#w*T_B)fcA*{6y)+}zS3skiQSEDGis&r4=zhI3ooTp>w7t&5 z&x97a3f8lw*uB5E6O<8Q@(nGej$+;TPbAgXT#9@RzEV- zC95Pt#s-u07oPNv0wC~}KbAA>46Xeqd`Y%CC4J`_qzq4dmju1o5k!dw{f`Q4T}mOP z(3cE&XEMWm7VLuofep)oh|yT8LL5f7r%h)J@`vdx46MGoFLFD%g1M!fw>-01)KY3t zv4Lokuna&!>e)xP>jq31$r*YelNDMZLtA~NxWjS=^(~BNdBGW)zT5$zc0~~4+VAq- z<*)Q1DhFsqFPgx@ebl_cNxXbNHVjam#XpE6A1{^}>VCs;o>ww5lPE z8b<9TeWzZN(<7#G-07={(WZ}s&l(=mHAPBRet>P|A7>~XP?vw?y_1Xi2R`M&`6wzU zn66|#WbC}x${<5ANN&F{TJQY?*Jry7X+4o_~qFHx`HVQsT{$%_#P(FNK&6$PlP*tdBcogu> z$&Fq~4re(A%q%PQ?8{i*U#%Ui$|WK@)FM|wxYVyYxKsCa4(2~~L#ux(qju*%pB?s& zWSrU&M~c29yW1trLViQT=mA!!taOV^zqaTkSKqPg%t`(^SG%bCWr;K9#rUnfk<{pE zmx`jYdMYmxbd@O}!SqayXOwkRS@2T`hwMW11{|^cU%-N&q66Dga>TGFm@94I0=fa* zE|?22IMyyJ!>-S|Dpr1I0-Wclkt_87?q+R(IOFRsbnN~d-w&EvzQr}D?t&~_DhQ9 z6-EZRw0f6!D1axY)<_KvNJHI$2cya&l+8>vuHO z9drQTQgMzEl%&`$ZNY#-`L7Z&Xg$r&y>0_t)~)TA9{Eq`q;51HMnueo$fYB8pf*=5NiOT)YZir7akw-KJpm??&9nt*#iD_-P>5tv;}5&r7fO-^ zB3~#e;Vyo4`Js?9fQs{&o%Q*XbD)5;(!%A>xW6Cz$t@6;gb4mZMr)tLQ~k3QATtgf ziqPAjYGpRK=V6*T(#&aIRP2?%fzS99(alvWSm@q4#B0GvMZ_R{$0OX6@*cbR=yj1~ zZ2h^I7tusnD@6gd6ZEOow~F`-jHf84$RatB;=~EZ=ZX>gokJ!bb#jwWN*XS+u;;bL zSnbeSSo=277d6-4Z}6X-@n|L?~s1tv$U(X<%GtE{Z6n>!s_bTC-#X^A(EP>_o%8ClAF)B$Ycw$ zWipt3{-V^O-lf#j_x+WSAJ&0v&E-Gi5V3mIc#T}X9aSf?BI|QtWyy2R4lKm*{3V5P z7?+btZL_nov0#stG*N&k$sb@s#HpSaOaapd`xH-lwFijFaOr=vhVRe@+?-u%1?l>1*bR9 zzAu6ADCVapcbnqSO3R!iAy;`GohY;qD{|0QfLiiVki{CJ1mJh}3fo9=-dJ))jV)7S zjR(oA@cXN*AVZb|iUEFl`S#^cg&fF7A5o~5)OTlC=@xK~{uAr6RMZAdXol;$m`-E& zAG_hdXLoA#<;_i*(mDWo-*zf-ThFcRH#n${;3J~eBE${J1aH!C=NW~VhCY_d=a~l00{Os9sqw4 zTDx#-lu@$@WCSpC)ikoN=_y_U0T*R*J}LZ6rq058d=N7EXxECwl})y7ie!>RuE!MB zSE{U8G)k7;ZM5SU_(7d8O*Z8Tlt&RHd@$++Jm5{L(wY`0F6ONCG)Fb^Uea)#n%%xZcm#5SEHI}0UX^k zK^vaV8Mc=wp*7#)WKj7$s2t`0ynp|utQq9OBWeZL!Xl7rRd&xqC*GwGP%AdkvPa1N zRRKW&c9GZUG>ha7yKVk%p)=q=e-kHuXd&#uK8YA55*{{it$O%P=CLQ9xFMH6p(e># zDNNL6V-&99uYLW=8HpW6J3@G;n*f@j<-bC*BJ_3ze6jpXk z2$aG_p*Jzv`m{6Z`u5{H^?$DJulkaZbVNk_?(g0av0|;yEPUtVLpS|SH~e15z0j^3 zlqvM5cr6NOI1cD59h-sLTWI*r^TJt5YhysFE=l zy(d;{&FT+`fF~N;q$@4*J3~!Yw^n>V7=62;3HF#S1u_fnG$9K&N$v7y_HZ|HcyMSx5s;F;aAvT&EgxnVemdJx(ZP})g_Btw4b9Bi=l}(XU0^_@ zEp9G8qqw zsF1Zuse)Hj4`1^|rJF$qADJ?nW`GYNPz%Nb_ffTiT&v*uEEDeox~II;3^_z+(IOLt z4^@Y_!zbv1=+`oN6izL@Z&7>Iih+pqtqPTN+q%SAfVhjwLfkJc>3K9G%bGhYt4L}+ z{RVvL6l_SMTPX5&PAWX>G)d(S;gDL`ps+3+&azuX)yX!_+84!@oTgA8$H5pW_Yi6p zBwAcuJV3;#L(F48#*#h?xjQG0L}rRy97-7&)G7#UuL^lVHVUjw&JqP!&AA#uZOaDoConhA1IFQO_Z+_Q* zXqCo7ML_g&i2fCQoqG|$H$P*11NNvWRf&o|q2xvuq|9KifhR?%4T&X$L=YLTM_6Ud z=r#EkzC1xV#U{dSXXD^=3~qJ8xT1cB6l=0beK1%Kj|{Y|w%p5QF)U11Fo0w(=X_cQ zKPV%0N7Q`tEx~fmO1l{h*?GzKAn4@?=%a>1&8K*NLcTOp^pPy<;Y+$?>tjEOL_wxO zcmRWpKF2PDdbn?WLn#l)#N};RVx%q0fJhXo0E8H~R>J(kf6sSlUq)XO^EoRn2!F68Bl~^d3$tUjrYH zCsA+<%PtjF=KpI*`Cz6Y>*pJqP(K8P+@qVQ3*Cb#scLbaP?WigQqN>WgQ4)GpjiIS z*%zJdd}eSq=Hh#Caefj!m^xG;5K#3EZG#&@bz8$yr&=IBZf$%bvfI3|GfK@NH~ekv z1FgHAhVkBy!+vUYAvmW4EcV*;8k!V| zHRAe>f8tr3wtlknU#QTTo&ovPo3UR2gAV7P1VxTg<)=7tEXIHujZcH&^0JyaO#L}N zuN=!30qn}I*A-6S5evh*1k2;wl+uYP8)Ly6EoRq=>p8OIm`b%a%VKB{_;6~Yf)#!8 zK*z;O80U61lI_`>1^jz9k3cuEzhL(%zrhKE-_zGj8_N)5HlG=aR|vX=m^H>0}-0&;C^q4$IiQUgoB<224x`Czp+ZKg; za^bM~&yO@rf!g~PwnmER`pfn(E_D}Di4*;U*@+ppF*#L>>)~s@57p^|ANwp=(H23p zI1p1I;}Tb=^UqsdT@ywX98y&Kg<_GxQYZ8>SDaZpbxn{>xcnD0e4O$P&Qqwn+t(S2 z@``rVPYncP`e)e-b=rII7yftHkH#sugK%*i68t;ZW^zN#-69{`v%=eNG0qcsLfjILqTh}cU`~Vk3u$7;?U5S!2}DM$ zADdKfu9r#nO}RLK_?iEE3s|1a=kjkaKk=_nsTBQ4#!f@abWzE|JU~u|mV~NUjSYeS z-+$q$7vebrG-{Gt5{4A2X2B@yNmmcgmU>y+WR|-cZ;c~~c(dKkTzqoo`K6YuL 
zBA%@Ms(oFc#;K3CvD04gFQ#(Clh&nEZ|O9!ts3m2c+% zYL%>&w5>&OZX3ag?vZ&eTRSjUU#x5s;5kwdLUQut7xV9l%FLk+C+1JeTGDoH$7!Du zT0{)wP)rTDdc)X>5Hen5(%SeVwIqbrwg_eU$1^q%0F&(ap84*2(T<9F)v>U-`7+b3 zE#DlR07=N{99%vCpQ&Na@KBljZ3I)Y1gB7kfIpI2DHI2m zwWr-E#`9nJ1UN3yeQT#ylSELbyU7sAf|4m5oCRy(Q-55@1&b(SmX{@u z;-)JN9E%1`qmKg-tD@+|{G$jFvp`axmAaC<@N;mA;s3N}5TYTJvDJ}zA@KP6V5{!u z0R>{;I{i>aX8sGAC>M+Vw*D-?q-~^p+AF+bY;DSX5kgq1noafSz&qPuw)}N?;Cfo~ zI_1)cIOK)T&HU{6uuf7Dz_3F_G@+dHw@A|FDR7ypv=(?H?3WcVyt0OkB1hfg=b085 z3_!OY@`NLMFRwYeFv9nMY1!3=EVw;6Qu?3+}_fdnTIRf#z{4 zn?Q4`|Bt4d08(Zl)GYP%st|>@?(BeA@!wzqYRK)T;DltPwWV#X@w8lFaXx zcWy9zg+RR$Y$UgI-`sRAo7LhGim+arurQNfY+T%nU3OE2_3d{v!cy4T*Y5q4DZ}(m zJCtR()omHcjk58Kn&-)I;T!Dw42Pj5UJiEDtiEOnKD2)$lM?0+GxOjTLl@^0FRsYr z%8r3uLP-=!Y@7 zp*aa^4Qt@O!&PLyhNrR+G)rE6Of1vxGdHoXct{jA*9|7UxRZP#j4a!hDaj)y$<(r7 zV|kFVmB!Eawe>&G!z@i%+^+EIFlQ4SkMG~#MtnNhA;!kkyY+wc7Dksayvu{7LUAF{ zg2kp4T+&hhdAHB96y#jg(mm`!s>xlq(#((%GmU+1^wZAet#X8H#9eHf1k=;}&P|FH zR#Cv@8`^B_GpXLMXMI+13Z))CIP%cQBc^R{@!L$8Pob#1J&Nr48!pTFN?ADFIOqyw zBj(wS*G&_<*$KAf^RK$;E*q0W9;fxK#y^ys+vI1M@-X{vQsQY#taT9a?S8gWX^GgTbAv;B| zdLw%V5@%`TBafhC#}d?!A?lag6x#B|yYBU`t{y4ucD|&woqb@99OU!SCqHJkqdRB< zdT1s$%omol+tD~EhUaIZnfzzzvDP^A+v{rAQ;;Roc@E4g<}>^a?n08E_?E#Jv5K9N z{pGsWO)mb|{BlLR+grp?W!lV{{>o*EYqQ&QPR;NZaz4S3h2=gugBI3B&m#ekuML%B zbCD3*#x+UiO%@YE*er9hHiTZum!7BR5<%-n+A}qu(z5Wj#h&3XVwT#$&~)r6Qk@rq zZrN3?kXr+ys^QstV|kwG#J`ZtI$bhsT*$heC1e@L;vxLbkv zrVk)CYdLEbTqbt^0<=%aO9tn%WZ3+^1*L9iPEsxcMhksA<0Lnu>3OS%2|T0sTxQ)a zCFh0M>pER*k*9Zes~k3NBFQPuf`4N{p`mVVm*L#Ha#L%UsTgzrtJ*hUKy6S)2mIh} zfu;Xc^@tU~#ctp5LRzK&-@??9y&*f)=C&S;)xxU_YhjZ{Sa z6@0+Y&~u;0=XS!sUTtHSjsA2VxO*xj;Qdk&Uu3RAQ-`^Z|GlyF;o&Fk%a z621;nnK3!Q_C|&VY=vN_^JcR8vt6TyrLCKN=~)-H&eoU})2r6oMcK|ZAX8)F zZX3HdZwix;G)#vwB21e;t!0*P{<}kqIF~}f2j*tnjqp*!)(%G`Da7GZ!)JZf+`tKfi!Q_E=_@>U!OU5A`U`rtf-iQ3n~BG$3Oh;BABO#n$xmHK z6!z6NP5#eYc!w#ITeaV|RO>wO)fR#?nZYJKd~liMD|>NodTM?0rkf3o@5pD2O#Pf6 z^W*7L<`Da9dKqFd@i7zz5gQ6yS2G78F9=J)>}X?7t3m;mOG_~Jp7Iejnx=76Ub+-7CE(`-Wpi){*?3m(P2gkj(bj$b&R z3=1y5d9!9OF*3M>+>fd)nabo+?O`rQ!pVDCGS_Jh(UFJqX%zXsTL@luyAb<&_Rt*} zs^71g@Wu{2|97bCq z$-oSP#;k^PJ*lrLMRRT)mk-ErING00GI2Tk>JYG75Y#*tm&De?#|^E~j?CzpH>11U z)QIM1_}KL~2X<&BLy&J1k|uke;!)=fX461#FixNCAFIAt7|0)>^_UAA>DT?Ea*0iu z7s2hzv>r}cUn{m<2@a6!QF5jLbEooKc}8a3kf*D(l9aD0NBxu0(!Z3Fmw(wvDLy(w zlI|Y1Q)T|r5}T0FwP-AbJrOE{dU0%KUbCUo=KfiNdF^k`bgQ{!rStj4vzBRxhdO1+t6TcB7#6q z!=#I_&)V!ZQA?SI*6il=wpHZ44trPm@PALwHU{0EA1KBem*><|^2VD#?VE&KD?-9m zCtaR9+`i@32S^5kuoRDkHJJSBj2X}8BV__6Lgd&=7BKq~|pd#QKhV3gH6n%fkc)4j9X)&ir)i3Z#TSbCa(N7Lt}ww2?c}uozA_LaKa`9PwQ@ z=POEY!|v1ms`>&0295Wf@#h>S8%-gwkydrC{Md@kTUtBp1_&$K=#l*gsUX6#&X&ln z(GK&nE=7bjBHcQbAjFLMR-4ZcKSdXO!gZ16kBoHLVFTOEIy`QtN0MJ*eqi~P1m;?y zncLkOvgMW;nhGB>rC&$< zb;wjfjoErnnvY92v`3#{P{pj3~Y2xb`=|p;HHuZ{4v_XIW_UI zgkbC3>^o-OKQqL+eDeA~qYD|gGcPawM<5AHTzmm29fKF!uP^M_w1!2p?Us0RrUt;K zYtIKb;%MH&E)nf)sAyV*b&Kn!auJ!2eYJI!9doa34Jj~r$@Cr>e2Z{P;*RioYh>TB z_McolXS(Ntg>D&ve+I9l9nZ~hhN#9mMOt57oW6qg427JFSjAU&cZg|;_+EHjjf|{ zItxKf1LGurY8 zpZ^^%VOJ$1wS>wcqwTiCC4gaS3fR-keVJvj+Ga{ULt*jpopbNBUGl8!bI@MFEUhZm zb|p=2-tE!{6lN{Em?L3I%g~8wn-Fw>675nG6WqZNy5Lc|zL3>F!f4jbrG-tK zk;7w!owuhF09Zb8J4sPU)j1y^J~nK3dWNoN-4wm4;l?l>3T3H+rZalj{j*22NZe$B z>WUSfe{a0$KEsf+8Sxof6X#hNeNSsIq!gRQ=JeKdBSS{(YV-4!_0<6l?%KQE;&A`j z>JcruphTq3g2Z+dpF2;T0`_E|xxf_DZ6f5`vwEbE0A@|TLd%(n{!{JUnz$+5YbZu8 zPJcDmO`b~1r^3_W>6IFJP{!^Jb|P?fdl%Ck*7hK+5uPPC>E}P(-F}bZqiOjZNr6+H zLpBex_jPGqcDLLXSl6MF?JHK;7Q}pQwSku_SbF3&JGoN3^b(g)MU|Ml_HOi#X`gAZ zrk$q1`MD+!#y6y9Kw_Oec`erS8oFg)&(_WAp})7UFp-VV*YXF^Il-;y7Qzmn^MV3_ 
zXV43VJ;eCKhU#Jei9fjEv%Y>V@WTo)Bk@+7`;vvI3hyAz$x81$>#7+%n6TX)_CGM02J?+N|qsY6Sq9f7-f=>GzIJ zP08jX0lH@Msj1wa!>0^&+`M+do?IQ4jA6e>k1BTitZ(QUt62BR)>GzXY^B<|Olx5X zr}1^dRx@qieRpAnL++C}>Eb{}wi&gJe;RFLyEPZ|?!OMnF%Eh(;>#UQ$3}MA&lJ}t zx!k{Y4F~b)BsO-7GE5EfEDP2#%t#q;obyHn-#Dt4)iPl>Hp;@ZT9+cbW-z<&MX!U| zVABFV+S?dWxl1SyXEtS@H+R8TOjT#QX@CDpcFeOT{2ZbqJ6c)RcL0o}BS9JSLs8O@rTGf32K<81RyszcE4^-cCTc%8cr%@<7jHAt-?>&*5} zk+HqIkV%y|5ou}~l-(?t(hf>4cTt@7BxaX4d2m|TV8PRdA5Km9GVb!vw&DUeMc6<9 znSW4}ZLmGx#kSB#TM4v%_Wq#qJz-cG* z7q=NRW%5N}>Hf+!+Hd>bKk;qp0>I3#FjL&9-hYTKEii2Cc-QC7v46u9bu%U*|H1Ty z3+MR{Q}Hi4A>o(H-LEeF-S>yu>d@w)>K}i2s(bW|8?JyXKCFfKCNI5OIli z^FQP*#BW)z%{sDlhE+eLQ1T7KqidP0QYg|d-e{Mst|L7SQ~bVOteV8)Hcd2>u%C8o z0ZauSx_PtV*S&Fe=k5NiV}tKX+E?2-_?FJKoBT6tzTs)X&MciabNf7Uz7-LGo#w0}V95Lc)#2u6 z@Q8b=u5H!88EGvrz^r_}%EpW$?QeYX-g|wqXz4tE zkfC#jn^LnQ#5r!L+b$~t2{4V|GAB8!#JM&-n>q36(nfS#v3@L!=AD;+Si|bI?B(qX zle0Z0sjm{(YZz4%wC;I?zs%6UnXy|z%=4HG=M?w#>}b=PR>0NCX`1y65 zHrD6*oCj0>p0)qz>;Cu7@|Mdd*9o+Q|7=T}5+mRpH&^Ah`*5ikUP!(>U8sYfM|ChzDBm;vw_u75FN9s)q`pG_1XF6F896 z*P36slmT9_?kZ+>goms+&v5yk{4eLun6`w#B!9_^mNOS*$Vd0dw1$6kH!}|ArVAJ< z0;(ks)fZgI)SR>QWiT<&$1kr%G+z&922!jv1+G}Q3E!X6&Cpzq{1 zVGq6a?X;VrNo&taw=0Wz0Zv1kP=nv&I9>1h3*&N}?;PP+2rhqim>OC_*JSn3txgNA zSHR$;&6k&F2}b+G#^1{1_t=@&fy}QzgP;H+L2SWacP@)qG=K!N0hO6b*(-4A(TxyyQ+#$=2ZP zajxCqm73N;{v7{qnv(v>|7KpuL4kF{9z0mR;$$+J2J?!dlV?EYAeVWL9{*adXLZ=n zXKX1*Z8SG|?^y+4C_&6S3o+zaWUpDvrg^nZUa)P&vtbOiWqqft;Oiy*M8)Ik;e+lt~=MT(%pPx zsy5D`Hh*+8<+$72PE=JmArSM;(QZwha_~x5A?GFt(+_cq5zp1Q?opGDt zdhyTqH8b}Y6EThgv%?w|XJLBTmy%2-*uygy&hY97H@()%;QqCF3g_NtDzpK|u4(mE ztxMTgNgAp8TxT&iBiT6ZcnI6)%&?W!mL8c>(1PHmLiU_H+?Y35DRcsMeHlO4s6|ii zf*cFWNsr@+>%0Ubu`vMLWCWd4hc%XF-|9^{-oSu(wH+_#%aVg~>*==JsiWMgr|pli z46Vp)#5-MBN#mRt!7y)Sn{3~b)k9<{Hbf>*r&3)ldfwFwNq1AkhHzNd6>@6fAh8lGn^G?8g_c?s+SC}A3x_813m_`rtrhH>g z_4Ps;zGXd2t@W3$TD2Ux#Qk0h3$LE1AWq6Wn$BPIZj7M9-lJU%9ot%M;(%@z!RDrc z7oj#}=5fd%N%CaGQ z>@>uJKhvb7ws0F_`Znja@V70=tUKqnENL+sC7;r0p}D+NVYY(?_0e4Z^-xQGwqKFS zhqALSZu&pUZMKA^ptr19^V4Df^u+{2<|-^7gG!2)wrXKER;0#Cu(#WP-P8l8G11#z z1N6A<0IYz!e2bXbW|7$DOZ)uO#BCNVf~hqUzJk?5-5UH;?3UR^7N%UL5Xh9oc{2;X zs+N4ksu$xged$Yvw;)riJf*zd?XD#@Ze2>|kb+8LqAcXjPwzTvUyJmm@QKy5Ym{Pw*E~`h;9+Zeg%tHij{T z5;$%bzw!&Vgw)Hv~=|aQpO^y=1#e>Ue%ehM`jR@@u>q7uy-QY+52;PAb#BjTl}Y7#W2-5dKGh=qj!RoTmDpRC zvayqusT0|y7?-v$GYmbilVBRA2dpo#yA|c(zG9x6%CybGHS@Pw_q4G$2EICPCw=`h zp1yaZzD@0N#cK+(kN^@F8tZw98qVukW!TO+mEk@EM-#iSY_s+HY>$-YubFFM+T3=#KX{M4ROE=NP1#jVM z>oMzDYd0^XFfM|8(0qIv-f1=A9U+rsE33aer&R}d$dH3nCEbN|+EkXDWnRzZE5~dI zlp~Q-i>46>NXJDVHi+vAaT4p+bgC0`o;d4BB9#CvE{k&-cUinsNC3==MjOx7Q5`TP zpUS-I>*ZG3i+^T)#s3)+&)0Sg>@Rsp5n&jH=UI`m$G(x3v4z&jHwdY|U=%;ga4_AS zXKB3+lOe3v%}ROxU8OMr-(R%C4j9;5({^KRY@`zAzPfUsqbX4?-mjV_Zr#Y3)K{)^ zW9y{{yz=_mul#|Lg4vh`o<)aQ>IY3S@a7}xY@KwnA6x0|eSJ!`qMmi;%} zsKc$H@ZZ2}wn9DZHtDmPft2fo`2IZ9(eT-Ze7(KrD?WbbBEw8y#W`pSHZOG6t!uiA zVMoL)F?g`k%M9Cye=vD@rnM~bA6kS38)iMpmZ2dMgI=)?}w5f%0nn}!qL6taZ@ zuejxg-}mK3U!f)q{fW+7&jKH7I=b9ah8T?MHnxh?5Mz1!@({&deStL!VW~>bTB>X7 z>#}|jnzlUH{GBpC$h@~!i8RXPvTr_@;n5o~&JlzxU_|&Tv*=~ne}90x8p@JtLNsW zfA17+J%TY6@aMUS?JjGe zPNh48Uy?-Gbgi`EGSl+;8^Sj{6)ExjCAWJju`%j=+M8s# z=Kd|4?BxCi%=&iCd){=@U50wg$-%8T3O5N(QZfng`qr@P7i-X=-z-2EgSd$vAtq99@Nr|VZk(94A z>#83A-gcIM_&>KS*Hk+ZFV-naeQsrFZmM^O@d0pmX>pC{tv%L>$-rj4UDkOwBbN}s zjoo=Y^@jSLR!ciV$D56?pmWDqwY?2vIr9RN?=jdc1gSCoRfu$- z1trumOwA+94}eF|X8D-ay{@%tvjkmF9$~VrPh+heyyjaMx3?FQl`bv1Va~1|JBHod zs!Gzb_(6QG!Jyh7f6o+rf6 zKR-YBgKH+{M)v%^$!H{Jd1BC;vgVWLZY6=21Oh&7lCXRjqfrYVoANLFr(2d4*5@F0 zCx%}e*<(IAm7~;#b~C^ZGlTbi_Fi9hUcQ8AYR_W?4H7u#ugD6E8^K@kC^PY<)!lBz zg*0~a(Q>8~bLdIQS8wRZ@;}zVu2Xc=+?IvDQdmSb0A)5QqrDXH;Z|Fh6>eREDb*b_ 
zY!Y6Voq=h4Q^Bv&KBibMJ04N4rakg#4cvq>kocktHAJ!d;;kk!-YA{hn09INW41;3 z=@=Dnnt@<3Mlg?cuoT0VgcxDli{>4e;sqJi6Wijoz~+!piqeq30t4PowY$PGBsTS0 z#fBV&V|v0=2V7z((_E}HEiP2V>!#%#(`Wh*Z)j6&6`SLj$}Do@2lG8Q#3?3hf{s&} zhSk8g%`Z$NRnM{z#1aH6*7oRZklwyD_ilN*yLFnTDX~ednt?(4nb@=Q`7=m&;|q;3 z-%iM`_<0jPw$6=y1U*|5l@NHb&41S2x!#62b@#ACiXkLx@Nk^^kMQFeOk>%}Ul!J_ zX>-fe#Xh&NS+=iI_1o+1&~ju8LWWW5+if1fSYa9pFP0R`dX_L-PY+HJtZmaG(4SqN zY-i|Zfo&e#yhkW>b{N0Tf96}iTCo*lawY8l(zU6u5SfKftfx0$cQXtG3i)c{JF%19 zZhtBI9oq>w_mZnQM<)BsfVb@)+`W0IsaFAcmMu@A)txSPooNtwsyKp}g&f-068Cc% z>K9+V>1O|48gGG?mJ$sVEL6g0TD7RDWfoy;Ry)jEo{ztbYFpxV>@?x4&hzfSavOdl z>k7vt*r=gbE)!a5K!K+9RG)5RiLqUYv$}mfJ);SHzqT#JM7(y!x0cg$?rBFtTFG13gyFxMoxAD5wxQt2Q zQbReXuL?3mSG|JFe&*XuPhvGIE}5kzvQR$0&$Np%AU3A3yoR9?WinNp$La{r87=rH zSDAKWdmlHcoUbn)K4ttjE+ykS_A@3h&g-uZn-th}pDt6M?I(#gDeOYH9J_;^x4Psu zXxXG;3z=%gUDapSUh4N^>~YgNJ zwH-9x){!TuwkT{N$m6qC*-jtQGBIcBF~@D-itoXf`nGj$ znW8l|yK#DT+ej;DVRKd>GKF>Jgl!NLZ0O=vZV~G08*W;j@!eOD5-wq5tV5o^kXh^N znMZ`WTcT~)V0u1-?mMWV{dVr7efiL1o*nPI@9X#8Wq!B3i|`|BAhB-6?VIguj-SO1 zoqa(zF?R5;Vui&T*)T(AMJx(Wo*+flkm$&HNPX3A#|UH`SFl$w(>!Icm)7I_@BW63 z%eaL=*pMz|bDNEa?q2P}DE!;`wlgy&59}6=DEpUl*UhQo={FQ0bp~zgDR*hB)-m(+ z{IEH9^_j5Q1$Ty?+^DG>mlV-{!$sxcIrOibSIBlZZ8^gj!tFd2ao8*q7w}?k8BS(} ze?G+_Usditfc5>$+gD3bp_BDe%m^8D^E;Q~rS>h@iVXY4<#Tw*?80cAp6$%%ni4QD zVzv9RQ`pfSr*od*WhzaViOoRkOl=+n4KT`l;W6Sv>?f&T zunVl@q5DsUH~~fzP|RoxbOZhsM<&A)i(DL(xz5Y!NqEnCO~pcxPkh!^p6OQhWeH2< zW`<4w=C^Cb_l=+Wta&-}!|hfWNGoPXR(Sw^fhSjzk%;cktAcz?qObvqtcVFXIAb$(d?O1?0= zjT{QTGyS4+wGETpb)?Vm(E_pyn#+ytU3NXG+s&eFeYIjMfLRhrJXkT=??5Ne6pU9 zh>}JZv)LKdnkman@Fb;L+-hT-7<9cFI2C)33}{C2xYh8>w0t1%(}2DL6N6E&#&*artIy- zjJ92?q;wW9T;%D5XGuZXpDX!yIU~EUce}GS(8)HEghd{CDg(5PZTeq*fiC2_c=L;c>I$$eb&gka*Fj&fWnL$SwEl5~7Han|8H5LBpPk7Q$I<{B!gyzb1$*gPj?Z_f*TQ#_l z%z&QUjMzdxO42h*b)w3Hx0jc?eGkny+FgpTLgEv$VBz^H(6ECDcw>dJGK8eUo!wTh z4_=pNO;Z{1AabEF`oYfZs6H=?8aFDbCk+*rbSkI;o4CJ_lbC$tBHfLj^tE?N$a-d0 zc6Qi|fVZu-t9~H@GfPUsHP;0pFSk;~X12B~Ue;Z(isrL5h%c<`LRZ=m2D5$E$X+#- z3(EIg`QXTf%Hf(cOhK$T8+JG@_f0)-;k22C;B$JYH4X?v)8*vKvTcIB&_>2BtGQV- zOyHRRHnr|1Z6+hgLUPR8Nw{L#8jGN<`keXnb?>5#fa<`qRqD!^G{47_;vTo!S$D|C zY^AQcU9C+y*l67UiYw;FO#x6=s9z({Gni~PPtWSI#bv1>LqD&5I(jsb%zLLR5M%MI z1xpOeuWKLbTU}U6fC?tdnkfaVPiPbWSr%8J7|rdrdvF@a!`tFaAyeWz@Fn?65+S-o z`(}`wyAGz3^|DCb6l?-joZ4Fk+@{G%*^D`c=Bdr*(2GH$$~ zWcapiEnSL9jX*cVq+4zZfh zD+DR5SYmSwJHM4rhvjCkA!5st#iqn0liY%T${8ekwU=hB?PCJfS z+=gm*%V&Kp*t~p&QB7=o#O{}jnXS!2rXA{JcG;d~0o`=p&88w#2?U7LRVyHfc&m(5 zyR|~?HfU`z{Tx$Do;2MZJ+_EMkg-nOo8a>n8z!I0dNoUSd^_8RPZ)Bruo&wVY4j}N z)au97Ub_ZHd2}_yCsUp4v0|i7GM~+pO&XeY6DPBl*c?+w!UtFp=D+GzsjZajN3X`^RUucXh9KP77ZFlo*>07#+rN1lF{}2y#PPX5|+*Dft&}KKR&BaEY6P@?Yc8 z?oPT?Ygnu!cd)r%B}8zktXCKF?wN&oAnSm`BaC~c&||wa2yRepUWaE1v&vJuXN5#; z6=tP)C3ga9GlRU8t?Rrny7iUneYl4Ws?37jo3Nv8rtI8*Y`&jQuB){nhS$ctXg;GY ze@Sg@0C`Vmhvxskc*D)U{L5Aq23@bhvo@G?9bvG`?1}`|3=oZNq@)}31!WROe2ehH zdjew_FE>L4JaKH*3@f>40Mnwxt9IFtYaf7aY1B=Wou?ns7fVHUmSlD6PLB(}Gu%Fv z52N7Dppgo@thvD7uEL~gYtKCdX!wso)~|id@bZl5EuO^8^SWe;=6kr$(A^;^Kr1fu zf}7geBx<}n3d4TXqh(u$yu_`Uchd{IB^cVHfPyWBaCTbI4K_x0FnZTQL8m}zQ=7Ng zt?&zTRctb?B@o=O(`|Fe6-V2854iIQyS>!bF3%@}yi&c}x-M8$VB<=2Us66n(1)-|D%y+Ey6Co!kEcl7wb z4;|9dL;j9g)NQNP%|dYhLHt=1(ierGSlb`Z?6Juz(6Y>f`A_`GeNviyVor~>8(VUp zkkxif3$TJ_nU-=9^C4?_Q9C0A-@vnG`;zfi7QY#mbn$#HFN7~aPnXpu3s|z<(Qf2_ zt@GwNqa6||(qO8WWV*!07qjk?D579XZ6fohFGX&P_Idi;($=9}9*N+$KB9$m6d?|Y z!cXfUy+XH5^27S&Jcv4YN7`FuF{6eG3&!0Z=V1pZ*1nt=KH;@Q%PQOxzD(ULd+|41 zAc$r{osob<7QCyj&oSJ^G-Pqk8+Ks4=U+`q3@bFlhw}dEgn0q0aKmDt&Q9xwaTbl& zT`t{xN4Mh3k(3^5H!PwUg%LHDz4a8c5|%v7M2yIN^yTSB78f}eOSf}}8vD-- 
z8BU2ytF_p3#{ye_F->58xu%^U8M#-;O-}w0Q#1NpmzZFuA+_7Eg{d&}5bmb=Z2e*t zQXYEA0Kq)voguy{n6K#UfoNV8&{8X^hx~`=`OaXl5$6}9!J<_p&}hHM z6L1x|tO_mbdi1HNTvHI2OV`G?-F)#y{{j{*Xts#uGhK7lbMUcGf9@xJ*}0nGB8Poz zaeaeg=bc~}qlzw)E@e%tbRG{cZMWk$w|N1}tK052h5Yz>ip>NW`L9u>RaQ6N)SB{2 z|DN_Wodx#H6BZJfQdlN{=RHj5Tz3a}s*zsIRs|`yTEI;i7xerp92>?Hmd_&ODGxs8 zml}R*__$}Te1`>3JnD4w&7U(@OAGss4)3SOI%g`(?46q?G!&WzNBG~9d>_qe9$&P0 z?)+(swLFBceYxE_aW)JcYF+Jr?Y4Ca8D@EQItHy}&pYOuW_4R>$+#o|mUY8T!>+2CegLYMbdnuE_d3>B(UQM`^m#({Ry4k-QqEU4s z0W|4O#_dAV7YfWG2~QlJzX6Un#9;=Ug_VYhan13AsV|$)(DQ9#SYTe%wsNI9mTBwf z(u3x^!h4P4IvN6c+!_sH{LB|IG_cD(cP?AivsPv)Dc_fq*rv_qSaky{H*zXuT3W*b z`{kRZ7l}=8<7cwus3tQ|g{%!<{tC0bN>IzBmbwKVUNm!|Zws#IW7?J(a|w?KLCda( z@luQNt9=$+v`%5z=T@WHmBZ!wW~YrXrkpBVp1#|yd@|gyo}^VI z1oj1($_wh7lnC-dc;>pU4Qs1MJndWnO`$WTUe1IuwPv!*j-xtQUSDu&O=A+Kkf~u> z2VQs8fnwiquPN-*KGxc`pq=rfFZh%<*IzNBb87E@LEY8{-kV9VLm|+Wq3LqF zQjp0gx4f<08i;I(fG@+BhIJMGlz0Eg^;PL@{$#yBY4s=V{$#B`>GUUE{-oQVboi6y z{$!OuS>{iA{K*=Bvd*8Z^d~F)$<6*`sXy7|Pd@8UHu{qd{$#a3`Bi^%lRvr9pZtnH z`Mf{*fM$vyi0 zp#Rg?^`{g5Pn-3pbN)|X^e40Y$$WpZ$e-Npe>M{PKh5+fzw1vv?N8?E=|2Cb>HcJ~ z?un0n&)@qcfAR@`@(q8oz@PlO|NZc*{!g>@r!)Rfzv)kY!=L<)f2jI`|I`#8npUm+mzwJ-%(9e$hKmCFJbk?7Z^!uBC{?Xk)1l*|}zT!{*Soa3}pMKf@X=JPa)1Ua0JN?No`je7>x~e^P>+$3M?kD}-k^B8wRcWz@ zU-yrFRX3m4S;e1?Y?CsJcllrZr0zcA&xZQ^*~K&dY*=Kw_+og=K7V^;i$5Fwrp}($ z*^}WX2mBvL_WHA7{cZSB|F`OMI(ycijcoUY{!{;_OZ&o`59*Iwb@s478xB->LVwdQ zs;Bk0$8@&KpH;u5GgawQzyI5%{r;@_ZGSfWls_91-G(3ce;auz|n z*mp=IA9^fQU7s7)PexSr>N)?yVO3;U#2?w=?~a5DtAN#`{%-YEe>NNlIN<+Q-R{pW zNn9?T^nd$aD75(Gdn)O$8n}Ag|6+KzKdYYdXG1Uhvx~?4*|6ArMBf+=tTX7Js~+}e zBZe1M=!@cx;UoUR>Wex%p|h6*&G&{={raMaG^7?AzF!aSiv5d*LwQ7psx(>kjGj}S zt1sy_v3@+HMjqCWheMO~`q#Xrv#Mn6qIiBtoOwylU3$p>c1X`%enmytB^7ZsP+Q64 z>$@e#L!!;dH}&f++BUHs@6cc4XLPzPJbS zQYCLoJ6=2LjX^=(PX`zq^I$-(8*dSRcwQk5KhKm6o*PuHsWyQ*JY zJQQB@l7DbWn&`50*Bk0#8vQEv@C*JYm(|YoT@tOT^wT?HyNgmRZ;Jsho(hFfLtYh& zT@l+|7MZTT=6}-Ym7re|?Ji4${_r6^*sou|FXeni3ifSj!OKCr?GYI-YpchxCsoFa zs(-c5f2MxFI;4-zN_%`?n&7fgVEXt~Nl{galHs;g@D_a%Z?MZHT} z&ry#JAsxS}_vCm#0iY4nNsS0$y{z;n9a zFO@R#wEoyP;7{vMYg77qEcv`BPh5SQnCSgbxu>7^zi-6Sa_@xl4aoetD!F`9`o7k$ zdR`V~-xlkP4(g9r#ErH7&{g-z1a9=JbeE5+Nd0P>VX@i!{r+~nFUXHft}Ea6w?_xX zxR)#bkLjQ^M5ABY^|EyDWwr3-z+EcERsA@9Rt=X9e#^hT(JPsJQ!gJ5eQMu!&kS#B zRMF^HiR-Zl-{=d{zwIgi{(5--+YkF!4e55H?-~Ec%aZ9g^!m}$!FC8{*EZP+>A*4n zp+kSK*( zDKp^GdH-aiU&O8VN*$&zOX}(msF{ZJsgKU8Klz?GJUy-!sc#nxrpH66`vdv=)V#G* zdTpamy>4@_=cvaPJ^r@nk_xpF&`X$SwgKGaqZ}?K5EYZu-O4*={gRw`% z6!k4qU_-K!8@)2N8oh`8XS0XImg(_8YrQo)b=)__6ao6*4(Q1tu}?jg zc)S%fh>X7XLzM$_G-4@`TK^$mj!P1`bU?k?*eVl8s(*?|ZTv(m^rW=%D)e?3=;L)cx#$)Wc<|q;yd2 za9M+@dhZEeymastU+FjWruwd4f4|WuQZ)Lc_AhEs_O4WIBUp*=$pti|;pdPveWOph zrQRz#42Qn8SNEHH0&@&X>0gv?n0QfEX6=j=?qzZDh*9o3%?^b3ACNs-Ro`3RB4eh} zCx&nIil?%zDr@buh*s;@XWx(-NKZ*mW)DbWny;x>ysQeOrgT&dx6j^peEYb-2F@07Vpm{>p zN~1qG#0_4({sHQLy-vKH$5WWimO;ut+6flW_Iou`MzqgkMD z`kYixqhBKMu39kLuKN=YsAqmp{rsGUd8h#u*n45%P?^SR6 zmde%Wm3|mM-0KTdyP#jCM~~dw`Y!R-=sAtOvq#mN)Xok1HfZ#!3iUneXloZj zVR!8F&wg}HC8(WIpZ&f#D%-F7qo>7O&Fy0E(bLjVBuq=Ql&**5*G-mCHdI~to@ z)T^?sQX<(_3B^_YapD0H=CVxUbU@T@^zZc5N(ZH*>%C|EUmN|R{6xQ$Nb_L{e*Fo( zAv>vx*SEdk-!LAFktbqV-4p%6JH16bS>JcW|E#`S4A|(^1GPaZy5=j=NLSP+R%Pw| zg$j_K(wnkvlA83Wd?;^<9NEia)X5$C=;%2yM|xPNjo?77oec)7s8Qc0shbQMF+HpX zA01S&-;k1fSIVLmti1I2)1EA~Ac5(#Vx#o9x`O6Yfz=f=wSV$FkrQ4Xq| z8@=j5E=e6V`lVUMFGv(7pA#ph$3?~QL+WD2o)%M&hZY}uK~~DUYK5!Pg3aLYcvF7) zw^fwvQSo6qsP@dZY8?8eRKT^vQUM=?N1LyzP-DBKq{es1B&?l#)YE(H6^TN9kCflo zcKIYP%Q(C!5~nYV@U?#3zpCCgJrU%4cg26IzC(X)#A@0`{}%tiSTKaM9b%*Gun0eP zKqB#mI_YeuG{{v^YqBD8HlI>!HTSCu&10%;wo@MZ_w~oI0TC`eE@d#eTZPT`pZDdi 
zZ&O#)+!k8-U}(nUDs3aof@fQ0QjG1Bw#Y8@`}d|t#mVV_xPJVoK3qE|!)*Md1S=g- zh3nfS03Ytw8=42y3l6Itn%kx7-&J8gIxE44DKfJuvy8 zN^tEV$yM!wxS}4*D4E!=zt)0ej_sGz{IVWx^yxG^E$UC~7HOJ$Rf$GW@wGFO@(+%w zG^3~GZTW#paP5!^-so4&CLWNg9zCt5&UT80vYoGLq#(XagI}@HtDZVLAmU$A50M@f zTTLF+3r5dLpeCME&r^@pJ=gcC&9ecGo-fJE^N#wc+8OD*>{}AvZ2x}Gfc0InfzqQA zf^4S>lRo!=f3|i`!dKrWiLLLJf@;2W$dlue7_9!ZEReAm#b9G$5H=AMY`vmp%JxY; ze7JkBFT&_)34L}%bGq-T%gfGbq+Hdv>U%}R>}7FqGgdo~o(k;KC%xAQlNqDu#0cqe zc{YBiN3-~>FJZP*5?$+8i={z}PwZ1Sm7Wl7>w83{^r*-@7A%PIZ_6@CkE?Yiw}}Yf z)yJA!Lh~L_N7CG)sdU-U2#IA?-gI2(Lo7D_Na`I$#1E?qZeeaRMj@MGZK;7 zS=DCZpo-Y&)5k{7iA9@(YN)ZG$J0TXCRe2gn*CdSl>Ofx40yR&qh6M1)px1Bjs8G|ASI137m*&8>@-63(IrtS9SF@ks0f6=6q(Zj3DM+J zkNUzkcU|zOY3LFro)GOmdPywUJQ(_nQ|e!`=OhKqUUeT?=oy>4r6b25kv`3~sej4N zN+fH+kZ$zrWwn02VeC0IdvmYo*nCy}L-wfp!}OdKc{(Vw{|#|Swo`o5=oh=zVvR{J z$vVzX>RYvQBI(%E`fIi|sJ@DbH?~XV8rvu0)%S`)#-Eqo$ET!Pz$po3wp9}+m&DMubF%2_!R62h#BEljEUrDMZl!ikZus|3_-|zcd;DpA zpZbzUpGrHiM>3ZV>ValOrOCDhemx}~%XX*^6T#JyJ$ue;{da=)6;sr==#TH|rPp@s zQVDm7T=l?q&4*Om@$Dkw_yzrJB52g{m&LgC?P}?XXVihz&Z&cFJ|eD|+#;c_-!I}f zdR52lf(+lcRD_8mG9TX2Fu%D!aMkmYxDUcy>_qU7rsp)rF(ejFkE_eh!nkwnfHY9{ zm}=ALl{Rbi$ygmfCL&B64#eLlIc@Z+tGKGyHV>%ht8d#Hnj|p(HhpaDu=r}+qgymi8TgP->-iS`3sP*QjDxc3gT`opZKTq)89U zDrlaNJWcG=UndWSzT!DOP_N>|qV&-<0-mR#d|5ym)Q0B7)cV%C>5RfRpACG28eBiPpqE34Jyok^OLo>R8_) zD=9l5B1}9W4$1=OHg|>>dy-l?k58l@L{nDM2_p7{F@Hk|TieJYM zOTe3_gDYO&&5l0hKbAc#&b=1ecXGe#e(g2!etn08XZ$&}L?f1t8rvnJr-xOVM!%Y} z(Wh?mTAu_sJts*V+amM4DjVc2O#!?k9&bLaKfa}+)OSDO>s8+^;cOl}77gh7|=OsJYR(06*;M}V9 zOQ+ZRRq*tAadUc9Y}x1)$s1-^aZ1vc9t{tKdG1D^hSTq=C2ME(t46<+Lbg@Hmz~xG z)mwUW@b5~HE=hTqd!x}A*4(fFLn3}h~JFsoPWT4hB-8!~QBpI0KF!xf-LaQ6P4%Yu?%L~Wh}s#oSv?k=>w7}U&xza8<1z{+pE%}uVe+8r-ssiO zCikk@>$?I;PwIh*;Qpz_dcy}N)Z1i7Mabp>^(NU-30>{D`m087DA`jY*~EGAc6xG$ zFK=_Fbkrr$Jlh(+8-!!BqGo?r6dOIO*s-_8jp?A~7>7ieMxUB@EQBG}PD%JigHN)) zBWM^^wsuA=T;C(LG8SAIqo-7ZiTw%|eOJG#J*5tJJOu5e=cINfpHykHvts9TK&C`G zAYGC@Diu|`5QrRa_mutMHPNHdCqK}eQtqQ?#1mOX^tz%hp%w>yd3Kk7YkGXEKdprq zq(NU!Zc)J}_lngupty`nB~VypD!wU;Hlh-luGTC7+~~t_`>7j z(edqK$MoD5&x4IVNmsT_`ekB|oSBzZu4^x;4&%?LJDWTtk!w~|#_{0eY90t`*r(?Rie^Hnw9*dr?L=%A)! 
zF3Ts7?bd1fvOe29Al{$Y8wU4khw5Db?qFhbyN;7`A`?$ZK?Ud?H!|ILkLmIG; zA62y0MQNyPoBFk}ld|~VRrgaHR5OmAl@iTDz0(1apb$?J5eM>J(Pl|);J7g1FI~96C@og=*)4nS)oCqdcJ;?F3#}CMh`t(E6)Xm^E zXr9sx?|V`TwOC59-mBTqw?x}U2v{8r_EdIMa-5x2*E#;O-ZdUe8I7J-o77H;S0=Wr z2-#LWP~WRK@{5wKH-h3jtDd5MTtph%BEG!#tQ<1meNJLn(XW~Z6w!W3I;Yt$6RaLf z1vdHv+a8rsF@8=x$>fuP#kZ(~YQ82-F!6$FJGMhb91GzolLypgH;;=3ql2RD`_T)^92>Mxl7F0+$+nt`Ji-IEtnt^p`j+86yv6YK{^LihAg<@>pR3D znF-oBtP%Od8N~*@r+A@@(i*ihp?P+Ot|UZUHp6(SW`yry>4%BX%}%}=X!ERCa4gu{ z*{Lwtd`e9<{+zVYSn#c9`_(Adwn`*_D3KjMFCXYz@`k-D8r4IvOmk{jUL!mxf_1EmETD=*#JT-GvJH$<+rylk1Z9XE_9D7a;l!e)d zS_qYF^ob6QUa5ohH8tB<=(^Gq62fDnh!}IWT6(>dFhVv!|DY-IH8hdf%(UR z9?A}?{j;MYN%OcEFFmK?G{Q2L@i0kTdrFiZdr{`a_`{Npu@lk<=}Fm%&AktK>NKC% z&(fo^AFjP5NlFjvfkvOKlD9Pf{jNHP>_Lgi`0ILAE#B|HHF-e&&?WWkwX>rBc!+44 z*ryka4yvciURJp$_KVKxfRtBuzckk5Zc%h1H2bw<;?l_{#gO#_LBl>Pb<%uQ@;i23 zu8d1EKHpT|l3l3yI!^4`=TEOaC%qG{8Wx{t4~TDT zXC*M>&r4G4A@;N0r*e(Gu7ZCM8mAVkoziDDxEv8P*Un4S>rY9(vk)RSxl6=vgoRf_ zYSGctkNI}W9+Sn94M=jvUseewo)m+9a4h&ErKYoO(oxwC8UEkbliwG2jGa|k)6ihG zGivAY^U~s%6h-w{lB-$>>Zt`^LAF!Ea_w;uYe+#be<8t7Uy*#I!C8?W4YtOB9vC~I zj%xgc@W8>K@sFvWA3vnYn|Ia!O&(JdjD~5eW<>%p@shgvbU>#chVYMU&jH^^%{`*g zL@;-*Jr@G8<=?A?^aEq()iUE3LW3XDM9$l4`t+zu+vt;C9y_EScN@#+wYiYkFRFt(oNwA$qC)tk{0^lqlGIDp2?p@zMANmAm=4gn#^7;<*oAlV~=2 zRh(?6O8%A@eDsW@C3{R=Ky$AeAw4d&ooyA7vZIo$Y@0YWeO(<=dQ!Yz>wikTp%zPz zOQ+OgRi}1B$~=2T)%mbLmVfEsPaV`(HpWI>Wi}wC^Qxv^0lWP zmoS_cMLs$!Tl$hJ*$8Vw#zRt+YatYIav=OlWA`i!t((EjxpqwDstrozH&2RLnxPKO zJu*`!_e+g8Uk*PD!Q&r3sRkZ@NbNTsW=7J$`n5CtzUqxIl{|Vni1|*vEZeL0x%O=- z-)o^)NrQ)YBBVQ;Jg7IMVcp8;pbYcoZZ*~T^HLBKA$0bGU=&Y2^^z}d7Utob+oS`p zeOoUZ4QuUcVUD^P772c6Vi1mqrN?$_a^;QCD?BUCYR1yu<6-PQ`Gka~`I^-1wJ_^n zJ13cF^olbYVNKZhd8wZ93lheO&_I*>WZ^V-sa>;RyN#XM=XXe5-?^HNAvU*l-Mn(; znvR>h*^^;vL#8}4@_pCy7XRJh?QS^CG0*gQytOGIF1|9`_K?OVyj)6gtfbW4}a22Fr9Q-Z#Zy>rDML!CD9Q*|M zDeyDkXTi^dUjUx~H-gWC&x79pzXN_BlrBz{?-tJIfkj{mxD(t3%3wKI1v7d#0LfTutOJOc*6VemZoHh2jf1;@av;5c|4 z41&|(EI1Dma1mSr-vQqPZ-TeLWiSlh0e=bJ16RQhz+ZvC1}Uh6QP2eA-~%uT{uB6H z@Snkd0sj^JH}K!V{{a6JMAazz5%BlG-v>VmehmBr@DIU{gMS46G59CoC%{jFe+vE? 
z_$lzu!B2yK0e%MjOYpD2zXm@G{tfuI;OD^4gMSD9ef2tDasG?o2JlPZ6X28Jm%*pN zjo>EmY492FS@0|1X7D-idGM>?*TAoX-vGY}ehd6I_#N=O;0xgQ!0&@EfLt1kvLhL9~k_h<0-X(H@Q>=om*3y}}VhuW|&@YaBszoFj-% za0Joo96@xFBZvk$g6I@S5S_lxr)RhZ(OHflI>!-2=Q)Dt0!I)f96|JF96@xEBZ#UT zL3D{Dh=w?V=sO%i^j(f1`W{CRy}=PgZ*m0D_c?;-Esh}ibB-Xo%n?Lya|F>aM-Yv0 z1kpPjLG%|KLG+g#LG&(15WU9{L{~V1=qg7Lz0VOuKi~+WA94iIUvUJ{f8+?Fzvc*{ zzu^d?lp~0096?m)2%-i@5RGyKQN|HOO^zTM;|QX0jv$)g2%---g6JAY5KVFf(T5yC z^q)9_=p&9G`df}5`rkQ%=s$A=(f`2_ME`{&i2hHGAo{NyLG*ud1kr!v2%`U+BZ&Sx zM-cr#96|IyID+W^j zat&gTfau4#1~Eu}>{71dOaFjtkbvkPav#JX0nv|h4Puah=pS(nVvvC7A9D?2kbvl) za1CORfaoW<1~EuL^pjkJ7$hM2r(A;=Bp~`{T!R=SAo?k;K@1WQ{d2BC3=$CiG}j;o z35fm$*B}N7h<=7^5Q79n|B`DEg9JqXifa&q1VsOuYY>A3L_fA3M8C#0h(Q9PU*{UcAOX>Da1CORfao{51~EuL^jlnm7$hM2ZLUEK5)l0k z*B}N7h<=xA5Q79nU*H5w0nwMa1~EuLbSu{&1__AfaSdXSfM`C~AO;DD7H|z>kbr0**B}N7 zh!$}TVvvC7D_nyZBp_PMHHbk1q86?}3=$A6;Tpss0nu$-gBTA3L|^9` z#2^7tnQIV(1VpV|gBTT!R=SAX?5fh(Q9PHm*So5)iH68pI$0(MqmC z3=$Bn;u^#t0Z}{GAO;DD?&ccAAOX>8u0aeE5Or`3VvvAn4c8zB35eEm4PuahsFQ0D zg9JqDxCSvuK=ci+K@1WQb#V=1kbtP0YY>A3MC-W*F-Snv!!?LO0-_CEgBTDC zK?0(CxCSvuK(vW#5Q79no4E!tNI-Ni*B}N7i2j6Y5Q79n_i+tkkbvkC2gBVmm z0#Xni;yx&WGKfJ1Bp?ORGu#IyPzEumfCQu<`WE*=36wz$Dj)$Vhz7V1N}vp4Pyq=@ zLGK?#&W3@RW2DToepACy2D#GnEakb>wr?t>C2gBVmm0#Xn?&wWq=We|f3NI(jr z7q|~fpbTPA0SQP!^lk2g5-5WhR6qh!5WUEKPy%HTg9=DM3Zj>|4@#g6Vo(7INI`Uj z`=A8MAO;nXfD}YWxerR93}R3L2}nWoGWS6VltBzCAOR_ej&c9kl>J-gJO&kzfD}Zp za37RjnR+JXyaE!Cg6LK5gAyo%7*s$4QV_kyeNX~r5Q7RxKnkMc+y^C41~I6B1f(E3 z!F^ByWe|f3NI(jr*SQZ$pbTPA0SQP!bdvj^1j--=6_9`wM1$N1B~S)2sDK2dAUef; zPy%HTg9=DM3Zm282PIGjF{pq9q#!!OeNX~r5Q7RxKnkL>+y^C41~I6B1f(E3$9+%& zWe|f3NI(jr^V|m|PzEumfCQuwE_dyAiK@2J&0V#-vxDQI83}R3L z2}nWo9qxk?D1#VOKmt+_eV6;71j--=6_9`wMBn2+D1kDFK?Nir1<@Pa2PIGjF{pq9 zq#$~e`=A8MAO;nXfD}aE=RPQbGKfJ1Bp?ORTigdFPzEumfCQu<`g87s5-5WhR6qh! z5MAazD1kDFK?Nir1<~8w2PIGjF{pq9q#zpRJ}7}Qh(QG;AO+C~_dyAiK@2J&0V#;y z;XWvVGKfJ1Bp?ORUvM9kKpDiK0uqpd=r6esN}vp4Pyq=@LG&*7K?#&W3@RW2DTvC2gBVmm0#XoNv`R;$MxzT{qg=4sOcD(Scrb1JAs;x zfr*9ai#vguj)94V=oh&YsOcD(ScrbHJAs;xfr*9am$(zC=@^(;h%W8~YB~lc7NTG3 zPN1e^U}7QqW$pxOItC^dqF?S#pr&JBVj=qKPN1e^U}7Qq748ITItC^dqF?Dwpr&JB zVj=of?gVN&1|}Aw)tx|1$H2rw^sC(o)N~9?EJVM?oj^^;z{EoIYuyRdbPP-^MBm&A z)N~9?EJVM~oj^^;z{EoI>)i>|bPP-^M8CnEKuyQM#6om+Cs5NdFtHH*Mt1@=9Rm{! 
z(Qk4mP}4Cmu@L=ccLFsX0}~6;cXt9c9Rm{!(Qk1lP}4Cmu@L=McLFsX0}~6;Z*wP5 z(=jly;4j!HCM73OQPa}VGcYnSv#=8VFg+#LU7<^dt48 z#LU7<^h0`5asm}KEgd}rBNH z#LU7<^tF604nV4Bv@qzUyCM73OQPa}VGcYnSv#=8V5qeT`0u?na9X$gh z6Eh1d(I2TNB_~i()6&s1FfuW-uoC@IdQx%%6*Vm#Jp&^XGYc!x13f7@fr^@zj-G*$ ziJ66!=#SQuk`t(?Y3b-07@3$^Sc!hNo|K$GMNLab&%nsU%)(0a$LLAP2~^aybo302 zOw25-M5UgToIpiQOGnSZ$i&RTO7zF-Ny!OR)U z#LU7<^vCN-$q7`{v~=_gj7-cdtV9p>q~ruDYFava21X`k7FMD^K~G9fprWRwqi0}b zVrF3_`V;k}1SN=~4nrlq52U}R!uVI}(W^`ztkDr#CfdIm-&W)@bWzd%n)PN1TurK4wH zWMXDvCHf2Xq~ruDYFava21X`k7FMDsdQx%%6*Vm#Jp&^XGYc!xU!*4`Cs0w-($O<8 zGBLBT68*(`QgQ+nH7y-I10xeN3oFrIq9-LMP*Ky;(K9eIF|)7|HF{EV0u?na9X$gh z6EjQm^Z&o^-}_7Tq~ruDYFava21X`k7FMFaOixNqprWRwqi0}bVrF3_`pfmCF604nV4BviT*l0DLH|PnwE~9fsu)sg_Y>9*OQVHsHkb_=ouK9m|0kf{suiMIf06r zmX4l*k%^gwm1wIcB_~i()6&s1FfuW-uoC@^dQx%%6*Vm#Jp&^XGYc!x-=rrcCs0w- z($O<8GBLBT68+73QgQ+nH7y-I10xeN3oFrcJt;YXikg;=o`I2xnT3_;Z_$&I6R4F604nV4BviT+kSDLH|PnwE~9fsu)sg_Y=U)02`DsHkb_=ouK9m|0ly1$EI4ViHm^ zatZ<^6&q?ATDElT=-D%HVC2ZeiJ3DC7gnxBf4hBR5>hg93IZh+8)_O_wsh?1*)wop z66Eg$pZJq8IjwNl3}aDF~EQY^Z5y+0wD2XV1Wa zks}i)X3i{JSh*7Yo%V@INXf`42$WQ8sA*`~(y^mw&%l9^BNHcP&MaJ5xf1Q*68+uwiAhMw$SDYvRBWheXxY-Sqi4^+ zfsrE=Toa zl95voD5=;`)6lY|V@J=PfdeB)CQi(pS-7xrCHlGciAhMw$SDYvRBWheXxY-Sqi4^+ zfsrE=`&Pa%AGf%$bD?D_5eI z_K8VI$;c@PlvHe}X=vHfv7=|tz=4q?6DMZQEL>Q*68(MliAhMw$SDYvRBWheXxY-S zqi4^+fsrEDbY;XW+odk%<#CXBIB3T#0)7#3ZC-DbY;XW+odk%<#CXBIB3T#5c6`@|%qWaJbCN-8$gG_-8# z*wM3R;K0a{i4!wt7A~w@iC)VdYBn z58EduAtfWHAW%}Vp{Ai_OUI6$Jp%_uj!c}GIkRwKrlDm^ z$Bv#o0|!QqOq`fGvv6VMO7xG~Cng~!Bc~uxQn8_?p=C?Qj-EXO2S$!eoR~SYaAD<2 zw6{-8LP|zXL7=2!Lrp`=mW~}gdj<}S9GN&Vb7tYf%9ZFJvrkMyN=8mWprm3$O+(9; zjvYOF1`doInK&_XX5qrhmFOS0PfS8eMovMXq+&x&L(7(q9X)#n4vZX`I5Bf(;lj$5 z=%27pOhQUVPC=lgVna_|&!pfECwS8g|QZjN10won2Y8qO$ zbnNKaGjL$!$i#`6GYc11u0;Q&ePR+)GI9z6B^4WL8d|n=?C9AuaA4%f#EF?R3l~hg93IZh+8)_O_ zwsh?1*)wop=`&Pa%AGf%$bD? zD_5d_#y&9#DH%Bhfs%?1H4QCWI(GEz88|R66Eg$pZJqJPdlF$pOdIR$}|iVZalEn7Nv^z0cpFmhz##LSt6 z3oBQmH};81NXf`42$WQ8sA*`~(y^mw&%l9^BNHcP&MaJ5xf1>J_K8VI$;c@PlvHe} zX=vHfv7=|tz=4q?6DMZQEL>Q*68#JIiAhMw$SDYvRBWheXxY-Sqi4^+fsrE=Toal95voD5=;` z)6lY|V@J=PfdeB)CQi(pS-7xrCHj}_6O)jVky8*Tsn}4{(6XgtN6(&t10zQ!PRyKH zxUh01`j_n!laP{;QxGVr*ih5ZvZZ53&z^w;BS$7q%$!-cuyQ5(SL_p$kdl#85GbkG zP}9(|rDI3Wo`C}+M+9aGI3(& z%)*70E7545n1qy!oPt0}#fF-OmMtASdiD$)7&$U=V&=@kg_SGOzhR%4gp`b&f+9aGI3(& z%)*70E78AYpO}P{jGTf%NyUbmhL$ZIJ9_pE92hw=abo7o!iAM9(L4LZB&1~I6a-2t zHqDbY;XW+odk%<#C zXBIB3T#5c2`@|%qWaJbCN-8$gG_-8#*wM3R;K0a{i4!wt7A~w@iT+*t#3ZC-Q*68-!3iAhMw$SDYv zRBWheXxY-Sqi4^+fsrE_|&!pfECKekUyLP|zXL7=2!Lrp`=mW~}gdj<}S9GN&Vb7tYf$`wCIC%QpQLP|zX zL7=2!Lrp`=mW~}gdj<}S9GN&Vb7tYf%9ZFpu}@4wN=8mWprm3$O+(9;jvYOF1`doI znK&_XX5qrhmFPdUPfS8eMovMXq+&x&L(7(q9X)#n4vZX`I5Bf(;lj$5=s&YhOhQUV zPC=lgVna_|&!pfECgMDHWQZjN10won2Y8qO$bnNKaGjL$! z$i#`6GYc11u0%iIJ~0U?894=kl8OyA4J}(bcJ%BSI52W#;>66Eg$pZJqF-R2n1qy! 
zoPt0}#fF-OmMtASdiD$)7&$U=V&=@kg_SGOe{P?cgp`b&fDbY;XW+odk%<#CXBIB3T#5b*`@|%q zWaJbCN-8$gG_-8#*wM3R;K0a{i4!wt7A~w@iT+Fb#3ZC-$B&1~I6a-2tHqrlDm^$Bv#o0|!QqOq`fGvv6VMO7vgbCng~!Bc~uxQn8_?p=C?Qj-EXO z2S$!eoR~SYaAD<2^xxPgCLtvwryx*Lv7x4+WlP77o;?ExMvhFJm^rg>VdYBn-`Xc8 zAtfWHAW%}Vp{Ai_OUI6$Jp%_uj!c}GIkRwK<%&NTqZ`B|q-5k21WGD4)HJkg>DbY; zXW+odk%<#CXBIB3T#5cW`@|%qWaJbCN-8$gG_-8#*wM3R;K0a{i4!wt7A~w@iT->0 z#3ZC-rlDm^$Bv#o0|!QqOq`fGvv6VMO7uV4 zCng~!Bc~uxQn8_?p=C?Qj-EXO2S$!eoR~SYaAD<2^gr1rCLtvwryx*Lv7x4+WlP77 zo;?ExMvhFJm^rg>VdYBnKielJAtfWHAW%}Vp{Ai_OUI6$Jp%_uj!c}GIkRwK<%)k4 zMK_2^NXf`42$WQ8sA*`~(y^mw&%l9^BNHcP&MaJ5xf1;^_K8VI$;c@PlvHe}X=vHf zv7=|tz=4q?6DMZQEL>Q*68*3CiAhMw$SDYvRBWheXxY-Sqi4^+fsrEM=!OhQUVPC=lgVna_|&!pfEC|Flm`LP|zXL7=2!Lrp`= zmW~}gdj<}S9GN&Vb7tYf$`$`1if#~-kdl#85GbkGP}9(|rDI3Wo`C}+M=Toal95voD5=;`)6lY|V@J=PfdeB)CQi(pS-7xrCHlYZ6O)jVky8*Tsn}4{ z(6XgtN6(&t10zQ!PRyKHxUh01`i1t1Nl3}aDF~EQY^Z5y+0wD2XV1Waks}i)X3i{J zSh*5?u}@4wN=8mWprm3$O+(9;jvYOF1`doInK&_XX5qrhmFO4QCng~!Bc~uxQn8_? zp=C?Qj-EXO2S$!eoR~SYaAD<2^o#8ilaP{;QxGVr*ih5ZvZZ53&z^w;BS$7q%$!-c zuyQ5(CH9F)NXf`42$WQ8sA*`~(y^mw&%l9^BNHcP&MaJ5x#EK%(G6k}QZjN10won2 zY8qO$bnNKaGjL$!$i#`6GYc11u0+4oJ~0U?894=kl8OyA4J}(bcJ%BSI52W#;>66E zg$pZJqF-j8n1qy!oPt0}#fF-OmMtASdiD$)7&$U=V&=@kg_SGOFSk!jLP|zXL7=2! zLrp`=mW~}gdj<}S9GN&Vb7tYf%9ZG=ePR+)GI9z6B^4WL8d|n=?C9AuaA4%f#EF?R z3l~DbY;XW+odk%<#CXBIB3T#0^zePR+)GI9z6 zB^4WL8d|n=?C9AuaA4%f#EF?R3l~_|&!pfECH`ym9AtfWH zAW%}Vp{Ai_OUI6$Jp%_uj!c}GIkRwKrlDm^$Bv#o0|!Qq zOq`fGvv6VMO7z?86O)jVky8*Tsn}4{(6XgtN6(&t10zQ!PRyKHxUh1?CqSbgaD$t~ z+#=yNDR;=YOU^wC?h|-G$wMk0vEeZ_PiS~b%QLn-r{e`XUefc5J+B#f!-2PqyyM7w zCO&ZDBQu{k^O=P&T=>e$H?Djq`eEMB4Q>*1i-g;x+#%yGIrk{IPv8M152<*>hR4)A zq2Vbl&)D*uju-5BNzW_xyk_7H2i`LBjwA1x_`r#e%zWa^XBNJ2;VUcOxbmIohkHLa zxJk?{5^j@nhm5=A+@s(=fd`a4q~Z}99#iv#hNrYVW6N_oUa;dOJ+IjFnt?YQc+1E; zj=X2$11CN*^NBN`S@^<*udICI%6Fn4;r-m;CNZ~2xJ}9(GVYRdkAnLI9#Ha-ibrgC zOwAJ-p3?G+EzjwA!H$>oykgI52HtSsEhFzZ@}7wgocPGhC(e9k;R_eOvhs~9--&+U z{oLRtF}Fy#P0AfI?vitlg8Kv>Q1Xz9M{IaZ%@Z1)((;Tg&*^xe$H?Djq z`cdA`4Q>*1i-g;x+#%yGIrk{IPv8M152<*>hR4)Aq2Vbl&)D*uju-5BNzW_xyk_7H z2i`LBjwA1x_`r#e%zWa^XBNJ2;VUcOxbmIoM|(dvxJk?{5^j@nhm5=A+@s(=fd`a4 zq~Z}99#iv#hNrYVW6N_oUa;dOJ+IjFnt?YQc+1E;j=X2$11CN*^NBN`S@^<*udICI z%6EM6Q}hFFaFdu@B-|$D4jFgJxktf$0uLy8NW~*IJf`Le4Nqx##+K)FykN&mdS0>T zH3M%r@RpHx9C^>g2Tpus<`ZW=v+#urUs?IamG4A9#{0R!O=515aGR7nWZWg^9tHOa zJfP$u6_42Pn3^XvJf-CsTb|SLf*mjEdBvXB47}mMTSne-ZBp)#ahIHX6x=89fRcw)JYvIRYM#*Wl$K{~c}~X*cD$tL z6?%O7&A=NDyk+DaN8U5>ffFB@`NWye zEPUa@S604p=4#M~m`HYs<=xJ%AG3honlK*>WY9dnP_`;v+MkIP;l>FI@P_$~UfjC;Ca=&kb%8bBl!A zq}(CnE;;uoxKH2#B@d~1#D>SzJfYz!Ezj8UoQ@alcuCJI_Pl1`4F}#b@{S|#nfSno zkIa1H%x4z9aN#Q}-?;Lf=!d+Y8{8!3774dWxkJWXa_&)ZpTGl39#Zj$4UegLLc>#9 zp0VXQ9WU7NlAc%WdCkBZ4!mXL9Y@~(&@cFZe+KafzdrGinNOVg%)%Egd}ZYuSH2Vd zWbfw&H;K7L!fjISka3rsdlcL!@PLwsR6JtCV``qz@RXKkY4xkbWlQtps(mz;YP+$ZpWl8010V#8x6WOnl(PM`k{8<}(XlxbT&gZ(R9K^i#c`8{8!3774dWxkJWXa_&)ZpTGl3 z9#Zj$4UegLLc>#9p0VXQ9WU7NlAc%WdCkBZ4!mXL9Y@|X@qrT`nfb(-&n$f5!dF(l zapgPFPxF3maFdu@B-|$D4jFgJxktf$0uLy8NW~*IJf`Le4Nqx##+K)FykN&mdS0>T zH3M%r@RpHx9C^>g2Tpus<`ZW=v+#urUs?IamGAhbr|1XV;3hG*NVrYP9Ww5cbB}`i z1RhZGkcvlacudU`8lKYfj4jXUc)^aB^t@uvYX;tM;4LHXIP#u}51jbO%qPx#X5kAL zzOwR-E8mHJy7zN~o5b8A;WjCE$hb?+Jqqptg5_5}$+oaqf<1RV( zD7a7H0VNNqc*KUs)I6c#DJ{>~@|=zr?08AfEB3r*;0*`fGV+ci@0s|(iI2>D;>>3j zzHs3yE8n>Co#*z=l!Hyn7&$UBa_XW|1VJ~H!(GoM-b!iBG_eB;V@qTAlj4Q>*1i-g;x+#%yG zIrk{IPv8M1{|`fd{NjLp$8r5ZN(tpCM+qgASYtiwee2opSz|qGtY?k&tg+_XZ@x9w zSYwSf)>vbWHP+Z~tg*%#OBi8<5k?rt2qTOz!U!dlFhU6CmOeJ$(iY88K$Ulo@jtELpK; z!vkA(>^bnrkrRP`Qa?e?I44Ay2vIJ$9DN&|El^S&# zG-=VMLzf=+^cgT@#FzqTXyU@ 
z@W_!9fqzOrLC!cQM3@LsF1X~1Yhv6G=avL_BuSAbLzWzQ3KS_(rb3k(bs98j(WXO} z9{2PaFl5A-2~%dwS+Hcqnhg(Z*|F!qBS%hn@Emx8AZMHtB20uR7hH11H8F08b4!9d zlB7tJAxn-t1&Wj?Q=v+YIt`k%Xw#ufk9+zI7&2nagef!TELgH)&4vfI?AUYQks~Jp z|FnLBoN-QwFcG3$aLEX?+pqBq`El$dV&Zfg&Z! zRH#y;PJ<>b+H~mBZ)+OM*L+q)3w?OO8ARij*i*p-PQ94VtuQ)1ga`d-@C*GGfexDKq9QSh8Zx zh6lFn*mK~KBPRkc>LLCE#WgW* zh;vJVJCdYGlOaovJOzrBC{v+IjXDjQv}n_zOOJc{3>Y$E%!Da3<}6sUV$FsJw(Qt* z;E^LI96Jd-L69@f2@xhjlnXAo;+hyY#JMHG9Z6E8$&e*So&rTml&MgqMx6#tTD0lV zrN=#e1`HW7X2O&ia~3RFv1Y>qTXyU@@W_!9f&Z6&f}C+qh%gbNTyV)1*TlFX&MgV< zNRlE=hAcVq6ev=nOob{n>NIH5qD_Y`J?`lZ)+OM*L+q)3w?OO8ARij*i*p-PQ94VtuQ)1ga`d-@C* zGGfexDKq9QSh8Zxh6lFn*mK~KBPRm?ynce5aZZRZ5u#jh$rabcxFOCh3GPUeB29)Y zIr0=JQld9DN&|El^S&# zG-=VMLzf=+^cgT@#FzX?< zZisVBf;*C=NRuH;jywg5lqgf7N{u=VnzU%sp-Yc@`V1H{V$6goGv+KsZpmvlNN0{bm?(Vp8-QgjF~WH#+(I9R;=0Zz?L0*4m@(?MBrc6PmnXt z2@xhjlnXAo;+hyY#JMHG9Z6E8$&e*So&rTml&MgqMx6#tTD0lVrN=#e1`HW7X2O&i za~3RFv1Y>qTXyU@@W_!9fmihtw< zDpaXar$LhzZ8~)6aZjHCLq?35FlEM^1xr?}+3>)Y9eWNua^ytdU(rvHGtLPSCPI`8 zF1g~G7&pYZCBYp@Ql!a{B}bkDMM{*ZP^Ctl22EPD>CmOeJ$(iY88K$Ulo@jtELpK; z!vkA(>^bnrkrR%b2A&|u8Rvuu6Cuh4mt1j8j2q(IlHiUcDbi%fk|R%nA|=XHs8XX& zgC;H7bm-FKo<0MHj2JUv%8WS+maJH_;ejnX_8fTR$cezes-GZdoD(8UgeVtWa>X?< zZisVBf;*C=NRuH;jywg5lqgf7N{u=VnzU%sp-Yc@`V1H{V$6goGv+KaYLM265Np_MVbs*a^xvcq(qqtRch2}(4sZpmvlNN0{bm?(Vp8-QgjF~WH#+(I9R;=0Zz?L0*4m@(?MBsJ(1UciJ z5Md%jx!{s3u8DC&oLdsykt9W$3|VsIDNv+DnF>{E)M?P9MVk&?dfd}zz>pDRCQO+z zXTg#cYc@QvWyhWaj~qD>_&4+uw< zDpaXar$LhzZ8~)6aZjHCLq?35FlEM^1xr?}+3>)Y9eWNua^!>q=Yb~(a>h9!!bFI2 z!6jE*6XS+BwFqrbCw=_w*SsWW<;WQ)bLruw=!W4G(PDvFE@eM@|I( zZT$o}aYLM265Np_MVbs*a^xvcq(qqtRch2}(4{E)M?P9MVk&?dfd}zz>pDRCQO+z zXTg#cYc@QvWyhWaj~qGSbCbXm1UciJ5Md%jx!{s3u8DC&oLdsykt9W$3|VsIDNv+D znF>{E)M?P9MVk&?dfd}zz>pDRCQO+zXTg#cYc@QvWyhWaj~qD>_;>Xaw)Y9eWNua^ytdE&T*JaYLM265Np_MVbs*a^xvcq(qqtRch2} z(4sZpmvlNN0{bm?(Vp8-QgjF~WH#+(I9R;=0Zz?L0* z4m@(?L?Ef3AZMHtB20uR7hH11H8F08b4!9dlB7tJAxn-t1&Wj?Q=v+YIt`k%Xw#uf zk9+zI7&2nagef!TELgH)&4vfI?AUYQks~Jp|Gs{LoN-QwFcG3$aLEX?gHa6Xc9@LWGGB<$_DDxF*I8ac)U)N0Jn2GGxh-r$CVs zWhzvuQKvzZ7Hv9o>2Xh=0YgTNnJ{I>oCQl(tl99umK}QzJaXhj;2r%0IpdrVVIoAi z;F2q@iE%@mTN2!nBt@DGS#snlP^3he3RP;oN-QwFcG3$aLEP zBgRaaGGoqyB`el!cwozpJqI2+a>A#kfhP!Z#yKIvM2K?1C0AS%PBgRaaGGoqyB`el!cwozpJqI2+aw6~_=_kk; z=Y$9oA<6}pTyaf|8{*uO;Ep6I(qzbzBTs=MCCXH&Qln0TCN0`@=+fh!J_Ck~7&BqY zj5!OItXQ+*fh{}s9C+l&iNJgM33A3cA;Lt6a=|56TodDlIJYFYBT0%h8M5TaQ=mwR zG8L-SsMDZHi#8p)^th+bfFUEsOqeob&VnT?)@*oS%Z@z<9yxL%@E_|Z$QkE^2ooX7 z1(#fLO^h4j+>+pqBq`El$dV&Zfg&Z!RH#y;PJ<>b+H~mB9DN&|El^S&# zG-=VMLzf=+^cgT@#FzsBy;uw}=d z1CJaz5qMueLC!cQM3@LsF1X~1Yhv6G=avL_BuSAbLzWzQ3KS_(rb3k(bs98j(WXO} z9{2PaFl5A-2~%dwS+Hcqnhg(Z*|F!qBS%gI{!{$~IpdrVVIoAi;F2q@iE%@mTN2!n zBt@DGS#snlP^3he3RP;sBy;uw}=d1CJaz5%|yb6Xc9@LWGGB<$_DDxF*I8ac)U)N0Jn2GGxh-r$CVs zWhzvuQKvzZ7Hv9o>2Xh=0YgTNnJ{I>oCQl(tl99umK}QzJaXhjAg`YwXPgrvOoS*G zTyn)VF>Z)+OM*L+q)3w?OO8ARij*i*p-PQ94VtuQ)1ga`d-@C*GGfexDKq9QSh8Zx zh6lFn*mK~KBPRm?g?@sZaZZRZ5u#jh$rabcxFOCh3GPUeB29)YIr0=JQldLCE#WgW* zh;vJVJCdYGlOaovJOzrBC{v+IjXDjQv}n_zOOJc{3>Y$E%!Da3<}6sUV$FsJw(Qt* z;E^LI0{^9cf}C+qh%gbNTyV)1*TlFX&MgVNIH5qD_Y` zJ?`lKP6Xc9@LWGGB<$_DDxF*I8ac)U)N0Jn2GGxh- zr$CVsWhzvuQKvzZ7Hv9o>2Xh=0YgTNnJ{I>oCQl(tl99umK}QzJaXhj;J?;SkTcE+ z5hg;E3og0hnix04xh26JNm8WAkR?Z+0!2!csZgaxod!)>wCT{L$31-p3>h(I!ju_v z7A#q@X2SzpcI-Lu$dMC)qJDy$aZZRZ5u#jh$rabcxFOCh3GPUeB29)YIr0=JQldNIH5 zqD_Y`J?`lh9!!bFI2!6jE*6XS+B zwwCT{L$31-p3>h(I z!ju_v7A#q@X2SzpcI-Lu$dMC)|6V^q&NwGTmFqrbCw=_w*SsWW<;WQ)bLruw=!W4G(PDvFE@eM@|IF`U!HzIU&MC zh;qRtS6mb0hB&t*xFbo5G#Rqw$Wx$5i82+c)Tq;-NsBfey7aiG&wwE##!Q$pW6pvl 
zE7ojyV9Sm@2Oc?cBJe-xC&(G+ga{KM$_1BPaZQXH;@pzpjwC74WXO^uPk|yO%2cRQ zqfUb+E!uSG(&L^!1BQ$kGhxb%ISZDoShL}QEj#ucc;v{5z^D2Na>h9!!bFI2!6jE* z6XS+Bw{E)M?P9MVk&?dfd}z zz>pDRCQO+zXTg#cYc@QvWyhWaj~qD>_)I@R&NwGTmFqrbCw=_w*SsWW<;WQ)bLruw=!W4G(PDvFE@eM@|I(XZ-{@ zaYLM265Np_MVbs*a^xvcq(qqtRch2}(4c8Rvuu6Cuh4mt1j8j2q(IlHiUcDbi%fk|R%nA|=XHs8XX& zgC;H7bm-FKo<0MHj2JUv%8WS+maJH_;ejnX_8fTR$ce!JUq3<4I44Ay2vIJ$9DN&|El^S&#G-=VMLzf=+^cgT@#Fzx_=+Z9 z)8a?8`7s@SLYJS?<7eFSbNc*(0l#F(uNd)b#(cwsZ<+ENW_-t--?HF)mi)kq-?8TR zZ1@8Y{E;nxV#lA^^A`^Ml}G-@k-u}|9|Zna`||`(667h)c$#ybA;hzUd5#Fr6XgXi zc#%t9;)<8K<`rVR$_=j(=XGv*g9LAK$6F+Mn-uSm=3O$pN0#@=@d0^0q`*fM`Ir)) zQ07xAd`6YesqqDMzNEodH2InqKcdZ#>F^V}{FEL)=NAn4B}0D2h+i}28zy|q zl;1GpJLdeB1>dve2Uh%!HNR)WA9&!8Z21#A{>+}gaNw^z@;8qBofH2c@P++(f+q>` z6lXlmInNN{S;9O=gy)I!0vEi?Y9$DTe#|Py3kOCi36>JjC+1gpIXvgJ?g_%nO{!hyf?$lo~fcTW6+K;8a4!IK1eiZh<(oM#B}EMcA_!t+FV zfeT*bl9#yRWv+RJ7_V}}Ys7h-TizhSo80jhN!}*KJEVD+4DXTUeR6z2o)0PT5k)?x z#3z*blnS3w<#TF$L7gvY@D)wIrp1qF^J6;vgf2g&$IrOu=k)mn1AfVnUoqm>jQNHM z-!kPl%=nHuzh%MqEct;Izhlks+3*J*_#<2X#Ew6+=Pw-iE06q*BY)?_KM4Hq_U8$n zB*;^o@igZ=Lx^Vy^BfVLC&~+4@FJJI#1$`d%`3!sl^b3o&g3x_Q{xNjd`W|^X!12Jengue)8Qv{`6)eq z#yvl$&o3D8ONRW45x-{4H%$1JDZgRHcg*=M3%+N`53KkdYktp$Kk&dG+43iL{Fyy} z;lN*c z{EjuhXTu+O;E!zi6FdIQp1*M5uRQWMj{Kby{~+*x*qJKiG6+oX7hH1Cq(J+iz{jt|K5Aq767 z$j6lUgfgE};WMgyPK__9^CbOoyM)<)`%c8Tb60KEGhVFB$SHM*NyF z-!S1@ru>E(-!bR6Ecl)!Kd|C=toc0~{=fr&WXqq}@n`n@g#&-(k-u@|@0|DtfrkBg zf+q>`6lXlmInNN{S;9O=gy)I!0vEi?Y9$DTe#|Py3kOCi36>JjC+1gpIXvgJ?g_%nO{!hyf?$lo~fcTW6+z*qL?37#a#Q=IWM=R8A*X9@Eh z5uPW?3taFbm%PLkFLTW+#CVk(UL(%y-0}tq-sFzANb)u*-XYDqWO$D(?~~&L@_a~v zk0|mnB|f3dr&RciDxXv13+jAHgRf}vH7$Nbn;+BRCv^EKJ$}YLKc~+x81PGm{E893 zX3RHC_?9WZVa9jN`7H~+XUPw&_#JD0&xSwnz#rN2CwBaqJ%8cAUwPzj9QivZ{z2gX zvOiDoBtf3yjHfy08A3cunCFP_JW*cYf)}~uC9ZgxYhEG7tK9G!abD+^H%Ra%cf3WC zw@L91Y2GEndt`Z^93PP9LkfIEk&h|y31vQ|!e>{EjuhXTu+O;E!zi6FdIQp1*M5uRQWMj{Kby{~+*x+n*JKiG6+oX7hH1Cq(J+iz{jt|K5 zAq767$j6lUgfgE};WMgyPK__9^CbOoyM)<)`%c8Tb60KEGhVFB$SH zM*NyF-!S1@ru>E(-!bR6Ecl)!Kd|C=toc0~{=fr&WXqq}@n`n@g#&-(k-u@|@0|Dt zfv@e)6Ffo^nuFnsL1eo2xfNs=TNKK18%5=kPN z6jDheoeVO`BAXm?$s?Zv3MrzP5=tqfoC+$bqM90NsiU3-8fl`L7FubeoenzbqMIIi z>7$7FlAM6;@eeoeehGVw)Xy*<+sr4mskO6HYnf zoC_|w;+h+7x#OM(9(m%K7hZYeoew_w;+r3S`6J*T>rWs-1QS9iVT2PwBvC{YLo9K` zlRzR#B$GlaX{3`uCRt>YLoRvbQ$Qg_6jMSeWt3AvB~?^YLoIdG(?BClG}A&WZM4%t zCtY;YLoa>wGr%B23^T$gV~jJwBvVW?!z^>mv%n%tEVIHYYpk=uCR=Q?!!CR5bHE`- z9CN}cXPk4vC0AT?!!38*^S~odJoCaUZ@lxtCtrN?!!LgXjO$MzK?D;*C}D&XK_pQ` z6GJR<#FIcGNhFg(DruyXK_*#blS3|f!6Z{mGs7%%%(K8EODwa(Dr>B>!6sX5v%@ZX z>~p{&M;vp)DQBE>!6jE*bHgon-1ERAPdxL&D{s8>!6#pQ^TRKH1pE{I2_%SMLI@>{ za3Y8#ifCepC60I!NF<45Qb;9@bTY^!i)?bpC69axD5QvDN+_j_aw@2#ifU@8rH*b~@;!i*9=8rH}r9a+Lg!+Rg$7%|i?`!YE^mGr=TNOf$nQbIh~AB1Kq5&blR_$Kq?17=S!9z#E_virKp{mGQ$i_a zlv6<^Ra8?$Ep^n>KqE~w(?Tn4w9`Q+U3Ak!FMaegz#u~mGr}lij5EO`Q%p0%EOX4W zz#>a5v%)HCth2!;TWqt#E_>{Az#&H*bHXWSoO8h?S6p+$EqC1Wz#~sQ^TI1{yz{{) zUwre!FMkC5Q~e1fh+skpC5&()h$M<=Vu&Syl*dYDyXE2YHFyZj(Qqsq={x)Xr+yII_RW}ZhGjYkA4OiWQbu#7-fuc zCYWT3X=a#Zj(HYXWQk=~SY?fMHrQl~ZFbmYk9`g} zg0fiJ%ObMlwQBDPwR8dV0wbW5h1C2D%Obe~F(M|`QbkR)@z4Xz~0D}xM z%m|~5G0p^&Ofk(2v&=Ei0*frM%nGZlvCamYY_ZJ_yX>*g0f!uM%n7HQan1#oTyf0} zx7=~h1CKoM%nPr)@y-XIeDTc>zx)v}r9XiL5ljf7gb_{zkwg(q46(!!PXdV~kxUAy zq>)YrnPibo4!Pu!PXUD#QA`P?lu=Fvl~hqp4Ykx!PXmoK(M$`iw9!rnopjMn554r! z&j5oAG0X^~j4{pxlT0zq471EJ&jO1qvCImqtg+4pn{2Vo4!i8J&jE)Vam)#)oN>+t zmt1kp4Y%BJ&jXJ<@yrXayz$NlpM3Gn55N2o@Xz%pkRXByA(Sw}i6D|FqKP4vIO0hl zktC8yA(b@J$sm&~vdJNrJn|`^kRpmHp_DSpsi2Z7s;QxtI_hblktUjHp_Mk;>7bJ? 
zy6K^pKKdD8kRgT{VU#h(nP8GBrkP=uIp$elktLQ{VU;!3*h9qTyn)VH{5c^Jr6wc#4|6v^2R$KeDcLNKm77Xz>NL`5=1Z| zgc3$L5kwM2G%>^yM?486l0-5oq>@HD8Dx@0HaX;yM?M7y zM?DQR(nK>Yw9-a99dyz~H$C*yM?V7$GQ=<=j55YJ6HGG2G&9UH$2OwwoN~rF7hH11H8g0fiJ%ObMlwQBDPw zR8dV0wbW5h1C2D%Obe~F(M|`QbkR)@z4Xz~0D}xM%m|~5G0p^&Ofk(2v&=Ei0*frM z%nGZlvCamYY_ZJ_yX>*g0f!uM%n7HQan1#oTyf0}x7=~h1CKoM%nPr)@y-XIeDTc> zzx)v}t3QDR5ljf7gb_{zkwg(q46(!!PXdV~kxUAyq>)YrnPibo4!Pu!PXUD#QA`P? zlu=Fvl~hqp4Ykx!PXmoK(M$`iw9!rnopjMn554r!&j5oAG0X^~j4{pxlT0zq471EJ z&jO1qvCImqtg+4pn{2Vo4!i8J&jE)Vam)#)oN>+tmt1kp4Y%BJ&jXJ<@yrXayz$Nl zpM3Gn55N2o@PF!0AVCBZLMUN`6G0?VL=!_Sam15AB1t5ZLMmyblR+j~WRpWKdE`?- zAw?8ZLMdgGQ$ZzFR8vDOb=1>9BTY2ZLMv^w(?KU)bkjpGee^THAVUl@!YE^mGr=TN zOf$nQbIh~AB1Kq5&blR_$Kq?17=S!9z# zE_virKp{mGQ$i_alv6<^Ra8?$Ep^n>KqE~w(?Tn4w9`Q+U3Ak!FMaegz#u~mGr}li zj5EO`Q%p0%EOX4Wz#>a5v%)HCth2!;TWqt#E_>{Az#&H*bHXWSoO8h?S6p+$EqC1W zz#~sQ^TI1{yz{{)Uwre!FMkC5U-}bB5W$2HN*Lip5J?o##1Kmy@g$H)63L{HN*d{8 zkVzKVk0rUD3ND#q<5K0)~L=Z_7(Zmo-9PuQOND|4U zkV+cqWROV~+2oK*9{ChdND;-9P)Zr)R8UD3)znZ+9rZNONE6Mp&`KNabkIo`-Sp5) zAN>q4$PmMfFv=L?Ofbn5)66i-9P=!&$P&w}u*w?iY_Q1|+w8E*9{U_{$Pve!aLO6y zTyV)1*W7T+9rrx&$P>@J@X8zSeDKK^-~8~)9|8Zj{sa<4Fd>8zMmP~f5=AsI#1cn5 z2_%w4GAX2zMmiZ}l0`N-X?_+;Yb~4?Ob3GcUaI#ycN;^2Ikl{PIV@g8l>&L@*(Q5=J-?L=r_b zF~kx_JP9O{L^3I)l14fiWRgWTIpmT@J_Qs~L@_0lQbsuyR8mDXHPli^Jqh9qTyn)VH{5c^Jr6wc#4|6v^2R$KeDcLNKm77Xz`xX=K!OM+giyi= zCxS?#h$ewe^2n!vLW(G+gi^{Vr-DkVsHTQm>ZqrI zMw)1*g;v^Vr-M$q=%$BW`sinXL53J+gi*#AXM#zlm}Z7q=9p)JMV44*g;myAXM;_) z*k*@a_SoluLykD+gj3Eq=YmVFxaNji?zrcHN1k}*g;(Bq=Yvna_~wUS{s>sqpFn~L zCWKJJ2q%I_qKGDjSmKB$fkcu>CWTbeNGF3#vdAWfT=K}LfI^BWri4<;D5ru-s;H)h zTI#5$fkv8WriE78Xs3fty6C2dUi#=~fI)^BW`t437-xb>rkG}iS>~8$fkl>BW`$MO zSZ9Mxw%BHeUG~`LfJ2Tr=7dwuIOl>(uDIrgTkg2$fk&Qr=7m?@c;|ypzWC;cU;YUA zSNao35W$2HN*Lip5J?o##1Kmy@g$H)63L{HN*d{8kVzKVk0ZaN5ND#q<5K0)~L=Z_7(Zmo-9PuQOND|4UkV+cqWROV~+2oK*9{ChdND;-9 zP)Zr)R8UD3)znZ+9rZNONE6Mp&`KNabkIo`-Sp5)AN>q4$PmMfFv=L?Ofbn5)66i- z9P=!&$P&w}u*w?iY_Q1|+w8E*9{U_{$Pve!aLO6yTyV)1*W7T+9rrx&$P>@J@X8zS zeDKK^-~8~)9|8Ybe*y_2m=HnT31QJOinG{k*l~ z0tzXjm=a1UqnrvVsiK-1YN?~11{!IinHE}Uqn!>q>7tt+dg-H|0R|ajm=Q)9W1I;l znPQq5W|?E21r}LinH5%9W1S5)*m&DWjYUDygEH8fvMdo(39eqL~(2X``JEI_aXD9(w7cp8*CLVwe#| z8DpFYCYfTI8D^Pdo&^?JVwn|IS!10IHrZmE9d_Acp92m#;+PXoIpdrQF1g~G8*aJd zo(CRz;+Yp-dE=cAKKbICAAb2G;NR#^AVCBZLMUN`6G0?VL=!_Sam15AB1t5ZLMmyb zlR+j~WRpWKdE`?-Aw?8ZLMdgGQ$ZzFR8vDOb=1>9BTY2ZLMv^w(?KU)bkjpGee^TH zAVUl@!YE^mGr=TNOf$nQbIh~AB1Kq5&b zlR_$Kq?17=S!9z#E_virKp{mGQ$i_alv6<^Ra8?$Ep^n>KqE~w(?Tn4w9`Q+U3Ak! 
zFMaegz#u~mGr}lij5EO`Q%p0%EOX4Wz#>a5v%)HCth2!;TWqt#E_>{Az#&H*bHXWS zoO8h?S6p+$EqC1Wz#~sQ^TI1{yz{{)Uwre!FMkC5Tm1yl*dYDyXE2YHFyZj(Qqsq={x) zXr+yII_RW}ZhGjYkA4OiWQbu#7-fucCYWT3X=a#Zj(HYXWQk=~SY?fMHrQl~ZFbmY zk9`g}g0fiJ%ObMlwQBDPwR8dV0wbW5h z1C2D%Obe~F(M|`QbkR)@z4Xz~0D}xM%m|~5G0p^&Ofk(2v&=Ei0*frM%nGZlvCamY zY_ZJ_yX>*g0f!uM%n7HQan1#oTyf0}x7=~h1CKoM%nPr)@y-XIeDTc>zx)xfrayrM z5ljf7gb_{zkwg(q46(!!PXdV~kxUAyq>)YrnPibo4!Pu!PXUD#QA`P?lu=Fvl~hqp z4Ykx!PXmoK(M$`iw9!rnopjMn554r!&j5oAG0X^~j4{pxlT0zq471EJ&jO1qvCImq ztg+4pn{2Vo4!i8J&jE)Vam)#)oN>+tmt1kp4Y%BJ&jXJ<@yrXayz$NlpM3Gn55N2o z@bC2}kRXByA(Sw}i6D|FqKP4vIO0hlktC8yA(b@J$sm&~vdJNrJn|`^kRpmHp_DSp zsi2Z7s;QxtI_hblktUjHp_Mk;>7bJ?y6K^pKKdD8kRgT{VU#h(nP8GBrkP=uIp$el zktLQ{VU;!3*h9qTyn)VH{5c^Jr6wc z#4|6v^2R$KeDcLNKm77Xz=r+=5=1Z|gc3$L5kwM2G%>^yM?486l0-5oq>@HD8Dx@0 zHaX;yM?M7yM?DQR(nK>Yw9-a99dyz~H$C*yM?V7$GQ=<= zj55YJ6HGG2G&9UH$2OwwoN~rF7hH11H8CWTbe zNGF3#vdAWfT=K}LfI^BWri4<;D5ru-s;H)hTI#5$fkv8WriE78Xs3fty6C2dUi#=~ zfI)^BW`t437-xb>rkG}iS>~8$fkl>BW`$MOSZ9Mxw%BHeUG~`LfJ2Tr=7dwuIOl>( zuDIrgTkg2$fk&Qr=7m?@c;|ypzWC;cU;YT#)Sp0t2quJ3!U!jVNTP@)hFIc=CxJwg zNG63;(nu$ROtQ!(hg|Z=r+`9=D5iu`$|$FTN~)-)hFa>Vr-4SAXr_f$+GwYPPP*u( zhhF;VXMjP57-ob~#u#UUNv4=)hFRvAXMshQSZ0M))>vnQO}5x(hh6sA=YT_wIOc>? z&N%0SORl))hFk8q=YdC_c;q4$PmMfFv=L?Ofbn5)66i-9P=!&$P&w}u*w?iY_Q1|+w8E*9{U_{ z$Pve!aLO6yTyV)1*W7T+9rrx&$P>@J@X8zSeDKK^-~8~)9|2qX6G#xjgb+#?;Y1Kg z6w$;GOC0eekVq2Aq>xG)>12>e7TM&GOCI?YP)HHQlu$|;uj*e7TfHw z%O3k2aL5tIoN&q+=Ui~f71!Kw%N_ST@W>O-yzt5!?|ksd7vKEw%O3&%S$_fvBA5_D z2_u{cB8eiJ7-ESdo&*v}BAFCYNh6&MGRY#F9CFDcp8^UgqL>m&DWjYUDygEH8fvMd zo(39eqL~(2X``JEI_aXD9(w7cp8*CLVwe#|8DpFYCYfTI8D^Pdo&^?JVwn|IS!10I zHrZmE9d_Acp92m#;+PXoIpdrQF1g~G8*aJdo(CRz;+Yp-dE=cAKKbICAAb2GU|W9z z2_l#fLJ1?B2qK9hniyhKq5&blR_$Kq?17=S!9z#E_virKp{mGQ$i_a zlv6<^Ra8?$Ep^n>KqE~w(?Tn4w9`Q+U3Ak!FMaegz#u~mGr}lij5EO`Q%p0%EOX4W zz#>a5v%)HCth2!;TWqt#E_>{Az#&H*bHXWSoO8h?S6p+$EqC1Wz#~sQ^TI1{yz{{) zUwre!FMkB==uaR)1QS9iVT2PwBvC{YLo9K`lRzR#B$GlaX{3`uCRt>YLoRvbQ$Qg_ z6jMSeWt3AvB~?^YLoIdG(?BClG}A&WZM4%tCtY;YLoa>wGr%B23^T$gV~jJwBvVW? z!z^>mv%n%tEVIHYYpk=uCR=Q?!!CR5bHE`-9CN}cXPk4vC0AT?!!38*^S~odJoCaU zZ@lxtCtrN?!!LgX{8#-6B#2-_2qlbgB8Vi4Xkv&Zj(8GCB#C5FNF|MQGRP#0Y;wpY zk9-O!q=;flD5Z>YDyXE2YHFyZj(Qqsq={x)Xr+yII_RW}ZhGjYkA4OiWQbu#7-fuc zCYWT3X=a#Zj(HYXWQk=~SY?fMHrQl~ZFbmYk9`g} z{a3Y8#ifCepC60I!NF<45Qb;9@bTY^! zi)?bpC69axD5QvDN+_j_aw@2#ifU@8rH*b~@;!i*9=8rH_6F7-WcH zMi^y`aVD5#ifLw;WsZ3kSY(N1R#;_?bvD>!i*0t;WsiLhIOK?9PB`U^b1t~#ifeAT z<&JwEc;ty^UU=n=cRu*!i*J7T<&S{>rayrM5ljf7gb_{zkwg(q46(!!PXdV~kxUAy zq>)YrnPibo4!Pu!PXUD#QA`P?lu=Fvl~hqp4Ykx!PXmoK(M$`iw9!rnopjMn554r! 
[base85-encoded GIT binary patch data omitted; literal 595423 bytes]
z_U5c-NP7tUy`<@ShS5ciD2NTQ;$Ox7sUZ^p4v}_q{_*RfEWqnQK+Pg-e&C{F(a*gs z@{%)FT)wcQcYV#b0H{OoSl=oDSd2TEFs>)UJ0F9)XY|n&n^Nb8oopb*i+}ECt^m{WJ+0kzFn6h?77-EsLal#P zPJOzEc2dF*VS~^t&<~kMNfv3r5TQ*HVy2Rt$Z@A-Zou`3&KWw)@s(3Hl5y)MYD7)m z+>LT0P)pAnY@xeJl?VSp^b9J-ksGN%c!+DCzK{UvU}1CPn+T3aQ}9?g#(*;>|2Jmz zGb(S=i>PF6(!JVE>-i~^J5BcfV5k}&<|D498lbIQX(Nb;!j-YNHNsm77YFo-mpa4z zEAo0`TAAZW^DiF#()bFS3e+_J|Dk8hxz7GHwfpVebQx4hco4CqsD5?@P7R@^Bgu7I*V0td{4%53miipp4)&(W2E_l2@mn)Rm#%H2(>9eUCOeILH1qSQ z@`7(t7~F>8#EqD7L&p-1-vaEO6`9H@TKh;U>_ zCF(c~jtbJ8jlVHKkt{x#$>XiwPR7cFCX4M0`8`6+(o++?)!9X_(ED1Uw~TOf)sz;X z!|XfXbn%a+loPK)==gSQe>g*;va=Vca)I30xV{I^YaFA3@#r0kHE@J(6?b*gIG~C_ zIMeCyAgTySEzv*+k5WQ?^5JpfQ_(-vZYF9k-EPiqh*8-_(K1Gnay{6oHizT_X!Vz) zB~ok>Ey`VKgTXnaWXo^-fJpcmS|`%UlnPU= zrb{SuGmP);61N6aL-ABgf>ESKOikR{l^HdoEPhi(|HSrsVU*iI*-vy3YELbmI}60A z%W-?0f{xZM=w^qdjK*Pa3s1f%JHVk81~QRy*`U>mRnE&WeJLdOS0^`$krF&>6*9IN zlPX=_Tirxs$w=b=$&AoCJi7LD5``93Q7BPKnr~_;guv8D;Haf#T@1)ci^bpfS}hZH zHR>V7qzn4A6Dmk6Bn!FHX(Cg3p8P94<+v092o>Q?$)0M%1{}*2$5yIps3y|RJ&UK7 zBTAC0z+@TCT0_H(qPZ3nAe_e(ceFUcaalD`LbElDHB>67Zbj{^T!)h{?jx8dNg6VG zhtJ_(89vkuC=MtVJ;eh~mibP=;YyYJ zRXr#$>3W+Q6}o9Zc|Ushuo;hbeE?uk>AH$L1&@#-2Z@9_Eec^vhMC@&oAXh5G*)HB z(EsxN>V@&z#d{_%TWnf@pnb;VJuo@%@Zk9}lAn6jO>r`LH^*7e(DYG}C^U$O1Upo6 z>35RW4>rtt5!Uo#0R-rHOsRzX7iFPnI!YVO8#$m-uAJf+gg<<-^yOgn0>eUA_XJ8# zX8KW7KiAKqBz}#vD)(g@`IoD+t@;W)T(777LEmJ=>ZWQF=UENk-;pTv1Boft3JoXp z$t8H&XzVhlbVSn(D zR*lorUX>Rn%!Qc40wp4yMgzQ67Y_@r(%}8U_6EYbmsb z^mB^6%j8Sdj!mZRG2(Zbytkc|CIDI?bW73!8JZC}L2pQ}0G;QwZn(CQTm>}2Gs89s z*+5sdP&HZ`jSbClb{80TC9s5g9%&<5Kqi9*Mn^=#6&S?2e)HKs)8E@Z=6T04y%=Jj z%$7R6$#kl>A@GUVcsS1fb&-<#oj*0*?qq^D}8Lwc@DK#yOJ;WgvrBoz|9DYACt8u*QE- zs@6>2Iv`EOtWC8K0=iWv85MXh1lee3cVSMa9(t>9ctC}@eGE#C2jQi(g#c-Njib^$ zGLPz5eKPDW&8SortEZ33VChWJD)c~&dv;qm=E7ScEkxhJ&T1`_wYeIbc|Og~>sFjU zAw+l@z?eA8PP<^Jt&P;=yb!V;-TleC5Z5CKG%C1&qN|`lC7&zSRPq^CUhs{Bt$jSr z9%-vb&piJ?jSCEHZzvil=~Dsy`C?4T^O9w;SsSqyyhY*NL4TH*z;Pl|DNjo&y;|>R zy`3)dBq&=51#zwV z06olbxmurTNu2@^9pDg^a35rp9zi^Ld-78esxiCad$l!&;a3U`B2Q~-f!E|2+;F6v zdVg;Z2BB{b&nRK4M@{~F>c@Cc0JWqpj+4Kvu0jA>If@16kp>>HKLXP2F`%Oo@D%O% zp_4#*mHgBT&bvuBHB82xlqoI1e^2R1T{gSo9hYzTfu-lN7jzchCKEnUqUcN1u<>k0 zr|l?OO%M9o#DS8Ya;7IGwxp+;qmL*5qn-=6gG%Qkd1n3lCWm<{UmSLX_w*jr1KHQw zm?65uH3}CtTjtMo#1V&qIlQ^B6s}42A4>gob~ujK%D^4aLCnS1{@l}5b&)Cpn2Nt% zh&tYu3^Rl9Bk)?CO@8qPSgd~ZNfXPJE+DGr<`>lBNAp#@FB(`k#yh1&?Hy0v*1uFt z$WSP0&BZf^%IIx0O)Pn^*j)9x$lM%G&jOW*lac75DaT|SThSBPXeXrk1O)3rhVp}d z<3=O()fL67yMnr4TEK(|bB$DPSgNw<~BGqX8gl*N8@SL{>;aZ@i zGgg|uLmEt5tWfi`=F)5c1`P{mZB3Kwqsitl)+*HbKpB_%nTGeZbz!E>hC>#~m2u>S zb@9Z@*Q`keJgL|(z1?C{p!MoUhv5;%BJTlJnDsA#t8|FI^x{jo-4Jm82;an+fjX;I zPvLsN7H4c)H3WTe8+Zi$GnwD1>ebeyM9Wq2E0z1j@E{~=qqvu1E2GE7x$kwR(rOr{ zBFFZCaI|LI3AflYPigT(cqw}M2M*}(l_~^m(uA;`o@LG&8=ZIyYUTgSUF@%mE9w+C z*XX6@#I;JKs~ozCYVQHz&5^J68(#Xgf$NDDh^AiBo>p{mTva;T<&Vrba+imKNh!FZ z#&ZN_&1{i4GusG`9`~~L<3_|%{D?JS9W?;eRI8P6?}JtJs$ov1L4m}n2q;EQTBonU zTN)t`T7PkT#}@V!acHxo)s*E4s&@+z*P9ew0I-I*XZCoQ$%Kd#LAi7iv4_G_t&y&l};vI_D9@$%Sg*pJZWkR{9P2U`w!&2oE{B z8bXB3>)~=nuO z0ge6{siGdE+*Q<9^bufuJ~G35V!g4VPbC}L6CsDRR-M4jEF53D14uTJm7@XPkM4=8 zS)0_RkWX+jVu6y(6$^Ab++sWul{gE$qBnuDD$E{$L#1+P{+wW?*q>6kOY#C>Zs7nt z;FUp~+HfWw8V{I?Nkbqkgix5om>o4j9c__!R zSNP_oXT6wngG_fMH3YEwBQk|z625D{6aavV9Zng7dRCgXD7RMfX?31nJl00l=`W*D9JL05 zRR5BglUW!T1+Y4PGH+_tw~T90E>jOa8XS!S$^=R<0#0ygxT`yZ(+HfPSE!<-h#V+n zLj9AsQg{K?zEWOri$tIj5{&~VHX9su?S=+VkrgldG2T; zIs82pH&1DEKpCREf}|B!vV>YzLGT70Mx!4O6U&MyR^daQVHP3*bhAfZzle{FN(l%FfZW(Wu-9Qd5GBY?%r)o+!rBGJ47fL}5uA=oMluo*8Nt(t{ z?8oAR%e?QqHMbkRA~fa=>Q#6LuGU7#M4v=8-;Z4DDa|Z&(jCVo4eW(t8&L0>7o3Cz3$jFb`B3JFk|-6_dxOesLQQBrfOMLMfANw} 
zh1;B!HG8Q={%t0ocWVEIQTHJb0o@3;&d^)*tkN9`P6fTAQLzFw(z>t%D$GLS@ zzygO6`tb0ek_Mz;mW`xP+*m#dQ{%k7XC|M)cuJ!VpF(U&tgK{Hgq55UV1_S#7h?rq zlp4_?>T_C)3kRow%pX6@Ac&? z)!pp**Dj-B_c;R4vS^v)u*ttpep8*UsM&N1(%ncbXMT8wg1clFV3@5c=?xSX4T}80 zwBY9NYL0ufjvEq7a&PGjqggZuGyjE~RF*G9N``h+#zl^#3p{gRldqYaVv zL#-v{^M;{MNeoSGL%&BAL;YAJq7NIUrl16YyWAgPYc%O6!B53(__b0xH|j@ z*(sjr=6G8V8y6%9kTpRC{W2vLkt>j+_;yY9#QiL;D$yb^5_!oQ&~ne1SPR8eVov~Y z%rAG_Y+nll<2i;nnDUgvcZI5|2&!&e#`^TT7YM#5yDEd{vCc?;0ci^=J#ppR3V!&; zSrjYHv_PJ{-!A@%+kp^yR7e31c=M)O5QH~=N$7xSF^`olEx~ooihI51vwlzUnqQaV zHE5A|^EC8@>6~bbA;bEYm*4Xpl9vS5Qnso*SmMS6W5KC!@<-8wgFw*F&)9Vw8x_Sf zbg=JRy$;AWCt3LJ+Nx`65)0Q-db?DlWz>wNR&Nd;u!sN}=7!so$0A(mHWB&LoKK5$ zehr9RtH#!+;>sTG3<5f%jb-kvum zq?#>xbt0aho7|!)R7O&f!!JkeyJ@MlLH|7D?C>m=Yau=hi4k>gD|!Ixgp z@MhH*y7b+A^m5=JN>>h%1wRZyB#%wa5?CRV3u`;%lyVA4rtx$zFcIflA!Fco5epPq zPC->Y<)!f>wz}cV!c`io#b6(Qci@CKe;J?!#t>Uu49vp!D)X4OVsy5BVG z<3ftdmzx_A0HWff-ic8C{~3Fi=18yWT=N%r($Nt;>H%r$uu3M2b6t2kaR&=GBT-eN zSQME+CV&JInNVJU0(jUVk)qqOZb!C)vMjr$lIfOsPADvi6eaN@M>tv24q(uA&tIfw!>zi;og*Iw6mtz{f8(U(wr3b~s5k(DQDNKl3;9AZ4<%V0mes_vG? zz?mFKS;TsTX5dI5V6~OPK6*;dMmhW;!a5`8UkXqRy$#aiIf;s-aP(A6lgdiH|MSjA z36`iP@ufLGp>NR5X>Dzk8E$Z(P8N8OaZ#mQo;y(MYbX8l;&-FUZ~ zFlezNie7)22=y}M;viOWef`z}R~E@JaR zUgbv24|jTLmD6cg#H0LJV~za~I=O1{o!;aAf8&c@F!F1!CVB;?aqLjsvbMUhsH3B@ zjAA=B;1}%=YICm%d8_g+7lk#v!FdQ*fZ~C78vrw&TfyVu;xo!sT1Xmph zVsA>s?FE)==E2ecaT-kgwd?QuGn|1+a^f7QyXA7~v9d?6KJW_`7MuQXbFe=qmKASC z6FUD?DkqBoWLbsr)mcFA_2sUC9HxODPU7B>FHk^8y9ePv3b}^al%a;*iHZ%qboJFN zY4LT;6yGu0DJBexpejxx@Ag#^_CD+|KtO2|e)^r>^kJG9tjj-poG39fwt21FUkH;G0~6pC@)JIG=n#nlhDlA$T7O= z`7e4Q0CSW3nym*~iKHUzAJ>fAjV3cWcs7tP>zVE8yZZz&6^l;FP@a(6Qv^`kp)p75nDe<3Ewh|9TWS{ypZNh3o16-Z6};|X~5KVlL%aSh1{J-ISTAW_u}iXi@{Wp zr2iuIvsO-OSHJ3>6!^8q_g(!#Pm5r~zm*0_M`aouxSdl>61ric3U)FpI{7lf22Ur# zoHXPJW`Whc2wGefI~#DCH+c0AQWw5n$I}lkryo?;ism_~;ia{^)AzzUf2;$kq$e3T z?_|`O{@PmvBc};?`uoL6fNTVm^fJymC;)HgUfBir2uSIX7e!hIE$ydDBkDN^-RzPR zofFb{6b>NrNKAfzOXt85Re262^71Oc=J&VMDOQhMT3t4;jq=%$e!rLL9_;Cm?or1h zlcKU2@fv!fR1<1h(biH3OFs%f^R>@dFP_q!0er>uJ^dS({O!D&$q(ceh_ZUWg53ED zXJKfq6UeJ%iI6k2f@AOPjh$Fc3BeY{aYX!MOa zh?5&s*qzau=0_Qec-bX2wO3j|^Z{_CCKm7CL-T1%L=NBcJq4N<3R`l(?=H5>OfH~E zY8@S1A02rHyQNNmI9c#pKTwl&+Z}T?>z=u6Lb;_Cr*{2c!|IF_M!8D~~ zdAdPrv&!KX4_Z-svWohY3@NUr@83e1;yxbudSbK?#Vg3B`OP~UJH9h>#GW#otUx)R z3zJ`e4s0<_a5AZorR~JXi1L}|)O3I_3gwZ@AwB$q-JP26V`>sXwcFGzS5Ly_!Sg}we;zkqsxurLwcQ70r` z(W7@u!7mN!-Yp~G;Vl(0&G5GL;E-`B)P%k;xr{6^6VeEa`UG~0^1vhZY8JCQgsKlP z&N?ac#ws*peR>kJ@brC*31Ys^@DXjR^#Kv!Us`LJV#~}97P3fZ&2=4KG%3`{x=L34 z+WwSy6cp0ff`L`+4Wju=AMzGSc=P6K$vx!d{ooe;WvbzE_2cWeMUO{6%C;4%rYVUQ zuM)9c$JM=1`?eu9G#W>og+~*v#O?(7p4U-5Kgm!PlmsIUt6V~9a+wjM;;I|Z=>Zq~ zpAPTQ!||s-)-Tw4My82?R4&>bWs+G?Okz*&ISdNd!lATI0K(|+?9EbB2=B_U+j*{9 zC9#$?ymtOiUD-KvHE;;5yteP@g@OV6Z&s;w~ewK&v*Z=?j^ZC-j zuEI9KEe|6V^bE7vOzdL&FyyUt4?OB*UAtJ_8*VTeu8MAzxg>`tx^5KhfPH1}mf0$- zv{a7yn{+G0==49RX@{}JBllnSON2hrZqcV@oWOuOI)9uQA&foK$M+tH7nHLtQPJz~ zdM%2XMf?mAFRU7YcdZ8%0z>*Se>qW)ApfI8qP*slP9}4O_wX+_s@=U#*?#4^+tkhY z6TRtA{nJWq?0}rnz;9x+#0ll1N75y@ub#wbgT`yYs%pS$V4DxAsKR#CB?SE9wk{lWz$1Mp_`_$@BqUY$G|u2#2(C1I)&tr^XW=7dRvZi#l52zK>G1pXXcbTVDlB$P230S+Sr zDI#)Yf8(75XjHk;bSEG;mhJU-eJw#0rMGrgNoa5?{$7uI zmBFbucJ(l>CCwJ~AZ{&LM(aZlVf6>8d>GTq3exy7+cZ+@rB2|+7g+S=#V8}|#;_NQ z^WQ(ZDY6K~pvOml1f@jt9$sR`<@yF{<|G*yocjiVoh48jO42iEpDTGZg z35;+bNl?{ny^tnF9?j1$n$Z5|kgd78&<-;fIS=Z5|SM zh(f=ce!=jKPx>~zmqLt0w<5%$!w7R1sV5s;pVc@SB|`OXvP#1&Lexxgg+zSD#5>h; z*8d_;4M(Jeq6t-Nk_B{f_iNG~A1*E85pmQix=y3m^X8M!=y?kZX^EAv4hidlo7IsDq`5n`<|L@hvcs&3btmdJsyF;nLRFK`mg)Xr*_c?0BT z4TudHKamWmFptpwpxtYFgYMbLbn_5;LE+K&iyEH3bCP&C@D0HfXpO6nwcNXnwL6QU 
zDmZQZN$$@e?1rc_4tYO)NVLa~CLqLMLzQ>xTXd)`ZEMi!kt+T4tw&jVe4D0PK5j5G zdx_Y?hQf6=_cFpqAIb6a{_X8j_(jr^{;egQp;n%GKB(NZP6{QWOc8%woE-=+ZnGMXFzOWp(zXF7y;`f?p`(2$s&aiVzC~Q!2_9 zoZ$`Q9iGRND6p%0Eq*vD2N52Yn9mu2r>y|Nda<-9=v|xx>@YXqEy2#b!l&|OObT4( z>{KUbPf6{xPabbCRVSXvT98oE(W%wH7DB$U&NaFEXACNZC8M7w7ET?8QL?(`0rV^+ z6GB4|0%(w4MA#;2Wt5DszjCGeN&;b}@ikx0NW~^JjIh!WES`^L|0IPW&4dD1vKLWc z1ZD8(dgN;KVR?BqwmvE$L*CQ3P3agBbGi)jM{V@NG=K7TWnaLp;{42 z(L9@(iKtIeLe(EQWe}ndU(FCoPiFeLMvM%fzMInldAoXA`m>Y|u{!PI=IhUx0Q*D| zvd#nh5;rPN@c;Sw|ND)U9JNQbztTi{X4T?d^4Q6F)(680s>_TT3$K|3`j}V^~eS?tbVk5S9fcLhXyrV{#l3PxXBE*Xdvm&JDxRe8NRmlp5C-X4E6s z7B?a*#szp3ayxLGdKd$3O@U=Hcxi~=p4N6-WDY8fXKfOr`CvnwM)k`O*LE+NN|8AX zT;gbVf_SRXy{B(Ir#E42dU96>}>vt$Osm z)dinyM0elUsh<)NPRUB)Qv__98~Su(eamlRz)4D*Wn!nUg_JQYDBN#M7)hlllLlKS zV#?apVn}xsU!>O~nm$Z87CB?Mgay9w1&Dh6_IGL>!RTWf84t9p!AsZGnOF@~@E6GyM*_ z>B_^n*crMSUil>fJO3!jk>Y_i`u(g_E;UG6q!5)_*fxP4?p3wt)%`&!7U+Hbrk_TQ zPo}Goq8y1HKxduEdhgbvDR4At-$pibHmLNZ09We;4@08JY&u6)co%w=@}s=AOe4~s z$!E<5Q7?7EEIcf=s8nF(-j7iRTCE#%o8)v{Eid9UwuXVH`ogdkCeWS-7D2#UzBc29NIsoQaS~DNG8_gXcJJV5i6!d zB19*p>lP4?H2-KGbQ+9II==H@Go6q2pfjp-OcYhCjiwZQbr^|lOn;_z&NrK8#*BOJ zP!DM`wE7{@NHcm}yqrcu{=z($N2F$tAL&uMgRq}4o054DGl|!xT9UMlNgtW@f>D^4 zVh*p2C5@tIb2#);ySKieb>QPta;z70r~h65X&t+XB{x*>N3W&zyVKqET_ADA++g&q zw*sQeX8<>bMA09l@pe|~^&#rWl#c|+F#L+#e2QhTiZaW*cj^Lz^tzAWu>7GNq&rS^ z6u>Bn58;K>2x{q`GliRgZofO%a9(c z8z8{~7(m^1#1p;6!lQZ7rXNNG*Q?OX#!{{&6u_|-JTE+bL`%aOk48Ru)d5+ScoPiK z+NTR>saCr6)KiulBqhfQUwdJKIg*v0?5s8Yb~0U9Av0kxO11itJl{ zGppw~{SP!St2Se3obQaPVbiHVXe1-=xj*?of1eh9G?4M7q|XtBfoMR#v^6@8ORwq# z|LUl7o&k;#;pduB!LM{5KR9XqMIYW;>zoCa6c320lExwrQ$vg{fw+E4JIFDj)#I1A z8s$!D6UniQWCVXwXfg#+ztPMEFK}(v8IZ9?DZbV9JE>Gi7Ix#O-zgz3a)U*9VYXoz zc=ZjBeG>n$t8v;NRHYznO>;~m!agBpRI6Eg~vsqPZhs3 zJ-&-HFI9a+(}`N-lTn<0 z6hi@YMi=yzq}y`_RTZ0%0Ziasf5my!gSB`-6lPMU714IW2aKPWQhUy2!A00SHdQn$ z_(`7-Incuv7BfP$y0Igcd7bWlCg{1`EPd#t6&`p)Ob~aI%j6}@wJtEVVp*jilY!Y4 z7_mDUWe>5@7?_(^s_s4h5ml2G4K|);MHa&mMma4R1x%`te#GJhTdi>!b{@`^I8tHX%F_HW>^=T}x)!%yZ89nLq{GY#}8r9O$u|LS&qB`j6 zH{%u51jTG~k0T_hc&(2LnE50)E3{@NCc@_Zp!mdFn8YQxOzv@<5oxeavQH9IhR;^+JI*O4 zqlwK3bsYWVYxIA@hME^4T?mqHSVa(aQ!XH;8SQs73LygpV7$qo+6(ox(B_*kK!Nfg z1jrqa!#ksCOTZAG%V-YzWf-_@f+&N8XObLbq+j7W6-UQcRljJO=nTT3IM|Cw6|F=9 z=-1;u>d3lO{pq7h#?tC1Sqq9_p(-Hg=O`E-No^@V*EXDYKYc3(v<^z5vxFP3zpLkd z`lCl5n??GabIbvrsJ$xiQ7&C$Zr-F_N0gHl;`g$dUV2R;wIP*Z?z~s8_icn}@{tBt zQ0C}=@@b+RRpTh2(u&F_SxhB#hQNREfLp`m4cT!v(?^a z5?B~vq2BIgsfWGV-W!74UIlcMuL_JVCeQse|J=+F5}rG(++bTk81Jb z3Fa_S&&27Kz5@jRx##}fXmD=YE1?*jxXjshdWtU2 zxGCCd=n*QM%N23At4b`T`xIupArckq4j@2yn z&CIh%y||2->WdfAx8F&-nqJM;aWG1xr|G?MI7|bJ+fO21fBfug{_tf@7 zwW&vBq)_@WnD(WCraGScm549}T+A-F$)xDMeetOqCQDR%Aj50itvwq7b%ycco1Epb zm^5A-=R|`czvV8ueX?gd9DISiE>wt_d=v}$JjWV#<_o(-pi1eC5Yo(Dj>bS(`rM?r zH7a!0?n9s^O+alOgphyr<}*5?_L1MQRoa1bu|AV%P!jV@Q!(3@Mg+CtJa*j7=f?It z4%Q-An$8hADM`LZaNiVEowS#tv0uf{W$c(2aefi9N+WoSActVsF{4ax7N{^?I-=GoYxNE^)!J*N?kNcZ;(RG7h5UBivknZUr(VVU($=6 zyC}8XB>og)5T*?GFKP=)rIT8$=#4P}A)y2yAWPg_Wei^FM% zIHY}#5-gqF^I%!?62?qN$|kp`F@>|KE2!n7|ACK&@Y)C8>=n_|qMy9cpzC|pw3XlfZgN($XRD*Y=0#YPrZF|@7nKZgw@?fc7+TTtR@GJ%vuTdN_e>{PDy%FY z3i=*NDMsAp;_AWxQe-e&JwiMVvOW3$Hz7HOY*)cVK+(faPq}4n*Oru(5DAA8hgCmY z_{VQtijIi>2dLkfFHGDtjQ}B#M~B);FEi{*C~jbBnff}HoXNw98H&0bt;-l!^~z2C zqp|&>sen|`sgR%YA+IB|tbpdq)Oup*sW8Z+W##W!$m!TTJR}ysVexBZc|&VFDEUVb_HcGI@w+qweuT9av$fn6P7 znH~(i33cUDI}da%eN>Gxt~Z9Uw@h~tq!@vkWN`u^X#;|Yr9{tH8|Os#qd2%)rR+X! 
z6}x1dZtPl3E#9Fl&$skcZ6`$>4kl6q16qECmv$#Au0`|fum8_{00~_it7WFpuUA3R zfp8FhD9ObNHhl~)b23O!r0W^hV@`^W`R23F#|cq~BdQXGln1Ai;g&$eI5riJ_$-RW zn2bQ<1{3h0b4UPY9HvtDB!YuCUwMB{8($}G=oVPJRk zS^QU6M1Q%%T&m<>ga|ED#Nj(TUYOjh^7_N+LoX_<&5ugI9{Cjc^_5zjF=3{el_W)@ zI7SgFl@3fR|D~6ov2J7J21kvnQB8{JKXS~;yn+dZ>o8wXf{HQ*$gJ2V+QwC$`I8qG3)_JrTODM0XG|)@~{V>1t;?|q{t$~7;S~~fT@cyYvBdeP6rKv+>^#6-^c=FwR z{IDhlfWM@EAQ_*gc~au0%|!n4ZP31@%ff~54Yur9;l6-mvhBpV)1OtCJ6(iI6L$ts z_(h8NFg!GwjlTVPRn7@a2jJ*`f6nK7g2M-lQ1{tfdR}-?vb%yxG1q9;Ch6>Dl9Hkc zo!>_wNl}rUceof0cC(2*A{4D9BMnIrA*{XfDsq+tY5a&SP>;0f&UGr0kmZYq7w{mT ztgVkCx^43Ir|(1nl(uAQb*js;6_q4_kkeBh%E-$trq&>J&z%@5XZoGiJRhm=$UZ;$a=^w3=p>8C)f*;~s@0={vN)9& z=lw88UcY(M92oO!y!z6dC`AYtFAoFmC4Z|=(8)TOsu!!J*YNmksxO8%wM8CdQ6Oo&D4DXp6Z;I?+aA(4>mE-aL3gpj~9EO~y^QFK4{ zbp)^|$5xzt^^;O56qOjC=r=;uo0MCvGrf!HIS#Ta*5gehBB+}8Kd2aa`|1OIcKR+6 zNs|b3A{%pUIQ)Y)q=!W{39ujepFSSWR}Da564$s1?E8tgv>H-mGJf3PE?94XRsKwv zZbti0A}rs5z_C#TpD@Ao%$6j zGiO82g90dR(C@!uVF$y9XBo;64~w}UVlBtP!oUClq3bI$EOgszoQxK~;HH6jG1`L| zKQ&i;n^HoQiP7C|)}ZYodvp#gws}V?jX1C=L!b{&X&g|3Gs5$sf@wIFG8zoWY}V0w z7YwZS3bxFse3p!*p>Gj73K;vO#`(@3J=7o6ZrYFAIakF%GNWu@B+ws2``@~iXZ3&S zaIfF+S2&DwA|>V?CJ&^5yZ#1I-A}GQ)OwakC=Em7_lnLTsPgDzJ$JuI6ZGY*`2sa6 zF`L296tqwuCDraF6!c9Qvb?Ge8KH}z-i;_-ZVZR5i#2RENw&@TT=x&e6XoKxCiGW` zaumLVVXgwQpe%WC{rCDG>KO2?;pIglxxd^>D_J(K>@M?5lZZMU-LgVUrk(#HV>%Fp zUWMqXQY~}(3(WE8Cw!Q4s?t+kC00?)b^YUl-Bl+Ve?wig3W#{1E;zZlf5hCPg3$jA z3jV1$rDziM9N_5~Jp2ppa5&6PPa~00Z8EJ$m+rZYRG?) zijh9Be!R1~BX35It&ZU46K3vy09vRjuGTc>2{vhMVgv6f>FKS)KctNOkE@4}E-p7Q zubE&%jn>H|$bw-ie`py5QGUWlL7X-mx41h<2n~COleM*-5~eb_<~se_JhatrQ$$fb zW)Ya@y=T*ZlLq~?WMHGB7oQTR3((cgl(HfLY)-!?h$f(Qfz;LU|d!?E?w$GXh_&i}XorOi$~3aFNnxfrxuy_R^La zOEpvFJd)6QAjLS*>VX_%CmiW3LXY`8RL3#XD}1Tj{-5{+_lZ1BNr*;BC9mc%09ybh zJBivL8Rln=_NKomZRa+CRvgrz5D)wJpXT|iAIB%jtwI**=Q|vM;1wpBnrB-sVcE#P z4^}SoYR)lx{QWkC8`+73!$nl!)9ItHJr!LdROwPDQVeU4ptY7#<9^o*n6KjNH5fL< zvC0S>Es{rZ{k2^D-|6)Ia@eZ^#-+(i0oj9I>Wt+<2KgE!So6`Hqo`T*Ge#XyDPj%{ zE-7hC+4d4-W-AP^14S6L#33O3Ff@aMskFR`8TYGPH#Ow+{h-DI(8MgMg*HhoQ>NY~ zUnv~-N8x#Vt?xhO&jnPw8=>(Hm5Q5Er!;!-cUN`UsVl2??_+9|(U&T?P-VLEBW{Ua)ZgiBdth(gC*nPixy-5K8M=k8nFQ$VmOJnjg((?ZLyNcL*nf=M&EbaFB)|nYpXo_K${Uh~a8UR=DGBIiq z13$`eLW3@00U%?84UIvObxmWNaFw(b)q-4osI$NMYGj~&nd zW6c-nl4-OvIKGH7H^L1W5{q(De_c=pNii`Bay&aBtZ!#p2TcT(A)<$!j-D-8{wrLzFI(nM5Z0mUYD+S=a_#iq))VT2GaVy*%;x}PXh7dInSjPsOuKR7C@1j)~!SrKTK)C`pq$MTD~lCMN6pT{$+ z&j+arEsZwTLC*IElLSS9eydF}yUFNlOisjo!hrF&<972;69UndGv02ODXX#KyAA&x zIUGS)`bK@C7jl=XU3b8Y&D!4J&m;b;rrV}F1_}0i=mwots)1+)G4nmJhXyCT03aR- zkEVNyDW24cTErChpM6z?g@aTM`sQL_ggG-WLM6FcE0-OV{IEvN zClS25`AklJBluI=88aeMJEw#9L##kc6s0Xo#gpdKH}}6kGPcj#5O>hNofAPUc%rm(e&_)l-{dPdmH$bUk#39OH1os zjz;a}YkH}n@o5CYJBw(uYTEo^*^~aUi4$aYZ&Q&cBdmrk5gy@riX3E{6DrW?6X*fg z6HqRdX}9ZmEVWJG*?u5d@IGi^+*`kFdxj-Ab^^WheWl&(lkPlJ|ik_H-CFfS{z0 zBhrS?#pH}StyPv(C*H<%D&_8d_;?e=i3#EcAVLeYtfzI4*$UJOI0aFPsCpUeyycd6 z1UhQkWIg&EcvD!s5mBH;i??)G!RlEa#)ee@5^TCQh+wih(`t3psSn#MI;9&PKP55= z&_Y>95@SiD4855!g8V9~5WNg_*w8LQ;wxB@AO}?>xprUCf&y|`TU^kG2{SAFc9?y2 z`5@Idm{f23`A^7364%98siYT92vT@?q>CMngbg zSPckW`B7Y?PM?|*eSvyDnO}9Knw4IAsmm=Ri%=`TiF=%?kkF6c8sf4@j(zq?5do>r zr>(fr&yrN7^~>}Z8N1s;n9$ZeeTY5$%8JnX>@73;DSc!rg6?e4iRi%Q# z4W>aD#k~bv{&6N4D07;eBunDIasqoPiMmncjV%qWC3s2+v1N|3jCXHL9DS}E1h z7O-qT`h_kr6I18v2l6TO4@3wYfim&Z2uTJd$WeDoiJp4*_>Z(?P8?S3Na*2@ba?;i zi@6;|Tf3ti8WZ&<1W598^WlUMBqX)&t0h#LY;grx{F;?Xtz}y1ln%2y+jkbd_BO$g zT3imiK^CsRxa?R#*ZeWoB5*@=w0#V|WJxch4K|%bBGWVjx^R;I9d4$6 zTYx16>>1^%aRm_DWcC6ALF2QK3eP^D-5$^372j|QA<}qNR5CmC(7`O@Z0ieJI=ReR zvx*{!lK{0-vrnpi_ND=l|42VdP0;Jc;%QsWWqJv4KOk2udm@h?pR}}%Qdgd0hpS*h 
zva*0R;4j8KdC^NXb)?Am)dwaHko2mo8nSTwdo+ES0ss7SK``|dq6Wo(-5Jg>CEO3c zqTR~42*kp2+Ak^*d$~0x^_lBd7%qxfq{rBgY01&iY?Goikq-Ggx2VT1MuO5`T@qX^ zU|`%tfN-`&bdBX<6nUil5H`aP%@OfO%y?M|aXd$l9+qMq-m%unZlKYhc-Dvm+^nSY zWjFN*es{H}RR6flxbDZCB@-SHZ;gE}VD4*?<5p=e7=|XBS{X#oh~*7`CzMREITp+O8Hg(CDXr~scDVt z#N{z(u1`|3Bfyo+%yAM}DHIP*-;+D-|y_to#ygpYs})fkD# zCyxv)BaQtHnjYxpFwU(^<$@EQy#ECgfbq;a7%p|{e(Et0ccu9c-l;F$K?YC5OBa&q zw>l7RN3DP!g@Z@E8e30~>+I7X`!Wz$uLA0qsDzAAVA6#O(RxB#giYVv*U0Evx$Ne&T_=T|p@hA)qsUy&UP?%$eY!6QpBT+BdPvGsk>U zNpz*pY)hE)jQ}372;6cAZA?xXqcfJ zqucU);Wufct3UY2IP3sWm{^KajVD0Q^}yF(zmYEbHaQ({lHMlVX{$hK1j=gl>6Ohg zz>)(%ROx-AYGz&i+%1gI-&jLVg5f5%bR7tq=QSVf<{-}<9-M6d(&zLzc-O0pGz2zX z$bg%Fk)5d3+$wzgIvhIP7rg0W9I|(YG}Vl5G<5bu)2NTlFq191#sgWd>im`ShjZhK*9!-56(Y*OB7+qUm6fse^rvGnDq2;Ijz+8OjCWjHH)h)wnOH<^4bmG zy_HTlEr5H1Ct$53iycC^Xma(U(lfSZ^XEWWP!g?QPTv_v4{RaQ4nKx9TGRa^F-0xqbVX&jU7s`dSsz-n9 zW{m6w{B7#A^jngO-}9G3t)(~wGog52%4`M`)$pqV{n?ce96>Duhv~tG(k<8TU7Vx_ z9u3l%C2f{JZXtXnWx0#v(53+s#cNIp!0;&WrBwd$0xQgT6u3&DG_CcsSVhMs80D$8d6%9b_^Dk{g6xaN9U=% z@WCt*fZ0oneSdNwYwL3XL}L=*??l6;4jzLgL>SDqsF^->df)^DewL=YON9NB<`!A_ zfwQSNUdY@hZ@wC|6}YSx;&6d2q#0i%(J+0lOprbAMD@$cw)r~jyCAacipWK9zS&70 z;>>kpj0^&vh^)hJ_}p+~9eUlp)cnyW|kk5k(3T%2|mRp0G5L+Wy)il#7TtoG!BXv?9!X%IG_`jNv_hppn_l2L+TIuX_b^t zH(Y(L8nb{>qU(D2LZDo}dHsFA@?K|noPkShQtiXi9vMaek!o&TIT~}MG9mHp6}VzG z0}I^y^jx6kKEo$VO2kLETOx!mAJgmk6f$X?OoDGEMqr?f?#L5QJmDom+*XFGsBYn( zr7T*`xsq944M9W|*G49Hr!lt{_-9y#@a`a7H7uDySk5qSY7a8xYALV90(tTRI;YgZ z8+_?mZ?M$#hCu1I0z3jCb|#rHDZxHf2x?)I^EgqFR-;PwD+kH5MKp03&MtK-3J_H^QFc;fM-=)e z_WeSY;aCo;CrG@G?y}sG+|NCrq~6cytFop{haL34*A#QC^ae>!4@VaWoV0o{v^xL# zqr~G&>6Ydnm&C9#sj1Nrg>^t^dyu~+$xs6T$E~Y zA(dVmYUAGf)+%0rF7g+aFh|U5>s{=1U)aw?8+#rtVKfWSMfn)-zy|e-sNa=n%)=sa zRSm;-2iqhpOfpZHT{`w|Cu7%y5r>IB#FG_pH(L<^d%m<+g|K=IBCLKw+B%kLM%3u_ z5o;z`G>X3HMAp5Z(fDF7=g{M|$_CT!hU!_R-4m8Wz$Am>5t#qkHQ>U$S6eL#d?1*< zN-NBoPKta8Ok}f8MNx3F*;hzY6R0U|glq19B9$SZX9>3+_)JvS%U66Ry9DOF2TCz& z!9y)Pq0l}JH_AFt{G|yS^JD#hqt@l6uG&$KD*#&e=nXw`O>tezE`3*E9j^@J4lk?i z=9(OQIT2;Rh=k!}i3V6T-87>J@?q}iO}G9xzxv+vk@go_-L@;56ff zSYmW|t(7X#*+qbOuZ#MFfKB---{v|TR39+%Vi*8Xcw*jt`59eKVhz8qjY3+F0+qKj zP_2U>)L`IR4Os|;x) zU5lh}a5{y-rPgoy8%!HBs+txEr4imh_cZ0eBEdo}tcOQZJcaENgOm)Ypa}Sbzo){F zc^Y^~)Rfn1ZfTJdbg~z@1>_=>ird7SORj%-{X<>k?aj>{y_H}Eqf$){{=p0j{{z<( zbJ1%bQFIeg9zFdKepiy+!Zd;@5gzt8V|+#m53Y!eYO(4QFNwU!{h}HdHlwQ3%qA?P z1Vu(^J;9uCmrrf3M6M$WR@7&|#zZkQN%Lr5AbQia9=EbB#lCgKxa1h3=%aq|0~xb?0he^?)v_GlRaxxC8y?XcV!JIYwca#?){X{|tE@KQx66 z66LnTuc#RrjKU(OIqLaNjAG)aS6|5zuXXTFjIJ&7Kifd(>s8+EL;)S2UTEmS>y);x z5=AksaI*XpsBo`JfOdV9Lk@JL)_(Ju8`?NHogzNbo~@AcYn$`8eVVBFgi-C5yg6@O z-tkk{XX9XTHkqInee!O;{<;nW{*3V3^Pl@%@Y-KrfB)(uA27bQ#_}SNQx6dEBZ=zg zVj^W^kknh^&F8ye^#N*q^Dc)p64y~kjRglmx_-EeIwg%zJk z(_jIIL8Aa0L18aT*B+}&Z>D928X@S>+&c!7`6XwANajJCNLz}XS_estQn0q+Pd_m3-|;0n z5$f2>U7my$M(_7(Ds8iOf0Dpht?0m@Ecilg>FJxViYu_R>(}T>6iRj%jA@#P43XuS zGaIp8x~h4FS`w(1s5FGnlIf4pv}U*`p72S^)-T**r4<2ACHnNq7%kzi>}6Z4vo6IJ zj?`jJkEg%XCse$fJ6{+?9D|D9h!1hDk;N#X*D4`};ukUS5;QsX_fjIgGy$>m2VozQ zCV#9R4N4t$qe}$8E#*|6ahsR1lpOd7=uz#TP9OcNevN@Mz z-bFGRm#{#+)L9wKNyN0a)X+W1qgbQP?6?)S73H#}xd|l2H_~4^r2u&t+>G4B<@SnJ zTG5*mZpJUxYnQe+*MlA^!op}Xqi_@IT=#plzsnqj%!$*&$^>g%@5s}DnPj`B`AZy0 z(|Zn^wyOTHHs9Avr|;JqJFNQHMO+cBefn`pClWfXRwlVRGub7v_~qXZfK4CRh+F%xyBnLeRDNm`gJ%`p~e@VYQ`p~h*0wx#W_)vA={xYbHYw=vhy z+lBk{BL+B?y#n94<-4-AS!i4QYvNd=SyJ~`k)vabSjnd8gJ*+UArL+{u)xP}G8L^_ z&!jgIp%9^nHWg?13L_&0gdp^9z6RUc0r=Ss+epx*>+k6;3$!=NPY~fGrBnqB$on(@ ziL5Fn?5H&VV+(jU-1bw2Z2R|1?P)t80ijku?AI!B6#_= z`}+K?ThH-7?FS|yyRGbg(Cp*Gh(!>8iOh|_dDg6oZ4QxEC?%1jbri?8r;2ikju)Hy 
zK8cr9d&DaN{N%svFG{~~(8@F=iB+kIL6fARj}dl2#RsCSgf0l^gEizO#sAuaLN3*D zmKlakleM6T=2qm@xu&TPTYK2fTu+k|*MRh=pScXiT0bJLS#^j{8lI#Xzg%ZP$Z1#k zp*~BI9%g1dZaMsP38v)t)-K{1z72K3wY-rmsXb=W-D$fI$2RS zJ@YGBWT`3S*I!SLfRMGpN8_j!v+hdIyL#@xdj0Tu50QsMCLl&k-12zW}&hnXtf94>|#H*%>69ew{HLH7>lvhP3W7d>RcWexYt`V0_)Sseh5YiTcnlHz6{fUB^aWmPDSOF2byny%`4 z45T7g>z{h1vZcKn#(ztQP%na7gd!RTg!gP^cA#de1{zeG(&?o24T_ z#b+LweS%TfpQrC&M8rWinD#5i(u|>q&t8hC5-6L!jOa!Af9+jRD*uy;r8$832q9d` zRm>@YG-Wxy8%O}!diZbHEkqekT8>312P6jwB>)c0jDydR4Kw$s);$k#_-?r-jJ4Jh zw-*=npT+HM9TBmeH--7q2S=TQY)iwt_PYuDGxAM&csO39IF3}I-%0c()d*+ZMy1XY zoOg!VjB1T((dkW&TBixaBxJ=YKHkb^t}{NTU!)eakF4|NlM^gq3*Zhyc@4}n%+!b7 zgIW<3N-}x?nk5$&=k6@~dxw+SKbLlQwA3@lFu_I=Ykp44ILFb;^^zVBh#a6nFJ)e4 z9eObDF6ikPSwTz18fZi!gMF(BLGW5i@@ox&DJ#e#AlQL120eI~H^sv#OoS+PVt{sL z+6q8WLoEZvXvQFA7G26ZD4<%%pagm?=%IT2Ad7-&{}IQkSuA;9nx)l0tiMASs8Ey# zrH)-#)A*F~%*B8`6fcNu+%5iS>t#El9~cO_N4|W@XqR7vjfB6ErAem}gyMM`IioS$ z;0?2b1e+enTWcGdO}syR+5hoC0{5dusL?^mFe>85v<@90!RT~(5XDN}_?XvLH*^T( z66ICBtH%LjhE?7FEgOM){`)7tcJYyGLax>50f-vK)apjyUa#+ zY0v6qG7Q>xyg>vZ!;v@ce?g}qP%$>UKML+I&1u~t_@_D=87d$k?^8S{!tWnx;*kbb zvo@068R)L(yQB3^t^;OBf&7Bnfcf|5-Oh1X;%q?m!=QBi7m71V5Ilis`aSv)Wmmgd zG&{XClj#)dl`SycHKh1nalsmTwNF`*H^SsPHi<_VT-gRanM5%Sd23Fd2zaCf9+dI) z>Vxy47l!Jsl{17>gXZ-3)bo zDfx17LfSxdGz89{>|!MLQ!Ly=1&wtRMP!`T3_7i%@>=~sSE!mc625nvzVu4e67g(Z>_7PN8rM=frK=h+EmRGw4<$R*pzw^l%mJ`A9TthCGcC@?j_k>m++)NJ2n#=b z0dnm{{NSLoZ+l#A3DuONwgL=%XZR&Ph=!)O>v+_ds-KQ{=ulTnpsodb_s;-+;zcpQ7*jEB42WUTR^?mBf)7fO=NDf~cs0Uo zFU^=OU>p_xfdeGj%XhF~^#to23`srr`fsj235%=No)ZK~3^)eq$v-L83a@W@M9NJVnc8O1_B`;*b|bsTH)a zbr@B@$F&>JynDX*-UO2&^fWZXY)_P$bah;R3YA0Bh+jbJ;KFE^f^`cMfZYO=liV#<2R`A?0;=ip|2n<1@cJ)Up;u31?F=x2mQP;9*d9hi zyxDJ^VjJd0PNx6j?_G3S5yul}>(I0ppxRMtt^3gH*)*d{K7bi4K-B2=h0nJs_jI&H z#{0?DT%vyby~WbtuLr6?pPoJ>>#84wVotQn2!x?tj762QN%nB%1)M1}sRI4k4C?Cd zLssG#nm)p(a}T|R~&N(k%X4sY|vT5#dtag(+DIA@CFRwU?f;L|o|5 z{yK_ewL~?_(2X4wYiYHe9vkK!7v0NziK(nFr~RoAoGb`Jl&076rq?+Am)g~LB0tWz zl-vpSzjvjO(rnWGI`Y9U0kTs?InkUMb~C{JdIJKUX%^X`il<8W``fgT?{&Pzpo5M- z44e5DS(vy*itdZX+Bi)}A{sq?I2y!ogqtPAru)>2fY7U-_oJ7{RECZ4xj)x8Pq?@u z+1_c|GvR%gK;6o$cPZFp4B%8}+k+alYSV{I4emLm)TNFFwR4!13{us;Sxvr5?xby^;SJ)Qt^!T*gBtW`MUgN@ zAH|&n6@8Khc4%56_uenKfJCzcpGl@=vEXV%TPT{rYRz7$Zu2H3AMtDqlxl#0%Bul2 zni^&MA;}K~T&{m~{ia1fb|_+^VqPdl;YH^1ZYH4W|T0_G0jk%K9+!z9&%70U zbJ&S#!QdNXuti-IQD)%g+KIBZ#QFB~1@dt+;=kL8J{(Vd-kdoX-W)g-tU)uu0#SpJ zgtA^o6>KJ~W&aZ@hu@Q1ur|@bHLSBjO&@y5H2Cz}Aeg-PlZ!H95;a7;POoNyNGnn6 zo&Hy|`N?T5?_|*ok@E3LY>a!!-vrOqwC&$~Y8s$a3qNRqSVvw_Ty}66Uc)S1q<6Oz z15Ig2z71*UHFFfDeJrhgN~5zS{!gd&^yi`#kh&va6QyuPNp(h~yE_T^Q6B()Wdf~t zdb{#wG5|y*0GRUQ@sYKQ>JqPF5=5+txV{$$T2y_seMczH)JpChCXQi|{7Bn|NLwvt zOFMVdeEPwDj(J9q^xGlmunG{TA_7k?MBq+GlCieZ-=yDbH;jvWt9rF8pK7ekmp%Zi zJFA;n$G$)Ppq(Zi9vk$@gv>VC$Go;kB=6(0b{|@&Pf8$%h$nqgQtfJKrClN_^zq|p z1Y>d#Nnr?M2I=5;pIUQ#QQ#rPHM{zaz8MLsQkBUxHLv2!vp|NL5$Z(?o9p_|WE2H0 z{nlaLTY98LLkrn9Uis3X%9+r38uDx=B)>%x&99joT!cyo`Wgrs?DYx_S)oI(zq5y| zFR^{JaFhk=VALWbVj60lzzg&{lxX!T55wYnHvZ3qq02KJj8joXOx*&<(t#>uv|bH2 zlK-pCk>?TEpwENzZr%*I6!O@DcMH{!@s$<&+>25-UU|iICr(j)DR9SZCU%W@Ob(^6 zvGkYQiY}{)0_4>}iA8IYtB73*t9;F%>NuTStuHV*wsoF^EQ5QcOyh|bZXsBWF zPyInw5|28-$4vpDKo#>5#e&+HgB2dIPV3s(CS$-KYxBYF9?+vNzJm43MIj5Ux6r+N z)==e?8Mrd3Yg;h7No0VKriW*U);KY!+S&~}fS;B_{^?nuzPo!NGnpQM0aY?}kG?@H zo*E}}%1TGBsky7&x=hH?$*k8C#n`FF+GJV_tSlN*uphLT$T=NwL@qAubeS~Yqj zat8h4l}SDulbN7Q6>GG2j#PaqQDGK9MR5_RsTuzv4S=PXyuNjYsG*Jh?0zYZf$Ee- z)XqfEnhlk*(SF$ejG8MI_vpN=9fYSziIw~jBka6zev#dO7Z#Tqch+`new-2=fn`n) zGbFV}kOMEp`hZBqL6nz@=!>M9o~I@V0cm_KY;_}ge08$_q(5?7Z6TqX z7TpGqfH!Svfb&`mM$Dz;+mWIlp}$x 
zU`U#*Gx>8Ob)U@2QZP*-K8KG(UmbsUi*#HCnls9Y>nJfsz$=iVVM^1oTMzH_l{-z> zonIDLosUpCZzfn=^72sl87pAw*jZwC5LB$G*vi6oB;IGWV)oXFFCKQHsQkE_sLe;; zPgpkrO^!#1HzM99A8xEpFVqk83fuK__wW0!bUdK9M;fQ2*}#B+_LrM}^Yt5EH|<&i zL?&3kV+y60s0Jcfsn*HMde=Apv#EJXhq3C~-k~A8u218(pG{6Ff%2YeE~PJM zF>WfKohbC3&JNZ--6`?Dea0K5az;~luHW**air3rqYj=Rxvs{`FNH10d*~VJLNL)T z(_u{#Z~Ob+#mT9eKB@tKrEX$k1$l?&XF&a<4hZJV@}s}^0@R1PBxF!c47oMK6qt>I zXXC9h9TIBXV0W2ZQ;PVwx>XuFT8pj-@RgyAkMUZjq1N%BYUJ~vG&LdtM&LFiY?1>IAmxQWHTe&?*q@}JA+WNP8)hiX?4uepBgiGdAdNymiVpqgN z{7|MU0W#HxHn$f_gC&lA9>+&8I13=*nNoz{T+{|C^(arai@1 z!!(O>elO(dXm~5IU*$o-o5dIc@F3QqwQ*>Awf1Lhb#2EqQcoz64_k}-nl|t8dFwX?=0xI-70Ea)RgWn)~! zJYY`WE_?xP5zw=ziO`ELWw67y`uhO?Ju{n^txvx*1AD|M&ybwaW3JqCVIbq)G)SOR zgpw#lvI`{+K~~>K8IjHmIUy~pHyQYmnuZ+{#W{{^-Y5;-dC@^9+DBoRC~Kh&;UbTx zKh;liuItlU5V&_znr3oiYpgwi$Kfd+@-<}=FCcnEjslEDv zxWH_2mSUeHuFYc^Rpwt*{ZK|v@FW#chZUnr@L=E4tvWcRETc9B z(1{65&{<&(fawwi*bmwgk1Da?U&E}ndg}m&wB@6$f)(_%doq zD~84iN4|}QZY8}MaAA^kP4H8H8@5ucmDsRZX<;=MYv56G#Gsq*`NGm=syY(JJuhzJn} zk-tK(3YefnwX|KE5S}o@%Y%kV<>7#pQ~e6JT*YvqSWZ%7-7S&q=>2W7tC`8(cQn3##K^SE07cpfZ!~ z>Yr<;vHLe(yYKUPHToyWpN~gSapR9KV`Anve^aL#<366Gh{MVocC)~gNd*c6YD2?r zsXs#VB$kBR1C*}Xw|6Iz=fh?Dq-rezBY1$N$YMVcQ>xD+S1M<<>d!_386+Sxw*%8@ ze4N*;R_*!`V@C{Wsl3sV8zyo~Gj-@*EGA!FUfGH3xi^ReA6>HdG!EEnS?d&e1g_Og zae#Remyo)bnp$X(S0MN5Hx1#*K?H=BS-v}wq3TeTx*s(@yVuDU8g0heLBd!O5#8L# z?O`kHy_kDj737E{KKV$KkJN*(A*F58^y3O+g{E=Q$SG3*VFd`FfYQD?Gv!{S5GNc@ zUhV~rj2C~z09tPdQxB9;O+@}Fuyc>l8KFqO0)XHJ5x=xCmHH0KBB-K<+0VS}w~pG8 z$Wjy25T?-wi;v;>@g<#rvNCl>44`XYDHY}qj|C*lZD|6jVmWh}8+`ZL>ofRLk3V3qK494l!qJ65NYt_uoAW%Kq7&DxG7-hUN1J?J{ z%2drAyxVO;HMQ-B3?T7~7(w^@xzA_?m;=N7I))Sagdn?!Ak(=t3W7FAwHwl%A+Q{M zktWJ#XBN_*3E zoGZQHq&n&m@6TNQ*Q@vQFVhHj|^_^96xG(B(nOI;oXJnRrQ((eC z7Yzf{xSq$?jF%!t)Qd4QV?5nUA#+`7r>KS~ii$?!ren7q0yrv6VHAI3Ta8!!bFMj~9)Qsy#e*vS7xHP>Qh#_xQw{JfJ$i zO}04wNl0$s$Qc;b5tWo9vXz8{7KHVt8?RlcLke9hJjz9l*O`M*Y91&IYnMsX7*t&Rc;c@!RIY}rZ`e+R`lO<*wr*d zPLSa>@rCp%naxu?14-+-)gAvWrat*gM4Rz%_4oNkLvPT$x)fr>LU?`v!jY4WhB!>1 zii#FRTvn6`8jVY1iELvh+iye*z?dBp}EwVB6GgjCB^#wt?dlFy3Q%# zb&yJ0g5rfW`t+BQTqYJ>6$*50`tT^AvO==eXb{K@z}cZ^Xigf}hr5|D=H{Js69gcJ zsQT!`c9d>R-wn4_MmeWSGR8M)o}23*&j#HGJ|l;&&k_eDlQ#we22+#s*{z03z{9S; zo%HD`vFGc~x_n%guDc5-yDH&CxERN~_&oUG`ZwFf-#< zXmf8LX%{|GX}0ChkxVJdgfK-Y5MIB$bw?+B*?she9(v=mH@y_VGGxl*MUA+yy9nwq zKiL#|L4O$xvtEbfIRQj-&HCQ5f7&RJaK*O0FgY)NgQ}QHhYQv*Dl1}OJoUHwEPbJL z=>8i0AH#WR7DvGwnp<36-EfO}=@!h7;%#ej-GgwQSSy}fQh?!rS-RK^f5)QeB(2$= zy7z{aPQvd{-jv8g6e%tXA?OIHU;#}kdIE>bk8jHG@_|zYQ>O0v-*AqAsI?$QafAv~ zpsJyiZOAn0B@hHNMUl)-+vUhHpx{rYj3xIohk!pvc2uV`$@}=iV3c+PQv$|ujTh$T zLw3ARSB@CajY*uQG6m^iu}qcIl<@e5)%X1}RQ8^tGlKM`J}>7ZWRhAoO%kTIpxQ?Z zTa9ODn?BOv;4;IUOMFHvifC$Dw92pfi=783k6MMddqKr^*#P1oQ&@ytS%?k-zX_F7 zW(p44X_8}c;qW0>be?o0Oaqhd#GO=L^e$9yuy94q!3zc9)SEsuO`d+mi=v;LT0qA_ z9-o@rAl_Ci5sEGom!&qhG&-wlSy4+M1tJBy0nW&>6iQRoqy^_sJiNZSq2*$wWweww zmTILDQyBPJANusI=)K!dhFk_w<0~eWTbr%vh7i;X6~~Qh1i(XAuN6rqemb03w88tV zm&9dCpLMcsTFH%@pO3b{7y_7{(n)LZeCRV&N)580a#J^~j)pPP?WM11tqckC@iS8+ zZZtu2A;Y}jjUc{1vc-w|W*rL{=f$D=bxN31{v2nN_#e#*0u$uJBlhR#lE6phY6V-9 zd?q4!m|@sSf1&~GvBa80U+(Nq-{JoGHMA;85(ECqt`$I*2%9A0=8#%MFBway8}d>m z*8A~Waxi;cfL#u%bH8W3Ws~g>$R&ECu!Y)xkZ#brz@2#^8cCs-l+wAgxP2M{m%i7% zI1h=E<-7V^Bo;AA#ler;2nV40Qw6)mGFntMnoXUoEDvRGr0R|MK=WR|>HX0=wZ`y^ zaGP~P9A*NV01eizUX1LS#g>UkMlS!nv#W0MKBQuzSfbIbT&1@eo;bo`0NpJ@FjPVR zVbG+Hxmlg`^TAOk$L}EpiGpW_RTHi?VO_fkLOgn7@+dk(YjY)}r#)FjuvBZ%n>XC=j=xN_q9|W*0uD+W;|Y{~m|$@^4`qqz$YaP+(I5LiX3!i0D;` zv%YBh&6Nu7wc5C!BJPYfEe}cOYLkOrgj3t2Xmoq)&rBBOH&|c@!*$OAb)z(K9^=dp 
z@($C!AV|wZUx_m^e|x@cp#B_E)gLrrD6YvJ>@Oz{`U&|+1}5%wn50fF;BEeYylxq8b~kqxt(!z_#);_YCN*yINC6!~5_r-_}E zGD<;OWthByVP_H$S@kaQdr6CB*Co{1Zo$IFt>xy&%87Q1C#LU$3DFO*`XKS%MEi~i zP@ZQu^nk1=rJ{W>!iFDHQhlhZb+2P#v6-Q_@qnu9YN)(`{0?segE<6t#09>26~3H0 z!lK`7ZkRqwRmZZFJjg<_k{K>AFtXLVb>iLC_Yp@hrw=+Z{(n^DK|7^~amrOqP_CvX z9&f=l>O(4elAdVZ)4vAHjPwQ>7xS~dblk4~EBgE`Ose1Hn;s%ZBd;$KYL-W?mDn8+ z>R!euS15H@Z$+lB#hnlxryr}qkXZ4hmv8#mK+>q9P=BTmDIq111FuXzbKox(O^sOH z&U)Sfg;>gDwJ0F@>ZGvn7BC#23+7(t2_!XxbAc&_A%8|l(;r*2Qx*vhi98;wY2A@L z&kdJKcbrJO;irRBW;H!-D85*#L3#gv_klh|vR)x9ZnK1TMpVl*8KAs?pLXN5@U&yT!7OuK;Y58 zX7>6N>61YT1Qg+lVh?vOYfPSDOe|UtP{y;Swzn9Z-0i2rT9?mk+Q{!h`;iY8L8HCb zaayyH50>mnAfi$jxMK+~l%f%kgCh_F0t^p}3G-@jkV)WPtwficQ`Es?pH_L14iHgE zNEAOuav8$?N^1dsN-w-1{hfKA_V$cA<4Ib#bHqUW_bp$s927^Mjox|7Tnm>$tzt(NESauW!yPyLN{g>XA%PK zBe@1c`Wf`7bYs+I$`Ru`H&jT5ek{TOxVg;fvqtsKkY1yQ)pke>reEEjX zexG!WFXTK-OM@G%2>k&JT5bts_MlxsKa*Wafb>>Wv6gz4=yqIbUkNJgLPI}y^BGed zVF2XO|0tLjlLVDFq3>2FN79z|`OJ{Zko@^5Mj~z8S)W_n)(b%FV0i_S^3LjxygI?e zMNxH$G~ZG)QIY0r1!=Cfv^61j^>Z2bqSj9OmrUkOsdsrcscrC^6P${;c#VzuMX#EG zzJ|J#HYn_8-#ce|Su)@nH!bK1w`Juq(faV!{S<^5LkLk+))CaMaT7vG_%}KH99e1N z0WzX%pbnU8^=gJBg?1@bB9q-&Iu|oH+8daCqARmGOIb3N)md`I2*BFxLz}tR*%ev+ z@lF&&7KplHGLy!q#yjjM*jH%4|OKpj1LWwEhfk`i+-y-4yvNC}BGAXcLt?tzpf)kT5S zRd1z0zsLmfR`=;5C)B#XwNp_1z0&+!Z##Bmkw-2VAEo^1pdATHH11#Xf-tW2(i+zy z99!mB@>`AB1DYigsnF^A?@KbE(+!PCXq32d9yk$e0)Jl3%6iiqrFk7?Nf1Etw%0k* z%G%r`pS7?>NiDpc(BU0zpkX(W%z ziB>rOe5oP|gA5f8{>kf5tLg;#n@TC$hB(e{AjJDux8sqc%ZRxg4iYDFGW})2Fa+#9 zOkF;GJQuVJC*|s6^Ih{V*JhysjD~(EOx63hM54`M+q~Xr!}#)-qrlILv|h!KAkLV!tZ6 z(4Sv_{VO>--~~bPwNQFp3)drwxO&?>CKOamd-T0TWyrFbK^ztU7bIc<_+Ok(1j+mb zjL%q8p{z=VB4BG~CRMGU2%y8adhwlQozxefdd*i{^076IK?)kj=7(}OnP3ivB<2et z;Oc}wwc9CahJL1T3DuII^z({*Eorb{1K~-eD-(T8PC(2xr2~gVLSwahz4ay>%Q5)DO{kC=4ZL)<0heUWHse6+)`-c!4#{k4oEhQO64xE)hFc@D}@ko zfTQY%p_Eci)78hDE1k5HNz^W}_En^$L3;+|c$X@MRF-PUUkrEQ?@Sc#;W(=w6n4RU zPAuNG!m1($%&Ad;elMRAI#GW(9#7wcB%%yx&sN+)Yt02>XL?1pvGb+tcOp2AEqUGw zR0?u7Bo!xoS_0So4q`81By@_w5F>J&_Bc%lJxhQd6q)?tBe>!9!mvH`ycVc&&$AQd zmjU=t?kDdP4iD`z^BPAPT7{O*bpku_`pu_q`Kku~StUuSLkqabw1g~^ECErxyed}= z)O-U)H6#+lEhP$^28*63R`i3`36n-KRFi&6>3!@f1P?aBcNuIv7pwzpgF>OJLJV^k$$+?h>i7xVqm<6r_|b}&(xWl%3J6Gntns7jWEh^9dKYo|tB zbozxU>2DjF1az{tFVBUD)*D*qE@JKItA&tItCF_8qQ%?rY^zS zYp#T9kZ)c9f;J=sdqqP;gY1$FK*fUx@+(J!YGpJmDzQ?&*?oLfg<<(GBU zYXIZX%iu!w(1>;7+3I2xf5<&CLm=aLh&MF9vN(TRw8#D{6BD{oh#BC}g*3{@F{j|) zoiO4rd07z(<(7Og=zzdj;$jE&j}i~9QV#_n>yUCuo~sW{m<1yBqS(PCL4ZKEPBO=g zpi)`Vd&M^=s$Klf)i7c7)*ZTAw%S{?C)IhX`?MZ5{pGQa@-t8B9}J+>e*oZm&%5-u z4UGpB-?wVJgr&C8q9VqaakF7V(i;M&#DwFrkTVNn5iE%)1R&uAVW71zXkkEo0E?*N zlbjDgX8Ac+KV?h5)0&=s=FhyQt$k85{#jkqd#M4UrxYDWzpKX*0;-8JI!5o1@}}RD zp0v2#G(i_B-_~!yjOF|OMVB>i0tlo0k^GJrFAnEJ(QksI=s(Vp`Oxno0IOc*++-Z}qB!Ay0voHuBST@s zLm}vrV^vNFIh<^`7sf%x7=cU^k4RIQHgQTXhRlBc>FEAKV}kQcLV)>82mx)MvIm(0 zFC}2?olp^x^XeacoRK_K3O-lu^^2Ay1u<*F!aSW~x;&s$LbN_Q>FhcyC9VU2Bn`@D=Mh>z zT4?xpjjh$B1%axt;^$8aODkdEPt8wuv%sT-nDF7vA4(RrWdw;|M`Hpq77okZ7=LR0 zc7RDIIEMciW??-fV$A9)T#_|ekvcjhCP2i_-(Z9rDY(#&2nlqc-nTUzw9|)g%F;BG zUKUy~`-d=b_znx8=Sh=w`5i?A}Ux&G&tzC|`t7bceU<8I!t^ofs}{(E27QqBhH z%S80lV+<@|dedJ5CeWttzy8!M{TDMOu_scWS08BI=O97SG++`NW{Hy$QA5sI?F~>Sd(0GP@~jbZ?{j{6TW9l5$KZ3+3TQ(;QZy zyG)y6vMGLX{i6~tp_>BZSWSF1rPsSZE9)jT$t)Kwezh@Yvfm?QQ#urVB{P}36nFZ| z9DT-Xz%>$L^6wA3M~SJqjkxk}@4CGiJ6#9I)ipmV=TZiYYs1(7aQ)`&^XU$8z$Cj1 zG>POAK5+7%`mS*1g;v#Y;%0{9E0(}GIZZBLiY*RDF8>%W)bC>IW0v+c!}zwW34foz z<>zo&%Cw7FTU1)nSXAGu6Gltcyh-???MY;F2$9i))C@9B`xn% z9s!`GFpIay6H6rEnv{=!x2^%be(VZP`yj;N>VeX_ULkQ z>(~K)ZXcl#(G9W^1Tk6}*j$@DKuwAar!C50Yya>s6SV<#;gv_Dct!b4uX&V}e_WgF 
zb!Fi8v)q(KN?jX0Mkponc2p!*Oj4+MTFTguN_qomx^98;QF)jcebJO}y|JY?bx+Yz zH6S3PTetyziKouMt(}AEhk3-yB$v6v-a>CDRVh$EJ0g4qRq zul}(jCOthvDSH3la{tB$qY{cPGA1d5#qPi0Is6tD`L$%`@g%XejU}yrbz^67TYtED z^C=$|oL{C)s5>3LkZcA2X`lE=2m>YQ`t5ltf-={(UVGyov_fHkHkY{Ll80MEaiCXr z56jihdlB0@52$%@LXEh-qH1+rFM_1~(g_Qi+<|-3u4SB(8ingK{H58-6e~gUJix1e zx_&#iPj^6+%n!B8#+O#TiY>y(;+#hQ;t*rh%fgdur;I#h(4Buxw0o2agI<{JRAK*A zRah3q#KegpT!#CO)Bvd7L>O2ef>{$ckioi1Vjz;S2?fibyf-m$$WNPKq_W5~eK<-@ zhC3ABQNjCQm=%7JX!CYyWR_R9z#H^B{2iK)mmzWYKkwBcd6oMsAOiS{sIK0of%blu z^1kNGISf?dZAs~vs&nn3TH{VFS3uvx)i`>Cm7U`dSPP>_(WEAiU6qgi?KiC7O zREm0K)B}*LGAO#>&*TAWCHSTZ-|C4}S8I7ZN{B2n#9sF?!pG|dh)oiqHb_?g_V^?R zfs>x+t&Wra0!@)+h-+yBuxJm7HUx_N`OjrI$_+-iFaA1$ypw<)e5hp7(WGRS5aTaFYU2 zScUgwST!c-q9CtildkC1@5E@x?2N=Im6{oRm4TE@Em!_za_K4?1S9PFKMuLW4q&=e zNp>qEgXM2gaoWnfFW0&>*JLxvz0p>q^s{ZOubJ}1^Xk0A`OUSE`E5#Id;-+NqNWjxmQN&Lg0MlTT5vy!(dHiV)# z%GU$8ndQYX#ieTO=s&}Q48g7qqGQd;Bt~nS9QSIZU{#f51y8a7GijIH-_qKhDl3>@ zkE0yXOOYYhJE%4@wbliY@m_?}|L9B8j83R2wc$kp=yk@Z&nh#b)Ux970#b)aGI715e$<<&xc zur&U3Y4{r=HaaiVQSLEczF$)K@FitD$xYNh#E?yq8aWGa(dR$Q0A~JWW%`b&ORU-F za{lNIod5}`R+bddFw}os8f1L1eU2pXQ}4(oi40Bz^zLkLcrX7SS#Q@J`B|NPeuA9L z*{tU%jtOos<{U6kdY4;osMOuYu^&@!l1fskM3qWv$#j;%7?Q`lgpfFK2nkIf#F=px z*kEk1VXaj6T5~jybT)I!7nrXwziZ#uzN<5nmBebP>i@s*eeZj}UVCrb5)ePt)jzQu zyenKmO$7t?b1%d3kAy%L66~>?twZw(0%a+LWlYeI93u@`e~`J&Riy# z62p^GbQb>PC#+bWNpC=#T`9E}5>^@&?$<9*0zmNMOD|= zBtasYX}&s{zVk4>1o?xpU;=G%P_nd&YY^8gjS+?o<o4yF^3vBPeiEuTe-u2U63nv6nUp75RxzlXD_b z=odD_r_m$SB*)zk2(X>x8*UG)Sw3bYHr*3_-8a-_S)_EA*QOts?s!|oQ^N7s+-^#GtWCTw z%o`kZ@|9lDN_I6+Q9}513@Pac69M~HOLdcLv*3vkJ2#KhN!}4sBg97^_fo{y*e}x# zf}v)-*HV050ELPlh!dypoRr?}E{F?xOtc4!Df@p)P-~iiDs8D+CIFrS4scZ+ z_vcE0it!#0dy0s-d5uLO1@w-LAU~&u4@LQ7aYqp)o1}{r;T%Ox^DT!*HrV(pd_lxD zHiw#uj;V-M@yQaPE%-!tijhS6Pq+5>9!&_g_=vEeWkcphrEt|VlkyRDb~6bPu+;7N z8_P5cwJvz_Bfgl(#g`-Oon^NjpafOem*ICl=Se z%*#k41G5w;_4vb2lHf!GvacFZMjYr4wHT@PZCb zOOBY)cWsPdzRM^KSJ|{BuXft+#vTNuRI3p&1ArP0(yWkBWQ&5T2@l+8G#lobw&GKv zmZ@AV;5IfMNu!A3q=nX0=YCouiKH0hqevEZ3e@#Ej?s_XQNR8WnRq=gwvK60_1~%8 zx7}}+qM@TG;-lIi%`yevC3>xuI48Q>t?9}1#bZ7VcBgL0jpv`#?GzkxR=R9@EMaib z4mJckTxl*j8dl)dKfxovvb^sASgR*YeXDN?$iyRj6E2Vcs7df=K0Z1YQfE*NSK{-J zB3YDbuC?jAK*W-KIwlaCdDt8q#hYHn9J0Z0;*4rNElvi;ivK|9u0%J3O~;v5jp9hD zM{l%Q1~v&>3K3^koa`D?NMo=u()tMm~*Nq2vu@cT_)q^xmH(wL!!l^@{Ht zy*n`r;<`T+u zv`)~Een4qYh{elDE#A0NBGu2dqIR=0NaEwVeqpj0x@_eDcCg@B0bzSZuM- ziYZ1;orH$f>Q|dPO*0TSIDHYtV^2Px)3qWNK=YC7*L4fZ`PVA-xmK1q3^4Kwn`*M9 zpGTaZ%!3z!i;~qh$FlJ8QbNL&l2m_)OiRWqukK&J?2D&BaURD!cfF=}H(Df6{2}Ji z9Vh3(uovM|^#{&;_s5I$h)JEXLe7^n@k?biS-MN~!2==PrH5q0L{63FAZEIS_ROT& z4PVB8xReB2U8mw7bWnk1Ia^1-)D zDx8v^QZ7!njKr6Ooz}q-OAA`oGXgj+7_e4-pYDvh8kqc%OXCCSNxZK=13%Y){_KU9 z($2-!pd)g*P~hn@Zu;yXT{eQhl#|vh^cI$AJ$W~0Gl9=~`x>Mi0uaSxG1@(fP`!pE znf6-5Vrg|A?ec*5UV{ec*>)wq| z|B;V^`%wAc!BFZ`Q#81Nqp7avIH4m0%=yrcr$5!!hD;7g@u7%)IR?)0 zG&5Y!khj-&uD_!Qv3@!o4!EyoA`D}Iq5g-g&l*xZs=;AA@Py7Vs}NH(Upd4w+N%*d zWUwBurUYeM-BB)r07XhUG&|F|LIA9i`uw7gU#gy6X2{`ifV!kYT~+=wA%;MC80ty5P^RHMSS^IkeD8l5h1Pl{s8r^M9HEbN;N1Xm zga?El?N^>~41Q&^1r9fq2b~P!s0lC-6&Lrs^e6rhOQFzRd(cd)+(Qd1QDm5uf<1xl zI_A~V+&eUv1qt~xr54k;2cletV-<+8*VUfv74?6~V8_gJs#B(3vsP+rh+NgYA%6XF z)Mmc;F)?Xmm7{QE86+CkM;Y?;**`PcWa|Z`l&1D8xuFFDsL1xO&FUwuz*JHgtS)## zOeAq25mj_juQJ{lq=8c`(U04D_0W=J@DQEGP;sJ>E}5MZtF>9spG zT`RU8LbaBsatKMZ!Sw#9Gee@y)2U1_VvF6FZ*W3b%p{f=Wy={8MB4X{GA^xHo*)Qm zgO7V<_=>lIgh&|3xpbp{&A6Mq8kinDFbGfkAs;ctKuRH+Q2^}dCt@eFhE@`*Rk14N zR>~gqe5Edsi2BMIrt2V>a1o21T1H*C=Th#{$)z^f#)QhQB?v?HtC7F0kAL#db;wV> z@Pw%YXu5QpHgzK0Yw<}Wv$ko2V&LYyAwXA9zpMnLIYS675_fpl81CbS9wwZEPISV| 
z`m0!dia?~0h>VEINcXW|>IdJ<+PCX(3wd$%ex95n8^k3-FscYEkA+~jcD1~?5tL^J zrT+p5HMzFYjyrikvA-V&YC`G%^nd7j$eH?YxUkUz(XvTiE+8vB#ry!+XH2TgY`=nQ z{hH1U*1~_)DiD!jxMEdPrzq0j-PP}LxUw-#o7&rSOI6`i<6>Cyqp;RgbEdDJ1!5D8 z8L*@~3wjU%&@?y#1lRBANQw6UmtG1eFtlmXqjj{G`H<#Gc8;P~{wZ+2(x>c`pE3m) z4-=+f(msrpt6z!_ZwDW04dcTHuj(kduykwm?d6rkMakiXdD*(mFf5_43@dW~r*C1{ zm1hi-Ip9$Y+(3w3mME3ZAF-7w=%fCD7SJBb?zOy&tD+2O^vs>(qMNxmGy)%DjL$#z zWHKCuL|0M8LGgS#7SEOOSgafH*lA3^poLixeJFT|Qn6f2P4#n{r&KeB54jRi*T%eN zoSN$}S~65Zqqd5QXQ8iS8c>j-r$z9sK1~O%ow0_?ZiYxobc&s!XY^=7+^Lr@rtkP= zbMzAO8DRqc3OnPwekyYFbU*A>o;p{fZ1N$hXu6F>jai`u@u3)2G~ca+|+Bk#G!-l2?VCaFnza7 zs@dkYDrYDvu}xAzdWVAk&YQA+E)?iFsk=qovK_*BUdM8OQpLX_xo_90KPQtOK?z@T zcY@RKBeBgvRdh0GL&N~W(tf6I5!%yA#2o!_OW|E0S^Ig4b31op<5XVPH~&NM^6xVK zQ{21vu0HT0X+@JUJ`iQxTCIW3FnI&D?DvX)L*^I=tp0x1&+n|MM_UcaDzi>YEE^2l za#$>Z8?+rIf#s(nMlrE!NSF-IxTF)n184(sXoJr;!y}S(qZrSLCrG|U&D^jW#4r=8 zE@FE6vTAt3dYjV0?h#Pf8ED!XT~V3`2~_{>L;3)M0>%yHH8(Z zfa$CI1!N%~Y%?k&E9+Ax1?Au2N5L<<3Oi+&%t z>$sepI8BNx5({^wb4bX+dsG0hcDyZBGdgXtKqN}^Fe3 z4DLUE?8c|{E)Vsp_+5qBmCg!F;`%i`0&zsQ!w^$m9etQPp zJC%6TWkTCrX3T{4SH!~1PcFug3hyPocvmndYYaGnmrsWRz&=_uCVYOsH_p&1tC&RL zHP0$#3MeA`3WynUCagu@9v>F`A#+Y=W0tw${Rji>M3W@tA(R;OD$n`{;=9|MCMw9d zs1Q4FSs3<3+V-2{XRza@$#-~nQu}^>G zY^&8ezN>yz;!X9xiqO$PYd@`!k{GJElIU9}UN~wo(lr7!jb=7Ry!zfFcK&=nMgh4? zjvl{HiCtQiOuvk1KW8dL5Nb&~j{}?ayKZwnDVbmrI$>e;f|v6l07)oNhTvuTm-ral zJsQ5o_2>}(&~wqrpajY$3346gCrLTDtKkbT+%Si4x6-#GH(S$#VR2*K`9tk#3Rv1^ zBqbX$0xpl6Ab&Td1mN0}2<7w~HqXpZ1b?1?sWxI(7am>SjFT+VP_;;XNtwNNn*JJ$Va5|dLc=M&RVqUS z@G!+k8u{HHmpu^ijp{&CUGaiR4p_irNX5_?mWPQSYrsw=Hy5F%Z zO4OFweEdZ!8@YXsV(2)?MJ^U|PqSWb)0#xTa zTjv+WrzvLcQNEvm8xkDv$yc2SojBUII5q&P2%AuWAD_>;<{nA{Z!P%YA)pgakENeg zb?2m*Nnv5GM-o7?8Jc(&fIS3SYn_ z!~+kf>a>)0(Ig<$0m0kwdF??gS$%9;XCiCd3hFUHsBZhYbsSs{%X5GL#&KlRj8 zzQ60ZFQ5sE9~oz(3o`5Z!kAzapn(w(Zv5l-?tR3+1Mt!dv@w1F{m4SN(T*)_FunWX z*Qzr@9B?>F7&8C~#2W^o-la3X`lUYp^kbj-oHgHIc$60OSWVVacFflfQVReem|UEH zII3}UV92#l38tayMkf+`i>Q;=zKZfl$-D!SB-8@D92IT;42kdactHSTGpB^ zyjU<%{GwdQS}QDY(DOoUU^V+zHiws}cyA95d|f-{c4jJ;3<0$~^b-prHz*$!s}dUK z`F;d>NwSdo9*yzieUix~4!N4OwxZvzbeuFqnQLf_JjgPxI@mELgw6FqBTSff!C zB2La@VFGXj!3g!}zhua3X8_heJCMJmy%Kldylwyo&sYc z$%}j6B*X{(Zt9-#fZonNpB;Wozvqn68A%)Lq%zc8kH2UQ0?oqBh~D{9{Gn2)sEWHW zee-09U#s7-t&Nxxr5~2o^mU|Xs()orEF3ihs)P5Z6I0|1sXXJjG;paj7$)HQxkt4r zZGzOQCFIyw{Y)EeaQE@-bi70n3xaS9sb-z3xHS^b+bi>T6xh(`G5soS%ADa^k_l>| z+|fWAmI5lMfrJ{wjpOG=_e>87=#h5cA1I)tpVVQa)`4v)qLX*?j1hVI(k>j8$&7IG zYGobP8D15LbpoAc0$PUDi9C;e=9AtE*9nli8s+$7sKX-pSX)de5<74kkT6o-KD(!% z{;c?k#_q6nXjKWwPN@c;=jF-glTkI-iP~M7x3DgFM1_q86i4i;@4S^Uy>T4K}zyswYrZOAmZMtE^(ZhOvuZI8GRL=R~H1H;CF``**lL5!Hg4c zJD^6|Ey+CT7%hJXX&xp92TsAx3>&7J27eD9aGb~vBD)n*shFqeaGH^wJH4z0z~7n) z=LT1c`)W#T;i#(xs(ERv6}I24)wP8bAM%2JFB3)UrxRIQZU(mDfB7Gu+BT^@FB2Ex zG@9hKBi+x2ns@XPWB|z=%Sx(6$oBcmB-Wx4-SjuJ0ObY?;c{TOz)fF z&PSIp5&A(#@tM#1iCCLi*+4s*I{bTO*w)E}{~M!v{PFYQe(D5WW^ZI3^Db2uw((i? 
zdo6|{so$(f!j|W%XoKXyK=dS|DYnNsdYQO4ipkUjC#+msm-0#`S$ZjqV8~pxbxwrT zapOjTFnr?1W8xRsrcM-BWAku0G2DKoz+?6^4tZ=|k71)FyP}GQesH_IGR%W4FvP_1QgHq| z8La>ehZc2f23@inH)4bzoC9)9%tt~j0l`>knt9^K$U*dvY8vLH%(rOqSE73lzL^|H z@`pW}g#u&{`3_%E--3mWhJT`tR&UlcaQ|kz;@WV!BR4;XdGE^tTc}2ScBpGn%crAi z9Z!FjqH>ohz-g{4$z0OOpZ-(5LQ^beMPH*t7J=!%hFqQ=EcYHav>Ntiup3M!i zq;QlnS(Dp1D~MUGF1{))qzgzK zbTKb*CT{l*p=nE<%hvg@TsS;(zkeGM+6QtGkbLBYsv4r}i|G5{+lS#T_9LkqO0COh ziJQuK6w@OxkDV}IJ&xv?M{3aQ7dA0Df_TB|2_#3wGFUJdMW|q$JG>+8RR*IOubr$v z8eSlZwIXoYVf9SZzaYU*a)f7fj{jC#>+I+BQ?$Ztz~wICesuv7_i=wtvuqSnb2qbY zXNS;XHUd^35jt3e*q^0umvDr=Nom=fO21l@2>zo?yo6+kvnYnJC|$&*^YN`?m`&fx z@Rui8r%j+jO+O5DPDzEfraznhs`3>PE>J>M?x+u_uOe@kOE2kgkWL%1)d+0H)lv{B zW9GDT52uDU$cVT;picye@7v9CZC`l&Ni9X7H~Ahq5U@t!2}kD8mksPG$@UthzxoGc z1Tki>5G*|V^+zdT0uxeOZ(dO_r5o>$YWy3x4zHpa2J1DsTo1Wm;tt+XV}d9J$;-U8#-fw-{^Wbu+|);ZD5_ zL6ZvBxCize;gh{qqTJ=7!dg0kXU2{oi*z<&kD^(}#+G@akGtukQ@sZfZT(C7U5!I) zmz7X0{tA4VKv{~{{7aAy$q)N|DJ5?&2BS{j>0XAbwhpvFR7%EE;1UiOYE??;>8G>z zDF{Ezr#d9+2pC`<3MfZr##*%w!5)u&An|T>a zm^b-B&4Gb`)ZS0Lt|P7jikU~;2KNk!B~f-!#X$@(|42Y>Bvyxyy`8ZKg;H+{Q{AgCmDBvHc_ylHC)`0=<_8G{ToEozxJ-=g zGebNfGn)ELA@;qx9DbYPi>bF^7EK;zDd*bcFs(h!Z|mO`Z$JM@Eno$_wVxBAx|eJF zrOt4G3JHLO$$!fH2osqQB_R|X@9*X>)R^jb*MEQY^UTBvUG()xMPP2}h3j9?3f;jf zn(w0OPP4%X0Tv7uFGc0k=QFDmj6_CZgLn~%6dvR4uU)_D z$axWF;oe9uL{*?^xj>qXyp#^%@WHEoKgUh*WfF>ab)&gs($WgL#K>G~F8&d;Dlh=c zcq=|bTK!tQ6fnEc4;n%C=mat=Kw)>aYuZMlP z9Z4i2QL~O8nDgBP-NJ6Fp!|JYnkY5kuJ!}l(hOgLwU+=A*!|Rl4K?V}<`$2;F~dSj z!%pmm{V11Cl)L)S`@*cJ;i7qerFH3FtEt{;ylE2H{C+=!+7I8DzNJUDNS6ll7ShZ3 z(w>0c>0yoSjn$21(-^pQP4_95PKHaPz6IV>UxWvE7%m zIF4E!A+vhfj!*$&w)Y#vaZKyn>n9=?&K5N4@4UZFDyB-je%mqT6C#ZIBP7PcMzHzd zR-{xg8BC%{CsJRe_wxFIIVLlpy)Ugkn_uFrrTX;GVy{Pqb;yvfYl(hZs~t7h3#s@d zAZ5juM8->>G67?-i=K5UTUwT0iR!ev-4vc%EfB|{cmlU~l(n5m-%=l?Go!yiKP+Da z`K;O(=Z5-LcI-uM)cMH3Lef6m!ckka>Z94F`liD2_x4l^Vyf0}5EofY4OHba ze~y^1wcG}_Iml#sgO$dVJXY)yhtK7!a~>Kach*qZ+)P8NB^lk8u3s@F8R^%d&MXZs znzJG>CmC$HLl*lG*C9m{W<#cl#kENMB-55)vH2Q_fcFubOpy1l)Y*`ZGZAJKuK>`h zcOaxJr>o!V01~1~=|dfKDP0j9@T#r?)s*I4k{<0)>bR<U1+W3=Xi&$OV!sTO;a3+1V%^*L}6P zPsV4Cuk7Clv`*gyTw&S5l9e@4KG^R>|Bq$>|H!%t#U8evdI=X5x;W>7LZuoE37ZV-6-VI57-JOP61UaY&x zJ@OjKkWK6A>VD|`cf2wo$kH^&#HJv+dNEvJ`gh{3=(HN^8UMio69%rrXGjUkVKXVV zXQm&0!y`5blG@y+NE0seFynzPi>>eaXEbZC0om0(Lu;P}MH zy+2mOBzM09o+YxMC2tcgqOs)a^$bw^7o;E7vvig z$5}4jB!YLmH>Yu<<*xMQw3>s~>As}@I$N&7j=e>Olx4N(u8X-EJD4*s-eR8dG zAtF2)VusvBCh4yviA7)qI@3~{CzO*&n&^l_f5Z|s=kn5}&=lRG(gI2Ae_=5(SUm0W z<>7JZ_u>6k=xuBq!O#6^1PNC8k}Ku1ow09BGxPcLZ5TGre_Q>axJ+<>VT%dWNxOe6)Ma?5zzHsA)KY+44Yn9Fs^P3w>tIOUcNn+yjWa~p@VLaD4%F8&Y3EYX`j?fd@ z62Kk8alFm@yR|AI^-b-u8Dqt-*JQIgN5RD@@ollwj^FjK^kcx9Zm zq8Or=CCY&2?Lv3?Fq$lZTxWR`S7iEbU@3X$%FG%Z_wSx2+AxMGVFhQXmx?=4PG4wc zfRY%YT;EbhNcENWI6ghkTmyD#kLaZ~NyD!iYHqj2bOlJiRbEgilayJE(E{RTw%TMY z%ZVAMh91aK=uKe3gV+#(kiLmc^&y}O(?I4l43h{Kh75@nAN%wl>Fb(|e|V-|r`tl!1~^k-{=f_F6Q2 z3)!VsM}GZQKLi`M1Ta7{7vK-7!b}S_Iq(zo#?n4+-pJ@~_J#VZ@O$Xc5APA2kdiux z?B(1*XloTwFGU}hnvfKoxCLwsb*GsieMWLO5l5SV|7R8#YGJpcjff- zWV@8h>DO1(#Ol8bXf1DT+zL!10FeM84q6xOILcc)i~a=BU(r6IVcud3fOTnb#Kh%< zZ89f;r}#_ASs(ENeaSN}K^ws$UpJ|^B7zfP&DQ0ESNyGw#*SVkh)sU|i-@~eAtp~v zN~p)_%aHt?TI@vIc+*2YM-Snb0%N5+y?;ObfF`oX!0GfZT39gu#=F^kw4$%EOIh?N ztRilnKT(jO`XYVz5dGd-1M@KTc}#w*YKNIc(MXZDoT4`gj)>F2tWoJB@q|c^flED}oIX@8S#TNI=G7l$$w@i*`!;Wljt? 
zkwGhpV5axe4Aon^1=43A(}83!0;B)r3wqdcsnP#;P?p;GfRKcWy-inzmoD0~dZ?90 zN{1F`6=hm6L#-gl`{WEFzuvvu25zLzyz3*rztzOLkFortm{$jWxOSDp|>^am<#xdm_7FGnLsyuX% z94U+(@lzQh65oUjVXXNycQk`w~D`o^61DuK)4*5?A6RnN%9L>zNVg#hEl&TM{6!&9li4KPk-mrpU ztOaO?6Xqf$YVn!j^j}g%p+lfRMDyhi@PBXXB;b`w3}Vx z;zYxd6I%ReA#@5cJUbXv%YEr7T|crBX|R0Zk+w!C^J4WY20XmzrQNH3*;=v_k2r(z zudkEK`o{I^SMR6SsZm|Okk4o)0v%C}I%QndXFS2p%*RAXh|*M0PTvwy33j6{L{0@g zF6bVWbON=Gz|_iN;|Q)#jxVx?PC(A9Uz-Ki!SupUHNPbXfZE=QEnR=az!ns@wuNhg zf8l!iFiDfv25)QA7^x_o9#V|3F_xoPY~1PlwflmosE$g%?hZ2#to;^O!=IC@#i>`|C}0%#4?hqC4j?72atA zskRT!&QH=QEDaUPU|TuouHD;DRlSR)Mzkeedx;LL%xWvql_d{GmPRp&T8A{5@*9=) zSK(}|5sTCO!9&7WEVd2#J#I&`&ixVysOAQ96j4}Wj|V?EF6_}Dqy7j!jzd>FhU{3+ z1sY!-wjO?6ymxDL6}gr|sZ9mjFbB&J@)2YrDS-dh56O$~Qu~q~pbayqutB-Lwl#fs zm*zUroz!~nSQ`m00^7^06tRQhhET| z$ERmu9ZBxqvHytOEeHlI%4^MV{HE82s zdJ3>UZ-!)Dcx#zNcU&~AwW^p*k*_JIiiR0GzNsH?g894iKOm)PK!`3jYNG)ct(V!)xaQ3 zYAU^>n{*2IUFWj)(VD?H=M~z;P`ixHxq(zLY#O05&98E1MuT=8>{Z7z*$ZlJFV2T) zOqQgC^Om;aS`|aa{{Oe%JTpD{fGRo zDa1G95VciREo-Z19?`YqVG2nw3fL&}F#^3#xzF@xwKY$J zSQzlV3^N{1KS<2}^wl~vGF;lf*Y+oLnke=3tBSW`G)rjUI9KruSgCmJ-z8@Lp8m?# zAN!d3g~(EAlgYftRFsb72aIC9__zZWL;_2XAz<)d9hceguRVI*kN5 z>y_ner=)I%A&zkg)$pfsaxwSyk$(H(_ww*S6Q&MIf4ulWq($Ml@H7^IO4I_O29qAl zJyBqs+|O~shSGxx%_{Bdu;~;Vt>R%^=R3&8<+3F!_NV~QLPiZZeDFqUvjtVwR(pS# z6YdE#nORh<6uKGtH-kD-18M>u_Cq5a9|T+kHe0EA=&}f^Hm+Z`1W~-bB*k*g#Mk_w z&Pkv!x#^hx^29{W2DdG3@8~I5qgjpP3yf=T@EOdp(tM>qzRb_7SqRpXKSEguXXe+^ zF*fzeE+91j_85GNegs;hfP%l+WadwWCai-%y~sy$Cm1p)&hL(jD=@PuwxO0EyU1zl zLpXl^&CPxd>d+ezT#bLf(W{M(kO%xBodB>bGt3x;c9RT_@Ab~7KiRM5?z}ch$jH}r zoxCU&Ji3W@wj`Q|NJ^jUI$*sNTcF)asiDMa1{5%p>VxJ2uinluDX~<-F`7H2eJwV& z{Y(vd2@Xe^1bW-+=-@@1cT(Xm7_yY)0bAkkE7Fo3{)o!?1H^i=R7y?f{aePm+OYnWvr`#^eI1LW0KUNuk(*+n+pAmZ27pz3irdf)^B4PyBKkBh;c0OtLuN zC(xq|1a?FK=y_P}wrBP8Mw`QGVjaANdPc zKi2)xL!W^?lb|Y*tsJq4KhOyut%xdejFf1I68%~c8Gyv&1b>3p=^aPF&MR4L7XF|+ zuilH;RP471JDB1o9+h9+I7M65&csyugfQMDpeVtdi&n-a=c5!f$@RB<$VkQMKl^jg z5oDf)1JHZ|>`IU8lf*UlhYXyM6i`K1x=HITdXne^@|V)(Lu4OkPFl{q0+;B4=rr)z z=RXtUaUwtvNqTXRAZol?54uA`-ZpihDO*)xf;R^bqpZDX(s?yrtP0ZMAX$=Sq%2j< zLmj%nF)JXSy@P~J25T596oN=PN2@LMUouB)Gy0LRi4>(yhIGtn@hTYWmyxMwhB1Y0 zX|1uW&)_v8DZEYfy){CEIB@um$B$-F7QK;}1-N7Jv?PUnqVJNjCqGTl#uQpwjWS00$D{dpZTWM44rPkN{ys#ME$&(zt zZaI%;r1w+O%K<2`a@}|m7{1&fJfazduBV#QnbaHejfQ``x_|Z0{*f?1X5r>3xF4rH zj1n~NeGwcU3Hkb0CGyWzHKRjjAPL5VZ%rI>s)iUTqPu>#jtA;u-}zoQYFTZYOB6EU zu(j*W;vt-6wujRb7!@3cp4Hon)f}8uT;Ha$*LwdvAyVx*G5>QfzM>WBt674Gy)UkuLsWLVpt9fPk40gkCjkCeKTv`r``@&?v>^b#^r81Tyq z{o&zZ=wn1hMh=?O4~X9DB(MU6?C>hLdy!i_lxmZ~P|Pf)yY<!<2V{^?d_t^5N0~qO|9GueKjOOl+E1O2B-UN8{G?yO&gz7ZV-U8(c`r^B zSHuWu!qpS3ci6@oAuvKgeodWVk(`Awvn4=gFB_APAC}_GIGAdVBxFl+i*9$N!Mnmz z;zrG1HL2w$C8-ffq}2KfL@yFJ>NmK+d#84;DM7IQ_-Hc&0X3J-P^O0+HLpx#3+s z6PlHNY_J`|CmT>TnX!e8ZiSovl^0*qNBP7{pSOCFWWzThEN_aJC|51MP(YQ`OXN}c z-h2qqeyJJrx|O3%J$gQ7(AUGSrPBGw2qLN@7{FVz2?~VHjSh#Mv>yJ3VrAUDekl*+ z9qrL1A4h*~nJgAHx2fCoJDVI_Z0V?-@Hsgz5Fs_{l2TYAIwg#Gf!ehNJi!)rWnKjq zCip;5k@Ria(2FQ;zYZYZD6YNSQNyBRCYN6;G)H+TH1jovc$V4uo*NMPdX>SAw zm9|GU$9d3-vCr5*Nhzw4$>E=-wauIg0z60g*K_0nx-0ea^s8Z;LM=iS!3G_@(1hWm zt(Z-*#ud<#AAY@aDdu-FO&SgB#77Ut4w~s>*l9o09lNZxo840~fT9aE{6#5Idd<+K zL<)KEc^+Ax6vn9TAfo4GGA|6IXh$`SxxN^Ev!E#f|JEQbG@sn)ALTu8>-5T|nuSzd z$c-{L0uhUbKgxidw7KMN2&@_aCMK&w1Z`#d6M~dF;#~$=azy%3)h~?O(w}cP)>aoX zl%h5OpP|~GmT1GdT(5Y?X;)~tn#r$g=bH}Ld1Z1tyFkj{|y`oXh1Jk zlEH0rKl4!^2*90Q8O+rIPc-*GC4iu07d$Wm$QIw$@mK9NKBj3Q9gJz z@(~?;eEsvr~WOOz zD-%a3+_Vh_)3;K<$jHW&_sfV9#wtN`gjhJ8zUdDN+`+4p@fXR{fv7kQ$0H__=ym9u zd>tHHI-0_O^l#Mj@f_7}#Edk{Jw^+gUYz47_r_MrG#KU$K|^(5TI9WwqtmCp37 z!NY6?h^>Y2maNvJTBn7WW9(eZx`pXm{vHTwL|c4}o;G<^qelROwv1q=V@3kF!v!(Q 
z-_M$_)y&7T7-C`Di_K;(Y@Y~aikQ79ev3xx%gd(YvgTxvSvM6L)Mmg@W!enQLqZ^u zLnxJvB+Es(=7h3?UQRwz-)UH;u$v-s>0=Rks_j7)#{V#6CimBCqmw8%&6`H+pcvG4 z+(+N-_3zrsh{6(L+C;;|(l}+qEI*tlUjswd(?a?s;lT@8oDb?q62U0_{0l%BdK%Va zmC<1TTbcDF|EE}RT!fwNMuHdVAYJi~Lu#Xp!hY71!|c>e+fT^n_=IDr6QS{h zn0J)U2xif&ntct(mN&7EEt)SQhfLp$4A9OocFf!3pf1C|L2CYw+RH*}Vm2{>`x&GI zI~f7;0wBrzg4G3D)pXdT+3z5FlOYpqH{lAx8=-iLF6j&VrF}m2sXx5@Kg<}2TSADu zxWY4m;*OH?7!EYRLCR+3IR``pGzKr~%Hg}Dn!oH0+a;OU$*c;2BJMDelA!i{>7?8Y zW5nww7{k0DomZr!r@ zJamRU&*#8^qQL+2TvD^0a06d>;*%m0!3j8ZFT<4$##FRQjJdgRQ%||IGq3-=_~KLA zWg|=R>};Ua=Q(`@j{ID0bow79{yTR;nB4Sz=JvW)OD}jj)hjUIwXe~!i27pxi%=@{ zi4nAoc8!t5W68&@wumx6j|TC%s?&C)DR2B~5O+3r9A=_EQ6Gc3ehZH0m!`osy=?s> zBNp;US`CE()dtn&Q|!>5t`{0wkXzc6B*5(IuX)^udLQGFbMlg80tQuRQ4CHa0kPqJ zsSgfmWbc=9F+=jx4Ywdwn6r1TzI@&ix~4SHY4%R*pip3!y@wz8Y+)Vs_sY%|SAQ!? z`r8PUI_DFkq)mL+-&#~0g6x+6xk6`VpkrYv_f!q6T}#D8`qnq^*d0Nud-FCMit=K= zZa}GPh9iNY4oP!Fq7|A`fa+JZu`5LpAFq5$FdJ^bbM;dmS&(T8@4r*4=Ex7AImL4A zi;TlcY6*eKSE94UXjrM%!!r=2QAueZ3p^*ERL{6^?}m7X4qr7(Lv}cJ4`*Gysjs%D z?_>mE4eF#qQSvU$;!E1rcvo}`j1aN8Wm|RC;nS+B1r!u7)hq>)?^tbU zxoajzPXO^c%~8`9t(*{38PwmaIn{vKQi$j;-x@Q~_9*ikKpwm};$kt)MO)wRXVWpI za51NS8JmmSsi_oZfP4A>{@ZG;O=BD(yp|XcjYag)qfhq(=740)47$$%ly~~>%sef~ zY^IbaQk7Y*NoP{5HE?8~hv|%d18iAaMbZeE+m%Uw!Hjy*gHi%GW`WZBBH|XXh)|B) z2ybPtAJH04StSC|o(yO3yVCa9{SN;Cy#?u=Die1O>tg z`nTiD={q{=ql=TMMvxv;ECQSIYo;(s2*usS6nd-$;QM|6>C2$x)@)FfWRJ{NZ30{e zZkv|7H0oPp64*Lb5ar8BNx=7mEXd~?Yd(Px{^kc@g5;~FZ<(^X`;oiKx@_+u3y*PhqB**#cNfffMW8^e(NWLt~=eQ z!reQF&iF6;HAuyz*};So=#)c0;=n+HqALgj@P7euc#`goIeeb0hajLuWTZ++x5h;x zJ<1-uQf&F6_k}4l|DCi-g)PUK+fygbPU|S+bl)f-O3?AIm+p~gz_Pp(@Xs(#!OYUR zpc5uzxu%#wv+j#Zp%p>*LR8Rk{1xOkH@i4@`q=(?i)3z=N;$&cVqi357*3k!Yiey! z;A$$iqIr;ize}qK9~Mo1&cPk}4{dl@&!UZNlApRisuE3(=BY3c`z>q&S>G@IfBwhUp{dJ-DCi{J09pWftUk~Bgl6_y7UD9*GM&96 zL%twjWU)_{6ah|A$`EAf3ocDD6-$CgQBE(IgDJJR*x*_)z$V1LPc8Buzo^h=k{cKC zR`udEooIZK=Wz97X;o3PWVi|YeA5ioy+8)&XB9w7dV~_Y530a#A}qsYuUYrYA^Y>W z;3s3Zq(&(ANeLw@c$%4_m{O06ohyg&(XRoBR*ym6Np#d}reHTaW1p#>uYk$^dRCsy zVxd^2I-&SdU9sKUWiinMA$&VGsz&-em-UW~UA5@e_UN-)dgrwGA5*(XG(vZ%A|0dp zgL_fym^@xQt{n?SbocIk;V^Xhglt~w$1 zjN}lT#fY*L=c8LxX=Xs}84!-a3cLu@QxA&{^pb#|xTi;QyjVpUE&N9u7j-?KBieB>xGTIv)`RkZ23e%++uEnt|gzozd< zPLr<6zeMB5+SCmx`itw=eC73)+EZV#f+(~oc2@r|?JN>i^j@ik8=8c&nQ3x>-CFeuqZah%o?_l*og= z6*R}9tTlr&3v|c)IAT00qd*2jxcI`4uI^v`$VUNI{e&h3z~6NVgM*-}KGq?IUu)JQ z^Wb`Ko#nm9ZukN>n)9oxe%&%a8j*!AB4x~pp0hLD=pq`J@Kp; z(TP@@tKsglFz~Lc__b!@!qpNN{|ZM91lPb z!;q3C4~`5WX?$ELw_zdqtS#arWk_U)r=@u?7-Bzp)zL|*mAYoF6**Y$DLis43EG*Y zwSn4B2Z}*Ns5WiC|1i#$FSL$A(#ik9W}ZaikYA6}Kt5|nT2q6EG6BCOK^z|X1l!Qc zT|F3s3}O7~9aProKlkoEwmsAs$w_l|27GL%%fuC^T|h0&}5SIPHt- z*lYSv$sM7AitI<8ROE22b}DBeijztY2{Kl_rs@GK+=&9W=|qeSFGaSCYYQgvdMiQn zm|ryu$ZZ>gGA>Y_sM4xORtf|&HX;NEa6wUd3pW}fA}vHEGS*gKi|hVjaw%%9M>T{1 zsJ?Hi5lIBvN{^B)#|AG5MV>8l6s1oqg0#z_D)Vx_4vqohvQ~e=hgpKY$G{cwaB9*B zTNkU3LWmIKKJ}Xg5PIHg0}&6Q6fMi5de8w1~;q5ocB%5G~+Rrq21kXT|#Q|5w zAzD_kXOtKy)fWf4H4GUAf{QEahhsRTN!cI>E_9pzf+>YQO_k5dX7Ry%U{5J$;q45@@!0%F(oBM;PSq$7T%6e66t5wBq)uy5*8wF`kH@>ywG^}*enuZGRlam zY8qi(9Rx*hiq3~1fQTR#i~3=TZ%8`AmGuIINnJ3+tYl{vvD~9hR9nHaE{2PYqT+zF zC4{_5Lmx(*O}+MF08X3N>RUn~eLR<}9`$uFiNVNowiLr}TLULg|*CaPP+BPwIPU>|;O-qm)9tv9^+!uk9ldX=X|Mw-RL0^dU{BW!zhFZqjm9U9RCHGneqJ*!^rr(Lue8ttm zn1DB3Q^Vwx;@zOIf3K@Yw@ST@+sHda%*%^G$O4kCA#^Znlwra(NP9r|ji_DG_Zy>u z<+q{%6Ut7JNnSs##p7;(k_o#mM*4{MSm_PsWknjtXDr`w1{7{1!F5>h5LaV*w7HKwm52#C zM<0$xf$y7q1Pesr=}%LDot~3IPfpsdNv_@LdvVGqQ6(1?#}bY7*yt=RfrgxT{SA69 zfHIF*O5Ii+v$0NzRQ5mN4l7pK3syZ5D6CEn6QYQTwsB*r+@r@O8PYUl zZ|{uM;HSqRwKR{;w3OfW`BAkW&<(e)bb%wejykmpA(hb;opPd&EQtJ2&Y@kZ 
[base85-encoded git binary patch literal data omitted — unreadable binary blob belonging to the preceding patch]
More options diff --git a/examples/server/server.cpp b/examples/server/server.cpp index fd755327a511db..cbf36ad6752b67 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -679,6 +679,7 @@ struct llama_server_context slot->params.n_predict = json_value(data, "n_predict", default_params.n_predict); slot->sparams.top_k = json_value(data, "top_k", default_sparams.top_k); slot->sparams.top_p = json_value(data, "top_p", default_sparams.top_p); + slot->sparams.min_p = json_value(data, "min_p", default_sparams.min_p); slot->sparams.tfs_z = json_value(data, "tfs_z", default_sparams.tfs_z); slot->sparams.typical_p = json_value(data, "typical_p", default_sparams.typical_p); slot->sparams.temp = json_value(data, "temperature", default_sparams.temp); @@ -1113,6 +1114,7 @@ struct llama_server_context {"temp", slot.sparams.temp}, {"top_k", slot.sparams.top_k}, {"top_p", slot.sparams.top_p}, + {"min_p", slot.sparams.min_p}, {"tfs_z", slot.sparams.tfs_z}, {"typical_p", slot.sparams.typical_p}, {"repeat_last_n", slot.sparams.penalty_last_n}, From a75fa576abba9d37f463580c379e4bbf1e1ad03c Mon Sep 17 00:00:00 2001 From: Galunid Date: Thu, 9 Nov 2023 11:09:29 +0100 Subject: [PATCH 079/206] scripts: Generalize convert scripts (#3838) * Replace convert-*-hf-to-gguf.py files with convert-hf-to-gguf.py --- convert-bloom-hf-to-gguf.py | 247 --------- convert-falcon-hf-to-gguf.py | 253 --------- convert-gptneox-hf-to-gguf.py | 221 -------- convert-hf-to-gguf.py | 890 ++++++++++++++++++++++++++++++++ convert-mpt-hf-to-gguf.py | 227 -------- convert-refact-hf-to-gguf.py | 272 ---------- convert-starcoder-hf-to-gguf.py | 210 -------- convert.py | 4 +- mypy.ini | 1 + 9 files changed, 893 insertions(+), 1432 deletions(-) delete mode 100755 convert-bloom-hf-to-gguf.py delete mode 100755 convert-falcon-hf-to-gguf.py delete mode 100755 convert-gptneox-hf-to-gguf.py create mode 100755 convert-hf-to-gguf.py delete mode 100755 convert-mpt-hf-to-gguf.py delete mode 100755 convert-refact-hf-to-gguf.py delete mode 100755 convert-starcoder-hf-to-gguf.py diff --git a/convert-bloom-hf-to-gguf.py b/convert-bloom-hf-to-gguf.py deleted file mode 100755 index 6e866d9434818c..00000000000000 --- a/convert-bloom-hf-to-gguf.py +++ /dev/null @@ -1,247 +0,0 @@ -#!/usr/bin/env python3 -# HF bloom --> gguf conversion - -from __future__ import annotations - -import argparse -import json -import os -import re -import struct -import sys -from pathlib import Path -from typing import Any - -import numpy as np -import torch -from transformers import AutoTokenizer # type: ignore[import] - -if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf')) -import gguf - - -def count_model_parts(dir_model: Path) -> int: - num_parts = 0 - for filename in os.listdir(dir_model): - if filename.startswith("pytorch_model-"): - num_parts += 1 - - if num_parts > 0: - print("gguf: found " + str(num_parts) + " model parts") - return num_parts - - -# Supported Models: -# https://huggingface.co/bigscience/bloom-1b7 -# https://huggingface.co/bigscience/bloom-3b -# https://huggingface.co/bigscience/bloom-7b1 -# https://huggingface.co/Langboat/bloom-1b4-zh -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser(description="Convert a Bloom model to a GGML compatible file") - parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab") - parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") - 
parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.bin)") - parser.add_argument("ftype", type=int, help="output format - use 0 for float32, 1 for float16", choices=[0, 1], default = 1) - return parser.parse_args() - -args = parse_args() - -dir_model = args.model -ftype = args.ftype -if not dir_model.is_dir(): - print(f'Error: {args.model} is not a directory', file = sys.stderr) - sys.exit(1) - -# possible tensor data types -# ftype == 0 -> float32 -# ftype == 1 -> float16 - -# map from ftype to string -ftype_str = ["f32", "f16"] - -if args.outfile is not None: - fname_out = args.outfile -else: - # output in the same directory as the model by default - fname_out = dir_model / f'ggml-model-{ftype_str[ftype]}.gguf' - -print("gguf: loading model "+dir_model.name) - -with open(dir_model / "config.json", "r", encoding="utf-8") as f: - hparams = json.load(f) - -if hparams["architectures"][0] != "BloomForCausalLM": - print("Model architecture not supported: " + hparams["architectures"][0]) - sys.exit(1) - -# get number of model parts -num_parts = count_model_parts(dir_model) - -ARCH=gguf.MODEL_ARCH.BLOOM -gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH]) - -print("gguf: get model metadata") - -block_count = hparams["n_layer"] - -gguf_writer.add_name("Bloom") -n_embed = hparams.get("hidden_size", hparams.get("n_embed")) -n_head = hparams.get("n_head", hparams.get("num_attention_heads")) -gguf_writer.add_context_length(hparams.get("seq_length", n_embed)) -gguf_writer.add_embedding_length(n_embed) -gguf_writer.add_feed_forward_length(4 * n_embed) -gguf_writer.add_block_count(block_count) -gguf_writer.add_head_count(n_head) -gguf_writer.add_head_count_kv(n_head) -gguf_writer.add_layer_norm_eps(hparams["layer_norm_epsilon"]) -gguf_writer.add_file_type(ftype) - -# TOKENIZATION - -print("gguf: get tokenizer metadata") - -tokens: list[bytearray] = [] -scores: list[float] = [] -toktypes: list[int] = [] - -# gpt2 tokenizer -gguf_writer.add_tokenizer_model("gpt2") - -print("gguf: get gpt2 tokenizer vocab") - -# ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py -tokenizer = AutoTokenizer.from_pretrained(dir_model) - -# The number of tokens in tokenizer.json can differ from the expected vocab size. 
-# This causes downstream issues with mismatched tensor sizes when running the inference -vocab_size = hparams.get("vocab_size", len(tokenizer.vocab)) -assert max(tokenizer.vocab.values()) < vocab_size - -added_vocab = tokenizer.get_added_vocab() -reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()} - -for i in range(vocab_size): - if i not in reverse_vocab: - tokens.append(f"[PAD{i}]") - toktypes.append(gguf.TokenType.USER_DEFINED) - elif reverse_vocab[i] in added_vocab: - tokens.append(reverse_vocab[i]) - if tokenizer.added_tokens_decoder[i].special: - toktypes.append(gguf.TokenType.CONTROL) - else: - toktypes.append(gguf.TokenType.USER_DEFINED) - else: - tokens.append(reverse_vocab[i]) - toktypes.append(gguf.TokenType.NORMAL) - -gguf_writer.add_token_list(tokens) -gguf_writer.add_token_types(toktypes) - -special_vocab = gguf.SpecialVocab(dir_model, load_merges=True, n_vocab = len(tokens)) -special_vocab.add_to_gguf(gguf_writer) - -# TENSORS - -tensor_map = gguf.get_tensor_name_map(ARCH, block_count) - -# params for qkv transform -n_head_kv = hparams.get("n_head_kv", n_head) -head_dim = n_embed // n_head - -# tensor info -print("gguf: get tensor metadata") - -if num_parts == 0: - part_names = iter(("pytorch_model.bin",)) -else: - part_names = ( - f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1) - ) - -for part_name in part_names: - if args.vocab_only: - break - print("gguf: loading model part '" + part_name + "'") - model_part = torch.load(dir_model / part_name, map_location="cpu") - - has_lm_head = True - if "lm_head.weight" not in model_part.keys() and "output.weight" not in model_part.keys(): - has_lm_head = False - - for original_name in model_part.keys(): - data = model_part[original_name] - name = re.sub(r'transformer\.', '', original_name) - - old_dtype = data.dtype - - # convert any unsupported data types to float32 - if data.dtype != torch.float16 and data.dtype != torch.float32: - data = data.to(torch.float32) - - data = data.squeeze().numpy() - - if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name): - # Map bloom-style qkv_linear to gpt-style qkv_linear - # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa - # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa - qkv_weights = data.reshape((n_head, 3, n_embed // n_head, n_embed)) - data = np.concatenate( - (qkv_weights[:, 0, :, :].reshape((-1, n_embed)), - qkv_weights[:, 1, :, :].reshape((-1, n_embed)), - qkv_weights[:, 2, :, :].reshape((-1, n_embed))), - axis=0 - ) - print("re-format attention.linear_qkv.weight") - elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name): - qkv_bias = data.reshape((n_head, 3, n_embed // n_head)) - data = np.concatenate( - (qkv_bias[:, 0, :].reshape((n_embed,)), - qkv_bias[:, 1, :].reshape((n_embed,)), - qkv_bias[:, 2, :].reshape((n_embed,))), - axis=0 - ) - print("re-format attention.linear_qkv.bias") - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) - if new_name is None: - print("Can not map tensor '" + name + "'") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - print(name, "=>", new_name + ", shape = " + str(data.shape) + ", " + str(old_dtype) + " --> " + str(data.dtype)) - - gguf_writer.add_tensor(new_name, data) - - if not has_lm_head and name == "word_embeddings.weight": - gguf_writer.add_tensor("output.weight", data) - print(name, "=>", "output.weight" + ", shape = " + str(data.shape) + ", " + str(old_dtype) + " --> " + str(data.dtype)) # noqa - - -print("gguf: write header") -gguf_writer.write_header_to_file() -print("gguf: write metadata") -gguf_writer.write_kv_data_to_file() -if not args.vocab_only: - print("gguf: write tensors") - gguf_writer.write_tensors_to_file() - -gguf_writer.close() - -print(f"gguf: model successfully exported to '{fname_out}'") -print("") diff --git a/convert-falcon-hf-to-gguf.py b/convert-falcon-hf-to-gguf.py deleted file mode 100755 index 8e8f3c3f8f1e05..00000000000000 --- a/convert-falcon-hf-to-gguf.py +++ /dev/null @@ -1,253 +0,0 @@ -#!/usr/bin/env python3 -# HF falcon--> gguf conversion - -from __future__ import annotations - -import argparse -import contextlib -import json -import os -import struct -import sys -from pathlib import Path -from typing import Any - -import numpy as np -import torch -from transformers import AutoTokenizer # type: ignore[import] - -if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf')) -import gguf - - -def count_model_parts(dir_model: Path, prefix: str) -> int: - num_parts = 0 - for filename in os.listdir(dir_model): - if filename.startswith(prefix): - num_parts += 1 - - if num_parts > 0: - print("gguf: found " + str(num_parts) + " model parts") - return num_parts - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser(description="Convert a Falcon model to a GGML compatible file") - parser.add_argument( - "--vocab-only", action="store_true", - help="extract only the vocab", - ) - parser.add_argument( - "--outfile", type=Path, - help="path to write to; default: based on input", - ) - parser.add_argument( - "model", type=Path, - help="directory containing model file, or model file itself (*.bin)", - ) - parser.add_argument( - "ftype", type=int, choices=[0, 1], default=1, nargs='?', - help="output format - use 0 for float32, 1 for float16", - ) - return parser.parse_args() - -args = parse_args() - -dir_model = args.model -ftype = args.ftype -if not dir_model.is_dir(): - print(f'Error: {args.model} is not a directory', file = sys.stderr) - sys.exit(1) - -# possible tensor data types -# ftype == 0 -> float32 -# ftype == 1 -> float16 - -# map from ftype to string -ftype_str = ["f32", "f16"] - -if args.outfile is not None: - fname_out = args.outfile -else: - # output in the same directory as the model by default - fname_out = dir_model / f'ggml-model-{ftype_str[ftype]}.gguf' - -print("gguf: loading model "+dir_model.name) - -with open(dir_model / "config.json", "r", encoding="utf-8") as f: - hparams = json.load(f) - -if hparams["architectures"][0] not in ("RWForCausalLM", "FalconForCausalLM"): - print("Model architecture not supported: " + hparams["architectures"][0]) - - sys.exit(1) - -# get number of model parts -num_parts = count_model_parts(dir_model, 
"model-00") -if num_parts: - is_safetensors = True - from safetensors import safe_open -else: - is_safetensors = False - num_parts = count_model_parts(dir_model, "pytorch_model-") - -ARCH=gguf.MODEL_ARCH.FALCON -gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH]) - -print("gguf: get model metadata") - -block_count = hparams.get("num_hidden_layers") -if block_count is None: - block_count = hparams["n_layer"] # old name - -n_head = hparams.get("num_attention_heads") -if n_head is None: - n_head = hparams["n_head"] # old name - -n_head_kv = hparams.get("num_kv_heads") -if n_head_kv is None: - n_head_kv = hparams.get("n_head_kv", 1) # old name - -gguf_writer.add_name("Falcon") -gguf_writer.add_context_length(2048) # not in config.json -gguf_writer.add_tensor_data_layout("jploski") # qkv tensor transform -gguf_writer.add_embedding_length(hparams["hidden_size"]) -gguf_writer.add_feed_forward_length(4 * hparams["hidden_size"]) -gguf_writer.add_block_count(block_count) -gguf_writer.add_head_count(n_head) -gguf_writer.add_head_count_kv(n_head_kv) -gguf_writer.add_layer_norm_eps(hparams["layer_norm_epsilon"]) -gguf_writer.add_file_type(ftype) - -# TOKENIZATION - -print("gguf: get tokenizer metadata") - -tokens: list[bytearray] = [] -scores: list[float] = [] -toktypes: list[int] = [] - -# gpt2 tokenizer -gguf_writer.add_tokenizer_model("gpt2") - -print("gguf: get gpt2 tokenizer vocab") - -# ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py -tokenizer = AutoTokenizer.from_pretrained(dir_model) - -# The number of tokens in tokenizer.json can differ from the expected vocab size. -# This causes downstream issues with mismatched tensor sizes when running the inference -vocab_size = hparams.get("vocab_size", len(tokenizer.vocab)) -assert max(tokenizer.vocab.values()) < vocab_size - -reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()} - -for i in range(vocab_size): - tokens.append(reverse_vocab[i]) - scores.append(0.0) # dummy - toktypes.append(gguf.TokenType.NORMAL) - -gguf_writer.add_token_list(tokens) -gguf_writer.add_token_scores(scores) -gguf_writer.add_token_types(toktypes) - -special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens)) -special_vocab.add_to_gguf(gguf_writer) - -# TENSORS - -tensor_map = gguf.get_tensor_name_map(ARCH,block_count) - -head_dim = hparams["hidden_size"] // n_head - -# tensor info -print("gguf: get tensor metadata") - -if num_parts == 0: - part_names = iter(("pytorch_model.bin",)) -elif is_safetensors: - part_names = ( - f"model-{n:05}-of-{num_parts:05}.safetensors" for n in range(1, num_parts + 1) - ) -else: - part_names = ( - f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1) - ) - -for part_name in part_names: - if args.vocab_only: - break - print("gguf: loading model part '" + part_name + "'") - if is_safetensors: - ctx = safe_open(dir_model / part_name, framework="pt", device="cpu") - else: - ctx = contextlib.nullcontext(torch.load(dir_model / part_name, map_location="cpu")) - - with ctx as model_part: - for name in model_part.keys(): - data = model_part.get_tensor(name) if is_safetensors else model_part[name] - - old_dtype = data.dtype - - # convert any unsupported data types to float32 - if data.dtype != torch.float16 and data.dtype != torch.float32: - data = data.to(torch.float32) - - # QKV tensor transform - # The original query_key_value tensor contains n_head_kv "kv groups", - # each consisting of n_head/n_head_kv query weights followed by 
one key - # and one value weight (shared by all query heads in the kv group). - # This layout makes it a big pain to work with in GGML. - # So we rearrange them here,, so that we have n_head query weights - # followed by n_head_kv key weights followed by n_head_kv value weights, - # in contiguous fashion. - # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py - - if "query_key_value" in name: - qkv = data.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head) - q = qkv[:, :-2 ].reshape(n_head * head_dim, head_dim * n_head) - k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head) - v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head) - data = torch.cat((q,k,v)).reshape_as(data) - - data = data.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias")) - if new_name is None: - print("Can not map tensor '" + name + "'") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 - if ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - print(new_name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype)) - - gguf_writer.add_tensor(new_name, data) - - -print("gguf: write header") -gguf_writer.write_header_to_file() -print("gguf: write metadata") -gguf_writer.write_kv_data_to_file() -if not args.vocab_only: - print("gguf: write tensors") - gguf_writer.write_tensors_to_file() - -gguf_writer.close() - -print(f"gguf: model successfully exported to '{fname_out}'") -print("") diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py deleted file mode 100755 index 02d1fdf164eea1..00000000000000 --- a/convert-gptneox-hf-to-gguf.py +++ /dev/null @@ -1,221 +0,0 @@ -#!/usr/bin/env python3 -# HF gptneox--> gguf conversion - -from __future__ import annotations - -import argparse -import json -import os -import struct -import sys -from pathlib import Path -from typing import Any - -import numpy as np -import torch -from transformers import AutoTokenizer # type: ignore[import] - -if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf')) -import gguf - - -def count_model_parts(dir_model: Path) -> int: - num_parts = 0 - for filename in os.listdir(dir_model): - if filename.startswith("pytorch_model-"): - num_parts += 1 - - if num_parts > 0: - print("gguf: found " + str(num_parts) + " model parts") - return num_parts - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser(description="Convert a GPT-NeoX model to a GGML compatible file") - parser.add_argument( - "--vocab-only", action="store_true", - help="extract only the vocab", - ) - parser.add_argument( - "--outfile", type=Path, - help="path to write to; default: based on input", - ) - parser.add_argument( - "model", type=Path, - help="directory containing model file, or model file itself (*.bin)", - ) - parser.add_argument( - "ftype", type=int, choices=[0, 1], default=1, nargs='?', - help="output format - use 0 for float32, 1 for 
float16", - ) - return parser.parse_args() - -args = parse_args() - -dir_model = args.model -ftype = args.ftype -if not dir_model.is_dir(): - print(f'Error: {args.model} is not a directory', file = sys.stderr) - sys.exit(1) - -# possible tensor data types -# ftype == 0 -> float32 -# ftype == 1 -> float16 - -# map from ftype to string -ftype_str = ["f32", "f16"] - -if args.outfile is not None: - fname_out = args.outfile -else: - # output in the same directory as the model by default - fname_out = dir_model / f'ggml-model-{ftype_str[ftype]}.gguf' - -print("gguf: loading model "+dir_model.name) - -with open(dir_model / "config.json", "r", encoding="utf-8") as f: - hparams = json.load(f) - -if hparams["architectures"][0] != "GPTNeoXForCausalLM": - print("Model architecture not supported: " + hparams["architectures"][0]) - - sys.exit() - -# get number of model parts -num_parts = count_model_parts(dir_model) - -ARCH=gguf.MODEL_ARCH.GPTNEOX -gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH]) - -print("gguf: get model metadata") - -block_count = hparams["num_hidden_layers"] - -gguf_writer.add_name(dir_model.name) -gguf_writer.add_context_length(hparams["max_position_embeddings"]) -gguf_writer.add_embedding_length(hparams["hidden_size"]) -gguf_writer.add_block_count(block_count) -gguf_writer.add_feed_forward_length(hparams["intermediate_size"]) -gguf_writer.add_rope_dimension_count(int(hparams["rotary_pct"]*(hparams["hidden_size"]//hparams["num_attention_heads"]))) -gguf_writer.add_head_count(hparams["num_attention_heads"]) -gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True) -gguf_writer.add_layer_norm_eps(hparams["layer_norm_eps"]) - -# TOKENIZATION - -print("gguf: get tokenizer metadata") - -tokens: list[bytearray] = [] -scores: list[float] = [] -toktypes: list[int] = [] - -# gpt2 tokenizer -gguf_writer.add_tokenizer_model("gpt2") - -print("gguf: get gpt2 tokenizer vocab") - -# ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py -tokenizer = AutoTokenizer.from_pretrained(dir_model) - -# The number of tokens in tokenizer.json can differ from the expected vocab size. 
-# This causes downstream issues with mismatched tensor sizes when running the inference -vocab_size = hparams.get("vocab_size", len(tokenizer.vocab)) -assert max(tokenizer.vocab.values()) < vocab_size - -added_vocab = tokenizer.get_added_vocab() -reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()} - -for i in range(vocab_size): - if i not in reverse_vocab: - tokens.append(f"[PAD{i}]") - toktypes.append(gguf.TokenType.USER_DEFINED) - elif reverse_vocab[i] in added_vocab: - tokens.append(reverse_vocab[i]) - if tokenizer.added_tokens_decoder[i].special: - toktypes.append(gguf.TokenType.CONTROL) - else: - toktypes.append(gguf.TokenType.USER_DEFINED) - else: - tokens.append(reverse_vocab[i]) - toktypes.append(gguf.TokenType.NORMAL) - -gguf_writer.add_token_list(tokens) -gguf_writer.add_token_types(toktypes) - -special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens)) -special_vocab.add_to_gguf(gguf_writer) - -# TENSORS - -tensor_map = gguf.get_tensor_name_map(ARCH,block_count) - -# tensor info -print("gguf: get tensor metadata") - -if num_parts == 0: - part_names = iter(("pytorch_model.bin",)) -else: - part_names = ( - f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1) - ) - -for part_name in part_names: - if args.vocab_only: - break - print("gguf: loading model part '" + part_name + "'") - model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu") - - for name in model_part.keys(): - data = model_part[name] - - # we don't need these - if name.endswith(".attention.masked_bias") or name.endswith(".attention.bias") or name.endswith(".attention.rotary_emb.inv_freq"): - continue - - old_dtype = data.dtype - - # convert any unsupported data types to float32 - if data.dtype != torch.float16 and data.dtype != torch.float32: - data = data.to(torch.float32) - - data = data.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias")) - if new_name is None: - print("Can not map tensor '" + name + "'") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - print(new_name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype)) - - gguf_writer.add_tensor(new_name, data) - - -print("gguf: write header") -gguf_writer.write_header_to_file() -print("gguf: write metadata") -gguf_writer.write_kv_data_to_file() -if not args.vocab_only: - print("gguf: write tensors") - gguf_writer.write_tensors_to_file() - -gguf_writer.close() - -print(f"gguf: model successfully exported to '{fname_out}'") -print("") diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py new file mode 100755 index 00000000000000..f7fe29fd4262ac --- /dev/null +++ b/convert-hf-to-gguf.py @@ -0,0 +1,890 @@ +#!/usr/bin/env python3 + +from __future__ import annotations + +import argparse +import contextlib +import json +import os +import re +import sys +from enum import IntEnum +from pathlib import Path +from typing import TYPE_CHECKING, Any, ContextManager, Iterator, cast + +import numpy as np +import torch + +if TYPE_CHECKING: + from torch import Tensor + +if 'NO_LOCAL_GGUF' not in os.environ: + sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) +import gguf + + +###### MODEL DEFINITIONS ###### + +class SentencePieceTokenTypes(IntEnum): + NORMAL = 1 + UNKNOWN = 2 + CONTROL = 3 + USER_DEFINED = 4 + UNUSED = 5 + BYTE = 6 + + +class Model: + def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: bool): + self.dir_model = dir_model + self.ftype = ftype + self.fname_out = fname_out + self.is_big_endian = is_big_endian + self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE + self.is_safetensors = self._is_model_safetensors() + self.num_parts = Model.count_model_parts(self.dir_model, ".safetensors" if self.is_safetensors else ".bin") + self.part_names = self._get_part_names() + self.hparams = Model.load_hparams(self.dir_model) + self.model_arch = self._get_model_architecture() + self.gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess) + + def set_vocab(self): + self._set_vocab_gpt2() + + def get_tensors(self) -> Iterator[tuple[str, Tensor]]: + for part_name in self.part_names: + print(f"gguf: loading model part '{part_name}'") + ctx: ContextManager[Any] + if self.is_safetensors: + from safetensors import safe_open + ctx = cast(ContextManager[Any], safe_open(self.dir_model / part_name, framework="pt", device="cpu")) + else: + ctx = contextlib.nullcontext(torch.load(self.dir_model / part_name, map_location="cpu")) + + with ctx as model_part: + for name in model_part.keys(): + data = model_part.get_tensor(name) if self.is_safetensors else model_part[name] + yield name, data + + def set_gguf_parameters(self): + self.gguf_writer.add_name(self.dir_model.name) + self.gguf_writer.add_block_count(self.hparams.get( + "n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")), + )) + if (n_ctx := self.hparams.get("max_position_embeddings")) is not None: + self.gguf_writer.add_context_length(n_ctx) + if (n_embd := self.hparams.get("hidden_size")) is not None: + self.gguf_writer.add_embedding_length(n_embd) + if (n_ff := self.hparams.get("intermediate_size")) is not None: + 
self.gguf_writer.add_feed_forward_length(n_ff) + if (n_head := self.hparams.get("num_attention_head")) is not None: + self.gguf_writer.add_head_count(n_head) + self.gguf_writer.add_parallel_residual(self.hparams.get("use_parallel_residual", True)) + + def write_tensors(self): + block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) + tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + for name, data_torch in self.get_tensors(): + # we don't need these + if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")): + continue + + old_dtype = data_torch.dtype + + # convert any unsupported data types to float32 + if data_torch.dtype not in (torch.float16, torch.float32): + data_torch = data_torch.to(torch.float32) + + data = data_torch.squeeze().numpy() + + # map tensor names + new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) + if new_name is None: + print(f"Can not map tensor {name!r}") + sys.exit() + + n_dims = len(data.shape) + data_dtype = data.dtype + + # if f32 desired, convert any float16 to float32 + if self.ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) + + # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 + if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: + data = data.astype(np.float32) + + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: + data = data.astype(np.float16) + + print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + + self.gguf_writer.add_tensor(new_name, data) + + def write(self): + self.write_tensors() + self.gguf_writer.write_header_to_file() + self.gguf_writer.write_kv_data_to_file() + self.gguf_writer.write_tensors_to_file() + self.gguf_writer.close() + + def write_vocab(self): + self.gguf_writer.write_header_to_file() + self.gguf_writer.write_kv_data_to_file() + self.gguf_writer.close() + + @staticmethod + def count_model_parts(dir_model: Path, prefix: str) -> int: + num_parts = 0 + for filename in os.listdir(dir_model): + if filename.endswith(prefix): + num_parts += 1 + + return num_parts + + @staticmethod + def load_hparams(dir_model): + with open(dir_model / "config.json", "r", encoding="utf-8") as f: + return json.load(f) + + @staticmethod + def from_model_architecture(model_architecture): + if model_architecture == "StableLMEpochForCausalLM": + return StableLMModel + if model_architecture == "GPTNeoXForCausalLM": + return GPTNeoXModel + if model_architecture == "BloomForCausalLM": + return BloomModel + if model_architecture == "MPTForCausalLM": + return MPTModel + if model_architecture in ("BaichuanForCausalLM", "BaiChuanForCausalLM"): + return BaichuanModel + if model_architecture in ("FalconForCausalLM", "RWForCausalLM"): + return FalconModel + if model_architecture == "GPTBigCodeForCausalLM": + return StarCoderModel + if model_architecture == "GPTRefactForCausalLM": + return RefactModel + if model_architecture == "PersimmonForCausalLM": + return PersimmonModel + return Model + + def _is_model_safetensors(self) -> bool: + return Model.count_model_parts(self.dir_model, ".safetensors") > 0 + + def _get_part_names(self): + if self.is_safetensors: + if self.num_parts == 1: # there's only one .safetensors file + return ("model.safetensors",) + return (f"model-{n:05}-of-{self.num_parts:05}.safetensors" for n 
in range(1, self.num_parts + 1)) + + if self.num_parts == 1: # there's only one .bin file + return ("pytorch_model.bin",) + return (f"pytorch_model-{n:05}-of-{self.num_parts:05}.bin" for n in range(1, self.num_parts + 1)) + + def _get_model_architecture(self) -> gguf.MODEL_ARCH: + arch = self.hparams["architectures"][0] + if arch == "GPTNeoXForCausalLM": + return gguf.MODEL_ARCH.GPTNEOX + if arch == "BloomForCausalLM": + return gguf.MODEL_ARCH.BLOOM + if arch == "MPTForCausalLM": + return gguf.MODEL_ARCH.MPT + if arch in ("BaichuanForCausalLM", "BaiChuanForCausalLM"): + return gguf.MODEL_ARCH.BAICHUAN + if arch == "FalconForCausalLM": + return gguf.MODEL_ARCH.FALCON + if arch == "GPTBigCodeForCausalLM": + return gguf.MODEL_ARCH.STARCODER + if arch == "GPTRefactForCausalLM": + return gguf.MODEL_ARCH.REFACT + if arch == "PersimmonForCausalLM": + return gguf.MODEL_ARCH.PERSIMMON + + raise NotImplementedError(f'Architecture "{arch}" not supported!') + + def _set_vocab_gpt2(self): + dir_model = self.dir_model + hparams = self.hparams + tokens: list[bytearray] = [] + toktypes: list[int] = [] + + from transformers import AutoTokenizer # type: ignore[attr-defined] + tokenizer = AutoTokenizer.from_pretrained(dir_model) + vocab_size = hparams.get("vocab_size", len(tokenizer.vocab)) + assert max(tokenizer.vocab.values()) < vocab_size + + reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()} + added_vocab = tokenizer.get_added_vocab() + + for i in range(vocab_size): + if i not in reverse_vocab: + pad_token = f"[PAD{i}]".encode('utf-8') + tokens.append(bytearray(pad_token)) + toktypes.append(gguf.TokenType.USER_DEFINED) + elif reverse_vocab[i] in added_vocab: + tokens.append(reverse_vocab[i]) + if tokenizer.added_tokens_decoder[i].special: + toktypes.append(gguf.TokenType.CONTROL) + else: + toktypes.append(gguf.TokenType.USER_DEFINED) + else: + tokens.append(reverse_vocab[i]) + toktypes.append(gguf.TokenType.NORMAL) + + self.gguf_writer.add_tokenizer_model("gpt2") + self.gguf_writer.add_token_list(tokens) + self.gguf_writer.add_token_types(toktypes) + + special_vocab = gguf.SpecialVocab(dir_model, load_merges=True) + special_vocab.add_to_gguf(self.gguf_writer) + + def _set_vocab_sentencepiece(self): + from sentencepiece import SentencePieceProcessor + + tokenizer_path = self.dir_model / 'tokenizer.model' + + tokens: list[bytes] = [] + scores: list[float] = [] + toktypes: list[int] = [] + + if not tokenizer_path.is_file(): + print(f'Error: Missing {tokenizer_path}', file=sys.stderr) + sys.exit(1) + + tokenizer = SentencePieceProcessor(str(tokenizer_path)) + vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) + + for token_id in range(vocab_size): + piece = tokenizer.id_to_piece(token_id) + text = piece.encode("utf-8") + score = tokenizer.get_score(token_id) + + toktype = SentencePieceTokenTypes.NORMAL + if tokenizer.is_unknown(token_id): + toktype = SentencePieceTokenTypes.UNKNOWN + elif tokenizer.is_control(token_id): + toktype = SentencePieceTokenTypes.CONTROL + elif tokenizer.is_unused(token_id): + toktype = SentencePieceTokenTypes.UNUSED + elif tokenizer.is_byte(token_id): + toktype = SentencePieceTokenTypes.BYTE + + tokens.append(text) + scores.append(score) + toktypes.append(toktype) + + added_tokens_file = self.dir_model / 'added_tokens.json' + if added_tokens_file.is_file(): + with open(added_tokens_file, "r", encoding="utf-8") as f: + added_tokens_json = json.load(f) + + for key in added_tokens_json: + tokens.append(key.encode("utf-8")) + 
scores.append(-1000.0) + toktypes.append(SentencePieceTokenTypes.USER_DEFINED) + + self.gguf_writer.add_tokenizer_model("llama") + self.gguf_writer.add_token_list(tokens) + self.gguf_writer.add_token_scores(scores) + self.gguf_writer.add_token_types(toktypes) + + special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) + special_vocab.add_to_gguf(self.gguf_writer) + + +class StableLMModel(Model): + def set_gguf_parameters(self): + super().set_gguf_parameters() + self.gguf_writer.add_rope_dimension_count( + int(self.hparams["rope_pct"] * (self.hparams["hidden_size"] // self.hparams["num_attention_heads"])), + ) + self.gguf_writer.add_layer_norm_eps(1e-5) + + +class GPTNeoXModel(Model): + def set_gguf_parameters(self): + block_count = self.hparams["num_hidden_layers"] + + self.gguf_writer.add_name(self.dir_model.name) + self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"]) + self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) + self.gguf_writer.add_rope_dimension_count( + int(self.hparams["rotary_pct"] * (self.hparams["hidden_size"] // self.hparams["num_attention_heads"])), + ) + self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) + self.gguf_writer.add_parallel_residual(self.hparams.get("use_parallel_residual", True)) + self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"]) + + +class BloomModel(Model): + def set_gguf_parameters(self): + self.gguf_writer.add_name("Bloom") + n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) + n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) + self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed)) + self.gguf_writer.add_embedding_length(n_embed) + self.gguf_writer.add_feed_forward_length(4 * n_embed) + self.gguf_writer.add_block_count(self.hparams["n_layer"]) + self.gguf_writer.add_head_count(n_head) + self.gguf_writer.add_head_count_kv(n_head) + self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) + self.gguf_writer.add_file_type(self.ftype) + + def write_tensors(self): + block_count = self.hparams["n_layer"] + tensors = dict(self.get_tensors()) + tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + has_lm_head = True + n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) + n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) + + for name, data_torch in tensors.items(): + if "lm_head.weight" not in tensors.keys() and "output.weight" not in tensors.keys(): + has_lm_head = False + + name = re.sub(r'transformer\.', '', name) + + old_dtype = data_torch.dtype + + # convert any unsupported data types to float32 + if data_torch.dtype not in (torch.float16, torch.float32): + data_torch = data_torch.to(torch.float32) + + data = data_torch.squeeze().numpy() + + if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name): + # Map bloom-style qkv_linear to gpt-style qkv_linear + # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa + # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa + qkv_weights = data.reshape((n_head, 3, n_embed // n_head, n_embed)) + data = np.concatenate( + ( + qkv_weights[:, 0, :, :].reshape((-1, n_embed)), + qkv_weights[:, 1, :, :].reshape((-1, 
n_embed)), + qkv_weights[:, 2, :, :].reshape((-1, n_embed)), + ), + axis=0, + ) + print("re-format attention.linear_qkv.weight") + elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name): + qkv_bias = data.reshape((n_head, 3, n_embed // n_head)) + data = np.concatenate( + ( + qkv_bias[:, 0, :].reshape((n_embed,)), + qkv_bias[:, 1, :].reshape((n_embed,)), + qkv_bias[:, 2, :].reshape((n_embed,)), + ), + axis=0, + ) + print("re-format attention.linear_qkv.bias") + + # map tensor names + new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) + if new_name is None: + print(f"Can not map tensor {name!r}") + sys.exit() + + n_dims = len(data.shape) + data_dtype = data.dtype + + # if f32 desired, convert any float16 to float32 + if self.ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) + + # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 + if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: + data = data.astype(np.float32) + + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: + data = data.astype(np.float16) + + print(f"=> {new_name}, shape = {data.shape}, {old_dtype} --> {data.dtype}") + + self.gguf_writer.add_tensor(new_name, data) + + if not has_lm_head and name == "word_embeddings.weight": + self.gguf_writer.add_tensor("output.weight", data) + print(name, f"=> output.weight, shape = {data.shape}, {old_dtype} --> {data.dtype}") + + +class MPTModel(Model): + def set_gguf_parameters(self): + block_count = self.hparams["n_layers"] + self.gguf_writer.add_name(self.dir_model.name) + self.gguf_writer.add_context_length(self.hparams["max_seq_len"]) + self.gguf_writer.add_embedding_length(self.hparams["d_model"]) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_feed_forward_length(4 * self.hparams["d_model"]) + self.gguf_writer.add_head_count(self.hparams["n_heads"]) + if kv_n_heads := self.hparams["attn_config"].get("kv_n_heads"): + self.gguf_writer.add_head_count_kv(kv_n_heads) + self.gguf_writer.add_layer_norm_eps(1e-5) + if self.hparams["attn_config"]["clip_qkv"] is not None: + self.gguf_writer.add_clamp_kqv(self.hparams["attn_config"]["clip_qkv"]) + self.gguf_writer.add_max_alibi_bias(self.hparams["attn_config"]["alibi_bias_max"]) + + def write_tensors(self): + block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers")) + tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + for name, data_torch in self.get_tensors(): + # we don't need these + if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")): + continue + + old_dtype = data_torch.dtype + + # convert any unsupported data types to float32 + if data_torch.dtype not in (torch.float16, torch.float32): + data_torch = data_torch.to(torch.float32) + + data = data_torch.squeeze().numpy() + + # map tensor names + new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) + if new_name is None: + print(f"Can not map tensor {name!r}") + sys.exit() + + n_dims = len(data.shape) + data_dtype = data.dtype + + # if f32 desired, convert any float16 to float32 + if self.ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) + + # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 + if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: + data = data.astype(np.float32) + + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: + data = data.astype(np.float16) + + print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + + self.gguf_writer.add_tensor(new_name, data) + + # note: MPT output is tied to (same as) wte in original model; + # for easier implementation in llama.cpp it's duplicated in GGUF, though :/ + if new_name == "token_embd.weight": + self.gguf_writer.add_tensor("output.weight", data) + + +class BaichuanModel(Model): + def set_vocab(self): + self._set_vocab_sentencepiece() + + def set_gguf_parameters(self): + block_count = self.hparams["num_hidden_layers"] + head_count = self.hparams["num_attention_heads"] + head_count_kv = self.hparams.get("num_key_value_heads", head_count) + hf_repo = self.hparams.get("_name_or_path", "") + + ctx_length = 0 + if "max_sequence_length" in self.hparams: + ctx_length = self.hparams["max_sequence_length"] + elif "max_position_embeddings" in self.hparams: + ctx_length = self.hparams["max_position_embeddings"] + elif "model_max_length" in self.hparams: + ctx_length = self.hparams["model_max_length"] + else: + print("gguf: can not find ctx length parameter.") + sys.exit() + + self.gguf_writer.add_name(self.dir_model.name) + self.gguf_writer.add_source_hf_repo(hf_repo) + self.gguf_writer.add_tensor_data_layout("Meta AI original pth") + self.gguf_writer.add_context_length(ctx_length) + self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) + self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) + self.gguf_writer.add_head_count(head_count) + self.gguf_writer.add_head_count_kv(head_count_kv) + self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) + + if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]: + if self.hparams["rope_scaling"].get("type") == "linear": + self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) + self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) + + def write_tensors(self): + # Collect tensors from generator object + model_kv = dict(self.get_tensors()) + block_count = self.hparams["num_hidden_layers"] + head_count = self.hparams["num_attention_heads"] + tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + head_count_kv = self.hparams.get("num_key_value_heads", head_count) + + for i in range(block_count): + if (w := model_kv.get(f"model.layers.{i}.self_attn.W_pack.weight")) is not None: + print(f"Unpacking and permuting layer {i}") + model_kv[f"model.layers.{i}.self_attn.q_proj.weight"] = \ + self._reverse_hf_permute_part(w, 0, head_count, head_count) + model_kv[f"model.layers.{i}.self_attn.k_proj.weight"] = \ + self._reverse_hf_permute_part(w, 1, head_count, head_count_kv) + model_kv[f"model.layers.{i}.self_attn.v_proj.weight"] = \ + self._reverse_hf_part(w, 2) + del model_kv[f"model.layers.{i}.self_attn.W_pack.weight"] + + for name, data_torch in model_kv.items(): + # we don't need these + if name.endswith(".rotary_emb.inv_freq"): + continue + + old_dtype = data_torch.dtype + + # convert any unsupported data types 
to float32 + if data_torch.dtype not in (torch.float16, torch.float32): + data_torch = data_torch.to(torch.float32) + + data = data_torch.squeeze().numpy() + + # map tensor names + new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) + if new_name is None: + print(f"Can not map tensor {name!r}") + sys.exit() + + n_dims = len(data.shape) + data_dtype = data.dtype + + # if f32 desired, convert any float16 to float32 + if self.ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) + + # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 + if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: + data = data.astype(np.float32) + + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: + data = data.astype(np.float16) + + print(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + self.gguf_writer.add_tensor(new_name, data) + + def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor: + if n_kv_head is not None and n_head != n_kv_head: + n_head //= n_kv_head + + return ( + weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) + .swapaxes(1, 2) + .reshape(weights.shape) + ) + + def _reverse_hf_permute_part( + self, weights: Tensor, n_part: int, n_head: int, n_head_kv: int | None = None, + ) -> Tensor: + r = weights.shape[0] // 3 + return self._reverse_hf_permute(weights[r * n_part:r * n_part + r, ...], n_head, n_head_kv) + + def _reverse_hf_part(self, weights: Tensor, n_part: int) -> Tensor: + r = weights.shape[0] // 3 + return weights[r * n_part:r * n_part + r, ...] 
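
As an aside, the permute reversal above is easiest to see on a toy tensor. Below is a minimal sketch (illustrative only, not part of the patch; the function name and numpy usage are stand-ins for the method and torch tensors above) showing that the reshape/swapaxes/reshape simply interleaves the two rotary halves of each head's rows — the interleaved order is what this converter then writes into the GGUF tensors for Q and K.

# illustrative sketch, assuming a single attention head with head_dim = 8
import numpy as np

def reverse_hf_permute(weights: np.ndarray, n_head: int) -> np.ndarray:
    # same reshape/swapaxes/reshape as _reverse_hf_permute above
    return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                   .swapaxes(1, 2)
                   .reshape(weights.shape))

rows = np.arange(8).reshape(8, 1)                   # row i holds just the value i
print(reverse_hf_permute(rows, n_head=1).ravel())   # -> [0 4 1 5 2 6 3 7]
# rows 0-3 (first rotary half) and rows 4-7 (second rotary half) are interleaved
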
+ + +class FalconModel(Model): + def set_gguf_parameters(self): + block_count = self.hparams.get("num_hidden_layers") + if block_count is None: + block_count = self.hparams["n_layer"] # old name + + n_head = self.hparams.get("num_attention_heads") + if n_head is None: + n_head = self.hparams["n_head"] # old name + + n_head_kv = self.hparams.get("num_kv_heads") + if n_head_kv is None: + n_head_kv = self.hparams.get("n_head_kv", 1) # old name + + self.gguf_writer.add_name("Falcon") + self.gguf_writer.add_context_length(2048) # not in config.json + self.gguf_writer.add_tensor_data_layout("jploski") # qkv tensor transform + self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) + self.gguf_writer.add_feed_forward_length(4 * self.hparams["hidden_size"]) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_head_count(n_head) + self.gguf_writer.add_head_count_kv(n_head_kv) + self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) + self.gguf_writer.add_file_type(self.ftype) + + def write_tensors(self): + block_count = self.hparams.get("num_hidden_layers") + if block_count is None: + block_count = self.hparams["n_layer"] # old name + + n_head = self.hparams.get("num_attention_heads") + if n_head is None: + n_head = self.hparams["n_head"] # old name + + n_head_kv = self.hparams.get("num_kv_heads") + if n_head_kv is None: + n_head_kv = self.hparams.get("n_head_kv", 1) # old name + + head_dim = self.hparams["hidden_size"] // n_head + tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + + for name, data_torch in self.get_tensors(): + old_dtype = data_torch.dtype + + # convert any unsupported data types to float32 + if data_torch.dtype not in (torch.float16, torch.float32): + data_torch = data_torch.to(torch.float32) + + # QKV tensor transform + # The original query_key_value tensor contains n_head_kv "kv groups", + # each consisting of n_head/n_head_kv query weights followed by one key + # and one value weight (shared by all query heads in the kv group). + # This layout makes it a big pain to work with in GGML. + # So we rearrange them here,, so that we have n_head query weights + # followed by n_head_kv key weights followed by n_head_kv value weights, + # in contiguous fashion. + # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py + + if "query_key_value" in name: + qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head) + q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head) + k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head) + v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head) + data_torch = torch.cat((q, k, v)).reshape_as(data_torch) + + data = data_torch.squeeze().numpy() + + # map tensor names + new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) + if new_name is None: + print(f"Can not map tensor {name!r}") + sys.exit() + + n_dims = len(data.shape) + data_dtype = data.dtype + + # if f32 desired, convert any float16 to float32 + if self.ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) + + # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 + if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: + data = data.astype(np.float32) + + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: + data = data.astype(np.float16) + + print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + + self.gguf_writer.add_tensor(new_name, data) + + +class StarCoderModel(Model): + def set_gguf_parameters(self): + block_count = self.hparams["n_layer"] + + self.gguf_writer.add_name("StarCoder") + self.gguf_writer.add_context_length(self.hparams["n_positions"]) + self.gguf_writer.add_embedding_length(self.hparams["n_embd"]) + self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"]) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_head_count(self.hparams["n_head"]) + self.gguf_writer.add_head_count_kv(1) + self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) + self.gguf_writer.add_file_type(self.ftype) + + +class RefactModel(Model): + def set_gguf_parameters(self): + hidden_dim = self.hparams["n_embd"] + inner_dim = 4 * hidden_dim + hidden_dim = int(2 * inner_dim / 3) + multiple_of = 256 + ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) + + block_count = self.hparams["n_layer"] + + self.gguf_writer.add_name("Refact") + # refact uses Alibi. So this is from config.json which might be used by training. + self.gguf_writer.add_context_length(self.hparams["n_positions"]) + self.gguf_writer.add_embedding_length(self.hparams["n_embd"]) + + self.gguf_writer.add_feed_forward_length(ff_dim) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_head_count(self.hparams["n_head"]) + self.gguf_writer.add_head_count_kv(1) + self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"]) + self.gguf_writer.add_file_type(self.ftype) + + def write_tensors(self): + hidden_dim = self.hparams["n_embd"] + inner_dim = 4 * hidden_dim + hidden_dim = int(2 * inner_dim / 3) + multiple_of = 256 + ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) + n_head = self.hparams["n_head"] + n_head_kv = 1 + head_dim = self.hparams["n_embd"] // n_head + block_count = self.hparams["n_layer"] + + tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + + tensors = dict(self.get_tensors()) + for i in range(block_count): + if (w := tensors.get(f"transformer.h.{i}.attn.kv.weight")) is not None: + tensors[f"model.layers.{i}.self_attn.k_proj.weight"] = w[:n_head_kv * head_dim] + tensors[f"model.layers.{i}.self_attn.v_proj.weight"] = w[n_head_kv * head_dim:] + del tensors[f"transformer.h.{i}.attn.kv.weight"] + if (w := tensors.get(f"transformer.h.{i}.attn.q.weight")) is not None: + tensors[f"model.layers.{i}.self_attn.q_proj.weight"] = w + del tensors[f"transformer.h.{i}.attn.q.weight"] + if (w := tensors.get(f"transformer.h.{i}.mlp.gate_up_proj.weight")) is not None: + tensors[f"model.layers.{i}.mlp.gate_proj.weight"] = w[:ff_dim] + tensors[f"model.layers.{i}.mlp.up_proj.weight"] = w[ff_dim:] + del tensors[f"transformer.h.{i}.mlp.gate_up_proj.weight"] + + for name, data_torch in tensors.items(): + old_dtype = data_torch.dtype + + # convert any unsupported data types to float32 + if data_torch.dtype not in (torch.float16, torch.float32): + data_torch = data_torch.to(torch.float32) + + data = data_torch.squeeze().numpy() + + # map tensor names + new_name = 
tensor_map.get_name(name, try_suffixes=(".weight",)) + if new_name is None: + print(f"Can not map tensor {name!r}") + sys.exit() + + n_dims = len(data.shape) + data_dtype = data.dtype + + # if f32 desired, convert any float16 to float32 + if self.ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) + + # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 + if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: + data = data.astype(np.float32) + + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: + data = data.astype(np.float16) + + print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + + self.gguf_writer.add_tensor(new_name, data) + + +class PersimmonModel(Model): + def set_gguf_parameters(self): + block_count = self.hparams.get("num_layers", self.hparams.get("num_hidden_layers")) + head_count = self.hparams["num_attention_heads"] + head_count_kv = head_count + hidden_size = self.hparams["hidden_size"] + + self.gguf_writer.add_name('persimmon-8b-chat') + self.gguf_writer.add_embedding_length(hidden_size) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) + self.gguf_writer.add_rope_dimension_count(hidden_size // head_count) + self.gguf_writer.add_head_count(head_count) + self.gguf_writer.add_head_count_kv(head_count_kv) + self.gguf_writer.add_rope_freq_base(self.hparams["rope_theta"]) + self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"]) + self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) + + def set_vocab(self): + self._set_vocab_sentencepiece() + # self.gguf_writer.add_bos_token_id(71013) + # self.gguf_writer.add_eos_token_id(71013) + + def write_tensors(self): + block_count = self.hparams.get("num_layers", self.hparams.get("num_hidden_layers")) + tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + + for name, data_torch in self.get_tensors(): + if name.endswith(".self_attention.rotary_emb.inv_freq"): + continue + old_dtype = data_torch.dtype + # TODO: FP16 conversion produces garbage outputs. (Q8_0 does not, so..?) 
+ data = data_torch.to(torch.float32).squeeze().numpy() + new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) + if new_name is None: + print(f"Can not map tensor {name!r}") + sys.exit() + n_dims = len(data.shape) + print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + self.gguf_writer.add_tensor(new_name, data) + + +###### CONVERSION LOGIC ###### + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Convert a huggingface model to a GGML compatible file") + parser.add_argument( + "--vocab-only", action="store_true", + help="extract only the vocab", + ) + parser.add_argument( + "--outfile", type=Path, + help="path to write to; default: based on input", + ) + parser.add_argument( + "--outtype", type=str, choices=["f32", "f16"], default="f16", + help="output format - use f32 for float32, f16 for float16", + ) + parser.add_argument("--bigendian", action="store_true", help="model is executed on big endian machine") + parser.add_argument( + "model", type=Path, + help="directory containing model file", + ) + + return parser.parse_args() + + +args = parse_args() + +dir_model = args.model +if not dir_model.is_dir(): + print(f'Error: {args.model} is not a directory', file=sys.stderr) + sys.exit(1) + +ftype_map = { + "f32": gguf.GGMLQuantizationType.F32, + "f16": gguf.GGMLQuantizationType.F16, +} + +if args.outfile is not None: + fname_out = args.outfile +else: + # output in the same directory as the model by default + fname_out = dir_model / f'ggml-model-{args.outtype}.gguf' + +print(f"Loading model: {dir_model.name}") + +hparams = Model.load_hparams(dir_model) + +model_class = Model.from_model_architecture(hparams["architectures"][0]) +model_instance = model_class(dir_model, ftype_map[args.outtype], fname_out, args.bigendian) + +print("Set model parameters") +model_instance.set_gguf_parameters() + +print("Set model tokenizer") +model_instance.set_vocab() + +if args.vocab_only: + print(f"Exporting model vocab to '{fname_out}'") + model_instance.write_vocab() +else: + print(f"Exporting model to '{fname_out}'") + model_instance.write() + +print(f"Model successfully exported to '{fname_out}'") diff --git a/convert-mpt-hf-to-gguf.py b/convert-mpt-hf-to-gguf.py deleted file mode 100755 index 70d154b3f5c01e..00000000000000 --- a/convert-mpt-hf-to-gguf.py +++ /dev/null @@ -1,227 +0,0 @@ -#!/usr/bin/env python3 -# HF mpt--> gguf conversion - -from __future__ import annotations - -import argparse -import json -import os -import struct -import sys -from pathlib import Path -from typing import Any - -import numpy as np -import torch -from transformers import AutoTokenizer # type: ignore[import] - -if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf')) -import gguf - - -def count_model_parts(dir_model: Path) -> int: - num_parts = 0 - for filename in os.listdir(dir_model): - if filename.startswith("pytorch_model-"): - num_parts += 1 - - if num_parts > 0: - print("gguf: found " + str(num_parts) + " model parts") - return num_parts - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser(description="Convert an MPT model to a GGML compatible file") - parser.add_argument( - "--vocab-only", action="store_true", - help="extract only the vocab", - ) - parser.add_argument( - "--outfile", type=Path, - help="path to write to; default: based on input", - ) - parser.add_argument( - "model", type=Path, - help="directory containing model file, or model file itself (*.bin)", - ) 
- parser.add_argument( - "ftype", type=int, choices=[0, 1], default=1, nargs='?', - help="output format - use 0 for float32, 1 for float16", - ) - return parser.parse_args() - -args = parse_args() - -dir_model = args.model -ftype = args.ftype -if not dir_model.is_dir(): - print(f'Error: {args.model} is not a directory', file = sys.stderr) - sys.exit(1) - -# possible tensor data types -# ftype == 0 -> float32 -# ftype == 1 -> float16 - -# map from ftype to string -ftype_str = ["f32", "f16"] - -if args.outfile is not None: - fname_out = args.outfile -else: - # output in the same directory as the model by default - fname_out = dir_model / f'ggml-model-{ftype_str[ftype]}.gguf' - -print("gguf: loading model "+dir_model.name) - -with open(dir_model / "config.json", "r", encoding="utf-8") as f: - hparams = json.load(f) - -if hparams["architectures"][0] != "MPTForCausalLM": - print("Model architecture not supported: " + hparams["architectures"][0]) - - sys.exit() - -# get number of model parts -num_parts = count_model_parts(dir_model) - -ARCH=gguf.MODEL_ARCH.MPT -gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH]) - -print("gguf: get model metadata") - -block_count = hparams["n_layers"] - -gguf_writer.add_name(dir_model.name) -gguf_writer.add_context_length(hparams["max_seq_len"]) -gguf_writer.add_embedding_length(hparams["d_model"]) -gguf_writer.add_block_count(block_count) -gguf_writer.add_feed_forward_length(4 * hparams["d_model"]) -gguf_writer.add_head_count(hparams["n_heads"]) -if kv_n_heads := hparams["attn_config"].get("kv_n_heads"): - gguf_writer.add_head_count_kv(kv_n_heads) -gguf_writer.add_layer_norm_eps(1e-05) -if hparams["attn_config"]["clip_qkv"] is not None: - gguf_writer.add_clamp_kqv(hparams["attn_config"]["clip_qkv"]) -gguf_writer.add_max_alibi_bias(hparams["attn_config"]["alibi_bias_max"]) - -# TOKENIZATION - -print("gguf: get tokenizer metadata") - -tokens: list[bytearray] = [] -scores: list[float] = [] -toktypes: list[int] = [] - -# gpt2 tokenizer -gguf_writer.add_tokenizer_model("gpt2") - -print("gguf: get gpt2 tokenizer vocab") - -# MPT token embedding tensors have dimension 50432 (hparams["vocab_size"]), but -# there are only 50254 (len(tokenizer.vocab)) tokens in the vocab, presumably to -# accomodate some "reserved" tokens; this is causing problems down the line in -# llama.cpp, so we pad the vocab with dummy tokens: - -vocab_size = hparams["vocab_size"] - -# ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py -tokenizer = AutoTokenizer.from_pretrained(dir_model) - -added_vocab = tokenizer.get_added_vocab() -reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()} - -for i in range(vocab_size): - if i not in reverse_vocab: - tokens.append(f"[PAD{i}]") - toktypes.append(gguf.TokenType.USER_DEFINED) - elif reverse_vocab[i] in added_vocab: - tokens.append(reverse_vocab[i]) - if tokenizer.added_tokens_decoder[i].special: - toktypes.append(gguf.TokenType.CONTROL) - else: - toktypes.append(gguf.TokenType.USER_DEFINED) - else: - tokens.append(reverse_vocab[i]) - toktypes.append(gguf.TokenType.NORMAL) - -gguf_writer.add_token_list(tokens) -gguf_writer.add_token_types(toktypes) - -special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens)) -special_vocab.add_to_gguf(gguf_writer) - -# TENSORS - -tensor_map = gguf.get_tensor_name_map(ARCH,block_count) - -# tensor info -print("gguf: get tensor metadata") - -if num_parts == 0: - part_names = iter(("pytorch_model.bin",)) -else: - part_names = ( - 
f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1) - ) - -for part_name in part_names: - if args.vocab_only: - break - print("gguf: loading model part '" + part_name + "'") - model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu") - - for name in model_part.keys(): - data = model_part[name] - - old_dtype = data.dtype - - # convert any unsupported data types to float32 - if data.dtype != torch.float16 and data.dtype != torch.float32: - data = data.to(torch.float32) - - data = data.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias")) - if new_name is None: - print("Cannot map tensor '" + name + "'") - continue # for the sake of compatibility with some old published models, don't quit - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 - if ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - print(new_name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype)) - - gguf_writer.add_tensor(new_name, data) - - # note: MPT output is tied to (same as) wte in original model; - # for easier implementation in llama.cpp it's duplicated in GGUF, though :/ - if new_name == "token_embd.weight": - gguf_writer.add_tensor("output.weight", data) - -print("gguf: write header") -gguf_writer.write_header_to_file() -print("gguf: write metadata") -gguf_writer.write_kv_data_to_file() -if not args.vocab_only: - print("gguf: write tensors") - gguf_writer.write_tensors_to_file() - -gguf_writer.close() - -print(f"gguf: model successfully exported to '{fname_out}'") -print("") diff --git a/convert-refact-hf-to-gguf.py b/convert-refact-hf-to-gguf.py deleted file mode 100755 index f0cfe84d81c8ba..00000000000000 --- a/convert-refact-hf-to-gguf.py +++ /dev/null @@ -1,272 +0,0 @@ -#!/usr/bin/env python3 -# HF refact--> gguf conversion - -from __future__ import annotations - -import argparse -import json -import os -import sys -from pathlib import Path - -import numpy as np -import torch -from transformers import AutoTokenizer # type: ignore[import] - -if "NO_LOCAL_GGUF" not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / "gguf-py" / "gguf")) -import gguf - -def count_model_parts(dir_model: Path) -> int: - num_parts = 0 - for filename in os.listdir(dir_model): - if filename.startswith("pytorch_model-"): - num_parts += 1 - - if num_parts > 0: - print("gguf: found " + str(num_parts) + " model parts") - return num_parts - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser( - description="Convert a Refact model to a GGML compatible file" - ) - parser.add_argument( - "--vocab-only", - action="store_true", - help="extract only the vocab", - ) - parser.add_argument( - "--outfile", - type=Path, - help="path to write to; default: based on input", - ) - parser.add_argument( - "model", - type=Path, - help="directory containing model file, or model file itself (*.bin)", - ) - parser.add_argument( - "ftype", - type=int, - choices=[0, 1], - default=1, - nargs="?", - help="output 
format - use 0 for float32, 1 for float16", - ) - return parser.parse_args() - - -args = parse_args() - -dir_model = args.model -ftype = args.ftype -if not dir_model.is_dir(): - print(f"Error: {args.model} is not a directory", file=sys.stderr) - sys.exit(1) - -# possible tensor data types -# ftype == 0 -> float32 -# ftype == 1 -> float16 - -# map from ftype to string -ftype_str = ["f32", "f16"] - -if args.outfile is not None: - fname_out = args.outfile -else: - # output in the same directory as the model by default - fname_out = dir_model / f"ggml-model-{ftype_str[ftype]}.gguf" - -print("gguf: loading model " + dir_model.name) - -with open(dir_model / "config.json", "r", encoding="utf-8") as f: - hparams = json.load(f) - -if hparams["architectures"][0] != "GPTRefactForCausalLM": - print("Model architecture not supported: " + hparams["architectures"][0]) - - sys.exit(1) - -# get number of model parts -num_parts = count_model_parts(dir_model) - -ARCH = gguf.MODEL_ARCH.REFACT -gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH]) - -print("gguf: get model metadata") - -# Get refact feed forward dimension -hidden_dim = hparams["n_embd"] -inner_dim = 4 * hidden_dim -hidden_dim = int(2 * inner_dim / 3) -multiple_of = 256 -ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) - -block_count = hparams["n_layer"] - -gguf_writer.add_name("Refact") -# refact uses Alibi. So this is from config.json which might be used by training. -gguf_writer.add_context_length(hparams["n_positions"]) -gguf_writer.add_embedding_length(hparams["n_embd"]) - -gguf_writer.add_feed_forward_length(ff_dim) -gguf_writer.add_block_count(block_count) -gguf_writer.add_head_count(hparams["n_head"]) -gguf_writer.add_head_count_kv(1) -gguf_writer.add_layer_norm_rms_eps(hparams["layer_norm_epsilon"]) -gguf_writer.add_file_type(ftype) - -# TOKENIZATION - -print("gguf: get tokenizer metadata") - -tokens: list[bytearray] = [] -scores: list[float] = [] -toktypes: list[int] = [] - -# gpt2 tokenizer -gguf_writer.add_tokenizer_model("gpt2") - -print("gguf: get gpt2 tokenizer vocab") - -# ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py -tokenizer = AutoTokenizer.from_pretrained(dir_model) - -# The number of tokens in tokenizer.json can differ from the expected vocab size. 
-# This causes downstream issues with mismatched tensor sizes when running the inference -vocab_size = hparams.get("vocab_size", len(tokenizer.vocab)) -assert max(tokenizer.vocab.values()) < vocab_size - -added_vocab = tokenizer.get_added_vocab() -reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()} - -for i in range(vocab_size): - if i not in reverse_vocab: - tokens.append(f"[PAD{i}]") - toktypes.append(gguf.TokenType.USER_DEFINED) - elif reverse_vocab[i] in added_vocab: - tokens.append(reverse_vocab[i]) - if tokenizer.added_tokens_decoder[i].special: - toktypes.append(gguf.TokenType.CONTROL) - else: - toktypes.append(gguf.TokenType.USER_DEFINED) - else: - tokens.append(reverse_vocab[i]) - toktypes.append(gguf.TokenType.NORMAL) - -gguf_writer.add_token_list(tokens) -gguf_writer.add_token_types(toktypes) - -special_vocab = gguf.SpecialVocab(dir_model, load_merges=True, n_vocab = len(tokens)) -special_vocab.add_to_gguf(gguf_writer) - -# TENSORS - -tensor_map = gguf.get_tensor_name_map(ARCH, block_count) - -# params for qkv transform -n_head = hparams["n_head"] -n_head_kv = 1 - -head_dim = hparams["n_embd"] // n_head - -# tensor info -print("gguf: get tensor metadata") - -if num_parts == 0: - part_names = iter(("pytorch_model.bin",)) -else: - part_names = ( - f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1) - ) -for part_name in part_names: - if args.vocab_only: - break - print("gguf: loading model part '" + part_name + "'") - model_part = torch.load(dir_model / part_name, map_location="cpu") - - for i in range(block_count): - if f"transformer.h.{i}.attn.kv.weight" in model_part: - data = model_part[f"transformer.h.{i}.attn.kv.weight"] - model_part[f"model.layers.{i}.self_attn.k_proj.weight"] = data[ - : n_head_kv * head_dim - ] - model_part[f"model.layers.{i}.self_attn.v_proj.weight"] = data[ - n_head_kv * head_dim : - ] - del model_part[f"transformer.h.{i}.attn.kv.weight"] - if f"transformer.h.{i}.attn.q.weight" in model_part: - model_part[f"model.layers.{i}.self_attn.q_proj.weight"] = model_part[ - f"transformer.h.{i}.attn.q.weight" - ] - del model_part[f"transformer.h.{i}.attn.q.weight"] - if f"transformer.h.{i}.mlp.gate_up_proj.weight" in model_part: - data = model_part[f"transformer.h.{i}.mlp.gate_up_proj.weight"] - model_part[f"model.layers.{i}.mlp.gate_proj.weight"] = data[:ff_dim] - model_part[f"model.layers.{i}.mlp.up_proj.weight"] = data[ff_dim:] - del model_part[f"transformer.h.{i}.mlp.gate_up_proj.weight"] - - for name in model_part.keys(): - data = model_part[name] - - old_dtype = data.dtype - - # convert any unsupported data types to float32 - if data.dtype != torch.float16 and data.dtype != torch.float32: - data = data.to(torch.float32) - - data = data.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes=(".weight",)) - if new_name is None: - print("Can not map tensor '" + name + "'") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if ( - ftype == 1 - and data_dtype == np.float32 - and name.endswith(".weight") - and n_dims == 2 - ): - data = data.astype(np.float16) - - print( - new_name - + ", n_dims = " - + str(n_dims) - + ", " - + str(old_dtype) - + " --> " - + str(data.dtype) - ) - - gguf_writer.add_tensor(new_name, data) - - -print("gguf: write header") -gguf_writer.write_header_to_file() -print("gguf: write metadata") -gguf_writer.write_kv_data_to_file() -if not args.vocab_only: - print("gguf: write tensors") - gguf_writer.write_tensors_to_file() - -gguf_writer.close() - -print(f"gguf: model successfully exported to '{fname_out}'") -print("") diff --git a/convert-starcoder-hf-to-gguf.py b/convert-starcoder-hf-to-gguf.py deleted file mode 100755 index a9bfed85e31bab..00000000000000 --- a/convert-starcoder-hf-to-gguf.py +++ /dev/null @@ -1,210 +0,0 @@ -#!/usr/bin/env python3 -# HF starcoder --> gguf conversion - -from __future__ import annotations - -import argparse -import json -import os -import struct -import sys -from pathlib import Path -from typing import Any - -import numpy as np -import torch -from transformers import AutoTokenizer # type: ignore[import] - -if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf')) -import gguf - - -def count_model_parts(dir_model: Path) -> int: - num_parts = 0 - for filename in os.listdir(dir_model): - if filename.startswith("pytorch_model-"): - num_parts += 1 - - if num_parts > 0: - print("gguf: found " + str(num_parts) + " model parts") - return num_parts - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser(description="Convert a StarCoder model to a GGML compatible file") - parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab") - parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") - parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.bin)") - parser.add_argument("ftype", type=int, help="output format - use 0 for float32, 1 for float16", choices=[0, 1], default = 1) - return parser.parse_args() - -args = parse_args() - -dir_model = args.model -ftype = args.ftype -if not dir_model.is_dir(): - print(f'Error: {args.model} is not a directory', file = sys.stderr) - sys.exit(1) - -# possible tensor data types -# ftype == 0 -> float32 -# ftype == 1 -> float16 - -# map from ftype to string -ftype_str = ["f32", "f16"] - -if args.outfile is not None: - fname_out = args.outfile -else: - # output in the same directory as the model by default - fname_out = dir_model / f'ggml-model-{ftype_str[ftype]}.gguf' - -print("gguf: loading model "+dir_model.name) - -with open(dir_model / "config.json", "r", encoding="utf-8") as f: - hparams = json.load(f) - -if hparams["architectures"][0] != "GPTBigCodeForCausalLM": - print("Model architecture not supported: " + hparams["architectures"][0]) - - sys.exit(1) - -# get number of model parts -num_parts = count_model_parts(dir_model) - -ARCH=gguf.MODEL_ARCH.STARCODER -gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH]) - -print("gguf: get model metadata") - -block_count = hparams["n_layer"] - -gguf_writer.add_name("StarCoder") -gguf_writer.add_context_length(hparams["n_positions"]) 
-gguf_writer.add_embedding_length(hparams["n_embd"]) -gguf_writer.add_feed_forward_length(4 * hparams["n_embd"]) -gguf_writer.add_block_count(block_count) -gguf_writer.add_head_count(hparams["n_head"]) -gguf_writer.add_head_count_kv(1) -gguf_writer.add_layer_norm_eps(hparams["layer_norm_epsilon"]) -gguf_writer.add_file_type(ftype) - -# TOKENIZATION - -print("gguf: get tokenizer metadata") - -tokens: list[bytearray] = [] -scores: list[float] = [] -toktypes: list[int] = [] - -# gpt2 tokenizer -gguf_writer.add_tokenizer_model("gpt2") - -print("gguf: get gpt2 tokenizer vocab") - -# ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py -tokenizer = AutoTokenizer.from_pretrained(dir_model) - -# The number of tokens in tokenizer.json can differ from the expected vocab size. -# This causes downstream issues with mismatched tensor sizes when running the inference -vocab_size = hparams.get("vocab_size", len(tokenizer.vocab)) -assert max(tokenizer.vocab.values()) < vocab_size - -added_vocab = tokenizer.get_added_vocab() -reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()} - -for i in range(vocab_size): - if i not in reverse_vocab: - tokens.append(f"[PAD{i}]") - toktypes.append(gguf.TokenType.USER_DEFINED) - elif reverse_vocab[i] in added_vocab: - tokens.append(reverse_vocab[i]) - if tokenizer.added_tokens_decoder[i].special: - toktypes.append(gguf.TokenType.CONTROL) - else: - toktypes.append(gguf.TokenType.USER_DEFINED) - else: - tokens.append(reverse_vocab[i]) - toktypes.append(gguf.TokenType.NORMAL) - -gguf_writer.add_token_list(tokens) -gguf_writer.add_token_types(toktypes) -special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens)) -special_vocab.add_to_gguf(gguf_writer) - -# TENSORS - -tensor_map = gguf.get_tensor_name_map(ARCH,block_count) - -# params for qkv transform -n_head = hparams["n_head"] -n_head_kv = hparams["n_head_kv"] if "n_head_kv" in hparams else 1 - -head_dim = hparams["n_embd"] // n_head - -# tensor info -print("gguf: get tensor metadata") - -if num_parts == 0: - part_names = iter(("pytorch_model.bin",)) -else: - part_names = ( - f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1) - ) - -for part_name in part_names: - if args.vocab_only: - break - print("gguf: loading model part '" + part_name + "'") - model_part = torch.load(dir_model / part_name, map_location="cpu") - - for name in model_part.keys(): - data = model_part[name] - - old_dtype = data.dtype - - # convert any unsupported data types to float32 - if data.dtype != torch.float16 and data.dtype != torch.float32: - data = data.to(torch.float32) - - data = data.squeeze().numpy() - - # map tensor names - new_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias")) - if new_name is None: - print("Can not map tensor '" + name + "'") - sys.exit() - - n_dims = len(data.shape) - data_dtype = data.dtype - - # if f32 desired, convert any float16 to float32 - if ftype == 0 and data_dtype == np.float16: - data = data.astype(np.float32) - - # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 - if ftype == 1 and data_dtype == np.float16 and n_dims == 1: - data = data.astype(np.float32) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: - data = data.astype(np.float16) - - print(name, "=>", new_name + ", shape = " + str(data.shape) + ", " + str(old_dtype) + " --> " + str(data.dtype)) - - gguf_writer.add_tensor(new_name, data) - - -print("gguf: write header") -gguf_writer.write_header_to_file() -print("gguf: write metadata") -gguf_writer.write_kv_data_to_file() -if not args.vocab_only: - print("gguf: write tensors") - gguf_writer.write_tensors_to_file() - -gguf_writer.close() - -print(f"gguf: model successfully exported to '{fname_out}'") -print("") diff --git a/convert.py b/convert.py index 9110f15806c6bc..b0f44dbef8332a 100755 --- a/convert.py +++ b/convert.py @@ -26,7 +26,7 @@ from typing import IO, TYPE_CHECKING, Any, Callable, Generator, Iterable, Literal, Sequence, TypeVar import numpy as np -from sentencepiece import SentencePieceProcessor # type: ignore[import] +from sentencepiece import SentencePieceProcessor import os if 'NO_LOCAL_GGUF' not in os.environ: @@ -328,7 +328,7 @@ def __init__(self, fname_tokenizer: Path, fname_added_tokens: Path | None) -> No def bpe_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]: tokenizer = self.bpe_tokenizer - from transformers.models.gpt2 import tokenization_gpt2 # type: ignore[import] + from transformers.models.gpt2 import tokenization_gpt2 reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.items()} for i, _ in enumerate(tokenizer): diff --git a/mypy.ini b/mypy.ini index 55c168f2d7d127..7215a05dd2516d 100644 --- a/mypy.ini +++ b/mypy.ini @@ -3,3 +3,4 @@ strict = true allow_untyped_calls = true allow_untyped_defs = true allow_incomplete_defs = true +disable_error_code = import-untyped From df9d1293defe783f42bc83af732d3c670552c541 Mon Sep 17 00:00:00 2001 From: Galunid Date: Fri, 10 Nov 2023 14:24:54 +0100 Subject: [PATCH 080/206] Unbreak persimmon after #3837 (#4010) --- llama.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llama.cpp b/llama.cpp index d220ff3e9b130c..d682d2864d2836 100644 --- a/llama.cpp +++ b/llama.cpp @@ -4209,7 +4209,7 @@ struct llm_build_context { struct ggml_tensor * Kcur = ggml_concat(ctx0, krotated, kpass); cb(Kcur, "Kcur", il); - struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 1, 2, 0, 3)); + struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 2, 1, 0, 3)); cb(Q, "Q", il); Kcur = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 2, 1, 0, 3)); From 4a4fd3eefad5bd17ab6bcd8e2181b4f62eae76cf Mon Sep 17 00:00:00 2001 From: Jhen-Jie Hong Date: Sat, 11 Nov 2023 06:49:33 +0800 Subject: [PATCH 081/206] server : allow continue edit on completion mode (#3950) * server : allow continue edit on completion mode * server : handle abort case in runCompletion * server : style improvement --- examples/server/index.html.hpp | 4707 +++++++++++++++-------------- examples/server/public/index.html | 38 +- 2 files changed, 2418 insertions(+), 2327 deletions(-) diff --git a/examples/server/index.html.hpp b/examples/server/index.html.hpp index 207412513ae71a..f22b77e7f7c48a 100644 --- a/examples/server/index.html.hpp +++ b/examples/server/index.html.hpp @@ -229,850 +229,860 @@ unsigned char index_html[] = { 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x5d, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 
0x20, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x3a, 0x20, 0x31, 0x30, 0x65, 0x6d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, - 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x40, 0x6b, 0x65, 0x79, 0x66, 0x72, - 0x61, 0x6d, 0x65, 0x73, 0x20, 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, - 0x2d, 0x62, 0x67, 0x2d, 0x77, 0x69, 0x70, 0x65, 0x20, 0x7b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x25, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, - 0x6f, 0x75, 0x6e, 0x64, 0x2d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x3a, 0x20, 0x30, 0x25, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x31, 0x30, - 0x30, 0x25, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x2d, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x31, 0x30, - 0x30, 0x25, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2e, - 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x2d, 0x2d, 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, - 0x67, 0x2d, 0x63, 0x6f, 0x6c, 0x6f, 0x72, 0x2d, 0x31, 0x3a, 0x20, 0x23, - 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x30, 0x30, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x2d, 0x2d, 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, - 0x67, 0x2d, 0x63, 0x6f, 0x6c, 0x6f, 0x72, 0x2d, 0x32, 0x3a, 0x20, 0x23, - 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x66, 0x66, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, - 0x6e, 0x64, 0x2d, 0x73, 0x69, 0x7a, 0x65, 0x3a, 0x20, 0x35, 0x30, 0x25, - 0x20, 0x31, 0x30, 0x30, 0x25, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x2d, - 0x69, 0x6d, 0x61, 0x67, 0x65, 0x3a, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x61, - 0x72, 0x2d, 0x67, 0x72, 0x61, 0x64, 0x69, 0x65, 0x6e, 0x74, 0x28, 0x39, - 0x30, 0x64, 0x65, 0x67, 0x2c, 0x20, 0x76, 0x61, 0x72, 0x28, 0x2d, 0x2d, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x5b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x65, 0x64, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5d, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x69, 0x73, 0x70, 0x6c, + 0x61, 0x79, 0x3a, 0x20, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x2d, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x77, 0x68, 0x69, 0x74, 0x65, 0x2d, 0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, + 0x20, 0x70, 0x72, 0x65, 0x2d, 0x77, 0x72, 0x61, 0x70, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x6e, 0x65, + 0x3a, 0x20, 0x30, 0x70, 0x78, 0x20, 0x73, 0x6f, 0x6c, 0x69, 0x64, 0x20, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x40, 0x6b, 0x65, 0x79, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x20, 0x6c, + 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x2d, 0x62, 0x67, 0x2d, 0x77, 0x69, + 0x70, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x30, + 0x25, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x2d, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x30, 0x25, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x31, 0x30, 0x30, 0x25, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 
0x20, 0x20, 0x20, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x67, + 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x2d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x3a, 0x20, 0x31, 0x30, 0x30, 0x25, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, + 0x67, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x2d, 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x2d, 0x63, 0x6f, 0x6c, 0x6f, - 0x72, 0x2d, 0x31, 0x29, 0x2c, 0x20, 0x76, 0x61, 0x72, 0x28, 0x2d, 0x2d, + 0x72, 0x2d, 0x31, 0x3a, 0x20, 0x23, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x30, 0x30, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x2d, 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x2d, 0x63, 0x6f, 0x6c, 0x6f, - 0x72, 0x2d, 0x32, 0x29, 0x2c, 0x20, 0x76, 0x61, 0x72, 0x28, 0x2d, 0x2d, + 0x72, 0x2d, 0x32, 0x3a, 0x20, 0x23, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x66, 0x66, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x62, 0x61, + 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x2d, 0x73, 0x69, 0x7a, + 0x65, 0x3a, 0x20, 0x35, 0x30, 0x25, 0x20, 0x31, 0x30, 0x30, 0x25, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x67, + 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x2d, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x3a, + 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x2d, 0x67, 0x72, 0x61, 0x64, + 0x69, 0x65, 0x6e, 0x74, 0x28, 0x39, 0x30, 0x64, 0x65, 0x67, 0x2c, 0x20, + 0x76, 0x61, 0x72, 0x28, 0x2d, 0x2d, 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, + 0x67, 0x2d, 0x63, 0x6f, 0x6c, 0x6f, 0x72, 0x2d, 0x31, 0x29, 0x2c, 0x20, + 0x76, 0x61, 0x72, 0x28, 0x2d, 0x2d, 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, + 0x67, 0x2d, 0x63, 0x6f, 0x6c, 0x6f, 0x72, 0x2d, 0x32, 0x29, 0x2c, 0x20, + 0x76, 0x61, 0x72, 0x28, 0x2d, 0x2d, 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, + 0x67, 0x2d, 0x63, 0x6f, 0x6c, 0x6f, 0x72, 0x2d, 0x31, 0x29, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x61, 0x6e, 0x69, 0x6d, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, + 0x67, 0x2d, 0x62, 0x67, 0x2d, 0x77, 0x69, 0x70, 0x65, 0x20, 0x32, 0x73, + 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x61, 0x72, 0x20, 0x69, 0x6e, 0x66, 0x69, + 0x6e, 0x69, 0x74, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x40, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x20, + 0x28, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x73, 0x2d, 0x63, 0x6f, 0x6c, + 0x6f, 0x72, 0x2d, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x3a, 0x20, 0x64, + 0x61, 0x72, 0x6b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x2e, 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x2d, 0x6c, 0x6f, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x2d, 0x63, 0x6f, 0x6c, 0x6f, 0x72, 0x2d, + 0x31, 0x3a, 0x20, 0x23, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x30, 0x30, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2d, 0x2d, 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x2d, 0x63, 0x6f, 0x6c, 0x6f, - 0x72, 0x2d, 0x31, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x61, 0x6e, 0x69, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x20, - 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x2d, 0x62, 0x67, 0x2d, 0x77, - 0x69, 0x70, 0x65, 0x20, 0x32, 0x73, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x61, - 0x72, 0x20, 0x69, 0x6e, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x65, 0x3b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x40, - 0x6d, 0x65, 0x64, 0x69, 0x61, 0x20, 0x28, 0x70, 0x72, 0x65, 0x66, 0x65, - 0x72, 0x73, 0x2d, 0x63, 
0x6f, 0x6c, 0x6f, 0x72, 0x2d, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x65, 0x3a, 0x20, 0x64, 0x61, 0x72, 0x6b, 0x29, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x6c, 0x6f, 0x61, 0x64, - 0x69, 0x6e, 0x67, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x2d, 0x2d, 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x2d, - 0x63, 0x6f, 0x6c, 0x6f, 0x72, 0x2d, 0x31, 0x3a, 0x20, 0x23, 0x32, 0x32, - 0x32, 0x32, 0x32, 0x32, 0x30, 0x30, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x2d, 0x2d, 0x6c, 0x6f, 0x61, 0x64, 0x69, 0x6e, - 0x67, 0x2d, 0x63, 0x6f, 0x6c, 0x6f, 0x72, 0x2d, 0x32, 0x3a, 0x20, 0x23, - 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x66, 0x66, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x2e, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x2d, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, - 0x6e, 0x64, 0x2d, 0x63, 0x6f, 0x6c, 0x6f, 0x72, 0x3a, 0x20, 0x62, 0x6c, - 0x61, 0x63, 0x6b, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x3c, 0x2f, 0x73, - 0x74, 0x79, 0x6c, 0x65, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x3c, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x6d, - 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x20, 0x68, 0x2c, 0x20, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x2c, 0x20, 0x65, 0x66, 0x66, 0x65, - 0x63, 0x74, 0x2c, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, - 0x2c, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2c, 0x20, 0x75, 0x73, - 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x2c, 0x20, 0x75, 0x73, 0x65, - 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x2c, 0x20, 0x75, 0x73, 0x65, 0x52, - 0x65, 0x66, 0x2c, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, - 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x66, 0x72, 0x6f, 0x6d, - 0x20, 0x27, 0x2f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x6a, 0x73, 0x27, - 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, - 0x74, 0x20, 0x7b, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x20, 0x7d, 0x20, - 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x27, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, - 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x6a, 0x73, 0x27, 0x3b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x7b, 0x20, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, - 0x74, 0x65, 0x72, 0x20, 0x7d, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x27, - 0x2f, 0x6a, 0x73, 0x6f, 0x6e, 0x2d, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x2d, 0x74, 0x6f, 0x2d, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x2e, - 0x6d, 0x6a, 0x73, 0x27, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, - 0x74, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x69, - 0x6d, 0x61, 0x67, 0x65, 0x20, 0x3d, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x72, 0x20, 0x73, 0x6c, - 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x20, 0x3d, 0x20, 0x2d, 0x31, 0x3b, 0x0a, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x3d, 0x20, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x6c, 0x28, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x3a, 0x20, 0x22, 0x54, 0x68, 0x69, - 0x73, 0x20, 0x69, 0x73, 
0x20, 0x61, 0x20, 0x63, 0x6f, 0x6e, 0x76, 0x65, - 0x72, 0x73, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x62, 0x65, 0x74, 0x77, - 0x65, 0x65, 0x6e, 0x20, 0x55, 0x73, 0x65, 0x72, 0x20, 0x61, 0x6e, 0x64, - 0x20, 0x4c, 0x6c, 0x61, 0x6d, 0x61, 0x2c, 0x20, 0x61, 0x20, 0x66, 0x72, - 0x69, 0x65, 0x6e, 0x64, 0x6c, 0x79, 0x20, 0x63, 0x68, 0x61, 0x74, 0x62, - 0x6f, 0x74, 0x2e, 0x20, 0x4c, 0x6c, 0x61, 0x6d, 0x61, 0x20, 0x69, 0x73, - 0x20, 0x68, 0x65, 0x6c, 0x70, 0x66, 0x75, 0x6c, 0x2c, 0x20, 0x6b, 0x69, - 0x6e, 0x64, 0x2c, 0x20, 0x68, 0x6f, 0x6e, 0x65, 0x73, 0x74, 0x2c, 0x20, - 0x67, 0x6f, 0x6f, 0x64, 0x20, 0x61, 0x74, 0x20, 0x77, 0x72, 0x69, 0x74, - 0x69, 0x6e, 0x67, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x6e, 0x65, 0x76, - 0x65, 0x72, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x73, 0x20, 0x74, 0x6f, 0x20, - 0x61, 0x6e, 0x73, 0x77, 0x65, 0x72, 0x20, 0x61, 0x6e, 0x79, 0x20, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x20, 0x69, 0x6d, 0x6d, 0x65, - 0x64, 0x69, 0x61, 0x74, 0x65, 0x6c, 0x79, 0x20, 0x61, 0x6e, 0x64, 0x20, - 0x77, 0x69, 0x74, 0x68, 0x20, 0x70, 0x72, 0x65, 0x63, 0x69, 0x73, 0x69, - 0x6f, 0x6e, 0x2e, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3a, 0x20, 0x22, 0x7b, - 0x7b, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x7d, 0x7d, 0x5c, 0x6e, 0x5c, - 0x6e, 0x7b, 0x7b, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x7d, 0x7d, - 0x5c, 0x6e, 0x7b, 0x7b, 0x63, 0x68, 0x61, 0x72, 0x7d, 0x7d, 0x3a, 0x22, - 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3a, - 0x20, 0x22, 0x7b, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x7d, 0x3a, 0x20, - 0x7b, 0x7b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x7d, 0x7d, 0x22, - 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x3a, 0x20, 0x5b, 0x5d, 0x2c, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3a, 0x20, - 0x22, 0x63, 0x68, 0x61, 0x74, 0x22, 0x2c, 0x20, 0x20, 0x2f, 0x2f, 0x20, - 0x22, 0x63, 0x68, 0x61, 0x74, 0x22, 0x20, 0x7c, 0x20, 0x22, 0x63, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x68, 0x61, 0x72, 0x3a, 0x20, 0x22, 0x4c, - 0x6c, 0x61, 0x6d, 0x61, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x75, 0x73, 0x65, 0x72, 0x3a, 0x20, 0x22, 0x55, 0x73, 0x65, 0x72, - 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x6d, 0x61, - 0x67, 0x65, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x3a, - 0x20, 0x27, 0x27, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x20, 0x3d, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x61, - 0x6c, 0x28, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x5f, - 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x3a, 0x20, 0x34, 0x30, 0x30, - 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x65, 0x6d, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x20, 0x30, 0x2e, 0x37, - 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x70, 0x65, - 0x61, 0x74, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e, 0x3a, 0x20, 0x32, - 0x35, 0x36, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x30, 0x20, 0x3d, 0x20, 0x64, - 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x70, 0x65, 0x6e, 0x61, 0x6c, - 0x74, 0x79, 0x2c, 0x20, 0x2d, 0x31, 0x20, 0x3d, 0x20, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x78, 0x74, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 
0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x70, - 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x3a, 0x20, 0x31, 0x2e, 0x31, 0x38, - 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x31, 0x2e, 0x30, 0x20, 0x3d, 0x20, 0x64, - 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x74, 0x6f, 0x70, 0x5f, 0x6b, 0x3a, 0x20, 0x34, 0x30, 0x2c, - 0x20, 0x2f, 0x2f, 0x20, 0x3c, 0x3d, 0x20, 0x30, 0x20, 0x74, 0x6f, 0x20, - 0x75, 0x73, 0x65, 0x20, 0x76, 0x6f, 0x63, 0x61, 0x62, 0x20, 0x73, 0x69, - 0x7a, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x6f, 0x70, - 0x5f, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x35, 0x2c, 0x20, 0x2f, 0x2f, 0x20, - 0x31, 0x2e, 0x30, 0x20, 0x3d, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x69, 0x6e, - 0x5f, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x35, 0x2c, 0x20, 0x2f, 0x2f, + 0x72, 0x2d, 0x32, 0x3a, 0x20, 0x23, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, + 0x66, 0x66, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x70, 0x6f, 0x70, 0x6f, + 0x76, 0x65, 0x72, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x62, 0x61, + 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x2d, 0x63, 0x6f, 0x6c, + 0x6f, 0x72, 0x3a, 0x20, 0x62, 0x6c, 0x61, 0x63, 0x6b, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x0a, 0x20, 0x20, 0x3c, 0x2f, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3e, 0x0a, + 0x0a, 0x20, 0x20, 0x3c, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x20, 0x74, + 0x79, 0x70, 0x65, 0x3d, 0x22, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x22, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, 0x74, 0x6d, + 0x6c, 0x2c, 0x20, 0x68, 0x2c, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, + 0x2c, 0x20, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x2c, 0x20, 0x63, 0x6f, + 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x2c, 0x20, 0x72, 0x65, 0x6e, 0x64, + 0x65, 0x72, 0x2c, 0x20, 0x75, 0x73, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, + 0x6c, 0x2c, 0x20, 0x75, 0x73, 0x65, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, + 0x2c, 0x20, 0x75, 0x73, 0x65, 0x52, 0x65, 0x66, 0x2c, 0x20, 0x43, 0x6f, + 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x27, 0x2f, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x2e, 0x6a, 0x73, 0x27, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x7b, 0x20, 0x6c, 0x6c, + 0x61, 0x6d, 0x61, 0x20, 0x7d, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x27, + 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x6a, 0x73, 0x27, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x20, 0x7b, 0x20, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x65, 0x72, 0x20, 0x7d, 0x20, + 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x27, 0x2f, 0x6a, 0x73, 0x6f, 0x6e, 0x2d, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2d, 0x74, 0x6f, 0x2d, 0x67, 0x72, + 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x2e, 0x6d, 0x6a, 0x73, 0x27, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x73, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x20, 0x3d, + 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x76, 0x61, 0x72, 0x20, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x20, + 0x3d, 0x20, 0x2d, 0x31, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 
0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x20, 0x3d, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, + 0x3a, 0x20, 0x22, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x61, + 0x20, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x73, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x20, 0x62, 0x65, 0x74, 0x77, 0x65, 0x65, 0x6e, 0x20, 0x55, 0x73, + 0x65, 0x72, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x4c, 0x6c, 0x61, 0x6d, 0x61, + 0x2c, 0x20, 0x61, 0x20, 0x66, 0x72, 0x69, 0x65, 0x6e, 0x64, 0x6c, 0x79, + 0x20, 0x63, 0x68, 0x61, 0x74, 0x62, 0x6f, 0x74, 0x2e, 0x20, 0x4c, 0x6c, + 0x61, 0x6d, 0x61, 0x20, 0x69, 0x73, 0x20, 0x68, 0x65, 0x6c, 0x70, 0x66, + 0x75, 0x6c, 0x2c, 0x20, 0x6b, 0x69, 0x6e, 0x64, 0x2c, 0x20, 0x68, 0x6f, + 0x6e, 0x65, 0x73, 0x74, 0x2c, 0x20, 0x67, 0x6f, 0x6f, 0x64, 0x20, 0x61, + 0x74, 0x20, 0x77, 0x72, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x2c, 0x20, 0x61, + 0x6e, 0x64, 0x20, 0x6e, 0x65, 0x76, 0x65, 0x72, 0x20, 0x66, 0x61, 0x69, + 0x6c, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x61, 0x6e, 0x73, 0x77, 0x65, 0x72, + 0x20, 0x61, 0x6e, 0x79, 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x73, 0x20, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x6c, + 0x79, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x70, + 0x72, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x22, 0x2c, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x3a, 0x20, 0x22, 0x7b, 0x7b, 0x70, 0x72, 0x6f, 0x6d, 0x70, + 0x74, 0x7d, 0x7d, 0x5c, 0x6e, 0x5c, 0x6e, 0x7b, 0x7b, 0x68, 0x69, 0x73, + 0x74, 0x6f, 0x72, 0x79, 0x7d, 0x7d, 0x5c, 0x6e, 0x7b, 0x7b, 0x63, 0x68, + 0x61, 0x72, 0x7d, 0x7d, 0x3a, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3a, 0x20, 0x22, 0x7b, 0x7b, 0x6e, 0x61, + 0x6d, 0x65, 0x7d, 0x7d, 0x3a, 0x20, 0x7b, 0x7b, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x7d, 0x7d, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x3a, 0x20, 0x5b, 0x5d, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x74, 0x79, 0x70, 0x65, 0x3a, 0x20, 0x22, 0x63, 0x68, 0x61, 0x74, 0x22, + 0x2c, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x22, 0x63, 0x68, 0x61, 0x74, 0x22, + 0x20, 0x7c, 0x20, 0x22, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x68, + 0x61, 0x72, 0x3a, 0x20, 0x22, 0x4c, 0x6c, 0x61, 0x6d, 0x61, 0x22, 0x2c, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x72, 0x3a, + 0x20, 0x22, 0x55, 0x73, 0x65, 0x72, 0x22, 0x2c, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x3a, 0x20, 0x27, 0x27, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x20, 0x3d, + 0x20, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x74, 0x3a, 0x20, 0x34, 0x30, 0x30, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x65, 0x72, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x3a, 0x20, 0x30, 0x2e, 0x37, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x6c, 0x61, 0x73, + 0x74, 0x5f, 0x6e, 0x3a, 0x20, 0x32, 0x35, 0x36, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x30, 0x20, 0x3d, 
0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x66, 0x73, 0x5f, - 0x7a, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x31, + 0x20, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x2c, 0x20, 0x2d, 0x31, + 0x20, 0x3d, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x20, 0x73, + 0x69, 0x7a, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, + 0x70, 0x65, 0x61, 0x74, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, + 0x3a, 0x20, 0x31, 0x2e, 0x31, 0x38, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x31, 0x2e, 0x30, 0x20, 0x3d, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x79, 0x70, 0x69, - 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, - 0x2f, 0x2f, 0x20, 0x31, 0x2e, 0x30, 0x20, 0x3d, 0x20, 0x64, 0x69, 0x73, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x65, 0x6e, - 0x61, 0x6c, 0x74, 0x79, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x2f, - 0x2f, 0x20, 0x30, 0x2e, 0x30, 0x20, 0x3d, 0x20, 0x64, 0x69, 0x73, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x70, 0x65, 0x6e, - 0x61, 0x6c, 0x74, 0x79, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x2f, - 0x2f, 0x20, 0x30, 0x2e, 0x30, 0x20, 0x3d, 0x20, 0x64, 0x69, 0x73, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, - 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x3a, 0x20, 0x30, 0x2c, 0x20, - 0x2f, 0x2f, 0x20, 0x30, 0x2f, 0x31, 0x2f, 0x32, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x5f, - 0x74, 0x61, 0x75, 0x3a, 0x20, 0x35, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, - 0x79, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x69, 0x72, 0x6f, - 0x73, 0x74, 0x61, 0x74, 0x5f, 0x65, 0x74, 0x61, 0x3a, 0x20, 0x30, 0x2e, - 0x31, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x6c, 0x65, 0x61, 0x72, 0x6e, 0x69, - 0x6e, 0x67, 0x20, 0x72, 0x61, 0x74, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x3a, 0x20, 0x27, - 0x27, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x5f, 0x70, - 0x72, 0x6f, 0x62, 0x73, 0x3a, 0x20, 0x30, 0x2c, 0x20, 0x2f, 0x2f, 0x20, - 0x6e, 0x6f, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, - 0x69, 0x65, 0x73, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, - 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x20, 0x5b, - 0x5d, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x63, - 0x68, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x3a, 0x20, 0x74, - 0x72, 0x75, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2a, 0x20, 0x53, 0x54, 0x41, 0x52, 0x54, - 0x3a, 0x20, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x66, 0x6f, - 0x72, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x70, 0x72, - 0x6f, 0x6d, 0x70, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x65, 0x74, 0x65, 0x72, 0x73, 0x20, 0x69, 0x6e, 0x20, 0x62, 0x6f, 0x72, - 0x77, 0x73, 0x65, 0x72, 0x20, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x20, 0x2a, 0x2f, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 
0x6e, 0x73, 0x74, 0x20, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x20, 0x3d, 0x20, 0x22, - 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6c, 0x6f, - 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x6f, 0x6d, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x74, 0x61, 0x67, 0x2c, 0x20, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x73, 0x65, 0x74, 0x49, 0x74, 0x65, 0x6d, - 0x28, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, - 0x79, 0x20, 0x2b, 0x20, 0x27, 0x2f, 0x27, 0x20, 0x2b, 0x20, 0x74, 0x61, - 0x67, 0x2c, 0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x2e, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x69, 0x66, 0x79, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, - 0x6f, 0x6d, 0x52, 0x61, 0x77, 0x54, 0x65, 0x78, 0x74, 0x28, 0x74, 0x61, - 0x67, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x20, - 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x73, 0x65, 0x74, + 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x6f, 0x70, 0x5f, + 0x6b, 0x3a, 0x20, 0x34, 0x30, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x3c, 0x3d, + 0x20, 0x30, 0x20, 0x74, 0x6f, 0x20, 0x75, 0x73, 0x65, 0x20, 0x76, 0x6f, + 0x63, 0x61, 0x62, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x3a, 0x20, 0x30, 0x2e, + 0x35, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x31, 0x2e, 0x30, 0x20, 0x3d, 0x20, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x3a, 0x20, 0x30, 0x2e, + 0x30, 0x35, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x30, 0x20, 0x3d, 0x20, 0x64, + 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x74, 0x66, 0x73, 0x5f, 0x7a, 0x3a, 0x20, 0x31, 0x2e, 0x30, + 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x31, 0x2e, 0x30, 0x20, 0x3d, 0x20, 0x64, + 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x74, 0x79, 0x70, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x3a, + 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x31, 0x2e, 0x30, + 0x20, 0x3d, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, + 0x63, 0x65, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x3a, 0x20, + 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x30, 0x2e, 0x30, 0x20, + 0x3d, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, + 0x63, 0x79, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x3a, 0x20, + 0x30, 0x2e, 0x30, 0x2c, 
0x20, 0x2f, 0x2f, 0x20, 0x30, 0x2e, 0x30, 0x20, + 0x3d, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, + 0x74, 0x3a, 0x20, 0x30, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x30, 0x2f, 0x31, + 0x2f, 0x32, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x69, 0x72, + 0x6f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x74, 0x61, 0x75, 0x3a, 0x20, 0x35, + 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, + 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x65, + 0x74, 0x61, 0x3a, 0x20, 0x30, 0x2e, 0x31, 0x2c, 0x20, 0x2f, 0x2f, 0x20, + 0x6c, 0x65, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x72, 0x61, 0x74, + 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x67, 0x72, 0x61, 0x6d, + 0x6d, 0x61, 0x72, 0x3a, 0x20, 0x27, 0x27, 0x2c, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x73, 0x3a, 0x20, + 0x30, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x6e, 0x6f, 0x20, 0x63, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x2c, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x3a, 0x20, 0x5b, 0x5d, 0x2c, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x70, 0x72, 0x6f, + 0x6d, 0x70, 0x74, 0x3a, 0x20, 0x74, 0x72, 0x75, 0x65, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2a, + 0x20, 0x53, 0x54, 0x41, 0x52, 0x54, 0x3a, 0x20, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x73, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x20, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x20, 0x74, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, + 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x20, + 0x69, 0x6e, 0x20, 0x62, 0x6f, 0x72, 0x77, 0x73, 0x65, 0x72, 0x20, 0x4c, + 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x20, + 0x2a, 0x2f, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, + 0x74, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, + 0x65, 0x79, 0x20, 0x3d, 0x20, 0x22, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x63, + 0x70, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, + 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x44, 0x61, 0x74, + 0x61, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, + 0x74, 0x61, 0x67, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x73, + 0x65, 0x74, 0x49, 0x74, 0x65, 0x6d, 0x28, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x20, 0x2b, 0x20, 0x27, 0x2f, + 0x27, 0x20, 0x2b, 0x20, 0x74, 0x61, 0x67, 0x2c, 0x20, 0x4a, 0x53, 0x4f, + 0x4e, 0x2e, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x69, 0x66, 0x79, 0x28, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x29, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x7d, 
0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, + 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x61, 0x77, 0x54, + 0x65, 0x78, 0x74, 0x28, 0x74, 0x61, 0x67, 0x2c, 0x20, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x73, 0x65, 0x74, 0x49, 0x74, 0x65, 0x6d, 0x28, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x20, + 0x2b, 0x20, 0x27, 0x2f, 0x27, 0x20, 0x2b, 0x20, 0x74, 0x61, 0x67, 0x2c, + 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x67, 0x65, 0x74, + 0x44, 0x61, 0x74, 0x61, 0x41, 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x28, 0x74, 0x61, 0x67, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x69, 0x74, 0x65, 0x6d, + 0x20, 0x3d, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x67, 0x65, 0x74, 0x49, 0x74, 0x65, 0x6d, 0x28, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, + 0x20, 0x2b, 0x20, 0x27, 0x2f, 0x27, 0x20, 0x2b, 0x20, 0x74, 0x61, 0x67, + 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, + 0x28, 0x21, 0x69, 0x74, 0x65, 0x6d, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x2e, 0x70, 0x61, 0x72, 0x73, 0x65, 0x28, + 0x69, 0x74, 0x65, 0x6d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x5f, 0x67, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x41, 0x73, 0x52, 0x61, + 0x77, 0x54, 0x65, 0x78, 0x74, 0x28, 0x74, 0x61, 0x67, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x20, 0x69, 0x74, 0x65, 0x6d, 0x20, 0x3d, 0x20, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x65, 0x74, 0x49, 0x74, 0x65, 0x6d, 0x28, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x20, 0x2b, 0x20, 0x27, 0x2f, 0x27, 0x20, - 0x2b, 0x20, 0x74, 0x61, 0x67, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x5f, 0x67, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x41, 0x73, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x74, 0x61, 0x67, 0x29, 0x20, - 0x7b, 0x0a, 0x20, 0x20, 0x20, 
0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, - 0x74, 0x20, 0x69, 0x74, 0x65, 0x6d, 0x20, 0x3d, 0x20, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x65, - 0x74, 0x49, 0x74, 0x65, 0x6d, 0x28, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x20, 0x2b, 0x20, 0x27, 0x2f, 0x27, - 0x20, 0x2b, 0x20, 0x74, 0x61, 0x67, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x69, 0x74, 0x65, 0x6d, - 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, - 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x2e, - 0x70, 0x61, 0x72, 0x73, 0x65, 0x28, 0x69, 0x74, 0x65, 0x6d, 0x29, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x67, 0x65, 0x74, 0x44, 0x61, - 0x74, 0x61, 0x41, 0x73, 0x52, 0x61, 0x77, 0x54, 0x65, 0x78, 0x74, 0x28, - 0x74, 0x61, 0x67, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x69, 0x74, 0x65, 0x6d, 0x20, - 0x3d, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x67, 0x65, 0x74, 0x49, 0x74, 0x65, 0x6d, 0x28, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x20, - 0x2b, 0x20, 0x27, 0x2f, 0x27, 0x20, 0x2b, 0x20, 0x74, 0x61, 0x67, 0x29, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, - 0x21, 0x69, 0x74, 0x65, 0x6d, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, - 0x6e, 0x75, 0x6c, 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, - 0x69, 0x74, 0x65, 0x6d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x2f, 0x2f, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x20, 0x61, - 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x20, 0x66, - 0x6f, 0x72, 0x20, 0x75, 0x73, 0x65, 0x72, 0x20, 0x74, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x73, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, - 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, - 0x20, 0x3d, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x7b, 0x7d, - 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, - 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, - 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x7b, 0x20, 0x6e, 0x61, 0x6d, 0x65, - 0x3a, 0x20, 0x27, 0x27, 0x2c, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x3a, 0x20, 0x7b, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x3a, 0x20, 0x7b, 0x7d, 
0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x3a, 0x20, 0x7b, 0x7d, 0x20, 0x7d, 0x20, 0x7d, 0x29, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x6c, 0x65, 0x74, 0x27, 0x73, - 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x6c, 0x79, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, - 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x69, 0x66, 0x20, - 0x74, 0x68, 0x65, 0x72, 0x65, 0x20, 0x61, 0x72, 0x65, 0x20, 0x61, 0x6e, - 0x79, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x75, 0x73, 0x65, + 0x2b, 0x20, 0x74, 0x61, 0x67, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x69, 0x74, 0x65, 0x6d, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x69, 0x74, 0x65, 0x6d, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x20, 0x61, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x75, 0x73, 0x65, 0x72, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x20, 0x61, 0x72, 0x65, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x20, - 0x69, 0x6e, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x69, 0x6e, 0x20, - 0x66, 0x6f, 0x72, 0x6d, 0x20, 0x6f, 0x66, 0x20, 0x7b, 0x20, 0x22, 0x74, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, - 0x3a, 0x20, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x64, - 0x61, 0x74, 0x61, 0x22, 0x20, 0x7d, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x7b, - 0x20, 0x22, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x74, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3a, - 0x22, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x64, 0x61, 0x74, - 0x61, 0x22, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, - 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x49, - 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x73, 0x61, 0x76, - 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, - 0x27, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, - 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x3d, 0x20, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x67, 0x65, - 0x74, 0x44, 0x61, 0x74, 0x61, 0x41, 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x28, 0x27, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x73, 0x27, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x69, 0x66, 0x20, 0x28, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, - 0x64, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x29, 0x20, - 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, - 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x73, 0x20, 0x77, 0x65, 0x72, 0x65, 0x20, 0x73, 0x75, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 
0x79, 0x20, 0x69, 0x6d, 0x70, 0x6f, - 0x72, 0x74, 0x65, 0x64, 0x2e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, - 0x28, 0x27, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, - 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x27, 0x29, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, - 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, - 0x61, 0x74, 0x61, 0x3a, 0x20, 0x5b, 0x5d, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x63, 0x6f, 0x6e, 0x73, - 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x69, 0x6d, 0x70, 0x6f, - 0x72, 0x74, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x73, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x61, - 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, - 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, - 0x65, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x74, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x2e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x3d, 0x20, - 0x7b, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x73, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x3a, 0x20, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, - 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x28, 0x27, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x27, 0x2c, 0x20, 0x73, 0x61, 0x76, + 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, + 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x3d, 0x20, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x6c, 0x28, 0x7b, 0x7d, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x6e, 0x6f, 0x20, - 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x73, 0x20, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, - 0x2e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, - 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 
0x6f, 0x67, 0x28, 0x27, 0x49, 0x6e, - 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x69, 0x6e, 0x67, 0x20, 0x4c, - 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x20, - 0x61, 0x6e, 0x64, 0x20, 0x73, 0x61, 0x76, 0x69, 0x6e, 0x67, 0x20, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x27, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x22, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x22, 0x3a, 0x20, 0x7b, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x3a, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, - 0x72, 0x6f, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x27, 0x75, - 0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x73, 0x27, 0x2c, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, + 0x74, 0x65, 0x20, 0x3d, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, + 0x7b, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x27, 0x27, 0x2c, 0x20, + 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3a, 0x20, 0x7b, 0x20, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x7b, 0x7d, 0x2c, + 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x3a, 0x20, 0x7b, 0x7d, 0x20, + 0x7d, 0x20, 0x7d, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, + 0x20, 0x6c, 0x65, 0x74, 0x27, 0x73, 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x6c, 0x79, 0x20, 0x73, 0x61, + 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x20, 0x69, 0x66, 0x20, 0x74, 0x68, 0x65, 0x72, 0x65, 0x20, + 0x61, 0x72, 0x65, 0x20, 0x61, 0x6e, 0x79, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x75, 0x73, 0x65, 0x72, 0x20, 0x74, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x73, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x6f, 0x6e, 0x65, + 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x69, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x6d, 0x20, 0x6f, + 0x66, 0x20, 0x7b, 0x20, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x20, 0x22, 0x74, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x61, 0x74, 0x61, 0x22, 0x20, 0x7d, + 0x20, 0x61, 0x6e, 0x64, 0x20, 0x7b, 0x20, 0x22, 0x73, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x22, 0x73, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x64, 0x61, 0x74, 0x61, 0x22, 0x20, 0x7d, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, + 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x69, + 0x6e, 0x67, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x27, 0x29, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 
0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, + 0x3d, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x67, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x41, + 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x27, 0x75, 0x73, 0x65, + 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x27, + 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x69, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x77, 0x65, 0x72, + 0x65, 0x20, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, + 0x79, 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x2e, 0x0a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, + 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x50, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, + 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, + 0x6e, 0x64, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x20, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x20, 0x5b, + 0x5d, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x2f, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, + 0x67, 0x28, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x54, 0x6f, 0x44, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, - 0x6f, 0x67, 0x28, 0x27, 0x52, 0x65, 0x73, 0x65, 0x74, 0x69, 0x6e, 0x67, - 0x20, 0x74, 0x68, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x74, - 0x6f, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x27, 0x29, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, - 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x61, 0x6d, - 0x65, 0x20, 0x3d, 0x20, 0x27, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x27, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, - 0x64, 0x61, 0x74, 0x61, 0x20, 0x3d, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, + 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x54, 0x65, 0x6d, 
0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, + 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x20, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5b, 0x27, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x27, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x74, 0x29, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, - 0x20, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x73, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x69, - 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, - 0x64, 0x3a, 0x20, 0x27, 0x27, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x70, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, - 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x3a, - 0x20, 0x5b, 0x5d, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, - 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x54, 0x6f, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x6c, - 0x79, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x65, 0x74, 0x54, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x28, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, - 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x41, - 0x70, 0x70, 0x6c, 0x79, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, - 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x6e, 0x64, - 0x41, 0x70, 0x70, 0x6c, 0x79, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, - 0x65, 0x64, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x2f, 0x2f, 0x20, 0x67, 0x65, 0x74, 0x20, 0x61, 0x75, 0x74, 0x6f, - 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x6c, 0x61, 0x73, 0x74, 0x20, 0x75, - 0x73, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 
0x20, 0x6c, 0x65, 0x74, 0x20, 0x6c, - 0x61, 0x73, 0x74, 0x55, 0x73, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x67, 0x65, 0x74, 0x44, - 0x61, 0x74, 0x61, 0x41, 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, - 0x27, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x73, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x27, 0x29, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6c, 0x61, - 0x73, 0x74, 0x55, 0x73, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, + 0x6f, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x27, 0x75, 0x73, + 0x65, 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, + 0x27, 0x2c, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, + 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, + 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x6e, 0x6f, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, + 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x64, 0x65, + 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x2e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, - 0x6f, 0x67, 0x28, 0x27, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, - 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x66, - 0x6f, 0x75, 0x6e, 0x64, 0x2c, 0x20, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, - 0x69, 0x6e, 0x67, 0x27, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x6f, 0x67, 0x28, 0x27, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 0x69, 0x6e, 0x67, 0x20, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x73, 0x61, + 0x76, 0x69, 0x6e, 0x67, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x27, 0x29, 0x0a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, + 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, + 0x22, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x22, 0x3a, 0x20, 0x7b, + 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, + 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x3a, 0x20, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x20, + 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, + 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x28, 0x27, 0x75, 
0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x27, 0x2c, 0x20, 0x73, 0x61, + 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x73, 0x65, 0x72, + 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, + 0x74, 0x54, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x52, 0x65, + 0x73, 0x65, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, + 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x3d, 0x20, 0x27, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x27, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x6c, 0x61, 0x73, 0x74, - 0x55, 0x73, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, - 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x4e, 0x6f, 0x20, 0x61, - 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x2c, - 0x20, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x27, - 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, - 0x20, 0x6e, 0x6f, 0x20, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, - 0x64, 0x20, 0x6c, 0x61, 0x73, 0x74, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, - 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x77, 0x61, 0x73, - 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x2c, 0x20, 0x73, 0x6f, 0x20, 0x6c, - 0x6f, 0x61, 0x64, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x54, 0x6f, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, - 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, - 0x41, 0x70, 0x70, 0x6c, 0x79, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x74, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 
0x6c, 0x79, 0x28, 0x73, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x2f, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, - 0x6f, 0x67, 0x28, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, - 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x63, - 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x73, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x20, 0x3d, + 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x5b, 0x27, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x27, 0x5d, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x73, + 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x41, 0x70, + 0x70, 0x6c, 0x79, 0x28, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x74, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, + 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x3a, 0x20, 0x27, 0x27, 0x20, + 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, + 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, + 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, + 0x5f, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x20, 0x5b, 0x5d, 0x20, 0x7d, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x73, 0x65, + 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x65, 0x74, 0x54, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x41, + 0x6e, 0x64, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x29, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x54, + 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, - 0x65, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 
0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, - 0x27, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, - 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x2e, 0x2e, 0x2e, 0x27, 0x29, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x27, 0x64, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x27, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x77, 0x65, 0x20, - 0x64, 0x6f, 0x6e, 0x27, 0x74, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20, 0x74, - 0x6f, 0x20, 0x73, 0x61, 0x76, 0x65, 0x20, 0x6f, 0x76, 0x65, 0x72, 0x20, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x20, 0x73, 0x6f, 0x20, 0x6c, 0x65, 0x74, - 0x27, 0x73, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x20, 0x61, 0x20, - 0x6e, 0x65, 0x77, 0x20, 0x6f, 0x6e, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x6e, 0x65, 0x77, 0x54, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x20, - 0x3d, 0x20, 0x27, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x2d, 0x27, 0x20, 0x2b, 0x20, 0x44, 0x61, 0x74, 0x65, - 0x2e, 0x6e, 0x6f, 0x77, 0x28, 0x29, 0x2e, 0x74, 0x6f, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x28, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x6e, 0x65, 0x77, 0x54, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x27, 0x6e, - 0x61, 0x6d, 0x65, 0x27, 0x3a, 0x20, 0x6e, 0x65, 0x77, 0x54, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x27, - 0x64, 0x61, 0x74, 0x61, 0x27, 0x3a, 0x20, 0x7b, 0x20, 0x27, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x27, 0x3a, 0x20, 0x73, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x27, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x27, 0x3a, 0x20, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x20, - 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, - 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, - 0x53, 0x61, 0x76, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x73, 0x20, 0x27, 0x20, - 0x2b, 0x20, 0x6e, 0x65, 0x77, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x61, 0x76, 0x65, 0x20, - 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x75, 0x74, 0x6f, 0x73, - 0x61, 0x76, 0x65, 0x20, 0x73, 0x6c, 0x6f, 0x74, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x44, 0x61, - 0x74, 0x61, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x28, 0x27, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x73, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x27, 0x2c, 0x20, - 0x6e, 0x65, 0x77, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x29, + 0x65, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x75, + 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4c, + 0x6f, 0x61, 0x64, 0x41, 0x6e, 0x64, 0x41, 0x70, 
0x70, 0x6c, 0x79, 0x41, + 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x64, 0x28, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x67, 0x65, + 0x74, 0x20, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, + 0x6c, 0x61, 0x73, 0x74, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x6c, 0x65, 0x74, 0x20, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x73, 0x65, + 0x64, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x3d, 0x20, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x5f, 0x67, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x41, 0x73, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x27, 0x75, 0x73, 0x65, 0x72, 0x5f, + 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x5f, 0x6c, 0x61, + 0x73, 0x74, 0x27, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x73, 0x65, 0x64, + 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x29, 0x20, 0x7b, 0x0a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x41, 0x75, + 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x2c, 0x20, + 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x27, 0x29, 0x0a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, + 0x3d, 0x20, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x73, 0x65, 0x64, 0x54, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x65, 0x6c, 0x73, + 0x65, 0x20, 0x7b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, + 0x28, 0x27, 0x4e, 0x6f, 0x20, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, + 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, + 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x2c, 0x20, 0x75, 0x73, 0x69, 0x6e, 0x67, + 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x74, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x6e, 0x6f, 0x20, 0x61, 0x75, + 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x6c, 0x61, 0x73, 0x74, + 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x20, 0x77, 0x61, 0x73, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, + 0x2c, 0x20, 0x73, 0x6f, 0x20, 0x6c, 0x6f, 0x61, 0x64, 0x20, 0x66, 0x72, + 0x6f, 0x6d, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x0a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, + 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x65, 0x74, 0x54, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, + 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, + 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x69, + 0x6e, 0x67, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x27, + 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x61, + 0x6e, 0x64, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x20, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x64, 
0x61, 0x74, 0x61, 0x20, + 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x73, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, + 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x41, 0x70, + 0x70, 0x6c, 0x79, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x63, 0x6f, 0x6e, + 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x73, 0x61, 0x76, + 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, + 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x75, + 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x41, + 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x28, 0x29, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, + 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, + 0x2e, 0x2e, 0x2e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x20, + 0x3d, 0x3d, 0x20, 0x27, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x27, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x77, 0x65, 0x20, 0x64, 0x6f, 0x6e, 0x27, 0x74, 0x20, + 0x77, 0x61, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x73, 0x61, 0x76, 0x65, + 0x20, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x20, + 0x73, 0x6f, 0x20, 0x6c, 0x65, 0x74, 0x27, 0x73, 0x20, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x20, 0x61, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x6f, 0x6e, + 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, + 0x74, 0x20, 0x6e, 0x65, 0x77, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x20, 0x3d, 0x20, 0x27, 0x55, 0x73, 0x65, + 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2d, 0x27, 0x20, + 0x2b, 0x20, 0x44, 0x61, 0x74, 0x65, 0x2e, 0x6e, 0x6f, 0x77, 0x28, 0x29, + 0x2e, 0x74, 0x6f, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x28, 0x29, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, + 0x6e, 0x65, 0x77, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, + 0x3d, 0x20, 0x7b, 0x20, 0x27, 0x6e, 0x61, 0x6d, 0x65, 0x27, 0x3a, 0x20, + 0x6e, 0x65, 0x77, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x27, 0x64, 0x61, 0x74, 0x61, 0x27, 0x3a, + 0x20, 0x7b, 0x20, 0x27, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x27, + 0x3a, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x27, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x27, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x20, 0x7d, 0x0a, 
0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, + 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x53, 0x61, 0x76, 0x69, 0x6e, 0x67, + 0x20, 0x61, 0x73, 0x20, 0x27, 0x20, 0x2b, 0x20, 0x6e, 0x65, 0x77, 0x54, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, - 0x20, 0x61, 0x6e, 0x64, 0x20, 0x6c, 0x6f, 0x61, 0x64, 0x20, 0x69, 0x74, - 0x20, 0x62, 0x61, 0x63, 0x6b, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, 0x70, - 0x70, 0x6c, 0x79, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x6c, 0x79, - 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x64, 0x28, 0x29, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, + 0x20, 0x73, 0x61, 0x76, 0x65, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x20, 0x73, 0x6c, + 0x6f, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x27, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x5f, 0x6c, - 0x61, 0x73, 0x74, 0x27, 0x2c, 0x20, 0x7b, 0x20, 0x27, 0x6e, 0x61, 0x6d, - 0x65, 0x27, 0x3a, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, - 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x2c, - 0x20, 0x27, 0x64, 0x61, 0x74, 0x61, 0x27, 0x3a, 0x20, 0x7b, 0x20, 0x27, - 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x27, 0x3a, 0x20, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, - 0x20, 0x27, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x27, 0x3a, 0x20, 0x70, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, - 0x7d, 0x20, 0x7d, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, - 0x27, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x20, 0x66, 0x6f, - 0x72, 0x20, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, - 0x6c, 0x61, 0x73, 0x74, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x6c, - 0x79, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x64, 0x28, 0x29, - 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2a, 0x20, 0x45, 0x4e, 0x44, - 0x3a, 0x20, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x66, 0x6f, - 0x72, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x70, 0x72, - 0x6f, 0x6d, 0x70, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x65, 0x74, 0x65, 0x72, 0x73, 0x20, 0x69, 0x6e, 0x20, 0x62, 0x72, 0x6f, - 0x77, 0x73, 0x65, 0x72, 0x73, 0x20, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x20, 0x2a, 0x2f, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c, 
0x6c, 0x61, - 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x73, 0x20, 0x3d, 0x20, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, - 0x6e, 0x74, 0x6c, 0x79, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, - 0x74, 0x69, 0x6f, 0x6e, 0x3f, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, - 0x6e, 0x73, 0x74, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6e, 0x67, 0x20, 0x3d, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, - 0x64, 0x28, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x63, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x20, 0x21, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x68, 0x61, 0x73, 0x20, 0x74, 0x68, - 0x65, 0x20, 0x75, 0x73, 0x65, 0x72, 0x20, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x65, 0x64, 0x20, 0x61, 0x20, 0x63, 0x68, 0x61, 0x74, 0x3f, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, 0x61, - 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x20, 0x3d, 0x20, 0x63, + 0x61, 0x73, 0x74, 0x27, 0x2c, 0x20, 0x6e, 0x65, 0x77, 0x54, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x6c, + 0x6f, 0x61, 0x64, 0x20, 0x69, 0x74, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x20, + 0x61, 0x6e, 0x64, 0x20, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x6e, + 0x64, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, + 0x76, 0x65, 0x64, 0x28, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x28, 0x27, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x73, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x27, 0x2c, 0x20, + 0x7b, 0x20, 0x27, 0x6e, 0x61, 0x6d, 0x65, 0x27, 0x3a, 0x20, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x27, 0x64, 0x61, 0x74, 0x61, + 0x27, 0x3a, 0x20, 0x7b, 0x20, 0x27, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x27, 0x3a, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x27, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x27, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x20, 0x7d, 0x29, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, + 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x69, 0x6e, 0x67, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x75, 0x74, 0x6f, + 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x6c, 0x61, 0x73, 0x74, 
0x20, 0x75, + 0x73, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x61, 0x64, 0x41, + 0x6e, 0x64, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x41, 0x75, 0x74, 0x6f, 0x73, + 0x61, 0x76, 0x65, 0x64, 0x28, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x2f, 0x2a, 0x20, 0x45, 0x4e, 0x44, 0x3a, 0x20, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x73, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x20, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x20, 0x74, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, + 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x20, + 0x69, 0x6e, 0x20, 0x62, 0x72, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x73, 0x20, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x20, 0x2a, 0x2f, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x74, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x20, 0x3d, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x6e, + 0x75, 0x6c, 0x6c, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x20, 0x3d, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x6e, + 0x75, 0x6c, 0x6c, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, + 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x20, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, + 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x3f, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x3d, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x28, 0x28, 0x29, 0x20, 0x3d, - 0x3e, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x3e, 0x20, - 0x30, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, - 0x74, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x28, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x29, 0x20, 0x3d, 0x3e, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, - 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x0a, + 0x3e, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x21, 0x3d, 0x20, 0x6e, 0x75, + 0x6c, 0x6c, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x68, 0x61, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x75, 0x73, 0x65, 0x72, + 0x20, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x20, 0x61, 0x20, 0x63, + 0x68, 0x61, 0x74, 0x3f, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x74, 0x20, 0x63, 0x68, 0x61, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x65, 0x64, 0x20, 0x3d, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, + 0x64, 0x28, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 
0x74, 0x72, + 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x2e, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x20, 0x3e, 0x20, 0x30, 0x29, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x20, 0x3d, 0x20, 0x28, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x74, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x72, 0x65, 0x70, 0x6c, + 0x61, 0x63, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, + 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x3d, + 0x20, 0x28, 0x73, 0x74, 0x72, 0x2c, 0x20, 0x65, 0x78, 0x74, 0x72, 0x61, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x3d, 0x3e, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, + 0x20, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x3d, 0x20, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, + 0x28, 0x65, 0x78, 0x74, 0x72, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x3d, + 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x2c, 0x20, 0x2e, 0x2e, 0x2e, 0x65, 0x78, 0x74, 0x72, 0x61, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x69, - 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x28, 0x73, 0x74, 0x72, 0x2c, - 0x20, 0x65, 0x78, 0x74, 0x72, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x73, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x20, 0x3d, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x65, 0x78, 0x74, 0x72, 0x61, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x74, 0x74, - 0x69, 0x6e, 0x67, 0x73, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, - 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2c, 0x20, 0x2e, 0x2e, - 0x2e, 0x65, 0x78, 0x74, 0x72, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 
0x74, 0x75, - 0x72, 0x6e, 0x20, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x28, 0x73, 0x74, - 0x72, 0x29, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x41, 0x6c, - 0x6c, 0x28, 0x2f, 0x5c, 0x7b, 0x5c, 0x7b, 0x28, 0x2e, 0x2a, 0x3f, 0x29, - 0x5c, 0x7d, 0x5c, 0x7d, 0x2f, 0x67, 0x2c, 0x20, 0x28, 0x5f, 0x2c, 0x20, - 0x6b, 0x65, 0x79, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x74, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x28, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x5b, 0x6b, 0x65, 0x79, 0x5d, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x61, 0x73, 0x79, - 0x6e, 0x63, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, - 0x72, 0x75, 0x6e, 0x4c, 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, 0x72, 0x6f, - 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, 0x63, 0x68, 0x61, 0x72, 0x29, 0x20, - 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, - 0x74, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x73, 0x20, 0x3d, 0x20, 0x5b, 0x5d, 0x3b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x20, 0x3d, 0x20, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x3b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, + 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x28, 0x73, 0x74, 0x72, 0x29, 0x2e, 0x72, 0x65, 0x70, + 0x6c, 0x61, 0x63, 0x65, 0x41, 0x6c, 0x6c, 0x28, 0x2f, 0x5c, 0x7b, 0x5c, + 0x7b, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5c, 0x7d, 0x5c, 0x7d, 0x2f, 0x67, + 0x2c, 0x20, 0x28, 0x5f, 0x2c, 0x20, 0x6b, 0x65, 0x79, 0x29, 0x20, 0x3d, + 0x3e, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x28, 0x73, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x5b, 0x6b, 0x65, 0x79, 0x5d, + 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x20, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x72, 0x75, 0x6e, 0x4c, 0x6c, 0x61, + 0x6d, 0x61, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x6c, + 0x6c, 0x61, 0x6d, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, + 0x63, 0x68, 0x61, 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x20, + 0x3d, 0x20, 0x5b, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, + 0x79, 0x20, 0x3d, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, + 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x72, 0x6f, + 0x77, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x28, + 0x22, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x20, 0x72, 0x75, 0x6e, + 0x6e, 0x69, 0x6e, 0x67, 0x22, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, 
0x61, 0x6c, - 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x74, 0x68, 0x72, 0x6f, 0x77, 0x20, 0x6e, 0x65, 0x77, 0x20, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x22, 0x61, 0x6c, 0x72, 0x65, 0x61, - 0x64, 0x79, 0x20, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0x29, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, - 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x6e, - 0x65, 0x77, 0x20, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x77, 0x61, 0x69, - 0x74, 0x20, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, 0x75, - 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x28, - 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x6c, 0x6c, 0x61, 0x6d, - 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, 0x7b, 0x20, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x3a, 0x20, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x29, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, - 0x64, 0x61, 0x74, 0x61, 0x20, 0x3d, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, - 0x2e, 0x64, 0x61, 0x74, 0x61, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x73, 0x74, 0x6f, 0x70, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x77, 0x68, 0x69, 0x6c, 0x65, - 0x20, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, - 0x68, 0x20, 0x3e, 0x20, 0x30, 0x20, 0x26, 0x26, 0x0a, 0x20, 0x20, 0x20, + 0x75, 0x65, 0x20, 0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x41, 0x62, 0x6f, + 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, + 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, + 0x72, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20, 0x28, 0x63, 0x6f, 0x6e, + 0x73, 0x74, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, + 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, + 0x2c, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x2c, 0x20, 0x7b, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x6c, 0x65, 0x72, 0x3a, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x29, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x3d, + 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x3b, + 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, + 0x20, 0x28, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x73, 0x74, 0x6f, 0x70, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x77, 0x68, 0x69, 0x6c, 0x65, 0x20, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, - 0x5b, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 
0x20, - 0x2d, 0x20, 0x31, 0x5d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x28, 0x2f, 0x5c, 0x6e, 0x24, 0x2f, - 0x29, 0x20, 0x21, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x20, 0x7b, 0x0a, + 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x3e, 0x20, 0x30, 0x20, + 0x26, 0x26, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x5b, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x2d, 0x20, 0x31, 0x5d, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x28, 0x2f, 0x5c, 0x6e, 0x24, 0x2f, 0x29, 0x20, 0x21, 0x3d, 0x20, 0x6e, + 0x75, 0x6c, 0x6c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x6f, + 0x70, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x28, 0x5b, 0x2e, 0x2e, 0x2e, + 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2c, 0x20, 0x5b, 0x63, 0x68, + 0x61, 0x72, 0x2c, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x5d, 0x5d, 0x29, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x22, 0x43, 0x6f, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x69, 0x6e, + 0x69, 0x73, 0x68, 0x65, 0x64, 0x3a, 0x20, 0x27, 0x22, 0x2c, 0x20, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x73, 0x2e, 0x6d, 0x61, 0x70, 0x28, 0x6d, 0x73, 0x67, 0x20, 0x3d, + 0x3e, 0x20, 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x29, 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x28, 0x27, 0x27, 0x29, 0x2c, + 0x20, 0x22, 0x27, 0x2c, 0x20, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x3a, 0x20, 0x22, 0x2c, 0x20, 0x64, 0x61, 0x74, 0x61, 0x29, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, + 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x75, 0x73, 0x68, 0x28, + 0x64, 0x61, 0x74, 0x61, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, + 0x20, 0x3d, 0x20, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x73, 0x6c, 0x6f, 0x74, + 0x5f, 0x69, 0x64, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x20, 0x26, 0x26, + 0x20, 0x21, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x6d, 0x6f, 0x64, 0x61, 0x6c, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x61, 0x6c, 0x65, + 0x72, 0x74, 0x28, 0x22, 0x54, 0x68, 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x20, 0x77, 0x61, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 
0x63, + 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6d, 0x6f, 0x64, 0x61, 0x6c, 0x20, 0x6f, + 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x20, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x20, 0x63, 0x61, + 0x6e, 0x27, 0x74, 0x20, 0x62, 0x65, 0x20, 0x6c, 0x6f, 0x61, 0x64, 0x65, + 0x64, 0x2e, 0x22, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x28, 0x5b, 0x2e, 0x2e, 0x2e, 0x68, 0x69, 0x73, + 0x74, 0x6f, 0x72, 0x79, 0x2c, 0x20, 0x5b, 0x63, 0x68, 0x61, 0x72, 0x2c, + 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x73, 0x5d, 0x5d, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6c, 0x61, + 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x20, 0x3d, 0x20, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x74, 0x69, 0x6d, + 0x69, 0x6e, 0x67, 0x73, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, + 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x65, + 0x6e, 0x64, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x74, + 0x6f, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, 0x61, 0x74, 0x20, + 0x3d, 0x20, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x20, 0x28, 0x6d, 0x73, 0x67, + 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x61, 0x6c, + 0x72, 0x65, 0x61, 0x64, 0x79, 0x20, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, + 0x67, 0x2e, 0x2e, 0x2e, 0x27, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x28, 0x5b, 0x2e, 0x2e, 0x2e, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x2c, 0x20, 0x5b, 0x22, 0x7b, 0x7b, 0x75, 0x73, 0x65, 0x72, 0x7d, 0x7d, + 0x22, 0x2c, 0x20, 0x6d, 0x73, 0x67, 0x5d, 0x5d, 0x29, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x70, 0x72, 0x6f, + 0x6d, 0x70, 0x74, 0x20, 0x3d, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x28, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 
0x76, + 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x2c, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x3a, 0x20, 0x6d, 0x73, + 0x67, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, + 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x3a, 0x20, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x2e, 0x66, 0x6c, 0x61, + 0x74, 0x4d, 0x61, 0x70, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x28, 0x5b, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, + 0x64, 0x61, 0x74, 0x61, 0x5d, 0x29, 0x20, 0x3d, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, + 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x73, 0x2e, 0x70, 0x6f, 0x70, 0x28, 0x29, 0x3b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x61, - 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x28, 0x5b, 0x2e, 0x2e, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x2c, 0x20, 0x5b, 0x63, 0x68, 0x61, 0x72, 0x2c, 0x20, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x73, 0x5d, 0x5d, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, - 0x6f, 0x67, 0x28, 0x22, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, - 0x6f, 0x6e, 0x20, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x3a, - 0x20, 0x27, 0x22, 0x2c, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x6d, 0x61, 0x70, - 0x28, 0x6d, 0x73, 0x67, 0x20, 0x3d, 0x3e, 0x20, 0x6d, 0x73, 0x67, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x2e, 0x6a, 0x6f, 0x69, - 0x6e, 0x28, 0x27, 0x27, 0x29, 0x2c, 0x20, 0x22, 0x27, 0x2c, 0x20, 0x73, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x3a, 0x20, 0x22, 0x2c, 0x20, 0x64, - 0x61, 0x74, 0x61, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, - 0x2e, 0x70, 0x75, 0x73, 0x68, 0x28, 0x64, 0x61, 0x74, 0x61, 0x29, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, - 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x20, 0x3d, 0x20, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x3b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, - 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x6d, - 0x61, 0x67, 0x65, 0x20, 0x26, 0x26, 0x20, 0x21, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6d, 0x6f, 0x64, 0x61, 0x6c, 
0x29, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x28, 0x22, 0x54, 0x68, - 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, 0x77, 0x61, 0x73, - 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, - 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6d, - 0x6f, 0x64, 0x61, 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, - 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x20, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x20, 0x63, 0x61, 0x6e, 0x27, 0x74, 0x20, 0x62, 0x65, - 0x20, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x2e, 0x22, 0x29, 0x3b, 0x0a, + 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x3a, 0x20, 0x41, 0x72, 0x72, 0x61, + 0x79, 0x2e, 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, 0x28, 0x64, 0x61, + 0x74, 0x61, 0x29, 0x20, 0x3f, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x28, 0x5b, - 0x2e, 0x2e, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2c, 0x20, - 0x5b, 0x63, 0x68, 0x61, 0x72, 0x2c, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, - 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x5d, 0x5d, - 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, - 0x28, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, - 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x3b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x65, 0x6e, 0x64, 0x20, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, - 0x20, 0x63, 0x68, 0x61, 0x74, 0x20, 0x3d, 0x20, 0x61, 0x73, 0x79, 0x6e, - 0x63, 0x20, 0x28, 0x6d, 0x73, 0x67, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, - 0x6f, 0x67, 0x28, 0x27, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x20, - 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x2e, 0x2e, 0x27, 0x29, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, - 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 
0x61, - 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x28, 0x5b, 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x2c, 0x20, 0x5b, 0x22, 0x7b, 0x7b, - 0x75, 0x73, 0x65, 0x72, 0x7d, 0x7d, 0x22, 0x2c, 0x20, 0x6d, 0x73, 0x67, - 0x5d, 0x5d, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, - 0x65, 0x74, 0x20, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x20, 0x3d, 0x20, - 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x28, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x20, 0x7b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x3a, 0x20, 0x6d, 0x73, 0x67, 0x2c, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x3a, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x2e, 0x66, 0x6c, 0x61, 0x74, 0x4d, 0x61, 0x70, 0x28, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x5b, - 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x64, 0x61, 0x74, 0x61, 0x5d, 0x29, - 0x20, 0x3d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x0a, 0x20, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6d, 0x61, 0x70, 0x28, 0x6d, 0x73, 0x67, + 0x20, 0x3d, 0x3e, 0x20, 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x29, 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x28, 0x27, 0x27, + 0x29, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5e, + 0x5c, 0x73, 0x2f, 0x2c, 0x20, 0x27, 0x27, 0x29, 0x20, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x2c, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x3a, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x2e, 0x69, 0x73, 0x41, 0x72, - 0x72, 0x61, 0x79, 0x28, 0x64, 0x61, 0x74, 0x61, 0x29, 0x20, 0x3f, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6d, - 0x61, 0x70, 0x28, 0x6d, 0x73, 0x67, 0x20, 0x3d, 0x3e, 0x20, 0x6d, 0x73, - 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x2e, 0x6a, - 0x6f, 0x69, 0x6e, 0x28, 0x27, 0x27, 0x29, 0x2e, 0x72, 0x65, 0x70, 0x6c, - 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5e, 0x5c, 0x73, 0x2f, 0x2c, 0x20, 0x27, - 0x27, 0x29, 0x20, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, - 0x61, 0x74, 0x61, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x0a, 0x20, 
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x2e, 0x6a, 0x6f, 0x69, - 0x6e, 0x28, 0x22, 0x5c, 0x6e, 0x22, 0x29, 0x2c, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, - 0x64, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x72, 0x6f, 0x6d, 0x70, - 0x74, 0x20, 0x3d, 0x20, 0x60, 0x41, 0x20, 0x63, 0x68, 0x61, 0x74, 0x20, - 0x62, 0x65, 0x74, 0x77, 0x65, 0x65, 0x6e, 0x20, 0x61, 0x20, 0x63, 0x75, - 0x72, 0x69, 0x6f, 0x75, 0x73, 0x20, 0x68, 0x75, 0x6d, 0x61, 0x6e, 0x20, - 0x61, 0x6e, 0x64, 0x20, 0x61, 0x6e, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x69, 0x61, 0x6c, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x6c, 0x6c, - 0x69, 0x67, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x61, 0x73, 0x73, 0x69, 0x73, - 0x74, 0x61, 0x6e, 0x74, 0x2e, 0x20, 0x54, 0x68, 0x65, 0x20, 0x61, 0x73, - 0x73, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 0x67, 0x69, 0x76, 0x65, - 0x73, 0x20, 0x68, 0x65, 0x6c, 0x70, 0x66, 0x75, 0x6c, 0x2c, 0x20, 0x64, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x2c, 0x20, 0x61, 0x6e, 0x64, - 0x20, 0x70, 0x6f, 0x6c, 0x69, 0x74, 0x65, 0x20, 0x61, 0x6e, 0x73, 0x77, - 0x65, 0x72, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x68, - 0x75, 0x6d, 0x61, 0x6e, 0x27, 0x73, 0x20, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x5c, 0x6e, 0x55, 0x53, 0x45, 0x52, 0x3a, - 0x5b, 0x69, 0x6d, 0x67, 0x2d, 0x31, 0x30, 0x5d, 0x24, 0x7b, 0x6d, 0x73, - 0x67, 0x7d, 0x5c, 0x6e, 0x41, 0x53, 0x53, 0x49, 0x53, 0x54, 0x41, 0x4e, - 0x54, 0x3a, 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, - 0x20, 0x72, 0x75, 0x6e, 0x4c, 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, 0x72, - 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, - 0x3a, 0x20, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x3a, - 0x20, 0x5b, 0x22, 0x3c, 0x2f, 0x73, 0x3e, 0x22, 0x2c, 0x20, 0x74, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x28, 0x22, 0x7b, 0x7b, 0x63, 0x68, - 0x61, 0x72, 0x7d, 0x7d, 0x3a, 0x22, 0x29, 0x2c, 0x20, 0x74, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x28, 0x22, 0x7b, 0x7b, 0x75, 0x73, 0x65, - 0x72, 0x7d, 0x7d, 0x3a, 0x22, 0x29, 0x5d, 0x2c, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x7d, 0x2c, 0x20, 0x22, 0x7b, 0x7b, 0x63, 0x68, 0x61, - 0x72, 0x7d, 0x7d, 0x22, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, - 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, - 0x72, 0x75, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, - 0x6e, 0x20, 0x3d, 0x20, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x20, 0x28, 0x29, + 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x29, 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x28, 0x22, 0x5c, 0x6e, 0x22, + 0x29, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x6d, 0x61, 0x67, + 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 
0x20, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x20, 0x3d, 0x20, 0x60, 0x41, + 0x20, 0x63, 0x68, 0x61, 0x74, 0x20, 0x62, 0x65, 0x74, 0x77, 0x65, 0x65, + 0x6e, 0x20, 0x61, 0x20, 0x63, 0x75, 0x72, 0x69, 0x6f, 0x75, 0x73, 0x20, + 0x68, 0x75, 0x6d, 0x61, 0x6e, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, 0x6e, + 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x69, 0x61, 0x6c, 0x20, + 0x69, 0x6e, 0x74, 0x65, 0x6c, 0x6c, 0x69, 0x67, 0x65, 0x6e, 0x63, 0x65, + 0x20, 0x61, 0x73, 0x73, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x2e, 0x20, + 0x54, 0x68, 0x65, 0x20, 0x61, 0x73, 0x73, 0x69, 0x73, 0x74, 0x61, 0x6e, + 0x74, 0x20, 0x67, 0x69, 0x76, 0x65, 0x73, 0x20, 0x68, 0x65, 0x6c, 0x70, + 0x66, 0x75, 0x6c, 0x2c, 0x20, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, + 0x64, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x70, 0x6f, 0x6c, 0x69, 0x74, + 0x65, 0x20, 0x61, 0x6e, 0x73, 0x77, 0x65, 0x72, 0x73, 0x20, 0x74, 0x6f, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x68, 0x75, 0x6d, 0x61, 0x6e, 0x27, 0x73, + 0x20, 0x71, 0x75, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x5c, + 0x6e, 0x55, 0x53, 0x45, 0x52, 0x3a, 0x5b, 0x69, 0x6d, 0x67, 0x2d, 0x31, + 0x30, 0x5d, 0x24, 0x7b, 0x6d, 0x73, 0x67, 0x7d, 0x5c, 0x6e, 0x41, 0x53, + 0x53, 0x49, 0x53, 0x54, 0x41, 0x4e, 0x54, 0x3a, 0x60, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20, 0x72, 0x75, 0x6e, 0x4c, 0x6c, + 0x61, 0x6d, 0x61, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x2e, + 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, + 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x3a, 0x20, 0x73, 0x6c, 0x6f, 0x74, + 0x5f, 0x69, 0x64, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x73, 0x74, 0x6f, 0x70, 0x3a, 0x20, 0x5b, 0x22, 0x3c, 0x2f, 0x73, + 0x3e, 0x22, 0x2c, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x28, 0x22, 0x7b, 0x7b, 0x63, 0x68, 0x61, 0x72, 0x7d, 0x7d, 0x3a, 0x22, + 0x29, 0x2c, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x28, + 0x22, 0x7b, 0x7b, 0x75, 0x73, 0x65, 0x72, 0x7d, 0x7d, 0x3a, 0x22, 0x29, + 0x5d, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x2c, 0x20, + 0x22, 0x7b, 0x7b, 0x63, 0x68, 0x61, 0x72, 0x7d, 0x7d, 0x22, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x72, 0x75, 0x6e, 0x43, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x3d, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, @@ -1091,494 +1101,524 @@ unsigned char index_html[] = { 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x2c, 0x20, 0x5b, 0x22, 0x22, 0x2c, 0x20, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x5d, 0x5d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20, 0x72, 0x75, 0x6e, 0x4c, - 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, - 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x3a, 0x20, 0x73, 0x6c, 0x6f, - 0x74, 0x5f, 0x69, 0x64, 0x2c, 
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x3a, 0x20, 0x5b, 0x5d, 0x2c, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x2c, 0x20, 0x22, 0x22, 0x29, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x20, - 0x3d, 0x20, 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, - 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x61, 0x62, 0x6f, 0x72, - 0x74, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, - 0x73, 0x74, 0x20, 0x72, 0x65, 0x73, 0x65, 0x74, 0x20, 0x3d, 0x20, 0x28, + 0x20, 0x20, 0x72, 0x75, 0x6e, 0x4c, 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, + 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, + 0x64, 0x3a, 0x20, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x6f, 0x70, + 0x3a, 0x20, 0x5b, 0x5d, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x2c, 0x20, 0x22, 0x22, 0x29, 0x2e, 0x66, 0x69, 0x6e, 0x61, 0x6c, + 0x6c, 0x79, 0x28, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x6d, 0x70, 0x74, 0x20, 0x3d, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x2e, 0x6d, 0x61, 0x70, 0x28, 0x28, + 0x5b, 0x5f, 0x2c, 0x20, 0x64, 0x61, 0x74, 0x61, 0x5d, 0x29, 0x20, 0x3d, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x41, 0x72, 0x72, 0x61, 0x79, 0x2e, 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, + 0x79, 0x28, 0x64, 0x61, 0x74, 0x61, 0x29, 0x20, 0x3f, 0x20, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x6d, 0x61, 0x70, 0x28, 0x6d, 0x73, 0x67, 0x20, 0x3d, + 0x3e, 0x20, 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x29, 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x28, 0x27, 0x27, 0x29, 0x20, + 0x3a, 0x20, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x29, 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x28, 0x27, 0x27, + 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x20, + 0x3d, 0x20, 0x5b, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 
0x73, 0x74, 0x20, 0x73, 0x74, 0x6f, 0x70, + 0x20, 0x3d, 0x20, 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, + 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x61, 0x62, 0x6f, + 0x72, 0x74, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x6e, 0x75, 0x6c, + 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x20, 0x72, 0x65, 0x73, 0x65, 0x74, 0x20, 0x3d, 0x20, + 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x28, 0x65, 0x29, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x28, 0x5b, + 0x5d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x20, 0x3d, 0x20, 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x28, 0x65, 0x29, 0x3b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x28, 0x5b, 0x5d, - 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x70, 0x6c, 0x6f, - 0x61, 0x64, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x20, 0x3d, 0x20, 0x28, 0x65, - 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x67, 0x65, 0x74, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x79, - 0x49, 0x64, 0x28, 0x22, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x70, 0x75, - 0x74, 0x22, 0x29, 0x2e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x28, 0x29, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x67, 0x65, 0x74, 0x45, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x42, 0x79, 0x49, 0x64, 0x28, 0x22, 0x66, 0x69, 0x6c, 0x65, - 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x29, 0x2e, 0x61, 0x64, 0x64, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, - 0x28, 0x22, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x2c, 0x20, 0x66, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x28, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x65, 0x6c, 0x65, - 0x63, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x20, 0x3d, 0x20, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, - 0x66, 0x69, 0x6c, 0x65, 0x73, 0x5b, 0x30, 0x5d, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 
0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x29, 0x20, + 0x20, 0x20, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x44, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x67, 0x65, 0x74, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, + 0x79, 0x49, 0x64, 0x28, 0x22, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x22, 0x29, 0x2e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x28, 0x29, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x67, 0x65, 0x74, 0x45, 0x6c, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x42, 0x79, 0x49, 0x64, 0x28, 0x22, 0x66, 0x69, 0x6c, + 0x65, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x29, 0x2e, 0x61, 0x64, 0x64, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, + 0x72, 0x28, 0x22, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x2c, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x28, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x20, 0x3d, 0x20, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x5b, 0x30, 0x5d, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x72, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x20, 0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x46, 0x69, 0x6c, 0x65, + 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x2e, 0x6f, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x20, 0x3d, 0x20, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x20, 0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x46, 0x69, 0x6c, 0x65, 0x52, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x2e, 0x6f, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x20, 0x3d, 0x20, 0x66, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x28, 0x29, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, - 0x5f, 0x64, 0x61, 0x74, 0x61, 0x20, 0x3d, 0x20, 0x72, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x2e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x3b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x69, - 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, - 0x64, 0x3a, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x74, - 0x61, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x20, 0x3d, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x69, 0x6d, 0x61, 0x67, + 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x20, 0x3d, 0x20, 0x72, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x2e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, - 0x61, 0x74, 0x61, 0x3a, 0x20, 0x5b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7b, - 0x20, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, - 0x5f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, - 0x65, 0x28, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x5c, 0x2f, 0x5b, 0x5e, 0x3b, 0x5d, 0x2b, 0x3b, 0x62, 0x61, 0x73, - 0x65, 0x36, 0x34, 0x2c, 0x2f, 0x2c, 0x20, 0x27, 0x27, 0x29, 0x2c, 0x20, - 0x69, 0x64, 0x3a, 0x20, 0x31, 0x30, 0x20, 0x7d, 0x5d, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, - 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x20, 0x3d, 0x20, 0x74, 0x72, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x41, 0x73, 0x44, 0x61, 0x74, - 0x61, 0x55, 0x52, 0x4c, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, - 0x64, 0x46, 0x69, 0x6c, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x6e, 0x70, 0x75, 0x74, - 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, - 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, - 0x28, 0x22, 0x22, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, - 0x20, 0x3d, 0x20, 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x6f, 0x70, - 0x28, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x68, 0x61, 0x74, 0x28, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x22, 0x22, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x65, - 0x6e, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x73, 0x20, - 0x3d, 0x20, 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x29, 0x20, 0x3d, 0x3e, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, - 0x66, 0x20, 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x77, 0x68, 0x69, - 0x63, 0x68, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x31, 0x33, 0x20, 0x26, 0x26, - 0x20, 0x21, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x73, 0x68, 0x69, 0x66, - 0x74, 0x4b, 0x65, 0x79, 0x29, 
0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, - 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, - 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x6f, 0x72, 0x6d, - 0x20, 0x6f, 0x6e, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x3d, 0x24, 0x7b, - 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x7d, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x74, 0x65, 0x78, 0x74, 0x61, 0x72, 0x65, 0x61, 0x0a, 0x20, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x65, 0x64, 0x3a, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x3d, - 0x24, 0x7b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3f, 0x20, 0x22, 0x6c, 0x6f, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x22, 0x20, 0x3a, 0x20, 0x6e, 0x75, 0x6c, - 0x6c, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, - 0x74, 0x3d, 0x24, 0x7b, 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x20, 0x3d, 0x20, 0x65, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6f, 0x6e, - 0x6b, 0x65, 0x79, 0x70, 0x72, 0x65, 0x73, 0x73, 0x3d, 0x24, 0x7b, 0x65, - 0x6e, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x73, 0x7d, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, - 0x64, 0x65, 0x72, 0x3d, 0x22, 0x53, 0x61, 0x79, 0x20, 0x73, 0x6f, 0x6d, - 0x65, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x2e, 0x2e, 0x2e, 0x22, 0x0a, 0x20, + 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, + 0x64, 0x61, 0x74, 0x61, 0x3a, 0x20, 0x5b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x72, 0x6f, 0x77, 0x73, 0x3d, 0x32, 0x0a, 0x20, 0x20, 0x20, + 0x7b, 0x20, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x20, 0x69, 0x6d, 0x61, 0x67, + 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, + 0x63, 0x65, 0x28, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x69, 0x6d, 0x61, + 0x67, 0x65, 0x5c, 0x2f, 0x5b, 0x5e, 0x3b, 0x5d, 0x2b, 0x3b, 0x62, 0x61, + 0x73, 0x65, 0x36, 0x34, 0x2c, 0x2f, 0x2c, 0x20, 0x27, 0x27, 0x29, 0x2c, + 0x20, 0x69, 0x64, 0x3a, 0x20, 0x31, 
0x30, 0x20, 0x7d, 0x5d, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x6d, 0x61, + 0x67, 0x65, 0x20, 0x3d, 0x20, 0x74, 0x72, 0x75, 0x65, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x41, 0x73, 0x44, 0x61, + 0x74, 0x61, 0x55, 0x52, 0x4c, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x20, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, + 0x6c, 0x28, 0x22, 0x22, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x75, 0x62, 0x6d, 0x69, + 0x74, 0x20, 0x3d, 0x20, 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x6f, + 0x70, 0x28, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x68, 0x61, 0x74, 0x28, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x22, + 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, + 0x65, 0x6e, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x73, + 0x20, 0x3d, 0x20, 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x29, 0x20, 0x3d, + 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x77, 0x68, + 0x69, 0x63, 0x68, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x31, 0x33, 0x20, 0x26, + 0x26, 0x20, 0x21, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x73, 0x68, 0x69, + 0x66, 0x74, 0x4b, 0x65, 0x79, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x75, 0x62, 0x6d, 0x69, + 0x74, 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x6f, 0x72, + 0x6d, 0x20, 0x6f, 0x6e, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x3d, 0x24, + 0x7b, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x7d, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x3c, 0x74, 0x65, 0x78, 0x74, 0x61, 0x72, 0x65, 0x61, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, 0x0a, + 0x20, 0x20, 0x20, 0x63, 0x6c, 0x61, 
0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, + 0x3d, 0x24, 0x7b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3f, 0x20, 0x22, 0x6c, + 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x22, 0x20, 0x3a, 0x20, 0x6e, 0x75, + 0x6c, 0x6c, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x20, 0x3d, 0x20, 0x65, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6f, + 0x6e, 0x6b, 0x65, 0x79, 0x70, 0x72, 0x65, 0x73, 0x73, 0x3d, 0x24, 0x7b, + 0x65, 0x6e, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x73, + 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, + 0x6c, 0x64, 0x65, 0x72, 0x3d, 0x22, 0x53, 0x61, 0x79, 0x20, 0x73, 0x6f, + 0x6d, 0x65, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x2e, 0x2e, 0x2e, 0x22, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x7d, 0x22, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, 0x63, 0x6c, 0x61, - 0x73, 0x73, 0x3d, 0x22, 0x72, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3e, 0x0a, + 0x20, 0x20, 0x20, 0x72, 0x6f, 0x77, 0x73, 0x3d, 0x32, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x74, 0x79, 0x70, 0x65, - 0x3d, 0x22, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x22, 0x20, 0x64, 0x69, - 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x7d, 0x3e, 0x53, 0x65, 0x6e, 0x64, 0x3c, 0x2f, 0x62, 0x75, 0x74, - 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, - 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x75, - 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x7d, 0x3e, - 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x20, 0x49, 0x6d, 0x61, 0x67, 0x65, - 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, - 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, - 0x6b, 0x3d, 0x24, 0x7b, 0x73, 0x74, 0x6f, 0x70, 0x7d, 0x20, 0x64, 0x69, - 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x21, 0x67, 0x65, + 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, + 0x7b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x7d, 0x22, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 
0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x3d, 0x22, 0x72, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x74, 0x79, 0x70, + 0x65, 0x3d, 0x22, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x22, 0x20, 0x64, + 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x7d, 0x3e, 0x53, 0x74, 0x6f, 0x70, 0x3c, 0x2f, 0x62, 0x75, + 0x75, 0x65, 0x7d, 0x3e, 0x53, 0x65, 0x6e, 0x64, 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, - 0x72, 0x65, 0x73, 0x65, 0x74, 0x7d, 0x3e, 0x52, 0x65, 0x73, 0x65, 0x74, - 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, - 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x2f, 0x66, 0x6f, 0x72, 0x6d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x43, - 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x75, - 0x62, 0x6d, 0x69, 0x74, 0x20, 0x3d, 0x20, 0x28, 0x65, 0x29, 0x20, 0x3d, - 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x73, 0x74, 0x6f, 0x70, 0x28, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x75, 0x6e, 0x43, 0x6f, 0x6d, 0x70, - 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, - 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x6f, 0x6e, 0x63, - 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x73, 0x75, 0x62, 0x6d, 0x69, - 0x74, 0x7d, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x62, 0x75, 0x74, - 0x74, 0x6f, 0x6e, 0x22, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x3d, 0x24, 0x7b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x3e, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x7d, + 0x3e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x20, 0x49, 0x6d, 0x61, 0x67, + 0x65, 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x73, 0x74, 0x6f, 0x70, 0x7d, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x21, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x3e, 0x53, 0x74, 0x6f, 0x70, 0x3c, 
0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, - 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x72, - 0x65, 0x73, 0x65, 0x74, 0x7d, 0x3e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x3c, - 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x60, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x43, 0x68, 0x61, 0x74, 0x4c, - 0x6f, 0x67, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, - 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x73, 0x20, 0x3d, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, 0x52, 0x65, - 0x66, 0x28, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, - 0x28, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x63, 0x72, 0x6f, - 0x6c, 0x6c, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x6f, 0x74, 0x74, 0x6f, 0x6d, - 0x20, 0x28, 0x69, 0x66, 0x20, 0x6e, 0x65, 0x65, 0x64, 0x65, 0x64, 0x29, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, - 0x73, 0x74, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x3d, 0x20, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x20, 0x26, 0x26, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x48, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x20, 0x3c, 0x3d, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x54, 0x6f, 0x70, 0x20, 0x2b, - 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x6f, 0x66, 0x66, 0x73, - 0x65, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x20, 0x2b, 0x20, 0x33, - 0x30, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x73, - 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x54, 0x6f, 0x28, 0x30, 0x2c, 0x20, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, - 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x2c, 0x20, 0x5b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, - 0x5d, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, - 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, 0x61, 0x74, 0x4c, 0x69, 0x6e, 0x65, - 0x20, 0x3d, 0x20, 0x28, 0x5b, 0x75, 0x73, 0x65, 0x72, 0x2c, 0x20, 0x64, - 0x61, 0x74, 0x61, 0x5d, 0x2c, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x29, - 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 
0x63, 0x6f, - 0x6e, 0x73, 0x74, 0x20, 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x3d, 0x20, 0x41, 0x72, 0x72, - 0x61, 0x79, 0x2e, 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, 0x28, 0x64, - 0x61, 0x74, 0x61, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x69, 0x66, 0x20, 0x28, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, - 0x73, 0x20, 0x3e, 0x20, 0x30, 0x20, 0x26, 0x26, 0x20, 0x69, 0x73, 0x41, - 0x72, 0x72, 0x61, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x29, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x3d, 0x20, 0x68, - 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x24, 0x7b, 0x50, 0x72, 0x6f, 0x62, 0x61, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x7d, 0x20, 0x64, 0x61, - 0x74, 0x61, 0x3d, 0x24, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x7d, 0x20, 0x2f, - 0x3e, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, - 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, + 0x6f, 0x6e, 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, + 0x7b, 0x72, 0x65, 0x73, 0x65, 0x74, 0x7d, 0x3e, 0x52, 0x65, 0x73, 0x65, + 0x74, 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, + 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x3c, 0x2f, 0x66, 0x6f, 0x72, 0x6d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, + 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, + 0x75, 0x62, 0x6d, 0x69, 0x74, 0x20, 0x3d, 0x20, 0x28, 0x65, 0x29, 0x20, + 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x73, 0x74, 0x6f, 0x70, 0x28, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x75, 0x6e, 0x43, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, + 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, + 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x6f, 0x6e, + 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x73, 0x75, 0x62, 0x6d, + 0x69, 0x74, 0x7d, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x62, 0x75, + 0x74, 0x74, 0x6f, 0x6e, 0x22, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x3e, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x6f, 0x6e, 0x63, 0x6c, + 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x73, 0x74, 0x6f, 0x70, 0x7d, 0x20, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x21, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, 
0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x3e, 0x53, 0x74, 0x6f, 0x70, 0x3c, 0x2f, + 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, + 0x6e, 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, + 0x72, 0x65, 0x73, 0x65, 0x74, 0x7d, 0x3e, 0x52, 0x65, 0x73, 0x65, 0x74, + 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, + 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x43, 0x68, 0x61, 0x74, + 0x4c, 0x6f, 0x67, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x72, 0x6f, 0x70, 0x73, + 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x73, 0x20, 0x3d, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, 0x52, + 0x65, 0x66, 0x28, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x45, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x28, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x63, 0x72, + 0x6f, 0x6c, 0x6c, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x6f, 0x74, 0x74, 0x6f, + 0x6d, 0x20, 0x28, 0x69, 0x66, 0x20, 0x6e, 0x65, 0x65, 0x64, 0x65, 0x64, + 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x3d, + 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x20, 0x26, 0x26, 0x20, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x48, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x20, 0x3c, 0x3d, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x54, 0x6f, 0x70, 0x20, + 0x2b, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x20, 0x2b, 0x20, + 0x33, 0x30, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2e, + 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x54, 0x6f, 0x28, 0x30, 0x2c, 0x20, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, + 0x6c, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x29, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x2c, 0x20, 0x5b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x73, 0x5d, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x69, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x20, 0x3d, 0x20, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x27, + 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 
0x27, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, - 0x74, 0x65, 0x78, 0x74, 0x20, 0x3d, 0x20, 0x69, 0x73, 0x41, 0x72, 0x72, - 0x61, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x3f, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6d, 0x61, 0x70, 0x28, 0x6d, 0x73, 0x67, - 0x20, 0x3d, 0x3e, 0x20, 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x29, 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x28, 0x27, 0x27, - 0x29, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5e, - 0x5c, 0x73, 0x2b, 0x2f, 0x2c, 0x20, 0x27, 0x27, 0x29, 0x20, 0x3a, 0x0a, + 0x63, 0x68, 0x61, 0x74, 0x4c, 0x69, 0x6e, 0x65, 0x20, 0x3d, 0x20, 0x28, + 0x5b, 0x75, 0x73, 0x65, 0x72, 0x2c, 0x20, 0x64, 0x61, 0x74, 0x61, 0x5d, + 0x2c, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x29, 0x20, 0x3d, 0x3e, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, + 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, + 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x20, 0x3d, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x2e, 0x69, + 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, 0x28, 0x64, 0x61, 0x74, 0x61, 0x29, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, + 0x28, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x2e, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x73, 0x20, 0x3e, 0x20, + 0x30, 0x20, 0x26, 0x26, 0x20, 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x20, 0x3d, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, + 0x3c, 0x24, 0x7b, 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x69, 0x65, 0x73, 0x7d, 0x20, 0x64, 0x61, 0x74, 0x61, 0x3d, 0x24, + 0x7b, 0x64, 0x61, 0x74, 0x61, 0x7d, 0x20, 0x2f, 0x3e, 0x60, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, + 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x74, 0x65, 0x78, 0x74, + 0x20, 0x3d, 0x20, 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x3f, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x6d, 0x61, 0x70, 0x28, 0x6d, 0x73, 0x67, 0x20, 0x3d, 0x3e, 0x20, + 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, + 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x28, 0x27, 0x27, 0x29, 0x2e, 0x72, 0x65, + 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5e, 0x5c, 0x73, 0x2b, 0x2f, + 0x2c, 0x20, 0x27, 0x27, 0x29, 0x20, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x61, 0x74, 0x61, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x3d, 0x20, 0x69, 0x73, + 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, + 0x64, 0x65, 0x20, 0x3f, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x65, 0x78, 0x74, 0x20, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x64, 0x61, 0x74, 0x61, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 
0x20, - 0x3d, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x24, 0x7b, 0x4d, 0x61, - 0x72, 0x6b, 0x64, 0x6f, 0x77, 0x6e, 0x69, 0x73, 0x68, 0x7d, 0x20, 0x74, - 0x65, 0x78, 0x74, 0x3d, 0x24, 0x7b, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x28, 0x74, 0x65, 0x78, 0x74, 0x29, 0x7d, 0x20, 0x2f, 0x3e, - 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, - 0x75, 0x73, 0x65, 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, - 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x70, 0x20, 0x6b, 0x65, 0x79, - 0x3d, 0x24, 0x7b, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x7d, 0x3e, 0x3c, 0x73, - 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x24, 0x7b, 0x74, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x28, 0x75, 0x73, 0x65, 0x72, 0x29, 0x7d, 0x3a, - 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x20, 0x24, 0x7b, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x7d, 0x3c, 0x2f, 0x70, 0x3e, - 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, - 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, - 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x70, 0x20, 0x6b, 0x65, 0x79, 0x3d, - 0x24, 0x7b, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x7d, 0x3e, 0x24, 0x7b, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x7d, 0x3c, 0x2f, 0x70, 0x3e, 0x60, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, - 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x64, 0x3d, - 0x22, 0x63, 0x68, 0x61, 0x74, 0x22, 0x20, 0x72, 0x65, 0x66, 0x3d, 0x24, - 0x7b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x7d, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x69, 0x6d, 0x67, 0x20, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, 0x22, 0x77, - 0x69, 0x64, 0x74, 0x68, 0x3a, 0x20, 0x36, 0x30, 0x25, 0x3b, 0x24, 0x7b, - 0x21, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x65, 0x64, 0x20, 0x3f, 0x20, 0x60, 0x64, 0x69, 0x73, - 0x70, 0x6c, 0x61, 0x79, 0x3a, 0x20, 0x6e, 0x6f, 0x6e, 0x65, 0x3b, 0x60, - 0x20, 0x3a, 0x20, 0x60, 0x60, 0x7d, 0x22, 0x20, 0x73, 0x72, 0x63, 0x3d, - 0x22, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x73, - 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x7d, 0x22, 0x2f, 0x3e, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x66, 0x6c, 0x61, - 0x74, 0x4d, 0x61, 0x70, 0x28, 0x63, 0x68, 0x61, 0x74, 0x4c, 0x69, 0x6e, - 0x65, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x2f, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3e, 0x60, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x72, 0x6f, - 0x70, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x70, 
0x64, - 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x3d, - 0x20, 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, - 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x5b, 0x65, 0x6c, - 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, - 0x5d, 0x3a, 0x20, 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x20, 0x3d, 0x20, - 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, - 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x5b, 0x65, 0x6c, 0x2e, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x5d, 0x3a, 0x20, - 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x20, - 0x3d, 0x20, 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, - 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x5b, 0x65, 0x6c, 0x2e, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x5d, - 0x3a, 0x20, 0x70, 0x61, 0x72, 0x73, 0x65, 0x46, 0x6c, 0x6f, 0x61, 0x74, - 0x28, 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x49, 0x6e, 0x74, 0x20, - 0x3d, 0x20, 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, - 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x5b, 0x65, 0x6c, 0x2e, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x5d, - 0x3a, 0x20, 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x66, 0x6c, 0x6f, 0x6f, 0x72, - 0x28, 0x70, 0x61, 0x72, 0x73, 0x65, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x28, - 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x29, 0x29, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x67, 0x72, 0x61, - 0x6d, 0x6d, 0x61, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x70, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x20, - 0x3d, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x27, 0x27, 0x29, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, - 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x61, 0x6d, 0x6d, - 0x61, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x50, 0x72, 0x6f, 0x70, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x20, 0x3d, 0x20, - 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x67, 0x72, 0x61, 0x6d, - 0x6d, 0x61, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 
0x6d, - 0x61, 0x50, 0x72, 0x6f, 0x70, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x65, 0x6c, 0x2e, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, - 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x20, - 0x3d, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x79, 0x20, 0x7b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, - 0x6e, 0x73, 0x74, 0x20, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x20, 0x3d, - 0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x2e, 0x70, 0x61, 0x72, 0x73, 0x65, 0x28, + 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x24, 0x7b, 0x4d, 0x61, 0x72, 0x6b, + 0x64, 0x6f, 0x77, 0x6e, 0x69, 0x73, 0x68, 0x7d, 0x20, 0x74, 0x65, 0x78, + 0x74, 0x3d, 0x24, 0x7b, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x28, 0x74, 0x65, 0x78, 0x74, 0x29, 0x7d, 0x20, 0x2f, 0x3e, 0x60, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x75, 0x73, + 0x65, 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, + 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x70, 0x20, 0x6b, 0x65, 0x79, 0x3d, 0x24, + 0x7b, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x7d, 0x3e, 0x3c, 0x73, 0x74, 0x72, + 0x6f, 0x6e, 0x67, 0x3e, 0x24, 0x7b, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x28, 0x75, 0x73, 0x65, 0x72, 0x29, 0x7d, 0x3a, 0x3c, 0x2f, + 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x20, 0x24, 0x7b, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x7d, 0x3c, 0x2f, 0x70, 0x3e, 0x60, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, + 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x69, 0x73, + 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, + 0x64, 0x65, 0x20, 0x3f, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x73, + 0x70, 0x61, 0x6e, 0x20, 0x6b, 0x65, 0x79, 0x3d, 0x24, 0x7b, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x7d, 0x3e, 0x24, 0x7b, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x60, 0x20, + 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x70, 0x20, 0x6b, 0x65, + 0x79, 0x3d, 0x24, 0x7b, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x7d, 0x3e, 0x24, + 0x7b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x7d, 0x3c, 0x2f, 0x70, + 0x3e, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x68, + 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x64, 0x69, 0x74, 0x20, 0x3d, 0x20, 0x28, 0x65, + 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x20, + 0x3d, 0x20, 0x65, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x69, + 0x6e, 0x6e, 0x65, 0x72, 0x54, 0x65, 0x78, 0x74, 0x3b, 0x0a, 0x20, 
0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x20, 0x3d, 0x20, 0x5b, 0x5d, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, + 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x63, 0x68, + 0x61, 0x74, 0x22, 0x20, 0x72, 0x65, 0x66, 0x3d, 0x24, 0x7b, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x7d, 0x20, 0x6b, 0x65, 0x79, + 0x3d, 0x24, 0x7b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x7d, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6d, 0x67, 0x20, + 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, + 0x3a, 0x20, 0x36, 0x30, 0x25, 0x3b, 0x24, 0x7b, 0x21, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x20, 0x3f, 0x20, 0x60, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, + 0x3a, 0x20, 0x6e, 0x6f, 0x6e, 0x65, 0x3b, 0x60, 0x20, 0x3a, 0x20, 0x60, + 0x60, 0x7d, 0x22, 0x20, 0x73, 0x72, 0x63, 0x3d, 0x22, 0x24, 0x7b, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x7d, 0x22, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x20, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x69, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x3d, 0x24, 0x7b, 0x69, 0x73, 0x43, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x7d, 0x20, + 0x72, 0x65, 0x66, 0x3d, 0x24, 0x7b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x7d, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x3d, 0x24, 0x7b, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x64, 0x69, 0x74, 0x7d, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x24, 0x7b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, + 0x2e, 0x66, 0x6c, 0x61, 0x74, 0x4d, 0x61, 0x70, 0x28, 0x63, 0x68, 0x61, + 0x74, 0x4c, 0x69, 0x6e, 0x65, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, + 0x64, 0x69, 0x76, 0x3e, 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x20, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x20, + 0x3d, 0x20, 0x28, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, 0x20, 0x3d, 0x3e, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x74, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x3d, 0x20, 0x28, 0x65, 0x6c, 0x29, 0x20, + 0x3d, 0x3e, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x2c, 0x20, 0x5b, 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x5d, 0x3a, 0x20, 0x65, 0x6c, 
0x2e, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x74, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x20, 0x3d, 0x20, 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, + 0x3e, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, + 0x5b, 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x6e, + 0x61, 0x6d, 0x65, 0x5d, 0x3a, 0x20, 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x20, 0x3d, 0x20, 0x28, 0x65, 0x6c, 0x29, + 0x20, 0x3d, 0x3e, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x2e, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x29, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, - 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x65, 0x72, 0x20, - 0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x65, 0x72, 0x28, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x67, - 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x70, 0x4f, 0x72, 0x64, 0x65, - 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x73, - 0x70, 0x6c, 0x69, 0x74, 0x28, 0x27, 0x2c, 0x27, 0x29, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x2e, 0x72, 0x65, 0x64, 0x75, 0x63, 0x65, 0x28, 0x28, 0x61, 0x63, 0x63, - 0x2c, 0x20, 0x63, 0x75, 0x72, 0x2c, 0x20, 0x69, 0x29, 0x20, 0x3d, 0x3e, - 0x20, 0x28, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x61, 0x63, 0x63, 0x2c, 0x20, - 0x5b, 0x63, 0x75, 0x72, 0x2e, 0x74, 0x72, 0x69, 0x6d, 0x28, 0x29, 0x5d, - 0x3a, 0x20, 0x69, 0x20, 0x7d, 0x29, 0x2c, 0x20, 0x7b, 0x7d, 0x29, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, - 0x6e, 0x76, 0x65, 0x72, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x69, 0x73, 0x69, - 0x74, 0x28, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2c, 0x20, 0x27, 0x27, - 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x2c, 0x20, 0x5b, 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x5d, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x73, + 0x65, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x28, 0x65, 0x6c, 0x2e, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x20, + 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, + 0x74, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x49, 0x6e, 0x74, 0x20, 0x3d, 0x20, 0x28, 0x65, 0x6c, 0x29, + 0x20, 0x3d, 0x3e, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 
- 0x20, 0x3d, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x67, 0x72, - 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x3a, 0x20, 0x63, 0x6f, 0x6e, 0x76, 0x65, - 0x72, 0x74, 0x65, 0x72, 0x2e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x47, - 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x28, 0x29, 0x2c, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x63, 0x61, 0x74, 0x63, - 0x68, 0x20, 0x28, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x28, - 0x60, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x20, 0x66, 0x61, 0x69, - 0x6c, 0x65, 0x64, 0x3a, 0x20, 0x24, 0x7b, 0x65, 0x2e, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x7d, 0x60, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x2c, 0x20, 0x5b, 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x5d, 0x3a, 0x20, 0x4d, 0x61, 0x74, 0x68, + 0x2e, 0x66, 0x6c, 0x6f, 0x6f, 0x72, 0x28, 0x70, 0x61, 0x72, 0x73, 0x65, + 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x28, 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x29, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, - 0x73, 0x74, 0x20, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x20, 0x3d, 0x20, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x2c, 0x20, - 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x2c, 0x20, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x29, 0x20, 0x3d, 0x3e, 0x20, - 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, - 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, - 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, - 0x72, 0x3d, 0x22, 0x24, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x22, 0x3e, - 0x24, 0x7b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x7d, 0x3c, 0x2f, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, - 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, - 0x20, 0x69, 0x64, 0x3d, 0x22, 0x24, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, - 0x22, 0x20, 0x6d, 0x69, 0x6e, 0x3d, 0x22, 0x24, 0x7b, 0x6d, 0x69, 0x6e, - 0x7d, 0x22, 0x20, 0x6d, 0x61, 0x78, 0x3d, 0x22, 0x24, 0x7b, 0x6d, 0x61, - 0x78, 0x7d, 0x22, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3d, 0x22, 0x24, 0x7b, - 0x73, 0x74, 0x65, 0x70, 0x7d, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x22, 0x24, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x22, 0x20, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, - 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, + 0x73, 0x74, 0x20, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x4a, 0x73, + 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x70, + 
0x4f, 0x72, 0x64, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x73, 0x69, 0x67, 0x6e, + 0x61, 0x6c, 0x28, 0x27, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x4a, 0x73, 0x6f, 0x6e, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x70, 0x4f, 0x72, + 0x64, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, + 0x3e, 0x20, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x4a, 0x73, 0x6f, + 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x70, 0x4f, + 0x72, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, + 0x20, 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, + 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, + 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x20, 0x3d, 0x20, 0x28, 0x29, 0x20, 0x3d, + 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x74, 0x72, 0x79, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x20, 0x3d, 0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x2e, + 0x70, 0x61, 0x72, 0x73, 0x65, 0x28, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x67, 0x72, 0x61, 0x6d, 0x6d, + 0x61, 0x72, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x76, + 0x65, 0x72, 0x74, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, + 0x74, 0x65, 0x72, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, + 0x4a, 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x50, 0x72, + 0x6f, 0x70, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x2e, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x28, 0x27, + 0x2c, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x64, 0x75, 0x63, + 0x65, 0x28, 0x28, 0x61, 0x63, 0x63, 0x2c, 0x20, 0x63, 0x75, 0x72, 0x2c, + 0x20, 0x69, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x28, 0x7b, 0x20, 0x2e, 0x2e, + 0x2e, 0x61, 0x63, 0x63, 0x2c, 0x20, 0x5b, 0x63, 0x75, 0x72, 0x2e, 0x74, + 0x72, 0x69, 0x6d, 0x28, 0x29, 0x5d, 0x3a, 0x20, 0x69, 0x20, 0x7d, 0x29, + 0x2c, 0x20, 0x7b, 0x7d, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x65, + 0x72, 0x2e, 0x76, 0x69, 0x73, 0x69, 0x74, 0x28, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2c, 0x20, 0x27, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, + 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x3a, + 0x20, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x65, 0x72, 0x2e, 0x66, + 
0x6f, 0x72, 0x6d, 0x61, 0x74, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, + 0x28, 0x29, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x20, 0x63, 0x61, 0x74, 0x63, 0x68, 0x20, 0x28, 0x65, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x61, 0x6c, 0x65, 0x72, 0x74, 0x28, 0x60, 0x43, 0x6f, 0x6e, 0x76, 0x65, + 0x72, 0x74, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x3a, 0x20, 0x24, + 0x7b, 0x65, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x7d, 0x60, + 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x46, 0x6c, 0x6f, + 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x20, 0x3d, 0x20, 0x28, 0x7b, + 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x2c, + 0x20, 0x6d, 0x69, 0x6e, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, + 0x73, 0x74, 0x65, 0x70, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, + 0x7d, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, + 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x24, 0x7b, 0x6e, + 0x61, 0x6d, 0x65, 0x7d, 0x22, 0x3e, 0x24, 0x7b, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x7d, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x24, 0x7b, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, - 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x49, 0x6e, - 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x20, 0x3d, 0x20, 0x28, 0x7b, 0x20, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x2c, 0x20, - 0x6d, 0x69, 0x6e, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, - 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, - 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, 0x72, - 0x3d, 0x22, 0x24, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x22, 0x3e, 0x24, - 0x7b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x7d, 0x3c, 0x2f, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, - 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x20, - 0x69, 0x64, 0x3d, 0x22, 0x24, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x22, - 0x20, 0x6d, 0x69, 0x6e, 0x3d, 0x22, 0x24, 0x7b, 0x6d, 0x69, 0x6e, 0x7d, - 0x22, 0x20, 0x6d, 0x61, 0x78, 0x3d, 0x22, 0x24, 0x7b, 0x6d, 0x61, 0x78, - 0x7d, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x6e, - 
0x61, 0x6d, 0x65, 0x7d, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, - 0x22, 0x24, 0x7b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x22, 0x20, 0x6f, - 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x49, 0x6e, 0x74, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x24, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x22, 0x20, 0x6d, 0x69, 0x6e, 0x3d, + 0x22, 0x24, 0x7b, 0x6d, 0x69, 0x6e, 0x7d, 0x22, 0x20, 0x6d, 0x61, 0x78, + 0x3d, 0x22, 0x24, 0x7b, 0x6d, 0x61, 0x78, 0x7d, 0x22, 0x20, 0x73, 0x74, + 0x65, 0x70, 0x3d, 0x22, 0x24, 0x7b, 0x73, 0x74, 0x65, 0x70, 0x7d, 0x22, + 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x7d, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, + 0x7b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x24, 0x7b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61, @@ -1586,1108 +1626,1137 @@ unsigned char index_html[] = { 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, - 0x6e, 0x73, 0x74, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x20, 0x3d, 0x20, - 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, - 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x65, 0x74, 0x54, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x41, - 0x6e, 0x64, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x29, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x55, 0x73, 0x65, 0x72, 0x54, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, - 0x42, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x3d, 0x20, 0x28, 0x29, 0x20, - 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, - 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, - 0x20, 0x3d, 0x3d, 0x20, 0x27, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x27, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x6e, 0x73, 0x74, 0x20, 0x49, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x20, 0x3d, 0x20, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x2c, + 0x20, 0x6d, 0x61, 0x78, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x2c, 0x20, 0x6e, + 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, + 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 
0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, - 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3e, 0x55, 0x73, 0x69, - 0x6e, 0x67, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x74, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x62, 0x75, 0x74, - 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, + 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x24, 0x7b, 0x6e, 0x61, + 0x6d, 0x65, 0x7d, 0x22, 0x3e, 0x24, 0x7b, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x7d, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x22, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x24, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x22, 0x20, 0x6d, 0x69, 0x6e, 0x3d, 0x22, + 0x24, 0x7b, 0x6d, 0x69, 0x6e, 0x7d, 0x22, 0x20, 0x6d, 0x61, 0x78, 0x3d, + 0x22, 0x24, 0x7b, 0x6d, 0x61, 0x78, 0x7d, 0x22, 0x20, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x22, 0x20, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x49, 0x6e, 0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x24, 0x7b, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x73, + 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x65, 0x74, 0x20, 0x3d, 0x20, 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x65, + 0x2e, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x54, 0x6f, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x6c, + 0x79, 0x28, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x20, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x42, 0x75, 0x74, 0x74, 0x6f, 0x6e, + 0x20, 0x3d, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x27, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 
0x74, 0x27, 0x29, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, + 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x3e, 0x55, 0x73, 0x69, 0x6e, 0x67, 0x20, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, + 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x75, + 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x65, 0x74, 0x7d, 0x3e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x20, + 0x61, 0x6c, 0x6c, 0x20, 0x74, 0x6f, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x75, 0x73, 0x65, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x28, + 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x61, 0x75, 0x74, 0x6f, 0x73, + 0x61, 0x76, 0x65, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x20, 0x6f, 0x6e, 0x20, 0x65, 0x76, 0x65, 0x72, 0x79, 0x20, 0x63, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x28, 0x29, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x2c, 0x20, 0x5b, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, + 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x5d, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x20, 0x3d, 0x20, 0x28, 0x29, + 0x20, 0x3d, 0x3e, 0x20, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, + 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x22, 0x3e, 0x47, 0x72, + 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x3c, 0x74, 0x65, 0x78, 0x74, 0x61, 0x72, 0x65, 0x61, 0x20, + 0x69, 0x64, 0x3d, 0x22, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x22, + 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x67, 0x72, 0x61, 0x6d, 0x6d, + 0x61, 0x72, 0x22, 0x20, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x3d, 0x22, 0x55, 0x73, 0x65, 0x20, 0x67, 0x62, 0x6e, + 0x66, 0x20, 0x6f, 0x72, 0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x2b, 
0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, + 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, + 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x7d, 0x22, 0x20, 0x72, 0x6f, + 0x77, 0x73, 0x3d, 0x34, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x7d, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, + 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x70, 0x72, 0x6f, 0x70, + 0x2d, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x20, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x3d, 0x22, 0x6f, 0x72, 0x64, + 0x65, 0x72, 0x3a, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x31, 0x2c, 0x70, 0x72, + 0x6f, 0x70, 0x32, 0x2c, 0x70, 0x72, 0x6f, 0x70, 0x33, 0x22, 0x20, 0x6f, + 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x4a, 0x73, + 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x70, + 0x4f, 0x72, 0x64, 0x65, 0x72, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, + 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, + 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x22, 0x20, 0x6f, 0x6e, 0x63, 0x6c, + 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, + 0x74, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, + 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x7d, 0x3e, 0x43, 0x6f, 0x6e, 0x76, + 0x65, 0x72, 0x74, 0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, - 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x7d, 0x3e, - 0x52, 0x65, 0x73, 0x65, 0x74, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x74, 0x6f, - 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x3c, 0x2f, 0x62, 0x75, - 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, - 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x45, - 0x66, 0x66, 0x65, 0x63, 0x74, 0x28, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, - 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, - 0x20, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x20, 0x74, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x6f, 0x6e, 0x20, 0x65, 0x76, - 0x65, 0x72, 0x79, 0x20, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x73, - 0x61, 0x76, 0x65, 0x28, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x2c, 0x20, 0x5b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5d, 0x29, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x47, - 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x20, 0x3d, 0x20, 0x28, 
0x29, 0x20, 0x3d, 0x3e, 0x20, 0x28, 0x0a, + 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x20, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, + 0x74, 0x20, 0x3d, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, 0x74, 0x6d, 0x6c, - 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x22, 0x3e, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x3c, - 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x74, 0x65, 0x78, - 0x74, 0x61, 0x72, 0x65, 0x61, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x67, 0x72, - 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x22, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x22, 0x20, 0x70, 0x6c, - 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x3d, 0x22, 0x55, - 0x73, 0x65, 0x20, 0x67, 0x62, 0x6e, 0x66, 0x20, 0x6f, 0x72, 0x20, 0x4a, - 0x53, 0x4f, 0x4e, 0x20, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2b, 0x63, - 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, - 0x72, 0x7d, 0x22, 0x20, 0x72, 0x6f, 0x77, 0x73, 0x3d, 0x34, 0x20, 0x6f, - 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x7d, 0x2f, 0x3e, + 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, - 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x22, 0x70, 0x72, 0x6f, 0x70, 0x2d, 0x6f, 0x72, 0x64, 0x65, 0x72, - 0x22, 0x20, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, - 0x72, 0x3d, 0x22, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x3a, 0x20, 0x70, 0x72, - 0x6f, 0x70, 0x31, 0x2c, 0x70, 0x72, 0x6f, 0x70, 0x32, 0x2c, 0x70, 0x72, - 0x6f, 0x70, 0x33, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x61, - 0x6d, 0x6d, 0x61, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x70, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x7d, - 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, - 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, - 0x22, 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, - 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x4a, 0x53, 0x4f, 0x4e, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, - 0x7d, 0x3e, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x20, 0x4a, 0x53, - 0x4f, 0x4e, 0x20, 0x53, 0x63, 0x68, 
0x65, 0x6d, 0x61, 0x3c, 0x2f, 0x62, - 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x68, 0x74, 0x6d, 0x6c, + 0x46, 0x6f, 0x72, 0x3d, 0x22, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x22, + 0x3e, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x3c, 0x2f, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x74, 0x65, 0x78, 0x74, 0x61, 0x72, 0x65, + 0x61, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, + 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x70, 0x72, 0x6f, 0x6d, + 0x70, 0x74, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, + 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x7d, 0x22, 0x20, + 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, + 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, + 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x3b, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, + 0x43, 0x68, 0x61, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x6f, + 0x72, 0x6d, 0x20, 0x3d, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x28, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, 0x74, 0x6d, + 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x24, 0x7b, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x43, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x74, + 0x28, 0x29, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, + 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x74, 0x77, 0x6f, 0x22, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x75, 0x73, + 0x65, 0x72, 0x22, 0x3e, 0x55, 0x73, 0x65, 0x72, 0x20, 0x6e, 0x61, 0x6d, + 0x65, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, + 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x22, 0x75, 0x73, 0x65, 0x72, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3d, 0x22, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x7d, 0x22, + 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x50, 0x72, - 0x6f, 0x6d, 0x70, 0x74, 0x43, 0x6f, 
0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x74, 0x20, 0x3d, 0x20, 0x28, 0x29, - 0x20, 0x3d, 0x3e, 0x20, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, - 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x46, 0x6f, 0x72, 0x3d, 0x22, 0x70, - 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x22, 0x3e, 0x50, 0x72, 0x6f, 0x6d, 0x70, - 0x74, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x74, - 0x65, 0x78, 0x74, 0x61, 0x72, 0x65, 0x61, 0x20, 0x74, 0x79, 0x70, 0x65, - 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x22, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x22, 0x20, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x6d, 0x70, 0x74, 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, - 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, - 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x43, 0x68, 0x61, 0x74, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x20, 0x3d, 0x20, 0x28, - 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x50, 0x72, 0x6f, - 0x6d, 0x70, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x53, 0x65, 0x74, 0x28, 0x29, 0x7d, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x3d, 0x22, 0x74, 0x77, 0x6f, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x62, 0x6f, 0x74, + 0x22, 0x3e, 0x42, 0x6f, 0x74, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3c, 0x2f, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x74, 0x65, + 0x78, 0x74, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x63, 0x68, + 0x61, 0x72, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, + 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2e, 0x63, 0x68, 0x61, 0x72, 0x7d, 0x22, 0x20, 0x6f, 0x6e, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 
0x69, 0x6f, 0x6e, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, - 0x6f, 0x72, 0x3d, 0x22, 0x75, 0x73, 0x65, 0x72, 0x22, 0x3e, 0x55, 0x73, - 0x65, 0x72, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3c, 0x2f, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, - 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x75, 0x73, 0x65, 0x72, 0x22, - 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, - 0x75, 0x73, 0x65, 0x72, 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, - 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, - 0x72, 0x3d, 0x22, 0x62, 0x6f, 0x74, 0x22, 0x3e, 0x42, 0x6f, 0x74, 0x20, - 0x6e, 0x61, 0x6d, 0x65, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, + 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, + 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x22, 0x3e, 0x50, + 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x74, 0x65, 0x78, 0x74, 0x61, 0x72, 0x65, 0x61, 0x20, 0x69, + 0x64, 0x3d, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x22, + 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, + 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x7d, 0x22, 0x20, 0x72, 0x6f, 0x77, 0x73, 0x3d, 0x34, 0x20, 0x6f, 0x6e, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, - 0x70, 0x65, 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, 0x20, 0x6e, 0x61, - 0x6d, 0x65, 0x3d, 0x22, 0x63, 0x68, 0x61, 0x72, 0x22, 0x20, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x63, 0x68, 0x61, - 0x72, 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, 
0x6e, 0x70, 0x75, 0x74, 0x3d, - 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, + 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, - 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, + 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x22, 0x3e, 0x43, 0x68, 0x61, 0x74, 0x20, 0x68, 0x69, 0x73, 0x74, + 0x6f, 0x72, 0x79, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x22, 0x3e, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x20, - 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x74, 0x65, 0x78, 0x74, - 0x61, 0x72, 0x65, 0x61, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x74, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x22, 0x20, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x7d, 0x22, 0x20, 0x72, 0x6f, 0x77, - 0x73, 0x3d, 0x34, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, - 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, - 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x74, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x22, 0x3e, 0x43, 0x68, 0x61, - 0x74, 0x20, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x20, 0x74, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x74, 0x65, 0x78, 0x74, 0x61, 0x72, - 0x65, 0x61, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x68, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, - 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x7d, 0x22, 
0x20, 0x72, 0x6f, 0x77, - 0x73, 0x3d, 0x31, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, - 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, - 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x24, 0x7b, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x43, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x28, 0x29, 0x7d, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x3b, - 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, - 0x74, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x20, 0x3d, - 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x28, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x50, - 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x74, 0x28, 0x29, 0x7d, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x24, 0x7b, 0x47, 0x72, + 0x74, 0x65, 0x78, 0x74, 0x61, 0x72, 0x65, 0x61, 0x20, 0x69, 0x64, 0x3d, + 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x22, 0x20, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, + 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x22, 0x20, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x68, 0x69, 0x73, + 0x74, 0x6f, 0x72, 0x79, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x7d, 0x22, 0x20, 0x72, 0x6f, 0x77, 0x73, 0x3d, 0x31, 0x20, 0x6f, 0x6e, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x28, 0x29, 0x7d, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, - 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x3b, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, - 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3c, 0x66, 0x6f, 0x72, 0x6d, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x73, 0x65, 0x74, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, - 0x74, 0x77, 0x6f, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24, 0x7b, 0x55, 0x73, 0x65, - 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x65, 0x74, 0x42, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x7d, 0x2f, 0x3e, 0x0a, + 0x28, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 
0x64, 0x73, 0x65, 0x74, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x43, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x46, 0x6f, 0x72, 0x6d, 0x20, 0x3d, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, + 0x20, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, + 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x24, 0x7b, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, + 0x65, 0x74, 0x28, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, + 0x74, 0x3e, 0x24, 0x7b, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x28, 0x29, 0x7d, 0x3c, 0x2f, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x6f, 0x72, + 0x6d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x20, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x74, 0x77, 0x6f, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x73, 0x6c, - 0x69, 0x6d, 0x22, 0x3e, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, - 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x64, 0x69, 0x6f, 0x22, 0x20, - 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x20, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x63, 0x68, 0x61, 0x74, 0x22, - 0x20, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x73, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x22, 0x63, - 0x68, 0x61, 0x74, 0x22, 0x7d, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, - 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x20, 0x2f, 0x3e, 0x20, 0x43, 0x68, - 0x61, 0x74, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, + 0x3c, 0x24, 0x7b, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x42, 0x75, 0x74, 0x74, + 0x6f, 0x6e, 0x7d, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x3d, 0x22, 0x73, 0x6c, 0x69, 0x6d, 0x22, 0x3e, 0x3c, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, - 0x64, 0x69, 0x6f, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x74, - 0x79, 0x70, 0x65, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, - 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x3d, 0x24, 
0x7b, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x22, 0x63, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x7d, 0x20, 0x6f, - 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x20, - 0x2f, 0x3e, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, - 0x6e, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, - 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, - 0x74, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x63, 0x6c, 0x61, + 0x73, 0x73, 0x3d, 0x22, 0x73, 0x6c, 0x69, 0x6d, 0x22, 0x3e, 0x3c, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, + 0x61, 0x64, 0x69, 0x6f, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, + 0x74, 0x79, 0x70, 0x65, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, + 0x22, 0x63, 0x68, 0x61, 0x74, 0x22, 0x20, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x20, + 0x3d, 0x3d, 0x3d, 0x20, 0x22, 0x63, 0x68, 0x61, 0x74, 0x22, 0x7d, 0x20, + 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, + 0x20, 0x2f, 0x3e, 0x20, 0x43, 0x68, 0x61, 0x74, 0x3c, 0x2f, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x73, 0x6c, 0x69, + 0x6d, 0x22, 0x3e, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, + 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x64, 0x69, 0x6f, 0x22, 0x20, 0x6e, + 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x20, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, + 0x64, 0x3d, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x20, 0x3d, - 0x3d, 0x3d, 0x20, 0x27, 0x63, 0x68, 0x61, 0x74, 0x27, 0x20, 0x3f, 0x20, - 0x43, 0x68, 0x61, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x6f, - 0x72, 0x6d, 0x28, 0x29, 0x20, 0x3a, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6c, - 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, - 0x6f, 0x72, 0x6d, 0x28, 0x29, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x65, 0x74, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x74, - 0x77, 0x6f, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x49, 0x6e, 0x74, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, - 0x20, 0x22, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x32, 0x30, 0x34, - 0x38, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x2d, 0x31, 0x2c, 0x20, - 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x6e, 0x5f, 
0x70, 0x72, 0x65, - 0x64, 0x69, 0x63, 0x74, 0x22, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x2e, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, - 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x3a, 0x20, 0x22, 0x54, 0x65, 0x6d, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, - 0x2e, 0x35, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, - 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x74, 0x65, 0x6d, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x2c, 0x20, 0x73, - 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, + 0x3d, 0x3d, 0x20, 0x22, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x7d, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x20, 0x2f, 0x3e, 0x20, 0x43, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x3c, 0x2f, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x27, 0x63, 0x68, + 0x61, 0x74, 0x27, 0x20, 0x3f, 0x20, 0x43, 0x68, 0x61, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x28, 0x29, 0x20, 0x3a, + 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x28, 0x29, 0x7d, + 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x20, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x3d, 0x22, 0x74, 0x77, 0x6f, 0x22, 0x3e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, + 0x7b, 0x49, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x50, 0x72, 0x65, 0x64, + 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2c, 0x20, 0x6d, 0x61, + 0x78, 0x3a, 0x20, 0x32, 0x30, 0x34, 0x38, 0x2c, 0x20, 0x6d, 0x69, 0x6e, + 0x3a, 0x20, 0x2d, 0x31, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, + 0x22, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x22, 0x2c, + 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x5f, 0x70, + 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, - 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x50, 0x65, - 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x20, 0x72, 0x65, 0x70, 
0x65, 0x61, - 0x74, 0x20, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x2c, - 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x32, 0x2e, 0x30, 0x2c, 0x20, 0x6d, - 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, - 0x65, 0x3a, 0x20, 0x22, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x70, - 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, - 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, - 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x20, 0x7d, 0x29, 0x7d, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x24, 0x7b, 0x49, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, - 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x43, 0x6f, 0x6e, - 0x73, 0x69, 0x64, 0x65, 0x72, 0x20, 0x4e, 0x20, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x70, 0x65, 0x6e, 0x61, 0x6c, - 0x69, 0x7a, 0x65, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x32, - 0x30, 0x34, 0x38, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2c, - 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x72, 0x65, 0x70, 0x65, - 0x61, 0x74, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e, 0x22, 0x2c, 0x20, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x72, 0x65, 0x70, 0x65, - 0x61, 0x74, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e, 0x20, 0x7d, 0x29, - 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x24, 0x7b, 0x49, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x54, - 0x6f, 0x70, 0x2d, 0x4b, 0x20, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, - 0x67, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x30, 0x30, - 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x2d, 0x31, 0x2c, 0x20, 0x6e, - 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x74, 0x6f, 0x70, 0x5f, 0x6b, 0x22, - 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x6f, - 0x70, 0x5f, 0x6b, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, - 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x54, 0x6f, 0x70, 0x2d, 0x50, - 0x20, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x22, 0x2c, 0x20, - 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, + 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x54, 0x65, + 0x6d, 0x70, 0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x2c, 0x20, + 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x35, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, - 0x3a, 0x20, 0x22, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x22, 0x2c, 0x20, 0x73, - 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x6f, 0x70, 0x5f, 0x70, + 0x3a, 0x20, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x65, 0x72, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, + 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 
0x75, 0x65, + 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x3a, 0x20, 0x22, 0x4d, 0x69, 0x6e, 0x2d, 0x50, 0x20, 0x73, 0x61, - 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, - 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, - 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, - 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, - 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, + 0x6c, 0x3a, 0x20, 0x22, 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x20, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x20, 0x73, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, + 0x32, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, + 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x72, 0x65, + 0x70, 0x65, 0x61, 0x74, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, + 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, + 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, + 0x79, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x49, 0x6e, 0x74, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x3a, 0x20, 0x22, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x64, 0x65, 0x72, 0x20, + 0x4e, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x22, 0x2c, 0x20, + 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x32, 0x30, 0x34, 0x38, 0x2c, 0x20, 0x6d, + 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, + 0x20, 0x22, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x6c, 0x61, 0x73, + 0x74, 0x5f, 0x6e, 0x22, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x2e, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x6c, 0x61, 0x73, + 0x74, 0x5f, 0x6e, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x49, 0x6e, + 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x54, 0x6f, 0x70, 0x2d, 0x4b, 0x20, 0x73, + 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x22, 0x2c, 0x20, 0x6d, 0x61, + 0x78, 0x3a, 0x20, 0x31, 0x30, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, + 0x20, 0x2d, 0x31, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, + 0x74, 0x6f, 0x70, 0x5f, 0x6b, 0x22, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x2e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x20, 0x7d, 0x29, + 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x6f, 0x70, 0x5f, 0x6b, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x75, 0x6d, 
- 0x6d, 0x61, 0x72, 0x79, 0x3e, 0x4d, 0x6f, 0x72, 0x65, 0x20, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3c, 0x2f, 0x73, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, - 0x74, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x74, 0x77, 0x6f, - 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x3a, 0x20, 0x22, 0x54, 0x46, 0x53, 0x2d, 0x5a, 0x22, 0x2c, 0x20, - 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, - 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, - 0x3a, 0x20, 0x22, 0x74, 0x66, 0x73, 0x5f, 0x7a, 0x22, 0x2c, 0x20, 0x73, - 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x66, 0x73, 0x5f, 0x7a, - 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, - 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x54, 0x79, 0x70, 0x69, 0x63, 0x61, - 0x6c, 0x20, 0x50, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, + 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, + 0x22, 0x54, 0x6f, 0x70, 0x2d, 0x50, 0x20, 0x73, 0x61, 0x6d, 0x70, 0x6c, + 0x69, 0x6e, 0x67, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, - 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x74, 0x79, 0x70, - 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, - 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x69, 0x63, 0x61, 0x6c, - 0x5f, 0x70, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, - 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x50, 0x72, 0x65, 0x73, - 0x65, 0x6e, 0x63, 0x65, 0x20, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, - 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, - 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, - 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, - 0x63, 0x65, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x22, 0x2c, - 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, - 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x65, - 0x73, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, - 0x79, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, - 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x46, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x6e, 0x63, 0x79, 0x20, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, + 
0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x74, 0x6f, 0x70, + 0x5f, 0x70, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, + 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2e, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, + 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, + 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x4d, 0x69, + 0x6e, 0x2d, 0x50, 0x20, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, - 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x6e, 0x63, 0x79, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x22, + 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x66, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x70, 0x65, 0x6e, 0x61, - 0x6c, 0x74, 0x79, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x68, 0x72, 0x20, - 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, - 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x74, 0x68, 0x72, 0x65, - 0x65, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, + 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6d, 0x69, + 0x6e, 0x5f, 0x70, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x3c, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x3e, 0x4d, + 0x6f, 0x72, 0x65, 0x20, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3c, + 0x2f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x20, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x3d, 0x22, 0x74, 0x77, 0x6f, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, + 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, + 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x54, 0x46, + 0x53, 0x2d, 0x5a, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, + 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, + 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x74, 0x66, 0x73, + 0x5f, 0x7a, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, + 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2e, 
0x74, 0x66, 0x73, 0x5f, 0x7a, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x3c, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, - 0x72, 0x61, 0x64, 0x69, 0x6f, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, - 0x22, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x22, 0x20, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x30, 0x22, 0x20, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6d, 0x69, 0x72, 0x6f, - 0x73, 0x74, 0x61, 0x74, 0x20, 0x3d, 0x3d, 0x20, 0x30, 0x7d, 0x20, 0x6f, - 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x49, 0x6e, 0x74, - 0x7d, 0x20, 0x2f, 0x3e, 0x20, 0x6e, 0x6f, 0x20, 0x4d, 0x69, 0x72, 0x6f, - 0x73, 0x74, 0x61, 0x74, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, + 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, + 0x54, 0x79, 0x70, 0x69, 0x63, 0x61, 0x6c, 0x20, 0x50, 0x22, 0x2c, 0x20, + 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, + 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, + 0x3a, 0x20, 0x22, 0x74, 0x79, 0x70, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x70, + 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, + 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, + 0x79, 0x70, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, - 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, - 0x22, 0x72, 0x61, 0x64, 0x69, 0x6f, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x22, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x22, 0x20, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x31, 0x22, 0x20, 0x63, 0x68, - 0x65, 0x63, 0x6b, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6d, 0x69, 0x72, - 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, 0x3d, 0x3d, 0x20, 0x31, 0x7d, 0x20, - 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x49, 0x6e, - 0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x20, 0x4d, 0x69, 0x72, 0x6f, 0x73, 0x74, - 0x61, 0x74, 0x20, 0x76, 0x31, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x3e, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, - 0x3d, 0x22, 0x72, 0x61, 0x64, 0x69, 0x6f, 0x22, 0x20, 0x6e, 0x61, 0x6d, - 0x65, 0x3d, 0x22, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x22, - 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x32, 0x22, 0x20, 0x63, - 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6d, 0x69, - 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, 0x3d, 0x3d, 0x20, 0x32, 0x7d, - 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x49, - 0x6e, 0x74, 
0x7d, 0x20, 0x2f, 0x3e, 0x20, 0x4d, 0x69, 0x72, 0x6f, 0x73, - 0x74, 0x61, 0x74, 0x20, 0x76, 0x32, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, + 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, + 0x20, 0x22, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x70, + 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, + 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, + 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, + 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x65, 0x6e, + 0x61, 0x6c, 0x74, 0x79, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, + 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x5f, + 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, - 0x22, 0x4d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, 0x74, 0x61, - 0x75, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x30, 0x2e, - 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, - 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x6d, 0x69, 0x72, 0x6f, - 0x73, 0x74, 0x61, 0x74, 0x5f, 0x74, 0x61, 0x75, 0x22, 0x2c, 0x20, 0x73, - 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6d, 0x69, 0x72, 0x6f, 0x73, - 0x74, 0x61, 0x74, 0x5f, 0x74, 0x61, 0x75, 0x20, 0x7d, 0x29, 0x7d, 0x0a, + 0x22, 0x46, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x20, 0x70, + 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, + 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, + 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, + 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x70, 0x65, + 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, + 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x2e, 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, + 0x79, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x20, 0x7d, 0x29, + 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x3c, 0x68, 0x72, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x3d, 0x22, 0x74, 0x68, 0x72, 0x65, 0x65, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, - 0x22, 0x4d, 0x69, 
0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, 0x65, 0x74, - 0x61, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x30, - 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, - 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x6d, 0x69, 0x72, 0x6f, 0x73, - 0x74, 0x61, 0x74, 0x5f, 0x65, 0x74, 0x61, 0x22, 0x2c, 0x20, 0x73, 0x74, - 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, - 0x61, 0x74, 0x5f, 0x65, 0x74, 0x61, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, + 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, + 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x64, 0x69, 0x6f, 0x22, + 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x6d, 0x69, 0x72, 0x6f, 0x73, + 0x74, 0x61, 0x74, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, + 0x30, 0x22, 0x20, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x3d, 0x24, + 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x2e, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, 0x3d, + 0x3d, 0x20, 0x30, 0x7d, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x49, 0x6e, 0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x20, 0x6e, + 0x6f, 0x20, 0x4d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x3c, 0x2f, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x64, 0x69, 0x6f, + 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x6d, 0x69, 0x72, 0x6f, + 0x73, 0x74, 0x61, 0x74, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, + 0x22, 0x31, 0x22, 0x20, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x3d, + 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2e, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, + 0x3d, 0x3d, 0x20, 0x31, 0x7d, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x49, 0x6e, 0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x20, + 0x4d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, 0x76, 0x31, 0x3c, + 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x24, 0x7b, 0x49, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, - 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x53, 0x68, 0x6f, - 0x77, 0x20, 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, - 0x69, 0x65, 0x73, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, - 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2c, 0x20, 0x6e, - 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, - 0x73, 0x22, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, + 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x3c, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x20, 0x74, 0x79, 
0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x64, 0x69, + 0x6f, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x6d, 0x69, 0x72, + 0x6f, 0x73, 0x74, 0x61, 0x74, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3d, 0x22, 0x32, 0x22, 0x20, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, + 0x3d, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x2e, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, + 0x20, 0x3d, 0x3d, 0x20, 0x32, 0x7d, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x49, 0x6e, 0x74, 0x7d, 0x20, 0x2f, 0x3e, + 0x20, 0x4d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, 0x76, 0x32, + 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x4d, 0x69, 0x72, 0x6f, 0x73, + 0x74, 0x61, 0x74, 0x20, 0x74, 0x61, 0x75, 0x22, 0x2c, 0x20, 0x6d, 0x61, + 0x78, 0x3a, 0x20, 0x31, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, + 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, + 0x20, 0x22, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x74, + 0x61, 0x75, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, + 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2e, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x74, 0x61, + 0x75, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x4d, 0x69, 0x72, 0x6f, 0x73, + 0x74, 0x61, 0x74, 0x20, 0x65, 0x74, 0x61, 0x22, 0x2c, 0x20, 0x6d, 0x61, + 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, + 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, + 0x22, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x65, 0x74, + 0x61, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, + 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, - 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x73, 0x20, 0x7d, 0x29, 0x7d, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, - 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x6f, 0x72, 0x6d, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, - 0x74, 0x20, 0x70, 0x72, 0x6f, 0x62, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x20, - 0x3d, 0x20, 0x28, 0x70, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x72, - 0x20, 0x3d, 0x20, 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x66, 0x6c, 0x6f, 0x6f, - 0x72, 0x28, 0x31, 0x39, 0x32, 0x20, 0x2a, 0x20, 0x28, 0x31, 0x20, 0x2d, - 0x20, 0x70, 0x29, 0x29, 
0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x67, 0x20, 0x3d, 0x20, 0x4d, 0x61, - 0x74, 0x68, 0x2e, 0x66, 0x6c, 0x6f, 0x6f, 0x72, 0x28, 0x31, 0x39, 0x32, - 0x20, 0x2a, 0x20, 0x70, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x60, 0x72, 0x67, 0x62, - 0x61, 0x28, 0x24, 0x7b, 0x72, 0x7d, 0x2c, 0x24, 0x7b, 0x67, 0x7d, 0x2c, - 0x30, 0x2c, 0x30, 0x2e, 0x33, 0x29, 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, - 0x74, 0x20, 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, - 0x69, 0x65, 0x73, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6d, 0x61, 0x70, - 0x28, 0x6d, 0x73, 0x67, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x65, 0x74, 0x61, + 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, + 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x49, 0x6e, 0x74, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x3a, 0x20, 0x22, 0x53, 0x68, 0x6f, 0x77, 0x20, 0x50, 0x72, 0x6f, 0x62, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0x2c, 0x20, + 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, + 0x3a, 0x20, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, + 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x73, 0x22, 0x2c, 0x20, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, + 0x73, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x2f, 0x66, 0x6f, 0x72, 0x6d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x70, 0x72, 0x6f, 0x62, + 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x29, 0x20, + 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x72, 0x20, 0x3d, 0x20, 0x4d, 0x61, 0x74, + 0x68, 0x2e, 0x66, 0x6c, 0x6f, 0x6f, 0x72, 0x28, 0x31, 0x39, 0x32, 0x20, + 0x2a, 0x20, 0x28, 0x31, 0x20, 0x2d, 0x20, 0x70, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, - 0x7b, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, - 0x65, 0x73, 0x20, 0x7d, 0x20, 0x3d, 0x20, 0x6d, 0x73, 0x67, 0x3b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x21, - 0x63, 0x6f, 0x6d, 0x70, 
0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, - 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, - 0x20, 0x7c, 0x7c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, - 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x3d, 0x3d, - 0x3d, 0x20, 0x30, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6d, 0x73, 0x67, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, - 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x6c, - 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x3e, 0x20, 0x31, 0x29, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, - 0x2f, 0x20, 0x4e, 0x6f, 0x74, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x62, 0x79, - 0x74, 0x65, 0x20, 0x70, 0x61, 0x69, 0x72, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, - 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x5b, 0x30, - 0x5d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x73, 0x57, 0x69, 0x74, 0x68, 0x28, 0x27, 0x62, 0x79, - 0x74, 0x65, 0x3a, 0x20, 0x5c, 0x5c, 0x27, 0x29, 0x29, 0x20, 0x72, 0x65, - 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x70, - 0x6c, 0x69, 0x74, 0x44, 0x61, 0x74, 0x61, 0x20, 0x3d, 0x20, 0x63, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, - 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x6d, - 0x61, 0x70, 0x28, 0x70, 0x72, 0x6f, 0x62, 0x20, 0x3d, 0x3e, 0x20, 0x28, - 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x3a, 0x20, 0x70, - 0x72, 0x6f, 0x62, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2c, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, - 0x73, 0x3a, 0x20, 0x5b, 0x70, 0x72, 0x6f, 0x62, 0x5d, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x29, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, - 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x24, - 0x7b, 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, - 0x65, 0x73, 0x7d, 0x20, 0x64, 0x61, 0x74, 0x61, 0x3d, 0x24, 0x7b, 0x73, - 0x70, 0x6c, 0x69, 0x74, 0x44, 0x61, 0x74, 0x61, 0x7d, 0x20, 0x2f, 0x3e, - 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x67, 0x20, 0x3d, 0x20, 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x66, 0x6c, 0x6f, + 0x6f, 0x72, 0x28, 0x31, 0x39, 0x32, 0x20, 0x2a, 0x20, 0x70, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x20, 0x60, 0x72, 0x67, 0x62, 0x61, 0x28, 0x24, 0x7b, 0x72, 0x7d, + 0x2c, 0x24, 0x7b, 0x67, 0x7d, 0x2c, 0x30, 0x2c, 0x30, 0x2e, 0x33, 0x29, + 0x60, 0x3b, 0x0a, 0x20, 
0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x50, 0x72, 0x6f, 0x62, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x20, 0x3d, 0x20, + 0x28, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x6d, 0x61, 0x70, 0x28, 0x6d, 0x73, 0x67, 0x20, 0x3d, + 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x7b, 0x20, 0x63, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x20, 0x7d, 0x20, 0x3d, + 0x20, 0x6d, 0x73, 0x67, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x21, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x20, 0x7c, 0x7c, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x30, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x69, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, + 0x3e, 0x20, 0x31, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x4e, 0x6f, 0x74, 0x20, + 0x66, 0x6f, 0x72, 0x20, 0x62, 0x79, 0x74, 0x65, 0x20, 0x70, 0x61, 0x69, + 0x72, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x69, 0x65, 0x73, 0x5b, 0x30, 0x5d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x73, 0x57, 0x69, + 0x74, 0x68, 0x28, 0x27, 0x62, 0x79, 0x74, 0x65, 0x3a, 0x20, 0x5c, 0x5c, + 0x27, 0x29, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6d, + 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x20, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x44, 0x61, 0x74, + 0x61, 0x20, 0x3d, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x69, 0x65, 0x73, 0x2e, 0x6d, 0x61, 0x70, 0x28, 0x70, 0x72, 0x6f, + 0x62, 0x20, 0x3d, 0x3e, 0x20, 0x28, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x3a, 0x20, 0x70, 0x72, 0x6f, 0x62, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 
0x69, 0x65, 0x73, 0x3a, 0x20, 0x5b, 0x70, 0x72, + 0x6f, 0x62, 0x5d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x29, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, + 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x24, 0x7b, 0x50, 0x72, 0x6f, 0x62, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x7d, 0x20, 0x64, 0x61, + 0x74, 0x61, 0x3d, 0x24, 0x7b, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x7d, 0x20, 0x2f, 0x3e, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x7b, 0x20, 0x70, + 0x72, 0x6f, 0x62, 0x73, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x20, 0x7d, 0x20, 0x3d, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x5b, 0x30, 0x5d, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, + 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x20, 0x3d, 0x20, 0x70, 0x72, 0x6f, 0x62, + 0x73, 0x2e, 0x66, 0x69, 0x6e, 0x64, 0x28, 0x70, 0x20, 0x3d, 0x3e, 0x20, + 0x70, 0x2e, 0x74, 0x6f, 0x6b, 0x5f, 0x73, 0x74, 0x72, 0x20, 0x3d, 0x3d, + 0x3d, 0x20, 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x70, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x20, + 0x3d, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x20, 0x3f, 0x20, 0x70, 0x72, + 0x6f, 0x62, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x28, 0x66, 0x6f, 0x75, 0x6e, + 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x62, 0x29, 0x20, 0x3a, 0x20, 0x27, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x27, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, - 0x73, 0x74, 0x20, 0x7b, 0x20, 0x70, 0x72, 0x6f, 0x62, 0x73, 0x2c, 0x20, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x7d, 0x20, 0x3d, 0x20, - 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, - 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, - 0x5b, 0x30, 0x5d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x20, - 0x3d, 0x20, 0x70, 0x72, 0x6f, 0x62, 0x73, 0x2e, 0x66, 0x69, 0x6e, 0x64, - 0x28, 0x70, 0x20, 0x3d, 0x3e, 0x20, 0x70, 0x2e, 0x74, 0x6f, 0x6b, 0x5f, - 0x73, 0x74, 0x72, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x6d, 0x73, 0x67, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x70, - 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x20, 0x3d, 0x20, 0x66, 0x6f, 0x75, 0x6e, - 0x64, 0x20, 0x3f, 0x20, 0x70, 0x72, 0x6f, 0x62, 0x43, 0x6f, 0x6c, 0x6f, - 0x72, 0x28, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x62, - 0x29, 0x20, 0x3a, 0x20, 0x27, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x27, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x70, 0x6f, 0x70, - 0x6f, 0x76, 0x65, 0x72, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, - 0x20, 0x3d, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x70, 0x72, 0x6f, 0x62, 0x2d, - 0x73, 0x65, 0x74, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 
0x20, 0x20, 0x24, 0x7b, 0x70, 0x72, 0x6f, 0x62, - 0x73, 0x2e, 0x6d, 0x61, 0x70, 0x28, 0x28, 0x70, 0x2c, 0x20, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, - 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x64, 0x69, 0x76, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x6b, 0x65, 0x79, 0x3d, 0x24, 0x7b, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x7d, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x69, 0x74, 0x6c, 0x65, - 0x3d, 0x24, 0x7b, 0x60, 0x70, 0x72, 0x6f, 0x62, 0x3a, 0x20, 0x24, 0x7b, - 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x62, 0x7d, 0x60, 0x7d, 0x0a, 0x20, 0x20, + 0x73, 0x74, 0x20, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x43, 0x68, + 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x20, 0x3d, 0x20, 0x68, 0x74, 0x6d, + 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, + 0x22, 0x70, 0x72, 0x6f, 0x62, 0x2d, 0x73, 0x65, 0x74, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, 0x24, 0x7b, - 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x3a, - 0x20, 0x27, 0x30, 0x2e, 0x33, 0x65, 0x6d, 0x27, 0x2c, 0x0a, 0x20, 0x20, + 0x24, 0x7b, 0x70, 0x72, 0x6f, 0x62, 0x73, 0x2e, 0x6d, 0x61, 0x70, 0x28, + 0x28, 0x70, 0x2c, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x29, 0x20, 0x3d, + 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, + 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, - 0x6c, 0x6f, 0x72, 0x3a, 0x20, 0x70, 0x2e, 0x74, 0x6f, 0x6b, 0x5f, 0x73, - 0x74, 0x72, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x20, 0x3f, 0x20, 0x70, 0x72, 0x6f, 0x62, 0x43, 0x6f, 0x6c, - 0x6f, 0x72, 0x28, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x62, 0x29, 0x20, 0x3a, - 0x20, 0x27, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x27, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x7d, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3e, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6b, 0x65, 0x79, 0x3d, 0x24, 0x7b, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x3e, - 0x24, 0x7b, 0x70, 0x2e, 0x74, 0x6f, 0x6b, 0x5f, 0x73, 0x74, 0x72, 0x7d, - 0x3a, 0x20, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x3d, 0x24, 0x7b, 0x60, 0x70, 0x72, + 0x6f, 0x62, 0x3a, 0x20, 0x24, 0x7b, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x62, + 0x7d, 0x60, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 
0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, + 0x79, 0x6c, 0x65, 0x3d, 0x24, 0x7b, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, + 0x64, 0x64, 0x69, 0x6e, 0x67, 0x3a, 0x20, 0x27, 0x30, 0x2e, 0x33, 0x65, + 0x6d, 0x27, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, + 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x3a, 0x20, 0x70, + 0x2e, 0x74, 0x6f, 0x6b, 0x5f, 0x73, 0x74, 0x72, 0x20, 0x3d, 0x3d, 0x3d, + 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x3f, 0x20, 0x70, + 0x72, 0x6f, 0x62, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x28, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x62, 0x29, 0x20, 0x3a, 0x20, 0x27, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x27, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x24, 0x7b, - 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x66, 0x6c, 0x6f, 0x6f, 0x72, 0x28, 0x70, - 0x2e, 0x70, 0x72, 0x6f, 0x62, 0x20, 0x2a, 0x20, 0x31, 0x30, 0x30, 0x29, - 0x7d, 0x25, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x7d, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x60, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, - 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x24, 0x7b, 0x50, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x7d, 0x20, - 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, 0x24, 0x7b, 0x7b, 0x20, 0x62, 0x61, - 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6c, 0x6f, - 0x72, 0x3a, 0x20, 0x70, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x20, 0x7d, 0x7d, - 0x20, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x43, 0x68, 0x69, 0x6c, - 0x64, 0x72, 0x65, 0x6e, 0x3d, 0x24, 0x7b, 0x70, 0x6f, 0x70, 0x6f, 0x76, - 0x65, 0x72, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x7d, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x24, 0x7b, 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x28, 0x2f, 0x5c, 0x6e, - 0x2f, 0x67, 0x69, 0x6d, 0x29, 0x20, 0x3f, 0x20, 0x68, 0x74, 0x6d, 0x6c, - 0x60, 0x3c, 0x62, 0x72, 0x20, 0x2f, 0x3e, 0x60, 0x20, 0x3a, 0x20, 0x6d, - 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x7d, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, - 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, - 0x70, 0x6f, 0x6f, 0x72, 0x20, 0x6d, 0x61, 0x6e, 0x73, 0x20, 0x6d, 0x61, - 0x72, 0x6b, 0x64, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, - 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, - 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x4d, 
0x61, 0x72, 0x6b, 0x64, 0x6f, 0x77, - 0x6e, 0x69, 0x73, 0x68, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6d, 0x64, 0x20, - 0x3d, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x74, 0x65, 0x78, - 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, - 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x26, 0x2f, 0x67, 0x2c, - 0x20, 0x27, 0x26, 0x61, 0x6d, 0x70, 0x3b, 0x27, 0x29, 0x0a, 0x20, 0x20, + 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x24, 0x7b, 0x70, 0x2e, 0x74, 0x6f, + 0x6b, 0x5f, 0x73, 0x74, 0x72, 0x7d, 0x3a, 0x20, 0x3c, 0x2f, 0x73, 0x70, + 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, + 0x70, 0x61, 0x6e, 0x3e, 0x24, 0x7b, 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x66, + 0x6c, 0x6f, 0x6f, 0x72, 0x28, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x62, 0x20, + 0x2a, 0x20, 0x31, 0x30, 0x30, 0x29, 0x7d, 0x25, 0x3c, 0x2f, 0x73, 0x70, + 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, + 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24, 0x7b, 0x50, 0x6f, 0x70, + 0x6f, 0x76, 0x65, 0x72, 0x7d, 0x20, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, + 0x24, 0x7b, 0x7b, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, + 0x6e, 0x64, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x3a, 0x20, 0x70, 0x43, 0x6f, + 0x6c, 0x6f, 0x72, 0x20, 0x7d, 0x7d, 0x20, 0x70, 0x6f, 0x70, 0x6f, 0x76, + 0x65, 0x72, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x3d, 0x24, + 0x7b, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x43, 0x68, 0x69, 0x6c, + 0x64, 0x72, 0x65, 0x6e, 0x7d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x6d, 0x73, 0x67, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x28, 0x2f, 0x5c, 0x6e, 0x2f, 0x67, 0x69, 0x6d, 0x29, 0x20, + 0x3f, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x62, 0x72, 0x20, 0x2f, + 0x3e, 0x60, 0x20, 0x3a, 0x20, 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x70, 0x6f, 0x6f, 0x72, 0x20, 0x6d, + 0x61, 0x6e, 0x73, 0x20, 0x6d, 0x61, 0x72, 0x6b, 0x64, 0x6f, 0x77, 0x6e, + 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x4d, + 0x61, 0x72, 0x6b, 0x64, 0x6f, 0x77, 0x6e, 0x69, 0x73, 0x68, 0x20, 0x3d, + 0x20, 0x28, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x29, 0x20, 0x3d, 0x3e, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x74, 0x20, 0x6d, 0x64, 0x20, 
0x3d, 0x20, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x2e, 0x74, 0x65, 0x78, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, + 0x28, 0x2f, 0x26, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x26, 0x61, 0x6d, 0x70, + 0x3b, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x3c, 0x2f, + 0x67, 0x2c, 0x20, 0x27, 0x26, 0x6c, 0x74, 0x3b, 0x27, 0x29, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, + 0x61, 0x63, 0x65, 0x28, 0x2f, 0x3e, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x26, + 0x67, 0x74, 0x3b, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, + 0x5e, 0x23, 0x7b, 0x31, 0x2c, 0x36, 0x7d, 0x20, 0x28, 0x2e, 0x2a, 0x29, + 0x24, 0x2f, 0x67, 0x69, 0x6d, 0x2c, 0x20, 0x27, 0x3c, 0x68, 0x33, 0x3e, + 0x24, 0x31, 0x3c, 0x2f, 0x68, 0x33, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, - 0x63, 0x65, 0x28, 0x2f, 0x3c, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x26, 0x6c, - 0x74, 0x3b, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x3e, - 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x26, 0x67, 0x74, 0x3b, 0x27, 0x29, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, - 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5e, 0x23, 0x7b, 0x31, 0x2c, 0x36, - 0x7d, 0x20, 0x28, 0x2e, 0x2a, 0x29, 0x24, 0x2f, 0x67, 0x69, 0x6d, 0x2c, - 0x20, 0x27, 0x3c, 0x68, 0x33, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x68, 0x33, - 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5c, 0x2a, - 0x5c, 0x2a, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5c, 0x2a, 0x5c, 0x2a, 0x2f, - 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, - 0x24, 0x31, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x27, - 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, - 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5f, 0x5f, 0x28, 0x2e, - 0x2a, 0x3f, 0x29, 0x5f, 0x5f, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x73, + 0x63, 0x65, 0x28, 0x2f, 0x5c, 0x2a, 0x5c, 0x2a, 0x28, 0x2e, 0x2a, 0x3f, + 0x29, 0x5c, 0x2a, 0x5c, 0x2a, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, - 0x28, 0x2f, 0x5c, 0x2a, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5c, 0x2a, 0x2f, - 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x65, 0x6d, 0x3e, 0x24, 0x31, 0x3c, 0x2f, - 0x65, 0x6d, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, - 0x5f, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5f, 0x2f, 0x67, 0x2c, 0x20, 0x27, - 0x3c, 0x65, 0x6d, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x65, 0x6d, 0x3e, 0x27, + 0x28, 0x2f, 0x5f, 0x5f, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5f, 0x5f, 0x2f, + 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, + 0x24, 0x31, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, - 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x60, 0x60, 0x60, 0x2e, - 0x2a, 0x3f, 0x5c, 0x6e, 0x28, 0x5b, 0x5c, 0x73, 0x5c, 0x53, 0x5d, 0x2a, - 0x3f, 0x29, 0x60, 0x60, 0x60, 0x2f, 0x67, 0x2c, 
0x20, 0x27, 0x3c, 0x70, - 0x72, 0x65, 0x3e, 0x3c, 0x63, 0x6f, 0x64, 0x65, 0x3e, 0x24, 0x31, 0x3c, - 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3e, 0x3c, 0x2f, 0x70, 0x72, 0x65, 0x3e, - 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, - 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x60, 0x28, 0x2e, - 0x2a, 0x3f, 0x29, 0x60, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x63, 0x6f, + 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5c, 0x2a, 0x28, 0x2e, + 0x2a, 0x3f, 0x29, 0x5c, 0x2a, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x65, + 0x6d, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x65, 0x6d, 0x3e, 0x27, 0x29, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, + 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5f, 0x28, 0x2e, 0x2a, 0x3f, 0x29, + 0x5f, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x65, 0x6d, 0x3e, 0x24, 0x31, + 0x3c, 0x2f, 0x65, 0x6d, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, + 0x28, 0x2f, 0x60, 0x60, 0x60, 0x2e, 0x2a, 0x3f, 0x5c, 0x6e, 0x28, 0x5b, + 0x5c, 0x73, 0x5c, 0x53, 0x5d, 0x2a, 0x3f, 0x29, 0x60, 0x60, 0x60, 0x2f, + 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x70, 0x72, 0x65, 0x3e, 0x3c, 0x63, 0x6f, 0x64, 0x65, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3e, - 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, - 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5c, 0x6e, 0x2f, - 0x67, 0x69, 0x6d, 0x2c, 0x20, 0x27, 0x3c, 0x62, 0x72, 0x20, 0x2f, 0x3e, - 0x27, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, - 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x73, - 0x70, 0x61, 0x6e, 0x20, 0x64, 0x61, 0x6e, 0x67, 0x65, 0x72, 0x6f, 0x75, - 0x73, 0x6c, 0x79, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x6e, 0x65, 0x72, 0x48, - 0x54, 0x4d, 0x4c, 0x3d, 0x24, 0x7b, 0x7b, 0x20, 0x5f, 0x5f, 0x68, 0x74, - 0x6d, 0x6c, 0x3a, 0x20, 0x6d, 0x64, 0x20, 0x7d, 0x7d, 0x20, 0x2f, 0x3e, - 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x4d, 0x6f, 0x64, - 0x65, 0x6c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x49, 0x6e, 0x66, 0x6f, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x6c, 0x6c, 0x61, 0x6d, - 0x61, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, - 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x2f, 0x3e, 0x60, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, - 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, - 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x24, 0x7b, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x64, - 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x5f, 0x6d, 0x73, 0x2e, 0x74, 0x6f, 0x46, 0x69, 0x78, - 0x65, 0x64, 0x28, 0x29, 0x7d, 0x6d, 0x73, 0x20, 0x70, 0x65, 0x72, 0x20, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x2c, 0x20, 0x24, 0x7b, 0x6c, 0x6c, 0x61, + 0x3c, 0x2f, 0x70, 0x72, 0x65, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 
0x70, 0x6c, 0x61, 0x63, + 0x65, 0x28, 0x2f, 0x60, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x60, 0x2f, 0x67, + 0x2c, 0x20, 0x27, 0x3c, 0x63, 0x6f, 0x64, 0x65, 0x3e, 0x24, 0x31, 0x3c, + 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x28, 0x2f, 0x5c, 0x6e, 0x2f, 0x67, 0x69, 0x6d, 0x2c, 0x20, 0x27, + 0x3c, 0x62, 0x72, 0x20, 0x2f, 0x3e, 0x27, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, + 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x20, 0x64, 0x61, + 0x6e, 0x67, 0x65, 0x72, 0x6f, 0x75, 0x73, 0x6c, 0x79, 0x53, 0x65, 0x74, + 0x49, 0x6e, 0x6e, 0x65, 0x72, 0x48, 0x54, 0x4d, 0x4c, 0x3d, 0x24, 0x7b, + 0x7b, 0x20, 0x5f, 0x5f, 0x68, 0x74, 0x6d, 0x6c, 0x3a, 0x20, 0x6d, 0x64, + 0x20, 0x7d, 0x7d, 0x20, 0x2f, 0x3e, 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x74, 0x20, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x20, 0x3d, + 0x20, 0x28, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x29, 0x20, 0x3d, 0x3e, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, + 0x28, 0x21, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x2f, + 0x3e, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, - 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x2e, 0x74, - 0x6f, 0x46, 0x69, 0x78, 0x65, 0x64, 0x28, 0x32, 0x29, 0x7d, 0x20, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x20, 0x70, 0x65, 0x72, 0x20, 0x73, 0x65, - 0x63, 0x6f, 0x6e, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x69, 0x6d, 0x70, 0x6c, - 0x65, 0x20, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x69, 0x6d, - 0x70, 0x6c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, - 0x20, 0x50, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x28, - 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, + 0x70, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x6d, 0x73, + 0x2e, 0x74, 0x6f, 0x46, 0x69, 0x78, 0x65, 0x64, 0x28, 0x29, 0x7d, 0x6d, + 0x73, 0x20, 0x70, 0x65, 0x72, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x2c, + 0x20, 0x24, 0x7b, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x64, + 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x2e, 0x74, 0x6f, 0x46, 0x69, 0x78, 0x65, 0x64, + 0x28, 0x32, 0x29, 0x7d, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x20, + 0x70, 0x65, 0x72, 0x20, 0x73, 0x65, 0x63, 0x6f, 0x6e, 
0x64, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x73, 0x70, 0x61, + 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, + 0x20, 0x73, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x70, 0x6f, 0x70, 0x6f, + 0x76, 0x65, 0x72, 0x20, 0x69, 0x6d, 0x70, 0x6c, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x50, 0x6f, 0x70, 0x6f, 0x76, + 0x65, 0x72, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, + 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x69, 0x73, 0x4f, 0x70, 0x65, 0x6e, + 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, + 0x28, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x70, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x7b, 0x20, 0x74, 0x6f, 0x70, 0x3a, + 0x20, 0x27, 0x30, 0x70, 0x78, 0x27, 0x2c, 0x20, 0x6c, 0x65, 0x66, 0x74, + 0x3a, 0x20, 0x27, 0x30, 0x70, 0x78, 0x27, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, - 0x69, 0x73, 0x4f, 0x70, 0x65, 0x6e, 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, - 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, - 0x73, 0x74, 0x20, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x20, - 0x3d, 0x20, 0x75, 0x73, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, - 0x7b, 0x20, 0x74, 0x6f, 0x70, 0x3a, 0x20, 0x27, 0x30, 0x70, 0x78, 0x27, - 0x2c, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x3a, 0x20, 0x27, 0x30, 0x70, 0x78, - 0x27, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, - 0x52, 0x65, 0x66, 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, 0x52, 0x65, 0x66, - 0x28, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x70, 0x6f, 0x70, 0x6f, - 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, - 0x52, 0x65, 0x66, 0x28, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x3b, 0x0a, 0x0a, + 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x20, 0x3d, 0x20, + 0x75, 0x73, 0x65, 0x52, 0x65, 0x66, 0x28, 0x6e, 0x75, 0x6c, 0x6c, 0x29, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, + 0x74, 0x20, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, 0x52, 0x65, 0x66, 0x28, 0x6e, 0x75, + 0x6c, 0x6c, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, + 0x50, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x28, 0x29, + 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, + 0x52, 0x65, 0x66, 0x2e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x72, 0x65, 0x63, 0x74, 0x20, + 0x3d, 0x20, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x2e, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x67, 0x65, 0x74, 0x42, + 0x6f, 0x75, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x52, 0x65, 0x63, 0x74, 0x28, 0x29, 0x3b, 0x0a, 
0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x74, 0x6f, 0x70, 0x3a, 0x20, 0x60, 0x24, 0x7b, 0x72, 0x65, + 0x63, 0x74, 0x2e, 0x62, 0x6f, 0x74, 0x74, 0x6f, 0x6d, 0x20, 0x2b, 0x20, + 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, + 0x6c, 0x59, 0x7d, 0x70, 0x78, 0x60, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x66, 0x74, + 0x3a, 0x20, 0x60, 0x24, 0x7b, 0x72, 0x65, 0x63, 0x74, 0x2e, 0x6c, 0x65, + 0x66, 0x74, 0x20, 0x2b, 0x20, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x2e, + 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x58, 0x7d, 0x70, 0x78, 0x60, 0x2c, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x73, 0x4f, 0x70, + 0x65, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x21, + 0x69, 0x73, 0x4f, 0x70, 0x65, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, - 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x50, 0x6f, 0x70, 0x6f, 0x76, 0x65, - 0x72, 0x20, 0x3d, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, - 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x2e, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, - 0x20, 0x72, 0x65, 0x63, 0x74, 0x20, 0x3d, 0x20, 0x62, 0x75, 0x74, 0x74, - 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x2e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, - 0x74, 0x2e, 0x67, 0x65, 0x74, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x69, 0x6e, - 0x67, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x63, 0x74, 0x28, - 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x6f, 0x70, 0x3a, - 0x20, 0x60, 0x24, 0x7b, 0x72, 0x65, 0x63, 0x74, 0x2e, 0x62, 0x6f, 0x74, - 0x74, 0x6f, 0x6d, 0x20, 0x2b, 0x20, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, - 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x59, 0x7d, 0x70, 0x78, 0x60, - 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x3a, 0x20, 0x60, 0x24, 0x7b, 0x72, - 0x65, 0x63, 0x74, 0x2e, 0x6c, 0x65, 0x66, 0x74, 0x20, 0x2b, 0x20, 0x77, - 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, - 0x58, 0x7d, 0x70, 0x78, 0x60, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x69, 0x73, 0x4f, 0x70, 0x65, 0x6e, 0x2e, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x20, 0x3d, 0x20, 0x21, 0x69, 0x73, 0x4f, 0x70, 0x65, 0x6e, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, - 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x4f, 0x75, 0x74, 0x73, 
0x69, 0x64, 0x65, - 0x20, 0x3d, 0x20, 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x29, 0x20, 0x3d, - 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x69, 0x66, 0x20, 0x28, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x52, - 0x65, 0x66, 0x2e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x26, - 0x26, 0x20, 0x21, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, - 0x66, 0x2e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x29, 0x20, 0x26, 0x26, 0x20, - 0x21, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x2e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x73, 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x29, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x73, 0x4f, 0x70, 0x65, - 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x45, 0x66, - 0x66, 0x65, 0x63, 0x74, 0x28, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, - 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x61, 0x64, 0x64, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x28, 0x27, - 0x6d, 0x6f, 0x75, 0x73, 0x65, 0x64, 0x6f, 0x77, 0x6e, 0x27, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x4f, - 0x75, 0x74, 0x73, 0x69, 0x64, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, - 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x28, 0x27, - 0x6d, 0x6f, 0x75, 0x73, 0x65, 0x64, 0x6f, 0x77, 0x6e, 0x27, 0x2c, 0x20, - 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x4f, - 0x75, 0x74, 0x73, 0x69, 0x64, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x7d, 0x2c, 0x20, 0x5b, 0x5d, 0x29, 0x3b, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, - 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x20, 0x73, 0x74, 0x79, 0x6c, - 0x65, 0x3d, 0x24, 0x7b, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x73, 0x74, - 0x79, 0x6c, 0x65, 0x7d, 0x20, 0x72, 0x65, 0x66, 0x3d, 0x24, 0x7b, 0x62, - 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x7d, 0x20, 0x6f, 0x6e, - 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x74, 0x6f, 0x67, 0x67, - 0x6c, 0x65, 0x50, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x7d, 0x3e, 0x24, - 0x7b, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x63, 0x68, 0x69, 0x6c, 0x64, - 0x72, 0x65, 0x6e, 0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x69, 0x73, - 0x4f, 0x70, 0x65, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x26, - 0x26, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24, 0x7b, 
0x50, 0x6f, 0x72, - 0x74, 0x61, 0x6c, 0x7d, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x3d, 0x22, 0x23, - 0x70, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, - 0x76, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x66, 0x3d, 0x24, 0x7b, 0x70, 0x6f, - 0x70, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x7d, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x70, 0x6f, 0x70, 0x6f, 0x76, - 0x65, 0x72, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, 0x24, 0x7b, 0x7b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x74, 0x6f, 0x70, 0x3a, 0x20, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x6f, 0x70, 0x2c, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x6c, 0x65, 0x66, 0x74, 0x3a, 0x20, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6c, 0x65, - 0x66, 0x74, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x7d, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x70, - 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, - 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x7d, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, - 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x24, 0x7b, 0x50, 0x6f, 0x72, 0x74, 0x61, - 0x6c, 0x7d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x60, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x3b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x2f, 0x2f, 0x20, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x20, 0x70, - 0x72, 0x65, 0x61, 0x63, 0x74, 0x2d, 0x70, 0x6f, 0x72, 0x74, 0x61, 0x6c, - 0x20, 0x28, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x65, 0x76, - 0x65, 0x6c, 0x6f, 0x70, 0x69, 0x74, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x63, - 0x74, 0x2d, 0x70, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x2f, 0x62, 0x6c, 0x6f, - 0x62, 0x2f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x73, 0x72, 0x63, - 0x2f, 0x70, 0x72, 0x65, 0x61, 0x63, 0x74, 0x2d, 0x70, 0x6f, 0x72, 0x74, - 0x61, 0x6c, 0x2e, 0x6a, 0x73, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, - 0x2a, 0x2a, 0x20, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x20, - 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x6f, 0x66, - 0x20, 0x64, 0x65, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x73, - 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x67, 0x69, - 0x76, 0x65, 0x6e, 0x20, 0x43, 0x53, 0x53, 0x20, 0x73, 0x65, 0x6c, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x20, 0x2a, 0x2f, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x20, 0x50, 0x6f, 0x72, 0x74, 0x61, 0x6c, - 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x73, 0x20, 0x43, 0x6f, 0x6d, - 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 
0x65, 0x6e, 0x74, - 0x44, 0x69, 0x64, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x28, 0x70, 0x72, - 0x6f, 0x70, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x6c, 0x65, 0x74, 0x20, - 0x69, 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, 0x20, + 0x75, 0x74, 0x73, 0x69, 0x64, 0x65, 0x20, 0x3d, 0x20, 0x28, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x70, 0x6f, + 0x70, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x2e, 0x63, 0x75, 0x72, + 0x72, 0x65, 0x6e, 0x74, 0x20, 0x26, 0x26, 0x20, 0x21, 0x70, 0x6f, 0x70, + 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x2e, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, + 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x29, 0x20, 0x26, 0x26, 0x20, 0x21, 0x62, 0x75, 0x74, 0x74, 0x6f, + 0x6e, 0x52, 0x65, 0x66, 0x2e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x28, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x29, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x69, 0x73, 0x4f, 0x70, 0x65, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x20, 0x3d, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x75, 0x73, 0x65, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x28, 0x28, + 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x61, 0x64, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x28, 0x27, 0x6d, 0x6f, 0x75, 0x73, 0x65, 0x64, + 0x6f, 0x77, 0x6e, 0x27, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x4f, 0x75, 0x74, 0x73, 0x69, 0x64, 0x65, + 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x69, 0x66, 0x20, 0x28, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x5b, 0x69, 0x5d, - 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x70, 0x73, 0x5b, 0x69, 0x5d, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x65, 0x72, 0x28, 0x27, 0x6d, 0x6f, 0x75, 0x73, 0x65, 0x64, + 0x6f, 0x77, 0x6e, 0x27, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x4f, 0x75, 0x74, 0x73, 0x69, 0x64, 0x65, + 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x2c, 0x20, 0x5b, + 0x5d, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, + 0x6e, 0x20, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, 0x24, 0x7b, 0x70, 0x72, + 0x6f, 0x70, 0x73, 0x2e, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x7d, 0x20, 0x72, + 0x65, 0x66, 0x3d, 0x24, 0x7b, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x52, + 0x65, 0x66, 0x7d, 0x20, 0x6f, 0x6e, 0x43, 0x6c, 0x69, 
0x63, 0x6b, 0x3d, + 0x24, 0x7b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x50, 0x6f, 0x70, 0x6f, + 0x76, 0x65, 0x72, 0x7d, 0x3e, 0x24, 0x7b, 0x70, 0x72, 0x6f, 0x70, 0x73, + 0x2e, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x7d, 0x3c, 0x2f, + 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x24, 0x7b, 0x69, 0x73, 0x4f, 0x70, 0x65, 0x6e, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x20, 0x26, 0x26, 0x20, 0x68, 0x74, 0x6d, 0x6c, + 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x3c, 0x24, 0x7b, 0x50, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x7d, 0x20, 0x69, + 0x6e, 0x74, 0x6f, 0x3d, 0x22, 0x23, 0x70, 0x6f, 0x72, 0x74, 0x61, 0x6c, + 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, - 0x74, 0x75, 0x72, 0x6e, 0x20, 0x73, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6e, - 0x64, 0x65, 0x72, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x29, 0x3b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x64, - 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, - 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x64, 0x20, 0x3d, 0x20, 0x74, - 0x72, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, - 0x4c, 0x61, 0x79, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x74, 0x68, 0x69, 0x73, - 0x2e, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x4c, 0x61, 0x79, 0x65, 0x72, - 0x2e, 0x62, 0x69, 0x6e, 0x64, 0x28, 0x74, 0x68, 0x69, 0x73, 0x29, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, - 0x73, 0x2e, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x4c, 0x61, 0x79, 0x65, - 0x72, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, - 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6d, 0x70, - 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x57, 0x69, 0x6c, 0x6c, 0x55, 0x6e, 0x6d, - 0x6f, 0x75, 0x6e, 0x74, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, - 0x6e, 0x64, 0x65, 0x72, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x28, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x73, 0x4d, 0x6f, 0x75, - 0x6e, 0x74, 0x65, 0x64, 0x20, 0x3d, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, - 0x20, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x20, 0x26, 0x26, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4e, - 0x6f, 0x64, 0x65, 0x29, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4e, - 0x6f, 0x64, 0x65, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, - 0x69, 0x6c, 0x64, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 
0x66, 0x69, 0x6e, - 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x28, 0x6e, 0x6f, 0x64, 0x65, 0x29, 0x20, - 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, - 0x74, 0x75, 0x72, 0x6e, 0x20, 0x74, 0x79, 0x70, 0x65, 0x6f, 0x66, 0x20, - 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x27, 0x73, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x27, 0x20, 0x3f, 0x20, 0x64, 0x6f, 0x63, 0x75, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x28, 0x6e, 0x6f, 0x64, 0x65, 0x29, - 0x20, 0x3a, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x28, - 0x73, 0x68, 0x6f, 0x77, 0x20, 0x3d, 0x20, 0x74, 0x72, 0x75, 0x65, 0x29, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, - 0x66, 0x20, 0x28, 0x21, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x73, 0x4d, - 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x64, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, - 0x72, 0x6e, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x2f, 0x2f, 0x20, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x20, 0x75, 0x70, - 0x20, 0x6f, 0x6c, 0x64, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x69, 0x66, - 0x20, 0x6d, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x20, 0x62, 0x61, 0x73, 0x65, - 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, - 0x66, 0x20, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x70, - 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x74, + 0x66, 0x3d, 0x24, 0x7b, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, + 0x22, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x2d, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x22, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x79, 0x6c, + 0x65, 0x3d, 0x24, 0x7b, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x6f, 0x70, 0x3a, 0x20, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x2e, 0x74, 0x6f, 0x70, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x3a, + 0x20, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x2e, 0x6c, 0x65, 0x66, 0x74, 0x2c, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x7d, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x24, 0x7b, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x70, + 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, + 0x65, 0x6e, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x24, + 0x7b, 0x50, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x7d, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x7d, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, + 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x3a, 0x20, 0x70, 0x72, 0x65, 0x61, 0x63, 0x74, 0x2d, + 0x70, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x20, 0x28, 0x68, 
0x74, 0x74, 0x70, + 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x64, 0x65, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x69, 0x74, + 0x2f, 0x70, 0x72, 0x65, 0x61, 0x63, 0x74, 0x2d, 0x70, 0x6f, 0x72, 0x74, + 0x61, 0x6c, 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x2f, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x2f, 0x73, 0x72, 0x63, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x63, + 0x74, 0x2d, 0x70, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x2e, 0x6a, 0x73, 0x29, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2a, 0x2a, 0x20, 0x52, 0x65, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x20, 0x6f, 0x66, 0x20, 0x64, 0x65, 0x73, 0x63, 0x65, + 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x73, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x67, 0x69, 0x76, 0x65, 0x6e, 0x20, 0x43, 0x53, + 0x53, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x20, 0x2a, + 0x2f, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x20, + 0x50, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x64, 0x73, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6d, + 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x64, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x28, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x28, 0x6c, 0x65, 0x74, 0x20, 0x69, 0x20, 0x69, 0x6e, 0x20, 0x70, + 0x72, 0x6f, 0x70, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x70, 0x72, + 0x6f, 0x70, 0x73, 0x5b, 0x69, 0x5d, 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x74, + 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x5b, 0x69, 0x5d, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x73, + 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x28, 0x74, 0x68, + 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x4c, 0x61, 0x79, + 0x65, 0x72, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x44, 0x69, 0x64, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x28, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x64, 0x20, 0x3d, 0x20, 0x74, 0x72, 0x75, 0x65, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, + 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x20, + 0x3d, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6e, 0x64, 0x65, + 0x72, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x62, 0x69, 0x6e, 0x64, 0x28, + 0x74, 0x68, 0x69, 0x73, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6e, 0x64, + 0x65, 0x72, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x28, 0x29, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x57, + 0x69, 0x6c, 0x6c, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x28, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, + 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6e, 0x64, 0x65, 
0x72, 0x4c, 0x61, + 0x79, 0x65, 0x72, 0x28, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x29, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, + 0x2e, 0x69, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x64, 0x20, 0x3d, + 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x74, 0x68, 0x69, 0x73, + 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x20, 0x26, 0x26, 0x20, 0x74, + 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x29, 0x20, 0x74, + 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x28, 0x74, 0x68, + 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x29, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x66, 0x69, 0x6e, 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x28, + 0x6e, 0x6f, 0x64, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x74, + 0x79, 0x70, 0x65, 0x6f, 0x66, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x3d, + 0x3d, 0x3d, 0x20, 0x27, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x27, 0x20, + 0x3f, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x28, 0x6e, 0x6f, 0x64, 0x65, 0x29, 0x20, 0x3a, 0x20, 0x6e, 0x6f, 0x64, + 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, + 0x4c, 0x61, 0x79, 0x65, 0x72, 0x28, 0x73, 0x68, 0x6f, 0x77, 0x20, 0x3d, + 0x20, 0x74, 0x72, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x74, 0x68, + 0x69, 0x73, 0x2e, 0x69, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x64, + 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x63, 0x6c, + 0x65, 0x61, 0x6e, 0x20, 0x75, 0x70, 0x20, 0x6f, 0x6c, 0x64, 0x20, 0x6e, + 0x6f, 0x64, 0x65, 0x20, 0x69, 0x66, 0x20, 0x6d, 0x6f, 0x76, 0x69, 0x6e, + 0x67, 0x20, 0x62, 0x61, 0x73, 0x65, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x74, 0x68, 0x69, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, + 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x6e, + 0x74, 0x6f, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x50, 0x6f, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x6e, - 0x74, 0x6f, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x20, 0x3d, 0x20, - 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x69, - 0x6e, 0x74, 0x6f, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, - 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x26, 0x26, 0x20, 0x74, 0x68, 0x69, 0x73, - 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, - 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 
0x65, 0x20, 0x3d, - 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x28, 0x68, 0x74, 0x6d, 0x6c, - 0x60, 0x3c, 0x24, 0x7b, 0x50, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x50, 0x72, - 0x6f, 0x78, 0x79, 0x7d, 0x20, 0x2f, 0x3e, 0x60, 0x2c, 0x20, 0x74, 0x68, - 0x69, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x2c, 0x20, 0x74, 0x68, 0x69, - 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x29, 0x3b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, - 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x3d, 0x20, 0x74, 0x68, 0x69, - 0x73, 0x2e, 0x66, 0x69, 0x6e, 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x28, 0x74, - 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x69, 0x6e, - 0x74, 0x6f, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x20, - 0x3d, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x28, 0x68, 0x74, 0x6d, - 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x24, 0x7b, 0x50, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x50, 0x72, - 0x6f, 0x78, 0x79, 0x7d, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x3d, 0x24, 0x7b, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x7d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x73, 0x68, 0x6f, 0x77, - 0x20, 0x26, 0x26, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x70, 0x73, 0x2e, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x20, - 0x7c, 0x7c, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x7d, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x24, 0x7b, 0x50, - 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x7d, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x2c, 0x20, - 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x2c, 0x20, 0x74, - 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x29, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x28, 0x29, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, - 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x3b, 0x0a, + 0x74, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, + 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x26, + 0x26, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, + 0x72, 0x28, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x24, 0x7b, 0x50, 0x6f, + 0x72, 0x74, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x7d, 0x20, 0x2f, + 0x3e, 0x60, 0x2c, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x6e, 0x74, + 0x6f, 0x2c, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, + 0x20, 0x3d, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x66, 
0x69, 0x6e, 0x64, + 0x4e, 0x6f, 0x64, 0x65, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x70, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x29, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x72, 0x65, 0x6e, 0x64, + 0x65, 0x72, 0x28, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24, 0x7b, 0x50, 0x6f, + 0x72, 0x74, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x7d, 0x20, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x3d, 0x24, 0x7b, 0x74, 0x68, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x7d, 0x3e, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x24, 0x7b, 0x73, 0x68, 0x6f, 0x77, 0x20, 0x26, 0x26, 0x20, 0x74, 0x68, + 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x63, 0x68, 0x69, + 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x20, 0x7c, 0x7c, 0x20, 0x6e, 0x75, 0x6c, + 0x6c, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x2f, 0x24, 0x7b, 0x50, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x7d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x60, 0x2c, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, + 0x6e, 0x74, 0x6f, 0x2c, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, + 0x6e, 0x64, 0x65, 0x72, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x6e, 0x75, 0x6c, 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x68, 0x69, 0x67, 0x68, 0x2d, 0x6f, 0x72, 0x64, 0x65, + 0x72, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x20, + 0x74, 0x68, 0x61, 0x74, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x73, + 0x20, 0x69, 0x74, 0x73, 0x20, 0x66, 0x69, 0x72, 0x73, 0x74, 0x20, 0x63, + 0x68, 0x69, 0x6c, 0x64, 0x20, 0x69, 0x66, 0x20, 0x69, 0x74, 0x20, 0x65, + 0x78, 0x69, 0x73, 0x74, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, + 0x2f, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x61, 0x73, 0x20, 0x61, 0x20, + 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x20, + 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x70, 0x72, + 0x6f, 0x78, 0x79, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6c, 0x61, + 0x73, 0x73, 0x20, 0x50, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x50, 0x72, 0x6f, + 0x78, 0x79, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x73, 0x20, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x67, 0x65, 0x74, 0x43, 0x68, 0x69, 0x6c, + 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x28, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x70, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x68, 0x69, 0x67, - 0x68, 0x2d, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x20, 0x63, 0x6f, 0x6d, 0x70, - 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x72, - 0x65, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x20, 0x69, 0x74, 
0x73, 0x20, 0x66, - 0x69, 0x72, 0x73, 0x74, 0x20, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x20, 0x69, - 0x66, 0x20, 0x69, 0x74, 0x20, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x2e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x75, 0x73, 0x65, 0x64, - 0x20, 0x61, 0x73, 0x20, 0x61, 0x20, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, - 0x69, 0x6e, 0x67, 0x20, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x20, 0x50, 0x6f, 0x72, - 0x74, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x20, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x64, 0x73, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, - 0x6e, 0x74, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x67, - 0x65, 0x74, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x74, 0x68, - 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x78, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x6e, 0x64, - 0x65, 0x72, 0x28, 0x7b, 0x20, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, - 0x6e, 0x20, 0x7d, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x63, 0x68, - 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x20, 0x7c, 0x7c, 0x20, 0x6e, 0x75, - 0x6c, 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x41, 0x70, 0x70, 0x28, - 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, - 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, - 0x22, 0x6d, 0x6f, 0x64, 0x65, 0x2d, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x7d, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x3e, + 0x20, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x28, 0x7b, 0x20, 0x63, + 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x20, 0x7d, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x20, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, + 0x20, 0x7c, 0x7c, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x20, 0x41, 0x70, 0x70, 0x28, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, + 0x20, 0x7b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x6d, 0x6f, 0x64, 0x65, 0x2d, + 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x7d, 0x22, 0x3e, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x68, 
0x31, 0x3e, 0x6c, + 0x6c, 0x61, 0x6d, 0x61, 0x2e, 0x63, 0x70, 0x70, 0x3c, 0x2f, 0x68, 0x31, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x3c, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x3e, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6d, 0x61, + 0x69, 0x6e, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24, 0x7b, 0x63, 0x68, 0x61, 0x74, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x20, 0x3f, 0x20, 0x43, 0x68, 0x61, 0x74, 0x4c, 0x6f, 0x67, 0x20, + 0x3a, 0x20, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x6f, 0x72, 0x6d, + 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x6d, 0x61, 0x69, 0x6e, 0x3e, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24, 0x7b, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x27, 0x63, 0x68, + 0x61, 0x74, 0x27, 0x20, 0x3f, 0x20, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x3a, 0x20, 0x43, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x73, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x73, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x6f, 0x6f, 0x74, 0x65, 0x72, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x68, 0x31, 0x3e, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e, 0x63, - 0x70, 0x70, 0x3c, 0x2f, 0x68, 0x31, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x3c, 0x6d, 0x61, 0x69, 0x6e, 0x20, 0x69, 0x64, 0x3d, - 0x22, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x3e, 0x0a, 0x20, + 0x20, 0x3c, 0x70, 0x3e, 0x3c, 0x24, 0x7b, 0x4d, 0x6f, 0x64, 0x65, 0x6c, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x66, 0x6f, 0x7d, 0x20, 0x2f, 0x3e, 0x3c, 0x2f, 0x70, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x24, 0x7b, 0x63, 0x68, 0x61, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, - 0x64, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3f, 0x20, 0x43, 0x68, - 0x61, 0x74, 0x4c, 0x6f, 0x67, 0x20, 0x3a, 0x20, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x6d, - 0x61, 0x69, 0x6e, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x20, 0x69, 0x64, 0x3d, 0x22, 0x77, 0x72, 0x69, 0x74, 0x65, 0x22, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x20, 0x3d, - 0x3d, 0x3d, 0x20, 0x27, 0x63, 0x68, 0x61, 0x74, 0x27, 0x20, 
0x3f, 0x20, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x6e, 0x70, 0x75, 0x74, - 0x20, 0x3a, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x7d, 0x20, 0x2f, - 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x2f, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3e, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, - 0x6f, 0x6f, 0x74, 0x65, 0x72, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x70, 0x3e, 0x3c, 0x24, - 0x7b, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x7d, 0x20, 0x2f, 0x3e, - 0x3c, 0x2f, 0x70, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x70, 0x3e, 0x50, 0x6f, 0x77, 0x65, - 0x72, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x3c, 0x61, 0x20, 0x68, 0x72, - 0x65, 0x66, 0x3d, 0x22, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, - 0x67, 0x65, 0x72, 0x67, 0x61, 0x6e, 0x6f, 0x76, 0x2f, 0x6c, 0x6c, 0x61, - 0x6d, 0x61, 0x2e, 0x63, 0x70, 0x70, 0x22, 0x3e, 0x6c, 0x6c, 0x61, 0x6d, - 0x61, 0x2e, 0x63, 0x70, 0x70, 0x3c, 0x2f, 0x61, 0x3e, 0x20, 0x61, 0x6e, - 0x64, 0x20, 0x3c, 0x61, 0x20, 0x68, 0x72, 0x65, 0x66, 0x3d, 0x22, 0x68, - 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x67, 0x6d, 0x6c, 0x2e, - 0x61, 0x69, 0x22, 0x3e, 0x67, 0x67, 0x6d, 0x6c, 0x2e, 0x61, 0x69, 0x3c, - 0x2f, 0x61, 0x3e, 0x2e, 0x3c, 0x2f, 0x70, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x6f, 0x6f, - 0x74, 0x65, 0x72, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x28, 0x68, - 0x28, 0x41, 0x70, 0x70, 0x29, 0x2c, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x28, 0x27, 0x23, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x27, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, - 0x3c, 0x2f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x3e, 0x0a, 0x3c, 0x2f, - 0x68, 0x65, 0x61, 0x64, 0x3e, 0x0a, 0x0a, 0x3c, 0x62, 0x6f, 0x64, 0x79, - 0x3e, 0x0a, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, 0x69, 0x64, 0x3d, - 0x22, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x22, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, - 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x66, 0x69, 0x6c, 0x65, 0x22, 0x20, - 0x69, 0x64, 0x3d, 0x22, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x70, 0x75, - 0x74, 0x22, 0x20, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x3d, 0x22, 0x69, - 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x2a, 0x22, 0x20, 0x73, 0x74, 0x79, 0x6c, - 0x65, 0x3d, 0x22, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x3a, 0x20, - 0x6e, 0x6f, 0x6e, 0x65, 0x3b, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x3c, 0x2f, - 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, - 0x69, 0x64, 0x3d, 0x22, 0x70, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x22, 0x3e, - 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x3c, 0x2f, 0x62, 0x6f, 0x64, - 0x79, 0x3e, 0x0a, 0x0a, 0x3c, 0x2f, 0x68, 0x74, 0x6d, 0x6c, 0x3e, 0x0a, - 0x0a + 0x70, 0x3e, 0x50, 0x6f, 0x77, 0x65, 0x72, 0x65, 0x64, 0x20, 0x62, 0x79, + 0x20, 0x3c, 0x61, 0x20, 0x68, 0x72, 0x65, 0x66, 0x3d, 
0x22, 0x68, 0x74, + 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x67, 0x65, 0x72, 0x67, 0x61, 0x6e, + 0x6f, 0x76, 0x2f, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e, 0x63, 0x70, 0x70, + 0x22, 0x3e, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e, 0x63, 0x70, 0x70, 0x3c, + 0x2f, 0x61, 0x3e, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x3c, 0x61, 0x20, 0x68, + 0x72, 0x65, 0x66, 0x3d, 0x22, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x67, 0x67, 0x6d, 0x6c, 0x2e, 0x61, 0x69, 0x22, 0x3e, 0x67, 0x67, + 0x6d, 0x6c, 0x2e, 0x61, 0x69, 0x3c, 0x2f, 0x61, 0x3e, 0x2e, 0x3c, 0x2f, + 0x70, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x2f, 0x66, 0x6f, 0x6f, 0x74, 0x65, 0x72, 0x3e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, + 0x6e, 0x64, 0x65, 0x72, 0x28, 0x68, 0x28, 0x41, 0x70, 0x70, 0x29, 0x2c, + 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x28, + 0x27, 0x23, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x27, + 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x3c, 0x2f, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x3e, 0x0a, 0x3c, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x3e, 0x0a, + 0x0a, 0x3c, 0x62, 0x6f, 0x64, 0x79, 0x3e, 0x0a, 0x20, 0x20, 0x3c, 0x64, + 0x69, 0x76, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, + 0x66, 0x69, 0x6c, 0x65, 0x22, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x66, 0x69, + 0x6c, 0x65, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x20, 0x61, 0x63, 0x63, + 0x65, 0x70, 0x74, 0x3d, 0x22, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x2a, + 0x22, 0x20, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, 0x22, 0x64, 0x69, 0x73, + 0x70, 0x6c, 0x61, 0x79, 0x3a, 0x20, 0x6e, 0x6f, 0x6e, 0x65, 0x3b, 0x22, + 0x3e, 0x0a, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, + 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x70, 0x6f, + 0x72, 0x74, 0x61, 0x6c, 0x22, 0x3e, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, + 0x0a, 0x3c, 0x2f, 0x62, 0x6f, 0x64, 0x79, 0x3e, 0x0a, 0x0a, 0x3c, 0x2f, + 0x68, 0x74, 0x6d, 0x6c, 0x3e, 0x0a, 0x0a }; -unsigned int index_html_len = 32269; +unsigned int index_html_len = 33103; diff --git a/examples/server/public/index.html b/examples/server/public/index.html index 60659c1478f72f..175c52478918a9 100644 --- a/examples/server/public/index.html +++ b/examples/server/public/index.html @@ -160,6 +160,11 @@ height: 10em; } + [contenteditable] { + display: inline-block; + white-space: pre-wrap; + outline: 0px solid transparent; + } @keyframes loading-bg-wipe { 0% { @@ -462,18 +467,23 @@ }, "{{char}}"); } - const runCompletion = async () => { + const runCompletion = () => { if (controller.value) { console.log('already running...'); return; } const { prompt } = session.value; transcriptUpdate([...session.value.transcript, ["", prompt]]); - await runLlama(prompt, { + runLlama(prompt, { ...params.value, slot_id: slot_id, stop: [], - }, ""); + }, "").finally(() => { + session.value.prompt = session.value.transcript.map(([_, data]) => + Array.isArray(data) ? 
data.map(msg => msg.content).join('') : data + ).join(''); + session.value.transcript = []; + }) } const stop = (e) => { @@ -573,6 +583,7 @@ } }, [messages]) + const isCompletionMode = session.value.type === 'completion' const chatLine = ([user, data], index) => { let message const isArrayMessage = Array.isArray(data) @@ -582,20 +593,31 @@ const text = isArrayMessage ? data.map(msg => msg.content).join('').replace(/^\s+/, '') : data; - message = html`<${Markdownish} text=${template(text)} />` + message = isCompletionMode ? + text : + html`<${Markdownish} text=${template(text)} />` } if (user) { return html`

${template(user)}: ${message}

` } else { - return html`

${message}

` + return isCompletionMode ? + html`${message}` : + html`

${message}

` } }; + const handleCompletionEdit = (e) => { + session.value.prompt = e.target.innerText; + session.value.transcript = []; + } + return html` -
+
- ${messages.flatMap(chatLine)} -
`; + + ${messages.flatMap(chatLine)} + + `; }; const ConfigForm = (props) => { From 34b0a082074b073eb14c2bd93c0c070e20ddcd16 Mon Sep 17 00:00:00 2001 From: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com> Date: Fri, 10 Nov 2023 22:04:50 -0700 Subject: [PATCH 082/206] gguf-py: Refactor and allow reading/modifying existing GGUF files (#3981) * gguf-py: Refactor and add file reading support * Replay changes from #3871 Credit to @cebtenzzre for that pull * Various type annotation fixes. * sort imports with isort (again) * Fix missing return statement in add_tensor * style cleanup with flake8 * fix NamedTuple and Enum usage * Fix an issue with state init in GGUFReader Move examples to an examples/ directory Clean up examples Add an example of modifying keys in a GGUF file Update documentation with info on examples Try to support people importing gguf/gguf.py directly * Damagage is not a word. * Clean up gguf-py/examples/modify_gguf.py whitespace Co-authored-by: Jared Van Bortel * Update gguf-py/examples/modify_gguf.py formatting Co-authored-by: Jared Van Bortel * Update gguf-py/gguf/gguf_reader.py type hint Co-authored-by: Jared Van Bortel * Make examples executable, formatting changes * Add more information to GGUFReader and examples comments * Include a gguf Python package version bump * Add convert-gguf-endian.py script * cleanup * gguf-py : bump minor version * Reorganize scripts * Make GGUFReader endian detection less arbitrary * Add JSON dumping support to gguf-dump.py Which I kind of regret now * A few for gguf-dump.py cleanups * Murder accidental tuple in gguf-py/scripts/gguf-dump.py Co-authored-by: Jared Van Bortel * cleanup * constants : remove unneeded type annotations * fix python 3.8 compat * Set up gguf- scripts in pyproject.toml * And include scripts/__init__.py, derp * convert.py: We can't currently support Q8_0 on big endian. 
* gguf-py: SpecialVocab: Always try available sources for special token ids gguf-py: SpecialVocab: Try to load merges from merges.txt if not in tokenizer.json gguf-py: SpecialVocab: Add 'add_bos_token' type bools to GGUF metadata u * cleanup * Promote add_X_token to GGUF metadata for BOS and EOS --------- Co-authored-by: Jared Van Bortel Co-authored-by: Jared Van Bortel --- convert-baichuan-hf-to-gguf.py | 2 +- convert-llama-ggml-to-gguf.py | 24 +- convert-persimmon-to-gguf.py | 2 +- convert.py | 16 +- .../convert-train-checkpoint-to-gguf.py | 2 +- gguf-py/README.md | 10 + gguf-py/examples/writer.py | 40 + gguf-py/gguf/__init__.py | 6 +- gguf-py/gguf/constants.py | 470 +++++++ gguf-py/gguf/gguf.py | 1149 +---------------- gguf-py/gguf/gguf_reader.py | 264 ++++ gguf-py/gguf/gguf_writer.py | 409 ++++++ gguf-py/gguf/tensor_mapping.py | 257 ++++ gguf-py/gguf/vocab.py | 164 +++ gguf-py/pyproject.toml | 8 +- gguf-py/scripts/__init__.py | 12 + gguf-py/scripts/gguf-convert-endian.py | 113 ++ gguf-py/scripts/gguf-dump.py | 116 ++ gguf-py/scripts/gguf-set-metadata.py | 90 ++ gguf-py/tests/test_gguf.py | 4 +- 20 files changed, 1982 insertions(+), 1176 deletions(-) create mode 100755 gguf-py/examples/writer.py create mode 100644 gguf-py/gguf/constants.py create mode 100644 gguf-py/gguf/gguf_reader.py create mode 100644 gguf-py/gguf/gguf_writer.py create mode 100644 gguf-py/gguf/tensor_mapping.py create mode 100644 gguf-py/gguf/vocab.py create mode 100644 gguf-py/scripts/__init__.py create mode 100755 gguf-py/scripts/gguf-convert-endian.py create mode 100755 gguf-py/scripts/gguf-dump.py create mode 100755 gguf-py/scripts/gguf-set-metadata.py diff --git a/convert-baichuan-hf-to-gguf.py b/convert-baichuan-hf-to-gguf.py index 67ccbe99f132af..789602351ca9d7 100755 --- a/convert-baichuan-hf-to-gguf.py +++ b/convert-baichuan-hf-to-gguf.py @@ -16,7 +16,7 @@ from sentencepiece import SentencePieceProcessor # type: ignore[import] if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf')) + sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) import gguf diff --git a/convert-llama-ggml-to-gguf.py b/convert-llama-ggml-to-gguf.py index 871add64d4ca73..d898d81c4c445b 100755 --- a/convert-llama-ggml-to-gguf.py +++ b/convert-llama-ggml-to-gguf.py @@ -12,29 +12,9 @@ import os if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf')) + sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) import gguf -# Note: Does not support GGML_QKK_64 -QK_K = 256 -# Items here are (block size, type size) -GGML_QUANT_SIZES = { - gguf.GGMLQuantizationType.F32 : (1, 4), - gguf.GGMLQuantizationType.F16 : (1, 2), - gguf.GGMLQuantizationType.Q4_0 : (32, 2 + 16), - gguf.GGMLQuantizationType.Q4_1 : (32, 2 + 2 + 16), - gguf.GGMLQuantizationType.Q5_0 : (32, 2 + 4 + 16), - gguf.GGMLQuantizationType.Q5_1 : (32, 2 + 2 + 4 + 16), - gguf.GGMLQuantizationType.Q8_0 : (32, 2 + 32), - gguf.GGMLQuantizationType.Q8_1 : (32, 4 + 4 + 32), - gguf.GGMLQuantizationType.Q2_K : (256, 2 + 2 + QK_K // 16 + QK_K // 4), - gguf.GGMLQuantizationType.Q3_K : (256, 2 + QK_K // 4 + QK_K // 8 + 12), - gguf.GGMLQuantizationType.Q4_K : (256, 2 + 2 + QK_K // 2 + 12), - gguf.GGMLQuantizationType.Q5_K : (256, 2 + 2 + QK_K // 2 + QK_K // 8 + 12), - gguf.GGMLQuantizationType.Q6_K : (256, 2 + QK_K // 2 + QK_K // 4 + QK_K // 16), - gguf.GGMLQuantizationType.Q8_K : (256, 4 + QK_K + QK_K // 8), -} - class GGMLFormat(IntEnum): GGML = 0 GGMF = 1 @@ -125,7 +105,7 @@ def 
load(self, data, offset): (n_dims, name_len, dtype) = struct.unpack('<3I', data[offset:offset + 12]) assert n_dims >= 0 and n_dims <= 4, f'Invalid tensor dimensions {n_dims}' assert name_len < 4096, 'Absurd tensor name length' - quant = GGML_QUANT_SIZES.get(dtype) + quant = gguf.GGML_QUANT_SIZES.get(dtype) assert quant is not None, 'Unknown tensor type' (blksize, tysize) = quant offset += 12 diff --git a/convert-persimmon-to-gguf.py b/convert-persimmon-to-gguf.py index e022ffe46189e5..240f87306e5783 100644 --- a/convert-persimmon-to-gguf.py +++ b/convert-persimmon-to-gguf.py @@ -6,7 +6,7 @@ from pathlib import Path from sentencepiece import SentencePieceProcessor if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf')) + sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) import gguf def _flatten_dict(dct, tensors, prefix=None): diff --git a/convert.py b/convert.py index b0f44dbef8332a..a4b87e08849bcc 100755 --- a/convert.py +++ b/convert.py @@ -3,11 +3,9 @@ import argparse import concurrent.futures -import copy import enum import faulthandler import functools -import io import itertools import json import math @@ -23,14 +21,14 @@ from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor from dataclasses import dataclass from pathlib import Path -from typing import IO, TYPE_CHECKING, Any, Callable, Generator, Iterable, Literal, Sequence, TypeVar +from typing import IO, TYPE_CHECKING, Any, Callable, Iterable, Literal, TypeVar import numpy as np from sentencepiece import SentencePieceProcessor import os if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf')) + sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) import gguf if TYPE_CHECKING: @@ -851,7 +849,7 @@ def add_meta_vocab(self, vocab: Vocab) -> None: elif isinstance(vocab, BpeVocab): self.gguf.add_tokenizer_model("gpt2") else: - raise ValueError(f'Unknown vocab type: Not BpeVocab or SentencePieceVocab') + raise ValueError('Unknown vocab type: Not BpeVocab or SentencePieceVocab') self.gguf.add_token_list(tokens) self.gguf.add_token_scores(scores) self.gguf.add_token_types(toktypes) @@ -905,7 +903,7 @@ def maybe_do_quantize(item: tuple[DataType, NDArray]) -> NDArray: return dt.quantize(arr) @staticmethod - def write_all(fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: Vocab, svocab: gguf.SpecialVocab, concurrency: int = DEFAULT_CONCURRENCY, endianess=gguf.GGUFEndian.LITTLE) -> None: + def write_all(fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: Vocab, svocab: gguf.SpecialVocab, concurrency: int = DEFAULT_CONCURRENCY, endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE) -> None: check_vocab_size(params, vocab) of = OutputFile(fname_out, endianess=endianess) @@ -1114,11 +1112,15 @@ def do_dump_model(model_plus: ModelPlus) -> None: def main(args_in: list[str] | None = None) -> None: + output_choices = ["f32", "f16"] + if np.uint32(1) == np.uint32(1).newbyteorder("<"): + # We currently only support Q8_0 output on little endian systems. 
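+        # (np.uint32(1).newbyteorder("<") reinterprets the scalar's bytes as
+        # little-endian, so the comparison only holds on little-endian hosts.)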
+ output_choices.append("q8_0") parser = argparse.ArgumentParser(description="Convert a LLaMa model to a GGML compatible file") parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model") parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file") parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab") - parser.add_argument("--outtype", choices=["f32", "f16", "q8_0"], help="output format - note: q8_0 may be very slow (default: f16 or f32 based on input)") + parser.add_argument("--outtype", choices=output_choices, help="output format - note: q8_0 may be very slow (default: f16 or f32 based on input)") parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file") parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)") diff --git a/examples/train-text-from-scratch/convert-train-checkpoint-to-gguf.py b/examples/train-text-from-scratch/convert-train-checkpoint-to-gguf.py index 887ed2e212786d..ed93673bcf306a 100644 --- a/examples/train-text-from-scratch/convert-train-checkpoint-to-gguf.py +++ b/examples/train-text-from-scratch/convert-train-checkpoint-to-gguf.py @@ -9,7 +9,7 @@ from pathlib import Path if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / '..' / '..' / 'gguf-py' / 'gguf')) + sys.path.insert(1, str(Path(__file__).parent / '..' / '..' / 'gguf-py')) import gguf # gguf constants diff --git a/gguf-py/README.md b/gguf-py/README.md index a28d8c57adc7d0..502b6a510cc70d 100644 --- a/gguf-py/README.md +++ b/gguf-py/README.md @@ -11,6 +11,16 @@ as an example for its usage. pip install gguf ``` +## API Examples/Simple Tools + +[examples/writer.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/examples/writer.py) — Generates `example.gguf` in the current directory to demonstrate generating a GGUF file. Note that this file cannot be used as a model. + +[scripts/gguf-dump.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/scripts/gguf-dump.py) — Dumps a GGUF file's metadata to the console. + +[scripts/gguf-set-metadata.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/scripts/gguf-set-metadata.py) — Allows changing simple metadata values in a GGUF file by key. + +[scripts/gguf-convert-endian.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/scripts/gguf-convert-endian.py) — Allows converting the endianness of GGUF files. 
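+For programmatic reading, a minimal sketch along these lines should work (the `GGUFReader` attribute names here are assumptions based on the new reader API; see `scripts/gguf-dump.py` for canonical usage):
+
+```python
+from gguf import GGUFReader
+
+reader = GGUFReader("example.gguf")         # memory-maps the file for reading
+for name, field in reader.fields.items():   # GGUF key/value metadata entries
+    print(name, field.types)
+for tensor in reader.tensors:               # tensor infos with mapped data
+    print(tensor.name, tensor.shape, tensor.data.dtype)
+```
+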
+ ## Development Maintainers who participate in development of this package are advised to install it in editable mode: diff --git a/gguf-py/examples/writer.py b/gguf-py/examples/writer.py new file mode 100755 index 00000000000000..f39eed1afe763f --- /dev/null +++ b/gguf-py/examples/writer.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +import sys +from pathlib import Path + +import numpy as np + +# Necessary to load the local gguf package +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from gguf import GGUFWriter # noqa: E402 + + +# Example usage: +def writer_example() -> None: + # Example usage with a file + gguf_writer = GGUFWriter("example.gguf", "llama") + + gguf_writer.add_architecture() + gguf_writer.add_block_count(12) + gguf_writer.add_uint32("answer", 42) # Write a 32-bit integer + gguf_writer.add_float32("answer_in_float", 42.0) # Write a 32-bit float + gguf_writer.add_custom_alignment(64) + + tensor1 = np.ones((32,), dtype=np.float32) * 100.0 + tensor2 = np.ones((64,), dtype=np.float32) * 101.0 + tensor3 = np.ones((96,), dtype=np.float32) * 102.0 + + gguf_writer.add_tensor("tensor1", tensor1) + gguf_writer.add_tensor("tensor2", tensor2) + gguf_writer.add_tensor("tensor3", tensor3) + + gguf_writer.write_header_to_file() + gguf_writer.write_kv_data_to_file() + gguf_writer.write_tensors_to_file() + + gguf_writer.close() + + +if __name__ == '__main__': + writer_example() diff --git a/gguf-py/gguf/__init__.py b/gguf-py/gguf/__init__.py index f9b70a85b875e3..110ab342ccd719 100644 --- a/gguf-py/gguf/__init__.py +++ b/gguf-py/gguf/__init__.py @@ -1 +1,5 @@ -from .gguf import * +from .constants import * +from .gguf_reader import * +from .gguf_writer import * +from .tensor_mapping import * +from .vocab import * diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py new file mode 100644 index 00000000000000..bf1ccf66922d0f --- /dev/null +++ b/gguf-py/gguf/constants.py @@ -0,0 +1,470 @@ +from __future__ import annotations + +import sys +from enum import Enum, IntEnum, auto +from typing import Any + +# +# constants +# + +GGUF_MAGIC = 0x46554747 # "GGUF" +GGUF_VERSION = 3 +GGUF_DEFAULT_ALIGNMENT = 32 + +# +# metadata keys +# + + +class Keys: + class General: + ARCHITECTURE = "general.architecture" + QUANTIZATION_VERSION = "general.quantization_version" + ALIGNMENT = "general.alignment" + NAME = "general.name" + AUTHOR = "general.author" + URL = "general.url" + DESCRIPTION = "general.description" + LICENSE = "general.license" + SOURCE_URL = "general.source.url" + SOURCE_HF_REPO = "general.source.huggingface.repository" + FILE_TYPE = "general.file_type" + + class LLM: + CONTEXT_LENGTH = "{arch}.context_length" + EMBEDDING_LENGTH = "{arch}.embedding_length" + BLOCK_COUNT = "{arch}.block_count" + FEED_FORWARD_LENGTH = "{arch}.feed_forward_length" + USE_PARALLEL_RESIDUAL = "{arch}.use_parallel_residual" + TENSOR_DATA_LAYOUT = "{arch}.tensor_data_layout" + + class Attention: + HEAD_COUNT = "{arch}.attention.head_count" + HEAD_COUNT_KV = "{arch}.attention.head_count_kv" + MAX_ALIBI_BIAS = "{arch}.attention.max_alibi_bias" + CLAMP_KQV = "{arch}.attention.clamp_kqv" + LAYERNORM_EPS = "{arch}.attention.layer_norm_epsilon" + LAYERNORM_RMS_EPS = "{arch}.attention.layer_norm_rms_epsilon" + + class Rope: + DIMENSION_COUNT = "{arch}.rope.dimension_count" + FREQ_BASE = "{arch}.rope.freq_base" + SCALING_TYPE = "{arch}.rope.scaling.type" + SCALING_FACTOR = "{arch}.rope.scaling.factor" + SCALING_ORIG_CTX_LEN = "{arch}.rope.scaling.original_context_length" + SCALING_FINETUNED = 
"{arch}.rope.scaling.finetuned" + + class Tokenizer: + MODEL = "tokenizer.ggml.model" + LIST = "tokenizer.ggml.tokens" + TOKEN_TYPE = "tokenizer.ggml.token_type" + SCORES = "tokenizer.ggml.scores" + MERGES = "tokenizer.ggml.merges" + BOS_ID = "tokenizer.ggml.bos_token_id" + EOS_ID = "tokenizer.ggml.eos_token_id" + UNK_ID = "tokenizer.ggml.unknown_token_id" + SEP_ID = "tokenizer.ggml.seperator_token_id" + PAD_ID = "tokenizer.ggml.padding_token_id" + ADD_BOS = "tokenizer.ggml.add_bos_token" + ADD_EOS = "tokenizer.ggml.add_eos_token" + HF_JSON = "tokenizer.huggingface.json" + RWKV = "tokenizer.rwkv.world" + + +# +# recommended mapping of model tensor names for storage in gguf +# + + +class MODEL_ARCH(IntEnum): + LLAMA = auto() + FALCON = auto() + BAICHUAN = auto() + GPT2 = auto() + GPTJ = auto() + GPTNEOX = auto() + MPT = auto() + STARCODER = auto() + PERSIMMON = auto() + REFACT = auto() + BERT = auto() + BLOOM = auto() + + +class MODEL_TENSOR(IntEnum): + TOKEN_EMBD = auto() + TOKEN_EMBD_NORM = auto() + TOKEN_TYPES = auto() + POS_EMBD = auto() + OUTPUT = auto() + OUTPUT_NORM = auto() + ROPE_FREQS = auto() + ATTN_Q = auto() + ATTN_K = auto() + ATTN_V = auto() + ATTN_QKV = auto() + ATTN_OUT = auto() + ATTN_NORM = auto() + ATTN_NORM_2 = auto() + ATTN_ROT_EMBD = auto() + FFN_GATE = auto() + FFN_DOWN = auto() + FFN_UP = auto() + FFN_NORM = auto() + ATTN_Q_NORM = auto() + ATTN_K_NORM = auto() + + +MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = { + MODEL_ARCH.LLAMA: "llama", + MODEL_ARCH.FALCON: "falcon", + MODEL_ARCH.BAICHUAN: "baichuan", + MODEL_ARCH.GPT2: "gpt2", + MODEL_ARCH.GPTJ: "gptj", + MODEL_ARCH.GPTNEOX: "gptneox", + MODEL_ARCH.MPT: "mpt", + MODEL_ARCH.STARCODER: "starcoder", + MODEL_ARCH.PERSIMMON: "persimmon", + MODEL_ARCH.REFACT: "refact", + MODEL_ARCH.BERT: "bert", + MODEL_ARCH.BLOOM: "bloom", +} + +TENSOR_NAMES: dict[MODEL_TENSOR, str] = { + MODEL_TENSOR.TOKEN_EMBD: "token_embd", + MODEL_TENSOR.TOKEN_EMBD_NORM: "token_embd_norm", + MODEL_TENSOR.TOKEN_TYPES: "token_types", + MODEL_TENSOR.POS_EMBD: "position_embd", + MODEL_TENSOR.OUTPUT_NORM: "output_norm", + MODEL_TENSOR.OUTPUT: "output", + MODEL_TENSOR.ROPE_FREQS: "rope_freqs", + MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm", + MODEL_TENSOR.ATTN_NORM_2: "blk.{bid}.attn_norm_2", + MODEL_TENSOR.ATTN_QKV: "blk.{bid}.attn_qkv", + MODEL_TENSOR.ATTN_Q: "blk.{bid}.attn_q", + MODEL_TENSOR.ATTN_K: "blk.{bid}.attn_k", + MODEL_TENSOR.ATTN_V: "blk.{bid}.attn_v", + MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output", + MODEL_TENSOR.ATTN_ROT_EMBD: "blk.{bid}.attn_rot_embd", + MODEL_TENSOR.ATTN_Q_NORM: "blk.{bid}.attn_q_norm", + MODEL_TENSOR.ATTN_K_NORM: "blk.{bid}.attn_k_norm", + MODEL_TENSOR.FFN_NORM: "blk.{bid}.ffn_norm", + MODEL_TENSOR.FFN_GATE: "blk.{bid}.ffn_gate", + MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down", + MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up", +} + +MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { + MODEL_ARCH.LLAMA: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.GPTNEOX: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + 
MODEL_ARCH.FALCON: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_NORM_2, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.BAICHUAN: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.STARCODER: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.POS_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.BERT: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.TOKEN_TYPES, + MODEL_TENSOR.POS_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.MPT: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.GPTJ: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.PERSIMMON: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.ATTN_Q_NORM, + MODEL_TENSOR.ATTN_K_NORM, + MODEL_TENSOR.ATTN_ROT_EMBD, + ], + MODEL_ARCH.REFACT: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.BLOOM: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.TOKEN_EMBD_NORM, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.GPT2: [ + # TODO + ], + # TODO +} + +# tensors that will not be serialized +MODEL_TENSOR_SKIP: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { + MODEL_ARCH.LLAMA: [ + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_ROT_EMBD, + ], + MODEL_ARCH.BAICHUAN: [ + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_ROT_EMBD, + ], + MODEL_ARCH.PERSIMMON: [ + MODEL_TENSOR.ROPE_FREQS, + ], +} + +# +# types +# + + +class TokenType(IntEnum): + NORMAL = 1 + UNKNOWN = 2 + CONTROL = 3 + USER_DEFINED = 4 + UNUSED = 5 + BYTE = 6 + + +class RopeScalingType(Enum): + NONE = 'none' + LINEAR = 'linear' + YARN = 'yarn' + + +class GGMLQuantizationType(IntEnum): + F32 = 0 + F16 = 1 + Q4_0 = 2 + Q4_1 = 3 + Q5_0 = 6 + Q5_1 = 7 + Q8_0 = 8 + Q8_1 = 9 + Q2_K = 10 + Q3_K = 11 + Q4_K = 12 + Q5_K = 13 + Q6_K = 14 + Q8_K = 15 + + +class 
GGUFEndian(IntEnum): + LITTLE = 0 + BIG = 1 + + +class GGUFValueType(IntEnum): + UINT8 = 0 + INT8 = 1 + UINT16 = 2 + INT16 = 3 + UINT32 = 4 + INT32 = 5 + FLOAT32 = 6 + BOOL = 7 + STRING = 8 + ARRAY = 9 + UINT64 = 10 + INT64 = 11 + FLOAT64 = 12 + + @staticmethod + def get_type(val: Any) -> GGUFValueType: + if isinstance(val, (str, bytes, bytearray)): + return GGUFValueType.STRING + elif isinstance(val, list): + return GGUFValueType.ARRAY + elif isinstance(val, float): + return GGUFValueType.FLOAT32 + elif isinstance(val, bool): + return GGUFValueType.BOOL + elif isinstance(val, int): + return GGUFValueType.INT32 + # TODO: need help with 64-bit types in Python + else: + print("Unknown type:", type(val)) + sys.exit() + + +# Note: Does not support GGML_QKK_64 +QK_K = 256 +# Items here are (block size, type size) +GGML_QUANT_SIZES = { + GGMLQuantizationType.F32: (1, 4), + GGMLQuantizationType.F16: (1, 2), + GGMLQuantizationType.Q4_0: (32, 2 + 16), + GGMLQuantizationType.Q4_1: (32, 2 + 2 + 16), + GGMLQuantizationType.Q5_0: (32, 2 + 4 + 16), + GGMLQuantizationType.Q5_1: (32, 2 + 2 + 4 + 16), + GGMLQuantizationType.Q8_0: (32, 2 + 32), + GGMLQuantizationType.Q8_1: (32, 4 + 4 + 32), + GGMLQuantizationType.Q2_K: (256, 2 + 2 + QK_K // 16 + QK_K // 4), + GGMLQuantizationType.Q3_K: (256, 2 + QK_K // 4 + QK_K // 8 + 12), + GGMLQuantizationType.Q4_K: (256, 2 + 2 + QK_K // 2 + 12), + GGMLQuantizationType.Q5_K: (256, 2 + 2 + QK_K // 2 + QK_K // 8 + 12), + GGMLQuantizationType.Q6_K: (256, 2 + QK_K // 2 + QK_K // 4 + QK_K // 16), + GGMLQuantizationType.Q8_K: (256, 4 + QK_K + QK_K // 8), +} + + +# Aliases for backward compatibility. + +# general +KEY_GENERAL_ARCHITECTURE = Keys.General.ARCHITECTURE +KEY_GENERAL_QUANTIZATION_VERSION = Keys.General.QUANTIZATION_VERSION +KEY_GENERAL_ALIGNMENT = Keys.General.ALIGNMENT +KEY_GENERAL_NAME = Keys.General.NAME +KEY_GENERAL_AUTHOR = Keys.General.AUTHOR +KEY_GENERAL_URL = Keys.General.URL +KEY_GENERAL_DESCRIPTION = Keys.General.DESCRIPTION +KEY_GENERAL_LICENSE = Keys.General.LICENSE +KEY_GENERAL_SOURCE_URL = Keys.General.SOURCE_URL +KEY_GENERAL_SOURCE_HF_REPO = Keys.General.SOURCE_HF_REPO +KEY_GENERAL_FILE_TYPE = Keys.General.FILE_TYPE + +# LLM +KEY_CONTEXT_LENGTH = Keys.LLM.CONTEXT_LENGTH +KEY_EMBEDDING_LENGTH = Keys.LLM.EMBEDDING_LENGTH +KEY_BLOCK_COUNT = Keys.LLM.BLOCK_COUNT +KEY_FEED_FORWARD_LENGTH = Keys.LLM.FEED_FORWARD_LENGTH +KEY_USE_PARALLEL_RESIDUAL = Keys.LLM.USE_PARALLEL_RESIDUAL +KEY_TENSOR_DATA_LAYOUT = Keys.LLM.TENSOR_DATA_LAYOUT + +# attention +KEY_ATTENTION_HEAD_COUNT = Keys.Attention.HEAD_COUNT +KEY_ATTENTION_HEAD_COUNT_KV = Keys.Attention.HEAD_COUNT_KV +KEY_ATTENTION_MAX_ALIBI_BIAS = Keys.Attention.MAX_ALIBI_BIAS +KEY_ATTENTION_CLAMP_KQV = Keys.Attention.CLAMP_KQV +KEY_ATTENTION_LAYERNORM_EPS = Keys.Attention.LAYERNORM_EPS +KEY_ATTENTION_LAYERNORM_RMS_EPS = Keys.Attention.LAYERNORM_RMS_EPS + +# RoPE +KEY_ROPE_DIMENSION_COUNT = Keys.Rope.DIMENSION_COUNT +KEY_ROPE_FREQ_BASE = Keys.Rope.FREQ_BASE +KEY_ROPE_SCALING_TYPE = Keys.Rope.SCALING_TYPE +KEY_ROPE_SCALING_FACTOR = Keys.Rope.SCALING_FACTOR +KEY_ROPE_SCALING_ORIG_CTX_LEN = Keys.Rope.SCALING_ORIG_CTX_LEN +KEY_ROPE_SCALING_FINETUNED = Keys.Rope.SCALING_FINETUNED + +# tokenization +KEY_TOKENIZER_MODEL = Keys.Tokenizer.MODEL +KEY_TOKENIZER_LIST = Keys.Tokenizer.LIST +KEY_TOKENIZER_TOKEN_TYPE = Keys.Tokenizer.TOKEN_TYPE +KEY_TOKENIZER_SCORES = Keys.Tokenizer.SCORES +KEY_TOKENIZER_MERGES = Keys.Tokenizer.MERGES +KEY_TOKENIZER_BOS_ID = Keys.Tokenizer.BOS_ID +KEY_TOKENIZER_EOS_ID = Keys.Tokenizer.EOS_ID 
+KEY_TOKENIZER_UNK_ID = Keys.Tokenizer.UNK_ID +KEY_TOKENIZER_SEP_ID = Keys.Tokenizer.SEP_ID +KEY_TOKENIZER_PAD_ID = Keys.Tokenizer.PAD_ID +KEY_TOKENIZER_HF_JSON = Keys.Tokenizer.HF_JSON +KEY_TOKENIZER_RWKV = Keys.Tokenizer.RWKV diff --git a/gguf-py/gguf/gguf.py b/gguf-py/gguf/gguf.py index 7e495cb19638d1..651a81eb828248 100644 --- a/gguf-py/gguf/gguf.py +++ b/gguf-py/gguf/gguf.py @@ -1,1146 +1,15 @@ -#!/usr/bin/env python3 -from __future__ import annotations +# This file left for compatibility. If you want to use the GGUF API from Python +# then don't import gguf/gguf.py directly. If you're looking for examples, see the +# examples/ directory for gguf-py -import json -import os -import shutil -import struct +import importlib import sys -import tempfile -from enum import Enum, IntEnum, auto -from io import BufferedWriter from pathlib import Path -from typing import IO, Any, BinaryIO, Callable, Sequence -import numpy as np +sys.path.insert(0, str(Path(__file__).parent.parent)) -# -# constants -# +# Compatibility for people trying to import gguf/gguf.py directly instead of as a package. +importlib.invalidate_caches() +import gguf # noqa: E402 -GGUF_MAGIC = 0x46554747 -GGUF_VERSION = 3 -GGUF_DEFAULT_ALIGNMENT = 32 - - -# general -KEY_GENERAL_ARCHITECTURE = "general.architecture" -KEY_GENERAL_QUANTIZATION_VERSION = "general.quantization_version" -KEY_GENERAL_ALIGNMENT = "general.alignment" -KEY_GENERAL_NAME = "general.name" -KEY_GENERAL_AUTHOR = "general.author" -KEY_GENERAL_URL = "general.url" -KEY_GENERAL_DESCRIPTION = "general.description" -KEY_GENERAL_LICENSE = "general.license" -KEY_GENERAL_SOURCE_URL = "general.source.url" -KEY_GENERAL_SOURCE_HF_REPO = "general.source.huggingface.repository" -KEY_GENERAL_FILE_TYPE = "general.file_type" - -# LLM -KEY_CONTEXT_LENGTH = "{arch}.context_length" -KEY_EMBEDDING_LENGTH = "{arch}.embedding_length" -KEY_BLOCK_COUNT = "{arch}.block_count" -KEY_FEED_FORWARD_LENGTH = "{arch}.feed_forward_length" -KEY_USE_PARALLEL_RESIDUAL = "{arch}.use_parallel_residual" -KEY_TENSOR_DATA_LAYOUT = "{arch}.tensor_data_layout" - -# attention -KEY_ATTENTION_HEAD_COUNT = "{arch}.attention.head_count" -KEY_ATTENTION_HEAD_COUNT_KV = "{arch}.attention.head_count_kv" -KEY_ATTENTION_MAX_ALIBI_BIAS = "{arch}.attention.max_alibi_bias" -KEY_ATTENTION_CLAMP_KQV = "{arch}.attention.clamp_kqv" -KEY_ATTENTION_LAYERNORM_EPS = "{arch}.attention.layer_norm_epsilon" -KEY_ATTENTION_LAYERNORM_RMS_EPS = "{arch}.attention.layer_norm_rms_epsilon" - -# RoPE -KEY_ROPE_DIMENSION_COUNT = "{arch}.rope.dimension_count" -KEY_ROPE_FREQ_BASE = "{arch}.rope.freq_base" -KEY_ROPE_SCALING_TYPE = "{arch}.rope.scaling.type" -KEY_ROPE_SCALING_FACTOR = "{arch}.rope.scaling.factor" -KEY_ROPE_SCALING_ORIG_CTX_LEN = "{arch}.rope.scaling.original_context_length" -KEY_ROPE_SCALING_FINETUNED = "{arch}.rope.scaling.finetuned" - -# tokenization -KEY_TOKENIZER_MODEL = "tokenizer.ggml.model" -KEY_TOKENIZER_LIST = "tokenizer.ggml.tokens" -KEY_TOKENIZER_TOKEN_TYPE = "tokenizer.ggml.token_type" -KEY_TOKENIZER_SCORES = "tokenizer.ggml.scores" -KEY_TOKENIZER_MERGES = "tokenizer.ggml.merges" -KEY_TOKENIZER_BOS_ID = "tokenizer.ggml.bos_token_id" -KEY_TOKENIZER_EOS_ID = "tokenizer.ggml.eos_token_id" -KEY_TOKENIZER_UNK_ID = "tokenizer.ggml.unknown_token_id" -KEY_TOKENIZER_SEP_ID = "tokenizer.ggml.seperator_token_id" -KEY_TOKENIZER_PAD_ID = "tokenizer.ggml.padding_token_id" -KEY_TOKENIZER_HF_JSON = "tokenizer.huggingface.json" -KEY_TOKENIZER_RWKV = "tokenizer.rwkv.world" - - -# -# recommended mapping of model tensor names for 
storage in gguf -# - - -class MODEL_ARCH(IntEnum): - LLAMA : int = auto() - FALCON : int = auto() - BAICHUAN : int = auto() - GPT2 : int = auto() - GPTJ : int = auto() - GPTNEOX : int = auto() - MPT : int = auto() - STARCODER : int = auto() - PERSIMMON : int = auto() - REFACT : int = auto() - BERT : int = auto() - BLOOM : int = auto() - - -class MODEL_TENSOR(IntEnum): - TOKEN_EMBD : int = auto() - TOKEN_EMBD_NORM : int = auto() - TOKEN_TYPES : int = auto() - POS_EMBD : int = auto() - OUTPUT : int = auto() - OUTPUT_NORM : int = auto() - ROPE_FREQS : int = auto() - ATTN_Q : int = auto() - ATTN_K : int = auto() - ATTN_V : int = auto() - ATTN_QKV : int = auto() - ATTN_OUT : int = auto() - ATTN_NORM : int = auto() - ATTN_NORM_2 : int = auto() - ATTN_ROT_EMBD : int = auto() - FFN_GATE : int = auto() - FFN_DOWN : int = auto() - FFN_UP : int = auto() - FFN_NORM : int = auto() - ATTN_Q_NORM : int = auto() - ATTN_K_NORM : int = auto() - - -MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = { - MODEL_ARCH.LLAMA: "llama", - MODEL_ARCH.FALCON: "falcon", - MODEL_ARCH.BAICHUAN: "baichuan", - MODEL_ARCH.GPT2: "gpt2", - MODEL_ARCH.GPTJ: "gptj", - MODEL_ARCH.GPTNEOX: "gptneox", - MODEL_ARCH.MPT: "mpt", - MODEL_ARCH.STARCODER: "starcoder", - MODEL_ARCH.PERSIMMON: "persimmon", - MODEL_ARCH.REFACT: "refact", - MODEL_ARCH.BERT: "bert", - MODEL_ARCH.BLOOM: "bloom", -} - -TENSOR_NAMES: dict[MODEL_TENSOR, str] = { - MODEL_TENSOR.TOKEN_EMBD: "token_embd", - MODEL_TENSOR.TOKEN_EMBD_NORM: "token_embd_norm", - MODEL_TENSOR.TOKEN_TYPES: "token_types", - MODEL_TENSOR.POS_EMBD: "position_embd", - MODEL_TENSOR.OUTPUT_NORM: "output_norm", - MODEL_TENSOR.OUTPUT: "output", - MODEL_TENSOR.ROPE_FREQS: "rope_freqs", - MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm", - MODEL_TENSOR.ATTN_NORM_2: "blk.{bid}.attn_norm_2", - MODEL_TENSOR.ATTN_QKV: "blk.{bid}.attn_qkv", - MODEL_TENSOR.ATTN_Q: "blk.{bid}.attn_q", - MODEL_TENSOR.ATTN_K: "blk.{bid}.attn_k", - MODEL_TENSOR.ATTN_V: "blk.{bid}.attn_v", - MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output", - MODEL_TENSOR.ATTN_ROT_EMBD: "blk.{bid}.attn_rot_embd", - MODEL_TENSOR.ATTN_Q_NORM: "blk.{bid}.attn_q_norm", - MODEL_TENSOR.ATTN_K_NORM: "blk.{bid}.attn_k_norm", - MODEL_TENSOR.FFN_NORM: "blk.{bid}.ffn_norm", - MODEL_TENSOR.FFN_GATE: "blk.{bid}.ffn_gate", - MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down", - MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up", -} - -MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { - MODEL_ARCH.LLAMA: [ - MODEL_TENSOR.TOKEN_EMBD, - MODEL_TENSOR.OUTPUT_NORM, - MODEL_TENSOR.OUTPUT, - MODEL_TENSOR.ROPE_FREQS, - MODEL_TENSOR.ATTN_NORM, - MODEL_TENSOR.ATTN_Q, - MODEL_TENSOR.ATTN_K, - MODEL_TENSOR.ATTN_V, - MODEL_TENSOR.ATTN_OUT, - MODEL_TENSOR.ATTN_ROT_EMBD, - MODEL_TENSOR.FFN_NORM, - MODEL_TENSOR.FFN_GATE, - MODEL_TENSOR.FFN_DOWN, - MODEL_TENSOR.FFN_UP, - ], - MODEL_ARCH.GPTNEOX: [ - MODEL_TENSOR.TOKEN_EMBD, - MODEL_TENSOR.OUTPUT_NORM, - MODEL_TENSOR.OUTPUT, - MODEL_TENSOR.ATTN_NORM, - MODEL_TENSOR.ATTN_QKV, - MODEL_TENSOR.ATTN_OUT, - MODEL_TENSOR.FFN_NORM, - MODEL_TENSOR.FFN_DOWN, - MODEL_TENSOR.FFN_UP, - ], - MODEL_ARCH.FALCON: [ - MODEL_TENSOR.TOKEN_EMBD, - MODEL_TENSOR.OUTPUT_NORM, - MODEL_TENSOR.OUTPUT, - MODEL_TENSOR.ATTN_NORM, - MODEL_TENSOR.ATTN_NORM_2, - MODEL_TENSOR.ATTN_QKV, - MODEL_TENSOR.ATTN_OUT, - MODEL_TENSOR.FFN_DOWN, - MODEL_TENSOR.FFN_UP, - ], - MODEL_ARCH.BAICHUAN: [ - MODEL_TENSOR.TOKEN_EMBD, - MODEL_TENSOR.OUTPUT_NORM, - MODEL_TENSOR.OUTPUT, - MODEL_TENSOR.ROPE_FREQS, - MODEL_TENSOR.ATTN_NORM, - MODEL_TENSOR.ATTN_Q, - MODEL_TENSOR.ATTN_K, - MODEL_TENSOR.ATTN_V, 
- MODEL_TENSOR.ATTN_OUT, - MODEL_TENSOR.ATTN_ROT_EMBD, - MODEL_TENSOR.FFN_NORM, - MODEL_TENSOR.FFN_GATE, - MODEL_TENSOR.FFN_DOWN, - MODEL_TENSOR.FFN_UP, - ], - MODEL_ARCH.STARCODER: [ - MODEL_TENSOR.TOKEN_EMBD, - MODEL_TENSOR.POS_EMBD, - MODEL_TENSOR.OUTPUT_NORM, - MODEL_TENSOR.OUTPUT, - MODEL_TENSOR.ATTN_NORM, - MODEL_TENSOR.ATTN_QKV, - MODEL_TENSOR.ATTN_OUT, - MODEL_TENSOR.FFN_NORM, - MODEL_TENSOR.FFN_DOWN, - MODEL_TENSOR.FFN_UP, - ], - MODEL_ARCH.BERT: [ - MODEL_TENSOR.TOKEN_EMBD, - MODEL_TENSOR.TOKEN_TYPES, - MODEL_TENSOR.POS_EMBD, - MODEL_TENSOR.OUTPUT_NORM, - MODEL_TENSOR.ATTN_NORM, - MODEL_TENSOR.ATTN_Q, - MODEL_TENSOR.ATTN_K, - MODEL_TENSOR.ATTN_V, - MODEL_TENSOR.ATTN_OUT, - MODEL_TENSOR.FFN_NORM, - MODEL_TENSOR.FFN_DOWN, - MODEL_TENSOR.FFN_UP, - ], - MODEL_ARCH.MPT: [ - MODEL_TENSOR.TOKEN_EMBD, - MODEL_TENSOR.OUTPUT_NORM, - MODEL_TENSOR.OUTPUT, - MODEL_TENSOR.ATTN_NORM, - MODEL_TENSOR.ATTN_QKV, - MODEL_TENSOR.ATTN_OUT, - MODEL_TENSOR.FFN_NORM, - MODEL_TENSOR.FFN_DOWN, - MODEL_TENSOR.FFN_UP, - ], - MODEL_ARCH.GPTJ: [ - MODEL_TENSOR.TOKEN_EMBD, - MODEL_TENSOR.OUTPUT_NORM, - MODEL_TENSOR.OUTPUT, - MODEL_TENSOR.ATTN_NORM, - MODEL_TENSOR.ATTN_Q, - MODEL_TENSOR.ATTN_K, - MODEL_TENSOR.ATTN_V, - MODEL_TENSOR.ATTN_OUT, - MODEL_TENSOR.FFN_DOWN, - MODEL_TENSOR.FFN_UP, - ], - MODEL_ARCH.PERSIMMON: [ - MODEL_TENSOR.TOKEN_EMBD, - MODEL_TENSOR.OUTPUT, - MODEL_TENSOR.OUTPUT_NORM, - MODEL_TENSOR.ATTN_NORM, - MODEL_TENSOR.ATTN_QKV, - MODEL_TENSOR.ATTN_OUT, - MODEL_TENSOR.FFN_NORM, - MODEL_TENSOR.FFN_DOWN, - MODEL_TENSOR.FFN_UP, - MODEL_TENSOR.ATTN_Q_NORM, - MODEL_TENSOR.ATTN_K_NORM, - MODEL_TENSOR.ATTN_ROT_EMBD, - ], - MODEL_ARCH.REFACT: [ - MODEL_TENSOR.TOKEN_EMBD, - MODEL_TENSOR.OUTPUT_NORM, - MODEL_TENSOR.OUTPUT, - MODEL_TENSOR.ATTN_NORM, - MODEL_TENSOR.ATTN_Q, - MODEL_TENSOR.ATTN_K, - MODEL_TENSOR.ATTN_V, - MODEL_TENSOR.ATTN_OUT, - MODEL_TENSOR.FFN_NORM, - MODEL_TENSOR.FFN_GATE, - MODEL_TENSOR.FFN_DOWN, - MODEL_TENSOR.FFN_UP, - ], - MODEL_ARCH.BLOOM: [ - MODEL_TENSOR.TOKEN_EMBD, - MODEL_TENSOR.TOKEN_EMBD_NORM, - MODEL_TENSOR.OUTPUT_NORM, - MODEL_TENSOR.OUTPUT, - MODEL_TENSOR.ATTN_NORM, - MODEL_TENSOR.ATTN_QKV, - MODEL_TENSOR.ATTN_OUT, - MODEL_TENSOR.FFN_NORM, - MODEL_TENSOR.FFN_DOWN, - MODEL_TENSOR.FFN_UP, - ], - MODEL_ARCH.GPT2: [ - # TODO - ], - # TODO -} - -# tensors that will not be serialized -MODEL_TENSOR_SKIP: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { - MODEL_ARCH.LLAMA: [ - MODEL_TENSOR.ROPE_FREQS, - MODEL_TENSOR.ATTN_ROT_EMBD, - ], - MODEL_ARCH.BAICHUAN: [ - MODEL_TENSOR.ROPE_FREQS, - MODEL_TENSOR.ATTN_ROT_EMBD, - ], - MODEL_ARCH.PERSIMMON: [ - MODEL_TENSOR.ROPE_FREQS, - ] -} - - -class TensorNameMap: - mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = { - # Token embeddings - MODEL_TENSOR.TOKEN_EMBD: ( - "gpt_neox.embed_in", # gptneox - "transformer.wte", # gpt2 gpt-j mpt refact - "transformer.word_embeddings", # falcon - "word_embeddings", # bloom - "model.embed_tokens", # llama-hf - "tok_embeddings", # llama-pth - "embeddings.word_embeddings", # bert - "language_model.embedding.word_embeddings", # persimmon - ), - - # Token type embeddings - MODEL_TENSOR.TOKEN_TYPES: ( - "embeddings.token_type_embeddings", # bert - ), - - # Normalization of token embeddings - MODEL_TENSOR.TOKEN_EMBD_NORM: ( - "word_embeddings_layernorm", # bloom - ), - - # Position embeddings - MODEL_TENSOR.POS_EMBD: ( - "transformer.wpe", # gpt2 - "embeddings.position_embeddings", # bert - ), - - # Output - MODEL_TENSOR.OUTPUT: ( - "embed_out", # gptneox - "lm_head", # gpt2 mpt falcon llama-hf baichuan - "output", 
# llama-pth bloom - "word_embeddings_for_head", # persimmon - ), - - # Output norm - MODEL_TENSOR.OUTPUT_NORM: ( - "gpt_neox.final_layer_norm", # gptneox - "transformer.ln_f", # gpt2 gpt-j falcon - "model.norm", # llama-hf baichuan - "norm", # llama-pth - "embeddings.LayerNorm", # bert - "transformer.norm_f", # mpt - "ln_f", # refact bloom - "language_model.encoder.final_layernorm", # persimmon - ), - - # Rope frequencies - MODEL_TENSOR.ROPE_FREQS: ( - "rope.freqs", # llama-pth - ), - } - - block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = { - # Attention norm - MODEL_TENSOR.ATTN_NORM: ( - "gpt_neox.layers.{bid}.input_layernorm", # gptneox - "transformer.h.{bid}.ln_1", # gpt2 gpt-j refact - "transformer.blocks.{bid}.norm_1", # mpt - "transformer.h.{bid}.input_layernorm", # falcon7b - "h.{bid}.input_layernorm", # bloom - "transformer.h.{bid}.ln_mlp", # falcon40b - "model.layers.{bid}.input_layernorm", # llama-hf - "layers.{bid}.attention_norm", # llama-pth - "encoder.layer.{bid}.attention.output.LayerNorm", # bert - "language_model.encoder.layers.{bid}.input_layernorm", # persimmon - "model.layers.{bid}.ln1", # yi - ), - - # Attention norm 2 - MODEL_TENSOR.ATTN_NORM_2: ( - "transformer.h.{bid}.ln_attn", # falcon40b - ), - - # Attention query-key-value - MODEL_TENSOR.ATTN_QKV: ( - "gpt_neox.layers.{bid}.attention.query_key_value", # gptneox - "transformer.h.{bid}.attn.c_attn", # gpt2 - "transformer.blocks.{bid}.attn.Wqkv", # mpt - "transformer.h.{bid}.self_attention.query_key_value", # falcon - "h.{bid}.self_attention.query_key_value", # bloom - "language_model.encoder.layers.{bid}.self_attention.query_key_value", # persimmon - ), - - # Attention query - MODEL_TENSOR.ATTN_Q: ( - "model.layers.{bid}.self_attn.q_proj", # llama-hf - "layers.{bid}.attention.wq", # llama-pth - "encoder.layer.{bid}.attention.self.query", # bert - "transformer.h.{bid}.attn.q_proj", # gpt-j - ), - - # Attention key - MODEL_TENSOR.ATTN_K: ( - "model.layers.{bid}.self_attn.k_proj", # llama-hf - "layers.{bid}.attention.wk", # llama-pth - "encoder.layer.{bid}.attention.self.key", # bert - "transformer.h.{bid}.attn.k_proj", # gpt-j - ), - - # Attention value - MODEL_TENSOR.ATTN_V: ( - "model.layers.{bid}.self_attn.v_proj", # llama-hf - "layers.{bid}.attention.wv", # llama-pth - "encoder.layer.{bid}.attention.self.value", # bert - "transformer.h.{bid}.attn.v_proj", # gpt-j - ), - - # Attention output - MODEL_TENSOR.ATTN_OUT: ( - "gpt_neox.layers.{bid}.attention.dense", # gptneox - "transformer.h.{bid}.attn.c_proj", # gpt2 refact - "transformer.blocks.{bid}.attn.out_proj", # mpt - "transformer.h.{bid}.self_attention.dense", # falcon - "h.{bid}.self_attention.dense", # bloom - "model.layers.{bid}.self_attn.o_proj", # llama-hf - "layers.{bid}.attention.wo", # llama-pth - "encoder.layer.{bid}.attention.output.dense", # bert - "transformer.h.{bid}.attn.out_proj", # gpt-j - "language_model.encoder.layers.{bid}.self_attention.dense" # persimmon - ), - - # Rotary embeddings - MODEL_TENSOR.ATTN_ROT_EMBD: ( - "model.layers.{bid}.self_attn.rotary_emb.inv_freq", # llama-hf - "layers.{bid}.attention.inner_attention.rope.freqs", # llama-pth - ), - - # Feed-forward norm - MODEL_TENSOR.FFN_NORM: ( - "gpt_neox.layers.{bid}.post_attention_layernorm", # gptneox - "transformer.h.{bid}.ln_2", # gpt2 refact - "h.{bid}.post_attention_layernorm", # bloom - "transformer.blocks.{bid}.norm_2", # mpt - "model.layers.{bid}.post_attention_layernorm", # llama-hf - "layers.{bid}.ffn_norm", # llama-pth - "encoder.layer.{bid}.output.LayerNorm", # 
bert - "language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon - "model.layers.{bid}.ln2", # yi - ), - - # Feed-forward up - MODEL_TENSOR.FFN_UP: ( - "gpt_neox.layers.{bid}.mlp.dense_h_to_4h", # gptneox - "transformer.h.{bid}.mlp.c_fc", # gpt2 - "transformer.blocks.{bid}.ffn.up_proj", # mpt - "transformer.h.{bid}.mlp.dense_h_to_4h", # falcon - "h.{bid}.mlp.dense_h_to_4h", # bloom - "model.layers.{bid}.mlp.up_proj", # llama-hf refact - "layers.{bid}.feed_forward.w3", # llama-pth - "encoder.layer.{bid}.intermediate.dense", # bert - "transformer.h.{bid}.mlp.fc_in", # gpt-j - "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon - ), - - # Feed-forward gate - MODEL_TENSOR.FFN_GATE: ( - "model.layers.{bid}.mlp.gate_proj", # llama-hf refact - "layers.{bid}.feed_forward.w1", # llama-pth - ), - - # Feed-forward down - MODEL_TENSOR.FFN_DOWN: ( - "gpt_neox.layers.{bid}.mlp.dense_4h_to_h", # gptneox - "transformer.h.{bid}.mlp.c_proj", # gpt2 refact - "transformer.blocks.{bid}.ffn.down_proj", # mpt - "transformer.h.{bid}.mlp.dense_4h_to_h", # falcon - "h.{bid}.mlp.dense_4h_to_h", # bloom - "model.layers.{bid}.mlp.down_proj", # llama-hf - "layers.{bid}.feed_forward.w2", # llama-pth - "encoder.layer.{bid}.output.dense", # bert - "transformer.h.{bid}.mlp.fc_out", # gpt-j - "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h", # persimmon - ), - - MODEL_TENSOR.ATTN_Q_NORM: ( - "language_model.encoder.layers.{bid}.self_attention.q_layernorm", - ), - - MODEL_TENSOR.ATTN_K_NORM: ( - "language_model.encoder.layers.{bid}.self_attention.k_layernorm", - ), - - MODEL_TENSOR.ROPE_FREQS: ( - "language_model.encoder.layers.{bid}.self_attention.rotary_emb.inv_freq", # persimmon - ) - } - - mapping: dict[str, tuple[MODEL_TENSOR, str]] - - def __init__(self, arch: MODEL_ARCH, n_blocks: int): - self.mapping = {} - for tensor, keys in self.mappings_cfg.items(): - if tensor not in MODEL_TENSORS[arch]: - continue - tensor_name = TENSOR_NAMES[tensor] - self.mapping[tensor_name] = (tensor, tensor_name) - for key in keys: - self.mapping[key] = (tensor, tensor_name) - for bid in range(n_blocks): - for tensor, keys in self.block_mappings_cfg.items(): - if tensor not in MODEL_TENSORS[arch]: - continue - tensor_name = TENSOR_NAMES[tensor].format(bid = bid) - self.mapping[tensor_name] = (tensor, tensor_name) - for key in keys: - key = key.format(bid = bid) - self.mapping[key] = (tensor, tensor_name) - - def get_type_and_name(self, key: str, try_suffixes: Sequence[str] = ()) -> tuple[MODEL_TENSOR, str] | None: - result = self.mapping.get(key) - if result is not None: - return result - for suffix in try_suffixes: - if key.endswith(suffix): - result = self.mapping.get(key[:-len(suffix)]) - if result is not None: - return (result[0], result[1] + suffix) - return None - - def get_name(self, key: str, try_suffixes: Sequence[str] = ()) -> str | None: - result = self.get_type_and_name(key, try_suffixes = try_suffixes) - if result is None: - return None - return result[1] - - def get_type(self, key: str, try_suffixes: Sequence[str] = ()) -> MODEL_TENSOR | None: - result = self.get_type_and_name(key, try_suffixes = try_suffixes) - if result is None: - return None - return result[0] - - def __getitem__(self, key: str) -> str: - try: - return self.mapping[key][1] - except KeyError: - raise KeyError(key) - - def __contains__(self, key: str) -> bool: - return key in self.mapping - - def __repr__(self) -> str: - return repr(self.mapping) - -def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> 
TensorNameMap: - return TensorNameMap(arch, n_blocks) - -class TokenType(IntEnum): - NORMAL = 1 - UNKNOWN = 2 - CONTROL = 3 - USER_DEFINED = 4 - UNUSED = 5 - BYTE = 6 - -class RopeScalingType(Enum): - NONE = 'none' - LINEAR = 'linear' - YARN = 'yarn' - -# -# implementation -# - - -class GGMLQuantizationType(IntEnum): - F32 = 0 - F16 = 1 - Q4_0 = 2 - Q4_1 = 3 - Q5_0 = 6 - Q5_1 = 7 - Q8_0 = 8 - Q8_1 = 9 - Q2_K = 10 - Q3_K = 11 - Q4_K = 12 - Q5_K = 13 - Q6_K = 14 - Q8_K = 15 - -class GGUFEndian(IntEnum): - LITTLE = 0 - BIG = 1 - - -class GGUFValueType(IntEnum): - UINT8 = 0 - INT8 = 1 - UINT16 = 2 - INT16 = 3 - UINT32 = 4 - INT32 = 5 - FLOAT32 = 6 - BOOL = 7 - STRING = 8 - ARRAY = 9 - UINT64 = 10 - INT64 = 11 - FLOAT64 = 12 - - @staticmethod - def get_type(val): - if isinstance(val, str) or isinstance(val, bytes) or isinstance(val, bytearray): - return GGUFValueType.STRING - elif isinstance(val, list): - return GGUFValueType.ARRAY - elif isinstance(val, float): - return GGUFValueType.FLOAT32 - elif isinstance(val, bool): - return GGUFValueType.BOOL - elif isinstance(val, int): - return GGUFValueType.INT32 - # TODO: need help with 64-bit types in Python - else: - print("Unknown type: "+str(type(val))) - sys.exit() - - -class WriterState(Enum): - EMPTY = auto() - HEADER = auto() - KV_DATA = auto() - TI_DATA = auto() - - -class GGUFWriter: - fout: BufferedWriter - temp_file: tempfile.SpooledTemporaryFile[bytes] | None - tensors: list[np.ndarray[Any, Any]] - - @property - def pack_prefix(self): - if self.endianess==GGUFEndian.LITTLE: - return "<" - else: - return ">" - - def __init__(self, path: os.PathLike[str] | str, arch: str, use_temp_file = True, endianess=GGUFEndian.LITTLE): - self.fout = open(path, "wb") - self.arch = arch - self.endianess = endianess - self._simple_value_packing = { - GGUFValueType.UINT8: f"{self.pack_prefix}B", - GGUFValueType.INT8: f"{self.pack_prefix}b", - GGUFValueType.UINT16: f"{self.pack_prefix}H", - GGUFValueType.INT16: f"{self.pack_prefix}h", - GGUFValueType.UINT32: f"{self.pack_prefix}I", - GGUFValueType.INT32: f"{self.pack_prefix}i", - GGUFValueType.FLOAT32: f"{self.pack_prefix}f", - GGUFValueType.UINT64: f"{self.pack_prefix}Q", - GGUFValueType.INT64: f"{self.pack_prefix}q", - GGUFValueType.FLOAT64: f"{self.pack_prefix}d", - GGUFValueType.BOOL: "?" 
, - } - self.offset_tensor = 0 - self.data_alignment = GGUF_DEFAULT_ALIGNMENT - self.kv_data = b"" - self.kv_data_count = 0 - self.ti_data = b"" - self.ti_data_count = 0 - self.use_temp_file = use_temp_file - self.temp_file = None - self.tensors = [] - endianess_str = "Big Endian" if self.endianess == GGUFEndian.BIG else "Little Endian" - print(f"This gguf file is for {endianess_str} only") - self.state = WriterState.EMPTY - - self.add_architecture() - - def write_header_to_file(self): - if self.state is not WriterState.EMPTY: - raise ValueError(f'Expected output file to be empty, got {self.state}') - - self.fout.write(struct.pack(" 0: - ltype = GGUFValueType.get_type(val[0]) - if not all(GGUFValueType.get_type(i) is ltype for i in val[1:]): - raise ValueError("All items in a GGUF array should be of the same type") - self.kv_data += struct.pack(f"{self.pack_prefix}I", ltype) - self.kv_data += struct.pack(f"{self.pack_prefix}Q", len(val)) - for item in val: - self.add_val(item, add_vtype=False) - else: - raise ValueError("Invalid GGUF metadata value type or value") - - @staticmethod - def ggml_pad(x: int, n: int) -> int: - return ((x + n - 1) // n) * n - - def add_tensor_info(self, name: str, tensor_shape: Sequence[int], tensor_dtype: np.dtype[np.float16] | np.dtype[np.float32], tensor_nbytes: int, raw_dtype: GGMLQuantizationType | None = None): - if self.state is not WriterState.EMPTY: - raise ValueError(f'Expected output file to be empty, got {self.state}') - - assert raw_dtype is not None or tensor_dtype in (np.float32, np.float16), "Only F32 and F16 tensors are supported for now" - - encoded_name = name.encode("utf8") - self.ti_data += struct.pack(f"{self.pack_prefix}Q", len(encoded_name)) - self.ti_data += encoded_name - n_dims = len(tensor_shape) - self.ti_data += struct.pack(f"{self.pack_prefix}I", n_dims) - for i in range(n_dims): - self.ti_data += struct.pack(f"{self.pack_prefix}Q", tensor_shape[n_dims - 1 - i]) - if raw_dtype is None: - dtype = GGMLQuantizationType.F32 if tensor_dtype == np.float32 else GGMLQuantizationType.F16 - else: - dtype = raw_dtype - self.ti_data += struct.pack(f"{self.pack_prefix}I", dtype) - self.ti_data += struct.pack(f"{self.pack_prefix}Q", self.offset_tensor) - self.offset_tensor += GGUFWriter.ggml_pad(tensor_nbytes, self.data_alignment) - self.ti_data_count += 1 - - def add_tensor(self, name: str, tensor: np.ndarray[Any, Any], raw_shape: Sequence[int] | None = None, raw_dtype: GGMLQuantizationType | None = None): - if self.endianess == GGUFEndian.BIG: - tensor.byteswap(inplace=True) - if self.use_temp_file and self.temp_file is None: - fp = tempfile.SpooledTemporaryFile(mode="w+b", max_size=256*1024*1024) - fp.seek(0) - self.temp_file = fp - - shape: Sequence[int] = raw_shape if raw_shape is not None else tensor.shape - self.add_tensor_info(name, shape, tensor.dtype, tensor.nbytes, raw_dtype = raw_dtype) - - if self.temp_file is None: - self.tensors.append(tensor) - return - - tensor.tofile(self.temp_file) - self.write_padding(self.temp_file, tensor.nbytes) - - def write_padding(self, fp: IO[bytes], n: int, align: int | None = None): - pad = GGUFWriter.ggml_pad(n, align if align is not None else self.data_alignment) - n - if pad != 0: - fp.write(bytes([0] * pad)) - - def write_tensor_data(self, tensor: np.ndarray[Any, Any]): - if self.state is not WriterState.TI_DATA: - raise ValueError(f'Expected output file to contain tensor info, got {self.state}') - - if self.endianess==GGUFEndian.BIG: - tensor.byteswap(inplace=True) - 
self.write_padding(self.fout, self.fout.tell()) - tensor.tofile(self.fout) - self.write_padding(self.fout, tensor.nbytes) - - def write_tensors_to_file(self): - self.write_ti_data_to_file() - - self.write_padding(self.fout, self.fout.tell()) - - if self.temp_file is None: - while True: - try: - tensor = self.tensors.pop(0) - except IndexError: - break - tensor.tofile(self.fout) - self.write_padding(self.fout, tensor.nbytes) - return - - self.temp_file.seek(0) - - shutil.copyfileobj(self.temp_file, self.fout) - self.flush() - self.temp_file.close() - - def flush(self): - self.fout.flush() - - def close(self): - self.fout.close() - - def add_architecture(self): - self.add_string(KEY_GENERAL_ARCHITECTURE, self.arch) - - def add_author(self, author: str): - self.add_string(KEY_GENERAL_AUTHOR, author) - - def add_tensor_data_layout(self, layout: str): - self.add_string(KEY_TENSOR_DATA_LAYOUT.format(arch=self.arch), layout) - - def add_url(self, url: str): - self.add_string(KEY_GENERAL_URL, url) - - def add_description(self, description: str): - self.add_string(KEY_GENERAL_DESCRIPTION, description) - - def add_source_url(self, url: str): - self.add_string(KEY_GENERAL_SOURCE_URL, url) - - def add_source_hf_repo(self, repo: str): - self.add_string(KEY_GENERAL_SOURCE_HF_REPO, repo) - - def add_file_type(self, ftype: int): - self.add_uint32(KEY_GENERAL_FILE_TYPE, ftype) - - def add_name(self, name: str): - self.add_string(KEY_GENERAL_NAME, name) - - def add_quantization_version(self, quantization_version: GGMLQuantizationType): - self.add_uint32( - KEY_GENERAL_QUANTIZATION_VERSION, quantization_version) - - def add_custom_alignment(self, alignment: int): - self.data_alignment = alignment - self.add_uint32(KEY_GENERAL_ALIGNMENT, alignment) - - def add_context_length(self, length: int): - self.add_uint32( - KEY_CONTEXT_LENGTH.format(arch=self.arch), length) - - def add_embedding_length(self, length: int): - self.add_uint32( - KEY_EMBEDDING_LENGTH.format(arch=self.arch), length) - - def add_block_count(self, length: int): - self.add_uint32( - KEY_BLOCK_COUNT.format(arch=self.arch), length) - - def add_feed_forward_length(self, length: int): - self.add_uint32( - KEY_FEED_FORWARD_LENGTH.format(arch=self.arch), length) - - def add_parallel_residual(self, use: bool): - self.add_bool( - KEY_USE_PARALLEL_RESIDUAL.format(arch=self.arch), use) - - def add_head_count(self, count: int): - self.add_uint32( - KEY_ATTENTION_HEAD_COUNT.format(arch=self.arch), count) - - def add_head_count_kv(self, count: int): - self.add_uint32( - KEY_ATTENTION_HEAD_COUNT_KV.format(arch=self.arch), count) - - def add_max_alibi_bias(self, bias: float): - self.add_float32( - KEY_ATTENTION_MAX_ALIBI_BIAS.format(arch=self.arch), bias) - - def add_clamp_kqv(self, value: float): - self.add_float32( - KEY_ATTENTION_CLAMP_KQV.format(arch=self.arch), value) - - def add_layer_norm_eps(self, value: float): - self.add_float32( - KEY_ATTENTION_LAYERNORM_EPS.format(arch=self.arch), value) - - def add_layer_norm_rms_eps(self, value: float): - self.add_float32( - KEY_ATTENTION_LAYERNORM_RMS_EPS.format(arch=self.arch), value) - - def add_rope_dimension_count(self, count: int): - self.add_uint32( - KEY_ROPE_DIMENSION_COUNT.format(arch=self.arch), count) - - def add_rope_freq_base(self, value: float): - self.add_float32(KEY_ROPE_FREQ_BASE.format(arch=self.arch), value) - - def add_rope_scaling_type(self, value: RopeScalingType): - self.add_string(KEY_ROPE_SCALING_TYPE.format(arch=self.arch), value.value) - - def add_rope_scaling_factor(self, value: 
float): - self.add_float32(KEY_ROPE_SCALING_FACTOR.format(arch=self.arch), value) - - def add_rope_scaling_orig_ctx_len(self, value: int): - self.add_uint32(KEY_ROPE_SCALING_ORIG_CTX_LEN.format(arch=self.arch), value) - - def add_rope_scaling_finetuned(self, value: bool): - self.add_bool(KEY_ROPE_SCALING_FINETUNED.format(arch=self.arch), value) - - def add_tokenizer_model(self, model: str): - self.add_string(KEY_TOKENIZER_MODEL, model) - - def add_token_list(self, tokens: Sequence[str] | Sequence[bytes] | Sequence[bytearray]): - self.add_array(KEY_TOKENIZER_LIST, tokens) - - def add_token_merges(self, merges: Sequence[str] | Sequence[bytes] | Sequence[bytearray]): - self.add_array(KEY_TOKENIZER_MERGES, merges) - - def add_token_types(self, types: Sequence[TokenType] | Sequence[int]): - self.add_array(KEY_TOKENIZER_TOKEN_TYPE, types) - - def add_token_scores(self, scores: Sequence[float]): - self.add_array(KEY_TOKENIZER_SCORES, scores) - - def add_bos_token_id(self, id: int): - self.add_uint32(KEY_TOKENIZER_BOS_ID, id) - - def add_eos_token_id(self, id: int): - self.add_uint32(KEY_TOKENIZER_EOS_ID, id) - - def add_unk_token_id(self, id: int): - self.add_uint32(KEY_TOKENIZER_UNK_ID, id) - - def add_sep_token_id(self, id: int): - self.add_uint32(KEY_TOKENIZER_SEP_ID, id) - - def add_pad_token_id(self, id: int): - self.add_uint32(KEY_TOKENIZER_PAD_ID, id) - - -class SpecialVocab: - merges: list[str] - special_token_ids: dict[str, int] - - def __init__( - self, path: str | os.PathLike[str], load_merges: bool = False, - special_token_types: tuple[str, ...] | None = None, - n_vocab: int | None = None, - ): - self.special_token_ids = {} - self.n_vocab = n_vocab - self.load_merges = load_merges - self.merges = [] - if special_token_types is not None: - self.special_token_types = special_token_types - else: - self.special_token_types = ('bos', 'eos', 'unk', 'sep', 'pad') - self._load(Path(path)) - - def _load(self, path: Path) -> None: - if not self._try_load_from_tokenizer_json(path): - self._try_load_from_config_json(path) - - def _set_special_token(self, typ: str, tid: Any): - if not isinstance(tid, int) or tid < 0: - return - if self.n_vocab is None or tid < self.n_vocab: - self.special_token_ids[typ] = tid - return - print(f'gguf: WARNING: Special token type {typ}, id {tid} out of range, must be under {self.n_vocab} - skipping', - file = sys.stderr) - - - def _try_load_from_tokenizer_json(self, path: Path) -> bool: - tokenizer_file = path / 'tokenizer.json' - if not tokenizer_file.is_file(): - return False - with open(tokenizer_file, encoding = 'utf-8') as f: - tokenizer = json.load(f) - if self.load_merges: - merges = tokenizer.get('model', {}).get('merges') - if isinstance(merges, list) and len(merges) > 0 and isinstance(merges[0], str): - self.merges = merges - tokenizer_config_file = path / 'tokenizer_config.json' - added_tokens = tokenizer.get('added_tokens') - if added_tokens is None or not tokenizer_config_file.is_file(): - return True - with open(tokenizer_config_file, encoding = 'utf-8') as f: - tokenizer_config = json.load(f) - for typ in self.special_token_types: - entry = tokenizer_config.get(f'{typ}_token') - if isinstance(entry, str): - tc_content = entry - elif isinstance(entry, dict): - entry_content = entry.get('content') - if not isinstance(entry_content, str): - continue - tc_content = entry_content - else: - continue - # We only need the first match here. 
- maybe_token_id = next(( - atok.get('id') for atok in added_tokens - if atok.get('content') == tc_content), None) - self._set_special_token(typ, maybe_token_id) - return True - - def _try_load_from_config_json(self, path: Path) -> bool: - config_file = path / 'config.json' - if not config_file.is_file(): - return False - with open(config_file, encoding = 'utf-8') as f: - config = json.load(f) - for typ in self.special_token_types: - self._set_special_token(typ, config.get(f'{typ}_token_id')) - return True - - def add_to_gguf(self, gw: GGUFWriter, quiet: bool = False) -> None: - if len(self.merges) > 0: - if not quiet: - print(f'gguf: Adding {len(self.merges)} merge(s).') - gw.add_token_merges(self.merges) - for typ, tokid in self.special_token_ids.items(): - handler: Callable[[int], None] | None = getattr(gw, f'add_{typ}_token_id', None) - if handler is None: - print(f'gguf: WARNING: No handler for special token type {typ} with id {tokid} - skipping', file = sys.stderr) - continue - if not quiet: - print(f'gguf: Setting special token type {typ} to {tokid}') - handler(tokid) - - def __repr__(self) -> str: - return f'' - - -# Example usage: -if __name__ == "__main__": - # Example usage with a file - gguf_writer = GGUFWriter("example.gguf", "llama") - - gguf_writer.add_architecture() - gguf_writer.add_block_count(12) - gguf_writer.add_uint32("answer", 42) # Write a 32-bit integer - gguf_writer.add_float32("answer_in_float", 42.0) # Write a 32-bit float - gguf_writer.add_custom_alignment(64) - - tensor1 = np.ones((32,), dtype=np.float32) * 100.0 - tensor2 = np.ones((64,), dtype=np.float32) * 101.0 - tensor3 = np.ones((96,), dtype=np.float32) * 102.0 - - gguf_writer.add_tensor("tensor1", tensor1) - gguf_writer.add_tensor("tensor2", tensor2) - gguf_writer.add_tensor("tensor3", tensor3) - - gguf_writer.write_header_to_file() - gguf_writer.write_kv_data_to_file() - gguf_writer.write_tensors_to_file() - - gguf_writer.close() +importlib.reload(gguf) diff --git a/gguf-py/gguf/gguf_reader.py b/gguf-py/gguf/gguf_reader.py new file mode 100644 index 00000000000000..8682765edbac09 --- /dev/null +++ b/gguf-py/gguf/gguf_reader.py @@ -0,0 +1,264 @@ +# +# GGUF file reading/modification support. For API usage information, +# please see the files scripts/ for some fairly simple examples. +# +from __future__ import annotations + +import os +from collections import OrderedDict +from typing import Any, Literal, NamedTuple, TypeVar, Union + +import numpy as np +import numpy.typing as npt + +if __name__ == "__main__": + import sys + from pathlib import Path + + # Allow running file in package as a script. + sys.path.insert(0, str(Path(__file__).parent.parent)) + +from gguf.constants import ( + GGML_QUANT_SIZES, + GGUF_DEFAULT_ALIGNMENT, + GGUF_MAGIC, + GGUF_VERSION, + GGMLQuantizationType, + GGUFValueType, +) + + +READER_SUPPORTED_VERSIONS = [2, GGUF_VERSION] + + +class ReaderField(NamedTuple): + # Offset to start of this field. + offset: int + + # Name of the field (not necessarily from file data). + name: str + + # Data parts. Some types have multiple components, such as strings + # that consist of a length followed by the string data. + parts: list[npt.NDArray[Any]] = [] + + # Indexes into parts that we can call the actual data. For example + # an array of strings will be populated with indexes to the actual + # string data. 
+ data: list[int] = [-1] + + types: list[GGUFValueType] = [] + + +class ReaderTensor(NamedTuple): + name: str + tensor_type: GGMLQuantizationType + shape: npt.NDArray[np.uint32] + n_elements: int + n_bytes: int + data_offset: int + data: npt.NDArray[Any] + field: ReaderField + + +class GGUFReader: + # I - same as host, S - swapped + byte_order: Literal['I' | 'S'] = 'I' + alignment: int = GGUF_DEFAULT_ALIGNMENT + + # Note: Internal helper, API may change. + gguf_scalar_to_np: dict[GGUFValueType, type[np.generic]] = { + GGUFValueType.UINT8: np.uint8, + GGUFValueType.INT8: np.int8, + GGUFValueType.UINT16: np.uint16, + GGUFValueType.INT16: np.int16, + GGUFValueType.UINT32: np.uint32, + GGUFValueType.INT32: np.int32, + GGUFValueType.FLOAT32: np.float32, + GGUFValueType.UINT64: np.uint64, + GGUFValueType.INT64: np.int64, + GGUFValueType.FLOAT64: np.float64, + GGUFValueType.BOOL: np.bool_, + } + + def __init__(self, path: os.PathLike[str] | str, mode: Literal['r' | 'r+' | 'c'] = 'r'): + self.data = np.memmap(path, mode = mode) + offs = 0 + if self._get(offs, np.uint32, override_order = '<')[0] != GGUF_MAGIC: + raise ValueError('GGUF magic invalid') + offs += 4 + temp_version = self._get(offs, np.uint32) + if temp_version[0] & 65535 == 0: + # If we get 0 here that means it's (probably) a GGUF file created for + # the opposite byte order of the machine this script is running on. + self.byte_order = 'S' + temp_version = temp_version.newbyteorder(self.byte_order) + version = temp_version[0] + if version not in READER_SUPPORTED_VERSIONS: + raise ValueError(f'Sorry, file appears to be version {version} which we cannot handle') + self.fields: OrderedDict[str, ReaderField] = OrderedDict() + self.tensors: list[ReaderTensor] = [] + offs += self._push_field(ReaderField(offs, 'GGUF.version', [temp_version], [0], [GGUFValueType.UINT32])) + temp_counts = self._get(offs, np.uint64, 2) + offs += self._push_field(ReaderField(offs, 'GGUF.tensor_count', [temp_counts[:1]], [0], [GGUFValueType.UINT64])) + offs += self._push_field(ReaderField(offs, 'GGUF.kv_count', [temp_counts[1:]], [0], [GGUFValueType.UINT64])) + tensor_count, kv_count = temp_counts + offs = self._build_fields(offs, kv_count) + offs, tensors_fields = self._build_tensors_fields(offs, tensor_count) + new_align = self.fields.get('general.alignment') + if new_align is not None: + if new_align.types != [GGUFValueType.UINT64]: + raise ValueError('Bad type for general.alignment field') + self.alignment = new_align.parts[-1][0] + padding = offs % self.alignment + if padding != 0: + offs += self.alignment - padding + self._build_tensors(offs, tensors_fields) + + _DT = TypeVar('_DT', bound = npt.DTypeLike) + + # Fetch a key/value metadata field by key. + def get_field(self, key: str) -> Union[ReaderField, None]: + return self.fields.get(key, None) + + # Fetch a tensor from the list by index. 
+ def get_tensor(self, idx: int) -> ReaderTensor: + return self.tensors[idx] + + def _get( + self, offset: int, dtype: npt.DTypeLike, count: int = 1, override_order: None | Literal['I' | 'S' | '<'] = None, + ) -> npt.NDArray[Any]: + count = int(count) + itemsize = int(np.empty([], dtype = dtype).itemsize) + end_offs = offset + itemsize * count + return ( + self.data[offset:end_offs] + .view(dtype = dtype)[:count] + .newbyteorder(override_order or self.byte_order) + ) + + def _push_field(self, field: ReaderField, skip_sum: bool = False) -> int: + if field.name in self.fields: + raise KeyError(f'Duplicate {field.name} already in list at offset {field.offset}') + self.fields[field.name] = field + return 0 if skip_sum else sum(int(part.nbytes) for part in field.parts) + + def _get_str(self, offset: int) -> tuple[npt.NDArray[np.uint64], npt.NDArray[np.uint8]]: + slen = self._get(offset, np.uint64) + return slen, self._get(offset + 8, np.uint8, slen[0]) + + def _get_field_parts( + self, orig_offs: int, raw_type: int, + ) -> tuple[int, list[npt.NDArray[Any]], list[int], list[GGUFValueType]]: + offs = orig_offs + types: list[GGUFValueType] = [] + gtype = GGUFValueType(raw_type) + types.append(gtype) + # Handle strings. + if gtype == GGUFValueType.STRING: + sparts: list[npt.NDArray[Any]] = list(self._get_str(offs)) + size = sum(int(part.nbytes) for part in sparts) + return size, sparts, [1], types + # Check if it's a simple scalar type. + nptype = self.gguf_scalar_to_np.get(gtype) + if nptype is not None: + val = self._get(offs, nptype) + return int(val.nbytes), [val], [0], types + # Handle arrays. + if gtype == GGUFValueType.ARRAY: + raw_itype = self._get(offs, np.uint32) + offs += int(raw_itype.nbytes) + alen = self._get(offs, np.uint64) + offs += int(alen.nbytes) + aparts: list[npt.NDArray[Any]] = [raw_itype, alen] + data_idxs: list[int] = [] + for idx in range(alen[0]): + curr_size, curr_parts, curr_idxs, curr_types = self._get_field_parts(offs, raw_itype[0]) + if idx == 0: + types += curr_types + idxs_offs = len(aparts) + aparts += curr_parts + data_idxs += (idx + idxs_offs for idx in curr_idxs) + offs += curr_size + return offs - orig_offs, aparts, data_idxs, types + # We can't deal with this one. 
+ raise ValueError('Unknown/unhandled field type {gtype}') + + def _get_tensor(self, orig_offs: int) -> ReaderField: + offs = orig_offs + name_len, name_data = self._get_str(offs) + offs += int(name_len.nbytes + name_data.nbytes) + n_dims = self._get(offs, np.uint32) + offs += int(n_dims.nbytes) + dims = self._get(offs, np.uint64, n_dims[0]) + offs += int(dims.nbytes) + raw_dtype = self._get(offs, np.uint32) + offs += int(raw_dtype.nbytes) + offset_tensor = self._get(offs, np.uint64) + offs += int(offset_tensor.nbytes) + return ReaderField( + orig_offs, + str(bytes(name_data), encoding = 'utf-8'), + [name_len, name_data, n_dims, dims, raw_dtype, offset_tensor], + [1, 3, 4, 5], + ) + + def _build_fields(self, offs: int, count: int) -> int: + for _ in range(count): + orig_offs = offs + kv_klen, kv_kdata = self._get_str(offs) + offs += int(kv_klen.nbytes + kv_kdata.nbytes) + raw_kv_type = self._get(offs, np.uint32) + offs += int(raw_kv_type.nbytes) + parts: list[npt.NDArray[Any]] = [kv_klen, kv_kdata, raw_kv_type] + idxs_offs = len(parts) + field_size, field_parts, field_idxs, field_types = self._get_field_parts(offs, raw_kv_type[0]) + parts += field_parts + self._push_field(ReaderField( + orig_offs, + str(bytes(kv_kdata), encoding = 'utf-8'), + parts, + [idx + idxs_offs for idx in field_idxs], + field_types, + ), skip_sum = True) + offs += field_size + return offs + + def _build_tensors_fields(self, offs: int, count: int) -> tuple[int, list[ReaderField]]: + tensor_fields = [] + for _ in range(count): + field = self._get_tensor(offs) + offs += sum(int(part.nbytes) for part in field.parts) + tensor_fields.append(field) + return offs, tensor_fields + + def _build_tensors(self, start_offs: int, fields: list[ReaderField]) -> None: + tensors = [] + for field in fields: + _name_len, name_data, _n_dims, dims, raw_dtype, offset_tensor = field.parts + ggml_type = GGMLQuantizationType(raw_dtype[0]) + n_elems = np.prod(dims) + block_size, type_size = GGML_QUANT_SIZES[ggml_type] + n_bytes = n_elems * type_size // block_size + data_offs = int(start_offs + offset_tensor[0]) + item_type: npt.DTypeLike + if ggml_type == GGMLQuantizationType.F32: + item_count = n_elems + item_type = np.float32 + elif ggml_type == GGMLQuantizationType.F16: + item_count = n_elems + item_type = np.float16 + else: + item_count = n_bytes + item_type = np.uint8 + tensors.append(ReaderTensor( + name = str(bytes(name_data), encoding = 'utf-8'), + tensor_type = ggml_type, + shape = dims, + n_elements = n_elems, + n_bytes = n_bytes, + data_offset = data_offs, + data = self._get(data_offs, item_type, item_count), + field = field, + )) + self.tensors = tensors diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py new file mode 100644 index 00000000000000..75fb6976f9ca28 --- /dev/null +++ b/gguf-py/gguf/gguf_writer.py @@ -0,0 +1,409 @@ +from __future__ import annotations + +import os +import shutil +import struct +import tempfile +from enum import Enum, auto +from io import BufferedWriter +from typing import IO, Any, Sequence + +import numpy as np + +from .constants import ( + GGUF_DEFAULT_ALIGNMENT, + GGUF_MAGIC, + GGUF_VERSION, + GGMLQuantizationType, + GGUFEndian, + GGUFValueType, + Keys, + RopeScalingType, + TokenType, +) + + +class WriterState(Enum): + EMPTY = auto() + HEADER = auto() + KV_DATA = auto() + TI_DATA = auto() + + +class GGUFWriter: + fout: BufferedWriter + temp_file: tempfile.SpooledTemporaryFile[bytes] | None + tensors: list[np.ndarray[Any, Any]] + _simple_value_packing = { + GGUFValueType.UINT8: "B", + 
GGUFValueType.INT8: "b", + GGUFValueType.UINT16: "H", + GGUFValueType.INT16: "h", + GGUFValueType.UINT32: "I", + GGUFValueType.INT32: "i", + GGUFValueType.FLOAT32: "f", + GGUFValueType.UINT64: "Q", + GGUFValueType.INT64: "q", + GGUFValueType.FLOAT64: "d", + GGUFValueType.BOOL: "?", + } + + def __init__( + self, path: os.PathLike[str] | str, arch: str, use_temp_file: bool = True, + endianess: GGUFEndian = GGUFEndian.LITTLE, + ): + self.fout = open(path, "wb") + self.arch = arch + self.endianess = endianess + self.offset_tensor = 0 + self.data_alignment = GGUF_DEFAULT_ALIGNMENT + self.kv_data = b"" + self.kv_data_count = 0 + self.ti_data = b"" + self.ti_data_count = 0 + self.use_temp_file = use_temp_file + self.temp_file = None + self.tensors = [] + print("gguf: This GGUF file is for {0} Endian only".format( + "Big" if self.endianess == GGUFEndian.BIG else "Little", + )) + self.state = WriterState.EMPTY + + self.add_architecture() + + def write_header_to_file(self) -> None: + if self.state is not WriterState.EMPTY: + raise ValueError(f'Expected output file to be empty, got {self.state}') + + self._write_packed(" None: + if self.state is not WriterState.HEADER: + raise ValueError(f'Expected output file to contain the header, got {self.state}') + + self.fout.write(self.kv_data) + self.flush() + self.state = WriterState.KV_DATA + + def write_ti_data_to_file(self) -> None: + if self.state is not WriterState.KV_DATA: + raise ValueError(f'Expected output file to contain KV data, got {self.state}') + + self.fout.write(self.ti_data) + self.flush() + self.state = WriterState.TI_DATA + + def add_key(self, key: str) -> None: + self.add_val(key, GGUFValueType.STRING, add_vtype=False) + + def add_uint8(self, key: str, val: int) -> None: + self.add_key(key) + self.add_val(val, GGUFValueType.UINT8) + + def add_int8(self, key: str, val: int) -> None: + self.add_key(key) + self.add_val(val, GGUFValueType.INT8) + + def add_uint16(self, key: str, val: int) -> None: + self.add_key(key) + self.add_val(val, GGUFValueType.UINT16) + + def add_int16(self, key: str, val: int) -> None: + self.add_key(key) + self.add_val(val, GGUFValueType.INT16) + + def add_uint32(self, key: str, val: int) -> None: + self.add_key(key) + self.add_val(val, GGUFValueType.UINT32) + + def add_int32(self, key: str, val: int) -> None: + self.add_key(key) + self.add_val(val, GGUFValueType.INT32) + + def add_float32(self, key: str, val: float) -> None: + self.add_key(key) + self.add_val(val, GGUFValueType.FLOAT32) + + def add_uint64(self, key: str, val: int) -> None: + self.add_key(key) + self.add_val(val, GGUFValueType.UINT64) + + def add_int64(self, key: str, val: int) -> None: + self.add_key(key) + self.add_val(val, GGUFValueType.INT64) + + def add_float64(self, key: str, val: float) -> None: + self.add_key(key) + self.add_val(val, GGUFValueType.FLOAT64) + + def add_bool(self, key: str, val: bool) -> None: + self.add_key(key) + self.add_val(val, GGUFValueType.BOOL) + + def add_string(self, key: str, val: str) -> None: + if not val: + return + self.add_key(key) + self.add_val(val, GGUFValueType.STRING) + + def add_array(self, key: str, val: Sequence[Any]) -> None: + if not isinstance(val, Sequence): + raise ValueError("Value must be a sequence for array type") + + self.add_key(key) + self.add_val(val, GGUFValueType.ARRAY) + + def add_val(self, val: Any, vtype: GGUFValueType | None = None, add_vtype: bool = True) -> None: + if vtype is None: + vtype = GGUFValueType.get_type(val) + + if add_vtype: + self.kv_data += self._pack("I", vtype) + 
self.kv_data_count += 1 + + pack_fmt = self._simple_value_packing.get(vtype) + if pack_fmt is not None: + self.kv_data += self._pack(pack_fmt, val, skip_pack_prefix = vtype == GGUFValueType.BOOL) + elif vtype == GGUFValueType.STRING: + encoded_val = val.encode("utf8") if isinstance(val, str) else val + self.kv_data += self._pack("Q", len(encoded_val)) + self.kv_data += encoded_val + elif vtype == GGUFValueType.ARRAY and isinstance(val, Sequence) and val: + ltype = GGUFValueType.get_type(val[0]) + if not all(GGUFValueType.get_type(i) is ltype for i in val[1:]): + raise ValueError("All items in a GGUF array should be of the same type") + self.kv_data += self._pack("I", ltype) + self.kv_data += self._pack("Q", len(val)) + for item in val: + self.add_val(item, add_vtype=False) + else: + raise ValueError("Invalid GGUF metadata value type or value") + + @staticmethod + def ggml_pad(x: int, n: int) -> int: + return ((x + n - 1) // n) * n + + def add_tensor_info( + self, name: str, tensor_shape: Sequence[int], tensor_dtype: np.dtype[np.float16] | np.dtype[np.float32], + tensor_nbytes: int, raw_dtype: GGMLQuantizationType | None = None, + ) -> None: + if self.state is not WriterState.EMPTY: + raise ValueError(f'Expected output file to be empty, got {self.state}') + + if raw_dtype is None and tensor_dtype not in (np.float32, np.float16): + raise ValueError("Only F32 and F16 tensors are supported for now") + + encoded_name = name.encode("utf8") + self.ti_data += self._pack("Q", len(encoded_name)) + self.ti_data += encoded_name + n_dims = len(tensor_shape) + self.ti_data += self._pack("I", n_dims) + for i in range(n_dims): + self.ti_data += self._pack("Q", tensor_shape[n_dims - 1 - i]) + if raw_dtype is None: + dtype = GGMLQuantizationType.F32 if tensor_dtype == np.float32 else GGMLQuantizationType.F16 + else: + dtype = raw_dtype + self.ti_data += self._pack("I", dtype) + self.ti_data += self._pack("Q", self.offset_tensor) + self.offset_tensor += GGUFWriter.ggml_pad(tensor_nbytes, self.data_alignment) + self.ti_data_count += 1 + + def add_tensor( + self, name: str, tensor: np.ndarray[Any, Any], raw_shape: Sequence[int] | None = None, + raw_dtype: GGMLQuantizationType | None = None, + ) -> None: + if self.endianess == GGUFEndian.BIG: + tensor.byteswap(inplace=True) + if self.use_temp_file and self.temp_file is None: + fp = tempfile.SpooledTemporaryFile(mode="w+b", max_size=256*1024*1024) + fp.seek(0) + self.temp_file = fp + + shape: Sequence[int] = raw_shape if raw_shape is not None else tensor.shape + self.add_tensor_info(name, shape, tensor.dtype, tensor.nbytes, raw_dtype = raw_dtype) + + if self.temp_file is None: + self.tensors.append(tensor) + return + + tensor.tofile(self.temp_file) + self.write_padding(self.temp_file, tensor.nbytes) + + def write_padding(self, fp: IO[bytes], n: int, align: int | None = None) -> None: + pad = GGUFWriter.ggml_pad(n, align if align is not None else self.data_alignment) - n + if pad != 0: + fp.write(bytes([0] * pad)) + + def write_tensor_data(self, tensor: np.ndarray[Any, Any]) -> None: + if self.state is not WriterState.TI_DATA: + raise ValueError(f'Expected output file to contain tensor info, got {self.state}') + + if self.endianess == GGUFEndian.BIG: + tensor.byteswap(inplace=True) + self.write_padding(self.fout, self.fout.tell()) + tensor.tofile(self.fout) + self.write_padding(self.fout, tensor.nbytes) + + def write_tensors_to_file(self) -> None: + self.write_ti_data_to_file() + + self.write_padding(self.fout, self.fout.tell()) + + if self.temp_file is None: + 
while True: + try: + tensor = self.tensors.pop(0) + except IndexError: + break + tensor.tofile(self.fout) + self.write_padding(self.fout, tensor.nbytes) + return + + self.temp_file.seek(0) + + shutil.copyfileobj(self.temp_file, self.fout) + self.flush() + self.temp_file.close() + + def flush(self) -> None: + self.fout.flush() + + def close(self) -> None: + self.fout.close() + + def add_architecture(self) -> None: + self.add_string(Keys.General.ARCHITECTURE, self.arch) + + def add_author(self, author: str) -> None: + self.add_string(Keys.General.AUTHOR, author) + + def add_tensor_data_layout(self, layout: str) -> None: + self.add_string(Keys.LLM.TENSOR_DATA_LAYOUT.format(arch=self.arch), layout) + + def add_url(self, url: str) -> None: + self.add_string(Keys.General.URL, url) + + def add_description(self, description: str) -> None: + self.add_string(Keys.General.DESCRIPTION, description) + + def add_source_url(self, url: str) -> None: + self.add_string(Keys.General.SOURCE_URL, url) + + def add_source_hf_repo(self, repo: str) -> None: + self.add_string(Keys.General.SOURCE_HF_REPO, repo) + + def add_file_type(self, ftype: int) -> None: + self.add_uint32(Keys.General.FILE_TYPE, ftype) + + def add_name(self, name: str) -> None: + self.add_string(Keys.General.NAME, name) + + def add_quantization_version(self, quantization_version: GGMLQuantizationType) -> None: + self.add_uint32( + Keys.General.QUANTIZATION_VERSION, quantization_version) + + def add_custom_alignment(self, alignment: int) -> None: + self.data_alignment = alignment + self.add_uint32(Keys.General.ALIGNMENT, alignment) + + def add_context_length(self, length: int) -> None: + self.add_uint32(Keys.LLM.CONTEXT_LENGTH.format(arch=self.arch), length) + + def add_embedding_length(self, length: int) -> None: + self.add_uint32(Keys.LLM.EMBEDDING_LENGTH.format(arch=self.arch), length) + + def add_block_count(self, length: int) -> None: + self.add_uint32(Keys.LLM.BLOCK_COUNT.format(arch=self.arch), length) + + def add_feed_forward_length(self, length: int) -> None: + self.add_uint32(Keys.LLM.FEED_FORWARD_LENGTH.format(arch=self.arch), length) + + def add_parallel_residual(self, use: bool) -> None: + self.add_bool(Keys.LLM.USE_PARALLEL_RESIDUAL.format(arch=self.arch), use) + + def add_head_count(self, count: int) -> None: + self.add_uint32(Keys.Attention.HEAD_COUNT.format(arch=self.arch), count) + + def add_head_count_kv(self, count: int) -> None: + self.add_uint32(Keys.Attention.HEAD_COUNT_KV.format(arch=self.arch), count) + + def add_max_alibi_bias(self, bias: float) -> None: + self.add_float32(Keys.Attention.MAX_ALIBI_BIAS.format(arch=self.arch), bias) + + def add_clamp_kqv(self, value: float) -> None: + self.add_float32(Keys.Attention.CLAMP_KQV.format(arch=self.arch), value) + + def add_layer_norm_eps(self, value: float) -> None: + self.add_float32(Keys.Attention.LAYERNORM_EPS.format(arch=self.arch), value) + + def add_layer_norm_rms_eps(self, value: float) -> None: + self.add_float32(Keys.Attention.LAYERNORM_RMS_EPS.format(arch=self.arch), value) + + def add_rope_dimension_count(self, count: int) -> None: + self.add_uint32(Keys.Rope.DIMENSION_COUNT.format(arch=self.arch), count) + + def add_rope_freq_base(self, value: float) -> None: + self.add_float32(Keys.Rope.FREQ_BASE.format(arch=self.arch), value) + + def add_rope_scaling_type(self, value: RopeScalingType) -> None: + self.add_string(Keys.Rope.SCALING_TYPE.format(arch=self.arch), value.value) + + def add_rope_scaling_factor(self, value: float) -> None: + 
self.add_float32(Keys.Rope.SCALING_FACTOR.format(arch=self.arch), value) + + def add_rope_scaling_orig_ctx_len(self, value: int) -> None: + self.add_uint32(Keys.Rope.SCALING_ORIG_CTX_LEN.format(arch=self.arch), value) + + def add_rope_scaling_finetuned(self, value: bool) -> None: + self.add_bool(Keys.Rope.SCALING_FINETUNED.format(arch=self.arch), value) + + def add_tokenizer_model(self, model: str) -> None: + self.add_string(Keys.Tokenizer.MODEL, model) + + def add_token_list(self, tokens: Sequence[str] | Sequence[bytes] | Sequence[bytearray]) -> None: + self.add_array(Keys.Tokenizer.LIST, tokens) + + def add_token_merges(self, merges: Sequence[str] | Sequence[bytes] | Sequence[bytearray]) -> None: + self.add_array(Keys.Tokenizer.MERGES, merges) + + def add_token_types(self, types: Sequence[TokenType] | Sequence[int]) -> None: + self.add_array(Keys.Tokenizer.TOKEN_TYPE, types) + + def add_token_scores(self, scores: Sequence[float]) -> None: + self.add_array(Keys.Tokenizer.SCORES, scores) + + def add_bos_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.BOS_ID, id) + + def add_eos_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.EOS_ID, id) + + def add_unk_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.UNK_ID, id) + + def add_sep_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.SEP_ID, id) + + def add_pad_token_id(self, id: int) -> None: + self.add_uint32(Keys.Tokenizer.PAD_ID, id) + + def add_add_bos_token(self, value: bool) -> None: + self.add_bool(Keys.Tokenizer.ADD_BOS, value) + + def add_add_eos_token(self, value: bool) -> None: + self.add_bool(Keys.Tokenizer.ADD_EOS, value) + + def _pack(self, fmt: str, value: Any, skip_pack_prefix: bool = False) -> bytes: + pack_prefix = '' + if not skip_pack_prefix: + pack_prefix = '<' if self.endianess == GGUFEndian.LITTLE else '>' + return struct.pack(f'{pack_prefix}{fmt}', value) + + def _write_packed(self, fmt: str, value: Any, skip_pack_prefix: bool = False) -> None: + self.fout.write(self._pack(fmt, value, skip_pack_prefix)) diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py new file mode 100644 index 00000000000000..22ad8b8fc558d9 --- /dev/null +++ b/gguf-py/gguf/tensor_mapping.py @@ -0,0 +1,257 @@ +from __future__ import annotations + +from typing import Sequence + +from .constants import MODEL_ARCH, MODEL_TENSOR, MODEL_TENSORS, TENSOR_NAMES + + +class TensorNameMap: + mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = { + # Token embeddings + MODEL_TENSOR.TOKEN_EMBD: ( + "gpt_neox.embed_in", # gptneox + "transformer.wte", # gpt2 gpt-j mpt refact + "transformer.word_embeddings", # falcon + "word_embeddings", # bloom + "model.embed_tokens", # llama-hf + "tok_embeddings", # llama-pth + "embeddings.word_embeddings", # bert + "language_model.embedding.word_embeddings", # persimmon + ), + + # Token type embeddings + MODEL_TENSOR.TOKEN_TYPES: ( + "embeddings.token_type_embeddings", # bert + ), + + # Normalization of token embeddings + MODEL_TENSOR.TOKEN_EMBD_NORM: ( + "word_embeddings_layernorm", # bloom + ), + + # Position embeddings + MODEL_TENSOR.POS_EMBD: ( + "transformer.wpe", # gpt2 + "embeddings.position_embeddings", # bert + ), + + # Output + MODEL_TENSOR.OUTPUT: ( + "embed_out", # gptneox + "lm_head", # gpt2 mpt falcon llama-hf baichuan + "output", # llama-pth bloom + "word_embeddings_for_head", # persimmon + ), + + # Output norm + MODEL_TENSOR.OUTPUT_NORM: ( + "gpt_neox.final_layer_norm", # gptneox + "transformer.ln_f", # gpt2 gpt-j falcon + 
"model.norm", # llama-hf baichuan + "norm", # llama-pth + "embeddings.LayerNorm", # bert + "transformer.norm_f", # mpt + "ln_f", # refact bloom + "language_model.encoder.final_layernorm", # persimmon + ), + + # Rope frequencies + MODEL_TENSOR.ROPE_FREQS: ( + "rope.freqs", # llama-pth + ), + } + + block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = { + # Attention norm + MODEL_TENSOR.ATTN_NORM: ( + "gpt_neox.layers.{bid}.input_layernorm", # gptneox + "transformer.h.{bid}.ln_1", # gpt2 gpt-j refact + "transformer.blocks.{bid}.norm_1", # mpt + "transformer.h.{bid}.input_layernorm", # falcon7b + "h.{bid}.input_layernorm", # bloom + "transformer.h.{bid}.ln_mlp", # falcon40b + "model.layers.{bid}.input_layernorm", # llama-hf + "layers.{bid}.attention_norm", # llama-pth + "encoder.layer.{bid}.attention.output.LayerNorm", # bert + "language_model.encoder.layers.{bid}.input_layernorm", # persimmon + "model.layers.{bid}.ln1", # yi + ), + + # Attention norm 2 + MODEL_TENSOR.ATTN_NORM_2: ( + "transformer.h.{bid}.ln_attn", # falcon40b + ), + + # Attention query-key-value + MODEL_TENSOR.ATTN_QKV: ( + "gpt_neox.layers.{bid}.attention.query_key_value", # gptneox + "transformer.h.{bid}.attn.c_attn", # gpt2 + "transformer.blocks.{bid}.attn.Wqkv", # mpt + "transformer.h.{bid}.self_attention.query_key_value", # falcon + "h.{bid}.self_attention.query_key_value", # bloom + "language_model.encoder.layers.{bid}.self_attention.query_key_value", # persimmon + ), + + # Attention query + MODEL_TENSOR.ATTN_Q: ( + "model.layers.{bid}.self_attn.q_proj", # llama-hf + "layers.{bid}.attention.wq", # llama-pth + "encoder.layer.{bid}.attention.self.query", # bert + "transformer.h.{bid}.attn.q_proj", # gpt-j + ), + + # Attention key + MODEL_TENSOR.ATTN_K: ( + "model.layers.{bid}.self_attn.k_proj", # llama-hf + "layers.{bid}.attention.wk", # llama-pth + "encoder.layer.{bid}.attention.self.key", # bert + "transformer.h.{bid}.attn.k_proj", # gpt-j + ), + + # Attention value + MODEL_TENSOR.ATTN_V: ( + "model.layers.{bid}.self_attn.v_proj", # llama-hf + "layers.{bid}.attention.wv", # llama-pth + "encoder.layer.{bid}.attention.self.value", # bert + "transformer.h.{bid}.attn.v_proj", # gpt-j + ), + + # Attention output + MODEL_TENSOR.ATTN_OUT: ( + "gpt_neox.layers.{bid}.attention.dense", # gptneox + "transformer.h.{bid}.attn.c_proj", # gpt2 refact + "transformer.blocks.{bid}.attn.out_proj", # mpt + "transformer.h.{bid}.self_attention.dense", # falcon + "h.{bid}.self_attention.dense", # bloom + "model.layers.{bid}.self_attn.o_proj", # llama-hf + "layers.{bid}.attention.wo", # llama-pth + "encoder.layer.{bid}.attention.output.dense", # bert + "transformer.h.{bid}.attn.out_proj", # gpt-j + "language_model.encoder.layers.{bid}.self_attention.dense", # persimmon + ), + + # Rotary embeddings + MODEL_TENSOR.ATTN_ROT_EMBD: ( + "model.layers.{bid}.self_attn.rotary_emb.inv_freq", # llama-hf + "layers.{bid}.attention.inner_attention.rope.freqs", # llama-pth + ), + + # Feed-forward norm + MODEL_TENSOR.FFN_NORM: ( + "gpt_neox.layers.{bid}.post_attention_layernorm", # gptneox + "transformer.h.{bid}.ln_2", # gpt2 refact + "h.{bid}.post_attention_layernorm", # bloom + "transformer.blocks.{bid}.norm_2", # mpt + "model.layers.{bid}.post_attention_layernorm", # llama-hf + "layers.{bid}.ffn_norm", # llama-pth + "encoder.layer.{bid}.output.LayerNorm", # bert + "language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon + "model.layers.{bid}.ln2", # yi + ), + + # Feed-forward up + MODEL_TENSOR.FFN_UP: ( + 
"gpt_neox.layers.{bid}.mlp.dense_h_to_4h", # gptneox + "transformer.h.{bid}.mlp.c_fc", # gpt2 + "transformer.blocks.{bid}.ffn.up_proj", # mpt + "transformer.h.{bid}.mlp.dense_h_to_4h", # falcon + "h.{bid}.mlp.dense_h_to_4h", # bloom + "model.layers.{bid}.mlp.up_proj", # llama-hf refact + "layers.{bid}.feed_forward.w3", # llama-pth + "encoder.layer.{bid}.intermediate.dense", # bert + "transformer.h.{bid}.mlp.fc_in", # gpt-j + "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon + ), + + # Feed-forward gate + MODEL_TENSOR.FFN_GATE: ( + "model.layers.{bid}.mlp.gate_proj", # llama-hf refact + "layers.{bid}.feed_forward.w1", # llama-pth + ), + + # Feed-forward down + MODEL_TENSOR.FFN_DOWN: ( + "gpt_neox.layers.{bid}.mlp.dense_4h_to_h", # gptneox + "transformer.h.{bid}.mlp.c_proj", # gpt2 refact + "transformer.blocks.{bid}.ffn.down_proj", # mpt + "transformer.h.{bid}.mlp.dense_4h_to_h", # falcon + "h.{bid}.mlp.dense_4h_to_h", # bloom + "model.layers.{bid}.mlp.down_proj", # llama-hf + "layers.{bid}.feed_forward.w2", # llama-pth + "encoder.layer.{bid}.output.dense", # bert + "transformer.h.{bid}.mlp.fc_out", # gpt-j + "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h", # persimmon + ), + + MODEL_TENSOR.ATTN_Q_NORM: ( + "language_model.encoder.layers.{bid}.self_attention.q_layernorm", + ), + + MODEL_TENSOR.ATTN_K_NORM: ( + "language_model.encoder.layers.{bid}.self_attention.k_layernorm", + ), + + MODEL_TENSOR.ROPE_FREQS: ( + "language_model.encoder.layers.{bid}.self_attention.rotary_emb.inv_freq", # persimmon + ), + } + + mapping: dict[str, tuple[MODEL_TENSOR, str]] + + def __init__(self, arch: MODEL_ARCH, n_blocks: int): + self.mapping = {} + for tensor, keys in self.mappings_cfg.items(): + if tensor not in MODEL_TENSORS[arch]: + continue + tensor_name = TENSOR_NAMES[tensor] + self.mapping[tensor_name] = (tensor, tensor_name) + for key in keys: + self.mapping[key] = (tensor, tensor_name) + for bid in range(n_blocks): + for tensor, keys in self.block_mappings_cfg.items(): + if tensor not in MODEL_TENSORS[arch]: + continue + tensor_name = TENSOR_NAMES[tensor].format(bid = bid) + self.mapping[tensor_name] = (tensor, tensor_name) + for key in keys: + key = key.format(bid = bid) + self.mapping[key] = (tensor, tensor_name) + + def get_type_and_name(self, key: str, try_suffixes: Sequence[str] = ()) -> tuple[MODEL_TENSOR, str] | None: + result = self.mapping.get(key) + if result is not None: + return result + for suffix in try_suffixes: + if key.endswith(suffix): + result = self.mapping.get(key[:-len(suffix)]) + if result is not None: + return result[0], result[1] + suffix + return None + + def get_name(self, key: str, try_suffixes: Sequence[str] = ()) -> str | None: + result = self.get_type_and_name(key, try_suffixes = try_suffixes) + if result is None: + return None + return result[1] + + def get_type(self, key: str, try_suffixes: Sequence[str] = ()) -> MODEL_TENSOR | None: + result = self.get_type_and_name(key, try_suffixes = try_suffixes) + if result is None: + return None + return result[0] + + def __getitem__(self, key: str) -> str: + try: + return self.mapping[key][1] + except KeyError: + raise KeyError(key) + + def __contains__(self, key: str) -> bool: + return key in self.mapping + + def __repr__(self) -> str: + return repr(self.mapping) + + +def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> TensorNameMap: + return TensorNameMap(arch, n_blocks) diff --git a/gguf-py/gguf/vocab.py b/gguf-py/gguf/vocab.py new file mode 100644 index 00000000000000..71192a928d6647 --- 
/dev/null +++ b/gguf-py/gguf/vocab.py @@ -0,0 +1,164 @@ +from __future__ import annotations + +import json +import os +import sys +from pathlib import Path +from typing import Any, Callable + +from .gguf_writer import GGUFWriter + + +class SpecialVocab: + merges: list[str] + add_special_token: dict[str, bool] + special_token_ids: dict[str, int] + + def __init__( + self, path: str | os.PathLike[str], load_merges: bool = False, + special_token_types: tuple[str, ...] | None = None, + n_vocab: int | None = None, + ): + self.special_token_ids = {} + self.add_special_token = {} + self.n_vocab = n_vocab + self.load_merges = load_merges + self.merges = [] + if special_token_types is not None: + self.special_token_types = special_token_types + else: + self.special_token_types = ('bos', 'eos', 'unk', 'sep', 'pad') + self._load(Path(path)) + + def __repr__(self) -> str: + return ''.format( + len(self.merges), self.special_token_ids or "unset", self.add_special_token or "unset", + ) + + def add_to_gguf(self, gw: GGUFWriter, quiet: bool = False) -> None: + if self.merges: + if not quiet: + print(f'gguf: Adding {len(self.merges)} merge(s).') + gw.add_token_merges(self.merges) + elif self.load_merges: + print( + 'gguf: WARNING: Adding merges requested but no merges found, output may be non-functional.', + file = sys.stderr, + ) + for typ, tokid in self.special_token_ids.items(): + id_handler: Callable[[int], None] | None = getattr(gw, f'add_{typ}_token_id', None) + if id_handler is None: + print( + f'gguf: WARNING: No handler for special token type {typ} with id {tokid} - skipping', + file = sys.stderr, + ) + continue + if not quiet: + print(f'gguf: Setting special token type {typ} to {tokid}') + id_handler(tokid) + for typ, value in self.add_special_token.items(): + add_handler: Callable[[bool], None] | None = getattr(gw, f'add_add_{typ}_token', None) + if add_handler is None: + print( + f'gguf: WARNING: No handler for add_{typ}_token with value {value} - skipping', + file = sys.stderr, + ) + continue + if not quiet: + print(f'gguf: Setting add_{typ}_token to {value}') + add_handler(value) + + def _load(self, path: Path) -> None: + self._try_load_from_tokenizer_json(path) + self._try_load_from_config_json(path) + if self.load_merges and not self.merges: + self._try_load_merges_txt(path) + + def _try_load_merges_txt(self, path: Path) -> bool: + merges_file = path / 'merges.txt' + if not merges_file.is_file(): + return False + with open(merges_file, 'r') as fp: + first_line = next(fp, '').strip() + if not first_line.startswith('#'): + fp.seek(0) + line_num = 0 + else: + line_num = 1 + merges = [] + for line in fp: + line_num += 1 + line = line.strip() + if not line: + continue + parts = line.split(None, 3) + if len(parts) != 2: + print( + f'gguf: WARNING: {merges_file.name}: Line {line_num}: Entry malformed, ignoring', + file = sys.stderr, + ) + continue + merges.append(f'{parts[0]} {parts[1]}') + self.merges = merges + return True + + def _set_special_token(self, typ: str, tid: Any) -> None: + if not isinstance(tid, int) or tid < 0: + return + if self.n_vocab is None or tid < self.n_vocab: + if typ in self.special_token_ids: + return + self.special_token_ids[typ] = tid + return + print( + f'gguf: WARNING: Special token type {typ}, id {tid} out of range, must be under {self.n_vocab} - skipping', + file = sys.stderr, + ) + + def _try_load_from_tokenizer_json(self, path: Path) -> bool: + tokenizer_file = path / 'tokenizer.json' + if not tokenizer_file.is_file(): + return False + with open(tokenizer_file, 
encoding = 'utf-8') as f: + tokenizer = json.load(f) + if self.load_merges: + merges = tokenizer.get('model', {}).get('merges') + if isinstance(merges, list) and merges and isinstance(merges[0], str): + self.merges = merges + tokenizer_config_file = path / 'tokenizer_config.json' + added_tokens = tokenizer.get('added_tokens') + if added_tokens is None or not tokenizer_config_file.is_file(): + return True + with open(tokenizer_config_file, encoding = 'utf-8') as f: + tokenizer_config = json.load(f) + for typ in self.special_token_types: + add_entry = tokenizer_config.get(f'add_{typ}_token') + if isinstance(add_entry, bool): + self.add_special_token[typ] = add_entry + entry = tokenizer_config.get(f'{typ}_token') + if isinstance(entry, str): + tc_content = entry + elif isinstance(entry, dict): + entry_content = entry.get('content') + if not isinstance(entry_content, str): + continue + tc_content = entry_content + else: + continue + # We only need the first match here. + maybe_token_id = next( + (atok.get('id') for atok in added_tokens if atok.get('content') == tc_content), + None, + ) + self._set_special_token(typ, maybe_token_id) + return True + + def _try_load_from_config_json(self, path: Path) -> bool: + config_file = path / 'config.json' + if not config_file.is_file(): + return False + with open(config_file, encoding = 'utf-8') as f: + config = json.load(f) + for typ in self.special_token_types: + self._set_special_token(typ, config.get(f'{typ}_token_id')) + return True diff --git a/gguf-py/pyproject.toml b/gguf-py/pyproject.toml index c6cb2c37a0e0a1..624e1cda628e1a 100644 --- a/gguf-py/pyproject.toml +++ b/gguf-py/pyproject.toml @@ -1,11 +1,12 @@ [tool.poetry] name = "gguf" -version = "0.4.6" +version = "0.5.0" description = "Write ML models in GGUF for GGML" authors = ["GGML "] packages = [ {include = "gguf"}, {include = "gguf/py.typed"}, + {include = "scripts"}, ] readme = "README.md" homepage = "https://ggml.ai" @@ -27,3 +28,8 @@ pytest = "^5.2" [build-system] requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" + +[tool.poetry.scripts] +gguf-convert-endian = "scripts:gguf_convert_endian_entrypoint" +gguf-dump = "scripts:gguf_dump_entrypoint" +gguf-set-metadata = "scripts:gguf_set_metadata_entrypoint" diff --git a/gguf-py/scripts/__init__.py b/gguf-py/scripts/__init__.py new file mode 100644 index 00000000000000..77132db7a0e948 --- /dev/null +++ b/gguf-py/scripts/__init__.py @@ -0,0 +1,12 @@ +import os + +from importlib import import_module + + +os.environ["NO_LOCAL_GGUF"] = "TRUE" + +gguf_convert_endian_entrypoint = import_module("scripts.gguf-convert-endian").main +gguf_dump_entrypoint = import_module("scripts.gguf-dump").main +gguf_set_metadata_entrypoint = import_module("scripts.gguf-set-metadata").main + +del import_module, os diff --git a/gguf-py/scripts/gguf-convert-endian.py b/gguf-py/scripts/gguf-convert-endian.py new file mode 100755 index 00000000000000..b79d86e072041b --- /dev/null +++ b/gguf-py/scripts/gguf-convert-endian.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +import os +import sys +from pathlib import Path + +import numpy as np + +# Necessary to load the local gguf package +if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists(): + sys.path.insert(0, str(Path(__file__).parent.parent)) + +import gguf + + +def convert_byteorder(reader: gguf.GGUFReader, args: argparse.Namespace) -> None: + if np.uint32(1) == np.uint32(1).newbyteorder("<"): + # 
Host is little endian + host_endian = "little" + swapped_endian = "big" + else: + # Sorry PDP or other weird systems that don't use BE or LE. + host_endian = "big" + swapped_endian = "little" + if reader.byte_order == "S": + file_endian = swapped_endian + else: + file_endian = host_endian + if args.order == "native": + order = host_endian + print(f"* Host is {host_endian.upper()} endian, GGUF file seems to be {file_endian.upper()} endian") + if file_endian == order: + print(f"* File is already {order.upper()} endian. Nothing to do.") + sys.exit(0) + print("* Checking tensors for conversion compatibility") + for tensor in reader.tensors: + if tensor.tensor_type not in ( + gguf.GGMLQuantizationType.F32, + gguf.GGMLQuantizationType.F16, + gguf.GGMLQuantizationType.Q8_0, + ): + raise ValueError(f"Cannot handle type {tensor.tensor_type.name} for tensor {repr(tensor.name)}") + print(f"* Preparing to convert from {file_endian.upper()} to {order.upper()}") + if args.dry_run: + return + print("\n*** Warning *** Warning *** Warning **") + print("* This conversion process may damage the file. Ensure you have a backup.") + if order != host_endian: + print("* Requested endian differs from host, you will not be able to load the model on this machine.") + print("* The file will be modified immediately, so if conversion fails or is interrupted") + print("* the file will be corrupted. Enter exactly YES if you are positive you want to proceed:") + response = input("YES, I am sure> ") + if response != "YES": + print("You didn't enter YES. Okay then, see ya!") + sys.exit(0) + print(f"\n* Converting fields ({len(reader.fields)})") + for idx, field in enumerate(reader.fields.values()): + print(f"- {idx:4}: Converting field {repr(field.name)}, part count: {len(field.parts)}") + for part in field.parts: + part.byteswap(inplace=True) + print(f"\n* Converting tensors ({len(reader.tensors)})") + for idx, tensor in enumerate(reader.tensors): + print( + f" - {idx:4}: Converting tensor {repr(tensor.name)}, type={tensor.tensor_type.name}, " + f"elements={tensor.n_elements}... ", + end="", + ) + tensor_type = tensor.tensor_type + for part in tensor.field.parts: + part.byteswap(inplace=True) + if tensor_type != gguf.GGMLQuantizationType.Q8_0: + tensor.data.byteswap(inplace=True) + print() + continue + # A Q8_0 block consists of a f16 delta followed by 32 int8 quants, so 34 bytes + block_size = 34 + n_blocks = len(tensor.data) // block_size + for block_num in range(n_blocks): + block_offs = block_num * block_size + # I know I said f16, but it doesn't matter here - any simple 16 bit type works. 
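+                    # Worked example of the layout above: for a (hypothetical) 68-byte Q8_0
+                    # tensor, block_size = 34 gives n_blocks = 2, with the 16-bit deltas at
+                    # byte offsets 0 and 34. Only those 2-byte views need swapping; the 32
+                    # int8 quants that follow each delta are single bytes with no byte order.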
+ delta = tensor.data[block_offs:block_offs + 2].view(dtype=np.uint16) + delta.byteswap(inplace=True) + if block_num % 100000 == 0: + print(f"[{(n_blocks - block_num) // 1000}K]", end="") + sys.stdout.flush() + print() + print("* Completion") + + +def main() -> None: + parser = argparse.ArgumentParser(description="Convert GGUF file byte order") + parser.add_argument( + "model", type=str, + help="GGUF format model filename", + ) + parser.add_argument( + "order", type=str, choices=['big', 'little', 'native'], + help="Requested byte order", + ) + parser.add_argument( + "--dry-run", action="store_true", + help="Don't actually change anything", + ) + args = parser.parse_args(None if len(sys.argv) > 1 else ["--help"]) + print(f'* Loading: {args.model}') + reader = gguf.GGUFReader(args.model, 'r' if args.dry_run else 'r+') + convert_byteorder(reader, args) + + +if __name__ == "__main__": + main() diff --git a/gguf-py/scripts/gguf-dump.py b/gguf-py/scripts/gguf-dump.py new file mode 100755 index 00000000000000..5141873de7321e --- /dev/null +++ b/gguf-py/scripts/gguf-dump.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +import os +import sys +from pathlib import Path +from typing import Any + +import numpy as np + +# Necessary to load the local gguf package +if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists(): + sys.path.insert(0, str(Path(__file__).parent.parent)) + +from gguf import GGUFReader, GGUFValueType # noqa: E402 + + +def get_file_host_endian(reader: GGUFReader) -> tuple[str, str]: + host_endian = 'LITTLE' if np.uint32(1) == np.uint32(1).newbyteorder("<") else 'BIG' + if reader.byte_order == 'S': + file_endian = 'BIG' if host_endian == 'LITTLE' else 'LITTLE' + else: + file_endian = host_endian + return (host_endian, file_endian) + + +# For more information about what field.parts and field.data represent, +# please see the comments in the modify_gguf.py example. 
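+# As an illustrative sketch of that layout (using tokenizer.ggml.bos_token_id as
+# an example, per the notes in gguf-set-metadata.py below): a simple scalar field
+# is stored as four parts - key length, key bytes, value type, value - while
+# field.data lists only the index of the value part, so the value itself can be
+# read as:
+#
+#   value = field.parts[field.data[0]][0]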
+def dump_metadata(reader: GGUFReader, args: argparse.Namespace) -> None: + host_endian, file_endian = get_file_host_endian(reader) + print(f'* File is {file_endian} endian, script is running on a {host_endian} endian host.') + print(f'\n* Dumping {len(reader.fields)} key/value pair(s)') + for n, field in enumerate(reader.fields.values(), 1): + if not field.types: + pretty_type = 'N/A' + elif field.types[0] == GGUFValueType.ARRAY: + nest_count = len(field.types) - 1 + pretty_type = '[' * nest_count + str(field.types[-1].name) + ']' * nest_count + else: + pretty_type = str(field.types[-1].name) + print(f' {n:5}: {pretty_type:10} | {len(field.data):8} | {field.name}', end = '') + if len(field.types) == 1: + curr_type = field.types[0] + if curr_type == GGUFValueType.STRING: + print(' = {0}'.format(repr(str(bytes(field.parts[-1]), encoding='utf8')[:60])), end = '') + elif field.types[0] in reader.gguf_scalar_to_np: + print(' = {0}'.format(field.parts[-1][0]), end = '') + print() + if args.no_tensors: + return + print(f'\n* Dumping {len(reader.tensors)} tensor(s)') + for n, tensor in enumerate(reader.tensors, 1): + prettydims = ', '.join('{0:5}'.format(d) for d in list(tensor.shape) + [1] * (4 - len(tensor.shape))) + print(f' {n:5}: {tensor.n_elements:10} | {prettydims} | {tensor.tensor_type.name:7} | {tensor.name}') + + +def dump_metadata_json(reader: GGUFReader, args: argparse.Namespace) -> None: + import json + host_endian, file_endian = get_file_host_endian(reader) + metadata: dict[str, Any] = {} + tensors: dict[str, Any] = {} + result = { + "filename": args.model, + "endian": file_endian, + "metadata": metadata, + "tensors": tensors, + } + for idx, field in enumerate(reader.fields.values()): + curr: dict[str, Any] = { + "index": idx, + "type": field.types[0].name if field.types else 'UNKNOWN', + "offset": field.offset, + } + metadata[field.name] = curr + if field.types[:1] == [GGUFValueType.ARRAY]: + curr["array_types"] = [t.name for t in field.types][1:] + if not args.json_array: + continue + itype = field.types[-1] + if itype == GGUFValueType.STRING: + curr["value"] = [str(bytes(field.parts[idx]), encoding="utf-8") for idx in field.data] + else: + curr["value"] = [pv for idx in field.data for pv in field.parts[idx].tolist()] + elif field.types[0] == GGUFValueType.STRING: + curr["value"] = str(bytes(field.parts[-1]), encoding="utf-8") + else: + curr["value"] = field.parts[-1].tolist()[0] + for idx, tensor in enumerate(reader.tensors): + tensors[tensor.name] = { + "index": idx, + "shape": tensor.shape.tolist(), + "type": tensor.tensor_type.name, + "offset": tensor.field.offset, + } + json.dump(result, sys.stdout) + + +def main() -> None: + parser = argparse.ArgumentParser(description="Dump GGUF file metadata") + parser.add_argument("model", type=str, help="GGUF format model filename") + parser.add_argument("--no-tensors", action="store_true", help="Don't dump tensor metadata") + parser.add_argument("--json", action="store_true", help="Produce JSON output") + parser.add_argument("--json-array", action="store_true", help="Include full array values in JSON output (long)") + args = parser.parse_args(None if len(sys.argv) > 1 else ["--help"]) + if not args.json: + print(f'* Loading: {args.model}') + reader = GGUFReader(args.model, 'r') + if args.json: + dump_metadata_json(reader, args) + else: + dump_metadata(reader, args) + + +if __name__ == '__main__': + main() diff --git a/gguf-py/scripts/gguf-set-metadata.py b/gguf-py/scripts/gguf-set-metadata.py new file mode 100755 index 
00000000000000..3ebdfa898a7792 --- /dev/null +++ b/gguf-py/scripts/gguf-set-metadata.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 +import argparse +import os +import sys +from pathlib import Path + +# Necessary to load the local gguf package +if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists(): + sys.path.insert(0, str(Path(__file__).parent.parent)) + +from gguf import GGUFReader # noqa: E402 + + +def minimal_example(filename: str) -> None: + reader = GGUFReader(filename, 'r+') + field = reader.fields['tokenizer.ggml.bos_token_id'] + if field is None: + return + part_index = field.data[0] + field.parts[part_index][0] = 2 # Set tokenizer.ggml.bos_token_id to 2 + # + # So what's this field.data thing? It's helpful because field.parts contains + # _every_ part of the GGUF field. For example, tokenizer.ggml.bos_token_id consists + # of: + # + # Part index 0: Key length (27) + # Part index 1: Key data ("tokenizer.ggml.bos_token_id") + # Part index 2: Field type (4, the id for GGUFValueType.UINT32) + # Part index 3: Field value + # + # Note also that each part is an NDArray slice, so even a part that + # is only a single value like the key length will be a NDArray of + # the key length type (numpy.uint32). + # + # The .data attribute in the Field is a list of relevant part indexes + # and doesn't contain internal GGUF details like the key length part. + # In this case, .data will be [3] - just the part index of the + # field value itself. + + +def set_metadata(reader: GGUFReader, args: argparse.Namespace) -> None: + field = reader.get_field(args.key) + if field is None: + print(f'! Field {repr(args.key)} not found', file = sys.stderr) + sys.exit(1) + # Note that field.types is a list of types. This is because the GGUF + # format supports arrays. For example, an array of UINT32 would + # look like [GGUFValueType.ARRAY, GGUFValueType.UINT32] + handler = reader.gguf_scalar_to_np.get(field.types[0]) if field.types else None + if handler is None: + print( + f'! This tool only supports changing simple values, {repr(args.key)} has unsupported type {field.types}', + file = sys.stderr, + ) + sys.exit(1) + current_value = field.parts[field.data[0]][0] + new_value = handler(args.value) + print(f'* Preparing to change field {repr(args.key)} from {current_value} to {new_value}') + if current_value == new_value: + print(f'- Key {repr(args.key)} already set to requested value {current_value}') + sys.exit(0) + if args.dry_run: + sys.exit(0) + if not args.force: + print('*** Warning *** Warning *** Warning **') + print('* Changing fields in a GGUF file can make it unusable. Proceed at your own risk.') + print('* Enter exactly YES if you are positive you want to proceed:') + response = input('YES, I am sure> ') + if response != 'YES': + print("You didn't enter YES. Okay then, see ya!") + sys.exit(0) + field.parts[field.data[0]][0] = new_value + print('* Field changed. 
Successful completion.') + + +def main() -> None: + parser = argparse.ArgumentParser(description="Set a simple value in GGUF file metadata") + parser.add_argument("model", type=str, help="GGUF format model filename") + parser.add_argument("key", type=str, help="Metadata key to set") + parser.add_argument("value", type=str, help="Metadata value to set") + parser.add_argument("--dry-run", action="store_true", help="Don't actually change anything") + parser.add_argument("--force", action="store_true", help="Change the field without confirmation") + args = parser.parse_args(None if len(sys.argv) > 1 else ["--help"]) + print(f'* Loading: {args.model}') + reader = GGUFReader(args.model, 'r' if args.dry_run else 'r+') + set_metadata(reader, args) + + +if __name__ == '__main__': + main() diff --git a/gguf-py/tests/test_gguf.py b/gguf-py/tests/test_gguf.py index 512531dd2a8f0a..0adeb7d55731a4 100644 --- a/gguf-py/tests/test_gguf.py +++ b/gguf-py/tests/test_gguf.py @@ -1,7 +1,7 @@ -import gguf +import gguf # noqa: F401 # TODO: add tests -def test_write_gguf(): +def test_write_gguf() -> None: pass From d96ca7ded77df764db797b68b4a29e34c5b56285 Mon Sep 17 00:00:00 2001 From: Alexey Parfenov Date: Sat, 11 Nov 2023 05:48:21 +0000 Subject: [PATCH 083/206] server : fix crash when prompt exceeds context size (#3996) --- examples/server/server.cpp | 58 +++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index cbf36ad6752b67..46862a84b99da7 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1557,6 +1557,35 @@ struct llama_server_context slot.num_prompt_tokens = prompt_tokens.size(); + if (slot.params.n_keep < 0) + { + slot.params.n_keep = slot.num_prompt_tokens; + } + slot.params.n_keep = std::min(slot.n_ctx - 4, slot.params.n_keep); + + // if input prompt is too big, truncate it + if (slot.num_prompt_tokens >= slot.n_ctx) + { + const int n_left = slot.n_ctx - slot.params.n_keep; + const int n_block_size = n_left / 2; + const int erased_blocks = (slot.num_prompt_tokens - slot.params.n_keep - n_block_size) / n_block_size; + + std::vector new_tokens(prompt_tokens.begin(), prompt_tokens.begin() + slot.params.n_keep); + new_tokens.insert(new_tokens.end(), prompt_tokens.begin() + slot.params.n_keep + erased_blocks * n_block_size, prompt_tokens.end()); + + LOG_VERBOSE("input truncated", { + {"n_ctx", slot.n_ctx}, + {"n_keep", slot.params.n_keep}, + {"n_left", n_left}, + {"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())}, + }); + slot.truncated = true; + prompt_tokens = new_tokens; + + slot.num_prompt_tokens = prompt_tokens.size(); + GGML_ASSERT(slot.num_prompt_tokens < slot.n_ctx); + } + if (!slot.params.cache_prompt) { llama_sampling_reset(slot.ctx_sampling); @@ -1566,35 +1595,6 @@ struct llama_server_context } else { - if (slot.params.n_keep < 0) - { - slot.params.n_keep = slot.num_prompt_tokens; - } - slot.params.n_keep = std::min(slot.n_ctx - 4, slot.params.n_keep); - - // if input prompt is too big, truncate it - if (slot.num_prompt_tokens >= slot.n_ctx) - { - const int n_left = slot.n_ctx - slot.params.n_keep; - const int n_block_size = n_left / 2; - const int erased_blocks = (slot.num_prompt_tokens - slot.params.n_keep - n_block_size) / n_block_size; - - std::vector new_tokens(prompt_tokens.begin(), prompt_tokens.begin() + slot.params.n_keep); - new_tokens.insert(new_tokens.end(), prompt_tokens.begin() + slot.params.n_keep + erased_blocks * n_block_size, 
prompt_tokens.end()); - - LOG_VERBOSE("input truncated", { - {"n_ctx", slot.n_ctx}, - {"n_keep", slot.params.n_keep}, - {"n_left", n_left}, - {"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())}, - }); - slot.truncated = true; - prompt_tokens = new_tokens; - - slot.num_prompt_tokens = prompt_tokens.size(); - GGML_ASSERT(slot.num_prompt_tokens < slot.n_ctx); - } - // push the prompt into the sampling context (do not apply grammar) for (auto &token : prompt_tokens) { From e86fc56f7521ca4b18d1d9939e82abd40c2f1c01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Sat, 11 Nov 2023 18:35:31 +0300 Subject: [PATCH 084/206] Fix gguf-convert-endian script (#4037) * Fix gguf-convert-endian script * Bump version and update description --- gguf-py/pyproject.toml | 4 ++-- gguf-py/scripts/gguf-convert-endian.py | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/gguf-py/pyproject.toml b/gguf-py/pyproject.toml index 624e1cda628e1a..e21c3cd94f22ae 100644 --- a/gguf-py/pyproject.toml +++ b/gguf-py/pyproject.toml @@ -1,7 +1,7 @@ [tool.poetry] name = "gguf" -version = "0.5.0" -description = "Write ML models in GGUF for GGML" +version = "0.5.1" +description = "Read and write ML models in GGUF for GGML" authors = ["GGML "] packages = [ {include = "gguf"}, diff --git a/gguf-py/scripts/gguf-convert-endian.py b/gguf-py/scripts/gguf-convert-endian.py index b79d86e072041b..10a16ad063ce67 100755 --- a/gguf-py/scripts/gguf-convert-endian.py +++ b/gguf-py/scripts/gguf-convert-endian.py @@ -28,8 +28,7 @@ def convert_byteorder(reader: gguf.GGUFReader, args: argparse.Namespace) -> None file_endian = swapped_endian else: file_endian = host_endian - if args.order == "native": - order = host_endian + order = host_endian if args.order == "native" else args.order print(f"* Host is {host_endian.upper()} endian, GGUF file seems to be {file_endian.upper()} endian") if file_endian == order: print(f"* File is already {order.upper()} endian. Nothing to do.") From 532dd74e38c29e16ea1cfc4e7eedb4f2fab3f3cd Mon Sep 17 00:00:00 2001 From: Richard Kiss Date: Sat, 11 Nov 2023 22:04:58 -0800 Subject: [PATCH 085/206] Fix some documentation typos/grammar mistakes (#4032) * typos * Update examples/parallel/README.md Co-authored-by: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com> --------- Co-authored-by: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com> --- README.md | 2 +- docs/token_generation_performance_tips.md | 2 +- examples/main/README.md | 2 +- examples/parallel/README.md | 2 +- grammars/README.md | 4 ++-- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 9c9e36ad07accf..af39e8c0e386ea 100644 --- a/README.md +++ b/README.md @@ -424,7 +424,7 @@ Building the program with BLAS support may lead to some performance improvements ``` The environment variable [`HIP_VISIBLE_DEVICES`](https://rocm.docs.amd.com/en/latest/understand/gpu_isolation.html#hip-visible-devices) can be used to specify which GPU(s) will be used. - If your GPU is not officialy supported you can use the environment variable [`HSA_OVERRIDE_GFX_VERSION`] set to a similar GPU, for example 10.3.0 on RDNA2 or 11.0.0 on RDNA3. + If your GPU is not officially supported you can use the environment variable [`HSA_OVERRIDE_GFX_VERSION`] set to a similar GPU, for example 10.3.0 on RDNA2 or 11.0.0 on RDNA3. 
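  For example, a hypothetical invocation (the model path, GPU index and offload layer count are placeholders, adjust them to your setup) could look like:
  ```
  HIP_VISIBLE_DEVICES=0 HSA_OVERRIDE_GFX_VERSION=10.3.0 ./main -m ./models/7B/ggml-model-q4_0.gguf -p "Hello" -ngl 32
  ```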
The following compilation options are also available to tweak performance (yes, they refer to CUDA, not HIP, because it uses the same code as the cuBLAS version above): | Option | Legal values | Default | Description | diff --git a/docs/token_generation_performance_tips.md b/docs/token_generation_performance_tips.md index c9acff7d4f18cc..d7e863dff5c01b 100644 --- a/docs/token_generation_performance_tips.md +++ b/docs/token_generation_performance_tips.md @@ -17,7 +17,7 @@ llama_model_load_internal: [cublas] total VRAM used: 17223 MB If you see these lines, then the GPU is being used. ## Verifying that the CPU is not oversaturated -llama accepts a `-t N` (or `--threads N`) parameter. It's extremely important that this parameter is not too large. If your token generation is extremely slow, try setting this number to 1. If this significantly improves your token generation speed, then your CPU is being oversaturated and you need to explicitly set this parameter to the number of the physicial CPU cores on your machine (even if you utilize a GPU). If in doubt, start with 1 and double the amount until you hit a performance bottleneck, then scale the number down. +llama accepts a `-t N` (or `--threads N`) parameter. It's extremely important that this parameter is not too large. If your token generation is extremely slow, try setting this number to 1. If this significantly improves your token generation speed, then your CPU is being oversaturated and you need to explicitly set this parameter to the number of the physical CPU cores on your machine (even if you utilize a GPU). If in doubt, start with 1 and double the amount until you hit a performance bottleneck, then scale the number down. # Example of runtime flags effect on inference speed benchmark These runs were tested on the following machine: diff --git a/examples/main/README.md b/examples/main/README.md index a3428b48763d0b..c7997f66569a56 100644 --- a/examples/main/README.md +++ b/examples/main/README.md @@ -142,7 +142,7 @@ The `--ctx-size` option allows you to set the size of the prompt context used by ### Extended Context Size -Some fine-tuned models have extened the context length by scaling RoPE. For example, if the original pretrained model have a context length (max sequence length) of 4096 (4k) and the fine-tuned model have 32k. That is a scaling factor of 8, and should work by setting the above `--ctx-size` to 32768 (32k) and `--rope-scale` to 8. +Some fine-tuned models have extended the context length by scaling RoPE. For example, if the original pre-trained model have a context length (max sequence length) of 4096 (4k) and the fine-tuned model have 32k. That is a scaling factor of 8, and should work by setting the above `--ctx-size` to 32768 (32k) and `--rope-scale` to 8. - `--rope-scale N`: Where N is the linear scaling factor used by the fine-tuned model. diff --git a/examples/parallel/README.md b/examples/parallel/README.md index 4d0fe5cef12fa7..df04567337b15e 100644 --- a/examples/parallel/README.md +++ b/examples/parallel/README.md @@ -1,3 +1,3 @@ # llama.cpp/example/parallel -Simplified simluation for serving incoming requests in parallel +Simplified simulation of serving incoming requests in parallel diff --git a/grammars/README.md b/grammars/README.md index 7f3b11ca5b5922..e1383fa5c6a58c 100644 --- a/grammars/README.md +++ b/grammars/README.md @@ -55,7 +55,7 @@ The order of symbols in a sequence matter. For example, in `"1. " move " " move Alternatives, denoted by `|`, give different sequences that are acceptable. 
For example, in `move ::= pawn | nonpawn | castle`, `move` can be a `pawn` move, a `nonpawn` move, or a `castle`. -Parentheses `()` can be used to group sequences, which allows for embedding alternatives in a larger rule or applying repetition and optptional symbols (below) to a sequence. +Parentheses `()` can be used to group sequences, which allows for embedding alternatives in a larger rule or applying repetition and optional symbols (below) to a sequence. ## Repetition and Optional Symbols @@ -67,7 +67,7 @@ Parentheses `()` can be used to group sequences, which allows for embedding alte Comments can be specified with `#`: ``` -# defines optional whitspace +# defines optional whitespace ws ::= [ \t\n]+ ``` From 21fd874c8d2a14dea2d56724e4357c0824aee6a8 Mon Sep 17 00:00:00 2001 From: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com> Date: Sun, 12 Nov 2023 16:39:37 -0700 Subject: [PATCH 086/206] gguf-py: gguf_writer: Use bytearray to build metadata (#4051) * gguf-py: gguf_writer: Use BytesIO to build metadata * Use bytearray instead Bump gguf-py package version --- gguf-py/gguf/gguf_writer.py | 4 ++-- gguf-py/pyproject.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index 75fb6976f9ca28..c3b8c588f17cdb 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -57,9 +57,9 @@ def __init__( self.endianess = endianess self.offset_tensor = 0 self.data_alignment = GGUF_DEFAULT_ALIGNMENT - self.kv_data = b"" + self.kv_data = bytearray() self.kv_data_count = 0 - self.ti_data = b"" + self.ti_data = bytearray() self.ti_data_count = 0 self.use_temp_file = use_temp_file self.temp_file = None diff --git a/gguf-py/pyproject.toml b/gguf-py/pyproject.toml index e21c3cd94f22ae..af777c3e0f2b6c 100644 --- a/gguf-py/pyproject.toml +++ b/gguf-py/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "gguf" -version = "0.5.1" +version = "0.5.2" description = "Read and write ML models in GGUF for GGML" authors = ["GGML "] packages = [ From bb50a792ec2a49944470c82694fa364345e95170 Mon Sep 17 00:00:00 2001 From: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com> Date: Mon, 13 Nov 2023 01:58:15 -0700 Subject: [PATCH 087/206] Add ReLU and SQR CUDA ops to (partially) fix Persimmon offloading (#4041) * Add ReLU and SQR CUDA ops to fix Persimmon offloading * Persimmon loader: More helpful error on CUDA/ROCM when offloading too many layers --- ggml-cuda.cu | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++ llama.cpp | 7 +++++ 2 files changed, 79 insertions(+) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index f87f18802c8f87..8d03ba6641981f 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -433,6 +433,8 @@ static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + 13*QK_K/16, "wrong q6_ #define CUDA_MUL_BLOCK_SIZE 256 #define CUDA_GELU_BLOCK_SIZE 256 #define CUDA_SILU_BLOCK_SIZE 256 +#define CUDA_RELU_BLOCK_SIZE 256 +#define CUDA_SQR_BLOCK_SIZE 256 #define CUDA_CPY_BLOCK_SIZE 32 #define CUDA_SCALE_BLOCK_SIZE 256 #define CUDA_CLAMP_BLOCK_SIZE 256 @@ -553,6 +555,24 @@ static __global__ void silu_f32(const float * x, float * dst, const int k) { dst[i] = x[i] / (1.0f + expf(-x[i])); } +static __global__ void relu_f32(const float * x, float * dst, const int k) { + const int i = blockDim.x*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + dst[i] = fmaxf(x[i], 0); +} + +static __global__ void sqr_f32(const float * x, float * dst, const int k) { + const int i = blockDim.x*blockIdx.x + threadIdx.x; + + if 
(i >= k) { + return; + } + dst[i] = x[i] * x[i]; +} + static __device__ __forceinline__ float2 warp_reduce_sum(float2 a) { #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { @@ -4759,6 +4779,16 @@ static void silu_f32_cuda(const float * x, float * dst, const int k, cudaStream_ silu_f32<<>>(x, dst, k); } +static void relu_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_RELU_BLOCK_SIZE - 1) / CUDA_RELU_BLOCK_SIZE; + relu_f32<<>>(x, dst, k); +} + +static void sqr_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_SQR_BLOCK_SIZE - 1) / CUDA_SQR_BLOCK_SIZE; + sqr_f32<<>>(x, dst, k); +} + static void norm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % WARP_SIZE == 0); if (ncols < 1024) { @@ -6128,6 +6158,34 @@ inline void ggml_cuda_op_silu( (void) src1_dd; } +inline void ggml_cuda_op_relu( + const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, + const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + relu_f32_cuda(src0_dd, dst_dd, ggml_nelements(src0), main_stream); + + (void) src1; + (void) dst; + (void) src1_dd; +} + +inline void ggml_cuda_op_sqr( + const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, + const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + sqr_f32_cuda(src0_dd, dst_dd, ggml_nelements(src0), main_stream); + + (void) src1; + (void) dst; + (void) src1_dd; +} + inline void ggml_cuda_op_norm( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { @@ -7160,6 +7218,14 @@ static void ggml_cuda_silu(const ggml_tensor * src0, const ggml_tensor * src1, g ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_silu); } +static void ggml_cuda_relu(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_relu); +} + +static void ggml_cuda_sqr(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_sqr); +} + static void ggml_cuda_norm(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_norm); } @@ -7891,6 +7957,9 @@ bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_ case GGML_UNARY_OP_SILU: func = ggml_cuda_silu; break; + case GGML_UNARY_OP_RELU: + func = ggml_cuda_relu; + break; default: return false; } break; @@ -7909,6 +7978,9 @@ bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_ case GGML_OP_SCALE: func = ggml_cuda_scale; break; + case GGML_OP_SQR: + func = ggml_cuda_sqr; + break; case GGML_OP_CLAMP: if (!any_on_device) { return false; diff --git a/llama.cpp b/llama.cpp index d682d2864d2836..a5f3876cc19e0c 100644 --- a/llama.cpp +++ b/llama.cpp @@ -2877,6 +2877,13 @@ static void llm_load_tensors( ggml_backend_type backend_output; if (n_gpu_layers > int(n_layer)) { +#ifdef GGML_USE_CUBLAS + if (n_gpu_layers > int(n_layer + 1)) { + LLAMA_LOG_ERROR("%s: CUDA backend missing Persimmon CUDA ops, can offload at most %ld 
layers. See: https://github.com/ggerganov/llama.cpp/issues/4038\n", + __func__, n_layer + 1); + throw std::runtime_error("Persimmon CUDA offload failed"); + } +#endif // norm is not performance relevant on its own but keeping it in VRAM reduces data copying // on Windows however this is detrimental unless everything is on the GPU #ifndef _WIN32 From 4760e7cc0b68570d58f55e8dda469805d1759d0d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 13 Nov 2023 14:16:23 +0200 Subject: [PATCH 088/206] sync : ggml (backend v2) (#3912) * sync : ggml (backend v2) (wip) * sync : migrate examples and llama.cpp to dynamic graphs (wip) * sync : update tests + fix max op params to 64 ggml-ci * sync : ggml-cuda ggml-ci * llama : fix save/load state context size ggml-ci * sync : try to fix build on tvOS * sync : pass custom graph sizes in training examples * sync : update graph copies to new ggml API * sync : update sync-ggml.sh with new files * scripts : fix header in sync script * train : fix context size calculations * llama : increase inference graph size up to 4096 nodes * train : allocate grads for backward graphs * train : allocate grads for gb_tmp --- common/train.cpp | 1 + common/train.h | 2 + examples/benchmark/benchmark-matmult.cpp | 21 +- examples/export-lora/export-lora.cpp | 4 +- examples/finetune/finetune.cpp | 23 +- examples/llava/clip.cpp | 2 +- examples/metal/metal.cpp | 10 +- .../train-text-from-scratch.cpp | 23 +- ggml-alloc.c | 586 +++++---- ggml-alloc.h | 84 +- ggml-backend-impl.h | 87 ++ ggml-backend.c | 591 +++++++++- ggml-backend.h | 147 ++- ggml-cuda.cu | 16 +- ggml-impl.h | 14 +- ggml-metal.m | 25 +- ggml.c | 1047 ++++++++++------- ggml.h | 89 +- llama.cpp | 40 +- scripts/sync-ggml.sh | 12 +- tests/test-grad0.cpp | 7 +- tests/test-opt.cpp | 11 +- 22 files changed, 1986 insertions(+), 856 deletions(-) create mode 100644 ggml-backend-impl.h diff --git a/common/train.cpp b/common/train.cpp index bc15b7a03c0cd4..964b156b5abe4e 100644 --- a/common/train.cpp +++ b/common/train.cpp @@ -32,6 +32,7 @@ struct train_state * init_train_state() { state->opt = new struct ggml_opt_context; state->opt->ctx = NULL; state->opt->params = ggml_opt_default_params(GGML_OPT_ADAM); + state->opt->params.graph_size = LLAMA_TRAIN_MAX_NODES; state->opt->loss_after = 0.0f; return state; diff --git a/common/train.h b/common/train.h index d86c93cc4f1472..263d940c042985 100644 --- a/common/train.h +++ b/common/train.h @@ -9,6 +9,8 @@ #include "ggml.h" #include "llama.h" +#define LLAMA_TRAIN_MAX_NODES 16384 + typedef std::string mt19937_state; struct train_state { diff --git a/examples/benchmark/benchmark-matmult.cpp b/examples/benchmark/benchmark-matmult.cpp index 76e3f57ccce8e0..284733b1035c96 100644 --- a/examples/benchmark/benchmark-matmult.cpp +++ b/examples/benchmark/benchmark-matmult.cpp @@ -171,7 +171,8 @@ int main(int argc, char ** argv) { struct ggml_tensor * m11xm2 = ggml_mul_mat(ctx, m11, m2); // printf("Creating compute graph\n"); - struct ggml_cgraph gf = ggml_build_forward(m11xm2); + struct ggml_cgraph * gf = ggml_new_graph(ctx); + ggml_build_forward_expand(gf, m11xm2); printf("n_threads=%i\n", benchmark_params.n_threads); @@ -180,9 +181,9 @@ int main(int argc, char ** argv) { std::vector work_buffer; - ggml_graph_compute_helper(work_buffer, &gf, benchmark_params.n_threads); + ggml_graph_compute_helper(work_buffer, gf, benchmark_params.n_threads); - TENSOR_DUMP(gf.nodes[0]); + TENSOR_DUMP(gf->nodes[0]); printf("\n------ Test 2 - Matrix Mult via %s code\n", ggml_type_name(qtype)); @@ -200,7 
+201,8 @@ int main(int argc, char ** argv) { struct ggml_tensor * q31 = ggml_mul_mat(ctx, q11, m2); // printf("Creating compute graph\n"); - struct ggml_cgraph gf31 = ggml_build_forward(q31); + struct ggml_cgraph * gf31 = ggml_new_graph(ctx); + ggml_build_forward_expand(gf31, q31); // Set up a second graph computation to make sure we override the CPU cache lines // printf("Creating new tensor q12 & Running quantize\n"); @@ -211,7 +213,8 @@ int main(int argc, char ** argv) { struct ggml_tensor * q32 = ggml_mul_mat(ctx, q12, m2); //printf("Creating compute graph\n"); - struct ggml_cgraph gf32 = ggml_build_forward(q32); + struct ggml_cgraph * gf32 = ggml_new_graph(ctx); + ggml_build_forward_expand(gf32, q32); printf("n_threads=%i\n", benchmark_params.n_threads); const int dimx = sizex; @@ -223,7 +226,7 @@ int main(int argc, char ** argv) { // Let's use the F32 result from above as a reference for the quantized multiplication - float sum_of_F32_reference = tensor_sum_elements(gf.nodes[0]); + float sum_of_F32_reference = tensor_sum_elements(gf->nodes[0]); printf("Iteration;NThreads; SizeX; SizeY; SizeZ; Required_FLOPS; Elapsed_u_Seconds; gigaFLOPS\n"); printf("=====================================================================================\n"); @@ -233,7 +236,7 @@ int main(int argc, char ** argv) { long long int start = ggml_time_us(); //printf("Running ggml_graph_compute\n"); - ggml_graph_compute_helper(work_buffer, &gf31, benchmark_params.n_threads); + ggml_graph_compute_helper(work_buffer, gf31, benchmark_params.n_threads); long long int stop = ggml_time_us(); long long int usec = stop-start; @@ -251,7 +254,7 @@ int main(int argc, char ** argv) { // Check that the matrix multiplication result is in the right ballpark // We cannot use the exact value from the F32 multiplication because the quantizuation will be slightly different - float sum_of_Q4_result = tensor_sum_elements(gf31.nodes[0]); + float sum_of_Q4_result = tensor_sum_elements(gf31->nodes[0]); float delta = std::abs(sum_of_Q4_result - sum_of_F32_reference); float allowed_delta = (sum_of_F32_reference) / 1000 / 1000; // Let's accept an epsilon of 10^-6 @@ -266,7 +269,7 @@ int main(int argc, char ** argv) { } // Running a different graph computation to make sure we override the CPU cache lines - ggml_graph_compute_helper(work_buffer, &gf32, benchmark_params.n_threads); + ggml_graph_compute_helper(work_buffer, gf32, benchmark_params.n_threads); } printf("\n"); printf("Average%78.2f\n",gflops_sum/((double)benchmark_params.n_iterations)); diff --git a/examples/export-lora/export-lora.cpp b/examples/export-lora/export-lora.cpp index d803cfd5cb2d5e..c8754ce70f37df 100644 --- a/examples/export-lora/export-lora.cpp +++ b/examples/export-lora/export-lora.cpp @@ -240,7 +240,7 @@ static struct lora_data * load_lora(struct lora_info * info) { } struct ggml_init_params params_ggml; - params_ggml.mem_size = ggml_tensor_overhead() * GGML_MAX_NODES; + params_ggml.mem_size = ggml_tensor_overhead() * GGML_DEFAULT_GRAPH_SIZE; params_ggml.mem_buffer = NULL; params_ggml.no_alloc = true; result->ctx = ggml_init(params_ggml); @@ -334,7 +334,7 @@ static bool apply_lora(struct ggml_tensor * tensor, struct lora_data * lora, int float scaling = lora->info.scale * (float)lora->lora_alpha / (float)lora->lora_r; struct ggml_init_params params; - params.mem_size = GGML_OBJECT_SIZE + GGML_GRAPH_SIZE + ggml_tensor_overhead()*4 + GGML_MEM_ALIGN*5; + params.mem_size = GGML_OBJECT_SIZE + ggml_graph_overhead() + ggml_tensor_overhead()*4 + GGML_MEM_ALIGN*5; 
params.mem_buffer = NULL; params.no_alloc = true; struct ggml_context * ctx = NULL; diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp index fa7dbe496b2c51..5a6cf22ce1b951 100644 --- a/examples/finetune/finetune.cpp +++ b/examples/finetune/finetune.cpp @@ -772,7 +772,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs( if (enable_checkpointing) { ggml_build_backward_gradient_checkpointing(ctx, gf, gb, gb_tmp, checkpoints.data(), (int) checkpoints.size()); } else { - *gb = *gf; + ggml_graph_cpy(gf, gb); ggml_build_backward_expand(ctx, gf, gb, true); } @@ -1615,6 +1615,7 @@ int main(int argc, char ** argv) { opt->params = ggml_opt_default_params(GGML_OPT_ADAM); opt->params.print_forward_graph = false; opt->params.print_backward_graph = false; + opt->params.graph_size = LLAMA_TRAIN_MAX_NODES; opt->params.n_threads = params.common.n_threads; opt->params.past = params.common.opt_past; opt->params.delta = params.common.opt_delta; @@ -1741,11 +1742,9 @@ int main(int argc, char ** argv) { ggml_allocr_free(alloc); // context for compute tensors without their data - size_t estimated_compute_size_wo_data = ( - ggml_tensor_overhead()*GGML_MAX_NODES*2 - + (GGML_OBJECT_SIZE+GGML_GRAPH_SIZE)*( - params.common.use_checkpointing ? 3 : 2 - ) + const size_t estimated_compute_size_wo_data = ( + 2*LLAMA_TRAIN_MAX_NODES*ggml_tensor_overhead() + + (params.common.use_checkpointing ? 3 : 2)*(GGML_OBJECT_SIZE+ggml_graph_overhead_custom(LLAMA_TRAIN_MAX_NODES, true)) ); struct ggml_init_params ctx_compute_params = { estimated_compute_size_wo_data, // mem_size @@ -1768,11 +1767,11 @@ int main(int argc, char ** argv) { for (unsigned order = 0; order < (unsigned) GGML_CGRAPH_EVAL_ORDER_COUNT; ++order) { ctx_compute = ggml_init(ctx_compute_params); alloc = ggml_allocr_new_measure(tensor_alignment); - gf = ggml_new_graph(ctx_compute); + gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true); gf->order = (enum ggml_cgraph_eval_order) order; - gb = ggml_new_graph(ctx_compute); + gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true); gb_tmp = params.common.use_checkpointing - ? ggml_new_graph(ctx_compute) + ? ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true) : NULL; loss = llama_build_lora_finetune_graphs( &model, &lora, alloc, ctx_compute, @@ -1801,11 +1800,11 @@ int main(int argc, char ** argv) { mem_compute_data.resize(max_compute_size); ctx_compute = ggml_init(ctx_compute_params); alloc = ggml_allocr_new(mem_compute_data.data(), mem_compute_data.size(), tensor_alignment); - gf = ggml_new_graph(ctx_compute); + gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true); gf->order = best_order; - gb = ggml_new_graph(ctx_compute); + gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true); gb_tmp = params.common.use_checkpointing - ? ggml_new_graph(ctx_compute) + ? 
ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true) : NULL; loss = llama_build_lora_finetune_graphs( &model, &lora, alloc, ctx_compute, diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index 3c909c7d3c6ab2..c26ee4957090c8 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -664,7 +664,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { // measure mem requirement and allocate { static const size_t tensor_alignment = 32; - new_clip->buf_compute.resize(ggml_tensor_overhead()*GGML_MAX_NODES + ggml_graph_overhead()); + new_clip->buf_compute.resize(ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead()); new_clip->alloc = ggml_allocr_new_measure(tensor_alignment); clip_image_f32_batch batch; batch.size = 1; diff --git a/examples/metal/metal.cpp b/examples/metal/metal.cpp index c05a4fa933d316..16c1146f94e33f 100644 --- a/examples/metal/metal.cpp +++ b/examples/metal/metal.cpp @@ -34,7 +34,7 @@ int main(int argc, char ** argv) { struct ggml_context * ctx_data = NULL; struct ggml_context * ctx_eval = NULL; - struct ggml_cgraph gf = ggml_graph_import(fname_cgraph, &ctx_data, &ctx_eval); + struct ggml_cgraph * gf = ggml_graph_import(fname_cgraph, &ctx_data, &ctx_eval); // this allocates all Metal resources and memory buffers auto * ctx_metal = ggml_metal_init(1); @@ -46,13 +46,13 @@ int main(int argc, char ** argv) { // main { - struct ggml_tensor * input = ggml_graph_get_tensor(&gf, "embd"); + struct ggml_tensor * input = ggml_graph_get_tensor(gf, "embd"); *(int32_t *) input->data = 1; // BOS ggml_metal_set_tensor(ctx_metal, input); // warmup - ggml_metal_graph_compute(ctx_metal, &gf); + ggml_metal_graph_compute(ctx_metal, gf); const int n_iter = 16; @@ -60,7 +60,7 @@ int main(int argc, char ** argv) { // the actual inference happens here for (int i = 0; i < n_iter; ++i) { - ggml_metal_graph_compute(ctx_metal, &gf); + ggml_metal_graph_compute(ctx_metal, gf); } const int64_t t1 = ggml_time_us(); @@ -70,7 +70,7 @@ int main(int argc, char ** argv) { // debug output { - struct ggml_tensor * logits = gf.nodes[gf.n_nodes - 1]; + struct ggml_tensor * logits = gf->nodes[gf->n_nodes - 1]; ggml_metal_get_tensor(ctx_metal, logits); float * ptr = (float *) ggml_get_data(logits); diff --git a/examples/train-text-from-scratch/train-text-from-scratch.cpp b/examples/train-text-from-scratch/train-text-from-scratch.cpp index 2a257e63215e3c..f049a3923669b1 100644 --- a/examples/train-text-from-scratch/train-text-from-scratch.cpp +++ b/examples/train-text-from-scratch/train-text-from-scratch.cpp @@ -436,7 +436,7 @@ static struct ggml_tensor * llama_build_train_graphs( if (enable_checkpointing) { ggml_build_backward_gradient_checkpointing(ctx, gf, gb, gb_tmp, checkpoints.data(), (int) checkpoints.size()); } else { - *gb = *gf; + ggml_graph_cpy(gf, gb); ggml_build_backward_expand(ctx, gf, gb, true); } @@ -1006,6 +1006,7 @@ int main(int argc, char ** argv) { opt->params = ggml_opt_default_params(GGML_OPT_ADAM); opt->params.print_forward_graph = false; opt->params.print_backward_graph = false; + opt->params.graph_size = LLAMA_TRAIN_MAX_NODES; opt->params.n_threads = params.common.n_threads; opt->params.past = params.common.opt_past; opt->params.delta = params.common.opt_delta; @@ -1108,11 +1109,9 @@ int main(int argc, char ** argv) { ggml_allocr_free(alloc); // context for compute tensors without their data - size_t estimated_compute_size_wo_data = ( - ggml_tensor_overhead()*GGML_MAX_NODES*2 - + 
(GGML_OBJECT_SIZE+GGML_GRAPH_SIZE)*( - params.common.use_checkpointing ? 3 : 2 - ) + const size_t estimated_compute_size_wo_data = ( + 2*LLAMA_TRAIN_MAX_NODES*ggml_tensor_overhead() + + (params.common.use_checkpointing ? 3 : 2)*(GGML_OBJECT_SIZE+ggml_graph_overhead_custom(LLAMA_TRAIN_MAX_NODES, true)) ); struct ggml_init_params ctx_compute_params = { estimated_compute_size_wo_data, // mem_size @@ -1135,11 +1134,11 @@ int main(int argc, char ** argv) { for (unsigned order = 0; order < (unsigned) GGML_CGRAPH_EVAL_ORDER_COUNT; ++order) { ctx_compute = ggml_init(ctx_compute_params); alloc = ggml_allocr_new_measure(tensor_alignment); - gf = ggml_new_graph(ctx_compute); + gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true); gf->order = (enum ggml_cgraph_eval_order) order; - gb = ggml_new_graph(ctx_compute); + gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true); gb_tmp = params.common.use_checkpointing - ? ggml_new_graph(ctx_compute) + ? ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true) : NULL; loss = llama_build_train_graphs( &model, alloc, ctx_compute, @@ -1168,11 +1167,11 @@ int main(int argc, char ** argv) { mem_compute_data.resize(max_compute_size); ctx_compute = ggml_init(ctx_compute_params); alloc = ggml_allocr_new(mem_compute_data.data(), mem_compute_data.size(), tensor_alignment); - gf = ggml_new_graph(ctx_compute); + gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true); gf->order = best_order; - gb = ggml_new_graph(ctx_compute); + gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true); gb_tmp = params.common.use_checkpointing - ? ggml_new_graph(ctx_compute) + ? ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true) : NULL; loss = llama_build_train_graphs( &model, alloc, ctx_compute, diff --git a/ggml-alloc.c b/ggml-alloc.c index b553eb7c132719..cdfe4caf69613d 100644 --- a/ggml-alloc.c +++ b/ggml-alloc.c @@ -1,51 +1,21 @@ #include "ggml-alloc.h" -#include "ggml-backend.h" +#include "ggml-backend-impl.h" #include "ggml.h" +#include "ggml-impl.h" #include +#include #include #include #include #include - -#define UNUSED(x) (void)(x) #define MAX(a, b) ((a) > (b) ? (a) : (b)) -#define GGML_MAX_CONCUR (2*GGML_MAX_NODES) +#define MAX_FREE_BLOCKS 256 //#define GGML_ALLOCATOR_DEBUG -//#define AT_PRINTF printf -#define AT_PRINTF(...) ((void)0) - -struct hash_node { - struct ggml_tensor * t; - int n_children; - int n_views; -}; - -static size_t hash(void * p) { - return (size_t)p % GGML_GRAPH_HASHTABLE_SIZE; -} - -static struct hash_node * hash_get(struct hash_node hash_table[], struct ggml_tensor * t) { - size_t h = hash(t); - - // linear probing - size_t i = h; - while (hash_table[i].t != NULL) { - if (hash_table[i].t == t) { - return &hash_table[i]; - } - i = (i + 1) % GGML_GRAPH_HASHTABLE_SIZE; - if (i == h) { - // hash table is full - GGML_ASSERT(false); - } - } - - hash_table[i].t = t; - return &hash_table[i]; -} +//#define AT_PRINTF(...) fprintf(stderr, __VA_ARGS__) +#define AT_PRINTF(...) // TODO: GGML_PAD ? 
static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) { @@ -59,20 +29,18 @@ struct free_block { size_t size; }; -#define MAX_FREE_BLOCKS 256 - -struct ggml_allocr { +struct ggml_tallocr { struct ggml_backend_buffer * buffer; bool buffer_owned; - void * data; + void * base; size_t alignment; + int n_free_blocks; struct free_block free_blocks[MAX_FREE_BLOCKS]; - struct hash_node hash_table[GGML_GRAPH_HASHTABLE_SIZE]; + size_t max_size; + bool measure; - int parse_seq[GGML_MAX_CONCUR]; - int parse_seq_len; #ifdef GGML_ALLOCATOR_DEBUG struct ggml_tensor * allocated_tensors[1024]; @@ -80,7 +48,7 @@ struct ggml_allocr { }; #ifdef GGML_ALLOCATOR_DEBUG -static void add_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) { +static void add_allocated_tensor(ggml_tallocr_t alloc, struct ggml_tensor * tensor) { for (int i = 0; i < 1024; i++) { if (alloc->allocated_tensors[i] == NULL) { alloc->allocated_tensors[i] = tensor; @@ -89,7 +57,7 @@ static void add_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tensor } GGML_ASSERT(!"out of allocated_tensors"); } -static void remove_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) { +static void remove_allocated_tensor(ggml_tallocr_t alloc, struct ggml_tensor * tensor) { for (int i = 0; i < 1024; i++) { if (alloc->allocated_tensors[i] == tensor || (alloc->allocated_tensors[i] != NULL && alloc->allocated_tensors[i]->data == tensor->data)) { @@ -103,7 +71,7 @@ static void remove_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tens #endif // check if a tensor is allocated by this buffer -static bool ggml_allocr_is_own(struct ggml_allocr * alloc, const struct ggml_tensor * tensor) { +static bool ggml_tallocr_is_own(ggml_tallocr_t alloc, const struct ggml_tensor * tensor) { return tensor->buffer == alloc->buffer; } @@ -111,7 +79,7 @@ static bool ggml_is_view(struct ggml_tensor * t) { return t->view_src != NULL; } -void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) { +void ggml_tallocr_alloc(ggml_tallocr_t alloc, struct ggml_tensor * tensor) { GGML_ASSERT(!ggml_is_view(tensor)); // views generally get data pointer from one of their sources GGML_ASSERT(tensor->data == NULL); // avoid allocating tensor which already has memory allocated @@ -162,9 +130,10 @@ void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) } tensor->data = addr; - AT_PRINTF("%s: allocated data at %p\n", __func__, tensor->data); tensor->buffer = alloc->buffer; - ggml_backend_buffer_init_tensor(alloc->buffer, tensor); + if (!alloc->measure) { + ggml_backend_buffer_init_tensor(alloc->buffer, tensor); + } #ifdef GGML_ALLOCATOR_DEBUG add_allocated_tensor(alloc, tensor); @@ -180,16 +149,16 @@ void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) } #endif - alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->data + size); + alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->base + size); } // this is a very naive implementation, but for our case the number of free blocks should be very small -static void ggml_allocr_free_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) { - if (ggml_allocr_is_own(alloc, tensor) == false) { +static void ggml_tallocr_free_tensor(ggml_tallocr_t alloc, struct ggml_tensor * tensor) { + if (ggml_tallocr_is_own(alloc, tensor) == false) { // the tensor was not allocated in this buffer // this can happen because the graph allocator will try to free weights and 
other tensors from different buffers // the easiest way to deal with this is just to ignore it - AT_PRINTF("ignoring %s (their buffer: %p, our buffer: %p)\n", tensor->name, (void *)tensor->buffer, (void *)alloc->buffer); + // AT_PRINTF("ignoring %s (their buffer: %p, our buffer: %p)\n", tensor->name, (void *)tensor->buffer, (void *)alloc->buffer); return; } @@ -199,7 +168,9 @@ static void ggml_allocr_free_tensor(struct ggml_allocr * alloc, struct ggml_tens size = aligned_offset(NULL, size, alloc->alignment); AT_PRINTF("%s: freeing %s at %p (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, ptr, size, alloc->n_free_blocks); - ggml_backend_buffer_free_tensor(alloc->buffer, tensor); + if (!alloc->measure) { + ggml_backend_buffer_free_tensor(alloc->buffer, tensor); + } #ifdef GGML_ALLOCATOR_DEBUG remove_allocated_tensor(alloc, tensor); @@ -253,91 +224,180 @@ static void ggml_allocr_free_tensor(struct ggml_allocr * alloc, struct ggml_tens alloc->n_free_blocks++; } -void ggml_allocr_set_parse_seq(struct ggml_allocr * alloc, const int * list, int n) { - for (int i = 0; i < n; i++) { - alloc->parse_seq[i] = list[i]; - } - alloc->parse_seq_len = n; -} - -void ggml_allocr_reset(struct ggml_allocr * alloc) { +void ggml_tallocr_reset(ggml_tallocr_t alloc) { alloc->n_free_blocks = 1; - size_t align_offset = aligned_offset(alloc->data, 0, alloc->alignment); - alloc->free_blocks[0].addr = (char *)alloc->data + align_offset; - alloc->free_blocks[0].size = ggml_backend_buffer_get_size(alloc->buffer) - align_offset; + size_t align_offset = aligned_offset(alloc->base, 0, alloc->alignment); + alloc->free_blocks[0].addr = (char *)alloc->base + align_offset; + + if (alloc->measure) { + alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows + } else { + alloc->free_blocks[0].size = ggml_backend_buffer_get_size(alloc->buffer) - align_offset; + } } -struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment) { +ggml_tallocr_t ggml_tallocr_new(void * data, size_t size, size_t alignment) { struct ggml_backend_buffer * buffer = ggml_backend_cpu_buffer_from_ptr(NULL, data, size); - struct ggml_allocr * alloc = (struct ggml_allocr *)malloc(sizeof(struct ggml_allocr)); + ggml_tallocr_t alloc = (ggml_tallocr_t)malloc(sizeof(struct ggml_tallocr)); - *alloc = (struct ggml_allocr){ + *alloc = (struct ggml_tallocr) { /*.buffer = */ buffer, /*.buffer_owned = */ true, /*.base = */ ggml_backend_buffer_get_base(buffer), /*.alignment = */ alignment, /*.n_free_blocks = */ 0, /*.free_blocks = */ {{0}}, - /*.hash_table = */ {{0}}, /*.max_size = */ 0, /*.measure = */ false, - /*.parse_seq = */ {0}, - /*.parse_seq_len = */ 0, #ifdef GGML_ALLOCATOR_DEBUG /*.allocated_tensors = */ {0}, #endif }; - ggml_allocr_reset(alloc); + ggml_tallocr_reset(alloc); + + return alloc; +} + +ggml_tallocr_t ggml_tallocr_new_measure(size_t alignment) { + ggml_tallocr_t alloc = ggml_tallocr_new((void *)0x1000, SIZE_MAX/2, alignment); + alloc->measure = true; return alloc; } -struct ggml_allocr * ggml_allocr_new_measure(size_t alignment) { - struct ggml_allocr * alloc = ggml_allocr_new((void *)0x1000, (size_t)-0x1001, alignment); +ggml_tallocr_t ggml_tallocr_new_measure_from_backend(struct ggml_backend * backend) { + // create a backend buffer to get the correct tensor allocation sizes + ggml_backend_buffer_t buffer = ggml_backend_alloc_buffer(backend, 1); + + // TODO: move alloc initialization to a common ggml_tallocr_new_impl function + ggml_tallocr_t 
alloc = ggml_tallocr_new_from_buffer(buffer); + alloc->buffer_owned = true; alloc->measure = true; + ggml_tallocr_reset(alloc); + return alloc; +} +ggml_tallocr_t ggml_tallocr_new_from_backend(struct ggml_backend * backend, size_t size) { + ggml_backend_buffer_t buffer = ggml_backend_alloc_buffer(backend, size); + ggml_tallocr_t alloc = ggml_tallocr_new_from_buffer(buffer); + alloc->buffer_owned = true; return alloc; } -struct ggml_allocr * ggml_allocr_new_from_buffer(struct ggml_backend_buffer * buffer) { - struct ggml_allocr * alloc = (struct ggml_allocr *)malloc(sizeof(struct ggml_allocr)); +ggml_tallocr_t ggml_tallocr_new_from_buffer(struct ggml_backend_buffer * buffer) { + ggml_tallocr_t alloc = (ggml_tallocr_t)malloc(sizeof(struct ggml_tallocr)); - *alloc = (struct ggml_allocr){ + *alloc = (struct ggml_tallocr) { /*.buffer = */ buffer, /*.buffer_owned = */ false, /*.base = */ ggml_backend_buffer_get_base(buffer), /*.alignment = */ ggml_backend_buffer_get_alignment(buffer), /*.n_free_blocks = */ 0, /*.free_blocks = */ {{0}}, - /*.hash_table = */ {{0}}, /*.max_size = */ 0, /*.measure = */ false, - /*.parse_seq = */ {0}, - /*.parse_seq_len = */ 0, #ifdef GGML_ALLOCATOR_DEBUG /*.allocated_tensors = */ {0}, #endif }; - ggml_allocr_reset(alloc); + ggml_tallocr_reset(alloc); return alloc; } -void ggml_allocr_free(struct ggml_allocr * alloc) { +struct ggml_backend_buffer * ggml_tallocr_get_buffer(ggml_tallocr_t alloc) { + return alloc->buffer; +} + +void ggml_tallocr_free(ggml_tallocr_t alloc) { + if (alloc == NULL) { + return; + } + if (alloc->buffer_owned) { ggml_backend_buffer_free(alloc->buffer); } free(alloc); } -bool ggml_allocr_is_measure(struct ggml_allocr * alloc) { +bool ggml_tallocr_is_measure(ggml_tallocr_t alloc) { return alloc->measure; } -//////////// compute graph allocator +size_t ggml_tallocr_max_size(ggml_tallocr_t alloc) { + return alloc->max_size; +} + +// graph allocator + +struct hash_node { + int n_children; + int n_views; +}; + +struct ggml_gallocr { + ggml_tallocr_t talloc; + struct ggml_hash_set hash_set; + struct hash_node * hash_values; + size_t hash_values_size; + ggml_tallocr_t * hash_allocs; + int * parse_seq; + int parse_seq_len; +}; + +ggml_gallocr_t ggml_gallocr_new(void) { + ggml_gallocr_t galloc = (ggml_gallocr_t)malloc(sizeof(struct ggml_gallocr)); + + *galloc = (struct ggml_gallocr) { + /*.talloc = */ NULL, + /*.hash_set = */ {0}, + /*.hash_values = */ NULL, + /*.hash_values_size = */ 0, + /*.hash_allocs = */ NULL, + /*.parse_seq = */ NULL, + /*.parse_seq_len = */ 0, + }; + + return galloc; +} + +void ggml_gallocr_free(ggml_gallocr_t galloc) { + if (galloc == NULL) { + return; + } + + if (galloc->hash_set.keys != NULL) { + free(galloc->hash_set.keys); + } + if (galloc->hash_values != NULL) { + free(galloc->hash_values); + } + if (galloc->hash_allocs != NULL) { + free(galloc->hash_allocs); + } + if (galloc->parse_seq != NULL) { + free(galloc->parse_seq); + } + free(galloc); +} + +void ggml_gallocr_set_parse_seq(ggml_gallocr_t galloc, const int * list, int n) { + free(galloc->parse_seq); + galloc->parse_seq = malloc(sizeof(int) * n); + + for (int i = 0; i < n; i++) { + galloc->parse_seq[i] = list[i]; + } + galloc->parse_seq_len = n; +} + +static struct hash_node * hash_get(ggml_gallocr_t galloc, struct ggml_tensor * t) { + size_t i = ggml_hash_find_or_insert(galloc->hash_set, t); + return &galloc->hash_values[i]; +} static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) { if (a->type != b->type) { @@ -378,27 +438,40 
@@ static bool ggml_op_can_inplace(enum ggml_op op) { } } -static void init_view(struct ggml_allocr * alloc, struct ggml_tensor * view, bool update_backend) { - assert(view->view_src != NULL && view->view_src->data != NULL); +static ggml_tallocr_t node_tallocr(ggml_gallocr_t galloc, struct ggml_tensor * node) { + if (galloc->talloc != NULL) { + return galloc->talloc; + } + + return galloc->hash_allocs[ggml_hash_find_or_insert(galloc->hash_set, node)]; +} + +static void init_view(ggml_gallocr_t galloc, struct ggml_tensor * view, bool update_backend) { + ggml_tallocr_t alloc = node_tallocr(galloc, view); + //printf("init_view: %s from src %s\n", view->name, view->view_src->name); + GGML_ASSERT(view->view_src != NULL && view->view_src->data != NULL); if (update_backend) { view->backend = view->view_src->backend; } - view->buffer = view->view_src->buffer; view->data = (char *)view->view_src->data + view->view_offs; // FIXME: the view should be initialized by the owning buffer, but currently this breaks the CUDA backend // due to the ggml_tensor_extra_gpu ring buffer overwriting the KV cache extras - assert(ggml_allocr_is_measure(alloc) || !view->buffer || view->buffer->backend == alloc->buffer->backend); - ggml_backend_buffer_init_tensor(alloc->buffer, view); + assert(ggml_tallocr_is_measure(alloc) || !view->buffer || view->buffer->backend == alloc->buffer->backend); + + if (!alloc->measure) { + ggml_backend_buffer_init_tensor(alloc->buffer, view); + } } -static void allocate_node(struct ggml_allocr * alloc, struct ggml_tensor * node) { - struct hash_node * ht = alloc->hash_table; +static void allocate_node(ggml_gallocr_t galloc, struct ggml_tensor * node) { + ggml_tallocr_t alloc = node_tallocr(galloc, node); + if (node->data == NULL) { if (ggml_is_view(node)) { - init_view(alloc, node, true); + init_view(galloc, node, true); } else { // see if we can reuse a parent's buffer (inplace) if (ggml_op_can_inplace(node->op)) { @@ -409,16 +482,16 @@ static void allocate_node(struct ggml_allocr * alloc, struct ggml_tensor * node) } // if the node's data is external, then we cannot re-use it - if (ggml_allocr_is_own(alloc, parent) == false) { + if (ggml_tallocr_is_own(alloc, parent) == false) { AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data); continue; } - struct hash_node * p_hn = hash_get(ht, parent); + struct hash_node * p_hn = hash_get(galloc, parent); if (parent->data != NULL && p_hn->n_children == 1 && p_hn->n_views == 0 && ggml_are_same_layout(node, parent)) { if (ggml_is_view(parent)) { struct ggml_tensor * view_src = parent->view_src; - struct hash_node * view_src_hn = hash_get(ht, view_src); + struct hash_node * view_src_hn = hash_get(galloc, view_src); if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) { // TODO: the offset of the view parent must be kept to ensure that the op doesn't overwrite // the parent's data that it will need later (same layout requirement). 
the problem is that then @@ -428,170 +501,267 @@ static void allocate_node(struct ggml_allocr * alloc, struct ggml_tensor * node) AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name); node->view_src = view_src; view_src_hn->n_views += 1; - init_view(alloc, node, false); + init_view(galloc, node, false); return; } } else { AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name); node->view_src = parent; p_hn->n_views += 1; - init_view(alloc, node, false); + init_view(galloc, node, false); return; } } } } - ggml_allocr_alloc(alloc, node); + ggml_tallocr_alloc(alloc, node); } } } -size_t ggml_allocr_alloc_graph_n( - struct ggml_allocr * alloc, - struct ggml_cgraph ** graphs, int n_graphs, - struct ggml_tensor *** inputs, struct ggml_tensor *** outputs) { +static void free_node(ggml_gallocr_t galloc, struct ggml_tensor * node) { + ggml_tallocr_t alloc = node_tallocr(galloc, node); - // reset hash table - struct hash_node * ht = alloc->hash_table; - memset(ht, 0, sizeof(struct hash_node) * GGML_GRAPH_HASHTABLE_SIZE); + ggml_tallocr_free_tensor(alloc, node); +} + +static void ggml_tallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgraph * gf) { + const int * parse_seq = galloc->parse_seq; + int parse_seq_len = galloc->parse_seq_len; // count number of children and views - for (int g = 0; g < n_graphs; g++) { - struct ggml_cgraph * gf = graphs[g]; - for (int i = 0; i < gf->n_nodes; i++) { + for (int i = 0; i < gf->n_nodes; i++) { + struct ggml_tensor * node = gf->nodes[i]; + + if (ggml_is_view(node)) { + struct ggml_tensor * view_src = node->view_src; + hash_get(galloc, view_src)->n_views += 1; + if (node->buffer == NULL && node->data != NULL) { + // view of a pre-allocated tensor, didn't call init_view() yet + init_view(galloc, node, true); + } + } + + for (int j = 0; j < GGML_MAX_SRC; j++) { + struct ggml_tensor * parent = node->src[j]; + if (parent == NULL) { + break; + } + hash_get(galloc, parent)->n_children += 1; + if (ggml_is_view(parent) && parent->buffer == NULL && parent->data != NULL) { + init_view(galloc, parent, true); + } + } + } + + // allocate tensors + // if we have parse_seq then we allocate nodes following the list, and we only free nodes at barriers + int last_barrier_pos = 0; + int n_nodes = parse_seq_len ? parse_seq_len : gf->n_nodes; + + for (int ind = 0; ind < n_nodes; ind++) { + // allocate a node if there is no parse_seq or this is not a barrier + if (parse_seq_len == 0 || parse_seq[ind] != -1) { + int i = parse_seq_len ? 
parse_seq[ind] : ind; struct ggml_tensor * node = gf->nodes[i]; - if (ggml_is_view(node)) { - struct ggml_tensor * view_src = node->view_src; - hash_get(ht, view_src)->n_views += 1; - if (node->buffer == NULL && node->data != NULL) { - // view of a pre-allocated tensor, didn't call init_view() yet - init_view(alloc, node, true); + // allocate parents (leafs) + for (int j = 0; j < GGML_MAX_SRC; j++) { + struct ggml_tensor * parent = node->src[j]; + if (parent == NULL) { + break; } + allocate_node(galloc, parent); } + // allocate node + allocate_node(galloc, node); + + AT_PRINTF("exec: %s (%s) <= ", ggml_op_name(node->op), node->name); for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * parent = node->src[j]; if (parent == NULL) { break; } - hash_get(ht, parent)->n_children += 1; - if (ggml_is_view(parent) && parent->buffer == NULL && parent->data != NULL) { - init_view(alloc, parent, true); + AT_PRINTF("%s", parent->name); + if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) { + AT_PRINTF(", "); } } + AT_PRINTF("\n"); } - } - - // allocate tensors - for (int g = 0; g < n_graphs; g++) { - struct ggml_cgraph * gf = graphs[g]; - AT_PRINTF("####### graph %d/%d\n", g, n_graphs); - // graph inputs are allocated first to ensure that they are not overwritten by each other - if (inputs != NULL && inputs[g] != NULL) { - for (int i = 0; inputs[g][i] != NULL; i++) { - struct ggml_tensor * input = inputs[g][i]; - AT_PRINTF("input: %s\n", input->name); - allocate_node(alloc, input); - } - } - // if we have parse_seq then we allocate nodes following the list, and we only free nodes at barriers - int last_barrier_pos = 0; - int n_nodes = alloc->parse_seq_len ? alloc->parse_seq_len : gf->n_nodes; - for (int ind = 0; ind < n_nodes; ind++) { - // allocate a node if there is no parse_seq or this is not a barrier - if ((alloc->parse_seq_len==0) || alloc->parse_seq[ind] != -1) { - int i = alloc->parse_seq_len ? alloc->parse_seq[ind] : ind; - struct ggml_tensor * node = gf->nodes[i]; + // update parents + // update immediately if there is no parse_seq + // update only at barriers if there is parse_seq + if ((parse_seq_len == 0) || parse_seq[ind] == -1) { + int update_start = parse_seq_len ? last_barrier_pos : ind; + int update_end = parse_seq_len ? ind : ind + 1; + for (int i = update_start; i < update_end; i++) { + int node_i = parse_seq_len ? parse_seq[i] : i; + struct ggml_tensor * node = gf->nodes[node_i]; - // allocate parents (leafs) for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * parent = node->src[j]; if (parent == NULL) { break; } - allocate_node(alloc, parent); - } + struct hash_node * p_hn = hash_get(galloc, parent); + p_hn->n_children -= 1; - // allocate node - allocate_node(alloc, node); + //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views); - AT_PRINTF("exec: %s (%s) <= ", ggml_op_name(node->op), node->name); - for (int j = 0; j < GGML_MAX_SRC; j++) { - struct ggml_tensor * parent = node->src[j]; - if (parent == NULL) { - break; - } - AT_PRINTF("%s", parent->name); - if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) { - AT_PRINTF(", "); - } - } - AT_PRINTF("\n"); - } - - // update parents - // update immediately if there is no parse_seq - // update only at barriers if there is parse_seq - if ((alloc->parse_seq_len == 0) || alloc->parse_seq[ind] == -1) { - int update_start = alloc->parse_seq_len ? last_barrier_pos : ind; - int update_end = alloc->parse_seq_len ? 
ind : ind + 1; - for (int i = update_start; i < update_end; i++) { - int node_i = alloc->parse_seq_len ? alloc->parse_seq[i] : i; - struct ggml_tensor * node = gf->nodes[node_i]; - - for (int j = 0; j < GGML_MAX_SRC; j++) { - struct ggml_tensor * parent = node->src[j]; - if (parent == NULL) { - break; - } - struct hash_node * p_hn = hash_get(ht, parent); - p_hn->n_children -= 1; - - //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views); - - if (p_hn->n_children == 0 && p_hn->n_views == 0) { - if (ggml_is_view(parent)) { - struct ggml_tensor * view_src = parent->view_src; - struct hash_node * view_src_hn = hash_get(ht, view_src); - view_src_hn->n_views -= 1; - AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src_hn->n_children, view_src_hn->n_views); - if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src->data != node->data) { - ggml_allocr_free_tensor(alloc, view_src); - } - } - else { - if (parent->data != node->data) { - ggml_allocr_free_tensor(alloc, parent); - } + if (p_hn->n_children == 0 && p_hn->n_views == 0) { + if (ggml_is_view(parent)) { + struct ggml_tensor * view_src = parent->view_src; + struct hash_node * view_src_hn = hash_get(galloc, view_src); + view_src_hn->n_views -= 1; + AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src_hn->n_children, view_src_hn->n_views); + if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0) { + free_node(galloc, view_src); } } + else { + free_node(galloc, parent); + } } } - AT_PRINTF("\n"); - if (alloc->parse_seq_len) { - last_barrier_pos = ind + 1; - } } - } - // free graph outputs here that wouldn't be freed otherwise because they have no children - if (outputs != NULL && outputs[g] != NULL) { - for (int i = 0; outputs[g][i] != NULL; i++) { - struct ggml_tensor * output = outputs[g][i]; - AT_PRINTF("output: %s\n", output->name); - ggml_allocr_free_tensor(alloc, output); + AT_PRINTF("\n"); + if (parse_seq_len) { + last_barrier_pos = ind + 1; } } } +} - return alloc->max_size; +size_t ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, ggml_tallocr_t talloc, struct ggml_cgraph * graph) { + size_t hash_size = graph->visited_hash_table.size; + + // check if the hash table is initialized and large enough + if (galloc->hash_set.size < hash_size) { + if (galloc->hash_set.keys != NULL) { + free(galloc->hash_set.keys); + } + if (galloc->hash_values != NULL) { + free(galloc->hash_values); + } + galloc->hash_set.keys = malloc(sizeof(struct ggml_tensor *) * hash_size); + galloc->hash_set.size = hash_size; + galloc->hash_values = malloc(sizeof(struct hash_node) * hash_size); + } + + // reset hash table + memset(galloc->hash_set.keys, 0, sizeof(struct ggml_tensor *) * hash_size); + memset(galloc->hash_values, 0, sizeof(struct hash_node) * hash_size); + + galloc->talloc = talloc; + ggml_tallocr_alloc_graph_impl(galloc, graph); + galloc->talloc = NULL; + + size_t max_size = ggml_tallocr_max_size(talloc); + + return max_size; } -size_t ggml_allocr_alloc_graph(struct ggml_allocr * alloc, struct ggml_cgraph * graph) { - return ggml_allocr_alloc_graph_n(alloc, &graph, 1, NULL, NULL); +void ggml_gallocr_alloc_graph_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, struct ggml_hash_set hash_set, ggml_tallocr_t * hash_node_talloc) { + const size_t hash_size = hash_set.size; + + GGML_ASSERT(hash_size >= (size_t)(graph->n_nodes + graph->n_leafs)); + + galloc->talloc = NULL; + + // alloc hash_values if needed + if (galloc->hash_values == NULL || 
galloc->hash_values_size < hash_size) { + free(galloc->hash_values); + galloc->hash_values = malloc(sizeof(struct hash_node) * hash_size); + galloc->hash_values_size = hash_size; + } + + // free hash_set.keys if needed + if (galloc->hash_set.keys != NULL) { + free(galloc->hash_set.keys); + } + galloc->hash_set = hash_set; + + // reset hash values + memset(galloc->hash_values, 0, sizeof(struct hash_node) * hash_size); + + galloc->hash_allocs = hash_node_talloc; + + ggml_tallocr_alloc_graph_impl(galloc, graph); + + // remove unowned resources + galloc->hash_set.keys = NULL; + galloc->hash_allocs = NULL; } -size_t ggml_allocr_max_size(struct ggml_allocr * alloc) { - return alloc->max_size; +// legacy API wrapper + +struct ggml_allocr { + ggml_tallocr_t talloc; + ggml_gallocr_t galloc; +}; + +static ggml_allocr_t ggml_allocr_new_impl(ggml_tallocr_t talloc) { + ggml_allocr_t alloc = (ggml_allocr_t)malloc(sizeof(struct ggml_allocr)); + *alloc = (struct ggml_allocr) { + /*.talloc = */ talloc, + /*.galloc = */ ggml_gallocr_new(), + }; + return alloc; +} + +ggml_allocr_t ggml_allocr_new(void * data, size_t size, size_t alignment) { + return ggml_allocr_new_impl(ggml_tallocr_new(data, size, alignment)); +} + +ggml_allocr_t ggml_allocr_new_measure(size_t alignment) { + return ggml_allocr_new_impl(ggml_tallocr_new_measure(alignment)); +} + +ggml_allocr_t ggml_allocr_new_from_buffer(struct ggml_backend_buffer * buffer) { + return ggml_allocr_new_impl(ggml_tallocr_new_from_buffer(buffer)); +} + +ggml_allocr_t ggml_allocr_new_from_backend(struct ggml_backend * backend, size_t size) { + return ggml_allocr_new_impl(ggml_tallocr_new_from_backend(backend, size)); +} + +ggml_allocr_t ggml_allocr_new_measure_from_backend(struct ggml_backend * backend) { + return ggml_allocr_new_impl(ggml_tallocr_new_measure_from_backend(backend)); +} + +struct ggml_backend_buffer * ggml_allocr_get_buffer(ggml_allocr_t alloc) { + return ggml_tallocr_get_buffer(alloc->talloc); +} + +void ggml_allocr_set_parse_seq(ggml_allocr_t alloc, const int * list, int n) { + ggml_gallocr_set_parse_seq(alloc->galloc, list, n); +} + +void ggml_allocr_free(ggml_allocr_t alloc) { + ggml_gallocr_free(alloc->galloc); + ggml_tallocr_free(alloc->talloc); + free(alloc); +} + +bool ggml_allocr_is_measure(ggml_allocr_t alloc) { + return ggml_tallocr_is_measure(alloc->talloc); +} + +void ggml_allocr_reset(ggml_allocr_t alloc) { + ggml_tallocr_reset(alloc->talloc); +} + +void ggml_allocr_alloc(ggml_allocr_t alloc, struct ggml_tensor * tensor) { + ggml_tallocr_alloc(alloc->talloc, tensor); +} + +size_t ggml_allocr_max_size(ggml_allocr_t alloc) { + return ggml_tallocr_max_size(alloc->talloc); +} + +size_t ggml_allocr_alloc_graph(ggml_allocr_t alloc, struct ggml_cgraph * graph) { + return ggml_gallocr_alloc_graph(alloc->galloc, alloc->talloc, graph); } diff --git a/ggml-alloc.h b/ggml-alloc.h index e38758878b91a3..dde2a06bf80309 100644 --- a/ggml-alloc.h +++ b/ggml-alloc.h @@ -6,27 +6,79 @@ extern "C" { #endif +struct ggml_backend; struct ggml_backend_buffer; -GGML_API struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment); -GGML_API struct ggml_allocr * ggml_allocr_new_measure(size_t alignment); -GGML_API struct ggml_allocr * ggml_allocr_new_from_buffer(struct ggml_backend_buffer * buffer); +// +// Legacy API +// + +typedef struct ggml_allocr * ggml_allocr_t; + +// initialize allocator for use with CPU backend only +GGML_API ggml_allocr_t ggml_allocr_new(void * data, size_t size, size_t alignment); +GGML_API ggml_allocr_t 
ggml_allocr_new_measure(size_t alignment);
+
+// initialize allocator for use with ggml-backend
+GGML_API ggml_allocr_t ggml_allocr_new_from_buffer(struct ggml_backend_buffer * buffer);
+GGML_API ggml_allocr_t ggml_allocr_new_from_backend(struct ggml_backend * backend, size_t size); // allocates an owned buffer
+GGML_API ggml_allocr_t ggml_allocr_new_measure_from_backend(struct ggml_backend * backend);
+
+GGML_API struct ggml_backend_buffer * ggml_allocr_get_buffer(ggml_allocr_t alloc);
 // tell the allocator to parse nodes following the order described in the list
 // you should call this if your graph are optimized to execute out-of-order
-GGML_API void ggml_allocr_set_parse_seq(struct ggml_allocr * alloc, const int * list, int n);
-
-GGML_API void ggml_allocr_free (struct ggml_allocr * alloc);
-GGML_API bool ggml_allocr_is_measure (struct ggml_allocr * alloc);
-GGML_API void ggml_allocr_reset (struct ggml_allocr * alloc);
-GGML_API void ggml_allocr_alloc (struct ggml_allocr * alloc, struct ggml_tensor * tensor);
-GGML_API size_t ggml_allocr_alloc_graph(struct ggml_allocr * alloc, struct ggml_cgraph * graph);
-GGML_API size_t ggml_allocr_max_size (struct ggml_allocr * alloc);
-
-GGML_API size_t ggml_allocr_alloc_graph_n(
- struct ggml_allocr * alloc,
- struct ggml_cgraph ** graphs, int n_graphs,
- struct ggml_tensor *** inputs, struct ggml_tensor *** outputs);
+GGML_API void ggml_allocr_set_parse_seq(ggml_allocr_t alloc, const int * list, int n);
+
+GGML_API void ggml_allocr_free (ggml_allocr_t alloc);
+GGML_API bool ggml_allocr_is_measure (ggml_allocr_t alloc);
+GGML_API void ggml_allocr_reset (ggml_allocr_t alloc);
+GGML_API void ggml_allocr_alloc (ggml_allocr_t alloc, struct ggml_tensor * tensor);
+GGML_API size_t ggml_allocr_max_size (ggml_allocr_t alloc);
+
+GGML_API size_t ggml_allocr_alloc_graph(ggml_allocr_t alloc, struct ggml_cgraph * graph);
+
+//
+// ggml-backend v2 API
+//
+
+// Separate tensor and graph allocator objects
+// This is necessary for multi-backend allocation because the graph allocator needs to use multiple tensor allocators
+// The original API is kept as a wrapper around the new API
+
+// Tensor allocator
+typedef struct ggml_tallocr * ggml_tallocr_t;
+
+GGML_API ggml_tallocr_t ggml_tallocr_new(void * data, size_t size, size_t alignment);
+GGML_API ggml_tallocr_t ggml_tallocr_new_measure(size_t alignment);
+GGML_API ggml_tallocr_t ggml_tallocr_new_from_buffer(struct ggml_backend_buffer * buffer);
+GGML_API ggml_tallocr_t ggml_tallocr_new_from_backend(struct ggml_backend * backend, size_t size); // allocates an owned buffer
+GGML_API ggml_tallocr_t ggml_tallocr_new_measure_from_backend(struct ggml_backend * backend);
+
+GGML_API struct ggml_backend_buffer * ggml_tallocr_get_buffer(ggml_tallocr_t talloc);
+
+GGML_API void ggml_tallocr_free (ggml_tallocr_t talloc);
+GGML_API bool ggml_tallocr_is_measure (ggml_tallocr_t talloc);
+GGML_API void ggml_tallocr_reset (ggml_tallocr_t talloc);
+GGML_API void ggml_tallocr_alloc (ggml_tallocr_t talloc, struct ggml_tensor * tensor);
+GGML_API size_t ggml_tallocr_max_size (ggml_tallocr_t talloc);
+
+
+// Graph allocator
+typedef struct ggml_gallocr * ggml_gallocr_t;
+
+GGML_API ggml_gallocr_t ggml_gallocr_new(void);
+GGML_API void ggml_gallocr_free(ggml_gallocr_t galloc);
+
+GGML_API void ggml_gallocr_set_parse_seq(ggml_gallocr_t galloc, const int * list, int n);
+GGML_API size_t ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, ggml_tallocr_t talloc, struct ggml_cgraph * graph);
+
+// Allocate tensors from the allocators given
by the hash table +GGML_API void ggml_gallocr_alloc_graph_n( + ggml_gallocr_t galloc, + struct ggml_cgraph * graph, + struct ggml_hash_set hash_set, + ggml_tallocr_t * hash_node_talloc); #ifdef __cplusplus } diff --git a/ggml-backend-impl.h b/ggml-backend-impl.h new file mode 100644 index 00000000000000..211e3d4247387b --- /dev/null +++ b/ggml-backend-impl.h @@ -0,0 +1,87 @@ +#pragma once + +// ggml-backend internal header + +#include "ggml-backend.h" + +#ifdef __cplusplus +extern "C" { +#endif + + // + // Backend buffer + // + + typedef void * ggml_backend_buffer_context_t; + + struct ggml_backend_buffer_i { + void (*free_buffer) (ggml_backend_buffer_t buffer); + void * (*get_base) (ggml_backend_buffer_t buffer); // get base pointer + size_t (*get_alloc_size)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-allocation callback + void (*init_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // post-allocation callback + void (*free_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-free callback + }; + + struct ggml_backend_buffer { + struct ggml_backend_buffer_i iface; + + ggml_backend_t backend; + ggml_backend_buffer_context_t context; + + size_t size; + }; + + GGML_API ggml_backend_buffer_t ggml_backend_buffer_init( + struct ggml_backend * backend, + struct ggml_backend_buffer_i iface, + ggml_backend_buffer_context_t context, + size_t size); + + // + // Backend + // + + typedef void * ggml_backend_context_t; + + struct ggml_backend_i { + const char * (*get_name)(ggml_backend_t backend); + + void (*free)(ggml_backend_t backend); + + // buffer allocation + ggml_backend_buffer_t (*alloc_buffer)(ggml_backend_t backend, size_t size); + + // get buffer alignment + size_t (*get_alignment)(ggml_backend_t backend); + + // tensor data access + // these functions can be asynchronous, helper functions are provided for synchronous access that automatically call synchronize + void (*set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); + void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); + void (*synchronize) (ggml_backend_t backend); + + // (optional) copy tensor between different backends, allow for single-copy tranfers + void (*cpy_tensor_from)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst); + void (*cpy_tensor_to) (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst); + + // compute graph with a plan + ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, struct ggml_cgraph * cgraph); + void (*graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan); + void (*graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan); + + // compute graph without a plan + void (*graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph); + + // check if the backend supports an operation + bool (*supports_op)(ggml_backend_t backend, const struct ggml_tensor * op); + }; + + struct ggml_backend { + struct ggml_backend_i iface; + + ggml_backend_context_t context; + }; + +#ifdef __cplusplus +} +#endif diff --git a/ggml-backend.c b/ggml-backend.c index ca8d83dafe47c9..f6e5fceed0f4df 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -1,7 +1,9 @@ -#include "ggml-backend.h" +#include "ggml-backend-impl.h" #include "ggml-alloc.h" +#include "ggml-impl.h" #include +#include #include #include #include @@ -33,6 
+35,10 @@ ggml_backend_buffer_t ggml_backend_buffer_init( } void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) { + if (buffer == NULL) { + return; + } + if (buffer->iface.free_buffer != NULL) { buffer->iface.free_buffer(buffer); } @@ -43,15 +49,20 @@ size_t ggml_backend_buffer_get_alignment(ggml_backend_buffer_t buffer) { return ggml_backend_get_alignment(buffer->backend); } -void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) { - return buffer->iface.get_base(buffer); -} - size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) { return buffer->size; } +void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) { + void * base = buffer->iface.get_base(buffer); + + GGML_ASSERT(base != NULL && "backend buffer base cannot be NULL"); + + return base; +} + size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { + // get_alloc_size is optional, defaults to ggml_nbytes if (buffer->iface.get_alloc_size) { return buffer->iface.get_alloc_size(buffer, tensor); } @@ -59,12 +70,14 @@ size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct g } void ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { + // init_tensor is optional if (buffer->iface.init_tensor) { buffer->iface.init_tensor(buffer, tensor); } } void ggml_backend_buffer_free_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { + // free_tensor is optional if (buffer->iface.free_tensor) { buffer->iface.free_tensor(buffer, tensor); } @@ -73,14 +86,21 @@ void ggml_backend_buffer_free_tensor(ggml_backend_buffer_t buffer, struct ggml_t // backend ggml_backend_t ggml_get_backend(const struct ggml_tensor * tensor) { - return tensor->buffer->backend; + return tensor->buffer ? 
tensor->buffer->backend : NULL; } const char * ggml_backend_name(ggml_backend_t backend) { + if (backend == NULL) { + return "NULL"; + } return backend->iface.get_name(backend); } void ggml_backend_free(ggml_backend_t backend) { + if (backend == NULL) { + return; + } + backend->iface.free(backend); } @@ -101,13 +121,23 @@ void ggml_backend_tensor_get_async(const struct ggml_tensor * tensor, void * dat } void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { - ggml_get_backend(tensor)->iface.set_tensor_async(ggml_get_backend(tensor), tensor, data, offset, size); - ggml_get_backend(tensor)->iface.synchronize(ggml_get_backend(tensor)); + ggml_backend_t backend = ggml_get_backend(tensor); + + GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); + GGML_ASSERT(backend != NULL && "tensor backend not set"); + + backend->iface.set_tensor_async(backend, tensor, data, offset, size); + backend->iface.synchronize(backend); } void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { - ggml_get_backend(tensor)->iface.get_tensor_async(ggml_get_backend(tensor), tensor, data, offset, size); - ggml_get_backend(tensor)->iface.synchronize(ggml_get_backend(tensor)); + ggml_backend_t backend = ggml_get_backend(tensor); + + GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); + GGML_ASSERT(backend != NULL && "tensor backend not set"); + + backend->iface.get_tensor_async(backend, tensor, data, offset, size); + backend->iface.synchronize(backend); } void ggml_backend_synchronize(ggml_backend_t backend) { @@ -156,7 +186,7 @@ void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst //printf("dst: %s ne: [%d %d %d %d] nb: [%d %d %d %d]\n", dst->name, (int)dst->ne[0], (int)dst->ne[1], (int)dst->ne[2], (int)dst->ne[3], (int)dst->nb[0], (int)dst->nb[1], (int)dst->nb[2], (int)dst->nb[3]); GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts"); - // printf("cpy tensor %s from %s to %s (%lu bytes)\n", src->name, ggml_backend_name(src->backend), ggml_backend_name(dst->backend), ggml_nbytes(src)); + // fprintf(stderr, "cpy tensor %s from %s to %s (%lu bytes)\n", src->name, ggml_backend_name(src->backend), ggml_backend_name(dst->backend), ggml_nbytes(src)); if (src == dst) { return; @@ -234,6 +264,8 @@ static ggml_backend_buffer_t ggml_backend_cpu_alloc_buffer(ggml_backend_t backen size += TENSOR_ALIGNMENT; // malloc may return an address that is not aligned void * data = malloc(size); // TODO: maybe use GGML_ALIGNED_MALLOC? 
+ GGML_ASSERT(data != NULL && "failed to allocate buffer"); + return ggml_backend_buffer_init(backend, cpu_backend_buffer_i, data, size); } @@ -271,8 +303,7 @@ static void ggml_backend_cpu_cpy_tensor_from(ggml_backend_t backend, struct ggml } static void ggml_backend_cpu_cpy_tensor_to(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst) { - // for a backend such as CUDA that can queue async calls, it is ok to do this asynchronously, but it may not be the case for other backends - ggml_backend_tensor_set_async(dst, src->data, 0, ggml_nbytes(src)); + ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src)); UNUSED(backend); } @@ -383,3 +414,537 @@ void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads) { ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(ggml_backend_t backend_cpu, void * ptr, size_t size) { return ggml_backend_buffer_init(backend_cpu, cpu_backend_buffer_i_from_ptr, ptr, size); } + +// scheduler + +#define GGML_MAX_BACKENDS 4 +#define GGML_MAX_SPLITS 256 +#define GGML_MAX_SPLIT_INPUTS 16 + +struct ggml_backend_sched_split { + ggml_tallocr_t tallocr; + int i_start; + int i_end; + struct ggml_tensor * inputs[GGML_MAX_SPLIT_INPUTS]; + int n_inputs; + struct ggml_cgraph * graph; +}; + +struct ggml_backend_sched { + int n_backends; + ggml_backend_t backends[GGML_MAX_BACKENDS]; + ggml_tallocr_t tallocs[GGML_MAX_BACKENDS]; + + ggml_gallocr_t galloc; + + struct ggml_hash_set hash_set; + ggml_tallocr_t * node_talloc; // [hash_set.size] + struct ggml_tensor * (* node_copies)[GGML_MAX_BACKENDS]; // [hash_set.size][GGML_MAX_BACKENDS] + + struct ggml_cgraph * graph; + struct ggml_backend_sched_split splits[GGML_MAX_SPLITS]; + int n_splits; + + struct ggml_context * ctx; + + // align context_buffer to GGML_MEM_ALIGN + #ifdef _MSC_VER + __declspec(align(GGML_MEM_ALIGN)) + #else + __attribute__((aligned(GGML_MEM_ALIGN))) + #endif + char context_buffer[GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS*sizeof(struct ggml_tensor) + GGML_MAX_SPLITS*sizeof(struct ggml_cgraph)]; +}; + +#define hash_id(node) ggml_hash_find_or_insert(sched->hash_set, node) +#define node_allocr(node) sched->node_talloc[hash_id(node)] + +static bool ggml_is_view_op(enum ggml_op op) { + return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE; +} + +// returns the priority of the backend, lower is better +static int sched_backend_prio(ggml_backend_sched_t sched, ggml_backend_t backend) { + for (int i = 0; i < sched->n_backends; i++) { + if (sched->backends[i] == backend) { + return i; + } + } + return INT_MAX; +} + +static int sched_allocr_prio(ggml_backend_sched_t sched, ggml_tallocr_t allocr) { + for (int i = 0; i < sched->n_backends; i++) { + if (sched->tallocs[i] == allocr) { + return i; + } + } + return INT_MAX; +} + +// returns the backend that should be used for the node based on the current locations +char causes[GGML_DEFAULT_GRAPH_SIZE*4 + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS][128]; // debug, remove +static ggml_backend_t sched_backend_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * node) { + // if the dst tensor is already allocated in a buffer, we must assume that it is critical to keep it there + // ie. kv cache updates + // note that this doesn't allow fallback to CPU. need to add output tensors to the splits to copy the data back to the original backend. 
+ // dst + ggml_backend_t cur_backend = ggml_get_backend(node); + if (cur_backend != NULL) { + sprintf(causes[hash_id(node)], "1.dst"); + return cur_backend; + } + + // view_src + if (node->view_src != NULL && ggml_get_backend(node->view_src) != NULL) { + sprintf(causes[hash_id(node)], "1.vsrc"); + return ggml_get_backend(node->view_src); + } + + // src + int cur_prio = INT_MAX; + size_t cur_size = 0; + + for (int i = 0; i < GGML_MAX_SRC; i++) { + const struct ggml_tensor * src = node->src[i]; + if (src == NULL) { + break; + } + ggml_backend_t src_backend = ggml_get_backend(src); + if (src_backend != NULL) { + int src_prio = sched_backend_prio(sched, src_backend); + size_t src_size = ggml_nbytes(src); + if (src_prio < cur_prio && src_size >= cur_size) { + cur_prio = src_prio; + cur_size = src_size; + cur_backend = src_backend; + sprintf(causes[hash_id(node)], "1.src%d", i); + } + } + } + return cur_backend; +} + +static char * fmt_size(size_t size) { + static char buffer[128]; + if (size >= 1024*1024) { + sprintf(buffer, "%zuM", size/1024/1024); + } else { + sprintf(buffer, "%zuK", size/1024); + } + return buffer; +} + +static void sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { + int cur_split = 0; + for (int i = 0; i < graph->n_nodes; i++) { + if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) { + ggml_backend_t split_backend = ggml_tallocr_get_buffer(sched->splits[cur_split].tallocr)->backend; + fprintf(stderr, "\n## SPLIT #%d: %s # %d inputs: ", cur_split, ggml_backend_name(split_backend), sched->splits[cur_split].n_inputs); + for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) { + fprintf(stderr, "[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name, fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j]))); + } + fprintf(stderr, "\n"); + cur_split++; + } + struct ggml_tensor * node = graph->nodes[i]; + if (ggml_is_view_op(node->op)) { + continue; + } + ggml_tallocr_t node_allocr = node_allocr(node); + ggml_backend_t node_backend = node_allocr ? ggml_tallocr_get_buffer(node_allocr)->backend : NULL; + fprintf(stderr, "node #%3d (%10.10s): %20.20s (%4.4s) [%4.4s %8.8s]:", i, ggml_op_name(node->op), node->name, fmt_size(ggml_nbytes(node)), node_allocr ? ggml_backend_name(node_backend) : "NULL", causes[hash_id(node)]); + for (int j = 0; j < GGML_MAX_SRC; j++) { + struct ggml_tensor * src = node->src[j]; + if (src == NULL) { + break; + } + ggml_tallocr_t src_allocr = node_allocr(src); + ggml_backend_t src_backend = src_allocr ? ggml_tallocr_get_buffer(src_allocr)->backend : NULL; + fprintf(stderr, " %20.20s (%4.4s) [%4.4s %8.8s]", src->name, fmt_size(ggml_nbytes(src)), src_backend ? 
ggml_backend_name(src_backend) : "NULL", causes[hash_id(src)]); + } + fprintf(stderr, "\n"); + } +} + +// creates a copy of the tensor with the same memory layout +static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, const struct ggml_tensor * tensor) { + struct ggml_tensor * dup = ggml_dup_tensor(ctx, tensor); + for (int i = 0; i < GGML_MAX_DIMS; i++) { + dup->nb[i] = tensor->nb[i]; + } + return dup; +} + +// assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend +// TODO: merge passes +static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { + // reset state + size_t hash_size = sched->hash_set.size; + memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size); + memset(sched->node_talloc, 0, sizeof(sched->node_talloc[0]) * hash_size); + memset(sched->node_copies, 0, sizeof(sched->node_copies[0]) * hash_size); + sched->n_splits = 0; + + struct ggml_init_params params = { + /*.mem_size = */ sizeof(sched->context_buffer), + /*.mem_buffer = */ sched->context_buffer, + /*.no_alloc = */ true + }; + + if (sched->ctx != NULL) { + ggml_free(sched->ctx); + } + + sched->ctx = ggml_init(params); + + // pass 1: assign backends to ops with allocated inputs + for (int i = 0; i < graph->n_leafs; i++) { + struct ggml_tensor * leaf = graph->leafs[i]; + if (node_allocr(leaf) != NULL) { + // do not overwrite user assignments + continue; + } + ggml_backend_t leaf_backend = ggml_get_backend(leaf); + if (leaf_backend == NULL && leaf->view_src != NULL) { + leaf_backend = ggml_get_backend(leaf->view_src); + } + if (leaf_backend != NULL) { + node_allocr(leaf) = ggml_backend_sched_get_tallocr(sched, leaf_backend); + } + } + + for (int i = 0; i < graph->n_nodes; i++) { + struct ggml_tensor * node = graph->nodes[i]; + if (node_allocr(node) != NULL) { + // do not overwrite user assignments + continue; + } + ggml_backend_t node_backend = sched_backend_from_cur(sched, node); + if (node_backend != NULL) { + node_allocr(node) = ggml_backend_sched_get_tallocr(sched, node_backend); + } + } + //printf("PASS 1 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); + + // pass 2: assign backends to ops from current assignments + // TODO: + // - reuse sched_backend_from_cur + for (int i = 0; i < graph->n_nodes; i++) { + struct ggml_tensor * node = graph->nodes[i]; + ggml_tallocr_t node_allocr = node_allocr(node); + if (node_allocr == NULL) { + int cur_prio = INT_MAX; + size_t cur_size = 0; + for (int j = 0; j < GGML_MAX_SRC; j++) { + struct ggml_tensor * src = node->src[j]; + if (src == NULL) { + break; + } + ggml_tallocr_t src_allocr = node_allocr(src); + if (src_allocr != NULL) { + int src_prio = sched_allocr_prio(sched, src_allocr); + size_t src_size = ggml_nbytes(src); + if (src_prio < cur_prio && src_size >= cur_size) { + cur_prio = src_prio; + cur_size = src_size; + node_allocr = src_allocr; + sprintf(causes[hash_id(node)], "2.src%d", j); + } + } + } + if (node_allocr != NULL) { + node_allocr(node) = node_allocr; + } + } + } + //printf("PASS 2 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); + + // pass 3: assign backends to remaining src from dst (should only be leafs) + for (int i = 0; i < graph->n_nodes; i++) { + struct ggml_tensor * node = graph->nodes[i]; + ggml_tallocr_t node_allocr = node_allocr(node); + for (int j = 0; j < GGML_MAX_SRC; j++) { + struct ggml_tensor * src = node->src[j]; + if (src == NULL) { + break; + } + ggml_tallocr_t src_allocr = node_allocr(src); + if (src_allocr == 
NULL) { + node_allocr(src) = node_allocr; + } + } + } + //printf("PASS 3 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); + + // pass 4: split graph, find tensors that need to be copied + // TODO: + // - when switching from a less preferred backend to a more preferred backend, check if it is possible to move the switch to an earlier point for the same cost + // find first backend + int cur_split = 0; + for (int i = 0; i < graph->n_nodes; i++) { + struct ggml_tensor * node = graph->nodes[i]; + if (node->view_src == NULL) { + sched->splits[0].tallocr = node_allocr(node); + break; + } + } + sched->splits[0].i_start = 0; + sched->splits[0].n_inputs = 0; + memset(sched->splits[0].inputs, 0, sizeof(sched->splits[0].inputs)); //HACK + ggml_tallocr_t cur_allocr = sched->splits[0].tallocr; + size_t cur_backend_id = sched_allocr_prio(sched, cur_allocr); + for (int i = 0; i < graph->n_nodes; i++) { + struct ggml_tensor * node = graph->nodes[i]; + + if (ggml_is_view_op(node->op)) { + continue; + } + + ggml_tallocr_t node_allocr = node_allocr(node); + + if (node_allocr != cur_allocr) { + sched->splits[cur_split].i_end = i; + cur_split++; + GGML_ASSERT(cur_split < GGML_MAX_SPLITS); + sched->splits[cur_split].tallocr = node_allocr; + sched->splits[cur_split].i_start = i; + sched->splits[cur_split].n_inputs = 0; + memset(sched->splits[cur_split].inputs, 0, sizeof(sched->splits[cur_split].inputs)); //HACK + cur_allocr = node_allocr; + cur_backend_id = sched_allocr_prio(sched, cur_allocr); + } + + // find inputs that are not on the same backend + for (int j = 0; j < GGML_MAX_SRC; j++) { + struct ggml_tensor * src = node->src[j]; + if (src == NULL) { + break; + } + ggml_tallocr_t src_allocr = node_allocr(src); + if (src_allocr != node_allocr) { + int n_inputs = sched->splits[cur_split].n_inputs++; + GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS); + sched->splits[cur_split].inputs[n_inputs] = (struct ggml_tensor *)src; + + // create copies + size_t id = hash_id(src); + if (sched->node_copies[id][cur_backend_id] == NULL) { + struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src); + sched->node_copies[id][cur_backend_id] = tensor_copy; + node_allocr(tensor_copy) = cur_allocr; + ggml_backend_t backend = ggml_tallocr_get_buffer(cur_allocr)->backend; + ggml_format_name(tensor_copy, "%s#%s", ggml_backend_name(backend), src->name); + } + node->src[j] = sched->node_copies[id][cur_backend_id]; + } + } + } + sched->splits[cur_split].i_end = graph->n_nodes; + sched->n_splits = cur_split + 1; + + //fprintf(stderr, "PASS 4 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); fflush(stdout); + +#if 1 + // sanity check: all sources should have the same backend as the node + for (int i = 0; i < graph->n_nodes; i++) { + struct ggml_tensor * node = graph->nodes[i]; + ggml_tallocr_t node_allocr = node_allocr(node); + if (node_allocr == NULL) { + fprintf(stderr, "!!!!!!! %s has no backend\n", node->name); + } + for (int j = 0; j < GGML_MAX_SRC; j++) { + struct ggml_tensor * src = node->src[j]; + if (src == NULL) { + break; + } + ggml_tallocr_t src_allocr = node_allocr(src); + if (src_allocr != node_allocr /* && src_backend != NULL */) { // ignore nulls for now + fprintf(stderr, "!!!! %s has backend %s, src %d (%s) has backend %s\n", + node->name, node_allocr ? ggml_backend_name(ggml_tallocr_get_buffer(node_allocr)->backend) : "NULL", + j, src->name, src_allocr ? 
ggml_backend_name(ggml_tallocr_get_buffer(src_allocr)->backend) : "NULL"); + } + } + } +#endif + + // create copies of the graph for each split + // FIXME: avoid this copy, pass split inputs to ggml_gallocr_alloc_graph_n in some other way + struct ggml_cgraph * graph_copy = ggml_new_graph_custom(sched->ctx, graph->n_nodes + sched->n_splits*GGML_MAX_SPLIT_INPUTS, false); + for (int i = 0; i < sched->n_splits; i++) { + struct ggml_backend_sched_split * split = &sched->splits[i]; + split->graph = ggml_graph_view(sched->ctx, graph, split->i_start, split->i_end); + + // add inputs to the graph copy so that they are allocated by ggml-alloc at the start of the split + for (int j = 0; j < split->n_inputs; j++) { + struct ggml_tensor * input = split->inputs[j]; + struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][sched_allocr_prio(sched, split->tallocr)]; + input_cpy->src[0] = input; + graph_copy->nodes[graph_copy->n_nodes++] = input_cpy; + } + + for (int j = split->i_start; j < split->i_end; j++) { + graph_copy->nodes[graph_copy->n_nodes++] = graph->nodes[j]; + } + } + sched->graph = graph_copy; +} + +static void sched_alloc_splits(ggml_backend_sched_t sched) { + ggml_gallocr_alloc_graph_n( + sched->galloc, + sched->graph, + sched->hash_set, + sched->node_talloc); +} + +static void sched_compute_splits(ggml_backend_sched_t sched) { + uint64_t copy_us[GGML_MAX_BACKENDS] = {0}; + uint64_t compute_us[GGML_MAX_BACKENDS] = {0}; + + struct ggml_backend_sched_split * splits = sched->splits; + + for (int i = 0; i < sched->n_splits; i++) { + struct ggml_backend_sched_split * split = &splits[i]; + ggml_backend_t split_backend = ggml_tallocr_get_buffer(split->tallocr)->backend; + int split_backend_id = sched_backend_prio(sched, split_backend); + + // copy the input tensors to the split backend + uint64_t copy_start_us = ggml_time_us(); + for (int j = 0; j < split->n_inputs; j++) { + struct ggml_tensor * input_cpy = sched->node_copies[hash_id(split->inputs[j])][sched_backend_prio(sched, split_backend)]; + if (split->inputs[j]->buffer == NULL) { + if (split->inputs[j]->view_src == NULL) { + fprintf(stderr, "input %s has no buffer and no view_src\n", split->inputs[j]->name); + exit(1); + } + struct ggml_tensor * view = split->inputs[j]; + view->backend = view->view_src->backend; + view->buffer = view->view_src->buffer; + view->data = (char *)view->view_src->data + view->view_offs; + ggml_backend_buffer_init_tensor(ggml_backend_sched_get_buffer(sched, view->buffer->backend), view); + } + if (input_cpy->buffer == NULL) { + fprintf(stderr, "input_cpy %s has no buffer\n", input_cpy->name); + exit(1); + } + GGML_ASSERT(split->inputs[j]->buffer->backend != input_cpy->buffer->backend); + GGML_ASSERT(input_cpy->buffer->backend == split_backend); + ggml_backend_tensor_copy(split->inputs[j], input_cpy); + } + // ggml_backend_synchronize(split_backend); + int64_t copy_end_us = ggml_time_us(); + copy_us[split_backend_id] += copy_end_us - copy_start_us; + +#if 0 + char split_filename[GGML_MAX_NAME]; + snprintf(split_filename, GGML_MAX_NAME, "split_%i_%s.dot", i, ggml_backend_name(split_backend)); + ggml_graph_dump_dot(split->graph, NULL, split_filename); +#endif + + uint64_t compute_start_us = ggml_time_us(); + ggml_backend_graph_compute(split_backend, split->graph); + // ggml_backend_synchronize(split_backend); + uint64_t compute_end_us = ggml_time_us(); + compute_us[split_backend_id] += compute_end_us - compute_start_us; + } + +#if 0 + // per-backend timings + fprintf(stderr, "sched_compute_splits times 
(%d splits):\n", sched->n_splits); + for (int i = 0; i < sched->n_backends; i++) { + if (copy_us[i] > 0 || compute_us[i] > 0) { + fprintf(stderr, "\t%5.5s: %lu us copy, %lu us compute\n", ggml_backend_name(sched->backends[i]), copy_us[i], compute_us[i]); + } + } +#endif +} + +static void sched_reset(ggml_backend_sched_t sched) { + for (int i = 0; i < sched->n_backends; i++) { + ggml_tallocr_reset(sched->tallocs[i]); + } +} + +ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends) { + GGML_ASSERT(n_backends <= GGML_MAX_BACKENDS); + + struct ggml_backend_sched * sched = malloc(sizeof(struct ggml_backend_sched)); + memset(sched, 0, sizeof(struct ggml_backend_sched)); + + fprintf(stderr, "ggml_backend_sched size: %lu KB\n", sizeof(struct ggml_backend_sched)/1024); + + sched->n_backends = n_backends; + for (int i = 0; i < n_backends; i++) { + sched->backends[i] = backends[i]; + } + + sched->galloc = ggml_gallocr_new(); + + // init measure allocs for each backend + for (int i = 0; i < n_backends; i++) { + sched->tallocs[i] = ggml_tallocr_new_measure_from_backend(backends[i]); + } + + return sched; +} + +void ggml_backend_sched_free(ggml_backend_sched_t sched) { + if (sched == NULL) { + return; + } + for (int i = 0; i < sched->n_backends; i++) { + ggml_tallocr_free(sched->tallocs[i]); + } + ggml_gallocr_free(sched->galloc); + free(sched->hash_set.keys); + free(sched->node_talloc); + free(sched->node_copies); + free(sched); +} + +void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) { + // initialize hash tables + size_t hash_size = measure_graph->visited_hash_table.size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS; + sched->hash_set.size = hash_size; + sched->hash_set.keys = malloc(sizeof(sched->hash_set.keys[0]) * hash_size); + sched->node_talloc = malloc(sizeof(sched->node_talloc[0]) * hash_size); + sched->node_copies = malloc(sizeof(sched->node_copies[0]) * hash_size); + + sched_split_graph(sched, measure_graph); + sched_alloc_splits(sched); + + // allocate buffers and reset allocators + for (int i = 0; i < sched->n_backends; i++) { + size_t size = ggml_tallocr_max_size(sched->tallocs[i]); + ggml_tallocr_free(sched->tallocs[i]); + sched->tallocs[i] = ggml_tallocr_new_from_backend(sched->backends[i], size); + } + + sched_reset(sched); +} + +void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { + GGML_ASSERT(sched->hash_set.size >= graph->visited_hash_table.size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS); + + sched_split_graph(sched, graph); + sched_alloc_splits(sched); + sched_compute_splits(sched); + sched_reset(sched); +} + +ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend) { + int backend_index = sched_backend_prio(sched, backend); + return sched->tallocs[backend_index]; +} + +ggml_backend_buffer_t ggml_backend_sched_get_buffer(ggml_backend_sched_t sched, ggml_backend_t backend) { + int backend_index = sched_backend_prio(sched, backend); + return ggml_tallocr_get_buffer(sched->tallocs[backend_index]); +} + +void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) { + int backend_index = sched_backend_prio(sched, backend); + GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends); + node_allocr(node) = sched->tallocs[backend_index]; +} diff --git a/ggml-backend.h b/ggml-backend.h index da134b0dbed514..966687320ac96d 100644 --- a/ggml-backend.h +++ 
b/ggml-backend.h
@@ -1,51 +1,20 @@
 #pragma once
 #include "ggml.h"
+#include "ggml-alloc.h"
 #ifdef __cplusplus
 extern "C" {
 #endif
- struct ggml_backend;
- struct ggml_backend_buffer;
-
- // type-erased backend-specific types / wrappers
- typedef void * ggml_backend_context_t;
- typedef void * ggml_backend_graph_plan_t;
- typedef void * ggml_backend_buffer_context_t;
-
- // avoid accessing internals of these types
- typedef struct ggml_backend * ggml_backend_t;
- typedef struct ggml_backend_buffer * ggml_backend_buffer_t;
 //
- // backend buffer
+ // Backend buffer
 //
- struct ggml_backend_buffer_i {
- void (*free_buffer) (ggml_backend_buffer_t buffer);
- void * (*get_base) (ggml_backend_buffer_t buffer); // get base pointer
- size_t (*get_alloc_size)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-allocation callback
- void (*init_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // post-allocation callback
- void (*free_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-free callback
- };
-
- // TODO: hide behind API
- struct ggml_backend_buffer {
- struct ggml_backend_buffer_i iface;
-
- ggml_backend_t backend;
- ggml_backend_buffer_context_t context;
-
- size_t size;
- };
+ struct ggml_backend_buffer;
+ typedef struct ggml_backend_buffer * ggml_backend_buffer_t;
 // backend buffer functions
- GGML_API ggml_backend_buffer_t ggml_backend_buffer_init(
- struct ggml_backend * backend,
- struct ggml_backend_buffer_i iface,
- ggml_backend_buffer_context_t context,
- size_t size);
-
 GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer);
 GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer);
 GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer);
@@ -55,50 +24,13 @@ extern "C" {
 GGML_API void ggml_backend_buffer_free_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
 //
- // backend
+ // Backend
 //
- struct ggml_backend_i {
- const char * (*get_name)(ggml_backend_t backend);
-
- void (*free)(ggml_backend_t backend);
-
- // buffer allocation
- ggml_backend_buffer_t (*alloc_buffer)(ggml_backend_t backend, size_t size);
-
- // get buffer alignment
- size_t (*get_alignment)(ggml_backend_t backend);
-
- // tensor data access
- // these functions can be asynchronous, helper functions are provided for synchronous access that automatically call synchronize
- void (*set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
- void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
- void (*synchronize) (ggml_backend_t backend);
-
- // (optional) copy tensor between different backends, allow for single-copy tranfers
- void (*cpy_tensor_from)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
- void (*cpy_tensor_to) (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
-
- // compute graph with a plan
- ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, struct ggml_cgraph * cgraph);
- void (*graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
- void (*graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
-
- // compute graph without a plan
- void (*graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph);
-
- // check if the backend supports an operation
- bool (*supports_op)(ggml_backend_t backend, const struct
ggml_tensor * op); - }; - - // TODO: hide behind API - struct ggml_backend { - struct ggml_backend_i iface; - - ggml_backend_context_t context; - }; + struct ggml_backend; + typedef struct ggml_backend * ggml_backend_t; + typedef void * ggml_backend_graph_plan_t; - // backend helper functions GGML_API ggml_backend_t ggml_get_backend(const struct ggml_tensor * tensor); GGML_API const char * ggml_backend_name(ggml_backend_t backend); @@ -133,11 +65,72 @@ extern "C" { GGML_API ggml_backend_t ggml_backend_cpu_init(void); GGML_API bool ggml_backend_is_cpu(ggml_backend_t backend); - GGML_API void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads); + // Create a backend buffer from an existing pointer GGML_API ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(ggml_backend_t backend_cpu, void * ptr, size_t size); + + // + // Backend scheduler + // + + // The backend scheduler allows for multiple backends to be used together + // Handles compute buffer allocation, assignment of tensors to backends, and copying of tensors between backends + // The backends are selected based on: + // - the backend that supports the operation + // - the location of the pre-allocated tensors (e.g. the weights) + /* + Example usage: + + sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, num_backends); + // sched is initialized with measure allocators and cannot be used until allocated with a measure graph + + // initialize buffers from a measure graph + measure_graph = build_graph(sched); // use the allocr to allocate inputs as needed + + // in build_graph: + build_graph(...) { + // allocating tensors in a specific backend (optional, recommended: pre-allocate inputs in a different buffer) + alloc_cpu = ggml_backend_sched_get_allocr(sched, backend_cpu); + ggml_allocr_alloc(alloc_cpu, tensor); + + // manually assigning nodes to a backend (optional, shouldn't be needed in most cases) + struct ggml_tensor * node = ggml_mul_mat(ctx, ...); + ggml_backend_sched_set_node_backend(sched, node, backend_gpu); + } + + // allocate backend buffers from measure graph + ggml_backend_sched_init_measure(sched, measure_graph); + + // the scheduler is now ready to compute graphs + + // compute + graph = build_graph(sched); + ggml_backend_sched_graph_compute(sched, graph); + */ + + struct ggml_backend_sched; + typedef struct ggml_backend_sched * ggml_backend_sched_t; + + // Initialize a backend scheduler + GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends); + + GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched); + + // Initialize backend buffers from a measure graph + GGML_API void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph); + + GGML_API ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend); + GGML_API ggml_backend_buffer_t ggml_backend_sched_get_buffer (ggml_backend_sched_t sched, ggml_backend_t backend); + + GGML_API void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend); + + // Allocate a graph on the backend scheduler + GGML_API void ggml_backend_sched_graph_compute( + ggml_backend_sched_t sched, + struct ggml_cgraph * graph); + #ifdef __cplusplus } #endif diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 8d03ba6641981f..1634024466542a 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -81,6 +81,7 @@ #include "ggml-cuda.h" #include "ggml.h" +#include "ggml-backend-impl.h" 
#define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products #define CC_VOLTA 700 @@ -7751,11 +7752,11 @@ static size_t g_temp_tensor_extra_index = 0; static ggml_tensor_extra_gpu * ggml_cuda_alloc_temp_tensor_extra() { if (g_temp_tensor_extras == nullptr) { - g_temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_MAX_NODES]; + g_temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_DEFAULT_GRAPH_SIZE]; } size_t alloc_index = g_temp_tensor_extra_index; - g_temp_tensor_extra_index = (g_temp_tensor_extra_index + 1) % GGML_MAX_NODES; + g_temp_tensor_extra_index = (g_temp_tensor_extra_index + 1) % GGML_DEFAULT_GRAPH_SIZE; ggml_tensor_extra_gpu * extra = &g_temp_tensor_extras[alloc_index]; memset(extra, 0, sizeof(*extra)); @@ -8070,11 +8071,11 @@ struct ggml_backend_buffer_context_cuda { ggml_tensor_extra_gpu * ggml_cuda_alloc_temp_tensor_extra() { if (temp_tensor_extras == nullptr) { - temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_MAX_NODES]; + temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_DEFAULT_GRAPH_SIZE]; } size_t alloc_index = temp_tensor_extra_index; - temp_tensor_extra_index = (temp_tensor_extra_index + 1) % GGML_MAX_NODES; + temp_tensor_extra_index = (temp_tensor_extra_index + 1) % GGML_DEFAULT_GRAPH_SIZE; ggml_tensor_extra_gpu * extra = &temp_tensor_extras[alloc_index]; memset(extra, 0, sizeof(*extra)); @@ -8160,7 +8161,12 @@ static ggml_backend_buffer_t ggml_backend_cuda_alloc_buffer(ggml_backend_t backe ggml_cuda_set_device(g_main_device); ggml_backend_buffer_context_cuda * ctx = new ggml_backend_buffer_context_cuda; + + size = std::max(size, (size_t)1); // cudaMalloc returns null for size 0 + + ggml_cuda_set_device(g_main_device); CUDA_CHECK(cudaMalloc(&ctx->device, size)); + return ggml_backend_buffer_init(backend, cuda_backend_buffer_interface, ctx, size); } @@ -8227,6 +8233,8 @@ static void ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph for (int i = 0; i < cgraph->n_nodes; i++) { ggml_tensor * node = cgraph->nodes[i]; + if (node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE) + continue; assert(node->backend == GGML_BACKEND_GPU); for (int j = 0; j < GGML_MAX_SRC; j++) { if (node->src[j] != nullptr) { diff --git a/ggml-impl.h b/ggml-impl.h index 5ec18a50c8da57..d88f261449f058 100644 --- a/ggml-impl.h +++ b/ggml-impl.h @@ -230,7 +230,19 @@ inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) { #endif - // TODO: backend v2 PR +#define GGML_HASHTABLE_FULL ((size_t)-1) +#define GGML_HASHTABLE_ALREADY_EXISTS ((size_t)-2) + +bool ggml_hash_contains (const struct ggml_hash_set hash_set, struct ggml_tensor * key); + +// returns GGML_HASHTABLE_FULL if table is full, otherwise the current index of the key or where it should be inserted +size_t ggml_hash_find (const struct ggml_hash_set hash_set, struct ggml_tensor * key); + +// returns GGML_HAHSHTABLE_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full +size_t ggml_hash_insert ( struct ggml_hash_set hash_set, struct ggml_tensor * key); + +// return index, asserts if table is full +size_t ggml_hash_find_or_insert( struct ggml_hash_set hash_set, struct ggml_tensor * key); #ifdef __cplusplus } diff --git a/ggml-metal.m b/ggml-metal.m index 78ae4485da8e27..c2cda0bf546d30 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -1,5 +1,6 @@ #import "ggml-metal.h" +#import "ggml-backend-impl.h" #import "ggml.h" #import @@ -23,7 +24,7 @@ #define UNUSED(x) (void)(x) -#define 
GGML_MAX_CONCUR (2*GGML_MAX_NODES) +#define GGML_MAX_CONCUR (2*GGML_DEFAULT_GRAPH_SIZE) struct ggml_metal_buffer { const char * name; @@ -744,6 +745,20 @@ void ggml_metal_graph_compute( struct ggml_tensor * src1 = gf->nodes[i]->src[1]; struct ggml_tensor * dst = gf->nodes[i]; + switch (dst->op) { + case GGML_OP_NONE: + case GGML_OP_RESHAPE: + case GGML_OP_VIEW: + case GGML_OP_TRANSPOSE: + case GGML_OP_PERMUTE: + { + // noop -> next node + } continue; + default: + { + } break; + } + const int64_t ne00 = src0 ? src0->ne[0] : 0; const int64_t ne01 = src0 ? src0->ne[1] : 0; const int64_t ne02 = src0 ? src0->ne[2] : 0; @@ -797,14 +812,6 @@ void ggml_metal_graph_compute( //} switch (dst->op) { - case GGML_OP_NONE: - case GGML_OP_RESHAPE: - case GGML_OP_VIEW: - case GGML_OP_TRANSPOSE: - case GGML_OP_PERMUTE: - { - // noop - } break; case GGML_OP_CONCAT: { const int64_t nb = ne00; diff --git a/ggml.c b/ggml.c index 009d5b3985e55f..da78e6de9586ba 100644 --- a/ggml.c +++ b/ggml.c @@ -100,6 +100,49 @@ typedef void * thread_ret_t; #include #endif +#if defined(__APPLE__) +#include +#endif + +#if (defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && \ + (!defined(TARGET_OS_TV) && !defined(TARGET_OS_WATCH)) + +#include + +void ggml_print_backtrace(void) { + /* + #include + #include + + void * trace[100]; + + int nptrs = backtrace(trace, sizeof(trace)/sizeof(trace[0])); + + backtrace_symbols_fd(trace, nptrs, STDERR_FILENO); + */ + + // backtrack_symbols does not show line numbers, use gdb instead + char attach[32]; + snprintf(attach, sizeof(attach), "attach %d", getpid()); + int pid = fork(); + if (pid == 0) { + execlp("gdb", "gdb", "--batch", + "-ex", "set style enabled on", + "-ex", attach, + "-ex", "bt -frame-info source-and-location", + "-ex", "detach", + "-ex", "quit", + NULL); + } else { + waitpid(pid, NULL, 0); + } +} +#else +void ggml_print_backtrace(void) { + // platform not supported +} +#endif + /*#define GGML_PERF*/ #define GGML_DEBUG 0 #define GGML_GELU_FP16 @@ -1352,6 +1395,7 @@ inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); } inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; } inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; } +inline static void ggml_vec_leaky_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 
x[i] : 0.1f*x[i]; } static const float GELU_COEF_A = 0.044715f; static const float GELU_QUICK_COEF = -1.702f; @@ -3769,6 +3813,14 @@ struct ggml_tensor * ggml_relu_inplace( return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_RELU); } +// ggml_leaky + +struct ggml_tensor * ggml_leaky( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_unary(ctx, a, GGML_UNARY_OP_LEAKY); +} + // ggml_gelu struct ggml_tensor * ggml_gelu( @@ -5411,7 +5463,7 @@ struct ggml_tensor * ggml_conv_transpose_2d_p0( // ggml_pool_* -static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, int p) { +static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, float p) { return (ins + 2 * p - ks) / s + 1; } @@ -5458,8 +5510,8 @@ struct ggml_tensor * ggml_pool_2d( int k1, int s0, int s1, - int p0, - int p1) { + float p0, + float p1) { bool is_node = false; @@ -8921,6 +8973,48 @@ static void ggml_compute_forward_silu( } } +// ggml_compute_forward_leaky + +static void ggml_compute_forward_leaky_f32( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + struct ggml_tensor * dst) { + assert(params->ith == 0); + assert(ggml_are_same_shape(src0, dst)); + + if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + return; + } + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + + assert(dst->nb[0] == sizeof(float)); + assert(src0->nb[0] == sizeof(float)); + + for (int i = 0; i < n; i++) { + ggml_vec_leaky_f32(nc, + (float *) ((char *) dst->data + i*( dst->nb[1])), + (float *) ((char *) src0->data + i*(src0->nb[1]))); + } +} + +static void ggml_compute_forward_leaky( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + struct ggml_tensor * dst) { + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_leaky_f32(params, src0, dst); + } break; + default: + { + GGML_ASSERT(false); + } break; + } +} + // ggml_compute_forward_silu_back static void ggml_compute_forward_silu_back_f32( @@ -12454,14 +12548,11 @@ static void ggml_compute_forward_pool_1d( ggml_compute_forward_pool_1d_sk_p0(params, op, src0, k0, dst); } -// ggml_compute_forward_pool_2d_sk_p0 +// ggml_compute_forward_pool_2d -static void ggml_compute_forward_pool_2d_sk_p0( +static void ggml_compute_forward_pool_2d( const struct ggml_compute_params * params, - const enum ggml_op_pool op, const struct ggml_tensor * src, - const int k0, - const int k1, struct ggml_tensor * dst) { assert(src->type == GGML_TYPE_F32); assert(params->ith == 0); @@ -12470,6 +12561,14 @@ static void ggml_compute_forward_pool_2d_sk_p0( return; } + const int32_t * opts = (const int32_t *)dst->op_params; + enum ggml_op_pool op = opts[0]; + const int k0 = opts[1]; + const int k1 = opts[2]; + const int s0 = opts[3]; + const int s1 = opts[4]; + const int p0 = opts[5]; + const int p1 = opts[6]; const char * cdata = (const char*)src->data; const char * const data_end = cdata + ggml_nbytes(src); @@ -12480,6 +12579,8 @@ static void ggml_compute_forward_pool_2d_sk_p0( float * dplane = (float *)dst->data; const int ka = k0 * k1; + const int offset0 = -p0; + const int offset1 = -p1; while (cdata < data_end) { for (int oy = 0; oy < py; ++oy) { @@ -12492,13 +12593,15 @@ static void ggml_compute_forward_pool_2d_sk_p0( case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break; } - const int ix = ox * k0; - const int iy = oy * k1; + const int ix = offset0 + ox * s0; + const int iy = offset1 + oy * s1; for (int ky = 0; ky < k1; ++ky) { + if (iy + ky < 0 || iy + ky >= src->ne[1]) 
continue; const float * const srow = (const float *)(cdata + src->nb[1] * (iy + ky)); for (int kx = 0; kx < k0; ++kx) { int j = ix + kx; + if (j < 0 || j >= src->ne[0]) continue; switch (op) { case GGML_OP_POOL_AVG: *out += srow[j]; break; case GGML_OP_POOL_MAX: if (srow[j] > *out) *out = srow[j]; break; @@ -12519,29 +12622,6 @@ static void ggml_compute_forward_pool_2d_sk_p0( } } -// ggml_compute_forward_pool_2d - -static void ggml_compute_forward_pool_2d( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - - const int32_t * opts = (const int32_t *)dst->op_params; - enum ggml_op_pool op = opts[0]; - const int k0 = opts[1]; - const int k1 = opts[2]; - const int s0 = opts[3]; - const int s1 = opts[4]; - const int p0 = opts[5]; - const int p1 = opts[6]; - GGML_ASSERT(p0 == 0); - GGML_ASSERT(p1 == 0); // padding not supported - GGML_ASSERT(k0 == s0); - GGML_ASSERT(k1 == s1); // only s = k supported - - ggml_compute_forward_pool_2d_sk_p0(params, op, src0, k0, k1, dst); -} - // ggml_compute_forward_upscale static void ggml_compute_forward_upscale_f32( @@ -13743,6 +13823,10 @@ static void ggml_compute_forward_unary( { ggml_compute_forward_silu(params, src0, dst); } break; + case GGML_UNARY_OP_LEAKY: + { + ggml_compute_forward_leaky(params, src0, dst); + } break; default: { GGML_ASSERT(false); @@ -14651,62 +14735,109 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm //////////////////////////////////////////////////////////////////////////////// -static_assert(GGML_GRAPH_HASHTABLE_SIZE > GGML_MAX_NODES * 2, "GGML_GRAPH_HT_SIZE is too small"); +static size_t ggml_hash_size(size_t min_sz) { + // next primes after powers of two + static const size_t primes[] = { + 2, 3, 5, 11, 17, 37, 67, 131, 257, 521, 1031, + 2053, 4099, 8209, 16411, 32771, 65537, 131101, + 262147, 524309, 1048583, 2097169, 4194319, 8388617, + 16777259, 33554467, 67108879, 134217757, 268435459, + 536870923, 1073741827, 2147483659 + }; + static const size_t n_primes = sizeof(primes)/sizeof(primes[0]); + + // find the smallest prime that is larger or equal to min_sz + size_t l = 0; + size_t r = n_primes; + while (l < r) { + size_t m = (l + r)/2; + if (primes[m] < min_sz) { + l = m + 1; + } else { + r = m; + } + } + size_t sz = l < n_primes ? 
primes[l] : min_sz | 1; + return sz; +} -static size_t hash(void * p) { - return (size_t)p % GGML_GRAPH_HASHTABLE_SIZE; +static size_t ggml_hash(const void * p) { + return (size_t)p; } -static size_t hash_find(void * hash_table[], void * p) { - size_t h = hash(p); +size_t ggml_hash_find(const struct ggml_hash_set hash_set, struct ggml_tensor * key) { + size_t h = ggml_hash(key) % hash_set.size; // linear probing size_t i = h; - while (hash_table[i] != NULL && hash_table[i] != p) { - i = (i + 1) % GGML_GRAPH_HASHTABLE_SIZE; + while (hash_set.keys[i] != NULL && hash_set.keys[i] != key) { + i = (i + 1) % hash_set.size; if (i == h) { // visited all hash table entries -> not found - return GGML_GRAPH_HASHTABLE_SIZE; + return GGML_HASHTABLE_FULL; } } return i; } -static bool hash_insert(void * hash_table[], void * p) { - size_t i = hash_find(hash_table, p); +bool ggml_hash_contains(struct ggml_hash_set hash_set, struct ggml_tensor * key) { + size_t i = ggml_hash_find(hash_set, key); + return i != GGML_HASHTABLE_FULL && hash_set.keys[i] == key; +} - GGML_ASSERT(i < GGML_GRAPH_HASHTABLE_SIZE); // assert that not full +size_t ggml_hash_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key) { + size_t i = ggml_hash_find(hash_set, key); - if (hash_table[i] == p) { - return true; + GGML_ASSERT(i != GGML_HASHTABLE_FULL); + + if (hash_set.keys[i] == key) { + return GGML_HASHTABLE_ALREADY_EXISTS; } // insert - GGML_ASSERT(hash_table[i] == NULL); - hash_table[i] = p; - return false; + GGML_ASSERT(hash_set.keys[i] == NULL); + hash_set.keys[i] = key; + return i; +} + +size_t ggml_hash_find_or_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key) { + size_t i = ggml_hash_find(hash_set, key); + + GGML_ASSERT(i != GGML_HASHTABLE_FULL); + + hash_set.keys[i] = key; + return i; +} + +static struct ggml_hash_set ggml_hash_set_new(size_t size) { + size = ggml_hash_size(size); + struct ggml_hash_set result; + result.size = size; + result.keys = malloc(sizeof(struct ggml_tensor *) * size); + memset(result.keys, 0, sizeof(struct ggml_tensor *) * size); + return result; } -static bool hash_contains(void * hash_table[], void * p) { - size_t i = hash_find(hash_table, p); - return (i < GGML_GRAPH_HASHTABLE_SIZE) && (hash_table[i] == p); +static void ggml_hash_set_free(struct ggml_hash_set hash_set) { + free(hash_set.keys); } struct hash_map { - void * keys[GGML_GRAPH_HASHTABLE_SIZE]; - void * vals[GGML_GRAPH_HASHTABLE_SIZE]; + struct ggml_hash_set set; + struct ggml_tensor ** vals; }; -static struct hash_map * new_hash_map(void) { +static struct hash_map * ggml_new_hash_map(size_t size) { struct hash_map * result = malloc(sizeof(struct hash_map)); - for (int i=0; ikeys[i] = NULL; - result->vals[i] = NULL; - } + result->set = ggml_hash_set_new(size); + result->vals = malloc(sizeof(struct ggml_tensor *) * result->set.size); + memset(result->vals, 0, sizeof(struct ggml_tensor *) * result->set.size); return result; } -static void free_hash_map(struct hash_map * map) { +static void ggml_hash_map_free(struct hash_map * map) { + ggml_hash_set_free(map->set); + free(map->vals); free(map); } @@ -14726,7 +14857,7 @@ static struct ggml_tensor * ggml_recompute_graph_node( return node; } - if (!hash_contains(graph->visited_hash_table, node)) { + if (!ggml_hash_contains(graph->visited_hash_table, node)) { return node; } @@ -14741,17 +14872,17 @@ static struct ggml_tensor * ggml_recompute_graph_node( return node; } - size_t i = hash_find(replacements->keys, node); - GGML_ASSERT(i < GGML_GRAPH_HASHTABLE_SIZE); // 
assert that not full - if (replacements->keys[i] == node) { - return (struct ggml_tensor *) replacements->vals[i]; + size_t i = ggml_hash_find(replacements->set, node); + GGML_ASSERT(i != GGML_HASHTABLE_FULL); // assert that not full + if (replacements->set.keys[i] == node) { + return replacements->vals[i]; } struct ggml_tensor * clone = ggml_new_tensor(ctx, node->type, node->n_dims, node->ne); // insert clone into replacements - GGML_ASSERT(replacements->keys[i] == NULL); // assert that we don't overwrite - replacements->keys[i] = node; + GGML_ASSERT(replacements->set.keys[i] == NULL); // assert that we don't overwrite + replacements->set.keys[i] = node; replacements->vals[i] = clone; clone->op = node->op; @@ -14788,26 +14919,26 @@ void ggml_build_backward_gradient_checkpointing( struct ggml_cgraph * gb_tmp, struct ggml_tensor * * checkpoints, int n_checkpoints) { - *gb_tmp = *gf; + ggml_graph_cpy(gf, gb_tmp); ggml_build_backward_expand(ctx, gf, gb_tmp, true); if (n_checkpoints <= 0) { - *gb = *gb_tmp; + ggml_graph_cpy(gb_tmp, gb); return; } - struct hash_map * replacements = new_hash_map(); + struct hash_map * replacements = ggml_new_hash_map(gf->n_nodes + gf->n_leafs + n_checkpoints); // insert checkpoints in replacements for (int i = 0; i < n_checkpoints; ++i) { - size_t k = hash_find(replacements->keys, checkpoints[i]); - GGML_ASSERT(k < GGML_GRAPH_HASHTABLE_SIZE); // assert that not full - GGML_ASSERT(replacements->keys[k] == NULL); // assert that we don't overwrite - replacements->keys[k] = checkpoints[i]; - replacements->vals[k] = checkpoints[i]; + size_t k = ggml_hash_find(replacements->set, checkpoints[i]); + GGML_ASSERT(k != GGML_HASHTABLE_FULL); // assert that not full + GGML_ASSERT(replacements->set.keys[k] == NULL); // assert that we don't overwrite + replacements->set.keys[k] = checkpoints[i]; + replacements->vals[k] = checkpoints[i]; } - *gb = *gf; + ggml_graph_cpy(gf, gb); // rewrite gb_tmp->nodes[gf->n_nodes:gb_tmp->n_nodes], // replacing references to gb_tmp->nodes[0:gf->n_nodes] ( == gf->nodes[0:gf->n_nodes]), // by recomputing them from checkpoints @@ -14824,21 +14955,21 @@ void ggml_build_backward_gradient_checkpointing( ggml_build_forward_expand(gb, node); } - free_hash_map(replacements); + ggml_hash_map_free(replacements); } // functions to change gradients considering the case that input a might be initial gradient with zero value -static struct ggml_tensor * ggml_add_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void * zero_table[]) { - if (hash_contains(zero_table, a)) { +static struct ggml_tensor * ggml_add_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) { + if (ggml_hash_contains(zero_table, a)) { return b; } else { return ggml_add_impl(ctx, a, b, false); } } -static struct ggml_tensor * ggml_acc_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, void * zero_table[]) { - if (hash_contains(zero_table, a)) { +static struct ggml_tensor * ggml_acc_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, struct ggml_hash_set zero_table) { + if (ggml_hash_contains(zero_table, a)) { struct ggml_tensor * a_zero = ggml_scale(ctx, a, ggml_new_f32(ctx, 0)); return ggml_acc_impl(ctx, a_zero, b, nb1, nb2, nb3, offset, false); } else { @@ -14846,23 +14977,23 @@ static struct ggml_tensor * ggml_acc_or_set(struct 
ggml_context * ctx, struct gg } } -static struct ggml_tensor * ggml_add1_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void * zero_table[]) { - if (hash_contains(zero_table, a)) { +static struct ggml_tensor * ggml_add1_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) { + if (ggml_hash_contains(zero_table, a)) { return ggml_repeat(ctx, b, a); } else { return ggml_add1_impl(ctx, a, b, false); } } -static struct ggml_tensor * ggml_sub_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, void * zero_table[]) { - if (hash_contains(zero_table, a)) { +static struct ggml_tensor * ggml_sub_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) { + if (ggml_hash_contains(zero_table, a)) { return ggml_neg(ctx, b); } else { return ggml_sub_impl(ctx, a, b, false); } } -static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, void * zero_table[]) { +static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, struct ggml_hash_set zero_table) { struct ggml_tensor * src0 = tensor->src[0]; struct ggml_tensor * src1 = tensor->src[1]; @@ -15695,7 +15826,7 @@ static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * } // check if already visited - if (hash_insert(cgraph->visited_hash_table, node)) { + if (ggml_hash_insert(cgraph->visited_hash_table, node) == GGML_HASHTABLE_ALREADY_EXISTS) { return; } @@ -15711,7 +15842,7 @@ static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * if (node->op == GGML_OP_NONE && node->grad == NULL) { // reached a leaf node, not part of the gradient graph (e.g. 
a constant) - GGML_ASSERT(cgraph->n_leafs < GGML_MAX_NODES); + GGML_ASSERT(cgraph->n_leafs < cgraph->size); if (strlen(node->name) == 0) { ggml_format_name(node, "leaf_%d", cgraph->n_leafs); @@ -15720,22 +15851,24 @@ static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * cgraph->leafs[cgraph->n_leafs] = node; cgraph->n_leafs++; } else { - GGML_ASSERT(cgraph->n_nodes < GGML_MAX_NODES); + GGML_ASSERT(cgraph->n_nodes < cgraph->size); if (strlen(node->name) == 0) { ggml_format_name(node, "node_%d", cgraph->n_nodes); } cgraph->nodes[cgraph->n_nodes] = node; - cgraph->grads[cgraph->n_nodes] = node->grad; + if (cgraph->grads) { + cgraph->grads[cgraph->n_nodes] = node->grad; + } cgraph->n_nodes++; } } static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) { if (!expand) { - cgraph->n_nodes = 0; - cgraph->n_leafs = 0; + // TODO: this branch isn't accessible anymore, maybe move this to ggml_build_forward_expand + ggml_graph_clear(cgraph); } const int n0 = cgraph->n_nodes; @@ -15756,25 +15889,6 @@ void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * ggml_build_forward_impl(cgraph, tensor, true); } -struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) { - struct ggml_cgraph result = { - /*.n_nodes =*/ 0, - /*.n_leafs =*/ 0, - /*.nodes =*/ { NULL }, - /*.grads =*/ { NULL }, - /*.leafs =*/ { NULL }, - /*.hash_table =*/ { NULL }, - /*.order =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT, - /*.perf_runs =*/ 0, - /*.perf_cycles =*/ 0, - /*.perf_time_us =*/ 0, - }; - - ggml_build_forward_impl(&result, tensor, false); - - return result; -} - void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep) { GGML_ASSERT(gf->n_nodes > 0); @@ -15791,11 +15905,10 @@ void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * } // remember original gradients which start with zero values - void ** zero_table = malloc(sizeof(void *) * GGML_GRAPH_HASHTABLE_SIZE); - memset(zero_table, 0, sizeof(void*) * GGML_GRAPH_HASHTABLE_SIZE); + struct ggml_hash_set zero_table = ggml_hash_set_new(gf->size); for (int i = 0; i < gf->n_nodes; i++) { if (gf->grads[i]) { - hash_insert(zero_table, gf->grads[i]); + ggml_hash_insert(zero_table, gf->grads[i]); } } @@ -15818,26 +15931,54 @@ void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * } } - free(zero_table); + ggml_hash_set_free(zero_table); } -struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) { - struct ggml_cgraph result = *gf; - ggml_build_backward_expand(ctx, gf, &result, keep); - return result; +static size_t ggml_graph_nbytes(size_t size, bool grads) { + size_t nbytes = sizeof(struct ggml_cgraph); + nbytes += size * sizeof(struct ggml_tensor *) * 2; // leafs + nodes + if (grads) { + nbytes += size * sizeof(struct ggml_tensor *); // grads + } + nbytes += ggml_hash_size(size * 2) * sizeof(struct ggml_tensor *); // hash set + return nbytes; } -struct ggml_cgraph * ggml_new_graph(struct ggml_context * ctx) { - struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_GRAPH, GGML_GRAPH_SIZE); +size_t ggml_graph_overhead_custom(size_t size, bool grads) { + return GGML_OBJECT_SIZE + GGML_PAD(ggml_graph_nbytes(size, grads), GGML_MEM_ALIGN); +} + +size_t ggml_graph_overhead(void) { + return ggml_graph_overhead_custom(GGML_DEFAULT_GRAPH_SIZE, false); +} + +struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * 
ctx, size_t size, bool grads) { + const size_t obj_size = ggml_graph_nbytes(size, grads); + struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_GRAPH, obj_size); struct ggml_cgraph * cgraph = (struct ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs); + struct ggml_tensor ** data_start = (struct ggml_tensor **) (cgraph + 1); + + size_t hash_size = ggml_hash_size(size * 2); + struct ggml_tensor ** nodes_ptr = data_start; + struct ggml_tensor ** leafs_ptr = nodes_ptr + size; + struct ggml_tensor ** hash_keys_ptr = leafs_ptr + size; + struct ggml_tensor ** grads_ptr = grads ? hash_keys_ptr + hash_size : NULL; + + // check that we allocated the correct amount of memory + assert(obj_size == (size_t) ( + (grads ? (char *)(grads_ptr + size) : (char *)(hash_keys_ptr + hash_size)) - (char *)cgraph)); + + memset(hash_keys_ptr, 0, hash_size * sizeof(struct ggml_tensor *)); + *cgraph = (struct ggml_cgraph) { + /*.size =*/ size, /*.n_nodes =*/ 0, /*.n_leafs =*/ 0, - /*.nodes =*/ { NULL }, - /*.grads =*/ { NULL }, - /*.leafs =*/ { NULL }, - /*.hash_table =*/ { NULL }, + /*.nodes =*/ nodes_ptr, + /*.grads =*/ grads_ptr, + /*.leafs =*/ leafs_ptr, + /*.hash_table =*/ { hash_size, hash_keys_ptr }, /*.order =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT, /*.perf_runs =*/ 0, /*.perf_cycles =*/ 0, @@ -15847,14 +15988,85 @@ struct ggml_cgraph * ggml_new_graph(struct ggml_context * ctx) { return cgraph; } -struct ggml_cgraph * ggml_build_forward_ctx(struct ggml_context * ctx, struct ggml_tensor * tensor) { - struct ggml_cgraph * cgraph = ggml_new_graph(ctx); - ggml_build_forward_impl(cgraph, tensor, false); +struct ggml_cgraph * ggml_new_graph(struct ggml_context * ctx) { + return ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, false); +} + +struct ggml_cgraph * ggml_graph_view(struct ggml_context * ctx, struct ggml_cgraph * cgraph0, int i0, int i1) { + const size_t obj_size = sizeof(struct ggml_cgraph); + struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_GRAPH, obj_size); + struct ggml_cgraph * cgraph = (struct ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs); + + *cgraph = (struct ggml_cgraph) { + /*.size =*/ 0, + /*.n_nodes =*/ i1 - i0, + /*.n_leafs =*/ 0, + /*.nodes =*/ cgraph0->nodes + i0, + /*.grads =*/ cgraph0->grads ? 
cgraph0->grads + i0 : NULL, + /*.leafs =*/ NULL, + /*.hash_table =*/ { 0, NULL }, + /*.order =*/ cgraph0->order, + /*.perf_runs =*/ 0, + /*.perf_cycles =*/ 0, + /*.perf_time_us =*/ 0, + }; + return cgraph; } -size_t ggml_graph_overhead(void) { - return GGML_OBJECT_SIZE + GGML_PAD(GGML_GRAPH_SIZE, GGML_MEM_ALIGN); +void ggml_graph_cpy(struct ggml_cgraph * src, struct ggml_cgraph * dst) { + GGML_ASSERT(dst->size >= src->n_leafs); + GGML_ASSERT(dst->size >= src->n_nodes); + GGML_ASSERT(dst->visited_hash_table.size >= src->visited_hash_table.size); + + dst->n_leafs = src->n_leafs; + dst->n_nodes = src->n_nodes; + dst->order = src->order; + + for (int i = 0; i < src->n_leafs; ++i) { + dst->leafs[i] = src->leafs[i]; + } + + for (int i = 0; i < src->n_nodes; ++i) { + dst->nodes[i] = src->nodes[i]; + } + + if (src->grads) { + GGML_ASSERT(dst->grads != NULL); + for (int i = 0; i < src->n_nodes; ++i) { + dst->grads[i] = src->grads[i]; + } + } + + for (size_t i = 0; i < src->visited_hash_table.size; ++i) { + if (src->visited_hash_table.keys[i]) { + ggml_hash_insert(dst->visited_hash_table, src->visited_hash_table.keys[i]); + } + } +} + +struct ggml_cgraph * ggml_graph_dup(struct ggml_context * ctx, struct ggml_cgraph * cgraph) { + struct ggml_cgraph * result = ggml_new_graph_custom(ctx, cgraph->size, cgraph->grads != NULL); + ggml_graph_cpy(cgraph, result); + return result; +} + +void ggml_graph_reset(struct ggml_cgraph * cgraph) { + GGML_ASSERT(cgraph->grads != NULL); + + for (int i = 0; i < cgraph->n_nodes; i++) { + struct ggml_tensor * grad = cgraph->grads[i]; + + if (grad) { + ggml_set_zero(grad); + } + } +} + +void ggml_graph_clear(struct ggml_cgraph * cgraph) { + cgraph->n_leafs = 0; + cgraph->n_nodes = 0; + memset(cgraph->visited_hash_table.keys, 0, cgraph->visited_hash_table.size * sizeof(struct ggml_tensor *)); } // @@ -16007,13 +16219,252 @@ static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const node->perf_time_us += time_us_cur; } +static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { + int n_tasks = 0; + + switch (node->op) { + case GGML_OP_CPY: + case GGML_OP_DUP: + case GGML_OP_ADD: + case GGML_OP_ADD1: + case GGML_OP_ACC: + { + n_tasks = n_threads; + } break; + case GGML_OP_SUB: + case GGML_OP_DIV: + case GGML_OP_SQR: + case GGML_OP_SQRT: + case GGML_OP_LOG: + case GGML_OP_SUM: + case GGML_OP_SUM_ROWS: + case GGML_OP_MEAN: + case GGML_OP_ARGMAX: + case GGML_OP_REPEAT: + case GGML_OP_REPEAT_BACK: + { + n_tasks = 1; + } break; + case GGML_OP_UNARY: + switch (ggml_get_unary_op(node)) { + case GGML_UNARY_OP_ABS: + case GGML_UNARY_OP_SGN: + case GGML_UNARY_OP_NEG: + case GGML_UNARY_OP_STEP: + case GGML_UNARY_OP_TANH: + case GGML_UNARY_OP_ELU: + case GGML_UNARY_OP_RELU: + case GGML_UNARY_OP_LEAKY: + { + n_tasks = 1; + } break; + + case GGML_UNARY_OP_GELU: + case GGML_UNARY_OP_GELU_QUICK: + case GGML_UNARY_OP_SILU: + { + n_tasks = n_threads; + } break; + } + break; + case GGML_OP_SILU_BACK: + case GGML_OP_MUL: + case GGML_OP_NORM: + case GGML_OP_RMS_NORM: + case GGML_OP_RMS_NORM_BACK: + case GGML_OP_GROUP_NORM: + case GGML_OP_CONCAT: + { + n_tasks = n_threads; + } break; + case GGML_OP_MUL_MAT: + { + n_tasks = n_threads; + + // TODO: use different scheduling for different matrix sizes + //const int nr0 = ggml_nrows(node->src[0]); + //const int nr1 = ggml_nrows(node->src[1]); + + //n_tasks = MIN(n_threads, MAX(1, nr0/128)); + //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks%d\n", nr0, nr1, nr0*nr1, n_tasks); + +#if defined(GGML_USE_CUBLAS) + if 
(ggml_cuda_can_mul_mat(node->src[0], node->src[1], node)) { + n_tasks = 1; // TODO: this actually is doing nothing + // the threads are still spinning + } +#elif defined(GGML_USE_CLBLAST) + if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) { + n_tasks = 1; // TODO: this actually is doing nothing + // the threads are still spinning + } +#endif +#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) + if (ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) { + n_tasks = 1; // TODO: this actually is doing nothing + // the threads are still spinning + } +#endif + } break; + case GGML_OP_OUT_PROD: + { + n_tasks = n_threads; + } break; + case GGML_OP_SCALE: + case GGML_OP_SET: + case GGML_OP_CONT: + case GGML_OP_RESHAPE: + case GGML_OP_VIEW: + case GGML_OP_PERMUTE: + case GGML_OP_TRANSPOSE: + case GGML_OP_GET_ROWS: + case GGML_OP_GET_ROWS_BACK: + case GGML_OP_DIAG: + { + n_tasks = 1; + } break; + case GGML_OP_DIAG_MASK_ZERO: + case GGML_OP_DIAG_MASK_INF: + case GGML_OP_SOFT_MAX: + case GGML_OP_SOFT_MAX_BACK: + case GGML_OP_ROPE: + case GGML_OP_ROPE_BACK: + case GGML_OP_ADD_REL_POS: + { + n_tasks = n_threads; + } break; + case GGML_OP_ALIBI: + { + n_tasks = 1; //TODO + } break; + case GGML_OP_CLAMP: + { + n_tasks = 1; //TODO + } break; + case GGML_OP_CONV_1D: + { + n_tasks = n_threads; + } break; + case GGML_OP_CONV_1D_STAGE_0: + { + n_tasks = n_threads; + } break; + case GGML_OP_CONV_1D_STAGE_1: + { + n_tasks = n_threads; + } break; + case GGML_OP_CONV_TRANSPOSE_1D: + { + n_tasks = n_threads; + } break; + case GGML_OP_CONV_2D: + { + n_tasks = n_threads; + } break; + case GGML_OP_CONV_2D_STAGE_0: + { + n_tasks = n_threads; + } break; + case GGML_OP_CONV_2D_STAGE_1: + { + n_tasks = n_threads; + } break; + case GGML_OP_CONV_TRANSPOSE_2D: + { + n_tasks = n_threads; + } break; + case GGML_OP_POOL_1D: + case GGML_OP_POOL_2D: + { + n_tasks = 1; + } break; + case GGML_OP_UPSCALE: + { + n_tasks = n_threads; + } break; + case GGML_OP_FLASH_ATTN: + { + n_tasks = n_threads; + } break; + case GGML_OP_FLASH_FF: + { + n_tasks = n_threads; + } break; + case GGML_OP_FLASH_ATTN_BACK: + { + n_tasks = n_threads; + } break; + case GGML_OP_WIN_PART: + case GGML_OP_WIN_UNPART: + case GGML_OP_GET_REL_POS: + case GGML_OP_MAP_UNARY: + case GGML_OP_MAP_BINARY: + case GGML_OP_MAP_CUSTOM1_F32: + case GGML_OP_MAP_CUSTOM2_F32: + case GGML_OP_MAP_CUSTOM3_F32: + { + n_tasks = 1; + } break; + case GGML_OP_MAP_CUSTOM1: + { + struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) node->op_params; + if (p->n_tasks == GGML_N_TASKS_MAX) { + n_tasks = n_threads; + } else { + n_tasks = MIN(p->n_tasks, n_threads); + } + } break; + case GGML_OP_MAP_CUSTOM2: + { + struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) node->op_params; + if (p->n_tasks == GGML_N_TASKS_MAX) { + n_tasks = n_threads; + } else { + n_tasks = MIN(p->n_tasks, n_threads); + } + } break; + case GGML_OP_MAP_CUSTOM3: + { + struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) node->op_params; + if (p->n_tasks == GGML_N_TASKS_MAX) { + n_tasks = n_threads; + } else { + n_tasks = MIN(p->n_tasks, n_threads); + } + } break; + case GGML_OP_CROSS_ENTROPY_LOSS: + { + n_tasks = n_threads; + } break; + case GGML_OP_CROSS_ENTROPY_LOSS_BACK: + { + n_tasks = n_threads; + } break; + case GGML_OP_NONE: + { + n_tasks = 1; + } break; + case GGML_OP_COUNT: + { + GGML_ASSERT(false); + } break; + default: + { + GGML_ASSERT(false); + } break; + } + + assert(n_tasks > 0); + + return 
n_tasks; +} + static thread_ret_t ggml_graph_compute_thread(void * data) { struct ggml_compute_state * state = (struct ggml_compute_state *) data; const struct ggml_cgraph * cgraph = state->shared->cgraph; const struct ggml_cplan * cplan = state->shared->cplan; - const int * n_tasks_arr = cplan->n_tasks; const int n_threads = state->shared->n_threads; set_numa_thread_affinity(state->ith, n_threads); @@ -16038,9 +16489,9 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { if (node_n != -1) { /* FINALIZE */ - struct ggml_tensor * node = state->shared->cgraph->nodes[node_n]; + struct ggml_tensor * node = cgraph->nodes[node_n]; if (GGML_OP_HAS_FINALIZE[node->op]) { - params.nth = n_tasks_arr[node_n]; + params.nth = ggml_get_n_tasks(node, n_threads); ggml_compute_forward(¶ms, node); } ggml_graph_compute_perf_stats_node(node, state->shared); @@ -16051,7 +16502,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, node_n, cgraph->n_nodes); struct ggml_tensor * node = cgraph->nodes[node_n]; - const int n_tasks = n_tasks_arr[node_n]; + const int n_tasks = ggml_get_n_tasks(node, n_threads); state->shared->perf_node_start_cycles = ggml_perf_cycles(); state->shared->perf_node_start_time_us = ggml_perf_time_us(); @@ -16109,7 +16560,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { /* COMPUTE */ struct ggml_tensor * node = cgraph->nodes[node_n]; - const int n_tasks = n_tasks_arr[node_n]; + const int n_tasks = ggml_get_n_tasks(node, n_threads); struct ggml_compute_params params = { /*.type =*/ GGML_TASK_COMPUTE, @@ -16143,121 +16594,46 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { struct ggml_tensor * node = cgraph->nodes[i]; + size_t cur = 0; + switch (node->op) { case GGML_OP_CPY: case GGML_OP_DUP: { n_tasks = n_threads; - size_t cur = 0; if (ggml_is_quantized(node->type)) { cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks; } - - work_size = MAX(work_size, cur); } break; case GGML_OP_ADD: case GGML_OP_ADD1: { n_tasks = n_threads; - size_t cur = 0; - if (ggml_is_quantized(node->src[0]->type)) { cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks; } - - work_size = MAX(work_size, cur); } break; case GGML_OP_ACC: { n_tasks = n_threads; - size_t cur = 0; - if (ggml_is_quantized(node->src[0]->type)) { cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks; } - - work_size = MAX(work_size, cur); - } break; - case GGML_OP_SUB: - case GGML_OP_DIV: - case GGML_OP_SQR: - case GGML_OP_SQRT: - case GGML_OP_LOG: - case GGML_OP_SUM: - case GGML_OP_SUM_ROWS: - case GGML_OP_MEAN: - case GGML_OP_ARGMAX: - case GGML_OP_REPEAT: - case GGML_OP_REPEAT_BACK: - { - n_tasks = 1; - } break; - - case GGML_OP_UNARY: - { - switch (ggml_get_unary_op(node)) { - case GGML_UNARY_OP_ABS: - case GGML_UNARY_OP_SGN: - case GGML_UNARY_OP_NEG: - case GGML_UNARY_OP_STEP: - case GGML_UNARY_OP_TANH: - case GGML_UNARY_OP_ELU: - case GGML_UNARY_OP_RELU: - { - n_tasks = 1; - } break; - - case GGML_UNARY_OP_GELU: - case GGML_UNARY_OP_GELU_QUICK: - case GGML_UNARY_OP_SILU: - { - n_tasks = n_threads; - } break; - } } break; - case GGML_OP_SILU_BACK: - case GGML_OP_MUL: - case GGML_OP_NORM: - case GGML_OP_RMS_NORM: - case GGML_OP_RMS_NORM_BACK: - case GGML_OP_GROUP_NORM: - { - n_tasks = n_threads; - } break; - case GGML_OP_CONCAT: case GGML_OP_MUL_MAT: { - n_tasks = n_threads; - - // TODO: use different scheduling for different matrix sizes - //const int nr0 = ggml_nrows(node->src[0]); - 
//const int nr1 = ggml_nrows(node->src[1]); - - //n_tasks = MIN(n_threads, MAX(1, nr0/128)); - //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks%d\n", nr0, nr1, nr0*nr1, n_tasks); - - size_t cur = 0; const enum ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type; -#if defined(GGML_USE_CUBLAS) - if (ggml_cuda_can_mul_mat(node->src[0], node->src[1], node)) { - n_tasks = 1; // TODO: this actually is doing nothing - // the threads are still spinning - } else -#elif defined(GGML_USE_CLBLAST) +#if defined(GGML_USE_CLBLAST) if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) { - n_tasks = 1; // TODO: this actually is doing nothing - // the threads are still spinning cur = ggml_cl_mul_mat_get_wsize(node->src[0], node->src[1], node); } else #endif #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) if (ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) { - n_tasks = 1; // TODO: this actually is doing nothing - // the threads are still spinning if (node->src[0]->type != GGML_TYPE_F32) { // here we need memory just for single 2D matrix from src0 cur = ggml_type_size(GGML_TYPE_F32)*(node->src[0]->ne[0]*node->src[0]->ne[1]); @@ -16266,62 +16642,18 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { #endif if (node->src[1]->type != vec_dot_type) { cur = ggml_type_size(vec_dot_type)*ggml_nelements(node->src[1])/ggml_blck_size(vec_dot_type); - } else { - cur = 0; } - - work_size = MAX(work_size, cur); } break; case GGML_OP_OUT_PROD: { n_tasks = n_threads; - size_t cur = 0; - if (ggml_is_quantized(node->src[0]->type)) { cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks; } - - work_size = MAX(work_size, cur); - } break; - case GGML_OP_SCALE: - { - n_tasks = 1; - } break; - case GGML_OP_SET: - case GGML_OP_CONT: - case GGML_OP_RESHAPE: - case GGML_OP_VIEW: - case GGML_OP_PERMUTE: - case GGML_OP_TRANSPOSE: - case GGML_OP_GET_ROWS: - case GGML_OP_GET_ROWS_BACK: - case GGML_OP_DIAG: - { - n_tasks = 1; - } break; - case GGML_OP_DIAG_MASK_ZERO: - case GGML_OP_DIAG_MASK_INF: - case GGML_OP_SOFT_MAX: - case GGML_OP_SOFT_MAX_BACK: - case GGML_OP_ROPE: - case GGML_OP_ROPE_BACK: - case GGML_OP_ADD_REL_POS: - { - n_tasks = n_threads; - } break; - case GGML_OP_ALIBI: - { - n_tasks = 1; //TODO - } break; - case GGML_OP_CLAMP: - { - n_tasks = 1; //TODO } break; case GGML_OP_CONV_1D: { - n_tasks = n_threads; - GGML_ASSERT(node->src[0]->ne[3] == 1); GGML_ASSERT(node->src[1]->ne[2] == 1); GGML_ASSERT(node->src[1]->ne[3] == 1); @@ -16342,8 +16674,6 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { UNUSED(ne10); UNUSED(ne11); - size_t cur = 0; - if (node->src[0]->type == GGML_TYPE_F16 && node->src[1]->type == GGML_TYPE_F32) { cur = sizeof(ggml_fp16_t)*(ne0*ne1*ew0); @@ -16353,21 +16683,9 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { } else { GGML_ASSERT(false); } - - work_size = MAX(work_size, cur); - } break; - case GGML_OP_CONV_1D_STAGE_0: - { - n_tasks = n_threads; - } break; - case GGML_OP_CONV_1D_STAGE_1: - { - n_tasks = n_threads; } break; case GGML_OP_CONV_TRANSPOSE_1D: { - n_tasks = n_threads; - GGML_ASSERT(node->src[0]->ne[3] == 1); GGML_ASSERT(node->src[1]->ne[2] == 1); GGML_ASSERT(node->src[1]->ne[3] == 1); @@ -16379,7 +16697,6 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { const int64_t ne10 = node->src[1]->ne[0]; // L const int64_t ne11 = node->src[1]->ne[1]; // Cin - size_t cur = 0; if 
(node->src[0]->type == GGML_TYPE_F16 && node->src[1]->type == GGML_TYPE_F32) { cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02; @@ -16391,13 +16708,9 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { } else { GGML_ASSERT(false); } - - work_size = MAX(work_size, cur); } break; case GGML_OP_CONV_2D: { - n_tasks = n_threads; - const int64_t ne00 = node->src[0]->ne[0]; // W const int64_t ne01 = node->src[0]->ne[1]; // H const int64_t ne02 = node->src[0]->ne[2]; // C @@ -16417,8 +16730,6 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { UNUSED(ne03); UNUSED(ne2); - size_t cur = 0; - if (node->src[0]->type == GGML_TYPE_F16 && node->src[1]->type == GGML_TYPE_F32) { // im2col: [N*OH*OW, IC*KH*KW] @@ -16429,21 +16740,9 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { } else { GGML_ASSERT(false); } - - work_size = MAX(work_size, cur); - } break; - case GGML_OP_CONV_2D_STAGE_0: - { - n_tasks = n_threads; - } break; - case GGML_OP_CONV_2D_STAGE_1: - { - n_tasks = n_threads; } break; case GGML_OP_CONV_TRANSPOSE_2D: { - n_tasks = n_threads; - const int64_t ne00 = node->src[0]->ne[0]; // W const int64_t ne01 = node->src[0]->ne[1]; // H const int64_t ne02 = node->src[0]->ne[2]; // Channels Out @@ -16453,141 +16752,66 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { const int64_t ne11 = node->src[1]->ne[1]; // H const int64_t ne12 = node->src[1]->ne[2]; // Channels In - size_t cur = 0; cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03; cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12; - - work_size = MAX(work_size, cur); - } break; - case GGML_OP_POOL_1D: - case GGML_OP_POOL_2D: - { - n_tasks = 1; - } break; - case GGML_OP_UPSCALE: - { - n_tasks = n_threads; } break; case GGML_OP_FLASH_ATTN: { n_tasks = n_threads; - size_t cur = 0; - const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL); if (node->src[1]->type == GGML_TYPE_F32) { cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2 - } - - if (node->src[1]->type == GGML_TYPE_F16) { + } else if (node->src[1]->type == GGML_TYPE_F16) { cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2 } - - work_size = MAX(work_size, cur); } break; case GGML_OP_FLASH_FF: { n_tasks = n_threads; - size_t cur = 0; - if (node->src[1]->type == GGML_TYPE_F32) { cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2 - } - - if (node->src[1]->type == GGML_TYPE_F16) { + } else if (node->src[1]->type == GGML_TYPE_F16) { cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2 } - - work_size = MAX(work_size, cur); } break; case GGML_OP_FLASH_ATTN_BACK: { n_tasks = n_threads; - size_t cur = 0; - const int64_t D = node->src[0]->ne[0]; const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL); const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back if (node->src[1]->type == GGML_TYPE_F32) { cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2 - } - - if (node->src[1]->type == GGML_TYPE_F16) { + } else if (node->src[1]->type 
== GGML_TYPE_F16) { cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1) cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2 } - - work_size = MAX(work_size, cur); - } break; - case GGML_OP_WIN_PART: - case GGML_OP_WIN_UNPART: - case GGML_OP_GET_REL_POS: - case GGML_OP_MAP_UNARY: - case GGML_OP_MAP_BINARY: - case GGML_OP_MAP_CUSTOM1_F32: - case GGML_OP_MAP_CUSTOM2_F32: - case GGML_OP_MAP_CUSTOM3_F32: - { - n_tasks = 1; - } break; - case GGML_OP_MAP_CUSTOM1: - { - struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) node->op_params; - if (p->n_tasks == GGML_N_TASKS_MAX) { - n_tasks = n_threads; - } else { - n_tasks = MIN(p->n_tasks, n_threads); - } - } break; - case GGML_OP_MAP_CUSTOM2: - { - struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) node->op_params; - if (p->n_tasks == GGML_N_TASKS_MAX) { - n_tasks = n_threads; - } else { - n_tasks = MIN(p->n_tasks, n_threads); - } - } break; - case GGML_OP_MAP_CUSTOM3: - { - struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) node->op_params; - if (p->n_tasks == GGML_N_TASKS_MAX) { - n_tasks = n_threads; - } else { - n_tasks = MIN(p->n_tasks, n_threads); - } } break; + case GGML_OP_CROSS_ENTROPY_LOSS: { n_tasks = n_threads; - size_t cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks); - - work_size = MAX(work_size, cur); - } break; - case GGML_OP_CROSS_ENTROPY_LOSS_BACK: - { - n_tasks = n_threads; - } break; - case GGML_OP_NONE: - { - n_tasks = 1; + cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks); } break; case GGML_OP_COUNT: { GGML_ASSERT(false); } break; + default: + break; } - cplan.n_tasks[i] = n_tasks; + work_size = MAX(work_size, cur); } if (work_size > 0) { @@ -16609,12 +16833,6 @@ int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) { if (cplan->work_size > 0) { GGML_ASSERT(cplan->work_data); } - - for (int i = 0; i < cgraph->n_nodes; ++i) { - if (cgraph->nodes[i]->op != GGML_OP_NONE) { - GGML_ASSERT(cplan->n_tasks[i] > 0); - } - } } const int n_threads = cplan->n_threads; @@ -16687,16 +16905,6 @@ int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) { return compute_status; } -void ggml_graph_reset(struct ggml_cgraph * cgraph) { - for (int i = 0; i < cgraph->n_nodes; i++) { - struct ggml_tensor * grad = cgraph->grads[i]; - - if (grad) { - ggml_set_zero(grad); - } - } -} - void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) { struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads); @@ -16823,12 +17031,12 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { const uint32_t magic = GGML_FILE_MAGIC; const uint32_t version = GGML_FILE_VERSION; const uint32_t n_leafs = cgraph->n_leafs; - const uint32_t nodes = cgraph->n_nodes; + const uint32_t n_nodes = cgraph->n_nodes; fwrite(&magic, sizeof(uint32_t), 1, fout); fwrite(&version, sizeof(uint32_t), 1, fout); fwrite(&n_leafs, sizeof(uint32_t), 1, fout); - fwrite(&nodes, sizeof(uint32_t), 1, fout); + fwrite(&n_nodes, sizeof(uint32_t), 1, fout); fwrite(&size_eval, sizeof(uint64_t), 1, fout); } @@ -16916,7 +17124,7 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { if (idx == -1) { for (int k = 0; k < cgraph->n_nodes; ++k) { if (args[j] == cgraph->nodes[k]) { - idx = GGML_MAX_NODES + k; + idx = cgraph->n_leafs + k; break; } } @@ -16943,11 +17151,11 @@ void ggml_graph_export(const 
struct ggml_cgraph * cgraph, const char * fname) { } } -struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval) { +struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval) { assert(*ctx_data == NULL); assert(*ctx_eval == NULL); - struct ggml_cgraph result = { 0 }; + struct ggml_cgraph * result = NULL; struct ggml_tensor * data = NULL; @@ -17019,13 +17227,11 @@ struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** const uint32_t n_leafs = *(const uint32_t *) ptr; ptr += sizeof(n_leafs); const uint32_t n_nodes = *(const uint32_t *) ptr; ptr += sizeof(n_nodes); const uint64_t size_eval = *(const uint64_t *) ptr; ptr += sizeof(size_eval); - - result.n_leafs = n_leafs; - result.n_nodes = n_nodes; + const int graph_size = MAX(n_leafs, n_nodes); // create the data context { - const size_t overhead = (n_leafs + n_nodes)*ggml_tensor_overhead(); + const size_t overhead = (n_leafs + n_nodes)*ggml_tensor_overhead() + ggml_graph_overhead_custom(graph_size, false); struct ggml_init_params params = { .mem_size = size_eval + overhead, @@ -17041,6 +17247,12 @@ struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** } } + result = ggml_new_graph_custom(*ctx_eval, graph_size, false); + + result->n_leafs = n_leafs; + result->n_nodes = n_nodes; + + // leafs { uint32_t type; @@ -17079,7 +17291,7 @@ struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** tensor->nb[j] = nb[j]; } - result.leafs[i] = tensor; + result->leafs[i] = tensor; ptr += ggml_nbytes(tensor); @@ -17131,10 +17343,10 @@ struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** continue; } - if (arg_idx < GGML_MAX_NODES) { - args[j] = result.leafs[arg_idx]; + if (arg_idx < result->n_leafs) { + args[j] = result->leafs[arg_idx]; } else { - args[j] = result.nodes[arg_idx - GGML_MAX_NODES]; + args[j] = result->nodes[arg_idx - result->n_leafs]; } } @@ -17186,7 +17398,7 @@ struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** tensor->src[j] = args[j]; } - result.nodes[i] = tensor; + result->nodes[i] = tensor; fprintf(stderr, "%s: loaded node %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, ggml_nbytes(tensor)); } @@ -18091,10 +18303,11 @@ struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) { case GGML_OPT_ADAM: { result = (struct ggml_opt_params) { - .type = GGML_OPT_ADAM, - .n_threads = 1, - .past = 0, - .delta = 1e-5f, + .type = GGML_OPT_ADAM, + .graph_size = GGML_DEFAULT_GRAPH_SIZE, + .n_threads = 1, // FIXME: GGML_DEFAULT_N_THREADS ? + .past = 0, + .delta = 1e-5f, .max_no_improvement = 100, @@ -18121,10 +18334,11 @@ struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) { case GGML_OPT_LBFGS: { result = (struct ggml_opt_params) { - .type = GGML_OPT_LBFGS, - .n_threads = 1, - .past = 0, - .delta = 1e-5f, + .type = GGML_OPT_LBFGS, + .graph_size = GGML_DEFAULT_GRAPH_SIZE, + .n_threads = 1, + .past = 0, + .delta = 1e-5f, .max_no_improvement = 0, @@ -18266,14 +18480,11 @@ enum ggml_opt_result ggml_opt_resume( struct ggml_tensor * f) { // build forward + backward compute graphs - struct ggml_tensor * gfbuf = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(struct ggml_cgraph) / ggml_type_size(GGML_TYPE_I32)+ (sizeof(struct ggml_cgraph) % ggml_type_size(GGML_TYPE_I32) ? 
1 : 0)); - struct ggml_tensor * gbbuf = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(struct ggml_cgraph) / ggml_type_size(GGML_TYPE_I32)+ (sizeof(struct ggml_cgraph) % ggml_type_size(GGML_TYPE_I32) ? 1 : 0)); - - struct ggml_cgraph * gf = (struct ggml_cgraph *) gfbuf->data; - struct ggml_cgraph * gb = (struct ggml_cgraph *) gbbuf->data; + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, opt->params.graph_size, true); + ggml_build_forward_expand(gf, f); - *gf = ggml_build_forward (f); - *gb = ggml_build_backward(ctx, gf, true); + struct ggml_cgraph * gb = ggml_graph_dup(ctx, gf); + ggml_build_backward_expand(ctx, gf, gb, true); return ggml_opt_resume_g(ctx, opt, f, gf, gb, NULL, NULL); } diff --git a/ggml.h b/ggml.h index 26654fc8ecdc84..0118c99dbafddd 100644 --- a/ggml.h +++ b/ggml.h @@ -58,7 +58,8 @@ // { // ... // -// struct ggml_cgraph gf = ggml_build_forward(f); +// struct ggml_cgraph * gf = ggml_new_graph(ctx); +// ggml_build_forward_expand(gf, f); // // // set the input variable and parameter values // ggml_set_f32(x, 2.0f); @@ -213,15 +214,14 @@ #define GGML_QNT_VERSION 2 // bump this on quantization format changes #define GGML_QNT_VERSION_FACTOR 1000 // do not change this -#define GGML_MAX_DIMS 4 -#define GGML_MAX_NODES 16384 -#define GGML_MAX_PARAMS 1024 -#define GGML_MAX_CONTEXTS 64 -#define GGML_MAX_SRC 6 -#define GGML_MAX_NAME 64 -#define GGML_MAX_OP_PARAMS 64 -#define GGML_DEFAULT_N_THREADS 4 - +#define GGML_MAX_DIMS 4 +#define GGML_MAX_PARAMS 1024 +#define GGML_MAX_CONTEXTS 64 +#define GGML_MAX_SRC 6 +#define GGML_MAX_NAME 64 +#define GGML_MAX_OP_PARAMS 64 +#define GGML_DEFAULT_N_THREADS 4 +#define GGML_DEFAULT_GRAPH_SIZE 2048 #if UINTPTR_MAX == 0xFFFFFFFF #define GGML_MEM_ALIGN 4 #else @@ -245,7 +245,10 @@ do { \ if (!(x)) { \ fprintf(stderr, "GGML_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \ - abort(); \ + fflush(stderr); \ + fflush(stdout); \ + ggml_print_backtrace(); \ + exit(1); \ } \ } while (0) @@ -451,6 +454,7 @@ extern "C" { GGML_UNARY_OP_GELU, GGML_UNARY_OP_GELU_QUICK, GGML_UNARY_OP_SILU, + GGML_UNARY_OP_LEAKY }; enum ggml_object_type { @@ -531,37 +535,33 @@ extern "C" { int n_threads; - // the `n_tasks` of nodes, 1:1 mapping to cgraph nodes - int n_tasks[GGML_MAX_NODES]; - // abort ggml_graph_compute when true bool (*abort_callback)(void * data); void * abort_callback_data; }; - // next prime after GGML_MAX_NODES - // #define GGML_GRAPH_HASHTABLE_SIZE 4099 - // next prime after GGML_MAX_NODES * 2 (nodes + leafs) - // #define GGML_GRAPH_HASHTABLE_SIZE 8273 - // #define GGML_GRAPH_HASHTABLE_SIZE 16411 - #define GGML_GRAPH_HASHTABLE_SIZE 32771 - enum ggml_cgraph_eval_order { GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT = 0, GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT, GGML_CGRAPH_EVAL_ORDER_COUNT }; + struct ggml_hash_set { + size_t size; + struct ggml_tensor ** keys; + }; + // computation graph struct ggml_cgraph { + int size; int n_nodes; int n_leafs; - struct ggml_tensor * nodes[GGML_MAX_NODES]; - struct ggml_tensor * grads[GGML_MAX_NODES]; - struct ggml_tensor * leafs[GGML_MAX_NODES]; + struct ggml_tensor ** nodes; + struct ggml_tensor ** grads; + struct ggml_tensor ** leafs; - void * visited_hash_table[GGML_GRAPH_HASHTABLE_SIZE]; + struct ggml_hash_set visited_hash_table; enum ggml_cgraph_eval_order order; @@ -571,8 +571,6 @@ extern "C" { int64_t perf_time_us; }; - static const size_t GGML_GRAPH_SIZE = sizeof(struct ggml_cgraph); - // scratch buffer struct ggml_scratch { size_t offs; @@ -617,6 +615,8 @@ extern "C" { GGML_API int64_t ggml_cycles(void); GGML_API int64_t 
ggml_cycles_per_ms(void); + GGML_API void ggml_print_backtrace(void); + GGML_API void ggml_numa_init(void); // call once for better performance on NUMA systems GGML_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node @@ -709,7 +709,7 @@ extern "C" { // Context tensor enumeration and lookup GGML_API struct ggml_tensor * ggml_get_first_tensor(struct ggml_context * ctx); GGML_API struct ggml_tensor * ggml_get_next_tensor (struct ggml_context * ctx, struct ggml_tensor * tensor); - GGML_API struct ggml_tensor * ggml_get_tensor (struct ggml_context * ctx, const char * name); + GGML_API struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name); GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor); GGML_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value); @@ -943,6 +943,10 @@ extern "C" { struct ggml_context * ctx, struct ggml_tensor * a); + GGML_API struct ggml_tensor * ggml_leaky( + struct ggml_context * ctx, + struct ggml_tensor * a); + GGML_API struct ggml_tensor * ggml_relu_inplace( struct ggml_context * ctx, struct ggml_tensor * a); @@ -1482,6 +1486,8 @@ extern "C" { int s0, // stride int p0); // padding + // the result will have 2*p0 padding for the first dimension + // and 2*p1 padding for the second dimension GGML_API struct ggml_tensor * ggml_pool_2d( struct ggml_context * ctx, struct ggml_tensor * a, @@ -1490,8 +1496,8 @@ extern "C" { int k1, int s0, int s1, - int p0, - int p1); + float p0, + float p1); // nearest interpolate // used in stable-diffusion @@ -1732,19 +1738,22 @@ extern "C" { GGML_API void ggml_build_forward_expand (struct ggml_cgraph * cgraph, struct ggml_tensor * tensor); GGML_API void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep); - GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor); - GGML_API struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep); - // graph allocation in a context - GGML_API struct ggml_cgraph * ggml_new_graph (struct ggml_context * ctx); - GGML_API struct ggml_cgraph * ggml_build_forward_ctx(struct ggml_context * ctx, struct ggml_tensor * tensor); + GGML_API struct ggml_cgraph * ggml_new_graph (struct ggml_context * ctx); // size = GGML_DEFAULT_GRAPH_SIZE, grads = false + GGML_API struct ggml_cgraph * ggml_new_graph_custom (struct ggml_context * ctx, size_t size, bool grads); + GGML_API struct ggml_cgraph * ggml_graph_dup (struct ggml_context * ctx, struct ggml_cgraph * cgraph); + GGML_API struct ggml_cgraph * ggml_graph_view (struct ggml_context * ctx, struct ggml_cgraph * cgraph, int i0, int i1); + GGML_API void ggml_graph_cpy (struct ggml_cgraph * src, struct ggml_cgraph * dst); + GGML_API void ggml_graph_reset (struct ggml_cgraph * cgraph); // zero grads + GGML_API void ggml_graph_clear (struct ggml_cgraph * cgraph); + GGML_API size_t ggml_graph_overhead(void); + GGML_API size_t ggml_graph_overhead_custom(size_t size, bool grads); // ggml_graph_plan() has to be called before ggml_graph_compute() // when plan.work_size > 0, caller must allocate memory for plan.work_data GGML_API struct ggml_cplan ggml_graph_plan (struct ggml_cgraph * cgraph, int n_threads /*= GGML_DEFAULT_N_THREADS*/); - GGML_API int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan); - GGML_API void ggml_graph_reset (struct ggml_cgraph * cgraph); + GGML_API int ggml_graph_compute(struct ggml_cgraph * cgraph, 
struct ggml_cplan * cplan); // same as ggml_graph_compute() but the work data is allocated as a part of the context // note: the drawback of this API is that you must have ensured that the context has enough memory for the work data @@ -1752,8 +1761,8 @@ extern "C" { GGML_API struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name); - GGML_API void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname); - GGML_API struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval); + GGML_API void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname); + GGML_API struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval); // print info and performance information for the graph GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph); @@ -1816,6 +1825,8 @@ extern "C" { struct ggml_opt_params { enum ggml_opt_type type; + size_t graph_size; + int n_threads; // delta-based convergence test diff --git a/llama.cpp b/llama.cpp index a5f3876cc19e0c..76ee4ea2300e86 100644 --- a/llama.cpp +++ b/llama.cpp @@ -91,6 +91,8 @@ #define LLAMA_ATTRIBUTE_FORMAT(...) #endif +#define LLAMA_MAX_NODES 4096 + // // logging // @@ -3618,7 +3620,7 @@ struct llm_build_context { } struct ggml_cgraph * build_llama() { - struct ggml_cgraph * gf = ggml_new_graph(ctx0); + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); GGML_ASSERT(n_embd_head == hparams.n_rot); @@ -3730,7 +3732,7 @@ struct llm_build_context { } struct ggml_cgraph * build_baichuan() { - struct ggml_cgraph * gf = ggml_new_graph(ctx0); + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); struct ggml_tensor * cur; struct ggml_tensor * inpL; @@ -3850,7 +3852,7 @@ struct llm_build_context { } struct ggml_cgraph * build_falcon() { - struct ggml_cgraph * gf = ggml_new_graph(ctx0); + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); struct ggml_tensor * cur; struct ggml_tensor * inpL; @@ -3972,7 +3974,7 @@ struct llm_build_context { } struct ggml_cgraph * build_starcoder() { - struct ggml_cgraph * gf = ggml_new_graph(ctx0); + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); struct ggml_tensor * cur; struct ggml_tensor * pos; @@ -4071,7 +4073,7 @@ struct llm_build_context { } struct ggml_cgraph * build_persimmon() { - struct ggml_cgraph * gf = ggml_new_graph(ctx0); + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); const int64_t n_rot = n_embd_head / 2; @@ -4281,7 +4283,7 @@ struct llm_build_context { } struct ggml_cgraph * build_refact() { - struct ggml_cgraph * gf = ggml_new_graph(ctx0); + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); struct ggml_tensor * cur; struct ggml_tensor * inpL; @@ -4372,7 +4374,7 @@ struct llm_build_context { } struct ggml_cgraph * build_bloom() { - struct ggml_cgraph * gf = ggml_new_graph(ctx0); + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); struct ggml_tensor * cur; struct ggml_tensor * inpL; @@ -4466,7 +4468,7 @@ struct llm_build_context { } struct ggml_cgraph * build_mpt() { - struct ggml_cgraph * gf = ggml_new_graph(ctx0); + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); struct ggml_tensor * cur; struct ggml_tensor * inpL; @@ -8208,7 +8210,7 @@ struct llama_context * 
llama_new_context_with_model( { static const size_t tensor_alignment = 32; // the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data - ctx->buf_compute.resize(ggml_tensor_overhead()*GGML_MAX_NODES + ggml_graph_overhead()); + ctx->buf_compute.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead()); // create measure allocator ctx->alloc = ggml_allocr_new_measure(tensor_alignment); @@ -8597,8 +8599,8 @@ static void llama_copy_state_data_internal(struct llama_context * ctx, llama_dat if (kv_buf_size) { const size_t elt_size = ggml_element_size(kv_self.k); - ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true }); - ggml_cgraph gf{}; + ggml_context * cpy_ctx = ggml_init({ 6*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true }); + ggml_cgraph * gf = ggml_new_graph(cpy_ctx); ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_head, n_layer); std::vector<uint8_t> kout3d_data(ggml_nbytes(kout3d), 0); @@ -8616,9 +8618,9 @@ static void llama_copy_state_data_internal(struct llama_context * ctx, llama_dat kv_head, n_embd, n_layer, elt_size*n_ctx, elt_size*n_ctx*n_embd, 0); - ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, k3d, kout3d)); - ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, v3d, vout3d)); - ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1); + ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, k3d, kout3d)); + ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, v3d, vout3d)); + ggml_graph_compute_helper(ctx->work_buffer, gf, /*n_threads*/ 1); ggml_free(cpy_ctx); @@ -8725,8 +8727,8 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) { const size_t elt_size = ggml_element_size(kv_self.k); - ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true }); - ggml_cgraph gf{}; + ggml_context * cpy_ctx = ggml_init({ 6*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true }); + ggml_cgraph * gf = ggml_new_graph(cpy_ctx); ggml_tensor * kin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_head, n_layer); kin3d->data = (void *) inp; @@ -8744,9 +8746,9 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) { kv_head, n_embd, n_layer, elt_size*n_ctx, elt_size*n_ctx*n_embd, 0); - ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, kin3d, k3d)); - ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, vin3d, v3d)); - ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1); + ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, kin3d, k3d)); + ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, vin3d, v3d)); + ggml_graph_compute_helper(ctx->work_buffer, gf, /*n_threads*/ 1); ggml_free(cpy_ctx); } diff --git a/scripts/sync-ggml.sh b/scripts/sync-ggml.sh index 4311268bd2d176..4024531b10f702 100755 --- a/scripts/sync-ggml.sh +++ b/scripts/sync-ggml.sh @@ -2,14 +2,20 @@ cp -rpv ../ggml/src/ggml.c ./ggml.c cp -rpv ../ggml/src/ggml-alloc.c ./ggml-alloc.c +cp -rpv ../ggml/src/ggml-backend-impl.h ./ggml-backend-impl.h cp -rpv ../ggml/src/ggml-backend.c ./ggml-backend.c -cp -rpv ../ggml/src/ggml-cuda.h ./ggml-cuda.h cp -rpv ../ggml/src/ggml-cuda.cu ./ggml-cuda.cu -cp -rpv ../ggml/src/ggml-opencl.h ./ggml-opencl.h -cp -rpv ../ggml/src/ggml-opencl.cpp ./ggml-opencl.cpp +cp -rpv ../ggml/src/ggml-cuda.h ./ggml-cuda.h +cp -rpv ../ggml/src/ggml-impl.h ./ggml-impl.h cp -rpv ../ggml/src/ggml-metal.h ./ggml-metal.h cp -rpv ../ggml/src/ggml-metal.m ./ggml-metal.m cp -rpv ../ggml/src/ggml-metal.metal 
./ggml-metal.metal +cp -rpv ../ggml/src/ggml-mpi.h ./ggml-mpi.h +cp -rpv ../ggml/src/ggml-mpi.c ./ggml-mpi.c +cp -rpv ../ggml/src/ggml-opencl.cpp ./ggml-opencl.cpp +cp -rpv ../ggml/src/ggml-opencl.h ./ggml-opencl.h +cp -rpv ../ggml/src/ggml-quants.c ./ggml-quants.c +cp -rpv ../ggml/src/ggml-quants.h ./ggml-quants.h cp -rpv ../ggml/include/ggml/ggml.h ./ggml.h cp -rpv ../ggml/include/ggml/ggml-alloc.h ./ggml-alloc.h cp -rpv ../ggml/include/ggml/ggml-backend.h ./ggml-backend.h diff --git a/tests/test-grad0.cpp b/tests/test-grad0.cpp index 0a559b27ab370e..7fe9154ddbb166 100644 --- a/tests/test-grad0.cpp +++ b/tests/test-grad0.cpp @@ -231,9 +231,10 @@ static bool check_gradient( printf("GGML_N_THREADS = %d\n", n_threads); } - struct ggml_cgraph * gf = ggml_build_forward_ctx(ctx0, f); - struct ggml_cgraph * gb = ggml_new_graph(ctx0); - *gb = *gf; + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, GGML_DEFAULT_GRAPH_SIZE, true); + struct ggml_cgraph * gb = ggml_new_graph_custom(ctx0, GGML_DEFAULT_GRAPH_SIZE, true); + ggml_build_forward_expand(gf, f); + ggml_graph_cpy(gf, gb); ggml_build_backward_expand(ctx0, gf, gb, false); ggml_graph_compute_with_ctx(ctx0, gf, n_threads); diff --git a/tests/test-opt.cpp b/tests/test-opt.cpp index bb8af59620b146..2c9997fca77050 100644 --- a/tests/test-opt.cpp +++ b/tests/test-opt.cpp @@ -109,10 +109,11 @@ int main(void) { struct ggml_tensor * d = ggml_sub(ctx, c, ab); struct ggml_tensor * e = ggml_sum(ctx, ggml_sqr(ctx, d)); - struct ggml_cgraph ge = ggml_build_forward(e); - ggml_graph_reset(&ge); + struct ggml_cgraph * ge = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, true); + ggml_build_forward_expand(ge, e); + ggml_graph_reset(ge); - ggml_graph_compute_with_ctx(ctx, &ge, /*n_threads*/ 1); + ggml_graph_compute_with_ctx(ctx, ge, /*n_threads*/ 1); const float fe = ggml_get_f32_1d(e, 0); printf("%s: e = %.4f\n", __func__, fe); @@ -121,9 +122,9 @@ int main(void) { ggml_opt(ctx, opt_params, e); - ggml_graph_reset(&ge); + ggml_graph_reset(ge); - ggml_graph_compute_with_ctx(ctx, &ge, /*n_threads*/ 1); + ggml_graph_compute_with_ctx(ctx, ge, /*n_threads*/ 1); const float fe_opt = ggml_get_f32_1d(e, 0); printf("%s: original e = %.4f\n", __func__, fe); From c049b37d7baf558944501705b91ac89b26ee3e41 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 13 Nov 2023 14:18:08 +0200 Subject: [PATCH 089/206] readme : update hot topics --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index af39e8c0e386ea..c7d23277845bc3 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++ ### Hot topics -- ⚠️ **Upcoming change that might break functionality. Help with testing is needed:** https://github.com/ggerganov/llama.cpp/pull/3912 +- *No hot topics atm. 
Open to suggestions about what is hot today* ---- From 3d68f364f15778dc326f5024f2e5af1ad6dfddef Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 13 Nov 2023 16:55:52 +0200 Subject: [PATCH 090/206] ggml : sync (im2col, GPU conv, 32-bit arm compat) (#4060) ggml-ci --- ggml-cuda.cu | 106 +++- ggml-impl.h | 6 - ggml-metal.h | 2 +- ggml-metal.m | 106 +++- ggml-metal.metal | 108 +++- ggml-quants.c | 241 ++++++--- ggml.c | 1287 ++++++++-------------------------------------- ggml.h | 19 +- 8 files changed, 693 insertions(+), 1182 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 1634024466542a..7be63925f4edad 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -4489,6 +4489,13 @@ static __device__ void cpy_1_f32_f16(const char * cxi, char * cdsti) { *dsti = __float2half(*xi); } +static __device__ void cpy_1_f16_f16(const char * cxi, char * cdsti) { + const half * xi = (const half *) cxi; + half * dsti = (half *) cdsti; + + *dsti = *xi; +} + template <cpy_kernel_t cpy_1> static __global__ void cpy_f32_f16(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int nb00, const int nb01, const int nb02, @@ -4742,6 +4749,25 @@ static __global__ void clamp_f32(const float * x, float * dst, const float min, dst[i] = x[i] < min ? min : (x[i] > max ? max : x[i]); } +static __global__ void im2col_f32_f16( + const float * x, half * dst, + int ofs0, int ofs1, int IW, int IH, int CHW, + int s0, int s1, int p0, int p1, int d0, int d1) { + const int iiw = blockIdx.z * s0 + threadIdx.z * d0 - p0; + const int iih = blockIdx.y * s1 + threadIdx.y * d1 - p1; + + const int offset_dst = + (threadIdx.x * gridDim.y * gridDim.z + blockIdx.y * gridDim.z + blockIdx.z) * CHW + + (blockIdx.x * (blockDim.y * blockDim.z) + threadIdx.y * blockDim.z + threadIdx.z); + + if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { + dst[offset_dst] = __float2half(0.0f); + } else { + const int offset_src = threadIdx.x * ofs0 + blockIdx.x * ofs1; + dst[offset_dst] = __float2half(x[offset_src + iih * IW + iiw]); + } +} + template static void get_rows_cuda(const void * x, const int32_t * y, float * dst, const int nrows, const int ncols, cudaStream_t stream) { const dim3 block_dims(CUDA_GET_ROWS_BLOCK_SIZE, 1, 1); @@ -5642,6 +5668,16 @@ static void ggml_cpy_f32_f16_cuda( (cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12); } +static void ggml_cpy_f16_f16_cuda( + const char * cx, char * cdst, const int ne, + const int ne00, const int ne01, const int nb00, const int nb01, const int nb02, + const int ne10, const int ne11, const int nb10, const int nb11, const int nb12, cudaStream_t stream) { + + const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; + cpy_f32_f16<cpy_1_f16_f16><<<num_blocks, CUDA_CPY_BLOCK_SIZE, 0, stream>>> + (cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12); +} + static void scale_f32_cuda(const float * x, float * dst, const float scale, const int k, cudaStream_t stream) { const int num_blocks = (k + CUDA_SCALE_BLOCK_SIZE - 1) / CUDA_SCALE_BLOCK_SIZE; scale_f32<<<num_blocks, CUDA_SCALE_BLOCK_SIZE, 0, stream>>>(x, dst, scale, k); @@ -5725,6 +5761,15 @@ static void soft_max_f32_cuda(const float * x, float * dst, const int ncols_x, c soft_max_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols_x); } +static void im2col_f32_f16_cuda(const float * x, half * dst, + int OH, int IW, int IH, int OW, int IC, + int KH, int KW, int N, int ofs0, int ofs1, + int s0, int s1, int p0, int p1, int d0, int d1, cudaStream_t stream) { + dim3 block_nums(IC, OH, OW); + dim3 block_dims(N, KH, KW); + im2col_f32_f16<<<block_nums, block_dims, 0, stream>>>(x, dst, ofs0, ofs1, IW, IH, (IC * KH * KW), s0, s1, p0, p1, d0, d1); +} + // 
buffer pool for cuda #define MAX_CUDA_BUFFERS 256 @@ -6522,8 +6567,7 @@ inline void ggml_cuda_op_mul_mat_cublas( src1_as_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &src1_as); to_fp16_cuda(src1_ddf_i, src1_as_f16, ne, stream); } - const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddq_i : src1_as_f16; - + const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddf_i : src1_as_f16; size_t dst_as = 0; half * dst_f16 = (half *) ggml_cuda_pool_malloc(row_diff*src1_ncols * sizeof(half), &dst_as); @@ -6698,6 +6742,45 @@ inline void ggml_cuda_op_alibi( (void) src1_dd; } +inline void ggml_cuda_op_im2col( + const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, + const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F16); + + const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; + const int32_t s1 = ((const int32_t*)(dst->op_params))[1]; + const int32_t p0 = ((const int32_t*)(dst->op_params))[2]; + const int32_t p1 = ((const int32_t*)(dst->op_params))[3]; + const int32_t d0 = ((const int32_t*)(dst->op_params))[4]; + const int32_t d1 = ((const int32_t*)(dst->op_params))[5]; + + const bool is_2D = ((const int32_t*)(dst->op_params))[6] == 1; + + const int64_t N = src1->ne[is_2D ? 3 : 2]; + const int64_t IC = src1->ne[is_2D ? 2 : 1]; + const int64_t IH = is_2D ? src1->ne[1] : 1; + const int64_t IW = src1->ne[0]; + + const int64_t KH = is_2D ? src0->ne[1] : 1; + const int64_t KW = src0->ne[0]; + + const int64_t OH = is_2D ? dst->ne[2] : 1; + const int64_t OW = dst->ne[1]; + + const size_t ofs0 = src1->nb[is_2D ? 3 : 2] / 4; // nb is byte offset, src is type float32 + const size_t ofs1 = src1->nb[is_2D ? 
2 : 1] / 4; // nb is byte offset, src is type float32 + + im2col_f32_f16_cuda(src1_dd, (half*) dst_dd, + OH, IW, IH, OW, IC, KH, KW, N, + ofs0, ofs1, s0, s1, p0, p1, d0, d1, main_stream); + + (void) src0; + (void) src0_dd; +} + inline void ggml_cuda_op_diag_mask_inf( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) { @@ -7610,6 +7693,9 @@ static void ggml_cuda_cpy(const ggml_tensor * src0, const ggml_tensor * src1, gg } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) { ggml_cpy_f32_f16_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12, main_stream); + } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) { + ggml_cpy_f16_f16_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02, + ne10, ne11, nb10, nb11, nb12, main_stream); } else { fprintf(stderr, "%s: unsupported type combination (%s to %s)\n", __func__, ggml_type_name(src0->type), ggml_type_name(src1->type)); @@ -7641,6 +7727,10 @@ static void ggml_cuda_alibi(const ggml_tensor * src0, const ggml_tensor * src1, ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_alibi); } +void ggml_cuda_im2col(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_im2col); +} + static void ggml_cuda_nop(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { (void) src0; (void) src1; @@ -7934,6 +8024,15 @@ bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_ return false; } + if (tensor->op == GGML_OP_MUL_MAT) { + if (tensor->src[0]->ne[3] != tensor->src[1]->ne[3]) { +#ifndef NDEBUG + fprintf(stderr, "%s: cannot compute %s: src0->ne[3] = %d, src1->ne[3] = %d - fallback to CPU\n", __func__, tensor->name, tensor->src[0]->ne[3], tensor->src[1]->ne[3]); +#endif + return false; + } + } + switch (tensor->op) { case GGML_OP_REPEAT: func = ggml_cuda_repeat; @@ -8012,6 +8111,9 @@ bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_ case GGML_OP_ALIBI: func = ggml_cuda_alibi; break; + case GGML_OP_IM2COL: + func = ggml_cuda_im2col; + break; default: return false; } diff --git a/ggml-impl.h b/ggml-impl.h index d88f261449f058..06c07339e92699 100644 --- a/ggml-impl.h +++ b/ggml-impl.h @@ -39,12 +39,6 @@ extern "C" { #endif #endif -#undef MIN -#undef MAX - -#define MIN(a, b) ((a) < (b) ? (a) : (b)) -#define MAX(a, b) ((a) > (b) ? 
(a) : (b)) - // 16-bit float // on Arm, we use __fp16 // on x86, we use uint16_t diff --git a/ggml-metal.h b/ggml-metal.h index 096b844e32c6fe..be2731f8ba4767 100644 --- a/ggml-metal.h +++ b/ggml-metal.h @@ -26,7 +26,7 @@ #include // max memory buffers that can be mapped to the device -#define GGML_METAL_MAX_BUFFERS 16 +#define GGML_METAL_MAX_BUFFERS 64 #define GGML_METAL_MAX_COMMAND_BUFFERS 32 struct ggml_tensor; diff --git a/ggml-metal.m b/ggml-metal.m index c2cda0bf546d30..3d22b0b27e444f 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -86,6 +86,7 @@ GGML_METAL_DECL_KERNEL(rms_norm); GGML_METAL_DECL_KERNEL(norm); GGML_METAL_DECL_KERNEL(mul_mv_f32_f32); + GGML_METAL_DECL_KERNEL(mul_mv_f16_f16); GGML_METAL_DECL_KERNEL(mul_mv_f16_f32); GGML_METAL_DECL_KERNEL(mul_mv_f16_f32_1row); GGML_METAL_DECL_KERNEL(mul_mv_f16_f32_l4); @@ -114,6 +115,7 @@ GGML_METAL_DECL_KERNEL(rope_f32); GGML_METAL_DECL_KERNEL(rope_f16); GGML_METAL_DECL_KERNEL(alibi_f32); + GGML_METAL_DECL_KERNEL(im2col_f16); GGML_METAL_DECL_KERNEL(cpy_f32_f16); GGML_METAL_DECL_KERNEL(cpy_f32_f32); GGML_METAL_DECL_KERNEL(cpy_f16_f16); @@ -126,7 +128,7 @@ // MSL code // TODO: move the contents here when ready // for now it is easier to work in a separate file -static NSString * const msl_library_source = @"see metal.metal"; +//static NSString * const msl_library_source = @"see metal.metal"; // Here to assist with NSBundle Path Hack @interface GGMLMetalClass : NSObject @@ -142,7 +144,8 @@ void ggml_metal_log_set_callback(ggml_log_callback log_callback, void * user_dat ggml_metal_log_user_data = user_data; } -static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){ +GGML_ATTRIBUTE_FORMAT(2, 3) +static void ggml_metal_log(enum ggml_log_level level, const char * format, ...){ if (ggml_metal_log_callback != NULL) { va_list args; va_start(args, format); @@ -210,7 +213,13 @@ static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){ } else { GGML_METAL_LOG_INFO("%s: default.metallib not found, loading from source\n", __func__); - NSString * sourcePath = [bundle pathForResource:@"ggml-metal" ofType:@"metal"]; + NSString * sourcePath; + NSString * ggmlMetalPathResources = [[NSProcessInfo processInfo].environment objectForKey:@"GGML_METAL_PATH_RESOURCES"]; + if (ggmlMetalPathResources) { + sourcePath = [ggmlMetalPathResources stringByAppendingPathComponent:@"ggml-metal.metal"]; + } else { + sourcePath = [bundle pathForResource:@"ggml-metal" ofType:@"metal"]; + } if (sourcePath == nil) { GGML_METAL_LOG_WARN("%s: error: could not use bundle path to find ggml-metal.metal, falling back to trying cwd\n", __func__); sourcePath = @"ggml-metal.metal"; @@ -281,6 +290,7 @@ static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){ GGML_METAL_ADD_KERNEL(rms_norm); GGML_METAL_ADD_KERNEL(norm); GGML_METAL_ADD_KERNEL(mul_mv_f32_f32); + GGML_METAL_ADD_KERNEL(mul_mv_f16_f16); GGML_METAL_ADD_KERNEL(mul_mv_f16_f32); GGML_METAL_ADD_KERNEL(mul_mv_f16_f32_1row); GGML_METAL_ADD_KERNEL(mul_mv_f16_f32_l4); @@ -311,6 +321,7 @@ static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){ GGML_METAL_ADD_KERNEL(rope_f32); GGML_METAL_ADD_KERNEL(rope_f16); GGML_METAL_ADD_KERNEL(alibi_f32); + GGML_METAL_ADD_KERNEL(im2col_f16); GGML_METAL_ADD_KERNEL(cpy_f32_f16); GGML_METAL_ADD_KERNEL(cpy_f32_f32); GGML_METAL_ADD_KERNEL(cpy_f16_f16); @@ -329,7 +340,7 @@ static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){ // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf 
for (int i = MTLGPUFamilyApple1 + 20; i >= MTLGPUFamilyApple1; --i) { if ([ctx->device supportsFamily:i]) { - GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyApple%d (%d)\n", __func__, i - MTLGPUFamilyApple1 + 1, i); + GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyApple%d (%d)\n", __func__, i - (int) MTLGPUFamilyApple1 + 1, i); break; } } @@ -380,6 +391,7 @@ void ggml_metal_free(struct ggml_metal_context * ctx) { GGML_METAL_DEL_KERNEL(rms_norm); GGML_METAL_DEL_KERNEL(norm); GGML_METAL_DEL_KERNEL(mul_mv_f32_f32); + GGML_METAL_DEL_KERNEL(mul_mv_f16_f16); GGML_METAL_DEL_KERNEL(mul_mv_f16_f32); GGML_METAL_DEL_KERNEL(mul_mv_f16_f32_1row); GGML_METAL_DEL_KERNEL(mul_mv_f16_f32_l4); @@ -410,6 +422,7 @@ void ggml_metal_free(struct ggml_metal_context * ctx) { GGML_METAL_DEL_KERNEL(rope_f32); GGML_METAL_DEL_KERNEL(rope_f16); GGML_METAL_DEL_KERNEL(alibi_f32); + GGML_METAL_DEL_KERNEL(im2col_f16); GGML_METAL_DEL_KERNEL(cpy_f32_f16); GGML_METAL_DEL_KERNEL(cpy_f32_f32); GGML_METAL_DEL_KERNEL(cpy_f16_f16); @@ -467,6 +480,10 @@ int ggml_metal_if_optimized(struct ggml_metal_context * ctx) { const int64_t tsize = ggml_nbytes(t); + if (t->buffer && t->buffer->backend && t->buffer->backend->context) { + ctx = t->buffer->backend->context; + } + // find the view that contains the tensor fully for (int i = 0; i < ctx->n_buffers; ++i) { const int64_t ioffs = (int64_t) t->data - (int64_t) ctx->buffers[i].data; @@ -567,7 +584,7 @@ bool ggml_metal_add_buffer( ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0); if (ctx->device.currentAllocatedSize > ctx->device.recommendedMaxWorkingSetSize) { - GGML_METAL_LOG_WARN(", warning: current allocated size is greater than the recommended max working set size\n", __func__); + GGML_METAL_LOG_WARN("%s: warning: current allocated size is greater than the recommended max working set size\n", __func__); } else { GGML_METAL_LOG_INFO("\n"); } @@ -1024,7 +1041,7 @@ void ggml_metal_graph_compute( [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2]; [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3]; [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4]; - [encoder setThreadgroupMemoryLength:MAX(16, nth/32*sizeof(float)) atIndex:0]; + [encoder setThreadgroupMemoryLength:GGML_PAD(nth/32*sizeof(float), 16) atIndex:0]; [encoder dispatchThreadgroups:MTLSizeMake(ne01*ne02*ne03, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; @@ -1133,6 +1150,7 @@ void ggml_metal_graph_compute( switch (src0t) { case GGML_TYPE_F32: { + GGML_ASSERT(src1t == GGML_TYPE_F32); [encoder setComputePipelineState:ctx->pipeline_mul_mv_f32_f32]; nrows = 4; } break; @@ -1140,13 +1158,18 @@ void ggml_metal_graph_compute( { nth0 = 32; nth1 = 1; - if (ne11 * ne12 < 4) { - [encoder setComputePipelineState:ctx->pipeline_mul_mv_f16_f32_1row]; - } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) { - [encoder setComputePipelineState:ctx->pipeline_mul_mv_f16_f32_l4]; - nrows = ne11; + if (src1t == GGML_TYPE_F32) { + if (ne11 * ne12 < 4) { + [encoder setComputePipelineState:ctx->pipeline_mul_mv_f16_f32_1row]; + } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) { + [encoder setComputePipelineState:ctx->pipeline_mul_mv_f16_f32_l4]; + nrows = ne11; + } else { + [encoder setComputePipelineState:ctx->pipeline_mul_mv_f16_f32]; + nrows = 4; + } } else { - [encoder setComputePipelineState:ctx->pipeline_mul_mv_f16_f32]; + [encoder setComputePipelineState:ctx->pipeline_mul_mv_f16_f16]; nrows = 4; } } break; @@ -1336,7 +1359,7 @@ void ggml_metal_graph_compute( [encoder setBytes:&ne00 length:sizeof( 
int64_t) atIndex:2]; [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3]; [encoder setBytes:&eps length:sizeof( float) atIndex:4]; - [encoder setThreadgroupMemoryLength:nth/32*sizeof(float) atIndex:0]; + [encoder setThreadgroupMemoryLength:GGML_PAD(nth/32*sizeof(float), 16) atIndex:0]; const int64_t nrows = ggml_nrows(src0); @@ -1355,7 +1378,7 @@ void ggml_metal_graph_compute( [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2]; [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3]; [encoder setBytes:&eps length:sizeof( float) atIndex:4]; - [encoder setThreadgroupMemoryLength:MAX(16, nth*sizeof(float)) atIndex:0]; + [encoder setThreadgroupMemoryLength:GGML_PAD(nth*sizeof(float), 16) atIndex:0]; const int64_t nrows = ggml_nrows(src0); @@ -1410,8 +1433,7 @@ void ggml_metal_graph_compute( const int n_past = ((int32_t *) dst->op_params)[0]; const int n_dims = ((int32_t *) dst->op_params)[1]; const int mode = ((int32_t *) dst->op_params)[2]; - // skip 3, n_ctx, used in GLM RoPE, unimplemented in metal - const int n_orig_ctx = ((int32_t *) dst->op_params)[4]; + const int n_orig_ctx = ((int32_t *) dst->op_params)[3]; float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); @@ -1459,6 +1481,58 @@ void ggml_metal_graph_compute( [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; + case GGML_OP_IM2COL: + { + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F16); + + const int32_t s0 = ((const int32_t *)(dst->op_params))[0]; + const int32_t s1 = ((const int32_t *)(dst->op_params))[1]; + const int32_t p0 = ((const int32_t *)(dst->op_params))[2]; + const int32_t p1 = ((const int32_t *)(dst->op_params))[3]; + const int32_t d0 = ((const int32_t *)(dst->op_params))[4]; + const int32_t d1 = ((const int32_t *)(dst->op_params))[5]; + const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1; + + const int32_t N = src1->ne[is_2D ? 3 : 2]; + const int32_t IC = src1->ne[is_2D ? 2 : 1]; + const int32_t IH = is_2D ? src1->ne[1] : 1; + const int32_t IW = src1->ne[0]; + + const int32_t KH = is_2D ? src0->ne[1] : 1; + const int32_t KW = src0->ne[0]; + + const int32_t OH = is_2D ? dst->ne[2] : 1; + const int32_t OW = dst->ne[1]; + + const int32_t CHW = IC * KH * KW; + + const int32_t ofs0 = src1->nb[is_2D ? 3 : 2] / 4; + const int32_t ofs1 = src1->nb[is_2D ? 
2 : 1] / 4; + + switch (src0->type) { + case GGML_TYPE_F32: GGML_ASSERT(false && "not implemented"); break; + case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_im2col_f16]; break; + default: GGML_ASSERT(false); + }; + + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + [encoder setBytes:&ofs0 length:sizeof( int32_t) atIndex:2]; + [encoder setBytes:&ofs1 length:sizeof( int32_t) atIndex:3]; + [encoder setBytes:&IW length:sizeof( int32_t) atIndex:4]; + [encoder setBytes:&IH length:sizeof( int32_t) atIndex:5]; + [encoder setBytes:&CHW length:sizeof( int32_t) atIndex:6]; + [encoder setBytes:&s0 length:sizeof( int32_t) atIndex:7]; + [encoder setBytes:&s1 length:sizeof( int32_t) atIndex:8]; + [encoder setBytes:&p0 length:sizeof( int32_t) atIndex:9]; + [encoder setBytes:&p1 length:sizeof( int32_t) atIndex:10]; + [encoder setBytes:&d0 length:sizeof( int32_t) atIndex:11]; + [encoder setBytes:&d1 length:sizeof( int32_t) atIndex:12]; + + [encoder dispatchThreadgroups:MTLSizeMake(IC, OH, OW) threadsPerThreadgroup:MTLSizeMake(N, KH, KW)]; + } break; case GGML_OP_DUP: case GGML_OP_CPY: case GGML_OP_CONT: diff --git a/ggml-metal.metal b/ggml-metal.metal index 7c35f23a7612fd..5d1357cd72d459 100644 --- a/ggml-metal.metal +++ b/ggml-metal.metal @@ -792,7 +792,7 @@ kernel void kernel_mul_mv_f32_f32( constant int64_t & ne0, constant int64_t & ne1, uint3 tgpig[[threadgroup_position_in_grid]], - uint tiisg[[thread_index_in_simdgroup]]) { + uint tiisg[[thread_index_in_simdgroup]]) { const int64_t r0 = tgpig.x; const int64_t rb = tgpig.y*N_F32_F32; @@ -844,6 +844,79 @@ kernel void kernel_mul_mv_f32_f32( } } +#define N_F16_F16 4 + +kernel void kernel_mul_mv_f16_f16( + device const char * src0, + device const char * src1, + device float * dst, + constant int64_t & ne00, + constant int64_t & ne01, + constant int64_t & ne02, + constant uint64_t & nb00, + constant uint64_t & nb01, + constant uint64_t & nb02, + constant int64_t & ne10, + constant int64_t & ne11, + constant int64_t & ne12, + constant uint64_t & nb10, + constant uint64_t & nb11, + constant uint64_t & nb12, + constant int64_t & ne0, + constant int64_t & ne1, + uint3 tgpig[[threadgroup_position_in_grid]], + uint tiisg[[thread_index_in_simdgroup]]) { + + const int64_t r0 = tgpig.x; + const int64_t rb = tgpig.y*N_F16_F16; + const int64_t im = tgpig.z; + + device const half * x = (device const half *) (src0 + r0*nb01 + im/(ne12/ne02)*nb02); + + if (ne00 < 128) { + for (int row = 0; row < N_F16_F16; ++row) { + int r1 = rb + row; + if (r1 >= ne11) { + break; + } + + device const half * y = (device const half *) (src1 + r1*nb11 + im*nb12); + + float sumf = 0; + for (int i = tiisg; i < ne00; i += 32) { + sumf += (half) x[i] * (half) y[i]; + } + + float all_sum = simd_sum(sumf); + if (tiisg == 0) { + dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; + } + } + } else { + device const half4 * x4 = (device const half4 *)x; + for (int row = 0; row < N_F16_F16; ++row) { + int r1 = rb + row; + if (r1 >= ne11) { + break; + } + + device const half * y = (device const half *) (src1 + r1*nb11 + im*nb12); + device const half4 * y4 = (device const half4 *) y; + + float sumf = 0; + for (int i = tiisg; i < ne00/4; i += 32) { + for (int k = 0; k < 4; ++k) sumf += (half) x4[i][k] * y4[i][k]; + } + + float all_sum = simd_sum(sumf); + if (tiisg == 0) { + for (int i = 4*(ne00/4); i < ne00; ++i) all_sum += (half) x[i] * y[i]; + dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; + } + } + } +} + kernel void 
kernel_mul_mv_f16_f32_1row( device const char * src0, device const char * src1, @@ -1229,6 +1302,39 @@ kernel void kernel_rope( template [[host_name("kernel_rope_f32")]] kernel rope_t kernel_rope<float>; template [[host_name("kernel_rope_f16")]] kernel rope_t kernel_rope<half>; +kernel void kernel_im2col_f16( + device const float * x, + device half * dst, + constant int32_t & ofs0, + constant int32_t & ofs1, + constant int32_t & IW, + constant int32_t & IH, + constant int32_t & CHW, + constant int32_t & s0, + constant int32_t & s1, + constant int32_t & p0, + constant int32_t & p1, + constant int32_t & d0, + constant int32_t & d1, + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tgpg[[threadgroups_per_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], + uint3 ntg[[threads_per_threadgroup]]) { + const int32_t iiw = tgpig[2] * s0 + tpitg[2] * d0 - p0; + const int32_t iih = tgpig[1] * s1 + tpitg[1] * d1 - p1; + + const int32_t offset_dst = + (tpitg[0] * tgpg[1] * tgpg[2] + tgpig[1] * tgpg[2] + tgpig[2]) * CHW + + (tgpig[0] * (ntg[1] * ntg[2]) + tpitg[1] * ntg[2] + tpitg[2]); + + if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { + dst[offset_dst] = 0.0f; + } else { + const int32_t offset_src = tpitg[0] * ofs0 + tgpig[0] * ofs1; + dst[offset_dst] = x[offset_src + iih * IW + iiw]; + } +} + kernel void kernel_cpy_f16_f16( device const half * src0, device half * dst, diff --git a/ggml-quants.c b/ggml-quants.c index 740be6dc5c7981..a48eda7320c46d 100644 --- a/ggml-quants.c +++ b/ggml-quants.c @@ -14,26 +14,6 @@ // #include <arm_neon.h> -#if !defined(__aarch64__) -inline static int32_t vaddvq_s16(int16x8_t v) { - return - (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) + - (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) + - (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) + - (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7); -} - -inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) { - int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a)); - int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b)); - return vcombine_s16(a0, b0); -} - -inline static int32_t vaddvq_s32(int32x4_t v) { - return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3); -} -#endif - #else #ifdef __wasm_simd128__ #include <wasm_simd128.h> @@ -47,13 +27,15 @@ #if defined(_MSC_VER) || defined(__MINGW32__) #include <intrin.h> #else -#if !defined(__riscv) && !defined(__s390__) +#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__) +#if !defined(__riscv) #include <immintrin.h> #endif #endif #endif #endif #endif +#endif #ifdef __riscv_v_intrinsic #include <riscv_vector.h> @@ -61,6 +43,7 @@ #undef MIN #undef MAX + #define MIN(a, b) ((a) < (b) ? (a) : (b)) #define MAX(a, b) ((a) > (b) ? 
(a) : (b)) @@ -283,9 +266,31 @@ static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 #endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) #if defined(__ARM_NEON) - #if !defined(__aarch64__) +// 64-bit compatibility + +// vaddvq_s16 +// vpaddq_s16 +// vaddvq_s32 +// vaddvq_f32 +// vmaxvq_f32 +// vcvtnq_s32_f32 + +inline static int32_t vaddvq_s16(int16x8_t v) { + return + (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) + + (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) + + (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) + + (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7); +} + +inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) { + int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a)); + int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b)); + return vcombine_s16(a0, b0); +} + inline static int32_t vaddvq_s32(int32x4_t v) { return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3); } @@ -311,6 +316,96 @@ inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) { return res; } +// vld1q_s16_x2 +// vld1q_u8_x2 +// vld1q_u8_x4 +// vld1q_s8_x2 +// vld1q_s8_x4 +// TODO: double-check these work correctly + +typedef struct ggml_int16x8x2_t { + int16x8_t val[2]; +} ggml_int16x8x2_t; + +inline static ggml_int16x8x2_t ggml_vld1q_s16_x2(const int16_t * ptr) { + ggml_int16x8x2_t res; + + res.val[0] = vld1q_s16(ptr + 0); + res.val[1] = vld1q_s16(ptr + 8); + + return res; +} + +typedef struct ggml_uint8x16x2_t { + uint8x16_t val[2]; +} ggml_uint8x16x2_t; + +inline static ggml_uint8x16x2_t ggml_vld1q_u8_x2(const uint8_t * ptr) { + ggml_uint8x16x2_t res; + + res.val[0] = vld1q_u8(ptr + 0); + res.val[1] = vld1q_u8(ptr + 16); + + return res; +} + +typedef struct ggml_uint8x16x4_t { + uint8x16_t val[4]; +} ggml_uint8x16x4_t; + +inline static ggml_uint8x16x4_t ggml_vld1q_u8_x4(const uint8_t * ptr) { + ggml_uint8x16x4_t res; + + res.val[0] = vld1q_u8(ptr + 0); + res.val[1] = vld1q_u8(ptr + 16); + res.val[2] = vld1q_u8(ptr + 32); + res.val[3] = vld1q_u8(ptr + 48); + + return res; +} + +typedef struct ggml_int8x16x2_t { + int8x16_t val[2]; +} ggml_int8x16x2_t; + +inline static ggml_int8x16x2_t ggml_vld1q_s8_x2(const int8_t * ptr) { + ggml_int8x16x2_t res; + + res.val[0] = vld1q_s8(ptr + 0); + res.val[1] = vld1q_s8(ptr + 16); + + return res; +} + +typedef struct ggml_int8x16x4_t { + int8x16_t val[4]; +} ggml_int8x16x4_t; + +inline static ggml_int8x16x4_t ggml_vld1q_s8_x4(const int8_t * ptr) { + ggml_int8x16x4_t res; + + res.val[0] = vld1q_s8(ptr + 0); + res.val[1] = vld1q_s8(ptr + 16); + res.val[2] = vld1q_s8(ptr + 32); + res.val[3] = vld1q_s8(ptr + 48); + + return res; +} + +#else + +#define ggml_int16x8x2_t int16x8x2_t +#define ggml_uint8x16x2_t uint8x16x2_t +#define ggml_uint8x16x4_t uint8x16x4_t +#define ggml_int8x16x2_t int8x16x2_t +#define ggml_int8x16x4_t int8x16x4_t + +#define ggml_vld1q_s16_x2 vld1q_s16_x2 +#define ggml_vld1q_u8_x2 vld1q_u8_x2 +#define ggml_vld1q_u8_x4 vld1q_u8_x4 +#define ggml_vld1q_s8_x2 vld1q_s8_x2 +#define ggml_vld1q_s8_x4 vld1q_s8_x4 + #endif #endif @@ -3557,7 +3652,7 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri const int32x4_t vzero = vdupq_n_s32(0); #endif - int8x16x2_t q2bytes; + ggml_int8x16x2_t q2bytes; uint8_t aux[16]; float sum = 0; @@ -3576,8 +3671,8 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri vst1q_u8(aux, scales); 
const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4); - const int16x8x2_t q8sums = vld1q_s16_x2(y[i].bsums); - const int16x8x2_t mins16 = {vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}; + const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums); + const ggml_int16x8x2_t mins16 = {vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}; const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])), vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0]))); const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])), @@ -3605,7 +3700,7 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri #endif #define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\ - q8bytes = vld1q_s8_x2(q8); q8 += 32;\ + q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;\ q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\ q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\ MULTIPLY_ACCUM_WITH_SCALE((index)); @@ -3613,9 +3708,9 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri for (int j = 0; j < QK_K/128; ++j) { - const uint8x16x2_t q2bits = vld1q_u8_x2(q2); q2 += 32; + const ggml_uint8x16x2_t q2bits = ggml_vld1q_u8_x2(q2); q2 += 32; - int8x16x2_t q8bytes = vld1q_s8_x2(q8); q8 += 32; + ggml_int8x16x2_t q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3)); q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3)); MULTIPLY_ACCUM_WITH_SCALE(0); @@ -3949,7 +4044,7 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri const int32x4_t vzero = vdupq_n_s32(0); #endif - int8x16x4_t q2bytes; + ggml_int8x16x4_t q2bytes; uint32_t aux32[2]; const uint8_t * scales = (const uint8_t *)aux32; @@ -3974,7 +4069,7 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri const uint8x16_t q2bits = vld1q_u8(q2); - const int8x16x4_t q8bytes = vld1q_s8_x4(q8); + const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits, m3)); q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 2), m3)); @@ -4238,7 +4333,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri const uint8x16_t m3 = vshlq_n_u8(m0, 3); const int8_t m32 = 32; - int8x16x4_t q3bytes; + ggml_int8x16x4_t q3bytes; float sum = 0; @@ -4250,9 +4345,9 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri const uint8_t * restrict qh = x[i].hmask; const int8_t * restrict q8 = y[i].qs; - uint8x16x2_t qhbits = vld1q_u8_x2(qh); + ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); - uint8x16x4_t q3h; + ggml_uint8x16x4_t q3h; int32_t isum = 0; @@ -4268,9 +4363,9 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri for (int j = 0; j < QK_K/128; ++j) { - const uint8x16x2_t q3bits = vld1q_u8_x2(q3); q3 += 32; - const int8x16x4_t q8bytes_1 = vld1q_s8_x4(q8); q8 += 64; - const int8x16x4_t q8bytes_2 = vld1q_s8_x4(q8); q8 += 64; + const ggml_uint8x16x2_t q3bits = ggml_vld1q_u8_x2(q3); q3 += 32; + const ggml_int8x16x4_t q8bytes_1 = ggml_vld1q_s8_x4(q8); q8 += 64; + const ggml_int8x16x4_t q8bytes_2 = ggml_vld1q_s8_x4(q8); q8 += 64; q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2); q3h.val[1] = 
vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2); @@ -4772,7 +4867,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri const uint8x16_t m3b = vdupq_n_u8(0x3); const uint8x16_t mh = vdupq_n_u8(4); - int8x16x4_t q3bytes; + ggml_int8x16x4_t q3bytes; uint16_t aux16[2]; int8_t * scales = (int8_t *)aux16; @@ -4781,11 +4876,11 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri for (int i = 0; i < nb; ++i) { - uint8x16x4_t q3h; + ggml_uint8x16x4_t q3h; const uint8x8_t hbits = vld1_u8(x[i].hmask); const uint8x16_t q3bits = vld1q_u8(x[i].qs); - const int8x16x4_t q8bytes = vld1q_s8_x4(y[i].qs); + const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(y[i].qs); const uint16_t a = *(const uint16_t *)x[i].scales; aux16[0] = a & 0x0f0f; @@ -5134,8 +5229,8 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri const int32x4_t mzero = vdupq_n_s32(0); #endif - int8x16x2_t q4bytes; - int8x16x2_t q8bytes; + ggml_int8x16x2_t q4bytes; + ggml_int8x16x2_t q8bytes; float sumf = 0; @@ -5170,17 +5265,17 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri for (int j = 0; j < QK_K/64; ++j) { - const uint8x16x2_t q4bits = vld1q_u8_x2(q4); q4 += 32; + const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); q4 += 32; #ifdef __ARM_FEATURE_DOTPROD - q8bytes = vld1q_s8_x2(q8); q8 += 32; + q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b)); q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b)); const int32x4_t p1 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); sumi1 += vaddvq_s32(p1) * scales[2*j+0]; - q8bytes = vld1q_s8_x2(q8); q8 += 32; + q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); @@ -5188,7 +5283,7 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri sumi2 += vaddvq_s32(p2) * scales[2*j+1]; #else - q8bytes = vld1q_s8_x2(q8); q8 += 32; + q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b)); q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b)); const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[0])), @@ -5197,7 +5292,7 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1]))); sumi1 += vaddvq_s16(vaddq_s16(p0, p1)) * scales[2*j+0]; - q8bytes = vld1q_s8_x2(q8); q8 += 32; + q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[0])), @@ -5512,8 +5607,8 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri float sumf = 0; - int8x16x2_t q4bytes; - int8x16x4_t q8bytes; + ggml_int8x16x2_t q4bytes; + ggml_int8x16x4_t q8bytes; float sum_mins = 0.f; @@ -5534,10 +5629,10 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri const float d = y[i].d * (float)x[i].d[0]; - const uint8x16x2_t q4bits = vld1q_u8_x2(q4); + const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); #ifdef __ARM_FEATURE_DOTPROD - q8bytes = vld1q_s8_x4(q8); + q8bytes = ggml_vld1q_s8_x4(q8); 
q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b)); q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b)); @@ -5551,7 +5646,7 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri const int32_t sumi2 = vaddvq_s32(p2) * scales[1]; #else - q8bytes = vld1q_s8_x4(q8); + q8bytes = ggml_vld1q_s8_x4(q8); q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b)); q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b)); const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[0])), @@ -5785,7 +5880,7 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri const int32x4_t mzero = vdupq_n_s32(0); #endif - int8x16x4_t q5bytes; + ggml_int8x16x4_t q5bytes; float sumf = 0; @@ -5815,16 +5910,16 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri const uint8_t * restrict qh = x[i].qh; const int8_t * restrict q8 = y[i].qs; - uint8x16x2_t qhbits = vld1q_u8_x2(qh); + ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); - uint8x16x4_t q5h; + ggml_uint8x16x4_t q5h; int32_t sumi = 0; for (int j = 0; j < QK_K/64; ++j) { - const uint8x16x2_t q5bits = vld1q_u8_x2(q5); q5 += 32; - const int8x16x4_t q8bytes = vld1q_s8_x4(q8); q8 += 64; + const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); q5 += 32; + const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4); q5h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4); @@ -6218,8 +6313,8 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri const int32x4_t mzero = vdupq_n_s32(0); #endif - int8x16x4_t q5bytes; - uint8x16x4_t q5h; + ggml_int8x16x4_t q5bytes; + ggml_uint8x16x4_t q5h; float sumf = 0; @@ -6234,8 +6329,8 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri const uint8x8_t qhbits = vld1_u8(qh); - const uint8x16x2_t q5bits = vld1q_u8_x2(q5); - const int8x16x4_t q8bytes = vld1q_s8_x4(q8); + const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); + const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); const uint8x16_t htmp = vcombine_u8(qhbits, vshr_n_u8(qhbits, 1)); q5h.val[0] = vbicq_u8(mh, vshlq_n_u8(htmp, 4)); @@ -6511,8 +6606,8 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri const uint8x16_t mone = vdupq_n_u8(3); - int8x16x4_t q6bytes; - uint8x16x4_t q6h; + ggml_int8x16x4_t q6bytes; + ggml_uint8x16x4_t q6h; for (int i = 0; i < nb; ++i) { @@ -6524,9 +6619,9 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri const int8_t * restrict scale = x[i].scales; - const int16x8x2_t q8sums = vld1q_s16_x2(y[i].bsums); + const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums); const int8x16_t scales = vld1q_s8(scale); - const int16x8x2_t q6scales = {vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}; + const ggml_int16x8x2_t q6scales = {vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}; const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])), vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))), @@ -6538,9 +6633,9 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri for (int j = 0; j < QK_K/128; ++j) { - uint8x16x2_t qhbits = vld1q_u8_x2(qh); qh += 32; - uint8x16x4_t q6bits = vld1q_u8_x4(q6); q6 += 64; - int8x16x4_t q8bytes = vld1q_s8_x4(q8); q8 += 64; + 
ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); qh += 32; + ggml_uint8x16x4_t q6bits = ggml_vld1q_u8_x4(q6); q6 += 64; + ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4); q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4); @@ -6583,7 +6678,7 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri scale += 2; #endif - q8bytes = vld1q_s8_x4(q8); q8 += 64; + q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; shifted = vshrq_n_u8(qhbits.val[0], 4); q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4); @@ -6987,8 +7082,8 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri const uint8x16_t mone = vdupq_n_u8(3); - int8x16x4_t q6bytes; - uint8x16x4_t q6h; + ggml_int8x16x4_t q6bytes; + ggml_uint8x16x4_t q6h; for (int i = 0; i < nb; ++i) { @@ -7002,9 +7097,9 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri int32_t isum = 0; - uint8x16_t qhbits = vld1q_u8(qh); - uint8x16x2_t q6bits = vld1q_u8_x2(q6); - int8x16x4_t q8bytes = vld1q_s8_x4(q8); + uint8x16_t qhbits = vld1q_u8(qh); + ggml_uint8x16x2_t q6bits = ggml_vld1q_u8_x2(q6); + ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits), 4); uint8x16_t shifted = vshrq_n_u8(qhbits, 2); diff --git a/ggml.c b/ggml.c index da78e6de9586ba..3202a517b78686 100644 --- a/ggml.c +++ b/ggml.c @@ -271,6 +271,12 @@ inline static void * ggml_aligned_malloc(size_t size) { // floating point type used to accumulate sums typedef double ggml_float; +#undef MIN +#undef MAX + +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define MAX(a, b) ((a) > (b) ? (a) : (b)) + // // global data // @@ -604,6 +610,18 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) { // simd mappings // +#if defined(__ARM_NEON) +#if !defined(__aarch64__) + +// 64-bit compatibility + +inline static float vaddvq_f32(float32x4_t v) { + return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3); +} + +#endif +#endif + // we define a common set of C macros which map to specific intrinsics based on the current architecture // we then implement the fundamental computation operations below using only these macros // adding support for new architectures requires to define the corresponding SIMD macros @@ -1616,13 +1634,8 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "ROPE_BACK", "ALIBI", "CLAMP", - "CONV_1D", - "CONV_1D_STAGE_0", - "CONV_1D_STAGE_1", "CONV_TRANSPOSE_1D", - "CONV_2D", - "CONV_2D_STAGE_0", - "CONV_2D_STAGE_1", + "IM2COL", "CONV_TRANSPOSE_2D", "POOL_1D", "POOL_2D", @@ -1653,7 +1666,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "CROSS_ENTROPY_LOSS_BACK", }; -static_assert(GGML_OP_COUNT == 73, "GGML_OP_COUNT != 73"); +static_assert(GGML_OP_COUNT == 68, "GGML_OP_COUNT != 68"); static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "none", @@ -1703,13 +1716,8 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "rope_back(x)", "alibi(x)", "clamp(x)", - "conv_1d(x)", - "conv_1d_stage_0(x)", - "conv_1d_stage_1(x)", "conv_transpose_1d(x)", - "conv_2d(x)", - "conv_2d_stage_0(x)", - "conv_2d_stage_1(x)", + "im2col(x)", "conv_transpose_2d(x)", "pool_1d(x)", "pool_2d(x)", @@ -1740,7 +1748,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "cross_entropy_loss_back(x,y)", }; -static_assert(GGML_OP_COUNT == 73, "GGML_OP_COUNT != 73"); +static_assert(GGML_OP_COUNT == 68, "GGML_OP_COUNT != 68"); static_assert(GGML_OP_POOL_COUNT == 
2, "GGML_OP_POOL_COUNT != 2"); @@ -1768,13 +1776,7 @@ static void ggml_setup_op_has_task_pass(void) { p[GGML_OP_GET_ROWS_BACK ] = true; p[GGML_OP_DIAG_MASK_INF ] = true; p[GGML_OP_DIAG_MASK_ZERO ] = true; - p[GGML_OP_CONV_1D ] = true; - p[GGML_OP_CONV_1D_STAGE_0 ] = true; - p[GGML_OP_CONV_1D_STAGE_1 ] = true; p[GGML_OP_CONV_TRANSPOSE_1D ] = true; - p[GGML_OP_CONV_2D ] = true; - p[GGML_OP_CONV_2D_STAGE_0 ] = true; - p[GGML_OP_CONV_2D_STAGE_1 ] = true; p[GGML_OP_CONV_TRANSPOSE_2D ] = true; p[GGML_OP_FLASH_ATTN_BACK ] = true; p[GGML_OP_CROSS_ENTROPY_LOSS ] = true; @@ -5128,82 +5130,6 @@ static int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, return (ins + 2 * p - d * (ks - 1) - 1) / s + 1; } -// im2col: [N, IC, IL] => [N, OL, IC*K] -// a: [OC,IC, K] -// b: [N, IC, IL] -// result: [N, OL, IC*K] -static struct ggml_tensor * ggml_conv_1d_stage_0( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - int s0, - int p0, - int d0) { - GGML_ASSERT(a->ne[1] == b->ne[1]); - bool is_node = false; - - if (a->grad || b->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - const int64_t OL = ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0); - - const int64_t ne[4] = { - a->ne[1] * a->ne[0], - OL, - b->ne[2], - 1, - }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 4, ne); - - int32_t params[] = { s0, p0, d0 }; - ggml_set_op_params(result, params, sizeof(params)); - - result->op = GGML_OP_CONV_1D_STAGE_0; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src[0] = a; - result->src[1] = b; - - return result; -} - -// ggml_conv_1d_stage_1 - -// gemm: [N, OC, OL] = [OC, IC * K] x [N*OL, IC * K] -// a: [OC, IC, K] -// b: [N, OL, IC * K] -// result: [N, OC, OL] -static struct ggml_tensor * ggml_conv_1d_stage_1( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - - bool is_node = false; - - if (a->grad || b->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - const int64_t ne[4] = { - b->ne[1], - a->ne[2], - b->ne[2], - 1, - }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); - - result->op = GGML_OP_CONV_1D_STAGE_1; - result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; - result->src[0] = a; - result->src[1] = b; - - return result; -} - -// ggml_conv_1d - GGML_API struct ggml_tensor * ggml_conv_1d( struct ggml_context * ctx, struct ggml_tensor * a, @@ -5211,43 +5137,17 @@ GGML_API struct ggml_tensor * ggml_conv_1d( int s0, int p0, int d0) { - struct ggml_tensor * result = ggml_conv_1d_stage_0(ctx, a, b, s0, p0, d0); - result = ggml_conv_1d_stage_1(ctx, a, result); - return result; -} - -// GGML_API struct ggml_tensor * ggml_conv_1d( -// struct ggml_context * ctx, -// struct ggml_tensor * a, -// struct ggml_tensor * b, -// int s0, -// int p0, -// int d0) { -// GGML_ASSERT(ggml_is_matrix(b)); -// GGML_ASSERT(a->ne[1] == b->ne[1]); -// bool is_node = false; - -// if (a->grad || b->grad) { -// GGML_ASSERT(false); // TODO: implement backward -// is_node = true; -// } + struct ggml_tensor * im2col = ggml_im2col(ctx, a, b, s0, 0, p0, 0, d0, 0, false); // [N, OL, IC * K] -// const int64_t ne[4] = { -// ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0), -// a->ne[2], 1, 1, -// }; -// struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne); + struct ggml_tensor * result = + ggml_mul_mat(ctx, + ggml_reshape_2d(ctx, im2col, im2col->ne[0], (im2col->ne[2] * im2col->ne[1])), // [N, OL, IC * K] => [N*OL, IC * K] + ggml_reshape_2d(ctx, a, (a->ne[0] * a->ne[1]), a->ne[2])); // [OC,IC, K] => [OC, IC * K] -// int32_t params[] = { s0, p0, d0 }; -// ggml_set_op_params(result, params, sizeof(params)); + result = ggml_reshape_3d(ctx, result, im2col->ne[1], a->ne[2], im2col->ne[2]); // [N, OC, OL] -// result->op = GGML_OP_CONV_1D; -// result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; -// result->src[0] = a; -// result->src[1] = b; - -// return result; -// } + return result; +} // ggml_conv_1d_ph @@ -5310,7 +5210,7 @@ GGML_API struct ggml_tensor * ggml_conv_transpose_1d( // a: [OC,IC, KH, KW] // b: [N, IC, IH, IW] // result: [N, OH, OW, IC*KH*KW] -static struct ggml_tensor * ggml_conv_2d_stage_0( +struct ggml_tensor * ggml_im2col( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, @@ -5319,9 +5219,14 @@ static struct ggml_tensor * ggml_conv_2d_stage_0( int p0, int p1, int d0, - int d1) { + int d1, + bool is_2D) { - GGML_ASSERT(a->ne[2] == b->ne[2]); + if(is_2D) { + GGML_ASSERT(a->ne[2] == b->ne[2]); + } else { + GGML_ASSERT(a->ne[1] == b->ne[1]); + } bool is_node = false; if (a->grad || b->grad) { @@ -5329,81 +5234,51 @@ static struct ggml_tensor * ggml_conv_2d_stage_0( is_node = true; } - const int64_t OH = ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1); - const int64_t OW = ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0); + const int64_t OH = is_2D ? ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1) : 0; + const int64_t OW = ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0); const int64_t ne[4] = { - a->ne[2] * a->ne[1] * a->ne[0], + is_2D ? (a->ne[2] * a->ne[1] * a->ne[0]) : a->ne[1] * a->ne[0], OW, - OH, - b->ne[3], + is_2D ? OH : b->ne[2], + is_2D ? b->ne[3] : 1, }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 4, ne); - int32_t params[] = { s0, s1, p0, p1, d0, d1 }; + struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 4, ne); + int32_t params[] = { s0, s1, p0, p1, d0, d1, (is_2D ? 1 : 0) }; ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_CONV_2D_STAGE_0; - result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; - result->src[0] = a; - result->src[1] = b; - - return result; - -} - -// gemm: [N, OC, OH, OW] = [OC, IC * KH * KW] x [N*OH*OW, IC * KH * KW] -// a: [OC, IC, KH, KW] -// b: [N, OH, OW, IC * KH * KW] -// result: [N, OC, OH, OW] -static struct ggml_tensor * ggml_conv_2d_stage_1( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - - bool is_node = false; - - if (a->grad || b->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - const int64_t ne[4] = { - b->ne[1], - b->ne[2], - a->ne[3], - b->ne[3], - }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); - - result->op = GGML_OP_CONV_2D_STAGE_1; + result->op = GGML_OP_IM2COL; result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; result->src[1] = b; return result; - } // a: [OC,IC, KH, KW] // b: [N, IC, IH, IW] // result: [N, OC, OH, OW] struct ggml_tensor * ggml_conv_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - int s0, - int s1, - int p0, - int p1, - int d0, - int d1) { + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + int s0, + int s1, + int p0, + int p1, + int d0, + int d1) { + struct ggml_tensor * im2col = ggml_im2col(ctx, a, b, s0, s1, p0, p1, d0, d1, true); // [N, OH, OW, IC * KH * KW] - struct ggml_tensor * result = ggml_conv_2d_stage_0(ctx, a, b, s0, s1, p0, p1, d0, d1); // [N, OH, OW, IC * KH * KW] - result = ggml_conv_2d_stage_1(ctx, a, result); + struct ggml_tensor * result = + ggml_mul_mat(ctx, + ggml_reshape_2d(ctx, im2col, im2col->ne[0], im2col->ne[3] * im2col->ne[2] * im2col->ne[1]), // [N, OH, OW, IC * KH * KW] => [N*OH*OW, IC * KH * KW] + ggml_reshape_2d(ctx, a, (a->ne[0] * a->ne[1] * a->ne[2]), a->ne[3])); // [OC,IC, KH, KW] => [OC, IC * KH * KW] - return result; + result = ggml_reshape_4d(ctx, result, im2col->ne[1], im2col->ne[2], a->ne[3], im2col->ne[3]); // [N, OC, OH, OW] + return result; } // ggml_conv_2d_sk_p0 @@ -9498,6 +9373,8 @@ static bool ggml_compute_forward_mul_mat_use_blas( // TODO: find the optimal values for these if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && + src0->type == GGML_TYPE_F32 && + src1->type == GGML_TYPE_F32 && (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) { /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/ @@ -9536,7 +9413,7 @@ static void ggml_compute_forward_mul_mat( // we don't support permuted src0 or src1 GGML_ASSERT(nb00 == ggml_type_size(type)); - GGML_ASSERT(nb10 == sizeof(float)); + GGML_ASSERT(nb10 == ggml_type_size(src1->type)); // dst cannot be transposed or permuted GGML_ASSERT(nb0 == sizeof(float)); @@ -11434,9 +11311,9 @@ static void ggml_compute_forward_rope_back( } } -// ggml_compute_forward_conv_1d +// ggml_compute_forward_conv_transpose_1d -static void ggml_compute_forward_conv_1d_f16_f32( +static void ggml_compute_forward_conv_transpose_1d_f16_f32( const struct ggml_compute_params * params, const struct ggml_tensor * src0, const struct ggml_tensor * src1, @@ -11453,14 +11330,7 @@ static void ggml_compute_forward_conv_1d_f16_f32( const int ith = params->ith; const int nth = params->nth; - const int nk = ne00; - - // size of the convolution row - the kernel size unrolled across all input channels - const int ew0 = nk*ne01; - - const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; - const int32_t p0 = ((const int32_t*)(dst->op_params))[1]; - const int32_t d0 = ((const int32_t*)(dst->op_params))[2]; + const int nk = 
ne00*ne01*ne02; GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); GGML_ASSERT(nb10 == sizeof(float)); @@ -11468,23 +11338,37 @@ static void ggml_compute_forward_conv_1d_f16_f32( if (params->type == GGML_TASK_INIT) { memset(params->wdata, 0, params->wsize); - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - for (int64_t i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *)((char *) src1->data + i11*nb11); - ggml_fp16_t * dst_data = wdata; + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01); + ggml_fp16_t * dst_data = wdata + i01*ne00*ne02; + for (int64_t i00 = 0; i00 < ne00; i00++) { + dst_data[i00*ne02 + i02] = src[i00]; + } + } + } + } - for (int64_t i0 = 0; i0 < ne0; i0++) { - for (int64_t ik = 0; ik < nk; ik++) { - const int idx0 = i0*s0 + ik*d0 - p0; + // permute source data (src1) from (L x Cin) to (Cin x L) + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk; + ggml_fp16_t * dst_data = wdata; - if(!(idx0 < 0 || idx0 >= ne10)) { - dst_data[i0*ew0 + i11*nk + ik] = GGML_FP32_TO_FP16(src[idx0]); - } + for (int64_t i11 = 0; i11 < ne11; i11++) { + const float * const src = (float *)((char *) src1->data + i11*nb11); + for (int64_t i10 = 0; i10 < ne10; i10++) { + dst_data[i10*ne11 + i11] = GGML_FP32_TO_FP16(src[i10]); } } } + // need to zero dst since we are accumulating into it + memset(dst->data, 0, ggml_nbytes(dst)); + return; } @@ -11492,8 +11376,10 @@ static void ggml_compute_forward_conv_1d_f16_f32( return; } + const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; + // total rows in dst - const int nr = ne2; + const int nr = ne1; // rows per thread const int dr = (nr + nth - 1)/nth; @@ -11502,22 +11388,26 @@ static void ggml_compute_forward_conv_1d_f16_f32( const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - - for (int i2 = 0; i2 < ne2; i2++) { - for (int i1 = ir0; i1 < ir1; i1++) { - float * dst_data = (float *)((char *) dst->data + i2*nb2 + i1*nb1); + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + ggml_fp16_t * const wdata_src = wdata + nk; - for (int i0 = 0; i0 < ne0; i0++) { - ggml_vec_dot_f16(ew0, dst_data + i0, - (ggml_fp16_t *) ((char *) src0->data + i1*nb02), - (ggml_fp16_t *) wdata + i2*nb2 + i0*ew0); + for (int i1 = ir0; i1 < ir1; i1++) { + float * dst_data = (float *)((char *) dst->data + i1*nb1); + ggml_fp16_t * wdata_kernel = wdata + i1*ne02*ne00; + for (int i10 = 0; i10 < ne10; i10++) { + const int i1n = i10*ne11; + for (int i00 = 0; i00 < ne00; i00++) { + float v = 0; + ggml_vec_dot_f16(ne02, &v, + (ggml_fp16_t *) wdata_src + i1n, + (ggml_fp16_t *) wdata_kernel + i00*ne02); + dst_data[i10*s0 + i00] += v; } } } } -static void ggml_compute_forward_conv_1d_f32( +static void ggml_compute_forward_conv_transpose_1d_f32( const struct ggml_compute_params * params, const struct ggml_tensor * src0, const struct ggml_tensor * src1, @@ -11534,13 +11424,7 @@ static void ggml_compute_forward_conv_1d_f32( const int ith = params->ith; const int nth = params->nth; - const int nk = ne00; - - const int ew0 = nk*ne01; - - const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; - const int32_t p0 = ((const int32_t*)(dst->op_params))[1]; - const int32_t d0 = ((const 
int32_t*)(dst->op_params))[2]; + const int nk = ne00*ne01*ne02; GGML_ASSERT(nb00 == sizeof(float)); GGML_ASSERT(nb10 == sizeof(float)); @@ -11548,23 +11432,37 @@ static void ggml_compute_forward_conv_1d_f32( if (params->type == GGML_TASK_INIT) { memset(params->wdata, 0, params->wsize); - float * const wdata = (float *) params->wdata + 0; + // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) + { + float * const wdata = (float *) params->wdata + 0; - for (int64_t i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *)((char *) src1->data + i11*nb11); - float * dst_data = wdata; + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01); + float * dst_data = wdata + i01*ne00*ne02; + for (int64_t i00 = 0; i00 < ne00; i00++) { + dst_data[i00*ne02 + i02] = src[i00]; + } + } + } + } - for (int64_t i0 = 0; i0 < ne0; i0++) { - for (int64_t ik = 0; ik < nk; ik++) { - const int idx0 = i0*s0 + ik*d0 - p0; + // prepare source data (src1) + { + float * const wdata = (float *) params->wdata + nk; + float * dst_data = wdata; - if(!(idx0 < 0 || idx0 >= ne10)) { - dst_data[i0*ew0 + i11*nk + ik] = src[idx0]; - } + for (int64_t i11 = 0; i11 < ne11; i11++) { + const float * const src = (float *)((char *) src1->data + i11*nb11); + for (int64_t i10 = 0; i10 < ne10; i10++) { + dst_data[i10*ne11 + i11] = src[i10]; } } } + // need to zero dst since we are accumulating into it + memset(dst->data, 0, ggml_nbytes(dst)); + return; } @@ -11572,8 +11470,10 @@ static void ggml_compute_forward_conv_1d_f32( return; } + const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; + // total rows in dst - const int nr = ne02; + const int nr = ne1; // rows per thread const int dr = (nr + nth - 1)/nth; @@ -11582,94 +11482,50 @@ static void ggml_compute_forward_conv_1d_f32( const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); - float * const wdata = (float *) params->wdata + 0; - - for (int i2 = 0; i2 < ne2; i2++) { - for (int i1 = ir0; i1 < ir1; i1++) { - float * dst_data = (float *)((char *) dst->data + i2*nb2 + i1*nb1); + float * const wdata = (float *) params->wdata + 0; + float * const wdata_src = wdata + nk; - for (int i0 = 0; i0 < ne0; i0++) { - ggml_vec_dot_f32(ew0, dst_data + i0, - (float *) ((char *) src0->data + i1*nb02), - (float *) wdata + i2*nb2 + i0*ew0); + for (int i1 = ir0; i1 < ir1; i1++) { + float * dst_data = (float *)((char *) dst->data + i1*nb1); + float * wdata_kernel = wdata + i1*ne02*ne00; + for (int i10 = 0; i10 < ne10; i10++) { + const int i1n = i10*ne11; + for (int i00 = 0; i00 < ne00; i00++) { + float v = 0; + ggml_vec_dot_f32(ne02, &v, + wdata_src + i1n, + wdata_kernel + i00*ne02); + dst_data[i10*s0 + i00] += v; } } } } -// TODO: reuse ggml_mul_mat or implement ggml_im2col and remove stage_0 and stage_1 -static void gemm_f16_out_f32(int64_t m, int64_t n, int64_t k, - ggml_fp16_t * A, - ggml_fp16_t * B, - float * C, - const int ith, const int nth) { - // does not seem to make a difference - int64_t m0, m1, n0, n1; - // patches per thread - if (m > n) { - n0 = 0; - n1 = n; - - // total patches in dst - const int np = m; - - // patches per thread - const int dp = (np + nth - 1)/nth; - - // patch range for this thread - m0 = dp*ith; - m1 = MIN(m0 + dp, np); - } else { - m0 = 0; - m1 = m; - - // total patches in dst - const int np = n; - - // patches per thread - const int dp = (np + nth - 1)/nth; - - // patch range for this thread - n0 = dp*ith; - n1 = MIN(n0 
+ dp, np); - } - - // block-tiling attempt - int64_t blck_n = 16; - int64_t blck_m = 16; - - // int64_t CACHE_SIZE = 2 * 1024 * 1024; // 2MB - // int64_t blck_size = CACHE_SIZE / (sizeof(float) + 2 * sizeof(ggml_fp16_t) * K); - // if (blck_size > 0) { - // blck_0 = 4; - // blck_1 = blck_size / blck_0; - // if (blck_1 < 0) { - // blck_1 = 1; - // } - // // blck_0 = (int64_t)sqrt(blck_size); - // // blck_1 = blck_0; - // } - // // printf("%zd %zd %zd %zd\n", blck_size, K, blck_0, blck_1); - - for (int j = n0; j < n1; j+=blck_n) { - for (int i = m0; i < m1; i+=blck_m) { - // printf("i j k => %d %d %d\n", i, j, K); - for (int ii = i; ii < i + blck_m && ii < m1; ii++) { - for (int jj = j; jj < j + blck_n && jj < n1; jj++) { - ggml_vec_dot_f16(k, - C + ii*n + jj, - A + ii * k, - B + jj * k); - } - } - } +static void ggml_compute_forward_conv_transpose_1d( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + const struct ggml_tensor * src1, + struct ggml_tensor * dst) { + switch (src0->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_conv_transpose_1d_f16_f32(params, src0, src1, dst); + } break; + case GGML_TYPE_F32: + { + ggml_compute_forward_conv_transpose_1d_f32(params, src0, src1, dst); + } break; + default: + { + GGML_ASSERT(false); + } break; } } -// src0: kernel [OC, IC, K] -// src1: signal [N, IC, IL] -// dst: result [N, OL, IC*K] -static void ggml_compute_forward_conv_1d_stage_0_f32( +// src0: kernel [OC, IC, KH, KW] +// src1: image [N, IC, IH, IW] +// dst: result [N, OH, OW, IC*KH*KW] +static void ggml_compute_forward_im2col_f16( const struct ggml_compute_params * params, const struct ggml_tensor * src0, const struct ggml_tensor * src1, @@ -11683,26 +11539,35 @@ static void ggml_compute_forward_conv_1d_stage_0_f32( GGML_TENSOR_BINARY_OP_LOCALS; - const int64_t N = ne12; - const int64_t IC = ne11; - const int64_t IL = ne10; - - const int64_t K = ne00; - - const int64_t OL = ne1; + const int32_t s0 = ((const int32_t *)(dst->op_params))[0]; + const int32_t s1 = ((const int32_t *)(dst->op_params))[1]; + const int32_t p0 = ((const int32_t *)(dst->op_params))[2]; + const int32_t p1 = ((const int32_t *)(dst->op_params))[3]; + const int32_t d0 = ((const int32_t *)(dst->op_params))[4]; + const int32_t d1 = ((const int32_t *)(dst->op_params))[5]; + const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1; const int ith = params->ith; const int nth = params->nth; - const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; - const int32_t p0 = ((const int32_t*)(dst->op_params))[1]; - const int32_t d0 = ((const int32_t*)(dst->op_params))[2]; + const int64_t N = is_2D ? ne13 : ne12; + const int64_t IC = is_2D ? ne12 : ne11; + const int64_t IH = is_2D ? ne11 : 1; + const int64_t IW = ne10; + + const int64_t KH = is_2D ? ne01 : 1; + const int64_t KW = ne00; + + const int64_t OH = is_2D ? ne2 : 1; + const int64_t OW = ne1; + + int ofs0 = is_2D ? nb13 : nb12; + int ofs1 = is_2D ? 
nb12 : nb11; GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); GGML_ASSERT(nb10 == sizeof(float)); if (params->type == GGML_TASK_INIT) { - memset(dst->data, 0, ggml_nbytes(dst)); return; } @@ -11710,424 +11575,27 @@ static void ggml_compute_forward_conv_1d_stage_0_f32( return; } - // im2col: [N, IC, IL] => [N, OL, IC*K] + // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] { ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; for (int64_t in = 0; in < N; in++) { - for (int64_t iol = 0; iol < OL; iol++) { - for (int64_t iic = ith; iic < IC; iic+=nth) { - - // micro kernel - ggml_fp16_t * dst_data = wdata + (in*OL + iol)*(IC*K); // [IC, K] - const float * const src_data = (float *)((char *) src1->data + in*nb12 + iic*nb11); // [IL] - - for (int64_t ik = 0; ik < K; ik++) { - const int64_t iil = iol*s0 + ik*d0 - p0; - - if (!(iil < 0 || iil >= IL)) { - dst_data[iic*K + ik] = GGML_FP32_TO_FP16(src_data[iil]); - } - } - } - } - } - } -} - -// gemm: [N, OC, OL] = [OC, IC * K] x [N*OL, IC * K] -// src0: [OC, IC, K] -// src1: [N, OL, IC * K] -// result: [N, OC, OL] -static void ggml_compute_forward_conv_1d_stage_1_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F16); - GGML_ASSERT( dst->type == GGML_TYPE_F32); - - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - if (params->type == GGML_TASK_INIT) { - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - GGML_TENSOR_BINARY_OP_LOCALS; - - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb0 == sizeof(float)); - - const int N = ne12; - const int OL = ne11; - - const int OC = ne02; - const int IC = ne01; - const int K = ne00; - - const int ith = params->ith; - const int nth = params->nth; - - int64_t m = OC; - int64_t n = OL; - int64_t k = IC * K; - - // [N, OC, OL] = [OC, IC * K] x [N*OL, IC * K] - for (int i = 0; i < N; i++) { - ggml_fp16_t * A = (ggml_fp16_t *)src0->data; // [m, k] - ggml_fp16_t * B = (ggml_fp16_t *)src1->data + i * m * k; // [n, k] - float * C = (float *)dst->data + i * m * n; // [m, n] - - gemm_f16_out_f32(m, n, k, A, B, C, ith, nth); - } -} - -static void ggml_compute_forward_conv_1d( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch(src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_conv_1d_f16_f32(params, src0, src1, dst); - } break; - case GGML_TYPE_F32: - { - ggml_compute_forward_conv_1d_f32(params, src0, src1, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -static void ggml_compute_forward_conv_1d_stage_0( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch(src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_conv_1d_stage_0_f32(params, src0, src1, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -static void ggml_compute_forward_conv_1d_stage_1( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch(src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_conv_1d_stage_1_f16(params, src0, src1, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// 
ggml_compute_forward_conv_transpose_1d - -static void ggml_compute_forward_conv_transpose_1d_f16_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); - - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - const int nk = ne00*ne01*ne02; - - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (params->type == GGML_TASK_INIT) { - memset(params->wdata, 0, params->wsize); - - // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01); - ggml_fp16_t * dst_data = wdata + i01*ne00*ne02; - for (int64_t i00 = 0; i00 < ne00; i00++) { - dst_data[i00*ne02 + i02] = src[i00]; - } - } - } - } - - // permute source data (src1) from (L x Cin) to (Cin x L) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk; - ggml_fp16_t * dst_data = wdata; - - for (int64_t i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *)((char *) src1->data + i11*nb11); - for (int64_t i10 = 0; i10 < ne10; i10++) { - dst_data[i10*ne11 + i11] = GGML_FP32_TO_FP16(src[i10]); - } - } - } - - // need to zero dst since we are accumulating into it - memset(dst->data, 0, ggml_nbytes(dst)); - - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; - - // total rows in dst - const int nr = ne1; - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - ggml_fp16_t * const wdata_src = wdata + nk; - - for (int i1 = ir0; i1 < ir1; i1++) { - float * dst_data = (float *)((char *) dst->data + i1*nb1); - ggml_fp16_t * wdata_kernel = wdata + i1*ne02*ne00; - for (int i10 = 0; i10 < ne10; i10++) { - const int i1n = i10*ne11; - for (int i00 = 0; i00 < ne00; i00++) { - float v = 0; - ggml_vec_dot_f16(ne02, &v, - (ggml_fp16_t *) wdata_src + i1n, - (ggml_fp16_t *) wdata_kernel + i00*ne02); - dst_data[i10*s0 + i00] += v; - } - } - } -} - -static void ggml_compute_forward_conv_transpose_1d_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); - - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - const int nk = ne00*ne01*ne02; - - GGML_ASSERT(nb00 == sizeof(float)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (params->type == GGML_TASK_INIT) { - memset(params->wdata, 0, params->wsize); - - // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) - { - float * const wdata = (float *) params->wdata + 0; - - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const float * const src = (float *)((char *) 
src0->data + i02*nb02 + i01*nb01); - float * dst_data = wdata + i01*ne00*ne02; - for (int64_t i00 = 0; i00 < ne00; i00++) { - dst_data[i00*ne02 + i02] = src[i00]; - } - } - } - } - - // prepare source data (src1) - { - float * const wdata = (float *) params->wdata + nk; - float * dst_data = wdata; - - for (int64_t i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *)((char *) src1->data + i11*nb11); - for (int64_t i10 = 0; i10 < ne10; i10++) { - dst_data[i10*ne11 + i11] = src[i10]; - } - } - } - - // need to zero dst since we are accumulating into it - memset(dst->data, 0, ggml_nbytes(dst)); - - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; - - // total rows in dst - const int nr = ne1; - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - float * const wdata = (float *) params->wdata + 0; - float * const wdata_src = wdata + nk; - - for (int i1 = ir0; i1 < ir1; i1++) { - float * dst_data = (float *)((char *) dst->data + i1*nb1); - float * wdata_kernel = wdata + i1*ne02*ne00; - for (int i10 = 0; i10 < ne10; i10++) { - const int i1n = i10*ne11; - for (int i00 = 0; i00 < ne00; i00++) { - float v = 0; - ggml_vec_dot_f32(ne02, &v, - wdata_src + i1n, - wdata_kernel + i00*ne02); - dst_data[i10*s0 + i00] += v; - } - } - } -} - -static void ggml_compute_forward_conv_transpose_1d( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_conv_transpose_1d_f16_f32(params, src0, src1, dst); - } break; - case GGML_TYPE_F32: - { - ggml_compute_forward_conv_transpose_1d_f32(params, src0, src1, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_conv_2d - -// src0: kernel [OC, IC, KH, KW] -// src1: image [N, IC, IH, IW] -// dst: result [N, OH, OW, IC*KH*KW] -static void ggml_compute_forward_conv_2d_stage_0_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F16); - - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int64_t N = ne13; - const int64_t IC = ne12; - const int64_t IH = ne11; - const int64_t IW = ne10; - - // const int64_t OC = ne03; - // const int64_t IC = ne02; - const int64_t KH = ne01; - const int64_t KW = ne00; - - const int64_t OH = ne2; - const int64_t OW = ne1; - - const int ith = params->ith; - const int nth = params->nth; - - const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; - const int32_t s1 = ((const int32_t*)(dst->op_params))[1]; - const int32_t p0 = ((const int32_t*)(dst->op_params))[2]; - const int32_t p1 = ((const int32_t*)(dst->op_params))[3]; - const int32_t d0 = ((const int32_t*)(dst->op_params))[4]; - const int32_t d1 = ((const int32_t*)(dst->op_params))[5]; - - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (params->type == GGML_TASK_INIT) { - memset(dst->data, 0, ggml_nbytes(dst)); - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] - { - ggml_fp16_t * const wdata = (ggml_fp16_t 
*) dst->data; - - for (int64_t in = 0; in < N; in++) { - for (int64_t ioh = 0; ioh < OH; ioh++) { - for (int64_t iow = 0; iow < OW; iow++) { - for (int64_t iic = ith; iic < IC; iic+=nth) { + for (int64_t ioh = 0; ioh < OH; ioh++) { // 1 + for (int64_t iow = 0; iow < OW; iow++) { + for (int64_t iic = ith; iic < IC; iic += nth) { // micro kernel ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW] - const float * const src_data = (float *)((char *) src1->data + in*nb13 + iic*nb12); // [IH, IW] + const float * const src_data = (float *)((char *) src1->data + in*ofs0 + iic*ofs1); // [IH, IW] - for (int64_t ikh = 0; ikh < KH; ikh++) { + for (int64_t ikh = 0; ikh < KH; ikh++) { // 1 for (int64_t ikw = 0; ikw < KW; ikw++) { const int64_t iiw = iow*s0 + ikw*d0 - p0; const int64_t iih = ioh*s1 + ikh*d1 - p1; - if (!(iih < 0 || iih >= IH || iiw < 0 || iiw >= IW)) { + if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { + dst_data[iic*(KH*KW) + ikh*KW + ikw] = 0; + } else { dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]); } } @@ -12139,223 +11607,7 @@ static void ggml_compute_forward_conv_2d_stage_0_f32( } } -// gemm: [N, OC, OH, OW] = [OC, IC * KH * KW] x [N*OH*OW, IC * KH * KW] -// src0: [OC, IC, KH, KW] -// src1: [N, OH, OW, IC * KH * KW] -// result: [N, OC, OH, OW] -static void ggml_compute_forward_conv_2d_stage_1_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F16); - GGML_ASSERT( dst->type == GGML_TYPE_F32); - - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - if (params->type == GGML_TASK_INIT) { - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - GGML_TENSOR_BINARY_OP_LOCALS; - - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb0 == sizeof(float)); - - const int N = ne13; - const int OH = ne12; - const int OW = ne11; - - const int OC = ne03; - const int IC = ne02; - const int KH = ne01; - const int KW = ne00; - - const int ith = params->ith; - const int nth = params->nth; - - int64_t m = OC; - int64_t n = OH * OW; - int64_t k = IC * KH * KW; - - // [N, OC, OH, OW] = [OC, IC * KH * KW] x [N*OH*OW, IC * KH * KW] - for (int i = 0; i < N; i++) { - ggml_fp16_t * A = (ggml_fp16_t *)src0->data; // [m, k] - ggml_fp16_t * B = (ggml_fp16_t *)src1->data + i * m * k; // [n, k] - float * C = (float *)dst->data + i * m * n; // [m, n] - - gemm_f16_out_f32(m, n, k, A, B, C, ith, nth); - } -} - -static void ggml_compute_forward_conv_2d_f16_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); - - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - GGML_TENSOR_BINARY_OP_LOCALS - - // src1: image [N, IC, IH, IW] - // src0: kernel [OC, IC, KH, KW] - // dst: result [N, OC, OH, OW] - // ne12: IC - // ne0: OW - // ne1: OH - // nk0: KW - // nk1: KH - // ne13: N - - const int N = ne13; - const int IC = ne12; - const int IH = ne11; - const int IW = ne10; - - const int OC = ne03; - // const int IC = ne02; - const int KH = ne01; - const int KW = ne00; - - const int OH = ne1; - const int OW = ne0; - - const int ith = params->ith; - const int nth = params->nth; - - // 
const int nk0 = ne00; - // const int nk1 = ne01; - - // size of the convolution row - the kernel size unrolled across all channels - // const int ew0 = nk0*nk1*ne02; - // ew0: IC*KH*KW - - const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; - const int32_t s1 = ((const int32_t*)(dst->op_params))[1]; - const int32_t p0 = ((const int32_t*)(dst->op_params))[2]; - const int32_t p1 = ((const int32_t*)(dst->op_params))[3]; - const int32_t d0 = ((const int32_t*)(dst->op_params))[4]; - const int32_t d1 = ((const int32_t*)(dst->op_params))[5]; - - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (params->type == GGML_TASK_INIT) { - memset(params->wdata, 0, params->wsize); - - // prepare source data (src1) - // im2col: [N, IC, IH, IW] => [N*OH*OW, IC*KH*KW] - - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - - for (int in = 0; in < N; in++) { - for (int iic = 0; iic < IC; iic++) { - for (int ioh = 0; ioh < OH; ioh++) { - for (int iow = 0; iow < OW; iow++) { - - // micro kernel - ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW] - const float * const src_data = (float *)((char *) src1->data + in*nb13 + iic*nb12); // [IH, IW] - - for (int ikh = 0; ikh < KH; ikh++) { - for (int ikw = 0; ikw < KW; ikw++) { - const int iiw = iow*s0 + ikw*d0 - p0; - const int iih = ioh*s1 + ikh*d1 - p1; - - if (!(iih < 0 || iih >= IH || iiw < 0 || iiw >= IW)) { - dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]); - } - } - } - } - } - } - } - } - - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - // wdata: [N*OH*OW, IC*KH*KW] - // dst: result [N, OC, OH, OW] - // src0: kernel [OC, IC, KH, KW] - - int64_t m = OC; - int64_t n = OH * OW; - int64_t k = IC * KH * KW; - - // [N, OC, OH, OW] = [OC, IC * KH * KW] x [N*OH*OW, IC * KH * KW] - for (int i = 0; i < N; i++) { - ggml_fp16_t * A = (ggml_fp16_t *)src0->data; // [m, k] - ggml_fp16_t * B = (ggml_fp16_t *)wdata + i * m * k; // [n, k] - float * C = (float *)dst->data + i * m * n; // [m * k] - - gemm_f16_out_f32(m, n, k, A, B, C, ith, nth); - } -} - -static void ggml_compute_forward_conv_2d( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_conv_2d_f16_f32(params, src0, src1, dst); - } break; - case GGML_TYPE_F32: - { - //ggml_compute_forward_conv_2d_f32(params, src0, src1, dst); - GGML_ASSERT(false); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -static void ggml_compute_forward_conv_2d_stage_0( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_conv_2d_stage_0_f32(params, src0, src1, dst); - } break; - case GGML_TYPE_F32: - { - GGML_ASSERT(false); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -static void ggml_compute_forward_conv_2d_stage_1( +static void ggml_compute_forward_im2col( const struct ggml_compute_params * params, const struct ggml_tensor * src0, const struct ggml_tensor * src1, @@ -12363,7 +11615,7 @@ static void ggml_compute_forward_conv_2d_stage_1( switch (src0->type) { case GGML_TYPE_F16: { - ggml_compute_forward_conv_2d_stage_1_f16(params, src0, src1, dst); + 
ggml_compute_forward_im2col_f16(params, src0, src1, dst); } break; case GGML_TYPE_F32: { @@ -14580,33 +13832,13 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_clamp(params, tensor->src[0], tensor); } break; - case GGML_OP_CONV_1D: - { - ggml_compute_forward_conv_1d(params, tensor->src[0], tensor->src[1], tensor); - } break; - case GGML_OP_CONV_1D_STAGE_0: - { - ggml_compute_forward_conv_1d_stage_0(params, tensor->src[0], tensor->src[1], tensor); - } break; - case GGML_OP_CONV_1D_STAGE_1: - { - ggml_compute_forward_conv_1d_stage_1(params, tensor->src[0], tensor->src[1], tensor); - } break; case GGML_OP_CONV_TRANSPOSE_1D: { ggml_compute_forward_conv_transpose_1d(params, tensor->src[0], tensor->src[1], tensor); } break; - case GGML_OP_CONV_2D: - { - ggml_compute_forward_conv_2d(params, tensor->src[0], tensor->src[1], tensor); - } break; - case GGML_OP_CONV_2D_STAGE_0: - { - ggml_compute_forward_conv_2d_stage_0(params, tensor->src[0], tensor->src[1], tensor); - } break; - case GGML_OP_CONV_2D_STAGE_1: + case GGML_OP_IM2COL: { - ggml_compute_forward_conv_2d_stage_1(params, tensor->src[0], tensor->src[1], tensor); + ggml_compute_forward_im2col(params, tensor->src[0], tensor->src[1], tensor); } break; case GGML_OP_CONV_TRANSPOSE_2D: { @@ -15588,31 +14820,11 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor { GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_CONV_1D: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_CONV_1D_STAGE_0: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_CONV_1D_STAGE_1: - { - GGML_ASSERT(false); // TODO: not implemented - } break; case GGML_OP_CONV_TRANSPOSE_1D: { GGML_ASSERT(false); // TODO: not implemented } break; - case GGML_OP_CONV_2D: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_CONV_2D_STAGE_0: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_CONV_2D_STAGE_1: + case GGML_OP_IM2COL: { GGML_ASSERT(false); // TODO: not implemented } break; @@ -16341,31 +15553,11 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { { n_tasks = 1; //TODO } break; - case GGML_OP_CONV_1D: - { - n_tasks = n_threads; - } break; - case GGML_OP_CONV_1D_STAGE_0: - { - n_tasks = n_threads; - } break; - case GGML_OP_CONV_1D_STAGE_1: - { - n_tasks = n_threads; - } break; case GGML_OP_CONV_TRANSPOSE_1D: { n_tasks = n_threads; } break; - case GGML_OP_CONV_2D: - { - n_tasks = n_threads; - } break; - case GGML_OP_CONV_2D_STAGE_0: - { - n_tasks = n_threads; - } break; - case GGML_OP_CONV_2D_STAGE_1: + case GGML_OP_IM2COL: { n_tasks = n_threads; } break; @@ -16450,6 +15642,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { } break; default: { + printf("%s: op %s not implemented\n", __func__, ggml_op_name(node->op)); GGML_ASSERT(false); } break; } @@ -16652,38 +15845,6 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks; } } break; - case GGML_OP_CONV_1D: - { - GGML_ASSERT(node->src[0]->ne[3] == 1); - GGML_ASSERT(node->src[1]->ne[2] == 1); - GGML_ASSERT(node->src[1]->ne[3] == 1); - - const int64_t ne00 = node->src[0]->ne[0]; - const int64_t ne01 = node->src[0]->ne[1]; - const int64_t ne02 = node->src[0]->ne[2]; - - const int64_t ne10 = node->src[1]->ne[0]; - const int64_t ne11 = node->src[1]->ne[1]; - - const int64_t ne0 = 
node->ne[0]; - const int64_t ne1 = node->ne[1]; - const int64_t nk = ne00; - const int64_t ew0 = nk * ne01; - - UNUSED(ne02); - UNUSED(ne10); - UNUSED(ne11); - - if (node->src[0]->type == GGML_TYPE_F16 && - node->src[1]->type == GGML_TYPE_F32) { - cur = sizeof(ggml_fp16_t)*(ne0*ne1*ew0); - } else if (node->src[0]->type == GGML_TYPE_F32 && - node->src[1]->type == GGML_TYPE_F32) { - cur = sizeof(float)*(ne0*ne1*ew0); - } else { - GGML_ASSERT(false); - } - } break; case GGML_OP_CONV_TRANSPOSE_1D: { GGML_ASSERT(node->src[0]->ne[3] == 1); @@ -16709,37 +15870,9 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { GGML_ASSERT(false); } } break; - case GGML_OP_CONV_2D: + case GGML_OP_IM2COL: { - const int64_t ne00 = node->src[0]->ne[0]; // W - const int64_t ne01 = node->src[0]->ne[1]; // H - const int64_t ne02 = node->src[0]->ne[2]; // C - const int64_t ne03 = node->src[0]->ne[3]; // N - - const int64_t ne10 = node->src[1]->ne[0]; // W - const int64_t ne11 = node->src[1]->ne[1]; // H - const int64_t ne12 = node->src[1]->ne[2]; // C - - const int64_t ne0 = node->ne[0]; - const int64_t ne1 = node->ne[1]; - const int64_t ne2 = node->ne[2]; - const int64_t ne3 = node->ne[3]; - const int64_t nk = ne00*ne01; - const int64_t ew0 = nk * ne02; - - UNUSED(ne03); - UNUSED(ne2); - - if (node->src[0]->type == GGML_TYPE_F16 && - node->src[1]->type == GGML_TYPE_F32) { - // im2col: [N*OH*OW, IC*KH*KW] - cur = sizeof(ggml_fp16_t)*(ne3*ne0*ne1*ew0); - } else if (node->src[0]->type == GGML_TYPE_F32 && - node->src[1]->type == GGML_TYPE_F32) { - cur = sizeof(float)* (ne10*ne11*ne12); - } else { - GGML_ASSERT(false); - } + n_tasks = n_threads; } break; case GGML_OP_CONV_TRANSPOSE_2D: { diff --git a/ggml.h b/ggml.h index 0118c99dbafddd..8e6b646066b7a4 100644 --- a/ggml.h +++ b/ggml.h @@ -403,13 +403,8 @@ extern "C" { GGML_OP_ROPE_BACK, GGML_OP_ALIBI, GGML_OP_CLAMP, - GGML_OP_CONV_1D, - GGML_OP_CONV_1D_STAGE_0, // internal - GGML_OP_CONV_1D_STAGE_1, // internal GGML_OP_CONV_TRANSPOSE_1D, - GGML_OP_CONV_2D, - GGML_OP_CONV_2D_STAGE_0, // internal - GGML_OP_CONV_2D_STAGE_1, // internal + GGML_OP_IM2COL, GGML_OP_CONV_TRANSPOSE_2D, GGML_OP_POOL_1D, GGML_OP_POOL_2D, @@ -1403,6 +1398,18 @@ extern "C" { float min, float max); + GGML_API struct ggml_tensor * ggml_im2col( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + int s0, + int s1, + int p0, + int p1, + int d0, + int d1, + bool is_2D); + GGML_API struct ggml_tensor * ggml_conv_1d( struct ggml_context * ctx, struct ggml_tensor * a, From bd90eca237b498dd106d315dcb9ad3e6fae3906f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Mon, 13 Nov 2023 18:20:52 +0300 Subject: [PATCH 091/206] llava : fix regression for square images in #3613 (#4056) --- examples/llava/clip.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index c26ee4957090c8..fc0656c231a0cf 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -761,7 +761,7 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip temp->ny = img->ny; temp->size = img->size; temp->data = new uint8_t[temp->size](); - *temp->data = *img->data; // copy + memcpy(&temp->data[0], &img->data[0], temp->size); // copy } const int nx = temp->nx; From b46d12f86d56bef3dc8b596dfb3d22f3b08102be Mon Sep 17 00:00:00 2001 From: afrideva <95653597+afrideva@users.noreply.github.com> Date: Mon, 13 Nov 2023 17:03:40 -0800 Subject: [PATCH 092/206] 
convert.py: also look for plain model.safetensors (#4043) * add safetensors to convert.py help message * Check for single-file safetensors model * Update convert.py "model" option help message * revert convert.py help message change --- convert.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/convert.py b/convert.py index a4b87e08849bcc..3d6216f1d4e7ab 100755 --- a/convert.py +++ b/convert.py @@ -1036,7 +1036,8 @@ def load_some_model(path: Path) -> ModelPlus: # Be extra-friendly and accept either a file or a directory: if path.is_dir(): # Check if it's a set of safetensors files first - files = list(path.glob("model-00001-of-*.safetensors")) + globs = ["model-00001-of-*.safetensors", "model.safetensors"] + files = [file for glob in globs for file in path.glob(glob)] if not files: # Try the PyTorch patterns too, with lower priority globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin"] @@ -1123,7 +1124,7 @@ def main(args_in: list[str] | None = None) -> None: parser.add_argument("--outtype", choices=output_choices, help="output format - note: q8_0 may be very slow (default: f16 or f32 based on input)") parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file") parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") - parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)") + parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.pth, *.pt, *.bin, *.safetensors)") parser.add_argument("--vocabtype", choices=["spm", "bpe"], help="vocab format (default: spm)", default="spm") parser.add_argument("--ctx", type=int, help="model training context (default: based on input)") parser.add_argument("--concurrency", type=int, help=f"concurrency used for conversion (default: {DEFAULT_CONCURRENCY})", default = DEFAULT_CONCURRENCY) From 36eed0c42c5b0bf74af81fb9243d262014f9382f Mon Sep 17 00:00:00 2001 From: Galunid Date: Tue, 14 Nov 2023 11:17:12 +0100 Subject: [PATCH 093/206] stablelm : StableLM support (#3586) * Add support for stablelm-3b-4e1t * Supports GPU offloading of (n-1) layers --- README.md | 1 + convert-hf-to-gguf.py | 30 ++- gguf-py/gguf/constants.py | 17 ++ llama.cpp | 284 +++++++++++++++++++++++- models/ggml-vocab-stablelm-3b-4e1t.gguf | Bin 0 -> 1768581 bytes tests/CMakeLists.txt | 2 + 6 files changed, 322 insertions(+), 12 deletions(-) create mode 100644 models/ggml-vocab-stablelm-3b-4e1t.gguf diff --git a/README.md b/README.md index c7d23277845bc3..4de06476569f92 100644 --- a/README.md +++ b/README.md @@ -93,6 +93,7 @@ as the main playground for developing new features for the [ggml](https://github - [X] [Persimmon 8B](https://github.com/ggerganov/llama.cpp/pull/3410) - [X] [MPT](https://github.com/ggerganov/llama.cpp/pull/3417) - [X] [Bloom](https://github.com/ggerganov/llama.cpp/pull/3553) +- [X] [StableLM-3b-4e1t](https://github.com/ggerganov/llama.cpp/pull/3586) **Bindings:** diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index f7fe29fd4262ac..e7db7591260af8 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -150,8 +150,6 @@ def load_hparams(dir_model): @staticmethod def from_model_architecture(model_architecture): - if model_architecture == "StableLMEpochForCausalLM": - return StableLMModel if model_architecture == "GPTNeoXForCausalLM": return GPTNeoXModel if model_architecture == 
"BloomForCausalLM": @@ -168,6 +166,8 @@ def from_model_architecture(model_architecture): return RefactModel if model_architecture == "PersimmonForCausalLM": return PersimmonModel + if model_architecture in ("StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM"): + return StableLMModel return Model def _is_model_safetensors(self) -> bool: @@ -201,6 +201,8 @@ def _get_model_architecture(self) -> gguf.MODEL_ARCH: return gguf.MODEL_ARCH.REFACT if arch == "PersimmonForCausalLM": return gguf.MODEL_ARCH.PERSIMMON + if arch in ("StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM"): + return gguf.MODEL_ARCH.STABLELM raise NotImplementedError(f'Architecture "{arch}" not supported!') @@ -294,15 +296,6 @@ def _set_vocab_sentencepiece(self): special_vocab.add_to_gguf(self.gguf_writer) -class StableLMModel(Model): - def set_gguf_parameters(self): - super().set_gguf_parameters() - self.gguf_writer.add_rope_dimension_count( - int(self.hparams["rope_pct"] * (self.hparams["hidden_size"] // self.hparams["num_attention_heads"])), - ) - self.gguf_writer.add_layer_norm_eps(1e-5) - - class GPTNeoXModel(Model): def set_gguf_parameters(self): block_count = self.hparams["num_hidden_layers"] @@ -824,6 +817,21 @@ def write_tensors(self): self.gguf_writer.add_tensor(new_name, data) +class StableLMModel(Model): + def set_gguf_parameters(self): + hparams = self.hparams + block_count = hparams["num_hidden_layers"] + + self.gguf_writer.add_name(dir_model.name) + self.gguf_writer.add_context_length(hparams["max_position_embeddings"]) + self.gguf_writer.add_embedding_length(hparams["hidden_size"]) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"]) + self.gguf_writer.add_rope_dimension_count(int(hparams["rope_pct"]*(hparams["hidden_size"] // hparams["num_attention_heads"]))) + self.gguf_writer.add_head_count(hparams["num_attention_heads"]) + self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True) + self.gguf_writer.add_layer_norm_eps(1e-5) + ###### CONVERSION LOGIC ###### def parse_args() -> argparse.Namespace: diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index bf1ccf66922d0f..7f63361bd32bc3 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -90,6 +90,7 @@ class MODEL_ARCH(IntEnum): REFACT = auto() BERT = auto() BLOOM = auto() + STABLELM = auto() class MODEL_TENSOR(IntEnum): @@ -129,6 +130,7 @@ class MODEL_TENSOR(IntEnum): MODEL_ARCH.REFACT: "refact", MODEL_ARCH.BERT: "bert", MODEL_ARCH.BLOOM: "bloom", + MODEL_ARCH.STABLELM: "stablelm", } TENSOR_NAMES: dict[MODEL_TENSOR, str] = { @@ -299,6 +301,21 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.FFN_DOWN, MODEL_TENSOR.FFN_UP, ], + MODEL_ARCH.STABLELM: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], MODEL_ARCH.GPT2: [ # TODO ], diff --git a/llama.cpp b/llama.cpp index 76ee4ea2300e86..01522fdb4e74f2 100644 --- a/llama.cpp +++ b/llama.cpp @@ -192,6 +192,7 @@ enum llm_arch { LLM_ARCH_PERSIMMON, LLM_ARCH_REFACT, LLM_ARCH_BLOOM, + LLM_ARCH_STABLELM, LLM_ARCH_UNKNOWN, }; @@ -207,6 +208,7 @@ static std::map LLM_ARCH_NAMES = { { LLM_ARCH_PERSIMMON, "persimmon" }, { LLM_ARCH_REFACT, "refact" }, { LLM_ARCH_BLOOM, "bloom" }, 
+ { LLM_ARCH_STABLELM, "stablelm" }, }; enum llm_kv { @@ -495,6 +497,25 @@ static std::map> LLM_TENSOR_NAMES = { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, }, }, + { + LLM_ARCH_STABLELM, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { LLM_ARCH_UNKNOWN, { @@ -2216,6 +2237,16 @@ static void llm_load_hparams( default: model.type = e_model::MODEL_UNKNOWN; } } break; + case LLM_ARCH_STABLELM: + { + GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS)); + + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_3B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + default: (void)0; } @@ -3087,6 +3118,81 @@ static void llm_load_tensors( } } } break; + case LLM_ARCH_STABLELM: + { + model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + + // output + { + ggml_backend_type backend_norm; + ggml_backend_type backend_output; + + if (n_gpu_layers > int(n_layer)) { + // norm is not performance relevant on its own but keeping it in VRAM reduces data copying + // on Windows however this is detrimental unless everything is on the GPU +#ifndef _WIN32 + backend_norm = llama_backend_offload; +#else + backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : llama_backend_offload; +#endif // _WIN32 + + backend_output = llama_backend_offload_split; + } else { + backend_norm = GGML_BACKEND_CPU; + backend_output = GGML_BACKEND_CPU; + } + + model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm); + model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); + model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); + + if (backend_norm == GGML_BACKEND_GPU) { + vram_weights += ggml_nbytes(model.output_norm); + } + if (backend_output == GGML_BACKEND_GPU_SPLIT) { + vram_weights += ggml_nbytes(model.output); + } + } + + const uint32_t n_ff = hparams.n_ff; + + const int i_gpu_start = n_layer - n_gpu_layers; + + model.layers.resize(n_layer); + + for (uint32_t i = 0; i < n_layer; ++i) { + /* + llama_model_loader: - tensor 4: blk.0.attn_output.weight f16 [ 2560, 2560, 1, 1 ] + */ + const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT + const ggml_backend_type backend_split = int(i) < i_gpu_start ? 
GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT + + auto & layer = model.layers[i]; + + layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); + layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend); + + layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split); + layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split); + layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split); + layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); + + layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); + layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend); + + layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); + layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + + if (backend == GGML_BACKEND_GPU) { + vram_weights += + ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) + + ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) + + ggml_nbytes(layer.ffn_gate) + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up); + } + } + } break; + default: throw std::runtime_error("unknown architecture"); } @@ -4565,6 +4671,177 @@ struct llm_build_context { return gf; } + + struct ggml_cgraph * build_stablelm() { + struct ggml_cgraph * gf = ggml_new_graph(ctx0); + + struct ggml_tensor * cur; + struct ggml_tensor * inpL; + + inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb); + cb(inpL, "inp_embd", -1); + + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); + cb(inp_pos, "inp_pos", -1); + + // KQ_scale + struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); + cb(KQ_scale, "KQ_scale", -1); + + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1); + cb(KQ_mask, "KQ_mask", -1); + + // shift the entire K-cache if needed + if (do_rope_shift) { + llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, hparams.n_rot, freq_base, freq_scale, cb); + } + + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * inpSA = inpL; + + // norm + cur = llm_build_norm(ctx0, inpL, hparams, + model.layers[il].attn_norm, + model.layers[il].attn_norm_b, + LLM_NORM, cb, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, model.layers[il].wq, cur); + cb(tmpq, "tmpq", il); + + struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, model.layers[il].wk, cur); + cb(tmpk, "tmpk", il); + + struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + // RoPE the first n_rot of q/k, pass the other half, and concat. 
+ struct ggml_tensor * qrot = ggml_cont(ctx0, ggml_view_3d( + ctx0, tmpq, hparams.n_rot, n_head, n_tokens, + ggml_element_size(tmpq) * n_embd_head, + ggml_element_size(tmpq) * n_embd_head * n_head, + 0 + )); + cb(qrot, "qrot", il); + + struct ggml_tensor * krot = ggml_cont(ctx0, ggml_view_3d( + ctx0, tmpk, hparams.n_rot, n_head, n_tokens, + ggml_element_size(tmpk) * n_embd_head, + ggml_element_size(tmpk) * n_embd_head * n_head_kv, + 0 + )); + cb(krot, "krot", il); + + // get the second half of tmpq, e.g tmpq[n_rot:, :, :] + struct ggml_tensor * qpass = ggml_view_3d( + ctx0, tmpq, (n_embd_head - hparams.n_rot), n_head, n_tokens, + ggml_element_size(tmpq) * n_embd_head, + ggml_element_size(tmpq) * n_embd_head * n_head, + ggml_element_size(tmpq) * hparams.n_rot + ); + cb(qpass, "qpass", il); + + struct ggml_tensor * kpass = ggml_view_3d( + ctx0, tmpk, (n_embd_head - hparams.n_rot), n_head_kv, n_tokens, + ggml_element_size(tmpk) * (n_embd_head), + ggml_element_size(tmpk) * (n_embd_head) * n_head_kv, + ggml_element_size(tmpk) * hparams.n_rot + ); + cb(kpass, "kpass", il); + + struct ggml_tensor * qrotated = ggml_rope_custom( + ctx0, qrot, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(qrotated, "qrotated", il); + + struct ggml_tensor * krotated = ggml_rope_custom( + ctx0, krot, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx, + freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(krotated, "krotated", il); + + // ggml currently only supports concatenation on dim=2 + // so we need to permute qrot, qpass, concat, then permute back. + qrotated = ggml_cont(ctx0, ggml_permute(ctx0, qrotated, 2, 1, 0, 3)); + cb(qrotated, "qrotated", il); + + krotated = ggml_cont(ctx0, ggml_permute(ctx0, krotated, 2, 1, 0, 3)); + cb(krotated, "krotated", il); + + qpass = ggml_cont(ctx0, ggml_permute(ctx0, qpass, 2, 1, 0, 3)); + cb(qpass, "qpass", il); + + kpass = ggml_cont(ctx0, ggml_permute(ctx0, kpass, 2, 1, 0, 3)); + cb(kpass, "kpass", il); + + struct ggml_tensor * Qcur = ggml_concat(ctx0, qrotated, qpass); + cb(Qcur, "Qcur", il); + + struct ggml_tensor * Kcur = ggml_concat(ctx0, krotated, kpass); + cb(Kcur, "Kcur", il); + + struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 2, 1, 0, 3)); + cb(Q, "Q", il); + + Kcur = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 2, 1, 0, 3)); + cb(Kcur, "Kcur", il); + + llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il); + + cur = llm_build_kqv(ctx0, hparams, kv_self, + model.layers[il].wo, NULL, + Q, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il); + cb(cur, "kqv_out", il); + } + + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + { + cur = llm_build_norm(ctx0, ffn_inp, hparams, + model.layers[il].ffn_norm, + model.layers[il].ffn_norm_b, + LLM_NORM, cb, il); + cb(cur, "ffn_norm", il); + + cur = llm_build_ffn(ctx0, cur, + model.layers[il].ffn_up, NULL, + model.layers[il].ffn_gate, NULL, + model.layers[il].ffn_down, NULL, + LLM_FFN_SILU, LLM_FFN_PAR, cb, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = llm_build_norm(ctx0, cur, hparams, + model.output_norm, + model.output_norm_b, + LLM_NORM, cb, -1); + cb(cur, "result_norm", -1); + + // lm_head + cur = ggml_mul_mat(ctx0, model.output, cur); + cb(cur, "result_output", -1); + + 
ggml_build_forward_expand(gf, cur); + + return gf; + } }; // @@ -5034,6 +5311,10 @@ static struct ggml_cgraph * llama_build_graph( { result = llm.build_mpt(); } break; + case LLM_ARCH_STABLELM: + { + result = llm.build_stablelm(); + } break; default: GGML_ASSERT(false); } @@ -5209,7 +5490,8 @@ static int llama_decode_internal( model.arch == LLM_ARCH_FALCON || model.arch == LLM_ARCH_REFACT || model.arch == LLM_ARCH_MPT || - model.arch == LLM_ARCH_STARCODER; + model.arch == LLM_ARCH_STARCODER || + model.arch == LLM_ARCH_STABLELM; const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3; if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) { diff --git a/models/ggml-vocab-stablelm-3b-4e1t.gguf b/models/ggml-vocab-stablelm-3b-4e1t.gguf new file mode 100644 index 0000000000000000000000000000000000000000..ebb0cdb7d6a4ac313f45758010a6fda6ac530443 GIT binary patch literal 1768581 zcmd?S`IlVRapx(|zV`IYnYE{u+BB*FU1ae|XgVPmlh)TaW8$H9T8Q`+Lp2?#~y~`rqL<``7+v^Qt$jhok>s z_3}x__ixzGy!&50`&d2d z)q_DZ-pvpGdms6T&o}z(4cq>shy8Xjo?B->$-n>j+0J(Adhl?ko$goDLGJIr{>VrE zn`fV$wv+nopc&QUS<{Zyu?_q~4_EVfJ)ZO2*}b|N=;5E_;eYaM&x=|8a8dze;P`M_ z&zixa8vffn^%Fn!?4#;;SRK^UhvRlSdRR|p&9EK+TRi+bKREG{Lt@@Os>jXy_4MrS z?r3;+)DG&Q?OFjX|J|L;FZ^dm13dF=`8fM``O#kZ`EK>pe|Yrpr@v5-2knmS))!v= z_v6b+Rrcizul~f*S0DApF>id#8^^uzQ{MQvH%@rtq&H4^?z3?TuHw5zzQ)|K>B^*zm?{-q`fU z6>nVi#x-wT_r~kqxZ#aAyzyCY-1NpRZ`}6A9dF$A#?N@;o;S928+Z$<03qxCz{`n_oVezg7|THlG*ccb-((fXrk{c*JZ zBwBwOtv`#_pGWI2qV>IK{bjWNDq7!<)?a(;@JrG9au*1Nx!@#h^z_7!>u*1Nx!@#h^z_7!>u*1Nx z!{3b=1g;$ht{n!h9R{u)2Cf|jt{n!hbwBU@b^k0{fe~F`L>Cy*1x9p%5nW(J7Z}k6 zMs$G@U0_5P7|{hrbb%3FU_=)f(FI0yfe~F`L>Cy*1x9p%5nW(J7Z}k6Ms$G@U0_5P z7|{hrbb%3FU_=)f(FI0yfe~F`L>Cy*1x9p%5nW(J7Z}k6Ms$G@U0_5P7|{hrbb%3F zU_=)f(FI0yfe~F`L??_0>eB`F>4N%nL4CTQK3!0sE~rlz)TayT(*^bEg8KX*@b3qK ze?JKP`@xr80PB|B(narXIajlCtlg`<)r`G0jqlpA3*pw2{qt`g@d|p=tM$}htw$k)$8%LoyBey)iJCiV_5_zI=7gZHuiG6Tp_=Mj zfu>!kw}F}?Zz7L-2Tflc%v=9<7_fcrIsdMj398k2;QEY%-2dy3PZJDRS`2$)`{9#7 zFYssk(W=!NYGTvcU+R`I^I1IF#=^Q~f^Mv#8Tvp?Y)u<)H>3DXXJ<1lo1W4RyZhy>!c2z~XT_RtNrUGvD*E7XgWW+Qsw! 
za}%gMOFtiIj)qtIw4Zuz67KKSS~v={&!bx&)10dSZ-4J6ZOQyO*8WZZ^^WFRWgEck zmQ8@6Ti((PCetL!SUz#@QrGWG#09ehV*)8vC zUI*<$ANH%6x<2k*zqe4=gXjfiT+d$cXWIbiG${2X(X`k6oxOUhakoX0UwOr!XoOI; z4)$Kc&iaWzTdfQktL@9A{B;e|ZxdYGbU@-v3=?6z?<*2@g2D+9vtq%q0qwZjpUVi*Ui5pAzTdaF$Tq6hzoOVJCedU;A zQW3oa!_w9VBkY#%EnE*;$p!j<&1o-*-(UK{m;Mvq$s@w0UDHA@IjkOj;H~MxTh(Lr zoiOs$8GpZ(C|ybDMlAM~cMR0kr@^2r>6b-d#nY!gw|aQsi&lpm&0r?YG>s`$;`!&jhiV*XUS%HiLOdYC zDrD`JpVd<85a#MC2sj=SvbFW)$lZ`{w*7M6M>2c=Z>%U-UNO zMIsC%1V5j~i9~~pVh3ez_Y)(*y%zY7f*6E%f`sK`AT2cB>ytDZ+CwY{tuKytI~@oi zM^&O?le|5Zc0%%KI}HRvIBW&U*47|g>D3}fZ5v_(ZrIq!X6Q%J&^!O*i`hv_+?{Hy z{VaYxt`mQq1*_|ppB8-1U-m8!f(&)b&-qlQb;8Q6H0UfWNRQ&Z#Wc9mu!-OIGsWEt z>QyZ=ii|@|F5=!_Yl6q`Yjsr*q9{mK!&$3O2rvmd=1^%|$mLG}^lqoMl&xVKcCmV@ z19IfArh^f(gac_m=8y2;K&s}UGrs*99|93}rpaNR2-tMX6aG-QoQ9z)3rQ_Vsh4L+ zZ+fE*qgu1m1golb0FY~Atf}_l{sW?T>p{Jek!Hw;3JIVjDm8mUu+H6ORH z6a8*FeBJUzJw9l59Pn5zgMmIh?ww4V*bO*E@BlVHAlXQpYZJR8s6vy6LInL9G8rGV zTi!^tkR^%=gCVh$X}j+Tx*K$DCoQUxGJgJ?citZ^LSEdflj^t;=+h1pJ!~aeoWCp> zjq5l+-SVlvpDkkJoAD#9b=W-0Y#Kzu9Aq>=v?*l8YXFsc05c> ze_HoNQ4rG8PR$Za=X}SO9Dcp~xel51M(JH~|}4EKVS(~h+2IB$ud~FSCgZDkE;^Y7JR{8A`L5MN^8^osufP{Jdc3RQz}~TX zo|zSYH255 zc2MpoX%JhxKhcf@voSl!*CSiQ;g>)BO@U#CMZmjkhwZNBOn8}h3aA|4fBKl7sGlSQ zz*G|+3h3Zl6;`EBKd*5gjoW>FVgZT6%|x=BA=X-pJ+nV^_{?qHKADFPHJM49Oon4( z`C?Ttet+S?N&j|!AXh;n-`n}~{y?`pA4a-n7CqGAm16UCrm-c-mG|_%Piv7a!x(wg z`A*ng=at~lPd3@+iN5b)CiFhd5}Obr#PBxssV0tfJ4{0AviR%ndDtvb4q)3tikb#}} zk4F9zy?R__#Q}chLtRaNky$g+VVv(3H1eHByoi(DKM))zb(=(Q6bPa92ph0NX;G({ z?i9rJo~Mg`THp>clK+AUA{bNg!UWJqlFk;gFCgEC%HA!*1UYQP3Hrlim&H2}jHStp zx!T2JrI!}7H5A-W?zwtRBirYI`{buA#5ZHnL-7qz^h0WU#st6|<{n(_2xI^CE^%xG zzgw7?;$1d<$!8^&*Xe2B#8Pe6?7%pWZ~d{ST4FhVv2|bRl0l{!SBcjpuU929Ft1}S zNK(G+4hw4_%`wp-x{sk?i|>WqZ!4ab@=#Ahq~VkfG-bHTAl!|$9R^=m`^0B}&ACnj zSBY5H&_nDj4%=?|_GkS4Q^&n^KCEc1tas-x{^Zdqmo4;1w=@)ZGWNIrCfeXzp5DTu z=M!x4km#5Go`mKHVU6BdFS!`drAeJVd)6_pTV6PK zP9Fx@^)YJX+gsK}($G+&D(k_6Ht(FQio>jBnXIg#Fn~Hh+BT_W`c^vGB-_mfYj#wF zzN5zBS3mr|_c@j8e<<>7rj^X6W_TX`Z9^e{yV(gj1B}8T><_Jr@3s#jTfN#iy?0R! zqvvX#T{XR+BPUM@o_%x-fAzu1kNy17(e=aj|J8rBUjs2?X>V8}gY^;Ul=M~vmqgJ~ zDlo4)UOzbFt@AJYnyYNC?@O#AJR^P9+ z-}Ik2kq4piaCrUvbW2mClW0)P9TCDg9qGaO9!?)Y4eP5u^y)iBc# zwx)p9LfZiPP2&g?e^8BO-CR?wQ;^d}lBIeza*L00BY?&RIMBM~Zm@Nuya|Fc**PIe zf1nlPu?zc6AFT7qcledSQ9I6Kv&1gCaKlSM68q+b14ldPXq$=pr%kVQ1GZ4E>TBncy^aF<}2C3w(-ADvz2>^98R6`*2R#JFh|Q1{#GqDlub3WqK4*+c?@k#*^T{%n|{j9sawwS9kiwMWT?Z=IHX9ZtZ!!@}LPqjbR7#jEo z_|$s~2gY3AV;+_Xq#A`@W7Z=KKK}Z`v}2%B$fbI;za#?EBxmU(Tb$r4gCD zz2URKsu>_PkAL#(9)8*1tf$GcKGx}bJrOc9f~F9Z9h~%nAX2k{!%`9S^y91w5eq|f zit3on31;$%0_>2r&)oN8JS>J0^@ZN(tB0dZ*Sa(Fbf6=Gov)5jya}(-JSWAh<>O35-(ix$INBm?P)^)O3`P>!*X{b&}UjrlAU2TO3EoP39^7k_8UIePd@*= z*8RHt5rYzl=m07_J6OoUSYjZqjiSKl$r1iH?~)j1F|6Zfd`XADs#Li)xJosOY=|m? zcM(5AhPV@0KTNXw+s8@5JP|7} z!-WPdMHK`Aql)@*7YlI`M39NRYlNFWOK%Z?>u`gzNPov!^$k6SR1@p_e7g{$H6uGY z8W+oI6PY4|qy`Zil5A6^2smV}-u9_=ODRNCNpaPhzp9Sz6(Bv8u>x|M&gXQI%sGfB zxB+6=Es^nJRF-+HZR5yq9x)<;0wI4WOf)Z^1@-86E4kqd!XFnL31@`dT05~FU~+nS zh~%T~0Mb6&3m5b?TAl;^Y!TuA+Rs%p4Y2^%eGckvi2F$fTB?Z1UR(8wvrhx;R6!l8 zh+Mz1?GLcWvUi&QEn7WI-D{IL`X26u*y@)90_1u4(&o{sldBE8ibD~ACPbnDNHQWI z+xWC_JfzU6xBYA5fuV0)3O~*#LnG|(1vzE1LJDM>d9t3M()_Z+nO*YcUXI1SU5CjQ z!Q=%Y1wNioZXmOZq{J{xQX!!v<0M&uvMkrfs^>wUp^af?O`;JH2>G^Zl8I&)Lq_-k zt~H4#4&**Qp@7aoxU!1Gqyt|;SQIAz(eRM%r?`W55?k6l(yOL=W`B|uW$|5toGZ>7 zE}&c?KF> z5Ho2Ue*)}R+Of~j9QkpyF^NR{1wPOup8hcq>UiBXgDev{+$M_cm@j8oqeRfrHb_n_#16 zjiyKdKnQ#kCGy7zU~&{)@U^yYPa6enb5O*K5k82foVGt3Of3Y+hrk2vE5L}zJOrA# zse`_ZjhlK5!)~&C8|73!crOl>m9P>yFx^Q+zL{4;Ikr}AXtvlF2tk+zzL-ZN(2_Y? 
zO(lm44~EFO?tB?m5pwz9tfoPf5xS5s>Qsp5NZ@IFIR9Y8rZMIg#Eud1dJgLw1tzZr+KEdlrc3 zLt3CwnDm~W)YsJ@ks!Ogt%r1c}9sDqJH__TzJ{{#ZJCzAVnqEMd@&gfNIfI!Z_%0hJPy z6sLZ3+t-Fe-DiY(qDgAA+3*Rc@n8FUcyNU+*e>8i@A~@84Hky+ z3zsz)fELg_tGI7sir|a4=Qa8;7QP2XxKE#vt}yyxnLD}-oaO`6(C3{an0lG zKru^)zw*upA=03&ps#2pC;78o2#oi^fk~R<`OBBJO5*jJZ2dxtIiLa!em5JT9_oLU(K){?^T_f|8tWcc-m((L{=DO}-mDckik% z=S0q1Le=)~6Yh7*V1HoGZzZ%uC-Zfh>Kf^*drdeZE-9`HoY%YNbnpcj_ItY;jRIWt zFdGSHq4M6?baXZc;#kF3D3R>oxp>)oM9@v5ulYC;0{q85p?&_!iR8A&h7cl7iSh`R zaPrire9aJrER<~whkg#tjQ)&;33GT3>bjOpBxE6(nk3+um)B>Aw<07FB;jyXQR3vB z4_7FJ>CL`-%`6;Li3_b z<0!d&s0zduY7$@*#j?dyykKmqhbM*$493jH&OQL5pj!<6D7vA3n<{FzB~0*D55DYe^wA*7)k0G$-Xie9pF_e2`w?es?O$15$`n2UIhw_J?8^8?LhKjF|1l7hcDI&nxtdgTi~zrH!CIin7_e- z@f~4aNzfdzVm^1T*5el4{k?!01Kk^HSUF(qI$PY+la$c3S76m9yFpv z69!7wh?3sb(}zAyf_Or9Sb71KL=)$>Fl5ht0`%5AzN=InlW3Xu5tX58sh6k zJmL5Ttj@&B2@~*duWtLmPaj77;<|4Je@@`%EBCzyJV3b66a;5EMo}tt3dLDn>WL_}4Q6<4ePPD-s2zd2CttT1Xub zYq5t+_N2JLZlOyyT`dp+GlYGdkUm>bNYtAYW}Qy#F*(|5XBzc!UXDxx&W#YHe#U^j z5gP5J(cDb2Tv171Ce%*TY)L!QCzg=}z3sJ9Bb$mzUF@VlyTYgCK)~3cHCe5f1i_nW z%U=s*8ApwllP4rEkqOx#6$@??#kQD%QE=F@sGOt-P{zVnWV@105j%=;Z<2KTVi*zN zlMW_C49x}>U|e+V3p0x8i=(Sp{J?D7b;KctF02(VUiO)h=}ywOug)$y37&x9u3|jR zPV6`W&=ei~KzobBn6E69Kp;0KOtNV~QNt%=Gg)^UFch&QK2v1aFce{c6*a4dJDRuE z64a+YUTyz`EcsjA9|iox9ALvC2B=pI7Mk{^1v~0vmD9Z%)D)j1Ub8r_n+lt4vmvhP zB>-DjTF%0f>?vfC&#$nI~aWK6rOM&k$(vc#$ zK6#OfL}Ks^JRmtVSRH|q78~l&#h8*p_mgZzJWjcoCo}hgV zW8vbY-s%hT8q6QzKr+9V_e&&C^kQ0{=0;c&608npvAV>wCZh;H=QswO{_TdL_0F4m zVlTN4I5okrRM5vXD2-MD?@&J2cOMut)p$pfEb+xbVIHJnjvw{aqN*pNU~W+VWr{gN zK!VVl)l)y_aJrF6qW)7ekSJ+20UVY{THYMy8PBq*|ISoYd}UBv=K$F01+_l7<)d1h znEyiP@tTBmPgT6GWy=O>1R+OSn3sjR2zd3_tUlf+zjMH}1D3m)0JoUr$RGl$We>Rg zg5I+GszRb1hf!nHBobdgNe=Uu)62wrscr)CzEZ|Ut56%Ct68iwcVF|vozJSw?=ncZ#(kJNZ_2%U6YbJyo`8houCGenW%NhmklGEKoh?F!U*h)ZbvE zm4|4SE8QFL+;zKOkq`sPTlChg{6qOKjgzSY zR3)G1uvl(=(iSKfhws!U{cC)EnZaWWNkZZzd3TAX;gtKlZ`=@=u=Iy+R8K>lyBv{D z4Tc0C&-iEpF@aWL@pw;}F?G+uXnKN0m;1nXZuu|f(#t1&^*4Az(DCh-+~dDtpWZ;262Rv$LnHzgH>TQtEWDF={5x0NeD7y7YFEq7Bf z0fHMbl{gUaB}PaT?u1GT*epynSun%b@2}EvvNof;`EG1nafC$oRYjU+kgyxo-yh$) z>W^0@(Rla9UHp-kUh>w9FAH5Jdu<}EV+)~>biB&Z%JKteCaZZwZ#iILP~I!Y1Zhy+ z%-hvBKE-Z%T-NDD@uc|F#sNswdIObhL7-{qOgesQvyRn<9JXdJ0deeq3V`D<+towT+_4v_=3-#a7-JhI-V7% z@&yn-@dqj_SL$&2Po@WC5A?~mHLn4&@X3`A*>K1kK?$-=Mh?5Evuy~djNx)DeSY;g z?8K!MM1sgkrIeeE+h>8=KF18;(*^eutPYnSe#3#VAAvo)l`v)%%os!&1uqMdxJ23% zaYJ2LR0Qp1_Z6RoIe@4(=(A`F?-AY-2U)&JG+pEzgYcF;ICWMM z4IU=2%1oY`%{)R?D%8aIU^F>+^(gGEmm>7patX3!+NwGwVpu=vYwJiAz*DGI4{}l! z5kwB`moGaelvqmt`bH2CD#s*CJWOO2L!HRi8yGGf42yC7nIm`ckFI6*b)*u#qv<3vt#Vr#@$p^OPJj$*blT|cs? 
zhxCRiK!&V^)ko26%xWEH&Y}yGB4VeE!F_$lo}N1E+i>Z!4#29ySF(j2$thwqtB^wp zYpVw|KQnbYR1FHIW~u*GpjLpE@R4j~By3%lPBcV#hfjE$NMfxN@AZ3$%Wwe^X-)np z%1A=kTUDysidEY}ztcgZ2!Nv%L223wlWR)Ci{@_0V!&HpVAHX@UA+YPu&4?t7ELe7 zr$7pts497Qg%c;p-e~HV1VD>z!Ix%*%QSvr-Q#3MIgsbiu4MnA?SG<;DOq4iqVTCZ zQe3&}t@=G59ms{s)F>f*lxBe6Uh}iqRBo#|d308P6i<4VN7-jh5}N@MzE9--Lm>d> z&N?Yd^VTZ#`9>1*rAZX=RM%rP(kTwsBmGt5E`+h&{sW-J&0+fZBPiEWAdOj3iL23*3 zUhloz_C)`R7ZGIBh;|GWp);j&kB@#Ysg}(X|F&EHoK^xuOpXS{9^JTo-8ZUF329Ac zQ%cmi3l6ipN$whCt1UzB}$rK4CO8<7mjD=OWg2O8t6u6m((Z zx&!jgmbcD^sRI>O!|(5>S|sF|Dvr;h<^zI|MASnc>Z$4+TGA>6-C+i3XJLbm6nAG` ziO5k$DFMt7|Sw&>9C7mJe=O~&zLJ#!3 zC3&+neX**hoaC;1bak!z^@KdFw+OxQ6%lZx z37M-64&lgT=o_f;YFzGwjUy>Z8~I)X{K%u^{Ou2QD+clJi9A?7v$8V?`j>kK^>7V{ zJ1{9Nh5Y?0c#f&~vaXP7zOKZQAp}qGqls%4Ez8k|Xx-$DCQoD9vRq&JuALjf#RS{6 z)C-=*7~Kk^v=_?*ql-6jm9>Z4_C`bm5F zl5Zn{usXxl&|GQ4Vy0S(ReaDC@f2P5YF$aIs zL4WF`c7Mn0fW8y85?D$ykoRxlXxbuNt6-RSw)whm_*S8Q{)ycDRN1kqlV#3sJ5uT_ zVua(rS< ze6ouad_884#{{ZpF&>8p>zycNKTq=YPBAM9I;g&SeSnY5Kdn?sJqV-uC$8)N2nkm_ zz!yPYrZ=a1;gU1oNN}}H^ily0LWW4-@E`#1uYH{suEWlGclexrmUiYo{FY9$F;oJ) zl2xeWLaGurX5v57qKI^fo+yKR_*IR_6`VqCXrWef?p)m7W5si{fR)JUy_ayTm~t*G zLv~QIwJ7UxGEDP^Wa5NL)Oe>#;R@G6B`U!?i3cmp*Mw%9L9Djg-;jL zaP>rI-O}5VT_24Rh1$Ejk%C058me{_B{9h<;HP{(s~py6L@RS*?j|!DC4WmKk?#t# zBHhBFSdEgxzoD>$b1!S+#r`gFb0Z>|tVUcmz8<*)QYr(gl?Aytbb4e%`HGJ}=bSH_ zpZ!Uz9WTt;!NC|HwrExT1gune`FZ}dPAWDhR6^uTMkkBVjc89iX+EoSFA4cDlfs{U zMmPn65Ny1r39M}@B=#W3^x&0Bs?~&0r&twvBqkFJ%tS93*VOzs!9gD#U)yc{0>O)f zO+@|(XF%8pgSG{WCL7z%Fd2Xe=|GlOfBOWr1OzVnn8eBkV;mY=(hCre*vLy49GS>K z@OK}adHXZ|_^QUE|3U!xZ6E8>i#jV?*$!^XPKFKVn|03qL6FPv2~j`KhiAXl-QAoh z1B*|qwc@6tZ&uX>^O#PAQiov#34yTiak#aNiL8*n+I`a(YyL}JL*0x6XL6Vx+n1;f z83}7@FJT$RGJT{Rx{a5DWUB~3B6Y%_+MADt89YUFVO^=tHDSDpNF+Tai5#H1~uVa&N7l+ad zX(6**t`(jJIOO z)XR1|V?BtA@css&RnK27<|ENs*hZ;c?sK&(;I+^5mPBLdeSx~3j#AIog_H(&0N)xxb)d^y^<=E!z#5i2rlIVkF?gVMIG9v3^TJ*NV*{V8a zQwE}wsEL)J)Z|PhUyyYha^<{AGM5!7QhA(HPaOj04)fKZAsAWku}wGCMQSg-=;Y%0 z)84vq*C7mKbSM4xJ&~Jw{6k(Aa%WQ*pm`-Df^Ij9P)f{f`yupOip*f)GsaL*Ly~>Q zX7lZ2IR? 
zk#)2r6NVh>h!%e(Jj1$f8RGsUd!8&*Bs)2f!S&rpDt#;EwhZaqVPLAYYJnfPK-zETT-tlWut>G)&A;$(>@nHWqCriR^H0 zv%ihNpQHiazyFUy9Fz%Zz2#HnHmrD)gSHmmLS+z+f$!nwO>eyrIvb?|R7U`Pz{=>x z+3WL3kiqK()exxJi6D9*gi$TuU{xkDt}+8)~Kb| zPjrLf%NKoFcdz;)3`e|E!nM?C*(dL(nDM{sZ*K)bzwtSVLl)<|^5}Zs%r|zLX!~RS zJy{{C^2sXagN=095pS!AKNx+knD~f_WDau+REHzQiAQo-yyhq8M}z~fpP;|%leTJA zGAT0@D7eGyN%k9TB5q5ChbQ52!sLs3;h?*A%|VJrifz}wzjM=pgS7hx-o`~4biOWD zZh;#Fvgj@PmZY!>6e*Fz>f&c2%o*1Q2^gAy3D(;1W=vGFzqVc;#`4ROPHwdz2x=B# zCcSc9$-Hn`;9G$~Rkn_u`joG_jnzTBv2&|O0zd3BAb;?I!X7LM*)atS$kf{;l5BKD z6ITVgy*e>B2uH|xL<{>ccem8@EI)4^FHO!0vtV6t6nJ|4j8GCDg>BElXhm}Tx4}`J zJpTF%%q%iZ5Z^j3AX{dz}NNg?P5}vz`(EH*3z!jsYZs$ zg4v`#P@fnJjyq^n_lZKbc;tOmEC06;{LOn>({&OuG~M2Mg8kS+&KwqYZJge_n}Q&q z>G}M;xvgdF=bL&dih>&8voaQ)Zru3d%uPn?bpuf^!t<@*f?{*JmQgTm{^~p_c`aUK5ynBh~bQ zQlMM=`fNeLFVX0^OD<&)2; zefTwhXWg>OP+%g?GhDFg$MonicqdUyniyDWb#y{6IgwI!@oprKEajI&P}~Ug zx-y`ktvK=4z~?K)1&l#q2w$_nU7L<-4Xi}pAc*~lgHzm65OHdNwOlnJ#>2R?hUU8|#9!lPywko#_+_hEa_5Xv9?QMk8+yz_ zht$EHYnmbkBB(PqX~_acGH|wQZdMuxo)(Br|va3%9W zmEJK*3I!><6D0SY(>?^(5G2jOxLR0spiNPp@#SzFw>buu4NlGRC8FHA$zCkq_aC*lH9 z!OT5^XqNSuckg2+6VHv^x3# z1|t9t6|%x|3EEQl-5<3ho2q-D5&VXJ;u!QpQ8A8#Unj-gScW&T2m;Lx)YgGf?WWzm zE4lG%D7`OT@Cjmio&_mJ*mDTk4LLewP%*YzqY>tm%xZY~vZfly(Cqx7Uap_7B zqXrtI{#TK{0`f{yT?>R+T|cFm0>W9fgR3CQWDK(?8Ud61zd<5*rR-$NZ+I#A*-FEZ z&6j!!E2~7Bn$;vRl-2ApCGF{|w-*yxczQ-KBVaSq`;B%7wM9eDV*D!q5#zU0kS|1h zt|i8%=oYA$<(#F{w6Z`1kX!1``9@QNc@SN`kn~(DGw2T$L#m?y$y-T2S=|&k%ybfi zKmz%GH48Rq=O4?jub>}-45FBS`SN*hU5@~sRS0nM@PH*d*`W+^$aMk=qf(I!aydgV z#ACfkM46iiDAOWAu>ifK)e3w^Mw5_+kx_+15cN7pIUclnkUCq%^I^Ew^QMv~l7a(< z6aZiGEY4ZAy9gpihL2$X(N}y^tl~p#DelJPHhK**pjWOZx1<#>jF*5*D0&3kddF7E zJ@5nT*KX)gT+9$^1`tpD8O~$!l8zpMdUorb(;jqjpAfkxNhLqZ?u3j8Mn=UFdEZ0? zPN#}PtCC#T>qM~GcFPdg*%}jirtN@#n^dueDJ$}kh(JM|jfiSJb)u{keNS^83fnr=j$ zLjTb;(9~2t=cS7R+7(E=_7Re66>t3KVm=HuQUci>Z77GDi|^WI*lik`rbXzf3W^zp zwu!%-rq~-wdP%2%Vi3TKk`DEwdMn+#C7{AFvA7V3GI>#|U^<7CSd~yyLrpZx7ya7;bY5rB-GanSs2mVyZ);!4w@w9$7RFjm5AASwfLpqyU?vOS8oYu$Eb@V$!A2l@`f=ZH7vY zluD+?tv{)eQt#sP|AdAM&SHk3lk8f7MR1;@Ai~?z#J1e(^=N8#s`E(>Dhwl^6(+n zA)G^vs23K9sI*Kp_zajeslgbarN%12W4e%8^VWt>h-GHoq7+vUqI2Iz$>??{zXx4jHN^6x0|3RX;`1;=hN3p#4}zpS zfngC!H@1+|U>NrwobqXyGT^{C{YBA-tyRTAf2?6a8u@iP^Iy8<$3L#uR{qBT&)u7P zX%A%6C;S|VsB!aMMt@6efc?l@C+EGz3n(xW30z6GMxdQ6WK^1oLrl<8n3j_R#t5RjR2rx}_ zeOxlFW0Sk(&F~dbj?+>(YXyjyS;6e~2qJ=O@qn3ZBA~Tn5GuW=NbYoWTXLHPxp^BQ zrIbyw?tC{+9(N2xsNdEg<@(U*k1G9N|M2%TtGh6)qFM_;l+#qS8^v@Z@~vCG9oOG1 zrb*&50*zv?E?BGF65aB}xTOiIqRbCGOWB%YWk+)QWa0YSsuCO(C$5=|lDN7BBhB)I zW%D}B+V$dEgvD9yKF!sNu*AMv+^S>EVw`NXX6|0Ihq7LOuw4YEJysc6W2sG2Pjih= z1jD59mm<|5P}(zuh8)|6xCx>-(ppIcMWTTMvedkz@`aRq`nu!BDFT`C~da%i%(X4V5PBG;C}fi|TtNG>!v&LOu~tp!kENKAC9 zo`*Mt`1W)bH&)d&mCd92A<$>-?7)IhH8tY_0>kS$gy|YikhE10-QU!c8~KKcO=yvR z!kVQ1-=Kb=1N^n9ZFDEeOH}t}f`SHY$?kh}Xw+3w=b*W&60%A=)5@?T=!lvXUj)6# z`NVYTh!>-nm7Y1Kh>0~y`@=DM>JH5j$zvMv9eo5NUos4BO$#G@O;dc`>d-nkt|l;X ze_x^$y?k>WW}37^aBv?9;S}-0@e@8A?0`~maW0VIVO#zVRFF(Hhiz}Iz^fBI?~zLu zl1K>~@QI&##XCR}_nxb{xz7EOJ7F*}OTMD;Zzj_Th1?6UwDl&Ra5B92r%xjU?ES1^ z8}x~N6lI_gEdi}Q6a1Cu@(uEqH|*G1Seq{pJt%-l_7)i9BcD3;Db31kx9K+&7mZ{| zx85D5kJt_Fph$Y%YpT3}4v_>D6O2i6P9i^%`wqi5Z)*;4O)H7x^W(-FUlDb=4}#L( zSyLG4J2HOAM?=gK$W~=Ge5fnY9U=9@#0TCm1D`Ljon|S6pc}yfh=)#2ENs9V$BBr~ z51I9_n`=fEGna`d?olYbhTsOrFZrU$*pqTl_WP0!8L81ixUEf{dF(r@Ql}2a#kA*Z zBS}1rniy*Wq7Ye45h9&zBh;J3xaU(cFJ&Tvd#Y$k1goNwYy*?U)E4CL zV)nW#R6xO3Y&Ym63l9?ZTJ+rB0Wz2<{3?WVm||d8Ipdsbz5Ha<>nd`prkwm(1YXHNKr zaO?FsXMcYO^Lv&ZdKb6azz|Z)lN->kk5U-wFfuL8XcT_OBz9Glh}1ThD@lc=#Ao+Y z;j4`!8>{xKBmiJLQ4d5BL5r8 zT9se*o}SUQ*UDuFt!}oHuL%g?gJ%%5q7Z&x_Dw 
z%L-w@e;f5)mXZK#DdB4g?Di&_vXEXHmPYplo74f4O{-^+#-LdkVe9$GXKgfI8$D;jo? zTz}=d3I7R8Mrfh;ADs0GG;_iflAC_UeW9znmi$G6))MCenn@(L61`zz8@8e|*H=bI zUl`@*vm{@xWYy-;3vAT)cvE&SZbS|{(RC}f=+6+P<_iE-IrZuc{|-(He~>&#Uk67m zZ0+Zc{#HEN#+#EIa)Z52LtX01QDj})1G}}+L80`gujbL91vAJ9+4)LiEJ|-@1g|ME{#ohvd(PN3g^2Oh- zlbzkNYt@^Vu2coQy&GWB=>E(_cx;#>X{ZZsB^3vatro+pCh7Oz<&hG{ zLJ~jF_|ug!o8i;KpOLS#-iHunBtXcv@&~!&oeDKYGsMi|AF3XHXg^pbG!Cge2P!jr z(#F-<-b!V6YIf%>UmBLWxsDZF6m>L1PYbz!d+i>La?k{aX9`17C3)J!{cKwi zfmE+aWi5J!uLtJc1UxMBZ#Ng-K*g~zGMyiaFhd=*mU69Gu}eF)5N%-+5wuvVyM_v? z-?`e#=ycLPQTmyZo}iXfp(j2`=A}|{#+CE~c6W`#X+HmP7yJK~JRynvj;54UPQn7> z+Njnd4FN5!L+Usg}Sq3OvI^e&T7X`}>RYmrAIhz}{zSgc2rj7jLkld@W)qBpJn17egX-9i z4O<`c`E-ltuu2Em;;(|OH&kR0Edj#}IzCxy;e=dI;!I$!)nJWdiIBRV_&$C!5XwOX zEMZ-7R$DSy_0wcVDGK4!K(}&*zhd_LDz?Rykwgvn*2E zeXEg)myyN!EG}7tjqSt>lMFSfB$$}}RU!ydtPR9yjq%{RfuuQXQN+TIR=lL)h=f!& z5&8jis~z!sye(K9=6TOnDoIi&w_o=$NcoL1>?*qq5xXortb*^quOZBqTWGWQ(k<_Q z+IPu}0u$lO&Z+P7YWIr z`U(uRp!ty1t3^u5vye>?A-M`|6LCW;OYdRCGNAYc!gHHIogmpK3tH&I)c#5kIJ$c+ zP#)?Q3qH_uxzdQ1Ozw}G-mlS*f+7;uTHsjX_|-HF ztQepP2J@Y=*mVmA)UYN3r>v%84);;f_I^|I;j;8(Bq21!E=|zSR)#}UJPNf-%Q26( zhAkXOvRrEf$$^A`IqK;|5=)f*z9FsX33JUS;$(&j;Re-)omjrMrFVKQ5qSa&zD1H& z{IlIcZbFG3x0($J-Z;XttJP`|<#0e#jZN_J{w!wE?3P*~F?8yjYD?2J)|f-9oo^NL zOfr!(i@t89J$X_+A<9gX5z1~+U~^+XfBl(TYbZ0Cid-Fq6=+wi@&0)1-@JNPiS4+7JsadTvICFU@vY!hNha; zE%A8HaG0J~48|vjik<~iiVL9o8R>QixA{uU-Xa2awzq}lh#7EMrKMp$O+M~^GwtU{ zbZlEC{za?D$ZnQKp$TFTih)Dpt>hepMUcd*p^XMiakJ@N?z~U#x~4?B2xQbEY?G0R z)LJP|TreU_Rf}m>t4ni9pRwjC`ls+ws@DmBSR{SjyPjm^#fj4j7 zbbLYo(o&EJf~&ss2}jsFQG=0c6#nJq3*LGZmYFRTik{0%0Bue+I5F+|@nHdzy+*vd zDD?=?aca!MjU5vSFm%5MVQCiRfAlG*6T@Hxj;ns$hKN~?Y>*-G( z=^d~99e+ANmWUnJNUW~f=W?$>GV8$Gn#UT^rpA~K6M}`33}l`k3|syq@l0WZIQ5){ zfDR?u2&x|%`Q?lLE^*jtW5}vRkxJ-rPTmIt6`O>?kIcK|9eGqG z2M)YDN{v=ZHz6P_i72Zgt+D@xz@f8K0Wv0FHVonUO;l0JPdSK*)^|8wyQ!y2!s9f& zMQ`5p=O5So<3F}BAm2(x`8;@=MzE`))3mpd3#sdn()~Q+Xvi+ibGlX8Z>zMoXy#rdoenP zIr4QadNi&>SjCFZbM#W7q&c9cTaM9XGfrCV}Qb9q>G`SSYp#w_5J~F*y(`zZ-b!D41AvSHH-7seMD1YJX+nUKJ zU(#|XWw0V$8g)`gU=e@Js%VOugQncnogzfItjr?8lZ}I7JihHuAAS%SW78^^%!da# z+E?RLf#ha!_4#1t>(0?=7tWniTA{itdYz9+7H-t6KX>kek8<|-afiXw95~+~ZmM?nQ17jTC_qKfnn8^@Kh3MFN+x@iBE4QauL=m^4n<97j zv|gwtd%_ETX{Kdi$U><6L{xy{`;e3WVJB!HGCxu z{80jA$;n0B6YVWF?Nkj9lOM8KULYuc*>RZkH-}USAlQjAKcHE@WTf9F)VE-;@QI&5X5zy;03tmsMdT&24?s8=y2z5=u|4Qys9j5ykD z$R0KpX6rAPsHvBQN;ogq%D@^s$n^&7bfH&}Ozh~kjn1Ur7SMh5h6V*+_b0pM!-qc2 z$vjv;)FG>|uXCG5Hc0MOVYM%Hv!H}qz7gES1Xcx7mbR^MsA~J2Y2#1MzEVVrwUeQZm zu>#CZYm{M?wWyi*$OqBFEI%|kX`jCHLELk7>K$Jo5_O#j^bupqlDq{JCKR>{-I;^~ zx|g#(5H0+B3~-v)3Z4n@#nmPsb6DsEWNT02fZI9C^ zd5P=mj%(P7MTLb)n@R`{0=urab++KPcC$bV_JD;&FV@R`fJHf1fM<-pY9VHJr2d0% zFaV4t0Y@C?Q%E~F>^r76AuUDkQp@5Rg@?cNM8|I23l}jbXvHv zi<>%zQMn(JLKw7(edPXmhYiA*_WInpivp*4OtLq~5>vF zpzfe#>00$ur%!9Qt6mD9GAmBcHw%*mCY_ax=VM!iv3W8_a4DR+vU=`G)LWGq7w|14 zmi4Nu*HM^#@}pTYB;rnGmjuz7**95J9*V08@-A4aITX~p!x!PJH0V% zqI64M{_LYBm-NFl5?gsH&cJZppBA>ROV346(<1qwswX(p#)Mz zr`__P;f^=`jsZEB?koY{*_DUsJ){r{j|6xrPm=F6d2LlK+vh{5PC6NeOr+R4K2H^a zh5VPQ!C5sf&5V>No1DXyC1znC8>7`5^FH7C;H2P4#2b4M$hdrzvVWdFO+F`!lu!1) z*+ITnVHV+S_@ZR-rDsv6d*&rD29V5S1%zyz3Jq8ivoFH}4pgxAZ;3zmm631Z?`{$% zk#^EFz7UZ1@CxPQ`xpr?`}Is-f#T|jF0YcA;`?Pqp1j>H4}Z-^gePVv_dcl+!bf9m zNqV99ftB^p1_IkDK_lLmX+_DyQa?dK3${i!DE!>F%%t_4XoDymcmR5fZEkE|bvV7X zm#kxRR5js)1%Ec_oXxOjD$v2|A?R7GR~T`$2nwAcIVZH=S|q1kGk$lW3LRJOISdVR zy^R4jRd6ci$8|`U`#C30Qr9`G3df$F(XJxZVo=GLokzcO$44Z3IME67ulp|*am7vp z2VpsN)ykjZRK%Ib{Pm~4gND0NBh9pq*juKYlSB|J1D&ikkaukLczusc;7BMHPoF*% z<3vowamGR=j1V<^q*ZG&TlszuD*)9-Fp!m((SyApCkMx`6>zt zvU}USw{ne$&;v~n0vRmf?Gur~&h;Z!tE3@(W1db48ME^eCkvI^c0$ 
z$YNR{W{2*$5&3MUCkSWW3X7>me@EyyAbtf{K$7qR1!$!rBnY5op#bTba-mf=@mWrF z3}4pk%eTvD?axWntw00}a~i_xa$pG3O7}^S)=5AxsZH+pUhbwVWxxX?rMfbi>&!TtDl_{*j2{TX03CNsWqo@#jai3Bn6k?K5h*E@UQerDMFmE&TdVLrG53o zKXTBrGY5&v-7#TXz03~Ae4)>9Zi1`*Nag*-&*fi#@|a`jiBq47F~8k~0}b@5#UYJc zbApPZsU;T6o9(jqg_x;%+?bgM=JdZJyt7_m#UatV9dk(se_Fb-=LY<_gyP zq&Mhsi$=1^dUsY;rTdm(+*>3;bakJ(1BQaEac`1o4{X)E=`L|1}#{3Vo+bm$&o z-}7I%ndY>(3QAKkQxtnI6 z7ufKSWE1#ZGD>`K2x+9xy{lzg;c^{OtiLAg>I=eQAnTd?TJXJMA6{#M6C!#ES>Jv~ zlfuH#53>`~6*6M(E9hv3B;N=to<0$j3}0c`Nq0c7`mR3=PDxC(w1i}-u)3}NWRUr} zpL0;K!jy=)k>qyRQnJgIAb%Z$8D8I9$BL=pV@h;^c%Nd+(9#W!dpc$XB?ieIoAYi6 zMAl}bj3qk3x)kEsEuVW;502|a(z487mM_1eQfbKNC~>#^cq8k3b1eYe7(qnBSxwhn z?n7?+aswV2ukG}yOvk-+GXYMg$P%`^bM#GN69<=Te51U_gHH-l)P>4*1UH5=`6LMB zN()A#)oay&I*+0ZW=u4*q(6QL9n6&BZjLE|&*CWw2KxR%csn-Jc=-2H7}-i`2;J`5 z+GH^5G)b+Uaqnv8jb_ZEeuAIv8wqM;KBlUV1m$J&hSr7&*pb)LRDF4f{YYWHla)0wHETiRhGJR{yOgi%=iie=WjNh70f4)14KlzX1?5n5e_T?Lmk)6C4v2@^bb zbU)-O5J3G~1bc`ox2}qSucQrVUI>NvA0!F+)3FiLT)d0S{qUIo)EVE76Q2}_#|mlh zM=Cw^MpJ@nwn?sL1}nRg=7v%aza^bLR?9UyS+~P*A;cr3gg8Q<%jR8SD}nI|cOiix z_U}^y-QRdO)sjbY&*Sls}9Q4lRH=p8v-o zzajma^#joXZr@LmuUX2o>uk1m2?a8D5LV;0`(c84ecNZzEib>InHTB7CxLOKNe1Fw zjHi0*(hJTdu9ouC@E89Axr*6i@)Nw@pcIKlAzgYWofq5GpfFb7D$Z=VD%Qs|Z?aE~ zyoC%rlkl?3*u>K$F@YgI_`54gEga#3sibh4J0W{aFUGx4nE1Y%76{{joWyjXG(qpX z<*84JK$SuX$>Id2B#g%eFbheG(yTsBtLcOs<%oFtWQ>GkGpUWYWy;`{lnT#v~S)VG?#C z17@oC4S>X($x$OwA3V?!1fQmAFDsE-;2A7}ZM|^roP^0zLc_w}ec{00Qh7tNBcjNc zfDcqJns=2YNaQ9oM%D~VicBQo1`?GF$cVhe6KWYq#3_>+I~D6PRMWbvA%V&cEmVXQ znDTkZvATk4#!YD`7zsKzs~q;mPaviY8d!tP*KUhaXQVI0ZZA-yeFC`4Yqc3Uus*i#Dul(bPLin!`1>JCUl>FCb{=RxF z#U!Jj<-kI!(|!`e9YGi@6;S&oV2k`M4YDfH0nVr^i1u3HFzyo5uvWP&7H$TTkQ28o zq|5iSR9gLQ(Ex&7ODR&H1XL_l2j)(!Qe(m2Hd#`)tnZK|=I@cQ6k|ZT@P$WCa@=g> z`RwYA(ANm|LUP3W6~=|Ti0=v(oW7Ruox z!TG&+yHP-Ru`!!b4^Rt7^q?%rjM@P)9S-`pGzTKpV@4dVwZzLII;xBeNO0wX$71Bu z*HDpCj+0ZKbKN=FcITm^2N;!Qw|ka$tlwU*lAeuzZk3uGE3btVi?=7agGf~|6)fls z_VApAWS4yr(Ch#^ckTrZ*IPsc#<_Es{Cikz%GIW}m%obv(I@lfEeE^ZC*y4j1SBLv zBkiZ0bBe4cG&0S@2ebs25C$=@acx>i)0Sx@fwr+qsmd@|@iu(bsS5SQ7uD@B%Xo+$ zL=gB|h(FSyJKCeXF;+@d3$GIUnl1W>AhT4z>gk!t1t7{Q5E94BF`o-tG)VV&-bB9A zX@lN2ku~$vL3~I))XE9FAR{(dmIdZz%bEp;W#i8EHs1tS>V*i^DTnmPRp!Gl{oqUa zU!SlY%CMo7N}@Vjlv)JE^yHnL2r*|}&>sgfb!s%K6&lf+K}kgvbjz2{i5GfMs2*-9 z*&PK-%%3XV#G;GImGa50?pu*>)NmtPAQ0_0g{|A4)AV5U5n?gfoA?s�XD_>n;^B z;?z8U@;RR+3tAWQp!MmBjV+WdANcSWKKzbDZ-`wfOb^7!`!n70&ChC;W!Pz|EO=DV zG-&j@)#EhSE%>E(1X)Ts^e{w|fqmI4bH2}oV9rHyyA^7i#P=2d(!;l^WSXX@92|hf z0iu`91p5~BAW+tp+GS_iDaus|eHauik|fc6E3{Nn<$U5ua~uvmVc{ju*4ev9!k|A- z1k*@M_9xMUA4D#9~&7eL7WVkNz}4)=o6#_-q()T*&-!{_7|;v!r@oBi@SF9V$?6 zKW12V@N#7R<_W5HFqTD+fbIyBJ@)*o4$ZCCHQ6JD+j}Ra(g(%;6BMh2@rBrVjp%}; zO7AWL$mAOg;skHxJBKagEPf$8MI;V(uoiwn;7c`><4LJ+*@(+H*$jlwCDl+Uvna_A z8U+(cNpW=eT2x|^K4D^}YRhIgUJGO65C)GTl-w%wdq>y~6tIfBHpT995Gm`=J_;vS zI41YLU6pZNZt~N9x+W4Y%B>nqKHs#@Dhj%?Y4C3^Rvr(g0Q@q5_3>J zkr~q5uA_)(%S)%pI&c@MF;ER@rU)NCZ<5}xdK=;Qd!lU^FyTt*n?4^LE1B6{dRf>! 
zBK%r#wbV1m*qa-AiZhT#$^Rx%(giWWr}3MlgZV@!yC zI!~TD6U`6q3nX0VJbomkvOHUW=PR&;eqrF2`747XT4f~lQ&~k00TYL1nA1#OAzVlM zgNIzIHm>aCJKRU5WsedPta2qX`hrdxhzP^~{~aLpQ`(-FZ~+heMeMvn6U!X(;z44y2!D7-FWf*KjiMnDiOA2pdHP&h5QOf04_3=DW7 zV%8h?LZLDxTKK*RS+#?&Z(xXa;c3`)y5;tMN((YKat^F6WoEDu`2=rPquwBiXE4`v zd}|`i{YkswQxwN6reszIc|h{0LO20A6TQ24JEboG4&R!^Fv_@`eQ?r;A%u0E0Dn(w zU6r!Qk{cM7%M9E_xYRabVMWlRH^;XsNt@6a3(wNi#XxKIdHiYJBY98PzY;%CuQ*G4I%+Swa2E_y~!}j6V zy03lsH9fqZ5yqViN!KfdQm4U?@%CDRZX(|7`ky&Qa6W?VY~9dO$BT5@u<9p&AcmTJ z(0r@&pE*1Pt$ai5gb>VoVdVMoP0@w$F^NpVNiz_+r8fo8Uhrd z#6iw2$y6Pc91bZEm<|&uGS!{RTk6R29FxcI&53M<^os4UY=U|pG`r>m)e$$N1W_9) z#ioQ=QtVX+701=uo#X(`qV?3t4(S)>PSxpKSwG;e!Q!`JTE2PN6~?XHIRqNB8428} z?~0O)=HS&DEF)-5Odd5)VzR&3m45`G{sB1?K5$KNzi?uQB5S_ao451|!9S9guu&}% zyjDq2ie-SCEs_fsLZ_|J+aL)hDU1RW!K&^uCPLbQfn1 zU2ivO0vPwJl~zIn&L zq*$AfwIsu}N*cgklvAHe5h~Mk1Vl`4DX)bS=t$sb#iTC^f|ivPY|U;j3fjKHWOYm$ zSyJmjJIk{bqI0-S*>w~>8~M%HD2$bANSK{cR-QinKqTC&1qH&v(z$zCGvB_GK(h+M zPV}Wu#fCH^+@W^Oi1x%Jgpxm#+5bY;xke^ONGkwcm-ry*4^#B;k>icSt4-lVV%BI!*6xp_ccF#PWz4%At?~Ori8w0H|@gG*!8=2e3cfoo03Hs7G*8g zVcBqr5TK-_@?AJX!lhxQ-l5fLMHW1mzd#AJOn;yGL1IAWJ!jw`w}fMxl}+e$>4LxZ z@%v&2M-}`4q!x3l8czNm;+|x)zkf^9XDczT>+@`3yvdhv;;c{COd+A6K1$^BHLF>x zt+N|26LR-Vn@%O0N*2|~ybH<3di|z@&n`uCnuS-@r}5fNpGGOtEtpd2{lFsC0O6X-L6{j6TGj~n@RDcsczlEoQJ zE>x*;I1?u7BXrBp#@A<+ZFG|R1BD@$_a;|8bwD`pTAh_o5+Oq7!;US@jNoD7>;@Wn}|G9(v&8vFkH3bckIhkFeFe>O!Lbln+^7?%-)bLH#05fPpTEjNRNlezGcQHGr z?b}eLtP>A3l}&{%m%31MAw{7ACE!WQY)fQ|5b&PL7_z!jjzbh!c#-YmNOWH*kqS!bXlfNsgCy_nLp?MEm6b3~?vsd^WEgSFLv_KIFo#PS+T#tH*`!N44fn2+IeMkY7>QViWkT@6abtpZ1C2#lyDaBXP}k z8%1IFn0x>H(7-@>i>SeAeQF><0KI zj5>EijP8~s8~dQhq;=0fkf4xlPnhS(?(wsQ!^@Ip{i7AzA(+%L?jV#bYQO3kg06BR zEJ0Yla<)I64%UP6DduSFvphJzvvO4n{>T4+KTa4|2$%Iq<^TfhVwZ&F#Fw+l{@iOf z{1^8RID}9&+Aw+(X#oiQ5EHSar;y?N!y^k%5JcWICC0lq=;Nb~KKAOs21|qs@7mak zn22AZ#}C^B0O(_oy`VTGll&EAiwTi}DgGcyUb$Gw$~PDZQKx>|;b!PJOKQkI(pNI8 zT(fdITEsQ<8d;@C!b;);zQ<-q8%xyLc*sTM4mFI^>Su<4q^ul**TQScr3$`;Z3c^D zPbs{VeuPQgpCSra#|J?yb9yWtV#=1NmDG8{e-j#-`es_T_gd~T@DE$f?kH`nh2F=5 zMxK-6bLFZ4dlMs_9|F6j!k2FaBaHTIo6YG`;v4jGAMc!SiMtRb(=peydg_hpIs8LS ziW}ad1Yv}Yz;u(jk&Mlxu383xPnBZHVc)-g&u3(O^auaUpRqE$+UimS0`*-BGI;{f zXMTfVW#va59lrR*FKXH|5;+pbz&6na-XLB0sFddkxZ`cmF}@X!1}cngSSB|Zq2_dT}kt(>sY zAodMYwZxKfVF>55g?6Nu&3Z3$0s*Y_?@uBM$F7vpo0olBXmbanF!7kjP1M2UknVT1 zXGa#egadQRSV`?F*J5W~vRvho=$T1967vD#pZYXTTbb}P0_l}1q=uvWlQfN0Hos7@ zh-)dooQpYp-48$bAlVX7zwjnnsEM#{b!Ew#VQ}RZ)6m5T?J;l`-+5rf162>aG%R*ajTZY6^U{A z;WzZIneZ^-Xi{qtC!9B!e8xE!=t5}Q3rV5G5^Ef&Rgi<%QGc{w<=&8FZz9S>(hUw6`l_C_gPr5$qJl)yX6zW#QJ{Ab|!9phtab8-SGF?-*f(z`DFF_ zw&RVMh>h(T%BZUU<(xcu^5o^oOm#j1#o}*|PsCmOXN6Yq6DYD2YT#qP z8JD8hkwO;|c{Dk;X~QFr8@dv)X;LXU&m`rNT7p}#|KgI@5szk(N6GwLC|&QT2Y7YO zB*H(lQ}hs+8;MvpXkQhN3k@hMM%}s?{EHua#T$c?9V1*uv84FZzL`blnrqe~qlrO; zNbVwdq1g1y*EE^pRGYaM&)yqx(#3}}e66`=O_F`rw38F?wDhlQ?d-sP`3iDDfRzXH zlEmEe38Nkkz)<1xpyKbW#4;*$VZZe^|Ed|YrMC#AYLr*G05l!$xoENyJXW2b0>Sl|9` z>aMr*ut(CG250n>k58@NQsEV)Gt?^ZU$F+Z|pcav}#OeOGo@fh_8jh4s@<3V<}Xqd-9 zR9*SE9>X4%K|GHn{JO2Sn?EsD4~e$s+W?Rx)j#fS`&`jtk=Fi_bk5PhTxnVmS!&e#Mq&+!>hVqx&9-OnKW>HwL4syDT!y>gw2@ zP)#r8CkSLjWH26@n<~Tj$qUV`V(Q{P1SnKsq8)4qb<~fL{#LG7IB5CbCV=`$+3(dv z#Sep0VCiO*VQsLW&TbK~l%WHiTBFjtYykyc%}mM+RNDXxl4)HK4$IhDGkt!rj&+#C z6Wn#k9CB1D|Ep}ZwAjMG{Re0?91Jn!_2feVgI$YlGRjEG&6`2FTg(YRRlHtSvwGK+ z$lxBbC$0v%CJ2T8M*X~<} zQD!$2N(s0TFChViq`5oY{ZPuLQsxu>1&61y#-GzSqkAn(El}eOWBm4EQrN% zo#|D#iv$=q%K&Z6Qzb5v-h3r$7LkWk&eKRZLBEeQ8Kkyw>tLo(3`3$TDZ;9_PAZ^1Q3>0=6%qH))E<& ze024SDNS=BSAa%=otrsBE4uG~DaOv}bUj#Zxu&XY6uasjbcJ|7%)8>3;>tYA;czC# zqi|*aq)cA1c~^;XY87Yd8xHFx&bS73|GB?oTpXI|I&DtAU%wd3G{A|hlx7cG2_mT= 
zk@*rE`it4$<@Si(Iq41^wPEK zug2f3qfdvo_Ri#qVXILb3-bkQ-(ty4yX{*gr_}C(C$jC^HexE@Ar$L zL!|vaMGL%y7$5s-6*X1QH#Zn>+3!=pRn;m9PLpD6k)XI25#=-wAIOUxv&JQr`wzd; zrbhvud{|O=Tl@mn2ogQ|LTm^klY3=4sC2d5CsH->1#Ga5pwVhCla|5V#IyEU#;;sE{HQ~zS5A6|XlaCVNr z)&sD)A-!`bKT0Py4)q|<`mLa(}NjX&9{n18-qVg`(>PqF%+knlG~V(_RZm7 zjrmc0D;{%RV)MHkN{8ypRyqlw`N_<71EXdo|AogSWI%jU*xVK^26W64*WR!=8dm!c z%=&NlRM^U$KmlFEZI>7CR;LRkMox-mcMIJt-i)yfkk;(>G3peGjo%{sKrj+NdbJk( zZK;2`s-OsKy}+l6rOEAW&Fb>vwO4}Waav?>N-?ZnzD_BBIW5RkOlfd+DU^mgDQeGJ zE14KB@p%U~2%Mn_Ibn}T=68eDNGMQlQLHpIOH34Dyzr#OXoY`>A3G>FCT8%(Vkyul z+%cu_@|l%;H);~(=8~C@Gj}Ep357Jqs1PtlwLh+H(;?AorA=1df2b}Q$T{8~ms+`j z58p{s8dTcVUqBNn9J9r1QjUeS^Od7uW1GuP(unT zBp(!792Dt)V+!Q(>!?K=bVq0GBVru*ASa-@dok(f60HpjOTEM=9OK2NIz9jubnb0;0rqmV&ukGEKTV-d`MAhA7aQ*u;!{%@6%&$_TE6~8hIGyAuo5Gu(I(yW(n_`d$>2aZ^@e5z} zOnO97%5Rjt3-YYyX*{T1sZ^0xHbm^2mQ>p!2*HHUTz%18rUb6SSZEE7IIX%(>!?p+ zkmn|(x@Ufax)8p!bw-aDv^A=@0ByYj@_$<(`nXuXjq#BtwqT`}0wwQjn7aB#zijpf zj34U&H{HlKsvZ)RoWc!M!*Ig#dbL}&L0cQoi}Y9;0|v9G%x$Kv5$pX=u@I+OTX3^h zKwPcqC?z)K=OH?Sq{xUFV_3PBgMSi>9 zp}z#5G*Lx6^xNQFj!gmq>SO6|6cbbV)Zk_SyYVN5dtxfY+yysxa3iYZ_qUO}pgM+b1_3_9sB1&Ns1wa~R z#9MWx-=kokAIBLZ$z#lMt26mW<$`JMVad!?PE&^n_KPW+UiLwY;;`&~?=gkFhw1rR zI4DM`l#ndGIXHpqf+{M3#)8GdtAbnnjYV0w1&<1DLz9|C!TXW{mnZ-Eai)-MZ3dB5 zj3lM`^=sE--LHNSq+Qrv4V!GzH-BAU!nEI3NJpMt9^(eXI93{w{4q~N6=Ht4XS<~_EC-EHmihbhgeLOiz zIQah^tAbTr(o!G`UmJx@{dS_*t>0w4sfR-)XiB&?z9yvdW-JCSK?@T6Gs)h= zaiY?}xmW9l_P*qI$8)K3ZCkp3jAL)uG8|o(l~t;h)};%lB7!i-o1khuK@pGz2VM@s z0VNv8^o5S%>KMN6FH-1GnMPhd{3Om6r1MsPx56kQi`{3IV%NS~nVBhwIa_0d4gH)) zCq@`io>=eARTFtN<(RDC%OAuqG{4DJp=^YAPjj_-G-%MT#Yxz(w^-JAo~uJyB0d&^ zLJmO{RM=WdkQ2;}({_=}4Or>2OOvwFd+xf8i%x#6oZ6lE>!?(ro`yiHGfYJ-l05Lv z;9ik?(=OG6JfA5)VG0ni#ZXEu28oZCmpS?h(&*5N%8`zq*DI#HobJP;vPY1`uvkz= zXoo!LImBFcmdZ?YgDDbXVv~T=%fRP5xR}f&twKj~hMKOYXhQ*xA7E1gf4&PuiHljO z_;oTDtn!`5e`mRc=p}l=|irPpDasSa36?#@R3?_vsmyanMkzSlQ+4psmu^LYjjsUJ4tGxHa!} zO0^X=9p5JLKt);0Tkuf(RE_VQ{h>>uNY`F7mhlUoO$j|tWedJJqoLZ$7Atow_W z%3e|@Yw_#fnUo#KmM9=$ttb+}&*@yKC4+`&c;~DSk~L*EKJ1VVm?u11cd{-n5Ozac zrcb4&OT#9j2i8(Ul-o;R){Q}lZA*TUlV&4Z>ZXf#E>R*8NoYGQqV#kV#GNx^c9o|Qa&$M3 z_U3gV!@$IQub&<7@dn0-a8ne}>*gc@af6fMbWo8wm&XI!@;C9u8|%SwV1O~BYQTdJ zfHmVy7+xvJdgJr|Wg7UJnl=C3{!a|=^5P3`g#aQ}U}lz~i2)E3{_unNYbQ;l%@QmG z50HVO4_|n}^uJw8aZt>*X!qa;2QmcGkfcoK5504z=|8)6^C;D;JGpi@U(Uqc|@cdqcWN&;v>j3;o92S_I$z z;iLdWiPljPo?H-Ha~&9g?_%{YFJ5^i{sy*^d+nJ=>4%%!QNya@%ATr}UpNSJc#s_Q9tL*VaxLr# z|5e2V&DF1NT^>SuJ$f+~PTf%b_vYMPC1;qw# zno7?RKc5&)8XL=7x6FHxxZ`)P2PgZ%nIVguZ@)IPw5?jmG^AKgF5T6)vT)~Uy<11KclJ zn<2W)6^yeOKkIob{+{EXS7(FnCPdP_b3nSl2vyO+Z`mVO^IOufFPsDcv}D9`a3!uBY&VotQe+%_g1A&VGr* z;nAay%nDt#dg*H6XoK)f8|_Y#HI|b$sXvK#6G_?6CNkS@xeX^IVnAiQ>sDjCS9DV+ zn9~ilsl1yBt$oyR#L`v0v(4P1K&zjMz@y zu8wjs6ktSL&iN)f)Pl@!=8h#t%ce zO|kkjcLGW*n4x9VE%e!x7I@YjLXr7l$$^W29I{etQq=+A;aYZ_c&cZ#KP21H#6wYN zri;IkHi`aR2#!*Qw2Eh<#Hcs!?2#CJ;%Do6O-aayIy#mjvIexg?7!q_pp5hz2xdsCA; z*sxBF%S2PwvLXbr`gs3JZ0l*@tMY(YQQyuBa2VUEhm#i&nUQM^4pWANk>w!gQGqq% zpDqXS$85}?%w!~s&#zR?MOtaJwDilm}z%k5ZmV%j60Xc@O3;`T}ZC+-jRRqz{d zr>iJdiaxewCbBKuh({yfY7}mBF@E5DwkC(U(fS ze`-Aq3P-h+q83;#E^MOmrHro&C6|O`ZH<0DPAmq(Ak;&w7VABF&(1cdN4-TVZs5QoM*ec%gui zIbKRUKKT5jFMQ<3fL_3PQqgF86CW_RVbG;Lh}JPW)hq&{qnNF#BTdRljf_V{-Eo9l z9TP_g$fr^Z@3z7!Azlb*(nzJa>OlZ?D5^s8n-?{1{F#TSZO6We^;9(=&u6k=Z5@98nA#={x&WTCF8f zR189C|4wm%Uxnfdee-M2$HdrC_+wM89^yUmo2V_8iWbM1Y!^>Rem2Dw{S7Ub37l3$ z5Rn0VIey?8Do`vUUg7Wg7mS!fU84FP;N}i$gg>H}oRminA5>S7?BrtD9;o4P!bdjx znF501p5R_$avNqC5};&h$H>6{&%|ZHoGA4&R&i-sy48%aJxQhY+4T7+4(f=(G$Q+H z%n3yRKt&q3H_|`@&`@gcQgZAxt--@V?LgP5&Q;cRkkslJBWzGw2u+(qd)&8NxOm~6 
zn7tGxXTv{HiKt&c?;i!p3WkMlei*JqvxO9UaxHU{cOJF4cnoABB?Sxw+&? zV;z4l22Uu|4iK()OluSOZ&}AzUiSm8zxn2y@u?|wR>2PiK8?{l^XRIlgMN#^cBTfZ zT6Nwco$`>xo~@j)jewAr9dkKNgdQ?1-h~S*hqP0@i7N;jZ;lp^ckJb6iC$3K=w6=1 z>ea@ z;_w~hMsQNZK%`=gOKs2QF^cIP;}EW{0>a@BOdC)TlVYnq#F_3Nm4ASkizi{g?v~;_ zEMYm2yYgMIi|Ntw2?;sI>7h~*F-aJ}JRuLaELXO9@B}qARN%r7V-bN6SLI^MXap#o zKAOSDN68(V8y@>=r2@)`MF^QWeoxo%q*uK4%BF0-D3f0y{@udA3q2C#W9T}b8G2mJ z$?;QFhb%u+IILJ)tUyf=Ii?$jmjJbi60hf`)yeQ=>OW5rgoQ_URX_g#E_@h-e)QH| zD^j3=yS6dDm^j(s_}@(78_)eS|9qQ@MUMZc>20;GlC2<7mCL*)NVfHDVE1EjH;&InQno67xd@ zlJTcG+}W`efJv|U?^j;-RVQ$Hunrh0X@IF@$u*Pb+tyrMw%KY0@0IQr&50m)Qr@Z( zc!kTs_9zfg)GoBX;{Wfo8)U9|M@nmh8+i2yT-G$nmTWUjIV*2xez?52S*tpB)vS;f z!KQOJm;78R_Do7B;zl2LyEBZgKKE>pZWS+{*=(nmg%%F8#VUu79}GF|fE9uy7v#ww zUViFR2l_#HDoV{z06;#yp%7AjQ^vwQcp0>K+QGxm>0!zkpJb}9C$Hm?hNN0rcW%0kEQlhRne0lM2%c-McHRS|G$!W06 z;ysB{bbcTp7J5&X;k-8-oims;X*Bfz0jm?A^>Axqkgx zP=61ltMJz{He`sSQcyq`y5f!A=oOR$0rSIBE(19%%(8?)umxV1S-8RtkN~)>JakeU zg`SFe;b2OlZ_-GjpYd4~iN7^N6CA}eGz)55$KO+D1etT|BkjIbWNM$r4=jN-o;d?m+j>l;7Mm2NPU_FSJ?wV&FdI)5>74Fwy1|aBsG1mC*@}27?gAIZCx` z{!T2l%E>X+_y~_t&Y|oDl?J1_LhOBXlb9bUwGv%xbv{{VGrGLhOwUi|*!=pWD(lLY z#JD>w2#;n&j=CP2ulzTws+SkBK#Iwdy-|jB(mStYB_{~^>rspB80e)3%|D_j2N~EY zRVQuv9wF#=QvIMZft4T7ZYg?jcRQ9vQ2bV`SMMRbUY2tjZ47c!uuuw^xuf@BkSA9s z>*7hAR?b2G>}O3HaI0vo@d$k5ESGOucHJ1#$}C(+1KNyxWPo{IB;zFJf?_S{##g1p z)ci;id^6~?vS~@6Ld!FjWN&AHT>hIHzaFGhr$J3V_OgK=93$J}!8bNzc^2R=&lqER zrgs!4KFUvAJ3Ki^ge?b(`VbeG?Cv=wzAMgGR(+CO?XbO&dQZ>@g$@?-{Qe-qN|fgQa$mb#}_O_hdJ*>&!$Xe0d1e0--8+aeA0zE}Lk=31LNl=p^DcFCv|o}TqyrO!kW0v}S$rzzxd6cMahb%p z5xKd)u`<~qW#H!oFIeNa7SL8X24rcK(m~o@s>Ne+A@C8Th0EUt7!+Kq0(~lu6VgiM zI>nxLGsQvR_A5USX!)E|yPO^zJyxOVMzk6yt)glee_jc-UHrx9h7<2_heg!#NL zRJDK-SBNY#an`Ex!q5!S6WMFk^8Fd>lpw4>?@Fagr`r66-%Ux?1Dmc=*GOap8%Jo% zWW={eO^mWnIRRUAp{$#jCI~VbR2bz}m)2m`+^ix}EiH05{{ctTANjpIsBx%_@mO3S z#pkk@VHtSYmn`fZjqh#d>QeOsoT70}M5>z4^O~Y=9qkgn< z1U!7onF^%j=gkj_bCr_pQfg8U#ARLfC-E90UgcSwG)N_Xft07_Po_R>PssR;nN#HC zQx9qnl&0ldhlql^OahvBk*8ozC(&`>HKBdUap8ql*=SHi8b%BnT-jA!)HB91JU%Zj z=Q$#@o`Z0Q#S2bN0?ulW3tNmjb+Lpi3nnB1YY!JFSnYTO#%`3XL8#CduGg9e?}CCf zjc%||rct2v3aY2um0gNaOzraGnP)>7F5lkri{$Fg&2aa_t1re?R0Q?(c9hRij>uL* z2L;3QfdJAtV%{uT-Ie&T5uL@u-nQ1B5N-Nue7n9J59*!NTD(gIh>c)JASQc* z8-C4XJnWISQ|c16#IR(Kt^snMJoo~jqE`xq!*MHwjel7lANC4Hai>;cog+B{PK|XT z>!RwU`@PazQ+y*mr|ckAyqdXTp_c_BP=kfuD_6J5y7hqMq`v>!zryOcyohlt(V&O{ z4%!k(8h|E`HTunxqq5!=x%SrCCRiOu-n23>j?ehLYq%6|Bx~6kH_jDUcK=gg%x z7iHwi9yWAI0bZa+8US*i-4vx$Y9ij{Jn{!S!epA)$U$?AfX!jVDy;re><_B@<~dix zjU?;&QDbeuAU+@q^e2;U85&nsf`>EnSl#sEO>{K+Q$G^zx!Q2!ue-xGx(iOM!b7cc ze_1+07#4&deDIZcgU%$63w;pvB$x;SOG~D-iNE1o9&8VHlY*PX0eYp1!4WDHM6EhL zP?v891J9M64AaL_wi5iFD9!)}apxA^39&~MB*sn6_kv#QTBEYGmVYfSbFZ+D$~JhW zq~!t~^W|O8CRGlwKfS`*3Wi~h%m@qGTE$?B9>Gq=E(&71I@}GOl{BVSjjoarD zeQ^&~q^bfIOR@ucl)Rtjokqj1SuK>dr9w@uzpg%#_Nk-Z$l~=nqF^3F&ZG~vJsMt& zNWslm8*S{ODo9N%JBWQZldu{AOsml-(Ws8dPD!m5j_Y$?;mTRKCJJ6_mzL38LrmpTJy&h~Xb z_+XMQ3W3D^uF600C(0Bg0R1Fw(94Up+rhxM9(q|bu$P(*2$*tQw)57f z8t(m8d?=&LA;o8xR;4hcHXzR8>sGI#NR5~ z<4H~n8WckqJKD7=Q7di7D>pOuD+SbEig^pQwwuXFtf_qX@uOC)w)Pk(c!5S|_Mzxw za59N3UM@;;8^$`-yL{mr-rMrb$yiL$LK47I*JJj0y=#qS1Z|f{XC7l> z80AHhDLH{Pc3K%)!yT{a7TyU#C_QGd3)XKqD+NoiNJ6+`q{n{y?ca`(sTf@LFz6H+ z4{F@sucBkM%SQIC;J)Jn#I!dVrD0NfG7y|QOVNS+D^V8u>Yn+1yRd(WtzBL`9J;)9 zhwC1sJ>2@g1iPbLrq39zt4Tmql%ByiDk5PQ*NeaPMU$}A4vZPKBFMoxYa5xjf$8WG zHw*8u1R%B%J`xbV1a+ENeX)@1sjM_ZI^q_m!&s#b+ZmGsA!JzO^%nc6oO77c1hc)X0%ovPQKO6U5w2|Yv6xXAucsprL`b|oV^iRv9 z514ncw!yw9?}M$RIju>Au2gDx2_0>JCYb^SA8>Y7ItDA}I<2Z(n-&%e(Urk=L+NAh z_fn}RkN*WV{|NSMkg|&1BP7{VXe&1LIEAse>O435^Ir=lK(#2o2es+Mvt#WBokD<< 
zXw@qn_Pt}!%GbHb))Mei@INvv5QdWxTn1)K2@ z-pVQ$I5Is*v=cCWwd=~ND!?ODW7uRIoAxDjHt2U;10ZVsqpYi#C>qT#gI_raD>zK zh7*gA;S^kh6u{$~n;R#6E5#gZ)(Di7RHm6ZmiiB@3};NdY03uWkdmd|GK+oJiQLNy zBE%~sUtIW$PY2|g#v>UlrK#Ov1_Z4q$`J?>qkuP-n^Mtq@1Pxh6sgX^mF#<4G_?@VZ>%Qh;3hE=Vo zc@YVV_p3$jVxWt5U?Yfr$$!7H@~2|&=nI0mRl5QxR|d#wQD|;mwRMXUvFL@NuGn9j z-13MZPJAD*kHSVu?(wa2qgrKBslh3s@iW}=RFdld3yWzD3V@1-PEGlXm$@XXcc{WN&DmI4E)>TS~1R5kn&1xs-z`cN8gwSZGC z*z(EmUw#_QMEfbPvg%ynm5%FWLsxz+q;hkMC6ni;if(Z{{1Ki_X_%Rdnv9!9BkkYeCvDrj~C*d9)i6K28;Vxa(#O zFO&+UVV%!l?(&7U61U?9nDqu)}!8?bg0NhzTic`qus7ah0fSur9+X2#2GF zU`9ne0INtUasZRcD6bXoi;aJI@wHFlZ5wh1#19|iSPCYYCjd25Vu*?{sd%V@)~sno zmwJr;m=W6>1t=cNsZdlDy3-Y*vNX;uBG=jFPrCn&QK*s}=%^Si#Jra}i6{$RB3162 z$xQqL!&?s_tqnhr9|AJPF95$+O%9$(fIB9lRAF0;09gEx^@WU=%@?Phs8%%d=ZcMa z6HsDN)M=SeCRW@TcgkUVpQv4M<)V9|K;T<3Mx82q#V0rBEI0%2@rixF4~e zDFKfjJs;QJzHRY|W{ex`m==if>)iXEA%MC2o=9`Dpwh(et&i2O;$;%y@wdR9-rKDQ zr4d#92y)Mt?&Nr(cM-yBc$+61N11cyp7IvU??l);wk|r@uL)#)*ZW&p97h}xC0ZtU zlMf7eJ~*6e$(Pj%?V1J^QmrSct8}84$%aFsq^Z2SXoe<{BSywaBHJS$GvUr4rUXWR zv=|EAQ(aE~Ta+@tRrZ5(1kS3#j-H&u0hpM&D)Mlc=ke!j9~>IM$-5itH`wo$e3_}O zVmGVug}v{wy+%hRWv8oAEVVlxw@Kbv3_Nm<%t?S-;MHO-DX9ca*v8Lk}?FnR`(4ZSGW2ci9*Z@C0e}EtfcxD35O!OK`6x)Qzv*#OPW}0u=#X+ zefZIfalOn)JeZy;&%?=FnH?`h9o`P~l!|JAhayg0~fJkN zF7!dpcYzm;3~WAQi)@_;%+ZGl3g3aGQp|CEJP4{0Zge;lrDH<3x43NNQg|vr9bX^x zN)bLfkER@J=c|`)3R|#=LPNjchb!5P2!>7Xnv-+7<2H$Szc-ER%}2U z#Q-1O&ExmQ7vf9mS;Wg94t*wLtxStE`MbgK0bWFfMq-=HOEpZsBVDtkFQCUDaC&9u zH@CX6eXFZpYc)lf*{8sLV7_>d5)drU;RFCHu~G+UWkuNnj27(1&if0G{)i;P-cwnG zEB%5Vn;K{}DkCEZ#K+Qqk=$T17#D?|0#A!N!SCgZ|a&dr=r zXcsE?R?fyX?T&aGTK#bAvSy>gkYO^9lJDGYY>+gZ@yV3aZ-GQJ3|a?lL!c7>Pu7&!gm>C zTtT0=FkP3j7))@xU+zJFND3mH4_hPNSW3-8$dS17(I*M|!xT(Ob>IpXmlyA^-HFR- z1?B}u2I1^(?!Or04t0tADnj6>5b+y`l`^hY%B=#V1k4~xZ}wOfD5LI5+(3Cs()7dB zo;o}K(p22U;_#<~-fn*C&V(2fam4~19_Gn~c<>>*vf-tp>51ou`-2_k5l>x1arMDD zp-KPR8i`eJ=na4qF%UV>;%|6$$$x*G-`{=i?sM@t{6k8TIL)@3$^h_;QJqRneX@h? 
zjiWrJonm-=?aNjKvYgK<=&_RyVb1Q}$O2#ryn@v{(v1Wxpvet0lJw4a-f@y~; zcsMM!{VuTPAe{0hOSo4^PA@ifhFko>kJ=unv~+BZ&azJ%OSMn1Xyehc{4@4-R&bqk zx~ecwnN#JZsz*qT;?&FtfSZUMWc|YRonmED@mYmU%CW{EsORko91P?l^AM?d9viOb zs<6@-eGmg~C4R$>3F6*Zi@%=x$d_$1d0d6HPQ&U>K}?_3C=X{lS*l6M2^1M+gjR(< zJ!I82wed7LDU{i>h0w~S893FMHXfXeT4vz26OB=_%*WKcDhxJP4RxGnY6xaJZ}d;7 zcp`sQuq1NfuRLT_V?2dsBn%@!iXNS`u{L=EXmFq)_TZM=z`k*pv4 z;T4ZxkdSx+q8CWlAcJhM6Y-cL`a$j#h$iv4;tbCDlofgCg6o}+Al#V1>X>g(&1vTk3Rb3 zS&xPiChp$2o-zt;42e>zY{D8!cM&Kr*!rxT4eB6NJneGcXZE(ne}0r*bhgLo6DK9_ z?}`pI9vXxy&sU6iG~UZa(+cUci7+_+SV~R>H;RvIDme+~@vJ@m#oyaBj00HKjZ=WJqig^&{@-i@yMbF<|Fv+|l8(}dK( z?4vOh^QCf}#UOv@jx8buh%wIbp~z!zr9ePC`kIJ9aaIA6K%TS2@tkKr@bmA(Q637) zP*rkT>B5)-W_(H+p=Hwk!9_b$;QXM6TAAyNFJLCWTMX)w#b+QV;FcJ{?XfUCmjzT1Pkx76zvt+Arnza36LDk{)Dz=auA;krJ zVvKv~qh`H~rN^?{N&9ShVaXF~wre@Hc7;JH8U&$>SMv~hm^RovP}s`)v`)t<806DN zK+TD9Jq6#!R78jH*@pcPHwtz{L_Zj0;ayV)em`^+yHgt?y_qa~U4IJ-QmkVOB?5<<3ZD|;Q!1)LFo z_O`Q>&0Ojsi}PL=xvwmX*+<0~-y{{ozs=3B6aX*0RcHok#gBZO8TV5NLaSl+6$D99 zOQvx|3dakLUISV$i`^ab3@I!hy2l zue0DeA=sJt-U z_!^TUx3juhrO2f}7Rwh;#lz#XtpoJ<39r6s89=^!9_mczDc5MLQ>#Ndp1XWppo;4l ziRNGAS*wZf#o}pL2L8-4Fe-mu?LQdoq~ikTOsTj@wSzRXIiA*+&3h7k49SKd+Sg7g zxIzM|P`Va@mlt=x>=`qPk}r+JmZM+6@>m13iQs|xrML~{ct)W`0M68D6S$W*s}3O}p-LhrTPDjOHGn`;Vhe3Hg@yzeu)slU^r zYK2siNZuUUn**lt->b zmWa%S#`?vl{smLaZDs!JVzIYirLSkqmWHCm_8@&=OaLZGimWu66+MPxc`+`|!Stw2 zdTFUIuExEbA=Up$X{-qm<}|rM*OYovLPpTu%mKB;L=BI0{4f|q%0(txP)1eZ% zOESmiu*bG&ll>-jvdOzR1oj{h2erdcwbxypIpjYC7=mW9h-APe488ykA z=`cJc&9}@QiQe$Pp5W0iV~UdZ#wc*Iei9T}UyV)8F~Mn7;_!d?_*ed|?dY^a8Ob0~ zA<9dVFUJ?7cCrCX7&JYpIcDx@C&^si?U`60&;^DPj6k;&`WnQ;IciY2Ek@BlN+q=u zSuw?*gUL>kL79}eC?19Tg|pPV1yc_IPcQ=KSEzIV@Z4PhlRYNNp0gR)GuoQ^s7!)ZT<>aKAr(dZA>$t3UY!n>P%_e2ywXqC|X47NK#8fXoef)hd%Q~La@`wtuZ zvUo^3s(>NU{i@B{p{P&A^+yqlMrjC-}WQ%j& z89#uBHqG&?VY8ca@H34H_biqFBVhDMNKsgx<=a!mcOPBxq4v1XQelbCRV9JlV~#4a6!npWmYccJSTJ zrLC9(l5j-hp?Dbf>F;}K*T{K|?SMwX5!bp+IiY7bC<>*P0<$n?Bs^lNBh!D-07(`& zIE?Y_n9+MXB&e3CKA5C&q6Apz^b1e|FH4D^_8!EfP=;etx6mhWka}Gd6-P~sOOcUr zDViZ^oXOmBhoBr~z73jEZ9pJ7j`Ypt*eB^DueJZ^k1i)+Dl zwZIHT88{E)*_Rh@yy_wDN#+Nc@q^ZCWQ#N_m6aMKzM0|{>|1>K;WP1IbtUqHK`Oe= z>AziyA3B|=#5n0tEiB5o3cj;Z22seLhzU|^C-xjV^rx3Uh!-Oxidoda?=d5Rq7q)B zu)bY-m^~?#sNv4WN^#FbiaIQ?y^3(8de}-gykag^n6WS=Rk&lwrOH3XdQG{+tz^*| zUe{u(v}1v+t#TYJ4oaqL3k`P#|NR^tTO7yLkB-Jy%inZaAK3OuyP4h{3z)Zl9KlsA zXyj%r9`cDma4Fd?F9?T8VdI8Ck7dFsMn@rkLTVIg-c{xDv5NvSO>*`7FsSd9bDRJq zR7}-kt@PtLLC#i-1ur>%sTBvsAAO=0t;S9a3KVqK2^Xti;gu-I2 zPtOt)rLGi3U7~G7svZsWlGLYgNP4+&kRRodz90`@VYn3-7Rr42QDH^mYfE9&+gK1E#HXTaP#Z&stt9}rU0}#mm zJl8UF{c5a;wslN|xmq-^xg=vjR3fEnB7{Bs6P5i^r>0HjpIdCSESzy_Fazz~7%V2u zE5U8I7h(ayVR(dW&O@Q1;>Wqor5LRG1Qe?ex^t3Cl9#*LNCePwpdyxeQE4XW7-`xP zZaZ@oP}Qm`pg~RmX~ftsFFu!5{RNh)yE2-EdUHw7Xnhp@ct9{duJCabvAA#`5DEm2 zSpy~Tv#0`kPCWRas476I23a!ijD!x$8(@iOE+UK9@riOg?nnDAR62~Z)z{loiA;fZ+ye_(RNiUjOr&5#U}Z#CXHdi;5$mw92SH- z@F`*(H7+oQgA;A9LY%3U9MY#QW;3_3QAa^Tjy?-l6i#8jhIuP>!crSUBck1XuG0fm zck$}>KtRwd&;&;csCZKsmy22O-LT~0Zc2=Bh{kpt9e8kHrv;5iyNa>j>K_*MznMxi z5xPfU*oP7YsA@7$q+r;RhJ`Qe9NK?ZD7mF70T3hvwF2&_$*jxdUm zzcE&lU@Q=(5~2v_sk10^zz8S~QD|fI3TnjP*d;}m-DW$~{76jSzY5|uW72QE1-eF^ z1s>R`9bngvAlN2ffC2cwU4F;Qft2YV{^q~Zb#r;)2jaA=5#hPfz#KRR zfY&+KOw?t$SxRu|_T1`HumlJDa8wfi_DJw49w|JBXKCdS7xmtuegmvtY+I%534#l$ zJ=7JIvoR5Y(eBV4XcAsqLyu&ceI8}^SMQnlTKrk+l5&`|FREkMn+JZS(r zVBf2*I;P}czPP!byffw6P>w5d)}uwW1!gi6gnGMwhDjaFt8(!v{4gyDvD8}42wE)k zqOR_MYUSVzep3&+c$B&x6uL{V$(;^cGp@L6pKjSfqq&8$kn=3DDL<0Z>A5h$*8eTH zsDK?I!tN~lw7~gNEFUC#Q87rHfs+OW6ojj2KjjDT3DduGsn0~970gnTy%GviqoiK> 
zypK?`+QF{3GekC3@kc-~5-{>K9&${ZJr)ip+d(gig(d9}y+!=D@Q-T2y(o2x%#o(Sl_KjScZol`RyP4Z=feumOk$Z2~fML7`3fAU{&6Vp^; zS^(ZK&Q1Nwc}N5tng%t1xh4%5kuu@Wg<+xH}>1_=3nK@iUxg=2Nw>589wb zU@Md(Ljs{_$keK#?n%`-_ed;rM0{ysD;R`h_s?UHs$H!%1o0c*fSeZ{MONZ_&i|L| zz&!hv^5n~l=dOCdbC>}o@4LlzPXFiNihrcIYZrxy*W!gKvJ9A?3aZABPyX=l|D7fG zYU}C%upp%MZn5W~POS~h9^)HNn&56cbO#RfLnY1{upnkd(OY@`;vLUE7Yk2*lO#9hib$Z0&2fR2SB$A_<(xZV$}thAZG2|~fE34UYa0vQRS%$0%(^Ju;m z1=U;+O`G;yX=*;>zQ<#Xko1hCc^TD&rP(h&z$r@88RwWa>da}m(A)hOHY(8HZ$19G zUrzL}whdfk#e-VglVX(N9;BeC-3D8?ib{djnBJmQ*9Mgx;c1Kq_$3P%VhWr>l6Y~c z`OmWmZ3LB132nuA?rg01O46qAi_Zi>mPSQ0YN?@~CXh)_jI%iQGI|Py2)*D}p3TZ& zBaX!w25Dg>;f3@0mLqJvMsHCdO!81rU z1T*%J(t2W$DlU3Y=E&~#?jz3&)hXVV8IMAiVVuU; zfj9reODQwj7_f%2i3>^RP2B3rV$h_;n|~bJ@AFsuQW>>rY;pkPE>abjQf_<9bBp%^ zv%Z96?}@$*c_S|9&^n!--FpgGZ&=m72W5C{(=P54KA!sH=Tr?lsX)#-N@u6X3C5jyF{6}8r5#fx& zr3a~|a7vJD=!GY%({)~3&Qzg;GN@)_k4J+IZ$)O_C6~vj&tL$*b*q-wg;(WstHudy zGE6AMlTRQ2)HXFJ8+kQ?RAA;jH3idy1%59*pC3G9az*Zct)T)J-@^mp#~_UgBdcnL zf7?eQdYZ&@TC_ZU4?Qy8#RV~KtkNQ`TgCJCyO)P@BoZ zVxz}fkUV_C@bVX`SVvsQUu>Q$KYhN~fyV7SkxI8XW)pO#_@J~BNTI|G0Me0Q$ ziR>3Gg9su@;vFd)w+?L%MV$rn%NAx#Y;?rIaJM{gy(`QRyhubg{(N-xOThv>B9+4r zKY7IekBiuDaS;aObuXZNq6t_bwBRYUk^_}_X>vdTl^pW^aJcVw461%g(95^C&N9_? zBH@myTw?8NBIdJ}lFy@J=x^gfzZb*5{@S%zvsqq9UQ%w#ItvoBlIq4_PM}sX z*&}8=EUN9XI-RjNhJ$Bg3_srJuk-$+xnxFI98^K=}V| zX^6RaQk$#s_me-k{PURL<;CQ9QmD)=6rJ&}a{a|i@0OY)(!8Gb@l7FqaruXLV=Y07 z4TneNEf~za$Mop@kBFhd8;30mGEWiYc#K~?DdNJ^&R&RL*{83YtnaoWY(8CD*?nQ$ z%VE4Ybt}ej(QQcv}_Zyy&-`0vpTC)%F3Lltcb$(Hz)Z@l0UN`JkV zo*d#&>;A24^B6AyI?T&$Bk=J*x6!w+7*d8-7cX>yRhlP^CXq?>Tc zdg*nz#V>tY8Th?qtJGo00h?+4^J^wHwxa?iRQodNx^}c!03oBm?&n)b1&9qR0v9~! zKHJwU-{R%n62$cou9pEQbHxib<#1JZ#vFX+Spq}LWx4XT7}QxUk}MzdwA@9KXS1Dq z9uc&U+%D5eai7BVd>rhh@4TzB*Mg6}1K>+9%@Qs42w{eQIW2;J z%nR}_`965$V3OP*1La){lv3W%iq^sc@df6$G7oBPHHuiONRgS1m_~XE2n*)loM68&&$0K^ z7l@aY&4#$8Ih-FgPlcEu6V)CI*8n^4n3T_mMpAe;8kr>0behc_kG|0m@s`=t-kptK zk`_mLc|LJk$wHjwo1g^5^+GJH-HE|6yFBd>>Zs5}k+P@)Dw8q{{;p6%(P{is#9w2G z^a3r;4HCG18V|=gmq-+{bz;*^&-NTM6{VUL1&QO}Wh1iH@zN9;L-V2(68&mT!h*Qy zS?W|&7ll$lWsB0IB@dh@|HtJIOdhpvKd~R>n~%R2Q|lr5PRhAUERo8{!id|Of0Fa}c-3_GN9GOzP0K#a)z)mxzSVAz;6%R%7gY(|e z!A1a-LT`HZYq1Oz@0vaE7XW4#f^83v3X)MKNv#Tb)<0x{DihpV-i(*rnhOf5vQ5RM z-CB%aWVTp(sB&{AlB)ESM|_@9Jrz4;>Or;3i!Z+w%dd0+xr}n>u`IpJ=S0Z@vDzPd zky3e3EF~UDf>05F$;NM951=EvK4nd^XS88<08=S{qvM-oJ~jDHqODkfG^%o z(yMWCGG>RMiI3!I34CeFZ=)cqc$FznZbD?fH~?-34Q5Z#Jb?&Vaw#Q5bnmA z6(RwV_bVl$nmi!&pH8IPu1f0Khyw2(4^cB=`}_qeB?fHLZ`_V%Pw+iJy&qLs7 z%*o&02r|4?`0wPFwcO#pml;bVv6A#kkC!SkkZcL! 
z9I%;xn)iyMpTTQ>@)-Eu0ovhA)PO)oK_v8ON!I59BQK>_GKZA z1Y=Q=cZTBLAQT13k$AxjpA;Vf!i2Sr>(s&bxOkH`2ZXlhDwLv{-C5RZoL|CQ zxYwRg@(&NPGuhfZK?!xeEIvmTkzjqW=SKBxED5M++23J}jTP7K!WCB(l~WZnN|KYl z|7yttR{%D61s!vj6W!Yx6T1f@CD`Ei_~#c-t^0DhdWvxhd6jS&PCz%a$g_tNYlQT- zAZ%7Zs|64)7`jo{-+TfJmNgvej<-_Oi#s9od2d^g!Bg(*0m;9O;_ zd-PfsIbMCh>_0JiuM9#n{7ZmXhFtFq0{rbam&7@y zb&$G6ax)DehI&U2hweRYm5lZocX{#PDEXH}$QTZAJ=UTOM{z|} zzVaN|k@11LW{1Uty3iFTj?qcOV+R!;T(PGbql@GLj-)C9T9f%1IYZ(T2cx+IKv9to z#C~{(S6jLoOL4%5yA#Gz)_>6EZ-r(G}g2>3_P#6bzRJ@3I#368(O83o7 zkOi!42BpA>3(5Bn(_p#!sGwLTat6i%P_dbSV@Pe_O7T)0;nG-Y$h3Fd(!FB3*wbl# zW1FWt1FpcP@aD&e+COHO)i47%|13A~?%YbepV7o>Lcmrt-;-NYk%jn(`lO&-KkDRfV$KYjpO7fV3y9l5uf|VC|}9?0w3!%DC)rpjiHDE;f;*(O+F6Nkbf97Q)bL@(H9inA5BZ1 zdVETb=@0kfrWS_&aZr~CXTtUInZ--yHWp0})og-PX;5Ppqj|-pn@d~43wTlf1jhTh znX9Rq6n8ZyV$uL3et6TCd3p?QvFX!h@>6xnG4=3zNuJu*L81De#bhRwA`GVJ&l;H; zL;Qr2ZFsDF&$S)6%Bjx8F;*t&9zm5iS?9XiY5rn-G?d=$smWYa1S;O!3WNurDvE@2 zZ-+fxEo};`Si!^q^wX5-joO$t-ETe{WbZIiw(^l#wa^MN;hlTlL5h!e7N_A}63pJP ztuI#4^z!2M5(4iEep~Tx=mh+4{dS-QOT6a1Z<+T4YU{;<%GhT?<0I-TXL?as$1+?C zJ8Tc}=M@or)(Ub+?-rco@R!x^p9STZ%LjeH^T zOoVxP=NPbwN#45#s zxoU<$Jxr>Rs*+)lt@qcLf||roe(9NP_bulz`tFzQEDVS8_A~C+d*F8Qn6+~r80>^{ z{ZwS8Ly%aVI+|=c}FZYFLRd^MBv3f72 zhMe~gVQTz~JINn+vb1i~4NMjGexAoOfIzORVGkV86*xNXPm1W>RNn>Ekf6bX6ndC& zY}Pbr2myo1H-UQ9NR>JnHtBs__#oqI)p~d;q%Xb-h4x#vh+&^ zME||Gixc+%fURFL?TcV4k+2k$-OS*ZDyEc#!#=nK-?1`CagF$1GQmhC+VeJSfxww< zcr@OxCOI84>XC}oG2BV@8cQbQc~8FeIB_D>#VcAY7;~JON!cg=>GE%5=1^H`Oc0&w zOR2>ZWHd9QCS&b5j=1M8qjwX(Ph24$w7PVA%kP+1c9%Eow9u>5I$nA;yU$R#J!tqI zNu+5%;j>VJP%Oe(Mn%XvcTooRdkXLQmdYFtJ3qW%qkb(@U{H+VJtWmn3R`conLL4J zmDW*ownp*yLy_c2CRCr)iI8-(( ztL!0)KBbnYaOEX&LYebHyZ2H$%N7VVpz{MG^IPAJSVw&Dxu%p$D>fNYi)(Y^p%H*F zEQM7#F3??ZNJwg$k=X8NWT$1L+;p+1C*OJem+|H&-}(D*nq>f@_jY%HXUBTdvDn28 zQB7X)D`;-ieCYn>s1=5&RnVs0XNwo<;_+VwG4a3Yyo~j}{?f~_ruZCir^F99K;xE1 zy`7SCaW*(TP}ITeuN0C=3i_*8ujYDe_$f%DDv!ZB7VnO{z4oBdW%TtJ$s4s4dZGrP zd|a?l>qbq0rGRWrP8D!vm=!QjK?qqA(rlEtZYhq2YSYeXlSJqkuN2j}@hhS3dFEZ! 
zg;=>(rrdV}!O8Q12H^L2SX>EDIVg;$vd~EzvM}b0FZzMxy5_EF#issqu#|v+@i+DM zzh!FVEtio=F;h~@uX5XVQc(H@j+B^}OSSG2VlDM8C3`KKtSmSYc0Ltph zQn96tVBT0;jnT4EOTrHBwh2bCb)}QxFkO&J4^OoZsnMFOn4-6bS*8=Ph6Ao$weBR( z<;BB^*K`g=d{@f2`GU%%YTs+ zIa_n1YOp3gF1x}H5-BkyIuh2jV~PQFJ>O$w)NA2g@kzPp_6@;cMo}Tqu!=mxJq&Ri zPqBmEt+A@+tD%!ZOo1{@%x{>19KwRmY+OSLjHlf4?jFK)H5CB2EZV!sLKrql?0wg9B&&N`VU`{;$c9u;jdKv4Sg zHqMEI4s~yN6NZT zNXF<)jP)6`u2j2z$#K}jK;l^zj=BJO3(uEF&a1|q2i&qiir~I-_mkkV^`(uVc=OCA zmBGE|z9cPzG~(FPxYzm_=hd>JTf{Tq0Jdp7{H0^l+M?p0&lbmJ~1s2C}~EW z9+jkU%6+JHp7c7)Wmctib8uGO`rL-+ExVf%y|KOxzG#|Fi@oQ@1o{nj)=PF1ITEY9 zSiLhh@L|LcSD(*v4@%%R15le6*P2PNqp*xmC%pBnCsNUqAwodvlO%6Ed~@d@{=BoX z+-r3Y zZ1K~%g^nw^@IBHfi`uk|-c*-DY$Cq$$no`%*?~CnD{!Lj&RWFsV|nQJ)g&oc4Sxz6?J4vD^41Q5%`bTVyFJX)o|xI$G*)U3}IIh*Q8)jc|= zUwr@KQ~$g&s@N4z;|O@F#2R(WEflEk6z9}?$$iZ|Q7cYQTi=>)y8vuQ9(qJ3+%up7tJj>G8rBvmWWx zRa)t-*th3GFClhH&`a4yj4;*Gxvg9NskI;|=P&Ed2(|NK6qM;C{1D>#?xQQ-k{U5N z86L>A7Eh8!WfVw2^XqQl39R0Bk`!l(S>ZRRa&cICZj4XafAOSd)BNvt$PwuL+v;hd zh7o=%$vX3JYv31yXYY2zKLtO{oBz`)+ME$pZ}CPh(X`b17n&*K~zv;%CfT)SD0`nrr+*#Vl;6 zpd((|Z(n}<@wekjxj;;Y#vFy`hgthzegY837jpbHKUDDQ)?$!mV|W&{AgxUonWg|7 z;)a`RF+QPhs*Tn{cX;h$getog*9szXxEBxP*%rqtQ5%=tXLHySrLI+;zM5NCT+JJA z#N(8Op8#0oTUy`p|HPb#nyOy5Kkg1^x_WW`suf!v(3ck<4-2+oxdSPb+oeXt2re() zdh4y2_h9EBM?f8})bgMF`Q^7`qR@k4nejAjTg0?~#Br1;uP%qy!MC;6{w7xKq?gL# z_KcfX{_J$h@dDvTP0VwE?=DAj?Rxsp1~uEIDey^=b#$w=Qt`Jh`Ekm|m}F>zs+MFn z;(f7drX=335M*p?l2^JGIYGlA>3OBP?BTE!zsDKaj;WwfGg!YDEO~3i{N3VHtV~>q zpOT6^u{30me&4?UlP~kR`;E8?1v2ko9>tfbL=e#MxulmDzm>~=^|@zXhy_LO)In3` zuL{Zp`l|Xio(8I7DIeV}8LbOiF+Xl=2q&4|w_D>sKfbq3Mcb$HrZKU-7p407Y1)BM<5l7kZLn!<~9cpyto3%0oe5ovT;YlbDO5?H-p}N z2I|4Jo=9HMUTa2B0cnWHqMxIPjS9R(0W9H0OskzCHci?uIMMtQ1?UgY%gq8)3f2)? zj+|L4Hxufp2_7!0ae9=L8-}@S!d-tkllbx2uEu0Rd1XN-6vvmXceFFQRd+{-dXQZq z2;aV3PCw4{R?H^esw{^Tra~$jApm?OB{R6vqDa8c669zl!HIi=2VMX%tv0jQ76+hE zXx>QZ!X+LHFt_fj*Gq|*b{Lr_I0;)|ByHN>!-s_t2zuo{#K-?i&JvJ9p4*5v1W96> z9)0a@T!xITWulZI6B6Sj+i?vZJ~XKhduQiW=a~Z=RYOT#p@w3oDSFTv=EXBs(8uIi zFsCT*2b>Zq8gUKI28wgvD@PHvglfcuiaf;p?Qp$*Ju3|El|lp0K0zV4{8ezqRm@LM zeX)i)nxTr@DlVn%(JQgmgVDXB4PDMC`(xcDw#FODT8tG)ou5=Jpp=1 z-ko7F0zsrHZ`?^?5uUz?&C5arB8lt<9+W>A>(m|`wQ?b(&vKvKlhnn1nnK~ z^e!vSxp()|5CIzI%i?tRU{9$q9Q4>7q*raVcfOq~avFQxei| zRNM(Tv*J07;qPJ$TZ%Ww#MS=-&Il6z?(hC?+(PH-BoGVwFhoV|5xAgiXtN!24yeDBg5TOTUWcN>Ig8>V?9>TbL_>w8NB}bfCt4 zaz0;W@VxVB)n`vh!*LtOM2!qb@8SGe&{n5j2wr+p0BA}PZpL8{4dWz14Y${AUXw|d z&4+^{u3vj4UIPLO7gRaA=j0sMN1*#a4aFQ9;;kcl`U2{rpc>KmV7M(fQY26C+2*EbbV>L4#B| zQJsqZf+v?EA_eJ-n)OnwxKfCUdW(`$JBoGUwutV9miK2;uNTPP;1p0%7Fg@b3ivfk zN7e!E;8X5)3uwH2!*eJ32gT5gBsPxBi7*AXJQ(3nP zN@opC15B$S#;s{i8Qi2DyL2O#j&2h%o<|?VAX+H|lWGxqrsgZ%q3C328hDWU1#aGt z6y$o0P%@%~Dtm|fNVU_0qw-Xc5HY6f*Io(=-<}Z>A3kp#3NI;DF^{WGLjgV)EmExv z6JPMYR`e_$ti#jOy?#AyfYZ|~|Jpk%6zw15mYX?871s5MN4!HNe<%sHQ{Em^!Od7GXSTM?8+RI*m9IBeYw&S>JEN&l^4&hr z%^=X$@Fd>$8=sE>-NYph&(!^RUj@nR>U|T|t0&=k+&=Bk9Xu z_G{?!UDKmCZqF4aB3)yIA13Xx=RYa%p=F9^SoiI?E4W>f#iWtV9+bz8Mf#y_WO?x| z2|6ruU1wZ=;H=8VV%e0={&$`N!Af~s{RSy`)ir`=*=FPLEQ2FdgXKI467|kf!GYBZ zYtA+AVR5~z0#m34h8%ew&Yn|#@(wY=G;uewaQQcG^io|Jfy!{#@12lig@5w#&a z{Wo8Z%e2p=x5P+sy#3_z+wtq=f4Kbjmw$BmN2Vo#uP(OX0ZxPevxkNXYL#FxjzO3| zA1n7Q?g1~onzy7-!2$47Gj4NRyO{1(tKyEWf)HOg>+*nfsGsij#t#;vOH2z^BDeC;xKpfKHvr z9RSLPERh+ul~ITHHP()Xs&luuyvlE+l7mQDi=mCH)d{L-P~6||un9P#E%^jztPb&( z?37yhwd?7)rdps&c2T_P4Lko^LRKqLIb-n>C1mn3T6XhZuqsVja!L(+8b91#+KRn~ zv#CZr3Hepgw~EX%gRZx9_vR;zC%sr(REbkZ#FTO{+;HCmd#Z(d*EPZ~tCZrekUq@_Numxu&a6!&0ot9$R#QgU=7S7>4jw)Qw-U0mn2N{4 zU8H_Ydu&@rR%;0*9Z-+4IvApwVmSpN5+RMMOR|gi2FbqnyP6p>^G#4u=GM%`D1x=s zc;~c^(cP(FQ<9HZ(sxzfnt7uadbVqBoNZJARRMcmk*k$(6LMpgtuK-!89I#8@_ZlZ^;L;N9 
z1r^H!_%BZky3R1$nBCg3<&kiKwWga2m-xl+b>b>EG#c0-T^ddMLyPq`;oJfAO2H>z zPZu`N`eZ=aYfJQ%fzX{XC{F*PhjpE7JP;+P5M7L$n>4`x;uoz{1SQFbz7?xgk-WH- zpz1^+F@Hu)nNdsLydFzGzvZixJ8cK{9Uv-(QK4v}Y{pEsi|cgjfVg`RJM%O6JU#@! zVB&fs5(gfTTU<^@Ksqfq(c@9!w8~P;pi4^C-81FvRfCfNL_h1#Fy)M33VH=Qnv~bC zr-$}8T8!`4CML3O590ZYX`W}yU8yW)h_+0sCGP8m%|%%H3z9~Zj~9nYi+QK^*|L3$ z7~m)!pwD`Xs|kiw<{14@v1JeDjn_=D%?4OAk+Duxw26)7Yj&}Ot;XCh?nfbeQ(9eK zeBp~Pc*F!YG9-a%T~rz-i{r~z_V@R5bSPIv&Pue)HRThLN`zDuRZs2WHu#;%3?P4n z?7LK`>B0HKcojKr^xB9|n8ff;(1^qDdU{!v^qWd2q%0QBHVv5V*;ps5HUdH9JX2UJ5SVrI+Fa&5?DJl=2xrHrXN$*5wZeZ5^l4Jb8n)j6Q zaZX*S;sJyPxDru29KNTS8>KDE)`M_ei@_?_x{fv0`T!4cjJyDk>*Ia=X7?^SX6#Ogx8@)%`N2o~QGSy9k* zXSKWrj>tTT`VRz^E*8-iF>Oz3317~B24#E44?|9V{;pRahTH+cYu1xW;u*8$5Tb!J=jzVT63PV!MwlbR9LIS z6?^o%3ZmPhCEagpb;WNXB*P(4k!&Y4Y4=;#u^PQNP5h~gGkh2HKtT1zCAWkOG2(Y- z+tx`)4k_no}>CtxgG^33v%%Wvi10?mBkUQ~WkcqBO;L~qEl$JpsAZeR?boiq=EdoX zhfvlv-U~{zUws133c##K4fh%ljyOC$AXhp!n*xq7hPM|1KcbVtD4e92oqoUx740bb zdvcIlXKp7&`be=R4{F~zNkrP0)?y(V;>1)V$Xf}45O1W3sV~;vl}yHEQIxNgtN#a= zrLYL~ZAw}RaPmnxy;Vt=H=3T1Uasxh>qIF?Y*R1K85>UTHcL)3*PFc+qd?nDOx13$ zNS`gTR$}@$$bTij$P^}bHxK7lPB~@K9h8a$G0bSK>l~OBw!_Kl+ShY|DD$eqjl9P1 zL~Lb#J2=hi@(?z!G5{4P5}NCW{iEk&#X+RT!*R_2@#S~CRMM6>JgOi6jKd*LfYtQ=A0&1GSjR)Z5G~LjRtG7l zZ-Hru_pi=o!KX2P442r%_1-CcJ7v@i2anmMU;kTa$}S5c%m zb5CIVe0)1|AV7>0U_t&({`{ZDL#R_5m!MhX?Ha9`pvGdoyXuv=e*NXRMTqp6z#chJ zZuGusa5aLrIpK?&SA3y8TQv|JK&fXmVNI)>q6Pqb5~3j$;B7pQT~|JzBplo1scPZ# z1fWk}kX+}Ueu~j6pkfBuJcS8jGKedRJX)kE_guHe@0^URL_8QM97QM{e!rS6s0R1a z&5vR*j5m`~^1UD^ak2`_-#w}(XONtva9OPV+D*^%CLs4-iQ$$Vz%Bp$R{?T<83$9j%QYEfT#mP~za!Br{v9Tksi&1C#2c_Y$rqucAx;yPSOR%Vn|R5Lr! z&Qm_G^!LilDX8j)Z)y?D1}*%MXCs=)q?BM*SU9Ng-0wZ-*+}h1jQI;dE%#Xnf1!k6 z!XI9IGM@!iBR3^CC#MKpnY88dnCKnZ?(qZl=8gj}@tLRonHdPwp>&~5#DLMkJZsRB zB}eqn9{(`jL?-2+c<-KBQ*DO;FDo?-@E%|PY-th1ZkxoLiR{D+JuMd3I zhMj!?&P%l7)?ikNQZT#LTSTt}G|j@qTNF%r{IeLy0#2O4J{L#|#wvqP|FLSXdv!qX zk$sW^_Q`*_{F5Na^2+ko`*EjsAe}OkuP!g{lqeD$511ju&%wmcUqID^&tfSi<2-$& zyo`>@vTb9kO44|5BG6|5Kgyy;MIPrU|??sW~=*hH_#l^Uc=;lV5)MOjs z6`$p45xIhfu;mWb?r<&gWsn)LO<_rTvLtooW+i91=(SVSR&p~|QNNVORbbfOL1bi* z39pRDJ6Adoinohg1w~7hjJOLtN6bWd86?RT8+3=xvEIYiUwYlN!uC`ytCs>i)8Vc< zu*94x5kZflV1A;a$}x_QybHfsdgr!5Smur;LoDms;lBAvJP>J)RERl#eKIUUYGasE zh_1RjS&KqCUhUjIUxmu9n)Q+t&O&)&_@riSrkl63zZ*7q>i)~OH*I-Dc`#XbMUf0XpH{0Z+f+?JgyfB#NS&JF)WU!F zd5`J_j;x`2ZN!sd2$be{GFnbctgZ_00%FTtvaS+1gA^iv)lE83f~1gT&AYYRO%Xs}xWMe_uk%jyD%Oe0}q>82^!a>EBFo>hx!&&>Mz zo3CAqSxpfje;c3ieN!2C#^c4>TY;HYf}f$kdL&jL6p-Q|@so?kpFaMT2SZ+*042%5 z==F{eefY_X!2pz7`&B$c3ifg82n<*t;h*_eC7*lvbOf3?->I9MDj|)ZuEa)ce*B+2 z;~QIipsWEUh`Ww8eiJdg;s6&Wt+*IA4Jlc=?|blBlpxxBTjoa;rY`q9L=q#7`!Q1wY^`KcH02zt@ECfCr_TdJejH6xV9uiE4f#KRIS@E zRUnYKo_bHt`g#Ln^XU}pPKZg}Bmy)t0W=kYaW>)SF&&wVCyU!bko(D7{03eG%K5~B zlez(lE#ATLKg%5=pQB*PL9&oy6fCrP+mnY{GDyutY8-5I#-m49f&;MofBodg{$@+( zeW!SRPz= zgKeH?S`tL1APPd#^C1>axsn)LdQ_>DIon(;G3dqPXG%$rKgOZW26-g%t;e;;7?&S! 
z^49dw$P3mHh%_g@*qrMmI#22mayO!4r=CQ6nxZa~NEG|GlVI&Fa}ZubHJ*zJat%{L z#DyC0!~P~dW7&Fn-s4;e^gZ{deVV--7s&y5=v6Olj|vjiycq0AWj0CYMn&uD0SBYW zYEhrJO1*KxO1<++&r__-f5D_yygM~3F?_4knaflf9A-e8Qn~``{;t<>NW+`9Bjs0c z2HQam+U{pC9%TstO{{y19^jN+2C@ z^=eQT35$t_{Z@nQ7?R8Skxw9AKVe_ufNrWcYvJ{IsRsp?kLO3efa>5nP(a;|&*`a` z%>_Vqz%Io4*{U&=j%~g(+^GfRw#TKvbnDL0^{tGlk*~v%jwAZ@VbTuRM>-jAF3!K< z*EQ5{yl#_x*1l%BNbFl{0si~PD0fw}fDqvf&PdU^$X8bWt;S8go z)=w$(ppxjVR7XskNM|10JA`jXr8G0Rm&xn`VwV8VCIfdW;v(8sqn2=g)yEJAnK&{W z4!tehdUA+z?*;9lu^7nlgn=V_T-DUbAR8q;V^wN)3?YvZ%n*a%h&^)EK+NqQm=$QeK%I!7oB~QYOXS`AX;@L7{}X>} zWuzQ@lb8yv<992%&j#j2JgHV6lRn;7hYc2rQMWQzRpwTD zdet#KhTlb9G!x<<=#q=P7_=kh9>qe}Cvr2P!718Bj;7AV@#LQCb>odbD}L;jO zT)lkF#|S+KIJyH~_~E#B%+WW|&pbcXXk^J=19u2@q~SOJiRR97oL9x@vI@8rqdEL% z-kMF{71+IWUJ9bC7>qib$D<838go0RZgd685;s8p`>YW1u60w%A5K-RfUEqfd&3=C z%E#!9PI)mt{qW-NpL}FGKn7L)@uCq6W;qgf`i0W!@OLouiZ~T5 zK`7;ZzL5kjKNW{ zaDefKtRaKBopRY9T>RMIJnfVA>mBCmVM4)7?C?12xHg|#`He%EZYaD?=(EV-;Eknu zB@>ub52#Sruu1+C$3n}s%jl?Ocl(AX1?>5ku1CJm<9}&!1ARZ%-9ThMYt-X;WRi2< zFR=+GIc?ipMUB`y*siOI6Qmk)8EkuT@EyuLE}^-c|EH4A|GR&UH{n=6OaX@x{C?0J zQ9$+bcuc;|>{zF`Apd^t4^eqyEoz9O`>+oBLoZ#>6Gd9QIBJ4?8qXtFa|XJbis#W$ z>G3q?=zt;1t2qMl*2F>i${*Tc1e@aw{?t|7rHv#nf9J_>3&Ss_0&2qxQ-~dh zxGe}E9C_c*uB@BDh>#OfwS9EQM>2(!99{I(4*bo8CmIIN_jAmZLC&78TL}Wc4Bh-~ ztXnNW1XGFallfa(EE3vG*9YPH51xFY{~PNi2OT!6Qzs^1KSlq%c%PN&(XY|-&MF!d zbB7Fhp@PsjgK>Kg;-_1Ae+i9}_6i+bvVsazfBo~4m{X_yjgJX#W%!CC zIgxR@%+`2OgqT$AJP5>!ZFFOx$sZh_0lXdOK_JR7E-DnB^T~uC07q0k@K>j&UyaFa zjcdJm_^>MoSZ;0VL&47RJVEQF_!)3iDf3MTopu>!bNNo?Z}j#b4j=M)OrzN~W<9wc zaJ(UUVtA~Zzw?(jq{|l&EH(#yLM8?8GV(jV%{hA2vJgW9)opjOB8@KGeyY@pBNxqF zl?UVP4Soo4){+7ctPqsNW1%=dKEUJRV;=xV9i*0yud#G$$2iFbs=?z30_v{>8{T~s zuje5Bl7FAXX}O01{)u9twD{nK7p{6b_xt6t@2nsA-p3pHIZM(hx(p`Drf{~?_4C^jO$=gawqE`_?H<1sZ3cLEWVB*XfG;iqQObG_Y99-#A7@Cr&EX#?^2 z;3&(yvIpPgUpCnyB9Sk|PxWKJ{0m<&74SM)nb?Hg|eI5t)w0l{g}km)YME2jV9Vp{|_xp z^2)&#G2S%;3A6x;IfmFHIGSq&%!ay>qrCIZK?xOdGUUQiH>F@QdJ%j|2l;i3E1yRE ztVDdj<9HufQf>z4F$9Av=UrxK3(08+2(l)DPK6D)f&7XkDy{pFfzDM0ui`A? z$Sdt2$%@(|!5s6&ru}LR4XKcN{$!Z@j8OCWqDJ^j!G?Q->SfcEPnsq+=SrQ=1OYgO^1Y zMTq-L(^Idx%QTGm~4pDI>?u+k5od=N)UQ^ez9E@@!} zRu=COp2kzc%Sws3q9L+ds5(6Fxm*XzV7lmn>lD?>UqHe?b@6@g;q%R5mdd zhI%pS3YY0zkQv;jmu;fGI#Ap8ztalENr{|~yheq;gx`y1Cxohr!em?hG|y{!A}@#x z#FS!KVdb0JVbuXKK`s@N#wkH$;^|Aiwin^lR$C+pa(mGS4N^0irl-d$Z<3mgBJTlc zgxR&B^Tu&llqMZtK}gH?^a_=wt2!k9%fWD$(AnW&A3W|GBqf(XagiLio$Wvb@ssxg`e!MK5%}-N`^dwW(E*?M3Lx4*P zmwzza&0+lX*>CyF!h#oOLTA@yecEt zUdOno3oJoNZ4CzFExdPxl=i?Y!%{T>BMGgv0b2cq(Fx_%eRS}m0=+2J19!O0z2~<| z179>}ltS}3%GiV-8*7MmxNHY%K3@iUg z^+9X!6c%@su48JSk$krOVqBoVr?y?aUSo-i!p6lf@~NjG1xzP(QE7%18bY2fW!cQ? 
z9CJZ;>s11!kWeM;xC6w5Yc9yA?Cj>?kE4g$_};~bamduwA%12iMFf!QpPY_zTU|O` z25xR2X28tnO*@dHMHj~os_F?`+0^$S?Pbk|kVjhu)~Q=*urn7#{}+b{W0|rcOj3HF zvh(Cahdje{FzsYXg8PWC;>t=CFxF&zW#1ib=zv;;7Wr0(Su=%h-@kbJ-%KY!B%vHj*7m-e6UdSSkM8p9$M;x<`I&H+>)rxfHYf*Eg+7A+MDa28)%q&dRAjPD#7ia<79DxK>L%4VQjOjO)v@ zd2I_~Vc81Kl;hQUbjB1pf8!zy$jg*q3=ZEwSTxhQuD4Yn=AoD+(Hx|CtPSC+o5e5P zIX_kXs_M`Yw7`r#H#i}D;WsF6zH4FOV|Zy?x&u6I^5Xr|O6LaX*3ud@Ei| zG-<-iMX;}e>QL6xy^9kX^5pxdpI+e@9gK(v{IW#Jm`?N9{wn4bgu=cvt?O3P!ZS0iGa-~+Pqf2!VZZ~818#Q>kU9p$K8d;mU``Fy(^Ho`&F-`+em~>()s^1$kt8!OtlsO%PP#Hxr z?{B^oY;|YCoqv6pLoZ+5(dt7SU>-xfffK3te>@6-Y&Tq!zsXF-C_!&q_ z*@{+vf8SnXP8>ovL$4TbMVbO+9GrCNxE2wSq2+X>r5me36!v*gGO^V1Po+*FO-z&| zvHkM`C?-R(3mV{@FjJ^!SYdX0T3`jlCNHlCoxk`@7MYi;1wmm52!LUaM-rWx3}4D& zQMr|yB&8#-ND{Ig`9l^~T+6a>&9RmyE0e1?`bB{{L~j07&iJKL>!P_%tNy*(e<$|p z>7QTx#FWNPfcca-8J`2#%RnmSYN` zgi@>`_eCzne|Kpd&|mi-{bv%9XO!>5J?u(6Z@pW>^du9-4iU32ohEk_yz)m!Lyt@) zlvJf~z@ya=+6n4WCo;bE^gliMZu|^CO&0$X!j)5Gk`WYaaF3vUgZalVX9C=B{Hqy7 z)mGKdG0TgOF8(eSLUv318AYt@B(7#l4ERbokfN(2*b1(IAPMd`-!GL7B6^YLY z+hscNrxq6qde67&6~VWc1yYg;2iz+0qzWp#fpW{J`kO-F<{&^m#;P!UGs1lLGj~4| z!+=GGb8r4D6FYViyUWP8t*WH=k3LlzwdpEdL_g*xnRAf3z&1$?saPpbq`Ch9R2-dKv}aWPZr6dqcboaC{lih= z$(~9DD2v+T&9*Y;8I{GVn(bK=TBo__5EalM6d7!}zShJ%Sm<~_;oO2(3x9xo1;x*# zy0mH4ONposYphTrs%EP#lvJx*Pd_p?it_(|5EMTiXS^1bvH%-S$Ed(NB_7_6DsZIO zAQ8SyW|q37emOJ2$K|VNi&_CFVyQ+fZ7!{?`s2$~=#;?RIDhfvXJ!&`C4E4I0nB`faDtpSk3I89$E7{j@KNgd5tZv+Z0KFPi#rK^*lo-zRR0lnwUifRtc zoyKACSJ6LlWw9sy<8p3<_7@Z!wu=>`N{t`J3Q(n>jyGmqX^ydig|*3SdhA;a`LjdN!p$h=_tA%vlLUV( z@}+AeguBH1h-pnrsAOdPAj>ho{bBLJA#C#y%k+!Q;uO7iQBp|XpzNo6Xy5J^4skVp zzg~PrRM3Z7>-YSL8TAuE#%<5j@hE=z16>>*3tv5al*K^kjTlU`nKf}KIPzXl4`gn{ z#myPu6UG#(kggCXko(d1{(BeSi=}s2HSyFYQ1+M`Gkq|iYh+MzVQC|Fkc(YOd4BS1 zc#BN1shO99vsTs?dzt78AZnP)dGt&?L$~Se0!1PP%mY(102RRG=^C|_Kag~>Pj!zN zOUV?Uk7!N9dRVS_=jE~x31Fdf`};QuIqEJ#0V6%?B@X_#EYb{!$B3aSh6*Xcg)X8r86(Brh_}8dB2=fZ0fp_+M9qKTg`@1GMf0b4D{k>Pkt@jB!Z|8j z;$Qn)V9ZQkT>nK*w&I@>y?&lT;?>LCm0JsnDSxkABnrrJX%_IrGmpM#5reM@E= z<$S@9+%Pd9&iiU)LqDpW>|`}>L`H+^#-hg+?s?Wvrk60aFQ2S>JeppTTuO(&z-2Ua z5uYH7I#5}o2HF>t1vvjV@iSwl2tmI*OQCo5%GG#gcu=tnYRqgqRCx?}5-%Pf`1&$Jx8DyCE zMFC!IpbEckc2|J!AOP;nUk8KCA7mB;sCs5U*i9GM-M8M1$EIg8s|z@6N#IluU}R#j zC*isAR*;JV*oggdqlvHjC16LH(ZizS%c&%Mw#l+|deUc39-SCZPHFm@A7tZ2o}eF# zolu6`6*QUO3I8VXNE!H14m(qZ<-DTO6qK3-cXR|d+sWRLKP<)|z!EcQ*?iMM5lD>2 znB0f+xGWY38K~Z3AaW+DmnOiVU)5EhGw5k^NQKRKFFK;1gl>B^<&8FmX)F%U-Wini z{}E>d>Nqxno;+de&7-?{6Mz>6N4umBMLl`U+h@8^aJUfCQsBAG7p@FHG1D>0LJQ29j-{MAwT`Z$GQGkg#;`fXH3MeyO z1!z9mw8D4=wHdF5V3t4W$@iL9>3czmOJ`I$7^IibOCi3~JhiP(_I9~YTRg|~ns){@ zuJ2Fc-DEkZCZKVOI8#sV$`7siZMq(JTNorL)AcFw3|YNm-N7BF_cBon^{Qc2s$SQs zFToKpDbi093#s zI(|cZ@S@n|rypE=-?Ie2>QYb7QV`D{H7{^{OPW;dj@=qI^!pdzw~u<3=DAJ$HBng(;7Jl~ z@o3vb>+oE%Yl2J|+5gwYhe6?Ys!3172^=h3sPS%mehFP7L8HVW2M3It5r(e9Z3w^7 z+a?)yZAIYUr>98_u3pt9CTR>Or7y2As~#Wq8bZYv@oSbttJ3-a;k;|(Ru8D0LCH!w z(qwM^7rvY}RBQO4Zq4yOK~edDa(Td`@nr{5@lnw3x%Wx zzEEJTau3yU!6U8IuEu2hH{Vcv4EV(0+OOP=SIRCRR&2VAwq&-l7;liW{9Ni+ibJMi zb04P9LK|B=vVwSnP)+X=>g#>GygIe39g4yVSZU}^CL@*5&n8Mf%q}mNOI4_PoXA9h zVwG(~gaQ4V;m@tugJx^k5u#fFg zWHv-Cj*4xPPpMbrg{}~PkNIM7R9TPG_rdzV$&GaJ*b`PlE`XQXXoWPB3=usjWftTI zOo_CyIJ;okta4V|#7Q&wy%;_M^Rh2q^&6$y1I~y0K>?-P7A5GW1C?k*UCRw09+0QC z7??oqHM2{=w_`BUek)lhHuSMt)A21Q-&Wzid8|FJ4dX}Byy&IuBKNijCIFHGmuoP8 zCVe!uC$(&jZ*Uj?@^^opzkP7=n< z4_uoMSOB=a{N%={$gJh_f}NaxITNHPm-3Mrgi3au@*MMp7!ca~aC%65Jw62CjG5*A zT%YUtqrC%nZbaw8_2W@)5Kt}>Os)oyljSdp&DOI|O_e%HLY?BV5E|5?_0eD42!V0y zq~wMx9ZJa&F_k{M zntZHS1^UG~=b2%Ho|`upH!Ry*xKNr`P5JCyV(a8y3@T83&5)($owt}}?lQ>ZQA)M- zAI2|N)hjmMP2T;w7x2c>s05s-5bpDWzX&9MeC=7l?Eczn|9%G4X?*Eva7C#oN4#0t 
zYNT$Y!hR<>_QvDu_q3P?9OoQSTS>8R5?r!I8!<*AFOgV?3(VJu{sjEs6Cnxa1OElh z_#X#9b8cY%-C@gFy>^}^iiYS(!4{e`Sx1nxA?OFr>W1&j;*$ygfI?|a8*TMsAda@TzE_qC^neEF@2UL(XC5j zP&(@^RUPu`t|GY2`qNjI3~y$3_(q6Rs7)Xn3v)oVwC1fkPw&_XZCS1=FhCwq5FO9X z6+T)6^wNy_2HDOc>?XB-{qzEG5>{6_ZkiS$qONYxl+1%rlV4SVB#MG0!>NH|jPp-i z2nJ4lZN_d*`6SS%mntpaKg86JDNvc%evjOM0B~Tnn~UF97nDUh`4`s0<_Gh>wl?pV zQG6w^spwf%9~A4?=dAR1o6i>x3d7aF1Eve*z+H*Bhd&XM;j_Ufng0;ACDy74Y(xxL z$A9}r_)A|9b69OeI%X+_|IJ6y1AtDtT1YN5*Hg&PW!PpnLCkI=OGIzXgFsM%#88&O zVl^CM=7tza++1C|t9@j3ZopWLdQgC|Il;+#3o8QzSUcQa_)c?!KuM#Us3R3E;M2iR{2ts;oNJsfc#53gd!+T8DxH}q#HWgE8DRZ{{&+)~ z>T^J@r9igTe6z5x^Af2^sG`1BV+!IzjLx~g)`AlwWv`0E&|)=V$G29Rntih%LPGdr zqeHOdnyTPV%I8}tTjJ!!@c51WtH;qO+92PqkFi%eN!nh!mU7y0rXTI+rV#|u+WS`( zZ=57e2<3khj!Ev9mgPZcfk@IJsJs=C0haL{r`5|ZMDr}`COhF2g>aNM^vAPz8a}a7 z`-{>gdOl}IDV?n)w_ZA@od;yhJKw$h=X&yXU&Uos@QtPQvEq4p(L$+o<~Jh$5xYL5 zv9XD$uahFizjpGPwwU0oD5|6GklVeGL|EPLUB2tD!CCO$<*)rUR-|7`2zUx*IpTs4 zskfa9NZa{T`de99lj-w4^Vm`P!zT^!@4ApL@>9zkp$@F_9CjXbyTQkAH9&EVi=Y z4_NQSO~aDbkxR}&`Ri<I}A{1IVDNL zNIEUSL~C{GKO(Z<^+!-)t&^0;ul5Zv7=aS%ZFH@Llh5^;0=xK)Q+|5(mgE9Lr%!}G z^fh`1Jv+jTR?EQ-LMbT(P&v}gS+4kUjkI?cBO9rN02$yz&xg;#fJPD?<{1_?6tAR^ z9C#w|<%+yd|MMWFumX!VkBOdCL4!Pq>U1+RQ$Be-iIhZ&!(+bknzk?gw&L?oVi|>T zHwlwrcqh&Rr)cB7Foh}Ahv zO%k5|{ot2Q)T@R?{vMS4fAYs2^aBO5gk_|6E`e(HNtrQWaIkD!V{&v(qVBdu#_sq0 z1uJWnd4D%rhUv{|%mt_5mvVqdOLMdV>i!3m4*j*Xfb>hNSVTVTeI+xtj~+&)lirFi z7U;&aWS-UfpuV=Llj*~v|GBBS{%mSBCAbNW(&JJZ$gie@gY`Ys$IUi0@prMN8?kUZ z?EnZ>4nVIvMe8Ny)o*DNm`ahUGdwEct}jO~@}`Ysp_u!0DODY$FLth7s^M3wasjHie%MV6xsJiC=Y~qF7#ZTovbX%^_DNzaOI%H7 zg?S{PCf7^4z-|mSxf+*b+kg|D7GNRGRZnU9@X5NmY63+&k^S6`=0m;2! zhXXgK^U3gr?JQL(i{^V_7`jSZjTF?=QpUx3y`1EJIs9dH;(WE?y+v^k(P9g#_>Fj% z)DU2fPIy|Z3I+xNL2Y2-jIkgS)1fexKCwU78{jTO>x!f}#VIF~zuDz<28|Y)3I<(? z#_Hv!_gd+rIE5Koy!_f=MmOC;Z~)Op4z4?5r*j6J*}{ELEHBbDt|#lqFQg)F{uJeW_@9R`r8(BDNZWGkI(= zXu6gYlb%O~hOIEPaX#z?#$uuOK)V~|`D9y1PjLqf6X9drgC{S&^5vIYdeZ0B7uGZi zr$6OLDSF^K>TXKQIK4QCS}0pvT_Wv2Pg1_ zLCj6FPvWXMp{7a21=`$F$tn^oUV76j*P1IXQXcY9xi$ zERT2qB?JJBh?=-8E%mF_7Wi?ZCxzrj_gwqr(buZJly`Rs(1`zTQdp>`T1ttTl7 z`Y*W#@pt+#&=)i75uG`S>Yx@@WHLOm7(PK#3_p-InpST!Q~#7c$syHI>*mD?Y(Ie8 zIjLk0Lzn0KvxW`ioa(zOGl1tZX=lVqN%>Ay_pdUOY^9ZC@1A9zFBBq&We4z=zp#}4 zh%E{1T}SqvZs&aNVmWyI=FPDQok}RTR)$R+gbKC?Uzo{3C%HhZd>fQpsy8m3WLBhaXzLS~A%4>-cMVU7K*n`CstFkvfI z3FRXf(i&Ae2kBF@(rEgbm5*qERKemnT7MgWzEtyguLKfdvD>N zEF_9U*?tHkdgt`ceJyOW1?g-zW~AX_hLhl;J0U0EZzNncH6bbw6H0w0arcLtt(osR z>!+l)Ivo*-13Qg-+ZUkg$PyNf@gekxuqSCSUys`K^!7N9;ZU{oQFPumt=5MEd^3D( z&%F64ZRG_F%ssK`)>WPHBy+d+zz7C)Q(6~ojk#IlTm!lV3@n^lmS3h{+vbrW^~fj6 z0-#J0Ig{{U+7$ZgQdvnnut1Vv9ACJt(@!Kgs0Tc*zTaz|bletU0Q#H+0WeZRN?ze- zG|V}Ywkg1ol%qLdVsZh_+|&esa$7#S25Tke<4(}nkR$fH<=Q4V`bu~6qOA=;Z0+a# z`)mIFsp~P5x2Ci(!_MnT=K*jt+K+>gSQsXmF^IAsD~;4rHBVC_Rkhyq&9u&{t8E?u z%%ADS1^R*Bl$LVcb_hs0HxZgC@Haj=;a|V^Qb18xQQK7@dWc>r*gc+0ZM}8LN@fAzQ`;n3&45_)vC`0` znAD}rpjM*_LN_!p4qDI<{v2E;p_*_YkfN;mzuTLUUec$u7iV8O?Q3{;!q zSFs^=X_Ii=z-`*BeVZPSZ zL|!V5ZCkQ(($!kEkq)t?vH~miCw~mI%9;mh>{zl2#e$fdldMHu@SJN6##Ab^@=`2r zukWI?MF}~xsGUsKTU?~rH>rCR=g@=j1XI!0g#~rLa_JWAFFfeP(L)gd$jhhX!2w_$ zyB7lhy*WaNRifPZF%v^oA>%`mEHp{v!PZqvU|$$~2Z&7nWMGJH=rq9jKh`fKgG4oE zGU`vF($K=jYPGezwko<=bEJ|f?X+nvEjNAEr*C(0(-kARXGjxA$p=AMgXFSlAK;J2`~1$ z!Bv3y(lF+z(~(X*y!0OuWdDGFKXgVdQCP4eIv3TMFdq(_y$DpF()u1W@DH?k=?1dx zFaO$~Og!QfDE+;-%+R(Pn^m+={d!8ly+cYxa7;&FZ;rUWb`1vm2@X)7>?V;OsZfP` zlipLju%lQHl9?@SEmb-~={jb436;bSyfrrA1U8cipNF;H_BoMOUkL7>iBq+y0-bQ8H8FEw6`-uF z>N$fHI!m*tlywu?;dexSHjQw+kyoTEOqD5Vj-duqMTJ~-e`4(cIM>8c4RoZVF!cbt zoY3!8MCm-R;z<~{KA9cWaztaKQ`<>`AmC-sy&?!>=NQVX?QEYTyo^Z6J}+r%VbC&l 
zbBs90qJ5e_Od~6bA04J+aluz~+ako3H9&HR?lJ6Sd8;$+;K_^kd}fokCjDjBEEtRTU207xwAt90h?4tBF(mhva{#B4Gat3WP9l1amWXK9#x|VwcAC32e2at$ z0?;2`e&mMC?bm$ttF^DSG`XUafQw8tIwd-YTRC~PddUi-&BZ|d(;dTV!?A59XJ)lh+ju+)2JffKt$799r9+o8H^r_n`S0YJLsw4# z6wDc`8VQQTFQ9ULKYVCx&vvp?uQpK7vDG;`8dr?{zcCry-v}u2Bpmt-H4a8t#;RUL zly$gJI##D=C#7kYqA4Z^1@xqvU^`jSSnSpzJc(brj9EIocp+i+0Zv z5L10xBn{q2Gr{TfvL z5Ur?j2;$%}M1WL$YOp1RzjR-V3yA^s)NTgw9-9D|wW?`eDSRLM3^3m2Z%e3hq7%#R zLn|jM)t7Z`V=pTM4l7)Q^ku*94hNM5ocq!rM^H`{b9s}67H&W>mGtA#v#|ZOVXmRTUgQGBO z{fF1zc;rjFHos9!C%@2Y_vA?B6;3P(XChcSfI#@nhaZ1pX2OR-++;=5HLZ%EneE0X z<+-ro*W%9zG8q!dzsG7wN(gGL#hmfrI*l3zC9w>}NQ!*@dUD<$S+Oj?mXn{13|vo1 z|EpYe4whOsHWd|P7)Zi~f@va0&5!calAV_1#xVaOIhRj6DD^3Uhg8(WTBP3{XJi*a z(o@^cej4xO^f0i+hsml35JfMsbXpNOCR*u3D=zz7VF9~=d7DhrSurdX4;)aeM>TpU zJJH_sKD00EpW^HS^W)pe`p*z=gf{0K1gU{!_<-s|51-b-4k-%-=!q~UK0qDO<{#@8 z2O0LT)8stDfy8a~G^B@Z-QcRPuKP6ZGbFvkjGV)8ewzm{q(aeF{VrlINvgt`Bal%P zWlFeljO)R`*%APSiV$0&?9iH21fIA#?j*spN*pE3+-kMfY-;w_Zq#5rMUnOUyDVeu z5j=f6(ksjh&g%M=fJ2rdf3;+$rl zS7ELYMMowP{8z|Kj<{Hblq$nA#OZvI0!}3=pm&TJ0Zd) z=@4;jwxK~FCb`{avf)6f!inuLi!l=WTRp4@J-1ldlygpOMrtWls`SJ@(cWA_+I0j5 zBZ}U)t*lwCccL}$mtGQ;z}v<8P0l9w*A5GmN*kS!#@;zc_Ui@f7C{#0G|vb^=NR=_ z4^DC3Sg*!@+ykj%Imy23ZV+|#abg1P#&&-8FZ>e2m1z1%%mmz7p4&)g<8#lw8LyP? zN56D)M0)4@e$Z_vS}X}IEppY#KZj#`w|zavCw zeM^cU@ps;Br(-%} zp4WYtS-J5X;a5xYm%eJc(^+`AA@L{t`9=PK*(W ztuJWvW&RhR7lMo_JL8jhwBmaMa-0S&zz9plz(0g&^JRuP{LjhR>GY(OPmO%VqguE_ ztPS7_m@Lor*;QZj2X%I0faZKt2KQgQ^nzbg&(#t+m#v47PjipODhuYrY1DGBmZI#Y z$U@DEb2w$6erxbs{{27$1)^%1)w!;q#imYhAdCN1lG;hz!<5RExv=bLJ?i8ods_@d zrXvICjrzmx1)apX4~reKTy3# zd=8yRVJgX@Kk*qX^bJ_z%d;4fuI1HAwtX$r2J~YNa#c2}{*b3{(pV8Lx~e%Cr;p`*Sok{K-wBG|O&ir{II>Y&=0C}p z7F;7L?SxsjRB7nl0iNI%-ytu(@RDx`1S~*#;`8EaiiH`ZEN7n*>qH?8yV{)lBO|DH zh-5nZZ5&heBu{K&Eahn8E93dyQ1s{NzUV7+cdc>T`2jcKPjzro$WleZ@V<_mCu3>~ zTp?=OIoDi>l$RiQ)G9^MA%i+sDl3TbT+akRZjoam5iMs7li?E_rl5Y}E4SUoS>+p( zy<9qA=w?H>B`sqOmxRx8@Z_26kM8Nx_9iWSI-xri{W~kwlNR*)uwRt$RcCv)F;@4wK8Ej3|HL!too6W9!QUb0ZI=*&RyDxJx z?w;NWUR03?_C)Dndpe2QM;E!V6~+I1IF`&7g!(yP{x%`EX z#o+j7NiQNzO!iv;1O#P_p%j|d7~7=^NwVcp)bSSPWiguvM1Z2UPyaUxg}i8gGf^RjbNG{QpsQ;FZY5XnWS4b}B+u$Dt z-*)E5qY%aFa8Fn^pZ{b<*hTN-?tPF|fed~8_?c(K380uQMUZv&x%Rq7AAi)+TD|7K z5JE$;i{B}9S^5v@X;yIvX6@$eb0`Zu$r;2-Jq9ZI3>5IDGie19p%{xTAYI#12<;?x zwm^xnz+OxSIUf7mfDBtfgD+is<;xoQJYw93z>!v2@wXCdHnl~+Eu18{9yl(NLc`%! 
zn#`2-@#_KpADsTLe`IvPMb7DVKz1yc?4-s6|5rXNVwbeONZP(sc>E>0e{P_qP!)N=?%hrhySDz21*{U`Cw_>H^+RJ+>cs#%SWg)O)D8C&M*w4k7mEqO`Zj8Fpy#40-&Rc zY$BXy=`04WUxcl{{P(=@{0Oh+W+f#29LGGbFdrXkBQ=Lck8{=2?_GX>@TpH6lLpZp zho|Lz9H$f-ket8Qh6|9>nTf5_U;xrrV@-!fB2Gyk{G@Wo4><&*A_#&J%4v}G_FE`) zVP#lQn__1{u4?PG^_oBP*=JsK8e0y2mPHmIyK>n1)i=HwA>i4u5rsG#)iLi0K^EGyuIf8j&(G5GwdhY{qU9#d=`TS*Jq_*2US&7b4?-U$#RY>``IUmFV>}lJSBQ4W)FHqUM~>#5BTGQ$l~im~$Br+@#&xAk1a@P4lI0Y=Hm|N86CJMGLBavaiPW-7R%-mGeI zYY>OpRv90Up7{e8p13SV^&n02Dl8Le#&+br!2diHCOv?|ktst-{UOiJN}k<9pxq`z)O* z9XE7K)-*~}3SuQYp%vV``o|wjA}EHJwpORVp8&5<|If>}MWEw!hEReLTwsdifPKK4 z6xe{Xjs<+r-(#`fi)fJg`inPw__nNR_|vmvIuERp3IfBKgesp4ReivE_OUfkPJF-B zV@YW(5GKBrNKJS=Y&8p9TMDf|1J7dGa)grsZ=->TO3b#*aF*=)bl;Kf!9z-tSIi>l+h+!8qMyY7FME)gG#Pn$wrtyi- zk-Y4`5Fq^3WlCG+@rgO;_mVOkN9nnsc`IZ@ zP(VvqolAkmVd_)*{q9n&5z#*?S(tcA_yg=)GfFb*m`a>hF3tp@n4G{ok@Z*&RHB#x zWlUnK6Iu*w|#$fMkPHV8iUNtU=as3P>DTrC9S0$WdWo9BWD;a^R#hM2NrQ z@#DwO`;V+H+6zfr{sk;f*c)DU_9Z8azH$DEs(K#`e(D!jyz7w&RR!F?pLAMAmP>+8 zj7QDcVYe1%pn5;ar1eU;@HfC!=I5;gWy_#mB$hcyGVG%SDUn1WM`{mnNu4EpDZ&OH zE$i-!W0OP?@xgTW6EmIm32mIllorrmiA?CwylNsOfZL6GuRDQQ!R?ZxsV_Q|(;Trd zA9iR_s(}#9IgXO-B&p`A0K};++>wV`IY?5#>QP$1Bt4BJ@lP&)ISORQ5U}p0g@hMv4egE>iDW#R!y`zpA)!K#6N@&ilO!j8vcMGz6}4v8{%#UEf|c8Z zdBR0vwTdI^Jejlo$)do|DtTg33!M@Q*j$oztMQ8F*O0y^DleJdMXMyEy z9`DaJQFHTK-PPo)#=JzcD%o0opE*UWqN#tz2z?9XJAv{`YeS36CBc6TCXA&O50yMF zi^}N$XXqbGvw3&${uh6%iM`e)6z=atUm+*$jPDu+gb{&92#_7g;w>n zGt+goW7fMVfN03{gA1~15*axNEU2cX82Ug*+p+U5WZW4Ymjkau`o4BGX+bGFe*D&q zUSJ3l%bD+50Gz$r2-5YAYNJj|_N}Cdw>8t1+=7NU zL->1WD7l`5#$FGfsMrDk9lgLVzZW=*JIl4Zy1e8q#yfF*_jZ!%#<_xURZ+7A4X1OI z+I`b6n!02Dqlu@4LbHI@@28{^0n#eAK)I$cWDSisd5UoW0jqQ-DH7e<4SXkOFtv-5 z*xz3;Arg-q!pO#>4&h3*8!&|xH8O<;ssoRGQx`);1?HnMF5(%R%G*1n;( zabb9$B4IHqEhP{1WKL&(wNDc+NeYh46L1QeQR!f7tgpwN^(mjAkEcDnuObQ3db1q> zFLLEN=;b+{33d-;k>9}D{?2lBHH1cil*Yz>=JiL9Q)dLK`R!t@`nj^$jsQ=s8mw*p zC^?}GDI%%CTC}y^{jRhM7FB#>Ch)b*xa$Aj>T$ej;Kn-w3l_^ zU{l}jJJTVbnxYwcq59RJYQ^fXa;Fg*H+eiP3K8SQs8`-7&T_11r=-C&Y&U6iDDdW8 z%_8nDO?5&`Cq_m*T1EysU@4k2idHjF1MHd-8Sp)VD-@|Xr-~R*($(X~U-u1K3dU=L zn(GR7wJLYHb1ZyUioxHHgZ5#Z8-|P&8IQv|!5YPpW}rfmmu+|)BsFLCgcAuQK?%?M zXEPeG8Il*3q$Yy3>ebrb*-r-NIkF=>s;^eWwd*AHvaj_;5phiV3!{{ES%geo^jND^UIw9x#e z;-PvsdS+Wo#^T5EUnx8)K>6#hzamixSCzVbTJ5{mR3ma~>fx%fpyYP4k-A6L+J(ba zw^CXOgrK+{Ig2r45ge_g1xxXFNCY6ML-FI%z0N}sz2x^>wMxB%Dv}91Xc^Ipz)jy8 z*NaM~U*-_BrC|q6e;6x={=OY?0x$<25{I*S zYm7#w9BW*gmQJPai3@9udLvD0qED=}g!U^-{yn_!KbnU1(0@dxwn>E! 
z7XYF_KM@ebMD}&wKx`t zofw<(M-84}+L6}nRDfqTT1$Q#D#;tZgzVrgO6E%=EU{%;z(Pa62LANhb1f}6yEC7D#l_B8aEyqg48)@WCz?Pul7v>BVbopqM+8&5D;6b32$}S z;aV$*TPu!;$;nhXh{&*Sk;=+#QRrL=5O2QlN_sa*4VI%Q&xZ+JAUQh54FC`!9z%X_ zs&6NDQb;QM3D>rQFi*dK`L^DKp-M$6RNZW4SbC`Rp@RO3SZH-^wbjs+;iuqr*BBELN?W%h1}L$HtP~HkX3RDaqkwpB9|UaQz%c}4d|(xW z{)YPdjZ`nMe`F!LkUafOY4lbO1T#a#!MR`<7ZgH&u9aG8r{Pl`$f5`j1|ptpqYULo9we> z^jU^{-Jv;_L5Trb=2>119^B!T(oQ z^kIA|xm+6@<`(twVc1fca;C!r*pj9Hn0e>#GGM!p(;#i*semfcIHVM2nrH}0-h`6o z)~YlXYvCD$)yyJwy5J_jAr=)PaX6t3w-5gJSTt{z@&En*>LkY@@lX8w8_xHiiqaF~ zftRb6nmtayB}zg28d8N`r||HYvO1rMbPm9dPAd|4_AW!N!(FRl~PmTEdlL!j1HiCsaWgOCqS19 zG7$U+9^uqrV-;GF=@03Mc1x59Y1AT;4>C!v+8oe+A};$7zv8m;@E+ucau zTENZh!@CF6(Fee?!=8(+Ivj7~DhU0`csX-cb zu%US~a4Sm898q0JpY9SA_ZO45mKevp2rMaU$p20k?p6P&|Df6^DcL8q^coJF&Xr2BIF8ojAgM&Hf>sY5W;z zviLni)oM1Pe0`e=KzT?+@>ZmaqvSM4&hFAKB}Ytv zCmHs`Y1u4(bV5~WYu(KX=Q&<9LlFDABHc|ZO^O9kyD|Tjpsy7<2;S%)r0$^>?Nhm) zc~h5C{|DyuzN$6SCGxrF{HdnX5)z1T1L~R%FHXZ{r<~c{odznbB$Gs=+BMShBK9%ij1>DuMLjxYL-A}M zYulhcBH0{doVOeO2dV5&x;>sAy>c;o4}E`RifwuAZI$|;%NZZ0QGa(kK< z7tLzKEFx8#z6QZz1wNYO^I|(XpkVd=!{8&W0J}HEF>BT{+=asJT4PnjgbFw%A)ThZ z0w5#Y<_5vL3a|SEy12WwSY#>MB%CW=xqd_QLE~YE_zuG&Mz^)|-!Go4G^CDM+Bq45 zIOLO;?LJsJ?$LPX**XQq-gWZgLbMv_3@$$77UwkS5ztiguQ3jGF zm_Vi|@-b9vLpi`}pZdz+O$hPCIq*J;+i#sGtc+P+Hk%kaPB2u<*tOuNNZ8UgFf%8B9?#x*#vg6c zNEruh3YhyZ!%S3vHl133>A#@EU?cfx9l91Wk|NiVzs0zXo@r#UJO18%xz+?nqkXY3;V)+ZAJ9nrlYC(541TguX9+xAf)L!2pwmM zR&BA?@YfXV-p6{)xwU%m!M%9x>$(noGo6Y6ygBThBbaLYkeNC;S-}FtY4Mm6(~$fY38sM)x1ja=*A-lH6h#SISVQ^mj-KqGE@RhU}(zgB?y}ZrN@XgilL2#d16r* zALuIjf@o^TW-sZU-&n84y7dk&M(QQjDWK?2K?`~8R7I=Y4B8K#EJwX@1k72S3~j6D zhW9AB{V<)(Un8HnYLM%(<(H8rQ75?7 zIm}i5(6{^+kc4SGP3xxNoO(U^TUXbZrIN;5_gk zp!#UiZ z7}nO)FSAOH8okLJWwoQ6r6COSQCI^lvq8Wu3o8@^kZxeAoi@~>{%G(+jd)^gT6Dz$ zf;W6l+KSL)o@=xlbjwvsZ7 zQe^EdoFK$@ANd4^;Zgro0#Cp3>ELI`DU;2i(jMEINO(g;)Vw;hbXRTb*72MH^UKAzV>vwxTR9D&a?icbxn0L?Ri6C}9MDC7O>VdL*NVObW_W zv2re@k;@vvmAg1KuV5*@Gx&v=>2^uhOoG`!FhLH}m5C~@vD+v%iYlekik{&m4D*(J8g4jy_9H{vD z;CucF-5HI?2{UF!-UaUs=9g(wfOX(4cwXN%_OA{@84El_N?QgGL@NZ*YzhgCFQ zF(UJ@!*)b{Im2nDz11nrEqt-=w$q7Qug&}DaAa+0bhh@rq{4D|lJf7*Xyu=9VN;4t zx2~h(s~Ai19Qfw@@|%7cZZhCnxs;)~qn#T-Vb9*u6pP!2{ohN$R?|gsBKh+gonbq0 zoy8Od$m#|J8iovfzUVl=@%h}En)O#Bc)C%Y;+4PDim&u~Nu9My$vSg5d<}J_Fb6uf z$#30Fy-V%pzZ+crpO3{VuIXBk@?n6H_7=MF^1^FQ$ip4Kl_KO}3Rv&G5kElLImv#) zJ)99T=KhaB8J>X-9zCbS;K^UUaKk?w;;u@kJQa-nm$};F@~V?@Vq#p2hAlwECA%rm z992yfjYc&WOmu$nEv+5xF?*?EfCXn!veQs4%RXG6OYGZhUg0SS{ka@yM`?F?V82U>m>QcPc5B|bW1(J^bpFr-EX zhT+lwauo^v=K|O1J1s2MnuSmiP3v&@lp)2W7$fYUfnR+so5@ZLbU(Pe*ldS zlDCQ4I~M=);lm?8_~LJ6=CetNn{h%3qX$oz!s<(~o-xgv)b{cpu%iCf|0Xt*PDh-f zNIo$lNabGeQh@+=k`@BY5;ubB?wUBRqFrm|t#5dP1+hb!>%z+Z_Pfi?I8?xw{rEDX zD$hPnMF0ugSVt#~s|K<%ZOFOYnwF|+1}$>B4ddPZ!s^CyOOK(D@_9N0`)~;UNs4o5 z^tOoRIQ4Hzh0dSZe|@_E!t96GAgd&!bt&71Mk?MR74E-EO|p0pr z^hXfX*+w;V0$PfbznGSDhlyc#N(x_M*ao7)CHCTmF9+}l|JXaG3x`tD;c|j9{v!mp5Vam1sqK^@ zxX{|sN{L#IX36-STOHbl=S($@J;RuZTC^KT_3cn%&$B@eX8Lp$<;7=cvQ%tS@m(&)NvX%HKsadg}tQoJ5kTP$q-+;rbN%KHw zq4{7>%x;YgSfCzBq1SclzWJ}HMay3tY)`}#BQdURe`1O0Qzu{B)%JU)>`KHr1eZ{qZ5d2C$zkQ z|D9B*6QGg;mPEmXNB6Wt2;y7e2rIkbY~JdoEoe|}Cz%EI;|RIKk>o$XNII@TUjKmN za>*cCT3@O9XiWVBrRwA|`z0$Vhpn}S&wq`?SWXDaLHN~G${}nA{@F4|Zi@5JsyY_5 zu|vn-Ml2bxNNje)&~u16PteA=b3$eBwDGDvj&LJ~4}wUE)r!NU_sJ3fl+UDP(U6d9 zEz=u9RX0ODoUn|;4}5fX{MtD$;TwI6Z-oVGtQb~aNzfVLfhy;90)3U#0!nrymHO4* zW}MtaV}8O0{Hc=!=!*3q^^X)kvK9-@u&3xR1{38xadv3*k`er>Anx!LO(rAuhZ164 zB){uqg4T!O-Fkz6v;j)HU0h&Cu@bBk8Vh9il_pfTyBTI5N2Mj(1|}F8uJK#l-jq;! 
z7ZcFX(=Iuwzj50yHoYrpiXc4uR1i}3GI$mZ!076qaQR_v`(s!i$|uC7JEgq0&)bgU zr1X@G#B?}Ms^1RvbE5+5&;aun7_@!6n+kQpSTJ3fbPV7<7(MGEB%SY)VBm{(mfdjl zyH7XocD_MyGDkJ6o3}H7)e^Y!jrojt+dFAtK$VR9YbIx>v{8tr!^QnWEi}P~?l4T{ zypNo+67wOPhE_&ul+1PqCBYVUdDvX^m8_nf9Qe0qsqhD&)yxjm1`EyO6kXc@?6E_U zRGdr_8lV*3DV3Tjmgvu}f6d=m2j}L5A+TgF-7JotemMB-@>fm^IK*e34?&4q+xH}} za;%+ttes6_GKhq5JleD+QSMqgU0!X6tpMEDghP@}FzGfv;SkF>wU%7I*{&ifsGVd9 z=txo+wHg=}M5%QH`Q?DYLe_yPYR)?!&;ckD@-CLI{{--BrQzIL1#6TL7`JXs`!6V& z)-=_3RezJPdYoFrtQ5cW)!v@9%XW%_)u3bd&^fHpcka^(eOs$(N|v2uriuBoob+Sg z;>O3bmAv||)e`rXRN!vxpjsou2D7uswBEak3sSnCRkOkbk6BTQF9YDrLEwXV^UWx1 zRwWun^*twjm4m(wFyl|GH&TNKP`mN5sPuV=91`6be5xbyVS=h^H z5sIaONsy&J0j!SS4SpytaVqH{B^FCosvY1?kC~Y$`%{6Xx0a-sbH`cGf~ySF2|mxzuwvh^h* z{^5rX&W`uA2J^k7y&`wcB6x$OudLtJW2mKM>aTM=QJdoYgSVZT5u&(~{XHk${RC;+ z88&SQU7sYlUOHo~O|)~R1J>*))Z6AjuUaOK(kH%q`L4eU|Ni&WoK8ZcM}r)xcY21; zR^SzY2SYy2(oxdKm0JOYe0ds?S4pm&^%r?hiUai~q}u;zp2 zC<&Fk2Vgwu%t%-D__Y(0y=8OP9P3-F8rwEC69@!I4zgBOn&7Gmt;EZ!d8i0k`Cd9L z89C|2n>|LQ^)zRY-Z?Wi?mw}wNs@QY+Z12`H68oRf1sGQ6AU}S>N7=Bh2<8d%tmHN z^KOz|bRKL|3?o+wM7|!;Eg^X&tborDcI<=fqMD~hG^P?+)XgNYkLmHf)K&|K2Z&9- z8(+h8#M7)HV>%c}zUzr|;QoFp-_+j3f{RoH3%^poFb)lpvW`RZ`m6A{aIUAY4{9r% z>AX={pO4K9{6^cr3W);e$ewZYF56M>ltu@}rj#1Qk%yEQw`?3)6g`12YSdjhmX8)} zUici5>Q~2zk8F+|39ylir740ATFU@{3>L$h~HGvZX?u4T=m%*zEHD#SCI5c#} z@{uKl!QcKayuoIV`iSVK6(JnDO>O5ArPfsv)Ko0~wQJY38!Sc2GS+?hSFkP~h)#wN zIXO-#n5&&L^T~#K3p;;irmr@=MOsULwY(WkP9izr_fcpgssY-0XlN`G;*|@1I5ehG zf&%bP;EcZgI7!!4*L}_s+~a&i->_h+h}A+U2QLz+T1!<2*&G22!6~~QZXA5Ci>oe? zi>xPu#nc;d*b_J~Q*_jr#zt^as&|^xO1=K2&z2fMok*(cL(I^X(*fMBo zHapg62Bc2#CEQ`DUE!jjhFUVg%p`-$q+yn*K9(bTU^pajT43~vu{{@n8^wd`wD<9W zT4pi0SD&R7(CM%f(<91j4k+-PGs#HnoB_u!A;Wg2i)Sm(QxxCCmRh*ChKP(C7;@rX*8P%Mt}v9XDW ziFA%0c9Y|&*xe&Qt+89;87u4ky?9LLs26`jiR3o~jZBmQ72Jg$mTerr^R;z7aisur zHCN0R%P2_9VU>+?L9!?&z7_ff?GRgQzOgKDJ7kWKC_Mm^NgCbnJ@DhC`&M|vINc~v;PY!WyjitVfSib3%7CS{$Fff>!=YYM zqgZIC<>1M)*ZfV#?X)x4IK89p!(!Hn8q{&UpMo>(O}5IWxs;sUo__EJppp3W_T{IS zf$1zTNVqXkb9_Q)-5d%cE_ew5%5_7#AU;~t)_y$Ms%5|LPvQ_)4Ir}GDRbf0l^Ww< zFn6gN;FF;uH065ZU?p;3?jnD_rMVr4AT)=o%KlmLsp$Awk5$_;ewGlx+lRIirU$6U z&}weH6zxz*cJN7X`!8bYUS({@VK4TvV&a2U=8EV}@$Tl%&K$G}F2B0ATJz`Qj?Q_V zEqpmFollq)=$V-@Nrfx*xTxOFEY^6{nWc8Rr3Wu8ulVHcule#h-2F906LMBJ9}SvOJROT|<6m823SRz0|M=3gG*(zsYgH#= zcGy3ZaAhNLo}W>iDtQD>$#Bu!$czd{Y9o#~6aE~T7l*M~SI zK}n}83xK=L5{3N%l@@`)riNZIkFv`nYNGT>!-8Y{&9A@Ymk#^AB&Yf^6&Den0cSs^ zf{iaU!KcaSbg{Ov>JKr47SZLGJ%0Sgqw790TBavCixrZyO8~LZ;?gfA0>EJ_PH$NS z_ERe1j~wvS>y0&Pn4{<<-D16e`1kv`w?7j{VvNuoMW3Ng>a2zMA$nmo4Rr;6R~{@8 zNa71tUJ|EPPC+9lbacNRA@ixRo4)igYnc6blG$mR>^AT4&H12v->HYP)n4@vB{DA) z+9J#)Qf1m!Y+A#a>anepUOEU+&5lk1vSzE%3R#?`Yd{AA@?df?gN5$SzmAFsf6!0m zgIrjyt)!*^Ra06l>)n4x^=l{Pv1a1jSqI|Nt0&wnILpEDSgF{&zp3F7YlE6(#W&;Z zD%+I!o{TN_%0`RC~-Xr`Xn>9S_=Er+o@Vu>+JgTx+h!dg7<>H5<## z=&d*oZ0f%~zV`gT{g(;&!V8jgRI9eKh#KdrHFstdK28KF>afPSOg7lCf4dKkXw95o zT{OoOGP&P7u>MxD!uLCAMfS{9?oa8_zCyz?TsNo}*F@!E|S{jTmql7*w1FGk#V zF5mGHadLu~311 z*I0($R>1lTW*)@6mH@m7Ua=5tR?tEZWRid=EzBjBTdzg_SH0F+TAO$BVJljPQYXhM zhM!(@m9gcgVf!2Y*TaNPs?d`py@?c1r@m=qvw@0w`n|#LG{c@j=)>MeoLQTf`+D_N z89+U1wbxG7b-{iCO(B9S1?uMPba*(;F(mdyZ%m3XzUWkTi;luXZqYo#1=FKI6;jnH zg5efBYjR>-GdLy(NJ~JpG1;iwv>gS+k;u~Oq~+v|;6J3SQK{V0Z%D<82qXSVt!P0e zoYUeW*xZa8VHHn14><6gR<+g2+)B;IkFcxwpv~GjKojsQEqf#bh`=@Gc8Uk|L&3oJ zML8m3&(7>@k6lS@953$-KKbMSD>cCwz8GfW(D_>ASD__fGW!Iq?3@OS(x2!I;F$(; z85;)fJ0}@h4KsfbKDtex3(}BaHb=St*xbwAT}#!mN;Tx@^kkBwlWMcqwt~i*78`+* z3;14qccIg9LWZ4T2WYAci%+5lQdYc1)2XnZ8ieSQm!Mv8xkOn*)%eEO!-ipDsm9Q%R{p4_;nQLxCV7@fH^Q!&;hDl%g1YL8<89jI34{cG^qabRW=pcCiiZ#3JtWwdwY>W*XLx 
zbXGGR{WwfuYSpE5$@x+fW0VaI;*osz`1(-Ie1(<-cC8zn#3xjMAvqOeuBeYSSjdWonrtP`mVelwf!e%XPr^{ zC$Le{U*_A2g|7EEhb^w0aUVZ^@tJ3QD8y7O7dHG<*Pj_lRUqZjuVfce%SNs!{ORl3 zch;!51iJ-?H{jC0aVz#e4$##9`Tx^Oo^G{wEbT$GmR7*729+XCY*KTz*hqceXV!njKg{)z(#{r4 znTU~wNy9aej9DXZ4BJV?&U$>~%}35@D_`}8;9iIn_Hn1~OtS=QmrpHZ3;_hp%37+Wo`eJG%Q#=k$|-_Im0hh^08OZjaxW6FS{mgr#w zOFY@5Ye&>!qYO{-K4MN;%3g}+EW{Ej>hv}9YEcN`7 zG_$AKv6!I4g-Z`N_R@x2cnx)GF#ITa{yi%WzSF`sI`_* zX@`L0aostU2>#&?*3I|Z2FvnaYk+NzQ-#ASPIqbz6qQ%^?2MHE!ENx7-@kz1%)lQL z`;0Y0PF8)5=_hfDz$^p}h3xegUUAZ!<3G6vH^Dq5K$3|rv$;=QCNdBJB!B#WwZ*A< z2aV7`{mmTA0;v$|iNgwIxLT2th6Fhx&BB5c@?XLEztpq$IA{$^nHmZ$8_LeqAAg-m z{*b`9VNZIb?i=*OH_4y9AnM~(VWiFthQh)G)tSN8HEfBm$S|o|ERL4JacUL*vQulm z*Y~YYxZxuNpqZGz@`sql`Y>q&G!x&lo$BK^s7pA!W~Qb!`(ao z-hXcYz5J0LO<2&$@KyrcK%~;$CTltD3>2gk+e61IVIDngwU(R~o4xQOSuCQ!J2mI} z2=|p_NBwxaCw#imv~Or0f6ZxcC)Z-4(L_NDrHOZ2ecTTII6x)fTKsNwm+{n?2$P){ zSkf>n+X?-SdsK^ClGv`05?X`p&L$C_G`t?@jAYN@gYJfOF)04UQHm*?bZ~?CKn)I) ziy}|loR+kZ2kV`<=^AwIlE`WuJSkm_iF z&Us=%v&Jll)eChG@n4TEq z*+U6fNChqd%Q#DZpN#>}#QN=UX6OAej~Kx15x=9tE!9@VI*1`6awG6;n4QxomyQ|c zL0NoOGheu3dHKP=@|Can3JtqlB@+QR%*k0v6^vux*=L@2eQWUK`k(sWv_1`1T3%gQ zUad*;?RTkOb`jb(zRI(xR155psukKPM$)TuEmWG&oqf42^>k9tuY(#wn)|P~ae4=0 z+9JbCNrylcvE>9Q#BGZz7;@QC)K2~jdvEp}_jTlZ#{3C0F)=X@^J2BSxmcn=Vj*B( zd0Z$I77?4O08+9jsS7{>C}JrXg(3*bywPpnI}vkZZnyiPqi@H3p|x7_CR?q=k|o~& zOaIq=@)W+AS?*<~WBPXV!%zmzLj9I=^5n^r%QrKNI>NH0>JY>hl%TzSX7ZkYh)g*{R}&zT2`{sUT8i{ zep}Un$N;b777^lt!H7ZMT3+#D=_ApTm{I-`bLeeR79+6dup)E-lMb4ZxiHyx&aq^z zzkB{wECY^rEBwi!>kCD2ButIjIG3~sHcb+bBtD|-xZ4R)<+Mce!JkQlJ3=kyK8dna%~LnF zG1HE%$fu^mNLClpaXiRa)1VP(Q6p-F`lM00=rKiHh}rKqgX6Z4-&R@^|82+@1O>2h z!^!5vt7RIV$9$1Nc3Y?ooQ8ALK8Q`YJ}!n*R37C-)D+0fd++=$T@9TlQUc~YnIUO< z)5mmQ@;DsHd5Oe0#cnUm)Mm8uRtCw66~~k37iP60OR(c`^;4^26}Qd-DQh_ruCZ2M z*JHM0xt`%JTLy0B62+ zm>ZzuqH7k6rArL#-e9AN@#`A*Jo=@Z)^u*Lj^&aJwP-@|?6X&|#(bzPf`co(;Qhx` z`;Soc#N_o5LttXiL6V%MERlm$7*p40}&f!zdDOG&RLw2pYDXV^g|xgEz#2 z;i_fN#f21`h%e_*_5yTeC64qEa<|~?WGcU#PsDugwQ>8EWhgk?VyNJ@DWnpgr(0yF zB(%X!+gpeds&u80T>?~-G`U01%dip_rdYAjT}EI^CN{tsb&#ZgIiB@q(m!Z^nEe#{ zv>DoU3kAiNf$3`88X(q=J%$JtT$8=~gV*%W>5eZMx=waFRJJV0tL@1V{!ae`vDSQL z*)c6BAtytSAjGNbHl`&IzIJKT;g22yYQ~Ql+~n6^sgdlr9!jBvQxKPHjg049}n2k|T_lA&z0T49E>2`8Wckxrf z%TYFe16iew)FAN#hkl%o)Q(3jOx#Wkb?Ht_$6V<{E|0rExEpsjDR*sloMS|b6XIv( z?+=0mG+{#2Nw663n*-pa^jFis49xm4o6|E?_Dudj>aKUu-9)Pf)}Bi_IQlFx>TT8t zXOEuT7=+ApCq9`@atf^C#ZM7A119-y7#X=f6_cPO!BD}0If}?`$DFJtNe5A%28p)R zO~-N7yk+zV3T_3W&-QkIj8&z3Qj~z+c2dj1DK>ufb$usYJ-f%#v#CJ z(dI}4C4S?_R}%$2jYO_qac}|H=))%EYZ4Zb+}NBRuuA8IVdo4=VjDdAP>8xzF#Al) z00PJLqY7Oe#B}x4ZDY<#3X2CjC1=yE_eYN&J$+5XLz(%V2a;9?K+i}RaVgfewOH)D-ER)&M4QC)QC7F6;~s6rcoP*d>_Yt7NbVfD%#rHAHqeS$ZB(f?HBg3OeN z@#<}cv`VLrc@$vND5+!Q%w^(GzK*(_A+a#^o~v66tG6|>E7zXVB~Sk2`CHyA5zW&t zKiN+s=B&vZ7lW6~06lGTa!OA_5wAEJ6#eAwuYVL%!;T71SEcH3VFSscgoQVq5EBVk zDcg!R@}gIvUaIxTrRKRTput#GByy=ZuDcMBA4D4#>%nAbFQ=3Bu}NkL$8g%7_)pqM z19WgrxObGi@V2ysK-~!hS|gu8DykI}sx7T)Se9XR9Xk(W}kT+7ay*n3w}p%?H#>uh5irW&an{NzQf(n z1c~yQ_dVA0XtN!ncpii%h2TO0B73maMZbH^WX&85x_bQ4|7+*nvl=3Pij7>u01av1 z<)dwlRx12%w(SsvuXcn#FY4*Rxo5alL({a{!qmsLu4tn1rd5sR0Mf2L;G_)p-X+ME zd?RR~9?>70a2IWNZzF^oEg>yG0Wt>dN-l{Nq|uX+2g8M<*J#B~d`@J5ONK&X?C9U6 zy+k@`nYkDcB`G-h=J=!*WpXOjESQN_!)#2~`DirkXF2Ydq>mcxZzw`t52KsPpBifu z@sv;e{hrrX#A$%uo~4u&6@ zm@p}Hd!*IqyB8^2oD`GIW+@tg9;;;<8M!g0ZS?5wH*}xvXK#0UoYaO}xFlkvR5eNU zCe1+71Hcpo(2i|^eiO;A=E8-ybS-HhQy%|9mo&RmW1_fe%zT?ou#0k~FK^yVt}z2w z?l=>bB$zl##wTuQ##NAz@koM4Eix|?p987GZ9>O_xX7G2T;m#AZTWV<$VziQs}F1W zib0w~tna9kLEkH|{p1ddMY<)MFxI$DMN8j_3yc64nMMp!GC9QYyt)GkU5E5<4`Tsd z*Yj;NcikK}C*W>M7+EWBPtkwVB#~a$e|+tEJp>`2^5W)J7gl0^RUD5k*TwOLCrAiU 
zUCTHEXyRLkjfjbmz`fL0^2OrIb}L#wudi!g|M6Xo(fG%icBZmcQ}q7t^}@Y&?YR(f zJ002|bRCK}GZz%!08NvQe88i`E10gP*f3{2t1e=cD2i{anz_!zhYO9W?!p6v>6D9t zSP|`RK8^8B_AEb9cY{y}iG|orUh;2Mw1y0?jN*@n4Zc)Tx{U~3G!Nq)1?7BeDZpbh=47) zTg|^-Ta2#`DOb~S%=jf|MFVv%rocUiL}??9r#oh9@oV#$X>QicNi^a(3oDeetLZkY z2*Y|Lxo?4e{-_kp#1f#wh{Wp5aU}Pjzr|Ss??wXai9f1BiWa2n6jsh{YSU zJq@@BLN__aEXj^ixS9pwt*EvLq%Qf_=9Axg&TcCW#54#@Z4P67V$952L)z+qEsw{d zhkg{2gtcN5CB>p3Jv>(=BBC?J**)?g&Yqk~E$(*__}(n-`IMTB-g}61J^QKltP^eW zaXq^1#ZbsNFk>cw7{xUn+NxnLt&~_`gr^B^IDf1~tfAcs%%jCDx=z93)*FpyvY3{F zo4oz~$75lt4FEAZM8Ek1?HURt?fQ0vWvQqit71*SmtwTvKQG)i0+ zxxoRa_zqcjNer3gRGKx$yPF_&hN9!E2CPlHZjRid^btK_Cn?c`@{^_-#2_d}qQPyn zytwP?{LA4msKN)(*P>p!kN8V=y8`e(hZjLKLFRp*s=m_=xuVA5`@ozC0_VXTlOppAh?hnY4u<%g7kMiW_4 zZCejkypuS6Sz`J#ZL6nbF2f{qcGw{sXC4y(@@@)q$X+`HABrXbDPxTYk6N$#_Gail zNM@5)MRRni71{vbPmti9{j;M~ zKWAlQ0AvTMAnBy^=|tS>dVN6*U=im-*DguM(ie+0Qpe`Cz@_P%hQH8tP8>87h{1D? z;D9}FFEdSMzu@Qh;*ARGQz|}&2?WSNFX7l;S=I-_kVgrzYnf@0GK}VW?B!CrhY40d z(o2cy1n-(-z|wsS+9OtlD$6vH;n47Hd|_$%q43((t0KlAh^kNB1FHraK(zXkg8#!lsl=nPtsY~+`r$lXuIA3B`BI5pDLId!Fi?7L6?w)_N@i}^>4zz ziM>s~BWEPti=sqnq+DwlbHNa`#e))qr)vbqvkS)x7DUg@vrciUuv4`1TLe}tU8c1a zgg4-{J+y2GUC4eS%PSk6SRym2^Uc#wUyeZ>;J4}>B-{>1ITW?7Iia<2c)-K)nFMX- zCq60uL4CQBPGWPgrwUH?_><(!p+91Q&&I=X@FnNi48C#>*kFZH*`6AF18h-D z8o_gbM(0A2B5;*O4Y6P=wYiRt9L;J`z*0drO}4v7oX@op=nDp?B%>vwQOW@V#LG`H zj!nnV``B{1s15>p1CO6`4SEH;G|iF~IswHe4znssdKBoku#QpYyrjM7Y-#6PX6nX9 zvL?C)u<^6pItvAtQqa}>Sy4cp4PjkN^jbbPyap*A$I+Pm9#DHwP$85F-CY#83ITle znX9p+OcuvlJbG-@O%UFLSss_JnF*6TA&6hhujtOV-$`0a!;m|%hrzt3xY$~F-T|KZ zPF`PJYsBkH73h=^-9EH)B<5)a1AbM~Q8TLZ@kL(}c-m*?ICzH_`o7@IOEvChC=trt&{ zWJ8X8Sj3L^BnWY?9>p-}v4L+l#;|1d3m}o5mO&I66PWkIeRdCOnoWIW#jydVpM~Xw zli$xcLMiPGt^^|zx_tqi?I*UT)*v<3^@iGmwx1^>6XUTN0DeVSxK>Z@J`f3eEmswA ztm!6PG?oLc=`VLqW-a0rM;aUVV*PauYNJ&%|478Zg>q3)UF>cWI`Muj!}+Z48KShg zA2QwILIk1*L3HspDbBy|f38oI+Z{?Gm~C&w{cs6tB#$*TrHBNC>LrcGBE~5&UORL& z8B%C^WqmEC#)A&O-}9-RZSG^kVy{V(o@}~vl|!I?Uw77v8zXPy$`-v^7?7qgy*@iz zt7~|;Z9dU6k9x_7p9iwANod6t7)bq-RP68$_9%u;uscVEmCWk6ivgzy`9!d%rf)z} z-(ZY78P_uajl>F23=B>3W)ak+4SAAGq#)yP$Ua&W)1Ic(e|&pIlcj|eT1$MheD=R6 z;B4hPKQCyNi{ktsR^ANpGi#3I2#L_@A*Mhbc~6-;IU<^MKTm-+5y+lJ_^$SHHn+c z(ugc5DnyxLD2SAj2&C_8%WDgj#aIx~2pxCkVOC{qG5)HrzW#Gv%TZhd2B(i!nH#Wa zY~xY@zQ}RC3((pJ!Gj=pM1p$FPeDT{>w3gKR)qMS*_+PdcH6^zDlI_?(V>dxeowhE zvCP|ToK44W^pott;h=l4W7&oTlFk37DK)H-@8*zK68a{X-CDJ-DSZ1+{z$9aq5IvD zo`Eg854t2KQNa+3bU_2V^?w!1gMqkX2Z^3l1=8T=d+C>*p|Ga%cDugciHPn-16<18 ze0KhY9)v&|pAu8y&~lBqn2Jsn{2GuUr~+2Wv}`=9t~WjT;n#l_R9!MOZb$-}zZX3@ z$>Qbk+^n)nL5T)(=B-XJjCBk)7ihL9*tJ%);Y(2n`XhQ+Mt8fITZLj&A)5!bf{7>S z7_3xdoEmoNo^DSOK?v_RV;}};G4DcjbcaX|VkIStK$h11(Ur?u>S@bRYaRcuE_VI_ z=nZfk-HCpt#c5izPfd{wj3XSCM5lsOTf|U9LeZp7?=&kPLqrd^e~n=sl@Gc!WebHjaCqp*)8`wC<*8YD}R^(W2<9 z&52QvUm^J@reiG|1N99QaWB>cRSsd44X$;=9mV=KsRbLCmo5QiOKYGn;>e2ceWLC{ z0;t776NCgSrw?5N(&9vO>aVYpzToCaZ81!tN&^D83wLWATF5u^VY zodai1JciP3+vYWnE6mjwoqY^%=~9mDa$VOs?17t(=z9SiV+|#_Ku;P}5}4JK;)n`( zwry~nH>JuRnWZKbXOhZr2Twa3Z0`1ukL2u`HtAC(H0QB|`GwmtOG8mu@LilUHgQ7} z!Cvps%GE^%Q4FuU-+%l)_cqjtG>O|z9l3z9lVCBlCVQX_M`-K6oPVUZY+2y}D){O% zp1x)ae_ULRVd`A7u=POf+JF4r@5V(``8*B66`8vU)Hq}k*$MDadJO>&e3tP9j)H?A zU{S8a8}sblXT6)x!iwu#oS1q*YZIHvX=Qn_!QO=I(U6)HfzS&Y(jc@#hQS!svnul}qtr5S)53OU?loyN+0a z`OhQlPao0`B)>>w8LrQE+fXN(AsP`13cMN5M(f`eqx^{@;n{hub^;^LV|aVyp2D$z z*8fF}b*FV2AL?z70Np3T!zm>$p1e5~%zz@rDUXysNs5@&8xT2Q4VMEHoZ%Xyu$w%_f)(|=FT z=|6t@mAkQ)G@c|Z66ySVgmVs0+!&?(Y67MLMZ_b76w^Dc%VO-b6YFu2eBpI$1QJ+t zY#nb+iB~)NtzLU^z1DD_B?eb7B}6S*z4rZ71p&}1Ie9Uc#ZI#60j2MtVg}1JNKrrm zj1iW4E-uMeR~DAnboKF|wd?+wSn6A^YWZJ%<`qr&(yLd#9jtKSmJMz{z?nD8ueSdSt0ZCwb4kO4C_4-+<4s@hz| 
zUET3zee$#Z$6;I?x7zd&!qI9`0!{k(Bpt*rP|lVEK~JlLx+VxL(p5^R4HHZE;%5`V zKxH@p2bbeJ=U?iZ^;8Z98UCaGyRocuxdWlB?IO5gJ%i^BO&i#8U-B&jbJAD01Z z4`uaa5@y@mu4avoxy3{$r~m=%Tid~y-olNsCDiU!nV{>JbM1JW>20P zx%g&4YOHXj#S(Pw^G{t8>zQ+s$~3N!voCRBX$B_Ob3OJ!_GsGq^F35KjBY<@$1)<2 zm(YSg$m%n;VJkcG^_+G57wt(xGz^Go~io=ezL zaNM~QdgtQ_C(==Nry$8IOErz|%C)ECop`bfN?%{1ed+b)h%*ykQ`xo{qys(Znc`Jy zUcLM36;E77E_M?M3lxJo@JaafPI5~LYZw}jwYoSyfWP?qM`+`3Yk!s$3~R8g3&>yF zB!9s-xPfr+@9eua+*q559n$r(^yY85sbwJH9J2eKuZAjXe&VTkITME21@CU#cJ0Br55Wy9n#*Vc=!>JLZmFk}hZ3=wn{ zH61Z=m~ai1V}cJMg6iwbL8Ibsf#1_B$TUAm&R~}3q?96vo6Ii-TFokgrN`Esu0$80 zz0T!D2Vbo;d~bF(lFto42*T5BB+;y1Cy=0WXwQ!&~5TfOy)rxKy;#NTO|##IQKX%vgByE#V%D z@0*9<`81DBB2TzK^YzvmHy43gwM?F_`Nm_!Cg*xhD?6hx;8?q@mk#pr@ly`nU@jqm z%rOw0IJxwVH@OJ$(|=5*Z1-3JC?D!T0N}us%-95MY6w{{ZyMIh>S9g9LRYYqpXi2c zK{G#Vp8Re91AcxQYxLSH-xaaf8!t;FK6&r!KR#L-OhZ1M& z7}Lj_o^fA%EFGTN!(RIAJI}`CO!jbt%~&s|a^-rE_yro#0n}+9C5f>`wjcKe1t|H0 zOiu_j1oWUCc3fkZKqZazNhyMTP4l-%(4O;;c{QQAH`AuHYd4O=Y0I(N+@-5_>PSX0 zA7q6nOPo8NybLZy^k;u&?q3ia&;ATL&Qgj(chNs-Ka{x}Q%_&)bT1?MPy8b& zU`@tQDn$Vcf@0zasmazNG({vSc?GvkR^vF-;(OSZ;`nB|_wO80>lk z^6ezE>VhS3@#G}S3?80TsFJO*l~B=eik*5XMb)}KYKi)fzu`s|;niRFf1^1g-hsRU zvDSF$UK7X8IvMduNzuMmUlFH(TfkGF=!%!!D(E@$BKapqhL!@Ss0CJnctFwI?4!ug zVf4(ZgG?|Xuu&?hxY^Ku{M2`2C4poUrHH;dq?))N;ze_l1XIGsSgdjDLGPIVWv*P^ zZTVq)F;Efj`ozdc=nNGCcAy3$bdC+3qWm?BQ=Yi?0Tr&Ns*sSD0mry%1#2aWU@X7i z|CNYc3C|kG)=sPtwq7sUAk>pY%4o2x=lAt*5HJbvQkqK^sj4?LB1eSVIOJowj$7My zz9eKUAzPf@hrNUnsaAX^92S*8JcJUSeIyk0pe|ZsX7WZJrWhEtUNz*cp_ss036>Kd z3i1Odf+t|}U__L%o}2P`;W)+hWep57U{HN5UAzyI`%G|O7l4KZpu|^j%5B`G#UTmLq?Ws9!!L#~}$SRndl29X~OS@4_bGaUy~Q8KusbrfR(Hjy)0 zKmvT;|7ARDalu+}22boBB(AL;W?4{cSg0nbgGT?M2H0-(9*ARCqXPk9@TRK|I|mV8 zMv7Zd&VX%Lom$Bbo+pGVrBk(%0GLg8m#hcXgliO+Zg#o7@6&!35@8zCqMegpa` z3{I6`3p6n6Q`$M!OYgYf8eMbEs}ftw6l)Wd#mo2&ZwN$6t|(~g5NFv?q9)#EGm_&H zlq__Jtz;41;us2^AmGI5rzJSW_m>2?C0>deXngm@I);IF%bM1NLwg^8vZnaW=k?%= z+zUMbDLj@yuZD3)O2nQ;JLkBtx~eggZtoCRR1VcIt#~p9!Z4{g=5?XgkJtj;U$da% zxSuMWP2#sX0NVosO0dCacIC>`*Yu3Zn_l{+UYm(|G&&ne-S9kpIsh_Rg)pb0kf zXXkJFlRr5B=<6Q_qr%!W987ewQZ|^E8W!NY5cb;u=erp^KuzsbLXX)3=SWf(pjFa0 zzK0PV3k)OfS=20EG~M!W6YOC*#z96D{H{mi7CRKC2&Se-4g91D8Y({5Qv){iKpgsw zl;&tkD2(mT860-;PAfCRu@SE!VBgz!JRmMA@SH)%Ra^ihr)HSrd=byijDO0Xm@HTd zeSq9~$I_PU8Lj-?M~}4H=4@lFa%m^1`oKXjmExNj~UafMn=_C2uPkwm*p2mif zp9-~+{cGaJ?=@#eI=(Cc?m;=xL>iK{%t%NI9!e42i_C8(CdaFG&|nsao*rdb1)`gQ z{4-pFv+kfLug1Gd5eZ(B=*9a`5HteQ20)lQWC>Y9LnpnPfG0>h&Qv$Aji8-QszHHP zo}x17#vI!CQpA>~lO}f!XBpkSGRaszV~()GSP1PjM;4Q<@K@q)4OadG=b;5 zG2%;4$3qQ2kZ>^zFVH>PxJXl3A#KIiAxPkwU#<@qQ6=&$-8$L;2y)N)LX z-wcxC%OM{yIO~2JOjhzu!#Ppw*}#eH3(dL#54%SJp%<^iUQOry%mOVJ7OHD8Q*8bX zo*Rc-XqNd1+5+nnuqL!5T0m$Eu{vgmYAiDpF!C^Cz2ojJ65aMRyETpvYT&|wBE{rb zs-ur~>cWpwsxev&=6(D_qU@umuUyk8&|2|xdl-cDq#!LoSQ>cfN(4X;=@9(5R)l6& ziC0$^HL&XJTu{?m2E}8nW=_)fS&<>^8R5nv%yce3?V6h<*BoYV^X3`th@nY7sH-dP z(YNA7sn2@d+#vA0hIp&x*#oo!G*DKCP@1(#s9qPN_iClEV5@bR2DttP&8oQB?J_fJ z{m!MxTRDmR^ZB;pvk?e$WmBZqv;z zHmd&JfBdb>@$c52p0jNS3&*zWnVdt3+_Q**B%zGN;ERRmHFl1^ds;`zC-{B)?)@pez2k0IgL_A_{c4y)dKH?FQ)_aDrXjQgbq2-t)7n41&08- zhBFHxV27hT`QZG+{+nq;9Xj4S1vlD`$XKZ_i0--`KTL`MfWQ0ASoNdsnK^JeJNPU` zAhQbqB)D2r>3Y&Crs!S!2?n=1_PC%lRP!{m^Fm=UY{`Jt;&;HiTBY7!N<78zqjsjI zU`{KynwTN1MYK2@9~99>X=IJ{ zq)o1t%pix;J^2s0Pawr0;AyaUx!BW&RlPHRMyyr6q`rCp^x7-0iFM&Vn249VY7A1e znc9O6wmJ>(lA8@Do3+-{GcYhY?7#{+@1Z*$%%|8rQUakfEGJNt@rWgl|HwHt1C#ocR}EAxzY1 z)l1+t)Rz9^yRTf;>JiBZZlorb?y(9&lz@mq9>Lt0wrQEj4I=97j&Rr?jmvmtD6o2C z@?z-QR&YB~6z@JQ(L_a* zFLjyanG)vAfC~W3CG|Q8LpPMzo6zFOH5*(SB&Wwb=AsV?07Z_YkV&OXiKkRlt7}0~ zs>4S60o2OR&)A{()s 
zk&%g!kQf`SU6dWnwCwq#H4=e+-}9XyOP92Y7Cvhqy9S}vBh5^Ca_{SnQPL{rgdNBOG~)uO@mxd5K^mXvZ@H)_hw)(ZVAHleSOcUHTSDt$JBo&q z@$lku{4`$0vxC%cpMTl^R6M|5jxz`TZzj=opI$`q3cRVfk$h)?25)+0{+SdS*BzEZ zQxf{cVMyrY$;5kYXD<6OC{#lo-AGCCgVur$Sv8B*9KInraP^@X#NkdBrj?EnXG~0t zYMu{vPYx0gV5XC*`r!9<0n4ge>c;(0fruztb|`fRc~LCMX))0_@$?4s4W`bbtVl7J z<3X0#LLg#*ABYF73d$-vtnQMc2VG)Atx~0_F+j0~3IT80K0xn@b)KVuelQUV#(^&< zPfSP$Ycic3`NfDy=M>G6sc!*KIk*TD{SDFe@++@u{KX(P_RLNUtHTP6DJFn^Mm(i> zlQme>#-E@6#Gm-hD`FE|hsh!`SXFOdPCH8-{R)g%$v61v0SY|t53%i772UWi~DgHYIfu*{J8(Q-pHrF zj)h~vt?njI6(d_N{Q0N+{Z4QT+8}$gU^~MMwgWJ0sSV=vZQt1({%FZ!BNheII&7uM z$q6l*!8I__Ei@Ap>>Z_I2=pyl(Fj*5&)!DQ1Wvq&7nd- zMc5ZJ@X?kUR=8*Z(~t_c<}$5q^1 z@40z`!>a8YYXd>9dr-H4zG-ll`fE-NeLHYO>eG{-o&Ti&Rj?K5e+4?+N$%l6Ol$Cz zuz0A^58`Z4o?Q3Cd~coqto-fCxrdt@Y5ZeTBeAk*k!&14f^SaU)Ue=ns>#Ha%d}WV zGY=xoSz?FCLLL7sLFbyzX8p%2%Zo2ZC6j^rO~ZEcz2c=nSx-jm=;Va>qK4A1x8abt zGbM9|ExPnvOD*9!qI!}YGB+C$F9n4MYn&>rvTWn*B1my31c>bZL zh@6g6cQ>aJklwvA%Rm7(ps)FH_kFjOTZ4QQ-Z69~hwqu)C@MVU&Z6A==WO8smX+jWSNXFnYFl^ zDn>}9q;**wl0E2K^$y95IVYh-QFs!Ya|X<5doPs(Ejj!Y?(Tm?RWSS zk6CV=GX7wzrB>9SZ$9j}dePeFGQF+1lkO)sAOV1kh-4ogyHYhZHL7iZss159zvIXA z=J2+`AsP^ER9f(Z!`kjF$9}N06~vtfp$IGzhQg69QWBZa2#rgUwtTzP!V+UWy8Ol) zo*X@}7C^&!EQ_->(V3!s7_31c)DI^mK5B1Q*QTgbq^qnfH?$2*Y!+KPQW+>^;NDk| z@NSZAjZ+(4#drL&PYp;CdF5fv=MAODH?52h~FF^fnP(H2{d z*;%wC^`Q3v#25M}#*dbXqpY~z^&(XEiy+c5B#GASXf?NU(*0;wbi(A zpd`5CqE?!6r~Q8E(KD_F4LVz1{H;gh!D}RiZ}i5L#>h92*WK>a6>?*i6-Nx4Co_5N z{$u}bM|j}C|Bcp@bitOT7oY?OSFFv~Ue<)B#;0^RuO{7c(LBH!Ovmu~9cQ!FAiN zrP)&nxyhY!7vI7+JrCwa}G1BCEK7m4G71syudO1c{(`0a4tOOoIXMXH7rSTe*G zIG&(PX(XEKaHFD|sTw8AY9KXt(t&HHG644EDB2(bmbvBfm1p#Voz{ujT{(%45~Xx@ zFyEe*`)D&ceAcOI8r46xtcO`rb}Ze-FoZFsQiTEkj2HQ{`$`~FdS>Bud#<<5TqfdlrAvSBA121qi3n% zLB`e2k;!Y(W_-en-!ClRt}HIh=+L9iu0>J@CbS-pHV(_g({MYY0lPlDiccu?Fq>oS zW^y3#P_m?ck`S$b5Rcl0Ixw%^M4=i=o@tmf&176qZ=bLOMuWeX>|o*@qB{u{?v4V{ zlC+}Ai@IiEEmy!~i-j^_=hOo~EQzp2xi$@_+w^h^I7+NYubvAAGv%LNl$1yo^2+oJ zwd$H^Tzsy^*oImY588{xvSsMceWD?83KSObd{5zOqghsmBVAOWy@t+a_2IN2Z-exlf-^D7=uLbWVPWX)hMm@Qy34IuUvU1W=dHjJeIL1 z=uvZ=_El$4tP{T1!^n|%v;X^;iWMg|sBeIO8w4Rrkut3cXB7i*@^^cb{t8XqoD>#$ z($yvI6cVSXV(+E_lQxV8JrS<5mUK>Q2gKPf9^S)tkFl~`h-j$pr}T0qML01U4^qt7 z!su+=^d=H3Vw5z_Idzz&B2j6JPrtAl7vpBnv5>_U$Rm%J(i%wPB_i!odzaFJu?cD> zic>*g7@_AJI{@sPbP7UkSy4m^uEijxS?PQMmMreFbQa%2JO@kB24;|tE=6_8s~wSi znB8L+L2ge#SWOsRYeQAaU1=Nz@tzaXpiHC@-EX@hgM$i9-yJE9NMoTyoK-AMRa^&V zSdO#cIww^&=*N)p^u(TLZB6OZsI1Ne?j(IA(HGU)NrPHW7*2H9zZaJ^5k`)p;*{BI zooX!&Kp4wG?BGp{G3Ztbv6yQw1~`LjC@Zn3{wF&<-*-5r0iehD*>o7N?h@dTc4lNp zp{`T+As&uFh`5cN0NAkT{8`08yCF`i{evM!YBL zfOhr?97SY-orI9ph#Gj-Op1wC1WA_l6mv4V3zIV&%_kB*aLjzn&qw@xrBP5Lb+Z{| zA8>X>VyOYtpxdwy0*#nQ>ULI*9s_=^ldbaMGa9F%qF5!@a*DqqNram>+^>nXOGYlL zSgYw#m^qXp)rAijH)?+xIPvkCvJ{v z(ukYQg3*X&A*GKKsc>}6mX2D?UJsaSZv>lpJ$Eho)u&>AlO-NrqeqV(J^hU4y8%5ELs*R5!!=GV32@$svo3e!~FBXv_FsRXY9 ze#7U`v0^J`ENe1LJJNXn{U7}CCEJQ)2TV~8e2?a==YuxEg#cV)8*e4v>vFs_bMV&6 zsMQyy*VjZagXy+YZBN__w3$mni-%_z6{Q03Q4+Y;K%W80E8ZXruAPz+sTf0KKIJkN zKt_>S@%rnNHyyHVC*!Et;nmmQh`q(>I#>YH0Y$}%M>xiEr*|ssr&Gy1alKS(;vvb7 z&24}*VyBA}SPN1if4Fm6ln-+lCSr0S`kKo+LyVzx_$C7s;Rk1H*ts%I3Jc;b;(>Oy zV8}_Du+IU;ne`pJlyYu|TPEV&(}Uqa<(tSmo1@ia@^ZY8YEO_}u~dD2@bMkDR1H{^*BBxv)mt1lf&0ka5w10HOMZ1 zH`*@wx(EV&+1iSp#E8qviy9QC_k2}C2EkOEsxCh!gLhVDUe?1p56;||M@wl>v9t&o z@6@CqQah)iUbJ{l9x{vJAtCN3e?PUyO~=;oR%z6hw60}b`L36tDD1XMklpEsiq?y= zNEE3E;X)%!IEY7w0Ht$tX*mRw#F)($rc4S8>0zk6Uo^-8D%a6d*n}QQ0oIk2Dce{H zhJjB6zBq}E2V+{7UMmWj^Dx$0yKJ)HR(!c__7!G$EKl*Nhl{yX3XE@YNsyxGZp($G z%3Murwz^n(S#sGjWmsp2J1th0XM#|0Hkpm65~2wu<`A}V^TSdNPT#W1n~@=plxQbO zXL%RhaM!WfE)WBUNv3V|uu4wczOHvtEi!1ijVhj9J0x$yWHD=!L6~;({eY+?Jwy`+ 
z{QX*t{RrdZxUKJSAvmYa5A#OJ5E+1xH2#Wphiq@r(Tm2Lu4v&+YoUXJjg$<%=9K_2 z1$sLs@?x~6Ctw){6SnRHyk?oiTE3o>Iz>s6cfLNYsl4zc3x-o9Ah4@|fgiWob3WYfIEh4s{BE=A2_Y zGdB0M`VCsl$7s!^VD@rzU+c}`0NxZ`0Hbs>dE#gzLl&p~7sCM2l9tbUs}@+P#2A&! z-*_Wvg-I1p;nI*!{UA& zVn_pMvqBm0OPf5X7ASh~y2hs(Z_j+sA}gLp+flA*{mIiY&Qifi1RWU}Pr!0E3wBRj zx`?Kju=KRzB?ufWU$w3)Z7@~wt43=B!3kycg8pacf9L;x{L-Q>T^%Ifnw--63K0A> z9W0z2s0*%RQCQfUN|yX0a$Q_zRLOaY)fybPVd?<>bwVNiPCKzGoKA?5-7{Q2iHi)> zfXq1TWl;{0gRSLu38C}DB*PBK@d$0zdI&lw|A`e}d^=bVP){1pZ0?pu8Z=BzG)AK7 z)^0F71)0ezKIsI~&j9|G4G}|U+ynqD&n_R41})Mu5PWuT1!fK&D)A6(({fFIeB5IU zc(ttA%11$9@*u(VpZYRm1bSq+d72CaBV(bOSQEd69r)>l+_19yhYm!VZ91ZU8pAMZ z?N|_``A`gGdSJnqMNb$Q#Im><(bIIZj45Wvzt8jy$7=(tvLS^HBN5 z%{3O~3w}Npv(!@|Eaadje&yOL+GOy$HGgj zLg~Snho(x$z0@-Ij9hDXfQQHxhOvoJQHFq@ozB^T1B-N%m`4RcGd5d7_mCJqD1oM} zv1})=l@~?vhyj6t%`OO3T?}JiQD9rxX}j6Bsao<&qw;ariy=f^T zv8ZUbkU}emKKzkbVPUnVH>2uUS7kdy^zC%dWtJUH8LLTO$)U81a$njnH9$)gPSwNN z(F0Spa~!gkLZ8+T#uQ|YwYS-3svl*>kCy0`w`RD1Ei;@zt^qJTT&l5C8ns$bjPetN&^j&c$VL+Q> zPO+swrp-iGfQJChl1_dSeZ?mjx15O@GF!ov>0){43Zl8FJTczoxm>xFz`YlKH>D&y zdTOhU7y_oZ)y~jU>7ztXuOK9L{sAt7B(Q)QP)PIG)o;#@&bHPY*M6x`LFi2*!`awP zO^Jx;;rmKQ147aoH)QK`kJCc8<9586(4#A>3(G@w6zR9>UI!fZj(9FjZ+@$VfmQ(* zZBjZh`;abQ@ropt@YO#QV7l2T3rX3{H+?vW36+`&T=V>{!w+fVnQJ5M)Xmm@1@avVQF&59{Jd2b7%i)T{$VF4o8v zBKb7LyMZ&3C69j7d3d&y9Lq-P8G>TL}0 zrk{+JnMMo>dPW-!LP^C+4ponmDQ&{!#YGY;xmrzzPjM-6SY)pS41^~>77pw7xaEYj zo~Qkv>03Y)<^b?hp5DT!r_My=*5u^jS@d;paRy{F1!@fQJ8)FRNE_?7MbN47lgBwvv^<-QEFBmK_ghKBjw$X|U?SR|UPhPk!|E zk7BWAP+3Zy;JAVc+NOyd< z)byNLR-rt~eS;{!?rXFWfZdDTO6-Oe&q?nb0+#M?2a6UKCn8H5;=lAndMm!gM#6_4 z6Q?3V^vy=){4GC@1eh7>pE^1@6*S}+0gIQrn~jfodso9u7pr{u?|AF&)?sLH#eF)| zRQ^ZjZ^!8Ytm(_If2bJ%b~UJGngMMka~NsO3Lv_)N4$X-33&s&=v2+%~R_6QJUCjX43bfI~FlPQam%PE|(gMU$s; z1@cmDAnl-q)fCdf<4O0Fq&BWCHe$~#b#|=@ko&D)98^yWPQ1r}e-cxB0p5slmU6^a z#*N_y&C#og0^>7^3km(ukq)1mKZyw1V)W)6A+SX9I76qkBK?1B@G&&@ht;gH(Jf=I)lB`v7 zYb)Goh(POshl*i1k@$VFV|l(+GmGWN_+h=@iRbO3ju7h(VwSv7o)r6#>>Mk2gW!#; z3_*n!>Gp%A3b~?s)CQX6tFf{^H!tBeDD!xd3Z2xpH~~a{`}_P|&5urE3`L(h2WPBT>~AnYgBm)zfGFL~6CDe|_bL z?mrkl0RQzr1%2zdj$>n5%c9su1l25W?$`W$g;U|}VDv%0R1|Kz7lBPq%6m}hJYBn=~-WEt|PJ+IXygK=layxxMs%b zfn&^Fg=06y^ejU2q227!@lbqT0OX&<1MW~V$b(p9J$>H)B>1V;Gk=JlSrD{wXJsFE zP;gNLOVDBvBJLp$CDGFW#O66JOBkDw6BP#$u&MZ!l|*J^wziUfL7hV!FeogjqMF9$ z@!3(XEp|JQ3D6Z`n2d7{9P z*StZ|2E#&##j+L=DtJdVWDcdbBV7=Z49ZxO!=x+tU333Pe0p$SPoekm&tp-R!Hb2d zjY9c?256-rD8*jHe$2LWyph&{!^d@V|G)8Hk^-^P8?A@2J+T@i zwHwYrTKQ=|sQCU?umxPMmckO%SMI(NJV!m0-cX++tFs8R>*DF~sR5|_ZERk=zGWa~ zR|KCn@I?|K1OY8>fBu@dok&n}%5)E{pI|D$WmQQHX-NiY%>2&6@~+O~f6y%bmn8V$ zS$a7t(5{!{;CL9?|HvX&1PA#y4z}pAJQvkMW9CnNJ7{EHJNoqv*@j2N7QX(GlfAm+ zF{3V+Mo;}cJ##S(7A6@R+r6P$BP1wT_Lw_0Fb1NdkmX2m8jA_g8@@2zLGvaxY-L#^ zzkKQ1b0Nf=Te#9JP!QZ$Mc{Z!F4`-_R6zIpv;V693!eR3J-bvQ(6iy)+o8;t{PXO@ z8XdhJZw8T14tc66eSDS+6EvyJrgu=1CX!HqlDSTfH|2-tzthl5;g+qQMrsm1>md!y z#CSb!?ScG{UlC{L>|&`k9Wxv>9iS;ClPY<|=12hBOFA_Y9cDjCE22RB(&*&|gwn$u<-o4Ou%-!Gsio07h!CzcK*e86~zbjE{nLx@;O1QEv$91M3| zU_Lzj8C;|b1`$i=FI*0t6jeAOqL*~^;&eOGFc#Zh$K# zCs>@0rz}Qwo5Lj$rU2g~Mrd^f%vg(Q;d|S$M5v@|T)TP|_@5r!M7d8qi=ma2v2Bbq z{7MN$MuY;^)y?-ndAMZ=xk~z8kPqJRHKWf>(_@!t8+FeaMsPI47-zu6oo)V}0Irh> z4q^)LO>o@NAqp4q_-0xSGcS=BqVH^_n!n93%Z0nETzlDkeC1WZzc92OA|PC{KAr#=vxt6!H`jicsGV; z2Gvz`-c`L*K-GFS$Ad1Kv}$tHsw!HuJ`?yo-9=}N8z@(kB~t(K%WkGlH{Ag4KfXRW z;V@-$h$USC08S|G=(ujS+C;Rw9Syk@g=aWQOvg%Y4}`S^(IcPbspo>R z7IAMzj1KX5NbS5(<#2gD>>%AsKX^^v6wBSDt^xNjy+xEH%9qP1fVslWy6J&MdbYUn z+S%F=B{&GY5K3caQ4h0Rma!miaTjPC6n8edmuGA~{m3OH>PGu0PGnS4CYpqyg>n{x z3>73mlwae(8N!|1IV)A2Vi*QX-_@>OyAy1UV#xAGbdWxL8pQKY4FHJztqCB6ZMlw8 
z2>s<=g5BZ{wB7_UOk^Mf6~jDeyAFDxsC^h`$wktFIZyRE)DAQoJiem2dDyk6mC{O7 zgSA86TGHb})k-X4f+n>`u{LMlwJ5GNllw^Q(oZHRH%&G|0>2KLsl?YWba2_ld^Zq` z`OKoWFmfV&Z$2P*){#9l7h4=3RhORx!iC(OkopDpoz#aK{%0-e%F7y(K@Ay#s60W&vsW zdr^{i)yn;8p)3u`1qVQZkx|_-F_F5{un?3rigRy!INSz`7ssxi$#z+p5!>Jt&sbu- z+okJ}zNUV&E0gA|YB|FC_O$5OJUnae_C#dB^6iZcCkDfZ*EV;<^^BVd#(h}KEIk4d z{QLep+IqOl2E3h)CJwy!{NS>JR#<9!B_;)Mk(@C=Rb%iZT>=pSX#sYhi-{2B7!C(7 z4kt{bwd?tmi$(u;*Hw)i-q6HQ7H?OBGpa?pXkJ^b4frIjyEVE52a?$QsVlk|$`R(t zyPmf51%Biq6GX$u=KJO6Eq;C-OS;kWYI6j09MA-y;LE<30tHl-6cr8#1BAuvXF!c; z4G!21(A3y?j|eZ2&8mqGV8PA+)GigiSjeL9lg!P&-m0t zkO8>`6R~{){y8yzBSt;TsS)o*X*1VCGHHmF|C*oFqKC@zsTmN_M)+}nfDt{LW)5h) zsM=Vo)Yn9Xk&&@+4Z`%? z>yr1t=TNKYED^t;Wr)R)MTusQec(+u$wrRm$L!oBL~eD`n%drM#pblw)&RmE%KS82 zO|)od{cy9FszCxLD-+X|BUuB&mu>xBHM9L5`!e=Sso0s0quwFarS?yD{kQ_!mFqO% z^6j+Knrya@k2xUP&Oaj)9*wlx2;Gyj{nU=f3L79U2%iz>5a9u?{anMduvqdSq*+2cnSV&AoaBgBq%Z}?g=PVq!Qb`|`TyUqt<|e3mmZG2LB!VbR zRHyj2Q$Ma5(oAqaZn|b}CsXm#xRVBNX)0f)sDNl{y|;(Z$#({@fAQ{m)lne`<$YIT zIE3DZ$z9xBfK6Ew&6$JjmtX&xnCb;W@cITOrD(CdLFv;&t8DDXc8+!|~d|@YY9j2!p+hH6zr9FN_t^>nL>&cqlHg z=w_*btM^V$UDrN2e5kw7J#Z4mF-0E9yLJ}Fu$FKphn9ohiJUn|3{?<#xF#|(d9HK# z5bve?9-1WnQ9ML1F5)QaXahoq-Q9hS&T9MBtXGRy@OP zAQAz#?%Uf$N)AbM7WY72zMOO`Jc^#H-8$$6(~VBJPCYd;8uY@<#3?!j17e+~HPt zZ%DJ9r;HG%X6vEGV01}ZjVFv5|nMW_MF>9%G9=HUG0u%s~IICmAjKny7=8$Xdr=t7F*s)3N zyUI*TWzo>OK*-{54qbe%VgwFnU>;(!=P%#YO}m|=w4b(HCxHP)J``E*Vt%o9ySAuj z90r`nl+{kxO1~WVX7!Q=K@h8EorDC|WdU|M6JQdUCs9)<%GY1@Kh&D@mad~Jth#mh zvIjQiK&KCnn1QbFx(5mB%N5sPS+0wGR!yk7_EKCr-`nwbSxi*lR89W4TMCUlAUNG!oUR*#HFQ$gbxvzQFWTMk8xk0*O=VaBP5j?wX;BT6LI2@jkP z%zWS=v5@KS;&nJvv4otLK*PNgIV%Gf9fz@oivXc9N-HTT0C8YHZ(|pdJ6H{V1XBjf z5|xT+S%}mMTQ|1GLpDn!f+Kj)#VeisLvmFmc7V(ZnC0-LyU5R#$WmguDOEInwsol_tk#T1O> zD1+7HTW`ukiwkoa_!*_+RY!)?_el5em1f%kv*hyDvrVN1EnWR=;54DTlXBg^|5rVetFG#f2AXu5P@F0S0JY4Ekf zts}q#t}CM6ehNbM-Ttr6e;ET<0qP^xTb)iBkSHhMf|75vODh~Z?pp%X^x4snvfcMa9@IdPOG9uA~??_@5ZLO%8zTv!Jb}?#16|VT`yFjJV zxU>r0<5QQmlo~h3B<{>~WNlhn3wmGTvC8od(3x>9SV8_GZDavaQ6a4pVBX=~kx~^hEmxjwk5=c+d z-n1l9^W#XpqV^Yg{w2X^Gkc3N>seA_n3&;JD0%?1I4nRvQ$SjYY;O=#?3O>8#Xm>Fx?8CYVj zu2Zt>yf#(6vC@d~FLb-)_Um<5=rs}FSzVKTU!o`P_utO}nfuLj&eE%ltgfMBZ=QV> zyQ0)x552yWi%il0kzz4i&wj73uX++2I(gUCiPALo9jyi`H6a6yzEOK|UH@jS(+)oF zFqIe#Cx8{tiJ4+M5SooOT8PZG=bwH4SzUNhspR9>j~BT;oX_G`j5e}n4Z#qPYb-T_ zby@OO>RWBiSc(5&WTkUqnns3r)Wp%vBTpn@(b2fBYMYKlC4q0VzjlzN&QiQ|ye>E2_+6HE1H1bo=#>qBSAD z(U18V>P#x*-~|^C9Ct~#JV3GkNi5|m0EOi9;(Uty7i=mjI3y_V7Bve|_hMtfYQ^5v z06E#u|PN3}P)7L8-E1%ECyxpV7=z#8Y1TR@^bO;IL$L ztlgyRjE|&#A%~W%5T`a3BZ3sS(FvSrYxT-ow7ZXP}w%_FXR- zyOB{{)bLEZga#k@f<<2(Td`RE#}i{CvCimQy9VXa%37z*{+luIC3asnR6#QtU+8kv z4cbXlpe!5Vtavf^#-)Y|(*Ey1e&dhzZv?X|Cs)4p{M}IbaC)HFP4Mz5+~(AO z^8Wc3@gj2XX*AVxb+2)f#9_reh-u4?-x!ZOt&*0V`*HkS#AwoC-JsNE()F$IrKMYG z)|R@SX?mwkb+HGUWu1`EkO1s*@TQZk%At*OQ>$l1pCrO?u9Uo}1aq%f+;l@ob&rBN zR;SIM(tmvQ^_XL$w{Z{c!z4P8_YJ zw!dC$UG`EzoaRGzO*{4k;Xa&&N#U8H(B3}bxs=*5HZiWr+L(_WU@|^67Nn}mpUn$m zI0)Y%AP8b0va9gg9J`YhS3K(Eelp8|nj8*rV(z%|omgw~pLta@5{STZ>{-$#rS2DRe3)$RV9*24zC^ zpu^U#81?zT_ecCR%{Vo}18RD>`eF`p)137sNhCc_DzJuA?F2!?h($f!+>MJzC#FL1 zQ7RX7t@yB1ZO^qgZ%$3dn$*ZUJp?SB^ie`sC=5Y5vVU^4)0~>c#Kt8e)l zb4EE+JV+0iY@#&bFzc-nv?Qa7TFGJgSZ-acD3w)ECsY0(;?WR^Nslw(Q~G&p1Sb3w z7MB9hoKns}IC1t9OddaQ4hk==`mAu`wNqgz4E`;%QqzOx)>cC-tsIkch71g=mxGro zoaHg1YN>TW?r=;H-ONgmNwS=ElegVcHV?6>QoaJNcgtl?CK%`dwzSp-buXnvQQc@S z(VoFso7Ye!QS1F)w#{+#G$}|sxc*&nTh6-#`zD`(961(%(;#tnNB2$ohqT7Sr%HZonN5~EWP#!3|5mc--lFmHK#wVPe7zAyS0x4Sf`C4TrOOi@; zjk2a9Ke6JRDa*rRAWG5>p@f;;PRHCfN2=Q|TN~+oI-oi_XMwMBfszJ$m4v<~=aEK@ z3^q+JoGsQC;kX&<93FJ!9ZE8rJeCc6#ZEQnyYF23b};C!IV!~ja2tufI- 
zKU(C2s|Opn)f|-qjfSf0+3Lhdmk>sg*Q-kC@o>fnnQUK+DJtu0UU{qCI(ca8r*RfP z3%*RP9)2)9ra`$A(vXpmE%$eAQo$w~6q-ZPIv9!ymfHlZS7LFw?I#bZu8sraP%@Mj zu2)?+IW;D+@KO@aSc{v)?uS(B2IwHm$%ru$Yl0zJeJzwZKMBWyEYb7ls|n-x>#n1X)=F2qWNaQ10&dH+{ zYZtt$2jz>2kgFcga?H_#Gi$Q6mJ(g*KcscLH6x-?2l(CC@1v79b@g=5Em(6g=2-md z1#C~3QWa4w35p3MogQfURPL7JbaGYi`pESO-F+{-l?G~yluld+%`>yDHR?IH#Yu2? z#4-@jXeUJ4iWdT1Xsmk8_r^sfD;)*^Yc^1Ut%6hT9z<8{=2nzHx`S> z4+CCk3QK2>$v|C(<#&_z1IBDy3)nbm;YRZP2{`0~zGqrLICE=t{m{kM$b|cIwwqqh zwz0OdD*1?5*fZ#J;tSw)Ie1RJOOEtdBkTbOh3~fR#}{1Y>t*eyHEwG-g#N9Cc(Lj$ z49+X{udp(%<~M_=y0!nfG=b3$#)|@d45}D!U09x72^lx=G{)pWkaUi^&WxBXYzcim zO<7gEO<@M`I)b5kcJuTQ>7!2=^f>n7)2+kKgQs0MjQl_T3Favx(eRa7ZSEcJTSZ;B zLCy7r$aSW_4-hM;!kUFUmlKU(7$PCJVr(}HIH1kN%idt@`*4XcO+h%a6R$A~&em;(oQkNPOWPC2YL!Ej6EZ3LyrPT#6 z^HX%#KBjPhKA@uZlDxoN$LXb+tv0HnEp#e2S1zV;y9LNU!j_bJ$ayqdIB%1VW-)Ib zK?s<3q2O-lRF-K^%8>v251x~Rbq^l-x!N>AgUH@kww zj$u2-ep>{s8(7p2IgGuwW;JQ;mh61SXHDOYheJ3n9#pboMdt>7U$?J_yd>rHgd#*q zRWuMewm~8t6`$C-mJk{2NEZvbow)oUv_rOVY-q#S`!W&`3#wVY#&!~jXi7WDg0!4A zFPS=i^Ljk8j!)I)F#+%0(4#JhmUI5r3H%vKSOSiy#^?R_^l9Le`5#@}u{mr{D-bUJ zyY$T6m@##?k(enH^<>x7Rd_0vnUL2|6#>rhmBksom9XEh`fsJH$s0lVd#nVVjr=;j ze0ARakx$+`|D6_+aL8&k$XS#?XDgJTqK+VpsUjFxRf}wsnzM@gC>~dd9)CwCo<)z= z3$yzpDy2G9+*ZnSXkO&4W4}wzF3#yaL>j%V%33d&*f{@!P}&bT<1G{WFZ=JAUq~cC zAnRF>;3NH-Dq8wyWwqfr*ZDPp0Tm9I5bQ->Yi)^OXcz)wACBhbRBJJKFGWhpe!zl4t?#1X>%a5cw8{H`Q|A) z8v6{53b&h{3XYh59xob;HoPS}dwfxJiQ`WJhy#x*sEU0KAR8@Z@hPTJX31pU1e!M)hYFVf?^WO0oVeu zv!(s^tt;^?c2;ybDX39b4!tpM&>Zg$p2{QFg>ikda%m`2sghO|OEpsPtal^GIbDln zZ0^P`s2p~Q=EO7@mQo+a>u~-E$)(n2hEEH;1Uj3t~?(SiGHXgKMZ<}h}aKOsUccb60{>XJtkT~ zX~lzTD!+z_UvoPk1r)sMXgIL@`>^VJ`V5syt*=;4azO$diJA(tTK2IXO&AWJpl*jA zZ2pXgv1mJ`8vT619WvBZYaWjWR;88!5$jb}kTdLs9(<-HHp|2DRsfjI=<{2zPmZL9 zKrPa^IDUd-kJxg!HZ@W!8%IpXRUB+0_tIKj0AT)c>0+C`=99F#V>F0K?tJPNmRjJk z?nDhNe3tdDb)DuIXSz;6YGGq4bGfOy4ms7c~k zb&TIW$$J_H!vegnt0)uiwEzr=o>CtTty#QsXux{&3=Qb4N5g)~0hl}32&;V?>Z;pu=BFolugOj~zX&a)Lr8T50kpN!98XfBQ7dq72 zVi7jfS=zT|wS~t3vy$K`K7BpBft3^wpM~1#q`Jelb!1!P$UUSWXxu-I@s6p05~39= zudtdWZq{~olLQ0qoT>^WXoW@reM#eRzHQ!1-X!MxR?rmH*CJP;qgep4oS%jn)C{S{ z<)vc~VUd4OEBzN;SemZTJBV3qk$GmF;v^sLeq->Tp4-@MyK|$lT63_`b8jU4B(59M zJHT4QAL6u1Jvpzoh-~iV@V1FE3&@F^ERl|V!U2=`<8}L^1-A%$3LVd!A^M0ti=LPW z*c=Zv4V0JOF&$Q7q9;?i<@LT_dz+iX-f&y8#JLr?$*l%ECOAZgdZ2QMf$X&@ zg+Q=Q92!0W=(R>HYJJe3QrFhOP`8^RhUpb3cviGlw_9J*|cam-HMx_9poL^}4EM*UK z(ZZ>E0g84~qkCX|<6^@#fRi~%=PDAU1HM;pMVD`-g=AxT|(!4u^sHhMhS!)F!tQsNPV{asGsTdTyMrwK1qfA$rQ7kKTU?T zj1%`{U3uy|dLi&lJ0~>Ad(91fL+->G;m+V4Y!bs6#rI3-sqR7BZh60#>TZYppx6Sq z@%B)i9D!?2S`SL0wtXnNn8m`+PlJH4ghLq6n5;0FE@pYi(9iXiyL%j>F}%7}iW1+^ z1LKo8)&L(N!|)8HeO&Aw(Sai;9)!WR=>Q)by%d0rK`hnQ=2zS&ftv^vxQI^lI34)C zw$$F*otWWsSFip%eIkNJy}Ed|&HgBP0hc@8TCfCmeyiE_ANL+==r_l&#}i5;W#7#f z#J{iWy89{Dpk7;yL66al&cQ8J4gGTzl_;r49?k*#3&z#*q$q8OyJ z{S-g&J{vZdPhM(z(m4l)D?Hb)kBr2-vB1%YlZ%_4MkxCz9l?&xlVnE>@{mnPCaW2< z+%V9`&3wX9W{*}uX4CrlIyN;GtZ*Nh_5N)+y8gRV&ku_U7x8aO^+8<4u?^R-97)=Y zUl0c-r!hgv4Pk8Vx=q)7*P0|91t9iEq@Dke3>^MHb}0=9Ig$es{*71ez9OEe)a#X( z}`mVk)L~Q@7APb48=G{vBo88$ARf_VU)3KX?_Mw!d zRmKO5%+kY43gk!ngig!)>eCYZzMD4vO!RD_f+hoQdVin0|32P`!iPc@+iXP}M3dv3 zaO()g6qwOjoaOKK-~IZ}W1}oB*DDQ)nxlj0n`YgegT;&M3Ug_gT~^8t3``l&BiH{g zfVswgK^y}*f;|-h;71%q@iozU2B z1~l8d7n5umKqALR*XC>Wn(n*ylxwv8$Lnj=aMoMw#QF`ji)$<;uppSN%3TP;8>8`R ze9rn{K33-gVvR#O66*B{9-QoDd7-!ETEkQZL~faI_1B#px|z!FKe4S7!U?eVn=$nR z+yTUXG$7;6l=Rsks_8!D-4tFl!@*1z-B%jMi-$T7unavSho(>(gy~yc3Dj@XCp(PK zKNP8FYju4=$MS^WnKtW|dM;v#4nj`v#uKwTr|n~gA%1B+aD03+_-=s^ZCJ0nTg}Ah z$XIKSF76>pi74LM&b`iO7iKFlI?9cD({N6jpK{`S#{d617VMSR<3-$g~9k3|Z?(_YOChfEHQfw^mu@}MgT6D0(dA#wJ5 
zIoooWfr;ZONqrz-vE4!@AA7W@vbZ)c`jg^vS>wTChHe>M>C3v75Ao**PyzJUKS15hCZhlz=g57aWZ4ZE>XJhA=%Hi-KPww__^X#V=IC zP1-C5!C@9oYtGEi`oGW+5kou4&vNl*O^v3yXp8!^uyBAAR(JO7Fwv|HE-02DyEEq8 z1g(-5(lpIvYaX&q;OdyD!S(PO`iXCzrCnZ{4mEQi>Q(LSB<^Ns>RNa6p_Ts18ea>k z8QZxD)_c&HB5oiv&>Tuqw*KUsnkV&wR+qIpL<@+X*^2u@VxDXzg`nG^azO%i7eEq` zbf?=r98W$!!*bNB3zfxC2AZ4bI4rj@l_+mVWydW`LcYK0f25JV;19F*;B7;z#;kz! z@-*MR%_HD$)^ammH^WVcno@8@Q#LGOGPp)3N8^czWV>3Iq8%(W#A=qiKsY-EX=)EZ zXBx~R+Z<-xo4d&?3b3PL4EQu$$?J_7(VT;skO-B@bF&*VzF?)YO*A&!BKU2>mr>of zP$$u+V8^QMd@%**Qc@!tx$VG{ANM~B8kNH2=s5_QM3&Z$TNd~uI~jg5lQRu19R+At z7+A0n=zl|>Cy(9DsWFj}^7j8XbMO8f_j#Rp{t-L7wLi^H6geVgi3SOP0Oi<8qtUpC zxU|p!DOs|d2G9VSxHQy_CI~8YO{7WHBvqM8wzg_&_b1vCB}=j;JB}`vEU5}?{a^bz zZ{wWjv^|rVWG0(cBAFn%zu)(A-t(TzbDmS^r+l3A?QUE&O_HV|O62x)7K6Ak#GtNILo_kSune}2qtQGifZiki?!_IDNfaIii7pO z%f}FJ!t;Ecd0ofi)c%1|mC7{`3a`0o!|{)^{p1b~0+VpMr7z-)TaAMRje?2om_3CA zp&=~u#5BUJx%8(`_({H>Mq(8wWY_gTf?z5$#MFA{{FnMAA)b+-2#63LH5!Kc8j+D) zw(3k5i~crkVx)a(JkQDsCp>n2O7A$rk9#DkAwja>YIwd<)#vt@TskGSwTk4Xf6PfX z8`fo^-P@1VW~5{!^x-OTl7ccTJ9^YxD=3s?XCZ!$V`||OETK#J93m@<5 zVsc@ucCUZvf`e>#=|_XZp~5I2JV6LRoZF~fkivNM2kgbqeL{5E4$sMonRHjJrJ&Ak zKUN6WAX{n`a}_ig`j8cDu!>y`gC!ML?_BWW`uj;lgsxyNS=cK?NThSQ$*kg^W3A}m zPY1v7=NtmP(7MBVj0(q!aKYi8-itT|$v0B;^flA_8(hxq4E$c<+ zQgMBJ%->R3oNf4Lh~leT!s2>$L8l)1gcLL|BrFltv;Hd7g%wrk(|ZJfvElq33nX&= z&+FI(df)iOb!oTZ#c1^>s>w2D7C0O?JKJ#s8SFEs)n4mUx+)#8Znt*$9aR0{&E%9u z16`d%3EaqrNcQn9Z!9G+#SCy#jds72I$dEBZd;yIv!+p)bxx6q+9`Yp_~-{g`&NPD zh}Yv#WhZ~RlX`f9i?GzK`CNMT0*Eh^iN@01OK#sRZss=EWnj600MmTB*gv+K5*+i5 zt>c^?RGFRi*I0*v(xX|QuaJthk?Ql~Q>jGN;spe2PEwTjqE;dq)41o`cT$Ngp~z3{ zU%h47vQU6E^cGOsQVo>F0VU0-kS2LgRqI;jJTc)83-4p!E?@r6rDy#h(JlC=gAaYP z;R27UNK?V8>wBQ`XL9KN?bVI-`o_9{+nOZ$h)cPQ|3Feu;dO?t1UKCfNh_7$MapX! z5tb?w7L!J~^yDS2(sl<+5a-^v3xkxhZDQk+I0LzWV;KcJQOs`{dO!R<)nOqUDm#z({KO1X_ znnIwnKtAcBTc9>U&oj>uNE79#zDO2!?wr~BYA+_PPWg@v9{%z7{TI#qBXq0vUaKEh zACX*Z0z??zciuV#PKMHKbas+52Ai;l8Jm`1hBS2SVzbxWPsybh2#$r0-($KyxC4%v zuY=j{x2WmYR#zZ6p8Sz*pg$c$N>Q6JV)ZuZESl0r7|HAB=l@}RYY z?gwaA5HH%k%2MOsdQRkT2LDJRO7pkAXed^nEz$fE9`WH`e()f;ONis4N?dII>^Yq= zPN1=dki7OuueA&&PR-we&hiwc)X2QhQ zDc3IXDSskXrV$bZ#QKQ`(4^`>G-&R;&$SSDPpRsBFD4ACYA0^$A*)Udmc`@g8SrZ1 zgpJ<(e$6*6v-LTCE%1@Ql;XzYNSozr(6B z{s$KUFer(p$8zCD4VZ%BWI*zdOQu`=%Y>!FX0Bs;#4mVbT)Y;v^YuP8pPp+}ON^6z z+%!C_uS$o!nH-E^f>rst$*GAnm3>oJv3b?irIBpj;)}ijlvwzX7i+b2{EBPUi{jAf zvDBDyr#*B6{hJktJS1wCA|3V?kTgOcy%lWhM#3~=*GCoY;txXlua5*o5eJMnKTXz% zDh7aqU`9x$VuypsVel&j*BvLk#nqZ5)f!ZHM-Xi^G(jcnOFiIu-C>|f z5s#Q9aA3sR}Az@4RTgZdhkuzRlwlGcJm| zpmNZ)J_M%HP1%&~(mpH2Iv{bDrNiA$EJ^WUY}~W^IvF@XrOrp*UVA@^Y0E{uvF!7^ z$;?~HrEglL_b?dmA9?)lH*XF;@Sn^C&lN12Myg33IKvZarB8H6cU2uyw~LTK3h5BV zG733aN}K$cp&e9X!cF7Hd)*{RwO6kU-t^zFEj=ReaWH8_?!(pWP21+j4m+t!2@E7q>Zuwu&WVmnLy7BtcR!S>6AC-X+UHZwU<^%^McsfqR z{7UgS%EKRI6;(NIHE}H=1i8=fI3yZY?r~QK*2Ej&h3rr?NHclUb$^ zfrssGWw^<>IBK0*Y=UFpgCwy3D97%(t47F=DhIoC?TTi*7DQ?u>c|v2qQ#kw_0_ew z2uiod1m2s&_&F2srHXKKbg@o2C|e=d<{D4?sy8Vda6QejL79@mNcyZ~hfGnsNR$c& zFdjZFf|0;Tv^}*_4_HDz()P&j(Ka^t0eFoB{+_X5ygzuW3s*0WmdgN!#$qH2jjnIk zPC!nb@AZ3|JU5kqlu~G_3GC|d=i+Bz7f1-{Pi2mZkAI8e#H~G7~OMTe@SG+QFQ`eZW;34yf4+&Co7HeZC_4V~q%ZtH&9%iz=M$@G@D(3yzw* zDXr8$0uf8N*^gJZ1$B`)OxhSY5jE#f+rDTuv51cOu2$1r0fJ#R74076nW-5$M*Kpm zr8*=I73LDL&R_UIsHz2k9&s{rHo?MUru)WHz4oaDG_@+0_k>Pqh0lCrGBMi~GwQE> zd+|4Oi3~Y+D89mcUM7&(4u1t))e)z`hiML7isde1(Gp);UGXgjsq7!ElB^{K;&O{B z<}&oz3?6>#Tl!7yu$>GgNFoqoTht`DRb+q+s2!+y*Z8$+|?0Fy%;K-|Jq{5yYTkvsI4K`1Rhte`B7B;wv` zaEwNfr}Nh4Dee4IV`F=QxwBS_)RRFo*OgFP%^vq|}fhzo9|OAoRfm!=`KT=x)m6_Qj|Boqo#j1{5x 
z(|$SEDa?|JivXD!wp5*RGy1!0#1x`T3NWa3bWR}$&^xA(wL_bd^wlijg`fG%p|#ETRONu{EO3CmjxJrol~#lp{=;@B$r8cTV?vPHB1&AbPKN*s z*R~f~nAUa#RUiI-Un2dD>jZij`id>iEG{jszvMqskKG_@ycFC>N!$NUU>5^XS*s>Fo!381P)b?Fn9xtDSW6I0jkptr|K$$Lp7 zYBQ#NAE@(^!~j&yO)BP^F)1Tc$S5HKuNu-e+;LX&F0){efh)d4aY zp&5JC4Gp_dTl0~=kSd7)jz4mFwn!>VDqoTNkbYhIHIyMcolb1W#Eo=)Q=&-iLHcax zY?I-pzkrdJEWC!h2DCKORlzIJ$6gK2F|Y#u?Mh=|ablR4eaoM(Y3OEx zYT?*#>&5IkhpWjdu>8|7oSOp`Ag;~Ggc&=50Vq!r_6=B<;Z+b)*UczQ4w(PMo!0Dy zfU@~paYq^Mk)(67E+!@>eMc^T`;xEWO!H_zQv8qqpt=d&Eje5Br@#+7G|M(I>N7o`HnU-j&32#^IIo4qx-hpH;?9u%W zH7w0ZO32(lvz&a7u3wMq2=685I~o>}2;b0*b=Bp9Tu)&WqX!G|uyotF&U+nCm&S$C z_l5qTAYAl!2tK5*b`AL!97My2VOqbHnsA}*$YPaCy_2r`5<4u(301SPVqSJPB=o6Cr5J#fSYuyyHDB z$y&+PC+`KaiwKR7{|2J%BVf2>Am~>W`?Z_7ahnu^g=Qir_|w7H!A)y69SiD{EpxFd zJo@Lj(y$N~Kv>X#9X+M#2Ys~!IaG0h&(2LA0FntaJ;Q8Zhv>r zr@RSEPAhQs^~$;*3%CG%5AgMo%>GF7qqec;<43Q5VaZpQ+?yd09jer%?aSHgj}%`J zEXI0u# zF3P|+f`vSZTlo90#}x%E)jz@)vf(l@HSVv&0kGc6{z2QviTpe8e7;r}fWKq_O{hX} zNVS*eMQjH1C1DM$zbg(yj$0Z6B4Of*=-v!m&OL8Q{fW?Ce(b4#oKHC#5U>Pu&VU@T4JMBt9fGf!AwAmqQ5h}K{(%D7ON zv(Y3wBjCo`+O5TEt>JTP(ZwbQZvo8`76EAO@31N#vX;I{O^bB+4_q5#Q<9cVtSgpH ze?efcg;S=#SjMCWC94?@+pn=P{yIztjMA`^$oXYA3kzG{Mc>&q)jV*O^5g>-7mJa# zc4XWd#6fRZwl+ihvBDxJWpaHovG=XbRsHF!2@ke_&*Upu?uxfGEOJ@EmqrvAcW2=- zR;e;~a|%!p#5Jf10_p*p1$U*w*8uRe+@GW zCtrWD+}=LrsA=uU44#-l9tWlR;*0t?<_rs!B9a#;Z!~m2XYvy%G8h@OY>ieT0b{Ps zWE8Ro6n|(a1;xgpO<$k9dQ~+NXc9*RFEoJJ%1ip;sFQd_ z4!PjcAppfC3Wx^}E&wYU;a%D)Mb!!8D|Y<=rKL4iy3N!}8P}Mw3xGa<=?rW_{-{?TTzT?I|Am$9`KS*%p|xjQy?rRQ^+Etd z0$4u&+i%|Rr# zKunoE3jyaU;Dy;rRkPno0q;Alm}QZHykA_#DeSq@PK}sw{J4m%ZG~^kgIB+;Lqv%Y z<8{sT%pYlG;e$JYe8LAqm+LPEzxL&$Wyqzdp~4f+aYN1m${**_Kiq`TH)1Xio_Qtp z_+m2!mbV~VcpN4BF{K?EqISh>1aX*IdRj^p0VSW??$*CL4d*EF%nae8F)OmWD0?c0F2l?l(N zAg65r{MJ5*wt&}{+im-Oa8~nsf4YVk55yl zovI_MLmSFpvfqOGG6%F)Z;80U44rS@@Q-4=8~ci{x5Y2o38iV?Ab~ZNi*uP!zU}K@ zG5my2fXgdf5X;?xf$-1m_Rn&)H(|(R1x=0xEdh-eoBO()hZX$A;@9!sYSRlM&ID=X zegq=SH^vjw0T`0e|B?gno=

I{twzcwRx)iTXUq>b*33J|G<4S7abF^ zMbajVqKc$=mQ1UwP)Di`aG^+A!y?%UAOR!`Kth?wI*_I#+U~w^$9O!x8uuM6QDR&6 zNVe?up>4@AEa-pD^X@O#dRLZO4j(R?4%-DF^ZWK*d+l|6*ILiFXXBq&+j?vYa?z$J z{&2h9vi!oL=-DzwtmEA(5-h-0rKSyHpuRI6D}&D?x;Vx3YB~gQdtoZg$CCW9{vLm? z?O-8Ny5|sz#@O7~JnQJuAEGw>KuU)8Ch7gdTh@W^ zU}mOU!C$)F7GzPJ;3wuvRhu2{yKbD)Z5t0g)RR`d?#OP^A0-roP=1wn_(x>0+rU3$ zp>b$0w-^2D$$=6Gm4JfZ)?|J+fMVfX6{~OwM}uCaSloCBcec8syab`QY1;&fRGoC8 z6>J}hwg%Sjg$Yd? z*}dZfw?U=EElbVOh~{#nuEhmMO-q3|ho!sdx8ST0Tup7$oix94awz|xHb#*<1)eFM zhuB66q-y6`6=jLmk8M9BxRJSAiQQw|9xns(S1kIYA0fs=urP8X*FRKd0ix1#@?WSzs%kqD`&xm#Tg71 zS!>tgpRd?!o#2$Z z8&qP*hWVq<2T*T4A4`{iRI5*px%`{9X0!pZqSpEg<#B475A^*|T_QMZjmA892fWJPS5qTEw z+rN^Wkk?V5a)Da1BVEc+32CoTW#vx0ixZ*4=@y zGgDB408V-KaE|e`Ar156c*KU|l~>z4Hr>>YWSQ;{stKc*oTqY@C*m(E)hhj;5GFDz zkUX&=Fz86@_zC-i2y!gw^yDrmxBI?=J#AvQKrN0IYjNw`_|^CApZ71HqS9l8Dyj}+ zhsjR_8;U^22s}J?oC95`?BZrG1P)?eYL~n#*~r<5=4QJV#FF9uXYqpyIZ~w{a+Q$q zA;4z6Qy>D#_6zt3h2TfZI`}ca-m)TqC@%JZYqCC*2 z)e-PFH?0J#6sd6MP%Oc6j1oaJ;bbki=;HC5xfj)_ zRB)UOZv8X|1mU1;^=7B>SJ|CkHAQYKI$>M-kboh!Hpz37cL-P`D}0`aVmLJ(DNpCS z{`?IX1@^HKqrZRdonAme)Wy?vt|mb^`+TzEDo@4fl&*Rx4q-XsDNDG7D+J$ex4cDs zrCI8G!910r_g;v?0H=+;VDcRoJ_eX-U`@XQ>?{`awr72*o8zSD^c_G$2&7n`Yz1AB zG_TIK56$r1v?&o)MJ~O3F2f?TV?#Y*`rX&tM!cfuI9ObPBO#NKHn5F@cw9vN7(3Xk zt$&D}8y$9f@)pw6Yh;pWo5N&2MrsSs`X@D_i>HokYl+)l_oslpB==)`#@dM)K=&E!?MSpRe?lh{pkrq=gfY57{hJ1yz1u!TMzLEt0BDs6`DZU zhze)KO5|n!H~)30TP`10k=9Dn9=bWDRFE2;Y~@OMti!EvN6Bo$)hvX%n>e(mYtg@Q zRmC#qjc#v4Z@A#q9(VNoZhG2P<5%V0HI#LP;p#F|sh*V#6wW8HYwvu(Q)jSY=*nSc z1rDfk($%cqTzQ+MB*R($&K#shA60A&2u<5?nhumMn*8D-hYBo&BE6{E&xR>*eMb8b*Sbz>)kjum>4Z0DG47v%;1XkQn*&aHP+N4rcqOU3? zh6cQ(!Lh&reoPVdTCX@-tgkW(PBX8vDEIV)NTe5{BC+D)5aD%=*{yctoIlyYZRVDn z?UI+P#)2pTU{;pC>X3sRDX6O6H%xwZJXBMiF{?}m!e{a?<1L8m`=uaAT}{#^zIda~ zg69rk3dD1J@!3)F_PapMZeet`kHeo*gEu*@s)wCmj!ER@^?Th=1N48Tj&6}3ar}^8 zT^Z5nD=gY!ooiHbOub)>?U)h84kafwgBcHtV}o14B{Z=q)N(hjnRfRFP*vlShV zvx_V2sfrPfKP~={+Hc!k6mV>CQ!RdH!oTLH4(m zJ{d7JL=Au|-!RmS^5TUDP*D5n(1Kqm=f7TzUb3_ym0qi3kcpe8$mg8WqvDoUjJyIh zu!9*uc*_#GB;o{3%Ae)h21)w(qy(&S9`2|~K%my{FOl#jzwB+q2j^psTF#O*SVdTG zlV}MdE}=#w)p5}iRQqxx{XCitvy4(%^qQwt^)O0(yPCQTL7lS5DZzox;}&ZK=768% zm3Xy^7&MqE1))aSX{2k3v_6Q6uJ|X*YG`s4ag@JlC_4ZGtQnI*;!$O#-e(Wl5L8P2 zGF5xfI3y+jP-Peoex>^U640eX5<({rQ(32C3~0zOZ`d#VE(Bzq3w0muQ&Orb@$^*V zEyIdsJjQBXZ!g9_FW0KCafer$=+c2ZD|KpeuShvpr6&LAfi}raWidO0AV&^2& z>h9Z>K;qG*GZq4l#om+-qgV@|BD!g3o;_6luf*LItB5D5B&@_NqWpU$MAM~XkwR=sSjd!T$)zI?5Yg|G2OfJn!l>8O*g^H}}e2!(VGJnhHo2-(mS`ArD@0J20e^ zWdl+>H9NAs2$&6Iu0f5De&>tt*4Q@^c~%Z$c-_OqVNe9Kk%lXxhfVvWnZ;jG2?3#=!bu!9Jh@`C-h%c?{Enf=J_?F*bl-@7s~Ev-gJ1&| zMz7-FN!jHHM0^@A53}N-jVT_m?_ic}>4aWEQ3R~1sBZIqFlDyAqoz#+!jT=DyMj$T zM-xtesf$mpqdAWYD2!I_hmFS@d#jrwD|u8k^a(;MduRTJA?+na`hVBM+9;j{RzM}3`YmkjdFg^j;qnA8z1qGS4AjA1UKm=$&=mz&bGxEPdpW0J|T=g8l(C~ zQnbtjm4h;Afn3^(MtcaCR5{sihjY9FOsKkzTjN3xs)}=UigA=v6w5!Q z)%y-BUvKB)C>7_a-qQfY@a>pVPRdPnw*{`X2Yjm1zLkrEJIBXs!mGzz-4h7{^LIZ! 
zQd&5Y_r0k?Sov1XIHWq?UB5nDmR~2_jc9bR>N8{`v=k6TL zDE7-u0X1_$KPnye*k1~t74H5wR%=|Rh~NTcpRr2?LWlxq}I4gJq78O_}rIm=bb(L z5>I$N(CPoiT;|KAkel&O{lA#>1M2k%sWWLg1f9`gu}J^k zwml*1QZfxQF->qR*rM`O&#zEX7!BS;J(Jg|^Y8Ji;5pT|vL-3U#*5x-**0#qAbnb2 zg(@+g4tgR>KUBq)rFPZ20Vk+ud{n%2YqB|uSh&Zd+9QVrcfiiLWNo#Kj|Qzjj8`;z zM5Oq~FJlkD;C)ZL!Gf1Na0MWPdNV+`u?a%ttNka0Eg^93<$@W>RPYx?G#VYmW5hzP zc*Aw@9wOsjgH=JQy1@!Pgt8&ysea9PJWGIi^63iBgZhYchnV9U46~xOpKnh-X~F#> zqb`9#?C49SLn+t`d0)8ud$nta`bF**juy|bPt9&#zH`lT(fgN|wpc`E$ulYzPP7UP zae#mRTC5Zn&3HfWH@Od3;17-xc|T|+eDdO@b}1&dCjGQuHj`s5m~vxQ4mR2et{f!5 zkSVUyb7#matehd%J|?ulp{*Soz_WyR+Pdg{6XgUKl`jJq4IUdp@OGo{5^hQ-&M3MO zD}dZ>gf0=BKDgOf2jwkth9H4XkdYYLBDWpILK3P$M^i{=cP}&qpi-=>u&64ny;f1y zLVl+e*48Eyyf;eAeA>Pm50R}?Pbr4p_Ks4K7->vZZd~z@I@${?q{AFrrRXTGoTXok zMWO00o1kP$LRaz7XWwbhCVO5;61Q_ajeFye>OJvM$|2NSW%{7yDICE`-7f~1`7CMR ztC82UvAnXzn+#&Ps-N_Yx0XP_$NMIhR}<+P@dc3vPG{kQ45glSQ9}JPzz#B6ofAu^j2Gq55nqK;M*@# z5Bd0-iW_XipVSMh>LKU`dTQKHNbN)^j4V)mqth^FA>uSQyy(BYb-ms8SE+^r(1a@! zA0TXsuv_?nP0yP%)cmQTTV+qXC@#9X9u&>02+k*4VhA~6&(c4_1+AIlya49eVX^Q{lw&(m&ILdcfni%XBw;4>2_`8ZTaM4yaA!U^IAGG%wQS9ovz7aq4SI4R&3`FvL zEY&I%^R3t<7(I~k?C+_-7FSunxoRs=G`!>#C%eG8`{Ix$Vkadq@?DL#9&OQDjfd@~ zeNQopTkZCn3zqf6?Nf1<2)Ctvy3_V7Ns5OtKj(tKQS5w4?m^;4`6X)+kOi*MvN{QgJtelbmhrPFbt~4#qJY)U@ z&+hET#(ufGfpx#rT~%~-gTYowsxCEBm5|ikFf_D}-cOgLBXp#yQbSLKvAq#?#CScQ zcJ^Bt^KP&Kg8^e8qDuT<`@8N(nO7ccSjW!BeDU_gn38nP^V~0a=bd@wm6=6{^;Co1 z)V9O9MK~)o$vBE2&0+SVdP>NTMzy!(BL|l2H zC6LuFE)jQ2f*w=M7U&!v8@3U7UbG&m{N@Smg%ok~iFh5w%Aib8feg1(Mm$h$6j3i> z1tACU6=J*OB*tcZzI`@+&1uR&*qZJjT=ST-(%L^V?AkGTO0CcZ5 znNjlT5k`oY{E{lFU5m#<7^p-D;`A_+?CA)u2-#Ke9R-(H6;`pTWX0ehh1Pl+HA!6^ zT|@&5MXl530su0x&{hw8C#}LPKrZkDM~TFGFNH5Qx|hus`G_b@2!q_L8g89+UxSGDWqnb9g(8Yt z-pY?cIB;GP4!cU|Ird99{3Ls<@|P>IU<4R?Q3yZz2I~J^70$4PEiU*$bT`h#%8Q53 zwYUAvO%w`TsEQ`a(a0O)fPScaAYMK2RjQRG){YR#DXSNsQq&9UHSO6|whXHPQeo@? 
zvIth6_P31Y&v?9Oz68jL*p=tb-=Qb&T70sTMp7=wq0?E6B|wd&-GF;)$_oqLZI2Lu z4w3z6pvQmHE7NJCYR?d#qvlYj@KRUOPexHz1}u^32vp<}DjS70^pm+|f8I&2F;sba zu`+xbyDvj3LUVhUu3qfa^2Hg#6q3}&q6^LD>>k)WnvykfnH^A1hlsmum|uucP7;+O zq)TxSRZ55H=l6BV?WG7pcYeVxo&p6G@W9tPDC8Xxji++>e56&6bb2?FRZ6{;#rCO~ zk-n#!G`5=<(KS;u0AoMB;k$stC388qAA&`8&=GD{%yBLlMy0E@w+sf>?Rx6q=o*poFw>Y9B(W zJr5|}rc$84^xHOMJI5fqk$2+jZ6JXE&NEX-P0N^l|1`{!mAu!6v;INMm*52B*n?hE(j#mQRtpS`h#W&~ADaCqn~T?5?8h7P%pSw$ce( zZ|Awfix?%Q2fCb z)+R+RsCw-M0p<|_$S)Ijmp!RPq7`r+auQ;|ef(+aLYo;ERCW=LM6Y&b6#{h2P>4rb z9qU)s?<#Jb2+8GnWUh2NWhzuK?qUHd7D?3Iegq!dq@c=>0q=Qzp7u;txDY_u02!{7 zi!8h=eeaA$Oc>0L1AF;I%dlNz&z2x;5)}w-Lt0%q&+E6_YFM?j8_9J0qJN@6 zLjq@zk@qK)oP};Jcq{Q39fZUJ7dVJ9luoPS-t|M@;O_GE2TpgQlRG{99mfFw>>~eP zyZEyhc}G3{Bq-`6HTXTmtJs2Ajgz(;vnQ%`B@`ft?`2s7>(eusRqWN7@>87;jK?65 z(kEk;8yFZnM#EVbl|SY+L`C9{OShTiTCsVsL_{GkX4)F!f)9?Y#yD~NvZPaQ%cC19 z-j6lV35Gw8nTQTP5VK+k>l--C#d+K3(&P=XMX(JWQeIt{{p=TmYr;7^uZbDet)?d`@lTM8JtB5NSnk#!t&g$9_t*%9y(b$5JNt`gsR~r z9+iq1*yfl0k>dOzv)~GO6%$f|QWOJH;IPtQ?h4e|DFxCnZc*4s+h`BnTU)PUbz)K> z|8BcE)be~#a_Vol8*{PJSTLoz7Mxr>f1lvdW#3v)4Nn0C-al^=ku$-+TgoLNq!;;k z-jaBPsAf-8KV%+|eJP0_IE#`pU&hDyNF5$*DF$>WJv+gaif53ECQ4nRA5_B3%p%Z~ z`Nfzo;73VVnrv+`MU279b-#4YLGeSkDI}_njQ6}UQ&ma9&WIyC9rEs{r{|T@tb~W7 zf=Q0<4+r03@X5Axd%=&LC*FsHdtl|i-}C#_HeshEL@8<>a~E$J=}9%c$}x`V5~)A+ z)9)TZpDP-x)91=0hm{Y}e&Z62;k9u%PGN|og{Z=x=eZ*t83&dLAs&R#o5(#XP_R4*?g(RY8Vi~#}NH8&ezK0S)RP#RM`NApva#AeI z=;5MqBX|J4sWxI5Q=bs0&-2Q{u~zXWWmmBlIT8Vu8@Coq;`wNEiVd;ppfUC+=@P{_ zppzbZ6-zICNW|Upej2-3>#^l1x5Svqg}fDlD1N8PQYpUZ0b&~fj)zoGJEU|2CgLH_ zWT)^7b@$WQYFbtciM!(A(f*VpSwx95R9R(mE<0dRFC4HqR94D1z3Q1?qb1Pcj?z+N zrPZFcL;>N{$l^x}#|MP!mYQm!{}%cj(>WtywpvEq=t^#h=_43b07 zh@WhC;-Am8%o*YGORF7Sb}be~AWa&-iiQ)h_vPL-@~Z%!AulhnAvLUE^gd95sm-I#S;sa)a>lJ`SlLLYEBl6adn%FiN2 zCY(P<_?ak8)mr!`sRV@-k4oP5ErxR#QxJ+;;&4#ZI-%J-12&B{^)`*Rl?t$EO7B5+cc6~D}Vf2UoQ>@<9S`HYK^cNMcg>2$g;3)JC z9kEjojn6zREKHSfhjmbFT86IXX>4132^jFU-*IV}#2@LQdEI~HD3EbhBD%+f!%*Kb z>-VY8`CR-kk8(S;m7ygY>>z|V9xv$);k|7d04}rvwNBK%7b^rXi(%~{X@%fLc-9CX z;h)|`(wcUPy0Vj|I3tc)2!uo!bjHtwU&pB|5+F{{o*cVhqug+QQl0mKAtOFxK*iig zSc37Q&W4=n7BqMlc?dB;an6#aiH6+t9J+0j`WXB8UMX1hrJ9aGM5mK(>`M_5!0j$G zgfPaLu#Cf>;43;8u{CT#Ip%v}qwJL!hwc)ryIP-qNER?-Kt_8S_c+UwbTL$`E;CHd zF;>&vR3JTj%)moYT$*dftAH4xF#L0m;-tT;bTlv`7lx4)&`SE84g}JXm&p|Z4zWFY zSLus}x|TLh$OZl#3e#^^cy&|+g5%aP%XnNTwAHGnchQRF@ai<G|^xjY9<6U)y{ zRjNu8#e)Q}O}3u5tOLy*G!2D-v9&eO2V#^c$V6e9*yIjUYYOt|t*0z{zTrN}5+Rhl*)MfyLAHkO&d z?VT0UcWy6u8P>2w&CN#)K}HTq$;JYU9pyl!fuqv*RutUqp!ZN4W1^JK*h;jR070c+ zD=tV;zNCQ2?!u;tuioCdZ--2Y1QI$Qd{ppkDDNg3y>wlY^I-wzFr95$9z?A}R|2?t z;`1W-vt;S#S?W$k{sdJKDfJ+Gj^#MxtB)TN>$u1e)C(`X&o}+lsFxh@i&VQ1D~cU` zB>qiBix*>Pw}6>3B||O*eWk}p=yW=x%ZYodvr>E)Pv-V_M-0ECZ5<|5Z_o9 zJhGFrzCa>P?8nd{MW$;UCym9oA_@inx)cKxFmud8;)!4Z#T$lZ$z_*Gl0zleYx;)z zlZ+4HP{m@Yn7(4>@w0L@WnUL_0a3ZE(Hw6gW#MFookAdfoJijsW=u`cc%9xmROj*B zUSbz~wihu*AVO3pmN4k(0Y;g>LKrpuDCG@(J=S;%$EWJ42jZ>Piba|KM)j2fUyimx(1$)AGtR^Aqv^8VXwHz(7Smj zym5*tmdGG?zZE1Zv|K(*y@5Gg(lp%hT6k13^|ZT8RzBWNgaLkIp3iT}|C#;$Z%GvL z2V>(TxYHES&WIW%qBp^$+9|p1;tWF>CLid(#(oeeS&LIbj1bbi&|k|AQC9_vDy|6) zzr&X4F;c*2S11XXiuZy0fbi9x5VTKSE=(;*@c-uD187*j>0#pPV>}S-i+NpnwOx&W zgHT+JIU+#<&k0rg%Yz2R!HzGG_i-h}Uo5TbvD%z7PG$Vm>m-%d#*ziO$=KY-c|DP8 zr3|bWtjMTX&jA_w55-#q&Sd$iH)?LD!sW_1k53|@2c^OfDoF?=Y%1g+CW+n>#7$w( z&?k7N0y^SH^pB3Hmq1rgue%jzW~?&V;qlIV-(40qf#|fOg=&X{uZv@ zP(pRDsHb)EhT}y&b%VM%v8ci)PSe<)zt!eeA%etT0cD|H#!rgfjS8zt41k=n_+C`v zj5Sf%8m0&4b9|fh?0>9%!qvb7Q?iB>0B_NC@dZpY456Gn8GocO#yk{kLN%2nwGA`{ zG50*2N*ChJ@fQ}jRIAORHOcq?{^9`AT#Iu#lNGJ z5Q9-ZuGfQD7u8&k*L?d<5OEG?CmIpJcK_92M-QvsfPiGoyDyP_;Rqj*kK4 
zFUQA(VAf2P_4|MUn9cEbS(9@c9y^p03>>Y5Ke5EWL(*j7d)O-j#aeUf+^SCo@M%6}4$B}c$lq`8#5Y8}decMMqzSxM z_odhwp zGE$!Nj(+9+6UTh#Zmi5x&$qYzH}OEvGQKM8zaC+^xI0BU<}XXaRrU@8BG0Gn16t?J z#n_e;u#9m9^}A!1TQO4JM~!@`%zz6RzNV0y7e=HBF+445@Nn#9tfQ3}emPTDOPg4H!jMU)GrtHv)CzgPKzzg7u? zc%KMKUcpUjVUq%ITDpMWXIE!3^wGTqG7)>n%{%ZA5h3in5L6m}#GBrK|0yWn@zY4G z(t;QqP9%Jwuoi~{1<{LzxQAef-@>B>hLm?VZ9JLG(hLuhkN*5SDJeDBb5D^A!va)g}@vnb_!ua#KKWde~a zg{_zi|M*WHeZQsKOf$|`VieMg)~uohuFiQ6?r>KTFpCc>dF!BX?4*qlr9^ks9-t=fYa(>Mm@ zRz61iVLLH1{montk*d>;+F{;P*O0I+)oZIC5c6)B5X3VzLoM zVHlZ91~@>5Ha*vE25+e-I<(56Hb7ntozsMh!Tt0&rpMwBou{nT0^~%vL2;R5Nj1{fqsT%>z}1EF(c5Apz7BDPS%)UIn7Js56WD4aEX$ zrWErBTOz8He_L>}n*!hI(jK>>8I*jI(DITvwy_EX*})xSLi|BwCa#6_%yF&rMpG0e z)fNY+pxLJra_#wRS>6w~PyAk-lioAi*~zLf2qUw`M`8bnYUM9H{?qp87hcCpAIo>P z(wTA1(e`n4G;j?eoOea(U2hj*oR*I|cui+_lsEQQjrh?BdH5fn*WSPK&pB z=Ky`dy2k4<0@&HfgOFx*a$bxdfck5p*&fzIndN=j3F=jDR_E4esWBfrgFVul+hg^| z^yKcrpNu!);@A2Fy>JJki;Whk#WvY-ySwKTt#BH`LzKA3Edaa)=63qK`+WW~vXdGES5Du-4{dWGMKa z?H-HKNojFLfG`SKcA(7_9CoExcA3QGMMl9N6=Ro}998$dM2m_7tSR`Dmc=rM) zJGMX?=OlgiA?d|`8*!}tSnw(-D-UCy2bG%$ixC){$e$ko(EE{C4<)RMPvY+r`#LEG zGf2&Lo(z=yHa?5;=QpIKzoyh3MC@0aiyF1IPx}fw8?k;W2{VJJJ9smZ(%U5H3 zI!*^|zjT$Poit%gP(rW4eT4|N6EUJUS%Hb)2VC+0t^wwWnq;~^t#npZ1jb`nJ!}TN8%Km#R<84A_+kM*N#YU zVqO4YPoT`uqB<22yza7Y_lmr`kC57avxn-fmY4gOAk(r4C>G<=3d)E`kgJWPXG?GSqt0x- z^xUjt2-|f_)>>l&xMN4~n|~z-v)ri4fibEAr$dD_DeKp*`20 ziFvByK>Z{IUeTZFlA~C5{TiiGD3hlKiJ?9lmjvK@#Z!Zy&>M=!YnNL|Jwy?cPT(*e zgdtA&K|w%zADK#}pQ^=tFHBjmI*4$c#1b+>0Z#D8Z+H}hBXP%#3n}=0oQPD9GN(i- zQOPtSDi8{ zm)DZ^gFoiL*8c-H<~JH{9_{KIYi;$8YyB|sp%ZfqqnCLvC2nB- zm5|qgk|fP|Rf4)}bKzIa@@oMWeR&H2oXOhaP;*C^a`5=;bfdZWu@zqr^{Fi&z_W+9 zf!`fs0A_SD^5oRq7e}bnspOUc`d3Po6|Ac36*`!z_>Aru_F&tx*=xcR@3noqTrimT zhr~lJ&-)_QK0 zL_P54e-a`e8V9CQq6(^uawnurbYTl7#tM}=i}eBqRSP=9K!4G@I6Bya=>tZVS1=Z* zinEk~5*t$^=~QRB!d7B2z}3%7Jdv)sps^}Gm+(9y%T2*d4hk>wbwWmmMa8|@=3+X! 
z%$8O{87f8qFDgWk)X8$>&_U2IDHA+B{G)$2D4u^CKF@ESLhH`$khEBfKFd1Ej^RT)7 zq&HQ?8>`DtLWj9B1KGO|YUu42tA#p@Rlwfs=QK{;85To@FbQqROe`V5`=G`AGJQf7 zKf+o&F6v%6mb<$iA~m&8K}NYh+n$MI_quZ}hsr=@9{7ze8^wjaUy_|mhV9f(7Lq=QoazZ~}ebvRpxWgw!WV?3;2wNy* zYTE)@??*z3?;UAidzVdHW63Cf@5!ty-?=o9-%%9{H;~k`7qo6*rVspUe{?T2R3fh1LbO z9$yp`@U!@Vo7xHGK-UT$bnyNzU;bg%8!i+-ohtBUW1@B=gX!4izDZDT&~+YMnu)aSUZF z>ax$^KyrUVoKbHju}R+_rBSIkw3GI((B>#4P#V^!E5p4pcts_T(P!1tIia7l1J7M$ zsCauk5fW61Dz*=NAP0zRU6WSFMm9ut<<2zf>9WrJAIz;Hz7BJ&%_AdYIou_ zU3SlkN)T*R!gZZ-x3leIraEdqWU}4WEA4tb5$q8kB8~Gk^(w=mtjV3*{Ww(d zXTPcis;CcQnz2p9GJAdUa5of!EF>C;F_2oQ|3)a%ZrIT%o+2T=xXvj?L=MBzf5*%K;;ZpEe9OpRb ze!NTp3+Lu!Sebav#lsJ^PsHgZtE$B8Q4X~LQE$hEqnRijVhcW-kgUWA?=oJz186tN zzDtNnuLTzaSrGm;BLc8*z7T>v*#afqD*fWF*D?~F`oZOAbdnI}H{-qdCi|nj|HJ@2 z!EB1@QY6-Nwz(rSVM*E}anDEeHl;W2*G^x(`TEXju`>y*B8QfQn`pSqseerS#vfL( z=;{N&BhC=8eGFM%t_$d_^3a}>m0DnlO8>ENXs1hZT1-jYx@`Rh&*-c4>x@D5(oZmP_ZXCrt;uHmw9dV0PR14LOvYby*Gx(RKPOKl+j2%^Y@?;T zw(G*?Yv`bJVb0z#%i;1TKq%QGusRr;>+Tu&Kx^EgIFBzBa~p>A>XTO z?M}Q!_b`fX*Ty2ZhyMuApW<4jViRhJ197yMUM&`r^Gitm71reMuan1s@jN2|YE%Ho zeEq)-@rpyDOga#!^t&%B+_Fzz6;X4R=Gz;~b8FV<$d|y=zE<%t@hW5h##iTW5x8%2 zHoRcB>g0rA}l7U2Qgl>?2w;%P?$K}Lb;=Q`+v6VBavi?`{8fS zEw?uo*DaSS9}EVGj))yWZqxtlDL}3@hOifE7PgQIaayNB-ps3>ipI82Fh%Tsa2epn z7w3)&LWt}=s_~khztEO8mhRk+XN$Wu%3El!y_bW%?(iQG1z?LpmA}^7kiJ8qm1^w&Mo8q*soL}2 z0ybPh9=5<=A*)_g+^_mK*l61OypcFkoJtNu$gRTPiI&PDTwDvqh`H8#3*))EnV2Fj zqqKGKC^&tt@zD$MC8{+AA#w={<+Igq@G4ZxuS zynrD#O7*6MkOWS-mRv&RbM1r0;^A^4fwf3KRklNDL=atDkCzZ5`|=|i4Do-w^jKxP zSkqWIDNtA2tK}A{XC{F#+YAoZEyDqJphJ7hoA1K3x31r@eB%azSCQUP+mAXEd#d-%vdAGEpR+Howu0ra|e5d!EFca*stR zKw4{Zw{UV&lS;}NPW>?$5KHB!!3xHkh@Wrp(`*|QwA4YmfW-2y#W+=_U=!xr$PO?G z2w(G`FDup;_SH>JgqmFYGejIEqbX>;oCW(IMII#U*3(FU&sjz#W?j5N3fUbl5f`De99`e)k*v-6Zn{N~A2ajf#3?NPg544Tba zr7;^Wo|UEK#U)b{{>9KK%9HOFzjTld)b0SE^&(FG+_rQ(WU;iIdqF+Vm)VyWZJm&z z3X+f);tI+rmiO`;#&^+n*v}y)^UG~6wpbY0_u^w}TjIDB$Jj*<&-gbPumt>76-OuwkdL35b`_h zRLpoElaYKZzo=JWEJix=;OEL=NT-}smzQi64rQ#c^Aa{xHWkJDeR8STytlu}!Q}C= zc0+_B#{G1oEVosOIGuugukmR~>Hvq7K5fGCu{7iT zBF7a%%CyJZ;;{s zZ085;5J$(IuNEnt9zzpDNw%EFaz5Vanmz{GFP$1SX5BDYIV1T(PFMG?Ec3G0p?})q zhbO@5a}mU2dvN07yMC|A|ELXnT%T{W8}Uz?Oxv&pBfkXh7#@*Ts zW?4b1+De;vI+3GvyC454MbkVM4l9tvum0FlLEi+VqF5{Q?bES_Dhnc1$)8?O1m8C^ zQdugxEkN(?9BA)0W-4NYgH&!7`cSUL8-V6|_i(>KC zJ{`aR7=5|3?e&21l@tm=_cLZCU=>-uWnhbKn$as(oq9#_lGhBBCJ`qT4kD8365Ex$ zv)C;CgmDqF3}Q7gk!%k5=$5CwLFOUb5bH9({08XR?brMPPR-fjQQ9qvLP2M9D;r1^ zEj1azf0fJjKsLB3OLdo?F9#Y{(JE|9?y%}^eo7uYep*c6;^7lie~fvvT4`#|(b6P# z;qm|3E`AXQMqJy=UJ@3d>1YZ>k7(YNC`6K{&6D!X<_w+=%?dQ&KnyuF{r zaOjlqN(@1Y`XjM)N-FFaTdeH{oC+z-`1#`DUBPt!5I|`!~VujJGSRAi0ZpO+aWln2U$_h^f(#3i;aJKop?jhUJj% zJlLnik$fPJ4RqjuiYmPohL3ZPn ztO`_QGuJnu{d0w1_8y9G#wBb+`916#747l$BU!?wF6USzfC5ik zb~*M2zjUo#52?g+zaA?9I&tv}Z%tNr;)e9Z<~XY}WW@nk%f%64fIfq9PB;W3wR+8W zV08Z|hQcz=AZZwK9cGNb!zn93mXKn$D0X38WX8jcO~JtGg@4j2?kuB9*cqr!qCe&0 ztgA>1hcq2Kt{T(3*A`}`82@|9Q-n!?{KK_gL=qG+Hm69<*OYT)qO}qG(KY?ykfdz*UM6#;| z0?-Wt3igU6)cfDD5Pr&{iY>@qiv3Yu$h1OMmvQEQA0gz2D}+LOu=X-m8`_z5MkjHb z=%q311##+X=E=d%7upn{Ca(~bVQ$Qa+LQ0cCao{Fc?)h&@aV_K`r~u$+4%77_Qp!Q z660`F86<@vFx03znM_g{V4M~%E&(OA5vi#(-2=~vDQa&oZmo~E&-x@B>IV5*oP_&0 zbm{y_gAvhBi|me;(Uh?K{jwx)@tMR0c=I~BR`Slf@ zF)6&%fZdrWNe=6s1yFfaou$K#nwB@J57%(BC>scYTz)<&^P)S)#?NiE4R5>>{YWW` z?Fr6TEJyFr7=P6EVDuM&l0bm66!t82OL9s?xMLyXA)=NP_7xW~90OEdUc(q9SG~l7 zl@w!%@sAG=^_zGaZ=94M0toz=7$mAz{`yxi&d>p1{Lwihi-rwmkd`6IS+36+#%jBP@IxU9O_WG5q&G*ys=&KhZyh+dPE*>8IN>&%~e9&Ark! 
z1w-LqVeQ4jt+wUG_;fcKG7yOEf%+-A+e7l+Ns(KwPs)g;m5Mr&TTqj#Odu9{3 z&0(GemNN!IPQrCdxFSg1KC1=KdqAoZl)%~RgTSWtQx>A_qk)2lZ7bUY%Tt>80p!LWGX`ZE+m$2He1Nq842$Z}!gBSHpJ9jqNAox% zWBd)Da4WAI>U`Ja9eg2lNT>b80*GWeEn?mg+7l3Yz8DKimI)D%taR2@xtHva*^=;I zbC0%(X|pI8ht_bs7s>r!(-`b*Dbc2W{r`Dsg3m<(!&ut~l1MSR;uld`>X-gnPe;@( zFiqhOn&$W8Q^N372Ua-#-^MSy3rz%C{Uy^9!GOcet-DJ^USsgo!~vhAoq>38k|tK% z+h@@hA&$9dKI8IMc&%M|RV_(OlBh#D1S27wt+EXk^`qrsOI6mp|weEt1Ferls1HKkq2A`)MMXaHU!)Hpb zK8r(Efn|^=v$J*G?xcYO=7Wt>EGQtwQVCW>_7QQcR25iYfBLIGYGWU2z*;r!8c^n# zJ4lJjWyAm<|4C^arzlU@WzpIFFa?tf5(%iq@0WU79nh%{XIH}_8+nPktKm0_l~%x2 zn#Q34V{}Y{3Il)T=c)cWcuEaEDeM~}g%IRaE*}1F`#ryw(W0t0;}_Xq(XRqPJxhdd zyA?8n8X%5S4ts`lM&l@_0>aSVOKajSG*^rVN91ZO?L1CXx=iw2opN#YT6;Aljjtat z(n9G`=D3uuM~YURrZ0mOxpD#}yx)r{VzzX3ncR=xU<(y70Y=hAaPJQu?V)$29LLCLT*B8`9Va+M>3b+U@Y^e~8QAU#b%xQvrv1^wpR-xDF@S zp09%WJ4M`k{R^Fo7^vclqk`MYlRe98*9`{%Lrlll1sR+!VmPMU$0t1k=wmIY!h<-vO2}Z z!}r@4eSRS@xqsOEPq)u|N@7lXHN*<|ZphL+TA<1j!REc42XW7%d}aLi`)KY`upfW< zS6}{Z{~ucFZ~uT*$>!NugwwLy!pGEO-DBt01P1j-7BL@awuqM7#8v}Cr9b7b7yYn_jI>Z$8zAfV_tbHc4%R`={piHWZL_{ z=vI!UReCoS&*j(Wqh$vk6qT&iNR`YNYV%GpaeDmNR-LHP%F6Kqi~Eh?=8=iWkdnsT zR2I6I;x7jMmT?dc6a!6qkFeNs(GdK+>iyx5x>u@&PEl%I*)kzxBp}f0ffB2akkgen ze(D%=A~IlF^GQ3#@A+ywEm=KsNpLoyv&8vPz}}Y##C{l1CwYRDv612pTOR(Q(t|6Y z4S^eC9wt^1OzB|Sx9+Ca?HN(T_`TM+^Kvm_e?gouW!2r&>TV>%$f@_EyDb-_H|{t& z47H;l*J7~%U&Vp-8mom253)(1(ZuR0%y(1{%mMwn+I>pu`b>SQSNIPs=)>&;@z2k+ z&%}ft{~w}|{)NlGsg_QfnFu3y$1%lZaz$jPDs~s+P;|lt;wbY*+ktO;N_^Ov>k7VO z_Ire{%djpdcZrK3(duW~=bwpX1|S9KSNEQsWEPyZ_qrk}=4heyyv zBu3tatbqOU*{RDhQqg5gEEXQgP=+`S`PJn%A9IkF8l6fd#mX(;X|Kh zCu$f|r5M~H1s}YuYwTGKZjV6q+$`lrdo8wu3+D9PhD)y~NEQn@Q}JwN-r#h{7al*P z5L*0jv#q+|J$A7SLUzqxCi);Vk>T!pV<{OUo3(GG~T`A z{gw^0SvAci))LH|Dp>W79F1Atyt&YfXGxBeTF50)ge5P5j+MRYQ%O4SY-5t`F$$D2 zeMP2TYpd(A1hP8-VB$y6cy9RY_NrA5ZN)^VV0E1>*g`nm{ zpky6t%Z4|OfWNyV>4N}eKn=Dz)n3Y_c4H-e>bXhjAim%Jw^;ovl<1BR^)$m0*xB4+ z{d9rwexo%= zTX8)C&OnS?D$GKzkP*%8wn0te(RdgDAr``2nIw1Pud6KeRuM@YhMkjAl9jN>6j2Mzt)93>4Hip|UMS`uL(`o$hezDpQ|sN2haPS50$fW)$^ZHt&Z@mg%SB5j}w z_u`WrZnmUW0^2?r(Ly-%63yw?-n1Mk=mc~TeEPFKg2Jkme=slqgUyXIXsS6?W{YTqpJ-<$Sz=KP*jxsB?FF3ggUn$7nYq!PuHK0u9 zTL>D&25^DgVMr8KveM_rxrtA<&pl_!&}B^9;#kztS=)_2Vr%|&w>9Gz8t^ zG>=CRS=dZWrDn-B4~mJnq@FU14#!5BQA#PW@-;G~1UkX7DSD7wK}dXTH~2KKRCbY1 z-CLkoUSDuX%QLjD_@p*;G2|UV*vs0--K!Ird#_bU*XhPO(tn@P!yduP%^h81eZ80uP-0gc18(ClfiYi2%C{z%CMXy+cvM1e8 zZMpAM>MAHESJJ^8R@%jG22oT2=AL zJ@l=8I!wywv9jrgF~pVA5=$EfkCQSkbxu_lW9OjwmzV8L&8HSa`5fCJ#R8g{aIW$* zwb}rxJmi;VLLWD9U_(wiQVWPepX*K>~miGx{w;Xo-4ZAx(nn~;<737V z`WYsS=n&OoU+)>eA&u)8NT>Vp8e#dOl!;E0N?9f2OK{+Hq72s(3xn`Zh;ksN#_&f6 zJx1VR>}tcSuDGK^Q433MtK31qkMmvvcge5=J+*m-^WHkt#X~DcLG|~xn+``zk8{VC z=7K5mX{nS$esnkgctl}|7{92aa5&>{+N$+fdnFuie6)og{Cr#dYTKdRh z2N;+1Sy>Nhhr9Tc)j3~N+{I7kRF+&iAVCVTr}vLJ^EV{3`qR_)h4NjUAWyYx>Jqi; zv3!%cfsg*FrdAWwVyP|Px;#2WW)-?NAt0=7i6zW%uM^R5IK7N(T=3;bb$Km*F^5q$aNr=(L;{CjarD?GK5#dUtGl(8868H2o;W0g>F36q@H}X2 zzf-PTgSIlpJ)Ot@h4d7!OXvLvI`l3qb96%Nhe5VSdDY^db!5@^`cb#z=*-jw#E&6h zJ@#S~j;AVf@ut#IL=r; zb+@iO0+=rxPjx>JmHT{q!57L2c|M8}#7bei9T1j^g-G=s*8^8n$v`0AF;Qfd=KES~ z=qkD^Wm=qoyQ#oM+hs(6w~RjoU7Q`c@bR-Ebb^mO=LO`LdTA>A5{o@h=whf@n#a$; zTcZe|p8;S+DMMYVsb?-8UTZI2^H-**$m6sOMRtmrqw`BSn@^Gp1!Na5zj*i-MkbkX zu>dOOIf{X5Qzq$vBEbc!*l5dZ@g<=kJ_ql@vk zi-)+qqcj`(l?7^>z78HI$)FYsz(#ttx+b6!@*(H=aa8Pa&{J;BLQG-~>o8R=h&>j9 z$E#8|v7p3)3oD*;L)n=!LNsv0_>EUy_G*p=SEhaEdB?NiYYy9msGe%HN>>w6< z@xxd}-IeUJo2MxYU~tX-kU3}r6u2EF`VW*4n@v55(^5%ui`cT-w^iH}2aiO+FUM{S z`py~#Cgb3q#Unv}s&<4o$5O^0-+#M3_jZh)v^NL~H?~v z9~-{b-mvOMM0?>V=baoP%Kx*|FYN;u8~<#sXZQkyU1U&f!5+B`Z-?V=1yx@7q_B^1DX 
z30#Wy5X*gMquue;!AbeZL0L0)_DV>q65`&8FFv4ZvcEX5%bQv;p1s;=!G+WkYxcge zaDIn%w@`z$q^&hk_7myXiTx`l@&|Erkzuhkb=~=iQ<5r98zApdMzPWTgA-}VnGnyk z*bUL?!wh@DZ+P@|%bh~c1c1G?06vcT+@5R8u_#ZA!|X2(InKG+0&d4?dZ^|Qhi)%z zgaz42a+Ioayc}PX zd$Uta$9c3`B5Q*)u8?VHmv`Dn{mqrR_UZV;huhOJj~T8`r(+>tv1L8uQ5I%M3i8Rk zYtT(Kemcd_=O6jX=mRnAQRTYLYmEaKLIn9a<9?)3WgvBOGw%P1ZrVC9Rdq|cCU zDuI%5P(IO~j1eg@21m{jJ)#AF)e3HncF}z{E)jLAK468TEBz2|Y??2p{4KXTSy<(~ z)pEOic^vZPmTjXaE{_d$ooD`2q34uq7(J41?$bYHp0sB@WaLZlIXte`+6&S#`txibqBuk^A$s1P5-3x=0ype}le|#b+eGhH97pmKTcz zWm3q8kTuBc7RD8$S*4^xtTRWLT|bY(J=Z=IuR>sc($WM`#m*U8(cCs7k58Oy4!+;= zd=l3>DUIw`h>%jlha^0&FW8=rP_?GX&A}%0(A4Wbkzj5o*%C3lPSLyPpJ<+#kx_OD z5XtMhx=#w((Ksox7Y{$%p7KZz#etWMP) zm;s6NbB7sQj#s##HYxR5dzn*^gL)jgw=87}5|IxU4~pE2SgOw5JD4F3A*IqSrKz0? znSS4o>{Q%Z=Yl3q^NY9F7vm*ts5EE_G1W?+i__L`k62(GcIJRJ7k}<>h@Z2;tyiZWM- zM#nZJmz1W#t&#Hckb39bGoPXL9VJhx4~^$X=CMHunF7inNMG@-w&KT$%>}=!GU(VA zF%Z12EA5s`A-9*V`7fl5lrR?tNI_0vxVmz`y%dj? z`fV5z`BrfTLpX5-V3}xa1@a~oKxY*WLV>#f3lc4U8&viJUco9TM zetbbG-u^#6m7`s%fb>=K?_Q(i9r9YR{!6FQbH@vw?Hn|B&=Tqsa@S9tcZagADjLLT zEr!VuWJdgpLjbCW(BN{`*AoIJ>|H8hy>*#(6FdFRJMF1Z+?3Gb0x+)J8<4=x%2*My zqOw(kTz7UrBj3@Y|ESzv;`(zT$;~nr@!*(>!0(` zshoRqH&%=)&3WuViMR@}QY-=;ZR*fb3{qCMGiSw__-c%JqD7ev{LHr->)w6pUG2*m zf3LxW?}zvRR6e6SiLi$V&$Ep;Qc7*O}W9sEYUZ1glI1!UkPlwTJ10m zufz+f;`40~!59q1a$S3i7XknKRZzmRk3_#MlTJs&_*h|8W)%DoPbBL1=tqW4<;^Ba z9NP;@om9i;i$(bX{%arbbkMQ8q~&iZcpMYJ;(MmO{Y< zrT4gF-C|LYK~5M)A{~#pQfYah#8?K28=~N2jfnrgQj`X#4RQ5wKLhJD18*dVBF`>E zM?J>Z7UMypf5pN;@H6U0T6{I+Z?{_o?!3ZX5Z_Xac+O+lgXT-PI@TYp8_^OEZw_fG zszoi~g-^u?phA7SJ^Ehkx;##lc?O|LGUWKE+!qTT{`_)ViB~Gfs{4GrTf8n^Z5Kak z>DS$)LDPz-u~umdXj&XjLfvm<2pIoE;^4>(OoPz;Gv%GbFuqf zqaKj2ozTw!JjA<|7nCB4RdBt;J-e|5Q7@!zd_;~-xkS*_!Ueo3iUqa94EZ8GY5F6l zORF34QT%N@v|=UD!v!BPqXBXZI;$l13brn)rbOh$Dwy0&A?HR@-4T-HeHd2vsm6~? 
za>7#d*{%Zd_s|rIF(#<4Aa(+=`Fq>0fAAx*qp@azr)HeH1DTvNrxJ|#T|ZPK4*??V z7K;F!Ke|66rD*1eGk^{``vSwQR-agms5nh?lwgm?_Z|m@ZZQ;|V0`92a=U-K!D-E$ zg16iIS*=MHO^G3&j@VQcVX~Oh21wAhwZct^bup9;z8-U1Y-w+7p!}SofsM#5;=P>N+IqXvDI<4dsR6#Mc=gwO8*TOJqFaxH)pUr~c4k9xpvP4^G04V73{ z2yYvGk3AmBfClnA7?}n3Lwl%M0{*J^j~2tqI*Tu1oNwKoi8lO4C5tBn1J;2!?wopb zSecV8f^)L+lL~T&tyaEFJQh<1yn~yz zFl!k|ue$7v9R`Z0nPSx)@4xqQ96> z!@Zj%e71$`(+Zj$qu0Suim>4E?-d^^T^7z&XtF)w{2Bzm)Qdf|?aRXW_}%0PPlYU}~29U@1Tz>HA!lNryLe3KVroU00@ z0zT!9N;_08pfdMOwk_c>;;_H^a!dnG%VfJ)4ulZRgVb5crx4TxvMG-2&{K=+t}dtXAE|yh0cCsOzRfY8iyP4-zSIa;$ggeZLw{{zq!O;ij`NRyf3~Tf7#k?xpDx+ z7^v8y@yA8Da~5QD9ZsGvCYs#JyVSks55d?&ug3#OuF2b8#+rknq*c*dF60L$=+`O8x>o$!IDvvai=*-ssX*}#wx!HK>Yh3P@x9komBxagZZ5@OKn8R~te9jnvgvyT z%pCF!;$`qj(vPPA7bnqmYZN=Fo=G%Zi%H5J={Qs@VWxkpn19TiYX{f(Aa&1J!R43^ zB*V56Vpkn@j>zp{@hj@6-kkZ?(Dgc%lSZ!QYfof(Hg9 zv6!Hd<&@rkU!6SSxoFMFp|hr+YJU)a0XmmcRo;Pgmd)T2rP%i9z-k;DIM^w9CNZZ7 zf{au5;|R&Yid4?mMri8AxIxg@5gy6YzG#&)qPohZFNBtyoLVA^fd%S6H#IxO84@PWII-i-*- zhBQYdW`A{gkyzbg3D$K5z~gPc3QEW9EcW!99P6F;=r9wfXpXw`e&#k&at69Q2-DqOeUH};JZ9I{>sThAVI>4Q;WSQY9}%gOVOFEA=YpKL5vBFc=QM= zWF%{Bp4uM&$;H<~0b@l+FHx&f0oni~v23!m39b~SxTW@0FeUurhKSB4ZfEGsM0;-6gq4#UU`Z>1VABlfce=J>Y zZN{WfE(m}YKWfb3rre^JSmsfotE~_`4Z&1N!Lrmcp%ud{4yb-5qq;c%b8fs#Xgf{m z*Eq!l$Ya3hi}b{Xe#5?$Ndf3dSf=obWf?+yzhx=vt23n>Ae}vkg$xp- zSBC!gW9PZB@wkgkrV>`u;xW;WSN_hHDs*`96IfX1@;)u}EI?2ilZw^0l{tD8D4CQ9 z%ytKOu>A5Y%~8XHh|p1jt?*p&tx4NB!cOw~z!}@EOe?U1m;G02PR!`REI15Ix z6us@n1V<;fW}VfGf4{Nb=KQQ>X|{6f-)?s@CJ$ojh|8*~m959ZP}edRR;jCW%kf^3 z<05v5WpfoDyEd3zkrR>Vky5!&NIdtLwEGBQVlv>-Tu1Rdxg2P~bl_In+LB+6LqNbU z%keUnox+BubZ}=xl^a*E%IKh07&_|Qix`fw(BO~)gT;g|{`}(42$#iD{C;~P1Wp@~ zCB}we^OgA6U3cXX<-#?KUdNC^mQv6#XRwLmk*W%i;`q__yRnO^u;Bf_z1ZH0^|^Sc zq&>Ga>5(G=3Na&{6@tUeI+ji74HSF>mwPIEn9-1B5r0h5T-cZ${sd>k>hg4Q#;=qa6>Bj^t!~f$ zzHYz5#9pOwWjqVs&U{;n7jX$g@&B2>I&`P;(0a)atxJ5P^FYp%KGzk-~V2_5h}<2j1}!kU(~pt-1o4LCheY=;0Uu)7`{0~yyRS2?3JT| z8l^dE8U^S~sNS_wAJvICXWm(*9`MuNv#nbo*7d z8Pwyoh1k!t1Kh;Q(W5Gt7hr*-SLA#SN*|G)^!j?Ky>hCKxd~X$;wTV?gI`|0@ELSy zNG+WeON?<#ZD-731s^}c8YzpUt0hTs@3wbiHO^(<#1ALJZer$xG)GHkB1xL?*AEtp5AjJoouy~^SQRA zz8e^yK5>*$6xXKW+(gSrkI@0~fI!x8>SETa{#r`W5R3al2=)H?Hd997dT_U7<@r&% zzpMAOOory@D7@n#MOt~>s)1ScMHj`rg0{{wc|W5Wki)cr?!9cHDk>- zfHm{X=`uJi{x4@>h)u<(Z*nN}{Hevu>mD<5``&w*1WcyMT3hpm?mXDq5tT1JA6+0a zcPINZs3e)8;yAhk-i_acYSwY$^aj1{F`^|e?qFnf^FJVl+g z_UOkRo$xSS#>M@)(K~_HHb;X6pzve}`-j`_$3K_aCp}0mqBObY+DSoSH!xu+Lmgrn zWZ9A$dklm%nF_p-7WXqZ$gB5s#9vEz0lj${G>y6IQjdQVqD!0NIi*TG7mrc%4myVW zJX3sKMHYo}n>$I_!^;BjY)c6ot#{tz?rscGF-s$H1C&s%%w-jSar8`yu%NAqLt2C& zUoNfB#m_^=OT9p+-6D&{}CR0fZ>4wRO!7UWPRh2mKG)A6S^SRJk-Q)ZV@_ zy>v=um*h`)yTr~@4tt+bj+u8FSUKi(j1T~d)Zr?#i|mCM;JqHsb{rjmNYNQg95BXs zrj%oP@QVHvUNzOXI)BJcvPccv|MGSBUoO61rp)cJS6~E%(WtH!BU2&3sy${wQpLQj6;rPX{~3YPU-2 z-p|EPV*~HJ0i$lw8?}ohqWFt0I3T4py>>Yp>VM+NEFy(m4W7E_#APMar=qx#xA+lf z-5(dMmdPpj+oXiCJjh1O7I^M7TYf}Ah1yjOP_{;ir2A+wX;GKER$zvC){8*J(P#eE z>g_kFz4-laP=5~RXasxmArRqM_zgvt5_(r?Rsvq#b$3+Ad?JJfRZzM;{@$Rkc~(WYIjUI6fz*Nq`h7@oLDv zOpw3vzEOAsL}2kG-QaA079zn${-Bq0i&dgKJ$SSqF(*ovnwWUuxbCKD$Sc-9O_xR> zg~(bZZx|q)o9*C$GZ&-ez>iM;r<4SmKF|E8F3>SPM0QIY>-5Ap@rZuyejr=-C6A6D zKk~B0c~0$PaP4c#?23*FHA;C->dU9wlh1j>2^9dg#+vYPwl7`*UGST1Tl@%8&Kb&M z$|s4``AMj+PmeP=LCK5~W5m6=Y-AX+H0FT%(9DC$3%`lq$a`e9C%Z|`2ZntsM#*0b zJwX^_a$bJ7eePrNkdBFu1eAD<7j`xktB5CI>mZgfCbCb;N7s7PCmjzi-pN_21@4X> zb1k?j^$#&LxS8_l0G=s~(Tk%Zqn8hZ8Bkd%vuPqr8Q!w1{%`FpoVZN8vqlf6$K ztr`PJi&bi4Lzd5-wTwZG@V{*3Fes8!`)qC6s^YVV*xlTVQ7mZ%l%?)vTVS9HipNJ5 z)U;2*R)6FM1d=@pg!mrD7iay4qIO>IdD%O0IzIlSo8lay={85n0Od4rZ+w9q?b67O 
zGA<~jI+C#th?0~228AOE5+XJZ%IY30l`zCA;u&$6br05xYEZ5)4CzcAQ^4Th9x1v>Hdj%j%hcX=;l0Tj*Urv% ztxQR@`l!_NS5ek(vN~B@bo;&^<&qWdI4$%{>6993yn*(6FfKOKE{PfJY-}Qt1a}np zbUQ3DszV4&oIT(72fAfj3OW$dVJW4yi!p>)#d`F{mj;YTrR1$;d4&Fy@XeKW<(lPP z@=f{QlsfF3*a)q&QGX7%bg-|0cU24~Gtc~canhwpHp%>OdBCgJYZXh{vE+6Ry#EC6 zKi1xli4mr2*bp(;4&I_4a#?y5JfuoEmuMU@dYK7lxklo6j`CtL9{DH(3cm^O?!BB7z9 z;0EmMjnyJ(*3q7x<8`?8sf?0B#~f&jaR4|UmDmQ?6-0^sjHR8MZ|i=aJe)yu*gaM8 zt@$@&gc|T!DaHO8k!#HM(NWasQMP0Kuhk#)?|Ab9NL#4a4eCKoJ82;P%7zxm`O={C@r{GX#o!Unwv@|p( zhJch#fQ{`<PGWtI-D5+QbcG>X z!8{WLb?3wpdHaPsTbv%NBUa3Xi#pWR0v0#( z-L8Ymp#wA!%;)^|f#I-OL|s9(8Fv^%w1hEM_uboA=KbnotZqjfMu)y*-X$YzuY-p% zxAjHm!@N!`$zO%)YSOQ0UGndJG~q}V-xF~$rHc{^xxoV?;UD9%%F4u+jcr4%;y_V4 zE`~!Bs?y?Vg8_L%k)?e+KHzugLm~b-@)YyFLYa z$=I}9@X|KxHdHWZ9yy6lMy#%^#WyD?BbNHc*D|#KOYRPKx0w z*U^J>?>Byr9@e>degg=-s$kTr7aMrpDevc?C;LRBLiDiHR5CVRxXk*-;(|k*ZTZl# zJeNV3bSVi+7Dz|vL#7_XE#QEkWPlTr5u-5kjPp)KwVeclZ>UUAaadT{?Tjuz{d9Z! zW1gd$?m_*SrcAA3b`-G&LjZkGcc%l!S7L@6Ywa`fZ}7Gxz>`N3zXnvf9gBc9(I)W+ z8p2(RVd%_KYL@@`Nqm<4tXfZ9_{a=4iQQN`3gFXPmr%=}OVU421+k{@2l0q5eI;an z>B`u^#ilu_f(DhX=X}LJbI|wp9B+P+|L|pA^!5(WY47z$yWLrk3Fj7wo1d1c>`*O66(q@qcxYf^=m2q~0y!gh7+M)I8p>aZ* z+sA0jbc3I`|@Z?YlSU9Z4XDKKCx^qH|!n2p!!>+^Cc4jxD<(3S9t*`)ceNOm!Ru+Y6L}M#>dI zDfluq;JXJO4_I!C3mz&|pW_bh#WO`EYhQ?yL!|=PBCZ!r=3;jNTnG{aGgs!)`SsJe z0w~K(?u!?K>;=V66WMv!Eq+~79OW@r;K4WI5iYG#-7$G`DZmPu z=AoBj4+NUb>-q^9q9u<+X2DT$miGlPZja8(Gv~=1D2AXSKc>;M_vQX2DemX((a$}A zOEa>f)^E}#?q*CFJaN}MLMjQjs2bFVC~%&MuecD_#lz3Eax)N-|6z~Fd&_oV?CM~6 z8Hf4VES{969hROZfB0nk=B_a~F^Di_*j%n}1J4Gl_t zJ^D__z0zWS^~WK8x_(Q}0?au@5K&KFjn}yRZ3>8pR#1zf-)ijyN&F~00yFYuNR`ls z@{kK341LEW?&Ac7^b)2%PV3|G|4x}Jul5<23(>I@XDU4)kY0PG&;dEbd_}_i!dW~R zTdX3I02koxGd-{|;AYb77TYozYd!?KAYq-)&9cQ)af_TIDX$V^p=>_Qm1?OcazeO| z_R8MFp@4$Ix#>QBY#~EE_BPDPbT4XSk0UYzm7n%Hz=&adpXhAnF>+;aN#&s*y=S_j zfJ=b2_}2B8+f^^NV%2i^Jr2bIT?iOhStJNFpM{&uB4uaQI}g2wb?yCC;37Q(kQwv- zSv#x}Z{8A16+@#4LSc?@52JNi$DGyLOetWcPnT;Bop)ZmIlv7l1!Qc^WV^1TRriX4 zg{BfuD)&C)gX73Z8Hji8PUJ2o8E zs3l2Jzef|L#DWm@0aNj;hdL#PaaVtyigMqkW#@;7|BZd6Jky_Cd@J6;W2mopQm6## zg2hOx&Nya9DWvP|>^N2(Z2wiS?AZ~J4b>Npn7vno!}H!y5i8z#m!1HsT@+7Qf`Jmp zStU3Nf9OEn#=Hp!J7;L&!*Mp4fqyv4BB!V_gXX<&&sKVvDq31Q9MRCKHlmS#cijdA zC*~M@fumLK%2l+ET=){QhKehOWdV%ft6Yt_5PfQQLfhjMDswe0P{R=B5_JyUWS4wNE++Kb!xi|tFX3pXf9lB}62a76rJSy`g-g^#v(JhWc9)UR7Q z+9s6@wVI%i-W87-joK)q-0*i5SRCiXL=7nx%)#V7s(^u7rZH-I*VtcJ^P^Nm*T^lA zET z65o4oX8hk5Pwl%N|MFzg)A-`8;GHk~`*MiWuSRxIswb8=__Rz(HT_cW5l$Qz1+|CX z7ZkK_iJ^|z&xkj~qFg-Wjx8cCLMeQheBhgSxf}j4-bp49JpS6R{}dP~pzdC0PlbSpsP|0#RRlNH4*e(382nr-ySmYlYjpk z{dPYP6Iwos=R;53!qHZG01!ZRk0`gQhz=^=UqUeQZXv)*Mjvbvzhs$U4Js<;M8IN{ zS0!dC@AqeiuF55K>ppN0i4H!{gx5$u4w2}k(Q7CgL!>}nL=8}Zh3iB4YQ)k{yN#mW zO8VDZZQesU-3KtMwW#)KF#&%uR-euI$^|vW`v0W3zFKV^v1-)|)ErRx?fVHULLhzG z%Lv~rAvF;6)al51TUztL-W-XTR%m2uyQF3GDY}UT!oIxFO8KT<2fh}(yD>*(cio>~P{|4p zSS{yjhAnQvG%q(5(&C60Jxz{l_V*E=WIZN^IiQ=@VS7$3wzpqIekEXP9OE@q$8nqm z0v6Jf{D#&T*6-?pDyn@!MPxFN7-UpTi4jOkDp<9YosXA*epfQk0CSWJa2qi@(`Lk7 zOwB(p6ffG@{tv$;O#QrvDyKx4Y_;omD1FXQC;pW3c)4?GN#FyPi5Z6A$t^R13yKz1 zkUx1;NJ{J8^ z(YB=iNtk1Boy~Zuu!_V$46xVEhF0VEV2td=ZfOluJ~&p_uXry=n5fYeq0MQD6TLdj z>`Ri%*EjNs-0x+)%g73ztykc(q^?y15<-rdkOluPouicHyM*B>?fYxUY^~|Lt_r%8 z(H+OWDweWUtT8-F)*i6YgTXg!R7Ue^E^;1r)U~xH)DJzVgvELL=Mb}cD|OCn`p0uX zqW&zt?f7srhSh^Oug31a(BA%R?7g6a(Ue*NRY?x@hU3eCHN{z=lAB8z^foB5d*Hj@ zN2$cucqc*V*rJ}`<2j29sq%jq{qcF!N|C_#+Y8C{05?%>u6R$+Qn*%He`98+Z8A|~ zmPDFx<=iVxRYNO30jYKjH`t#fPYznKcG}TA{1H$kdUJmuUVUOJP=I(di)-@{dy?dXWIv3-OgA> z5Vn1<$pX-?Hx{IDRBj9CvP55jH7Y1c6(&pyyDqFtt$agleR6QGNV6a;S|`OS37ve) zQ`(>_u>d}&L 
zYLj}BUII#=jf)*Cey#&Jlufv95!qIfOeZtuhoK-{&H4&^_Q*GhDzbahM`A|kS!KZ$Ey%u+gr zVnLr44^BOJiowABDiG4NC0U+GrU|0Ze}U&bqZK}!(R2=*S=3hME`)+rxi}7?UiMn*~MYEX73KdeE_3maIFwwAbe9EkG3e zi4}EPXkXZ9rny_>R#{sYf~QXQZ6?SRm5#k zyc^uE$peNQJ9e30;c@lG6=^@H)&_aJR6`C(_Lh!s`=UiHqjxT@sZ zV@~)*`yeb(cl{zD%gwxGSMi}17h*PVY_vshx{A#MKbp=$S=7`5LK7v+l(P zsVMzR@duoy@@^7Jrp1i_@AT_V&j>_G#^P7v$l^NGuIajw!SFmgl1D*HO4RzUD%;g` zUr2{~Y`HsoClIv)hzn`2rBkc?k#PYbgtQ#*6t{^0B25rNHbex(D9ol3lx&GrKxR%# zAcc~P%{?9Ho5BfWfi1^u6MjF43P|I0nT%mD6{Oy7Z%O@%>l}IUL2Z zRuXY9l8^|Oa!^W21#5+l^RwpKTH5ySfflFc=6aiHEb*2SC2$MktF&9-NtXTlYQqvg zzR^|}<~9O80Q|148s=X?{=N&_>wDvi#=o+fJ)}`n*wStp)@$g?{C3+b_ZhOL0Zmxir&U1?K5E_Cqk}JAV6T;BeOhUF+;$j`ZT;huZsIDKLrrlPI*Q-?aqrr(bJJmLPmP3W;#J7Px`S$u(Dp z7HGxcV)(YJ=+A`qNAbo2isOi#WuYZPrRtWY5m0xBu=P4aJA`Po-IHspC8BCKFg#O{ z3R&WHc`6_ONxS%|XCl%D@yxEDz5n;snc`r)n*-F%k69MGuV7T}6FJhYFv&?om5ZQi zvZ=j^_f>#frTfmPIFm@zV~VXNdqs+c;6c1gfBSAMluFQ3ML*`R>RI4qb=PS8#LW`w z9;4>EZ@dHAejKU|L)unnyW*V1+)wf5oa8LILbhpsI-`w^kXcW z2vGQ+@u&4$wV3y99wTEY9;l?Fd)r(FV;{i{1IJ7f9i`R0DE~n(Ezd^ZO z8CZ^^DPJgy7O{h)JojpEq1csYn!Z4aGne+b!2b|7*r}=Rli1>qw-1F-=?>)%8Wm&z z@n2BEIToka_pC+hdV8ycNp%w$UtFi3dmIeZk8=UR5zB})3GyShPXBM}-u=1m^EmUY zt^E&n_lMn?t=cz=9E-9fOCm){c5DX(KoSx_!T>1RF)i)EjW{BJ0|PjOKt?il)PuiRB=GF0UsU-dn8}E9lcciyO-tYoWGO3V_ z^1D9C9tnt%JTZw0|172_CTcPnJ7vl$eqNO@D6BTz*P_Q0y3!>}Cw`8yBmX*MSyA(> z{%_CIjjs?!Q$dh)uoMENE=N8xO7F!W?<$ZMJ4x6IGdVS_e#~uD=?&+vs8Ax03IVtzXYRc%kc`$p(fL=K?cZfalV|^*%Zt72FiENH#Mf^d{RxYA>Ys zr|Zb0 zzsa$AyR<=p-9^cTm@b+Gs>AV4s4DJ{SFFp)fx#&i*{&ecj03uPEZy5JYy8vs;uT-C z=U4TV(b?0O#zjWf+i~fdNqZLMS`6-AkuB*2%u;i- zQ^bT0|Ml5-{Gjt@A+dWLX%`q*45*fV1~#HDcpViEer=od8Mn(E^-{AygfR%xc|FBF z3|jyb$*HF(J0~q0^lA+(ietH3P2*FNVN{V2Bi{Rtv!$CVbbU7MIjO z9ooXyQQfOSscgI{DmlJV>NBWR?{HB4R^%GmsVaq(x1ebFUc42rg+o>geh`@BO7BK) z$R(fOFRrvgV`yydOY-(CfPfU-45{{dL4{G_rBGWQ6Smi9@-2nLjSf4uEFd{X1)k5M zIvj9KbcvFN$q4vs9)(rPA3IA^l{Vji9IE-NE` zK}rw-e~Ryfp#4Gn)E&z_%gLG_=U52Zk&76w7MuBjPb|4T$)dm{H*taXB+xg?LBVM& zk6a-oEkyo78^=g!Li@ajDbF09yk%9*7J5i*-?uj+^-b_SN(uk<+57&k;Gz9tLf^tu zI8Rgd_25BaG*4o;U6sdF(H`zgfh4%B3;B*15s8JNX%uyO*q=4Vhm#njull_@S3Bh(){8p7@02Z1q63QR0I=RBPef7k0g_ zs+K$CDaMyb4yTPqB@3uN+s3_A_o(q=TBA(l-NssjDq>Phg~AWWlvO%FeB|oZI*gD~ z!}{#u=i28iMS{(|Sh%HcFuc*KxT$diQu{Y7yf;F;LBwTCojX9(f`_T4e*) z^e{1!hZeLehcsS5Rl6wV!7dvGWZ~B>g1qNuO(NSDV?~9F14<-vL%yH0cW~9xC4iP+ zcv|yca+2+&u=&sZ#9NK**%aV;d3ZH@3g`!vk4ML`db2dxMJXI6P4MjDpSDtNewG`o zn3yw^NAe`EaHdNl(RFn10mWwyGqgdANe>Z^mJ@2(18<5SR1hE&n#HMec$^?qMLKWe z)=3wJc;j$A?*XmAC`cA2da&FB&{PRjg;c3Mlb4G?0O7=6$1|7kmSbd8?rh6ee@7?| z7P`pQ6*00rN%iczS9l1drl=r^qIWUOj^I4{MSMpfq1^E}4s+NsZH;UlJA+iiL%(nb z;hDtOIOM$`qsKOj4&y>9(S7bQU;u0Y(mbDV78@5pGjRxkO%=vglVi@NYl* z>Qs%9nY+sWul2o^jty8HgJMYBT)OIFc6`1EIxDs#k3zjfQ)%=rQ$piITK~k!KPWzo ztncQSzIXP0>Ji&I*&DWZR7cVRkwkZpLO-Cf$EaBc_>05=yr+fXcnH;^8` znIiN$J2o;^Zi24w$zswHzWH>}H6ATs2=kQ$)TBA8@x{dUis39ylD|Tn$jEYpP(Iqhae>4U&x175Ah-WQ zdG^1EM+tZ&Y1c~^;nolgu!EZxg~c|Pe1t=cAr*IwPg2$-Esm$3YEOGg<6Ti53qcNf zU9XaG1pA4zYzovYACKpfL%L=O84%?FNj%*3a^3q@{9CKTug+KG88?TKiWXM*+l zvK6H&oe7N>G!*Y8DJ8v6c(oVfQA*~ge{ifm;m~*A^Y}&-u_Um7*BH-HUpxC61Bilf zou-}p1Ieo;zLz;O7!UEGaCMiY3E%x^f=PcD9{Rg}Utrakzk(uju{LM|Rr8CfsK~u? zDc%oFDD!Dr$D-RX{il2y@{G|F@f9lWsg+kEbE?3X0ll+_Uu;jtNdDkc?GFmE6O3g< zFzQI)UhD-p>m~2{?76Ec$1XlGgM$&b$NhFc1cqKuqutZ-|C`GmYpfg=vO8nIv`Wkj zg9VyW`dDxk<3`p7*}~+6zZAu$ikfLc01eAe=? 
zFxS?uTZ~j^-62=PW%w&T#!q~ry_?Yp(cfw4RMa9O2_)x_D@vNPTb@~IGfS~%D6;*+ zKVFyWnW}dn`bhiWE3ttb{?ecYT&R`Pejsv^=kPWb$>1Gpn^|-Tds^z~D+b6?$6y|R zr=5KxBnM79xOFNfuH*amkGix7i~#D*k34h7EFuiYcQ9o3^Vw_QXz4jz9c~fJPfJYE zNOs3|3xz{}f!`UL0pvm`XN?<5Pv~y?A^-hPklifFHf3@mbw z)Vsbn+tNs^gAsy+2oYze4BDHd9>mm1N;=px4N83BaowT{weR5g(iwF2xwcYjD%my#XPog6Y03mmCkgn6#TvUy)p2 zE-#g;Un(nUM+=tU{uBk6Q#&pj%tZ{jKdY?0-ezL?#*}#WvIvSEx#ZLH?Q%A&#LSYv zFbIXaTM2i{{4BoR7upw}i9Ke$u+RJxmwEP*hTRn7cnwjoI-K8DOuC9FDPbxs7{^Q1 zD%KX<9KP3%Nt0%23X)Z5{pWlCOxHoVRe#sT>)-oiYyuSSaxvwByn1t`E&1=!XQZ(8 z6|a@873u+vSOE;Nr{OdEqzGPwtf$&hkw-Bs=KBo$uXR zE#`d~q<BbX8efS_$@j4p48#i#NN+Ii@x=}*h0RI{xnNeDNAPQyg{O_<)Krn z(5^AD(S%9divxxPk)3OV`&%gO7}wduKNkBSe(Z70v!BNtbuU%dM?EYG$y6l`ejBe< zNQ5guA%nzqIa*}RRriFF;=8*g)ss`T>h$w%buB)pgf9+!jPK%0wMP}2 z^x)X?h2KMAr;x8$!N>pR?3Zom^La$(Zg&gg11fPcD%SfK+MW0lf1ts=-*C|Dx5+gU z%HoYX6vmARvVk!pRQ2Lw%;G%A9im_C-IEM@{9wl z!P?D8R04^O6$>j>0%Ih8!>^0Mb<*nDFXF+M=)fG4c=O%%RA$>^6%s=GMttsCTb#F? zQNuv=!}ue~zzH;hCD~qM17D37EAe&kpkUiHIYIU}Q*)j@{A9ZjpI${^7H*UZ_1j#I zc(MhdU*Y`iZ)LK%GUSd6E1=ug_Ud7kz|rcluh?(Se*tpTtPSA8lT;H-PFZ{!WkdRk zrC_+fLnbi?(9YTteYJ8;uuZLMmKBU2Y~*0cqZ+nxeHgUIo|TXsPQnqnsK~;oBpRNe z0p+E{SRen#N8f7vzaLo{B3aZv^B`;FRLTF!|#HU?PqftRW9^aC@GR5W?>3vi4 z#H#SAh{iEYXy%W8bo9^UIM^%rFZ&mchR3;xvamTf34AWpq}rF;3x-=djG+&4fy7c^ zgXXeOw;`b+uDoBj5563NsFacCo+v70%(CL(#!vvHI$be@laS691?xvCe~>H_;*tRu z?}rAJ>#Q@rTegXk)0aX5rAE0u!^@nhnLLyV*E%@Hqw4+u%rTfAuhss?1`_YDWl3*$ z%RZ(mvd6gioTCH5+O>uTO?JN8v>P!7h(cr1R`Y2X!Nm&L-P*q^`hW$Y>oyP>^kBjp z`p$eQ=eofy{Xo|q4MlwCauWf#b?7@*7Y2~h18Y4pBCM=Q%GijQzeQ(T!AzKjG5Bpc zPB@n_F|sN1G9(#fCvR-lOl_DpD-=RC14wMuoLALlVk=eT|H*4&0JZWx1WfD6vGIo&kLe57K$tT zA>^-OMHMRViIBLXkyu3`!&IvE4Swk*my6M*n}J>IUdn>S=-{j{|A5WDR0tK zks^Tp1^sN$F9pY-{upW*-BAlz<9=-B1^5yE)Aaw!<`Ys9Vwz%(hQW~Y3aEO8Bb8ts zXnM#TegehBU%2T9lxQ?q&w8n3h60YH$Dng0I5sLMoxH3-FHj z%|Nz{f8#3@z(ftUIy~WpW%x`{?9BMeh4$WC9`1I}$SviKiT3vhhg`oHwJdU_Wyk!9 z-%o{e2^?&w40YZgTt(}7yZnQ~K6p5JFA`sY0^(DvY6hi#8Zv&G*0R(>dl0L_vH_`V z{C=1jfAwMWV5(`H{ur}9nWP~bAHGO2DT@UgLOWqBzTLtyWZ#JPmvw^gfG5&xC1-$~ z!&b~D>iA=uP8|#-dU07!BR-tBlR~YSpvBWRntW5*izLx{zz2Yam^Frxryg$~Ge zyX<4UF*=yS(AeWn#PtkNg%l6}SLZwXk7cM%e&=t4>JteqU-9Af z>Mh)*=;~q5zv7MvO$MJ}m{}s*9LkN7p(tK1+TW`xqe1lf*e4+m%eTTt!talNKGi-F zZ7wB7_PlH_DqbG3gvznG-WF#p7Nx9xOx@(5*4!0P~Th7-htp+LUk*ozSonaE@h-C>?-H_&sH^_x7 zlYx+!5#kf5@5M)W?-`o=#fm@$c-dll6*)*2y$DXDsRk(AO5BhbXV&iMxFfhaQU>uz zjE`NP&x!Jr?GYnYceb3D)^9!jg7tWsi_R^0oV)<)nA*qYov;q5cpzD_z@yc{>ix5d zB(=PB2}Sb|6Y4Oj9k-MRl(((^ok#?f%VRjYq0%uh*oh4gHcnGI9Jf~5jSyPg6DG^$ zZ3z*V+&_^yRSIUWD8P<|lrLnQqE^O|khVE2NC~m8AB!FH_4xbkHnVcWf(5pKK~0pa>YTb?Q;jyBZo38s!^yN6Fp=M&UvDQ?gA-8=HR_EvTkz}jHNlfy9IGCTi` zb~C2OS?BuWtc&I1SCUIMI4u{K%1*jQfTUCfNF5=Q9}D~`xU7(o*6~Xo_*lp|BF!Rf zkb_o8P-+I&@}8kUOZ!wT*yF!!XTP+ZEw!21`qL=A3(>pTNgoaIv+zC*L?awXq`wU; zT}<*zu*kdw6mgWrmd^|69gmM<3Nq(n?4Dq5Fk3GH$$uj-(vGOcw;q3k zCZA`&3W)&fk4-1#b(o^l%?_bm{_Bl4_Nehj$P)iN`Kmn{Gu#$N3gvVb{EEQjv63Aon7PFs)pb*w8zOBC7=t=L~e7hQd6UvIBiC3eS zg=E1Y^)mIlQ{^K3A-d5sf_g=4p`h`kc7eF#J@Yr9oLcy*HRt265Sv$KMU@@%juhlQ zE-)>0bYUK0#k|{|Ou7IuTo@stQ9EyDl3;J|pE~Ab*LhkK1CJ{$o`+6r#%Yk8>vpV=lr zxX=LV8V^|q4B_8;1#@5XS6 ze+ro?p5$xf@Aj14m@m){Nx>nr(HHE^7#2w}L}E+m^~?}5gCW}TSjuA-tLSfR#$Y; zPLPOs>W*{5AAqJwKgu5hb&9cGzEsP*pd`DSU7n3KGHDfMDFcYbtYZhqMSZBWDTaGK z7az~Ed+vc0dEw*2SZuNMzJsp)-$MpPHBV|-#sZ3sr3LU_Zi!dSHmv&skPax9vgy}x8_MfUx~3Jo~{U*0IKZ{-7tGFgQ` z7P%A5y=)3b-yd$B_&0k41=?IzZdMul^^)7;b^s(Lpyd66DKJu!9lwl)&{^jUXK)VZ zx%-Fdu|gXq_wPF)P@F9I+ZM>#cDj+LaN&qM!3Dk-=>QOt*_)|C4?Xevz=GvmLM4i^ z$dgy*bF3G;SgS&F=i)p=lkqCZUQ^5`p3v*WptwCgvXN`zBU*zuK)Wgd+htHxf{*1P zd&E!OR{)C}0ARRT?Ldkr6LgD#s(xIOyucpvD&IM7<+RFF5luOi>ZyCsuWBA$E-Uc5`ZwC 
zOoSyX`>TBE*Zmr4thDeTMG);uhZ#qbM(z_&=GQT6dKLS$ZsLSZPO#-dP7K9Is+4hZ zj9Bd<*Jx4?Vfj|OvgnDikk~IJ4F~@R$fGNTyp#Jae9;5JuVB8~R6j^p8v*$E1tqhE zJs8!N%}RBRI2sB8k4k+8+&OR$54k^GgM958dZ~7Neto`uz9@oT>}90v#k^&RF)5gxP~|nHnIiBZ3WVtlVUc3E(Lsxt zCc#X?9U+61m>(W7zxb{^h{6@26;GyIfcCxUU6y(Ri*m3V@K>E~yikg~rkynI$&$7I zEY-dl=Qu~LXn)COC*ZCEHYwlAoLG!eo;`$Z9Kt<=x8S#Sq$>Ud@8EKLhut0I)82y@ z`!VaG&yHo|+2uHU_|f)}5ctow&&Kn>3SP;7(+9=!Hz8@MJ-j!YOA)iBBflXM99g_P z>=l2CZjDNSP`u0o5~izekAA^6(o3liR>XUNvhVbgQcGw~HPffc!%D0vKI71iY>IJO z(eN||<;B9V=w;{ zRmlS1zbX)qr^RB$`cSrs{=EFsqXE3bBZ(IZZpyp&ws4j>YR9egwF1#gY!u)_H;SKl zMC{y^fhP<|bE8o`FTddJq1I1P;B->iFXgm2L@YhT>b&)p;uQNZv$|UQeX=l96T>7R zE^Kv8zEpQzD}x+$oB=<8iHv)J6I_fXIZ%dbyc1!>lq%v(7>YZ_^SRNk%~}hbfEZ$n z+R~*tI@V@9S|v-8C%#Cy!YsvwmO*tFU3A$8)!<`X03cn7#nE1Xa1%p4r34&kr3bUM zkIe@_QG}nH0vxH32|)!C1L^f<1Z!E-^+1zM%k*wvui-K%{FJ6EEi#bT0m-j~EoS2817CXFf+Mq0f2VUm61A%LhkHuCMR*);7vjfxsr@)Wk+~Lz` z{o_PkXulV)!?+vndQrGbCcc?2sF)Wq7l86OW)oq7DBdFD?d5DndRO+~VOldVX+;{#K@dXSQF(JOS-Z-lLSI+Ola_P!y;WjlF5Dyq5nsN-@r^o++x-Wi( znz5=wQuzQieSRJrBQ`~$>Y*=qzB569suVFFZhN0q91w#A*EJqV665#U+1Fy{o}2Jq ze&#K}m&L#-^FGoYxwVys!0ckXwHiZ0r|%Z4VnZb{*g>ekw)bVSS3c8VawRbD;)ZAb#O9nd0Y00 z*y}LIhjkEAb^LMr#LdT3c9}&r9#Gs0q&hiM-hVFIE6miCr8Ufg?|xRj%BSY&auO(-=8`f_TanTcY>YJPS93*u$Dxi@ zZnNJvc19+@>s+i*CEeHBznl7T73N+Hk!{OD))y{(ocadO#>Mk=`=odNKwaOff_(B$ z;S`ZHm?Nhm0Kvs_O({KGJx6*GV?VKH02xYB08UESiME%q9%BkDIw0pTiK1|{%aK@Z zx0Yf;6pKoe2!X})5r@UA28)A!opPCL6Nnvwk%|XPVwXcgfF48e#g<%J z^G_7Wx#6kF#b5}&{(6lWU?+zq6mKMAJIb<|H2lE#Hd?-jp+1!osV042x8TcX>)EJV zn?)BqLddXhH9tz;!b$v@ z%kdOXqyWTo2x;L%y@qKADLl4*nTw#9?id-k!kaX{i|3Py{qz@~`u$iA!7QUf6n;(4 zrgLH`E;PRqXOS3q*>k#t3uy#k@oDUiHj<&c!t!|vUseT(@+{!y6ovb4d+(F}@GQnB zgoB!7Oeth=-<+cQs-)!#Y|P1z*#j?{$RJwLzyifxjh@&SB<98KnFP5uP$p;y^M0I% z>+OoirHmfbjDdaCG2x~&iG`}7rcsHyxV0DKf~s0;m*G$L2#W9hNdl#jds%RNKc{({ zMD4Zte4J`G#3at+#0BOezlNCMm^7}mv1H1*b`j?Hlt>X)tNSIQz*q`3HvW}@RIZ4yG#_Ili^^xdtNuV1V|StW%1Mj-haO>Y#BWCL#hOW-k85q1!J=4XikTdV>K@h zKpJf?G^&G^PBL6n=X3$TVQ`=fy%F5^I0S?vct&WGB?Uq^@e5=67cR7?7^OU%6r>LuVBR)5}5kJo2ws2kwG?fu*JH<%^ zo+oDs5T|`A zKI4<#&MU-%D5_U72Nj^fQ<=hCG4R1;N^D;ipC3Z;s;sw+GLGF62gf6{7u7ot>a6OKk(nfw58%k-7qF7ij_*W>0 zdgo40iO3~_vQbEDs!+wwbc5Wxu1F33d#&fRfh{A5`fhw`js{oQ{?#0(EJpjhSExpb zetr`9KqUP9Oa5P*yeyUsgJC>N%rsq`Eo@z6j)zl$Ap)=+<$X`DmpFv6_o0153I{+qxlZ+*|cZP&4?x zG{%lo(kVC6R$gU7x3q9)ys=W?7QX?v@V&V~<_TaP;gD)gb3+^&ID7o9_UH%J(VCgv z$8NS`Rcw?K{aSl11|_hjqxYUr^!2Qi9BM}5Hi^)e5C%?}q zUMEf7OIoG352dmsWf0r(DvL>irydjKwif0VqTjvdK`peY;y1$N%=qGxA7fMI@Px-< zam6FxDe;O*B*u@ygp6edrAYKgxwkWO<(pUwwZ6UF7!SRdMxLfJpA!A&3!=f|SFed$k z`FD8Bgi2lwd8&Y@e(P!Y;vj9m=D)wm{D#;v0kD++@sT~k;KT^uPX7&P@-+1yF`<_IWUzVg#y~d3`jRrg(5TBptq6fH#L_R+lw!z#9eAH#cDxb z?HM|RcVlSkcahNg z+qnkZP)K$?ItvZDs@lbcr7o?p;Jy4KueS7xtFL+v6kQ5+56p2NZbbJCVoDSyO|B>G zmW#12D%^{iwFz@R)@6H(O7?MH91keD-= zud+M+320uOu${Dh3HflN6kNyS8U5AYd`Z_n6DO zu~n>;WF~Ke&enVMGs8{yN4q@}`w$Lej1gZNLRB$&>B|fKJI@n$U9EuiHhJAZSl(+2 zXX2kw>%gxWZ1;9q-C7BWyNzvrtnw;;?<~=yoXBHJK7Buykfd*aOlWNO)vfp7dGpBYO15tAu``<(eg62pAd*s>?0s&hf7uMHkxq{Cs>34|>Xso!O;; z^=XMH-6lLpX{~gUxYt{fl0VV@=!tlec+dvIlwn}16EV#qk@KYbWWez}^kEtj*1JaF z#W;a>eqNg>^%DVjkznK9opD&uQCXjCZ>$Ey+*Ys=YMu@Xql+51V$c=f3j^%sIAsc` zdCfN=IJE5;+em=3j1{85jt+9S2?jg?p0~<<19IUp${}_WYgz811rSq~6HIv@e}{Ag z?uF{Mf%X$TE_Vu64V0O9C}&bU$?^m_J0SAb$nrm_x-k82euHM2l4{QK{h}-hXQB)F zL@X(;-v@sl%a3}1Ng1Q)9S`le^awe&{qLnG@VoJd4)J@itST6?0(H6Z0(02yco({X z2>x$lv#7XP5@v6pd``CRlBFGgr+?~Yug!X(OCaOe*4>HyYbEtD!6(nScV-Wm4vm~5<<%)7&4=I|k1 zpK08&64gDR4k4FHykHHNcrV_J@&Ag~L)i$dsMkZ+T73`U!^&^Q9R9mb&W?dC@%h8d zUj)W{{5O`l_2C9>r79&uewp%5Vl>ZRYOh)fdYxPzh_3K2adDXqaJECsa<}*q8_oki 
z(ajV;Dv6sC_TD7iGZ|-&-OWmSpj!$s3TmIhwSlUFl<$xCxnX%{ra*93sKe$quoTyn znu&SIqx3ouL#GZwt&#%rqRM@#w9g(s(cX+NBa?a4N#^%*nu@Ts(YEKcN658XVgK!a zGtoF=5&Vl{p#R@{TL^)2Loss0K-q336Xp$WHWelbF@`yUsS^iA%-bYmc8uJGfFNJq z8*+1*u)aifB=g`GITDiVAAe z_v*RFS=q!Ymz9Ru)vIk`#gcopNBX+J&RBFA2_w${c9-^u4x|1KHX<(t35)NXJ$$1r z`#T(Jd}>mAa#FOf8lBXhs@E{o68S?Tu5K@E?AI1Yvr_4zKRPi)@pVIbJwv- z>RMczkJC#B^S#smg6sePiG!18-wX8FmY2lY!>8L*MnNVzQOGwL1aXmL&&isuW|d-5 z*j@2RppII>iH{c=EEbEhE)&py=bhRv41*;__6@FlG*=uz*8A`3NaqZnK~}n4;*qNE zi^YjU;Zue)d5n0+FESG}#>~C+#ZSiO(I1x5E@?$ficy4>tIJzN@=55;hX?O}d2wNW zF2>tafzsiYk0_koqDmuz$us;2At)?u>Kr}dauQPK_yUp}Q-E7S^cE68OhGqb#ote5N3nRf3Q=BuLu>qx=K}n@>af-2IzMye_M2&k zX{IZDJe9!+s!O~NQ?%fMun==CC5_VCG4b9;!Q!bP_tOFGcQUMl^W%;nC}H z2LC~k!2jBlm!~R@IQGbc7@nl_m3pXD_f-|336*+K#OM`K%io++nhyqP5lXwqGnJ8e zGsG32RFN+W#)`0q%E$&~K+%?9nYh^9+9|HX3Ob{;V?G&bHyA5*6-*W54J8+4VpJJJwr<##} zBCjEV!qR!+`Iq8>q~5@k zVS$V11X-DRCmcJ8LRP}bHY&7JpJVQ$^$sB>1jdQS1HPB)0nCQ7O0h%WJykhe!S36w zzzoRfehC1elmR$B74F`v@#0nfsGan5w&hvlgv4r0k`NJLZ#PBPF_^QDhp1`8#yyLgwHvLv^({JQIc~%_S57#RrVhhm`NF^2q#H}XoX^7mnDi;)n?M=5? zLnaSPV-;5Mpvc#34<^LnB3k=CwJc)ptnoW-Cw}S+QDCDP1(bGcD7i~`rWh@AsN=hM z$Mk{pEEUx`9zmas9ir$jp>@nVFB$yLY9RjTFQv`*)7v1%& z$UugVu9?Qo@m{-d!DA%~)d3^I`(>M;Iv_o!yrVOyhV9t4$TOSTFFdvu}E07%pNB=Z*ZSC;q5?EW`*KUgiA!>8m=Z z+oMuwRp)V(fm-4}TonJ)KVe`z>q+V+bEypS*Ir0Xb55G?4sR?mLoM%K1r#$?OxcW6 zyUyX}T8z1T9tPsI>ud32=gGfN`c6Q&O&trGLs($HgfownAGuMg5zU;(B>g;MEQ540 z6hU83JWiERCsdg<`IN%?TArhSE7zX?XG*XF@)SE7<`+h?FTJIUZJa6{yB@>6moByt zP(unOoL?uj=$vq6i0|KvZK7J=&(6L!`TxrD$sfE;tCFa6r{<9^z~g^h1^&|Cg6WI# z5i0~Ltg{f-CuQ6|^qe)j)+q~&sxJa6)7vKix zyXqmR_jr~*PO2EBHHkS7VQj}a1*ICMK_*>523!11?L>-?L*1UFFQ{6r(p7%#5mQ13k>U;YHFAk>YLClV% z*@V7?i9PwQR|0gfW2mvQ@>A(K>`5s8240ybhQ@6w?au0X2pX1JAILn92MI;k9TdD) zqZXUFRt+WocYc1dGvsP5Jw|F~@BZR>-E7AR^gK;3<@THfi<}#BOvFFV>qJb9E!T|* zd@@!BI$Yjo!l%~a4|tx%YupC^plX9hLDc$2NL#;nu^nQ`SS%r{MjH`|U$v$9xY~=T z|9}ti`VhzSbe~`?lmsu|6WD^!#X}Xk#dNKShuAQOi9%3i_^mYER1(l-d%D8C7$S$R zG@YKzyvtrboDz%-oL&PL#B`2T_%mjHMOEaAA0iV_r?NQBOB-#D?gP7}*v-0n3L-S+ zK|KfenV4*^gln^S22v*^Uyp2&Gl@P>946+WFzGM6rq_mtP!(+lRD;1 z)ETLtt3Tl$<>=@IoZo=_FEa7>nur3Jk zEQJiRoBp&lzbol{vG8Oz*+w&U2!PKQAC(ir|rczQ2 z2(e}o-0ex}Y`+8ydd&iN1+&z`0YZ0v1=e83Rh4X?kHwgj*~lvgt0ilsDzyj&6Kg^k zq@`UCC;J=XO8qcAlizD^#V`oF!Qk|LLZKC39~d$S+=gLA*rq6MpZ+&s|HX!5QCqut^CL7pI?fcOz=3%EJ|&xs?& z3KJK_a4yYkc%P{JO7Mz_&b00Amj67mWGN&^HGnejhf95e^S^TsuRgs-A{JJp4YvGS!2jOAF{$L5E%ySFX<^jEobK;&z*+>pe z%_q<-CscUfzTG{CB?*e|v4PFe1j~w3=@+C5^+Jdn|675XdmqWna! zHEIL{1Dr*;ua5=}AErOZZu!+D&Cwhmlz{H$q7-hZ$ceuJm#)_$$^(2r$xP>V3g|8A ztGz~l=c>o5(X6qM@_S>-qf+iC^7}Qpkao7!u{2$YTcxHPlfufELCD@%`SF|1j_Tr; z%H*@R)JHkCn5e;!YLCUZRDe+AYNcny+fltQRQwg=asRaBGhXRS9l`~K#_El-_6q3S zRPO)Vbd{VgtvExf=P>P2UJgncPwU?H(Q!Ua_6>MrsU>zyWV{WLsqdtp#7EF&K$0PG zXAhrgPd*uEPSsoR_Ku6bm|+KTR^szq-z<1&3N=mNI?U+Y$`(hZvf_7%i4JRjVq`zY z&sugFjGuv%b7A_gya)KDGRb40J-5lLglUZB6XSXO%@6*po&CmV@HH$CDD4iB^nln# zc2PJInc44B9u0m~tn)JzjEz?vpyH>MgaBGjQYBoy+fe2|i?4ttqw9`#V(9uQFJ#&p ziEC}19)Res743PLq@TH%U9a0_n^qymQU#EcxV_z*7{%~kCKJZ`eJ;$AO2Hkpcam5? 
zM)Pw^cr|v1#4znzE2vL^sO%n{z{%Q685UK1uv_CbMUrs0Xb;^*GBKCBngUL^4(MkU_Ts;wc28o0)xA;;(~M*Ee%wGpO{Qu74bX4h80@% ziJCahb>_>&P-^3@aEcnzbyByHJVlS`Vtf9j_(|F82WjdlT6M+O;L%nuyDgjC2xW)9 zLY5GXQcPW!6*!k+#*6d~x)`sLzH#M#SV&@Kdl$MXBM&F!y4NtQ0XOb-Z3}RxCyh3z zlv|MiDX9_#zJZNut+%z&n`x9>!tUrp&$HYe!S)&#PoDF1(G@mda<$Fnxpvh9xyFmA ztl;NhwR1;Ue!18LOQCUhi)Y7KSGv4>ubB5Dp)M#2eiG}3x{N=I*Glv%MBsDn^Zuj^ zIy1%=S#Q5zq55-+CShQ8LV3<#S6z@n#ugucmjIxT1glSQzN3<{`%kxzKOMVEYU<#y zAS8oL#@Wq(AE2=+@f{o_%JTScvE)AZ^B4e^@VA9s#n0rtbfR?#%wArjSP+Na!(5{!Wg|5!+sT0WcbYA6dl%5c zC`e0845k;8b^&GaYwhgY76xEqEYIU9gn`_H-XAk&*U{oMB;mSjC9E|Gzg@C?9&WMC z#J)Fc*GFo!0=SkjzYWm>5S3+2x}FkQ>G$l*;i`)T^|xHMq+P`Wkbk^U#z98KAng#r zx!?AoJLgJ3@U8ZM2w``}F-fWbp`hdSgp_J456lkE%>HQC6Wt!rvPl&sYETJ=$KjS4 ztMub-_lnhfd#VT|^yP8L`|IsWyqfr)TikKSJ9N0V(rzXGr}zRdw}j6SPG?ChE2P~| zDkb|fL|;&21=Z~lAPy}zQydK}2`quK{*}eVx%ZR!Xmo6N<|HJrKnl; z{@jOo#hS%z)huK#_F$~K`NiUckflZh@#oIxd-+4#%1{LF$LJ3^RS-+!EAlhmNu(Kl zo9=%w6S1Ee&C`EcV3s8n8!e!{uT0iDc)yC-En%P9SV$%1uRQ;OF z+-?#0d-Bw9=WD5AA#T3!bq)N`pI4=d{)kvd;8jaacjJA=@h#CmRu5EmE&v3o67SJ) zA=DO%Cd7K8yx7C-xN9y`y1V>IzK4HYgr%4T5INoWu5oFob|*$k$0ooO{Oe%d{8Yt4 zy!NtT2ye#P1520iMqg8bZ!vkuk!=|F6{?2RMLT|zLK3B!o}%~wK-JuXp2&I%^Ngn) zftqad4lg;GA#!T>ONH**e-Uu-XAgGlwEyStYKF*I4AnUdM{ z4XhI@e`P@eh9a^tC=D^`0g6FYV1?c8l#On#`Al#mD8p4FbgKv4AS9%C9i#_ySwP*n z0aE0K+y^BTPibN`mChkuZ^wJPhMetA^!q$+5$IiC$Jo;UoP9GSuiL^?WlNU8(D0;l zWFT3(w!Ndv1gSbzDmJY%)-vF5vu&g(JtlMe`~D`A#*^5t`ikC!QWoB1S}afZuAie6 zQ%VH!{8}ELF2-I!3JdI|O1z9&{Z2kVkXr!ye4}R@bAQ+P$oL& z8|7l!Q&RcK4vwh5G(!cHokI56BNW12k#`%xy_!>~YGp%ggpAIRfm?2Gp$RS7l|*V( zuEsKWquq`lC<64spT#DF{~jID0YMUb^j?DS|Fs0?|9hWr_3N>U3YOyU`5-N<04v5M zIM|2Dc0L|J#SQ7|W=(ckSiatt<1b`da!BF{sv(jN6H1%&l@KOmg!aX!e5%osd{dn8 zc$Ug-y?e*P#QE}Z75hUATAjE7^^F7&As1eI0*%L92GguXQqGl&5O3-QWy*V%ZWn8W zHi8)k+dkCTO1t87jd22GtEcyK9bdY!+Ey8co#+u&QuwUT-5v$OICze2Qx8VT0r8f- z_Kw$PhO{)q78Jq1yPYXc)@fo-Y*3oO2LR8B81?^fE_C6!#sA8K^S7Z}ck)cE3ib}x ztKSGCQve~Ln!iKaRB{?>P6e(`C%=|Ep+_}gj(S+fIAi;%1u!^KB3x%7H!vS+q&-5O zQ>v3wb(hNRihCMdy#OdKlb)X&bU&tdbwc5QM?f!g(NT9x(5Rk3Q7y8 z;*>6)KFaM|7Q#}g!+Z@@AN^_-D2<}AV53my%NABIYE@at9trjVmSU`cT$xuh&x)gljwv1(bvr7Wre0q8b_y_Ec*bKB z#f6J7enFx#3J>aJ?5A*FviM>ky^}Oc?LW`AYgbqNqjDchoNQTEkH{J+)*q{d7U5$E z>d0GBrVQ!tejLW#IE|+HH>l$3yI$dc5%MI_)|IeVkmYpcDZK2wwyjgvB@PuBN)1Ka z1_VZ^W1>pV65q@}ugVWQuyxoUA#Tk@9g1gBf0uTSKr$wH%f^fv3UY-j?)_B zVpAqddG7FVgDNqE;Lrv|$dz1D`b(>6AO{&GV;v>CM#T{HO|PLU{+4;HQ9#W$_w1O? 
zz*8=W3cI}FtA@XFB=!comUyLi?E+NSly>^B(m!odfTrAqc+>C1vnZI>4NWYh7@Uq` z>39H$!!Jl|ihJx@OJbvN1z7;GB8aV=6dvZa$G+O!G>;rdEonpwN8u9lofwX&kK0n| zwv0O{V!!Em%CM%dMGcyjjG03BRb^&2-cQ5dr##e@OsUcWD1ZmJzS{$zF$Y0=LJln( z9jdw%%R{czuiM#AEgNJ zHH!+Q6#otU@6{a8SD}BM{Wy*gVKbX+(iB%b)gA!bEv`B<-yfY2=n?iGXF(PC@BE=} zi@`3I%H)cQcP{B*I;B}0TD*|+_Iw#LnzvIX3au6;uTT)WBvEp)iCBbC{Yuo1O(p(8{!SL`awQRO z6@&)L+Sk#3E`DI~Yp~~7Y}rA)Mu4NmkX=Em=lqFz#`cOyGc0XbiD{?`h4fL3QkD(p z*W1^7BVuR!`FEl6k`3jLP@M*g=5BtZH`Wv@BL?3iXqI?|leQ9=%S2qoIU(vd%}C#Z+1v6?!*07sLmetmPymK?u4jW*-Hlj zMsKD-cP81RpL%Tbpi(s<`v&DLXZ&ZbDo9i@9LD^^o~{u^g&u$9>>K>|{rF$ti|Jp) z3^CSxibt$*W{wcc8z%gun+@KSCjdoO&Wz2(T^b#sqWuZoHK>-#le(b}4WxJExVgXp zLBK*SuB%}1+&786_5jjF|6K}@tc&E2Jx{k5pEsP2mW7v~zf2dAMzpzKAzR0~DW{L~ zf>=x0QJ>`V}N{T`UX#iefG-7XsrqQ#enKO?*b7 z0tNh$=lGR!TCtRG_bM$P`uXYbGJw2=-uAkJ=R4%+2Q{=AsPYl&wp^kXlTMyAwqGG~5PjP{i zA9?3gXJV`9 zCt;2)gMyq+v-w01N=_v}n$jtXB}?^N*{UI*U|;E@YRBA;1_t=c9^?@Z0)tM4Dw(}o zPAg^cfh_avU8XdX*Hm4=cP=Et`1s2od|CfhNwUemKmp^Q{+C}q1JhczSIEbW71HQp z287El#eVe&R?0=^tGZ9wHDO}xg`0OQfmJ2sBB^p%OV!Itg+q)Tl`SWL<~eS({et53 ztRhmvbbSeRljTp0s+?G#9F(h%nXdNO8w z@ul`ceaX=Q4yaG7f~%CV^d7=!5yddJLY#cmZXPRQ{K&~X`{mjDUadVC`=oTI zT)&1fHZC-IOvp*cR;!Vl6E>WT_?TeNP>{nDo+uYG3)l}j0vECk#tV!Qa>EsVf5;`S z0C|j0QzaY`+wv;8PVpz87Wj3=uU=)_F;>%+zs`kKQh?+q*L1yOJxE}l4%8nY@93b;@;=52 z?Ona2t8!}6FF_q->q@NDS7Vh~oT0Y<@BPI!?iANSm{6;-2>gUN=y6|fbBq4!HeYq4 zTUphFP|D#O_5x4^saxo*F`D0b!Dhwk9nkwP?l{HOU-uhP3rUa$U8U6~(h!>~+bBT4 zjrO)5@rCwgOc}=fq`69rdsaEfMWCgJ`9*982leK%*+FvV2Pg5u_u9wr#Bw$WOx=O*h7;V#py70t$Ecx6Gt!( zb*B@&FIK_(XMYm|7+{X>VpVx#Znv43Cr(4DpfTHStVYQ2?BQGOtq=Vtwv+up^iOI- zvAcAkI)FJBZxutXLKY-SJo;LU6Yq*-#2uC5x|e1VP)Rwk8PZL_c^QHs)fi$LSOt|` zJGaHnPe)m;Xnf>3f`VU&nchZc#g3x&E!>}nsu16huYJWrsFq9L^rX3xpm^u)(A_ax zt3(N3jL$-EU>pAZqaVhz26P-HHWsS`BSPj>KB}k5?2lrst7|R&+f^Du#bcMb96I*l zg(S-!P)qn_A)zud7ja#HrV|flyGv?<%Hoof1yMJKsl;l|VGMH}q+Gf<-p{aEWP%6& z#SM=CXfp%P%j(O#vUT6u`&boTP#4xVR|fb*>uCHK6_8f}+Eq0JHh@1r8>RJQSa`3v zU3C(}PG{c9IGCv67!jzPAzDtMgJU$$E3|)a_1!}t+R1)db)kLOYzo?? 
zkSJV&(mO0;rvp9J!TZ@LA_pRHkb^@B!U*xpsC1yBxPp;l**g#k5D=&6s;>x^rtPH} z0r&$hvR$ixAN69@)evW~MSU!SA`GkCD^Gp2J!iQD5_jEb*>NkS$_vH)S$f4P%LGU( z!-Ax^STVUU8}?7r(pcp-AUqL^DL0P|?#mRrM5X*2vAqWz*^;cj-}^UdfhrYc8#61%bb|vs7Bb;4`*ZE5}5U^P%f&-w-wpo2!rBkEbs3 z{jJTY6ZE?(_B?BCh|jR{;R{aJPBF682g9e6Dn$b-%-1LnR(0)h2QXi?vUtCjFcv2!q*KI>7g@3KBR@0K@sI$G`Y z9qxRjJsF~)eIZZ;3|qs5R9NeDyu75ra|Cpo`Kt0ONw_1a(}gG%`&#s@uVJlI`8GC+ zNY?>XLP{(ekw)>Swjize#rW{7hDEkO%#`%L>+$ca3+;2BB3H7Uv2yj_Y*(ykVB8%E zl$j`o5c5Sre-;RS%q9ap0|G8eN8*m-ofq1B7h-H2{chTNDOKh=`8QzJa^2{bOqTXCFp-1>B$^mg+z*u7SPGbj1?H-_vN($6X7Pb zq4?X?t8gbQnTwRfy0Wmiu=e>_boga_-3he}Qr5*FR?(0rga%Lj#h4)a#76;o z7qc0s&2a@kR^px)7gef=hm@68wy@NutD^QUZbbitX{r`d9f=BNa9`G&pF=#}OJ`ZU zA!y{Am{l=fOnS;!T=E`_f;W^$Yhkv{uUb-Wa8<-qRU^>CzYNtt5Mco!@;zz5CB*RM6T*=*b*qN8E?|7N~cYso>J2e#Sq_kidXnbaQZz z`i71#!QJ@a%dutf-ts0yr<1KiarSs%_w<4!7UB$5dr}c0_L1A1k;VhLno~)mM%7Zt zj{_0bEURC<08>&c6El5-i=QT>@i-F9Ux+_-*z#hn0|=!=UEmSH_zXpQzz+(lMLdD( zDhire@({e^VZL!)CbnzfD}=)VDMz1xHT%RFEjT`OMkF&vsm`b&BER3B2`#eH%LP_d zL`^JfV$~@6X$&0DmMt$1a_Zub z@S%;3!lP!dsx-<29;LxZ;GMnzC9Xj|l&!X(ci-8=&-XGWu?C-S^RWgZ9KlFT4{FW) zQq`R9w9De}tU^|H*Rq@>q07NQdFBy?Ll{e-3;C`=L{K5>F;Cy2 z*V$EltZ47$Wn9!X>r$e%Tn9ipF(u3^JR0BhXAj?PAB{5)n$?OrLGPv)LRZj1K7`53 zd?=iOt#>xIM7*h98n+d3>)a*^bW%wCuh0J0r~3pqomOgj=6W*BupHqfHSoAQf+|tG z1ITKqz1`Ta<#r`x4A%>mjP*96PsCQnVXUgiffy~J4>&IfF4O^!38#-Q)S_{4GP3Bc zWGpwh1b)zNawP=GmjL^GT1Igi*QWXlZcqgabWT-#a|&=~ zboY3~o$Co^?BCp1Dn8h`f1{+Ob!zg*N-0QMS@~CijOOEy0(yt3+wrwA@M3)MGA{YC zfSHIvO>>^TNo8ouU$0Uku?P|Cv~s`bPJ8PM@h2cghlmwIo}KlZDCI=Gpu@bL@WaL# zphDNHD{4r@T>ESc7cqUu54ox+7soAjPrSUT&>_EAU2f@Io;`f7eLRLI3UppKh`LzX zTZ#9**Zwd*cmc}L1up^a5NpKIvRvM}hgt!WbL~eM;&_Q>&R`Wj{x@g8EKt(yq!E|# zX7wxX;6$wEI>gV>iRX5#l$DxPJfj*L=O|ciC<~d?(Id3(M!G;$xIiDQ=OyF2}uPs3h{$I_(>h|930Jj!tD@3bOa#R>+>n#YcZfb zC&F}E6!78J5Tu#xqf|TtypYN^MB)TZhdkg;qw%vt%_9C@T4$GDagQ;sF;Q~;?tW8% z%lkKQZ0Pf1EVP@bA!t_wF{Yc0ux6a1;%rPL~Omjy)nHQ@3XQ zsE~m=0>;!If8~R(po&8P(I0|dOA3%CqU&9=2(hGZ^b0&3??!Aw6MZ>A@Vz4N2aefB7>fsw=E zOKU*B>M!FNN_v3p)GO-6vUK0OqQj&j$z-hP+a|>#i*_ZgC3Ab`=G*LIi2N#R;IJYc zShHc@)BK03aq&U!Oll5!)aRH^Vu0MXxU##dHyU5TD+h&)pWakOxgW$X(Vb&8<}>kv zD!R~%>*aA!L5?KN3O21wDUyR*Co9I&matDjc0JNL*6(0!NKB@4dr#r9;ZvQ&)`Je+Ki=B`-2 z@!hkB^k6Jd0V~o&*Zw<{p3;hBOoB-(nDr)oRg-JApTcoz;rgId4abNzNJ@sck@KQ@_Z|eUSKqUEf%|0UM9G2;vdS1s-+Mq_;85v9kb9ryqopo4C!r${UMfRM3^rbPO&oJF)TjKZt zt9v1k6uV0Yci2j=BmAxOIo@vT)_pfIMzNMN&ZHX{4u44I;z%x`y%H`zmJt-zupHiU zW>xxw%bdA&p|PIm4B`Si!NPhd%3&VxE{zGC1xu>)rMyLKwYbNusJBRtcFCCmZ`vsF z#Z}Ck*dK5i*T2Fv!FGv1Nz4{v8LyG@8!Mnl<|wf_BAHqJjq27vXlK9hS}ty}6p%zb zG6*E@=OduF~73l+_p!h!NUlwC{_!43*9`t#RqaG zGoKv_fUcKV9~SWYD1yfsCz-0NQfsXI|K;B?vtA~^>{_a*O|FsHbEOW6s_2+2;@_Z;I%I(Wr}*3uyhY`^#NoWM z#EGG&Ne562g)XG=@%P)KZ~5Dx=jh~GWmg{ev`>yTM-e8E64uW+R-0fJNL#H0zkuyw z3i54Xxm+kQt8U5DPa~0HWmxgT*n47+6dNfH)+(h7c;ew^$^ItxMaeOKt;;r)`bXNQ zVu-9gr)o;cQh>0B)QZAHP!8kEx7+J6Bkq)Y*qY_*q+?SlN=B{$LE$ub@G)%7f>cER zNUO2^L@h`qz^uJjHmzrPA6t!SS&*XL@)6p<_F#y|wRGB6Kf4$ki4fPGNh)=in63y> z_y#e%kGHfzx_RbdUSVa6LnR8=SoPb>J+qJWo8L=0KbRrKHULn^0Vsv4))4a;BR7;w ziSX*T-hKCu#ra?d3wVS_THT2gg!$;$$u^uB9_Zut2KSTj_u71AVpqtw@M87QYq@)M z(ROFrl89eJ^~h>rbCOhp>R+Gh)DQQeD9tMD?H@s~+EG7_4;5ZQ?`$A7Hk>Eft+Dph z2SAVfDnxd-aT4=Ta3m>DnAf?&YB*iss0e&KKsP>RJpAJvwee-gG~Gy7e6Ad#nRYcRZt1cg=4Gam+Lb>IO-`UcxjSJf z;CPfi-9(^O|AZlg^iaCYrR*2T5B2UUQ^he!Bv^M|e)hl0iQJtDr=x0KAC7#;{h}j%BE) z+(CY0`W-Q{>RR98_8M*@sg4rse!SrmLE-*h*T15I>s^~TLv(h&Ew6dB^74;&;K&R& z{G6@*2NW&S;kOWVlOA8Wk{PsRQH0%g^`b7}6HI|Z>Xbmc2Y=!zAs0XL zH~>1Z>A{6cT_d8@v>=4TF3yGL!NtOobuiPbzSw5xJOiGldS*2v^5_KBS$LPd%nc$F za!HNKkA)24m{q&K*YM^lqQQa+t z%M~H0ndmbcy|M0aBj&k`tL3-D0(!aZ5v9xzF|i=&dso)QtX{`z@~^O?!D*?B?SUVO 
zx%m?xh9e3#prD0QCfpiDO?u3pz8*FI5sS-pTj#_p%oYm=Al1%pU^%>;v*MXxX!$qm z0H+-649Y+;xabK6N?V+2k`f^8#YY~$|L7NT8~C3`-}N%BaRx&hJkdVtm6(ONB?P}X z(nPs!tk9|;#FJFD{bVW=KTvLjg{wEv#g0{ zvx$jMZfZpLkILrHa&^TH;?R#VDGEnnQqV*#P!lv$-c8NEJKEaq7*xpC3bps+f%w-r zq(=$c39C=!Nnp-t{^_oo^>_=kU! zNYtxGB#JHaw#@NzS>T6u>wZcT@H?6+fk~E^5+ZRl3o{w3$I%bE3+&(!bn-Viv2n5CJU@fTtNPGb|ho;TK=t05bGdN7ByFK48 z1Z4#B`Ad>7ta#mgq3}M@J`$5vdim^J44HTXkrr#L+wH0a6~`Wggva>Zs|{>2zO2Ze zhUT@M!<-lW;eOukP_Gg!#_dv_@OYYnUHBv(*lhd!a_qvV+Y_IR4}y0gj~N$y8&T^C zQr0(DsCa_{N;9V?W>E#uPoPuHULF#uHP_WcKk{@FnZ)#)RCSF_!izuqmEXoeAP1t_ zIK9wmvBjxNuX&&Zl0jk5YY>SLGa^O~ zfx%Df)TcaHREW7BD@q1wTPvo9ItFtO{R3a=q6a@Oa+oY5Pm@B6_msh?dpHLuE+!WK zPRP|n?Zk`s5a}YY`#oC=e4q&ReO$lVFiy)a_IP>2JbK8fe!NX!6L&zo1%B1DonGwGA7_V}n6uL}9VX)Il!#DR3! z+Y?pE2Gu$75PKz%LN=jJ38AUIVpn0>KomVJRC9`P;OQe^#q|XwMBuzV)F7V({Vg%e zVEqK95bRRi7pv;cJFzNy_t2LLcWP)w3kcPGb46!XTH&(o^YI5X-SZKjxvP|L*cT`h zujMudZUOFej_frEuquacQJFb;sszXkO7wSjk&;ELamr{5;Fn-*j!Pq^v?G|4x8rES zJ1Ft9bw)bC{mv9Fv_#vrq|paj45#z9o8HFj%DK#{;d`yz=LA)@DDo=(>FqghK~b-G z%#2|&55P0=`F-IluVsqtteU}w=b@92?#CXX8c{_abFa!ZIy?~Ux0fE)%^Phl_8l1M zaOeXtDLx@0L=0eNLZ;eE76>4vK;xr~uZq~X>iHd=fZZy)(b9BY6=|AVMZyo_ZcU8w zgLG`v196@rTy~9#h+O3XgPvf_;Q^#F1{X3Zjk)YD+X)}qdr$mYZa>5x0h2aZTb*SF; z=)|XXL)Fkl(cz;W|7~4A-UPZK;J;M&j*L3ch8T(BwA=n+uFZuc%xs<)mzmbpJGxN| zaZ9hK87uIsFRk({WF!iq!IwRJy_f8@tzZ`cIaC1V^4$HEV$E(tmtZU zz=7s6iqAj(-r3)@v+u@_y&GsfVfVKvTRs_btUPzU6N)M7b~$zdpXAs0B!APr6`!PD z$ZjebMBRn3xQbM5PWp|+FxI*K=-eFgIwP+(*P0*>w}Rl!kYL4eddo4Jm$MKzRXHm* z2oF`gp^&$Dhf2kig#ZTUz})vnQ1gi)0{$Li)B8;X4sb|8GeU?uo#?>>M?U+FqOZki znwfE-lqC#`SV$6{pM1{e^>u9b8rQ&BR>z8RA+whl_x{F_v{o62C|>tkY6ScpMmmgv z0@29w9hH(8oXJVinKD1JFJX*M!>6wn0#xs)%yz^u6y83;(DH_B?tq~%8Dj;g!SHeK z#1hQb6M&jfdvCPF^svib3UTTPr=p@n)XJPz=G#o(GNW3O8KRnp#Se$l=$;t~a4I(A zY|s~9cD4dFMLAWj`Le{n6X!d1fhQFlqfw+CKc;BzcS6cAS_O5-VofNK z6`{ISbCh&N!y$|nc>J}qUz~lnP5viV>r1UTi0!%OJ|3r}H%S3s?2arujv*ebYrtxcCSi50W!W)xJVDxznC0?Xf1#F0vQLz8z;AEyjCFq)`54g%m+rYE(i@A<9FAY`xJ| z{iJI+oP~oYl(XJ}gALVP;)b>SDPR+iI<5U@Ce|OQUG5?Tx2>a6GzD{IU-3D4AhCHY zMqUwII+OG4@uA!Z_-E?t^OxES|69;%9{$U+XFf#Y@roR@gT2_jMJO%>G7!B53>+R-kAMH&xuyF_YXwUZc zYgh>7-oubdFeHXX@KS|DcC&~Nx39dzJOco`WDdqSI{m!5vmj+4E|s7_o-7wcjP%MP z_+Iw6xvEMrcHQNd)&P@$6qEhAZm+b@`KSTNV=XYaYCatxKB=IU6IsfHTv1ISfQJ}M z57C!37W{> zhb&UKgk->R?WVM#k>n5=0Kt}BPdq^Zo^i?NLZf3SIvB=9o20&8`_?95zl4l?$qTE@ zqHA4bygT9#EG)&DC3IRUpWfu1UZ2}ips^9;c z-)?8$3Ngx!gxT@Mq*p^oMM~F3lsD$bu?jII!z2~t=h@K z3?2V^OPn1SY1jK2gv+lhlQotH&g$h%q<|VBu0SXZvP)6^BYR8aFmi^Y5S3+U6%dmc zaJtM_+qICJ?)T^56;z;TK-yBQIo8j4vGG_|8U5Tj39>xhKJ6i397E@c_4UD3o&*)N zV-a@Rf%U0^;l0^WjQE@jUCa)#ZfBQcQIs5>)&2U(b3=l6(B&Q}OZlr>y#(S(tq8t? zg0tDAAfOyK!?k2x784wMgdOl!%NgPYKaRERTc5oeRIyFLib++lVx64mw((o(%EoaR z(&{)C=cMh0U~`$B?%}1WBgt^%fBvV*TF=HBEmJ*h)(fF~)64&v_G$k~ZQ$xnaRjV? zP#j0L2en`?QPtW9V2R$qRXieIQ2hp!6xa*?ygx-5FdQu|u#jKG1Vgv1%w1Nucxvj2 zWygc-CY?Z?ikG}b46f%%Oko6HKcgwlS7SLANt{@V7x#)0Z_LO^%NGm)z^=R`C-61d<#$HxvDtLF3 z$~xZ0-rG(4<^{r@RopHO6S%&_!Tst5NG7TOyB^=l5SDI2Ks28RayrlHd=^oVNCpy1 zz?u80nCb7wFH|ZT*SLE9Bw(_D9^%0+iTW@MJP+y~A952po#Z}7qv0Y_&PwjWq;gNs zHc!+DvX3q{k5Z9M^Hc%N>X9G>(#tEL)Vv7_PDn%Gy?HJpax7WQSiYsn+48_~Xa>hJ z0KDf&%s!Fn^D@(AL^!GSC=>Befv*B)ER7j7@Lq{(DD^~NGZXId*Ur9%Rd(T-7?X%? 
zEN{F>_mX0CA(ARU^>XHNQ$Ftn-*RM$G39(FH#pS_7D|^ZQu<*9&0H`Ju|6!H-J7doNZbwPKmKd7d?;G z+6$Oc^?ERTjuLoBA^$=^>(|8s@kje*CF&$H?B5ci+v}3e`o`Y8_a3N4i~`jq#XtBR zxAKQ6)&JDbg>wCme>7wwLP#D5@^i6N$UEUeggwZ({gKzm%3q5MWfm7V{{6{2?UV8E zS>h)#)w746Y}wk32U5+Sc$?|*xCinUthczmRY@iGc1`)@88zq$(fj*@(n{kPD*5;> zmf$!s!{^$kLbz4BlJ?CXx}b=4jI2HdhA9mXE$nmGIXed~hCLzD9VX%;JM{4QUnS`A z;kAx!h ztF{!Q03UGhiG6Td3IvJ>yCFPLf%v-zV&R^GgRw1E` zUi3;+**N9w;cbkWWV8}1fAZN_?#ExHPTixQ#g9rI!yfVy0)-E?B>k3+PqD6;!>Qi1 zN(GOF^!keRfEa^{!0VRwB9PHt&|cX;B7G&x>FlALdf(7;&E#54xJ_Ozj1W?kM|%am zg~lyE4>cVJ9ubz9XQ^A&vt76zW9h(;F9`ak`0?-`lo+^CQq~8ZsMk$MKmWey$M47L zKqIFZbm`-&N5)Ov0|I=15fJJbX z`^6DZa?^)$ab#b4-+{Fe9XWUspD1!NbN-(e|FC+whTVS7MfI2&#xL75Uedd$K`Ok& zk|QiKxP7p1wY5+dxbt8=6g4dvDCYEOFWQ|P#p~_!@i3)apitvsB$=N5Jhn%{-N4fZk)P&*_)y||MMW-2L;ieRj8dBpT%tk0%Xr;s9F3DMUle zPsMF(u2>H?4CM#KcZt`)4dV{*(9nbs(FR8)3P@R@K2%Et?{u-o?!1;#Lu5N};kfn} zun5XBUa4|oh^rIfu@1*+$laRg;rMX3TSTah^r>f>pF3madq(?S9?`=K$SjUiolZWeSGeuH+t>p;Hn%N9XQ^*?H@MBSCA0y|gC}UHa3{y~jr|!$2p> z{R$9(oB_xSLBZv`H+>^na$U=?XUBw}!NuSSGvWc=s*t5+$^ zzcimjfR1a$W{O_|k=E-1)lKVCTYLfd7rcWezeLmG;pAfU9N1WI%DE+Q-Anwq3=6q; z#b76uK~=^RO~9?{rG#R$D6to(8+zTaIEZt|OkPy)z4>l?ItGIIcB(g0|i3M3!w(X0xDPB zaE}1X`CAfjZnLZtTz4@UHu5YYML&_xoYo;7=f2sV{M_@##-Y`p0-GUADk)p+6Ow6u z(@wwT%>hy_DIf%OkG%vJ0H~Y*yBYx*kxV3$`*B54^AUg$r86uy=;WIEjegc) zgB|1zlmWmUj@2Gx7ITC9Ft4_M(=ON{;z}J#jKr3Yb$*2e=NN(@U0}O11kNH;iutl& z=;fFKT}IdfB@y^vX3q*Adx(R3RARy_szu&5dyO4i7EdV>@@JmGt^QI!bb(dpuxB6R z+wCNk-CwGrFg#QJznF%&A{1DOKX+hmki8MldlRb%6db=cruBI~U%wj*GP}|i;}iP2 z+*wHQg|l%tUkux9jDlbkS#wi)m%J5*d-1i4>ZR6^xN`_ zf8<@D>KFqozo$)5JPbOwD=F!FCwnnbL_vxUA3h${6+HzUii0@rTTvK)J()!k6~o~% zJfr|pGtxsIKlsu(h7>;-*T5^+I|<39g8DL!n14O)M2eXa6MCSCjX!7g}W|TU=v$7!?};=sguWdy(APQQhUxc%4PGSH)|sv30t10-Z%$KK9&$w)V1g@{11rh z=;WYik|Roo$YwO^FzW2@kSgvLtQ0>$Ss>?ncPT{Gf%#ZcLh%5a2QiHWva%#+>m|@Z zfRy&-{h$=ZtA>DYEzjP%oo=P-G$1ByaI6;Tv?)%I9}Y_dk(;VFzlgCCyTLG_v`~x$ zzefduaFSxEqJtHstIUvV@g`USH{wzJlDaWO2;zt1Vi+l_P*}&&JO6-F!nIPgjVkEs zp5^7lzJ5nDF~F*qTUE%gKL@8G^k zm7V2U%?Z2_c#ng`gJq$Y6GbA0XenNJf=}x89&g{o_Ex${p2(hm5<^+J-DYQ2)MYL(M#OThK zJ#jeNHP?k$?aT3qx4+Wfi$`#rSn&PaE|kEM$sLsZxDvm5`Q?_$T!I~vexYgsuDn5F z)HUlYLfXWP&{?5_q@HHuk7=&ZGVN7XJ3e~)m{Rv)J-^Y)GEa`%_ml-ypg^^v(~Mum z_J$_D7r&oscYH3kaKNPHVqO^Fg=}$?!QL%9vjmYgOG{pbHyl5suV}QrRm&qMI z{d1p8$ey^^APG)&dl#?md#8W@!ubERcfD(aHEs<_ZAd7J{^#wn1o;CcV$RD@i z*t5JmzysyX#N@a!C<{S)JU?@wse=IWh+L|-Hc{&hs02!up>d9Dfmzx*B39tag1lP$VF2ew{mBY3nS|A zFezw!_iFqO@lDlBcwm5|X7k+v!(Xy$6O!sCiw-xT3r^bYSFQre3q)ZnNv8_|Yj z1%*=Y$nmDJZ*Gtf1gKvjI{3i;Ll#b<%3;rl;_L@qAuR`7ultAYe#k^}GZtsuGa# z30>I=ZpKp2@cpu0czy`07guW?@`EW*DlL%RMkx}H6#6QaA{~h70R{9*{8~*ntuL2Q z0Gw*sOn%v9dzjaik^)?JePN=_+Y(T!+&Md)JmZsnf*2Kx z9DxzGofrQM9sRu168}x>31P0X-qP9qT)X6PFSMl;T@>(sA%rWt((H14?*t;JjF zRD^ip1RxR-+}IR=*!84M13^GeI-t|w>B!a$)vn*zx%H8)yk6&_Z^US6O6$yNZQJOu;TUV6h297+z`~(Q&Y zUwI{7I_`oPPg+4?Vsz&^>$1bvD9hDT2K+2oM72;I7( z5HjcADxC()a*$vKIzSs3Llk%>BRc*DDe1?+$9$fXMmREb5c-lRaYx|dJCq^kjivf8 zCr7m;>Wu{-ewZ7-4jdH{efs!Lo3p^cz!1@RcE?RnOCj_X78_I zPL9u=C(rU@$8}lBeZ!lQUti>NeXvR0YhO=7umyIE3ZH^lGWw(#3o9I;;qEy-L1cNP76FUvH{A1_;?mi^Yo8SyIyM$J-B9$8AnNjLY9Mf}??d9jroqR|!FU64* zCT9SUtCJS<3qmtP5$(ZTZgLtV^19|O2d2<7s%_}K`{ z5IRxJc1p2NRjKt)Nr#NNKGQyUJAMd20o;<~W7*XB_aJ^JPrZC7!)8BIDTM0K+Z|M< zGaoGe!~I_CSY)Qy>kr%K;`J{QfsBEUxtgGS!PW%t?~(og_y4>7KmXL5e1I>a)T;D7 zbVdd{X>U%nllby1y=->?F0cc0Ww&;LKq}Nbj{X)!H`CYPsP~`=5muBIVfzcq6qbe2KGq925(T z4HWV^G5LC|I{NWwfEr9i)OE>tD?ZMDq-p7p2C-f5h4jx)wAZI&?|YH2G;=XTs8A9z zLR3C4Y*oFdny3e(K7)Bd$owBA(A2r%1s_M6K~poq{4?#n7|MchL7RK3EVN4;IX+&V zZIi2hB@0`(4{i=em*|hrZZFj$dFxAReu6hQsj6I z^Qf@c1V^2tUaZKV?Y2=ojev9s{LewsNsIJ4xLHh-LL82_P*!!i-LkBpU^w>Z%Xlb= 
zcKY$Bj~CrdwZ|QX$y3p16TW)R&aMAp4x0@VIpm2#T-^B^#mX2HS!+hkBn3RYIx8rY z_=|8BLyWCj8#^1GM9*J;h%FdXe){e9M1`33gQ2WPe3@$x|?ei!WYy&&m7K>sSSr(IUAr^hApfLsDC$aqiuGeGF z2rhL0%nR8eQDu>&>|dnsSuHNr4^1kx6@lcKa`ALkgZx+j62_NTL7B5zFZJoilsJ9z z)A*EHE+>ThJU)@Ih{Hmmh>eC+zgu<*+9njc+J_qjb^>`be}I&@bj67}-6nuaGzwjp z^oY~Pud(U)83jik9I1q}8VFeJ^a@Zgdt+}1{y;vm4!y~_$0Pwg&*%&!&6ch+mlBVK z`C55G;{toq^08YSNVr@l2Y#?R1n!O7ZO4jX>D(w#JT>iZ`V@?tXQz5x!YZ@=r&cWK zuG;%*PH(i&t$DsmNkIqA%he zmYYRCP+;4eJ~iEDChp9w#@xvXY$`hxf6J%RGdzTY(p+L()g)wCo@lO;a5~9hY#ZUi zUx;Y%u?M>(N81mkUSHZ}v+P$KMydfn@_^@&F#(zJQpI`hSo=^-L@EDf$czaNfA!=C ze(!6<;fno=OE9S4}exH1J4*Ey^^oSOqPVxt=noH2d>1a zalA~$@U~!x#BbDu=f;hglh~VHxW9qt4C(Kq`0nCjyEQlM4U*-4oK&i3+okZoxQyaA z#21)Ld~mV-QG9Uv_zUd|Ux+zQsTJr(DW=jDDaKQ0`};Q&ZE`w((^dFJ9&b7I_<-43 z2Q4S&jCG$tBSAHLi5%(1#&h2}-!Wrwu56jaiChdvkYSqa9A`KNgOjA?mDBoT5>|jFeVIB2@@K zUDPptx!k-A7{ny*S0z5!R8T2oKv!DHY8q_3-%h0DW8Ycl&Mc2jQI_?v+~E5NQ8tef zAE^*aQITWzc!)1MQq*`)B@wXV(UNtfI%X_aj88P6>+yRKk5q>gecOpa>Tu#drc28@ zh}n;RiC@A+Nbp^C!vD`o?E;|`n)}`OGBWTe{u{rqs8H@}K*2P<5eKYHU!T;a0cpel z&*7thTS;bG#%ijR8uiHPIDu!fL2lT_@Kz)%{kpKl|X|c=Ba2WM-n4}GmjQ0W*b zpw4yCR8xClC6=XklK?J`zmcc!Q34_L)fAl*IdQ}b*OiM1#@SKC7dLM=XH~rIvTWVD+oQ>LynxW_$>hydRcfR9lcUG#A__T7AP*( z5nJSXJd!2;l~QuPefqN}nI=Oj77^e0I_|{_emnA{5|<(1Z*4>PQFrNq|4AN|YB3b;c0?stS*fLRm5}HIuA7+aYI_ia zR9gIwGpIJ&xKOKSC6t(9criYH@(GEM@%yew(ot1>Gv|-k4638~n&KzKnS9O`uo#^K@0>I*e&e{-TsM#yX(0|4pT*F-A9QZ>;I%_PQ4*ig$bzlFU^-N}mYb{Yc>) z%bHFdTG9K=YgGbH*Frj4f4UGr6lsTCK+A?N+c398wU; z0`lI%q#!O^bndK#Ar6YTY)DLj!3g7XjgrZjS`p6AC}*(((_HtCM?`p;+Q1dnQ|N4m z3>YuNv+dt`lH*NM_mX&s&VcA%hj~R$%(Yeje52j;&pEOx+sbVp`#yuK)WlD=;1JVS zvhYT3x@xT1G*$q^#aFs7eEQRP9J{umY6azS6N>PWYM8dQ96Bc+F1I@iahyQdrrM;R zwaDQc+%F^z(k1s3LKvq*=FvnCsM1F$Kd|5He(p5wPGU{e*et(>g_01}JIYh^Z{WM$ zcSW({G)Azn%Ut{$vGFSEYIb zU^&0Xh^@sr5v2WLsxf>h4zG8RJiuPjxa=UIk2TAIDyTxk1S$_P5?f>39@~#0Cy|+* z4_8vMitDq2Ni#UOMl_qEOs@~G=1^gmcojBXr!F3U92hWA5)u0ay^Q*Uw$a5BAy zZb%=Ko2Fw+9^9UWYE^;cV6n#EBpw}YGwAr6O~2o_*Wn+{NqTj=@Oszp?U%G)6>e#Ml)l%Vc15P^x8`-RPtE9*D*1w_^D~ z2md1h*pL`jM5#mCL%(VYcY&sAfpVm{0v3T%HuhY-@S!h9LY{v`n4AMX7J z5AE?HJmeT4XyNG4zZ)E`5$40<+^KccdzggTmkV|4gt7O^60xo`s)&RvdM;xhQ&Fp! 
zGA<8Q@0EpD3=V5|A?yBhF#GboF(33~ zB{PiRaH^elN}$pKv7KWdA(Uj`p6OtSS>zPksr4N?0NVp=i7&6VxfQR*%9u!$n1GN9 z7O~)Y6C3S)OaZ@RTsJ8e9Z%|)s(n$~qpqv8Kln`uD~!zjVNv84Xzb<@PsrZrb@1&dy9qtt8TAmcY;s6Z#A z9seTU1~y(Z6JhEpteN3n77lNn8T$Twj0^rJrXrb^^Maz$=Tx;_8vR)bWZMT^_?)50x(2CWJ4M+e!gnQQ%eV~=22+X&iODjt8jmG^sfLpY6hpq)$r8X% zzeOHh{Bj+TycJUy-aALmaX{4=*ioaxnENwlys6le>r{5PdVv7P`7PG_>EpNCr))qV zx`N+>+9Y1yIQyH@&&lRoe2tV zat!D!0PO<|=!PkLvTG%$nt@z6r22U+dSIgHS)l%&1r}mJKTtbwiM=p(z27K1KjlRrczUvDJ+;(>=`4)j+P(UNbZc3| zf*g!^o4?A&&`!Ku0Sin#SNv(OFH(?~X{t2oK1q&;__FvggY@X zLU26Ae2&90HYws4$YHOi8R8%*y=55_TL8o_J$NjBdM(D6Z-Rf9>v4hu#A0Bb#TTF% z9p+w9CNgchag+DI^NWDwWbftyP^Wb$72KHRpSDhVy^29Dz8qN9P%IPfcDt&%CBCFl z<{&QoN&AxDdJ6|rE}XY}kuX~M6C34l7@M7h6f6W9Jq0% zih}|D`t7^JEs<3Vi-fWC`@sk55#r=uEK|4}2qrH?mM z!8@lJp`u<{{BKKEh$M@U0^M4Lk`6g_qh|e9LD&zJhADR)3GWQlgnQ07Ru7V z;&8T&DA})MA*=Bj%7S5|HTDG6?Q99Saf3&Q<+;o)2+qIb8l3OMq~4&1bqwVELe?y7 zr30Gr2&g|!ea$(O@L16)W_X|?_X(aUc!EgyWO75?kEvtpcb7I}3RG4{@7Ny{NsoF% zy3^wcT-}3;PRm!@Q3a6lP-?!ALJ_~vptm#NM?n68HG@q5!wp|?9Umb^(_t(hnHSot zfBHfwWNzBr$rCk|^!J4RCBb2^gWSZQ_|(F8yaU`E%y=b^AqwM((^SdBST zZsi)=qPxPl>ZmUPa4KR5Ud?$bA9-GP1Te;jH|20=sGVM4KL?NYcXfA{!%Z&9&VG^% zAGK1EfZz%H`kk_1grisBVPHH`q}dkpd%2z9GsN<57azJWvfuoC6uB0^Fj zczUR>AsA^gLIfZasqD)U@n(aODug`cYoD{rqximc@lCa2=uj%`*2&db_rUDfQow#bE0y!w~zBr6(NPB>1QtK62n=FF_0xg zT!@3_uN)n#7oE@S)33GDU&Y&SjJY}E)QW0UqI&?+C?)5AEVFC1U&dhV@PvJ-a;$|I>W))Fo_fi!iy1;btUNX?zqgZ1V8iw~1fA$_2hk4xOh83@!ZNs%q8%5>J zdKHm)@`Ol+p6u0jwQh*?MJc^P!050reRvXiC=4@`)_8X*I0HC=6azoT;;*4KBvlz< zniSIs?Nat{lIywrxzZ+C44b}3UGejB`HP&Jc$ZK>rbReZmCPV6RG}=AS9K+6T zCtgJH8qrB`EvfG18$O5D{h6Qz+rpaLp>1JCjRsBTz92Szv1V$N{ zn^n^kH7}rLIxP11EEUW}oL1Ul$&P#W?e=QStJmnSVG1l8^pS7(?GusH#D9gxWQhd) z#9{H@_%*nS@zjG}t~yn?@b^R*K$MckG>dNy5jZBU6+cPsrWSCc`|(*ns=1&#Ehv@v zPpQua5J?W2eQ6O3V_-wEPZys@wweRI6a)GQ>aHc(@N`iQ)j> zd|(v<6Kif%rVX6W&L&6>QT-U0>Pdg_c8r6f0BfnvmHL%{k~s17W)?#UiK90`Oy z9#}4Bl-+AVz0)%-CpvZ9jWmUhKTJk$XwV(B?zw?$_k!UCk^fRW3JmcY2wZ79Bqa?DY`Mg5+5$f zcXNjwg{%M81HjMhJ%%bxW~rYmSe#+vQYXmICdPF&K6tf#)*Gd6Xj^H*sSX_z+b=%C zpmvyy;epq&0Hp>nwbe+UsolAuTDMM8uK}i&w!Ks0IS<=t%O)t=YG+gRIl9y5TedKI z3qBK;Us0z=+ahK-*d%0@RqtdFXvpt^D#H`D!} z{z6+?TwaZl)4$sCOv0V80+ED=B^pK7oi1Wh$9Xnx(G>K~lI2YNgf8d>lq4TOTV4A% zQu4flQCRitA(9x%QMtO$$8=@SPAq$VLKb@6R&UcJ7rVi6UGsNc?*jR-xx}@j#BSGf z-&kvzv2}tH!k4Ok*R+uI^cacJT94o;vC@@*Nqub)lk_*KD>Y!#i}OKKl#OCrNhcJ)VnDbYGqv{d3N# zj9V}mxPM5m_Z1{Z6(^`6Wly%>0VpvFUSU6H@sZ^k>Jz(Q7uK~icm#kjIzRgJYI`A^ zK7re(;P-fkHmyrKX80e%YIr+kP(3)0+WshZ{#$Xu2mb zJ=s5q3>le*rFQP7q|Ak*xH5;9r!AGQfwE&Uqz49P`a!6QUYT^1#X*>OHLF&p+oD&U za*G=rGgj5-kn(=#TD*@dwXmBP{uxr=@3+G9nPtZ@D}3T-+D&V4OdgcuxB!F4BvY@u zvl$KSoisx|ud2qTFok$1AZSIj@5DsWq5cJ!h@*GMeHVpeLf;f$#X`Zkfpf|m(RGQz!bEwH*imN zC*q|~|3f?doDstuqn12azo?u##ag3H+Z{tE@Umg5@NUlHjrRsax7 zbcq03(%lc5PCt8c<)!7@mFF|=hzyT@`R;C9O1C!M2?B`R9A>G$yqKgIK%^3 zWY+TBtN5b!(I&9LxAyBjOLrdvDFstpA8~{4b-sJNWQL0+Zv(MZZh4!L-{GP1{`w(` z1!nS^J7eEU8&DxJHzw#2oATPh8guJceo=M!5It8y=B^KZMsSS1h`JkVxPzCwlL$E$ zqk2@w!0?UKo|hLnfHkXw<1{%DNe_!)FN)ieJ|@Lq=ZaNDj_3IF#({^ln)hgDa}7%^ zMJgr82|8z`-$Y>y68&JyRqWD}<#hqZ?vN#rU9hc_QH+ib3qtH3r1sx|k05<#l{i4fefsFY=CgNjZ_ z6H4n9bYh%6T6oE0#3&Tuo?3__2xTjK`tdPHYV193CVTIdp_fZ{Xvnkj&pP9`zdQu| z^zo(kcFdDDN7v$=RD|pPx1WfmO&S;iWU@1{3?`Fz0-o+VzAx98UrBIZHw_qFjC7Jm z943c?o;h%DKe1kbPa;e&#v$AbtK?gGM|^yW@%x?7)33L;Vk!&RUaNlb*g(2>ZrP6R z1HJb|wwN;bR&g+?DaVZi2Peg65&wnr9A8(_+!zUQEKWzpafCe}S20T@1bf`Gm_gW2 z(k>C4wQp5|=cqvLU`=VZ1!ABV<8nN(XVG5wZ}1o>zQz^9bDhZwm%PR@RKE?M%W9Dz zF6D{qH3A5%3MJ>|i;%)0wP`W>JzFp&_sD__!L7@URj#N5tP&|$@8vYYQaHMV#1+*$ zS9a^HYJ8qGa_wI6CT=Ke9}d9X@|q=X#=8iiuX~alh_GQT>xPN-!gc^0xA+2CsUok~ 
zNAVTzU<#ebUn)d3C|-G-y7XjLaeeo%nrrbF|CMr-!J~-(8!rMngR2%|yQaJuFBw(? zHeg26&oZ1@sW z)6KP(5-+P2jTh=A$%SZ#d_#3~2uYCLFn?4o){ zIG3`qVyKJrZDH0kJvyteC0`{&F~S0vc*-Iggm6i`fn?k2vZxmlEV39H@iU@ImRt7f zSSgy|2r;IxNWbUbtW34n{TmDlQ5jBHzgTS*)%VvY+M6peA#~A60X@b&JUCGCHddaS zWbiOIZl3!r=A!FxSx{PMy?d^)Z-nOYC1!MMOWmeZcs(eMNd=YdM;_JGM zOd>3C(G7%>$HFZuwT9mlYsqOo+e!ol@xEp61E((_>;SW0Tdx5d*nZh-PMgK*5@CQ)bPRGFHmM0^ctETJny8vy>M0@!De80R_ zwjt0)^5r-%s<&6IG6-#=)M0RX@{~An>I9L14wHI=bS0{d+}#jWsYh|K3;hHSXm_V0 zi)?)Yv@{(F8BmnokCYnH*-zp7d|L|PBlLJu7EYNToWnSH9Z5~)vMf|TJx2my)nWpv zZ088inE2{?i6P;cs8)L+FwJnl*3$x;gZ&sUhDFvcUy1z#xK@}e-TTSOHodZ9gNcsd zV{%wGzT+93lme`xpf)#cEy*s&{3{p;C|G8uPalHnB-fvug41u5v2^GnmRbe9{n)|Y zLyVK1yxRa5zAr}mKltZ_j5x+252EbgY{t_7+4G|@Ve%I?nFxq*k;CLjX%dz|AjuEM zp^YmGxR zu?r%BUN4oyd&%>=iI~6EraVEwG@*@L6V{#@x(t_YZ&;FEry~f7j4{MXTt9GnZ^YgS zyN^@khj*9(dAwWX@fHjOD)ITTSU1poig%m#3WM@811y95dx~VCZH2D-OnV_lrP7Od zX|cK2$cyqbHnn%9K-(cb#G!<7!}vQ~4bTQWs}cwuwg%D^Z&;di&+%=Mom7q> zj`~H#)?>`_?zU1Pe5(C%d<$3IENrv&+5fJLgip$!$a>1Pjl7OwC05nS-;B@K@HiL*Tgl{2Q5UsYN@N zq?pw>st09udsxoMw>J&)$Yl#35<(S7uMW$~zvRPZ^AA!JDMt*I;a@Lv=dY*6Rvgp8 zp{KO%`$s=`i9qTbvs3;pU>Nj#Z$Dvn-Mttno+zf>aRwP!dVt)&dh)ASljU~X&%dca zckMGqp-==x8j<*dMTH176a(y<(@Bh}AISR(ZtF1(*=vCwa} za?k};sltzs{wiLv4s3{nAU_O-ypgyFsamt_F_>nhXZ9Qt2pe7r>Lm&w z0zLG%ARNegO@Kk+mh&T-$Jrg^VY@@Q99=(Ns;iXOu{Z)aBBC2R>9tI;3fURv&9=Dw zdWa0*>rcz2eXqy*Luj%SV@wX8Fzc>NFb}Hyd1hE?V=_cmk7fly!_Jj$>_{lf<-xM( zq#L6dy&iN*WBN|sI7=@7hnVjM0=boydk7<7(Vg2$pg(2RrP;G6z$T`A`uLe=L67{p z!5R(eSg_c5VYTN=*vXpA~R@;5bj)OQ^ zbm9?~uW)4FJspU28wC8tPM-9db9lLpK~-y8M$!v|_7!5sEuwIR1s{kEod{s+`7344DMAJh33WlXxmU8^KkY#&Jj1Bg2zYWY@vsyhoKV$|`B{vK2oEMy zvDjdj(CaHcKA{vI5JQYb;l;yZk`aMbDWrmI$lm~mt=lDk4%E5u#Gd}|Pkz$q>GjKa z4R;y}E3Y_I($`xu33^ae8xDvN=k8%UvE$#dP;P0&?VOF|R4soOPo`&7>o8cI3abPe zNg(qSTFKYqtPnLg%*vi%XGoq4Okg`#dfNfGGmPM*HB)`%nP)61Q1*K^FH#UwAJOd? 
zI4@pyyDiR7$7c#ib9Go?Hil0NXI2v62oMIB6X%}AoQ%_<_w?v2#cW9fKi?@qw zAVbQZLiR2RwQ3D_fa;1R=&jM%I2MqF7Wu%%gq^h?Ye6xe7vp5zYips6 zX7^=atq_JBSa=}+ZsfRtRCfy8LeL56H@vUjU-tLJA1d{mNoE-<@fgMLhDG!-f_+>< z#rzA|W3dq9%Lt~B)!>R8^+FeW`rRj#T>T_g0@q6SLIOt0WkTPDbP4nNy^fU7>F8UH zfE=YUv7_#jerb1!L40E(ggCRR#Gbl{XDy;0Pp|r7e(*j(US%IZZlEY|qDqsi?X9Kh zc=D&(GnYap*z=u2N&@>L*2%lNK^#B0*uIj{f7&+{1a65Mbew3PQc@h8xUSmNn_e|k zBf|_4dA-fW77$!npHwzoJg3K_Z3|Dfm6iAi#2*H3RxSZsjj4#inZo$&r`jLJ+V;C% z89I3(t>^}F)DYMOdj#wyCOp~pQj5dLI?O|-t!9QtMO-!GV-9cpwx;7qG;0T3>Mr|M zT#&6^`;h`eJgnTtHDr*WU$ysW%T=mYEC9`?+qB5(4sE7r)#OafAL|Q1P*qtiqTrn* z2XiIGcKm7k11~Y5&Z}3f%K@En!vFfoulb*!#ks~b-91@Hffp-7YI%?WrEwehE=hj0 zI=>e3_a39t6nz!(Pyl!cswUmZ3qIM41%A+GW-K}Dog|SLA;%p;N{RtG>5AVi22VOz z@ZGM&df|)`;)xf$2;dn1{Ri#hrC7B|@XQ^U`TJ&1$?&1thhG7~Y`+jG6f7tBlo-BB zCCNQZCqi*iu7`LX<9#Xqdx*{JdCrSvT)U(kfG<-bIHaF)2h~Ep*#-~1Q{3Z#4Nz=~ ztM?%gZk){(l#@J_cs1Qc(u)B`>09WbppYN%w0QFuS?{641o#eEC%DvOno2AUYscOc zFdhSe>#MKVuvmIW%BLSH-PsfuuQhMEw0$ zcpD6c<44HL^Wv#6_P2}Gxj~)@Xo9i{;_ZU5cd4MoZ$o-|fGLwU<)US&2C_$3sdz;< z@QKssJ8{7{Sbz1~uleam%6+KSg~W)9eW5A+bFrX8ix8do=*CMeC1;+DOmF@)3x;2M zE^&lZ>?c2n-RE-o9V<;$F0E}iccZ-m z#n3J)Y5gF^2Z=1`h?l~Tou#oK8#iok#O9tpzR>>I%3?aQc6~dZ)gOozg@wLJ9*)wH z;-Ao5z2wfh1QqIJ-*0(2-EN7YU)G+2AO+F-55H4(x1y4BsF?D ze*59u?JMzHl}T4$t@!LxyL7>m>W#GV2%z7F6M+(JU%OT+RUW?8i8}iFgdI1c^}Pt&xv*T4kWdV8h1XU8}jIIeUvR94^p4n za$w;VD81dODdO4qb`u16Ms0TT;wk{X;x``YO@jF$ke_d9N^~*U-~V6#Wu8Mk;8uGh zmR2_?u$6^#5~~T#Xy2qevIX?H1-UhTEm)y+Vb?Ljm5YU7%r;Yr0sp)`DS7G2Xe|r4 z?Q*jOkO#`#;*|Lt6dRvjo{l4@)+SPT1;~I)ftt}_uzXSy8Aok>TW+wAg@_x_E^?z} zu;p~_Bc6rM)XT$*@fmZ1A9N+wxwHQ(-m+Bt(KWy%-G0Sq)al_c;b1yh13Vc@p zQmpSJU)aYVju&#b-0(78Z8u(w*Hd!$T9({JNvvZKY6+^*OWD3^-;LSbe52-E2$v@9 z!wdA~lb!>Zmjw{WhtI{w+I_4lDC+Sq*#2DlM_V=IIj0V>aZV^zOIjnQ% zrJ7XM@ZI7kvis=wI`J!sT66ys>Pm$OZ>*@eC3+Iai|>dUT%WoG&9YSjJ1A;TAgc7C z9*~xMkOBPZ2ZHpD3uhX=C0HE6h<&40kq8fI$nH=wI%bSU$FPPHkrU(;?(7mjvZWAR z^31-x*xr7}PZ(_A>>Q0sTb&^#jpYD%1el?;75Co?!yRNxT$Yq=^XY)KfZpgCU^(gu zcGB8#S5TY!1NxC%H*8gGoC3Un{Kmq@C0D=WVc*Bn(5di4p_-4ofcT&2lbt^a$ zL`sGsv9jy1VdKwUqStmNgr$K$T6svHrBDW!(FQMv&bWobj-``sw_c@*i7a!~@9=5QE+iQ91Ra_27kU5N2lN+G4A~RGzZ2#WN ziKz=AU8lyf096Cq6d>3S#_a>~{YM#6>Ku_+5rm8J;&5X87ARZ$D)E&di5 zipYKJ6nprMS6fUrwQR0#BtA7}9}1I6#ZLttQH|WB9Gvg> z6;mils+8W}qkVw-9mOHU5Q3d}{#NfYa>6C#)r#A++~9wI@ebR!A3GU5 zi`=7_!X&8#^RdzbX3XQdB`eD7+N;10_es<7qc2|}SvUS1p&uwG?4i6)stO^1w6bv^ zUD#22=!T_j6Jc#-%jbwg#{N?C+r~MYKjc1+A#*Ig?H*pl`hZXU)cGFmkGW1LB;#EX z#epr5V;WC`0!jxqi`tPc1D#EO0a+^{O=v*^-13ylU%NdWJBS_NXVE%Nnbme>xkZzE z(}Vfv?k!#T#prvTt?`+?gBtFEhaM=74M5Y%TZ`#~n>B2o#5p!=;g}TwFpH9I$@L+9 zPWQJGZC4X_=IWWxool3~SWqHl>g>E^ML4-lhqGEe;#jmY-TZs87jLxL={Nn0aprNp z8{iWdzF3#K!qD5L&f|;+hP}1vN9~SIRD>};Mh~J2yz4zU`MWviu#-!#$W>9e^TeD! 
zo(A%(Rn>%;d)yj$gs51isi5sDa?2cJygC#N~PSc z0vhz8r|QMI7|?GvK1^{BK;nV3-hxDt7#U{fI`zlN_< zOBHifb8NnRVx>ji*%^~5PKB7xCsbnb-$Z;sx{Ck_t{N0Kg!1$~pQe0;Z6VV;{z1Wu zo}KTlSRQkvQ!zaA&Icd1(u7A9QMYi>t>Xp>(@(>VjzyB|f$+tjoug+e-9wJm$P7a` zL%@LXNcfsJLPjxzK)tv5Hpy=uW$!hCk>LeXlwLiFU4LH9;^XzIBSnTsNT;;jiWd_5 zm17hi3VY9;ljb{0wgg$L#APsrN{u4W4fJTd2mqL*(p+E}NphEq5sVum9Q&NH|0aP` zmx&V16=s5=II&cbe-*c=6)$=xWNwC%2PAw6u9sI&QA^oP9D!I~FoQ3};T|^{@!=Ne zCnHyBjJ1rYB4s*B{Bzt9k6NM-xKM2;mX9gh_mLHw*C>I#O5uVAF*wMD?$Ls@nOVjW zq2Zk@Wh4iPknB-Gt7*8PK)t)I4>sG~;$njr!KaDgzSbrzK1(PhX$Jqx>LHpFDANnW z9b0bCE6Ak3y#ltox3f`x-w!KAbu5tgaM!5p3I8GlO}S;dM?+R{VEHp0%ua=J4-#yu zojhfWtL=8|ABpBy{hHp55HGBT7{pLfG3S~W^g9LI6bnhAohGwmcl6L=fGVXXJB zAA9N3P+U5|RIqUR8c36ei8kIS4&ctd+LGyF_s>y`swMA(A|2ifQ?3_7u$R*X3-Y5G z^;Rl1Vwx01dF0LLl|i`Lj*~svo4^2FWMos-5jguXPfFcfi8bM{C?k)7`iU5F2)?>o z$Jp6b{16HV#v|@P`LV9h31%Y4T}Q=lfFOD4o&&U<-%!~lWL>~9sxIGPqpz9_dk=7} zXEwTT@yy~f1u4_*;4LU4zja+pO#ZIb@F4thE5@{xd18sgoMmNn>0<)r zi9+buSt}K4WA`t{D|$G;8d2k-WV(FQRlg2Y?Dg&mLcJ25rBC&8R5a&1F|^NKXwQV$ z!e~NpL*}rt|g^D4VfiPJJ=^5#S41oUAnjOKSiFf;KDiw zqwcZMP&8D{z<@f+eKY6ZgE|5pRCb;>YZ51fdm`RDyV7PCZpB~J^&l`EUmh3jzzT?u zyZQGhK1NwXa-)iHEErHw0+YZEki$U;Vw3Jj`M3ngGFNd80&*ALi`fEKfA5rPnBRQz z%O^kZyDzjqxZro6D6`eAvlW8@DMQng6LXk(_+z6g20As}3Vluqmv>W3Dm@!uHeS>X zeY!s-ap~#bw9_B^AkNhcn7ACDe!V^Uxd#rf{6;+Q^s&5HDn7)z^jw71+woIur1oy? zFdBdh?4TfVon3&RN;$ba9xOEna&yzisPn?`Cdc=?2ST34wRpDLs&Lz<_1?2Zh>XOz zNVdfkPoqygp*;ukO`*e;Fd*&KT(S_`0KLDJh68r=eqyh0w7h*gO}<2DM1JUKw_kJ< zncpf6JRf5Q@uk6f@@%mA$m55s?>c2Y7)`FoM{!=oI8~gs6dmtu7=I4#+Q|+XW)k}2 zh2tK{3la8WGCy@7;N%r$hl#gUR3l^KU&LFz$)xW!wsU{x{{qdFeY=^+`FQx*har^L48 z1*YcHhinVCY8<1d--iL<7vi7e9|HC|Hw(Gp+)Fol9q^nT^qyQ?XsgTqP-2&B)+KdV zNJh%X9&izzvn$AN#5XV4`Ww5z$vb;H#o-qds7HaSJ%5&(CO^k#fmk>8ePJoA5!x}5 zHqlA$X}Ev6RvBe=*MRy{i4m4EnxiUXsFPwPfwvI1!C803P1S(HarE)5m0Mh4x(V0_?5ClcFx`AkUmySeoA z=W$w}-DeoCdk9kdBZ|oE#u6Ru2fLH@h8zJsY{!3d z17;fKjMzRg)VJG*?|l$MK`;cIu*W5!!vdT1d;0jj_G~;9;$1;*AXTxn4#5#lNUHcO zq`MGVDIEmBaH|5y%(uL9pHeibY&#&~hTY01EdnMyMSIQ;#w^I~Us3wUIWt2p#CKxS zstWYT$^f)emsQ71(v!K_=4WF+SeJ7-2hyZT%)!1xMlv>l>(?)bFtV_da6pyJW|1ML zFDe*4)^b&lYA|LRt9jQ|vB0e%R+igjj31#lcS?0=aF+5^c9d{4UM6(-lvo7+=gE&^ z*e9ECqjZJk6uCyEK(QntnRh#Bdf7(A6VUvbWv7w@3 zn|mO5aMtTSv5QNdloO%Sh5CZ_T9~M05i)(+@hCy4v35!u{1r&j@nr0Mq|5--?3BT6 zWKU+0FY8IPRBq`UO2=#xSRwJqvn;Iyps&;W!GgEZtwkH#O6>sau^~0Ce=9u8he4n_ z=pVHt4yqNc76$|3H5uT!iToB8eb07eka|I^GN2Ruu^6iwgu+Y97@6V^Tgph$^2See zIZS4E11_Xkk$0bd^~sOgyCK#Hxq!=lLzhtqq63lWJLb*D;v*xg_S_yLxK;>{_tJvczEhK%G1nHaDaJCdx!;n#N|GNq?j3v=F z`JIP8r4!eQa^Y>vwBnwtqKkJ!F`%;?t|Cf3R|{m32Eyy{kPGeEP$Oz%h05D6_6$ZX zraRVxrDmxgz?l0r5?10-Z&Y{dOo`_aUsij7f>?B*Lugo7LZ7{G>Cy}N|2O_C#`Ok) zA8wiROdghbK(XsscOuu{q)f<@Ph!tDQ2VLuuqr>6dHGKvHB^byz2{kNikFbif$~Yq zSSgJvs?UBoJ6~dem)^02PPa;GfM;1~!}v41gGBa}L6!|_V(DlA?q_We+P#>U5}G;0 zuE4+vJx&mo_!-TXu^ms!X$qvoibu)ldn6&n>=kw+-0wHO0WK5^N3oe{$l7!pezcB=B26K zap*Azv&E<^9?n_(8cVNJLv0-*2jJV|Jhs125q`Oq=xOiw6T=bSL73fQvWnN>P|O$F zT*xTzQuA?PDL&fD(>_xrsi&7j`ihlboo=_L6EYZ6O26l~q2lS~663sdp?xU^*3&UY z4&^Ui;xpaml z(PI;O4Rx7og~Vkf{m2|UGqU<9tX@~hDjus>4`%F0isvs-D@ME^WY7z(>l)+0JM-nX zG?{xUChECrw0H;mB|4A2A4@GOcrEwj^zm%V9u(**_^h$GgQL6bHc(puQ1K2axHBl` zDx3*2dMuwXV{t&v!C91D80GQ3_WwN-rBQi+5J zXsK=>B9g@#0xyXbA=YoV3(v(8KrQ7)@TO44Xmak(6ww(k?`?7A;>Sd!()W(6u3}Kl zVtc4m@hJUh@v-+eP6~;34H2qE4<@K`LcAe{iz~vhHp6F*0ZPfllFUHkEfF8lH;Sx~ zAy#9mg7?y_5lA-NjF~)a8#zb}!{hTLn@uh(>+h!@|MsgihOF29xTYEBD#qPm`GaAe zR?VrB8ZNZ2#1tshdstFd;ZW+zNl8?Jwcl9Y=LvnD472hymi?#O+n;{>)3!4Zm~G0$ zD9G)mHww8Om-_a{R0EK}R%Q!dz;#qd4X*AD(`% z>Hj?WrdMG?##|AJc^ruQgOj_%lJ-E1=~&`xF%h>~eR|`_;Z(Bi=MAA_wxpT_f_JOS 
z)qB|Pp^Q8j+O$7hr&{ks$y2l$6<9g=8E`3^t4|>IU!MrhB_&JVj+uGUVu z{stoS{d82E+A$kEeLnU1VXM8l=ueUp?uA+Mpk*41JOT1KW59TTt$;2ZG=*z%FUo9X zb?g*A(3q-GnGh=Nc)6qxjTz8Hp^T2Gu>W36uqTye_XrYTx&IT=gGf_PV%J};eIX07 zjO8r3i`}udQ4T~;gG^K9aI~&Og4h_E5Kz7KhcOz6Jk&z4h=|JvG?9=6NlFoIzwIV9bAb#?)8u6u-VWE-3w9@aP5qT( z*A&kGAZBwG1&==!F>I&5jL8Wit4Yt?kK-(zc$Nqe(;lTzwDl8?jM$R#%t`2GII!_d zRUCCAW?7*ROIVN$iy>5%N_?WnLobq)LM7w_K5+isl{4Bnju_gS=!Z3>v>^4GVyVg2 zL%(d-cAC1qcaD9mh(z}O+-;OpX9U0WzLll5o`ubyen%qlQ5={zoYqP8E7&aMYUsSY z_2SWyfkZ(*RfLaAfT5IQKSta8Z8CkM6@pyAM8g*A;`1Ny-Jh;3(&YC;96;|hGlhAWM6a9kA5yl8oDx<-Z2 z?mEY_#f5u0>FJQokbry`Yx|pa`dvSp6fILQ0nM(&V|dAVM4I9U@mcT0Th0Wys8%em zVQMUM3&r#hdv{e3&Iqn?RCELyo=g*+SKFqLCj_Z?jK(`NBe-SPh0EEx|JY7{?b+Z7 z)Jk7+9TXts&W!J{0N+C2#GeTBY zp#G*sK*|2a$Wx`TM9Y<{fJMGu;`u!>{2N)&T5SM}(27X{ZIo3iQoyNHQT7-)%O|Tc zq#moNT=?^CdMXAgB=C+0%C=#$7TcSPn}WjWfvleUkVpco$#S_Z;jCcWgFlsLJ6 z+TM#732xjYl9p-=%TghM_#yE?iZ5Gbbd*b4gR}*90%PI ze!_vE-c$&Ge%k+EJN@$*m8i7of~y1}CPaL;#|EWoKReUjiZRmYEWUGi($>?pkxOwN z92qB478g;x{K~je>u!%U!j(&9`390*D z<03_`@0U|}$bGx!ItZ7ZQhVa`m#zPw-^A<(oL;xm5NEN3xwsfGE6<$}yl(+xv1c!| z4`R>M{6RSZdsaD<7?0e&#UG=LEDlc|IB{x*n_iP%^NFm=G+`FkU<#trYFF|j zmE}M+`aO}h9BaRW4mjrvujrbW+CPhV|FaZ^vk$!MxTPvQ!@}g4IHv0Dz!l+vQ)G6W zA{d8T*B-_220Q)^fnbg(D6wS%w~i8Xo(M(51YZ?3^YjN~4Ekc>ToDM>m1y!PBGb+6 z*ceVv;qYT-y1p3N)>q|!)kp6fDCW?nak zLn_XA4>A48J8gE=&mxRGxSy`0Y8oR$d&CMW&#u?p6+|7gQBn)x0OCNj z;xFW#OjJt+ozM`SLSZFM_4o4u)iy{NM7#!@jlRPYxl*qazk-MFZwONhm318gNxT|w znwohrd-=^AoS5*N+Z@*YVY<#-Xw;U<=4!&=M!RY6D#AOFRlwh+EPp%)qUHvADYbo1 zE{qY(u+w`*yfh3o7H6Yp>+rC z1J#?h_IKh7JLfygaoOS{st#8?h2uReWkF?q->=X_@6We@MU_9$tz=O_&kau$%2_mo6hn{r8eZotCZ5OLLyQBm%t)#>GsXQekfr%>7)H0AF~$ZfVwPTI1KxvtXrzEu44^*-cB`{_S)*=a;(TA z@8xR3GDx*ICi5rnzb9f{wvYmbfB_on!lM16Jm~eB3+>BJMo1)N1<s;(vwM=XotyM~rld*mv0Y9@LCb1U`WQQQiuSXp%vDl+)8G}sfv|WbJEe85AI(-q3|+Hy?6#}sHUdj@i+D_ ztMAnGBUx2qJ9B%5#8B+3#GGD3Nc`&+9O257l~jED>3@o)V-LWULZaS%x4m142ynz9 zMuQ~o=N_?#*ijDOdlb{%;DsQ3e#e5Sr zWUs~`F$Y~L`^?dd_Yjm;Sfi9oMgk?D>Ay0PZ;GE@B1-qENwKA}QL&|{n}TX?k5QSB z)UDx{+mWacG1Z>v=%z~4A&CfFVl%w-BUjYuRFBJ7;%(R3D=+zci4TC(pX^zEDn)5$ z9Bqt2u-d&pN$R>wV~^?AR>UX_acfWkp=MXwtysfJ+K24Mzk`!`E}~&qk)cAOyF{?X zT*{QZ$OA6T$6aXU1E=L9DnUN^jcU=x8|K>=;?2M!D=Qx5?6?PfQEpGaefld>Zen!= zMaNQ{yM3+4(H#)nx_5CDEDQyf(LGQWC`Ziy*(qtICpLWgLHDf^;6uS~^Bz>I@2WEINwP2!g>J!0r5mS~&gAR#d(TpW4Jx&ta#7arcMnoT_p?l6RQ*S`BD`Sb| zOGq1f9|@0kK-u5Gz@t_}y#JO8cpv1SXK1Y@A37Cb1jteal?Bd4khQ6P55XLCyk$aH z21Ui=Jc)Mlw=qdt;jG5{pMH!La{8d21t4DyRign&5`7-rXkb*rcM0iyES z!wRXLP?~n(iAMd8}*3>UU1=wpo z3bYY{7&JdhbAY=eac@kLYJY+V;Y3@+6r~&0{2|Sq!OB_jf-Z3(P)$1pNiHz~=1>^8 zvLy6|qJ5$Ld+*(xDn}G=oFwzR2wWw>O+migc=+G{cVmPJ;?D14O#grTI|Sm*_Ijr8 z&!HQTLtYRV6ea8ZIN*t;2Rr$zvz$$n+k^qiaU+H{STbR)8gz7elf znC1I*hKK2ioJd`{478k#6VeUSAlApbMFfEk>04I})YUe*GV*qozq85g2nKZsaC(>QEqS% z;wNobm$Hw&n8{hGaOF}cB|wL5ElVQ~0Ec!ewSl-? 
zKCeeTmd71d0uliETnN|~9@vo>$u`FnGvns4NZp}dB~*&S)0xI_-8nD_F1l!L2Ny+L zP%%bOgv6bB20x6?R_Lu2%PZvI$~DjCa0n9>bFRvxFsVY&JAl|AG}J$u z-50iq@14LEq#pf4>MS*uOh^|--Nr^O6Rp0^BIu=jT)5CKSnzsM;u_zHL8FfMPab&f z2y;>!@SxP0SwKtl%7?K59D!eTN8fq_bwMUVZ!J|0veYj(unx%irkA2s;ZdZ?$PNm{ z=DRU0^<;Ddk#)>8Qhej&>e60v?~j|9hA`7&i)8I6ULJqL+ESo4#a6M!#Zs|VKK@?3 zSD^P&VqBO5x7vcwC=;TrL}$FjV(14w``Bv?aEux+Asi`$QsWa#!V~3$LRpBhvh@H6 zwZ#SJ_o@ds(YCTD#^9`2G4TmQ6!bkWgx>do7;ImT_YM6OZ##WFgE}3iWRdFgQl|Qd z`E&ZSbGI?zOy0iugFL!ubxJ5?x)STQ+DbQ19Klc8Zd!G#ZJ|_w1C_lM@(0@o_8-@0 z8fcw}QlGW^!!xHoy*Z(n zpCyehmk`!ckTF&8IN~vCe2$#MQHXy*(AAKEu4#TS=qSr|+zX(3fl#|>jh-CqK||Si zSXk6L@!u3nzAgzj5Vs3&#|nLOtsqcFDIX(ody8fRaCi{+76G1w4?EbnemADO%Alw2Os*0uiRsb>W2;g4 zFy&sDMtfM~N+|rEG7_PE#R{K3{#3gVFGNhrVWM^`G~Jy?-Cgzi+~%UYSE6TwHFCQ# z%z)iOE}wpj_G3@Jk$-+2&GICP$hufYhuF|w5$@!xzFu$2lqD0;8e~V63JFETFJ#ZH zXS-`6T69PFI6j1ei+vm~KNeH=g$@9K4wEFLL@=rXsd>Z}+Ty}=JW1@Anxnlcq>Gx|0_H)tVX z?Gna)_Z~u~5F-~!o2HYqA3xJ(=R(>b5MT3y0h>md#f74`62LuaC7eEwO5GpI%h)-x zc2qez7)%%_DI{YX%@fClpqNI!L%`;p|LKfq^4Wjp|7)KO!57G_O2*#EdBr>TPWG%` zIDsf3P!l2I}qRoEtihbLD<}>XxpNWAkwrS5uN~^3r+&=1* zbB7(Ku7<9B$(zI02&hzXfOyD+R9CHUIe`$5?uULOY{gheC>SJW|4w@$_TsENSqW&& z?&E}UUNX4@L)XC;8U5F?eDva-wh&M1-aP6sB$T?7?|$%X43cV`wKj^JtD2~#O4oFo zj&G~>7VIh?j#V}1ID%y(CV{_&^X(m6?r_>+`IVa4DNpwMgcN>d>BYrD03Y}tv|yBw z@QRU@qMmc>JN^!Brc`>)s#VEkcmPFL_kt{*ggD3d1{sBVsl9kPB!ygC!yuwia7H~V zQbc{|IT-h1NkO#-Z6W@`@WZk&jv!jWIKFrelkHq-KYSI_yj(fs6lwZbRIFS##PGO$ z3*st|)?9NsGra15IXm*F|9pX*=VP=p^sbpJjb$b=rC6YnAu2a#P%PbPCBWsCA_$tM zbBRU~qyTrPmiLHChMt~^1?8?`!hJ)Tj(K0AvQb?GYC~x8;`*KJeO{$Y0$pdTEXYT} z#f+CAyWAn)^_e_6*@BJFk-3+Lb{dZ@H(L-+Xli3WwF^V!OC{f8#L~}L@*eRlMgUC{B_}aTkz;Pg55{(GiDlgzk)X{QnqiBDyQ&OpVO2g4GeVVC7vgv8W8DGDB9)ZN9E1WD5L^4PNMNMEq{BW)< zI^c|a5WIDkruf|DhFMeA46bIZ^kiFDjsIp(5;N*a&$%cg(QramgryR~Di{`BjljLI z?5y^IJAeB%Z%)rhMU%zCc0AU(MryD?xqTNE6nvPZa&lcSUW#AtJW_RlEX3Y(Af9{_ zW8==|hkIG^NdqsS63tNXxacPt-^XL+I8_Om#KFB6FB7H!E^2}4p^LG{6sr$>*2>P1 zk+`2y{^1Ai@`iDT=k6HC${ov@45;C5>0^dr}fvExZ#SLL>e*xp9xY&_18D_wseL4Q~Q7h!P32F`3Hxf819VajvUrF-a)9;c?;R#Y3 zUl^xm|9i~e z_%D25bEO}Uey}XeQ*C-OMhD8SME%&8UXn9DQz9rnx$nfSL!#3W}+qJ9Xl z>t$~scd`QQuI2ewTgtM|J&JNU77$WDim<{Q?c&=ZN4<#KBQ3W4%qxzs zi_t%nF7sLn-CxhQgTg3`JM!mvSm#U(v4pbJL{BY1U&Pm8*us=LM_6EMa?=5*Fxo9e zc;Q>{-t*4xCFH1Pqk?ijwku4FMws?QuljwTGA9^Uz<3_ZaT>+-#72yBvz7Dcmb@bZK&O&2WZ#oC$pD#$_xsxvI-=#pAH9 z$(0lhyTB4XHF$&kBa7e3KDoIYs^u>o9ix-~2Ig6d9AhIB;W;WiS-95%^{bqO*7+x& zrkmtO_j&-iC#xtj-#^n2goVTE+-E-VehCHUnyHUlToXk^=)T5@@V53wXH;d5^Zcy4 z!(45zyb`C1yBuLF2G!J{IqbdEQ%glW2q_1Gy7bI5F(@uoDHI0}pMBc~ved2aH~?5ZaLeB3XJa!@Ha&W;;-X02SKAEc*K2ZvjEx$5X;44--v#n_}$m|}hV zC0@w+78r%2w4X|mC*s@0s@%k{slb7vo5jSWD=8y}Deg%uQ@BD|?MdAD@M3%KgOJF% z_W5`;5a^5T6$|eK7LNR`kWvNQmnN3|Cp@{mV&>0)VGM~{_<741cs_BPJF;*{!Yff7 z%J1g>9UKZ^DHfta^bV6Ex%@gdTj-}C&~JL8s#;0Z@?qJnUigKGY-lUuGdtmP{4;QV z{&UO!I{Ob#d=eeG=7~)Lz;imOT^=qMG}E6x`SG~HLk696op=`zRr_0%o1;nmrI;MN z+TjpZVoK56bX&~`j0D1*Th41IW>`JH`+R#QB%YF9!{pEGVk#g#T-mYvpKc%eognlG zN(Kt9lh73FrdX1vk3}{t3=T}}fupJ`#LKowg z(7IkMmG{xl88zqrQKkP1-mWT|K%YuJv68_>VO=d0>REb@ih2w3spou`cR)e%_m$dm zj=^&J_=OkRAH>Fvxrlh&YB(Qn<*r9JRs4Qc`7u5R;9Z~bUw4P?jd*INk?0KvdaC<(r*NVuzR zp|RUNHd+hiV!3QvaIt-X4s(}IAmV8O^G@Z$B#}47h&=PGQoKm(G8+zLXW>e5d=;|u zQr%(8t&lLSw|CZd%3a1)rUqZJ5$Jb3uv*wj#1634%&5Xr;yOO(m2XqR8w+_*_KSfO zyzFER`~uJgtsgH^m`7!@z2w^psX7^rqGqJ46?)_zIU>S9V_ga9iX-F)+#7U0V0_M8 zRn*HXG|A1UHn`nP_s_^|7T+qiT2+3&6VI5DI z*#zb}iAS3kqi(0k2^)|-0L1+-lfjsu4$43J4cofl#s4j>@n$>mQ}$TAwO=f$+YzC| z(WYQo?7%ox|E4|fJyC#+L3@?(SxRCBtF}Y>Da@qUsIyWE-fXNL?(kk0)mEP%{Lx8h z4q`yxL7#SK`seW=S=a4Rv4z)hX?lun%tgVdgG?F@kKZ>gj^%~eStX`!g^f8NrnpB4 
zS)~+fpH;k+-LybV5aDzF$9lV4Fq?5M=^3ZSo3k-o!wu6t8?PZvXt!2g+$FralPXH2 zz=v(WM0vO@0f%Cl5Pb5j%JI2Mi)P=0%1*|*b`6rQIiG$FXA#h`P&lbPK70^DDXrFx zMx8o470cYfSfxkW)qKTYsPv2?7k?ht!^A;*Erv%KhL75lPrPK5HzTQEKLdE)zB z6`i*%*VDC-qmGb_^4*|FD9-(3|3H4*{UWRKG~QnyW)_xXS%j{HWE zWvYPSd2<2z)s*Q}Z>jan$>lq|Nt({qp$=YQzVezA(;6Xg{hrLjj4+{TQ}#kVQd5{u z4Jasg9}Dw#n_jSZq0rE@h-t}7;B4sDt97i-k`3jCc_C&=&_qq1D*gKe9 z0#1sX144zH(zAH;mibhB=DE1VP9Hzp{xJT4bnb?K@XY6C7p83!+(k)J`yzD(=r8qz zLMkBWLdY!1d0ObWMAN_uL%)4Cj#f_|zvj2hi;%`;5uXEdVYOMnxgm6qo~v%jN#)(K z2qK8X)Jc_3N&^N%RuBY8E1=z*x3TwcPcO$3pV`h707Bu%+)f{V5gEzg^y%ZfW`8L# zP7YI?7G<6){q&dqEug~g?*C|?{;UwxxEz$u<)6pM1^H1jszk$74X{K=Nf&)oknyY~ zQ)z{VLs0@cS;u2|y)7)h84^RV`f3^PkGgXg>#dZ@lO(Lk29dnQ*98hU4s1#{Y`<@{ zC%^XoaMV?7SrL=mLxrvEG{JG&iOc+t+XZjE(oz+w3UMTxqQ)>_s9laJ;02WJ*k}j2 zzJJ*xv=u2ieH;%|37sqz0ihBG85IXM%xo0Z12vL0x|<*gcBPiKgq69 za&(P^lc{o;(HT|6K|EuDZ-d2{xg(r}io$-!^fD+>dj9gLs9hAxk2V_TNyW~>Dw|!M zp8CTx>H4o_%aL9e!|16O8KEISP9s6v%yLe6vy~Q1XR#DGX;!a9@Vd#E@l|0gxxV-# zkVePwRty(2Qix9OBH{9w8f}HZ!r*AdcNx-Bn@z|<_7=L?TwwL`b-lYqJc2zJTYF40 zE|)L`hb@^KTn;#sYD`cFUg(-#Wk4O^;uem@!s2S1_ViI^{g*ZgBr3OV#%khGa7z4r z5sN6$%_SVup$6f%Vd2IPwEWjgy7X=3R+Y)#q%k8$DFjh7FDt*ay!ZxfHl_=sf$+o{ zoHIY4-XAluSMzfC&X=I(+B+x+A%%-5eVqaOmDS~TC!V#z-CL-&NI~5t;rv(PMPP8` zEu$C2+zV34@{Atc!p>F1xP-%5;IEeCqc)4_++$23qQzduC!`i)A? zf>^~m*U*H@+hr}Wr(Y#O+EZG=fH}MyTizGRZzi3Xeow_F?HA`O@olPa4>IqpM<KBB#jaRS)oB=$?{)OUB1`^b z1#N5~#0aQUOz(#qEK;Ypx*w#nMJ~PIUF!&^RTJ0a?^ItEB%PZ^ zb)6J_;^vP$h#hHbomY$P(Fi;n)mLH=6Eq*f*({Q+x|@5BB218>Sgjz+_Wt(IV%bDo zI!gLy_Y^2a5p#afW;`4Ua&eaukaBPV!poX3+apQ{N&1ILx&)ateP;!;WQX92e{i2; zj}co=2)j7n-t;aK&45*30in0sSKjs$2J4*+MLJ)Mvxkw1lc*ut_q-?&P9!QsbuQNxZJ+d#d`J(3l(zlX1f`OTyuC7|9;TETxNr4Jt~AQ8o+Z=kW!s0 zmhzGWzdWTaZXQzA;+Sz|gDF$ zx={ywgAbp%6eGpR`lpuhKlm4CS|=ox%CA2m*DeHVqhH(DL@}g!%EYex-^{&RbEMaG zrWx}WW-cb?Vj{L^+ALBeRb(All$shPfUE*YoQOnKiDr>fW+DM33qXQMWL2SPcG%K5 zCc0-@J-X=VcJvKI@gPdFM42Wf>KI*>{9p6D`@=dU+wDV4*kRd}2q5$O_FjAKb$r)a zAw@`8)8;c*({GI?1<*&j&6v!TP1zIYH33=Jh*TJ+n z_=~jkZ_0xo*>aG7b*QXq3;GzUNBK@wv++wVsv&^ln`3;pD6i}t@zy|v(sb*JZAvcb z+KMgb0O#DdPn4=(=ADz&L!y}n6lA|6FHe?>JwVN&9CQWDyTf-J>kB#c*?9FebhNRh zGzuv?+k)I{)s7i)xu66BsGv^RKFYF3#OBk2=@XGHBeKhlhmgxuI#}AZ_}A8q)4cBj zlx6TxcdF@})TDXrNC}98uK~{=L{RrtB@5m9W26XM5hUw^MezQv2-9r5>5D&^(7VQH z0bA)FUP;=myy$0)elX8plRPu}L3={|N8ZH}V})BB;=RUMaC@TJ7Zg@JBKFlow;qns zRdHYlCAS5a*2@iBTZ=c?9t6c%gbR> z@aY;EK9*e-X$_9(*wCP#`MMbkigBLL>IvKep0Ryyg@SeSPZM!D0?Z=ug-E-6> zCMyk4!0^MjZh0KikvQ0T5md3dVn(W8v39~1#;I?hIb%`#oV^+`J^GK2g+67A>Xum%ze9ig#77eg;+A=E9AcQxS|je?s3mk4^}bj*&wna}5nO!efV0AB3v2 zgWH#Sj%VyDUz+0oGMaa6`xh@4kB8I5q!((0G0d8oBB>SR)c92RKcUklQ|`hBv9HY| z++)(MMi=<#g!=AS5CY-1C0#&t*?}#QQ5#q6->*;u6zsgf`fSru&8|h~ z2@AL}p*U4q9+faC&5Ti?zluj5)N9!m-UZn{qnw6DAvt^J-gey|E_mo(WLP&Yk?jhY&L=ZcS#j?MD-4q%c z@c-Zc{T(1x+a#8YMl53{1t%(;LKXH*RE&ZFG7jmwEK%dmxV9QMG;&;px7catpKsxI zQW_jLbSrp^2*glrRYHa~I1+Ad{A!FYk~s4qH2`u&-IJO`fgQRHDxYHPn(O)Vd3q{F zcgN#j6}FN3ZM4XLj_%rZCs7%&Kf6+|Gba3XI|K5SS_$@=5t2ZX0($mo_3-@2WD8A+#ZiUlLLxQNnyk0gpV)xu1$}f;Op>HX}F^M-$BV4xCdrI2In2}qQOmXEZG_y3; zLt;uzF5_Js4KG8S#jn=8HGU8|N9HyR1}Q>9s_Bhj!8W}e&@LGH@fvlPo&mQ24A@KQ zEKF2EV9-cLH*%rdDK))?qpUF(x@hs(t)^Ve4nsd3e4z=68joW>dqRQ^SpZG*H+-nT9s@vhG3aC zm58;a`m3-2D5a-8O|2Opc}>>2TrYJ=#5D?j;+D%GR82d*UnQ$)JrAkRd+XxHKbFUgT+(t-sGAEU7`L(;m*bSL9CfRFOk zsN*958cvdM=#SJP4gR%9wpYuIz%P>c?Bjkar$Je9I%6U=w4}6~t2vKUPSPvV_-Nd8 zs5c_WsVDK}*LX=rBsmUSJg}xVyn|Hjw&h>KLxb%<9EnBaE!(s9*l-6e3&>a8$EPKY;+j)>EL#A+% z92&Y%uEnc+0%j$vV~Zd!849*n*O2T1g1w+23HaI*1YijXM-ZnXnFdF}iNeD-HI6F9G6JE8QuT31;3q!;WCEoGWQ(i(k00lB@qMiM;GT3ZiC|2z+p1=W!v5H-I2 zEcx)D8VPz9>+ZouyDY~a4+dZ`iWf{g7HkU^8ACZiInYO-l#U_GopjAbxf*P=t*&SN 
zAtf3fH&;6s6H(TWhyZ0A2dZ8}Vh&rN8efsR$pJuyi7*<;)#z()A(daZy;lIIJ0~r0 zwejqZ&WxUnaq>+Q9XfIo-#bX{g(-qkc_+tXz`Ms@Q9e1cX2`+-lblwXE!UPITUbKk4RQZb(PgLP6=8vC*fXdOj z7P*}hO^Z2M0PcnF9w;JYtI_tb2kb|M0w9DfZVS%oQDkf@k?s6(oGj;vn9qhzyLQm9 zr`74D4?eVS6Q4YEtwe18jd&8e7fGH^oh-(|Nnr8hZH(n#*wO^Pk;n>Ct0*0Tmddg& zqfF!T#HeoP%TMDRv4)){TD_KurI@$y6?`Zj(U(t_OsO$d3dx;Udc#cq9n81v;6_{ z#{}ZQjOtA%Ror+-3=>0UIeRr5WOso`#+4wzE?TAd2+!RCfbDgjL1W*d(T+%o>qY61+ zh$q(+2-hYhUTXKyD|Sj}=ql6TUoL-Ui$vd`DD87<1UKK)C|&KJo0AQwNM*>}B*GV^byW;V+z+TQu0_y7smN!}+v;E9JnyCe z3F(n21+Hk#XJa=U+Mb)|NMLz(_8>JLJWP4B8yLt#S04bVA>qFJWDmciaYf&mz8ia< z95APY`<Mxjr)2HWurK;WovnJ=+uxm9U{?S-=0is$VQ>B^Bd zu~Fvc?M&v0Mg?Eh@ExrM6b-SbQ_~?4V23Y{<*I#~0prhvp@RttVQV-FFt2@Pomf&3 zkjr*HR;KLXZI>MjnDIOAQZ2X3RkuXw0Gj*0!DPdfk|bByPxBsyte+h9!Y8G@f@?;R zO0|Br2<&orYHiz=3qm2)++gFVSB8SHNnY*fX7x_eWxL63!~;ne2K9g6o(U#3X&sW_ zCn}d6_QNnk_E!m56+{T`>jJ-_xv0*xXVLA*t!^$Fe@EejK<4CNN%m)gfVV04%ZE(5 zznO9GJFMj}wfI`=ot1U~X0b=CGn5;f4qMfDLW#cX8%y!y7ShKJVq$vULt`0#{MzQC zF+ejL)%LP9j>%=R@vvNDCnzl(B|-}(VjuqJ>2EY2{ttF^EqO;+NwJK1QV+>PS)t{w zy_pnqIZTlBS}7X+0kFAAnb^WuSBAmhk<=Vqu)5H?s_GYMb0<6oQcx#HVmXIq8i&%o z&DV+E*^89X5mR>tf?n8r zq){1_5I?q%C<%s9JEQ=3iuOjhP{(c<_vLRJcaZ(V6h*@xhKxe*)p3ToWjy{5sbfL= z6L;e13-N&(v$eDcW9?4d5gj$*Cp>E7jIB|BBAXs<73aZYnoQs|8EAltlAIA;EoKR0)O?fjul;4cj3Ts{9j2gv_$I^WAnz1SQ(2Z3v za`7`Fa;Ep7UQ&)z@M~}~sHJr2orpZ-fF~dpl((S$OR!2EhN6z84RS4Lfc(NnOXU^I zYnmNpi*4}0#`#2%9Gb1XZqwsCt%GzVd0V%Rpt>xFI<+>@QFFyO=2*mCX3Ytzy>G7- zt~a;+lJM`5WN^O1#?k#wd*TJB==R>wh`#(CO#S>Pj>IhF;H>xvlN>cIM3qg+Cs!jrK7xJD&ES+)Wg@XY% z+CTARc_NsMy8seg;z|?nxWNkS8L0_B8nx8k!1XWj z&$O*pkGps5p;T3{Nsh$=`cyt+8W;%~wsyrHD+f3HCAu};3HqaNiPC6Ba%wm5@Vy5= zD-S-kwU3T=kg00$8H|9P4DfnJFwW*z#(3n4c_-OQE`D$rE7pi7BI>c06*a#%_cX|~ zP1s7|ArFDxD{b4_##&i($jAm3B1k6t8xkyM0|gICCMvCB5`G#tLgsF|2a8fQ(aw(s z3PV?1=gZ|w!T%r*`K}S3@ro&g3Sb2n&lwv%*|RUMtDcH6w?>dvvJqF6HVm1|%{LaY zPa$)Ulv(@O=tHClNmXZU7}r(>knRgH-sGLD_oJNl?p|mOW%W25X>x9&U51B0o&JGd zUiPB~`_9zc5P|E(GvFiwL$${iCX*e4RKhoHVRA7pB!EPlWPyecp!tyDp-PwmMx zRZS#Y6s)CBHq1>bPT&V6b&S0!H=NRexFDz`B$2MST_gNw%J(=c@6K1T(9W;gK?*Qn zap$lX-VtDecoqq4uFoWGkj4W-h{pM;_jegjY=hI&8)eN7Q)3dbqvCI?<>Y@g%)u%7 z_FpUK?$ccWJ-rj}=HRRAVAOhpwZ+cwsLfifK(wP%YF1x{a5th2*#`Y_-0 zfO=>~CoCuAMzTm)O;dAcpJ=ZF9&RlM9}BE9a{GjA8m>tIB#8C>zI^@{_HitM(gu-f zKWiofDZ0H70#tS|B%~?|dnMS(A17wTDEAmP=>}K6S24yCLv3@z)Any-$)Z_}nD>$8 z_IxQ-Xk(O_C!cg15KmKe7HqIREcb-Sv#+UQ3WwdkhW}oZC)-qY1Q5Q1?2#8~H&t~K zB6~u9GOuvMmi4vrCx2w8KSCvr4uewHpmQ%hh=va!1U&mlnaaE5+k*)KKfqwAVHka$ zyF(9S;4t>Jve=;d?;%Vb@J#4Rxii}@6H=K1^BY^9q}!$E?M8JmHQkoiWLtuR_C-RR zh&HD$%6tG&kW>soY&Qldoc#RHV=ee8tKxp7L^gF@W-+5zTw_A8Tar@M?v?&Jz@h{dsMfl7LP#O*kl`v$NMdHz?3qQGPrgLqr$9gfGoy}*65S1ZGbCE?jaFpz@?R9~bJz`jN7Bgrw!2K~-H z4}vh<7tTfQfpog6(~O*&zKshI3?Ve1OqL+laq*}>qD-2mziV$_KrY0FaIeWCrX4~& zF=&WmdVcXMd{U2hVY_s1$8lkA%IVK8<@It%L9Z>CiWjWEWB*Jh%ilfn%{3KKzg|7Sm6Rb34cm?V0Xnh58tu`ex(g4?hkL{=FDJ!@V?t~o6YCj{r+?mFsOMwjv+ zAPLKhu|WJWWijr$hG9eSnSGU(KWp|?fFo%z-1Jm75CT7*MLSD&#@dX8FdBi=G^Gyn zooJywT#YB}qg=lU!yday_~{no>LuxdjRb*#+~%H+sirjdN>9fQY@py|_6K887{@y={xTTU)Q&FLcids$`oFeG-#-mbS`0 zWA5qcH_A8S!L(N{cY~abk=F-RA0n;}%m4%vZ&Oz4%}Ki0gmjMfgRYHt5OJ%gZk4Be zF5ME{VvNMn*0vTxe>jCs!CLno7oyczZ(VxJ*n2t{n%FZ41Uo+b{$ua#FHi?7q2IT2gBv1?;!^3RuYC7%V-KK2U|b8mp@PN^FlXP+2vS zGF3^|P)r-ad1Rp6hjyw8yemPn?_i;aKYj2)nZA=3*STRoDU26g2l2|A)`<9NZcBke z$p9~1KwBvO8%-QOu!j*iFR$K;v+mHBiU!)z0)!}Wk1q7?g_9H+C%|=|#Q^griEUd) zOYS>2;X~EoN+~6=fzQJ{J@v5w>usc#;;^1bx>>t9sp0B+NlVR z&6LB~*fN2__`)2?Pwa%`9d*=p0&CTd3GYaKrlL8*`dV39-Lyv>olRxmJ2BS#YI)6W z^=nU-SB(JNj>o?i+@fSEehRbva{TcX+=ScAkIo?%lGL&}FgfBp7*7iH5>_<1&_h+@ zM>W*4rFXHGp=xCMFyjRyM$p@%07BH6HZ#x>tm><_g-%(UH~y~)18hdy(!-x%m`14V 
zXfWKz86`j|sbCKvdyq_l7a1W+?Tp6}HdK260>H@Gs)&3V>AX+bE85Gd@n23U^U7XX(8M?mJ>xs$1v@ zDIz1HzRW#SNGG26N97T_T*|4FW40e_eRKpS1q z)UN!_j`{5u$~mJ41*9nEI@re6Ef!X;$3D(n%g2mO75r5XE%z*RtBC>HeiXN1x}vd& zxbRpIi6lK1VQUiZ;4ke37iZ9`G$hjONc-cLUM=VB-@p0I@=bfw8p50Ap|NoSu6Jz< z;QFEK%q?U?)VFUXkF&OA-E+3W=Xt}$_aSkf1mWi(O4f+P-*`a^JV{%$R@%1E$Db~b z8iiGZiNB@5g8fLFz7}} zrlOtICc>yjXm5b$W~`6JdE}nx!|#3m-P)uuPLDJe;oNZ3RT&J3KO6>mC19Bti(q@} zY?cLkJesZaY_&Bw^_-m$zPS#>Nup}y{K*TcZ@ zWSCc^mJgQ{hs=de2~80J7G#sssCHIe^1DbrBP@duQ~Pbk*l_q^y=cZo+7+P(Y6fb| z$PNRWxtl_ltL!-oHaaDcpI23-bH~fqQ>|y6UUA?Oy~Y!Bo*c)K@Zrq%$pEGXyiKxg zhRo~oI0UIk-5*;-5~Ambk~h~^gKP>-A#bYuHFiO{GCm*PRvFSm!#* znrNc36!A-mO)6~E7t7Y75akU!p0nN^#DQnNBd1{t1b-}usua#JxV@RJMAT}=3A+xJ zJl;o0VRobqu#{>O$5W695Eo*w6I&lKz2&y84{d-vAg$tc3m9Se2u7KQe?=;b|N2p! zUU~i1Z`c`J@03?eB0v1#^S_`Gki7jWzFwY+y;dQ{cjOh=A~eeKdQhI;#f-8h7IK(`&K5Pga{7WK>Q#oyNKnWv zF7e?m7`_w|sUL!Hc7_k%nZ7gqv^;I}kP@J)x=~VbWA@bDx3$q!x;qX9Jf}P+L*EW& zCUs(x5N~yBQ;>&v{;Cj8b}+h@+uQbV1qS_3f=c9I588W3pBww8dgSkgy;+`p{*TWD z#Xq8#Z=T_-(VH@VjUqMOO|T;lfZ8?{;*bSQf!1vT_;6?-L-4sX-L8>ZS)BCZe2{oR zpt700spp2Zfz5TdFi_I(BwvWQ>>uO58V4MIQJdW=3uRU#XI(S6e!{`!hX#q-!VTIOS|(QR|fPxsJCr+tZ)9jJ^(q-WrdAkz=Yjdp{(8@J`UU zjS+<~aWss`@=R*IOVnIeS*#HBlt6yA95Qu;3Vv=p3?$V5gf)a{^wX0^J4p@|?x~C; zTew`DH|+=EsS494@JEq_a6MdA=P8|(zBz4+bSfFTh?EGBOL3D{eh zVQ7?P+K3ud7)!Uo?92bRpM_^c&87HbOaqM1ATK$QU?*B_(#94Zw~r`}f(S_HLn?!F zkWCH{TiXSbp7*5t1Yck!+j`}tr6GM}31Cy>8w=OCB3VF@#;7uVm*5AsXHcktzDYxe z2;4o{Vna+i?M04s5gF=4lYSyMX9ux>VV1ON)08^>V!yhEiA9Oy9meIL5rBe4{&Y|x zzb@0C*`^2{k@aF4c5n@C0}_h^ip%n(jrmS$EeKZVBk_@~mD2OcJ!5ZjsUGhYB%mbC0lU1I^7pZWN< zECT(>bUycZdF`C195Lc}U=VNrg!~$TB2%BY$~SDQa)B4^hfVH^_c3B8N8>bctH$?T z+tGFX*$}|YAy6L#u`h_T8Rk5wJrZDJd-=`cIfLmbonXAxJYhF9Vy(T8Qv~X9-nI<> zEoFmmq7antNLsH?+hSPIqsvNRsE86^b)57|(t5!+$``U^JDj@$mL(OzWV9paVcDg3 zY;8E9h*TpTk%(yBUF8SxF5B{#%M14W>blOri!PRn7viW?`5|Ew+7SOj%Ki&etO4Z` zr_T@$s!_b22@#t|p4YGxUCNA*AcR~8qEy079rcyB&pG!AKxQ1{*eG+TAAQ;$LWmJ8ZQ=YTuj$b=Qf8V0Rp5XzpU80Gx5Z{q6GYZ^!20 z>A0G>n_Fq|n%nuL@y4z4_S?n|idZ6?(M+*ZQO;#2b~J~oVQBUNJW++S?8Qh6`n1Hc z)RGn;%gIH2gBIjtOqL3oaw@5HF+w^w0%;vk^-FJ0TPaZILz4q?Qy;uvs<-+FV{M@m z9O?cBS{sJg%kppp6{#x;CfLMq9wuy^+#H4Wh!#C zm3<-b)ZBStK_g)bc+J^d?~mb+y81UpW2-kvQv8n2b7o}}Bt5~;9_rTNX+ zIS~-1QFaePpC(H<>pM!oWZcBFsT}Be-3}*L{cl?oXy9QW_BH@w7-7&h{o8isH-V9A z;0WAm8@NI+Azp&siCn&#T*u0Z8IV%B?e6&7kCKNpAw~cf!4L&VEg}a&a!p}osFZzJ zuFeD@W5SvfZ-j9+=-12IvaNCcYPl9`#Qi=>HE5g`{SV`1s?zFZn@mvCDjrQAKN|L> zbSwQYa#V?yz0A@?Hl}+&zn1HV=*RYqQIrf4&VsT^9?n5Tdbthd$Mnvdh5v4WhQu~W z8UJTJ<1g=he#2ZHqN!&S8+p95XZ7g)+bcZE=tO6@+xG7`myQpV5FffNbFFN&+b4ugs9_#b}f^Y2uUeXO|Fy6#K}FdJ#k zm*>tKFAzD8j}8xXUJ{{puVgYzZpUS<>PsQhtkFqE<3e_Q*50e-$#^EP0doIR#zq#h zgS})xRY4ZqO~yAM`tp)Orq=vx|Ms8!Q{vW7#ieV;fZNJYAQBB?&17U9l54?xiR~}b zB`y}nP_5}NgZiW^Fd=f=6`-3C7Gu#0P|NX%hX{Q18{;5uAbT42ohZsK zVcqAZo~x`R?;7t?5o9}H<-lDLqUk7l`Ovmbo1?PUv3JAMVkG-xl~k$U1!-(D80D_& z1!8@TFER*n)v+JUp+qjuX#egO8-elct2GkNQd8vZ9;Ux=65B3A9zz1yMOiG}yY^)) z4012H@FARNETgH6)dUQZW1k)(-{Q6CDl{%SGtK~mPe-NEimM(B$nh}|O9!4H?Q4r=H7>>>)iyEIO3;$)u^si+g|<02 zfZ3?LpOTjGo)*q2xpo4b*cb6^%0!KJ|9ok0QdlNNX`$FXxErYP3 zay{@43DcQKs%*5|PAD0uB)pxSkWzOX&-C!e4?Y5pnwCa`wZJ|tVU{?8XKf>Zub=^_ zSxx4Vazw3`9l#p4JAWVCHKw7^I~nF1rFAtpbx3$*d@Ky700~Bw8a$Y~DW|DlKZpYm z!lIBHcm1_bj6Dxg0tQAPE%rs#Q1aF1>-8mMVeCWLIKC}MTVN(%%6A|92uy#xozSBk z-qF4hb-8+b_Ic%`NOi@_g-5;94YoKOpVU~_Xor-G(M&E5c$IKnNj`W@`qNK8eXHod z;<}76?yxLV1RVq;cQy>DicD%c+{Z74EF91%;dnQdlv5~vNH=WT4({H}z)K}AVX;7~ zvsQGXGV39xHfzL0;?|?h6+wq>B0lsRyp4 zcI<^{u{BzXb=*a_2J&7~BNcE`;8m>-!{REZ(?`3~+9@|xOkZg^eay=Qo>l`sL zMX!W-_u5m=sUsRB$A)to9u2Q96~P335P(jK^Yvb0BGinw~8)m3dDj~OG%Zc`$Jlk)LT4*AYG0xpjpUq 
z=idl3BiU(O(d5~~gqoh-dbK=ZC#y`8ZHSN(K~ei2G2q%-CJ1?m-J*7-=>)076Xw8k z5j=uSgpGiR2h)u8+4yOGBSu!P5Y4f_&?StYYys7%oCzj_3#Gsd6GyTp!sRD9kvok=?z59_V{;N@JP?`;Pn@ePJ%255dzL>;gNMCz|7^9WE1y50@VJQcxmT zl;!`1PXmq(3{hOng}wVCUh4vPQtzfij^MxY64=M$aV%PN-U7m z9txkE6+~;xeD0;*8E3t8+xES(d%W*g zSyt?*7X%Z4+J!f4q3Wk{CoD5itjt&#k;vBjf*0-r?1rAGS$hv(1|}AZHV&KLDZP8m{kAUC>d>=}}gGLh;Z$3u`4dMVUy%#i{&3 zBJKN8BbIXt8(5ICXZB(wqTI5Q)Zz;FiKeh{-o72ILKJTqvCoxPU$eb5VQg`7G(*6+ zN5r7=lLv?S2zlt%ho-c3KYahePqA?0v0r}N-lrs2^7NEE!Bnx}r)quq=XWal{RRJg z7DocpCKL&zLRfE~B&TVitleC-*X$xV+{YKjCI;VQ%uz?k7j36X3Il~g4fj3tM@l{? zJlJ7h-8pPW^!P{>p^6#&+TN^|wg1U(R~;LNjHXmArs}9+#)DsKyz6_xP#CS;YO1I> z!tmUwSi`+fR&@uo2X>7S8Vf|$y&XBH)pYx`byC=TIkvfiA*XR-p$?fLi>Ykao(!W5 zqBNC+KxEX{12BLY)4KsbHT)a@Isa1g>Iy-aUU+gA%d zrJ66C4Do}?Hs0Z%pV`;tDLSByPy9i>g2g(yHVSB^665K@QyH)L1sN`*8^?5gZW(N= zNg?5(WKIP8Nm0d5tC-KRv5uhm6qk-&ib*6-=(BG7LGnp8#{+0HZ zQ;H@arma(#uL_)aZVm3*j}L$H;0I;;``|v6>SCkoJ3&ijBsu2hN%8BwuJX-{uF&KP zdqkZMtMJJo_R?N1(qc8v9fMcSm23a!fMeSF=vab zp6nBJ@&hHMxxP-$CO!kORCYa-jomG3&Sc1WNkI8tZHaQzvH(49hrvX6685$g+@Jt~XtC{5>A<9X$=< zKa>MJ@FI~^#M~|uv%_~vvVRz*0&+5FQ~R9HfR~mCfZDF;j(fpYjI*HW)wX?U-bma~ zGM5ZXSxm@AGVgY9HDXSL$JX>n3<8m7nH47xYcPhvcvpu<5OX$12C>wKz?Qj4#PhL# zQXu*%+lC_Iw_P%^DQYT+nanba;^|4UYgpxsp}XBx%gPiWXgrR)aF6@ffUdkbuS{bi zQzMTe5fLOfVw6S%3Ds#AyIxb82~-0nn>y-QgT-xIsReD%#s}K!H9|1a^T(9#T}l_6 zu+6}OY&5q|C|(Y*vM}rxC%9bO@W%7Sr1a^EgzilWkda}*T!TZUVUPB6Nf}K)Zl{LK z%_w3RAnqa;YjC7MAO922!tddAa`NreNi3!tBB&ae9ZoI)9x6*+n>YGAUfzEFt#etA zE1yN|AEPf4X4qY+6)<=h#qaD9RWy0hojyD%b1O?9ffl`*HkkUBk`Ex{x#aluY$4s@ zCRcgv({k3!E5ZBnsb;~UxKL5g@bCkAo@K(RF96CI$b=CI<#;cS5N{S~YC;G5B-g4d zGMi2_su<<)s9DzagJLf{$-wz`y0K5zgEm zu%qs~Ven;;I4=UMpTH+FhMdCks(jq*P~GaH`U`#AvAZK_+(6`_^hd2wJjSiwWzOO_ zzbSbSNHD3j?I9?7D)drCAHs<;UsPf8tau?38)w$_HcLf;9r!FCZb2Oe)8y2Qp_buJ zV}fPw;eKj-cm^*GSV0d!wy~GAXU%}vIgmNUGz+^yf?&i5uVB|})XIs??0wWNu3z7@ zm9*PsJwAID>-2C=`<_INRD^y0vGVqlZk{n;+Gb4iFZKdecXpCp3gYH@Tdw{F)g7R+ zh@f*99#c>1MnU2f|NP78yTt|jLwX!9Bn#;``M#&2%Y!&=b=dr%jr zJ*_nxrAv^_1eUzF-^V^>y8xn`IcFS3d=0z64D=fjl2xN#+b@9y+aCc_`Frks%4m)8 zpJx>sq26lu#0Ni)b=K=?=i9*&3o+3~%rXghwB=T99arHpj=K3?$&TD}yswVrsQJoVHQPXsO3V33V+ zbE(tzg?xY?_uw}sOF{gSN~PFLAsM@P7&1)^$i7rxc-uLKd$0UghG%TEtqN5}*lpLi0+*544SbyQA&j0R zACQMJjNx@T>CwQktqxl|N#|p>F{~D#eGsIlL}uoxbLH_T?M!w6#Wf(uAS+#?873qi zb$gbH*eN`A9W9)i4uQp@z%K>nC}7EYnlvX8B9F(WwM%1r4fmQpm}dhYSFL_{GTLzl~hDO~9iUZ?M%{hfkff~ocz z!8V8U;VhM4Au+yL;{H(1wN^Mh^tsQK~DYdWh+?I&oK9LYm);=OKIg@)%=Rh=S1+e#V_<}jOCEX z#IWg=nz)6I&UiOJwPTe4Tea`=_TMttINNu86K08%?M2!n8p7^&Q&mSbx@Kl=RIBl} zOW}Jb<-WZUa+9+_y~Hnw%q#6;he+`a{$2LQzw$)+lKr(F7k!h%`&9jiLv%cg#|T0$8N$rKP>i(hUu z?yT&+IMo|vY2B^~%_zci=swT*J2EM%`^P#qDwBtfGY9W3s z=rP9hhQq;e()^T<*cV7lu#}Kpjji~8T05Nr1P@HaY?aMqB-cTAX;#B+;iY2PoTL;0 zr{Vlbho{YWKiYY`pNb4XRbY}+9TTUgqNMo3HYlFEuFpM>m_!7UY&|{?KcI$!|Cjm2 z-q3NZc>k^{gi3I&uuGqi?f6=|%K6mY9dEolarRb03v|WONWx)Doo3aCGFCGuPRJjT z8&B>gC4ygKYV=-k9<$4E#>GBnTdzdhHlhHBImBpa2QSxI&-n8?i?O!$vjq{ z>r?+4++8neqq3j$-oUBFFj)TTp2A*v!@eme3Bl&bJrvq)&cd!}1-aVlNg9y%sTo20 zy4pe=a@T?Z78Fs-mN$X1#FgHE*7}<0cz(e%E_UbOuRzDCTy9w zJ?A_Jqn5KT0v-Twf%Lp$7&Mb zR(h8phb^t6Af=?Dkx!QGsy(U(a6L$j(V%cp)J+k@`hTVFVRaz1x-;e zE~F$r%WP9NI#!XA4b8=gk|7D0IrU#3<=MUv7fyE5t0wcaSb2^zUn4cHKDGDqaRyz_ znrYf)TucBGpQUjR7%Q-D1S~IK3KW6rd2`de9$kNFS9|DjLpp(fd8X3(3`>@rU?S}k zO;U=x(T2gO$MO>;vnmWtGOdpY~4_rGdTEvtc+|U1a5G9T-gIg5u;WE}OA+VTLWiACfRu z>W>P*xYWvI`K?pN4y68$->dx^V+@1;esR;YOB#gt*{WNbGub7=o3RTIe@-Oh!5{2Q z$vgkHESnnmQXcb$3j%OGrz40}0!5K?7feL?Fzuv(9eID;Cl%Q>(+C9ZZt&ZYQlrUg zsl7oQkvG5GDoZQ2dO6+H{0N~DrXuo$o^-6>V8DHj2X|^lA%cRnKEy@5Vhw8h5PZl< z9Cn8;xu}StxIX^8A^=`c4V{ZQ#tV&V$~HxjwVIHT-Z4RjQ@fKo(P91RgjuzJz2fDb 
z4QA^o-H`J7$W3V}dz)~((cwz~!cUC6zz;4M6_~np7K;skX=@TzL~KveQxfMlbQa)9^93q^a1~TlEsT6Vzw?N zitA0abrA-;V_#SK2aaK#I1<0(9+Uh(;M&=Do@b_&Dx1T42}skz={>v)>g`n zS?d4x>_5f+#m^IaHgSqD5_U3DQ@QQIK6xRKN{+xlACZZ$17baBGQ?&|5YSd~lUk{{ z>zhVIp<`gU^d&A0gIbde)D6hUz43r}1+23sF{aJdH&d z%XsDr^;q!?=GR;>D!)cEg1D4Oc?hlE>9SRgw5_Ol!>_p#he8ZbG_n`r(yOgV9P0f+ z4-a@7aF%hM>3=(_R{!?F=x$>QDF&M;%tvKWy7&})SQe1qP9QYJ9RYeX%*3DO)DD)+ zYUFykC}SKo7vV?>2Ha3Pm_cx3v3-#i@t~OCSwv4_DA+Ykd`~Y#gGZGF)rTtf|!5RP_{>Hd6>(~o1qxwHG;dy%}QtEMa4=qkU}nkt#loiV&dTp zMasyn_@$M_vO72y?82!F7lGAq4-mTf$KeD+!?js7x&z69ZSSJBN;o$+Uh}>w(_2X)84v23B z+=9!)WdIYWNJNn9n3Gd<)X3HJS&hGUF%3!2I2&L?wyw49IM*p%d?j9ajrW0!u)p%h z)rB}ME&>I~GHQ%SY#TzRECYEHwaT==!vlQ8lcy4u!z4Nh)et|3;Fu646;L9D7+dH~ zNRi@M8b1=xPp#`9Fph)r46zSNuxB4ZO%-}0e*~B~QFT|3R6vy1;{k^!swEm4Arw+^ zh6L3!!#z!@RRemny|ie%p;~_iT^<8YHZw}IgE5%77~aR{>EfM3u#hM8f8wUc-mAHS z4;;)&fVoSMtt#g@n;s$GT2133e?3}CcQp<;IZDesBcbm=bL_^6!G>GetKKD^8-JY_ zCewZ-C-_^c2-JtrUac40W3-(2*lh?`U~xZmlxLG^pwFO!Aam|RIuuTXP$szI+Qa- z9}XP1J?s|g)VrfNFBGAK(e4RBvcze^z94sKhnO`57qX5cvy%{t;vfml{Sz1@jOz)7 zR|79tX^KnU4Hg}NHS$BP9$o^)VfU%&+411O&Xu5N@I0ON-<%AodQmA3^Ha9 zwJXjS0tOYnX6GZ|fg7&`FcsCa{fV~*?u&p4@x%|&z&aG{`4Gnfds z0exj}q(+ClHS@4d05TiGM{R3JU73EGcE{#a&XCxY5^!;Yr_x1!bT>AJJrVMGO*SbG z^kITER^fb>9=1(0t8GA#4e=Il#$w^tP|0Sef-OtvFMcB5=f!I3t@>857%9$hNQsU7 zMiJakjH5Pk&p(M)TQ@@`UNsrQCjxjEf>a$d4xc&AH}#R6FQ5nVWdlc^3gFp<*6Wz@Y+dLe*h3jafoLetfV|81& zB*jBt)Ew-gad!UqGibr|^ou>Bf%4dYf@t^(NQ5S8!l{N7r_nN5yvDG%Wl!*HQD+~|+?0zR zgx5?KeDVmw!zp}&_>zDu#@x#2wA(>VrsSfm={jLTZV_hRHa?O!t=_+~={+n>ali<4 z=L}@$(rj1<(Mfv+d#57%0aj6)f?wL(uJae?WAeIn%ejdD>JXj7wt__gL=i>{>q8~| zb{=x+(ZsfV-JNZ205>CW-SjcR+j4lDyvJ*6!O{5r2M1}g875K=h=I23#9D|vpcBGF zWZ-6>IhR3R!X5hrPjX*sP$;aDDi0%%j%2hQEVm9k2US24)jpxb&3HdvOM;esW6WczsGb3{y-Kh8Jp{HzO zyay1OcS0gorjj~J&+UXpwG=b&eYI@Fm#?xadyi@Wp11E&0xX*{e(WrjKW)d4>P7Rb zpb*wKHqhXH{N@PV2ziogNk|$fGBlEp9SkrQc;5r4HG=qjJVxD!N%FaRz^KLO^n@vK zXfDf+@9$NxDYu@8{SiWOdvzPa?8pudcA=C`)?^MqYA}2w+)WNtb=k$ci(k--4#UZF zO|_uYXtUH5`bRjh=@O(uQOzDSYLnDBPEql7?6umxB$H+z3oU?694=*lp2sKB1p|1^y!MS zwdkLkLev(A%{J4vA`o3y$E}6ZiRDq}HZy4%!s3}E9#WgYA#jNrJ&I#qpU)*WH3Fwd zSPtdj393{mXd{p@F-SYMI-h*{6Z^Z+HVTC}qjv8IRuCbG>FL@`vw*8|IeZEzCeA3O z)C6i*SceM{i>4awhLIWAb*j4DR?Y8w#SLMBxPN6|+$00mO-AT4`x+kjKI*iitrYIq zCI(OIk{r#!8dG%6oo^YY!1>}F#vJVD-whGDp#DrMS8#IRECA)m2Ma_>&Y#g!aLdui z!5ic&h{dwd42Tubl=s@sB$(&DdQ(b|Q9FrXE5Hp*GU$AGAb+U+^gvr{Bcn!v>Df5%DP0PzqertkyY_~SiSs1V}a66O)m zx!=+7*cRIc@C3p*2L!KjXgT$_SzF1vK0#dKWToHjvW1dqsZV{OQ9Uo&OYOcIhe-%{?4)?M z*6gkDDL+i}D4Gr>>DpnSr|erFj81ni8@QWgxfAznWkke?4mzGLsAKDAAgym`?LUac zyr^`4#7=5wc{S`)PB{pM;wJ5{09$+%{ER|J8mSk6(^_pdavz#rx9&sl)QnKNy8;zP z4;1o>ZgnHD(6Fo`2@J2fMi9o11-ZiL5BElf)D=6dtFkX@XGQxBf(o$*m$>Mf6xu!- zJ>>p^FEp_xSJPVo;YOkRjA7<$11tLpO!mEOqb$f%j3RUfZ>ypuB!{9agKb;XSZQtn z6%%ICMhSCx_L6EGT(mC{3*(do)rfE;LbXnj#n=(LDE`oJB5yfBlLULHAv)~vUOaP8 zrV&_&pfH7g95PRf`)duk5a{taU{M?>;whHPSOB-H4++onN2Bc8^>S^|B%!+JDpi{L zEVV(F#mH~*7;y_bC1|VAjp^@6c_T>(0l{)l4-!sP$GG1G*~qbA)q50Q!FZ^ZwkHt+ zAHXEG!nI~jXxw6j9B7_lC)&{f6)uVW41VkjP5AKKfL^8BBU@bXL^MXCVjc7ffIkIA zTICL@6kCbbFN47;Nm%&V%jf=SRn||%Iv136c1T_=jCjA+M?84n$jwkqT^RduM4*(D z4B-H%?5xk1eS~=E(zVhFl9Dd2*j@y)ae#)ad4O^HU(2iC^+3w3zWtZ}38f#z-rypG zGO~?9h#g8H>|&7wRUs>VH8eW+`_p&hI-C_JM*GS)QmR@EFO)>IX;1_?`v!X_lpkTC zfe5#=!t^9`o-Z#B2^Z6@0T(P~{Fz;fec=uZau4NL@r8Cj1=oze5fv?#jz^J;U)!tk zkdL~w{R%m;jpPB#;L@D16K%~$9`Amtyk!SX5NPZ&jJrnu&tgBdt+gx8pn6-Q5%yeV zDy+d)gPrQB?A20FI~iG^C)p&f7ck;25Ey?<#)R!(;`BMMx7-Z-mCGyDw z6!Y-ykw;r8sJLiP1e(Di+P={;(O<7AoA@-Ia+8>F@w6kEL`Ls?nkYaf6d7($-coMU zA!If5NW?6-t%ILi6GHZK?7~3`v7H6zt7Q}y7Yz=0U<+7Y{0_i9CIq0stz_S9$rO_s 
zkhN?S?;o8Y6;m_C8(5ODK_;4Z4^m7*3XLFp`g|uo=d4JkNKUH|5Ks?Y(z$^6?FPJ1 zp0*nxe=h~3#z-83&1s;cE>lJPQso_#xXC((rs>ZT|9>A1|Ng)IBU!{=ukhEd_wzx+ z%*qETVYflgD%BimGl4=F1GW8f(BP5swYcXsoXIp>q^AiXNf1oz&%f(xdVVDmcgQC? zT$lpCTTWb5p{vHp9oojtL3MfLiG?$w0Ylw(CPyX4-qlJ&q8r8_m?tC;u~F>fN(8K} z+BVKTR-XP!sDSR4qKo)J?ndH|;j=~=Jw_;Png~63=mz8Gul0t{> z{_57m=u%aLq6PNl4z_gyp7dq<)>{#414SWK2s7B1s%K*dek`*KSs!Ob`vQt8nI94A zS_+}gGq#1`h$%W3z1S2F;U!eTu)O%<;Si5m#<-E0lQ}HcjDzS4^-QqiodzwZ{n-cb zQd|KF;tT-T9u9m`VrV$SM*CFFM=XW#CZR|r6W#7ROE5bhs?KGUvNy+Sm71J~8j383 zww`73DfaJFPJ2B^Q{Rh^7eY`LGb*yyNGAPE9CRJ-cjqItQhw-g&mR0t(C6w zGGmj3B*|b96HR8#(=>$uX=;z}@_|UnfP>(Ok|nr}Q%PJdM|zbFLKD~n-oUpF&lUmI z6wC%q#bJJ+)6*Mx_4ZvEHXB~?kNhcJ^M!a2yiv0Q3U@_Oh8h-uw*%K2=X7{5pt;|{ zkW7wU95{pTOX?Weua?$)%HCe((${V>acTCLxCWEhFVxtUJlAnbGzde1ptK_ZfC$z# zW1aBG)`(#+9o=ZbEv4ASX+flb9H{Z@x$=K}H5QMWtQ7(A8x{Y7OUN#mLIAoA zwwwjjwpurtdYz96G5{<1Lp+T+n5-sRZ=4jxZD&T zxl2Di1z&&J@%4%jW_v{#t+1kHc)M#Rl}P~6HZ&n=phhD0GZ9hs-FI=(6n97W75zL& zO(W{PGiIwpG2S!zL_bj?a5U8o;YYvP8BzGB{MkHonWfIt+Cpm=ZfrGPY*ZmVz)Xg6A~ziUVAPLDGhAo}e|8V8BGe#2fFL zVAw1-gC}Lh!Xc5z)u38i%?pn-xL3hnZ8wc*rqnheBwnF(KK-4bsm9mDidABU?6d6y zd~mxNMy}T6N1?Z@W=BzHdn%$-lM2Wf+hNq~h0&m;m`;Ca>xKuol9h@cELv7C-DHj} z)I=yE-no4fxnxURn3+OEg0$~*ch#kWdeDTVkcX-sEXY8b_odCZTY*zmN|_3aCLuJgZaI7L2Qdg2wo(^T9pyWaSbsZVh^3ro1pbST8X z=QQT7m#fC-kCo^ur)F3749v@@>$!x<5+YT`HTQ;V7m#F%@Vy}&mY2d8u>g!qwm`eG z_!UqHauam(j(wvk$7xpG%MI%`04XCgM~HW0Ygf^CobT|UiE_kUvh-2 zhU`O#c)aTMw~EKB<|S~r4#_O+&>m`I)5t*6I_O^wq(V_HS?0%;_FI8JshbGEhw z3b0*v!J^Xu%N{^e0s8XTvvu(kcfW*4jx%THNX)p|3d`2On|5bUVBE}}+nHG9uCLJA zB$I>uz!aN(8Y$1u7-xXNR4Cz#*<)H~?%@aAl0;w9f0(GW!`ow2lWh&8AX(|SokY=b z2h(x&het=81Sk_<13j#an6+Wb`1oWm4T@8v6NHZPn(Vad~%Z^j@ zf+TniJ^ZCb8&CiKSmHnTUu&!~j!I5307~OoQiAG&Wv8@M_8n$QfeivWu3!skri}Q> zILr-+QK%!)Y%1+*dBWtvV81p`>bqYd0ydE>4`ivFCUs-<9E{zM8xUNrOX3V}BI{M= zV#{W1wZntdwsS86gaU=3-d$iB0GeYm8lW zIe*30aD$<+u|ni09cv>LszfM+j}8fkL=f(T*hQzv2mC_N@2rY+rnE!Q6G#-n?X|ng zsK%?;)EN&!YWvwnUHLU6TbCmKhzYJ?Fb5q~q@V15n1dDA0(A+SroFB=``JH|E z%+n4uw^p~1N(?AUL!Zg@4myc%)n+%&a~mP%rrEzKU$>*h9C8BU^gJD}CF*+@-nX3+ z-zJ~d_nj*OuMBEjA~K>F-?ClowaR}GV~>wCLqOGt+}?kM_^Ms6gL2=#3D0O;b4V`= ztN>ArJ#sD%aV&nMK$~X7(1e5=l`6S<0BP)v0ZN(b(ieia+d63%p zvUW5YBLZN|ex&W(&j6I#8D1@m@kXk#)uTOn2&o+F+qOD-?zo+xr73-E`;gO;XKbi3 zto@A!(Wqbhsb1tOwwl{&2C?IRTqgJsaN>+G zq6r|olLktZMa0EKDHFyF7VJnqBYfhK#Q9J}OxCk65k=hXG4L_=2EI+1a;R=niI`jt zEAp0!{fW@ae)2}X8G{7Y6rmNS!QKQ7@bXW*hEjopc4`_5Duug_&Lci=bQ?)Hs)?KZ z0=gmM(hL@z`o$}DV?o_F#a^}6%N98nC^4QOl}}{bI0fqfF7jy13+3D``y%f3VTxmH z!!)6xM*ys=ws2Ll+-5TFvT-%#ACCRv68A^r6XIClF715O+XO1pfFMXj>;&9V9LD@E z-{4LJo5-!na~~U{@cYw`j1zem(I4AFR~p5fzv=0jJ8eZ9@nUvD_9JMrvC7yV;~O0% zHMJ@_x82QemT$(V7lG9HD|zGC6%Mg>1c=A9b-gjTj08lcI8L?Kw+px4w00%1w~CKxc=(^Eza(e>;K#uqq}xGU zIahPXqC*YIGes9&uHa@+DsB%(QzkI;7u<2ek@qtb5KD>&ekKbLzTW&IB zCUm4}a;Nk*cxc%F#H@icRLzDDgAgc_i8su9qr6Octp`lnWq!d{&Jn^?_LCyb^Vi}jYUQ%`_?ee$703Uh z|Mj9|TOp2A3hXw+PZUP5Iv6@1{Brs?&3~8tgAt|H0>vI-UQ+9(?5aXN!2+biPX@r( z`CutdO{7aIB&_i!@9f+SUXCY7%_J*;#=0n9WVOQ~j%0^=q5>(UbYx_-5XJ)mUEQ9+ zJUkH=JlLk`Y?8z6Txy=_Zh|}-vcMtVw*kFVSw8YIN{zDXdIw(|jS8&DTkC4@SzLgo z{b*Peg4c+uZesLscd7iIBqNDvq{gd+_bF55dM9{9z*IXT5p&ZO?7y4V|0~V^O#fYQ z5`9+!!!9+i(uzczE!3teDQD?bk(d$h<+6rl6jr06Q8`gXo3tlgw-JB8iG7Ah7{LxXK8e0kfRgt01& zPyC3QKS+tQMXGkPdhDM-X|k_Ji5FW(2$7tTsLI9T9>SNTs;G7DIZxxJAS=bR$wqAGZ)Gu5?MDT+guF zAH}9{N|B$wCJv5hPqc#D)AF&=Va${lYyq5vDlD%7SEzT zWkR3yp>f8*yoBRDOr}>;#M-q7Z;;jO#AcS@Uq&a}UjhE+z!&3x1-Woid`OSJoG{~# zB=ycfPRc+yo}|teRhkyp2*ecbcK9VtPu(O=5KOY8qD{;OlRdpeRO8~uZqqb#+uyU* zNgI&5v8{C4<>l-41CY~v{fUpUvshLG#L017xj^LKgXl~2OR}fE@Kkx>DWjqFJNhJU z@>waIj00nCd{T?ZlmTIDUuRpKx*9~?P(YR8JzT3exOD^yVk+gN5N_Np 
zdaR|allr$1o;>qZ>=cC%@yKD9h9mLVIkF-rfWhpH)u5rGu!7f+^jps7yJ&Gd%Kz(M zQ!*$D&8`6$Q}IVKJZRY{9U0?909dCiYXd*2u_p-6vx={Y_QRh}Nfo^l7j^|Q5U`8Y z?C^Z?j(tOZ5Hyc(bR+{}2)Nq62jE5%F#s7G5vHeal|Qy0>ckcM;qkAQ7jA{8PhE|? z65@L^mALo|0=I;woErer3sAV8|7dI%y{DSt2qjl+&uHjJc#Br_K4Vl_NTNAtJTEck zFx1vR0_Uq@0*6%S8x<2dBj`2XIOx>^xdSxP_BlPJJ+ZMm?jwg4>#cZV0l0#?WZZMi znQZLu%QQJ&(XVVb}ZjgetO;WZ63#lWf z-vAIcm*U&C?;lCQ)SlduXmEzDxr2T?^29d!S9x+3FWQo2bKyb;sc*vi{n0&pzy_Kx z1Lg%NMc*eaI*(XYwAjuTlOeotqMS{AVXE{%I=N#iUw7=yXQ3(WVj{>pJ$<5lE4Iwk zr8i^+Ipbm?96IRzEheglkvw8R6Npa&m2n!F2;|VhD3A+Q$2phevft)*e5wYU2mkS3 zmqIoWi0wx{{DZ$KPliCJ<=>Fzk-F+mB1%_wJwc*E_nhjbY6hPu3~G>SzX42RVsG~Z zC_r_of@bQ2B_0CIWeXt#M5JnzSYnj`!S-m+?W6NFR}2p05nq5kOyIGB_Z^|t%`sOn*Ii zyKj81JXjJHM2@>&2TqaXFZGYAC4XsGNhbSp(r_{?xt)$vUAkJ@b}jdnW$dTQ9?q1b z`}73oMCuJN$#m4o4Hi|2rx1@ zFZb%C2zw=k%#WMyv`(3aA5K4;Y1_L{5g*iHyryBoX)#M2IqZos+po-SZpx~7NOoqx1Ebt@j+5-!}E z{?!n?tpMfeaiqf`nEVh}Bs*uPoRRRx&~q9cfJ!_ovb3l1iCyFB7ryYH^1=(Y1InkO zdlbQ`d!gR(FtB1^!C-BwEj+V^^NS6eu#N(f0p9JWfw0NAAdQiEnYZ2ZLlq;`$-w zdqWRJa7+|v1v?qeDd{mO&wfH3AfXk*qS3u`C6=&kGNWJZN9BJcxKBUNwP1XDzpF2y zn{XIU6LWp{%bP|R40?g0xR?DDtf_|u{5>rhh8_zRZAV=J@$q-MHC6HLSwVFeOF`nn zJzb!BeT|No(jS#+3Q(%4>x}JBP@K!_k8ERVJA#Iy%Te(Fn~<}Y&&SCk+{He2Mr=_f zj~;xN5_0fZQ%jbIaZsPjeGlnHsO-HW_!{wpU&c%BVqp@4wU?09tN1me&G zrSS+U1ZRHw8K;)e1lfR-L*O_*r7tzJD=4hGMLO4qFIQ_^;~Vs6lF5`5H$u09WRr>6 zzQ>XeWSbfrspRWI0+Q34A8j9OhAJS9WZP&^r8(_iR@Uv0L*-P4saGBNX2d&Axic+n zS~l0i)LDZ`xtEga#*w&pNIgAk==C{GOB}ia#CFv|T+I_rLY<6rlA~5wAK)tDz1o2( z<)MBB$+-jvB#B@rrcRZq#nWZ z`4EFqfHht~&Ivz;(a3?1K3BeN2Z3>|c3m^o#n))(jv((R$<;ribz(BxcCaM|ZdtDK z;kY0;YfvyYgE_+s?s$$s*@c*2jerk#vZwUGzHLk~^OkRnD1kzBMgo2;_svM4vt7-? zL2ig^ydW%EMaIPa!l!O};Ki?cgzb+;qy2H~%pd#8Kk%RO5B&Wrw#0}3wM@MQ_92qW zcSiCa64^dwUBCWToETwa>=Y;wT#VG*1EL#xuT6jNAn{dVMclwdbTA=cZi}mq#Wvu( z_J)y0fy4Pt8bjL}gci-k?s{UZ$U8EGa3iP!lsry-TOl#`$hLYVslxrykr*nPI-xm1 zJqHig-iy3HO1P2j)zXY0l!9DPAOto!3z7*`PpD-;oG0$r2@sy(&aaopz834;t#MzY zgG_CDc^`X%Sob{keKrCf0pk`*Paq11FIA9j9FuHWPA(R`f@O#Fu-DP!pkdK?1fg-T1*C0sgH7Ryd}?fJE-V!-;)T*;_HMccjSd z5#KSgAlrm^a5>9`PSM1TfEre$ESyOy$7C|d@QY@1e6u96FSoc4a)X`4>44S78-TOp z@adJPtQ+iMA5%aX^Go<6d&^>(cSIJ|gQv8Yeeg5;JaFMAO&W);j+;hL^-Nv~ij!Wz z3YF(~^8J6EA($8l#72KrrtjNPlg`Cb^^@n+*tm;nYy*Nz?>Y{n@o;j_`Ku^Xq#g0; zeD61wjLMX)xc&H2Sug^s74iG=4RUJF**6%I;J&(SOpex0h1VNe)ILr3fCPP{5OM4a z>4Kw-=!0BWw?dye|4g}TkH8mqwietom0c4krkGpNN8`t9#e)#^wg~t=2<0@)P8D9> zHnVb=4P=+*G2;d%G45J6V1DmB2_fpUStT5`0(p;`|%;8?a z;5YGr;iTKKn=CQnt9#paO*?4TL#i^29l#F#81ST#h5>!+z9rXdL)Roc2!qNuTs!61REm!bUUiBW~-PUnNpd9Tv9(R>A2^k4DPK+;JXKZy)ltJWoG-1+3 zD3T_lm=zd@5$pu-m6n~LoP)`JBnOFskB)kjiq^sb`&0u?w4Y!cgqof2O+l|w5VkT! 
zwPIzx>T2wb+;~#dbA8DkML(iec_n1MoHc?5h=i1aXa;e)vU`MbDm!>sy6WS;E1p}g zm8WbeJLNEy-Z53H>)C-DGXbd@dIk4xdlD2L3M9x4Zmu&ETfmsQW2*)^t=6i)Q?bt{ z!NXVyBXGI#i)I9O^17~;>#O!>l40n0$Y_+=W-@FZ+XaIqxuakOkwV+(Ivw$i<_geO zXEOKXxiOG~5S2gV2XTC3yIX|E^K1k`9E2&RsMuDB35r)mUO9bBA+&y+h0K`QOE@oB zL_C2!s|q>e2|~Vaxu;RB+-2LynFAq!s~M3P8gh>aBUA&-PC!n+P`QHYxx>M2=^n%_ z-DGK7H8z+-{Y3hyUnF~x!8JS#vl7Ukokd5%b$1(YB6bs}9FK6PlRZmi3ZJGxHC9KP zI(ek3QVwEmE56CzOax;-)5RnOC`o-GDD8-q!wf>!Cbq*QV=>-{U0 zO#E#_@oJa#!f5Y?bq682{R+RwIb2un7G>zswukRbX#)F!E&VKB^*_7(l;rItz z$7AKmTR{PhNI-$5vhm|x^l}Y`#LG!9!8VCdvLe=&7wqNaZGTRoT>63vtWYnhUS&;F zRBRFJpyAw7`hcE$cFbxuxIQ05R#h9J1!F$9k5t`}M}*8b!qT3(PScYl?pj<_do_yG zduY&5N(#bDX%`yp|*IhB{#V&-oQQ+(Y z1-Jx@?vh;W?TDxBg$Xy-%zKw`;`40P(T zSNt{=z!~!iL9or$TRkpIeFBM%c%sOV z+ex=4((`@lf1CkG(+Z%9Ic%=j z=YanFQ6Gvr+G2KQiptW0??P}dFmELks-CgQ}9w3H)drP(3M!^a+7G z>8^PGd>qk$Qb|C82}gOz9u+UPMGHT{#-lMU+5ZTHt5*nyNjlDDd{T^|`1%Gu zGD<&|OnX=2#xtUr;$Z5EbgSZf>+lvW225=%EtgkpkFp~pT&t#}=l;mP*z^^c$!=dP zmoD04I;4K%1UD#d>Uq`OsC44wXjB!(dINLzBVZuGhq0D2t-bkANYhq+evdtqSHeoqkvMtmmh81sVQy`a+$9$5^;5Z~^|lEeD1`r0Ul zPvhcJ;o^aNfduQ`&yB=;gI%=>N`^jDktLq*aK}yD+~a4iY&GBFUiF1FPI#e3?l9g< zES4l0mmq$o7a5x|BGOo6n2de=uH&*}(O zgn&TmN2*qBRyc&@xY14rmNA-jXe4cGBTajlB5hT{V-v;wmP)H}Q2X|1H+N|jjz2XU zsR$M(aAg}oWJgIjlXnR+2D?4{UU~4t*cd@gIgI15A_+3|q`2kG27Zeop9Ak!MaU0N z*2v#(W)r*JQLUEAer#!Ls0azBh)J1SU%K_BqW?0gbkHRpAnmME7Hhlz3SisNUc|WY zcHnpE3HwNEqolYW<mT?3&9hW-xus~0D zIx^=h;=0h;_iazuQ&^n4-L&sx*i|7 zgM%OlYLlH&x2pZcA;ToOk;+op?8K3yQkYsE!14`^I~jJ3I_elK5Y#kX!jCls4cOEh#n`U4)cyj+g;Z=@`MNJ zoLMAqm{G}NhRZ>dy*%QuBV#=s5)rpi&vtjMv|7PR>|A*_v(H{GL%VEeY~qHy3OVwu zJs%M1hS>s^y<7c?6&$BwE_eA^MYwi$j3T4!i!JJL&>;3=L5*`g;2T)?_BYCl$_Wip zgU1k4>L-@sH4L&O0Buj+rp=Qb{5*XFD7uTg)>Bp+%g(XnH6rc_#fN`YrXLxtzg52%k^=K%!MlIUoh zI@_B{Y2s;Q%IV@6tK-m*+)mb{oA&17nulu?Tch^DjzgG%Y&}aa>P!+)b zElmX?$xVLnsGBt_zCfR$by~j1SuU36pAUAqqon7DH!W89&ofvh^anxtzfp768l8{p zyUE(o1F1}p#6NVNr1}Zr=Y@Cf_T66Jq^68yh8@?#@6vDLJE~{nU*dW*CSHlvsDbzr z1E*g#wm_S2v|fo%A95KhaqR=@kFc^HA3zZ)f0@CGo66{@Wty*qzAVDS{c!s3gP$8C zkOYfXV9YzfEk5x3^->xC-*yNI3vJ=Nf64z%aYVbdXJLTY$z$ak_BVASF4Nx!fp$?t zqE8YD)cuaTVceFs7v9k~Hrs*pAq#B?)wexc7jYk)#kdMJ*e4HA7F$ryCq~)=)JDSnmxVgdZFDH}E3Qw);4+!OC?b|6 zpcWqhvLO-WCOR=B*Zxgm6)MXXs;*P)ThL)un;$KaFV#K+sBDF>s8-bZdv=K|YS|)GA zrV+~oE^_L7a}f|%+rInsBgM_{>FM>dIUgw#rM8&&LuZK^QcT$wRh6mHXOgGSUWg?i zah(%uAl+F-*h)@1cnzoExEyF&hf!~O`XcSn-Dsh$^ZB6kR+ZHLT5MU55+lMttyeB& zoT?k%30mw^C5+pZTz2kULLGKcM0Y%5zgtqySk4buAov4whAtDfX4>5Y0rSKvc8XBM z@ppi`n5(n;|G`Ig1{Bq7*pt{j64ABs9C-RJ&GRFRPJn$z>91e2dnfM~JI8Hz*yu`7 zZi&4?dE>ubcqrWs1h?!Aj|>tsM#R&3ZIaIAD&;?|SSZ(PZ`+t0efFc;T9SYHh5Zuj z^+AQE$xgRrl1H!cqF@*LJzTNB)}nd9W${-O&w~BL8NVBmJL?VhiET>Q1n<|lFKwE( z*gvAFc|wVUUC%f@LAtmrD(11bEvaxIS<{D1I4OP$D37mM0-SQAGSLl``hT1{u2!Jw zOPB3=;?P`&bVOKwf+J*nfLZFWAr3(H&rG_dPC1P8ec^@jSddM#7We(0A^dQUoFNfG z>>c)x&u`*{M5b3jyMrjd))9zmEYrDqJ&@NEW&b|V=*peRs z`WoEbU)$loSSIv=Gzw~Vbh7~4=pi})sDS;rL=S5M-S%ztM?xdgB_c)3n=Qyt;&GVnw z*^T`)r`57W$`U2k$zNJ^G$cNMAt6j4AyC=^LhO~W8{*isgWBp*BZYLh&7J0{nvr7=N~W&gQM!TWc$}anLUafa2w{QuYa;J2u^R z0Y?;|cQ+Tfg7J0$;!iNyzCttMnH-9WeJ!?@!Yt`-QbrSLsIq5UQg7+8d(Z^=e~LY3 zCe>>Z0qK)ZgNj2N#T>;^=b4^y);&;7sxkImymg-MK@#A#$q*r+OItV_y3CSmm*TN< zwDpbn`^~c0iPfm}sdF%G;Xy6+sf#c8(Ndw?da`VgT1K&d0^DhjiUQe$3`0)aRdtxu=oU;SO=qS#&6BUQ?6GQ1^9jr^ z^6%3+4a&B=NL4B_a|w7R?G3%rDw{BzM#PLPNM$K&D@;F=D0bPFR((!r6%vAegAk=# zk1Qr3sn`@MvLz7(q!&tMe|~U``p;wqqbkWRl0T2~FD-gy(D}oXCT+^OD{>$>0wjb-|I7d%N$v$>TR@4HCMci$e zLfmG=f|bPi=7Iv5jg70?7D`$5%xwnMTl_~kjnqD{iJnoFc&05sFC~4R*noWm6(v=J z2-qly9poBzhzHfjNN2DEd%q-_821!uoB4iJj#A%OL%q{{2Wz;HIk}~EuKX`g*<&A; zG^W7=#SJrRCFn5nfB}L)x_%tTK*W<7oJc&Pm~@BJ1u|libN*2wrP_X)_SLN{6#F_0 
z!h#0@{zb~*$E)^*5PV>(g*!^)*TWOlj=u`Hi(k|f7*bo=z%F=@Ut<^7g=}j8)Ui-1g z0*fiDqb1=UWw+Zc}Vev9bxTQn)a%j3f$Wd8?sK~y%ayc8alJNlGzn;T4SCIXvkx-{rWCD z6ogapavOu{f&=Qh3L}bKEeD6EsQs#VN@g>3bRA@bMK*z$_cqeya%(B^Zm41ts!0v5 z$Eg(qxC^E2W-=6MjEPY+N_O?QDV_pcG4hg^Eq0K61yn=5MC_aq*+|{8w+AW!CeS}D zteV+TU8q=0Vui`YoyW6gP~%iRoxah+x@DJXg+sW&28b}sLkM>Qe``S8NXVt!NWlo9O?q4=aQq! zhBKVt5|J*W(g8L)NLJm$U7HZv1U~^LohvXXO)s2#-cH|>@6bx(^hYKN>h|C{%vHZf z#_gk2??6-%d&KIkjk0!Y!yZxl4BKE(JTM6CC(-62xWSR-kefoMpRh`^$e;~JoI6L6 zfXL!m%#o6FPnUDCbp5f6R)CI}`b`yWm>wp>Rv}+eq13t{K1Zz&X=DfPxZ0g0D2y~x zbdVK~uJ3>{gvmVJ@2VRh%9_{w7MxP6FKC`?eT_K&Y{b>!#<0 z*|3Tki_;aqP_i{a}B0 zcT+s@4J1F3`G_0E9mvu~y074ia+|UrvBxddngY$YwSvm>Yt;(;MJQ#ugh^E%P87Yj zOKq#%v)R{fsjt|=AQ$%>2?VDR_#Ur=Xn<`rN~xF)?}86G1?T`xz3JFYXQQ=I7Crx| z!gfi?O%!a_Hrdi*Eh)NcKaOD0V*S|YKq`x)^}2pPLd6z*{{dx3xh$zV`>mK$X=DitW%I+ zX4z$3TNT|0bQ-QM; zBSmYg%A+hF`z2KZ@LK7+;}DQTHj6=>ucj3p&sFWuc}PtyeaFm3b$OL>eOor0Z^(t+Jlg462Zno z9yRe4-IOWYgh^BR9@VCrfA}w{8-hxq78mrm&@0&ghk!;LeWb%ai#0%at`V1Hw_beF zUcbSZ+bOYMWgC?AXTo!UC_(3p;Yimi}18;h%D;lbIFkF}h~ zxWRgHK~;^d_$$~giYwNP`5h&%hO_~0x-GWcrYJW?Oz-%4sS*lp{E7&;Yqus0+kGsN z>D=)ZLgwbs{R(ezf5*~;5gcG1bIq6tfi`g~C$Xs!^;j9w*)FGv?KY*eOj6scG;&CQ zCff2%TT^T!S5b>)VIdve#&eu(+D9S6r-sj4!}35u6rUn?z`}Y6WetdL!mSI|D2Smj z0dKY;!fCkh0+(O*EcwOq1wS!Yua!$NJC3=yt}cf9-qkcP6b?oNKCW%c5VDsqH)G(6 zo*wpMq&T0AoaN?yJz+vvt&aH#e~>Wxe5X`>a|~{svhTiGijU-Ai^2wo_TYd`y#X=tA_GxAC0Sg|Q#O5oGF_I;{vC21r8x)XkMkj;)O$Z$bQpV~w(k z$|^lv+E|);Csbya6u1qg^1>63w9m3(StS6768zlqHXIBK;#mof$0R>44?{R7UEZNG zG2zi7&z>c}x{ifVZY|qvUuTsmHy0D>QtiQFBKDytAJI|s^!xUsd0Vl>*Y4psZpF1Jmpi6jJr%J(LjJ&224N%j)Y&KVfw+AM1iDAIAyb#5W}^E}IsIzr z*aK{;LoX7jtKMOzZR*}7uTaJgIFHODdlc!`t|&0|QYm6A8E%l&MjS!W6!V4k1x$io zw|C!xc+8=JhgoFA#h;|1-L@x@a%@AmmYO9~{sc#MoPgJA*OHFJ^i5wue2KIw^py(y zL==#~o?N`3g5!1qy4zIM z4o6muDYAVsFa_6ec5J+aS?Ca#;wa?!V_PL>2QPnnKqq$lhA=lgt~hG7eA?n1kk#tC z-!n~>yF?_tjw(4i4hEGHnSi}fj!keQCJ0n=Es0BRPKC7bjhP~%l`~V1xh+c1h(R`D z?o{dkrs@I@MCL~JXXpE$$ zs{@;{DVcRIg#y9GV044-+o3tcd_D{j9*&pZ90{ZZw}X0v}bl~N_Z)y|3XNPiWgd;lM+Ce6y$ z@jFHcK(tiHi+nLgCrb@{#)Mj~ba6`XL`wGngsD9+=M`QpX` zuD2@UnFNT0Z=WV1N9FhY;i6d0gxfz;K4S-f2rD$%WWP~vMwCmfbuy9KvTBKOXAcdU z)1{a&7ACsg!95V{oWOTq0f8OE1>P-AVCDVvxm~e!BCgDtiNQ%7rx2aGYF&+!K)x3G zA3d|@Hh{_qSyfBdXLB&rY7aPg&Oa=3P>OKwH0-aXjM0&5=qt8o(rWRSBWJ#-`I{ zqEd&7Aer3CLi4q_xloqu1$-Bp3N_1YZ50IvO|mbdB)8nCc!@OCD<)0ULx<@V!5JyC zxt15gK(;8J(`1usj<%B|MPgvtPnCq`3hu0TV_bRId{t-Z0gkrHp@f*NpHu(HOj(Iy%+^y!?dVn z5w7{tLfMG%u8Y>1KnAW>wI1Q&w(6&kpDACCEvMv)T9l`^Z{7vTKEkE35w1v6ZC7cs zb6^pT-=eyIZQCny3$4~{C8l5!`C4p^0ND5AH~>r~uc^ruc3yNJyE^nUEDwC6VMF6kU&3?vU zM&hGKN#!HODMwvgcCeBVHIuYOlTK8Y%BKvAF18RZSx)dj!0iD#vkjmnBwb#sA`B5x zQ!t#i*w#B`E&j@l2Ev`V0@Sv_4woR<{^)#!P*EHQFfRUv5t2^D4RotZqdc1f_x+g_ z))F38l!u_oOWN5pi#1lEMXL#!X?RB_Iz4%4h#=l@2hJNhW?L3#!u? 
zE58P=ST=WKjO2-8eFHYeXV%%C;b1F%u}PlV_|SnXc?hq(5~HS+?YfQnjhv;Y+=< zG>8dr1}sP$hx0H^T)+u3ur(gLO-Q0xG9Z*oJjz8IC&WN|3_ad3p-easKniNDlNdG5 z^|mRGGsV6xd$wX?kJuAhSWJs6H#SyR>^IyJ6$;C+rY2TIl6=JT-&YrqDRgB@2Me^0RMpO1LiAZW*Nm~(RB zp>Q1RuN^h6OBV`$2;QTe+Kb8+3{kP@>rX;IH!0*wBe#zY8jh<{v`r>nD+@7;YBI-a zA`hs^2xjRbRK}xy0aL&FhRz)96^~-7nn`8*9wiPY35_BPU6o=nF%wnZEYSh2?wOH>kR z#tSjZDF~592Xr235YlG5p1jHFUfi&GwD!8?VcfL%h#R&Ei|8+^PV`f}m59uXwjuPO z2#v(+NdOOd-|F@RVY)kSGZXl1Z1gZEX2Zg#QT$@qX9dd$A@{Hm!NR1|GJWlkn zu`0&N*r-r!6EBLicJ*4gc*UNkz*Ti%;`N6+G^J-$6alw2sUe7qCPS44OjH!J$s|s< z@Qew23z;)H2NoZ?VNO5}CI$2Y`|Fd()WC@i<2k}zlS8(|4!S8W?gMrHY$%vbgwVTF zy3N`_#8!8Inlpk29d(q6?|zepeO@%YTAeJjO}@^o=Eg#Ej)Tw_3Ch7$dR@`;?~k!I z^JO)$a%<9|n>Vr-QpJ(EA(*zW{rl8LgfRB8J&MDHpBuIVc+;8Pm2GVLtmziEFDX56 zeK8jB4!EUE%uvO0Q)gqRxcmwr=OVb0+=P3eCO3B+d8m!+r`ujXt*823183I~#@(>x zSM5~W9b8I;xJ+k)QVVcbdTi6IAjU^ z64L>^bjgMT^hUMK+W**orPGY5(Y+U}Hq^sR6>Th4n)3*CB%6m3O;z5xl#&>4yD0-G&O7q2V8z*;}J-$5iS4vshJjA)H zO=xWZ5{WA>+nWes()W6l5JlVElEj@p#f^BqernUPpd_T ziYJgXrCPsjrc#sZwSQwggZ2nQ%(rIfw+rjHA-OGdY^DRKmv}YtEgA<2Fq*`h7r_yT zAG`kIrU3wo?ZGmBIkZ`iKkFil-_VX?Ag?oivn;O!vUj}otPv?&7HhJFvA6Fq6#?8C zCsNKqd>{Q;9rEtTB=za?$Ye$UBW+8=8W&$UK`qN%-FYEfyttQC(p4xlG2QiYy=^by zYO{q@2nmPKe$>T8s3b;+)I!@6ifDHiI-RwurRbwN0hR&3a?A#_TsEzsBU4X+sX?`J z$Kh{dfXJK&x!M~UvmqBG!-O(UZM;}wWxBOxhg+#glmF48{Evrwd(-{#4#5}VJ&ZV; zV>_#M~%vtu5F?sk)-J@U{=q!nIb1P379Pa?zHuU)~J?S(gn+rSSO8G7R(koYGiO zj1HCt9p{EbyS({kd_#OjRxjbac4%jVDA#e*$U?Uo#GMHp?N7R1jtN;vX0oqKva6NU_&Y>?N3q`xAG*Z%=yxKE1tPOE~$PI z=aT~)!&D!oC?LaicK+!L@%Y{0ND3C~A)z2dXe7P12;>~NO$-MZ^UefANlV_zizpDd zW0q*DDyRS)YS2v&mI*E7m^hJ*em^o6$7k^%d%kK2zGM^S%wPb-(Dy}H23o@D`gbmy zWMp$f2vm~!)8sa!r63MLd@_WlUT$TJUT!wk*6q>?p}7r}MDB+T2QJ}>H+2v#+lHof z$Q<4q)~G2ypm29=$`VR-H?4Rr*4Z1z(AH{N`ebM9)yU__kLlR+h$dfn`e_?i&8f91 zU@`l(Fb|-+do*_VAJD97H~S(lwFq|Dg1{x>UrHOr`gG}033TDg zbDbbDO#+g!!>m<>%#QWz(NSVW&T+aPxs2rjjnWB~x`Ro|l@#0PPoFj2%&z z`|jbka}9Bvv(#}OQova3M$qSNzh~)eD)y~Cjw$0@tGR*g5FrwN3Q4D$#2!8|eGF_o zE30LG!5%;~exH6!tU>%n57qr*oAu-yryrb>>BK+cLw)cztRSE9Ujp4k+zWeGIcuCb z4rn$Y$^F&cl`jO)mDp22M+DAx3P*dzhE3<7i|l_(-|8O86u{!v+DqL+6n}Qu)f6s$~th)lFbkW<}|P zxDoLyP$uq$nMn3Aki+!QkB)L~vQx0iLEY!8O+;qpFwGheqcOtpz2brQ2J-dEwTvk5 zjT#0vxsu+RrYm%f`xz{9*bPxwhLFfA#p}Dck-{d!gIFocsnQ5XT#kuL{T3G5VB{VM z(msixbg}++UYxe{IJ}&381Z=r6KDx2sE`eKe(lR4S#k&iD_-BgEIxbWMZPI z3g<1VLSo(|tCpkQr1^6e03l#3i~p?x+qifgWH z*ft3E82i$nJ6F!x!w@=sHHWjvR>d4M)lAWt-xIuPJJgow;$2ln~cQP zac(i7wa@XNqW=rL5Mi$Oajn)Lb5YViDLuLrW5b5~qCTNhO-i3CZ^slknv^zA6lxK% zS@)D@H8{Yn1v8HKY=LZ|E~J!fa@bYe{-&a8^?}qr>N+3Z-;Dc%x&(yqalBK5>9LqJ z(x{H%D}(Qby>$t22Rmh{hGuM;2wf$ULRT@R92^vp-TCMp`z(I^B^w|2EHtG^EbZoy4Jv`S>q-&Y7@K|KfGQ#_5%QD@q)jUmAPmmY8_>|oGSh0 z;;$ICUQ3C4@)!IMTpu&jRzwnx*LsBVJxuWa0VNPNhq!f^ME0f^UnyQ7cD<~a&^`H> zu3;zN2)SZM29&>-_cU}d=5~~gW79dWJWYSX2PZT`{9ztaUlU=sT`TT}=|#Rh7-+Cw z-lgBsdfJp)r6h#21{mU^YpzNY8j(yz)UJp{}79 z+@KcWWG7@nm`ClZsx>u1!dJNy2Dp+FyFlSE7BbjIhu4cHAVM66hvWE;igKQ@q#vSl zgFZdjr_1;luIJ^3isSRzIwb|G@%;|unyVSn1ggO+TPVNbVe+j zSW*}|WzP#mU>eE{v_IpDA}!gu>l`EAU}J8FQ9wb~D3_7VnoZdvZfrl|_+Zc{ARjKR zYz0PXuTgzoq6{fCiGzcW9}^>du2UT=|9T?LvUT81u}NUhx>sWjD6Pu#bM22x)oLpI z#7pj?eTBa#M59G3RQJSje$?m$o5)VI=I%dGIU#t8SVTZ2JgoI$KP7~L4)3t2^+jQ^BYGhQ;@MNl%cdoBqT{Js`XhnGV3;>0 zi>w=qeQ8%QuWzS`u#K#p{aMdk^rW0uUMVky0wrxuHDt8!S(V>m9wBzz9OHMV@uk{u zNl^hH`g8-Uh1pMH6!7`kuR(kb;{e0+$K6*0Ik(DCIEprSQZHu<2s4?~ zI2nY%;@H3(oO-aRxyL*J&FUp0A(WYe&;2)tqrE89Mmc!*U-<2RF34@ak6^d`=D+gK zIDHHIiXbAUrd&Zlu7=FxDX4tD@5=T>vhhJI7uWca_m6%;WbM}`CN}}@a#6q)E+`Ih z+**q&ML?BQslIq4qK2l-r$8a#2t%{k5FG#=Loyo8|y%-&og=-vyNheu9g3u>I;N+Ji)NJRcz@a(Gz5@NmjK*9O`|87n*X(InU 
[GIT binary patch data (base85-encoded) omitted; not human-readable]
zwH_1Gl~vSL@Bh%)rbo3UQEXb?at5kz-0PG_7?=0ytgURO5j7^*sW+`8Aai z@AALD<=>C}bmNDS&^4D}D}uX=)eYamvFUN8N| zR}+2FdOzYp1|B2V#Ax(ou8X@ZD~$E=6U@0IVOt zQn?sM$k-}<#kg+s=^>y_aftuY@_Px(H~@um*Gg90b&V6?c3rGxh&dQb%nlfI*r|k%4v59VV=@4z1lIe?^oQ0oR6+ zIO!Zo{g|78CNx;O=~~Gs>c}*BxIR?iUeVi)WCs&dK9w;{4L+0TF0HQ2>!pP{+cy!S zuOU~j)Fqo}OoRkx^_@8~AK_*q(ok=lYIq!&xB2&{K6?s%m}GTwudl)? zRU!gG1o+#2vJXY1Ep~&?k4qb#kcUw0# z^#S*|e3IX^#;Cig?^kmZUR$fn%CxsLfNDmz7lx+xh+dt%nqmR%9*jE2=^Km0Qb?I0 z-#);}8{;k;cY)}#elAbw5!1C96KTr%koaH#jGw!rW#mFgmhH_>iUb<)ckTWQkL!AXf%KpaZ-1KG%Zq^Mxy~VacCHgCJ4%OWjuRQ zmAOdv)s81ul&rHowq^Mw%d)G$_V?Pq=WVP5oSE6XCR3?YDso9Ci0=RYJ*;=V>+q~+ z^*24j#0nBC!7#KpuZ*j4ZH*N|1wF#KiKoxdMK6AmNfn>QJv``Q!`PhaRnO}#AGl^F zZ;@-s<$DMGnl|{h|O%)!r=dqK6^ip(cK|l6(78D?Q;Au zdF`f2Yst?Dk(o{FwP7S)*4aAO6Y8d7^Se0V;QOtehaO<90=2d`2|oIX{eVy|FU0U> z34p}30K^2a9%`Q=+j0D54isWML& z{H9lZs^PFFQOFtLGsc_hB*E|`b#l+h|EY5lIOt*^8{t=o@xlA>>J{08~DCc&~VpY+TTo8!xo1K&n-3+Fh1wmo+4!H=$H0WiMG3i zQEl8?Y1tQRkI}1y>q2P2=!G%Yp;FHSKG^8IblvOpCL-vMX;f?3?a+s=-9&_}RkU}7 zur@;UqC^=r3GG>Q;B((M~i#pZCGDwSWV*ygco+Wt_d}3IS@{8g3O;gm4}7yy@Qa_eP(N ze)Y=u|6&C5%L_BLM$?|MfXLfuC{($ABYhIyRJQ8?_FxT_*-sn6_NeWKA7R~<-N*5bHJDKW$S0Dt(hv$Z;oH-_N3PWgqe?)7JADw(^Q$-`RlhN}?pon6|*$E!V z-)`@u{wlvKmLE+ykb*DyvkwcI`RwH9&p#f0aPn2i)orpe8P>kjXNm0fGosG5AE++L z^JM1#T(k&ge0?gY1i->F=V)wHT};)%E)A*oF4)@h0dk$C6;o z-m$kl9MCol7FI3#S;$MU8iRtI)KH9Sy1|cG!IgMy}d^0vR@4F81qV5 zG89=haax5`NJ`)WYODrIcVi#w+okog5R8c zVIz?ug32-;4g6Tis~`#Br;IMyFI>1jVPYDdH5S!ptS%jM>?Nw7!}E!kusP#Fg&aYP z-R|MD#D^9MmQZ(^?bzuam?fJC{D#C~?GD&Y5t$B&RR0);acTt1rE<+Tmto*h2(53c z+^j6xx*?+S)YE~b(gdhYzQJn*eMvoHwe{NeJ(;-A|L)I!V=K|wVygB_=`k|$X>e62 z6=N0FI3_q`$-Wz4N^v*I2`qtscp5^0D@~$`ev}_9|6l6HJefHx!{ft7kM9-!7U%UAb(# zZi!O+Szt42coUQ#hGO?Mz_$GG)39da7L-Oy+@ zT}>|a=&1HL5p6W4{hqjCbi-_NI~)=tB!ce%nXjdI%F69(EON8(CTIKlDkUc|qb0%v z5RJqo$u%Zc;vG;t_I9$(`<_x+?CuTg)OE)8lB&XeVG2@_v1e-kJ5FBM-&F)YIF+>! z_9kei5O$P3#oIAHl7x_=IGT^-5AU)EBA34?RyO}{yKv*iR0stsV$*e*ZYA#}A(lJ0 zQUJ|FuxM2=msmh65sy>|9*c_A&^)=Eue!Xt)VgO&Ce8iG1iQ_ti}~^s#zTk@Y=hVA zCuLBMlXXb3aEad8w+V2R6srHssw>i)2iKACJdtZ3gQ+ez?JMZE4q7~27Mq!{3v`HE z-J%lS>w6IqD=p9V^H>|AD}VvQN+W<@>%UZ*|fqnF1e;HEWTi&fx$q0KvVRV_+U$i92jy-Fo3lZs`7<>=sW4R&@Le~mM3cYwS+4tcq#ZK`uQhf9ve zB(%OCOWlB#DDKpfMPfzerr7qeS?AvS(i)Veqp2TbHkd zgbb)@F)1VF7)Y$?)keIU2wNCC_mWR-A?1}d?1EH`klrdQI;T7 zt4&LyPLN>VX>}DVEYNsa9wC&TB(`O(E!AvWxw+cC815z(j9ZD5w_FygI#Dhplyk-i zF_l0ccM>89A6@aA+}~M$g;EfGV7zR%%Z(VqD7w3`bssBCLBv(tQ4D7L-^FjK@sG>_*%#IYk#W@U!f2^gl5bx$kemldk2P^nBIPW7|hA)I69#1fg)MS17+WZ z+=y9I0lLRWVw;b8-kwUTjpO3XS#!N)^HnL`jp7vwz2V(tGi?*TdF~x=`9}3`U2U_H zFgJ0b!-$!@cGU(@Mf)cgNG*^2G~QbX3wm~=jLj+k_IEw)J70H6i)k+MQWF$&TyyAxW4PqHg-Y8-D$c)7J6>^)=8DV|m89{5k_ zq{H@OI#02@CLs5}|MolHL=x*93xcxSGrAB}h}g%fedC?`CIxHlwK!8E#Ug$x`@~k2 zY$N7Zx|SeLT8m%efdu+Zokl=QCc}84tA33(9Zgy!*1@GhpyPiV{UIJV+uO;dHJ}fn zyhu=k7TI1>F=8gM2~160w!Nd!)?>Gd&SOc&y0J?IYoEkdu^u@He*KT0tuD>jgRft? zWe+B~=ryl1Tph8qrLxR@Rg{!ebw>q4wmsYvUH@`}6*74mM>nt?WBtjvs>)9>lo+_c zZK|-!Z^gaa@!-ZHHzPl{9XfZfgsth96j#I!HqTB^Y5!9dl z`G@w~()I||XD>mB(k>q9k~zSriIgKDZ~#AHXA*z?L%KTH8?ufu5t3J;RVjD6Eoi0H zjLGY2CK?_+`XKQ6f$gJEyQnD}90%VM*Wgvb&W1Cwyi#k}q!dx-i|$C@ymieW#wYtMHG ziL>{R!yNh#v~r6UQL=J&Qkm!S5!vk6tt!R0P#Fb`th417Aq~E4Ah0VRcRcuv$Sk!? 
zR|r+7UgW8mwpNTlSvK~8njH6w$E^l%Pf5MC~wzHHur1H=(*E7u!!?X#Y zpaVN{`Ic+tA?)L}$jiz%kI`7RlkJC5MMmKxz^nx3d2jT^=(Eseb6o%>w5^N3;jU6H zDiw*`X2+A8G@#A8js4tfuhP-h-&`QqsQOOArKHFVkgJYKiDV`(rs_!Nw4n!JxyJmG?CcAow21a4hI*7{md*; z9T}woG&P3Orrvv}9TO9Z5@Pubx`cNoF*$SWQ0wZEZ7U~}WOtvqppChid*~EI#h<<% zi!M7a)`f!Bo`(<#W8S=KpX}`ca!Fp?>Hrj*y#j#Q`Y@#WuuE|Xw|ER`@wmN5)o4tt z;AkakwrGpf84ii*>A%7JU;j^zS^lptj{JY_leR`h5Iv%Imt3_Z$$q;dKt#HHh|nuH z=Mql+azL6@PZq_}ju@OZI$kas-WjM2BL)&NN!GFEE`t-ICF6}x=Cm>nSyFh5Z- zk(|?SoqN#+jS=}M4$$mSaq0M(a$6>1uWpk!PGv&;u=5_v+E z1XiA|15ggwA6FZpU*sAcIa?VWGXzK30grKCvbk zQb;dWW$FlX8^vcULqM(Xz!o~)IAsF>*A=gf#@fL(T64=17+i$*_#|=_GPml<9}-=g z&@U(F8+yIcl_E~b7*QeO%`?|IjakL37TXME%OQSqF}pK?|MYN2WZMpTRwQai{uWBX zCdu_%nX_GxvVj4^GYg|EJBk6zYe38%NLR z#pdoFkvgF@CoISHtKPlC^;;|z{z;1Zl#Fb-7ej#-8|;o1dZa`_H{Oa?waR)*t+9z!`Kia4Qu7t$L5X;z2bd>J2tGVdOovGi49hgLiBy>^b*#DQ98HSV5t#vz6WY7CcoSLKboG&0 zIb=h9)*NBCbVSzS(py5V{S@`!4#GWB64?Zac@(E4(yvc|kJ?$qbQ7i4`}p~S2K>^# zg@L#W#t`X#m(VYifLz=q~TQ8OFs#5=d>;34sfzhtM>v3CfzHrBW#q}Te7Z87SVMf)YJ z%=EtysMmh^q?c|mxBLEn^vu?Uisdi)_cQy#ck(RAQixIoQ(I|bAJaXg-h614M};=+s<0HLE$AL)BPAN|$_cyG~04GEd; zdg<Ln~MAr#eE)5 zNjOmSam09T4~!XT21@PP``?%&&&LJ+=uij;$k)TU8q^Thhs!$)y3dpXi`* zcEP3-M6-7y5*rQ;kfEd+O5Gav9u-!hKwMdaH19pKZzIH&Mr_|Ucug+F9BZc=cr3=- zZ@=wSr5NEfiX`l3hxfH_#?-mx#V;LWSdhu!XNL&k&qu$B=Mw()F@L8_R?z1>VYox~5?A(L9nz#SbW^t4SndEds-KV<^6S*vg^JqvAdsB6T2 zU56yPVK)T)DL5co2z8iQCkWJe=x#H(* zwl-K};*+(t0lxhoMj!ajaQbaxs2-#+3`hIK+^_X~v z?1iO{?1PvuRkp0Piam0GhXg^s;qRylDf4_m6dnRA`ufhXlhg>{1oG7;++KeOokz+q zK&i2@;o?jtg(|mr7hzPrkIL15{zBTrmukvZ1?_J+DXRf~^a` zUp=K9X4-;L%Fnu>Wdd}>lNW}MBUypnvxVG!Vq0~YvcoI?I=tCSFU1~{3?;N{a~nOo zx7@gGr*VEJI6yuf{XWErLP7fmpk5ZtR!hNAAXtPbY?u!@AMv(%>ZUUP539B75r1%m zTfI~=!a3+}BC5%+TwF}NUmzGDIE#mG6JF{jUuTwL16EjOJQYURt0)J*|JIxK zwMrOnPXNw@1Z+~v?#K9;(4@zca6C&|!q&OfT4~y!=~0$c%tHWIn5+phx0;Gk6r9M= zvT{+&nNNEn;~}r=(H32j=A{sl?{s3lNeZB@hFUwMxOOOI#X>uW{Zr?Y^zTR_L)9b>O=EM zs>mT>Nj5myy5b`xrV*auKQmfJ>7n(2(GNlx@H*uCd*i)r9%Y-qDi?Vf=Io#hgqvgokz^D>uC(MY+tJmUrEmGqd?#3IGkB;o? 
zwyvHqnAbrUD(n^Mr1jzMuA5*OhTBQWP`;$YVnVjncMN z^zltu$Zwv%7!Om(aAU*P{*9JNC4$&Xt!JbUN?P581eNL564s4s-TZPUxpssMnLM?y zcI~-^0XPkEKenH7$AlZ!9 z`QC0kWo_K4D;+Q|Xi5WJtU9j!!2_pg5(Drj_IifLZ4ruq>flX?$0ImJLnK$m>LryM zMFP1bu#lADO{!^9InCI9OIG7r!FZYhB)T$%HvKpg4NgPB0ALS@o|y#TnM~Vy(5vRK zXOozzE!0{y`xxZ1vmptU17bDyEoM7&+r9-=#7<(&L~)N?zme3Iwc`Ft1-zW^Zhdh% zR03@ql<7;=S86SXaldy65r9Y?{0ZZXA)H@p?;C9^G{Ab zhy?}Fcm#u-O=Ef5*~UrTd_4Mne6rN-JsjE)}&?Repf7p|t3u{uL zX^T*()jaY-RR$xB{nNDsaYuNp1P+D=iM>imytJDDBuK3Dd3zfNZO8=?b1no@oQD{= z5+gBKmOdeZXR!w3lSJvcYixNYCeFWTv(Ua|HWw-w@bCzG1+$4_EBn6p?A>Z3F?Ooi zpvD~=z1B^_K25{N+xELNY5!TFtYg^H205&oD z7LT;yf+?N6VRy_KlC5HcySAk9LHD734mxMEK)8RE2q{==`}NdyXS!<}@HOcJGdutLT+)pN=R zH%?91SCM$4ZQFj}SqbmqyG}qe*KJ?Fm7I%hE)RPMZaum=lL29#heE?EVx}H`WD|uv z4*&GQ)B7$u?KM{-I*gkM&qrNP;W~DJd!N_BAHhDyRGa)={M2BJVnyF*V%|nfn&+d~ z6;g=+9NP6XTHt$6ajCNY{xjbp;t_c!flb6^XY#lkq+IYz+(@OPN>}CYy$&VPuA5vp zuUxnFB(C&W*SfC?IC|SZDK?lNV^+&Rzmg(UC>{GaK~EO@1qp4slPL<}!j71Lsv_Sz z3O=dCUlX;D*Q)T;6%2hav_P-BFay|X zCG$!htBnWm@za5oE|$WdkM-Zt7v;|%#&av2On)QTf>_qgcCLf(fggxTOk;fK%eib_(BCtnRsw~XC-xi?FwS!Wv31Wh9PXHEaSLd|Ce9^u z#)uxKrV1PCMoF{bwBS2zt)GY;0@xWJU!Sj4_=Ky;IQJd#%k>jHls;Zt!B+2u-RsQI;5IF}pabWhhAmlZ@*(oF#=5zU}S-^2QaO zGL36$KO508a+KJFxVhOlrccav&WHhy)C#1#wn1XcMheQxeVB0r%GdyQtFT?#Fq_K> zYO>h&{_Iz7UGt8aB(*Z*9fl6jQO(xGXJILH0rwMyW1gE9ABcyAMZSbeCX3C$!~2aAmzm7Yzuk$W@7F-JVKGDTR;I5@yXbS*N)u`JNK<~ z_QY?No5-osM$eAuk`q%w--_SUzj?6lAhQj?s3!MBa8h_{Pkdh_$pVtXt@9qYjyH8d ztFMa;YWleD5C)<#M2Bs^Q8uNz+}_Z|8L}-aEvr7WpX4g#;0^jL90<%~AUGLusj#)M z9qF>x6{YwnH<^QTiPR1`_HrzE z6el0sN`llT%w<1eQCFIYduQA!G&-B$fbk-zv22U5#*zmn^hGizF@w1p5)fPoly0A{ z_3;3Z1)f9{S~#6#r1ay_=Qcw!m;GeHO;?h`#D-);U&M+YIlycrWFTy}Z4CNVStVj{7~qi0`#IV1x6hExe#Wv`_A z55_+GGR7QNb3|Yi6-GOGjj*sDNUn_?q9PCEp#ccOrZWIIo`|wrQwnwV6YgF@o3&sQ zMdYEQj@UzNh?p@DN!k#>X5i}VxwhsGRZX+~dWm$79-CHYtAFGyT34`K}8wGV zmsX7Mk?@hBjP zU;dAN7dy`VY*!vo@3ELoYQo(lMmW>`BRm0T+?^|8MBO7fG9k|;J<9>HovnSRT3bog z;s@Q5=86U22A5+Oe}PUB&#bL7q-I3419&DfN-s>3z}cI)RzEv=*OcVep+n<2eguYW z4UzTyaL5z>=S-#F_g<$**bBb$iZLI>8B>Lcs@`1&SND2c`;N6j&SS*Hf^wR*Bp&>@ zHM<4LV6!dIY)-!YER9SS$pKFs6ttFNro*%gTr_~7T^jZweXw=tZ~O5i6At{*)&U3o zOepNCH)8C=oe-azo~wpztkGA*P5JHKeyqrF4-O8s1-2LW9uDn~&F?u9zrI*$+>3`( z-EJaGg!{<8Fm=tDQ%2A3f8BN%qBZd5bVD4SN2=S_^quc}Gxynr z<$S#oC)jaqx4KB7fqiGI@5+s#a13K?O>t+1lq&oa%^G%`F%=>e?6UY8R$^$@OP5}} zbSXfEXFJ-)vqkpOPKH)N^rXEe@N@{eC|^v|q&c{oV(o;QSx!WxL@hzk*O%C&7* z3BC@~gU6W+c?PjY>Ju92xB4BTc=m13ouTkJ64dENPzY&#Ux>^1?b84aYmGP_i)UIO z-O86`F>1V z{n@$4?6(-czZiXC>I55UEn;_w9gJ-NhzS##FoO0MJwZ%-vAz%sI@2Y;kS=bahBx|; z?bC|LxIrQXFQ3Ax=eU>>a&L^qBpif?SV-U>qH@-M<<9H>bAl*+ciw*av5=h5!Gj z|4*%e$rUDy5@aTcSRQ*q2urbsc}GLj!d4z(V~8WkyTSY`pdC(0I5OyMfqTy~FU%A0 z3-!YlwMrd9<6a!R_MY7N^s`tdJu1NylM4w4EV|z&aOw)1;#g`sN`u!dFV^DbZEnFZ ze~@C>dz^{iC)=U|3nEZ@d$RDY#DC)fY8a9ZyfQVOmLZ4kU9+aHOqx&GN{Mz_Bg<6U zNym~96p^h|M!LX(<_ZgYM}tUbob2V*6e_|v=&t)EiNg7%Qfj6fas08J?5aJ@Vkny- zp0r=YH95|rA_umJQ#Zpb#Bhym!MVamis!>4em#~;J;xI~g#MQFugX}kgh^keDe zL@xy@In?+;0t}JT!=$sZ9UM9y1nS{8FWGk>RZNYX2h`gp8jH!BlQHN;NCD%gJ^8Zo4 z=N`n;Y|G&g&p#RcHuw*ehj+9F-u~ELj-4mF7L_%4P4H*NYabt{5(v_$J4&PaOYzfO zfaPxyZ%Zc7TqZlE5cePgb~xVI7RA)yvX9IEG*bB|qLp99Sk~GJCk-(sTp+fLYXjeF zz{EIL_KRkKnenWDdh%{4OND$7)MzQ0-&Jr9_Civl2dNB~JspF+q(*FGIi=+FVlAs% zJNM3h>|XS131)ifuMA`5lS4G==7f1=&)4Y=_C2U-*5c%oaNOyu0kREPs<{zk zS%BTtJ;_qF=Z_^kLk|a2E_kR4gzlD#D1cV+2n>;+h7^O$-T?8pQ@enH821c{uk;$X zsZh95B+YhWAsEzzX3;9fm&ZaGp=vg<4=UXg4Gd9j5Y$A$ttyeMK@MjdIu4vNs7Rnn=DP_2-DMVHOM&0b|tA& zF2Odb4Xs4FGtjDV6#?L~X>g2)KaJVvVk6+P&n)$|b&OC(Z(^e5+UeQ7b$1x!C948y z1(ENrXI;LtXwRjSqH>#Q8%WUZvoW?R&22AnUVj+J6V$6prFV0r5Dm3b4^>8NAP32K 
zQOW}&@`|k6lBnv_H=^3C$LQfo1y8LV@B`hbIxJ)}*fa$`pndM6%?&+LuEO`7IQ*1;|8exG z9ZV3aYVqpx6Wju&o)oAEv0_SyGFnWAOpFb-r!q;CB$Okb^?{{eik530ahS2AhG$O zceX?BBmIde6Pvc@lh^vw=si0DuV><0N?uS)+CcmDgQw@Sw8{=H>tfraOrEkzfW>W- zafv;MO~B}G94l`0Pd0(X3$7UtkS!sO%@ld(#`c&R`r`TDjy{O>Qq_g3L}GrZthq1=X$HQ81m2|PX@4U%zIp*n{Wl zP5UZUV<<%XY4q%Fx^fGzo`Xubh1d-n5I5|sbKE{WZSELbg+(D==C=CuoeUU_nD}>88ug00*1ho;h zI9rFQuzR&q3-M9iAS@cr%|VYfdzA(P+QAOFlEX^K5&K~;QzlpGUi5o`MA`tX)mwlJ z#^5DtiA$aq2PVd#Kp??Ow~|yBufSY_|#!<-2>Gvz<@Ccj>}IR zYJf9UVm)%K;v}o=4G_P8)V4XG<>O^9U3w{gK=s+`Gos`N&D^oSheG2G+w#Ygbg#w5^j>-H5p zw*i9k=$Rr?_PkI+m#k!*@wUnYQEz>BUM}DC)>)@f^!PNz&V#KZZCm`^K7u;Nd)cZ^ z+;<-kDA(0EN)&5T^z$Sn_SOwQ97OK?s+f53e5~9OtvLzuB}+pH2uvkgmN^2sxmbBk zu!IBjb3(n-?|iI+iz2s8SjEOu%0ITfzFCQru-M)kZs73YGiT>umz<@2L#YH`Z!(*H z-*)$&9Pg&xF5y&Hoy+V7&iGO6B@`R78f9bHvkBNorG;>4X(c&l92Tg9Y|SUHUAI|r zq6Gy^5bQ#(o$+@v!>NN$`4vh{;I4M_SFvs4tU;nbJL??HFrEMCn>4hTab1G5w}tm$D>Hhw znR1TYIq?|o816sM816fTH?>cIGfAAR`(hN>iTA1JCKqOwij43Rlw``|o zKSKao2Ryhv40ScWEa=n_yTg%CzKU%O*_bal4x8AySI?dEEhp_J`_F!dAVxPwaYwtl zQ2wQzGs(MLpquG#Z2ezi3OK$PY&Mhj!A7V|5V9;muM^{5-Y?w>(Jm%ctcy}bHnhb` zt2!UQTcfRpgAZ=roV;c7wwCg!qUd*!t%^;Y<%>!G?jYU$z}D0n!bkhyz9&IpcHB9% zS&)MXu@u37j0pYF_j`M`o(f6ldE0MdswCwlCHQ0UFD;)6uA z11*Dk^thH{iOGMO>92=#RUGBC8jFCj-S7~Sit<#GgIVs@EgR~3+bOJf@7MvYwdTht zdi&H~7^jym{lFeHdNy^Y-x-dfg)&;JCQUM}?azQqQXW&>=K1z(wZ9inZti=%D>O0b z;v0bC3I%xchKE|!8QRM?IXU<>I29=b3@HwnkD`n1X43^E2tDhL=*HW*JmRR$3P*&n z+3H9Oa&u(bQ*~1p=EK&j()3tO4Ih#~bqJ~m-=h}??Q%p=8P8rG6RU-=8w#bJGBqpE z@5f}>U#Nyb2)Y0exDtJMJQbEakZ*oCtBXmHR$$Oe+ro3sB}xo_&SjLY=P3l}ErlOnKs+kU7k(P9HA zcY0F2gy8o5p1yh2Q8whx|6mUwr}oIRB3xm(WU(M}ueeY9sSt04+ZC(WzFym1%f8kO zd)!Ypw9sFv74mK37j~nucb~D3zqBWj!hhsDg?}!I&vE^dk{|!2@6YbguQmZ257W>o zq7=No>pm5(+wW|lxX-DLGL_J#2Ys9kth}DroPw}n5OIMnh#ubkXAA*=P_FTb-)wl= zrWgd?plYR!nVw?-*Qb)14RCsFu!c_PfW+Yb=dYs*ii}Nq^#IUky88gDl##|n4yc+= z#}7_7e2|nwgW=ZJUX%_VPun_E|1*4yhrSl7Kxc}ARXjP{Nc=Kvp&a@LTa&Asrm@|_ z8N0Vovu#-2Bc+s3Mlxbj%h%2R`g(iUqq|Fe_m#P|Jn70w34vw52rcurPy&_N*?MC! zK$0D@CLtAAL|(dd`SP_XQ&;N8jk=2bNqx5N!Q%&SUa*y8n`e^Y2Cj85C`?~ANBC0+ zM470q;KJIv5lbx-w%>upaq=96OL7$F#?{H#f+MKdHrHWROhGir5lUb^`icnbHh@6g zW~4feB4svo5V_GjrMRu3>*VvJZ_l7i)O{kBRbIsb+h_6ylvR`tF-l~bUa2w)>?5SK zdrzLEf*`4NnNaamgjhrxu3$;-zBl^7uC!XPXw58mqz~_|=j?7|6?1{>I;3m1zUWQY zp^QEAvlX1HVpsO2>r+$S95P70)gV&+W52Q`q)ltj_x5#273>#GbzvcLWJId|$w~xn zD_r1M_U>aM-XeOBuyI6*~K?6U5cU2 zHPaIg%uEN;)7lo#H|CV zgUQS2*@KHVBC;UA7=3OVaqj6`_LBBE)L{ESGL0@^8A8Lv4wUf^9kr<-8T7e+i`AO* z@!R3vDKU_}TB20B7wgsUbcUFU_*iagt@lcN$xgbui^A;?z6RB6?T7YTTw>1k8F~~f zd?kZyPPpm|m8Qc&>F{pfReG_!I(-y)e#%m_9pfyAed24lF7J&z6A#}+CVpNXWSBLE zaEV=*1z&eonaS%nVvnoy%eF_{;x|GpMv;XQYMpEo!g?V%f|~9xyBf~ zy$ps%rN?bi_~5Uy0E6Ki5IxBPQvomOUzeVzO2*CvRx^;#LwEPiJ(#c;t-$2MsBOJT zt$3!CZ>13_tb;@3AEb)kDxVdaFrEeWns3)FA(! 
zdD5_&*4SB}42~D)FY0_z!~#|6R`@dXed!5Iu6}$>ZKT5inK+xmdgZVZ`^S-C;S@u?>x2!=Suqm|nkme?Qchg_b_b+% z{8ar;)uX-*gu{LV@|SZO1A_7;8nZtsnD1EE$!oX%Tc22|Nj%Md1C$I1WfzYMv7)gI zD+6IeNC8a>*a^~E@yKS+bzK8aAkz}sLre;bw%KU8z?N6Npc|p$7_Sjx^Ps?TwG{$X z3L^_--Ht*lJF~UgvRk}c-6I>5T7)Jow<8{waX2y0xejyo4=)5i zA(rJGP#5;zlCv#TD>g9X5JHHgwTPK@x}@$psd9om2%gj)4cP~SK@TdtN)`7 zw5R^?_Cx9EX`;MZuiD>(8XD}|-@ka_TpW*aR9Cn=`}I7%>JWPU zyc2-wJodE9%HM!sXIU7-)DTu`*+Wwn+8ZLaK4J{rUNS9DN=OMQsyX65|vVg976*?`lwj zBR@FojVv-@$w4BMm%(EUj&KN#lNz_pd7qQpjZ$R*WUAgIQe5d0ym=rrKyMh z2U&}8!g>yNQy|Djk5Y_`>#ZwJKMD&#FG-Wo)N zMbXu-x%2eWzxk)i%%+bxz*iC2EY;WdA0|@RsyCjBL8td zg~5jaKZ_Zl37RH`7MmHO7H!u6dLOtij)r$mKy1-%O(%YPo@Av{1n4wq$Z(B69@ zJIp3Bx3U@sa&l_Y9waAa9nPd!MZlH7IED#eGgH535`Y@JXUosQZq9hW5eNX%3ns7G z4|BLqNxQ)_=~|y#^V8&jbF`F_N=Ed6B2A1(8!MqVO2oQu+>43KPnTMWXLSf$Fiuux zKOR>$`Vta1g@`tk7J=!`mIqA#p1O2cY3zR?Dhf{kgs?>*aZ8fRM)O>c+WS9c=uzh>NH~(z`io^ zjr(ztY=|9HY~CSz%x)dLR8pu!J8{Hfg%#@MC}%LK>Y2t7qgWT}VU&^!?WznHX%#R$ zaa{5K&cNA1rF*JlC*Px*VoRoL<9~8DXB6&wV3C zN-z|`_bSBKQHZwmgagUR*-r|r4-?@{MfnIWVn@ldG(?GFdM#RkyIv*7h9#9mtpJZe z`@~f;pjmT!+>gtI0Urh>a+scib&U8Y+YqShHIt5Q+^4v_U~+Xn8ygkmgm3Q$f( z)hjrL_7qXg#sJtfx}cf>v8(p6E4Qw=yUvhQUtj%vXvNRAyF8PITOo>Dg%G@z^)#fd=ge9%Lg1#(Nmy^8LXvVRt&1 zaV7dtZr7Uj>C2a=CgWt7{F*cw9gX!hv1)C3*@OG2E@YIe#IqQY8d-P#E1oI$8 z4w(&F!hTyhpv8)Vm55(?!V_r6p{u0kA(6l2=F%XIdpWJPEDNOH=0y(8Wg7YFwP>q7~_Z#YicV8lJ1u$@7e;OLhw^PXFE_UhIk~t#khG= zcH))XNUo6#GKMCsSUmJloEs)?`}-b9m?=?-v?6?)TmEw!-00cW$;*kEpI9C7726Lm zpy?e7sim8)vRG@>qrKUy=O%3H9&Lxtc^xH5R9ltyz0x6^f~Pup+>x z@cx-Lp8rV7?;{&k4{BNa^TBZE*bWrdnpj#72;H4w4cO=1@fl1EEO?4vJ;*8Eidnmy?g;S;?NDrOZ$?Hv;-plKxgN2-SY1hXbA(as@yy*XL_1@x&$vB?JZOQnFTG@uMRr zcGs6y3BG_75^`=IuS75n3<-5HCV~sj$yn}L0x7n|GKUla5~wUT<6-HTV#|v6nwVY+ zHKc7mU*9WLJ+_F13goEAqiT;SAWKS!rjx~rwuj?Ey7#TK_=%g(*mV)SFOf7Gw4ek0 zD+qz3a_1TVENt(Ncbz|wRqDa~Wxr|G5_F;9sQSHxh8(~zf&Ur>>ZmXjtdJt1=M;@t z11h>1scT4n$fa`(#w-lBs|O|Z7ji-i_*fa3^C15T`-b2MwWjL|CfmMECm92}V2{N! z{`BOxu?K{!P$B7}921Sks}$dBCxmDBU|8~NF*5kz;(2f<%5zPp510^z&uk7T^Z3$m z&wZuWzIAcp8#e3<-!^d==WY@nWAa8SOqq|o%l?kXMaACwa2V_*gdS!cn2+vJfIF%1 z!65^CH|5h?;Ej_2ZsD&)^SzBmOq1*75xj#cF6uWGaN*$1=8ab8+Ddq&j z4H>e{4Cnz;7F6(JD8q~O2lF$7w1sS`sw%{bbC1zU^EBIT>@vdHCc)6-+Bx9R1C?Ve zygH_(?-K}!Zv*I0me}hAXzX{oSx!E*3Er*LTd^2TQEk#)%SI_Qc`62Det>%X<{@VM zUdY?Nu(gTG1~rB^kkXrkSKVqMQ(Ku1fl;SOH%VT7=YAXz%s+Bgy03k=>97L^C`Af6 zsMxFFRjL%~YOKv&m7Mk6n;{2v*Jh9V>2u{`8M<*UZVk7CRE7=vM|z|166mmGe@|UY z#dVe0GYta>LyZ9|_S%W3uiTIEt?r8&DV=1sxZUtoCKvC_%LK4hAnYd)i+gj;wm>&> zM(3_~y8$4wpn?BCfrJbY2&?Y18)5faDS z)ZgL5z}T!1pHA8TrXYLic%fJD0Bl{0pEZb(9M)$iaxYJwL`_4qc6~bWqN7D%N#c3! 
zo!E;~O@`H&pnIJyXX1t8G+sS}z2A4w5h~s>`pnjfws}R_j3vy(<<0XB{=r|s|I*cK zoA9j%_NS)%Jh~gmPlVzv2|iq!xDn*?6n8up7295o zz(z_@U3|UKs@k?v4xJnZ)$?qnpMhMR$#xSWq)Bq9Z>qM|y}oCG!ZjMW6ko?LHEFU9r{PlfR%TmRFIntc=`@_x*Mh%QhTTV!E?4`Nf{ zyr9IE$Y5BJLNm%DIgN7J7r|w&IxYmo2th6Hx6w}@x?7?gK|Wji{q+m zI4!}@Lv|wGI60M2;#6;D{=@4d;kz9(2O(9{oaeiVC5ZOnV6baS^9?JxW z!R?`BYyenc5xKx1z!Lzf>UV4*0kE$YozUNp7p+laQczY&ji`NTj*ZOV;Y1-WYK9C; zJfW(bn@*i?-kr)q2;pvf%j;Yz?{KS-T zO-Lbt2rx0ZO=nZyCCI%nSTReI<3)5Gqb&N1FF=)(i6+!fC~-9)t8(3KP0EU>I+yLU zQD}kI5V7tccxJY^T1r5{&UBnC27u--*RdTDMIOXhY1>Y?r<;iK?H?ozS^=U3(}DxkavOV2QLOr^>Ltwt6Rf8w@k2g z+aB_SW5)Am8(p{kz@w^Jvpd6UJuj8O{3+I5*@SQr7E$V%F2BL;v(`VdnNt>#I``e} z?U0;L$;oLj5L`w!=UX=u084v5L=}IMZio%;Esq{Kzh`Jcws&*fo&vB)Mzu8y#?)^G zR7R{PH1f%apgbFheW*8ZyyBBJv7?#kkig2sO z#6qud#Hhis_wrM23@IAe(rd+jPpYHYuooZiCZCn2HK~46-IfWZpiS*n9Q$$Al&t{{ z=+AO>&M`oT&^^Si3%5YZSAm-U-$y@*kDvSOt44q8^C~ybjtc+G7p5l6$H`75AA@z{bZ!amfv7Q^q9mQL8U(VpeXPsoE0n zdqR!00qr9$Pcn1!u84nR!o(zQAEeU<(++Gk$v1262LM2(h8P7hKO)8mvyL#pOb<@% zlD3SM;Q%4%9O;M!-n^1#(dn;Y?%Q;yK^vQ5FCGI2RM?6*W9@ZiYyo7NQX!MhUPp|W zLfoI6{L;7cL5g()72@u+4QiCNus7EzB(e)us1{By9BLt?Tx+M|9gsM$)GAdI{OuN)gE^&bf>P zdo%q>zW=t<4c@rseBa;wPKV2i;06 zg&8&u%q3DUw#pzgTeda0KIYO1cZmafTK;7pSgg#&Q1RuvNwI*mL=3nuvvmyODL9_R z78lh~9CDlj+u#FuO~yc08yMYJ_tp*6_s>{?*&_VE`{$V0Mt3)N)L5Bm*}68U+SHcD zHltu***JK@gSO_IrEKe-uwr=vw9f0m$g6Kbq~D0&#n8a1x9s zN}@W-*f6CC&DOLHgZSP4czl>n#7r+zWy*Qk1eL`Qti>iHGpUpkrg1#~yU|ynO@@04 zv2q#4MJRYe%xlqc~0tYKQgwuteub(14b0T~vd*i`zcr~Mm7Rm$0Ilo;S__;+)&YVqw z0J(@TFHLyn+G`=Xdvr~Oda>9z=&D~&DWXrmd#BH73yK}F;dCp{sm^KkP|0H(VF={+U22-)^1Q80AmHFb3 z{vH=*Q63Q+j=G^CQPgdZSyeaAood`~{ulqTk+FVMQ9D`|J%vC!+_VPbL}=hRr7|Bw z!YjBJM8)l47-eVxznmTuj0q{MU}*3#cEmPeUP%m(6hHDM&}s=>9@CY_OJz-3!1Jn3 zjQ!%eLqABlrvPMii)mVZM{XwPR4?-`hxu$aDGG>B2Kkjt3T+ zHZ07sZ=ZY7B=*t|F4{Hn;#2!NsLuq)Rv-2l-~zD=H_k{3A(T~&_GB8|9xB%m-w@YY znlmwGac-wCaHH)EAKQrj`o+Hv+wb(irsj|6QV?-?I@h0QGP+-<+rgH3q-~l@}ZaFv53oemuxnD2&Mg3%2=KJ-j)|P zxO5YI|volJ;$KV(qia@__|+!GHel$(N%a+w4fMSJUNG6OJb`g3tf%zw3q|pr-^YBKx~{rsMHi zdov+_Fqst&Im@#+U9H(v2VFNRbtBO1G3HqxKsL%P9+Lr-%}IB+r&*eU+N0)H>#;vo zhGpwh;w49cX$5#r; zZrn%>Knn3<5>LU9umEi2WFnGNwV;Y+jA0>LHtj(g+9Y1!cSV#>{l zhHA-FyJ{=0Bf2h7-QR_c&W1-|Cb~Nj|D~y(_)(JE2s3)JHfcX<%ee?Wf<2~0uWg`` zK{b*edfl-ivi3C`A%|g*g>g3#{M|>fJ5ABF`m2cbyBi?uiE>kJAH;MJ5(cT?&eMC4a|AdRI7_#bi&J-T^ zJsrdhL`)NVR9RfPZSw(G3$yeETN>a3;}lk)*4cHM{}BYq3}Czc{$9E8Vhd)#5ghVw zOsqQVMR2?sZufm_Mel7NFN)uDVS1w>xCrGbhN>hh=~m#v1GyH1_zW!)zj%~JUM-R!L;8Muwjf`J*@2#tq6p@_-#K?#xI0T7oHLK-bvdC*MV?vq1 zfl7^rf_R-h0E_N~DTub_y8^o%syrImdH_`qf^!e2Xz6BBRQU-uy> zjlr!T$Y)Ya#Q6>qVjtT`DECkYx7$zJ+gtH-OLbN5os6D6eLH@~hQRC>)-!&{DV{4V z%K2f+*0%b7y>}=sMHpkGl)@dI8Oagx-zZ)G=3mDOZEy^2g}}JCy`Ah`m_NJ8)L^c> z1P?beJ5Jra9FGDS;*(b)v2>%Ycezov({Z=!)HPtexe_NI=9)q_n#f$f zdENeq4l%Ptikx^4JOU}4Hb*$Em$(_^c7hC1qBZufvfGwYw;4a8JFq?f!o;hw;?Mt& z(Qi$Jio5lrlh5r-*hufkCmXoY@l$nqb;&>X@#xDK@dk5*YBp*L=u4cwxWI|Ck=%dr zygiYwJD;66WQ%YRK~MjkoSZL14w?{|yD3g%uo5-!!TkK>r}o8yz)G1Q#nqrxPU`~M zcrsI|(^4ZhwfD9)Q7l)yZMLgxibdJ;i-no?v$ixB--8pE&5lu5*10b2F-K(g{EIA5 zrq5+RAN5mc;B-`Opk0`F>034eT$ry;erGS`{_7}*X1^tq-rrAVV?{`ZT-VQSrBiL6 zmwOuINmEy+Y@=Eg$IFvIecyeP8&_`HM;=8JH}V>O)M$HCjXZ@YVQB9w6VJv>bo1Tk zfBWaZwJ{7!ug_5b_$QcaiVMY34ydL`z5)sbt!M**WzJT4LYM`6FiZ8tpMPjSkxgZ_ z?1Sn_o^%)~C;JUKfW7ViDQEJP&HrcqwGooo%_M*gavU~YPIMn0(F`C4c(y|?9yc3E zqvF-rvjAfZ$?C=VA_OJ5%UO?^!sZa}_CjT8&X(i*=UkMDVu2cHyn^073vX-DiZC@Z6y!ehc zFCy{Onh#mPKojxrvSc$6@M^VDi`#)-tLl#EM-V8@t;_Li zDoV;V6)V@IoIJYt0IsP!;0*;~%75YDArxZm1JxD#23M{~cNp(AK4Psd7^7FUs@IUsXp%Xn+bFX0BGi#$E*QOK4M zEl!v373u}u6ff9Bep1j3H+6BjXF@pN{6Fu5OXnZ?5bfri_a$+ 
zqqCE3GTv6I5Eq`A>Np)Imx#Asc-x+eZ^rDC_*JTIyNtscVtutw&(wf>G-@^&QNiBH6NvBBmlM4vG4-zZA30^vjg6#ATeRl!-RbZx+iR37pQ$>9nPiYm zR46RzSQIiTipuaM7ot*+vd7fNSM1`uLW`xlK`Bwy?!?wp_~$9v`cidabtd)etVyNA za<&zC>g{&oTA9gG(A^_b5f9$p>#x~qAgv$6N7*7Eu@qCe+qB9*F&7dmjd)_Sw!X{a z94cKBPgq&0+neg0v{2w70mRsz6Mi~t{mv~f&opCPRH9SnY7-bA0_)sSNQM3abr$b` zlF@%eHW0*Y5^=s#N`=Fs{7a}tXnXc#V}$)oXhifutUNiVI7!pTFhjAM9)=Y?dd!Xv zM(gfgw()#Zn6iC@Zqk@cB$0}7MRv?z&;FhleBHQm-Ou6fvB#aTZc?qQtdGs1duhb0 z!ZI0mcPL8V*0erXTdF4ib=}7Lll*xvLH)S+h`HNAy4Q7>)NzD66pe7pi+XDC?z#DmU^QI4-F3^U zKreo_Xd%lz*kk zMco?QOAgaN0<>le^15imEtI>GfQ95Z&SawTyD4FdBfQ?`18J2N=kgWbh|o%XZ=KwL zP59=N7la__?~nem9Dt8wI!o2*db~v3!zwio5reP_5n~7JiBnf^#c$=l;(Y`+s3=@) zstaAflJV@?E)Zr+1sgPHsOq|JY@Iu$nvk{eO*Kc8$76~GEi2NX0L_6PTYZWFlC-13YK*J3(FoQV^J4AMdug@Lb~cw3yK%8RQ(y8}6~-O8@CfHL zh=P_|9!Ova>>1!NDnAq?@pme}VjwarfJDdPA|I33H|epI&i-bi=>}xaEpQvUNbI#w4V4cEscr z8#vz;cT))$C&?mph2a&LO5{T^gLvH2*Io%} z!h!lD|9)kE1CkSaR&WJ-{TlvLj2{05$D^CWUu(JGsC3RiR&1pd=HB;bDSm3JHs)Ci zv;5Wb=bdnqkSC>P@r+r(?Y3QaYD;#e_*kW7(v8k~Y=C(idzXLW!z;7N_#j`0;S+gZMC&0_F0`(w;!;M)@)Ww?MI-4?PH`O zDrt1{eZec@m;3G1i-T)`Bnv4SP6C^Vl|-lPesRB3x->CnvAOL9HpI9S`mhS&5t;IA>hZ_^rS(VbZb3KO+tk zO9qtOHRO%+_f~~F6NWFkl0$xF@}R1Tgr|*F1gj^C@sH@aD+yf5eYp_ z5!{e(vy7jmi~%L2Ye`R+oA-kdr2(+IGjP|oXmma?tKe%%BLk#)KL)9bkn~}?4v1?e z^IYtlY1Nfk>q}wUQQ7M~wgoGF)i!j+#kgR))wSn5rywZQ3{H7bQBTAIpb|@Qu$$?< zy@%tnNqbYhRWbQ|@p;MGn{1d0eVMUc#3lFsPSRQm}HM2@up{jpaOL90xM zqm4&LfSw$*qDuBQen@YlFoHI)4h(r63sMp6SVoXE+CR^-5tdU9)cgLD5yj1leN$JX zRCqz>9TJ#>WHE_>ftE}uVS+(M^;@VK5v&Mc7R0H{3(0@dO(FM6t!W2-CPU{*hLw;| zEXmAF!+szfJV-ZLwc*?;7&k!aQZZ_syYGNhT5^MkYjX8=d?f*^zG_ca+Q&r^rTf9? zr?%h(UXsQrbj~S~SHfd)2a-g@pDlK~iTJs+T^fCPUE{ z@ptU?p}HIFBn-~o?oP*3(w81Y&xG3CY$K$bU3D94Z-4h;Qh1#{U@)m68yQUW((U%6 zuttdwn}9Qcs8CXr6K4B)_Vl0lzyh?Vh;JOmS?bPMqCJ-~=t&jN1Q~4$rIeyQq4;5n zS;ZpDvyoQfQ?`F9P4d%Bl8?`C6f>IC1N0q<7}TaqA{sg?GgIx&fft zijLO}tn{chOea3Qm9Y2=y@JXryiJXi*2jjtc2+6Vpv zH7C<`TUjCXQ*!F|9@}px_vzCM_QxmfWDQithI6b*qwscRATsF4Eu=fw?cM3qIP{26 zyc^cMIwd>E=I&+L-yNccLdMU%{Bo*|nU>JTD%QyqrPMvzL2hUa5F7^4t?&{W$Y*@J zqTs!~i9VBqhr`L6iY~5TS)mF|U}ReWqGz+AF|kF&Nu{ zHWY+TXN96tlcngXJ9*Qyw)9Z4hs`am+F>GOooGOz`P?~-vFLJw>w|t(9Qi1F!`{KB zwc+3_Vmii*kKjK_`79A@b46eeWWW}UTY0kL523*-8=U6Clxcq_`}X8dQn)97u(5Av zTm$(fFtA>=!1OCsQw>C}k)p-6vk{5^5;MRSj@stn^(H6xg1wdNUhT`dICJ8^*@6kU zWusVVdzySh@x8=xt0b8(4(9iJ0b1#d)7drx(u|Y0LdwW_2>+aNIRtq3`!wo_h&%Cl z;10U+ZTX-7XOxXJ+E$3`}GMrdT%E#->qsV4KkGB`VEqXcOn}+tQ-< zh6mfp)?u5mf5P&gCDR?kS0X4`6F~5F=%lzH$)273G8Pyfx(zaCZe$7oGx~ivOh9Ib zaHolrAem#xdyx1J_b~x--}~7hD)Fx$3Dus~aYb2*gH!Pi}D z8%!z>iHRx8y~oKnGn!pSGxqkb{p;QESRmw&TtFOka1Y^3shEvrv2n?Y%`4A9`c7@6 zU^?;klY9 zN!gX#+^u`G@Nv@&@;5JtpelD~CnCBWh+mZ&$#G9(kDC!R3cVsL>~TanS1S#tdBEZ% z02&rnF@7}Ks2wSsIUnG=e2CxjTmJpZHh=2M6+0DjKITk7h=;fZCJ4)!@E-j3u`^7Z z)pq9?RT+v4ZlBQ?_NvKiH$#w6p~Ib=bnk%v^FG`vK^u6wn(GIkyuSe$Z9-~e;j2Zhjw*9fQN1`Pc3~s@yRm4BOR&vjN>=ndZ>)JyKoecm3$$t!`UD&4KA2UB^ zIo*(`ksMT6UMn%4M>&izw!nmkcjKv5Ylb>lTzZ;_Zu# zSg6gmY;RY%s^W`KW<;K49X|}a0VH6rV^dC-RH8?&Dj(#JQDsd68!WU_991q;&&#?f z=7?FGmeBslypw+;|Ks1sfEoDnf(sdy|O$XIjMsJaZzY;sEOGF%n zwkBo>yE4`>AYN|29|M62+owH4d`rV`!Yc8x>F&DhOB+}X_VbViJmnPb1gv@BZpM&e zb2|#`yo$4a5i{DL0?QA+xQbJ9VO-W3DVzImz4=nQ(yOrZUboM}Q+7;5EaoV@N)T^xskIht!rya-w#jTi#b3%dWa0Ry$BB6rqCK`!ERl>8nOxfjT|i7Qw;SjSK>qZyR_H6 zxb&|s#MeqVET%WEjPI^boAx-r=LH9G&0CBFX#1;pqEGyF*yY-uQ-i>fDte_)WOmcB)aiYb!-UYo%u2X+MO3`=fY) z&L0V)_~L_?orIpSIa^wKbEmK! 
zNAoPzTJfYmvV~G8KCmnGX)nLOb>o&j5F`3Yj2+NpRE49{1L3fg(C#f#ns|{Wi zp%@~uvL)UxxKR96DH{+ag?$Ol3BJQGAwg@E^h=t4p>j7q+?0nIKQ7Ur+|O^H)C`Bm zb`CdEo7eGZbXhr^4)(Y7X%M{N|`FEyenajil>k z?x34pcG;VQ&Gx|YXLC9wegpwnrs@ou?K%I2EXz;<%mQ?8z5rI6O%t7k1Yd6t;Z4nk zP;(~K2E1uI{F`ro6sOXVwi2Uf4~LH(*7WiVCK#O1>W=lv{V(!6#Tzja2Uz4EgsG3m zTEfd{uXrhn>USX>5LXJTbeO6z)M8caP4Q%Iy^faB76Ck6ViY7!OYvO+0mmj0%q4Lg zk2r#e6;O0K@zrW2hC#exxTBWvG0rR3E+?FNeQBoNs8!wfL*;;J3;jB~x{z!!=yf7> zfuh->rn1+`?u3enb!6jx6*x6w{2QgwXbcdFDLD>%DImNkfr?GLop}O45wg=h+r8d) z|53c^E}(7u;w%w~6y!Txa~lSDZF0LoeidHg7!r0rlwNGQJxUn$q^CBhbPG+0F=7KK zjhd1ID;&AXr=^1|ua6nTf;5LuhTFbChzE4rVYTgbhbiBLpRGLMS9Z#wcTrM2b35TD zx#ig=T#4uER}^J1ntBkf75gn?*TG!zKrAPlI%l^1(B>z$n4u_M`Z$&!Q6XgGp>*r*<@X4ljjom0 zZBWW&#%y2{6YtpgapJFBwQ=z+kP<0jMsB(9!A2-cYaUdtR%YyJi>;M-QLCJ@c>2^8 zug}0pz&VXyZRL)Wo#O&#w2`P9b3g7S0Sd?t$A*pRaq=Bl?IzxU)w%h2l6p>jU>m_& z!}J(GE${Rk5xBgcL;{yxk=Arm77`RwnLwDm()al%qd$(`BUS(5$QwW$^#JH*Vh!3g z4t>M$K}uu46h7w^#Ip~j} zTD?w4>FcUJvlrp~QhH*OKrB8+EdFF-K{SoHt5`?l>l?I>@Wty7IsB*)+kPQ8{bB#W z!+vz~a~mSq0(FExG9~Cm>$COOH80j_tXGqy4$cL`lcTr|~I?E@nh(3tGZ# z%7WwJ;sI1?BY6G+an_&NPrKdS&b|xJqh2p$5r~lZp)&qtd1kfRiUmQAbBdKLX)lFc zG&xm^DV$*|!*B<%AG+JKBHJ9oAfGO^yB>Ivd6dL_vyBT8N3$~A1X_f)v&B>w@zMs+ zK**MPkJ_RFzs5>W6~>9VM-t+oA-bHcu3>kmNRFI{|~EpX~-j%3^0`&-9@ykkHj zJjY0ER7}Sz6l#vdUyW}_*FoH>_shh6MCj4@*%i%?dwprkd?AY4+WzH>urNj*DpcV;!4H@ z2eQ_2vr%XIC?-nJ4f}!G>*!L$F00})p4)^zh9*YNi$bdIcNlBWPQDm@YES5~i+d5w zOTiUj_iAG8fu8AMUoaR_ z+*(VO7gklg&Wlvuq*5bMl2_8&mTbwAWm&2Mn*Ucm=ht)2_cS4G*-oW4l{)ZJJKg>J zJ(u%*pL2iC!x>zsl~snne94<0Q(na)Vh-xl&%GQMEIs}9$6tN&Lv4F*kG4a3CYSy3 zo#8OaL_!9w;S!tLR>>;j_{fI1*&-HZ$^dQ`HOA%Cu^$K)S>51lYOUbn7wI3E=e2hf zJ99`rtEOmG@w`(uyDnj#k|i_;q?%&qe!^(O5xIprmXvUEf%eI%*>Ul6GnvqM@7SwO zl5_m?zfJG=CAOF>{M&!Rw~$Kba08sB`-F*ptHf!}AZ=s2<%HEG145A| zbiJP=QDR6W6|Jq`bRI`ci?Ye++RoF+oG?j>7V}t(b?Hoa0WD+OfokbaJROF`?D+S8 z`?a90l?2b`=jvdboK6(TMY9g5a8=$%&O;w`QiTkRa*l#}qxa{fd!1w?lw)Z6(DCz( zrVI#hJGsV@bkD_u)W^wTV77(bRxV2P@MzMU2s{^BIUmv_;xGcvC#8$Hg2^hzB)7-C zUJh|kK*KJ3(K6vv;#zHxw{?^*Z?LS`{{A$bh`)bt#o<5!wcxC*8Nyq`B(Bj`Om+G# z-@fJqx`Td(3^n8YZ{>pzr!H zaXk35)FOnBqObY0#L}mn7fHbfYl$=9PA#^^sPn#)eEOghrAeLtrcNzVK58sg74=2{xT$11NnX@+!LbTdd>w!Ki6E-9w zpeeC|EHNv1;2ZaKI+$x9u7wkU11z8|J7WlHvx}Voh@WjWOPj50&i4UJJwIaUq1;7%nby4*7xLWke`8ZC>`NI2=ZqCgl8tGU!0&*hvoL2AYA1z$;`WWuq zIwJMz%JPcO4Cd`*w)|Z^vvldA_9>py4t7N+T=9s8h`ZvQO~kMgP?2Z=TyC#uWMejw zBbiHX@7!4pmoLFvTXOMRl$Dd?SN=Zy4J*k<_}X(XJpZEq#JL`#5dMj#Fo%7HOG_91 z9efiKGaM>M7Ww2!t_sVFiW0U~A*DG1&@w4Cvvl^NzqrXs2mgD(p%@>8!EzAvWDk8e z%=_7~UHz*~&B;RC6Un85%8I2Mt`nU1saDImvto`b=12)ln1eg51W^Ea0C#jA@ABDm zTG_3>xxKKQ)hH3{l%&m}quw+{qmcA_War6G#*IRDX3*Ewv}oeR`gmuR@}Q#1HjWql zxM^ns{6`AA|MJOqd?3)I^&C)v!pfgMs7AONc{#j73yYq;W+Qr=1Y5DCPGFiJ5Wgj!XUKNfiO6( z4ni~Ow!s+@ln`WV#>-KJcpypJ3B}42y-2soUK@Vmj=F380}Y1!JXAj%^-jFo+%u>k zCmxpJVDdivCHULM-MhE^VJvx&D9Eg=gxb2ce)o<)w$etZoV1q7skILOzBVgGE1v&~ z;LREs&G!1+;%lH2;&m8&xXx`b}%_4|Wvm^TlaDgm|w(Z%VlNt?9rhj?rw1de! zCKsY$-L#v^zZkQbM^?66kGs7o`r#w%A^pZM@j(j8^VS%~4Zo3IwL;lgb+`uyb&!(| zA$6=(q%k`6=-JClF}9Wa%R=#l%aZWX`C{V*qax2F-8-_Nw^U#zcmT4o_hY2m;mQi;MzbhCb@&`6wldU=<{p%e6tw; z^1sb_tfj>>u^*E5fFciUEB$%{Y5!N=i{*1i+yjMk)FDudej*xi^VmYgnl_!@{L~@? 
zGT3gM(+Uj8()OWoJmk|3p|u;=#dTIlN^jNokK&*{ijpp1z=Mz0G)zgmciKy9)dC)j z_QD!w>pgIYMMLlQr`!%K8rr=CcBxxV6S#Qkg1^vvY=PKuT0d^I;?fXOcyraz72?~u z@Duk$lSXYiofGwu1ujHzJ1`xZ-0b*qEDdP4pqwbxjf$_h0=t6$pl8=OioTys7SM;3 z_4ptz;CWpa>}>Ym_W||ycBUhFT=4{;fB@VbV`S?7#ZL>5oHDYiNZG$-NmK0}O(<NOwI(?6d5wfATzTv1ITJL9Px zpZ~?%{xbrFjx-5|l$W_(qD^z1~UNtG%a1iL?kt zYA(vT&pCl{r{CFic6n@+$YG%0AAS}}171NYJ3Ftad)4`Jzi@Noo&H&2{M z(a@#y7yMpx+T6N{ z+fl!=eyKtO*QD5b+U4-@52w(LTX(Pe-*CBbjS|WLgF5=vLhCQRZ8jlPR?q9L%UqKDL7x>~P;mZ`xhN&^H(r*;#vuzmoEV+U)bccqw8c__@r%lrYJy z5**e{du`ilD)^}T#N4~@i&1+*2vhN~(Bxxdwn=zC;!s8MTfsG9Kniz0{okK_+X*;3 zesk)b;Z$W#&V$TD$#R(ILS1!HJLub=G5x${eVM0TUtU>%*>$+TNA|C;1uV52`Ig8f z?mHE$ft&4<2i#_H`tEeMof32|pTF#rBDD`0qIJL8$JR>{0CnwxHv~HEcp_yYd@Su< zexlU}DG)|2V(;VAFTIqmOSTJ0381UdeIm*hK^GZs;~?iCh{!mw;?jf}(pxKbhKXa; zY3Ss!^}Bj${irRUHQf^nr11G^OGOzh>9uDEpetlplRI}1R%H!nh==1}2GCZ`o| z3qtnxP26cZH~ZX-BIoV$B+ev};dS@s9W}@#EuU}UOawVvoTW%0J3y2VJF&OOe1Di! zA`-vD`N%Y-*?2hV%O3!ysWW8_Bm&*7m;?3Vj3)IQ}Nb^9hK1s)5@8)n{| z7$3fg?%rO#?E^ruAPN7!^UeD!3TV!beRv$d>Fw>wTr@RHX1TP9EX$w3mqui>jaT#NEaxaK&09y|pg^LA*>`&w8-X;ziPcW<3FGUeNhd9bWl~*Dd^LwY z03zK(D^`h|h(P4i?>_mS|Bmn_B*X;?_y^TCSshXL(DcEK(0ePngyW+QW{EkY#Guw$ z`iE$6J~&LPJQ%U1U1;h-+A&4O%Jiw#t1a}2?e7b zN`F{2erfu+fMu50<6+p6{Why2j{1@_Z5J}MNs->q7Qsi{NlDJwJU2-DQ%CLnzLP1j zm)OcMkfS|s1MLX&%4*MQtTfwA*Ik-fIWqSW59u|FTUij>zrm@^buUaR6th+1eV<5q zfE_%Bp1uxP-_!jEonW2o8{Duf>JRq2_wOu+O9zW4;bHmOn`i~*=ofolO{}>&nDqGO@oRV ztWwynx8ds|0A?aSG;OQ5cu+vh^)Y||fW z6E*E!?th$&t>T z0m;6hr!9hwT6bb7p_#P)N>j1IyM5{0WgqnH_>27mO=)vT>|E)Al$5y>gXWqYr~1XE z%Ra01FZy3^e_8EpaIzV1lE2FdrVZ_f)+oe5?BCpLuj{lG7F6HZH_w58rv6WR@YCD- zt>;eto|rYrWm9m$kT=MoAzWKIYXU>+qnVXl`e^oVeClgf@z4jqeAkJGHwiH>98Efp zW!*phhM8rWCq?{Gmy_{-2e*z!&4~i4cX`Y3WOBdtX``P59-LEyooH5DN0y&T>V%JX zYuFn{(DinkwUp4YaA_f@7VrC7;ek|_mRYDz>+l+%%j#pI31))&=;7t$Zgj%|2*X1~ zjT0c07os>uKA`$muXQ_$awIbiLt)sW*tXYyY5i#5&i|`1E#nnc6X-elKsN4H%U%s! 
zJzzji9=3Qd(&SIQ^ZThuu4nRTQWA5wJwD{>$nql`_9q7+y{Y~tXW_L$NhACv|NVmF zlizacnE0}kKL2bH5vy@42+r?!kje%LqrIV_ zG3A!PZdW1^i||xiPsH1^0-CW8VDl7Z%GbnSrChDtLIF)s#+m88?BFIc6{Z^6@9>Sst?QeA_9qqW&YTm7>L~#>|5w)KkCBR z(meTIyFfqGTHDniR`%DwX;Eei9M;{vPFlcm??Hqs2%h5*OAENN+ffF%X#?h}R^2ab zlhtJ|g!i#sybw;q_DEh5A-HUjoAPa089j3f$_~HR!Fukb;Krph7qoAg|$ zq-sTXh`DH`md(6NNooi+bofa>n}e&}l$(2%{2j^|B}?yKKZV%H50BGRharC_h$r94 z9IL}21jF+6Bs`jHx!<|J>@*@oqLEUKj| z%JMkXEGD^{GA6N;htuukQ8Hs^;hqpv6xH8b{XrK+J&lhrFSPSnw!+A4ckSX8M0A)9 zOXmaJcCIASefRs3B15e`WU;#3HbWI@DZLIoWwJ{e(8Gl^qL?~#VL$oGiqPGvv>-J2}FYwwQ!vp@(_^pP#-#a8HXg}@xR#$AT zg!E-wEgFXfO0*`2I~O2YcqO){)jcqxo3f>C9z~ z!<6n}R8sh2?lK=RMC}>wahJVI+pYdq9PF@6aWcfH`W!5JUT{$(zK*w*Iye3{0&P=6 zSm8=Roj*Q>TJycxkMyq9VPuB>?sf_qko?h}(F$ht-PzyF{=%OGvB_bxbpCui_U$L% zntfm5nVS)jc$T39H`ES&)M7s6KD1#GZ7LGNd%3g&rCPq@U6Alk5ev_tl=|NH;bux`U@y=Oa+p5ma(WGLzs@cm7vBf;m8 z5g;*hUz|QafBHuse@*;nZUvj?A?xCseUvP=KvMMaoPXP)P6~*|w9$#J)E3K3XCU`G z0fSZk`K%7vH&Xt3`{tY$e(rPd3}2b|b?Mx>t0%O4_0_*WB^TfDQIpUzjjE1Z+m}du zvzNl*c%nFCC|GPy(M>|okY9)s)MsUxRg*X-VQY24yJP=!B1TM`)RmEVqwuV<4rj-& z{f@uI?F)O!E+b{6HwlZvEW4kemVImXXQrHe{0IKlhABY~1+F`}=A*Yu-&mUv+a#5S z*J_iZrUa|~QB&?MYVFNXL_Ka?Wv5WW*?+N0u}*;NZ3xgmJ^jwde;~wgdzi;8uqr+> z%L0jKHDZ6@P0{d8e;KtN_m}pSZCk1yaQTSi1pnE*nxFRnNjddgXx<9D!XrWq+&W5W z#as&n42@}oc7&~gd&)^kVc*Mf-0jp?o`2DY+Ii5~cSHL67yMfbQEN2&z1D!(3m$7X z#Od&z8P-pJDxPLXj8PELeba$prkE>FbvYLp9Fo$;%6o@)tKs>h#@6)U4#&wg7!Ct+ z@STieZG5h4v!YsUSz-s{e8j25sX5!)+-M==7T3Qo-&fO=n?RhBRG>$=$`jLCRJ?I_ zBRx+A6l2~yuCdt6xli4Ad5%mnFxrh)SCMGupY)+ZO$)U#3?IH7I`h z@xRh=uCKo7DB2*A8ykC&+z|N9=2fnJK0OB9CF{td*MO#%7=VRGr#!O-BO-J;JB<>c8+l>?}IMAnkp-7Q#Dt zm`+@L7ysJ#eWBhbBW@tzS{9j3;owi{4MS*Amo2XMpz`kzKK_H*&!p-gtUuyvpi~y; zHg_)Hy_P)wq*f4d!)EnST~y^BhTlzx0+vBnn;Y+B!NW=c^M0Gd;)FmLKK?%yZTvf? zJN*3tbi;Y)-F`ZnU{XFh)8N}3;aPfm&jI(f!hAteZf{%rO`;bDWkIPFh6Xmn&b zxcUmC25XLkyZXukS_?;Bxjbfj+kyW&vLKmPxBKW~#vsK#&?3wc>$5SJ(_@&fAYkW1 zEu-3pNv${}fPb*REQ`c%vBv(% zq+2drLMfsxBYI0EmvKZ5G!Bx^=#d%x1K+CTfkO55g3ATGy#!rIjZmYaE9(@!#|l+PlMO z7&R7$n!<5r)|*uoe#6o_nQ@anSIYc@gw;lKK@neHRnXPAyF%w z#6i`gpsGEGmg&D-Rrm;Thc=G=?fcM*7N(L0m=^QXo*Msf6a5cbM*b;rLBF0;I|*kL ze%&^)%FzxNHuoL!I%{3`Zmp>AYwNeP8XU608zzm%^vNgRIepsaf8*Bt7Hp>uwEEtY zU&JS~Ori$X<+CYt`rPSPH6|;53O9mX_%N=mUSGSDuIydZ0Tv4CtXj!T?~ww!WNfQt zN-FFgwO7Bna6WV9V(4-JE#xF8;>0#^_kZ4-96rX^8pfj4)FGfeJfYm1mF`#4$9Z-2_!DQv1WJj1qWvjr)vtNmh^ zHwRbX0B4)w7x?9Q*i%2f|2ncx+Xg-tK8jX%{h_bWI;Rdc90umDvQ&B>7oW*pA>&h4 z>Amq0Lf-;+WX}j-B#~mAsZ@+b+|3js14@nWbsiAn^Ep|iRp#U$Nk<}yD1t(!zU(W( zh>tf{@817{uO{+k`Sn$ea~n4szFsW|V`&xC%z^Nt{-rh_J7U_k(>2~E-oJk{`*SCZ zRfUY7=x%2v>I!}#oT$bpoCxIg!_r3mgLG}zTiBBj#0KEF=0ue zx;|x7LM+%ZoM9q3oWgik5-aPJde{t3c$?tQx^EV?bCgL$99a-E6=8rAgOBjbQStEi zG(4`dr1DJ)1w)m2Z2^=XUpr2T^bW_#KG>uZoW-UBpq%z-Z3};63wsf z2)^;A51(jFa0V<=04qG&iB1k2nd>)?XJhsA3;8WKR_@-qdq39JI&<$6;l$qVJpA2i z)V_iDj}jQiWZ)!A6=O%z;okjQ?)teUpO)z;=qIh7f?rE_Z&@|I4y=eFTXf00qZ*GR z3FfrFWtS;`SKrvl5i%Y_C$}m)q8`>Pw@)T8JpH$`Uq1Pf&zF7NNxWp$n7t#CUG#A>P7WZ)j`{BFub=#x2DyTfVi6a8GryOl6PFrb7^yzz7h@ld zrn&~oqM`wNvNb5^EvCGCbU+Bj>P68A z(?!}Sv(KwdSW6z@)$w}{Tpp$VcBHTnJpGedh{oMcHqltXpEH+O6>wo=GhDt<8)+uEETmDAL3 zqsnRl+c9$1!C^Yxi&}f+6y!|$NIi?D2v*c>uoKr=;A$993(Xt7bSX-L zz%xQ(+=of@QlUUgVdI{hl-u!ECnjka$5rCvA#W@EiETV@ZtLZ=rrSg$Q~o2zI;7XF zcOD5Ux9;B3hey41BQm?&Jl3Hpaqk(_5~cc{~x zKVkyYH2>^|M+~v^7J>+bntc(>-1WcDUO9W#|HkqPhRzQ9o7oSYmA%0pY3oh+FfH?O zG)QI2HZ2I>oks9&{eVs6yqvZ8=}9EPV(5S4bb%d^T`QJHwR39I`EbsiJ$pv`$22ue za~8qJ(57JZK2h`??ZndSybbni)cm4mXW9$%gS{x1-kha>Pj`~%XI^)d7JPs=h_l({ zr~*lM@X$v{NR3st_*{-sa9}G1WIs=hY%w0omzt7NA!^)OyS}PV%iK`y3jkxJ)JXyu znisi;8ap>=ZN>cqU;f~o4&=_B!cgA6nuA%9q67G-L#KeSDM> 
z89CtI-a78{$5}lu7oAus?vh*UtIkfYbySMQPWw7pjSo;5C)`&ro_g*xzJ7N%mgB;h z&Vy+P^atK3W&1<~4(D~Us5e)Hg|AxBsk^yB=xyEmoKJx#|CaxL!GC|@?RRz}qpd}& zQmH6e(%V5f78AbSJJfnjIM$c^epZNtU7YBT6q%ogP%?(49W8hx?QVKf8GWc6$-q;1M}g8Fey)LrAMhlE<4kXO!z0%Ju$gEyl1avDag_GXV6;EoDeZgoSJ z(wW2|A`FRQy0W8Wk27gL>ktv>;Z`f%|;TjBu#IB?-Icq^w9rPm|?cUm*8&3G$9Lpg-p!K4dW70T~VO#KQ&HR?R zy|voQFc=GZ=mFG;?U?Je-Zl=i#VY0doQHv7OUcHILe7q5Kx0|w8_6M>Z6(2AeY!cn zx7??(G6+U~lnP#hLNwdG5E^IBTsb4r4t;*af1kMc;pW}9tbms~<7mFcO_XVG<;=(+ z*i0xTxBmye50F!;ad+)?Uja^9#r+Cr&Yp|Q9B#K&-V&ExyR))(ZS~HEzlf2LAaMg) z^pjY6G~9Za6B>!W4x^~z#xOSbriOcq+)1&Obz<;g$zHj9Il4t}@F_js=AUH77C{R0 z?sPEz(q=GgF#;9lS<}g__qq5h4H_LLmw4tbBBYnp<`pg-^R9`OlY@Rp5B+{~Esj|Z$ui7o6}WFv@a!Bx&dwmt={K;4^A#tS z00}QMozI6mo^UiW&I^*P$e8>$Bo9>t)%<#!%cEaEmk9Eum!5ywf3f24*@usE-v(7& zZNW`{hyb)hJ?VJXHA^EIr#K^pmQg42eK*(17!j>5E=9twc{-8_vO*ZaYFP?)pQT1p z=xaQkvK!xd=WX8`#4+^>2Y(Jj8Cbrt;j_cN_mkNV9SEfBN1hglLXfM4`yAZ~=Ky9Q z0lj2RFJHUH{Wnw>z z_#XR+fRFYOj?YMHMzPvU`uhHGey5hxld~FD2@Sbslhcac1U23!z}ajvEB;Q86ov`HyU<2K^*@X69?6ZG3GV1Fq`QV z-toLPzPvX?VHXsOW$bWvx>NSEZ>(?la#Q<1!|RR(52eoCI}rj4+8P=U-f4}8GZI+d zTkLXglq+(yQD6)fY}4~xZOU!(9EA+e0RgWLR#-Ruyt}YmUnAIgHfBlmd0hi+Llcv> zRtRZ6jWHKQgmSK$jLOsp(A>-k@k`nZj-?ysn*(?0noa#E75Az5dV>e*VaMw z$#+ivd4G1k-uqj(au>=)e)GDpIX=WuH?(_?ydj`K#z7X3H>aWg8~>nThlyP)H2a}z z6kqv_e{T_|-E_8s>7c%~Avw))YdGDUWZ8r|0DW^C%2R*(e?0kySOi#s%O*bepym9% zND{kzHdVntNnl8UdW+XGWv5jaFN?OAMTzW~6PAH!vdj%qd&!Q+?XgSAcO#q(9Xn8u zn6~f)5rk0c%;kfPC92t}&G0p>ucW-%+xGxjNR>FYhMI)ky(P{}|B25eMeeV0o-NT!KkxG8!ZYc>1ZiDET{H}TqPS&ruB zlfw=!M**kg@v*DJ#KgOxlCrF6`0KZ?YV~2FA@RVIc~%pb>+kOW_b2IWFa4T-J}w3h zuLT4?aI4WaqDqapz>9Ccq#p?*KI{bxvx);)xq|InXD3I(6`otUXdXwx26#o)G|m@a z1DH#?GYD0b%e<_Xmgu#CD>2*>mQUCwBzR-%D3T$2Eyj$`CQRVnj;aceVR0;!(-JzT zbGu|(UCp&7`!>6idIE)k1m|um; z2``@pm8b?u+%F$eaJ_>+0#e{TXFI|r41a%!Ee~V*JlneA8$i2qK(KL9jR0BTe`%T!+a_ zdbkxG?Eob)z_H~7azzwdP7J|Bx!D#z9`2lSn!qcDI*fnnRH_ZSKC!k?r$ye*d)yxS z@z=Z^4ifpFuRWipc=6JCpV6Dp2=Bf>wsbBXxH{~RM;S7D?f&OZqOT4G@?ce1@hXSf ze9(m9zd!q~vqU?j@uWSf@#If5=T&OJ?sM?S8-tT<#*ylkwm^k3QO&Kb)g-YoZ0|Gf zcReCvmnDo*S;ls|33d5>f|eYGrK}Y&iN#sO1D4gXK)&#w>Upkck@I8u^$nlL{4^WN zDcj%a_37tc_6bmQev;G+Z_y`ti0M^$f2qjMy?m{AZ^4JyN41f_qQ1hgHT|-xa$LAmDDfx2crQD^J_2 z;(ZM*L^qGWYRht|0t$WW>*)+6yvc?~X1DbwEb-sz&Pl8AcAn&KemActZP{)C2AzRZ zeChIK9{}u&W58B|CV~gXqQjn^+QEjiW|Nt=Mo_wWJ@GP;X4CSKF`_l+b_P8Zqvfy-IjOP(6>ou zR6vbOGjY#2rlJ3CZ#x(Sr$bPqIaVS~@xmp4mcm~o@`Tc77MtZ^s-tm_Et1MIAuMsN zBq2=v^c%hv?Z_>_XyXw69iuVt`Sg?75es(N1}E%9XEM>PJf-M-b>)T+c$ZKTC=lOh zV?(wdbTWR_dw|Qt@iW{^2~ivzhU)JhrN)=n?t9nfDmx<2A*m;K2XJ!D1(y^)lIHgH*50){U-W@O@hn}caO%fAmnc3Q*F8^YC| zV!S=B5VK%Ia?5fy?WAIwF3Q@QK6W}{@>p7v6X~-#QVO&|JF0nrgtHo0o+9NZbu(%;z`nYxUD0gN|Qo&w42MQV7qMk=Ey3c$N=YLE^ z?+oyXC>M*!xr50(2*^tfmO8&kz+5MC$O^Ql0X2?W7F+XGxqSA#)5u~F-EdKx`+6&K zFqkOUxcN&LFP!u3YZXDV-Up{Q5c$vM0U-_>Y9Vd{bg_hX0|F0VlWQ18Q|$*ZMnuVg z>)l^f^{_Tp|6_0bT(B|!O3_K!=hvj1N9*c zhv^^^3=8MB76qLSD}3@lo>)@9zIxVcJ`ft%=YmM)D2oWM>^uh|5hkVQW3q1c(rM9q z;M=5S&JcC6e5Vu&}A*MFP8W?>T7Rv@HpZCVyI9h99H;!WN92{t=f%}3zv~oW5+q+7g#J#s(`g(xGY_t|ja?d)oDW=w z!(ErD9rSf64x2~t8*;A&3bskkZ>Hnv9!nBZqW_RuLQdv8H&(O(xEycg)*I_p%G%dI}+dlGNoX8P3#!vFKpt_%;fVf|`=1*a_D3ORgkVW~=E zonK!RjAngG-b2=jIjE3pgDhLlFW1>SG_B;2e1P1SEh4V1T+{f+$(v(oO31JP0h0KK zCW*D^_vYv61V)-|FA5G`w%>~gA)c|$RLgGheXb5*Q)>wk$-_`it)vCT&C-4{c?+Y% zt)8?OZq#CwTuF5h8|b>uecsqu5r)z7e=+;9PnPS6BX#rI+ImEp@B<)PebKrPQ|-$g zjx?myR!%1kCh4A|6Ne>vTML#v-b#)h%0EnGFj$m@+l?e#h!i$-{h^$A!plra&JosWKxogd0vOGoS$bF2D z$QH%Q`ia1WOJvf+`Kn7RJA~*$zkiD>JqeWh+#e9VReXWFF8v1%>(ZwK2FGnm-=80X zoTU3{Ejx&kuxvEHJr@6Nu-(5-!}Zr@zK&sC83HQk$J?D*&rFx-I9iEQP276@z4IJu4f zUQ$nZ>3&FP$03j*wuisN2XD?q`(kJs|I=8_j_BQbH;Mp(_q=sYpXF8IE*ZQG9 
z-Nw1vqF?%km^a2*w#g~2om#Wv%(>heP=M(yG7)SDA_~L3guSaI6j=U-I@&;iq8LWf z%F$hbmCw;K$#M(a=hK?ULNDcFv~4Y0G78PFL_N2D>)vomb++RNlVs}5w$=WQoIO|s z>@)5$KA_!nB^i>`5}nPq{iQfO8v^4Q#syFGu3mWIMFECs+AWZdxd=Pu*`xY!;8f^+ zJqK{F6SGls2pdy|oyF~Nk%E3az*m!;w;R*om|VJb`t++>fg7Al#mPB|2TYVJ_bXa`3rxN~jwwoer=Oy9f`$UcYv!{j0&Jwt7*j@(%OI17 zRpFRaunX%EeK0Trh3HBIibSAX@HoIzu(z z6~CCDx@pxs%n+Z$?bW~=!G-KixQ6AZrq{U|&qJJTc;ch>f4^h@JG1}&*6dH6M)%>) zA}O_)wK&*%l;hK_bU04nV%hNmH#5Iozblg_veXx0Zng&dCp)P6+uNP}a2pfIz;++Y ze{CLq9N^)tw}P;Q6-9AexporQiF37|0&#ftrf?shv;Y23w7b`f8l#-+?oakQ2cqSP+M2+PbCuw7+;_cv z)Kk*9xj*rtT3CV2DWPPk3z6Kv7pEio$cu>G{)iaTyfJ@(cGwd~c8uP4#>R<@hf#~H9Lh{Pe$$dh7P3A!`{9h>aM1OGU$FxQHDhQPWm$P0wTO60Y{KSjG@n8rp=!((ZTmhG9Ohym9|NN!4nOa|0>& zqZj#S4zWuYE{G0xwg-Hud%N}fVe(lY&Z|gAcjo3qD66{_ij;VJ6o{Os+xZs=I$W|C z-o6^B2tVLIJ8<%&^xcpB$chiL(@n`v>+H|{ovU00h2O^4ojZ@V(x~pEYaEKo56lNh z`m;~WinC#*gvZt!#o@T*?^$G`qXXB^MI<}u1=esNLr$se4Nf8u+Wvx{H57PIiJ zV>5s0%aFcvvg<2HethEy^e-H8C!ZZv^N$Yh_LK8{``{SjPZt4AF@3I_ogVuyZ%Fe zzuD0b!)DvN>NF#L-}HlM!)EI5LZ_|t_n!aVZ-()Bzv-%(Ni+232crEe%`5(Grx}at zo&9F(zl}ApWjR?G`@`QDHJQM>xvyOxPj6pz?ds27{&S!=n^Di-q{EoWSdR~zm@7|Y z;s;Fn&A^A)*ZK|BM!(t913Rh19sl}~NHkqQ+D3YMEseV$KpLv|&caw<^u`Cxk-zy0 z3lJ+X%?RuIVss`=Uu!yS4*lKP@n#<7D2;60U!EOrH^a1Pw*2L%-)v^z{7+(qC%a8& z;=e&xbvPAX`F=TdvZCi%AO8W$rT&im>puI~=RXyNx=mj1v;IV1dttor#F2)~XlBRy zc-ZtO(L;Y>1MT)ijxJO4G106!v5p-ihra?v0^P$b7B(Ag(Sg|M zD6v^49c|L}TRx&;bKrkI)QYpwqs8GO^SCdv?l;4{D{g5zFe3l3*Yx%${!X`LgFYXq z5o1EcgphuzdCA|N9q(&Yj60CR-*>zn{(i+@===xDKeYY8pE=c>_OCnX5y%}>nTlBs z(<7ZkqJFcfF&!qpIc<6x0y4(?f^#R$bE36{UkA7BY^L>N(C5|ETC?wqK07{yZ3kw$ z!p7`4;ga`1t1+6mP;0#71Dg({Vf2%H=_IZX=3*`3x6u$^_eL647ixu=F=66;&{k3u zGMZ<{TT*p9O*gm%uS7S@q#vSUI898c(>&CqI^q~-nzNd^F$rJ&PIHjQnl`|Ka<$q>6@Qo#D_{?<_Y z?{st8znuzxNClKwwagF`hir^PEYJI)qks4Ro$RJx|4^D%p6=T|tb<0AI7Y{K+4`O68 z+9T7HHv#vyxolfZcEpGt$EII*O{}-2h?EPVkb7se$wkr%U5e-sevXWW2#BVXaOn& zz%&oy{XcAGf9M}Z=7SJ*_t@Amq8FOP;A}+FMuvx3m>n>s*gs}d2dh8eL;Ny39yEPl zmdCl5Elpz&)j^-I5z`7{7-~r#2C!)$Ng-5g5gDh)g7DkyC5AU_ zh7Y~HUY2U0-gkAoIkY?F{Mi;enH}#mBlQXAoJxKzt{dnXdjfx=~o!Z#VzKUo|y-tdW`*D8wPOf5D&Q?@_o97nHmqc0p3E;6!_A zm4;1tKKt7&fe(kN#QvEbKh!$ms)%)Zr+HVa0UHV7*8w3uGGHjKY)&k_0>6%ZY5G|D zX^h(EEtrce0Rn^ucVPHFVq{134>f+rzh||=(QC9&_b|H{WuF}yYl`+8TbHK3Ao}-_ z9U&pi^_>QZFHvoq-svOgCKmCa=}9@I?Ipc}hOzBa?(gqNw13zX$J}u9J_6Jk(+i`pv_v zN*Flx>lp7$8q&=SLFkyUioFs02BSRoCx}c9pn|Azl%YXVPrztb+{!GIw3uuKy=b;k ztl86#n%PHA&dzkfI+`?x!4JOFd?0#c>xey@=7HP|j{p|3X_xK@U;~V@_gY zrPsxcyfBPfGW+liSREP!3DEa0*}DDA zmH=s*0$bD{G_!md91BLgA6}QU%{k!*K0X?!$sRSmPVCGB+nH){3+|{-tm%&0nc(={ zAL}#XDya>qx1p_Kwu&wlTH7<5~a_%DCBua*+ zp7>(3}AH zbSHVzUlt+WY2J3mGCMZc&2$iO>A# zkV?i=`N5!MlnuU=*ZmspE8i+oCzhWT zF0nVW<6vvJu(&uCeDXL|(rb_+OJs9c`-3=ov!$xN@e-Jyu_v}CH|36VO{$VfMTM0W!F~Fd4(!g*# ziP|Vv`(f9^^SeoQj4%bJqp$?tZC-X_oNX@qD&UVw%1Jk=ftFL0dcxT~Nya5snQWJH z%|#y{W5}*$K2Bn#;P!zg#Irmft@Rfh`bg%I?SJ2Rv*RN?4$pdze(bZtuGX_COv7=~ zRLxs1;^HGtJQX-38lWSkEr8sg`czKWyVD3G>NR{%Jf$U^K-cklUE+j_$cj?XNUfghX)Eo{=T+VmTv8IXpo5 ziqOaG*n$G_=u2c+z2ulN6T|<8KH;*$cnj0-x#lyzE_a(d8V$)M9j%ffb`)HIh@VAY zoB$|z*^$UtLrUSwRU`0&|--t=#5#hDm0CjzbD#aip7J*=6>MBG!wly3K@%~ zDKEt%beUl+BZ}1H=omTkvc?C^`}+?S`q{=6+iK0~1$1sDuo90A7?{k;4S(8a2dN(V z-#qj=C->RrlE}^Chw}JJ^BJdxd6z=h>paZ#BOhHuH%Er@dXbkJGCy^aA8Q9;6EK?{ zuMiOOQNWa^V%PK>(=qv??X*ALYc^#)y!(No-pucPb!~Qa0Px-Bop<#U@Z-wut zNe}lw<(xjN3cs3P`+IGy%c*-L7RI2SepR&U9GKt9CvD-Set0F8n+^TI7U=sY3sQH3 zK@PWadck#MnP#@vOuON8zil*0VPwev=)}LBxgsP91%}~{V?l*CT5+_lzw0J;hbtt`Jd;TQ0Xk7FbHzv36C%cV$NQN?|diz*gwvW z-+8-vJ1}7?L(q-QjV-T(2#9K*H4Z=lY=`)UK3M?T$8NTndfEc}DGWBAOxa{wBy+6v zk^|fHno-D^9c`8MWa)P$MOZMGgi{E6Ab!EA5B>cY@@vDUYkhRnQS^-%#vPC}U=^n`Xz4 
zAl)>Y;{)LW#|e&sc3(S9(8Gft-)WZBQ_8v9BkLrm3p7u^=C9-FNs$sf5Mg@|=hjM( zr%1|_JhcrkO3D~OPNcWl?`F*cgEBNk3nQcqB=#IO8HY{ra`TR}1ooM^Y_*tX zPdh~rX{2T4M!Pr*MOzqQ2t4b}r*7g-3yo2|i6hZRg6}wc`Ov7$fDOIuTyw#nY(ql+ z0r4@`qJmxLgU`!*`hw#2crp)?vu4atMGT` zU2Pd8s2J%ZhH|I{HI6PDbcm+yQwM3`dtW@dVXhco2Ga^cEqJc^w0}=G3YEX0DYZ@@ zb#L))S!Zpc;C_-fI~GhzdXuSh-h-)RKNb1SHy#u* zK0{+=8XtarB&jdKR*n{Ip%Wx;U~B#OgR9LQA0`BnZHUnt3)E>AVwlJbf16$Hi@^ND z??X#jVIk=^Im}F*+Ek2kwOJQS>Fk)YqtFJ%NbryqTmBuSq3B~~%|PfKn`G#3_tRXl z`sKE=plygni#VO}!J|gSYS|V@Ex=Ve$sn>-@Y$bh&S}S49+$7i={6&N_RDATo83vUEwrqp zWWQ$tq)?SED+CPZkFN=r`?Pyaip84a$^m3YT@vTOIg%|7^pHS3=h7jvH%PEm^Mk>K zxphHdyiYe_Fhil>-8Oc?2Q>`-Jq*zS4xj#!FM_S3{|$}PUcJ-w!pCkZ@kJr*5u*+x z&$#+DnR4djPDZvdsQsiD8qlGD_lXxT8AH2rx@VnkWTAO~1D{^~v2OQI$3FQSlhh3!b^bu-i zh~opiV_I#Ls%_==;#oL}2ssUhgW--C1-kSovV$!2o~)X6HnrNdh+M2HA;{<+Qhd=@ z+Ysc^>L5oNFUq$4{sjtKAhD%1_=S4KEZnDR8X>| zqJN$p)b#geem&CRfks-&~w#2_ofN7!F9gqNE;4&0^Yf|JaWZ~bpe&d?k%q)9#zW{}LGu42E0aS3E{ zq57_N`4y|yIln=Nrwz4_u$tNXZshNN1pc=XRUqU^sC2*pcEk1(I<{ zWn^ix->B89e1a%QaVnosVE7o(aab~ciphQ6|7NELH-!0wDm35Njt7`_W(UU4lBGhV z#MVv07(4Kn^b&#z;br}>dC^e<;kK00IXk|VW9>8%&Tcpr8n%zcg7V>PGV0|xBxgCS<9PunEPcmu{+lGHU7Dd;p$vkL~Y(royKu;g2{he4WhiF%LC z!zD3^_B$M=Xp1?5CbA&WbVg|ayXMW8Ea|e0mL=lDefJ$$oR5O?0U^yzbrqm2qL}Bw zw6?-E$A{SE?Bw*_s%T$EIKd~L{lcgGc-Lq8^$}Pjz^9OJkfls-X1JPMGek3 z7k#N#vlrL=_`K~5x46ZaKDZ(4F0%v{}B ztW}@T(9)RNs#WivyZSp8uydz#tsP1$u1P#&3`e_N!;_zB_{z=r_J-w zv#imBBsa|X*Rg_DOoiga_#&GKO#$gE!`>|DQJ9$T&whUk=e zYdDs&K%7Es1COuCi%q9BG60F5d{vu-0QyM~aNJ;LNAHiPJC;+Fn5dmc!FX_zy!;6d zCw9a`hXDvl?%tiD?X)m~Op}g)3S-iVkMqrvuQ3x8C5Rw+%O+JFOzpY{Gvb79^1RIj zY)tRzA3ym8M-sesxIizOX-oKol*3t(GCx@5B(*W#Ab<}A*L;}9=-VLx=ccv~vTIS} z2;z}gHnotIFYH>g?#)>s&Hu(cil$$HK;D?~bBWVWabE6I{!?dfv)M{2?Ust0tu@!Y zzw_BhfRV8>!qD9KN*jgk5JKTrvzn-x_f7g9DO{5-se#8NG?QREQ51TkNY7Yu(m@TINw}Jz4Ve+54G(WNQix0_EP9x$j27V2eTj| zNr-Vuk_`KMPD$f4S*ax3X5bro?z=JzUZaNAgrR(Rv*YVc@;TYjMDv@8<|uE2a5ceh z1Vq#FQ>2_M8}GK|I<1Sr2aFCFYyuBtYTp!>5J0nIba-Dz1eu0`4be{~s-@Z|?QK14 z!rfg+LM)`0<^>ld^wjK0LXg)0@8(POVWGR&fXf)(3s293sRBI$=8)$_SX2A5f!5~U zWiJmW?owxXxf8dX+tPpKbBE!nr(L4=EU`ud@EAK$Zht&weSk^jXW2nm$B8-86%4jh zGr%I&CvC=@Lu}5@RF(wIoaNFaEQcrQK~}6mA$3c5)g%lp9gaB%27vcgwE~a7mUp~JNn=gIf5Hu(9rqVy)AWqOWL)~XhB-Fp}JYdUfHCY9?xBoah zzM%-l5_4)vMU#|Lx`IySsXUcmd(K{xYs<&;7gWmGG>tg}FD@+3$(RbhbDSeIq=A2fZ9vo*P4#_(dN z6v3gyECoZ(<*~xo3>`!)thJIBB z1wIo;>x<0`sdac4L!@AW%fxJ zfinyZ9}i)Mzy%5$X+UHaY5mCtq4V*CBo-~l9{SpT3Md%sjxZcYoFQ)aVG0JBW3fN# z`l!v^4`&>1a-)+L`6x8|DrQ zdRVfukAiDERLkN_b3urO&m9bg!EB7&!+hk3k~(Qm9-=j*FkU z7+y{YAct|6$d`IFHb*>FdXEK*&U=U03b9#DNenvYd`pg>W?P$VpraIW?Czquu|$Cf zwqUf0*iei;OJMV!e$!_J%gp)i#^^(wvDpxtLaFHoqh#u1Wy#Y*C?$*th8d@C?UbW; zX7crdwiIVS5Ivx{M zYcnx@F-^7%57k?He1l>D`UYcTG(iQ?WZ;(GIw?LOS}ZQGpQ4PGhTR_~mc`U~AbN(& zEhr}z@Nw)B@bW+JHgAY@tc0RMw&lWSmkO-W@K#dqP^e%kK4L?M4}n|))?Kupr}78e zdUv5<@NYQ*@N8rr1NIcRWya%0(bD>({Z-DQ=9E%4$^#vzMt77>jIU$T@iCi*=zrrS zN#n=-GS6a{P(CE;4OR_Ms2`7f$;c;v(NvUqOJ&i{l$ zY$sPB|4#Cp(4~3Qw1#$QKP?Y~80XOZ6qjpqhgrQHIa01P7qu^kR-`giWBe*kBNUA! 
zRPghpgpA9%1(F)s+71Wu>5(hmK8=jCYG@gD3tERI-TAA=;c{UDtirTPC}a_8KHwZ)Vq-oApw&-fE(g)@PGA4@EfqAq8GX}^I3~Usj zoSp>DkrnYu^Nu#kiFeAk=#r|!(XS$_UzUks@%pqThZZ!B18~?tREAj$+@J}hp&(3E zxsNDY2z6$?x49L`w3az5z2j8#X;E)wLGzf93Yj!rf(Dx17^?@4;j>$7)`{A|M^qU=33iP(2?vO6#OUuccyC?B_AIEaO zF~|%P|ACw=ks6F}Pg_VhYgCrT?D)2#Irvw6Ds-IW`2eE|)87aEiJB2pxzj()1vCtt z;gC1h;#>*#!Pd|0&8)qJ6~})cOpKjnhCravjIKdS%!hg7P9BHIqr9L`1IJ?MKtylb z(+<)KV9Q-Qjq4z=%WUGq?D(omk1zy_>v^+gE@@LD3xX43Yha;BfPrJOLkz#c4>93` z1Cpf&_sZ6U6*uTCinnPHex6#LoNDGN_d(^lptuEtJ)(Q%R5v0Bt$#HsIhKMj+DXdU zR*zguos}}aoFbQ&YUQ0miEx1}sFXKT0;#p1EL}bZ$60Q!IU!B;nnYqLibUEKBwPk7 zTW?nV732=;moHx{+I$OKYA*SoOU-3RF3>^yN<17d8G$mX{dkZquOKTpZ5gMueJHdZ zyw60Wx6K|ijaF^p_~*Oi&YK-SpTY_p6F1@KqeR2-X_+fTnhHhM5=Q{Uv9fJevcFHV zrT>XX#A#xl^kDU5h6u)7*sUep4*g{ z@Pa%?A24~x4NUNqPZ#}lL3;pfId(ih{1*GtN-3T8S8-A^@~95$~8P>O^Y!?85|4oI3J#o^Q^I z8!};1c^yY{&vw%2P4{Qe{(QH0(1u6+d2{zsN!I7YE*fP&CXL4q*~LzaTpenv$X9o_ z*-(2NhUtemy_j{)xl7AOV&z|OR0b=A;bZ+fnHW@<;lzs_k(Q^B%-~UvG$&5`q&O(D ze9jBh5BT7`avot)EV?;}itE$7@40M&cj4P(t-0aEYNPeCx?v}Qv)0MzgEr?x3YKDj zncVPJ<^>7;lZk-synL2#G9K%|$CgP`EENmv_>@kPfE4>VK)2&}91v<3K{J)~;v7j^ zlf{2;Q!~Tcyxc7Nd@22@oxR*(Gkta?u3pH#9`?PPYa(NIF@{#eNM=h$Ps-~C^GAUsqzwLL^`e`cZcW8G` zfz!KV*;wvQD-8Puj7>fK@0+kgcxV;+FY` z>jtNc)8&E=8K8bGh7-1iI0};E7Aw80wcSD?z$Gd=P^}Mn;G?x1IPc`LE)=jZjLuk+ z`t5_?>){C0^kDk)LmQbdezqCz#?B@}(DRG4ygKEt`~ROQWBJJkeOgby`tet(X!P+{ zeOTrY3s^Galxf3EI>~irN7WRF!&jVn$sYQ3$ebl*>)|TOwx6l42pNILk`YZG$pXZK zA)#~PIss3<@PL=Cq-8;G#hu~5TH0hMW%aZ>bfDWpp2NmD?dx`-xuQ+eYil*;A`Y%b ziTw^YE&Vp>mnV;CsIa>{Vdz4xQSvg_Cxed0yV_(&Z)Z%9A-wcvDkG|Vu{N5cgEVu? zg?KN?wXU&VrHh-{mC%hklxm2NX?c6`;D!}BadcZ@Y^_qOc#XC|zsR-MLw#o<^o==N zy9hnK2Ol!GFp1CJE=!$MY^Z0YsJoe?VEJlq5 z&y1asml4NFSB?7y3Qc~Nh#ChJ)m4|hu6=0UTUm|fkKIcGie+ze%hdWh48+BCYck}u z#yASi=&~$BQu>_qPycl@`_97XTK!Akz*OM={Se5w1W0<0kS8C}&PK_iBISG}B;$`` zvW9o>5tg)IEl^%yH}i6HGK6Y6Zr&S(9f!e`(z#BOw)-KmNGx_jw9G80iEP4kbD=gO1i~(CDJQ!F;))DM?BsVth8uokePy3Vk0|4 zOtvTmb85jgy-PaJrWk7*Sq47IYt`;c1+dWe`hnXV3cv#sXH3yt{$~--WO0fau{$02 zYj>I(zK|5q7f{V?kkxpmIp+lA0D{I&;4eATyU;8Pq{Cg~q~5%7s?cC?9`%y?h^=V6 z$op7nR{T$FYX9?JHviJwqCSX(fqA&%RU8@q5Ht{>EKI0z-^gykuUjecbeCTLaS}YO zFFzNopg_3=K?Dq|>D-;hVZ+`*n}V+DaxnhGSYPjJ_J=+gbJNJ34$4BYvvlKpa0!j! 
zVJ`N@IsF_#hqX+CpFT36$VGkM8#WMuBzF=4Z0X_z6LC?7#$Wb z2Ui5M6|?WXKV*QaJM6L57c#UlH5J7~*r{{_321v_H&D8*$R|y}wkf6igQCZ_*U83F zDL_k??}XRY%GR2N>WIf+jrfotszuYXPiML+r(4@~OOP$Q9?c7{wzst=Hr_HMwbXnHRF8~`xb4*im?2Jfqxv`k1@lv1 zTHM*OI<(U*nT~r_K%vheTFHvGunV3M?--;fA7f{qlTrCBvibD%@;toViTrwe6m2$5 z)|SbJ%9Zxe*&1swHF9J(0%R8|94{Zbh3P59h|=$x-`Ey#oDB`*PU8dY4L!8kV+fD* zd5g~S0N-LtaWW1$nKHC+*S9L2Z{!y6ftSzd;zz3)nM}fE(4!-^@H-@M;j;$^rjPQ$R!5wLTEl#zO~gEmy&RL{UYgkZ*2+_{<{Q zg_i1`G8s5p#aO8J@3bH{6rgJvTh|q;soJ*}I94FaZ<=>K{Jm@jU5H{H%%nGRO+>)m zLh~VEERGp_W`5%F>plU{2T~Fk_gHB-Vr<^aDl-C(Q&a)a?-558!?7xllo@!``_12+ z>#BKw{rmX4W!e8eI`%$&*~!j=q|SuxVa%lHC_pgu~U;xrIlJ85ReUunLg?J;jaA?8cED0bRB6h*M9A7+dB z9_UXKry2$uKwlhKsr1YONwS5E1haFgv(_@5gjsP{*_5;67n_%a`{AbffyjK*fXp+Z z1O`}_id&f>(N9D=#OPQ!asHJ1(3{!MG~5%dJ=P=L0s;BoQ?DrhY|?lEfl)T^;GZ!<)+8FgubrkeHB;Vi^0dk34+$@?axR&OSi zL@Z`Co+!+9oy@yrhMI(m(3V9me6NwcA=L>Xn9C)=$5Sa9vY&_oIcacDq2J2i+wS_E z2u^-UH@r;pv0B2{6NaTp5Uo##@vxYtvBIX;VNL5<6KDUEJ3xMwV(uJQm$}xazNw%mx})(U?`th*Be8D7eJ#2f#z}9&DY~QR9XcDQ;Tgq39H*@q zwUa!PV^3}qSF%e@!vR51H~B$0TNw(~ndY))F^`R?bUNh;yur5CXLfsg&?X%yGW&7L z83YF`u5BHjdA@nYwS4#Xw_yN(xV^kp2;iy__JaS|=5Fgp=*OP)A76cyYPdd5D+#=S zI#ZD$p0DY)-2fLZL)6B|I!QS2dKyWk*?I9w~H8Gvj1F0zo#S zzm1{L7Q)g3SSpOkZp~)J;<7#rJ5X~+zZo-)L)};}N{%r_C}Iq4zH|@(%bY|=&D2WD zV3nvpV!`oS;4M;KzV?9^b=AJ#e;Rpl}FSd5(Ox&1|_e|F%-L zc;~2x#|<+=dkOaM!^XLjGtu#m8LBe?>>i!4h~xn@k2j)WV0 zd;2@8U0`WeVvl^{VP8+Oi5>E-c!qtcoesJOdZHd+j(fS{6b_fXJTSw(|c&hhy8PAx40sbqpFTy&w#1?3RbHVau zd~)r{0rPpO&$hgY=4zwgN_cs#`Hc4itdo|o(cE11R?GuMhM&V3NjcEfpVZH1NdBm0 z>%i~4O-r-&*9HW5v)XhU@fAYwVsq)gi4El?L+ySnk=JtAW8~mGA6b9;)#k~c`08_J z*ZXHI;Y$F=3!YC)S9{TMe8`%I{y`v|3ZT(fdz-yCj==H8aZH4cX1Q3Z8DQ~Re_>5m zRj_zb%G%T^W4+#(d9(a)tEc2tn;qX|Xj(u76ZzBn6(?tCb5{Bxv)u(ZaT|R~i6@rZ zq^c2AnNt2IkG>~7vigGo0?kLqIs`V!R(sV+XzrM<6bX0HcCad)78mop#tWs<&G8#s z1^A;WrWo*T2c|YNd z^+;&HB|;q92G&lvpaW(fa^1B$N2IoD3G1qL+9S7gxR51FfIi_D5w@$eOynCG7m=sj zDpR4`ni7OR2e^NOy)Nk4OPC;E*{rbMBJsYIW;-4Jo6

[... base85-encoded git binary patch data omitted ...]

From 21958bb393a654591ed26f339791b752d58f5c8b Mon Sep 17 00:00:00 2001
From: slaren
Date: Thu, 2 Nov 2023 13:10:33 +0100
Subject: [PATCH 049/206] cmake : disable LLAMA_NATIVE by default (#3906)

---
 CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 611ed3f4d2a64d..3c49d645c3196f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -44,7 +44,7 @@ endif()
 # general
 option(LLAMA_STATIC "llama: static link libraries" OFF)
-option(LLAMA_NATIVE "llama: enable -march=native flag" ON)
+option(LLAMA_NATIVE "llama: enable -march=native flag" OFF)
 option(LLAMA_LTO "llama: enable link time optimization" OFF)
 # debug

From 4ff1046d75e64f0e556d8dcd930ea25c23eb8b18 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Thu, 2 Nov 2023 16:22:30 +0200
Subject: [PATCH 050/206] gguf : print error for GGUFv1 files (#3908)

---
 ggml.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/ggml.c b/ggml.c
index d5a49d8e4f3148..605a27940fc81f 100644
--- a/ggml.c
+++ b/ggml.c
@@ -18884,6 +18884,13 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
     ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset);
     ok = ok && gguf_fread_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv), &offset);
+    if (ctx->header.version == 1) {
+        fprintf(stderr, "%s: GGUFv1 is no longer supported.
please use a more up-to-date version\n", __func__); + fclose(file); + gguf_free(ctx); + return NULL; + } + if (!ok) { fprintf(stderr, "%s: failed to read header\n", __func__); fclose(file); From d6069051de7165a4e06662c89257f5d2905bb156 Mon Sep 17 00:00:00 2001 From: Oleksii Maryshchenko Date: Thu, 2 Nov 2023 18:10:39 +0100 Subject: [PATCH 051/206] cuda : use CUDA memory pool with async memory allocation/deallocation when available (#3903) * Using cuda memory pools for async alloc/dealloc. * If cuda device doesnt support memory pool than use old implementation. * Removed redundant cublasSetStream --------- Co-authored-by: Oleksii Maryshchenko --- ggml-cuda.cu | 128 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 77 insertions(+), 51 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index e4629512611b6c..58b58f33154286 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -181,11 +181,11 @@ static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size"); do { \ cudaError_t err_ = (err); \ if (err_ != cudaSuccess) { \ - int id; \ - cudaGetDevice(&id); \ + int dev_id; \ + cudaGetDevice(&dev_id); \ fprintf(stderr, "\nCUDA error %d at %s:%d: %s\n", err_, __FILE__, __LINE__, \ cudaGetErrorString(err_)); \ - fprintf(stderr, "current device: %d\n", id); \ + fprintf(stderr, "current device: %d\n", dev_id); \ exit(1); \ } \ } while (0) @@ -195,11 +195,11 @@ static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size"); do { \ cublasStatus_t err_ = (err); \ if (err_ != CUBLAS_STATUS_SUCCESS) { \ - int id; \ - cudaGetDevice(&id); \ + int dev_id; \ + cudaGetDevice(&dev_id); \ fprintf(stderr, "\ncuBLAS error %d at %s:%d: %s\n", \ err_, __FILE__, __LINE__, cublasGetStatusString(err_)); \ - fprintf(stderr, "current device: %d\n", id); \ + fprintf(stderr, "current device: %d\n", dev_id); \ exit(1); \ } \ } while (0) @@ -465,6 +465,7 @@ static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUA #define MAX_STREAMS 8 static cudaStream_t g_cudaStreams[GGML_CUDA_MAX_DEVICES][MAX_STREAMS] = { nullptr }; +static cudaMemPool_t g_cudaMemPools[GGML_CUDA_MAX_DEVICES] = { nullptr }; struct ggml_tensor_extra_gpu { void * data_device[GGML_CUDA_MAX_DEVICES]; // 1 pointer for each device for split tensors @@ -5772,6 +5773,16 @@ static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) { return ptr; } +static void * ggml_cuda_pool_malloc_async(size_t size, size_t * actual_size, int id, cudaStream_t stream) { + if (g_cudaMemPools[id] == nullptr) { + return ggml_cuda_pool_malloc(size, actual_size); + } + void *ptr; + CUDA_CHECK(cudaMallocFromPoolAsync(&ptr, size, g_cudaMemPools[id], stream)); + *actual_size = size; + return ptr; +} + static void ggml_cuda_pool_free(void * ptr, size_t size) { scoped_spin_lock lock(g_cuda_pool_lock); int id; @@ -5790,6 +5801,13 @@ static void ggml_cuda_pool_free(void * ptr, size_t size) { } +static void ggml_cuda_pool_free_async(void * ptr, size_t actual_size, int id, cudaStream_t stream) { + if (g_cudaMemPools[id] == nullptr) { + return ggml_cuda_pool_free(ptr, actual_size); + } + CUDA_CHECK(cudaFreeAsync(ptr, stream)); +} + void ggml_init_cublas() { static bool initialized = false; @@ -5844,6 +5862,13 @@ void ggml_init_cublas() { // create cublas handle CUBLAS_CHECK(cublasCreate(&g_cublas_handles[id])); CUBLAS_CHECK(cublasSetMathMode(g_cublas_handles[id], CUBLAS_TF32_TENSOR_OP_MATH)); + + // configure memory pool + cudaError_t err = cudaDeviceGetMemPool(&g_cudaMemPools[id], id); + if (err == cudaSuccess) { + size_t treshold = 
UINT64_MAX; + CUDA_CHECK(cudaMemPoolSetAttribute(g_cudaMemPools[id], cudaMemPoolAttrReleaseThreshold, &treshold)); + } } // configure logging to stdout @@ -6437,7 +6462,7 @@ inline void ggml_cuda_op_mul_mat_cublas( const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src0->type); GGML_ASSERT(to_fp16_cuda != nullptr); size_t ne = row_diff*ne00; - src0_as_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &src0_as); + src0_as_f16 = (half *) ggml_cuda_pool_malloc_async(ne * sizeof(half), &src0_as, id, stream); to_fp16_cuda(src0_dd_i, src0_as_f16, ne, stream); } const half * src0_ptr = src0->type == GGML_TYPE_F16 ? (const half *) src0_dd_i : src0_as_f16; @@ -6448,13 +6473,12 @@ inline void ggml_cuda_op_mul_mat_cublas( const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type); GGML_ASSERT(to_fp16_cuda != nullptr); size_t ne = src1_ncols*ne10; - src1_as_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &src1_as); + src1_as_f16 = (half *) ggml_cuda_pool_malloc_async(ne * sizeof(half), &src1_as, id, stream); to_fp16_cuda(src1_ddf_i, src1_as_f16, ne, stream); } const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddq_i : src1_as_f16; - - size_t dst_as = 0; - half * dst_f16 = (half *) ggml_cuda_pool_malloc(row_diff*src1_ncols * sizeof(half), &dst_as); + size_t dst_f16_as = 0; + half * dst_f16 = (half *) ggml_cuda_pool_malloc_async(row_diff*src1_ncols * sizeof(half), &dst_f16_as, id, stream); const half alpha_f16 = 1.0f; const half beta_f16 = 0.0f; @@ -6472,14 +6496,15 @@ inline void ggml_cuda_op_mul_mat_cublas( const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); to_fp32_cuda(dst_f16, dst_dd_i, row_diff*src1_ncols, stream); - ggml_cuda_pool_free(dst_f16, dst_as); + if (dst_f16_as != 0) { + ggml_cuda_pool_free_async(dst_f16, dst_f16_as, id, stream); + } if (src0_as != 0) { - ggml_cuda_pool_free(src0_as_f16, src0_as); + ggml_cuda_pool_free_async(src0_as_f16, src0_as, id, stream); } - if (src1_as != 0) { - ggml_cuda_pool_free(src1_as_f16, src1_as); + ggml_cuda_pool_free_async(src1_as_f16, src1_as, id, stream); } } else { @@ -6489,7 +6514,7 @@ inline void ggml_cuda_op_mul_mat_cublas( if (src0->type != GGML_TYPE_F32) { const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src0->type); GGML_ASSERT(to_fp32_cuda != nullptr); - src0_ddq_as_f32 = (float *) ggml_cuda_pool_malloc(row_diff*ne00 * sizeof(float), &src0_as); // NOLINT + src0_ddq_as_f32 = (float *) ggml_cuda_pool_malloc_async(row_diff*ne00 * sizeof(float), &src0_as, id, stream); // NOLINT to_fp32_cuda(src0_dd_i, src0_ddq_as_f32, row_diff*ne00, stream); } const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i : src0_ddq_as_f32; @@ -6506,7 +6531,7 @@ inline void ggml_cuda_op_mul_mat_cublas( &beta, dst_dd_i, ldc)); if (src0_as != 0) { - ggml_cuda_pool_free(src0_ddq_as_f32, src0_as); + ggml_cuda_pool_free_async(src0_ddq_as_f32, src0_as, id, stream); } } @@ -6929,21 +6954,22 @@ static void ggml_cuda_op_mul_mat( src0_dd[id] = (char *) src0_extra->data_device[id]; } else { const size_t size_src0_ddq = split ? 
(row_high[id]-row_low[id])*ne00 * src0_ts/src0_bs : ggml_nbytes(src0); - src0_dd[id] = (char *) ggml_cuda_pool_malloc(ggml_nbytes(src0), &src0_as[id]); + src0_dd[id] = (char *) ggml_cuda_pool_malloc_async(ggml_nbytes(src0), &src0_as[id], id, stream); } if (src1_on_device && src1_is_contiguous) { src1_ddf[id] = (float *) src1_extra->data_device[id]; } else { - src1_ddf[id] = (float *) ggml_cuda_pool_malloc(ggml_nbytes(src1), &src1_asf[id]); + src1_ddf[id] = (float *) ggml_cuda_pool_malloc_async(ggml_nbytes(src1), &src1_asf[id], id, stream); } if (convert_src1_to_q8_1) { - src1_ddq[id] = (char *) ggml_cuda_pool_malloc(nrows1*src1_padded_col_size*q8_1_ts/q8_1_bs, &src1_asq[id]); + const size_t size_dst_ddq = nrows1*src1_padded_col_size*q8_1_ts/q8_1_bs; + src1_ddq[id] = (char *) ggml_cuda_pool_malloc_async(size_dst_ddq, &src1_asq[id], id, stream); if (src1_on_device && src1_is_contiguous) { quantize_row_q8_1_cuda(src1_ddf[id], src1_ddq[id], ne10, nrows1, src1_padded_col_size, stream); - CUDA_CHECK(cudaGetLastError()); + // CUDA_CHECK(cudaGetLastError()); } } @@ -6951,7 +6977,7 @@ static void ggml_cuda_op_mul_mat( dst_dd[id] = (float *) dst_extra->data_device[id]; } else { const size_t size_dst_ddf = split ? (row_high[id]-row_low[id])*ne1*sizeof(float) : ggml_nbytes(dst); - dst_dd[id] = (float *) ggml_cuda_pool_malloc(size_dst_ddf, &dst_as[id]); + dst_dd[id] = (float *) ggml_cuda_pool_malloc_async(size_dst_ddf, &dst_as[id], id, stream); } } @@ -7077,24 +7103,6 @@ static void ggml_cuda_op_mul_mat( } } - for (int64_t id = 0; id < g_device_count; ++id) { - CUDA_CHECK(ggml_cuda_set_device(id)); - - // free buffers again when done - if (src0_as[id] > 0) { - ggml_cuda_pool_free(src0_dd[id], src0_as[id]); - } - if (src1_asf[id] > 0) { - ggml_cuda_pool_free(src1_ddf[id], src1_asf[id]); - } - if (src1_asq[id] > 0) { - ggml_cuda_pool_free(src1_ddq[id], src1_asq[id]); - } - if (dst_as[id] > 0) { - ggml_cuda_pool_free(dst_dd[id], dst_as[id]); - } - } - // main device waits for all other devices to be finished if (split && g_device_count > 1) { int64_t is_max = (ne11 + MUL_MAT_SRC1_COL_STRIDE - 1) / MUL_MAT_SRC1_COL_STRIDE; @@ -7112,6 +7120,21 @@ static void ggml_cuda_op_mul_mat( CUDA_CHECK(ggml_cuda_set_device(g_main_device)); CUDA_CHECK(cudaDeviceSynchronize()); } + + for (int64_t id = 0; id < g_device_count; ++id) { + if (src0_as[id] > 0) { + ggml_cuda_pool_free_async(src0_dd[id], src0_as[id], id, g_cudaStreams[id][0]); + } + if (src1_asf[id] > 0) { + ggml_cuda_pool_free_async(src1_ddf[id], src1_asf[id], id, g_cudaStreams[id][0]); + } + if (src1_asq[id] > 0) { + ggml_cuda_pool_free_async(src1_ddq[id], src1_asq[id], id, g_cudaStreams[id][0]); + } + if (dst_as[id] > 0) { + ggml_cuda_pool_free_async(dst_dd[id], dst_as[id], id, g_cudaStreams[id][0]); + } + } } static void ggml_cuda_repeat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -7298,11 +7321,11 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const GGML_ASSERT(to_fp16_cuda != nullptr); size_t src1_as = 0; - half * src1_as_f16 = (half *) ggml_cuda_pool_malloc(ne1 * sizeof(half), &src1_as); + half * src1_as_f16 = (half *) ggml_cuda_pool_malloc_async(ne1 * sizeof(half), &src1_as, id, main_stream); to_fp16_cuda(src1_ddf, src1_as_f16, ne1, main_stream); size_t dst_as = 0; - half * dst_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &dst_as); + half * dst_f16 = (half *) ggml_cuda_pool_malloc_async(ne * sizeof(half), &dst_as, id, main_stream); GGML_ASSERT(ne12 % ne02 == 0); GGML_ASSERT(ne13 % 
ne03 == 0); @@ -7349,10 +7372,9 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const } else { // use cublasGemmBatchedEx const int ne23 = ne12*ne13; - - void ** ptrs_as = nullptr; + // allocate device memory for pointers size_t ptrs_s = 0; - ptrs_as = (void **) ggml_cuda_pool_malloc(3*ne23*sizeof(void *), &ptrs_s); + void ** ptrs_as = (void **)ggml_cuda_pool_malloc_async(3*ne23*sizeof(void *), &ptrs_s, id, main_stream); dim3 block_dims(ne13, ne12); k_compute_batched_ptrs<<<1, block_dims, 0, main_stream>>>( @@ -7365,7 +7387,6 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const dst->nb[2], dst->nb[3], r2, r3); CUDA_CHECK(cudaGetLastError()); - CUBLAS_CHECK( cublasGemmBatchedEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, @@ -7375,16 +7396,21 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const ne23, CUBLAS_COMPUTE_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); - - ggml_cuda_pool_free(ptrs_as, ptrs_s); + // free device memory for pointers + if (ptrs_s != 0) { + ggml_cuda_pool_free_async(ptrs_as, ptrs_s, id, main_stream); + } } #endif const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); to_fp32_cuda(dst_f16, dst_ddf, ne, main_stream); - - ggml_cuda_pool_free(src1_as_f16, src1_as); - ggml_cuda_pool_free(dst_f16, dst_as); + if (src1_as != 0) { + ggml_cuda_pool_free_async(src1_as_f16, src1_as, id, main_stream); + } + if (dst_as != 0) { + ggml_cuda_pool_free_async(dst_f16, dst_as, id, main_stream); + } } static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { From c7743fe1c1cbda5a886362aa371480360580fdf0 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 2 Nov 2023 20:32:11 +0200 Subject: [PATCH 052/206] cuda : fix const ptrs warning causing ROCm build issues (#3913) --- ggml-cuda.cu | 37 +++++++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 14 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 58b58f33154286..06c28f5651b72f 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -7248,7 +7248,7 @@ static void ggml_cuda_mul_mat_vec_nc(const ggml_tensor * src0, const ggml_tensor __global__ void k_compute_batched_ptrs( const half * src0_as_f16, const half * src1_as_f16, half * dst_f16, - void ** ptrs, + const void ** ptrs_src, void ** ptrs_dst, int ne12, int ne13, int ne23, int nb02, int nb03, @@ -7265,9 +7265,9 @@ __global__ void k_compute_batched_ptrs( int i03 = i13 / r3; int i02 = i12 / r2; - ptrs[0*ne23 + i12 + i13*ne12] = (char *) src0_as_f16 + i02*nb02 + i03*nb03; - ptrs[1*ne23 + i12 + i13*ne12] = (char *) src1_as_f16 + i12*nb12/2 + i13*nb13/2; - ptrs[2*ne23 + i12 + i13*ne12] = (char *) dst_f16 + i12* nb2/2 + i13* nb3/2; + ptrs_src[0*ne23 + i12 + i13*ne12] = (const char *) src0_as_f16 + i02*nb02 + i03*nb03; + ptrs_src[1*ne23 + i12 + i13*ne12] = (const char *) src1_as_f16 + i12*nb12/2 + i13*nb13/2; + ptrs_dst[0*ne23 + i12 + i13*ne12] = ( char *) dst_f16 + i12* nb2/2 + i13* nb3/2; } static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -7372,14 +7372,20 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const } else { // use cublasGemmBatchedEx const int ne23 = ne12*ne13; - // allocate device memory for pointers - size_t ptrs_s = 0; - void ** ptrs_as = (void **)ggml_cuda_pool_malloc_async(3*ne23*sizeof(void *), &ptrs_s, id, main_stream); + + const void ** ptrs_src = nullptr; + void ** ptrs_dst = 
nullptr; + + size_t ptrs_src_s = 0; + size_t ptrs_dst_s = 0; + + ptrs_src = (const void **) ggml_cuda_pool_malloc_async(2*ne23*sizeof(void *), &ptrs_src_s, id, main_stream); + ptrs_dst = ( void **) ggml_cuda_pool_malloc_async(1*ne23*sizeof(void *), &ptrs_dst_s, id, main_stream); dim3 block_dims(ne13, ne12); k_compute_batched_ptrs<<<1, block_dims, 0, main_stream>>>( src0_as_f16, src1_as_f16, dst_f16, - ptrs_as, + ptrs_src, ptrs_dst, ne12, ne13, ne23, nb02, nb03, @@ -7390,15 +7396,18 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const CUBLAS_CHECK( cublasGemmBatchedEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, - &alpha_f16, (const void * const *) (ptrs_as + 0*ne23), CUDA_R_16F, nb01/sizeof(half), - (const void * const *) (ptrs_as + 1*ne23), CUDA_R_16F, nb11/sizeof(float), - &beta_f16, ( void ** ) (ptrs_as + 2*ne23), CUDA_R_16F, ne01, + &alpha_f16, (const void **) (ptrs_src + 0*ne23), CUDA_R_16F, nb01/sizeof(half), + (const void **) (ptrs_src + 1*ne23), CUDA_R_16F, nb11/sizeof(float), + &beta_f16, ( void **) (ptrs_dst + 0*ne23), CUDA_R_16F, ne01, ne23, CUBLAS_COMPUTE_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); - // free device memory for pointers - if (ptrs_s != 0) { - ggml_cuda_pool_free_async(ptrs_as, ptrs_s, id, main_stream); + + if (ptrs_src_s != 0) { + ggml_cuda_pool_free_async(ptrs_src, ptrs_src_s, id, main_stream); + } + if (ptrs_dst_s != 0) { + ggml_cuda_pool_free_async(ptrs_dst, ptrs_dst_s, id, main_stream); } } #endif From 224e7d5b14cbabab7ae45c64db2cfde979c8455d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 2 Nov 2023 20:44:12 +0200 Subject: [PATCH 053/206] readme : add notice about #3912 --- README.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/README.md b/README.md index b56ecaec74b434..9c9e36ad07accf 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,6 @@ ![llama](https://user-images.githubusercontent.com/1991296/230134379-7181e485-c521-4d23-a0d6-f7b3b61ba524.png) -[![Actions Status](https://github.com/ggerganov/llama.cpp/workflows/CI/badge.svg)](https://github.com/ggerganov/llama.cpp/actions) [![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT) [Roadmap](https://github.com/users/ggerganov/projects/7) / [Project status](https://github.com/ggerganov/llama.cpp/discussions/3471) / [Manifesto](https://github.com/ggerganov/llama.cpp/discussions/205) / [ggml](https://github.com/ggerganov/ggml) @@ -11,8 +10,7 @@ Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++ ### Hot topics -- LLaVA support: https://github.com/ggerganov/llama.cpp/pull/3436 -- ‼️ BPE tokenizer update: existing Falcon and Starcoder `.gguf` models will need to be reconverted: [#3252](https://github.com/ggerganov/llama.cpp/pull/3252) +- ⚠️ **Upcoming change that might break functionality. 
Help with testing is needed:** https://github.com/ggerganov/llama.cpp/pull/3912 ---- From 51b2fc11f7f605fff49725a4540e9a6ef7b51b70 Mon Sep 17 00:00:00 2001 From: Andrei Date: Thu, 2 Nov 2023 15:40:31 -0400 Subject: [PATCH 054/206] cmake : fix relative path to git submodule index (#3915) --- common/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index 0150114e3bd2ce..ac594b2ca84ea8 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -11,7 +11,7 @@ if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/../.git") if(NOT IS_DIRECTORY "${GIT_DIR}") file(READ ${GIT_DIR} REAL_GIT_DIR_LINK) string(REGEX REPLACE "gitdir: (.*)\n$" "\\1" REAL_GIT_DIR ${REAL_GIT_DIR_LINK}) - set(GIT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/${REAL_GIT_DIR}") + set(GIT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../${REAL_GIT_DIR}") endif() set(GIT_INDEX "${GIT_DIR}/index") From 629f917cd6b96ba1274c49a8aab163b1b189229d Mon Sep 17 00:00:00 2001 From: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com> Date: Thu, 2 Nov 2023 13:58:22 -0600 Subject: [PATCH 055/206] cuda : add ROCM aliases for CUDA pool stuff (#3918) --- ggml-cuda.cu | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 06c28f5651b72f..baf02df2b22948 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -39,6 +39,10 @@ #define cudaDeviceCanAccessPeer hipDeviceCanAccessPeer #define cudaDeviceDisablePeerAccess hipDeviceDisablePeerAccess #define cudaDeviceEnablePeerAccess hipDeviceEnablePeerAccess +#define cudaDeviceGetMemPool hipDeviceGetMemPool +#define cudaMemPoolAttrReleaseThreshold hipMemPoolAttrReleaseThreshold +#define cudaMemPoolSetAttribute hipMemPoolSetAttribute +#define cudaMemPool_t hipMemPool_t #define cudaDeviceProp hipDeviceProp_t #define cudaDeviceSynchronize hipDeviceSynchronize #define cudaError_t hipError_t @@ -48,6 +52,7 @@ #define cudaEvent_t hipEvent_t #define cudaEventDestroy hipEventDestroy #define cudaFree hipFree +#define cudaFreeAsync hipFreeAsync #define cudaFreeHost hipHostFree #define cudaGetDevice hipGetDevice #define cudaGetDeviceCount hipGetDeviceCount @@ -55,6 +60,7 @@ #define cudaGetErrorString hipGetErrorString #define cudaGetLastError hipGetLastError #define cudaMalloc hipMalloc +#define cudaMallocFromPoolAsync hipMallocFromPoolAsync #define cudaMallocHost(ptr, size) hipHostMalloc(ptr, size, hipHostMallocDefault) #define cudaMemcpy hipMemcpy #define cudaMemcpy2DAsync hipMemcpy2DAsync From 3fdbe6b66b7b5c6ad3b2f245cbad1517c27ff776 Mon Sep 17 00:00:00 2001 From: cebtenzzre Date: Fri, 3 Nov 2023 02:31:58 -0400 Subject: [PATCH 056/206] llama : change yarn_ext_factor placeholder to -1 (#3922) --- llama.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/llama.cpp b/llama.cpp index bb60044b4707f7..cc0211ceb02113 100644 --- a/llama.cpp +++ b/llama.cpp @@ -7982,7 +7982,7 @@ struct llama_context_params llama_context_default_params() { /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_UNSPECIFIED, /*.rope_freq_base =*/ 0.0f, /*.rope_freq_scale =*/ 0.0f, - /*.yarn_ext_factor =*/ NAN, + /*.yarn_ext_factor =*/ -1.0f, /*.yarn_attn_factor =*/ 1.0f, /*.yarn_beta_fast =*/ 32.0f, /*.yarn_beta_slow =*/ 1.0f, @@ -8125,7 +8125,7 @@ struct llama_context * llama_new_context_with_model( cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none } - if (std::isnan(cparams.yarn_ext_factor)) { // NaN indicates 'not set' + if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set' cparams.yarn_ext_factor = 
rope_scaling_type == LLAMA_ROPE_SCALING_YARN ? 1.0f : 0.0f; } From 05816027d649f977468fc804cdb54e99eac246d1 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 3 Nov 2023 09:24:00 +0200 Subject: [PATCH 057/206] common : YAYF (yet another YARN fix) (#3925) ggml-ci --- common/common.h | 44 ++++++++++++++++++++++---------------------- llama.h | 10 +++++----- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/common/common.h b/common/common.h index 72a49b8901f26e..9ad62563302872 100644 --- a/common/common.h +++ b/common/common.h @@ -43,29 +43,29 @@ extern char const *LLAMA_BUILD_TARGET; int32_t get_num_physical_cores(); struct gpt_params { - uint32_t seed = -1; // RNG seed + uint32_t seed = -1; // RNG seed int32_t n_threads = get_num_physical_cores(); - int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads) - int32_t n_predict = -1; // new tokens to predict - int32_t n_ctx = 512; // context size - int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS) - int32_t n_keep = 0; // number of tokens to keep from initial prompt - int32_t n_draft = 16; // number of tokens to draft during speculative decoding - int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited) - int32_t n_parallel = 1; // number of parallel sequences to decode - int32_t n_sequences = 1; // number of sequences to decode - int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default) - int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default) - int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors - float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs - int32_t n_beams = 0; // if non-zero then use beam search of given width. - float rope_freq_base = 0.0f; // RoPE base frequency - float rope_freq_scale = 0.0f; // RoPE frequency scaling factor - float yarn_ext_factor = NAN; // YaRN extrapolation mix factor - float yarn_attn_factor = 1.0f; // YaRN magnitude scaling factor - float yarn_beta_fast = 32.0f;// YaRN low correction dim - float yarn_beta_slow = 1.0f; // YaRN high correction dim - int32_t yarn_orig_ctx = 0; // YaRN original context length + int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads) + int32_t n_predict = -1; // new tokens to predict + int32_t n_ctx = 512; // context size + int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS) + int32_t n_keep = 0; // number of tokens to keep from initial prompt + int32_t n_draft = 16; // number of tokens to draft during speculative decoding + int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited) + int32_t n_parallel = 1; // number of parallel sequences to decode + int32_t n_sequences = 1; // number of sequences to decode + int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default) + int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default) + int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors + float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs + int32_t n_beams = 0; // if non-zero then use beam search of given width. 
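Patch 056 above replaces the NAN placeholder for yarn_ext_factor with -1.0f, and llama_new_context_with_model now treats any negative value as "not set", resolving it from the RoPE scaling type (1.0f for YaRN, 0.0f otherwise). The standalone sketch below illustrates that sentinel-resolution pattern; ScalingType and resolve_ext_factor are hypothetical names used only for illustration, not part of the llama.cpp API.

// Minimal sketch of the "negative means unset" sentinel used for yarn_ext_factor.
// ScalingType and resolve_ext_factor are hypothetical names, not llama.cpp API.
#include <cstdio>

enum class ScalingType { None, Linear, Yarn };

static float resolve_ext_factor(float requested, ScalingType type) {
    if (requested < 0.0f) {
        // negative indicates "not set": enable extrapolation mixing only for YaRN
        return type == ScalingType::Yarn ? 1.0f : 0.0f;
    }
    return requested; // an explicit non-negative value is kept as-is
}

int main() {
    std::printf("%.1f\n", resolve_ext_factor(-1.0f, ScalingType::Yarn));   // 1.0
    std::printf("%.1f\n", resolve_ext_factor(-1.0f, ScalingType::Linear)); // 0.0
    std::printf("%.1f\n", resolve_ext_factor( 0.5f, ScalingType::Yarn));   // 0.5
    return 0;
}

Compared with the old NAN placeholder, a negative sentinel can be tested with an ordinary comparison, whereas NAN required the std::isnan check that the removed code used.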
+ float rope_freq_base = 0.0f; // RoPE base frequency + float rope_freq_scale = 0.0f; // RoPE frequency scaling factor + float yarn_ext_factor = -1.0f; // YaRN extrapolation mix factor + float yarn_attn_factor = 1.0f; // YaRN magnitude scaling factor + float yarn_beta_fast = 32.0f; // YaRN low correction dim + float yarn_beta_slow = 1.0f; // YaRN high correction dim + int32_t yarn_orig_ctx = 0; // YaRN original context length int8_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED; // // sampling parameters diff --git a/llama.h b/llama.h index 3f1becd7616885..e8dc04bb54b81c 100644 --- a/llama.h +++ b/llama.h @@ -175,11 +175,11 @@ extern "C" { }; struct llama_context_params { - uint32_t seed; // RNG seed, -1 for random - uint32_t n_ctx; // text context, 0 = from model - uint32_t n_batch; // prompt processing maximum batch size - uint32_t n_threads; // number of threads to use for generation - uint32_t n_threads_batch; // number of threads to use for batch processing + uint32_t seed; // RNG seed, -1 for random + uint32_t n_ctx; // text context, 0 = from model + uint32_t n_batch; // prompt processing maximum batch size + uint32_t n_threads; // number of threads to use for generation + uint32_t n_threads_batch; // number of threads to use for batch processing int8_t rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type` // ref: https://github.com/ggerganov/llama.cpp/pull/2054 From 8f961abdc4e134c83bf8c2ad618ab256b4cae0f9 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 3 Nov 2023 09:41:17 +0200 Subject: [PATCH 058/206] speculative : change default p_accept to 0.5 + CLI args (#3919) ggml-ci --- common/common.cpp | 14 ++++++++++++++ common/common.h | 8 ++++++-- examples/speculative/speculative.cpp | 8 +++++--- 3 files changed, 25 insertions(+), 5 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index e938dee165d9da..20cc4a081b2225 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -403,6 +403,18 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { break; } params.n_sequences = std::stoi(argv[i]); + } else if (arg == "--p-accept" || arg == "-pa") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.p_accept = std::stof(argv[i]); + } else if (arg == "--p-split" || arg == "-ps") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.p_split = std::stof(argv[i]); } else if (arg == "-m" || arg == "--model") { if (++i >= argc) { invalid_param = true; @@ -778,6 +790,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" --chunks N max number of chunks to process (default: %d, -1 = all)\n", params.n_chunks); printf(" -np N, --parallel N number of parallel sequences to decode (default: %d)\n", params.n_parallel); printf(" -ns N, --sequences N number of sequences to decode (default: %d)\n", params.n_sequences); + printf(" -pa N, --p-accept N speculative decoding accept probability (default: %.1f)\n", (double)params.p_accept); + printf(" -ps N, --p-split N speculative decoding split probability (default: %.1f)\n", (double)params.p_split); printf(" -cb, --cont-batching enable continuous batching (a.k.a dynamic batching) (default: disabled)\n"); printf(" --mmproj MMPROJ_FILE path to a multimodal projector file for LLaVA. see examples/llava/README.md\n"); printf(" --image IMAGE_FILE path to an image file. 
use with multimodal models\n"); diff --git a/common/common.h b/common/common.h index 9ad62563302872..dd6b002eb94ba2 100644 --- a/common/common.h +++ b/common/common.h @@ -44,6 +44,7 @@ int32_t get_num_physical_cores(); struct gpt_params { uint32_t seed = -1; // RNG seed + int32_t n_threads = get_num_physical_cores(); int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads) int32_t n_predict = -1; // new tokens to predict @@ -54,6 +55,8 @@ struct gpt_params { int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited) int32_t n_parallel = 1; // number of parallel sequences to decode int32_t n_sequences = 1; // number of sequences to decode + float p_accept = 0.5f; // speculative decoding accept probability + float p_split = 0.1f; // speculative decoding split probability int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default) int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default) int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors @@ -66,7 +69,8 @@ struct gpt_params { float yarn_beta_fast = 32.0f; // YaRN low correction dim float yarn_beta_slow = 1.0f; // YaRN high correction dim int32_t yarn_orig_ctx = 0; // YaRN original context length - int8_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED; + int8_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED; // TODO: better to be int32_t for alignment + // pinging @cebtenzzre // // sampling parameters struct llama_sampling_params sparams; @@ -90,7 +94,7 @@ struct gpt_params { int ppl_output_type = 0; // = 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line // (which is more convenient to use for plotting) // - bool hellaswag = false; // compute HellaSwag score over random tasks from datafile supplied in prompt + bool hellaswag = false; // compute HellaSwag score over random tasks from datafile supplied in prompt size_t hellaswag_tasks = 400; // number of tasks to use when computing the HellaSwag score bool mul_mat_q = true; // if true, use mul_mat_q kernels instead of cuBLAS diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp index 798684f66678e2..3a8e278110c20e 100644 --- a/examples/speculative/speculative.cpp +++ b/examples/speculative/speculative.cpp @@ -37,9 +37,11 @@ int main(int argc, char ** argv) { // max number of parallel drafting sequences (i.e. 
tree branches) const int n_seq_dft = params.n_parallel; - // TODO: make this configurable - const float p_accept = 0.80f; - const float p_split = 0.10f; + // probability threshold for accepting a token from the draft model + const float p_accept = params.p_accept; + + // probability threshold for splitting a draft branch (only for n_seq_dft > 1) + const float p_split = params.p_split; #ifndef LOG_DISABLE_LOGS log_set_target(log_filename_generator("speculative", "log")); From abb77e7319aabc0b5cfb7c22da690a692489b6b7 Mon Sep 17 00:00:00 2001 From: slaren Date: Fri, 3 Nov 2023 12:13:09 +0100 Subject: [PATCH 059/206] ggml-cuda : move row numbers to x grid dim in mmv kernels (#3921) --- ggml-cuda.cu | 53 ++++++++++++++++++++++++++-------------------------- 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index baf02df2b22948..bdbcca0cabb88e 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -989,7 +989,7 @@ static __global__ void dequantize_mul_mat_vec_q2_k(const void * __restrict__ vx, static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION"); - const int row = blockIdx.y*blockDim.y + threadIdx.y; + const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; @@ -1093,7 +1093,7 @@ static __global__ void dequantize_mul_mat_vec_q2_k(const void * __restrict__ vx, static __global__ void dequantize_mul_mat_vec_q3_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { - const int row = blockIdx.y*blockDim.y + threadIdx.y; + const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; @@ -1197,7 +1197,7 @@ static __global__ void dequantize_mul_mat_vec_q3_k(const void * __restrict__ vx, static __global__ void dequantize_mul_mat_vec_q4_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { - const int row = blockIdx.y*blockDim.y + threadIdx.y; + const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; @@ -1451,7 +1451,7 @@ static __global__ void dequantize_mul_mat_vec_q6_k(const void * __restrict__ vx, static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION"); - const int row = blockIdx.y*blockDim.y + threadIdx.y; + const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; @@ -4261,7 +4261,7 @@ template static __global__ void template static __global__ void mul_mat_vec_q(const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols, const int nrows) { - const int row = blockIdx.y*blockDim.y + threadIdx.y; + const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row >= nrows) { return; @@ -4301,7 +4301,7 @@ template static __global__ void dequantize_mul_mat_vec(const void * __restrict__ vx, const dfloat * __restrict__ y, float * __restrict__ dst, const int ncols, const int nrows) { // qk = quantized weights per x block // qr = number of quantized weights per data value in x block - const int row = blockIdx.y*blockDim.y + threadIdx.y; + const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row >= nrows) { return; @@ -4874,7 +4874,8 @@ static void dequantize_row_q6_K_cuda(const void * vx, dst_t * y, const int k, cu static void 
dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); + // the number of rows may exceed maximum grid size in the y or z dimensions, use the x dimension instead + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); dequantize_mul_mat_vec <<>>(vx, y, dst, ncols, nrows); @@ -4883,7 +4884,7 @@ static void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const dfloat * y, static void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); dequantize_mul_mat_vec <<>>(vx, y, dst, ncols, nrows); @@ -4892,7 +4893,7 @@ static void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const dfloat * y, static void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); dequantize_mul_mat_vec <<>>(vx, y, dst, ncols, nrows); @@ -4901,7 +4902,7 @@ static void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const dfloat * y, static void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); dequantize_mul_mat_vec <<>>(vx, y, dst, ncols, nrows); @@ -4910,7 +4911,7 @@ static void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const dfloat * y, static void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); dequantize_mul_mat_vec <<>>(vx, y, dst, ncols, nrows); @@ -4920,7 +4921,7 @@ static void dequantize_mul_mat_vec_q2_K_cuda(const void * vx, const float * y, f GGML_ASSERT(ncols % QK_K == 0); const int ny = 2; // very slightly faster than 1 even when K_QUANTS_PER_ITERATION = 2 const int block_num_y = (nrows + ny - 1) / ny; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(32, ny, 1); dequantize_mul_mat_vec_q2_k<<>>(vx, y, dst, ncols, nrows); } @@ -4929,7 +4930,7 @@ static void dequantize_mul_mat_vec_q3_K_cuda(const void * vx, const float * y, f GGML_ASSERT(ncols % QK_K == 0); const int ny = 2 / K_QUANTS_PER_ITERATION; const int block_num_y = (nrows + ny - 1) / ny; - const dim3 block_nums(1, block_num_y, 1); + const 
dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(32, ny, 1); dequantize_mul_mat_vec_q3_k<<>>(vx, y, dst, ncols, nrows); } @@ -4938,7 +4939,7 @@ static void dequantize_mul_mat_vec_q4_K_cuda(const void * vx, const float * y, f GGML_ASSERT(ncols % QK_K == 0); const int ny = 2 / K_QUANTS_PER_ITERATION; const int block_num_y = (nrows + ny - 1) / ny; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(32, ny, 1); dequantize_mul_mat_vec_q4_k<<>>(vx, y, dst, ncols, nrows); } @@ -4953,7 +4954,7 @@ static void dequantize_mul_mat_vec_q6_K_cuda(const void * vx, const float * y, f GGML_ASSERT(ncols % QK_K == 0); const int ny = 2 / K_QUANTS_PER_ITERATION; const int block_num_y = (nrows + ny - 1) / ny; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(32, ny, 1); dequantize_mul_mat_vec_q6_k<<>>(vx, y, dst, ncols, nrows); } @@ -4961,7 +4962,7 @@ static void dequantize_mul_mat_vec_q6_K_cuda(const void * vx, const float * y, f static void mul_mat_vec_q4_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % QK4_0 == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); mul_mat_vec_q <<>>(vx, vy, dst, ncols, nrows); @@ -4970,7 +4971,7 @@ static void mul_mat_vec_q4_0_q8_1_cuda(const void * vx, const void * vy, float * static void mul_mat_vec_q4_1_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % QK4_1 == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); mul_mat_vec_q <<>>(vx, vy, dst, ncols, nrows); @@ -4979,7 +4980,7 @@ static void mul_mat_vec_q4_1_q8_1_cuda(const void * vx, const void * vy, float * static void mul_mat_vec_q5_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % QK5_0 == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); mul_mat_vec_q <<>>(vx, vy, dst, ncols, nrows); @@ -4988,7 +4989,7 @@ static void mul_mat_vec_q5_0_q8_1_cuda(const void * vx, const void * vy, float * static void mul_mat_vec_q5_1_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % QK5_1 == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); mul_mat_vec_q <<>>(vx, vy, dst, ncols, nrows); @@ -4997,7 +4998,7 @@ static void mul_mat_vec_q5_1_q8_1_cuda(const void * vx, const void * vy, float * static void mul_mat_vec_q8_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % QK8_0 == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); 
const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); mul_mat_vec_q <<>>(vx, vy, dst, ncols, nrows); @@ -5006,7 +5007,7 @@ static void mul_mat_vec_q8_0_q8_1_cuda(const void * vx, const void * vy, float * static void mul_mat_vec_q2_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); mul_mat_vec_q <<>>(vx, vy, dst, ncols, nrows); @@ -5015,7 +5016,7 @@ static void mul_mat_vec_q2_K_q8_1_cuda(const void * vx, const void * vy, float * static void mul_mat_vec_q3_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); mul_mat_vec_q <<>>(vx, vy, dst, ncols, nrows); @@ -5024,7 +5025,7 @@ static void mul_mat_vec_q3_K_q8_1_cuda(const void * vx, const void * vy, float * static void mul_mat_vec_q4_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); mul_mat_vec_q <<>>(vx, vy, dst, ncols, nrows); @@ -5033,7 +5034,7 @@ static void mul_mat_vec_q4_K_q8_1_cuda(const void * vx, const void * vy, float * static void mul_mat_vec_q5_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); mul_mat_vec_q <<>>(vx, vy, dst, ncols, nrows); @@ -5042,7 +5043,7 @@ static void mul_mat_vec_q5_K_q8_1_cuda(const void * vx, const void * vy, float * static void mul_mat_vec_q6_K_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % QK_K == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); mul_mat_vec_q <<>>(vx, vy, dst, ncols, nrows); @@ -5061,7 +5062,7 @@ static void convert_fp32_to_fp16_cuda(const void * vx, half * y, const int k, cu static void convert_mul_mat_vec_f16_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) { GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0); const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; - const dim3 block_nums(1, block_num_y, 1); + const dim3 block_nums(block_num_y, 1, 1); const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1); dequantize_mul_mat_vec<1, 1, convert_f16> <<>>(vx, y, dst, ncols, nrows); From 5ba37461711095c0284233dbd14f0d9010cdbf56 Mon Sep 17 00:00:00 2001 From: Xiao-Yong Jin Date: Fri, 3 Nov 2023 13:00:31 -0500 Subject: [PATCH 060/206] ggml-metal: fix 
yarn rope (#3937) --- ggml-metal.m | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ggml-metal.m b/ggml-metal.m index b33a3cb8fd0128..acdb8384316862 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -1403,7 +1403,8 @@ void ggml_metal_graph_compute( const int n_past = ((int32_t *) dst->op_params)[0]; const int n_dims = ((int32_t *) dst->op_params)[1]; const int mode = ((int32_t *) dst->op_params)[2]; - const int n_orig_ctx = ((int32_t *) dst->op_params)[3]; + // skip 3, n_ctx, used in GLM RoPE, unimplemented in metal + const int n_orig_ctx = ((int32_t *) dst->op_params)[4]; float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); From d9b33fe95bd257b36c84ee5769cc048230067d6f Mon Sep 17 00:00:00 2001 From: Peter Sugihara Date: Fri, 3 Nov 2023 12:18:18 -0700 Subject: [PATCH 061/206] metal : round up to 16 to fix MTLDebugComputeCommandEncoder assertion (#3938) --- ggml-metal.m | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ggml-metal.m b/ggml-metal.m index acdb8384316862..78ae4485da8e27 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -1017,7 +1017,7 @@ void ggml_metal_graph_compute( [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2]; [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3]; [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4]; - [encoder setThreadgroupMemoryLength:nth/32*sizeof(float) atIndex:0]; + [encoder setThreadgroupMemoryLength:MAX(16, nth/32*sizeof(float)) atIndex:0]; [encoder dispatchThreadgroups:MTLSizeMake(ne01*ne02*ne03, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; @@ -1348,7 +1348,7 @@ void ggml_metal_graph_compute( [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2]; [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3]; [encoder setBytes:&eps length:sizeof( float) atIndex:4]; - [encoder setThreadgroupMemoryLength:nth*sizeof(float) atIndex:0]; + [encoder setThreadgroupMemoryLength:MAX(16, nth*sizeof(float)) atIndex:0]; const int64_t nrows = ggml_nrows(src0); From f28af0d81aa1010afa5de74cf627dcb04bea3157 Mon Sep 17 00:00:00 2001 From: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com> Date: Sat, 4 Nov 2023 16:20:34 -0600 Subject: [PATCH 062/206] gguf-py: Support 01.AI Yi models (#3943) --- gguf-py/gguf/gguf.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gguf-py/gguf/gguf.py b/gguf-py/gguf/gguf.py index 727b4e55495a76..a2271d225d0012 100644 --- a/gguf-py/gguf/gguf.py +++ b/gguf-py/gguf/gguf.py @@ -393,6 +393,7 @@ class TensorNameMap: "layers.{bid}.attention_norm", # llama-pth "encoder.layer.{bid}.attention.output.LayerNorm", # bert "language_model.encoder.layers.{bid}.input_layernorm", # persimmon + "model.layers.{bid}.ln1", # yi ), # Attention norm 2 @@ -464,6 +465,7 @@ class TensorNameMap: "layers.{bid}.ffn_norm", # llama-pth "encoder.layer.{bid}.output.LayerNorm", # bert "language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon + "model.layers.{bid}.ln2", # yi ), # Feed-forward up From 48ade94538fa509465d71023e49d07aab0ec8cd5 Mon Sep 17 00:00:00 2001 From: slaren Date: Sun, 5 Nov 2023 08:12:13 +0100 Subject: [PATCH 063/206] cuda : revert CUDA pool stuff (#3944) * Revert "cuda : add ROCM aliases for CUDA pool stuff (#3918)" This reverts commit 629f917cd6b96ba1274c49a8aab163b1b189229d. * Revert "cuda : use CUDA memory pool with async memory allocation/deallocation when available (#3903)" This reverts commit d6069051de7165a4e06662c89257f5d2905bb156. 
ggml-ci --- ggml-cuda.cu | 131 ++++++++++++++++++++------------------------------- 1 file changed, 50 insertions(+), 81 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index bdbcca0cabb88e..dc14f2f5d76c66 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -39,10 +39,6 @@ #define cudaDeviceCanAccessPeer hipDeviceCanAccessPeer #define cudaDeviceDisablePeerAccess hipDeviceDisablePeerAccess #define cudaDeviceEnablePeerAccess hipDeviceEnablePeerAccess -#define cudaDeviceGetMemPool hipDeviceGetMemPool -#define cudaMemPoolAttrReleaseThreshold hipMemPoolAttrReleaseThreshold -#define cudaMemPoolSetAttribute hipMemPoolSetAttribute -#define cudaMemPool_t hipMemPool_t #define cudaDeviceProp hipDeviceProp_t #define cudaDeviceSynchronize hipDeviceSynchronize #define cudaError_t hipError_t @@ -52,7 +48,6 @@ #define cudaEvent_t hipEvent_t #define cudaEventDestroy hipEventDestroy #define cudaFree hipFree -#define cudaFreeAsync hipFreeAsync #define cudaFreeHost hipHostFree #define cudaGetDevice hipGetDevice #define cudaGetDeviceCount hipGetDeviceCount @@ -60,7 +55,6 @@ #define cudaGetErrorString hipGetErrorString #define cudaGetLastError hipGetLastError #define cudaMalloc hipMalloc -#define cudaMallocFromPoolAsync hipMallocFromPoolAsync #define cudaMallocHost(ptr, size) hipHostMalloc(ptr, size, hipHostMallocDefault) #define cudaMemcpy hipMemcpy #define cudaMemcpy2DAsync hipMemcpy2DAsync @@ -187,11 +181,11 @@ static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size"); do { \ cudaError_t err_ = (err); \ if (err_ != cudaSuccess) { \ - int dev_id; \ - cudaGetDevice(&dev_id); \ + int id; \ + cudaGetDevice(&id); \ fprintf(stderr, "\nCUDA error %d at %s:%d: %s\n", err_, __FILE__, __LINE__, \ cudaGetErrorString(err_)); \ - fprintf(stderr, "current device: %d\n", dev_id); \ + fprintf(stderr, "current device: %d\n", id); \ exit(1); \ } \ } while (0) @@ -201,11 +195,11 @@ static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size"); do { \ cublasStatus_t err_ = (err); \ if (err_ != CUBLAS_STATUS_SUCCESS) { \ - int dev_id; \ - cudaGetDevice(&dev_id); \ + int id; \ + cudaGetDevice(&id); \ fprintf(stderr, "\ncuBLAS error %d at %s:%d: %s\n", \ err_, __FILE__, __LINE__, cublasGetStatusString(err_)); \ - fprintf(stderr, "current device: %d\n", dev_id); \ + fprintf(stderr, "current device: %d\n", id); \ exit(1); \ } \ } while (0) @@ -471,7 +465,6 @@ static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUA #define MAX_STREAMS 8 static cudaStream_t g_cudaStreams[GGML_CUDA_MAX_DEVICES][MAX_STREAMS] = { nullptr }; -static cudaMemPool_t g_cudaMemPools[GGML_CUDA_MAX_DEVICES] = { nullptr }; struct ggml_tensor_extra_gpu { void * data_device[GGML_CUDA_MAX_DEVICES]; // 1 pointer for each device for split tensors @@ -5780,16 +5773,6 @@ static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) { return ptr; } -static void * ggml_cuda_pool_malloc_async(size_t size, size_t * actual_size, int id, cudaStream_t stream) { - if (g_cudaMemPools[id] == nullptr) { - return ggml_cuda_pool_malloc(size, actual_size); - } - void *ptr; - CUDA_CHECK(cudaMallocFromPoolAsync(&ptr, size, g_cudaMemPools[id], stream)); - *actual_size = size; - return ptr; -} - static void ggml_cuda_pool_free(void * ptr, size_t size) { scoped_spin_lock lock(g_cuda_pool_lock); int id; @@ -5808,13 +5791,6 @@ static void ggml_cuda_pool_free(void * ptr, size_t size) { } -static void ggml_cuda_pool_free_async(void * ptr, size_t actual_size, int id, cudaStream_t stream) { - if (g_cudaMemPools[id] == nullptr) { - return 
ggml_cuda_pool_free(ptr, actual_size); - } - CUDA_CHECK(cudaFreeAsync(ptr, stream)); -} - void ggml_init_cublas() { static bool initialized = false; @@ -5869,13 +5845,6 @@ void ggml_init_cublas() { // create cublas handle CUBLAS_CHECK(cublasCreate(&g_cublas_handles[id])); CUBLAS_CHECK(cublasSetMathMode(g_cublas_handles[id], CUBLAS_TF32_TENSOR_OP_MATH)); - - // configure memory pool - cudaError_t err = cudaDeviceGetMemPool(&g_cudaMemPools[id], id); - if (err == cudaSuccess) { - size_t treshold = UINT64_MAX; - CUDA_CHECK(cudaMemPoolSetAttribute(g_cudaMemPools[id], cudaMemPoolAttrReleaseThreshold, &treshold)); - } } // configure logging to stdout @@ -6469,7 +6438,7 @@ inline void ggml_cuda_op_mul_mat_cublas( const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src0->type); GGML_ASSERT(to_fp16_cuda != nullptr); size_t ne = row_diff*ne00; - src0_as_f16 = (half *) ggml_cuda_pool_malloc_async(ne * sizeof(half), &src0_as, id, stream); + src0_as_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &src0_as); to_fp16_cuda(src0_dd_i, src0_as_f16, ne, stream); } const half * src0_ptr = src0->type == GGML_TYPE_F16 ? (const half *) src0_dd_i : src0_as_f16; @@ -6480,12 +6449,13 @@ inline void ggml_cuda_op_mul_mat_cublas( const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type); GGML_ASSERT(to_fp16_cuda != nullptr); size_t ne = src1_ncols*ne10; - src1_as_f16 = (half *) ggml_cuda_pool_malloc_async(ne * sizeof(half), &src1_as, id, stream); + src1_as_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &src1_as); to_fp16_cuda(src1_ddf_i, src1_as_f16, ne, stream); } const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddq_i : src1_as_f16; - size_t dst_f16_as = 0; - half * dst_f16 = (half *) ggml_cuda_pool_malloc_async(row_diff*src1_ncols * sizeof(half), &dst_f16_as, id, stream); + + size_t dst_as = 0; + half * dst_f16 = (half *) ggml_cuda_pool_malloc(row_diff*src1_ncols * sizeof(half), &dst_as); const half alpha_f16 = 1.0f; const half beta_f16 = 0.0f; @@ -6503,15 +6473,14 @@ inline void ggml_cuda_op_mul_mat_cublas( const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); to_fp32_cuda(dst_f16, dst_dd_i, row_diff*src1_ncols, stream); - if (dst_f16_as != 0) { - ggml_cuda_pool_free_async(dst_f16, dst_f16_as, id, stream); - } + ggml_cuda_pool_free(dst_f16, dst_as); if (src0_as != 0) { - ggml_cuda_pool_free_async(src0_as_f16, src0_as, id, stream); + ggml_cuda_pool_free(src0_as_f16, src0_as); } + if (src1_as != 0) { - ggml_cuda_pool_free_async(src1_as_f16, src1_as, id, stream); + ggml_cuda_pool_free(src1_as_f16, src1_as); } } else { @@ -6521,7 +6490,7 @@ inline void ggml_cuda_op_mul_mat_cublas( if (src0->type != GGML_TYPE_F32) { const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src0->type); GGML_ASSERT(to_fp32_cuda != nullptr); - src0_ddq_as_f32 = (float *) ggml_cuda_pool_malloc_async(row_diff*ne00 * sizeof(float), &src0_as, id, stream); // NOLINT + src0_ddq_as_f32 = (float *) ggml_cuda_pool_malloc(row_diff*ne00 * sizeof(float), &src0_as); // NOLINT to_fp32_cuda(src0_dd_i, src0_ddq_as_f32, row_diff*ne00, stream); } const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? 
(const float *) src0_dd_i : src0_ddq_as_f32; @@ -6538,7 +6507,7 @@ inline void ggml_cuda_op_mul_mat_cublas( &beta, dst_dd_i, ldc)); if (src0_as != 0) { - ggml_cuda_pool_free_async(src0_ddq_as_f32, src0_as, id, stream); + ggml_cuda_pool_free(src0_ddq_as_f32, src0_as); } } @@ -6961,22 +6930,21 @@ static void ggml_cuda_op_mul_mat( src0_dd[id] = (char *) src0_extra->data_device[id]; } else { const size_t size_src0_ddq = split ? (row_high[id]-row_low[id])*ne00 * src0_ts/src0_bs : ggml_nbytes(src0); - src0_dd[id] = (char *) ggml_cuda_pool_malloc_async(ggml_nbytes(src0), &src0_as[id], id, stream); + src0_dd[id] = (char *) ggml_cuda_pool_malloc(ggml_nbytes(src0), &src0_as[id]); } if (src1_on_device && src1_is_contiguous) { src1_ddf[id] = (float *) src1_extra->data_device[id]; } else { - src1_ddf[id] = (float *) ggml_cuda_pool_malloc_async(ggml_nbytes(src1), &src1_asf[id], id, stream); + src1_ddf[id] = (float *) ggml_cuda_pool_malloc(ggml_nbytes(src1), &src1_asf[id]); } if (convert_src1_to_q8_1) { - const size_t size_dst_ddq = nrows1*src1_padded_col_size*q8_1_ts/q8_1_bs; - src1_ddq[id] = (char *) ggml_cuda_pool_malloc_async(size_dst_ddq, &src1_asq[id], id, stream); + src1_ddq[id] = (char *) ggml_cuda_pool_malloc(nrows1*src1_padded_col_size*q8_1_ts/q8_1_bs, &src1_asq[id]); if (src1_on_device && src1_is_contiguous) { quantize_row_q8_1_cuda(src1_ddf[id], src1_ddq[id], ne10, nrows1, src1_padded_col_size, stream); - // CUDA_CHECK(cudaGetLastError()); + CUDA_CHECK(cudaGetLastError()); } } @@ -6984,7 +6952,7 @@ static void ggml_cuda_op_mul_mat( dst_dd[id] = (float *) dst_extra->data_device[id]; } else { const size_t size_dst_ddf = split ? (row_high[id]-row_low[id])*ne1*sizeof(float) : ggml_nbytes(dst); - dst_dd[id] = (float *) ggml_cuda_pool_malloc_async(size_dst_ddf, &dst_as[id], id, stream); + dst_dd[id] = (float *) ggml_cuda_pool_malloc(size_dst_ddf, &dst_as[id]); } } @@ -7110,6 +7078,24 @@ static void ggml_cuda_op_mul_mat( } } + for (int64_t id = 0; id < g_device_count; ++id) { + CUDA_CHECK(ggml_cuda_set_device(id)); + + // free buffers again when done + if (src0_as[id] > 0) { + ggml_cuda_pool_free(src0_dd[id], src0_as[id]); + } + if (src1_asf[id] > 0) { + ggml_cuda_pool_free(src1_ddf[id], src1_asf[id]); + } + if (src1_asq[id] > 0) { + ggml_cuda_pool_free(src1_ddq[id], src1_asq[id]); + } + if (dst_as[id] > 0) { + ggml_cuda_pool_free(dst_dd[id], dst_as[id]); + } + } + // main device waits for all other devices to be finished if (split && g_device_count > 1) { int64_t is_max = (ne11 + MUL_MAT_SRC1_COL_STRIDE - 1) / MUL_MAT_SRC1_COL_STRIDE; @@ -7127,21 +7113,6 @@ static void ggml_cuda_op_mul_mat( CUDA_CHECK(ggml_cuda_set_device(g_main_device)); CUDA_CHECK(cudaDeviceSynchronize()); } - - for (int64_t id = 0; id < g_device_count; ++id) { - if (src0_as[id] > 0) { - ggml_cuda_pool_free_async(src0_dd[id], src0_as[id], id, g_cudaStreams[id][0]); - } - if (src1_asf[id] > 0) { - ggml_cuda_pool_free_async(src1_ddf[id], src1_asf[id], id, g_cudaStreams[id][0]); - } - if (src1_asq[id] > 0) { - ggml_cuda_pool_free_async(src1_ddq[id], src1_asq[id], id, g_cudaStreams[id][0]); - } - if (dst_as[id] > 0) { - ggml_cuda_pool_free_async(dst_dd[id], dst_as[id], id, g_cudaStreams[id][0]); - } - } } static void ggml_cuda_repeat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { @@ -7328,11 +7299,11 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const GGML_ASSERT(to_fp16_cuda != nullptr); size_t src1_as = 0; - half * src1_as_f16 = (half *) ggml_cuda_pool_malloc_async(ne1 * 
sizeof(half), &src1_as, id, main_stream); + half * src1_as_f16 = (half *) ggml_cuda_pool_malloc(ne1 * sizeof(half), &src1_as); to_fp16_cuda(src1_ddf, src1_as_f16, ne1, main_stream); size_t dst_as = 0; - half * dst_f16 = (half *) ggml_cuda_pool_malloc_async(ne * sizeof(half), &dst_as, id, main_stream); + half * dst_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &dst_as); GGML_ASSERT(ne12 % ne02 == 0); GGML_ASSERT(ne13 % ne03 == 0); @@ -7386,8 +7357,8 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const size_t ptrs_src_s = 0; size_t ptrs_dst_s = 0; - ptrs_src = (const void **) ggml_cuda_pool_malloc_async(2*ne23*sizeof(void *), &ptrs_src_s, id, main_stream); - ptrs_dst = ( void **) ggml_cuda_pool_malloc_async(1*ne23*sizeof(void *), &ptrs_dst_s, id, main_stream); + ptrs_src = (const void **) ggml_cuda_pool_malloc(2*ne23*sizeof(void *), &ptrs_src_s); + ptrs_dst = ( void **) ggml_cuda_pool_malloc(1*ne23*sizeof(void *), &ptrs_dst_s); dim3 block_dims(ne13, ne12); k_compute_batched_ptrs<<<1, block_dims, 0, main_stream>>>( @@ -7400,6 +7371,7 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const dst->nb[2], dst->nb[3], r2, r3); CUDA_CHECK(cudaGetLastError()); + CUBLAS_CHECK( cublasGemmBatchedEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, @@ -7411,22 +7383,19 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const CUBLAS_GEMM_DEFAULT_TENSOR_OP)); if (ptrs_src_s != 0) { - ggml_cuda_pool_free_async(ptrs_src, ptrs_src_s, id, main_stream); + ggml_cuda_pool_free(ptrs_src, ptrs_src_s); } if (ptrs_dst_s != 0) { - ggml_cuda_pool_free_async(ptrs_dst, ptrs_dst_s, id, main_stream); + ggml_cuda_pool_free(ptrs_dst, ptrs_dst_s); } } #endif const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); to_fp32_cuda(dst_f16, dst_ddf, ne, main_stream); - if (src1_as != 0) { - ggml_cuda_pool_free_async(src1_as_f16, src1_as, id, main_stream); - } - if (dst_as != 0) { - ggml_cuda_pool_free_async(dst_f16, dst_as, id, main_stream); - } + + ggml_cuda_pool_free(src1_as_f16, src1_as); + ggml_cuda_pool_free(dst_f16, dst_as); } static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { From a7fac013cf1cc7bbc0160a226aa2412e9f22e78a Mon Sep 17 00:00:00 2001 From: Eve <139727413+netrunnereve@users.noreply.github.com> Date: Sun, 5 Nov 2023 07:46:44 +0000 Subject: [PATCH 064/206] ci : use intel sde when ci cpu doesn't support avx512 (#3949) --- .github/workflows/build.yml | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5af497a3ce3214..bc295d52d2d5d2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -288,6 +288,7 @@ jobs: OPENBLAS_VERSION: 0.3.23 OPENCL_VERSION: 2023.04.17 CLBLAST_VERSION: 1.6.0 + SDE_VERSION: 9.21.1-2023-04-24 strategy: matrix: @@ -383,11 +384,23 @@ jobs: - name: Test id: cmake_test - if: ${{ matrix.build != 'clblast' && (matrix.build != 'avx512' || env.HAS_AVX512F == '1') }} # Test AVX-512 only when possible + if: ${{ matrix.build != 'clblast' && (matrix.build != 'avx512' || env.HAS_AVX512F == '1') }} # not all machines have native AVX-512 run: | cd build ctest -C Release --verbose --timeout 900 + - name: Test (Intel SDE) + id: cmake_test_sde + if: ${{ matrix.build == 'avx512' && env.HAS_AVX512F == '0' }} # use Intel SDE for AVX-512 emulation + run: | + curl.exe -o $env:RUNNER_TEMP/sde.tar.xz -L 
"https://downloadmirror.intel.com/777395/sde-external-${env:SDE_VERSION}-win.tar.xz" + # for some weird reason windows tar doesn't like sde tar.xz + 7z x "-o${env:RUNNER_TEMP}" $env:RUNNER_TEMP/sde.tar.xz + 7z x "-o${env:RUNNER_TEMP}" $env:RUNNER_TEMP/sde.tar + $sde = $(join-path $env:RUNNER_TEMP sde-external-${env:SDE_VERSION}-win/sde.exe) + cd build + & $sde -future -- ctest -C Release --verbose --timeout 900 + - name: Determine tag name id: tag shell: bash From c41ea36eaa3548776de4cb3d5d49b925cd3fc0f2 Mon Sep 17 00:00:00 2001 From: Eve <139727413+netrunnereve@users.noreply.github.com> Date: Sun, 5 Nov 2023 08:03:09 +0000 Subject: [PATCH 065/206] cmake : MSVC instruction detection (fixed up #809) (#3923) * Add detection code for avx * Only check hardware when option is ON * Modify per code review sugguestions * Build locally will detect CPU * Fixes CMake style to use lowercase like everywhere else * cleanup * fix merge * linux/gcc version for testing * msvc combines avx2 and fma into /arch:AVX2 so check for both * cleanup * msvc only version * style * Update FindSIMD.cmake --------- Co-authored-by: Howard Su Co-authored-by: Jeremy Dunn --- CMakeLists.txt | 8 +++- cmake/FindSIMD.cmake | 100 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 106 insertions(+), 2 deletions(-) create mode 100644 cmake/FindSIMD.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 3c49d645c3196f..7b4eb18403c0bf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -10,7 +10,7 @@ endif() set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) -if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) +if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) set(LLAMA_STANDALONE ON) # configure project version @@ -44,7 +44,7 @@ endif() # general option(LLAMA_STATIC "llama: static link libraries" OFF) -option(LLAMA_NATIVE "llama: enable -march=native flag" OFF) +option(LLAMA_NATIVE "llama: enable -march=native flag" ON) option(LLAMA_LTO "llama: enable link time optimization" OFF) # debug @@ -510,6 +510,10 @@ if ((${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm") OR (${CMAKE_SYSTEM_PROCESSOR} MATC elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$" OR "${CMAKE_GENERATOR_PLATFORM_LWR}" MATCHES "^(x86_64|i686|amd64|x64)$" ) message(STATUS "x86 detected") if (MSVC) + # instruction set detection for MSVC only + if (LLAMA_NATIVE) + include(cmake/FindSIMD.cmake) + endif () if (LLAMA_AVX512) add_compile_options($<$:/arch:AVX512>) add_compile_options($<$:/arch:AVX512>) diff --git a/cmake/FindSIMD.cmake b/cmake/FindSIMD.cmake new file mode 100644 index 00000000000000..33377ec44de12c --- /dev/null +++ b/cmake/FindSIMD.cmake @@ -0,0 +1,100 @@ +include(CheckCSourceRuns) + +set(AVX_CODE " + #include + int main() + { + __m256 a; + a = _mm256_set1_ps(0); + return 0; + } +") + +set(AVX512_CODE " + #include + int main() + { + __m512i a = _mm512_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0); + __m512i b = a; + __mmask64 equality_mask = _mm512_cmp_epi8_mask(a, b, _MM_CMPINT_EQ); + return 0; + } +") + +set(AVX2_CODE " + #include + int main() + { + __m256i a = {0}; + a = _mm256_abs_epi16(a); + __m256i x; + _mm256_extract_epi64(x, 0); // we rely on this in our AVX2 code + return 0; + } +") + +set(FMA_CODE " + #include + int main() + { + __m256 acc = _mm256_setzero_ps(); + const __m256 d = _mm256_setzero_ps(); + const __m256 p = _mm256_setzero_ps(); + 
acc = _mm256_fmadd_ps( d, p, acc ); + return 0; + } +") + +macro(check_sse type flags) + set(__FLAG_I 1) + set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) + foreach (__FLAG ${flags}) + if (NOT ${type}_FOUND) + set(CMAKE_REQUIRED_FLAGS ${__FLAG}) + check_c_source_runs("${${type}_CODE}" HAS_${type}_${__FLAG_I}) + if (HAS_${type}_${__FLAG_I}) + set(${type}_FOUND TRUE CACHE BOOL "${type} support") + set(${type}_FLAGS "${__FLAG}" CACHE STRING "${type} flags") + endif() + math(EXPR __FLAG_I "${__FLAG_I}+1") + endif() + endforeach() + set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE}) + + if (NOT ${type}_FOUND) + set(${type}_FOUND FALSE CACHE BOOL "${type} support") + set(${type}_FLAGS "" CACHE STRING "${type} flags") + endif() + + mark_as_advanced(${type}_FOUND ${type}_FLAGS) +endmacro() + +# flags are for MSVC only! +check_sse("AVX" " ;/arch:AVX") +if (NOT ${AVX_FOUND}) + set(LLAMA_AVX OFF) +else() + set(LLAMA_AVX ON) +endif() + +check_sse("AVX2" " ;/arch:AVX2") +check_sse("FMA" " ;/arch:AVX2") +if ((NOT ${AVX2_FOUND}) OR (NOT ${FMA_FOUND})) + set(LLAMA_AVX2 OFF) +else() + set(LLAMA_AVX2 ON) +endif() + +check_sse("AVX512" " ;/arch:AVX512") +if (NOT ${AVX512_FOUND}) + set(LLAMA_AVX512 OFF) +else() + set(LLAMA_AVX512 ON) +endif() From 3d48f42efcd05381221654376e9f6f69d76af739 Mon Sep 17 00:00:00 2001 From: Meng Zhang Date: Sun, 5 Nov 2023 04:40:08 -0800 Subject: [PATCH 066/206] llama : mark LLM_ARCH_STARCODER as full offload supported (#3945) as done in https://github.com/ggerganov/llama.cpp/pull/3827 --- llama.cpp | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/llama.cpp b/llama.cpp index cc0211ceb02113..e165390005c850 100644 --- a/llama.cpp +++ b/llama.cpp @@ -5164,11 +5164,12 @@ static int llama_decode_internal( // If all tensors can be run on the GPU then using more than 1 thread is detrimental. 
const bool full_offload_supported = - model.arch == LLM_ARCH_LLAMA || - model.arch == LLM_ARCH_BAICHUAN || - model.arch == LLM_ARCH_FALCON || - model.arch == LLM_ARCH_REFACT || - model.arch == LLM_ARCH_MPT; + model.arch == LLM_ARCH_LLAMA || + model.arch == LLM_ARCH_BAICHUAN || + model.arch == LLM_ARCH_FALCON || + model.arch == LLM_ARCH_REFACT || + model.arch == LLM_ARCH_MPT || + model.arch == LLM_ARCH_STARCODER; const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3; if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) { From 132d25b8a62ea084447e0014a0112c1b371fb3f8 Mon Sep 17 00:00:00 2001 From: Jared Van Bortel Date: Sun, 5 Nov 2023 10:08:57 -0500 Subject: [PATCH 067/206] cuda : fix disabling device with --tensor-split 1,0 (#3951) Co-authored-by: slaren --- ggml-cuda.cu | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index dc14f2f5d76c66..9f873035ad0c05 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -6893,6 +6893,8 @@ static void ggml_cuda_op_mul_mat( int64_t row_low[GGML_CUDA_MAX_DEVICES]; int64_t row_high[GGML_CUDA_MAX_DEVICES]; + int used_devices = 0; + for (int64_t id = 0; id < g_device_count; ++id) { // by default, use all rows row_low[id] = 0; @@ -6920,6 +6922,8 @@ static void ggml_cuda_op_mul_mat( continue; } + used_devices++; + const bool src1_on_device = src1->backend == GGML_BACKEND_GPU && id == g_main_device; const bool dst_on_device = dst->backend == GGML_BACKEND_GPU && id == g_main_device; @@ -6958,12 +6962,12 @@ static void ggml_cuda_op_mul_mat( // if multiple devices are used they need to wait for the main device // here an event is recorded that signals that the main device has finished calculating the input data - if (split && g_device_count > 1) { + if (split && used_devices > 1) { CUDA_CHECK(ggml_cuda_set_device(g_main_device)); CUDA_CHECK(cudaEventRecord(src0_extra->events[g_main_device][0], g_cudaStreams[g_main_device][0])); } - const int64_t src1_col_stride = split && g_device_count > 1 ? MUL_MAT_SRC1_COL_STRIDE : ne11; + const int64_t src1_col_stride = split && used_devices > 1 ? MUL_MAT_SRC1_COL_STRIDE : ne11; for (int64_t src1_col_0 = 0; src1_col_0 < ne11; src1_col_0 += src1_col_stride) { const int64_t is = split ? (src1_col_0/src1_col_stride) % MAX_STREAMS : 0; const int64_t src1_ncols = src1_col_0 + src1_col_stride > ne11 ? 
ne11 - src1_col_0 : src1_col_stride; @@ -7079,6 +7083,9 @@ static void ggml_cuda_op_mul_mat( } for (int64_t id = 0; id < g_device_count; ++id) { + if ((!split && id != g_main_device) || row_low[id] == row_high[id]) { + continue; + } CUDA_CHECK(ggml_cuda_set_device(id)); // free buffers again when done @@ -7103,6 +7110,9 @@ static void ggml_cuda_op_mul_mat( CUDA_CHECK(ggml_cuda_set_device(g_main_device)); for (int64_t id = 0; id < g_device_count; ++id) { + if (row_low[id] == row_high[id]) { + continue; + } for (int64_t is = 0; is < is_max; ++is) { CUDA_CHECK(cudaStreamWaitEvent(g_cudaStreams[g_main_device][0], src0_extra->events[id][is], 0)); } @@ -7400,7 +7410,7 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { const bool all_on_device = - (src0->backend == GGML_BACKEND_GPU) && + (src0->backend == GGML_BACKEND_GPU || src0->backend == GGML_BACKEND_GPU_SPLIT) && (src1->backend == GGML_BACKEND_GPU) && ( dst->backend == GGML_BACKEND_GPU); From bb60fd0bf6bb270744d86dd45b3a95af01b7de45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Th=C3=A1i=20Ho=C3=A0ng=20T=C3=A2m?= <75922889+RoyalHeart@users.noreply.github.com> Date: Sun, 5 Nov 2023 23:15:27 +0700 Subject: [PATCH 068/206] server : fix typo for --alias shortcut from -m to -a (#3958) --- examples/server/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/server/README.md b/examples/server/README.md index 715007735c122f..089ebe2d1533f8 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -7,7 +7,7 @@ Command line options: - `--threads N`, `-t N`: Set the number of threads to use during generation. - `-tb N, --threads-batch N`: Set the number of threads to use during batch and prompt processing. If not specified, the number of threads will be set to the number of threads used for generation. - `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.gguf`). -- `-m ALIAS`, `--alias ALIAS`: Set an alias for the model. The alias will be returned in API responses. +- `-a ALIAS`, `--alias ALIAS`: Set an alias for the model. The alias will be returned in API responses. - `-c N`, `--ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference. The size may differ in other models, for example, baichuan models were build with a context of 4096. - `-ngl N`, `--n-gpu-layers N`: When compiled with appropriate support (currently CLBlast or cuBLAS), this option allows offloading some layers to the GPU for computation. Generally results in increased performance. - `-mg i, --main-gpu i`: When using multiple GPUs this option controls which GPU is used for small tensors for which the overhead of splitting the computation across all GPUs is not worthwhile. The GPU in question will use slightly more VRAM to store a scratch buffer for temporary results. By default GPU 0 is used. Requires cuBLAS. 
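A minimal usage sketch for the corrected short flag documented above: `-a` sets the alias reported in API responses while `-m` still selects the model file. The paths and values below are placeholders, not taken from the patch.

```sh
# hypothetical invocation: -m selects the GGUF model file, -a sets the model alias
# returned by the server API, -c sets the prompt context size
./server -m models/7B/ggml-model.gguf -a my-model -c 2048
```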
From d9ccce2e339ca0396560d18b8637f2c848d72a08 Mon Sep 17 00:00:00 2001 From: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com> Date: Sun, 5 Nov 2023 10:06:06 -0700 Subject: [PATCH 069/206] Allow common process_escapes to handle \x sequences (#3928) * Allow common process_escapes to handle \x sequences * Fix edge case when second hex digit is NUL --- common/common.cpp | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/common/common.cpp b/common/common.cpp index 20cc4a081b2225..37e3ace8ac5d92 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -90,6 +90,19 @@ void process_escapes(std::string& input) { case '\'': input[output_idx++] = '\''; break; case '\"': input[output_idx++] = '\"'; break; case '\\': input[output_idx++] = '\\'; break; + case 'x': + // Handle \x12, etc + if (input_idx + 2 < input_len) { + const char x[3] = { input[input_idx + 1], input[input_idx + 2], 0 }; + char *err_p = nullptr; + const long val = std::strtol(x, &err_p, 16); + if (err_p == x + 2) { + input_idx += 2; + input[output_idx++] = char(val); + break; + } + // Intentionally fall through to default. + } default: input[output_idx++] = '\\'; input[output_idx++] = input[input_idx]; break; } From 2833a6f63c1b87c7f4ac574bcf7a15a2f3bf3ede Mon Sep 17 00:00:00 2001 From: slaren Date: Sun, 5 Nov 2023 18:45:16 +0100 Subject: [PATCH 070/206] ggml-cuda : fix f16 mul mat (#3961) * ggml-cuda : fix f16 mul mat ggml-ci * silence common.cpp warning (bonus) --- common/common.cpp | 2 +- ggml-cuda.cu | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 37e3ace8ac5d92..6a711420004b48 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -101,8 +101,8 @@ void process_escapes(std::string& input) { input[output_idx++] = char(val); break; } - // Intentionally fall through to default. } + // fall through default: input[output_idx++] = '\\'; input[output_idx++] = input[input_idx]; break; } diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 9f873035ad0c05..2d9ffffbf74966 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -7414,6 +7414,8 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1 (src1->backend == GGML_BACKEND_GPU) && ( dst->backend == GGML_BACKEND_GPU); + const bool split = src0->backend == GGML_BACKEND_GPU_SPLIT; + int64_t min_compute_capability = INT_MAX; for (int64_t id = 0; id < g_device_count; ++id) { if (min_compute_capability > g_compute_capabilities[id] && g_tensor_split[id] < (id + 1 < g_device_count ? 
g_tensor_split[id + 1] : 1.0f)) { @@ -7435,13 +7437,13 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1 //printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name); //printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name); - if (all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) { + if (!split && all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) { // KQ single-batch ggml_cuda_mul_mat_vec_p021(src0, src1, dst); - } else if (all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) { + } else if (!split && all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) { // KQV single-batch ggml_cuda_mul_mat_vec_nc(src0, src1, dst); - } else if (all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) { + } else if (!split && all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) { // KQ + KQV multi-batch ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst); } else if (src0->type == GGML_TYPE_F32) { From 381efbf480959bb6d1e247a8b0c2328f22e350f8 Mon Sep 17 00:00:00 2001 From: Damian Stewart Date: Mon, 6 Nov 2023 22:36:23 +0100 Subject: [PATCH 071/206] llava : expose as a shared library for downstream projects (#3613) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * wip llava python bindings compatibility * add external llava API * add base64 in-prompt image support * wip refactor image loading * refactor image load out of llava init * cleanup * further cleanup; move llava-cli into its own file and rename * move base64.hpp into common/ * collapse clip and llava libraries * move llava into its own subdir * wip * fix bug where base64 string was not removed from the prompt * get libllava to output in the right place * expose llava methods in libllama.dylib * cleanup memory usage around clip_image_* * cleanup and refactor *again* * update headerdoc * build with cmake, not tested (WIP) * Editorconfig * Editorconfig * Build with make * Build with make * Fix cyclical depts on Windows * attempt to fix build on Windows * attempt to fix build on Windows * Upd TODOs * attempt to fix build on Windows+CUDA * Revert changes in cmake * Fix according to review comments * Support building as a shared library * address review comments --------- Co-authored-by: M. 
Yusuf Sarıgöz Co-authored-by: Jared Van Bortel --- .gitignore | 2 +- Makefile | 7 +- common/CMakeLists.txt | 1 + common/base64.hpp | 392 +++++++++++++++++++++++++++++++++ examples/llava/CMakeLists.txt | 44 +++- examples/llava/README.md | 7 +- examples/llava/clip.cpp | 86 +++++--- examples/llava/clip.h | 41 +++- examples/llava/llava-cli.cpp | 315 ++++++++++++++++++++++++++ examples/llava/llava-utils.h | 147 ------------- examples/llava/llava.cpp | 230 ++++++++++--------- examples/llava/llava.h | 50 +++++ examples/server/CMakeLists.txt | 2 +- 13 files changed, 996 insertions(+), 328 deletions(-) create mode 100644 common/base64.hpp create mode 100644 examples/llava/llava-cli.cpp delete mode 100644 examples/llava/llava-utils.h create mode 100644 examples/llava/llava.h diff --git a/.gitignore b/.gitignore index 50cbd0b47cae36..708e8582e16c49 100644 --- a/.gitignore +++ b/.gitignore @@ -46,7 +46,7 @@ models-mnt /infill /libllama.so /llama-bench -/llava +/llava-cli /main /metal /perplexity diff --git a/Makefile b/Makefile index 300c1e6c7e1276..f2d4fd0312ad90 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # Define the default target now so that it is always the first target BUILD_TARGETS = \ main quantize quantize-stats perplexity embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \ - simple batched batched-bench save-load-state server gguf llama-bench llava baby-llama beam-search \ + simple batched batched-bench save-load-state server gguf llama-bench libllava.a llava-cli baby-llama beam-search \ speculative infill benchmark-matmult parallel finetune export-lora tests/test-c.o # Binaries only useful for tests @@ -617,7 +617,10 @@ convert-llama2c-to-ggml: examples/convert-llama2c-to-ggml/convert-llama2c-to-ggm llama-bench: examples/llama-bench/llama-bench.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -llava: examples/llava/llava.cpp examples/llava/llava-utils.h examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h ggml.o llama.o $(COMMON_DEPS) $(OBJS) +libllava.a: examples/llava/llava.cpp examples/llava/llava.h examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h common/base64.hpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) + $(CXX) $(CXXFLAGS) -static -fPIC -c $< -o $@ $(LDFLAGS) -Wno-cast-qual + +llava-cli: examples/llava/llava-cli.cpp examples/llava/clip.h examples/llava/clip.cpp examples/llava/llava.h examples/llava/llava.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -Wno-cast-qual baby-llama: examples/baby-llama/baby-llama.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS) diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index ac594b2ca84ea8..4f930bdc590592 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -41,6 +41,7 @@ endif() set(TARGET common) add_library(${TARGET} STATIC + base64.hpp common.h common.cpp sampling.h diff --git a/common/base64.hpp b/common/base64.hpp new file mode 100644 index 00000000000000..563247a6e5f7db --- /dev/null +++ b/common/base64.hpp @@ -0,0 +1,392 @@ +/* +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. 
+ +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to +*/ + +#ifndef PUBLIC_DOMAIN_BASE64_HPP_ +#define PUBLIC_DOMAIN_BASE64_HPP_ + +#include +#include +#include +#include + +class base64_error : public std::runtime_error +{ +public: + using std::runtime_error::runtime_error; +}; + +class base64 +{ +public: + enum class alphabet + { + /** the alphabet is detected automatically */ + auto_, + /** the standard base64 alphabet is used */ + standard, + /** like `standard` except that the characters `+` and `/` are replaced by `-` and `_` respectively*/ + url_filename_safe + }; + + enum class decoding_behavior + { + /** if the input is not padded, the remaining bits are ignored */ + moderate, + /** if a padding character is encounter decoding is finished */ + loose + }; + + /** + Encodes all the elements from `in_begin` to `in_end` to `out`. + + @warning The source and destination cannot overlap. The destination must be able to hold at least + `required_encode_size(std::distance(in_begin, in_end))`, otherwise the behavior depends on the output iterator. + + @tparam Input_iterator the source; the returned elements are cast to `std::uint8_t` and should not be greater than + 8 bits + @tparam Output_iterator the destination; the elements written to it are from the type `char` + @param in_begin the beginning of the source + @param in_end the ending of the source + @param out the destination iterator + @param alphabet which alphabet should be used + @returns the iterator to the next element past the last element copied + @throws see `Input_iterator` and `Output_iterator` + */ + template + static Output_iterator encode(Input_iterator in_begin, Input_iterator in_end, Output_iterator out, + alphabet alphabet = alphabet::standard) + { + constexpr auto pad = '='; + const char* alpha = alphabet == alphabet::url_filename_safe + ? 
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" + : "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + + while (in_begin != in_end) { + std::uint8_t i0 = 0, i1 = 0, i2 = 0; + + // first character + i0 = static_cast(*in_begin); + ++in_begin; + + *out = alpha[i0 >> 2 & 0x3f]; + ++out; + + // part of first character and second + if (in_begin != in_end) { + i1 = static_cast(*in_begin); + ++in_begin; + + *out = alpha[((i0 & 0x3) << 4) | (i1 >> 4 & 0x0f)]; + ++out; + } else { + *out = alpha[(i0 & 0x3) << 4]; + ++out; + + // last padding + *out = pad; + ++out; + + // last padding + *out = pad; + ++out; + + break; + } + + // part of second character and third + if (in_begin != in_end) { + i2 = static_cast(*in_begin); + ++in_begin; + + *out = alpha[((i1 & 0xf) << 2) | (i2 >> 6 & 0x03)]; + ++out; + } else { + *out = alpha[(i1 & 0xf) << 2]; + ++out; + + // last padding + *out = pad; + ++out; + + break; + } + + // rest of third + *out = alpha[i2 & 0x3f]; + ++out; + } + + return out; + } + /** + Encodes a string. + + @param str the string that should be encoded + @param alphabet which alphabet should be used + @returns the encoded base64 string + @throws see base64::encode() + */ + static std::string encode(const std::string& str, alphabet alphabet = alphabet::standard) + { + std::string result; + + result.reserve(required_encode_size(str.length()) + 1); + + encode(str.begin(), str.end(), std::back_inserter(result), alphabet); + + return result; + } + /** + Encodes a char array. + + @param buffer the char array + @param size the size of the array + @param alphabet which alphabet should be used + @returns the encoded string + */ + static std::string encode(const char* buffer, std::size_t size, alphabet alphabet = alphabet::standard) + { + std::string result; + + result.reserve(required_encode_size(size) + 1); + + encode(buffer, buffer + size, std::back_inserter(result), alphabet); + + return result; + } + /** + Decodes all the elements from `in_begin` to `in_end` to `out`. `in_begin` may point to the same location as `out`, + in other words: inplace decoding is possible. + + @warning The destination must be able to hold at least `required_decode_size(std::distance(in_begin, in_end))`, + otherwise the behavior depends on the output iterator. 
+ + @tparam Input_iterator the source; the returned elements are cast to `char` + @tparam Output_iterator the destination; the elements written to it are from the type `std::uint8_t` + @param in_begin the beginning of the source + @param in_end the ending of the source + @param out the destination iterator + @param alphabet which alphabet should be used + @param behavior the behavior when an error was detected + @returns the iterator to the next element past the last element copied + @throws base64_error depending on the set behavior + @throws see `Input_iterator` and `Output_iterator` + */ + template + static Output_iterator decode(Input_iterator in_begin, Input_iterator in_end, Output_iterator out, + alphabet alphabet = alphabet::auto_, + decoding_behavior behavior = decoding_behavior::moderate) + { + //constexpr auto pad = '='; + std::uint8_t last = 0; + auto bits = 0; + + while (in_begin != in_end) { + auto c = *in_begin; + ++in_begin; + + if (c == '=') { + break; + } + + auto part = _base64_value(alphabet, c); + + // enough bits for one byte + if (bits + 6 >= 8) { + *out = (last << (8 - bits)) | (part >> (bits - 2)); + ++out; + + bits -= 2; + } else { + bits += 6; + } + + last = part; + } + + // check padding + if (behavior != decoding_behavior::loose) { + while (in_begin != in_end) { + auto c = *in_begin; + ++in_begin; + + if (c != '=') { + throw base64_error("invalid base64 character."); + } + } + } + + return out; + } + /** + Decodes a string. + + @param str the base64 encoded string + @param alphabet which alphabet should be used + @param behavior the behavior when an error was detected + @returns the decoded string + @throws see base64::decode() + */ + static std::string decode(const std::string& str, alphabet alphabet = alphabet::auto_, + decoding_behavior behavior = decoding_behavior::moderate) + { + std::string result; + + result.reserve(max_decode_size(str.length())); + + decode(str.begin(), str.end(), std::back_inserter(result), alphabet, behavior); + + return result; + } + /** + Decodes a string. + + @param buffer the base64 encoded buffer + @param size the size of the buffer + @param alphabet which alphabet should be used + @param behavior the behavior when an error was detected + @returns the decoded string + @throws see base64::decode() + */ + static std::string decode(const char* buffer, std::size_t size, alphabet alphabet = alphabet::auto_, + decoding_behavior behavior = decoding_behavior::moderate) + { + std::string result; + + result.reserve(max_decode_size(size)); + + decode(buffer, buffer + size, std::back_inserter(result), alphabet, behavior); + + return result; + } + /** + Decodes a string inplace. + + @param[in,out] str the base64 encoded string + @param alphabet which alphabet should be used + @param behavior the behavior when an error was detected + @throws base64::decode_inplace() + */ + static void decode_inplace(std::string& str, alphabet alphabet = alphabet::auto_, + decoding_behavior behavior = decoding_behavior::moderate) + { + str.resize(decode(str.begin(), str.end(), str.begin(), alphabet, behavior) - str.begin()); + } + /** + Decodes a char array inplace. 
+ + @param[in,out] str the string array + @param size the length of the array + @param alphabet which alphabet should be used + @param behavior the behavior when an error was detected + @returns the pointer to the next element past the last element decoded + @throws base64::decode_inplace() + */ + static char* decode_inplace(char* str, std::size_t size, alphabet alphabet = alphabet::auto_, + decoding_behavior behavior = decoding_behavior::moderate) + { + return decode(str, str + size, str, alphabet, behavior); + } + /** + Returns the required decoding size for a given size. The value is calculated with the following formula: + + $$ + \lceil \frac{size}{4} \rceil \cdot 3 + $$ + + @param size the size of the encoded input + @returns the size of the resulting decoded buffer; this the absolute maximum + */ + static std::size_t max_decode_size(std::size_t size) noexcept + { + return (size / 4 + (size % 4 ? 1 : 0)) * 3; + } + /** + Returns the required encoding size for a given size. The value is calculated with the following formula: + + $$ + \lceil \frac{size}{3} \rceil \cdot 4 + $$ + + @param size the size of the decoded input + @returns the size of the resulting encoded buffer + */ + static std::size_t required_encode_size(std::size_t size) noexcept + { + return (size / 3 + (size % 3 ? 1 : 0)) * 4; + } + +private: + static std::uint8_t _base64_value(alphabet& alphabet, char c) + { + if (c >= 'A' && c <= 'Z') { + return c - 'A'; + } else if (c >= 'a' && c <= 'z') { + return c - 'a' + 26; + } else if (c >= '0' && c <= '9') { + return c - '0' + 52; + } + + // comes down to alphabet + if (alphabet == alphabet::standard) { + if (c == '+') { + return 62; + } else if (c == '/') { + return 63; + } + } else if (alphabet == alphabet::url_filename_safe) { + if (c == '-') { + return 62; + } else if (c == '_') { + return 63; + } + } // auto detect + else { + if (c == '+') { + alphabet = alphabet::standard; + + return 62; + } else if (c == '/') { + alphabet = alphabet::standard; + + return 63; + } else if (c == '-') { + alphabet = alphabet::url_filename_safe; + + return 62; + } else if (c == '_') { + alphabet = alphabet::url_filename_safe; + + return 63; + } + } + + throw base64_error("invalid base64 character."); + } +}; + +#endif // !PUBLIC_DOMAIN_BASE64_HPP_ diff --git a/examples/llava/CMakeLists.txt b/examples/llava/CMakeLists.txt index 03d32c26efaddd..8ea3e5c836c135 100644 --- a/examples/llava/CMakeLists.txt +++ b/examples/llava/CMakeLists.txt @@ -1,14 +1,36 @@ -set(TARGET clip) -add_library(${TARGET} clip.cpp clip.h) -install(TARGETS ${TARGET} LIBRARY) -target_link_libraries(${TARGET} PRIVATE common ggml ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +add_library(llava OBJECT + llava.cpp + llava.h + clip.cpp + clip.h + ) + +target_link_libraries(llava PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT}) + +target_include_directories(llava PUBLIC .) +target_include_directories(llava PUBLIC ../..) 
+target_include_directories(llava PUBLIC ../../common) + +target_compile_features(llava PRIVATE cxx_std_11) + +add_library(llava_static STATIC $) +if (BUILD_SHARED_LIBS) + set_target_properties(llava PROPERTIES POSITION_INDEPENDENT_CODE ON) + target_compile_definitions(llava PRIVATE LLAMA_SHARED LLAMA_BUILD) + add_library(llava_shared SHARED $) + target_link_libraries(llava_shared PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT}) + install(TARGETS llava_shared LIBRARY) +endif() + if (NOT MSVC) - target_compile_options(${TARGET} PRIVATE -Wno-cast-qual) # stb_image.h + target_compile_options(llava PRIVATE -Wno-cast-qual) # stb_image.h + endif() +if(TARGET BUILD_INFO) + add_dependencies(llava BUILD_INFO) endif() -set(TARGET llava) -add_executable(${TARGET} llava.cpp) -install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama clip ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +set(TARGET llava-cli) +add_executable(llava-cli llava-cli.cpp) +install(TARGETS llava-cli RUNTIME) +target_link_libraries(llava-cli PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT}) +target_compile_features(llava PRIVATE cxx_std_11) diff --git a/examples/llava/README.md b/examples/llava/README.md index fc3446b60fd7db..323c5fdd028355 100644 --- a/examples/llava/README.md +++ b/examples/llava/README.md @@ -9,12 +9,12 @@ models are available. After API is confirmed, more models will be supported / uploaded. ## Usage -Build with cmake or run `make llava` to build it. +Build with cmake or run `make llava-cli` to build it. -After building, run: `./llava` to see the usage. For example: +After building, run: `./llava-cli` to see the usage. For example: ```sh -./llava -m llava-v1.5-7b/ggml-model-q5_k.gguf --mmproj llava-v1.5-7b/mmproj-model-f16.gguf --image path/to/an/image.jpg +./llava-cli -m llava-v1.5-7b/ggml-model-q5_k.gguf --mmproj llava-v1.5-7b/mmproj-model-f16.gguf --image path/to/an/image.jpg ``` **note**: A lower temperature like 0.1 is recommended for better quality. add `--temp 0.1` to the command to do so. @@ -51,7 +51,6 @@ Now both the LLaMA part and the image encoder is in the `llava-v1.5-7b` director ## TODO -- [ ] Support server mode. - [ ] Support non-CPU backend for the image encoding part. - [ ] Support different sampling methods. - [ ] Support more model variants. 
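[Editor's note] The CMake and README changes above turn the old `llava` example into a reusable `llava` library plus a `llava-cli` executable. As a minimal sketch (not part of the patch) of how that library is meant to be driven, assuming a `clip_ctx` and `llama_context` have already been created the way `llava-cli.cpp` below creates them; every function name here comes from the `llava.h` added later in this patch:

```cpp
// Editor's sketch, not part of the patch: drive the new llava library directly.
// Assumes ctx_clip and ctx_llama were set up as in llava-cli.cpp below.
#include "clip.h"
#include "llava.h"
#include "llama.h"

static bool eval_image(struct clip_ctx * ctx_clip, struct llama_context * ctx_llama,
                       const char * image_path, int n_threads, int n_batch, int * n_past) {
    // encode the image once into a reusable embedding
    struct llava_image_embed * embed =
        llava_image_embed_make_with_filename(ctx_clip, n_threads, image_path);
    if (!embed) {
        return false; // not a valid image file
    }

    // write the image embedding into the llama context; *n_past is advanced past it
    const bool ok = llava_eval_image_embed(ctx_llama, embed, n_batch, n_past);

    llava_image_embed_free(embed);
    return ok; // prompt evaluation and sampling continue from *n_past
}
```

Text before and after the image is still evaluated with ordinary `llama_decode` calls, which is what `llava-cli.cpp` below does around its `llava_eval_image_embed` call.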
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index 61932e659543c5..3c909c7d3c6ab2 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -680,26 +680,44 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { return new_clip; } -clip_image_u8 * make_clip_image_u8() { return new clip_image_u8(); } - +clip_image_u8 * make_clip_image_u8() { + auto img = new clip_image_u8(); + return img; +} clip_image_f32 * make_clip_image_f32() { return new clip_image_f32(); } -bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) { - int nx, ny, nc; - auto data = stbi_load(fname, &nx, &ny, &nc, 3); - if (!data) { - fprintf(stderr, "%s: failed to load '%s'\n", __func__, fname); - return false; - } +void clip_image_u8_free(clip_image_u8 * img) { if (img->data) { delete[] img->data; } delete img; } +void clip_image_f32_free(clip_image_f32 * img) { if (img->data) { delete[] img->data; } delete img; } +static void build_clip_img_from_data(const stbi_uc * data, int nx, int ny, clip_image_u8 * img) { img->nx = nx; img->ny = ny; img->size = nx * ny * 3; img->data = new uint8_t[img->size](); memcpy(img->data, data, img->size); +} +bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) { + int nx, ny, nc; + auto data = stbi_load(fname, &nx, &ny, &nc, 3); + if (!data) { + fprintf(stderr, "%s: failed to load image '%s'\n", __func__, fname); + return false; + } + build_clip_img_from_data(data, nx, ny, img); stbi_image_free(data); + return true; +} +bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img) { + int nx, ny, nc; + auto data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, &nc, 3); + if (!data) { + fprintf(stderr, "%s: failed to decode image bytes\n", __func__); + return false; + } + build_clip_img_from_data(data, nx, ny, img); + stbi_image_free(data); return true; } @@ -714,39 +732,40 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip // the logic below is to pad the shorter side to the longer side with a background color: rgb(122, 116, 104) // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156 - clip_image_u8 temp; // we will keep the input image data here temporarily + clip_image_u8 * temp = make_clip_image_u8(); // we will keep the input image data here temporarily if (pad2square && img->nx != img->ny) { int longer_side = std::max(img->nx, img->ny); - temp.nx = longer_side; - temp.ny = longer_side; - temp.size = 3 * longer_side * longer_side; - temp.data = new uint8_t[temp.size](); + temp->nx = longer_side; + temp->ny = longer_side; + temp->size = 3 * longer_side * longer_side; + temp->data = new uint8_t[temp->size](); uint8_t bc[3] = {122, 116, 104}; // bakground color in RGB from LLaVA // fill with background color - for (size_t i = 0; i < temp.size; i++) { - temp.data[i] = bc[i % 3]; + for (size_t i = 0; i < temp->size; i++) { + temp->data[i] = bc[i % 3]; } // copy from the input image for (int y = 0; y < img->ny; y++) { for (int x = 0; x < img->nx; x++) { const int i = 3 * (y * img->nx + x); - const int j = 3 * (y * temp.nx + x); - temp.data[j] = img->data[i]; - temp.data[j+1] = img->data[i+1]; - temp.data[j+2] = img->data[i+2]; + const int j = 3 * (y * temp->nx + x); + temp->data[j] = img->data[i]; + temp->data[j+1] = img->data[i+1]; + temp->data[j+2] = img->data[i+2]; } } } else { - temp.nx = img->nx; - temp.ny = img->ny; - temp.size = img->size; - 
temp.data = img->data; + temp->nx = img->nx; + temp->ny = img->ny; + temp->size = img->size; + temp->data = new uint8_t[temp->size](); + *temp->data = *img->data; // copy } - const int nx = temp.nx; - const int ny = temp.ny; + const int nx = temp->nx; + const int ny = temp->ny; const int nx2 = ctx->vision_model.hparams.image_size; const int ny2 = ctx->vision_model.hparams.image_size; @@ -785,10 +804,10 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip const int j10 = 3 * (y1 * nx + x0) + c; const int j11 = 3 * (y1 * nx + x1) + c; - const float v00 = temp.data[j00]; - const float v01 = temp.data[j01]; - const float v10 = temp.data[j10]; - const float v11 = temp.data[j11]; + const float v00 = temp->data[j00]; + const float v01 = temp->data[j01]; + const float v10 = temp->data[j10]; + const float v11 = temp->data[j11]; const float v0 = v00 * (1.0f - dx) + v01 * dx; const float v1 = v10 * (1.0f - dx) + v11 * dx; @@ -803,6 +822,7 @@ bool clip_image_preprocess(const clip_ctx * ctx, const clip_image_u8 * img, clip } } } + clip_image_u8_free(temp); return true; } @@ -1049,16 +1069,16 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i return true; } -int clip_n_mmproj_embd(struct clip_ctx * ctx) { +int clip_n_mmproj_embd(const struct clip_ctx * ctx) { return ctx->vision_model.mm_2_b->ne[0]; } -int clip_n_patches(struct clip_ctx * ctx) { +int clip_n_patches(const struct clip_ctx * ctx) { auto & params = ctx->vision_model.hparams; return (params.image_size / params.patch_size) * (params.image_size / params.patch_size); } -size_t clip_embd_nbytes(struct clip_ctx * ctx) { +size_t clip_embd_nbytes(const struct clip_ctx * ctx) { return clip_n_patches(ctx) * clip_n_mmproj_embd(ctx) * sizeof(float); } diff --git a/examples/llava/clip.h b/examples/llava/clip.h index 3d7261e299a35f..f11df85de9a73b 100644 --- a/examples/llava/clip.h +++ b/examples/llava/clip.h @@ -1,7 +1,22 @@ #ifndef CLIP_H #define CLIP_H -#include "ggml.h" +#include +#include + +#ifdef LLAMA_SHARED +# if defined(_WIN32) && !defined(__MINGW32__) +# ifdef LLAMA_BUILD +# define CLIP_API __declspec(dllexport) +# else +# define CLIP_API __declspec(dllimport) +# endif +# else +# define CLIP_API __attribute__ ((visibility ("default"))) +# endif +#else +# define CLIP_API +#endif struct clip_ctx; @@ -20,19 +35,20 @@ struct clip_vision_hparams { float eps; }; -struct clip_ctx * clip_model_load(const char * fname, const int verbosity); - -void clip_free(struct clip_ctx * ctx); +/** load mmproj model */ +CLIP_API struct clip_ctx * clip_model_load(const char * fname, const int verbosity); +/** free mmproj model */ +CLIP_API void clip_free(struct clip_ctx * ctx); -size_t clip_embd_nbytes(struct clip_ctx * ctx); -int clip_n_patches(struct clip_ctx * ctx); -int clip_n_mmproj_embd(struct clip_ctx * ctx); +size_t clip_embd_nbytes(const struct clip_ctx * ctx); +int clip_n_patches(const struct clip_ctx * ctx); +int clip_n_mmproj_embd(const struct clip_ctx * ctx); // RGB uint8 image struct clip_image_u8 { int nx; int ny; - uint8_t * data; + uint8_t * data = NULL; size_t size; }; @@ -41,7 +57,7 @@ struct clip_image_u8 { struct clip_image_f32 { int nx; int ny; - float * data; + float * data = NULL; size_t size; }; @@ -57,7 +73,12 @@ struct clip_image_f32_batch { struct clip_image_u8 * make_clip_image_u8(); struct clip_image_f32 * make_clip_image_f32(); -bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img); +CLIP_API void clip_image_u8_free(clip_image_u8 * img); +CLIP_API 
void clip_image_f32_free(clip_image_f32 * img); +CLIP_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img); +/** interpret bytes as an image file with length bytes_length, and use the result to populate img */ +CLIP_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img); + bool clip_image_preprocess(const struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32 * res, const bool pad2square); bool clip_image_encode(const struct clip_ctx * ctx, const int n_threads, struct clip_image_f32 * img, float * vec); diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp new file mode 100644 index 00000000000000..19374c67ff6c59 --- /dev/null +++ b/examples/llava/llava-cli.cpp @@ -0,0 +1,315 @@ +#include "ggml.h" +#include "common.h" +#include "clip.h" +#include "llava.h" +#include "llama.h" + +#include "base64.hpp" + +#include +#include +#include + +static bool eval_tokens(struct llama_context * ctx_llama, std::vector tokens, int n_batch, int * n_past) { + int N = (int) tokens.size(); + for (int i = 0; i < N; i += n_batch) { + int n_eval = (int) tokens.size() - i; + if (n_eval > n_batch) { + n_eval = n_batch; + } + if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) { + fprintf(stderr, "%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past); + return false; + } + *n_past += n_eval; + } + return true; +} + +static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) { + std::vector tokens; + tokens.push_back(id); + return eval_tokens(ctx_llama, tokens, 1, n_past); +} + +static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){ + std::string str2 = str; + std::vector embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos); + eval_tokens(ctx_llama, embd_inp, n_batch, n_past); + return true; +} + +// TODO: use common/sampling.h +static llama_token sample_id(llama_context * ctx_llama, gpt_params & params) { + auto & sparams = params.sparams; + + // out of user input, sample next token + const float temp = sparams.temp; + const int32_t top_k = sparams.top_k <= 0 ? llama_n_vocab(llama_get_model(ctx_llama)) : sparams.top_k; + const float top_p = sparams.top_p; + const float tfs_z = sparams.tfs_z; + const float typical_p = sparams.typical_p; + // const int32_t repeat_last_n = sparams.repeat_last_n < 0 ? 
n_ctx : sparams.repeat_last_n; + // const float repeat_penalty = sparams.repeat_penalty; + // const float alpha_presence = sparams.presence_penalty; + // const float alpha_frequency = sparams.frequency_penalty; + const int mirostat = sparams.mirostat; + const float mirostat_tau = sparams.mirostat_tau; + const float mirostat_eta = sparams.mirostat_eta; + // const bool penalize_nl = sparams.penalize_nl; + + llama_token id = 0; + { + auto logits = llama_get_logits(ctx_llama); + auto n_vocab = llama_n_vocab(llama_get_model(ctx_llama)); + + // Apply params.logit_bias map + for (auto it = sparams.logit_bias.begin(); it != sparams.logit_bias.end(); it++) { + logits[it->first] += it->second; + } + + std::vector candidates; + candidates.reserve(n_vocab); + for (llama_token token_id = 0; token_id < n_vocab; token_id++) { + candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f}); + } + + llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false }; + + if (temp <= 0) { + // Greedy sampling + id = llama_sample_token_greedy(ctx_llama, &candidates_p); + } else { + if (mirostat == 1) { + static float mirostat_mu = 2.0f * mirostat_tau; + const int mirostat_m = 100; + llama_sample_temp(ctx_llama, &candidates_p, temp); + id = llama_sample_token_mirostat(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu); + } else if (mirostat == 2) { + static float mirostat_mu = 2.0f * mirostat_tau; + llama_sample_temp(ctx_llama, &candidates_p, temp); + id = llama_sample_token_mirostat_v2(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu); + } else { + // Temperature sampling + llama_sample_top_k(ctx_llama, &candidates_p, top_k, 1); + llama_sample_tail_free(ctx_llama, &candidates_p, tfs_z, 1); + llama_sample_typical(ctx_llama, &candidates_p, typical_p, 1); + llama_sample_top_p(ctx_llama, &candidates_p, top_p, 1); + llama_sample_temp(ctx_llama, &candidates_p, temp); + id = llama_sample_token(ctx_llama, &candidates_p); + } + } + } + + return id; +} + +static const char * sample(struct llama_context * ctx_llama, gpt_params & params, int * n_past) { + int id = sample_id(ctx_llama, params); + static std::string ret; + if (id == llama_token_eos(llama_get_model(ctx_llama))) { + ret = ""; + } else { + ret = llama_token_to_piece(ctx_llama, id); + } + eval_id(ctx_llama, id, n_past); + return ret.c_str(); +} + +static const char* IMG_BASE64_TAG_BEGIN = ""; + +static void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) { + begin_out = prompt.find(IMG_BASE64_TAG_BEGIN); + end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 0UL : begin_out); +} + +static bool prompt_contains_image(const std::string& prompt) { + size_t begin, end; + find_image_tag_in_prompt(prompt, begin, end); + return (begin != std::string::npos); +} + +// replaces the base64 image tag in the prompt with `replacement` +static llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) { + size_t img_base64_str_start, img_base64_str_end; + find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end); + if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) { + fprintf(stderr, "%s: invalid base64 image tag. 
must be %s%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END); + return NULL; + } + + auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN); + auto base64_bytes_count = img_base64_str_end - base64_bytes_start; + auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count ); + + auto required_bytes = base64::required_encode_size(base64_str.size()); + auto img_bytes = std::vector(required_bytes); + base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin()); + + auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, img_bytes.data(), img_bytes.size()); + if (!embed) { + fprintf(stderr, "%s: could not load image from base64 string.\n", __func__); + return NULL; + } + + return embed; +} + +static std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") { + size_t begin, end; + find_image_tag_in_prompt(prompt, begin, end); + if (begin == std::string::npos || end == std::string::npos) { + return prompt; + } + auto pre = prompt.substr(0, begin); + auto post = prompt.substr(end + strlen(IMG_BASE64_TAG_END)); + return pre + replacement + post; +} + +struct llava_context { + struct clip_ctx * ctx_clip = NULL; + struct llama_context * ctx_llama = NULL; + struct llama_model * model = NULL; +}; + +static void show_additional_info(int /*argc*/, char ** argv) { + printf("\n example usage: %s -m --mmproj --image [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]); + printf(" note: a lower temperature value like 0.1 is recommended for better quality.\n"); +} + +static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_params * params) { + + // load and preprocess the image + llava_image_embed * embed = NULL; + auto prompt = params->prompt; + if (prompt_contains_image(prompt)) { + if (!params->image.empty()) { + printf("using base64 encoded image instead of command line image path\n"); + } + embed = llava_image_embed_make_with_prompt_base64(ctx_llava->ctx_clip, params->n_threads, prompt); + if (!embed) { + fprintf(stderr, "%s: can't load image from prompt\n", __func__); + return NULL; + } + params->prompt = remove_image_from_prompt(prompt); + } else { + embed = llava_image_embed_make_with_filename(ctx_llava->ctx_clip, params->n_threads, params->image.c_str()); + if (!embed) { + fprintf(stderr, "%s: is %s really an image file?\n", __func__, params->image.c_str()); + return NULL; + } + } + + return embed; +} + +static void process_prompt(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, gpt_params * params, const std::string & prompt) { + int n_past = 0; + + const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict; + + // llava chat format is "\nUSER:\n\nASSISTANT:" + eval_string(ctx_llava->ctx_llama, "A chat between a curious human and an artificial intelligence assistant. 
The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:", params->n_batch, &n_past, true); + llava_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past); + eval_string(ctx_llava->ctx_llama, (prompt + "\nASSISTANT:").c_str(), params->n_batch, &n_past, false); + + // generate the response + + printf("\n"); + + for (int i = 0; i < max_tgt_len; i++) { + const char * tmp = sample(ctx_llava->ctx_llama, *params, &n_past); + if (strcmp(tmp, "") == 0) break; + + printf("%s", tmp); + fflush(stdout); + } + + printf("\n"); +} + + +static struct llava_context * llava_init(gpt_params * params) { + const char * clip_path = params->mmproj.c_str(); + + auto prompt = params->prompt; + if (prompt.empty()) { + prompt = "describe the image in detail."; + } + + auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1); + + llama_backend_init(params->numa); + + llama_model_params model_params = llama_model_default_params(); + llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params); + if (model == NULL) { + fprintf(stderr , "%s: error: unable to load model\n" , __func__); + return NULL; + } + + llama_context_params ctx_params = llama_context_default_params(); + + ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings + ctx_params.n_threads = params->n_threads; + ctx_params.n_threads_batch = params->n_threads_batch == -1 ? params->n_threads : params->n_threads_batch; + + llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params); + + if (ctx_llama == NULL) { + fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__); + return NULL; + } + + auto ctx_llava = (struct llava_context *)malloc(sizeof(llava_context)); + + ctx_llava->ctx_llama = ctx_llama; + ctx_llava->ctx_clip = ctx_clip; + ctx_llava->model = model; + return ctx_llava; +} + +static void llava_free(struct llava_context * ctx_llava) { + if (ctx_llava->ctx_clip) { + clip_free(ctx_llava->ctx_clip); + ctx_llava->ctx_clip = NULL; + } + + llama_free(ctx_llava->ctx_llama); + llama_free_model(ctx_llava->model); + llama_backend_free(); +} + +int main(int argc, char ** argv) { + ggml_time_init(); + + gpt_params params; + + if (!gpt_params_parse(argc, argv, params)) { + show_additional_info(argc, argv); + return 1; + } + if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) { + gpt_print_usage(argc, argv, params); + show_additional_info(argc, argv); + return 1; + } + + auto ctx_llava = llava_init(¶ms); + if (ctx_llava == NULL) { + fprintf(stderr, "%s: error: failed to init llava\n", __func__); + return 1; + } + + auto image_embed = load_image(ctx_llava, ¶ms); + + // process the prompt + process_prompt(ctx_llava, image_embed, ¶ms, params.prompt); + + llama_print_timings(ctx_llava->ctx_llama); + + llava_image_embed_free(image_embed); + llava_free(ctx_llava); + return 0; +} diff --git a/examples/llava/llava-utils.h b/examples/llava/llava-utils.h deleted file mode 100644 index 320c719670b02d..00000000000000 --- a/examples/llava/llava-utils.h +++ /dev/null @@ -1,147 +0,0 @@ -#pragma once - -// this one and clip lib will be eventually merged to a single lib, let's keep it this way for now - -#include "common.h" -#include "llama.h" - -#include -#include -#include - -inline bool eval_image_embd(llama_context * ctx_llama, float * embd, int N, int n_batch, int * n_past) { - int n_embd = llama_n_embd(llama_get_model(ctx_llama)); - - for (int 
i = 0; i < N; i += n_batch) { - int n_eval = N - i; - if (n_eval > n_batch) { - n_eval = n_batch; - } - llama_batch batch = {int32_t(n_eval), nullptr, (embd+i*n_embd), nullptr, nullptr, nullptr, nullptr, *n_past, 1, 0, }; - if (llama_decode(ctx_llama, batch)) { - fprintf(stderr, "%s : failed to eval\n", __func__); - return false; - } - *n_past += n_eval; - } - return true; -} - -inline bool eval_tokens(struct llama_context * ctx_llama, std::vector tokens, int n_batch, int * n_past) { - int N = (int) tokens.size(); - for (int i = 0; i < N; i += n_batch) { - int n_eval = (int) tokens.size() - i; - if (n_eval > n_batch) { - n_eval = n_batch; - } - if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) { - fprintf(stderr, "%s : failed to eval\n", __func__); - return false; - } - *n_past += n_eval; - } - return true; -} - -inline bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) { - std::vector tokens; - tokens.push_back(id); - return eval_tokens(ctx_llama, tokens, 1, n_past); -} - -inline bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){ - std::string str2 = str; - std::vector embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos); - eval_tokens(ctx_llama, embd_inp, n_batch, n_past); - return true; -} - -// TODO: use common/sampling.h -inline llama_token sample_id(llama_context * ctx_llama, gpt_params & params) { - auto & sparams = params.sparams; - - // out of user input, sample next token - const float temp = sparams.temp; - const int32_t top_k = sparams.top_k <= 0 ? llama_n_vocab(llama_get_model(ctx_llama)) : sparams.top_k; - const float top_p = sparams.top_p; - const float tfs_z = sparams.tfs_z; - const float typical_p = sparams.typical_p; - // const int32_t repeat_last_n = sparams.repeat_last_n < 0 ? 
n_ctx : sparams.repeat_last_n; - // const float repeat_penalty = sparams.repeat_penalty; - // const float alpha_presence = sparams.presence_penalty; - // const float alpha_frequency = sparams.frequency_penalty; - const int mirostat = sparams.mirostat; - const float mirostat_tau = sparams.mirostat_tau; - const float mirostat_eta = sparams.mirostat_eta; - // const bool penalize_nl = sparams.penalize_nl; - - llama_token id = 0; - { - auto logits = llama_get_logits(ctx_llama); - auto n_vocab = llama_n_vocab(llama_get_model(ctx_llama)); - - // Apply params.logit_bias map - for (auto it = sparams.logit_bias.begin(); it != sparams.logit_bias.end(); it++) { - logits[it->first] += it->second; - } - - std::vector candidates; - candidates.reserve(n_vocab); - for (llama_token token_id = 0; token_id < n_vocab; token_id++) { - candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f}); - } - - llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false }; - - // TODO: Apply penalties - // float nl_logit = logits[llama_token_nl(ctx)]; - // auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx); - // llama_sample_repetition_penalty(ctx, &candidates_p, - // last_n_tokens.data() + last_n_tokens.size() - last_n_repeat, - // last_n_repeat, repeat_penalty); - // llama_sample_frequency_and_presence_penalties(ctx, &candidates_p, - // last_n_tokens.data() + last_n_tokens.size() - last_n_repeat, - // last_n_repeat, alpha_frequency, alpha_presence); - // if (!penalize_nl) { - // logits[llama_token_nl(ctx)] = nl_logit; - // } - - if (temp <= 0) { - // Greedy sampling - id = llama_sample_token_greedy(ctx_llama, &candidates_p); - } else { - if (mirostat == 1) { - static float mirostat_mu = 2.0f * mirostat_tau; - const int mirostat_m = 100; - llama_sample_temp(ctx_llama, &candidates_p, temp); - id = llama_sample_token_mirostat(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu); - } else if (mirostat == 2) { - static float mirostat_mu = 2.0f * mirostat_tau; - llama_sample_temp(ctx_llama, &candidates_p, temp); - id = llama_sample_token_mirostat_v2(ctx_llama, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu); - } else { - // Temperature sampling - llama_sample_top_k(ctx_llama, &candidates_p, top_k, 1); - llama_sample_tail_free(ctx_llama, &candidates_p, tfs_z, 1); - llama_sample_typical(ctx_llama, &candidates_p, typical_p, 1); - llama_sample_top_p(ctx_llama, &candidates_p, top_p, 1); - llama_sample_temp(ctx_llama, &candidates_p, temp); - id = llama_sample_token(ctx_llama, &candidates_p); - } - } - } - - return id; -} - -inline const char * sample(struct llama_context * ctx_llama, gpt_params & params, int * n_past) { - int id = sample_id(ctx_llama, params); - static std::string ret; - if (id == llama_token_eos(llama_get_model(ctx_llama))) { - ret = ""; - } else { - ret = llama_token_to_piece(ctx_llama, id); - } - eval_id(ctx_llama, id, n_past); - return ret.c_str(); -} diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp index f0974d5bcf452a..d10bcf2d224657 100644 --- a/examples/llava/llava.cpp +++ b/examples/llava/llava.cpp @@ -1,164 +1,156 @@ #include "clip.h" -#include "llava-utils.h" #include "common.h" #include "llama.h" +#include "llava.h" #include #include #include -static void show_additional_info(int /*argc*/, char ** argv) { - printf("\n example usage: %s -m --mmproj --image [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]); - printf(" note: a lower temperature value like 0.1 is 
recommended for better quality.\n"); -} - -int main(int argc, char ** argv) { - ggml_time_init(); - - gpt_params params; +#include "base64.hpp" - if (!gpt_params_parse(argc, argv, params)) { - show_additional_info(argc, argv); - return 1; +static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_pos) { + clip_image_f32 * img_res = make_clip_image_f32(); + if (!clip_image_preprocess(ctx_clip, img, img_res, /*pad2square =*/ true)) { + fprintf(stderr, "%s: unable to preprocess image\n", __func__); + clip_image_f32_free(img_res); + return false; } - if (params.mmproj.empty() || params.image.empty()) { - gpt_print_usage(argc, argv, params); - show_additional_info(argc, argv); - return 1; - } + *n_img_pos = clip_n_patches(ctx_clip); - const char * clip_path = params.mmproj.c_str(); - const char * img_path = params.image.c_str(); + const int64_t t_img_enc_start_us = ggml_time_us(); + bool encoded = clip_image_encode(ctx_clip, n_threads, img_res, image_embd); + clip_image_f32_free(img_res); + if (!encoded) { + fprintf(stderr, "Unable to encode image\n"); - if (params.prompt.empty()) { - params.prompt = "describe the image in detail."; + return false; } - auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1); - - // load and preprocess the image - clip_image_u8 img; - clip_image_f32 img_res; + const int64_t t_img_enc_end_us = ggml_time_us(); + float t_img_enc_ms = (t_img_enc_end_us - t_img_enc_start_us) / 1000.0; - if (!clip_image_load_from_file(img_path, &img)) { - fprintf(stderr, "%s: is %s really an image file?\n", __func__, img_path); + printf("\n%s: image encoded in %8.2f ms by CLIP (%8.2f ms per image patch)\n", __func__, t_img_enc_ms, t_img_enc_ms / *n_img_pos); - clip_free(ctx_clip); - return 1; - } - - if (!clip_image_preprocess(ctx_clip, &img, &img_res, /*pad2square =*/ true)) { - fprintf(stderr, "%s: unable to preprocess %s\n", __func__, img_path); + return true; +} - clip_free(ctx_clip); - return 1; +bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip) { + // make sure that the correct mmproj was used, i.e., compare apples to apples + int n_llama_embd = llama_n_embd(llama_get_model(ctx_llama)); + auto n_image_embd = clip_n_mmproj_embd(ctx_clip); + if (n_image_embd != n_llama_embd) { + printf("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). 
Make sure that you use the correct mmproj file.\n", __func__, n_image_embd, n_llama_embd); + return false; } + return true; +} - int n_img_pos = clip_n_patches(ctx_clip); - int n_img_embd = clip_n_mmproj_embd(ctx_clip); - +static bool llava_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out) { float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)); - if (!image_embd) { fprintf(stderr, "Unable to allocate memory for image embeddings\n"); - - return 1; + free(image_embd); + return false; } - const int64_t t_img_enc_start_us = ggml_time_us(); - if (!clip_image_encode(ctx_clip, params.n_threads, &img_res, image_embd)) { - fprintf(stderr, "Unable to encode image\n"); - - return 1; + int n_img_pos; + if (!encode_image_with_clip(ctx_clip, n_threads, img, image_embd, &n_img_pos)) { + fprintf(stderr, "%s: cannot encode image, aborting\n", __func__); + free(image_embd); + return false; } - const int64_t t_img_enc_end_us = ggml_time_us(); + *image_embd_out = image_embd; + *n_img_pos_out = n_img_pos; - // we get the embeddings, free up the memory required for CLIP - clip_free(ctx_clip); - - llama_backend_init(params.numa); - - llama_model_params model_params = llama_model_default_params(); - model_params.n_gpu_layers = params.n_gpu_layers; - model_params.main_gpu = params.main_gpu; - model_params.tensor_split = params.tensor_split; - model_params.use_mmap = params.use_mmap; - model_params.use_mlock = params.use_mlock; + return true; +} - llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params); - if (model == NULL) { - fprintf(stderr , "%s: error: unable to load model\n" , __func__); - return 1; +bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_embed * image_embed, int n_batch, int * n_past) { + int n_embd = llama_n_embd(llama_get_model(ctx_llama)); + + for (int i = 0; i < image_embed->n_image_pos; i += n_batch) { + int n_eval = image_embed->n_image_pos - i; + if (n_eval > n_batch) { + n_eval = n_batch; + } + llama_batch batch = {int32_t(n_eval), nullptr, (image_embed->embed+i*n_embd), nullptr, nullptr, nullptr, nullptr, *n_past, 1, 0, }; + if (llama_decode(ctx_llama, batch)) { + fprintf(stderr, "%s : failed to eval\n", __func__); + return false; + } + *n_past += n_eval; } + return true; +} - llama_context_params ctx_params = llama_context_default_params(); - - ctx_params.n_ctx = params.n_ctx < 2048 ? 2048 : params.n_ctx; // we need a longer context size to process image embeddings - ctx_params.n_threads = params.n_threads; - ctx_params.n_threads_batch = params.n_threads_batch == -1 ? 
params.n_threads : params.n_threads_batch; - ctx_params.seed = params.seed; - - llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params); - - if (ctx_llama == NULL) { - fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__); - return 1; +LLAVA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length) { + clip_image_u8 * img = make_clip_image_u8(); + if (!clip_image_load_from_bytes(image_bytes, image_bytes_length, img)) { + clip_image_u8_free(img); + fprintf(stderr, "%s: can't load image from bytes, is it a valid image?", __func__); + return NULL; } - // make sure that the correct mmproj was used, i.e., compare apples to apples - const int n_llama_embd = llama_n_embd(llama_get_model(ctx_llama)); - - if (n_img_embd != n_llama_embd) { - printf("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_img_embd, n_llama_embd); - - llama_free(ctx_llama); - llama_free_model(model); - llama_backend_free(); - free(image_embd); - - return 1; + float* image_embed = NULL; + int n_image_pos = 0; + bool image_embed_result = llava_image_embed_make_with_clip_img(ctx_clip, n_threads, img, &image_embed, &n_image_pos); + if (!image_embed_result) { + clip_image_u8_free(img); + fprintf(stderr, "%s: coulnd't embed the image\n", __func__); + return NULL; } - // process the prompt - // llava chat format is "USER: \n\nASSISTANT:" - - int n_past = 0; - - const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict; - - eval_string(ctx_llama, "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:", params.n_batch, &n_past, true); - eval_image_embd(ctx_llama, image_embd, n_img_pos, params.n_batch, &n_past); - eval_string(ctx_llama, (params.prompt + "\nASSISTANT:").c_str(), params.n_batch, &n_past, false); - - // generate the response + clip_image_u8_free(img); + auto result = (llava_image_embed*)malloc(sizeof(llava_image_embed)); + result->embed = image_embed; + result->n_image_pos = n_image_pos; + return result; +} - printf("\n"); - printf("prompt: '%s'\n", params.prompt.c_str()); - printf("\n"); +static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long *sizeOut) { + auto file = fopen(path, "rb"); + if (file == NULL) { + fprintf(stderr, "%s: can't read file %s\n", __func__, path); + return false; + } - for (int i = 0; i < max_tgt_len; i++) { - const char * tmp = sample(ctx_llama, params, &n_past); - if (strcmp(tmp, "") == 0) break; + fseek(file, 0, SEEK_END); + auto fileSize = ftell(file); + fseek(file, 0, SEEK_SET); - printf("%s", tmp); - fflush(stdout); + auto buffer = (unsigned char *)malloc(fileSize); // Allocate memory to hold the file data + if (buffer == NULL) { + fprintf(stderr, "%s: failed to alloc %ld bytes for file %s\n", __func__, fileSize, path); + perror("Memory allocation error"); + fclose(file); + return false; } + fread(buffer, 1, fileSize, file); // Read the file into the buffer + fclose(file); // Close the file - printf("\n"); - - { - const float t_img_enc_ms = (t_img_enc_end_us - t_img_enc_start_us) / 1000.0; + *bytesOut = buffer; + *sizeOut = fileSize; + return true; +} - printf("\n%s: image encoded in %8.2f ms by CLIP (%8.2f ms per image patch)\n", __func__, t_img_enc_ms, t_img_enc_ms / n_img_pos); +LLAVA_API 
struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path) { + unsigned char* image_bytes; + long image_bytes_length; + auto loaded = load_file_to_bytes(image_path, &image_bytes, &image_bytes_length); + if (!loaded) { + fprintf(stderr, "%s: failed to load %s\n", __func__, image_path); + return NULL; } - llama_print_timings(ctx_llama); + auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, image_bytes, image_bytes_length); + free(image_bytes); - llama_free(ctx_llama); - llama_free_model(model); - llama_backend_free(); - free(image_embd); + return embed; +} - return 0; +LLAVA_API void llava_image_embed_free(struct llava_image_embed * embed) { + free(embed->embed); + free(embed); } diff --git a/examples/llava/llava.h b/examples/llava/llava.h new file mode 100644 index 00000000000000..e08ce78839dcb8 --- /dev/null +++ b/examples/llava/llava.h @@ -0,0 +1,50 @@ +#ifndef LLAVA_H +#define LLAVA_H + +#include "ggml.h" + + +#ifdef LLAMA_SHARED +# if defined(_WIN32) && !defined(__MINGW32__) +# ifdef LLAMA_BUILD +# define LLAVA_API __declspec(dllexport) +# else +# define LLAVA_API __declspec(dllimport) +# endif +# else +# define LLAVA_API __attribute__ ((visibility ("default"))) +# endif +#else +# define LLAVA_API +#endif + +struct clip_ctx; + +#ifdef __cplusplus +extern "C" { +#endif + +struct llava_image_embed { + float * embed; + int n_image_pos; +}; + +/** sanity check for clip <-> llava embed size match */ +LLAVA_API bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip); + +/** build an image embed from image file bytes */ +LLAVA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length); +/** build an image embed from a path to an image filename */ +LLAVA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path); +LLAVA_API void llava_image_embed_free(struct llava_image_embed * embed); +/** free an embedding made with llava_image_embed_make_* */ + +/** write the image represented by embed into the llama context with batch size n_batch, starting at context pos n_past. on completion, n_past points to the next position in the context after the image embed. */ +LLAVA_API bool llava_eval_image_embed(struct llama_context * ctx_llama, const struct llava_image_embed * embed, int n_batch, int * n_past); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt index 1f0d26f7776894..859cd12c6c6b1f 100644 --- a/examples/server/CMakeLists.txt +++ b/examples/server/CMakeLists.txt @@ -6,7 +6,7 @@ install(TARGETS ${TARGET} RUNTIME) target_compile_definitions(${TARGET} PRIVATE SERVER_VERBOSE=$ ) -target_link_libraries(${TARGET} PRIVATE common llama clip ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${TARGET} PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT}) if (WIN32) TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) endif() From 46876d2a2c92e60579dc732cdb8cbd243b06f317 Mon Sep 17 00:00:00 2001 From: Meng Zhang Date: Mon, 6 Nov 2023 22:49:08 -0800 Subject: [PATCH 072/206] cuda : supports running on CPU for GGML_USE_CUBLAS=ON build (#3946) * protyping the idea that supports running on CPU for a GGML_USE_CUBLAS=on build * doc: add comments to ggml_cublas_loaded() * fix defined(...) 
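[Editor's note] The core idea of this commit is that a GGML_USE_CUBLAS build probes for CUDA devices at runtime instead of assuming they exist. A minimal sketch of the resulting fallback pattern (an editor's illustration mirroring the `llama_host_malloc` / `llama_host_free` changes in `llama.cpp` below, not code from the patch itself):

```c
// Editor's sketch of the runtime fallback this patch introduces: CUDA code paths
// are only taken when ggml_cublas_loaded() reports that device init succeeded.
#include <stdlib.h>
#include "ggml-cuda.h"

static void * host_buffer_malloc(size_t n) {
#ifdef GGML_USE_CUBLAS
    if (ggml_cublas_loaded()) {
        return ggml_cuda_host_malloc(n); // pinned host memory, CUDA available
    }
#endif
    return malloc(n);                    // CPU-only fallback
}

static void host_buffer_free(void * ptr) {
#ifdef GGML_USE_CUBLAS
    if (ggml_cublas_loaded()) {
        ggml_cuda_host_free(ptr);
        return;
    }
#endif
    free(ptr);
}
```

Because `ggml_cublas_loaded()` is fixed once `ggml_init_cublas()` has run, the allocate and free paths stay consistent for the lifetime of the process.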
--- ggml-cuda.cu | 17 ++++- ggml-cuda.h | 5 ++ llama.cpp | 179 ++++++++++++++++++++++++++++++--------------------- 3 files changed, 126 insertions(+), 75 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 2d9ffffbf74966..f87f18802c8f87 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -5790,6 +5790,11 @@ static void ggml_cuda_pool_free(void * ptr, size_t size) { CUDA_CHECK(cudaFree(ptr)); } +static bool g_cublas_loaded = false; + +bool ggml_cublas_loaded(void) { + return g_cublas_loaded; +} void ggml_init_cublas() { static bool initialized = false; @@ -5803,7 +5808,12 @@ void ggml_init_cublas() { CUDA_CHECK(cudaDeviceSynchronize()); #endif - CUDA_CHECK(cudaGetDeviceCount(&g_device_count)); + if (cudaGetDeviceCount(&g_device_count) != cudaSuccess) { + initialized = true; + g_cublas_loaded = false; + return; + } + GGML_ASSERT(g_device_count <= GGML_CUDA_MAX_DEVICES); int64_t total_vram = 0; #if defined(GGML_CUDA_FORCE_MMQ) @@ -5851,6 +5861,7 @@ void ggml_init_cublas() { // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, nullptr)); initialized = true; + g_cublas_loaded = true; } } @@ -7158,6 +7169,8 @@ static void ggml_cuda_rms_norm(const ggml_tensor * src0, const ggml_tensor * src } bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) { + if (!g_cublas_loaded) return false; + const int64_t ne10 = src1->ne[0]; const int64_t ne0 = dst->ne[0]; @@ -7843,6 +7856,8 @@ void ggml_cuda_free_scratch() { } bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) { + if (!g_cublas_loaded) return false; + ggml_cuda_func_t func; const bool any_on_device = tensor->backend == GGML_BACKEND_GPU || (tensor->src[0] != nullptr && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT)) diff --git a/ggml-cuda.h b/ggml-cuda.h index 57adc9cf34bc5b..528e66c33a2073 100644 --- a/ggml-cuda.h +++ b/ggml-cuda.h @@ -17,7 +17,12 @@ extern "C" { #define GGML_CUDA_MAX_DEVICES 16 +// Always success. To check if CUDA is actually loaded, use `ggml_cublas_loaded`. GGML_API void ggml_init_cublas(void); + +// Returns `true` if there are available CUDA devices and cublas loads successfully; otherwise, it returns `false`. 
+GGML_API bool ggml_cublas_loaded(void); + GGML_API void * ggml_cuda_host_malloc(size_t size); GGML_API void ggml_cuda_host_free(void * ptr); diff --git a/llama.cpp b/llama.cpp index e165390005c850..d220ff3e9b130c 100644 --- a/llama.cpp +++ b/llama.cpp @@ -596,19 +596,37 @@ static void ggml_graph_compute_helper(std::vector & buf, ggml_cgraph * // llama helpers // +inline void * llama_host_malloc(size_t n) { #ifdef GGML_USE_CUBLAS -# define llama_host_malloc(n) ggml_cuda_host_malloc(n) -# define llama_host_free(data) ggml_cuda_host_free(data) + if (ggml_cublas_loaded()) { + return ggml_cuda_host_malloc(n); + } else { + return malloc(n); + } #elif GGML_USE_METAL -# define llama_host_malloc(n) ggml_metal_host_malloc(n) -# define llama_host_free(data) ggml_metal_host_free(data) + return ggml_metal_host_malloc(n); #elif GGML_USE_CPU_HBM -# define llama_host_malloc(n) hbw_malloc(n) -# define llama_host_free(data) if (data != NULL) hbw_free(data) + return hbw_malloc(n); #else -# define llama_host_malloc(n) malloc(n) -# define llama_host_free(data) free(data) + return malloc(n); #endif +} + +inline void llama_host_free(void * ptr) { +#ifdef GGML_USE_CUBLAS + if (ggml_cublas_loaded()) { + return ggml_cuda_host_free(ptr); + } else { + return free(ptr); + } +#elif GGML_USE_METAL + return ggml_metal_host_free(ptr); +#elif GGML_USE_CPU_HBM + return hbw_free(ptr); +#else + return free(ptr); +#endif +} #if defined(_WIN32) static std::string llama_format_win_err(DWORD err) { @@ -1200,9 +1218,11 @@ struct llama_kv_cache { } #ifdef GGML_USE_CUBLAS - ggml_cuda_free_data(k); - ggml_cuda_free_data(v); -#endif // GGML_USE_CUBLAS + if (ggml_cublas_loaded()) { + ggml_cuda_free_data(k); + ggml_cuda_free_data(v); + } +#endif } }; @@ -1302,11 +1322,15 @@ struct llama_model { } #ifdef GGML_USE_CUBLAS - for (size_t i = 0; i < tensors_by_name.size(); ++i) { - ggml_cuda_free_data(tensors_by_name[i].second); + if (ggml_cublas_loaded()) { + for (size_t i = 0; i < tensors_by_name.size(); ++i) { + ggml_cuda_free_data(tensors_by_name[i].second); + } + ggml_cuda_free_scratch(); } - ggml_cuda_free_scratch(); -#elif defined(GGML_USE_CLBLAST) +#endif + +#if defined(GGML_USE_CLBLAST) for (size_t i = 0; i < tensors_by_name.size(); ++i) { ggml_cl_free_data(tensors_by_name[i].second); } @@ -1418,23 +1442,26 @@ static bool llama_kv_cache_init( ggml_set_name(cache.v, "cache_v"); (void) n_gpu_layers; + #ifdef GGML_USE_CUBLAS - size_t vram_kv_cache = 0; + if (ggml_cublas_loaded()) { + size_t vram_kv_cache = 0; - if (n_gpu_layers > (int)n_layer + 1) { - ggml_cuda_assign_buffers_no_scratch(cache.v); - LLAMA_LOG_INFO("%s: offloading v cache to GPU\n", __func__); - vram_kv_cache += ggml_nbytes(cache.v); - } - if (n_gpu_layers > (int)n_layer + 2) { - ggml_cuda_assign_buffers_no_scratch(cache.k); - LLAMA_LOG_INFO("%s: offloading k cache to GPU\n", __func__); - vram_kv_cache += ggml_nbytes(cache.k); - } - if (vram_kv_cache > 0) { - LLAMA_LOG_INFO("%s: VRAM kv self = %.2f MB\n", __func__, vram_kv_cache / 1024.0 / 1024.0); + if (n_gpu_layers > (int)n_layer + 1) { + ggml_cuda_assign_buffers_no_scratch(cache.v); + LLAMA_LOG_INFO("%s: offloading v cache to GPU\n", __func__); + vram_kv_cache += ggml_nbytes(cache.v); + } + if (n_gpu_layers > (int)n_layer + 2) { + ggml_cuda_assign_buffers_no_scratch(cache.k); + LLAMA_LOG_INFO("%s: offloading k cache to GPU\n", __func__); + vram_kv_cache += ggml_nbytes(cache.k); + } + if (vram_kv_cache > 0) { + LLAMA_LOG_INFO("%s: VRAM kv self = %.2f MB\n", __func__, vram_kv_cache / 1024.0 / 1024.0); + } } -#endif // 
GGML_USE_CUBLAS +#endif return true; } @@ -2521,18 +2548,22 @@ static void llm_load_tensors( } (void) main_gpu; + + enum ggml_backend_type llama_backend_offload = GGML_BACKEND_CPU; + enum ggml_backend_type llama_backend_offload_split = GGML_BACKEND_CPU; + #ifdef GGML_USE_CUBLAS - LLAMA_LOG_INFO("%s: using " GGML_CUDA_NAME " for GPU acceleration\n", __func__); - ggml_cuda_set_main_device(main_gpu); -#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU -#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT + if (ggml_cublas_loaded()) { + LLAMA_LOG_INFO("%s: using " GGML_CUDA_NAME " for GPU acceleration\n", __func__); + ggml_cuda_set_main_device(main_gpu); + + llama_backend_offload = GGML_BACKEND_GPU; + llama_backend_offload_split = GGML_BACKEND_GPU_SPLIT; + } #elif defined(GGML_USE_CLBLAST) - LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__); -#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU -#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU -#else -#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CPU -#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_CPU + LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__); + llama_backend_offload = GGML_BACKEND_GPU; + llama_backend_offload_split = GGML_BACKEND_GPU; #endif // prepare memory for the weights @@ -2559,12 +2590,12 @@ static void llm_load_tensors( // norm is not performance relevant on its own but keeping it in VRAM reduces data copying // on Windows however this is detrimental unless everything is on the GPU #ifndef _WIN32 - backend_norm = LLAMA_BACKEND_OFFLOAD; + backend_norm = llama_backend_offload; #else - backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; + backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : llama_backend_offload; #endif // _WIN32 - backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT; + backend_output = llama_backend_offload_split; } else { backend_norm = GGML_BACKEND_CPU; backend_output = GGML_BACKEND_CPU; @@ -2588,8 +2619,8 @@ static void llm_load_tensors( model.layers.resize(n_layer); for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT + const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT + const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT auto & layer = model.layers[i]; @@ -2625,12 +2656,12 @@ static void llm_load_tensors( // norm is not performance relevant on its own but keeping it in VRAM reduces data copying // on Windows however this is detrimental unless everything is on the GPU #ifndef _WIN32 - backend_norm = LLAMA_BACKEND_OFFLOAD; + backend_norm = llama_backend_offload; #else - backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; + backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : llama_backend_offload; #endif // _WIN32 - backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT; + backend_output = llama_backend_offload_split; } else { backend_norm = GGML_BACKEND_CPU; backend_output = GGML_BACKEND_CPU; @@ -2654,8 +2685,8 @@ static void llm_load_tensors( model.layers.resize(n_layer); for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? 
GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT + const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT + const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT auto & layer = model.layers[i]; @@ -2695,12 +2726,12 @@ static void llm_load_tensors( // norm is not performance relevant on its own but keeping it in VRAM reduces data copying // on Windows however this is detrimental unless everything is on the GPU #ifndef _WIN32 - backend_norm = LLAMA_BACKEND_OFFLOAD; + backend_norm = llama_backend_offload; #else - backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; + backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : llama_backend_offload; #endif // _WIN32 - backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT; + backend_output = llama_backend_offload_split; } else { backend_norm = GGML_BACKEND_CPU; backend_output = GGML_BACKEND_CPU; @@ -2726,8 +2757,8 @@ static void llm_load_tensors( model.layers.resize(n_layer); for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT + const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT + const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT auto & layer = model.layers[i]; @@ -2772,12 +2803,12 @@ static void llm_load_tensors( // norm is not performance relevant on its own but keeping it in VRAM reduces data copying // on Windows however this is detrimental unless everything is on the GPU #ifndef _WIN32 - backend_norm = LLAMA_BACKEND_OFFLOAD; + backend_norm = llama_backend_offload; #else - backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; + backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : llama_backend_offload; #endif // _WIN32 - backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT; + backend_output = llama_backend_offload_split; } else { backend_norm = GGML_BACKEND_CPU; backend_output = GGML_BACKEND_CPU; @@ -2803,8 +2834,8 @@ static void llm_load_tensors( model.layers.resize(n_layer); for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT + const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT + const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT auto & layer = model.layers[i]; @@ -2849,12 +2880,12 @@ static void llm_load_tensors( // norm is not performance relevant on its own but keeping it in VRAM reduces data copying // on Windows however this is detrimental unless everything is on the GPU #ifndef _WIN32 - backend_norm = LLAMA_BACKEND_OFFLOAD; + backend_norm = llama_backend_offload; #else - backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; + backend_norm = n_gpu_layers <= (int) n_layer + 2 ? 
GGML_BACKEND_CPU : llama_backend_offload; #endif // _WIN32 - backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT; + backend_output = llama_backend_offload_split; } else { backend_norm = GGML_BACKEND_CPU; backend_output = GGML_BACKEND_CPU; @@ -2877,8 +2908,8 @@ static void llm_load_tensors( const int i_gpu_start = n_layer - n_gpu_layers; model.layers.resize(n_layer); for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; - const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; + const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; + const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; auto & layer = model.layers[i]; layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend); @@ -2915,12 +2946,12 @@ static void llm_load_tensors( // norm is not performance relevant on its own but keeping it in VRAM reduces data copying // on Windows however this is detrimental unless everything is on the GPU #ifndef _WIN32 - backend_norm = LLAMA_BACKEND_OFFLOAD; + backend_norm = llama_backend_offload; #else - backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; + backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : llama_backend_offload; #endif // _WIN32 - backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT; + backend_output = llama_backend_offload_split; } else { backend_norm = GGML_BACKEND_CPU; backend_output = GGML_BACKEND_CPU; @@ -2946,8 +2977,8 @@ static void llm_load_tensors( model.layers.resize(n_layer); for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT + const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT + const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT auto & layer = model.layers[i]; @@ -2993,12 +3024,12 @@ static void llm_load_tensors( // norm is not performance relevant on its own but keeping it in VRAM reduces data copying // on Windows however this is detrimental unless everything is on the GPU #ifndef _WIN32 - backend_norm = LLAMA_BACKEND_OFFLOAD; + backend_norm = llama_backend_offload; #else - backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; + backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : llama_backend_offload; #endif // _WIN32 - backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT; + backend_output = llama_backend_offload_split; } else { backend_norm = GGML_BACKEND_CPU; backend_output = GGML_BACKEND_CPU; @@ -3022,8 +3053,8 @@ static void llm_load_tensors( model.layers.resize(n_layer); for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT + const ggml_backend_type backend = int(i) < i_gpu_start ? 
GGML_BACKEND_CPU : llama_backend_offload; // NOLINT + const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT auto & layer = model.layers[i]; From 54b4df8886103b436a4bb3b60f4d84824f9e8868 Mon Sep 17 00:00:00 2001 From: Matthew Tejo Date: Mon, 6 Nov 2023 23:43:59 -0800 Subject: [PATCH 073/206] Use params when loading models in llava-cli (#3976) llava-cli was loading models with default params and ignoring settings from the cli. This switches to a generic function to load the params from the cli options. --- examples/llava/llava-cli.cpp | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp index 19374c67ff6c59..633afd1dad1bf5 100644 --- a/examples/llava/llava-cli.cpp +++ b/examples/llava/llava-cli.cpp @@ -242,18 +242,16 @@ static struct llava_context * llava_init(gpt_params * params) { llama_backend_init(params->numa); - llama_model_params model_params = llama_model_default_params(); + llama_model_params model_params = llama_model_params_from_gpt_params(*params); + llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params); if (model == NULL) { fprintf(stderr , "%s: error: unable to load model\n" , __func__); return NULL; } - llama_context_params ctx_params = llama_context_default_params(); - + llama_context_params ctx_params = llama_context_params_from_gpt_params(*params); ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings - ctx_params.n_threads = params->n_threads; - ctx_params.n_threads_batch = params->n_threads_batch == -1 ? params->n_threads : params->n_threads_batch; llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params); From e9c1cecb9d7d743d30b4a29ecd56a411437def0a Mon Sep 17 00:00:00 2001 From: xaedes Date: Tue, 7 Nov 2023 09:04:51 +0100 Subject: [PATCH 074/206] ggml : fix backward rope after YaRN (#3974) * fix backward process of rope rope backward process was broken after YaRN RoPE (#2268) implementation, due to missing changes in backward functions. the code for the backward process is nearly identically to the forward process: the only difference is the sign of the sin-values. to avoid future regressions remove the near-duplicate backward functions and reuse the forward code: for this a new function argument `bool forward` was added to `ggml_compute_forward_rope_f32` and `ggml_compute_forward_rope_f16`. the sin-values will be negated when forward is false. * fix finetune rope call to use correct default attn_factor of 1.0f * remove unused `ggml_rope_xpos_back` it is better to have only one `ggml_rope_back` function that accepts all rope parameters, so that `ggml_compute_backward` can propagate all parameters without having to switch between different rope_back variants. 
* fix comments explaining the sinus sign in ggml_forward_rope * add missing function arguments in declaration * fix function argument type in declaration --- examples/finetune/finetune.cpp | 2 +- ggml.c | 330 ++++++++------------------------- ggml.h | 5 + 3 files changed, 84 insertions(+), 253 deletions(-) diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp index 649a3b7c1941e5..fa7dbe496b2c51 100644 --- a/examples/finetune/finetune.cpp +++ b/examples/finetune/finetune.cpp @@ -643,7 +643,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs( return ggml_rope_custom(ctx, t, KQ_pos, n_rot, rope_mode, n_ctx, 0, - rope_freq_base, rope_freq_scale, 0.0f, 0.0f, 0.0f, 0.0f + rope_freq_base, rope_freq_scale, 0.0f, 1.0f, 0.0f, 0.0f ); }; diff --git a/ggml.c b/ggml.c index 605a27940fc81f..009d5b3985e55f 100644 --- a/ggml.c +++ b/ggml.c @@ -4970,8 +4970,13 @@ struct ggml_tensor * ggml_rope_back( int n_dims, int mode, int n_ctx, + int n_orig_ctx, float freq_base, float freq_scale, + float ext_factor, + float attn_factor, + float beta_fast, + float beta_slow, float xpos_base, bool xpos_down) { GGML_ASSERT(ggml_is_vector(b)); @@ -4988,11 +4993,15 @@ struct ggml_tensor * ggml_rope_back( struct ggml_tensor * result = ggml_dup_tensor(ctx, a); - int32_t params[8] = { /*n_past*/ 0, n_dims, mode, n_ctx }; - memcpy(params + 4, &freq_base, sizeof(float)); - memcpy(params + 5, &freq_scale, sizeof(float)); - memcpy(params + 6, &xpos_base, sizeof(float)); - memcpy(params + 7, &xpos_down, sizeof(bool)); + int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx }; + memcpy(params + 5, &freq_base, sizeof(float)); + memcpy(params + 6, &freq_scale, sizeof(float)); + memcpy(params + 7, &ext_factor, sizeof(float)); + memcpy(params + 8, &attn_factor, sizeof(float)); + memcpy(params + 9, &beta_fast, sizeof(float)); + memcpy(params + 10, &beta_slow, sizeof(float)); + memcpy(params + 11, &xpos_base, sizeof(float)); + memcpy(params + 12, &xpos_down, sizeof(bool)); ggml_set_op_params(result, params, sizeof(params)); result->op = GGML_OP_ROPE_BACK; @@ -10974,7 +10983,8 @@ static void ggml_compute_forward_rope_f32( const struct ggml_compute_params * params, const struct ggml_tensor * src0, const struct ggml_tensor * src1, - struct ggml_tensor * dst) { + struct ggml_tensor * dst, + const bool forward) { if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { return; } @@ -11033,6 +11043,11 @@ static void ggml_compute_forward_rope_f32( const bool is_neox = mode & 2; const bool is_glm = mode & 4; + // backward process uses inverse rotation by cos and sin. + // cos and sin build a rotation matrix, where the inverse is the transpose. + // this essentially just switches the sign of sin. + const float sin_sign = forward ? 
1.0f : -1.0f; + const int32_t * pos = (const int32_t *) src1->data; for (int64_t i3 = 0; i3 < ne3; i3++) { @@ -11049,9 +11064,9 @@ static void ggml_compute_forward_rope_f32( float block_theta = MAX(p - (n_ctx - 2), 0); for (int64_t i0 = 0; i0 < ne0 / 4; i0++) { const float cos_theta = cosf(theta_base); - const float sin_theta = sinf(theta_base); + const float sin_theta = sinf(theta_base) * sin_sign; const float cos_block_theta = cosf(block_theta); - const float sin_block_theta = sinf(block_theta); + const float sin_block_theta = sinf(block_theta) * sin_sign; theta_base *= theta_scale; block_theta *= theta_scale; @@ -11075,6 +11090,7 @@ static void ggml_compute_forward_rope_f32( rope_yarn( theta_base, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta ); + sin_theta *= sin_sign; // zeta scaling for xPos only: float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f; @@ -11105,6 +11121,7 @@ static void ggml_compute_forward_rope_f32( theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, &cos_theta, &sin_theta ); + sin_theta *= sin_sign; theta_base *= theta_scale; @@ -11130,7 +11147,8 @@ static void ggml_compute_forward_rope_f16( const struct ggml_compute_params * params, const struct ggml_tensor * src0, const struct ggml_tensor * src1, - struct ggml_tensor * dst) { + struct ggml_tensor * dst, + const bool forward) { if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { return; } @@ -11182,6 +11200,11 @@ static void ggml_compute_forward_rope_f16( const bool is_neox = mode & 2; const bool is_glm = mode & 4; + // backward process uses inverse rotation by cos and sin. + // cos and sin build a rotation matrix, where the inverse is the transpose. + // this essentially just switches the sign of sin. + const float sin_sign = forward ? 
1.0f : -1.0f; + const int32_t * pos = (const int32_t *) src1->data; for (int64_t i3 = 0; i3 < ne3; i3++) { @@ -11198,9 +11221,9 @@ static void ggml_compute_forward_rope_f16( float block_theta = MAX(p - (n_ctx - 2), 0); for (int64_t i0 = 0; i0 < ne0 / 4; i0++) { const float cos_theta = cosf(theta_base); - const float sin_theta = sinf(theta_base); + const float sin_theta = sinf(theta_base) * sin_sign; const float cos_block_theta = cosf(block_theta); - const float sin_block_theta = sinf(block_theta); + const float sin_block_theta = sinf(block_theta) * sin_sign; theta_base *= theta_scale; block_theta *= theta_scale; @@ -11224,6 +11247,7 @@ static void ggml_compute_forward_rope_f16( rope_yarn( theta_base, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta ); + sin_theta *= sin_sign; theta_base *= theta_scale; @@ -11250,6 +11274,7 @@ static void ggml_compute_forward_rope_f16( theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, &cos_theta, &sin_theta ); + sin_theta *= sin_sign; theta_base *= theta_scale; @@ -11279,11 +11304,11 @@ static void ggml_compute_forward_rope( switch (src0->type) { case GGML_TYPE_F16: { - ggml_compute_forward_rope_f16(params, src0, src1, dst); + ggml_compute_forward_rope_f16(params, src0, src1, dst, true); } break; case GGML_TYPE_F32: { - ggml_compute_forward_rope_f32(params, src0, src1, dst); + ggml_compute_forward_rope_f32(params, src0, src1, dst, true); } break; default: { @@ -11294,216 +11319,6 @@ static void ggml_compute_forward_rope( // ggml_compute_forward_rope_back -static void ggml_compute_forward_rope_back_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - // y = rope(x, src1) - // dx = rope_back(dy, src1) - // src0 is dy, src1 contains options - - float freq_base; - float freq_scale; - - // these two only relevant for xPos RoPE: - float xpos_base; - bool xpos_down; - - //const int n_past = ((int32_t *) dst->op_params)[0]; - const int n_dims = ((int32_t *) dst->op_params)[1]; - const int mode = ((int32_t *) dst->op_params)[2]; - const int n_ctx = ((int32_t *) dst->op_params)[3]; UNUSED(n_ctx); - memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float)); - memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float)); - memcpy(&xpos_base, (int32_t *) dst->op_params + 6, sizeof(float)); - memcpy(&xpos_down, (int32_t *) dst->op_params + 7, sizeof(bool)); - - GGML_TENSOR_UNARY_OP_LOCALS - - //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3); - //printf("n_past = %d, ne2 = %d\n", n_past, ne2); - - assert(nb0 == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(dst); - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - // row index used to determine which thread to use - int ir = 0; - - const float theta_scale = powf(freq_base, -2.0f/n_dims); - - const bool is_neox = mode & 2; - - const int32_t * pos = (const int32_t *) src1->data; - - for (int64_t i3 = 0; i3 < ne3; i3++) { - for (int64_t i2 = 0; i2 < ne2; i2++) { - const int64_t p = pos[i2]; - for (int64_t i1 = 0; i1 < ne1; i1++) { - if (ir++ < ir0) continue; - if (ir > ir1) break; - - float theta_base = freq_scale * (float)p; - - if (!is_neox) { - for (int64_t i0 = 0; i0 < ne0; i0 += 2) { 
- const float cos_theta = cosf(theta_base); - const float sin_theta = sinf(theta_base); - - // zeta scaling for xPos only: - float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f; - if (xpos_down) zeta = 1.0f / zeta; - - theta_base *= theta_scale; - - const float * const dy = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); - float * dx = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - const float dy0 = dy[0]; - const float dy1 = dy[1]; - - dx[0] = dy0*cos_theta*zeta + dy1*sin_theta*zeta; - dx[1] = - dy0*sin_theta*zeta + dy1*cos_theta*zeta; - } - } else { - for (int64_t ib = 0; ib < ne0/n_dims; ++ib) { - for (int64_t ic = 0; ic < n_dims; ic += 2) { - const float cos_theta = cosf(theta_base); - const float sin_theta = sinf(theta_base); - - theta_base *= theta_scale; - - const int64_t i0 = ib*n_dims + ic/2; - - const float * const dy = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); - float * dx = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - const float dy0 = dy[0]; - const float dy1 = dy[n_dims/2]; - - dx[0] = dy0*cos_theta + dy1*sin_theta; - dx[n_dims/2] = - dy0*sin_theta + dy1*cos_theta; - } - } - } - } - } - } -} - -static void ggml_compute_forward_rope_back_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - // y = rope(x, src1) - // dx = rope_back(dy, src1) - // src0 is dy, src1 contains options - - //const int n_past = ((int32_t *) dst->op_params)[0]; - const int n_dims = ((int32_t *) dst->op_params)[1]; - const int mode = ((int32_t *) dst->op_params)[2]; - - GGML_TENSOR_UNARY_OP_LOCALS - - //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3); - //printf("n_past = %d, ne2 = %d\n", n_past, ne2); - - assert(nb0 == sizeof(ggml_fp16_t)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(dst); - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - // row index used to determine which thread to use - int ir = 0; - - const float theta_scale = powf(10000.0, -2.0f/n_dims); - - const bool is_neox = mode & 2; - - const int32_t * pos = (const int32_t *) src1->data; - - for (int64_t i3 = 0; i3 < ne3; i3++) { - for (int64_t i2 = 0; i2 < ne2; i2++) { - const int64_t p = pos[i2]; - for (int64_t i1 = 0; i1 < ne1; i1++) { - if (ir++ < ir0) continue; - if (ir > ir1) break; - - float theta_base = (float)p; - - if (!is_neox) { - for (int64_t i0 = 0; i0 < ne0; i0 += 2) { - const float cos_theta = cosf(theta_base); - const float sin_theta = sinf(theta_base); - - theta_base *= theta_scale; - - const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); - ggml_fp16_t * dx = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - const float dy0 = GGML_FP16_TO_FP32(dy[0]); - const float dy1 = GGML_FP16_TO_FP32(dy[1]); - - dx[0] = GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta); - dx[1] = GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta); - } - } else { - for (int64_t ib = 0; ib < ne0/n_dims; ++ib) { - for (int64_t ic = 0; ic < n_dims; ic += 2) { - const float cos_theta = cosf(theta_base); - const float sin_theta = sinf(theta_base); - - theta_base *= 
theta_scale; - - const int64_t i0 = ib*n_dims + ic/2; - - const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); - ggml_fp16_t * dx = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - const float dy0 = GGML_FP16_TO_FP32(dy[0]); - const float dy1 = GGML_FP16_TO_FP32(dy[n_dims/2]); - - dx[0] = GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta); - dx[n_dims/2] = GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta); - } - } - } - } - } - } -} - static void ggml_compute_forward_rope_back( const struct ggml_compute_params * params, const struct ggml_tensor * src0, @@ -11512,11 +11327,11 @@ static void ggml_compute_forward_rope_back( switch (src0->type) { case GGML_TYPE_F16: { - ggml_compute_forward_rope_back_f16(params, src0, src1, dst); + ggml_compute_forward_rope_f16(params, src0, src1, dst, false); } break; case GGML_TYPE_F32: { - ggml_compute_forward_rope_back_f32(params, src0, src1, dst); + ggml_compute_forward_rope_f32(params, src0, src1, dst, false); } break; default: { @@ -15559,17 +15374,20 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor // necessary for llama if (src0->grad) { //const int n_past = ((int32_t *) tensor->op_params)[0]; - const int n_dims = ((int32_t *) tensor->op_params)[1]; - const int mode = ((int32_t *) tensor->op_params)[2]; - const int n_ctx = ((int32_t *) tensor->op_params)[3]; - float freq_base; - float freq_scale; - float xpos_base; - bool xpos_down; - memcpy(&freq_base, (int32_t *) tensor->op_params + 4, sizeof(float)); - memcpy(&freq_scale, (int32_t *) tensor->op_params + 5, sizeof(float)); - memcpy(&xpos_base, (int32_t *) tensor->op_params + 6, sizeof(float)); - memcpy(&xpos_down, (int32_t *) tensor->op_params + 7, sizeof(bool)); + const int n_dims = ((int32_t *) tensor->op_params)[1]; + const int mode = ((int32_t *) tensor->op_params)[2]; + const int n_ctx = ((int32_t *) tensor->op_params)[3]; + const int n_orig_ctx = ((int32_t *) tensor->op_params)[4]; + float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, xpos_base, xpos_down; + + memcpy(&freq_base, (int32_t *) tensor->op_params + 5, sizeof(float)); + memcpy(&freq_scale, (int32_t *) tensor->op_params + 6, sizeof(float)); + memcpy(&ext_factor, (int32_t *) tensor->op_params + 7, sizeof(float)); + memcpy(&attn_factor, (int32_t *) tensor->op_params + 8, sizeof(float)); + memcpy(&beta_fast, (int32_t *) tensor->op_params + 9, sizeof(float)); + memcpy(&beta_slow, (int32_t *) tensor->op_params + 10, sizeof(float)); + memcpy(&xpos_base, (int32_t *) tensor->op_params + 11, sizeof(float)); + memcpy(&xpos_down, (int32_t *) tensor->op_params + 12, sizeof(bool)); src0->grad = ggml_add_or_set(ctx, src0->grad, @@ -15579,8 +15397,13 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor n_dims, mode, n_ctx, + n_orig_ctx, freq_base, freq_scale, + ext_factor, + attn_factor, + beta_fast, + beta_slow, xpos_base, xpos_down), zero_table); @@ -15590,17 +15413,20 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor { if (src0->grad) { //const int n_past = ((int32_t *) tensor->op_params)[0]; - const int n_dims = ((int32_t *) tensor->op_params)[1]; - const int mode = ((int32_t *) tensor->op_params)[2]; - const int n_ctx = ((int32_t *) tensor->op_params)[3]; - float freq_base; - float freq_scale; - float xpos_base; - bool xpos_down; - memcpy(&freq_base, (int32_t *) tensor->op_params + 4, sizeof(float)); - memcpy(&freq_scale, (int32_t *) 
tensor->op_params + 5, sizeof(float)); - memcpy(&xpos_base, (int32_t *) tensor->op_params + 6, sizeof(float)); - memcpy(&xpos_down, (int32_t *) tensor->op_params + 7, sizeof(bool)); + const int n_dims = ((int32_t *) tensor->op_params)[1]; + const int mode = ((int32_t *) tensor->op_params)[2]; + const int n_ctx = ((int32_t *) tensor->op_params)[3]; + const int n_orig_ctx = ((int32_t *) tensor->op_params)[4]; + float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, xpos_base, xpos_down; + + memcpy(&freq_base, (int32_t *) tensor->op_params + 5, sizeof(float)); + memcpy(&freq_scale, (int32_t *) tensor->op_params + 6, sizeof(float)); + memcpy(&ext_factor, (int32_t *) tensor->op_params + 7, sizeof(float)); + memcpy(&attn_factor, (int32_t *) tensor->op_params + 8, sizeof(float)); + memcpy(&beta_fast, (int32_t *) tensor->op_params + 9, sizeof(float)); + memcpy(&beta_slow, (int32_t *) tensor->op_params + 10, sizeof(float)); + memcpy(&xpos_base, (int32_t *) tensor->op_params + 11, sizeof(float)); + memcpy(&xpos_down, (int32_t *) tensor->op_params + 12, sizeof(bool)); src0->grad = ggml_add_or_set(ctx, src0->grad, @@ -15609,14 +15435,14 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor src1, n_dims, mode, - 0, n_ctx, + n_orig_ctx, freq_base, freq_scale, - 0.0f, - 1.0f, - 0.0f, - 0.0f, + ext_factor, + attn_factor, + beta_fast, + beta_slow, xpos_base, xpos_down, false), diff --git a/ggml.h b/ggml.h index 70eb25a6bf3afc..26654fc8ecdc84 100644 --- a/ggml.h +++ b/ggml.h @@ -1372,8 +1372,13 @@ extern "C" { int n_dims, int mode, int n_ctx, + int n_orig_ctx, float freq_base, float freq_scale, + float ext_factor, + float attn_factor, + float beta_fast, + float beta_slow, float xpos_base, bool xpos_down); From 413503d4b92500d82b002d03c580a71a54747138 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 7 Nov 2023 19:25:32 +0200 Subject: [PATCH 075/206] make : do not add linker flags when compiling static llava lib (#3977) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f2d4fd0312ad90..d6be254a0f3626 100644 --- a/Makefile +++ b/Makefile @@ -618,7 +618,7 @@ llama-bench: examples/llama-bench/llama-bench.cpp ggml.o llama.o $(COMMON_DEPS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) libllava.a: examples/llava/llava.cpp examples/llava/llava.h examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h common/base64.hpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) - $(CXX) $(CXXFLAGS) -static -fPIC -c $< -o $@ $(LDFLAGS) -Wno-cast-qual + $(CXX) $(CXXFLAGS) -static -fPIC -c $< -o $@ -Wno-cast-qual llava-cli: examples/llava/llava-cli.cpp examples/llava/clip.h examples/llava/clip.cpp examples/llava/llava.h examples/llava/llava.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -Wno-cast-qual From 0a7c980b6f94a049cb804573df2d8092a34df8e4 Mon Sep 17 00:00:00 2001 From: Jared Van Bortel Date: Tue, 7 Nov 2023 12:43:04 -0500 Subject: [PATCH 076/206] gguf : track writer state, free unneeded tensors, cleanup (#3871) --- gguf-py/gguf/gguf.py | 82 +++++++++++++++++++++++++++--------------- gguf-py/pyproject.toml | 2 +- 2 files changed, 54 insertions(+), 30 deletions(-) diff --git a/gguf-py/gguf/gguf.py b/gguf-py/gguf/gguf.py index a2271d225d0012..7e495cb19638d1 100644 --- a/gguf-py/gguf/gguf.py +++ b/gguf-py/gguf/gguf.py @@ -646,18 +646,17 @@ def get_type(val): sys.exit() +class WriterState(Enum): + EMPTY = auto() + HEADER = auto() + KV_DATA = auto() + 
TI_DATA = auto() + + class GGUFWriter: fout: BufferedWriter - arch: str - offset_tensor = 0 - data_alignment = GGUF_DEFAULT_ALIGNMENT - kv_data = b"" - kv_data_count = 0 - ti_data = b"" - ti_data_count = 0 - use_temp_file: bool - temp_file: tempfile.SpooledTemporaryFile[bytes] | None = None - tensors: list[tuple[np.ndarray[Any, Any], int]] + temp_file: tempfile.SpooledTemporaryFile[bytes] | None + tensors: list[np.ndarray[Any, Any]] @property def pack_prefix(self): @@ -683,27 +682,47 @@ def __init__(self, path: os.PathLike[str] | str, arch: str, use_temp_file = True GGUFValueType.FLOAT64: f"{self.pack_prefix}d", GGUFValueType.BOOL: "?" , } - self.add_architecture() + self.offset_tensor = 0 + self.data_alignment = GGUF_DEFAULT_ALIGNMENT + self.kv_data = b"" + self.kv_data_count = 0 + self.ti_data = b"" + self.ti_data_count = 0 self.use_temp_file = use_temp_file + self.temp_file = None self.tensors = [] endianess_str = "Big Endian" if self.endianess == GGUFEndian.BIG else "Little Endian" print(f"This gguf file is for {endianess_str} only") + self.state = WriterState.EMPTY + + self.add_architecture() def write_header_to_file(self): + if self.state is not WriterState.EMPTY: + raise ValueError(f'Expected output file to be empty, got {self.state}') + self.fout.write(struct.pack(" int: return ((x + n - 1) // n) * n def add_tensor_info(self, name: str, tensor_shape: Sequence[int], tensor_dtype: np.dtype[np.float16] | np.dtype[np.float32], tensor_nbytes: int, raw_dtype: GGMLQuantizationType | None = None): + if self.state is not WriterState.EMPTY: + raise ValueError(f'Expected output file to be empty, got {self.state}') + assert raw_dtype is not None or tensor_dtype in (np.float32, np.float16), "Only F32 and F16 tensors are supported for now" encoded_name = name.encode("utf8") @@ -825,23 +847,22 @@ def add_tensor(self, name: str, tensor: np.ndarray[Any, Any], raw_shape: Sequenc shape: Sequence[int] = raw_shape if raw_shape is not None else tensor.shape self.add_tensor_info(name, shape, tensor.dtype, tensor.nbytes, raw_dtype = raw_dtype) - pad = GGUFWriter.ggml_pad(tensor.nbytes, self.data_alignment) - tensor.nbytes - - if self.temp_file is None: - self.tensors.append((tensor, pad)) + if self.temp_file is None: + self.tensors.append(tensor) return tensor.tofile(self.temp_file) + self.write_padding(self.temp_file, tensor.nbytes) - if pad != 0: - self.temp_file.write(bytes([0] * pad)) - - def write_padding(self, fp: BinaryIO, n: int, align: int | None = None): + def write_padding(self, fp: IO[bytes], n: int, align: int | None = None): pad = GGUFWriter.ggml_pad(n, align if align is not None else self.data_alignment) - n if pad != 0: fp.write(bytes([0] * pad)) def write_tensor_data(self, tensor: np.ndarray[Any, Any]): + if self.state is not WriterState.TI_DATA: + raise ValueError(f'Expected output file to contain tensor info, got {self.state}') + if self.endianess==GGUFEndian.BIG: tensor.byteswap(inplace=True) self.write_padding(self.fout, self.fout.tell()) @@ -854,10 +875,13 @@ def write_tensors_to_file(self): self.write_padding(self.fout, self.fout.tell()) if self.temp_file is None: - for (currtensor, currpad) in self.tensors: - currtensor.tofile(self.fout) - if currpad != 0: - self.fout.write(bytes([0] * currpad)) + while True: + try: + tensor = self.tensors.pop(0) + except IndexError: + break + tensor.tofile(self.fout) + self.write_padding(self.fout, tensor.nbytes) return self.temp_file.seek(0) @@ -1002,11 +1026,8 @@ def add_pad_token_id(self, id: int): class SpecialVocab: - load_merges: bool = 
False - merges: list[str] = [] - special_token_types: tuple[str, ...] = ('bos', 'eos', 'unk', 'sep', 'pad') - special_token_ids: dict[str, int] = {} - n_vocab: int | None = None + merges: list[str] + special_token_ids: dict[str, int] def __init__( self, path: str | os.PathLike[str], load_merges: bool = False, @@ -1016,8 +1037,11 @@ def __init__( self.special_token_ids = {} self.n_vocab = n_vocab self.load_merges = load_merges + self.merges = [] if special_token_types is not None: self.special_token_types = special_token_types + else: + self.special_token_types = ('bos', 'eos', 'unk', 'sep', 'pad') self._load(Path(path)) def _load(self, path: Path) -> None: diff --git a/gguf-py/pyproject.toml b/gguf-py/pyproject.toml index f0741a7c23e034..c6cb2c37a0e0a1 100644 --- a/gguf-py/pyproject.toml +++ b/gguf-py/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "gguf" -version = "0.4.5" +version = "0.4.6" description = "Write ML models in GGUF for GGML" authors = ["GGML "] packages = [ From 875fb42871a0f5a88fbe31a0b5edd697b84038e4 Mon Sep 17 00:00:00 2001 From: slaren Date: Wed, 8 Nov 2023 13:15:14 +0100 Subject: [PATCH 077/206] ggml-alloc : fix backend assignments of views (#3982) --- ggml-alloc.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/ggml-alloc.c b/ggml-alloc.c index 34eba3f830e849..b553eb7c132719 100644 --- a/ggml-alloc.c +++ b/ggml-alloc.c @@ -378,9 +378,13 @@ static bool ggml_op_can_inplace(enum ggml_op op) { } } -static void init_view(struct ggml_allocr * alloc, struct ggml_tensor * view) { +static void init_view(struct ggml_allocr * alloc, struct ggml_tensor * view, bool update_backend) { assert(view->view_src != NULL && view->view_src->data != NULL); - view->backend = view->view_src->backend; + + if (update_backend) { + view->backend = view->view_src->backend; + } + view->buffer = view->view_src->buffer; view->data = (char *)view->view_src->data + view->view_offs; @@ -394,7 +398,7 @@ static void allocate_node(struct ggml_allocr * alloc, struct ggml_tensor * node) struct hash_node * ht = alloc->hash_table; if (node->data == NULL) { if (ggml_is_view(node)) { - init_view(alloc, node); + init_view(alloc, node, true); } else { // see if we can reuse a parent's buffer (inplace) if (ggml_op_can_inplace(node->op)) { @@ -424,15 +428,14 @@ static void allocate_node(struct ggml_allocr * alloc, struct ggml_tensor * node) AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name); node->view_src = view_src; view_src_hn->n_views += 1; - init_view(alloc, node); + init_view(alloc, node, false); return; } - } - else { + } else { AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name); node->view_src = parent; p_hn->n_views += 1; - init_view(alloc, node); + init_view(alloc, node, false); return; } } @@ -463,7 +466,7 @@ size_t ggml_allocr_alloc_graph_n( hash_get(ht, view_src)->n_views += 1; if (node->buffer == NULL && node->data != NULL) { // view of a pre-allocated tensor, didn't call init_view() yet - init_view(alloc, node); + init_view(alloc, node, true); } } @@ -474,7 +477,7 @@ size_t ggml_allocr_alloc_graph_n( } hash_get(ht, parent)->n_children += 1; if (ggml_is_view(parent) && parent->buffer == NULL && parent->data != NULL) { - init_view(alloc, parent); + init_view(alloc, parent, true); } } } From 57ad015dc3011b046ed5a23186c86ea55f987c54 Mon Sep 17 00:00:00 2001 From: Mihai Date: Thu, 9 Nov 2023 04:00:34 +0200 Subject: [PATCH 078/206] server : add min_p param (#3877) * Update server.cpp with min_p after it was 
introduced in https://github.com/ggerganov/llama.cpp/pull/3841 * Use spaces instead of tabs * Update index.html.hpp after running deps.sh * Fix test - fix line ending --- examples/server/README.md | 2 + examples/server/index.html.hpp | 4356 +++++++++++++++-------------- examples/server/public/index.html | 2 + examples/server/server.cpp | 2 + 4 files changed, 2191 insertions(+), 2171 deletions(-) diff --git a/examples/server/README.md b/examples/server/README.md index 089ebe2d1533f8..a6eda3b32d5761 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -122,6 +122,8 @@ node index.js `top_p`: Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P (default: 0.95). + `min_p`: The minimum probability for a token to be considered, relative to the probability of the most likely token (default: 0.05). + `n_predict`: Set the maximum number of tokens to predict when generating text. **Note:** May exceed the set limit slightly if the last token is a partial multibyte character. When 0, no tokens will be generated but the prompt is evaluated into the cache. (default: -1, -1 = infinity). `n_keep`: Specify the number of tokens from the prompt to retain when the context size is exceeded and tokens need to be discarded. diff --git a/examples/server/index.html.hpp b/examples/server/index.html.hpp index 5d3bdfbdd7da31..207412513ae71a 100644 --- a/examples/server/index.html.hpp +++ b/examples/server/index.html.hpp @@ -374,1189 +374,1161 @@ unsigned char index_html[] = { 0x7a, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x35, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x31, 0x2e, 0x30, 0x20, 0x3d, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x66, 0x73, - 0x5f, 0x7a, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x2f, 0x2f, 0x20, - 0x31, 0x2e, 0x30, 0x20, 0x3d, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x79, 0x70, - 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, - 0x20, 0x2f, 0x2f, 0x20, 0x31, 0x2e, 0x30, 0x20, 0x3d, 0x20, 0x64, 0x69, - 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x65, - 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, - 0x2f, 0x2f, 0x20, 0x30, 0x2e, 0x30, 0x20, 0x3d, 0x20, 0x64, 0x69, 0x73, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x70, 0x65, - 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, - 0x2f, 0x2f, 0x20, 0x30, 0x2e, 0x30, 0x20, 0x3d, 0x20, 0x64, 0x69, 0x73, + 0x65, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x69, 0x6e, + 0x5f, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x35, 0x2c, 0x20, 0x2f, 0x2f, + 0x20, 0x30, 0x20, 0x3d, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x66, 0x73, 0x5f, + 0x7a, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x31, + 0x2e, 0x30, 0x20, 0x3d, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x79, 0x70, 0x69, + 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, + 0x2f, 0x2f, 0x20, 0x31, 0x2e, 0x30, 0x20, 0x3d, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x6d, 0x69, 0x72, 
0x6f, 0x73, 0x74, 0x61, 0x74, 0x3a, 0x20, 0x30, 0x2c, - 0x20, 0x2f, 0x2f, 0x20, 0x30, 0x2f, 0x31, 0x2f, 0x32, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, - 0x5f, 0x74, 0x61, 0x75, 0x3a, 0x20, 0x35, 0x2c, 0x20, 0x2f, 0x2f, 0x20, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x65, 0x6e, 0x74, 0x72, 0x6f, - 0x70, 0x79, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x69, 0x72, - 0x6f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x65, 0x74, 0x61, 0x3a, 0x20, 0x30, - 0x2e, 0x31, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x6c, 0x65, 0x61, 0x72, 0x6e, - 0x69, 0x6e, 0x67, 0x20, 0x72, 0x61, 0x74, 0x65, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x3a, 0x20, - 0x27, 0x27, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x5f, - 0x70, 0x72, 0x6f, 0x62, 0x73, 0x3a, 0x20, 0x30, 0x2c, 0x20, 0x2f, 0x2f, - 0x20, 0x6e, 0x6f, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, - 0x74, 0x69, 0x65, 0x73, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x20, - 0x5b, 0x5d, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, - 0x63, 0x68, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x3a, 0x20, - 0x74, 0x72, 0x75, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x0a, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2a, 0x20, 0x53, 0x54, 0x41, 0x52, - 0x54, 0x3a, 0x20, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x66, - 0x6f, 0x72, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x70, - 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x20, 0x69, 0x6e, 0x20, 0x62, 0x6f, - 0x72, 0x77, 0x73, 0x65, 0x72, 0x20, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x20, 0x2a, 0x2f, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x20, 0x3d, 0x20, - 0x22, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0x3b, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x6f, 0x6d, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x74, 0x61, 0x67, 0x2c, 0x20, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x73, 0x65, 0x74, 0x49, 0x74, 0x65, - 0x6d, 0x28, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, - 0x65, 0x79, 0x20, 0x2b, 0x20, 0x27, 0x2f, 0x27, 0x20, 0x2b, 0x20, 0x74, - 0x61, 0x67, 0x2c, 0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x2e, 0x73, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x69, 0x66, 0x79, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 
0x5f, 0x73, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, - 0x72, 0x6f, 0x6d, 0x52, 0x61, 0x77, 0x54, 0x65, 0x78, 0x74, 0x28, 0x74, - 0x61, 0x67, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x73, 0x65, + 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x65, 0x6e, + 0x61, 0x6c, 0x74, 0x79, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x2f, + 0x2f, 0x20, 0x30, 0x2e, 0x30, 0x20, 0x3d, 0x20, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x70, 0x65, 0x6e, + 0x61, 0x6c, 0x74, 0x79, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x2f, + 0x2f, 0x20, 0x30, 0x2e, 0x30, 0x20, 0x3d, 0x20, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, + 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x3a, 0x20, 0x30, 0x2c, 0x20, + 0x2f, 0x2f, 0x20, 0x30, 0x2f, 0x31, 0x2f, 0x32, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x5f, + 0x74, 0x61, 0x75, 0x3a, 0x20, 0x35, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, + 0x79, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x69, 0x72, 0x6f, + 0x73, 0x74, 0x61, 0x74, 0x5f, 0x65, 0x74, 0x61, 0x3a, 0x20, 0x30, 0x2e, + 0x31, 0x2c, 0x20, 0x2f, 0x2f, 0x20, 0x6c, 0x65, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x20, 0x72, 0x61, 0x74, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x3a, 0x20, 0x27, + 0x27, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x5f, 0x70, + 0x72, 0x6f, 0x62, 0x73, 0x3a, 0x20, 0x30, 0x2c, 0x20, 0x2f, 0x2f, 0x20, + 0x6e, 0x6f, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x20, 0x5b, + 0x5d, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x3a, 0x20, 0x74, + 0x72, 0x75, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2a, 0x20, 0x53, 0x54, 0x41, 0x52, 0x54, + 0x3a, 0x20, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x66, 0x6f, + 0x72, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x70, 0x72, + 0x6f, 0x6d, 0x70, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x20, 0x69, 0x6e, 0x20, 0x62, 0x6f, 0x72, + 0x77, 0x73, 0x65, 0x72, 0x20, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x20, 0x2a, 0x2f, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x20, 0x3d, 0x20, 0x22, + 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, + 0x73, 0x65, 0x74, 
0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x6f, 0x6d, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x74, 0x61, 0x67, 0x2c, 0x20, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x73, 0x65, 0x74, 0x49, 0x74, 0x65, 0x6d, + 0x28, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, + 0x79, 0x20, 0x2b, 0x20, 0x27, 0x2f, 0x27, 0x20, 0x2b, 0x20, 0x74, 0x61, + 0x67, 0x2c, 0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x2e, 0x73, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x69, 0x66, 0x79, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, + 0x6f, 0x6d, 0x52, 0x61, 0x77, 0x54, 0x65, 0x78, 0x74, 0x28, 0x74, 0x61, + 0x67, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x73, 0x65, 0x74, + 0x49, 0x74, 0x65, 0x6d, 0x28, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x4b, 0x65, 0x79, 0x20, 0x2b, 0x20, 0x27, 0x2f, 0x27, 0x20, + 0x2b, 0x20, 0x74, 0x61, 0x67, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x5f, 0x67, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x41, 0x73, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x74, 0x61, 0x67, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, + 0x74, 0x20, 0x69, 0x74, 0x65, 0x6d, 0x20, 0x3d, 0x20, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x65, 0x74, 0x49, 0x74, 0x65, 0x6d, 0x28, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x20, 0x2b, 0x20, 0x27, 0x2f, 0x27, - 0x20, 0x2b, 0x20, 0x74, 0x61, 0x67, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x5f, 0x67, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x41, - 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x74, 0x61, 0x67, 0x29, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, - 0x73, 0x74, 0x20, 0x69, 0x74, 0x65, 0x6d, 0x20, 0x3d, 0x20, 0x6c, 0x6f, - 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, - 0x65, 0x74, 0x49, 0x74, 0x65, 0x6d, 0x28, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x20, 0x2b, 0x20, 0x27, 0x2f, - 0x27, 0x20, 0x2b, 0x20, 0x74, 0x61, 0x67, 0x29, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x69, 0x74, 0x65, - 0x6d, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x72, 0x65, 0x74, 
0x75, 0x72, 0x6e, 0x20, 0x6e, 0x75, 0x6c, 0x6c, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, - 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x4a, 0x53, 0x4f, 0x4e, - 0x2e, 0x70, 0x61, 0x72, 0x73, 0x65, 0x28, 0x69, 0x74, 0x65, 0x6d, 0x29, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x67, 0x65, 0x74, 0x44, - 0x61, 0x74, 0x61, 0x41, 0x73, 0x52, 0x61, 0x77, 0x54, 0x65, 0x78, 0x74, - 0x28, 0x74, 0x61, 0x67, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x69, 0x74, 0x65, 0x6d, - 0x20, 0x3d, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x67, 0x65, 0x74, 0x49, 0x74, 0x65, 0x6d, 0x28, - 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, - 0x20, 0x2b, 0x20, 0x27, 0x2f, 0x27, 0x20, 0x2b, 0x20, 0x74, 0x61, 0x67, - 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, - 0x28, 0x21, 0x69, 0x74, 0x65, 0x6d, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, - 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, - 0x20, 0x69, 0x74, 0x65, 0x6d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x20, - 0x61, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x20, - 0x66, 0x6f, 0x72, 0x20, 0x75, 0x73, 0x65, 0x72, 0x20, 0x74, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x73, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, - 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x73, 0x20, 0x3d, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x7b, - 0x7d, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, - 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, - 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x3d, 0x20, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x7b, 0x20, 0x6e, 0x61, 0x6d, - 0x65, 0x3a, 0x20, 0x27, 0x27, 0x2c, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x3a, 0x20, 0x7b, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x3a, 0x20, 0x7b, 0x7d, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x3a, 0x20, 0x7b, 0x7d, 0x20, 0x7d, 0x20, 0x7d, 0x29, 0x0a, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x6c, 0x65, 0x74, 0x27, - 0x73, 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x6c, 0x79, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, - 0x20, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x69, 0x66, - 0x20, 0x74, 0x68, 0x65, 0x72, 0x65, 0x20, 0x61, 0x72, 0x65, 0x20, 0x61, - 0x6e, 0x79, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x75, 0x73, - 0x65, 0x72, 0x20, 0x74, 
0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, - 0x20, 0x61, 0x6e, 0x64, 0x20, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, - 0x20, 0x69, 0x6e, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x69, 0x6e, - 0x20, 0x66, 0x6f, 0x72, 0x6d, 0x20, 0x6f, 0x66, 0x20, 0x7b, 0x20, 0x22, - 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x22, 0x3a, 0x20, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x64, 0x61, 0x74, 0x61, 0x22, 0x20, 0x7d, 0x20, 0x61, 0x6e, 0x64, 0x20, - 0x7b, 0x20, 0x22, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x74, + 0x20, 0x2b, 0x20, 0x74, 0x61, 0x67, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x69, 0x74, 0x65, 0x6d, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, + 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x2e, + 0x70, 0x61, 0x72, 0x73, 0x65, 0x28, 0x69, 0x74, 0x65, 0x6d, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x67, 0x65, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x41, 0x73, 0x52, 0x61, 0x77, 0x54, 0x65, 0x78, 0x74, 0x28, + 0x74, 0x61, 0x67, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x69, 0x74, 0x65, 0x6d, 0x20, + 0x3d, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x67, 0x65, 0x74, 0x49, 0x74, 0x65, 0x6d, 0x28, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x20, + 0x2b, 0x20, 0x27, 0x2f, 0x27, 0x20, 0x2b, 0x20, 0x74, 0x61, 0x67, 0x29, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, + 0x21, 0x69, 0x74, 0x65, 0x6d, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x6e, 0x75, 0x6c, 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x69, 0x74, 0x65, 0x6d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x20, 0x61, + 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x20, 0x66, + 0x6f, 0x72, 0x20, 0x75, 0x73, 0x65, 0x72, 0x20, 0x74, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x73, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, + 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, + 0x20, 0x3d, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x7b, 0x7d, + 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, + 0x54, 0x65, 0x6d, 0x70, 
0x6c, 0x61, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x7b, 0x20, 0x6e, 0x61, 0x6d, 0x65, + 0x3a, 0x20, 0x27, 0x27, 0x2c, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x3a, 0x20, 0x7b, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x3a, 0x20, 0x7b, 0x7d, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x3a, 0x20, 0x7b, 0x7d, 0x20, 0x7d, 0x20, 0x7d, 0x29, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x6c, 0x65, 0x74, 0x27, 0x73, + 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x6c, 0x79, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x69, 0x66, 0x20, + 0x74, 0x68, 0x65, 0x72, 0x65, 0x20, 0x61, 0x72, 0x65, 0x20, 0x61, 0x6e, + 0x79, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x75, 0x73, 0x65, + 0x72, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, + 0x61, 0x6e, 0x64, 0x20, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x20, 0x61, 0x72, 0x65, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x20, + 0x69, 0x6e, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x69, 0x6e, 0x20, + 0x66, 0x6f, 0x72, 0x6d, 0x20, 0x6f, 0x66, 0x20, 0x7b, 0x20, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, - 0x3a, 0x22, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x64, 0x61, - 0x74, 0x61, 0x22, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, - 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, - 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x73, 0x61, - 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x73, 0x27, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, + 0x3a, 0x20, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x64, + 0x61, 0x74, 0x61, 0x22, 0x20, 0x7d, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x7b, + 0x20, 0x22, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x74, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3a, + 0x22, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x64, 0x61, 0x74, + 0x61, 0x22, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, + 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x49, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x73, 0x61, 0x76, + 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, + 0x27, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, + 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x3d, 0x20, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x67, 0x65, + 0x74, 0x44, 0x61, 0x74, 0x61, 0x41, 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x28, 0x27, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x73, 0x27, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x69, 0x66, 0x20, 0x28, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x64, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, + 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x73, 0x20, 0x77, 0x65, 0x72, 0x65, 0x20, 0x73, 0x75, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x79, 0x20, 0x69, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x65, 0x64, 
0x2e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, + 0x28, 0x27, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, + 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x27, 0x29, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, + 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x3a, 0x20, 0x5b, 0x5d, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x63, 0x6f, 0x6e, 0x73, + 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x69, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x73, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x61, + 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x3d, 0x20, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x67, - 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x41, 0x73, 0x4f, 0x62, 0x6a, 0x65, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, + 0x65, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x74, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x2e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x3d, 0x20, + 0x7b, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x3a, 0x20, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, + 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x27, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x27, 0x29, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, - 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x29, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x27, 0x2c, 0x20, 0x73, 0x61, 0x76, + 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x6e, 0x6f, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x73, 0x20, 0x77, 0x65, 0x72, 0x65, 0x20, 0x73, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x66, 
0x75, 0x6c, 0x79, 0x20, 0x69, 0x6d, 0x70, - 0x6f, 0x72, 0x74, 0x65, 0x64, 0x2e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, - 0x67, 0x28, 0x27, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, - 0x67, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x27, - 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, - 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, - 0x64, 0x61, 0x74, 0x61, 0x3a, 0x20, 0x5b, 0x5d, 0x20, 0x7d, 0x3b, 0x0a, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x63, 0x6f, 0x6e, - 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x69, 0x6d, 0x70, - 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x73, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, - 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, - 0x3d, 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x3b, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, - 0x64, 0x65, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x74, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x2e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x3d, - 0x20, 0x7b, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x20, - 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x3a, 0x20, 0x70, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, - 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, - 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x28, 0x27, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x27, 0x2c, 0x20, 0x73, 0x61, - 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x6e, 0x6f, - 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x73, 0x20, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x65, - 0x64, 0x2e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, - 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x49, - 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x69, 0x6e, 0x67, 0x20, - 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x20, 0x61, 0x6e, 0x64, 0x20, 0x73, 0x61, 0x76, 0x69, 0x6e, 0x67, 0x20, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x27, 
0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x22, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x22, 0x3a, 0x20, 0x7b, 0x20, 0x73, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, - 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x27, - 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x73, 0x27, 0x2c, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, - 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x74, 0x65, 0x73, 0x20, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x2e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x49, 0x6e, + 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x69, 0x6e, 0x67, 0x20, 0x4c, + 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x20, + 0x61, 0x6e, 0x64, 0x20, 0x73, 0x61, 0x76, 0x69, 0x6e, 0x67, 0x20, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x27, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x22, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x22, 0x3a, 0x20, 0x7b, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x3a, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, + 0x72, 0x6f, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x27, 0x75, + 0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x73, 0x27, 0x2c, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, + 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x54, 0x6f, 0x44, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, + 0x6f, 0x67, 0x28, 0x27, 0x52, 0x65, 0x73, 0x65, 0x74, 0x69, 0x6e, 0x67, + 0x20, 0x74, 0x68, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x74, + 0x6f, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x27, 0x29, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x65, 0x64, 0x55, 0x73, 0x65, 
0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x61, 0x6d, + 0x65, 0x20, 0x3d, 0x20, 0x27, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x27, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x20, 0x3d, 0x20, 0x73, 0x61, 0x76, 0x65, 0x64, + 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5b, 0x27, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x27, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x74, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, + 0x20, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x3a, 0x20, 0x27, 0x27, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, + 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x3a, + 0x20, 0x5b, 0x5d, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x54, 0x6f, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, - 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x52, 0x65, 0x73, 0x65, 0x74, 0x69, 0x6e, - 0x67, 0x20, 0x74, 0x68, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, - 0x74, 0x6f, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x27, 0x29, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, - 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x61, - 0x6d, 0x65, 0x20, 0x3d, 0x20, 0x27, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x27, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x2e, 0x64, 0x61, 0x74, 0x61, 0x20, 0x3d, 0x20, 0x73, 0x61, 0x76, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x6c, + 0x79, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 
0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x65, 0x74, 0x54, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x28, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, + 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x41, + 0x70, 0x70, 0x6c, 0x79, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5b, 0x27, 0x64, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x27, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x74, - 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, - 0x3d, 0x20, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, - 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, - 0x65, 0x64, 0x3a, 0x20, 0x27, 0x27, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x74, 0x2e, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x2c, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, - 0x3a, 0x20, 0x5b, 0x5d, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x54, 0x6f, 0x44, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x6e, 0x64, 0x41, 0x70, 0x70, - 0x6c, 0x79, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x54, 0x6f, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x28, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x41, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, - 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x6e, - 0x64, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, - 0x76, 0x65, 0x64, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x67, 0x65, 0x74, 0x20, 0x61, 0x75, 0x74, - 0x6f, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x6c, 0x61, 0x73, 0x74, 0x20, - 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, 0x65, 
0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, - 0x6c, 0x61, 0x73, 0x74, 0x55, 0x73, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x67, 0x65, 0x74, - 0x44, 0x61, 0x74, 0x61, 0x41, 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x28, 0x27, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x73, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x27, 0x29, 0x0a, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6c, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x6e, 0x64, + 0x41, 0x70, 0x70, 0x6c, 0x79, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, + 0x65, 0x64, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x67, 0x65, 0x74, 0x20, 0x61, 0x75, 0x74, 0x6f, + 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x6c, 0x61, 0x73, 0x74, 0x20, 0x75, + 0x73, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x73, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, - 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, - 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, - 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x2c, 0x20, 0x72, 0x65, 0x73, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x27, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, - 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x6c, 0x61, 0x73, - 0x74, 0x55, 0x73, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, - 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x4e, 0x6f, 0x20, - 0x61, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, - 0x2c, 0x20, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x20, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, - 0x2f, 0x20, 0x6e, 0x6f, 0x20, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, - 0x65, 0x64, 0x20, 0x6c, 0x61, 0x73, 0x74, 0x20, 0x75, 0x73, 0x65, 0x64, - 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x77, 0x61, - 0x73, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x2c, 0x20, 0x73, 0x6f, 0x20, - 0x6c, 0x6f, 0x61, 0x64, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x64, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x61, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x67, 0x65, 0x74, 0x44, + 0x61, 0x74, 0x61, 0x41, 0x73, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, + 0x27, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x73, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x27, 0x29, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6c, 0x61, + 0x73, 0x74, 0x55, 0x73, 0x65, 0x64, 0x54, 
0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, + 0x6f, 0x67, 0x28, 0x27, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, + 0x64, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x66, + 0x6f, 0x75, 0x6e, 0x64, 0x2c, 0x20, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x27, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, + 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x6c, 0x61, 0x73, 0x74, + 0x55, 0x73, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, + 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x4e, 0x6f, 0x20, 0x61, + 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, 0x74, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x2c, + 0x20, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x27, + 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, + 0x20, 0x6e, 0x6f, 0x20, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, + 0x64, 0x20, 0x6c, 0x61, 0x73, 0x74, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, + 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x77, 0x61, 0x73, + 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x2c, 0x20, 0x73, 0x6f, 0x20, 0x6c, + 0x6f, 0x61, 0x64, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x54, 0x6f, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, + 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, + 0x41, 0x70, 0x70, 0x6c, 0x79, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x74, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x54, 0x6f, 0x44, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, - 0x27, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, - 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 
0x65, 0x72, 0x54, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x73, + 0x6c, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x2f, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, + 0x6f, 0x67, 0x28, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, + 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x63, + 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x2f, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, - 0x6c, 0x6f, 0x67, 0x28, 0x73, 0x61, 0x76, 0x65, 0x64, 0x55, 0x73, 0x65, - 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, + 0x65, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, + 0x65, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, - 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, - 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, - 0x76, 0x65, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, - 0x28, 0x27, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x41, - 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x2e, 0x2e, 0x2e, 0x27, 0x29, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, - 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x27, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x27, 0x29, 0x20, 0x7b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x77, 0x65, - 0x20, 0x64, 0x6f, 0x6e, 0x27, 0x74, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20, - 0x74, 0x6f, 0x20, 0x73, 0x61, 0x76, 0x65, 0x20, 0x6f, 0x76, 0x65, 0x72, - 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x74, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x20, 0x73, 0x6f, 0x20, 0x6c, 0x65, - 0x74, 0x27, 0x73, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x20, 0x61, - 0x20, 0x6e, 0x65, 0x77, 0x20, 0x6f, 0x6e, 0x65, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x6e, 0x65, 0x77, - 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x20, 0x3d, 0x20, 0x27, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x2d, 0x27, 0x20, 0x2b, 0x20, 0x44, 0x61, 0x74, - 0x65, 0x2e, 0x6e, 0x6f, 0x77, 0x28, 0x29, 0x2e, 
0x74, 0x6f, 0x53, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x28, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x6e, 0x65, 0x77, 0x54, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x27, - 0x6e, 0x61, 0x6d, 0x65, 0x27, 0x3a, 0x20, 0x6e, 0x65, 0x77, 0x54, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x2c, 0x20, - 0x27, 0x64, 0x61, 0x74, 0x61, 0x27, 0x3a, 0x20, 0x7b, 0x20, 0x27, 0x73, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x27, 0x3a, 0x20, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, - 0x27, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x27, 0x3a, 0x20, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, - 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x27, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, + 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x2e, 0x2e, 0x2e, 0x27, 0x29, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x27, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x27, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x77, 0x65, 0x20, + 0x64, 0x6f, 0x6e, 0x27, 0x74, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20, 0x74, + 0x6f, 0x20, 0x73, 0x61, 0x76, 0x65, 0x20, 0x6f, 0x76, 0x65, 0x72, 0x20, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x20, 0x73, 0x6f, 0x20, 0x6c, 0x65, 0x74, + 0x27, 0x73, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x20, 0x61, 0x20, + 0x6e, 0x65, 0x77, 0x20, 0x6f, 0x6e, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x6e, 0x65, 0x77, 0x54, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x20, + 0x3d, 0x20, 0x27, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x2d, 0x27, 0x20, 0x2b, 0x20, 0x44, 0x61, 0x74, 0x65, + 0x2e, 0x6e, 0x6f, 0x77, 0x28, 0x29, 0x2e, 0x74, 0x6f, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x28, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x6e, 0x65, 0x77, 0x54, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x27, 0x6e, + 0x61, 0x6d, 0x65, 0x27, 0x3a, 0x20, 0x6e, 0x65, 0x77, 0x54, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x27, + 0x64, 0x61, 0x74, 0x61, 0x27, 0x3a, 0x20, 0x7b, 0x20, 0x27, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x27, 0x3a, 0x20, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x27, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x27, 0x3a, 0x20, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x20, + 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, + 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, + 0x53, 0x61, 0x76, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x73, 0x20, 0x27, 0x20, + 0x2b, 0x20, 0x6e, 0x65, 0x77, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x61, 0x76, 0x65, 0x20, + 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x75, 0x74, 0x6f, 0x73, + 0x61, 0x76, 0x65, 0x20, 0x73, 0x6c, 0x6f, 0x74, 
0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x28, 0x27, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x73, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x27, 0x2c, 0x20, + 0x6e, 0x65, 0x77, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x29, + 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, + 0x20, 0x61, 0x6e, 0x64, 0x20, 0x6c, 0x6f, 0x61, 0x64, 0x20, 0x69, 0x74, + 0x20, 0x62, 0x61, 0x63, 0x6b, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, 0x70, + 0x70, 0x6c, 0x79, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x6c, 0x79, + 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x64, 0x28, 0x29, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x6f, 0x6d, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x27, 0x75, 0x73, 0x65, 0x72, + 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x5f, 0x6c, + 0x61, 0x73, 0x74, 0x27, 0x2c, 0x20, 0x7b, 0x20, 0x27, 0x6e, 0x61, 0x6d, + 0x65, 0x27, 0x3a, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x2c, + 0x20, 0x27, 0x64, 0x61, 0x74, 0x61, 0x27, 0x3a, 0x20, 0x7b, 0x20, 0x27, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x27, 0x3a, 0x20, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, + 0x20, 0x27, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x27, 0x3a, 0x20, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, + 0x7d, 0x20, 0x7d, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, - 0x27, 0x53, 0x61, 0x76, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x73, 0x20, 0x27, - 0x20, 0x2b, 0x20, 0x6e, 0x65, 0x77, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x61, 0x76, 0x65, - 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x75, 0x74, 0x6f, - 0x73, 0x61, 0x76, 0x65, 0x20, 0x73, 0x6c, 0x6f, 0x74, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x44, - 0x61, 0x74, 0x61, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x28, 0x27, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x73, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x27, 0x2c, - 0x20, 0x6e, 0x65, 0x77, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, - 0x2f, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x6c, 0x6f, 0x61, 0x64, 0x20, 0x69, - 0x74, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, - 0x70, 0x70, 0x6c, 0x79, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x27, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x69, 0x6e, 
0x67, 0x20, 0x66, 0x6f, + 0x72, 0x20, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x64, 0x20, + 0x6c, 0x61, 0x73, 0x74, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x64, 0x28, 0x29, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, - 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x5f, 0x73, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x46, 0x72, 0x6f, - 0x6d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x28, 0x27, 0x75, 0x73, 0x65, - 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x5f, - 0x6c, 0x61, 0x73, 0x74, 0x27, 0x2c, 0x20, 0x7b, 0x20, 0x27, 0x6e, 0x61, - 0x6d, 0x65, 0x27, 0x3a, 0x20, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, - 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, - 0x2c, 0x20, 0x27, 0x64, 0x61, 0x74, 0x61, 0x27, 0x3a, 0x20, 0x7b, 0x20, - 0x27, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x27, 0x3a, 0x20, 0x73, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x2c, 0x20, 0x27, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x27, 0x3a, 0x20, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x20, 0x7d, 0x20, 0x7d, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, - 0x28, 0x27, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x20, 0x66, - 0x6f, 0x72, 0x20, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x64, - 0x20, 0x6c, 0x61, 0x73, 0x74, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x27, 0x29, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x4c, 0x6f, 0x61, 0x64, 0x41, 0x6e, 0x64, 0x41, 0x70, 0x70, - 0x6c, 0x79, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x64, 0x28, - 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2a, 0x20, 0x45, 0x4e, - 0x44, 0x3a, 0x20, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x66, - 0x6f, 0x72, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x70, - 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x20, 0x69, 0x6e, 0x20, 0x62, 0x72, - 0x6f, 0x77, 0x73, 0x65, 0x72, 0x73, 0x20, 0x4c, 0x6f, 0x63, 0x61, 0x6c, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x20, 0x2a, 0x2f, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c, 0x6c, - 0x61, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x73, 0x20, 0x3d, 0x20, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x0a, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x63, 0x75, 0x72, 0x72, - 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x63, 0x6f, 
0x6d, 0x70, 0x6c, - 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x3f, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, - 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6e, 0x67, 0x20, 0x3d, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, - 0x65, 0x64, 0x28, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x20, 0x21, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x68, 0x61, 0x73, 0x20, 0x74, - 0x68, 0x65, 0x20, 0x75, 0x73, 0x65, 0x72, 0x20, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x20, 0x61, 0x20, 0x63, 0x68, 0x61, 0x74, 0x3f, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, - 0x61, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x20, 0x3d, 0x20, - 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x28, 0x28, 0x29, 0x20, - 0x3d, 0x3e, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x3e, - 0x20, 0x30, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, - 0x73, 0x74, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x28, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x29, 0x20, 0x3d, - 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, - 0x3d, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, - 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x74, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x28, 0x73, 0x74, 0x72, - 0x2c, 0x20, 0x65, 0x78, 0x74, 0x72, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x73, 0x65, 0x74, 0x74, - 0x69, 0x6e, 0x67, 0x73, 0x20, 0x3d, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x65, 0x78, 0x74, 0x72, - 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, - 0x2e, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2c, 0x20, 0x2e, - 0x2e, 0x2e, 0x65, 0x78, 0x74, 0x72, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, - 0x75, 0x72, 0x6e, 0x20, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x28, 0x73, - 0x74, 0x72, 0x29, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x41, - 0x6c, 0x6c, 0x28, 0x2f, 0x5c, 0x7b, 0x5c, 0x7b, 0x28, 0x2e, 0x2a, 0x3f, - 0x29, 0x5c, 0x7d, 0x5c, 0x7d, 0x2f, 0x67, 0x2c, 0x20, 
0x28, 0x5f, 0x2c, - 0x20, 0x6b, 0x65, 0x79, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x74, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x28, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x5b, 0x6b, 0x65, 0x79, 0x5d, 0x29, 0x29, 0x3b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x61, 0x73, - 0x79, 0x6e, 0x63, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x20, 0x72, 0x75, 0x6e, 0x4c, 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, 0x72, - 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, 0x63, 0x68, 0x61, 0x72, 0x29, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, - 0x73, 0x74, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x20, 0x3d, 0x20, 0x5b, 0x5d, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, - 0x20, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x20, 0x3d, 0x20, 0x73, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x74, 0x68, 0x72, 0x6f, 0x77, 0x20, 0x6e, 0x65, 0x77, - 0x20, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x22, 0x61, 0x6c, 0x72, 0x65, - 0x61, 0x64, 0x79, 0x20, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x22, - 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, - 0x6e, 0x65, 0x77, 0x20, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x43, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x28, 0x29, 0x3b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x77, 0x61, - 0x69, 0x74, 0x20, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, - 0x75, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, - 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x6c, 0x6c, 0x61, - 0x6d, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, 0x7b, 0x20, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x3a, 0x20, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x29, 0x29, 0x20, 0x7b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, - 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x3d, 0x20, 0x63, 0x68, 0x75, 0x6e, - 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x73, 0x74, 0x6f, 0x70, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x77, 0x68, 0x69, 0x6c, - 0x65, 0x20, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x6e, 0x67, - 0x74, 0x68, 0x20, 0x3e, 0x20, 0x30, 0x20, 0x26, 0x26, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x73, 0x5b, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x6e, 
0x67, 0x74, 0x68, - 0x20, 0x2d, 0x20, 0x31, 0x5d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x28, 0x2f, 0x5c, 0x6e, 0x24, - 0x2f, 0x29, 0x20, 0x21, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x6f, 0x70, 0x28, 0x29, 0x3b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x28, 0x5b, 0x2e, 0x2e, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x2c, 0x20, 0x5b, 0x63, 0x68, 0x61, 0x72, 0x2c, 0x20, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x73, 0x5d, 0x5d, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, - 0x6c, 0x6f, 0x67, 0x28, 0x22, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, - 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, - 0x3a, 0x20, 0x27, 0x22, 0x2c, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, - 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x6d, 0x61, - 0x70, 0x28, 0x6d, 0x73, 0x67, 0x20, 0x3d, 0x3e, 0x20, 0x6d, 0x73, 0x67, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x2e, 0x6a, 0x6f, - 0x69, 0x6e, 0x28, 0x27, 0x27, 0x29, 0x2c, 0x20, 0x22, 0x27, 0x2c, 0x20, - 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x3a, 0x20, 0x22, 0x2c, 0x20, - 0x64, 0x61, 0x74, 0x61, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x73, 0x2e, 0x70, 0x75, 0x73, 0x68, 0x28, 0x64, 0x61, 0x74, 0x61, 0x29, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x20, 0x3d, 0x20, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x3b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, - 0x20, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x69, - 0x6d, 0x61, 0x67, 0x65, 0x20, 0x26, 0x26, 0x20, 0x21, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6d, 0x6f, 0x64, 0x61, 0x6c, - 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x28, 0x22, 0x54, - 0x68, 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, 0x77, 0x61, - 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, - 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x6d, 0x75, 0x6c, 0x74, 0x69, - 0x6d, 0x6f, 0x64, 0x61, 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, - 0x20, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x20, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x20, 0x63, 0x61, 0x6e, 0x27, 0x74, 0x20, 0x62, - 0x65, 0x20, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x2e, 0x22, 0x29, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 
0x61, 0x6e, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x28, - 0x5b, 0x2e, 0x2e, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2c, - 0x20, 0x5b, 0x63, 0x68, 0x61, 0x72, 0x2c, 0x20, 0x63, 0x75, 0x72, 0x72, - 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x5d, - 0x5d, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, - 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, - 0x20, 0x28, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x74, 0x69, 0x6d, 0x69, 0x6e, - 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x53, 0x74, 0x61, - 0x74, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x6e, 0x75, 0x6c, - 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x65, 0x6e, 0x64, 0x20, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, - 0x74, 0x20, 0x63, 0x68, 0x61, 0x74, 0x20, 0x3d, 0x20, 0x61, 0x73, 0x79, - 0x6e, 0x63, 0x20, 0x28, 0x6d, 0x73, 0x67, 0x29, 0x20, 0x3d, 0x3e, 0x20, - 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, - 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, - 0x20, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x2e, 0x2e, 0x27, - 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, - 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x28, 0x5b, 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, - 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x2c, 0x20, 0x5b, 0x22, 0x7b, - 0x7b, 0x75, 0x73, 0x65, 0x72, 0x7d, 0x7d, 0x22, 0x2c, 0x20, 0x6d, 0x73, - 0x67, 0x5d, 0x5d, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x6c, 0x65, 0x74, 0x20, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x20, 0x3d, - 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x28, 0x73, 0x65, + 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2a, 0x20, 0x45, 0x4e, 0x44, + 0x3a, 0x20, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x66, 0x6f, + 0x72, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x70, 0x72, + 0x6f, 0x6d, 0x70, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x20, 0x69, 0x6e, 0x20, 0x62, 0x72, 0x6f, + 0x77, 0x73, 0x65, 0x72, 0x73, 0x20, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x20, 0x2a, 0x2f, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c, 0x6c, 0x61, + 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x73, 0x20, 0x3d, 
0x20, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x6c, 0x79, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x69, 0x6f, 0x6e, 0x3f, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6e, 0x67, 0x20, 0x3d, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, + 0x64, 0x28, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x20, 0x21, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x68, 0x61, 0x73, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x75, 0x73, 0x65, 0x72, 0x20, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x65, 0x64, 0x20, 0x61, 0x20, 0x63, 0x68, 0x61, 0x74, 0x3f, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, 0x61, + 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x20, 0x3d, 0x20, 0x63, + 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x28, 0x28, 0x29, 0x20, 0x3d, + 0x3e, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x3e, 0x20, + 0x30, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, + 0x74, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x28, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x29, 0x20, 0x3d, 0x3e, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, + 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x69, + 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x74, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x28, 0x73, 0x74, 0x72, 0x2c, + 0x20, 0x65, 0x78, 0x74, 0x72, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x73, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x20, 0x3d, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x65, 0x78, 0x74, 0x72, 0x61, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2c, 
0x20, 0x2e, 0x2e, + 0x2e, 0x65, 0x78, 0x74, 0x72, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x28, 0x73, 0x74, + 0x72, 0x29, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x41, 0x6c, + 0x6c, 0x28, 0x2f, 0x5c, 0x7b, 0x5c, 0x7b, 0x28, 0x2e, 0x2a, 0x3f, 0x29, + 0x5c, 0x7d, 0x5c, 0x7d, 0x2f, 0x67, 0x2c, 0x20, 0x28, 0x5f, 0x2c, 0x20, + 0x6b, 0x65, 0x79, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x74, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x28, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x5b, 0x6b, 0x65, 0x79, 0x5d, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x61, 0x73, 0x79, + 0x6e, 0x63, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, + 0x72, 0x75, 0x6e, 0x4c, 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, 0x72, 0x6f, + 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, 0x63, 0x68, 0x61, 0x72, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, + 0x74, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x73, 0x20, 0x3d, 0x20, 0x5b, 0x5d, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, + 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x20, 0x3d, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x20, 0x7b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x3a, 0x20, 0x6d, 0x73, 0x67, 0x2c, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x3a, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x2e, 0x66, 0x6c, 0x61, 0x74, 0x4d, 0x61, 0x70, 0x28, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, - 0x5b, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x64, 0x61, 0x74, 0x61, 0x5d, - 0x29, 0x20, 0x3d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x0a, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x74, 0x68, 0x72, 0x6f, 0x77, 0x20, 0x6e, 0x65, 0x77, 0x20, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x22, 0x61, 0x6c, 0x72, 0x65, 0x61, + 0x64, 0x79, 0x20, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0x29, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, + 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x6e, + 0x65, 0x77, 0x20, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x28, 0x29, 0x3b, 
0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x77, 0x61, 0x69, + 0x74, 0x20, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, 0x75, + 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x28, + 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x6c, 0x6c, 0x61, 0x6d, + 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, 0x7b, 0x20, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x3a, 0x20, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x29, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, + 0x64, 0x61, 0x74, 0x61, 0x20, 0x3d, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x73, 0x74, 0x6f, 0x70, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x77, 0x68, 0x69, 0x6c, 0x65, + 0x20, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x20, 0x3e, 0x20, 0x30, 0x20, 0x26, 0x26, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x75, 0x72, + 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, + 0x5b, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, + 0x2d, 0x20, 0x31, 0x5d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x28, 0x2f, 0x5c, 0x6e, 0x24, 0x2f, + 0x29, 0x20, 0x21, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, - 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x3a, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x2e, 0x69, 0x73, 0x41, - 0x72, 0x72, 0x61, 0x79, 0x28, 0x64, 0x61, 0x74, 0x61, 0x29, 0x20, 0x3f, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x6d, 0x61, 0x70, 0x28, 0x6d, 0x73, 0x67, 0x20, 0x3d, 0x3e, 0x20, 0x6d, - 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x2e, - 0x6a, 0x6f, 0x69, 0x6e, 0x28, 0x27, 0x27, 0x29, 0x2e, 0x72, 0x65, 0x70, - 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5e, 0x5c, 0x73, 0x2f, 0x2c, 0x20, - 0x27, 0x27, 0x29, 0x20, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x73, 0x2e, 0x70, 0x6f, 0x70, 0x28, 0x29, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x28, 0x5b, 0x2e, 0x2e, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, + 0x79, 0x2c, 0x20, 0x5b, 0x63, 0x68, 0x61, 0x72, 0x2c, 0x20, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 
0x61, 0x67, 0x65, + 0x73, 0x5d, 0x5d, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, + 0x6f, 0x67, 0x28, 0x22, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, + 0x6f, 0x6e, 0x20, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x3a, + 0x20, 0x27, 0x22, 0x2c, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x6d, 0x61, 0x70, + 0x28, 0x6d, 0x73, 0x67, 0x20, 0x3d, 0x3e, 0x20, 0x6d, 0x73, 0x67, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x2e, 0x6a, 0x6f, 0x69, + 0x6e, 0x28, 0x27, 0x27, 0x29, 0x2c, 0x20, 0x22, 0x27, 0x2c, 0x20, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x3a, 0x20, 0x22, 0x2c, 0x20, 0x64, + 0x61, 0x74, 0x61, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x75, 0x72, + 0x72, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, + 0x2e, 0x70, 0x75, 0x73, 0x68, 0x28, 0x64, 0x61, 0x74, 0x61, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, + 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x20, 0x3d, 0x20, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, + 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x20, 0x26, 0x26, 0x20, 0x21, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6d, 0x6f, 0x64, 0x61, 0x6c, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x28, 0x22, 0x54, 0x68, + 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, 0x77, 0x61, 0x73, + 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, + 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6d, + 0x6f, 0x64, 0x61, 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x20, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x20, 0x63, 0x61, 0x6e, 0x27, 0x74, 0x20, 0x62, 0x65, + 0x20, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x2e, 0x22, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x64, 0x61, 0x74, 0x61, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x2e, 0x6a, 0x6f, - 0x69, 0x6e, 0x28, 0x22, 0x5c, 0x6e, 0x22, 0x29, 0x2c, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, - 0x65, 0x64, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x29, 0x20, 0x7b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x72, 0x6f, 0x6d, - 0x70, 0x74, 0x20, 0x3d, 0x20, 0x60, 0x41, 0x20, 0x63, 0x68, 0x61, 0x74, - 0x20, 0x62, 0x65, 0x74, 0x77, 0x65, 0x65, 0x6e, 0x20, 0x61, 0x20, 0x63, - 0x75, 0x72, 0x69, 0x6f, 0x75, 0x73, 0x20, 0x68, 0x75, 0x6d, 0x61, 0x6e, - 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, 0x6e, 0x20, 0x61, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x69, 0x61, 0x6c, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x6c, - 0x6c, 0x69, 0x67, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x61, 0x73, 0x73, 0x69, - 0x73, 0x74, 0x61, 0x6e, 0x74, 0x2e, 0x20, 0x54, 0x68, 
0x65, 0x20, 0x61, - 0x73, 0x73, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 0x67, 0x69, 0x76, - 0x65, 0x73, 0x20, 0x68, 0x65, 0x6c, 0x70, 0x66, 0x75, 0x6c, 0x2c, 0x20, - 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x2c, 0x20, 0x61, 0x6e, - 0x64, 0x20, 0x70, 0x6f, 0x6c, 0x69, 0x74, 0x65, 0x20, 0x61, 0x6e, 0x73, - 0x77, 0x65, 0x72, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, - 0x68, 0x75, 0x6d, 0x61, 0x6e, 0x27, 0x73, 0x20, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x5c, 0x6e, 0x55, 0x53, 0x45, 0x52, - 0x3a, 0x5b, 0x69, 0x6d, 0x67, 0x2d, 0x31, 0x30, 0x5d, 0x24, 0x7b, 0x6d, - 0x73, 0x67, 0x7d, 0x5c, 0x6e, 0x41, 0x53, 0x53, 0x49, 0x53, 0x54, 0x41, - 0x4e, 0x54, 0x3a, 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x61, 0x77, 0x61, 0x69, - 0x74, 0x20, 0x72, 0x75, 0x6e, 0x4c, 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, - 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, - 0x64, 0x3a, 0x20, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x6f, 0x70, - 0x3a, 0x20, 0x5b, 0x22, 0x3c, 0x2f, 0x73, 0x3e, 0x22, 0x2c, 0x20, 0x74, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x28, 0x22, 0x7b, 0x7b, 0x63, - 0x68, 0x61, 0x72, 0x7d, 0x7d, 0x3a, 0x22, 0x29, 0x2c, 0x20, 0x74, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x28, 0x22, 0x7b, 0x7b, 0x75, 0x73, - 0x65, 0x72, 0x7d, 0x7d, 0x3a, 0x22, 0x29, 0x5d, 0x2c, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x2c, 0x20, 0x22, 0x7b, 0x7b, 0x63, 0x68, - 0x61, 0x72, 0x7d, 0x7d, 0x22, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, - 0x20, 0x72, 0x75, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, - 0x6f, 0x6e, 0x20, 0x3d, 0x20, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x20, 0x28, - 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, - 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x61, 0x6c, - 0x72, 0x65, 0x61, 0x64, 0x79, 0x20, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, - 0x67, 0x2e, 0x2e, 0x2e, 0x27, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x7b, 0x20, 0x70, 0x72, - 0x6f, 0x6d, 0x70, 0x74, 0x20, 0x7d, 0x20, 0x3d, 0x20, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x28, 0x5b, - 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x2c, 0x20, 0x5b, 0x22, 0x22, 0x2c, 0x20, 0x70, 0x72, - 0x6f, 0x6d, 0x70, 0x74, 0x5d, 0x5d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20, 
0x72, 0x75, 0x6e, - 0x4c, 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, - 0x2c, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x3a, 0x20, 0x73, 0x6c, - 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x3a, 0x20, 0x5b, 0x5d, 0x2c, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x2c, 0x20, 0x22, 0x22, - 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x74, 0x6f, 0x70, - 0x20, 0x3d, 0x20, 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, - 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x61, 0x62, 0x6f, - 0x72, 0x74, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x6e, 0x75, 0x6c, - 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, - 0x6e, 0x73, 0x74, 0x20, 0x72, 0x65, 0x73, 0x65, 0x74, 0x20, 0x3d, 0x20, - 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x28, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x28, 0x5b, - 0x5d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x70, 0x6c, - 0x6f, 0x61, 0x64, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x20, 0x3d, 0x20, 0x28, - 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x44, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x67, 0x65, 0x74, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, - 0x79, 0x49, 0x64, 0x28, 0x22, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x70, - 0x75, 0x74, 0x22, 0x29, 0x2e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x28, 0x29, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, 0x75, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x67, 0x65, 0x74, 0x45, 0x6c, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x42, 0x79, 0x49, 0x64, 0x28, 0x22, 0x66, 0x69, 0x6c, - 0x65, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x29, 0x2e, 0x61, 0x64, 0x64, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, - 0x72, 0x28, 0x22, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x2c, 0x20, - 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x28, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x20, 
0x3d, 0x20, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x5b, 0x30, 0x5d, 0x3b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, - 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x29, + 0x2e, 0x2e, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2c, 0x20, + 0x5b, 0x63, 0x68, 0x61, 0x72, 0x2c, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x5d, 0x5d, + 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, + 0x28, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, + 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x65, 0x6e, 0x64, 0x20, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x20, 0x63, 0x68, 0x61, 0x74, 0x20, 0x3d, 0x20, 0x61, 0x73, 0x79, 0x6e, + 0x63, 0x20, 0x28, 0x6d, 0x73, 0x67, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x6c, + 0x6f, 0x67, 0x28, 0x27, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x20, + 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x2e, 0x2e, 0x27, 0x29, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x28, 0x5b, 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x2c, 0x20, 0x5b, 0x22, 0x7b, 0x7b, + 0x75, 0x73, 0x65, 0x72, 0x7d, 0x7d, 0x22, 0x2c, 0x20, 0x6d, 0x73, 0x67, + 0x5d, 0x5d, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, + 0x65, 0x74, 0x20, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x20, 0x3d, 0x20, + 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x28, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x3a, 0x20, 0x6d, 0x73, 0x67, 0x2c, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, + 0x3a, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 
0x72, 0x69, + 0x70, 0x74, 0x2e, 0x66, 0x6c, 0x61, 0x74, 0x4d, 0x61, 0x70, 0x28, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x5b, + 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x64, 0x61, 0x74, 0x61, 0x5d, 0x29, + 0x20, 0x3d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, + 0x79, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x72, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x20, 0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x46, 0x69, 0x6c, 0x65, - 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x2e, 0x6f, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x20, 0x3d, 0x20, - 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x28, 0x29, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x2c, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x3a, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x2e, 0x69, 0x73, 0x41, 0x72, + 0x72, 0x61, 0x79, 0x28, 0x64, 0x61, 0x74, 0x61, 0x29, 0x20, 0x3f, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6d, + 0x61, 0x70, 0x28, 0x6d, 0x73, 0x67, 0x20, 0x3d, 0x3e, 0x20, 0x6d, 0x73, + 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x2e, 0x6a, + 0x6f, 0x69, 0x6e, 0x28, 0x27, 0x27, 0x29, 0x2e, 0x72, 0x65, 0x70, 0x6c, + 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5e, 0x5c, 0x73, 0x2f, 0x2c, 0x20, 0x27, + 0x27, 0x29, 0x20, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, + 0x61, 0x74, 0x61, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x2e, 0x6a, 0x6f, 0x69, + 0x6e, 0x28, 0x22, 0x5c, 0x6e, 0x22, 0x29, 0x2c, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x72, 0x6f, 0x6d, 0x70, + 0x74, 0x20, 0x3d, 0x20, 0x60, 0x41, 0x20, 0x63, 0x68, 0x61, 0x74, 0x20, + 0x62, 0x65, 0x74, 0x77, 0x65, 0x65, 0x6e, 0x20, 0x61, 0x20, 0x63, 0x75, + 0x72, 0x69, 0x6f, 0x75, 0x73, 0x20, 0x68, 0x75, 0x6d, 0x61, 0x6e, 0x20, + 0x61, 0x6e, 0x64, 0x20, 0x61, 0x6e, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x69, 0x61, 0x6c, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x6c, 0x6c, + 0x69, 0x67, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x61, 0x73, 0x73, 0x69, 0x73, + 0x74, 0x61, 0x6e, 0x74, 0x2e, 0x20, 0x54, 0x68, 0x65, 0x20, 0x61, 0x73, + 0x73, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 0x67, 0x69, 0x76, 0x65, + 0x73, 0x20, 0x68, 0x65, 0x6c, 0x70, 0x66, 0x75, 0x6c, 0x2c, 
0x20, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x2c, 0x20, 0x61, 0x6e, 0x64, + 0x20, 0x70, 0x6f, 0x6c, 0x69, 0x74, 0x65, 0x20, 0x61, 0x6e, 0x73, 0x77, + 0x65, 0x72, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x68, + 0x75, 0x6d, 0x61, 0x6e, 0x27, 0x73, 0x20, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x5c, 0x6e, 0x55, 0x53, 0x45, 0x52, 0x3a, + 0x5b, 0x69, 0x6d, 0x67, 0x2d, 0x31, 0x30, 0x5d, 0x24, 0x7b, 0x6d, 0x73, + 0x67, 0x7d, 0x5c, 0x6e, 0x41, 0x53, 0x53, 0x49, 0x53, 0x54, 0x41, 0x4e, + 0x54, 0x3a, 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, + 0x20, 0x72, 0x75, 0x6e, 0x4c, 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, 0x72, + 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, + 0x3a, 0x20, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x3a, + 0x20, 0x5b, 0x22, 0x3c, 0x2f, 0x73, 0x3e, 0x22, 0x2c, 0x20, 0x74, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x28, 0x22, 0x7b, 0x7b, 0x63, 0x68, + 0x61, 0x72, 0x7d, 0x7d, 0x3a, 0x22, 0x29, 0x2c, 0x20, 0x74, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x28, 0x22, 0x7b, 0x7b, 0x75, 0x73, 0x65, + 0x72, 0x7d, 0x7d, 0x3a, 0x22, 0x29, 0x5d, 0x2c, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x2c, 0x20, 0x22, 0x7b, 0x7b, 0x63, 0x68, 0x61, + 0x72, 0x7d, 0x7d, 0x22, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, + 0x72, 0x75, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, + 0x6e, 0x20, 0x3d, 0x20, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x20, 0x28, 0x29, + 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, + 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, + 0x6f, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x67, 0x28, 0x27, 0x61, 0x6c, 0x72, + 0x65, 0x61, 0x64, 0x79, 0x20, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x2e, 0x2e, 0x27, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x7b, 0x20, 0x70, 0x72, 0x6f, + 0x6d, 0x70, 0x74, 0x20, 0x7d, 0x20, 0x3d, 0x20, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x28, 0x5b, 0x2e, + 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x2c, 0x20, 0x5b, 0x22, 0x22, 0x2c, 0x20, 0x70, 0x72, 0x6f, + 0x6d, 0x70, 0x74, 0x5d, 0x5d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20, 0x72, 0x75, 0x6e, 0x4c, + 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, + 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 
0x61, 0x6c, + 0x75, 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x3a, 0x20, 0x73, 0x6c, 0x6f, + 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x3a, 0x20, 0x5b, 0x5d, 0x2c, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x2c, 0x20, 0x22, 0x22, 0x29, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x20, + 0x3d, 0x20, 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x61, 0x62, 0x6f, 0x72, + 0x74, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x6e, 0x75, 0x6c, 0x6c, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x74, 0x20, 0x72, 0x65, 0x73, 0x65, 0x74, 0x20, 0x3d, 0x20, 0x28, + 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x28, 0x65, 0x29, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x28, 0x5b, 0x5d, + 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x70, 0x6c, 0x6f, + 0x61, 0x64, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x20, 0x3d, 0x20, 0x28, 0x65, + 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x67, 0x65, 0x74, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x79, + 0x49, 0x64, 0x28, 0x22, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x22, 0x29, 0x2e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x28, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x67, 0x65, 0x74, 0x45, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x42, 0x79, 0x49, 0x64, 0x28, 0x22, 0x66, 0x69, 0x6c, 0x65, + 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x29, 0x2e, 0x61, 0x64, 0x64, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, + 0x28, 0x22, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x2c, 0x20, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x28, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x20, 0x3d, 0x20, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, + 0x66, 0x69, 0x6c, 0x65, 0x73, 0x5b, 0x30, 0x5d, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 
0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x20, 0x3d, 0x20, 0x72, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x2e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x3b, 0x0a, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x20, 0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x46, 0x69, 0x6c, 0x65, 0x52, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x2e, 0x6f, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x20, 0x3d, 0x20, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x28, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, + 0x5f, 0x64, 0x61, 0x74, 0x61, 0x20, 0x3d, 0x20, 0x72, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x2e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x3a, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, - 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, - 0x65, 0x64, 0x3a, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, - 0x74, 0x61, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x0a, 0x20, + 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x3a, 0x20, 0x5b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7b, + 0x20, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, + 0x5f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x28, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x69, 0x6d, 0x61, 0x67, + 0x65, 0x5c, 0x2f, 0x5b, 0x5e, 0x3b, 0x5d, 0x2b, 0x3b, 0x62, 0x61, 0x73, + 0x65, 0x36, 0x34, 0x2c, 0x2f, 0x2c, 0x20, 0x27, 0x27, 0x29, 0x2c, 0x20, + 0x69, 0x64, 0x3a, 0x20, 0x31, 0x30, 0x20, 0x7d, 0x5d, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x6d, 0x61, 0x67, + 0x65, 0x20, 0x3d, 0x20, 0x74, 0x72, 0x75, 0x65, 0x3b, 0x0a, 0x20, 
0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x41, 0x73, 0x44, 0x61, 0x74, + 0x61, 0x55, 0x52, 0x4c, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x46, 0x69, 0x6c, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, + 0x28, 0x22, 0x22, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, + 0x20, 0x3d, 0x20, 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x6f, 0x70, + 0x28, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x68, 0x61, 0x74, 0x28, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x22, 0x22, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x65, + 0x6e, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x73, 0x20, + 0x3d, 0x20, 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x29, 0x20, 0x3d, 0x3e, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, + 0x66, 0x20, 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x77, 0x68, 0x69, + 0x63, 0x68, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x31, 0x33, 0x20, 0x26, 0x26, + 0x20, 0x21, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x73, 0x68, 0x69, 0x66, + 0x74, 0x4b, 0x65, 0x79, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, + 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x6f, 0x72, 0x6d, + 0x20, 0x6f, 0x6e, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x3d, 0x24, 0x7b, + 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x7d, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x74, 0x65, 0x78, 0x74, 0x61, 0x72, 0x65, 0x61, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, - 0x64, 0x61, 0x74, 0x61, 0x3a, 0x20, 0x5b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x3d, + 0x24, 0x7b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3f, 0x20, 0x22, 0x6c, 0x6f, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x22, 0x20, 0x3a, 0x20, 0x6e, 0x75, 
0x6c, + 0x6c, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x3d, 0x24, 0x7b, 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x20, 0x3d, 0x20, 0x65, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6f, 0x6e, + 0x6b, 0x65, 0x79, 0x70, 0x72, 0x65, 0x73, 0x73, 0x3d, 0x24, 0x7b, 0x65, + 0x6e, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x73, 0x7d, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x3d, 0x22, 0x53, 0x61, 0x79, 0x20, 0x73, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x2e, 0x2e, 0x2e, 0x22, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x7b, 0x20, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x20, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, - 0x63, 0x65, 0x28, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x69, 0x6d, 0x61, - 0x67, 0x65, 0x5c, 0x2f, 0x5b, 0x5e, 0x3b, 0x5d, 0x2b, 0x3b, 0x62, 0x61, - 0x73, 0x65, 0x36, 0x34, 0x2c, 0x2f, 0x2c, 0x20, 0x27, 0x27, 0x29, 0x2c, - 0x20, 0x69, 0x64, 0x3a, 0x20, 0x31, 0x30, 0x20, 0x7d, 0x5d, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x6d, 0x61, - 0x67, 0x65, 0x20, 0x3d, 0x20, 0x74, 0x72, 0x75, 0x65, 0x3b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x41, 0x73, 0x44, 0x61, - 0x74, 0x61, 0x55, 0x52, 0x4c, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, - 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x20, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x6e, 0x70, 0x75, - 0x74, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, - 0x6c, 0x28, 0x22, 0x22, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x75, 0x62, 0x6d, 0x69, - 0x74, 0x20, 0x3d, 0x20, 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x6f, - 0x70, 0x28, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x68, 0x61, 0x74, 0x28, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29, 0x3b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x22, - 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, - 0x65, 0x6e, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 
0x73, - 0x20, 0x3d, 0x20, 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x29, 0x20, 0x3d, - 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x69, 0x66, 0x20, 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x77, 0x68, - 0x69, 0x63, 0x68, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x31, 0x33, 0x20, 0x26, - 0x26, 0x20, 0x21, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x73, 0x68, 0x69, - 0x66, 0x74, 0x4b, 0x65, 0x79, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x75, 0x62, 0x6d, 0x69, - 0x74, 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, - 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x6f, 0x72, - 0x6d, 0x20, 0x6f, 0x6e, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x3d, 0x24, - 0x7b, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x7d, 0x3e, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, - 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3c, 0x74, 0x65, 0x78, 0x74, 0x61, 0x72, 0x65, 0x61, 0x0a, + 0x20, 0x20, 0x72, 0x6f, 0x77, 0x73, 0x3d, 0x32, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, - 0x3d, 0x24, 0x7b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3f, 0x20, 0x22, 0x6c, - 0x6f, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x22, 0x20, 0x3a, 0x20, 0x6e, 0x75, - 0x6c, 0x6c, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, - 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x20, 0x3d, 0x20, 0x65, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6f, - 0x6e, 0x6b, 0x65, 0x79, 0x70, 0x72, 0x65, 0x73, 0x73, 0x3d, 0x24, 0x7b, - 0x65, 0x6e, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x73, - 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, - 0x6c, 0x64, 0x65, 0x72, 0x3d, 0x22, 0x53, 0x61, 0x79, 0x20, 0x73, 0x6f, - 0x6d, 0x65, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x2e, 0x2e, 0x2e, 0x22, 0x0a, + 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x72, 0x6f, 0x77, 0x73, 0x3d, 0x32, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x7d, 0x22, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, 0x63, 0x6c, 0x61, + 0x73, 0x73, 0x3d, 0x22, 0x72, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, - 
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, - 0x7b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x7d, 0x22, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, - 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, 0x63, 0x6c, - 0x61, 0x73, 0x73, 0x3d, 0x22, 0x72, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x74, 0x79, 0x70, - 0x65, 0x3d, 0x22, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x22, 0x20, 0x64, - 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x67, 0x65, + 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x74, 0x79, 0x70, 0x65, + 0x3d, 0x22, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x22, 0x20, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x7d, 0x3e, 0x53, 0x65, 0x6e, 0x64, 0x3c, 0x2f, 0x62, 0x75, 0x74, + 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, + 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x75, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x7d, 0x3e, + 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x20, 0x49, 0x6d, 0x61, 0x67, 0x65, + 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, + 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, + 0x6b, 0x3d, 0x24, 0x7b, 0x73, 0x74, 0x6f, 0x70, 0x7d, 0x20, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x21, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x7d, 0x3e, 0x53, 0x65, 0x6e, 0x64, 0x3c, 0x2f, 0x62, 0x75, + 0x75, 0x65, 0x7d, 0x3e, 0x53, 0x74, 0x6f, 0x70, 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, - 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x7d, - 0x3e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x20, 0x49, 0x6d, 0x61, 0x67, - 0x65, 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x72, 0x65, 0x73, 0x65, 0x74, 0x7d, 0x3e, 0x52, 0x65, 0x73, 0x65, 0x74, + 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, + 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x2f, 0x66, 0x6f, 0x72, 0x6d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x43, + 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x75, + 0x62, 0x6d, 0x69, 0x74, 0x20, 0x3d, 0x20, 0x28, 0x65, 0x29, 0x20, 0x3d, + 0x3e, 
0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x73, 0x74, 0x6f, 0x70, 0x28, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x75, 0x6e, 0x43, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, + 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x6f, 0x6e, 0x63, + 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x73, 0x75, 0x62, 0x6d, 0x69, + 0x74, 0x7d, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x62, 0x75, 0x74, + 0x74, 0x6f, 0x6e, 0x22, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x3d, 0x24, 0x7b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x3e, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x73, 0x74, 0x6f, 0x70, 0x7d, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x21, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x3e, 0x53, 0x74, 0x6f, 0x70, 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, - 0x6f, 0x6e, 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, - 0x7b, 0x72, 0x65, 0x73, 0x65, 0x74, 0x7d, 0x3e, 0x52, 0x65, 0x73, 0x65, - 0x74, 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, - 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x2f, 0x66, 0x6f, 0x72, 0x6d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, - 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, - 0x75, 0x62, 0x6d, 0x69, 0x74, 0x20, 0x3d, 0x20, 0x28, 0x65, 0x29, 0x20, - 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x73, 0x74, 0x6f, 0x70, 0x28, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x75, 0x6e, 0x43, 0x6f, 0x6d, - 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x28, 0x29, 0x3b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, - 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, - 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x6f, 0x6e, - 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x73, 0x75, 0x62, 0x6d, - 0x69, 0x74, 0x7d, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x62, 0x75, - 0x74, 0x74, 0x6f, 0x6e, 0x22, 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6e, 0x67, 
0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x3e, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, - 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x6f, 0x6e, 0x63, 0x6c, - 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x73, 0x74, 0x6f, 0x70, 0x7d, 0x20, - 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x21, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x3e, 0x53, 0x74, 0x6f, 0x70, 0x3c, 0x2f, - 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, - 0x6e, 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, - 0x72, 0x65, 0x73, 0x65, 0x74, 0x7d, 0x3e, 0x52, 0x65, 0x73, 0x65, 0x74, - 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, - 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x43, 0x68, 0x61, 0x74, - 0x4c, 0x6f, 0x67, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x72, 0x6f, 0x70, 0x73, - 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x73, 0x20, 0x3d, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, 0x52, - 0x65, 0x66, 0x28, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x45, 0x66, 0x66, 0x65, 0x63, - 0x74, 0x28, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x63, 0x72, - 0x6f, 0x6c, 0x6c, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x6f, 0x74, 0x74, 0x6f, - 0x6d, 0x20, 0x28, 0x69, 0x66, 0x20, 0x6e, 0x65, 0x65, 0x64, 0x65, 0x64, - 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, - 0x6e, 0x73, 0x74, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x3d, - 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x20, 0x26, 0x26, 0x20, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x48, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x20, 0x3c, 0x3d, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x54, 0x6f, 0x70, 0x20, - 0x2b, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x6f, 0x66, 0x66, - 0x73, 0x65, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x20, 0x2b, 0x20, - 0x33, 0x30, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2e, - 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x54, 0x6f, 0x28, 0x30, 0x2c, 0x20, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, - 0x6c, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x29, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x2c, 
0x20, 0x5b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x73, 0x5d, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, - 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, 0x61, 0x74, 0x4c, 0x69, 0x6e, - 0x65, 0x20, 0x3d, 0x20, 0x28, 0x5b, 0x75, 0x73, 0x65, 0x72, 0x2c, 0x20, - 0x64, 0x61, 0x74, 0x61, 0x5d, 0x2c, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, - 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x3d, 0x20, 0x41, 0x72, - 0x72, 0x61, 0x79, 0x2e, 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, 0x28, - 0x64, 0x61, 0x74, 0x61, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x5f, 0x70, 0x72, 0x6f, - 0x62, 0x73, 0x20, 0x3e, 0x20, 0x30, 0x20, 0x26, 0x26, 0x20, 0x69, 0x73, - 0x41, 0x72, 0x72, 0x61, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x3d, 0x20, - 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x24, 0x7b, 0x50, 0x72, 0x6f, 0x62, - 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x7d, 0x20, 0x64, - 0x61, 0x74, 0x61, 0x3d, 0x24, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x7d, 0x20, - 0x2f, 0x3e, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, - 0x20, 0x74, 0x65, 0x78, 0x74, 0x20, 0x3d, 0x20, 0x69, 0x73, 0x41, 0x72, - 0x72, 0x61, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x3f, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6d, 0x61, 0x70, 0x28, 0x6d, 0x73, - 0x67, 0x20, 0x3d, 0x3e, 0x20, 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x29, 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x28, 0x27, - 0x27, 0x29, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, - 0x5e, 0x5c, 0x73, 0x2b, 0x2f, 0x2c, 0x20, 0x27, 0x27, 0x29, 0x20, 0x3a, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x64, 0x61, 0x74, 0x61, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x20, 0x3d, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x24, 0x7b, 0x4d, - 0x61, 0x72, 0x6b, 0x64, 0x6f, 0x77, 0x6e, 0x69, 0x73, 0x68, 0x7d, 0x20, - 0x74, 0x65, 0x78, 0x74, 0x3d, 0x24, 0x7b, 0x74, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x28, 0x74, 0x65, 0x78, 0x74, 0x29, 0x7d, 0x20, 0x2f, - 0x3e, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, - 0x28, 0x75, 0x73, 0x65, 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, - 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x70, 0x20, 0x6b, 0x65, - 0x79, 0x3d, 0x24, 0x7b, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x7d, 0x3e, 0x3c, - 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x24, 0x7b, 0x74, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x28, 0x75, 0x73, 0x65, 0x72, 0x29, 0x7d, - 0x3a, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x20, 0x24, - 0x7b, 0x6d, 0x65, 
0x73, 0x73, 0x61, 0x67, 0x65, 0x7d, 0x3c, 0x2f, 0x70, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, + 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x72, + 0x65, 0x73, 0x65, 0x74, 0x7d, 0x3e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x3c, + 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x60, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x43, 0x68, 0x61, 0x74, 0x4c, + 0x6f, 0x67, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, + 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x73, 0x20, 0x3d, 0x20, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, 0x52, 0x65, + 0x66, 0x28, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, + 0x28, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x63, 0x72, 0x6f, + 0x6c, 0x6c, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x6f, 0x74, 0x74, 0x6f, 0x6d, + 0x20, 0x28, 0x69, 0x66, 0x20, 0x6e, 0x65, 0x65, 0x64, 0x65, 0x64, 0x29, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x74, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x3d, 0x20, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x20, 0x26, 0x26, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x48, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x20, 0x3c, 0x3d, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x54, 0x6f, 0x70, 0x20, 0x2b, + 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x6f, 0x66, 0x66, 0x73, + 0x65, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x20, 0x2b, 0x20, 0x33, + 0x30, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x73, + 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x54, 0x6f, 0x28, 0x30, 0x2c, 0x20, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, + 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x2c, 0x20, 0x5b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, + 0x5d, 0x29, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, 0x61, 0x74, 0x4c, 0x69, 0x6e, 0x65, + 0x20, 0x3d, 0x20, 0x28, 0x5b, 0x75, 0x73, 0x65, 0x72, 0x2c, 0x20, 0x64, + 0x61, 0x74, 0x61, 0x5d, 0x2c, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x29, + 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 
0x20, 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x3d, 0x20, 0x41, 0x72, 0x72, + 0x61, 0x79, 0x2e, 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, 0x28, 0x64, + 0x61, 0x74, 0x61, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x69, 0x66, 0x20, 0x28, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, + 0x73, 0x20, 0x3e, 0x20, 0x30, 0x20, 0x26, 0x26, 0x20, 0x69, 0x73, 0x41, + 0x72, 0x72, 0x61, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x3d, 0x20, 0x68, + 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x24, 0x7b, 0x50, 0x72, 0x6f, 0x62, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x7d, 0x20, 0x64, 0x61, + 0x74, 0x61, 0x3d, 0x24, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x7d, 0x20, 0x2f, 0x3e, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, + 0x74, 0x65, 0x78, 0x74, 0x20, 0x3d, 0x20, 0x69, 0x73, 0x41, 0x72, 0x72, + 0x61, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x3f, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6d, 0x61, 0x70, 0x28, 0x6d, 0x73, 0x67, + 0x20, 0x3d, 0x3e, 0x20, 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x29, 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x28, 0x27, 0x27, + 0x29, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5e, + 0x5c, 0x73, 0x2b, 0x2f, 0x2c, 0x20, 0x27, 0x27, 0x29, 0x20, 0x3a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x64, 0x61, 0x74, 0x61, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, + 0x3d, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x24, 0x7b, 0x4d, 0x61, + 0x72, 0x6b, 0x64, 0x6f, 0x77, 0x6e, 0x69, 0x73, 0x68, 0x7d, 0x20, 0x74, + 0x65, 0x78, 0x74, 0x3d, 0x24, 0x7b, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x28, 0x74, 0x65, 0x78, 0x74, 0x29, 0x7d, 0x20, 0x2f, 0x3e, + 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, + 0x75, 0x73, 0x65, 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x70, 0x20, 0x6b, 0x65, 0x79, - 0x3d, 0x24, 0x7b, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x7d, 0x3e, 0x24, 0x7b, + 0x3d, 0x24, 0x7b, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x7d, 0x3e, 0x3c, 0x73, + 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x24, 0x7b, 0x74, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x28, 0x75, 0x73, 0x65, 0x72, 0x29, 0x7d, 0x3a, + 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x20, 0x24, 0x7b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x7d, 0x3c, 0x2f, 0x70, 0x3e, - 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, - 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x64, - 0x3d, 0x22, 0x63, 0x68, 0x61, 0x74, 0x22, 0x20, 0x72, 0x65, 0x66, 0x3d, - 0x24, 0x7b, 0x63, 0x6f, 0x6e, 
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x7d, - 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x69, 0x6d, 0x67, 0x20, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, 0x22, - 0x77, 0x69, 0x64, 0x74, 0x68, 0x3a, 0x20, 0x36, 0x30, 0x25, 0x3b, 0x24, - 0x7b, 0x21, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x20, 0x3f, 0x20, 0x60, 0x64, 0x69, - 0x73, 0x70, 0x6c, 0x61, 0x79, 0x3a, 0x20, 0x6e, 0x6f, 0x6e, 0x65, 0x3b, - 0x60, 0x20, 0x3a, 0x20, 0x60, 0x60, 0x7d, 0x22, 0x20, 0x73, 0x72, 0x63, - 0x3d, 0x22, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x7d, 0x22, 0x2f, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, - 0x7b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x66, 0x6c, - 0x61, 0x74, 0x4d, 0x61, 0x70, 0x28, 0x63, 0x68, 0x61, 0x74, 0x4c, 0x69, - 0x6e, 0x65, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x2f, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3e, 0x60, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x72, - 0x6f, 0x70, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x20, - 0x3d, 0x20, 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, - 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x5b, 0x65, - 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, - 0x65, 0x5d, 0x3a, 0x20, 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x20, 0x3d, - 0x20, 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, - 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x5b, 0x65, 0x6c, 0x2e, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x5d, 0x3a, - 0x20, 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x46, 0x6c, 0x6f, 0x61, 0x74, - 0x20, 0x3d, 0x20, 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x70, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, - 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x5b, 0x65, 0x6c, - 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, - 0x5d, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x73, 0x65, 0x46, 0x6c, 0x6f, 0x61, - 0x74, 0x28, 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x29, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, + 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, + 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x70, 0x20, 0x6b, 0x65, 0x79, 0x3d, + 0x24, 0x7b, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x7d, 0x3e, 0x24, 0x7b, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x7d, 0x3c, 0x2f, 0x70, 0x3e, 0x60, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, + 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x3c, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x64, 0x3d, + 0x22, 0x63, 0x68, 0x61, 0x74, 0x22, 0x20, 0x72, 0x65, 0x66, 0x3d, 0x24, + 0x7b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x7d, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x69, 0x6d, 0x67, 0x20, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, 0x22, 0x77, + 0x69, 0x64, 0x74, 0x68, 0x3a, 0x20, 0x36, 0x30, 0x25, 0x3b, 0x24, 0x7b, + 0x21, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x20, 0x3f, 0x20, 0x60, 0x64, 0x69, 0x73, + 0x70, 0x6c, 0x61, 0x79, 0x3a, 0x20, 0x6e, 0x6f, 0x6e, 0x65, 0x3b, 0x60, + 0x20, 0x3a, 0x20, 0x60, 0x60, 0x7d, 0x22, 0x20, 0x73, 0x72, 0x63, 0x3d, + 0x22, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x7d, 0x22, 0x2f, 0x3e, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x66, 0x6c, 0x61, + 0x74, 0x4d, 0x61, 0x70, 0x28, 0x63, 0x68, 0x61, 0x74, 0x4c, 0x69, 0x6e, + 0x65, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x3c, 0x2f, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3e, 0x60, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x72, 0x6f, + 0x70, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x49, 0x6e, 0x74, - 0x20, 0x3d, 0x20, 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x70, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, - 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x5b, 0x65, 0x6c, + 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x3d, + 0x20, 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, + 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x5b, 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, - 0x5d, 0x3a, 0x20, 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x66, 0x6c, 0x6f, 0x6f, - 0x72, 0x28, 0x70, 0x61, 0x72, 0x73, 0x65, 0x46, 0x6c, 0x6f, 0x61, 0x74, + 0x5d, 0x3a, 0x20, 0x65, 0x6c, 0x2e, 
0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x20, 0x3d, 0x20, + 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, + 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x5b, 0x65, 0x6c, 0x2e, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x5d, 0x3a, 0x20, + 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x20, + 0x3d, 0x20, 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, + 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x5b, 0x65, 0x6c, 0x2e, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x5d, + 0x3a, 0x20, 0x70, 0x61, 0x72, 0x73, 0x65, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x28, 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x29, 0x29, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x67, 0x72, - 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x70, 0x4f, 0x72, 0x64, 0x65, 0x72, - 0x20, 0x3d, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x27, 0x27, - 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, - 0x74, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x61, 0x6d, - 0x6d, 0x61, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x50, 0x72, 0x6f, 0x70, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x20, 0x3d, - 0x20, 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x67, 0x72, 0x61, + 0x61, 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x49, 0x6e, 0x74, 0x20, + 0x3d, 0x20, 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, + 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x5b, 0x65, 0x6c, 0x2e, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x5d, + 0x3a, 0x20, 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x66, 0x6c, 0x6f, 0x6f, 0x72, + 0x28, 0x70, 0x61, 0x72, 0x73, 0x65, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x28, + 0x65, 0x6c, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x29, 0x29, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x70, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x65, 0x6c, 0x2e, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, - 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 
0x4a, 0x53, 0x4f, 0x4e, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, - 0x20, 0x3d, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x79, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, - 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x20, - 0x3d, 0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x2e, 0x70, 0x61, 0x72, 0x73, 0x65, - 0x28, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x2e, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x29, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, - 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x65, 0x72, - 0x20, 0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x65, 0x72, 0x28, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x70, 0x4f, 0x72, 0x64, - 0x65, 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, - 0x73, 0x70, 0x6c, 0x69, 0x74, 0x28, 0x27, 0x2c, 0x27, 0x29, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x2e, 0x72, 0x65, 0x64, 0x75, 0x63, 0x65, 0x28, 0x28, 0x61, 0x63, - 0x63, 0x2c, 0x20, 0x63, 0x75, 0x72, 0x2c, 0x20, 0x69, 0x29, 0x20, 0x3d, - 0x3e, 0x20, 0x28, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x61, 0x63, 0x63, 0x2c, - 0x20, 0x5b, 0x63, 0x75, 0x72, 0x2e, 0x74, 0x72, 0x69, 0x6d, 0x28, 0x29, - 0x5d, 0x3a, 0x20, 0x69, 0x20, 0x7d, 0x29, 0x2c, 0x20, 0x7b, 0x7d, 0x29, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, - 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x69, 0x73, - 0x69, 0x74, 0x28, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2c, 0x20, 0x27, - 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x0a, 0x20, + 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x70, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x20, + 0x3d, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x27, 0x27, 0x29, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x61, 0x6d, 0x6d, + 0x61, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x50, 0x72, 0x6f, 0x70, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x20, 0x3d, 0x20, + 0x28, 0x65, 0x6c, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x67, 0x72, 0x61, 0x6d, + 0x6d, 0x61, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x50, 0x72, 0x6f, 0x70, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x65, 0x6c, 0x2e, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, + 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x20, + 0x3d, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 
0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x79, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x20, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x20, 0x3d, + 0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x2e, 0x70, 0x61, 0x72, 0x73, 0x65, 0x28, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2e, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x29, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, + 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x65, 0x72, 0x20, + 0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x65, 0x72, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x67, - 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x3a, 0x20, 0x63, 0x6f, 0x6e, 0x76, - 0x65, 0x72, 0x74, 0x65, 0x72, 0x2e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x28, 0x29, 0x2c, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x63, 0x61, 0x74, - 0x63, 0x68, 0x20, 0x28, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x61, 0x6c, 0x65, 0x72, 0x74, - 0x28, 0x60, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x20, 0x66, 0x61, - 0x69, 0x6c, 0x65, 0x64, 0x3a, 0x20, 0x24, 0x7b, 0x65, 0x2e, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x7d, 0x60, 0x29, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, - 0x6e, 0x73, 0x74, 0x20, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x20, 0x3d, 0x20, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x2c, - 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x2c, - 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x29, 0x20, 0x3d, 0x3e, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, - 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, - 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, - 0x6f, 0x72, 0x3d, 0x22, 0x24, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x22, - 0x3e, 0x24, 0x7b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x7d, 0x3c, 0x2f, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x22, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x24, 0x7b, 0x6e, 0x61, 0x6d, 0x65, - 0x7d, 0x22, 0x20, 0x6d, 0x69, 0x6e, 0x3d, 0x22, 0x24, 0x7b, 0x6d, 0x69, - 0x6e, 0x7d, 0x22, 0x20, 0x6d, 0x61, 0x78, 0x3d, 0x22, 0x24, 0x7b, 0x6d, - 0x61, 0x78, 0x7d, 0x22, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3d, 0x22, 0x24, - 0x7b, 0x73, 0x74, 0x65, 0x70, 0x7d, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x22, 0x24, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x22, 0x20, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, - 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x46, 0x6c, 0x6f, 0x61, 0x74, 
0x7d, 0x20, 0x2f, 0x3e, 0x0a, + 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x70, 0x4f, 0x72, 0x64, 0x65, + 0x72, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x73, + 0x70, 0x6c, 0x69, 0x74, 0x28, 0x27, 0x2c, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x24, 0x7b, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, - 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x49, - 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x20, 0x3d, 0x20, 0x28, 0x7b, - 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x2c, - 0x20, 0x6d, 0x69, 0x6e, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, + 0x2e, 0x72, 0x65, 0x64, 0x75, 0x63, 0x65, 0x28, 0x28, 0x61, 0x63, 0x63, + 0x2c, 0x20, 0x63, 0x75, 0x72, 0x2c, 0x20, 0x69, 0x29, 0x20, 0x3d, 0x3e, + 0x20, 0x28, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x61, 0x63, 0x63, 0x2c, 0x20, + 0x5b, 0x63, 0x75, 0x72, 0x2e, 0x74, 0x72, 0x69, 0x6d, 0x28, 0x29, 0x5d, + 0x3a, 0x20, 0x69, 0x20, 0x7d, 0x29, 0x2c, 0x20, 0x7b, 0x7d, 0x29, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, + 0x6e, 0x76, 0x65, 0x72, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x69, 0x73, 0x69, + 0x74, 0x28, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2c, 0x20, 0x27, 0x27, + 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x20, 0x3d, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x67, 0x72, + 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x3a, 0x20, 0x63, 0x6f, 0x6e, 0x76, 0x65, + 0x72, 0x74, 0x65, 0x72, 0x2e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x47, + 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x28, 0x29, 0x2c, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x63, 0x61, 0x74, 0x63, + 0x68, 0x20, 0x28, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x28, + 0x60, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x20, 0x66, 0x61, 0x69, + 0x6c, 0x65, 0x64, 0x3a, 0x20, 0x24, 0x7b, 0x65, 0x2e, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x7d, 0x60, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x74, 0x20, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x20, 0x3d, 0x20, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x2c, 0x20, + 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 
0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, @@ -1571,1109 +1543,1151 @@ unsigned char index_html[] = { 0x20, 0x69, 0x64, 0x3d, 0x22, 0x24, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x22, 0x20, 0x6d, 0x69, 0x6e, 0x3d, 0x22, 0x24, 0x7b, 0x6d, 0x69, 0x6e, 0x7d, 0x22, 0x20, 0x6d, 0x61, 0x78, 0x3d, 0x22, 0x24, 0x7b, 0x6d, 0x61, - 0x78, 0x7d, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x24, 0x7b, - 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3d, 0x22, 0x24, 0x7b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x22, 0x20, - 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x49, 0x6e, - 0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x3e, - 0x24, 0x7b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x3c, 0x2f, 0x73, 0x70, - 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, - 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x20, 0x3d, - 0x20, 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, - 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x65, 0x74, 0x54, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x41, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x29, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x55, 0x73, 0x65, 0x72, - 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, - 0x74, 0x42, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x3d, 0x20, 0x28, 0x29, - 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, - 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x61, 0x6d, - 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x27, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x27, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, - 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, - 0x20, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3e, 0x55, 0x73, - 0x69, 0x6e, 0x67, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, - 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x62, 0x75, - 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x78, 0x7d, 0x22, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3d, 0x22, 0x24, 0x7b, + 0x73, 0x74, 0x65, 0x70, 0x7d, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x22, 0x24, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x22, 0x20, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, + 
0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x24, 0x7b, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x49, 0x6e, + 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x20, 0x3d, 0x20, 0x28, 0x7b, 0x20, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x2c, 0x20, + 0x6d, 0x69, 0x6e, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x20, 0x7d, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, 0x72, + 0x3d, 0x22, 0x24, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x22, 0x3e, 0x24, + 0x7b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x7d, 0x3c, 0x2f, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, + 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x20, + 0x69, 0x64, 0x3d, 0x22, 0x24, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x22, + 0x20, 0x6d, 0x69, 0x6e, 0x3d, 0x22, 0x24, 0x7b, 0x6d, 0x69, 0x6e, 0x7d, + 0x22, 0x20, 0x6d, 0x61, 0x78, 0x3d, 0x22, 0x24, 0x7b, 0x6d, 0x61, 0x78, + 0x7d, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x6e, + 0x61, 0x6d, 0x65, 0x7d, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, + 0x22, 0x24, 0x7b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x22, 0x20, 0x6f, + 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x49, 0x6e, 0x74, + 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x24, + 0x7b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61, + 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, + 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x20, 0x3d, 0x20, + 0x28, 0x65, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x28, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, + 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x65, 0x74, 0x54, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x41, + 
0x6e, 0x64, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x28, 0x29, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x55, 0x73, 0x65, 0x72, 0x54, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, + 0x42, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x3d, 0x20, 0x28, 0x29, 0x20, + 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x55, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, + 0x20, 0x3d, 0x3d, 0x20, 0x27, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x27, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, + 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x3e, 0x55, 0x73, 0x69, + 0x6e, 0x67, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x74, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x62, 0x75, 0x74, + 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, + 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x7d, 0x3e, + 0x52, 0x65, 0x73, 0x65, 0x74, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x74, 0x6f, + 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x3c, 0x2f, 0x62, 0x75, + 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, + 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x45, + 0x66, 0x66, 0x65, 0x63, 0x74, 0x28, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, + 0x20, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x20, 0x74, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x6f, 0x6e, 0x20, 0x65, 0x76, + 0x65, 0x72, 0x79, 0x20, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x72, 0x54, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x73, + 0x61, 0x76, 0x65, 0x28, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x2c, 0x20, 0x5b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5d, 0x29, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x47, + 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x20, 0x3d, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x28, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, 0x6f, 0x6e, 0x63, 0x6c, - 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x75, 0x73, 0x65, 0x72, 0x54, 0x65, - 
0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x65, 0x74, 0x7d, - 0x3e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x74, - 0x6f, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x3c, 0x2f, 0x62, + 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x22, 0x3e, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x3c, + 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x74, 0x65, 0x78, + 0x74, 0x61, 0x72, 0x65, 0x61, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x67, 0x72, + 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x22, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x22, 0x20, 0x70, 0x6c, + 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x3d, 0x22, 0x55, + 0x73, 0x65, 0x20, 0x67, 0x62, 0x6e, 0x66, 0x20, 0x6f, 0x72, 0x20, 0x4a, + 0x53, 0x4f, 0x4e, 0x20, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2b, 0x63, + 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, + 0x72, 0x7d, 0x22, 0x20, 0x72, 0x6f, 0x77, 0x73, 0x3d, 0x34, 0x20, 0x6f, + 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x7d, 0x2f, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, + 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, + 0x3d, 0x22, 0x70, 0x72, 0x6f, 0x70, 0x2d, 0x6f, 0x72, 0x64, 0x65, 0x72, + 0x22, 0x20, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x3d, 0x22, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x3a, 0x20, 0x70, 0x72, + 0x6f, 0x70, 0x31, 0x2c, 0x70, 0x72, 0x6f, 0x70, 0x32, 0x2c, 0x70, 0x72, + 0x6f, 0x70, 0x33, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x61, + 0x6d, 0x6d, 0x61, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x70, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x7d, + 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x20, + 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, + 0x22, 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, + 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x4a, 0x53, 0x4f, 0x4e, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, + 0x7d, 0x3e, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x20, 0x4a, 0x53, + 0x4f, 0x4e, 0x20, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3c, 0x2f, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, - 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, - 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x28, 0x28, 0x29, 0x20, 0x3d, 0x3e, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, - 0x2f, 0x20, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x61, 0x76, 0x65, 0x20, 0x74, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x6f, 0x6e, 0x20, 0x65, - 0x76, 0x65, 0x72, 0x79, 0x20, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x0a, - 
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x72, - 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x6f, - 0x73, 0x61, 0x76, 0x65, 0x28, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x2c, 0x20, 0x5b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5d, 0x29, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, - 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x20, 0x3d, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x28, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, 0x74, 0x6d, - 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x50, 0x72, + 0x6f, 0x6d, 0x70, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x74, 0x20, 0x3d, 0x20, 0x28, 0x29, + 0x20, 0x3d, 0x3e, 0x20, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, + 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x22, 0x3e, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, - 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x74, 0x65, - 0x78, 0x74, 0x61, 0x72, 0x65, 0x61, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x67, - 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x22, 0x67, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x22, 0x20, 0x70, - 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x3d, 0x22, - 0x55, 0x73, 0x65, 0x20, 0x67, 0x62, 0x6e, 0x66, 0x20, 0x6f, 0x72, 0x20, - 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2b, - 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x22, 0x20, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x67, 0x72, 0x61, 0x6d, 0x6d, - 0x61, 0x72, 0x7d, 0x22, 0x20, 0x72, 0x6f, 0x77, 0x73, 0x3d, 0x34, 0x20, - 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x7d, 0x2f, - 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, - 0x65, 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, 0x20, 0x6e, 0x61, 0x6d, - 0x65, 0x3d, 0x22, 0x70, 0x72, 0x6f, 0x70, 0x2d, 0x6f, 0x72, 0x64, 0x65, - 0x72, 0x22, 0x20, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x68, 0x6f, 0x6c, 0x64, - 0x65, 0x72, 0x3d, 0x22, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x3a, 0x20, 0x70, - 0x72, 0x6f, 0x70, 0x31, 0x2c, 0x70, 0x72, 0x6f, 0x70, 0x32, 0x2c, 0x70, - 0x72, 0x6f, 0x70, 0x33, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, - 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, - 0x61, 
0x6d, 0x6d, 0x61, 0x72, 0x4a, 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x70, 0x4f, 0x72, 0x64, 0x65, 0x72, - 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, - 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x62, 0x75, 0x74, 0x74, 0x6f, - 0x6e, 0x22, 0x20, 0x6f, 0x6e, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, - 0x7b, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x4a, 0x53, 0x4f, 0x4e, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, - 0x72, 0x7d, 0x3e, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x20, 0x4a, - 0x53, 0x4f, 0x4e, 0x20, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3c, 0x2f, - 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x3b, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x50, - 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x74, 0x20, 0x3d, 0x20, 0x28, + 0x6c, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x46, 0x6f, 0x72, 0x3d, 0x22, 0x70, + 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x22, 0x3e, 0x50, 0x72, 0x6f, 0x6d, 0x70, + 0x74, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x74, + 0x65, 0x78, 0x74, 0x61, 0x72, 0x65, 0x61, 0x20, 0x74, 0x79, 0x70, 0x65, + 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, + 0x3d, 0x22, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x22, 0x20, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x6d, 0x70, 0x74, 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x43, 0x68, 0x61, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x20, 0x3d, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, - 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x46, 0x6f, 0x72, 0x3d, 0x22, - 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x22, 0x3e, 0x50, 0x72, 0x6f, 0x6d, - 0x70, 0x74, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x74, 0x65, 0x78, 0x74, 0x61, 0x72, 0x65, 0x61, 0x20, 0x74, 0x79, 0x70, - 0x65, 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, 0x20, 0x6e, 0x61, 0x6d, - 0x65, 0x3d, 
0x22, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x22, 0x20, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x6d, 0x70, 0x74, 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x50, 0x72, 0x6f, + 0x6d, 0x70, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x53, 0x65, 0x74, 0x28, 0x29, 0x7d, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x3d, 0x22, 0x74, 0x77, 0x6f, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, + 0x6f, 0x72, 0x3d, 0x22, 0x75, 0x73, 0x65, 0x72, 0x22, 0x3e, 0x55, 0x73, + 0x65, 0x72, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3c, 0x2f, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, + 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x75, 0x73, 0x65, 0x72, 0x22, + 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, + 0x75, 0x73, 0x65, 0x72, 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x3e, 0x0a, 0x20, 0x20, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, + 0x72, 0x3d, 0x22, 0x62, 0x6f, 0x74, 0x22, 0x3e, 0x42, 0x6f, 0x74, 0x20, + 0x6e, 0x61, 0x6d, 0x65, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, + 0x70, 0x65, 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, 0x20, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x22, 0x63, 0x68, 0x61, 0x72, 0x22, 0x20, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x63, 0x68, 0x61, + 0x72, 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, + 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, - 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x43, 0x68, 0x61, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x20, 0x3d, 0x20, - 0x28, 0x29, 0x20, 
0x3d, 0x3e, 0x20, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x50, 0x72, - 0x6f, 0x6d, 0x70, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x74, 0x28, 0x29, 0x7d, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x20, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x3d, 0x22, 0x74, 0x77, 0x6f, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, - 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x75, 0x73, 0x65, 0x72, 0x22, 0x3e, 0x55, - 0x73, 0x65, 0x72, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3c, 0x2f, 0x6c, 0x61, + 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, + 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x22, 0x3e, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x20, + 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, - 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, - 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x75, 0x73, 0x65, 0x72, - 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x73, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x2e, 0x75, 0x73, 0x65, 0x72, 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, - 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, - 0x6f, 0x72, 0x3d, 0x22, 0x62, 0x6f, 0x74, 0x22, 0x3e, 0x42, 0x6f, 0x74, - 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, - 0x79, 0x70, 0x65, 0x3d, 0x22, 0x74, 0x65, 0x78, 0x74, 0x22, 0x20, 0x6e, - 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x63, 0x68, 0x61, 0x72, 0x22, 0x20, 0x76, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x74, 0x65, 0x78, 0x74, + 0x61, 0x72, 0x65, 0x61, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x74, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x63, 0x68, - 0x61, 0x72, 0x7d, 0x22, 
0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, - 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x7d, 0x22, 0x20, 0x72, 0x6f, 0x77, + 0x73, 0x3d, 0x34, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, + 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, + 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x74, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x22, 0x3e, 0x43, 0x68, 0x61, + 0x74, 0x20, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x20, 0x74, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x74, 0x65, 0x78, 0x74, 0x61, 0x72, + 0x65, 0x61, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x68, + 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, + 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x7d, 0x22, 0x20, 0x72, 0x6f, 0x77, + 0x73, 0x3d, 0x31, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, + 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x24, 0x7b, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x28, 0x29, 0x7d, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x3b, + 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, + 0x74, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x20, 0x3d, + 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x28, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x50, + 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x74, 0x28, 0x29, 0x7d, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, + 0x69, 0x65, 0x6c, 0x64, 
0x73, 0x65, 0x74, 0x3e, 0x24, 0x7b, 0x47, 0x72, + 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x28, 0x29, 0x7d, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, + 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x3b, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x3c, 0x66, 0x6f, 0x72, 0x6d, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x65, 0x74, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, + 0x74, 0x77, 0x6f, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24, 0x7b, 0x55, 0x73, 0x65, + 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x65, 0x74, 0x42, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x7d, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x74, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x22, 0x3e, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, - 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x74, 0x65, 0x78, - 0x74, 0x61, 0x72, 0x65, 0x61, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x74, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x22, 0x20, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x7d, 0x22, 0x20, 0x72, 0x6f, - 0x77, 0x73, 0x3d, 0x34, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, - 0x76, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, - 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x22, 0x3e, 0x43, 0x68, - 0x61, 0x74, 0x20, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x20, 0x74, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x74, 0x65, 0x78, 0x74, 0x61, - 0x72, 0x65, 0x61, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x74, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, - 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x7d, 0x22, 0x20, 0x72, 0x6f, - 0x77, 0x73, 0x3d, 0x31, 0x20, 
0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, - 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x24, 0x7b, 0x47, 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, - 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x28, 0x29, 0x7d, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, - 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, - 0x73, 0x74, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x20, - 0x3d, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x28, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, - 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x74, 0x28, 0x29, 0x7d, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x24, 0x7b, 0x47, - 0x72, 0x61, 0x6d, 0x6d, 0x61, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x28, 0x29, 0x7d, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, - 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x3b, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, - 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x3c, 0x66, 0x6f, 0x72, 0x6d, 0x3e, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x73, 0x65, 0x74, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, - 0x22, 0x74, 0x77, 0x6f, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24, 0x7b, 0x55, 0x73, - 0x65, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x65, 0x74, 0x42, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x7d, 0x2f, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x73, - 0x6c, 0x69, 0x6d, 0x22, 0x3e, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, - 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x64, 0x69, 0x6f, 0x22, - 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, - 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x63, 0x68, 0x61, 0x74, - 0x22, 0x20, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x3d, 0x24, 0x7b, - 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x22, - 0x63, 0x68, 0x61, 0x74, 0x22, 0x7d, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, - 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x20, 0x2f, 0x3e, 0x20, 0x43, - 0x68, 0x61, 0x74, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x63, 0x6c, 0x61, - 0x73, 0x73, 0x3d, 0x22, 0x73, 0x6c, 0x69, 0x6d, 0x22, 0x3e, 0x3c, 0x69, - 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, - 0x61, 0x64, 0x69, 0x6f, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, - 0x74, 0x79, 0x70, 0x65, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, - 0x22, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x65, 0x6c, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x73, 0x6c, + 0x69, 0x6d, 0x22, 0x3e, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, + 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x64, 0x69, 0x6f, 0x22, 0x20, + 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x20, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x63, 0x68, 0x61, 0x74, 0x22, 0x20, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x22, 0x63, - 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x7d, 0x20, - 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, - 0x20, 0x2f, 0x3e, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, - 0x6f, 0x6e, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, - 0x65, 0x74, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x20, - 0x3d, 0x3d, 0x3d, 0x20, 0x27, 0x63, 0x68, 0x61, 0x74, 0x27, 0x20, 0x3f, - 0x20, 0x43, 0x68, 0x61, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, - 0x6f, 0x72, 0x6d, 0x28, 0x29, 0x20, 0x3a, 0x20, 0x43, 0x6f, 0x6d, 0x70, - 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x46, 0x6f, 0x72, 0x6d, 0x28, 0x29, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x73, 0x65, 0x74, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, - 0x74, 0x77, 0x6f, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x49, 0x6e, 0x74, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x3a, 0x20, 0x22, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x32, 0x30, - 0x34, 0x38, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x2d, 0x31, 0x2c, - 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x6e, 0x5f, 0x70, 0x72, - 0x65, 0x64, 0x69, 0x63, 0x74, 0x22, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, - 0x74, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, - 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x54, 0x65, 0x6d, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, - 0x31, 0x2e, 0x35, 0x2c, 0x20, 0x6d, 
0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, - 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x74, 0x65, - 0x6d, 0x70, 0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x2c, 0x20, - 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x20, 0x7d, 0x29, 0x7d, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x50, - 0x65, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x20, 0x72, 0x65, 0x70, 0x65, - 0x61, 0x74, 0x20, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x22, - 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x32, 0x2e, 0x30, 0x2c, 0x20, - 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, - 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, - 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x22, 0x2c, 0x20, 0x73, 0x74, - 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, - 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x20, 0x7d, 0x29, 0x7d, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x24, 0x7b, 0x49, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, - 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x43, 0x6f, - 0x6e, 0x73, 0x69, 0x64, 0x65, 0x72, 0x20, 0x4e, 0x20, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x70, 0x65, 0x6e, 0x61, - 0x6c, 0x69, 0x7a, 0x65, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, - 0x32, 0x30, 0x34, 0x38, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, - 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x72, 0x65, 0x70, - 0x65, 0x61, 0x74, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e, 0x22, 0x2c, - 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x72, 0x65, 0x70, - 0x65, 0x61, 0x74, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e, 0x20, 0x7d, - 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x24, 0x7b, 0x49, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, - 0x54, 0x6f, 0x70, 0x2d, 0x4b, 0x20, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, - 0x6e, 0x67, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x30, - 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x2d, 0x31, 0x2c, 0x20, - 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x74, 0x6f, 0x70, 0x5f, 0x6b, - 0x22, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, - 0x6f, 0x70, 0x5f, 0x6b, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, - 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x54, 0x6f, 0x70, 0x2d, - 0x50, 0x20, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x22, 0x2c, - 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, - 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, - 0x65, 0x3a, 0x20, 0x22, 0x74, 0x6f, 
0x70, 0x5f, 0x70, 0x22, 0x2c, 0x20, - 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x6f, 0x70, 0x5f, - 0x70, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, - 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3c, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3e, 0x0a, + 0x68, 0x61, 0x74, 0x22, 0x7d, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x20, 0x2f, 0x3e, 0x20, 0x43, 0x68, + 0x61, 0x74, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x3e, 0x4d, 0x6f, 0x72, - 0x65, 0x20, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3c, 0x2f, 0x73, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x73, 0x65, 0x74, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, - 0x22, 0x74, 0x77, 0x6f, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, - 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x54, 0x46, 0x53, 0x2d, - 0x5a, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x30, - 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, - 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x74, 0x66, 0x73, 0x5f, 0x7a, - 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, - 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, - 0x66, 0x73, 0x5f, 0x7a, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x3d, 0x22, 0x73, 0x6c, 0x69, 0x6d, 0x22, 0x3e, 0x3c, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, + 0x64, 0x69, 0x6f, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x74, + 0x79, 0x70, 0x65, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, + 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x22, 0x63, 0x6f, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x7d, 0x20, 0x6f, + 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x20, + 0x2f, 0x3e, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, + 0x6e, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, + 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, + 0x74, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 
0x74, 0x79, 0x70, 0x65, 0x20, 0x3d, + 0x3d, 0x3d, 0x20, 0x27, 0x63, 0x68, 0x61, 0x74, 0x27, 0x20, 0x3f, 0x20, + 0x43, 0x68, 0x61, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x6f, + 0x72, 0x6d, 0x28, 0x29, 0x20, 0x3a, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, + 0x6f, 0x72, 0x6d, 0x28, 0x29, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x65, 0x74, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x74, + 0x77, 0x6f, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x49, 0x6e, 0x74, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, + 0x20, 0x22, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x32, 0x30, 0x34, + 0x38, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x2d, 0x31, 0x2c, 0x20, + 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x6e, 0x5f, 0x70, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x74, 0x22, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2e, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, + 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x3a, 0x20, 0x22, 0x54, 0x65, 0x6d, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, + 0x2e, 0x35, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, + 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x74, 0x65, 0x6d, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x2c, 0x20, 0x73, + 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, - 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x54, 0x79, - 0x70, 0x69, 0x63, 0x61, 0x6c, 0x20, 0x50, 0x22, 0x2c, 0x20, 0x6d, 0x61, - 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, - 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, - 0x22, 0x74, 0x79, 0x70, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x22, 0x2c, - 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, - 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x79, 0x70, - 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, - 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x70, 0x65, 0x6e, - 0x61, 0x6c, 0x74, 0x79, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, - 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, - 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x70, 0x72, - 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x5f, 
0x70, 0x65, 0x6e, 0x61, 0x6c, - 0x74, 0x79, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, - 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x2e, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x65, - 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x46, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x20, 0x70, 0x65, 0x6e, - 0x61, 0x6c, 0x74, 0x79, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, - 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, - 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x66, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x70, 0x65, 0x6e, 0x61, - 0x6c, 0x74, 0x79, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, - 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x2e, 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x5f, + 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x50, 0x65, + 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x20, 0x72, 0x65, 0x70, 0x65, 0x61, + 0x74, 0x20, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x2c, + 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x32, 0x2e, 0x30, 0x2c, 0x20, 0x6d, + 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, + 0x65, 0x3a, 0x20, 0x22, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x70, + 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, + 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x24, 0x7b, 0x49, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, + 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x43, 0x6f, 0x6e, + 0x73, 0x69, 0x64, 0x65, 0x72, 0x20, 0x4e, 0x20, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x70, 0x65, 0x6e, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x32, + 0x30, 0x34, 0x38, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2c, + 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x72, 0x65, 0x70, 0x65, + 0x61, 0x74, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e, 0x22, 0x2c, 0x20, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x72, 0x65, 0x70, 0x65, + 0x61, 0x74, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e, 0x20, 0x7d, 0x29, + 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x24, 0x7b, 0x49, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x54, + 0x6f, 0x70, 0x2d, 0x4b, 0x20, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, + 0x67, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x30, 0x30, + 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x2d, 0x31, 0x2c, 0x20, 0x6e, + 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x74, 0x6f, 0x70, 0x5f, 0x6b, 0x22, + 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 
0x20, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x6f, + 0x70, 0x5f, 0x6b, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x54, 0x6f, 0x70, 0x2d, 0x50, + 0x20, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x22, 0x2c, 0x20, + 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, + 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, + 0x3a, 0x20, 0x22, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x22, 0x2c, 0x20, 0x73, + 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x6f, 0x70, 0x5f, 0x70, + 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x3a, 0x20, 0x22, 0x4d, 0x69, 0x6e, 0x2d, 0x50, 0x20, 0x73, 0x61, + 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, + 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, + 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, + 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, + 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x2e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x20, 0x7d, 0x29, + 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x68, 0x72, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x73, 0x65, 0x74, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, - 0x74, 0x68, 0x72, 0x65, 0x65, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, - 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x3e, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, - 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x64, 0x69, 0x6f, 0x22, 0x20, 0x6e, - 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, - 0x74, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x30, 0x22, - 0x20, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x70, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, - 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, 0x3d, 0x3d, 0x20, - 0x30, 0x7d, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, - 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x49, 0x6e, 0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x20, 0x6e, 0x6f, 0x20, - 0x4d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x3c, 0x2f, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x3e, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, - 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x64, 0x69, 
0x6f, 0x22, 0x20, - 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, - 0x61, 0x74, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x31, - 0x22, 0x20, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x3d, 0x24, 0x7b, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x2e, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, 0x3d, 0x3d, - 0x20, 0x31, 0x7d, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, - 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x49, 0x6e, 0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x20, 0x4d, 0x69, - 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, 0x76, 0x31, 0x3c, 0x2f, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, - 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x64, 0x69, 0x6f, 0x22, - 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x6d, 0x69, 0x72, 0x6f, 0x73, - 0x74, 0x61, 0x74, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, - 0x32, 0x22, 0x20, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x3d, 0x24, - 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x2e, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, 0x3d, - 0x3d, 0x20, 0x32, 0x7d, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x49, 0x6e, 0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x20, 0x4d, - 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, 0x76, 0x32, 0x3c, 0x2f, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, - 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, - 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x4d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, - 0x74, 0x20, 0x74, 0x61, 0x75, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, - 0x20, 0x31, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, - 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, - 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x74, 0x61, 0x75, - 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, - 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6d, - 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x74, 0x61, 0x75, 0x20, - 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, - 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x4d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, - 0x74, 0x20, 0x65, 0x74, 0x61, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, - 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, - 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x6d, - 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x65, 0x74, 0x61, 0x22, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x75, 0x6d, + 0x6d, 0x61, 0x72, 0x79, 0x3e, 0x4d, 0x6f, 0x72, 0x65, 0x20, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3c, 0x2f, 0x73, 0x75, 
0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, + 0x74, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x74, 0x77, 0x6f, + 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x3a, 0x20, 0x22, 0x54, 0x46, 0x53, 0x2d, 0x5a, 0x22, 0x2c, 0x20, + 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, + 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, + 0x3a, 0x20, 0x22, 0x74, 0x66, 0x73, 0x5f, 0x7a, 0x22, 0x2c, 0x20, 0x73, + 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x66, 0x73, 0x5f, 0x7a, + 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, + 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x54, 0x79, 0x70, 0x69, 0x63, 0x61, + 0x6c, 0x20, 0x50, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, + 0x2e, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, + 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x74, 0x79, 0x70, + 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, + 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x69, 0x63, 0x61, 0x6c, + 0x5f, 0x70, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, + 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x50, 0x72, 0x65, 0x73, + 0x65, 0x6e, 0x63, 0x65, 0x20, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, + 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, + 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, + 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, + 0x63, 0x65, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x22, 0x2c, + 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, + 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x65, + 0x73, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, + 0x79, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x46, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x79, 0x20, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, + 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x30, 0x2c, + 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, 0x6e, + 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x79, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x22, 0x2c, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 
0x61, 0x72, - 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6d, 0x69, - 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x5f, 0x65, 0x74, 0x61, 0x20, 0x7d, - 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, - 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x66, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x70, 0x65, 0x6e, 0x61, + 0x6c, 0x74, 0x79, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x68, 0x72, 0x20, + 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, + 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x74, 0x68, 0x72, 0x65, + 0x65, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x3c, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, + 0x72, 0x61, 0x64, 0x69, 0x6f, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x22, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x22, 0x20, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x30, 0x22, 0x20, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6d, 0x69, 0x72, 0x6f, + 0x73, 0x74, 0x61, 0x74, 0x20, 0x3d, 0x3d, 0x20, 0x30, 0x7d, 0x20, 0x6f, + 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x49, 0x6e, 0x74, + 0x7d, 0x20, 0x2f, 0x3e, 0x20, 0x6e, 0x6f, 0x20, 0x4d, 0x69, 0x72, 0x6f, + 0x73, 0x74, 0x61, 0x74, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, + 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, + 0x22, 0x72, 0x61, 0x64, 0x69, 0x6f, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, + 0x3d, 0x22, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x22, 0x20, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x31, 0x22, 0x20, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6d, 0x69, 0x72, + 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, 0x3d, 0x3d, 0x20, 0x31, 0x7d, 0x20, + 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x49, 0x6e, + 0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x20, 0x4d, 0x69, 0x72, 0x6f, 0x73, 0x74, + 0x61, 0x74, 0x20, 0x76, 0x31, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x49, 0x6e, 0x74, 0x46, 0x69, 0x65, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x3e, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, + 0x3d, 0x22, 0x72, 0x61, 0x64, 0x69, 0x6f, 0x22, 0x20, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x22, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 
0x22, + 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x32, 0x22, 0x20, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x3d, 0x24, 0x7b, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6d, 0x69, + 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, 0x3d, 0x3d, 0x20, 0x32, 0x7d, + 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x49, + 0x6e, 0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x20, 0x4d, 0x69, 0x72, 0x6f, 0x73, + 0x74, 0x61, 0x74, 0x20, 0x76, 0x32, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, - 0x22, 0x53, 0x68, 0x6f, 0x77, 0x20, 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0x2c, 0x20, 0x6d, 0x61, - 0x78, 0x3a, 0x20, 0x31, 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, - 0x30, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x6e, 0x5f, - 0x70, 0x72, 0x6f, 0x62, 0x73, 0x22, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x73, 0x20, - 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, - 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, - 0x6f, 0x72, 0x6d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x70, 0x72, 0x6f, 0x62, 0x43, 0x6f, - 0x6c, 0x6f, 0x72, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x29, 0x20, 0x3d, 0x3e, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, - 0x73, 0x74, 0x20, 0x72, 0x20, 0x3d, 0x20, 0x4d, 0x61, 0x74, 0x68, 0x2e, - 0x66, 0x6c, 0x6f, 0x6f, 0x72, 0x28, 0x31, 0x39, 0x32, 0x20, 0x2a, 0x20, - 0x28, 0x31, 0x20, 0x2d, 0x20, 0x70, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x67, 0x20, - 0x3d, 0x20, 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x66, 0x6c, 0x6f, 0x6f, 0x72, - 0x28, 0x31, 0x39, 0x32, 0x20, 0x2a, 0x20, 0x70, 0x29, 0x3b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, - 0x60, 0x72, 0x67, 0x62, 0x61, 0x28, 0x24, 0x7b, 0x72, 0x7d, 0x2c, 0x24, - 0x7b, 0x67, 0x7d, 0x2c, 0x30, 0x2c, 0x30, 0x2e, 0x33, 0x29, 0x60, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x20, 0x3d, 0x20, 0x28, 0x70, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, - 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x6d, 0x61, 0x70, 0x28, 0x6d, 0x73, 0x67, 0x20, 0x3d, 0x3e, 0x20, - 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, - 0x6e, 0x73, 0x74, 0x20, 0x7b, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 
0x65, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, - 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x20, 0x7d, 0x20, 0x3d, 0x20, 0x6d, - 0x73, 0x67, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x69, 0x66, 0x20, 0x28, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x21, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, - 0x74, 0x69, 0x65, 0x73, 0x20, 0x7c, 0x7c, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, - 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, - 0x68, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x30, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, - 0x20, 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, - 0x20, 0x28, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x4d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, 0x74, 0x61, + 0x75, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x30, 0x2e, + 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, + 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x6d, 0x69, 0x72, 0x6f, + 0x73, 0x74, 0x61, 0x74, 0x5f, 0x74, 0x61, 0x75, 0x22, 0x2c, 0x20, 0x73, + 0x74, 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6d, 0x69, 0x72, 0x6f, 0x73, + 0x74, 0x61, 0x74, 0x5f, 0x74, 0x61, 0x75, 0x20, 0x7d, 0x29, 0x7d, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x24, 0x7b, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x28, 0x7b, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, + 0x22, 0x4d, 0x69, 0x72, 0x6f, 0x73, 0x74, 0x61, 0x74, 0x20, 0x65, 0x74, + 0x61, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, 0x2e, 0x30, + 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x2c, 0x20, + 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x6d, 0x69, 0x72, 0x6f, 0x73, + 0x74, 0x61, 0x74, 0x5f, 0x65, 0x74, 0x61, 0x22, 0x2c, 0x20, 0x73, 0x74, + 0x65, 0x70, 0x3a, 0x20, 0x30, 0x2e, 0x30, 0x31, 0x2c, 0x20, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6d, 0x69, 0x72, 0x6f, 0x73, 0x74, + 0x61, 0x74, 0x5f, 0x65, 0x74, 0x61, 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x24, 0x7b, 0x49, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x28, 0x7b, + 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3a, 0x20, 0x22, 0x53, 0x68, 0x6f, + 0x77, 0x20, 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x22, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3a, 0x20, 0x31, + 0x30, 0x2c, 0x20, 0x6d, 0x69, 0x6e, 0x3a, 0x20, 0x30, 0x2c, 0x20, 0x6e, + 0x61, 0x6d, 0x65, 0x3a, 0x20, 0x22, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, + 0x73, 0x22, 0x2c, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x20, 
0x70, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, + 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x73, 0x20, 0x7d, 0x29, 0x7d, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x3c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x6f, 0x72, 0x6d, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, + 0x74, 0x20, 0x70, 0x72, 0x6f, 0x62, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x20, + 0x3d, 0x20, 0x28, 0x70, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x72, + 0x20, 0x3d, 0x20, 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x66, 0x6c, 0x6f, 0x6f, + 0x72, 0x28, 0x31, 0x39, 0x32, 0x20, 0x2a, 0x20, 0x28, 0x31, 0x20, 0x2d, + 0x20, 0x70, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x67, 0x20, 0x3d, 0x20, 0x4d, 0x61, + 0x74, 0x68, 0x2e, 0x66, 0x6c, 0x6f, 0x6f, 0x72, 0x28, 0x31, 0x39, 0x32, + 0x20, 0x2a, 0x20, 0x70, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x60, 0x72, 0x67, 0x62, + 0x61, 0x28, 0x24, 0x7b, 0x72, 0x7d, 0x2c, 0x24, 0x7b, 0x67, 0x7d, 0x2c, + 0x30, 0x2c, 0x30, 0x2e, 0x33, 0x29, 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, + 0x74, 0x20, 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6d, 0x61, 0x70, + 0x28, 0x6d, 0x73, 0x67, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, + 0x7b, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, - 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x3e, 0x20, - 0x31, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x4e, 0x6f, 0x74, 0x20, 0x66, 0x6f, - 0x72, 0x20, 0x62, 0x79, 0x74, 0x65, 0x20, 0x70, 0x61, 0x69, 0x72, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, - 0x20, 0x28, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, + 0x65, 0x73, 0x20, 0x7d, 0x20, 0x3d, 0x20, 0x6d, 0x73, 0x67, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x21, + 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, + 0x20, 0x7c, 0x7c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, - 0x65, 0x73, 0x5b, 0x30, 0x5d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x2e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x73, 0x57, 0x69, 0x74, 0x68, - 0x28, 0x27, 0x62, 0x79, 0x74, 0x65, 0x3a, 0x20, 0x5c, 0x5c, 0x27, 0x29, 
+ 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x3d, 0x3d, + 0x3d, 0x20, 0x30, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, - 0x74, 0x20, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x44, 0x61, 0x74, 0x61, 0x20, - 0x3d, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, - 0x65, 0x73, 0x2e, 0x6d, 0x61, 0x70, 0x28, 0x70, 0x72, 0x6f, 0x62, 0x20, - 0x3d, 0x3e, 0x20, 0x28, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x3a, 0x20, 0x70, 0x72, 0x6f, 0x62, 0x2e, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x69, 0x65, 0x73, 0x3a, 0x20, 0x5b, 0x70, 0x72, 0x6f, 0x62, - 0x5d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x7d, 0x29, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, - 0x6c, 0x60, 0x3c, 0x24, 0x7b, 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, - 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x7d, 0x20, 0x64, 0x61, 0x74, 0x61, - 0x3d, 0x24, 0x7b, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x44, 0x61, 0x74, 0x61, - 0x7d, 0x20, 0x2f, 0x3e, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x7b, 0x20, 0x70, 0x72, 0x6f, - 0x62, 0x73, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, - 0x7d, 0x20, 0x3d, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, - 0x74, 0x69, 0x65, 0x73, 0x5b, 0x30, 0x5d, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x66, 0x6f, - 0x75, 0x6e, 0x64, 0x20, 0x3d, 0x20, 0x70, 0x72, 0x6f, 0x62, 0x73, 0x2e, - 0x66, 0x69, 0x6e, 0x64, 0x28, 0x70, 0x20, 0x3d, 0x3e, 0x20, 0x70, 0x2e, - 0x74, 0x6f, 0x6b, 0x5f, 0x73, 0x74, 0x72, 0x20, 0x3d, 0x3d, 0x3d, 0x20, - 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, - 0x73, 0x74, 0x20, 0x70, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x20, 0x3d, 0x20, - 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x20, 0x3f, 0x20, 0x70, 0x72, 0x6f, 0x62, - 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x28, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x2e, - 0x70, 0x72, 0x6f, 0x62, 0x29, 0x20, 0x3a, 0x20, 0x27, 0x74, 0x72, 0x61, - 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x27, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, - 0x20, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x43, 0x68, 0x69, 0x6c, - 0x64, 0x72, 0x65, 0x6e, 0x20, 0x3d, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x64, 0x69, 0x76, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x70, - 0x72, 0x6f, 0x62, 0x2d, 0x73, 0x65, 0x74, 0x22, 0x3e, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, - 0x70, 0x72, 0x6f, 0x62, 0x73, 0x2e, 0x6d, 0x61, 0x70, 0x28, 0x28, 0x70, - 
0x2c, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x29, 0x20, 0x3d, 0x3e, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, + 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x20, 0x3e, 0x20, 0x31, 0x29, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, + 0x2f, 0x20, 0x4e, 0x6f, 0x74, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x62, 0x79, + 0x74, 0x65, 0x20, 0x70, 0x61, 0x69, 0x72, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, + 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x5b, 0x30, + 0x5d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x73, 0x57, 0x69, 0x74, 0x68, 0x28, 0x27, 0x62, 0x79, + 0x74, 0x65, 0x3a, 0x20, 0x5c, 0x5c, 0x27, 0x29, 0x29, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x73, 0x70, + 0x6c, 0x69, 0x74, 0x44, 0x61, 0x74, 0x61, 0x20, 0x3d, 0x20, 0x63, 0x6f, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, + 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x2e, 0x6d, + 0x61, 0x70, 0x28, 0x70, 0x72, 0x6f, 0x62, 0x20, 0x3d, 0x3e, 0x20, 0x28, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x3a, 0x20, 0x70, + 0x72, 0x6f, 0x62, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x0a, 0x20, 0x20, + 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, + 0x73, 0x3a, 0x20, 0x5b, 0x70, 0x72, 0x6f, 0x62, 0x5d, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x29, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x24, + 0x7b, 0x50, 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, + 0x65, 0x73, 0x7d, 0x20, 0x64, 0x61, 0x74, 0x61, 0x3d, 0x24, 0x7b, 0x73, + 0x70, 0x6c, 0x69, 0x74, 0x44, 0x61, 0x74, 0x61, 0x7d, 0x20, 0x2f, 0x3e, + 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x74, 0x20, 0x7b, 0x20, 0x70, 0x72, 0x6f, 0x62, 0x73, 0x2c, 0x20, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x7d, 0x20, 0x3d, 0x20, + 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x72, 0x6f, 0x62, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, + 0x5b, 0x30, 0x5d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x20, + 0x3d, 0x20, 0x70, 0x72, 0x6f, 0x62, 0x73, 0x2e, 0x66, 0x69, 0x6e, 0x64, + 0x28, 0x70, 0x20, 0x3d, 0x3e, 0x20, 0x70, 0x2e, 0x74, 0x6f, 0x6b, 0x5f, + 0x73, 0x74, 0x72, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x6d, 0x73, 0x67, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x0a, 0x20, 0x20, 0x20, + 0x20, 
0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x70, + 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x20, 0x3d, 0x20, 0x66, 0x6f, 0x75, 0x6e, + 0x64, 0x20, 0x3f, 0x20, 0x70, 0x72, 0x6f, 0x62, 0x43, 0x6f, 0x6c, 0x6f, + 0x72, 0x28, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x62, + 0x29, 0x20, 0x3a, 0x20, 0x27, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x27, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x70, 0x6f, 0x70, + 0x6f, 0x76, 0x65, 0x72, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, + 0x20, 0x3d, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x70, 0x72, 0x6f, 0x62, 0x2d, + 0x73, 0x65, 0x74, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x70, 0x72, 0x6f, 0x62, + 0x73, 0x2e, 0x6d, 0x61, 0x70, 0x28, 0x28, 0x70, 0x2c, 0x20, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x6b, 0x65, 0x79, 0x3d, 0x24, 0x7b, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, - 0x69, 0x74, 0x6c, 0x65, 0x3d, 0x24, 0x7b, 0x60, 0x70, 0x72, 0x6f, 0x62, - 0x3a, 0x20, 0x24, 0x7b, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x62, 0x7d, 0x60, - 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x79, 0x6c, - 0x65, 0x3d, 0x24, 0x7b, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x64, 0x64, - 0x69, 0x6e, 0x67, 0x3a, 0x20, 0x27, 0x30, 0x2e, 0x33, 0x65, 0x6d, 0x27, - 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, - 0x6e, 0x64, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x3a, 0x20, 0x70, 0x2e, 0x74, - 0x6f, 0x6b, 0x5f, 0x73, 0x74, 0x72, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x3f, 0x20, 0x70, 0x72, 0x6f, - 0x62, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x28, 0x70, 0x2e, 0x70, 0x72, 0x6f, - 0x62, 0x29, 0x20, 0x3a, 0x20, 0x27, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x27, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x7d, 0x0a, 0x20, 0x20, + 0x20, 0x3c, 0x64, 0x69, 0x76, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, - 0x70, 0x61, 0x6e, 0x3e, 0x24, 0x7b, 0x70, 0x2e, 0x74, 0x6f, 0x6b, 0x5f, - 0x73, 0x74, 0x72, 0x7d, 0x3a, 0x20, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, - 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, - 0x6e, 0x3e, 0x24, 0x7b, 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x66, 0x6c, 0x6f, - 0x6f, 0x72, 0x28, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x62, 0x20, 0x2a, 0x20, - 0x31, 0x30, 0x30, 0x29, 0x7d, 0x25, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, - 0x3e, 0x0a, 
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, + 0x6b, 0x65, 0x79, 0x3d, 0x24, 0x7b, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x29, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, - 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24, 0x7b, 0x50, 0x6f, 0x70, 0x6f, 0x76, - 0x65, 0x72, 0x7d, 0x20, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, 0x24, 0x7b, - 0x7b, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, - 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x3a, 0x20, 0x70, 0x43, 0x6f, 0x6c, 0x6f, - 0x72, 0x20, 0x7d, 0x7d, 0x20, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, - 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x3d, 0x24, 0x7b, 0x70, - 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, - 0x65, 0x6e, 0x7d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x6d, 0x73, 0x67, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x28, 0x2f, 0x5c, 0x6e, 0x2f, 0x67, 0x69, 0x6d, 0x29, 0x20, 0x3f, 0x20, - 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x62, 0x72, 0x20, 0x2f, 0x3e, 0x60, - 0x20, 0x3a, 0x20, 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3c, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, - 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x2f, 0x2f, 0x20, 0x70, 0x6f, 0x6f, 0x72, 0x20, 0x6d, 0x61, 0x6e, - 0x73, 0x20, 0x6d, 0x61, 0x72, 0x6b, 0x64, 0x6f, 0x77, 0x6e, 0x20, 0x72, - 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x4d, 0x61, 0x72, - 0x6b, 0x64, 0x6f, 0x77, 0x6e, 0x69, 0x73, 0x68, 0x20, 0x3d, 0x20, 0x28, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, - 0x20, 0x6d, 0x64, 0x20, 0x3d, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x2e, 0x74, 0x65, 0x78, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, - 0x26, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x26, 0x61, 0x6d, 0x70, 0x3b, 0x27, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x69, 0x74, 0x6c, 0x65, + 0x3d, 0x24, 0x7b, 0x60, 0x70, 0x72, 0x6f, 0x62, 0x3a, 0x20, 0x24, 0x7b, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x62, 0x7d, 0x60, 0x7d, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, 0x24, 0x7b, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x3a, + 0x20, 0x27, 0x30, 0x2e, 0x33, 0x65, 0x6d, 0x27, 0x2c, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, + 0x6c, 0x6f, 
0x72, 0x3a, 0x20, 0x70, 0x2e, 0x74, 0x6f, 0x6b, 0x5f, 0x73, + 0x74, 0x72, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x20, 0x3f, 0x20, 0x70, 0x72, 0x6f, 0x62, 0x43, 0x6f, 0x6c, + 0x6f, 0x72, 0x28, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x62, 0x29, 0x20, 0x3a, + 0x20, 0x27, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x27, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3e, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x3e, + 0x24, 0x7b, 0x70, 0x2e, 0x74, 0x6f, 0x6b, 0x5f, 0x73, 0x74, 0x72, 0x7d, + 0x3a, 0x20, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x24, 0x7b, + 0x4d, 0x61, 0x74, 0x68, 0x2e, 0x66, 0x6c, 0x6f, 0x6f, 0x72, 0x28, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x62, 0x20, 0x2a, 0x20, 0x31, 0x30, 0x30, 0x29, + 0x7d, 0x25, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x7d, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, + 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x60, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, + 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x3c, 0x24, 0x7b, 0x50, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x7d, 0x20, + 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, 0x24, 0x7b, 0x7b, 0x20, 0x62, 0x61, + 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6c, 0x6f, + 0x72, 0x3a, 0x20, 0x70, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x20, 0x7d, 0x7d, + 0x20, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x43, 0x68, 0x69, 0x6c, + 0x64, 0x72, 0x65, 0x6e, 0x3d, 0x24, 0x7b, 0x70, 0x6f, 0x70, 0x6f, 0x76, + 0x65, 0x72, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x7d, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x24, 0x7b, 0x6d, 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x28, 0x2f, 0x5c, 0x6e, + 0x2f, 0x67, 0x69, 0x6d, 0x29, 0x20, 0x3f, 0x20, 0x68, 0x74, 0x6d, 0x6c, + 0x60, 0x3c, 0x62, 0x72, 0x20, 0x2f, 0x3e, 0x60, 0x20, 0x3a, 0x20, 0x6d, + 0x73, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x7d, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, + 0x70, 0x6f, 0x6f, 0x72, 0x20, 0x6d, 0x61, 0x6e, 0x73, 0x20, 0x6d, 0x61, + 0x72, 0x6b, 0x64, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, + 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x4d, 0x61, 0x72, 0x6b, 0x64, 0x6f, 0x77, + 0x6e, 0x69, 0x73, 0x68, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 
0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6d, 0x64, 0x20, + 0x3d, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x74, 0x65, 0x78, + 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, + 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x26, 0x2f, 0x67, 0x2c, + 0x20, 0x27, 0x26, 0x61, 0x6d, 0x70, 0x3b, 0x27, 0x29, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, + 0x63, 0x65, 0x28, 0x2f, 0x3c, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x26, 0x6c, + 0x74, 0x3b, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x3e, + 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x26, 0x67, 0x74, 0x3b, 0x27, 0x29, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, + 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5e, 0x23, 0x7b, 0x31, 0x2c, 0x36, + 0x7d, 0x20, 0x28, 0x2e, 0x2a, 0x29, 0x24, 0x2f, 0x67, 0x69, 0x6d, 0x2c, + 0x20, 0x27, 0x3c, 0x68, 0x33, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x68, 0x33, + 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5c, 0x2a, + 0x5c, 0x2a, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5c, 0x2a, 0x5c, 0x2a, 0x2f, + 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, + 0x24, 0x31, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, - 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x3c, 0x2f, 0x67, 0x2c, - 0x20, 0x27, 0x26, 0x6c, 0x74, 0x3b, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, - 0x65, 0x28, 0x2f, 0x3e, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x26, 0x67, 0x74, - 0x3b, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5e, 0x23, - 0x7b, 0x31, 0x2c, 0x36, 0x7d, 0x20, 0x28, 0x2e, 0x2a, 0x29, 0x24, 0x2f, - 0x67, 0x69, 0x6d, 0x2c, 0x20, 0x27, 0x3c, 0x68, 0x33, 0x3e, 0x24, 0x31, - 0x3c, 0x2f, 0x68, 0x33, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5f, 0x5f, 0x28, 0x2e, + 0x2a, 0x3f, 0x29, 0x5f, 0x5f, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x73, + 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x73, 0x74, + 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, - 0x28, 0x2f, 0x5c, 0x2a, 0x5c, 0x2a, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5c, - 0x2a, 0x5c, 0x2a, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x73, 0x74, 0x72, - 0x6f, 0x6e, 0x67, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x6f, - 0x6e, 0x67, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, - 0x5f, 0x5f, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5f, 0x5f, 0x2f, 0x67, 0x2c, - 0x20, 0x27, 0x3c, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x24, 0x31, - 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x27, 0x29, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, - 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5c, 0x2a, 0x28, 0x2e, 0x2a, 0x3f, - 0x29, 0x5c, 0x2a, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x65, 0x6d, 0x3e, - 0x24, 0x31, 0x3c, 0x2f, 0x65, 0x6d, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, - 0x63, 0x65, 0x28, 
0x2f, 0x5f, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5f, 0x2f, + 0x28, 0x2f, 0x5c, 0x2a, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5c, 0x2a, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x65, 0x6d, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x65, 0x6d, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, - 0x60, 0x60, 0x60, 0x2e, 0x2a, 0x3f, 0x5c, 0x6e, 0x28, 0x5b, 0x5c, 0x73, - 0x5c, 0x53, 0x5d, 0x2a, 0x3f, 0x29, 0x60, 0x60, 0x60, 0x2f, 0x67, 0x2c, - 0x20, 0x27, 0x3c, 0x70, 0x72, 0x65, 0x3e, 0x3c, 0x63, 0x6f, 0x64, 0x65, - 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3e, 0x3c, 0x2f, - 0x70, 0x72, 0x65, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, - 0x2f, 0x60, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x60, 0x2f, 0x67, 0x2c, 0x20, - 0x27, 0x3c, 0x63, 0x6f, 0x64, 0x65, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x63, - 0x6f, 0x64, 0x65, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, - 0x2f, 0x5c, 0x6e, 0x2f, 0x67, 0x69, 0x6d, 0x2c, 0x20, 0x27, 0x3c, 0x62, - 0x72, 0x20, 0x2f, 0x3e, 0x27, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, - 0x6c, 0x60, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x20, 0x64, 0x61, 0x6e, 0x67, - 0x65, 0x72, 0x6f, 0x75, 0x73, 0x6c, 0x79, 0x53, 0x65, 0x74, 0x49, 0x6e, - 0x6e, 0x65, 0x72, 0x48, 0x54, 0x4d, 0x4c, 0x3d, 0x24, 0x7b, 0x7b, 0x20, - 0x5f, 0x5f, 0x68, 0x74, 0x6d, 0x6c, 0x3a, 0x20, 0x6d, 0x64, 0x20, 0x7d, - 0x7d, 0x20, 0x2f, 0x3e, 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, - 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, - 0x20, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x20, 0x3d, 0x20, 0x28, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, - 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, - 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x2f, 0x3e, 0x60, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, - 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x6c, 0x6c, 0x61, 0x6d, 0x61, - 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, - 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x65, - 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x6d, 0x73, 0x2e, 0x74, - 0x6f, 0x46, 0x69, 0x78, 0x65, 0x64, 0x28, 0x29, 0x7d, 0x6d, 0x73, 0x20, - 0x70, 0x65, 0x72, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x2c, 0x20, 0x24, - 0x7b, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, - 0x74, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, - 0x6e, 0x64, 0x2e, 0x74, 0x6f, 0x46, 0x69, 0x78, 0x65, 0x64, 0x28, 0x32, - 0x29, 0x7d, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x20, 0x70, 0x65, - 0x72, 0x20, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 
0x20, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, - 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, - 0x72, 0x20, 0x69, 0x6d, 0x70, 0x6c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, - 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x50, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, - 0x20, 0x3d, 0x20, 0x28, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, 0x20, 0x3d, - 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, - 0x6e, 0x73, 0x74, 0x20, 0x69, 0x73, 0x4f, 0x70, 0x65, 0x6e, 0x20, 0x3d, - 0x20, 0x75, 0x73, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, 0x53, 0x69, 0x67, - 0x6e, 0x61, 0x6c, 0x28, 0x7b, 0x20, 0x74, 0x6f, 0x70, 0x3a, 0x20, 0x27, - 0x30, 0x70, 0x78, 0x27, 0x2c, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x3a, 0x20, - 0x27, 0x30, 0x70, 0x78, 0x27, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x62, 0x75, - 0x74, 0x74, 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x20, 0x3d, 0x20, 0x75, 0x73, - 0x65, 0x52, 0x65, 0x66, 0x28, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x3b, 0x0a, + 0x5f, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5f, 0x2f, 0x67, 0x2c, 0x20, 0x27, + 0x3c, 0x65, 0x6d, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x65, 0x6d, 0x3e, 0x27, + 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, + 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x60, 0x60, 0x60, 0x2e, + 0x2a, 0x3f, 0x5c, 0x6e, 0x28, 0x5b, 0x5c, 0x73, 0x5c, 0x53, 0x5d, 0x2a, + 0x3f, 0x29, 0x60, 0x60, 0x60, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x70, + 0x72, 0x65, 0x3e, 0x3c, 0x63, 0x6f, 0x64, 0x65, 0x3e, 0x24, 0x31, 0x3c, + 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3e, 0x3c, 0x2f, 0x70, 0x72, 0x65, 0x3e, + 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, + 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x60, 0x28, 0x2e, + 0x2a, 0x3f, 0x29, 0x60, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x63, 0x6f, + 0x64, 0x65, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3e, + 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, + 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5c, 0x6e, 0x2f, + 0x67, 0x69, 0x6d, 0x2c, 0x20, 0x27, 0x3c, 0x62, 0x72, 0x20, 0x2f, 0x3e, + 0x27, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x73, + 0x70, 0x61, 0x6e, 0x20, 0x64, 0x61, 0x6e, 0x67, 0x65, 0x72, 0x6f, 0x75, + 0x73, 0x6c, 0x79, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x6e, 0x65, 0x72, 0x48, + 0x54, 0x4d, 0x4c, 0x3d, 0x24, 0x7b, 0x7b, 0x20, 0x5f, 0x5f, 0x68, 0x74, + 0x6d, 0x6c, 0x3a, 0x20, 0x6d, 0x64, 0x20, 0x7d, 0x7d, 0x20, 0x2f, 0x3e, + 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x4d, 0x6f, 0x64, + 0x65, 0x6c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x6e, 0x66, 0x6f, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x6c, 0x6c, 0x61, 0x6d, + 0x61, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x65, 0x74, 0x75, 
0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, + 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x2f, 0x3e, 0x60, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, + 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x24, 0x7b, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x64, + 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x5f, 0x6d, 0x73, 0x2e, 0x74, 0x6f, 0x46, 0x69, 0x78, + 0x65, 0x64, 0x28, 0x29, 0x7d, 0x6d, 0x73, 0x20, 0x70, 0x65, 0x72, 0x20, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x2c, 0x20, 0x24, 0x7b, 0x6c, 0x6c, 0x61, + 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x2e, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x2e, 0x74, + 0x6f, 0x46, 0x69, 0x78, 0x65, 0x64, 0x28, 0x32, 0x29, 0x7d, 0x20, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x20, 0x70, 0x65, 0x72, 0x20, 0x73, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x69, 0x6d, 0x70, 0x6c, + 0x65, 0x20, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x69, 0x6d, + 0x70, 0x6c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x20, 0x50, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x28, + 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, - 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x20, 0x3d, - 0x20, 0x75, 0x73, 0x65, 0x52, 0x65, 0x66, 0x28, 0x6e, 0x75, 0x6c, 0x6c, - 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, - 0x6e, 0x73, 0x74, 0x20, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x50, 0x6f, - 0x70, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x28, 0x29, 0x20, 0x3d, - 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x69, 0x66, 0x20, 0x28, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x52, 0x65, - 0x66, 0x2e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x29, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, - 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x72, 0x65, 0x63, 0x74, 0x20, 0x3d, 0x20, + 0x69, 0x73, 0x4f, 0x70, 0x65, 0x6e, 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, + 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x74, 0x20, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x20, + 0x3d, 0x20, 0x75, 0x73, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x28, + 0x7b, 0x20, 0x74, 0x6f, 0x70, 0x3a, 0x20, 0x27, 0x30, 0x70, 0x78, 0x27, + 0x2c, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x3a, 0x20, 0x27, 0x30, 0x70, 0x78, + 0x27, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, + 0x52, 0x65, 0x66, 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, 0x52, 0x65, 0x66, + 0x28, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x70, 0x6f, 0x70, 0x6f, + 0x76, 0x65, 0x72, 0x52, 
0x65, 0x66, 0x20, 0x3d, 0x20, 0x75, 0x73, 0x65, + 0x52, 0x65, 0x66, 0x28, 0x6e, 0x75, 0x6c, 0x6c, 0x29, 0x3b, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, + 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x50, 0x6f, 0x70, 0x6f, 0x76, 0x65, + 0x72, 0x20, 0x3d, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x2e, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x67, 0x65, 0x74, 0x42, 0x6f, 0x75, - 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, - 0x65, 0x63, 0x74, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x0a, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x20, 0x72, 0x65, 0x63, 0x74, 0x20, 0x3d, 0x20, 0x62, 0x75, 0x74, 0x74, + 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x2e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x74, 0x2e, 0x67, 0x65, 0x74, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x63, 0x74, 0x28, + 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x6f, 0x70, 0x3a, + 0x20, 0x60, 0x24, 0x7b, 0x72, 0x65, 0x63, 0x74, 0x2e, 0x62, 0x6f, 0x74, + 0x74, 0x6f, 0x6d, 0x20, 0x2b, 0x20, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, + 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x59, 0x7d, 0x70, 0x78, 0x60, + 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x3a, 0x20, 0x60, 0x24, 0x7b, 0x72, + 0x65, 0x63, 0x74, 0x2e, 0x6c, 0x65, 0x66, 0x74, 0x20, 0x2b, 0x20, 0x77, + 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, + 0x58, 0x7d, 0x70, 0x78, 0x60, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x69, 0x73, 0x4f, 0x70, 0x65, 0x6e, 0x2e, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x20, 0x3d, 0x20, 0x21, 0x69, 0x73, 0x4f, 0x70, 0x65, 0x6e, + 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x4f, 0x75, 0x74, 0x73, 0x69, 0x64, 0x65, + 0x20, 0x3d, 0x20, 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x29, 0x20, 0x3d, + 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x2e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x26, + 0x26, 0x20, 0x21, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, + 0x66, 0x2e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x28, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x29, 0x20, 0x26, 0x26, 0x20, + 0x21, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x2e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x73, 0x28, 
0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x29, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x73, 0x4f, 0x70, 0x65, + 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, 0x73, 0x65, 0x45, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x28, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, + 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x61, 0x64, 0x64, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x28, 0x27, + 0x6d, 0x6f, 0x75, 0x73, 0x65, 0x64, 0x6f, 0x77, 0x6e, 0x27, 0x2c, 0x20, + 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x4f, + 0x75, 0x74, 0x73, 0x69, 0x64, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x28, 0x27, + 0x6d, 0x6f, 0x75, 0x73, 0x65, 0x64, 0x6f, 0x77, 0x6e, 0x27, 0x2c, 0x20, + 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x4f, + 0x75, 0x74, 0x73, 0x69, 0x64, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x2c, 0x20, 0x5b, 0x5d, 0x29, 0x3b, 0x0a, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, + 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x20, 0x73, 0x74, 0x79, 0x6c, + 0x65, 0x3d, 0x24, 0x7b, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x73, 0x74, + 0x79, 0x6c, 0x65, 0x7d, 0x20, 0x72, 0x65, 0x66, 0x3d, 0x24, 0x7b, 0x62, + 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x7d, 0x20, 0x6f, 0x6e, + 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, 0x74, 0x6f, 0x67, 0x67, + 0x6c, 0x65, 0x50, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x7d, 0x3e, 0x24, + 0x7b, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x63, 0x68, 0x69, 0x6c, 0x64, + 0x72, 0x65, 0x6e, 0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x69, 0x73, + 0x4f, 0x70, 0x65, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x26, + 0x26, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24, 0x7b, 0x50, 0x6f, 0x72, + 0x74, 0x61, 0x6c, 0x7d, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x3d, 0x22, 0x23, + 0x70, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, + 0x76, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x66, 0x3d, 0x24, 0x7b, 0x70, 0x6f, + 0x70, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x74, 0x6f, 0x70, 0x3a, 0x20, 0x60, 0x24, 0x7b, 0x72, 0x65, 0x63, 0x74, - 0x2e, 0x62, 0x6f, 0x74, 0x74, 0x6f, 0x6d, 0x20, 0x2b, 0x20, 0x77, 0x69, - 0x6e, 0x64, 0x6f, 0x77, 0x2e, 0x73, 0x63, 0x72, 0x6f, 0x6c, 0x6c, 0x59, - 0x7d, 0x70, 0x78, 0x60, 
0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x3a, 0x20, - 0x60, 0x24, 0x7b, 0x72, 0x65, 0x63, 0x74, 0x2e, 0x6c, 0x65, 0x66, 0x74, - 0x20, 0x2b, 0x20, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x2e, 0x73, 0x63, - 0x72, 0x6f, 0x6c, 0x6c, 0x58, 0x7d, 0x70, 0x78, 0x60, 0x2c, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x73, 0x4f, 0x70, 0x65, 0x6e, - 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3d, 0x20, 0x21, 0x69, 0x73, - 0x4f, 0x70, 0x65, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x68, 0x61, - 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x4f, 0x75, 0x74, - 0x73, 0x69, 0x64, 0x65, 0x20, 0x3d, 0x20, 0x28, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x70, 0x6f, 0x70, 0x6f, - 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x2e, 0x63, 0x75, 0x72, 0x72, 0x65, - 0x6e, 0x74, 0x20, 0x26, 0x26, 0x20, 0x21, 0x70, 0x6f, 0x70, 0x6f, 0x76, - 0x65, 0x72, 0x52, 0x65, 0x66, 0x2e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, - 0x74, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x28, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x29, - 0x20, 0x26, 0x26, 0x20, 0x21, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x52, - 0x65, 0x66, 0x2e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x2e, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x28, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x2e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x29, 0x29, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, - 0x73, 0x4f, 0x70, 0x65, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, - 0x3d, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x75, - 0x73, 0x65, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x28, 0x28, 0x29, 0x20, - 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x61, 0x64, - 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x65, 0x72, 0x28, 0x27, 0x6d, 0x6f, 0x75, 0x73, 0x65, 0x64, 0x6f, 0x77, - 0x6e, 0x27, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6c, - 0x69, 0x63, 0x6b, 0x4f, 0x75, 0x74, 0x73, 0x69, 0x64, 0x65, 0x29, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, - 0x75, 0x72, 0x6e, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x6f, - 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x76, - 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x65, 0x72, 0x28, 0x27, 0x6d, 0x6f, 0x75, 0x73, 0x65, 0x64, 0x6f, 0x77, - 0x6e, 0x27, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x43, 0x6c, - 0x69, 0x63, 0x6b, 0x4f, 0x75, 0x74, 0x73, 0x69, 0x64, 0x65, 0x29, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x2c, 0x20, 0x5b, 0x5d, 0x29, - 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, - 0x75, 0x72, 0x6e, 0x20, 
0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x20, - 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, 0x24, 0x7b, 0x70, 0x72, 0x6f, 0x70, - 0x73, 0x2e, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x7d, 0x20, 0x72, 0x65, 0x66, - 0x3d, 0x24, 0x7b, 0x62, 0x75, 0x74, 0x74, 0x6f, 0x6e, 0x52, 0x65, 0x66, - 0x7d, 0x20, 0x6f, 0x6e, 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x3d, 0x24, 0x7b, - 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x50, 0x6f, 0x70, 0x6f, 0x76, 0x65, - 0x72, 0x7d, 0x3e, 0x24, 0x7b, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x63, - 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x7d, 0x3c, 0x2f, 0x73, 0x70, - 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x24, 0x7b, 0x69, 0x73, 0x4f, 0x70, 0x65, 0x6e, 0x2e, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x20, 0x26, 0x26, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24, - 0x7b, 0x50, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x7d, 0x20, 0x69, 0x6e, 0x74, - 0x6f, 0x3d, 0x22, 0x23, 0x70, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x22, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x64, 0x69, 0x76, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x66, 0x3d, - 0x24, 0x7b, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, - 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x70, - 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x22, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, - 0x24, 0x7b, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x74, 0x6f, 0x70, 0x3a, 0x20, 0x70, 0x6f, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, - 0x74, 0x6f, 0x70, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x3a, 0x20, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x2e, 0x6c, 0x65, 0x66, 0x74, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x7d, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3e, 0x0a, 0x20, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, 0x22, 0x70, 0x6f, 0x70, 0x6f, 0x76, + 0x65, 0x72, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x24, 0x7b, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x70, 0x6f, 0x70, - 0x6f, 0x76, 0x65, 0x72, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, - 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x24, 0x7b, 0x50, - 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x7d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x60, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x3a, 0x20, 0x70, 0x72, 0x65, 0x61, 0x63, 0x74, 0x2d, 0x70, 0x6f, - 0x72, 0x74, 0x61, 0x6c, 0x20, 0x28, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, - 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x64, 0x65, 0x76, 
0x65, 0x6c, 0x6f, 0x70, 0x69, 0x74, 0x2f, 0x70, + 0x20, 0x20, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, 0x24, 0x7b, 0x7b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x74, 0x6f, 0x70, 0x3a, 0x20, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x6f, 0x70, 0x2c, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x6c, 0x65, 0x66, 0x74, 0x3a, 0x20, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6c, 0x65, + 0x66, 0x74, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x7d, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x70, + 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x70, 0x6f, 0x70, 0x6f, 0x76, 0x65, 0x72, + 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x7d, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, + 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x24, 0x7b, 0x50, 0x6f, 0x72, 0x74, 0x61, + 0x6c, 0x7d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x60, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x2f, 0x2f, 0x20, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x20, 0x70, 0x72, 0x65, 0x61, 0x63, 0x74, 0x2d, 0x70, 0x6f, 0x72, 0x74, 0x61, 0x6c, - 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x2f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, - 0x2f, 0x73, 0x72, 0x63, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x63, 0x74, 0x2d, - 0x70, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x2e, 0x6a, 0x73, 0x29, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x2f, 0x2a, 0x2a, 0x20, 0x52, 0x65, 0x64, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x69, 0x6e, - 0x67, 0x20, 0x6f, 0x66, 0x20, 0x64, 0x65, 0x73, 0x63, 0x65, 0x6e, 0x64, - 0x61, 0x6e, 0x74, 0x73, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, - 0x65, 0x20, 0x67, 0x69, 0x76, 0x65, 0x6e, 0x20, 0x43, 0x53, 0x53, 0x20, - 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x20, 0x2a, 0x2f, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x20, 0x50, 0x6f, - 0x72, 0x74, 0x61, 0x6c, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x73, - 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x20, 0x7b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6f, - 0x6e, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x64, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x28, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, - 0x6c, 0x65, 0x74, 0x20, 0x69, 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, - 0x70, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x70, 0x72, 0x6f, 0x70, - 0x73, 0x5b, 0x69, 0x5d, 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x74, 0x68, 0x69, - 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x5b, 0x69, 0x5d, 0x29, 0x20, + 0x20, 0x28, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x65, 0x76, + 0x65, 0x6c, 0x6f, 0x70, 0x69, 0x74, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x63, + 0x74, 0x2d, 0x70, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x2f, 0x62, 0x6c, 0x6f, + 0x62, 0x2f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x73, 0x72, 0x63, + 0x2f, 0x70, 0x72, 0x65, 
0x61, 0x63, 0x74, 0x2d, 0x70, 0x6f, 0x72, 0x74, + 0x61, 0x6c, 0x2e, 0x6a, 0x73, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, + 0x2a, 0x2a, 0x20, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x20, + 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x6f, 0x66, + 0x20, 0x64, 0x65, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x73, + 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x67, 0x69, + 0x76, 0x65, 0x6e, 0x20, 0x43, 0x53, 0x53, 0x20, 0x73, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x20, 0x2a, 0x2f, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x20, 0x50, 0x6f, 0x72, 0x74, 0x61, 0x6c, + 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x73, 0x20, 0x43, 0x6f, 0x6d, + 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x44, 0x69, 0x64, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x28, 0x70, 0x72, + 0x6f, 0x70, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x6c, 0x65, 0x74, 0x20, + 0x69, 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x73, 0x65, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x28, 0x74, 0x68, 0x69, 0x73, - 0x2e, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x4c, 0x61, 0x79, 0x65, 0x72, - 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, - 0x74, 0x44, 0x69, 0x64, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x28, 0x29, 0x20, - 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, - 0x69, 0x73, 0x2e, 0x69, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x64, - 0x20, 0x3d, 0x20, 0x74, 0x72, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, - 0x6e, 0x64, 0x65, 0x72, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x20, 0x3d, 0x20, - 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x4c, - 0x61, 0x79, 0x65, 0x72, 0x2e, 0x62, 0x69, 0x6e, 0x64, 0x28, 0x74, 0x68, - 0x69, 0x73, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, - 0x4c, 0x61, 0x79, 0x65, 0x72, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x69, 0x66, 0x20, 0x28, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x5b, 0x69, 0x5d, + 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x70, 0x73, 0x5b, 0x69, 0x5d, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x73, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6e, + 0x64, 0x65, 0x72, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x29, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x57, 0x69, 0x6c, - 0x6c, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x28, 0x29, 0x20, 0x7b, + 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x64, + 0x4d, 0x6f, 0x75, 0x6e, 0x74, 
0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, + 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x64, 0x20, 0x3d, 0x20, 0x74, + 0x72, 0x75, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, + 0x4c, 0x61, 0x79, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x74, 0x68, 0x69, 0x73, + 0x2e, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x4c, 0x61, 0x79, 0x65, 0x72, + 0x2e, 0x62, 0x69, 0x6e, 0x64, 0x28, 0x74, 0x68, 0x69, 0x73, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x4c, 0x61, 0x79, 0x65, - 0x72, 0x28, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, - 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x64, 0x20, 0x3d, 0x20, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x20, 0x26, 0x26, 0x20, 0x74, 0x68, 0x69, - 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x29, 0x20, 0x74, 0x68, 0x69, - 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x28, 0x74, 0x68, 0x69, 0x73, - 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x66, 0x69, 0x6e, 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x28, 0x6e, 0x6f, - 0x64, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x74, 0x79, 0x70, - 0x65, 0x6f, 0x66, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x3d, 0x3d, 0x3d, - 0x20, 0x27, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x27, 0x20, 0x3f, 0x20, - 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x28, 0x6e, - 0x6f, 0x64, 0x65, 0x29, 0x20, 0x3a, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x3b, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x4c, 0x61, - 0x79, 0x65, 0x72, 0x28, 0x73, 0x68, 0x6f, 0x77, 0x20, 0x3d, 0x20, 0x74, - 0x72, 0x75, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x74, 0x68, 0x69, 0x73, - 0x2e, 0x69, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x64, 0x29, 0x20, - 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x63, 0x6c, 0x65, 0x61, - 0x6e, 0x20, 0x75, 0x70, 0x20, 0x6f, 0x6c, 0x64, 0x20, 0x6e, 0x6f, 0x64, - 0x65, 0x20, 0x69, 0x66, 0x20, 0x6d, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x20, - 0x62, 0x61, 0x73, 0x65, 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, + 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6d, 0x70, + 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x57, 0x69, 0x6c, 0x6c, 0x55, 0x6e, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, + 0x6e, 0x64, 0x65, 0x72, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x28, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x29, 0x3b, 0x0a, 
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x73, 0x4d, 0x6f, 0x75, + 0x6e, 0x74, 0x65, 0x64, 0x20, 0x3d, 0x20, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, + 0x20, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x20, 0x26, 0x26, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4e, + 0x6f, 0x64, 0x65, 0x29, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4e, + 0x6f, 0x64, 0x65, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x43, 0x68, + 0x69, 0x6c, 0x64, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x69, 0x6e, + 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x28, 0x6e, 0x6f, 0x64, 0x65, 0x29, 0x20, + 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x74, 0x79, 0x70, 0x65, 0x6f, 0x66, 0x20, + 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x27, 0x73, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x27, 0x20, 0x3f, 0x20, 0x64, 0x6f, 0x63, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x28, 0x6e, 0x6f, 0x64, 0x65, 0x29, + 0x20, 0x3a, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x28, + 0x73, 0x68, 0x6f, 0x77, 0x20, 0x3d, 0x20, 0x74, 0x72, 0x75, 0x65, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, + 0x66, 0x20, 0x28, 0x21, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x73, 0x4d, + 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x64, 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x2f, 0x2f, 0x20, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x20, 0x75, 0x70, + 0x20, 0x6f, 0x6c, 0x64, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x69, 0x66, + 0x20, 0x6d, 0x6f, 0x76, 0x69, 0x6e, 0x67, 0x20, 0x62, 0x61, 0x73, 0x65, + 0x73, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, + 0x66, 0x20, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x70, + 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x74, + 0x68, 0x69, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x50, 0x6f, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x6e, + 0x74, 0x6f, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x20, 0x3d, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x69, + 0x6e, 0x74, 0x6f, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, - 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x21, - 0x3d, 0x3d, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, - 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, + 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x26, 0x26, 0x20, 0x74, 0x68, 0x69, 0x73, + 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, + 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x20, 0x3d, + 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 
0x72, 0x28, 0x68, 0x74, 0x6d, 0x6c, + 0x60, 0x3c, 0x24, 0x7b, 0x50, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x50, 0x72, + 0x6f, 0x78, 0x79, 0x7d, 0x20, 0x2f, 0x3e, 0x60, 0x2c, 0x20, 0x74, 0x68, + 0x69, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x2c, 0x20, 0x74, 0x68, 0x69, + 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x29, 0x3b, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, - 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x20, 0x3d, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x70, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x74, - 0x68, 0x69, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x26, 0x26, 0x20, - 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x29, - 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x20, 0x3d, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x28, - 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x24, 0x7b, 0x50, 0x6f, 0x72, 0x74, - 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x7d, 0x20, 0x2f, 0x3e, 0x60, - 0x2c, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x2c, - 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x3d, - 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x66, 0x69, 0x6e, 0x64, 0x4e, 0x6f, - 0x64, 0x65, 0x28, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x70, - 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, - 0x28, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24, 0x7b, 0x50, 0x6f, 0x72, 0x74, - 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x7d, 0x20, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x78, 0x74, 0x3d, 0x24, 0x7b, 0x74, 0x68, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x7d, 0x3e, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, - 0x73, 0x68, 0x6f, 0x77, 0x20, 0x26, 0x26, 0x20, 0x74, 0x68, 0x69, 0x73, - 0x2e, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x63, 0x68, 0x69, 0x6c, 0x64, - 0x72, 0x65, 0x6e, 0x20, 0x7c, 0x7c, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x7d, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x2f, 0x24, 0x7b, 0x50, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x50, 0x72, 0x6f, - 0x78, 0x79, 0x7d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x60, 0x2c, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x6e, 0x74, - 0x6f, 0x2c, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, - 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x6e, 0x64, - 0x65, 0x72, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6e, 0x75, + 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x3d, 0x20, 0x74, 0x68, 0x69, + 0x73, 0x2e, 0x66, 0x69, 0x6e, 0x64, 
0x4e, 0x6f, 0x64, 0x65, 0x28, 0x74, + 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x69, 0x6e, + 0x74, 0x6f, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x20, + 0x3d, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x28, 0x68, 0x74, 0x6d, + 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x24, 0x7b, 0x50, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x50, 0x72, + 0x6f, 0x78, 0x79, 0x7d, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x3d, 0x24, 0x7b, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x7d, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x24, 0x7b, 0x73, 0x68, 0x6f, 0x77, + 0x20, 0x26, 0x26, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x70, 0x73, 0x2e, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x20, + 0x7c, 0x7c, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x7d, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x24, 0x7b, 0x50, + 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x7d, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x2c, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x6f, 0x2c, 0x20, 0x74, + 0x68, 0x69, 0x73, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x29, 0x3b, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x28, 0x29, + 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x3b, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x68, 0x69, 0x67, + 0x68, 0x2d, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x20, 0x63, 0x6f, 0x6d, 0x70, + 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x72, + 0x65, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x20, 0x69, 0x74, 0x73, 0x20, 0x66, + 0x69, 0x72, 0x73, 0x74, 0x20, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x20, 0x69, + 0x66, 0x20, 0x69, 0x74, 0x20, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x2e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x75, 0x73, 0x65, 0x64, + 0x20, 0x61, 0x73, 0x20, 0x61, 0x20, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x20, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x20, 0x50, 0x6f, 0x72, + 0x74, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x20, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x64, 0x73, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, + 0x6e, 0x74, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x67, + 0x65, 0x74, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x74, 0x68, + 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x6e, 0x64, + 0x65, 0x72, 0x28, 0x7b, 0x20, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, + 0x6e, 0x20, 0x7d, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x63, 0x68, + 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 
0x20, 0x7c, 0x7c, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, - 0x20, 0x68, 0x69, 0x67, 0x68, 0x2d, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x20, - 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x68, - 0x61, 0x74, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x20, 0x69, - 0x74, 0x73, 0x20, 0x66, 0x69, 0x72, 0x73, 0x74, 0x20, 0x63, 0x68, 0x69, - 0x6c, 0x64, 0x20, 0x69, 0x66, 0x20, 0x69, 0x74, 0x20, 0x65, 0x78, 0x69, - 0x73, 0x74, 0x73, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, - 0x75, 0x73, 0x65, 0x64, 0x20, 0x61, 0x73, 0x20, 0x61, 0x20, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x20, 0x72, 0x65, - 0x6e, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x70, 0x72, 0x6f, 0x78, - 0x79, 0x2e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x20, 0x50, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x78, 0x79, - 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x73, 0x20, 0x43, 0x6f, 0x6d, - 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x67, 0x65, 0x74, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x43, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x28, 0x29, 0x20, 0x7b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, - 0x6e, 0x20, 0x74, 0x68, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x70, 0x73, - 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x3b, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x28, 0x7b, 0x20, 0x63, 0x68, 0x69, - 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x20, 0x7d, 0x29, 0x20, 0x7b, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, - 0x6e, 0x20, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x20, 0x7c, - 0x7c, 0x20, 0x6e, 0x75, 0x6c, 0x6c, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, - 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, - 0x41, 0x70, 0x70, 0x28, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, 0x20, 0x7b, - 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, - 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, 0x63, 0x6c, - 0x61, 0x73, 0x73, 0x3d, 0x22, 0x6d, 0x6f, 0x64, 0x65, 0x2d, 0x24, 0x7b, - 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x7d, 0x22, 0x3e, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x68, 0x31, 0x3e, 0x6c, 0x6c, 0x61, - 0x6d, 0x61, 0x2e, 0x63, 0x70, 0x70, 0x3c, 0x2f, 0x68, 0x31, 0x3e, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6d, 0x61, 0x69, 0x6e, - 0x20, 0x69, 0x64, 0x3d, 0x22, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x3c, 0x24, 0x7b, 0x63, 0x68, 0x61, 0x74, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x65, 0x64, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, - 0x3f, 0x20, 0x43, 0x68, 0x61, 0x74, 0x4c, 0x6f, 0x67, 0x20, 0x3a, 0x20, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x46, 0x6f, 0x72, 0x6d, 0x7d, 0x20, - 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x3c, 0x2f, 0x6d, 0x61, 0x69, 0x6e, 0x3e, 0x0a, 0x0a, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, + 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x41, 0x70, 0x70, 0x28, + 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x0a, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, + 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x3d, + 0x22, 0x6d, 0x6f, 0x64, 0x65, 0x2d, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x20, 0x3d, 0x3d, 0x3d, 0x20, 0x27, 0x63, 0x68, 0x61, 0x74, - 0x27, 0x20, 0x3f, 0x20, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, - 0x6e, 0x70, 0x75, 0x74, 0x20, 0x3a, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6c, - 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x73, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x3c, 0x66, 0x6f, 0x6f, 0x74, 0x65, 0x72, 0x3e, 0x0a, 0x20, + 0x70, 0x65, 0x7d, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x68, 0x31, 0x3e, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e, 0x63, + 0x70, 0x70, 0x3c, 0x2f, 0x68, 0x31, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x3c, 0x6d, 0x61, 0x69, 0x6e, 0x20, 0x69, 0x64, 0x3d, + 0x22, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x70, 0x3e, 0x3c, 0x24, 0x7b, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, - 0x7d, 0x20, 0x2f, 0x3e, 0x3c, 0x2f, 0x70, 0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x70, 0x3e, - 0x50, 0x6f, 0x77, 0x65, 0x72, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x3c, - 0x61, 0x20, 0x68, 0x72, 0x65, 0x66, 0x3d, 0x22, 0x68, 0x74, 0x74, 0x70, - 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x67, 0x67, 0x65, 0x72, 0x67, 0x61, 0x6e, 0x6f, 0x76, - 0x2f, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e, 0x63, 0x70, 0x70, 0x22, 0x3e, - 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e, 0x63, 0x70, 0x70, 0x3c, 0x2f, 0x61, - 0x3e, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x3c, 0x61, 0x20, 0x68, 0x72, 0x65, - 0x66, 0x3d, 0x22, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, - 0x67, 0x6d, 0x6c, 0x2e, 0x61, 0x69, 0x22, 0x3e, 0x67, 0x67, 0x6d, 0x6c, - 0x2e, 0x61, 0x69, 0x3c, 0x2f, 0x61, 0x3e, 0x2e, 0x3c, 0x2f, 0x70, 0x3e, - 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, - 0x2f, 0x66, 0x6f, 0x6f, 0x74, 0x65, 0x72, 
0x3e, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, - 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x6e, 0x64, - 0x65, 0x72, 0x28, 0x68, 0x28, 0x41, 0x70, 0x70, 0x29, 0x2c, 0x20, 0x64, - 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x28, 0x27, 0x23, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x27, 0x29, 0x29, - 0x3b, 0x0a, 0x20, 0x20, 0x3c, 0x2f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x3e, 0x0a, 0x3c, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x3e, 0x0a, 0x0a, 0x3c, - 0x62, 0x6f, 0x64, 0x79, 0x3e, 0x0a, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, - 0x20, 0x69, 0x64, 0x3d, 0x22, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x66, 0x69, 0x6c, 0x65, - 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x20, 0x61, 0x63, 0x63, 0x65, 0x70, - 0x74, 0x3d, 0x22, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x2a, 0x22, 0x20, - 0x73, 0x74, 0x79, 0x6c, 0x65, 0x3d, 0x22, 0x64, 0x69, 0x73, 0x70, 0x6c, - 0x61, 0x79, 0x3a, 0x20, 0x6e, 0x6f, 0x6e, 0x65, 0x3b, 0x22, 0x3e, 0x0a, - 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x3c, - 0x64, 0x69, 0x76, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x70, 0x6f, 0x72, 0x74, - 0x61, 0x6c, 0x22, 0x3e, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x3c, - 0x2f, 0x62, 0x6f, 0x64, 0x79, 0x3e, 0x0a, 0x0a, 0x3c, 0x2f, 0x68, 0x74, - 0x6d, 0x6c, 0x3e, 0x0a, 0x0a + 0x24, 0x7b, 0x63, 0x68, 0x61, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, + 0x64, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x3f, 0x20, 0x43, 0x68, + 0x61, 0x74, 0x4c, 0x6f, 0x67, 0x20, 0x3a, 0x20, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x6d, + 0x61, 0x69, 0x6e, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x20, 0x69, 0x64, 0x3d, 0x22, 0x77, 0x72, 0x69, 0x74, 0x65, 0x22, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x24, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x20, 0x3d, + 0x3d, 0x3d, 0x20, 0x27, 0x63, 0x68, 0x61, 0x74, 0x27, 0x20, 0x3f, 0x20, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x20, 0x3a, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x7d, 0x20, 0x2f, + 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x3c, 0x2f, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3e, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x66, + 0x6f, 0x6f, 0x74, 0x65, 0x72, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x70, 0x3e, 0x3c, 0x24, + 0x7b, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x7d, 0x20, 0x2f, 0x3e, + 0x3c, 0x2f, 0x70, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x70, 0x3e, 0x50, 0x6f, 0x77, 0x65, + 0x72, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x3c, 0x61, 0x20, 0x68, 0x72, + 0x65, 0x66, 
0x3d, 0x22, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, + 0x67, 0x65, 0x72, 0x67, 0x61, 0x6e, 0x6f, 0x76, 0x2f, 0x6c, 0x6c, 0x61, + 0x6d, 0x61, 0x2e, 0x63, 0x70, 0x70, 0x22, 0x3e, 0x6c, 0x6c, 0x61, 0x6d, + 0x61, 0x2e, 0x63, 0x70, 0x70, 0x3c, 0x2f, 0x61, 0x3e, 0x20, 0x61, 0x6e, + 0x64, 0x20, 0x3c, 0x61, 0x20, 0x68, 0x72, 0x65, 0x66, 0x3d, 0x22, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x67, 0x6d, 0x6c, 0x2e, + 0x61, 0x69, 0x22, 0x3e, 0x67, 0x67, 0x6d, 0x6c, 0x2e, 0x61, 0x69, 0x3c, + 0x2f, 0x61, 0x3e, 0x2e, 0x3c, 0x2f, 0x70, 0x3e, 0x0a, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x6f, 0x6f, + 0x74, 0x65, 0x72, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, + 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x28, 0x68, + 0x28, 0x41, 0x70, 0x70, 0x29, 0x2c, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x28, 0x27, 0x23, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x27, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, + 0x3c, 0x2f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x3e, 0x0a, 0x3c, 0x2f, + 0x68, 0x65, 0x61, 0x64, 0x3e, 0x0a, 0x0a, 0x3c, 0x62, 0x6f, 0x64, 0x79, + 0x3e, 0x0a, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, 0x69, 0x64, 0x3d, + 0x22, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x22, 0x3e, + 0x0a, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, + 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x66, 0x69, 0x6c, 0x65, 0x22, 0x20, + 0x69, 0x64, 0x3d, 0x22, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x22, 0x20, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x3d, 0x22, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x2a, 0x22, 0x20, 0x73, 0x74, 0x79, 0x6c, + 0x65, 0x3d, 0x22, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x3a, 0x20, + 0x6e, 0x6f, 0x6e, 0x65, 0x3b, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x3c, 0x2f, + 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, + 0x69, 0x64, 0x3d, 0x22, 0x70, 0x6f, 0x72, 0x74, 0x61, 0x6c, 0x22, 0x3e, + 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x3c, 0x2f, 0x62, 0x6f, 0x64, + 0x79, 0x3e, 0x0a, 0x0a, 0x3c, 0x2f, 0x68, 0x74, 0x6d, 0x6c, 0x3e, 0x0a, + 0x0a }; -unsigned int index_html_len = 32105; +unsigned int index_html_len = 32269; diff --git a/examples/server/public/index.html b/examples/server/public/index.html index 39d7bb93d9c4c1..60659c1478f72f 100644 --- a/examples/server/public/index.html +++ b/examples/server/public/index.html @@ -219,6 +219,7 @@ repeat_penalty: 1.18, // 1.0 = disabled top_k: 40, // <= 0 to use vocab size top_p: 0.5, // 1.0 = disabled + min_p: 0.05, // 0 = disabled tfs_z: 1.0, // 1.0 = disabled typical_p: 1.0, // 1.0 = disabled presence_penalty: 0.0, // 0.0 = disabled @@ -744,6 +745,7 @@ ${IntField({ label: "Consider N tokens for penalize", max: 2048, min: 0, name: "repeat_last_n", value: params.value.repeat_last_n })} ${IntField({ label: "Top-K sampling", max: 100, min: -1, name: "top_k", value: params.value.top_k })} ${FloatField({ label: "Top-P sampling", max: 1.0, min: 0.0, name: "top_p", step: 0.01, value: params.value.top_p })} + ${FloatField({ label: "Min-P sampling", max: 1.0, min: 0.0, name: "min_p", step: 0.01, value: params.value.min_p })}