From 924326b5961cc4fdb481d2e10edc6f1cf3d93c04 Mon Sep 17 00:00:00 2001 From: Mohit Soni Date: Thu, 16 Oct 2025 19:28:23 +0530 Subject: [PATCH 01/60] [Qwen2_5_vl] - Onboarding Qwen2_5_vl model in QEfficient (#560) Signed-off-by: Mohit Soni Co-authored-by: Hem Agnihotri Signed-off-by: Dhiraj Kumar Sah --- .../transformers/models/modeling_auto.py | 15 +- .../transformers/models/pytorch_transforms.py | 30 + .../models/qwen2_5_vl/__init__.py | 6 + .../models/qwen2_5_vl/modeling_qwen2_5_vl.py | 935 ++++++++++++++++++ README.md | 3 + examples/qwen2_5_vl_example.py | 178 ++++ tests/transformers/models/qnn_config.json | 10 + 7 files changed, 1172 insertions(+), 5 deletions(-) create mode 100644 QEfficient/transformers/models/qwen2_5_vl/__init__.py create mode 100644 QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py create mode 100644 examples/qwen2_5_vl_example.py create mode 100644 tests/transformers/models/qnn_config.json diff --git a/QEfficient/transformers/models/modeling_auto.py b/QEfficient/transformers/models/modeling_auto.py index 2a00577f2..633a0b29d 100644 --- a/QEfficient/transformers/models/modeling_auto.py +++ b/QEfficient/transformers/models/modeling_auto.py @@ -1313,9 +1313,14 @@ def kv_offload_generate( vision_end = perf_counter() lang_inputs = {k: v for k, v in inputs.items() if k not in vision_inputs} - lang_inputs["position_ids"] = np.where( - lang_inputs.pop("attention_mask"), np.arange(padded_len), -1 - ) # Need to use -1 as position_ids for invalid tokens + + if "position_ids" in inputs: + lang_inputs["position_ids"] = inputs["position_ids"] + lang_inputs.pop("attention_mask") + else: + lang_inputs["position_ids"] = np.where( + lang_inputs.pop("attention_mask"), np.arange(padded_len), -1 + ) # Need to use -1 as position_ids for invalid tokens not_mllama = hasattr(self.model.config, "model_type") and self.model.config.model_type != "mllama" if not_mllama: @@ -1336,7 +1341,7 @@ def kv_offload_generate( for i in range(num_chunks): 
chunk_inputs["input_ids"] = lang_inputs["input_ids"][:, i * prefill_seq_len : (i + 1) * prefill_seq_len] chunk_inputs["position_ids"] = lang_inputs["position_ids"][ - :, i * prefill_seq_len : (i + 1) * prefill_seq_len + ..., i * prefill_seq_len : (i + 1) * prefill_seq_len ] outputs = lang_session.run(chunk_inputs) chunk_inputs["image_idx"] = outputs["image_idx_output"] @@ -1353,7 +1358,7 @@ def kv_offload_generate( # Get first token lang_inputs["input_ids"] = outputs["logits"].argmax(2) - lang_inputs["position_ids"] = input_len.numpy() + lang_inputs["position_ids"] = np.max(lang_inputs["position_ids"], axis=-1, keepdims=True) + 1 if "cross_attention_mask" in lang_inputs: bs, _, num_images, img_tiles = lang_inputs["cross_attention_mask"].shape lang_inputs["cross_attention_mask"] = torch.ones((bs, 1, num_images, img_tiles), dtype=torch.int64).numpy() diff --git a/QEfficient/transformers/models/pytorch_transforms.py b/QEfficient/transformers/models/pytorch_transforms.py index c910ab387..eeb7bd6e6 100644 --- a/QEfficient/transformers/models/pytorch_transforms.py +++ b/QEfficient/transformers/models/pytorch_transforms.py @@ -152,6 +152,18 @@ Qwen2Model, Qwen2RMSNorm, ) +from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import ( + Qwen2_5_VisionTransformerPretrainedModel, + Qwen2_5_VLAttention, + Qwen2_5_VLDecoderLayer, + Qwen2_5_VLForConditionalGeneration, + Qwen2_5_VLModel, + Qwen2_5_VLTextModel, + Qwen2_5_VLVisionAttention, +) +from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import ( + Qwen2RMSNorm as Qwen2_5RMSNorm, +) from transformers.models.qwen3.modeling_qwen3 import ( Qwen3Attention, Qwen3DecoderLayer, @@ -356,6 +368,15 @@ QEffQwen2ForCausalLM, QEffQwen2Model, ) +from QEfficient.transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import ( + QEffQwen2_5_VisionTransformerPretrainedModel, + QEffQwen2_5_VLAttention, + QEffQwen2_5_VLDecoderLayer, + QEffQwen2_5_VLModel, + QEffQwen2_5_VLTextModel, + QEffQwen2_5_VLVisionAttention, + 
QEffQwen_2_5_vl_ForConditionalGeneration, +) from QEfficient.transformers.models.qwen3.modeling_qwen3 import ( QEffQwen3Attention, QEffQwen3DecoderLayer, @@ -404,6 +425,7 @@ class CustomOpsTransform(ModuleMappingTransform): Phi3RMSNorm: CustomRMSNormAIC, Qwen2RMSNorm: CustomRMSNormAIC, Qwen3RMSNorm: CustomRMSNormAIC, + Qwen2_5RMSNorm: CustomRMSNormAIC, MllamaTextRMSNorm: CustomRMSNormAIC, GraniteRMSNorm: CustomRMSNormAIC, PixtralRMSNorm: CustomRMSNormAIC, @@ -544,6 +566,14 @@ class KVCacheTransform(ModuleMappingTransform): Qwen3DecoderLayer: QEffQwen3DecoderLayer, Qwen3Model: QEffQwen3Model, Qwen3ForCausalLM: QEffQwen3ForCausalLM, + # Qwen2.5 VL + Qwen2_5_VLForConditionalGeneration: QEffQwen_2_5_vl_ForConditionalGeneration, + Qwen2_5_VLModel: QEffQwen2_5_VLModel, + Qwen2_5_VLAttention: QEffQwen2_5_VLAttention, + Qwen2_5_VLDecoderLayer: QEffQwen2_5_VLDecoderLayer, + Qwen2_5_VisionTransformerPretrainedModel: QEffQwen2_5_VisionTransformerPretrainedModel, + Qwen2_5_VLVisionAttention: QEffQwen2_5_VLVisionAttention, + Qwen2_5_VLTextModel: QEffQwen2_5_VLTextModel, # Starcoder2 Starcoder2Attention: QEffStarcoder2Attention, Starcoder2DecoderLayer: QEFFStarcoder2DecoderLayer, diff --git a/QEfficient/transformers/models/qwen2_5_vl/__init__.py b/QEfficient/transformers/models/qwen2_5_vl/__init__.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/transformers/models/qwen2_5_vl/__init__.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py b/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py new file mode 100644 index 000000000..030dd7a56 --- /dev/null +++ b/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py @@ -0,0 +1,935 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import math +from typing import Callable, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2_5_VLModel +from transformers.cache_utils import Cache +from transformers.modeling_outputs import ( + BaseModelOutputWithPast, +) +from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import ( + Qwen2_5_VisionTransformerPretrainedModel, + Qwen2_5_VLAttention, + Qwen2_5_VLConfig, + Qwen2_5_VLDecoderLayer, + Qwen2_5_VLModelOutputWithPast, + Qwen2_5_VLRotaryEmbedding, + Qwen2_5_VLTextModel, + Qwen2_5_VLVisionAttention, + apply_rotary_pos_emb_vision, + repeat_kv, + rotate_half, +) + +from QEfficient.transformers.cache_utils import QEffDynamicCache + +# from transformers import Qw +from QEfficient.transformers.modeling_attn_mask_utils import _create_causal_mask +from QEfficient.utils import constants +from QEfficient.utils._utils import IOInfo, get_padding_shape_from_config +from QEfficient.utils.constants import MIN_MASKED_ATTENTION_VALUE +from QEfficient.utils.logging_utils import logger + + +def qeff_apply_rotary_pos_emb(q, k, cos, sin, position_ids, mrope_section, unsqueeze_dim=1): + """Applies Rotary Position Embedding with Multimodal Sections to the query and key 
tensors (https://qwenlm.github.io/blog/qwen2-vl/). + + Explanation: + Multimodal 3D rotary position embedding is an extension to 1D rotary position embedding. The input embedding + sequence contains vision (images / videos) embedding and text embedding or just contains text embedding. For + vision embedding part, we apply rotary position embedding on temporal, height and width dimension seperately. + Here we split the channel dimension to 3 chunks for the temporal, height and width rotary position embedding. + For text embedding part, we just apply 1D rotary position embedding. The three rotary position index (temporal, + height and width) of text embedding is always the same, so the text embedding rotary position embedding has no + difference with modern LLMs. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. + mrope_section(`List(int)`): + Multimodal rope section is for channel dimension of temporal, height and width in rope calculation. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. 
+ Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. + """ + + mrope_section = mrope_section * 2 + cos = cos[position_ids] + sin = sin[position_ids] + + cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze(unsqueeze_dim) + sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze(unsqueeze_dim) + + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + + return q_embed.to(q.dtype), k_embed.to(k.dtype) + + +class QEffQwen2_5_VLVisionAttention(Qwen2_5_VLVisionAttention): + def __init__(self, dim: int, num_heads: int = 16) -> None: + super().__init__() + self.num_heads = num_heads + self.head_dim = dim // num_heads + self.qkv = nn.Linear(dim, dim * 3, bias=True) + self.proj = nn.Linear(dim, dim) + + def forward( + self, + hidden_states: torch.Tensor, + cu_seqlens: torch.Tensor, + rotary_pos_emb: Optional[torch.Tensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + ) -> torch.Tensor: + seq_length = hidden_states.shape[0] + q, k, v = self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0) + if position_embeddings is None: + logger.warning_once( + "The attention layers in this model are transitioning from computing the RoPE embeddings internally " + "through `rotary_pos_emb` (2D tensor of RoPE theta values), to using externally computed " + "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.54 `rotary_pos_emb` will be " + "removed and `position_embeddings` will be mandatory." 
+ ) + emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1) + cos = emb.cos() + sin = emb.sin() + else: + cos, sin = position_embeddings + q, k = apply_rotary_pos_emb_vision(q, k, cos, sin) + + attention_mask = torch.full( + [1, seq_length, seq_length], torch.finfo(q.dtype).min, device=q.device, dtype=q.dtype + ) + + # Create index grids + seq_len = attention_mask.shape[-1] + rows = torch.arange(seq_len).view(1, -1) + cols = torch.arange(seq_len).view(-1, 1) + + # Prepare start and end indices + start = cu_seqlens[:-1].view(-1, 1, 1) + end = cu_seqlens[1:].view(-1, 1, 1) + + # Create block masks using broadcasting + row_mask = (rows >= start) & (rows < end) + col_mask = (cols >= start) & (cols < end) + block_mask = row_mask & col_mask # shape: (num_blocks, seq_len, seq_len) + + # Combine all blocks into one mask + final_mask = torch.ones((seq_len, seq_len), dtype=torch.float32) + final_mask[block_mask.any(dim=0)] = 0 + + final_mask = torch.where(final_mask == 1.0, torch.finfo(q.dtype).min, final_mask) + + attention_mask[0] = final_mask + + q = q.transpose(0, 1) + k = k.transpose(0, 1) + v = v.transpose(0, 1) + attn_weights = torch.matmul(q, k.transpose(1, 2)) / math.sqrt(self.head_dim) + attn_weights = attn_weights + attention_mask + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(q.dtype) + attn_output = torch.matmul(attn_weights, v) + attn_output = attn_output.transpose(0, 1) + attn_output = attn_output.reshape(seq_length, -1) + attn_output = self.proj(attn_output) + return attn_output + + +class QEffQwen2_5_VisionTransformerPretrainedModel(Qwen2_5_VisionTransformerPretrainedModel): + def rot_pos_emb(self, grid_thw): + pos_ids = [] + + bs, t, h, w = grid_thw.shape + + hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w) + hpos_ids = hpos_ids.reshape( + h // self.spatial_merge_size, + self.spatial_merge_size, + w // self.spatial_merge_size, + self.spatial_merge_size, + ) + hpos_ids = hpos_ids.permute(0, 2, 1, 3) + hpos_ids = 
hpos_ids.flatten() + + wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1) + wpos_ids = wpos_ids.reshape( + h // self.spatial_merge_size, + self.spatial_merge_size, + w // self.spatial_merge_size, + self.spatial_merge_size, + ) + wpos_ids = wpos_ids.permute(0, 2, 1, 3) + wpos_ids = wpos_ids.flatten() + pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1)) + pos_ids = torch.cat(pos_ids, dim=0) + + x_expanded = pos_ids.unsqueeze(0) + x_expanded = x_expanded.expand(bs, -1, -1) + pos_ids = x_expanded.reshape(-1, pos_ids.size(1)) + + max_grid_size = max(grid_thw.shape) + rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size) + rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1) + return rotary_pos_emb + + def get_window_index(self, grid_thw): + window_index: list = [] + cu_window_seqlens: list = [0] + vit_merger_window_size = self.window_size // self.spatial_merge_size // self.patch_size + + bs, grid_t, grid_h, grid_w = grid_thw.shape + + llm_grid_h, llm_grid_w = ( + grid_h // self.spatial_merge_size, + grid_w // self.spatial_merge_size, + ) + index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(grid_t, llm_grid_h, llm_grid_w) + + pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size + pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size + num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size + num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size + + index_padded = F.pad(index, (0, pad_w, 0, pad_h), "constant", -100) + + index_padded = index_padded.reshape( + grid_t, + num_windows_h, + vit_merger_window_size, + num_windows_w, + vit_merger_window_size, + ) + index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape( + grid_t, + num_windows_h * num_windows_w, + vit_merger_window_size, + vit_merger_window_size, + ) + + seqlens = (index_padded != -100).sum([2, 3]).reshape(-1) + + x_expanded = seqlens.unsqueeze(0) + x_expanded = x_expanded.expand(bs, -1) + seqlens = x_expanded.reshape(-1) 
+ + index_padded = index_padded.reshape(-1) + + mask = (index_padded == -100).to(torch.int32) + + if torch.jit.is_tracing(): + order = torch.argsort(mask) + else: + order = torch.argsort(mask, stable=True) + + index_new = index_padded[order] + index_new = index_new[: index.reshape(-1).size(0)] + + step = grid_t * llm_grid_h * llm_grid_w + batch_indices = torch.arange(bs) + batch_indices = batch_indices.view(-1, 1) + offsets = batch_indices * step + window_index_tmp = index_new.unsqueeze(0) + offsets + window_index = window_index_tmp.reshape(-1) + + cu_seqlens_tmp = seqlens.cumsum(0) * self.spatial_merge_unit + cu_window_seqlens[-1] + + cu_window_seqlens = torch.cat([torch.tensor([0], dtype=cu_seqlens_tmp.dtype), cu_seqlens_tmp]) + + return window_index, cu_window_seqlens + + def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor) -> torch.Tensor: + """ + Args: + hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`): + The final hidden states of the model. + grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`): + The temporal, height and width of feature shape of each image in LLM. + + Returns: + `torch.Tensor`: hidden_states. 
+ """ + hidden_states = self.patch_embed(hidden_states) + + rotary_pos_emb = self.rot_pos_emb(grid_thw) + + window_index, cu_window_seqlens = self.get_window_index(grid_thw) + + cu_window_seqlens = cu_window_seqlens.to( + device=hidden_states.device, dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32 + ) + + # cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens) + + seq_len, _ = hidden_states.size() + hidden_states = hidden_states.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) + + hidden_states = hidden_states[window_index, :, :] + + hidden_states = hidden_states.reshape(seq_len, -1) + rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) + rotary_pos_emb = rotary_pos_emb[window_index, :, :] + rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1) + emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1) + position_embeddings = (emb.cos(), emb.sin()) + + bs, t, h, w = grid_thw.shape + + t = torch.arange(t, t + 1).squeeze().expand(bs) + h = torch.arange(h, h + 1).squeeze().expand(bs) + w = torch.arange(w, w + 1).squeeze().expand(bs) + + cu_seqlens = (h * w).cumsum( + dim=0, + # Select dtype based on the following factors: + # - FA2 requires that cu_seqlens_q must have dtype int32 + # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw + # See https://github.com/huggingface/transformers/pull/34852 for more information + dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32, + ) + + cu_seqlens = torch.cat([torch.tensor([0], dtype=cu_seqlens.dtype), cu_seqlens]) + + for layer_num, blk in enumerate(self.blocks): + if layer_num in self.fullatt_block_indexes: + cu_seqlens_now = cu_seqlens + else: + cu_seqlens_now = cu_window_seqlens + + hidden_states = blk(hidden_states, cu_seqlens=cu_seqlens_now, position_embeddings=position_embeddings) + + hidden_states = self.merger(hidden_states) + reverse_indices = torch.argsort(window_index) + 
hidden_states = hidden_states[reverse_indices, :] + + return hidden_states + + +class QEffQwen2_5_VLRotaryEmbedding(Qwen2_5_VLRotaryEmbedding): + """ + Copied from LlamaForCausalLM: https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py + The only differences are: + - Add static sin/cos computations. + """ + + def __init__(self, config: Qwen2_5_VLConfig, device=None): + super().__init__(config=config) + # Build here to make `torch.jit.trace` work. + self._set_cos_sin_cache( + seq_len=self.original_max_seq_len, device=self.inv_freq.device, dtype=torch.get_default_dtype() + ) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) + + freqs = torch.outer(t, self.inv_freq) + + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + + return ( + self.cos_cached[:seq_len].to(dtype=x.dtype) * self.attention_scaling, + self.sin_cached[:seq_len].to(dtype=x.dtype) * self.attention_scaling, + ) + + +def eager_attention_forward( + module: nn.Module, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attention_mask: Optional[torch.Tensor], + **kwargs, +): + key_states = repeat_kv(key, module.num_key_value_groups) + value_states = repeat_kv(value, module.num_key_value_groups) + + attn_weights = torch.matmul(query, key_states.transpose(2, 3)) / math.sqrt(module.head_dim) + if attention_mask is not None: + attn_weights = torch.where( + attention_mask, torch.tensor(MIN_MASKED_ATTENTION_VALUE, dtype=torch.float32), attn_weights + ) + + 
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) + attn_output = torch.matmul(attn_weights, value_states) + attn_output = attn_output.transpose(1, 2).contiguous() + + return attn_output, attn_weights + + +class QEffQwen2_5_VLAttention(Qwen2_5_VLAttention): + """ + Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer + and "Generating Long Sequences with Sparse Transformers". + """ + + def __qeff_init__(self): + self.rotary_emb = QEffQwen2_5_VLRotaryEmbedding(config=self.config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + batch_index: Optional[torch.LongTensor] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + kv_seq_len = past_key_value.get_seq_length(self.layer_idx, cache_position) + + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + + query_states, key_states = qeff_apply_rotary_pos_emb( + query_states, key_states, cos, sin, position_ids[1:], self.rope_scaling["mrope_section"] + ) + + if past_key_value is not None: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = {"sin": sin, "cos": cos, "batch_index": batch_index, 
"position_ids": position_ids[0]} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + attention_interface: Callable = eager_attention_forward + + attn_output, attn_weights = attention_interface( + self, + query_states, + key_states, + value_states, + attention_mask, + **kwargs, + ) + + attn_output = attn_output.reshape(bsz, q_len, -1) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class QEffQwen2_5_VLDecoderLayer(Qwen2_5_VLDecoderLayer): + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + batch_index: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + cache_position: Optional[torch.LongTensor] = None, + # position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC + **kwargs, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, sequence_length)` where padding elements are indicated by 0. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). 
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): + Indices depicting the position of the input sequence tokens in the sequence. + position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): + Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, + with `head_dim` being the embedding dimension of each attention head. + kwargs (`dict`, *optional*): + Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code + into the model + """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + batch_index=batch_index, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + **kwargs, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +class QEffQwen2_5_VLTextModel(Qwen2_5_VLTextModel): + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + batch_index: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: 
Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + + if use_cache and not isinstance(past_key_values, Cache): + return_legacy_cache = True + past_key_values = QEffDynamicCache.from_legacy_cache(past_key_values) + + if cache_position is None: + past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + cache_position = torch.arange( + past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device + ) + + # the hard coded `3` is for temporal, height and width. + if position_ids is None: + position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1) + elif position_ids.dim() == 2: + position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1) + + target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + causal_mask = _create_causal_mask( + position_ids=position_ids[0], target_length=target_length, sliding_window=self.config.sliding_window + ) + + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + layer_outputs = decoder_layer( + hidden_states, + attention_mask=causal_mask, + position_ids=position_ids, + past_key_value=past_key_values, + batch_index=batch_index, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + **kwargs, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_self_attns += 
(layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if return_legacy_cache: + past_key_values = past_key_values.to_legacy_cache() + + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=past_key_values, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + return (hidden_states, past_key_values) + + +class QEffQwen2_5_VLModel(Qwen2_5_VLModel): + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + batch_index: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if inputs_embeds is None: + inputs_embeds = self.get_input_embeddings()(input_ids) + + outputs = self.language_model( + input_ids=None, + position_ids=position_ids, + attention_mask=attention_mask, + past_key_values=past_key_values, + batch_index=batch_index, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=True, + cache_position=cache_position, + **kwargs, + ) + + output = Qwen2_5_VLModelOutputWithPast( + 
last_hidden_state=outputs.last_hidden_state, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + rope_deltas=self.rope_deltas, + ) + return output if return_dict else output.to_tuple() + + +class QEffQwen_2_5_vl_EncoderWrapper(nn.Module): + def __init__(self, model): + super().__init__() + self.model = model + self.model.vision_model = self.model.visual + + def forward(self, pixel_values, image_grid_thw): + image_embeds = self.model.visual(pixel_values, grid_thw=image_grid_thw) + bs = image_grid_thw.shape[0] + split_size = torch.floor_divide(torch.tensor(image_embeds.size(0)), bs) + image_embeds = image_embeds.reshape(bs, split_size, image_embeds.size(1)) + + return image_embeds + + +class QEffQwen_2_5_vl_DecoderWrapper(nn.Module): + def __init__(self, model): + super().__init__() + self.model = model + self.language_model = self.model.model.language_model + + def forward(self, input_ids, vision_embeds, position_ids, image_idx, past_key_values): + inputs_embeds = self.model.get_input_embeddings()(input_ids) + B, N, C = inputs_embeds.shape + selected = input_ids == self.model.config.image_token_id + indices1 = selected.to(torch.int64).cumsum(1) - 1 + indices1 = torch.where(indices1 != -1, indices1 + image_idx, indices1) + indices0 = torch.arange(selected.unsqueeze(0).shape[0]).view(-1, 1) + image_features_expanded = vision_embeds.reshape(-1, C).unsqueeze(0)[indices0, indices1] + image_input_embeds = torch.where(selected.unsqueeze(-1), image_features_expanded, inputs_embeds) + inputs_embeds = torch.where(input_ids.shape[1] == torch.tensor(1), inputs_embeds, image_input_embeds) + outputs = self.model.model( + inputs_embeds=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, use_cache=True + ) + + logit_index = position_ids[0].to(torch.int32).argmax(1, keepdim=True) + hidden_states = outputs.last_hidden_state[torch.arange(position_ids[0].shape[0]).view(-1, 1), logit_index] + logits 
= self.model.lm_head(hidden_states) + image_idx = (indices1.max() + 1).unsqueeze(0).unsqueeze(0) + + return logits, vision_embeds, image_idx, outputs.past_key_values + + +class QEffQwen_2_5_vl_ForConditionalGeneration(Qwen2_5_VLForConditionalGeneration): + def get_qeff_vision_encoder(self): + return QEffQwen_2_5_vl_EncoderWrapper(self) + + def get_qeff_language_decoder(self): + return QEffQwen_2_5_vl_DecoderWrapper(self) + + def get_dummy_inputs(self, kv_offload: bool = False, **kwargs): + inputs_shapes = {} + inputs_shapes["input_ids"] = (constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN) + + vision_size = 3577 + inputs_shapes["vision_embeds"] = ( + constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, + vision_size, + self.model.config.hidden_size, + ) + inputs_shapes["image_grid_thw"] = (1, 1, 98, 146) + inputs_shapes["position_ids"] = ( + 3, + constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, + constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN, + ) + inputs_shapes["pixel_values"] = (14308, 1176) + inputs_shapes["image_idx"] = (1, 1) + inputs_shapes["image_sizes"] = (constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, 2) + # Define inputs + vision_inputs = {} + lang_inputs = {} + vision_inputs["pixel_values"] = torch.zeros((inputs_shapes["pixel_values"]), dtype=torch.float32) + vision_inputs["image_grid_thw"] = torch.zeros((inputs_shapes["image_grid_thw"]), dtype=torch.int64) + lang_inputs["input_ids"] = torch.zeros((inputs_shapes["input_ids"]), dtype=torch.int64) + lang_inputs["vision_embeds"] = torch.zeros((inputs_shapes["vision_embeds"]), dtype=torch.float32) + lang_inputs["position_ids"] = ( + ( + torch.arange(constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN, dtype=torch.int64) + .view(1, constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN) + .repeat(constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, 1) + ) + .unsqueeze(0) + .repeat(4, 1, 1) + ) + lang_inputs["image_idx"] = torch.zeros((inputs_shapes["image_idx"]), dtype=torch.int64) + # Add data for KV + kv_cache_shape = get_padding_shape_from_config( 
+ config=self.model.config, + batch_size=constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, + seq_len=constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN, + ) + + lang_inputs["past_key_values"] = [[] for _ in range(self.model.config.num_hidden_layers)] + for i in range(self.model.config.num_hidden_layers): + for kv in ["key", "value"]: + lang_inputs["past_key_values"][i].append(torch.zeros(kv_cache_shape, dtype=torch.float32)) + + inputs = {} + if kv_offload: + inputs["vision"] = vision_inputs + inputs["lang"] = lang_inputs + else: + lang_inputs.pop("vision_embeds") + inputs = {**vision_inputs, **lang_inputs} + + return inputs + + def get_specializations( + self, + batch_size: int, + prefill_seq_len: int, + ctx_len: int, + img_size: None, + height: int = None, + width: int = None, + kv_offload: bool = False, + **compiler_options, + ): + if height is None or width is None: + height = 1365 + width = 2048 + logger.warning( + "Setting height and width to be 1365 and 2048 respectively, as it was neither passed nor found in vision_config" + ) + prefill_seq_len = prefill_seq_len if prefill_seq_len else 128 + ctx_len = ctx_len if ctx_len else constants.INTERN_CTX_LEN + channel = 3 + patch_size = self.config.vision_config.patch_size + temporal_patch_size = self.config.vision_config.temporal_patch_size + + IMAGE_FACTOR = 28 + MIN_PIXELS = 4 * 28 * 28 + MAX_PIXELS = 16384 * 28 * 28 + MAX_RATIO = 200 + + def round_by_factor(number: int, factor: int) -> int: + """Returns the closest integer to 'number' that is divisible by 'factor'.""" + return round(number / factor) * factor + + def ceil_by_factor(number: int, factor: int) -> int: + """Returns the smallest integer greater than or equal to 'number' that is divisible by 'factor'.""" + return math.ceil(number / factor) * factor + + def floor_by_factor(number: int, factor: int) -> int: + """Returns the largest integer less than or equal to 'number' that is divisible by 'factor'.""" + return math.floor(number / factor) * factor + + def smart_resize( + 
height: int, + width: int, + factor: int = IMAGE_FACTOR, + min_pixels: int = MIN_PIXELS, + max_pixels: int = MAX_PIXELS, + ) -> tuple[int, int]: + """ + Rescales the image so that the following conditions are met: + + 1. Both dimensions (height and width) are divisible by 'factor'. + + 2. The total number of pixels is within the range ['min_pixels', 'max_pixels']. + + 3. The aspect ratio of the image is maintained as closely as possible. + """ + if max(height, width) / min(height, width) > MAX_RATIO: + raise ValueError( + f"absolute aspect ratio must be smaller than {MAX_RATIO}, got {max(height, width) / min(height, width)}" + ) + h_bar = max(factor, round_by_factor(height, factor)) + w_bar = max(factor, round_by_factor(width, factor)) + if h_bar * w_bar > max_pixels: + beta = math.sqrt((height * width) / max_pixels) + h_bar = floor_by_factor(height / beta, factor) + w_bar = floor_by_factor(width / beta, factor) + elif h_bar * w_bar < min_pixels: + beta = math.sqrt(min_pixels / (height * width)) + h_bar = ceil_by_factor(height * beta, factor) + w_bar = ceil_by_factor(width * beta, factor) + return h_bar, w_bar + + resized_height, resized_width = smart_resize(height=height, width=width) + grid_h, grid_w = resized_height // patch_size, resized_width // patch_size + grid_height = grid_h * grid_w + grid_width = patch_size * patch_size * temporal_patch_size * channel + vision_size = grid_height // 4 + grid_height = grid_height * batch_size + + vision = [ + { + "batch_size": batch_size, + "vision_size": vision_size, + "grid_height": grid_height, + "grid_width": grid_width, + "grid_h": grid_h, + "grid_w": grid_w, + } + ] + lang = [ + { + "batch_size": batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "vision_size": vision_size, + }, + { + "batch_size": batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "vision_size": vision_size, + }, + ] + + specializations = {} + + if kv_offload: + specializations["vision"] = vision + specializations["lang"] = lang + 
return specializations, compiler_options + else: + return lang, compiler_options + + def get_onnx_dynamic_axes(self, kv_offload: bool = False): + # Define dynamic axes + num_layers = self.config.num_hidden_layers + + vision_dynamic_axes = { + "pixel_values": {0: "grid_height", 1: "grid_width"}, + "image_grid_thw": {0: "batch_size", 2: "grid_h", 3: "grid_w"}, + } + + lang_dynamic_axes = { + "input_ids": {0: "batch_size", 1: "seq_len"}, + "position_ids": {1: "batch_size", 2: "seq_len"}, + "vision_embeds": {0: "batch_size", 1: "vision_size"}, + } + + for i in range(num_layers): + lang_dynamic_axes[f"past_key.{i}"] = {0: "batch_size", 2: "ctx_len"} + lang_dynamic_axes[f"past_value.{i}"] = {0: "batch_size", 2: "ctx_len"} + + dynamic_axes = {} + if kv_offload: + dynamic_axes["vision"] = vision_dynamic_axes + dynamic_axes["lang"] = lang_dynamic_axes + else: + lang_dynamic_axes.pop("vision_embeds") + dynamic_axes = {**vision_dynamic_axes, **lang_dynamic_axes} + return dynamic_axes + + def get_output_names(self, kv_offload: bool = False): + vision_output_names = ["vision_embeds"] + lang_output_names = ["logits"] + for i in range(self.model.config.num_hidden_layers): + for kv in ["key", "value"]: + lang_output_names.append(f"past_{kv}.{i}_RetainedState") + + output_names = {} + if kv_offload: + lang_output_names.insert(1, "vision_embeds_RetainedState") + lang_output_names.insert(2, "image_idx_output") + output_names["vision"] = vision_output_names + output_names["lang"] = lang_output_names + else: + lang_output_names.insert(1, "pixel_values_RetainedState") + lang_output_names.insert(2, "image_idx_output") + return lang_output_names + return output_names + + def get_inputs_info(self): + return [ + IOInfo(name="input_ids", datatype=torch.int64, shape=("batch_size", "seq_len")), + IOInfo(name="attention_mask", datatype=torch.int64, shape=("batch_size", "seq_len")), + IOInfo(name="pixel_values", datatype=torch.float32, shape=("batch_size", 3, "image_size", "image_size")), + ] 
diff --git a/README.md b/README.md index b396daede..40d64116c 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,9 @@ *Latest news* :fire:
+- [10/2025] Added support for Qwen2.5VL Multi-Model [Qwen/Qwen2.5-VL-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-32B-Instruct) +- [10/2025] Added support for Mistral3 Multi-Model [mistralai/Mistral-Small-3.1-24B-Instruct-2503](https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Instruct-2503) +- [10/2025] Added support for Molmo Multi-Model [allenai/Molmo-7B-D-0924](https://huggingface.co/allenai/Molmo-7B-D-0924) - [06/2025] Added support for Llama4 Multi-Model [meta-llama/Llama-4-Scout-17B-16E-Instruct](https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct) - [06/2025] Added support for Gemma3 Multi-Modal-Model [google/gemma-3-4b-it](https://huggingface.co/google/gemma-3-4b-it) - [06/2025] Added support of model `hpcai-tech/grok-1` [hpcai-tech/grok-1](https://huggingface.co/hpcai-tech/grok-1) diff --git a/examples/qwen2_5_vl_example.py b/examples/qwen2_5_vl_example.py new file mode 100644 index 000000000..6b308e532 --- /dev/null +++ b/examples/qwen2_5_vl_example.py @@ -0,0 +1,178 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import requests +import torch +import torch.nn.functional as F +import transformers +from PIL import Image +from qwen_vl_utils import process_vision_info +from transformers import AutoConfig, AutoProcessor, TextStreamer + +from QEfficient import QEFFAutoModelForImageTextToText + +## For AWQ model update pytorch version to 2.8.* +model_id = "Qwen/Qwen2.5-VL-32B-Instruct" +config = AutoConfig.from_pretrained(model_id) + +## Use complete model without changing num_hidden_layers as it will not work for TF version 4.55.0 for Qwen2.5VL model + +qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( + model_id, attn_implementation="eager", kv_offload=True, config=config +) +tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) +processor = AutoProcessor.from_pretrained(model_id) + +### use skip_vision=Ture, if want to run only text, ow false ### +skip_vision = False + +if skip_vision: + ## Only Text ## + + ## Set Batch_Size ## + batch_size = 2 + qeff_model.compile( + batch_size=batch_size, + prefill_seq_len=128, + ctx_len=4096, + num_cores=16, + num_devices=8, + height=354, + width=536, + mxfp6_matmul=False, + aic_enable_depth_first=True, + skip_vision=True, + mos=1, + ) + + messages = [ + { + "role": "user", + "content": [ + {"type": "text", "text": "Tell me about yourself."}, + ], + }, + ] + + messages = [messages] * batch_size + + inputs = processor.apply_chat_template( + messages, + add_generation_prompt=True, + tokenize=True, + return_dict=True, + return_tensors="pt", + ) + + pos_ids, rope_deltas = qeff_model.model.get_rope_index( + inputs["input_ids"], + image_grid_thw=None, + video_grid_thw=None, + second_per_grid_ts=None, + attention_mask=inputs["attention_mask"], + ) + + input_ids_length = inputs["input_ids"].shape[1] + + inputs["position_ids"] = torch.cat([pos_ids, pos_ids[0].unsqueeze(0)], dim=0) + + prefill_seq_len = 
128 + num_chunks = -(input_ids_length // -prefill_seq_len) # ceil divide without float + padded_len = num_chunks * prefill_seq_len # Convert to a multiple of prompt_len + + inputs["position_ids"] = F.pad( + inputs["position_ids"], pad=(0, padded_len - input_ids_length), mode="constant", value=-1 + ) + + streamer = TextStreamer(tokenizer) + output = qeff_model.generate(inputs=inputs, generation_len=100) + print(output.generated_ids) + print(tokenizer.batch_decode(output.generated_ids)) + print(output) + +else: + batch_size = 1 + ## Vision + Text ## + qeff_model.compile( + batch_size=batch_size, + prefill_seq_len=128, + ctx_len=4096, + num_cores=16, + num_devices=8, + height=354, + width=536, + mxfp6_matmul=True, + mxint8_kv_cache=True, + aic_enable_depth_first=True, + mos=1, + ) + + ### IMAGE + TEXT ### + image_url = "https://picsum.photos/id/237/536/354" + + image = Image.open(requests.get(image_url, stream=True).raw) + + messages_1 = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image}, + {"type": "text", "text": "Describe this image."}, + ], + }, + ] + + messages_2 = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image}, + {"type": "text", "text": "Describe about the color of the dog."}, + ], + }, + ] + + messages = [messages_2] * batch_size + + texts = [processor.apply_chat_template(msg, tokenize=False, add_generation_prompt=True) for msg in messages] + + image_inputs, video_inputs = process_vision_info(messages) + inputs = processor( + text=texts, + images=image_inputs, + videos=video_inputs, + padding=True, + return_tensors="pt", + ) + input_ids_length = inputs["input_ids"].shape[1] + + inputs["position_ids"] = torch.arange(input_ids_length).view(1, 1, input_ids_length).expand(-1, batch_size, -1) + + pos_ids, rope_deltas = qeff_model.model.model.get_rope_index( + inputs["input_ids"], + inputs["image_grid_thw"], + video_grid_thw=None, + second_per_grid_ts=None, + attention_mask=inputs["attention_mask"], + ) + + 
inputs["position_ids"] = torch.cat((inputs["position_ids"], pos_ids), dim=0) + + prefill_seq_len = 128 + num_chunks = -(input_ids_length // -prefill_seq_len) # ceil divide without float + padded_len = num_chunks * prefill_seq_len # Convert to a multiple of prompt_len + + inputs["position_ids"] = F.pad( + inputs["position_ids"], pad=(0, padded_len - input_ids_length), mode="constant", value=-1 + ) + + inputs.pop("image_grid_thw") + streamer = TextStreamer(tokenizer) + output = qeff_model.generate(inputs=inputs, generation_len=100) + print(output.generated_ids) + print(tokenizer.batch_decode(output.generated_ids)) + print(output) diff --git a/tests/transformers/models/qnn_config.json b/tests/transformers/models/qnn_config.json new file mode 100644 index 000000000..b1f249e2b --- /dev/null +++ b/tests/transformers/models/qnn_config.json @@ -0,0 +1,10 @@ +{ + "SKIP_QNN_CONVERTER_STEP":false, + "context_binary_generator_args_extension":"--log_level debug", + "converter_args_extension":"--onnx_defer_loading", + "qnn_compilation_backend":{ + "compiler_enable_depth_first":true, + "compiler_printDDRStats":false, + "compiler_printPerfMetrics":false + } +} \ No newline at end of file From 8c96a4de9bb18c32c46c161ad8fb5bcf9dc056e8 Mon Sep 17 00:00:00 2001 From: Dipankar Sarkar Date: Fri, 17 Oct 2025 11:07:04 +0530 Subject: [PATCH 02/60] Olmo2 Bug fix (#589) Fixed -10000 with MIN_MASK Signed-off-by: Dipankar Sarkar Signed-off-by: Dhiraj Kumar Sah --- QEfficient/transformers/models/olmo2/modeling_olmo2.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/QEfficient/transformers/models/olmo2/modeling_olmo2.py b/QEfficient/transformers/models/olmo2/modeling_olmo2.py index 6dae7ac84..0d23729c1 100644 --- a/QEfficient/transformers/models/olmo2/modeling_olmo2.py +++ b/QEfficient/transformers/models/olmo2/modeling_olmo2.py @@ -27,6 +27,7 @@ from QEfficient.transformers.cache_utils import QEffDynamicCache from QEfficient.transformers.modeling_attn_mask_utils import 
_create_causal_mask +from QEfficient.utils.constants import MIN_MASKED_ATTENTION_VALUE class QEffOlmo2RotaryEmbedding(Olmo2RotaryEmbedding): @@ -109,7 +110,9 @@ def eager_attention_forward( attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: - attn_weights = torch.where(attention_mask, torch.tensor(-10000.0, dtype=torch.float32), attn_weights) + attn_weights = torch.where( + attention_mask, torch.tensor(MIN_MASKED_ATTENTION_VALUE, dtype=torch.float32), attn_weights + ) attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() From 7ad6365131db037777aa6fbccf840937350deb30 Mon Sep 17 00:00:00 2001 From: smedhe Date: Thu, 23 Oct 2025 10:54:50 +0530 Subject: [PATCH 03/60] updated notebooks (#543) Updated the correct code with updated syntax, removed device_group parameter in model.compile() Signed-off-by: Sharvari Medhe Signed-off-by: Dhiraj Kumar Sah --- notebooks/QEfficientGPT2.ipynb | 13 ++++++------- notebooks/QEfficientMPT.ipynb | 11 +++++------ 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/notebooks/QEfficientGPT2.ipynb b/notebooks/QEfficientGPT2.ipynb index 74e8097bb..350f8bc31 100644 --- a/notebooks/QEfficientGPT2.ipynb +++ b/notebooks/QEfficientGPT2.ipynb @@ -33,6 +33,9 @@ "outputs": [], "source": [ "# Initiate the Original Transformer model\n", + "# Initiate the tokenizer for transformers library\n", + "from transformers import AutoTokenizer\n", + "\n", "from QEfficient import QEFFAutoModelForCausalLM as AutoModelForCausalLM\n", "\n", "# Please uncomment and use appropriate Cache Directory for transformers, in case you don't want to use default ~/.cache dir.\n", @@ -92,11 +95,7 @@ "# Compile the model for provided compilation arguments\n", "# Please use platform SDK to Check num_cores for your card.\n", "\n", - "qeff_model.compile(\n", - " 
num_cores=14,\n", - " mxfp6=True,\n", - " device_group=[0],\n", - ")" + "qeff_model.compile(num_cores=14, mxfp6_matmul=True)" ] }, { @@ -116,8 +115,8 @@ "source": [ "# post compilation, we can print the latency stats for the kv models, We provide API to print token and Latency stats on Cloud AI 100\n", "# We need the compiled prefill and decode qpc to compute the token generated, This is based on Greedy Sampling Approach\n", - "\n", - "qeff_model.generate(prompts=[\"My name is\"])" + "tokenizer = AutoTokenizer.from_pretrained(model_name)\n", + "qeff_model.generate(prompts=[\"My name is\"], tokenizer=tokenizer)" ] } ], diff --git a/notebooks/QEfficientMPT.ipynb b/notebooks/QEfficientMPT.ipynb index d1a1f3c5f..3bb99ecbc 100644 --- a/notebooks/QEfficientMPT.ipynb +++ b/notebooks/QEfficientMPT.ipynb @@ -32,6 +32,8 @@ "outputs": [], "source": [ "# Initiate the Original Transformer model\n", + "# Initiate the tokenizer for transformers library\n", + "from transformers import AutoTokenizer\n", "\n", "from QEfficient import QEFFAutoModelForCausalLM as AutoModelForCausalLM\n", "\n", @@ -91,11 +93,7 @@ "# Compile the model for provided compilation arguments\n", "# Please use platform SDK to Check num_cores for your card.\n", "\n", - "qeff_model.compile(\n", - " num_cores=14,\n", - " mxfp6=True,\n", - " device_group=[0],\n", - ")" + "qeff_model.compile(num_cores=14, mxfp6_matmul=True)" ] }, { @@ -116,7 +114,8 @@ "# post compilation, we can print the latency stats for the kv models, We provide API to print token and Latency stats on Cloud AI 100\n", "# We need the compiled prefill and decode qpc to compute the token generated, This is based on Greedy Sampling Approach\n", "\n", - "qeff_model.generate(prompts=[\"My name is\"])" + "tokenizer = AutoTokenizer.from_pretrained(model_name)\n", + "qeff_model.generate(prompts=[\"My name is\"], tokenizer=tokenizer)" ] } ], From c9e417a3779a133eae4000fb648c04de8e6e1736 Mon Sep 17 00:00:00 2001 From: Mohit Soni Date: Fri, 31 Oct 2025 
21:33:32 +0530 Subject: [PATCH 04/60] Qwen2.5_VL Example Script Update (#598) Signed-off-by: Mohit Soni Signed-off-by: Dhiraj Kumar Sah --- .../models/qwen2_5_vl/modeling_qwen2_5_vl.py | 41 ++++++++++++--- QEfficient/utils/constants.py | 4 ++ examples/qwen2_5_vl_example.py | 51 ++----------------- .../models/test_image_text_to_text_models.py | 15 ++++++ 4 files changed, 58 insertions(+), 53 deletions(-) diff --git a/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py b/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py index 030dd7a56..e5e842e6f 100644 --- a/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +++ b/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py @@ -752,8 +752,8 @@ def get_dummy_inputs(self, kv_offload: bool = False, **kwargs): seq_len=constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN, ) - lang_inputs["past_key_values"] = [[] for _ in range(self.model.config.num_hidden_layers)] - for i in range(self.model.config.num_hidden_layers): + lang_inputs["past_key_values"] = [[] for _ in range(self.model.config.text_config.num_hidden_layers)] + for i in range(self.model.config.text_config.num_hidden_layers): for kv in ["key", "value"]: lang_inputs["past_key_values"][i].append(torch.zeros(kv_cache_shape, dtype=torch.float32)) @@ -779,10 +779,10 @@ def get_specializations( **compiler_options, ): if height is None or width is None: - height = 1365 - width = 2048 + height = constants.QWEN2_5_VL_HEIGHT + width = constants.QWEN2_5_VL_WIDTH logger.warning( - "Setting height and width to be 1365 and 2048 respectively, as it was neither passed nor found in vision_config" + f"Setting height and width to be {height} and {width} respectively, as it was neither passed nor found in vision_config" ) prefill_seq_len = prefill_seq_len if prefill_seq_len else 128 ctx_len = ctx_len if ctx_len else constants.INTERN_CTX_LEN @@ -882,7 +882,7 @@ def smart_resize( def get_onnx_dynamic_axes(self, kv_offload: bool = False): # Define 
dynamic axes - num_layers = self.config.num_hidden_layers + num_layers = self.config.text_config.num_hidden_layers vision_dynamic_axes = { "pixel_values": {0: "grid_height", 1: "grid_width"}, @@ -900,6 +900,7 @@ def get_onnx_dynamic_axes(self, kv_offload: bool = False): lang_dynamic_axes[f"past_value.{i}"] = {0: "batch_size", 2: "ctx_len"} dynamic_axes = {} + if kv_offload: dynamic_axes["vision"] = vision_dynamic_axes dynamic_axes["lang"] = lang_dynamic_axes @@ -911,7 +912,7 @@ def get_onnx_dynamic_axes(self, kv_offload: bool = False): def get_output_names(self, kv_offload: bool = False): vision_output_names = ["vision_embeds"] lang_output_names = ["logits"] - for i in range(self.model.config.num_hidden_layers): + for i in range(self.model.config.text_config.num_hidden_layers): for kv in ["key", "value"]: lang_output_names.append(f"past_{kv}.{i}_RetainedState") @@ -927,6 +928,32 @@ def get_output_names(self, kv_offload: bool = False): return lang_output_names return output_names + def prepare_inputs_for_generation(self, inputs, prefill_seq_len=128, batch_size=1): + input_ids_length = inputs["input_ids"].shape[1] + + inputs["position_ids"] = torch.arange(input_ids_length).view(1, 1, input_ids_length).expand(-1, batch_size, -1) + + pos_ids, rope_deltas = self.model.get_rope_index( + inputs["input_ids"], + None if "image_grid_thw" not in inputs else inputs["image_grid_thw"], + video_grid_thw=None, + second_per_grid_ts=None, + attention_mask=inputs["attention_mask"], + ) + + inputs["position_ids"] = torch.cat((inputs["position_ids"], pos_ids), dim=0) + + num_chunks = -(input_ids_length // -prefill_seq_len) # ceil divide without float + padded_len = num_chunks * prefill_seq_len # Convert to a multiple of prompt_len + + inputs["position_ids"] = F.pad( + inputs["position_ids"], pad=(0, padded_len - input_ids_length), mode="constant", value=-1 + ) + + inputs.pop("image_grid_thw", None) + + return inputs + def get_inputs_info(self): return [ IOInfo(name="input_ids", 
datatype=torch.int64, shape=("batch_size", "seq_len")), diff --git a/QEfficient/utils/constants.py b/QEfficient/utils/constants.py index 57fba282b..5f7a4db7b 100644 --- a/QEfficient/utils/constants.py +++ b/QEfficient/utils/constants.py @@ -125,6 +125,10 @@ def get_models_dir(): # Wav2Vec2 Constant WAV2VEC2_MAX_SEQ_LEN = 480000 # 30 seconds of audio at 16 kHz sampling rate (16,000 samples/sec Ɨ 30 sec) +# Qwen2_5_vl Constants +QWEN2_5_VL_HEIGHT = 354 +QWEN2_5_VL_WIDTH = 536 + class Constants: # Export Constants. diff --git a/examples/qwen2_5_vl_example.py b/examples/qwen2_5_vl_example.py index 6b308e532..374f70ad2 100644 --- a/examples/qwen2_5_vl_example.py +++ b/examples/qwen2_5_vl_example.py @@ -6,8 +6,6 @@ # ----------------------------------------------------------------------------- import requests -import torch -import torch.nn.functional as F import transformers from PIL import Image from qwen_vl_utils import process_vision_info @@ -18,8 +16,7 @@ ## For AWQ model update pytorch version to 2.8.* model_id = "Qwen/Qwen2.5-VL-32B-Instruct" config = AutoConfig.from_pretrained(model_id) - -## Use complete model without changing num_hidden_layers as it will not work for TF version 4.55.0 for Qwen2.5VL model +config.text_config.num_hidden_layers = 2 qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( model_id, attn_implementation="eager", kv_offload=True, config=config @@ -28,13 +25,13 @@ processor = AutoProcessor.from_pretrained(model_id) ### use skip_vision=Ture, if want to run only text, ow false ### -skip_vision = False +skip_vision = True if skip_vision: ## Only Text ## ## Set Batch_Size ## - batch_size = 2 + batch_size = 1 qeff_model.compile( batch_size=batch_size, prefill_seq_len=128, @@ -68,25 +65,7 @@ return_tensors="pt", ) - pos_ids, rope_deltas = qeff_model.model.get_rope_index( - inputs["input_ids"], - image_grid_thw=None, - video_grid_thw=None, - second_per_grid_ts=None, - attention_mask=inputs["attention_mask"], - ) - - input_ids_length = 
inputs["input_ids"].shape[1] - - inputs["position_ids"] = torch.cat([pos_ids, pos_ids[0].unsqueeze(0)], dim=0) - - prefill_seq_len = 128 - num_chunks = -(input_ids_length // -prefill_seq_len) # ceil divide without float - padded_len = num_chunks * prefill_seq_len # Convert to a multiple of prompt_len - - inputs["position_ids"] = F.pad( - inputs["position_ids"], pad=(0, padded_len - input_ids_length), mode="constant", value=-1 - ) + inputs = qeff_model.model.prepare_inputs_for_generation(inputs=inputs, prefill_seq_len=128, batch_size=batch_size) streamer = TextStreamer(tokenizer) output = qeff_model.generate(inputs=inputs, generation_len=100) @@ -148,29 +127,9 @@ padding=True, return_tensors="pt", ) - input_ids_length = inputs["input_ids"].shape[1] - - inputs["position_ids"] = torch.arange(input_ids_length).view(1, 1, input_ids_length).expand(-1, batch_size, -1) - - pos_ids, rope_deltas = qeff_model.model.model.get_rope_index( - inputs["input_ids"], - inputs["image_grid_thw"], - video_grid_thw=None, - second_per_grid_ts=None, - attention_mask=inputs["attention_mask"], - ) - inputs["position_ids"] = torch.cat((inputs["position_ids"], pos_ids), dim=0) - - prefill_seq_len = 128 - num_chunks = -(input_ids_length // -prefill_seq_len) # ceil divide without float - padded_len = num_chunks * prefill_seq_len # Convert to a multiple of prompt_len - - inputs["position_ids"] = F.pad( - inputs["position_ids"], pad=(0, padded_len - input_ids_length), mode="constant", value=-1 - ) + inputs = qeff_model.model.prepare_inputs_for_generation(inputs=inputs, prefill_seq_len=128, batch_size=batch_size) - inputs.pop("image_grid_thw") streamer = TextStreamer(tokenizer) output = qeff_model.generate(inputs=inputs, generation_len=100) print(output.generated_ids) diff --git a/tests/transformers/models/test_image_text_to_text_models.py b/tests/transformers/models/test_image_text_to_text_models.py index a7b4162aa..e6a145195 100644 --- a/tests/transformers/models/test_image_text_to_text_models.py 
+++ b/tests/transformers/models/test_image_text_to_text_models.py @@ -134,6 +134,17 @@ "Can you describe the image in detail.", 1, ), + ( + "Qwen/Qwen2.5-VL-3B-Instruct", + True, + 1, + 128, + 4096, + 1540, + "https://picsum.photos/id/237/536/354", + "Can you describe the image in detail.", + 1, + ), # ( # "meta-llama/Llama-3.2-11B-Vision-Instruct", # True, @@ -320,6 +331,10 @@ def check_image_text_to_text_pytorch_vs_kv_vs_ort_vs_ai100( qnn_config=qnn_config, ) inputs = processor(images=image, text=prompt, return_tensors="pt") + if hasattr(qeff_model.model.config, "model_type") and qeff_model.model.config.model_type == "qwen2_5_vl": + inputs = qeff_model.model.prepare_inputs_for_generation( + inputs=inputs, prefill_seq_len=prompt_len, batch_size=batch_size + ) if "pixel_values" in inputs: inputs["pixel_values"] = inputs["pixel_values"].to(torch.float32) print("QPC Outputs (QAIC):") From d27fe98b3a1267fe9ee0cdac018638fefc74b906 Mon Sep 17 00:00:00 2001 From: Sanidhya Singal Date: Sat, 1 Nov 2025 06:12:42 +0530 Subject: [PATCH 05/60] Extend On-Device Sampling Support to more Causal Language Models (#553) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### šŸ“¢ Expanded On-Device Sampling Support in QEfficient Excited to share that **On-Device Sampling**—previously available only for `LlamaForCausalLM`—is now supported across a broader set of architectures! This enhancement brings faster, more efficient inference directly to the QAIC device. #### āœ… Newly Supported Architectures: 1. `FalconForCausalLM` 2. `GemmaForCausalLM` 3. `GPT2LMHeadModel` 4. `GPTJForCausalLM` 5. `GraniteForCausalLM` 6. `GraniteMoeForCausalLM` 7. `LlamaForCausalLM` (existing) 8. `MptForCausalLM` 9. `Phi3ForCausalLM` 10. `Qwen2ForCausalLM` #### āš ļø Architectures Still Pending Support: 1. `GPTBigCodeForCausalLM` 2. `InternVLChatModel` 3. `MistralForCausalLM` 4. `MixtralForCausalLM` 5. `LlamaSwiftKVForCausalLM` 6. 
`Grok1ModelForCausalLM` We’re actively working to extend support to these models. Contributions, feedback, and testing from the community are always welcome to help accelerate this effort! --------- Signed-off-by: quic-sanising Signed-off-by: sanising Signed-off-by: Dhiraj Kumar Sah Co-authored-by: sanising Co-authored-by: Dhiraj Kumar Sah Co-authored-by: Hem Agnihotri Signed-off-by: Dhiraj Kumar Sah --- QEfficient/transformers/models/pytorch_transforms.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/QEfficient/transformers/models/pytorch_transforms.py b/QEfficient/transformers/models/pytorch_transforms.py index eeb7bd6e6..23ab2ca5f 100644 --- a/QEfficient/transformers/models/pytorch_transforms.py +++ b/QEfficient/transformers/models/pytorch_transforms.py @@ -676,8 +676,16 @@ class SamplerTransform: # supported architectures _module_mapping = { - # Llama + QEffFalconForCausalLM, + QEffGemmaForCausalLM, + QEffGPT2LMHeadModel, + QEffGPTJForCausalLM, + QEffGraniteForCausalLM, + QEffGraniteMoeForCausalLM, QEffLlamaForCausalLM, + QEffMptForCausalLM, + QEffPhi3ForCausalLM, + QEffQwen2ForCausalLM, } @classmethod From 120698ff5f5208b8edc28d1a75435f6112be7cce Mon Sep 17 00:00:00 2001 From: Meet Patel Date: Mon, 3 Nov 2025 09:34:10 +0530 Subject: [PATCH 06/60] [QEff. Finetune]: Added fix for pad_to_max_length in tokenization. 
(#599) Signed-off-by: meetkuma Signed-off-by: Dhiraj Kumar Sah --- QEfficient/finetune/dataset/alpaca_dataset.py | 9 +++++++-- .../dataset/custom_dataset/sample_dataset_preproc.py | 9 +++++++-- QEfficient/finetune/dataset/grammar_dataset.py | 10 ++++++++-- 3 files changed, 22 insertions(+), 6 deletions(-) diff --git a/QEfficient/finetune/dataset/alpaca_dataset.py b/QEfficient/finetune/dataset/alpaca_dataset.py index c6ddb6ce1..ff44860eb 100644 --- a/QEfficient/finetune/dataset/alpaca_dataset.py +++ b/QEfficient/finetune/dataset/alpaca_dataset.py @@ -58,10 +58,15 @@ def __getitem__(self, index): else: prompt = PROMPT_DICT["prompt_input"].format_map(ann) example = prompt + ann["output"] + + if self.context_length is not None: + padding_type = "max_length" + else: + padding_type = True prompt = torch.tensor( - self.tokenizer.encode(prompt, max_length=self.context_length, pad_to_max_length=True), dtype=torch.int64 + self.tokenizer.encode(prompt, max_length=self.context_length, padding=padding_type), dtype=torch.int64 ) - example = self.tokenizer.encode(example, max_length=self.context_length, pad_to_max_length=True) + example = self.tokenizer.encode(example, max_length=self.context_length, padding=padding_type) example.append(self.tokenizer.eos_token_id) example = torch.tensor(example, dtype=torch.int64) labels = copy.deepcopy(example) diff --git a/QEfficient/finetune/dataset/custom_dataset/sample_dataset_preproc.py b/QEfficient/finetune/dataset/custom_dataset/sample_dataset_preproc.py index 78db5674c..383d6fd67 100644 --- a/QEfficient/finetune/dataset/custom_dataset/sample_dataset_preproc.py +++ b/QEfficient/finetune/dataset/custom_dataset/sample_dataset_preproc.py @@ -61,17 +61,22 @@ def apply_prompt_template(sample): dataset = dataset.map(apply_prompt_template, remove_columns=list(dataset.features)) def tokenize_add_label(sample): + if context_length is not None: + padding_type = "max_length" + else: + padding_type = True + input = tokenizer.encode( 
tokenizer.bos_token + sample["input"], add_special_tokens=False, max_length=context_length, - pad_to_max_length=True, + padding=padding_type, ) label = tokenizer.encode( sample["label"] + tokenizer.pad_token + tokenizer.eos_token, add_special_tokens=False, max_length=context_length, - pad_to_max_length=True, + padding=padding_type, ) sample = { diff --git a/QEfficient/finetune/dataset/grammar_dataset.py b/QEfficient/finetune/dataset/grammar_dataset.py index e40c01e97..8fb3eb152 100644 --- a/QEfficient/finetune/dataset/grammar_dataset.py +++ b/QEfficient/finetune/dataset/grammar_dataset.py @@ -44,17 +44,23 @@ def convert_to_features(self, example_batch): target_ = example_batch["target"] prompt = f"Correct this to standard English: {input_}\n---\nCorrected: " + + if self.context_length is not None: + padding_type = "max_length" + else: + padding_type = True + prompt_ids = self.tokenizer.encode( self.tokenizer.bos_token + prompt, add_special_tokens=False, max_length=self.context_length, - pad_to_max_length=True, + padding=padding_type, ) label_ids = self.tokenizer.encode( target_ + self.tokenizer.eos_token, add_special_tokens=False, max_length=self.context_length, - pad_to_max_length=True, + padding=padding_type, ) sample = { From e8cc0f769f0497217f60955713d7ba7718401f56 Mon Sep 17 00:00:00 2001 From: Mamta Singh <168400541+quic-mamta@users.noreply.github.com> Date: Tue, 4 Nov 2025 14:42:26 +0530 Subject: [PATCH 07/60] Enable CB for vlms with multiple images and multiple prompts (#583) Signed-off-by: Mamta Singh Signed-off-by: Rishin Raj Signed-off-by: Asmita Goswami Signed-off-by: Mohit Soni Signed-off-by: vbaddi Co-authored-by: Mamta Singh Co-authored-by: Asmita Goswami Co-authored-by: Rishin Raj Co-authored-by: Mohit Soni Co-authored-by: Vinayak Baddi Signed-off-by: Dhiraj Kumar Sah --- QEfficient/generation/cloud_infer.py | 16 +- QEfficient/generation/embedding_handler.py | 367 ++++++++ .../generation/text_generation_inference.py | 8 +- 
QEfficient/generation/vlm_generation.py | 784 ++++++++++++++++++ .../models/llama4/modeling_llama4.py | 98 ++- .../transformers/models/modeling_auto.py | 114 ++- .../models/qwen2_5_vl/modeling_qwen2_5_vl.py | 199 ++++- QEfficient/utils/__init__.py | 1 + QEfficient/utils/_utils.py | 30 + examples/llama4_CB_example_vision_lang.py | 93 +++ examples/qwen2_5_vl_CB.py | 72 ++ examples/qwen2_5_vl_example.py | 3 + 12 files changed, 1696 insertions(+), 89 deletions(-) create mode 100644 QEfficient/generation/embedding_handler.py create mode 100644 QEfficient/generation/vlm_generation.py create mode 100644 examples/llama4_CB_example_vision_lang.py create mode 100644 examples/qwen2_5_vl_CB.py diff --git a/QEfficient/generation/cloud_infer.py b/QEfficient/generation/cloud_infer.py index 8519d824c..5068c174e 100644 --- a/QEfficient/generation/cloud_infer.py +++ b/QEfficient/generation/cloud_infer.py @@ -90,8 +90,10 @@ def __init__( self.program = qaicrt.Program(self.context, None, qpc, prog_properties) if self.program.load() != qaicrt.QStatus.QS_SUCCESS: raise RuntimeError("Failed to load program") + self.is_active = False if activate: self.activate() + self.is_active = True # Create input qbuffers and buf_dims self.qbuffers = [qaicrt.QBuffer(bytes(binding.size)) for binding in self.bindings] self.buf_dims = qaicrt.BufferDimensionsVecRef( @@ -108,15 +110,17 @@ def output_names(self) -> List[str]: def activate(self): """Activate qpc""" - - self.program.activate() - self.execObj = qaicrt.ExecObj(self.context, self.program) + if not self.is_active: + self.program.activate() + self.execObj = qaicrt.ExecObj(self.context, self.program) + self.is_active = True def deactivate(self): """Deactivate qpc""" - - del self.execObj - self.program.deactivate() + if self.is_active: + del self.execObj + self.program.deactivate() + self.is_active = False def set_buffers(self, buffers: Dict[str, np.ndarray]): """ diff --git a/QEfficient/generation/embedding_handler.py 
b/QEfficient/generation/embedding_handler.py new file mode 100644 index 000000000..76da7afc2 --- /dev/null +++ b/QEfficient/generation/embedding_handler.py @@ -0,0 +1,367 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +""" +Vision Handler for Vision-Language Models + +This module provides the VisionHandler class that encapsulates all vision model +operations, separating them from the main text generation logic. +""" + +from typing import Any, Dict, Optional, Tuple + +import numpy as np +import requests +import torch +from PIL import Image +from transformers import AutoImageProcessor + +from QEfficient.generation.cloud_infer import QAICInferenceSession +from QEfficient.utils.logging_utils import logger + + +class VisionHandler: + """ + Handles all vision model operations for vision-language models. + + This class encapsulates vision preprocessing, inference, and output handling, + providing a clean separation between vision and language processing. 
+ """ + + def __init__( + self, + qeff_model: Optional[QAICInferenceSession], + vision_session: Optional[QAICInferenceSession], + processor: Optional[AutoImageProcessor], + config: Optional[Dict[str, Any]] = None, + lang_session: Optional[QAICInferenceSession] = None, + ): + """ + Initialize vision handler + + Args: + vision_session: QAICInferenceSession for vision model + processor: AutoImageProcessor for image preprocessing + config: Configuration dictionary with vision model parameters + lang_session: Optional language session for coordination (to avoid resource conflicts) + """ + self._qeff_model = qeff_model + self._vision_session = vision_session + self._processor = processor + self._config = config or {} + self._lang_session = lang_session # Store language session for coordination + + # Cache for vision output shapes + self._vision_output_shapes = None + + if self._vision_session and not self._processor: + logger.warning("Vision session provided but no processor. Vision functionality may be limited.") + + def is_available(self) -> bool: + """ + Check if vision processing is available + + Returns: + True if both vision session and processor are available + """ + return self._vision_session is not None and self._processor is not None + + def prepare_vlm_inputs(self, image_url: str, query: str, prefill_seq_len: int) -> Dict[str, np.ndarray]: + """ + Download and preprocess image into model inputs + + Args: + image_url: URL or path to image + query: Text query to process with image + + Returns: + Dictionary of vision model inputs + + Raises: + ValueError: If vision handler is not properly initialized + RuntimeError: If image processing fails + """ + if not self.is_available(): + raise ValueError("Vision handler not properly initialized. 
Need both vision_session and processor.") + + try: + # Download image + if image_url.startswith(("http://", "https://")): + image = Image.open(requests.get(image_url, stream=True).raw) + else: + image = Image.open(image_url) + + # Prepare conversation format + conversation = [ + { + "role": "user", + "content": [ + {"type": "text", "text": query}, + {"type": "image"}, + ], + }, + ] + + # Apply chat template + prompt = self._processor.apply_chat_template(conversation, add_generation_prompt=True) + + # Process image and text + inputs = self._processor(images=image, text=prompt, return_tensors="pt") + + if ( + hasattr(self._qeff_model.model.config, "model_type") + and self._qeff_model.model.config.model_type == "qwen2_5_vl" + ): + inputs = self._qeff_model.model.prepare_inputs_for_generation( + inputs=inputs, prefill_seq_len=prefill_seq_len, batch_size=inputs["input_ids"].shape[0] + ) + + # Convert to float32 if needed + if "pixel_values" in inputs: + inputs["pixel_values"] = inputs["pixel_values"].to(torch.float32) + + # Convert to numpy arrays + vision_inputs = {} + for k, v in inputs.items(): + if k in { + "pixel_values", + "image_masks", + "image_input_idx", + "valid_idx", + "aspect_ratio_ids", + "aspect_ratio_mask", + }: + vision_inputs[k] = np.array(v) + + # Convert specific inputs to float16 + vision_inputs_fp16 = {"pixel_values", "image_masks"} + for k in vision_inputs_fp16: + if k in vision_inputs: + vision_inputs[k] = vision_inputs[k].astype("float16") + + lang_inputs = {k: v for k, v in inputs.items() if k not in vision_inputs} + + return vision_inputs, lang_inputs + + except Exception as e: + raise RuntimeError(f"Failed to process image {image_url}: {str(e)}") + + def run_vision_inference(self, vision_inputs: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + """ + Execute vision model inference with session coordination + + Args: + vision_inputs: Preprocessed vision inputs + + Returns: + Vision embeddings and metadata + + Raises: + ValueError: If vision 
session is not available + RuntimeError: If inference fails + """ + if not self._vision_session: + raise ValueError("Vision session not available") + + lang_was_active = False + try: + # Coordinate with language session to avoid resource conflicts + if self._lang_session and self._lang_session.is_active: + logger.debug("Deactivating language session before vision inference") + self._lang_session.deactivate() + lang_was_active = True + + # Activate vision session + logger.debug("Activating vision session for inference") + self._vision_session.activate() + + # Run inference + vision_outputs = self._vision_session.run(vision_inputs) + + # Deactivate vision session + logger.debug("Deactivating vision session after inference") + self._vision_session.deactivate() + + # Reactivate language session if it was active before + if lang_was_active and self._lang_session: + logger.debug("Reactivating language session after vision inference") + self._lang_session.activate() + + return vision_outputs + + except Exception as e: + # Ensure proper cleanup on error + if self._vision_session: + try: + self._vision_session.deactivate() + except Exception: + logger.warning("Deactivating vision session failed") + + # Restore language session if needed + if lang_was_active and self._lang_session: + try: + self._lang_session.activate() + except Exception: + logger.warning("Deactivating language session failed") + + raise RuntimeError(f"Vision inference failed: {str(e)}") + + def get_vision_output_shapes(self) -> Dict[str, Tuple[int, ...]]: + """ + Get vision output dimensions from config or session + + Returns: + Dictionary mapping output names to shapes + """ + if self._vision_output_shapes is not None: + return self._vision_output_shapes + + # Try to get from config first + if self._config and "vision_output_shapes" in self._config: + self._vision_output_shapes = self._config["vision_output_shapes"] + return self._vision_output_shapes + + # Try to derive from vision session + if 
self._vision_session: + try: + shapes = {} + for output_name in self._vision_session.output_names: + if ( + hasattr(self._vision_session, "bindings") + and output_name in self._vision_session.binding_index_map + ): + binding_idx = self._vision_session.binding_index_map[output_name] + if hasattr(self._vision_session.bindings[binding_idx], "dims"): + shapes[output_name] = tuple(self._vision_session.bindings[binding_idx].dims) + + if shapes: + self._vision_output_shapes = shapes + return shapes + except Exception as e: + logger.warning(f"Could not derive vision output shapes from session: {e}") + + # Fallback to default shapes (these were hard-coded in original implementation) + default_shapes = { + "vision_embeds": (2448, 5120) # This should be derived from model config + } + + logger.warning("Using default vision output shapes. Consider providing shapes in config.") + self._vision_output_shapes = default_shapes + return default_shapes + + def setup_vision_buffers(self): + """ + Configure vision model output buffers + + Raises: + ValueError: If vision session is not available + """ + if not self._vision_session: + raise ValueError("Vision session not available") + + try: + shapes = self.get_vision_output_shapes() + + # Set up output buffers + buffers = {} + for output_name, shape in shapes.items(): + # Create placeholder with appropriate dtype + if "vision_embeds" in output_name: + buffers[output_name] = np.zeros(shape, dtype=np.float16) + else: + buffers[output_name] = np.zeros(shape, dtype=np.float32) + + self._vision_session.set_buffers(buffers) + + except Exception as e: + raise RuntimeError(f"Failed to setup vision buffers: {str(e)}") + + def prepare_complete_vision_language_inputs( + self, image_url: str, query: str + ) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]: + """ + Complete pipeline: prepare inputs and run vision inference + + Args: + image_url: URL or path to image + query: Text query + + Returns: + Tuple of (vision_inputs, vision_outputs) + 
""" + # Prepare vision inputs + vision_inputs = self.prepare_vision_inputs(image_url, query) + + # Setup buffers + self.setup_vision_buffers() + + # Run vision inference + vision_outputs = self.run_vision_inference(vision_inputs) + + return vision_inputs, vision_outputs + + def get_processed_inputs( + self, image_url: str, query: str, prefill_seq_len: int + ) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]: + """ + Process vision inputs and prepare language model inputs + + Args: + image_url: URL or path to image + query: Text query + padded_len: Padded sequence length for language model + + Returns: + Tuple of (language_inputs, vision_outputs) + """ + if not self.is_available(): + raise ValueError("Vision handler not properly initialized") + + try: + ## Get vlm inputs ## + vision_inputs, lang_inputs = self.prepare_vlm_inputs(image_url, query, prefill_seq_len) + + # Handle padding for language model + pad_token_id = 1 + input_ids_length = lang_inputs["input_ids"].shape[1] + num_chunks = -(input_ids_length // -prefill_seq_len) + padded_len = num_chunks * prefill_seq_len + + lang_inputs["input_ids"] = torch.nn.functional.pad( + lang_inputs["input_ids"], + (0, padded_len - input_ids_length), + "constant", + pad_token_id, + ) + lang_inputs["attention_mask"] = torch.nn.functional.pad( + lang_inputs["attention_mask"], (0, padded_len - input_ids_length), "constant", 0 + ) + + if "cross_attention_mask" in lang_inputs: + lang_inputs["cross_attention_mask"] = torch.nn.functional.pad( + lang_inputs["cross_attention_mask"], (0, 0, 0, 0, 0, padded_len - input_ids_length) + ) + + for k, v in lang_inputs.items(): + lang_inputs[k] = np.array(v) + + vision_outputs = {} + if vision_inputs: + self.setup_vision_buffers() + vision_outputs = self.run_vision_inference(vision_inputs) + + if "position_ids" in lang_inputs: + lang_inputs.pop("attention_mask") + else: + lang_inputs["position_ids"] = np.where(lang_inputs.pop("attention_mask"), np.arange(padded_len), -1) + + 
lang_inputs["image_idx"] = np.array([[0]]) + + return lang_inputs, vision_outputs, num_chunks + + except Exception as e: + raise RuntimeError(f"Failed to process vision-language inputs: {str(e)}") diff --git a/QEfficient/generation/text_generation_inference.py b/QEfficient/generation/text_generation_inference.py index 6d04cf573..e96908824 100755 --- a/QEfficient/generation/text_generation_inference.py +++ b/QEfficient/generation/text_generation_inference.py @@ -437,15 +437,19 @@ def __init__( include_sampler: bool = False, return_pdfs: bool = False, sampling_params: Optional[Dict[str, Any]] = None, + activate: bool = True, ) -> None: self._ctx_len = ctx_len self._write_io_dir = write_io_dir self.is_tlm = is_tlm self.return_pdfs = return_pdfs self.sampling_params = sampling_params + self._qpc_path = qpc_path # Store qpc_path for later use # Load QPC - self._session = QAICInferenceSession(qpc_path, device_id, enable_debug_logs=enable_debug_logs) + self._session = QAICInferenceSession( + qpc_path, device_id, activate=activate, enable_debug_logs=enable_debug_logs + ) # Validate sampler inputs for On-Device Sampling self.include_sampler = validate_sampler_inputs( @@ -778,6 +782,7 @@ def run_prefill(self, prompt, generation_len, prefill_logit_bs=1, decode_batch_i if decode_batch_id is not None: inputs["batch_index"] = decode_batch_id + if self.is_tlm: inputs["num_logits_to_keep"] = np.zeros((1, 1)) if self.include_sampler: @@ -808,6 +813,7 @@ def run_prefill(self, prompt, generation_len, prefill_logit_bs=1, decode_batch_i if self.include_sampler: chunk_inputs["last_accepted_output_tokens"] = chunk_inputs["input_ids"] outputs = self._session.run(chunk_inputs) + if self._write_io_dir is not None: write_io_files(inputs, outputs, self._write_io_dir, "prefill", "aic_batch_io", True, False) return ( diff --git a/QEfficient/generation/vlm_generation.py b/QEfficient/generation/vlm_generation.py new file mode 100644 index 000000000..2e8f04f2b --- /dev/null +++ 
b/QEfficient/generation/vlm_generation.py @@ -0,0 +1,784 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +""" +This module provides the VisionLanguageGeneration class that inherits from +QEffTextGenerationBase, enabling all advanced text generation features while +maintaining full API compatibility with the original VisionLanguageGeneration. + +Key enhancements: +- Continuous batching support for vision models +- Advanced streaming capabilities +- On-device sampling support +- LoRA adapter support +- Better performance metrics +""" + +from collections import deque +from time import perf_counter +from typing import Any, Dict, List, Optional, Union + +import numpy as np +from transformers import AutoImageProcessor, PreTrainedTokenizer, PreTrainedTokenizerFast + +from QEfficient.generation.cloud_infer import QAICInferenceSession +from QEfficient.generation.embedding_handler import VisionHandler +from QEfficient.generation.text_generation_inference import ( + CloudAI100ExecInfo, + PerfMetrics, + QEffTextGenerationBase, + TextGeneration, + calculate_latency, + write_io_files, +) +from QEfficient.utils import LRUCache +from QEfficient.utils.logging_utils import logger + + +class VisionLanguageGeneration(QEffTextGenerationBase): + """ + Enhanced vision-language generation class inheriting from QEffTextGenerationBase. + + This class maintains full API compatibility with VisionLanguageGeneration while + adding advanced features like continuous batching, streaming, and sampling. + + Example: + >>> # Drop-in replacement for VisionLanguageGeneration + >>> vlm = VisionLanguageGeneration( + ... tokenizer=tokenizer, + ... processor=processor, + ... lang_qpc_path="path/to/lang.qpc", + ... vision_qpc_path="path/to/vision.qpc", + ... 
device_id=[0] + ... ) + >>> result = vlm.generate( + ... images=["image1.jpg"], + ... prompts=["Describe this image"], + ... generation_len=512 + ... ) + + >>> # Enhanced usage with new features + >>> vlm_enhanced = VisionLanguageGeneration( + ... tokenizer=tokenizer, + ... processor=processor, + ... lang_qpc_path="path/to/lang.qpc", + ... vision_qpc_path="path/to/vision.qpc", + ... device_id=[0], + ... full_batch_size=8, # Enable continuous batching + ... include_sampler=True, # Enable on-device sampling + ... sampling_params=sampling_config + ... ) + """ + + def __init__( + self, + qeff_model, + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], + processor: AutoImageProcessor, + lang_qpc_path: str, + vision_qpc_path: str, + device_id: Optional[List[int]] = None, + ctx_len: Optional[int] = None, + enable_debug_logs: bool = False, + write_io_dir: Optional[str] = None, + full_batch_size: Optional[int] = None, + is_tlm: bool = False, + include_sampler: bool = False, + return_pdfs: bool = False, + sampling_params: Optional[Dict[str, Any]] = None, + ): + """ + Initialize vision-language generation with enhanced capabilities + + Args: + qeff_model: QEff model instance + tokenizer: Text tokenizer + processor: Image processor + lang_qpc_path: Path to language model QPC + vision_qpc_path: Path to vision encoder QPC + device_id: Device IDs for execution (default: [0]) + ctx_len: Context length + enable_debug_logs: Enable debug logging + write_io_dir: Directory for I/O file writing + full_batch_size: Enable continuous batching (new feature) + is_tlm: Target language model flag + include_sampler: Enable on-device sampling (new feature) + return_pdfs: Return probability distributions + sampling_params: Sampling parameters for on-device sampling + """ + # Validate required parameters + if not lang_qpc_path: + raise TypeError("lang_qpc_path is required") + if not vision_qpc_path: + raise TypeError("vision_qpc_path is required") + + # Initialize base class with 
language QPC + # Pass activate=False to prevent premature activation before vision components are ready + super().__init__( + tokenizer=tokenizer, + qpc_path=lang_qpc_path, + full_batch_size=full_batch_size, + ctx_len=ctx_len, + device_id=device_id, + enable_debug_logs=enable_debug_logs, + write_io_dir=write_io_dir, + is_tlm=is_tlm, + include_sampler=include_sampler, + return_pdfs=return_pdfs, + sampling_params=sampling_params, + activate=False, # vision components need to be initialized first + ) + + # Vision-specific initialization + self.is_qwen2_5_vl = ( + hasattr(qeff_model.model.config, "model_type") and qeff_model.model.config.model_type == "qwen2_5_vl" + ) + self.qeff_model = qeff_model + self.processor = processor + self._vision_qpc_path = vision_qpc_path + self.device_id = device_id # Store device_id for vision components + self.enable_debug_logs = enable_debug_logs # Store for vision components + self._vision_outputs_cache = LRUCache(max_size=100) # LRU cache for vision outputs + self._vision_cache = {} # Cache for vision outputs across batches + self._init_vision_components() + + # Now that vision components are initialized, activate the text session + self._session.activate() + + logger.info( + f"VisionLanguageGeneration initialized: batch_size={self.batch_size}, " + f"prefill_seq_len={self._prefill_seq_len}, ctx_len={ctx_len}, " + f"continuous_batching={'enabled' if full_batch_size else 'disabled'}, " + f"sampling={'enabled' if include_sampler else 'disabled'}" + ) + + def _init_vision_components(self): + """Initialize vision-specific components""" + # Vision session (separate from base class language session) + self._vision_session = QAICInferenceSession( + self._vision_qpc_path, self.device_id, activate=False, enable_debug_logs=self.enable_debug_logs + ) + + # Vision handler with language session coordination + vision_config = self._get_vision_config() + self._vision_handler = VisionHandler( + qeff_model=self.qeff_model, + 
vision_session=self._vision_session, + processor=self.processor, + config=vision_config, + lang_session=self._session, # Pass language session for coordination + ) + + # Setup vision buffer skipping + self._setup_vision_buffer_skipping() + + def _get_vision_config(self) -> Dict[str, Any]: + """ + Derive vision config from session + + Returns: + Dictionary with vision configuration + """ + config = {} + if self._vision_session: + try: + shapes = {} + for output_name in self._vision_session.output_names: + if ( + hasattr(self._vision_session, "bindings") + and output_name in self._vision_session.binding_index_map + ): + binding_idx = self._vision_session.binding_index_map[output_name] + if hasattr(self._vision_session.bindings[binding_idx], "dims"): + shapes[output_name] = tuple(self._vision_session.bindings[binding_idx].dims) + + if shapes: + config["vision_output_shapes"] = shapes + except Exception as e: + logger.warning(f"Could not derive vision config from session: {e}") + + return config + + def _setup_vision_buffer_skipping(self): + """Skip KV cache and retained state buffers for vision session""" + # Pre-compute skip buffers + self._vision_skip_buffers = [ + x + for x in self._vision_session.input_names + self._vision_session.output_names + if x.startswith("past_") or x.endswith("_RetainedState") + ] + self._vision_session.skip_buffers(self._vision_skip_buffers) + + # Pre-compute language skip buffers + self._lang_skip_buffers = [ + x + for x in self._session.input_names + self._session.output_names + if x.startswith("past_") or x.endswith("_RetainedState") + ] + + def run_prefill_for_all_inputs(self, prompt_queue, generation_len): + """ + Runs prefill for all inputs in the prompt queue and updates the decode input. + + Method iterates over the full batch size and for each decode batch ID, it pops the next prompt from the queue. It then runs prefill for the next prompt and updates the decode input with the outputs. 
+ + Args: + prompt_queue (deque): The queue of prompts. + generation_len (int): The generation length. + + """ + for decode_batch_id in range(self.full_batch_size): + next_prompt = prompt_queue.popleft() + + # run prefill for num_chunks + outputs, position_ids, generation_len = self.run_prefill( + next_prompt, generation_len, decode_batch_id=np.array(decode_batch_id, dtype=np.int64).reshape(1, 1) + ) + + if self.is_qwen2_5_vl: + _ = self.update_decode_inputs_qwen2_5_vl(outputs, position_ids, generation_len, decode_batch_id) + else: + _ = self.update_decode_input(outputs, position_ids, generation_len, decode_batch_id) + + def update_decode_inputs_qwen2_5_vl(self, outputs, position_ids, generation_len, decode_batch_id=None): + """ + Updates the decode input with the generated values. + Args: + outputs (dict): The outputs of the model. + position_ids (array): The position IDs. + generation_len (int): The generation length. + decode_batch_id (int, optional): The decode batch ID. If None, all values are updated. Defaults to None. + + Returns: + next_token_id (array): The next token ID. + """ + next_token_id = self._fetch_next_token_id(outputs) + + # Store the generated values. + self.decode_input_ids[decode_batch_id or slice(None)] = next_token_id + self.decode_pos_ids[:, decode_batch_id] = position_ids.squeeze(1) + self.generated_ids[decode_batch_id or slice(None), 0] = next_token_id.squeeze(1) + self.generation_len[decode_batch_id or slice(None)] = generation_len + return next_token_id + + def _execute_chunked_prefill( + self, + lang_inputs: Dict[str, np.ndarray], + num_chunks: int, + decode_batch_id: Optional[np.ndarray] = None, + prefill_logit_bs: int = 1, + ) -> Dict[str, np.ndarray]: + """ + Execute chunked prefill with language inputs + + Args: + lang_inputs: Pre-processed language inputs with input_ids, position_ids, etc. 
+ num_chunks: Number of chunks to process + decode_batch_id: Batch ID for continuous batching (optional) + prefill_logit_bs: Batch size for prefill logits + + Returns: + Final prefill outputs + """ + # Set output buffers + self._set_output_buffers(batch_size=prefill_logit_bs, sequence_length=1) + + # Skip buffers for dual-QPC coordination + self._session.skip_buffers(self._lang_skip_buffers) + + # Run chunked prefill + outputs = None + chunk_image_idx = None + + for i in range(num_chunks): + input_ids_slice = lang_inputs["input_ids"][:, i * self._prefill_seq_len : (i + 1) * self._prefill_seq_len] + position_ids_slice = lang_inputs["position_ids"][ + ..., i * self._prefill_seq_len : (i + 1) * self._prefill_seq_len + ] + + chunk_inputs = { + "input_ids": input_ids_slice, + "position_ids": position_ids_slice, + "image_idx": chunk_image_idx if chunk_image_idx is not None else np.array([[0]], dtype=np.int64), + } + + if decode_batch_id is not None: + chunk_inputs["batch_index"] = decode_batch_id + + if "cross_attention_mask" in lang_inputs: + chunk_inputs["cross_attention_mask"] = lang_inputs["cross_attention_mask"] + + outputs = self._session.run(chunk_inputs) + + if "image_idx_output" in outputs: + chunk_image_idx = outputs["image_idx_output"] + + if self._write_io_dir is not None: + write_io_files(lang_inputs, outputs, self._write_io_dir, "prefill", "aic_batch_io", True, False) + + # Prepare decode-time cross_attention_mask + if "cross_attention_mask" in lang_inputs: + bs, _, num_images, img_tiles = lang_inputs["cross_attention_mask"].shape + self._decode_cross_attention_mask = np.ones((bs, 1, num_images, img_tiles), dtype=np.int64) + else: + self._decode_cross_attention_mask = None + + return outputs + + def run_prefill(self, prompt, generation_len, prefill_logit_bs=1, decode_batch_id=None): + """ + Override base class prefill to handle vision processing + + Args: + prompt: Can be string or tuple (image_path, text_prompt) + generation_len: Generation length + 
prefill_logit_bs: Prefill batch size + decode_batch_id: Batch ID for continuous batching + + Returns: + Same as base class: (outputs, position_ids, generation_len) + """ + # Normalize prompt: TextGeneration passes a list even for batch_size=1 + if isinstance(prompt, list) and len(prompt) > 0 and isinstance(prompt[0], tuple) and len(prompt[0]) == 2: + # Unwrap single (image_path, text_prompt) tuple + if len(prompt) == 1: + prompt = prompt[0] + else: + raise NotImplementedError( + "VisionLanguageGeneration.run_prefill currently supports a single (image, text) pair per call." + ) + # Check if this is a vision-language prompt + if isinstance(prompt, tuple) and len(prompt) == 2: + image_path, text_prompt = prompt + + # Check cache for vision outputs + cache_key = image_path if isinstance(image_path, str) else str(image_path) + if cache_key in self._vision_cache: + lang_inputs, vision_outputs, num_chunks = self._vision_cache[cache_key] + logger.debug(f"Using cached vision outputs for {cache_key}") + else: + # Build language inputs with processor-aware vision/text integration + lang_inputs, vision_outputs, num_chunks = self._vision_handler.get_processed_inputs( + image_url=image_path, query=text_prompt, prefill_seq_len=self._prefill_seq_len + ) + # Cache for future use + self._vision_cache[cache_key] = (lang_inputs, vision_outputs, num_chunks) + logger.debug(f"Cached vision outputs for {cache_key}") + + # Set vision buffers in language session + self._session.set_buffers(vision_outputs) + logger.debug(f"Vision buffers set: {list(vision_outputs.keys())}") + self._vision_processed = True + self._vision_outputs = vision_outputs + + # Calculate generation_len consistent with ctx_len + max_gen_len = self._ctx_len - np.where(lang_inputs["position_ids"] != -1, 1, 0).sum(1, keepdims=True).max() + generation_len = self._fetch_generation_len(generation_len, max_gen_len) + + # Execute chunked prefill + outputs = self._execute_chunked_prefill(lang_inputs, num_chunks, decode_batch_id, 
prefill_logit_bs) + + self._session.skip_buffers(vision_outputs) + + # Prepare position_ids for decode phase (next position after prefill) + position_ids_decode = np.max(lang_inputs["position_ids"], axis=-1, keepdims=True) + 1 + + return outputs, position_ids_decode, generation_len + else: + # Fall back to base class for text-only + return super().run_prefill(prompt, generation_len, prefill_logit_bs, decode_batch_id) + + def _prepare_vision_language_prompt(self, text_prompt, image_path): + """ + Prepare text prompt with vision context + + This method handles the integration of vision and text inputs + according to the specific model's requirements. + """ + # For most vision-language models, we need to apply the chat template + # that includes both image and text components + try: + conversation = [ + { + "role": "user", + "content": [ + {"type": "text", "text": text_prompt}, + {"type": "image"}, + ], + }, + ] + + # Apply chat template + processed_prompt = self.processor.apply_chat_template(conversation, add_generation_prompt=True) + + return processed_prompt + + except Exception as e: + logger.warning(f"Failed to apply chat template: {e}. 
Using original prompt.") + return text_prompt + + def generate( + self, images: List[str], prompts: List[str], generation_len: Optional[int] = None, stream: bool = True, **kwargs + ) -> CloudAI100ExecInfo: + """ + Main generation method maintaining API compatibility with VisionLanguageGeneration + + Args: + images: List of image URLs/paths + prompts: List of text prompts + generation_len: Max generation length + stream: Enable streaming output + **kwargs: Additional arguments passed to base class + + Returns: + CloudAI100ExecInfo with results and metrics + + Raises: + ValueError: If images and prompts lengths don't match + """ + if len(images) != len(prompts): + raise ValueError(f"Number of images ({len(images)}) must match number of prompts ({len(prompts)})") + + # Clear vision cache for fresh generation + self._vision_cache.clear() + + logger.info(f"Generating for {len(images)} image-prompt pairs") + + # Convert to base class format: list of (image, prompt) tuples + vision_prompts = [(img, prompt) for img, prompt in zip(images, prompts)] + + # Use base class generate method with vision prompts + if self.full_batch_size is not None: + # Continuous batching mode (new capability) + return self._generate_continuous_batching(vision_prompts, generation_len, stream, **kwargs) + else: + # Regular batching mode + return self._generate_regular_batching(vision_prompts, generation_len, stream, **kwargs) + + def _generate_regular_batching(self, vision_prompts, generation_len, stream, **kwargs): + """Handle regular batching for vision-language generation without creating a second language session""" + batch_results = [] + for i in range(0, len(vision_prompts), self.batch_size): + batch = vision_prompts[i : i + self.batch_size] + + if stream: + print( + f"\nProcessing batch {i // self.batch_size + 1}/{(len(vision_prompts) - 1) // self.batch_size + 1}" + ) + for j, (img, prompt) in enumerate(batch): + print(f"Image: {img}") + print(f"Prompt: {prompt}") + print("Completion:", 
flush=True, end="") + + # Setup decode storage arrays for this batch (use ctx_len or generation_len whichever is larger) + exec_batch_size = self.batch_size + max_gen_length = self._ctx_len if not generation_len else max(self._ctx_len, generation_len) + self.initialize_decode_inputs( + num_prompts=len(batch), execution_batch_size=exec_batch_size, max_gen_length=max_gen_length + ) + + # Prefill using VLM-aware run_prefill (batch is a list of (image, text)) + start = perf_counter() + outputs, position_ids, generation_len_final = self.run_prefill( + batch, generation_len, prefill_logit_bs=self.batch_size + ) + self.update_decode_input(outputs, position_ids, generation_len_final) + + # Prepare decode + decode_inputs = self.prepare_decode_inputs() + + # Decode loop + loop_start = perf_counter() + num_token = self.run_decode(decode_inputs, generation_len_final, automation=False, streamer=None) + end = perf_counter() + + # Decode generated texts + generated_texts = self.tokenizer.batch_decode(self.generated_ids, skip_special_tokens=True) + + # Latency metrics + total_decode_tokens = num_token + prefill_time, decode_perf, total_perf, total_time = calculate_latency( + total_decode_tokens, loop_start, start, end + ) + perf_metrics = PerfMetrics(prefill_time, decode_perf, total_perf, total_time) + + # Package result for this batch + batch_results.append( + CloudAI100ExecInfo( + batch_size=self.batch_size, + generated_texts=generated_texts, + generated_ids=self.generated_ids, + perf_metrics=perf_metrics, + ) + ) + + # Aggregate results across batches + return self._aggregate_batch_results(batch_results) + + def _generate_continuous_batching(self, vision_prompts, generation_len, stream, **kwargs): + """Enable continuous batching for vision-language models (new capability)""" + logger.info("Using continuous batching for vision-language generation") + + if stream: + logger.warning("Streaming output not fully supported with continuous batching") + + # Reset vision processing state 
for new generation + self._vision_processed = False + self._vision_outputs = None + self._vision_outputs_cache = {} + + # Initialize decode inputs + num_prompts = len(vision_prompts) + execution_batch_size = self.full_batch_size + max_gen_length = self._ctx_len if not generation_len else max(self._ctx_len, generation_len) + + self.initialize_decode_inputs(num_prompts, execution_batch_size, max_gen_length) + if self.is_qwen2_5_vl: + self.decode_pos_ids = np.zeros((4, execution_batch_size, 1), np.int64) + + # Create prompt queue + prompt_queue = deque(vision_prompts) + + start = perf_counter() + + # Pre-process ALL vision inputs and cache them + logger.info("Pre-processing all vision inputs...") + for batch_id in range(min(self.full_batch_size, len(vision_prompts))): + img, prompt = vision_prompts[batch_id] + + # Process vision for this slot + lang_inputs, vision_outputs, num_chunks = self._vision_handler.get_processed_inputs( + image_url=img, query=prompt, prefill_seq_len=self._prefill_seq_len + ) + + # Cache vision outputs for this batch slot + self._vision_outputs_cache[batch_id] = { + "vision_outputs": vision_outputs, + "lang_inputs": lang_inputs, + "num_chunks": num_chunks, + } + + logger.debug(f"Cached vision outputs for batch_id {batch_id}") + + # Reset prompt queue for prefill + prompt_queue = deque(vision_prompts) + + self.batch_index = None + + # Run prefill for all inputs using cached vision + self.run_prefill_for_all_inputs_with_cached_vision(prompt_queue, generation_len) + + # Set vision buffers for decode (use first slot's vision for now) + # For identical images, any slot's vision works + cached_slot_0 = self._vision_outputs_cache.get(0) + if cached_slot_0: + self._session.set_buffers(cached_slot_0["vision_outputs"]) + logger.debug("Set vision buffers from slot 0 for decode phase") + + # Now set batch_index for decode phase + self.batch_index = np.arange(self.full_batch_size).reshape(-1, 1) + + loop_start = perf_counter() + decode_pause_time = 
self.run_continuous_batching_decode(prompt_queue, generation_len) + end = perf_counter() + + generated_texts = self.tokenizer.batch_decode(self.generated_ids, skip_special_tokens=True) + + total_decode_tokens = sum( + np.sum(self.generated_ids[i] != self.tokenizer.pad_token_id) - 1 for i in range(len(vision_prompts)) + ) + prefill_time, decode_perf, total_perf, total_time = calculate_latency( + total_decode_tokens, loop_start, start, end, decode_pause_time + ) + prefill_time /= len(vision_prompts) # Average prefill time for continuous batching + + perf_metrics = PerfMetrics(prefill_time, decode_perf, total_perf, total_time) + + return CloudAI100ExecInfo( + batch_size=1, generated_texts=generated_texts, generated_ids=self.generated_ids, perf_metrics=perf_metrics + ) + + def run_prefill_for_all_inputs_with_cached_vision(self, prompt_queue, generation_len): + """ + Runs prefill for all inputs using pre-cached vision outputs. + + This avoids the vision buffer overwriting issue by using cached vision + outputs instead of processing vision during each prefill iteration. + + Args: + prompt_queue (deque): The queue of prompts. + generation_len (int): The generation length. 
+ """ + for decode_batch_id in range(self.full_batch_size): + # Pop the promt as we are processing + _ = prompt_queue.popleft() + + # Get cached vision outputs for this batch slot + cached = self._vision_outputs_cache.get(decode_batch_id) + if cached: + vision_outputs = cached["vision_outputs"] + lang_inputs = cached["lang_inputs"] + num_chunks = cached["num_chunks"] + + # Set vision buffers for THIS prefill + self._session.set_buffers(vision_outputs) + logger.debug(f"Set vision buffers for batch_id {decode_batch_id} prefill") + + # Run prefill with cached inputs + outputs = self._execute_chunked_prefill( + lang_inputs, + num_chunks, + decode_batch_id=np.array(decode_batch_id, dtype=np.int64).reshape(1, 1), + prefill_logit_bs=1, + ) + + self._session.skip_buffers(vision_outputs.keys()) + + # Calculate position_ids for decode + position_ids_decode = np.max(lang_inputs["position_ids"], axis=-1, keepdims=True) + 1 + + # Calculate generation_len + max_gen_len = ( + self._ctx_len - np.where(lang_inputs["position_ids"] != -1, 1, 0).sum(1, keepdims=True).max() + ) + generation_len_final = self._fetch_generation_len(generation_len, max_gen_len) + + # Update decode inputs + if self.is_qwen2_5_vl: + self.update_decode_inputs_qwen2_5_vl( + outputs, position_ids_decode, generation_len_final, decode_batch_id + ) + else: + self.update_decode_input(outputs, position_ids_decode, generation_len_final, decode_batch_id) + else: + logger.error(f"No cached vision outputs for batch_id {decode_batch_id}") + raise RuntimeError(f"Vision outputs not cached for batch_id {decode_batch_id}") + + def prepare_decode_inputs(self): + """ + Override base class to handle vision-specific decode inputs + """ + decode_inputs = super().prepare_decode_inputs() + + # Add image_idx for vision-language models in CB mode during decode only + if self.batch_index is not None and hasattr(self, "_vision_outputs"): + # image_idx should be a single slot selector; decoder expects shape (1,1) + # Query binding dims 
if available to be robust + try: + if "image_idx" in getattr(self._session, "binding_index_map", {}): + idx = self._session.binding_index_map["image_idx"] + dims = tuple(self._session.bindings[idx].dims) + decode_inputs["image_idx"] = np.zeros(dims, dtype=np.int64) + else: + decode_inputs["image_idx"] = np.array([[0]], dtype=np.int64) + except Exception: + decode_inputs["image_idx"] = np.array([[0]], dtype=np.int64) + + # Include cross_attention_mask during decode if present/required + if hasattr(self, "_decode_cross_attention_mask") and self._decode_cross_attention_mask is not None: + # Decoder specialization expects a single mask (batch dim = 1) + decode_inputs["cross_attention_mask"] = self._decode_cross_attention_mask + + return decode_inputs + + def _aggregate_batch_results(self, batch_results): + """Aggregate results from multiple batches""" + if not batch_results: + raise ValueError("No batch results to aggregate") + + if len(batch_results) == 1: + return batch_results[0] + + # Aggregate multiple batch results + all_generated_texts = [] + all_generated_ids = [] + all_metrics = [] + + for result in batch_results: + if isinstance(result.generated_texts[0], list): + # Flatten nested lists + all_generated_texts.extend([text for batch in result.generated_texts for text in batch]) + else: + all_generated_texts.extend(result.generated_texts) + + if isinstance(result.generated_ids, list): + all_generated_ids.extend(result.generated_ids) + else: + all_generated_ids.append(result.generated_ids) + + all_metrics.append(result.perf_metrics) + + # Average metrics + avg_metrics = PerfMetrics( + prefill_time=np.mean([m.prefill_time for m in all_metrics]), + decode_perf=np.mean([m.decode_perf for m in all_metrics]), + total_perf=np.mean([m.total_perf for m in all_metrics]), + total_time=np.mean([m.total_time for m in all_metrics]), + ) + + return CloudAI100ExecInfo( + batch_size=batch_results[0].batch_size, + generated_texts=all_generated_texts, + 
generated_ids=all_generated_ids, + perf_metrics=avg_metrics, + ) + + def generate_stream_tokens( + self, images: List[str], prompts: List[str], generation_len: Optional[int] = None, **kwargs + ): + """ + Enable token-by-token streaming for vision models (new capability) + + Args: + images: List of image URLs/paths + prompts: List of text prompts + generation_len: Max generation length + **kwargs: Additional arguments + + Yields: + List of decoded tokens for each batch position + + Raises: + NotImplementedError: If continuous batching is enabled + """ + if self.full_batch_size is not None: + raise NotImplementedError("Token streaming not supported with continuous batching for VLM") + + if len(images) != len(prompts): + raise ValueError(f"Number of images ({len(images)}) must match number of prompts ({len(prompts)})") + + logger.info(f"Starting token streaming for {len(images)} image-prompt pairs") + + vision_prompts = [(img, prompt) for img, prompt in zip(images, prompts)] + + text_gen = TextGeneration( + tokenizer=self.tokenizer, + qpc_path=self._qpc_path, + ctx_len=self._ctx_len, + device_id=self.device_id, + enable_debug_logs=self.enable_debug_logs, + is_tlm=self.is_tlm, + include_sampler=self.include_sampler, + return_pdfs=self.return_pdfs, + sampling_params=self.sampling_params, + ) + + text_gen._qaic_model = self + + # Yield tokens as they're generated + for tokens in text_gen.generate_stream_tokens(vision_prompts, generation_len, **kwargs): + yield tokens + + def __repr__(self): + """String representation of the class""" + return ( + f"VisionLanguageGeneration(" + f"batch_size={self.batch_size}, " + f"ctx_len={self._ctx_len}, " + f"continuous_batching={'enabled' if self.full_batch_size else 'disabled'}, " + f"sampling={'enabled' if self.include_sampler else 'disabled'})" + ) diff --git a/QEfficient/transformers/models/llama4/modeling_llama4.py b/QEfficient/transformers/models/llama4/modeling_llama4.py index 212fe16ae..b7b951101 100644 --- 
a/QEfficient/transformers/models/llama4/modeling_llama4.py +++ b/QEfficient/transformers/models/llama4/modeling_llama4.py @@ -820,7 +820,7 @@ def forward(self, pixel_values): ) vision_flat = image_features.view(-1, image_features.size(-1)) projected_vision_flat = self.model.multi_modal_projector(vision_flat) - return projected_vision_flat + return projected_vision_flat # , pixel_values # This wrapper utilizes the 'vision_embeds', which contains vision embeddings, and an 'image_idx' index starting at 0. @@ -836,7 +836,15 @@ def __init__(self, model): self.language_model = self.model.language_model self.config = self.model.config - def forward(self, input_ids, vision_embeds, position_ids, image_idx, past_key_values): + def forward( + self, + input_ids, + vision_embeds, + position_ids, + image_idx, + past_key_values, + batch_index: Optional[torch.LongTensor] = None, + ): inputs_embeds = self.model.language_model.get_input_embeddings()(input_ids) selected = input_ids == self.model.config.image_token_index indices1 = selected.to(torch.int64).cumsum(1) - 1 @@ -846,7 +854,11 @@ def forward(self, input_ids, vision_embeds, position_ids, image_idx, past_key_va image_embeds = torch.where(selected.unsqueeze(-1), image_features_expanded, inputs_embeds) inputs_embeds = torch.where(input_ids.shape[1] == torch.tensor(1), inputs_embeds, image_embeds) outputs = self.model.language_model( - inputs_embeds=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, use_cache=True + inputs_embeds=inputs_embeds, + position_ids=position_ids, + past_key_values=past_key_values, + batch_index=batch_index, + use_cache=True, ) next_idx = (indices1.max() + 1).unsqueeze(0).unsqueeze(0) image_idx = torch.where(image_idx < next_idx, next_idx, image_idx) @@ -893,6 +905,9 @@ def get_specializations( ctx_len: int, img_size: int, kv_offload: bool = False, + continuous_batching: bool = False, + kv_cache_batch_size: Optional[int] = None, + full_batch_size: Optional[int] = None, 
**compiler_options, ): max_num_tiles = compiler_options.pop("max_num_tiles", None) @@ -941,28 +956,42 @@ def get_specializations( "img_size": img_size, } ] - lang = [ - { - "batch_size": batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "max_num_tiles": max_num_tiles, - "img_size": img_size, - "vision_size": vision_size, - "chunk_length": prefill_seq_len, - "chunk_ctx_len": chunk_ctx_len, - }, - { - "batch_size": batch_size, - "seq_len": "1", - "ctx_len": ctx_len, - "max_num_tiles": max_num_tiles, - "img_size": img_size, - "vision_size": vision_size, - "chunk_length": prefill_seq_len, - "chunk_ctx_len": chunk_ctx_len, - }, - ] + + lang_prefill = { + "batch_size": 1 if continuous_batching else batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "max_num_tiles": max_num_tiles, + "img_size": img_size, + "vision_size": vision_size, + "chunk_length": prefill_seq_len, + "chunk_ctx_len": chunk_ctx_len, + } + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + + lang_decode = { + "batch_size": full_batch_size if continuous_batching else batch_size, + "seq_len": 1, + "ctx_len": ctx_len, + "max_num_tiles": max_num_tiles, + "img_size": img_size, + "vision_size": vision_size, + "chunk_length": prefill_seq_len, + "chunk_ctx_len": chunk_ctx_len, + } + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size + + lang = [] + lang.append(lang_prefill) + lang.append(lang_decode) specializations = {} @@ -971,18 +1000,22 @@ def get_specializations( specializations["lang"] = lang return specializations, compiler_options else: + lang[0].pop("vision_size") + lang[1].pop("vision_size") return lang, compiler_options - def get_onnx_dynamic_axes(self, kv_offload: bool = False): + def get_onnx_dynamic_axes(self, 
kv_offload: bool = False, continuous_batching: bool = False): # Define dynamic axes vision_dynamic_axes = {} lang_dynamic_axes = {} lang_dynamic_axes["input_ids"] = {0: "batch_size", 1: "seq_len"} lang_dynamic_axes["position_ids"] = {0: "batch_size", 1: "seq_len"} lang_dynamic_axes["vision_embeds"] = {0: "vision_size"} + if continuous_batching: + lang_dynamic_axes["batch_index"] = {0: "batch_size"} vision_dynamic_axes["pixel_values"] = {0: "max_num_tiles", 2: "img_size", 3: "img_size"} - pkv_dynamic_axes = {0: "batch_size"} + pkv_dynamic_axes = {0: "full_batch_size" if continuous_batching else "batch_size"} for i in range(self.language_model.config.num_hidden_layers): # switch between chunk_ctx_len and ctx_len for RoPE and NoPE layers. if int((i + 1) % 4 != 0): @@ -1011,6 +1044,7 @@ def get_output_names(self, kv_offload: bool = False): output_names = {} if kv_offload: + # vision_output_names.insert(1, "pixel_values_RetainedState") lang_output_names.insert(1, "vision_embeds_RetainedState") lang_output_names.insert(2, "image_idx_output") output_names["vision"] = vision_output_names @@ -1045,7 +1079,7 @@ def get_dummy_pkv_cache(self, config, batch_size, seq_len): past_key_values.append(pkv) return past_key_values - def get_dummy_inputs(self, kv_offload: bool = False): + def get_dummy_inputs(self, kv_offload: bool = False, continuous_batching: bool = False): if vis_cfg := getattr(self.config, "vision_config", None): img_size = getattr(vis_cfg, "image_size", 336) else: @@ -1090,10 +1124,14 @@ def get_dummy_inputs(self, kv_offload: bool = False): .repeat(constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, 1) ) lang_inputs["image_idx"] = torch.zeros((inputs_shapes["image_idx"]), dtype=torch.int64) + + bs: int = constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE + fbs: int = constants.ONNX_EXPORT_EXAMPLE_FBS + # Add data for KV past_key_values = self.get_dummy_pkv_cache( config=self.language_model.config, - batch_size=constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, + batch_size=fbs if 
continuous_batching else bs, seq_len=constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN, ) @@ -1102,6 +1140,8 @@ def get_dummy_inputs(self, kv_offload: bool = False): for kv in ["key", "value"]: lang_inputs["past_key_values"][i].append(torch.zeros(past_key_values[0][0].shape, dtype=torch.float32)) + if continuous_batching: + lang_inputs["batch_index"] = torch.arange(bs).view(bs, 1) inputs = {} if kv_offload: inputs["vision"] = vision_inputs diff --git a/QEfficient/transformers/models/modeling_auto.py b/QEfficient/transformers/models/modeling_auto.py index 633a0b29d..aeb72d858 100644 --- a/QEfficient/transformers/models/modeling_auto.py +++ b/QEfficient/transformers/models/modeling_auto.py @@ -14,6 +14,7 @@ import torch import torch.nn as nn from transformers import ( + AutoImageProcessor, AutoModel, AutoModelForCausalLM, AutoModelForCTC, @@ -35,6 +36,7 @@ calculate_latency, get_compilation_dims, ) +from QEfficient.generation.vlm_generation import VisionLanguageGeneration from QEfficient.transformers.modeling_utils import DYNAMIC_SEQ_LEN_SUPPORTED_MODEL_ARCH from QEfficient.transformers.models.pytorch_transforms import ( CustomOpsTransform, @@ -856,6 +858,7 @@ class _QEffAutoModelForImageTextToTextDualQPC: def __init__( self, model: nn.Module, + continuous_batching: bool = False, **kwargs, ): """ @@ -879,6 +882,7 @@ def __init__( self.config = model.config self.vision_model = QEffVisionEncoderForTextImageToTextModel(model, **kwargs) self.lang_model = QEffCausalLMForTextImageToTextModel(model, **kwargs) + self.continuous_batching = continuous_batching self.input_shapes, self.output_names = None, None @property @@ -978,8 +982,15 @@ def export( List[str] A list containing the paths to the generated ONNX graph files for both components. """ - inputs = self.model.get_dummy_inputs(kv_offload=True) - dynamic_axes = self.model.get_onnx_dynamic_axes(kv_offload=True) + # TODO This is a temporary change as continous batching is enabled only for few models. 
Once support is added for all the models this exception handing can be removed. + try: + inputs = self.model.get_dummy_inputs(kv_offload=True, continuous_batching=self.continuous_batching) + dynamic_axes = self.model.get_onnx_dynamic_axes( + kv_offload=True, continuous_batching=self.continuous_batching + ) + except TypeError: + inputs = self.model.get_dummy_inputs(kv_offload=True) + dynamic_axes = self.model.get_onnx_dynamic_axes(kv_offload=True) output_names = self.model.get_output_names(kv_offload=True) self.vision_model.export( @@ -1011,7 +1022,6 @@ def compile( num_cores: int = 16, # FIXME: Make this mandatory arg mxfp6_matmul: bool = False, mxint8_kv_cache: bool = False, - num_speculative_tokens: Optional[int] = None, skip_vision: Optional[bool] = False, skip_lang: Optional[bool] = False, **compiler_options, @@ -1068,14 +1078,20 @@ def compile( If `full_batch_size`, `kv_cache_batch_size`, or `num_speculative_tokens` are not None. If both `skip_lang` and `skip_vision` are True. """ - if any(param is not None for param in [full_batch_size, kv_cache_batch_size, num_speculative_tokens]): + if skip_lang and skip_vision: + raise ValueError("Expected at least one of 'skip_lang' or 'skip_vision' to be False") + + if self.continuous_batching and full_batch_size is None: + raise TypeError("`full_batch_size` is required when `continuous_batching=True`.") + + if kv_cache_batch_size and not full_batch_size: raise ValueError( - f"Expected 'full_batch_size', 'kv_cache_batch_size', 'num_speculative_tokens' to be None but got: " - f"full_batch_size={full_batch_size}, kv_cache_batch_size={kv_cache_batch_size}, num_speculative_tokens={num_speculative_tokens}, " + "KV caching requires continuous batching. Please set `full_batch_size` and " + "enable `continuous_batching=True` in `from_pretrained`." 
) - if skip_lang and skip_vision: - raise ValueError("Expected at least one of 'skip_lang' or 'skip_vision' to be False") + # Infer kv_cache_batch_size if not provided + kv_cache_batch_size = kv_cache_batch_size or full_batch_size or batch_size output_names = self.model.get_output_names(kv_offload=True) @@ -1085,6 +1101,9 @@ def compile( ctx_len=ctx_len, img_size=img_size, kv_offload=True, + continuous_batching=self.continuous_batching, + kv_cache_batch_size=kv_cache_batch_size, + full_batch_size=full_batch_size, **compiler_options, ) @@ -1111,6 +1130,11 @@ def compile( ): self.export() + # TODO this hould be removed once the continous batching is supported for all the models. + compiler_options.pop("continuous_batching", None) + compiler_options.pop("kv_cache_batch_size", None) + compiler_options.pop("full_batch_size", None) + if not skip_vision: self.vision_model._compile( compile_dir=compile_dir, @@ -1156,7 +1180,11 @@ def compile( def generate( self, - inputs: torch.Tensor, + inputs: Optional[torch.Tensor] = None, + tokenizer: Union[PreTrainedTokenizerFast, PreTrainedTokenizer] = None, + processor: Optional[AutoImageProcessor] = None, + images: List[str] = None, + prompts: List[str] = None, streamer: Optional[TextStreamer] = None, device_ids: List[int] = None, runtime_ai100: bool = True, @@ -1172,6 +1200,14 @@ def generate( inputs : Dict[str, Union[torch.Tensor, np.ndarray]] Inputs to run the execution, typically includes `pixel_values`, `input_ids`, `attention_mask`, etc. + tokenizer : PreTrainedTokenizer or PreTrainedTokenizerFast, optional + Tokenizer for the model. Used when images and prompts are provided. + processor : AutoImageProcessor, optional + Processor for the model. Used when images and prompts are provided. + images : List[str], optional + List of image paths or PIL images to process. + prompts : List[str], optional + List of text prompts corresponding to the images. 
streamer : TextStreamer, optional A streamer object to display generated tokens in real-time. Default is None. device_ids : List[int], optional @@ -1196,6 +1232,30 @@ def generate( if not runtime_ai100: raise NotImplementedError("PyTorch execution is not supported yet for this model!") + # Use VisionLanguageGeneration for image-prompt pairs + if (processor and images) or (tokenizer and prompts): + # Create VisionLanguageGeneration instance + batch_size_comp, ctx_len_comp, fbs = get_compilation_dims(self.lang_model.qpc_path) + vlm_gen = VisionLanguageGeneration( + qeff_model=self, + lang_qpc_path=self.lang_model.qpc_path, + vision_qpc_path=self.vision_model.qpc_path, + tokenizer=tokenizer, + processor=processor, + device_id=device_ids, # if device_ids is not None else [0], + ctx_len=ctx_len_comp, + full_batch_size=fbs, + ) + + # Call generate method + return vlm_gen.generate( + images=images, + prompts=prompts, + generation_len=generation_len, + stream=streamer is not None, + ) + + # Fallback to kv_offload_generate for direct inputs (backward compatibility) return self.kv_offload_generate( inputs=inputs, device_ids=device_ids, streamer=streamer, generation_len=generation_len ) @@ -1332,9 +1392,7 @@ def kv_offload_generate( lang_session.set_buffers(vision_outputs) - # Prepare inputs for prefill - chunk_inputs = lang_inputs.copy() - prefill_start = perf_counter() + lang_start = perf_counter() # Run prefill chunk_inputs = lang_inputs.copy() @@ -1346,7 +1404,7 @@ def kv_offload_generate( outputs = lang_session.run(chunk_inputs) chunk_inputs["image_idx"] = outputs["image_idx_output"] - prefill_time = perf_counter() - prefill_start + vision_end - vision_start + prefill_time = perf_counter() - lang_start + vision_end - vision_start # Skip inputs/outputs again lang_session.skip_buffers( [ @@ -1930,7 +1988,7 @@ class QEFFAutoModelForImageTextToText: _hf_auto_class = AutoModelForImageTextToText - def __new__(self, model: nn.Module, kv_offload: Optional[bool] = True, 
**kwargs): + def __new__(self, model: nn.Module, kv_offload: Optional[bool] = True, continuous_batching: bool = False, **kwargs): """ Instantiate the appropriate internal class for single or dual QPC mode. @@ -1951,13 +2009,19 @@ def __new__(self, model: nn.Module, kv_offload: Optional[bool] = True, **kwargs) The wrapped model instance, configured for either dual or single QPC. """ if kv_offload: - return _QEffAutoModelForImageTextToTextDualQPC(model, **kwargs) + return _QEffAutoModelForImageTextToTextDualQPC(model, continuous_batching, **kwargs) else: return _QEFFAutoModelForImageTextToTextSingleQPC(model, **kwargs) @classmethod @with_replaced_quantizers - def from_pretrained(cls, pretrained_model_name_or_path: str, kv_offload: Optional[bool] = None, **kwargs): + def from_pretrained( + cls, + pretrained_model_name_or_path: str, + kv_offload: Optional[bool] = None, + continuous_batching: bool = False, + **kwargs, + ): """ Load a QEfficient image-text-to-text model from a pretrained HuggingFace model or local path. @@ -1986,18 +2050,24 @@ def from_pretrained(cls, pretrained_model_name_or_path: str, kv_offload: Optiona If `continuous_batching` is provided as True. """ # TODO: add a check to see if kv_offload is allowed for given model by loading the config and checking architecture or type of config here. 
+ if continuous_batching and not kv_offload: + NotImplementedError("Continuous batching is not supported for kv_offload = False") + if kwargs.get("attn_implementation", None) not in {None, "eager"}: logger.warning('Updating attn_implementation="eager"') if kwargs.get("low_cpu_mem_usage", None): logger.warning("Updating low_cpu_mem_usage=False") - if kwargs.pop("continuous_batching", None): - NotImplementedError("Continuous batching is not supported for image-text-to-text models yet.") - kwargs.update({"attn_implementation": "eager", "low_cpu_mem_usage": False}) model = cls._hf_auto_class.from_pretrained(pretrained_model_name_or_path, **kwargs) - return cls(model, kv_offload=kv_offload, pretrained_model_name_or_path=pretrained_model_name_or_path, **kwargs) + return cls( + model, + kv_offload=kv_offload, + continuous_batching=continuous_batching, + pretrained_model_name_or_path=pretrained_model_name_or_path, + **kwargs, + ) MISCLASSIFIED_CAUSAL_LM_TO_QEFF_AUTO_CLASS_MAP = { @@ -2705,8 +2775,8 @@ def generate( raise TypeError("Please run compile API first!") generation_len = kwargs.pop("generation_len", None) return QEfficient.cloud_ai_100_exec_kv( - tokenizer, - self.qpc_path, + tokenizer=tokenizer, + qpc_path=self.qpc_path, prompt=prompts, device_id=device_id, generation_len=generation_len, diff --git a/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py b/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py index e5e842e6f..0f6630210 100644 --- a/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +++ b/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py @@ -6,6 +6,7 @@ # ----------------------------------------------------------------------------- import math +import os from typing import Callable, List, Optional, Tuple, Union import torch @@ -360,7 +361,7 @@ def forward(self, x, seq_len=None): ) -def eager_attention_forward( +def eager_attention_forward_q_blocked( module: nn.Module, query: torch.Tensor, key: 
torch.Tensor, @@ -368,22 +369,107 @@ def eager_attention_forward( attention_mask: Optional[torch.Tensor], **kwargs, ): + """ + Q-blocked attention for Qwen2.5-VL. + Blocks only the query SL dimension. + + Args: + query: (BS, NH, Q_LEN, DH) + key: (BS, NH_KV, KV_LEN, DH) + value: (BS, NH_KV, KV_LEN, DH) + attention_mask: (BS, NH, Q_LEN, KV_LEN) or broadcastable + """ + BS, NH, Q_LEN, DH = query.shape + _, _, KV_LEN, _ = key.shape + key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) - attn_weights = torch.matmul(query, key_states.transpose(2, 3)) / math.sqrt(module.head_dim) - if attention_mask is not None: - attn_weights = torch.where( - attention_mask, torch.tensor(MIN_MASKED_ATTENTION_VALUE, dtype=torch.float32), attn_weights - ) + target_blocks_q = int(os.environ.get("num_q_blocks", Q_LEN)) + q_block_positions = [(i * Q_LEN) // target_blocks_q for i in range(target_blocks_q)] + scaling = 1.0 / math.sqrt(module.head_dim) + + q_output_blocks = [] + q_attn_weights_blocks = [] - attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) - attn_output = torch.matmul(attn_weights, value_states) + # Process each Q block + for q_block_idx in range(target_blocks_q): + qi = q_block_positions[q_block_idx] + + # Calculate Q block size + if q_block_idx == target_blocks_q - 1: + real_q_len = Q_LEN - qi + else: + real_q_len = q_block_positions[q_block_idx + 1] - qi + + # Extract Q block + q_block = query[:, :, qi : qi + real_q_len, :] + attn_mask_block = None + if attention_mask is not None: + attn_mask_block = attention_mask[:, :, qi : qi + real_q_len, :] + + # Compute attention scores for this Q block + attn_weights = torch.matmul(q_block, key_states.transpose(2, 3)) * scaling + if attn_mask_block is not None: + attn_weights = torch.where( + attn_mask_block, + torch.tensor(MIN_MASKED_ATTENTION_VALUE, dtype=torch.float32, device=attn_weights.device), + attn_weights, + ) + + 
attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) + + # Compute output for this Q block + output_block = torch.matmul(attn_weights, value_states) + + q_output_blocks.append(output_block) + q_attn_weights_blocks.append(attn_weights) + + attn_output = torch.cat(q_output_blocks, dim=2) attn_output = attn_output.transpose(1, 2).contiguous() + # Concatenate attention weights + attn_weights = torch.cat(q_attn_weights_blocks, dim=2) + return attn_output, attn_weights +def eager_attention_forward( + module: nn.Module, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attention_mask: Optional[torch.Tensor], + **kwargs, +): + """ + Wrapper that routes to blocked or default attention based on environment variable. + """ + blocking_mode = os.environ.get("ATTENTION_BLOCKING_MODE", "default").lower() + + if blocking_mode == "q": + return eager_attention_forward_q_blocked(module, query, key, value, attention_mask, **kwargs) + elif blocking_mode == "default": + # Original implementation + key_states = repeat_kv(key, module.num_key_value_groups) + value_states = repeat_kv(value, module.num_key_value_groups) + + attn_weights = torch.matmul(query, key_states.transpose(2, 3)) / math.sqrt(module.head_dim) + + if attention_mask is not None: + attn_weights = torch.where( + attention_mask, torch.tensor(MIN_MASKED_ATTENTION_VALUE, dtype=torch.float32), attn_weights + ) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) + attn_output = torch.matmul(attn_weights, value_states) + attn_output = attn_output.transpose(1, 2).contiguous() + + return attn_output, attn_weights + else: + raise ValueError(f"Invalid ATTENTION_BLOCKING_MODE: {blocking_mode}. Must be 'q' or 'default'") + + class QEffQwen2_5_VLAttention(Qwen2_5_VLAttention): """ Multi-headed attention from 'Attention Is All You Need' paper. 
Modified to use sliding window attention: Longformer @@ -680,7 +766,15 @@ def __init__(self, model): self.model = model self.language_model = self.model.model.language_model - def forward(self, input_ids, vision_embeds, position_ids, image_idx, past_key_values): + def forward( + self, + input_ids, + vision_embeds, + position_ids, + image_idx, + past_key_values, + batch_index: Optional[torch.LongTensor] = None, + ): inputs_embeds = self.model.get_input_embeddings()(input_ids) B, N, C = inputs_embeds.shape selected = input_ids == self.model.config.image_token_id @@ -691,7 +785,11 @@ def forward(self, input_ids, vision_embeds, position_ids, image_idx, past_key_va image_input_embeds = torch.where(selected.unsqueeze(-1), image_features_expanded, inputs_embeds) inputs_embeds = torch.where(input_ids.shape[1] == torch.tensor(1), inputs_embeds, image_input_embeds) outputs = self.model.model( - inputs_embeds=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, use_cache=True + inputs_embeds=inputs_embeds, + position_ids=position_ids, + past_key_values=past_key_values, + batch_index=batch_index, + use_cache=True, ) logit_index = position_ids[0].to(torch.int32).argmax(1, keepdim=True) @@ -709,7 +807,7 @@ def get_qeff_vision_encoder(self): def get_qeff_language_decoder(self): return QEffQwen_2_5_vl_DecoderWrapper(self) - def get_dummy_inputs(self, kv_offload: bool = False, **kwargs): + def get_dummy_inputs(self, kv_offload: bool = False, continuous_batching: bool = False, **kwargs): inputs_shapes = {} inputs_shapes["input_ids"] = (constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN) @@ -745,10 +843,14 @@ def get_dummy_inputs(self, kv_offload: bool = False, **kwargs): .repeat(4, 1, 1) ) lang_inputs["image_idx"] = torch.zeros((inputs_shapes["image_idx"]), dtype=torch.int64) + + bs: int = constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE + fbs: int = constants.ONNX_EXPORT_EXAMPLE_FBS + # Add data for KV kv_cache_shape = 
get_padding_shape_from_config( - config=self.model.config, - batch_size=constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, + config=self.model.config.text_config, + batch_size=fbs if continuous_batching else bs, seq_len=constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN, ) @@ -757,6 +859,9 @@ def get_dummy_inputs(self, kv_offload: bool = False, **kwargs): for kv in ["key", "value"]: lang_inputs["past_key_values"][i].append(torch.zeros(kv_cache_shape, dtype=torch.float32)) + if continuous_batching: + lang_inputs["batch_index"] = torch.arange(bs).view(bs, 1) + inputs = {} if kv_offload: inputs["vision"] = vision_inputs @@ -775,7 +880,11 @@ def get_specializations( img_size: None, height: int = None, width: int = None, + num_frames: int = 1, kv_offload: bool = False, + continuous_batching: bool = False, + kv_cache_batch_size: Optional[int] = None, + full_batch_size: Optional[int] = None, **compiler_options, ): if height is None or width is None: @@ -856,20 +965,37 @@ def smart_resize( "grid_w": grid_w, } ] - lang = [ - { - "batch_size": batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "vision_size": vision_size, - }, - { - "batch_size": batch_size, - "seq_len": "1", - "ctx_len": ctx_len, - "vision_size": vision_size, - }, - ] + lang_prefill = { + "batch_size": 1 if continuous_batching else batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "vision_size": vision_size, + "vision_batch_size": batch_size, + } + + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + + lang_decode = { + "batch_size": full_batch_size if continuous_batching else batch_size, + "seq_len": 1, + "ctx_len": ctx_len, + "vision_size": vision_size, + "vision_batch_size": batch_size, + } + + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size + 
+ lang = [] + lang.append(lang_prefill) + lang.append(lang_decode) specializations = {} @@ -878,9 +1004,11 @@ def smart_resize( specializations["lang"] = lang return specializations, compiler_options else: + lang[0].pop("vision_size") + lang[1].pop("vision_size") return lang, compiler_options - def get_onnx_dynamic_axes(self, kv_offload: bool = False): + def get_onnx_dynamic_axes(self, kv_offload: bool = False, continuous_batching: bool = False): # Define dynamic axes num_layers = self.config.text_config.num_hidden_layers @@ -892,12 +1020,21 @@ def get_onnx_dynamic_axes(self, kv_offload: bool = False): lang_dynamic_axes = { "input_ids": {0: "batch_size", 1: "seq_len"}, "position_ids": {1: "batch_size", 2: "seq_len"}, - "vision_embeds": {0: "batch_size", 1: "vision_size"}, + "vision_embeds": {0: "vision_batch_size", 1: "vision_size"}, } for i in range(num_layers): - lang_dynamic_axes[f"past_key.{i}"] = {0: "batch_size", 2: "ctx_len"} - lang_dynamic_axes[f"past_value.{i}"] = {0: "batch_size", 2: "ctx_len"} + lang_dynamic_axes[f"past_key.{i}"] = { + 0: "full_batch_size" if continuous_batching else "batch_size", + 2: "ctx_len", + } + lang_dynamic_axes[f"past_value.{i}"] = { + 0: "full_batch_size" if continuous_batching else "batch_size", + 2: "ctx_len", + } + + if continuous_batching: + lang_dynamic_axes["batch_index"] = {0: "batch_size"} dynamic_axes = {} diff --git a/QEfficient/utils/__init__.py b/QEfficient/utils/__init__.py index e487d4af4..49f0ad30b 100755 --- a/QEfficient/utils/__init__.py +++ b/QEfficient/utils/__init__.py @@ -10,6 +10,7 @@ undo_transformers_quantizers, ) from QEfficient.utils._utils import ( # noqa: F401 + LRUCache, check_and_assign_cache_dir, create_json, create_model_params, diff --git a/QEfficient/utils/_utils.py b/QEfficient/utils/_utils.py index abe383556..d58f54952 100644 --- a/QEfficient/utils/_utils.py +++ b/QEfficient/utils/_utils.py @@ -33,6 +33,36 @@ from QEfficient.utils.logging_utils import logger +class LRUCache: + """Simple LRU 
cache with size limit for vision outputs""" + + def __init__(self, max_size=100): + self._cache = {} + self._access_order = [] + self._max_size = max_size + + def get(self, key): + if key in self._cache: + self._access_order.remove(key) + self._access_order.append(key) + return self._cache[key] + return None + + def put(self, key, value): + if key in self._cache: + self._access_order.remove(key) + elif len(self._cache) >= self._max_size: + oldest = self._access_order.pop(0) + del self._cache[oldest] + + self._cache[key] = value + self._access_order.append(key) + + def clear(self): + self._cache.clear() + self._access_order.clear() + + class DownloadRetryLimitExceeded(Exception): """ Used for raising error when hf_download fails to download the model after given max_retries. diff --git a/examples/llama4_CB_example_vision_lang.py b/examples/llama4_CB_example_vision_lang.py new file mode 100644 index 000000000..f285ea278 --- /dev/null +++ b/examples/llama4_CB_example_vision_lang.py @@ -0,0 +1,93 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ---------------------------------------------------------------------------- + +import transformers +from transformers import AutoConfig, AutoProcessor + +from QEfficient import QEFFAutoModelForImageTextToText + +model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct" +config = AutoConfig.from_pretrained(model_id) +# For Testing Purpose Only +config.text_config.num_hidden_layers = 4 +config.vision_config.num_hidden_layers = 2 + +tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) +processor = AutoProcessor.from_pretrained(model_id) + +continious_batching = False +if continious_batching: + qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( + model_id, + attn_implementation="eager", + kv_offload=True, + config=config, + continuous_batching=True, + ) + + qeff_model.compile( + prefill_seq_len=128, + ctx_len=3072, + img_size=336, + num_cores=16, + num_devices=4, + max_num_tiles=17, + batch_size=1, + full_batch_size=4, + mxfp6_matmul=True, + mxint8_kv_cache=True, + aic_enable_depth_first=True, + mos=1, + ) +else: + qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( + model_id, + attn_implementation="eager", + kv_offload=True, + config=config, + ) + + qeff_model.compile( + prefill_seq_len=128, + ctx_len=3072, + img_size=336, + num_cores=16, + num_devices=4, + max_num_tiles=17, + batch_size=1, + mxfp6_matmul=True, + mxint8_kv_cache=True, + aic_enable_depth_first=True, + mos=1, + ) + +image_urls = [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + 
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", +] + +prompts = [ + "Can you describe the image in detail?", + "What are the objects in the image?", + "What is the main subject of the image?", + "What colors are predominant in the image?", +] + +exec_info = qeff_model.generate( + tokenizer=tokenizer, + prompts=prompts, + processor=processor, + images=image_urls, + device_ids=[0, 1, 2, 3], + generation_len=100, +) + +# print("Generated texts:", exec_info.generated_texts) +print("Generated IDs:", exec_info.generated_ids) +print(exec_info) diff --git a/examples/qwen2_5_vl_CB.py b/examples/qwen2_5_vl_CB.py new file mode 100644 index 000000000..96ef4898a --- /dev/null +++ b/examples/qwen2_5_vl_CB.py @@ -0,0 +1,72 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +# If we want to enable QBlocking Run below command:, default is without blocking +# ATTENTION_BLOCKING_MODE=q num_q_blocks=2 python -W ignore qwen2_5_vl_example.py + +import transformers +from transformers import AutoConfig, AutoProcessor, TextStreamer + +from QEfficient import QEFFAutoModelForImageTextToText + +## For AWQ model update pytorch version to 2.8.* +model_id = "Qwen/Qwen2.5-VL-32B-Instruct" +config = AutoConfig.from_pretrained(model_id) +config.text_config.num_hidden_layers = 2 + +qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( + model_id, + attn_implementation="eager", + kv_offload=True, + config=config, + continuous_batching=True, +) +tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) +processor = AutoProcessor.from_pretrained(model_id) + +batch_size = 1 +## Vision + Text ## +qeff_model.compile( + batch_size=batch_size, + full_batch_size=4, + 
prefill_seq_len=128, + ctx_len=4096, + num_cores=16, + num_devices=4, + height=354, + width=536, + mxfp6_matmul=True, + mxint8_kv_cache=True, + aic_enable_depth_first=True, + mos=1, +) + +image_urls = [ + "https://picsum.photos/id/237/536/354", + "https://picsum.photos/id/237/536/354", + "https://picsum.photos/id/237/536/354", + "https://picsum.photos/id/237/536/354", +] + +prompts = [ + "Can you describe the image in detail?", + "What are the objects in the image?", + "What is the main subject of the image?", + "What colors are predominant in the image?", +] + +streamer = TextStreamer(tokenizer) +output = qeff_model.generate( + tokenizer=tokenizer, + prompts=prompts, + processor=processor, + images=image_urls, + generation_len=100, +) +print(output.generated_ids) +print(tokenizer.batch_decode(output.generated_ids)) +print(output) diff --git a/examples/qwen2_5_vl_example.py b/examples/qwen2_5_vl_example.py index 374f70ad2..d5d943c9c 100644 --- a/examples/qwen2_5_vl_example.py +++ b/examples/qwen2_5_vl_example.py @@ -5,6 +5,9 @@ # # ----------------------------------------------------------------------------- +# If we want to enable QBlocking Run below command:, default is without blocking +# ATTENTION_BLOCKING_MODE=q num_q_blocks=2 python -W ignore qwen2_5_vl_example.py + import requests import transformers from PIL import Image From 848dc6e83c33f6cfb01da205d22b532e0efae23e Mon Sep 17 00:00:00 2001 From: Mohit Soni Date: Tue, 4 Nov 2025 18:04:41 +0530 Subject: [PATCH 08/60] Modeling fix (#605) Signed-off-by: Mohit Soni Co-authored-by: Mohit Soni Signed-off-by: Dhiraj Kumar Sah --- QEfficient/transformers/models/modeling_auto.py | 2 ++ .../transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py | 1 + 2 files changed, 3 insertions(+) diff --git a/QEfficient/transformers/models/modeling_auto.py b/QEfficient/transformers/models/modeling_auto.py index aeb72d858..d92f63d3b 100644 --- a/QEfficient/transformers/models/modeling_auto.py +++ 
b/QEfficient/transformers/models/modeling_auto.py @@ -1413,6 +1413,8 @@ def kv_offload_generate( if x.startswith("past_") or x.endswith("_RetainedState") ] ) + if not_mllama: + lang_session.skip_buffers(vision_outputs.keys()) # Get first token lang_inputs["input_ids"] = outputs["logits"].argmax(2) diff --git a/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py b/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py index 0f6630210..445c15583 100644 --- a/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +++ b/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py @@ -953,6 +953,7 @@ def smart_resize( grid_height = grid_h * grid_w grid_width = patch_size * patch_size * temporal_patch_size * channel vision_size = grid_height // 4 + vision_size = vision_size * num_frames grid_height = grid_height * batch_size vision = [ From 171af206d3decb0067c857e7b114192831cbd986 Mon Sep 17 00:00:00 2001 From: Onkar Chougule <168134249+ochougul@users.noreply.github.com> Date: Wed, 5 Nov 2025 11:39:04 +0530 Subject: [PATCH 09/60] New PR for GPTOSS decode-only model (#603) Signed-off-by: vbaddi Signed-off-by: Onkar Chougule Signed-off-by: Mamta Singh Signed-off-by: Mamta Singh <168400541+quic-mamta@users.noreply.github.com> Co-authored-by: Vinayak Baddi Co-authored-by: Vinayak Baddi Co-authored-by: Mamta Singh Co-authored-by: Mamta Singh <168400541+quic-mamta@users.noreply.github.com> Signed-off-by: Dhiraj Kumar Sah --- QEfficient/base/pytorch_transforms.py | 78 +- QEfficient/transformers/cache_utils.py | 119 +++ QEfficient/transformers/modeling_utils.py | 1 + .../transformers/models/gpt_oss/__init__.py | 6 + .../models/gpt_oss/modeling_gpt_oss.py | 736 ++++++++++++++++++ .../transformers/models/modeling_auto.py | 54 +- .../transformers/models/pytorch_transforms.py | 25 + .../transformers/quantizers/__init__.py | 4 + QEfficient/transformers/quantizers/auto.py | 8 +- .../quantizers/quant_transforms.py | 33 +- 
.../quantizers/quantizer_mxfp4.py | 155 ++++ .../quantizers/quantizer_utils.py | 68 ++ QEfficient/utils/generate_inputs.py | 40 +- examples/gpt_oss.py | 35 + .../models/test_causal_lm_models.py | 6 +- tests/transformers/test_causal_lm.py | 14 +- 16 files changed, 1336 insertions(+), 46 deletions(-) create mode 100644 QEfficient/transformers/models/gpt_oss/__init__.py create mode 100644 QEfficient/transformers/models/gpt_oss/modeling_gpt_oss.py create mode 100644 QEfficient/transformers/quantizers/quantizer_mxfp4.py create mode 100644 examples/gpt_oss.py diff --git a/QEfficient/base/pytorch_transforms.py b/QEfficient/base/pytorch_transforms.py index a20fc4cb3..e503a057f 100644 --- a/QEfficient/base/pytorch_transforms.py +++ b/QEfficient/base/pytorch_transforms.py @@ -120,61 +120,109 @@ def apply(cls, model: nn.Module) -> Tuple[nn.Module, bool]: class SplitGateUpWeightsTransform(PytorchTransform): """ - split fused Gate+Up weights and copy into the model + Split fused Gate+Up weights and copy into the model. + Handles both standard MoE models and GptOss models. For every transformer layer inside `model`: - • expects .experts.gate_up_proj in the *source* `sd` - • copies halves into - .experts.gate_proj <-- Gate [E,H,I] - .experts.up_proj <-- Up [E,H,I] + • expects .experts.gate_up_proj in the *source* `sd` + • copies halves into + .experts.gate_proj <-- Gate [E,H,I] + .experts.up_proj <-- Up [E,H,I] + + Handles both interleaved weights (GptOss) and concatenated weights (standard MoE). + Also handles bias terms when present. 
""" @classmethod def apply(cls, model: nn.Module) -> Tuple[nn.Module, bool]: transformed = False model_class = model.__class__.__name__ if hasattr(model, "model") else model.__class__.__name__ - if model_class not in VLM_SPLIT_GATE_UP_WEIGHTS: return model, transformed model_tmp = model.language_model if hasattr(model, "language_model") else model - num_layers = len(model_tmp.model.layers) delete_fused_key = True sd = model_tmp.state_dict() + for layer_idx in range(num_layers): + # Determine if this is a GptOss model or standard MoE model + is_gpt_oss = hasattr(model_tmp.model.layers[layer_idx], "mlp") + # ---- build the textual prefix once per layer ---------- - prefix = f"model.layers.{layer_idx}.feed_forward.experts." + if is_gpt_oss: + prefix = f"model.layers.{layer_idx}.mlp.experts." + experts = model_tmp.model.layers[layer_idx].mlp.experts + else: + prefix = f"model.layers.{layer_idx}.feed_forward.experts." + experts = model_tmp.model.layers[layer_idx].feed_forward.experts fused_key = prefix + "gate_up_proj" gate_key = prefix + "gate_proj" up_key = prefix + "up_proj" - # ---- split [E,H,2I] → two [E,H,I] tensors ---------------------- - fused = sd[fused_key] # [E, H, 2I] (no .weight here) + # Check if we have bias terms (GptOss case) + has_bias = fused_key + "_bias" in sd + if has_bias: + fused_bias_key = fused_key + "_bias" + gate_bias_key = gate_key + "_bias" + up_bias_key = up_key + "_bias" + + # ---- split weights based on model type ---------------------- + fused = sd[fused_key] # [E, H, 2I] E, H, two_I = fused.shape - ffn_dim = two_I // 2 - gate, up = fused.split(ffn_dim, dim=-1) # views – no copy - experts = model_tmp.model.layers[layer_idx].feed_forward.experts + if is_gpt_oss: + # For GptOss, gate/up are interleaved: [gate0, up0, gate1, up1, ...] 
+ gate = fused[..., ::2] # [E, H, I] - even indices + up = fused[..., 1::2] # [E, H, I] - odd indices + else: + # For standard MoE, gate/up are concatenated: [gate, up] + ffn_dim = two_I // 2 + gate, up = fused.split(ffn_dim, dim=-1) # views – no copy + + # Copy weights to model experts.gate_proj.data.copy_(gate) experts.up_proj.data.copy_(up) + # Handle bias if present + if has_bias: + fused_bias = sd[fused_bias_key] # [E, 2I] + + if is_gpt_oss: + gate_bias = fused_bias[..., ::2] # [E, I] - even indices + up_bias = fused_bias[..., 1::2] # [E, I] - odd indices + else: + ffn_dim = fused_bias.shape[-1] // 2 + gate_bias, up_bias = fused_bias.split(ffn_dim, dim=-1) + + experts.gate_proj_bias.data.copy_(gate_bias) + experts.up_proj_bias.data.copy_(up_bias) + # ---- update the state-dict so load_state_dict sees the right keys sd[gate_key] = gate sd[up_key] = up + if has_bias: + sd[gate_bias_key] = gate_bias + sd[up_bias_key] = up_bias + + # Delete fused keys if delete_fused_key: del sd[fused_key] + if has_bias: + del sd[fused_bias_key] - logger.info(f"[layer {layer_idx:02d}] loaded gate_proj & up_proj from fused tensor (shape {fused.shape})") + logger.info(f"[layer {layer_idx:02d}] loaded gate_proj & up_proj from fused tensor (shape {fused.shape})") transformed = True if hasattr(model, "language_model"): model.language_model = model_tmp else: model = model_tmp + return model, transformed -VLM_SPLIT_GATE_UP_WEIGHTS = {"QEffLlama4ForConditionalGeneration", "QEffLlama4ForCausalLM"} +# Keep the existing list of supported models +VLM_SPLIT_GATE_UP_WEIGHTS = {"QEffLlama4ForConditionalGeneration", "QEffLlama4ForCausalLM", "QEffGptOssForCausalLM"} diff --git a/QEfficient/transformers/cache_utils.py b/QEfficient/transformers/cache_utils.py index bbd937d52..853567be9 100644 --- a/QEfficient/transformers/cache_utils.py +++ b/QEfficient/transformers/cache_utils.py @@ -537,3 +537,122 @@ def update( ctx_v_out = torch.where(invalid_mask.unsqueeze(-1), torch.tensor(0.0, 
dtype=torch.float32), v_out) v_out = torch.where((is_sliding_layer & (position_ids.max() >= (layer_ctx_len - 1))), v_out, ctx_v_out) return k_out, v_out + + +# This is a hack for now, until we get to merging this code with HybridCache class, +# We don't really need to inherit transformers classes as their cache classes are made to work with pytorch and +# ours are made to work with AIC +class QEffHybridCacheForGPTOSS: + def __init__(self, config, batch_size, max_cache_len, sliding_window_len): + self.max_cache_len = max_cache_len + self.batch_size = batch_size + self.sliding_window_len = sliding_window_len + self.key_cache: List[torch.Tensor] = [] + self.value_cache: List[torch.Tensor] = [] + + @classmethod + def from_legacy_cache( + cls, config, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + ) -> "HybridCache": + """Converts a cache in the legacy cache format into an equivalent `DynamicCache`. Used for + backward compatibility.""" + cache = cls( + config, + batch_size=past_key_values[0][0].shape[0], + max_cache_len=past_key_values[1][0].shape[2], + sliding_window_len=past_key_values[0][0].shape[2], + ) + if past_key_values is not None: + for layer_idx in range(len(past_key_values)): + key_states, value_states = past_key_values[layer_idx] + cache.update(key_states, value_states, layer_idx) + return cache + + def __len__(self): + """ + Support for backwards-compatible `past_key_value` length, e.g. `len(past_key_value)`. This value corresponds + to the number of layers in the model. + """ + return len(self.key_cache) + + def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: + """Returns the sequence length of the cached states. 
A layer index can be optionally passed.""" + # TODO: deprecate this function in favor of `cache_position` + is_empty_layer = ( + len(self.key_cache) == 0 # no cache in any layer + or len(self.key_cache) <= layer_idx # skipped `layer_idx` and hasn't run a layer with cache after it + or len(self.key_cache[layer_idx]) == 0 # the layer has no cache + ) + layer_seq_length = self.key_cache[layer_idx].shape[-2] if not is_empty_layer else 0 + return layer_seq_length + + def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]: + """Converts the `DynamicCache` instance into the its equivalent in the legacy cache format. Used for + backward compatibility.""" + legacy_cache = () + for layer_idx in range(len(self)): + legacy_cache += ((self.key_cache[layer_idx], self.value_cache[layer_idx]),) + return legacy_cache + + def update( + self, + key_states: torch.Tensor, + value_states: torch.Tensor, + layer_idx: int, + cache_kwargs: Optional[Dict[str, Any]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + if len(self.key_cache) <= layer_idx: + self.key_cache.append(key_states) + self.value_cache.append(value_states) + k_out, v_out = key_states, value_states + else: + position_ids = cache_kwargs.get("position_ids") + is_sliding_layer = cache_kwargs.get("is_sliding") + sliding_window = cache_kwargs.get("sliding_window") + batch_index = cache_kwargs.get("batch_index", None) # Check and fetch batch index value from the kwargs + + if is_sliding_layer: + kv_position_ids = torch.where(position_ids == -1, position_ids, position_ids % sliding_window) + else: + kv_position_ids = position_ids + + if batch_index is not None: + if torch.onnx.is_in_onnx_export(): + invalid_scatter_index = torch.iinfo(torch.int32).max + scatter_position_ids = torch.where(kv_position_ids < 0, invalid_scatter_index, kv_position_ids) + else: + scatter_position_ids = kv_position_ids + self.key_cache[layer_idx] = CtxScatterFuncCB.apply( + self.key_cache[layer_idx], batch_index, 
scatter_position_ids, key_states + ) + self.value_cache[layer_idx] = CtxScatterFuncCB.apply( + self.value_cache[layer_idx], batch_index, scatter_position_ids, value_states + ) + else: + self.key_cache[layer_idx] = CtxScatterFunc.apply(self.key_cache[layer_idx], kv_position_ids, key_states) + self.value_cache[layer_idx] = CtxScatterFunc.apply( + self.value_cache[layer_idx], kv_position_ids, value_states + ) + + k_out, v_out = self.key_cache[layer_idx], self.value_cache[layer_idx] + + # Original Gather + ctx_len = self.key_cache[layer_idx].shape[2] + ctx_indices = torch.arange(ctx_len)[None, None, ...] + gather_limit = position_ids.max(1, keepdim=True).values.unsqueeze(1) + invalid_mask = ctx_indices > gather_limit + if torch.onnx.is_in_onnx_export(): + invalid_idx_value = torch.iinfo(torch.int32).max + else: + invalid_idx_value = 0 + ctx_indices = torch.where(invalid_mask, invalid_idx_value, ctx_indices) + + if batch_index is not None: + k_out = CtxGatherFuncCB.apply(k_out, batch_index, ctx_indices) + v_out = CtxGatherFuncCB.apply(v_out, batch_index, ctx_indices) + else: + k_out = CtxGatherFunc.apply(k_out, ctx_indices) + v_out = CtxGatherFunc.apply(v_out, ctx_indices) + + v_out = torch.where(invalid_mask.unsqueeze(-1), torch.tensor(0.0, dtype=torch.float32), v_out) + return k_out, v_out diff --git a/QEfficient/transformers/modeling_utils.py b/QEfficient/transformers/modeling_utils.py index c692d1beb..5337b44f5 100644 --- a/QEfficient/transformers/modeling_utils.py +++ b/QEfficient/transformers/modeling_utils.py @@ -185,6 +185,7 @@ ] ) +# This is for supporting different seq_len for different layers for Sliding window attn, chunked attn etc. 
DYNAMIC_SEQ_LEN_SUPPORTED_MODEL_ARCH = {"gemma3", "llama4", "gemma3_text", "llama4_text"} # Define a transformers layers to QEff layers dictionary diff --git a/QEfficient/transformers/models/gpt_oss/__init__.py b/QEfficient/transformers/models/gpt_oss/__init__.py new file mode 100644 index 000000000..75daf1953 --- /dev/null +++ b/QEfficient/transformers/models/gpt_oss/__init__.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ---------------------------------------------------------------------------- diff --git a/QEfficient/transformers/models/gpt_oss/modeling_gpt_oss.py b/QEfficient/transformers/models/gpt_oss/modeling_gpt_oss.py new file mode 100644 index 000000000..62bc849b7 --- /dev/null +++ b/QEfficient/transformers/models/gpt_oss/modeling_gpt_oss.py @@ -0,0 +1,736 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- +from typing import Callable, Optional, Union + +import torch +from torch import nn +from torch.nn import functional as F +from transformers.cache_utils import Cache +from transformers.modeling_outputs import ( + MoeCausalLMOutputWithPast, + MoeModelOutputWithPast, +) +from transformers.models.gpt_oss.modeling_gpt_oss import ( + GptOssAttention, + GptOssConfig, + GptOssDecoderLayer, + GptOssExperts, + GptOssForCausalLM, + GptOssMLP, + GptOssModel, + GptOssRotaryEmbedding, + repeat_kv, +) +from transformers.processing_utils import Unpack +from transformers.utils import TransformersKwargs + +from QEfficient.transformers.cache_utils import QEffHybridCacheForGPTOSS +from QEfficient.transformers.modeling_attn_mask_utils import _create_causal_mask +from QEfficient.utils import constants +from QEfficient.utils.constants import MIN_MASKED_ATTENTION_VALUE + + +class QEffGptOssExperts(GptOssExperts): + def __qeff_init__(self): + self.gate_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_size, self.expert_dim)) + self.up_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_size, self.expert_dim)) + self.gate_proj_bias = nn.Parameter(torch.empty(self.num_experts, self.expert_dim)) + self.up_proj_bias = nn.Parameter(torch.empty(self.num_experts, self.expert_dim)) + + +class QEffGptOssMLP(GptOssMLP): + def alt_forward(self, hidden: torch.Tensor): + B, S, H = hidden.shape + T = B * S + hidden = hidden.view(T, H) + + # Router computation + router_logits = F.linear(hidden, self.router.weight, self.router.bias) + + # Top-k selection + top_w, top_i = torch.topk(router_logits, self.router.top_k, dim=-1) # both [T, K] + top_w = torch.nn.functional.softmax(top_w, dim=1, dtype=top_w.dtype) + + masked_logits = torch.zeros_like(router_logits) + masked_logits.scatter_(1, top_i, top_w) + + # Routing weights for each expert [T, E] + routing_weights 
= masked_logits + + # ────────────────── allocate the output tensor ───── + expert_out = hidden.new_zeros((T, H)) # accumulation buffer + + # ───────────────────────── Expert computation loop ───────────────────────────── + for e in range(self.experts.num_experts): + routing_weight = routing_weights[:, e].unsqueeze(-1) # [T, 1] + + W_g, W_u = self.experts.gate_proj[e], self.experts.up_proj[e] # [H, I], [H, I] + b_g, b_u = self.experts.gate_proj_bias[e], self.experts.up_proj_bias[e] # [I], [I] + W_d = self.experts.down_proj[e] # [I, H] + b_d = self.experts.down_proj_bias[e] # [H] + + # Gate and Up projections + gate = (hidden @ W_g) + b_g # [T, I] + up = (hidden @ W_u) + b_u # [T, I] + + # Apply GptOss activation with clamping + gate = gate.clamp(min=None, max=self.experts.limit) + up = up.clamp(min=-self.experts.limit, max=self.experts.limit) + + # GLU activation + glu = gate * torch.sigmoid(gate * self.experts.alpha) + intermediate = (up + 1) * glu # [T, I] + + # Down projection + down_out = (intermediate @ W_d) + b_d # [T, H] + + # Apply routing weights and accumulate + masked_down = torch.where(routing_weight > 0, down_out * routing_weight, torch.zeros_like(expert_out)) + expert_out += masked_down + + # original shape [B, S, H] + return expert_out.view(B, S, H), router_logits + + # ------------------- Gather based, weights as activation approach --------------- + def forward_weights_as_activation(self, hidden_states): + bs, seq_len, _ = hidden_states.shape + hidden_states = hidden_states.view(bs * seq_len, self.experts.hidden_size) + + # Router computation + router_logits = F.linear(hidden_states, self.router.weight, self.router.bias) + router_top_value, router_indices = torch.topk(router_logits, self.router.top_k, dim=-1) + router_top_value = torch.nn.functional.softmax(router_top_value, dim=1, dtype=router_top_value.dtype) + + # GATHER - collect weights for selected experts + gate_up_proj = self.experts.gate_up_proj[router_indices.flatten()] + 
gate_up_proj_bias = self.experts.gate_up_proj_bias[router_indices.flatten()] + down_proj = self.experts.down_proj[router_indices.flatten()] + down_proj_bias = self.experts.down_proj_bias[router_indices.flatten()] + + # Apply Chosen Experts (without routing weights first) + # expert_in = hidden_states.repeat_interleave(self.router.top_k, dim=0) + # expert_in = expert_in.view(-1, 1, self.experts.hidden_size) + # Reshape for bmm: (bs*seq_len*top_k, 1, hidden_size) + expert_in = ( + hidden_states.unsqueeze(1) + .expand(-1, self.router.top_k, -1) + .contiguous() + .view(-1, 1, self.experts.hidden_size) + ) + + gate_up = torch.bmm(expert_in, gate_up_proj) + gate_up_proj_bias.unsqueeze(1) + gate, up = gate_up[..., ::2], gate_up[..., 1::2] + + # Apply activation with clamping + gate = gate.clamp(min=None, max=self.experts.limit) + up = up.clamp(min=-self.experts.limit, max=self.experts.limit) + glu = gate * torch.sigmoid(gate * self.experts.alpha) + gated_output = (up + 1) * glu + + experts_out = torch.bmm(gated_output, down_proj) + down_proj_bias.unsqueeze(1) + experts_out = experts_out.view(bs * seq_len, self.router.top_k, self.experts.hidden_size) + + # Apply routing weights AFTER expert computation (This is before on Llama4) + experts_out = experts_out * router_top_value.unsqueeze(-1) + experts_out = experts_out.sum(dim=1) + + return experts_out, router_logits + + # ------------------- Gather based, weights as activation approach, With Seperate Gate, up Projections --------------- + def forward(self, hidden_states): + # print("Seperate Split, Up, Gate Projections") + bs, seq_len, _ = hidden_states.shape + hidden_states = hidden_states.view(bs * seq_len, self.experts.hidden_size) + + # Router computation + router_logits = F.linear(hidden_states, self.router.weight, self.router.bias) + router_top_value, router_indices = torch.topk(router_logits, self.router.top_k, dim=-1) + router_top_value = torch.nn.functional.softmax(router_top_value, dim=1, 
def optimized_moe_forward(self, hidden_states: torch.Tensor):
    """Expert-loop MoE forward that avoids per-token gathers.

    Loops over all experts once for the gate/up stage and once for the down
    stage, scattering each expert's output into one accumulation buffer per
    top-k slot via boolean one-hot masks.

    Fixes over the previous version:
    - generalized from a hard-coded top_k == 4 (four literal mask variables and
      ``selected_experts[:, 0..3]``) to any ``self.router.top_k``;
    - accumulation buffers are allocated with ``new_zeros`` so they inherit the
      input's device and dtype instead of the CPU/float32 defaults.

    Returns:
        tuple: (hidden_states [B, S, H], router_logits [B*S, num_experts]).
    """
    B, S, H = hidden_states.shape
    T = B * S
    top_k = self.router.top_k
    hidden_states = hidden_states.view(T, H)

    # Router computation
    router_logits = F.linear(hidden_states, self.router.weight, self.router.bias)

    # Top-k selection; softmax over the selected scores only
    top_w, selected_experts = torch.topk(router_logits, top_k, dim=-1)  # both [T, K]
    top_w = torch.nn.functional.softmax(top_w, dim=1, dtype=top_w.dtype)

    # One boolean mask per top-k slot: [num_experts, T, 1], True where that
    # slot of a token selected expert e.
    slot_masks = [
        torch.nn.functional.one_hot(selected_experts[:, k], num_classes=self.experts.num_experts)
        .bool()
        .T.unsqueeze(-1)
        for k in range(top_k)
    ]

    # Per-slot accumulators for the gate/up stage, on the input's device/dtype
    gateup_outs = [hidden_states.new_zeros(T, self.experts.intermediate_size) for _ in range(top_k)]

    # ───────────────────────── Expert computation loop ─────────────────────────
    for e in range(self.experts.num_experts):
        W_g, W_u = self.experts.gate_proj[e], self.experts.up_proj[e]  # [H, I], [H, I]
        b_g, b_u = self.experts.gate_proj_bias[e], self.experts.up_proj_bias[e]  # [I], [I]

        # Gate and Up projections
        gate = (hidden_states @ W_g) + b_g  # [T, I]
        up = (hidden_states @ W_u) + b_u  # [T, I]

        # Apply GptOss activation with clamping
        gate = gate.clamp(min=None, max=self.experts.limit)
        up = up.clamp(min=-self.experts.limit, max=self.experts.limit)

        # GLU activation
        glu = gate * torch.sigmoid(gate * self.experts.alpha)
        intermediate = (up + 1) * glu  # [T, I]

        # Scatter this expert's activations into the slot(s) that selected it
        for k in range(top_k):
            gateup_outs[k] += torch.where(slot_masks[k][e], intermediate, torch.zeros_like(gateup_outs[k]))

    # Stack the per-slot buffers so the down-projection loop runs once per expert
    concat_down = hidden_states.new_zeros(top_k, T, H)
    concat_mask = torch.stack(slot_masks, dim=0)  # [K, E, T, 1]
    concat_gateout = torch.stack(gateup_outs, dim=0)  # [K, T, I]

    for e in range(self.experts.num_experts):
        W_d = self.experts.down_proj[e]  # [I, H]
        b_d = self.experts.down_proj_bias[e]  # [H]

        # Down projection for all slots at once, masked to the selecting tokens
        down_out = (concat_gateout @ W_d) + b_d  # [K, T, H]
        concat_down += torch.where(concat_mask[:, e, :], down_out, torch.zeros_like(concat_down))

    # Combine the top-k slot outputs with their routing weights
    hidden_states = sum(concat_down[k] * top_w[:, k].unsqueeze(-1) for k in range(top_k)).reshape(B, S, H)

    # original shape [B, S, H]
    return hidden_states, router_logits
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    half = x.shape[-1] // 2
    first_half, second_half = x[..., :half], x[..., half:]
    # RoPE convention: (x1, x2) -> (-x2, x1) on the last dimension.
    return torch.cat((-second_half, first_half), dim=-1)
The three rotary position index (temporal, + height and width) of text embedding is always the same, so the text embedding rotary position embedding has no + difference with modern LLMs. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. + mrope_section(`List(int)`): + Multimodal rope section is for channel dimension of temporal, height and width in rope calculation. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
+ """ + + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) + + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + + return q_embed.to(q.dtype), k_embed.to(k.dtype) + + +def eager_attention_forward( + module: nn.Module, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attention_mask: Optional[torch.Tensor], + scaling: float, + dropout: float = 0.0, + **kwargs, +): + key_states = repeat_kv(key, module.num_key_value_groups) + value_states = repeat_kv(value, module.num_key_value_groups) + + attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling + if attention_mask is not None: + attn_weights = torch.where( + attention_mask, torch.tensor(MIN_MASKED_ATTENTION_VALUE, dtype=torch.float32), attn_weights + ) + + sinks = module.sinks.reshape(1, -1, 1, 1).expand(query.shape[0], -1, query.shape[-2], -1) + combined_logits = torch.cat([attn_weights, sinks], dim=-1) + + # This was not in the original implementation and slightly affect results; it prevents overflow in BF16/FP16 + # when training with bsz>1 we clamp max values. 
class QEffGptOssAttention(GptOssAttention):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __qeff_init__(self):
        # Static-table rotary embedding attached post-hoc by the QEff transform.
        self.rotary_emb = QEffGptOssRotaryEmbedding(config=self.config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        batch_index: Optional[torch.LongTensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        sliding_mask=None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Attention forward with QEff KV-cache semantics.

        `position_embeddings` is accepted for interface compatibility but not
        used: cos/sin are recomputed from the static rotary tables and indexed
        by `position_ids`. Sliding-window layers swap in `sliding_mask`.
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        # Fixed 32k table length so the exported graph has a static shape.
        cos, sin = self.rotary_emb(value_states, seq_len=32 * 1024)
        query_states, key_states = qeff_apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "batch_index": batch_index,
                "position_ids": position_ids,
                "config": self.config,
                "is_sliding": self.sliding_window is not None,
                "sliding_window": past_key_value.sliding_window_len,
            }
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Sliding-window layers use the sliding causal mask built by the model.
        if self.sliding_window is not None:
            attention_mask = sliding_mask
        else:
            attention_mask = attention_mask

        attention_interface: Callable = eager_attention_forward
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            s_aux=self.sinks,  # diff with Llama
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights, past_key_value
class QEffGptOssModel(GptOssModel):
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        batch_index: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MoeModelOutputWithPast:
        """GPT-OSS backbone forward with QEff hybrid (full + sliding-window)
        KV cache and position-id-driven causal masks.

        Builds one causal mask against the full cache length and one against
        the sliding-window length; each decoder layer picks the one matching
        its attention type.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        # Wrap legacy tuple caches in the QEff hybrid cache; unwrap on return.
        return_legacy_cache = False
        if use_cache and not isinstance(past_key_values, Cache):
            return_legacy_cache = True
            past_key_values = QEffHybridCacheForGPTOSS.from_legacy_cache(self.config, past_key_values)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens
        causal_mask = _create_causal_mask(position_ids=position_ids, target_length=past_key_values.max_cache_len)
        sliding_mask = _create_causal_mask(
            position_ids=position_ids,
            target_length=past_key_values.sliding_window_len,
            sliding_window=past_key_values.sliding_window_len,
        )

        hidden_states = inputs_embeds
        # position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        # NOTE(review): all_hidden_states / all_self_attns are collected but not
        # returned in MoeModelOutputWithPast below — confirm whether intentional.
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                batch_index=batch_index,
                use_cache=use_cache,
                output_attentions=output_attentions,
                cache_position=cache_position,
                sliding_mask=sliding_mask,
                **kwargs,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if return_legacy_cache:
            past_key_values = past_key_values.to_legacy_cache()

        return MoeModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )
batch_index: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + output_router_logits: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + logits_to_keep: Union[int, torch.Tensor] = 0, + **kwargs: Unpack[TransformersKwargs], + ) -> MoeCausalLMOutputWithPast: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Example: + + ```python + >>> from transformers import AutoTokenizer, GptOssForCausalLM + + >>> model = GptOssForCausalLM.from_pretrained("mistralai/GptOss-8x7B-v0.1") + >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/GptOss-8x7B-v0.1") + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
+ ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + output_router_logits = ( + output_router_logits if output_router_logits is not None else self.config.output_router_logits + ) + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs: MoeModelOutputWithPast = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + batch_index=batch_index, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + output_router_logits=output_router_logits, + return_dict=return_dict, + cache_position=cache_position, + **kwargs, + ) + + hidden_states = outputs.last_hidden_state + + logit_index = position_ids.to(torch.int32).argmax(1, keepdim=True) + hidden_states = outputs[0][torch.arange(position_ids.shape[0]).view(-1, 1), logit_index] + logits = self.lm_head(hidden_states) + logits = logits.float() + + return MoeCausalLMOutputWithPast( + loss=None, + aux_loss=None, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + router_logits=outputs.router_logits, + ) + + def get_pkv_dynamic_axes( + self, + ): + pkv_dynamic_axes = [] + for layer_type in self.config.layer_types: + if layer_type == "sliding_attention": + pkv_dynamic_axes.append({0: "batch_size", 2: "sliding_window"}) + elif layer_type == "full_attention": + pkv_dynamic_axes.append({0: "batch_size", 2: "ctx_len"}) + return pkv_dynamic_axes + + def get_specializations( + self, + batch_size: int, + prefill_seq_len: int, + ctx_len: int, + ): + batch_size = batch_size if 
batch_size else 1 + prefill_seq_len = prefill_seq_len if prefill_seq_len else constants.PROMPT_LEN + ctx_len = ctx_len if ctx_len else constants.CTX_LEN + + specializations = [ + { + "batch_size": batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "sliding_window": 128, + }, + { + "batch_size": batch_size, + "seq_len": 1, + "ctx_len": ctx_len, + "sliding_window": 128, + }, + ] + return specializations diff --git a/QEfficient/transformers/models/modeling_auto.py b/QEfficient/transformers/models/modeling_auto.py index d92f63d3b..60f60c768 100644 --- a/QEfficient/transformers/models/modeling_auto.py +++ b/QEfficient/transformers/models/modeling_auto.py @@ -53,6 +53,7 @@ AwqToMatmulNbitsTransform, FP8DeQuantLinearToLinearTransform, GPTQToMatmulNbitsTransform, + Mxfp4GptOssExpertDequantizeTransform, ) from QEfficient.utils import ( constants, @@ -2104,6 +2105,7 @@ class QEFFAutoModelForCausalLM(QEFFBaseModel): AwqToMatmulNbitsTransform, GPTQToMatmulNbitsTransform, FP8DeQuantLinearToLinearTransform, + Mxfp4GptOssExpertDequantizeTransform, CustomOpsTransform, KVCacheTransform, SplitGateUpWeightsTransform, @@ -2360,10 +2362,21 @@ def export(self, export_dir: Optional[str] = None) -> str: output_names.append(f"past_{kv}.{i}_RetainedState") else: + # HACK: create common function for this including above if condition code + pkv_dynamic_axes = ( + self.model.get_pkv_dynamic_axes() if hasattr(self.model, "get_pkv_dynamic_axes") else pkv_dynamic_axes + ) + pkv_dynamic_axes = ( + [pkv_dynamic_axes] * self.model.config.num_hidden_layers + if isinstance(pkv_dynamic_axes, dict) + else pkv_dynamic_axes + ) + for i in range(self.num_layers): + pkv_dynamic_axes[i][0] = "full_batch_size" if self.continuous_batching else "batch_size" for kv in ["key", "value"]: example_inputs["past_key_values"][i].append(torch.zeros(kv_cache_shape, dtype=torch.float32)) - dynamic_axes[f"past_{kv}.{i}"] = pkv_dynamic_axes + dynamic_axes[f"past_{kv}.{i}"] = pkv_dynamic_axes[i] 
output_names.append(f"past_{kv}.{i}_RetainedState") if self.continuous_batching: @@ -2497,12 +2510,19 @@ def build_prefill_specialization( Dict[str, Union[int, str]] A dictionary defining the prefill specialization. """ - spec = { - "batch_size": 1 if self.continuous_batching else batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "num_logits_to_keep": 1 if self.is_tlm else None, - } + if hasattr(self.model, "get_specializations"): + spec = self.model.get_specializations( + batch_size=1 if self.continuous_batching else batch_size, + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + )[0] + else: + spec = { + "batch_size": 1 if self.continuous_batching else batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + } + spec["num_logits_to_keep"] = 1 if self.is_tlm else None if self.continuous_batching: spec["full_batch_size"] = kv_cache_batch_size else: @@ -2546,12 +2566,20 @@ def build_decode_specialization( """ if prefill_seq_len == 1 and not self.continuous_batching: return None # Avoid duplication with prefill - spec = { - "batch_size": full_batch_size if self.continuous_batching else batch_size, - "seq_len": (num_speculative_tokens + 1) if self.is_tlm else 1, - "ctx_len": ctx_len, - "num_logits_to_keep": (num_speculative_tokens + 1) if self.is_tlm else None, - } + + if hasattr(self.model, "get_specializations"): + spec = self.model.get_specializations( + batch_size=full_batch_size if self.continuous_batching else batch_size, + prefill_seq_len=(num_speculative_tokens + 1) if self.is_tlm else 1, + ctx_len=ctx_len, + )[1] + else: + spec = { + "batch_size": full_batch_size if self.continuous_batching else batch_size, + "seq_len": (num_speculative_tokens + 1) if self.is_tlm else 1, + "ctx_len": ctx_len, + } + spec["num_logits_to_keep"] = (num_speculative_tokens + 1) if self.is_tlm else None if self.continuous_batching: spec["full_batch_size"] = kv_cache_batch_size diff --git a/QEfficient/transformers/models/pytorch_transforms.py 
b/QEfficient/transformers/models/pytorch_transforms.py index 23ab2ca5f..773ce178c 100644 --- a/QEfficient/transformers/models/pytorch_transforms.py +++ b/QEfficient/transformers/models/pytorch_transforms.py @@ -51,6 +51,15 @@ GPTBigCodeForCausalLM, GPTBigCodeModel, ) +from transformers.models.gpt_oss.modeling_gpt_oss import ( + GptOssAttention, + GptOssDecoderLayer, + GptOssExperts, + GptOssForCausalLM, + GptOssMLP, + GptOssModel, + GptOssRMSNorm, +) from transformers.models.gptj.modeling_gptj import GPTJAttention, GPTJBlock, GPTJForCausalLM, GPTJModel from transformers.models.granite.modeling_granite import ( GraniteAttention, @@ -243,6 +252,14 @@ QEffGPTBigCodeForCausalLM, QEffGPTBigCodeModel, ) +from QEfficient.transformers.models.gpt_oss.modeling_gpt_oss import ( + QEffGptOssAttention, + QEffGptOssDecoderLayer, + QEffGptOssExperts, + QEffGptOssForCausalLM, + QEffGptOssMLP, + QEffGptOssModel, +) from QEfficient.transformers.models.gptj.modeling_gptj import ( QEffGPTJAttention, QEffGPTJBlock, @@ -417,6 +434,7 @@ class CustomOpsTransform(ModuleMappingTransform): _module_mapping = { GemmaRMSNorm: GemmaCustomRMSNormAIC, Gemma2RMSNorm: GemmaCustomRMSNormAIC, + GptOssRMSNorm: CustomRMSNormAIC, LlamaRMSNorm: CustomRMSNormAIC, Llama4TextRMSNorm: CustomRMSNormAIC, MistralRMSNorm: CustomRMSNormAIC, @@ -502,6 +520,13 @@ class KVCacheTransform(ModuleMappingTransform): Gemma3TextModel: QEffGemma3TextModel, Gemma3ForCausalLM: QEffGemma3ForCausalLMModel, Gemma3ForConditionalGeneration: QEffGemma3ForConditionalGeneration, + # GPT_OSS + GptOssAttention: QEffGptOssAttention, + GptOssDecoderLayer: QEffGptOssDecoderLayer, + GptOssModel: QEffGptOssModel, + GptOssForCausalLM: QEffGptOssForCausalLM, + GptOssMLP: QEffGptOssMLP, + GptOssExperts: QEffGptOssExperts, # Granite GraniteModel: QEffGraniteModel, GraniteForCausalLM: QEffGraniteForCausalLM, diff --git a/QEfficient/transformers/quantizers/__init__.py b/QEfficient/transformers/quantizers/__init__.py index d647b73a6..dfadc00ef 
100644 --- a/QEfficient/transformers/quantizers/__init__.py +++ b/QEfficient/transformers/quantizers/__init__.py @@ -4,3 +4,7 @@ # SPDX-License-Identifier: BSD-3-Clause # # ----------------------------------------------------------------------------- + +from QEfficient.transformers.quantizers.auto import replace_transformers_quantizers + +__all__ = ["replace_transformers_quantizers"] diff --git a/QEfficient/transformers/quantizers/auto.py b/QEfficient/transformers/quantizers/auto.py index ba204e419..d73909211 100644 --- a/QEfficient/transformers/quantizers/auto.py +++ b/QEfficient/transformers/quantizers/auto.py @@ -11,7 +11,8 @@ from transformers.quantizers.quantizer_awq import AwqQuantizer from transformers.quantizers.quantizer_compressed_tensors import CompressedTensorsHfQuantizer from transformers.quantizers.quantizer_gptq import GptqHfQuantizer -from transformers.utils.quantization_config import AwqConfig, CompressedTensorsConfig, GPTQConfig +from transformers.quantizers.quantizer_mxfp4 import Mxfp4HfQuantizer +from transformers.utils.quantization_config import AwqConfig, CompressedTensorsConfig, GPTQConfig, Mxfp4Config from QEfficient.transformers.quantizers.quantizer_awq import QEffAwqConfig, QEffAwqQuantizer from QEfficient.transformers.quantizers.quantizer_compressed_tensors import ( @@ -21,30 +22,35 @@ QEffFP8Quantizer, ) from QEfficient.transformers.quantizers.quantizer_gptq import QEffGPTQConfig, QEffGPTQQuantizer +from QEfficient.transformers.quantizers.quantizer_mxfp4 import QEffMxfp4Config, QEffMxfp4HfQuantizer QEFF_AUTO_QUANTIZER_MAPPING = { "awq": QEffAwqQuantizer, "gptq": QEffGPTQQuantizer, "compressed-tensors": QEffCompressedTensorsFP8Quantizer, "fp8": QEffFP8Quantizer, + "mxfp4": QEffMxfp4HfQuantizer, } QEFF_AUTO_QUANTIZATION_CONFIG_MAPPING = { "awq": QEffAwqConfig, "gptq": QEffGPTQConfig, "compressed-tensors": QEffCompressedTensorsConfig, "fp8": QEffFP8Config, + "mxfp4": QEffMxfp4Config, } DUPLICATE_AUTO_QUANTIZER_MAPPING = { "awq": 
AwqQuantizer, "gptq": GptqHfQuantizer, "compressed-tensors": CompressedTensorsHfQuantizer, "fp8": None, + "mxfp4": Mxfp4HfQuantizer, } DUPLICATE_AUTO_QUANTIZATION_CONFIG_MAPPING = { "awq": AwqConfig, "gptq": GPTQConfig, "compressed-tensors": CompressedTensorsConfig, "fp8": None, + "mxfp4": Mxfp4Config, } diff --git a/QEfficient/transformers/quantizers/quant_transforms.py b/QEfficient/transformers/quantizers/quant_transforms.py index 0427bca37..69d6380f0 100644 --- a/QEfficient/transformers/quantizers/quant_transforms.py +++ b/QEfficient/transformers/quantizers/quant_transforms.py @@ -7,13 +7,19 @@ import torch from torch import nn +from transformers.models.gpt_oss.modeling_gpt_oss import GptOssExperts from QEfficient.base.pytorch_transforms import ModuleMutatorTransform from QEfficient.customop.matmulnbits import QuantLinearORT from QEfficient.transformers.quantizers.awq import WQLinear_GEMM from QEfficient.transformers.quantizers.gptq import QuantLinearGPTQ from QEfficient.transformers.quantizers.quantizer_compressed_tensors import FP8DeQuantLinear -from QEfficient.transformers.quantizers.quantizer_utils import dequantize_gptq, unpack_weights +from QEfficient.transformers.quantizers.quantizer_mxfp4 import QEffMxfp4GptOssExperts +from QEfficient.transformers.quantizers.quantizer_utils import ( + convert_moe_packed_tensors, + dequantize_gptq, + unpack_weights, +) class AwqToMatmulNbitsTransform(ModuleMutatorTransform): @@ -115,3 +121,28 @@ def mutate(cls, original_module, parent_module): if original_module.bias is not None: dequant_linear_layer.bias = torch.nn.Parameter(original_module.bias.float()) return dequant_linear_layer + + +class Mxfp4GptOssExpertDequantizeTransform(ModuleMutatorTransform): + """ + Used to dequantize the weights of an Mxfp4GptOssExpert module and replace with transformers GptOssExperts with dequantized weights + """ + + _match_class = QEffMxfp4GptOssExperts + + @classmethod + def mutate(cls, original_module, parent_module): + dequant_module 
def __init__(self, config):
    """Placeholder module holding MXFP4-packed expert weights.

    Parameters are zero-initialized with requires_grad=False; the real packed
    weights are loaded from the checkpoint, then dequantized by
    Mxfp4GptOssExpertDequantizeTransform.
    """
    super().__init__()
    self.config = config

    self.num_experts = config.num_local_experts
    self.intermediate_size = config.intermediate_size
    self.hidden_size = config.hidden_size

    # Packed fused gate/up weights — presumably 32-element MXFP4 blocks stored
    # as 16 uint8 bytes each (hence hidden_size // 32 x 16); TODO confirm
    # against convert_moe_packed_tensors.
    self.gate_up_proj_blocks = nn.Parameter(
        torch.zeros(self.num_experts, 2 * self.intermediate_size, self.hidden_size // 32, 16, dtype=torch.uint8),
        requires_grad=False,
    )
    # One uint8 scale per 32-element block.
    self.gate_up_proj_scales = nn.Parameter(
        torch.zeros(self.num_experts, 2 * self.intermediate_size, self.hidden_size // 32, dtype=torch.uint8),
        requires_grad=False,
    )
    self.gate_up_proj_bias = nn.Parameter(
        torch.zeros(self.num_experts, 2 * self.intermediate_size, dtype=torch.float32), requires_grad=False
    )

    self.down_proj_blocks = nn.Parameter(
        torch.zeros((self.num_experts, self.hidden_size, self.intermediate_size // 32, 16), dtype=torch.uint8),
        requires_grad=False,
    )
    self.down_proj_scales = nn.Parameter(
        torch.zeros(self.num_experts, self.hidden_size, self.intermediate_size // 32, dtype=torch.uint8),
        requires_grad=False,
    )
    self.down_proj_bias = nn.Parameter(
        torch.zeros(self.num_experts, self.hidden_size, dtype=torch.float32), requires_grad=False
    )
    # GLU clamp/sigmoid constants mirroring GptOssExperts.
    self.alpha = 1.702
    self.limit = 7.0

    self.gate_up_proj_precision_config = None
    self.down_proj_precision_config = None
+ next_states = next_states * routing_weights.transpose(0, 1).view(num_experts, batch_size, -1)[..., None] + next_states = next_states.sum(dim=0) + return next_states + + +def should_convert_module(current_key_name, patterns): + current_key_name_str = ".".join(current_key_name) + if not any( + re.match(f"{key}\\.", current_key_name_str) or re.match(f"{key}", current_key_name_str) for key in patterns + ): + return True + return False + + +class QEffMxfp4Config(Mxfp4Config): + """ + Currently there is not need to change the implementation of Mxfp4Config + This is placeholder for future when we would want to change this + """ + + pass + + +class QEffMxfp4HfQuantizer(Mxfp4HfQuantizer): + def validate_environment(self, *args, **kwargs): + return True + + def update_torch_dtype(self, torch_dtype): + if torch_dtype not in [None, torch.float32]: + logger.warning(f"Requested dtype {torch_dtype} is not supported, overriding to None") + return None + + def _process_model_before_weight_loading( + self, + model: torch.nn.Module, + keep_in_fp32_modules: Optional[list[str]] = None, + **kwargs, + ): + self.modules_to_not_convert = get_keys_to_not_convert(model) + self.modules_to_not_convert = ( + ["lm_head"] if self.modules_to_not_convert is None else self.modules_to_not_convert + ) + self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert) + self.modules_to_not_convert = list(set(self.modules_to_not_convert)) + config = model.config + + # -- Defining local method as it uses lot of local variables -- + def _replace_with_mxfp4_linear( + model, + modules_to_not_convert=None, + current_key_name=None, + quantization_config=None, + has_been_replaced=False, + ): + if current_key_name is None: + current_key_name = [] + + for name, module in model.named_children(): + current_key_name.append(name) + if not should_convert_module(current_key_name, modules_to_not_convert): + current_key_name.pop(-1) + continue + if module.__class__.__name__ == "GptOssExperts" and 
not quantization_config.dequantize: + model._modules[name] = QEffMxfp4GptOssExperts(config) + has_been_replaced = True + if len(list(module.children())) > 0: + _, has_been_replaced = _replace_with_mxfp4_linear( + module, + modules_to_not_convert, + current_key_name, + quantization_config, + has_been_replaced=has_been_replaced, + ) + current_key_name.pop(-1) + return model, has_been_replaced + + _replace_with_mxfp4_linear( + model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config + ) + model.config.quantization_config = self.quantization_config diff --git a/QEfficient/transformers/quantizers/quantizer_utils.py b/QEfficient/transformers/quantizers/quantizer_utils.py index a318fb8e4..424692d08 100644 --- a/QEfficient/transformers/quantizers/quantizer_utils.py +++ b/QEfficient/transformers/quantizers/quantizer_utils.py @@ -6,6 +6,7 @@ # ----------------------------------------------------------------------------- import copy +import math import torch from torch import nn @@ -378,3 +379,70 @@ def repack_zeros(qzeros, bits): break qzeros = qzeros.T return qzeros + + +FP4_VALUES = [ + +0.0, + +0.5, + +1.0, + +1.5, + +2.0, + +3.0, + +4.0, + +6.0, + -0.0, + -0.5, + -1.0, + -1.5, + -2.0, + -3.0, + -4.0, + -6.0, +] + + +def convert_moe_packed_tensors( + blocks, + scales, + *, + dtype: torch.dtype = torch.bfloat16, + rows_per_chunk: int = 32768 * 1024, +) -> torch.Tensor: + """ + reference for this function is taken from: https://github.com/huggingface/transformers/tree/main/src/transformers/models/gpt_oss#L98 + """ + + scales = scales.to(torch.int32) - 127 + + assert blocks.shape[:-1] == scales.shape, f"{blocks.shape=} does not match {scales.shape=}" + + lut = torch.tensor(FP4_VALUES, dtype=dtype, device=blocks.device) + + *prefix_shape, G, B = blocks.shape + rows_total = math.prod(prefix_shape) * G + + blocks = blocks.reshape(rows_total, B) + scales = scales.reshape(rows_total, 1) + + out = torch.empty(rows_total, B * 2, 
dtype=dtype, device=blocks.device) + + for r0 in range(0, rows_total, rows_per_chunk): + r1 = min(r0 + rows_per_chunk, rows_total) + + blk = blocks[r0:r1] + exp = scales[r0:r1] + + # nibble indices -> int64 + idx_lo = (blk & 0x0F).to(torch.long) + idx_hi = (blk >> 4).to(torch.long) + + sub = out[r0:r1] + sub[:, 0::2] = lut[idx_lo] + sub[:, 1::2] = lut[idx_hi] + + torch.ldexp(sub, exp, out=sub) + del idx_lo, idx_hi, blk, exp + + out = out.reshape(*prefix_shape, G, B * 2).view(*prefix_shape, G * B * 2) + out = out.to(dtype).permute(0, 2, 1).contiguous() + return out diff --git a/QEfficient/utils/generate_inputs.py b/QEfficient/utils/generate_inputs.py index eb1f7c8e6..7d07db530 100644 --- a/QEfficient/utils/generate_inputs.py +++ b/QEfficient/utils/generate_inputs.py @@ -87,13 +87,20 @@ def prepare_pytorch_inputs(self): if self.full_batch_size: inputs["input_ids"] = input_ids - inputs["position_ids"] = torch.arange(input_len).view(1, input_len) - inputs["batch_index"] = torch.arange(1).view(-1, 1) + inputs["position_ids"] = position_ids + inputs["batch_index"] = torch.arange(self.full_batch_size).view(-1, 1) past_key_values = [] for i in range(self.n_layer): - past_key = torch.zeros((self.padding_shape), dtype=torch.float32) - past_value = torch.zeros((self.padding_shape), dtype=torch.float32) + if ( + all(hasattr(self.config, attr) for attr in ["sliding_window", "layer_types"]) + and self.config.layer_types[i] == "sliding_attention" + ): + pad_shape = self.padding_shape[:2] + [self.config.sliding_window] + [self.padding_shape[-1]] + else: + pad_shape = self.padding_shape + past_key = torch.zeros((pad_shape), dtype=torch.float32) + past_value = torch.zeros((pad_shape), dtype=torch.float32) pkv = (past_key, past_value) past_key_values.append(pkv) inputs["past_key_values"] = tuple(past_key_values) @@ -113,18 +120,15 @@ def update_pytorch_inputs(self, inputs, pt_outputs): """ updated_inputs = {} if self.full_batch_size: - batch_index = torch.arange(1).view(-1, 1) - 
input_ids = pt_outputs.logits.detach().argmax(2) updated_inputs["input_ids"] = torch.full((self.full_batch_size, 1), self.tokenizer.pad_token_id) - updated_inputs["input_ids"][batch_index.view(-1)] = input_ids + updated_inputs["input_ids"][inputs["batch_index"].view(-1)] = input_ids position_ids = inputs["position_ids"].max(1, keepdim=True).values + 1 updated_inputs["position_ids"] = torch.full((self.full_batch_size, 1), 0) - updated_inputs["position_ids"][batch_index.view(-1)] = position_ids - - updated_inputs["batch_index"] = torch.arange(self.full_batch_size).view(-1, 1) + updated_inputs["position_ids"][inputs["batch_index"].view(-1)] = position_ids + updated_inputs["batch_index"] = inputs["batch_index"] else: updated_inputs["input_ids"] = pt_outputs["logits"].argmax(-1).reshape(-1, 1) updated_inputs["position_ids"] = inputs["position_ids"].max(1, keepdim=True).values + 1 @@ -169,8 +173,17 @@ def prepare_ort_inputs(self): inputs["past_value." + str(i)] = np.zeros((cache_shape), dtype=np.float32) else: for i in range(self.n_layer): - inputs["past_key." + str(i)] = np.zeros((self.padding_shape), dtype=np.float32) - inputs["past_value." + str(i)] = np.zeros((self.padding_shape), dtype=np.float32) + if ( + all(hasattr(self.config, attr) for attr in ["sliding_window", "layer_types"]) + and self.config.layer_types[i] == "sliding_attention" + ): + pad_shape = self.padding_shape[:2] + [self.config.sliding_window] + [self.padding_shape[-1]] + else: + pad_shape = self.padding_shape + inputs["past_key." + str(i)] = np.zeros((pad_shape), dtype=np.float32) + inputs["past_value." + str(i)] = np.zeros((pad_shape), dtype=np.float32) + if self.full_batch_size: + inputs["batch_index"] = np.arange(self.full_batch_size).reshape(-1, 1) return inputs def update_ort_inputs(self, inputs, ort_outputs): @@ -191,7 +204,8 @@ def update_ort_inputs(self, inputs, ort_outputs): for i in range(self.n_layer): updated_inputs["past_key." 
+ str(i)] = ort_outputs["past_key_values"][i * 2] updated_inputs["past_value." + str(i)] = ort_outputs["past_key_values"][i * 2 + 1] - + if self.full_batch_size: + updated_inputs["batch_index"] = inputs["batch_index"] return updated_inputs def update_ort_outputs(self, ort_outputs): diff --git a/examples/gpt_oss.py b/examples/gpt_oss.py new file mode 100644 index 000000000..24d050e97 --- /dev/null +++ b/examples/gpt_oss.py @@ -0,0 +1,35 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +from transformers import AutoTokenizer, TextStreamer + +from QEfficient import QEFFAutoModelForCausalLM + +model_id = "openai/gpt-oss-20b" # weights are not required to convert to fp32 + +qeff_model = QEFFAutoModelForCausalLM.from_pretrained(model_id) +tokenizer = AutoTokenizer.from_pretrained(model_id) + +onnx_model_path = qeff_model.export() +qpc_path = qeff_model.compile( + prefill_seq_len=1, # Currently we can get best perf using PL=1 i.e. decode-only model, prefill optimizations are being worked on. + ctx_len=256, + num_cores=16, + mxfp6_matmul=True, + mxint8_kv_cache=True, + num_devices=8, + mos=1, + aic_enable_depth_first=True, + num_speculative_tokens=None, +) +print(f"qpc path is {qpc_path}") +streamer = TextStreamer(tokenizer) +exec_info = qeff_model.generate( + tokenizer, + prompts="Who is your creator? 
and What all you are allowed to do?", + device_id=[0, 1, 2, 3], +) diff --git a/tests/transformers/models/test_causal_lm_models.py b/tests/transformers/models/test_causal_lm_models.py index 86bce4441..321a466ab 100644 --- a/tests/transformers/models/test_causal_lm_models.py +++ b/tests/transformers/models/test_causal_lm_models.py @@ -25,6 +25,7 @@ from QEfficient.utils.test_utils import ModelConfig test_models_causal = [ + "openai/gpt-oss-20b", "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "gpt2", "Salesforce/codegen-350M-mono", @@ -76,11 +77,11 @@ def get_custom_n_layers(model_name): :return n_layer """ - if model_name in {"microsoft/Phi-3-mini-4k-instruct", "neuralmagic/Qwen2-0.5B-Instruct-FP8"}: + if model_name in {"microsoft/Phi-3-mini-4k-instruct", "neuralmagic/Qwen2-0.5B-Instruct-FP8", "openai/gpt-oss-20b"}: return 2 elif model_name in ModelConfig.SWIFTKV_MODELS: return None - return 16 + return 1 def load_causal_lm_model(model_name, n_layer=1, config=None): @@ -157,6 +158,7 @@ def check_causal_lm_pytorch_vs_kv_vs_ort_vs_ai100( """ replace_transformers_quantizers() if config is None: + n_layer = get_custom_n_layers(model_name) model_hf, _ = load_causal_lm_model(model_name, n_layer=n_layer) else: model_hf, _ = load_causal_lm_model(model_name, config=config) diff --git a/tests/transformers/test_causal_lm.py b/tests/transformers/test_causal_lm.py index bdc15519e..0810ac6ba 100644 --- a/tests/transformers/test_causal_lm.py +++ b/tests/transformers/test_causal_lm.py @@ -33,6 +33,7 @@ ("starcoder2", 256, 2, 4, 128, 512, 127, {}), ("granite", 256, 2, 4, 128, 512, 127, {"num_key_value_heads": 2}), ("olmo2", 256, 2, 4, 128, 512, 127, {"num_key_value_heads": 2}), + ("gpt_oss", 256, 3, 4, 128, 512, 127, {"num_key_value_heads": 2}), ] configs = [ @@ -177,12 +178,23 @@ def test_causal_lm_hash_creation(config, cb, tmp_path): 0: "full_batch_size" if qeff_model.continuous_batching else "batch_size", 2: "ctx_len", } + pkv_dynamic_axes = ( + qeff_model.model.get_pkv_dynamic_axes() + 
if hasattr(qeff_model.model, "get_pkv_dynamic_axes") + else pkv_dynamic_axes + ) + pkv_dynamic_axes = ( + [pkv_dynamic_axes] * qeff_model.model.config.num_hidden_layers + if isinstance(pkv_dynamic_axes, dict) + else pkv_dynamic_axes + ) output_names = [] output_names.append("logits") for i in range(qeff_model.num_layers): + pkv_dynamic_axes[i][0] = "full_batch_size" if qeff_model.continuous_batching else "batch_size" for kv in ["key", "value"]: - dynamic_axes[f"past_{kv}.{i}"] = pkv_dynamic_axes + dynamic_axes[f"past_{kv}.{i}"] = pkv_dynamic_axes[i] output_names.append(f"past_{kv}.{i}_RetainedState") if qeff_model.continuous_batching: From 9b3164e795168df0e99e14a8daaea2c62e781eb7 Mon Sep 17 00:00:00 2001 From: quic-vargupt Date: Wed, 5 Nov 2025 21:24:54 +0530 Subject: [PATCH 10/60] Update Qeff Documentation to indicate vLLM Support in Validated Models Page (#588) Signed-off-by: Varun Gupta Co-authored-by: Abhishek Kumar Singh Signed-off-by: Dhiraj Kumar Sah --- docs/source/validate.md | 84 +++++++++++++++++++---------------------- 1 file changed, 38 insertions(+), 46 deletions(-) diff --git a/docs/source/validate.md b/docs/source/validate.md index e17e85578..b5ab87629 100644 --- a/docs/source/validate.md +++ b/docs/source/validate.md @@ -4,21 +4,21 @@ ## Text-only Language Models ### Text Generation Task -**QEff Auto Class:** [`QEFFAutoModelForCausalLM`](#QEFFAutoModelForCausalLM) +**QEff Auto Class:** `QEFFAutoModelForCausalLM` -| Architecture | Model Family | Representative Models | CB Support | -|-------------------------|--------------------|--------------------------------------------------------------------------------------|------------| -| **FalconForCausalLM** | Falcon | [tiiuae/falcon-40b](https://huggingface.co/tiiuae/falcon-40b) | āœ”ļø | -| **Qwen3MoeForCausalLM** | Qwen3Moe | [Qwen/Qwen3-30B-A3B-Instruct-2507](https://huggingface.co/Qwen/Qwen3-30B-A3B-Instruct-2507) | āœ”ļø | +| Architecture | Model Family | Representative Models | [vLLM 
Support](https://quic.github.io/cloud-ai-sdk-pages/latest/Getting-Started/Installation/vLLM/vLLM/index.html) | +|-------------------------|--------------------|--------------------------------------------------------------------------------------|--------------| +| **FalconForCausalLM** | Falcon** | [tiiuae/falcon-40b](https://huggingface.co/tiiuae/falcon-40b) | āœ”ļø | +| **Qwen3MoeForCausalLM** | Qwen3Moe | [Qwen/Qwen3-30B-A3B-Instruct-2507](https://huggingface.co/Qwen/Qwen3-30B-A3B-Instruct-2507) | āœ• | | **GemmaForCausalLM** | CodeGemma | [google/codegemma-2b](https://huggingface.co/google/codegemma-2b)
[google/codegemma-7b](https://huggingface.co/google/codegemma-7b) | āœ”ļø | -| | Gemma | [google/gemma-2b](https://huggingface.co/google/gemma-2b)
[google/gemma-7b](https://huggingface.co/google/gemma-7b)
[google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)
[google/gemma-2-9b](https://huggingface.co/google/gemma-2-9b)
[google/gemma-2-27b](https://huggingface.co/google/gemma-2-27b) | āœ”ļø | +| | Gemma*** | [google/gemma-2b](https://huggingface.co/google/gemma-2b)
[google/gemma-7b](https://huggingface.co/google/gemma-7b)
[google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b)
[google/gemma-2-9b](https://huggingface.co/google/gemma-2-9b)
[google/gemma-2-27b](https://huggingface.co/google/gemma-2-27b) | āœ”ļø | | **GPTBigCodeForCausalLM** | Starcoder1.5 | [bigcode/starcoder](https://huggingface.co/bigcode/starcoder) | āœ”ļø | | | Starcoder2 | [bigcode/starcoder2-15b](https://huggingface.co/bigcode/starcoder2-15b) | āœ”ļø | | **GPTJForCausalLM** | GPT-J | [EleutherAI/gpt-j-6b](https://huggingface.co/EleutherAI/gpt-j-6b) | āœ”ļø | | **GPT2LMHeadModel** | GPT-2 | [openai-community/gpt2](https://huggingface.co/openai-community/gpt2) | āœ”ļø | | **GraniteForCausalLM** | Granite 3.1 | [ibm-granite/granite-3.1-8b-instruct](https://huggingface.co/ibm-granite/granite-3.1-8b-instruct)
[ibm-granite/granite-guardian-3.1-8b](https://huggingface.co/ibm-granite/granite-guardian-3.1-8b) | āœ”ļø | | | Granite 20B | [ibm-granite/granite-20b-code-base-8k](https://huggingface.co/ibm-granite/granite-20b-code-base-8k)
[ibm-granite/granite-20b-code-instruct-8k](https://huggingface.co/ibm-granite/granite-20b-code-instruct-8k) | āœ”ļø | -| **InternVLChatModel** | Intern-VL | [OpenGVLab/InternVL2_5-1B](https://huggingface.co/OpenGVLab/InternVL2_5-1B) | | +| **InternVLChatModel** | Intern-VL | [OpenGVLab/InternVL2_5-1B](https://huggingface.co/OpenGVLab/InternVL2_5-1B) | āœ”ļø | | | | **LlamaForCausalLM** | CodeLlama | [codellama/CodeLlama-7b-hf](https://huggingface.co/codellama/CodeLlama-7b-hf)
[codellama/CodeLlama-13b-hf](https://huggingface.co/codellama/CodeLlama-13b-hf)
[codellama/CodeLlama-34b-hf](https://huggingface.co/codellama/CodeLlama-34b-hf) | āœ”ļø | | | DeepSeek-R1-Distill-Llama | [deepseek-ai/DeepSeek-R1-Distill-Llama-70B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B) | āœ”ļø | | | InceptionAI-Adapted | [inceptionai/jais-adapted-7b](https://huggingface.co/inceptionai/jais-adapted-7b)
[inceptionai/jais-adapted-13b-chat](https://huggingface.co/inceptionai/jais-adapted-13b-chat)
[inceptionai/jais-adapted-70b](https://huggingface.co/inceptionai/jais-adapted-70b) | āœ”ļø | @@ -31,45 +31,42 @@ | **MistralForCausalLM** | Mistral | [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) | āœ”ļø | | **MixtralForCausalLM** | Codestral
Mixtral | [mistralai/Codestral-22B-v0.1](https://huggingface.co/mistralai/Codestral-22B-v0.1)
[mistralai/Mixtral-8x7B-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1) | āœ”ļø | | **MPTForCausalLM** | MPT | [mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b) | āœ”ļø | -| **Phi3ForCausalLM** | Phi-3, Phi-3.5 | [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) | āœ”ļø | +| **Phi3ForCausalLM** | Phi-3**, Phi-3.5** | [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) | āœ”ļø | | **QwenForCausalLM** | DeepSeek-R1-Distill-Qwen | [DeepSeek-R1-Distill-Qwen-32B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B) | āœ”ļø | | | Qwen2, Qwen2.5 | [Qwen/Qwen2-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2-1.5B-Instruct) | āœ”ļø | | **LlamaSwiftKVForCausalLM** | swiftkv | [Snowflake/Llama-3.1-SwiftKV-8B-Instruct](https://huggingface.co/Snowflake/Llama-3.1-SwiftKV-8B-Instruct) | āœ”ļø | -| **Grok1ModelForCausalLM** | grok-1 | [hpcai-tech/grok-1](https://huggingface.co/hpcai-tech/grok-1) | āœ”ļø | - ---- - +| **Grok1ModelForCausalLM** | grok-1 | [hpcai-tech/grok-1](https://huggingface.co/hpcai-tech/grok-1) | āœ• | +- ** set "trust-remote-code" flag to True for e2e inference with vLLM +- *** pass "disable-sliding-window" flag for e2e inference of Gemma-2 family of models with vLLM ## Embedding Models ### Text Embedding Task -**QEff Auto Class:** [`QEFFAutoModel`](#QEFFAutoModel) - -| Architecture | Model Family | Representative Models | -|--------------|--------------|---------------------------------| -| **BertModel** | BERT-based | [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5)
[BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5)
[BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5)
[e5-large-v2](https://huggingface.co/intfloat/e5-large-v2) | -| **LlamaModel** | Llama-based | [intfloat/e5-mistral-7b-instruct](https://huggingface.co/intfloat/e5-mistral-7b-instruct) | -| **MPNetForMaskedLM** | MPNet | [sentence-transformers/multi-qa-mpnet-base-cos-v1](https://huggingface.co/sentence-transformers/multi-qa-mpnet-base-cos-v1) | -| **MistralModel** | Mistral | [e5-mistral-7b-instruct](https://huggingface.co/intfloat/e5-mistral-7b-instruct) | -| **NomicBertModel** | NomicBERT | [nomic-embed-text-v1.5](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) | -| **Qwen2ForCausalLM** | Qwen2 | [stella_en_1.5B_v5](https://huggingface.co/NovaSearch/stella_en_1.5B_v5) | -| **RobertaModel** | RoBERTa | [ibm-granite/granite-embedding-30m-english](https://huggingface.co/ibm-granite/granite-embedding-30m-english)
[ibm-granite/granite-embedding-125m-english](https://huggingface.co/ibm-granite/granite-embedding-125m-english) | -| **XLMRobertaForSequenceClassification** | XLM-RoBERTa | [bge-reranker-v2-m3bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3) | -| **XLMRobertaModel** | XLM-RoBERTa |[ibm-granite/granite-embedding-107m-multilingual](https://huggingface.co/ibm-granite/granite-embedding-107m-multilingual)
[ibm-granite/granite-embedding-278m-multilingual](https://huggingface.co/ibm-granite/granite-embedding-278m-multilingual) | - ---- +**QEff Auto Class:** `QEFFAutoModel` + +| Architecture | Model Family | Representative Models | vLLM Support | +|--------------|--------------|---------------------------------|--------------| +| **BertModel** | BERT-based | [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5)
[BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5)
[BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5)
[e5-large-v2](https://huggingface.co/intfloat/e5-large-v2) | āœ”ļø | +| **MPNetForMaskedLM** | MPNet | [sentence-transformers/multi-qa-mpnet-base-cos-v1](https://huggingface.co/sentence-transformers/multi-qa-mpnet-base-cos-v1) | āœ• | +| **MistralModel** | Mistral | [e5-mistral-7b-instruct](https://huggingface.co/intfloat/e5-mistral-7b-instruct) | āœ• | +| **NomicBertModel** | NomicBERT | [nomic-embed-text-v1.5](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) | āœ• | +| **Qwen2ForCausalLM** | Qwen2 | [stella_en_1.5B_v5](https://huggingface.co/NovaSearch/stella_en_1.5B_v5) | āœ”ļø | +| **RobertaModel** | RoBERTa | [ibm-granite/granite-embedding-30m-english](https://huggingface.co/ibm-granite/granite-embedding-30m-english)
[ibm-granite/granite-embedding-125m-english](https://huggingface.co/ibm-granite/granite-embedding-125m-english) | āœ”ļø | +| **XLMRobertaForSequenceClassification** | XLM-RoBERTa | [bge-reranker-v2-m3bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3) | āœ• | +| **XLMRobertaModel** | XLM-RoBERTa |[ibm-granite/granite-embedding-107m-multilingual](https://huggingface.co/ibm-granite/granite-embedding-107m-multilingual)
[ibm-granite/granite-embedding-278m-multilingual](https://huggingface.co/ibm-granite/granite-embedding-278m-multilingual) | āœ”ļø | ## Multimodal Language Models ### Vision-Language Models (Text + Image Generation) -**QEff Auto Class:** [`QEFFAutoModelForImageTextToText`](#QEFFAutoModelForImageTextToText) +**QEff Auto Class:** `QEFFAutoModelForImageTextToText` -| Architecture | Model Family | Representative Models | CB Support | Single Qpc Support | Dual Qpc Support | -|-----------------------------|--------------|----------------------------------------------------------------------------------------|------------|--------------------|------------------| -| **LlavaForConditionalGeneration** | LLaVA-1.5 | [llava-hf/llava-1.5-7b-hf](https://huggingface.co/llava-hf/llava-1.5-7b-hf) | āœ• | āœ”ļø | āœ”ļø | -| **MllamaForConditionalGeneration** | Llama 3.2 | [meta-llama/Llama-3.2-11B-Vision Instruct](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct)
[meta-llama/Llama-3.2-90B-Vision](https://huggingface.co/meta-llama/Llama-3.2-90B-Vision) | āœ• | āœ”ļø | āœ”ļø | -|**LlavaNextForConditionalGeneration** | Granite Vision | [ibm-granite/granite-vision-3.2-2b](https://huggingface.co/ibm-granite/granite-vision-3.2-2b) | āœ• | āœ• | āœ”ļø | -|**Llama4ForConditionalGeneration** | Llama-4-Scout | [Llama-4-Scout-17B-16E-Instruct](https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct) | āœ• | āœ”ļø | āœ”ļø | -|**Gemma3ForConditionalGeneration** | Gemma3 | [google/gemma-3-4b-it](https://huggingface.co/google/gemma-3-4b-it)| āœ• | āœ”ļø | āœ”ļø | +| Architecture | Model Family | Representative Models | Qeff Single Qpc | Qeff Dual Qpc | vllm Single Qpc | vllm Dual Qpc | +|------------------------------------|--------------|----------------------------------------------------------------------------------------|------------|---------------------|-------------------|-----------------| +| **LlavaForConditionalGeneration** | LLaVA-1.5 | [llava-hf/llava-1.5-7b-hf](https://huggingface.co/llava-hf/llava-1.5-7b-hf) | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | +| **MllamaForConditionalGeneration** | Llama 3.2 | [meta-llama/Llama-3.2-11B-Vision Instruct](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct)
[meta-llama/Llama-3.2-90B-Vision-Instruct](https://huggingface.co/meta-llama/Llama-3.2-90B-Vision-Instruct) | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | +| **LlavaNextForConditionalGeneration** | Granite Vision | [ibm-granite/granite-vision-3.2-2b](https://huggingface.co/ibm-granite/granite-vision-3.2-2b) | āœ• | āœ”ļø | āœ• | āœ”ļø | +| **Llama4ForConditionalGeneration** | Llama-4-Scout | [Llama-4-Scout-17B-16E-Instruct](https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct) | āœ”ļø | āœ”ļø | āœ”ļø | āœ”ļø | +| **Gemma3ForConditionalGeneration** | Gemma3*** | [google/gemma-3-4b-it](https://huggingface.co/google/gemma-3-4b-it) | āœ”ļø | āœ”ļø | āœ”ļø | āœ• | +- *** pass "disable-sliding-window" flag for e2e inference with vLLM **Dual QPC:** @@ -85,25 +82,20 @@ In the Dual QPC(Qualcomm Program Container) setup, the model is split across two **Single QPC:** In the single QPC(Qualcomm Program Container) setup, the entire model—including both image encoding and text generation—runs within a single QPC. There is no model splitting, and all components operate within the same execution environment. -**For more details click [here](#QEFFAutoModelForImageTextToText)** -```{NOTE} + +**Note:** The choice between Single and Dual QPC is determined during model instantiation using the `kv_offload` setting. If the `kv_offload` is set to `True` it runs in dual QPC and if its set to `False` model runs in single QPC mode. -``` --- - ### Audio Models (Automatic Speech Recognition) - Transcription Task +**QEff Auto Class:** `QEFFAutoModelForSpeechSeq2Seq` -**QEff Auto Class:** [`QEFFAutoModelForSpeechSeq2Seq`](#QEFFAutoModelForSpeechSeq2Seq) - -| Architecture | Model Family | Representative Models | -|--------------|--------------|----------------------------------------------------------------------------------------| -| **Whisper** | Whisper | [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny)
[openai/whisper-base](https://huggingface.co/openai/whisper-base)
[openai/whisper-small](https://huggingface.co/openai/whisper-small)
[openai/whisper-medium](https://huggingface.co/openai/whisper-medium)
[openai/whisper-large](https://huggingface.co/openai/whisper-large)
[openai/whisper-large-v3-turbo](https://huggingface.co/openai/whisper-large-v3-turbo) | - ---- +| Architecture | Model Family | Representative Models | vLLM Support | +|--------------|--------------|----------------------------------------------------------------------------------------|--------------| +| **Whisper** | Whisper | [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny)
[openai/whisper-base](https://huggingface.co/openai/whisper-base)
[openai/whisper-small](https://huggingface.co/openai/whisper-small)
[openai/whisper-medium](https://huggingface.co/openai/whisper-medium)
[openai/whisper-large](https://huggingface.co/openai/whisper-large)
[openai/whisper-large-v3-turbo](https://huggingface.co/openai/whisper-large-v3-turbo) | āœ”ļø | (models_coming_soon)= # Models Coming Soon From e6ac655328c039e4a87c64f918e6e8331d1398aa Mon Sep 17 00:00:00 2001 From: Tanisha Chawada Date: Wed, 5 Nov 2025 22:23:42 +0530 Subject: [PATCH 11/60] Adding support to load checkpoints from epoch (#606) Signed-off-by: Tanisha Signed-off-by: Dhiraj Kumar Sah --- QEfficient/finetune/utils/train_utils.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/QEfficient/finetune/utils/train_utils.py b/QEfficient/finetune/utils/train_utils.py index e9e1320de..6113f2b03 100644 --- a/QEfficient/finetune/utils/train_utils.py +++ b/QEfficient/finetune/utils/train_utils.py @@ -123,11 +123,19 @@ def train( break if train_config.use_peft and train_config.from_peft_checkpoint: - intermediate_epoch = int(train_config.from_peft_checkpoint.split("/")[-2].split("_")[-1]) - 1 - intermediate_step = int(train_config.from_peft_checkpoint.split("/")[-1].split("_")[-1]) + try: + intermediate_epoch = int(train_config.from_peft_checkpoint.split("/")[-2].split("_")[-1]) - 1 + intermediate_step = int(train_config.from_peft_checkpoint.split("/")[-1].split("_")[-1]) + except (IndexError, ValueError): + intermediate_epoch = int(train_config.from_peft_checkpoint.split("/")[-1].split("_")[-1]) - 1 + intermediate_step = 0 + if epoch < intermediate_epoch: logger.log_rank_zero(f"Skipping epoch {epoch + 1} since fine tuning has already completed for it.") continue + if intermediate_step == 0 and epoch == intermediate_epoch: + logger.log_rank_zero(f"Skipping epoch {epoch + 1}, since fine tuning has already completed for it.") + continue logger.log_rank_zero(f"Starting epoch {epoch + 1}/{train_config.num_epochs}") if max_steps_reached: @@ -154,6 +162,7 @@ def train( # resume training from a particular checkpoint, assuming the dataset is not shuffled if train_config.use_peft and train_config.from_peft_checkpoint: # to bring the count of 
train_step in sync with where it left off + if epoch == intermediate_epoch and step == 0: logger.log_rank_zero( f"Skipping first {intermediate_step} steps for epoch {epoch + 1}, since fine tuning has already completed for it." From 1d3eebf52a095190843fad8df8e02718142e5c40 Mon Sep 17 00:00:00 2001 From: Tanisha Chawada Date: Tue, 11 Nov 2025 09:55:05 +0530 Subject: [PATCH 12/60] "[QEff. Finetune]: Support for resuming checkpoints using Epoch" (#614) Signed-off-by: Tanisha Signed-off-by: Dhiraj Kumar Sah --- QEfficient/finetune/utils/train_utils.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/QEfficient/finetune/utils/train_utils.py b/QEfficient/finetune/utils/train_utils.py index 6113f2b03..45b995124 100644 --- a/QEfficient/finetune/utils/train_utils.py +++ b/QEfficient/finetune/utils/train_utils.py @@ -123,11 +123,12 @@ def train( break if train_config.use_peft and train_config.from_peft_checkpoint: + path = train_config.from_peft_checkpoint.rstrip("/") try: - intermediate_epoch = int(train_config.from_peft_checkpoint.split("/")[-2].split("_")[-1]) - 1 - intermediate_step = int(train_config.from_peft_checkpoint.split("/")[-1].split("_")[-1]) + intermediate_epoch = int(path.split("/")[-2].split("_")[-1]) - 1 + intermediate_step = int(path.split("/")[-1].split("_")[-1]) except (IndexError, ValueError): - intermediate_epoch = int(train_config.from_peft_checkpoint.split("/")[-1].split("_")[-1]) - 1 + intermediate_epoch = int(path.split("/")[-1].split("_")[-1]) - 1 intermediate_step = 0 if epoch < intermediate_epoch: @@ -374,7 +375,7 @@ def train( eval_step_metric, eval_metric, ) - avg_epoch_time = sum(epoch_times) / len(epoch_times) + avg_epoch_time = sum(epoch_times) / len(epoch_times) if len(epoch_times) > 0 else 0 avg_checkpoint_time = sum(checkpoint_times) / len(checkpoint_times) if len(checkpoint_times) > 0 else 0 results["last_epoch_train_loss"] = train_epoch_loss.cpu() From 9d535718f86bcc7514999732fc16471d6a40d585 Mon Sep 17 
00:00:00 2001 From: Abukhoyer Shaik Date: Thu, 13 Nov 2025 12:12:16 +0530 Subject: [PATCH 13/60] [Upgradation]: onnx opset version updated from 13 to 17 (#587) This pull request is created for updating the _onnx opset_ version to 17 from 13. ## Testing Below are the models I have tested: ### Causal Models - TinyLlama/TinyLlama-1.1B-Chat-v1.0 - gpt2 - Salesforce/codegen-350M-mono - microsoft/Phi-3-mini-4k-instruct - tiiuae/falcon-7b - Qwen/Qwen2-0.5B - Qwen/Qwen3-0.6B - bigcode/starcoder2-3b - Qwen/Qwen3-30B-A3B-Instruct-2507 - Felladrin/Minueza-32M-Base - wtang06/mpt-125m-c4 - hakurei/gpt-j-random-tinier - mistralai/Mixtral-8x7B-Instruct-v0.1 - meta-llama/Llama-3.2-1B - unsloth/gemma-2b - unsloth/gemma-2-2b - TheBloke/TinyLlama-1.1B-Chat-v0.3-AWQ - TheBloke/Llama-2-7B-GPTQ - ibm-granite/granite-20b-code-base - neuralmagic/Llama-3.2-3B-Instruct-FP8 - neuralmagic/Qwen2-0.5B-Instruct-FP8 - ibm-granite/granite-3.1-2b-instruct - ibm-granite/granite-guardian-3.1-2b - hpcai-tech/grok-1 - Snowflake/Llama-3.1-SwiftKV-8B-Instruct - allenai/OLMo-2-0425-1B ### Embedding Models - BAAI/bge-base-en-v1.5 - BAAI/bge-large-en-v1.5 - BAAI/bge-small-en-v1.5 - intfloat/e5-large-v2 - sentence-transformers/multi-qa-mpnet-base-cos-v1 - ibm-granite/granite-embedding-30m-english - ibm-granite/granite-embedding-125m-english - BAAI/bge-reranker-v2-m3 - ibm-granite/granite-embedding-107m-multilingual - ibm-granite/granite-embedding-278m-multilingual ### Vision Models - llava-hf/llava-1.5-7b-hf - OpenGVLab/InternVL2_5-1B - meta-llama/Llama-3.2-11B-Vision-Instruct - ibm-granite/granite-vision-3.2-2b - meta-llama/Llama-4-Scout-17B-16E-Instruct - google/gemma-3-4b-it ### Audio Models - openai/whisper-tiny - openai/whisper-base - openai/whisper-small - openai/whisper-medium - openai/whisper-large - openai/whisper-large-v3-turbo --------- Signed-off-by: Abukhoyer Shaik Signed-off-by: Dhiraj Kumar Sah --- QEfficient/exporter/export_utils.py | 3 ++- QEfficient/utils/constants.py | 3 +-- 2 files 
changed, 3 insertions(+), 3 deletions(-) diff --git a/QEfficient/exporter/export_utils.py b/QEfficient/exporter/export_utils.py index f86a0f254..eec756e4b 100644 --- a/QEfficient/exporter/export_utils.py +++ b/QEfficient/exporter/export_utils.py @@ -18,6 +18,7 @@ from onnx import external_data_helper from QEfficient.base.onnx_transforms import FP16ClipTransform +from QEfficient.utils import constants def export_onnx( @@ -97,7 +98,7 @@ def export_onnx( input_names=input_names, output_names=output_names, dynamic_axes=dynamic_axes, - opset_version=13, + opset_version=constants.ONNX_EXPORT_OPSET, custom_opsets={"com.qti.aisw.onnx": 1}, ) except Exception as e: diff --git a/QEfficient/utils/constants.py b/QEfficient/utils/constants.py index 5f7a4db7b..1504bdae5 100644 --- a/QEfficient/utils/constants.py +++ b/QEfficient/utils/constants.py @@ -17,7 +17,6 @@ ONNX_EXPORT_EXAMPLE_SEQ_LEN = 32 ONNX_EXPORT_EXAMPLE_FBS = 4 ONNX_EXPORT_EXAMPLE_NLK = 2 # Number of Logits to Keep -ONNX_EXPORT_OPSET = 13 ONNX_EXPORT_MAX_NUM_IMAGES = 1 ONNX_EXPORT_MAX_IMAGE_TILES = 4 ONNX_EXPORT_IMAGE_WIDTH = 560 @@ -84,7 +83,7 @@ def get_models_dir(): ONNX_EXPORT_EXAMPLE_MAX_TOP_K_IDS = 512 ONNX_EXPORT_EXAMPLE_TOP_PS = 0.80 ONNX_EXPORT_EXAMPLE_MIN_PS = 0.99 -ONNX_EXPORT_OPSET = 13 +ONNX_EXPORT_OPSET = 17 COMPILER = ["/opt/qti-aic/exec/qaic-exec", "-aic-hw"] DEFAULT_AIC_HW_VERSION = "ai100" From 435895fc29d906c321990958fd577e86e6d6b54d Mon Sep 17 00:00:00 2001 From: Abukhoyer Shaik Date: Fri, 14 Nov 2025 08:43:13 +0530 Subject: [PATCH 14/60] [Docs]: Readme Fix (#617) Signed-off-by: Abukhoyer Shaik Signed-off-by: Dhiraj Kumar Sah --- README.md | 4 ++-- docs/source/supported_features.rst | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 40d64116c..8972e5b56 100644 --- a/README.md +++ b/README.md @@ -108,8 +108,8 @@ For more details about using ``QEfficient`` via Cloud AI 100 Apps SDK, visit [Li ## Documentation -* [Quick Start 
Guide](https://quic.github.io/efficient-transformers/source/quick_start.html#) -* [Python API](https://quic.github.io/efficient-transformers/source/hl_api.html) +* [Quick Start Guide](https://quic.github.io/efficient-transformers/source/quick_start.html) +* [QEFF API](https://quic.github.io/efficient-transformers/source/qeff_autoclasses.html) * [Validated Models](https://quic.github.io/efficient-transformers/source/validate.html) * [Models coming soon](https://quic.github.io/efficient-transformers/source/validate.html#models-coming-soon) diff --git a/docs/source/supported_features.rst b/docs/source/supported_features.rst index 4177b451f..9715da982 100644 --- a/docs/source/supported_features.rst +++ b/docs/source/supported_features.rst @@ -30,6 +30,8 @@ Supported Features - Enables execution with FP8 precision, significantly improving performance and reducing memory usage for computational tasks. * - Prefill caching - Enhances inference speed by caching key-value pairs for shared prefixes, reducing redundant computations and improving efficiency. + * - On Device Sampling + - Enables sampling operations to be executed directly on the QAIC device rather than the host CPU for QEffForCausalLM models. This enhancement significantly reduces host-device communication overhead and improves inference throughput and scalability. Refer `sample script `_ for more **details**. * - Prompt-Lookup Decoding - Speeds up text generation by using overlapping parts of the input prompt and the generated text, making the process faster without losing quality. Refer `sample script `_ for more **details**. * - :ref:`PEFT LoRA support ` From c7494ce7448a9c9ea043d34db684d7033c9fc0ea Mon Sep 17 00:00:00 2001 From: vjanfaza Date: Thu, 13 Nov 2025 19:17:04 -0800 Subject: [PATCH 15/60] Adding Compute-Context-Length (CCL) (#576) Compute-Context-Length (CCL) technique optimizes the throughput of large language models (LLMs) on Qualcomm devices when handling very large context lengths. 
The current Ahead Of Time (AOT) compilation on Qualcomm devices doesn't predict the number of tokens needed, leading to significant throughput drops during the prefilling and the decoding phases. This happens because the system performs attention calculations based on large context length. To address this issue, we introduce Compute Context Length (CCL), an additional ONNX variable that allows for dynamic context-length specialization. By generating tokens using smaller, more manageable context lengths (CCL), we optimize memory reads and attention calculations, thereby improving throughput. --------- Signed-off-by: Vahid Janfaza Signed-off-by: Dhiraj Kumar Sah --- QEfficient/cloud/infer.py | 12 + QEfficient/customop/ctx_scatter_gather.py | 16 +- QEfficient/customop/ctx_scatter_gather_cb.py | 18 +- .../generation/text_generation_inference.py | 76 +++++ QEfficient/generation/vlm_generation.py | 16 + QEfficient/peft/lora/layers.py | 6 +- QEfficient/transformers/cache_utils.py | 50 +-- .../models/codegen/modeling_codegen.py | 10 + .../models/falcon/modeling_falcon.py | 10 + .../models/gemma/modeling_gemma.py | 10 + .../models/gemma2/modeling_gemma2.py | 17 +- .../models/gemma3/modeling_gemma3.py | 123 ++++++-- .../transformers/models/gpt2/modeling_gpt2.py | 11 + .../gpt_bigcode/modeling_gpt_bigcode.py | 10 + .../models/gpt_oss/modeling_gpt_oss.py | 10 + .../transformers/models/gptj/modeling_gptj.py | 10 + .../models/granite/modeling_granite.py | 15 +- .../models/granitemoe/modeling_granitemoe.py | 9 + .../models/grok_1/modeling_grok1.py | 10 + .../models/internvl/modeling_internvl.py | 112 +++++-- .../models/llama/modeling_llama.py | 10 + .../models/llama4/modeling_llama4.py | 165 +++++++--- .../llama_swiftkv/modeling_llama_swiftkv.py | 29 +- .../models/llava/modeling_llava.py | 104 +++++-- .../models/llava_next/modeling_llava_next.py | 112 +++++-- .../models/mistral/modeling_mistral.py | 10 + .../models/mistral3/modeling_mistral3.py | 98 ++++-- 
.../models/mixtral_moe/modeling_mixtral.py | 10 + .../models/mllama/modeling_mllama.py | 100 ++++-- .../transformers/models/modeling_auto.py | 246 +++++++++++++-- .../models/molmo/modeling_molmo.py | 148 +++++++-- .../transformers/models/mpt/modeling_mpt.py | 10 + .../models/olmo2/modeling_olmo2.py | 10 + .../transformers/models/phi/modeling_phi.py | 17 +- .../transformers/models/phi3/modeling_phi3.py | 10 + .../models/qwen2/modeling_qwen2.py | 10 + .../models/qwen2_5_vl/modeling_qwen2_5_vl.py | 134 ++++++-- .../models/qwen3/modeling_qwen3.py | 10 + .../models/qwen3_moe/modeling_qwen3_moe.py | 10 + .../models/starcoder2/modeling_starcoder2.py | 10 + .../models/whisper/modeling_whisper.py | 12 + QEfficient/utils/check_ccl_specializations.py | 51 ++++ examples/ccl_gpt_oss.py | 51 ++++ examples/ccl_image_text_to_text_inference.py | 137 +++++++++ examples/ccl_llama4_CB_example_vision_lang.py | 109 +++++++ examples/ccl_llama4_example.py | 128 ++++++++ examples/ccl_llama4_multi_image_example.py | 89 ++++++ examples/ccl_mistral3_example.py | 123 ++++++++ examples/ccl_molmo_example.py | 100 ++++++ examples/ccl_qwen2_5_vl_CB.py | 81 +++++ examples/ccl_qwen2_5_vl_example.py | 152 +++++++++ examples/compute_context_length.py | 70 +++++ examples/gemma3_example/ccl_gemma3_mm.py | 121 ++++++++ .../ccl_granite_vision_inference.py | 129 ++++++++ .../intern_example/ccl_internvl_inference.py | 288 ++++++++++++++++++ .../ccl_qwen3moe_inference.py | 48 +++ 56 files changed, 3189 insertions(+), 304 deletions(-) create mode 100644 QEfficient/utils/check_ccl_specializations.py create mode 100644 examples/ccl_gpt_oss.py create mode 100644 examples/ccl_image_text_to_text_inference.py create mode 100644 examples/ccl_llama4_CB_example_vision_lang.py create mode 100644 examples/ccl_llama4_example.py create mode 100644 examples/ccl_llama4_multi_image_example.py create mode 100644 examples/ccl_mistral3_example.py create mode 100644 examples/ccl_molmo_example.py create mode 100644 
examples/ccl_qwen2_5_vl_CB.py create mode 100644 examples/ccl_qwen2_5_vl_example.py create mode 100644 examples/compute_context_length.py create mode 100644 examples/gemma3_example/ccl_gemma3_mm.py create mode 100644 examples/granite_example/ccl_granite_vision_inference.py create mode 100644 examples/intern_example/ccl_internvl_inference.py create mode 100644 examples/qwen3moe_example/ccl_qwen3moe_inference.py diff --git a/QEfficient/cloud/infer.py b/QEfficient/cloud/infer.py index 814122b9d..fbff5b18b 100644 --- a/QEfficient/cloud/infer.py +++ b/QEfficient/cloud/infer.py @@ -340,6 +340,18 @@ def main( "--prompt-len", "--prompt_len", default=32, type=int, help="Sequence length for text generation." ) parser.add_argument("--ctx-len", "--ctx_len", default=128, type=int, help="Context length for text generation.") + parser.add_argument( + "--comp-ctx-lengths-prefill", + type=lambda comp_ctx_lengths_prefill: [int(x) for x in comp_ctx_lengths_prefill.split(",")], + default=[512], + help="Define ccl list in csv format (e.g.,--comp-ctx-lengths 512,1024,2048).", + ) + parser.add_argument( + "--comp-ctx-lengths-decode", + type=lambda comp_ctx_lengths_decode: [int(x) for x in comp_ctx_lengths_decode.split(",")], + default=[2048], + help="Define ccl list in csv format (e.g.,--comp-ctx-lengths 512,1024,2048).", + ) parser.add_argument( "--mxfp6", "--mxfp6_matmul", diff --git a/QEfficient/customop/ctx_scatter_gather.py b/QEfficient/customop/ctx_scatter_gather.py index c4f5a7bbd..269ccb0be 100644 --- a/QEfficient/customop/ctx_scatter_gather.py +++ b/QEfficient/customop/ctx_scatter_gather.py @@ -115,8 +115,14 @@ def symbolic(g: torch.Graph, data: torch.Value, ctx_indices: torch.Value) -> tor @onnxscript.script(onnxscript.values.Opset("com.qualcomm.cloud", 1)) -def CtxGather(data: onnxscript.FLOAT, ctx_indices: onnxscript.INT32) -> onnxscript.FLOAT: - ctx_indices = ops.Expand(ctx_indices, ops.Slice(ops.Shape(data), starts=[0], ends=[3], axes=[0])) +def CtxGather( + data: 
onnxscript.FLOAT, ctx_indices: onnxscript.INT32, comp_ctx_len: onnxscript.INT32 +) -> onnxscript.FLOAT: + # Create a shape tensor based on comp_ctx_len + shape_tensor = ops.Concat(ops.Shape(data)[:2], ops.Reshape(comp_ctx_len, [1]), axis=0) + + # Directly use the shape tensor without validation + ctx_indices = ops.Expand(ctx_indices, shape_tensor) ctx_indices = ops.Unsqueeze(ctx_indices, [-1]) return ops.GatherND(data, ctx_indices, batch_dims=2) @@ -127,7 +133,7 @@ class CtxGatherFunc(torch.autograd.Function): """ @staticmethod - def forward(data: torch.Tensor, ctx_indices: torch.Tensor): + def forward(data: torch.Tensor, ctx_indices: torch.Tensor, comp_ctx_len: int): batch_indices = torch.arange(data.shape[0]).view(-1, 1, 1) head_indices = torch.arange(data.shape[1]).view(1, -1, 1) return data[batch_indices, head_indices, ctx_indices] @@ -137,5 +143,5 @@ def setup_context(ctx, inputs, outputs): pass @staticmethod - def symbolic(g: torch.Graph, data: torch.Value, ctx_indices: torch.Value) -> torch.Value: - return g.onnxscript_op(CtxGather, data, ctx_indices).setTypeAs(data) + def symbolic(g: torch.Graph, data: torch.Value, ctx_indices: torch.Value, comp_ctx_len: int) -> torch.Value: + return g.onnxscript_op(CtxGather, data, ctx_indices, comp_ctx_len).setTypeAs(data) diff --git a/QEfficient/customop/ctx_scatter_gather_cb.py b/QEfficient/customop/ctx_scatter_gather_cb.py index 75d9a12ef..cc9693716 100644 --- a/QEfficient/customop/ctx_scatter_gather_cb.py +++ b/QEfficient/customop/ctx_scatter_gather_cb.py @@ -97,16 +97,20 @@ def symbolic( @onnxscript.script(onnxscript.values.Opset("com.qualcomm.cloud", 1)) def CtxGatherCB( - data: onnxscript.FLOAT, batch_index: onnxscript.INT32, ctx_indices: onnxscript.INT32 + data: onnxscript.FLOAT, batch_index: onnxscript.INT32, ctx_indices: onnxscript.INT32, comp_ctx_len: onnxscript.INT32 ) -> onnxscript.FLOAT: batch_size = ops.Gather(ops.Shape(batch_index), [0]) num_heads = ops.Gather(ops.Shape(data), [1]) - ctx_len = 
ops.Gather(ops.Shape(data), [2]) + # using compute-context-length (CCL) instead of context-length to do gather process based on CCL and later do attention computations based on CCL as well. + ctx_len = ops.Reshape(comp_ctx_len, [1]) # Expanded shape to create indices zero = ops.Constant(value_ints=[0]) one = ops.Constant(value_ints=[1]) - exp_shape = ops.Concat(batch_size, num_heads, ctx_len, one, axis=0) + # exp_shape = ops.Concat(batch_size, num_heads, ctx_len, one, axis=0) + exp_shape = ops.Concat( + ops.Reshape(batch_size, [1]), ops.Reshape(num_heads, [1]), ops.Reshape(ctx_len, [1]), one, axis=0 + ) # Create indices batch_idx = ops.Expand(ops.Unsqueeze(batch_index, [2, 3]), exp_shape) @@ -119,7 +123,7 @@ def CtxGatherCB( class CtxGatherFuncCB(torch.autograd.Function): @staticmethod - def forward(data: torch.Tensor, batch_index: torch.Tensor, ctx_indices: torch.Tensor): + def forward(data: torch.Tensor, batch_index: torch.Tensor, ctx_indices: torch.Tensor, comp_ctx_len: int): batch_indices = batch_index.view(-1, 1, 1) head_indices = torch.arange(data.shape[1]).view(1, -1, 1) return data[batch_indices, head_indices, ctx_indices] @@ -129,8 +133,10 @@ def setup_context(ctx, inputs, outputs): pass @staticmethod - def symbolic(g: torch.Graph, data: torch.Value, batch_index: torch.Value, ctx_indices: torch.Value) -> torch.Value: - return g.onnxscript_op(CtxGatherCB, data, batch_index, ctx_indices).setTypeAs(data) + def symbolic( + g: torch.Graph, data: torch.Value, batch_index: torch.Value, ctx_indices: torch.Value, comp_ctx_len: int + ) -> torch.Value: + return g.onnxscript_op(CtxGatherCB, data, batch_index, ctx_indices, comp_ctx_len).setTypeAs(data) @onnxscript.script(onnxscript.values.Opset("com.qualcomm.cloud", 1)) diff --git a/QEfficient/generation/text_generation_inference.py b/QEfficient/generation/text_generation_inference.py index e96908824..7da2300d6 100755 --- a/QEfficient/generation/text_generation_inference.py +++ 
b/QEfficient/generation/text_generation_inference.py @@ -318,6 +318,8 @@ def cloud_ai_100_exec_kv( prompts_txt_file_path: Optional[str] = None, device_id: Optional[List[int]] = None, generation_len: Optional[int] = None, + comp_ctx_lengths_prefill: Optional[List[int]] = None, + comp_ctx_lengths_decode: Optional[List[int]] = None, enable_debug_logs: bool = False, stream: bool = True, write_io_dir: Optional[str] = None, @@ -384,6 +386,8 @@ def cloud_ai_100_exec_kv( qpc_path=qpc_path, device_id=device_id, ctx_len=ctx_len, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, enable_debug_logs=enable_debug_logs, write_io_dir=write_io_dir, full_batch_size=full_batch_size, @@ -430,6 +434,8 @@ def __init__( qpc_path: str, full_batch_size: Optional[int] = None, ctx_len: Optional[int] = None, + comp_ctx_lengths_prefill: Optional[List[int]] = None, + comp_ctx_lengths_decode: Optional[List[int]] = None, device_id: Optional[List[int]] = None, enable_debug_logs: bool = False, write_io_dir: Optional[str] = None, @@ -440,6 +446,8 @@ def __init__( activate: bool = True, ) -> None: self._ctx_len = ctx_len + self.comp_ctx_lengths_prefill = comp_ctx_lengths_prefill + self.comp_ctx_lengths_decode = comp_ctx_lengths_decode self._write_io_dir = write_io_dir self.is_tlm = is_tlm self.return_pdfs = return_pdfs @@ -802,7 +810,17 @@ def run_prefill(self, prompt, generation_len, prefill_logit_bs=1, decode_batch_i batch_lora_ids = [self._prompt_to_lora_id_mapping_prefill.popleft() for i in range(self.batch_size)] inputs["lora_ids"] = np.array(batch_lora_ids, dtype=np.int64).reshape(self.batch_size, 1) + if self.comp_ctx_lengths_prefill is not None: + self.list_of_comp_ctx_lengths_prefill = [np.zeros(length) for length in self.comp_ctx_lengths_prefill] + prefill_ccl_id = 0 + inputs["comp_ctx_lengths"] = self.list_of_comp_ctx_lengths_prefill[prefill_ccl_id] + for i in range(num_chunks): + if self.comp_ctx_lengths_prefill is not None: + if (i + 
1) * self._prefill_seq_len > self.comp_ctx_lengths_prefill[prefill_ccl_id]: + prefill_ccl_id = min(prefill_ccl_id + 1, len(self.comp_ctx_lengths_prefill) - 1) + inputs["comp_ctx_lengths"] = self.list_of_comp_ctx_lengths_prefill[prefill_ccl_id] + chunk_inputs = inputs.copy() chunk_inputs["input_ids"] = inputs["input_ids"][ :, i * self._prefill_seq_len : (i + 1) * self._prefill_seq_len @@ -822,6 +840,19 @@ def run_prefill(self, prompt, generation_len, prefill_logit_bs=1, decode_batch_i generation_len, ) + def initialize_ccl(self, decode_inputs): + self.list_of_comp_ctx_lengths_decode = [np.zeros(length) for length in self.comp_ctx_lengths_decode] + max_ccl_id = len(self.comp_ctx_lengths_decode) - 1 + max_position_id = np.max(decode_inputs["position_ids"]) + ccl_id_initial = 0 + ccl_id = ccl_id_initial + for i in range(ccl_id_initial, len(self.comp_ctx_lengths_decode)): + if max_position_id < self.comp_ctx_lengths_decode[i]: + ccl_id = i + break + + return ccl_id, max_ccl_id + def run_continuous_batching_decode(self, prompt_queue, generation_len): """ Runs continuous batching decode for the given prompt queue and generation length. @@ -853,6 +884,10 @@ def run_continuous_batching_decode(self, prompt_queue, generation_len): # Prepare decode inputs inputs. 
decode_inputs = self.prepare_decode_inputs() + if self.comp_ctx_lengths_decode is not None: + ccl_id, max_ccl_id = self.initialize_ccl(decode_inputs) + decode_inputs["comp_ctx_lengths"] = self.list_of_comp_ctx_lengths_decode[ccl_id] + while prompt_queue or current_decode_ongoing.any(): outputs = self._session.run(decode_inputs) @@ -890,6 +925,20 @@ def run_continuous_batching_decode(self, prompt_queue, generation_len): batch_id_map[decode_batch_id] ] + if self.comp_ctx_lengths_decode is not None: + ###Recalculate ccl_id based on position ids### + # Determine the maximum value of position_ids across all batch elements + max_position_id = np.max(decode_inputs["position_ids"]) + + # Update ccl_id and comp_ctx_lengths_decode based on the maximum position id + ccl_id_initial = 0 + ccl_id = ccl_id_initial + for i in range(ccl_id_initial, len(self.comp_ctx_lengths_decode)): + if max_position_id < self.comp_ctx_lengths_decode[i]: + ccl_id = i + break + decode_inputs["comp_ctx_lengths"] = self.list_of_comp_ctx_lengths_decode[ccl_id] + else: current_decode_ongoing[decode_batch_id] = False else: @@ -902,6 +951,15 @@ def run_continuous_batching_decode(self, prompt_queue, generation_len): if self.include_sampler: decode_inputs["last_accepted_output_tokens"] = decode_inputs["input_ids"] + if self.comp_ctx_lengths_decode is not None: + # Update ccl_id and comp_ctx_lengths_decode based on the maximum position id + if ( + decode_inputs["position_ids"][decode_batch_id, -1] + >= self.comp_ctx_lengths_decode[ccl_id] - 1 + ): + ccl_id = min(ccl_id + 1, max_ccl_id) + decode_inputs["comp_ctx_lengths"] = self.list_of_comp_ctx_lengths_decode[ccl_id] + generated_id_current_index[decode_batch_id] += 1 return decode_pause_time @@ -928,7 +986,18 @@ def run_decode( self._session.set_buffers({"logits": logits_out_placeholder}) finished_sequences = decode_inputs["input_ids"] == self.tokenizer.eos_token_id num_token = 0 + + if self.comp_ctx_lengths_decode is not None: + ccl_id, max_ccl_id = 
self.initialize_ccl(decode_inputs) + decode_inputs["comp_ctx_lengths"] = self.list_of_comp_ctx_lengths_decode[ccl_id] + + cache_index = np.max(decode_inputs["position_ids"]) for num_token in range(1, generation_len): + if self.comp_ctx_lengths_decode is not None: + if cache_index >= self.comp_ctx_lengths_decode[ccl_id] - 1: + ccl_id = min(ccl_id + 1, max_ccl_id) + decode_inputs["comp_ctx_lengths"] = self.list_of_comp_ctx_lengths_decode[ccl_id] + if streamer: streamer.put(decode_inputs["input_ids"][0]) outputs = self._session.run(decode_inputs) @@ -940,6 +1009,7 @@ def run_decode( # Prepare inputs for next iteration decode_inputs["input_ids"] = self._fetch_next_token_id(outputs) decode_inputs["position_ids"][:, -1] += 1 + cache_index += 1 self.generated_ids[:, num_token] = decode_inputs["input_ids"][:, -1] finished_sequences |= decode_inputs["input_ids"] == self.tokenizer.eos_token_id if self.include_sampler: @@ -989,6 +1059,8 @@ def __init__( qpc_path: str, full_batch_size: Optional[int] = None, ctx_len: Optional[int] = None, + comp_ctx_lengths_prefill: Optional[List[int]] = None, + comp_ctx_lengths_decode: Optional[List[int]] = None, device_id: Optional[List[int]] = None, enable_debug_logs: bool = False, write_io_dir: Optional[str] = None, @@ -1002,6 +1074,8 @@ def __init__( qpc_path=qpc_path, full_batch_size=full_batch_size, ctx_len=ctx_len, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, device_id=device_id, enable_debug_logs=enable_debug_logs, write_io_dir=write_io_dir, @@ -1013,6 +1087,8 @@ def __init__( self._full_batch_size = self._qaic_model.full_batch_size self._tokenizer = self._qaic_model.tokenizer self._ctx_len = ctx_len + self.comp_ctx_lengths_prefill = comp_ctx_lengths_prefill + self.comp_ctx_lengths_decode = comp_ctx_lengths_decode self._perf_metrics = None self._prompt_queue = None self._text_streamer = None diff --git a/QEfficient/generation/vlm_generation.py 
b/QEfficient/generation/vlm_generation.py index 2e8f04f2b..5eb91d142 100644 --- a/QEfficient/generation/vlm_generation.py +++ b/QEfficient/generation/vlm_generation.py @@ -83,6 +83,8 @@ def __init__( vision_qpc_path: str, device_id: Optional[List[int]] = None, ctx_len: Optional[int] = None, + comp_ctx_lengths_prefill: Optional[List[int]] = None, + comp_ctx_lengths_decode: Optional[List[int]] = None, enable_debug_logs: bool = False, write_io_dir: Optional[str] = None, full_batch_size: Optional[int] = None, @@ -123,6 +125,8 @@ def __init__( qpc_path=lang_qpc_path, full_batch_size=full_batch_size, ctx_len=ctx_len, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, device_id=device_id, enable_debug_logs=enable_debug_logs, write_io_dir=write_io_dir, @@ -294,6 +298,11 @@ def _execute_chunked_prefill( outputs = None chunk_image_idx = None + if self.comp_ctx_lengths_prefill is not None: + self.list_of_comp_ctx_lengths_prefill = [np.zeros(length) for length in self.comp_ctx_lengths_prefill] + prefill_ccl_id = 0 + lang_inputs["comp_ctx_lengths"] = self.list_of_comp_ctx_lengths_prefill[prefill_ccl_id] + for i in range(num_chunks): input_ids_slice = lang_inputs["input_ids"][:, i * self._prefill_seq_len : (i + 1) * self._prefill_seq_len] position_ids_slice = lang_inputs["position_ids"][ @@ -312,6 +321,13 @@ def _execute_chunked_prefill( if "cross_attention_mask" in lang_inputs: chunk_inputs["cross_attention_mask"] = lang_inputs["cross_attention_mask"] + if self.comp_ctx_lengths_prefill is not None: + if (i + 1) * self._prefill_seq_len > self.comp_ctx_lengths_prefill[prefill_ccl_id]: + prefill_ccl_id = min(prefill_ccl_id + 1, len(self.comp_ctx_lengths_prefill) - 1) + lang_inputs["comp_ctx_lengths"] = self.list_of_comp_ctx_lengths_prefill[prefill_ccl_id] + + chunk_inputs["comp_ctx_lengths"] = lang_inputs["comp_ctx_lengths"] + outputs = self._session.run(chunk_inputs) if "image_idx_output" in outputs: diff --git 
a/QEfficient/peft/lora/layers.py b/QEfficient/peft/lora/layers.py index 6b75e696f..79abeba77 100644 --- a/QEfficient/peft/lora/layers.py +++ b/QEfficient/peft/lora/layers.py @@ -42,15 +42,15 @@ def forward(self, x: torch.Tensor, lora_ids: torch.Tensor): # multilora implementation: lora_ids other_indices_a = torch.arange(self.lora_a_weights.shape[2]).view(1, 1, -1) selected_lora_a_weights = CtxGatherFuncCB.apply( - self.lora_a_weights, lora_ids, other_indices_a + self.lora_a_weights, lora_ids, other_indices_a, self.lora_a_weights.shape[2] ) # other_indices_b = torch.arange(self.lora_b_weights.shape[2]).view(1, 1, -1) selected_lora_b_weights = CtxGatherFuncCB.apply( - self.lora_b_weights, lora_ids, other_indices_b + self.lora_b_weights, lora_ids, other_indices_b, self.lora_b_weights.shape[2] ) # other_indices_s = torch.arange(self.lora_scalings.shape[2]).view(1, 1, -1) selected_lora_scalings = CtxGatherFuncCB.apply( - self.lora_scalings, lora_ids, other_indices_s + self.lora_scalings, lora_ids, other_indices_s, self.lora_scalings.shape[2] ) # selected_lora_a_weights = selected_lora_a_weights.squeeze(1) diff --git a/QEfficient/transformers/cache_utils.py b/QEfficient/transformers/cache_utils.py index 853567be9..5452589f6 100644 --- a/QEfficient/transformers/cache_utils.py +++ b/QEfficient/transformers/cache_utils.py @@ -40,7 +40,8 @@ def read_only(self, cache_kwargs): k_out, v_out = self.keys, self.values position_ids = cache_kwargs.get("position_ids") batch_index = cache_kwargs.get("batch_index", None) - ctx_len = k_out.shape[2] + ctx_len = cache_kwargs.get("CCL", k_out.shape[2]) + ctx_indices = torch.arange(ctx_len)[None, None, ...] 
gather_limit = position_ids.max(1, keepdim=True).values.unsqueeze(1) invalid_mask = ctx_indices > gather_limit @@ -53,11 +54,11 @@ def read_only(self, cache_kwargs): ctx_indices = torch.where(invalid_mask, invalid_idx_value, ctx_indices) if batch_index is not None: - k_out = CtxGatherFuncCB.apply(k_out, batch_index, ctx_indices) - v_out = CtxGatherFuncCB.apply(v_out, batch_index, ctx_indices) + k_out = CtxGatherFuncCB.apply(k_out, batch_index, ctx_indices, ctx_len) + v_out = CtxGatherFuncCB.apply(v_out, batch_index, ctx_indices, ctx_len) else: - k_out = CtxGatherFunc.apply(k_out, ctx_indices) - v_out = CtxGatherFunc.apply(v_out, ctx_indices) + k_out = CtxGatherFunc.apply(k_out, ctx_indices, ctx_len) + v_out = CtxGatherFunc.apply(v_out, ctx_indices, ctx_len) v_out = torch.where(invalid_mask.unsqueeze(-1), torch.tensor(0.0, dtype=torch.float32), v_out) return k_out, v_out @@ -137,7 +138,7 @@ def update( k_out, v_out = self.keys, self.values # Gather - ctx_len = k_out.shape[2] + ctx_len = cache_kwargs.get("CCL", k_out.shape[2]) ctx_indices = torch.arange(ctx_len)[None, None, ...] 
gather_limit = position_ids.max(1, keepdim=True).values.unsqueeze(1) invalid_mask = ctx_indices > gather_limit @@ -149,11 +150,11 @@ def update( ctx_indices = torch.where(invalid_mask, invalid_idx_value, ctx_indices) if batch_index is not None: - k_out = CtxGatherFuncCB.apply(k_out, batch_index, ctx_indices) - v_out = CtxGatherFuncCB.apply(v_out, batch_index, ctx_indices) + k_out = CtxGatherFuncCB.apply(k_out, batch_index, ctx_indices, ctx_len) + v_out = CtxGatherFuncCB.apply(v_out, batch_index, ctx_indices, ctx_len) else: - k_out = CtxGatherFunc.apply(k_out, ctx_indices) - v_out = CtxGatherFunc.apply(v_out, ctx_indices) + k_out = CtxGatherFunc.apply(k_out, ctx_indices, ctx_len) + v_out = CtxGatherFunc.apply(v_out, ctx_indices, ctx_len) v_out = torch.where(invalid_mask.unsqueeze(-1), torch.tensor(0.0, dtype=torch.float32), v_out) return k_out, v_out @@ -414,7 +415,7 @@ def update( k_out, v_out = self.key_cache[layer_idx], self.value_cache[layer_idx] # Original Gather - ctx_len = self.key_cache[layer_idx].shape[2] + ctx_len = cache_kwargs.get("CCL", self.key_cache[layer_idx].shape[2]) ctx_indices = torch.arange(ctx_len)[None, None, ...] 
gather_limit = kv_position_ids.max(1, keepdim=True).values.unsqueeze(1) invalid_mask = ctx_indices > gather_limit @@ -426,11 +427,12 @@ def update( all_indices = torch.arange(layer_ctx_len) + kv_position_ids.max() + 1 rolling_indices = torch.where(all_indices > layer_ctx_len - 1, all_indices % layer_ctx_len, all_indices) + rolling_indices = rolling_indices[:ctx_len] final_indices = torch.where( (is_sliding_layer & (position_ids.max() >= (layer_ctx_len - 1))), rolling_indices, ctx_indices ) - k_out = CtxGatherFunc.apply(k_out, final_indices) - v_out = CtxGatherFunc.apply(v_out, final_indices) + k_out = CtxGatherFunc.apply(k_out, final_indices, ctx_len) + v_out = CtxGatherFunc.apply(v_out, final_indices, ctx_len) ctx_v_out = torch.where(invalid_mask.unsqueeze(-1), torch.tensor(0.0, dtype=torch.float32), v_out) v_out = torch.where((is_sliding_layer & (position_ids.max() >= (layer_ctx_len - 1))), v_out, ctx_v_out) return k_out, v_out @@ -516,7 +518,8 @@ def update( k_out, v_out = self.key_cache[layer_idx], self.value_cache[layer_idx] # Original Gather - ctx_len = min(layer_ctx_len, k_out.shape[2]) + ctx_len = cache_kwargs.get("CCL", k_out.shape[2]) + ctx_len = min(layer_ctx_len, ctx_len) ctx_indices = torch.arange(ctx_len)[None, None, ...] 
gather_limit = kv_position_ids.max(1, keepdim=True).values.unsqueeze(1) invalid_mask = ctx_indices > gather_limit @@ -529,11 +532,12 @@ def update( # Rolling indices for sliding window all_indices = torch.arange(layer_ctx_len) + kv_position_ids.max() + 1 rolling_indices = torch.where(all_indices > layer_ctx_len - 1, all_indices % layer_ctx_len, all_indices) + rolling_indices = rolling_indices[:ctx_len] final_indices = torch.where( (is_sliding_layer & (position_ids.max() >= (layer_ctx_len - 1))), rolling_indices, ctx_indices ) - k_out = CtxGatherFunc.apply(k_out, final_indices) - v_out = CtxGatherFunc.apply(v_out, final_indices) + k_out = CtxGatherFunc.apply(k_out, final_indices, ctx_len) + v_out = CtxGatherFunc.apply(v_out, final_indices, ctx_len) ctx_v_out = torch.where(invalid_mask.unsqueeze(-1), torch.tensor(0.0, dtype=torch.float32), v_out) v_out = torch.where((is_sliding_layer & (position_ids.max() >= (layer_ctx_len - 1))), v_out, ctx_v_out) return k_out, v_out @@ -637,7 +641,11 @@ def update( k_out, v_out = self.key_cache[layer_idx], self.value_cache[layer_idx] # Original Gather - ctx_len = self.key_cache[layer_idx].shape[2] + if is_sliding_layer: + ctx_len = self.key_cache[layer_idx].shape[2] + else: + ctx_len = cache_kwargs.get("CCL", self.key_cache[layer_idx].shape[2]) + ctx_indices = torch.arange(ctx_len)[None, None, ...] 
gather_limit = position_ids.max(1, keepdim=True).values.unsqueeze(1) invalid_mask = ctx_indices > gather_limit @@ -648,11 +656,11 @@ def update( ctx_indices = torch.where(invalid_mask, invalid_idx_value, ctx_indices) if batch_index is not None: - k_out = CtxGatherFuncCB.apply(k_out, batch_index, ctx_indices) - v_out = CtxGatherFuncCB.apply(v_out, batch_index, ctx_indices) + k_out = CtxGatherFuncCB.apply(k_out, batch_index, ctx_indices, ctx_len) + v_out = CtxGatherFuncCB.apply(v_out, batch_index, ctx_indices, ctx_len) else: - k_out = CtxGatherFunc.apply(k_out, ctx_indices) - v_out = CtxGatherFunc.apply(v_out, ctx_indices) + k_out = CtxGatherFunc.apply(k_out, ctx_indices, ctx_len) + v_out = CtxGatherFunc.apply(v_out, ctx_indices, ctx_len) v_out = torch.where(invalid_mask.unsqueeze(-1), torch.tensor(0.0, dtype=torch.float32), v_out) return k_out, v_out diff --git a/QEfficient/transformers/models/codegen/modeling_codegen.py b/QEfficient/transformers/models/codegen/modeling_codegen.py index 776bfce43..3addd7501 100644 --- a/QEfficient/transformers/models/codegen/modeling_codegen.py +++ b/QEfficient/transformers/models/codegen/modeling_codegen.py @@ -72,6 +72,7 @@ def forward( self, hidden_states: Optional[torch.FloatTensor], layer_past: Optional[Tuple[torch.Tensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, @@ -124,6 +125,9 @@ def forward( if layer_past is not None: cache_kwargs = {"position_ids": position_ids, "batch_index": batch_index} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key, value = layer_past.update(key.to(hidden_states.dtype), value, self.layer_idx, cache_kwargs) # compute self-attention: V x Softmax(QK^T) @@ -147,6 +151,7 @@ def forward( self, input_ids: 
Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, tuple[tuple[torch.Tensor]]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, @@ -245,6 +250,7 @@ def forward( outputs = block( hidden_states, layer_past=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, attention_mask=attention_mask, position_ids=position_ids, @@ -294,6 +300,7 @@ def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, @@ -312,6 +319,7 @@ def forward( transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, attention_mask=attention_mask, token_type_ids=token_type_ids, batch_index=batch_index, @@ -348,6 +356,7 @@ def forward( self, hidden_states: Optional[torch.FloatTensor], layer_past: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, @@ -361,6 +370,7 @@ def forward( attn_outputs, attn_weights = self.attn( hidden_states=hidden_states, layer_past=layer_past, + comp_ctx_lengths=comp_ctx_lengths, attention_mask=attention_mask, position_ids=position_ids, batch_index=batch_index, diff --git a/QEfficient/transformers/models/falcon/modeling_falcon.py b/QEfficient/transformers/models/falcon/modeling_falcon.py index 8f2c3730d..1cfdf88e1 100644 --- a/QEfficient/transformers/models/falcon/modeling_falcon.py +++ b/QEfficient/transformers/models/falcon/modeling_falcon.py @@ -117,6 +117,7 
@@ def forward( attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, layer_past: Optional[Cache] = None, head_mask: Optional[torch.Tensor] = None, @@ -141,6 +142,9 @@ def forward( if layer_past is not None: cache_kwargs = {"batch_index": batch_index, "position_ids": position_ids} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_layer, value_layer = layer_past.update(key_layer, value_layer, self.layer_idx, cache_kwargs) if attention_mask is not None: @@ -172,6 +176,7 @@ def forward( attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, layer_past: Optional[Union[Cache, Tuple[torch.Tensor, torch.Tensor]]] = None, head_mask: Optional[torch.Tensor] = None, @@ -195,6 +200,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, alibi=alibi, head_mask=head_mask, @@ -245,6 +251,7 @@ def forward( position_ids: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor, torch.Tensor], ...]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, head_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, @@ -307,6 +314,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, head_mask=head_mask[i], use_cache=use_cache, @@ -352,6 +360,7 @@ def forward( 
position_ids: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, @@ -368,6 +377,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, head_mask=head_mask, inputs_embeds=inputs_embeds, diff --git a/QEfficient/transformers/models/gemma/modeling_gemma.py b/QEfficient/transformers/models/gemma/modeling_gemma.py index eea1e3898..1edb8ef53 100644 --- a/QEfficient/transformers/models/gemma/modeling_gemma.py +++ b/QEfficient/transformers/models/gemma/modeling_gemma.py @@ -137,6 +137,7 @@ def forward( attention_mask: Optional[torch.Tensor], position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, @@ -154,6 +155,9 @@ def forward( if past_key_value is not None: cache_kwargs = {"batch_index": batch_index, "position_ids": position_ids} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface = eager_attention_forward @@ -186,6 +190,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = 
None, @@ -214,6 +219,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -243,6 +249,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -299,6 +306,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -334,6 +342,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -350,6 +359,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, inputs_embeds=inputs_embeds, use_cache=use_cache, diff --git a/QEfficient/transformers/models/gemma2/modeling_gemma2.py b/QEfficient/transformers/models/gemma2/modeling_gemma2.py index be3ba942d..2944601c9 100644 --- a/QEfficient/transformers/models/gemma2/modeling_gemma2.py +++ b/QEfficient/transformers/models/gemma2/modeling_gemma2.py @@ -144,6 +144,7 @@ def forward( attention_mask: Optional[torch.Tensor], position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: 
Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, @@ -161,7 +162,15 @@ def forward( if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache - cache_kwargs = {"sin": sin, "cos": cos, "batch_index": batch_index, "position_ids": position_ids} + cache_kwargs = { + "sin": sin, + "cos": cos, + "batch_index": batch_index, + "position_ids": position_ids, + } + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward @@ -194,6 +203,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, @@ -226,6 +236,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, output_attentions=output_attentions, use_cache=use_cache, @@ -266,6 +277,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -338,6 +350,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, output_attentions=output_attentions, use_cache=use_cache, @@ -381,6 +394,7 @@ def 
forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, @@ -404,6 +418,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, inputs_embeds=inputs_embeds, use_cache=use_cache, diff --git a/QEfficient/transformers/models/gemma3/modeling_gemma3.py b/QEfficient/transformers/models/gemma3/modeling_gemma3.py index 20b7036fd..398259d8b 100644 --- a/QEfficient/transformers/models/gemma3/modeling_gemma3.py +++ b/QEfficient/transformers/models/gemma3/modeling_gemma3.py @@ -6,7 +6,7 @@ # ----------------------------------------------------------------------------- import copy -from typing import Optional, Tuple, Union +from typing import List, Optional, Tuple, Union import torch from torch import nn @@ -215,6 +215,7 @@ def forward( attention_mask: Optional[torch.Tensor], position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, @@ -254,6 +255,9 @@ def forward( "is_sliding": self.is_sliding, "sliding_window_pattern": self.config.sliding_window_pattern, } + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) @@ -297,6 +301,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, 
position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, @@ -323,6 +328,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, output_attentions=output_attentions, use_cache=use_cache, @@ -363,6 +369,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -429,6 +436,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, output_attentions=output_attentions, use_cache=use_cache, @@ -466,6 +474,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, @@ -525,6 +534,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, inputs_embeds=inputs_embeds, use_cache=use_cache, @@ -592,7 +602,15 @@ def __init__(self, model): self.config = self.model.config self.lm_head = self.model.lm_head - def forward(self, input_ids, vision_embeds, position_ids, image_idx, past_key_values): + def forward( + self, + input_ids, + vision_embeds, + position_ids, + image_idx, + 
past_key_values, + comp_ctx_lengths: Optional[List[int]] = None, + ): inputs_embeds = self.model.get_input_embeddings()(input_ids) B, N, C = inputs_embeds.shape selected = input_ids == self.model.config.image_token_index @@ -603,7 +621,11 @@ def forward(self, input_ids, vision_embeds, position_ids, image_idx, past_key_va image_input_embeds = torch.where(selected.unsqueeze(-1), image_features_expanded, inputs_embeds) inputs_embeds = torch.where(input_ids.shape[1] == torch.tensor(1), inputs_embeds, image_input_embeds) outputs = self.language_model( - inputs_embeds=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, use_cache=True + inputs_embeds=inputs_embeds, + position_ids=position_ids, + past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, + use_cache=True, ) image_idx = (indices1.max() + 1).unsqueeze(0).unsqueeze(0) logit_index = position_ids.to(torch.int32).argmax(1, keepdim=True) @@ -620,7 +642,15 @@ def get_qeff_vision_encoder(self): def get_qeff_language_decoder(self): return QEffGemma3DecoderWrapper(self) - def forward(self, input_ids, position_ids, pixel_values, image_idx, past_key_values): + def forward( + self, + input_ids, + position_ids, + pixel_values, + image_idx, + past_key_values, + comp_ctx_lengths: Optional[List[int]] = None, + ): image_features = self.get_image_features(pixel_values=pixel_values) inputs_embeds = self.get_input_embeddings()(input_ids) B, N, C = inputs_embeds.shape @@ -632,7 +662,11 @@ def forward(self, input_ids, position_ids, pixel_values, image_idx, past_key_val image_input_embeds = torch.where(selected.unsqueeze(-1), image_features_expanded, inputs_embeds) inputs_embeds = torch.where(input_ids.shape[1] == torch.tensor(1), inputs_embeds, image_input_embeds) outputs = self.language_model( - inputs_embeds=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, use_cache=True + inputs_embeds=inputs_embeds, + position_ids=position_ids, + past_key_values=past_key_values, + 
comp_ctx_lengths=comp_ctx_lengths, + use_cache=True, ) image_idx = (indices1.max() + 1).unsqueeze(0).unsqueeze(0) logit_index = position_ids.to(torch.int32).argmax(1, keepdim=True) @@ -647,6 +681,8 @@ def get_specializations( prefill_seq_len: int, ctx_len: int, img_size: int, + comp_ctx_lengths_prefill: Optional[List[int]] = None, + comp_ctx_lengths_decode: Optional[List[int]] = None, kv_offload: bool = False, **compiler_options, ): @@ -667,24 +703,55 @@ def get_specializations( "ctx_len": ctx_len, } ] - lang = [ - { - "batch_size": batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "sliding_window": self.language_model.config.sliding_window, - "img_size": img_size, - "mm_tokens_per_image": mm_tokens_per_image, - }, - { - "batch_size": batch_size, - "seq_len": "1", - "ctx_len": ctx_len, - "sliding_window": self.language_model.config.sliding_window, - "img_size": img_size, - "mm_tokens_per_image": mm_tokens_per_image, - }, - ] + if comp_ctx_lengths_prefill and comp_ctx_lengths_decode: + lang = [] + + for i in range(0, len(comp_ctx_lengths_prefill)): + lang.append( + { + "batch_size": batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_prefill[i], + "sliding_window": self.language_model.config.sliding_window, + "img_size": img_size, + "mm_tokens_per_image": mm_tokens_per_image, + } + ) + + for i in range(0, len(comp_ctx_lengths_decode)): + lang.append( + { + "batch_size": batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_decode[i], + "sliding_window": self.language_model.config.sliding_window, + "img_size": img_size, + "mm_tokens_per_image": mm_tokens_per_image, + } + ) + + else: + lang = [ + { + "batch_size": batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "sliding_window": self.language_model.config.sliding_window, + "img_size": img_size, + "mm_tokens_per_image": mm_tokens_per_image, + }, + { + "batch_size": batch_size, + "seq_len": "1", + 
"ctx_len": ctx_len, + "sliding_window": self.language_model.config.sliding_window, + "img_size": img_size, + "mm_tokens_per_image": mm_tokens_per_image, + }, + ] + specializations = {} if kv_offload: @@ -694,7 +761,7 @@ def get_specializations( else: return lang, compiler_options - def get_onnx_dynamic_axes(self, kv_offload: bool = False): + def get_onnx_dynamic_axes(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): # Define dynamic axes vision_dynamic_axes = {} lang_dynamic_axes = {} @@ -719,6 +786,9 @@ def get_onnx_dynamic_axes(self, kv_offload: bool = False): ) lang_dynamic_axes[f"past_{kv}.{i}"] = apply_dynamic_axes + if comp_ctx_lengths is not None: + lang_dynamic_axes["comp_ctx_lengths"] = {0: "comp_ctx_lengths"} + dynamic_axes = {} if kv_offload: dynamic_axes["vision"] = vision_dynamic_axes @@ -767,7 +837,7 @@ def get_dummy_pkv_cache(self, config, batch_size, seq_len): past_key_values.append(pkv) return past_key_values - def get_dummy_inputs(self, kv_offload: bool = False): + def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): if vis_cfg := getattr(self.config, "vision_config", None): img_size = getattr(vis_cfg, "image_size", 896) else: @@ -813,6 +883,9 @@ def get_dummy_inputs(self, kv_offload: bool = False): seq_len=constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN, ) + if comp_ctx_lengths is not None: + lang_inputs["comp_ctx_lengths"] = torch.randint(0, 100, (40,), dtype=torch.long) + inputs = {} if kv_offload: inputs["vision"] = vision_inputs diff --git a/QEfficient/transformers/models/gpt2/modeling_gpt2.py b/QEfficient/transformers/models/gpt2/modeling_gpt2.py index d68a65430..6136a2c5d 100644 --- a/QEfficient/transformers/models/gpt2/modeling_gpt2.py +++ b/QEfficient/transformers/models/gpt2/modeling_gpt2.py @@ -65,6 +65,7 @@ def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, 
attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, @@ -121,6 +122,10 @@ def forward( # save all key/value_layer to cache to be re-used for fast auto-regressive generation # Update the cache_kwargs with position_ids for Cloud AI 100 cache_kwargs = {"position_ids": position_ids, "batch_index": batch_index} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] + key_states, value_states = curr_past_key_value.update( key_states, value_states, self.layer_idx, cache_kwargs ) @@ -156,6 +161,7 @@ def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, @@ -174,6 +180,7 @@ def forward( hidden_states, past_key_value=past_key_value, attention_mask=attention_mask, + comp_ctx_lengths=comp_ctx_lengths, position_ids=position_ids, batch_index=batch_index, head_mask=head_mask, @@ -232,6 +239,7 @@ def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, @@ -341,6 +349,7 @@ def forward( outputs = block( hidden_states, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, attention_mask=attention_mask, position_ids=position_ids, batch_index=batch_index, @@ -392,6 +401,7 @@ def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = 
None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, @@ -418,6 +428,7 @@ def forward( transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, diff --git a/QEfficient/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py b/QEfficient/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py index af233870b..85ea42674 100644 --- a/QEfficient/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +++ b/QEfficient/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py @@ -98,6 +98,7 @@ def forward( self, hidden_states: torch.Tensor, layer_past: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, batch_index: Optional[torch.LongTensor] = None, @@ -153,6 +154,9 @@ def forward( if layer_past is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation cache_kwargs = {"position_ids": position_ids, "batch_index": batch_index} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key, value = curr_past_key_value.update(key, value, self.layer_idx, cache_kwargs) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if self.is_cross_attention: @@ -180,6 +184,7 @@ def forward( self, hidden_states: Optional[Tuple[torch.Tensor]], layer_past: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, @@ -194,6 +199,7 @@ def forward( attn_outputs = self.attn( 
hidden_states, layer_past=layer_past, + comp_ctx_lengths=comp_ctx_lengths, attention_mask=attention_mask, position_ids=position_ids, batch_index=batch_index, @@ -242,6 +248,7 @@ def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.Tensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, @@ -333,6 +340,7 @@ def forward( outputs = block( hidden_states, layer_past=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, position_ids=position_ids, batch_index=batch_index, attention_mask=attention_mask, @@ -374,6 +382,7 @@ def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[tuple[tuple[torch.Tensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, @@ -399,6 +408,7 @@ def forward( transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, diff --git a/QEfficient/transformers/models/gpt_oss/modeling_gpt_oss.py b/QEfficient/transformers/models/gpt_oss/modeling_gpt_oss.py index 62bc849b7..84552aff4 100644 --- a/QEfficient/transformers/models/gpt_oss/modeling_gpt_oss.py +++ b/QEfficient/transformers/models/gpt_oss/modeling_gpt_oss.py @@ -417,6 +417,7 @@ def forward( attention_mask: Optional[torch.Tensor], position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, sliding_mask=None, @@ -443,6 +444,9 @@ def forward( "is_sliding": self.sliding_window is not None, 
"sliding_window": past_key_value.sliding_window_len, } + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) if self.sliding_window is not None: @@ -476,6 +480,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, @@ -492,6 +497,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -526,6 +532,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -586,6 +593,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, output_attentions=output_attentions, @@ -619,6 +627,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, @@ -670,6 +679,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, 
past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, inputs_embeds=inputs_embeds, use_cache=use_cache, diff --git a/QEfficient/transformers/models/gptj/modeling_gptj.py b/QEfficient/transformers/models/gptj/modeling_gptj.py index dc3e5e6d2..1a9e45e97 100644 --- a/QEfficient/transformers/models/gptj/modeling_gptj.py +++ b/QEfficient/transformers/models/gptj/modeling_gptj.py @@ -83,6 +83,7 @@ def forward( self, hidden_states: torch.FloatTensor, layer_past: Optional[Tuple[torch.Tensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, @@ -135,6 +136,9 @@ def forward( if layer_past is not None: cache_kwargs = {"position_ids": position_ids, "batch_index": batch_index} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key, value = layer_past.update(key, value, self.layer_idx, cache_kwargs) # compute self-attention: V x Softmax(QK^T) @@ -151,6 +155,7 @@ def forward( self, hidden_states: Optional[torch.FloatTensor], layer_past: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, @@ -164,6 +169,7 @@ def forward( attn_outputs, attn_weights = self.attn( hidden_states=hidden_states, layer_past=layer_past, + comp_ctx_lengths=comp_ctx_lengths, attention_mask=attention_mask, position_ids=position_ids, batch_index=batch_index, @@ -191,6 +197,7 @@ def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: 
Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, @@ -270,6 +277,7 @@ def forward( outputs = block( hidden_states=hidden_states, layer_past=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, attention_mask=causal_mask, position_ids=position_ids, batch_index=batch_index, @@ -314,6 +322,7 @@ def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, @@ -339,6 +348,7 @@ def forward( transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, diff --git a/QEfficient/transformers/models/granite/modeling_granite.py b/QEfficient/transformers/models/granite/modeling_granite.py index 2a2d47d6d..aa14554b2 100644 --- a/QEfficient/transformers/models/granite/modeling_granite.py +++ b/QEfficient/transformers/models/granite/modeling_granite.py @@ -129,6 +129,7 @@ def forward( position_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, @@ -146,7 +147,15 @@ def forward( if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache - cache_kwargs = {"sin": sin, "cos": cos, "batch_index": batch_index, "position_ids": position_ids} + cache_kwargs = { + "sin": sin, + "cos": cos, + "batch_index": batch_index, + "position_ids": position_ids, + } + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : 
comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward @@ -171,6 +180,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -226,6 +236,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, output_attentions=output_attentions, use_cache=use_cache, @@ -267,6 +278,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, @@ -319,6 +331,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, inputs_embeds=inputs_embeds, use_cache=use_cache, diff --git a/QEfficient/transformers/models/granitemoe/modeling_granitemoe.py b/QEfficient/transformers/models/granitemoe/modeling_granitemoe.py index c085f6a5e..b158b4046 100644 --- a/QEfficient/transformers/models/granitemoe/modeling_granitemoe.py +++ b/QEfficient/transformers/models/granitemoe/modeling_granitemoe.py @@ -123,6 +123,7 @@ def forward( position_embeddings: Tuple[torch.Tensor, torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: 
Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, output_attentions: bool = False, use_cache: bool = False, @@ -150,6 +151,9 @@ def forward( "batch_index": batch_index, "position_ids": position_ids, } + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface = eager_attention_forward @@ -209,6 +213,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -286,6 +291,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, output_attentions=output_attentions, use_cache=use_cache, @@ -297,6 +303,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, @@ -492,6 +499,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, @@ -546,6 +554,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, inputs_embeds=inputs_embeds, 
use_cache=use_cache, diff --git a/QEfficient/transformers/models/grok_1/modeling_grok1.py b/QEfficient/transformers/models/grok_1/modeling_grok1.py index 567a8e070..2d8fc412d 100644 --- a/QEfficient/transformers/models/grok_1/modeling_grok1.py +++ b/QEfficient/transformers/models/grok_1/modeling_grok1.py @@ -55,6 +55,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, output_attentions: bool = False, use_cache: bool = False, @@ -94,6 +95,9 @@ def forward( if past_key_value is not None: cache_kwargs = {"batch_index": batch_index, "position_ids": position_ids} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, layer_idx, cache_kwargs) # repeat k/v heads if n_kv_heads < n_heads @@ -205,6 +209,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = False, output_router_logits: Optional[bool] = False, @@ -235,6 +240,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, output_attentions=output_attentions, use_cache=use_cache, @@ -277,6 +283,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, 
inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -351,6 +358,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, output_attentions=output_attentions, use_cache=use_cache, @@ -395,6 +403,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, @@ -441,6 +450,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, inputs_embeds=inputs_embeds, use_cache=use_cache, diff --git a/QEfficient/transformers/models/internvl/modeling_internvl.py b/QEfficient/transformers/models/internvl/modeling_internvl.py index 38d0fe167..96c59325f 100644 --- a/QEfficient/transformers/models/internvl/modeling_internvl.py +++ b/QEfficient/transformers/models/internvl/modeling_internvl.py @@ -5,6 +5,8 @@ # # ----------------------------------------------------------------------------- +from typing import List, Optional + import torch import torch.nn as nn import torch.nn.functional as F @@ -34,7 +36,15 @@ def __init__(self, model): self.config = self.model.language_model.config self.language_model = self.model.language_model - def forward(self, input_ids, vision_embeds, position_ids, image_idx, past_key_values): + def forward( + self, + input_ids, + vision_embeds, + position_ids, + image_idx, + past_key_values, + comp_ctx_lengths: Optional[List[int]] = None, + ): input_embeds = self.model.language_model.get_input_embeddings()(input_ids) B, N, C = input_embeds.shape image_input_embeds = input_embeds.reshape(B * N, C) @@ 
-55,7 +65,11 @@ def forward(self, input_ids, vision_embeds, position_ids, image_idx, past_key_va inputs_embeds = torch.where(input_ids.shape[1] == torch.tensor(1), input_embeds, image_input_embeds) inputs_embeds = inputs_embeds.reshape(B, N, C) outputs = self.model.language_model( - inputs_embeds=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, use_cache=True + inputs_embeds=inputs_embeds, + position_ids=position_ids, + past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, + use_cache=True, ) image_idx = (indices1.max() + 1).unsqueeze(0).unsqueeze(0) return outputs.logits, vision_embeds, image_idx, outputs.past_key_values @@ -74,6 +88,8 @@ def get_specializations( prefill_seq_len: int, ctx_len: int, img_size: int, + comp_ctx_lengths_prefill: Optional[List[int]] = None, + comp_ctx_lengths_decode: Optional[List[int]] = None, kv_offload: bool = False, **compiler_options, ): @@ -104,24 +120,54 @@ def get_specializations( "batched_num_patches": batch_size * num_patches, } ] - lang = [ - { - "batch_size": batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "num_patches": num_patches, - "img_size": img_size, - "vision_size": vision_size, - }, - { - "batch_size": batch_size, - "seq_len": "1", - "ctx_len": ctx_len, - "num_patches": num_patches, - "img_size": img_size, - "vision_size": vision_size, - }, - ] + if comp_ctx_lengths_prefill and comp_ctx_lengths_decode: + lang = [] + + for i in range(0, len(comp_ctx_lengths_prefill)): + lang.append( + { + "batch_size": batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_prefill[i], + "num_patches": num_patches, + "img_size": img_size, + "vision_size": vision_size, + } + ) + + for i in range(0, len(comp_ctx_lengths_decode)): + lang.append( + { + "batch_size": batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_decode[i], + "num_patches": num_patches, + "img_size": img_size, + 
"vision_size": vision_size, + } + ) + + else: + lang = [ + { + "batch_size": batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "num_patches": num_patches, + "img_size": img_size, + "vision_size": vision_size, + }, + { + "batch_size": batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "num_patches": num_patches, + "img_size": img_size, + "vision_size": vision_size, + }, + ] specializations = {} @@ -132,7 +178,7 @@ def get_specializations( else: return lang, compiler_options - def get_onnx_dynamic_axes(self, kv_offload: bool = False): + def get_onnx_dynamic_axes(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): # Define dynamic axes vision_dynamic_axes = {} lang_dynamic_axes = {} @@ -146,6 +192,9 @@ def get_onnx_dynamic_axes(self, kv_offload: bool = False): for kv in ["key", "value"]: lang_dynamic_axes[f"past_{kv}.{i}"] = pkv_dynamic_axes + if comp_ctx_lengths is not None: + lang_dynamic_axes["comp_ctx_lengths"] = {0: "comp_ctx_lengths"} + dynamic_axes = {} if kv_offload: dynamic_axes["vision"] = vision_dynamic_axes @@ -173,7 +222,7 @@ def get_output_names(self, kv_offload: bool = False): return lang_output_names return output_names - def get_dummy_inputs(self, kv_offload: bool = False): + def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): if vis_cfg := getattr(self.config, "vision_config", None): img_size = getattr(vis_cfg, "image_size", constants.INTERN_IMG_SIZE) else: @@ -234,6 +283,9 @@ def get_dummy_inputs(self, kv_offload: bool = False): for kv in ["key", "value"]: lang_inputs["past_key_values"][i].append(torch.zeros(kv_cache_shape, dtype=torch.float32)) + if comp_ctx_lengths is not None: + lang_inputs["comp_ctx_lengths"] = torch.randint(0, 100, (40,), dtype=torch.long) + inputs = {} if kv_offload: inputs["vision"] = vision_inputs @@ -244,7 +296,15 @@ def get_dummy_inputs(self, kv_offload: bool = False): return inputs - def forward(self, input_ids, pixel_values, 
position_ids, image_idx, past_key_values): + def forward( + self, + input_ids, + pixel_values, + position_ids, + image_idx, + past_key_values, + comp_ctx_lengths: Optional[List[int]] = None, + ): input_embeds = self.language_model.get_input_embeddings()(input_ids) vision_embeds = self.extract_feature(pixel_values) B, N, C = input_embeds.shape @@ -266,7 +326,11 @@ def forward(self, input_ids, pixel_values, position_ids, image_idx, past_key_val inputs_embeds = torch.where(input_ids.shape[1] == torch.tensor(1), input_embeds, image_input_embeds) inputs_embeds = inputs_embeds.reshape(B, N, C) outputs = self.language_model( - inputs_embeds=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, use_cache=True + inputs_embeds=inputs_embeds, + position_ids=position_ids, + past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, + use_cache=True, ) next_image_idx = (indices1.max() + 1).unsqueeze(0).unsqueeze(0) image_idx = torch.where(image_idx < next_image_idx, next_image_idx, image_idx) diff --git a/QEfficient/transformers/models/llama/modeling_llama.py b/QEfficient/transformers/models/llama/modeling_llama.py index f2a68f80e..73b947dba 100644 --- a/QEfficient/transformers/models/llama/modeling_llama.py +++ b/QEfficient/transformers/models/llama/modeling_llama.py @@ -132,6 +132,7 @@ def forward( attention_mask: Optional[torch.Tensor], position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, @@ -155,6 +156,9 @@ def forward( if past_key_value is not None: cache_kwargs = {"batch_index": batch_index, "position_ids": position_ids} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = 
past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface = eager_attention_forward @@ -187,6 +191,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, @@ -202,6 +207,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -229,6 +235,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -277,6 +284,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -310,6 +318,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -326,6 +335,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, inputs_embeds=inputs_embeds, use_cache=use_cache, diff --git 
a/QEfficient/transformers/models/llama4/modeling_llama4.py b/QEfficient/transformers/models/llama4/modeling_llama4.py index b7b951101..0bcdf8ae0 100644 --- a/QEfficient/transformers/models/llama4/modeling_llama4.py +++ b/QEfficient/transformers/models/llama4/modeling_llama4.py @@ -470,6 +470,7 @@ def forward( position_embeddings: Tuple[torch.Tensor, torch.Tensor], position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, @@ -503,14 +504,20 @@ def forward( if past_key_value is not None: chunk_position_ids = position_ids - if self.use_rope: chunk_position_ids = torch.where( chunk_position_ids != -1, chunk_position_ids % self.config.attention_chunk_size, chunk_position_ids ) # sin and cos are specific to RoPE models; cache_position needed for the static cache - cache_kwargs = {"batch_index": batch_index, "position_ids": chunk_position_ids} + cache_kwargs = { + "batch_index": batch_index, + "position_ids": chunk_position_ids, + } + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward @@ -543,6 +550,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = False, output_router_logits: Optional[bool] = False, @@ -562,6 +570,7 @@ def forward( position_embeddings=position_embeddings, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, 
batch_index=batch_index, output_attentions=output_attentions, use_cache=use_cache, @@ -615,6 +624,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -682,6 +692,7 @@ def forward( attention_mask=causal_mask_mapping[decoder_layer.attention_type], position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, output_attentions=output_attentions, use_cache=use_cache, @@ -731,6 +742,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, @@ -754,6 +766,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, inputs_embeds=inputs_embeds, use_cache=use_cache, @@ -844,6 +857,7 @@ def forward( image_idx, past_key_values, batch_index: Optional[torch.LongTensor] = None, + comp_ctx_lengths: Optional[List[int]] = None, ): inputs_embeds = self.model.language_model.get_input_embeddings()(input_ids) selected = input_ids == self.model.config.image_token_index @@ -857,6 +871,7 @@ def forward( inputs_embeds=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=True, ) @@ -872,7 +887,15 @@ def get_qeff_vision_encoder(self): def get_qeff_language_decoder(self): return QEffLlama4DecoderWrapper(self) - def 
forward(self, input_ids, position_ids, pixel_values, image_idx, past_key_values): + def forward( + self, + input_ids, + position_ids, + pixel_values, + image_idx, + past_key_values, + comp_ctx_lengths: Optional[List[int]] = None, + ): inputs_embeds = self.language_model.get_input_embeddings()(input_ids) vision_feature_layer = self.config.vision_config.vision_feature_layer vision_feature_select_strategy = self.config.vision_config.vision_feature_select_strategy @@ -892,7 +915,11 @@ def forward(self, input_ids, position_ids, pixel_values, image_idx, past_key_val image_embeds = torch.where(selected.unsqueeze(-1), image_features_expanded, inputs_embeds) inputs_embeds = torch.where(input_ids.shape[1] == torch.tensor(1), inputs_embeds, image_embeds) outputs = self.language_model( - inputs_embeds=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, use_cache=True + inputs_embeds=inputs_embeds, + position_ids=position_ids, + past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, + use_cache=True, ) next_idx = (indices1.max() + 1).unsqueeze(0).unsqueeze(0) image_idx = torch.where(image_idx < next_idx, next_idx, image_idx) @@ -911,6 +938,9 @@ def get_specializations( **compiler_options, ): max_num_tiles = compiler_options.pop("max_num_tiles", None) + comp_ctx_lengths_prefill = compiler_options.pop("comp_ctx_lengths_prefill", None) + comp_ctx_lengths_decode = compiler_options.pop("comp_ctx_lengths_decode", None) + if max_num_tiles is None: logger.warning( "User should pass `max_num_tiles` to compile API to fix the dynamic axes `pixel_values`, you can get more info by calling get_inputs_info function!, Since its not found setting its value to 17" @@ -957,41 +987,87 @@ def get_specializations( } ] - lang_prefill = { - "batch_size": 1 if continuous_batching else batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "max_num_tiles": max_num_tiles, - "img_size": img_size, - "vision_size": vision_size, - "chunk_length": 
prefill_seq_len, - "chunk_ctx_len": chunk_ctx_len, - } - if continuous_batching: - lang_prefill["full_batch_size"] = kv_cache_batch_size - else: - lang_prefill["batch_size"] = kv_cache_batch_size - if full_batch_size: - lang_prefill["full_batch_exec_size"] = full_batch_size - - lang_decode = { - "batch_size": full_batch_size if continuous_batching else batch_size, - "seq_len": 1, - "ctx_len": ctx_len, - "max_num_tiles": max_num_tiles, - "img_size": img_size, - "vision_size": vision_size, - "chunk_length": prefill_seq_len, - "chunk_ctx_len": chunk_ctx_len, - } - if continuous_batching: - lang_decode["full_batch_size"] = kv_cache_batch_size + if comp_ctx_lengths_prefill is not None: + lang = [] + + for i in range(0, len(comp_ctx_lengths_prefill)): + lang_prefill = { + "batch_size": 1 if continuous_batching else batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_prefill[i], + "max_num_tiles": max_num_tiles, + "img_size": img_size, + "vision_size": vision_size, + "chunk_length": prefill_seq_len, + "chunk_ctx_len": chunk_ctx_len, + } + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + + lang.append(lang_prefill) + + for i in range(0, len(comp_ctx_lengths_decode)): + lang_decode = { + "batch_size": full_batch_size if continuous_batching else batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_decode[i], + "max_num_tiles": max_num_tiles, + "img_size": img_size, + "vision_size": vision_size, + "chunk_length": prefill_seq_len, + "chunk_ctx_len": chunk_ctx_len, + } + + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size + + lang.append(lang_decode) + else: - lang_decode["batch_size"] = kv_cache_batch_size + lang_prefill = { + 
"batch_size": 1 if continuous_batching else batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "max_num_tiles": max_num_tiles, + "img_size": img_size, + "vision_size": vision_size, + "chunk_length": prefill_seq_len, + "chunk_ctx_len": chunk_ctx_len, + } + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + + lang_decode = { + "batch_size": full_batch_size if continuous_batching else batch_size, + "seq_len": 1, + "ctx_len": ctx_len, + "max_num_tiles": max_num_tiles, + "img_size": img_size, + "vision_size": vision_size, + "chunk_length": prefill_seq_len, + "chunk_ctx_len": chunk_ctx_len, + } + + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size - lang = [] - lang.append(lang_prefill) - lang.append(lang_decode) + lang = [] + lang.append(lang_prefill) + lang.append(lang_decode) specializations = {} @@ -1004,7 +1080,9 @@ def get_specializations( lang[1].pop("vision_size") return lang, compiler_options - def get_onnx_dynamic_axes(self, kv_offload: bool = False, continuous_batching: bool = False): + def get_onnx_dynamic_axes( + self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, continuous_batching: bool = False + ): # Define dynamic axes vision_dynamic_axes = {} lang_dynamic_axes = {} @@ -1026,6 +1104,9 @@ def get_onnx_dynamic_axes(self, kv_offload: bool = False, continuous_batching: b for kv in ["key", "value"]: lang_dynamic_axes[f"past_{kv}.{i}"] = pkv_dynamic_axes + if comp_ctx_lengths is not None: + lang_dynamic_axes["comp_ctx_lengths"] = {0: "comp_ctx_lengths"} + dynamic_axes = {} if kv_offload: dynamic_axes["vision"] = vision_dynamic_axes @@ -1079,7 +1160,9 @@ def get_dummy_pkv_cache(self, config, batch_size, seq_len): past_key_values.append(pkv) return past_key_values - def 
get_dummy_inputs(self, kv_offload: bool = False, continuous_batching: bool = False): + def get_dummy_inputs( + self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, continuous_batching: bool = False + ): if vis_cfg := getattr(self.config, "vision_config", None): img_size = getattr(vis_cfg, "image_size", 336) else: @@ -1142,6 +1225,10 @@ def get_dummy_inputs(self, kv_offload: bool = False, continuous_batching: bool = if continuous_batching: lang_inputs["batch_index"] = torch.arange(bs).view(bs, 1) + + if comp_ctx_lengths is not None: + lang_inputs["comp_ctx_lengths"] = torch.randint(0, 100, (40,), dtype=torch.long) + inputs = {} if kv_offload: inputs["vision"] = vision_inputs diff --git a/QEfficient/transformers/models/llama_swiftkv/modeling_llama_swiftkv.py b/QEfficient/transformers/models/llama_swiftkv/modeling_llama_swiftkv.py index 9fd1ed782..fa42b3f96 100644 --- a/QEfficient/transformers/models/llama_swiftkv/modeling_llama_swiftkv.py +++ b/QEfficient/transformers/models/llama_swiftkv/modeling_llama_swiftkv.py @@ -89,6 +89,7 @@ def forward( hidden_states: torch.Tensor, position_ids: torch.LongTensor, past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: torch.Tensor = None, batch_index: Optional[torch.LongTensor] = None, ) -> torch.Tensor: @@ -98,6 +99,7 @@ def forward( # Reshape the query, key, and value tensors. query_states = query.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + cache_kwargs = {"position_ids": position_ids, "batch_index": batch_index} if past_key_value is not None: if self.layer_idx is None: raise ValueError( @@ -105,8 +107,10 @@ def forward( "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " "with a layer index." 
) + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] kv_seq_len = past_key_value.get_seq_length(self.layer_idx) - cache_kwargs = {"position_ids": position_ids, "batch_index": batch_index} key_states, value_states = past_key_value.read_only(self.layer_idx, cache_kwargs=cache_kwargs) cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) @@ -155,6 +159,7 @@ def forward( hidden_states: torch.Tensor, position_ids: torch.Tensor, past_key_values, + comp_ctx_lengths, causal_mask, batch_index: Optional[torch.LongTensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: @@ -166,6 +171,7 @@ def forward( hidden_states=hidden_states, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, attention_mask=causal_mask, batch_index=batch_index, ) @@ -201,11 +207,19 @@ def __init__(self, config: QEffLlamaSwiftKVConfig): self.norm_swiftkv = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def _run_swiftkv_layers( - self, hidden_states: torch.Tensor, position_ids: torch.Tensor, past_key_values, causal_mask, batch_index + self, + hidden_states: torch.Tensor, + position_ids: torch.Tensor, + past_key_values, + comp_ctx_lengths, + causal_mask, + batch_index, ) -> torch.Tensor: for layer_idx in range(self.config.num_key_value_layers, self.config.num_hidden_layers): layer = self.layers[layer_idx] - hidden_states = layer(hidden_states, position_ids, past_key_values, causal_mask, batch_index) + hidden_states = layer( + hidden_states, position_ids, past_key_values, comp_ctx_lengths, causal_mask, batch_index + ) hidden_states = self.norm(hidden_states) return hidden_states, past_key_values @@ -289,6 +303,7 @@ def forward( input_ids: Optional[torch.Tensor], position_ids: torch.Tensor, past_key_values: List[torch.Tensor], + comp_ctx_lengths: Optional[torch.LongTensor], batch_index: Optional[torch.LongTensor] = None, ): inputs_embeds = 
self.embed_tokens(input_ids) @@ -328,6 +343,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, output_attentions=False, use_cache=True, @@ -373,7 +389,7 @@ def forward( causal_mask = causal_mask[torch.arange(bsz).reshape(-1, 1), :, last_pos_id, :] hidden_states, next_decoder_cache = self._run_swiftkv_layers( - hidden_states, position_ids, past_key_values, causal_mask, batch_index + hidden_states, position_ids, past_key_values, comp_ctx_lengths, causal_mask, batch_index ) # We can fill the orig_hidden_states with the processed hidden_states here but it's not needed as for next token prediction # we only need the last valid pos_indices hidden_states. @@ -405,9 +421,12 @@ def forward( input_ids: torch.Tensor, position_ids: torch.Tensor, past_key_values: Optional[Union[List[torch.FloatTensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, ): - hidden_states, output_past_key_values = self.model(input_ids, position_ids, past_key_values, batch_index) + hidden_states, output_past_key_values = self.model( + input_ids, position_ids, past_key_values, comp_ctx_lengths, batch_index + ) logits = self.lm_head(hidden_states) return CausalLMOutputWithPast( loss=None, diff --git a/QEfficient/transformers/models/llava/modeling_llava.py b/QEfficient/transformers/models/llava/modeling_llava.py index e260beb05..dc6653db0 100644 --- a/QEfficient/transformers/models/llava/modeling_llava.py +++ b/QEfficient/transformers/models/llava/modeling_llava.py @@ -5,6 +5,8 @@ # # ----------------------------------------------------------------------------- +from typing import List, Optional + import torch import torch.nn as nn import torch.utils.checkpoint @@ -51,7 +53,15 @@ def __init__(self, model): self.language_model = self.model.language_model self.lm_head = self.model.lm_head - def forward(self, input_ids, 
vision_embeds, position_ids, image_idx, past_key_values): + def forward( + self, + input_ids, + vision_embeds, + position_ids, + image_idx, + past_key_values, + comp_ctx_lengths: Optional[List[int]] = None, + ): inputs_embeds = self.model.get_input_embeddings()(input_ids) vision_embeds = vision_embeds.to(inputs_embeds.device, inputs_embeds.dtype) mask = input_ids == self.model.config.image_token_index @@ -65,6 +75,7 @@ def forward(self, input_ids, vision_embeds, position_ids, image_idx, past_key_va inputs_embeds=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, return_dict=True, ) @@ -83,7 +94,15 @@ def get_qeff_vision_encoder(self): def get_qeff_language_decoder(self): return QEFFLlavaDecoderWrapper(self) - def forward(self, input_ids, position_ids, pixel_values, image_idx, past_key_values): + def forward( + self, + input_ids, + position_ids, + pixel_values, + image_idx, + past_key_values, + comp_ctx_lengths: Optional[List[int]] = None, + ): inputs_embeds = self.get_input_embeddings()(input_ids) # Image features image_outputs = self.vision_tower(pixel_values, output_hidden_states=True) @@ -109,6 +128,7 @@ def forward(self, input_ids, position_ids, pixel_values, image_idx, past_key_val inputs_embeds=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, ) logit_index = position_ids.to(torch.int32).argmax(1, keepdim=True) @@ -120,7 +140,7 @@ def forward(self, input_ids, position_ids, pixel_values, image_idx, past_key_val image_idx = torch.where(image_idx < next_image_idx, next_image_idx, image_idx) return logits, pixel_values, image_idx, outputs.past_key_values - def get_dummy_inputs(self, kv_offload: bool = False, **kwargs): + def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, **kwargs): num_layers = self.config.text_config.num_hidden_layers num_key_value_heads = 
self.config.text_config.num_key_value_heads head_dim = self.config.text_config.hidden_size // self.config.text_config.num_attention_heads @@ -150,6 +170,10 @@ def get_dummy_inputs(self, kv_offload: bool = False, **kwargs): ) ) lang_inputs["position_ids"] = torch.full(lang_inputs["position_ids"].shape, CTX_LEN - 1) + + if comp_ctx_lengths is not None: + lang_inputs["comp_ctx_lengths"] = torch.randint(0, 100, (40,), dtype=torch.long) + inputs = {} if kv_offload: @@ -166,6 +190,8 @@ def get_specializations( prefill_seq_len: int, ctx_len: int, img_size: int, + comp_ctx_lengths_prefill: Optional[List[int]] = None, + comp_ctx_lengths_decode: Optional[List[int]] = None, kv_offload: bool = False, **compiler_options, ): @@ -187,24 +213,55 @@ def get_specializations( "img_size": img_size, } ] - lang = [ - { - "batch_size": batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "max_num_images": max_num_images, - "img_size": img_size, - "vision_size": vision_size, - }, - { - "batch_size": batch_size, - "seq_len": "1", - "ctx_len": ctx_len, - "max_num_images": max_num_images, - "img_size": img_size, - "vision_size": vision_size, - }, - ] + + if comp_ctx_lengths_prefill and comp_ctx_lengths_decode: + lang = [] + + for i in range(0, len(comp_ctx_lengths_prefill)): + lang.append( + { + "batch_size": batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_prefill[i], + "max_num_images": max_num_images, + "img_size": img_size, + "vision_size": vision_size, + } + ) + + for i in range(0, len(comp_ctx_lengths_decode)): + lang.append( + { + "batch_size": batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_decode[i], + "max_num_images": max_num_images, + "img_size": img_size, + "vision_size": vision_size, + } + ) + else: + lang = [ + { + "batch_size": batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "max_num_images": max_num_images, + "img_size": img_size, + "vision_size": 
vision_size, + }, + { + "batch_size": batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "max_num_images": max_num_images, + "img_size": img_size, + "vision_size": vision_size, + }, + ] + specializations = {} if kv_offload: @@ -214,7 +271,7 @@ def get_specializations( else: return lang, compiler_options - def get_onnx_dynamic_axes(self, kv_offload: bool = False): + def get_onnx_dynamic_axes(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): # Define dynamic axes num_layers = self.config.text_config.num_hidden_layers @@ -230,6 +287,9 @@ def get_onnx_dynamic_axes(self, kv_offload: bool = False): lang_dynamic_axes[f"past_key.{i}"] = {0: "batch_size", 2: "ctx_len"} lang_dynamic_axes[f"past_value.{i}"] = {0: "batch_size", 2: "ctx_len"} + if comp_ctx_lengths is not None: + lang_dynamic_axes["comp_ctx_lengths"] = {0: "comp_ctx_lengths"} + dynamic_axes = {} if kv_offload: dynamic_axes["vision"] = vision_dynamic_axes diff --git a/QEfficient/transformers/models/llava_next/modeling_llava_next.py b/QEfficient/transformers/models/llava_next/modeling_llava_next.py index 2fa1d9234..2e4848b6b 100755 --- a/QEfficient/transformers/models/llava_next/modeling_llava_next.py +++ b/QEfficient/transformers/models/llava_next/modeling_llava_next.py @@ -6,6 +6,8 @@ # ----------------------------------------------------------------------------- +from typing import List, Optional + import numpy as np import torch import torch.nn as nn @@ -123,7 +125,15 @@ def __init__(self, model): self.language_model = self.model.language_model self.lm_head = self.model.lm_head - def forward(self, input_ids, vision_embeds, position_ids, image_idx, past_key_values): + def forward( + self, + input_ids, + vision_embeds, + position_ids, + image_idx, + past_key_values, + comp_ctx_lengths: Optional[List[int]] = None, + ): inputs_embeds = self.model.get_input_embeddings()(input_ids) image_features = vision_embeds.to(inputs_embeds.device, inputs_embeds.dtype) mask = input_ids == 
self.config.image_token_index @@ -138,6 +148,7 @@ def forward(self, input_ids, vision_embeds, position_ids, image_idx, past_key_va inputs_embeds=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, ) image_idx = (indices1.max() + 1).unsqueeze(0).unsqueeze(0) logit_index = position_ids.to(torch.int32).argmax(1, keepdim=True) @@ -154,7 +165,7 @@ def get_qeff_vision_encoder(self): def get_qeff_language_decoder(self): return QEffLlavaNextDecoderWrapper(self) - def get_dummy_inputs(self, kv_offload: bool = False, **kwargs): + def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, **kwargs): num_layers = self.config.text_config.num_hidden_layers num_key_value_heads = self.config.text_config.num_key_value_heads head_dim = self.config.text_config.hidden_size // self.config.text_config.num_attention_heads @@ -217,6 +228,10 @@ def get_dummy_inputs(self, kv_offload: bool = False, **kwargs): ) ) lang_inputs["position_ids"] = torch.full(lang_inputs["position_ids"].shape, constants.GRANITEVISION_CTX_LEN - 1) + + if comp_ctx_lengths is not None: + lang_inputs["comp_ctx_lengths"] = torch.randint(0, 100, (40,), dtype=torch.long) + inputs = {} if kv_offload: inputs["vision"] = vision_inputs @@ -232,6 +247,8 @@ def get_specializations( prefill_seq_len: int, ctx_len: int, img_size: int, + comp_ctx_lengths_prefill: Optional[List[int]] = None, + comp_ctx_lengths_decode: Optional[List[int]] = None, kv_offload: bool = False, **compiler_options, ): @@ -285,30 +302,67 @@ def get_specializations( "img_size": img_size, } ] - lang = [ - { - "batch_size": batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "image_size_height": image_size_height, - "image_size_width": image_size_width, - "num_patches": num_patches, - "max_num_images": max_num_images, - "img_size": img_size, - "vision_size": vision_size, - }, - { - "batch_size": batch_size, - "seq_len": "1", - "ctx_len": ctx_len, - 
"image_size_height": image_size_height, - "image_size_width": image_size_width, - "num_patches": num_patches, - "max_num_images": max_num_images, - "img_size": img_size, - "vision_size": vision_size, - }, - ] + if comp_ctx_lengths_prefill is not None: + lang = [] + + for i in range(0, len(comp_ctx_lengths_prefill)): + lang.append( + { + "batch_size": batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_prefill[i], + "image_size_height": image_size_height, + "image_size_width": image_size_width, + "num_patches": num_patches, + "max_num_images": max_num_images, + "img_size": img_size, + "vision_size": vision_size, + } + ) + + # Remaining elements use comp_ctx_lengths[1:] in a loop + for i in range(0, len(comp_ctx_lengths_decode)): + lang.append( + { + "batch_size": batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_decode[i], + "image_size_height": image_size_height, + "image_size_width": image_size_width, + "num_patches": num_patches, + "max_num_images": max_num_images, + "img_size": img_size, + "vision_size": vision_size, + } + ) + else: + lang = [ + { + "batch_size": batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "image_size_height": image_size_height, + "image_size_width": image_size_width, + "num_patches": num_patches, + "max_num_images": max_num_images, + "img_size": img_size, + "vision_size": vision_size, + }, + { + "batch_size": batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "image_size_height": image_size_height, + "image_size_width": image_size_width, + "num_patches": num_patches, + "max_num_images": max_num_images, + "img_size": img_size, + "vision_size": vision_size, + }, + ] + specializations = {} if kv_offload: specializations["vision"] = vision @@ -317,7 +371,7 @@ def get_specializations( else: return lang, compiler_options - def get_onnx_dynamic_axes(self, kv_offload: bool = False): + def get_onnx_dynamic_axes(self, comp_ctx_lengths: 
Optional[List[int]] = None, kv_offload: bool = False): # Define dynamic axes num_layers = self.config.text_config.num_hidden_layers vision_dynamic_axes = { @@ -332,6 +386,10 @@ def get_onnx_dynamic_axes(self, kv_offload: bool = False): for i in range(num_layers): lang_dynamic_axes[f"past_key.{i}"] = {0: "batch_size", 2: "ctx_len"} lang_dynamic_axes[f"past_value.{i}"] = {0: "batch_size", 2: "ctx_len"} + + if comp_ctx_lengths is not None: + lang_dynamic_axes["comp_ctx_lengths"] = {0: "comp_ctx_lengths"} + dynamic_axes = {} if kv_offload: dynamic_axes["vision"] = vision_dynamic_axes diff --git a/QEfficient/transformers/models/mistral/modeling_mistral.py b/QEfficient/transformers/models/mistral/modeling_mistral.py index ca23cc144..5edfb8f3a 100644 --- a/QEfficient/transformers/models/mistral/modeling_mistral.py +++ b/QEfficient/transformers/models/mistral/modeling_mistral.py @@ -140,6 +140,7 @@ def forward( attention_mask: Optional[torch.Tensor], position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, output_attentions: bool = False, use_cache: bool = False, @@ -164,6 +165,9 @@ def forward( if past_key_value is not None: cache_kwargs = {"batch_index": batch_index, "position_ids": position_ids} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward @@ -196,6 +200,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = False, use_cache: 
Optional[bool] = False, @@ -226,6 +231,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, output_attentions=output_attentions, use_cache=use_cache, @@ -256,6 +262,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -316,6 +323,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, output_attentions=output_attentions, use_cache=use_cache, @@ -354,6 +362,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, @@ -377,6 +386,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, inputs_embeds=inputs_embeds, use_cache=use_cache, diff --git a/QEfficient/transformers/models/mistral3/modeling_mistral3.py b/QEfficient/transformers/models/mistral3/modeling_mistral3.py index 735eec9e5..694ed4cde 100644 --- a/QEfficient/transformers/models/mistral3/modeling_mistral3.py +++ b/QEfficient/transformers/models/mistral3/modeling_mistral3.py @@ -5,7 +5,7 @@ # # ----------------------------------------------------------------------------- -from typing import Optional, Tuple, Union +from typing import List, Optional, Tuple, Union 
import torch import torch.nn as nn @@ -106,6 +106,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, vision_feature_layer: Optional[Union[int, list[int]]] = None, use_cache: Optional[bool] = None, @@ -126,6 +127,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, @@ -166,7 +168,15 @@ def __init__(self, model): self.config = self.model.config self.language_model = self.model.language_model - def forward(self, input_ids, vision_embeds, position_ids, image_idx, past_key_values): + def forward( + self, + input_ids, + vision_embeds, + position_ids, + image_idx, + past_key_values, + comp_ctx_lengths: Optional[List[int]] = None, + ): inputs_embeds = self.model.get_input_embeddings()(input_ids) vision_embeds = vision_embeds.to(inputs_embeds.device, inputs_embeds.dtype) mask = input_ids == self.model.config.image_token_index @@ -179,6 +189,7 @@ def forward(self, input_ids, vision_embeds, position_ids, image_idx, past_key_va inputs_embeds=inputs_embeds_1, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, ) # Cast to int32 to avoid ONNXRT issue @@ -198,7 +209,15 @@ def get_qeff_vision_encoder(self): def get_qeff_language_decoder(self): return QEFFMistral3DecoderWrapper(self) - def forward(self, input_ids, position_ids, pixel_values, image_idx, past_key_values): + def forward( + self, + input_ids, + position_ids, + pixel_values, + image_idx, + past_key_values, + comp_ctx_lengths: Optional[List[int]] = None, + ): inputs_embeds = self.get_input_embeddings()(input_ids) image_sizes = torch.tensor([[pixel_values.shape[2], 
pixel_values.shape[3]]]).repeat(pixel_values.shape[0], 1) image_features = self.get_image_features( @@ -219,6 +238,7 @@ def forward(self, input_ids, position_ids, pixel_values, image_idx, past_key_val inputs_embeds=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, ) # Cast to int32 to avoid ONNXRT issue logit_idx = position_ids.to(torch.int32).argmax(1, keepdim=True) @@ -230,7 +250,7 @@ def forward(self, input_ids, position_ids, pixel_values, image_idx, past_key_val return logits, pixel_values, image_idx, outputs.past_key_values - def get_dummy_inputs(self, kv_offload: bool = False, **kwargs): + def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, **kwargs): inputs_shapes = {} inputs_shapes["input_ids"] = (constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN) height = self.config.vision_config.image_size @@ -282,6 +302,9 @@ def get_dummy_inputs(self, kv_offload: bool = False, **kwargs): for kv in ["key", "value"]: lang_inputs["past_key_values"][i].append(torch.zeros(kv_cache_shape, dtype=torch.float32)) + if comp_ctx_lengths is not None: + lang_inputs["comp_ctx_lengths"] = torch.randint(0, 100, (40,), dtype=torch.long) + inputs = {} if kv_offload: inputs["vision"] = vision_inputs @@ -298,6 +321,8 @@ def get_specializations( prefill_seq_len: int, ctx_len: int, img_size: int, + comp_ctx_lengths_prefill: Optional[List[int]] = None, + comp_ctx_lengths_decode: Optional[List[int]] = None, kv_offload: bool = False, **compiler_options, ): @@ -323,22 +348,50 @@ def get_specializations( "vision_size": vision_size, } ] - lang = [ - { - "batch_size": batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "image_size": img_size, - "vision_size": vision_size, - }, - { - "batch_size": batch_size, - "seq_len": "1", - "ctx_len": ctx_len, - "image_size": img_size, - "vision_size": vision_size, - }, - ] + if comp_ctx_lengths_prefill is not 
None: + lang = [] + + for i in range(0, len(comp_ctx_lengths_prefill)): + lang.append( + { + "batch_size": batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_prefill[i], + "image_size": img_size, + "vision_size": vision_size, + } + ) + + # Remaining elements use comp_ctx_lengths[1:] in a loop + for i in range(0, len(comp_ctx_lengths_decode)): + lang.append( + { + "batch_size": batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_decode[i], + "image_size": img_size, + "vision_size": vision_size, + } + ) + else: + lang = [ + { + "batch_size": batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "image_size": img_size, + "vision_size": vision_size, + }, + { + "batch_size": batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "image_size": img_size, + "vision_size": vision_size, + }, + ] specializations = {} @@ -351,7 +404,7 @@ def get_specializations( lang[1].pop("vision_size") return lang, compiler_options - def get_onnx_dynamic_axes(self, kv_offload: bool = False): + def get_onnx_dynamic_axes(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): # Define dynamic axes num_layers = self.config.text_config.num_hidden_layers @@ -368,6 +421,9 @@ def get_onnx_dynamic_axes(self, kv_offload: bool = False): lang_dynamic_axes[f"past_key.{i}"] = {0: "batch_size", 2: "ctx_len"} lang_dynamic_axes[f"past_value.{i}"] = {0: "batch_size", 2: "ctx_len"} + if comp_ctx_lengths is not None: + lang_dynamic_axes["comp_ctx_lengths"] = {0: "comp_ctx_lengths"} + dynamic_axes = {} if kv_offload: dynamic_axes["vision"] = vision_dynamic_axes diff --git a/QEfficient/transformers/models/mixtral_moe/modeling_mixtral.py b/QEfficient/transformers/models/mixtral_moe/modeling_mixtral.py index 9b9e3448a..862714fea 100644 --- a/QEfficient/transformers/models/mixtral_moe/modeling_mixtral.py +++ b/QEfficient/transformers/models/mixtral_moe/modeling_mixtral.py @@ -137,6 +137,7 @@ 
def forward( attention_mask: Optional[torch.Tensor], position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: @@ -160,6 +161,9 @@ def forward( if past_key_value is not None: cache_kwargs = {"batch_index": batch_index, "position_ids": position_ids} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface = eager_attention_forward @@ -245,6 +249,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, output_router_logits: Optional[bool] = False, use_cache: Optional[bool] = False, @@ -282,6 +287,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -314,6 +320,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -375,6 +382,7 @@ def forward( position_ids=position_ids, batch_index=batch_index, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, output_router_logits=output_router_logits, use_cache=use_cache, 
cache_position=cache_position, @@ -412,6 +420,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -435,6 +444,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, inputs_embeds=inputs_embeds, use_cache=use_cache, diff --git a/QEfficient/transformers/models/mllama/modeling_mllama.py b/QEfficient/transformers/models/mllama/modeling_mllama.py index cb24f1de4..a3cb4273d 100644 --- a/QEfficient/transformers/models/mllama/modeling_mllama.py +++ b/QEfficient/transformers/models/mllama/modeling_mllama.py @@ -177,6 +177,7 @@ def forward( hidden_states: torch.Tensor, cross_attention_states: Optional[torch.Tensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, use_cache: bool = None, @@ -249,6 +250,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, position_embeddings: torch.Tensor = None, use_cache: bool = False, @@ -282,6 +284,9 @@ def forward( "batch_index": batch_index, "position_ids": position_ids, } + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface = 
eager_self_attention_forward @@ -316,6 +321,7 @@ def forward( full_text_row_masked_out_mask: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, @@ -350,6 +356,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -379,6 +386,7 @@ def forward( cross_attention_states: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, use_cache: bool = None, @@ -396,13 +404,17 @@ def forward( key_states = key_states.view(bsz, -1, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, -1, self.num_key_value_heads, self.head_dim).transpose(1, 2) if past_key_value is not None: + cache_kwargs = {"batch_index": batch_index, "position_ids": position_ids} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] # if we have a new image + new tokens, we only computed key_states on that new image # we still update the cross key states, past_image, new_image. And use it! 
key_states, value_states = past_key_value.update( key_states, value_states, self.layer_idx, - {"batch_index": batch_index, "position_ids": position_ids}, + cache_kwargs, ) elif past_key_value is not None: key_states, value_states = ( @@ -448,6 +460,7 @@ def forward( full_text_row_masked_out_mask: Tuple[torch.Tensor, torch.Tensor], position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, @@ -461,6 +474,7 @@ def forward( attention_mask=cross_attention_mask, cross_attention_states=cross_attention_states, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, cache_position=cache_position, ) @@ -594,6 +608,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, cross_attention_states: Optional[torch.FloatTensor] = None, cross_attention_mask: Optional[torch.Tensor] = None, @@ -658,6 +673,7 @@ def forward( full_text_row_masked_out_mask=full_text_row_masked_out_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, use_cache=use_cache, cache_position=cache_position, ) @@ -688,6 +704,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, cross_attention_states: Optional[torch.LongTensor] = None, cross_attention_mask: Optional[torch.LongTensor] = None, @@ -707,6 +724,7 @@ def forward( cross_attention_mask=cross_attention_mask, 
full_text_row_masked_out_mask=full_text_row_masked_out_mask, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, @@ -774,6 +792,7 @@ def forward( cross_attention_states: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, @@ -820,6 +839,7 @@ def forward( cross_attention_mask=cross_attention_mask, full_text_row_masked_out_mask=full_text_row_masked_out_mask, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, use_cache=use_cache, inputs_embeds=inputs_embeds, cache_position=cache_position, @@ -853,6 +873,7 @@ def forward( cross_attention_states: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, @@ -869,6 +890,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, @@ -879,7 +901,7 @@ def forward( logits = self.lm_head(hidden_states).float() return logits, image_idx, outputs.past_key_values, pixel_values - def get_dummy_inputs(self, kv_offload: bool = False): + def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): BS = constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE SEQ_LEN = constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN CTX_LEN = constants.ONNX_EXPORT_CTX_LEN @@ -943,6 +965,10 @@ def 
get_dummy_inputs(self, kv_offload: bool = False): lang_inputs["past_key_values"] = lang_inputs["past_key_values"].to_legacy_cache() lang_inputs["position_ids"] = torch.full(lang_inputs["position_ids"].shape, CTX_LEN - 1) + + if comp_ctx_lengths is not None: + lang_inputs["comp_ctx_lengths"] = torch.randint(0, 100, (40,), dtype=torch.long) + inputs = {} if kv_offload: @@ -959,6 +985,8 @@ def get_specializations( prefill_seq_len: int, ctx_len: int, img_size: int, + comp_ctx_lengths_prefill: Optional[List[int]] = None, + comp_ctx_lengths_decode: Optional[List[int]] = None, kv_offload: bool = False, **compiler_options, ): @@ -973,22 +1001,53 @@ def get_specializations( logger.warning("Setting `img_size=448` as it was neither passed nor found in vision_config") vision = [{"batch_size": batch_size, "max_num_images": max_num_images, "img_size": img_size}] - lang = [ - { - "batch_size": batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "max_num_images": max_num_images, - "img_size": img_size, - }, - { - "batch_size": batch_size, - "seq_len": "1", - "ctx_len": ctx_len, - "max_num_images": max_num_images, - "img_size": img_size, - }, - ] + + if comp_ctx_lengths_prefill is not None: + lang = [] + + for i in range(0, len(comp_ctx_lengths_prefill)): + lang.append( + { + "batch_size": batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_prefill[i], + "max_num_images": max_num_images, + "img_size": img_size, + } + ) + + # Remaining elements use comp_ctx_lengths[1:] in a loop + for i in range(0, len(comp_ctx_lengths_decode)): + lang.append( + { + "batch_size": batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_decode[i], + "max_num_images": max_num_images, + "img_size": img_size, + } + ) + + else: + lang = [ + { + "batch_size": batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "max_num_images": max_num_images, + "img_size": img_size, + }, + { + "batch_size": 
batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "max_num_images": max_num_images, + "img_size": img_size, + }, + ] + specializations = {} if kv_offload: @@ -998,7 +1057,7 @@ def get_specializations( else: return lang, compiler_options - def get_onnx_dynamic_axes(self, kv_offload: bool = False): + def get_onnx_dynamic_axes(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): txt_cfg = self.config.get_text_config() num_hidden_layers = txt_cfg.num_hidden_layers cross_attention_layers = txt_cfg.cross_attention_layers @@ -1023,6 +1082,9 @@ def get_onnx_dynamic_axes(self, kv_offload: bool = False): lang_dynamic_axes[f"past_key.{i}"] = {0: "batch_size", 2: "ctx_len"} lang_dynamic_axes[f"past_value.{i}"] = {0: "batch_size", 2: "ctx_len"} + if comp_ctx_lengths is not None: + lang_dynamic_axes["comp_ctx_lengths"] = {0: "comp_ctx_lengths"} + dynamic_axes = {} if kv_offload: dynamic_axes["vision"] = vision_dynamic_axes diff --git a/QEfficient/transformers/models/modeling_auto.py b/QEfficient/transformers/models/modeling_auto.py index 60f60c768..5f1ec51e6 100644 --- a/QEfficient/transformers/models/modeling_auto.py +++ b/QEfficient/transformers/models/modeling_auto.py @@ -59,6 +59,7 @@ constants, get_padding_shape_from_config, ) +from QEfficient.utils.check_ccl_specializations import process_ccl_specializations from QEfficient.utils.logging_utils import logger @@ -860,6 +861,7 @@ def __init__( self, model: nn.Module, continuous_batching: bool = False, + qaic_config: Optional[dict] = None, **kwargs, ): """ @@ -881,6 +883,9 @@ def __init__( raise NotImplementedError("Continuous batching is not supported for image-text-to-text models yet.") self.model = model self.config = model.config + + self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = process_ccl_specializations(qaic_config) + self.vision_model = QEffVisionEncoderForTextImageToTextModel(model, **kwargs) self.lang_model = QEffCausalLMForTextImageToTextModel(model, **kwargs) 
self.continuous_batching = continuous_batching @@ -902,7 +907,7 @@ def model_name(self) -> str: return mname @classmethod - def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs): + def from_pretrained(cls, pretrained_model_name_or_path: str, qaic_config: Optional[dict] = None, **kwargs): """ Load a QEfficient multimodal model for dual QPC from a pretrained HuggingFace model or local path. @@ -928,7 +933,12 @@ def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs): kwargs.update({"attn_implementation": "eager", "low_cpu_mem_usage": False}) model = cls._hf_auto_class.from_pretrained(pretrained_model_name_or_path, **kwargs) - return cls(model, pretrained_model_name_or_path=pretrained_model_name_or_path, **kwargs) + return cls( + model, + pretrained_model_name_or_path=pretrained_model_name_or_path, + qaic_config=qaic_config, + **kwargs, + ) @property def onnx_path(self): @@ -985,13 +995,21 @@ def export( """ # TODO This is a temporary change as continous batching is enabled only for few models. Once support is added for all the models this exception handing can be removed. 
try: - inputs = self.model.get_dummy_inputs(kv_offload=True, continuous_batching=self.continuous_batching) + inputs = self.model.get_dummy_inputs( + kv_offload=True, + continuous_batching=self.continuous_batching, + comp_ctx_lengths=self.comp_ctx_lengths_decode, + ) dynamic_axes = self.model.get_onnx_dynamic_axes( - kv_offload=True, continuous_batching=self.continuous_batching + kv_offload=True, + continuous_batching=self.continuous_batching, + comp_ctx_lengths=self.comp_ctx_lengths_decode, ) except TypeError: - inputs = self.model.get_dummy_inputs(kv_offload=True) - dynamic_axes = self.model.get_onnx_dynamic_axes(kv_offload=True) + inputs = self.model.get_dummy_inputs(kv_offload=True, comp_ctx_lengths=self.comp_ctx_lengths_decode) + dynamic_axes = self.model.get_onnx_dynamic_axes( + kv_offload=True, comp_ctx_lengths=self.comp_ctx_lengths_decode + ) output_names = self.model.get_output_names(kv_offload=True) self.vision_model.export( @@ -1096,10 +1114,17 @@ def compile( output_names = self.model.get_output_names(kv_offload=True) + # For supporting VLLM and Disaggregated with CCL + if "comp_ctx_lengths_prefill" in compiler_options: + self.comp_ctx_lengths_prefill = compiler_options.pop("comp_ctx_lengths_prefill") + self.comp_ctx_lengths_decode = compiler_options.pop("comp_ctx_lengths_decode") + specializations, compiler_options = self.model.get_specializations( batch_size=batch_size, prefill_seq_len=prefill_seq_len, ctx_len=ctx_len, + comp_ctx_lengths_prefill=self.comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=self.comp_ctx_lengths_decode, img_size=img_size, kv_offload=True, continuous_batching=self.continuous_batching, @@ -1246,6 +1271,8 @@ def generate( device_id=device_ids, # if device_ids is not None else [0], ctx_len=ctx_len_comp, full_batch_size=fbs, + comp_ctx_lengths_prefill=self.comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=self.comp_ctx_lengths_decode, ) # Call generate method @@ -1393,11 +1420,23 @@ def kv_offload_generate( 
lang_session.set_buffers(vision_outputs) + if self.comp_ctx_lengths_prefill is not None: + list_of_comp_ctx_lengths_prefill = [np.zeros(length) for length in self.comp_ctx_lengths_prefill] + prefill_ccl_id = 0 + lang_inputs["comp_ctx_lengths"] = list_of_comp_ctx_lengths_prefill[prefill_ccl_id] + lang_start = perf_counter() # Run prefill chunk_inputs = lang_inputs.copy() for i in range(num_chunks): + if ( + self.comp_ctx_lengths_prefill is not None + and (i + 1) * prefill_seq_len > self.comp_ctx_lengths_prefill[prefill_ccl_id] + ): + prefill_ccl_id = min(prefill_ccl_id + 1, len(self.comp_ctx_lengths_prefill) - 1) + chunk_inputs["comp_ctx_lengths"] = list_of_comp_ctx_lengths_prefill[prefill_ccl_id] + chunk_inputs["input_ids"] = lang_inputs["input_ids"][:, i * prefill_seq_len : (i + 1) * prefill_seq_len] chunk_inputs["position_ids"] = lang_inputs["position_ids"][ ..., i * prefill_seq_len : (i + 1) * prefill_seq_len @@ -1429,8 +1468,25 @@ def kv_offload_generate( streamer.put(lang_inputs["input_ids"][0]) # Decode loop + if self.comp_ctx_lengths_decode is not None: + max_ccl_id = len(self.comp_ctx_lengths_decode) - 1 + list_of_comp_ctx_lengths_decode = [np.zeros(length) for length in self.comp_ctx_lengths_decode] + max_position_id = np.max(lang_inputs["position_ids"]) + ccl_id_initial = 0 + ccl_id = ccl_id_initial + for i in range(ccl_id_initial, len(self.comp_ctx_lengths_decode)): + if max_position_id < self.comp_ctx_lengths_decode[i]: + ccl_id = i + break + lang_inputs["comp_ctx_lengths"] = list_of_comp_ctx_lengths_decode[ccl_id] + decode_start = perf_counter() for num_token in range(1, generation_len): + if self.comp_ctx_lengths_decode is not None: + if max_position_id >= self.comp_ctx_lengths_decode[ccl_id] - 1: + ccl_id = min(ccl_id + 1, max_ccl_id) + lang_inputs["comp_ctx_lengths"] = list_of_comp_ctx_lengths_decode[ccl_id] + outputs = lang_session.run(lang_inputs) # Prepare inputs for next iteration @@ -1480,6 +1536,7 @@ class 
_QEFFAutoModelForImageTextToTextSingleQPC(QEFFTransformersBase, Multimodal def __init__( self, model: nn.Module, + qaic_config: Optional[dict] = None, **kwargs, ): """ @@ -1501,6 +1558,8 @@ def __init__( raise NotImplementedError("Continuous batching is not supported for image-text-to-text models yet.") super().__init__(model, **kwargs) + self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = process_ccl_specializations(qaic_config) + # to handle internvl models if hasattr(self.model.config, "llm_config") and hasattr(self.model.config, "vision_config"): self.model.config.llm_config.use_cache = True @@ -1517,6 +1576,7 @@ def __init__( def from_pretrained( cls, pretrained_model_name_or_path, + qaic_config: Optional[dict] = None, *args, **kwargs, ): @@ -1554,7 +1614,12 @@ def from_pretrained( config.vision_config.use_flash_attn = "false" model = cls._hf_auto_class.from_pretrained(pretrained_model_name_or_path, config, *args, **kwargs) - return cls(model, pretrained_model_name_or_path=pretrained_model_name_or_path, **kwargs) + return cls( + model, + pretrained_model_name_or_path=pretrained_model_name_or_path, + qaic_config=qaic_config, + **kwargs, + ) def export( self, @@ -1576,8 +1641,8 @@ def export( str Path to the generated ONNX graph file. 
""" - inputs = self.model.get_dummy_inputs() - dynamic_axes = self.model.get_onnx_dynamic_axes() + inputs = self.model.get_dummy_inputs(comp_ctx_lengths=self.comp_ctx_lengths_decode) + dynamic_axes = self.model.get_onnx_dynamic_axes(comp_ctx_lengths=self.comp_ctx_lengths_decode) output_names = self.model.get_output_names() return self._export(inputs, output_names, dynamic_axes, export_dir=export_dir) @@ -1651,14 +1716,24 @@ def compile( f"full_batch_size={full_batch_size}, kv_cache_batch_size={kv_cache_batch_size}, num_speculative_tokens={num_speculative_tokens}, " ) + # Infer kv_cache_batch_size if not provided + kv_cache_batch_size = kv_cache_batch_size or full_batch_size or batch_size output_names = self.model.get_output_names() + # For supporting VLLM and Disaggregated with CCL + if "comp_ctx_lengths_prefill" in compiler_options: + self.comp_ctx_lengths_prefill = compiler_options.pop("comp_ctx_lengths_prefill") + self.comp_ctx_lengths_decode = compiler_options.pop("comp_ctx_lengths_decode") + # Get specializations from modelling file # TODO: expose this via the auto class as well specializations, compiler_options = self.model.get_specializations( batch_size=batch_size, prefill_seq_len=prefill_seq_len, ctx_len=ctx_len, + comp_ctx_lengths_prefill=self.comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=self.comp_ctx_lengths_decode, + kv_cache_batch_size=kv_cache_batch_size, img_size=img_size, **compiler_options, ) @@ -1677,6 +1752,11 @@ def compile( if output_name.endswith("_RetainedState"): custom_io[output_name] = "float16" if "pixel_values" in output_name else kv_cache_dtype + # TODO this hould be removed once the continous batching is supported for all the models. 
+ compiler_options.pop("continuous_batching", None) + compiler_options.pop("kv_cache_batch_size", None) + compiler_options.pop("full_batch_size", None) + self._compile( onnx_path=onnx_path, compile_dir=compile_dir, @@ -1843,12 +1923,24 @@ def cloud_ai_100_generate( inputs["position_ids"] = np.where(inputs.pop("attention_mask"), np.arange(padded_len), -1) inputs["image_idx"] = np.array([[0]]) + if self.comp_ctx_lengths_prefill is not None: + list_of_comp_ctx_lengths_prefill = [np.zeros(length) for length in self.comp_ctx_lengths_prefill] + prefill_ccl_id = 0 + inputs["comp_ctx_lengths"] = list_of_comp_ctx_lengths_prefill[prefill_ccl_id] + qpc_session.activate() chunk_inputs = inputs.copy() prefill_start = perf_counter() # Run prefill for i in range(num_chunks): + if ( + self.comp_ctx_lengths_prefill is not None + and (i + 1) * prefill_seq_len > self.comp_ctx_lengths_prefill[prefill_ccl_id] + ): + prefill_ccl_id = min(prefill_ccl_id + 1, len(self.comp_ctx_lengths_prefill) - 1) + chunk_inputs["comp_ctx_lengths"] = list_of_comp_ctx_lengths_prefill[prefill_ccl_id] + chunk_inputs["input_ids"] = inputs["input_ids"][:, i * prefill_seq_len : (i + 1) * prefill_seq_len] chunk_inputs["position_ids"] = inputs["position_ids"][:, i * prefill_seq_len : (i + 1) * prefill_seq_len] outputs = qpc_session.run(chunk_inputs) @@ -1872,8 +1964,25 @@ def cloud_ai_100_generate( inputs.pop("pixel_values") # Decode loop + if self.comp_ctx_lengths_decode is not None: + list_of_comp_ctx_lengths_decode = [np.zeros(length) for length in self.comp_ctx_lengths_decode] + max_ccl_id = len(self.comp_ctx_lengths_decode) - 1 + max_position_id = np.max(inputs["position_ids"]) + ccl_id_initial = 0 + ccl_id = ccl_id_initial + for i in range(ccl_id_initial, len(self.comp_ctx_lengths_decode)): + if max_position_id < self.comp_ctx_lengths_decode[i]: + ccl_id = i + break + inputs["comp_ctx_lengths"] = list_of_comp_ctx_lengths_decode[ccl_id] + decode_start = perf_counter() for num_token in range(1, 
generation_len): + if self.comp_ctx_lengths_decode is not None: + if max_position_id >= self.comp_ctx_lengths_decode[ccl_id] - 1: + ccl_id = min(ccl_id + 1, max_ccl_id) + inputs["comp_ctx_lengths"] = list_of_comp_ctx_lengths_decode[ccl_id] + outputs = qpc_session.run(inputs) # Prepare inputs for next iteration inputs["input_ids"] = outputs["logits"].argmax(2) @@ -1991,7 +2100,14 @@ class QEFFAutoModelForImageTextToText: _hf_auto_class = AutoModelForImageTextToText - def __new__(self, model: nn.Module, kv_offload: Optional[bool] = True, continuous_batching: bool = False, **kwargs): + def __new__( + self, + model: nn.Module, + kv_offload: Optional[bool] = True, + continuous_batching: bool = False, + qaic_config: Optional[dict] = None, + **kwargs, + ): """ Instantiate the appropriate internal class for single or dual QPC mode. @@ -2012,9 +2128,11 @@ def __new__(self, model: nn.Module, kv_offload: Optional[bool] = True, continuou The wrapped model instance, configured for either dual or single QPC. 
""" if kv_offload: - return _QEffAutoModelForImageTextToTextDualQPC(model, continuous_batching, **kwargs) + return _QEffAutoModelForImageTextToTextDualQPC( + model, continuous_batching, qaic_config=qaic_config, **kwargs + ) else: - return _QEFFAutoModelForImageTextToTextSingleQPC(model, **kwargs) + return _QEFFAutoModelForImageTextToTextSingleQPC(model, qaic_config=qaic_config, **kwargs) @classmethod @with_replaced_quantizers @@ -2023,6 +2141,7 @@ def from_pretrained( pretrained_model_name_or_path: str, kv_offload: Optional[bool] = None, continuous_batching: bool = False, + qaic_config: Optional[dict] = None, **kwargs, ): """ @@ -2063,12 +2182,14 @@ def from_pretrained( logger.warning("Updating low_cpu_mem_usage=False") kwargs.update({"attn_implementation": "eager", "low_cpu_mem_usage": False}) + model = cls._hf_auto_class.from_pretrained(pretrained_model_name_or_path, **kwargs) return cls( model, kv_offload=kv_offload, continuous_batching=continuous_batching, pretrained_model_name_or_path=pretrained_model_name_or_path, + qaic_config=qaic_config, **kwargs, ) @@ -2163,6 +2284,9 @@ def __init__( ) # Set use_cache=True to get KV values as output during ONNX export model.config.use_cache = True + + self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = process_ccl_specializations(qaic_config) + super().__init__(model, qaic_config=qaic_config, **kwargs) self.num_layers = model.config.num_hidden_layers self.continuous_batching = continuous_batching @@ -2273,7 +2397,11 @@ def from_pretrained( if model.__class__.__name__ in MISCLASSIFIED_CAUSAL_LM_TO_QEFF_AUTO_CLASS_MAP: return MISCLASSIFIED_CAUSAL_LM_TO_QEFF_AUTO_CLASS_MAP[model.__class__.__name__]( - model, kv_offload=kv_offload, pretrained_model_name_or_path=pretrained_model_name_or_path, **kwargs + model, + kv_offload=kv_offload, + pretrained_model_name_or_path=pretrained_model_name_or_path, + qaic_config=qaic_config, + **kwargs, ) return cls( model, @@ -2329,6 +2457,10 @@ def export(self, export_dir: 
Optional[str] = None) -> str: "input_ids": {0: "batch_size", 1: "seq_len"}, "position_ids": {0: "batch_size", 1: "seq_len"}, } + if self.comp_ctx_lengths_prefill is not None: + example_inputs["comp_ctx_lengths"] = torch.randint(0, 512, (512,), dtype=torch.long) + dynamic_axes["comp_ctx_lengths"] = {0: "comp_ctx_lengths"} + if len(kv_cache_shape) == 3: # For GPTBigCode arch the pkv is 3d pkv_dynamic_axes = { 0: "full_batch_size" if self.continuous_batching else "batch_size", @@ -2485,6 +2617,7 @@ def build_prefill_specialization( self, prefill_seq_len: int = 32, ctx_len: int = 128, + comp_ctx_lengths: Optional[int] = None, batch_size: int = 1, kv_cache_batch_size: Optional[int] = None, full_batch_size: Optional[int] = None, @@ -2522,6 +2655,8 @@ def build_prefill_specialization( "seq_len": prefill_seq_len, "ctx_len": ctx_len, } + if comp_ctx_lengths is not None: + spec["comp_ctx_lengths"] = comp_ctx_lengths spec["num_logits_to_keep"] = 1 if self.is_tlm else None if self.continuous_batching: spec["full_batch_size"] = kv_cache_batch_size @@ -2535,6 +2670,7 @@ def build_decode_specialization( self, prefill_seq_len: int = 32, ctx_len: int = 128, + comp_ctx_lengths: Optional[int] = None, batch_size: int = 1, kv_cache_batch_size: Optional[int] = None, full_batch_size: Optional[int] = None, @@ -2579,6 +2715,9 @@ def build_decode_specialization( "seq_len": (num_speculative_tokens + 1) if self.is_tlm else 1, "ctx_len": ctx_len, } + if comp_ctx_lengths is not None: + spec["comp_ctx_lengths"] = comp_ctx_lengths + spec["num_logits_to_keep"] = (num_speculative_tokens + 1) if self.is_tlm else None if self.continuous_batching: @@ -2681,6 +2820,25 @@ def compile( If `prefill_seq_len` is less than `num_speculative_tokens + 1` for TLM models. 
""" + + # For supporting VLLM and Disaggregated with CCL + if "comp_ctx_lengths_prefill" in compiler_options and "comp_ctx_lengths_decode" in compiler_options: + comp_ctx_lengths_prefill = compiler_options.pop("comp_ctx_lengths_prefill") + comp_ctx_lengths_decode = compiler_options.pop("comp_ctx_lengths_decode") + if isinstance(comp_ctx_lengths_prefill, str): + import ast + + try: + # Safely evaluate the string to a Python list for disaggregated input + self.comp_ctx_lengths_prefill = ast.literal_eval(comp_ctx_lengths_prefill) + self.comp_ctx_lengths_decode = ast.literal_eval(comp_ctx_lengths_decode) + + except (ValueError, SyntaxError): + raise ValueError("Invalid format for comp_ctx_lengths. Expected a list-like string.") + else: + self.comp_ctx_lengths_prefill = comp_ctx_lengths_prefill + self.comp_ctx_lengths_decode = comp_ctx_lengths_decode + # --- Validation --- if prefill_only is not None and not isinstance(prefill_only, bool): raise TypeError("`prefill_only` must be a boolean.") @@ -2711,26 +2869,58 @@ def compile( # --- Specializations --- specializations = [] if prefill_only is None or prefill_only or prefill_seq_len == 1: - specializations.append( - self.build_prefill_specialization( + if self.comp_ctx_lengths_prefill is not None: + # Adding elements from self.comp_ctx_lengths_prefill to prefill_specialization + for i in range(0, len(self.comp_ctx_lengths_prefill)): + specializations.append( + self.build_prefill_specialization( + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + comp_ctx_lengths=self.comp_ctx_lengths_prefill[i], + batch_size=batch_size, + kv_cache_batch_size=kv_cache_batch_size, + full_batch_size=full_batch_size, + ) + ) + + else: + specializations.append( + self.build_prefill_specialization( + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + batch_size=batch_size, + kv_cache_batch_size=kv_cache_batch_size, + full_batch_size=full_batch_size, + ) + ) + + if prefill_only is None or not prefill_only: + if 
self.comp_ctx_lengths_decode is not None: + # Adding elements from self.comp_ctx_lengths_decode to decode_specialization + for i in range(0, len(self.comp_ctx_lengths_decode)): + decode_spec = self.build_decode_specialization( + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + comp_ctx_lengths=self.comp_ctx_lengths_decode[i], + batch_size=batch_size, + kv_cache_batch_size=kv_cache_batch_size, + full_batch_size=full_batch_size, + num_speculative_tokens=num_speculative_tokens, + ) + if decode_spec: + specializations.append(decode_spec) + + else: + decode_spec = self.build_decode_specialization( prefill_seq_len=prefill_seq_len, ctx_len=ctx_len, batch_size=batch_size, kv_cache_batch_size=kv_cache_batch_size, full_batch_size=full_batch_size, + num_speculative_tokens=num_speculative_tokens, ) - ) - if prefill_only is None or not prefill_only: - decode_spec = self.build_decode_specialization( - prefill_seq_len=prefill_seq_len, - ctx_len=ctx_len, - batch_size=batch_size, - kv_cache_batch_size=kv_cache_batch_size, - full_batch_size=full_batch_size, - num_speculative_tokens=num_speculative_tokens, - ) - if decode_spec: - specializations.append(decode_spec) + if decode_spec: + specializations.append(decode_spec) # --- Compilation --- kv_cache_dtype = "mxint8" if mxint8_kv_cache else "float16" @@ -2808,6 +2998,8 @@ def generate( tokenizer=tokenizer, qpc_path=self.qpc_path, prompt=prompts, + comp_ctx_lengths_prefill=self.comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=self.comp_ctx_lengths_decode, device_id=device_id, generation_len=generation_len, automation=kwargs.pop("automation", False), diff --git a/QEfficient/transformers/models/molmo/modeling_molmo.py b/QEfficient/transformers/models/molmo/modeling_molmo.py index 4f92316ca..c088158c4 100644 --- a/QEfficient/transformers/models/molmo/modeling_molmo.py +++ b/QEfficient/transformers/models/molmo/modeling_molmo.py @@ -243,6 +243,7 @@ def attention( attention_bias: Optional[torch.Tensor] = None, position_ids: 
Optional[torch.Tensor] = None, layer_past: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, use_cache: bool = False, **kwargs, @@ -279,7 +280,15 @@ def attention( if layer_past is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache - cache_kwargs = {"sin": sin, "cos": cos, "batch_index": batch_index, "position_ids": position_ids} + cache_kwargs = { + "sin": sin, + "cos": cos, + "batch_index": batch_index, + "position_ids": position_ids, + } + if comp_ctx_lengths is not None: + attention_bias = attention_bias[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_bias.shape[-1] k, v = layer_past.update(k, v, self.layer_id, cache_kwargs) attention_interface: Callable = eager_attention_forward @@ -311,6 +320,7 @@ def forward( attention_bias: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, layer_past: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, use_cache: bool = False, **kwargs, @@ -334,6 +344,7 @@ def forward( attention_bias, position_ids=position_ids, layer_past=layer_past, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, ) @@ -380,6 +391,7 @@ def forward( subsegment_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, use_cache: bool = False, last_logits_only: bool = False, @@ -496,6 +508,7 @@ def forward( attention_bias=causal_mask, position_ids=position_ids, layer_past=layer_past, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, ) @@ -518,6 +531,7 @@ def forward( attention_bias=causal_mask, position_ids=position_ids, layers_past=layers_past, + 
comp_ctx_lengths=comp_ctx_lengths, use_cache=use_cache, ) @@ -574,7 +588,15 @@ def __init__(self, model): # self.language_model = self.model.language_model self.config = self.model.config - def forward(self, input_ids, vision_embeds, position_ids, image_idx, past_key_values): + def forward( + self, + input_ids, + vision_embeds, + position_ids, + image_idx, + past_key_values, + comp_ctx_lengths: Optional[List[int]] = None, + ): if input_ids is not None: input_ids = input_ids * (input_ids != -1).to(input_ids.dtype) inputs_embeds = self.model.model.transformer.wte(input_ids) @@ -587,7 +609,11 @@ def forward(self, input_ids, vision_embeds, position_ids, image_idx, past_key_va # inputs_embeds = torch.where(input_ids.shape[1] == torch.tensor(1), inputs_embeds, image_embeds) outputs = self.model.model.forward( - input_embeddings=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, use_cache=True + input_embeddings=inputs_embeds, + position_ids=position_ids, + past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, + use_cache=True, ) next_idx = (indices1.max() + 1).unsqueeze(0).unsqueeze(0) image_idx = torch.where(image_idx < next_idx, next_idx, image_idx) @@ -608,7 +634,16 @@ def get_qeff_language_decoder(self): """ def forward( - self, pixel_values, image_masks, image_input_idx, valid_idx, input_ids, position_ids, image_idx, past_key_values + self, + pixel_values, + image_masks, + image_input_idx, + valid_idx, + input_ids, + position_ids, + image_idx, + past_key_values, + comp_ctx_lengths: Optional[List[int]] = None, ): image_features, _ = self.model.vision_backbone(pixel_values, image_masks) num_image, num_patch = image_features.shape[1:3] @@ -637,7 +672,11 @@ def forward( inputs_embeds = torch.where(input_ids.shape[1] == torch.tensor(1), inputs_embeds, image_embeds) outputs = self.model.forward( - input_embeddings=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, use_cache=True + 
input_embeddings=inputs_embeds, + position_ids=position_ids, + past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, + use_cache=True, ) next_idx = (indices1.max() + 1).unsqueeze(0).unsqueeze(0) image_idx = torch.where(image_idx < next_idx, next_idx, image_idx) @@ -651,6 +690,8 @@ def get_specializations( ctx_len: int, num_images: int = None, img_size: int = None, + comp_ctx_lengths_prefill: Optional[List[int]] = None, + comp_ctx_lengths_decode: Optional[List[int]] = None, valid_size: int = None, kv_offload: bool = False, **compiler_options, @@ -679,30 +720,77 @@ def get_specializations( } ] - lang_prefill = { - "batch_size": batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "valid_size": valid_size, - } - - lang_decode = {"batch_size": batch_size, "seq_len": "1", "ctx_len": ctx_len, "valid_size": valid_size} + if comp_ctx_lengths_prefill is not None and comp_ctx_lengths_decode is not None: + lang = [] + + for i in range(0, len(comp_ctx_lengths_prefill)): + lang_prefill = { + "batch_size": batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_prefill[i], + "valid_size": valid_size, + } + if kv_offload: + values = { + "img_size": img_size, + "img_tile": img_tile, + "num_images": num_images, + "num_patch": num_patch, + } + + for key, value in values.items(): + lang_prefill[key] = value + + lang.append(lang_prefill) + + for i in range(0, len(comp_ctx_lengths_decode)): + lang_decode = { + "batch_size": batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_decode[i], + "valid_size": valid_size, + } + if kv_offload: + values = { + "img_size": img_size, + "img_tile": img_tile, + "num_images": num_images, + "num_patch": num_patch, + } + + for key, value in values.items(): + lang_decode[key] = value + + lang.append(lang_decode) - if kv_offload: - values = { - "img_size": img_size, - "img_tile": img_tile, - "num_images": num_images, - "num_patch": num_patch, 
+ else: + lang_prefill = { + "batch_size": batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "valid_size": valid_size, } - for key, value in values.items(): - lang_prefill[key] = value - lang_decode[key] = value + lang_decode = {"batch_size": batch_size, "seq_len": "1", "ctx_len": ctx_len, "valid_size": valid_size} + + if kv_offload: + values = { + "img_size": img_size, + "img_tile": img_tile, + "num_images": num_images, + "num_patch": num_patch, + } + + for key, value in values.items(): + lang_prefill[key] = value + lang_decode[key] = value + + lang = [] + lang.append(lang_prefill) + lang.append(lang_decode) - lang = [] - lang.append(lang_prefill) - lang.append(lang_decode) specializations = {} if kv_offload: @@ -712,7 +800,7 @@ def get_specializations( else: return lang, compiler_options - def get_onnx_dynamic_axes(self, kv_offload: bool = False): + def get_onnx_dynamic_axes(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): # Define dynamic axes vision_dynamic_axes = {} lang_dynamic_axes = {} @@ -731,6 +819,9 @@ def get_onnx_dynamic_axes(self, kv_offload: bool = False): lang_dynamic_axes[f"past_key.{i}"] = {0: "batch_size", 2: "ctx_len"} lang_dynamic_axes[f"past_value.{i}"] = {0: "batch_size", 2: "ctx_len"} + if comp_ctx_lengths is not None: + lang_dynamic_axes["comp_ctx_lengths"] = {0: "comp_ctx_lengths"} + dynamic_axes = {} if kv_offload: dynamic_axes["vision"] = vision_dynamic_axes @@ -760,7 +851,7 @@ def get_output_names(self, kv_offload: bool = False): return lang_output_names return output_names - def get_dummy_inputs(self, kv_offload: bool = False, **kwargs): + def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, **kwargs): inputs_shapes = {} inputs_shapes_lang = {} inputs_shapes["input_ids"] = (constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN) @@ -823,6 +914,9 @@ def get_dummy_inputs(self, kv_offload: bool = False, **kwargs): for kv in 
["key", "value"]: lang_inputs["past_key_values"][i].append(torch.zeros(kv_cache_shape, dtype=torch.float32)) + if comp_ctx_lengths is not None: + lang_inputs["comp_ctx_lengths"] = torch.randint(0, 100, (40,), dtype=torch.long) + inputs = {} if kv_offload: inputs["vision"] = vision_inputs diff --git a/QEfficient/transformers/models/mpt/modeling_mpt.py b/QEfficient/transformers/models/mpt/modeling_mpt.py index 9bf6a4422..c1d98c1f8 100644 --- a/QEfficient/transformers/models/mpt/modeling_mpt.py +++ b/QEfficient/transformers/models/mpt/modeling_mpt.py @@ -39,6 +39,7 @@ def forward( position_ids: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, ): @@ -52,6 +53,9 @@ def forward( if past_key_value is not None: cache_kwargs = {"position_ids": position_ids, "batch_index": batch_index} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) * self.softmax_scale @@ -101,6 +105,7 @@ def forward( position_ids: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, use_cache: bool = False, output_attentions: bool = False, ): @@ -118,6 +123,7 @@ def forward( batch_index=batch_index, attention_mask=attention_mask, past_key_value=layer_past, + comp_ctx_lengths=comp_ctx_lengths, use_cache=use_cache, ) @@ -144,6 +150,7 @@ def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, 
torch.Tensor], ...]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, @@ -205,6 +212,7 @@ def forward( outputs = block( hidden_states, layer_past=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, attention_mask=causal_mask, position_ids=position_ids, batch_index=batch_index, @@ -250,6 +258,7 @@ def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, @@ -271,6 +280,7 @@ def forward( transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, attention_mask=attention_mask, position_ids=position_ids, batch_index=batch_index, diff --git a/QEfficient/transformers/models/olmo2/modeling_olmo2.py b/QEfficient/transformers/models/olmo2/modeling_olmo2.py index 0d23729c1..00755cae5 100644 --- a/QEfficient/transformers/models/olmo2/modeling_olmo2.py +++ b/QEfficient/transformers/models/olmo2/modeling_olmo2.py @@ -132,6 +132,7 @@ def forward( attention_mask: Optional[torch.Tensor], position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, @@ -156,6 +157,9 @@ def forward( if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"batch_index": batch_index, "position_ids": position_ids} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + 
cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward @@ -188,6 +192,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, @@ -203,6 +208,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -233,6 +239,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -286,6 +293,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -322,6 +330,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -343,6 +352,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, inputs_embeds=inputs_embeds, 
use_cache=use_cache, diff --git a/QEfficient/transformers/models/phi/modeling_phi.py b/QEfficient/transformers/models/phi/modeling_phi.py index 18557f1ca..4bf2e8785 100644 --- a/QEfficient/transformers/models/phi/modeling_phi.py +++ b/QEfficient/transformers/models/phi/modeling_phi.py @@ -67,6 +67,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, output_attentions: bool = False, use_cache: bool = False, @@ -105,7 +106,15 @@ def forward( if past_key_value is not None: # Update the cache_kwargs with position_ids for Cloud AI 100 - cache_kwargs = {"sin": sin, "cos": cos, "batch_index": batch_index, "position_ids": position_ids} + cache_kwargs = { + "sin": sin, + "cos": cos, + "batch_index": batch_index, + "position_ids": position_ids, + } + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward @@ -140,6 +149,7 @@ def forward( output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, past_key_value: Optional[Tuple[torch.Tensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46 **kwargs, @@ -181,6 +191,7 @@ def forward( position_ids=position_ids, batch_index=batch_index, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, @@ -213,6 +224,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: 
Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -274,6 +286,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, output_attentions=output_attentions, use_cache=use_cache, @@ -316,6 +329,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, @@ -370,6 +384,7 @@ def forward( position_ids=position_ids, batch_index=batch_index, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, diff --git a/QEfficient/transformers/models/phi3/modeling_phi3.py b/QEfficient/transformers/models/phi3/modeling_phi3.py index 4b5234a5a..b97a0ab8d 100644 --- a/QEfficient/transformers/models/phi3/modeling_phi3.py +++ b/QEfficient/transformers/models/phi3/modeling_phi3.py @@ -140,6 +140,7 @@ def forward( batch_index: Optional[torch.LongTensor] = None, position_ids=Optional[torch.Tensor], past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: @@ -166,6 +167,9 @@ def forward( "batch_index": batch_index, "position_ids": position_ids, } + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] 
= attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward @@ -198,6 +202,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, @@ -235,6 +240,7 @@ def forward( position_ids=position_ids, batch_index=batch_index, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, @@ -265,6 +271,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -314,6 +321,7 @@ def forward( position_ids=position_ids, batch_index=batch_index, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, use_cache=use_cache, cache_position=cache_position, **kwargs, @@ -350,6 +358,7 @@ def forward( position_ids: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_hidden_states: Optional[bool] = None, @@ -366,6 +375,7 @@ def forward( batch_index=batch_index, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, inputs_embeds=inputs_embeds, use_cache=use_cache, output_hidden_states=output_hidden_states, diff --git 
a/QEfficient/transformers/models/qwen2/modeling_qwen2.py b/QEfficient/transformers/models/qwen2/modeling_qwen2.py index 24e8df46c..7c093a4b0 100644 --- a/QEfficient/transformers/models/qwen2/modeling_qwen2.py +++ b/QEfficient/transformers/models/qwen2/modeling_qwen2.py @@ -150,6 +150,7 @@ def forward( attention_mask: Optional[torch.Tensor], position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, @@ -167,6 +168,9 @@ def forward( if past_key_value is not None: cache_kwargs = {"batch_index": batch_index, "position_ids": position_ids} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface = eager_attention_forward @@ -200,6 +204,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, @@ -231,6 +236,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -261,6 +267,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ 
-313,6 +320,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -348,6 +356,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -364,6 +373,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, inputs_embeds=inputs_embeds, use_cache=use_cache, diff --git a/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py b/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py index 445c15583..baffb44c5 100644 --- a/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +++ b/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py @@ -485,6 +485,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, output_attentions: bool = False, use_cache: bool = False, @@ -512,7 +513,15 @@ def forward( if past_key_value is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache - cache_kwargs = {"sin": sin, "cos": cos, "batch_index": batch_index, "position_ids": position_ids[0]} + cache_kwargs = { + "sin": sin, + "cos": cos, + "batch_index": batch_index, + "position_ids": position_ids[0], + } + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + 
cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward @@ -543,6 +552,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, @@ -582,6 +592,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, output_attentions=output_attentions, use_cache=use_cache, @@ -614,6 +625,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -664,6 +676,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, output_attentions=output_attentions, use_cache=use_cache, @@ -702,6 +715,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -725,6 +739,7 @@ def forward( position_ids=position_ids, attention_mask=attention_mask, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, 
inputs_embeds=inputs_embeds, use_cache=use_cache, @@ -774,6 +789,7 @@ def forward( image_idx, past_key_values, batch_index: Optional[torch.LongTensor] = None, + comp_ctx_lengths: Optional[List[int]] = None, ): inputs_embeds = self.model.get_input_embeddings()(input_ids) B, N, C = inputs_embeds.shape @@ -788,6 +804,7 @@ def forward( inputs_embeds=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=True, ) @@ -807,7 +824,13 @@ def get_qeff_vision_encoder(self): def get_qeff_language_decoder(self): return QEffQwen_2_5_vl_DecoderWrapper(self) - def get_dummy_inputs(self, kv_offload: bool = False, continuous_batching: bool = False, **kwargs): + def get_dummy_inputs( + self, + comp_ctx_lengths: Optional[List[int]] = None, + kv_offload: bool = False, + continuous_batching: bool = False, + **kwargs, + ): inputs_shapes = {} inputs_shapes["input_ids"] = (constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN) @@ -862,6 +885,9 @@ def get_dummy_inputs(self, kv_offload: bool = False, continuous_batching: bool = if continuous_batching: lang_inputs["batch_index"] = torch.arange(bs).view(bs, 1) + if comp_ctx_lengths is not None: + lang_inputs["comp_ctx_lengths"] = torch.randint(0, 100, (40,), dtype=torch.long) + inputs = {} if kv_offload: inputs["vision"] = vision_inputs @@ -887,6 +913,9 @@ def get_specializations( full_batch_size: Optional[int] = None, **compiler_options, ): + comp_ctx_lengths_prefill = compiler_options.pop("comp_ctx_lengths_prefill", None) + comp_ctx_lengths_decode = compiler_options.pop("comp_ctx_lengths_decode", None) + if height is None or width is None: height = constants.QWEN2_5_VL_HEIGHT width = constants.QWEN2_5_VL_WIDTH @@ -966,37 +995,77 @@ def smart_resize( "grid_w": grid_w, } ] - lang_prefill = { - "batch_size": 1 if continuous_batching else batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "vision_size": vision_size, 
- "vision_batch_size": batch_size, - } - if continuous_batching: - lang_prefill["full_batch_size"] = kv_cache_batch_size + if comp_ctx_lengths_prefill is not None: + lang = [] + + for i in range(0, len(comp_ctx_lengths_prefill)): + lang_prefill = { + "batch_size": 1 if continuous_batching else batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "vision_size": vision_size, + "comp_ctx_lengths": comp_ctx_lengths_prefill[i], + "vision_batch_size": batch_size, + } + + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + + lang.append(lang_prefill) + + for i in range(0, len(comp_ctx_lengths_decode)): + lang_decode = { + "batch_size": full_batch_size if continuous_batching else batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "vision_size": vision_size, + "comp_ctx_lengths": comp_ctx_lengths_decode[i], + "vision_batch_size": batch_size, + } + + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size + + lang.append(lang_decode) else: - lang_prefill["batch_size"] = kv_cache_batch_size - if full_batch_size: - lang_prefill["full_batch_exec_size"] = full_batch_size - - lang_decode = { - "batch_size": full_batch_size if continuous_batching else batch_size, - "seq_len": 1, - "ctx_len": ctx_len, - "vision_size": vision_size, - "vision_batch_size": batch_size, - } + lang_prefill = { + "batch_size": 1 if continuous_batching else batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "vision_size": vision_size, + "vision_batch_size": batch_size, + } - if continuous_batching: - lang_decode["full_batch_size"] = kv_cache_batch_size - else: - lang_decode["batch_size"] = kv_cache_batch_size + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = 
kv_cache_batch_size + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + + lang_decode = { + "batch_size": full_batch_size if continuous_batching else batch_size, + "seq_len": 1, + "ctx_len": ctx_len, + "vision_size": vision_size, + "vision_batch_size": batch_size, + } + + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size - lang = [] - lang.append(lang_prefill) - lang.append(lang_decode) + lang = [] + lang.append(lang_prefill) + lang.append(lang_decode) specializations = {} @@ -1009,7 +1078,9 @@ def smart_resize( lang[1].pop("vision_size") return lang, compiler_options - def get_onnx_dynamic_axes(self, kv_offload: bool = False, continuous_batching: bool = False): + def get_onnx_dynamic_axes( + self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, continuous_batching: bool = False + ): # Define dynamic axes num_layers = self.config.text_config.num_hidden_layers @@ -1037,6 +1108,9 @@ def get_onnx_dynamic_axes(self, kv_offload: bool = False, continuous_batching: b if continuous_batching: lang_dynamic_axes["batch_index"] = {0: "batch_size"} + if comp_ctx_lengths is not None: + lang_dynamic_axes["comp_ctx_lengths"] = {0: "comp_ctx_lengths"} + dynamic_axes = {} if kv_offload: diff --git a/QEfficient/transformers/models/qwen3/modeling_qwen3.py b/QEfficient/transformers/models/qwen3/modeling_qwen3.py index ecdb36019..540bad4c7 100644 --- a/QEfficient/transformers/models/qwen3/modeling_qwen3.py +++ b/QEfficient/transformers/models/qwen3/modeling_qwen3.py @@ -151,6 +151,7 @@ def forward( attention_mask: Optional[torch.Tensor], position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, @@ -168,6 +169,9 @@ def forward( if past_key_value is not None: 
cache_kwargs = {"batch_index": batch_index, "position_ids": position_ids} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface = eager_attention_forward @@ -201,6 +205,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, @@ -232,6 +237,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -262,6 +268,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -314,6 +321,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -349,6 +357,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -367,6 +376,7 @@ def forward( 
attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, inputs_embeds=inputs_embeds, use_cache=use_cache, diff --git a/QEfficient/transformers/models/qwen3_moe/modeling_qwen3_moe.py b/QEfficient/transformers/models/qwen3_moe/modeling_qwen3_moe.py index 591f7c1b0..cbd80d8ca 100644 --- a/QEfficient/transformers/models/qwen3_moe/modeling_qwen3_moe.py +++ b/QEfficient/transformers/models/qwen3_moe/modeling_qwen3_moe.py @@ -201,6 +201,7 @@ def forward( attention_mask: Optional[torch.Tensor], position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, @@ -218,6 +219,9 @@ def forward( if past_key_value is not None: cache_kwargs = {"batch_index": batch_index, "position_ids": position_ids} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface = eager_attention_forward @@ -243,6 +247,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, @@ -274,6 +279,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -300,6 +306,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, 
position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, batch_index: Optional[torch.LongTensor] = None, @@ -342,6 +349,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -369,6 +377,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, batch_index: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, @@ -385,6 +394,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, inputs_embeds=inputs_embeds, batch_index=batch_index, use_cache=use_cache, diff --git a/QEfficient/transformers/models/starcoder2/modeling_starcoder2.py b/QEfficient/transformers/models/starcoder2/modeling_starcoder2.py index 9a327761d..c86e7478b 100644 --- a/QEfficient/transformers/models/starcoder2/modeling_starcoder2.py +++ b/QEfficient/transformers/models/starcoder2/modeling_starcoder2.py @@ -69,6 +69,7 @@ def forward( attention_mask: Optional[torch.Tensor], position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, @@ -85,6 +86,9 @@ def forward( if past_key_value is not None: cache_kwargs = {"batch_index": batch_index, "position_ids": position_ids} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, 
:, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface = eager_attention_forward @@ -118,6 +122,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, @@ -153,6 +158,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -184,6 +190,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -237,6 +244,7 @@ def forward( attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, use_cache=use_cache, cache_position=cache_position, @@ -273,6 +281,7 @@ def forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, @@ -289,6 +298,7 @@ def forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, 
batch_index=batch_index, inputs_embeds=inputs_embeds, use_cache=use_cache, diff --git a/QEfficient/transformers/models/whisper/modeling_whisper.py b/QEfficient/transformers/models/whisper/modeling_whisper.py index e078493a7..a03ffecf7 100644 --- a/QEfficient/transformers/models/whisper/modeling_whisper.py +++ b/QEfficient/transformers/models/whisper/modeling_whisper.py @@ -55,6 +55,7 @@ def forward( position_ids_layer: torch.Tensor = None, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, @@ -100,6 +101,9 @@ def forward( value_states = value_states.transpose(1, 2).contiguous() if past_key_value is not None: cache_kwargs = {"position_ids": position_ids_layer} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] key_states, value_states = past_key_value.update( key_states, value_states, self.layer_idx, cache_kwargs ) @@ -181,6 +185,7 @@ def forward( layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Cache] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, cache_position: Optional[torch.LongTensor] = None, @@ -215,6 +220,7 @@ def forward( hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, + comp_ctx_lengths=comp_ctx_lengths, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, @@ -388,6 +394,7 @@ def forward( cross_attn_head_mask=None, position_ids=None, past_key_values=None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, inputs_embeds=None, use_cache=None, 
output_attentions=None, @@ -532,6 +539,7 @@ def forward( layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), past_key_value=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, output_attentions=output_attentions, use_cache=use_cache, position_ids_layer=position_ids, @@ -643,6 +651,7 @@ def forward( cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, @@ -674,6 +683,7 @@ def forward( head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, @@ -719,6 +729,7 @@ def forward( cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Union[EncoderDecoderCache, Tuple[torch.FloatTensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, decoder_inputs_embeds: Optional[Tuple[torch.FloatTensor]] = None, position_ids: Optional[Tuple[torch.LongTensor]] = None, labels: Optional[torch.LongTensor] = None, @@ -740,6 +751,7 @@ def forward( decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, decoder_inputs_embeds=decoder_inputs_embeds, decoder_position_ids=position_ids, use_cache=use_cache, diff --git a/QEfficient/utils/check_ccl_specializations.py b/QEfficient/utils/check_ccl_specializations.py new file mode 100644 index 000000000..308c69554 --- /dev/null +++ b/QEfficient/utils/check_ccl_specializations.py @@ -0,0 +1,51 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. 
and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + + +def process_ccl_specializations(qaic_config): + if qaic_config is None: + return None, None + ccl_prefill = qaic_config.pop("comp_ctx_lengths_prefill", None) + ccl_decode = qaic_config.pop("comp_ctx_lengths_decode", None) + ctx_len = qaic_config.pop("ctx_len", None) + prefill_seq_len = qaic_config.pop("prefill_seq_len", 128) + + if ccl_prefill is None or ccl_decode is None: + return None, None + + if ctx_len is None: + raise TypeError("`ctx_len` is required when loading the model with CCL.") + + if prefill_seq_len == 1: + # both prefill and decode ccl can share the same specializations since prefill_seq_len=1. So, a sorted union of both lists can be used for both of them. + ccl_union_all = sorted(set(ccl_prefill + ccl_decode)) + ccl_union_all = [min(x, ctx_len) for x in ccl_union_all] + return ccl_union_all, ccl_union_all + + # Step 1: Cap values to ctx_len + ccl_prefill = [min(x, ctx_len) for x in ccl_prefill] + ccl_decode = [min(x, ctx_len) for x in ccl_decode] + + # Step 2: Remove duplicates within each list + ccl_prefill = list(set(ccl_prefill)) + ccl_decode = list(set(ccl_decode)) + + # Step 3: Ensure no overlap between ccl_prefill and ccl_decode + updated_prefill = [] + for val in ccl_prefill: + while val in ccl_decode or val in updated_prefill: + val -= 1 + if val < 0: + break # Prevent negative values + if val >= 0: + updated_prefill.append(val) + + # Step 4: Sort both lists + updated_prefill.sort() + ccl_decode.sort() + + return updated_prefill, ccl_decode diff --git a/examples/ccl_gpt_oss.py b/examples/ccl_gpt_oss.py new file mode 100644 index 000000000..b211ba914 --- /dev/null +++ b/examples/ccl_gpt_oss.py @@ -0,0 +1,51 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +from transformers import AutoTokenizer, TextStreamer + +from QEfficient import QEFFAutoModelForCausalLM + +model_id = "openai/gpt-oss-20b" # weights are not required to convert to fp32 + +ctx_len = 4096 +# In moe models like gpt-oss, since prefill_seq_len=1 both comp_ctx_lengths_prefill and comp_ctx_lengths_decode can share similar lists. +# Set the list of ccl during prefilling process +comp_ctx_lengths_prefill = [512, ctx_len] +# Set the list of ccl during decoding process +comp_ctx_lengths_decode = [512, ctx_len] + + +qeff_model = QEFFAutoModelForCausalLM.from_pretrained( + model_id, + qaic_config={ + "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, + "comp_ctx_lengths_decode": comp_ctx_lengths_decode, + "ctx_len": ctx_len, + "prefill_seq_len": 1, # Passing prefill_seq_len is mandatory for CCL goal in moe models. Currently we can get best perf using PL=1. + }, +) +tokenizer = AutoTokenizer.from_pretrained(model_id) + +onnx_model_path = qeff_model.export() +qpc_path = qeff_model.compile( + prefill_seq_len=1, # Currently we can get best perf using PL=1 i.e. decode-only model, prefill optimizations are being worked on. + ctx_len=ctx_len, + num_cores=16, + mxfp6_matmul=True, + mxint8_kv_cache=True, + num_devices=4, + mos=1, + aic_enable_depth_first=True, + num_speculative_tokens=None, +) +print(f"qpc path is {qpc_path}") +streamer = TextStreamer(tokenizer) +exec_info = qeff_model.generate( + tokenizer, + prompts="Who is your creator? 
and What all you are allowed to do?", + generation_len=256, +) diff --git a/examples/ccl_image_text_to_text_inference.py b/examples/ccl_image_text_to_text_inference.py new file mode 100644 index 000000000..be472f433 --- /dev/null +++ b/examples/ccl_image_text_to_text_inference.py @@ -0,0 +1,137 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import requests +from PIL import Image +from transformers import AutoProcessor, TextStreamer + +from QEfficient import QEFFAutoModelForImageTextToText + +# Add HuggingFace Token to access the model +HF_TOKEN = "" + + +def run_model( + model_name, + token, + query, + image_url, + kv_offload=False, + prefill_seq_len=32, + ctx_len=512, + comp_ctx_lengths_prefill=None, + comp_ctx_lengths_decode=None, + generation_len=128, + img_size=560, + num_cores=16, + num_devices=1, +): + ## STEP - 1 Load the Processor and Model + + processor = AutoProcessor.from_pretrained(model_name, token=token) + + # `kv_offload` is used to compile the model in a Single QPC or 2 QPCs. + # The Dual QPC approach splits the model to perform Image Encoding and Output generation in 2 different QPCs. + # The outputs of the Vision Encoder are then passed to the Language model via host in this case. 
+ + model = QEFFAutoModelForImageTextToText.from_pretrained( + model_name, + token=token, + attn_implementation="eager", + kv_offload=kv_offload, + qaic_config={ + "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, + "comp_ctx_lengths_decode": comp_ctx_lengths_decode, + "ctx_len": ctx_len, + }, + ) + + ## STEP - 2 Export & Compile the Model + + model.compile( + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + img_size=img_size, + num_cores=num_cores, + num_devices=num_devices, + mxfp6_matmul=False, + ) + + ## STEP - 3 Load and process the inputs for Inference + + image = Image.open(requests.get(image_url, stream=True).raw) + messages = [ + { + "role": "user", + "content": [ + {"type": "image"}, + {"type": "text", "text": query}, + ], + } + ] + input_text = [processor.apply_chat_template(messages, add_generation_prompt=True)] + + inputs = processor( + text=input_text, + images=image, + return_tensors="pt", + add_special_tokens=False, + padding="max_length", + max_length=prefill_seq_len, + ) + + ## STEP - 4 Run Inference on the compiled model + + streamer = TextStreamer(processor.tokenizer) + output_statistics = model.generate(inputs=inputs, streamer=streamer, generation_len=generation_len) + print(output_statistics) + + +if __name__ == "__main__": + # Model name and Input parameters + # model_name = "llava-hf/llava-1.5-7b-hf" + model_name = "meta-llama/Llama-3.2-11B-Vision-Instruct" + query = "Describe this image." 
+ image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg" + + # Compilation parameters for the model + kv_offload = True + prefill_seq_len = 32 + ctx_len = 8192 + generation_len = 128 + # img_size = 336 + img_size = 560 + num_cores = 16 + num_devices = 4 + comp_ctx_lengths_prefill = [4096] + comp_ctx_lengths_decode = [6144, ctx_len] + + run_model( + model_name=model_name, + token=HF_TOKEN, + query=query, + kv_offload=kv_offload, + image_url=image_url, + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, + generation_len=generation_len, + img_size=img_size, + num_cores=num_cores, + num_devices=num_devices, + ) + + +""" +Expected Response: + +This image depicts a charming anthropomorphic rabbit standing on a dirt path in front of a picturesque stone cottage, surrounded by a serene landscape. + +The rabbit, with its light brown fur and distinctive long ears, is attired in a stylish blue coat, brown vest, and tan pants, exuding a sense of sophistication. The dirt path, flanked by vibrant flowers and lush greenery, leads to the cottage, which features a thatched roof and a chimney, adding to the rustic charm of the scene. In the background, rolling hills and trees create a breathtaking panorama, while the sky above is a brilliant blue with white clouds, completing the + +""" diff --git a/examples/ccl_llama4_CB_example_vision_lang.py b/examples/ccl_llama4_CB_example_vision_lang.py new file mode 100644 index 000000000..6423ee765 --- /dev/null +++ b/examples/ccl_llama4_CB_example_vision_lang.py @@ -0,0 +1,109 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ---------------------------------------------------------------------------- + +import transformers +from transformers import AutoConfig, AutoProcessor + +from QEfficient import QEFFAutoModelForImageTextToText + +model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct" +config = AutoConfig.from_pretrained(model_id) +# For Testing Purpose Only +config.text_config.num_hidden_layers = 4 +config.vision_config.num_hidden_layers = 2 + +tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) +processor = AutoProcessor.from_pretrained(model_id) + +ctx_len = 4096 +# Set the list of ccl during prefilling process +comp_ctx_lengths_prefill = [3072] +# Set the list of ccl during decoding process +comp_ctx_lengths_decode = [ctx_len] + +continious_batching = True +if continious_batching: + qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( + model_id, + attn_implementation="eager", + kv_offload=True, + config=config, + continuous_batching=True, + qaic_config={ + "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, + "comp_ctx_lengths_decode": comp_ctx_lengths_decode, + "ctx_len": ctx_len, + }, + ) + + qeff_model.compile( + prefill_seq_len=128, + ctx_len=ctx_len, + img_size=336, + num_cores=16, + num_devices=4, + max_num_tiles=17, + batch_size=1, + full_batch_size=4, + mxfp6_matmul=True, + mxint8_kv_cache=True, + aic_enable_depth_first=True, + mos=1, + ) +else: + qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( + model_id, + attn_implementation="eager", + kv_offload=True, + config=config, + qaic_config={ + "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, + "comp_ctx_lengths_decode": comp_ctx_lengths_decode, + "ctx_len": ctx_len, + }, + ) + + qeff_model.compile( + prefill_seq_len=128, + ctx_len=ctx_len, + img_size=336, + num_cores=16, + num_devices=4, + max_num_tiles=17, + batch_size=1, + mxfp6_matmul=True, + mxint8_kv_cache=True, + aic_enable_depth_first=True, + mos=1, + ) + +image_urls = [ + 
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", +] + +prompts = [ + "Can you describe the image in detail?", + "What are the objects in the image?", + "What is the main subject of the image?", + "What colors are predominant in the image?", +] + +exec_info = qeff_model.generate( + tokenizer=tokenizer, + prompts=prompts, + processor=processor, + images=image_urls, + device_ids=[32, 33, 34, 35], + generation_len=100, +) + +# print("Generated texts:", exec_info.generated_texts) +print("Generated IDs:", exec_info.generated_ids) +print(exec_info) diff --git a/examples/ccl_llama4_example.py b/examples/ccl_llama4_example.py new file mode 100644 index 000000000..5da29960f --- /dev/null +++ b/examples/ccl_llama4_example.py @@ -0,0 +1,128 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import torch +import transformers +from transformers import AutoConfig, AutoProcessor, TextStreamer + +from QEfficient import QEFFAutoModelForImageTextToText + +model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct" +config = AutoConfig.from_pretrained(model_id) +# For Testing Purpose Only +config.text_config.num_hidden_layers = 4 +config.vision_config.num_hidden_layers = 2 + +ctx_len = 8192 +# Set the list of ccl during prefilling process +comp_ctx_lengths_prefill = [3072] +# Set the list of ccl during decoding process +comp_ctx_lengths_decode = [4096, ctx_len] + +qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( + model_id, + attn_implementation="eager", + kv_offload=True, + qaic_config={ + "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, + "comp_ctx_lengths_decode": comp_ctx_lengths_decode, + "ctx_len": ctx_len, + }, + config=config, +) +tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) +processor = AutoProcessor.from_pretrained(model_id) + +### use skip_vision=Ture, if want to run only text, ow false ### +skip_vision = False + +if skip_vision: + ## Only Text ## + qeff_model.compile( + prefill_seq_len=128, + ctx_len=ctx_len, + img_size=336, + num_cores=16, + num_devices=4, + max_num_tiles=17, + mxfp6_matmul=True, + mxint8_kv_cache=True, + aic_enable_depth_first=True, + skip_vision=True, + mos=1, + ) + + messages = [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "Can you describe the image in detail.", + }, + ], + }, + ] + + inputs = processor.apply_chat_template( + messages, + add_generation_prompt=True, + tokenize=True, + return_dict=True, + return_tensors="pt", + ) + + streamer = TextStreamer(tokenizer) + output = qeff_model.generate(inputs=inputs, device_ids=[0, 1, 2, 3], generation_len=100) + print(output.generated_ids) + print(tokenizer.batch_decode(output.generated_ids)) + 
print(output) + +else: + ## Vision + Text ## + qeff_model.compile( + prefill_seq_len=128, + ctx_len=ctx_len, + img_size=336, + num_cores=16, + num_devices=4, + max_num_tiles=17, + mxfp6_matmul=True, + mxint8_kv_cache=True, + aic_enable_depth_first=True, + mos=1, + ) + + ### IMAGE + TEXT ### + image_url = ( + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png" + ) + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "url": image_url}, + {"type": "text", "text": "Can you describe the image in detail."}, + ], + }, + ] + + inputs = processor.apply_chat_template( + messages, + add_generation_prompt=True, + tokenize=True, + return_dict=True, + return_tensors="pt", + ) + inputs["pixel_values"] = inputs["pixel_values"].to(torch.float32) + streamer = TextStreamer(tokenizer) + output = qeff_model.generate(inputs=inputs, device_ids=[0, 1, 2, 3], generation_len=100) + print(output.generated_ids) + print(tokenizer.batch_decode(output.generated_ids)) + print(output) + print() diff --git a/examples/ccl_llama4_multi_image_example.py b/examples/ccl_llama4_multi_image_example.py new file mode 100644 index 000000000..33bf07df0 --- /dev/null +++ b/examples/ccl_llama4_multi_image_example.py @@ -0,0 +1,89 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import torch +import transformers +from transformers import AutoConfig, AutoProcessor, TextStreamer + +from QEfficient import QEFFAutoModelForImageTextToText + +model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct" +config = AutoConfig.from_pretrained(model_id) +# For Testing Purpose Only +config.text_config.num_hidden_layers = 4 +config.vision_config.num_hidden_layers = 2 + +ctx_len = 8192 +# Set the list of ccl during prefilling process +comp_ctx_lengths_prefill = [5376] +# Set the list of ccl during decoding process +comp_ctx_lengths_decode = [6144, ctx_len] + +qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( + model_id, + attn_implementation="eager", + kv_offload=True, + config=config, + qaic_config={ + "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, + "comp_ctx_lengths_decode": comp_ctx_lengths_decode, + "ctx_len": ctx_len, + }, +) +tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) +processor = AutoProcessor.from_pretrained(model_id) + +### For multi-image, the value of max_num_tiles should be the sum of the num_tiles values across all the images ### +qeff_model.compile( + prefill_seq_len=128, + ctx_len=ctx_len, + img_size=336, + num_cores=16, + num_devices=4, + max_num_tiles=34, + mxfp6_matmul=True, + mxint8_kv_cache=True, + aic_enable_depth_first=True, + mos=1, +) + +### Multi_image Prompt ### +image_url_1 = ( + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png" +) + + +image_url_2 = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg" + +messages = [ + { + "role": "user", + "content": [ + {"type": "image", "url": image_url_1}, + {"type": "image", "url": image_url_2}, + { + "type": "text", + "text": "Analyze the key elements, colors, and objects in the two 
images. Discuss their similarities, differences, and how they complement or contrast each other. Reflect on the emotions or ideas they convey, considering the context, light, shadow, and composition.", + }, + ], + }, +] + +inputs = processor.apply_chat_template( + messages, + add_generation_prompt=True, + tokenize=True, + return_dict=True, + return_tensors="pt", +) + +inputs["pixel_values"] = inputs["pixel_values"].to(torch.float32) +streamer = TextStreamer(tokenizer) +output = qeff_model.generate(inputs=inputs, device_ids=[32, 33, 34, 35], generation_len=100) +print(output.generated_ids) +print(tokenizer.batch_decode(output.generated_ids)) +print(output) diff --git a/examples/ccl_mistral3_example.py b/examples/ccl_mistral3_example.py new file mode 100644 index 000000000..96ed519f5 --- /dev/null +++ b/examples/ccl_mistral3_example.py @@ -0,0 +1,123 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import requests +from PIL import Image +from transformers import AutoConfig, AutoProcessor, TextStreamer + +from QEfficient import QEFFAutoModelForImageTextToText + + +def run_model( + model_name, + query, + image_url, + kv_offload=False, + prefill_seq_len=128, + ctx_len=4096, + comp_ctx_lengths_prefill=None, + comp_ctx_lengths_decode=None, + generation_len=128, + img_size=1540, + num_cores=16, + num_devices=4, +): + ## STEP - 1 Load the Processor and Model + + processor = AutoProcessor.from_pretrained(model_name) + + # `kv_offload` is used to compile the model in a 2 QPCs.Currently we are not supporting 1 qpc so the flag false is not allowed. + # The `kv_offload` flag should always be set to True. + # The Dual QPC approach splits the model to perform Image Encoding and Output generation in 2 different QPCs. 
+ # The outputs of the Vision Encoder are then passed to the Language model via host in this case. + + config = AutoConfig.from_pretrained(model_name) + config.vision_config._attn_implementation = "eager" + + model = QEFFAutoModelForImageTextToText.from_pretrained( + model_name, + kv_offload=kv_offload, + config=config, + qaic_config={ + "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, + "comp_ctx_lengths_decode": comp_ctx_lengths_decode, + "ctx_len": ctx_len, + }, + ) + + ## STEP - 2 Export & Compile the Model + + model.compile( + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + img_size=img_size, + num_cores=num_cores, + num_devices=num_devices, + mxfp6_matmul=False, + ) + + ## STEP - 3 Load and process the inputs for Inference + + # We are resizing the image to (w x h) (1540 x 1540) so that any image can work on the model irrespective of image dimensssions + # we have a fixed size of height 1540 and width 1540 as defined in the config + + image = Image.open(requests.get(image_url, stream=True).raw) + image = image.resize((1540, 1540)) + + messages = [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": query}]}] + input_text = processor.apply_chat_template(messages, add_generation_prompt=True) + inputs = processor(image, input_text, add_special_tokens=False, return_tensors="pt") + + ## STEP - 4 Run Inference on the compiled model + + streamer = TextStreamer(processor.tokenizer) + output = model.generate(inputs=inputs, streamer=streamer, generation_len=generation_len) + print(output) + + +if __name__ == "__main__": + # Model name and Input parameters + model_name = "mistralai/Mistral-Small-3.1-24B-Instruct-2503" + + # Please add prompt here + query = "Describe the image" + + # Please pass image url or image path .The format of the image should be jpg. 
+ image_url = "https://www.ilankelman.org/stopsigns/australia.jpg" + + # Compilation parameters for the model + kv_offload = True + prefill_seq_len = 128 + ctx_len = 8192 + generation_len = 128 + num_cores = 16 + num_devices = 4 + comp_ctx_lengths_prefill = [4096] + comp_ctx_lengths_decode = [6144, ctx_len] + + run_model( + model_name=model_name, + query=query, + kv_offload=kv_offload, + image_url=image_url, + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, + generation_len=generation_len, + num_cores=num_cores, + num_devices=num_devices, + ) + + +""" +Expected Response: +The image depicts a street scene in what appears to be a Chinatown district. The focal point is a traditional Chinese archway, known as a paifang, which is intricately designed with red columns and ornate details. The archway features Chinese characters at the top, which translate to "Chinatown Gate." +In the foreground, there is a red stop sign mounted on a pole. The street is relatively quiet, with a single dark-colored SUV driving through the archway. On either side of the archway, there are stone lion statues, which are common decorative elements in Chinese architecture and symbolize protection. + + +""" diff --git a/examples/ccl_molmo_example.py b/examples/ccl_molmo_example.py new file mode 100644 index 000000000..dd09fa020 --- /dev/null +++ b/examples/ccl_molmo_example.py @@ -0,0 +1,100 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# -----------------------------------------------------------------------------
+
+import requests
+import torch
+import transformers
+from PIL import Image
+from transformers import AutoConfig, AutoProcessor, TextStreamer
+
+from QEfficient import QEFFAutoModelForCausalLM
+
+model_id = "allenai/Molmo-7B-D-0924"
+config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
+
+# config.num_hidden_layers = 2
+
+# load the model
+ctx_len = 8192
+comp_ctx_lengths_prefill = [3072]
+comp_ctx_lengths_decode = [4096, 8192]
+
+qeff_model = QEFFAutoModelForCausalLM.from_pretrained(
+    model_id,
+    kv_offload=True,
+    trust_remote_code=True,
+    config=config,
+    qaic_config={
+        "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill,
+        "comp_ctx_lengths_decode": comp_ctx_lengths_decode,
+        "ctx_len": ctx_len,
+    },
+)
+tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
+
+### use skip_vision=True if you want to run only text; otherwise False ###
+skip_vision = False
+
+if skip_vision:
+    ## Only Text ##
+    qeff_model.compile(
+        prefill_seq_len=128,
+        ctx_len=ctx_len,
+        num_cores=16,
+        num_devices=4,
+        mxint8_kv_cache=True,
+        aic_enable_depth_first=True,
+        skip_vision=True,
+        mos=1,
+    )
+
+    inputs = processor.process(text="Tell me about yourself")
+    inputs = {k: v.unsqueeze(0) for k, v in inputs.items()}
+    inputs["input_ids"] = inputs["input_ids"].to(torch.int64)
+    inputs["attention_mask"] = torch.ones((inputs["input_ids"].shape), dtype=torch.int64)
+
+    streamer = TextStreamer(tokenizer)
+    output = qeff_model.generate(inputs=inputs, device_ids=[0, 1, 2, 3], generation_len=100)
+    print(output.generated_ids)
+    print(tokenizer.batch_decode(output.generated_ids))
+    print(output)
+
+else:
+    ## Vision + Text ##
+    qeff_model.compile(
+        prefill_seq_len=128,
+        ctx_len=ctx_len,
+        num_cores=16,
+        num_devices=4,
+        mxint8_kv_cache=True,
aic_enable_depth_first=True, + mos=1, + ) + + ### IMAGE + TEXT ### + image_url = "https://picsum.photos/id/237/536/354" + + image = Image.open(requests.get(image_url, stream=True).raw) + image = image.resize((536, 354)) + + inputs = processor.process(images=[image], text="Can you describe the image in detail.") + + inputs = {k: v.unsqueeze(0) for k, v in inputs.items()} + inputs["pixel_values"] = inputs.pop("images") + inputs["attention_mask"] = torch.ones((inputs["input_ids"].shape), dtype=torch.int64) + + valid = inputs["image_input_idx"] > 0 + valid = valid.reshape(1, -1) + inputs["valid_idx"] = torch.nonzero(valid)[:, 1].unsqueeze(0) + + streamer = TextStreamer(tokenizer) + output = qeff_model.generate(inputs=inputs, device_ids=[0, 1, 2, 3], generation_len=100) + print(output.generated_ids) + print(tokenizer.batch_decode(output.generated_ids)) + print(output) + print() diff --git a/examples/ccl_qwen2_5_vl_CB.py b/examples/ccl_qwen2_5_vl_CB.py new file mode 100644 index 000000000..6954d356f --- /dev/null +++ b/examples/ccl_qwen2_5_vl_CB.py @@ -0,0 +1,81 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +# If we want to enable QBlocking Run below command:, default is without blocking +# ATTENTION_BLOCKING_MODE=q num_q_blocks=2 python -W ignore qwen2_5_vl_example.py + +import transformers +from transformers import AutoConfig, AutoProcessor, TextStreamer + +from QEfficient import QEFFAutoModelForImageTextToText + +## For AWQ model update pytorch version to 2.8.* +model_id = "Qwen/Qwen2.5-VL-32B-Instruct" +config = AutoConfig.from_pretrained(model_id) +# config.text_config.num_hidden_layers = 2 + +ctx_len = 8192 +comp_ctx_lengths_prefill = [4096] +comp_ctx_lengths_decode = [6144, ctx_len] + +qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( + model_id, + attn_implementation="eager", + kv_offload=True, + config=config, + continuous_batching=True, + qaic_config={ + "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, + "comp_ctx_lengths_decode": comp_ctx_lengths_decode, + "ctx_len": ctx_len, + }, +) +tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) +processor = AutoProcessor.from_pretrained(model_id) + +batch_size = 1 +## Vision + Text ## +qeff_model.compile( + batch_size=batch_size, + full_batch_size=4, + prefill_seq_len=128, + ctx_len=8192, + num_cores=16, + num_devices=4, + height=354, + width=536, + mxfp6_matmul=True, + mxint8_kv_cache=True, + aic_enable_depth_first=True, + mos=1, +) + +image_urls = [ + "https://picsum.photos/id/237/536/354", + "https://picsum.photos/id/237/536/354", + "https://picsum.photos/id/237/536/354", + "https://picsum.photos/id/237/536/354", +] + +prompts = [ + "Can you describe the image in detail?", + "What are the objects in the image?", + "What is the main subject of the image?", + "What colors are predominant in the image?", +] + +streamer = TextStreamer(tokenizer) +output = qeff_model.generate( + tokenizer=tokenizer, + prompts=prompts, + processor=processor, + images=image_urls, + 
generation_len=100, +) +print(output.generated_ids) +print(tokenizer.batch_decode(output.generated_ids)) +print(output) diff --git a/examples/ccl_qwen2_5_vl_example.py b/examples/ccl_qwen2_5_vl_example.py new file mode 100644 index 000000000..273a18361 --- /dev/null +++ b/examples/ccl_qwen2_5_vl_example.py @@ -0,0 +1,152 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +# If we want to enable QBlocking Run below command:, default is without blocking +# ATTENTION_BLOCKING_MODE=q num_q_blocks=2 python -W ignore qwen2_5_vl_example.py + +import requests +import transformers +from PIL import Image +from qwen_vl_utils import process_vision_info +from transformers import AutoConfig, AutoProcessor, TextStreamer + +from QEfficient import QEFFAutoModelForImageTextToText + +## For AWQ model update pytorch version to 2.8.* +model_id = "Qwen/Qwen2.5-VL-32B-Instruct" +config = AutoConfig.from_pretrained(model_id) +# config.text_config.num_hidden_layers = 2 + +ctx_len = 8192 +comp_ctx_lengths_prefill = [4096] +comp_ctx_lengths_decode = [6144, ctx_len] + +qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( + model_id, + attn_implementation="eager", + kv_offload=True, + config=config, + qaic_config={ + "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, + "comp_ctx_lengths_decode": comp_ctx_lengths_decode, + "ctx_len": ctx_len, + }, +) +tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) +processor = AutoProcessor.from_pretrained(model_id) + +### use skip_vision=Ture, if want to run only text, ow false ### +skip_vision = False + +if skip_vision: + ## Only Text ## + + ## Set Batch_Size ## + batch_size = 1 + qeff_model.compile( + batch_size=batch_size, + prefill_seq_len=128, + ctx_len=ctx_len, + num_cores=16, + num_devices=4, + 
height=354, + width=536, + mxfp6_matmul=False, + aic_enable_depth_first=True, + skip_vision=True, + mos=1, + ) + + messages = [ + { + "role": "user", + "content": [ + {"type": "text", "text": "Tell me about yourself."}, + ], + }, + ] + + messages = [messages] * batch_size + + inputs = processor.apply_chat_template( + messages, + add_generation_prompt=True, + tokenize=True, + return_dict=True, + return_tensors="pt", + ) + + inputs = qeff_model.model.prepare_inputs_for_generation(inputs=inputs, prefill_seq_len=128, batch_size=batch_size) + + streamer = TextStreamer(tokenizer) + output = qeff_model.generate(inputs=inputs, generation_len=100, device_ids=[0, 1, 2, 3]) + print(output.generated_ids) + print(tokenizer.batch_decode(output.generated_ids)) + print(output) + +else: + batch_size = 1 + ## Vision + Text ## + qeff_model.compile( + batch_size=batch_size, + prefill_seq_len=128, + ctx_len=ctx_len, + num_cores=16, + num_devices=4, + height=354, + width=536, + mxfp6_matmul=True, + mxint8_kv_cache=True, + aic_enable_depth_first=True, + mos=1, + ) + + ### IMAGE + TEXT ### + image_url = "https://picsum.photos/id/237/536/354" + + image = Image.open(requests.get(image_url, stream=True).raw) + + messages_1 = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image}, + {"type": "text", "text": "Describe this image."}, + ], + }, + ] + + messages_2 = [ + { + "role": "user", + "content": [ + {"type": "image", "image": image}, + {"type": "text", "text": "Describe about the color of the dog."}, + ], + }, + ] + + messages = [messages_2] * batch_size + + texts = [processor.apply_chat_template(msg, tokenize=False, add_generation_prompt=True) for msg in messages] + + image_inputs, video_inputs = process_vision_info(messages) + inputs = processor( + text=texts, + images=image_inputs, + videos=video_inputs, + padding=True, + return_tensors="pt", + ) + + inputs = qeff_model.model.prepare_inputs_for_generation(inputs=inputs, prefill_seq_len=128, batch_size=batch_size) + + 
streamer = TextStreamer(tokenizer) + output = qeff_model.generate(inputs=inputs, generation_len=100, device_ids=[0, 1, 2, 3]) + print(output.generated_ids) + print(tokenizer.batch_decode(output.generated_ids)) + print(output) diff --git a/examples/compute_context_length.py b/examples/compute_context_length.py new file mode 100644 index 000000000..163261e04 --- /dev/null +++ b/examples/compute_context_length.py @@ -0,0 +1,70 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +## In this example, you can run a model for static and continuous batching with different Compute-Context-Length (CCL) inputs. ## + +from transformers import AutoTokenizer + +from QEfficient import QEFFAutoModelForCausalLM + +## Using optional variable comp_ctx_lengths variable you can pass a list of context lengths for both prefilling and decoding processes. It will run the model with default context length if comp_ctx_lengths=None. ## +## - The first comp_ctx_lengths_prefill list shows the compute-ctx-length list for prefilling process. It will start the prefilling process with the first element in the list and gradually will increase the comp_ctx_lengths based on the position_id of the current prompt chunk. ## +## - The second comp_ctx_lengths_decode list will be used for decoding. During the decoding process, based on the position_id or cache index it will work with the specific compute-context-length in the list. It will start from a proper compute-context-length in the list based on input prompt length and will gradually increase the compute-context-length if the cache index passes the current compute-context-length. 
## + +ctx_len = 1024 +comp_ctx_lengths_prefill = [256, 500] # None +comp_ctx_lengths_decode = [512, ctx_len] # None + +model_name = "meta-llama/Llama-3.2-1B" +# model_name = "google/gemma-7b" +# model_name = "tiiuae/falcon-7b-instruct" +# model_name = "google/gemma-2-2b" +# model_name = "ibm-granite/granite-3.1-8b-instruct" +# model_name = "Snowflake/Llama-3.1-SwiftKV-8B-Instruct" +# model_name = "mistralai/Mistral-7B-v0.1" +# model_name = "microsoft/phi-1_5" +# model_name = "microsoft/Phi-3-mini-4k-instruct" +# model_name = "Qwen/Qwen2.5-7B-Instruct" +# model_name = "Qwen/Qwen3-1.7B" +# model_name = "allenai/OLMo-2-0425-1B" +# model_name = "ibm-granite/granite-3.3-2b-base" +# model_name = "ibm-granite/granite-3.2-8b-instruct" +# model_name = "meta-llama/Llama-3.3-70B-Instruct" +# model_name = "Salesforce/codegen-350M-mono" +# model_name = "openai-community/gpt2" +# model_name = "EleutherAI/gpt-j-6b" + +model = QEFFAutoModelForCausalLM.from_pretrained( + model_name, + continuous_batching=True, + qaic_config={ + "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, + "comp_ctx_lengths_decode": comp_ctx_lengths_decode, + "ctx_len": ctx_len, # Is required for CCL checkings + }, +) + +# model compilation for either continuous or static batching. For continuous batching full_batch_size is needed. +model.compile( + prefill_seq_len=128, + ctx_len=ctx_len, + num_cores=16, + num_devices=1, + mxint8_kv_cache=True, + mxfp6_matmul=True, + full_batch_size=1, +) + +# Create tokenizer and run model.generate and passes the input prompts to it. 
+tokenizer = AutoTokenizer.from_pretrained(model_name) +model.generate( + prompts=[ + "My name is ", + ], + tokenizer=tokenizer, + generation_len=128, +) diff --git a/examples/gemma3_example/ccl_gemma3_mm.py b/examples/gemma3_example/ccl_gemma3_mm.py new file mode 100644 index 000000000..9bf6e9c5a --- /dev/null +++ b/examples/gemma3_example/ccl_gemma3_mm.py @@ -0,0 +1,121 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import torch +import transformers +from transformers import AutoConfig, AutoProcessor + +from QEfficient import QEFFAutoModelForImageTextToText + +# Change model_id to "google/gemma-3-27b-it" for 27B model +model_id = "google/gemma-3-4b-it" +config = AutoConfig.from_pretrained(model_id) +# For Testing Purpose Only +# config.text_config.num_hidden_layers = 1 +# config.vision_config.num_hidden_layers = 2 +tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, trust_remote_code=True) +processor = AutoProcessor.from_pretrained(model_id) + +# pass HF_TOKEN if gated model +# For running the model in single QPC approach use kv_offload=False. 
For Dual QPC approach use kv_offload=True ### +ctx_len = 8192 +comp_ctx_lengths_prefill = [3072] +comp_ctx_lengths_decode = [4096, ctx_len] + +qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( + model_id, + config=config, + attn_implementation="eager", + kv_offload=True, + qaic_config={ + "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, + "comp_ctx_lengths_decode": comp_ctx_lengths_decode, + "ctx_len": ctx_len, + }, +) + +### use skip_vision=Ture, if want to run only text, or false ### +skip_vision = False + +if skip_vision: + ## Only Text ## + qeff_model.compile( + prefill_seq_len=128, + ctx_len=ctx_len, + img_size=896, + num_cores=16, + num_devices=4, + mxfp6_matmul=False, + mxint8_kv_cache=False, + aic_enable_depth_first=True, + skip_vision=True, + mos=1, + node_precision_info="examples/gemma3_example/fp32_nodes_gemma3_27b.yaml", + ) + + messages = [ + { + "role": "user", + "content": [ + {"type": "text", "text": "Describe the transformers architecture in LLMs."}, + ], + }, + ] + + inputs = processor.apply_chat_template( + messages, + add_generation_prompt=True, + tokenize=True, + return_dict=True, + return_tensors="pt", + ) + + output = qeff_model.generate(inputs=inputs, generation_len=100) + print(tokenizer.batch_decode(output.generated_ids)) + print(output) + +else: + ## Vision + Text ## + qeff_model.compile( + prefill_seq_len=128, + ctx_len=ctx_len, + img_size=896, + num_cores=16, + num_devices=4, + mxfp6_matmul=False, + mxint8_kv_cache=False, + aic_enable_depth_first=True, + mos=1, + node_precision_info="examples/gemma3_example/fp32_nodes_gemma3_27b.yaml", + ) + + ### IMAGE + TEXT ### + image_url = ( + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png" + ) + + messages = [ + { + "role": "user", + "content": [ + {"type": "image", "url": image_url}, + {"type": "text", "text": "Can you describe the image in detail."}, + ], + }, + ] + + inputs = processor.apply_chat_template( + 
messages, + add_generation_prompt=True, + tokenize=True, + return_dict=True, + return_tensors="pt", + ) + inputs["pixel_values"] = inputs["pixel_values"].to(torch.float32) + output = qeff_model.generate(inputs=inputs, generation_len=100) + print(tokenizer.batch_decode(output.generated_ids)) + print(output) diff --git a/examples/granite_example/ccl_granite_vision_inference.py b/examples/granite_example/ccl_granite_vision_inference.py new file mode 100644 index 000000000..64ecaf948 --- /dev/null +++ b/examples/granite_example/ccl_granite_vision_inference.py @@ -0,0 +1,129 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import requests +from PIL import Image +from transformers import AutoProcessor, TextStreamer + +from QEfficient import QEFFAutoModelForImageTextToText + +# Add HuggingFace Token to access the model +HF_TOKEN = "" + + +def run_model( + model_name, + token, + query, + image_url, + kv_offload=False, + prefill_seq_len=5500, + ctx_len=6000, + comp_ctx_lengths_prefill=None, + comp_ctx_lengths_decode=None, + generation_len=128, + img_size=384, + num_cores=16, + num_devices=1, +): + ## STEP - 1 Load the Processor and Model + + processor = AutoProcessor.from_pretrained(model_name, token=token) + + # `kv_offload` is used to compile the model in a 2 QPCs.Currently we are not supporting 1 qpc so the flag false is not allowed. + # The `kv_offload` flag should always be set to True. + # The Dual QPC approach splits the model to perform Image Encoding and Output generation in 2 different QPCs. + # The outputs of the Vision Encoder are then passed to the Language model via host in this case. 
+ + model = QEFFAutoModelForImageTextToText.from_pretrained( + model_name, + token=token, + kv_offload=kv_offload, + qaic_config={ + "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, + "comp_ctx_lengths_decode": comp_ctx_lengths_decode, + "ctx_len": ctx_len, + }, + ) + + ## STEP - 2 Export & Compile the Model + + model.compile( + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + img_size=img_size, + num_cores=num_cores, + num_devices=num_devices, + mxfp6_matmul=False, + ) + + ## STEP - 3 Load and process the inputs for Inference + + # We are resizing the image to (w x h) (1610 x 1109) so that any image can work on the model irrespective of image dimensions + # we have a fixed size of height 1109 and width 1610 + + image = Image.open(requests.get(image_url, stream=True).raw) + image = image.resize((1610, 1109)) + + messages = [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": query}]}] + input_text = processor.apply_chat_template(messages, add_generation_prompt=True) + inputs = processor(image, input_text, add_special_tokens=False, return_tensors="pt") + + ## STEP - 4 Run Inference on the compiled model + + streamer = TextStreamer(processor.tokenizer) + output = model.generate(inputs=inputs, streamer=streamer, generation_len=generation_len) + print(output) + + +if __name__ == "__main__": + # Model name and Input parameters + model_name = "ibm-granite/granite-vision-3.2-2b" + + # Please add prompt here + query = "Describe the image" + + # Please pass image url or image path. The format of the image should be jpg. 
+ image_url = "http://images.cocodataset.org/val2017/000000039769.jpg" + + # Compilation parameters for the model + kv_offload = True + prefill_seq_len = 5500 + ctx_len = 8192 + generation_len = 128 + img_size = 384 + num_cores = 16 + num_devices = 4 + ctx_len = 8192 + comp_ctx_lengths_prefill = [5500] + comp_ctx_lengths_decode = [6144, ctx_len] + + run_model( + model_name=model_name, + token=HF_TOKEN, + query=query, + kv_offload=kv_offload, + image_url=image_url, + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, + generation_len=generation_len, + img_size=img_size, + num_cores=num_cores, + num_devices=num_devices, + ) + + +""" +Expected Response: + +The image depicts two cats lying on a pink blanket that is spread out on a red couch. The cats are positioned in a relaxed manner, with their bodies stretched out and their heads resting on the blanket. +The cat on the left is a smaller, tabby cat with a mix of black, gray, and white fur. It has a long, slender body and a distinctive tail that is curled up near its tail end. The cat on the right is a larger, +tabby cat with a mix of gray, black, and brown fur. It has + +""" diff --git a/examples/intern_example/ccl_internvl_inference.py b/examples/intern_example/ccl_internvl_inference.py new file mode 100644 index 000000000..827d50c97 --- /dev/null +++ b/examples/intern_example/ccl_internvl_inference.py @@ -0,0 +1,288 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +from io import BytesIO +from typing import List + +import requests +import torch +import torch.nn as nn +import torchvision.transforms as T +from PIL import Image +from torchvision.transforms.functional import InterpolationMode +from transformers import AutoTokenizer, TextStreamer + +from QEfficient import QEFFAutoModelForCausalLM +from QEfficient.utils.logging_utils import logger + +IMAGENET_MEAN = (0.485, 0.456, 0.406) +IMAGENET_STD = (0.229, 0.224, 0.225) + + +# Process the input messages to generate prompt for the model. +def get_prompt(messages) -> str: + """Get the prompt for generation.""" + ## Chat template used for InternVL + system_prompt = "<|im_start|>system\nä½ ę˜Æä¹¦ē”ŸĀ·äø‡č±”ļ¼Œč‹±ę–‡åę˜ÆInternVLļ¼Œę˜Æē”±äøŠęµ·äŗŗå·„ę™ŗčƒ½å®žéŖŒå®¤ć€ęø…åŽå¤§å­¦åŠå¤šå®¶åˆä½œå•ä½č”åˆå¼€å‘ēš„å¤šęØ”ę€å¤§čÆ­čØ€ęØ”åž‹ć€‚" + sep = "<|im_end|>\n" + + ret = system_prompt + sep + for role, message in messages: + if message: + if type(message) is tuple: + message, _, _ = message + ret += role + message + sep + else: + ret += role + return ret + + +# Processor class for InternVL models +class InternProcessor: + """ + InternVL model only has an AutoTokenizer so this class performs the processing tasks similar to an AutoProcessor. + The methods used here are borrowed from the original InternVL modelling files. 
+ "https://huggingface.co/OpenGVLab/InternVL2_5-1B/" + """ + + def __init__(self, model: nn.Module, tokenizer): + self.model = model + image_size = self.model.config.force_image_size or self.model.config.vision_config.image_size + patch_size = self.model.config.vision_config.patch_size + self.template = model.config.template + self.num_image_token = int((image_size // patch_size) ** 2 * (self.model.config.downsample_ratio**2)) + self.tokenizer = tokenizer + + def build_transform(self, input_size): + MEAN, STD = IMAGENET_MEAN, IMAGENET_STD + transform = T.Compose( + [ + T.Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img), + T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC), + T.ToTensor(), + T.Normalize(mean=MEAN, std=STD), + ] + ) + return transform + + def find_closest_aspect_ratio(self, aspect_ratio, target_ratios, width, height, image_size): + best_ratio_diff = float("inf") + best_ratio = (1, 1) + area = width * height + for ratio in target_ratios: + target_aspect_ratio = ratio[0] / ratio[1] + ratio_diff = abs(aspect_ratio - target_aspect_ratio) + if ratio_diff < best_ratio_diff: + best_ratio_diff = ratio_diff + best_ratio = ratio + elif ratio_diff == best_ratio_diff: + if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]: + best_ratio = ratio + return best_ratio + + def dynamic_preprocess(self, image, min_num=1, max_num=12, image_size=448, use_thumbnail=False): + orig_width, orig_height = image.size + aspect_ratio = orig_width / orig_height + # calculate the existing image aspect ratio + target_ratios = set( + (i, j) + for n in range(min_num, max_num + 1) + for i in range(1, n + 1) + for j in range(1, n + 1) + if i * j <= max_num and i * j >= min_num + ) + target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1]) + # find the closest aspect ratio to the target + target_aspect_ratio = self.find_closest_aspect_ratio( + aspect_ratio, target_ratios, orig_width, orig_height, image_size + ) + # calculate 
the target width and height + target_width = image_size * target_aspect_ratio[0] + target_height = image_size * target_aspect_ratio[1] + blocks = target_aspect_ratio[0] * target_aspect_ratio[1] + # resize the image + resized_img = image.resize((target_width, target_height)) + processed_images = [] + for i in range(blocks): + box = ( + (i % (target_width // image_size)) * image_size, + (i // (target_width // image_size)) * image_size, + ((i % (target_width // image_size)) + 1) * image_size, + ((i // (target_width // image_size)) + 1) * image_size, + ) + # split the image + split_img = resized_img.crop(box) + processed_images.append(split_img) + assert len(processed_images) == blocks + if use_thumbnail and len(processed_images) != 1: + thumbnail_img = image.resize((image_size, image_size)) + processed_images.append(thumbnail_img) + return processed_images + + def load_image(self, image, input_size=448, max_num=12): + transform = self.build_transform(input_size=input_size) + images = self.dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num) + pixel_values = [transform(image) for image in images] + pixel_values = torch.stack(pixel_values) + return pixel_values + + def __call__( + self, + pixel_values, + question, + messages, + roles, + history=None, + num_patches_list=None, + IMG_START_TOKEN="", + IMG_END_TOKEN="", + IMG_CONTEXT_TOKEN="", + verbose=False, + ) -> str: + if history is None and pixel_values is not None and "" not in question: + question = "\n" + question + if num_patches_list is None: + num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else [] + assert pixel_values is None or len(pixel_values) == sum(num_patches_list) + img_context_token_id = self.tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN) + self.model.img_context_token_id = img_context_token_id + + messages.append([roles[0], question]) + messages.append([roles[1], None]) + query = get_prompt(messages) + if verbose and pixel_values is not None: 
+ image_bs = pixel_values.shape[0] + logger.info(f"dynamic ViT batch size: {image_bs}") + + for num_patches in num_patches_list: + image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN + query = query.replace("", image_tokens, 1) + return query + + +def run_intern_on_aic( + model_name, + prompt, + image_url, + messages, + roles, + kv_offload=False, + prefill_seq_len=3840, + num_devices=1, + num_cores=16, +): + ## STEP 1 -- LOAD THE MODEL + + # The original Intern-VL model, despite being multimodal, is loaded using `AutoModelForCausalLM` in Huggingface. + # To maintain compatibility, we load this model using `QEFFAutoModelForCausalLM`. + + ctx_len = 8192 + comp_ctx_lengths_prefill = [4096] + comp_ctx_lengths_decode = [6144, ctx_len] + + # model = QEFFAutoModelForCausalLM.from_pretrained(model_name, kv_offload=kv_offload, trust_remote_code=True) + + model = QEFFAutoModelForCausalLM.from_pretrained( + model_name, + kv_offload=kv_offload, + trust_remote_code=True, + qaic_config={ + "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, + "comp_ctx_lengths_decode": comp_ctx_lengths_decode, + "ctx_len": ctx_len, + }, + ) + + ## STEP 2 -- EXPORT & COMPILE THE MODEL + + model.compile( + num_cores=num_cores, + num_devices=num_devices, + ctx_len=ctx_len, + prefill_seq_len=prefill_seq_len, + mxfp6_matmul=False, + ) + + ## STEP 3 -- SETUP THE PROCESSOR + + # InternVL doesn't have an AutoProcessor yet, so we will use our own processor class "InternProcessor" + tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True, use_fast=False) + internProcessor = InternProcessor(model.model, tokenizer) + + ## STEP 4 -- PREPROCESS THE INPUTS + + img = requests.get(image_url, stream=True) + image = Image.open(BytesIO(img.content)).convert("RGB") + + # Images are resized to (1000, 747) for inference + image = image.resize((1000, 747)) + + # preprocess the resized image + pixel_values = internProcessor.load_image(image, 
max_num=12) + question = "\n" + prompt + query = internProcessor(pixel_values, question, messages, roles) + inputs = tokenizer( + query, return_tensors="pt", padding="max_length", max_length=prefill_seq_len, padding_side="right" + ) + + inputs["pixel_values"] = pixel_values + + ## STEP 5 -- RUN INFERENCE VIA GENERATE FUNCTION + streamer = TextStreamer(tokenizer) + model.generate(inputs=inputs, streamer=streamer, generation_len=128) + + +if __name__ == "__main__": + model_name = "OpenGVLab/InternVL2_5-1B" + + # Chat Template information for prompt preprocessing + messages: List[List[str]] = [] + roles = ("<|im_start|>user\n", "<|im_start|>assistant\n") + + # Inputs for the model + prompt = "Please describe the image in detail." + image_url = "https://image.slidesharecdn.com/azureintroduction-191206101932/75/Introduction-to-Microsoft-Azure-Cloud-1-2048.jpg" + + ## Compilation parameters + + # `kv_offload` is used to compile the model in a Single QPC or 2 QPCs. + # The Dual QPC approach splits the model to perform Image Encoding and Output generation in 2 different QPCs. + # The outputs of the Vision Encoder are then passed to the Language model via host in this case. + + kv_offload = True + + # InternVL is an Early-Fusion model that uses placeholder tokens within the input_ids to interleave text_embeddings with + # Image embeddings and generate final input_embeds for output generation. Hence we need very large prefill_seq_len (3840 in this case) to + # incorporate the memory for the merged embeddings. + + prefill_seq_len = 3840 + num_devices = 4 + num_cores = 16 + + run_intern_on_aic( + model_name=model_name, + prompt=prompt, + image_url=image_url, + messages=messages, + roles=roles, + kv_offload=kv_offload, + prefill_seq_len=prefill_seq_len, + num_devices=num_devices, + num_cores=num_cores, + ) + + +""" +Expected Response: + +The image is a promotional graphic for Microsoft Azure. It features a blue background with a hexagonal pattern on the left side. 
The hexagons are white and are arranged in a way that suggests a network or connectivity theme. + +On the right side of the image, the Microsoft Azure logo is prominently displayed. The logo consists of the Azure name in white, with the Microsoft logo above it, which includes four colored squares (blue, green, yellow, and red). Below the logo, the word "Azure" is written in large white letters. + +Below the logo, there is text that reads: +- "By Dinesh Kumar Wick +""" diff --git a/examples/qwen3moe_example/ccl_qwen3moe_inference.py b/examples/qwen3moe_example/ccl_qwen3moe_inference.py new file mode 100644 index 000000000..d2fa208df --- /dev/null +++ b/examples/qwen3moe_example/ccl_qwen3moe_inference.py @@ -0,0 +1,48 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +from transformers import AutoTokenizer + +from QEfficient import QEFFAutoModelForCausalLM +from QEfficient.utils.constants import Constants + +model_name = "Qwen/Qwen3-30B-A3B-Instruct-2507" +""" +# For CB inference, set continuous_batching to True and add full_batch_size, mxfp6, mxint8 arguments in compile function +# We will use prompt_len=1 for compilation for both cb and non-cb inference +""" + +ctx_len = 1024 +prefill_seq_len = 1 +# In moe models when compiling with prefill_seq_len=1 and non-continuous-batching mode, prefill and decode will share the same specializations. 
+comp_ctx_lengths_prefill = [256, 512, ctx_len] +comp_ctx_lengths_decode = [256, 512, ctx_len] + +model = QEFFAutoModelForCausalLM.from_pretrained( + model_name, + continuous_batching=False, + qaic_config={ + "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, + "comp_ctx_lengths_decode": comp_ctx_lengths_decode, + "ctx_len": ctx_len, + "prefill_seq_len": prefill_seq_len, + }, +) + +model.compile( + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + batch_size=1, + num_cores=16, + num_devices=4, + mxfp6_matmul=True, + mxint8_kv_cache=True, + mos=1, +) +# mos=1, +tokenizer = AutoTokenizer.from_pretrained(model_name) +exec_info = model.generate(prompts=Constants.INPUT_STR, tokenizer=tokenizer) From ed6bb1f8f1f7c38b9eec040a2029f548fe5090a4 Mon Sep 17 00:00:00 2001 From: quic-akuruvil Date: Wed, 19 Nov 2025 09:07:08 +0530 Subject: [PATCH 16/60] Fix for token during inference (#622) Fix for this JIRA from Imagine team Signed-off-by: Ann Kuruvilla Signed-off-by: Dhiraj Kumar Sah --- examples/gemma3_example/gemma3_mm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/gemma3_example/gemma3_mm.py b/examples/gemma3_example/gemma3_mm.py index e090148f7..ca82b2120 100644 --- a/examples/gemma3_example/gemma3_mm.py +++ b/examples/gemma3_example/gemma3_mm.py @@ -105,5 +105,5 @@ ) inputs["pixel_values"] = inputs["pixel_values"].to(torch.float32) output = qeff_model.generate(inputs=inputs, generation_len=100) - print(tokenizer.batch_decode(output.generated_ids)) + print(tokenizer.batch_decode(output.generated_ids, skip_special_tokens=True)) print(output) From a607dff85d5197156e19f339529b2507e3a4421f Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Singh Date: Wed, 19 Nov 2025 22:10:36 +0530 Subject: [PATCH 17/60] Add ONNX Sub Functions Export Feature for AutoModelForCausalLM (#621) # ONNX Functions Export Support ## Overview This PR introduces support for exporting ONNX modules as **functions**, enabling more efficient model compilation and execution on 
hardware. ## Key Changes - Added a new flag **`use_onnx_subfunctions`** to control ONNX function export behavior. - Integrated ONNX function export capability into the inference pipeline. ## How to Enable ONNX Function Export Set the flag before running inference (either during export or compile): ```bash model.export(tmp_path, use_onnx_subfunctions=True) ``` ## Backward Compatibility This feature is **opt-in** and requires an explicit environment variable. Existing workflows remain unaffected when the flag is disabled. --------- Signed-off-by: abhishek-singh591 Signed-off-by: Ann Kuruvilla Co-authored-by: quic-akuruvil Signed-off-by: Dhiraj Kumar Sah --- QEfficient/base/modeling_qeff.py | 30 +++- QEfficient/base/onnx_transforms.py | 97 ++++++++++++- QEfficient/peft/auto.py | 5 +- QEfficient/peft/lora/auto.py | 3 +- QEfficient/transformers/cache_utils.py | 42 ++++-- .../transformers/models/modeling_auto.py | 133 ++++++++++++++++-- .../transformers/models/pytorch_transforms.py | 26 ++++ QEfficient/utils/_utils.py | 1 + QEfficient/utils/hash_utils.py | 3 +- QEfficient/utils/torch_patches.py | 115 +++++++++++++++ tests/transformers/test_subfunction.py | 67 +++++++++ 11 files changed, 488 insertions(+), 34 deletions(-) create mode 100644 QEfficient/utils/torch_patches.py create mode 100644 tests/transformers/test_subfunction.py diff --git a/QEfficient/base/modeling_qeff.py b/QEfficient/base/modeling_qeff.py index 6ecbf0fc0..72f5c050e 100644 --- a/QEfficient/base/modeling_qeff.py +++ b/QEfficient/base/modeling_qeff.py @@ -8,6 +8,7 @@ import gc import inspect import logging +import re import shutil import subprocess import warnings @@ -18,10 +19,12 @@ import onnx import torch -from QEfficient.base.onnx_transforms import OnnxTransform +from QEfficient.base.onnx_transforms import CustomOpTransform, OnnxTransform, RenameFunctionOutputsTransform from QEfficient.base.pytorch_transforms import PytorchTransform from QEfficient.compile.qnn_compiler import compile as qnn_compile 
from QEfficient.generation.cloud_infer import QAICInferenceSession +from QEfficient.transformers.cache_utils import InvalidIndexProvider +from QEfficient.transformers.models.pytorch_transforms import get_decoder_layer_classes_for_export from QEfficient.utils import ( constants, create_json, @@ -32,6 +35,7 @@ hash_dict_params, load_json, ) +from QEfficient.utils.torch_patches import apply_torch_patches, undo_torch_patches logger = logging.getLogger(__name__) @@ -179,6 +183,7 @@ def _export( onnx_transform_kwargs: Optional[Dict[str, any]] = None, export_dir: Optional[str] = None, offload_pt_weights: bool = True, + use_onnx_subfunctions: bool = False, ) -> str: """ Export the PyTorch model to ONNX and apply ONNX transforms @@ -243,7 +248,19 @@ def _export( input_names.append(param) try: + # Initialize the registry with your custom ops export_kwargs = {} if export_kwargs is None else export_kwargs + if use_onnx_subfunctions: + warnings.warn( + "The subfunction feature is experimental. Please note that using compile consecutively with and without subfunction may produce inconsistent results." 
+ ) + apply_torch_patches() + InvalidIndexProvider.SUBFUNC_ENABLED = True + output_names = [re.sub("_RetainedState", "_InternalRetainedState", s) for s in output_names] + export_kwargs["export_modules_as_functions"] = get_decoder_layer_classes_for_export(self.model) + self._onnx_transforms.append(RenameFunctionOutputsTransform) + self._onnx_transforms.append(CustomOpTransform) + torch.onnx.export( self.model, (example_inputs,), @@ -255,7 +272,6 @@ def _export( **export_kwargs, ) logger.info("PyTorch export successful") - _ = self._offload_model_weights(offload_pt_weights) model = onnx.load(tmp_onnx_path, load_external_data=False) @@ -284,6 +300,12 @@ def _export( finally: shutil.rmtree(tmp_onnx_dir, ignore_errors=True) + if use_onnx_subfunctions: + undo_torch_patches() + InvalidIndexProvider.SUBFUNC_ENABLED = False + self._onnx_transforms.remove(CustomOpTransform) + self._onnx_transforms.remove(RenameFunctionOutputsTransform) + self.onnx_path = onnx_path return onnx_path @@ -300,6 +322,7 @@ def _compile( num_speculative_tokens: Optional[int] = None, enable_qnn: Optional[bool] = False, qnn_config: Optional[str] = None, + use_onnx_subfunctions: bool = False, **compiler_options, ) -> str: """ @@ -325,8 +348,9 @@ def _compile( For QNN Compilation path, when enable_qnn is set to True, any parameter passed in compiler_options will be ignored. 
""" + if onnx_path is None and self.onnx_path is None: - self.export() + self.export(use_onnx_subfunctions=use_onnx_subfunctions) onnx_path = Path(onnx_path or self.onnx_path) compile_dir = Path(compile_dir or onnx_path.parent) diff --git a/QEfficient/base/onnx_transforms.py b/QEfficient/base/onnx_transforms.py index 61b5c00f6..7ebe6bce5 100644 --- a/QEfficient/base/onnx_transforms.py +++ b/QEfficient/base/onnx_transforms.py @@ -5,11 +5,15 @@ # # ---------------------------------------------------------------------------- -from typing import Optional, Tuple +from typing import Any, Dict, Optional, Tuple import numpy as np +import torch from onnx import ModelProto, external_data_helper, numpy_helper +from QEfficient.customop.ctx_scatter_gather import CtxGather, CtxGatherFunc, CtxScatter, CtxScatterFunc +from QEfficient.customop.rms_norm import CustomRMSNorm, CustomRMSNormFunc + class OnnxTransform: """ @@ -99,3 +103,94 @@ def apply( current_file_size = tsize external_data_helper.set_external_data(tensor, f"{model_name}_{file_num}.onnx.data") return model, transformed + + +class CustomOpTransform(OnnxTransform): + """ + Transform to register custom operations and add their function protos to the ONNX model. + """ + + _custom_ops: Dict[str, Tuple[Any, Any]] = { + "CustomRMSNormFunc": (CustomRMSNormFunc, CustomRMSNorm), + "CtxScatterFunc": (CtxScatterFunc, CtxScatter), + "CtxGatherFunc": (CtxGatherFunc, CtxGather), + } + + @classmethod + def register_custom_op(cls, op_name: str, func_class: Any, onnxscript_func: Any) -> None: + """Register a custom operation.""" + cls._custom_ops[op_name] = (func_class, onnxscript_func) + + @classmethod + def apply(cls, model: ModelProto, *, opset_version: int = 17, **kwargs) -> Tuple[ModelProto, bool]: + """ + Apply custom op registration and add all function protos to the model. + + :param model: The ONNX model to transform. + :param opset_version: ONNX opset version for symbolic registration. 
+ :returns: (Transformed model, success flag). + """ + transformed = False + + # Register all custom op symbolic functions with torch.onnx + for op_name, (func_class, _) in cls._custom_ops.items(): + if hasattr(func_class, "symbolic"): + torch.onnx.register_custom_op_symbolic(f"::{op_name}", func_class.symbolic, opset_version) + + func_names = {func.name for func in model.functions} + + for _, onnxscript_func in cls._custom_ops.values(): + proto = onnxscript_func.to_function_proto() + if proto.name not in func_names: + model.functions.append(proto) + transformed = True + + return model, transformed + + +class RenameFunctionOutputsTransform(OnnxTransform): + """ + Renames function outputs in decoder layers by removing 'Internal' from '_InternalRetainedState' patterns. + """ + + @classmethod + def apply(cls, model: ModelProto, **kwargs) -> Tuple[ModelProto, bool]: + """ + Rename function outputs in decoder layer nodes. + + :param model: The ONNX model to transform + :returns: Transformed model and boolean indicating whether transform was applied + """ + graph = model.graph + op_type_to_func_map = {func.name: func for func in model.functions} + decoder_layer_patterns = ["DecoderLayer", "Block", "Layer"] + transformed = False + + # Create a dict mapping output name to its index for quick lookup + model_graph_outputs_map = {val.name: idx for idx, val in enumerate(model.graph.output)} + + layer_index = 0 + for node in graph.node: + if any(pattern in node.name or pattern in node.op_type for pattern in decoder_layer_patterns): + func = op_type_to_func_map.get(node.op_type) + if func is None: + continue + + for i, out_name in enumerate(func.output): + if "_InternalRetainedState" in out_name: + transformed = True + original_output_name = node.output[i] + + # Generate new name based on key/value + if "key" in out_name: + new_name = f"past_key.{layer_index}_RetainedState" + elif "value" in out_name: + new_name = f"past_value.{layer_index}_RetainedState" + node.output[i] = 
new_name + + # Update graph output name if it exists + if original_output_name in model_graph_outputs_map: + idx = model_graph_outputs_map[original_output_name] + model.graph.output[idx].name = new_name + layer_index += 1 + return model, transformed diff --git a/QEfficient/peft/auto.py b/QEfficient/peft/auto.py index 592c0c1d3..99d64cc2f 100644 --- a/QEfficient/peft/auto.py +++ b/QEfficient/peft/auto.py @@ -245,7 +245,7 @@ def from_pretrained(cls, pretrained_name_or_path: str, *args, **kwargs): obj = cls._from_pretrained(pretrained_name_or_path, *args, **kwargs) return obj - def export(self, export_dir: Optional[str] = None) -> str: + def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = False) -> str: """ Export the model with the active adapter to ONNX format. @@ -286,6 +286,7 @@ def export(self, export_dir: Optional[str] = None) -> str: export_kwargs={"do_constant_folding": False}, # To avoid merging adapter weights with base weights onnx_transform_kwargs={"adapter_name": self.model.active_adapter}, export_dir=export_dir, + use_onnx_subfunctions=use_onnx_subfunctions, ) def compile( @@ -300,6 +301,7 @@ def compile( num_cores: int = 16, mxfp6_matmul: bool = False, mxint8_kv_cache: bool = False, + use_onnx_subfunctions: bool = False, **compiler_options, ) -> str: """ @@ -367,6 +369,7 @@ def compile( mdp_ts_num_devices=num_devices, aic_num_cores=num_cores, mxint8_kv_cache=mxint8_kv_cache, + use_onnx_subfunctions=use_onnx_subfunctions, **compiler_options, ) diff --git a/QEfficient/peft/lora/auto.py b/QEfficient/peft/lora/auto.py index 8196cd769..64fa3f61c 100644 --- a/QEfficient/peft/lora/auto.py +++ b/QEfficient/peft/lora/auto.py @@ -327,7 +327,7 @@ def _init_adapter_model(self): # load_weight to model self._load_adapter_weights_to_model() - def export(self, export_dir: Optional[str] = None) -> str: + def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = False) -> str: """ Export the model with all loaded 
adapters to ONNX format using ``torch.onnx.export``. @@ -387,6 +387,7 @@ def export(self, export_dir: Optional[str] = None) -> str: output_names, dynamic_axes, export_dir=export_dir, + use_onnx_subfunctions=use_onnx_subfunctions, ) def generate( diff --git a/QEfficient/transformers/cache_utils.py b/QEfficient/transformers/cache_utils.py index 5452589f6..292fe0487 100644 --- a/QEfficient/transformers/cache_utils.py +++ b/QEfficient/transformers/cache_utils.py @@ -24,6 +24,33 @@ ) +class InvalidIndexProvider: + SUBFUNC_ENABLED = False + + @classmethod + def enable_subfunc(cls): + cls.SUBFUNC_ENABLED = True + + @classmethod + def _get_invalid_idx_value(cls): + """ + Get the appropriate invalid index value for CtxGather operations. + + For ONNX export with functions, we use 0 to avoid INT32_MAX constants + that cause issues when functions are inlined at runtime. + + Returns: + int: Invalid index value (0 for ONNX functions, INT32_MAX otherwise) + """ + if torch.onnx.is_in_onnx_export(): + if cls.SUBFUNC_ENABLED: + return 0 + else: + return torch.iinfo(torch.int32).max + else: + return 0 + + class QEffDynamicLayer(DynamicLayer): def read_only(self, cache_kwargs): """ @@ -46,10 +73,7 @@ def read_only(self, cache_kwargs): gather_limit = position_ids.max(1, keepdim=True).values.unsqueeze(1) invalid_mask = ctx_indices > gather_limit - if torch.onnx.is_in_onnx_export(): - invalid_idx_value = torch.iinfo(torch.int32).max - else: - invalid_idx_value = 0 + invalid_idx_value = InvalidIndexProvider._get_invalid_idx_value() ctx_indices = torch.where(invalid_mask, invalid_idx_value, ctx_indices) @@ -143,10 +167,7 @@ def update( gather_limit = position_ids.max(1, keepdim=True).values.unsqueeze(1) invalid_mask = ctx_indices > gather_limit - if torch.onnx.is_in_onnx_export(): - invalid_idx_value = torch.iinfo(torch.int32).max - else: - invalid_idx_value = 0 + invalid_idx_value = InvalidIndexProvider._get_invalid_idx_value() ctx_indices = torch.where(invalid_mask, invalid_idx_value, 
ctx_indices) if batch_index is not None: @@ -419,10 +440,7 @@ def update( ctx_indices = torch.arange(ctx_len)[None, None, ...] gather_limit = kv_position_ids.max(1, keepdim=True).values.unsqueeze(1) invalid_mask = ctx_indices > gather_limit - if torch.onnx.is_in_onnx_export(): - invalid_idx_value = torch.iinfo(torch.int32).max - else: - invalid_idx_value = 0 + invalid_idx_value = InvalidIndexProvider._get_invalid_idx_value() ctx_indices = torch.where(invalid_mask, invalid_idx_value, ctx_indices) all_indices = torch.arange(layer_ctx_len) + kv_position_ids.max() + 1 diff --git a/QEfficient/transformers/models/modeling_auto.py b/QEfficient/transformers/models/modeling_auto.py index 5f1ec51e6..cbff5be91 100644 --- a/QEfficient/transformers/models/modeling_auto.py +++ b/QEfficient/transformers/models/modeling_auto.py @@ -27,7 +27,10 @@ import QEfficient from QEfficient.base.modeling_qeff import QEFFBaseModel -from QEfficient.base.onnx_transforms import FP16ClipTransform, SplitTensorsTransform +from QEfficient.base.onnx_transforms import ( + FP16ClipTransform, + SplitTensorsTransform, +) from QEfficient.base.pytorch_transforms import SplitGateUpWeightsTransform from QEfficient.generation.cloud_infer import QAICInferenceSession from QEfficient.generation.text_generation_inference import ( @@ -315,7 +318,7 @@ def get_model_config(self) -> dict: """ return self.model.config.__dict__ - def export(self, export_dir: Optional[str] = None) -> str: + def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = False) -> str: """ Export the model to ONNX format using ``torch.onnx.export``. @@ -327,6 +330,8 @@ def export(self, export_dir: Optional[str] = None) -> str: export_dir : str, optional Directory path where the exported ONNX graph will be saved. If not provided, the default export directory is used. + use_onnx_subfunctions: bool, optional + whether to enable ONNX subfunctions during export. 
Exporting PyTorch model to ONNX with modules as subfunctions helps to reduce export/compile time. Defaults to False Returns ------- @@ -350,6 +355,7 @@ def export(self, export_dir: Optional[str] = None) -> str: output_names, dynamic_axes, export_dir=export_dir, + use_onnx_subfunctions=use_onnx_subfunctions, ) def compile( @@ -362,6 +368,7 @@ def compile( num_devices: int = 1, num_cores: int = 16, # FIXME: Make this mandatory arg mxfp6_matmul: bool = False, + use_onnx_subfunctions: bool = False, **compiler_options, ) -> str: """ @@ -388,6 +395,8 @@ def compile( Number of cores to use for compilation. mxfp6_matmul : bool, optional Use MXFP6 compression for weights. Default is False. + use_onnx_subfunctions: bool, optional + whether to enable ONNX subfunctions during export. Exporting PyTorch model to ONNX with modules as subfunctions helps to reduce export/compile time. Defaults to False **compiler_options : dict Additional compiler options for QAIC or QNN compilers. These are passed directly to the underlying compilation command. @@ -431,6 +440,7 @@ def compile( mxfp6_matmul=mxfp6_matmul, mdp_ts_num_devices=num_devices, aic_num_cores=num_cores, + use_onnx_subfunctions=use_onnx_subfunctions, **compiler_options, ) @@ -595,7 +605,15 @@ def __init__(self, model: nn.modules, **kwargs): self.model = model.get_qeff_vision_encoder() self.hash_params["qeff_auto_class"] = self.__class__.__name__ - def export(self, inputs, output_names, dynamic_axes, export_dir=None, offload_pt_weights=True): + def export( + self, + inputs, + output_names, + dynamic_axes, + export_dir=None, + offload_pt_weights=True, + use_onnx_subfunctions: bool = False, + ): """ Exports the vision encoder component to ONNX format. @@ -611,6 +629,8 @@ def export(self, inputs, output_names, dynamic_axes, export_dir=None, offload_pt Directory path where the exported ONNX graph will be saved. Default is None. offload_pt_weights : bool, optional If True, PyTorch weights will be offloaded after export. 
Default is True. + use_onnx_subfunctions: bool, optional + whether to enable ONNX subfunctions during export. Exporting PyTorch model to ONNX with modules as subfunctions helps to reduce export/compile time. Defaults to False Returns ------- @@ -618,7 +638,12 @@ def export(self, inputs, output_names, dynamic_axes, export_dir=None, offload_pt Path to the generated ONNX graph file for the vision encoder. """ return self._export( - inputs, output_names, dynamic_axes, export_dir=export_dir, offload_pt_weights=offload_pt_weights + inputs, + output_names, + dynamic_axes, + export_dir=export_dir, + offload_pt_weights=offload_pt_weights, + use_onnx_subfunctions=use_onnx_subfunctions, ) def compile( @@ -631,6 +656,7 @@ def compile( mdp_ts_num_devices, aic_num_cores, custom_io, + use_onnx_subfunctions: bool = False, **compiler_options, ) -> str: """ @@ -654,6 +680,8 @@ def compile( Number of cores to use for compilation. custom_io : Dict[str, str] Custom I/O configurations for the compiler. + use_onnx_subfunctions: bool, optional + whether to enable ONNX subfunctions during export. Exporting PyTorch model to ONNX with modules as subfunctions helps to reduce export/compile time. Defaults to False **compiler_options : Additional compiler options passed to the underlying compilation command. @@ -671,6 +699,7 @@ def compile( mdp_ts_num_devices=mdp_ts_num_devices, aic_num_cores=aic_num_cores, custom_io=custom_io, + use_onnx_subfunctions=use_onnx_subfunctions, **compiler_options, ) @@ -737,7 +766,15 @@ def __init__(self, model, **kwargs): self.model = model.get_qeff_language_decoder() self.hash_params["qeff_auto_class"] = self.__class__.__name__ - def export(self, inputs, output_names, dynamic_axes, export_dir=None, offload_pt_weights=True): + def export( + self, + inputs, + output_names, + dynamic_axes, + export_dir=None, + offload_pt_weights=True, + use_onnx_subfunctions: bool = False, + ): """ Exports the language decoder component to ONNX format. 
@@ -753,6 +790,8 @@ def export(self, inputs, output_names, dynamic_axes, export_dir=None, offload_pt Directory path where the exported ONNX graph will be saved. Default is None. offload_pt_weights : bool, optional If True, PyTorch weights will be offloaded after export. Default is True. + use_onnx_subfunctions: bool, optional + whether to enable ONNX subfunctions during export. Exporting PyTorch model to ONNX with modules as subfunctions helps to reduce export/compile time. Defaults to False Returns ------- @@ -760,7 +799,12 @@ def export(self, inputs, output_names, dynamic_axes, export_dir=None, offload_pt Path to the generated ONNX graph file for the language decoder. """ return self._export( - inputs, output_names, dynamic_axes, export_dir=export_dir, offload_pt_weights=offload_pt_weights + inputs, + output_names, + dynamic_axes, + export_dir=export_dir, + offload_pt_weights=offload_pt_weights, + use_onnx_subfunctions=use_onnx_subfunctions, ) def compile( @@ -773,6 +817,7 @@ def compile( mdp_ts_num_devices, aic_num_cores, custom_io, + use_onnx_subfunctions: bool = False, **compiler_options, ) -> str: """ @@ -796,6 +841,8 @@ def compile( Number of cores to use for compilation. custom_io : Dict[str, str] Custom I/O configurations for the compiler. + use_onnx_subfunctions: bool, optional + whether to enable ONNX subfunctions during export. Exporting PyTorch model to ONNX with modules as subfunctions helps to reduce export/compile time. Defaults to False **compiler_options : Additional compiler options passed to the underlying compilation command. 
@@ -813,6 +860,7 @@ def compile( mdp_ts_num_devices=mdp_ts_num_devices, aic_num_cores=aic_num_cores, custom_io=custom_io, + use_onnx_subfunctions=use_onnx_subfunctions, **compiler_options, ) @@ -973,6 +1021,7 @@ def qpc_path(self): def export( self, export_dir: Optional[str] = None, + use_onnx_subfunctions: bool = False, **kwargs, ) -> str: """ @@ -985,6 +1034,8 @@ def export( ---------- export_dir : str, optional Directory path where the exported ONNX graphs will be saved. Default is None. + use_onnx_subfunctions: bool, optional + whether to enable ONNX subfunctions during export. Exporting PyTorch model to ONNX with modules as subfunctions helps to reduce export/compile time. Defaults to False **kwargs : Additional keyword arguments. @@ -1018,9 +1069,15 @@ def export( dynamic_axes["vision"], export_dir=export_dir, offload_pt_weights=False, + use_onnx_subfunctions=use_onnx_subfunctions, ) self.lang_model.export( - inputs["lang"], output_names["lang"], dynamic_axes["lang"], export_dir=export_dir, offload_pt_weights=True + inputs["lang"], + output_names["lang"], + dynamic_axes["lang"], + export_dir=export_dir, + offload_pt_weights=True, + use_onnx_subfunctions=use_onnx_subfunctions, ) return self.onnx_path @@ -1043,6 +1100,7 @@ def compile( mxint8_kv_cache: bool = False, skip_vision: Optional[bool] = False, skip_lang: Optional[bool] = False, + use_onnx_subfunctions: bool = False, **compiler_options, ) -> str: """ @@ -1082,6 +1140,8 @@ def compile( If True, skips compilation of the vision encoder. Default is False. skip_lang : bool, optional If True, skips compilation of the language decoder. Default is False. + use_onnx_subfunctions: bool, optional + whether to enable ONNX subfunctions during export. Exporting PyTorch model to ONNX with modules as subfunctions helps to reduce export/compile time. Defaults to False **compiler_options : dict Additional compiler options for QAIC or QNN compilers. 
@@ -1154,7 +1214,9 @@ def compile( if (self.vision_model.onnx_path is None and vision_onnx_path is None) or ( self.lang_model.onnx_path is None and lang_onnx_path is None ): - self.export() + self.export( + use_onnx_subfunctions=use_onnx_subfunctions, + ) # TODO this should be removed once the continuous batching is supported for all the models. compiler_options.pop("continuous_batching", None) @@ -1172,6 +1234,7 @@ compile( aic_num_cores=num_cores, custom_io=custom_io_vision, mxint8_kv_cache=mxint8_kv_cache, + use_onnx_subfunctions=use_onnx_subfunctions, **compiler_options, ) @@ -1200,6 +1263,7 @@ compile( aic_num_cores=num_cores, custom_io=custom_io_lang, mxint8_kv_cache=mxint8_kv_cache, + use_onnx_subfunctions=use_onnx_subfunctions, **compiler_options, ) return self.qpc_path @@ -1624,6 +1688,7 @@ def from_pretrained( def export( self, export_dir: Optional[str] = None, + use_onnx_subfunctions: bool = False, **kwargs, ) -> str: """ @@ -1644,7 +1709,13 @@ def export( inputs = self.model.get_dummy_inputs(comp_ctx_lengths=self.comp_ctx_lengths_decode) dynamic_axes = self.model.get_onnx_dynamic_axes(comp_ctx_lengths=self.comp_ctx_lengths_decode) output_names = self.model.get_output_names() - return self._export(inputs, output_names, dynamic_axes, export_dir=export_dir) + return self._export( + inputs, + output_names, + dynamic_axes, + export_dir=export_dir, + use_onnx_subfunctions=use_onnx_subfunctions, + ) def compile( self, @@ -1662,6 +1733,7 @@ def compile( mxfp6_matmul: bool = False, mxint8_kv_cache: bool = False, num_speculative_tokens: Optional[int] = None, + use_onnx_subfunctions: bool = False, **compiler_options, ) -> str: """ @@ -1697,6 +1769,8 @@ def compile( Use MXINT8 compression for KV cache. Default is False. num_speculative_tokens : int, optional Not supported for this model; must be None. + use_onnx_subfunctions: bool, optional + whether to enable ONNX subfunctions during export. 
Exporting PyTorch model to ONNX with modules as subfunctions helps to reduce export/compile time. Defaults to False **compiler_options : dict Additional compiler options for QAIC or QNN compilers. @@ -1769,6 +1843,7 @@ def compile( mdp_ts_num_devices=num_devices, aic_num_cores=num_cores, mxint8_kv_cache=mxint8_kv_cache, + use_onnx_subfunctions=use_onnx_subfunctions, **compiler_options, ) return self.qpc_path @@ -2232,7 +2307,10 @@ class QEFFAutoModelForCausalLM(QEFFBaseModel): SplitGateUpWeightsTransform, KVCacheExternalModuleMapperTransform, ] - _onnx_transforms = [FP16ClipTransform, SplitTensorsTransform] + _onnx_transforms = [ + FP16ClipTransform, + SplitTensorsTransform, + ] def __init__( self, @@ -2423,7 +2501,7 @@ def get_model_config(self) -> dict: """ return self.model.config.__dict__ - def export(self, export_dir: Optional[str] = None) -> str: + def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = False, **kwargs) -> str: """ Export the model to ONNX format using ``torch.onnx.export``. @@ -2436,7 +2514,8 @@ def export(self, export_dir: Optional[str] = None) -> str: export_dir : str, optional Directory path where the exported ONNX graph will be saved. If not provided, the default export directory is used. - + use_onnx_subfunctions: bool, optional + whether to enable ONNX subfunctions during export. Exporting PyTorch model to ONNX with modules as subfunctions helps to reduce export/compile time. 
Defaults to False Returns ------- str @@ -2532,6 +2611,8 @@ def export(self, export_dir: Optional[str] = None) -> str: output_names, dynamic_axes, export_dir=export_dir, + use_onnx_subfunctions=use_onnx_subfunctions, + offload_pt_weights=kwargs.get("offload_pt_weights", True), ) def get_sampling_inputs_and_outputs( @@ -2742,6 +2823,7 @@ def compile( mxint8_kv_cache: bool = False, num_speculative_tokens: Optional[int] = None, prefill_only: Optional[bool] = None, + use_onnx_subfunctions: bool = False, **compiler_options, ) -> str: """ @@ -2783,6 +2865,8 @@ def compile( prefill_only : bool, optional If True, compiles only for the prefill stage. If False, compiles only for the decode stage. If None, compiles for both stages. Default is None. + use_onnx_subfunctions: bool, optional + whether to enable ONNX subfunctions during export. Exporting PyTorch model to ONNX with modules as subfunctions helps to reduce export/compile time. Defaults to False **compiler_options : dict Additional compiler options for QAIC or QNN compilers. @@ -2944,6 +3028,7 @@ def compile( num_speculative_tokens=num_speculative_tokens, aic_num_cores=num_cores, mxint8_kv_cache=mxint8_kv_cache, + use_onnx_subfunctions=use_onnx_subfunctions, **compiler_options, ) @@ -3135,7 +3220,7 @@ def get_model_config(self) -> dict: """ return self.model.config.__dict__ - def export(self, export_dir: Optional[str] = None) -> str: + def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = False) -> str: """ Export the model to ONNX format using ``torch.onnx.export``. @@ -3147,6 +3232,8 @@ def export(self, export_dir: Optional[str] = None) -> str: export_dir : str, optional Directory path where the exported ONNX graph will be saved. If not provided, the default export directory is used. + use_onnx_subfunctions: bool, optional + whether to enable ONNX subfunctions during export. Exporting PyTorch model to ONNX with modules as subfunctions helps to reduce export/compile time. 
Defaults to False Returns ------- @@ -3156,7 +3243,13 @@ def export(self, export_dir: Optional[str] = None) -> str: inputs = self.model.get_dummy_inputs() dynamic_axes = self.model.get_onnx_dynamic_axes() output_names = self.model.get_output_names() - return self._export(inputs, output_names, dynamic_axes, export_dir=export_dir) + return self._export( + inputs, + output_names, + dynamic_axes, + export_dir=export_dir, + use_onnx_subfunctions=use_onnx_subfunctions, + ) def compile( self, @@ -3174,6 +3267,7 @@ def compile( mxfp6_matmul: bool = False, mxint8_kv_cache: bool = False, num_speculative_tokens: Optional[int] = None, + use_onnx_subfunctions: bool = False, **compiler_options, ) -> str: """ @@ -3215,6 +3309,8 @@ def compile( Not yet supported for this model. num_speculative_tokens : int, optional Not yet supported for this model. + use_onnx_subfunctions: bool, optional + whether to enable ONNX subfunctions during export. Exporting PyTorch model to ONNX with modules as subfunctions helps to reduce export/compile time. Defaults to False **compiler_options : dict Additional compiler options for QAIC. @@ -3282,6 +3378,7 @@ def compile( mdp_ts_num_devices=num_devices, aic_num_cores=num_cores, custom_io=custom_io, + use_onnx_subfunctions=use_onnx_subfunctions, **compiler_options, ) @@ -3499,12 +3596,14 @@ def from_pretrained(cls, pretrained_model_name_or_path, pooling=None, *args, **k def get_model_config(self) -> dict: return self.model.config.__dict__ - def export(self, export_dir: Optional[str] = None) -> str: + def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = False) -> str: """ Exports the model to ``ONNX`` format using ``torch.onnx.export``. ``Optional`` Args: :export_dir (str, optional): The directory path to store ONNX-graph. + :use_onnx_subfunctions: bool, optional + whether to enable ONNX subfunctions during export. Exporting PyTorch model to ONNX with modules as subfunctions helps to reduce export/compile time. 
Defaults to False Returns: :str: Path of the generated ``ONNX`` graph. @@ -3525,6 +3624,7 @@ def export(self, export_dir: Optional[str] = None) -> str: output_names, dynamic_axes, export_dir=export_dir, + use_onnx_subfunctions=use_onnx_subfunctions, ) def compile( @@ -3537,6 +3637,7 @@ def compile( num_devices: int = 1, num_cores: int = 16, # FIXME: Make this mandatory arg mxfp6_matmul: bool = False, + use_onnx_subfunctions: bool = False, **compiler_options, ) -> str: """ @@ -3552,6 +3653,7 @@ def compile( :num_devices (int): Number of devices the model needs to be compiled for. Defaults to 1. :num_cores (int): Number of cores used to compile the model. :mxfp6_matmul (bool, optional): Whether to use ``mxfp6`` compression for weights. ``Defaults to False``. + :use_onnx_subfunctions: bool, optional: whether to enable ONNX subfunctions during export. Exporting PyTorch model to ONNX with modules as subfunctions helps to reduce export/compile time. Defaults to False :compiler_options (dict, optional): Additional compiler options. For QAIC Compiler: Extra arguments for qaic-exec can be passed. 
@@ -3584,6 +3686,7 @@ def compile( mxfp6_matmul=mxfp6_matmul, mdp_ts_num_devices=num_devices, aic_num_cores=num_cores, + use_onnx_subfunctions=use_onnx_subfunctions, **compiler_options, ) diff --git a/QEfficient/transformers/models/pytorch_transforms.py b/QEfficient/transformers/models/pytorch_transforms.py index 773ce178c..62a873b9e 100644 --- a/QEfficient/transformers/models/pytorch_transforms.py +++ b/QEfficient/transformers/models/pytorch_transforms.py @@ -821,3 +821,29 @@ def apply(cls, model: nn.Module, pooling: Union[str, Callable]) -> Tuple[nn.Modu model = PooledModel(model, pooling_method) warnings.warn("Pooling is applied to the model.") return model, transformed + + +def get_decoder_layer_classes_for_export(model: nn.Module) -> set: + """ + Dynamically determine which DecoderLayer classes should be exported as functions + based on the model's architecture using the existing KVCacheTransform mapping. + """ + # Define patterns that identify decoder layer classes + DECODER_LAYER_PATTERNS = ["DecoderLayer", "Block", "Layer"] + + # Get all QEff classes that are decoder layers from the existing mapping + decoder_layer_classes = set() + + for original_class, qeff_class in KVCacheTransform._module_mapping.items(): + # Check if the QEff class name contains decoder layer patterns + qeff_class_name = qeff_class.__name__ + if any(pattern in qeff_class_name for pattern in DECODER_LAYER_PATTERNS): + decoder_layer_classes.add(qeff_class) + + # Filter to only include classes that are actually used in the current model + model_decoder_classes = set() + for module in model.modules(): + if module.__class__ in decoder_layer_classes: + model_decoder_classes.add(module.__class__) + + return model_decoder_classes diff --git a/QEfficient/utils/_utils.py b/QEfficient/utils/_utils.py index d58f54952..1fb0311eb 100644 --- a/QEfficient/utils/_utils.py +++ b/QEfficient/utils/_utils.py @@ -566,6 +566,7 @@ def wrapper(self, *args, **kwargs): dynamic_axes=all_args.get("dynamic_axes"), 
export_kwargs=all_args.get("export_kwargs", None), onnx_transform_kwargs=all_args.get("onnx_transform_kwargs", None), + use_onnx_subfunctions=all_args.get("use_onnx_subfunctions", False), ) export_dir = export_dir.with_name(export_dir.name + "-" + export_hash) kwargs["export_dir"] = export_dir diff --git a/QEfficient/utils/hash_utils.py b/QEfficient/utils/hash_utils.py index b6b38b8b4..948b72e6a 100644 --- a/QEfficient/utils/hash_utils.py +++ b/QEfficient/utils/hash_utils.py @@ -55,7 +55,8 @@ def create_export_hash(**kwargs): export_params = {} export_params["output_names"] = kwargs.get("output_names") export_params["dynamic_axes"] = kwargs.get("dynamic_axes") - + if kwargs.get("use_onnx_subfunctions"): + export_params["use_onnx_subfunctions"] = True export_hash_params["export_params"] = export_params export_kwargs = kwargs.get("export_kwargs") diff --git a/QEfficient/utils/torch_patches.py b/QEfficient/utils/torch_patches.py new file mode 100644 index 000000000..0b9b37afa --- /dev/null +++ b/QEfficient/utils/torch_patches.py @@ -0,0 +1,115 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +"""Monkey patches for torch.onnx.utils to fix ONNX export issues.""" + +import torch +import torch.onnx.utils as onnx_utils +from torch import _C + +# Store original references before patching +_original_setup_trace_module_map = onnx_utils._setup_trace_module_map +_original_get_module_attributes = getattr(onnx_utils, "_get_module_attributes", None) + + +def _setup_trace_module_map_patched( + model, + export_modules_as_functions, +): + """Patched version of _setup_trace_module_map that fixes onnx_attrs type mismatch.""" + + def __register_attribute_hook(): + attr_name = "_onnx_attrs" + + def _track_module_attributes_forward_pre_hook(module, input): + setattr(module, attr_name, _get_module_attributes(module)) + + def _track_module_attributes_forward_hook(module, input, output): + tracing_state = _C._get_tracing_state() + if not tracing_state: + return + graph = tracing_state.graph() + onnx_attrs = {} + if hasattr(module, attr_name): + onnx_attrs = getattr(module, attr_name) + delattr(module, attr_name) + # FIX: use empty dict to avoid type mismatch + onnx_attrs = {} + _C._jit_pass_onnx_track_scope_attributes(graph, onnx_attrs) + + for m in model.modules(): + m.register_forward_hook(_track_module_attributes_forward_hook) + m.register_forward_pre_hook(_track_module_attributes_forward_pre_hook) + + def _unqualified_variable_name(qualified_name: str) -> str: + name_atoms = qualified_name.split(".") + for i, atom in reversed(list(enumerate(name_atoms))): + if not atom.isnumeric(): + return ".".join(name_atoms[i:]) + return qualified_name + + trace_module_map = { + _m: torch._C._jit_onnx_create_full_scope_name(torch.typename(type(_m)), _unqualified_variable_name(_n)) + for _n, _m in model.named_modules() + } + torch.jit._trace._trace_module_map = trace_module_map + + if isinstance(export_modules_as_functions, bool) and export_modules_as_functions: 
+ module_typenames = {torch.typename(type(module)) for module in trace_module_map} + elif isinstance(export_modules_as_functions, set) and export_modules_as_functions: + + def _find_typename(v): + if isinstance(v, type): + return torch.typename(v) + else: + raise RuntimeError( + "Only type of the `nn.Module` should be passed in the set for argument `export_modules_as_functions`. " + f"Got `{type(v).__name__}`." + ) + + module_typenames = {_find_typename(v) for v in export_modules_as_functions} + else: + module_typenames = set() + + if module_typenames: + __register_attribute_hook() + + return module_typenames + + +def _get_module_attributes(module): + """Helper function to get module attributes safely.""" + import typing + + import torch.nn + + annotations = typing.get_type_hints(type(module)) + base_m_annotations = typing.get_type_hints(torch.nn.Module) + [annotations.pop(k, None) for k in base_m_annotations] + + attrs = {} + for k in annotations: + try: + attrs[k] = getattr(module, k) + except AttributeError: + _C._jit_onnx_log(f"Skipping module attribute '{k}'") + continue + return attrs + + +def apply_torch_patches(): + """Apply monkey patches for ONNX export.""" + onnx_utils._setup_trace_module_map = _setup_trace_module_map_patched + if hasattr(onnx_utils, "_get_module_attributes"): + onnx_utils._get_module_attributes = _get_module_attributes + + +def undo_torch_patches(): + """Undo monkey patches and restore original functions.""" + onnx_utils._setup_trace_module_map = _original_setup_trace_module_map + if _original_get_module_attributes: + onnx_utils._get_module_attributes = _original_get_module_attributes diff --git a/tests/transformers/test_subfunction.py b/tests/transformers/test_subfunction.py new file mode 100644 index 000000000..36cfc0ce5 --- /dev/null +++ b/tests/transformers/test_subfunction.py @@ -0,0 +1,67 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. 
and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ---------------------------------------------------------------------------- + +import pytest +import torch +from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer + +from QEfficient.transformers.models.modeling_auto import QEFFAutoModelForCausalLM + +torch.manual_seed(42) + +configs = [ + ("gpt2", 256, 2, 4, 128, 512, 127, {}), +] + +configs = [ + AutoConfig.for_model( + model_name, + max_position_embeddings=max_position_embeddings, + num_hidden_layers=num_hidden_layers, + num_attention_heads=num_attention_heads, + hidden_size=hidden_size, + intermediate_size=intermediate_size, + vocab_size=vocab_size, + **additional_params, + ) + for ( + model_name, + max_position_embeddings, + num_hidden_layers, + num_attention_heads, + hidden_size, + intermediate_size, + vocab_size, + additional_params, + ) in configs +] + +model_kwargs = {"attn_implementation": "eager"} +config_ids = [x.model_type for x in configs] + + +@pytest.mark.parametrize("config", configs, ids=config_ids) +def test_subfunction_vs_nonsubfunction(config, tmp_path): + tokenizer = AutoTokenizer.from_pretrained(config.model_type) + model_0_0 = QEFFAutoModelForCausalLM(AutoModelForCausalLM.from_config(config, **model_kwargs), cb=False) + # model_0_0 = QEFFAutoModelForCausalLM.from_pretrained(config.model_type) + + with_sub_func_onnx = model_0_0.export(tmp_path, use_onnx_subfunctions=True, offload_pt_weights=False) + hash_0_0 = model_0_0.export_hash + + without_sub_func_onnx = model_0_0.export(tmp_path, use_onnx_subfunctions=False) + hash_0_1 = model_0_0.export_hash + + assert hash_0_0 != hash_0_1 + + compile_params = {"prefill_seq_len": 8, "ctx_len": 16} + model_0_0.compile(onnx_path=with_sub_func_onnx, **compile_params) + generation_00 = model_0_0.generate(prompts=["Help me with this"], tokenizer=tokenizer) + + model_0_0.compile(onnx_path=without_sub_func_onnx, **compile_params) + generation_01 = 
model_0_0.generate(prompts=["Help me with this"], tokenizer=tokenizer) + assert generation_00.generated_texts == generation_01.generated_texts From 8353831958dd3f7547fab046dafedff7d75a7761 Mon Sep 17 00:00:00 2001 From: Rishin Raj Date: Thu, 20 Nov 2025 19:52:08 +0530 Subject: [PATCH 18/60] Example scripts revamp (#615) 1. Restructured examples folder - Organized examples into domain-specific subdirectories: `text_generation/`, `image_text_to_text/`, `embeddings/`, `audio/`, `peft/`, `performance/` - Each subdirectory contains focused examples with dedicated READMEs 2. New CONTRIBUTING.md - Guidelines for when to add new examples - Code templates and file requirements - Complete Git workflow with DCO sign-off - Pre-commit checks and PR submission process --------- Signed-off-by: Rishin Raj Co-authored-by: Abukhoyer Shaik Signed-off-by: Dhiraj Kumar Sah --- QEfficient/cloud/execute.py | 2 +- QEfficient/cloud/infer.py | 2 +- docs/source/quick_start.md | 4 +- docs/source/release_docs.md | 6 +- docs/source/supported_features.rst | 26 +- examples/CONTRIBUTING.md | 260 ++++++++++++++ examples/README.md | 97 ++++++ examples/audio/README.md | 87 +++++ examples/audio/speech_to_text.py | 66 ++++ examples/audio/wav2vec2_inference.py | 54 +++ examples/basic_gguf_models.py | 23 -- examples/ccl_image_text_to_text_inference.py | 137 -------- examples/compute_context_length.py | 70 ---- examples/embedding_model.py | 46 --- examples/embeddings/README.md | 71 ++++ examples/embeddings/text_embeddings.py | 92 +++++ examples/gpt_oss.py | 35 -- examples/image_text_to_text/README.md | 112 ++++++ .../image_text_to_text/basic_vlm_inference.py | 134 ++++++++ .../configs}/fp32_nodes_gemma3_27b.yaml | 0 .../configs}/fp32_nodes_gemma3_4b.yaml | 0 .../models/gemma_vision/gemma3_example.py} | 6 +- .../models/granite_vision/README.md} | 0 .../models/granite_vision/granite_example.py} | 12 +- .../models/internvl/README.md} | 3 +- .../models/internvl/internvl_example.py} | 0 
.../models/llama4/continuous_batching.py | 91 +++++ .../models/llama4/multi_image.py} | 0 .../models/llama4/single_image.py} | 48 ++- .../mistral_vision}/mistral3_example.py | 0 .../models/molmo}/molmo_example.py | 3 +- .../models/qwen_vl/basic_inference.py} | 3 - .../models/qwen_vl/continuous_batching.py} | 3 - examples/image_text_to_text_inference.py | 120 ------- examples/llama4_CB_example_vision_lang.py | 93 ----- examples/peft/README.md | 83 +++++ .../{lora_models.py => peft/multi_adapter.py} | 0 .../single_adapter.py} | 46 ++- examples/performance/README.md | 110 ++++++ .../compute_context_length/README.md | 323 ++++++++++++++++++ .../compute_context_length/basic_inference.py | 154 +++++++++ .../compute_context_length/gemma3.py} | 2 +- .../compute_context_length/gpt_oss.py} | 0 .../compute_context_length/granite_vision.py} | 4 - .../compute_context_length/internvl.py} | 0 .../compute_context_length/llama4.py} | 2 +- .../compute_context_length/llama4_cb.py} | 1 - .../llama4_multi_image.py} | 2 +- .../compute_context_length/mistral3.py} | 0 .../compute_context_length/molmo.py} | 2 +- .../compute_context_length/qwen2_5_vl.py} | 2 +- .../compute_context_length/qwen2_5_vl_cb.py} | 0 .../ccl_qwen3moe_inference.py | 0 .../compute_context_length/vlm_inference.py | 236 +++++++++++++ .../cpp_execution/CMakeLists.txt | 0 .../cpp_execution/InferenceSetIOBuffer.cpp | 0 .../{ => performance}/cpp_execution/README.md | 2 +- .../cpp_execution/text_inference_cpp.py} | 2 +- .../{ => performance}/on_device_sampling.py | 2 +- .../speculative_decoding/README.md | 181 ++++++++++ .../speculative_decoding/draft_based.py} | 4 +- .../speculative_decoding/multi_projection.py} | 0 .../speculative_decoding/prompt_lookup.py} | 6 +- .../qwen3moe_example/qwen3moe_inference.py | 21 -- examples/{ => sample_prompts}/prompts.txt | 0 examples/speech_to_text/README.md | 21 -- .../run_whisper_speech_to_text.py | 36 -- examples/text_generation/README.md | 314 +++++++++++++++++ 
examples/text_generation/basic_inference.py | 57 ++++ examples/text_generation/cli_examples.sh | 209 ++++++++++++ .../text_generation/continuous_batching.py | 72 ++++ examples/text_generation/gguf_models.py | 59 ++++ examples/text_generation/moe_inference.py | 66 ++++ examples/wav2vec2_example/README.md | 21 -- .../run_wav2vec2_inference.py | 24 -- tests/cloud/test_export_compile_execute.py | 2 +- tests/cloud/test_infer.py | 2 +- 77 files changed, 3047 insertions(+), 727 deletions(-) create mode 100644 examples/CONTRIBUTING.md create mode 100644 examples/README.md create mode 100644 examples/audio/README.md create mode 100644 examples/audio/speech_to_text.py create mode 100644 examples/audio/wav2vec2_inference.py delete mode 100644 examples/basic_gguf_models.py delete mode 100644 examples/ccl_image_text_to_text_inference.py delete mode 100644 examples/compute_context_length.py delete mode 100644 examples/embedding_model.py create mode 100644 examples/embeddings/README.md create mode 100644 examples/embeddings/text_embeddings.py delete mode 100644 examples/gpt_oss.py create mode 100644 examples/image_text_to_text/README.md create mode 100644 examples/image_text_to_text/basic_vlm_inference.py rename examples/{gemma3_example => image_text_to_text/models/gemma_vision/configs}/fp32_nodes_gemma3_27b.yaml (100%) rename examples/{gemma3_example => image_text_to_text/models/gemma_vision/configs}/fp32_nodes_gemma3_4b.yaml (100%) rename examples/{gemma3_example/gemma3_mm.py => image_text_to_text/models/gemma_vision/gemma3_example.py} (95%) rename examples/{granite_example/readme.md => image_text_to_text/models/granite_vision/README.md} (100%) rename examples/{granite_example/granite_vision_inference.py => image_text_to_text/models/granite_vision/granite_example.py} (96%) rename examples/{intern_example/readme.md => image_text_to_text/models/internvl/README.md} (95%) rename examples/{intern_example/internvl_inference.py => 
image_text_to_text/models/internvl/internvl_example.py} (100%) create mode 100644 examples/image_text_to_text/models/llama4/continuous_batching.py rename examples/{llama4_multi_image_example.py => image_text_to_text/models/llama4/multi_image.py} (100%) rename examples/{llama4_example.py => image_text_to_text/models/llama4/single_image.py} (65%) rename examples/{ => image_text_to_text/models/mistral_vision}/mistral3_example.py (100%) rename examples/{ => image_text_to_text/models/molmo}/molmo_example.py (96%) rename examples/{qwen2_5_vl_example.py => image_text_to_text/models/qwen_vl/basic_inference.py} (95%) rename examples/{qwen2_5_vl_CB.py => image_text_to_text/models/qwen_vl/continuous_batching.py} (91%) delete mode 100644 examples/image_text_to_text_inference.py delete mode 100644 examples/llama4_CB_example_vision_lang.py create mode 100644 examples/peft/README.md rename examples/{lora_models.py => peft/multi_adapter.py} (100%) rename examples/{peft_models.py => peft/single_adapter.py} (60%) create mode 100644 examples/performance/README.md create mode 100644 examples/performance/compute_context_length/README.md create mode 100644 examples/performance/compute_context_length/basic_inference.py rename examples/{gemma3_example/ccl_gemma3_mm.py => performance/compute_context_length/gemma3.py} (98%) rename examples/{ccl_gpt_oss.py => performance/compute_context_length/gpt_oss.py} (100%) rename examples/{granite_example/ccl_granite_vision_inference.py => performance/compute_context_length/granite_vision.py} (98%) rename examples/{intern_example/ccl_internvl_inference.py => performance/compute_context_length/internvl.py} (100%) rename examples/{ccl_llama4_example.py => performance/compute_context_length/llama4.py} (98%) rename examples/{ccl_llama4_CB_example_vision_lang.py => performance/compute_context_length/llama4_cb.py} (95%) rename examples/{ccl_llama4_multi_image_example.py => performance/compute_context_length/llama4_multi_image.py} (96%) rename 
examples/{ccl_mistral3_example.py => performance/compute_context_length/mistral3.py} (100%) rename examples/{ccl_molmo_example.py => performance/compute_context_length/molmo.py} (97%) rename examples/{ccl_qwen2_5_vl_example.py => performance/compute_context_length/qwen2_5_vl.py} (98%) rename examples/{ccl_qwen2_5_vl_CB.py => performance/compute_context_length/qwen2_5_vl_cb.py} (100%) rename examples/{ => performance/compute_context_length}/qwen3moe_example/ccl_qwen3moe_inference.py (100%) create mode 100644 examples/performance/compute_context_length/vlm_inference.py rename examples/{ => performance}/cpp_execution/CMakeLists.txt (100%) rename examples/{ => performance}/cpp_execution/InferenceSetIOBuffer.cpp (100%) rename examples/{ => performance}/cpp_execution/README.md (81%) rename examples/{cpp_execution/text_inference_using_cpp.py => performance/cpp_execution/text_inference_cpp.py} (99%) rename examples/{ => performance}/on_device_sampling.py (99%) create mode 100644 examples/performance/speculative_decoding/README.md rename examples/{draft_spd_inference.py => performance/speculative_decoding/draft_based.py} (98%) rename examples/{multiprojs_spd_inference.py => performance/speculative_decoding/multi_projection.py} (100%) rename examples/{pld_spd_inference.py => performance/speculative_decoding/prompt_lookup.py} (98%) delete mode 100644 examples/qwen3moe_example/qwen3moe_inference.py rename examples/{ => sample_prompts}/prompts.txt (100%) delete mode 100644 examples/speech_to_text/README.md delete mode 100644 examples/speech_to_text/run_whisper_speech_to_text.py create mode 100644 examples/text_generation/README.md create mode 100644 examples/text_generation/basic_inference.py create mode 100755 examples/text_generation/cli_examples.sh create mode 100644 examples/text_generation/continuous_batching.py create mode 100644 examples/text_generation/gguf_models.py create mode 100644 examples/text_generation/moe_inference.py delete mode 100644 
examples/wav2vec2_example/README.md delete mode 100644 examples/wav2vec2_example/run_wav2vec2_inference.py diff --git a/QEfficient/cloud/execute.py b/QEfficient/cloud/execute.py index 27ea529cd..09e989ea0 100644 --- a/QEfficient/cloud/execute.py +++ b/QEfficient/cloud/execute.py @@ -115,7 +115,7 @@ def main( "--prompts_txt_file_path", "--prompts-txt-file-path", type=str, - help="File path for taking input prompts from txt file, sample prompts.txt file present in examples folder", + help="File path for taking input prompts from txt file, sample prompts.txt file present in examples/sample_prompts folder", ) parser.add_argument("--generation_len", "--generation-len", type=int, help="Number of tokens to generate") parser.add_argument( diff --git a/QEfficient/cloud/infer.py b/QEfficient/cloud/infer.py index fbff5b18b..ef05d29ab 100644 --- a/QEfficient/cloud/infer.py +++ b/QEfficient/cloud/infer.py @@ -390,7 +390,7 @@ def main( "--prompts_txt_file_path", "--prompts-txt-file-path", type=str, - help="File path for taking input prompts from txt file, sample prompts.txt file present in examples folder", + help="File path for taking input prompts from txt file, sample prompts.txt file present in examples/sample_prompts folder", ) parser.add_argument("--generation_len", "--generation-len", type=int, help="Number of tokens to generate") parser.add_argument( diff --git a/docs/source/quick_start.md b/docs/source/quick_start.md index 98ec72b7c..9358f9c4a 100644 --- a/docs/source/quick_start.md +++ b/docs/source/quick_start.md @@ -125,10 +125,10 @@ You can pass input prompts in single string but separate with pipe (|) symbol". 
python -m QEfficient.cloud.infer --model_name gpt2 --batch_size 3 --prompt_len 32 --ctx_len 128 --num_cores 16 --device_group [0] --prompt "My name is|The flat earth theory is the belief that|The sun rises from" --mxfp6 --mos 1 --aic_enable_depth_first ``` -You can also pass path of txt file with input prompts when you want to run inference on lot of prompts, Example below, sample txt file(prompts.txt) is present in examples folder. +You can also pass path of txt file with input prompts when you want to run inference on lot of prompts, Example below, sample txt file(prompts.txt) is present in examples/sample_prompts folder. ```bash -python -m QEfficient.cloud.infer --model_name gpt2 --batch_size 3 --prompt_len 32 --ctx_len 128 --num_cores 16 --device_group [0] --prompts_txt_file_path examples/prompts.txt --mxfp6 --mos 1 --aic_enable_depth_first +python -m QEfficient.cloud.infer --model_name gpt2 --batch_size 3 --prompt_len 32 --ctx_len 128 --num_cores 16 --device_group [0] --prompts_txt_file_path examples/sample_prompts/prompts.txt --mxfp6 --mos 1 --aic_enable_depth_first ``` **QNN CLI Inference Command** diff --git a/docs/source/release_docs.md b/docs/source/release_docs.md index 79e4bd181..97389e571 100644 --- a/docs/source/release_docs.md +++ b/docs/source/release_docs.md @@ -13,7 +13,7 @@ Welcome to the official release of **Efficient Transformer Library v1.20.0**! Th - Text & Image+Text support - Chunk attention, Single/Dual QPC support - Multi-image prompts enabled via VLLM interface - - [Llama4 Example Script](https://github.com/quic/efficient-transformers/blob/main/examples/llama4_example.py) + - [Llama4 Example Script](https://github.com/quic/efficient-transformers/blob/main/examples/image_text_to_text/models/llama_vision/single_image.py) - **Grok-1** - Executable via [`QEffAutoModelForCausalLM`](#QEffAutoModelForCausalLM) @@ -22,7 +22,7 @@ Welcome to the official release of **Efficient Transformer Library v1.20.0**! 
Th - Executable via [`QEFFAutoModelForImageTextToText`](#QEFFAutoModelForImageTextToText) - Text & Image+Text support - Sliding window support - - [Gemma3 Example Script](https://github.com/quic/efficient-transformers/blob/main/examples/gemma3_example/gemma3_mm.py) + - [Gemma3 Example Script](https://github.com/quic/efficient-transformers/blob/main/examples/image_text_to_text/models/gemma_vision/inference.py) - **SwiftKV (Llama-3.1-SwiftKV-8B-Instruct)** @@ -32,7 +32,7 @@ Welcome to the official release of **Efficient Transformer Library v1.20.0**! Th - **GGUF Models** - Executable via [`QEffAutoModelForCausalLM`](#QEffAutoModelForCausalLM) - Execution support (non-quantized) - - [Example Script](https://github.com/quic/efficient-transformers/blob/main/examples/basic_gguf_models.py) + - [Example Script](https://github.com/quic/efficient-transformers/blob/main/examples/text_generation/gguf_models.py) - **FP8 Compressed Quantization** - Support for [`Llama-3.3-70B-Instruct-FP8-Dynamic`](https://huggingface.co/Infermatic/Llama-3.3-70B-Instruct-FP8-Dynamic) diff --git a/docs/source/supported_features.rst b/docs/source/supported_features.rst index 9715da982..8260342f2 100644 --- a/docs/source/supported_features.rst +++ b/docs/source/supported_features.rst @@ -6,16 +6,18 @@ Supported Features * - Feature - Impact + * - `Compute Context Length (CCL) `_ + - Optimizes inference by using different context lengths during prefill and decode phases, reducing memory footprint and computation for shorter sequences while maintaining support for longer contexts. Supports both text-only and vision-language models. Refer `sample script `_ for more **details**. * - Sentence embedding, Flexible Pooling configuration and compilation with multiple sequence lengths - - Supports standard/custom pooling with AI 100 acceleration and sentence embedding. Enables efficient sentence embeddings via Efficient-Transformers. 
Compile with one or multiple seq_len; optimal graph auto-selected at runtime. Refer `sample script `_ for more **details**. + - Supports standard/custom pooling with AI 100 acceleration and sentence embedding. Enables efficient sentence embeddings via Efficient-Transformers. Compile with one or multiple seq_len; optimal graph auto-selected at runtime. Refer `sample script `_ for more **details**. * - `SpD, multiprojection heads `_ - - Implemented post-attention hidden size projections to speculate tokens ahead of the base model. Refer `sample script `_ for more **details**. + - Implemented post-attention hidden size projections to speculate tokens ahead of the base model. Refer `sample script `_ for more **details**. * - `QNN Compilation support `_ - Enabled for AutoModel classes QNN compilation capabilities for multi-models, embedding models and causal models. * - `Disaggregated serving `_ - It support for separate prefill and decode compilation for encoder (vision) and language models. * - `GGUF model execution `_ - - Supported GGUF model execution (without quantized weights). Refer `sample script `_ for more **details**. + - Supported GGUF model execution (without quantized weights). Refer `sample script `_ for more **details**. * - Replication of KV - Enabled FP8 model support on `replicate_kv_heads script `_. * - `gradient checkpointing `_ @@ -23,9 +25,9 @@ Supported Features * - Swift KV `Snowflake/Llama-3.1-SwiftKV-8B-Instruct `_ - Reduces computational overhead during inference by optimizing key-value pair processing, leading to improved throughput. Support for both `continuous and non-continuous batching execution `_ in SwiftKV * - :ref:`Vision Language Model ` - - Provides support for the AutoModelForImageTextToText class from the transformers library, enabling advanced vision-language tasks. Refer `sample script `_ for more **details**. 
+ - Provides support for the AutoModelForImageTextToText class from the transformers library, enabling advanced vision-language tasks. Refer `sample script `_ for more **details**. * - :ref:`Speech Sequence to Sequence Model ` - - Provides support for the QEFFAutoModelForSpeechSeq2Seq Facilitates speech-to-text sequence models. Refer `sample script `_ for more **details**. + - Provides support for the QEFFAutoModelForSpeechSeq2Seq Facilitates speech-to-text sequence models. Refer `sample script `_ for more **details**. * - Support for FP8 Execution - Enables execution with FP8 precision, significantly improving performance and reducing memory usage for computational tasks. * - Prefill caching @@ -33,19 +35,19 @@ Supported Features * - On Device Sampling - Enables sampling operations to be executed directly on the QAIC device rather than the host CPU for QEffForCausalLM models. This enhancement significantly reduces host-device communication overhead and improves inference throughput and scalability. Refer `sample script `_ for more **details**. * - Prompt-Lookup Decoding - - Speeds up text generation by using overlapping parts of the input prompt and the generated text, making the process faster without losing quality. Refer `sample script `_ for more **details**. + - Speeds up text generation by using overlapping parts of the input prompt and the generated text, making the process faster without losing quality. Refer `sample script `_ for more **details**. * - :ref:`PEFT LoRA support ` - - Enables parameter-efficient fine-tuning using low-rank adaptation techniques, reducing the computational and memory requirements for fine-tuning large models. Refer `sample script `_ for more **details**. + - Enables parameter-efficient fine-tuning using low-rank adaptation techniques, reducing the computational and memory requirements for fine-tuning large models. Refer `sample script `_ for more **details**. 
* - :ref:`QNN support ` - Enables compilation using QNN SDK, making Qeff adaptable for various backends in the future. * - :ref:`Embedding model support ` - Facilitates the generation of vector embeddings for retrieval tasks. * - :ref:`Speculative Decoding ` - - Accelerates text generation by using a draft model to generate preliminary predictions, which are then verified by the target model, reducing latency and improving efficiency. Refer `sample script `_ for more **details**. + - Accelerates text generation by using a draft model to generate preliminary predictions, which are then verified by the target model, reducing latency and improving efficiency. Refer `sample script `_ for more **details**. * - :ref:`Finite lorax ` - - Users can activate multiple LoRA adapters and compile them with the base model. At runtime, they can specify which prompt should use which adapter, enabling mixed adapter usage within the same batch. Refer `sample script `_ for more **details**. + - Users can activate multiple LoRA adapters and compile them with the base model. At runtime, they can specify which prompt should use which adapter, enabling mixed adapter usage within the same batch. Refer `sample script `_ for more **details**. * - Python and CPP Inferencing API support - - Provides flexibility while running inference with Qeff and enabling integration with various applications and improving accessibility for developers. Refer `sample script `_ for more **details**. + - Provides flexibility while running inference with Qeff and enabling integration with various applications and improving accessibility for developers. Refer `sample script `_ for more **details**. * - :ref:`Continuous batching ` - Optimizes throughput and latency by dynamically batching requests, ensuring efficient use of computational resources. 
* - AWQ and GPTQ support @@ -56,7 +58,5 @@ Supported Features - A script for computing the perplexity of a model, allowing for the evaluation of model performance and comparison across different models and datasets. Refer `sample script `_ for more **details**. * - KV Heads Replication Script - A sample script for replicating key-value (KV) heads for the Llama-3-8B-Instruct model, running inference with the original model, replicating KV heads, validating changes, and exporting the modified model to ONNX format. Refer `sample script `_ for more **details**. - * - Context Length Specializations (upcoming) - - Increases the maximum context length that models can handle, allowing for better performance on tasks requiring long sequences of text. * - Block Attention (in progress) - - Reduces inference latency and computational cost by dividing context into blocks and reusing key-value states, particularly useful in RAG. \ No newline at end of file + - Reduces inference latency and computational cost by dividing context into blocks and reusing key-value states, particularly useful in RAG. diff --git a/examples/CONTRIBUTING.md b/examples/CONTRIBUTING.md new file mode 100644 index 000000000..d7766fa92 --- /dev/null +++ b/examples/CONTRIBUTING.md @@ -0,0 +1,260 @@ +# Contributing Examples + +This guide explains how to add new examples to the QEfficient repository. + +## When to Add an Example + +Add a new example if: +- The model requires special configuration not covered by existing examples +- You're demonstrating a new feature or optimization technique +- The model has unique requirements (dependencies, image sizes, etc.) + +Don't add an example if: +- The model works with existing generic examples (just use those) +- The only difference is the model name; instead, add the model name to the validated model list and the model class README file.
+ +## Directory Structure + +Place your example in the appropriate domain: +- `text_generation/` - Text-only language models +- `image_text_to_text/` - Vision-language models +- `embeddings/` - Embedding models +- `audio/` - Speech and audio models +- `peft/` - Fine-tuning and adapter examples +- `performance/` - Optimization techniques + + + +## File Requirements + +### 1. Python Script + +Your example script should: +- Include the copyright header +- Use argparse for command-line arguments +- Provide clear error messages +- Print results in a readable format + +Basic template: +```python +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import argparse +from transformers import AutoTokenizer +from QEfficient import QEFFAutoModelForCausalLM + +def main(): + parser = argparse.ArgumentParser(description="Description of what this example does") + parser.add_argument("--model-name", type=str, required=True, help="HuggingFace model ID") + parser.add_argument("--prompt", type=str, default="Hello", help="Input prompt") + parser.add_argument("--prefill-seq-len", type=int, default=32) + parser.add_argument("--ctx-len", type=int, default=128) + parser.add_argument("--num-cores", type=int, default=16) + parser.add_argument("--num-devices", type=int, default=1) + args = parser.parse_args() + + tokenizer = AutoTokenizer.from_pretrained(args.model_name) + model = QEFFAutoModelForCausalLM.from_pretrained(args.model_name) + + qpc_path = model.compile( + prefill_seq_len=args.prefill_seq_len, + ctx_len=args.ctx_len, + num_cores=args.num_cores, + num_devices=args.num_devices, + ) + + exec_info = model.generate( + tokenizer=tokenizer, + prompts=[args.prompt], + ) + + print(f"Generated: {exec_info.generated_texts[0]}") + +if __name__ == "__main__": + main() 
+``` + +### 2. README.md + +Each model-specific example needs a README explaining: +- What the model does +- Any special requirements +- How to run it +- Expected output + +Template: +```markdown +# [Model Name] + +## Overview +Brief description of the model and what makes it special. + +## Requirements +```bash +# For single package +pip install package-name==1.2.3 + +# For multiple packages +pip install package-name==1.2.3 another-package==4.5.6 + +# Or use a requirements.txt file +pip install -r requirements.txt +``` + +**Note:** Always specify exact versions to ensure reproducibility. Use `pip show package-name` to check installed versions. + +## Usage +```bash +python inference.py --model-name [model-id] --prompt "Your prompt" +``` + +## Special Notes +Any model-specific considerations, limitations, or configuration details. + +## References +- Model card: [link] +- Paper: [link] (optional) + +## Code Guidelines + +- Use clear variable names +- Add comments for non-obvious code +- Handle errors gracefully +- Follow existing code style in the repository +- Test your example before submitting + +## Testing Your Example + +Before submitting: +1. Run the example with default parameters +2. Test with different model sizes if applicable +3. Verify the README instructions work +4. Check that all dependencies are documented + +## Submitting Your Contribution + +Follow these steps to submit your example to the QEfficient repository: + +### 1. Fork and Clone the Repository + +First, fork the repository to your GitHub account, then clone your fork: + +```bash +# Fork the repository on GitHub (click the "Fork" button) +# Then clone your fork +git clone git@github.com:YOUR_USERNAME/efficient-transformers.git +cd efficient-transformers + +# Add upstream remote to keep your fork in sync +git remote add upstream git@github.com:quic/efficient-transformers.git +``` + +### 2. 
Create a Feature Branch + +Create a descriptive branch for your changes: + +```bash +# Update your main branch +git checkout main +git pull upstream main + +# Create a new branch +git checkout -b add-[model-name]-example +``` + +### 3. Make Your Changes + +Add your example files following the guidelines above: +- Python script with proper copyright header +- README.md with clear documentation +- requirements.txt (if needed) + +### 4. Run Pre-commit Checks + +Before committing, ensure your code passes all quality checks: + +```bash +# Install pre-commit if not already installed +pip install pre-commit + +# Run pre-commit on your changed files +pre-commit run --files path/to/your/file1.py path/to/your/file2.md +``` + +**Important:** If pre-commit reports any failures: +- Some issues will be auto-fixed (formatting, trailing whitespace, etc.) +- For issues that aren't auto-fixed, manually correct them +- Re-run `pre-commit run --files ` until all checks pass + +### 5. Commit with Sign-off (DCO) + +All commits must be signed off to comply with the Developer Certificate of Origin (DCO): + +```bash +# Stage your changes +git add examples/your_domain/your_example.py +git add examples/your_domain/README.md + +# Commit with sign-off +git commit -s --author "Your Name " -m "Add [model-name] example + +- Implements inference for [model-name] +- Includes documentation and usage examples +- Tested with [specific configurations]" +``` + +**Commit Message Guidelines:** +- Use a clear, descriptive title +- Add a blank line, then detailed description if needed +- Always include the `-s` flag for DCO sign-off + +### 6. Push to Your Fork + +Push your branch to your forked repository: + +```bash +git push origin add-[model-name]-example +``` + +### 7. Create a Pull Request + +1. Go to your fork on GitHub +2. Click "Compare & pull request" for your branch +3. 
Fill out the PR template with: + - **Title:** Clear, descriptive title (e.g., "Add Llama-3.2-Vision example") + - **Description:** + - What the example demonstrates + - Why it's needed (what makes it different from existing examples) + - Any special testing considerations + - Link to model card or documentation + - **Testing:** Describe how you tested the example + +### 8. Ensure CI Checks Pass + +After creating the PR, verify that all automated checks pass: + +- āœ… **DCO Check:** Ensures all commits are signed off +- āœ… **Lint Check:** Code style and formatting validation +- āœ… **Tests:** Automated test suite (if applicable) + +If any checks fail: +1. Review the error messages in the PR +2. Make necessary fixes in your local branch +3. Commit and push the fixes (with sign-off) +4. The PR will automatically update and re-run checks + +### 9. Address Review Feedback + +Maintainers will review your PR and may request changes: +- Make requested changes in your local branch +- Commit with sign-off and push to update the PR +- Respond to comments to facilitate discussion + +## Questions + +For questions or issues, open a GitHub issue or discussion. diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 000000000..3913b25ce --- /dev/null +++ b/examples/README.md @@ -0,0 +1,97 @@ +# QEfficient Examples + +Examples for running models on Qualcomm Cloud AI 100. + +For detailed documentation, see https://quic.github.io/efficient-transformers/ + +## Quick Navigation + +### Text Generation +Language model inference. 
+ +| Example | Description | Script | +|---------|-------------|--------| +| Basic Inference | Simple text generation | [text_generation/basic_inference.py](text_generation/basic_inference.py) | +| GGUF Models | GGUF format support | [text_generation/gguf_models.py](text_generation/gguf_models.py) | +| MoE Models | Mixture of Experts | [text_generation/moe_inference.py](text_generation/moe_inference.py) | +| Continuous Batching | Dynamic batching | [text_generation/continuous_batching.py](text_generation/continuous_batching.py) | + +[See all text generation examples →](text_generation/) + +### Image-Text-to-Text +Vision-language models. + +| Example | Model | Script | +|---------|---------------|---------------| +| Basic VLM | Most VLMs | [image_text_to_text/basic_vlm_inference.py](image_text_to_text/basic_vlm_inference.py) | + +[See all vision-language examples →](image_text_to_text/) + +### Embeddings +Sentence and document embeddings. + +| Example | Model | Script | +|---------|-------|--------| +| Text Embeddings | all-MiniLM-L6-v2 | [embeddings/text_embeddings.py](embeddings/text_embeddings.py) | + +[See all embedding examples →](embeddings/) + +### Audio +Speech processing models. + +| Example | Model | Task | Script | +|---------|-------|------|--------| +| Speech-to-Text | Whisper | Transcription | [audio/speech_to_text.py](audio/speech_to_text.py) | +| CTC Speech Recognition | Wav2Vec2 | Recognition | [audio/wav2vec2_inference.py](audio/wav2vec2_inference.py) | + +[See all audio examples →](audio/) + +### PEFT +Parameter-efficient fine-tuning. + +| Example | Description | Script | +|---------|-------------|--------| +| Single Adapter | Load and use one adapter | [peft/single_adapter.py](peft/single_adapter.py) | +| Multi-Adapter | Multiple adapters with CB | [peft/multi_adapter.py](peft/multi_adapter.py) | + +**Note:** PEFT examples use hardcoded configurations to demonstrate specific adapter workflows. 
Modify the scripts directly to test different adapters or configurations. + +[See all PEFT examples →](peft/) + +### Performance +Optimization techniques. + +| Example | Technique | Script | +|---------|-----------|--------| +| Draft-based SpD | Speculative decoding | [performance/speculative_decoding/draft_based.py](performance/speculative_decoding/draft_based.py) | +| Prompt Lookup | N-gram speculation | [performance/speculative_decoding/prompt_lookup.py](performance/speculative_decoding/prompt_lookup.py) | +| Multi-Projection | Turbo models | [performance/speculative_decoding/multi_projection.py](performance/speculative_decoding/multi_projection.py) | +| On-Device Sampling | Sampling parameters | [performance/on_device_sampling.py](performance/on_device_sampling.py) | +| Compute Context Length | Dynamic context optimization | [performance/compute_context_length/basic_inference.py](performance/compute_context_length/basic_inference.py) | +| C++ Execution | Native C++ API | [performance/cpp_execution/](performance/cpp_execution/) | + +[See all performance examples →](performance/) + +## Installation + +For installation instructions, see the [Quick Installation guide](../README.md#quick-installation) in the main README. + + +## Running Examples + +### Python Scripts + +Basic usage: +```bash +python text_generation/basic_inference.py \ + --model-name gpt2 \ + --prompt "Hello, how are you?" +``` + +## Contributing + +See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines on adding new examples. + +## Documentation + +Full documentation: https://quic.github.io/efficient-transformers/ diff --git a/examples/audio/README.md b/examples/audio/README.md new file mode 100644 index 000000000..df0204d87 --- /dev/null +++ b/examples/audio/README.md @@ -0,0 +1,87 @@ +# Audio Examples + +Examples for running audio processing models on Qualcomm Cloud AI 100. 
+ +## Dependencies + +Install required packages: +```bash +pip install librosa==0.10.2 soundfile==0.13.1 +``` + +## Authentication + +For private/gated models, export your HuggingFace token: +```bash +export HF_TOKEN=<your_token> +``` + +## Supported Models + +**QEff Auto Classes:** +- `QEFFAutoModelForSpeechSeq2Seq` (for Whisper models) +- `QEFFAutoModelForCTC` (for Wav2Vec2 models) + +For the complete list of supported audio models, see the [Validated Models - Audio Section](../../docs/source/validate.md#audio-models). + +Popular models include: +- Whisper (tiny, base, small, medium, large, large-v3-turbo) +- Wav2Vec2 (base-960h) + +## Available Examples + +### speech_to_text.py +Speech-to-text transcription using Whisper models. + +**Usage:** +```bash +# With default parameters +python speech_to_text.py + +# With custom parameters +python speech_to_text.py \ + --model-name openai/whisper-tiny \ + --ctx-len 25 \ + --num-cores 16 +``` + +**Parameters:** +- `--model-name`: HuggingFace Whisper model ID (default: `openai/whisper-tiny`) +- `--ctx-len`: Context length for generation (default: `25`) +- `--num-cores`: Number of cores (default: `16`) + +This example: +- Loads a sample audio from the librispeech dataset +- Uses Whisper-tiny model by default +- Compiles and runs inference on Cloud AI 100 +- Outputs the transcribed text + +### wav2vec2_inference.py +Speech recognition using Wav2Vec2 models with CTC (Connectionist Temporal Classification).
+ +**Usage:** +```bash +# With default parameters +python wav2vec2_inference.py + +# With custom parameters +python wav2vec2_inference.py \ + --model-name facebook/wav2vec2-base-960h \ + --num-cores 16 +``` + +**Parameters:** +- `--model-name`: HuggingFace CTC model ID (default: `facebook/wav2vec2-base-960h`) +- `--num-cores`: Number of cores (default: `16`) + +This example: +- Loads a sample audio from the librispeech dataset +- Uses Wav2Vec2-base-960h model by default +- Compiles and runs inference on Cloud AI 100 +- Outputs the recognized text + +## Documentation + +- [QEff Auto Classes](https://quic.github.io/efficient-transformers/source/qeff_autoclasses.html) +- [Validated Audio Models](https://quic.github.io/efficient-transformers/source/validate.html#audio-models) +- [Quick Start Guide](https://quic.github.io/efficient-transformers/source/quick_start.html) diff --git a/examples/audio/speech_to_text.py b/examples/audio/speech_to_text.py new file mode 100644 index 000000000..9f1df19aa --- /dev/null +++ b/examples/audio/speech_to_text.py @@ -0,0 +1,66 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import argparse + +from datasets import load_dataset +from transformers import AutoProcessor + +from QEfficient import QEFFAutoModelForSpeechSeq2Seq + + +def main(): + parser = argparse.ArgumentParser(description="Speech-to-text inference with Whisper") + parser.add_argument( + "--model-name", + type=str, + default="openai/whisper-tiny", + help="HuggingFace Whisper model ID", + ) + parser.add_argument( + "--ctx-len", + type=int, + default=25, + help="Context length for generation", + ) + parser.add_argument("--num-cores", type=int, default=16, help="Number of cores") + args = parser.parse_args() + + print(f"Loading Whisper model: {args.model_name}") + + ## STEP 1 -- load audio sample + + # Using a standard english dataset + print("Loading audio sample from dataset...") + ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + sample_rate = ds[0]["audio"]["sampling_rate"] + data = ds[0]["audio"]["array"] + + # Reshape so shape corresponds to data with batch size 1 + data = data.reshape(-1) + + # Load processor + processor = AutoProcessor.from_pretrained(args.model_name) + + ## STEP 2 -- init base model + qeff_model = QEFFAutoModelForSpeechSeq2Seq.from_pretrained(args.model_name) + + ## STEP 3 -- export and compile model + qeff_model.compile(num_cores=args.num_cores) + + ## STEP 4 -- generate output for loaded input and processor + exec_info = qeff_model.generate( + inputs=processor(data, sampling_rate=sample_rate, return_tensors="pt"), generation_len=args.ctx_len + ) + + ## STEP 5 -- use processor to decode output + transcription = processor.batch_decode(exec_info.generated_ids)[0] + print(f"\nTranscription: {transcription}") + + +if __name__ == "__main__": + main() diff --git a/examples/audio/wav2vec2_inference.py b/examples/audio/wav2vec2_inference.py new file mode 100644 index 000000000..9d310b1c2 
--- /dev/null +++ b/examples/audio/wav2vec2_inference.py @@ -0,0 +1,54 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import argparse + +from datasets import load_dataset +from transformers import AutoProcessor + +from QEfficient import QEFFAutoModelForCTC + + +def main(): + parser = argparse.ArgumentParser(description="CTC speech recognition inference with Wav2Vec2") + parser.add_argument( + "--model-name", + type=str, + default="facebook/wav2vec2-base-960h", + help="HuggingFace CTC model ID (e.g., Wav2Vec2)", + ) + + parser.add_argument("--num-cores", type=int, default=16, help="Number of cores") + args = parser.parse_args() + + print(f"Loading CTC model: {args.model_name}") + + ## STEP 1 -- load audio sample + # Using a standard english dataset + print("Loading audio sample from dataset...") + ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + data = ds[0]["audio"]["array"] + + # Reshape so shape corresponds to data with batch size 1 + data = data.reshape(-1) + + # Load processor + processor = AutoProcessor.from_pretrained(args.model_name) + + ## STEP 2 -- Load the model + model = QEFFAutoModelForCTC.from_pretrained(args.model_name) + + ## STEP 3 -- Compile the model + model.compile(num_cores=args.num_cores) + + ## STEP 4 -- Run the model and generate the output + model_output = model.generate(processor, inputs=data) + print(f"\nTranscription: {model_output}") + + +if __name__ == "__main__": + main() diff --git a/examples/basic_gguf_models.py b/examples/basic_gguf_models.py deleted file mode 100644 index 84fc73059..000000000 --- a/examples/basic_gguf_models.py +++ /dev/null @@ -1,23 +0,0 @@ -# ----------------------------------------------------------------------------- -# -# Copyright 
(c) Qualcomm Technologies, Inc. and/or its subsidiaries. -# SPDX-License-Identifier: BSD-3-Clause -# -# ----------------------------------------------------------------------------- - -# This is the work example of the GGUF models with the AI 100 - -from transformers import AutoTokenizer - -from QEfficient import QEFFAutoModelForCausalLM as AutoModelForCausalLM - -# Load the model and tokenizer -model_name = "MaziyarPanahi/Mistral-7B-Instruct-v0.3-GGUF" -gguf_file = "Mistral-7B-Instruct-v0.3.fp16.gguf" -# org_model_name = "mistralai/Mistral-7B-Instruct-v0.3" - -tokenizer = AutoTokenizer.from_pretrained(model_name, gguf_file=gguf_file) -model = AutoModelForCausalLM.from_pretrained(model_name, gguf_file=gguf_file) - -generated_qpc_path = model.compile(prefill_seq_len=32, ctx_len=128, num_cores=16, num_devices=1) -model.generate(prompts=["How are you?"], tokenizer=tokenizer) diff --git a/examples/ccl_image_text_to_text_inference.py b/examples/ccl_image_text_to_text_inference.py deleted file mode 100644 index be472f433..000000000 --- a/examples/ccl_image_text_to_text_inference.py +++ /dev/null @@ -1,137 +0,0 @@ -# ----------------------------------------------------------------------------- -# -# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
-# SPDX-License-Identifier: BSD-3-Clause -# -# ----------------------------------------------------------------------------- - -import requests -from PIL import Image -from transformers import AutoProcessor, TextStreamer - -from QEfficient import QEFFAutoModelForImageTextToText - -# Add HuggingFace Token to access the model -HF_TOKEN = "" - - -def run_model( - model_name, - token, - query, - image_url, - kv_offload=False, - prefill_seq_len=32, - ctx_len=512, - comp_ctx_lengths_prefill=None, - comp_ctx_lengths_decode=None, - generation_len=128, - img_size=560, - num_cores=16, - num_devices=1, -): - ## STEP - 1 Load the Processor and Model - - processor = AutoProcessor.from_pretrained(model_name, token=token) - - # `kv_offload` is used to compile the model in a Single QPC or 2 QPCs. - # The Dual QPC approach splits the model to perform Image Encoding and Output generation in 2 different QPCs. - # The outputs of the Vision Encoder are then passed to the Language model via host in this case. 
- - model = QEFFAutoModelForImageTextToText.from_pretrained( - model_name, - token=token, - attn_implementation="eager", - kv_offload=kv_offload, - qaic_config={ - "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": comp_ctx_lengths_decode, - "ctx_len": ctx_len, - }, - ) - - ## STEP - 2 Export & Compile the Model - - model.compile( - prefill_seq_len=prefill_seq_len, - ctx_len=ctx_len, - img_size=img_size, - num_cores=num_cores, - num_devices=num_devices, - mxfp6_matmul=False, - ) - - ## STEP - 3 Load and process the inputs for Inference - - image = Image.open(requests.get(image_url, stream=True).raw) - messages = [ - { - "role": "user", - "content": [ - {"type": "image"}, - {"type": "text", "text": query}, - ], - } - ] - input_text = [processor.apply_chat_template(messages, add_generation_prompt=True)] - - inputs = processor( - text=input_text, - images=image, - return_tensors="pt", - add_special_tokens=False, - padding="max_length", - max_length=prefill_seq_len, - ) - - ## STEP - 4 Run Inference on the compiled model - - streamer = TextStreamer(processor.tokenizer) - output_statistics = model.generate(inputs=inputs, streamer=streamer, generation_len=generation_len) - print(output_statistics) - - -if __name__ == "__main__": - # Model name and Input parameters - # model_name = "llava-hf/llava-1.5-7b-hf" - model_name = "meta-llama/Llama-3.2-11B-Vision-Instruct" - query = "Describe this image." 
- image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg" - - # Compilation parameters for the model - kv_offload = True - prefill_seq_len = 32 - ctx_len = 8192 - generation_len = 128 - # img_size = 336 - img_size = 560 - num_cores = 16 - num_devices = 4 - comp_ctx_lengths_prefill = [4096] - comp_ctx_lengths_decode = [6144, ctx_len] - - run_model( - model_name=model_name, - token=HF_TOKEN, - query=query, - kv_offload=kv_offload, - image_url=image_url, - prefill_seq_len=prefill_seq_len, - ctx_len=ctx_len, - comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, - comp_ctx_lengths_decode=comp_ctx_lengths_decode, - generation_len=generation_len, - img_size=img_size, - num_cores=num_cores, - num_devices=num_devices, - ) - - -""" -Expected Response: - -This image depicts a charming anthropomorphic rabbit standing on a dirt path in front of a picturesque stone cottage, surrounded by a serene landscape. - -The rabbit, with its light brown fur and distinctive long ears, is attired in a stylish blue coat, brown vest, and tan pants, exuding a sense of sophistication. The dirt path, flanked by vibrant flowers and lush greenery, leads to the cottage, which features a thatched roof and a chimney, adding to the rustic charm of the scene. In the background, rolling hills and trees create a breathtaking panorama, while the sky above is a brilliant blue with white clouds, completing the - -""" diff --git a/examples/compute_context_length.py b/examples/compute_context_length.py deleted file mode 100644 index 163261e04..000000000 --- a/examples/compute_context_length.py +++ /dev/null @@ -1,70 +0,0 @@ -# ----------------------------------------------------------------------------- -# -# Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved. 
-# SPDX-License-Identifier: BSD-3-Clause -# -# ----------------------------------------------------------------------------- - -## In this example, you can run a model for static and continuous batching with different Compute-Context-Length (CCL) inputs. ## - -from transformers import AutoTokenizer - -from QEfficient import QEFFAutoModelForCausalLM - -## Using optional variable comp_ctx_lengths variable you can pass a list of context lengths for both prefilling and decoding processes. It will run the model with default context length if comp_ctx_lengths=None. ## -## - The first comp_ctx_lengths_prefill list shows the compute-ctx-length list for prefilling process. It will start the prefilling process with the first element in the list and gradually will increase the comp_ctx_lengths based on the position_id of the current prompt chunk. ## -## - The second comp_ctx_lengths_decode list will be used for decoding. During the decoding process, based on the position_id or cache index it will work with the specific compute-context-length in the list. It will start from a proper compute-context-length in the list based on input prompt length and will gradually increase the compute-context-length if the cache index passes the current compute-context-length. 
## - -ctx_len = 1024 -comp_ctx_lengths_prefill = [256, 500] # None -comp_ctx_lengths_decode = [512, ctx_len] # None - -model_name = "meta-llama/Llama-3.2-1B" -# model_name = "google/gemma-7b" -# model_name = "tiiuae/falcon-7b-instruct" -# model_name = "google/gemma-2-2b" -# model_name = "ibm-granite/granite-3.1-8b-instruct" -# model_name = "Snowflake/Llama-3.1-SwiftKV-8B-Instruct" -# model_name = "mistralai/Mistral-7B-v0.1" -# model_name = "microsoft/phi-1_5" -# model_name = "microsoft/Phi-3-mini-4k-instruct" -# model_name = "Qwen/Qwen2.5-7B-Instruct" -# model_name = "Qwen/Qwen3-1.7B" -# model_name = "allenai/OLMo-2-0425-1B" -# model_name = "ibm-granite/granite-3.3-2b-base" -# model_name = "ibm-granite/granite-3.2-8b-instruct" -# model_name = "meta-llama/Llama-3.3-70B-Instruct" -# model_name = "Salesforce/codegen-350M-mono" -# model_name = "openai-community/gpt2" -# model_name = "EleutherAI/gpt-j-6b" - -model = QEFFAutoModelForCausalLM.from_pretrained( - model_name, - continuous_batching=True, - qaic_config={ - "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": comp_ctx_lengths_decode, - "ctx_len": ctx_len, # Is required for CCL checkings - }, -) - -# model compilation for either continuous or static batching. For continuous batching full_batch_size is needed. -model.compile( - prefill_seq_len=128, - ctx_len=ctx_len, - num_cores=16, - num_devices=1, - mxint8_kv_cache=True, - mxfp6_matmul=True, - full_batch_size=1, -) - -# Create tokenizer and run model.generate and passes the input prompts to it. 
-tokenizer = AutoTokenizer.from_pretrained(model_name) -model.generate( - prompts=[ - "My name is ", - ], - tokenizer=tokenizer, - generation_len=128, -) diff --git a/examples/embedding_model.py b/examples/embedding_model.py deleted file mode 100644 index 7e6973e2e..000000000 --- a/examples/embedding_model.py +++ /dev/null @@ -1,46 +0,0 @@ -# ----------------------------------------------------------------------------- - -# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. -# SPDX-License-Identifier: BSD-3-Clause - -# ----------------------------------------------------------------------------- - -# This is the work example of the Embedding model with the AI 100 -# For more information, visit: https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2 - -import torch -from transformers import AutoTokenizer - -from QEfficient import QEFFAutoModel as AutoModel - - -def max_pooling(last_hidden_states: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor: - input_mask_expanded = attention_mask.unsqueeze(-1).expand(last_hidden_states.size()).float() - last_hidden_states[input_mask_expanded == 0] = -1e9 - return torch.max(last_hidden_states, 1)[0] - - -# Sentences we want sentence embeddings for -sentences = "This is an example sentence" - -# Load model from HuggingFace Hub -tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2") - - -# You can specify the pooling strategy either as a string (e.g., "max") or by passing a custom pooling function. -# If no pooling is specified, the model will return its default output (typically token embeddings). 
-qeff_model = AutoModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2", pooling=max_pooling) -# qeff_model = AutoModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2", pooling="max") -# qeff_model = AutoModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2") - -# Here seq_len can be list of seq_len or single int -qeff_model.compile(num_cores=16, seq_len=[32, 64]) -# qeff_model.compile(num_cores=16, seq_len=32) - - -# Tokenize sentences -encoded_input = tokenizer(sentences, return_tensors="pt") - -sentence_embeddings = qeff_model.generate(encoded_input) - -print("Sentence embeddings:", sentence_embeddings) diff --git a/examples/embeddings/README.md b/examples/embeddings/README.md new file mode 100644 index 000000000..baf80919c --- /dev/null +++ b/examples/embeddings/README.md @@ -0,0 +1,71 @@ +# Embedding Examples + +Examples for running text embedding models on Qualcomm Cloud AI 100. + +## Authentication + +For private/gated models, export your HuggingFace token: +```bash +export HF_TOKEN= +``` + +## Supported Models + +**QEff Auto Class:** `QEFFAutoModel` + +For the complete list of supported embedding models, see the [Validated Models - Embedding Section](../../docs/source/validate.md#embedding-models). + +Popular model families include: +- BERT-based (BGE, E5) +- MPNet +- Mistral-based +- NomicBERT +- Qwen2 +- RoBERTa (Granite) +- XLM-RoBERTa (multilingual) + +## Available Examples + +### text_embeddings.py +Generate text embeddings using transformer models. 
+ +**Usage:** +```bash +# With default parameters +python text_embeddings.py + +# With custom parameters +python text_embeddings.py \ + --model-name sentence-transformers/all-MiniLM-L6-v2 \ + --sentences "This is an example sentence" \ + --pooling max \ + --num-cores 16 \ + --seq-len "32,64" +``` + +**Parameters:** +- `--model-name`: HuggingFace embedding model ID (default: `sentence-transformers/all-MiniLM-L6-v2`) +- `--sentences`: Input text to generate embeddings for (default: `"This is an example sentence"`) +- `--pooling`: Pooling strategy - `max`, `mean`, or `none` (default: `max`) +- `--num-cores`: Number of cores (default: `16`) +- `--seq-len`: Sequence length(s) - single int or comma-separated list (default: `"32,64"`) + +This example: +- Uses `sentence-transformers/all-MiniLM-L6-v2` by default +- Demonstrates custom pooling strategies (max pooling) +- Compiles for multiple sequence lengths [32, 64] +- Outputs text embeddings +- Works with various embedding model families (BERT, MPNet, Mistral-based, etc.) + +## Pooling Strategies + +The example supports different pooling strategies: +- **max**: Max pooling over token embeddings +- **mean**: Mean pooling over token embeddings +- **custom**: Pass your own pooling function + +## Documentation + +- [QEff Auto Classes](https://quic.github.io/efficient-transformers/source/qeff_autoclasses.html) +- [Validated Embedding Models](https://quic.github.io/efficient-transformers/source/validate.html#embedding-models) +- [Quick Start Guide](https://quic.github.io/efficient-transformers/source/quick_start.html) diff --git a/examples/embeddings/text_embeddings.py b/examples/embeddings/text_embeddings.py new file mode 100644 index 000000000..e69e6f1af --- /dev/null +++ b/examples/embeddings/text_embeddings.py @@ -0,0 +1,92 @@ +# ----------------------------------------------------------------------------- + +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause + +# ----------------------------------------------------------------------------- + +import argparse + +import torch +from transformers import AutoTokenizer + +from QEfficient import QEFFAutoModel as AutoModel + + +def max_pooling(last_hidden_states: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor: + """Apply max pooling to the last hidden states.""" + input_mask_expanded = attention_mask.unsqueeze(-1).expand(last_hidden_states.size()).float() + last_hidden_states[input_mask_expanded == 0] = -1e9 + return torch.max(last_hidden_states, 1)[0] + + +def main(): + parser = argparse.ArgumentParser(description="Text embeddings inference") + parser.add_argument( + "--model-name", + type=str, + default="sentence-transformers/all-MiniLM-L6-v2", + help="HuggingFace embedding model ID", + ) + parser.add_argument( + "--sentences", + type=str, + default="This is an example sentence", + help="Input sentence(s) to generate embeddings for", + ) + parser.add_argument( + "--pooling", + type=str, + default="max", + choices=["max", "mean", "none"], + help="Pooling strategy: 'max' for max pooling, 'mean' for mean pooling, 'none' for no pooling", + ) + parser.add_argument("--num-cores", type=int, default=16, help="Number of cores") + parser.add_argument( + "--seq-len", + type=str, + default="32,64", + help="Sequence length(s) - single int (e.g., '32') or comma-separated list (e.g., '32,64')", + ) + args = parser.parse_args() + + # Parse seq_len argument + if "," in args.seq_len: + seq_len = [int(x.strip()) for x in args.seq_len.split(",")] + else: + seq_len = int(args.seq_len) + + print(f"Loading embedding model: {args.model_name}") + print(f"Pooling strategy: {args.pooling}") + print(f"Sequence length(s): {seq_len}") + + # Load tokenizer + tokenizer = AutoTokenizer.from_pretrained(args.model_name) + + # Load model with pooling strategy + # You can specify the pooling strategy either as a string (e.g., "max") or by passing a custom 
pooling function. + # If no pooling is specified, the model will return its default output (typically token embeddings). + if args.pooling == "max": + qeff_model = AutoModel.from_pretrained(args.model_name, pooling=max_pooling) + elif args.pooling == "mean": + qeff_model = AutoModel.from_pretrained(args.model_name, pooling="mean") + else: + qeff_model = AutoModel.from_pretrained(args.model_name) + + # Compile the model + # seq_len can be a list of seq_len or single int + qeff_model.compile(num_cores=args.num_cores, seq_len=seq_len) + + # Tokenize sentences + encoded_input = tokenizer(args.sentences, return_tensors="pt") + + # Run the generation + sentence_embeddings = qeff_model.generate(encoded_input) + + print(f"\nInput: {args.sentences}") + print(f"Sentence embeddings shape: {sentence_embeddings['output'].shape}") + print(f"Sentence embeddings: {sentence_embeddings}") + + +if __name__ == "__main__": + main() diff --git a/examples/gpt_oss.py b/examples/gpt_oss.py deleted file mode 100644 index 24d050e97..000000000 --- a/examples/gpt_oss.py +++ /dev/null @@ -1,35 +0,0 @@ -# ----------------------------------------------------------------------------- -# -# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. -# SPDX-License-Identifier: BSD-3-Clause -# -# ----------------------------------------------------------------------------- - -from transformers import AutoTokenizer, TextStreamer - -from QEfficient import QEFFAutoModelForCausalLM - -model_id = "openai/gpt-oss-20b" # weights are not required to convert to fp32 - -qeff_model = QEFFAutoModelForCausalLM.from_pretrained(model_id) -tokenizer = AutoTokenizer.from_pretrained(model_id) - -onnx_model_path = qeff_model.export() -qpc_path = qeff_model.compile( - prefill_seq_len=1, # Currently we can get best perf using PL=1 i.e. decode-only model, prefill optimizations are being worked on. 
- ctx_len=256, - num_cores=16, - mxfp6_matmul=True, - mxint8_kv_cache=True, - num_devices=8, - mos=1, - aic_enable_depth_first=True, - num_speculative_tokens=None, -) -print(f"qpc path is {qpc_path}") -streamer = TextStreamer(tokenizer) -exec_info = qeff_model.generate( - tokenizer, - prompts="Who is your creator? and What all you are allowed to do?", - device_id=[0, 1, 2, 3], -) diff --git a/examples/image_text_to_text/README.md b/examples/image_text_to_text/README.md new file mode 100644 index 000000000..a6f1608b4 --- /dev/null +++ b/examples/image_text_to_text/README.md @@ -0,0 +1,112 @@ +# Image-Text-to-Text (Vision-Language Models) + +Multi-modal models that process both images and text. + + +## Authentication + +For private/gated models, export your HuggingFace token: +```bash +export HF_TOKEN= +``` +## Quick Start +### Generic VLM Inference +Generic script for vision-language models: + +```bash +# With default parameters +python basic_vlm_inference.py + +# With custom parameters +python basic_vlm_inference.py \ + --model-name llava-hf/llava-1.5-7b-hf \ + --image-url "https://example.com/image.jpg" \ + --query "Describe this image" \ + --prefill-seq-len 128 \ + --ctx-len 3000 \ + --generation-len 128 \ + --num-cores 16 +``` + +### Single QPC Mode +Run the entire model (vision encoder + language model) in a single QPC: + +```bash +python basic_vlm_inference.py \ + --model-name llava-hf/llava-1.5-7b-hf \ + --image-url "https://example.com/image.jpg" \ + --query "Describe this image" \ + --num-cores 16 \ + --num-devices 1 +``` + +### Dual QPC Mode +Split the model into two QPCs (vision encoder + language model separately): + +```bash +python basic_vlm_inference.py \ + --model-name llava-hf/llava-1.5-7b-hf \ + --image-url "https://example.com/image.jpg" \ + --query "Describe this image" \ + --kv-offload \ + --num-cores 16 \ + --num-devices 1 +``` + +**Note:** In Dual QPC mode (`kv_offload=True`), the vision encoder runs in one QPC and the language model in 
another, with outputs transferred via host. This provides flexibility for independent execution of vision and language components.
+
+### Text-Only Execution (Skip Vision)
+Run text-only inference without image processing:
+
+```bash
+python basic_vlm_inference.py \
+    --model-name llava-hf/llava-1.5-7b-hf \
+    --prompt "Tell me about yourself" \
+    --skip-vision True
+```
+
+**Note:** Use `skip_vision=True` when you want to run the language model without processing any images. This is useful for text-only tasks on vision-language models.
+
+### Continuous Batching
+Dynamic batching for VLMs:
+
+```bash
+python continuous_batching_vlm.py \
+    --model-name meta-llama/Llama-4-Scout-17B-16E-Instruct \
+    --full-batch-size 4
+```
+
+## Supported Models
+
+**QEff Auto Class:** `QEFFAutoModelForImageTextToText`
+
+For the complete list of supported vision-language models, see the [Validated Models - Vision-Language Models Section](../../docs/source/validate.md#vision-language-models-text--image-generation).
+ +Popular model families include: +- Llama Vision (3.2, 4-Scout) +- Qwen VL (2.5) +- Mistral Vision (Small-3.1) +- Gemma-3 +- Granite Vision (3.2) +- InternVL +- Molmo +- LLaVA + +### Model-Specific Examples + +Some models have specialized examples demonstrating advanced features: + +| Model | Location | +|-------|----------| +| **Llama-4** | [models/llama4/](models/llama4/) | +| **Qwen** | [models/qwen_vl/](models/qwen_vl/) | +| **Mistral** | [models/mistral_vision/](models/mistral_vision/) | +| **Gemma** | [models/gemma_vision/](models/gemma_vision/) | +| **Granite** | [models/granite_vision/](models/granite_vision/) | +| **InternVL** | [models/internvl/](models/internvl/) | +| **Molmo** | [models/molmo/](models/molmo/) | + + +## Documentation +- **Full Guide**: [VLM Documentation](../../docs/source/quick_start.md#vision-language-models) +- **API Reference**: [QEFFAutoModelForImageTextToText](../../docs/source/qeff_autoclasses.md#QEFFAutoModelForImageTextToText) diff --git a/examples/image_text_to_text/basic_vlm_inference.py b/examples/image_text_to_text/basic_vlm_inference.py new file mode 100644 index 000000000..45d5454cb --- /dev/null +++ b/examples/image_text_to_text/basic_vlm_inference.py @@ -0,0 +1,134 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import argparse + +import requests +from PIL import Image +from transformers import AutoProcessor, TextStreamer + +from QEfficient import QEFFAutoModelForImageTextToText + + +def run_model( + model_name, + query, + image_url, + kv_offload=True, + prefill_seq_len=32, + ctx_len=512, + generation_len=128, + img_size=336, + num_cores=16, + num_devices=1, +): + ## STEP 1: Load the Processor and Model + + processor = AutoProcessor.from_pretrained(model_name) + + # `kv_offload` determines Single QPC vs Dual QPC mode: + # - Single QPC (kv_offload=False): Entire model runs in one QPC + # - Dual QPC (kv_offload=True): Vision encoder and language model run in separate QPCs + # with outputs transferred via host for flexibility + + model = QEFFAutoModelForImageTextToText.from_pretrained( + model_name, attn_implementation="eager", kv_offload=kv_offload + ) + + ## STEP 2: Export & Compile the Model + + model.compile( + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + img_size=img_size, + num_cores=num_cores, + num_devices=num_devices, + mxfp6_matmul=False, + ) + + ## STEP 3: Load and Process the Inputs for Inference + # Note: the message format would change for different model + image = Image.open(requests.get(image_url, stream=True).raw) + messages = [ + { + "role": "user", + "content": [ + {"type": "image"}, + {"type": "text", "text": query}, + ], + } + ] + input_text = [processor.apply_chat_template(messages, add_generation_prompt=True)] + + inputs = processor( + text=input_text, + images=image, + return_tensors="pt", + add_special_tokens=False, + padding="max_length", + max_length=prefill_seq_len, + ) + + ## STEP 4: Run Inference on the Compiled Model + + streamer = TextStreamer(processor.tokenizer) + model.generate(inputs=inputs, streamer=streamer, generation_len=generation_len) + + +def main(): + parser = 
argparse.ArgumentParser(description="Vision-Language Model (VLM) inference") + parser.add_argument( + "--model-name", + type=str, + default="llava-hf/llava-1.5-7b-hf", + help="HuggingFace VLM model ID", + ) + parser.add_argument( + "--query", + type=str, + default="Describe this image.", + help="Text query/question about the image", + ) + parser.add_argument( + "--image-url", + type=str, + default="https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + help="URL of the image to process", + ) + parser.add_argument( + "--kv-offload", + action="store_true", + default=True, + help="Enable Dual QPC mode (vision encoder and LM in separate QPCs)", + ) + parser.add_argument("--prefill-seq-len", type=int, default=128, help="Prefill sequence length") + parser.add_argument("--ctx-len", type=int, default=3000, help="Context length") + parser.add_argument("--generation-len", type=int, default=128, help="Number of tokens to generate") + parser.add_argument("--img-size", type=int, default=336, help="Image size for processing") + parser.add_argument("--num-cores", type=int, default=16, help="Number of cores") + parser.add_argument("--num-devices", type=int, default=1, help="Number of devices") + args = parser.parse_args() + + print(f"Running VLM inference with model: {args.model_name}") + print(f"KV offload (Dual QPC mode): {args.kv_offload}") + + run_model( + model_name=args.model_name, + query=args.query, + image_url=args.image_url, + kv_offload=args.kv_offload, + prefill_seq_len=args.prefill_seq_len, + ctx_len=args.ctx_len, + generation_len=args.generation_len, + img_size=args.img_size, + num_cores=args.num_cores, + num_devices=args.num_devices, + ) + + +if __name__ == "__main__": + main() diff --git a/examples/gemma3_example/fp32_nodes_gemma3_27b.yaml b/examples/image_text_to_text/models/gemma_vision/configs/fp32_nodes_gemma3_27b.yaml similarity index 100% rename from 
examples/gemma3_example/fp32_nodes_gemma3_27b.yaml rename to examples/image_text_to_text/models/gemma_vision/configs/fp32_nodes_gemma3_27b.yaml diff --git a/examples/gemma3_example/fp32_nodes_gemma3_4b.yaml b/examples/image_text_to_text/models/gemma_vision/configs/fp32_nodes_gemma3_4b.yaml similarity index 100% rename from examples/gemma3_example/fp32_nodes_gemma3_4b.yaml rename to examples/image_text_to_text/models/gemma_vision/configs/fp32_nodes_gemma3_4b.yaml diff --git a/examples/gemma3_example/gemma3_mm.py b/examples/image_text_to_text/models/gemma_vision/gemma3_example.py similarity index 95% rename from examples/gemma3_example/gemma3_mm.py rename to examples/image_text_to_text/models/gemma_vision/gemma3_example.py index ca82b2120..5c1f141d4 100644 --- a/examples/gemma3_example/gemma3_mm.py +++ b/examples/image_text_to_text/models/gemma_vision/gemma3_example.py @@ -13,15 +13,17 @@ # Change model_id to "google/gemma-3-27b-it" for 27B model model_id = "google/gemma-3-4b-it" + config = AutoConfig.from_pretrained(model_id) + # For Testing Purpose Only config.text_config.num_hidden_layers = 1 config.vision_config.num_hidden_layers = 2 + tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, trust_remote_code=True) processor = AutoProcessor.from_pretrained(model_id) -# pass HF_TOKEN if gated model -# For running the model in single QPC approach use kv_offload=False. 
For Dual QPC approach use kv_offload=True ### +# For single QPC: kv_offload=False, For dual QPC: kv_offload=True qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( model_id, config=config, attn_implementation="eager", kv_offload=True ) diff --git a/examples/granite_example/readme.md b/examples/image_text_to_text/models/granite_vision/README.md similarity index 100% rename from examples/granite_example/readme.md rename to examples/image_text_to_text/models/granite_vision/README.md diff --git a/examples/granite_example/granite_vision_inference.py b/examples/image_text_to_text/models/granite_vision/granite_example.py similarity index 96% rename from examples/granite_example/granite_vision_inference.py rename to examples/image_text_to_text/models/granite_vision/granite_example.py index 230e10a40..08b01b1ef 100644 --- a/examples/granite_example/granite_vision_inference.py +++ b/examples/image_text_to_text/models/granite_vision/granite_example.py @@ -5,15 +5,14 @@ # # ----------------------------------------------------------------------------- +import os + import requests from PIL import Image from transformers import AutoProcessor, TextStreamer from QEfficient import QEFFAutoModelForImageTextToText -# Add HuggingFace Token to access the model -HF_TOKEN = "" - def run_model( model_name, @@ -29,7 +28,6 @@ def run_model( num_devices=1, ): ## STEP - 1 Load the Processor and Model - processor = AutoProcessor.from_pretrained(model_name, token=token) # `kv_offload` is used to compile the model in a 2 QPCs.Currently we are not supporting 1 qpc so the flag false is not allowed. 
@@ -40,7 +38,6 @@ def run_model( model = QEFFAutoModelForImageTextToText.from_pretrained(model_name, token=token, kv_offload=kv_offload) ## STEP - 2 Export & Compile the Model - model.compile( prefill_seq_len=prefill_seq_len, ctx_len=ctx_len, @@ -88,9 +85,12 @@ def run_model( num_cores = 16 num_devices = 4 + # Get HF token from environment variable (None if not set) + hf_token = os.getenv("HF_TOKEN") + run_model( model_name=model_name, - token=HF_TOKEN, + token=hf_token, query=query, kv_offload=kv_offload, image_url=image_url, diff --git a/examples/intern_example/readme.md b/examples/image_text_to_text/models/internvl/README.md similarity index 95% rename from examples/intern_example/readme.md rename to examples/image_text_to_text/models/internvl/README.md index 6b0b674c9..8371ffc50 100644 --- a/examples/intern_example/readme.md +++ b/examples/image_text_to_text/models/internvl/README.md @@ -2,7 +2,6 @@ This directory contains an example script of how to run inference on InternVL-1B model via QEFFAutoModelForCausalLM class. 
## Required packages: -- `torch==2.7.0+cpu` - `torchvision==0.22.0+cpu` - `timm==1.0.14` - `einops==0.8.1` @@ -14,7 +13,7 @@ pip install torch==2.7.0+cpu --extra-index-url https://download.pytorch.org/whl/ To run example script after package installations: ```sh -python internvl_inference.py +python internvl_example.py ``` Expected output for given sample inputs in the script: diff --git a/examples/intern_example/internvl_inference.py b/examples/image_text_to_text/models/internvl/internvl_example.py similarity index 100% rename from examples/intern_example/internvl_inference.py rename to examples/image_text_to_text/models/internvl/internvl_example.py diff --git a/examples/image_text_to_text/models/llama4/continuous_batching.py b/examples/image_text_to_text/models/llama4/continuous_batching.py new file mode 100644 index 000000000..515e7c01b --- /dev/null +++ b/examples/image_text_to_text/models/llama4/continuous_batching.py @@ -0,0 +1,91 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ---------------------------------------------------------------------------- + +""" +Continuous Batching Example for Llama-4-Scout Vision Model + +This example demonstrates how to use continuous batching with vision-language models +to process multiple image-text pairs simultaneously in a single batch. 
+""" + +import transformers +from transformers import AutoConfig, AutoProcessor + +from QEfficient import QEFFAutoModelForImageTextToText + +# Model configuration +model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct" + +## STEP 1: Load Model Configuration and Processor +config = AutoConfig.from_pretrained(model_id) +# For Testing Purpose Only - reduce layers for faster testing +config.text_config.num_hidden_layers = 4 +config.vision_config.num_hidden_layers = 2 + +tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) +processor = AutoProcessor.from_pretrained(model_id) + +## STEP 2: Initialize Model with Continuous Batching +# Enable continuous batching to process multiple prompts in parallel +# Set kv_offload=True for Dual QPC mode (vision encoder + language model separately) +qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( + model_id, + attn_implementation="eager", + kv_offload=True, # Dual QPC mode + config=config, + continuous_batching=True, # Enable continuous batching +) + +## STEP 3: Compile the Model for Cloud AI 100 +# Configure compilation parameters for continuous batching +qeff_model.compile( + prefill_seq_len=128, + ctx_len=3072, + img_size=336, + num_cores=16, + num_devices=4, + max_num_tiles=17, + batch_size=1, # Batch size per request + full_batch_size=4, # Total batch size for continuous batching + mxfp6_matmul=True, + mxint8_kv_cache=True, + aic_enable_depth_first=True, + mos=1, +) + +## STEP 4: Prepare Input Images and Prompts +# Define multiple images to process in the batch +image_urls = [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + 
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", +] + +# Define corresponding prompts for each image +prompts = [ + "Can you describe the image in detail?", + "What are the objects in the image?", + "What is the main subject of the image?", + "What colors are predominant in the image?", +] + +## STEP 5: Run Inference with Continuous Batching +# Process all image-prompt pairs in a single batch +exec_info = qeff_model.generate( + tokenizer=tokenizer, + prompts=prompts, + processor=processor, + images=image_urls, # Images are processed with their corresponding prompts + device_ids=[0, 1, 2, 3], + generation_len=100, +) + +## STEP 6: Display Results +print("Generated IDs:", exec_info.generated_ids) +print("\nFull execution info:") +print(exec_info) diff --git a/examples/llama4_multi_image_example.py b/examples/image_text_to_text/models/llama4/multi_image.py similarity index 100% rename from examples/llama4_multi_image_example.py rename to examples/image_text_to_text/models/llama4/multi_image.py diff --git a/examples/llama4_example.py b/examples/image_text_to_text/models/llama4/single_image.py similarity index 65% rename from examples/llama4_example.py rename to examples/image_text_to_text/models/llama4/single_image.py index 981bac203..ca1017d58 100644 --- a/examples/llama4_example.py +++ b/examples/image_text_to_text/models/llama4/single_image.py @@ -5,29 +5,47 @@ # # ----------------------------------------------------------------------------- +""" +Single Image Inference Example for Llama-4-Scout Vision Model + +This example demonstrates two modes: +1. Text-only mode (skip_vision=True): Run language model without image processing +2. 
Vision+Text mode (skip_vision=False): Process image and text together +""" + import torch import transformers from transformers import AutoConfig, AutoProcessor, TextStreamer from QEfficient import QEFFAutoModelForImageTextToText +# Model configuration model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct" + +## STEP 1: Load Model Configuration and Processor config = AutoConfig.from_pretrained(model_id) -# For Testing Purpose Only +# For Testing Purpose Only - reduce layers for faster testing config.text_config.num_hidden_layers = 4 config.vision_config.num_hidden_layers = 2 +## STEP 2: Initialize the Model +# Set kv_offload=True for Dual QPC mode (vision encoder + language model separately) qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( model_id, attn_implementation="eager", kv_offload=True, config=config ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) processor = AutoProcessor.from_pretrained(model_id) -### use skip_vision=Ture, if want to run only text, ow false ### +# Toggle between text-only and vision+text modes +# Set skip_vision=True for text-only execution (no image processing) +# Set skip_vision=False for vision+text execution (process images with text) skip_vision = True if skip_vision: - ## Only Text ## + ## TEXT-ONLY MODE ## + + ## STEP 3: Compile Model for Text-Only Execution + # Set skip_vision=True to bypass image processing qeff_model.compile( prefill_seq_len=128, ctx_len=3072, @@ -38,10 +56,12 @@ mxfp6_matmul=True, mxint8_kv_cache=True, aic_enable_depth_first=True, - skip_vision=True, + skip_vision=True, # Skip vision encoder for text-only inference mos=1, ) + ## STEP 4: Prepare Text-Only Input + # Create a text-only message without any image messages = [ { "role": "user", @@ -51,6 +71,7 @@ }, ] + ## STEP 5: Process Input with Chat Template inputs = processor.apply_chat_template( messages, add_generation_prompt=True, @@ -59,14 +80,20 @@ return_tensors="pt", ) + ## STEP 6: Run Text-Only Inference streamer = 
TextStreamer(tokenizer) output = qeff_model.generate(inputs=inputs, device_ids=[0, 1, 2, 3, 4, 5, 6, 7], generation_len=100) + + ## STEP 7: Display Results print(output.generated_ids) print(tokenizer.batch_decode(output.generated_ids)) print(output) else: - ## Vision + Text ## + ## VISION + TEXT MODE ## + + ## STEP 3: Compile Model for Vision+Text Execution + # Do not set skip_vision (defaults to False) to enable image processing qeff_model.compile( prefill_seq_len=128, ctx_len=3072, @@ -80,11 +107,13 @@ mos=1, ) - ### IMAGE + TEXT ### + ## STEP 4: Prepare Image and Text Input + # Define the image URL to process image_url = ( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png" ) + # Create a message with both image and text messages = [ { "role": "user", @@ -95,6 +124,7 @@ }, ] + ## STEP 5: Process Input with Chat Template inputs = processor.apply_chat_template( messages, add_generation_prompt=True, @@ -102,10 +132,14 @@ return_dict=True, return_tensors="pt", ) + # Convert pixel values to float32 for processing inputs["pixel_values"] = inputs["pixel_values"].to(torch.float32) + + ## STEP 6: Run Vision+Text Inference streamer = TextStreamer(tokenizer) output = qeff_model.generate(inputs=inputs, device_ids=[0, 1, 2, 3, 4, 5, 6, 7], generation_len=100) + + ## STEP 7: Display Results print(output.generated_ids) print(tokenizer.batch_decode(output.generated_ids)) print(output) - print() diff --git a/examples/mistral3_example.py b/examples/image_text_to_text/models/mistral_vision/mistral3_example.py similarity index 100% rename from examples/mistral3_example.py rename to examples/image_text_to_text/models/mistral_vision/mistral3_example.py diff --git a/examples/molmo_example.py b/examples/image_text_to_text/models/molmo/molmo_example.py similarity index 96% rename from examples/molmo_example.py rename to examples/image_text_to_text/models/molmo/molmo_example.py index 09658ce41..04bba5248 100644 --- 
a/examples/molmo_example.py +++ b/examples/image_text_to_text/models/molmo/molmo_example.py @@ -16,7 +16,8 @@ model_id = "allenai/Molmo-7B-D-0924" config = AutoConfig.from_pretrained(model_id, trust_remote_code=True) -config.num_hidden_layers = 2 +# For faster execution user can run on 2 layers, This is only for testing purpose +# config.num_hidden_layers = 2 # load the model qeff_model = QEFFAutoModelForCausalLM.from_pretrained(model_id, kv_offload=True, trust_remote_code=True, config=config) diff --git a/examples/qwen2_5_vl_example.py b/examples/image_text_to_text/models/qwen_vl/basic_inference.py similarity index 95% rename from examples/qwen2_5_vl_example.py rename to examples/image_text_to_text/models/qwen_vl/basic_inference.py index d5d943c9c..374f70ad2 100644 --- a/examples/qwen2_5_vl_example.py +++ b/examples/image_text_to_text/models/qwen_vl/basic_inference.py @@ -5,9 +5,6 @@ # # ----------------------------------------------------------------------------- -# If we want to enable QBlocking Run below command:, default is without blocking -# ATTENTION_BLOCKING_MODE=q num_q_blocks=2 python -W ignore qwen2_5_vl_example.py - import requests import transformers from PIL import Image diff --git a/examples/qwen2_5_vl_CB.py b/examples/image_text_to_text/models/qwen_vl/continuous_batching.py similarity index 91% rename from examples/qwen2_5_vl_CB.py rename to examples/image_text_to_text/models/qwen_vl/continuous_batching.py index 96ef4898a..03094dc92 100644 --- a/examples/qwen2_5_vl_CB.py +++ b/examples/image_text_to_text/models/qwen_vl/continuous_batching.py @@ -5,9 +5,6 @@ # # ----------------------------------------------------------------------------- -# If we want to enable QBlocking Run below command:, default is without blocking -# ATTENTION_BLOCKING_MODE=q num_q_blocks=2 python -W ignore qwen2_5_vl_example.py - import transformers from transformers import AutoConfig, AutoProcessor, TextStreamer diff --git a/examples/image_text_to_text_inference.py 
b/examples/image_text_to_text_inference.py deleted file mode 100644 index e722284ba..000000000 --- a/examples/image_text_to_text_inference.py +++ /dev/null @@ -1,120 +0,0 @@ -# ----------------------------------------------------------------------------- -# -# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. -# SPDX-License-Identifier: BSD-3-Clause -# -# ----------------------------------------------------------------------------- - -import requests -from PIL import Image -from transformers import AutoProcessor, TextStreamer - -from QEfficient import QEFFAutoModelForImageTextToText - -# Add HuggingFace Token to access the model -HF_TOKEN = "" - - -def run_model( - model_name, - token, - query, - image_url, - kv_offload=False, - prefill_seq_len=32, - ctx_len=512, - generation_len=128, - img_size=560, - num_cores=16, - num_devices=1, -): - ## STEP - 1 Load the Processor and Model - - processor = AutoProcessor.from_pretrained(model_name, token=token) - - # `kv_offload` is used to compile the model in a Single QPC or 2 QPCs. - # The Dual QPC approach splits the model to perform Image Encoding and Output generation in 2 different QPCs. - # The outputs of the Vision Encoder are then passed to the Language model via host in this case. 
- - model = QEFFAutoModelForImageTextToText.from_pretrained( - model_name, token=token, attn_implementation="eager", kv_offload=kv_offload - ) - - ## STEP - 2 Export & Compile the Model - - model.compile( - prefill_seq_len=prefill_seq_len, - ctx_len=ctx_len, - img_size=img_size, - num_cores=num_cores, - num_devices=num_devices, - mxfp6_matmul=False, - ) - - ## STEP - 3 Load and process the inputs for Inference - - image = Image.open(requests.get(image_url, stream=True).raw) - messages = [ - { - "role": "user", - "content": [ - {"type": "image"}, - {"type": "text", "text": query}, - ], - } - ] - input_text = [processor.apply_chat_template(messages, add_generation_prompt=True)] - - inputs = processor( - text=input_text, - images=image, - return_tensors="pt", - add_special_tokens=False, - padding="max_length", - max_length=prefill_seq_len, - ) - - ## STEP - 4 Run Inference on the compiled model - - streamer = TextStreamer(processor.tokenizer) - model.generate(inputs=inputs, streamer=streamer, generation_len=generation_len) - - -if __name__ == "__main__": - # Model name and Input parameters - model_name = "meta-llama/Llama-3.2-11B-Vision-Instruct" - query = "Describe this image." - image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg" - - # Compilation parameters for the model - kv_offload = False - prefill_seq_len = 32 - ctx_len = 512 - generation_len = 128 - img_size = 560 - num_cores = 16 - num_devices = 1 - - run_model( - model_name=model_name, - token=HF_TOKEN, - query=query, - kv_offload=kv_offload, - image_url=image_url, - prefill_seq_len=prefill_seq_len, - ctx_len=ctx_len, - generation_len=generation_len, - img_size=img_size, - num_cores=num_cores, - num_devices=num_devices, - ) - - -""" -Expected Response: - -This image depicts a charming anthropomorphic rabbit standing on a dirt path in front of a picturesque stone cottage, surrounded by a serene landscape. 
- -The rabbit, with its light brown fur and distinctive long ears, is attired in a stylish blue coat, brown vest, and tan pants, exuding a sense of sophistication. The dirt path, flanked by vibrant flowers and lush greenery, leads to the cottage, which features a thatched roof and a chimney, adding to the rustic charm of the scene. In the background, rolling hills and trees create a breathtaking panorama, while the sky above is a brilliant blue with white clouds, completing the - -""" diff --git a/examples/llama4_CB_example_vision_lang.py b/examples/llama4_CB_example_vision_lang.py deleted file mode 100644 index f285ea278..000000000 --- a/examples/llama4_CB_example_vision_lang.py +++ /dev/null @@ -1,93 +0,0 @@ -# ----------------------------------------------------------------------------- -# -# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. -# SPDX-License-Identifier: BSD-3-Clause -# -# ---------------------------------------------------------------------------- - -import transformers -from transformers import AutoConfig, AutoProcessor - -from QEfficient import QEFFAutoModelForImageTextToText - -model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct" -config = AutoConfig.from_pretrained(model_id) -# For Testing Purpose Only -config.text_config.num_hidden_layers = 4 -config.vision_config.num_hidden_layers = 2 - -tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) -processor = AutoProcessor.from_pretrained(model_id) - -continious_batching = False -if continious_batching: - qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( - model_id, - attn_implementation="eager", - kv_offload=True, - config=config, - continuous_batching=True, - ) - - qeff_model.compile( - prefill_seq_len=128, - ctx_len=3072, - img_size=336, - num_cores=16, - num_devices=4, - max_num_tiles=17, - batch_size=1, - full_batch_size=4, - mxfp6_matmul=True, - mxint8_kv_cache=True, - aic_enable_depth_first=True, - mos=1, - ) -else: - qeff_model = 
QEFFAutoModelForImageTextToText.from_pretrained( - model_id, - attn_implementation="eager", - kv_offload=True, - config=config, - ) - - qeff_model.compile( - prefill_seq_len=128, - ctx_len=3072, - img_size=336, - num_cores=16, - num_devices=4, - max_num_tiles=17, - batch_size=1, - mxfp6_matmul=True, - mxint8_kv_cache=True, - aic_enable_depth_first=True, - mos=1, - ) - -image_urls = [ - "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", - "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", - "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", - "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", -] - -prompts = [ - "Can you describe the image in detail?", - "What are the objects in the image?", - "What is the main subject of the image?", - "What colors are predominant in the image?", -] - -exec_info = qeff_model.generate( - tokenizer=tokenizer, - prompts=prompts, - processor=processor, - images=image_urls, - device_ids=[0, 1, 2, 3], - generation_len=100, -) - -# print("Generated texts:", exec_info.generated_texts) -print("Generated IDs:", exec_info.generated_ids) -print(exec_info) diff --git a/examples/peft/README.md b/examples/peft/README.md new file mode 100644 index 000000000..fbc8c99b7 --- /dev/null +++ b/examples/peft/README.md @@ -0,0 +1,83 @@ +# PEFT Examples + +Examples for running Parameter-Efficient Fine-Tuning (PEFT) models with LoRA adapters on Qualcomm Cloud AI 100. + + +## Authentication + +For private/gated models, export your HuggingFace token: +```bash +export HF_TOKEN= +``` + +## Supported Models + +**QEff Auto Class:** `QEffAutoPeftModelForCausalLM` + +PEFT/LoRA adapters work with any supported base model architecture. 
+ +Popular base models include: +- Llama +- Mistral, Mixtral + + +## Available Examples + +### single_adapter.py +Load and use a single LoRA adapter with a base model. + +**Usage:** +```python +python single_adapter.py +``` + +This example: +- Loads Mistral-7B base model with a LoRA adapter +- Demonstrates adapter switching +- Shows inference with different adapters (magicoder, tldr, gsm8k, agnews) + +### multi_adapter.py +Use multiple LoRA adapters with continuous batching. + +**Usage:** +```python +python multi_adapter.py +``` + +This example: +- Runs multiple adapters simultaneously in one batch +- Demonstrates continuous batching with `full_batch_size=4` +- Shows different prompts using different adapters in the same batch + +## Key Features + +### Single Adapter Mode +- Load one LoRA adapter at a time +- Switch between adapters dynamically +- Suitable for single-task inference + +### Multi-Adapter Mode (Continuous Batching) +- Run multiple adapters simultaneously +- Different prompts can use different adapters in the same batch +- Efficient for multi-task scenarios +- Requires `continuous_batching=True` and `finite_adapters=True` + +## Adapter Management + +```python +# Load adapter +qeff_model.load_adapter("predibase/adapter_name", "adapter_name") + +# Set active adapter +qeff_model.set_adapter("adapter_name") + +# Unload adapter +qeff_model.unload_adapter("adapter_name") +``` + +## Documentation + +- [QEff Auto Classes](https://quic.github.io/efficient-transformers/source/qeff_autoclasses.html) +- [Validated Base Models](https://quic.github.io/efficient-transformers/source/validate.html#text-only-language-models) +- [PEFT Documentation](https://huggingface.co/docs/peft) +- [Quick Start Guide](https://quic.github.io/efficient-transformers/source/quick_start.html) diff --git a/examples/lora_models.py b/examples/peft/multi_adapter.py similarity index 100% rename from examples/lora_models.py rename to examples/peft/multi_adapter.py diff --git 
a/examples/peft_models.py b/examples/peft/single_adapter.py similarity index 60% rename from examples/peft_models.py rename to examples/peft/single_adapter.py index 63c196a22..4f84bd13c 100644 --- a/examples/peft_models.py +++ b/examples/peft/single_adapter.py @@ -5,6 +5,8 @@ # # ----------------------------------------------------------------------------- +## This example demonstrates single adapter usage with sequential adapter switching ## + from transformers import AutoTokenizer, TextStreamer from QEfficient import QEffAutoPeftModelForCausalLM @@ -12,19 +14,27 @@ base_model_name = "mistralai/Mistral-7B-v0.1" tokenizer = AutoTokenizer.from_pretrained(base_model_name) streamer = TextStreamer(tokenizer) +prefill_seq_len = 32 +ctx_len = 1024 +generation_len = 1024 + + +## STEP 1 -- init base model +qeff_model = QEffAutoPeftModelForCausalLM.from_pretrained("predibase/magicoder", "magicoder") + +## STEP 2 -- export & compile qeff model +qeff_model.compile(prefill_seq_len=prefill_seq_len, ctx_len=ctx_len) -m = QEffAutoPeftModelForCausalLM.from_pretrained("predibase/magicoder", "magicoder") -m.export() -m.compile(prefill_seq_len=32, ctx_len=1024) +## STEP 3 -- run inference with different adapters -# Magicoder adapter -m.set_adapter("magicoder") +# Magicoder adapter - code generation +qeff_model.set_adapter("magicoder") inputs = tokenizer("def fibonacci", return_tensors="pt") -m.generate(**inputs, streamer=streamer, max_new_tokens=1024) +qeff_model.generate(**inputs, streamer=streamer, max_new_tokens=generation_len) -# TLDR, summary generator -m.load_adapter("predibase/tldr_headline_gen", "tldr_headline_gen") -m.set_adapter("tldr_headline_gen") +## STEP 3.1 -- load and use TLDR headline generator adapter +qeff_model.load_adapter("predibase/tldr_headline_gen", "tldr_headline_gen") +qeff_model.set_adapter("tldr_headline_gen") inputs = tokenizer( """Summarize this passage in one sentence or less: Jeffrey Berns, CEO of Blockchains LLC, wants the Nevada government to allow 
companies like \ his to form local governments on land they own, granting them power over everything from \ @@ -36,21 +46,21 @@ Summary: """, return_tensors="pt", ) -m.generate(**inputs, streamer=streamer, max_new_tokens=1024) +qeff_model.generate(**inputs, streamer=streamer, max_new_tokens=1024) -# Math problems -m.load_adapter("predibase/gsm8k", "gsm8k") -m.set_adapter("gsm8k") +## STEP 3.2 -- load and use GSM8K adapter for math problems +qeff_model.load_adapter("predibase/gsm8k", "gsm8k") +qeff_model.set_adapter("gsm8k") inputs = tokenizer( "James decides to run 3 sprints 3 times a week. He runs 60 meters each sprint. \ How many total meters does he run a week?", return_tensors="pt", ) -m.generate(**inputs, streamer=streamer, max_new_tokens=1024) +qeff_model.generate(**inputs, streamer=streamer, max_new_tokens=1024) -# News explanation -m.load_adapter("predibase/agnews_explained", "agnews_explained") -m.set_adapter("agnews_explained") +## STEP 3.3 -- load and use AGNews adapter for news classification +qeff_model.load_adapter("predibase/agnews_explained", "agnews_explained") +qeff_model.set_adapter("agnews_explained") inputs = tokenizer( """Below is a news article. Please classify it under one of the following \ classes (World, Business, Sports, Sci/Tech) and provide a reasonable coherent explanation for \ @@ -65,4 +75,4 @@ """, return_tensors="pt", ) -m.generate(**inputs, streamer=streamer, max_new_tokens=1024) +qeff_model.generate(**inputs, streamer=streamer, max_new_tokens=1024) diff --git a/examples/performance/README.md b/examples/performance/README.md new file mode 100644 index 000000000..48d34d972 --- /dev/null +++ b/examples/performance/README.md @@ -0,0 +1,110 @@ +# Performance Optimization Examples + +Examples demonstrating performance optimization techniques for Qualcomm Cloud AI 100. 
+ +## Authentication + +For private/gated models, export your HuggingFace token: +```bash +export HF_TOKEN= +``` + +## Available Examples + +### Speculative Decoding + +Accelerate text generation using speculative decoding techniques. + +#### draft_based.py +Draft-based speculative decoding with separate draft and target models. + +**Basic Usage:** +```bash +python speculative_decoding/draft_based.py \ +    --target-model-name TinyLlama/TinyLlama-1.1B-Chat-v1.0 \ +    --draft-model-name TinyLlama/TinyLlama-1.1B-Chat-v1.0 \ +    --num-speculative-tokens 4 +``` + +**Advanced Usage:** +```bash +python speculative_decoding/draft_based.py \ +    --target-model-name meta-llama/Llama-3.1-8B \ +    --draft-model-name TinyLlama/TinyLlama-1.1B-Chat-v1.0 \ +    --num-speculative-tokens 4 \ +    --prefill-seq-len 32 \ +    --ctx-len 128 \ +    --target-device-group 0,1 \ +    --draft-device-group 2 +``` + + + +#### prompt_lookup.py +Prompt Lookup Decoding (PLD) - N-gram based speculation without a draft model. + +**Basic Usage:** +```bash +python speculative_decoding/prompt_lookup.py \ +    --target-model-name TinyLlama/TinyLlama-1.1B-Chat-v1.0 \ +    --num-speculative-tokens 3 \ +    --max-ngram-size 3 +``` + +#### multi_projection.py +Multi-projection speculative decoding (Turbo models). + +**Basic Usage:** +```bash +python speculative_decoding/multi_projection.py \ +    --pretrained-model-name-or-path TinyLlama/TinyLlama-1.1B-Chat-v1.0 +``` + + +### On-Device Sampling + +Control sampling parameters directly on the AI 100 hardware. + +#### on_device_sampling.py +Configure sampling parameters (temperature, top-k, top-p, etc.) on-device.
+ +**Basic Usage:** +```bash +python on_device_sampling.py \ + --model-name meta-llama/Llama-3.1-8B \ + --num-cores 16 \ + --prompt-len 128 \ + --ctx-len 256 +``` + +**Advanced Usage with Sampling Parameters:** +```bash +python on_device_sampling.py \ + --model-name meta-llama/Llama-3.1-8B \ + --prompt-len 128 \ + --ctx-len 256 \ + --full-batch-size 2 \ + --device-group 0,1,2,3 \ + --num-cores 16 \ + --mxint8-kv-cache \ + --mxfp6-matmul \ + --override-qaic-config "aic_include_sampler:true aic_return_pdfs:false max_top_k_ids:512" \ + --repetition-penalty 1.9 \ + --temperature 0.67 \ + --top-k 54720 \ + --top-p 0.89 +``` + +## Performance Tips + +1. **Speculative Decoding**: Best for long-form generation where draft model is much faster than target +2. **Prompt Lookup**: No draft model needed, works well for repetitive patterns +3. **Multi-Projection**: Optimal for models with built-in speculation support +4. **On-Device Sampling**: Reduces host-device communication overhead +5. **C++ Execution**: Maximum performance for production deployments + +## Documentation + +- [QEff Auto Classes](https://quic.github.io/efficient-transformers/source/qeff_autoclasses.html) +- [Performance Features](https://quic.github.io/efficient-transformers/source/features_enablement.html) +- [Quick Start Guide](https://quic.github.io/efficient-transformers/source/quick_start.html) diff --git a/examples/performance/compute_context_length/README.md b/examples/performance/compute_context_length/README.md new file mode 100644 index 000000000..bbc240645 --- /dev/null +++ b/examples/performance/compute_context_length/README.md @@ -0,0 +1,323 @@ +# Compute Context Length (CCL) Examples + +Examples demonstrating Compute Context Length (CCL) optimization for efficient inference on Qualcomm Cloud AI 100. + +## What is CCL? 
+ +Compute Context Length (CCL) is a performance optimization feature that allows models to use different context lengths during different phases of inference: + +- **Prefill Phase**: Processing the initial prompt with optimized context lengths +- **Decode Phase**: Generating new tokens with dynamically adjusted context lengths + +This optimization provides: +- **Memory Efficiency**: Uses smaller context lengths when possible +- **Performance Optimization**: Reduces computation for shorter sequences +- **Flexible Scaling**: Adapts context length based on actual sequence position +- **Hardware Optimization**: Optimized for Qualcomm Cloud AI 100 accelerators + +## Authentication + +For private/gated models, export your HuggingFace token: +```bash +export HF_TOKEN= +``` + +## Quick Start + +### Text-Only Models + +Run basic CCL inference with default settings: +```bash +python basic_inference.py +``` + +Customize with command-line arguments: +```bash +python basic_inference.py \ + --model-name meta-llama/Llama-3.2-1B \ + --prompt "Hello, how are you?" \ + --ctx-len 1024 \ + --comp-ctx-lengths-prefill "256,500" \ + --comp-ctx-lengths-decode "512,1024" \ + --generation-len 100 +``` + +### Vision-Language Models + +Run VLM inference with CCL: +```bash +python vlm_inference.py +``` + +Customize with command-line arguments: +```bash +python vlm_inference.py \ + --model-name meta-llama/Llama-3.2-11B-Vision-Instruct \ + --query "Describe this image" \ + --image-url "https://..." \ + --comp-ctx-lengths-prefill "4096" \ + --comp-ctx-lengths-decode "6144,8192" \ + --ctx-len 8192 +``` + +## Available Examples + +### Text-Only Models + +#### basic_inference.py +Basic CCL usage with text-only language models. 
+ +**Supported Models:** +- Llama (3.2, 3.3) +- Gemma/Gemma-2 +- Mistral +- Phi/Phi-3 +- Qwen +- Granite +- GPT-2, GPT-J +- CodeGen +- OLMo-2 + +**Command-Line Arguments:** +- `--model-name`: HuggingFace model ID (default: meta-llama/Llama-3.2-1B) +- `--prompt`: Input prompt (default: "My name is ") +- `--ctx-len`: Maximum context length (default: 1024) +- `--comp-ctx-lengths-prefill`: Comma-separated prefill context lengths (default: 256,500) +- `--comp-ctx-lengths-decode`: Comma-separated decode context lengths (default: 512,1024) +- `--generation-len`: Number of tokens to generate (default: 128) +- `--continuous-batching`: Enable continuous batching mode +- `--num-cores`: Number of cores (default: 16) +- `--num-devices`: Number of devices (default: 1) + +**Usage Examples:** +```bash +# Basic usage with defaults +python basic_inference.py + +# Custom model and prompt +python basic_inference.py \ + --model-name Qwen/Qwen2.5-7B-Instruct \ + --prompt "Explain quantum computing" + +# With continuous batching +python basic_inference.py \ + --continuous-batching \ + --full-batch-size 4 + +# Larger context with progressive CCL +python basic_inference.py \ + --ctx-len 4096 \ + --comp-ctx-lengths-prefill "1024,2048" \ + --comp-ctx-lengths-decode "2048,3072,4096" +``` + +**Python API:** +```python +from transformers import AutoTokenizer +from QEfficient import QEFFAutoModelForCausalLM + +model = QEFFAutoModelForCausalLM.from_pretrained( + "meta-llama/Llama-3.2-1B", + qaic_config={ + "comp_ctx_lengths_prefill": [256, 500], + "comp_ctx_lengths_decode": [512, 1024], + "ctx_len": 1024, # Required for CCL validation + }, +) +``` + +#### gpt_oss.py +CCL for GPT-OSS MoE models with prefill_seq_len=1 optimization. + +**Usage:** +```bash +python gpt_oss.py +``` + +**Note:** For MoE models, both prefill and decode CCL lists can be similar when using prefill_seq_len=1. + +### Vision-Language Models + +#### vlm_inference.py +General VLM inference with CCL optimization. 
+ +**Usage:** +```bash +python vlm_inference.py +``` + +#### gemma3.py +CCL for Gemma-3 multimodal models (4B/27B). + +**Usage:** +```bash +python gemma3.py +``` + +#### granite_vision.py +CCL for IBM Granite Vision models. + +**Usage:** +```bash +python granite_vision.py +``` + +#### internvl.py +CCL for InternVL2.5 models with custom processor. + +**Usage:** +```bash +python internvl.py +``` + +#### llama4.py +CCL for Llama-4 Scout vision-language models. + +**Usage:** +```bash +python llama4.py +``` + +#### llama4_cb.py +CCL for Llama-4 with continuous batching. + +**Usage:** +```bash +python llama4_cb.py +``` + +#### llama4_multi_image.py +CCL for Llama-4 with multiple images. + +**Usage:** +```bash +python llama4_multi_image.py +``` + +#### mistral3.py +CCL for Mistral-Small-3.1 vision models. + +**Usage:** +```bash +python mistral3.py +``` + +#### molmo.py +CCL for Molmo-7B multimodal models. + +**Usage:** +```bash +python molmo.py +``` + +#### qwen2_5_vl.py +CCL for Qwen2.5-VL models (32B). + +**Usage:** +```bash +python qwen2_5_vl.py +``` + +#### qwen2_5_vl_cb.py +CCL for Qwen2.5-VL with continuous batching. + +**Usage:** +```bash +python qwen2_5_vl_cb.py +``` + +## Configuration Guidelines + +### Choosing CCL Values + +1. **Prefill Context Lengths** (`comp_ctx_lengths_prefill`): + - Start with smaller values (e.g., [256, 512, 1024]) + - Should be less than or equal to your prefill_seq_len + - Gradually increase based on prompt chunk position + +2. **Decode Context Lengths** (`comp_ctx_lengths_decode`): + - Start from a value based on expected prompt length + - Include intermediate steps (e.g., [512, 1024, 2048, ctx_len]) + - Final value should match ctx_len + +3. 
**Context Length** (`ctx_len`): + - Maximum context length for the model + - Required parameter for CCL validation + - Should match your model's maximum supported length + +### Example Configurations + +**Small Context (1K-2K):** +```python +ctx_len = 2048 +comp_ctx_lengths_prefill = [256, 512] +comp_ctx_lengths_decode = [1024, ctx_len] +``` + +**Medium Context (4K-8K):** +```python +ctx_len = 8192 +comp_ctx_lengths_prefill = [3072, 4096] +comp_ctx_lengths_decode = [4096, 6144, ctx_len] +``` + +**Large Context (16K+):** +```python +ctx_len = 16384 +comp_ctx_lengths_prefill = [4096, 8192] +comp_ctx_lengths_decode = [8192, 12288, ctx_len] +``` + +## Performance Tips + +1. **Memory Optimization**: Use smaller CCL values for prefill to reduce memory footprint +2. **Progressive Scaling**: Include intermediate CCL values in decode list for smooth transitions +3. **Vision Models**: Larger prefill contexts needed for image embeddings +4. **Continuous Batching**: CCL works seamlessly with CB for dynamic workloads +5. 
**MoE Models**: Consider prefill_seq_len=1 for optimal performance + +## Common Patterns + +### Text-Only Model +```python +model = QEFFAutoModelForCausalLM.from_pretrained( + model_name, + qaic_config={ + "comp_ctx_lengths_prefill": [256, 500], + "comp_ctx_lengths_decode": [512, 1024], + "ctx_len": 1024, + }, +) +``` + +### Vision-Language Model +```python +model = QEFFAutoModelForImageTextToText.from_pretrained( + model_name, + kv_offload=True, + qaic_config={ + "comp_ctx_lengths_prefill": [3072], + "comp_ctx_lengths_decode": [4096, 8192], + "ctx_len": 8192, + }, +) +``` + +### Continuous Batching +```python +model = QEFFAutoModelForCausalLM.from_pretrained( + model_name, + continuous_batching=True, + qaic_config={ + "comp_ctx_lengths_prefill": [256, 500], + "comp_ctx_lengths_decode": [512, 1024], + "ctx_len": 1024, + }, +) +``` + +## Documentation + +- [QEff Auto Classes](https://quic.github.io/efficient-transformers/source/qeff_autoclasses.html) +- [Performance Features](https://quic.github.io/efficient-transformers/source/features_enablement.html) +- [Quick Start Guide](https://quic.github.io/efficient-transformers/source/quick_start.html) diff --git a/examples/performance/compute_context_length/basic_inference.py b/examples/performance/compute_context_length/basic_inference.py new file mode 100644 index 000000000..a4407b05a --- /dev/null +++ b/examples/performance/compute_context_length/basic_inference.py @@ -0,0 +1,154 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +""" +Basic Compute Context Length (CCL) inference example. + +This example demonstrates how to use CCL optimization for text generation models. 
+CCL allows using different context lengths during prefill and decode phases, +reducing memory footprint and computation for shorter sequences. +""" + +import argparse + +from transformers import AutoTokenizer + +from QEfficient import QEFFAutoModelForCausalLM + + +def main(): + parser = argparse.ArgumentParser(description="Text generation with Compute Context Length (CCL) optimization") + parser.add_argument( + "--model-name", + type=str, + default="meta-llama/Llama-3.2-1B", + help="HuggingFace model ID", + ) + parser.add_argument( + "--prompt", + type=str, + default="My name is ", + help="Input prompt for text generation", + ) + parser.add_argument( + "--prefill-seq-len", + type=int, + default=128, + help="Prefill sequence length", + ) + parser.add_argument( + "--ctx-len", + type=int, + default=1024, + help="Maximum context length", + ) + parser.add_argument( + "--comp-ctx-lengths-prefill", + type=lambda x: [int(i) for i in x.split(",")], + default="256,500", + help="Comma-separated list of context lengths for prefill phase (e.g., '256,500')", + ) + parser.add_argument( + "--comp-ctx-lengths-decode", + type=lambda x: [int(i) for i in x.split(",")], + default="512,1024", + help="Comma-separated list of context lengths for decode phase (e.g., '512,1024')", + ) + parser.add_argument( + "--generation-len", + type=int, + default=128, + help="Number of tokens to generate", + ) + parser.add_argument( + "--num-cores", + type=int, + default=16, + help="Number of cores for compilation", + ) + parser.add_argument( + "--num-devices", + type=int, + default=1, + help="Number of devices to use", + ) + parser.add_argument( + "--continuous-batching", + action="store_true", + help="Enable continuous batching mode", + ) + parser.add_argument( + "--full-batch-size", + type=int, + default=1, + help="Full batch size for continuous batching", + ) + parser.add_argument( + "--mxint8-kv-cache", + action="store_true", + default=True, + help="Enable MX INT8 KV cache", + ) + 
parser.add_argument( + "--mxfp6-matmul", + action="store_true", + default=True, + help="Enable MX FP6 matrix multiplication", + ) + args = parser.parse_args() + + print(f"Loading model: {args.model_name}") + print("CCL Configuration:") + print(f" - Prefill context lengths: {args.comp_ctx_lengths_prefill}") + print(f" - Decode context lengths: {args.comp_ctx_lengths_decode}") + print(f" - Max context length: {args.ctx_len}") + print(f" - Continuous batching: {args.continuous_batching}") + + # Load model with CCL configuration + model = QEFFAutoModelForCausalLM.from_pretrained( + args.model_name, + continuous_batching=args.continuous_batching, + qaic_config={ + "comp_ctx_lengths_prefill": args.comp_ctx_lengths_prefill, + "comp_ctx_lengths_decode": args.comp_ctx_lengths_decode, + "ctx_len": args.ctx_len, # Required for CCL validation + }, + ) + + # Compile the model + print("\nCompiling model...") + compile_kwargs = { + "prefill_seq_len": args.prefill_seq_len, + "ctx_len": args.ctx_len, + "num_cores": args.num_cores, + "num_devices": args.num_devices, + "mxint8_kv_cache": args.mxint8_kv_cache, + "mxfp6_matmul": args.mxfp6_matmul, + } + + if args.continuous_batching: + compile_kwargs["full_batch_size"] = args.full_batch_size + + qpc_path = model.compile(**compile_kwargs) + print(f"Model compiled successfully to: {qpc_path}") + + # Load tokenizer and generate + print("\nGenerating text...") + tokenizer = AutoTokenizer.from_pretrained(args.model_name) + + exec_info = model.generate( + prompts=[args.prompt], + tokenizer=tokenizer, + generation_len=args.generation_len, + ) + + print(f"\nPrompt: {args.prompt}") + print(f"Generated: {exec_info.generated_texts[0]}") + + +if __name__ == "__main__": + main() diff --git a/examples/gemma3_example/ccl_gemma3_mm.py b/examples/performance/compute_context_length/gemma3.py similarity index 98% rename from examples/gemma3_example/ccl_gemma3_mm.py rename to examples/performance/compute_context_length/gemma3.py index 9bf6e9c5a..c31b1748a 
100644 --- a/examples/gemma3_example/ccl_gemma3_mm.py +++ b/examples/performance/compute_context_length/gemma3.py @@ -38,7 +38,7 @@ }, ) -### use skip_vision=Ture, if want to run only text, or false ### +### use skip_vision=True, if want to run only text, or false ### skip_vision = False if skip_vision: diff --git a/examples/ccl_gpt_oss.py b/examples/performance/compute_context_length/gpt_oss.py similarity index 100% rename from examples/ccl_gpt_oss.py rename to examples/performance/compute_context_length/gpt_oss.py diff --git a/examples/granite_example/ccl_granite_vision_inference.py b/examples/performance/compute_context_length/granite_vision.py similarity index 98% rename from examples/granite_example/ccl_granite_vision_inference.py rename to examples/performance/compute_context_length/granite_vision.py index 64ecaf948..39b139bad 100644 --- a/examples/granite_example/ccl_granite_vision_inference.py +++ b/examples/performance/compute_context_length/granite_vision.py @@ -11,9 +11,6 @@ from QEfficient import QEFFAutoModelForImageTextToText -# Add HuggingFace Token to access the model -HF_TOKEN = "" - def run_model( model_name, @@ -104,7 +101,6 @@ def run_model( run_model( model_name=model_name, - token=HF_TOKEN, query=query, kv_offload=kv_offload, image_url=image_url, diff --git a/examples/intern_example/ccl_internvl_inference.py b/examples/performance/compute_context_length/internvl.py similarity index 100% rename from examples/intern_example/ccl_internvl_inference.py rename to examples/performance/compute_context_length/internvl.py diff --git a/examples/ccl_llama4_example.py b/examples/performance/compute_context_length/llama4.py similarity index 98% rename from examples/ccl_llama4_example.py rename to examples/performance/compute_context_length/llama4.py index 5da29960f..534be8f96 100644 --- a/examples/ccl_llama4_example.py +++ b/examples/performance/compute_context_length/llama4.py @@ -37,7 +37,7 @@ tokenizer = 
transformers.AutoTokenizer.from_pretrained(model_id) processor = AutoProcessor.from_pretrained(model_id) -### use skip_vision=Ture, if want to run only text, ow false ### +### use skip_vision=True, if want to run only text, or false ### skip_vision = False if skip_vision: diff --git a/examples/ccl_llama4_CB_example_vision_lang.py b/examples/performance/compute_context_length/llama4_cb.py similarity index 95% rename from examples/ccl_llama4_CB_example_vision_lang.py rename to examples/performance/compute_context_length/llama4_cb.py index 6423ee765..ea7c09d69 100644 --- a/examples/ccl_llama4_CB_example_vision_lang.py +++ b/examples/performance/compute_context_length/llama4_cb.py @@ -100,7 +100,6 @@ prompts=prompts, processor=processor, images=image_urls, - device_ids=[32, 33, 34, 35], generation_len=100, ) diff --git a/examples/ccl_llama4_multi_image_example.py b/examples/performance/compute_context_length/llama4_multi_image.py similarity index 96% rename from examples/ccl_llama4_multi_image_example.py rename to examples/performance/compute_context_length/llama4_multi_image.py index 33bf07df0..d7c403e5f 100644 --- a/examples/ccl_llama4_multi_image_example.py +++ b/examples/performance/compute_context_length/llama4_multi_image.py @@ -83,7 +83,7 @@ inputs["pixel_values"] = inputs["pixel_values"].to(torch.float32) streamer = TextStreamer(tokenizer) -output = qeff_model.generate(inputs=inputs, device_ids=[32, 33, 34, 35], generation_len=100) +output = qeff_model.generate(inputs=inputs, generation_len=100) print(output.generated_ids) print(tokenizer.batch_decode(output.generated_ids)) print(output) diff --git a/examples/ccl_mistral3_example.py b/examples/performance/compute_context_length/mistral3.py similarity index 100% rename from examples/ccl_mistral3_example.py rename to examples/performance/compute_context_length/mistral3.py diff --git a/examples/ccl_molmo_example.py b/examples/performance/compute_context_length/molmo.py similarity index 97% rename from 
examples/ccl_molmo_example.py rename to examples/performance/compute_context_length/molmo.py index dd09fa020..f68481631 100644 --- a/examples/ccl_molmo_example.py +++ b/examples/performance/compute_context_length/molmo.py @@ -37,7 +37,7 @@ tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, trust_remote_code=True) processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True) -### use skip_vision=Ture, if want to run only text, ow false ### +### use skip_vision=True, if want to run only text, or false ### skip_vision = False if skip_vision: diff --git a/examples/ccl_qwen2_5_vl_example.py b/examples/performance/compute_context_length/qwen2_5_vl.py similarity index 98% rename from examples/ccl_qwen2_5_vl_example.py rename to examples/performance/compute_context_length/qwen2_5_vl.py index 273a18361..00f43a73f 100644 --- a/examples/ccl_qwen2_5_vl_example.py +++ b/examples/performance/compute_context_length/qwen2_5_vl.py @@ -39,7 +39,7 @@ tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) processor = AutoProcessor.from_pretrained(model_id) -### use skip_vision=Ture, if want to run only text, ow false ### +### use skip_vision=True, if want to run only text, or false ### skip_vision = False if skip_vision: diff --git a/examples/ccl_qwen2_5_vl_CB.py b/examples/performance/compute_context_length/qwen2_5_vl_cb.py similarity index 100% rename from examples/ccl_qwen2_5_vl_CB.py rename to examples/performance/compute_context_length/qwen2_5_vl_cb.py diff --git a/examples/qwen3moe_example/ccl_qwen3moe_inference.py b/examples/performance/compute_context_length/qwen3moe_example/ccl_qwen3moe_inference.py similarity index 100% rename from examples/qwen3moe_example/ccl_qwen3moe_inference.py rename to examples/performance/compute_context_length/qwen3moe_example/ccl_qwen3moe_inference.py diff --git a/examples/performance/compute_context_length/vlm_inference.py b/examples/performance/compute_context_length/vlm_inference.py new file mode 100644 
index 000000000..0920ddf30 --- /dev/null +++ b/examples/performance/compute_context_length/vlm_inference.py @@ -0,0 +1,236 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +""" +Vision-Language Model (VLM) inference with Compute Context Length (CCL) optimization. + +This example demonstrates how to use CCL optimization for vision-language models. +CCL allows using different context lengths during prefill and decode phases, +reducing memory footprint and computation while maintaining support for longer contexts. +""" + +import argparse + +import requests +from PIL import Image +from transformers import AutoProcessor, TextStreamer + +from QEfficient import QEFFAutoModelForImageTextToText + + +def run_model( + model_name, + query, + image_url, + hf_token=None, + kv_offload=True, + prefill_seq_len=32, + ctx_len=8192, + comp_ctx_lengths_prefill=None, + comp_ctx_lengths_decode=None, + generation_len=128, + img_size=560, + num_cores=16, + num_devices=4, +): + """ + Run VLM inference with CCL optimization. 
+ + Args: + model_name: HuggingFace model ID + query: Text query about the image + image_url: URL of the image to process + hf_token: HuggingFace token for gated models + kv_offload: Enable Dual QPC mode (vision encoder and LM in separate QPCs) + prefill_seq_len: Prefill sequence length + ctx_len: Maximum context length + comp_ctx_lengths_prefill: List of context lengths for prefill phase + comp_ctx_lengths_decode: List of context lengths for decode phase + generation_len: Number of tokens to generate + img_size: Image size for processing + num_cores: Number of cores for compilation + num_devices: Number of devices to use + """ + print(f"Loading model: {model_name}") + print(f"KV offload (Dual QPC mode): {kv_offload}") + print("CCL Configuration:") + print(f" - Prefill context lengths: {comp_ctx_lengths_prefill}") + print(f" - Decode context lengths: {comp_ctx_lengths_decode}") + print(f" - Max context length: {ctx_len}") + + ## STEP 1: Load the Processor and Model + + processor = AutoProcessor.from_pretrained(model_name, token=hf_token) + + # `kv_offload` determines Single QPC vs Dual QPC mode: + # - Single QPC (kv_offload=False): Entire model runs in one QPC + # - Dual QPC (kv_offload=True): Vision encoder and language model run in separate QPCs + # with outputs transferred via host for flexibility + + model = QEFFAutoModelForImageTextToText.from_pretrained( + model_name, + token=hf_token, + attn_implementation="eager", + kv_offload=kv_offload, + qaic_config={ + "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, + "comp_ctx_lengths_decode": comp_ctx_lengths_decode, + "ctx_len": ctx_len, + }, + ) + + ## STEP 2: Export & Compile the Model + + print("\nCompiling model...") + qpc_path = model.compile( + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + img_size=img_size, + num_cores=num_cores, + num_devices=num_devices, + mxfp6_matmul=False, + ) + print(f"Model compiled successfully to: {qpc_path}") + + ## STEP 3: Load and Process the Inputs for Inference + + 
print(f"\nLoading image from: {image_url}") + image = Image.open(requests.get(image_url, stream=True).raw) + + messages = [ + { + "role": "user", + "content": [ + {"type": "image"}, + {"type": "text", "text": query}, + ], + } + ] + input_text = [processor.apply_chat_template(messages, add_generation_prompt=True)] + + inputs = processor( + text=input_text, + images=image, + return_tensors="pt", + add_special_tokens=False, + padding="max_length", + max_length=prefill_seq_len, + ) + + ## STEP 4: Run Inference on the Compiled Model + + print(f"\nQuery: {query}") + print("Generated response:") + streamer = TextStreamer(processor.tokenizer) + output_statistics = model.generate(inputs=inputs, streamer=streamer, generation_len=generation_len) + + print(f"Tokens generated: {len(output_statistics.generated_ids[0])}") + + +def main(): + parser = argparse.ArgumentParser( + description="Vision-Language Model (VLM) inference with Compute Context Length (CCL) optimization" + ) + parser.add_argument( + "--model-name", + type=str, + default="meta-llama/Llama-3.2-11B-Vision-Instruct", + help="HuggingFace VLM model ID", + ) + parser.add_argument( + "--query", + type=str, + default="Describe this image.", + help="Text query/question about the image", + ) + parser.add_argument( + "--image-url", + type=str, + default="https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + help="URL of the image to process", + ) + parser.add_argument( + "--hf-token", + type=str, + default=None, + help="HuggingFace token for accessing gated models", + ) + parser.add_argument( + "--kv-offload", + action="store_true", + default=True, + help="Enable Dual QPC mode (vision encoder and LM in separate QPCs)", + ) + parser.add_argument( + "--prefill-seq-len", + type=int, + default=32, + help="Prefill sequence length", + ) + parser.add_argument( + "--ctx-len", + type=int, + default=8192, + help="Maximum context length", + ) + 
parser.add_argument( + "--comp-ctx-lengths-prefill", + type=lambda x: [int(i) for i in x.split(",")], + default="4096", + help="Comma-separated list of context lengths for prefill phase (e.g., '4096')", + ) + parser.add_argument( + "--comp-ctx-lengths-decode", + type=lambda x: [int(i) for i in x.split(",")], + default="6144,8192", + help="Comma-separated list of context lengths for decode phase (e.g., '6144,8192')", + ) + parser.add_argument( + "--generation-len", + type=int, + default=128, + help="Number of tokens to generate", + ) + parser.add_argument( + "--img-size", + type=int, + default=336, + help="Image size for processing", + ) + parser.add_argument( + "--num-cores", + type=int, + default=16, + help="Number of cores for compilation", + ) + parser.add_argument( + "--num-devices", + type=int, + default=4, + help="Number of devices to use", + ) + args = parser.parse_args() + + run_model( + model_name=args.model_name, + query=args.query, + image_url=args.image_url, + hf_token=args.hf_token, + kv_offload=args.kv_offload, + prefill_seq_len=args.prefill_seq_len, + ctx_len=args.ctx_len, + comp_ctx_lengths_prefill=args.comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=args.comp_ctx_lengths_decode, + generation_len=args.generation_len, + img_size=args.img_size, + num_cores=args.num_cores, + num_devices=args.num_devices, + ) + + +if __name__ == "__main__": + main() diff --git a/examples/cpp_execution/CMakeLists.txt b/examples/performance/cpp_execution/CMakeLists.txt similarity index 100% rename from examples/cpp_execution/CMakeLists.txt rename to examples/performance/cpp_execution/CMakeLists.txt diff --git a/examples/cpp_execution/InferenceSetIOBuffer.cpp b/examples/performance/cpp_execution/InferenceSetIOBuffer.cpp similarity index 100% rename from examples/cpp_execution/InferenceSetIOBuffer.cpp rename to examples/performance/cpp_execution/InferenceSetIOBuffer.cpp diff --git a/examples/cpp_execution/README.md b/examples/performance/cpp_execution/README.md 
similarity index 81% rename from examples/cpp_execution/README.md rename to examples/performance/cpp_execution/README.md index 386921657..2d1c604e5 100644 --- a/examples/cpp_execution/README.md +++ b/examples/performance/cpp_execution/README.md @@ -24,7 +24,7 @@ make -j 8 cd ../../../ # Need to be in base folder - efficient-transformers to run below cmd # Run the python script to get the generated text -python examples/cpp_execution/text_inference_using_cpp.py --model_name gpt2 --batch_size 1 --prompt_len 32 --ctx_len 128 --mxfp6 --num_cores 14 --device_group [0] --prompt "My name is" --mos 1 --aic_enable_depth_first +python examples/performance/cpp_execution/text_inference_cpp.py --model_name gpt2 --batch_size 1 --prompt_len 32 --ctx_len 128 --mxfp6 --num_cores 14 --device_group [0] --prompt "My name is" --mos 1 --aic_enable_depth_first ``` diff --git a/examples/cpp_execution/text_inference_using_cpp.py b/examples/performance/cpp_execution/text_inference_cpp.py similarity index 99% rename from examples/cpp_execution/text_inference_using_cpp.py rename to examples/performance/cpp_execution/text_inference_cpp.py index 072f2c57c..8355c1e44 100644 --- a/examples/cpp_execution/text_inference_using_cpp.py +++ b/examples/performance/cpp_execution/text_inference_cpp.py @@ -229,7 +229,7 @@ def tokenize_decode_output(tokenizer, generated_ids, prompt): "--prompts_txt_file_path", "--prompts-txt-file-path", type=str, - help="File path for taking input prompts from txt file, sample prompts.txt file present in examples folder", + help="File path for taking input prompts from txt file, sample prompts.txt file present in examples/sample_prompts folder", ) parser.add_argument("--generation_len", "--generation-len", type=int, help="Number of tokens to generate") parser.add_argument( diff --git a/examples/on_device_sampling.py b/examples/performance/on_device_sampling.py similarity index 99% rename from examples/on_device_sampling.py rename to 
examples/performance/on_device_sampling.py index 00d8c2430..6cc72b715 100644 --- a/examples/on_device_sampling.py +++ b/examples/performance/on_device_sampling.py @@ -177,7 +177,7 @@ def main(args, **kwargs): "--prompts_txt_file_path", "--prompts-txt-file-path", type=str, - help="File path for taking input prompts from txt file, sample prompts.txt file present in examples folder", + help="File path for taking input prompts from txt file, sample prompts.txt file present in examples/sample_prompts folder", ) parser.add_argument("--generation_len", "--generation-len", type=int, help="Number of tokens to generate") diff --git a/examples/performance/speculative_decoding/README.md b/examples/performance/speculative_decoding/README.md new file mode 100644 index 000000000..e03eb45be --- /dev/null +++ b/examples/performance/speculative_decoding/README.md @@ -0,0 +1,181 @@ +# Speculative Decoding Examples + +Accelerate text generation using speculative decoding techniques on Qualcomm Cloud AI 100. + +Speculative decoding improves inference speed by generating multiple candidate tokens in parallel and validating them with the target model, reducing sequential forward passes required for text generation. 
+ +## Authentication + +For private/gated models, export your HuggingFace token: +```bash +export HF_TOKEN=<your_token> +``` + +## Quick Start + +```bash +# Draft-based: Use small draft model + large target model +python draft_based.py \ + --draft-model-name "meta-llama/Llama-3.2-1B" \ + --target-model-name "meta-llama/Llama-3.1-8B" \ + --num-speculative-tokens 4 + +# Prompt Lookup: N-gram matching without draft model +python prompt_lookup.py \ + --target-model-name "meta-llama/Llama-3.2-1B" \ + --num-speculative-tokens 3 \ + --max-ngram-size 3 + +# Multi-Projection: Built-in speculation for Turbo models (requires speculator_config.json) +# Note: TinyLlama does not support multi-projection - use actual Turbo models +python multi_projection.py \ + --pretrained-model-name-or-path "meta-llama/Llama-3.1-8B-Turbo" +``` + +## Available Scripts + +### draft_based.py - Two-Model Speculative Decoding + +**How It Works:** +1. **Draft Phase**: Small, fast model generates `N` candidate tokens sequentially +2. **Validation Phase**: Large target model scores all candidates in a single forward pass +3. **Acceptance**: Greedily accept tokens until first mismatch, then sample from target distribution +4. **Iteration**: Repeat with accepted tokens + one additional target token + +This approach achieves speedup when draft model is 3-8x faster than target model.
+ +**Basic Usage:** +```bash +python draft_based.py \ + --draft-model-name "meta-llama/Llama-3.2-1B" \ + --target-model-name "meta-llama/Llama-3.1-8B" \ + --num-speculative-tokens 4 \ + --prefill-seq-len 32 \ + --ctx-len 128 +``` + +**Multi-Device Deployment:** +```bash +python draft_based.py \ + --draft-model-name "TinyLlama/TinyLlama-1.1B-Chat-v1.0" \ + --target-model-name "meta-llama/Llama-3.1-70B" \ + --target-device-group 0,1,2,3 \ + --draft-device-group 4,5 \ + --num-speculative-tokens 6 +``` + +**Key Features:** +- Uses `qaic_config={"speculative_model_type": "target"}` for target model compilation +- Draft model uses fewer cores (5) vs target model (11) by default +- Supports both regular batching and continuous batching modes +- Implements "bonus token" handling for multi-batch scenarios + +**Recommended Model Pairs:** +- `TinyLlama-1.1B` → `Llama-3.1-8B` (8x size ratio) +- `Llama-3.2-1B` → `Llama-3.1-8B` (8x size ratio) +- `Llama-3.1-8B` → `Llama-3.1-70B` (9x size ratio) + +### prompt_lookup.py - N-gram Pattern Matching + +**How It Works:** +1. **Pattern Search**: Sliding window searches input context for n-gram matches +2. **Candidate Generation**: When match found, extract following tokens as candidates +3. **Fallback**: If no match, pad with dummy tokens (no speculation benefit) +4. **Validation**: Target model scores candidates like draft-based approach + +Most effective for repetitive text patterns, code with common structures, or templated content.
+ +**Basic Usage:** +```bash +python prompt_lookup.py \ + --target-model-name "meta-llama/Llama-3.1-8B" \ + --num-speculative-tokens 3 \ + --max-ngram-size 3 \ + --prefill-seq-len 256 \ + --ctx-len 1024 +``` + +**Optimized for Repetitive Content:** +```bash +python prompt_lookup.py \ + --target-model-name "meta-llama/Llama-3.1-8B" \ + --prompts "Write code with repeated patterns: for i in range(10): print(i)" \ + --num-speculative-tokens 5 \ + --max-ngram-size 4 \ + --ctx-len 2048 +``` + +**Key Features:** +- Implements `find_candidate_pred_tokens()` for n-gram matching +- Maintains `all_ids` array to track full context for pattern matching +- Default prompts designed for repetitive patterns (e.g., "hello, good morning to you") +- Uses `fill_tok=-1` for padding when no matches found +- No separate draft model required - uses n-gram pattern matching instead + +**Key Parameters:** +- `--max-ngram-size`: Larger values (3-5) better for structured text +- `--num-speculative-tokens`: Reduce if acceptance rate is low +- Longer context lengths improve pattern matching opportunities + +### multi_projection.py - Turbo Model Speculation + +**How It Works:** +1. **Multi-Head Projection**: Model has multiple projection heads generating token candidates +2. **Single Forward Pass**: All candidates generated simultaneously in one inference +3. **Built-in Validation**: Model internally scores and ranks candidates +4. **Optimized Architecture**: Specifically designed for speculative decoding + +Requires models with `speculative_config` and multi-projection architecture.
+ +**Basic Usage:** +```bash +python multi_projection.py \ + --pretrained-model-name-or-path "meta-llama/Llama-3.1-8B-Turbo" \ + --prefill-seq-len 32 \ + --ctx-len 128 +``` + +**Continuous Batching:** +```bash +python multi_projection.py \ + --pretrained-model-name-or-path "meta-llama/Llama-3.1-8B-Turbo" \ + --full-batch-size 4 \ + --device-group 0,1,2,3 \ + --ignore-eos-token +``` + +**Key Features:** +- Uses `qaic_config={"speculative_model_type": "turbo"}` for compilation +- Automatically extracts `num_speculative_tokens` from model's `speculative_config` +- Generates 4D logits tensor: `[batch, num_logits, num_logits, vocab_size]` +- No separate draft model required - speculation built into architecture + + +## Common Parameters + +| Parameter | Description | Default | Recommended | +|-----------|-------------|---------|-------------| +| `--prefill-seq-len` | Prefill chunk size | 32 | 128-256 | +| `--ctx-len` | Max context length | 128 | 512-2048 | +| `--num-speculative-tokens` | Candidates per iteration | 3-4 | 3-6 | +| `--device-group` | Device allocation | `[0]` | Multi-device for large models | +| `--full-batch-size` | Continuous batching | None | 2-8 for throughput | + +## Performance Metrics Explained + +All scripts output detailed metrics: + +``` +Avg TLM+DLM TTFT = 0.15 # Time to first token (seconds) +Decode Throughput = 125.67 # Tokens/second during generation +E2E Throughput = 98.23 # Overall tokens/second including prefill +Avg number of accepted tokens = 2.8 # Speculation effectiveness +``` + + + +## Documentation + +- [Speculative Decoding Guide](https://quic.github.io/efficient-transformers/source/features_enablement.html#speculative-decoding) +- [QEff Auto Classes](https://quic.github.io/efficient-transformers/source/qeff_autoclasses.html) +- [Performance Optimization](https://quic.github.io/efficient-transformers/source/features_enablement.html) diff --git a/examples/draft_spd_inference.py 
b/examples/performance/speculative_decoding/draft_based.py similarity index 98% rename from examples/draft_spd_inference.py rename to examples/performance/speculative_decoding/draft_based.py index 9dccc2a1d..9e617663c 100644 --- a/examples/draft_spd_inference.py +++ b/examples/performance/speculative_decoding/draft_based.py @@ -200,7 +200,7 @@ def draft_spec_decode_inference( continuous_batching = full_batch_size is not None if target_model_session is None: target_model = AutoModelForCausalLM.from_pretrained( - target_model_name, continuous_batching=continuous_batching, is_tlm=True + target_model_name, continuous_batching=continuous_batching, qaic_config={"speculative_model_type": "target"} ) target_num_devices = len(target_device_group) target_model_qpc_path: str = target_model.compile( @@ -248,6 +248,7 @@ def draft_spec_decode_inference( p_tok: dict = tokenizer(p, return_tensors="np", padding="max_length", max_length=input_len_padded) position_ids = np.where(p_tok.pop("attention_mask"), np.arange(input_len_padded), -1) p_tok["position_ids"] = position_ids + p_tok["num_logits_to_keep"] = np.array([[1]], dtype=np.int64) prompts_tokenized.append(p_tok) # create caches to hold generated ids and input prompt lengths generated_ids = [[] for i in range(decode_batch_size)] @@ -264,6 +265,7 @@ def draft_spec_decode_inference( input_ids=np.zeros((decode_batch_size, num_speculative_tokens + 1), dtype=np.int64), position_ids=np.zeros((decode_batch_size, num_speculative_tokens + 1), dtype=np.int64), batch_index=np.arange(decode_batch_size, dtype=np.int64).reshape(-1, 1), + num_logits_to_keep=np.arange(num_speculative_tokens + 1, dtype=np.int64).reshape(-1, 1), ) max_gen_len = [ctx_len] * decode_batch_size num_logits_to_keep = num_speculative_tokens + 1 diff --git a/examples/multiprojs_spd_inference.py b/examples/performance/speculative_decoding/multi_projection.py similarity index 100% rename from examples/multiprojs_spd_inference.py rename to 
examples/performance/speculative_decoding/multi_projection.py diff --git a/examples/pld_spd_inference.py b/examples/performance/speculative_decoding/prompt_lookup.py similarity index 98% rename from examples/pld_spd_inference.py rename to examples/performance/speculative_decoding/prompt_lookup.py index 2b5baba18..53b1f4e85 100644 --- a/examples/pld_spd_inference.py +++ b/examples/performance/speculative_decoding/prompt_lookup.py @@ -103,7 +103,7 @@ def run_prefill_on_draft_and_target( prefill_seq_len: int, slot_idx: int, ): - input_len = inputs.input_ids.shape[1] + input_len = inputs["input_ids"].shape[1] num_chunks = input_len // prefill_seq_len cache_index = np.array([[0]], np.int64) batch_index = np.array([[slot_idx]], np.int64) @@ -234,7 +234,7 @@ def pld_spec_decode_inference( # export_and_compile tlm and dlm continuous_batching = full_batch_size is not None target_model = AutoModelForCausalLM.from_pretrained( - target_model_name, continuous_batching=continuous_batching, is_tlm=True + target_model_name, continuous_batching=continuous_batching, qaic_config={"speculative_model_type": "target"} ) num_devices = len(device_group) @@ -270,6 +270,7 @@ def pld_spec_decode_inference( p_tok: dict = tokenizer(p, return_tensors="np", padding="max_length", max_length=input_len_padded) position_ids = np.where(p_tok.pop("attention_mask"), np.arange(input_len_padded), -1) p_tok["position_ids"] = position_ids + p_tok["num_logits_to_keep"] = np.array([[1]], dtype=np.int64) prompts_tokenized.append(p_tok) # create caches to hold generated ids and input prompt lengths generated_ids = [[] for i in range(decode_batch_size)] @@ -280,6 +281,7 @@ def pld_spec_decode_inference( input_ids=np.zeros((decode_batch_size, num_speculative_tokens + 1), dtype=np.int64), position_ids=np.zeros((decode_batch_size, num_speculative_tokens + 1), dtype=np.int64), batch_index=np.arange(decode_batch_size, dtype=np.int64).reshape(-1, 1), + num_logits_to_keep=np.arange(num_speculative_tokens + 1, 
dtype=np.int64).reshape(-1, 1), ) num_logits_to_keep = num_speculative_tokens + 1 max_gen_len = [ctx_len] * decode_batch_size diff --git a/examples/qwen3moe_example/qwen3moe_inference.py b/examples/qwen3moe_example/qwen3moe_inference.py deleted file mode 100644 index 3bef3a1dc..000000000 --- a/examples/qwen3moe_example/qwen3moe_inference.py +++ /dev/null @@ -1,21 +0,0 @@ -# ----------------------------------------------------------------------------- -# -# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. -# SPDX-License-Identifier: BSD-3-Clause -# -# ----------------------------------------------------------------------------- - -from transformers import AutoTokenizer - -from QEfficient import QEFFAutoModelForCausalLM -from QEfficient.utils.constants import Constants - -model_name = "Qwen/Qwen3-30B-A3B-Instruct-2507" -""" -# For CB inference, set continuous_batching to True and add full_batch_size,mxfp6,mint8 argument in compile function -# We will use prompt_len=1 for compilation for both cb and non-cb inference -""" -model = QEFFAutoModelForCausalLM.from_pretrained(model_name, continuous_batching=False) -model.compile(prefill_seq_len=1, ctx_len=256, num_cores=16, num_devices=4, mxfp6_matmul=False, mxint8_kv_cache=False) -tokenizer = AutoTokenizer.from_pretrained(model_name) -exec_info = model.generate(prompts=Constants.INPUT_STR, tokenizer=tokenizer) diff --git a/examples/prompts.txt b/examples/sample_prompts/prompts.txt similarity index 100% rename from examples/prompts.txt rename to examples/sample_prompts/prompts.txt diff --git a/examples/speech_to_text/README.md b/examples/speech_to_text/README.md deleted file mode 100644 index 4b091347b..000000000 --- a/examples/speech_to_text/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# Speech Seq2Seq -This directory contains an example script of how to use the AutoModelForSpeechSeq2Seq class. 
(for now, Whisper models on audio <30 seconds only has been validated) - -## Required packages: -- `librosa==0.10.2` -- `soundfile==0.13.1` - -You can install them using pip: -```sh -pip install librosa==0.10.2 soundfile==0.13.1 -``` - -To run example script after package installations: -```sh -python speech_seq2seq_models.py -``` - -Expected output for given data sample: -```sh -<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.<|endoftext|> -``` \ No newline at end of file diff --git a/examples/speech_to_text/run_whisper_speech_to_text.py b/examples/speech_to_text/run_whisper_speech_to_text.py deleted file mode 100644 index d24389e9e..000000000 --- a/examples/speech_to_text/run_whisper_speech_to_text.py +++ /dev/null @@ -1,36 +0,0 @@ -# ----------------------------------------------------------------------------- -# -# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. -# SPDX-License-Identifier: BSD-3-Clause -# -# ----------------------------------------------------------------------------- - -from datasets import load_dataset -from transformers import AutoProcessor - -from QEfficient import QEFFAutoModelForSpeechSeq2Seq - -base_model_name = "openai/whisper-tiny" -ctx_len = 25 - -## STEP 1 -- load audio sample, using a standard english dataset, can load specific files if longer audio needs to be tested; also load initial processor -ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") -data = ds[0]["audio"]["array"] -# reshape to so shape corresponds to data with batch size 1 -data = data.reshape(-1) -sample_rate = ds[0]["audio"]["sampling_rate"] -processor = AutoProcessor.from_pretrained(base_model_name) - -## STEP 2 -- init base model -qeff_model = QEFFAutoModelForSpeechSeq2Seq.from_pretrained(base_model_name) - -## STEP 3 -- export and compile model -qeff_model.compile() - -## STEP 4 -- generate output for loaded 
input and processor -exec_info = qeff_model.generate( - inputs=processor(data, sampling_rate=sample_rate, return_tensors="pt"), generation_len=ctx_len -) - -## STEP 5 (optional) -- use processor to decode output -print(processor.batch_decode(exec_info.generated_ids)[0]) diff --git a/examples/text_generation/README.md b/examples/text_generation/README.md new file mode 100644 index 000000000..6b80442c2 --- /dev/null +++ b/examples/text_generation/README.md @@ -0,0 +1,314 @@ +# Text Generation Examples + +Examples for running inference on text-only language models on Qualcomm Cloud AI 100. + + +## Authentication + +For private/gated models, export your HuggingFace token: +```bash +export HF_TOKEN= +``` + +## Supported Models + +**QEff Auto Class:** `QEFFAutoModelForCausalLM` + +For the complete list of supported text generation models, see the [Validated Models - Text Generation Section](../../docs/source/validate.md#text-only-language-models). + +Popular model families include: +- Llama (2, 3, 3.1, 3.2, 3.3) +- Mistral, Mixtral, Codestral +- Qwen, Qwen2, Qwen3-MoE +- Gemma, CodeGemma +- GPT-2, GPT-J +- Falcon, MPT, Phi-3 +- Granite, StarCoder + +--- + +## Python Examples + +### basic_inference.py +Simple text generation with any supported language model. + +**Usage:** +```bash +python basic_inference.py \ + --model-name Qwen/Qwen2-1.5B-Instruct \ + --prompt "Hello, how are you?" \ + --prefill-seq-len 32 \ + --ctx-len 128 \ + --num-cores 16 +``` + +This example: +- Demonstrates basic text generation workflow +- Loads any HuggingFace text model +- Compiles and runs inference on Cloud AI 100 + +### continuous_batching.py +Dynamic batching for processing multiple prompts efficiently. 
+ +**Usage:** +```bash +python continuous_batching.py \ + --model-name meta-llama/Llama-3.1-8B \ + --prompts "Hello|Hi there|Good morning|How are you" \ + --full-batch-size 4 \ + --prefill-seq-len 128 \ + --ctx-len 512 \ + --num-cores 16 +``` + +This example: +- Demonstrates continuous batching mode +- Processes multiple prompts in parallel +- Improves throughput for multi-request scenarios +- Uses pipe-separated prompts + +### gguf_models.py +GGUF format model support (quantized models). To run GGUF format models, you need to install the `gguf` package: + +```bash +pip install gguf +``` + +**Usage:** +```bash +# With default parameters +python gguf_models.py + +# With custom parameters +python gguf_models.py \ + --model-name MaziyarPanahi/Mistral-7B-Instruct-v0.3-GGUF \ + --gguf-file Mistral-7B-Instruct-v0.3.fp16.gguf \ + --prompt "How are you?" \ + --prefill-seq-len 32 \ + --ctx-len 128 \ + --num-cores 16 +``` + +This example: +- Loads models in GGUF format (quantized models) +- Demonstrates GGUF file loading from HuggingFace +- Compiles and runs inference on Cloud AI 100 +- Supports custom GGUF files and prompts + +--- + + +### moe_inference.py +Mixture of Experts (MoE) model inference. + +**Usage:** +```bash +python moe_inference.py \ + --model-name Qwen/Qwen3-30B-A3B-Instruct-2507 \ + --prompt "Explain quantum computing" \ + --ctx-len 256 \ + --num-cores 16 +``` + +This example: +- Demonstrates MoE model inference +- Uses sparse expert activation for efficiency +- Works with Qwen, Mixtral, and other MoE models + + +## CLI Workflow + +The QEfficient CLI provides a streamlined workflow for running text generation models on Cloud AI 100. You can use individual commands for each step or the all-in-one `infer` command. 
+ +### Quick Start: All-in-One Inference (Recommended) + +The `infer` command handles export, compile, and execute in a single step: + +```bash +python -m QEfficient.cloud.infer \ + --model_name meta-llama/Llama-3.1-8B \ + --batch_size 1 \ + --prompt_len 128 \ + --ctx_len 512 \ + --num_cores 16 \ + --device_group [0] \ + --prompt "Write a short story about AI" \ + --mxfp6 \ + --mxint8_kv_cache \ + --mos 1 \ + --aic_enable_depth_first +``` + +**What it does:** +1. Downloads and exports the model to ONNX +2. Compiles to QPC +3. Executes inference with your prompt + +**CLI API Reference:** [`QEfficient.cloud.infer`](https://quic.github.io/efficient-transformers/source/cli_api.html#qefficient-cloud-infer) + +### Step-by-Step Workflow + +For more control, you can execute each step individually: + +#### Step 1: Export Model to ONNX + +Export the HuggingFace model to ONNX format optimized for Cloud AI 100: + +```bash +python -m QEfficient.cloud.export \ + --model_name meta-llama/Llama-3.1-8B \ + --cache_dir ~/.cache/qeff_cache +``` + +This downloads the model and converts it to ONNX format. The ONNX model is saved in the QEfficient cache directory. + +**CLI API Reference:** [`QEfficient.cloud.export`](https://quic.github.io/efficient-transformers/source/cli_api.html#qefficient-cloud-export) + +#### Step 2: Compile Model to QPC + +Compile the ONNX model to Qualcomm Program Container (QPC) format: + +```bash +python -m QEfficient.cloud.compile \ + --onnx_path ~/.cache/qeff_cache/meta-llama/Llama-3.1-8B/onnx/model.onnx \ + --qpc_path ./qpc_output \ + --batch_size 1 \ + --prompt_len 128 \ + --ctx_len 512 \ + --num_cores 16 \ + --device_group [0] \ + --mxfp6 \ + --mos 1 \ + --aic_enable_depth_first +``` + +**Note:** The `compile` API is deprecated for direct use. Use the unified `infer` API instead for most use cases. 
+ +**CLI API Reference:** [`QEfficient.cloud.compile`](https://quic.github.io/efficient-transformers/source/cli_api.html#qefficient-cloud-compile) + +#### Step 3: Execute Inference + +Run inference using the pre-compiled QPC: + +```bash +python -m QEfficient.cloud.execute \ + --model_name meta-llama/Llama-3.1-8B \ + --qpc_path ./qpc_output/qpcs \ + --prompt "Write a short story about AI" \ + --device_group [0] +``` + +This uses the pre-compiled QPC for fast inference. You can run this multiple times with different prompts without recompiling. + +**CLI API Reference:** [`QEfficient.cloud.execute`](https://quic.github.io/efficient-transformers/source/cli_api.html#qefficient-cloud-execute) + +### Common CLI Parameters + +| Parameter | Description | Default | Example | +|-----------|-------------|---------|---------| +| `--model_name` | HuggingFace model ID | Required | `meta-llama/Llama-3.1-8B` | +| `--prompt` | Input text prompt | Required | `"Hello, how are you?"` | +| `--prompt_len` | Maximum input sequence length | 32 | `128` | +| `--ctx_len` | Maximum context length (input + output) | 128 | `512` | +| `--batch_size` | Batch size for inference | 1 | `1` | +| `--num_cores` | AI 100 cores to use | 16 | `16` or `14` | +| `--device_group` | Device IDs to use | `[0]` | `[0]` or `[0,1,2,3]` | +| `--mxfp6` | Enable MXFP6 quantization | False | Add flag to enable | +| `--mxint8_kv_cache` | Enable MXINT8 KV cache | False | Add flag to enable | +| `--mos` | Memory optimization strategy | 1 | `1` or `2` | +| `--aic_enable_depth_first` | Enable depth-first execution | False | Add flag to enable | + + +### Advanced Features + +#### Multi-Device Inference (Multi-Qranium) + +Run models across multiple devices for better performance: + +```bash +python -m QEfficient.cloud.infer \ + --model_name meta-llama/Llama-3.1-8B \ + --batch_size 1 \ + --prompt_len 128 \ + --ctx_len 512 \ + --num_cores 16 \ + --device_group [0,1,2,3] \ + --prompt "Explain quantum computing" \ + --mxfp6 \ + 
--mxint8_kv_cache \ + --aic_enable_depth_first +``` + +**Documentation:** [Multi-Qranium Inference](https://quic.github.io/efficient-transformers/source/features_enablement.html#multi-qranium-inference) + +#### Continuous Batching + +Process multiple prompts efficiently with continuous batching: + +```bash +python -m QEfficient.cloud.infer \ + --model_name meta-llama/Llama-3.1-8B \ + --full_batch_size 4 \ + --prompt_len 128 \ + --ctx_len 512 \ + --num_cores 16 \ + --device_group [0] \ + --prompt "Hello|Hi there|Good morning|How are you" \ + --mxfp6 \ + --mxint8_kv_cache +``` + +**Note:** Use pipe (`|`) to separate multiple prompts. When using continuous batching, do not specify `--batch_size`. + +**Documentation:** [Continuous Batching](https://quic.github.io/efficient-transformers/source/features_enablement.html#continuous-batching) + +#### Batch Processing from File + +Process multiple prompts from a text file: + +```bash +python -m QEfficient.cloud.infer \ + --model_name meta-llama/Llama-3.1-8B \ + --full_batch_size 8 \ + --prompt_len 128 \ + --ctx_len 512 \ + --num_cores 16 \ + --device_group [0] \ + --prompts_txt_file_path examples/sample_prompts/prompts.txt \ + --mxfp6 \ + --mxint8_kv_cache +``` + +### CLI Examples Script + +For a comprehensive collection of copy-paste ready CLI commands, run: + +```bash +bash cli_examples.sh +``` + +This script demonstrates: +- Complete 4-step workflow (Export → Compile → Execute → Infer) +- Multi-device inference +- Continuous batching +- Batch processing from file +- Parameter explanations and best practices + +--- + + +## Additional Resources + +### Documentation +- [CLI API Reference](https://quic.github.io/efficient-transformers/source/cli_api.html) - Complete CLI command documentation +- [Quick Start Guide](https://quic.github.io/efficient-transformers/source/quick_start.html) - Getting started with QEfficient +- [Features Enablement](https://quic.github.io/efficient-transformers/source/features_enablement.html) - 
Advanced features guide +- [QEff Auto Classes](https://quic.github.io/efficient-transformers/source/qeff_autoclasses.html) - Python API reference +- [Validated Models](https://quic.github.io/efficient-transformers/source/validate.html) - Supported models list + + +### Model Storage +By default, exported models and QPC files are stored in `~/.cache/qeff_cache`. Customize this with: +- `QEFF_HOME`: Primary cache directory +- `XDG_CACHE_HOME`: Alternative cache location + diff --git a/examples/text_generation/basic_inference.py b/examples/text_generation/basic_inference.py new file mode 100644 index 000000000..6340ec725 --- /dev/null +++ b/examples/text_generation/basic_inference.py @@ -0,0 +1,57 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import argparse + +from transformers import AutoTokenizer + +from QEfficient import QEFFAutoModelForCausalLM + + +def main(): + parser = argparse.ArgumentParser(description="Basic text generation inference") + parser.add_argument("--model-name", type=str, default="Qwen/Qwen2-1.5B-Instruct", help="HuggingFace model ID") + parser.add_argument("--prompt", type=str, default="Hello, how are you?", help="Input prompt") + parser.add_argument("--prefill-seq-len", type=int, default=32, help="Prefill sequence length") + parser.add_argument("--ctx-len", type=int, default=128, help="Context length") + parser.add_argument("--generation-len", type=int, default=100, help="Number of tokens to generate") + parser.add_argument("--num-cores", type=int, default=16, help="Number of cores") + parser.add_argument( + "--device-group", + type=lambda device_ids: [int(x) for x in device_ids.strip("[]").split(",")], + default=None, + help="Device IDs (comma-separated) e.g. 
[0,1]", + ) + args = parser.parse_args() + + # Load tokenizer and model + tokenizer = AutoTokenizer.from_pretrained(args.model_name) + model = QEFFAutoModelForCausalLM.from_pretrained(args.model_name) + + # Compile the model + qpc_path = model.compile( + prefill_seq_len=args.prefill_seq_len, + ctx_len=args.ctx_len, + num_cores=args.num_cores, + num_devices=(1 if args.device_group is None else len(args.device_group)), + ) + print(f"Model compiled to: {qpc_path}") + + # Generate text + exec_info = model.generate( + tokenizer=tokenizer, + prompts=[args.prompt], + device_id=args.device_group, + generation_len=args.generation_len, + ) + + print(f"\nPrompt: {args.prompt}") + print(f"Generated: {exec_info.generated_texts[0]}") + + +if __name__ == "__main__": + main() diff --git a/examples/text_generation/cli_examples.sh b/examples/text_generation/cli_examples.sh new file mode 100755 index 000000000..12a426ebe --- /dev/null +++ b/examples/text_generation/cli_examples.sh @@ -0,0 +1,209 @@ +#!/bin/bash + +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +# QEfficient CLI Examples for Text Generation +# This script provides a simplified workflow for running text generation on Cloud AI 100 + +echo "QEfficient CLI Workflow for Text Generation" +echo "===========================================" +echo "" +echo "This example demonstrates the complete workflow using Llama-3.1-8B" +echo "" + +# ============================================================================ +# STEP 1: EXPORT MODEL TO ONNX +# ============================================================================ + +echo "Step 1: Export Model to ONNX" +echo "-----------------------------" +echo "Export the HuggingFace model to ONNX format optimized for Cloud AI 100" +echo "" +cat << 'EOF' +python -m QEfficient.cloud.export \ + --model_name meta-llama/Llama-3.1-8B \ + --cache_dir ~/.cache/qeff_cache +EOF +echo "" +echo "This will download the model and convert it to ONNX format." +echo "The ONNX model will be saved in the QEfficient cache directory." 
+echo "" + +# ============================================================================ +# STEP 2: COMPILE MODEL TO QPC +# ============================================================================ + +echo "Step 2: Compile Model to QPC" +echo "-----------------------------" +echo "Compile the ONNX model to Qualcomm Program Container (QPC) format" +echo "" +cat << 'EOF' +python -m QEfficient.cloud.compile \ + --onnx_path ~/.cache/qeff_cache/meta-llama/Llama-3.1-8B/onnx/model.onnx \ + --qpc_path ./qpc_output \ + --batch_size 1 \ + --prompt_len 128 \ + --ctx_len 512 \ + --num_cores 16 \ + --device_group [0] \ + --mxfp6 \ + --mos 1 \ + --aic_enable_depth_first +EOF +echo "" +echo "Compilation parameters:" +echo " --batch_size: Number of prompts to process simultaneously" +echo " --prompt_len: Maximum input prompt length" +echo " --ctx_len: Maximum context length (prompt + generation)" +echo " --num_cores: Number of AI 100 cores to use (typically 14 or 16)" +echo " --device_group: Device IDs to use (e.g., [0] for single device, [0,1,2,3] for multi-device)" +echo " --mxfp6: Enable MXFP6 quantization for better performance" +echo " --mos: Memory optimization strategy" +echo " --aic_enable_depth_first: Enable depth-first execution" +echo "" + +# ============================================================================ +# STEP 3: EXECUTE WITH COMPILED QPC +# ============================================================================ + +echo "Step 3: Execute Inference with Compiled QPC" +echo "--------------------------------------------" +echo "Run inference using the pre-compiled QPC" +echo "" +cat << 'EOF' +python -m QEfficient.cloud.execute \ + --model_name meta-llama/Llama-3.1-8B \ + --qpc_path ./qpc_output/qpcs \ + --prompt "Write a short story about AI" \ + --device_group [0] +EOF +echo "" +echo "This uses the pre-compiled QPC for fast inference." +echo "You can run this multiple times with different prompts without recompiling." 
+echo "" + +# ============================================================================ +# STEP 4: END-TO-END INFERENCE (ALL-IN-ONE) +# ============================================================================ + +echo "Step 4: End-to-End Inference (Recommended)" +echo "-------------------------------------------" +echo "The 'infer' command handles export, compile, and execute in one step" +echo "" +cat << 'EOF' +python -m QEfficient.cloud.infer \ + --model_name meta-llama/Llama-3.1-8B \ + --batch_size 1 \ + --prompt_len 128 \ + --ctx_len 512 \ + --num_cores 16 \ + --device_group [0] \ + --prompt "Write a short story about AI" \ + --mxfp6 \ + --mxint8_kv_cache \ + --mos 1 \ + --aic_enable_depth_first +EOF +echo "" +echo "This is the recommended approach for most use cases." +echo "It automatically:" +echo " 1. Downloads and exports the model to ONNX (if not cached)" +echo " 2. Compiles to QPC (if not already compiled with these settings)" +echo " 3. Executes inference with your prompt" +echo "" + +# ============================================================================ +# ADDITIONAL EXAMPLES +# ============================================================================ + +echo "" +echo "Additional Examples" +echo "===================" +echo "" + +echo "Multi-Device Inference (Multi-Qranium)" +echo "---------------------------------------" +cat << 'EOF' +python -m QEfficient.cloud.infer \ + --model_name meta-llama/Llama-3.1-8B \ + --batch_size 1 \ + --prompt_len 128 \ + --ctx_len 512 \ + --num_cores 16 \ + --device_group [0,1,2,3] \ + --prompt "Explain quantum computing" \ + --mxfp6 \ + --mxint8_kv_cache \ + --aic_enable_depth_first +EOF +echo "" + +echo "Continuous Batching (Multiple Prompts)" +echo "---------------------------------------" +cat << 'EOF' +python -m QEfficient.cloud.infer \ + --model_name meta-llama/Llama-3.1-8B \ + --full_batch_size 4 \ + --prompt_len 128 \ + --ctx_len 512 \ + --num_cores 16 \ + --device_group [0] \ + --prompt "Hello|Hi 
there|Good morning|How are you" \ + --mxfp6 \ + --mxint8_kv_cache +EOF +echo "" +echo "Note: Use pipe (|) to separate multiple prompts for continuous batching" +echo "" + +echo "Batch Processing from File" +echo "---------------------------" +cat << 'EOF' +python -m QEfficient.cloud.infer \ + --model_name meta-llama/Llama-3.1-8B \ + --full_batch_size 8 \ + --prompt_len 128 \ + --ctx_len 512 \ + --num_cores 16 \ + --device_group [0] \ + --prompts_txt_file_path examples/sample_prompts/prompts.txt \ + --mxfp6 \ + --mxint8_kv_cache +EOF +echo "" + +# ============================================================================ +# NOTES AND DOCUMENTATION +# ============================================================================ + +echo "" +echo "Important Notes" +echo "===============" +echo "" +echo "Terminal Compatibility:" +echo " - Use bash terminal for best compatibility" +echo " - If using ZSH, wrap device_group in single quotes: '--device_group [0]'" +echo "" +echo "Common Parameters:" +echo " --model_name: HuggingFace model ID (e.g., meta-llama/Llama-3.1-8B)" +echo " --prompt: Input text prompt" +echo " --prompt_len: Maximum input sequence length" +echo " --ctx_len: Maximum context length (input + output)" +echo " --num_cores: AI 100 cores (typically 14 or 16)" +echo " --device_group: Device IDs [0] for single, [0,1,2,3] for multi-device" +echo " --mxfp6: Enable MXFP6 quantization (recommended)" +echo " --mxint8_kv_cache: Enable MXINT8 KV cache (recommended)" +echo " --aic_enable_depth_first: Enable depth-first execution" +echo "" +echo "For More Information:" +echo " - Full CLI API Reference: https://quic.github.io/efficient-transformers/cli_api.html" +echo " - Quick Start Guide: https://quic.github.io/efficient-transformers/quick_start.html" +echo " - Features Guide: https://quic.github.io/efficient-transformers/features_enablement.html" +echo " - Supported Models: https://quic.github.io/efficient-transformers/validate.html" +echo " - Examples README: 
examples/text_generation/README.md" +echo "" diff --git a/examples/text_generation/continuous_batching.py b/examples/text_generation/continuous_batching.py new file mode 100644 index 000000000..ec3a36ea9 --- /dev/null +++ b/examples/text_generation/continuous_batching.py @@ -0,0 +1,72 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import argparse + +from transformers import AutoTokenizer + +from QEfficient import QEFFAutoModelForCausalLM + + +def main(): + parser = argparse.ArgumentParser(description="Continuous batching inference") + parser.add_argument("--model-name", type=str, default="Qwen/Qwen2-1.5B-Instruct", help="HuggingFace model ID") + parser.add_argument( + "--prompts", + type=str, + default="Hello! How can I help?|Hi there! What’s up?|Hey! Need assistance?|Welcome! How can I support you today?", + help="Pipe-separated prompts for batch processing", + ) + parser.add_argument("--prefill-seq-len", type=int, default=128, help="Prefill sequence length") + parser.add_argument("--ctx-len", type=int, default=512, help="Context length") + parser.add_argument("--full-batch-size", type=int, default=4, help="Full batch size for continuous batching") + parser.add_argument("--generation-len", type=int, default=100, help="Number of tokens to generate") + parser.add_argument("--num-cores", type=int, default=16, help="Number of cores") + parser.add_argument( + "--device-group", + type=lambda device_ids: [int(x) for x in device_ids.strip("[]").split(",")], + default=None, + help="Device IDs (comma-separated) e.g. 
[0,1]", + ) + args = parser.parse_args() + + # Parse prompts + prompt_list = args.prompts.split("|") + print(f"Processing {len(prompt_list)} prompts with continuous batching") + + # Load tokenizer and model with continuous batching enabled + tokenizer = AutoTokenizer.from_pretrained(args.model_name) + model = QEFFAutoModelForCausalLM.from_pretrained(args.model_name, continuous_batching=True) + + # Compile the model with full_batch_size for continuous batching + qpc_path = model.compile( + prefill_seq_len=args.prefill_seq_len, + ctx_len=args.ctx_len, + full_batch_size=args.full_batch_size, + num_cores=args.num_cores, + num_devices=(1 if args.device_group is None else len(args.device_group)), + ) + print(f"Model compiled to: {qpc_path}") + + # Generate text for all prompts + exec_info = model.generate( + tokenizer=tokenizer, + prompts=prompt_list, + device_id=args.device_group, + generation_len=args.generation_len, + ) + + # Display results + print("\n" + "=" * 80) + for i, (prompt, generated) in enumerate(zip(prompt_list, exec_info.generated_texts)): + print(f"\nPrompt {i + 1}: {prompt}") + print(f"Generated: {generated}") + print("-" * 80) + + +if __name__ == "__main__": + main() diff --git a/examples/text_generation/gguf_models.py b/examples/text_generation/gguf_models.py new file mode 100644 index 000000000..2f81ef031 --- /dev/null +++ b/examples/text_generation/gguf_models.py @@ -0,0 +1,59 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import argparse + +from transformers import AutoTokenizer + +from QEfficient import QEFFAutoModelForCausalLM + + +def main(): + parser = argparse.ArgumentParser(description="GGUF model inference") + parser.add_argument( + "--model-name", + type=str, + default="Qwen/Qwen2-1.5B-Instruct-GGUF", + help="HuggingFace model ID for GGUF model", + ) + parser.add_argument( + "--gguf-file", + type=str, + default="qwen2-1_5b-instruct-q8_0.gguf", + help="GGUF file name within the model repository", + ) + parser.add_argument("--prompt", type=str, default="Hello! How are you?", help="Input prompt") + parser.add_argument("--prefill-seq-len", type=int, default=32, help="Prefill sequence length") + parser.add_argument("--ctx-len", type=int, default=128, help="Context length") + parser.add_argument("--num-cores", type=int, default=16, help="Number of cores") + parser.add_argument("--num-devices", type=int, default=1, help="Number of devices") + args = parser.parse_args() + + # Load the model and tokenizer + print(f"Loading GGUF model: {args.model_name}") + print(f"GGUF file: {args.gguf_file}") + + tokenizer = AutoTokenizer.from_pretrained(args.model_name, gguf_file=args.gguf_file) + model = QEFFAutoModelForCausalLM.from_pretrained(args.model_name, gguf_file=args.gguf_file) + + # Compile the model + generated_qpc_path = model.compile( + prefill_seq_len=args.prefill_seq_len, + ctx_len=args.ctx_len, + num_cores=args.num_cores, + num_devices=args.num_devices, + ) + print(f"Model compiled to: {generated_qpc_path}") + + # Generate text + exec_info = model.generate(prompts=[args.prompt], tokenizer=tokenizer) + print(f"\nPrompt: {args.prompt}") + print(f"Generated: {exec_info.generated_texts[0]}") + + +if __name__ == "__main__": + main() diff --git a/examples/text_generation/moe_inference.py b/examples/text_generation/moe_inference.py new file mode 100644 index 
000000000..276c766dd --- /dev/null +++ b/examples/text_generation/moe_inference.py @@ -0,0 +1,66 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + + +import argparse + +from transformers import AutoTokenizer + +from QEfficient import QEFFAutoModelForCausalLM + + +def main(): + parser = argparse.ArgumentParser(description="MoE model inference") + parser.add_argument( + "--model-name", + type=str, + default="Qwen/Qwen3-30B-A3B-Instruct-2507", + help="HuggingFace MoE model ID", + ) + parser.add_argument("--prompt", type=str, default="Explain quantum computing", help="Input prompt") + parser.add_argument("--prefill-seq-len", type=int, default=32, help="Prefill sequence length") + parser.add_argument("--ctx-len", type=int, default=256, help="Context length") + parser.add_argument("--generation-len", type=int, default=None, help="Number of tokens to generate") + parser.add_argument("--num-cores", type=int, default=16, help="Number of cores") + parser.add_argument( + "--device-group", + type=lambda device_ids: [int(x) for x in device_ids.strip("[]").split(",")], + default=None, + help="Device IDs (comma-separated) e.g. 
[0,1]", + ) + args = parser.parse_args() + + print(f"Loading MoE model: {args.model_name}") + print("Note: MoE models use sparse expert activation for efficient inference") + + # Load tokenizer and model + tokenizer = AutoTokenizer.from_pretrained(args.model_name) + model = QEFFAutoModelForCausalLM.from_pretrained(args.model_name) + + # Compile the model + qpc_path = model.compile( + prefill_seq_len=args.prefill_seq_len, + ctx_len=args.ctx_len, + num_cores=args.num_cores, + num_devices=(1 if args.device_group is None else len(args.device_group)), + ) + print(f"Model compiled to: {qpc_path}") + + # Generate text + exec_info = model.generate( + tokenizer=tokenizer, + prompts=[args.prompt], + device_id=args.device_group, + generation_len=args.generation_len, + ) + + print(f"\nPrompt: {args.prompt}") + print(f"Generated: {exec_info.generated_texts[0]}") + + +if __name__ == "__main__": + main() diff --git a/examples/wav2vec2_example/README.md b/examples/wav2vec2_example/README.md deleted file mode 100644 index fba8d9ad2..000000000 --- a/examples/wav2vec2_example/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# Speech Recognition with Wav2Vec2 -This directory contains an example script of how to use the AutoModelForCTC class. 
(for now, Wav2Vec2 models on audio <30 seconds only has been validated) - -## Required packages: -- `librosa==0.10.2` -- `soundfile==0.13.1` - -You can install them using pip: -```sh -pip install librosa==0.10.2 soundfile==0.13.1 -``` - -To run example script after package installations: -```sh -python run_wav2vec2_inference.py -``` - -Expected output for given data sample: -```sh -MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL -``` \ No newline at end of file diff --git a/examples/wav2vec2_example/run_wav2vec2_inference.py b/examples/wav2vec2_example/run_wav2vec2_inference.py deleted file mode 100644 index 961aabeb8..000000000 --- a/examples/wav2vec2_example/run_wav2vec2_inference.py +++ /dev/null @@ -1,24 +0,0 @@ -# ----------------------------------------------------------------------------- -# -# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. -# SPDX-License-Identifier: BSD-3-Clause -# -# ----------------------------------------------------------------------------- - -from datasets import load_dataset -from transformers import AutoProcessor - -from QEfficient import QEFFAutoModelForCTC - -base_model_name = "facebook/wav2vec2-base-960h" - -ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") -data = ds[0]["audio"]["array"] -# reshape to so shape corresponds to data with batch size 1 -data = data.reshape(-1) -sample_rate = ds[0]["audio"]["sampling_rate"] -processor = AutoProcessor.from_pretrained(base_model_name) - -model = QEFFAutoModelForCTC.from_pretrained(base_model_name) -model.compile(num_cores=16) -print(model.generate(processor, inputs=data)) diff --git a/tests/cloud/test_export_compile_execute.py b/tests/cloud/test_export_compile_execute.py index f1c80a6b0..c2e77578a 100644 --- a/tests/cloud/test_export_compile_execute.py +++ b/tests/cloud/test_export_compile_execute.py @@ -76,7 +76,7 @@ def check_export_compile_execute(mocker, model_name, 
full_batch_size=None, enabl model_name=model_name, qpc_path=qpc_path, prompt="My name is", - prompts_txt_file_path="examples/prompts.txt", + prompts_txt_file_path="examples/sample_prompts/prompts.txt", generation_len=20, full_batch_size=full_batch_size, ) diff --git a/tests/cloud/test_infer.py b/tests/cloud/test_infer.py index 9addc0a7b..e11f69017 100644 --- a/tests/cloud/test_infer.py +++ b/tests/cloud/test_infer.py @@ -24,7 +24,7 @@ def check_infer( num_cores=16, prompt=prompt, local_model_dir=None, - prompts_txt_file_path="examples/prompts.txt", + prompts_txt_file_path="examples/sample_prompts/prompts.txt", aic_enable_depth_first=True, mos=1, hf_token=None, From aab6fac332b4c2ecca618af604b8191859e0ea48 Mon Sep 17 00:00:00 2001 From: Dhiraj Kumar Sah Date: Thu, 20 Nov 2025 20:41:28 +0530 Subject: [PATCH 19/60] Example walk through on how to onboard a Causal LM on Qefficient Transformers. (#574) This PR adds an example showing the users how to onboard a new model and how to raise a new PR on Qeff --------- Signed-off-by: Dhiraj Kumar Sah Signed-off-by: Rishin Raj Signed-off-by: Dhiraj Kumar Sah --- CONTRIBUTING.md | 158 ++++++- .../onboarding_guide/causallm/Onboarding.png | Bin 0 -> 231305 bytes examples/onboarding_guide/causallm/README.md | 232 +++++++++++ .../causallm/example_pytorch_transforms.py | 291 +++++++++++++ .../causallm/modeling_example.py | 394 ++++++++++++++++++ 5 files changed, 1065 insertions(+), 10 deletions(-) create mode 100644 examples/onboarding_guide/causallm/Onboarding.png create mode 100644 examples/onboarding_guide/causallm/README.md create mode 100644 examples/onboarding_guide/causallm/example_pytorch_transforms.py create mode 100644 examples/onboarding_guide/causallm/modeling_example.py diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 260a32f1d..424e9fc4c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,22 +1,160 @@ ## Contributing to PROJECT Hi there! -We’re thrilled that you’d like to contribute to this project. 
+We're thrilled that you'd like to contribute to this project. Your help is essential for keeping this project great and for making it better. -## Branching Strategy -In general, contributors should develop on branches based off of `main` and pull requests should be made against `main`. +## Submitting Your Contribution -## Submitting a pull request +Follow these steps to submit your example to the QEfficient repository: 1. Please read our [code of conduct](CODE-OF-CONDUCT.md) and [license](LICENSE). -1. Fork and clone the repository. -1. Create a new branch based on `main`: `git checkout -b main`. -1. Make your changes, add tests, and make sure the tests still pass. -1. Commit your changes using the [DCO](http://developercertificate.org/). You can attest to the DCO by commiting with the **-s** or **--signoff** options or manually adding the "Signed-off-by". -1. Push to your fork and submit a pull request from your branch to `main`. -1. Pat yourself on the back and wait for your pull request to be reviewed. + +### 1. Fork and Clone the Repository + +First, fork the repository to your GitHub account, then clone your fork: + +```bash +# Fork the repository on GitHub (click the "Fork" button) +# Then clone your fork +git clone git@github.com:YOUR_USERNAME/efficient-transformers.git +cd efficient-transformers + +# Add upstream remote to keep your fork in sync +git remote add upstream git@github.com:quic/efficient-transformers.git +``` + +### 2. Create a Feature Branch + +Create a descriptive branch for your changes: + +```bash +# Update your main branch +git checkout main +git pull upstream main + +# Create a new branch +git checkout -b +``` + +### 3. 
Make Your Changes
+
+When making changes to the codebase:
+
+- **Follow Existing Design Patterns**
+  - Review similar implementations before creating new code
+  - Maintain consistency with the project's architecture and coding style
+  - Reuse existing utilities and base classes where applicable
+
+- **Onboarding New Models**
+  - For adding new model support, refer to the comprehensive guide: `examples/onboarding_guide/causallm/`
+  - Follow the step-by-step process with code examples provided
+
+- **Testing is Mandatory**
+  - Add tests for all new features in the appropriate `tests/` subdirectory
+  - Run tests locally before pushing: `pytest tests/path/to/your/test.py -v`
+  - For model additions, verify all 4 pipeline stages (PyTorch HF → KV → ORT → AI 100) and make sure tokens are matching with reference PyTorch HF
+
+- **Documentation**
+  - **For New Features/Flags:**
+    - Document usage in `docs/source/` with feature description and usage examples
+    - Ensure documentation is clear enough for others to understand and use the feature
+  - **For New Models:**
+    - Test with basic inference scripts in the `examples/` folder
+    - If specific changes are needed, create a dedicated example file
+    - Update `docs/source/validate.md` with the model's HuggingFace card name and relevant details
+
+
+- **Code Quality Checks**
+  - Pre-commit hooks, DCO sign-off, and CI checks are covered in the following steps
+  - Ensure you complete steps 4-8 before finalizing your PR
+
+### 4. Run Pre-commit Checks
+
+Before committing, ensure your code passes all quality checks:
+
+```bash
+# Install pre-commit and ruff if not already installed
+pip install pre-commit
+pip install ruff
+
+# Run pre-commit on your changed files
+pre-commit run --files path/to/your/file1.py path/to/your/file2.py
+
+# Run Ruff check
+ruff check
+```
+
+**Important:** If pre-commit reports any failures:
+- Some issues will be auto-fixed (formatting, trailing whitespace, etc.) 
+
- For issues that aren't auto-fixed, manually correct them
+- Re-run `pre-commit run --files <files>` or `ruff check` until all checks pass
+
+### 5. Commit with Sign-off (DCO)
+
+All commits must be signed off to comply with the Developer Certificate of Origin (DCO):
+
+```bash
+# Stage your changes
+git add examples/your_domain/your_example.py
+git add examples/your_domain/README.md
+
+# Commit with sign-off
+git commit -s --author "Your Name <your.email@example.com>" -m "Add [model-name] support
+
+- Implements inference for [model-name]
+- Includes documentation and usage examples
+- Tested with [specific configurations]"
+```
+
+**Commit Message Guidelines:**
+- Use a clear, descriptive title
+- Add a blank line, then detailed description if needed
+- Always include the `-s` flag for DCO sign-off
+
+### 6. Push to Your Fork
+
+Push your branch to your forked repository:
+
+```bash
+git push origin <your-branch-name>
+```
+
+### 7. Create a Pull Request
+
+1. Go to your fork on GitHub
+2. Click "Compare & pull request" for your branch
+3. Fill out the PR template with:
+   - **Title:** Clear, descriptive title (e.g., "Add Llama-3.2-Vision Support" or "Fix memory leak in KV cache")
+   - **Description:**
+     - What changes were made and why
+     - What problem it solves or feature it adds
+     - Any special considerations or breaking changes
+     - Links to relevant documentation, issues, or model cards (if applicable)
+   - **Testing:** Describe how you tested your changes
+
+### 8. Ensure CI Checks Pass
+
+After creating the PR, verify that all automated checks pass:
+
+- ✅ **DCO Check:** Ensures all commits are signed off
+- ✅ **Lint Check:** Code style and formatting validation
+- ✅ **Tests:** Automated test suite (if applicable)
+
+If any checks fail:
+1. Review the error messages in the PR
+2. Make necessary fixes in your local branch
+3. Commit and push the fixes (with sign-off)
+4. The PR will automatically update and re-run checks
+
+### 9. 
Address Review Feedback + +Maintainers will review your PR and may request changes: +- Make requested changes in your local branch +- Commit with sign-off and push to update the PR +- Respond to comments to facilitate discussion + Here are a few things you can do that will increase the likelihood of your pull request to be accepted: diff --git a/examples/onboarding_guide/causallm/Onboarding.png b/examples/onboarding_guide/causallm/Onboarding.png new file mode 100644 index 0000000000000000000000000000000000000000..8c83b0ac006e90f18f66f3e1282db782048c32ce GIT binary patch literal 231305 zcmeEv2V7K1wzr6*pb|yNIZMt>&H^GJA_9V715J?7L;*=ES+Yoyq#&YzWI-|#MGz!O z6l{r8z8&{{yYpt=+x@xPUH9I)RVSbSIo0ELURe(7D9Omf#~8hZ`d-cP}U*Z*M}A&rFd}JXWscB5=qsAqLC^CDu=DVPVJ1cSi%RF{xuwBp zb5k<}_(u+5V1|SqF|>g@u$v;m?cDtAoa_Q%NZP~(X#s9J!^I5)L;U<)JnURBXrgb6 zKEuP!&dtsT#?;_Oa2wNouY>k;9bsc@3O2XB)xC+K3$_MubHwiO&ervjHiihBoguJa zZh9VBdQM4jEA)p)+RzkkYy-CdUo7EZQQH%q2jpVo+Iig6aA!uRTM4H=~T75rgfV8v({QvGoxQz|O=ItHszQhP= zzPr8M0V}Zc@9uyG)V|Sl-)$PEhIS@9_rmygmj~S~k1#bh*?Th==k65?_}Zx8Kf%MKy;$Z3N_g8BAwZU5!Hn=Blq5#|uJ_vpC$9w0dX{Ig%TY4cTW{(q*k zKX)0)W9MXMVWY&S0h2$k#wDZA#u_`XU6St%*xEVmNif0?5P$b8(#FmNX^gal zo6CM1m9jzFTN)xD9zr_%?VfW;q!k$C0)y8Oc6Ls?k_5N6LxLd_I}7vOt2^(6UbMrC zpX_{h$QEgDW3YSIPdBuCpB>!B7_rO$pWe5v1OJ$v-ENDSnfb?SfvoEfqV>~l?QZ`&e)5|GLP!~!UZ>~gH-`jNmt zBcUueJ&coH5)zDaNCUVX#E8H52?z0Ene6L65*BM3VKcHp&tz9fi%`>fM9+XI;y2k4b*R`6{T;Q-tp7~5xO z;O3^rkh?ZV7(sflYx@v3c8ERaxHDu0|7LQxHR>NL=T8mA-i(lAh1R_5Y$Onn&JN&g1x)veS$3wxnZF1aB~|393+6;oC+2oo3;QB83gdZi~Jx{u;1C5AT9Lm zf#La8M{5AhYXk28-c9e=7?3{y8yzn<&-bQ`chAZ0bG%%8&UnA${W|#9@w{9=3+sR2 z&EL!QZo5&L0JQ=2{r3d#UtjmnXA^*izgyD}N#xI?`F}Qb`f9BJF%0&o@-?^kbJief2rMP1 zo}M+r$VkuB($r2*4}1pzfeF$OV$OD+283!KCwC0qK1=!siOTop@_!(05&Yg=^6w#N z`;Di-PqLOhBm38}o!c=Ez-qrCTo(}lo(8tZWPb{7Zwu1rucO-#)wle7Tfo3YxFy7S zFqNs{j$-c2VPtP<0Ohk_+zzT96<752JVRavp#zZWH4xqj%Hxza68wcZ2qQuJTU^Y(bV{ z~_$q9;mJW|_VwV4K7)GN|}K@cIt{`k%k=zxNyeHC$4>Bn>w(L43y}_;v|-x6-p$ 
zIr%@zv40JfWgP(w1>$ohq@lez0x};6o9{?&Y6O(S5`h3^?4J<&=Zp9u`uKx5|A!>* z`wVZ#k^FlBF|NIkem6)E;NC@*_5}$%{M+8;`>=m6&-?3ygWSK52X`p==Y|cx?*9MK zt8@PsZ9EXqzW?*z#kqb3L)&>3^oAemLOh%xGUD0yzn!h`N)i{Bz;4*KCn7s-8oQG~ zdh*8@+upnXXG=kQy;0vqAwMYk_p18`YX7HbHrK8`?D7M)m%x3e**rT-*`LsVEj;h> z?Jv;4vYVp*xtjb}NC0;E^M|Hlr&;e;)cL<_Dt<*>|MieK*De9RQ@!5>y=rz)b0BCC zrRRa#C(k=wgd%_NNgg!Ha036n-R#ID1-FG7Vh}%Z=Kt-e^XE~(?^d{%wb)w0p|+lH zg3NDi548|V>B9}ojJJ&lo52oF1np!yciC$^fts%NT?43;i5;6E+{TQ_##o;T>;P(< z+4~3V5&8;U;pPL0KlEk)2WIB&RCVw5V12u7L*HirOm+acvV?LfTN9)`XvqZlC*01! zKjN{O94ex2`Ekng{q_2h#=ExwSE-?N_ryHU_S`^mirvFx{>e?34HQr#ci)&C%V z!Unpyxp@BoMA^2z=Mgrhpn|g9BK@yMR(`qGxa~#%7aG=IDmLyJR-PThs-mo{2|_WI z3m1W2d^M|*Mi7g*IT4Olpw0Iumf=6f^{X-b4qo{`Yh3vuNOiw)<>KV|+gewy-FnkE zpU(X!Sl1o${cBL>ZC?C4P28`{y_f3!F3sc?{LvMEla{>{WhXxVJEoSqdhl!UEr9sH zk8c$({mc-n${In))pqa*b@JOnHuc9llpS9DBb>7r!8^r-zV**JG=XQ>= z?c%-*t|2!6uAKSR3;qnF(6`x#g8Yg%_&?i2#JzVmcn=%dYpdR;C0swlM)ol1y}0dv z9$M!4U)M#wQ#Ja9HtHQ_{7P*8gSq@W9%0%whI_>+ZrE;d>Q`$}dwv7l|Mxk=#Pc)s z9P$Z&+g$b6g?pRf|4!kCB;wCMBd|w+eO`ULO<|`!#BQgH*=qM#?GLQB`PGv;-={gd zVZ^`rU=H{Jjvp`~ZmwM&-S1U+zd}d*F?X)Lj@7@AW7!2*|6Eo3RQ zh4Keu_XjN)f4+%>ZF{7z^+Yb7p9Rf(#l!u^ZEx%^WZQPPvX8HOdE~yfTs3=r3+UIF zIL}}B%I$yj^C4f8vwd^?WLl;(NON=W`#b>pQBl5lK|xCWqJpyA-i<&%{_*2KS<3cL z7eS2y|20thR~IgB-XElkmvcAv+wVLfwS#u@7niQzW8(igzyr9phf?h&)_cBWyR8Fa z>poWR#Y6i!z2}(!LU!c$=kWe>gvaR{t3ik5j>7J-nrAOY{tK{ryV>i%_yM%tMliW= zzcsf%6x+VK_v>~$7j_?jel>9CCq;DdhK3FtpgN!+BYDwLcQWqC0=?|>OvUrZnT{s< zVjMrsKycpW^qmhx=h159@xvv5JA|&0fZ2}5L4+nNSs}r#FNKGTemn|Y_dKWQJ;MVp zeYNoVSI7HT2kt*!^ZYC}IxxD>Y4(OBs_o%;+I#gijpA!dZ!Ak@Ye_xiQZRJfR)$fJ z!j$hI2FvHg<4!;E3cY!VjtUJ^Ug98r6zM_52v$FP)bzqYuX(Q0`m~(rxT>vMb4}{2 zHw3!=UT(vq4B7UBevD@x4pZ+u69PScD+5=4aH6o;HG?MID2b`$+HJFm91k2Z7@6H$ zNtXBQ#Qg5a7bx#zx!9&751$!3Q*OU*`%yGZx`U>3;?MF22bzVaOC+vRiO(o3br}R_ zxvbcr0wpC-e23HdRb;J(Tb>zKc-mqO=PUF$D}dGRKF&)Wa)?+P-Xyxzx9lU0#!sQ{ z)#+G5k`VZiT|3_#E=Ya2!5l63FvZ^e&_>6E&~6`5@MNwygJkpS97`ujh~{9qXvJ6i z32ToQVD?j?2M!+JitvqU$wXm2oFQp?63!FBex(RE0-+LVrmy{STJCC=hT+MLR| 
zGvRqcS+g47eyQbXCT0Ov?Wrz!%qriOmVTvoNX13W4-@z%A1X$#Wb4;R zon3u;>QNPrnxj&dHSQHsi@cWGXs3%9FnYBWzwMXxG@6TEwZ`QePnIti3KHm^;BC$; z3Be%r9{r9ZP7F84&UP33xm>y4jhNSYpFRF~hU8f17$aj#^2@-cMjrZIrh|7d9n6n2=AdgGj?ci9Sd7?5wK)0^WFc2}pSwU-r5Vi`MQ$vDve#}jj-^lcZTL8zJj=^IH-qXl_1s}vn@2&NS(j_Z-|U~` zp2VgpJ-18lXj-O^B9^xK3c-|(AKd?ZY52Z*&Yj?d%?`XtnYmUU5GENdR7U@x|IAmJ zkiTk)@hD)xHRuL&$z^Wiq$_N<7WnsV#a50=C9kCu=?pG&nt{s_3#ay9R-%$ET`gjh zLv`GC3OkQJJA7pD^36|d`&Kz}SK_r*SLfuw(ackgJqYjL&G2Bl0qdkLTNKc}>$#4R zq{xzNuVSZT`vt#|67536iL4J}LpIW08qzV!YsET7UAT`T_i?*X^j2PTS}-&h)*Ioe z>2cd@jouigI^VYXfbq~OS>Aa^K3*Fz$xU7MZ+iKb^q^^i24%Tx+RQbR5my!eZrp_Z zlP~z*tW1*&e1BbC?Tob8p#sDDyZOlJjeXms;}ke_;WpO#NSUjAx37jY4}TK;ckeiw z+0Sdgc2eE(d2_q)*>95C$Z@M|wfRg5NspZ#RthygN8~qCGsX;EJDTZzctE4d)b~xp zK5@Kp_Ce-mZe`lTmrEU1B>M9<--vcA9)B!tD;;cS#+5JDyQ^eBAyN*lj*GLDkdV0i z=XGC&;6KFthoZ91?FdMS)f`Z#zSb}GqL%(3(FuMaLEJFQ8=5!K88qFPP;7?XT7^@=JkfZm$h&3Q~^e(uCv99 z54VpEwmEdceXOeS zc*tZr*7}#qvcM)~CG8hlnb=VgMwbcgfjhUa z^;#5=J5J{|iwU#K^|`H0Rf(-lp0IT8?aiPo2@>C2o(V*{(306Ua^14kN?cs76BjEP z;mgDXVHR`g(r~06MgNx!2QQu`7;V45=;^+#8V)z>VLlCACq$pw!c!I3)9fA}1%q8D z4Tkt)EQ?I#?Vt4w$=;g=vl^_=Pa2C0e>Ny;i~G!bs#BFl_fss6%Lnw(jtRZ3)tLsp zO>U84W);jMnLq34?1ur)8f#q(~f&$by$d~-IqDyDzo@$wpb@g%

zJ4cq){Pe|E$Xc+0UQ!;RA`-gn1JR~sQ8QvKs!oJqvT!t>>w?ZDZ|( z?m9(vuo+X`0}Sh;_@!C#%^~qq{X^Nh=TstgJXoH*1ge9kQS5zcqm{DoRNj~fs*48u zov!m#b|@Wnh1b%HI?+U;N=YN}xAc)Z_;Gw>60xal`Ca;`O_^T#!P%V1QgalUr$moh zt$S|5B;fP-S!|*psv!PmCFfw=W3ZPnuSs?3^z#GUawm=Nu2EQ3pgor*3@4IvoXTrc zR;5b^9J3ziyE1*FlPQ9ULzyA*HageyS`stQiHu@)7r?-PtxM1I=5?2fGze@NW34LX zOQ7(2rf*ctfm-2kkmC*PVXBwtgSV2sGF^sQs+!#vb3^d&Jlb$u`&^7<#j#kJi!2xE zIo@emwlZREdU}+$!YT)k(J9gA<5;f9{4u>{sdX&XnnG42Vw$MZpZ-~2q=zd)d~2nd zu!kgDHW9~dG+HMCW-Xd8LlBV^>3BYCaZQnYd-yO-DVf16UvLQR)+TorHd6o zXp(PczE@{*q%H}?!h<7TJk26JJ%ov3rS86JlOM~LN~bH=1kyz_$~RX{jzG4IuqWye z)k?g|B-&>M7py@!&)9kQt+jzj2DDqW`u<{{auYTt&$`xh#oZ-@;(cJIF4H`ixrS*t zV5w#VlOe~Yl6hoReYZ^rJ4R%J5?iQ+CDt^N5tal?^gSMXnua&+S^njcjg`>^N?IW@ zR%@5R6A46_)Ca50H&}FM@60s2Z}ObE?W=E%TUucaq_CXQ>)=*kn17hk!fz<6nBG1H zq0t!goO&fs!X`s}p=Os69s{j5-RIWhA{jG-E;8t<2gl`0KS+q;Mc}wER~b}~#xIqO zN^!XX<@DNE?B#fO?6AyTm`y*dy&d%u$IXi1zIusTVgSKI8t$qUL4lkLBhH1X5hRtk zjQ4#B5aN?rRh91nT{QXQo6Fmp)@yYu2uu}%#0JAm=v8U3Kl>0VmCqyl5*F(o$}!QJ zWT|J!>XgjKO$RhTTIZyzjHnh(Ov-SjN1cDCTS9to2)Hvj0$RhBM{I)ZWy^yz>!af0 zxPJ63W6~6JArD8|^Ji$J6cz#(ADEXA=B@SiFXn4nPR1=#-L2tD7sYLA_wywlJpG)u zky$v5vV6IQBhlBWlFFU&@a#|_!p|5uLdt4x{kD>0HNKzDKZxH;J`bRIbsl=Kh>DP{ zdbz^bdiy5zK~K(9+mBK=li{rP4LCCR2mLXyLXVP_uXnGLHdJ3&X? 
zW&83(`2Z2lqF~&iE?8s9N)A3=@JYtGsl2A`;_ImJh`3p#%>A;t@JtzN50jB6_`3CyXJ8S@9E^8r@2XU* zTjJEWWL~}&;U<@bT41~-q2+rm`*;L~o@&=RFogjJZ{;h8-Yr@T@5xIqy&y9JZ1D{p zBgcfbK4bdahtF_3Ib1&}4$alW>*zcQ6C;__#ctj0@mRVot}ugr4d)Ok{}ONkTxUZ3 zrSM}=rPaDNcznK;B2RcuutFgUk|yKsTZ3k1Mf^S2e}(G7%(xUw<5l82-eF`4WVH-W zo5OG=4&g=DC-Glh1@`rM))oW3sr;6=M*!~C`b)D{Gmm1kh7wg1^DM)j!`8scMo%58 zeDGMli4Bk7b^Ik5`?+$i-LVQYhKlfk%ZOjSG?W<_kj zg_AXwx-U+nydDg$N~*TzdGZC@Fvw9_S3Iww-Oycp^IS5T$5z!LJ zbq(1UnhvqGQ)=yI>E0WZc$ESpwe@!dO@av(vAR>D6RVt@F2oE^`aG}8T75Lp;HeQ_ zn%ju0T0>Dz5I!QvOw(WkFPG&HC@J60kt%pGnMxijI0wb6^TxWoD+=`0=s0q(foz%J zx1oYO_b0yhH-smN^)rt)5lQLy;nXn5eQ9ziv+eV*x&R}yjjc&Fq)g&pfmNE_!-1VZ zlcvfJ#;99R)LXrvbH-4fgk%wuI(k<77fkD8ZaB6p)iJ}Ic&Sxauke|vH3G)jHM ziZI%Bsdyl8$%_v!pm$(%@t9(7s?6qtwIG&~@h*+asjucbQWq=Zaj}`vj&|Z|(VwkL zA*$7)=)7K?#(F=xCwWpUDbJuNe)vI&@MPyY3dfx&lJK!1DWWp%4yCGTV#y>kMON7N4K3r=w7R#l&-MjsDEzp< zkEe@}GL1?acs!E|+$;u1kC&_WBp0pUaVa|T$xM9}3gbV3s0 z9k!^08%;!?hC;f{$7F47uGIy9Dx2}*2yrt4a=!f50mI=?BaAD#XjQvE4 zkZHMK6QtTYigQ|xw2}ouaxVG+6hziyQ2EjNzC!g8(sJ565>C9#DfDMAViIZ>@nCf& zb0SFen>jnBc*h+-g-YfzGsGNxb`Kg|LAgW;LpPDpSldBeeodQ?&iF^VReFIu_-d3L zvO4cf)vAyu<$14~aq<}Lr{5Zgv~m=SwhBsy*{qQtUR1KMu~d)vR`yR>I`}&;jA=Wj zT(G$Z-?+6SoHq|WhR`dl*S5<6{AEKm__X*4kq>A%nq4vKau~SGRP!XtT~C~aqO2Jc zC2YJ>MRn=Wscfq5$}FO0l(-gKKH||XJ`4@Dn0QtYB;_H0B`)dd%W(CD=ip-ov`Ss-yvVn6$Yvwhpcl*4>hmLZ54#PLSx@7p#Q0w#{i|f78 zrnND?9c1K|0`7T7rQ6qVK5HRh^`XkD)JC}tK_JrXY~eB%#O+33k>^-Sr!A4kQ(okY z;3@^-qMxz`R}1>UguZut^mU`o?xFAqx^T9Wjcu_h<~I=a7`V-n0Cm2;5*H49U?}-$lrWQjxHeiwurzL+o|WB zhcBqA8Hv3s8II7dok;T}Y-Z@Gb$dSOgOKC%cwEUcAcwxCzp=6vcfE(1-G|Oh_pF>= zq<|vtQ;vpLA0XWN$l`CIg7}-h67iuu`Kj+epd$|*RwU>ym!-p^j{NKuQWN?Z`%{!r zwZAbCdC5wDXSY$|0Zb0Zm&f!)8~X9e_`I{crm_rT4^c5IZ9cG@J4X}p6u)=`iC_Q7 zp@8+i=o8U7oIo|4&puZkEd?wh#A#e5%UrC9%3~E)KOWffeot+9#I^q_k;+#Sb{m*5 z{EN<3&2Vq&cHfIFzSI|@8E3;T7@eTj@79|wmbLeiunCHmG>+H|;A!&JKk5_lW}R;O zDpH}_;ciCB-0P^gknTmgiY~S4yQv_XzWR_a%~Jy(C$?D*F)!cz4<-tWijF&~X{2vV(4ReN2)R`O(ewvLjL2k$O@ z*cih!%YY6GOJ~;N16LLCjRjb!<2zT&N`STno#%)^~$yzke3 
zIPrA{d#JjE3g$wG`y~L&E`NRbZYo0eR_&ncmjr<5VwGijx8cTohsoZP(AzKuNUq0p z8j81iWOyGgElR&liH^4)*7gYYPri=DJFIMY9%K}pA`@E8*EG{=%GbR~Eir%tR9+{V z1X=jU8jiO#5g!W5H!RHCAebuF<~1`4fGG}Bt?R&*V}dNw_C>>$&-h{s4{^MN`)7mD zr^DmlC?v(|6Cfv%`A|~GuejBm)bKnaHggO0F-!$}seEf=B5{1m-hHiiK)-0+ zeSL@$^&~0iD0sz4DG1(-i$P-U6PTrJwBsC&c_kCN32(Bfgh2U(5#p$E+_>LUg%`1_O4v6kn&CdNiF3VMvK`csu zW-6Xa0`K!aPmPWjWh^(hd8f`?lP_WMPMW#a6+&L7gX%!l5}(i0ihVY9`V*8?TRXk~ zjZg;{a1zJ!d8cl1n(URQI#=w^P0Sez_I*l8pvWj3xep5yXQ_MCixM7KQI>$Qfixe2 zC=3jq_EH##KN&v8@aFmOpz`qbPCa+m<(6PK0{TFi5UZ^9frnE{E&*d(BV8&juKKUU7JD56@J! zxg^LTerJs&qja%n24sTbE|Uh7q^s+aO0kEr-|3ZuHDN<}Qe~(d1#!bMA3_kNTGt;~ zQ(!L|OPpCOC>I48eu7^?pLHF1m6`7R{TH6;j#=q9=dJLyM&Hc(%8AhQoB5Dm$ACgd z#mrlu6d2@iL8awuPWKU-+kW7{m6^lacWK-&~}IAPV^zWP)O9&=57A*nl#K8rha2r&4B@WXJ!r!H*V3L1 zu2n@X0_2Ug4{XYR<0VsF-lX$=9jA9NSGjJA+SK~AkJBz+()0qSiH2~>{+iHu_d>mvd~#`13P@O1~=$5LPkQz5#<%y z0AS_0u@7xeTl^q24h7;uzgVk$Yk8a58sy{@T$=5;0B9X}Mg4v{y4l6Z?&=eyklV(D zUfmiG(Ic~{E02{qoaz;6jztaE^AXA5QXikY_uG;Cixn&K`pdqwK^OoDRU_=lR6@xP zh8+=Ke%W%XY~so^NkUvNwlb3<`(g5`9^r6823V@E%5v4QmzDF@x_FNT(GOpoMCXz& z#Le|33F3_k*4|tftY;4+H*h%acs!4lt=iiih*S-?)m8K^0{uhIDS=rm=00Wu11|F> znNJ%rjx(H^lR0hrq0clWE|JsND9*W(VwAkaZ>EyM>QJ_HyrNTUalI%<=|bzW*r_S9 z7{r#A4eWlGhPv~6kiP_)4n>2Nu$jG6DLq^=1V*b@j{e1NNsuwbp5($m7ptRryA51q- zwCGtBRm!M}uW`OW7Bh~)F?q9IV^wXnh}WnxLM#aLX>~j|mmB_5gw6vT<7T#LvaN*v z4hZ-P^wJ<{?r!jMuMzfm#vpDS)f>Go$N>9rcRX@wxG(`I#>Q!e5Bef(iWTAb1ypZ~ z@-7y$Y|dvbnyfj1a3#@#qzS;H^Pp{G?#1D#F+83gm|+r`y(eMi&2GWigVlK54p9%Q zULsD6K@h6ft&?&g9>lD%!bX8ahRaAHejz@iX=@RrZA@=9t0e$W6=LfVA&+^gLa#ov z6(VB#bf_@M+u0zSnQ2Sbqn09WhW^Ud2vIZbD zJlf-&%3o?QU!tV#-l9Ezvk4o-kTJT}79g%4f!=a6%riGmxUf-^$6tlCMGxVrB?$D*Q8IhoOQ0)bMN7UQ#-^% zALL3MgZt+q3Rq>jo(SNbJ7m}$!0L*HFi4QO&)4LXr#c|CK^=rWEw({!@mjcU@_N4y zH}%aE@t%bI=vBN^`ELc8YHQj1mBcdjoS?1AKow$KONSwOdqh8 zWN4tYsiURWlp34Y7%3L&7Mq-UFJa>KVhxWNH_AV_Ejb{|VzoIbUx~0u&5_UCEC09; zF_!K!ulAYj*NM#PHRKwO!^EAhbuS8W#v0LR%jiU58qZYWRo|-09+7P}CM)!Qd0&W% zE-L!*{6lHiDKkCNcu)j;|7iJ>Tg@IQQjT1X+okE36Fj&yQ9A&kVP^KLxK4a3ti(RK_MtT$Z@e0iiL(tqmo 
z8aYFWRUVO5>mi9EoKW&=eh*TM^RZe+S+WNPyBO*O2z8Sp#IIBx@R?7j#vT4J5Jk80 zTB8gfRNe&G?q#2KOkS+jeQm^OI~(6?sK4OJDDu!QL2Wh8Eh(zeW9-=UZ=p$vaijS% z)#0ufN>gd6r5A|(9#BQid*mnL^xc?vM%(=H!BsgJ$+@wQ4JJM5$oHF^rPH#ZGNofU z%p9)r>?Ic#h2j;2@qTLz<`oW%OGZ3j*Byzd>TLiGAWhfb(3 zXs?7G;tmUwvFg;ZedFckklphzU5|ymnXaMgla_)3iUTAWn$GdtBOpe5d zIte)UYiuI@R;!tU`-eM|2O7wH;Fy>1T<>o$OYLFl@HgQl)B5atfZXm8Ki&O%?nzm1 zR^NTXv5vR?9OZfX#NGC%=a$1fZ>^4=;ICOt5ks536?YLcli!JfRD$Yl^K^C{1+DAi zgE!Z&9=wY|XrQ9LSg^p}y7kz%R==Q%YE5a)HToVMrW|`^p(P3AW$f320G#G7KF2S= z7_#o8TKkCiWj8%4+{H81IuNtsV~nbt%t=?R%huKQrWgE9N+t!>*}D`9b>w=s^zMQQpI)CQ$9q*jLM>S|cGNejk9~tON#rBL3{#F(9EDvEOVlxNBkAL)M$9EQyP9ZYhVSQ{##{oW1BInCvBS# z_20LOF)lVdn&=_WRj(Io4G>*xJ2N%rgPEx+zTBuQ{_drkis#A2nw+P7-NX!1sjr)_ z)|&IJp}{UZKE0R?50*vaGB;R$pb(yyg1M&Dz?a6re4!X;)m1aj|GF6SS|yqxGyC~lw9gj zctZ2iz&7pA=d{AxkGoprS21mLg_I)K2&uS>R{OBaVDe+2{nDPDcf?;u$ z+(ru#MUYPF$9VI+`ACukBVit_Ipbs$)9uWgF>^Nh1FlYEzxZ5eZ~z_&nLdq4<6<_K z3jrtyiruYv*p!o-^O%C4N{M?gJmNmrF+P*R3(+%Y$yu8y%__9cz+Q8um(@5QKg&4q z8!Ft`gYIBW8BU^#p@#vIxCuu=d(O8zYpj_GPne8h@g=Tmm3O^KU1&2esJ1+HeToxA z8b$zU)t1HK$vU+J8h3;jIb51VFv&t_-F{V?`{85xV;f}7 zZD*X^@6uj;elB%fl;Yi{lGEeS2>MPcQ>=Q4SH~~+Y4jQi2egV0<8ja&3}C)udZgzA z1Oa2JjYVb~b4y;L?~IDrF|G#LUK*zboOOz6O1!_Y)NsMrSPVtHdA2ZkjkdIMD$AlozL*AyOgRwCIu5Uy_w*R>n@f<$IcC@S2YAxuKIs_>=Qtui zQP9>}l`1N3T8QtSlI*;4ctbUE{-vv?tDH4iFD`q{X#16GQL&nfqSROYiIZCU<3!>d zdWVgjSd>pW>(61xf*OcDd0rjMl)$ScG%VYaUb~wq2)#n`EXI)JSd1**Hh(IbM6}?+ z92EbAc8WON>*&eMaaWGqa@DdzHGWnsv#mkCp*dDvaZ8~UeafZ{?^3gxO+MG=J1SgY zKWCk_G)W>B9oeo6w<|GU9nZwQ zRO8oqPu|0<-tpB1$7+qUS$uwT?@Em|)<=tmGo9lq)7T5Q3Nrv9GiFE_{WU(HdyJOB zng)+&h-oyPyd}>JFmfa*pjcw^ph?S&(CQUEn+eC4)>Yp6;X|DPJ1(r|V&k z=MF~NoXl;RMKb#ns#aEqKA|KokJ4gmXsxz4bd(7R84hZHZ6p*gCQxN*+6r zQ~waqu0Wjba@{8HT~C7CwBhbLn>EF0Va2sM6;ZlVhF6oO^=t1ZyG{BH<)YQB=D)Fh zdmi@s$h}@%-66vPY2P8c)=V}d;fiNXNA9&b@?(7^V`o;nx6?pd@<$Rww>RRBsY86l%J7!EuM4jQRD;;~cjG zUPdJ|!kntabh`6rQs>Dz4*#Y}_2>zyGNIzo*i}3em@X?hlUtqtOEH9#FleXKKfAh6 zN?I0rCht(o8Gg1_ltEh6xHj(+u&k{4OAqEM8D)w5_(LCgK4CqA&ODgVOhizg#A|-4 
z4A`~B*HrStA*qpAUG)+3$n(SLj9}6MP43ndqj~e_B<(^F+14tO}gR=Rr#1(-Fi~(r&VH<=)CAXW5c*@vyTVa zCHta_9Vh}Np)FxiHrRBD zSi|i|%q^U$JT_#DOVx-UN^Q}_bd(HiA))c9uWzRHnHk=)zS*UC{-dJSD{o(z!l0d|E1{xeGBSjqH|E}Qjp$`3V!SAr zFh*ka{ekXC4DpEKoId|#22;JOD!v0`G2Uv^9u}xnibC~N;+F3_yj()<) zXocJD{dv|!gt;rnBl1jz9uv==)qi5Kb{q2x!-mk2P#Uq*F{?N)1`oARX88mmv#5(+ zm!Wm#SLqdqH!cw~kkaKcY*2rZ&I@Cuqjz}NUot(ZB|P_vR%4L!$uy{`^bQK9XQr=~ zPF;VXH{`7El8HxA!{xo?7460lF}Gpl-;HhGm27U?mIFlzJp4+R9ZxoRmRF715oOMj zYTlJAzsQ%Zo)UQE3Xe(6nK|K^;MPs}I@zkq>d94wb=I;^nu`iydFFRVBVE&Sri$82 z^?qle)Z`rLNAIA0SRM)vcFw*VB~>z`gn@tc9{F~w;(_KyyCQF#B`)oIM%jfy(R%#8 z+{JIWMTB7dCPU}5-H*SlP8W(hU)G4th*1|tuh2)C!cR~9p$x`jcb9Ult4?38`f5P# z>*Inn7MhP(I!vQ*pVF4xHSkJm4&Y0??QsZAMk&A7vbc5b<={e_N-+89^f4U;5h(^% zqnd`++>|$cS#abz{6-Of6RLDSk)X>1`r0ZigX$gq=3hpH=Q-SDyQ1TlkbKxNA1D`f7gIke1+9s0lsB zu_;cUO=V_&%{_y~Mt<5*-L-d6%Yi8t0Fz7ixo*0#3C7TgUDfHaYg1ArZ>ll(v0h<= zvYm++25pbLG&bFMP?9pf^@7#MTCOpSMn!$TJEN~36p-j^dkLE>yNZs0MKjffXFU3D z{s){#t|3D`qr6SPGKvHijWmPP0CfF7^wmo+V$h)kEc06w2+Eq?g_e*hI8rxh9=;M= z9U~Jx(U-g3av${Euyf&urh887gLH}1{*yI?dSpp*Q2F6(bK?mQdz7<=l9$HoJKA3? 
z-uj_a&Z2GCv}j4>J4}D<%|GWQ@%mLG{?$d%6r>(aQP-&oH+hq*@MJRJ4o zsAbmSnxBDnj*Py;(_ImBUo6pZ6@G3R&0u|Hr>15${zFae>a|1iWp^1%&vwVwQ^>bQ z+zX8u!>f;yr{Qmsyug7^J1ZE)0P5v64qLCNlsx?d66oj}a^3iun;&INrEQ->Q2lYQ z@(=zuokO_76`Kig5IEq@QKfnbqu1}9klMG_2^}@_ z#KG75oko|@-doqAgzTe7a>wr>FJ(s8YRZZ)jR=$ru5yBwRafJa>O(dTHSU%=pRA$0k5AY(wdADmUs20FQ*pTxMRj2x^->dtPZ2V{vNv@;cNp zowYI&BVhi<@=E{X8L0!4AcTL&q@SWbRO7apH(qZ`p!>x{&!(O}n?pig6m;Km*uQF9 z+X`|hq3r{$`q;ssr@0b3Q4r2xen%w*3Hxvpy%OrKc61Xu{6+N3we!=dsdI1AX3pNT zbSUoi(g1yrC{QDHGH-B#J!bZPyprx@5~Di78JK-q={(ik_F)95y}NzzO^4HW^~_6N z8tR^pQ?)Mm*h3Yc?E@9mI8O3arzSrS+k0xLTcjrXw54w7i@RU`a?!DaR;hio0v+c# zRDP(eXG|b_2a0j6q#NTJ<#+Jj^S#rgO$XJu?E@09FYRho1{CvAuaBfGJz*-TopK)U zFkelh>uGRFTimMy(ZbB zKHtve4{dXj=5B9WUqVF+6aeyX1UQO=B6#3dsd;zK3{*lsQ`Ug()ww*Sx^xF#$?|@w zmK1rM!6S{WsK}Tq&Svfr{D}!7`wt_F+tp!-YUt)=;l#gO8`Pja0zlDxKh{#_(%V-A3hj zhkhq(@s4ie1af1>f^MUj7v6Cjnl&?ypFh0ygBpI1!_I2xSj)zSx`E0$(RI)`^4k$+ zdn>0CS$3RL{`JBYDWX%R;m$|8(5(Rw%U?2y;PA&L<{7PVMd!R~!i0F0M_S}WnH+I<3* zR935>QtE6}HL&CB?`D>~nTxT)YC18|o;FDvo-)2t73-5TFfWwzaS{}ee1m#7KmmLv zk+78MGx@_Swa*9Oi=1a&3+t#r2Tk0%R3x=v`LXen3ZyTCb=vzZ&97>3^) z=`d%Tu%`23?fo=G!WZb+#hEhxZo{zf48?qD0yc5S1Xx$^<&LG9>7gE#emx6NA+pB& z2=yNfSg+mz#aIAK#(2E06BagkO3gafVBO_inz^!8xk9MtVm+zoXpFS>*OrNK{-mf! 
z!vEf`t#<9%nUlIR2&a{~;f5;Gf#uq+f)gv3EuMFrx^Jkn`Kgj6vxNJM60tKoIN#$X z7PI+0J!mMt*|v)`s(@pNv_Xq_>G0HS(fV9NRWUey^3YYhS$F;Fe~EwZw6TI$zsqyA)yBm$%izg}E_gJx?q>Jnk#8XqPQ;03Jft;@mex z(LxL-zTgHdLb+eilzrzF!V^wZ4-S)y18p2UC)7kW`uN7<)ge|&sOmba#2+#qEWPB# z;X;jV2-SUQ3|AR3bS;iecaUM4mV{&tgS!?4;KXI^hw~+S!_V^5JFB_=6k2 z(rE0>D1yQiur8aQXmf&j!AhZ4z{INCQx_;=ms~1`TNOF5GM(O-B`WC~FThz;>iUel>3d6&n}m{HLvQFBAI!n4)GPhPFwo9YhMC{yS7^RT z-&}EOUJOlc13JMFt5XOHxkVQSs$6mXRBp6}oiWXMZ3{4rJ~~;UPsdPlg|P9NQ6UDt z#U8Uvug!aIx5Pr1(z=Ynf1&HC_mhh2YVNW9a!tmpRT*=L5QG7G^D)AHCgGLzUx6nMyor8+AF1dDtlM{u!QPpHC>jK1TGqAS<#GrL;n;Ly`5^UBE$FdON|)-K1;lLq*H@-Vs1sKZIi5A&b(@KX4|I1f%g--OLtt)UZ$xkwW7!lhmM}1M{W(L zJJnga9j14?rHZd5;;|I$f%#kY!Iq5FQuq)%UwZG>Fy8uLvCKWc{JL94mCVhom(|O- zQ8m?gCm7npH*YJX8_IQ&x*l@HuRKmFjXTP-0!vtdg%l7s@g+`z)*aU=RcjqoIGM`o zt@hxKVHLaIishKDvaAbiTo>Ey8{sd|@(r?~NW5aw|*u62B`B^Phv8ecdJ_b#uI zu@OqGO|_mfyEoSw5%B-md+&cP-}iq!QbuHEXOj>j*?Vs(JFAS)pvVqo?@bc2vqyv; zB-t{OP2_=$%xt31aXq}A@Avcd{Rh6c+voQAp>DU!^|;RKSm$vb$9bK{{a8_8+;sAa zcbK0}y?!)rgnk2uw~QaqoY&&LC?@E**obdS(w(KI*K&p2%>As)*{D0Y{AWp<#w7%) z%n0cT<(z92x<8-BD@J zuNwL51=MmaIFp{~CbrRYVtHw;-Bqi5yFh7vK7tpoW`nIvdK%gsql+Ysl!LT zwV|4A>zx`-lv|QpTLTmhDFx*vQ*gYGLTQE23HFI-?EB3G7nchMs?u_Zwn~1~I?QW0 z+ZHG79*?iTby>j#PA0uCreMD^a|p-YB$bL5<{y7Dk`NXRZ?O*8XHy*{L&Jf1(?iwe z#rIu_8XSHd;-5aGMLNOJ+adKN-iY+R8VHe4WL$ecGAdI=*%>@sUrf)iNshMvvx<5!yV} zc~*3PR)!Z9`=mSB_w_enilnBb2-C%>V4S34bUrJ}&9z)os<1KJ!O7ez^kL40ON*7) zX2Of(3f5|rUh>5#Oi;wVC#jdy`KDhqsai;gSio|aXQW5=p#X1R`tIVrE}t|Z8(3Sf z3?)vp+2$Ii-uUK|gkKuZ`*M4N=jd*T)`kk7H!sTZ{7Wn@6W8<-?aKyFF0qJ`D7LYf z#`SZ|yV&1!n_jQniprOfI((gaU2L)f>;7%>{ISo1`Nwz;%{)F)2M_WrCAwJ7=t%?- zakx{wX=kl6A^SpTXcuL4!$QA2)qnkte%*-ztr_1pxWqZcv)^R)ywug>#^^*H;y-t2 z5o*n_`^QRt=A93jgUC_*J{$maUU6;@xuEWcD|*y1J$I37?6z8sEZbGrj6nQX1+F56FTTg{u2g_B9wv`8LYta?=7VK7Ew=CW5A z9_wu%pMK=ilCM#YdMm9oOS-{nLV4IGeSB3*U!Xeop2E_(E)F?2rI(JXO_Z{_RbO6P zhX$YGXz1q(wcNR->_$ZQt6){MXGHAn!Fg`aWAQjV1)h&vIedCbv9aIDFZUd#Rg~M` zKBUbnvh$*ueH8KJqmkJAD}UV+mo$vv(5Xoge981*S9>jb`1<$uPxpjx>gnRAJ80A$ 
zCc8+=X9MP*xjWk$QZ`haFV>t+xW1U~*o4omi0+p$yFM_>aTCzFZ;#j55-QQzsA=1zlzv zNVkYKZm@QS_-b@qe|+dCsoqs4czy?6=AS^cgU7(4zQ4RLGC%ylXA(D)K!HR=_c2Lg zZ~1)qk&S!*OWCy%&NW4kQX?s2%0V?PW^xVd^kiybwBGQuK=_>DSq(8~ZT{ul)?S)R z8?WhW-@Pwd@_uXFAgMNGD3M1rtrsL6=`{L#vOp~Sj;s4@;M^OHXC$w!qiinxWm^bN z%eg8Xa`oWMj&#K`3YgGGiMPz^>K*lPx=;@VF1y~byS6KrEcodYLm67P!>M&hbM%V7$7@;im9dYe^Jkmh_V86Gr6+Z5j zwTua*s`3cY=iKFea`tz<;^%c|Rks^Mv(DLbfg(vRLpqiIA^9|dua>*oiO(uO>%~=l z#>f`0WEaNrOZ8mogevV&&mSr7roxgVEn~MFo(Gq+=+8Tek=){Ou}rtUQA1pzm?%MN z*^@Pzm1h3_=<<`!=g9(TKbIMh5+rL4lZUl)eLU`2C@MQ`8dajei!Iiz`<|jpuBfFk zZTMcg^zG)9gho71?z&nR0nK0C*|+EaTDtIzb#p#7SpK&8rB1J@@IFhqJ=kVqSH(f~ z6;vn<&E{kNm+fnabA2RWB))_(PtV=WsX~E3{qJ|L1`Ov{i)bC$&P`PRnKyd-h%2wD z&@!ybPUOL*VHHxo9nSJ*=3cd=0#goDzivVS`wPAHF$w~SyQ#CZZ#C?UnEysO-WiIW zQ?;_XbJJZ-i$8yK^NXXxQ80fQF2Q`FKQ{ex7H7}1?v(XTTjx!0*Ux4lmgj9ox4(tE z4OUI55|m1O;1rx3q44xGL^r3=lHH1ycu{Yx*A-Tad$`n&6BO4ukJm2N!4O~3O!m3Q z_V3v^vUS?Lqp(!N3UIbwm!3tQnkTeP@qS}{dGeP?*O*wEr5+!=k$p*~e9`2R-jpQ@3@yX< z`MnA6uy2uWgLZ9BhR*vFu(0wDR~lXqF!(&Q6gnLbUjQ8jRCA20A}MX={4Do2pe~fiC_6%R6)!lYE>mq5zWb#6UxzLc;gl!opV1^q>SqMe*Q~fiTa~E zurAs?avHt)7)Rw8vntr_*=?wJ|r4^k!T1rE}2lxF+y@d|g`60CjbcN%yS*E>JXBq($JTG#gU zFtj(ezJzMCx;^*Z3LWmWW-EPYG&hobKQza}F3P~eykIYGz<+DaU2I>%s9nT!A#n6t znQPv%q~;vovuc*tCi=%)$;K!iEi2V4WJII(^k8yf;P9hQB>vd*4V5OJF4H+Ir!&Qx>vd{ zsBAxzu6@~wo#Q4d65+7kiRr(Vw|0fpso3KQh;nZ`fQz!3e-yet6S=S=T zJJ>7v`_I&%Dp*WrU(OYbT~=%jHPUOl00mpN$ypQE(2DiS-S#SCYqO5$@1mes-t|>o z^xHVAs~UvABd$i){fW!x?6OY|li0x*czfMm|FZ;b(j-;v1cl(P{)6~ zn75XHyX#m|x0RlWI#JNd$3iZj8hhyL7KXe zcg(Gi&}AW#aSq|%nkv0Zz%v}W7`fdLx*nBl?Ue2MQ@%NvE%DpNUqUH*_xZ>&7FNp3 zPe->@IW8r#>lmEnuL$0tQ}O)cck< zk6sq6tcA@SH!hbp{M=Q%7%xewkHm#OgYPw|^QogdjFW61&^`U=6q}I!Bnweyw^v+nMRXwJ#`75!n*y|} zQ=n+9%gXyshSuUoM*IEC*VNrrPHT}7OkP@z$l4zpZn(}HGt*wne$_9leXX`b9 z-rE#{Uvr*v?MS+;6v2Z)Eq4uO&JpHnd)sm?2%mN+Eni|ql zi92d%btW(GBtF*R!?}EEj-WOre%{js}Xv9iH*}N7AXwmm&c2? 
z4thZ&)zGGprxBzb)SY%&V1wx@kjC`xkE0qP+`T}Oa^{`L_eKi!}V5h zd;0TBLJ<-1fuwwP(k~SU{k$S0`)Pp_QvE{Nzm%hOh0h8dXZt!%ev#z%$y z6kOTIP5tI6mk#?Bv3FCOWWBX79@6~eG*CnpbQ-uw9g#4;%UM%(X=)gmX^rJf^~j91 z3)w0Q2`va7F2hqbGKq{UJ0t$-tF4`jlWx&@pHBJNU_8;gx0clVKgf;85e8=S(Qmck zIV+e??q+XZW{lGc&YXzc++g0YWxU8ZXPs)rNAdO8Yt68HP1{<1jd0HIG98QjxiBYZiHN=)DJj+}% zolcRxJ1DXv)Ok3A8~>v`&)C zD&f!I>o!)&bFFV|8b@IJHA_B~g%#iO;+w`Q5wc(uvWFseUW6hFw#sNBdFW5{XVII| zq8Zp~`J(MS#^c&#$9fsNGFK#VU*k{NLEfh{zldKqO@8=SX8T*eT6~d^D!%+|25QC0 zaFb*OuavjPMewUc(@!HGi%%1;$kdLM%DnC(tK|1Bzs5U?Ypo>ngQAJcH8)T)U4WU% zFfQt~Wx7Z&QmS;fIq@$2bj$wC*jibM#lV5SXkCkj?`0zKZPv_|XXJ^Gb?3ck$hA0M zO^1+0|8^ho6j|LL@SNDu=Gk*Ttwryx=Apbm7aUC~O=i*}tXQxR^cy;z}Urt7VKX+p39u%9hugy4m(@iJ-LLclqvJ_{PPx@~Sq_xFtzn(= zH6fqfIyO|9d*T;?4uz@b-M2cVmLZjvi@9BE;)o-f1t17?Nm;Kh76_R-?DQ1TZgoYZaLP= z%lKACfClArX!*JC_DSuV4pRGu0Zn_gH)rR_qE<9eB*pc z(pFb{Nth+NVY|A6;}s9jMF(ejzBG62Frn&P94fp8OUCxJ3+;Swx)qH0$2_S5o?9s85(Js6zPNaiqvJA%hh0HKCnFvb^ z&yAf0VZRnylTrD$V|)GNvZhR)$D$5b8uXq=c z!tlXiyG)`R&)M?6H6Oh?lTn&jXVB=YK{qjjybg}o)Eou2Z|;v>)B2HW(!sv7m>SBe zXYNing@0{zH6#rjyr_SJ!`4UO7atH_FSPuT+9k@6n|PN4-g+6UZD@yx_3GT36x1?e z>N>shFg2f5&qOO9-_N+jd`A6_VMV;|)Az^xkC2*I|5Ny^?b&Jr+}69P?JrOOg2+z> zqr~t>xRBvDk9cYcaFbzeZ~q6*ZsY&%&&dCeyZ^zi{QnL4@Z(n{x_~aej}o_`D%P?i zmYrzrzp9JLlFhOAy7zjKe{tS<_`(tNi52{#D&)OBps;N^bK^}irM$&&f7?Os|k zk~{Rp0>tOCXim%W%r2gB2#+DGi4oaU1{3V|5su76IrH}l_gI{Ik+o2e**yUs_}+Q{sofzh0QPi zXnaKkh!FlEu@q3?GmynFcp!)Zhxbnjxi3zg7L)aHZYoCxAka3OJ+Y38d9(fx&JgBp zeN@;*ICn#sgSbB(e;72i7h(RT%@{0LB*xr0ssArX(n=g%N_l-B(%^M~4Eb{Q_mLUx zL^HqV-GA{DNvh+VARJ6uicf2QY-!D6&MThEbpO$n6L0_oSFkJ}=Xc|ahBeJsa??VV zn>?YV2!I=pd4pGz_V&)dxF<_y@RNuM%xw?*xaIDmoA_{s(XX z8M9BoFWxY6ysjmewuH1ak?2w1%HynCzM7<{s93oDA5q+Ued-1;StqI04}Q;Si)Mn- zAx+11qOh}}r|vE?-{2?yUpBDtonOnml>W8r5cEE!$w4G*sMkXO)C+d(L>oL^ocpgH zRBvQ;6_f7<{)=2qyzF8@098b5{xR633Rlvf7NX4yN!7Dyl)xixJup--Jn%X0Ccdhj zO@pi&|ENa>@7!|&#sq2P`~7SaS<@fKE)B^iH{1qihL-d?Y7lfcYW$g~p#OaW6e`y^ zXw+>2W|3<7%T-04l=Ke0YI2TsNU?Mu-M^4px6)*cO%wEubN;$swiM5WuL(g}t{5Bz 
zzev)W(e8(o>dt`M__WB1QW>M>0|WrCP5*rl!k32(T)a@K4n;0(08`c>khUumGkoI$ z%;v1h-x?T^B})Lr*DTE;4(7@b2cSk2eOFmA6FegpFcxPJ9WnXUvSqBNblFttVg^iv z0c~FOJU>BvV!|fiTam{QQKta!%^51=!3ejH7T{YPIO}6D0(ghegst%C1QYo?D}ZE~ zy)tA$UhK^Q7f*hfxFQ!%5Y))F2!}yL&R>{-KEmrMiWrzbziFWTGmU#7W$_67SO(5#m&-cHP z3o&K5sO2b^Iidape5U?HeeV?E9;NCUD`qyl2I#t_TTcwB5rXS4!9_hj9)fDu=tZXE z)@}R2m>aoMsRpbRUa_sk$kPINNjl4d8Nd`7K-a;L68Ra~JwRY86pEiz!3fb9@Wbq} zGT)y_ew9?m&81Y62N9y56uFMucI|fY2joH&q?R(%T4j%rYZDFZpQY<&InjwDC~`Vn z;zTRCz?YEZkd!KB_*^g)A;l3XHOy5Y&`YE^i;rQMKMdqsIbxq^gs}1lVdc@du^@&? zL`5!}Cho|^Yeads;|!TjsDA;}6NOHmylf0Ovk%MOJ+W0Sg!%$eo^XWVNrd`I92{i+ zSV)0RepZ?mBM{PlMu<)Z6Duc9$N*l{PoE6Hiwy8qh$jqw23R3VpaojBM<*ztR{;K~ z>eN>gM1;{YyLg;}o?7xVxi8;}U2-ugS zQrd}8egnQz7#z}1hQEo}Jj;DXc8td4VZ=a|-zO6#SrO!GbYG|oabyR&C!w}L|=_Xh)(Rgj2>bnPYt}}_?--pgs?wAo2Ups18ha$#GWWe7~?|y zDB+&5@50GCnc)coes>^ON4U)Bg$wV8-oFrxYKusOks7Ds6ohsfg#AxSmF*`wNd@~7 zVeLIJ%0-}EGpb7aWcbTKlqPBQiNE%H10%|lxLCq; z$x?*L?|<~okc)YQ{b4rBlNm6E%zzh}yC*NJhI?Zyi{6;IoN*Jzcx`|F95N9zcHyF4 z7z+y#L9#aJWdFW_1EMbreHh)QYL4eUh73aBrC;V`0CjlA)Sjs8WFi(LS_tf?!>r6Q z=iuJ|;D+}}!how)A?yg3eYS6{jSc@f3jjBy;JX?EpvK<@&per^(bq)uLx||dBFo6? 
zL_bg} z7U=VYi^*5Mrx0;`xdi(0sxGWTc;qjHhbqS&Cz>ECiNZ%Z6O``&Fz(xI{?K)_)Qi=x zcboN*DFp@w{=l9l|9u=h$KTB#n z3XDJ&gkVJ|eKpmI;mi~}I5$T%34=5bmC9g`0apR zRq~zV6Fa<$jmL*Wt$f3GuBni-MuhQ`^*e_igOa z9kP)g@-PR_;91(&l9vyjp2Ju>2Vx=>qdf00Cc=j8KHzYROX-6W`<(lU)F^>i6Sn#& z%zYjLzl!|fvggz*v`Xz$CvSTIlvRg#NMm?Q7V~q;G*kMJJ1HThgOQOO9syrB;+w!d zr!|V);L9=ZS;(LKzwoMhDOOV;nyKb$zd7Vx3QJinz*5WtMt`PBxx z?im!e*CQSV3qKswf`Lan0VDf|zOneIY2CHfz(eHd9S1Z zP9Ya!GcQ2J7w=Pqo!HDae!OCR_|w7p#e@@<&jZWi^z|p+*4bai2zjwGZQC70rVCf$ zZWP^A0Y-ZUAh51`!>HOd-~1Zk=g%bs#*U; z+L(cXO2hl0m6nHC>Bt;GWQ|A(EL{>TeL}eWLj1|~e{TKvoB!w56K$BeyPEgUU=;n& zD1I$q^~TGT>-#6tBJe|!w`jTJQ0|gopUuDYmtX%ufNTK$=8ng{{yk4}3wx(X%c<#!Zs2QK`l z9Yrz)h$6e)!yH2$gw~l3kXCLq%;1&WkqyNz>Zms^%@)SU>AwTNpHZi76d?JTez>GD zKp4x-sIfF^!Bh`8eC)IPOsI8<2&07ze;}w5BY3|aa;DCN9ed^)60-*kaU?De5_?a2 zg)}ZB%kncioO8C;9~h+AGOzEAdz(M_w*3qvZgAmePlk`f z3C(n%#pkCme|_!DIgG#O1?T!Rlcv}olXzdv>OPv9jOTGO= zZf;~A9O6*kbY>R|EnOWCVMkcycR=iS=uRRs=^$iSc)p_7EpB8rv#7@Vf_Z78rjaos6H|zamh8I>c`*XgvPe{bM|&c^E+H>U z#s(!)FdgDylxX;}ktY-=(m%YKC^E`i79I`tM2P`X_{!4pL)Zjgc9Z6w^fh6w>A@wi&Ys$&!sgO)npaVb+ZdaU2TM!3S^lmLgvplOfU%Qw8Hl z-tw+SY)_OaV}W)O^4$M~-;F55%stPUU{(ooHn@AS1nuCgpN1HZ1p`R*XSrT+9P;dC z5*_CVVxtWqDZx_UoQJ2Wb{^xcZ2$w`A=K^Of=~Ekvj`e()@%afbks6E2*ByN3}O zd-myXU6Hxarw9C&REN)DA|GZLxkD%r?eO@1Jtl+tcTDbgo=nGCv*8AXd7SMsCf(w> z4u*N>y>BlP9ranl!;3yP;y5;phsZ||_+NHz&HAaW<;-!aoxf<%m zuVW0qQ3USJ^!>h#nTaa!?2(e5m&?Zp9%Q%1&l2u_#@qiVD_cl;-Kyu+tz}}Qi~Qai z?v4c@ze@vIb(?+WZm=os<|#-10{L>z6@+&1&5Faz*Xz&@Pse*=L8C`V!sU8s;t7-o zfyGr$Er4Y|l6OFC88Y8Op9c>i=<#C!KD)JNxEp@vT`3L$i4y~x&)!v5A&MlSFx|Ap z2`$}sxaj~s@CzTP=1+C85E8YOfe3^?wr1@8CmC+^f>5~Iz|$DB$0f|1v5?}ErQ_A@ zLuVonhPs19OCO1f{SdO%K&!{pLFhv12M;uXh*2b0=zdx`fY=tKl+Hwo*IX}gHvBA6 zEr#5OUl~O3WFT1|Nls)C;g(+EY&aO-8G`Q=3t%q>@5kHvktt05$~V*DzSR>~KeWSY z$^ZfU4OBHPgd1`10^<9r7yHkK;!v26Vf_3`=Z3NX*`dhH&l*uTMG>SY}*@>GZ(4 zzFucza6I=t8s3cy-cPI#j}r6S8qnqp*`^n(`535y$*%?6yz%X0DNL?90yponZa&Ax zVl_{}Fu4f+Lo>Zr8_}l@h^O$n7~d@~yVHoMDHCA2+}JpDM2OA;ONi*{6jWxln02_O 
zm4>W~|7G;#9H3MF9}H7w8~^dP(CrGckkIiU3|HwOdoLTas|;8z(637ZlLcG^YZ4nk zA!%u2JRWCVF3wmRPD_#!=t9M!0^}Pv_v24n2S2Z*w*VEB|5h=|U^ z)}%}RRSz?Y7K2`*#wC~^#mJ%?Ie_uc`N}e1^`c%GXh!<}@^_l0kuzHTS#cQ+hGpgU4l50`T!ntmW;iA zN`$U!NOIlUTljCbsY2k@F^z|YAeMi{B1lwWibNH|?Mc@WnPt#`9SJ@&Lv~dy?!z=H zk8zi6!=v6`GJIi34}aqTQ?bc>$&-EJ{!KCNeECpfgM`f zah=f1b;^5py{gpv3JSqSNsImmwqD zih!+EP;5I0nl##Cx1b5=H z^g;2UZOc^9Yw=NCPuRK32#28WLv&**iACdg0QYMG6jN#>I4pw>H@AkC5&Tm6)Y687 zCCARodHx7a))TomuLeQ7m8Lv;uqy$p6VK!#)G8^9tT&$ijDVIRr4g;1Ln#g=?s2%s zfCEYFxmNGG2heH^lSt#6r21uR%RxPZ*S9~RU~S?@)}E1elnnNFwrGE<9J=aeg=wMs z>$jYd-3sH|h^RJtljs|up#mNPd)I{Y0t>*MbeoBohO=+BBR+qRa6>CQ68gl!#5NFJmt&X70(&4YleuFCK*SAeydzU;kk5 z2d*?V!_k8B6wdP|=$i6oAw}YDdY>l3_`7pQVyc3XVoUj)zL*|2NJZamfZLn~a5g8A z{n@zBo|BttocX%fmZ9P!0$&sZFggyPBbJ-p!ciTfP!qbQDBCi5HIaXEQOh+npaE)* zX8uC)VyT(PU#PZo!8A_76p=!yTg&`x<|Bcs?UMi9n;DeHR&}dK(A|)7%$bAZmG@Ip zJ&z}$ZtGkoU-S~J zTN`lheSj296Q%K*=qD=|-uezsS&sEk_KFPpOSo5S?j984R^ zZNq_z?7I7w^XqUW&_D-ZfJ%Yk_J5(h$7n&utN$<0bBXw8n2x4e%o)6RqRx_bF zZmO?w`RyYsa_`eBeMz?Hb9^?EG#ah|yt|GRd?2k;@St{o1v`Ojdi!Y2N7`%?>L~9oyK6~$sUV1hh)=^P0qxs0=X80ZEpTTMD<*l(U94m$e zX)O1))t4cbg^*w!Tr2-2oY1CU0Ngp)L`Ff+IJB2%&1&5gM*K*?v(w_RRk-1*(wES; z!5^U?K{L?=kGK~=;hLxPyJP4YmQH?Os2H0 z3wBdzGqoMDK{=b9&}TY;GNGzoZz44rGxxZ}5puBo;lxiP zq%`O=rOExPe=U^r-())AsHF8fz!ZG?iqTJDO6yh_o7!Sl$KvJvwJZrye7+t{Ba)l? 
zpZ^>vOZ|?ydUrL^%CM$?&tXu|b) zgqTTjUsXZ3Gx16PFG#na8uRd+@a*e0l<(GpG7HxzEsm7gGxlf`2G%zN@k>#qT3vWS zQ`@<=>opWcl}}qoAeu0}wRWNBss7l^tF*M$ zPr~)oR3jg|-hYyU5~v^p6#Bx(i;kAhA@6@an7?>{eEM12dB5{S!6JG+GlfU=Htb>k zIX?Gd9;pCL@E=BcVC-E2`kzx5oS#kKT4bCy*^rL0s~2Y<#Tes-I<3Si!vd`Tf7JaNn18 z+xYve&7sWC+0=RZnm2?HA?g7=pqT|jhzN|Qr-^;s{zNY!fQhfzG~XWNj?;hSI;ThH zVt&`a=!?hxbirPMt!WlPDO;{eq|={`wzlwh%Y$+{n1aAF@>d5XjEMFpX&J3JOeK%n zk(fc6^?5L>T%wO*tr-zY4f_Z@*^=!ClV752unV0zX4k=d+Wv+c=>>79L7!nLyGGaP z`>J8P>Ha14nm6B~38<*E`kT+w8 zldc<4&+O=2$sGp&By?dHrOhZrd({vSk8vy=v6ZRdKz#*8)K-({O4YEMI??7GR)Ia4 z-!!(Vq(~XB$hhi~n~n0J$oQzsS4Er7Bj^!zecAz<6^>{HmpGSc1kWjW6$asrAYZ<# zVKv;VY*L@DQZXG;ixT=h7@%K92$5GdypBV2-Ul~I2j1d=&K3g< ze4A{cZlRMrlOFn%P5*d7%us`~RZv($fbVuCSyK=qDjV& z+pV(i!Uw)tsc}EM_v(mw`O>tBw4Jo-t9fF9cTQa!xnB`Au3xvv)( z)N>U9;P}Sha^n6+rzMWDi zq)qN^>{GjckDtF19>!bm#pVFV^C^VTjk`-Ew=gBf<_cX9#$I>i zV^e<;C4vr4P846sDZDAQ^lGhRwn3HW?^d34#LGD-1}@^v-d7Q0Nyg*M++*0SQMUW} zb!!xFEQXC|qS67fWQQwv+!5P?(Ej8@mR~J1|GCU_BiXK#Psbl1O9ko0NA&AXQC!mr z4fFC!%}6W0iOsa#+ciRWd?l^fwwP$E0WQ34s$U2%5rHhfx_u2dL^7D-A~Z6*@g*kp zjI6-w{);93JQU(Ut{TOv`iGp}a1Jw>U`h2k-oG*7NdHgiE7B|woAgTOr#9RD5s~{N zL$Y(;xNwrr) z_*c=e2X?2>kON}>%iuk3J~xA1w=Wm`j7Uvoq+x#F8Ner9|2ryv?w)$+uV2HAu9ryg zQhwkwFnyBzWcpkjCy1Dszee6M>C<<7DSkJ+5@r%SUi^o$WDVQgj~TW->eg0P$9E?a zO&z*ZUhdWvE-l|Qb$R;wW**)O9U!3#{$h!Avm7C>IQx>-s14DST)kV}dQEiMi!vdC zUimte$#Ot4Iq!k1P005A@cGX8BWZ1>3aNCq+mpxDtY%wO^fkLZ1pWkkOw#kbAD|uO zBY!~`4yqcNhfFWs*8xgxK0LaW-;#j&#Xv3e@H@|9_J*Ozo7I3ah4X;xGDf#gz0lMzZ4BmG`_F>}yw4L51U?1IBIB zy}jI6EZtSve{I@XhkEjsd|U~pqH0+MDE}7)o-i{cMxXn#2P~1EZFesN9FB=NL2>e- zg1=5jGxuq1Ytg|xE=GpTRBuf3nWcRoVE(f&E)kii5Rm?)m00eJ9*JJDg!^t7?>s(Sd4H($IaAW?M(qZtH@N0i_^`0i z!LeF1bDmKb*9kv_N|i~4N7loWU>>zz;P_~SOr2-C_jv$Ssg1W(u~h%g`->ZO0RAP} zK@b*m5s%w;1Wz)Yvgd^)yMT@{?pdiNI!A%zs4Zzzr=@%LroR$| zP~mIAlymjLdbluJu|XE-ncMO%y;rfr;6g+9GjvWhLOvgmKQ|cR!2#qGljhSdVy7rG zdJhysrZHK@cMwkX=6*DxZl5`XoU72^oVxF9T0FSifqFnwco9rR?AVIOcg~DffF&>> zS`?)MeGE4nqbjA+ud)5!5+>fLIQ-pD*uDPzchS)dY_2zv_PeKAormqWMw?S6G{ 
zI?$hnjHRQtlI~&6r}T40y7(Q#03ouhniEPwf-RR}j~DEwr+F8U1xc_2SY>E;nB@ca zJXER6NA84Om{bzxdPnFI?RU+H=<0El*b*lMG`+O|bkpQ{BUX+mh?sHp`q({I?Had- zusH27Ff@&=1XBUISh9&>3e_n-N3R{1^U58*Ew>F`+nLOq3mX+>a+7{|0G{Pvzvy_Rm+3>Y!SqF-{$2$X)zSmuG{e z$PrS&U02tb*RzhT2+$gPgd`hCZOLuSPZP+>1 zaJmflLtEF!Z%+4YD4w%S2Oymx==QUtA#z3M*B;S%H!>P_RSCLD)+*?!bU<=F zR;t+@Rirf;#!5AT)nddeaajqq-sjjhSZ4XztGXUAdmi<<1qb$!E+x+G>$}F!E%LqY zl1DB78Nk1k`lCl=hEQ84%WVqkD~>alAc3FC8^pb|xf&$+apTahlgR(hTIz7oy+r z3g#}o0xPMPm1!;%7Ad>{4nr;Yl=%E|hJpJ${pmjd+`lo&MkX13d@)KX*(0?~katV+ zU?J-$pf>fRF|2p>O6cIE0RG3Ke6!fBNg8hPWxU6X9w;VU5>g3nlDM~dX=5Md88<2( zfE-sZN$-cS+MQ8)GBh6-s55!|li6$wOYW|!Fr>Z~xG@)DHKyaqSCd>vBj?jTyZCTJ$CGBsR4 zb&j}1jU}~Urug_A7PAf^OxBhk`NsafB>B|X(7tdxX4Ys9sF8Q0CNUb9G>XE@qpzKvw|Dyl4IiN|R?@v6I#E_0`U$>z9>Ni1UB zwwJ@h^CM$oLhE6(=bwXlyhVvcbHBdDqYoDjnBhX+lz4i%3{jXw zLdwIjx8S+I%sR@ClH$2>GVy*Sunt5EhTE0U<+D5=Q+S&>G+@izcN<3ml~U-b%8NrA zB9q)f^7)n`-T*#U>opo4)j&W$jM`h*l7lbT8_lx#21_hY7PbreFM7FrGJlMR#T-op z9_7B>D{nB5io`hth zAW4cn0@Uz;r0m*&o)$A7`#C>0oOL&+2aU407iWJ0;^}*6n_UCQl)O6+-`8d2KdL+7 z?HG6)G;fR50p3nwt!6j<(#yLas9Q#IPXL=z>WgBhTm64DtOyz|O1bgzEVD%i2~HS^ zN|;Rl{uMg)Z0xAq>)Iyo)5yWcNZ>j4gss<6a#+<+0!t`1dDR1UG)i0y<+%F_5ox0e}7r`|^%B+Kjt(JTfFT5bT7iicv z!jiN%YeqtmmpvfWB|_=?qtn=SJumS1Kp_P$e20RT7Stp)l|U3US@Q6oM+qw4*pR!l zsUPpu#+@Mg-Xz`5B&91$!6fYgzOUEp6Tb2rw0NlEZBYYr45ugaegZc*sFZFb%(;%* za(2L3vMMqK&-4)DZ&&F2l>j}%XC{*GMjFv>e}ur`A*4=EbzRlc2u0{F+;b9|^iNC-UYDXa2UM#nNVbbojSqaw;U1>+lsCJ zy{p&EE&bG^G6<0p$#UQ|J*q6t=E6Uxha`N?@sz;64y|^aI0b zjYUdgh~3=8*?vah*Y``vs-5ILw8S(ZVn!n4!_ zbSs3m=c58zYc0O1gH;m-N$BhSPEbHC&!=u0*}Sx#fDUhKy?;=EmiMrOJ;@oc$ns3? 
zgs@KljF`DA$nwKYEzy?m?(?r7=DxIf3~k2Wj)SL|r~wo_Ukk%2mM|W)q%dH(CZ1MSSf8- z0v0Vz$8wg5aJE9$b`p{VZlB{{MU_2|0YB%XkMpN9RL-v+HPo6|23(j}$cmC`Qn~&| zOJo{nDk_8{r9FLn+&am00z%r`zXZ*mvg2e-0P@qr@)pW(w=dyvMS&{j^pe$1k(S=# zUwre76{f`_qJj5E{Ik)g5A+1lW2Ah9PT>d&*Vpc=ZSO z0-=9>$TklCZ1#SK4dbQ1I`=$Nomw}%{i)?Bw8a+Lgg+AshRgg3jSUKA^jeqI=1=$f zt*Tvf4{0^O`P-NHXdK!xC`a1@sM^+%HR=m&+Dh34Z(l?c=mUOf)Y>7WzY8TbXo4v2 z+2HF;HWuQr-76N%m~86!=}dzr{~7569GdSIFNh|9wHc1r&izk1Aoo8^LC0%3ofS6Ol&(M7?fjoq&cXJimtc5U+nX6B05#XO8jz0UMTo z8hirjtX$v>)cVmBSiB_u#Ru=co~@^noC(NtNcY~^VB?ApNn)~!t^{?DtpFxn`$o-$N%sniCt&{1 z1#0o-Dlkb1>azbHjC7MF;jNt|YZJC=saQrrtyS0}`OW-TAHHiitf~5iTl^?}*RS>> zxRv4VIkE8e&%uBZP(=p+i?FvCC&q^7)OXtZmq}?1|$!Q1rIk|J}tW=~TOi z4*L#9F!PyO9&HdW(H^uLt|K!mqTe?$ME!n+orTWdQ)i z<#AeUFMgdb+%|a#y;0}t`C%h5SDH@Ui_8PWy*E`*q0zW& z1Mt=07m_o&ekKwo^gk0w?FE=>W(@q#?;S7OO4CK-#@a}qo4P6|-oX|}DE2ua{`L$^ z&)u{(*nt_EAvJ*dPyV%yOv#>)Ms(3)?RM19+YezQ+h?NTaP^m4DK7(GOi90X`jQ0} zXz;?RJ7)^OvHQjjD9*e7GV&VjF4m<^b~dk3;!(>eGLt)x?|rTI(;0N=F#lrH@@B8}6xgg%6_eRl zBGRE#7a+`n)nF-qkkpzC7VtD~z8u7}8PZ@CC^dL!qGC~;b|D706F+8u`g#R@A&@Ya zCiy#)8MB$)fkkte!sT;U-t{r*(t1mOIfehJ$ewhupy21gngn9Mvc(>2ka23;Ml6>H z8?X*e3co2-K*kI_e{tzs{rtAu=x70dIDbTD*p}Ox~a{wX7cAlPx*W^|0JG4;- zZizijc;9|lFz_{ZSrPd~5slw}`N+p*ncN!Ga9Lh*uuotu@0WdG)%pc#v)ol;iXTV%(=<=Z`!1dCaB z-;=oVS%E?x!l=!7?1s%Q*j zh9`6@Aw0xwQH4fZ?YQPkD4Xz`DXXJGmJPDSg4tVdwWyF-1dB;OgMhS@fK%1<)?=aw zzhh>dr@sds&;cAx;(1M;C};sEq@7D?WJOw9CX$I=&JxBr>&HfFHN{$sql8O!+8oL3 zT`Br=o@i6%sGt?S*?N349CwlW?|D954r_@yaGLA>gaSCz)90_P?k&Z;#mKOJQNSyg z)9)XQ!%kaQS7&HmLz2~W>VOXeOgDDKd)%F#Iy}ecSbuwn0P=eNS-*}Jok_c{HW-|w zV(|B;Xw`%d5}K60Fx!G)hKjQ=B9b(^t?(|Mo-8h2{7bLx6t^QlVRXti4yu9Hee~y_ zH`cPQt~7=DCv~nitFdrRD4vo15l$9xSX{&Tl3#4{<-N=hi8nv!<7ikF$@#|SN=mN0 zpXh(D8mNdec8iB9DtzV6eQ9;n3$0|)|xcdmgYjoF&>tnfVyY2>8cFt1<9jqEfwmts|35=-(ls35Rk?6qK1w~ z6(vlALAi(r0VEst=VBN*PlL5s_sLZr>gXf7P<$#Wq3C9d=aS`U;{f}=TmCE(c>|sD z{;m#t>(h~2zX6an4+)$8c;A83yQoVnUmaNMWKj!;;NMnMsGGju&3+K{$MI*FpapkP z+4D=X`oBF;Zj6r)pe5U=U*VcjwEqH4Ex$M*_# 
zTtyNt^^Txnf}nTt7ed_zRIkJO5oC%*Asg@vRMMseqllx2l3n zv-*pyaJ*|FUhuFJj(({&i|OFEmmU*zFo?&CCm&5nHnntd>ci70_9h5$+59aYqg-UJ(59?aD0 zn8~@Sqllx>P>?UQUHb|z*x7~LbkOt?47iltZ3P+HNjen;E_s9+OQC@_%N~F1ELIFJ z5h-&t1BCuthI;#|ush~ZD(g+PVIN)Lz*Nv~KorkODu(dEy0`WCG-L|)f&KqZ&8};m ziYaYpN-1mPdk+ zSD0_RfxAmq+MK*hHaFu#4|O~RgwxjKe-p=vbsXf|o)jo&N{AOku%MWUx@M?$jC1?g%`D(GPjN2?Uy4+q|ra`oCjYCy7DHD5%lv}AgU zAl`zMn;*NvVVb zs{D`iII+m!z5RXp1Rxc?uki;)U=OH*{HkTpL1*O4$pyq~c%L%W+#v+ZOOq`Qe9Qvc zAQ!3Uz|m!aX-AeHDh-zBbmn@_A&Z&jSLY{l6#Moit6jXcDh8uM*~f!eK#|zJ@_IrR zvz4p;jcAW%-+OOFz|e_N-R_3-|HT`%vn&efNzfShv#7M1{Qiba!iOcYzY5L#bivZ zAVM0fZ!-a4S9;X(1uA&ikn3{`-AZ@{ldXTrL2Dk&HOoQu0GKzYC8}2fVGjovN;0oY zf;76R@&Eh1TJYY-*m=VOtRNX0fqEt!0>mZo{~zw&GODVs?-!O3HoXC9iA_sNxTU*8 zK&4?zcZk%cyBnlL8Wj~J6@-l-AR&?h(o#yNAOeDCF0T8!pZj{wdEQU&7-x+071o|> zt{MM1=l>UzThlx(*6{gQ=>Q%089fomC@jD1o!Uv-WHoPjZ(Ioj*&C>bk^k&rQgZn5 zI$W-$0L_g6lA$*Zl%ufJ zeNjTs0-nR+_-Txr_DY6VuAe9j3^nkv6#x9#_psPNDq@F-7nEyYzH>O)f&?e@1 zjqPIzUiwCU>;BfzppSm>pYcZ{lnH$67)~nhc%+))hbvFFk<`#QDF<1hi(JpYiXsO^ zNn$8Jrsi9t#iU^Sc4EK!&MJN6=uUCs1GkLd|N$3m*158suWnqaS9P-9CgfKjX}cl4Hyyb2l-&%mhWWxVCD z0jpO4BRP0UvFv5AM5aNyXY=(!@iA9H*h{sH7str|F6=HY z_}qBz7aeZ#Xm3~%Y6k>%6f~_wi9n=UUo$Ox=`V37B}$GSH1kN&-7|X-*s3~I)WGunHmKt5a`}7*XjwQjtOpxe?;<8x7z&C6n)Rn&vSJPY zR2%j|%g)D3Sxi*em!WBXDOUXD->U)@l;>>!*f-=&UPWxNHhd0EVA;*F2cFP?ZP`|C z@=tkdas}HKird5{tHI1ln5L8(6Yfjk-Pm$>sT5d1t?r4-^&Nw1{`KJ?Frh$TA|wX` zJhV*p1lr3%6%(ZaU9-J`DtmcK`|>j4f84w9WrX|EQm?`kU) zt%D${ZyvFBBDk1I$|XH!5Blb$;?fO4Tx<}|C74NB&GW6x=_BRxp#YbG%&`6Ey6FV1 zufb9AUf;TJxf6ypLE*@-pEOJJpVb3n`QBtE&qyYtNDBh@5j4ao>o*_&$F5d^K=%%K z;|Q-o8LzZ~|81Bk1P|CbnEkULAyCj(CYb9?Y|@}0XxSI37X;y6+7P8dUiI;%F81hE z(7HC46THR(cQvIDx?ozNE{7V0@qqYJ#O^&T4)(g(2;E@tHkHvdF!;JF{kd^JLUp36 zA3Bo(s|vIA>wo>MoCmhP@gpqQC zN|J6~wVBmvS@=UVa_KlR&hDK~0>vSyQAHxF{ zW`z4f34E$(EZ_^QQiFHSD!ef}1V({_06~Z#{49d- zLshQO`4U^>HcdX|-Uv4wyI-zI3wnyq`qpj2Fk$E+JbtfMum;R_8}a|E=UxyZbjfi) z%7IVd5jGmtM(9G3*r}ukX$7FW|Jd>uZp2@qszQzm#ADs`rgB|H z(DOq|xF_~;UAT~^Q1m{8;kzyP2K<364Og!frMXqcfg-hOxL)&P>0r%liD 
zeN2u3;lS$&qdpHVKt*GrxSJ^S-y41gCif1?J4F-IEfSD-5@8$Ii0@#2%y8zpEWR`Y z*HX;@Ca}KE;R{U|zz2sYpa4Mp&kc|q%qy`CFtX=+X`)&aSntigXs`~`4+Bk_1mWa# zFTk1UKbGG}IRq?{{rR088t;X*_EMGieNhdX$`_?OL#t2{I%JA+Z=G!0=?2&iqoD1u zLQxkia^)RdgcmD;LSRl7P%xC6oTTj7AHlgL!85jBbcN0+_jvGB$40dhTf9DgNqhPtTrHBf_mR->dNV0~Ep ztm_EuZub=BF2TaGU^z5IDb#cPgtAL1e(eAUdK4BybPLQpzy*dzYX-9E3k_JwEnhw& zf_5qZzXE$xh}r?#S{FKkYdlGQ<*m~B(s}5`A}e%!q|hGYfV%VIsfQ3`DePDp5Cl%` zOmAMil>m~m;t1=LN$#;hj z+!z&CRfa(Mq>%akEC=8HuZk-a5yQ%AbVlHU&Dqt&!q3T$L5qEi$(!S$4bw(Wc7ACD z&RZWSstr05;v80oinoFI!m|pqukl{`j`zPm4nz}TtU6D?lne<;3#hMWMcJ)Xs|P`F z(Kva_tRZNx6T(*do47f5SqvY7gb*4Rn_3m)!b&e}6C1Jvs6K}~FaEPqS}?gs)Zhvk zou=5`IEreqMXJ}HEasIZ5g*yBPAI@`B3nhp5pHo4R+X8V8XDD^@O5cHxc$SQ+@9qv zaEKlw|M&N?gkJ*F&CW+ttxaK(io;B5ux{T+KSV7K4YEA1(^wAtqZH)Za>{>6{~_(o zmF99cbd3(fy&(QTqeBz-(*_lxJ=66%*NMBNH?y{k{hHT>*g=A%xO+;C{Ludixz^k( zQSLQ})%MqbDf$YFL{-%Xr8V&K-~JYI3jnARPIt*NFsMqo)P|ROic0N-|7!noK5P$E zl}~%m^cZRJ`fqUjkZ5w!(y2qYEN?k|s!lEXyM>*?L2RLHSU)H;L z2*6lso^1HvPtiyG_0%Cf^~E!;W$H`NGsyKUmVZA3_QOy~hYa9cclge&RaHBmegtg<-9jtwMF2Qv?gk+(~7WSMe9kxmPCTcNP9?VEs`9{S7Lx z0~O?cHm)LpL1}40!`am){ohRl%DjYyL0yT>)(9f!z6>BMA+J^46yYxKU%o-b**2gl zN9nNIt*%8T=|bV|kNc@#zHPP(`1?Z~tVzRfLlORZuDJ1LgQpsp95|`i{3Vb|UC}pX zTL&x-Yz3Cf*jjU`RY`!*7Ml75*uz{@5G1)yc__{NjbJt^EIa;lG>eyqS-tr96v3J? 
zK}G5tC&E@tbMd(xa+f+{tWdufI%3GCY^+8gP1fxB>CV0Ixfc-81+ai74ZVj-YyhKC z`PGUv7s~QGH64>JgUNadc>kciunFsJ-KPK#=(pA?EjaBc2+M;FdfW9PG`+Dh*b z?0p<5z7_T56dG$`FIeC=a<2FrQT0DBR4BR%>v_i$!!$~SNcEB4k=P^Tx|~S3ZF&2z zi59j)UVZ$&wwKN@@ynKQp_c;oLOXj^jUGQ3@uAQ?K!Snb&m({{(-u zLEqSaa=QNU3B#cZ>>)Y$0^8B}6L#FkgA)by1-_Z+$+EWXN`c7Fx>vA4jo8`RZsnR^ zd6mSIbOj7P6VTimONy|EG+_oCeqUk@b4DJ{PVWx=0x9&BS1mtHe8J5-ji)=UUxUC6 zUKi{FNWC=!^k$L`J*)|hKnB%@j zH=j8Jm;F2_d-fEg*`TD4$!X@0Cif4Il z5@dLY7!A2H4+L2Ku7nx<)Nd59m3K=P|9&6R@iu&DGKRPazJT3*?DIX>zTmDl$0Rkl z1uEPWu=i(|Gjz7UECT#pOg~6Fu*W5nbu%51D7ER6 zZo~VTE%Pjhi|X2X_H!H9o$Qu9y!#u#(?_B93A06jSn_EpVA{3eEp(4!L0J;;1&r#+ zdy_7qZkh3@hSFcg-+hHa=sCPYo*=1(iANr!c7IfQD~1_VZHAaY+3XHvp;M4K_9<# zu>@co&py0!i;h>iAiwg>)JJnbPms|fN*4mGwIinI)cilw(F^)G5rFeC>RQ*2+y*z( z-3N3UHuunw2_vr)lF}t(YTFe~v=_tOVHFH_rv1x`t4%4~CrAaoz3XtJCU3 zLA(Z;s~&(!;R_UouNmwgdjxVb)?gs^(a~xuf24?&roh{>X@G~%0L9(b27B-?uqQw0 z#KBg0pAT_Y1KjmyCWYo8;sEBomcN0*5z9@Csv5#(7^CVT@LP;V+A<3m7?w1HAr<&F z&R&H{%FtLq9q@SUXMuQj?HhED>+ELT;2ecykAQs}?AR0#rf0#OdE;3y_MlYsh?uGm zpbJTOVPE?$H3``C-w(xszUK5Dd(^tJwd*~AZ2F!buQb1`dN&o5Ng?od^tPs-R%&0D zwh`B#ck|&yGl1r`rpUd9zvJ=B3BuhFnN_XlO%Ne0u;pO53wLFVH|eLPp*3_M)>~Gn=X21G(#tA_@`)Velkto9aG6^)&q1t;P-65$7jA;ZD_;= zqJwb*xB-=8%1n9Ev6zPJ88*YW$JQ70A0Y2yXaEiBKfMM@Q(Ug@WCwq}z+(ilP{o*T+Wge!HF-Baa@CNWLvBV!G*fO{Uc6;#Fq@*>7kVPW)ma9w=x$>?V z_+>;(g^k~U7%(y28s`uOnsw$h-Up!aCiVSbs#ol*6$$DKsy%u{a zU4X2%YKM8${IKaEi3`8(@tuU2Ip{!7R!`)8D1j2q?J-iyb$crdIn&kW+Kue%MKXA@ zAv)LJ|jMJ^X;rjMl){m8wnlV)Hv8_&c zdESjjhk%0E^@Cw;BF5uMzN@7VV&@!)c@FccDl$@~H*k=&Z{%`%O7peT$8L!%w&~Gr z>S&BI$@^ZVSvZ<_nkqc*L8z)&ZOzw2bV@WMMueaC`t{dM!D`M2i6o|`r)hE4606;F zLRZyf!ucb&=r~Y*o8NtucwY4R40fM^NmO`b9E>QpQx%CW1Yo%!^V*b06Ya5+GRE$) zhl;!NLXMh^AX-9BHbJ1vIxXi!<{+Iq4~+^81mb(ajv_ zDq6Wqm*m)UEa0A91hv&4s7-%~*koCRhka-RWckuFITcH!M|GWI5L~&S#+SB}JAF15 zoK-ps^RWXpUg~zJGWm7x9O(YK9trqeU}UE8CSuqbS^J?dfE6Q!>F`lU*mP9kGO3S3#L@Ha!rEz3PCn&=c- zh2&=sFUJ+SH_ZCT$dlv|1vYBcj(!+qG1Ds7dXq&UIQVNw@|#lIHWv9F&$HlBUc`_y z?tV1g$e>SWg1Mg*YsDAZ{NK~#j>x;lItV+C6VIpG<WdKUI^BUPacW2oRv0vnC 
zR?GL}uRYy8>zM0IUj8#rGnIxODKDgO6X5E_C5789KZ~@%3b>XsJCm&Dt$_hL|37HS zGjL6FX+ua29lz-j(4Q>1`fSS8mJR}fz&k+MO4tkYP1_60h_0D%3Zy*;&i@-lzVj6G z-e4b-9BSMAmc?wa`4)zS4z2wwRcPY)(*6}T6zwYb^$%D9F@~S_RbvVBM5CRMT89E* z0F17gWmDo4AS29G+rq(Gv{f*XgS{pFFoi2Rh0`mBi@ZwLz~{z&W>sl&m0Kf^@124x z0nRR2mvns2x~UuhdN?9SJyN##s(Z^}maMGGVg`_+IfYhqkqe%N{?a}EwU?+I*Ff^1 zofOetwnEmc>$RvNtMWbfV2kl{UeaYwi>`K1;MWH|w!KcY((uQ~(bRk%PO`46N{(5^ zc66g$rdQsUv?(}L-m~Z#?9j^mP5i)br{&0>Zv_2wKhW3zSqKdNlX!Amyy|>Sj-MzD za2T`~ZHI4Ej7_+_5qI6Ee9_g#y%3;eS3T^w!MxxJkFA!QpWyrM&oa(KRJudG7oG~2 zpx(m4jV7^i5q3!^*%F-+$z9we$CxP9%>zjj_D*h6Op7<5s-deMyEZN&5{Np8Jvk@7 z-)w5He#Y!#VSO_i$xxu^f^FB6BTGDzmAU4TQ0R`Q^D(}xBPyFlt zOeb<#VOYmW0jEK$8$iJ}zL^mJNjp4zG{%O6cd3$=-D9Mlz_ zx!@|#Te&K4e==GLGOKsgmg=CH8V0=xZsfk0t*l4vXuj+{B2o@z0OE)Al>*W zf4-_JyU+XsD6oc+npRf{EMqRUW9?bMyYuxD@&c!N=QN&Hqz_9v1`iPZ4C(QnHGC;A9yKJwMSN;TLdDsv2^>8`;gf zU4p45Nr*~1UEO-DPhk|vFN`HrUKp?~zO((>hQUz^wYgi}9FA~5%1}O)S7Ue6u_~B0 zH*m;6eB626oj?$FU%{;9iY7ArOH!lj3G*3jG1ict;!3a$=d&~f=dsx1CS^=*S>xsG zdM3fVc|HSix4@~%%3UeuIb%-E0q)zhUn6-IXE|pB?4F zR8-N~2UB|Kc{bedRN_A|L`I}j=Vkea8lrJ9<5P!|y$-{&OTFoJI;;0PAy;4cil#&zdB}dN+Y5;s5L48v&e}j(YqiwP+ZpQ^s6f5td9#} z;TzLQ&}d=`$eBepfB;_CmdcYZnt8R2?P{{F%1HPFi-*6KLE|K5yV9uSsHjU>+is%_#Nl7cipZ*=m!>?RUqGsGE9LQj5= zh$4SF7V_)r(r>VP`icJ_H`XO>qdVW#@1)8XU^a2ZnJqT}5(aX!j|hDYjy>Ewg>#3smff*A8PEJGGNj9jsE0 z9nuHTlkas%i>I(YH-SY#_cO(+&k~Lf|8l`*vhvvEEA6v)M=&odhr@RbPI@3?;>J4E z8#ok?j#`OZO3(h57Uwb)*{j&5kUr*~Vt8}2o;5CevMIH^Oa%4#_*=3lMW%fHgaub| zGXR#dbs!)5j4k_u#9kgPNZO4Lb3 z{OCsHT5l<{s3a$C`s2feKyqW#RTpqiodA)?hsJs=B6ocwmk~>g2i&AqR`INEqYr7` zQQ8TrD`F=2S)G|*4L5!g9@T!#e`GLk+>tlF4K{!G)l&xicrhWCAIG_Ue0H!_KDV`P zcU4c!iQ(jXDt9{k(2Hhfcf5IA|czAVu@&}V&f1%XmI zX}h*g8&_A?i*n1ou|SkMRNc83RuXI2ToIjx$nQU>ABcZPUbR~yP1?2hKAmUsc2;e~ zACSTDdosTlE}EJe+L8MZY^74Jjwn951=dma!Ib4Rd;c;`q1EY#uD@Q(4>F#p?sjG^7diQ!FE}H z!!mWFW$y1(W-B_>ud7x^Am4NxaHFw@UVNBfZGLZ6uU+2e$n$;**2F?kz53T}J(j)8 zD86mwB3huTEZSLSs%m2Y5^9uYhR2wOMaZgLLVR60B$9MrGTxHjKc4Q3O-%g`2PT5G z&&cWK_`&G|1eluo0_vj-Ak8453`scBmo~Nz9S2YQ9!G 
zZ!+E8yn%fDtiaz%&NwUA+BT3<($A*@T|3blV5%b+yssjLg< zX49DAz1iv5%1w-vk7s#@C=*XX!;;EG2K-v{CkcPStTCE&oi2KxI*vesoph3JE4@zM zPT6fH!R@hSx0>?)$KR*7-*zY5QWkuW(7g6|MU7f+F?x5d#D|%7M_zaL%KVM_HpT*M z&X`?1Pc_^l4c+VLNybp)JA7;a)SDo?2fB=!Q%?b;EaQX@h-Z<&vf+N6bJP*pZF&kp zn@BNM!`P%=dDCIj&8?+Hzv>wFeaqcF&k)>&xW;1=%8uM7hr*P;SmjwMltsHudsjQ8 zXA)8-D+_b=W{^G#zCV{{zvwlf&`V3&nkh;dTJRhXJD=8MELFD!BrRF^vGYIS1BQMO zi-cVtf5SoE$eTHfRhhj`%DG@`wp<6=X}Ep9T~drvl=Gv?t+=mn_u7#h#q>5bGyOz&csRM3Ezpdid@)$5@)f zA`1$+#~}THB1byBr3oXGaw<|0IWnH|A~x~y5X-|AItE!)nbi^$o`}i;AWbh$SC_#L z!cyo{=sp$vvO95eG?-LgH2D&+wF5 zn$$hg2+3g2Gt&v{kUo)V#uW$%KH+X zQQfw$^Xe!B>-y>LtTHmdjKxZm`6dQ8Ko$w66dlkDyVGeW`YQA+ao}yWq}O#IC`f!c zq~l`ea)V-bK{=ARlx#rIAR6sx0tD?)rrryRu1}ohbpW2JONA@Sd{_C-x_v(R^~GF{ z1PSy(|B}AFtr`+SY~<2lJwtz?qc4&|zehN2=(GsRV%to17lFR zdrJ=x{Tp}L6TyDTTS+e3fblgCkiW`+YAttYyAy=*!6wGEmXWcVPAZGi`&9RD@oSRr zeGJbkxcC~t_n|X^=&FQa>8wS^MGzm)DdtkHxca8N7(yM%tsW;mLI@~7)YqiBAe3R1 z5?i<)(rQv+b12FEkSEA25g-VA2*^QCa5Yjc5Z;rhP~ys9M3qmJ+%bG8P**0uTosy| ztSHB{5zmVzltHj4zSL=ZX^hZS05yH6du=@(t`-OAG8#n(IS}YmbPu%LI?cY5A?zU; zRzLo?&vE|jxFdir@c&0ul2}nd9a_o8{0Athw*g>aaMVsD*yz#C_@y75f8BCzvpg-&}w^aGA6;K&lG$!w-OiN=!2_+nEI_&hK4UJwNey zx9q-S@*0wN1Nn4M8Wg&?;mLlMTgnr+FNKENn4KHZXlW1+-GE=lA@fz+S8u;R012|^ z7kNu4(F*MAaBdDOQN9J17Za{PKiLu z`p))JLN2740l>#$7E)}fFRc`YY`nS?q7#|HupFvJ2iwNE=e%*3z1AtkpI_ zo9}F>z7u#H=8h73wcZ6VIJw};^4|Ig{?yZ{DM~?~v&k(#;H5Nk?b+i+V9E9MZJG+b zBd!rpg-KojV*taSI9ukD^#E7;L_tEklhRD;%6RnIrCSZss3&)V<4J2?9s`MJGh_`g z>-AY%rk;9d8|eLZ6z?4S8n?W?*6F;-W=ve!70&)XN=^}~Pb~WEI=2!AFNB2^b(VYG zNe2TdZ<&3RU4B54P>|@yD3uX(PXTExrojT(zc7i>SV#N-Ib0t|(F3K9zi+hJwg8ZY zxKoQ#Qj38W6F3iWq>P(r0AK~&1XerALjo(WuO|uuiDH#qseJ9Y%jia9IQ z^k&pKT8aZt%^=tlDyz3?xt=a5vgeI?c8?rqZfo*?I}LrtR!Jksj(!{>dH!puC3_3( zXS^>U2>|iTLar~V3!syZ%6<+=?i>ia1A_@S&^&u9D<$86YmFxuN$;A0n)%KVS7XS8e8{9m$yHAVkdDl+J3jvQo zxX0SEz{RnEnXLzV@*Z#tnE|p#^9euU9vXjI;0Q4T_NZIH1ItF1dgP(5%sUz%ysGAH zi9hRrtF$TIwP2*+h7=vTGh!^+;S1o+qovOSbY*KP1s*tG`5BaWKui;52&?bx8{dOh zihs}P82p@}CEVLaw+247v#PYhwqglnyIz^`S`-({JR?FTjAhtxMG*M#BPo4sM|~2k 
zhPRJ>>h4s=eF3WOP5he+`uUIJYRR(RYm{KiTpfrmo=_we?}R}5InypVm9^hYY&5oX z$KpRHtY?AvRpNTsLax~N9BZkt;`x)Vo-+WQV)k5Bw!x`)8_>FhRUac)JYgKAwv=mgOY z3AX;O%?>*kG4{;b1`De0Fs+nD`MRjF2iYSF+87yM z+9AF`Wt89;~_5dZ|RGu_PXwSCH5>9@VDRd!NRO-0eO?j%mTx zi~$wqLIGBgW*m8G!)jE;${fIvPg;UH@Ye3$(f)RN>8DGN!nwkJ=CcrwiFDn_y=q{q z5b$=`-D`H>1+dy7qIf$fbNXw~5m|jmf{kY>oX7Dzc)d-)Zi$)U0nn!%AfHXnjrQ%a zdbunoXs-eClT9eD$Xv}e?Lqvj_d|WlX^%gi1UA>a-{FpROX33Mfoi8_8@i#)wvj$nusS}ZQ9j*#ONZ>O7d4E?YJmXmF;Py1QGFL|$H z5MkANepodNOqiAj7mVbG*_AE0LVYd%7y!qky=@DO=Tm&L%YW?VW8+I1 zZD};#QK=<|aZ)0M^ZmZ(7PJ8mTiFNb>k}MH)jSb#vkX3TJx#P$w_11M3WUirCk{J^ znsaFo(oiVsP>}(#4q4T(*j-Bn-3}R+jFt{uD@imQrNIOZlKKygO09vT2SoWb&f)Le z8yE0y0u!iTGD3KWp2Wgk#$B76#K*b!OKa;t-oY_nfHw zT$d5LWoYVpqgB0os*jV7!FK`)@al27+fPeelce70GNsTw(jdG+{KcNw8>wC6%Z)a^ zzKu{ACi%v`WhyzrmC6mvn^Xh3_?(78z=a{X31pwYJPqusYJBR%N#-<;In)Z4IPZdD`FWb z4TD7=oJHI>N;tsgIA2E~6**hHn5}BAR&TMzoUBy*UhdISRFx+kK!#_k#TLIPU*lN96sN_S!W!^{ z;7`?*^b5`>qq(-aT^n1$9_Cz(h@k|r_p$UVNC0=DkY~9%BcH0~@)O@)1fwqYVWje$j9# zM+N+?S%<6cCigeRMU?0+(dJp!U`4(!RN77~rwvo{eD@(1Wc@yL#&$0!OK#IH+Y+2Q zUbCHCgK-VB^HrRQ)lP1+B`{4yN-Yt*=hoSmGBM(o5Yb6{GGN~wA*-k0Ji(S1MgM(F zl?kMZnc6b0lll;L2+<$a!2|Meb012!WbkeLN|tdy(Ig}it(Y*k`t;*wWFkS{<;xW6 zYEP)9Gc|tn`%E1*y(|1M^nw>J9gu?oQwnU9>H)4*?BOf@$XvcYzEXX*`o`3n=m0fO zI=aSZ;qrpcs%`wwKA8HW`+ghD-kp^zmFXrqvrcblEMxsn?0vX?KCS$`6|0Uqds9bH zYP1NO=QDg9M0b;^cAWY3Z2wQ#&F_de=<5VGlQbJu8I23w0qybVH781o7OnS5ZgX^r z%3jQbftVOW70INMFiWLE>^`H~^NOq!=XPf@A#OY)smz>cXI~E<6L*3~($6p?UvN$t z{IQO+etInPPW>sYBtxTFU09;6|IT=F1$>}qp!44Oem0ibk99V(JtyCS-eC7;&dsOn zmhPRj4QS(7ngk)C)#3EAKY%VYQCa(uCj#>Vnda%kzb>vVslN8sB|2-D0vxgCVBoBq zo_=St>4V?;YL{Kw81_~^H|r8Jc^?o$L&vOSK>72ZMROcqtL^6?8TX9@Gr6o z^x_5-H0SPT-&b)6K7KOjb=HJhM(lO1;13GHi2&@(%`rsSMv?|loQ+(89hvX zxr%;(t|SdI=}Lx^WE~!his-Dx8G?jtmTQtkyclRE2QI(m#Wu2zo(bD9_WRZFlJ}Mh zyjLkA`5t_~MzTjZJLUKgrLw5pTAHEPeD|3Lks>PSAP@baH(e!r7R~v|iB@$egVXqj z355x;*Q4G}n4W9rY#Lr^`jQI2>v!!3O#m&H}acqZ8lMn`f)k(?rGx@ZnOz(F)K}GYbaqr8NU`jzY zel$nl;+|EaKy42z+x(nU`s->1qMP-*iR?SZk7~;8m-j|!!MS-JANNEVq;Z{>Yobo% 
zF`t&G3S0%C4@*&FkwUTCTStUI#tg;HdW)Hct!FGKxKD{gPn294o%)WL>Md-^0k(9? zdB9;()5<2JMfXD1_|4$pdmMOK>5(2Ev!W=CJn$UrQssy>;e34ET#ushyj`|=ALpr9%Q7ydW2q!uZFcUxyFU z^f>4O@VxL!%>8H9+1~dlWy#EBl__vB+KLk3;uEN25i-PtiKy`_2R`;?V-E$5Bz7%z zG%O-#xFT9h481pT*@C6pe1K+AkA3fB`WSp9R%kHBj(}^`aORk$FETcJ=>DftvZ-~> zy^=qbwe5XRUr~RfJM;9VBf_k-ByKEyr1x1reUJVPM`Xh^c?Vx-LTZ=P84*|= z?Ubu)wUQ>fR4^9}Bz(IkI=~EoYr(M<5n?*9Yl(I@T(YGEZ^<D+nALRS~^4lKl;-7a6e zCE#<0AedpilwhB#5bpN&8}(O#?6-7!{$RN}JENQNRtA{%uvSQ9xgJ*x;oFtOgopgL zQUnYvHgNjdftmn{mRv~*lX!O2X#NV5AdJCC&Lxecy@!+{1Ba63$SRHShK51ydPGXm zbmz-iY1C?z%)>a4Bq@{Z=FG7O?@U$G{4vProw_2r=6k2#fFzhZp?J(9hrJ2zfD4)q zM`;_z9d+)s~vJG-sqweBPAQE$yvCE1B5aFH#RLB>vLrQCVm%yDcwAxbwF5vJq_5J-QDuZ2{c%!>g&ND%6p=CiB6XK^*H>+s0F>SHU6OGP^v!of5 zZnP^bY;)u?kwd05z9QnrtizDiCUVzhRs7Qnvg%FbUM&*#OeqGOmaYgieisl`5X#_O zcQc|?H^iGj+Y>9eTubEG;0;=N(DC*6_nS2+zUh8eZI2mq``Oed+un9U_3Oz|%aqvK zs$fpT)31!r0&BAmF=3vD`ZG`Jqg`J{>VT8;f_T2v8~7R77Qs2mdz(9ut>WRzqjufa z)KyCyidm&ABQ_b@-|J+oZR*r-S>Yk&g)l!Rbd&=yL+S{zwoZjQ>zKS* zGVjoRka*@|+)5C({7x@{u^PNDGRZZz*3SY6|e># zb?3`7j{>%9y$t)@nK&Wn!dO|79m3eYu6B{K)|K0mFv<+VjC-_p-4-2(8Y9?5%w^hr zZQq+KOU`b(P&aNo4=eRxaKa(n( zK@WC*NM>K63y=LW-hO{+idaxUi;}GH^;hjc888c!0)gzyfdboj1t(D>(d@eijC46X z(!wu}#)!t&dHHqJDvmR^Gx5qdq4AmqqrzaoQ99+O)9{l1fNEn1=MVE>ERc*LhM#l| zPOv=D|Pb?#xX7;5+G8#H8X+C+n)-{$^@L zwCMrc1u3N)p}x1z^rnY4&21JboRD=j&*{h}%&AR&0sH!6&y|Vd@96jE-(m2kHx5F1 zcWPz6x(PU4*>l2=4i>~ciIRI6N!7A_=TTC$L6GpGa%}OF%$?W7-p+zAi}i0Fb3k-| zS(c(>IbfA|mj+0Z*4J-ISHZF2sla4#xbP1fF{=rm9t>=_zWR<`+p8_(jwA(;kM~#g zsJjziUVR)|=l$(u275>d64p;{$4Ic@0+qtzUsfFZ2#YmW2>vV7{YKOJ>qDFT5a9cC z#gwLpMqW2kU^p?}hYKoL5Io@!{aId`yuZL4=?oXF91#ck_PCPZ;^re{CUKTHqZfycQ`Drd+gdQwLjX<`3 zne|@C+3Qy2e4Kn^Li~^~#`cVbbTt|2G3X1MMDTVWvqHJ_RKU*euqV;Kh6fVa7j~_3 zj1U5V&#w{v6jJ)#?bmbz@CHu=KnOmTYcNt_o2vp40W!5vC~}5u8c0m^`5>Hu`~U4C zbORQ>fnK3GirS=yzj?EEyCgy2!G)8h*BOweK&Ih?EdVjtuI&@>1R2>o7Y>)dfn)l8 zz_2U@bn<@44RdniJCSpWW_rwjh#{y9+$VYbfpB65Oo=T)XMt&7ePl)aY6@{BtL|M! 
z1t52KkWm|F+ZYCk*Bw%+UAYEun@N_!g8Igj!$Sar9K5l3bh|#VrMBg7>4Tr7FQBH; z!CyQ0jaW=vNC?Wo?AUo9vi2l<2H4aueb4trK^!mrM`-z?G>$%{scuA0vm<%c%+NP~ zy=Vi0iGy@_IR`+pfb9xPR!e=$|E4iuMGd_D15&sl6g| zNG2D`&!@U6!pYYem;(sB8=mSDF8pcW^cu47z6#1+m&fcUZnH`&L1jyKLkY5Z2(L70L{o-$m)1j^t0AdmWk7~uQ8Lr^gZ*b|QnTDvzEnw~$F;|x&bcM6e4+ALc{rNyl!?a;==G>t9@F3to_Fs;k z{^CO`W}f}u3_xzF0kZXke4l-1ZCb%##9!l(5Dx|vV{k{rs~^ASlb+1`0G??7EFo{j zyb1tk$-80}fK)=~LUwY5BMt8+Er5Wf0MI_Fw4MQ*H8GHe2Y;?IP`4QsKLWv^oNIQ{Ye43AjfNfu>hDc;yl^kw##3` z7^HR1a$f;kRIWohLDM5YcQ6Tkfrw_mNM1>D;sHpHzr2TUl)^o68EW*Fii+ckn1^MD zhz&ZRdb52mo3uq`j$=B>tA4MrH~EZ$S~8h@3I$0o8g33s?ntFvdovE9N?WCnC2AyR z{OUbfyQD74Dqw5+fk7XDhb4J{IzIy-iDeSB8Wqkwz#o6ICterp=>W5Tfuwiq&GY~x zfRb=l(p+|TH-RbT23HiI;(rbT@Y*&=fbTbbaQKeHWcyI>_aBh?VrEp~4T^vXdsxOQ zgeRI^1r6;6#7c}gofT&#%f%r;SyaT86L`H|7PyQyf;88sJ{*|~7*a9rqoNhTfU}UE zHgL;3H(#Dtd;|>GeyhG-jOXg= zz`*~3CmbO~ws!)qK$wNhMGsRjs~MfloJ32PNAR)Ec00&s!Gzvs0<8Y8GL(1#=f14| z+tE;K{oZvW)DN%8=GSXFDyZoJ$$h=>RF3bFJ;=4Av9Q&)J+;X(>kiVk8pH z;BCD*11>ko2=nM_@3xE4HZU$6{2?tbH_XK#@RXO@)=Nz`8#t{Cc7v?=ScV^9G8yPZ z8nS_uE3raR;tzQ-!My(PGizt6jrS1zBu=nZ$oCU_N2n0GB5dww8?|*mizB_KNN<7B zPIbx%Iuhmgjmn*^TA(KD4|JPcq{xPZ4Uyy!MNyOw7MV3eGKk*;<9xwgoIe2cAyb`Y zj{v{uiw}{UCJLYo$keu~!>>NnDr9h}4r@rM_B5fYo(7oU?|^v=w53jH#-yDXRds8> z52-5|Mat0EAx^#*A!j)zJo1?9?2}22Ap6(S5GAIUI4{bvmY97lPXb7piF|tyF~6`n zZ*_rbu_n9D407OZ5RQc>sDxcz5Pag;9co=$BuhK{uv!KWzOyU9JPanqBFPj`u?&z| z;hBo;MOEs@Q1R(L`+0Ta7LniE#=mu-hk)F`r;Hzt#b1TemS8iT!fvs)#+gG%8 zD!$eG7+fJ79I5DHsm)75z0pDT_Tjy$9hp6N*Px3w&I0^qW-Cv8f$rH)F$f|hvRi=* zZTgfk1wlWJ0Rsc576ki%_-gHx;bS-#rLa}Oi!xnBPTQ_a8IqI(sR%_pPQHVbw804& zLC~sl!eZ~hICOKilB|FqXWcmHR=F_qs%i#WW?nBQ^1C)(5z?yUGVDb zCa1(TKFC5ZipfS*Q3i~ z1E?OhdrZGgCsCD~STHgp=+2h80>y?m$8%118;EW!o4YuB{mLl(-D#*idSy-k`JhIY z?~d2alP{Arxm#Nx2mBB-W?*`p(oR^cLF=};-M|ZYR^GXK_TI)%X)rXATI2?Hm)j%o zafK;JaZf{m7j>3=WY1hjAGjoltt-|ZA2F~*HJ*}q{jA)Q2B7(cR2*dc3(5=D@^h;( zQ4MjML6Mh*k1cj%ln?{lK>(}9NNJ^NX&^1XAyBkGQ2x@qMT96UTO)ZHFq#HoV8^Do;CjoYIDE;dVXytwG-cx2sNfPN z&CjyUv2Ow#tiOZ1&{CR#ody0@!JbGD8DcVuCz5b+y$k*0P-Q4^eXU 
zdD*q+?_8n{#?LqWz_vGDvB>2ImNQ3QU>QNouSmoXHP;QIykVa+HKu^D8(50W&C7`* z1G_4IjRJWw@vmh`_DbBrkZJxZl^o4rrCN;`9PB5x`N1#^qdNyT6B5^wXgAyCMQ%Mi zM1RW^Bs;H+&7LHKoP8UZSrRQF^}}yGew51ejN8p7ASMu-&Xge0fzeqfjwGGG-Uvx+ z6_nl+Ub@NSHRUx6dK2%iXp~=bogp%`T_)#!lE=qRktv5#lJ$KsxCGsetSvlI@kT3+ zKA*%sSaiZ|xwuUdY4?S4R@$Zn6_{eln~`A46VnVf4tTR~ErKk=6`3$MR7$i8Al**P z=Y`v(2`xXs^dxR-;N#tEpYECDOgsX8BCgMEUZAUZ+K5nJeJm2PmAEOk_yKpo>OHd$ z&aj@DP%TIVd0Y|D8opA2`3kfM#`t@k3&jezdYmK$Z;hX_$}0Wpl}73Dzu5Z^XG@4I$&6pm5uE*MK8>%eUfs6ucB--~W*A(p3tgD!NSDZnZS` ziYT3Y?Ttutvu4*mN;mmRUABc$x<0qo5MDBXZ0Iu$X;#Jepxl`=xw2OA2aw5;ArC;6cK)K#CU;8bRk|JUIF zf!6)6NAwKO^sCI zTJvCoC*8ij9w$5sXmMQ%eXPxRSCYW8inVU>D(pkB-eeN7nTk3RHL8Qps_}7+MH*)I z5(xBpFRCiTDGHy-?yMQJdz}`K+l**U*1O9Se%vR<-b!dXZOx@>m%OHxx{Pm{8nOj9 zkSA~@C{#qEG~Sw3y(nY3Pxngx9qD`j+yI6WVoP~DRpaK*%z9WP6Oko+_>DYidAE_wolvFzI#p&)1Kz}$Q4Xjuz!C>pcTu>GzxO*6$fcO$+b*&fe z9G@a|Z$RkV`tY_c$Rr?A?!!vS*(K(aq-~oiLjUR>gY{OM2BJ+WUW{QHU|m!(GN}2m*!eSnCb^xb@QwSHc6kHJdBD0E_vagPB>+DkY7!Q_xvv2Q7f{ubb2`G? zYF#Xr+o&})#Xd!UNtyc|{4Zice-^cTn8KnY6vFQt$d*=6${4SD{3I_c?H((h(xdYr z{I)40~Sx zpf0fzB%w%KlDEl<9(YnG3Zp(D_V}wMRE;-hJYY&GxUu$PPcq0e{aS`-g|~L`0$nc` zDGlD$5J}IG&Np#;_G1La_8oZ=fILYh5b7@@*=8PPc~#tIBY-wK{gbaH1+z$rOqS}o zMPF^c829t@?}N{sa<$CLxIs2dCyt3DbqMP6nP! 
zoB}->j%L2hLshMkoFpMdI|w!=?q^(Nc+>dhGf{@IX~nMVH|+KNayNaY7du1;9#aj++dCSTg7@EY?@^Qs%XrCs~N8& zC}Bh$6m54meAK7x$r#a|sUs}B618YNW_qdZ$=FC?Dg#JVKpO8a5JeU+zA|w}8foD= zUAq|e!XxN10RaX_8cv0Lvu8J8!@JsI)O{3Yq@v+Uq34Q;SDz_|U=|=|;#k?T`(Pf+ zk{wBihkWenau049ALdfZgvGYLyCmLssOaHc&O6)SSE(tzKM0H8@)mfsX=;_2s-)TF zv>)5qzmk&*_)Bno4{&3xWpG&~g0$WeM}-EVlKVU~R$M-~OR&STUZM=Yj(@ihrL`*_ z-Z=>J_Q^?5q+#>@(DJPHMU;BY8Wu5-Ys_FC{1 zp7QfkGnsAA^|RM6W_MQ;i3I0uF_)#ttGUV7tx+6pS-d7N!HtN@`VdTsNx(-^|Kv?7 z+ug5){=R=($h42@Ync#m|BkKuonQs+3_Ws`v5<1tb@*XSbYe+mkV9V@XdHF)PM(8~ zLy3dzy1|=TI2DUrIgYDcqKf>i{{~Z|hQ!V8EmIG<+|%$!5V31Ja&h^U-^7dnVs*(3l#PWNn zRqRonN$B*7vYR9D%A#$PrM+@{;r5MKV4D0%r|Mn`Q$n?M10tHmX0|VKsosL=xXRWE z9!)`=bs7P>Bs+OGfW5fG`k?yHTS5~RT-q1au1xgK;s%LHCu3htiyd4BSrplJ0mKaiXB0H^|fXSPb(?*YYOL>u>7zeNVZmDb}}jz>P9EBysDOUASQ zX!{z6ik}K5BXl3IvT(yHbrt=IQq^s~*ikFQ5sf@DobzINW|P>3qy?ddiV(l;&NOPO z)c8mA4p*{S2nf5?kybdbx;|6EIB;Q)PF-Jb%_*_vBtN6%e<2962_ofpAB{>z()D_y zzrAJhh7;Cem3@k$Kzj(FgmLpHR}0Oyh{t) zP=0UreC0{6BgM1x`rcWB;K*_N)n$NkY+uC)9sR^s85jCQbRn7|+RB0*_qx77Cn-$` zPP7sa0hvir1y_Z>uS>mC{J#5q*Riy}erbO+>6qv}T%~)54O_3rMtM`Rbmm#I>wp!| z5gN0pdI6U10veqa7;Y5OSh_cJ?e+-ogSRi*&$8WKjF)lh5pRFt&5D}mg{HaBkF zpMZ_5nc*w!o~~#nb3)N_HFdJZ-iwo8ge~ze-ssCvdoUG)- zTN^!X$>He@uS@#3NQ4|UA+I%rfcXoB4PI zQ2df}mR~6!cQz6-blVfEU0L~#}EGTI3;C}}Vn5KgNy#o+1nnKAD zih#RJSR5he0@1>$xB)^<2+s~U-!9<^m$wZP1Me}-EgcppgMtsMviTXGml_RoKOU%n z#69gzKzIp73!a}2)m`KQ@PZf!L>h^ZGX>#0E3Ht?OSKnVvP%a4EH z`f5Sf@jN{AaQC42&EMIQVX!3w2{00nPC>yU&4fkc2l6o5@(kY%|IbR8Sq-UYMBm4K z{SI8$>i=0UpVc1xp1_NLdJZsI8a04r#ggI=2o1P_^wvm8fKQwP;PR9| zXIs~P9{N+lmRziUXPlqrLI=|+Aa=aO0X%l$<9Vyyz(wXW1+)q$fw~b|*U`;^n;{z7e5@Y8wIiucK6>3;TR->_&t+_8QI3JLjvF@B|!)V zwy|rfu8P*zoO(;XLn$;&l3jKN$MnzY&jB327C_Di0J`169x#U7uCVf2H8Kqvf%Ff; z=0NPL%L_2o*mp!S+k?Jv#g@}041$|0tpx&!nNw%rNt*ZqLLPl&h13>_-)N_Rr#w*- zkSfyO-%=#goCke%Lk!SPoMiL?e`pddh%+BU4n}NRyI(-gXrhrUu;GyKK&-fcg0NZ-Fwl+km51q*shE1axkYHOuGnRc^78@hM)$ZUB=@b zBpLvp0zCocfNW!*L$nI_sm*V0n*0InOI@8K-|o7W;?wD>exVl$M7C-HGfwY0ojd_G 
z5cgS~`q$NP8n*ZAyKcRVv{j>^CL6CP3v5|u^rixjGz&yoOX-`_gQHq4pCs~Jtez>P z*w*$K1OGOd^DiPr-@$>GTF)L#tHbG7NF5Z?>zXcn1>Cw{fDeXuL@d>u-w^Y<$SFf` zc5r9V2%sK%68xa2PP6oj4B{qVF#OY%{O_!q0+kZVYJeT`n6*FlPv(1DJMxpsm5|OC ztMB2TQaowyU3l4QAd$<`Y6x!*iK{}sV;4ZhQzyBc9Rs1_=|dl3!AK<`0KFZi;5&Z)K7Y8P9-zY+;{}V75 zHz}Sv-kWh#6V6Exk^+=$hn5vgRZwgM5*j=ELywXqe4Y{sa&m@AwqQWJ?DTG?)$;rs zU4Y~f=N6>c@=gc&FO4%7czVFaGzE}`OBR5j<(^62!yV&uq7!v1v!vb)|CnFsK*kM7 zCPp@%IwqBG5)wKCPN0qbT@Y|Mp}$4D_^T9zL;mphTbZ!>QEdQ%jBZ~6|LuFvAbAcV zB0@aZCo?%e`{!Dd8gUrkI2sdd7Xv+`@yQ(~uEPKA(4Y2O(=8V8Gb4JrUfH(ZNpF_*Dt-F*^ zz?884U=-+0(YrcDdztNM$e!&@|1COk|8t+1?`H5r6+s;NFdmd|L?ctHN=zkIQV#e#Kr0I0yvkJfbAb`J`dFDjm4wTgb zabwahxE`$Rk32ub>N%DH%ISnGus0e|DB@hT@2Q+9qlH~Yj#+y_=(r_8Y> zM6frrKo%Vof0B)8n|~PaB4!MD)|NVn@hOgdjiH24@24?#0WIvIW5Ds+tSK_|1hdYx z`N}JrXuhDkn_JEtwtI@Bcf>QrIP4pRO8LEH!NTJ`xl6e1{4;-_sL1>HXU6e0-qYTG z94wNxMc`Gxsxqk#80qXBEaD|K82ytX$&AGcAS!2k)unPNn5^E^%H%eGaJASAfO zE2R6a~FgjKd_-&QY} zMD&XlX8e6oUq1W@RYmU^V)a?r*0MHd?D)Hc)ys!sN3Q~UJM;`^T|k=Jv6=NK#pna$ zmK<(Tl4?O8PWF;=QTUC!t1u|M@E1t&`9Sh*$Mo3c5uY@xzv9@jh43o~{TiLJ%oNJZ zr}eva8-&t=W`=S~Y5z{l{2d1Rqbk$-{$^iixwDi5rnUi%Qadf&$ga#^az0W4nbjG% z4}{&|!x33}qJ6lp-jcP6G2Vc$0oecF=4N9EThKs>+nh{KunT9Vl)Mk$jeqX#e(u0Z z|K``)*SAE|N-|v_`K`}%7_bfLw&3#-#2Don?X?Fs0rEP}DTS^!MxQq`i?w(aYs}X~ z?+Pt2JzTm{-l=?%lJ3w4DGUb-*6W;Rv<3+D*7%;@<_g7E`*F13rZK583}SSZ?VCXK zj!K_x+@EG#?A|$(=0Vyzi0pY22$1YHK)Ej7Qi3vnu&MPbEluy7ivF2`vRRsBuPahN z5(oK%H&DKz29Z2LZ0t{|MwPM|9PiEsv7Tb6XVI^MbiH}aA2F`J`Sn?G6gU^jm9#-^ z#)O}pRSiui(unLAw=pXEd#@_9aEVU6etZZ%oD)`V3!dmt5h8DwIM?nQaW=b+MpkQ# z6|cRkt%~$eRr!`9y*hSoxp_PD4)^D(XUyN;)vpp^QDNW_Pfktw)f>H&yyu7I##vi0 z$!z7Q{`Q-+{@W0&ji&UjspmE}b=uv_&RQ8IGq4a5q15<-+=9A8BhQh~5E8*BP@#Ss zO2+~R_yF06k3%)mr!xLCTAzhDTcChT)5w9>yuqk?khJjFC$7fc#kU%kAK&^Qj_ zFG7EeZO8iu#0B5&y6Jkqo|8d<;O^5g^tVu%-+3Rm1hoMFU8tD8f*zj~RW{XN*47Kx z0N<9czRcFg&CSDhQTFt@wL5oLSdh`73lO_tZph+q|6i#fZWG}C2NB>di&p}do`aN> zr1jv=TNXbW(FBY_ZU{OApS_eG{3-|(x$<=_Euy!z~{>er3Fri2E@MiSTOHjMre1DSIs&8C3(*Sk8J(r2_$*IqsVFNu(-tIg9I 
zoMv{&?pdR|bi8ockOeIHCqZ)OWL8}#Ox<6#C*HHE!v(S^0xL}x`AH%y0~}eX ziV!JG6$NiyD}9~WGC3RGR!q+*O4ZzJWnGb^if)2aOon=FJkPC?8w^VzBw=5sfkOoW~c zFBdRi2eQJOT*$RFzZ)h|M)DjId6dsffM>Z<{nTxnN()Ji~*T9vIeZ0 ziuZ>fD#FqF#yd3kP}xkj-E(umJ~@mze-rba%o4@3K4EP=1v1%A<&CD;#<1KK2)W%1 zUxhg`PK{2B@;zlpkqPl?I-M3kE3x2FY@i#T0UU?gu9Y%;Ohc3?qQUS~c2};og3(=q zSD@*3Ig!7fl+~Qt1d}(|5pCF!@|!p>*sLpNvlD`;XNx{D+wlPb%Wuck{clVB+Ie1* ztycEDKf8_mVR0=4mk%rOJNCF?arQyA{`pg zn>)&lj;k8(@aBsN2ISCP*ORinaJ|f?lGvNTQ6LT*ODoF3a&j24PB!MZbaCELtYn|a z8=nJ>15x|)^%fqhbl&tn(o`tc9xUoHbz+&c<3F>Taq7_e4W-jSHI64D%${~Fu!`1% z>uLJ8$VzXnZmuHRy4O6E^6%|rRVyTlLx0_FJiUXC3w%_yXtqDcf(nU7k7Y;9-}T@| z&U!R~W!@fu*r;-d+FVq~0-XYWY)Np~CEnHC30gFXJ*12Fj@%2VeM9kZ6&%^DM12d^ z3hN%nZXIjmx6Z5-Ie604h*$8T+HY`B;3aY9%5x$T+&6G}D;*V5*w|TGhn+?mw5yF8 zw>)8AwqAtYOfor&D6Mx57wi;4xzNWm^W_tWDlmR_7KqR@v?4uJC50_)s~M7^L|n*W z=ukD1hy->nRp*J^sxMNo_0WLR5sQXYFlzG0cEh{`tUZsDVeZJFU{3ezd>&G;mbDnC z_x}Cv{t0RA$eSSJ4bi8KN<^!MDR*Xq+u2r z=3Wyqwf;$Uoi|kXuvZIk9n5$o@8YSjv?l-7!sR;BHJG(ix4>~=B>%tyJN#YX6<=}xl_k#M3}8twQr{<$gWnmbz{qo zAJeO6`?~cHPw0CoIOwMLazUeVvS@bErboSojA)5rso&q{^*(KtdsZ3FnKQRkA8Y{_ z%C1QOLj9HW1P%g+k&S@*Z#S)FFO3|dk63Af-s`s8CPQope*js0A;rp9TWL1j5id;Cmc~!q0EtCdiG&v*ss(oU}2O5Rq zU%%sZ{bBb#yJym1=77!Kq8UQLc}Ez;cg_E~pg)@NH~T%)DG>^1z={bcg$Nm*j9~x?3*l9K7gASb62bh zv*qQbYzbaXjxD><9Q#dKH<`aMYlxBoes9)4E1r;~Mi7rgpHHE{5wBz;G@vNK`61fe z3(;SfU6i&kV2*lMW;!A8gd6$ZOr$cxC{cxz8}lY3%dzNc?i$UobU8g%ipTDqAR)Qm zczwkMahksbr=vwEzyhJe07TqqC8PxMH_4-csAskg@W<^qb%EUieGU9H0^$ zCUBoL034*ca($JBnFf!;qUU9B~^(y&O z@@bC8_?Klrw`}V|TxlxX$KZQ}C63YNErcewJn6ah{E;koeFVTVSuk(gk$H;)WZ56@ zE}YbazYtz!JCK3TlP^*?V=Wh`SAUY_oNO}R>EvY0`K(iug(4{AC^!lJ`irTb=cFKI zuiWn1FrDf1oo6h!KE^dX(uIFo2TK{T(VF{vO(4e$_@?Pf9irFX6&!BgihkPMSYvl0 zZvX*+&I#u|+p?#DQ)QLxY0JzvK?XiS98PoMg=vZ?lvv{8LfJt5*rLRrq9e~~614f4 zWZn9Hc|e%hH%&~MjXAR3pc!CiOg}cQ57q{7D(?uEVb{_x4Ey;0l!w~m)Te+M`!MMY zaFYv`=Gy^!o7gmw+d&FaPi|Mvo9_Tz%Acv9P)_F|I4$3Z(VAWfL3fQ)-*{wSAx6_f zwKR#NkgzY3vsVF_)(YNf7ZS!6cGysPNO!Dt$XEnL!DTD7Zp`Z*_L7P!`Rh$8<4T&J zu`N$iQ;KR48?QN-^A%B`L=*}Nt4 
zsUI^5kDQ5@LI$Q3QjXq8pd*q_n>YxUA?h%-Rq=kf7&bbI4)m2OlN` z?$}3-J=PX+#tuPLciSkG+Hbj3)F&Xi-4eyjZLMG`<7oG$hwU5Wcu6ESX?AQAT7J!~ zhinVk{`y5KkVr{)=JP9!@inT$BJpLrxYof2u=0AyWm1Bbw{T1io^-`zXB8Z#b~h~= zud=BLdHOpS5W^+|&zKBCTEtLTR8bF#oNBCa=VB}>DN2mqFtIbTusHICDI*k zVQ{2Qb{|*gI)%J^tVsVZ#DOu2zDT6m=Ka zC8m(KYlXMvj~ncb4bh#~es+CT@Meh;RKk!5pa5Y`b&DhD@G3F7O2L zr{ge~x&OMW*S-`9vF|IutRFX*Q|O`aHU6M$>OkL$v?++_%$wO}Ueb z5LNiTWqtnOnetcIkL0|Srs~JRN|_T7D}PFDx?b#+ZbB8U_D4+7KS^GwphTj}2@gIF z9U?!X@Jq7_izf_joD`chyVxjoX{8D{btpwhc~b7H!42LxajrbsOLy<(Gn$q<4DVAl ze6GJBQ7<~y_+z=j2O-un0P{R9no+ea!yzj!+H`OC zr{r&LH89p~0@6+pUymKUhUm z!($wOH|WWxCLDTBlcTSd_Vb1xeGnL(Q3mpzZ*hgA-hT4W*+7zYUg;jcL(js5!SVnB zhKQx|^=I5C+L;}PNLy5Q3#I2A@hhvg04)7|dO0_sxtGUzm4I#9x98EW+YR;~d6q`` zPgwf3vDT(oP6{#%HO<#8G0dA5pOM3`6OY>aw~VVP=lu8u$jp-etS@flm4hWDv2<1& zKSkuD3Yqo_BPchG33M9S6PLamvb-J7X)?6=bjbr=(T0aQK491_h`)CCv61<#0JD(T zw*b%BsI##kxnP46qnV6rS9&vP>v~8Z{<|ZU6!L1H!YC!6LzDR&Z!~9nVIY^maAdrs zIR90MoDXTF*HFfK&ZChKxwx5KL);P%w2pPMoH20IZwtM%Td~9}z}wfQhaUg;$3sC5 zRVX&OKgd89xwYbixwH*+(&bM-KL#m`(AH7sP!@{~IY{3mriYCiEv6V8Z|RD>cETsqA9Qp@6pty_d=I*MLqZ)M%*P_ra@d z6om?-ci{O%VD$d$XAfkS=bL)RqY6}*ovV%^O5N+zFX6*uW*taSz>>xGM0W9PTH? z|5gx~>{#Gc_k}BNaYrN^|3?Lb%{+?iU0RG|M!y9|NqsRms7=BSTA5g?`$@eEOE=!~U ze-27t?m{anphe-u9|VIh_Oe*8UF*=K-MJjfB__}pH&8>xmwizOjps?vKF~Ucnr{b+ z9}9gQa9LAE&>1Tq+a5izAq6H9sCOmU zhCgjw8VC=R!7E+t$m1>sR54Q+6#rxCb=GAGI8a{@f3^qYi%@@ifqSPcKXbmnc#Rpb zUAio?g)Uol40Q&hNc{U-(7l8#pv25JU7(u|Yyvz`wdDS;f{)N!_ZUGDe{F^0^e^Ya zwX1l1E6s;C4hFl$T;^E56;<`XDB?tU)*WnV}@{pRQBVtaY-24p(PI7?p)z9Lf! zC|;B`?Xp$<%24;`l|6X^-D^b*O2ihaF1V_1P*Z@Day7yOy; zd1(R-g1N3oZ=_@n#09}yxWG&(6qa~)IWPL}fxe(e6dGN2>1*gqI80g%zI@FXD88I} znc%XfGEiqIhkg10^_!d@C_*H!Oh@OUY8aqu9F^rO7jHdN21T5YYp`E_7gqP=Q0h|( zm)#T&Ub!-Q>sHL=$XJ8oKMgF)UzTtK^xF#4(<#Htdr!c+==oYuiE=$A*f37fGw$BB8 zGCJ+L50fs(UJVSztuQUh%X#rS67&TQdS&?X-YHJddanhK50|eI1;yLlm-weu_pvq_ z4q=eMS9UyC#Fhuh_B-2utS#*lW|Q;bvcd!wpkTi*E;?dRt*8)KfGLdZc_E0NaXoxn zMGC|H4B=kV006@t#7+Dn!wddda`eD+E-$I$)Gs=PDeB5Bo>OZXi1!h3evL*X;C+S! 
zy`H-P#MQB_%Vk+Xay6g}TtX_-fk-Eq@einhFde*j4fmQp7xIXnC;{)zUrl%x@>!gS zzx`z~kujjPS`Q>NFS}j_OTI);{}vXN?boBUroh>r-k6#~nf1JAr~tVbsDtyXbo4Ho zqKNgPL@&LdbK=6IdojtDAX8WC!I1PnQ(lYd`|8ljw;fj(q`}^m%fY@04Fte1K{fPJ z&kTZe_ZnYXkUxcLfDJyUl6wQ81qi6Sg=6GMAl1O{Ypewq%iEwosO>XD`QM-`p*gL@ z6ahW{$jT+^a(Mi~)VYPoH@aM#UT4TLK+jJ(Q2z6s8S6u(ll)V=kMR2jZ9xO{`u|nBRft?P^mujT%0J8Gb@0LK;6ARo{Nk4Aec7PW8*GWB4KBL( zf0XVQAO}eh+V%?<-nuws_<<#VB-84?j{L>VS1=$+!aMtTm)#}-R?C9Nwt2TN>;97L z>#7S#ajDBv2(0$voBH<<19ld)4tZzlp^4}LKpq@cl%sv_@{L$uKK0MURXYaqb zcL?&uB^>oo_(Okg1r$wOj`rm@Fc=`0{PUjZR;eZ%TE`?o8X&t;i|0_^-`DGyAjmO> zi~qBBHtI|;3sxKiLQh%3w$?gV!a$jL6{PB4d_9*lRF3KO{f;|-9sD}ybnYz@N{&b$ zPJVQGO3pvT`=4(*PPnq#=&0eS^5!g;o*g;9)`w2M6Z~1!WaZVx_+89bV&8iTOgEq3 z{>EJ47DI$rB5LFVowDDcsF$)3+kQ%SsD$}D^ybzK!so<*K!yIQA` z?OUUMX%}2T1^Z6>)?y;>)#TE1FyjN=qOLe z1`)4GVeH5C#p^a6sWy18>i_`q-x;HrnFZufhn~F_iO#bUQ^iC?*Cp--Hs1%Qi}UpMsI_ng((oiusrD_Eh$36eKhc!Sh)FZZm=;STzG zgWsMF>PS4$h9v< zCG$E)b+2ud62rGv)+}YaZweH@A9#uOA3X;Az*A5TJ<$H$9{gY+y^i>1#ze73 zA<@8sv5=nza^PwBkam+qIZpdPHt?tR@EgFSx)H5@$4SF%`Ert3QGgA{qRP#v>O1KL zDfR>aw9?ytwFh*TP-gC*2gt(DbzMOE(B@P6)Jt>~NNleKahO(o<*xLb_D2Bw^#!a5 zKQGXO0E^`aIH%E>$w_=e59;z>g3ab!Nj%Y?vZ{v+W6Cw~2jS({VWY~_o&MQ8i-#*# z3U-az!X*~U-!2AJMlzw;0y_YpSFfY@Y^WG8P;6w!XuSfmH0~t_);a*8|4s&hLQ#qWLKqO&$cyZMtB3$au38yjTWCt_~n`N5lA0 zxS0DYMswsci~*2LUlXP6!`cy`q&26jZ*olxVxnwjvv1mLI-kyH0^!aR7uTYikOuK0 zv~MSXs;*p<_-)>RkCFCZ2$`j)nia5MkQuqBAXPHxzZePA~WQ z-j$sAW(F}L->}FCCf~V^g6M(Q%qT##1mIWsk3k%|C*oveq4dMr{_Q{%QaaSPw*`Cq z7XTdhUJ)9nXygxRPpINVmjce`Xozz7Y#k46`pzWPsm&?YZw9D{-F5>k7k`}0yj}yK zAr(iq_|rL&^V40TpR!!-fIQ;&1m6au5=^M=7=#{Z7X*#wsNes5Su-^pD461wEW!I_ zKKgGRi)l&PUsTU;NIUgwxEo_u*IppRd>c_;DezBnRrtpH%BdMIIdn%H=% zS@!QXkwc~FFM7bK(9Et42hMYP0}6qgb`4mJuZKGwO8_ErC?rQpsN3WfVar!w!7dXn z0Qi&F*GPQ)aH?;kZNEI1vI0rTZa8KDYqukWg213F=$jvvlG&C+C_RC$WKU^pNA@Ic z_n1Y>cjuORQIG>(3$Uf%Lu736$zW6uiKMpPQkFtGx~CPtdYxKGl#k3fgA~JWqy4FB z!J3Sd-uK6qFWs-q7MprKP=Ct4ejU3driyXDvN{nchGC-?iL3TVHk8u1_*RpQ{Y^B} z3`_$%dL#_-_(+9HVb>qtd;a!|v?<_i*$-F%+N+8CL06lpn-fo>5dJ6V_3VsS+Mzm9 
z4Q#RKOnL~gd3gSJ=6rY70epgt3wI%C(S)%d;RUe#PUF!FH(>prAn>)N_hUcW6`h;V zv#h;Pn>ctQ0H*UO`2+D{oJehAV)4FXWVS-#-w$$&*6F_iA%-clE42s8Eafw; zeDE?%e`iZm^Hh?|dV<-xKbIiU0C>Y28^o99G%%PLl5m$4Pky)&!vMof$)*_HULnLK z@0{&LjkyQ#1v9iy`{sS!&vu8!Y^6)KH|F6V>>(I~V&-S8^0no<`1+|=zye_#E%<26 zpVYz9;|K&Wx7=!y&MP&1AH2X`U<@EWkO9UHt*q!OwO!Vc#zWbS+6O7G{p<_8HM zW~{dpo8C|GF_mg#@T*)2K!uE%+~|tBX>Wy&g(QV3aj5p4a>Ug6-%(;_AqK1%I9HTJ z6%jc1DSg5PySV-)S&Z%+@>Y+cXxpjM23G<6b9**m4p5ZuVRHoH@jVE*A@MI3;BPo5 zVP7vmm##`vm$vGEDx%V*g3|%Wm9zZkNqlYm`)nrhCMPWXTbag{0qe>vz7PsvB+l=k z96_=`od%RjPDOg!oWD{;g^iGE(+50{Kg^EXLfO8%N)gZV2Ky}hfY!FC%rG3$Z)$=q z@*<$zf4^lSgiC?TfaTgT>&w7VJR+ny+;983A@=vm2MUN=i~J0=3rim(4qo<cr@tzZNJECYl|}v<{uDF@X+;z zUTtG%cyQbXwg#CVPrpVy1w^g;S@)n=@p`>Tde?#UGfv3TO#>(FA#0T1{NG}U_teo& z`dQRth~8HGuIAV^2=XYFjT(DI<_rfUN#=d1ezu-K&k2&83O*hrzxdXD;&)_$w8`ke z0WI7y=0G~Z)Gc{}HGr0cytEV7s*cF@EB_35T48UEvn}xWYG0Lt+dKA$4<{X%!5FZ@N8OYBwE z!JPD=6F}cWOoKE*7;SFaA@#)R_I?!MgX2k&Bsyq6QRdBPI*u~j$cBj8VNB0 zp+AtwfOL-YI{}|G%VYB$7|=tV+k;x?KdT^fv|kmN1ZmllK^tkq59KkXV5?>fEK&lv zIH5L@#Jae2!=}XJZOOSu#@HE-RS)1MJzL*6o;uH&hnDj9Fp(q$)Zmna*9~ z6{3G0T}=@(PgCS4ICqurrtLEodpUh$I+PN1PoCLJ`2i6aP)20*8uSd-?>3)i$FKQ@ zD-iL30j=cWG3;)mPaRLm>X9OlAnaXZCVO@(!qLnb;j?>0KlLcG1r zF+Hr{r3F3fj69*c42n8}2nU65HTyUT_Tb)f`ZR?ce{?0h*2Z@>`;I%!KJ2Zp zLTn;l&HXxg;HKqMf(*?d=B7CV_3I)UWU-(n$@(i*TWVxvRL{V%VopxCIkN8^{+*=; zH(;hYEVNBb-Gs#oXm)Vo@%4&n$NJOG(pz}loK7C~91h=7qfyr+hbgtg3;@b$SNQ7v z`rSsRZkAOR-oaXWv`(d$Bs22;u$VgB#Y4*6D`Kx6I$`d;2M(T+j>9qM7zwpR#joW* z*>&(ugKnFf{J`3Um&J3zI86?N2v_^({6~UruA~ve(1rq!>gK|jW8d#fXMIaGn`9Z@ zdgDl6@hdwafRMUXUwaQgm^pIlYdp<|csl3eC|t*E(dI(?TS5_fldzykNMapd0C7F| zp?*ah!3kj1F$|Bkzh08PFXbc~HuBg&pLvnw(fwDXNFOuFN)b2L2vqL0wqd}k&&$U1@+_iwTmEP~Ik7;V zaD6%`q|!xxVOv#gD{_!h6@Kr!Ey1vqt$!UiQe`P}Si6{hcK-?bSXo}hq|dMj-oSeM z2SQJm*16b!m!o)FZB~Vpj5L(VfrzqlbbI)+@KV9?#rH4=ryG_SR^PTsUYsA%A@I22-8e~s~3#4J$6tZ{>5Y(tqvXv2b0Dne-uRcwbs z&|(#;=-A?ca+&~xogORoE(Gp#z=UCu0`S=8<{|_J2y8Bw=4QOU>@ckgg45FHE|1|+ zD$LxNnfs4XR?QC#c{t6@JTb7?nd7q)v`Hm(%tQ!Ip7Mmlt#IAVPOA#YRtI)CqL-_- 
zZb=?=;qq~qFEzo-RVBzQ?`U3^E@mk-nLa2sgvaGO#BZ(a!D$pTU^+@zipG&(nW8Zx z9M4~Ap=X~gxdkAHrMIgoLLIrfciONk%|zMWgzuqou}FDZPUQSi_?Y<2uffU8eJ)Co zu!s@=u-?X!v4r0w@Ovhiph}`C-CqjY8eE?4ppHr(_QRC)mk7G587Ly z$eM*-=mkRheTPjTlWCcRr^%G(WPE2SD$o3q#;6qCbj0-yEUCnyYcITW3GvgHsV=g%KT{W8T&3FEyM{tuh7Ob(!qyza(h`p?ljw;g( zZhB^LJ9X?lv{dTGXJ#dtbXFdrrYM0slYBhZiC~(M(D^T|2e==(+?-EtI`A;tRdT0OjP~dL>NUGcMvG>6ZXnN{zix%I z;`h}ggY#I@E0T59Tjx4pnBRH2(#0`9nBkJO#+(p8RN6Dj!;0jhjM21vOUO4)uLbul z)=YfxVz%|*$R=(tmu%hW)0RzcBfJ_#Y(LizKF7$u@&ayPiUfkbKQ0~ccsO7B5mE6$ zbyX+2i_#uN8gPB*Z0pSw)$I2cPic)y!m;+76l}&(1*D zb{V?@?!XgX+9UdJt>Ol5Q!ZyTMMr-O^P~Y=+j2z#3sR z0~Wrp?ZJ@E>~zq*Oc(~Bf6H-~-Ly0F=|35L&oCe5h-1A8D>^Hfh>Cyx!Tz_MPWtU; zDeY$(aOOzd4AY3a^ep{~7&O|Nb}mC8V8F!I|HSi{^St|JdiW$(^7zA7m^QmD-ej;M zch(dZ1}3C4!aqQ{RZRhW>qdb^15qtj%)0@M74}*2UN)W@o{!}Db-u@?>%oQQm&5Rx}<_UHJs@D*npqS z_}x_RgLtV*Z-lT>Ahb#CX)FuZOse**NS5%fhUKOkk0=16!$vVP|DY0~_f2bbF#Hkm z?x58>?WcbP-O=V{_^`xCgU(mqST9{LTv4Nar+zB~EOFv}g}7V6m@F9!z89{rie z^Bwiq6r(&>COt^{;RSl0dBUIvr#L|m0PnH!b6Xo_?x6g=!kF^%6tLyN{XS^!}PQjPCO! 
zZXnZK@_@ht{AY^FG!zYdJa=jaAE|!<94zgSNZ2)7!>MJ^`lx?1=EY0DIJ&r%9B46m zA7#}81-~ia4F1L7vBZ1`r2LM~au?D)?Q4MVyIzUHpe0OVC2vSZcya1Z-4aWN1+qf` zjKP=gJM}#dPbW)owrBP>q16q_nU>KxhtiJ2r4RIH`f$3P!2kO*cv2~}PQK|EudBV9 z0p(j8Ikl1pu4Ys#VUGL^)RtIg69Wm$H zcJ+hO@}jzxg2vmbePugE$epeY;5c<7(-eMaV=$U*0ue()LRT|+NIlaM4aO62)$7Y5 z$fE}eb!WW}Y@aWqxW8nvR;;w$KE4D_vPYkdl-(h^)Q7JDtMhSksoNkHnZ_^RU-vu& zk9(r|K_XfmB=rS}13o)HGJRuL`VISrQ_#J;z(OWxn@;-)GLwzmKob5Ju;t7MHD_9G zauY}+<{)`N{GF7U(>8kG%Ca9&qFg`regaJ7KG}1mk@I(vsSv(K@%|(w$V1Y{56n$$y-0{DDL{)cd8MAEM@kVk*UradkIoRyv?1(ms`3uZ&4Ci&n+9 zL7>}0_n^dq))qkdvM~J2ZR+!N>&Oo^j{N7H$NfSW%ny#!p zWM=yV7?+90N!&KzbG>j%gD6U)GvwvXNUfcZR-c5_1~b|>o!pOrwha`N=LIWNaLe&C zxWNQ!2QUl22F$`HzUf(yA?K>`7HM9L^ps{7_?E3lK7B-jBhmCPchIf|Dt(8BeI%NU zu}6#Sx?<1(Fv-*JPCO%4URXY3Lf&cEdnC&PMOo?rkH83;Tt&h-Sl|ory?Vw!6$5Fr z7J^Ijc$nZ+aK;8OMWnvP{eayw#F!tE+U2A-!2&?;UPd96c z2Aa?sQ?&tpf~;~Nq~UKOWrkM#3cO8=Bf-p>BvWANYA^!ob6(Hx)-p1%71FxA2%}$D z5#I!c;O~heK#qODV17Kq;JJn#!EF0~?7e4HQ`_GytblZ+L+HIIC;_BM??vfKmtF(` z5fuo%g`)HhDjuA_ z*IsL7tvTm!PM;?uFCD>jYT7@FFDn9bn$)O&q9Ne4PyEjW1?05ba&8^XKootLvggpL zHstQ})WJ7MP5`F3oVUsWU2rkEg4{)<-;&4ArG3LcLP zr1t?a^jCKSb(KaiM^})i4h!|qdCBeBjTZTWypiMZD7JOlX|AnvxK?ngRh-6 zQwbL|sNUTMOilL+JwPD-FqO(gpKMO)4_YvfI44Dh2H`Q@WK%63Dz;J%qe zq?K6gDYdfF3g5)9wE29Bz$O}@$>q!E)1{y?9vjf^7<-0-R320Hk#KqB2LU7B;Smm; z0zzki&y2(*X!Odub_54tNXYi+28s>AcT~WPLu27htB_h*>U2j6P6I?U;OOkVoz+~# z1vD0O1@H3S*n~3u{4e8?j0Z(Z!y^WFjgcf_Y5RAS`Kh_HQup`|esMO0`#=UPL=rb@ zEwm7^j6?|q1be(Z^i6uu6cesD7r#s1?E5@P>LKeRPi_e$X(pYvrYiQ=0pOCioD!29 zrlmI&$^g-;}D==5v4|1BH_8VLB3_+!fJ`Gj#hspd#}KsHeBL78Y*U&KoOYQ@yP?qJ5S^m@}h*rc-fdQ+r+Xv zi`V9m%ooX6fWeBQ&21OOs5!Y82wZa2lwj}HIIFy6BJra!PKF7wWu+Tch4m0dJ(R_^ z;xePuxume8xW<=UyAr7{`I%ZipVk!r);%*{BN>vi6B+aK+8ZgQ`G9_}C-IX2{Ztyj zjC^xkc}$j_b{%=n;4=J4F*klril?A{j*V)%%?R*MSa?$PN^X_ z6yucK>caJB;%M>B5P`P z_yNYxXrK2YT?0ULeUiy&*QNPD`iLN&*@$JAh8rG zmF-6sg{c|xW)p^LfD*R~L=LKV#Kws#NvBn*bs75uh@O$GpXQhGE+0%YeAF#Bz{{U_ zbI$ORrh@)LhF~0A4t-6R%l~ua?o~#6B3m+j(Q-plBu4DjOza0~Hnn}E 
zGX@!)5&sz{#E)!I1e9L=iqXiiBO|3vlc=vkkfQ2^d3X7b8x1Be!PX$8QU5WZwnnM$ zPK&T;1>rAd851FDRU}3M(#4`9)i1gfQ6T)W5u*LD1-O ze!A)KC^rgY#ZT_VxGaz`C0NZ!DKWCtfqS?8p+t)Z7egH$B!w8SrBl*Buz8V5`&Pbn z0wWK~|IamtPX<`T=W#sQSd(OnR`W8q95ri!oRN3jY#Ctzr=-e;RQwj~%p5R@xIYQc z?o_yC5T31l&fU>`)H#D5x-n1~ihS?BZMk2d@GU3;>ECaY827S(;@n8JP0+(6)dDf- zL==;(X8!yB2fuW}`B7(c|11F11~K$SuIF@8UC-jEM6S+Y#pQ_TXirJ6Rnr#;qid*{ z3$tC7>ZM3S_|O`~-lT4@a9BWP$uDN8Z+bG>(gwvaFX7$}2>cldlfEjWUx*hJ$t4Y) zR-NVQjpyTq)?10+P2TPOBO_g%W(<57MD-c;aOZw&nMf$nBKd{czFLQsUBPDyeYP>A zrgMhHRq~6o#RMrCXvUx2=I_KPdlGN*e;WCr4+t73hq^Rf44ht*oO&Wu$%wT`5KT9v zM4QUtoEwfSF&YZtPvrQ1<=35?cDdUj<>XPQ9qR>2f$BbxOnIHq`+WFhOL~XCtfz2I z?mXC%+M-^k3>lRb2SqpP%6&z!y@oKI$|C+um|Jy>3^%I6aVqh}>)W~rA-1moR_d90 zAY<>T!P{RZfvLTQ>6M}xdV?hJnP396O=^X!?eHDS*0ddQGdZUvQNs`g@U^u{y(YOn z0BPw>Hy;F}#!$KAf0HIP*Bw?lw>joBS$Dckv-Ym(2k1_SRU{A|?hCC8uVMGZGScbd zydQWCkr=@4>XE~}M(FMeDp4p;S`&5mFsOe^64&XqqG}OxBOWRf~Ot*9h`%=m3luS#iNVXhhg~dvb(*rjH z=jtYkzSpsqA~o!#{T?@iuKgmcAPJN-2q~@_qj;J7f{whQX|80t+(9MAoTjk^%4M~^ zyCW2Q?F1vfZ)3VNrf}CTx`cu+As;!Ud6u<)^C^m&qXD18n!&JZ~3Szn*vhO}JRkfObx zQ`NI)2ZU>#W`FOLipYF?_>csGJiNZVc+ND|Db{zy@<#QRn;tUmYN7v4*gClc!*$21 z33iCa)Hz!_>*<9S7o&VPgh?y=B zuhTv_@20Pox79tKnk2}(!CqN6IBG;G5zEeUFt`LDlPGoLlWn6qKvD$lb3xuyJWp`U zx5AJ$wu|*4RxbwA)ak~?DL(Si>_CAc$@SpacQbFBFNZ#5|1rIHh)cRb)b-CBg_inp zSDCG8G@qB>N@e9FkY;>j*G%=k|0)Q@{;w9GwomG+b0xVoe_`XrN#qE1Pl;g#78Un{ zzI|6l;IhPk5qTxb^@4s_)LmD$*ZMxjJwaNR@MZe@Y(i|`SKVa#Hv~{+CK|hEMSqLH9yWuu>7=%Yb|*nH>Nih3G;mn690A#mGG|G zuDm#QyTH`sDFEQU0_xrK?@UcDW-;I*<8CB2a&x9%NBh0^VJPXH{wg^&Yi`={2%gNI1WURF5b9vs=FkSo!+d^>RqXGq1 zsN7<6Uqrqf_$1fee$%DrcQlrR`C+`#%Dg!lh%LJR%i8|uoqiATgJF`GSf?8GOKhTJ z|6 zi5Hm?yTxA?p{2c0tpFTHF*)Ek+GGT174a;%z}Y0x8ZQ3#{dfm<0;!_Zn3O~_%zM48)3Y! zK41!FM#&YYd(Y{653i4ooTB$VOy3)fU%Vz5(AtYDG~=|kF9l<53t`Gl#b!jA+qb+? 
zqvFKKcA&_l=n2Y>PqB%Oty9t>o=FV*|mP@CAy`-Gyty-eB`r zw>)Y}XAP{OYUQ-xyOLVshH~dhH>Rr`EQX@|;zcNZ&q|NM;6R{SZOoR+r(0&CBz*`@ zCBXv8S)YX0ARV_vTAokpwH_DFd37krt67q>)V4Fy`S%`l+Q4z*Xx&;JOQ}n;&ZZhs zNQ98a$<=P7sGKTAv|!)W6EdPXPG_*#xW%y*8zSyoQRx&&s!^1?7+T%nDYVVH*(Ffv zSD0OV@#7_r!+2M5REUdmKf4zSkA=_EcqW!)zFbjCq_fz2vTd24y6EZP|Xf;0pLB zokx!3ayTPOxsOZI`s-Xhn5I)}-p^FNXI(qHGu}-pQ%8k)P+StWFQ1z9bW3+yTlSs$ zUAk#5Kh+)v3+oSn0`^vY2M^N!?J{4r z-euwM^qw3yD{eb-8*1?D-;f5U(vjDp=ac@j@&7>K%5NYjyi}*~B2<%HSu%u*aaHEx zrvLpW<7+tsO8G-Kz&h8zvS+Ly+ci&wV`?Nvn*Xo@^n?H0Y%bxJYptb)Cj*ib#NSLW zyIHG?HNCt2*`KJvPx(Q>sIx%d%IG#`AqqN>m`MLiR+Yic21LC`h zNnoP50tA-SCsw$j*u5cp5aVWok@30Mwua2&Bk6SM=C4z`=VfCV2mrZ6@ff=(m=ZZb0YsjJ{TS}ye6z6?}SZK_xKo+-XDK(INtkuJDNh6Fc1pl{mrvbp4}8sx6MIuQaiOphbZ2S z-$s=~r@YNGN&x{n^1A%Z($RKh#Jiv$`JIXPj(`Hn_U+d@GVk|*#&|oE1`SsJmvEl z48{ilVy=^7*>wcwqaC-wjc(h=18L}|kOCc;6N$qFz{#;8&){kF=JQF?M(1OJDamKZ ziFgfQAY0S59RQ7;o%}D9AE?&12gU(fKPdN1ngU6>BNIf6C;O0;6IeVZ`!6gm)NSQh z@dZGh>sT#1J1~G=+k)t)xJ`JzdbiD!5`wy>hSi)p8P~U)Pb5_~LdTr&@#OMKw~%=+ zxE#W^vwCggoiv)@E`-;Q*wu0YXy?>UuH0i|9TrZ zY5!inUlwRVZp59MTR@x&n1YW~ZWLfUCTN%&Vb z`;(Fo;C8FjSHDGDXc#g!u4AM($2?M5AY<c#nAcveE!@zXi;&!qYCu zmc+O0DW65oN|a8m1F^t|HzK|cKb0ZAW>*&e5h52OUcyUK1t@y2jiHuZU z$5-^OcssodB{7i;drX#BqcLpAgXnY&BstrR;~&5~hOraHDL7{#`Es+8{gn?oqpBgl z-X^F!?K%0~jT9D3a^gbi@!EG|&z0t32H@9tlpeI}=!v2R5FcA%yyF6bJ3n!FOoI+V%w$TDd`c~5M}EDeHwC@6$Z-R zW74spI8y%G|F-$byz;aXGRIw&6T4X&`8iib&!aKh734bx> zqgD0egjl7oeWaGfN!Bh2Bwxz-@KdZ3J@++fW8>pvHpy3T_fdwpgzDQkmKZu;s^!k< z@O_^^P+Uce==ScXA)<_lvi489^Tr9sO*{vD<^QO8SS{qYm1*j=CCo#jFCqqYMyXkQ zSRJgAT=qz1wQhLNYc`IIIwpheqs4R!KZYumV&ECqIi6{q6wogBrv>mi$)|mrm_9kT z;;=>*>{z>tm2bwQDhupx>6Z!jMyFtK5DFG}LX2fMrWknm_3n#U_92xm`o93Ra^Q#& zt@2iy@!l^m-`@eq7+b_j$w(bOeg<10#e3JPD8ba&;b)*0(+c>8H#+_W`6mC^CptV6 z)wf&KqW*`TFNtcGzo+Kxo8qL;0_P^4Ry#l8YkdiNJ<66p>s`)Kf?SeR{B}8JUOPg) zs}OjP-v&F1?NGu+sT}fSHzH!%Sx^@8L*&AOSd!$lB;sf`rY!ElIxp_bVSYnR%UR+h zRhOcfg@}=OZyLI7%3eBqtZy-)W+iQ_xY3wjRFr4n<}Y&NCW>FrFk%(nPB7WSPT?mR 
zYYwGhi77?Te&*tDt_N+p0~N%xa5QrL>Yj^P!#Kzj{%9;d81D;e$mGDj!BFrKnl(yoSbZFzwlgbxnps>gq+>_Z(OkA}Pr5h;B6p5AXZmimOa&%6+3ztqZd75;C@#Xafl*4CbBYB3A;xfXO zU))lfsg`t~G_2>fL}kSc8L~`x^LWpj`8JXIo^ndSN1ZDEP{VZ^*A&E?iL``0qqOYd zamRdXN$|w5=HICryT3`!*Of8CJq}PR%tc`6S0BaCZ?Z8y8+&zW0OE3a7&inpY(CU? zpGg`r8NA!#7V>;eFomMmX{R=vnKJdfGAJ}GK?*0a;&jaBI62-Tp3@b(B+KMT56ESn z*C)#CX4crGJBL&9c0fw=U*1C5YH;IptRg~=K!w9v5$yzFRymGW(=pylJ(0YU(42l< zcG@dx6O`n&eb^+wnzHWD@Sn%V+em1kRDN)viu(mB>1StZ^}8cbJUHDmV~g%JvQZnVvMN;Ba<*Y){}v!iZ@v25hsl(?vl2(r~D4psNmM7%|jypQcDe`4yiOoVAi)cQfAMFanEvqcuqaPw;n62v>A2ULFfnb8!+?-2W*seS3DY%dw0QAigF zTkg{l&=4Ps%dY5iueG+Oso>He?peC+rJ3==zMsYJL3H-PrAi zp$_D^LJugNCjS;wQ36lmVe_mPk9R;hvtHBd6f^TOJ|2@1E2~BxCxt$p+S?E6*^mzP z<8`A$9rMl36_k9gEa$blEQe6lv2&*J0V4?O5w#H$r!s!@O6$T)aa7E513^kt-1zH4 z907xkyctcWHZG&2$KAGm04R*PmWo2{U3Tu4L9D3-Vgofg+b{K~HhjI5@KAa$-}GuT z`Hq`e^oBGHY1wxOD7W8nh(?PG++x*LZwWVqWqxrsv)H?hzwO)YR-7Kke|>#1RL<%{ z_xPy1m&kTE`@NuXv!75Ewi*NQBwh@EE`~R+>Ug~(YA*COVaJ%?lw>YZkT`(_&FmR} zp#^6mMc*-FU1coXTykl=m_AMJQpOb<_Z7yQJixyJ+x zg2y~Ugo74X5ojx4SS3e|3e!$yRWlZjQJNR+6=0qgC{GnsmRLMG>5FQw8XO0kfd2iq zrdOgLs&3Q~=U>ZTc$PY@Wn|_WgS0H1esvGP5IqcKLxmS@AJ*$`IB72lJp(lz$*X2j#YOiar2ocK-(|%O((1e3Ez(xA~l-ZcLn z{|p?12b@V+C41hJwA+n`Ybc57M~;psqL-tF-kUIEdV4gS6W}uMl%;(PuQp-C-Y||4 zQCMn3#(E`YAX&NT+h@T(_|X!*Oy&7v1AxX~GQN^yI&}fh{N^#m+l%D+W8<&hhoDi1 zHWeEWYadH(XlT%6JSho6XD<0Nvez*#C;xgQeV!jZl+lm$!OHWWqhHphPun=`ZSXBO z)*s*1ZxUWSs5uxaRBu21z@-bB|NG6gGADInbqLA+CJc`13V~5#3c$R9x9k1=EO8g4 zwWmrc;zP!73}v|wM+d{3xZ{bG&<>G0SeujeWv~ZFLc=J(wON;D zx%N1F@zeO*g)uYKdB@WOqr_tq`)JE^Txy) zTW9k&9!4qx5n0%7@v1}&fzMEG`h1GXiQn(a1!pROm^6tsNMdrVqp* zRw-?m==(kKjacGVU(UYW+T80yfbo*Tcd;w&^4VM#CF6f^*Rjm(XTVoPJ}cuo`|{c- zb-uvlX1p@aKxxxf>`mI%fSuVvA0rYkc}s^?SQQ&raNBgSmZ zG`Le0mP9|y@MO{=ald?(zW5!|TWnd5WV`_>Q7Tzq_oP3xF-U~Tt6#g0@CzSTu`kK^ z$%dH76Rlav*=VZs-e=)b)gF+*4oH1vFe49K@(GifEJ`IOd&cD~Q8LucdWjB#1J!Or z9LYV_#%GM+J31N#a3BFRCOOhttAJ_f=9jl)*Hu236w~f}(${Q1^pU=i6mhoK%H8^S zHv(m-fllKZ*mWXE+J1Z=OHk;&<-Y7ro}cv9CS5YQKbzh+Cus1EW@ObT;L!VVdEFK_ 
z-{B8SgL%0An&YkC15OKz+TsI3f5?{Y^rX^Qq|5kpQT zz>eZ(C*cug`$M8}g8V>%b;MjOW zwAKEcav)YKJFW08?{%=fNY)H`@NFW>m2o_r@)% z0L1egx}D77PaD^^LVIlDcnXzeV_qI5IW6^n%cz$~KeP4ePO3!V3z{q{b}~q*{FE;0 z{pfHkrCnP$fTZ|FUOfLMqXXxayuERX&t1b;+iLsH@Nfx@hpiK477f&9&BgtF=XnrG z=L_TU-QMzv*!dW?1qfiH5@HFNkbw~)?5qT|8b65xj&9Q4d4;cCfnmvoDCsAiy=UR0 z03_>I{l( z4%gkXL|@+WLH8rhd8eY^Q%Wpd31Y)$ec4l3R(wUZ{L~w}(pWj%g3#R{l3sfk*UcnM zMrW*F=$;2(gY1oY?j|9$Ir%IWS*uTV?Y~LvV10GS#$h2_*cNPziNWre4juN^`Q~q6 z_uq9P=GnCW@CiD#X`L#bNYV`X%7CxJP>KnfX0XiWw zn>55V#VhIt;6dyaMpkTNiM?$vY=ynJeww~Zzcu4^iJMYi5J!P91g-s_BK3t3q8u%v zRHO3x&@cy6V0Xk%#B%FuK*ybZ@+iozDC#x&r7wkmUK@=8l?If~LyP=yQF1FO*54S# z*+^oB%wg(>4_`Cz)6rGQd=R|yVCMmoDAc~P=zl1J=9mAf#@5?!j1a|? zX1q=yKj1lib$Osjq!97U`21g`%WL6(wEf1nAZ@??%f!~Fe2$*zS9B9nNkCm5Q+gY` zv7MwVu2@@R4~vxZ+?gA^NZ&Pc-n|N0EUfN8}hCHVzFi%v3G6p`l#yB z?(-%f`J4iWq*W-SZgQYv{{gGr{dC)i!|kayGM<&+Kt&og7Z(b^c$Yu`E4>2k&DtDy;Aq*BxJ7;;XL4q-oPze z2?#guK+^cP09TctcJk&(-)>pA!rynH_aKLsl_?zHx6%+`qx3eGmGlp=6_H*@np46g zHhaHmoTHR;?MnMR9i3kHkEq=Q;|0KciW6TNJD>)?ouMzR@3sR(-NE=kKuTOe92Sw^ zeng-QL)I<)s|D!T0ZE0+KuIhM*^V?pqEmpjeK^0H^XBK`=M50}<_`tDMi?6a31G`p zeM~up(wqgTfqkZA5jO)49Y+POGyB26MYFGv>^(k#fz+ zQ0?4CfWT`gRSm-7Uwx1Gk$-a%!oZI}lAa!D9U*!OqDkJo`@#NpcP{Z|P18SSH!_5f zOl5n|w4;BFfm$yBppt8cbjvyI(aSAB)C2RR1o+uopY;e`#mw6miDDv8+WY*U_G$xU3j5+op%6M?(oZ)Yo(BR$ zr$D6#L=q*nE7Ifi^O%UDH{bImEOXy#{MYpcid|DlN~?Oky9;`H66iG+0p_AF*THlR zVGQic2zID*m}Ma5;LakIaPmg7v;Y7-pL^B>L1V$KFY!qMffoHbSgbh(h@EATL=ew8 zHV|&n->DM_ES+I-CoPmq1k-cQe^Jye_Unh^L*GDPIRnN#i00eob7C}rx1EZuY}xBs zt?9iO?$bf9%Zay|0i+4XJNd11EG$q>i%aq_7_V!|9wRsD+#9fG6&nH2@1K`OW7~<* zOa+JvPx!sX7rF02iNF}gV&9T4DB)-^fXzW7)uLE5u-BPP5J*|Dun%&33k*qU0v$ef zw!Q<!U?7Ve^V#9C!kBO0s|_bxXAYZuAy>N#mEg{5KS%T6+Hg`0f>y zcc`3+Fm$w9yhDwDfB+9fX-4UA5GS)vixZmv1`K8VpopKJ7{zEmEeDsHZ-9N=iFm89 z5nf^}bwt3#9>)K=_Onhuuf(_qqJ@eK$vM`)!jTt}gspt)zZ|Tyync)mSY&hB-&`yG zuz56RJl#3@_(2JxZ@l?o(!HkAt`OChA7FpCxz4!P4kqiWri5+9#(5i+J?eDwx{Bm^ zkU}^W!dY>cIX%qsqkCUPtQ`%)WT6c_(TO@|O7tIQE{oaTovym|F&!L9%j2@t5l<;_ 
zD$4m8wM$<9IQY`TP@QLqb*XYTa7Nra03_jdTo+h5S5*n%|IM_LE)6X6->R@@?}2)N z?bbdgkA?Z~2%vX5l!57>7I{k^?gldi&3a-+F&;msVLG8G$+_J-m5p-`i7zC0l%Qbx zO1brXP#~n-sdI{Cq@Im;6s{tilEOgQ<8&kh@g87ycj3<;%Cfm-nKESho5VrZXZCtF zV~$kPBrqKXjEsRd{X^Hn2kM4ljqYt{SnmT8G3DmqIiL)Gn#%iY)@aOwc)ojvzJH6E zgSD71S?xU_o0^}#1H=(yrl4c1021*L#Bg4El2$T3Nw&e0g9qPnJdV1T>8uj0&MrHU z+wIZnh#$tc=!lB$Mlf^fa1LW*t_OI zQC(Fy_7CL34^k5t56{yMHG8p;Z%i28JU)pR`q?%&sLh-0VGd4$l8u(av*IfE;DlO(tXc9*b!|Ht-EU*>MVrciov|V{(^+b}p19oPRB4?r zU}0TuJ_Q>Ng-c?rB%&mxfl~UjmVZ}dYB;fTF1=Q%z0V1pJG2oOBHzg@|kNZqIx%J#VCC4{N=KP*< zZmCk=A}JO6wU9-qkvW$o?M<8%?EWcm!beWG%yHBb^zF}tn1-vAa!J@qM_35K${}8k z02Z&ELlT!sw7Pq%3nyRMA4WXKZ;5uZQ3?8=Kn9*AcL2l_h7vt=Rh&+kM<bE!NF?~rs%B?x$+Lr=0gV-k-Tep2W1XeTMFp0l!I`f6zgj>+PPcTA~taHfU`8%j`KBe!t zj;3M)vRNV`q?%lUfJ1xMYDRRzkABYn0&GD|s*tBm!pUJ8*A2dJRI>cyf z<@K*42{rYWpU=HgMdEz&q*Pe!fpVR!(uwo7M;yP7jOP>D%iq%QNY-fLjgQpIIA>Um zE}Qr(3euYXjQB$lCMDd(Z(PB1-UjrftnLN#3R* zcmwD)kyH~}#*YT8{OCdgR>Pf|X5Z;94C#8`kN}6-3ms?IZzu@>9ZtU*{YHz)#qkl` z^Fvq)m9Wk)?~15A*P(8uBJwK20+R5smn(%?$$86nyn01mVsrXtO`J-5M1lq;2pjON zQ0oj5Q*Q95@uiQkAMoKr6!Qd^UAV+45%P#oBVJ!;XCGCt-9NGp4kB2F6Or^A@Qnm% zzn$Z*AMBzZoEc}fSNht{gLi~ApdLKb+(3oue1G-kYc($?x8tHG&co~pq6*NFUCw^8A$vjQD z8JeZfGIXn-{h4@$gN>r(=EsO6uMm|&{&7B)6>CHr%8-s8p*!>j&z%rP@{}SIbyTo* z1%Quv2UXP;-?no`0+Xl;t@rMPS{<%TF|+sHaV3iK*WP$vPf+p1B>LMSsThQF8jJ?f zjFSUp_d#?J$7UStz$Qu(E^4HpG)`mhHT61-bU*1O_Z;pH7ACwZhLAN4EN{C4=>g)tq| zR92uTH|S%rZ7hzx?&teh1{4qkQOj&TX(bz=@Z!r85}6e8nfGG6UqC?Q(`(tzO1gzz z4INbw8==oXl>%xut7YyWBOkr&QerELIQkRGDnu(Xa+cxYLp`-d>BNukU);%20)l0C z)Z;V?8sCuc@!+~fhdECfiPsg1SAq*hAIWo+TOT&r;j;sY$Cbp-UA1Tuq#)bPXhjCh zwQlNj;8%D1eZ!@1@aNY_7JN36-_G zw3+Yo2_K(9S(Au3@~YNO#Zq38AId@CE&ZTTkbMovwxY`~G|w=8_vI3vA;?T?aAhI{Du{#$uJ(hYC@ zglEahp?M zBr?5BR__$#I;p8uxr^D)7F`na;!{b;;4e0`mhQ3sAj0P9#3F{4E)jJ=yb=&hec-wqzl%|4f#?y9Pv&Yw&Z^7Q! 
z)c|5cQ>dG~fkdzm!6Bv*Mm|rWc{&rx>P)lI?T^TwjS0;5PpcDSB=e)ZwXC|eUII5< z`g8?>SGV;jj*Tn7Jt<+$+WuZgDT75PFW%JR*yz1(x(W7DOf7XUsJZz{3CeluBTtJ(^~nL7XX>E zvhogjDqRmdViIf3pGke)w!Ns~_u$B>*mGqS$7!zoK2D)S?>WGHYdDR|*>HBgWFV#1 zjL|-D+mCpOv}DUMC$Fqh*|J5ldWB+C@_uYc{eXS)bb8K0+UIJD#)VU0(C~r@!^{9_=bWv6dbeUe8S#dPAUO z^h*0f5tC;fQqzDpPmr@P0#!*O0_Q?FKf|9`EhKeP__7y16nDzr4#`N*`A)BvW_otW z0fbcMTsmb`;wEhO8!BV3y4~%5Yohy>hgO1;g&ouDN zDyrwCVg7CfB+=vp{L@+^*GniQz&?>xPuLLCmv@?WM>{dr5>Z)FB5l3xq`~`YCFe^T z%Kacd@DdX&A-V+RmGcF@^?7tpMrx-cpYGkuFQ*04_A=Te%Oe6c5ZP|lI*74pOF<_! zt9Wa=)dISZb5P=6-1iaS;PqDAiD#fTMP^OJI`bm%gLT7Qj_#MDV+mTHuyPVhl~;5h zx4Qi{RNvYUiz)^6a0d1z-`LM23xaODO!2v+@T2u)j}ZX!5^64Tz2L$#$HC$_qUNSr z8QWB3{kn?TXq{N(2h-(=)isHz%!X>yrMzCr6ACw+aJ(H0f=yhn3Tvqcz{pBskX9g9 zetyt<;q7E=>0n>NI*?DQoF>(^#&^``gr!xR|CIp?JmOYx{^V zZS!AN@Bh-y@EUA6nVI487UZA4Pqeasuvjk*=Z)DMb2WZzog@(r|;B!)z z20=HJ^=&_3yzYeHnFO8xc7w++pd`c(tk?5K-gU7{ta;^gcBduWOiRnw7tVK$bQnJrB}3B{!#LpW0+AxXfI(%W zxS)?H^RPeV&J;W6D@Gsp#PN7)wmI&Th(}kCGG4t#9v{riImP{0KN>F_s8GJkQ<&Pt zOHbrh7b5ABB-!0&Zvgu$++a%~(Q6X-DK4!hYzox^#veJ_PNboDKR zm!#ESyQ8gVGr@1NYJEpDa_oKe$;PMW*TP-HoX+ReSJ!OalZSvX_Q#II^8(Zq!V<TXf&sAr_UWO4u8(yAI?F1LfQQa&VQf&WiW$al3H@7+y2eQdBuwJZ7GnYeXGs3cuI-B|$j6ad~{9o3Z_8+fd zQ%KZ>nlAn`m2cLzYA{sTsSIYpSf^M%o)t0eF1~1r=ck<_zhW?C*zL=qaQa&-k1@JT z#7vUvBMMqJjT=5W`26jVN9l2+O~6s6*{@H5L5!ID@)Cw9LZcZdXmg@WASnj5dNIwM zzj6h&N>3mFGeWAb{(+w&zlXi1QTFmdZdL9eH#rM~H{Gx3(N==4EqrhsH#VYt{8I_5Ky&z>#RxNB=mK3iCObV|F=+L}c=&%^|N!RstStAjJ( zPyW4>vV(z_2VZL)*8A=o8t?R>EsIoc4q#f=mX?gC)&f8H`%bZE#dX3=Nswj6?`b*u zey!ZVegX=UYdY;|)hdTu!>pWkdxrNjGy1+9=IekY>vfK~dnQHkmv^_95aet~#)jB= zqmt~QfpbE&w^S#Wfxt2OfbLp}w$!4Uf>S}@d)2QduNcz9!A@VRvru__HustJwp{*C z+;1a!JQAj^M|7HA*V-Scq3~#>Ndm9kG+*NlZAikh^72}5XP9)_)9Lvi8NTvgRF0I8 zO%8J*GgxYUt8#Hu5Ixkus1-+65nE7CxV2d*!ol^kspt*g64H*aA03$C_sv$r+)kMA zdZzR~&AjTzKv6r(w}CCTp^AUP+O@ji_-rW-oz;P_Q@;0XGt`1e5OU{CI^I4GY%0;j z%lz;dNF=b=P842@awe~%m%U8%92TkK@sng|Pgs~r&!xgLdG#PpcJz^&jit_)@nD7f z%R0>_L=0A1k*7$HE}f<(ViV$`td!J!Tqp9I(dI*Vm_hEZv`CU4iIEtu794gOnHSt| 
zqxL$av_AZ9zA=$!s(Ot8Q}^a&o!_?>8Cz3vML6)^#YGcD^5yX?yO$p74{%b#=Z$k) z4ak8hPW04cSvvXsm0FkA(j69hsatufM6b*wUZu@@KbMR!c1{(9&ybkW5l1|tNXa-G z`!(-*0p_{Tj~6KqdWMDIj6{@``HnlKG$dv+)+0eQ>IR>L{c)f1 z3gL}K-(O(%Oqxx_>2v*-qmU1zc{xW%6XsezbXh4mGsnz7(jlZ`uAd0B^1}?t~3zs9>Q~SyiwNldcC*EMPYg#+-C&r`;!!uiSZrLSi{G%7ZR%KPE`+cjOG2O zzamK#c+(zsYy|E-A7$r3VZ1`!?f+ixo`K`{o@_s7ugxF*7C8m;el=duAi5&(6m)xg zm_787y-&c*%creM6#*@9e86CWxbP{ecH(5P!Sf<9S9?!}T~v@D7~Gwv)y5~U3_gm) zl*=ecWI;C?#Z4iK4n}$n|9w*!m`01zJ1Kwv&_E6UrLRl+WC%D?(1Xhtx7CzGuTg^O za^9mV>^js8AvzO8HoQmWIdr`jx@!3PEnFY>Rp=qLk`qx%^lP3>iy!HrwcNTLDkqOm z%?o_g$O0NHFyMN@poL5HZw&vwX$!QebYN2P3UmoI>C>^6(gFj2Z$(H8Tpmf<`v!VV zGy&Yn)3)EH(DThwQ^T8MME8@R>*a8tj{Q2sAsr%q@@xda0(0r_5X;jApBu!IdtIi4D8}f=fP`F zdBGKSUa!1#XypLw@cQUTi zTH{SF-S}4vFbP(d{(h5D`jgl4vcPmOi?)|InUrB*WN&Ma1f0wp$0ac6uj#JQoczX{ z8Ho{N%zF8^aR*@D^dG~7Cqf)pc-FIZIt*z9%0(`VdT==l@I{A5aI9)X6s@VV6Rtbx!Z6q;^S+uLhr1&q7#MXO##i@FY zpxA;nWo@`eaNqtHdv6&Rb=SR*3W5^SAV`AT8Yk5)wl= zlp>OnA~Jx|0#edv5BKf;+|Td#KQGRU^Xi;;^6@+0*n91@*Iw7PuGLnL{r7B4fLAsK z9ox`5^DRsqHX)^ASZ}$0MI))EIx=Qm*lCIw;6^1=;XtVtT%%-R41O@iycu};R~{Lr z-W}GW7m*YTWz^HuYzSi0Qy#OozPmp(j;;POE5=|`;POm$a?tyj45p{-6_N&z@BLxz zde}y0Fn8YjapXN-PP0t{=zayBJ3LSBUk+?PFeNVA%0M0Tk3NQ9Sq;}0=m{x;m_*(Q zrFgnq;i|A0jwsNPleIB)D#XiYAYM5hDLPu{vV7xfYdJ956!LFop$%XefcDLurMefvr?7d zCc)no^V(l~?Bh;Wo6$^EKCC;XpJ7Lv5o7Re^k%&jK0tW6Xl1gmtRo@=#=)q}L|;CA z03Uo6A~y8wFYB7Xx+dRNYW4n@_s^(GpciKs#}w$`2~0mdT?VoykNtq_mLK|0@S`7L zV7WpSkU^J!cgkUvYx4=K=HL`j{`Ev6Fr<<)4a$H0oFKUEa87)V3ibW(kSd)WjD;^X z!O7hhGBdf$=!J@w-IQQIGx6Tak-cEMoO<+w`kA!8a!8)}1nzpBKP}0A5}jGT1&rl; z=Z>e=OJtca{Qa|dXi+8e{D2Q!w-aATvX@($9Aq!4%Pk!T4 zu1dQ$R7Px~!6JQTs|{1n^(FDCEG8gSxm6j>gKa>qX&)oK16cyq+bBonJLJB^p?Ki)`rO#5o6 zV!I#fyL}gKEUC%T1DcUhze|GzLt*5Cl|-yMQsW@YFPAPg>_M~hAy*Lv5$$3&QL?Il zQn3Wu0SnS)rDQO^oL^u=cX3(2m9}>?A1TNo*NQejvg-fTyFUSGQP_pIa{gohz-riO0 zOXmjG0J#jKzISoxS)Q_^U??8{^L`dCL_hUbZSKu?`gc++xlmI$ZGV{?`ZKy?nMR2# zC`k7!`tRNdGX!FbXZ0aily!F%-o7F6%m4EZq*O}JR?;vlB^JSEApCzR7QE@D<51>s{6=nA@Fa>XIkF+60U>T(5w=W&ET!>o*bZ2)^IF!&%h$#fAI 
zuRm&ei5hO+F&B4%_ZFYQ{Ldh*l3Cmh*k2oWze5$_*jX%nEpSd7HO~H3)Wv@|rfNQ8 z5u@%hyhnck`GdTlCAGadYOIGCCNp4f+E7kj6l^tWCvG4wjez`b!>%RFYNQ1m==1YTr&4-Z}>{J6@` zG*>{Ns)j;=@6sDwIu1c584SWx(a`i;s)DzkN4b`7XHcO6ggY11ZrcA!@~FA>rnPYB zFnwD5a>b(E0)J#@-bz~Xs%oDBh(>KIdw-T`SJx)`2LChC>He5F{N+00wNi%<8cIXJ z!y(XzZB_8^cl%$4eMGaV7E6ubu6#0_du-9`o+3^)Yb;5rR zk`7qqBYgO-18;pn80{a>yG9qo`*5hovIki^?EB`QCmX%NA~xp{ct=^k-Fr${fOHJ% zdt8~LKj~Tffgb;kg z^c;d8)dIJ88rw4^=`Q)7T_1fn9)Ha0lZ+uI1K6kKrh5nd`=)V`5}=3grT+v|tJ>*p zzALo{jP>x0}8Wj zmIm2>CQgeSqi517!UP2|Ie+K_)PCE#0R8S!A$LqF``_;A(sWp~s-m5%o~_PAAst%~ zeeIcv|2@6vL!SQ5qZ!nm8dxln6V~6#T8Kv>X4Z?lnEyPCF_;O9+G&Vha;r!&8``|#iMNB z4x>EYeKi!owo%(hk7^zYbb%ez<$g7hj)2@40{YN4BWKI6eD+1rWY17r$@PaQ(1Fks zwyJ7Q&FfvJO7Q)#$~BOi7N~HdCf>GtFE)ozzZ2@%4&w_&zcva~uiuCjzrKl#KPx3n6a<~Q2Xr?=%WSX5>#jnkkwRM zZ=6%a^i=JHn2*s>hfR9<+J24OH?3S>(DHi3v|IQ{fl-m;hToDs4&0}0-3*061d}QK z=9m-K*(BkvmV3Iy9uWiG%O|NrR-EC7?Q=e_`1&tdG6tI8OOc#9uRf7~_uhCE=cS;FCHFBY}|NrT$K7F~U>W=+GJcQv^p zWugP>-PKx=DaZT-iIe)d$=mrrS{qR6&Z;tckAikHztI+`&~AaZ>&o5=Sgvm2*A@?9 zhLS-L}-lZ67$uq*m>U%s!-CLomkuM+yuvOrkW=>61}lFU+%cL z$55HI&OgV;%#T$fvi6_zT@HJT_q|K1>0GuSc8uTkplh{UkzVUiFg$>7~ih|=#E5;xRwBFQv zJ_?WcsO5JL-`vk)I%orV80GVb$o|Z^y<)1K8KANfK{{P;uWQucps%)n3X1aS!)8QC z_`1pML!TafG)Ich?Zm(4^orzTHhJ6nbu<6>45)Xu%2gqQ7Cx2QwAYNOL;-4{xR+g( z(Cy$N)jY2D_f7XheZ5pc4(85Kl(Fig#aE9y=Yh`(?!od1rPLS)vLH95g_UjMl4XkBM+OWdU$XCbWiZ_uIJMkUR!r@o9#GgfQJEObZqoJT---2OY%u9iw@!D0!f;J2LxM( zVAcdiuXglF{1CXh`OMZfn@n3e=R3Fgk;)YK`h^*e<9l|o3T?TW!mjb_4Hi53lx=q0 zt*VpfOYG87Rx5ahP50G;7op9I88g2ZC_AWr`c1a>a!i)$$>tCR_wImMk}_R4o{U~5 zds#s*-26fPro4i0iPX^t&0fzB{m*XO#~=*%=)|iNJWWpM_M||Qi=2_Uh-uxgsiRGj z8H0IvIFdmMVv6L$TaF9$MAzD}*b`?Ahem zE;>8ux6$2aX|p}LZ*4OEa7$Ih<->EIJ^dk^V6_d1@qS(_W0KWgMcnf5Jivkvw8U~Z zR>qassOh>H%Ge_YhH@ji52ODb(NiaAL28X`c8b@`nRS z>Y8`ic5j5#-fJV`kn>oUQe6|yg|N6JOmu;Tu(vB}d5_3F7jL=~uT=`jWEAkTM2CN^ zds2BEU$~S1x=-q$R@;pod*`D_aV{v#IZyf}+gpTj!we7A`dmh= zsI6IcpWg=oGW0`~ypiMrNkZhha8f5yxm!R2v=%@L<{k~mh+z}%0WTZp2I@1y*3Cdg z1918K_xnklQKz0i$#S+sdl 
zjr*u^hA)+=6lM?8C6qA}J2_DDtkI<;b!T!8ZcXKV;D#SL-lt1yI=Up~b;>SldK+Zj za;G)QheT#CxiwNiaqjDUSUc=Q^=kKFAO^jw$Jik4w3zFjnP#06~Qj_IG+kqWHU z+^Db>rtbtyp*W0qeV$1wy#O1$wl7#Tj7niKBG&#eEkFAiU!t&9B_PVE=s9{|3H>CK zKrDVc7D@ zp)cqcq{y^)>`-6-tOL2PGDjwG+xgyprvI<4Rl1_&QyR{aTrl+uFm{t_sVgH!p?pKV ztxMp@e3~Fr?qydS=TZg6^3G7Lab0?^QIo_&OqMW1kNikICbw&JA{KJBNn_dFGy2sY zFIT*DKj(cYb74V9olFn$M&lH=7u}tUOeVi|YtMDx*7DP6;7`xo+Azbxb~}-EIr7Nk zLau;A!YZt`eNQ4C&vzZXH;oEuRV9fy~d}%yBHR_u}B^&wNuXbZ*MmFu*k`I-=ptne?MzGJM$Li3Vcg^`r%=1%z_qQsj%w^R+m2< zu^Hd3)WTe@n+v;-E1H;?6^Kx9ej}~|Bl*CrbFKAQn^LWCF3|v&a)MObqA*BH4l5Yv zq`NR~o7&)K2X9s~m0V~Yo7?cUJ~^y0F_!2jH==X&a;^0!6r5guz`(*E;h%U{WUYhJ zw~Y!61d&oKXI7*)Uas5cNt^F@d^*%5<&okux7+wBz9=ES%(!NVC_A2CYwNJzG=Pk% z?q!rxI5LRBE|m3Ik3YfURT|W`ND?)f!z%Un!13T*5j`X(A?P7_c*nq&q>?&0Y#^DH zUk`R0BV;$#lNHs|T!v>=?qJlGv&S>~*2OdPiMC3$Lkwptiug3q%Q$e^gG^>y(MD|w=>A%i^0N^_?I|F^zUppf-ta;t`><3wKINKg1ycvpx%EHYQ5jxSViOufJ&}}M%4NLM0ZDb7TkFI=d?{|7;5yDQJV|hV)oQTmG zW-OSot#I|%&qE9pJJgC6pFg$`tx>Mm7~w0fv1wQ7&09?g7QWZ<{$-iIU4HWllh}tA zuKq=`?>3Um-;WajZ>TBNAuDo{@kD)nBfKc`M=b6)jDsKLiocC^MN4f0;*|!P;mb-E z&19uFzvQLIWpck|a1S%|e^Lck;|zC6nx%eCc~iwg&ppl|7;D0Rsd|?xJn2;+9`Ag; zet}6KJ{&D}SG#dG6m(Gcni*GHDcabu2a+*hpR%2QLs*R?*<7;R5s^eyeN&4{@yY=w zqI^A63KTL57n2CpE-Zs(yS_|(5JJfO7A@fu%c^}hyF>Tkq2*y!@iZaVd=d*@1mIFa z$yF?~>DOPcS*v}HmtLgz zD+PftdU08BV}i$IzMtF8IlKW>-#opV(&OOtoOFr%4^l*It4K8X6EX1!Yb-s^N+}6T z#l{1RsD<>yDvI?7L$;ZnowZ)=>y5v#GmoYs)wj~*7@fGLe?OEDo5(0c&L+&RAJi@h zYrVfgh9A;fDOT-`ij|S8wi0(d`tF*wXPmxRoz`@+&IO{i^85X~i80*FUZ#?~h+IotB~UI}zTS3&w*D<1dJ3)0?Mt6H z*+3bC<*~qSPb-$?*B=OaKfiM)Gw7f9*cPM>U z3etz)x_3Z78R?$|sRsl3-D1dR`2^$=mPC-5b-@LBX^yPmntTf^drzmrta1|_cT(B# zboiFC(8m4%C%g2@Oe3Tq7hiGq}X#0Yl7~lTLiXj_-=y8yb=Y8 z?*6@*d0RboRYJPD6{qI|vM1MR-DJGpKLv>dMO)#hI*<%_VwwBeJ&aPDN@r&(G|dWQ zFXjM7NUOrtn0TyjRwCFhcT5hQO~VT<9I9=PEPoa$aQvy=o}69wMq$CZQ(3f`)u1m> zUxV?;=XPs;L&o@%+FJ`+7-^>jm);~1)48ySmyt8osYa|S(VSR;hPZXbj=z0P0I6{M zu@#0pWUUiQvNc|62Eprr)l`$TF4K-ul6Z_CvgltgYO&^$(KX8i!7EB3gWvI*XbroN 
z3Q%|znj#^QEGI6;O+v>K?qiPzl^4S!RfQt+e%vbe4;X2r;aD~*1U|$O^RbKiV+uU@ zhu@FP&Avtsa)nP%NoZrn;KWA5ipboVNif*6-0si*rYyOI^5{7E%Ewy+7q<|ZEL5q@ zB^6x$5c~zmN*6@3+&3`e>)jBi*-gqx)fslX=DyTI3YAc8Gp2boaR^G8SFq~h!}s96 z#85eX;yNb>RWA%Sa!W>s&}8Y-4BDYyA7Yo1+@RyKynm%5vqOZPt@+&ves@MCqZGgpn&W!XzFrZ`nRmwmo^lEYm;5qTI0dT6Z!9o`Py=YBq@=y zD&4zM#T-u-`U2ZQW}~km9nsgkm^uNk6-smjryH7%MgYlN!#K?Q^z7aOdDx`Q>6>by z9)A2}beiiGNO-?0#5Da;6`u20zG(z&EaP^*V&OZ8^MK?kjHhUQ`CZT$+fo-7J@4(D z1AWXfwio4|f+WaL#lZVEGiMJ~4s~Jbmi4n7zIZR%!mv_tgTc&gp#b0eMx_9YIvmOo z9C>B^pyNw))3=#0+qXCa!YNj zH9RkW{k~>%b;FUt_8*ShJ-~=x^N!cwvbGu%l zfcj?jihZ=3ybP)Tq;!=maLM@BUJAqT;1U|swzoF>6~0IP^Pd3g2z{mv{}Bx4Ly&s? zMuC|%>X*&GvZ9Y zEa61i$O@La_TeyIa+4W^ucuM1mAqorOzb}?X%IE05(eKfX~atXHcy1cHcAa2t&l_M zrrdQ7BIy0>?(zSP2L1&#q^K}HdR}GLDg=q}VChc9W4-_D$)p$Kpmt^C-^97p+ra=d$L6AW18C=MD$?Xo zGNWhUQCEik>naGw7(pD&+iL#j*Ku40n#8}p8$Aa`y;y3(r{CtOcO2|jI6n=?p^P#2 z?*8Y?gCA)!7SXH;nwS ze@an3%v6UsSuRy?Is6ZLBI~>u{DQC*SrK+*qR@_gkMCda#gQaqyzIU5I^tTRCD*RBZ3xq_R(diO^1zkuUp8Bz@UQJ=-$xax~FG8oRt zHjtOIFWj$7o$YVqBd69lEtRFbKy2~5Ine!b)fG-?Fvtr zA->&JUS+=Q{0f?!M&`fF1MN6Nje1JPSkHQH5RoktCGXs`+Rkh!ehKeGO39j)ZZ zH2l8G8FBM;Eyvy1MQ05Qfpol)`6lI*s^(f&#&43(t+*w5NnjRmNVjO zT~W^{{x~PNJGbhG1dNI&mI`3&xF{MDJ|rEezP3-i+8$2XyK*(!`n2noDDYiyXQ{fs}HK-BsN|Cxbw0*te?peAm_F{-7~SRdBFpdoGg?{}$V%DWgP@(DAP zHciv7DE|0=bHQQKn20)Lpq?|#5dE@8sQ-GE|7Q;*{jUMJd05K?*cArSk2E=F8@6xltwtbI$0784dE2|W z7ZM1NZpQx4T-eC>lM*ZbJ91 zMcHfm8#ld|=OyhhE0|;NJqCDaAZ+CU19gWp8fETl{ys2B_qfiUXJ&hS!4jx3eRczS zIH03}2$KbzepgZE8@xk+^JNv1d#qRm@kdALsjKrBa^~ytLi9ABL68RIYe4Q&|bl!=ol58x38dT`xqQ9>Ch<7Hc+_| z0br>suJ}gHgMf23lzw0u@lNC#8ctSc6WPrLOd%CAGPO>DT7EkZmmk}BThH3fpI+1A zLc2iqmkx}tV9^v%^OKp$e79o&<s+pP^+UIA z<+fGoFQ7z(BO)^d>lM5vFAkqx zP_o=(0KJcp#p;<0WtDY|enL*4M$Lij)ryw$R1K3@a@JE2F@|4)LjVEuHZ8fwA+}p# ze#V73`s7Loq?9~<_Zzm8PL`s1652KMP4RU?T%vA;hmr@i332O_zgr#q8aWsAA7$$F zuk@)xeaX&ftlyw%JgLIs%CYVJ0fGi$OU0XbzJ8bt^g6XsH6n+-y1$sYL z8`szU4XTVOoH>O*+0(KM*~S?%AY&pw5xBU8xB(&izCE4ujv#(swXAJVxo*(OQ*v0U 
zk52k$7euZL{r%S`cHD>qyHe4JF`=1fpYZqkF^!nU=gyuliU)qvx^-fpNYk3S^ZV?` zc8<{iwDT&brmxB=oBpPhB5WjBTU$IQ*mfxksE=`H`ONtSZ(SLZRp^uiD14Pu)%j-g z=*w#IpRhjh-Ub`M&p9+0QfKHrg2p8~7-ipR1^dKz zQU9QmFWC;a@{lc#1uAQO;7fv z7y#ynEtgyy1Yt?kE9b0(*?n5MW;8rKbwTGdD^|NC*+UK-7r(L0UPV!m zB)4Ls!V}~VmieOIr$Yi2shMIZbK?(J-Jhph$^v3>Y!YCeVlK?y4hbzrtS~pEAKs1= z?XkH;+Yd+><;>QflQHc8DuU_&awkJAIhx7V+TZRrjY7@51UbzdDg5 z)1vg_VB?d+0;^p$^T9`8P#w%BYH|qJ0 zK$CEcGSH;&4B9{f#nJtleA2$X5x{=bC9BQAra|R1=OagJ4-YU|zrH$*2thnmi(NuQ zsBe`?Z-oOVE=+s9LFZp?Tp_11Yg#}1n%o!*{Z8bH5lU-WE;77``h3GyI!3UtP(^@6 z^Gn%tszKU891WxBIn!TBX|ZMck^;L!$LdN;!W!zHSnxYs`rPX2?eaZ+eT@tpl%KP-Y2qL~M zzg;8%hx=6G@#V__(X6ue*7{&O+0q(?L`(?c#2(Ye6!nrepq(;=jV1#KFTCup+M}~n zB0nlOY+FM*2HSP!MAnQgvyvXA4htn?S4MiHtzdz|KL z;;siP>$A67zF>6O{j{Iqg2Q8bQABfp)8wyE;_=_11Rs4?=`fmb(3HwaCs(TU(sGl8 z@ao_PMkXOO7^;vFmY&Earkha6l;pN@^gU;yu<*^)K)E6J%Oa`U?4;LgK>lf>jKu8q zGn}Ej%xA%p4+1PN5~9QMA2c<D|FHP0;G%o6NFEk=jJI+p%J%NbTM&MX9R52Wq<;a#jY_ zrp+tbI{wY;-r+?bgyPJG!{WS$7SYM(CQFZq?fD3SSnDNnyPCDgf>NH%3c8lCx# zWG+0OE8#F9vBcQLicD~i(8zU;44W|p%Te3e(#wjQX+gTPXnMmKBv=W9QUOoVh{>@R`^yWKVi z7DTVFd88c^wPmNc0UtqbPV)Psnk{8NqKKkip*Hh#1Ow)A(3xA&_`joo`t1eOQO|Li zW*}E_VUah1K1T|FA5%ritNJ_D0!r@-NB2amLne6iv)h-jO3GarPNw?82kSlP;;B!B z1X%pD*G+Am>Q+o(-@PKq*|KQ5mBc_v4DH8{$us(Q?!91+7a0v#Qih@r0$Coh{C6foVBN#;a=U4&f}VUO%lA7vc!*O?RpIyMwUm&W9?<7(3kO(U9nI zudWmYKfod6!-M8?c%#N2pGMkjSG#?ay^lHf;a&AFjSHdjZT#ohwV%Y5%td=CpdW6A zM{R&LFyWG9pyVZeSb8Wd?*~euuaE^l{;R}!MSTSsh@Tw1hO;-8V zDv%H}eA;q!-sq}9)sM-g&@({fNN`kAT2)4J7tsqWYOD?C5jgnf)N8zU{;>BD;B9h` z=SXf2q}R<=`V~f7fualZdt}+PoH`wA*f`~r38YYUdz1PHzApL9S(!Sq3KYCl?4S7d z>&gOr*OozfX$3)w=T_$KopQmxU`1L+or=|P&bSmiJbB$~)3%vA)@@Wzdi~y6j*}#Z zWaqR}Z?~4%x^TYUg3z{$3=;)w93}sdfmN*NkJ|u zt*)Za(m9*F?`_W@_XhiSbz-VdjLEE(Hc3q5w+bPoW4alkpLvp-hNJuSIEK|hIogOS zOZ5>YIf(p2CCYN#nm#)Y`1f2IrjZnQA( zc@6jaC_x!VdcHDe968eq9X^nnIR4Gllo9>@Gu@#Bji{!SD(dBN-;4osPvMRM3tVlK zpQ)Rgz55Q5n@+B`H0!8<;moe$vk7y&X_pbx=km!H6Yq_c*tCI#9N&a4I#o-l!S*4d zFRgWR1hde`hzad~?LX1vP!TQi%BD}wE@YHFlas1?uM1xpj~*zAT0Ty(PT$TKW9rD` 
zgmv$16EC8C>k%Z5KjQwFxVdY=>S=MLp#z-jrtV(3@ux(ttjZXhY;yaY zfVwABmJx1%XxzI=a;?g@&^g}*9E6ChWXYq480SPr3vt3kVZva)-oJwwZ2>f_q>$c5 zxT{S(oo_tS@urYgxp@^x8#U?>u##yeBDpD=EZBUsMxM(8`jMn#`nF2P`}LwU-Awy4 z-1TrB6%u(f%XYpAsK^6Q!Oy*H#~xfQ#L)3yQ*yFyTv5-)RUZ-g{nSQs zOA2cIt^rIWaxH<{_FhM=a$V!a`!C-DewtEp;i=HV^Lxrp$(Uq|g&J|xi|(Uzw#^BY zTg04h`_b-&XGwnjM&DL7e4-P?-b;On{YFvk7M3n%Ofl-68tLCZYuHy{%x^V(JYcyv z!dh4i*c7aU{)6LhoAVfO1Sgt**#gGILE0_}RL@5qu|rtQXa^Mz2Li-W5uXw$r9?GamRoMb*!wuZ<+rXWt!^Dn+fZ>E=Ua zSW9FS$9yJoeKbMdz`wYx=ieJm;r7DoicH$N$(xQOM#S-5Y0*yeR~GeNxF|)f>fL;` zp3EX=|M=X(w(K)x6|xZ#ay5-B0`+WU4$bnOybr=NG`%3_9lfB4BJFsCG5KI8lNE-9 zy^K@gfbk7N9e+1BfCK+F(#vgC_8RB-VFoLTPCzS>T8Cnhr1rNa-2GWhkn&~Y>i7~Z z(U-o`7GWyPi#XQ+>!yDF>NghFMG6S24)Qolgj;-6hTdBus>iv`y?kw&a&Ir0j{A>9 zrg22ib0diqy(M7qS520!*CsR)-za!X69)DAglu6DO6K1_a{`-=iAGTR_5w}UT+rSb zI9TD2Ln@+}?1!i3&=jq(ty{=Y*PA1T@vMmQg;ZC3k{Q<2S63{}=)dKzy)F54Sni4n ztI3DQGL7hQX(jddAJRUqG+VT#wV|yk^Es=@3ZR@*ICNe)@gCAcg{3WzD0Qa6Nk)dr zQU%4K45LtDNEz28BQZ`1x;>XWUrzlI=W>w}{>3M1-fkL=YnT@3avp1w>Yl_O7QH9n zG?b9Jl;ooHaAR$TXCho6vwy&$g|P3AsHv%HxxQq^gL|=Og|KV=)nwzy9rWR10n?Y3 zN9erLB~>h5ehCN8Dw6T|OU0pc#v_K31kR*x2&eI9uRy>SUbQl(&r%dAF89X4Nat6G z^q5#LIxJL%jg+jxic}oR*613M;Z>EAOLh+M&`84u4@P1*jw|4^Tl-ksX;(-2^0n6rJ>sf-2 zIq9A@7pX^$?7c7ev3`?dGA+R#V`)#GNShCyf7~cNKO8K20|7a9-@miLFFWHO?hys9 zJQ#WXfTyV6bmJp(U@K4OhfLSQcfRjd`;J(CrQnHu<_<*7Z**LoyEoFahpvzdR(K~q zyV@y6I`MFrvYI7Yo+0vmt&p=8HJ!$-$(jq|x*3tDBC4}QvQuoMFK<|L;=t=4Cp^co zZ_`gc69%hMF>CAqmpzMEUW|u|tefXiz3v|K*9v8hqTMO(Xm>Wh7c;Ca&)uq0Ke;-< zu~67`Dg6*T>ox5I(VxGIcFHvJk|TwZxrE$=;GboPCrJ6w`WKefh>4r`|s{^ftQVcr-HNTGDjpnlyP(gUEroc>FP z&o!Y&MsEXXgAbNZ7?m)ykSpIZxZ{RI?Rel0UymktoQuL_r!j}T;*vkd52^J zoCn%v@ODq$gD1qP|L-T1f6sa)`V~+WZgSZs_J?p-Z>H6iUtq^gF^?xhe9%&$rZkGi z#E#QzoO)!0acQa+)i@Sgg{RszA>;}P({-$Qrf3cGere58k)6|JsdKKIbMQG!_&8hLG(D{_Asb8 zM6tApmj<+5DS5yc81nlIZtf~S!y)f+&?4za>5j`_jE3_MS}`t56M~1K#TgrRMm}Zn z&M)vHozSI~HS^iHdmkO;;r;yQw3&n@q*JQDf;Z~1dKqOJWbtaX&XUg7OIcR{yy;}v z>8BH2wgD7<$J&~GJ~HQRrG=G^D;K@dhds(rExVQ!rHC5G=J*0Y&PxE+!UbpLWM7#4 
z@`4$?|F2x2lpn+LhYRq}n52sZqtmIVheLh_zX3Kt3q2l93s*H;)2StL;ynS0dPz`! zMT86LuPE5pCT*p5U)2De{7`g}0t(35AILf1NsB=znL|&$U7TMgu3?}=;$|bdg@Ipv zF*cX}2y~zn$?)fFO|&a63E1NU-tXRS^wU zHLp^G>o?75Py$u9)K7AGH`_k#f*isHA)nt!4CdAcbkz|@uOur!A2?PW(?tC%Iw;s) z!%T5jdx=(7K!Q%lq(GoNx90Ju47bD>Q`Wf);f|wE_v`ax>|&9sD2RR+{qAG48Gjm- zyw6x`6Ohi2D3RQ>$KP5=PgqzbT|vj`%Sx2gJpTeN@Zh{M)4iW@OhLuDK*Nh$2f7-f zx6#f3|M2H1Ns)FKK-wL`zb+_%`+{gXkRj@R$%<&LhAyj9K300j-^_57Ij?Q@~cKw&$SIguHncB(38rE7qIJ%L3d^ zXP2ekmUf^k29TiXceDP})f*be;N>@Axjyxas8h}r;j zC)!Z1-LPf!W&S5xAWL;wWMTxEzFJ2Brvtr(EhFskw|s~Ekw9>p0SyUk$J$D;#PGKIW4f?=}T!&kvU zl|JwMf=%^*_})eV2qq5o&AnW|wK7!aRiJ^$cQM3#0cvF`cD?~x&}+~ZKWsEhmG{vu4qV;dya;di2KiZY2*+mNe|+2}_+ z;r?H_X+ckQ?O69=whN8=sN6Py^O{FO$HNTAeQm=p(A9iRYx?k(?G5`j{4*B1Bs)A3 z%q)v6TV`3Dx=T0>;F`P4Od(eosqbf^y-&gKJXdt?+FA}^wthMri=gYxP8iNrD}vhl z-C+e>cu`6EEYG{HMZPB*Qa4BQ>N%E3)4W{e=nqRYIrHV7x(}@K$3l+s}+7 z2^5>5yg}(K89J|t?~H%$`V5x_1ukK?{++&0ek-0%WW8N3`L1uh!beAdw=pc5()OKR)x(2A%8aQlg8&!XrREW34Ng zi5SWv8A9p^a((9GV8&AV*-Y{}^btXT0Pg%W>IWL~prO`AHnX1*yK7U#n#m(Z&rl{5 zZL1G;kGaa8E#mRXap#vg#K<)ePss73zaau$)P!$z0g6GoBkx$J%iTD8UFTqE_Bh3WJNnG56t7R_X*(gVG$AvE=1=p229|~vc8iQuUuB9 z5i@-E!XRxz<911_{kJmNAvtp+?)^7uZ9GD`c>Fa#;vf$z`WyBdmdG^Ot{<2b@D)=6=QPWw2|8ZLA z7b?D=_kko=4o2W%kZduJDn%%EU%O5>(j}JTz-0+}#DBe)74&1!y^YkvsP)r~man1T z5PIAEiFxE+rFmh?j%guHl8d3Fk|Yb>8^A;9d+Oq`2C|*9iFw64gA`CvBwX3(^K=~P zu5+ki+X6GlZ$?@&XORC)H$=Vc&9-xKNx8EuxkHOve;Y)4siwQ$=rA1eMM0wAzpO8r zMJj4rg@muaT{rd_hmOaTvh32gQWeG8>l0GrlXyjXqF5t&Deo+w&EUN9Gb!W`0;&Vi zw^7P;g?z2xC}41s1tB1HtU}jAgrXPb&Jo9vhP*_2A@dFA<4l$So$i=?y4g%`MfJsO zZjjlGX;9W{N9+~xwNXSr#N%tyC3vCN9%Hs)C9t4`!l!x>ud+jqms>Naff~2zIXA_; zK)-(O?f)I9a$#lA?pPwZ&4e9L(vL>A0B(E3bWz`|FQhad2Me2BNtyL~JPU4w;E;r1 z0I0$ngtm?;ypMgZUKqOOv3Ez<*~Q3TZ{)!Z#!j}V;UZg`xN(qoEbGLUzvVFrE3#r6 z-?`N-7-cn4P0%8ZjKE=g>ha3^){e^A$p_7#H>KVF)M=JyG>^A9Hz{PLc#gX>?(~0U z2>OkxombTrb4lihjJt8=%DCRW2I36?i#T70OZy1FM6uE5)l2)}^ca=b?A<`fxDLpK5V2KO@FYxySxo79x5O2tXeWRJ_RRP~Y-SBf4j7 z>|)s;f3))f(SS?&iK#&PwUPa1OK&`MIte-0wz_qFWT1)o+o93A)X5 
zM#>mdfx=LS5V-_mo0L$+EGF30Vny5wWq;RNc8d7@I2W&iu=)%%GMY+cTSdU>zkw^X zrgWm1s1e0K4UVactK(;x06lf&At$#VGoNv+OE7GiL_^|DY12U`o0Pd^Op0Xz%zCsy z^vjc2SkDB3q^KqyQ*ibF@};+tgZDl}G4x&^PpF~aAetLd%RsL5+BFsIYHVd{M$@}tKu-6Jf&WC3uTjl*yp(7g;D@1^{6Y;P2xz~uCJ zPL@4^q3>!MH>@?0;jKX)KJQBFghhmm*f-ImXVvM+V4e`rt|BHudaqKE@2KQsCI0GW zmlCK+ylZb_&%mC4uaJ2`O_wyg^yc7Ckb5KJ9v>Hmy0F;52GPmAooNv?o4|-UTj|>R zE0|P}LWV^kU+u9B3egpYq^3_ zZAUlkk}*kfm?tcYT!l^)-Cgtb%UtzhYz9ha-=9C;aDB-Fs_{xk&qyrmdujpkEF2x4 z2$Dd@`kDv&Rmh7|TlxNXqfR?9UAEgk17OJwpZmf+Q;uWp;A`Zp>K~iZx%HxW8y2F{lU-uuikq)d{*ZH686PFQh7V zX5(ZW4FZ5SBQq2-4HmGTsB$t|9V_4LiQ}}adeq1D>-XWpwm=fF_H79Inh?qKZOIwv~E67oBGqo;&2D$&x z0-1HIu%Aio=)Bw3^yM%6VB$wG?MVNmu3i0|Z|jg~{-TNKuOR@gfIMKK00sO5>m(Fs zTS!mZkP;!ovl^@~GZkokZQg|%%R%w2@+r+oZ8m6xR{(O<(JHXAIC*HY+WMiz^V$3P z)I6ks4}Fw|^1)K24hTPUK}oh-ijZzRkjVFpHwP;F5Wg_SUpRZp2AI((90mN}M*Mdq z{?#Ug7Z59~MzP0ri;Y{pR5;Xim2u>Rbo1L>m^SMx;qFQ|UP@Ki!_dcnOmRmP%8eIK z05S*~u3}nU7jgkux84xQ8Ho56zd0Ly2VY9TiYcn~+Q3Q~|5)i4)XQT1Aj^1D9Mn!; zEswrLvSY3A!V(-6#ftvS1-tMBnAais&yBkteo@GR_7%Ho>3`s6AB&dl*oM}f+?$_gPd9ucOrilR7(`-Trs~Uc-yIti z`;-jP)?(1T{@$11>kSleXbo@$-}B(9fy6SshJK!_G=Hu8S{y>!rH5s?`_miO;2pvF z*%7%v<1ObStS|t0$*R7Ma1!t%`Jdy;{%2fS@ep{GSkm-Vh_2g0EWs4cpABmNYXcYn z>H(lZabv77DrnQmN4ePDb4F)S&)D(#eQ$>EwSM z$!u&w++FrD&OwR|v;Vbci9k^JVVUL9K`pf1|LX{;&^`!(8qMcv3#r}zYsda?w%5gz zGQe&21_s*f96MNWHgVWxby1T5eD9&SAxQUZK9#yZKLW`EKg)V%ppH?ENS1kFxOs34 zL$|_xyL_EW<^MD4^@B+TBHDP(3L_s>Fq z%6xbUp{VAj&b)j;G`MTvfo<^B`Qm_Y{dvI8BiTX%Ct!%dtcMHxAui!GWZrT{u6&z) zVcH?^*SGTJGgKXX$~UO@%v%=J`bn|8jN&Lloaltiq*+?@^wq`m0NrNt=Lg!rPprX#Q&}-wFS>j|(9+7< zWSdQsNbfvZWHL><&}0W=i;I#jZA{}Q&%1)*{`cb{S%(ysVsn3-!pmM|OpO6EXV=m$ zjOBv<*xLUoUc#NI%Jx6}N8vaoXF=CzFMn=paM*(4tVKYwWi_$VAAML3>hK}g55-UZ z^TD1^{e7P!)LvAjSo~&9?#KiBxU#V)Fd2`jg77Q7n`scDXte z3||2+={h{Y^N`sgHh=9DO7y$W;3OjhZmB_&7t*bf7OKzUBm%<_jAH|2sCeC1Ovq3n zS^Bx+^}1tUppLjX4T8<~QqFwRe#wAY>jQyD?#I7}$Xr0LR>vDCwUR?f2O|)5w07ce z-50@deZaH*nWL@h?3YBb;1H)?aspxPZoX|u59^|r$2ufn4CUYdh!b0FfxNsv?Cb0; 
z0+0j^^M>;-#aUdHU_ibO=SL5~7!aM;0n#da2|q~0)i;3)+{_0SprzXwnY5E0;jB{b z2bF`b0SR2N@&5yK@b|nM9a4gUa;H4p8ED15iQ*JduptK-APP2*5u#Nu=-D zI+w{cJ7FH-IPX8RawYYXcaGl8ct9{$=g#Dc6 z35chnVm$qHzfsY1EAq$lp{eAf7Bx4+g!qDwLKpxGu<^RGCWCIerP?3)IaZ_dh?_s1 z_zGnKQEn#NF|a_a8+<=fHXzW_bgg6qoIFhVq8`wr6_M)vN`>rf7IAJcJ3sfBoTK4J zA*dc{!LL)>iKwdsYQV^Vr+KWdcWdFesl{oUGk6#R%!L9OSO`nfijoVl_w4)p>$cX= z_(Op_>j&enEreku^vx$v_z#>Xfw5sIKJ8GM5gX;fi2_+kt^0rF@D^2J9soNL)VK&K zi6&$lo1H^s;rBpQoAeCGefvT{)855qBS=uV3Bo!ZY&)ipu79VliQ%nV_gE(Z^h^?O zNG=!7)jjzxg1Z7p<#vvaMomoHMX`Hdi{(OlilnYmVivnVie*IB39utakh>My$EWI% zrJMs$7g=m}} zdpy=~s5p3gb9eFp#>PXqY_pEylHbjm`Vod+-cW2xeK~vz>KJ6=+j9MykbpKagkovx z1VmgxG1M`RIAx7Z-U-*2xrAMCMk9Tdh7|6o-tMpy&@qN`>mN8d&a!U z7u^U%EObp}l6hNHggy;y2e?pOIB zL@V{?59};S!H1weDmHeGV3Gq!XM1t9fyDLtj2x|kf}Y_W?rEQt7guzz@eZ9Rey_CM8o$y!_Pfv{dKt=J5bhd|lc3}F%H`3XYdH=gB z)T38I&moJaJA$a7EXeg$cZ{+M!R*3iy0@vlUe_9a~;+efwkzGybCaKecSv z;3Q`%I(aO{C)ZqKD?PN zq5bW{l;?5FJ2c&ZMsyjEBao-NNfjf|Uxb@WI_;APRkBF{;5GY+PRP=E92EFau-;d( zO7}PpX_>e9(=Xm96;p>quqCqy79x@dRwr;ew%BK^lLix2 zbnvw-5teJsiq~Vtx7~Eng+bg#Dy^5HKxC%QI^C~CpC#XN?5u5$h8^w z0TCI>DH}@fT_@iUQ^`i_4^F3~GrphBAM4(eI1QB+@MY^h%rtZxp%D$U#h5~(92_B= z5)pg6tD|=FN^6WQ$@*S_tK0ron6_jflbKE8!*LAmUxG^-q9l$rgT z>x7GqAJ);V0=|{(g-;=wnj21;GH>LOmLc`*SP9@*a!6mf?Q{mLT*>{-!Rsxw_B|9EU73yY6}YaaT!^3Z~sX zvrBDK7;5ZV?X>;BZg%1qH?zc+(FC2(6X1%AwzhMVU5k@KEM498mkd1P6c^1|o#zu$ zn{HvTFxxb0D|7vKhpQ`kse3v=zTl*BBl^?Bp}-wtx>1)-EmeM0DW| zr-Hm*N~ZbD8-c}$;5G&kZTV^xd%K66K&e=_M(a$h0}Mc4R?ij#W`QetBTJj0XdWgO=(Xj}jqJNjp@yhHo&z8`jxPE`F9kP*Ee_O8vEQ8ZQ z<9#Du>icnkd%0GZxhQayOmkCtY$4^YXq|}|cmF>9DD}PwVML!_mr3%XPHX+P{EXCI z6R(AQKk(%i4kn1pXzuc!y3g8KVJSh{09U5&(eOwFWPkOy#6vTFu}-GoyG^NzPwDW+!Q^HeBb(|wYJmTt7LtI zx&VR}6iJZYy6MI=>UyNt=pu-xl6|J1J4x$N4O2g0S7PtTj0vuS_`XxwstvTKb+fnJ z7mDn5(Pqy*6UyvGhln%QtA=@>(Z1VNAZHH$e%k_@a9MOFj5Pp34bq^f3#s2W(GtM`A#V9TdRh?je z!lG|dM(<}if@)~kTX#AE6qJvrgJObBD&6Oy=?4xQ5t1iL?_YR+OP@`jMppiaCL(c< zH>WWu8B1iAoYHs1FogI9`YCzNn;}#5K+@o*+BhL~m#Sl2efmL633`NAlngws$Z4QB 
zht0IoD;`7jyItuRuH8zFxqP>Hti_1z@OkX66Vf8yym#M}+OMig0fm-YoZwNI&v{zw zGQdl^Ifm!lFfPAz2>}&}QM{|@Eu<4~@9v<&y4;9Li`465BI@mT%#9~6z1M}dO@28% zRf%Wv!|4l4HQ2QKwvA!wu45DK;q;`O@&?+j%uDMw+mQp+wuCx$AB`L{-$*qN7ha47 z0zZOdk+12-gaGFAWZqYvi<(4{GDu1>&ar_vvxCfp!wSDnK8GU0d1O13J)-3ql@K8n zO$H_|GRLG2jTKK0R+1_wmShs!?08GEh$JVTYYaJ=;{*h4{1NUjl_Ll=$ro?+%Wzs9 zWm2=rm?2iYt`iTw>_M&b!s)XdB1OWoNyC0-RT6;??Uoc`-sAg=@j;ronK0vXISIol zC!>48Sd__A7VcCk101^~BF!$L>wJyD=Au&S9P*#$8?HfE4aB;A()84!^K`D{qq)Y zJu*$Y)j1<;aMUL{{`p{rbyo9LcT?~yZ zF4U0{XF14-@zxl`nvN4ui&7?Y=#UP3p%a4m4Hsi+_Pxo-+yUm`;XsxU9xuDlpg|+O z4|1_GfrdqE@>#Hq1Ig{0K3F=kn&4^)0KE>gW|$M>0s#X<(wIaKNVT1cPp!J~@;3rF zZ2bo^IiIo{a?R0E!eciztb311fkEPyJr-9?bwfVO{G_F(i7xY0wwG=)IU$ueyB_R+ zZZG@WoY8?XL`eiXMEChDHRg65sHHu9*-9(Oy{GQs5<6kuiNdfjh`O<1B_@n%`gw!4 zw;${>IWB3faRd1x7CyWgr{lIA>MrMdt#YA3VM-d4}n379LTx)(ljm=EB*-cJ|a8HWW==n22r-|gkTyvCtI;{=>dOxHGZLi_DNJYl@ z=yb;opH0!HZlPBEZk8sK(U->DmBdDvNs#{&Zz;CZyB?D1i=o*EcXVD__*i0U)hx%Z z6dyl)5q{q1LNN1sz`&DH5hxR}ZECc9qOq;``lJ(J_zAo<>W>CzeZA=Lm5h~3R&PdL zFDx18+Sx9ViWI!}a9!EWHx^djo=&rG$!(-Xd_bVK*e70XIa3d_(y(rf$)9PPbCiPd zO6p)FF9c~@e)wiYymAf_W^_x63no)w>b_Mv zo2QmaD85M-f4BIV$q<X5O{}E#-*Rt# zIn}PeHhlDI(u84;KSV?2yv5|ohsyToeD62?TMnF9KuGXrOk@>=K_ zrB|@@6T*lzNHNUT?UXUE)%@t!+Abk$+cDGH78QH3i$Cu;l1F`yr{Z-Avj47ldPD@y zy3=02L-<*f$15B~KFmd0+{z#GF>bA;@w5HbIF>q(6ze}nlo9DCY$;qt{$W5nJ!ma( z;5o`vuow%GJpdYl#~pEn_#J(risIs=P0&Zzn%m!fMc;{=dc^8VtaU2J zIPMWqnkupj_Lr7`J8MBGu{rW>)q+$`J{pn7@~&@31QT)LkIrnk25u6cQz4RxzxqGOb2>Pnl3lL;ZVj zrtyRomlI{RprZSv;`BqSn6Hb@QLFjt8Zb4r+1x7U!bZk@PJOk;PKI?Qxfjxch=Dir zG|X<|T%~r!(@*GUaM8Zn?3l+r;n@E^GhDuMl{=MST1_`hrZ-sRdLhkz{MHZZosJ}| zL(hpx?XG)0aC0l909q@g+@?(TUy4u($H%Yr!sRQ%9)*){ev|p=#(!M=( z6Z5SY4-9{5pP?rBoNL^QL;aGmI+p&!WpPlPLHB)FPVK6dZm^(b;G?jRSC8A|oGKrs z0nw#a?}Q8zEw6yP9$DEN454IdyZ$h?V$r37J9~=9+K$xy3w_nyq1#UJi7Z>2WtE$T zuhN-TV;r;*6P(;z*m4+atX^F)uTkhQ?Dr=)CG*Mg<>E@i+>{xfRIc$WZ^!K?qE^Q%Don*7~B+6Smmz1`rHdqDrCI6 z`>KR(g$hdO)#oK{@>DT9e$v=EbM{lnXrb%?O?=|7uTeOBgA^x3dBfSL*d>Ylm*@qP(s%@!Q 
zFY)ykCpmcv{I$%AJ3|L}8}*CihXMxg>g&hc|t$Bmz&3V@QaP5 zof|K`)R0V<1N40eZZE_dzD0A>DxWNIQ7cqK*Rr%utsCd)U)`>@}*SB;koTd5@Fdpha`t$glQO|5g?L zRq`cKw>^|nj|e-t*jf`svQSl+(^y+j6dftLXERUV)%FvTr|YB;cr2J_M2K&NNMeZq zHP2p?K7Pu=1OFcWPMFs8jBmVku;M0ZEuWCHkb}E@gPm~xm|mrj%M2Et^gSSu5$j<4Pyx{9kP}-}8$5kC7IasURfz`tJ zu0s8*P|D}GkNG@tUlcL8N3~3P>`$0{yhQW(fIoU9u*arKC^Rr!O#So61&-F}QI9?_ z%j@&huZx#d6(*H8I=G~mlNj|-U$3<-VClDrUTzH%y^&s8d=J|)kY|Mdx`;2Kd?f=g zdwc!XMlQ70HQ2ebQP@7$tbzfo?TKD!Nu$(>Tj6@LCrTluiQ*Bv4nF2!+NZ1QFhqu4 zk%}2%U56c@T=FqgzVO3*oJl<`hQNk!F#h%l&YcSRqFln=lCa?vN`3jY;Uv!J$o0Jn zvXuqEhr-rZ;O`t1PJr=o<#vXW(JCYK*9Lbm3-eK?`qskwx*i@X88sQ-$Z@5Zl&Va@ z<>WZ8aSy25XzIa0cXQnU(tIOSnWo(k)^Wu zdgU1{XLwKGaGGs;KHIY84JEv0zM$_x*^lQ>)H8~r^-49`+PY&ijj%Hoxn^yrmbr8i z3LL1G+6Xz$#F(v4Z{4u1YSZ9;t4hj}>+J`$7-!|;lC1TTj@lN!4qzhRy0$ z6h9$$`tDY{LZIPP_&QbEiEF4IFZZ^3mnk>Vy)}x&a3ibiA>RhW&%-*lwVJmJG%;yB zoV(OxE)@rQCLoy)D$m9t7tX}-1s!AzUKcD~wn%4~=zkZv!QQ&3PZ`}q9$)p5yo1iH0vY=J_@(n z_SqgTDsK6mnjAtGSEO{T$7@ZKwCduKQptF=Qk}gnBrkJvd^hF=7Ry&E(YxJSZIf#m z`9^Vjt}Wk>B`n*V^~_nxyjW&fELM4Ir*U@%>9&tLm#|VQ;KbP%jI*cgd&I z^Lz3X)Sn&kM0)N|xU`Yk++a9hdnpxV`QiKP_-vCCsc$fx#G)wTR1;*;!!M74e7$E9TJKrAa$i{X(=%kA%fS%Pm@PCE_RM#&F-Hu$+V!e~DXvvT7VV z#omFhwGrGvhr)7XNiVUOq&=~g7QZob9E?}-k!Si~X6|FOy{TNFaYg)tnS@{h9aPr) zU3_dSpd?Va8ty|atKyc{s!1?`*!nvW4mhVD-%`2?_r5A=8mdy#u9H?m=Qkg$&9vZu zXi3h`tLoI&b0qo}E6IR_tTn-y%AdYjMeDXglKFLXUD&h_8(+u|L)};)$Clp0SH&Cf zFyW4}k4k)|z&IjiYNJl$)X;lu!6Ia1TH4rsY$a-C#p2r6ZRQrSr%E{bYQb0>Ei;7lFO4%q z7`MAiJ2{Tyr%q_j_G$p(k!;+%cAOBLziwo&D*SJb*xojdN5P^&4_M07!b`LY+_vvY z$HKW?8hj&(v!if(Jbp~R(mZo~0q!c6+ZRvuDYzZut=43pD=$24FSjnR!rOeJ?Ot$g zrn$d(bmeomr>fM$JQa~V^n%hxUQB)Deq-XXye}@hN{9l8On~2@I{LwH`;SSTWW}(VDUyywf-(750ov#TtqKMcC>VvkC@>~InO%`IpDPqEi zzKRV)wb&0$Pv2%Bx-7@K*V}lwD zC`-^(`O!*U5NXkzG~KP_Q3CG$K7T3e$CfV_w4y_f1dp+cay-Rg%bzoQ>ckw)DOvdt znHc0dY$xvM&mHgDk~%zOoc>;xt&83qU=Br4Bw{MKyg4Z<>|#mw@!KE0{QfREO%vI> z9E~Tf;`y_I6@}cJ` zNg&inUTX%(eTmq3d?l)z0)dho{>RFW2#poplRsl62|n}Sug`qK3|}9vfBOmUhi5`( 
z^(~Y_F5yMepRXWCpt=t=#Fz5_R70-NZZx8@ctbS%6QKBe9n_I=jj^peG$u=O-)gs8 z%y{Dq$x<6dh~qD28(8I3x6{OXLz&QNP{(w6QMa`7&8YVYVEk%m8VB5Ngn}4FBgR1? z1Noe*Q;@a`T;do`@-OWXqHijLUX(QL1jw~e=$`|%023GxhrS8rD=R3kQ`eZ? z8p>=mdvL9>o9`Pjgi7`p9gJOAZ{tR6=@MF6eo1ChJgx-62O z&Ib(BG>YRg2@Yvn=Gz^9f8(d7u66s2W4X}=P^|5yeF%KVZL04GfGV)|*4HgxYvNQi zAfN^xdq!^@K>{BAq=#Vxtsvj(Fbzt14vx5Jxom@ilDxY1EqK#OZfhu=EcNaqApYO@ z*6$YaO%*rV&8xB@k6sBH5(v0CM*%k-fd12k)+-(qMu9)u^1$-F6rUVASNz_n2}q0j=kb(7m^JL4&B-1 zt)1Q-dM*=quk~FQAE83k+?t?NT@l1OhZGQiVgslaZPzPK;FGPA7_*L-Fu%fw-J>jVbw#f(SK z{i(%QPz?@5aVg#349HjS>3{%^9Vpi8`|ypPvo7FlTk3De{YY?GF5HdXZa_C`b0H*K z<`|YZA%C%&IFY#_6WdGX&HR}#OP+h;F21n{>C+&~mb;Q?x-VaA6ttJ);cL9k3F*=L;l0;La0IL;2T`1fqnAi-it4R z#v`W<=K$3l(t}#D0X77uyq=Nqbm>SlY=dCh;X|lUI3&K{gQuZuZ@7L%PYN*ynw@0p zIrw?50-fh!-Wnt+j-g(dtVru;$LnMS?Il4P8T!+X95`V=MnM7UCi8p8qjar2JhYHd zLl4NZ*A)LyQ-Wo5^K+a)Dj%Ben=Nf?6GPQs?)6awWMWb@ptL@v!S-gYUdqXB1gRQ+ zg-_&{{C(}ZA#o2S_b-)DccGcNw3 zSoC~bpQvV?uyhi7&XPULFEkpoJ$47%*v^1uVESYQWK}f=c^d2Nkcl)C$|VW#UO&72 zMwdOWdaa7h)w|XRX@iE+!kG9LDc4(8qegEY8+?)P{38@fC6Z(wcb|yeec~v?nLa~a z!&?*)#jSP^%YK9eA<;7q6eEW#G!y`Z1_;`50Du*@Ap0#wc|MR?NCH-#v;ebJM01{}hSbDp=3`#`@W!vz) z^}nQ?!=pUy^QiGdvq;7}B!oM7dS%=Tkz725_=6?O;Sv(((rx^J3z(LcbUk)wZ11KR z>5aRTook~<3hoBbEMeJO1*!R)ZxFP&Zs5b!8Kf`@)6qX!I_q6dbry94staIo|c;lc&U6^lfuz=-Ng*aC~aC_c18Gg!z!2QLa zC#q-zB2wwDnEX80(d>uwpNL_p0>NMd$*hey%Yp#QM}41^8SL~yRe@GIk?0>KkG|$t zPHGe36>Uw4Y1tX_W}&c%#hvQ7s?cyj(5uksPv=m{0FcV8^nu&}6VamgLS6ji&Qm;H zD?t)>uX<@aIWDs__6)ITJ(M1DS_M!#Hjh`&O6e3`(UreJs%8g@Vy03)ma;IS`^{Jj z3d6Hb*j$N(aS!N&TAj4bC6;b+`P!L##E9=#g=lDP1~FWyGH7x2K5)Lnf3Me|S!FY& z9#DKxR7gxHpZfpsA<%z6erM4E>jxv`%r(XT^Qae1iKo*4?(fP_5gYRni>s~In*agH zkI24dfhV~|6qAOB2V?oE)%^urGz+H+=Z=Imn`4FO2^M{nj_c?) 
zQ9kgG$XSl(DJ0osu$;=>pv=J2xgU%jOBXmKVg*vv{kE~ndt34<+fig7B9#0 z;zIefZy7m%;PCxl_N5>5{`-_Q9SLeUXkSUA_x(q{rBJzQwnb)QXVSmCMF`?j-L}!l z`3T0Y@{l5fRTDZlDQ#TNk$4WtzIYYIqVuB25|L84nuZ?dnFDuL9f-~bAv%%R7H(`I z*V{CvekzEJv8AjD#3R7ccP94@c%=N^L>0vS`FnM3qq?>e-i09Fy$u{6`VVVUke{OA zGVXupFUR)^#g{Iubr5@O4k}s4r6LbBy}?iKe?p?RNVnH8mNlbIAsc-kGdcrJ{qkc< z)4&BvpG^p##)P7{MDC$wT|Q_wwvabYUOcq4*+zx|Ku=*ar%GFifiTu$N;LVSWh^25 z&D2Lo!)6mom79ClS%P52Tmc<#wt-I}N9Rjl^G$(MSov@s_%DR^=^Izkq$D$7*L@T< z%|Y&@D{%J%@(CGaDy824@cY>?5hh4C8KxgjEthR1m;3;pf!iW+q2o=A)Ids1+ywI5 z5mEd%aBhI$yvqA&Uy`quE^)}Q3Pk}?5A;Iop7zh@bW7q1HeW)>dxM-fL1mc-0;gJD zsDbRTVVD+Mq@#NWERN1>ReN+7OGu3Mu%|Px%m2;pIs}7#kG;;ttah4P;v(xN9XWT+)DVM!IzILTJ+uoh8a;8Kqu@nyZ1$>_Uq{OH1v(iR5k3+^Uf-vnmvp+`4xB zD+~o};lpp4`+S~*i(IVKUl%zkSovj{1w!P@w!BZxP72@81Qt6}3iQdmLZYO{OFR<%`@#eLIpn1sQ?+@j z_b1(K^Hepy!af9qPYWpuY&OGeNtg3V6__Fvs9)~NeI-kdpwav)o5%BjXWi~?(KV#+ zbC9ltq`uYBXTb!3*tU;+IGG`T)cF*venrXW!29KH>|OqD>B}gHSj$B#iCt$(y77;q zcS9aW1QSJ*j7|lg<_iE(6Um3elHT^rp`1gSRPN9nE^tGJdtRZfAILD2y`Z=Fu&=P! zefr7_w(${3@|bzc?D*2&t0o~{Fi2yTL7kedk^EX`^<&^e2SzaS$6Y5{^BH&#oa1I0Z)8F-Y#&xS z3`HZ~GBo$B%c5WOf$J2}3002$boH{#ZOQKF*WI>b1~_?QAmFpn*r3hR-Qt9qW_6^? 
z)NA_!$Q6mc*%VHR&_}UV-ryQF#d)(KQwT&D*8Ub@AjD!Rl@CnQjFu@SIug6P1@3tA zxN%USr8eu4ukDjq1}lZ)xFMXuBghEB2y82slFR2;IO zX%_avy-CSx1^t{G;a`95e-#U}hd9&ANe+Am4JG_WhcPGbpBDbl z;}?F@X489|n_tm~b}Zb!Y)r|T^jthe+RyY(QmS4_`2T)MS9pF}Mt8R%j&z6v9t2|g z4;_XZScG`+24nxT!QT+y{V*F`vpaj|oOK^j%+T&ahyen%uT^H=N0bPF_MH%kf9yLT z)^zblNCba3?mylQUMCFyGc<*K)~N@7`K%XrhZ{d%f$0?ik3Zc1XZ+#kD})gxe4&P* zi~qMF7lZnLazt*%TZ16K5p?ed>W&yXR>k5ZSp&Sk=SnQa77@}jfc`m_$&z2BkR|_I z8vmMja#p9Mfg(uf69tFH8|);(V##J%3UlyOX+95hXfKXfZOyeC^`%R1)(7CwR@loV z{9s8!J9Vr`MMR|L=htc z=br^Zc{zhnUY>{f@h0qJ(GMuL#Xy+qcDh0&n|8fY#Bv z6*bu10x@PQqk2a9%I`Hofm!s3;Eg8~hWzqUfH^kT;0}mOuS0R^w!zQ(aFGV(?lRPP z;(&|D?sH_c=g;${7nB2?cL`t@%_)GNpDrA^*y&OCx!E~!e=p-pmvzDl7(So+C0@KZ zAO7Nr;8ALQtCMVbl!=jn2ftQyN*JP?qlNMNu<%Hg1^7&@GD5?VJ;;RqJ-5T?j^ksd zU!mbI>|X7Wv`8mdH!=t-&?;vfFEWA0@IdopZdroD$6XrM?nf_v4UBLY@Ky}4^#mL9 z4tI4=AA{Kb#eo(Qmto=O&!UV$tB^4AL-**_uU|YzhM|mbahJ6ry`5pVtae266~5?g z9enUxyD$w1KwF-sb&OHGR&jDrLe$6M50`N~q&m3ubc~SuCQ6 zge*;6+*1Nc<7boGK8^^#XJa(!ISS)h4JZgVb2`Syyv-P8!6Cr$X|V7$I8k)ri4++Q z{m?}#RYuxD=|4;3+th_rLssZC)_*+h%7I9DILsyc@R{wYDBs&(eQU6id@e^6WHmZ= zC!dNe(Wv|43x?1a(#)XPIV&!LD>uf~YGwv#$OW-J?M?v0xEl|3L$V9{vLPMfj?Jy> z&U&g)&s{0Qm#cLErh^NC+~p~L}wn(+GN+IYVgdIWe-tCEoCj@H1CgUSp49gzPI49GVR?w%MnDk%Pae=+UC zWFa?ae?qX@K62wp=%YXL_=N!Bc)k7^t{e`n0+ZmgN|)`q_Q-r0#q@X5tUq?2!h#5KEFf4ZT^)^F3~S`2Tvu+Y_cZcxxW{)Nc0?WM z-GpqF93f5Q=OOj%&zcaPLME@lG$a+jtnSX8^`di(acr>iZEpbFcCoxJcBT)1**`cW zlnQqcXd0MuT9Wt9EkJ$vBQ7SsV#u!V(uBizm%FilHLnn{@7YB0v1!#p_ zll<8blfN2bfD$EMTc!7)x4_1G=L3A1`2F1BCjVNYbvW;Ec9qL=*PGs=Lb*F(6NHPs zHuA3}c%+b`Fd*AoP>12i4DWi#&jU^V?=^;eSrK~33(bd_5*nZX_YVrdG8a_P^%f z%IydEU<@o%?xzGn8{5B*!8HZ)5a>;ICsir`Yn#47las1WUV z!~uB>eos6u_E*RtFN`A}-Y^eaUlS*gLupxN?Myry-49xZM)cyZ^zP-p2;K^iB|KOF zdB0odQ)eEp>po}|ZC}isNf)$v9u1*=6>)6;=$SCO#`c6m(nsaXy-9F5+`TM& zkNSJ~Cjy}WHBnMW1G9zY3)EdcAKYKdj?G_u^ zAOr3M*mL0MeWr;}%>T>Z^p$`9iC{3gy`#6;?l>*t!e_X6T&c9oRG?IPthoqF!bAG! 
zAc;|NDUScb0;FIdVj4hxrT_=MAH%`mt+|WNqr*EN61T}dn<0@dd)Oa>xg`7BN<63y zBb()JFn%u*{BmIzBtN~U{-8WAb`OzXpE5*7w9@jX2^@c!e9^AA@NPJBY-M5@T%Q=0 z$YS|D4`|`fY2xYRNN?8-U{DpQg0^29kv*buGN9k1E7 z;Awb+QtH(Zj3Hon1cEGt@~;u@0oD1UldVpq(yX#8klL)t)EOwe%$)#mY(|B(dkofX zE@wb{z!!W$0IC!`T~NJtodiI-N3s=oroACqT0f9XN2arNWzXw76F(?8r9^b4WIQ*`Ep`>D{?e=R;$_At27JN&0nb`wCHD-qj4U~5` zRWHPtgrY#A^sOs}FoYx%t8J*VpeD$`G)8Oa6AfvagoF-11?k8pNHqe2lU!SiHeY*z z%IrV8k+|+o3&!sm3Ixuu+*J#4qk6E*zYSG{t~oyibCqB(;eK!VQF24xxB9YY!>7C^ z1x(;pw0E%|Kj!LSve&owgtQJecKQ`HCWrY!#^FZ;Q|(UQ>{tT8K1|~zAAsY*{f;oz z5TQ9b(&4w47@=}@b@t4VM2O_&3cz~w1&Aa5JOHrC-#r116z2h%?k13T0W7H8v-`mT?FVGmYo(Paoy+(Duu7FXf zLSkyl_Q+YFh&1;%|0W^4vX_|pUzv~^%LY_R^~)Mmvl7?9LbZ{Sy5*BJeAH>tk%D5a&1x&bRpZkvsOR9ovpb-Y zX5vfkC98#7w6+@afMQI1ZU)#KIw+=W*+}Z}8B5WJrw%}EW>ACmv)-d zq&1k({woML^IccL$q+LjgMIVj3HTC_6+&Tr4lpa~!B57FjH?$%hCEo_ojyZ$JW6EK zA!mFpovjdwYpk8zKzGctyKeS=>f-!c`+1B?M1O|})iLuvF;(D;GfXz4az&>_6jC-I z7uYvYHYyIIAjVYXDNX-~XWj;}$s`s05zM|Cfc*MUiI4FtsL#%xxfG#1bf4nb3041{F$6+6}u{8K3?inBl%B*_1JW<;zJsjLVn}2u2QabZ)u0t6FU-IETa=uww z2R3a;6f5bWLSq0KSHcMZT1iAy07dRlUV|1+_C>3WAeK79VWv$JSid}X%eq~7P#6F> zrENI98EB-}06zp)m{d0pII*-0U&RS?Mj%$m)qn8!$6!YYJN zRT4>o^nSOd#TmXsHEalPHY|#y5UAYX2Z-$C>*9T0QsynMtMZpi+}e6?H^xY$kIjF_ z(lJ;#=ou49 zjUBftm3QZ|Q|+YD4eX(?On{w9aTj3g3v8!{W8W705l%U7VQm`8so$Ta7`A?65_LpP zZQ$@D5?OGs(Hb6CO)kvkBkHjN)&AlLUb3JSnORL!U)m#7WI<&gEN=l4(!Auk!%2&* z#Ma&ZNvblVt!YqeDQI1qvN@RBc#LvVZ5?5Kx34_v>pe?ugWVjBki^&Tm~rItOiJ66 zA$=jMl9%n*))pXO7pcl*?#TS{+iMlka*Mh^m?}Ugy+g?s}{*Nq)>E5zSYzo zMuUa!$wU7+{A^_V=5fL!q{e`>yysUIW{?ueTuEt$l%~_a9gx%6aRD(4gAfO1`{2X3 zt;MtmhRbPoq+RXJ#UB^nX^&6ee1IdwlWftUjFZJ5lI|Giqj>x}^YwIaq~|BWj7C(+ zMNp?Tc-f$%@4g`TpJx~6a4`KFe}OXvZ4FmP9R0F#C*6~K3S0`ey0N-F2dkLJ;;eyA z(*AiPq+36%x%={sBn;HztVe~s2$L$>NL)1^ytonJ-nBk^v4dI6alX%vAtY0!{x>Wa zW0(#K&yNa$GhS(ar9jN(g91Zdzhn0fultX;e#X}qL%1gihredDa1!cRg`Tg(8UQvh ze?f>9WsRJ>3o&Ye?JCogkek~!)2AwOR43z8XZTbT?ycGih-vw!O=}<4AxP||M`N6# zH>&aV>5h-pzW*0{Zyi-t*Z%!dN+}WwDxK2Zjch{c?oCO9gn%?CAti`(E7ILv($bBf 
zfPgdzNcWiwyzl3Jp5Jr+d*3tOamG6a4u_7t7i-O0b6(f?^S$-yv|Nn6I*d0)zx6i#8liw_c?#p@(_%A#=(7Ci`U%9WDzOmvLZlB+4|)2+_COT@bhESALYzVit8vLhinh7T(qU6fml z%%3o%>9u#il1qA?#OHT=IrIHoZeHPu#Lvnr_cdUMQX(!+xbRR+W4XHs zW`&H}@31uS2v?};u`|CuXvt=r9;6VtJ5KiW@o4$|ozftrBuzgeJ|)m#!dv=U%Ef4Vp{J{Zz<=LDnT>K{hI$?a_R9L}8?_VS=+Z;Ff*+Je{-? zJB8x9DEbEJ3n>)iKjhGB8yRap8e^5qGf6{6G(*DtEUt=mVBuDbPMBuCZIq-X-qnU7 zl@%x#xo$6?qCp`I>07?m4Q}`jj2s$IiTF`{k2`8{9T>Sn078y@CNpRZ*P-)8UprW}9du7nmd&5)Cl?w4VOar!w=htGALFl`C)jpevd zllsh%*eNOr@fm zY-FgoF*v8hbQLjx*%lf61?Izd*3MSqPMdj8`{~1>`&uGbfUJ^z#;d|%6C;Kzj%ANKo?2mgn)g3&W`a3ntx~o+2au}Qypy4g; zZ<^Pky8QyOuc!DOu~zXj`&HQTLWOSZSJ=SS~HR$!%LD^)iu z6Fdp}jNa**LtpWW?HxO!+Om5N0uvs*_dZ^Mm|dn-*BTnjiMZc;E-sj4VeMLO$?zh4 zF8aWC*NOh85k+D);&K*~+HJm*&v}~iDNm4PGFoEn=Hq-s?VRwY02C(6i(S*UH1ht$ znJLK)1@D|0gJmNcjUX-1o`Hei#}m)Qr$8cLgD;<+%1A;X&|E9`BVPmBMmds+<$z!r zYz+IA-C{OFeeX?a3mPZe8Q10H)rcoIM0fOzR^w$;SR-29nS>J1SQ256M>QQQztS07 z;&|b4!-#R1(~aE2X(q~E=ya32H{#r&3f5Og96RaM%))6c59fAX+=&}KH1~x z;~AwNMJ?{RdtuvS*l)+ZUG9t^^=u;+sm9+~qq=#EdsUqG<6fU%EZ-|$473O!FLlo> zDQPISOR2Ll_-0`+GTLtBrq1Mh^l}025333!h<(Ku#&tu=+6GnLqw02)${Q{{)W;y@qIpRde~>EBD1+K6g{dTGW1& zcz$1}(D{ewWL@I#7w<=#ELg4!jNGnT$6!%# zPyF!lj)@WzgY@HP(`g>#v$a5Y^TNWasr%P7jm?H@s8BLbe5|x3iP`P598wHVKgFmN}M_!t8U7AF7UN>W2FBGnY zno&!c>7659Qr!H7nwm%;?|L8wz4F{(Mj{8E7-a$0jqNoZg*O_OMiJAq7&g{|IBIb9S&AI-Z9 z`_YY&_w^-Hbg&bP1$l_qHa#BPxhee_xhVfWSHr9q8cVL1vYrFboCgh2gwT-R|6wjc z(M{wUE-rJq`9M-J5?`0PdlNnq>_Jn%L6ovwJloO}Z0Wd;x%H8AGvWZiVp*1fCKFqr zmQ(|Lia^HmS%LXjZF?yj^Na%CoLV^#4W^*;8V+2p-I_<>%lWR>wD*IIot~==4#t)7 zgLa=}G<5{p)5Bj?vtGtIB5FM9G4QC1Kzy^#ku+l16K+87-;baW3L5?uNH9)1h69hm zm!Oa9!_8VJ<}WzT<}%QGnD|L_E6iMB<@G+WEuO=Q|fv~ zBb5lAqw4HV4Wk#w@yNQC_plTEz_xNsTzjzfMCvqm zMd_-k4zeP**G!pO&pp6Ka8B96dH*V|xOpW@%o(#*)tageEsaW?zpZ}G8`B3%5KS}E zC^<<@j#b`36UKwDcC@Qm)eqk|TZ)Q3*D3f+$4OJ$sm(o{7RSdDvz=K}Kn$9=4{j*` z9iPFEb3>vzBcrTuXYryZ&D>^pjjDe5t_*h+Geb^lm`amhY{>^~q#A4H|LH3l$hxNG|2lRcuInG&KG;aeHSARztt_uRQAexGwaZE=3mxURIQLj3a*|s z&%5Rm`2~ 
zc?oOD^yo}&L)}W`qxlTg*5~RI8@o#T`U%s0c1_U?G9V2-eHA~iNO()%11Z(;^$a9Q z%`nMT)fp+AL_@;VChw%3zf!Ek}y|gcWuB*nku5y1gO7nYtr=3eN=RC{mmd*DcYLzy~p5&#kyXJ&iFEs8ltkR?h z52$82`LCW&&FCHcVoK?xFivmEv`Iu`S!hpan~%ZNo#=mZL#~ZIt%hK3?5EovB~!lP zH`K2$o~rdY8ZVaT8cw>8rC=&^G3k}%G`P;_k<6KBJvbBIdyh2p6BK&&HUr~QE5h|E z6f&N0l?$-s&Se}yyZIU!C&1VT`Li2Y8a^z_E=)78Jt1(uH~X9{^WLEKQly;v@lZz) zx7eFc^7||Y6?4-58daY4D}*A$?G@VGAw9?0+=0Hv7wld{0uMVpRc!>PZ4BM* zRjg_iHC%R*%bU&`XzPcg+HwQUYE2oZf5vyA<5c86wQOHM?9Ky(@NW5iu`K1I^Sf}1 zeX*w>Z9(us$s^)dx6)!4&!k4sDWKRdAj&CI>-Pr1;fRdCMna3^E3DijC2J(Yj$hJD zOG!i)hS9Yxc`E+OiPCA?()PYTOU*{ovI4Qb-I}JX&ks%&N~L4T^=5zdnwNXXzpt2t zWty}8s`*!s-USDtnf|8s#2SfeRdrFC=9ngBV-i;JI7s}eLp-OZW`Q?O|7doPAba;v za?E>mVEW)N|N7vd_V>Y|jjrXGeE2tvnRPV)TjWGE|5D3E~xQOId)9v*0q1u1v#PVHy z$L)I$RHn4LB!@3I`6f0E3;`#4j}-Mi1*%27eH*2xLNW`(Q^lA1Bqy(%7f*XhUVIhO zl#u*|xg6U?jKx?#(z;MTDEu~o>mFRQY_%V@8Rk9G})z<50~P()2oP!^&sR zb?OCMFx{IcJ3i7pmQqEoV6{j!^B%(3TufFNoLN2ltXFN}J8+8*qNSEOBry%dPEcVwIDjF_Y>A{EM zB`~k)G#>m6%K3o=))|&$V9EJRfeULqyd;*CyMKz-1*hHE=w_xz|IhFSjvrwNWn3?S z{NFunU3VaDI$a(AP>Z9aZE2~p#5KXPdMUw5a!%LdPU#NELj3c&p|%;pt-{jT`1yyb zHXn3*nCy8-OfK6!@hFE`uQpG}f4%kZX-*}OQ{3KQG7b~QAx61L7(pQ%j2ebL#4!BD zuGo6_>92TF_k*5~j)F+0yL~+27TmoUctZ1U@T9Ude?Q2=?o8w)^NO-}w(pK#eB>4& z9?9>q7ewkoL`Q`9G%N8_6^Q6}%9++US9!(_yBj-`z6teGyhw3x-D&2d>@EeN$G74` z0V6gOROfG$FL-Ns?Ey^V#kl)cv;7O5OzLl5mhm~x*CSN(?XJUGFY(|}zolS!)H|w3 zvNwq2`6?eP-L}R;ZKRj${q^ZK9K?GF$1jv_-E>y-=bkElCBI;vqk4r7_)FFD{oW(h zS(s9jUc>HQEMQB1s!cnV@TaL7186W5QH$2uMz!% z7N*gQIy4xAsS~^J*4KffdOlg8gx};uN$hJWl3z6y@%PSRwM=9kOz41yZtzb!u_WI= zue9V`w2ak5@!qX(t2O;V{4j`*7@}HGIc2#p%=C@Fsb3|zmOt0pdYbTtcc6UMy@yg2 z17%N}em0f!C|^%s#XlKG%@ezb%+>_@adS2SxS-}NtSyvO>PRdT5pv$)WkIQN;at|L#2N~KRZrGJe zJCXMVIp)o0W;K@OMS~RR#^YvXk*X<61_|C1*6gzP+ofo<7Vq=D)ynhqe1Y*A$-sYP zG(D?>)ef!gy{aA~{B;SKPsO{tf`$belfo)xBMn$5aMOEpCg1KV4i)D|U4E$P3!8|% z9G=Nux;iC`|2ZWat#(R$M!}F#%r^$w(30#M{s^?kvL}XWYRWaHF7gd0POz zcU;2P9J}7YDQ(@Q3*69b? 
zYR`d$(H*eT=+~b_DhNMd1}UR$P#yOXV`&$K(EbW!*8cTbq+APBc-;L9 z>Au!FP~E<`4Ohkw^2&v>VuU$5R#Qz-@kAo;vz<8x{o|)!K~z+7Ers7HlgZuI;SyX# zp#_kO7j_*F%*^qAXmuFgzuVTOm2f`#67b9c5acrl zg7;<&X`(GwONch~);CQa%AHQYTyCwUATVz_SL}{j1*a0OE z;v89wuBFmK+E=w5dS0UnuTB<|CwOKqmVy6nn+r6nk?zp?=!GQS7r( zjkHKiIAA*Bc40rh+I0O9U|7<`RM_S0AfU;n3JXC{#vCXS@Iv-Qf?0~|yjqYQ&JCIH zig9fQU9aOs!)P`2pv&IcV&MNR1UZzNrjfa*sB$XB8ooZnl1_pp#wf)ukI2**lxj;1g)8syj$ zS{IA)9{-FqZfXP`z#SsTGf?Si$d~^Ol-<*vg@c0!l&cP{ScC(==;W^{?pn`l7QcT& zj0JfN=3uQ~$H25VTz6(C>?Le`mA{ZP`c4>BJhNl}W!+{aCMoE5vo)6F&rdD^Cy!IK z3;b3EW(17I%Tc)7-OJeuw2d_mT(eZgjh#W}Te1-(#>~s(DZ_XvKo{~=jJw1u)_`Bs z0(=CBZRP->?FPot1x_W9McM5QNp4xEX9; z4N5M5Vp0HwZ3NG@TMa+@<3ovT`|>hyG;r;e6gq&vc}ZViO%e(qWZO1j158vB7lZNw zqtw+?-#fzhX8`{t>HdUr@Hw)oIL-<(;xWsnSl|CtgpeR0qTI$jDFP|H?SrZ&&yC=- zWQhxKH~MEhFao2M^hkLOhJtcs#@d~sa4$|>hcDDa@p_=~Ip%ULMO2#OfX!zx1 z2y8WPsbtCqVF?&5L%_GA$0}aff!lfkK~w8P?lzMChH1=p3G4jkU96<_ZI`Vd%(o42ALZw5zCmLIB6G1OQ>e)6pT*c*jBEx7d; z>IFn?dBBVPfe@m|*t25>XgfJ0kBkIs`k{quyQ0XG2}pN;l_er-|*!0Wa=gHmv?HO?Wo*Wni$_GPz}?l3%dG=wzrq+MWxV0@Q! zZ_}9cR5O&87%Cwh&f-4V(?M}7kgE*?hagP#E-tkUv+^&Tkf-j7a$Ap1{4x-wnieWpxYoO*Gj@(-DMEVB6#%_(pGVV z{g(q)af$~ndJpzLThSyJx zMK2+QZJx7|*1(rq{@Jx6J4WvfC;N#4{|xY|B_`fi?Mk1h+#;K|`+%e643KLguL~I! z0x0kN=Bm)JEqDp6Rp0F@oh!-49lC=e=4NN3+zSjG%i;ni%9~&>AYn>(Aa1|V>YKn2 zRo2RTJEiNbJog%$i((p|(i`Xjm|lW2qf%NG(@+j&7fPIKdY|ui77GK4p3SJ|enQ|! 
z@4y~N&mc)Ka^MlM_R8AKCr;*6!(_|i6zr)*;4Bq5ZR7_~a+Wbza9Cf?C3;%HO?ccM z=N<#eUb6dAeNqpFtEU(B${q#T92Wv%*BAjvF+J4yYpV&pgFI;rUi(hErO|*9x>e1f z&X7R)8ZN3Jzl65f^pP`r;b{jzs7ZJ3qmn8k8RT!d5e=q{0J!e zE|?nV@DhtW78yF9{=D4F61fdk({6#Rn%T%TbyJs!G3soSOb;YL$z;(2{@Q(lIkWZH z{8Fm*THsC*9w+>zNh|76!%aFnpzL^g_K1Q#y`_S>K9qbu@I}`84nKOL{4KfcD2WCB z0uD;2JA~rZWx~B_?$aH4`9~7b4UsfxG8c@A=CpM&$*C(W^Eobq`pVDp{Bqv1MF_(7 zKmhCeW(<%(??posdHX-MpIVr)4-ex-d8F#QarRZMIoNzDt;0T{v!GyMam)t~*b9gF zlgZlIk&N$t4d|!C9bU8EjN+S~K1imodvwyGoda|myE}X8KCifJ^S>Ag;ahm5ATKAG zlVb6sS`}2zxUCn1r4JVIy@R10+6RQ>sny z*u#0|)y5k&<|LjnR0iCqrk8O0c<|Q*P!QI8qq;%|4E*!C8t&eooG~@acTb)R5*&Xp@yfeXY9q`=s)(q4Lya z@{phDEQ|33h!Wtn9SnM%zS(JLC^sHSKcQL-YxQddCMbmivZR5Ex3JW~p4)!0wA zGvzh8yGjyw*nAMysUMpic0J6?<$|mKc%61@2K?PiMV!i>LKCc2r|#_OcLzuIVwc^D zW8UN(R6OzGDQv5ID+7_=)5CC_pK3#o#;uipT~Lx9OEVq~GZ9xMva018rs{j~d)k17 z(vmXo?7!?0?NiYmLz2ps##o)|dk%Ien8N4L2z2elWS5O?S(UNu!*Gz!8W`{4a0Vhh zO~Cv7A%W;rmhYj2~j9aNgQO@Z;m)v>!aRxJ#+&B`1lM;frrF7Rn8;~v$M&azx@U?e^H zGS{*tKR@?l$tT>8YI+-{9MgWWYO~Km5bsT_?1m_2y#$>x-}2Kk3K~hVuC(ZqK{q7 zcGK$R@~_fGEi#KgQ7`yMZ`5H!12O2o4n!XbdAl?lVnw$voR7t|tj772?!OkXFUAo> zBOSL@a(yR#&nj566ISsUhBjOFiE-W}4JE<>yzg&tE7REY9N1Gl6{n}@=cFDAvYguh zI?;v_Wj*_aMViL}r+{O}TWGKcg|zugVEhrc#yRZ0JtPwh>{&|YvFGYr{aYRqMMVPD zls<`T$tuU89aLLIF2pWyd@- zF2;`ALFJ&Lk!_HWID#H{;j+1jj-tJZdVCWq79MJF@318o7oLQf6<1zu>$RI$k}p7b zIko#6(tVPPkKW!fTAT@jZ2}Dt+pvk03VCb8yN5gg3epV1M)Z|V!@jvVN7rj`zRJyr zHkDV@$nS6^YN#O-&H33?Gdz4OcWU)I+8l^I;rBH$J8P=3jH2XHL#R(cc2G`ig`EVA zbC_XH4r2x+?&fjftNV1tpU;&U{b?UsxWedy)Uhb%O)8(*-_29D;1*8}l)M1K)S$<^ zK+AYueLE2})O^T!MPOP<01ly;j}lpc=Vtb>g%nFm&o~dRYuUc*w6%O{h-ec!h=+vR z{nrDeCeFBVHZR| zQf~uHrrGTggxYkOK+#GRX;hN};(>Wc@~Xn^6xd7)4d@x#|1zNO1U3UI#;A4_tnYw@ zMP!>Wfqe-52g@D_Sau+skN7Xbc@i?jgORjB1S<#4fpiE-FGdM;E@48l+NRZRxfT;dU3X{jY7CZ@`DI6^N@^HmhbyAU;cbR_$->f}@ve?*`{r^B;us2)0fy{VRM#>iDnkaX^LTiDh8AyGL=YtsnK*WX| z1eO~9^EWh~iT<~Tsf#bzRjxI(ar_sa-Hd=v+gI%Q_4;;nu5X8@Y)v%SZS3FqA_Oh| zXY1JyU;HOw^f%sawJ<{PH%xX@v^h;rcP%3p{;>JpZZ+%=!F?qoknGzqksJ-tqFR*Y 
zX~|~!n{VHH1@Y~p!Oj(!yw}ZnWip_!V5&|nP50(4&RWRdw2>#7!TOMze~oALKkxh@ zk1=-;0in00N=wK6eTlzGCVcd`mEktK5XDH9Ykq8#?wTLddf2TI-I}7f*7Nt3g>d5p zT^*W9=M;kt!&w;`Q?99}D-acM)1;=X-vgH4Q{wNe9MYWG)((-8P6Z)>-uWjYoLp<9#w58f(l?I2IeSlKyvV zOyY`Kv*q-p^8Y)^sxX^K-rUtxsYQS4KFgAS1*l#_h`&*~qDs1A3Z{qof6v{vy6*QS zNFPgbO`b=PeR#Pqean~s%5Ihz{pU#`Aijm~Lo+A*8ef+NaJm~+fLQXC3j=x)F2K9` zQN=};A;WcmT6uax(*QE_M>9^C;5J#bC$ z&mx1y-XHuO9m!22yp1&*4|>CXwR|1KTupsG10BHi2FC49z&y%s{Sb&D9S^6>pV_C zEa#f_j{w&Y_bZYKfd)e4^^-gZjP$u~s};Pi{Z5ZPDM_Kb5KeZ9-L-BK4UQO!DD%CwC2n6hS`O zWPUENE_;%lk4aoFiKaAzn|eZZ><|=h{f{w*jWm)ehY5m7CU^CL>Oby@k6J?>jxO}> z@JwPid3siO&BMN$!~TOn>ipx|tVn1qDEFn5eD2MT<$(Hv!Opj>vmMEyfkI1ty{34O z{iA_j;S^_UxQXEz;E&BKoL-hE10tG`2Ka%3o236kj*|xeW0rilV%l5&$+VyR#k61B zhFE4g{oh=SX*G#+BkJeAu7&i4aTfHp)j{$!xNi_M2)g~p;yBe}o z^%!8lCEfA&i~VcnA)vdY!NJqoi8|6jPiyz5b?&3e{o`>zVsFDB5WRbFB@LQi9|B`3 z;C(4b=tZXA_n4sXt?*m}?I&{j%LLH41WU($9&k^edisxM=0r-q`fWCvj)u^;6x(EQ zicA$=`Dt)GKZ${+%&pg=S`cC{_HKd;>hNo5T%tay^6FLa#|VFf|!}1xf<+3 zH$S#P_io#=Lv9vik+ENi;Yfdio-4SvkP`aDKGZ1;^NZ!t|LByJYnEM{6wB!fslB_S z+?F?~LN+%Rn%GDUea)9jbkquRdOq1(^{vk@y!uZHMPG=JrGU#*UZJ^wLq}ap3t+e-VBPyk%Z?DBFR9^q@XEc8$YLBG(fQLzR z(;@hp`?mSReP^VF#1ftUTKUJ-#qf&v#FepvJmw-bsoTU3so(rXvI|_b;}b`& zmaKnmhstGrg1c!N%SjD->>@|5ANty^J!iTXMo{+Tr^@<2euE(%*>4FalTrsxw(m_t ze-?`?Gc=VvEp7IcNh%v7P0ZAXSIaXrX7?fx=B)rQ8md9IsCc7k>MyIHYx-TC18!PB z8t6#pU5n5E4~!ZCF_FsDL6h#yCi1|wd5tnOQx=|Q62ucmKm6N`;8uhWQ^+99{?zx+ z-sbAEoztQ97HzSQ51&7?{MX(D1&LcLe}84O_1~{(N>owV^H%ou--o#X(dKbC3**Iu zxoevuo2y-%Jy%&gr5d`!oHFI#ok@=OH@e={5bw$S2cW+~*E`2#EdrQjEvcZH0{yU3 zpjhhz+jD1Bs()-d!vSR;iy8Uw?@<|whIp_`v6kTkP;=?)p>c(plW1@Na$T+I^}t_R zfPFn?-G5yE8PY00OLVlYE$bg+s4Et~XKZaC)9t}^N zfQAcN8m@lslS7FX(oo%a2uA+DANjxS^1o*=5Z-MifV+@?2L!_$NETVfaocFS0GsaC z$CylaAu1cng9boQE+b%MGdxp(9HaVI?RpEu(6ItSz2=vdy7eBw()ox$T67MoZd?uc z^H%!2{NWQDD2O=+CDtH(`vE}7xBLOlBn&qLp1rrAOw(8Evc48UfG+;3{Wtka+YWH2 zYuDb=V0pBW;my*pr_2jX{#&6E;l)#ydH#?uV}41;!|3T_ARKtuxdNQ)F(D7<7LdFf zsb1%q^?2dZDKLWrv8%;99fTSwsHY;p6||)au=>png&?eI7%p@WIMA90-_IT15&}wz 
zxgPxs2at4TXo19+oi0#0gf}E^=8(DE!~J-(qrZ4^3XYU^kbI8bJWQ8xGN2lWF+h`{C;|X?v<>hg-9;rB64>Uy$I!U2T9iX zrUsyBAbN4&*0eq(fQhZ6J!x_6lF;su3ut0DYusz^D1C|0|cm%SVNuCFj3mw51!hl8D z5kRY0jMW$Ry6B<|hYfYadGCbihd!?RWj|>W>kpyfO4;>uw`~LUW4Cig@JwM*?Spg^wg8ltQG+F-%qZEG=Vf(-oCfF;|lyI zh+lrk0cMCzD~eT2^?~U)Iajqj5@+@@yWD_-?yVPnTpB+^R}zIi-FQK!v~>db>lcGe zZD(LVr~*MEvL*4gI<&Y`vSDuv)W-=zT6!mrg$~u#diJ^is%MbVu%_l>xO-KkAwr7e zL1)4zK!yi`Pc+*BI|RvZox&@m3$AN9Spfir@ATqecphPJ@^Vy3Nzc zjG13!r<+aX-@a)rSl6&ao~dS6uW-w8+W<&s)(TZ^E(X&%0IDd$rdQySK14z!Cu96H z`9kT;-VxE6(+wCxeDXa9BdpDw6ep)r`L-eEAu<*(8AdJ;@jQau3^E$#YxfoayNCfM z%=b9o>5yXk?_L9P2?uxBUbB9#sX3UkYPG4F#+McmzYjr#1sE)NRe{ug#1@rk6v`rb zeiKlgfMQS=;tk;ATa|P(=ss|Nd}T-F6cwe1HLUs)jF;x!$GLM9A&l(KqyP{XS%|}} zSaS?F8F(~6<*X{(uG*GowmXI%07Aw71k!Q0j7uFr>xM-#aw~CD=~B8*dY!URx{i4M zQZ1APr~iPfP)c>eej+&*$+~9n>)fw3`e*Y5_S4;9IXR)lvb%3{TXM=7$eakP`q4VW ztGHyQ@L27oJ3~^vjcM)7A#JBvvrbY8cXRhC&C+NQ1S>JstNPyNcs+u(|E$1 zPpNRQZv`J@yl)%~x3`HhL+E{Ax;YADRit*6-*j~havalh$+H80Ir9Gy*x$|&YK`DW+pxluKW#=${4H^3MMV<;LhAb zu_#?MF%JMkZ<1-aa0>e@eY@S6&)mBWN2;w{Ii+U)Grvs7n*#v%JA?|p6zE0#sYQ>` z%ZlLk<@^0_il~8^)v|Tkqz}(<<5Sn-W#M{XS8ZO`fQktiG^pS zMtjR=_l=547(!ahc&QzU4zjo^B5IVDc%6bDRzICc{oNQccF~`RS&0V_Z~bYT76X|G z6@&H1hmq%-6%%0`?>z35d_ELDtQi=4I-6DmoD=rHzNCBpnJd?i!03*W=H1lfnFz+>kS1TtxZ zB?Wq2J0_))ehW4FPDjYg@KYIwe3+L`-G?foG>W@C(u%lS)|X~3C&4-(JTb59{8a#J z$F!xK^w3w@^fmgBbV9CuDOUpPV?PepA6j^Rf!XfDesn`G+Hk+;D`L_u2+E+HrqRkw za7?);ATbG=ZBrrja3o1O4)3kZf8A-68~IrGSI+|#An@g|MXmh&>`oK$-5PSe*peSL z^E?=A161#JcoK~Y@QE~Kx@}QWo2-)FaOv(ndMlr3qeWld@JVn%`}BqgruWC$iyp?% zO#6u!;KjUn?E%<1mOp#z7?C;|)65Qn?!%Zw5_%ZRLK&RW=!yMyOB-)=24y31yur6` z$;t3R2xE#MM0sY^1*=XEB${pt=ZtAD(6 zTPTSs$_8Z{Dm&;!;IUj9&%)gq^U4W|M8XlkS& z;}G5oBmv!2vtY9=ancjTGrZ2YRzHIEoxlqm#KmUrAfs>Tk~>;bRNM0N4b6rvJrp0% z#yO|s&5}wC3W%kY;tXfePz4m4ek}vx@{?YYAI+ahBOi9(=EE1%okfL`)+-8 z_p!n?(E;My_4-<~(@JQ* zt`2}yY_AOb_j^_H@%2un%fc^_UTs`EPa2s#e!N262rr@e`n+YOEy0UwBxZWr`|_N- z`$NWy#_jH@tV~b-6D75DrmgqbkEGSAbYfJTxS1~H8k&fhCYrY~GSFFgo{Z@nm7~|B zsM9{MeL8kpWj6K 
zo6(EPO?tIvovJ*PBjJzYlDfc-UEooW-r@NKTuq3O>ee*7+_IE}+xM4EVHZcUCdcGV zBgv=F_uVXhoSMBm*-Lscq*;Q;bKZnvGo@L*s`?3`x(Y`Q zIav40;99!*ECRt`hjFI*GXhVGPU$0ei*up){zt(MSwad)<{Bb<#M1ERvTHXYyvlx~ ztx?Rj4)$OfU*Ph8N;`NNP1nM(EWNsh@FnPxMu{QYsE*d=?LjI%s|CFi2eb6jO^cQf zJV{>s$EAcHMrh)mrSsITsNPlKUtIlSc{GOUfi#NwZ??V?7F zI}G#rra#K^ekq3U|+}AY_?Er{e=t`@Nu%)1s*+#kg6+$ z>4sC9SVRjPN3$wjT}gT@=j6L^TfXP#FKl1e8Efb%;Lq=r_Op(1$l_%GjKg3lt^2@Ja4)*>uh1` zBr-1F>X0D)-sA5zTSR}d0A~)Y67Xg@CUS6}cO2JIS05Ey-;E{86sp>{mH)oV_rX)n zVnCai4(v$D0 zem{?QWG}E!BHIN;%lu+xh*=`CtCqYh4|DxvH=Jy@^JW&JrCc%^CI=D%sYC`i09D4Y z&lA4w@P(rEEN8|4{hjItae5KPxkZ|SLIn%kuf+PBv7?&9Js6vvYgLS2N`R-@1t*T- zN+)@bzE&i>>6cfdtZDw<)<_SYXJP1T3z^#^F|VXjM( z>XCNSYlyknlPR2g?`&SsMAOO7AUin=KPZizH06BtJ2v%ad}s~NS9#H6ckbB%3|6=i zXWzqJ_Pa+lU@ph*-fj*41R?~VWm7(KG0u&}ICF&)xr_zg3}xH_=~lXdYODx*wKS@tMiTts>mC!fRh*f>i)u{Rrrp(;T8x>d)horjq5V5uIHn;uWnq#1B^`3c zH<@7;hcRf0+qgHmFc$7zRF?DoB1CZY=d?1r#ElblC5`7G4xJRmYNFY~Fu(`5aKBU5 zzvHl)vzxLI&1K)4uy?XhsnOOmnO{

*JT*5x3*2)NO}IFKX{6-y#>blAknww+r3d z(x%Bi=VDGac0p+aY=VjL18SlbAP6Fp<1?A^_`JEv7iZR%C@9Lv~AXDs`js-wIT zGh;D%2!bAb5vwdc!ktYMBrNBJEB=8gk(LTl&%h`d$}5s6eOOyM#qrfDJXWVApL*ce zWc>d57s`4~%fRjKlYZ4>*W?#1^qcv$%zhg3X>JTfjumb{YIb1`Gfzt{DHRW6dzsdi zhi^#;o%u?6J~~=%J`x#j@FJQ{&zl$)enfUA1lNl1*L8jz#NPzxLD%49su5K5w8MMa zusZ5R75lqpg)GlF=cBcq$OW$5?^jLrLsdiGDnMV``R&Wwj?NESx6gxYn%?eu(X?@1 z@C(q@EO!sk#6*t`D&$5uF^8qL_p%5u&QUk<&LxD_u5xrfja0qtW#dsF(L9zy`00w1 z=3JWA=8I9pm~wvhO~K{(iSqEtni;Dc1`CgpzHP(4HSp1>x1*R=C@P{2kNJ8W#2YX~ zT9MNL`$8ak^85)(H}j^%`b*Q)fh==R=8^!4NZiX>s`pW_R(*$+M+|FDqX8*Q+VLOE z-ov6OOv9Se8ZESLSBS`=)9ne4?~1xVt0;Z^np5sa#yNi15=Bd%w7R%HD;9nISu`2d zgytDR#jj(Tq1o;G-Pf;RQ09CQWh_f`TA$sUy@>i1zpuobV*2Fzsc{`8n-3EXWDO7G z8o8WA!n&U>nLT1NmBI8^_Xa+|>jRapk5?S>Yf&xyG7Zy`LFP$xMOq7sJg=Dd% zdrxSYfTL&XG;-1z)5`w0;dMU2E>H_^$bQ+kjlzvLQtVIafz@MCDp4 zI29tXW65Ql==w^OC)QC_a1iuxO%vKIhcijJ&g)`2q|g|F|L{$@(fE&KnPjRTfsLG zCuTx5*xpAy1Z(zzegDqz<;ENxbR*MXttx3V?T*t#p zH!o?wnrC-{&Cdb(BReg%3g74ogUjITYfZiH<3Igm776EEInCH1yQDwK@uRQ3dmQ2O zgn2^tMJ3eP-mfABV|&6gX<|d^akE}jabA3P84d9P)Y0UL1d{UdZUG>i@Jw@=8qkI@ zYs$?+P!cbyy-wWwbqOCPx(Nrys&mjT6g0_}P1uI?Z_5aD!97Q1ge%rNBE2~FPBR-V zHlF^l(*KBu;wxFtD_V1Dj4>Cz&X!V#-NL|=`wM?G@!Rq4@;7m&{iTRJ-dBb~)q9^Z zg2q6pHiJh3zld1mNbcqg4&yLI>Gz_Ibwrv1@g^|)ch_a?wBXMRr2k(R8E#*s)!mAt344@8+=GKLz!a%TUFvDAv3@07ixS&GdKv zhqRe&6_+pgmyBFYm=2K@jHG)4y&8Dgz`eUg-mYC4?=dyM(lbS2xkLm@M)RPgqmWND zjKB*rS#K(v+5lqS)@r!FFvgVXq`@p^xk^UH!20l-ZquWXQ~$WfF6Kbl{(9uEIVlj#;jrY|k}>@DBn^x&Z!_};LCeuE%wE#o(di{TR;|7~TFF{!tx zLB?|2$qm}dSzGmK)WMv`JvYJrY%RKaa2PC%`|l+m%PXl%$lvoHVy`Oqs&QPOT8Qq| zTrpc;uXtA0f3f24Lv!?8u)1|+UoQ?sq}MpgC(5KfevdAPXqqI+={Ri0*wT0ZOjXFP z+&6+vnoYYz#SMo%IMzT_esrgAT*$3*rAlv>OGw9Ym83y+@P)8zIQy&M5DyB{RX!@3 z6P=6kUFI4Q=FKXJ1tvL{F32 zFJehhu!0vHmp8iW)dspO?s2y;Jjxbscd?TTSfF{K_KcP2yTB;It0na6u2~R{v}!j? 
zF@L(NZj6Z1Li6KbZj~DJJniEiu6TsqSW79CmOSkgrSX96hu;hI-0UpMD$}%T_YQP} zc+PqhG~apj2=Q82`iRD%=)OmhNI&B^XxX@}E>3m}2}&N>P|3-Bs?b;IX{o`6JB1-8 zKjCF(H#+Y<8KJ9ngN<+_0gjoJt>d$NWhZJp5QK!S6j8G&WunaI;4!15D8khTwIzLyWh$^%e;*M0Y4n$jF1F4%P;@hP_%h((+<>kEea`#=;u6akKNKZ6 zkd+`NSKh1dv}AhVWdj!fUT_Q)+o$XBwXzEs{quRBW;=9N@0^&Iv91*L7@--=40}j% zZupzO2?{X0MiKw~^5*TU54m1{h#E_Y8G#|gJhsW)H)7%WU-y?l(R`M{|C>4b42qW2 zyYA-Z@xDvIhTk9>TkP|>!HTn!i-hm==V8bOF_6%T*1iuea@50c=F|)ro%qg85M|Md z=na%muNnn>vVhEIJGcL~-W?+{aC6kk-2^_-%2B8j24~fE!bni^p$`gQ?SlGv)87g5 ziT=_8K(5oK097e_yNa-3>#;z1P`nNXw*?t8u-N7=Tg^;NfO1|-#jRL`*@c?VArIkA z!+B6PE&(KQUi5Y|6n)PvjIP)M*)q;Iq+Zp0;K0!bnBC!A;eC$G+#Pu4B`C~1&`aXJ zVb}=9`#15W8nQteq+b2p-J2~|LunFYbD?!K?1WfL`Xsu(I}5)~&>)NNrR+~(rFe@V zPCv`N2QD6`_i}pd3;_SYS&^(B|4?7cP-Rv+PWOe@X*XK9;eL*WfLhog`?)trO7()w zuIs0*FTj1(w4Ij#W$G~)4^Am7ZfX1!@EaYtEqMs}QE7tJHy1jm_pt{LPtLczneF;}eWG71(WobcKLS^j|S+bQq^qc3E_xt_% z{{HIab)V1kJZH|#oH;X(nVBDjRK{yG(BwLD-bzyL@ex$~N!ZVGFTZD)*+nk!ODDc_ zHbN)k2s0KhetoYl7p6F{=H!q)Tw(!kJ6kB9o@Srl8kxrrg^3B%EN=~ZOvb%L1PGh+ zIrvWD4O7*>d3z{A(3ab$QFVWZXXu$dsDl z+5es}=TThVmJVq~M+cs8nnvAT|GbaY+43Xwa))s7%$LBvdePiQxv?1WcG!IX-$0wM zbJ|^xcxI`5`^q0lPHmev&Ol4x^0s5k>R2f2o-2|9&#cq0I*xppdYb7XT>XvWCX@qC zfySMmoS|5?Efl0Ir@wTDqm$ajVGv1yzX_S|CND3@PI3RKnuF%{v+YpuhgKXC;5A=t z!C+zsj|e9771TRJ(BuVSqwEcf)AyCIYE#dzHiqr_9Z2uOzerJ5^U>lL3*ysYT>BP8kGj?@4* zs_*chyE&UI14&sQkg%tcFCkrL9dg{QAt~A8>=C5TWGXV!1eBIcgIrtX=SUm+z~q{n zKUlQQT?mV6XI)kYZ(d|g@woa~Mk@f)-2*J&maUu|fnL|P>*Gmh(wxg8kJ};3Y-au- zJ}jx(+BU#s?szdq7;@R2Lf$k&xpAAfhTXPbw;?v)O#lEv?Swn5|(q%2_I@sMxDh!ffI8ekp31Eh*YcO{tExaA_ft|(Wsk)Zh=(#Tj#Yjz*p|=D_<_x__#ygB;5g zq&?;Y)K-A~TUHS9evke6{al;5UHLTa8LG2GTz{q0dS5i(S1EzC!&3x5+cI?}T>JN@ zXX>W3rD&shnY|x6NoW6p;^(dRPMQ5$hQQO=nLX>~%cuAYIwXg=TP0}Rr*eZSGfjAB z{=8m&_NK7;Wzk{(N#zy!I`(%g^K@oDQzi5DfBF{-#kkXsxo%ZaW@Dp>8(iCft|;HC zw--d<9rRL8gu8d|3h~x|zgYPjs>;1VdJU-kQLJX~lpqhqNuHSWv7!#eMc!zZFQ)6< zouk8M{P)JWDAg6kEvrDOYxRK1yn;9BQC= zbNVDOm~VeKb6vYAO{6S`M)MckduDIN`5V(bvybAWOr>=C^;{!1^s6e9nKjZmN>fRV 
z0@H==U7yA^^hX{3x&oXzs1{En{f*3N45E#4v{zz{9vbO3t0svYVC4DDb-Rv`{qFvT zkjZ_S31)%s$Cy69Prhmh&_D)seD-sEDM{D7@O=-wJ=lBNri>VjuRD{-P>4;CjA1*rHSU z6>NA}W1->OUJ=n^pWXuJC5+uIkY_y1&IF=}kn3$z=teQHf#hPMfFn=KLbTJiK%{juyJ3aJx!_aBVkahK{6o zk$<~?>h3M7_umi3O=nW{po0A`HxS~+1F3%d7oL=KdNnRrr%bXV`E)r(Y9E>(OcXfSu0|_`C>?p z?nWvGYh5FM_N4^zGh5))DD;@H^c?ZOG%t_Ge* z%`f{heprWl(V#f~@sH?4HZ9Y&+okytNlq4i{#`R0(}~57rz~}eJYD=<-|wrcR&qaQ ze7)IMRd0iIg1=zt4=VOrv!T2@r`n39sW~7#q-rOVKHqIUkvDdTd&2LJ&)r)Mo2Q

(0}Yh- zzx31lJApqJz-mlU1@V`GKs^;TED`c8?`1c~O|PtuEz?VFG0SE@IC6KUs^dI%RpJfz zV5LI4>T20$LgFU=v)AX-ilsdx4k9ew{cg(w8v={*1nc6&cPn&HKE93muBoE%L#E^X zG+m~0mLV8huNTW2;MDhFDD5aXCJT~ZeFG;)S;jkB$BXwq{X#}8C$c0>T9ydTQL6Ih z1o_Y3#cRGdQqlD0Y@M6K*ky($_SV#0a_P?SnA11Tw2ofUIu8D@7d}ZrioU}YC$WD@ zD`G@Ca^KhB{Zq4My3Gr!AhUGBY~Ck{F(oQJmuJtEN03`;-Mt*V-S(Bw_5KcVS%QH- zw2O$GI&@rh{kKL5_RRu?n{X$`8cX(Zt1%{Y|58eF=DOG{;B9CmQx zXmorqCILj}(1nLv;MVBQiSRcc?_B)G|Mu?7X^vP)S~eB0Ty4V)OC8sE?~Amo+MPT; z463V1bQ{=xzn22g#M})tUZo`RF4s%%IV;$HQbX}1?yjCTVIhx84HbhrZL^zLYzM<$ zZ%FBN`77h3XJP1-t9rX^GXv)}P8)E!k@I~~X0iwu;lWWR9>O4x*z<@dbhj!Dykr^# zQq#_~CjRhMI*N+t0Ru{~=I`*^ZQ;<*7-W(yi!jNLF=wZ_b|zU9kW)4l-`7>e0h~jmV+1hzL&1;rEWB%e2ot-YDS2@LPs*+qJCy#By->Z4+DR zt8WgaSw3xJ@+D~bWWqvTa}c7$$L?*QthJkcsYGsBG~E5G6}^p=#`3tI)No%NucgTz ze_aVnSNZ)y5)*G~jk5#^oTbm5q+^n0vbVM$aoT@~T)4k|^ z|J7b_`C_kUKR)EW>Q!USTOZi%C9C_q&dFgGyEeJ182IaS!RgWQpGc*dleu2E0;X&% ziWlb+>$GB5qvRbr5_t^)IM<-P_Mr0E8EkS{Pf7ic6Ou_>uouV9+)7Fl7x@ z*aH8Q|B5%`{CH?C9p+w7lGZ(NGby<1=_AQVPaBWwd8CiaO@NXZ4?O(3 z)SdV@p|lDdtbuIFWtiF~--o{Co9E-f!=$T`tBx~Xt!R&Ws%_^<) zci3)^Lt^uDN%Ii zw%5iUe$3DtJ*53D0j=29=pI|vCY3iCN13A6h)vgAn2aett@I|NE?4}SL}>s3U1Um= zX5gk!In`A8Sq;15&TG`rmcf=g9#6;X^t?1&XfAVP3C~n#&zY@p)YrfwC-; zgRJ52FJ&)k(ZuxIY^<#1KSr)|5SaH6(bwbXwedsp>?1x3N;wsM{XZTzqc=?GovBVx zvCWR?O(?47S3K4DlN?{D!>O`bBOo+7t`e!zir{E-LgFi(+cL%;^W9D4bUa-cKN;od zQ05DD;ReooybnogLmgfIEuOg^Gn-a)SE~Qr6Ac`<@}HC*1^*{Dx8IjHKN#-&Ze|*9 zq@2|Kz8D&JCMMDN8?xP#TQTx|R~8uL7WX_nA2*7#)7SKi3Ms$b_Ols7#mya2aky>k z+sr0kWubjSx%OAiad8`$o^@N<5&ruNzgBf?R(KlqlZv3GWydC*!Ojr&+JE!Uw!3`i z7k8>lo9^>lZ(eQ`*n|U`ulV?g=CG_h#_qj?-!zN9*Nq@I>5i@HSoVA(QvufF@NI;>1%s z%plnvsvmg>psXiZUu!)I0}tIZVeP0&!8tJAOC5$h7K;C5Lyx9F*RUA83!RAY&`VBT zC<}D=u6s>E%z~T0?sxMxOnjscg&L=3q-gvJ^T?x{5(`@dK}BC^(`+n^goDR9$@9Ew9DEeP_H*_FBJ0j)B#4 z8sAio#)3_wvOSuO(c}q-IS&{4rSnOGCx|KpZ{%~;V&&U6;^p&LbVOQEndS80nL49R zEjk_z;%&?)IL6|C2OZR@PBIvGjGTd!B50fY5;h>{T=lPV{P*jqYVIRZLEpbNNgOM% z-%ByXoi(1dq1}6H5i0pqV5PwAun*i0t=_MXr=FK;LO4Hx6-$5XMzY)P5DTcuv(Lx% 
z5ciqumpgYd64^_b$(e%t+%#rexcXbz9S(xeX#5;8@#yr+j2D|PO$)D#ge~O13Ox2C zeRnB-6tWkwk^3r6s{ctrJZD5YoeJz*PW!`;9`sEnQwpOh!oYxN!$R`5TTKQ)J%P6PS$#DgHUXFgb{k{&pU3)@qe!6K?vD3cZ+rT}AZEM$`A# z21Lso{@vL@FVl5wK)cGz(-V!+?zj1HJd_N}-9ev5RO*hXw}uovxT40h>+(Ak2lVnQ zF(XsTjboZ>2Y)Rqwg!R<(|4ox3`#*L`qf15MQ_O{c~Dk*d>7R zHhg33aoeMvlAr5h0Wc6CVIFinV@O$Z=;9U;Y_%pAW$s|vic`@qN>&5 zRRTh=5BA^agb$r{>JtR_fc$m;Y{PHT3wx=1L%vY&P;2rCdDWdrYlnwha}(~@Pp$Z4 zycov}@+DmNK6(?rz%S`nO|`qma8z{y)-2F_Zfup_QP8xy_7$?IJI9s1SjruQW&P1N#hX!8(pLH!WC z{DDu?g^En8vNcsaW{i)*_8t81Ud03rw5hoNX!x^~(m*YBjMhA4QNW{0M83kd8f(Y4 z*7$I_J2(9Bncd0gh?6tT>WN1m5z4Qr^u%@0HBE1ZEacy_$xhwmxnp+LC`CEzP=%8Nzo7Zcmxs`K@Ik@Y35Rd@b<~GKpuY zo7X)*D^;7#`96EysJ$x(J!-G6?a|$1b}{dDeUVT2(Z+%kMPZK1tLHp~zJ@n=8@TG% zz8ER&#AJ)^QZa#kp(>EQR=T+07x{!i#6tcCR0So_veBHc2@-IeRB{8UC>h zIf~)rH+4^m{Zsu17r4b9I54m&tIh$3WgA@mT6A(i=kJe?8BXF8lS)Co(qHmec3$he zLgDf;+u+QGtC?)SdVx!AK-~G9VOnsKC%I<#Yf3LJk^66l=un(L+%yFZzEEYt8NP=0 zWd9XLP0I9=?h)`?mOx~jK$y%pXY@|m>~GS3qJhGgl2GjR#xGSDw+Hf+$WHC*3^K~o zD0`C-^XX|<;j0!tSg)yRAFDXow|g23F4XeOtUSEt?%(i`GP%#Cj0%PFlI+W)W&hW!9VD=nf2Dw=he@CDvgoP^X>MTdh+VW~1VpLC5x6n8WK&+{s( zn43)wpPqb`cewY?yJYsdNMphA0KRKs^+r(v4zt2X4`ct&Job_YDlkor4vg!C;$Cj* zwz4P;5%?`faHnDhDgTKY8Rc!>Z?39D%kL{Sbi`iy%=i}({ua4iH~ynd(5OVRKQ)cl z;SzaBVf@7de~TJjUG#s}KzT7}%amA+CNkbDb~9?1B8Dn%eAoDrV-}l#{tU<<(PLlq?x_=m*-tgeZk;hQEsQrMQd5NDyyOM0-8emV zm)^oS{2dr7|A|(~Q@meKo4EFS;zRk5Dz`9IG9qJQ1AnPhisz$A9FNg6%Kt1-Un@w) zBv+cX_zrV~-GgPc9`_XaEt5=Scipr~dg%O|jKTj98yVo_q;Q!hq zZmUG9NB_T~w?U_2VkiynYi>ey}KFRY6<^TC@dg?COA|8_c&X?XYevCsEb5_V28y-GTm!io_ z>{ROjty6yZdCu5vmtAchtBtvtheRP}b8jU*NLl1On`Y+BgElYqu6VO8P5$K0ofDkf zP0F0kRYRU!GZ*e|KKrNZ$bv;t-1lb{uZIzOX$nGwONz3w3r%Ce zIL(3g=j8nSXeS1Ws^m7tR>C-%m3|4nQ2KWgu~XEACgzAsN!j*6@70wCaZ^1ib^a&)V(yxbq52P3=d*t+F6jz%>b)zxl7mxBlJ$ei7H^bisJK;A zh7EJ;TBes13zm;JV|IC$pQF=h8SCZ|WB9T9u|pTMKGoI(5aHFEeQTcrFNvt ze5!x;=!~m5v)WZ25?8t32IWcb^5h{h<63y5ms;Ty78#X;4&xOzDd|i)p@a1)jNQ(G znf5Z&p4b?s^KWY_!r^VnqE~3rG??Sb8l7}*4<05h!vnUuS2fXx#G00qpz4*?FNBZ? 
z6Z?=1@DF@ZBr^o_2R5#Mo=XtX5S<1<*&?4u0SJ#q#KL1rPrG@L(c{nXXd&uNg8PonQXwO@NRv$ZU@6#*$}WK~JIX#6NT!uDwsTJ9An?Mz@ z$lo+3>Dypun1Zuf{LUCb@KFl4BTo=^C$O_DKv6{F6*!} z+R=3W$V5+&Cfcb6gGu@9P1~U%618%0AKW=0Bw>2JsHFUz&0Yg4HrZW>CcPF*vgjk| zNPnQwLE@6_yI24i0rRm@AEEDY&Y*?q*zzfUAXcz2?6t@1wk70v#dyJB#PvfvCqvd@ z*oLM(B#r+6ZbQ%l|J{2#`<;bMm4&Fcb1gZP34FQM?yz%OmjUhmb0wvK&mMSpIr#vv zY?d;7>A0JeSs22Idtn4&uM~NNSg8BJ3Fk7ei0$wT?2(n%4!^J>ygSM0Op=G-ZW!P{ z=WP#gi2;{s{*L?*f&Zv8WrRqo2qOh6m*Ude83O}vk;@#sMB0f6*a_v2PYMufS+OJ4 z+|!_E?#fRsXxnFS@zC?hJ)}8QAU2O&{VO^WZ@yEsw6<ggCHwdz!Dn{-!?J44K z;l3lP?!*whqlopvk}{xHcO?B8%t5R3SZh>k)T!_JpG0ncF43D4=fwlJ{Y)(Mf_TD`M5@!tk()#N5&nNKdy zkq4ECiOXZ^DzY4kAtF=5gTO~M-gQU35|~?5-|za+&Q8xUXa7Kc?8ktd?h~=xp>sTM z?8#}Ew1oMN0TUbr^1x4_DY>)6W5LgnfhYWw+erUo0fd5yi<~sS+b26k;8g*y)ozkL z)I}5+QB8J7fz{#7RbDc)?hLaJ5iBoqR+3;@T2eUZia;TvtGN`U>E|Il!5OQ+hcwIw zdJSZulJoR}I}3dZEXYhn8VF5Iu7G@!rD{iM{ohaC3yOw?TAknFx-?iw|95KV9cBoo z;+vL)trZ7u2%f?3d^iYW07Nv1dwha457OsIbC5%n0t@Mr|F!%bd0{_}iuUeW7yo{N z^L`&`T|0^o`Olv7z=z_N`FH-=2mgKnsX9cO+kgLeUJ5MT_Lct5iLy+0eQIpJ=EsRN zHS@p4NIjpGd9Z2_0lBalSl6d>mc%O<_6)Md=(rs^E7Cz{c`gL8%o}QGc#^x`{l(|W zNqa+G`F~&62fb_{qsPCaFJ-B?_a;T#PPsA->HUccm@+Q03}&>O`%7!-;d7^UhcTI8 zkFV~ipxf;1M2XXiYgAK{dc|T z(Z=94@mQSh>x7CAOqMDQ(<{f8NpuSW_z197G5SnOfOt^C?y z!;ofa(stOxXy&G(49;%sc+MO3-<|_mX$EpZ#z!uWC~H;l+fv zm*RJr?lf?hP2PZCfkSRC9+wR7qULUpp0wZ8vT%wgt+0V_s`zO{G?Pu zn?)3VQRmn~uBW1=Ace$VPb85CGftu(Oe*<#1XNQX+=;P)K@+kcbCRa>juVZCA8ou7 zI!GMB4u=x%gx0@R1yh9z)}k%;yv#g+Y&~328w-9blaV14pFW8Wfj4^xyL&nX+_kbvZgAsxLWP3HXwHIZ0W^$WH$fz zG=K_NGYwuEk{wBVN!#0v=n$qI-a?bBd>&l`no*x}ARRCZ`@8* z6IrGhFl5V5vkk;Km9-Ci1EV^$7A+|^5Kv?A7nqRMZ@w*rH+8tkRdC+b5yiiLR+Bz+ z2-zsFIC(@efxA^ce~YEH2oZn04;eNav6QD}ej-x`4_QRfv?W)4{#8wq$4UvYFy5Pd zAz2vY$dB3Zqn*}WWNrP3QRsR23L7$keK6bUko#--_k|=8Wnj+*RA8;UO&Z1+L)K-` zK##TnjN@M9qt{@ej?pJe5JiNE1}cRAIMn=k!RQkmG8Qk$ZtCNU1IR~=HORUo{hqSd 
zhj*kOK{n=48l3B2njH?4UWKow-lQ2VkXflfOg>9ySZY(&dD78D!5%c}IIQ{W^N;^o}#DDkRB}7JEy`n^$?csXedjc8#4@|EA2fZL-yy&pPhF`OHDrR0ZIBP{RA#?Ti!MuKuWx-YCQq6WFq7o`9?~hSa~58ojkwbX6~yfG97t==h1n&HK0b{+JIe)h z%)Gd-lJ8VK91I@-g}bjX@efIR{I9JV`~;ci3dVnytA6?__5-qg(+JQugx>>MCFlqh z+`)96a;k{pRPsx5$jX4p3H+g&1|)t`vfSQWx1DHD={qK*mOf$g3=0b{TFUAWId790WO=5(`dF;nz2I21pP41Ea9 zX-RgB3!;)b8GsCzzBf5AErP461RS>!VB!k|uzgZSVc3;`&rli0my*yHlEJyg$k+A) zjSf)cAosmp5_UwPg((L;6osou#66g@Y*IG|49p7yClkIF)w9?4j~{*x;xY=?xTul0OF$S@Jdjf7 z?OtoCQ2} zhpJg4ii0xlOu_RrKti^jpqrPjOwe@y>)2u-{~ky2?KLro{)|VC`J-piat%=jC(iSiPlFn?eaJ77fFvFW_CQEUGUj{m)F2Wc=}Wmki5GS_Kk!W?Vio724Kx8TJeROqSc zO>_hu*{Fb4Rre_v_J8lBD)6E$Mg1Df@#B)R(f4+=X*ayieDt4`P9jqJA}UCvo}0k^ z3gWu3O3NEyajES^xR?bd<`|@vP{7l7O@ii0(!A2GM+IKU4l zvO;K1W1&2hm7J{WNGpQV~;_$PDpuuK&;0!EwEfI91xZs842Tnwud?eZt zuu}d3dLvzfVo>7XVW?Mk-}_zg{Bc?G=FgpRpV&cvSiue-IlKSUDDSt>&-)eD6w6`F zser5ObOyNm1T;+$hlGRv#~s*XnBcd(L$Q+^V{Cuz_kMwnb*?mLe~R9GMZW*B@Rere zagc)6^0AL|@D-1aA4?zxM(LoRbjgFmQEQDe)noU>5h(RnXzup`0mDLHIL`9%M3W0f zeORv{DEq$h2x_&x^Me{=5&*9#hh8BzF_d2+>%kSuWm((=@LI~H3oJ7qHyx z=@tS8cUeyx27-eR66^_O_EUsJGtbLV+3E)=O9HMQpVB@0F552m;;L4jP!&Vfw^p@Q z29C4Sc{$F_&XGKc$vq9wWuo61ko=xsi3rLcaz1kVmK5}r(PoIbK7b)x{CDpL_0DOo z9rtwTBfZ9fVwn8Lb5K*v^YXW6Paiqb$SvszJdYn2)%J3b$>uaQw#q}Lthn(MtF!s{ zK=-v1YS;V$&?|=YxcgCam;!7cl-yj{D>$tzVkkij3xLthmd|auq+?PS9_Tgz)VBS? 
z*;(r(6#sOtczt*1XUGe1fx3OGUsaAp#TOEOwMH*by+4^*@u#ICei1Oyl1MWd*7;j` z``BxhmXCQ>LE+GU{>LmA;H+AzOA3@e=t&+zg+iN<%pmljbdK`odH)4!z_lX~XCzf3 zO@-?|>MOd4#!iT3YE&f=he3JB-B zA2h|q(cT1s{F>K`q_g{t3nI)FRJ3@*1To?=yDN7L*ix(5r(z zQfcwiyTjLf@=Lg_0)EFd;vTd_uK5{Y)=Y<{O|aQE zU-mlEWpEhh7=}@xP^o8T0}b^PHm=k+dAl+ZbAre-HuRtg*<&$cZr&H3BvY%^pi_l} z2QJTRCQ{X&O}N%h@utO(K^9T@sp*8oM*?YZh$0vh9VXCuX4cP`Kw<6yg~>_?qR!xs z#d%SEgzAoVD^LPZ6HD!H56v{I?f=RS;y_GIZYh-V(> z&_NXF^7&_QoZIh*!Tz25O`v_L&jw9!d;>8RI|d4o8C1K~62Cv3MhC!g$S#i2(_D)@pzk9NeT$I`MfVnh|W2<05 zXXGh1wli>Qw$?|2bM@8#s9zTNnqw%o7l`Hy3Zs99RWJnefNV5c44fihrx3wTnF0rw zP6UWJTKIr+79+4+;q^UgurYhUPeo03-vrFC1vq3#Hqt9(9Y^4^u`sgI%83`i24V2y z0U}RDDp6+4u%06V|oE9;N@nj8?kFC2i3zHHCPB%^F zqYTbMq1O+znlZx>&U_WF`+MLyNBA*s2>V0!0t~xIj#=eEGXrTYX_`#oR|T+20u19r zF>uYdAYJo2` zQ0$~}_4O;ATd%!f0m3lb>P<~o_D%Z$FZBO6)3A7DjB= zYak$wAxtIUC}2*01-~o40Nb{0agFZc)d2EtEl+N4EF~z zfWK=;G#GMv--O(>R3T6fmbL+V)lYaomVp+aCoAWjH=y_3H8@#HOlP+s<3nO(?ai6? z{qhUs|&-Xa>cF6K`+y>Ag z>#`kma4+h4^Sm|HMWIbzo=up1^)wt}*K0npfEd6XHM$GfmGUBcA5|Z8qq>6Rq#(^x z2mEGCMouH0{+xarFgDH*@31|J=#%6iq`EXn{m{8_iefuJ1aF>;c7p(oju`A|3+5TP zhM47pJ9EtHB4mgjP**h>Iuni{ZKEn*t9tc@Cq}Vyqi3)w`{KGK;I2vr@<_H&%?NV+ z2qQ=@5crJfIdIL6%M8qzUsgfC=c;O4saG?wYLi8OrB;2)Yd?uvvxp6 zS|^#-L)RyR&-I^TCM4jhSMF_w)S40Z4)z$@~F3Afv0k5pk~~X>vcoBvQ6GZ9IM{EDx6;@eB;s z|B3cVXkn-%C3I;#T$4h694uZX{E(Y~(@#`)^0a+0Pe>h9vVAYL~jPACek2Z3e=HBa4vqKu?8! 
zW2Zu!BSOa3U=q^Zgz8=w%CMY@x3#2>`~EDQfR0?Np`vTO#Q`?-Pc$fN0{9Mi0SMUS zM%I=qQZKP^t<|xCN#rD<&+L}jZ>TK$iC63^^gU~j5-5^>jiu1y^y)RhW#mnO4IvIT zZix(}Z54iT+rF`-`x?pjngG&nhOY5hv830+#Si#cJ$nGHF%$2V5p!nt_&WxuNf ztq3~U+(73s+}a3D zh39@#Sc@_|CQt+tW>(69+omV0-cAF|J>_oJL?OBt( zf4x6>E&hV;(IlhqP~WO*)b>Zltw)n@3#*15b3CJ_*L_D?e=fQ~Oo(wrDl+e4z^{i= z(ch6Az?PEel2Zpxvc|tt9o6EzltlRiuLf4@(;7&DYlqr>*K!e9&7Vpw55YDVAR-2d#A;w#63bheF|wR7+RF;+u)docoW^2kzbF z9jC)YDXmk9PBWO(>EAi#7Qwj3F2f^H`&C?e^UH04Yg>?V%XlUACsb`-_FT_QRNF}6 z)QPhG+3GtMCJ#BX4(;cvi8XB|$h+p2gST~48O0(9VeUaie57iX`h zz%YCB?H*~`j4B1ywt~N-1rd0^Q^Ugf9CF7^A_QeN^E;^DwAY7%Bk!iAY3D3Qt}+ce5LPBS_^htbDO z^N(Yfp_;%pgHW;k1k^5ZLdr|0AQr5l*GZ8im%nf!sfcTVPTT{w?LB&0s;?M^F1fn^ z!22VM+H3zgx`o&wQR&ZDL@Dv$+=*(>v|@I+sS`?z;F zvqtP6U+A6O@I~cF)C+rul$D@U(~yCa6kVpt>@fdX4~kPH7Yv?|nB{S!l3}PDfGbMj zY;8pc*3-#h(8>IFY73_*EJ3xbqP=nxoOQFWw>c3?EN0PybG5@!|$*D*Tl{}r8VbJ zI9__3Vi63Sma#T=U%IBlKC$2Sh}R}9bbmLb&u5nUfICGnH9Sjo>7;RbTV=COW&=_| z8mXBe%f=thY4T`5JTTu3QkL<|Hsob?M3Rh-#WcHY}$a zh2aa$b}gvAcIx)Z%aJV?t7qANo@)fUUmX6mZC-gUD`P?LvQP$6)ccys#mMuUSWX<3 z%mb|4?~1M`r%Sns4)m9h;9L}Vm>BKe9A~79unD{nrjeIVMxMwbOf!>x=S0~bxA^gk zU)ST+OpmU{4_&~vLG^5fFH2KRrjB`h?PrGRN@lOdWC)4tT9k;iLG6;Lw`r zcPf?~*_=32_@!%nCG-^i2zw)zR>H!D7<*c<{ESSuqOlK?N_4;6x z;n&n($YQ8<4#8ryz|)q{_(t0R{xwi7)2E{w-o*l>Eh9-D=WXV+wn0rwfq4>w zq>oBhK%Wu=y1h})^>3gnC~t=*Jy6v6SQS`ckEJ$trC8?3OU?m+VVm2SQtwuF_K3p@H|3R%A5KIcny-RBw~=c#}kP)X;nJv$vbo^W2Eq^aZ_&VvWEG zhEfghjL*5vN9hc;7P4*b$!dJf4Zi|a}EuVc4VX#kR_R>Wi9^Se(4 zS=!L_s2I0VBORZ$`w!~3$e0I&svJdukzoVjzE{xYM6($GEh2?<@IntX|eW(i_o}wh1CbG+*1Ix@dLTViJGB^E6ek6EbnMk_~dm9)mLEsotqY7pTqs2@dc4n$=LC!zcSr^VgDMuI3ScQex?_JA@iBnQ^)&c zr~U=s!iu*7-g&MKE1%xvH(2Yov5O#PShwcM%LDNe%AVK2w2MBjA|%EY_oS>pGq*j8 z&NNE;l+R-ocGOxhpiRt8uLm*JLG_PrW9rJ;(oMD-%YR?dk~m2|H95Zj5!>q)DkCS3iJ z=ld(bVZIxvtV9rN;V*77{?Qeg9Y=Y4>;FjqY)$w!*MNx_bW#@>AtA~lb zZu?cOPmBt1+I1&<1pk3Y0E&!^LJ{#dO}+4nQA3Vxw9oCe1Fx}PP{G@9+m8X{A-g^d zts}=2IRxI3V@Gw)pe;yns#pkKT4?|QIN%FmKkHD=>Rzhfn#o=UtsUreNFMYCN5jX% 
z^sjZzvdZ2S_RVb5qLwID#E=AK2H7tJ=CaKcEdGxcs#|4)@R)2v#tQXyFj- zik(_b(r~atmAFW5%9$v;Xr^iRtTc%bu{@ZIgzpCx2&b?Xu%#R zf^Zl1P;!pq5X2qB0m~@K-6`=9IHu*D{aNsv4akN1!`8Tm{ggw11~4ZtnA5rz(2~DJ z(*`n${CX)99nT^}bXfi5jdDj4pdPxoUIbHac5!dw(#fDTt58f9EUSpLEOs;vtW>%_ zsvA)9@h*2?T#6{RH^aUSMIif5RV&vok$zqfTp~Tbppf0HTY?=ED=Wftn!kH<<4& zL^a(vo7)Td!NX7I!S(wO;WkMJOGzROHP&+yj3*h+^PdN@Gao(U4uMq?aY42?VDFTw z`Aj9@(O#8j#iw9@pS=6z0;t5=f2hR&XEm2$HE}ZWbMc%(ll)OngMckxy}vdG zwI)9dzkUIz91B3@Toi<ggm>x@AgLiph`~`U1pdbf z>xt?q&2~RX!e*57!3OJE#UD-qx2yI16+mW*3c_r!#68IXaTVF82Ej)Q1o|RF5h45r z!SO=D4Yc{Xv~E5xJeT@P@0t^85UvNdP%f+Q5!^JyWo7$W1|V`50?bRT6${bxU}4bM z9`6T)ehM@egk5<3xD|;ZOZQ07Vc6k&RmBlSUjEn)n3oSQB>_S8UctW~tL`m8NN`e+tQlu# z#%BGN)=aUMItP}K2+OE!=P(oBPb4XvOmNjMq5)=tLzy=M$d?@nSE30FrFT)q5gFLe zZZL=OMwRSvP(Vn`30NT>CZ}5~nSOzT(74E$*oO!yn~9_FMcAQ7$8RCwehma$W-5Vr zEcpUhTxns4kZ9q)t`>F>PM8*eIC6!e01aIE0 zr`jsGTYlOXf!Y8CK~i&`3IXr*Z@J5#1VTWHl7I?K_Fsa~A<)v2qDuVWEp9}-#REJr zim>;~?Rw7IX}j|Wkr?gnMS-2b(sFSu7`9~ZL`h<5^rYjntD@i^ivZx4fDcpE%MesC z61VJ!ErFz*P-|Me0JrNE0q{4o!YZ6kP(N2d6~8(bUQ@^pkR27rA^0Q0v7m_J8RsxE^)`uPaXQwItBQ0qK9{cqi&n4|EW(>;wf`@8-DN`k=s zEC#3`?KOAp4B1LMR&TJIr*P`mj%Qh+RFp6y9NL}Bt5ATpeQnK)TW-elO!{Q^Z(aEe z==@a`-06#W@Pz=w;aC%*bjB6X*`8M*cf{Zv6iZ3v{s?Z)_DqG5_Ovr1?hIcJ2djbJ zVrjAP9*Zbyjnm^mv|&GRsiXSf>^|;P3B^v<%XN3P50pp{$0 z1vPN5x}}Zs(V5y0jNv?uDjqvu-1}0 zC_FI16+jSmUW3P7{si*X-O?3l5Igw_&_h@MUhWcNBK~Tlon?uNwtsvTblMf!V_l3- zMoN$zpdFa%p|Bvf<<!XRtO0RwQEfTqrM~0vY^DyC}%MM ztxh>t4uwwcV#MqbvrKGh$XWVgPqaPm%-el+E{}k-z1iMTsQe4(kx}L&(B@RWWgEIl zre)6lgvk4VK1l%eSkZh9vSBJ;sE!i0|FQc9Mo4$v3ru@)$)t}V`-!Jti;N?Xs5`KV$oRXk`X0-wC z_oYWs;&va?J2xT`kN19*hP$R8((8);_VYbMCmfjoR7q5Gr+#z)C4!9$J7<}H%;lYCvw)e${gr2 zxW{@6IeH?(Fi#eKiFb&_MYha>s=7gUy)BF9O*;E0EPxG{x7GX@uSQMoz+}Ldb40Qd?1$)Is(H0kPs=D`z*XKb{D+n`5$s3xK zdD`4H5i!^QFr9^9-PD7bDFMn_?5jFwD+Q;zt62ib=jWyI zfXO1GC`FevIzeLB3x%JvQ7wfCK_=B-%`T5xj&Hox{E0mJml;oj#==ZbLmms4x%GNf zOD^){JLOl}$q7~gpn^Mu;!kO^8zOU%VXvk}=Ace>J_Q1=Lt`zco9^8stECRUM|b^) z4+kM!oXm>e0z@)~)fl{&>}Yy8H_i_~7J-*sC}caZ5A9aG$JL~+zcy-45~+O`BePJF 
zMarGX;1IQeng`KKSKnlR#Dolcj|sUf{6E8@Anlf4 zggJjr$=yJn;s2xSyW_EZ-~O|*G9yB=_l$&OhES5s;N{Np8e_jR4ud7jsKypQ+sK8_>jSP1)`gzpuWV-wxW zvB!XrV!joLyRtCbS^D$DWT=@7C2fd0iUQ6qN9 z`>AN1^@poQ`jd;1oUz553yC?m^ z;~7{b&;%m}DOC)~836$x4g`P?nRr&Y@v=#tKQYo!ychU$W1)Z9e>d=;cMhp25V*)% z?{{vmrRmLM&}YKU#ID1E4+*HkV5E^GpAYWp>{fk*Mg~M)-1A=?QjX=>Fu#bNm*ro} zPBy+N5|thKEbDIdhRg00hlOL|r`hJfh;b_rc_53C3FH5l4*nzzkcp9<9{zfKzyHYi z{13G)@~ORGur^5%w3Zyj00AvgztbYBdw>JozyJnfFOnBZm-TE>|8^GlFYIdgUWobuI#&iIJl8+- zQO0A|h}~dX8Olef4#~pi`~6mH^mtBS(sl@CxiQ~Y%f9qO@y>ezP$c$lup4L%@34z| z|M~U!d$Uc%^;1oYAUBj+c{oth#pGCE(NhBzPEnRn-dNlN*GQZS(uiIb2!Kni=}$X& z=;eN`ptra_^e<-`6fekf19=K}mJ3J9GJlAd-uR~S_44>%?PGa}zJYPJs|N0M747hpAa1y68e8S>}oj&y>!ZCmWn^0VGcTw zKSK18+6o7rHH12ycxKzx149px!i|D_F3`zVdGD+BA`)|x@R;o)Wv{mH_MMzZE79jaAkaWx@-PRVr{HJP)3pJej?uvzoWhDe`wholHPop%(k3eoxI_UxZ z94=j6F6+1SC3uqLy+>_$*ZE>iobaa}u|w>weQc(!NaD%g-3HZ4r%?U=nl0#9Btba(_Qvok%+nB= zl(3tFy73l4A9QiYGYoVVF6ZOlyaSarq-p>(zDH*3QcAZGc7@JL68Jb3z`Aq9r zzEGlc+VL?JX)qEN3=18AJ6gWE1HCmf>F*COLPc?DIVh)Dsv*+HvvqLtknL7>*)$U( z8lVqeEUG``@=f}FHN~`Q8(Rm+QA4tEIEh-Fps2W1V&<1_kMwmG!8`I$ckzzJCe%{? 
z=C@Uv@rv5$<#YguATAwO&6VzL)(`2*Xm%_q?19?fc9%1MH1Cs}?Su#;9h(Qz{MOoS zvy%-NRbE7`DVp zo7B2Y{pQrI8L!cYiysVf#vo!d3!S8&`7Aq{rG^Ra^w6LhYsAV4NQ$HnG>Z^dD-;Z6 z@}v@J42X@s+e&$M)*;g};<+AWq7i*h<9Xl#$O(!=u4i5C&4luGE{V9r6YEl_wz|GysEm~BeDYxn024Z=!g{`~yKeeb4{rCxDuKPqT zy=yk1DTK-UStK9)INg>)LnXcqWpd}!W>u?C;y|o)>5lCVs0{pc%{K5AJN3551CLss zIfow>Yn7hI(!Vi`Q9`#Y7(N8PSCP#VEL|G(4Cw=jYRyXeJEUBdt)C9@0YlLyd7M5L|tT3Dxwhg9^Zxv_xw?d;Xje zHd8}uP-}sn11YR{?Z#q}f7tN$I|d?JJK8Yq0*xSl=$BogrE2!k!|OdWEvAH2M(#j+*4Vdz^r^rENpUoFZ_7g?OKZ*SR>#LACOslNtVGUeBAr)a5V>g_@Cx)+GD zpZD>%teH*_Zw!}p=}2{60K)e&XY zVr&RKqSDeKvcveBUjE-&fd5=NI;v{>u+XTjulAFFLT|7}@P?)Q(dg8>bR9d>Y&=#F zet#wY>iY{9s;AEf^pQLkU1rxE_7^Visu9f*!OloKo`1GwK64`nyH}Rs{JVxDDJtvJ zmy8<2EY(|DfeWYvWS^a0_Gz_tf1G=v&L4_sibIp^kE1nnBBYwj#7SnO@)dHM5qC~v z>GS4xM`GaAF4{nhDmYMJ@2IM$2|8?Z6z&+j%L%@l4y8qA-TV?F2O{J**i+Li!0ZX9 znLw4?MWn6YUw(s{B+ybTl9F~4b1_cKyXtKMPCo$*qo z4VwOV!*{lM0$M0M?xoeub~QBUM5KCTr3{z}qNm~N>Bz|IS9A_M+l|+vZy2g|YOG+^ zwcnKOA?LTVGNtMLo6qZcKVGZNOj1RMP3BrY;J`$!W`Vd7&VW-$vPI+2tEueBxD1>! 
z>8Y}T_QxOPd{cjMp?ux?XwT-{<8_S!0a!)VmzO+S9VI#K?nPHnQ4V~MUS){^6}Fpu zm8SjC({myU+EJ5o{JlCq_K)tgUtB|Bx9)M-(gtdo^bz0AI3aDu=XT%a|lyw58R}E574#WT5k456Pf0v+x*T3s1GK%1ZriiPZwiCYHud5(T-#^BE1NVwLQENXks^l-f@(d;A zPrH{p0+Z9{Cl_x{>c0={PV9S?Px+YM=Gpq23-{OdKr`8rPPJK4S^Jet>S#%HXqvpJ zzyrf|Qr`uI<;G*fX61~_?=4BVJxIP9bb%`X5rg>F8yMTrcd8xq%JaU0=66Q*`3~5b zXNBa3S)r|bY=TQAHZA|q)&+%8Zs3e0VTj|k?rL5GFof?p376GD!^=Ah~Rg7jqd}x=<0b5sQ~{d zYmG9HdnrYTkP4HksV+$x4`k0j-<5>^kGy-|)~+#py|p)7KTUbd;JbQ-tz7s9XTqbk`mCq<>eM@XAG52UKE9RR9KVy7=e7-%bic{1zvxMuRv;Bp{gOnj zvezYf(kmmhYW->VbYi5t{}E(pIB+I0-Rv$J?wo1;s5`qT_M5|Zs-(`)`G5t{pP#a2 zXmS(_`1MZ-m`z{~&sy>3-HFQ9ST_Kyx`4xSJs^#POivE$ghPa9R!^!(f%M1G=%kMQ#6VOr&n0Um{J36SJwtmd3DqKeOHQO!a#JVUc zp6Xq?-gsBt%P-OOSPB<02w)rV&C;W^1OThaL5VeaJWrOeL1rf@Wx$>X^z*0FUt6;cQ@s;@2l?+ZQaLe^y5DzjE!7%$jl-mb7kRDmIUe^($EH#I+#8YYnA@ zd>A>$huUd`*MgclUpXQ_9?TRRf3^UhrQKJG%^W>Kl{3sK&Q$;@0=B4()dacjiSQsjO4=HgH8V{s*w!;`u@eenD zYs4^JSx8wnSHPFpRrozlpAiCT`C?- zNK6|Ub`z2VG^bi)vOO32F1SrLKPN9T+PsNPE-;jwrMiu7Vez>x`xX~PYWqMLnIbM~ zcI1|C>--~jhAW&?k>=L!J2;CMS9~khlDOugw%E16vR}c=zedx?ovqxSfTV_|Izo~l>dA6gT!E!@Ae;u+J%5` zYGQ65+Him8GPHnJ)xTRzAu?E{9>=N+J0eK#VBmSWUFH2z+AF6uVpH4#^z#k$7a^rU z!9b|72>$VSjPq-|rf%T@(7m=oQL>`JkG40F-Jl3rV#Vb+Ws#o~jgM?#$7)JP zvtEI=f7%~xy{B3!Hdl5Yk*7yP;!<-8b9l@&EGZx84-7M`o|4C%4J8wa6q!ze7fUl8 z{PoBY73L$54ZppSDple#F0WsV&ulq;`<$%jwL-Ic5tRtaWXS0=d>pQGKz!lF*i|bP zd9UAkaCAIckrM8}QJH@$GE*4016t(?tq=&y0|}c*!_Mr78R|~8C-Fa}MZMR4m6*t_ z)d2}BhKMTm$9}wbgqxDN<cBb$MI&XYMK ziZap7*35PyN_d39tWLG@ejyH>2p4NWdlpWVI}RjOU1g@0qOKn_iukQCxKl2+Fj85x zG+eF~%OaE2`e3g6+z%>|0*&Z%Q!VtGXbNVj>yKT#&N|{?Wi^YfxS$*grpELuhMX;<$RTu%Etvwgs2msr^dUp`V0GMK^WQei(LMI<3KZKU^^6&)s+_<@|fy&kv3h%^km^ zgY7lQ{q2CAedUgZ<2M-MOq@)@>Gk43ktocj0X|_$Jx((! 
z;tUnkvM@;XCwW`#wV3zyirhI|!{*Ak*(M$pedwJWL8DmuVP-M&AX)=hZk?^4M25wU z>f^A}Zy#9HF+Q3t@!xy!;<9()nvf*{g~-k&5|PLok=jlTuBuO`Y&OW42F~vO1yD-J z`rfK7HrI=+NME{%e4BCZae3Xnex8NC>?aQce&p%rogfuKZ|FRx-U>apdPKcCf8G(_ zk~xI5XX44pDY?(N4)$po3cj`BtJxvFZD2qv|5){; zUlHgkhG-{gYtS!AbSSlQxI=)(W(FPct0~)G`wnqudv=@1%fD9KAI+=7Zc`~p4%4x6IBCYDwwfRBLI1sq;Zs%~(QXYAVK^D9 zy0{~zC-17l$L8T1!wPJ=pOGRmQi1VlxoSHoEu*0Lu^gN-5$&`NsBCT!9D zXeHN16@r`5N1x0a$MVga)Ls4%%zRv^V0%5W8nUBwn0_XmIvn?meOk6$l%9Vs>)Uz=%8DkUGn9`LyR^_oTAAU)ZxtZ z!RNwa4b`gGfq2BVfBSv(gWu(?`^!F9nin)Hh5`%wYOmtd`o%j?(y1FzQ#_r4KzE439=^M3MdC>2o|=B-Ac{7LnQGbhk8&()^_z6Etm^pi9Fbr$SiIS>Ty;&0;bJ#Pg^C^%NBXg|Ct8WTM;3j z5b=mbLa(NuSbl0;`~h0XmGXs16qLNjr<%UP9(O98%BkriY507I9U+P#k|Od7TA13B z*yVL^sih&-hE4>FiBbzbA%d&QG$(5kD?g!fndH;eTc@WVF61S`Q#E#{K2by83#QXV zV?EH@p5o7S{4CiqFlL?gi;Es)qNUe{^Usk%H@in~*pHWI=T*>hD8tDx3RH%?zuuVU z>omq!dy{5gMU!qlBF(TLk=TkLi-wR>{R|lPhyqmYon%xSQ9@`(^p~N>YUoUgG9rtM z50;E(MwPL#ATJWQ$tv)Ro$EA%dkqL2vdMJkA$BA2a+1lw7ZZ(rDH|=8L_y#m`=IV{ z*Rh2ZpOJ{&^(@t>b}TMrr)`z+&``38JmhTlf(of<=s{y4pDoRO)^o#UeN*&WzN)2^ zSw7)V;?!X)5Z<;!sYwzI1Rp;^4u5l!BnS0-&VExXW+sXe!)I%av5cb6agQr%&HbC0 z321`8%DNY@MY1Jj(W9!L6DglfSPXk`%aX-X(8>u3yS?6-LQ;0%f}^ zT6vQUgAo5+&V5P`V&19UYq7WsXE0)0A3QMBWVtQ3a&$hMTReH_2la>zrqm1B2&L8r zqd8>1%U0^5Tl!F$v-f_^(V=V;$_y{lp|(nh)m4tS2<|f82$`r=_NCC+vz<;b=XV}o zn06z|lZr5*L;J@Qhc=P(?Zw1672j}^TxM_9F?^}Q?>!}>euCMafaL?OZ;~uM4dPrk zeCnc8-Cn&3|E6Yo@1|jEX$@|;KCnKIEHW?t@W)}aRm6!lALAU3NDObNE5{P;UklkZ zo%-*xm)eD}^j{XE>LeEdyOe=F)hED5_Bm%S@}Ar zAKef3@_FTXu4WRsh%e<$c6@vxnOBuy40X)alH`&m{Rk6S>oRkFS#YlL)5KhtvMWx&ICk zhE}JK@-|35I+$1)fWt5(nC9IS?aPPhdTn1X1wh2^bZq4DWHhw0iaN6_>o<&lBptA` zRN7&aq-ddc&778@)Km*5cap#L3qPHYj_+Hlpp`ad)0V6JX{p^E7s+LRh{yeo=0{t7 zGy7=Sq9?*2)x}8MOdX0RM|0?E<5uf0 zK9=h-1Z9n9S#ODj2c0}mHymWH^pQrmIPtea5-uc#<_EVp>mcK=23o5yLo$@U@V+vs zC=0=Bk8fhOzv4LQ#E2tRH*8K-HXEBtkCSf=tcMmWc+SVu4pT5m>aa?>HroJ9KL4@U zFlW250D){A_lh3COxaMyhW`WEyzwix&=x_1*2?AqkW;9q$$%$M@MN{i@Ogibs%Y^{Imr+dI#&bK*a zjC0+|AhMy;!Qt4Jx)=je^!{!uPBmJz+LDWQQTRcq>57eDB&C1%^D`0BzD|FBeG1W0 
z>SDmBP$rt$IJlyRlf}wO(0v=M^RJ;5rB>!!i2aoVP=J?A>3ZIg;!=4V`42kwX|}#w z(nN+I?v#JW4pPUfWx`S07yvlLAR@$yLb-V0!(Bt4S^N)%8$?v+$^06$m>tqTf<~pp zP$=LUIG7>sOM9+t{tBT)Grmg!VKrxS1D=&k6S zOH>bP67<<$+La7TF%|yomT-bJKB$sZ2kkR*nC4Fl?*FfPIrwqU3Y^U0yiq4(ar>pFu1-#8JE;N+K`)v0WyM<+X;B!eX6ssuu* zcrFdzGAK0Rw`z?}K-E6f0JQ9UHC*K%BoWCA^N8-V8;7ZI1KNq+aQ~TComCrUBYOwE zph42|NM&kYocJ^jDhkEJlRrTz%!3FEj5=Sr~hr=qqTKa*j@jDN>}hT@K$B{rRJ?R&H8+5@46lWu*0zYmlpI_-ZlZYZ3B z*%~N*0mD-aP4pu`uKdV1Op(0T#}$M=I`kj<7efMM2TzKq*)xPq8m;!qhIv|huW-;F z@J)sTfvs%8@h~UK9Fr9AIQJ12O}q0BH|G8rCO68~5ALOT0&451Ua;-r14iWO{n6_7 z5F&DGptUr07RtR=Csp<1t*}jDiR>yT`B9iEfo?b8jkt5-FNagNIt`WZTm7o_H9V+w z8Lz9`>(AEW1(j*SXt{{lFAQd5JR}H z$JyWj@+bVrxy7k}mEh=SrV8;3X}7GmH#1N)60_mhQt*a%2+t6nU2ABKVdg#9+h)+S z3}w6ga3hCI-*0!}J)-(c6f_huyPNYY(wAm7((d4j4W-2C2rcAzO6SByM9~Y%H;D2# zH*&J3Gaze&4h~9A^$^9CuCpY>C5xKLBm6;{y#Me zNl$*TJe<3>0S~zOSh}D0%Si<71rZ9dD`IU(4Wu;jxy-%sZ4K*6_xx%l<@3W*#HYnz z7S}-D(E$Z3-)cPNAq}O?JCBfTuNHqmfs!B|#)}R~YnDn~jlS<%5K#}$=Lo8lj%2|- zIeSzI`y5;oR+|eK!LAZ6$9Taya)PSPbn2KbMX|viKndOz#wcn>u>2XRygBB#euIAZ zy~?Ro8A@GNY1h|^Mww#a-PpDFvGJebIXa3hK7T);_M*1TNsA=+vEXaIRcd?VFVwXV zx+%9B`u18vQ*G;17jjHAv=APOxr||mrS1E=N@_%ATXNyRtO`95@JGpyUG5a|@|rsX zUjF(vTl_>zh#}GxzE0)oMb)V}Z2S-K{C^8XR_A4|{!K9pFjk9W6&8g)+xRPYr2TQM z|7_+R+8v~g&v~P>IH+B)UVB^4`Y)(kq&yidBtzZ! z*5PWo9lb?&E4B46U1fdIP5R{B4f%gyEcJQ(Kr{{*Ko-fzS3Sa+TiZyMl7WM+AY`;< zygRl!FFe*n?g2Rt;7WJ=U=lM|EV-gLP-JGfoT;PECVE#+y7cBhBg2vMVYBmnjrH*a z_OUo^PvL8#EeJyVh)Kj|FLOQO zIS+=R_zK?ga+BhDIkfrSs(Vl`P4Oe#1p|evEuRxO)pwijte)-0Q&FN|2|q=3rf&Zi z5WwGaZ>y6rh!n^NLM0Xd^b(i4ziQ{Y3l~)D{;dV@`}0f2xZELGz^>=XKXz7zFbUV> zx@k+Ou(K*G=E0g~bu~5#=1LB~didO*ca?&2w%WCwluhJI-5Cm^a2VZ|G0xF3!l2!} zaN>C!9A^27!T6fy@TQ?M!`b$rV|vzACJ|GbRiM>NyjM^^PRgsic*s)K^nn{uUk|d-~hof`UmVd5QO9xz5hX4V1j!bxJ?i^2LH~&%{gdf_phz4wIBq z4&~J?(Tj0;;f$9!baVRpW`VdlF$s5J_M${jT>dctxW;BcNJ}c}>2zJ* zuG_HSdSd{*IbA&FzszrYzPNFzZh1kJboDYB4HIB9&G=N7`5-4br22mbiJf@v&TxD! z+%7(7fP|0^6=Z(u^7s0|5dYHoi}q=v@aF^4d8-Rzy8EX-^Q5xdK>Zmm&hL|&BaB;N zCq4(pG?52hA`=-Ut~E{gHd~84zaoA+H?>nPKJi(! 
zl#E^==kCUM?a8=x*qo8)@q`O-rAQ!cIFB_^7*jaFNf3{)p40614sVbgdyT4J@Uo-Rd;_2LhC&|LKpR}bNkvB8) zQgBYFA%^(t;i!t@dHfD8@9n`P3HD$#^|zTRmCYM2hJ4EVNSq2LLjdfSL4r+<1wa-u z^3yJWOi7d)j9M<_-d$Lp)IA2Ow2=%5Xh^WDk2(>?z<>`cVN9s(Q)`6n6Y6t{IKB`Y z6I`fQXG-E7T<8J?H!}xb&uJ0^6zUN+xINvBgYbAc{)IS}(wd$)B6q)z->Q;)Fk&N; z`{M1;@pHv1z6FcU!QHxd1I)RNfVPwxemXC8*P^OR%3R7xJOp?s{sM&BGDbXdcZIlxN zS+=1#fIZ zIe$ zjuP%@<9cfg%MxvH>*Y){xeH z4yl>~1rZ~l4t>&UC|fCJ{2*AeY1M2BLPtbX*)Bv+SvyfvO#iTG@+iWe8&xmI%V zcSL9FreE*4zm<`(2efqphFK*vsR`ar;m(i`HGZr6G_}d#4tB&F1(*EDegYdND({Lt z_p8qT@_N1Hqx4!>Fe4-)s$AtHD+x37EgG%vMC$sP;h38Up}`&Dhh|2Y2*TI35pTqk zT-I-&4n=wCb!WH>ovEDXfKBU+CyRz+#CeWso%0k8X`1RU6CVMzrluhi6?s%e3s_q) z;K3ht#W~Mgo^JQzgK9}$XXTtRlcr8RKJKIwV4Ix&FqC*G;oVbs>IokRriTN77zq(+4!s>Z6lv=Et# zF$VoHkJfv(>nsxKkvrO7*DZI@zxS)^<36Y=qM}|Lsm#BDAn66CL9w%dIryE`hQDNM zrMxwUlppbEUg(EW$5ZS!UTpEG%yB>)kQhVB0)i{wsKrb}+)Zgk_*PbEl7PM@#4`m& zF}M%}hj_b4_e|NAtwQxkEfD!8n{M=rjbX)cS5e)63P(4gA@(2-Z~>gI*2t2Y2o^O>-p8+855aFIlQj-a;s#ebR*( zOYek#hY++-JeJrnDR2-(h{))!ecy-Rjy1%YJ31pAA+FN#)SeEdaV4(j{;w)_C_Q56 zJl*~3F5($Ma8l!$@UO#SO7aK&4BYoC&(1*{9!Nhj(ZW#JP;JQW{uEhuuV0O=?iXFP z=R^Zu?yWcBNkYcDwms>>GVasYtR4M^#Q)_Ky#@4ZL7r&-r1eP?xe$c2^SgbRW!&Gc zi8ZYg1?RV*_^@F;*wx<<@uB^^KG%~15!AKO>mfmhC82+xoA4leA8}D7pL}w{(4j;m zatPkP8F-!QUSm*%65d=KpyJPoJ+B}FA-W{HbRV^M+OGs4$Zv3lMc%LK6qDpNpC-@l zq~5Y%`AcL)up)?8n7lo^Hq!~|4Mo?$U(E)1kGKAM;&F)iP}#kw*I!C%Y%Pzxm%9#z z>lyr}wY#w}1CZQc(iNba17EqrkHmn3T@4_|{Z+`5rn1`{SOVdXv@=0}MMtlN_W%5F zH~IY{!~zW`Nm)33SUDK=_i()FGVaCTBH%2cq0hTdiFv%I3}aU*IzBV zbdMTv-|)pyqatP0?kIA~G?7nE0RRPQ0ZsCVCj=#jYP^f?I5w`mP7m0<2kt=(?L%q{ zr02cpHmT2T5a;F5CO}7vWZrbNvUa*$E?L;?*F>SQXUu{_8QcfiP9$iBrAhsILb3N; z+{#Z`;@cDI$F6-%8CfzFfwNTo5>wAmme%Wr`2R67qOaAh~$gpqHAdA2}4R3=4nn+DeS*q z`zFS-e-57_)2H^~rY;9D5kFgpD^b3Ky?!jiu4Fh<6G4RAI~camG4Kx_pa}s@M0V5& z6b0pmkQ*i1ck=R8|3Zc^L%-#QF|b!0u#xD`ijFy7fZhBwxa3nLNtHetuLuA5{h>*?_LJ}=e^yEg-mJoas*#8Km!5S#0^KeRUaLQj{T#w7r=bfzP##qIVOSzl8!#2w zCZF6%*9HryQ5v8?or}=L;eIx961y-In$JvIoIWS;g7+qC@_Q(jSH$0#{44RW!snx< 
z7`0V8LuW_Z3F#UDPGVtjX!0`-T`PkDW^uR8b+Ju6z|Qo{gO8PfI1y0uus#>V8%bbt z_{k87{!3frbJ6Uj~o z)Xc}c=O?4w8wl)$Xf0cZANVrUq3&0T!TIxt3r+y-8d+NdI#jH9vLNyAnN9D-pTgXA z$DHWhG^McpJe?5U!MW-@i(<`4ZFbLU z+7pw^TEwmOjAX5LLfAc|ty2IS_JGDc(jlBLW4t~va0ZY3{f>s*_}R;w%dOi3+I}); z;4bTMmlr)r!fJ{^_0pU{Eq8D5!hP)Iq6g~DRoQ}quNvq!ktgrylo8lDoUOe(JQdbA zwqtdwMx`D|1bw>@3%^4d)C0sy?Wr4|3MnS zR`597$&I@?`9z>wD}DXkW$(q^ROqKEi)K`n(D9;DWx+QE$a8z!ADqA6I7`z6m&OpH zzC3-wWUGtj4e*9^pl9{iuwj}Wd}lKR4GRfPC;bFSPED&Qm4VsoUS}0OQT<@~r9e5= zEW(qsN9?jW_SC;-KT)-LkUa{o*e`?tt*!8C?%H|MPh;!8%r;tm2wq`P-ht8*9iNWk znE$q^7oVtRx>Yw{$w#{$))$iZ5}n2{#&F%dj&hY#Sml<6ql59IWH4}oSc5!0{>>E< zt5E7as5kk2oPbnx-#3EtoHdxf-*N(IuWyDaBBtLPJAyR}+d8JgR@uIbr+Jhp&)A1O z|s20_@$5L!&_ep)Z>?7$dzs`Kk2 zRVrGK#sp}N#Gg(h6(8~(?CCi9LW2pO-*TqP?t8)@S~x5XaRSWFPf z4>OfTt5P41v82Od{=D-2fIyX=bZ@`}|BcGl8QwpMUTDaz@;|UpiEE-j6i=o#5B_$q zhAu&J-g&8ksN;oIL^)Ys8K?fH~=%xTZaIF44ABjAa?t>J}!upA2H1VSKe(T%qX zC)eN8!4ZV;>a=_1UqLYvG@loczCTiN<1v$-TQfSuG=b#(G?}Pe+}gZ3S#O6jGxk8t zZEug6m1qn|qzQy^*DiTR-qvB3CL)>l=w0*X9GjI}4%dWw(28gZA-F&N2rK*5QiNGF z(5<@bHDxO0K*r}&tRh2h#M$W2+z7}( z=zn5_{x4qrCFXhGZ`53Mt~+?K=TpVh7Y(?{0OADypEm)1+k^lx10^^MGv-sDEardh zR5wO-zakjO@NjNRcnV`UzQzANhKHD{lYo9%5RBW{>hby8r&X^<^sg=x7b?sR$8z%G z)zu&0;=f_Ze#^pzN3u?`CV`X^jKDi?pxr2tWqb}t=A)Ro@Jx@X#gl@v-jpVR-R0-h zwekG|KOwhBQ-fRFJL55g{`;ig=A_r4XVoE#tr%qr;r!|(6CAp(VlX4b%smt;bFBju z2A+RdstyZ2jsk6BvxCEypjR#VbzTSkCJa0;X|H2+?g6mdJlbh|U%f!j|2&*96A)%ZPN4%&4j4-lZKL^ViMPb zurU6tRDcou!gtEzfy>m`+getYfDkGPrwiqy6XWAzY^%umb{E#OBv}S?iScl9BmL{L zq*bAU#|Jk!pwEIt2!NOo^oL~w5`M$$b=Uxqiq@dGn0>G`z;p%mY_j~ulwY8;6&WQ+ zq1dj&I4nXYoqtlpI2@}=e&CRCwDJF{*;*MF1yF2Y5r#vb1xcGL|5}|ZDeizGg>D5R zCdK41RW(LUZ>HdjaZF=8{j<9gJ=C}dn+yHwfZ}+yQbdyd{;s_cz|`H~^jKtk9L zeh}nii@5*(&X45kSBvUm>F@1by8kP_(6mONx6<#gyV(!5XuJ7unZKz-=omppws9N6 z7`cEoN(7`{+KJCq!chK7O`p2f;E0^Gwr;6VU|QPQmO73iVzGzi(Ghg?5D2}GZ3=^6 z$QkJBaXl;i^9Kiep*pvfu`ArxI3c-fBsNG@?~sb#^Mn*llyji^gqflonWA^zgOWR+ zNz2t2p}0$kf?*LV84NSu3X4cmIS|pGy3(&)zIB)?V}L^m`UTe7d2~PjcL>gNX9r$5 
z>`BiZ+^54X@d+?(-#-~4G9sb771z@r{^q|O%FslL#VSW)^YrueL{g5jiF%M&*TL?l zZk}FlahPG*@_7()IegDy2mPl(7PTb~9E-kU3l)IhG3M95;EjMUkpybc`{Q-O{(D== z;Ght`ijaARGeCx8)BZuI^B@O(BuvlWv*)x2Z}^$mXj1%5Uo5$hZq{ zWz_)or5~Q_F4qL2pQe<2nHt_@mUh47-TG|Prd{mUCK@pVQqeBO$`9+EU3946VRiX| zNE%K=Jk`-@02HqN1VI)#HVn7l$Z7Ci``lTD01Tp&%rp4FR7{&Ca|UONJkLGv?w5pV z$dTFp)1z?^rk6;~6GvAC&x4W+OX-~o^D8nm^S`*H^VzlaT#moa2yoG~2H+g(j|uAu zb-lcpFOaqhUX}nAgUIxQOk5k{p=3tP>e#{M|IgF1OWa5rpNA=qK~6n~atDfSU$(!W zM7cKt@yKQwkSkF+APCCGna?H8qzF7qO+Y{T#=gQo<5$8<+jOD{EU!m!zf^hk0e3KS zeBd&{De~U9%VuguR6FJDcZ+`Pu0)%8byX{P2ee5>mnv8xBkk>PwkRWei_`aWEXtOx zPb{EIU|o>8YcFAf4fB0-&n#{a^3)SR>5^Mt2L;+mJK8=IL|_U*u9@HC#b_<< zF?$6|GXeIQ7qQRg=V6VvK8EqCH<_iSOZt}ZQegFgx|K6%P%u-94S4zta zljAjQ?>P#$mqXbTgb5*PcErp2fk!hOQTkDbj5UJM?n}H9eT8DA8SHqy$^?=k`{4nP z{sZJZ*B8ASU|8+B@V!lz&aEx>+~cb+e7@I?(vF+CjAWGZ;_(9WKsdQ6h`Y%`rHH`x?Dsd~vo59FGqnn&RjN}dSW$phz8 zU@>;RwoI6l9@Se`lo5Q1JTsKUt@)EFb>OaYuFw8C_K49MZ^M?0z^z!Zx>ih=$}~$H zQ;x3qu#CJ$?JZW*tphEshg}O$>pYI-1{zD(-{B6-j-l}5fG6F?N{|7^I z3`JTkWQP~Sq8@m!j8VvjD6&VI1pZ_^MNE7Xs12XmwYeUb8@x_G)(PQg2q2{fpb`rv zYwG%JAs*n(nx(TvL!oOAiPnSM4q7@bmr@85Zw9Tnteq_(0uN&l;S_~kwr16XEUuZ* z+R~EDY_2hw|C>;K3A6bSKNDlBaW1DTCUtYmf{M$W z9wrQ7^x<*_2xf^A8bN|+X(y<3Dpg z(bZ3G9t8_BuCx<)9mg2(Kh+a#6D!M6S6%OgJfKGL*#rUsYW&eMY#%c5laUYzHkt31 zGAiJ&4PtIFxF+&M4%H|wia=}=(cg{!n}}&C0e9$=-`^5QI#9nd0&0+4qmNmL5jJNA ziT5*L_;UB=dNM@m1@uUU2mZyGx+CGbI=@edbA{+7;7EzHw>Tqc(6x>Eqo7A%(~*<_ zzIIOCx1!Z*pY?YDWp3Mcr}0B0X?_@LSMLiM_cdKh5DKlqxfLy!ayXACN(mtN{?jRcj==>iS0BGs}^7MxU zBJXy11Mxojn@VgJ2zuj2A+zc#Zp@D=WLl9oaI`x+~ZrguM5ktGNJYwh*gm4^4$7TGY zwwnqET##y`J4qR03kb3==hhp-RFA+oYQj4!W3>eL^9V?b6!)LfDj#k~EZz!<57jw$ z>3-UppRiG}qWf%D?n`oYwAVjg!t*cd+nu8Id{k6BSnwp1lSE91C9%mS@>n$H8!;vo z%?r^yI$ZKKP4sA$n~GX2OYWh$a4pnd&@Dn;;jZ2wYRxw+GDu@vQz5OjAMbR{crv%g z{D;x!JQ9OrM2Lth@C*8xB1Y;y^>ykygVPTnPXg+d_|N2$c_Li8fFAWEVfqKyDSz)H z8hPj{!K_>5isicWi^R^P+B4S^gb(g>-NNWXP|7?GC2DFb{uBT`ZS<;8GabpNDfrWy z$REl0ZWXMR0ehqpX7dch979dy6u)Y`Z-!Hz69939;e&;NqKr4841|1F9{_WZRCS8Z 
z{W(Nt3%LtQfidQp2hwqW>b$@&=qVFY;)+#n2a8}55Z=6a5YuTr1fr=RK_sTs6rBeC zt)e91WVst6KasTmrbsblMo9FM7{mAlPo<7QxgANQks1=dTcIZ-jr*qe;m$=3LbZ`X&qfaeI|i~xeE_}12}2{ zxnc-{IE=N^qtxP3>g~i6SB+uZ*sv^S5{6KJXyT(WgWaH#ImILN##oD4%1OtO(DDG% zt|&fj6;!dKR*L97EK=cM9z$d*6qh}o!S7IQ(`pKwg!8<++B&5mXi zSB|N594PE?d@pzrUVGC|?y3m#$=Ql%;yP?mZ91DV>r9m48phy`@gxz%WP(eg5&wq% z8k+qSy(M!Xhp|>KtU|`)wmru!mO)n78kMj$e2yKz_@H1#IjQ?RhN*PuHGn+HE#RTPY>%rn5aJxu$&R` zb(ezQX{aOHT7{o)bA(v%o;66(yWD!CVpWJY5KAOV+w@b%-~f6a=%!ypU!``p&&I8Z z@&-a+LB?;VIPY~tyt3Z#XP>)#UMnkD2^BQaX)kSBRB^pXz$&nBJYU$O4ba@&(VzK0 zo^KzHogPDD>Jy^lw!!Si2PpvNDI^6ppobRjPcg^VSc)&ITgF;sU;F=xJM(ua_y3Pu z27|%acMS$(EEAF;+n^FBsce-sMMtu>j8Y6UhKQ*wg)nkTlI0v_X{s@a5)Q38IYPFy zjqOCX@9VD5^}WvLFZlG6>*~I)d+wR{@_Id=kLUYV7(U54x-j@)b)Mwi^&UL32o9ko z_&2^pL&jF|gxnWQ0lJv<*sSoCa3y&D{N2jLtiE4)k_@jHyg(1$Syq|up(Dluu+m|Q z91C|Sf>Pl33~e$7sYm3~nfw+ApAh|MLh8r6r@_uxbg&u4hD28Cz!|>8aqWsD5VD)< zy6)(UA6cJU*Lpb|yB%9a>#=ZN1D*Qh@)mY8WR6;kV>N$vbZj?%>Q)#Qsir>YN7uF& zRXx1fJt7~KU7$r04aZn752ebW@{9|N_mp>X)$d(t*C4H#f8TcRJb6HUqLn>$kf85# z@69wd=~kg6QeN;y;hTx>zx)Rt_yI>;pt{xP1axDS_QqziuSCf4BY!o>(pj5#XP6dO zrLBr#bVsp^UIfvUa$IG5KR|w$BuJN}M=&X0!Pdx#E9jC7Y;7x2QYEWPj_3i~LV0I! 
zniQ3Oo$s_po~VZSUqm&bGNpb|l9UL+8=JB2tO_T6hH_E`@*_EpLQSI+-fjS^g1-|k zkY{uuo-=gbF(F_<*iO{tC<=DD%#;5{yaZ;fQ_1~v`Wp0QG)&g~Pj zY3RP>KgzpiCg~EEFG3fg_3$oHmKIO@CiUCcymoIGnq;ESdLD+c+>pi;l|0s_a!zC$ zD~KVH0V)LM&_zEqU^MaLg6D*87NH;6;{|z1qx$O&pJUrNBww%a(mkey1OSs^S4PG`8~V8c3c*7@GyvUxPyxYbOu z4^MJsishW83E949AG@3KA=7s)?q!zNrYzONa*C$0*VJBV^jy3N%Cg_YCU!nF;SuG98s3`?EA?NcQ=`7sKKo`>rx2l%2EB zuZi>}7n_X+vOn2;>Coo#f6;c4P7!(yZ>+p-t3WPz`at0n1pSU2`ug_%>7@~U4rodG zSP?84UF!VE-pd-ZmCBfvD|37V#{tSIO3{4LA!(0qr-mE%i}YEZ{(dxJ`jPU*Pp0|2 zr-a^Aagxb>u&i&;^GZT9-H3{cV} z-=czxAQrlwP1f8P)vw_ExZ1DY3F?nAdAEMP&JJ`S6ByXEIRZ}R(EQX(BsT`H{LGOH z;k!Bta33;w8ZCR-4i{m6D0!}Rok~}midDb2<2x%@gLp2!1J~wxT9!ecQ=Cc;LkFVu zHNvE#eZl%qHC3l#LyV~HL-T^+#SUr9hFEgH zP?1}bzt(nRc*9VC(ZM=Xl#`7{+EOFIsDXiqJv~n)k=C!ZvshQWDHB@1yPuxCflFN+ zlhqs1!P7o+b*j)PZL~l$ftADp$n5Lt!a>^8Td1bk_?riEdYyUWCSGSvKWLww_U67WQ@U!Gow+b&D zjL0dFzmfPKz+BaAzsv8OWRJp4?}7~WWQM||5I{C<;UAke{4kfVUFW++Hi9U!4fy?0 zP~-9;7vfgq-(39b0D4RJH>)4R%ayAH_lXPuPw(L?z>YR;WDuq$y19;Yh0=vb}Thej7YC*h7P$C3fO}n z3jzeIly?G{cT&(AEskRv$RWU?g6Dz2e+)eyvgaW@=Z9CfiWE%tdNp*Lab4p$SqVW% z4*ql^#Kf!4!6AAS=xOwz5XY#Di|uR(c_@mUc<^cP-oARlMg;1B>8%G1UIrE3cyI#2 zeBMBbHRb&0RagY89w->@!k-i^h8F88Pb#+bE$RM)|2WYIi(+3Cw*`h}2n=SE^_2jv z7_onk8*|g60%@-QSQqlxwJ)l?-Oa;$s$H1gypNz9o1a;xA`x@7U{JG>V=0hB7!n{e zb$Vq#0TxiaZjge-4D9w7=x$6$9(TlAqCwx2IIyvz8kQEuc~MdbTga; z?FuX9i*O+RXS;=YRY%_55qH6B*+UDZCj6bbenYlymoIN;BRI;r&163q7-?#8h?hmZ zEMOItaZ9Gpg31rp+hucfvMesGZaRrSM8oBvQ!rZV{Xs_d`jCn<^lz$@GY4_PN9R3k z%EV%69npfF)K+IyjtWYaF21_11fQi|QVR%;VQWLgOCqg$_-hHwN4wOon_ zE%6?ZF}3Pi&l-Wn6xVxautwmA$DPZ zIrk&}joXwkQ!!(zWVp-a=)xG#WbnUw;SQSlStQ2;sTs_KjH9fit7$i3=XOWj5|I`~_V6%4IfeFMc|l z`z!zKd+ER%U1o@VXD&EOtJ0ZDfmj+0$MZ-|nqp_CV#PVcFKpGFd$D`RW<3s)5EyH9 zE82KOh+0@=*0DXa40MmmxUV$RE@3TFmU#Ht6+rGfPu)qF4STdsn+e7?%d3WB74fEg*l-SSf0_ZXCS50aeGTk(mDn1XIcY6_%a<~% zR2GnaulP4;miFjySUC(&E>eTDD5J+0g9zURDdmiUo+BwtaceJH38h||n$NQO&F&aj zb}5Q7l)fxb$ud?42-ssKK7c;4oas(#ZyDbt2Q5aEhk3JbaAwO)NywRP8HU|5I#Hk; z&+s40+>QHtAp@x^$U@9VoL(9A(%lAvwepUXRETd>IuHo0A0LzxJ#QT6_laoW&H}$P 
zOpRP-D0BSyEcPXav@WJ2-{&N7h!A)nht50@`#cnBntOH)tj$Emc9w-wxQ!|HFg!zH)uP?1CPO(=o0qy}y zxrU%kgEG;|gF)20+|-|(DPoGsSGSoV3ayiX-s7DYHi1c(5|O}haq_l&@lr4eqC6;` zyYewK9vvg9*x9RVWdu3S*AjZ~V`I#T>Y)2s(e@yCnm1lJ!EP^=l0_NCee3co{CffC zqErIWn*!qdY>WHVN`Xt?z7~K-?hb5=uuuAn;dJ!*w*H+aatRS|VqbRWnA5P-S`5#V zeF=6tZ|k@yQVm=3@DL1ur~fA0MpS@jG1e`c<)}n0Y#ONm=7Kee zdnZ|#HwBkhjoL`>+VX_zj|6mKO;e%_4Mm!x)owDB>54A+&evB__HNyW&nN$hWYLKm zz`pnD7Ad9lf*87sp)g`i_ndmRP&t^sY|3AOzO(`~&5SC|lk8&K*}KL|v5(V`X8;!B zpjq%nhzbTr3$@s&-3wg1{Tz;Ufe-s~9#KUBag#@jX#P0&#G5au_d-&EP^RUg=#aae z*~|65_KT`_0Cun#p$Ze?apsVV;0F&^8+-ke+VAM?kiI^^R(K*{*w|>zb_4bALF#)c zy-H8IP@oC57l_M>zgia$kXfo@Z(<>a-OD_GFb$)cYam&^!`BbxfUW}+a|y$TLrRFW z%sO{F8UCZ?R!t#!z0a|Ax2I=11v~k>9`5UosQ72~O=lmh8hC^Q_ojf3{HstuS33wL zl(nd{79hd*(%ujQP-f?n5KVw2*VFGr(5U(X^Ra~9h+#*e<>{{_%B9#T{4ShJuV0kr zH?hv?XS)2_Ph$0ABsJ-+X}*#px}JhqYS#L3C^jn1KVvsRsLne))Efj(%4DE1U3px% zuLY?kplBN@e{|qsnv^#UlO)J!UgvPeU$X@hD;uFzc)2IXIZ&gd8_ux#ICuo|Ic?mG zpR*tXX2HekNA*8ufn)Ue&^&KTfwgX{+2Q&*klMF;QF`Sb*7u=Mp7V781-Km*siICE z^|vvQpDk;A`r#F1N4Z5vL-xaNb>-$H*IXE%hc!+fR`$13F?&aM{{{}epdqzoXx^!* zbl*K9AZSBuFl=vjhNoVKilo@1c*}**SBsabou+J_$2sALRG~aCF|#xP>xHEtm{^p3 zPIL9+nDd@vO=?cU+SNqg8e^o5okXw+y-wEBm{0B5-S6ZAD6))SJTJ8!DNqUsT-)Dv zE3gerjW#@I!F~E}LPP~sgy65Dw@*`)6 zAga6*ygYE%eo~{`ysh^@QzV3Gao6BM_n$4g0zY~Gm1MJocQ>trTIxj3QV?3qd(D}h z1&0i7R_w3zWT*To8Fu2-$&f_EhJYgR=7=WjK5^Z-=NQq17RbDN&{4KP+dh3hJ~0Y2 zLP&Ofnx<2t_8eAy-%mKdEGNM+nku$4R-rinVc%W3PvG{&k#^aTbm5u{VS@!VY*q e`UEFSTNA54>DxYRfWr~Dt1_|EPC literal 0 HcmV?d00001 diff --git a/examples/onboarding_guide/causallm/README.md b/examples/onboarding_guide/causallm/README.md new file mode 100644 index 000000000..2e077b6ba --- /dev/null +++ b/examples/onboarding_guide/causallm/README.md @@ -0,0 +1,232 @@ +# Onboarding a CausalLM Model + +## Prerequisites + +Install `qefficient-transformers` library in editable mode: +```sh +git clone https://github.com/quic/efficient-transformers.git +cd efficient-transformers +pip install -e . 
+``` + +--- + +## Transformers Version Compatibility + +**Important:** QEfficient has a pinned `transformers` library version dependency. + +**Check the current version:** +```bash +grep "transformers==" pyproject.toml +``` + +See `dependencies` in [`pyproject.toml`](../../../pyproject.toml) for the exact version. + +**Compatibility rules:** +- You can only onboard models that are supported in the pinned transformers version or earlier +- Models added to transformers after this version are not yet supported +- Always verify when your target model was added to the transformers library + +**How to verify model compatibility:** + +1. Check transformers release history at [HuggingFace Transformers Releases](https://github.com/huggingface/transformers/releases) +2. Find the release where your model was first introduced +3. Compare versions: + - If model's release version ≤ QEfficient's pinned version → Proceed with onboarding + - If model's release version > QEfficient's pinned version → Cannot onboard yet + + +**Need a newer model?** + +If you need to onboard a model that requires a newer transformers version: +1. Open an issue on the [QEfficient GitHub repository](https://github.com/quic/efficient-transformers/issues) +2. Request a transformers version bump +3. Provide justification and the specific model you need + +--- + +## Introduction + +This guide walks you through onboarding a new CausalLM model to QEfficient-transformers. We use an example model named `Blueprint` to demonstrate the required changes. + +--- + +## Onboarding Process + +![Onboarding Flowchart](./Onboarding.png) + +--- + +## Step 1: Check Transformers Library + +1. **Locate the model** in the transformers library: + - Path: `/src/transformers/models//modeling_.py` + - Example: `/src/transformers/models/blueprint/modeling_blueprint.py` + +2. 
**Identify required classes**: + - Attention Layer + - Decoder Layer + - Model (main class) + - ForCausalLM (top-level) + - RMSNorm/LayerNorm + - RotaryEmbedding (if applicable) + +3. **Check existing implementations** in `QEfficient/transformers/models/`: + - If similar classes exist → Reuse patterns + - If not → Create custom implementations + +--- + +## Step 2: Create Custom Files & Mappings + +### 2.1 Create Custom Modeling File + +Create directory structure: +``` +QEfficient/transformers/models/blueprint/ +ā”œā”€ā”€ __init__.py +└── modeling_blueprint.py +``` + +**Key modifications in `modeling_blueprint.py`:** +- `QEffBlueprintRotaryEmbedding`: Precompute sin/cos for rotary embeddings +- `QEffBlueprintAttention`: Use `position_ids`, return `past_key_value`, implement `__qeff_init__` +- `QEffBlueprintDecoderLayer`: Return `past_key_value` from forward pass +- `QEffBlueprintModel`: Use `QEffDynamicCache` instead of standard cache +- `QEffBlueprintForCausalLM`: Entry point with additional parameters + +See `modeling_example.py` for detailed implementation examples. + +### 2.2 Add Mappings in pytorch_transforms.py + +**CustomOpsTransform** (RMSNorm mapping): +```python +class CustomOpsTransform(ModuleMappingTransform): + _module_mapping = { + BlueprintRMSNorm: CustomRMSNormAIC, + } +``` + +**KVCacheTransform** (all model classes): +```python +class KVCacheTransform(ModuleMappingTransform): + _module_mapping = { + BlueprintAttention: QEffBlueprintAttention, + BlueprintDecoderLayer: QEffBlueprintDecoderLayer, + BlueprintModel: QEffBlueprintModel, + BlueprintForCausalLM: QEffBlueprintForCausalLM, + } +``` + +See `example_pytorch_transforms.py` for complete example. + +--- + +## Step 3: Testing (4-Stage Pipeline) + +Your implementation is validated through four stages: + +| Stage | Description | Validation | +|-------|-------------|------------| +| **1. PyTorch HF** | Original transformers model | Baseline tokens | +| **2. 
PyTorch KV** | After QEff transforms | Tokens match Stage 1 | +| **3. ONNX/ORT** | After export to ONNX | Tokens match Stage 2 | +| **4. Cloud AI 100** | Hardware execution | Tokens match Stage 3 | + +**Test function:** `check_causal_lm_pytorch_vs_kv_vs_ort_vs_ai100` in `tests/transformers/models/test_causal_lm_models.py` + +### Common Issues + +**Token mismatch (Stage 1→2):** +- Check all classes are mapped in `KVCacheTransform` +- Verify `__qeff_init__` methods exist +- Ensure `position_ids` are correctly passed + +**ONNX export failure (Stage 2→3):** +- Check for unsupported PyTorch operations +- Verify dynamic shapes are defined + +**Compilation failure (Stage 3→4):** +- Reduce `num_cores` or model size +- Check device availability: `get_available_device_id()` + +--- + +## Step 4: Add to Test Suite + +Edit `tests/transformers/models/test_causal_lm_models.py`: + +```python +test_models_causal = [ + "TinyLlama/TinyLlama-1.1B-Chat-v1.0", + "gpt2", + # ... existing models ... + "YourOrg/YourModel-7B", # Add your model here +] +``` + +**Run tests:** +```bash +# Test your specific model +pytest tests/transformers/models/test_causal_lm_models.py::test_custom_causal_lm_pytorch_vs_kv_vs_ort_vs_ai100 -k "YourModel" -v + +# Run all regular tests +pytest tests/transformers/models/test_causal_lm_models.py -m regular +``` + +--- + +## Step 5: Validation Checklist + +Before submitting PR: + +**Implementation:** +- [ ] Created `QEfficient/transformers/models//` directory +- [ ] Implemented all required custom classes +- [ ] Added mappings in `CustomOpsTransform` and `KVCacheTransform` +- [ ] Added imports at top of `pytorch_transforms.py` + +**Testing:** +- [ ] Model added to `test_models_causal` list +- [ ] All 4 stages pass (PyTorch HF → KV → ORT → AI 100) +- [ ] Continuous batching tests pass +- [ ] `qconfig.json` generated successfully + +**Code Quality:** +- [ ] Code follows project style guidelines +- [ ] Commits use DCO sign-off (`git commit -s`) +- [ ] Branch created 
from `main` + +--- + +## Step 6: Submit Pull Request + +Follow guidelines in [CONTRIBUTING.md](../../../CONTRIBUTING.md): + +1. Create feature branch: `git checkout -b add-yourmodel-support main` +2. Commit with DCO: `git commit -s -m "Add support for YourModel"` +3. Push and create PR targeting `main` branch +4. Include test results in PR description + +--- + +## Troubleshooting Quick Reference + +| Issue | Solution | +|-------|----------| +| Token mismatch between stages | Check class mappings, verify `position_ids` handling | +| Shape errors | Verify KV cache dimensions, check `past_key_value` returns | +| ONNX export fails | Replace unsupported ops, define dynamic shapes | +| Compilation fails | Reduce `num_cores`, check device availability | +| Runtime errors | Verify input shapes match specializations | + +**Debug tip:** Start with `n_layer=1` and short prompts, then gradually increase complexity. + +--- + +## References + +- [Hugging Face Transformers](https://github.com/huggingface/transformers) +- [QEfficient Transformers](https://github.com/quic/efficient-transformers) +- [Contributing Guidelines](../../../CONTRIBUTING.md) +- [Test Suite](../../../tests/transformers/models/test_causal_lm_models.py) diff --git a/examples/onboarding_guide/causallm/example_pytorch_transforms.py b/examples/onboarding_guide/causallm/example_pytorch_transforms.py new file mode 100644 index 000000000..ff62588f9 --- /dev/null +++ b/examples/onboarding_guide/causallm/example_pytorch_transforms.py @@ -0,0 +1,291 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +""" +Example pytorch_transforms.py showing common model onboarding patterns. + +This file demonstrates three representative patterns: +1. 
Blueprint - Standard decoder-only model (example for onboarding) +2. Llama - Most common architecture pattern +3. Mixtral - Mixture of Experts (MoE) model + +For more examples and patterns, see: +- Production transforms: QEfficient/base/pytorch_transforms.py +- All model implementations: QEfficient/transformers/models/ +- Specific patterns: + * Gemma (custom RMSNorm): QEfficient/transformers/models/gemma/ + * Multimodal (Llama4, Mllama): QEfficient/transformers/models/llama4/ + * External models (Grok): QEfficient/transformers/models/grok_1/ + * Vision-Language models: QEfficient/transformers/models/mllama/ +""" + +import warnings +from types import MethodType +from typing import Callable, Optional, Tuple, Union + +from QEfficient.transformers.models.blueprint.modeling_blueprint import ( + QEffBlueprintAttention, + QEffBlueprintDecoderLayer, + QEffBlueprintForCausalLM, + QEffBlueprintModel, +) +from torch import nn + +# Example imports for three representative models +from transformers.models.blueprint.modeling_blueprint import ( + BlueprintAttention, + BlueprintDecoderLayer, + BlueprintForCausalLM, + BlueprintModel, + BlueprintRMSNorm, +) +from transformers.models.llama.modeling_llama import ( + LlamaAttention, + LlamaDecoderLayer, + LlamaForCausalLM, + LlamaModel, + LlamaRMSNorm, +) +from transformers.models.mixtral.modeling_mixtral import ( + MixtralAttention, + MixtralDecoderLayer, + MixtralForCausalLM, + MixtralModel, + MixtralRMSNorm, + MixtralSparseMoeBlock, +) + +from QEfficient.base.pytorch_transforms import ExternalModuleMapperTransform, ModuleMappingTransform +from QEfficient.customop import CustomRMSNormAIC +from QEfficient.transformers.embeddings.embedding_utils import POOLING_MAP, PooledModel, validate_user_pooling_function +from QEfficient.transformers.models.llama.modeling_llama import ( + QEffLlamaAttention, + QEffLlamaDecoderLayer, + QEffLlamaForCausalLM, + QEffLlamaModel, +) +from QEfficient.transformers.models.mixtral_moe.modeling_mixtral import 
( + QEffMixtralAttention, + QeffMixtralDecoderLayer, + QEffMixtralForCausalLM, + QEffMixtralModel, + QEffMixtralSparseMoeBlock, +) +from QEfficient.transformers.post_processing import build_and_attach_mlp, model_type_registry +from QEfficient.transformers.sampler.sampler import sampler_forward +from QEfficient.transformers.spd.spd_transform_forward import tlm_forward + +SPD_TARGET = "target" + + +class CustomOpsTransform(ModuleMappingTransform): + """ + Maps RMSNorm classes to custom implementations optimized for Cloud AI 100. + + Most models use the standard CustomRMSNormAIC. For special cases (like Gemma), + you can create custom RMSNorm in QEfficient.customop. + """ + + _module_mapping = { + # Blueprint - Example model for onboarding + BlueprintRMSNorm: CustomRMSNormAIC, + # Llama - Most common pattern + LlamaRMSNorm: CustomRMSNormAIC, + # Mixtral - MoE model pattern + MixtralRMSNorm: CustomRMSNormAIC, + # TODO: Add your model's RMSNorm mapping here: + # YourModelRMSNorm: CustomRMSNormAIC, + } + + +class KVCacheTransform(ModuleMappingTransform): + """ + Maps model classes to their QEfficient counterparts with KV cache support. + + This is the most critical transform for enabling efficient inference. + All model classes (Attention, DecoderLayer, Model, ForCausalLM) must be mapped. 
+ """ + + _module_mapping = { + # Blueprint - Example model for onboarding + BlueprintAttention: QEffBlueprintAttention, + BlueprintDecoderLayer: QEffBlueprintDecoderLayer, + BlueprintModel: QEffBlueprintModel, + BlueprintForCausalLM: QEffBlueprintForCausalLM, + # Llama - Most common pattern (standard decoder-only) + LlamaAttention: QEffLlamaAttention, + LlamaDecoderLayer: QEffLlamaDecoderLayer, + LlamaModel: QEffLlamaModel, + LlamaForCausalLM: QEffLlamaForCausalLM, + # Mixtral - MoE model pattern (includes SparseMoeBlock) + MixtralAttention: QEffMixtralAttention, + MixtralSparseMoeBlock: QEffMixtralSparseMoeBlock, + MixtralDecoderLayer: QeffMixtralDecoderLayer, + MixtralModel: QEffMixtralModel, + MixtralForCausalLM: QEffMixtralForCausalLM, + # TODO: Add your model's class mappings here: + # YourModelAttention: QEffYourModelAttention, + # YourModelDecoderLayer: QEffYourModelDecoderLayer, + # YourModelModel: QEffYourModelModel, + # YourModelForCausalLM: QEffYourModelForCausalLM, + } + + @classmethod + def apply(cls, model: nn.Module) -> Tuple[nn.Module, bool]: + model, transformed = super().apply(model) + return model, transformed + + +class SpDTransform: + """ + Apply generic QEffForCausalLM forward pass to extract `num_speculative_tokens+1` hidden states before computing logits during decode phase and extract last predicted token during prefill. + This is only needed if user is exporting Target Language Model (TLM) for Speculative Decoding to validate output logits + against the speculated tokens from a smaller model. + Other than the computed logits, there should be no difference between the SpD Transformed model and its corresponding cunterpart. + + ``Mandatory`` Args: + :model (nn.Module): PyTorch model. + + Returns: + :model (nn.Module): PyTorch model. + :transformed (bool): whether transformation was applied successfully. 
+ """ + + # supported architectures + _module_mapping = { + QEffBlueprintForCausalLM, + # TODO: Add your model's ForCausalLM class here if using Speculative Decoding: + # QEffYourModelForCausalLM, + } + + @classmethod + def apply(cls, model: nn.Module, qaic_config: Optional[dict] = None, **kwargs) -> Tuple[nn.Module, bool]: + transformed = False + pretrained_model_name_or_path_temp = kwargs.pop("pretrained_model_name_or_path", None) + + if qaic_config is None or (speculative_model_type := qaic_config.get("speculative_model_type")) is None: + return model, transformed + + if speculative_model_type not in (supported_spd_model_types := [SPD_TARGET] + list(model_type_registry.keys())): + raise ValueError( + f"Speculative model type {speculative_model_type} is not supported. " + f"Currently only support {supported_spd_model_types}" + ) + + if (model_class := model.__class__) in cls._module_mapping: + model.forward = MethodType(tlm_forward, model) + if speculative_model_type != SPD_TARGET: + pretrained_model_name_or_path = qaic_config["pretrained_model_name_or_path"] + model = build_and_attach_mlp( + model, pretrained_model_name_or_path, speculative_model_type=speculative_model_type, **kwargs + ) + transformed = True + else: + raise NotImplementedError( + f"Model class {model_class} does not yet support returning multiple logits to keep." + ) + + kwargs["pretrained_model_name_or_path"] = pretrained_model_name_or_path_temp + return model, transformed + + +class SamplerTransform: + """ + Add nodes at the output of any generic QEffForCausalLM model to enable the + sampling of next tokens at the device (instead of the host) and return the + next tokens and/or probability distributions. + + Note: To achieve this, the generic QEffForCausalLM model must provide the + logits as output. + + ``Mandatory`` Args: + :model (nn.Module): PyTorch model. + + Returns: + :model (nn.Module): PyTorch model. + :transformed (bool): whether transformation was applied successfully. 
+ """ + + # supported architectures + _module_mapping = { + # TODO: Add your model's ForCausalLM class here if using on-device sampling: + # QEffYourModelForCausalLM, + } + + @classmethod + def apply(cls, model: nn.Module, qaic_config: Optional[dict] = None, **kwargs) -> Tuple[nn.Module, bool]: + transformed = False + if qaic_config is None or not qaic_config.get("include_sampler", False): + return model, transformed + + if (model_class := model.__class__) in cls._module_mapping: + model.old_forward = model.forward + model.forward = MethodType(sampler_forward, model) + transformed = True + else: + raise NotImplementedError(f"Model class {model_class} does not support on device sampling.") + + return model, transformed + + +class VlmKVOffloadTransform(ModuleMappingTransform): + """ + Vision-Language Model transform with KV offloading (two QPC setup). + + Used for multimodal models where vision and text processing are separated. + See QEfficient/transformers/models/mllama/ for implementation examples. + """ + + _module_mapping = { + # TODO: Add VLM models with KV offloading here: + # YourVLMTextCrossAttention: QEffYourVLMTextCrossAttentionTwoQPC, + } + + +class VlmNoKVOffloadTransform(ModuleMappingTransform): + """ + Vision-Language Model transform without KV offloading (single QPC setup). + + Used for multimodal models in single QPC configuration. + See QEfficient/transformers/models/mllama/ for implementation examples. 
+ """ + + _module_mapping = { + # TODO: Add VLM models without KV offloading here: + # YourVLMTextCrossAttention: QEffYourVLMTextCrossAttentionSingleQPC, + } + + +class KVCacheExternalModuleMapperTransform(ExternalModuleMapperTransform): + _match_string_replace_method = { + # TODO: Add external model mappings here (for models not in transformers library): + # "YourExternalModelClass": { + # "forward": QEffYourExternalModel.forward, + # "__qeff_init__": QEffYourExternalModel.__qeff_init__, + # }, + } + + _match_class_replace_method = {} + + +class PoolingTransform: + """ + Apply a pooling transformation to the model. This transformation appends a pooling layer to the model, allowing for the reduction of spatial dimensions in the output. + The pooling layer can be configured to use different pooling methods, such as max pooling or average pooling. + """ + + @classmethod + def apply(cls, model: nn.Module, pooling: Union[str, Callable]) -> Tuple[nn.Module, bool]: + transformed = False + pooling_method = ( + POOLING_MAP[pooling] + if isinstance(pooling, str) and pooling in POOLING_MAP + else validate_user_pooling_function(pooling) + ) + model = PooledModel(model, pooling_method) + warnings.warn("Pooling is applied to the model.") + return model, transformed diff --git a/examples/onboarding_guide/causallm/modeling_example.py b/examples/onboarding_guide/causallm/modeling_example.py new file mode 100644 index 000000000..195c9d7db --- /dev/null +++ b/examples/onboarding_guide/causallm/modeling_example.py @@ -0,0 +1,394 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +"""PyTorch Blueprint model.""" + +from typing import Callable, List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from transformers.cache_utils import Cache +from transformers.modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, +) +from transformers.models.blueprint.modeling_blueprint import ( + BlueprintAttention, + BlueprintConfig, + BlueprintDecoderLayer, + BlueprintForCausalLM, + BlueprintModel, + BlueprintRotaryEmbedding, + rotate_half, +) + +from QEfficient.transformers.cache_utils import QEffDynamicCache +from QEfficient.transformers.modeling_attn_mask_utils import _create_causal_mask + + +class QEffBlueprintRotaryEmbedding(BlueprintRotaryEmbedding): + """ + Add the required Rotary Embedding functionality to the model based on the Class in the transformers modeling file. + The purpose of this class is to precompute sin and cos values for the rotary embedding and cache it for faster inference. + This class is more or less the same for all models that are onboarded. + """ + + def __init__(self, config: BlueprintConfig, device=None): + super().__init__(config=config) + # Build here to make `torch.jit.trace` work. 
+ self._set_cos_sin_cache( + seq_len=self.original_max_seq_len, device=self.inv_freq.device, dtype=torch.get_default_dtype() + ) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) + + freqs = torch.outer(t, self.inv_freq) + + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + + return ( + self.cos_cached[:seq_len].to(dtype=x.dtype) * self.attention_scaling, + self.sin_cached[:seq_len].to(dtype=x.dtype) * self.attention_scaling, + ) + + +def qeff_apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding with Multimodal Sections to the query and key tensors. + + We modify this method to enable the application of the rotary embedding based on position_ids + instead of seq_len. This is needed as our modified modelling accepts position_ids and not + the attention_mask as an input. + """ + # + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) + + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + + return q_embed.to(q.dtype), k_embed.to(k.dtype) + + +def eager_attention_forward( + module: nn.Module, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attention_mask: Optional[torch.Tensor], + scaling: float, + **kwargs, +): + """ + Implements the forward pass of Eager Attention for the model. + We explicitly support Eager mode based attention on our device. 
+ The method would mostly be generic so we don't expect it to have many changes. + MIN_MASKED_ATTENTION_VALUE is a special value which helps our compiler know what -inf should be represented by. + """ + pass + + +class QEffBlueprintAttention(BlueprintAttention): + """ + Here we'll set up the forward pass of the Attention module as implemented in the original model. + We initialize our own RotaryEmbedding module via __qeff_init__ method call. + + """ + + # < We load our own custom class for the rotary embedding to enable supporting position_ids> + # Since we map the custom classes to the original classes, __init__ method wouldn't work as expected, + # Hence we use __qeff_init__ method to initialize something while the mapping happens. + + def __qeff_init__(self): + self.rotary_emb = QEffBlueprintRotaryEmbedding(config=self.config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor], + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + batch_index: Optional[torch.LongTensor] = None, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """ + Most of the implementation remains the same as the original forward method. + The parts where differences occur are the way we apply the rotary embeddings. + Also, we return the past_key_values instead of storing it in the default transformers cache. 
+ """ + input_shape = hidden_states.shape[:-1] + hidden_shape = (*input_shape, -1, self.head_dim) + + query_states = self.q_proj(hidden_states, **kwargs) + key_states = self.k_proj(hidden_states, **kwargs) + value_states = self.v_proj(hidden_states, **kwargs) + + query_states = query_states.view(hidden_shape).transpose(1, 2) + key_states = key_states.view(hidden_shape).transpose(1, 2) + value_states = value_states.view(hidden_shape).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + + # We build the rotary embeddings different from the transformers method. + kv_seq_len = past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + # Application of the rotary embeddings requires position_ids as well. + query_states, key_states = qeff_apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + # < We add all the required items for cache kwargs which would enable updating QEffDynamicCache > + cache_kwargs = {"sin": sin, "cos": cos, "batch_index": batch_index, "position_ids": position_ids} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # < We override the attention_interface method with our own to enable Eager Attention> + attention_interface: Callable = eager_attention_forward + + attn_output, attn_weights = attention_interface( + self, + query_states, + key_states, + value_states, + attention_mask, + scaling=self.scaling, + **kwargs, + ) + + attn_output = attn_output.reshape(*input_shape, -1).contiguous() + attn_output = self.o_proj(attn_output) + return attn_output, attn_weights, past_key_value + + +class QEffBlueprintDecoderLayer(BlueprintDecoderLayer): + """ + Overrides the forward method of the original BlueprintDecoderLayer. + Only changes being that the past_key_value is returned and `self.self_attn` method + is now an object of QEffBlueprintAttention instead. 
+ """ + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + batch_index: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + The modified forward function also stores and returns the past_key_value. + Every other operation remains the same. + """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # < Self attention would also have to return the past_key_value as well and we capture it here> + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + batch_index=batch_index, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + **kwargs, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +class QEffBlueprintModel(BlueprintModel): + """ + Replaces the original BlueprintModel with a modified version. + We initialize the custom `QEffDynamicCache` for past_key_values here instead of the DynamicCache class. + This custom Cache class has all the required custom ops to perform CtxScatter/CtxGather as well as other required operations. + This enables us to cache the past key values in the way we want for AIC. 
The component won't require any changes mostly. + """ + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Cache] = None, + batch_index: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if (input_ids is None) ^ (inputs_embeds is not None): + raise ValueError( + "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one" + ) + + # < We create the custom QEffDynamicCache here to be used during the AIC execution> + return_legacy_cache = False + if use_cache and not isinstance(past_key_values, Cache): + return_legacy_cache = True + past_key_values = QEffDynamicCache.from_legacy_cache(past_key_values) + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if cache_position is None: + past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + cache_position = torch.arange( + past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device + ) + if position_ids is None: + position_ids = cache_position.unsqueeze(0) + + target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else 
past_seen_tokens + causal_mask = _create_causal_mask( + position_ids=position_ids, target_length=target_length, sliding_window=self.config.sliding_window + ) + + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + layer_outputs = decoder_layer( + hidden_states, + attention_mask=causal_mask, + position_ids=position_ids, + past_key_value=past_key_values, + batch_index=batch_index, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if return_legacy_cache: + past_key_values = past_key_values.to_legacy_cache() + + output = BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=past_key_values if use_cache else None, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + return output if return_dict else output.to_tuple() + + +class QEffBlueprintForCausalLM(BlueprintForCausalLM): + """ + No major changes are needed in the forward method of this class, it is the entry point for the model during inference. + We add the additionally required parameters and pass those down the line as well. 
+ """ + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + batch_index: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + logits_to_keep: Union[int, torch.Tensor] = 0, + **kwargs, + ) -> Union[Tuple, CausalLMOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # < We add the additional parameters that we use for our models here and pass them down the line > + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + batch_index=batch_index, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + # Cast to INT32 to avoid issue while running in ONNXRT + logit_index = position_ids.to(torch.int32).argmax(1, keepdim=True) + hidden_states = outputs[0][torch.arange(position_ids.shape[0]).view(-1, 1), logit_index] + + logits = self.lm_head(hidden_states) + logits = logits.float() + + return CausalLMOutputWithPast( + loss=None, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + 
attentions=outputs.attentions, + ) From 9a3c49a5d5b625b6eff333455d700433c46d6993 Mon Sep 17 00:00:00 2001 From: Meet Patel Date: Fri, 21 Nov 2025 12:20:17 +0530 Subject: [PATCH 20/60] [QEff. Finetune]: Added initial folder structure and files for HF trainer based FT code. (#626) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - The code is based on HF Trainer infrastructure. We have added a skeleton code. Upon merger of this, different team members will work in parallel on different components. - The code will be experimental and will sit alongside the existing finetuning code. Once we achieve parity and stability with the HF Trainer based code, we will deprecate the current finetuning code and adopt the HF Trainer–based code as mainstream. Signed-off-by: meetkuma Signed-off-by: Dhiraj Kumar Sah --- QEfficient/cloud/finetune_experimental.py | 6 ++++++ QEfficient/finetune/experimental/__init__.py | 6 ++++++ QEfficient/finetune/experimental/configs/sample_config.yaml | 0 QEfficient/finetune/experimental/core/__init__.py | 6 ++++++ QEfficient/finetune/experimental/core/callbacks.py | 6 ++++++ QEfficient/finetune/experimental/core/component_registry.py | 6 ++++++ QEfficient/finetune/experimental/core/config_manager.py | 6 ++++++ QEfficient/finetune/experimental/core/dataset.py | 6 ++++++ QEfficient/finetune/experimental/core/model.py | 6 ++++++ QEfficient/finetune/experimental/core/optimizer.py | 6 ++++++ QEfficient/finetune/experimental/core/trainer/__init__.py | 6 ++++++ .../finetune/experimental/core/trainer/base_trainer.py | 6 ++++++ .../finetune/experimental/core/trainer/dpo_trainer.py | 6 ++++++ .../finetune/experimental/core/trainer/grpo_trainer.py | 6 ++++++ QEfficient/finetune/experimental/core/trainer/kd_trainer.py | 6 ++++++ .../finetune/experimental/core/trainer/reward_trainer.py | 6 ++++++ .../finetune/experimental/core/trainer/sft_trainer.py | 6 ++++++ QEfficient/finetune/experimental/core/utils/__init__.py | 6 
++++++ .../finetune/experimental/core/utils/dataset_utils.py | 6 ++++++ QEfficient/finetune/experimental/core/utils/dist_utils.py | 6 ++++++ QEfficient/finetune/experimental/core/utils/import_utils.py | 6 ++++++ .../finetune/experimental/core/utils/profiler_utils.py | 6 ++++++ QEfficient/finetune/experimental/docs/ReadMe.md | 0 QEfficient/finetune/experimental/examples/ReadMe.md | 0 .../experimental/extensions/preprocessing/__init__.py | 6 ++++++ QEfficient/finetune/experimental/tests/__init__.py | 6 ++++++ 26 files changed, 138 insertions(+) create mode 100644 QEfficient/cloud/finetune_experimental.py create mode 100644 QEfficient/finetune/experimental/__init__.py create mode 100644 QEfficient/finetune/experimental/configs/sample_config.yaml create mode 100644 QEfficient/finetune/experimental/core/__init__.py create mode 100644 QEfficient/finetune/experimental/core/callbacks.py create mode 100644 QEfficient/finetune/experimental/core/component_registry.py create mode 100644 QEfficient/finetune/experimental/core/config_manager.py create mode 100644 QEfficient/finetune/experimental/core/dataset.py create mode 100644 QEfficient/finetune/experimental/core/model.py create mode 100644 QEfficient/finetune/experimental/core/optimizer.py create mode 100644 QEfficient/finetune/experimental/core/trainer/__init__.py create mode 100644 QEfficient/finetune/experimental/core/trainer/base_trainer.py create mode 100644 QEfficient/finetune/experimental/core/trainer/dpo_trainer.py create mode 100644 QEfficient/finetune/experimental/core/trainer/grpo_trainer.py create mode 100644 QEfficient/finetune/experimental/core/trainer/kd_trainer.py create mode 100644 QEfficient/finetune/experimental/core/trainer/reward_trainer.py create mode 100644 QEfficient/finetune/experimental/core/trainer/sft_trainer.py create mode 100644 QEfficient/finetune/experimental/core/utils/__init__.py create mode 100644 QEfficient/finetune/experimental/core/utils/dataset_utils.py create mode 100644 
QEfficient/finetune/experimental/core/utils/dist_utils.py create mode 100644 QEfficient/finetune/experimental/core/utils/import_utils.py create mode 100644 QEfficient/finetune/experimental/core/utils/profiler_utils.py create mode 100644 QEfficient/finetune/experimental/docs/ReadMe.md create mode 100644 QEfficient/finetune/experimental/examples/ReadMe.md create mode 100644 QEfficient/finetune/experimental/extensions/preprocessing/__init__.py create mode 100644 QEfficient/finetune/experimental/tests/__init__.py diff --git a/QEfficient/cloud/finetune_experimental.py b/QEfficient/cloud/finetune_experimental.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/cloud/finetune_experimental.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/__init__.py b/QEfficient/finetune/experimental/__init__.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/__init__.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/configs/sample_config.yaml b/QEfficient/finetune/experimental/configs/sample_config.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/QEfficient/finetune/experimental/core/__init__.py b/QEfficient/finetune/experimental/core/__init__.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/__init__.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/callbacks.py b/QEfficient/finetune/experimental/core/callbacks.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/callbacks.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/component_registry.py b/QEfficient/finetune/experimental/core/component_registry.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/component_registry.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/config_manager.py b/QEfficient/finetune/experimental/core/config_manager.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/config_manager.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/dataset.py b/QEfficient/finetune/experimental/core/dataset.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/dataset.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/model.py b/QEfficient/finetune/experimental/core/model.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/model.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/optimizer.py b/QEfficient/finetune/experimental/core/optimizer.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/optimizer.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/trainer/__init__.py b/QEfficient/finetune/experimental/core/trainer/__init__.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/trainer/__init__.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/trainer/base_trainer.py b/QEfficient/finetune/experimental/core/trainer/base_trainer.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/trainer/base_trainer.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/trainer/dpo_trainer.py b/QEfficient/finetune/experimental/core/trainer/dpo_trainer.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/trainer/dpo_trainer.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/trainer/grpo_trainer.py b/QEfficient/finetune/experimental/core/trainer/grpo_trainer.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/trainer/grpo_trainer.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/trainer/kd_trainer.py b/QEfficient/finetune/experimental/core/trainer/kd_trainer.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/trainer/kd_trainer.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/trainer/reward_trainer.py b/QEfficient/finetune/experimental/core/trainer/reward_trainer.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/trainer/reward_trainer.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/trainer/sft_trainer.py b/QEfficient/finetune/experimental/core/trainer/sft_trainer.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/trainer/sft_trainer.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/utils/__init__.py b/QEfficient/finetune/experimental/core/utils/__init__.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/utils/__init__.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/utils/dataset_utils.py b/QEfficient/finetune/experimental/core/utils/dataset_utils.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/utils/dataset_utils.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/utils/dist_utils.py b/QEfficient/finetune/experimental/core/utils/dist_utils.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/utils/dist_utils.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/utils/import_utils.py b/QEfficient/finetune/experimental/core/utils/import_utils.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/utils/import_utils.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/core/utils/profiler_utils.py b/QEfficient/finetune/experimental/core/utils/profiler_utils.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/core/utils/profiler_utils.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/docs/ReadMe.md b/QEfficient/finetune/experimental/docs/ReadMe.md new file mode 100644 index 000000000..e69de29bb diff --git a/QEfficient/finetune/experimental/examples/ReadMe.md b/QEfficient/finetune/experimental/examples/ReadMe.md new file mode 100644 index 000000000..e69de29bb diff --git a/QEfficient/finetune/experimental/extensions/preprocessing/__init__.py b/QEfficient/finetune/experimental/extensions/preprocessing/__init__.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/extensions/preprocessing/__init__.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/QEfficient/finetune/experimental/tests/__init__.py b/QEfficient/finetune/experimental/tests/__init__.py new file mode 100644 index 000000000..d647b73a6 --- /dev/null +++ b/QEfficient/finetune/experimental/tests/__init__.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. 
and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- From bde0cda83ec0c1936ada2d658d1cc7eac9c1a210 Mon Sep 17 00:00:00 2001 From: Rishin Raj Date: Fri, 21 Nov 2025 16:03:31 +0530 Subject: [PATCH 21/60] Updated to mermaid diagram (#631) Updated the onboarding process png diagram to mermaid diagram Signed-off-by: Rishin Raj Signed-off-by: Dhiraj Kumar Sah --- .../onboarding_guide/causallm/Onboarding.png | Bin 231305 -> 0 bytes examples/onboarding_guide/causallm/README.md | 62 +++++++++++++++++- 2 files changed, 61 insertions(+), 1 deletion(-) delete mode 100644 examples/onboarding_guide/causallm/Onboarding.png diff --git a/examples/onboarding_guide/causallm/Onboarding.png b/examples/onboarding_guide/causallm/Onboarding.png deleted file mode 100644 index 8c83b0ac006e90f18f66f3e1282db782048c32ce..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 231305 zcmeEv2V7K1wzr6*pb|yNIZMt>&H^GJA_9V715J?7L;*=ES+Yoyq#&YzWI-|#MGz!O z6l{r8z8&{{yYpt=+x@xPUH9I)RVSbSIo0ELURe(7D9Omf#~8hZ`d-cP}U*Z*M}A&rFd}JXWscB5=qsAqLC^CDu=DVPVJ1cSi%RF{xuwBp zb5k<}_(u+5V1|SqF|>g@u$v;m?cDtAoa_Q%NZP~(X#s9J!^I5)L;U<)JnURBXrgb6 zKEuP!&dtsT#?;_Oa2wNouY>k;9bsc@3O2XB)xC+K3$_MubHwiO&ervjHiihBoguJa zZh9VBdQM4jEA)p)+RzkkYy-CdUo7EZQQH%q2jpVo+Iig6aA!uRTM4H=~T75rgfV8v({QvGoxQz|O=ItHszQhP= zzPr8M0V}Zc@9uyG)V|Sl-)$PEhIS@9_rmygmj~S~k1#bh*?Th==k65?_}Zx8Kf%MKy;$Z3N_g8BAwZU5!Hn=Blq5#|uJ_vpC$9w0dX{Ig%TY4cTW{(q*k zKX)0)W9MXMVWY&S0h2$k#wDZA#u_`XU6St%*xEVmNif0?5P$b8(#FmNX^gal zo6CM1m9jzFTN)xD9zr_%?VfW;q!k$C0)y8Oc6Ls?k_5N6LxLd_I}7vOt2^(6UbMrC zpX_{h$QEgDW3YSIPdBuCpB>!B7_rO$pWe5v1OJ$v-ENDSnfb?SfvoEfqV>~l?QZ`&e)5|GLP!~!UZ>~gH-`jNmt zBcUueJ&coH5)zDaNCUVX#E8H52?z0Ene6L65*BM3VKcHp&tz9fi%`>fM9+XI;y2k4b*R`6{T;Q-tp7~5xO z;O3^rkh?ZV7(sflYx@v3c8ERaxHDu0|7LQxHR>NL=T8mA-i(lAh1R_5Y$Onn&JN&g1x)veS$3wxnZF1aB~|393+6;oC+2oo3;QB83gdZi~Jx{u;1C5AT9Lm 
zf#La8M{5AhYXk28-c9e=7?3{y8yzn<&-bQ`chAZ0bG%%8&UnA${W|#9@w{9=3+sR2 z&EL!QZo5&L0JQ=2{r3d#UtjmnXA^*izgyD}N#xI?`F}Qb`f9BJF%0&o@-?^kbJief2rMP1 zo}M+r$VkuB($r2*4}1pzfeF$OV$OD+283!KCwC0qK1=!siOTop@_!(05&Yg=^6w#N z`;Di-PqLOhBm38}o!c=Ez-qrCTo(}lo(8tZWPb{7Zwu1rucO-#)wle7Tfo3YxFy7S zFqNs{j$-c2VPtP<0Ohk_+zzT96<752JVRavp#zZWH4xqj%Hxza68wcZ2qQuJTU^Y(bV{ z~_$q9;mJW|_VwV4K7)GN|}K@cIt{`k%k=zxNyeHC$4>Bn>w(L43y}_;v|-x6-p$ zIr%@zv40JfWgP(w1>$ohq@lez0x};6o9{?&Y6O(S5`h3^?4J<&=Zp9u`uKx5|A!>* z`wVZ#k^FlBF|NIkem6)E;NC@*_5}$%{M+8;`>=m6&-?3ygWSK52X`p==Y|cx?*9MK zt8@PsZ9EXqzW?*z#kqb3L)&>3^oAemLOh%xGUD0yzn!h`N)i{Bz;4*KCn7s-8oQG~ zdh*8@+upnXXG=kQy;0vqAwMYk_p18`YX7HbHrK8`?D7M)m%x3e**rT-*`LsVEj;h> z?Jv;4vYVp*xtjb}NC0;E^M|Hlr&;e;)cL<_Dt<*>|MieK*De9RQ@!5>y=rz)b0BCC zrRRa#C(k=wgd%_NNgg!Ha036n-R#ID1-FG7Vh}%Z=Kt-e^XE~(?^d{%wb)w0p|+lH zg3NDi548|V>B9}ojJJ&lo52oF1np!yciC$^fts%NT?43;i5;6E+{TQ_##o;T>;P(< z+4~3V5&8;U;pPL0KlEk)2WIB&RCVw5V12u7L*HirOm+acvV?LfTN9)`XvqZlC*01! zKjN{O94ex2`Ekng{q_2h#=ExwSE-?N_ryHU_S`^mirvFx{>e?34HQr#ci)&C%V z!Unpyxp@BoMA^2z=Mgrhpn|g9BK@yMR(`qGxa~#%7aG=IDmLyJR-PThs-mo{2|_WI z3m1W2d^M|*Mi7g*IT4Olpw0Iumf=6f^{X-b4qo{`Yh3vuNOiw)<>KV|+gewy-FnkE zpU(X!Sl1o${cBL>ZC?C4P28`{y_f3!F3sc?{LvMEla{>{WhXxVJEoSqdhl!UEr9sH zk8c$({mc-n${In))pqa*b@JOnHuc9llpS9DBb>7r!8^r-zV**JG=XQ>= z?c%-*t|2!6uAKSR3;qnF(6`x#g8Yg%_&?i2#JzVmcn=%dYpdR;C0swlM)ol1y}0dv z9$M!4U)M#wQ#Ja9HtHQ_{7P*8gSq@W9%0%whI_>+ZrE;d>Q`$}dwv7l|Mxk=#Pc)s z9P$Z&+g$b6g?pRf|4!kCB;wCMBd|w+eO`ULO<|`!#BQgH*=qM#?GLQB`PGv;-={gd zVZ^`rU=H{Jjvp`~ZmwM&-S1U+zd}d*F?X)Lj@7@AW7!2*|6Eo3RQ zh4Keu_XjN)f4+%>ZF{7z^+Yb7p9Rf(#l!u^ZEx%^WZQPPvX8HOdE~yfTs3=r3+UIF zIL}}B%I$yj^C4f8vwd^?WLl;(NON=W`#b>pQBl5lK|xCWqJpyA-i<&%{_*2KS<3cL z7eS2y|20thR~IgB-XElkmvcAv+wVLfwS#u@7niQzW8(igzyr9phf?h&)_cBWyR8Fa z>poWR#Y6i!z2}(!LU!c$=kWe>gvaR{t3ik5j>7J-nrAOY{tK{ryV>i%_yM%tMliW= zzcsf%6x+VK_v>~$7j_?jel>9CCq;DdhK3FtpgN!+BYDwLcQWqC0=?|>OvUrZnT{s< zVjMrsKycpW^qmhx=h159@xvv5JA|&0fZ2}5L4+nNSs}r#FNKGTemn|Y_dKWQJ;MVp zeYNoVSI7HT2kt*!^ZYC}IxxD>Y4(OBs_o%;+I#gijpA!dZ!Ak@Ye_xiQZRJfR)$fJ 
z!j$hI2FvHg<4!;E3cY!VjtUJ^Ug98r6zM_52v$FP)bzqYuX(Q0`m~(rxT>vMb4}{2 zHw3!=UT(vq4B7UBevD@x4pZ+u69PScD+5=4aH6o;HG?MID2b`$+HJFm91k2Z7@6H$ zNtXBQ#Qg5a7bx#zx!9&751$!3Q*OU*`%yGZx`U>3;?MF22bzVaOC+vRiO(o3br}R_ zxvbcr0wpC-e23HdRb;J(Tb>zKc-mqO=PUF$D}dGRKF&)Wa)?+P-Xyxzx9lU0#!sQ{ z)#+G5k`VZiT|3_#E=Ya2!5l63FvZ^e&_>6E&~6`5@MNwygJkpS97`ujh~{9qXvJ6i z32ToQVD?j?2M!+JitvqU$wXm2oFQp?63!FBex(RE0-+LVrmy{STJCC=hT+MLR| zGvRqcS+g47eyQbXCT0Ov?Wrz!%qriOmVTvoNX13W4-@z%A1X$#Wb4;R zon3u;>QNPrnxj&dHSQHsi@cWGXs3%9FnYBWzwMXxG@6TEwZ`QePnIti3KHm^;BC$; z3Be%r9{r9ZP7F84&UP33xm>y4jhNSYpFRF~hU8f17$aj#^2@-cMjrZIrh|7d9n6n2=AdgGj?ci9Sd7?5wK)0^WFc2}pSwU-r5Vi`MQ$vDve#}jj-^lcZTL8zJj=^IH-qXl_1s}vn@2&NS(j_Z-|U~` zp2VgpJ-18lXj-O^B9^xK3c-|(AKd?ZY52Z*&Yj?d%?`XtnYmUU5GENdR7U@x|IAmJ zkiTk)@hD)xHRuL&$z^Wiq$_N<7WnsV#a50=C9kCu=?pG&nt{s_3#ay9R-%$ET`gjh zLv`GC3OkQJJA7pD^36|d`&Kz}SK_r*SLfuw(ackgJqYjL&G2Bl0qdkLTNKc}>$#4R zq{xzNuVSZT`vt#|67536iL4J}LpIW08qzV!YsET7UAT`T_i?*X^j2PTS}-&h)*Ioe z>2cd@jouigI^VYXfbq~OS>Aa^K3*Fz$xU7MZ+iKb^q^^i24%Tx+RQbR5my!eZrp_Z zlP~z*tW1*&e1BbC?Tob8p#sDDyZOlJjeXms;}ke_;WpO#NSUjAx37jY4}TK;ckeiw z+0Sdgc2eE(d2_q)*>95C$Z@M|wfRg5NspZ#RthygN8~qCGsX;EJDTZzctE4d)b~xp zK5@Kp_Ce-mZe`lTmrEU1B>M9<--vcA9)B!tD;;cS#+5JDyQ^eBAyN*lj*GLDkdV0i z=XGC&;6KFthoZ91?FdMS)f`Z#zSb}GqL%(3(FuMaLEJFQ8=5!K88qFPP;7?XT7^@=JkfZm$h&3Q~^e(uCv99 z54VpEwmEdceXOeS zc*tZr*7}#qvcM)~CG8hlnb=VgMwbcgfjhUa z^;#5=J5J{|iwU#K^|`H0Rf(-lp0IT8?aiPo2@>C2o(V*{(306Ua^14kN?cs76BjEP z;mgDXVHR`g(r~06MgNx!2QQu`7;V45=;^+#8V)z>VLlCACq$pw!c!I3)9fA}1%q8D z4Tkt)EQ?I#?Vt4w$=;g=vl^_=Pa2C0e>Ny;i~G!bs#BFl_fss6%Lnw(jtRZ3)tLsp zO>U84W);jMnLq34?1ur)8f#q(~f&$by$d~-IqDyDzo@$wpb@g%

zJ4cq){Pe|E$Xc+0UQ!;RA`-gn1JR~sQ8QvKs!oJqvT!t>>w?ZDZ|( z?m9(vuo+X`0}Sh;_@!C#%^~qq{X^Nh=TstgJXoH*1ge9kQS5zcqm{DoRNj~fs*48u zov!m#b|@Wnh1b%HI?+U;N=YN}xAc)Z_;Gw>60xal`Ca;`O_^T#!P%V1QgalUr$moh zt$S|5B;fP-S!|*psv!PmCFfw=W3ZPnuSs?3^z#GUawm=Nu2EQ3pgor*3@4IvoXTrc zR;5b^9J3ziyE1*FlPQ9ULzyA*HageyS`stQiHu@)7r?-PtxM1I=5?2fGze@NW34LX zOQ7(2rf*ctfm-2kkmC*PVXBwtgSV2sGF^sQs+!#vb3^d&Jlb$u`&^7<#j#kJi!2xE zIo@emwlZREdU}+$!YT)k(J9gA<5;f9{4u>{sdX&XnnG42Vw$MZpZ-~2q=zd)d~2nd zu!kgDHW9~dG+HMCW-Xd8LlBV^>3BYCaZQnYd-yO-DVf16UvLQR)+TorHd6o zXp(PczE@{*q%H}?!h<7TJk26JJ%ov3rS86JlOM~LN~bH=1kyz_$~RX{jzG4IuqWye z)k?g|B-&>M7py@!&)9kQt+jzj2DDqW`u<{{auYTt&$`xh#oZ-@;(cJIF4H`ixrS*t zV5w#VlOe~Yl6hoReYZ^rJ4R%J5?iQ+CDt^N5tal?^gSMXnua&+S^njcjg`>^N?IW@ zR%@5R6A46_)Ca50H&}FM@60s2Z}ObE?W=E%TUucaq_CXQ>)=*kn17hk!fz<6nBG1H zq0t!goO&fs!X`s}p=Os69s{j5-RIWhA{jG-E;8t<2gl`0KS+q;Mc}wER~b}~#xIqO zN^!XX<@DNE?B#fO?6AyTm`y*dy&d%u$IXi1zIusTVgSKI8t$qUL4lkLBhH1X5hRtk zjQ4#B5aN?rRh91nT{QXQo6Fmp)@yYu2uu}%#0JAm=v8U3Kl>0VmCqyl5*F(o$}!QJ zWT|J!>XgjKO$RhTTIZyzjHnh(Ov-SjN1cDCTS9to2)Hvj0$RhBM{I)ZWy^yz>!af0 zxPJ63W6~6JArD8|^Ji$J6cz#(ADEXA=B@SiFXn4nPR1=#-L2tD7sYLA_wywlJpG)u zky$v5vV6IQBhlBWlFFU&@a#|_!p|5uLdt4x{kD>0HNKzDKZxH;J`bRIbsl=Kh>DP{ zdbz^bdiy5zK~K(9+mBK=li{rP4LCCR2mLXyLXVP_uXnGLHdJ3&X? 
zW&83(`2Z2lqF~&iE?8s9N)A3=@JYtGsl2A`;_ImJh`3p#%>A;t@JtzN50jB6_`3CyXJ8S@9E^8r@2XU* zTjJEWWL~}&;U<@bT41~-q2+rm`*;L~o@&=RFogjJZ{;h8-Yr@T@5xIqy&y9JZ1D{p zBgcfbK4bdahtF_3Ib1&}4$alW>*zcQ6C;__#ctj0@mRVot}ugr4d)Ok{}ONkTxUZ3 zrSM}=rPaDNcznK;B2RcuutFgUk|yKsTZ3k1Mf^S2e}(G7%(xUw<5l82-eF`4WVH-W zo5OG=4&g=DC-Glh1@`rM))oW3sr;6=M*!~C`b)D{Gmm1kh7wg1^DM)j!`8scMo%58 zeDGMli4Bk7b^Ik5`?+$i-LVQYhKlfk%ZOjSG?W<_kj zg_AXwx-U+nydDg$N~*TzdGZC@Fvw9_S3Iww-Oycp^IS5T$5z!LJ zbq(1UnhvqGQ)=yI>E0WZc$ESpwe@!dO@av(vAR>D6RVt@F2oE^`aG}8T75Lp;HeQ_ zn%ju0T0>Dz5I!QvOw(WkFPG&HC@J60kt%pGnMxijI0wb6^TxWoD+=`0=s0q(foz%J zx1oYO_b0yhH-smN^)rt)5lQLy;nXn5eQ9ziv+eV*x&R}yjjc&Fq)g&pfmNE_!-1VZ zlcvfJ#;99R)LXrvbH-4fgk%wuI(k<77fkD8ZaB6p)iJ}Ic&Sxauke|vH3G)jHM ziZI%Bsdyl8$%_v!pm$(%@t9(7s?6qtwIG&~@h*+asjucbQWq=Zaj}`vj&|Z|(VwkL zA*$7)=)7K?#(F=xCwWpUDbJuNe)vI&@MPyY3dfx&lJK!1DWWp%4yCGTV#y>kMON7N4K3r=w7R#l&-MjsDEzp< zkEe@}GL1?acs!E|+$;u1kC&_WBp0pUaVa|T$xM9}3gbV3s0 z9k!^08%;!?hC;f{$7F47uGIy9Dx2}*2yrt4a=!f50mI=?BaAD#XjQvE4 zkZHMK6QtTYigQ|xw2}ouaxVG+6hziyQ2EjNzC!g8(sJ565>C9#DfDMAViIZ>@nCf& zb0SFen>jnBc*h+-g-YfzGsGNxb`Kg|LAgW;LpPDpSldBeeodQ?&iF^VReFIu_-d3L zvO4cf)vAyu<$14~aq<}Lr{5Zgv~m=SwhBsy*{qQtUR1KMu~d)vR`yR>I`}&;jA=Wj zT(G$Z-?+6SoHq|WhR`dl*S5<6{AEKm__X*4kq>A%nq4vKau~SGRP!XtT~C~aqO2Jc zC2YJ>MRn=Wscfq5$}FO0l(-gKKH||XJ`4@Dn0QtYB;_H0B`)dd%W(CD=ip-ov`Ss-yvVn6$Yvwhpcl*4>hmLZ54#PLSx@7p#Q0w#{i|f78 zrnND?9c1K|0`7T7rQ6qVK5HRh^`XkD)JC}tK_JrXY~eB%#O+33k>^-Sr!A4kQ(okY z;3@^-qMxz`R}1>UguZut^mU`o?xFAqx^T9Wjcu_h<~I=a7`V-n0Cm2;5*H49U?}-$lrWQjxHeiwurzL+o|WB zhcBqA8Hv3s8II7dok;T}Y-Z@Gb$dSOgOKC%cwEUcAcwxCzp=6vcfE(1-G|Oh_pF>= zq<|vtQ;vpLA0XWN$l`CIg7}-h67iuu`Kj+epd$|*RwU>ym!-p^j{NKuQWN?Z`%{!r zwZAbCdC5wDXSY$|0Zb0Zm&f!)8~X9e_`I{crm_rT4^c5IZ9cG@J4X}p6u)=`iC_Q7 zp@8+i=o8U7oIo|4&puZkEd?wh#A#e5%UrC9%3~E)KOWffeot+9#I^q_k;+#Sb{m*5 z{EN<3&2Vq&cHfIFzSI|@8E3;T7@eTj@79|wmbLeiunCHmG>+H|;A!&JKk5_lW}R;O zDpH}_;ciCB-0P^gknTmgiY~S4yQv_XzWR_a%~Jy(C$?D*F)!cz4<-tWijF&~X{2vV(4ReN2)R`O(ewvLjL2k$O@ z*cih!%YY6GOJ~;N16LLCjRjb!<2zT&N`STno#%)^~$yzke3 
zIPrA{d#JjE3g$wG`y~L&E`NRbZYo0eR_&ncmjr<5VwGijx8cTohsoZP(AzKuNUq0p z8j81iWOyGgElR&liH^4)*7gYYPri=DJFIMY9%K}pA`@E8*EG{=%GbR~Eir%tR9+{V z1X=jU8jiO#5g!W5H!RHCAebuF<~1`4fGG}Bt?R&*V}dNw_C>>$&-h{s4{^MN`)7mD zr^DmlC?v(|6Cfv%`A|~GuejBm)bKnaHggO0F-!$}seEf=B5{1m-hHiiK)-0+ zeSL@$^&~0iD0sz4DG1(-i$P-U6PTrJwBsC&c_kCN32(Bfgh2U(5#p$E+_>LUg%`1_O4v6kn&CdNiF3VMvK`csu zW-6Xa0`K!aPmPWjWh^(hd8f`?lP_WMPMW#a6+&L7gX%!l5}(i0ihVY9`V*8?TRXk~ zjZg;{a1zJ!d8cl1n(URQI#=w^P0Sez_I*l8pvWj3xep5yXQ_MCixM7KQI>$Qfixe2 zC=3jq_EH##KN&v8@aFmOpz`qbPCa+m<(6PK0{TFi5UZ^9frnE{E&*d(BV8&juKKUU7JD56@J! zxg^LTerJs&qja%n24sTbE|Uh7q^s+aO0kEr-|3ZuHDN<}Qe~(d1#!bMA3_kNTGt;~ zQ(!L|OPpCOC>I48eu7^?pLHF1m6`7R{TH6;j#=q9=dJLyM&Hc(%8AhQoB5Dm$ACgd z#mrlu6d2@iL8awuPWKU-+kW7{m6^lacWK-&~}IAPV^zWP)O9&=57A*nl#K8rha2r&4B@WXJ!r!H*V3L1 zu2n@X0_2Ug4{XYR<0VsF-lX$=9jA9NSGjJA+SK~AkJBz+()0qSiH2~>{+iHu_d>mvd~#`13P@O1~=$5LPkQz5#<%y z0AS_0u@7xeTl^q24h7;uzgVk$Yk8a58sy{@T$=5;0B9X}Mg4v{y4l6Z?&=eyklV(D zUfmiG(Ic~{E02{qoaz;6jztaE^AXA5QXikY_uG;Cixn&K`pdqwK^OoDRU_=lR6@xP zh8+=Ke%W%XY~so^NkUvNwlb3<`(g5`9^r6823V@E%5v4QmzDF@x_FNT(GOpoMCXz& z#Le|33F3_k*4|tftY;4+H*h%acs!4lt=iiih*S-?)m8K^0{uhIDS=rm=00Wu11|F> znNJ%rjx(H^lR0hrq0clWE|JsND9*W(VwAkaZ>EyM>QJ_HyrNTUalI%<=|bzW*r_S9 z7{r#A4eWlGhPv~6kiP_)4n>2Nu$jG6DLq^=1V*b@j{e1NNsuwbp5($m7ptRryA51q- zwCGtBRm!M}uW`OW7Bh~)F?q9IV^wXnh}WnxLM#aLX>~j|mmB_5gw6vT<7T#LvaN*v z4hZ-P^wJ<{?r!jMuMzfm#vpDS)f>Go$N>9rcRX@wxG(`I#>Q!e5Bef(iWTAb1ypZ~ z@-7y$Y|dvbnyfj1a3#@#qzS;H^Pp{G?#1D#F+83gm|+r`y(eMi&2GWigVlK54p9%Q zULsD6K@h6ft&?&g9>lD%!bX8ahRaAHejz@iX=@RrZA@=9t0e$W6=LfVA&+^gLa#ov z6(VB#bf_@M+u0zSnQ2Sbqn09WhW^Ud2vIZbD zJlf-&%3o?QU!tV#-l9Ezvk4o-kTJT}79g%4f!=a6%riGmxUf-^$6tlCMGxVrB?$D*Q8IhoOQ0)bMN7UQ#-^% zALL3MgZt+q3Rq>jo(SNbJ7m}$!0L*HFi4QO&)4LXr#c|CK^=rWEw({!@mjcU@_N4y zH}%aE@t%bI=vBN^`ELc8YHQj1mBcdjoS?1AKow$KONSwOdqh8 zWN4tYsiURWlp34Y7%3L&7Mq-UFJa>KVhxWNH_AV_Ejb{|VzoIbUx~0u&5_UCEC09; zF_!K!ulAYj*NM#PHRKwO!^EAhbuS8W#v0LR%jiU58qZYWRo|-09+7P}CM)!Qd0&W% zE-L!*{6lHiDKkCNcu)j;|7iJ>Tg@IQQjT1X+okE36Fj&yQ9A&kVP^KLxK4a3ti(RK_MtT$Z@e0iiL(tqmo 
z8aYFWRUVO5>mi9EoKW&=eh*TM^RZe+S+WNPyBO*O2z8Sp#IIBx@R?7j#vT4J5Jk80 zTB8gfRNe&G?q#2KOkS+jeQm^OI~(6?sK4OJDDu!QL2Wh8Eh(zeW9-=UZ=p$vaijS% z)#0ufN>gd6r5A|(9#BQid*mnL^xc?vM%(=H!BsgJ$+@wQ4JJM5$oHF^rPH#ZGNofU z%p9)r>?Ic#h2j;2@qTLz<`oW%OGZ3j*Byzd>TLiGAWhfb(3 zXs?7G;tmUwvFg;ZedFckklphzU5|ymnXaMgla_)3iUTAWn$GdtBOpe5d zIte)UYiuI@R;!tU`-eM|2O7wH;Fy>1T<>o$OYLFl@HgQl)B5atfZXm8Ki&O%?nzm1 zR^NTXv5vR?9OZfX#NGC%=a$1fZ>^4=;ICOt5ks536?YLcli!JfRD$Yl^K^C{1+DAi zgE!Z&9=wY|XrQ9LSg^p}y7kz%R==Q%YE5a)HToVMrW|`^p(P3AW$f320G#G7KF2S= z7_#o8TKkCiWj8%4+{H81IuNtsV~nbt%t=?R%huKQrWgE9N+t!>*}D`9b>w=s^zMQQpI)CQ$9q*jLM>S|cGNejk9~tON#rBL3{#F(9EDvEOVlxNBkAL)M$9EQyP9ZYhVSQ{##{oW1BInCvBS# z_20LOF)lVdn&=_WRj(Io4G>*xJ2N%rgPEx+zTBuQ{_drkis#A2nw+P7-NX!1sjr)_ z)|&IJp}{UZKE0R?50*vaGB;R$pb(yyg1M&Dz?a6re4!X;)m1aj|GF6SS|yqxGyC~lw9gj zctZ2iz&7pA=d{AxkGoprS21mLg_I)K2&uS>R{OBaVDe+2{nDPDcf?;u$ z+(ru#MUYPF$9VI+`ACukBVit_Ipbs$)9uWgF>^Nh1FlYEzxZ5eZ~z_&nLdq4<6<_K z3jrtyiruYv*p!o-^O%C4N{M?gJmNmrF+P*R3(+%Y$yu8y%__9cz+Q8um(@5QKg&4q z8!Ft`gYIBW8BU^#p@#vIxCuu=d(O8zYpj_GPne8h@g=Tmm3O^KU1&2esJ1+HeToxA z8b$zU)t1HK$vU+J8h3;jIb51VFv&t_-F{V?`{85xV;f}7 zZD*X^@6uj;elB%fl;Yi{lGEeS2>MPcQ>=Q4SH~~+Y4jQi2egV0<8ja&3}C)udZgzA z1Oa2JjYVb~b4y;L?~IDrF|G#LUK*zboOOz6O1!_Y)NsMrSPVtHdA2ZkjkdIMD$AlozL*AyOgRwCIu5Uy_w*R>n@f<$IcC@S2YAxuKIs_>=Qtui zQP9>}l`1N3T8QtSlI*;4ctbUE{-vv?tDH4iFD`q{X#16GQL&nfqSROYiIZCU<3!>d zdWVgjSd>pW>(61xf*OcDd0rjMl)$ScG%VYaUb~wq2)#n`EXI)JSd1**Hh(IbM6}?+ z92EbAc8WON>*&eMaaWGqa@DdzHGWnsv#mkCp*dDvaZ8~UeafZ{?^3gxO+MG=J1SgY zKWCk_G)W>B9oeo6w<|GU9nZwQ zRO8oqPu|0<-tpB1$7+qUS$uwT?@Em|)<=tmGo9lq)7T5Q3Nrv9GiFE_{WU(HdyJOB zng)+&h-oyPyd}>JFmfa*pjcw^ph?S&(CQUEn+eC4)>Yp6;X|DPJ1(r|V&k z=MF~NoXl;RMKb#ns#aEqKA|KokJ4gmXsxz4bd(7R84hZHZ6p*gCQxN*+6r zQ~waqu0Wjba@{8HT~C7CwBhbLn>EF0Va2sM6;ZlVhF6oO^=t1ZyG{BH<)YQB=D)Fh zdmi@s$h}@%-66vPY2P8c)=V}d;fiNXNA9&b@?(7^V`o;nx6?pd@<$Rww>RRBsY86l%J7!EuM4jQRD;;~cjG zUPdJ|!kntabh`6rQs>Dz4*#Y}_2>zyGNIzo*i}3em@X?hlUtqtOEH9#FleXKKfAh6 zN?I0rCht(o8Gg1_ltEh6xHj(+u&k{4OAqEM8D)w5_(LCgK4CqA&ODgVOhizg#A|-4 
z4A`~B*HrStA*qpAUG)+3$n(SLj9}6MP43ndqj~e_B<(^F+14tO}gR=Rr#1(-Fi~(r&VH<=)CAXW5c*@vyTVa zCHta_9Vh}Np)FxiHrRBD zSi|i|%q^U$JT_#DOVx-UN^Q}_bd(HiA))c9uWzRHnHk=)zS*UC{-dJSD{o(z!l0d|E1{xeGBSjqH|E}Qjp$`3V!SAr zFh*ka{ekXC4DpEKoId|#22;JOD!v0`G2Uv^9u}xnibC~N;+F3_yj()<) zXocJD{dv|!gt;rnBl1jz9uv==)qi5Kb{q2x!-mk2P#Uq*F{?N)1`oARX88mmv#5(+ zm!Wm#SLqdqH!cw~kkaKcY*2rZ&I@Cuqjz}NUot(ZB|P_vR%4L!$uy{`^bQK9XQr=~ zPF;VXH{`7El8HxA!{xo?7460lF}Gpl-;HhGm27U?mIFlzJp4+R9ZxoRmRF715oOMj zYTlJAzsQ%Zo)UQE3Xe(6nK|K^;MPs}I@zkq>d94wb=I;^nu`iydFFRVBVE&Sri$82 z^?qle)Z`rLNAIA0SRM)vcFw*VB~>z`gn@tc9{F~w;(_KyyCQF#B`)oIM%jfy(R%#8 z+{JIWMTB7dCPU}5-H*SlP8W(hU)G4th*1|tuh2)C!cR~9p$x`jcb9Ult4?38`f5P# z>*Inn7MhP(I!vQ*pVF4xHSkJm4&Y0??QsZAMk&A7vbc5b<={e_N-+89^f4U;5h(^% zqnd`++>|$cS#abz{6-Of6RLDSk)X>1`r0ZigX$gq=3hpH=Q-SDyQ1TlkbKxNA1D`f7gIke1+9s0lsB zu_;cUO=V_&%{_y~Mt<5*-L-d6%Yi8t0Fz7ixo*0#3C7TgUDfHaYg1ArZ>ll(v0h<= zvYm++25pbLG&bFMP?9pf^@7#MTCOpSMn!$TJEN~36p-j^dkLE>yNZs0MKjffXFU3D z{s){#t|3D`qr6SPGKvHijWmPP0CfF7^wmo+V$h)kEc06w2+Eq?g_e*hI8rxh9=;M= z9U~Jx(U-g3av${Euyf&urh887gLH}1{*yI?dSpp*Q2F6(bK?mQdz7<=l9$HoJKA3? 
z-uj_a&Z2GCv}j4>J4}D<%|GWQ@%mLG{?$d%6r>(aQP-&oH+hq*@MJRJ4o zsAbmSnxBDnj*Py;(_ImBUo6pZ6@G3R&0u|Hr>15${zFae>a|1iWp^1%&vwVwQ^>bQ z+zX8u!>f;yr{Qmsyug7^J1ZE)0P5v64qLCNlsx?d66oj}a^3iun;&INrEQ->Q2lYQ z@(=zuokO_76`Kig5IEq@QKfnbqu1}9klMG_2^}@_ z#KG75oko|@-doqAgzTe7a>wr>FJ(s8YRZZ)jR=$ru5yBwRafJa>O(dTHSU%=pRA$0k5AY(wdADmUs20FQ*pTxMRj2x^->dtPZ2V{vNv@;cNp zowYI&BVhi<@=E{X8L0!4AcTL&q@SWbRO7apH(qZ`p!>x{&!(O}n?pig6m;Km*uQF9 z+X`|hq3r{$`q;ssr@0b3Q4r2xen%w*3Hxvpy%OrKc61Xu{6+N3we!=dsdI1AX3pNT zbSUoi(g1yrC{QDHGH-B#J!bZPyprx@5~Di78JK-q={(ik_F)95y}NzzO^4HW^~_6N z8tR^pQ?)Mm*h3Yc?E@9mI8O3arzSrS+k0xLTcjrXw54w7i@RU`a?!DaR;hio0v+c# zRDP(eXG|b_2a0j6q#NTJ<#+Jj^S#rgO$XJu?E@09FYRho1{CvAuaBfGJz*-TopK)U zFkelh>uGRFTimMy(ZbB zKHtve4{dXj=5B9WUqVF+6aeyX1UQO=B6#3dsd;zK3{*lsQ`Ug()ww*Sx^xF#$?|@w zmK1rM!6S{WsK}Tq&Svfr{D}!7`wt_F+tp!-YUt)=;l#gO8`Pja0zlDxKh{#_(%V-A3hj zhkhq(@s4ie1af1>f^MUj7v6Cjnl&?ypFh0ygBpI1!_I2xSj)zSx`E0$(RI)`^4k$+ zdn>0CS$3RL{`JBYDWX%R;m$|8(5(Rw%U?2y;PA&L<{7PVMd!R~!i0F0M_S}WnH+I<3* zR935>QtE6}HL&CB?`D>~nTxT)YC18|o;FDvo-)2t73-5TFfWwzaS{}ee1m#7KmmLv zk+78MGx@_Swa*9Oi=1a&3+t#r2Tk0%R3x=v`LXen3ZyTCb=vzZ&97>3^) z=`d%Tu%`23?fo=G!WZb+#hEhxZo{zf48?qD0yc5S1Xx$^<&LG9>7gE#emx6NA+pB& z2=yNfSg+mz#aIAK#(2E06BagkO3gafVBO_inz^!8xk9MtVm+zoXpFS>*OrNK{-mf! 
z!vEf`t#<9%nUlIR2&a{~;f5;Gf#uq+f)gv3EuMFrx^Jkn`Kgj6vxNJM60tKoIN#$X z7PI+0J!mMt*|v)`s(@pNv_Xq_>G0HS(fV9NRWUey^3YYhS$F;Fe~EwZw6TI$zsqyA)yBm$%izg}E_gJx?q>Jnk#8XqPQ;03Jft;@mex z(LxL-zTgHdLb+eilzrzF!V^wZ4-S)y18p2UC)7kW`uN7<)ge|&sOmba#2+#qEWPB# z;X;jV2-SUQ3|AR3bS;iecaUM4mV{&tgS!?4;KXI^hw~+S!_V^5JFB_=6k2 z(rE0>D1yQiur8aQXmf&j!AhZ4z{INCQx_;=ms~1`TNOF5GM(O-B`WC~FThz;>iUel>3d6&n}m{HLvQFBAI!n4)GPhPFwo9YhMC{yS7^RT z-&}EOUJOlc13JMFt5XOHxkVQSs$6mXRBp6}oiWXMZ3{4rJ~~;UPsdPlg|P9NQ6UDt z#U8Uvug!aIx5Pr1(z=Ynf1&HC_mhh2YVNW9a!tmpRT*=L5QG7G^D)AHCgGLzUx6nMyor8+AF1dDtlM{u!QPpHC>jK1TGqAS<#GrL;n;Ly`5^UBE$FdON|)-K1;lLq*H@-Vs1sKZIi5A&b(@KX4|I1f%g--OLtt)UZ$xkwW7!lhmM}1M{W(L zJJnga9j14?rHZd5;;|I$f%#kY!Iq5FQuq)%UwZG>Fy8uLvCKWc{JL94mCVhom(|O- zQ8m?gCm7npH*YJX8_IQ&x*l@HuRKmFjXTP-0!vtdg%l7s@g+`z)*aU=RcjqoIGM`o zt@hxKVHLaIishKDvaAbiTo>Ey8{sd|@(r?~NW5aw|*u62B`B^Phv8ecdJ_b#uI zu@OqGO|_mfyEoSw5%B-md+&cP-}iq!QbuHEXOj>j*?Vs(JFAS)pvVqo?@bc2vqyv; zB-t{OP2_=$%xt31aXq}A@Avcd{Rh6c+voQAp>DU!^|;RKSm$vb$9bK{{a8_8+;sAa zcbK0}y?!)rgnk2uw~QaqoY&&LC?@E**obdS(w(KI*K&p2%>As)*{D0Y{AWp<#w7%) z%n0cT<(z92x<8-BD@J zuNwL51=MmaIFp{~CbrRYVtHw;-Bqi5yFh7vK7tpoW`nIvdK%gsql+Ysl!LT zwV|4A>zx`-lv|QpTLTmhDFx*vQ*gYGLTQE23HFI-?EB3G7nchMs?u_Zwn~1~I?QW0 z+ZHG79*?iTby>j#PA0uCreMD^a|p-YB$bL5<{y7Dk`NXRZ?O*8XHy*{L&Jf1(?iwe z#rIu_8XSHd;-5aGMLNOJ+adKN-iY+R8VHe4WL$ecGAdI=*%>@sUrf)iNshMvvx<5!yV} zc~*3PR)!Z9`=mSB_w_enilnBb2-C%>V4S34bUrJ}&9z)os<1KJ!O7ez^kL40ON*7) zX2Of(3f5|rUh>5#Oi;wVC#jdy`KDhqsai;gSio|aXQW5=p#X1R`tIVrE}t|Z8(3Sf z3?)vp+2$Ii-uUK|gkKuZ`*M4N=jd*T)`kk7H!sTZ{7Wn@6W8<-?aKyFF0qJ`D7LYf z#`SZ|yV&1!n_jQniprOfI((gaU2L)f>;7%>{ISo1`Nwz;%{)F)2M_WrCAwJ7=t%?- zakx{wX=kl6A^SpTXcuL4!$QA2)qnkte%*-ztr_1pxWqZcv)^R)ywug>#^^*H;y-t2 z5o*n_`^QRt=A93jgUC_*J{$maUU6;@xuEWcD|*y1J$I37?6z8sEZbGrj6nQX1+F56FTTg{u2g_B9wv`8LYta?=7VK7Ew=CW5A z9_wu%pMK=ilCM#YdMm9oOS-{nLV4IGeSB3*U!Xeop2E_(E)F?2rI(JXO_Z{_RbO6P zhX$YGXz1q(wcNR->_$ZQt6){MXGHAn!Fg`aWAQjV1)h&vIedCbv9aIDFZUd#Rg~M` zKBUbnvh$*ueH8KJqmkJAD}UV+mo$vv(5Xoge981*S9>jb`1<$uPxpjx>gnRAJ80A$ 
zCc8+=X9MP*xjWk$QZ`haFV>t+xW1U~*o4omi0+p$yFM_>aTCzFZ;#j55-QQzsA=1zlzv zNVkYKZm@QS_-b@qe|+dCsoqs4czy?6=AS^cgU7(4zQ4RLGC%ylXA(D)K!HR=_c2Lg zZ~1)qk&S!*OWCy%&NW4kQX?s2%0V?PW^xVd^kiybwBGQuK=_>DSq(8~ZT{ul)?S)R z8?WhW-@Pwd@_uXFAgMNGD3M1rtrsL6=`{L#vOp~Sj;s4@;M^OHXC$w!qiinxWm^bN z%eg8Xa`oWMj&#K`3YgGGiMPz^>K*lPx=;@VF1y~byS6KrEcodYLm67P!>M&hbM%V7$7@;im9dYe^Jkmh_V86Gr6+Z5j zwTua*s`3cY=iKFea`tz<;^%c|Rks^Mv(DLbfg(vRLpqiIA^9|dua>*oiO(uO>%~=l z#>f`0WEaNrOZ8mogevV&&mSr7roxgVEn~MFo(Gq+=+8Tek=){Ou}rtUQA1pzm?%MN z*^@Pzm1h3_=<<`!=g9(TKbIMh5+rL4lZUl)eLU`2C@MQ`8dajei!Iiz`<|jpuBfFk zZTMcg^zG)9gho71?z&nR0nK0C*|+EaTDtIzb#p#7SpK&8rB1J@@IFhqJ=kVqSH(f~ z6;vn<&E{kNm+fnabA2RWB))_(PtV=WsX~E3{qJ|L1`Ov{i)bC$&P`PRnKyd-h%2wD z&@!ybPUOL*VHHxo9nSJ*=3cd=0#goDzivVS`wPAHF$w~SyQ#CZZ#C?UnEysO-WiIW zQ?;_XbJJZ-i$8yK^NXXxQ80fQF2Q`FKQ{ex7H7}1?v(XTTjx!0*Ux4lmgj9ox4(tE z4OUI55|m1O;1rx3q44xGL^r3=lHH1ycu{Yx*A-Tad$`n&6BO4ukJm2N!4O~3O!m3Q z_V3v^vUS?Lqp(!N3UIbwm!3tQnkTeP@qS}{dGeP?*O*wEr5+!=k$p*~e9`2R-jpQ@3@yX< z`MnA6uy2uWgLZ9BhR*vFu(0wDR~lXqF!(&Q6gnLbUjQ8jRCA20A}MX={4Do2pe~fiC_6%R6)!lYE>mq5zWb#6UxzLc;gl!opV1^q>SqMe*Q~fiTa~E zurAs?avHt)7)Rw8vntr_*=?wJ|r4^k!T1rE}2lxF+y@d|g`60CjbcN%yS*E>JXBq($JTG#gU zFtj(ezJzMCx;^*Z3LWmWW-EPYG&hobKQza}F3P~eykIYGz<+DaU2I>%s9nT!A#n6t znQPv%q~;vovuc*tCi=%)$;K!iEi2V4WJII(^k8yf;P9hQB>vd*4V5OJF4H+Ir!&Qx>vd{ zsBAxzu6@~wo#Q4d65+7kiRr(Vw|0fpso3KQh;nZ`fQz!3e-yet6S=S=T zJJ>7v`_I&%Dp*WrU(OYbT~=%jHPUOl00mpN$ypQE(2DiS-S#SCYqO5$@1mes-t|>o z^xHVAs~UvABd$i){fW!x?6OY|li0x*czfMm|FZ;b(j-;v1cl(P{)6~ zn75XHyX#m|x0RlWI#JNd$3iZj8hhyL7KXe zcg(Gi&}AW#aSq|%nkv0Zz%v}W7`fdLx*nBl?Ue2MQ@%NvE%DpNUqUH*_xZ>&7FNp3 zPe->@IW8r#>lmEnuL$0tQ}O)cck< zk6sq6tcA@SH!hbp{M=Q%7%xewkHm#OgYPw|^QogdjFW61&^`U=6q}I!Bnweyw^v+nMRXwJ#`75!n*y|} zQ=n+9%gXyshSuUoM*IEC*VNrrPHT}7OkP@z$l4zpZn(}HGt*wne$_9leXX`b9 z-rE#{Uvr*v?MS+;6v2Z)Eq4uO&JpHnd)sm?2%mN+Eni|ql zi92d%btW(GBtF*R!?}EEj-WOre%{js}Xv9iH*}N7AXwmm&c2? 
z4thZ&)zGGprxBzb)SY%&V1wx@kjC`xkE0qP+`T}Oa^{`L_eKi!}V5h zd;0TBLJ<-1fuwwP(k~SU{k$S0`)Pp_QvE{Nzm%hOh0h8dXZt!%ev#z%$y z6kOTIP5tI6mk#?Bv3FCOWWBX79@6~eG*CnpbQ-uw9g#4;%UM%(X=)gmX^rJf^~j91 z3)w0Q2`va7F2hqbGKq{UJ0t$-tF4`jlWx&@pHBJNU_8;gx0clVKgf;85e8=S(Qmck zIV+e??q+XZW{lGc&YXzc++g0YWxU8ZXPs)rNAdO8Yt68HP1{<1jd0HIG98QjxiBYZiHN=)DJj+}% zolcRxJ1DXv)Ok3A8~>v`&)C zD&f!I>o!)&bFFV|8b@IJHA_B~g%#iO;+w`Q5wc(uvWFseUW6hFw#sNBdFW5{XVII| zq8Zp~`J(MS#^c&#$9fsNGFK#VU*k{NLEfh{zldKqO@8=SX8T*eT6~d^D!%+|25QC0 zaFb*OuavjPMewUc(@!HGi%%1;$kdLM%DnC(tK|1Bzs5U?Ypo>ngQAJcH8)T)U4WU% zFfQt~Wx7Z&QmS;fIq@$2bj$wC*jibM#lV5SXkCkj?`0zKZPv_|XXJ^Gb?3ck$hA0M zO^1+0|8^ho6j|LL@SNDu=Gk*Ttwryx=Apbm7aUC~O=i*}tXQxR^cy;z}Urt7VKX+p39u%9hugy4m(@iJ-LLclqvJ_{PPx@~Sq_xFtzn(= zH6fqfIyO|9d*T;?4uz@b-M2cVmLZjvi@9BE;)o-f1t17?Nm;Kh76_R-?DQ1TZgoYZaLP= z%lKACfClArX!*JC_DSuV4pRGu0Zn_gH)rR_qE<9eB*pc z(pFb{Nth+NVY|A6;}s9jMF(ejzBG62Frn&P94fp8OUCxJ3+;Swx)qH0$2_S5o?9s85(Js6zPNaiqvJA%hh0HKCnFvb^ z&yAf0VZRnylTrD$V|)GNvZhR)$D$5b8uXq=c z!tlXiyG)`R&)M?6H6Oh?lTn&jXVB=YK{qjjybg}o)Eou2Z|;v>)B2HW(!sv7m>SBe zXYNing@0{zH6#rjyr_SJ!`4UO7atH_FSPuT+9k@6n|PN4-g+6UZD@yx_3GT36x1?e z>N>shFg2f5&qOO9-_N+jd`A6_VMV;|)Az^xkC2*I|5Ny^?b&Jr+}69P?JrOOg2+z> zqr~t>xRBvDk9cYcaFbzeZ~q6*ZsY&%&&dCeyZ^zi{QnL4@Z(n{x_~aej}o_`D%P?i zmYrzrzp9JLlFhOAy7zjKe{tS<_`(tNi52{#D&)OBps;N^bK^}irM$&&f7?Os|k zk~{Rp0>tOCXim%W%r2gB2#+DGi4oaU1{3V|5su76IrH}l_gI{Ik+o2e**yUs_}+Q{sofzh0QPi zXnaKkh!FlEu@q3?GmynFcp!)Zhxbnjxi3zg7L)aHZYoCxAka3OJ+Y38d9(fx&JgBp zeN@;*ICn#sgSbB(e;72i7h(RT%@{0LB*xr0ssArX(n=g%N_l-B(%^M~4Eb{Q_mLUx zL^HqV-GA{DNvh+VARJ6uicf2QY-!D6&MThEbpO$n6L0_oSFkJ}=Xc|ahBeJsa??VV zn>?YV2!I=pd4pGz_V&)dxF<_y@RNuM%xw?*xaIDmoA_{s(XX z8M9BoFWxY6ysjmewuH1ak?2w1%HynCzM7<{s93oDA5q+Ued-1;StqI04}Q;Si)Mn- zAx+11qOh}}r|vE?-{2?yUpBDtonOnml>W8r5cEE!$w4G*sMkXO)C+d(L>oL^ocpgH zRBvQ;6_f7<{)=2qyzF8@098b5{xR633Rlvf7NX4yN!7Dyl)xixJup--Jn%X0Ccdhj zO@pi&|ENa>@7!|&#sq2P`~7SaS<@fKE)B^iH{1qihL-d?Y7lfcYW$g~p#OaW6e`y^ zXw+>2W|3<7%T-04l=Ke0YI2TsNU?Mu-M^4px6)*cO%wEubN;$swiM5WuL(g}t{5Bz 
zzev)W(e8(o>dt`M__WB1QW>M>0|WrCP5*rl!k32(T)a@K4n;0(08`c>khUumGkoI$ z%;v1h-x?T^B})Lr*DTE;4(7@b2cSk2eOFmA6FegpFcxPJ9WnXUvSqBNblFttVg^iv z0c~FOJU>BvV!|fiTam{QQKta!%^51=!3ejH7T{YPIO}6D0(ghegst%C1QYo?D}ZE~ zy)tA$UhK^Q7f*hfxFQ!%5Y))F2!}yL&R>{-KEmrMiWrzbziFWTGmU#7W$_67SO(5#m&-cHP z3o&K5sO2b^Iidape5U?HeeV?E9;NCUD`qyl2I#t_TTcwB5rXS4!9_hj9)fDu=tZXE z)@}R2m>aoMsRpbRUa_sk$kPINNjl4d8Nd`7K-a;L68Ra~JwRY86pEiz!3fb9@Wbq} zGT)y_ew9?m&81Y62N9y56uFMucI|fY2joH&q?R(%T4j%rYZDFZpQY<&InjwDC~`Vn z;zTRCz?YEZkd!KB_*^g)A;l3XHOy5Y&`YE^i;rQMKMdqsIbxq^gs}1lVdc@du^@&? zL`5!}Cho|^Yeads;|!TjsDA;}6NOHmylf0Ovk%MOJ+W0Sg!%$eo^XWVNrd`I92{i+ zSV)0RepZ?mBM{PlMu<)Z6Duc9$N*l{PoE6Hiwy8qh$jqw23R3VpaojBM<*ztR{;K~ z>eN>gM1;{YyLg;}o?7xVxi8;}U2-ugS zQrd}8egnQz7#z}1hQEo}Jj;DXc8td4VZ=a|-zO6#SrO!GbYG|oabyR&C!w}L|=_Xh)(Rgj2>bnPYt}}_?--pgs?wAo2Ups18ha$#GWWe7~?|y zDB+&5@50GCnc)coes>^ON4U)Bg$wV8-oFrxYKusOks7Ds6ohsfg#AxSmF*`wNd@~7 zVeLIJ%0-}EGpb7aWcbTKlqPBQiNE%H10%|lxLCq; z$x?*L?|<~okc)YQ{b4rBlNm6E%zzh}yC*NJhI?Zyi{6;IoN*Jzcx`|F95N9zcHyF4 z7z+y#L9#aJWdFW_1EMbreHh)QYL4eUh73aBrC;V`0CjlA)Sjs8WFi(LS_tf?!>r6Q z=iuJ|;D+}}!how)A?yg3eYS6{jSc@f3jjBy;JX?EpvK<@&per^(bq)uLx||dBFo6? 
zL_bg} z7U=VYi^*5Mrx0;`xdi(0sxGWTc;qjHhbqS&Cz>ECiNZ%Z6O``&Fz(xI{?K)_)Qi=x zcboN*DFp@w{=l9l|9u=h$KTB#n z3XDJ&gkVJ|eKpmI;mi~}I5$T%34=5bmC9g`0apR zRq~zV6Fa<$jmL*Wt$f3GuBni-MuhQ`^*e_igOa z9kP)g@-PR_;91(&l9vyjp2Ju>2Vx=>qdf00Cc=j8KHzYROX-6W`<(lU)F^>i6Sn#& z%zYjLzl!|fvggz*v`Xz$CvSTIlvRg#NMm?Q7V~q;G*kMJJ1HThgOQOO9syrB;+w!d zr!|V);L9=ZS;(LKzwoMhDOOV;nyKb$zd7Vx3QJinz*5WtMt`PBxx z?im!e*CQSV3qKswf`Lan0VDf|zOneIY2CHfz(eHd9S1Z zP9Ya!GcQ2J7w=Pqo!HDae!OCR_|w7p#e@@<&jZWi^z|p+*4bai2zjwGZQC70rVCf$ zZWP^A0Y-ZUAh51`!>HOd-~1Zk=g%bs#*U; z+L(cXO2hl0m6nHC>Bt;GWQ|A(EL{>TeL}eWLj1|~e{TKvoB!w56K$BeyPEgUU=;n& zD1I$q^~TGT>-#6tBJe|!w`jTJQ0|gopUuDYmtX%ufNTK$=8ng{{yk4}3wx(X%c<#!Zs2QK`l z9Yrz)h$6e)!yH2$gw~l3kXCLq%;1&WkqyNz>Zms^%@)SU>AwTNpHZi76d?JTez>GD zKp4x-sIfF^!Bh`8eC)IPOsI8<2&07ze;}w5BY3|aa;DCN9ed^)60-*kaU?De5_?a2 zg)}ZB%kncioO8C;9~h+AGOzEAdz(M_w*3qvZgAmePlk`f z3C(n%#pkCme|_!DIgG#O1?T!Rlcv}olXzdv>OPv9jOTGO= zZf;~A9O6*kbY>R|EnOWCVMkcycR=iS=uRRs=^$iSc)p_7EpB8rv#7@Vf_Z78rjaos6H|zamh8I>c`*XgvPe{bM|&c^E+H>U z#s(!)FdgDylxX;}ktY-=(m%YKC^E`i79I`tM2P`X_{!4pL)Zjgc9Z6w^fh6w>A@wi&Ys$&!sgO)npaVb+ZdaU2TM!3S^lmLgvplOfU%Qw8Hl z-tw+SY)_OaV}W)O^4$M~-;F55%stPUU{(ooHn@AS1nuCgpN1HZ1p`R*XSrT+9P;dC z5*_CVVxtWqDZx_UoQJ2Wb{^xcZ2$w`A=K^Of=~Ekvj`e()@%afbks6E2*ByN3}O zd-myXU6Hxarw9C&REN)DA|GZLxkD%r?eO@1Jtl+tcTDbgo=nGCv*8AXd7SMsCf(w> z4u*N>y>BlP9ranl!;3yP;y5;phsZ||_+NHz&HAaW<;-!aoxf<%m zuVW0qQ3USJ^!>h#nTaa!?2(e5m&?Zp9%Q%1&l2u_#@qiVD_cl;-Kyu+tz}}Qi~Qai z?v4c@ze@vIb(?+WZm=os<|#-10{L>z6@+&1&5Faz*Xz&@Pse*=L8C`V!sU8s;t7-o zfyGr$Er4Y|l6OFC88Y8Op9c>i=<#C!KD)JNxEp@vT`3L$i4y~x&)!v5A&MlSFx|Ap z2`$}sxaj~s@CzTP=1+C85E8YOfe3^?wr1@8CmC+^f>5~Iz|$DB$0f|1v5?}ErQ_A@ zLuVonhPs19OCO1f{SdO%K&!{pLFhv12M;uXh*2b0=zdx`fY=tKl+Hwo*IX}gHvBA6 zEr#5OUl~O3WFT1|Nls)C;g(+EY&aO-8G`Q=3t%q>@5kHvktt05$~V*DzSR>~KeWSY z$^ZfU4OBHPgd1`10^<9r7yHkK;!v26Vf_3`=Z3NX*`dhH&l*uTMG>SY}*@>GZ(4 zzFucza6I=t8s3cy-cPI#j}r6S8qnqp*`^n(`535y$*%?6yz%X0DNL?90yponZa&Ax zVl_{}Fu4f+Lo>Zr8_}l@h^O$n7~d@~yVHoMDHCA2+}JpDM2OA;ONi*{6jWxln02_O 
zm4>W~|7G;#9H3MF9}H7w8~^dP(CrGckkIiU3|HwOdoLTas|;8z(637ZlLcG^YZ4nk zA!%u2JRWCVF3wmRPD_#!=t9M!0^}Pv_v24n2S2Z*w*VEB|5h=|U^ z)}%}RRSz?Y7K2`*#wC~^#mJ%?Ie_uc`N}e1^`c%GXh!<}@^_l0kuzHTS#cQ+hGpgU4l50`T!ntmW;iA zN`$U!NOIlUTljCbsY2k@F^z|YAeMi{B1lwWibNH|?Mc@WnPt#`9SJ@&Lv~dy?!z=H zk8zi6!=v6`GJIi34}aqTQ?bc>$&-EJ{!KCNeECpfgM`f zah=f1b;^5py{gpv3JSqSNsImmwqD zih!+EP;5I0nl##Cx1b5=H z^g;2UZOc^9Yw=NCPuRK32#28WLv&**iACdg0QYMG6jN#>I4pw>H@AkC5&Tm6)Y687 zCCARodHx7a))TomuLeQ7m8Lv;uqy$p6VK!#)G8^9tT&$ijDVIRr4g;1Ln#g=?s2%s zfCEYFxmNGG2heH^lSt#6r21uR%RxPZ*S9~RU~S?@)}E1elnnNFwrGE<9J=aeg=wMs z>$jYd-3sH|h^RJtljs|up#mNPd)I{Y0t>*MbeoBohO=+BBR+qRa6>CQ68gl!#5NFJmt&X70(&4YleuFCK*SAeydzU;kk5 z2d*?V!_k8B6wdP|=$i6oAw}YDdY>l3_`7pQVyc3XVoUj)zL*|2NJZamfZLn~a5g8A z{n@zBo|BttocX%fmZ9P!0$&sZFggyPBbJ-p!ciTfP!qbQDBCi5HIaXEQOh+npaE)* zX8uC)VyT(PU#PZo!8A_76p=!yTg&`x<|Bcs?UMi9n;DeHR&}dK(A|)7%$bAZmG@Ip zJ&z}$ZtGkoU-S~J zTN`lheSj296Q%K*=qD=|-uezsS&sEk_KFPpOSo5S?j984R^ zZNq_z?7I7w^XqUW&_D-ZfJ%Yk_J5(h$7n&utN$<0bBXw8n2x4e%o)6RqRx_bF zZmO?w`RyYsa_`eBeMz?Hb9^?EG#ah|yt|GRd?2k;@St{o1v`Ojdi!Y2N7`%?>L~9oyK6~$sUV1hh)=^P0qxs0=X80ZEpTTMD<*l(U94m$e zX)O1))t4cbg^*w!Tr2-2oY1CU0Ngp)L`Ff+IJB2%&1&5gM*K*?v(w_RRk-1*(wES; z!5^U?K{L?=kGK~=;hLxPyJP4YmQH?Os2H0 z3wBdzGqoMDK{=b9&}TY;GNGzoZz44rGxxZ}5puBo;lxiP zq%`O=rOExPe=U^r-())AsHF8fz!ZG?iqTJDO6yh_o7!Sl$KvJvwJZrye7+t{Ba)l? 
zpZ^>vOZ|?ydUrL^%CM$?&tXu|b) zgqTTjUsXZ3Gx16PFG#na8uRd+@a*e0l<(GpG7HxzEsm7gGxlf`2G%zN@k>#qT3vWS zQ`@<=>opWcl}}qoAeu0}wRWNBss7l^tF*M$ zPr~)oR3jg|-hYyU5~v^p6#Bx(i;kAhA@6@an7?>{eEM12dB5{S!6JG+GlfU=Htb>k zIX?Gd9;pCL@E=BcVC-E2`kzx5oS#kKT4bCy*^rL0s~2Y<#Tes-I<3Si!vd`Tf7JaNn18 z+xYve&7sWC+0=RZnm2?HA?g7=pqT|jhzN|Qr-^;s{zNY!fQhfzG~XWNj?;hSI;ThH zVt&`a=!?hxbirPMt!WlPDO;{eq|={`wzlwh%Y$+{n1aAF@>d5XjEMFpX&J3JOeK%n zk(fc6^?5L>T%wO*tr-zY4f_Z@*^=!ClV752unV0zX4k=d+Wv+c=>>79L7!nLyGGaP z`>J8P>Ha14nm6B~38<*E`kT+w8 zldc<4&+O=2$sGp&By?dHrOhZrd({vSk8vy=v6ZRdKz#*8)K-({O4YEMI??7GR)Ia4 z-!!(Vq(~XB$hhi~n~n0J$oQzsS4Er7Bj^!zecAz<6^>{HmpGSc1kWjW6$asrAYZ<# zVKv;VY*L@DQZXG;ixT=h7@%K92$5GdypBV2-Ul~I2j1d=&K3g< ze4A{cZlRMrlOFn%P5*d7%us`~RZv($fbVuCSyK=qDjV& z+pV(i!Uw)tsc}EM_v(mw`O>tBw4Jo-t9fF9cTQa!xnB`Au3xvv)( z)N>U9;P}Sha^n6+rzMWDi zq)qN^>{GjckDtF19>!bm#pVFV^C^VTjk`-Ew=gBf<_cX9#$I>i zV^e<;C4vr4P846sDZDAQ^lGhRwn3HW?^d34#LGD-1}@^v-d7Q0Nyg*M++*0SQMUW} zb!!xFEQXC|qS67fWQQwv+!5P?(Ej8@mR~J1|GCU_BiXK#Psbl1O9ko0NA&AXQC!mr z4fFC!%}6W0iOsa#+ciRWd?l^fwwP$E0WQ34s$U2%5rHhfx_u2dL^7D-A~Z6*@g*kp zjI6-w{);93JQU(Ut{TOv`iGp}a1Jw>U`h2k-oG*7NdHgiE7B|woAgTOr#9RD5s~{N zL$Y(;xNwrr) z_*c=e2X?2>kON}>%iuk3J~xA1w=Wm`j7Uvoq+x#F8Ner9|2ryv?w)$+uV2HAu9ryg zQhwkwFnyBzWcpkjCy1Dszee6M>C<<7DSkJ+5@r%SUi^o$WDVQgj~TW->eg0P$9E?a zO&z*ZUhdWvE-l|Qb$R;wW**)O9U!3#{$h!Avm7C>IQx>-s14DST)kV}dQEiMi!vdC zUimte$#Ot4Iq!k1P005A@cGX8BWZ1>3aNCq+mpxDtY%wO^fkLZ1pWkkOw#kbAD|uO zBY!~`4yqcNhfFWs*8xgxK0LaW-;#j&#Xv3e@H@|9_J*Ozo7I3ah4X;xGDf#gz0lMzZ4BmG`_F>}yw4L51U?1IBIB zy}jI6EZtSve{I@XhkEjsd|U~pqH0+MDE}7)o-i{cMxXn#2P~1EZFesN9FB=NL2>e- zg1=5jGxuq1Ytg|xE=GpTRBuf3nWcRoVE(f&E)kii5Rm?)m00eJ9*JJDg!^t7?>s(Sd4H($IaAW?M(qZtH@N0i_^`0i z!LeF1bDmKb*9kv_N|i~4N7loWU>>zz;P_~SOr2-C_jv$Ssg1W(u~h%g`->ZO0RAP} zK@b*m5s%w;1Wz)Yvgd^)yMT@{?pdiNI!A%zs4Zzzr=@%LroR$| zP~mIAlymjLdbluJu|XE-ncMO%y;rfr;6g+9GjvWhLOvgmKQ|cR!2#qGljhSdVy7rG zdJhysrZHK@cMwkX=6*DxZl5`XoU72^oVxF9T0FSifqFnwco9rR?AVIOcg~DffF&>> zS`?)MeGE4nqbjA+ud)5!5+>fLIQ-pD*uDPzchS)dY_2zv_PeKAormqWMw?S6G{ 
zI?$hnjHRQtlI~&6r}T40y7(Q#03ouhniEPwf-RR}j~DEwr+F8U1xc_2SY>E;nB@ca zJXER6NA84Om{bzxdPnFI?RU+H=<0El*b*lMG`+O|bkpQ{BUX+mh?sHp`q({I?Had- zusH27Ff@&=1XBUISh9&>3e_n-N3R{1^U58*Ew>F`+nLOq3mX+>a+7{|0G{Pvzvy_Rm+3>Y!SqF-{$2$X)zSmuG{e z$PrS&U02tb*RzhT2+$gPgd`hCZOLuSPZP+>1 zaJmflLtEF!Z%+4YD4w%S2Oymx==QUtA#z3M*B;S%H!>P_RSCLD)+*?!bU<=F zR;t+@Rirf;#!5AT)nddeaajqq-sjjhSZ4XztGXUAdmi<<1qb$!E+x+G>$}F!E%LqY zl1DB78Nk1k`lCl=hEQ84%WVqkD~>alAc3FC8^pb|xf&$+apTahlgR(hTIz7oy+r z3g#}o0xPMPm1!;%7Ad>{4nr;Yl=%E|hJpJ${pmjd+`lo&MkX13d@)KX*(0?~katV+ zU?J-$pf>fRF|2p>O6cIE0RG3Ke6!fBNg8hPWxU6X9w;VU5>g3nlDM~dX=5Md88<2( zfE-sZN$-cS+MQ8)GBh6-s55!|li6$wOYW|!Fr>Z~xG@)DHKyaqSCd>vBj?jTyZCTJ$CGBsR4 zb&j}1jU}~Urug_A7PAf^OxBhk`NsafB>B|X(7tdxX4Ys9sF8Q0CNUb9G>XE@qpzKvw|Dyl4IiN|R?@v6I#E_0`U$>z9>Ni1UB zwwJ@h^CM$oLhE6(=bwXlyhVvcbHBdDqYoDjnBhX+lz4i%3{jXw zLdwIjx8S+I%sR@ClH$2>GVy*Sunt5EhTE0U<+D5=Q+S&>G+@izcN<3ml~U-b%8NrA zB9q)f^7)n`-T*#U>opo4)j&W$jM`h*l7lbT8_lx#21_hY7PbreFM7FrGJlMR#T-op z9_7B>D{nB5io`hth zAW4cn0@Uz;r0m*&o)$A7`#C>0oOL&+2aU407iWJ0;^}*6n_UCQl)O6+-`8d2KdL+7 z?HG6)G;fR50p3nwt!6j<(#yLas9Q#IPXL=z>WgBhTm64DtOyz|O1bgzEVD%i2~HS^ zN|;Rl{uMg)Z0xAq>)Iyo)5yWcNZ>j4gss<6a#+<+0!t`1dDR1UG)i0y<+%F_5ox0e}7r`|^%B+Kjt(JTfFT5bT7iicv z!jiN%YeqtmmpvfWB|_=?qtn=SJumS1Kp_P$e20RT7Stp)l|U3US@Q6oM+qw4*pR!l zsUPpu#+@Mg-Xz`5B&91$!6fYgzOUEp6Tb2rw0NlEZBYYr45ugaegZc*sFZFb%(;%* za(2L3vMMqK&-4)DZ&&F2l>j}%XC{*GMjFv>e}ur`A*4=EbzRlc2u0{F+;b9|^iNC-UYDXa2UM#nNVbbojSqaw;U1>+lsCJ zy{p&EE&bG^G6<0p$#UQ|J*q6t=E6Uxha`N?@sz;64y|^aI0b zjYUdgh~3=8*?vah*Y``vs-5ILw8S(ZVn!n4!_ zbSs3m=c58zYc0O1gH;m-N$BhSPEbHC&!=u0*}Sx#fDUhKy?;=EmiMrOJ;@oc$ns3? 
zgs@KljF`DA$nwKYEzy?m?(?r7=DxIf3~k2Wj)SL|r~wo_Ukk%2mM|W)q%dH(CZ1MSSf8- z0v0Vz$8wg5aJE9$b`p{VZlB{{MU_2|0YB%XkMpN9RL-v+HPo6|23(j}$cmC`Qn~&| zOJo{nDk_8{r9FLn+&am00z%r`zXZ*mvg2e-0P@qr@)pW(w=dyvMS&{j^pe$1k(S=# zUwre76{f`_qJj5E{Ik)g5A+1lW2Ah9PT>d&*Vpc=ZSO z0-=9>$TklCZ1#SK4dbQ1I`=$Nomw}%{i)?Bw8a+Lgg+AshRgg3jSUKA^jeqI=1=$f zt*Tvf4{0^O`P-NHXdK!xC`a1@sM^+%HR=m&+Dh34Z(l?c=mUOf)Y>7WzY8TbXo4v2 z+2HF;HWuQr-76N%m~86!=}dzr{~7569GdSIFNh|9wHc1r&izk1Aoo8^LC0%3ofS6Ol&(M7?fjoq&cXJimtc5U+nX6B05#XO8jz0UMTo z8hirjtX$v>)cVmBSiB_u#Ru=co~@^noC(NtNcY~^VB?ApNn)~!t^{?DtpFxn`$o-$N%sniCt&{1 z1#0o-Dlkb1>azbHjC7MF;jNt|YZJC=saQrrtyS0}`OW-TAHHiitf~5iTl^?}*RS>> zxRv4VIkE8e&%uBZP(=p+i?FvCC&q^7)OXtZmq}?1|$!Q1rIk|J}tW=~TOi z4*L#9F!PyO9&HdW(H^uLt|K!mqTe?$ME!n+orTWdQ)i z<#AeUFMgdb+%|a#y;0}t`C%h5SDH@Ui_8PWy*E`*q0zW& z1Mt=07m_o&ekKwo^gk0w?FE=>W(@q#?;S7OO4CK-#@a}qo4P6|-oX|}DE2ua{`L$^ z&)u{(*nt_EAvJ*dPyV%yOv#>)Ms(3)?RM19+YezQ+h?NTaP^m4DK7(GOi90X`jQ0} zXz;?RJ7)^OvHQjjD9*e7GV&VjF4m<^b~dk3;!(>eGLt)x?|rTI(;0N=F#lrH@@B8}6xgg%6_eRl zBGRE#7a+`n)nF-qkkpzC7VtD~z8u7}8PZ@CC^dL!qGC~;b|D706F+8u`g#R@A&@Ya zCiy#)8MB$)fkkte!sT;U-t{r*(t1mOIfehJ$ewhupy21gngn9Mvc(>2ka23;Ml6>H z8?X*e3co2-K*kI_e{tzs{rtAu=x70dIDbTD*p}Ox~a{wX7cAlPx*W^|0JG4;- zZizijc;9|lFz_{ZSrPd~5slw}`N+p*ncN!Ga9Lh*uuotu@0WdG)%pc#v)ol;iXTV%(=<=Z`!1dCaB z-;=oVS%E?x!l=!7?1s%Q*j zh9`6@Aw0xwQH4fZ?YQPkD4Xz`DXXJGmJPDSg4tVdwWyF-1dB;OgMhS@fK%1<)?=aw zzhh>dr@sds&;cAx;(1M;C};sEq@7D?WJOw9CX$I=&JxBr>&HfFHN{$sql8O!+8oL3 zT`Br=o@i6%sGt?S*?N349CwlW?|D954r_@yaGLA>gaSCz)90_P?k&Z;#mKOJQNSyg z)9)XQ!%kaQS7&HmLz2~W>VOXeOgDDKd)%F#Iy}ecSbuwn0P=eNS-*}Jok_c{HW-|w zV(|B;Xw`%d5}K60Fx!G)hKjQ=B9b(^t?(|Mo-8h2{7bLx6t^QlVRXti4yu9Hee~y_ zH`cPQt~7=DCv~nitFdrRD4vo15l$9xSX{&Tl3#4{<-N=hi8nv!<7ikF$@#|SN=mN0 zpXh(D8mNdec8iB9DtzV6eQ9;n3$0|)|xcdmgYjoF&>tnfVyY2>8cFt1<9jqEfwmts|35=-(ls35Rk?6qK1w~ z6(vlALAi(r0VEst=VBN*PlL5s_sLZr>gXf7P<$#Wq3C9d=aS`U;{f}=TmCE(c>|sD z{;m#t>(h~2zX6an4+)$8c;A83yQoVnUmaNMWKj!;;NMnMsGGju&3+K{$MI*FpapkP z+4D=X`oBF;Zj6r)pe5U=U*VcjwEqH4Ex$M*_# 
zTtyNt^^Txnf}nTt7ed_zRIkJO5oC%*Asg@vRMMseqllx2l3n zv-*pyaJ*|FUhuFJj(({&i|OFEmmU*zFo?&CCm&5nHnntd>ci70_9h5$+59aYqg-UJ(59?aD0 zn8~@Sqllx>P>?UQUHb|z*x7~LbkOt?47iltZ3P+HNjen;E_s9+OQC@_%N~F1ELIFJ z5h-&t1BCuthI;#|ush~ZD(g+PVIN)Lz*Nv~KorkODu(dEy0`WCG-L|)f&KqZ&8};m ziYaYpN-1mPdk+ zSD0_RfxAmq+MK*hHaFu#4|O~RgwxjKe-p=vbsXf|o)jo&N{AOku%MWUx@M?$jC1?g%`D(GPjN2?Uy4+q|ra`oCjYCy7DHD5%lv}AgU zAl`zMn;*NvVVb zs{D`iII+m!z5RXp1Rxc?uki;)U=OH*{HkTpL1*O4$pyq~c%L%W+#v+ZOOq`Qe9Qvc zAQ!3Uz|m!aX-AeHDh-zBbmn@_A&Z&jSLY{l6#Moit6jXcDh8uM*~f!eK#|zJ@_IrR zvz4p;jcAW%-+OOFz|e_N-R_3-|HT`%vn&efNzfShv#7M1{Qiba!iOcYzY5L#bivZ zAVM0fZ!-a4S9;X(1uA&ikn3{`-AZ@{ldXTrL2Dk&HOoQu0GKzYC8}2fVGjovN;0oY zf;76R@&Eh1TJYY-*m=VOtRNX0fqEt!0>mZo{~zw&GODVs?-!O3HoXC9iA_sNxTU*8 zK&4?zcZk%cyBnlL8Wj~J6@-l-AR&?h(o#yNAOeDCF0T8!pZj{wdEQU&7-x+071o|> zt{MM1=l>UzThlx(*6{gQ=>Q%089fomC@jD1o!Uv-WHoPjZ(Ioj*&C>bk^k&rQgZn5 zI$W-$0L_g6lA$*Zl%ufJ zeNjTs0-nR+_-Txr_DY6VuAe9j3^nkv6#x9#_psPNDq@F-7nEyYzH>O)f&?e@1 zjqPIzUiwCU>;BfzppSm>pYcZ{lnH$67)~nhc%+))hbvFFk<`#QDF<1hi(JpYiXsO^ zNn$8Jrsi9t#iU^Sc4EK!&MJN6=uUCs1GkLd|N$3m*158suWnqaS9P-9CgfKjX}cl4Hyyb2l-&%mhWWxVCD z0jpO4BRP0UvFv5AM5aNyXY=(!@iA9H*h{sH7str|F6=HY z_}qBz7aeZ#Xm3~%Y6k>%6f~_wi9n=UUo$Ox=`V37B}$GSH1kN&-7|X-*s3~I)WGunHmKt5a`}7*XjwQjtOpxe?;<8x7z&C6n)Rn&vSJPY zR2%j|%g)D3Sxi*em!WBXDOUXD->U)@l;>>!*f-=&UPWxNHhd0EVA;*F2cFP?ZP`|C z@=tkdas}HKird5{tHI1ln5L8(6Yfjk-Pm$>sT5d1t?r4-^&Nw1{`KJ?Frh$TA|wX` zJhV*p1lr3%6%(ZaU9-J`DtmcK`|>j4f84w9WrX|EQm?`kU) zt%D${ZyvFBBDk1I$|XH!5Blb$;?fO4Tx<}|C74NB&GW6x=_BRxp#YbG%&`6Ey6FV1 zufb9AUf;TJxf6ypLE*@-pEOJJpVb3n`QBtE&qyYtNDBh@5j4ao>o*_&$F5d^K=%%K z;|Q-o8LzZ~|81Bk1P|CbnEkULAyCj(CYb9?Y|@}0XxSI37X;y6+7P8dUiI;%F81hE z(7HC46THR(cQvIDx?ozNE{7V0@qqYJ#O^&T4)(g(2;E@tHkHvdF!;JF{kd^JLUp36 zA3Bo(s|vIA>wo>MoCmhP@gpqQC zN|J6~wVBmvS@=UVa_KlR&hDK~0>vSyQAHxF{ zW`z4f34E$(EZ_^QQiFHSD!ef}1V({_06~Z#{49d- zLshQO`4U^>HcdX|-Uv4wyI-zI3wnyq`qpj2Fk$E+JbtfMum;R_8}a|E=UxyZbjfi) z%7IVd5jGmtM(9G3*r}ukX$7FW|Jd>uZp2@qszQzm#ADs`rgB|H z(DOq|xF_~;UAT~^Q1m{8;kzyP2K<364Og!frMXqcfg-hOxL)&P>0r%liD 
zeN2u3;lS$&qdpHVKt*GrxSJ^S-y41gCif1?J4F-IEfSD-5@8$Ii0@#2%y8zpEWR`Y z*HX;@Ca}KE;R{U|zz2sYpa4Mp&kc|q%qy`CFtX=+X`)&aSntigXs`~`4+Bk_1mWa# zFTk1UKbGG}IRq?{{rR088t;X*_EMGieNhdX$`_?OL#t2{I%JA+Z=G!0=?2&iqoD1u zLQxkia^)RdgcmD;LSRl7P%xC6oTTj7AHlgL!85jBbcN0+_jvGB$40dhTf9DgNqhPtTrHBf_mR->dNV0~Ep ztm_EuZub=BF2TaGU^z5IDb#cPgtAL1e(eAUdK4BybPLQpzy*dzYX-9E3k_JwEnhw& zf_5qZzXE$xh}r?#S{FKkYdlGQ<*m~B(s}5`A}e%!q|hGYfV%VIsfQ3`DePDp5Cl%` zOmAMil>m~m;t1=LN$#;hj z+!z&CRfa(Mq>%akEC=8HuZk-a5yQ%AbVlHU&Dqt&!q3T$L5qEi$(!S$4bw(Wc7ACD z&RZWSstr05;v80oinoFI!m|pqukl{`j`zPm4nz}TtU6D?lne<;3#hMWMcJ)Xs|P`F z(Kva_tRZNx6T(*do47f5SqvY7gb*4Rn_3m)!b&e}6C1Jvs6K}~FaEPqS}?gs)Zhvk zou=5`IEreqMXJ}HEasIZ5g*yBPAI@`B3nhp5pHo4R+X8V8XDD^@O5cHxc$SQ+@9qv zaEKlw|M&N?gkJ*F&CW+ttxaK(io;B5ux{T+KSV7K4YEA1(^wAtqZH)Za>{>6{~_(o zmF99cbd3(fy&(QTqeBz-(*_lxJ=66%*NMBNH?y{k{hHT>*g=A%xO+;C{Ludixz^k( zQSLQ})%MqbDf$YFL{-%Xr8V&K-~JYI3jnARPIt*NFsMqo)P|ROic0N-|7!noK5P$E zl}~%m^cZRJ`fqUjkZ5w!(y2qYEN?k|s!lEXyM>*?L2RLHSU)H;L z2*6lso^1HvPtiyG_0%Cf^~E!;W$H`NGsyKUmVZA3_QOy~hYa9cclge&RaHBmegtg<-9jtwMF2Qv?gk+(~7WSMe9kxmPCTcNP9?VEs`9{S7Lx z0~O?cHm)LpL1}40!`am){ohRl%DjYyL0yT>)(9f!z6>BMA+J^46yYxKU%o-b**2gl zN9nNIt*%8T=|bV|kNc@#zHPP(`1?Z~tVzRfLlORZuDJ1LgQpsp95|`i{3Vb|UC}pX zTL&x-Yz3Cf*jjU`RY`!*7Ml75*uz{@5G1)yc__{NjbJt^EIa;lG>eyqS-tr96v3J? 
zK}G5tC&E@tbMd(xa+f+{tWdufI%3GCY^+8gP1fxB>CV0Ixfc-81+ai74ZVj-YyhKC z`PGUv7s~QGH64>JgUNadc>kciunFsJ-KPK#=(pA?EjaBc2+M;FdfW9PG`+Dh*b z?0p<5z7_T56dG$`FIeC=a<2FrQT0DBR4BR%>v_i$!!$~SNcEB4k=P^Tx|~S3ZF&2z zi59j)UVZ$&wwKN@@ynKQp_c;oLOXj^jUGQ3@uAQ?K!Snb&m({{(-u zLEqSaa=QNU3B#cZ>>)Y$0^8B}6L#FkgA)by1-_Z+$+EWXN`c7Fx>vA4jo8`RZsnR^ zd6mSIbOj7P6VTimONy|EG+_oCeqUk@b4DJ{PVWx=0x9&BS1mtHe8J5-ji)=UUxUC6 zUKi{FNWC=!^k$L`J*)|hKnB%@j zH=j8Jm;F2_d-fEg*`TD4$!X@0Cif4Il z5@dLY7!A2H4+L2Ku7nx<)Nd59m3K=P|9&6R@iu&DGKRPazJT3*?DIX>zTmDl$0Rkl z1uEPWu=i(|Gjz7UECT#pOg~6Fu*W5nbu%51D7ER6 zZo~VTE%Pjhi|X2X_H!H9o$Qu9y!#u#(?_B93A06jSn_EpVA{3eEp(4!L0J;;1&r#+ zdy_7qZkh3@hSFcg-+hHa=sCPYo*=1(iANr!c7IfQD~1_VZHAaY+3XHvp;M4K_9<# zu>@co&py0!i;h>iAiwg>)JJnbPms|fN*4mGwIinI)cilw(F^)G5rFeC>RQ*2+y*z( z-3N3UHuunw2_vr)lF}t(YTFe~v=_tOVHFH_rv1x`t4%4~CrAaoz3XtJCU3 zLA(Z;s~&(!;R_UouNmwgdjxVb)?gs^(a~xuf24?&roh{>X@G~%0L9(b27B-?uqQw0 z#KBg0pAT_Y1KjmyCWYo8;sEBomcN0*5z9@Csv5#(7^CVT@LP;V+A<3m7?w1HAr<&F z&R&H{%FtLq9q@SUXMuQj?HhED>+ELT;2ecykAQs}?AR0#rf0#OdE;3y_MlYsh?uGm zpbJTOVPE?$H3``C-w(xszUK5Dd(^tJwd*~AZ2F!buQb1`dN&o5Ng?od^tPs-R%&0D zwh`B#ck|&yGl1r`rpUd9zvJ=B3BuhFnN_XlO%Ne0u;pO53wLFVH|eLPp*3_M)>~Gn=X21G(#tA_@`)Velkto9aG6^)&q1t;P-65$7jA;ZD_;= zqJwb*xB-=8%1n9Ev6zPJ88*YW$JQ70A0Y2yXaEiBKfMM@Q(Ug@WCwq}z+(ilP{o*T+Wge!HF-Baa@CNWLvBV!G*fO{Uc6;#Fq@*>7kVPW)ma9w=x$>?V z_+>;(g^k~U7%(y28s`uOnsw$h-Up!aCiVSbs#ol*6$$DKsy%u{a zU4X2%YKM8${IKaEi3`8(@tuU2Ip{!7R!`)8D1j2q?J-iyb$crdIn&kW+Kue%MKXA@ zAv)LJ|jMJ^X;rjMl){m8wnlV)Hv8_&c zdESjjhk%0E^@Cw;BF5uMzN@7VV&@!)c@FccDl$@~H*k=&Z{%`%O7peT$8L!%w&~Gr z>S&BI$@^ZVSvZ<_nkqc*L8z)&ZOzw2bV@WMMueaC`t{dM!D`M2i6o|`r)hE4606;F zLRZyf!ucb&=r~Y*o8NtucwY4R40fM^NmO`b9E>QpQx%CW1Yo%!^V*b06Ya5+GRE$) zhl;!NLXMh^AX-9BHbJ1vIxXi!<{+Iq4~+^81mb(ajv_ zDq6Wqm*m)UEa0A91hv&4s7-%~*koCRhka-RWckuFITcH!M|GWI5L~&S#+SB}JAF15 zoK-ps^RWXpUg~zJGWm7x9O(YK9trqeU}UE8CSuqbS^J?dfE6Q!>F`lU*mP9kGO3S3#L@Ha!rEz3PCn&=c- zh2&=sFUJ+SH_ZCT$dlv|1vYBcj(!+qG1Ds7dXq&UIQVNw@|#lIHWv9F&$HlBUc`_y z?tV1g$e>SWg1Mg*YsDAZ{NK~#j>x;lItV+C6VIpG<WdKUI^BUPacW2oRv0vnC 
zR?GL}uRYy8>zM0IUj8#rGnIxODKDgO6X5E_C5789KZ~@%3b>XsJCm&Dt$_hL|37HS zGjL6FX+ua29lz-j(4Q>1`fSS8mJR}fz&k+MO4tkYP1_60h_0D%3Zy*;&i@-lzVj6G z-e4b-9BSMAmc?wa`4)zS4z2wwRcPY)(*6}T6zwYb^$%D9F@~S_RbvVBM5CRMT89E* z0F17gWmDo4AS29G+rq(Gv{f*XgS{pFFoi2Rh0`mBi@ZwLz~{z&W>sl&m0Kf^@124x z0nRR2mvns2x~UuhdN?9SJyN##s(Z^}maMGGVg`_+IfYhqkqe%N{?a}EwU?+I*Ff^1 zofOetwnEmc>$RvNtMWbfV2kl{UeaYwi>`K1;MWH|w!KcY((uQ~(bRk%PO`46N{(5^ zc66g$rdQsUv?(}L-m~Z#?9j^mP5i)br{&0>Zv_2wKhW3zSqKdNlX!Amyy|>Sj-MzD za2T`~ZHI4Ej7_+_5qI6Ee9_g#y%3;eS3T^w!MxxJkFA!QpWyrM&oa(KRJudG7oG~2 zpx(m4jV7^i5q3!^*%F-+$z9we$CxP9%>zjj_D*h6Op7<5s-deMyEZN&5{Np8Jvk@7 z-)w5He#Y!#VSO_i$xxu^f^FB6BTGDzmAU4TQ0R`Q^D(}xBPyFlt zOeb<#VOYmW0jEK$8$iJ}zL^mJNjp4zG{%O6cd3$=-D9Mlz_ zx!@|#Te&K4e==GLGOKsgmg=CH8V0=xZsfk0t*l4vXuj+{B2o@z0OE)Al>*W zf4-_JyU+XsD6oc+npRf{EMqRUW9?bMyYuxD@&c!N=QN&Hqz_9v1`iPZ4C(QnHGC;A9yKJwMSN;TLdDsv2^>8`;gf zU4p45Nr*~1UEO-DPhk|vFN`HrUKp?~zO((>hQUz^wYgi}9FA~5%1}O)S7Ue6u_~B0 zH*m;6eB626oj?$FU%{;9iY7ArOH!lj3G*3jG1ict;!3a$=d&~f=dsx1CS^=*S>xsG zdM3fVc|HSix4@~%%3UeuIb%-E0q)zhUn6-IXE|pB?4F zR8-N~2UB|Kc{bedRN_A|L`I}j=Vkea8lrJ9<5P!|y$-{&OTFoJI;;0PAy;4cil#&zdB}dN+Y5;s5L48v&e}j(YqiwP+ZpQ^s6f5td9#} z;TzLQ&}d=`$eBepfB;_CmdcYZnt8R2?P{{F%1HPFi-*6KLE|K5yV9uSsHjU>+is%_#Nl7cipZ*=m!>?RUqGsGE9LQj5= zh$4SF7V_)r(r>VP`icJ_H`XO>qdVW#@1)8XU^a2ZnJqT}5(aX!j|hDYjy>Ewg>#3smff*A8PEJGGNj9jsE0 z9nuHTlkas%i>I(YH-SY#_cO(+&k~Lf|8l`*vhvvEEA6v)M=&odhr@RbPI@3?;>J4E z8#ok?j#`OZO3(h57Uwb)*{j&5kUr*~Vt8}2o;5CevMIH^Oa%4#_*=3lMW%fHgaub| zGXR#dbs!)5j4k_u#9kgPNZO4Lb3 z{OCsHT5l<{s3a$C`s2feKyqW#RTpqiodA)?hsJs=B6ocwmk~>g2i&AqR`INEqYr7` zQQ8TrD`F=2S)G|*4L5!g9@T!#e`GLk+>tlF4K{!G)l&xicrhWCAIG_Ue0H!_KDV`P zcU4c!iQ(jXDt9{k(2Hhfcf5IA|czAVu@&}V&f1%XmI zX}h*g8&_A?i*n1ou|SkMRNc83RuXI2ToIjx$nQU>ABcZPUbR~yP1?2hKAmUsc2;e~ zACSTDdosTlE}EJe+L8MZY^74Jjwn951=dma!Ib4Rd;c;`q1EY#uD@Q(4>F#p?sjG^7diQ!FE}H z!!mWFW$y1(W-B_>ud7x^Am4NxaHFw@UVNBfZGLZ6uU+2e$n$;**2F?kz53T}J(j)8 zD86mwB3huTEZSLSs%m2Y5^9uYhR2wOMaZgLLVR60B$9MrGTxHjKc4Q3O-%g`2PT5G z&&cWK_`&G|1eluo0_vj-Ak8453`scBmo~Nz9S2YQ9!G 
zZ!+E8yn%fDtiaz%&NwUA+BT3<($A*@T|3blV5%b+yssjLg< zX49DAz1iv5%1w-vk7s#@C=*XX!;;EG2K-v{CkcPStTCE&oi2KxI*vesoph3JE4@zM zPT6fH!R@hSx0>?)$KR*7-*zY5QWkuW(7g6|MU7f+F?x5d#D|%7M_zaL%KVM_HpT*M z&X`?1Pc_^l4c+VLNybp)JA7;a)SDo?2fB=!Q%?b;EaQX@h-Z<&vf+N6bJP*pZF&kp zn@BNM!`P%=dDCIj&8?+Hzv>wFeaqcF&k)>&xW;1=%8uM7hr*P;SmjwMltsHudsjQ8 zXA)8-D+_b=W{^G#zCV{{zvwlf&`V3&nkh;dTJRhXJD=8MELFD!BrRF^vGYIS1BQMO zi-cVtf5SoE$eTHfRhhj`%DG@`wp<6=X}Ep9T~drvl=Gv?t+=mn_u7#h#q>5bGyOz&csRM3Ezpdid@)$5@)f zA`1$+#~}THB1byBr3oXGaw<|0IWnH|A~x~y5X-|AItE!)nbi^$o`}i;AWbh$SC_#L z!cyo{=sp$vvO95eG?-LgH2D&+wF5 zn$$hg2+3g2Gt&v{kUo)V#uW$%KH+X zQQfw$^Xe!B>-y>LtTHmdjKxZm`6dQ8Ko$w66dlkDyVGeW`YQA+ao}yWq}O#IC`f!c zq~l`ea)V-bK{=ARlx#rIAR6sx0tD?)rrryRu1}ohbpW2JONA@Sd{_C-x_v(R^~GF{ z1PSy(|B}AFtr`+SY~<2lJwtz?qc4&|zehN2=(GsRV%to17lFR zdrJ=x{Tp}L6TyDTTS+e3fblgCkiW`+YAttYyAy=*!6wGEmXWcVPAZGi`&9RD@oSRr zeGJbkxcC~t_n|X^=&FQa>8wS^MGzm)DdtkHxca8N7(yM%tsW;mLI@~7)YqiBAe3R1 z5?i<)(rQv+b12FEkSEA25g-VA2*^QCa5Yjc5Z;rhP~ys9M3qmJ+%bG8P**0uTosy| ztSHB{5zmVzltHj4zSL=ZX^hZS05yH6du=@(t`-OAG8#n(IS}YmbPu%LI?cY5A?zU; zRzLo?&vE|jxFdir@c&0ul2}nd9a_o8{0Athw*g>aaMVsD*yz#C_@y75f8BCzvpg-&}w^aGA6;K&lG$!w-OiN=!2_+nEI_&hK4UJwNey zx9q-S@*0wN1Nn4M8Wg&?;mLlMTgnr+FNKENn4KHZXlW1+-GE=lA@fz+S8u;R012|^ z7kNu4(F*MAaBdDOQN9J17Za{PKiLu z`p))JLN2740l>#$7E)}fFRc`YY`nS?q7#|HupFvJ2iwNE=e%*3z1AtkpI_ zo9}F>z7u#H=8h73wcZ6VIJw};^4|Ig{?yZ{DM~?~v&k(#;H5Nk?b+i+V9E9MZJG+b zBd!rpg-KojV*taSI9ukD^#E7;L_tEklhRD;%6RnIrCSZss3&)V<4J2?9s`MJGh_`g z>-AY%rk;9d8|eLZ6z?4S8n?W?*6F;-W=ve!70&)XN=^}~Pb~WEI=2!AFNB2^b(VYG zNe2TdZ<&3RU4B54P>|@yD3uX(PXTExrojT(zc7i>SV#N-Ib0t|(F3K9zi+hJwg8ZY zxKoQ#Qj38W6F3iWq>P(r0AK~&1XerALjo(WuO|uuiDH#qseJ9Y%jia9IQ z^k&pKT8aZt%^=tlDyz3?xt=a5vgeI?c8?rqZfo*?I}LrtR!Jksj(!{>dH!puC3_3( zXS^>U2>|iTLar~V3!syZ%6<+=?i>ia1A_@S&^&u9D<$86YmFxuN$;A0n)%KVS7XS8e8{9m$yHAVkdDl+J3jvQo zxX0SEz{RnEnXLzV@*Z#tnE|p#^9euU9vXjI;0Q4T_NZIH1ItF1dgP(5%sUz%ysGAH zi9hRrtF$TIwP2*+h7=vTGh!^+;S1o+qovOSbY*KP1s*tG`5BaWKui;52&?bx8{dOh zihs}P82p@}CEVLaw+247v#PYhwqglnyIz^`S`-({JR?FTjAhtxMG*M#BPo4sM|~2k 
zhPRJ>>h4s=eF3WOP5he+`uUIJYRR(RYm{KiTpfrmo=_we?}R}5InypVm9^hYY&5oX z$KpRHtY?AvRpNTsLax~N9BZkt;`x)Vo-+WQV)k5Bw!x`)8_>FhRUac)JYgKAwv=mgOY z3AX;O%?>*kG4{;b1`De0Fs+nD`MRjF2iYSF+87yM z+9AF`Wt89;~_5dZ|RGu_PXwSCH5>9@VDRd!NRO-0eO?j%mTx zi~$wqLIGBgW*m8G!)jE;${fIvPg;UH@Ye3$(f)RN>8DGN!nwkJ=CcrwiFDn_y=q{q z5b$=`-D`H>1+dy7qIf$fbNXw~5m|jmf{kY>oX7Dzc)d-)Zi$)U0nn!%AfHXnjrQ%a zdbunoXs-eClT9eD$Xv}e?Lqvj_d|WlX^%gi1UA>a-{FpROX33Mfoi8_8@i#)wvj$nusS}ZQ9j*#ONZ>O7d4E?YJmXmF;Py1QGFL|$H z5MkANepodNOqiAj7mVbG*_AE0LVYd%7y!qky=@DO=Tm&L%YW?VW8+I1 zZD};#QK=<|aZ)0M^ZmZ(7PJ8mTiFNb>k}MH)jSb#vkX3TJx#P$w_11M3WUirCk{J^ znsaFo(oiVsP>}(#4q4T(*j-Bn-3}R+jFt{uD@imQrNIOZlKKygO09vT2SoWb&f)Le z8yE0y0u!iTGD3KWp2Wgk#$B76#K*b!OKa;t-oY_nfHw zT$d5LWoYVpqgB0os*jV7!FK`)@al27+fPeelce70GNsTw(jdG+{KcNw8>wC6%Z)a^ zzKu{ACi%v`WhyzrmC6mvn^Xh3_?(78z=a{X31pwYJPqusYJBR%N#-<;In)Z4IPZdD`FWb z4TD7=oJHI>N;tsgIA2E~6**hHn5}BAR&TMzoUBy*UhdISRFx+kK!#_k#TLIPU*lN96sN_S!W!^{ z;7`?*^b5`>qq(-aT^n1$9_Cz(h@k|r_p$UVNC0=DkY~9%BcH0~@)O@)1fwqYVWje$j9# zM+N+?S%<6cCigeRMU?0+(dJp!U`4(!RN77~rwvo{eD@(1Wc@yL#&$0!OK#IH+Y+2Q zUbCHCgK-VB^HrRQ)lP1+B`{4yN-Yt*=hoSmGBM(o5Yb6{GGN~wA*-k0Ji(S1MgM(F zl?kMZnc6b0lll;L2+<$a!2|Meb012!WbkeLN|tdy(Ig}it(Y*k`t;*wWFkS{<;xW6 zYEP)9Gc|tn`%E1*y(|1M^nw>J9gu?oQwnU9>H)4*?BOf@$XvcYzEXX*`o`3n=m0fO zI=aSZ;qrpcs%`wwKA8HW`+ghD-kp^zmFXrqvrcblEMxsn?0vX?KCS$`6|0Uqds9bH zYP1NO=QDg9M0b;^cAWY3Z2wQ#&F_de=<5VGlQbJu8I23w0qybVH781o7OnS5ZgX^r z%3jQbftVOW70INMFiWLE>^`H~^NOq!=XPf@A#OY)smz>cXI~E<6L*3~($6p?UvN$t z{IQO+etInPPW>sYBtxTFU09;6|IT=F1$>}qp!44Oem0ibk99V(JtyCS-eC7;&dsOn zmhPRj4QS(7ngk)C)#3EAKY%VYQCa(uCj#>Vnda%kzb>vVslN8sB|2-D0vxgCVBoBq zo_=St>4V?;YL{Kw81_~^H|r8Jc^?o$L&vOSK>72ZMROcqtL^6?8TX9@Gr6o z^x_5-H0SPT-&b)6K7KOjb=HJhM(lO1;13GHi2&@(%`rsSMv?|loQ+(89hvX zxr%;(t|SdI=}Lx^WE~!his-Dx8G?jtmTQtkyclRE2QI(m#Wu2zo(bD9_WRZFlJ}Mh zyjLkA`5t_~MzTjZJLUKgrLw5pTAHEPeD|3Lks>PSAP@baH(e!r7R~v|iB@$egVXqj z355x;*Q4G}n4W9rY#Lr^`jQI2>v!!3O#m&H}acqZ8lMn`f)k(?rGx@ZnOz(F)K}GYbaqr8NU`jzY zel$nl;+|EaKy42z+x(nU`s->1qMP-*iR?SZk7~;8m-j|!!MS-JANNEVq;Z{>Yobo% 
zF`t&G3S0%C4@*&FkwUTCTStUI#tg;HdW)Hct!FGKxKD{gPn294o%)WL>Md-^0k(9? zdB9;()5<2JMfXD1_|4$pdmMOK>5(2Ev!W=CJn$UrQssy>;e34ET#ushyj`|=ALpr9%Q7ydW2q!uZFcUxyFU z^f>4O@VxL!%>8H9+1~dlWy#EBl__vB+KLk3;uEN25i-PtiKy`_2R`;?V-E$5Bz7%z zG%O-#xFT9h481pT*@C6pe1K+AkA3fB`WSp9R%kHBj(}^`aORk$FETcJ=>DftvZ-~> zy^=qbwe5XRUr~RfJM;9VBf_k-ByKEyr1x1reUJVPM`Xh^c?Vx-LTZ=P84*|= z?Ubu)wUQ>fR4^9}Bz(IkI=~EoYr(M<5n?*9Yl(I@T(YGEZ^<D+nALRS~^4lKl;-7a6e zCE#<0AedpilwhB#5bpN&8}(O#?6-7!{$RN}JENQNRtA{%uvSQ9xgJ*x;oFtOgopgL zQUnYvHgNjdftmn{mRv~*lX!O2X#NV5AdJCC&Lxecy@!+{1Ba63$SRHShK51ydPGXm zbmz-iY1C?z%)>a4Bq@{Z=FG7O?@U$G{4vProw_2r=6k2#fFzhZp?J(9hrJ2zfD4)q zM`;_z9d+)s~vJG-sqweBPAQE$yvCE1B5aFH#RLB>vLrQCVm%yDcwAxbwF5vJq_5J-QDuZ2{c%!>g&ND%6p=CiB6XK^*H>+s0F>SHU6OGP^v!of5 zZnP^bY;)u?kwd05z9QnrtizDiCUVzhRs7Qnvg%FbUM&*#OeqGOmaYgieisl`5X#_O zcQc|?H^iGj+Y>9eTubEG;0;=N(DC*6_nS2+zUh8eZI2mq``Oed+un9U_3Oz|%aqvK zs$fpT)31!r0&BAmF=3vD`ZG`Jqg`J{>VT8;f_T2v8~7R77Qs2mdz(9ut>WRzqjufa z)KyCyidm&ABQ_b@-|J+oZR*r-S>Yk&g)l!Rbd&=yL+S{zwoZjQ>zKS* zGVjoRka*@|+)5C({7x@{u^PNDGRZZz*3SY6|e># zb?3`7j{>%9y$t)@nK&Wn!dO|79m3eYu6B{K)|K0mFv<+VjC-_p-4-2(8Y9?5%w^hr zZQq+KOU`b(P&aNo4=eRxaKa(n( zK@WC*NM>K63y=LW-hO{+idaxUi;}GH^;hjc888c!0)gzyfdboj1t(D>(d@eijC46X z(!wu}#)!t&dHHqJDvmR^Gx5qdq4AmqqrzaoQ99+O)9{l1fNEn1=MVE>ERc*LhM#l| zPOv=D|Pb?#xX7;5+G8#H8X+C+n)-{$^@L zwCMrc1u3N)p}x1z^rnY4&21JboRD=j&*{h}%&AR&0sH!6&y|Vd@96jE-(m2kHx5F1 zcWPz6x(PU4*>l2=4i>~ciIRI6N!7A_=TTC$L6GpGa%}OF%$?W7-p+zAi}i0Fb3k-| zS(c(>IbfA|mj+0Z*4J-ISHZF2sla4#xbP1fF{=rm9t>=_zWR<`+p8_(jwA(;kM~#g zsJjziUVR)|=l$(u275>d64p;{$4Ic@0+qtzUsfFZ2#YmW2>vV7{YKOJ>qDFT5a9cC z#gwLpMqW2kU^p?}hYKoL5Io@!{aId`yuZL4=?oXF91#ck_PCPZ;^re{CUKTHqZfycQ`Drd+gdQwLjX<`3 zne|@C+3Qy2e4Kn^Li~^~#`cVbbTt|2G3X1MMDTVWvqHJ_RKU*euqV;Kh6fVa7j~_3 zj1U5V&#w{v6jJ)#?bmbz@CHu=KnOmTYcNt_o2vp40W!5vC~}5u8c0m^`5>Hu`~U4C zbORQ>fnK3GirS=yzj?EEyCgy2!G)8h*BOweK&Ih?EdVjtuI&@>1R2>o7Y>)dfn)l8 zz_2U@bn<@44RdniJCSpWW_rwjh#{y9+$VYbfpB65Oo=T)XMt&7ePl)aY6@{BtL|M! 
z1t52KkWm|F+ZYCk*Bw%+UAYEun@N_!g8Igj!$Sar9K5l3bh|#VrMBg7>4Tr7FQBH; z!CyQ0jaW=vNC?Wo?AUo9vi2l<2H4aueb4trK^!mrM`-z?G>$%{scuA0vm<%c%+NP~ zy=Vi0iGy@_IR`+pfb9xPR!e=$|E4iuMGd_D15&sl6g| zNG2D`&!@U6!pYYem;(sB8=mSDF8pcW^cu47z6#1+m&fcUZnH`&L1jyKLkY5Z2(L70L{o-$m)1j^t0AdmWk7~uQ8Lr^gZ*b|QnTDvzEnw~$F;|x&bcM6e4+ALc{rNyl!?a;==G>t9@F3to_Fs;k z{^CO`W}f}u3_xzF0kZXke4l-1ZCb%##9!l(5Dx|vV{k{rs~^ASlb+1`0G??7EFo{j zyb1tk$-80}fK)=~LUwY5BMt8+Er5Wf0MI_Fw4MQ*H8GHe2Y;?IP`4QsKLWv^oNIQ{Ye43AjfNfu>hDc;yl^kw##3` z7^HR1a$f;kRIWohLDM5YcQ6Tkfrw_mNM1>D;sHpHzr2TUl)^o68EW*Fii+ckn1^MD zhz&ZRdb52mo3uq`j$=B>tA4MrH~EZ$S~8h@3I$0o8g33s?ntFvdovE9N?WCnC2AyR z{OUbfyQD74Dqw5+fk7XDhb4J{IzIy-iDeSB8Wqkwz#o6ICterp=>W5Tfuwiq&GY~x zfRb=l(p+|TH-RbT23HiI;(rbT@Y*&=fbTbbaQKeHWcyI>_aBh?VrEp~4T^vXdsxOQ zgeRI^1r6;6#7c}gofT&#%f%r;SyaT86L`H|7PyQyf;88sJ{*|~7*a9rqoNhTfU}UE zHgL;3H(#Dtd;|>GeyhG-jOXg= zz`*~3CmbO~ws!)qK$wNhMGsRjs~MfloJ32PNAR)Ec00&s!Gzvs0<8Y8GL(1#=f14| z+tE;K{oZvW)DN%8=GSXFDyZoJ$$h=>RF3bFJ;=4Av9Q&)J+;X(>kiVk8pH z;BCD*11>ko2=nM_@3xE4HZU$6{2?tbH_XK#@RXO@)=Nz`8#t{Cc7v?=ScV^9G8yPZ z8nS_uE3raR;tzQ-!My(PGizt6jrS1zBu=nZ$oCU_N2n0GB5dww8?|*mizB_KNN<7B zPIbx%Iuhmgjmn*^TA(KD4|JPcq{xPZ4Uyy!MNyOw7MV3eGKk*;<9xwgoIe2cAyb`Y zj{v{uiw}{UCJLYo$keu~!>>NnDr9h}4r@rM_B5fYo(7oU?|^v=w53jH#-yDXRds8> z52-5|Mat0EAx^#*A!j)zJo1?9?2}22Ap6(S5GAIUI4{bvmY97lPXb7piF|tyF~6`n zZ*_rbu_n9D407OZ5RQc>sDxcz5Pag;9co=$BuhK{uv!KWzOyU9JPanqBFPj`u?&z| z;hBo;MOEs@Q1R(L`+0Ta7LniE#=mu-hk)F`r;Hzt#b1TemS8iT!fvs)#+gG%8 zD!$eG7+fJ79I5DHsm)75z0pDT_Tjy$9hp6N*Px3w&I0^qW-Cv8f$rH)F$f|hvRi=* zZTgfk1wlWJ0Rsc576ki%_-gHx;bS-#rLa}Oi!xnBPTQ_a8IqI(sR%_pPQHVbw804& zLC~sl!eZ~hICOKilB|FqXWcmHR=F_qs%i#WW?nBQ^1C)(5z?yUGVDb zCa1(TKFC5ZipfS*Q3i~ z1E?OhdrZGgCsCD~STHgp=+2h80>y?m$8%118;EW!o4YuB{mLl(-D#*idSy-k`JhIY z?~d2alP{Arxm#Nx2mBB-W?*`p(oR^cLF=};-M|ZYR^GXK_TI)%X)rXATI2?Hm)j%o zafK;JaZf{m7j>3=WY1hjAGjoltt-|ZA2F~*HJ*}q{jA)Q2B7(cR2*dc3(5=D@^h;( zQ4MjML6Mh*k1cj%ln?{lK>(}9NNJ^NX&^1XAyBkGQ2x@qMT96UTO)ZHFq#HoV8^Do;CjoYIDE;dVXytwG-cx2sNfPN z&CjyUv2Ow#tiOZ1&{CR#ody0@!JbGD8DcVuCz5b+y$k*0P-Q4^eXU 
zdD*q+?_8n{#?LqWz_vGDvB>2ImNQ3QU>QNouSmoXHP;QIykVa+HKu^D8(50W&C7`* z1G_4IjRJWw@vmh`_DbBrkZJxZl^o4rrCN;`9PB5x`N1#^qdNyT6B5^wXgAyCMQ%Mi zM1RW^Bs;H+&7LHKoP8UZSrRQF^}}yGew51ejN8p7ASMu-&Xge0fzeqfjwGGG-Uvx+ z6_nl+Ub@NSHRUx6dK2%iXp~=bogp%`T_)#!lE=qRktv5#lJ$KsxCGsetSvlI@kT3+ zKA*%sSaiZ|xwuUdY4?S4R@$Zn6_{eln~`A46VnVf4tTR~ErKk=6`3$MR7$i8Al**P z=Y`v(2`xXs^dxR-;N#tEpYECDOgsX8BCgMEUZAUZ+K5nJeJm2PmAEOk_yKpo>OHd$ z&aj@DP%TIVd0Y|D8opA2`3kfM#`t@k3&jezdYmK$Z;hX_$}0Wpl}73Dzu5Z^XG@4I$&6pm5uE*MK8>%eUfs6ucB--~W*A(p3tgD!NSDZnZS` ziYT3Y?Ttutvu4*mN;mmRUABc$x<0qo5MDBXZ0Iu$X;#Jepxl`=xw2OA2aw5;ArC;6cK)K#CU;8bRk|JUIF zf!6)6NAwKO^sCI zTJvCoC*8ij9w$5sXmMQ%eXPxRSCYW8inVU>D(pkB-eeN7nTk3RHL8Qps_}7+MH*)I z5(xBpFRCiTDGHy-?yMQJdz}`K+l**U*1O9Se%vR<-b!dXZOx@>m%OHxx{Pm{8nOj9 zkSA~@C{#qEG~Sw3y(nY3Pxngx9qD`j+yI6WVoP~DRpaK*%z9WP6Oko+_>DYidAE_wolvFzI#p&)1Kz}$Q4Xjuz!C>pcTu>GzxO*6$fcO$+b*&fe z9G@a|Z$RkV`tY_c$Rr?A?!!vS*(K(aq-~oiLjUR>gY{OM2BJ+WUW{QHU|m!(GN}2m*!eSnCb^xb@QwSHc6kHJdBD0E_vagPB>+DkY7!Q_xvv2Q7f{ubb2`G? zYF#Xr+o&})#Xd!UNtyc|{4Zice-^cTn8KnY6vFQt$d*=6${4SD{3I_c?H((h(xdYr z{I)40~Sx zpf0fzB%w%KlDEl<9(YnG3Zp(D_V}wMRE;-hJYY&GxUu$PPcq0e{aS`-g|~L`0$nc` zDGlD$5J}IG&Np#;_G1La_8oZ=fILYh5b7@@*=8PPc~#tIBY-wK{gbaH1+z$rOqS}o zMPF^c829t@?}N{sa<$CLxIs2dCyt3DbqMP6nP! 
zoB}->j%L2hLshMkoFpMdI|w!=?q^(Nc+>dhGf{@IX~nMVH|+KNayNaY7du1;9#aj++dCSTg7@EY?@^Qs%XrCs~N8& zC}Bh$6m54meAK7x$r#a|sUs}B618YNW_qdZ$=FC?Dg#JVKpO8a5JeU+zA|w}8foD= zUAq|e!XxN10RaX_8cv0Lvu8J8!@JsI)O{3Yq@v+Uq34Q;SDz_|U=|=|;#k?T`(Pf+ zk{wBihkWenau049ALdfZgvGYLyCmLssOaHc&O6)SSE(tzKM0H8@)mfsX=;_2s-)TF zv>)5qzmk&*_)Bno4{&3xWpG&~g0$WeM}-EVlKVU~R$M-~OR&STUZM=Yj(@ihrL`*_ z-Z=>J_Q^?5q+#>@(DJPHMU;BY8Wu5-Ys_FC{1 zp7QfkGnsAA^|RM6W_MQ;i3I0uF_)#ttGUV7tx+6pS-d7N!HtN@`VdTsNx(-^|Kv?7 z+ug5){=R=($h42@Ync#m|BkKuonQs+3_Ws`v5<1tb@*XSbYe+mkV9V@XdHF)PM(8~ zLy3dzy1|=TI2DUrIgYDcqKf>i{{~Z|hQ!V8EmIG<+|%$!5V31Ja&h^U-^7dnVs*(3l#PWNn zRqRonN$B*7vYR9D%A#$PrM+@{;r5MKV4D0%r|Mn`Q$n?M10tHmX0|VKsosL=xXRWE z9!)`=bs7P>Bs+OGfW5fG`k?yHTS5~RT-q1au1xgK;s%LHCu3htiyd4BSrplJ0mKaiXB0H^|fXSPb(?*YYOL>u>7zeNVZmDb}}jz>P9EBysDOUASQ zX!{z6ik}K5BXl3IvT(yHbrt=IQq^s~*ikFQ5sf@DobzINW|P>3qy?ddiV(l;&NOPO z)c8mA4p*{S2nf5?kybdbx;|6EIB;Q)PF-Jb%_*_vBtN6%e<2962_ofpAB{>z()D_y zzrAJhh7;Cem3@k$Kzj(FgmLpHR}0Oyh{t) zP=0UreC0{6BgM1x`rcWB;K*_N)n$NkY+uC)9sR^s85jCQbRn7|+RB0*_qx77Cn-$` zPP7sa0hvir1y_Z>uS>mC{J#5q*Riy}erbO+>6qv}T%~)54O_3rMtM`Rbmm#I>wp!| z5gN0pdI6U10veqa7;Y5OSh_cJ?e+-ogSRi*&$8WKjF)lh5pRFt&5D}mg{HaBkF zpMZ_5nc*w!o~~#nb3)N_HFdJZ-iwo8ge~ze-ssCvdoUG)- zTN^!X$>He@uS@#3NQ4|UA+I%rfcXoB4PI zQ2df}mR~6!cQz6-blVfEU0L~#}EGTI3;C}}Vn5KgNy#o+1nnKAD zih#RJSR5he0@1>$xB)^<2+s~U-!9<^m$wZP1Me}-EgcppgMtsMviTXGml_RoKOU%n z#69gzKzIp73!a}2)m`KQ@PZf!L>h^ZGX>#0E3Ht?OSKnVvP%a4EH z`f5Sf@jN{AaQC42&EMIQVX!3w2{00nPC>yU&4fkc2l6o5@(kY%|IbR8Sq-UYMBm4K z{SI8$>i=0UpVc1xp1_NLdJZsI8a04r#ggI=2o1P_^wvm8fKQwP;PR9| zXIs~P9{N+lmRziUXPlqrLI=|+Aa=aO0X%l$<9Vyyz(wXW1+)q$fw~b|*U`;^n;{z7e5@Y8wIiucK6>3;TR->_&t+_8QI3JLjvF@B|!)V zwy|rfu8P*zoO(;XLn$;&l3jKN$MnzY&jB327C_Di0J`169x#U7uCVf2H8Kqvf%Ff; z=0NPL%L_2o*mp!S+k?Jv#g@}041$|0tpx&!nNw%rNt*ZqLLPl&h13>_-)N_Rr#w*- zkSfyO-%=#goCke%Lk!SPoMiL?e`pddh%+BU4n}NRyI(-gXrhrUu;GyKK&-fcg0NZ-Fwl+km51q*shE1axkYHOuGnRc^78@hM)$ZUB=@b zBpLvp0zCocfNW!*L$nI_sm*V0n*0InOI@8K-|o7W;?wD>exVl$M7C-HGfwY0ojd_G 
z5cgS~`q$NP8n*ZAyKcRVv{j>^CL6CP3v5|u^rixjGz&yoOX-`_gQHq4pCs~Jtez>P z*w*$K1OGOd^DiPr-@$>GTF)L#tHbG7NF5Z?>zXcn1>Cw{fDeXuL@d>u-w^Y<$SFf` zc5r9V2%sK%68xa2PP6oj4B{qVF#OY%{O_!q0+kZVYJeT`n6*FlPv(1DJMxpsm5|OC ztMB2TQaowyU3l4QAd$<`Y6x!*iK{}sV;4ZhQzyBc9Rs1_=|dl3!AK<`0KFZi;5&Z)K7Y8P9-zY+;{}V75 zHz}Sv-kWh#6V6Exk^+=$hn5vgRZwgM5*j=ELywXqe4Y{sa&m@AwqQWJ?DTG?)$;rs zU4Y~f=N6>c@=gc&FO4%7czVFaGzE}`OBR5j<(^62!yV&uq7!v1v!vb)|CnFsK*kM7 zCPp@%IwqBG5)wKCPN0qbT@Y|Mp}$4D_^T9zL;mphTbZ!>QEdQ%jBZ~6|LuFvAbAcV zB0@aZCo?%e`{!Dd8gUrkI2sdd7Xv+`@yQ(~uEPKA(4Y2O(=8V8Gb4JrUfH(ZNpF_*Dt-F*^ zz?884U=-+0(YrcDdztNM$e!&@|1COk|8t+1?`H5r6+s;NFdmd|L?ctHN=zkIQV#e#Kr0I0yvkJfbAb`J`dFDjm4wTgb zabwahxE`$Rk32ub>N%DH%ISnGus0e|DB@hT@2Q+9qlH~Yj#+y_=(r_8Y> zM6frrKo%Vof0B)8n|~PaB4!MD)|NVn@hOgdjiH24@24?#0WIvIW5Ds+tSK_|1hdYx z`N}JrXuhDkn_JEtwtI@Bcf>QrIP4pRO8LEH!NTJ`xl6e1{4;-_sL1>HXU6e0-qYTG z94wNxMc`Gxsxqk#80qXBEaD|K82ytX$&AGcAS!2k)unPNn5^E^%H%eGaJASAfO zE2R6a~FgjKd_-&QY} zMD&XlX8e6oUq1W@RYmU^V)a?r*0MHd?D)Hc)ys!sN3Q~UJM;`^T|k=Jv6=NK#pna$ zmK<(Tl4?O8PWF;=QTUC!t1u|M@E1t&`9Sh*$Mo3c5uY@xzv9@jh43o~{TiLJ%oNJZ zr}eva8-&t=W`=S~Y5z{l{2d1Rqbk$-{$^iixwDi5rnUi%Qadf&$ga#^az0W4nbjG% z4}{&|!x33}qJ6lp-jcP6G2Vc$0oecF=4N9EThKs>+nh{KunT9Vl)Mk$jeqX#e(u0Z z|K``)*SAE|N-|v_`K`}%7_bfLw&3#-#2Don?X?Fs0rEP}DTS^!MxQq`i?w(aYs}X~ z?+Pt2JzTm{-l=?%lJ3w4DGUb-*6W;Rv<3+D*7%;@<_g7E`*F13rZK583}SSZ?VCXK zj!K_x+@EG#?A|$(=0Vyzi0pY22$1YHK)Ej7Qi3vnu&MPbEluy7ivF2`vRRsBuPahN z5(oK%H&DKz29Z2LZ0t{|MwPM|9PiEsv7Tb6XVI^MbiH}aA2F`J`Sn?G6gU^jm9#-^ z#)O}pRSiui(unLAw=pXEd#@_9aEVU6etZZ%oD)`V3!dmt5h8DwIM?nQaW=b+MpkQ# z6|cRkt%~$eRr!`9y*hSoxp_PD4)^D(XUyN;)vpp^QDNW_Pfktw)f>H&yyu7I##vi0 z$!z7Q{`Q-+{@W0&ji&UjspmE}b=uv_&RQ8IGq4a5q15<-+=9A8BhQh~5E8*BP@#Ss zO2+~R_yF06k3%)mr!xLCTAzhDTcChT)5w9>yuqk?khJjFC$7fc#kU%kAK&^Qj_ zFG7EeZO8iu#0B5&y6Jkqo|8d<;O^5g^tVu%-+3Rm1hoMFU8tD8f*zj~RW{XN*47Kx z0N<9czRcFg&CSDhQTFt@wL5oLSdh`73lO_tZph+q|6i#fZWG}C2NB>di&p}do`aN> zr1jv=TNXbW(FBY_ZU{OApS_eG{3-|(x$<=_Euy!z~{>er3Fri2E@MiSTOHjMre1DSIs&8C3(*Sk8J(r2_$*IqsVFNu(-tIg9I 
zoMv{&?pdR|bi8ockOeIHCqZ)OWL8}#Ox<6#C*HHE!v(S^0xL}x`AH%y0~}eX ziV!JG6$NiyD}9~WGC3RGR!q+*O4ZzJWnGb^if)2aOon=FJkPC?8w^VzBw=5sfkOoW~c zFBdRi2eQJOT*$RFzZ)h|M)DjId6dsffM>Z<{nTxnN()Ji~*T9vIeZ0 ziuZ>fD#FqF#yd3kP}xkj-E(umJ~@mze-rba%o4@3K4EP=1v1%A<&CD;#<1KK2)W%1 zUxhg`PK{2B@;zlpkqPl?I-M3kE3x2FY@i#T0UU?gu9Y%;Ohc3?qQUS~c2};og3(=q zSD@*3Ig!7fl+~Qt1d}(|5pCF!@|!p>*sLpNvlD`;XNx{D+wlPb%Wuck{clVB+Ie1* ztycEDKf8_mVR0=4mk%rOJNCF?arQyA{`pg zn>)&lj;k8(@aBsN2ISCP*ORinaJ|f?lGvNTQ6LT*ODoF3a&j24PB!MZbaCELtYn|a z8=nJ>15x|)^%fqhbl&tn(o`tc9xUoHbz+&c<3F>Taq7_e4W-jSHI64D%${~Fu!`1% z>uLJ8$VzXnZmuHRy4O6E^6%|rRVyTlLx0_FJiUXC3w%_yXtqDcf(nU7k7Y;9-}T@| z&U!R~W!@fu*r;-d+FVq~0-XYWY)Np~CEnHC30gFXJ*12Fj@%2VeM9kZ6&%^DM12d^ z3hN%nZXIjmx6Z5-Ie604h*$8T+HY`B;3aY9%5x$T+&6G}D;*V5*w|TGhn+?mw5yF8 zw>)8AwqAtYOfor&D6Mx57wi;4xzNWm^W_tWDlmR_7KqR@v?4uJC50_)s~M7^L|n*W z=ukD1hy->nRp*J^sxMNo_0WLR5sQXYFlzG0cEh{`tUZsDVeZJFU{3ezd>&G;mbDnC z_x}Cv{t0RA$eSSJ4bi8KN<^!MDR*Xq+u2r z=3Wyqwf;$Uoi|kXuvZIk9n5$o@8YSjv?l-7!sR;BHJG(ix4>~=B>%tyJN#YX6<=}xl_k#M3}8twQr{<$gWnmbz{qo zAJeO6`?~cHPw0CoIOwMLazUeVvS@bErboSojA)5rso&q{^*(KtdsZ3FnKQRkA8Y{_ z%C1QOLj9HW1P%g+k&S@*Z#S)FFO3|dk63Af-s`s8CPQope*js0A;rp9TWL1j5id;Cmc~!q0EtCdiG&v*ss(oU}2O5Rq zU%%sZ{bBb#yJym1=77!Kq8UQLc}Ez;cg_E~pg)@NH~T%)DG>^1z={bcg$Nm*j9~x?3*l9K7gASb62bh zv*qQbYzbaXjxD><9Q#dKH<`aMYlxBoes9)4E1r;~Mi7rgpHHE{5wBz;G@vNK`61fe z3(;SfU6i&kV2*lMW;!A8gd6$ZOr$cxC{cxz8}lY3%dzNc?i$UobU8g%ipTDqAR)Qm zczwkMahksbr=vwEzyhJe07TqqC8PxMH_4-csAskg@W<^qb%EUieGU9H0^$ zCUBoL034*ca($JBnFf!;qUU9B~^(y&O z@@bC8_?Klrw`}V|TxlxX$KZQ}C63YNErcewJn6ah{E;koeFVTVSuk(gk$H;)WZ56@ zE}YbazYtz!JCK3TlP^*?V=Wh`SAUY_oNO}R>EvY0`K(iug(4{AC^!lJ`irTb=cFKI zuiWn1FrDf1oo6h!KE^dX(uIFo2TK{T(VF{vO(4e$_@?Pf9irFX6&!BgihkPMSYvl0 zZvX*+&I#u|+p?#DQ)QLxY0JzvK?XiS98PoMg=vZ?lvv{8LfJt5*rLRrq9e~~614f4 zWZn9Hc|e%hH%&~MjXAR3pc!CiOg}cQ57q{7D(?uEVb{_x4Ey;0l!w~m)Te+M`!MMY zaFYv`=Gy^!o7gmw+d&FaPi|Mvo9_Tz%Acv9P)_F|I4$3Z(VAWfL3fQ)-*{wSAx6_f zwKR#NkgzY3vsVF_)(YNf7ZS!6cGysPNO!Dt$XEnL!DTD7Zp`Z*_L7P!`Rh$8<4T&J zu`N$iQ;KR48?QN-^A%B`L=*}Nt4 
zsUI^5kDQ5@LI$Q3QjXq8pd*q_n>YxUA?h%-Rq=kf7&bbI4)m2OlN` z?$}3-J=PX+#tuPLciSkG+Hbj3)F&Xi-4eyjZLMG`<7oG$hwU5Wcu6ESX?AQAT7J!~ zhinVk{`y5KkVr{)=JP9!@inT$BJpLrxYof2u=0AyWm1Bbw{T1io^-`zXB8Z#b~h~= zud=BLdHOpS5W^+|&zKBCTEtLTR8bF#oNBCa=VB}>DN2mqFtIbTusHICDI*k zVQ{2Qb{|*gI)%J^tVsVZ#DOu2zDT6m=Ka zC8m(KYlXMvj~ncb4bh#~es+CT@Meh;RKk!5pa5Y`b&DhD@G3F7O2L zr{ge~x&OMW*S-`9vF|IutRFX*Q|O`aHU6M$>OkL$v?++_%$wO}Ueb z5LNiTWqtnOnetcIkL0|Srs~JRN|_T7D}PFDx?b#+ZbB8U_D4+7KS^GwphTj}2@gIF z9U?!X@Jq7_izf_joD`chyVxjoX{8D{btpwhc~b7H!42LxajrbsOLy<(Gn$q<4DVAl ze6GJBQ7<~y_+z=j2O-un0P{R9no+ea!yzj!+H`OC zr{r&LH89p~0@6+pUymKUhUm z!($wOH|WWxCLDTBlcTSd_Vb1xeGnL(Q3mpzZ*hgA-hT4W*+7zYUg;jcL(js5!SVnB zhKQx|^=I5C+L;}PNLy5Q3#I2A@hhvg04)7|dO0_sxtGUzm4I#9x98EW+YR;~d6q`` zPgwf3vDT(oP6{#%HO<#8G0dA5pOM3`6OY>aw~VVP=lu8u$jp-etS@flm4hWDv2<1& zKSkuD3Yqo_BPchG33M9S6PLamvb-J7X)?6=bjbr=(T0aQK491_h`)CCv61<#0JD(T zw*b%BsI##kxnP46qnV6rS9&vP>v~8Z{<|ZU6!L1H!YC!6LzDR&Z!~9nVIY^maAdrs zIR90MoDXTF*HFfK&ZChKxwx5KL);P%w2pPMoH20IZwtM%Td~9}z}wfQhaUg;$3sC5 zRVX&OKgd89xwYbixwH*+(&bM-KL#m`(AH7sP!@{~IY{3mriYCiEv6V8Z|RD>cETsqA9Qp@6pty_d=I*MLqZ)M%*P_ra@d z6om?-ci{O%VD$d$XAfkS=bL)RqY6}*ovV%^O5N+zFX6*uW*taSz>>xGM0W9PTH? z|5gx~>{#Gc_k}BNaYrN^|3?Lb%{+?iU0RG|M!y9|NqsRms7=BSTA5g?`$@eEOE=!~U ze-27t?m{anphe-u9|VIh_Oe*8UF*=K-MJjfB__}pH&8>xmwizOjps?vKF~Ucnr{b+ z9}9gQa9LAE&>1Tq+a5izAq6H9sCOmU zhCgjw8VC=R!7E+t$m1>sR54Q+6#rxCb=GAGI8a{@f3^qYi%@@ifqSPcKXbmnc#Rpb zUAio?g)Uol40Q&hNc{U-(7l8#pv25JU7(u|Yyvz`wdDS;f{)N!_ZUGDe{F^0^e^Ya zwX1l1E6s;C4hFl$T;^E56;<`XDB?tU)*WnV}@{pRQBVtaY-24p(PI7?p)z9Lf! zC|;B`?Xp$<%24;`l|6X^-D^b*O2ihaF1V_1P*Z@Day7yOy; zd1(R-g1N3oZ=_@n#09}yxWG&(6qa~)IWPL}fxe(e6dGN2>1*gqI80g%zI@FXD88I} znc%XfGEiqIhkg10^_!d@C_*H!Oh@OUY8aqu9F^rO7jHdN21T5YYp`E_7gqP=Q0h|( zm)#T&Ub!-Q>sHL=$XJ8oKMgF)UzTtK^xF#4(<#Htdr!c+==oYuiE=$A*f37fGw$BB8 zGCJ+L50fs(UJVSztuQUh%X#rS67&TQdS&?X-YHJddanhK50|eI1;yLlm-weu_pvq_ z4q=eMS9UyC#Fhuh_B-2utS#*lW|Q;bvcd!wpkTi*E;?dRt*8)KfGLdZc_E0NaXoxn zMGC|H4B=kV006@t#7+Dn!wddda`eD+E-$I$)Gs=PDeB5Bo>OZXi1!h3evL*X;C+S! 
zy`H-P#MQB_%Vk+Xay6g}TtX_-fk-Eq@einhFde*j4fmQp7xIXnC;{)zUrl%x@>!gS zzx`z~kujjPS`Q>NFS}j_OTI);{}vXN?boBUroh>r-k6#~nf1JAr~tVbsDtyXbo4Ho zqKNgPL@&LdbK=6IdojtDAX8WC!I1PnQ(lYd`|8ljw;fj(q`}^m%fY@04Fte1K{fPJ z&kTZe_ZnYXkUxcLfDJyUl6wQ81qi6Sg=6GMAl1O{Ypewq%iEwosO>XD`QM-`p*gL@ z6ahW{$jT+^a(Mi~)VYPoH@aM#UT4TLK+jJ(Q2z6s8S6u(ll)V=kMR2jZ9xO{`u|nBRft?P^mujT%0J8Gb@0LK;6ARo{Nk4Aec7PW8*GWB4KBL( zf0XVQAO}eh+V%?<-nuws_<<#VB-84?j{L>VS1=$+!aMtTm)#}-R?C9Nwt2TN>;97L z>#7S#ajDBv2(0$voBH<<19ld)4tZzlp^4}LKpq@cl%sv_@{L$uKK0MURXYaqb zcL?&uB^>oo_(Okg1r$wOj`rm@Fc=`0{PUjZR;eZ%TE`?o8X&t;i|0_^-`DGyAjmO> zi~qBBHtI|;3sxKiLQh%3w$?gV!a$jL6{PB4d_9*lRF3KO{f;|-9sD}ybnYz@N{&b$ zPJVQGO3pvT`=4(*PPnq#=&0eS^5!g;o*g;9)`w2M6Z~1!WaZVx_+89bV&8iTOgEq3 z{>EJ47DI$rB5LFVowDDcsF$)3+kQ%SsD$}D^ybzK!so<*K!yIQA` z?OUUMX%}2T1^Z6>)?y;>)#TE1FyjN=qOLe z1`)4GVeH5C#p^a6sWy18>i_`q-x;HrnFZufhn~F_iO#bUQ^iC?*Cp--Hs1%Qi}UpMsI_ng((oiusrD_Eh$36eKhc!Sh)FZZm=;STzG zgWsMF>PS4$h9v< zCG$E)b+2ud62rGv)+}YaZweH@A9#uOA3X;Az*A5TJ<$H$9{gY+y^i>1#ze73 zA<@8sv5=nza^PwBkam+qIZpdPHt?tR@EgFSx)H5@$4SF%`Ert3QGgA{qRP#v>O1KL zDfR>aw9?ytwFh*TP-gC*2gt(DbzMOE(B@P6)Jt>~NNleKahO(o<*xLb_D2Bw^#!a5 zKQGXO0E^`aIH%E>$w_=e59;z>g3ab!Nj%Y?vZ{v+W6Cw~2jS({VWY~_o&MQ8i-#*# z3U-az!X*~U-!2AJMlzw;0y_YpSFfY@Y^WG8P;6w!XuSfmH0~t_);a*8|4s&hLQ#qWLKqO&$cyZMtB3$au38yjTWCt_~n`N5lA0 zxS0DYMswsci~*2LUlXP6!`cy`q&26jZ*olxVxnwjvv1mLI-kyH0^!aR7uTYikOuK0 zv~MSXs;*p<_-)>RkCFCZ2$`j)nia5MkQuqBAXPHxzZePA~WQ z-j$sAW(F}L->}FCCf~V^g6M(Q%qT##1mIWsk3k%|C*oveq4dMr{_Q{%QaaSPw*`Cq z7XTdhUJ)9nXygxRPpINVmjce`Xozz7Y#k46`pzWPsm&?YZw9D{-F5>k7k`}0yj}yK zAr(iq_|rL&^V40TpR!!-fIQ;&1m6au5=^M=7=#{Z7X*#wsNes5Su-^pD461wEW!I_ zKKgGRi)l&PUsTU;NIUgwxEo_u*IppRd>c_;DezBnRrtpH%BdMIIdn%H=% zS@!QXkwc~FFM7bK(9Et42hMYP0}6qgb`4mJuZKGwO8_ErC?rQpsN3WfVar!w!7dXn z0Qi&F*GPQ)aH?;kZNEI1vI0rTZa8KDYqukWg213F=$jvvlG&C+C_RC$WKU^pNA@Ic z_n1Y>cjuORQIG>(3$Uf%Lu736$zW6uiKMpPQkFtGx~CPtdYxKGl#k3fgA~JWqy4FB z!J3Sd-uK6qFWs-q7MprKP=Ct4ejU3driyXDvN{nchGC-?iL3TVHk8u1_*RpQ{Y^B} z3`_$%dL#_-_(+9HVb>qtd;a!|v?<_i*$-F%+N+8CL06lpn-fo>5dJ6V_3VsS+Mzm9 
z4Q#RKOnL~gd3gSJ=6rY70epgt3wI%C(S)%d;RUe#PUF!FH(>prAn>)N_hUcW6`h;V zv#h;Pn>ctQ0H*UO`2+D{oJehAV)4FXWVS-#-w$$&*6F_iA%-clE42s8Eafw; zeDE?%e`iZm^Hh?|dV<-xKbIiU0C>Y28^o99G%%PLl5m$4Pky)&!vMof$)*_HULnLK z@0{&LjkyQ#1v9iy`{sS!&vu8!Y^6)KH|F6V>>(I~V&-S8^0no<`1+|=zye_#E%<26 zpVYz9;|K&Wx7=!y&MP&1AH2X`U<@EWkO9UHt*q!OwO!Vc#zWbS+6O7G{p<_8HM zW~{dpo8C|GF_mg#@T*)2K!uE%+~|tBX>Wy&g(QV3aj5p4a>Ug6-%(;_AqK1%I9HTJ z6%jc1DSg5PySV-)S&Z%+@>Y+cXxpjM23G<6b9**m4p5ZuVRHoH@jVE*A@MI3;BPo5 zVP7vmm##`vm$vGEDx%V*g3|%Wm9zZkNqlYm`)nrhCMPWXTbag{0qe>vz7PsvB+l=k z96_=`od%RjPDOg!oWD{;g^iGE(+50{Kg^EXLfO8%N)gZV2Ky}hfY!FC%rG3$Z)$=q z@*<$zf4^lSgiC?TfaTgT>&w7VJR+ny+;983A@=vm2MUN=i~J0=3rim(4qo<cr@tzZNJECYl|}v<{uDF@X+;z zUTtG%cyQbXwg#CVPrpVy1w^g;S@)n=@p`>Tde?#UGfv3TO#>(FA#0T1{NG}U_teo& z`dQRth~8HGuIAV^2=XYFjT(DI<_rfUN#=d1ezu-K&k2&83O*hrzxdXD;&)_$w8`ke z0WI7y=0G~Z)Gc{}HGr0cytEV7s*cF@EB_35T48UEvn}xWYG0Lt+dKA$4<{X%!5FZ@N8OYBwE z!JPD=6F}cWOoKE*7;SFaA@#)R_I?!MgX2k&Bsyq6QRdBPI*u~j$cBj8VNB0 zp+AtwfOL-YI{}|G%VYB$7|=tV+k;x?KdT^fv|kmN1ZmllK^tkq59KkXV5?>fEK&lv zIH5L@#Jae2!=}XJZOOSu#@HE-RS)1MJzL*6o;uH&hnDj9Fp(q$)Zmna*9~ z6{3G0T}=@(PgCS4ICqurrtLEodpUh$I+PN1PoCLJ`2i6aP)20*8uSd-?>3)i$FKQ@ zD-iL30j=cWG3;)mPaRLm>X9OlAnaXZCVO@(!qLnb;j?>0KlLcG1r zF+Hr{r3F3fj69*c42n8}2nU65HTyUT_Tb)f`ZR?ce{?0h*2Z@>`;I%!KJ2Zp zLTn;l&HXxg;HKqMf(*?d=B7CV_3I)UWU-(n$@(i*TWVxvRL{V%VopxCIkN8^{+*=; zH(;hYEVNBb-Gs#oXm)Vo@%4&n$NJOG(pz}loK7C~91h=7qfyr+hbgtg3;@b$SNQ7v z`rSsRZkAOR-oaXWv`(d$Bs22;u$VgB#Y4*6D`Kx6I$`d;2M(T+j>9qM7zwpR#joW* z*>&(ugKnFf{J`3Um&J3zI86?N2v_^({6~UruA~ve(1rq!>gK|jW8d#fXMIaGn`9Z@ zdgDl6@hdwafRMUXUwaQgm^pIlYdp<|csl3eC|t*E(dI(?TS5_fldzykNMapd0C7F| zp?*ah!3kj1F$|Bkzh08PFXbc~HuBg&pLvnw(fwDXNFOuFN)b2L2vqL0wqd}k&&$U1@+_iwTmEP~Ik7;V zaD6%`q|!xxVOv#gD{_!h6@Kr!Ey1vqt$!UiQe`P}Si6{hcK-?bSXo}hq|dMj-oSeM z2SQJm*16b!m!o)FZB~Vpj5L(VfrzqlbbI)+@KV9?#rH4=ryG_SR^PTsUYsA%A@I22-8e~s~3#4J$6tZ{>5Y(tqvXv2b0Dne-uRcwbs z&|(#;=-A?ca+&~xogORoE(Gp#z=UCu0`S=8<{|_J2y8Bw=4QOU>@ckgg45FHE|1|+ zD$LxNnfs4XR?QC#c{t6@JTb7?nd7q)v`Hm(%tQ!Ip7Mmlt#IAVPOA#YRtI)CqL-_- 
zZb=?=;qq~qFEzo-RVBzQ?`U3^E@mk-nLa2sgvaGO#BZ(a!D$pTU^+@zipG&(nW8Zx z9M4~Ap=X~gxdkAHrMIgoLLIrfciONk%|zMWgzuqou}FDZPUQSi_?Y<2uffU8eJ)Co zu!s@=u-?X!v4r0w@Ovhiph}`C-CqjY8eE?4ppHr(_QRC)mk7G587Ly z$eM*-=mkRheTPjTlWCcRr^%G(WPE2SD$o3q#;6qCbj0-yEUCnyYcITW3GvgHsV=g%KT{W8T&3FEyM{tuh7Ob(!qyza(h`p?ljw;g( zZhB^LJ9X?lv{dTGXJ#dtbXFdrrYM0slYBhZiC~(M(D^T|2e==(+?-EtI`A;tRdT0OjP~dL>NUGcMvG>6ZXnN{zix%I z;`h}ggY#I@E0T59Tjx4pnBRH2(#0`9nBkJO#+(p8RN6Dj!;0jhjM21vOUO4)uLbul z)=YfxVz%|*$R=(tmu%hW)0RzcBfJ_#Y(LizKF7$u@&ayPiUfkbKQ0~ccsO7B5mE6$ zbyX+2i_#uN8gPB*Z0pSw)$I2cPic)y!m;+76l}&(1*D zb{V?@?!XgX+9UdJt>Ol5Q!ZyTMMr-O^P~Y=+j2z#3sR z0~Wrp?ZJ@E>~zq*Oc(~Bf6H-~-Ly0F=|35L&oCe5h-1A8D>^Hfh>Cyx!Tz_MPWtU; zDeY$(aOOzd4AY3a^ep{~7&O|Nb}mC8V8F!I|HSi{^St|JdiW$(^7zA7m^QmD-ej;M zch(dZ1}3C4!aqQ{RZRhW>qdb^15qtj%)0@M74}*2UN)W@o{!}Db-u@?>%oQQm&5Rx}<_UHJs@D*npqS z_}x_RgLtV*Z-lT>Ahb#CX)FuZOse**NS5%fhUKOkk0=16!$vVP|DY0~_f2bbF#Hkm z?x58>?WcbP-O=V{_^`xCgU(mqST9{LTv4Nar+zB~EOFv}g}7V6m@F9!z89{rie z^Bwiq6r(&>COt^{;RSl0dBUIvr#L|m0PnH!b6Xo_?x6g=!kF^%6tLyN{XS^!}PQjPCO! 
zZXnZK@_@ht{AY^FG!zYdJa=jaAE|!<94zgSNZ2)7!>MJ^`lx?1=EY0DIJ&r%9B46m zA7#}81-~ia4F1L7vBZ1`r2LM~au?D)?Q4MVyIzUHpe0OVC2vSZcya1Z-4aWN1+qf` zjKP=gJM}#dPbW)owrBP>q16q_nU>KxhtiJ2r4RIH`f$3P!2kO*cv2~}PQK|EudBV9 z0p(j8Ikl1pu4Ys#VUGL^)RtIg69Wm$H zcJ+hO@}jzxg2vmbePugE$epeY;5c<7(-eMaV=$U*0ue()LRT|+NIlaM4aO62)$7Y5 z$fE}eb!WW}Y@aWqxW8nvR;;w$KE4D_vPYkdl-(h^)Q7JDtMhSksoNkHnZ_^RU-vu& zk9(r|K_XfmB=rS}13o)HGJRuL`VISrQ_#J;z(OWxn@;-)GLwzmKob5Ju;t7MHD_9G zauY}+<{)`N{GF7U(>8kG%Ca9&qFg`regaJ7KG}1mk@I(vsSv(K@%|(w$V1Y{56n$$y-0{DDL{)cd8MAEM@kVk*UradkIoRyv?1(ms`3uZ&4Ci&n+9 zL7>}0_n^dq))qkdvM~J2ZR+!N>&Oo^j{N7H$NfSW%ny#!p zWM=yV7?+90N!&KzbG>j%gD6U)GvwvXNUfcZR-c5_1~b|>o!pOrwha`N=LIWNaLe&C zxWNQ!2QUl22F$`HzUf(yA?K>`7HM9L^ps{7_?E3lK7B-jBhmCPchIf|Dt(8BeI%NU zu}6#Sx?<1(Fv-*JPCO%4URXY3Lf&cEdnC&PMOo?rkH83;Tt&h-Sl|ory?Vw!6$5Fr z7J^Ijc$nZ+aK;8OMWnvP{eayw#F!tE+U2A-!2&?;UPd96c z2Aa?sQ?&tpf~;~Nq~UKOWrkM#3cO8=Bf-p>BvWANYA^!ob6(Hx)-p1%71FxA2%}$D z5#I!c;O~heK#qODV17Kq;JJn#!EF0~?7e4HQ`_GytblZ+L+HIIC;_BM??vfKmtF(` z5fuo%g`)HhDjuA_ z*IsL7tvTm!PM;?uFCD>jYT7@FFDn9bn$)O&q9Ne4PyEjW1?05ba&8^XKootLvggpL zHstQ})WJ7MP5`F3oVUsWU2rkEg4{)<-;&4ArG3LcLP zr1t?a^jCKSb(KaiM^})i4h!|qdCBeBjTZTWypiMZD7JOlX|AnvxK?ngRh-6 zQwbL|sNUTMOilL+JwPD-FqO(gpKMO)4_YvfI44Dh2H`Q@WK%63Dz;J%qe zq?K6gDYdfF3g5)9wE29Bz$O}@$>q!E)1{y?9vjf^7<-0-R320Hk#KqB2LU7B;Smm; z0zzki&y2(*X!Odub_54tNXYi+28s>AcT~WPLu27htB_h*>U2j6P6I?U;OOkVoz+~# z1vD0O1@H3S*n~3u{4e8?j0Z(Z!y^WFjgcf_Y5RAS`Kh_HQup`|esMO0`#=UPL=rb@ zEwm7^j6?|q1be(Z^i6uu6cesD7r#s1?E5@P>LKeRPi_e$X(pYvrYiQ=0pOCioD!29 zrlmI&$^g-;}D==5v4|1BH_8VLB3_+!fJ`Gj#hspd#}KsHeBL78Y*U&KoOYQ@yP?qJ5S^m@}h*rc-fdQ+r+Xv zi`V9m%ooX6fWeBQ&21OOs5!Y82wZa2lwj}HIIFy6BJra!PKF7wWu+Tch4m0dJ(R_^ z;xePuxume8xW<=UyAr7{`I%ZipVk!r);%*{BN>vi6B+aK+8ZgQ`G9_}C-IX2{Ztyj zjC^xkc}$j_b{%=n;4=J4F*klril?A{j*V)%%?R*MSa?$PN^X_ z6yucK>caJB;%M>B5P`P z_yNYxXrK2YT?0ULeUiy&*QNPD`iLN&*@$JAh8rG zmF-6sg{c|xW)p^LfD*R~L=LKV#Kws#NvBn*bs75uh@O$GpXQhGE+0%YeAF#Bz{{U_ zbI$ORrh@)LhF~0A4t-6R%l~ua?o~#6B3m+j(Q-plBu4DjOza0~Hnn}E 
zGX@!)5&sz{#E)!I1e9L=iqXiiBO|3vlc=vkkfQ2^d3X7b8x1Be!PX$8QU5WZwnnM$ zPK&T;1>rAd851FDRU}3M(#4`9)i1gfQ6T)W5u*LD1-O ze!A)KC^rgY#ZT_VxGaz`C0NZ!DKWCtfqS?8p+t)Z7egH$B!w8SrBl*Buz8V5`&Pbn z0wWK~|IamtPX<`T=W#sQSd(OnR`W8q95ri!oRN3jY#Ctzr=-e;RQwj~%p5R@xIYQc z?o_yC5T31l&fU>`)H#D5x-n1~ihS?BZMk2d@GU3;>ECaY827S(;@n8JP0+(6)dDf- zL==;(X8!yB2fuW}`B7(c|11F11~K$SuIF@8UC-jEM6S+Y#pQ_TXirJ6Rnr#;qid*{ z3$tC7>ZM3S_|O`~-lT4@a9BWP$uDN8Z+bG>(gwvaFX7$}2>cldlfEjWUx*hJ$t4Y) zR-NVQjpyTq)?10+P2TPOBO_g%W(<57MD-c;aOZw&nMf$nBKd{czFLQsUBPDyeYP>A zrgMhHRq~6o#RMrCXvUx2=I_KPdlGN*e;WCr4+t73hq^Rf44ht*oO&Wu$%wT`5KT9v zM4QUtoEwfSF&YZtPvrQ1<=35?cDdUj<>XPQ9qR>2f$BbxOnIHq`+WFhOL~XCtfz2I z?mXC%+M-^k3>lRb2SqpP%6&z!y@oKI$|C+um|Jy>3^%I6aVqh}>)W~rA-1moR_d90 zAY<>T!P{RZfvLTQ>6M}xdV?hJnP396O=^X!?eHDS*0ddQGdZUvQNs`g@U^u{y(YOn z0BPw>Hy;F}#!$KAf0HIP*Bw?lw>joBS$Dckv-Ym(2k1_SRU{A|?hCC8uVMGZGScbd zydQWCkr=@4>XE~}M(FMeDp4p;S`&5mFsOe^64&XqqG}OxBOWRf~Ot*9h`%=m3luS#iNVXhhg~dvb(*rjH z=jtYkzSpsqA~o!#{T?@iuKgmcAPJN-2q~@_qj;J7f{whQX|80t+(9MAoTjk^%4M~^ zyCW2Q?F1vfZ)3VNrf}CTx`cu+As;!Ud6u<)^C^m&qXD18n!&JZ~3Szn*vhO}JRkfObx zQ`NI)2ZU>#W`FOLipYF?_>csGJiNZVc+ND|Db{zy@<#QRn;tUmYN7v4*gClc!*$21 z33iCa)Hz!_>*<9S7o&VPgh?y=B zuhTv_@20Pox79tKnk2}(!CqN6IBG;G5zEeUFt`LDlPGoLlWn6qKvD$lb3xuyJWp`U zx5AJ$wu|*4RxbwA)ak~?DL(Si>_CAc$@SpacQbFBFNZ#5|1rIHh)cRb)b-CBg_inp zSDCG8G@qB>N@e9FkY;>j*G%=k|0)Q@{;w9GwomG+b0xVoe_`XrN#qE1Pl;g#78Un{ zzI|6l;IhPk5qTxb^@4s_)LmD$*ZMxjJwaNR@MZe@Y(i|`SKVa#Hv~{+CK|hEMSqLH9yWuu>7=%Yb|*nH>Nih3G;mn690A#mGG|G zuDm#QyTH`sDFEQU0_xrK?@UcDW-;I*<8CB2a&x9%NBh0^VJPXH{wg^&Yi`={2%gNI1WURF5b9vs=FkSo!+d^>RqXGq1 zsN7<6Uqrqf_$1fee$%DrcQlrR`C+`#%Dg!lh%LJR%i8|uoqiATgJF`GSf?8GOKhTJ z|6 zi5Hm?yTxA?p{2c0tpFTHF*)Ek+GGT174a;%z}Y0x8ZQ3#{dfm<0;!_Zn3O~_%zM48)3Y! zK41!FM#&YYd(Y{653i4ooTB$VOy3)fU%Vz5(AtYDG~=|kF9l<53t`Gl#b!jA+qb+? 
zqvFKKcA&_l=n2Y>PqB%Oty9t>o=FV*|mP@CAy`-Gyty-eB`r zw>)Y}XAP{OYUQ-xyOLVshH~dhH>Rr`EQX@|;zcNZ&q|NM;6R{SZOoR+r(0&CBz*`@ zCBXv8S)YX0ARV_vTAokpwH_DFd37krt67q>)V4Fy`S%`l+Q4z*Xx&;JOQ}n;&ZZhs zNQ98a$<=P7sGKTAv|!)W6EdPXPG_*#xW%y*8zSyoQRx&&s!^1?7+T%nDYVVH*(Ffv zSD0OV@#7_r!+2M5REUdmKf4zSkA=_EcqW!)zFbjCq_fz2vTd24y6EZP|Xf;0pLB zokx!3ayTPOxsOZI`s-Xhn5I)}-p^FNXI(qHGu}-pQ%8k)P+StWFQ1z9bW3+yTlSs$ zUAk#5Kh+)v3+oSn0`^vY2M^N!?J{4r z-euwM^qw3yD{eb-8*1?D-;f5U(vjDp=ac@j@&7>K%5NYjyi}*~B2<%HSu%u*aaHEx zrvLpW<7+tsO8G-Kz&h8zvS+Ly+ci&wV`?Nvn*Xo@^n?H0Y%bxJYptb)Cj*ib#NSLW zyIHG?HNCt2*`KJvPx(Q>sIx%d%IG#`AqqN>m`MLiR+Yic21LC`h zNnoP50tA-SCsw$j*u5cp5aVWok@30Mwua2&Bk6SM=C4z`=VfCV2mrZ6@ff=(m=ZZb0YsjJ{TS}ye6z6?}SZK_xKo+-XDK(INtkuJDNh6Fc1pl{mrvbp4}8sx6MIuQaiOphbZ2S z-$s=~r@YNGN&x{n^1A%Z($RKh#Jiv$`JIXPj(`Hn_U+d@GVk|*#&|oE1`SsJmvEl z48{ilVy=^7*>wcwqaC-wjc(h=18L}|kOCc;6N$qFz{#;8&){kF=JQF?M(1OJDamKZ ziFgfQAY0S59RQ7;o%}D9AE?&12gU(fKPdN1ngU6>BNIf6C;O0;6IeVZ`!6gm)NSQh z@dZGh>sT#1J1~G=+k)t)xJ`JzdbiD!5`wy>hSi)p8P~U)Pb5_~LdTr&@#OMKw~%=+ zxE#W^vwCggoiv)@E`-;Q*wu0YXy?>UuH0i|9TrZ zY5!inUlwRVZp59MTR@x&n1YW~ZWLfUCTN%&Vb z`;(Fo;C8FjSHDGDXc#g!u4AM($2?M5AY<c#nAcveE!@zXi;&!qYCu zmc+O0DW65oN|a8m1F^t|HzK|cKb0ZAW>*&e5h52OUcyUK1t@y2jiHuZU z$5-^OcssodB{7i;drX#BqcLpAgXnY&BstrR;~&5~hOraHDL7{#`Es+8{gn?oqpBgl z-X^F!?K%0~jT9D3a^gbi@!EG|&z0t32H@9tlpeI}=!v2R5FcA%yyF6bJ3n!FOoI+V%w$TDd`c~5M}EDeHwC@6$Z-R zW74spI8y%G|F-$byz;aXGRIw&6T4X&`8iib&!aKh734bx> zqgD0egjl7oeWaGfN!Bh2Bwxz-@KdZ3J@++fW8>pvHpy3T_fdwpgzDQkmKZu;s^!k< z@O_^^P+Uce==ScXA)<_lvi489^Tr9sO*{vD<^QO8SS{qYm1*j=CCo#jFCqqYMyXkQ zSRJgAT=qz1wQhLNYc`IIIwpheqs4R!KZYumV&ECqIi6{q6wogBrv>mi$)|mrm_9kT z;;=>*>{z>tm2bwQDhupx>6Z!jMyFtK5DFG}LX2fMrWknm_3n#U_92xm`o93Ra^Q#& zt@2iy@!l^m-`@eq7+b_j$w(bOeg<10#e3JPD8ba&;b)*0(+c>8H#+_W`6mC^CptV6 z)wf&KqW*`TFNtcGzo+Kxo8qL;0_P^4Ry#l8YkdiNJ<66p>s`)Kf?SeR{B}8JUOPg) zs}OjP-v&F1?NGu+sT}fSHzH!%Sx^@8L*&AOSd!$lB;sf`rY!ElIxp_bVSYnR%UR+h zRhOcfg@}=OZyLI7%3eBqtZy-)W+iQ_xY3wjRFr4n<}Y&NCW>FrFk%(nPB7WSPT?mR 
zYYwGhi77?Te&*tDt_N+p0~N%xa5QrL>Yj^P!#Kzj{%9;d81D;e$mGDj!BFrKnl(yoSbZFzwlgbxnps>gq+>_Z(OkA}Pr5h;B6p5AXZmimOa&%6+3ztqZd75;C@#Xafl*4CbBYB3A;xfXO zU))lfsg`t~G_2>fL}kSc8L~`x^LWpj`8JXIo^ndSN1ZDEP{VZ^*A&E?iL``0qqOYd zamRdXN$|w5=HICryT3`!*Of8CJq}PR%tc`6S0BaCZ?Z8y8+&zW0OE3a7&inpY(CU? zpGg`r8NA!#7V>;eFomMmX{R=vnKJdfGAJ}GK?*0a;&jaBI62-Tp3@b(B+KMT56ESn z*C)#CX4crGJBL&9c0fw=U*1C5YH;IptRg~=K!w9v5$yzFRymGW(=pylJ(0YU(42l< zcG@dx6O`n&eb^+wnzHWD@Sn%V+em1kRDN)viu(mB>1StZ^}8cbJUHDmV~g%JvQZnVvMN;Ba<*Y){}v!iZ@v25hsl(?vl2(r~D4psNmM7%|jypQcDe`4yiOoVAi)cQfAMFanEvqcuqaPw;n62v>A2ULFfnb8!+?-2W*seS3DY%dw0QAigF zTkg{l&=4Ps%dY5iueG+Oso>He?peC+rJ3==zMsYJL3H-PrAi zp$_D^LJugNCjS;wQ36lmVe_mPk9R;hvtHBd6f^TOJ|2@1E2~BxCxt$p+S?E6*^mzP z<8`A$9rMl36_k9gEa$blEQe6lv2&*J0V4?O5w#H$r!s!@O6$T)aa7E513^kt-1zH4 z907xkyctcWHZG&2$KAGm04R*PmWo2{U3Tu4L9D3-Vgofg+b{K~HhjI5@KAa$-}GuT z`Hq`e^oBGHY1wxOD7W8nh(?PG++x*LZwWVqWqxrsv)H?hzwO)YR-7Kke|>#1RL<%{ z_xPy1m&kTE`@NuXv!75Ewi*NQBwh@EE`~R+>Ug~(YA*COVaJ%?lw>YZkT`(_&FmR} zp#^6mMc*-FU1coXTykl=m_AMJQpOb<_Z7yQJixyJ+x zg2y~Ugo74X5ojx4SS3e|3e!$yRWlZjQJNR+6=0qgC{GnsmRLMG>5FQw8XO0kfd2iq zrdOgLs&3Q~=U>ZTc$PY@Wn|_WgS0H1esvGP5IqcKLxmS@AJ*$`IB72lJp(lz$*X2j#YOiar2ocK-(|%O((1e3Ez(xA~l-ZcLn z{|p?12b@V+C41hJwA+n`Ybc57M~;psqL-tF-kUIEdV4gS6W}uMl%;(PuQp-C-Y||4 zQCMn3#(E`YAX&NT+h@T(_|X!*Oy&7v1AxX~GQN^yI&}fh{N^#m+l%D+W8<&hhoDi1 zHWeEWYadH(XlT%6JSho6XD<0Nvez*#C;xgQeV!jZl+lm$!OHWWqhHphPun=`ZSXBO z)*s*1ZxUWSs5uxaRBu21z@-bB|NG6gGADInbqLA+CJc`13V~5#3c$R9x9k1=EO8g4 zwWmrc;zP!73}v|wM+d{3xZ{bG&<>G0SeujeWv~ZFLc=J(wON;D zx%N1F@zeO*g)uYKdB@WOqr_tq`)JE^Txy) zTW9k&9!4qx5n0%7@v1}&fzMEG`h1GXiQn(a1!pROm^6tsNMdrVqp* zRw-?m==(kKjacGVU(UYW+T80yfbo*Tcd;w&^4VM#CF6f^*Rjm(XTVoPJ}cuo`|{c- zb-uvlX1p@aKxxxf>`mI%fSuVvA0rYkc}s^?SQQ&raNBgSmZ zG`Le0mP9|y@MO{=ald?(zW5!|TWnd5WV`_>Q7Tzq_oP3xF-U~Tt6#g0@CzSTu`kK^ z$%dH76Rlav*=VZs-e=)b)gF+*4oH1vFe49K@(GifEJ`IOd&cD~Q8LucdWjB#1J!Or z9LYV_#%GM+J31N#a3BFRCOOhttAJ_f=9jl)*Hu236w~f}(${Q1^pU=i6mhoK%H8^S zHv(m-fllKZ*mWXE+J1Z=OHk;&<-Y7ro}cv9CS5YQKbzh+Cus1EW@ObT;L!VVdEFK_ 
z-{B8SgL%0An&YkC15OKz+TsI3f5?{Y^rX^Qq|5kpQT zz>eZ(C*cug`$M8}g8V>%b;MjOW zwAKEcav)YKJFW08?{%=fNY)H`@NFW>m2o_r@)% z0L1egx}D77PaD^^LVIlDcnXzeV_qI5IW6^n%cz$~KeP4ePO3!V3z{q{b}~q*{FE;0 z{pfHkrCnP$fTZ|FUOfLMqXXxayuERX&t1b;+iLsH@Nfx@hpiK477f&9&BgtF=XnrG z=L_TU-QMzv*!dW?1qfiH5@HFNkbw~)?5qT|8b65xj&9Q4d4;cCfnmvoDCsAiy=UR0 z03_>I{l( z4%gkXL|@+WLH8rhd8eY^Q%Wpd31Y)$ec4l3R(wUZ{L~w}(pWj%g3#R{l3sfk*UcnM zMrW*F=$;2(gY1oY?j|9$Ir%IWS*uTV?Y~LvV10GS#$h2_*cNPziNWre4juN^`Q~q6 z_uq9P=GnCW@CiD#X`L#bNYV`X%7CxJP>KnfX0XiWw zn>55V#VhIt;6dyaMpkTNiM?$vY=ynJeww~Zzcu4^iJMYi5J!P91g-s_BK3t3q8u%v zRHO3x&@cy6V0Xk%#B%FuK*ybZ@+iozDC#x&r7wkmUK@=8l?If~LyP=yQF1FO*54S# z*+^oB%wg(>4_`Cz)6rGQd=R|yVCMmoDAc~P=zl1J=9mAf#@5?!j1a|? zX1q=yKj1lib$Osjq!97U`21g`%WL6(wEf1nAZ@??%f!~Fe2$*zS9B9nNkCm5Q+gY` zv7MwVu2@@R4~vxZ+?gA^NZ&Pc-n|N0EUfN8}hCHVzFi%v3G6p`l#yB z?(-%f`J4iWq*W-SZgQYv{{gGr{dC)i!|kayGM<&+Kt&og7Z(b^c$Yu`E4>2k&DtDy;Aq*BxJ7;;XL4q-oPze z2?#guK+^cP09TctcJk&(-)>pA!rynH_aKLsl_?zHx6%+`qx3eGmGlp=6_H*@np46g zHhaHmoTHR;?MnMR9i3kHkEq=Q;|0KciW6TNJD>)?ouMzR@3sR(-NE=kKuTOe92Sw^ zeng-QL)I<)s|D!T0ZE0+KuIhM*^V?pqEmpjeK^0H^XBK`=M50}<_`tDMi?6a31G`p zeM~up(wqgTfqkZA5jO)49Y+POGyB26MYFGv>^(k#fz+ zQ0?4CfWT`gRSm-7Uwx1Gk$-a%!oZI}lAa!D9U*!OqDkJo`@#NpcP{Z|P18SSH!_5f zOl5n|w4;BFfm$yBppt8cbjvyI(aSAB)C2RR1o+uopY;e`#mw6miDDv8+WY*U_G$xU3j5+op%6M?(oZ)Yo(BR$ zr$D6#L=q*nE7Ifi^O%UDH{bImEOXy#{MYpcid|DlN~?Oky9;`H66iG+0p_AF*THlR zVGQic2zID*m}Ma5;LakIaPmg7v;Y7-pL^B>L1V$KFY!qMffoHbSgbh(h@EATL=ew8 zHV|&n->DM_ES+I-CoPmq1k-cQe^Jye_Unh^L*GDPIRnN#i00eob7C}rx1EZuY}xBs zt?9iO?$bf9%Zay|0i+4XJNd11EG$q>i%aq_7_V!|9wRsD+#9fG6&nH2@1K`OW7~<* zOa+JvPx!sX7rF02iNF}gV&9T4DB)-^fXzW7)uLE5u-BPP5J*|Dun%&33k*qU0v$ef zw!Q<!U?7Ve^V#9C!kBO0s|_bxXAYZuAy>N#mEg{5KS%T6+Hg`0f>y zcc`3+Fm$w9yhDwDfB+9fX-4UA5GS)vixZmv1`K8VpopKJ7{zEmEeDsHZ-9N=iFm89 z5nf^}bwt3#9>)K=_Onhuuf(_qqJ@eK$vM`)!jTt}gspt)zZ|Tyync)mSY&hB-&`yG zuz56RJl#3@_(2JxZ@l?o(!HkAt`OChA7FpCxz4!P4kqiWri5+9#(5i+J?eDwx{Bm^ zkU}^W!dY>cIX%qsqkCUPtQ`%)WT6c_(TO@|O7tIQE{oaTovym|F&!L9%j2@t5l<;_ 
zD$4m8wM$<9IQY`TP@QLqb*XYTa7Nra03_jdTo+h5S5*n%|IM_LE)6X6->R@@?}2)N z?bbdgkA?Z~2%vX5l!57>7I{k^?gldi&3a-+F&;msVLG8G$+_J-m5p-`i7zC0l%Qbx zO1brXP#~n-sdI{Cq@Im;6s{tilEOgQ<8&kh@g87ycj3<;%Cfm-nKESho5VrZXZCtF zV~$kPBrqKXjEsRd{X^Hn2kM4ljqYt{SnmT8G3DmqIiL)Gn#%iY)@aOwc)ojvzJH6E zgSD71S?xU_o0^}#1H=(yrl4c1021*L#Bg4El2$T3Nw&e0g9qPnJdV1T>8uj0&MrHU z+wIZnh#$tc=!lB$Mlf^fa1LW*t_OI zQC(Fy_7CL34^k5t56{yMHG8p;Z%i28JU)pR`q?%&sLh-0VGd4$l8u(av*IfE;DlO(tXc9*b!|Ht-EU*>MVrciov|V{(^+b}p19oPRB4?r zU}0TuJ_Q>Ng-c?rB%&mxfl~UjmVZ}dYB;fTF1=Q%z0V1pJG2oOBHzg@|kNZqIx%J#VCC4{N=KP*< zZmCk=A}JO6wU9-qkvW$o?M<8%?EWcm!beWG%yHBb^zF}tn1-vAa!J@qM_35K${}8k z02Z&ELlT!sw7Pq%3nyRMA4WXKZ;5uZQ3?8=Kn9*AcL2l_h7vt=Rh&+kM<bE!NF?~rs%B?x$+Lr=0gV-k-Tep2W1XeTMFp0l!I`f6zgj>+PPcTA~taHfU`8%j`KBe!t zj;3M)vRNV`q?%lUfJ1xMYDRRzkABYn0&GD|s*tBm!pUJ8*A2dJRI>cyf z<@K*42{rYWpU=HgMdEz&q*Pe!fpVR!(uwo7M;yP7jOP>D%iq%QNY-fLjgQpIIA>Um zE}Qr(3euYXjQB$lCMDd(Z(PB1-UjrftnLN#3R* zcmwD)kyH~}#*YT8{OCdgR>Pf|X5Z;94C#8`kN}6-3ms?IZzu@>9ZtU*{YHz)#qkl` z^Fvq)m9Wk)?~15A*P(8uBJwK20+R5smn(%?$$86nyn01mVsrXtO`J-5M1lq;2pjON zQ0oj5Q*Q95@uiQkAMoKr6!Qd^UAV+45%P#oBVJ!;XCGCt-9NGp4kB2F6Or^A@Qnm% zzn$Z*AMBzZoEc}fSNht{gLi~ApdLKb+(3oue1G-kYc($?x8tHG&co~pq6*NFUCw^8A$vjQD z8JeZfGIXn-{h4@$gN>r(=EsO6uMm|&{&7B)6>CHr%8-s8p*!>j&z%rP@{}SIbyTo* z1%Quv2UXP;-?no`0+Xl;t@rMPS{<%TF|+sHaV3iK*WP$vPf+p1B>LMSsThQF8jJ?f zjFSUp_d#?J$7UStz$Qu(E^4HpG)`mhHT61-bU*1O_Z;pH7ACwZhLAN4EN{C4=>g)tq| zR92uTH|S%rZ7hzx?&teh1{4qkQOj&TX(bz=@Z!r85}6e8nfGG6UqC?Q(`(tzO1gzz z4INbw8==oXl>%xut7YyWBOkr&QerELIQkRGDnu(Xa+cxYLp`-d>BNukU);%20)l0C z)Z;V?8sCuc@!+~fhdECfiPsg1SAq*hAIWo+TOT&r;j;sY$Cbp-UA1Tuq#)bPXhjCh zwQlNj;8%D1eZ!@1@aNY_7JN36-_G zw3+Yo2_K(9S(Au3@~YNO#Zq38AId@CE&ZTTkbMovwxY`~G|w=8_vI3vA;?T?aAhI{Du{#$uJ(hYC@ zglEahp?M zBr?5BR__$#I;p8uxr^D)7F`na;!{b;;4e0`mhQ3sAj0P9#3F{4E)jJ=yb=&hec-wqzl%|4f#?y9Pv&Yw&Z^7Q! 
z)c|5cQ>dG~fkdzm!6Bv*Mm|rWc{&rx>P)lI?T^TwjS0;5PpcDSB=e)ZwXC|eUII5< z`g8?>SGV;jj*Tn7Jt<+$+WuZgDT75PFW%JR*yz1(x(W7DOf7XUsJZz{3CeluBTtJ(^~nL7XX>E zvhogjDqRmdViIf3pGke)w!Ns~_u$B>*mGqS$7!zoK2D)S?>WGHYdDR|*>HBgWFV#1 zjL|-D+mCpOv}DUMC$Fqh*|J5ldWB+C@_uYc{eXS)bb8K0+UIJD#)VU0(C~r@!^{9_=bWv6dbeUe8S#dPAUO z^h*0f5tC;fQqzDpPmr@P0#!*O0_Q?FKf|9`EhKeP__7y16nDzr4#`N*`A)BvW_otW z0fbcMTsmb`;wEhO8!BV3y4~%5Yohy>hgO1;g&ouDN zDyrwCVg7CfB+=vp{L@+^*GniQz&?>xPuLLCmv@?WM>{dr5>Z)FB5l3xq`~`YCFe^T z%Kacd@DdX&A-V+RmGcF@^?7tpMrx-cpYGkuFQ*04_A=Te%Oe6c5ZP|lI*74pOF<_! zt9Wa=)dISZb5P=6-1iaS;PqDAiD#fTMP^OJI`bm%gLT7Qj_#MDV+mTHuyPVhl~;5h zx4Qi{RNvYUiz)^6a0d1z-`LM23xaODO!2v+@T2u)j}ZX!5^64Tz2L$#$HC$_qUNSr z8QWB3{kn?TXq{N(2h-(=)isHz%!X>yrMzCr6ACw+aJ(H0f=yhn3Tvqcz{pBskX9g9 zetyt<;q7E=>0n>NI*?DQoF>(^#&^``gr!xR|CIp?JmOYx{^V zZS!AN@Bh-y@EUA6nVI487UZA4Pqeasuvjk*=Z)DMb2WZzog@(r|;B!)z z20=HJ^=&_3yzYeHnFO8xc7w++pd`c(tk?5K-gU7{ta;^gcBduWOiRnw7tVK$bQnJrB}3B{!#LpW0+AxXfI(%W zxS)?H^RPeV&J;W6D@Gsp#PN7)wmI&Th(}kCGG4t#9v{riImP{0KN>F_s8GJkQ<&Pt zOHbrh7b5ABB-!0&Zvgu$++a%~(Q6X-DK4!hYzox^#veJ_PNboDKR zm!#ESyQ8gVGr@1NYJEpDa_oKe$;PMW*TP-HoX+ReSJ!OalZSvX_Q#II^8(Zq!V<TXf&sAr_UWO4u8(yAI?F1LfQQa&VQf&WiW$al3H@7+y2eQdBuwJZ7GnYeXGs3cuI-B|$j6ad~{9o3Z_8+fd zQ%KZ>nlAn`m2cLzYA{sTsSIYpSf^M%o)t0eF1~1r=ck<_zhW?C*zL=qaQa&-k1@JT z#7vUvBMMqJjT=5W`26jVN9l2+O~6s6*{@H5L5!ID@)Cw9LZcZdXmg@WASnj5dNIwM zzj6h&N>3mFGeWAb{(+w&zlXi1QTFmdZdL9eH#rM~H{Gx3(N==4EqrhsH#VYt{8I_5Ky&z>#RxNB=mK3iCObV|F=+L}c=&%^|N!RstStAjJ( zPyW4>vV(z_2VZL)*8A=o8t?R>EsIoc4q#f=mX?gC)&f8H`%bZE#dX3=Nswj6?`b*u zey!ZVegX=UYdY;|)hdTu!>pWkdxrNjGy1+9=IekY>vfK~dnQHkmv^_95aet~#)jB= zqmt~QfpbE&w^S#Wfxt2OfbLp}w$!4Uf>S}@d)2QduNcz9!A@VRvru__HustJwp{*C z+;1a!JQAj^M|7HA*V-Scq3~#>Ndm9kG+*NlZAikh^72}5XP9)_)9Lvi8NTvgRF0I8 zO%8J*GgxYUt8#Hu5Ixkus1-+65nE7CxV2d*!ol^kspt*g64H*aA03$C_sv$r+)kMA zdZzR~&AjTzKv6r(w}CCTp^AUP+O@ji_-rW-oz;P_Q@;0XGt`1e5OU{CI^I4GY%0;j z%lz;dNF=b=P842@awe~%m%U8%92TkK@sng|Pgs~r&!xgLdG#PpcJz^&jit_)@nD7f z%R0>_L=0A1k*7$HE}f<(ViV$`td!J!Tqp9I(dI*Vm_hEZv`CU4iIEtu794gOnHSt| 
zqxL$av_AZ9zA=$!s(Ot8Q}^a&o!_?>8Cz3vML6)^#YGcD^5yX?yO$p74{%b#=Z$k) z4ak8hPW04cSvvXsm0FkA(j69hsatufM6b*wUZu@@KbMR!c1{(9&ybkW5l1|tNXa-G z`!(-*0p_{Tj~6KqdWMDIj6{@``HnlKG$dv+)+0eQ>IR>L{c)f1 z3gL}K-(O(%Oqxx_>2v*-qmU1zc{xW%6XsezbXh4mGsnz7(jlZ`uAd0B^1}?t~3zs9>Q~SyiwNldcC*EMPYg#+-C&r`;!!uiSZrLSi{G%7ZR%KPE`+cjOG2O zzamK#c+(zsYy|E-A7$r3VZ1`!?f+ixo`K`{o@_s7ugxF*7C8m;el=duAi5&(6m)xg zm_787y-&c*%creM6#*@9e86CWxbP{ecH(5P!Sf<9S9?!}T~v@D7~Gwv)y5~U3_gm) zl*=ecWI;C?#Z4iK4n}$n|9w*!m`01zJ1Kwv&_E6UrLRl+WC%D?(1Xhtx7CzGuTg^O za^9mV>^js8AvzO8HoQmWIdr`jx@!3PEnFY>Rp=qLk`qx%^lP3>iy!HrwcNTLDkqOm z%?o_g$O0NHFyMN@poL5HZw&vwX$!QebYN2P3UmoI>C>^6(gFj2Z$(H8Tpmf<`v!VV zGy&Yn)3)EH(DThwQ^T8MME8@R>*a8tj{Q2sAsr%q@@xda0(0r_5X;jApBu!IdtIi4D8}f=fP`F zdBGKSUa!1#XypLw@cQUTi zTH{SF-S}4vFbP(d{(h5D`jgl4vcPmOi?)|InUrB*WN&Ma1f0wp$0ac6uj#JQoczX{ z8Ho{N%zF8^aR*@D^dG~7Cqf)pc-FIZIt*z9%0(`VdT==l@I{A5aI9)X6s@VV6Rtbx!Z6q;^S+uLhr1&q7#MXO##i@FY zpxA;nWo@`eaNqtHdv6&Rb=SR*3W5^SAV`AT8Yk5)wl= zlp>OnA~Jx|0#edv5BKf;+|Td#KQGRU^Xi;;^6@+0*n91@*Iw7PuGLnL{r7B4fLAsK z9ox`5^DRsqHX)^ASZ}$0MI))EIx=Qm*lCIw;6^1=;XtVtT%%-R41O@iycu};R~{Lr z-W}GW7m*YTWz^HuYzSi0Qy#OozPmp(j;;POE5=|`;POm$a?tyj45p{-6_N&z@BLxz zde}y0Fn8YjapXN-PP0t{=zayBJ3LSBUk+?PFeNVA%0M0Tk3NQ9Sq;}0=m{x;m_*(Q zrFgnq;i|A0jwsNPleIB)D#XiYAYM5hDLPu{vV7xfYdJ956!LFop$%XefcDLurMefvr?7d zCc)no^V(l~?Bh;Wo6$^EKCC;XpJ7Lv5o7Re^k%&jK0tW6Xl1gmtRo@=#=)q}L|;CA z03Uo6A~y8wFYB7Xx+dRNYW4n@_s^(GpciKs#}w$`2~0mdT?VoykNtq_mLK|0@S`7L zV7WpSkU^J!cgkUvYx4=K=HL`j{`Ev6Fr<<)4a$H0oFKUEa87)V3ibW(kSd)WjD;^X z!O7hhGBdf$=!J@w-IQQIGx6Tak-cEMoO<+w`kA!8a!8)}1nzpBKP}0A5}jGT1&rl; z=Z>e=OJtca{Qa|dXi+8e{D2Q!w-aATvX@($9Aq!4%Pk!T4 zu1dQ$R7Px~!6JQTs|{1n^(FDCEG8gSxm6j>gKa>qX&)oK16cyq+bBonJLJB^p?Ki)`rO#5o6 zV!I#fyL}gKEUC%T1DcUhze|GzLt*5Cl|-yMQsW@YFPAPg>_M~hAy*Lv5$$3&QL?Il zQn3Wu0SnS)rDQO^oL^u=cX3(2m9}>?A1TNo*NQejvg-fTyFUSGQP_pIa{gohz-riO0 zOXmjG0J#jKzISoxS)Q_^U??8{^L`dCL_hUbZSKu?`gc++xlmI$ZGV{?`ZKy?nMR2# zC`k7!`tRNdGX!FbXZ0aily!F%-o7F6%m4EZq*O}JR?;vlB^JSEApCzR7QE@D<51>s{6=nA@Fa>XIkF+60U>T(5w=W&ET!>o*bZ2)^IF!&%h$#fAI 
zuRm&ei5hO+F&B4%_ZFYQ{Ldh*l3Cmh*k2oWze5$_*jX%nEpSd7HO~H3)Wv@|rfNQ8 z5u@%hyhnck`GdTlCAGadYOIGCCNp4f+E7kj6l^tWCvG4wjez`b!>%RFYNQ1m==1YTr&4-Z}>{J6@` zG*>{Ns)j;=@6sDwIu1c584SWx(a`i;s)DzkN4b`7XHcO6ggY11ZrcA!@~FA>rnPYB zFnwD5a>b(E0)J#@-bz~Xs%oDBh(>KIdw-T`SJx)`2LChC>He5F{N+00wNi%<8cIXJ z!y(XzZB_8^cl%$4eMGaV7E6ubu6#0_du-9`o+3^)Yb;5rR zk`7qqBYgO-18;pn80{a>yG9qo`*5hovIki^?EB`QCmX%NA~xp{ct=^k-Fr${fOHJ% zdt8~LKj~Tffgb;kg z^c;d8)dIJ88rw4^=`Q)7T_1fn9)Ha0lZ+uI1K6kKrh5nd`=)V`5}=3grT+v|tJ>*p zzALo{jP>x0}8Wj zmIm2>CQgeSqi517!UP2|Ie+K_)PCE#0R8S!A$LqF``_;A(sWp~s-m5%o~_PAAst%~ zeeIcv|2@6vL!SQ5qZ!nm8dxln6V~6#T8Kv>X4Z?lnEyPCF_;O9+G&Vha;r!&8``|#iMNB z4x>EYeKi!owo%(hk7^zYbb%ez<$g7hj)2@40{YN4BWKI6eD+1rWY17r$@PaQ(1Fks zwyJ7Q&FfvJO7Q)#$~BOi7N~HdCf>GtFE)ozzZ2@%4&w_&zcva~uiuCjzrKl#KPx3n6a<~Q2Xr?=%WSX5>#jnkkwRM zZ=6%a^i=JHn2*s>hfR9<+J24OH?3S>(DHi3v|IQ{fl-m;hToDs4&0}0-3*061d}QK z=9m-K*(BkvmV3Iy9uWiG%O|NrR-EC7?Q=e_`1&tdG6tI8OOc#9uRf7~_uhCE=cS;FCHFBY}|NrT$K7F~U>W=+GJcQv^p zWugP>-PKx=DaZT-iIe)d$=mrrS{qR6&Z;tckAikHztI+`&~AaZ>&o5=Sgvm2*A@?9 zhLS-L}-lZ67$uq*m>U%s!-CLomkuM+yuvOrkW=>61}lFU+%cL z$55HI&OgV;%#T$fvi6_zT@HJT_q|K1>0GuSc8uTkplh{UkzVUiFg$>7~ih|=#E5;xRwBFQv zJ_?WcsO5JL-`vk)I%orV80GVb$o|Z^y<)1K8KANfK{{P;uWQucps%)n3X1aS!)8QC z_`1pML!TafG)Ich?Zm(4^orzTHhJ6nbu<6>45)Xu%2gqQ7Cx2QwAYNOL;-4{xR+g( z(Cy$N)jY2D_f7XheZ5pc4(85Kl(Fig#aE9y=Yh`(?!od1rPLS)vLH95g_UjMl4XkBM+OWdU$XCbWiZ_uIJMkUR!r@o9#GgfQJEObZqoJT---2OY%u9iw@!D0!f;J2LxM( zVAcdiuXglF{1CXh`OMZfn@n3e=R3Fgk;)YK`h^*e<9l|o3T?TW!mjb_4Hi53lx=q0 zt*VpfOYG87Rx5ahP50G;7op9I88g2ZC_AWr`c1a>a!i)$$>tCR_wImMk}_R4o{U~5 zds#s*-26fPro4i0iPX^t&0fzB{m*XO#~=*%=)|iNJWWpM_M||Qi=2_Uh-uxgsiRGj z8H0IvIFdmMVv6L$TaF9$MAzD}*b`?Ahem zE;>8ux6$2aX|p}LZ*4OEa7$Ih<->EIJ^dk^V6_d1@qS(_W0KWgMcnf5Jivkvw8U~Z zR>qassOh>H%Ge_YhH@ji52ODb(NiaAL28X`c8b@`nRS z>Y8`ic5j5#-fJV`kn>oUQe6|yg|N6JOmu;Tu(vB}d5_3F7jL=~uT=`jWEAkTM2CN^ zds2BEU$~S1x=-q$R@;pod*`D_aV{v#IZyf}+gpTj!we7A`dmh= zsI6IcpWg=oGW0`~ypiMrNkZhha8f5yxm!R2v=%@L<{k~mh+z}%0WTZp2I@1y*3Cdg z1918K_xnklQKz0i$#S+sdl 
zjr*u^hA)+=6lM?8C6qA}J2_DDtkI<;b!T!8ZcXKV;D#SL-lt1yI=Up~b;>SldK+Zj za;G)QheT#CxiwNiaqjDUSUc=Q^=kKFAO^jw$Jik4w3zFjnP#06~Qj_IG+kqWHU z+^Db>rtbtyp*W0qeV$1wy#O1$wl7#Tj7niKBG&#eEkFAiU!t&9B_PVE=s9{|3H>CK zKrDVc7D@ zp)cqcq{y^)>`-6-tOL2PGDjwG+xgyprvI<4Rl1_&QyR{aTrl+uFm{t_sVgH!p?pKV ztxMp@e3~Fr?qydS=TZg6^3G7Lab0?^QIo_&OqMW1kNikICbw&JA{KJBNn_dFGy2sY zFIT*DKj(cYb74V9olFn$M&lH=7u}tUOeVi|YtMDx*7DP6;7`xo+Azbxb~}-EIr7Nk zLau;A!YZt`eNQ4C&vzZXH;oEuRV9fy~d}%yBHR_u}B^&wNuXbZ*MmFu*k`I-=ptne?MzGJM$Li3Vcg^`r%=1%z_qQsj%w^R+m2< zu^Hd3)WTe@n+v;-E1H;?6^Kx9ej}~|Bl*CrbFKAQn^LWCF3|v&a)MObqA*BH4l5Yv zq`NR~o7&)K2X9s~m0V~Yo7?cUJ~^y0F_!2jH==X&a;^0!6r5guz`(*E;h%U{WUYhJ zw~Y!61d&oKXI7*)Uas5cNt^F@d^*%5<&okux7+wBz9=ES%(!NVC_A2CYwNJzG=Pk% z?q!rxI5LRBE|m3Ik3YfURT|W`ND?)f!z%Un!13T*5j`X(A?P7_c*nq&q>?&0Y#^DH zUk`R0BV;$#lNHs|T!v>=?qJlGv&S>~*2OdPiMC3$Lkwptiug3q%Q$e^gG^>y(MD|w=>A%i^0N^_?I|F^zUppf-ta;t`><3wKINKg1ycvpx%EHYQ5jxSViOufJ&}}M%4NLM0ZDb7TkFI=d?{|7;5yDQJV|hV)oQTmG zW-OSot#I|%&qE9pJJgC6pFg$`tx>Mm7~w0fv1wQ7&09?g7QWZ<{$-iIU4HWllh}tA zuKq=`?>3Um-;WajZ>TBNAuDo{@kD)nBfKc`M=b6)jDsKLiocC^MN4f0;*|!P;mb-E z&19uFzvQLIWpck|a1S%|e^Lck;|zC6nx%eCc~iwg&ppl|7;D0Rsd|?xJn2;+9`Ag; zet}6KJ{&D}SG#dG6m(Gcni*GHDcabu2a+*hpR%2QLs*R?*<7;R5s^eyeN&4{@yY=w zqI^A63KTL57n2CpE-Zs(yS_|(5JJfO7A@fu%c^}hyF>Tkq2*y!@iZaVd=d*@1mIFa z$yF?~>DOPcS*v}HmtLgz zD+PftdU08BV}i$IzMtF8IlKW>-#opV(&OOtoOFr%4^l*It4K8X6EX1!Yb-s^N+}6T z#l{1RsD<>yDvI?7L$;ZnowZ)=>y5v#GmoYs)wj~*7@fGLe?OEDo5(0c&L+&RAJi@h zYrVfgh9A;fDOT-`ij|S8wi0(d`tF*wXPmxRoz`@+&IO{i^85X~i80*FUZ#?~h+IotB~UI}zTS3&w*D<1dJ3)0?Mt6H z*+3bC<*~qSPb-$?*B=OaKfiM)Gw7f9*cPM>U z3etz)x_3Z78R?$|sRsl3-D1dR`2^$=mPC-5b-@LBX^yPmntTf^drzmrta1|_cT(B# zboiFC(8m4%C%g2@Oe3Tq7hiGq}X#0Yl7~lTLiXj_-=y8yb=Y8 z?*6@*d0RboRYJPD6{qI|vM1MR-DJGpKLv>dMO)#hI*<%_VwwBeJ&aPDN@r&(G|dWQ zFXjM7NUOrtn0TyjRwCFhcT5hQO~VT<9I9=PEPoa$aQvy=o}69wMq$CZQ(3f`)u1m> zUxV?;=XPs;L&o@%+FJ`+7-^>jm);~1)48ySmyt8osYa|S(VSR;hPZXbj=z0P0I6{M zu@#0pWUUiQvNc|62Eprr)l`$TF4K-ul6Z_CvgltgYO&^$(KX8i!7EB3gWvI*XbroN 
z3Q%|znj#^QEGI6;O+v>K?qiPzl^4S!RfQt+e%vbe4;X2r;aD~*1U|$O^RbKiV+uU@ zhu@FP&Avtsa)nP%NoZrn;KWA5ipboVNif*6-0si*rYyOI^5{7E%Ewy+7q<|ZEL5q@ zB^6x$5c~zmN*6@3+&3`e>)jBi*-gqx)fslX=DyTI3YAc8Gp2boaR^G8SFq~h!}s96 z#85eX;yNb>RWA%Sa!W>s&}8Y-4BDYyA7Yo1+@RyKynm%5vqOZPt@+&ves@MCqZGgpn&W!XzFrZ`nRmwmo^lEYm;5qTI0dT6Z!9o`Py=YBq@=y zD&4zM#T-u-`U2ZQW}~km9nsgkm^uNk6-smjryH7%MgYlN!#K?Q^z7aOdDx`Q>6>by z9)A2}beiiGNO-?0#5Da;6`u20zG(z&EaP^*V&OZ8^MK?kjHhUQ`CZT$+fo-7J@4(D z1AWXfwio4|f+WaL#lZVEGiMJ~4s~Jbmi4n7zIZR%!mv_tgTc&gp#b0eMx_9YIvmOo z9C>B^pyNw))3=#0+qXCa!YNj zH9RkW{k~>%b;FUt_8*ShJ-~=x^N!cwvbGu%l zfcj?jihZ=3ybP)Tq;!=maLM@BUJAqT;1U|swzoF>6~0IP^Pd3g2z{mv{}Bx4Ly&s? zMuC|%>X*&GvZ9Y zEa61i$O@La_TeyIa+4W^ucuM1mAqorOzb}?X%IE05(eKfX~atXHcy1cHcAa2t&l_M zrrdQ7BIy0>?(zSP2L1&#q^K}HdR}GLDg=q}VChc9W4-_D$)p$Kpmt^C-^97p+ra=d$L6AW18C=MD$?Xo zGNWhUQCEik>naGw7(pD&+iL#j*Ku40n#8}p8$Aa`y;y3(r{CtOcO2|jI6n=?p^P#2 z?*8Y?gCA)!7SXH;nwS ze@an3%v6UsSuRy?Is6ZLBI~>u{DQC*SrK+*qR@_gkMCda#gQaqyzIU5I^tTRCD*RBZ3xq_R(diO^1zkuUp8Bz@UQJ=-$xax~FG8oRt zHjtOIFWj$7o$YVqBd69lEtRFbKy2~5Ine!b)fG-?Fvtr zA->&JUS+=Q{0f?!M&`fF1MN6Nje1JPSkHQH5RoktCGXs`+Rkh!ehKeGO39j)ZZ zH2l8G8FBM;Eyvy1MQ05Qfpol)`6lI*s^(f&#&43(t+*w5NnjRmNVjO zT~W^{{x~PNJGbhG1dNI&mI`3&xF{MDJ|rEezP3-i+8$2XyK*(!`n2noDDYiyXQ{fs}HK-BsN|Cxbw0*te?peAm_F{-7~SRdBFpdoGg?{}$V%DWgP@(DAP zHciv7DE|0=bHQQKn20)Lpq?|#5dE@8sQ-GE|7Q;*{jUMJd05K?*cArSk2E=F8@6xltwtbI$0784dE2|W z7ZM1NZpQx4T-eC>lM*ZbJ91 zMcHfm8#ld|=OyhhE0|;NJqCDaAZ+CU19gWp8fETl{ys2B_qfiUXJ&hS!4jx3eRczS zIH03}2$KbzepgZE8@xk+^JNv1d#qRm@kdALsjKrBa^~ytLi9ABL68RIYe4Q&|bl!=ol58x38dT`xqQ9>Ch<7Hc+_| z0br>suJ}gHgMf23lzw0u@lNC#8ctSc6WPrLOd%CAGPO>DT7EkZmmk}BThH3fpI+1A zLc2iqmkx}tV9^v%^OKp$e79o&<s+pP^+UIA z<+fGoFQ7z(BO)^d>lM5vFAkqx zP_o=(0KJcp#p;<0WtDY|enL*4M$Lij)ryw$R1K3@a@JE2F@|4)LjVEuHZ8fwA+}p# ze#V73`s7Loq?9~<_Zzm8PL`s1652KMP4RU?T%vA;hmr@i332O_zgr#q8aWsAA7$$F zuk@)xeaX&ftlyw%JgLIs%CYVJ0fGi$OU0XbzJ8bt^g6XsH6n+-y1$sYL z8`szU4XTVOoH>O*+0(KM*~S?%AY&pw5xBU8xB(&izCE4ujv#(swXAJVxo*(OQ*v0U 
zk52k$7euZL{r%S`cHD>qyHe4JF`=1fpYZqkF^!nU=gyuliU)qvx^-fpNYk3S^ZV?` zc8<{iwDT&brmxB=oBpPhB5WjBTU$IQ*mfxksE=`H`ONtSZ(SLZRp^uiD14Pu)%j-g z=*w#IpRhjh-Ub`M&p9+0QfKHrg2p8~7-ipR1^dKz zQU9QmFWC;a@{lc#1uAQO;7fv z7y#ynEtgyy1Yt?kE9b0(*?n5MW;8rKbwTGdD^|NC*+UK-7r(L0UPV!m zB)4Ls!V}~VmieOIr$Yi2shMIZbK?(J-Jhph$^v3>Y!YCeVlK?y4hbzrtS~pEAKs1= z?XkH;+Yd+><;>QflQHc8DuU_&awkJAIhx7V+TZRrjY7@51UbzdDg5 z)1vg_VB?d+0;^p$^T9`8P#w%BYH|qJ0 zK$CEcGSH;&4B9{f#nJtleA2$X5x{=bC9BQAra|R1=OagJ4-YU|zrH$*2thnmi(NuQ zsBe`?Z-oOVE=+s9LFZp?Tp_11Yg#}1n%o!*{Z8bH5lU-WE;77``h3GyI!3UtP(^@6 z^Gn%tszKU891WxBIn!TBX|ZMck^;L!$LdN;!W!zHSnxYs`rPX2?eaZ+eT@tpl%KP-Y2qL~M zzg;8%hx=6G@#V__(X6ue*7{&O+0q(?L`(?c#2(Ye6!nrepq(;=jV1#KFTCup+M}~n zB0nlOY+FM*2HSP!MAnQgvyvXA4htn?S4MiHtzdz|KL z;;siP>$A67zF>6O{j{Iqg2Q8bQABfp)8wyE;_=_11Rs4?=`fmb(3HwaCs(TU(sGl8 z@ao_PMkXOO7^;vFmY&Earkha6l;pN@^gU;yu<*^)K)E6J%Oa`U?4;LgK>lf>jKu8q zGn}Ej%xA%p4+1PN5~9QMA2c<D|FHP0;G%o6NFEk=jJI+p%J%NbTM&MX9R52Wq<;a#jY_ zrp+tbI{wY;-r+?bgyPJG!{WS$7SYM(CQFZq?fD3SSnDNnyPCDgf>NH%3c8lCx# zWG+0OE8#F9vBcQLicD~i(8zU;44W|p%Te3e(#wjQX+gTPXnMmKBv=W9QUOoVh{>@R`^yWKVi z7DTVFd88c^wPmNc0UtqbPV)Psnk{8NqKKkip*Hh#1Ow)A(3xA&_`joo`t1eOQO|Li zW*}E_VUah1K1T|FA5%ritNJ_D0!r@-NB2amLne6iv)h-jO3GarPNw?82kSlP;;B!B z1X%pD*G+Am>Q+o(-@PKq*|KQ5mBc_v4DH8{$us(Q?!91+7a0v#Qih@r0$Coh{C6foVBN#;a=U4&f}VUO%lA7vc!*O?RpIyMwUm&W9?<7(3kO(U9nI zudWmYKfod6!-M8?c%#N2pGMkjSG#?ay^lHf;a&AFjSHdjZT#ohwV%Y5%td=CpdW6A zM{R&LFyWG9pyVZeSb8Wd?*~euuaE^l{;R}!MSTSsh@Tw1hO;-8V zDv%H}eA;q!-sq}9)sM-g&@({fNN`kAT2)4J7tsqWYOD?C5jgnf)N8zU{;>BD;B9h` z=SXf2q}R<=`V~f7fualZdt}+PoH`wA*f`~r38YYUdz1PHzApL9S(!Sq3KYCl?4S7d z>&gOr*OozfX$3)w=T_$KopQmxU`1L+or=|P&bSmiJbB$~)3%vA)@@Wzdi~y6j*}#Z zWaqR}Z?~4%x^TYUg3z{$3=;)w93}sdfmN*NkJ|u zt*)Za(m9*F?`_W@_XhiSbz-VdjLEE(Hc3q5w+bPoW4alkpLvp-hNJuSIEK|hIogOS zOZ5>YIf(p2CCYN#nm#)Y`1f2IrjZnQA( zc@6jaC_x!VdcHDe968eq9X^nnIR4Gllo9>@Gu@#Bji{!SD(dBN-;4osPvMRM3tVlK zpQ)Rgz55Q5n@+B`H0!8<;moe$vk7y&X_pbx=km!H6Yq_c*tCI#9N&a4I#o-l!S*4d zFRgWR1hde`hzad~?LX1vP!TQi%BD}wE@YHFlas1?uM1xpj~*zAT0Ty(PT$TKW9rD` 
zgmv$16EC8C>k%Z5KjQwFxVdY=>S=MLp#z-jrtV(3@ux(ttjZXhY;yaY zfVwABmJx1%XxzI=a;?g@&^g}*9E6ChWXYq480SPr3vt3kVZva)-oJwwZ2>f_q>$c5 zxT{S(oo_tS@urYgxp@^x8#U?>u##yeBDpD=EZBUsMxM(8`jMn#`nF2P`}LwU-Awy4 z-1TrB6%u(f%XYpAsK^6Q!Oy*H#~xfQ#L)3yQ*yFyTv5-)RUZ-g{nSQs zOA2cIt^rIWaxH<{_FhM=a$V!a`!C-DewtEp;i=HV^Lxrp$(Uq|g&J|xi|(Uzw#^BY zTg04h`_b-&XGwnjM&DL7e4-P?-b;On{YFvk7M3n%Ofl-68tLCZYuHy{%x^V(JYcyv z!dh4i*c7aU{)6LhoAVfO1Sgt**#gGILE0_}RL@5qu|rtQXa^Mz2Li-W5uXw$r9?GamRoMb*!wuZ<+rXWt!^Dn+fZ>E=Ua zSW9FS$9yJoeKbMdz`wYx=ieJm;r7DoicH$N$(xQOM#S-5Y0*yeR~GeNxF|)f>fL;` zp3EX=|M=X(w(K)x6|xZ#ay5-B0`+WU4$bnOybr=NG`%3_9lfB4BJFsCG5KI8lNE-9 zy^K@gfbk7N9e+1BfCK+F(#vgC_8RB-VFoLTPCzS>T8Cnhr1rNa-2GWhkn&~Y>i7~Z z(U-o`7GWyPi#XQ+>!yDF>NghFMG6S24)Qolgj;-6hTdBus>iv`y?kw&a&Ir0j{A>9 zrg22ib0diqy(M7qS520!*CsR)-za!X69)DAglu6DO6K1_a{`-=iAGTR_5w}UT+rSb zI9TD2Ln@+}?1!i3&=jq(ty{=Y*PA1T@vMmQg;ZC3k{Q<2S63{}=)dKzy)F54Sni4n ztI3DQGL7hQX(jddAJRUqG+VT#wV|yk^Es=@3ZR@*ICNe)@gCAcg{3WzD0Qa6Nk)dr zQU%4K45LtDNEz28BQZ`1x;>XWUrzlI=W>w}{>3M1-fkL=YnT@3avp1w>Yl_O7QH9n zG?b9Jl;ooHaAR$TXCho6vwy&$g|P3AsHv%HxxQq^gL|=Og|KV=)nwzy9rWR10n?Y3 zN9erLB~>h5ehCN8Dw6T|OU0pc#v_K31kR*x2&eI9uRy>SUbQl(&r%dAF89X4Nat6G z^q5#LIxJL%jg+jxic}oR*613M;Z>EAOLh+M&`84u4@P1*jw|4^Tl-ksX;(-2^0n6rJ>sf-2 zIq9A@7pX^$?7c7ev3`?dGA+R#V`)#GNShCyf7~cNKO8K20|7a9-@miLFFWHO?hys9 zJQ#WXfTyV6bmJp(U@K4OhfLSQcfRjd`;J(CrQnHu<_<*7Z**LoyEoFahpvzdR(K~q zyV@y6I`MFrvYI7Yo+0vmt&p=8HJ!$-$(jq|x*3tDBC4}QvQuoMFK<|L;=t=4Cp^co zZ_`gc69%hMF>CAqmpzMEUW|u|tefXiz3v|K*9v8hqTMO(Xm>Wh7c;Ca&)uq0Ke;-< zu~67`Dg6*T>ox5I(VxGIcFHvJk|TwZxrE$=;GboPCrJ6w`WKefh>4r`|s{^ftQVcr-HNTGDjpnlyP(gUEroc>FP z&o!Y&MsEXXgAbNZ7?m)ykSpIZxZ{RI?Rel0UymktoQuL_r!j}T;*vkd52^J zoCn%v@ODq$gD1qP|L-T1f6sa)`V~+WZgSZs_J?p-Z>H6iUtq^gF^?xhe9%&$rZkGi z#E#QzoO)!0acQa+)i@Sgg{RszA>;}P({-$Qrf3cGere58k)6|JsdKKIbMQG!_&8hLG(D{_Asb8 zM6tApmj<+5DS5yc81nlIZtf~S!y)f+&?4za>5j`_jE3_MS}`t56M~1K#TgrRMm}Zn z&M)vHozSI~HS^iHdmkO;;r;yQw3&n@q*JQDf;Z~1dKqOJWbtaX&XUg7OIcR{yy;}v z>8BH2wgD7<$J&~GJ~HQRrG=G^D;K@dhds(rExVQ!rHC5G=J*0Y&PxE+!UbpLWM7#4 
z@`4$?|F2x2lpn+LhYRq}n52sZqtmIVheLh_zX3Kt3q2l93s*H;)2StL;ynS0dPz`! zMT86LuPE5pCT*p5U)2De{7`g}0t(35AILf1NsB=znL|&$U7TMgu3?}=;$|bdg@Ipv zF*cX}2y~zn$?)fFO|&a63E1NU-tXRS^wU zHLp^G>o?75Py$u9)K7AGH`_k#f*isHA)nt!4CdAcbkz|@uOur!A2?PW(?tC%Iw;s) z!%T5jdx=(7K!Q%lq(GoNx90Ju47bD>Q`Wf);f|wE_v`ax>|&9sD2RR+{qAG48Gjm- zyw6x`6Ohi2D3RQ>$KP5=PgqzbT|vj`%Sx2gJpTeN@Zh{M)4iW@OhLuDK*Nh$2f7-f zx6#f3|M2H1Ns)FKK-wL`zb+_%`+{gXkRj@R$%<&LhAyj9K300j-^_57Ij?Q@~cKw&$SIguHncB(38rE7qIJ%L3d^ zXP2ekmUf^k29TiXceDP})f*be;N>@Axjyxas8h}r;j zC)!Z1-LPf!W&S5xAWL;wWMTxEzFJ2Brvtr(EhFskw|s~Ekw9>p0SyUk$J$D;#PGKIW4f?=}T!&kvU zl|JwMf=%^*_})eV2qq5o&AnW|wK7!aRiJ^$cQM3#0cvF`cD?~x&}+~ZKWsEhmG{vu4qV;dya;di2KiZY2*+mNe|+2}_+ z;r?H_X+ckQ?O69=whN8=sN6Py^O{FO$HNTAeQm=p(A9iRYx?k(?G5`j{4*B1Bs)A3 z%q)v6TV`3Dx=T0>;F`P4Od(eosqbf^y-&gKJXdt?+FA}^wthMri=gYxP8iNrD}vhl z-C+e>cu`6EEYG{HMZPB*Qa4BQ>N%E3)4W{e=nqRYIrHV7x(}@K$3l+s}+7 z2^5>5yg}(K89J|t?~H%$`V5x_1ukK?{++&0ek-0%WW8N3`L1uh!beAdw=pc5()OKR)x(2A%8aQlg8&!XrREW34Ng zi5SWv8A9p^a((9GV8&AV*-Y{}^btXT0Pg%W>IWL~prO`AHnX1*yK7U#n#m(Z&rl{5 zZL1G;kGaa8E#mRXap#vg#K<)ePss73zaau$)P!$z0g6GoBkx$J%iTD8UFTqE_Bh3WJNnG56t7R_X*(gVG$AvE=1=p229|~vc8iQuUuB9 z5i@-E!XRxz<911_{kJmNAvtp+?)^7uZ9GD`c>Fa#;vf$z`WyBdmdG^Ot{<2b@D)=6=QPWw2|8ZLA z7b?D=_kko=4o2W%kZduJDn%%EU%O5>(j}JTz-0+}#DBe)74&1!y^YkvsP)r~man1T z5PIAEiFxE+rFmh?j%guHl8d3Fk|Yb>8^A;9d+Oq`2C|*9iFw64gA`CvBwX3(^K=~P zu5+ki+X6GlZ$?@&XORC)H$=Vc&9-xKNx8EuxkHOve;Y)4siwQ$=rA1eMM0wAzpO8r zMJj4rg@muaT{rd_hmOaTvh32gQWeG8>l0GrlXyjXqF5t&Deo+w&EUN9Gb!W`0;&Vi zw^7P;g?z2xC}41s1tB1HtU}jAgrXPb&Jo9vhP*_2A@dFA<4l$So$i=?y4g%`MfJsO zZjjlGX;9W{N9+~xwNXSr#N%tyC3vCN9%Hs)C9t4`!l!x>ud+jqms>Naff~2zIXA_; zK)-(O?f)I9a$#lA?pPwZ&4e9L(vL>A0B(E3bWz`|FQhad2Me2BNtyL~JPU4w;E;r1 z0I0$ngtm?;ypMgZUKqOOv3Ez<*~Q3TZ{)!Z#!j}V;UZg`xN(qoEbGLUzvVFrE3#r6 z-?`N-7-cn4P0%8ZjKE=g>ha3^){e^A$p_7#H>KVF)M=JyG>^A9Hz{PLc#gX>?(~0U z2>OkxombTrb4lihjJt8=%DCRW2I36?i#T70OZy1FM6uE5)l2)}^ca=b?A<`fxDLpK5V2KO@FYxySxo79x5O2tXeWRJ_RRP~Y-SBf4j7 z>|)s;f3))f(SS?&iK#&PwUPa1OK&`MIte-0wz_qFWT1)o+o93A)X5 
zM#>mdfx=LS5V-_mo0L$+EGF30Vny5wWq;RNc8d7@I2W&iu=)%%GMY+cTSdU>zkw^X zrgWm1s1e0K4UVactK(;x06lf&At$#VGoNv+OE7GiL_^|DY12U`o0Pd^Op0Xz%zCsy z^vjc2SkDB3q^KqyQ*ibF@};+tgZDl}G4x&^PpF~aAetLd%RsL5+BFsIYHVd{M$@}tKu-6Jf&WC3uTjl*yp(7g;D@1^{6Y;P2xz~uCJ zPL@4^q3>!MH>@?0;jKX)KJQBFghhmm*f-ImXVvM+V4e`rt|BHudaqKE@2KQsCI0GW zmlCK+ylZb_&%mC4uaJ2`O_wyg^yc7Ckb5KJ9v>Hmy0F;52GPmAooNv?o4|-UTj|>R zE0|P}LWV^kU+u9B3egpYq^3_ zZAUlkk}*kfm?tcYT!l^)-Cgtb%UtzhYz9ha-=9C;aDB-Fs_{xk&qyrmdujpkEF2x4 z2$Dd@`kDv&Rmh7|TlxNXqfR?9UAEgk17OJwpZmf+Q;uWp;A`Zp>K~iZx%HxW8y2F{lU-uuikq)d{*ZH686PFQh7V zX5(ZW4FZ5SBQq2-4HmGTsB$t|9V_4LiQ}}adeq1D>-XWpwm=fF_H79Inh?qKZOIwv~E67oBGqo;&2D$&x z0-1HIu%Aio=)Bw3^yM%6VB$wG?MVNmu3i0|Z|jg~{-TNKuOR@gfIMKK00sO5>m(Fs zTS!mZkP;!ovl^@~GZkokZQg|%%R%w2@+r+oZ8m6xR{(O<(JHXAIC*HY+WMiz^V$3P z)I6ks4}Fw|^1)K24hTPUK}oh-ijZzRkjVFpHwP;F5Wg_SUpRZp2AI((90mN}M*Mdq z{?#Ug7Z59~MzP0ri;Y{pR5;Xim2u>Rbo1L>m^SMx;qFQ|UP@Ki!_dcnOmRmP%8eIK z05S*~u3}nU7jgkux84xQ8Ho56zd0Ly2VY9TiYcn~+Q3Q~|5)i4)XQT1Aj^1D9Mn!; zEswrLvSY3A!V(-6#ftvS1-tMBnAais&yBkteo@GR_7%Ho>3`s6AB&dl*oM}f+?$_gPd9ucOrilR7(`-Trs~Uc-yIti z`;-jP)?(1T{@$11>kSleXbo@$-}B(9fy6SshJK!_G=Hu8S{y>!rH5s?`_miO;2pvF z*%7%v<1ObStS|t0$*R7Ma1!t%`Jdy;{%2fS@ep{GSkm-Vh_2g0EWs4cpABmNYXcYn z>H(lZabv77DrnQmN4ePDb4F)S&)D(#eQ$>EwSM z$!u&w++FrD&OwR|v;Vbci9k^JVVUL9K`pf1|LX{;&^`!(8qMcv3#r}zYsda?w%5gz zGQe&21_s*f96MNWHgVWxby1T5eD9&SAxQUZK9#yZKLW`EKg)V%ppH?ENS1kFxOs34 zL$|_xyL_EW<^MD4^@B+TBHDP(3L_s>Fq z%6xbUp{VAj&b)j;G`MTvfo<^B`Qm_Y{dvI8BiTX%Ct!%dtcMHxAui!GWZrT{u6&z) zVcH?^*SGTJGgKXX$~UO@%v%=J`bn|8jN&Lloaltiq*+?@^wq`m0NrNt=Lg!rPprX#Q&}-wFS>j|(9+7< zWSdQsNbfvZWHL><&}0W=i;I#jZA{}Q&%1)*{`cb{S%(ysVsn3-!pmM|OpO6EXV=m$ zjOBv<*xLUoUc#NI%Jx6}N8vaoXF=CzFMn=paM*(4tVKYwWi_$VAAML3>hK}g55-UZ z^TD1^{e7P!)LvAjSo~&9?#KiBxU#V)Fd2`jg77Q7n`scDXte z3||2+={h{Y^N`sgHh=9DO7y$W;3OjhZmB_&7t*bf7OKzUBm%<_jAH|2sCeC1Ovq3n zS^Bx+^}1tUppLjX4T8<~QqFwRe#wAY>jQyD?#I7}$Xr0LR>vDCwUR?f2O|)5w07ce z-50@deZaH*nWL@h?3YBb;1H)?aspxPZoX|u59^|r$2ufn4CUYdh!b0FfxNsv?Cb0; 
z0+0j^^M>;-#aUdHU_ibO=SL5~7!aM;0n#da2|q~0)i;3)+{_0SprzXwnY5E0;jB{b z2bF`b0SR2N@&5yK@b|nM9a4gUa;H4p8ED15iQ*JduptK-APP2*5u#Nu=-D zI+w{cJ7FH-IPX8RawYYXcaGl8ct9{$=g#Dc6 z35chnVm$qHzfsY1EAq$lp{eAf7Bx4+g!qDwLKpxGu<^RGCWCIerP?3)IaZ_dh?_s1 z_zGnKQEn#NF|a_a8+<=fHXzW_bgg6qoIFhVq8`wr6_M)vN`>rf7IAJcJ3sfBoTK4J zA*dc{!LL)>iKwdsYQV^Vr+KWdcWdFesl{oUGk6#R%!L9OSO`nfijoVl_w4)p>$cX= z_(Op_>j&enEreku^vx$v_z#>Xfw5sIKJ8GM5gX;fi2_+kt^0rF@D^2J9soNL)VK&K zi6&$lo1H^s;rBpQoAeCGefvT{)855qBS=uV3Bo!ZY&)ipu79VliQ%nV_gE(Z^h^?O zNG=!7)jjzxg1Z7p<#vvaMomoHMX`Hdi{(OlilnYmVivnVie*IB39utakh>My$EWI% zrJMs$7g=m}} zdpy=~s5p3gb9eFp#>PXqY_pEylHbjm`Vod+-cW2xeK~vz>KJ6=+j9MykbpKagkovx z1VmgxG1M`RIAx7Z-U-*2xrAMCMk9Tdh7|6o-tMpy&@qN`>mN8d&a!U z7u^U%EObp}l6hNHggy;y2e?pOIB zL@V{?59};S!H1weDmHeGV3Gq!XM1t9fyDLtj2x|kf}Y_W?rEQt7guzz@eZ9Rey_CM8o$y!_Pfv{dKt=J5bhd|lc3}F%H`3XYdH=gB z)T38I&moJaJA$a7EXeg$cZ{+M!R*3iy0@vlUe_9a~;+efwkzGybCaKecSv z;3Q`%I(aO{C)ZqKD?PN zq5bW{l;?5FJ2c&ZMsyjEBao-NNfjf|Uxb@WI_;APRkBF{;5GY+PRP=E92EFau-;d( zO7}PpX_>e9(=Xm96;p>quqCqy79x@dRwr;ew%BK^lLix2 zbnvw-5teJsiq~Vtx7~Eng+bg#Dy^5HKxC%QI^C~CpC#XN?5u5$h8^w z0TCI>DH}@fT_@iUQ^`i_4^F3~GrphBAM4(eI1QB+@MY^h%rtZxp%D$U#h5~(92_B= z5)pg6tD|=FN^6WQ$@*S_tK0ron6_jflbKE8!*LAmUxG^-q9l$rgT z>x7GqAJ);V0=|{(g-;=wnj21;GH>LOmLc`*SP9@*a!6mf?Q{mLT*>{-!Rsxw_B|9EU73yY6}YaaT!^3Z~sX zvrBDK7;5ZV?X>;BZg%1qH?zc+(FC2(6X1%AwzhMVU5k@KEM498mkd1P6c^1|o#zu$ zn{HvTFxxb0D|7vKhpQ`kse3v=zTl*BBl^?Bp}-wtx>1)-EmeM0DW| zr-Hm*N~ZbD8-c}$;5G&kZTV^xd%K66K&e=_M(a$h0}Mc4R?ij#W`QetBTJj0XdWgO=(Xj}jqJNjp@yhHo&z8`jxPE`F9kP*Ee_O8vEQ8ZQ z<9#Du>icnkd%0GZxhQayOmkCtY$4^YXq|}|cmF>9DD}PwVML!_mr3%XPHX+P{EXCI z6R(AQKk(%i4kn1pXzuc!y3g8KVJSh{09U5&(eOwFWPkOy#6vTFu}-GoyG^NzPwDW+!Q^HeBb(|wYJmTt7LtI zx&VR}6iJZYy6MI=>UyNt=pu-xl6|J1J4x$N4O2g0S7PtTj0vuS_`XxwstvTKb+fnJ z7mDn5(Pqy*6UyvGhln%QtA=@>(Z1VNAZHH$e%k_@a9MOFj5Pp34bq^f3#s2W(GtM`A#V9TdRh?je z!lG|dM(<}if@)~kTX#AE6qJvrgJObBD&6Oy=?4xQ5t1iL?_YR+OP@`jMppiaCL(c< zH>WWu8B1iAoYHs1FogI9`YCzNn;}#5K+@o*+BhL~m#Sl2efmL633`NAlngws$Z4QB 
zht0IoD;`7jyItuRuH8zFxqP>Hti_1z@OkX66Vf8yym#M}+OMig0fm-YoZwNI&v{zw zGQdl^Ifm!lFfPAz2>}&}QM{|@Eu<4~@9v<&y4;9Li`465BI@mT%#9~6z1M}dO@28% zRf%Wv!|4l4HQ2QKwvA!wu45DK;q;`O@&?+j%uDMw+mQp+wuCx$AB`L{-$*qN7ha47 z0zZOdk+12-gaGFAWZqYvi<(4{GDu1>&ar_vvxCfp!wSDnK8GU0d1O13J)-3ql@K8n zO$H_|GRLG2jTKK0R+1_wmShs!?08GEh$JVTYYaJ=;{*h4{1NUjl_Ll=$ro?+%Wzs9 zWm2=rm?2iYt`iTw>_M&b!s)XdB1OWoNyC0-RT6;??Uoc`-sAg=@j;ronK0vXISIol zC!>48Sd__A7VcCk101^~BF!$L>wJyD=Au&S9P*#$8?HfE4aB;A()84!^K`D{qq)Y zJu*$Y)j1<;aMUL{{`p{rbyo9LcT?~yZ zF4U0{XF14-@zxl`nvN4ui&7?Y=#UP3p%a4m4Hsi+_Pxo-+yUm`;XsxU9xuDlpg|+O z4|1_GfrdqE@>#Hq1Ig{0K3F=kn&4^)0KE>gW|$M>0s#X<(wIaKNVT1cPp!J~@;3rF zZ2bo^IiIo{a?R0E!eciztb311fkEPyJr-9?bwfVO{G_F(i7xY0wwG=)IU$ueyB_R+ zZZG@WoY8?XL`eiXMEChDHRg65sHHu9*-9(Oy{GQs5<6kuiNdfjh`O<1B_@n%`gw!4 zw;${>IWB3faRd1x7CyWgr{lIA>MrMdt#YA3VM-d4}n379LTx)(ljm=EB*-cJ|a8HWW==n22r-|gkTyvCtI;{=>dOxHGZLi_DNJYl@ z=yb;opH0!HZlPBEZk8sK(U->DmBdDvNs#{&Zz;CZyB?D1i=o*EcXVD__*i0U)hx%Z z6dyl)5q{q1LNN1sz`&DH5hxR}ZECc9qOq;``lJ(J_zAo<>W>CzeZA=Lm5h~3R&PdL zFDx18+Sx9ViWI!}a9!EWHx^djo=&rG$!(-Xd_bVK*e70XIa3d_(y(rf$)9PPbCiPd zO6p)FF9c~@e)wiYymAf_W^_x63no)w>b_Mv zo2QmaD85M-f4BIV$q<X5O{}E#-*Rt# zIn}PeHhlDI(u84;KSV?2yv5|ohsyToeD62?TMnF9KuGXrOk@>=K_ zrB|@@6T*lzNHNUT?UXUE)%@t!+Abk$+cDGH78QH3i$Cu;l1F`yr{Z-Avj47ldPD@y zy3=02L-<*f$15B~KFmd0+{z#GF>bA;@w5HbIF>q(6ze}nlo9DCY$;qt{$W5nJ!ma( z;5o`vuow%GJpdYl#~pEn_#J(risIs=P0&Zzn%m!fMc;{=dc^8VtaU2J zIPMWqnkupj_Lr7`J8MBGu{rW>)q+$`J{pn7@~&@31QT)LkIrnk25u6cQz4RxzxqGOb2>Pnl3lL;ZVj zrtyRomlI{RprZSv;`BqSn6Hb@QLFjt8Zb4r+1x7U!bZk@PJOk;PKI?Qxfjxch=Dir zG|X<|T%~r!(@*GUaM8Zn?3l+r;n@E^GhDuMl{=MST1_`hrZ-sRdLhkz{MHZZosJ}| zL(hpx?XG)0aC0l909q@g+@?(TUy4u($H%Yr!sRQ%9)*){ev|p=#(!M=( z6Z5SY4-9{5pP?rBoNL^QL;aGmI+p&!WpPlPLHB)FPVK6dZm^(b;G?jRSC8A|oGKrs z0nw#a?}Q8zEw6yP9$DEN454IdyZ$h?V$r37J9~=9+K$xy3w_nyq1#UJi7Z>2WtE$T zuhN-TV;r;*6P(;z*m4+atX^F)uTkhQ?Dr=)CG*Mg<>E@i+>{xfRIc$WZ^!K?qE^Q%Don*7~B+6Smmz1`rHdqDrCI6 z`>KR(g$hdO)#oK{@>DT9e$v=EbM{lnXrb%?O?=|7uTeOBgA^x3dBfSL*d>Ylm*@qP(s%@!Q 
zFY)ykCpmcv{I$%AJ3|L}8}*CihXMxg>g&hc|t$Bmz&3V@QaP5 zof|K`)R0V<1N40eZZE_dzD0A>DxWNIQ7cqK*Rr%utsCd)U)`>@}*SB;koTd5@Fdpha`t$glQO|5g?L zRq`cKw>^|nj|e-t*jf`svQSl+(^y+j6dftLXERUV)%FvTr|YB;cr2J_M2K&NNMeZq zHP2p?K7Pu=1OFcWPMFs8jBmVku;M0ZEuWCHkb}E@gPm~xm|mrj%M2Et^gSSu5$j<4Pyx{9kP}-}8$5kC7IasURfz`tJ zu0s8*P|D}GkNG@tUlcL8N3~3P>`$0{yhQW(fIoU9u*arKC^Rr!O#So61&-F}QI9?_ z%j@&huZx#d6(*H8I=G~mlNj|-U$3<-VClDrUTzH%y^&s8d=J|)kY|Mdx`;2Kd?f=g zdwc!XMlQ70HQ2ebQP@7$tbzfo?TKD!Nu$(>Tj6@LCrTluiQ*Bv4nF2!+NZ1QFhqu4 zk%}2%U56c@T=FqgzVO3*oJl<`hQNk!F#h%l&YcSRqFln=lCa?vN`3jY;Uv!J$o0Jn zvXuqEhr-rZ;O`t1PJr=o<#vXW(JCYK*9Lbm3-eK?`qskwx*i@X88sQ-$Z@5Zl&Va@ z<>WZ8aSy25XzIa0cXQnU(tIOSnWo(k)^Wu zdgU1{XLwKGaGGs;KHIY84JEv0zM$_x*^lQ>)H8~r^-49`+PY&ijj%Hoxn^yrmbr8i z3LL1G+6Xz$#F(v4Z{4u1YSZ9;t4hj}>+J`$7-!|;lC1TTj@lN!4qzhRy0$ z6h9$$`tDY{LZIPP_&QbEiEF4IFZZ^3mnk>Vy)}x&a3ibiA>RhW&%-*lwVJmJG%;yB zoV(OxE)@rQCLoy)D$m9t7tX}-1s!AzUKcD~wn%4~=zkZv!QQ&3PZ`}q9$)p5yo1iH0vY=J_@(n z_SqgTDsK6mnjAtGSEO{T$7@ZKwCduKQptF=Qk}gnBrkJvd^hF=7Ry&E(YxJSZIf#m z`9^Vjt}Wk>B`n*V^~_nxyjW&fELM4Ir*U@%>9&tLm#|VQ;KbP%jI*cgd&I z^Lz3X)Sn&kM0)N|xU`Yk++a9hdnpxV`QiKP_-vCCsc$fx#G)wTR1;*;!!M74e7$E9TJKrAa$i{X(=%kA%fS%Pm@PCE_RM#&F-Hu$+V!e~DXvvT7VV z#omFhwGrGvhr)7XNiVUOq&=~g7QZob9E?}-k!Si~X6|FOy{TNFaYg)tnS@{h9aPr) zU3_dSpd?Va8ty|atKyc{s!1?`*!nvW4mhVD-%`2?_r5A=8mdy#u9H?m=Qkg$&9vZu zXi3h`tLoI&b0qo}E6IR_tTn-y%AdYjMeDXglKFLXUD&h_8(+u|L)};)$Clp0SH&Cf zFyW4}k4k)|z&IjiYNJl$)X;lu!6Ia1TH4rsY$a-C#p2r6ZRQrSr%E{bYQb0>Ei;7lFO4%q z7`MAiJ2{Tyr%q_j_G$p(k!;+%cAOBLziwo&D*SJb*xojdN5P^&4_M07!b`LY+_vvY z$HKW?8hj&(v!if(Jbp~R(mZo~0q!c6+ZRvuDYzZut=43pD=$24FSjnR!rOeJ?Ot$g zrn$d(bmeomr>fM$JQa~V^n%hxUQB)Deq-XXye}@hN{9l8On~2@I{LwH`;SSTWW}(VDUyywf-(750ov#TtqKMcC>VvkC@>~InO%`IpDPqEi zzKRV)wb&0$Pv2%Bx-7@K*V}lwD zC`-^(`O!*U5NXkzG~KP_Q3CG$K7T3e$CfV_w4y_f1dp+cay-Rg%bzoQ>ckw)DOvdt znHc0dY$xvM&mHgDk~%zOoc>;xt&83qU=Br4Bw{MKyg4Z<>|#mw@!KE0{QfREO%vI> z9E~Tf;`y_I6@}cJ` zNg&inUTX%(eTmq3d?l)z0)dho{>RFW2#poplRsl62|n}Sug`qK3|}9vfBOmUhi5`( 
z^(~Y_F5yMepRXWCpt=t=#Fz5_R70-NZZx8@ctbS%6QKBe9n_I=jj^peG$u=O-)gs8 z%y{Dq$x<6dh~qD28(8I3x6{OXLz&QNP{(w6QMa`7&8YVYVEk%m8VB5Ngn}4FBgR1? z1Noe*Q;@a`T;do`@-OWXqHijLUX(QL1jw~e=$`|%023GxhrS8rD=R3kQ`eZ? z8p>=mdvL9>o9`Pjgi7`p9gJOAZ{tR6=@MF6eo1ChJgx-62O z&Ib(BG>YRg2@Yvn=Gz^9f8(d7u66s2W4X}=P^|5yeF%KVZL04GfGV)|*4HgxYvNQi zAfN^xdq!^@K>{BAq=#Vxtsvj(Fbzt14vx5Jxom@ilDxY1EqK#OZfhu=EcNaqApYO@ z*6$YaO%*rV&8xB@k6sBH5(v0CM*%k-fd12k)+-(qMu9)u^1$-F6rUVASNz_n2}q0j=kb(7m^JL4&B-1 zt)1Q-dM*=quk~FQAE83k+?t?NT@l1OhZGQiVgslaZPzPK;FGPA7_*L-Fu%fw-J>jVbw#f(SK z{i(%QPz?@5aVg#349HjS>3{%^9Vpi8`|ypPvo7FlTk3De{YY?GF5HdXZa_C`b0H*K z<`|YZA%C%&IFY#_6WdGX&HR}#OP+h;F21n{>C+&~mb;Q?x-VaA6ttJ);cL9k3F*=L;l0;La0IL;2T`1fqnAi-it4R z#v`W<=K$3l(t}#D0X77uyq=Nqbm>SlY=dCh;X|lUI3&K{gQuZuZ@7L%PYN*ynw@0p zIrw?50-fh!-Wnt+j-g(dtVru;$LnMS?Il4P8T!+X95`V=MnM7UCi8p8qjar2JhYHd zLl4NZ*A)LyQ-Wo5^K+a)Dj%Ben=Nf?6GPQs?)6awWMWb@ptL@v!S-gYUdqXB1gRQ+ zg-_&{{C(}ZA#o2S_b-)DccGcNw3 zSoC~bpQvV?uyhi7&XPULFEkpoJ$47%*v^1uVESYQWK}f=c^d2Nkcl)C$|VW#UO&72 zMwdOWdaa7h)w|XRX@iE+!kG9LDc4(8qegEY8+?)P{38@fC6Z(wcb|yeec~v?nLa~a z!&?*)#jSP^%YK9eA<;7q6eEW#G!y`Z1_;`50Du*@Ap0#wc|MR?NCH-#v;ebJM01{}hSbDp=3`#`@W!vz) z^}nQ?!=pUy^QiGdvq;7}B!oM7dS%=Tkz725_=6?O;Sv(((rx^J3z(LcbUk)wZ11KR z>5aRTook~<3hoBbEMeJO1*!R)ZxFP&Zs5b!8Kf`@)6qX!I_q6dbry94staIo|c;lc&U6^lfuz=-Ng*aC~aC_c18Gg!z!2QLa zC#q-zB2wwDnEX80(d>uwpNL_p0>NMd$*hey%Yp#QM}41^8SL~yRe@GIk?0>KkG|$t zPHGe36>Uw4Y1tX_W}&c%#hvQ7s?cyj(5uksPv=m{0FcV8^nu&}6VamgLS6ji&Qm;H zD?t)>uX<@aIWDs__6)ITJ(M1DS_M!#Hjh`&O6e3`(UreJs%8g@Vy03)ma;IS`^{Jj z3d6Hb*j$N(aS!N&TAj4bC6;b+`P!L##E9=#g=lDP1~FWyGH7x2K5)Lnf3Me|S!FY& z9#DKxR7gxHpZfpsA<%z6erM4E>jxv`%r(XT^Qae1iKo*4?(fP_5gYRni>s~In*agH zkI24dfhV~|6qAOB2V?oE)%^urGz+H+=Z=Imn`4FO2^M{nj_c?) 
zQ9kgG$XSl(DJ0osu$;=>pv=J2xgU%jOBXmKVg*vv{kE~ndt34<+fig7B9#0 z;zIefZy7m%;PCxl_N5>5{`-_Q9SLeUXkSUA_x(q{rBJzQwnb)QXVSmCMF`?j-L}!l z`3T0Y@{l5fRTDZlDQ#TNk$4WtzIYYIqVuB25|L84nuZ?dnFDuL9f-~bAv%%R7H(`I z*V{CvekzEJv8AjD#3R7ccP94@c%=N^L>0vS`FnM3qq?>e-i09Fy$u{6`VVVUke{OA zGVXupFUR)^#g{Iubr5@O4k}s4r6LbBy}?iKe?p?RNVnH8mNlbIAsc-kGdcrJ{qkc< z)4&BvpG^p##)P7{MDC$wT|Q_wwvabYUOcq4*+zx|Ku=*ar%GFifiTu$N;LVSWh^25 z&D2Lo!)6mom79ClS%P52Tmc<#wt-I}N9Rjl^G$(MSov@s_%DR^=^Izkq$D$7*L@T< z%|Y&@D{%J%@(CGaDy824@cY>?5hh4C8KxgjEthR1m;3;pf!iW+q2o=A)Ids1+ywI5 z5mEd%aBhI$yvqA&Uy`quE^)}Q3Pk}?5A;Iop7zh@bW7q1HeW)>dxM-fL1mc-0;gJD zsDbRTVVD+Mq@#NWERN1>ReN+7OGu3Mu%|Px%m2;pIs}7#kG;;ttah4P;v(xN9XWT+)DVM!IzILTJ+uoh8a;8Kqu@nyZ1$>_Uq{OH1v(iR5k3+^Uf-vnmvp+`4xB zD+~o};lpp4`+S~*i(IVKUl%zkSovj{1w!P@w!BZxP72@81Qt6}3iQdmLZYO{OFR<%`@#eLIpn1sQ?+@j z_b1(K^Hepy!af9qPYWpuY&OGeNtg3V6__Fvs9)~NeI-kdpwav)o5%BjXWi~?(KV#+ zbC9ltq`uYBXTb!3*tU;+IGG`T)cF*venrXW!29KH>|OqD>B}gHSj$B#iCt$(y77;q zcS9aW1QSJ*j7|lg<_iE(6Um3elHT^rp`1gSRPN9nE^tGJdtRZfAILD2y`Z=Fu&=P! zefr7_w(${3@|bzc?D*2&t0o~{Fi2yTL7kedk^EX`^<&^e2SzaS$6Y5{^BH&#oa1I0Z)8F-Y#&xS z3`HZ~GBo$B%c5WOf$J2}3002$boH{#ZOQKF*WI>b1~_?QAmFpn*r3hR-Qt9qW_6^? 
z)NA_!$Q6mc*%VHR&_}UV-ryQF#d)(KQwT&D*8Ub@AjD!Rl@CnQjFu@SIug6P1@3tA zxN%USr8eu4ukDjq1}lZ)xFMXuBghEB2y82slFR2;IO zX%_avy-CSx1^t{G;a`95e-#U}hd9&ANe+Am4JG_WhcPGbpBDbl z;}?F@X489|n_tm~b}Zb!Y)r|T^jthe+RyY(QmS4_`2T)MS9pF}Mt8R%j&z6v9t2|g z4;_XZScG`+24nxT!QT+y{V*F`vpaj|oOK^j%+T&ahyen%uT^H=N0bPF_MH%kf9yLT z)^zblNCba3?mylQUMCFyGc<*K)~N@7`K%XrhZ{d%f$0?ik3Zc1XZ+#kD})gxe4&P* zi~qMF7lZnLazt*%TZ16K5p?ed>W&yXR>k5ZSp&Sk=SnQa77@}jfc`m_$&z2BkR|_I z8vmMja#p9Mfg(uf69tFH8|);(V##J%3UlyOX+95hXfKXfZOyeC^`%R1)(7CwR@loV z{9s8!J9Vr`MMR|L=htc z=br^Zc{zhnUY>{f@h0qJ(GMuL#Xy+qcDh0&n|8fY#Bv z6*bu10x@PQqk2a9%I`Hofm!s3;Eg8~hWzqUfH^kT;0}mOuS0R^w!zQ(aFGV(?lRPP z;(&|D?sH_c=g;${7nB2?cL`t@%_)GNpDrA^*y&OCx!E~!e=p-pmvzDl7(So+C0@KZ zAO7Nr;8ALQtCMVbl!=jn2ftQyN*JP?qlNMNu<%Hg1^7&@GD5?VJ;;RqJ-5T?j^ksd zU!mbI>|X7Wv`8mdH!=t-&?;vfFEWA0@IdopZdroD$6XrM?nf_v4UBLY@Ky}4^#mL9 z4tI4=AA{Kb#eo(Qmto=O&!UV$tB^4AL-**_uU|YzhM|mbahJ6ry`5pVtae266~5?g z9enUxyD$w1KwF-sb&OHGR&jDrLe$6M50`N~q&m3ubc~SuCQ6 zge*;6+*1Nc<7boGK8^^#XJa(!ISS)h4JZgVb2`Syyv-P8!6Cr$X|V7$I8k)ri4++Q z{m?}#RYuxD=|4;3+th_rLssZC)_*+h%7I9DILsyc@R{wYDBs&(eQU6id@e^6WHmZ= zC!dNe(Wv|43x?1a(#)XPIV&!LD>uf~YGwv#$OW-J?M?v0xEl|3L$V9{vLPMfj?Jy> z&U&g)&s{0Qm#cLErh^NC+~p~L}wn(+GN+IYVgdIWe-tCEoCj@H1CgUSp49gzPI49GVR?w%MnDk%Pae=+UC zWFa?ae?qX@K62wp=%YXL_=N!Bc)k7^t{e`n0+ZmgN|)`q_Q-r0#q@X5tUq?2!h#5KEFf4ZT^)^F3~S`2Tvu+Y_cZcxxW{)Nc0?WM z-GpqF93f5Q=OOj%&zcaPLME@lG$a+jtnSX8^`di(acr>iZEpbFcCoxJcBT)1**`cW zlnQqcXd0MuT9Wt9EkJ$vBQ7SsV#u!V(uBizm%FilHLnn{@7YB0v1!#p_ zll<8blfN2bfD$EMTc!7)x4_1G=L3A1`2F1BCjVNYbvW;Ec9qL=*PGs=Lb*F(6NHPs zHuA3}c%+b`Fd*AoP>12i4DWi#&jU^V?=^;eSrK~33(bd_5*nZX_YVrdG8a_P^%f z%IydEU<@o%?xzGn8{5B*!8HZ)5a>;ICsir`Yn#47las1WUV z!~uB>eos6u_E*RtFN`A}-Y^eaUlS*gLupxN?Myry-49xZM)cyZ^zP-p2;K^iB|KOF zdB0odQ)eEp>po}|ZC}isNf)$v9u1*=6>)6;=$SCO#`c6m(nsaXy-9F5+`TM& zkNSJ~Cjy}WHBnMW1G9zY3)EdcAKYKdj?G_u^ zAOr3M*mL0MeWr;}%>T>Z^p$`9iC{3gy`#6;?l>*t!e_X6T&c9oRG?IPthoqF!bAG! 
zAc;|NDUScb0;FIdVj4hxrT_=MAH%`mt+|WNqr*EN61T}dn<0@dd)Oa>xg`7BN<63y zBb()JFn%u*{BmIzBtN~U{-8WAb`OzXpE5*7w9@jX2^@c!e9^AA@NPJBY-M5@T%Q=0 z$YS|D4`|`fY2xYRNN?8-U{DpQg0^29kv*buGN9k1E7 z;Awb+QtH(Zj3Hon1cEGt@~;u@0oD1UldVpq(yX#8klL)t)EOwe%$)#mY(|B(dkofX zE@wb{z!!W$0IC!`T~NJtodiI-N3s=oroACqT0f9XN2arNWzXw76F(?8r9^b4WIQ*`Ep`>D{?e=R;$_At27JN&0nb`wCHD-qj4U~5` zRWHPtgrY#A^sOs}FoYx%t8J*VpeD$`G)8Oa6AfvagoF-11?k8pNHqe2lU!SiHeY*z z%IrV8k+|+o3&!sm3Ixuu+*J#4qk6E*zYSG{t~oyibCqB(;eK!VQF24xxB9YY!>7C^ z1x(;pw0E%|Kj!LSve&owgtQJecKQ`HCWrY!#^FZ;Q|(UQ>{tT8K1|~zAAsY*{f;oz z5TQ9b(&4w47@=}@b@t4VM2O_&3cz~w1&Aa5JOHrC-#r116z2h%?k13T0W7H8v-`mT?FVGmYo(Paoy+(Duu7FXf zLSkyl_Q+YFh&1;%|0W^4vX_|pUzv~^%LY_R^~)Mmvl7?9LbZ{Sy5*BJeAH>tk%D5a&1x&bRpZkvsOR9ovpb-Y zX5vfkC98#7w6+@afMQI1ZU)#KIw+=W*+}Z}8B5WJrw%}EW>ACmv)-d zq&1k({woML^IccL$q+LjgMIVj3HTC_6+&Tr4lpa~!B57FjH?$%hCEo_ojyZ$JW6EK zA!mFpovjdwYpk8zKzGctyKeS=>f-!c`+1B?M1O|})iLuvF;(D;GfXz4az&>_6jC-I z7uYvYHYyIIAjVYXDNX-~XWj;}$s`s05zM|Cfc*MUiI4FtsL#%xxfG#1bf4nb3041{F$6+6}u{8K3?inBl%B*_1JW<;zJsjLVn}2u2QabZ)u0t6FU-IETa=uww z2R3a;6f5bWLSq0KSHcMZT1iAy07dRlUV|1+_C>3WAeK79VWv$JSid}X%eq~7P#6F> zrENI98EB-}06zp)m{d0pII*-0U&RS?Mj%$m)qn8!$6!YYJN zRT4>o^nSOd#TmXsHEalPHY|#y5UAYX2Z-$C>*9T0QsynMtMZpi+}e6?H^xY$kIjF_ z(lJ;#=ou49 zjUBftm3QZ|Q|+YD4eX(?On{w9aTj3g3v8!{W8W705l%U7VQm`8so$Ta7`A?65_LpP zZQ$@D5?OGs(Hb6CO)kvkBkHjN)&AlLUb3JSnORL!U)m#7WI<&gEN=l4(!Auk!%2&* z#Ma&ZNvblVt!YqeDQI1qvN@RBc#LvVZ5?5Kx34_v>pe?ugWVjBki^&Tm~rItOiJ66 zA$=jMl9%n*))pXO7pcl*?#TS{+iMlka*Mh^m?}Ugy+g?s}{*Nq)>E5zSYzo zMuUa!$wU7+{A^_V=5fL!q{e`>yysUIW{?ueTuEt$l%~_a9gx%6aRD(4gAfO1`{2X3 zt;MtmhRbPoq+RXJ#UB^nX^&6ee1IdwlWftUjFZJ5lI|Giqj>x}^YwIaq~|BWj7C(+ zMNp?Tc-f$%@4g`TpJx~6a4`KFe}OXvZ4FmP9R0F#C*6~K3S0`ey0N-F2dkLJ;;eyA z(*AiPq+36%x%={sBn;HztVe~s2$L$>NL)1^ytonJ-nBk^v4dI6alX%vAtY0!{x>Wa zW0(#K&yNa$GhS(ar9jN(g91Zdzhn0fultX;e#X}qL%1gihredDa1!cRg`Tg(8UQvh ze?f>9WsRJ>3o&Ye?JCogkek~!)2AwOR43z8XZTbT?ycGih-vw!O=}<4AxP||M`N6# zH>&aV>5h-pzW*0{Zyi-t*Z%!dN+}WwDxK2Zjch{c?oCO9gn%?CAti`(E7ILv($bBf 
zfPgdzNcWiwyzl3Jp5Jr+d*3tOamG6a4u_7t7i-O0b6(f?^S$-yv|Nn6I*d0)zx6i#8liw_c?#p@(_%A#=(7Ci`U%9WDzOmvLZlB+4|)2+_COT@bhESALYzVit8vLhinh7T(qU6fml z%%3o%>9u#il1qA?#OHT=IrIHoZeHPu#Lvnr_cdUMQX(!+xbRR+W4XHs zW`&H}@31uS2v?};u`|CuXvt=r9;6VtJ5KiW@o4$|ozftrBuzgeJ|)m#!dv=U%Ef4Vp{J{Zz<=LDnT>K{hI$?a_R9L}8?_VS=+Z;Ff*+Je{-? zJB8x9DEbEJ3n>)iKjhGB8yRap8e^5qGf6{6G(*DtEUt=mVBuDbPMBuCZIq-X-qnU7 zl@%x#xo$6?qCp`I>07?m4Q}`jj2s$IiTF`{k2`8{9T>Sn078y@CNpRZ*P-)8UprW}9du7nmd&5)Cl?w4VOar!w=htGALFl`C)jpevd zllsh%*eNOr@fm zY-FgoF*v8hbQLjx*%lf61?Izd*3MSqPMdj8`{~1>`&uGbfUJ^z#;d|%6C;Kzj%ANKo?2mgn)g3&W`a3ntx~o+2au}Qypy4g; zZ<^Pky8QyOuc!DOu~zXj`&HQTLWOSZSJ=SS~HR$!%LD^)iu z6Fdp}jNa**LtpWW?HxO!+Om5N0uvs*_dZ^Mm|dn-*BTnjiMZc;E-sj4VeMLO$?zh4 zF8aWC*NOh85k+D);&K*~+HJm*&v}~iDNm4PGFoEn=Hq-s?VRwY02C(6i(S*UH1ht$ znJLK)1@D|0gJmNcjUX-1o`Hei#}m)Qr$8cLgD;<+%1A;X&|E9`BVPmBMmds+<$z!r zYz+IA-C{OFeeX?a3mPZe8Q10H)rcoIM0fOzR^w$;SR-29nS>J1SQ256M>QQQztS07 z;&|b4!-#R1(~aE2X(q~E=ya32H{#r&3f5Og96RaM%))6c59fAX+=&}KH1~x z;~AwNMJ?{RdtuvS*l)+ZUG9t^^=u;+sm9+~qq=#EdsUqG<6fU%EZ-|$473O!FLlo> zDQPISOR2Ll_-0`+GTLtBrq1Mh^l}025333!h<(Ku#&tu=+6GnLqw02)${Q{{)W;y@qIpRde~>EBD1+K6g{dTGW1& zcz$1}(D{ewWL@I#7w<=#ELg4!jNGnT$6!%# zPyF!lj)@WzgY@HP(`g>#v$a5Y^TNWasr%P7jm?H@s8BLbe5|x3iP`P598wHVKgFmN}M_!t8U7AF7UN>W2FBGnY zno&!c>7659Qr!H7nwm%;?|L8wz4F{(Mj{8E7-a$0jqNoZg*O_OMiJAq7&g{|IBIb9S&AI-Z9 z`_YY&_w^-Hbg&bP1$l_qHa#BPxhee_xhVfWSHr9q8cVL1vYrFboCgh2gwT-R|6wjc z(M{wUE-rJq`9M-J5?`0PdlNnq>_Jn%L6ovwJloO}Z0Wd;x%H8AGvWZiVp*1fCKFqr zmQ(|Lia^HmS%LXjZF?yj^Na%CoLV^#4W^*;8V+2p-I_<>%lWR>wD*IIot~==4#t)7 zgLa=}G<5{p)5Bj?vtGtIB5FM9G4QC1Kzy^#ku+l16K+87-;baW3L5?uNH9)1h69hm zm!Oa9!_8VJ<}WzT<}%QGnD|L_E6iMB<@G+WEuO=Q|fv~ zBb5lAqw4HV4Wk#w@yNQC_plTEz_xNsTzjzfMCvqm zMd_-k4zeP**G!pO&pp6Ka8B96dH*V|xOpW@%o(#*)tageEsaW?zpZ}G8`B3%5KS}E zC^<<@j#b`36UKwDcC@Qm)eqk|TZ)Q3*D3f+$4OJ$sm(o{7RSdDvz=K}Kn$9=4{j*` z9iPFEb3>vzBcrTuXYryZ&D>^pjjDe5t_*h+Geb^lm`amhY{>^~q#A4H|LH3l$hxNG|2lRcuInG&KG;aeHSARztt_uRQAexGwaZE=3mxURIQLj3a*|s z&%5Rm`2~ 
zc?oOD^yo}&L)}W`qxlTg*5~RI8@o#T`U%s0c1_U?G9V2-eHA~iNO()%11Z(;^$a9Q z%`nMT)fp+AL_@;VChw%3zf!Ek}y|gcWuB*nku5y1gO7nYtr=3eN=RC{mmd*DcYLzy~p5&#kyXJ&iFEs8ltkR?h z52$82`LCW&&FCHcVoK?xFivmEv`Iu`S!hpan~%ZNo#=mZL#~ZIt%hK3?5EovB~!lP zH`K2$o~rdY8ZVaT8cw>8rC=&^G3k}%G`P;_k<6KBJvbBIdyh2p6BK&&HUr~QE5h|E z6f&N0l?$-s&Se}yyZIU!C&1VT`Li2Y8a^z_E=)78Jt1(uH~X9{^WLEKQly;v@lZz) zx7eFc^7||Y6?4-58daY4D}*A$?G@VGAw9?0+=0Hv7wld{0uMVpRc!>PZ4BM* zRjg_iHC%R*%bU&`XzPcg+HwQUYE2oZf5vyA<5c86wQOHM?9Ky(@NW5iu`K1I^Sf}1 zeX*w>Z9(us$s^)dx6)!4&!k4sDWKRdAj&CI>-Pr1;fRdCMna3^E3DijC2J(Yj$hJD zOG!i)hS9Yxc`E+OiPCA?()PYTOU*{ovI4Qb-I}JX&ks%&N~L4T^=5zdnwNXXzpt2t zWty}8s`*!s-USDtnf|8s#2SfeRdrFC=9ngBV-i;JI7s}eLp-OZW`Q?O|7doPAba;v za?E>mVEW)N|N7vd_V>Y|jjrXGeE2tvnRPV)TjWGE|5D3E~xQOId)9v*0q1u1v#PVHy z$L)I$RHn4LB!@3I`6f0E3;`#4j}-Mi1*%27eH*2xLNW`(Q^lA1Bqy(%7f*XhUVIhO zl#u*|xg6U?jKx?#(z;MTDEu~o>mFRQY_%V@8Rk9G})z<50~P()2oP!^&sR zb?OCMFx{IcJ3i7pmQqEoV6{j!^B%(3TufFNoLN2ltXFN}J8+8*qNSEOBry%dPEcVwIDjF_Y>A{EM zB`~k)G#>m6%K3o=))|&$V9EJRfeULqyd;*CyMKz-1*hHE=w_xz|IhFSjvrwNWn3?S z{NFunU3VaDI$a(AP>Z9aZE2~p#5KXPdMUw5a!%LdPU#NELj3c&p|%;pt-{jT`1yyb zHXn3*nCy8-OfK6!@hFE`uQpG}f4%kZX-*}OQ{3KQG7b~QAx61L7(pQ%j2ebL#4!BD zuGo6_>92TF_k*5~j)F+0yL~+27TmoUctZ1U@T9Ude?Q2=?o8w)^NO-}w(pK#eB>4& z9?9>q7ewkoL`Q`9G%N8_6^Q6}%9++US9!(_yBj-`z6teGyhw3x-D&2d>@EeN$G74` z0V6gOROfG$FL-Ns?Ey^V#kl)cv;7O5OzLl5mhm~x*CSN(?XJUGFY(|}zolS!)H|w3 zvNwq2`6?eP-L}R;ZKRj${q^ZK9K?GF$1jv_-E>y-=bkElCBI;vqk4r7_)FFD{oW(h zS(s9jUc>HQEMQB1s!cnV@TaL7186W5QH$2uMz!% z7N*gQIy4xAsS~^J*4KffdOlg8gx};uN$hJWl3z6y@%PSRwM=9kOz41yZtzb!u_WI= zue9V`w2ak5@!qX(t2O;V{4j`*7@}HGIc2#p%=C@Fsb3|zmOt0pdYbTtcc6UMy@yg2 z17%N}em0f!C|^%s#XlKG%@ezb%+>_@adS2SxS-}NtSyvO>PRdT5pv$)WkIQN;at|L#2N~KRZrGJe zJCXMVIp)o0W;K@OMS~RR#^YvXk*X<61_|C1*6gzP+ofo<7Vq=D)ynhqe1Y*A$-sYP zG(D?>)ef!gy{aA~{B;SKPsO{tf`$belfo)xBMn$5aMOEpCg1KV4i)D|U4E$P3!8|% z9G=Nux;iC`|2ZWat#(R$M!}F#%r^$w(30#M{s^?kvL}XWYRWaHF7gd0POz zcU;2P9J}7YDQ(@Q3*69b? 
zYR`d$(H*eT=+~b_DhNMd1}UR$P#yOXV`&$K(EbW!*8cTbq+APBc-;L9 z>Au!FP~E<`4Ohkw^2&v>VuU$5R#Qz-@kAo;vz<8x{o|)!K~z+7Ers7HlgZuI;SyX# zp#_kO7j_*F%*^qAXmuFgzuVTOm2f`#67b9c5acrl zg7;<&X`(GwONch~);CQa%AHQYTyCwUATVz_SL}{j1*a0OE z;v89wuBFmK+E=w5dS0UnuTB<|CwOKqmVy6nn+r6nk?zp?=!GQS7r( zjkHKiIAA*Bc40rh+I0O9U|7<`RM_S0AfU;n3JXC{#vCXS@Iv-Qf?0~|yjqYQ&JCIH zig9fQU9aOs!)P`2pv&IcV&MNR1UZzNrjfa*sB$XB8ooZnl1_pp#wf)ukI2**lxj;1g)8syj$ zS{IA)9{-FqZfXP`z#SsTGf?Si$d~^Ol-<*vg@c0!l&cP{ScC(==;W^{?pn`l7QcT& zj0JfN=3uQ~$H25VTz6(C>?Le`mA{ZP`c4>BJhNl}W!+{aCMoE5vo)6F&rdD^Cy!IK z3;b3EW(17I%Tc)7-OJeuw2d_mT(eZgjh#W}Te1-(#>~s(DZ_XvKo{~=jJw1u)_`Bs z0(=CBZRP->?FPot1x_W9McM5QNp4xEX9; z4N5M5Vp0HwZ3NG@TMa+@<3ovT`|>hyG;r;e6gq&vc}ZViO%e(qWZO1j158vB7lZNw zqtw+?-#fzhX8`{t>HdUr@Hw)oIL-<(;xWsnSl|CtgpeR0qTI$jDFP|H?SrZ&&yC=- zWQhxKH~MEhFao2M^hkLOhJtcs#@d~sa4$|>hcDDa@p_=~Ip%ULMO2#OfX!zx1 z2y8WPsbtCqVF?&5L%_GA$0}aff!lfkK~w8P?lzMChH1=p3G4jkU96<_ZI`Vd%(o42ALZw5zCmLIB6G1OQ>e)6pT*c*jBEx7d; z>IFn?dBBVPfe@m|*t25>XgfJ0kBkIs`k{quyQ0XG2}pN;l_er-|*!0Wa=gHmv?HO?Wo*Wni$_GPz}?l3%dG=wzrq+MWxV0@Q! zZ_}9cR5O&87%Cwh&f-4V(?M}7kgE*?hagP#E-tkUv+^&Tkf-j7a$Ap1{4x-wnieWpxYoO*Gj@(-DMEVB6#%_(pGVV z{g(q)af$~ndJpzLThSyJx zMK2+QZJx7|*1(rq{@Jx6J4WvfC;N#4{|xY|B_`fi?Mk1h+#;K|`+%e643KLguL~I! z0x0kN=Bm)JEqDp6Rp0F@oh!-49lC=e=4NN3+zSjG%i;ni%9~&>AYn>(Aa1|V>YKn2 zRo2RTJEiNbJog%$i((p|(i`Xjm|lW2qf%NG(@+j&7fPIKdY|ui77GK4p3SJ|enQ|! 
z@4y~N&mc)Ka^MlM_R8AKCr;*6!(_|i6zr)*;4Bq5ZR7_~a+Wbza9Cf?C3;%HO?ccM z=N<#eUb6dAeNqpFtEU(B${q#T92Wv%*BAjvF+J4yYpV&pgFI;rUi(hErO|*9x>e1f z&X7R)8ZN3Jzl65f^pP`r;b{jzs7ZJ3qmn8k8RT!d5e=q{0J!e zE|?nV@DhtW78yF9{=D4F61fdk({6#Rn%T%TbyJs!G3soSOb;YL$z;(2{@Q(lIkWZH z{8Fm*THsC*9w+>zNh|76!%aFnpzL^g_K1Q#y`_S>K9qbu@I}`84nKOL{4KfcD2WCB z0uD;2JA~rZWx~B_?$aH4`9~7b4UsfxG8c@A=CpM&$*C(W^Eobq`pVDp{Bqv1MF_(7 zKmhCeW(<%(??posdHX-MpIVr)4-ex-d8F#QarRZMIoNzDt;0T{v!GyMam)t~*b9gF zlgZlIk&N$t4d|!C9bU8EjN+S~K1imodvwyGoda|myE}X8KCifJ^S>Ag;ahm5ATKAG zlVb6sS`}2zxUCn1r4JVIy@R10+6RQ>sny z*u#0|)y5k&<|LjnR0iCqrk8O0c<|Q*P!QI8qq;%|4E*!C8t&eooG~@acTb)R5*&Xp@yfeXY9q`=s)(q4Lya z@{phDEQ|33h!Wtn9SnM%zS(JLC^sHSKcQL-YxQddCMbmivZR5Ex3JW~p4)!0wA zGvzh8yGjyw*nAMysUMpic0J6?<$|mKc%61@2K?PiMV!i>LKCc2r|#_OcLzuIVwc^D zW8UN(R6OzGDQv5ID+7_=)5CC_pK3#o#;uipT~Lx9OEVq~GZ9xMva018rs{j~d)k17 z(vmXo?7!?0?NiYmLz2ps##o)|dk%Ien8N4L2z2elWS5O?S(UNu!*Gz!8W`{4a0Vhh zO~Cv7A%W;rmhYj2~j9aNgQO@Z;m)v>!aRxJ#+&B`1lM;frrF7Rn8;~v$M&azx@U?e^H zGS{*tKR@?l$tT>8YI+-{9MgWWYO~Km5bsT_?1m_2y#$>x-}2Kk3K~hVuC(ZqK{q7 zcGK$R@~_fGEi#KgQ7`yMZ`5H!12O2o4n!XbdAl?lVnw$voR7t|tj772?!OkXFUAo> zBOSL@a(yR#&nj566ISsUhBjOFiE-W}4JE<>yzg&tE7REY9N1Gl6{n}@=cFDAvYguh zI?;v_Wj*_aMViL}r+{O}TWGKcg|zugVEhrc#yRZ0JtPwh>{&|YvFGYr{aYRqMMVPD zls<`T$tuU89aLLIF2pWyd@- zF2;`ALFJ&Lk!_HWID#H{;j+1jj-tJZdVCWq79MJF@318o7oLQf6<1zu>$RI$k}p7b zIko#6(tVPPkKW!fTAT@jZ2}Dt+pvk03VCb8yN5gg3epV1M)Z|V!@jvVN7rj`zRJyr zHkDV@$nS6^YN#O-&H33?Gdz4OcWU)I+8l^I;rBH$J8P=3jH2XHL#R(cc2G`ig`EVA zbC_XH4r2x+?&fjftNV1tpU;&U{b?UsxWedy)Uhb%O)8(*-_29D;1*8}l)M1K)S$<^ zK+AYueLE2})O^T!MPOP<01ly;j}lpc=Vtb>g%nFm&o~dRYuUc*w6%O{h-ec!h=+vR z{nrDeCeFBVHZR| zQf~uHrrGTggxYkOK+#GRX;hN};(>Wc@~Xn^6xd7)4d@x#|1zNO1U3UI#;A4_tnYw@ zMP!>Wfqe-52g@D_Sau+skN7Xbc@i?jgORjB1S<#4fpiE-FGdM;E@48l+NRZRxfT;dU3X{jY7CZ@`DI6^N@^HmhbyAU;cbR_$->f}@ve?*`{r^B;us2)0fy{VRM#>iDnkaX^LTiDh8AyGL=YtsnK*WX| z1eO~9^EWh~iT<~Tsf#bzRjxI(ar_sa-Hd=v+gI%Q_4;;nu5X8@Y)v%SZS3FqA_Oh| zXY1JyU;HOw^f%sawJ<{PH%xX@v^h;rcP%3p{;>JpZZ+%=!F?qoknGzqksJ-tqFR*Y 
zX~|~!n{VHH1@Y~p!Oj(!yw}ZnWip_!V5&|nP50(4&RWRdw2>#7!TOMze~oALKkxh@ zk1=-;0in00N=wK6eTlzGCVcd`mEktK5XDH9Ykq8#?wTLddf2TI-I}7f*7Nt3g>d5p zT^*W9=M;kt!&w;`Q?99}D-acM)1;=X-vgH4Q{wNe9MYWG)((-8P6Z)>-uWjYoLp<9#w58f(l?I2IeSlKyvV zOyY`Kv*q-p^8Y)^sxX^K-rUtxsYQS4KFgAS1*l#_h`&*~qDs1A3Z{qof6v{vy6*QS zNFPgbO`b=PeR#Pqean~s%5Ihz{pU#`Aijm~Lo+A*8ef+NaJm~+fLQXC3j=x)F2K9` zQN=};A;WcmT6uax(*QE_M>9^C;5J#bC$ z&mx1y-XHuO9m!22yp1&*4|>CXwR|1KTupsG10BHi2FC49z&y%s{Sb&D9S^6>pV_C zEa#f_j{w&Y_bZYKfd)e4^^-gZjP$u~s};Pi{Z5ZPDM_Kb5KeZ9-L-BK4UQO!DD%CwC2n6hS`O zWPUENE_;%lk4aoFiKaAzn|eZZ><|=h{f{w*jWm)ehY5m7CU^CL>Oby@k6J?>jxO}> z@JwPid3siO&BMN$!~TOn>ipx|tVn1qDEFn5eD2MT<$(Hv!Opj>vmMEyfkI1ty{34O z{iA_j;S^_UxQXEz;E&BKoL-hE10tG`2Ka%3o236kj*|xeW0rilV%l5&$+VyR#k61B zhFE4g{oh=SX*G#+BkJeAu7&i4aTfHp)j{$!xNi_M2)g~p;yBe}o z^%!8lCEfA&i~VcnA)vdY!NJqoi8|6jPiyz5b?&3e{o`>zVsFDB5WRbFB@LQi9|B`3 z;C(4b=tZXA_n4sXt?*m}?I&{j%LLH41WU($9&k^edisxM=0r-q`fWCvj)u^;6x(EQ zicA$=`Dt)GKZ${+%&pg=S`cC{_HKd;>hNo5T%tay^6FLa#|VFf|!}1xf<+3 zH$S#P_io#=Lv9vik+ENi;Yfdio-4SvkP`aDKGZ1;^NZ!t|LByJYnEM{6wB!fslB_S z+?F?~LN+%Rn%GDUea)9jbkquRdOq1(^{vk@y!uZHMPG=JrGU#*UZJ^wLq}ap3t+e-VBPyk%Z?DBFR9^q@XEc8$YLBG(fQLzR z(;@hp`?mSReP^VF#1ftUTKUJ-#qf&v#FepvJmw-bsoTU3so(rXvI|_b;}b`& zmaKnmhstGrg1c!N%SjD->>@|5ANty^J!iTXMo{+Tr^@<2euE(%*>4FalTrsxw(m_t ze-?`?Gc=VvEp7IcNh%v7P0ZAXSIaXrX7?fx=B)rQ8md9IsCc7k>MyIHYx-TC18!PB z8t6#pU5n5E4~!ZCF_FsDL6h#yCi1|wd5tnOQx=|Q62ucmKm6N`;8uhWQ^+99{?zx+ z-sbAEoztQ97HzSQ51&7?{MX(D1&LcLe}84O_1~{(N>owV^H%ou--o#X(dKbC3**Iu zxoevuo2y-%Jy%&gr5d`!oHFI#ok@=OH@e={5bw$S2cW+~*E`2#EdrQjEvcZH0{yU3 zpjhhz+jD1Bs()-d!vSR;iy8Uw?@<|whIp_`v6kTkP;=?)p>c(plW1@Na$T+I^}t_R zfPFn?-G5yE8PY00OLVlYE$bg+s4Et~XKZaC)9t}^N zfQAcN8m@lslS7FX(oo%a2uA+DANjxS^1o*=5Z-MifV+@?2L!_$NETVfaocFS0GsaC z$CylaAu1cng9boQE+b%MGdxp(9HaVI?RpEu(6ItSz2=vdy7eBw()ox$T67MoZd?uc z^H%!2{NWQDD2O=+CDtH(`vE}7xBLOlBn&qLp1rrAOw(8Evc48UfG+;3{Wtka+YWH2 zYuDb=V0pBW;my*pr_2jX{#&6E;l)#ydH#?uV}41;!|3T_ARKtuxdNQ)F(D7<7LdFf zsb1%q^?2dZDKLWrv8%;99fTSwsHY;p6||)au=>png&?eI7%p@WIMA90-_IT15&}wz 
zxgPxs2at4TXo19+oi0#0gf}E^=8(DE!~J-(qrZ4^3XYU^kbI8bJWQ8xGN2lWF+h`{C;|X?v<>hg-9;rB64>Uy$I!U2T9iX zrUsyBAbN4&*0eq(fQhZ6J!x_6lF;su3ut0DYusz^D1C|0|cm%SVNuCFj3mw51!hl8D z5kRY0jMW$Ry6B<|hYfYadGCbihd!?RWj|>W>kpyfO4;>uw`~LUW4Cig@JwM*?Spg^wg8ltQG+F-%qZEG=Vf(-oCfF;|lyI zh+lrk0cMCzD~eT2^?~U)Iajqj5@+@@yWD_-?yVPnTpB+^R}zIi-FQK!v~>db>lcGe zZD(LVr~*MEvL*4gI<&Y`vSDuv)W-=zT6!mrg$~u#diJ^is%MbVu%_l>xO-KkAwr7e zL1)4zK!yi`Pc+*BI|RvZox&@m3$AN9Spfir@ATqecphPJ@^Vy3Nzc zjG13!r<+aX-@a)rSl6&ao~dS6uW-w8+W<&s)(TZ^E(X&%0IDd$rdQySK14z!Cu96H z`9kT;-VxE6(+wCxeDXa9BdpDw6ep)r`L-eEAu<*(8AdJ;@jQau3^E$#YxfoayNCfM z%=b9o>5yXk?_L9P2?uxBUbB9#sX3UkYPG4F#+McmzYjr#1sE)NRe{ug#1@rk6v`rb zeiKlgfMQS=;tk;ATa|P(=ss|Nd}T-F6cwe1HLUs)jF;x!$GLM9A&l(KqyP{XS%|}} zSaS?F8F(~6<*X{(uG*GowmXI%07Aw71k!Q0j7uFr>xM-#aw~CD=~B8*dY!URx{i4M zQZ1APr~iPfP)c>eej+&*$+~9n>)fw3`e*Y5_S4;9IXR)lvb%3{TXM=7$eakP`q4VW ztGHyQ@L27oJ3~^vjcM)7A#JBvvrbY8cXRhC&C+NQ1S>JstNPyNcs+u(|E$1 zPpNRQZv`J@yl)%~x3`HhL+E{Ax;YADRit*6-*j~havalh$+H80Ir9Gy*x$|&YK`DW+pxluKW#=${4H^3MMV<;LhAb zu_#?MF%JMkZ<1-aa0>e@eY@S6&)mBWN2;w{Ii+U)Grvs7n*#v%JA?|p6zE0#sYQ>` z%ZlLk<@^0_il~8^)v|Tkqz}(<<5Sn-W#M{XS8ZO`fQktiG^pS zMtjR=_l=547(!ahc&QzU4zjo^B5IVDc%6bDRzICc{oNQccF~`RS&0V_Z~bYT76X|G z6@&H1hmq%-6%%0`?>z35d_ELDtQi=4I-6DmoD=rHzNCBpnJd?i!03*W=H1lfnFz+>kS1TtxZ zB?Wq2J0_))ehW4FPDjYg@KYIwe3+L`-G?foG>W@C(u%lS)|X~3C&4-(JTb59{8a#J z$F!xK^w3w@^fmgBbV9CuDOUpPV?PepA6j^Rf!XfDesn`G+Hk+;D`L_u2+E+HrqRkw za7?);ATbG=ZBrrja3o1O4)3kZf8A-68~IrGSI+|#An@g|MXmh&>`oK$-5PSe*peSL z^E?=A161#JcoK~Y@QE~Kx@}QWo2-)FaOv(ndMlr3qeWld@JVn%`}BqgruWC$iyp?% zO#6u!;KjUn?E%<1mOp#z7?C;|)65Qn?!%Zw5_%ZRLK&RW=!yMyOB-)=24y31yur6` z$;t3R2xE#MM0sY^1*=XEB${pt=ZtAD(6 zTPTSs$_8Z{Dm&;!;IUj9&%)gq^U4W|M8XlkS& z;}G5oBmv!2vtY9=ancjTGrZ2YRzHIEoxlqm#KmUrAfs>Tk~>;bRNM0N4b6rvJrp0% z#yO|s&5}wC3W%kY;tXfePz4m4ek}vx@{?YYAI+ahBOi9(=EE1%okfL`)+-8 z_p!n?(E;My_4-<~(@JQ* zt`2}yY_AOb_j^_H@%2un%fc^_UTs`EPa2s#e!N262rr@e`n+YOEy0UwBxZWr`|_N- z`$NWy#_jH@tV~b-6D75DrmgqbkEGSAbYfJTxS1~H8k&fhCYrY~GSFFgo{Z@nm7~|B zsM9{MeL8kpWj6K 
zo6(EPO?tIvovJ*PBjJzYlDfc-UEooW-r@NKTuq3O>ee*7+_IE}+xM4EVHZcUCdcGV zBgv=F_uVXhoSMBm*-Lscq*;Q;bKZnvGo@L*s`?3`x(Y`Q zIav40;99!*ECRt`hjFI*GXhVGPU$0ei*up){zt(MSwad)<{Bb<#M1ERvTHXYyvlx~ ztx?Rj4)$OfU*Ph8N;`NNP1nM(EWNsh@FnPxMu{QYsE*d=?LjI%s|CFi2eb6jO^cQf zJV{>s$EAcHMrh)mrSsITsNPlKUtIlSc{GOUfi#NwZ??V?7F zI}G#rra#K^ekq3U|+}AY_?Er{e=t`@Nu%)1s*+#kg6+$ z>4sC9SVRjPN3$wjT}gT@=j6L^TfXP#FKl1e8Efb%;Lq=r_Op(1$l_%GjKg3lt^2@Ja4)*>uh1` zBr-1F>X0D)-sA5zTSR}d0A~)Y67Xg@CUS6}cO2JIS05Ey-;E{86sp>{mH)oV_rX)n zVnCai4(v$D0 zem{?QWG}E!BHIN;%lu+xh*=`CtCqYh4|DxvH=Jy@^JW&JrCc%^CI=D%sYC`i09D4Y z&lA4w@P(rEEN8|4{hjItae5KPxkZ|SLIn%kuf+PBv7?&9Js6vvYgLS2N`R-@1t*T- zN+)@bzE&i>>6cfdtZDw<)<_SYXJP1T3z^#^F|VXjM( z>XCNSYlyknlPR2g?`&SsMAOO7AUin=KPZizH06BtJ2v%ad}s~NS9#H6ckbB%3|6=i zXWzqJ_Pa+lU@ph*-fj*41R?~VWm7(KG0u&}ICF&)xr_zg3}xH_=~lXdYODx*wKS@tMiTts>mC!fRh*f>i)u{Rrrp(;T8x>d)horjq5V5uIHn;uWnq#1B^`3c zH<@7;hcRf0+qgHmFc$7zRF?DoB1CZY=d?1r#ElblC5`7G4xJRmYNFY~Fu(`5aKBU5 zzvHl)vzxLI&1K)4uy?XhsnOOmnO{

*JT*5x3*2)NO}IFKX{6-y#>blAknww+r3d z(x%Bi=VDGac0p+aY=VjL18SlbAP6Fp<1?A^_`JEv7iZR%C@9Lv~AXDs`js-wIT zGh;D%2!bAb5vwdc!ktYMBrNBJEB=8gk(LTl&%h`d$}5s6eOOyM#qrfDJXWVApL*ce zWc>d57s`4~%fRjKlYZ4>*W?#1^qcv$%zhg3X>JTfjumb{YIb1`Gfzt{DHRW6dzsdi zhi^#;o%u?6J~~=%J`x#j@FJQ{&zl$)enfUA1lNl1*L8jz#NPzxLD%49su5K5w8MMa zusZ5R75lqpg)GlF=cBcq$OW$5?^jLrLsdiGDnMV``R&Wwj?NESx6gxYn%?eu(X?@1 z@C(q@EO!sk#6*t`D&$5uF^8qL_p%5u&QUk<&LxD_u5xrfja0qtW#dsF(L9zy`00w1 z=3JWA=8I9pm~wvhO~K{(iSqEtni;Dc1`CgpzHP(4HSp1>x1*R=C@P{2kNJ8W#2YX~ zT9MNL`$8ak^85)(H}j^%`b*Q)fh==R=8^!4NZiX>s`pW_R(*$+M+|FDqX8*Q+VLOE z-ov6OOv9Se8ZESLSBS`=)9ne4?~1xVt0;Z^np5sa#yNi15=Bd%w7R%HD;9nISu`2d zgytDR#jj(Tq1o;G-Pf;RQ09CQWh_f`TA$sUy@>i1zpuobV*2Fzsc{`8n-3EXWDO7G z8o8WA!n&U>nLT1NmBI8^_Xa+|>jRapk5?S>Yf&xyG7Zy`LFP$xMOq7sJg=Dd% zdrxSYfTL&XG;-1z)5`w0;dMU2E>H_^$bQ+kjlzvLQtVIafz@MCDp4 zI29tXW65Ql==w^OC)QC_a1iuxO%vKIhcijJ&g)`2q|g|F|L{$@(fE&KnPjRTfsLG zCuTx5*xpAy1Z(zzegDqz<;ENxbR*MXttx3V?T*t#p zH!o?wnrC-{&Cdb(BReg%3g74ogUjITYfZiH<3Igm776EEInCH1yQDwK@uRQ3dmQ2O zgn2^tMJ3eP-mfABV|&6gX<|d^akE}jabA3P84d9P)Y0UL1d{UdZUG>i@Jw@=8qkI@ zYs$?+P!cbyy-wWwbqOCPx(Nrys&mjT6g0_}P1uI?Z_5aD!97Q1ge%rNBE2~FPBR-V zHlF^l(*KBu;wxFtD_V1Dj4>Cz&X!V#-NL|=`wM?G@!Rq4@;7m&{iTRJ-dBb~)q9^Z zg2q6pHiJh3zld1mNbcqg4&yLI>Gz_Ibwrv1@g^|)ch_a?wBXMRr2k(R8E#*s)!mAt344@8+=GKLz!a%TUFvDAv3@07ixS&GdKv zhqRe&6_+pgmyBFYm=2K@jHG)4y&8Dgz`eUg-mYC4?=dyM(lbS2xkLm@M)RPgqmWND zjKB*rS#K(v+5lqS)@r!FFvgVXq`@p^xk^UH!20l-ZquWXQ~$WfF6Kbl{(9uEIVlj#;jrY|k}>@DBn^x&Z!_};LCeuE%wE#o(di{TR;|7~TFF{!tx zLB?|2$qm}dSzGmK)WMv`JvYJrY%RKaa2PC%`|l+m%PXl%$lvoHVy`Oqs&QPOT8Qq| zTrpc;uXtA0f3f24Lv!?8u)1|+UoQ?sq}MpgC(5KfevdAPXqqI+={Ri0*wT0ZOjXFP z+&6+vnoYYz#SMo%IMzT_esrgAT*$3*rAlv>OGw9Ym83y+@P)8zIQy&M5DyB{RX!@3 z6P=6kUFI4Q=FKXJ1tvL{F32 zFJehhu!0vHmp8iW)dspO?s2y;Jjxbscd?TTSfF{K_KcP2yTB;It0na6u2~R{v}!j? 
zF@L(NZj6Z1Li6KbZj~DJJniEiu6TsqSW79CmOSkgrSX96hu;hI-0UpMD$}%T_YQP} zc+PqhG~apj2=Q82`iRD%=)OmhNI&B^XxX@}E>3m}2}&N>P|3-Bs?b;IX{o`6JB1-8 zKjCF(H#+Y<8KJ9ngN<+_0gjoJt>d$NWhZJp5QK!S6j8G&WunaI;4!15D8khTwIzLyWh$^%e;*M0Y4n$jF1F4%P;@hP_%h((+<>kEea`#=;u6akKNKZ6 zkd+`NSKh1dv}AhVWdj!fUT_Q)+o$XBwXzEs{quRBW;=9N@0^&Iv91*L7@--=40}j% zZupzO2?{X0MiKw~^5*TU54m1{h#E_Y8G#|gJhsW)H)7%WU-y?l(R`M{|C>4b42qW2 zyYA-Z@xDvIhTk9>TkP|>!HTn!i-hm==V8bOF_6%T*1iuea@50c=F|)ro%qg85M|Md z=na%muNnn>vVhEIJGcL~-W?+{aC6kk-2^_-%2B8j24~fE!bni^p$`gQ?SlGv)87g5 ziT=_8K(5oK097e_yNa-3>#;z1P`nNXw*?t8u-N7=Tg^;NfO1|-#jRL`*@c?VArIkA z!+B6PE&(KQUi5Y|6n)PvjIP)M*)q;Iq+Zp0;K0!bnBC!A;eC$G+#Pu4B`C~1&`aXJ zVb}=9`#15W8nQteq+b2p-J2~|LunFYbD?!K?1WfL`Xsu(I}5)~&>)NNrR+~(rFe@V zPCv`N2QD6`_i}pd3;_SYS&^(B|4?7cP-Rv+PWOe@X*XK9;eL*WfLhog`?)trO7()w zuIs0*FTj1(w4Ij#W$G~)4^Am7ZfX1!@EaYtEqMs}QE7tJHy1jm_pt{LPtLczneF;}eWG71(WobcKLS^j|S+bQq^qc3E_xt_% z{{HIab)V1kJZH|#oH;X(nVBDjRK{yG(BwLD-bzyL@ex$~N!ZVGFTZD)*+nk!ODDc_ zHbN)k2s0KhetoYl7p6F{=H!q)Tw(!kJ6kB9o@Srl8kxrrg^3B%EN=~ZOvb%L1PGh+ zIrvWD4O7*>d3z{A(3ab$QFVWZXXu$dsDl z+5es}=TThVmJVq~M+cs8nnvAT|GbaY+43Xwa))s7%$LBvdePiQxv?1WcG!IX-$0wM zbJ|^xcxI`5`^q0lPHmev&Ol4x^0s5k>R2f2o-2|9&#cq0I*xppdYb7XT>XvWCX@qC zfySMmoS|5?Efl0Ir@wTDqm$ajVGv1yzX_S|CND3@PI3RKnuF%{v+YpuhgKXC;5A=t z!C+zsj|e9771TRJ(BuVSqwEcf)AyCIYE#dzHiqr_9Z2uOzerJ5^U>lL3*ysYT>BP8kGj?@4* zs_*chyE&UI14&sQkg%tcFCkrL9dg{QAt~A8>=C5TWGXV!1eBIcgIrtX=SUm+z~q{n zKUlQQT?mV6XI)kYZ(d|g@woa~Mk@f)-2*J&maUu|fnL|P>*Gmh(wxg8kJ};3Y-au- zJ}jx(+BU#s?szdq7;@R2Lf$k&xpAAfhTXPbw;?v)O#lEv?Swn5|(q%2_I@sMxDh!ffI8ekp31Eh*YcO{tExaA_ft|(Wsk)Zh=(#Tj#Yjz*p|=D_<_x__#ygB;5g zq&?;Y)K-A~TUHS9evke6{al;5UHLTa8LG2GTz{q0dS5i(S1EzC!&3x5+cI?}T>JN@ zXX>W3rD&shnY|x6NoW6p;^(dRPMQ5$hQQO=nLX>~%cuAYIwXg=TP0}Rr*eZSGfjAB z{=8m&_NK7;Wzk{(N#zy!I`(%g^K@oDQzi5DfBF{-#kkXsxo%ZaW@Dp>8(iCft|;HC zw--d<9rRL8gu8d|3h~x|zgYPjs>;1VdJU-kQLJX~lpqhqNuHSWv7!#eMc!zZFQ)6< zouk8M{P)JWDAg6kEvrDOYxRK1yn;9BQC= zbNVDOm~VeKb6vYAO{6S`M)MckduDIN`5V(bvybAWOr>=C^;{!1^s6e9nKjZmN>fRV 
z0@H==U7yA^^hX{3x&oXzs1{En{f*3N45E#4v{zz{9vbO3t0svYVC4DDb-Rv`{qFvT zkjZ_S31)%s$Cy69Prhmh&_D)seD-sEDM{D7@O=-wJ=lBNri>VjuRD{-P>4;CjA1*rHSU z6>NA}W1->OUJ=n^pWXuJC5+uIkY_y1&IF=}kn3$z=teQHf#hPMfFn=KLbTJiK%{juyJ3aJx!_aBVkahK{6o zk$<~?>h3M7_umi3O=nW{po0A`HxS~+1F3%d7oL=KdNnRrr%bXV`E)r(Y9E>(OcXfSu0|_`C>?p z?nWvGYh5FM_N4^zGh5))DD;@H^c?ZOG%t_Ge* z%`f{heprWl(V#f~@sH?4HZ9Y&+okytNlq4i{#`R0(}~57rz~}eJYD=<-|wrcR&qaQ ze7)IMRd0iIg1=zt4=VOrv!T2@r`n39sW~7#q-rOVKHqIUkvDdTd&2LJ&)r)Mo2Q

(0}Yh- zzx31lJApqJz-mlU1@V`GKs^;TED`c8?`1c~O|PtuEz?VFG0SE@IC6KUs^dI%RpJfz zV5LI4>T20$LgFU=v)AX-ilsdx4k9ew{cg(w8v={*1nc6&cPn&HKE93muBoE%L#E^X zG+m~0mLV8huNTW2;MDhFDD5aXCJT~ZeFG;)S;jkB$BXwq{X#}8C$c0>T9ydTQL6Ih z1o_Y3#cRGdQqlD0Y@M6K*ky($_SV#0a_P?SnA11Tw2ofUIu8D@7d}ZrioU}YC$WD@ zD`G@Ca^KhB{Zq4My3Gr!AhUGBY~Ck{F(oQJmuJtEN03`;-Mt*V-S(Bw_5KcVS%QH- zw2O$GI&@rh{kKL5_RRu?n{X$`8cX(Zt1%{Y|58eF=DOG{;B9CmQx zXmorqCILj}(1nLv;MVBQiSRcc?_B)G|Mu?7X^vP)S~eB0Ty4V)OC8sE?~Amo+MPT; z463V1bQ{=xzn22g#M})tUZo`RF4s%%IV;$HQbX}1?yjCTVIhx84HbhrZL^zLYzM<$ zZ%FBN`77h3XJP1-t9rX^GXv)}P8)E!k@I~~X0iwu;lWWR9>O4x*z<@dbhj!Dykr^# zQq#_~CjRhMI*N+t0Ru{~=I`*^ZQ;<*7-W(yi!jNLF=wZ_b|zU9kW)4l-`7>e0h~jmV+1hzL&1;rEWB%e2ot-YDS2@LPs*+qJCy#By->Z4+DR zt8WgaSw3xJ@+D~bWWqvTa}c7$$L?*QthJkcsYGsBG~E5G6}^p=#`3tI)No%NucgTz ze_aVnSNZ)y5)*G~jk5#^oTbm5q+^n0vbVM$aoT@~T)4k|^ z|J7b_`C_kUKR)EW>Q!USTOZi%C9C_q&dFgGyEeJ182IaS!RgWQpGc*dleu2E0;X&% ziWlb+>$GB5qvRbr5_t^)IM<-P_Mr0E8EkS{Pf7ic6Ou_>uouV9+)7Fl7x@ z*aH8Q|B5%`{CH?C9p+w7lGZ(NGby<1=_AQVPaBWwd8CiaO@NXZ4?O(3 z)SdV@p|lDdtbuIFWtiF~--o{Co9E-f!=$T`tBx~Xt!R&Ws%_^<) zci3)^Lt^uDN%Ii zw%5iUe$3DtJ*53D0j=29=pI|vCY3iCN13A6h)vgAn2aett@I|NE?4}SL}>s3U1Um= zX5gk!In`A8Sq;15&TG`rmcf=g9#6;X^t?1&XfAVP3C~n#&zY@p)YrfwC-; zgRJ52FJ&)k(ZuxIY^<#1KSr)|5SaH6(bwbXwedsp>?1x3N;wsM{XZTzqc=?GovBVx zvCWR?O(?47S3K4DlN?{D!>O`bBOo+7t`e!zir{E-LgFi(+cL%;^W9D4bUa-cKN;od zQ05DD;ReooybnogLmgfIEuOg^Gn-a)SE~Qr6Ac`<@}HC*1^*{Dx8IjHKN#-&Ze|*9 zq@2|Kz8D&JCMMDN8?xP#TQTx|R~8uL7WX_nA2*7#)7SKi3Ms$b_Ols7#mya2aky>k z+sr0kWubjSx%OAiad8`$o^@N<5&ruNzgBf?R(KlqlZv3GWydC*!Ojr&+JE!Uw!3`i z7k8>lo9^>lZ(eQ`*n|U`ulV?g=CG_h#_qj?-!zN9*Nq@I>5i@HSoVA(QvufF@NI;>1%s z%plnvsvmg>psXiZUu!)I0}tIZVeP0&!8tJAOC5$h7K;C5Lyx9F*RUA83!RAY&`VBT zC<}D=u6s>E%z~T0?sxMxOnjscg&L=3q-gvJ^T?x{5(`@dK}BC^(`+n^goDR9$@9Ew9DEeP_H*_FBJ0j)B#4 z8sAio#)3_wvOSuO(c}q-IS&{4rSnOGCx|KpZ{%~;V&&U6;^p&LbVOQEndS80nL49R zEjk_z;%&?)IL6|C2OZR@PBIvGjGTd!B50fY5;h>{T=lPV{P*jqYVIRZLEpbNNgOM% z-%ByXoi(1dq1}6H5i0pqV5PwAun*i0t=_MXr=FK;LO4Hx6-$5XMzY)P5DTcuv(Lx% 
z5ciqumpgYd64^_b$(e%t+%#rexcXbz9S(xeX#5;8@#yr+j2D|PO$)D#ge~O13Ox2C zeRnB-6tWkwk^3r6s{ctrJZD5YoeJz*PW!`;9`sEnQwpOh!oYxN!$R`5TTKQ)J%P6PS$#DgHUXFgb{k{&pU3)@qe!6K?vD3cZ+rT}AZEM$`A# z21Lso{@vL@FVl5wK)cGz(-V!+?zj1HJd_N}-9ev5RO*hXw}uovxT40h>+(Ak2lVnQ zF(XsTjboZ>2Y)Rqwg!R<(|4ox3`#*L`qf15MQ_O{c~Dk*d>7R zHhg33aoeMvlAr5h0Wc6CVIFinV@O$Z=;9U;Y_%pAW$s|vic`@qN>&5 zRRTh=5BA^agb$r{>JtR_fc$m;Y{PHT3wx=1L%vY&P;2rCdDWdrYlnwha}(~@Pp$Z4 zycov}@+DmNK6(?rz%S`nO|`qma8z{y)-2F_Zfup_QP8xy_7$?IJI9s1SjruQW&P1N#hX!8(pLH!WC z{DDu?g^En8vNcsaW{i)*_8t81Ud03rw5hoNX!x^~(m*YBjMhA4QNW{0M83kd8f(Y4 z*7$I_J2(9Bncd0gh?6tT>WN1m5z4Qr^u%@0HBE1ZEacy_$xhwmxnp+LC`CEzP=%8Nzo7Zcmxs`K@Ik@Y35Rd@b<~GKpuY zo7X)*D^;7#`96EysJ$x(J!-G6?a|$1b}{dDeUVT2(Z+%kMPZK1tLHp~zJ@n=8@TG% zz8ER&#AJ)^QZa#kp(>EQR=T+07x{!i#6tcCR0So_veBHc2@-IeRB{8UC>h zIf~)rH+4^m{Zsu17r4b9I54m&tIh$3WgA@mT6A(i=kJe?8BXF8lS)Co(qHmec3$he zLgDf;+u+QGtC?)SdVx!AK-~G9VOnsKC%I<#Yf3LJk^66l=un(L+%yFZzEEYt8NP=0 zWd9XLP0I9=?h)`?mOx~jK$y%pXY@|m>~GS3qJhGgl2GjR#xGSDw+Hf+$WHC*3^K~o zD0`C-^XX|<;j0!tSg)yRAFDXow|g23F4XeOtUSEt?%(i`GP%#Cj0%PFlI+W)W&hW!9VD=nf2Dw=he@CDvgoP^X>MTdh+VW~1VpLC5x6n8WK&+{s( zn43)wpPqb`cewY?yJYsdNMphA0KRKs^+r(v4zt2X4`ct&Job_YDlkor4vg!C;$Cj* zwz4P;5%?`faHnDhDgTKY8Rc!>Z?39D%kL{Sbi`iy%=i}({ua4iH~ynd(5OVRKQ)cl z;SzaBVf@7de~TJjUG#s}KzT7}%amA+CNkbDb~9?1B8Dn%eAoDrV-}l#{tU<<(PLlq?x_=m*-tgeZk;hQEsQrMQd5NDyyOM0-8emV zm)^oS{2dr7|A|(~Q@meKo4EFS;zRk5Dz`9IG9qJQ1AnPhisz$A9FNg6%Kt1-Un@w) zBv+cX_zrV~-GgPc9`_XaEt5=Scipr~dg%O|jKTj98yVo_q;Q!hq zZmUG9NB_T~w?U_2VkiynYi>ey}KFRY6<^TC@dg?COA|8_c&X?XYevCsEb5_V28y-GTm!io_ z>{ROjty6yZdCu5vmtAchtBtvtheRP}b8jU*NLl1On`Y+BgElYqu6VO8P5$K0ofDkf zP0F0kRYRU!GZ*e|KKrNZ$bv;t-1lb{uZIzOX$nGwONz3w3r%Ce zIL(3g=j8nSXeS1Ws^m7tR>C-%m3|4nQ2KWgu~XEACgzAsN!j*6@70wCaZ^1ib^a&)V(yxbq52P3=d*t+F6jz%>b)zxl7mxBlJ$ei7H^bisJK;A zh7EJ;TBes13zm;JV|IC$pQF=h8SCZ|WB9T9u|pTMKGoI(5aHFEeQTcrFNvt ze5!x;=!~m5v)WZ25?8t32IWcb^5h{h<63y5ms;Ty78#X;4&xOzDd|i)p@a1)jNQ(G znf5Z&p4b?s^KWY_!r^VnqE~3rG??Sb8l7}*4<05h!vnUuS2fXx#G00qpz4*?FNBZ? 
z6Z?=1@DF@ZBr^o_2R5#Mo=XtX5S<1<*&?4u0SJ#q#KL1rPrG@L(c{nXXd&uNg8PonQXwO@NRv$ZU@6#*$}WK~JIX#6NT!uDwsTJ9An?Mz@ z$lo+3>Dypun1Zuf{LUCb@KFl4BTo=^C$O_DKv6{F6*!} z+R=3W$V5+&Cfcb6gGu@9P1~U%618%0AKW=0Bw>2JsHFUz&0Yg4HrZW>CcPF*vgjk| zNPnQwLE@6_yI24i0rRm@AEEDY&Y*?q*zzfUAXcz2?6t@1wk70v#dyJB#PvfvCqvd@ z*oLM(B#r+6ZbQ%l|J{2#`<;bMm4&Fcb1gZP34FQM?yz%OmjUhmb0wvK&mMSpIr#vv zY?d;7>A0JeSs22Idtn4&uM~NNSg8BJ3Fk7ei0$wT?2(n%4!^J>ygSM0Op=G-ZW!P{ z=WP#gi2;{s{*L?*f&Zv8WrRqo2qOh6m*Ude83O}vk;@#sMB0f6*a_v2PYMufS+OJ4 z+|!_E?#fRsXxnFS@zC?hJ)}8QAU2O&{VO^WZ@yEsw6<ggCHwdz!Dn{-!?J44K z;l3lP?!*whqlopvk}{xHcO?B8%t5R3SZh>k)T!_JpG0ncF43D4=fwlJ{Y)(Mf_TD`M5@!tk()#N5&nNKdy zkq4ECiOXZ^DzY4kAtF=5gTO~M-gQU35|~?5-|za+&Q8xUXa7Kc?8ktd?h~=xp>sTM z?8#}Ew1oMN0TUbr^1x4_DY>)6W5LgnfhYWw+erUo0fd5yi<~sS+b26k;8g*y)ozkL z)I}5+QB8J7fz{#7RbDc)?hLaJ5iBoqR+3;@T2eUZia;TvtGN`U>E|Il!5OQ+hcwIw zdJSZulJoR}I}3dZEXYhn8VF5Iu7G@!rD{iM{ohaC3yOw?TAknFx-?iw|95KV9cBoo z;+vL)trZ7u2%f?3d^iYW07Nv1dwha457OsIbC5%n0t@Mr|F!%bd0{_}iuUeW7yo{N z^L`&`T|0^o`Olv7z=z_N`FH-=2mgKnsX9cO+kgLeUJ5MT_Lct5iLy+0eQIpJ=EsRN zHS@p4NIjpGd9Z2_0lBalSl6d>mc%O<_6)Md=(rs^E7Cz{c`gL8%o}QGc#^x`{l(|W zNqa+G`F~&62fb_{qsPCaFJ-B?_a;T#PPsA->HUccm@+Q03}&>O`%7!-;d7^UhcTI8 zkFV~ipxf;1M2XXiYgAK{dc|T z(Z=94@mQSh>x7CAOqMDQ(<{f8NpuSW_z197G5SnOfOt^C?y z!;ofa(stOxXy&G(49;%sc+MO3-<|_mX$EpZ#z!uWC~H;l+fv zm*RJr?lf?hP2PZCfkSRC9+wR7qULUpp0wZ8vT%wgt+0V_s`zO{G?Pu zn?)3VQRmn~uBW1=Ace$VPb85CGftu(Oe*<#1XNQX+=;P)K@+kcbCRa>juVZCA8ou7 zI!GMB4u=x%gx0@R1yh9z)}k%;yv#g+Y&~328w-9blaV14pFW8Wfj4^xyL&nX+_kbvZgAsxLWP3HXwHIZ0W^$WH$fz zG=K_NGYwuEk{wBVN!#0v=n$qI-a?bBd>&l`no*x}ARRCZ`@8* z6IrGhFl5V5vkk;Km9-Ci1EV^$7A+|^5Kv?A7nqRMZ@w*rH+8tkRdC+b5yiiLR+Bz+ z2-zsFIC(@efxA^ce~YEH2oZn04;eNav6QD}ej-x`4_QRfv?W)4{#8wq$4UvYFy5Pd zAz2vY$dB3Zqn*}WWNrP3QRsR23L7$keK6bUko#--_k|=8Wnj+*RA8;UO&Z1+L)K-` zK##TnjN@M9qt{@ej?pJe5JiNE1}cRAIMn=k!RQkmG8Qk$ZtCNU1IR~=HORUo{hqSd 
zhj*kOK{n=48l3B2njH?4UWKow-lQ2VkXflfOg>9ySZY(&dD78D!5%c}IIQ{W^N;^o}#DDkRB}7JEy`n^$?csXedjc8#4@|EA2fZL-yy&pPhF`OHDrR0ZIBP{RA#?Ti!MuKuWx-YCQq6WFq7o`9?~hSa~58ojkwbX6~yfG97t==h1n&HK0b{+JIe)h z%)Gd-lJ8VK91I@-g}bjX@efIR{I9JV`~;ci3dVnytA6?__5-qg(+JQugx>>MCFlqh z+`)96a;k{pRPsx5$jX4p3H+g&1|)t`vfSQWx1DHD={qK*mOf$g3=0b{TFUAWId790WO=5(`dF;nz2I21pP41Ea9 zX-RgB3!;)b8GsCzzBf5AErP461RS>!VB!k|uzgZSVc3;`&rli0my*yHlEJyg$k+A) zjSf)cAosmp5_UwPg((L;6osou#66g@Y*IG|49p7yClkIF)w9?4j~{*x;xY=?xTul0OF$S@Jdjf7 z?OtoCQ2} zhpJg4ii0xlOu_RrKti^jpqrPjOwe@y>)2u-{~ky2?KLro{)|VC`J-piat%=jC(iSiPlFn?eaJ77fFvFW_CQEUGUj{m)F2Wc=}Wmki5GS_Kk!W?Vio724Kx8TJeROqSc zO>_hu*{Fb4Rre_v_J8lBD)6E$Mg1Df@#B)R(f4+=X*ayieDt4`P9jqJA}UCvo}0k^ z3gWu3O3NEyajES^xR?bd<`|@vP{7l7O@ii0(!A2GM+IKU4l zvO;K1W1&2hm7J{WNGpQV~;_$PDpuuK&;0!EwEfI91xZs842Tnwud?eZt zuu}d3dLvzfVo>7XVW?Mk-}_zg{Bc?G=FgpRpV&cvSiue-IlKSUDDSt>&-)eD6w6`F zser5ObOyNm1T;+$hlGRv#~s*XnBcd(L$Q+^V{Cuz_kMwnb*?mLe~R9GMZW*B@Rere zagc)6^0AL|@D-1aA4?zxM(LoRbjgFmQEQDe)noU>5h(RnXzup`0mDLHIL`9%M3W0f zeORv{DEq$h2x_&x^Me{=5&*9#hh8BzF_d2+>%kSuWm((=@LI~H3oJ7qHyx z=@tS8cUeyx27-eR66^_O_EUsJGtbLV+3E)=O9HMQpVB@0F552m;;L4jP!&Vfw^p@Q z29C4Sc{$F_&XGKc$vq9wWuo61ko=xsi3rLcaz1kVmK5}r(PoIbK7b)x{CDpL_0DOo z9rtwTBfZ9fVwn8Lb5K*v^YXW6Paiqb$SvszJdYn2)%J3b$>uaQw#q}Lthn(MtF!s{ zK=-v1YS;V$&?|=YxcgCam;!7cl-yj{D>$tzVkkij3xLthmd|auq+?PS9_Tgz)VBS? 
z*;(r(6#sOtczt*1XUGe1fx3OGUsaAp#TOEOwMH*by+4^*@u#ICei1Oyl1MWd*7;j` z``BxhmXCQ>LE+GU{>LmA;H+AzOA3@e=t&+zg+iN<%pmljbdK`odH)4!z_lX~XCzf3 zO@-?|>MOd4#!iT3YE&f=he3JB-B zA2h|q(cT1s{F>K`q_g{t3nI)FRJ3@*1To?=yDN7L*ix(5r(z zQfcwiyTjLf@=Lg_0)EFd;vTd_uK5{Y)=Y<{O|aQE zU-mlEWpEhh7=}@xP^o8T0}b^PHm=k+dAl+ZbAre-HuRtg*<&$cZr&H3BvY%^pi_l} z2QJTRCQ{X&O}N%h@utO(K^9T@sp*8oM*?YZh$0vh9VXCuX4cP`Kw<6yg~>_?qR!xs z#d%SEgzAoVD^LPZ6HD!H56v{I?f=RS;y_GIZYh-V(> z&_NXF^7&_QoZIh*!Tz25O`v_L&jw9!d;>8RI|d4o8C1K~62Cv3MhC!g$S#i2(_D)@pzk9NeT$I`MfVnh|W2<05 zXXGh1wli>Qw$?|2bM@8#s9zTNnqw%o7l`Hy3Zs99RWJnefNV5c44fihrx3wTnF0rw zP6UWJTKIr+79+4+;q^UgurYhUPeo03-vrFC1vq3#Hqt9(9Y^4^u`sgI%83`i24V2y z0U}RDDp6+4u%06V|oE9;N@nj8?kFC2i3zHHCPB%^F zqYTbMq1O+znlZx>&U_WF`+MLyNBA*s2>V0!0t~xIj#=eEGXrTYX_`#oR|T+20u19r zF>uYdAYJo2` zQ0$~}_4O;ATd%!f0m3lb>P<~o_D%Z$FZBO6)3A7DjB= zYak$wAxtIUC}2*01-~o40Nb{0agFZc)d2EtEl+N4EF~z zfWK=;G#GMv--O(>R3T6fmbL+V)lYaomVp+aCoAWjH=y_3H8@#HOlP+s<3nO(?ai6? z{qhUs|&-Xa>cF6K`+y>Ag z>#`kma4+h4^Sm|HMWIbzo=up1^)wt}*K0npfEd6XHM$GfmGUBcA5|Z8qq>6Rq#(^x z2mEGCMouH0{+xarFgDH*@31|J=#%6iq`EXn{m{8_iefuJ1aF>;c7p(oju`A|3+5TP zhM47pJ9EtHB4mgjP**h>Iuni{ZKEn*t9tc@Cq}Vyqi3)w`{KGK;I2vr@<_H&%?NV+ z2qQ=@5crJfIdIL6%M8qzUsgfC=c;O4saG?wYLi8OrB;2)Yd?uvvxp6 zS|^#-L)RyR&-I^TCM4jhSMF_w)S40Z4)z$@~F3Afv0k5pk~~X>vcoBvQ6GZ9IM{EDx6;@eB;s z|B3cVXkn-%C3I;#T$4h694uZX{E(Y~(@#`)^0a+0Pe>h9vVAYL~jPACek2Z3e=HBa4vqKu?8! 
zW2Zu!BSOa3U=q^Zgz8=w%CMY@x3#2>`~EDQfR0?Np`vTO#Q`?-Pc$fN0{9Mi0SMUS zM%I=qQZKP^t<|xCN#rD<&+L}jZ>TK$iC63^^gU~j5-5^>jiu1y^y)RhW#mnO4IvIT zZix(}Z54iT+rF`-`x?pjngG&nhOY5hv830+#Si#cJ$nGHF%$2V5p!nt_&WxuNf ztq3~U+(73s+}a3D zh39@#Sc@_|CQt+tW>(69+omV0-cAF|J>_oJL?OBt( zf4x6>E&hV;(IlhqP~WO*)b>Zltw)n@3#*15b3CJ_*L_D?e=fQ~Oo(wrDl+e4z^{i= z(ch6Az?PEel2Zpxvc|tt9o6EzltlRiuLf4@(;7&DYlqr>*K!e9&7Vpw55YDVAR-2d#A;w#63bheF|wR7+RF;+u)docoW^2kzbF z9jC)YDXmk9PBWO(>EAi#7Qwj3F2f^H`&C?e^UH04Yg>?V%XlUACsb`-_FT_QRNF}6 z)QPhG+3GtMCJ#BX4(;cvi8XB|$h+p2gST~48O0(9VeUaie57iX`h zz%YCB?H*~`j4B1ywt~N-1rd0^Q^Ugf9CF7^A_QeN^E;^DwAY7%Bk!iAY3D3Qt}+ce5LPBS_^htbDO z^N(Yfp_;%pgHW;k1k^5ZLdr|0AQr5l*GZ8im%nf!sfcTVPTT{w?LB&0s;?M^F1fn^ z!22VM+H3zgx`o&wQR&ZDL@Dv$+=*(>v|@I+sS`?z;F zvqtP6U+A6O@I~cF)C+rul$D@U(~yCa6kVpt>@fdX4~kPH7Yv?|nB{S!l3}PDfGbMj zY;8pc*3-#h(8>IFY73_*EJ3xbqP=nxoOQFWw>c3?EN0PybG5@!|$*D*Tl{}r8VbJ zI9__3Vi63Sma#T=U%IBlKC$2Sh}R}9bbmLb&u5nUfICGnH9Sjo>7;RbTV=COW&=_| z8mXBe%f=thY4T`5JTTu3QkL<|Hsob?M3Rh-#WcHY}$a zh2aa$b}gvAcIx)Z%aJV?t7qANo@)fUUmX6mZC-gUD`P?LvQP$6)ccys#mMuUSWX<3 z%mb|4?~1M`r%Sns4)m9h;9L}Vm>BKe9A~79unD{nrjeIVMxMwbOf!>x=S0~bxA^gk zU)ST+OpmU{4_&~vLG^5fFH2KRrjB`h?PrGRN@lOdWC)4tT9k;iLG6;Lw`r zcPf?~*_=32_@!%nCG-^i2zw)zR>H!D7<*c<{ESSuqOlK?N_4;6x z;n&n($YQ8<4#8ryz|)q{_(t0R{xwi7)2E{w-o*l>Eh9-D=WXV+wn0rwfq4>w zq>oBhK%Wu=y1h})^>3gnC~t=*Jy6v6SQS`ckEJ$trC8?3OU?m+VVm2SQtwuF_K3p@H|3R%A5KIcny-RBw~=c#}kP)X;nJv$vbo^W2Eq^aZ_&VvWEG zhEfghjL*5vN9hc;7P4*b$!dJf4Zi|a}EuVc4VX#kR_R>Wi9^Se(4 zS=!L_s2I0VBORZ$`w!~3$e0I&svJdukzoVjzE{xYM6($GEh2?<@IntX|eW(i_o}wh1CbG+*1Ix@dLTViJGB^E6ek6EbnMk_~dm9)mLEsotqY7pTqs2@dc4n$=LC!zcSr^VgDMuI3ScQex?_JA@iBnQ^)&c zr~U=s!iu*7-g&MKE1%xvH(2Yov5O#PShwcM%LDNe%AVK2w2MBjA|%EY_oS>pGq*j8 z&NNE;l+R-ocGOxhpiRt8uLm*JLG_PrW9rJ;(oMD-%YR?dk~m2|H95Zj5!>q)DkCS3iJ z=ld(bVZIxvtV9rN;V*77{?Qeg9Y=Y4>;FjqY)$w!*MNx_bW#@>AtA~lb zZu?cOPmBt1+I1&<1pk3Y0E&!^LJ{#dO}+4nQA3Vxw9oCe1Fx}PP{G@9+m8X{A-g^d zts}=2IRxI3V@Gw)pe;yns#pkKT4?|QIN%FmKkHD=>Rzhfn#o=UtsUreNFMYCN5jX% 
z^sjZzvdZ2S_RVb5qLwID#E=AK2H7tJ=CaKcEdGxcs#|4)@R)2v#tQXyFj- zik(_b(r~atmAFW5%9$v;Xr^iRtTc%bu{@ZIgzpCx2&b?Xu%#R zf^Zl1P;!pq5X2qB0m~@K-6`=9IHu*D{aNsv4akN1!`8Tm{ggw11~4ZtnA5rz(2~DJ z(*`n${CX)99nT^}bXfi5jdDj4pdPxoUIbHac5!dw(#fDTt58f9EUSpLEOs;vtW>%_ zsvA)9@h*2?T#6{RH^aUSMIif5RV&vok$zqfTp~Tbppf0HTY?=ED=Wftn!kH<<4& zL^a(vo7)Td!NX7I!S(wO;WkMJOGzROHP&+yj3*h+^PdN@Gao(U4uMq?aY42?VDFTw z`Aj9@(O#8j#iw9@pS=6z0;t5=f2hR&XEm2$HE}ZWbMc%(ll)OngMckxy}vdG zwI)9dzkUIz91B3@Toi<ggm>x@AgLiph`~`U1pdbf z>xt?q&2~RX!e*57!3OJE#UD-qx2yI16+mW*3c_r!#68IXaTVF82Ej)Q1o|RF5h45r z!SO=D4Yc{Xv~E5xJeT@P@0t^85UvNdP%f+Q5!^JyWo7$W1|V`50?bRT6${bxU}4bM z9`6T)ehM@egk5<3xD|;ZOZQ07Vc6k&RmBlSUjEn)n3oSQB>_S8UctW~tL`m8NN`e+tQlu# z#%BGN)=aUMItP}K2+OE!=P(oBPb4XvOmNjMq5)=tLzy=M$d?@nSE30FrFT)q5gFLe zZZL=OMwRSvP(Vn`30NT>CZ}5~nSOzT(74E$*oO!yn~9_FMcAQ7$8RCwehma$W-5Vr zEcpUhTxns4kZ9q)t`>F>PM8*eIC6!e01aIE0 zr`jsGTYlOXf!Y8CK~i&`3IXr*Z@J5#1VTWHl7I?K_Fsa~A<)v2qDuVWEp9}-#REJr zim>;~?Rw7IX}j|Wkr?gnMS-2b(sFSu7`9~ZL`h<5^rYjntD@i^ivZx4fDcpE%MesC z61VJ!ErFz*P-|Me0JrNE0q{4o!YZ6kP(N2d6~8(bUQ@^pkR27rA^0Q0v7m_J8RsxE^)`uPaXQwItBQ0qK9{cqi&n4|EW(>;wf`@8-DN`k=s zEC#3`?KOAp4B1LMR&TJIr*P`mj%Qh+RFp6y9NL}Bt5ATpeQnK)TW-elO!{Q^Z(aEe z==@a`-06#W@Pz=w;aC%*bjB6X*`8M*cf{Zv6iZ3v{s?Z)_DqG5_Ovr1?hIcJ2djbJ zVrjAP9*Zbyjnm^mv|&GRsiXSf>^|;P3B^v<%XN3P50pp{$0 z1vPN5x}}Zs(V5y0jNv?uDjqvu-1}0 zC_FI16+jSmUW3P7{si*X-O?3l5Igw_&_h@MUhWcNBK~Tlon?uNwtsvTblMf!V_l3- zMoN$zpdFa%p|Bvf<<!XRtO0RwQEfTqrM~0vY^DyC}%MM ztxh>t4uwwcV#MqbvrKGh$XWVgPqaPm%-el+E{}k-z1iMTsQe4(kx}L&(B@RWWgEIl zre)6lgvk4VK1l%eSkZh9vSBJ;sE!i0|FQc9Mo4$v3ru@)$)t}V`-!Jti;N?Xs5`KV$oRXk`X0-wC z_oYWs;&va?J2xT`kN19*hP$R8((8);_VYbMCmfjoR7q5Gr+#z)C4!9$J7<}H%;lYCvw)e${gr2 zxW{@6IeH?(Fi#eKiFb&_MYha>s=7gUy)BF9O*;E0EPxG{x7GX@uSQMoz+}Ldb40Qd?1$)Is(H0kPs=D`z*XKb{D+n`5$s3xK zdD`4H5i!^QFr9^9-PD7bDFMn_?5jFwD+Q;zt62ib=jWyI zfXO1GC`FevIzeLB3x%JvQ7wfCK_=B-%`T5xj&Hox{E0mJml;oj#==ZbLmms4x%GNf zOD^){JLOl}$q7~gpn^Mu;!kO^8zOU%VXvk}=Ace>J_Q1=Lt`zco9^8stECRUM|b^) z4+kM!oXm>e0z@)~)fl{&>}Yy8H_i_~7J-*sC}caZ5A9aG$JL~+zcy-45~+O`BePJF 
zMarGX;1IQeng`KKSKnlR#Dolcj|sUf{6E8@Anlf4 zggJjr$=yJn;s2xSyW_EZ-~O|*G9yB=_l$&OhES5s;N{Np8e_jR4ud7jsKypQ+sK8_>jSP1)`gzpuWV-wxW zvB!XrV!joLyRtCbS^D$DWT=@7C2fd0iUQ6qN9 z`>AN1^@poQ`jd;1oUz553yC?m^ z;~7{b&;%m}DOC)~836$x4g`P?nRr&Y@v=#tKQYo!ychU$W1)Z9e>d=;cMhp25V*)% z?{{vmrRmLM&}YKU#ID1E4+*HkV5E^GpAYWp>{fk*Mg~M)-1A=?QjX=>Fu#bNm*ro} zPBy+N5|thKEbDIdhRg00hlOL|r`hJfh;b_rc_53C3FH5l4*nzzkcp9<9{zfKzyHYi z{13G)@~ORGur^5%w3Zyj00AvgztbYBdw>JozyJnfFOnBZm-TE>|8^GlFYIdgUWobuI#&iIJl8+- zQO0A|h}~dX8Olef4#~pi`~6mH^mtBS(sl@CxiQ~Y%f9qO@y>ezP$c$lup4L%@34z| z|M~U!d$Uc%^;1oYAUBj+c{oth#pGCE(NhBzPEnRn-dNlN*GQZS(uiIb2!Kni=}$X& z=;eN`ptra_^e<-`6fekf19=K}mJ3J9GJlAd-uR~S_44>%?PGa}zJYPJs|N0M747hpAa1y68e8S>}oj&y>!ZCmWn^0VGcTw zKSK18+6o7rHH12ycxKzx149px!i|D_F3`zVdGD+BA`)|x@R;o)Wv{mH_MMzZE79jaAkaWx@-PRVr{HJP)3pJej?uvzoWhDe`wholHPop%(k3eoxI_UxZ z94=j6F6+1SC3uqLy+>_$*ZE>iobaa}u|w>weQc(!NaD%g-3HZ4r%?U=nl0#9Btba(_Qvok%+nB= zl(3tFy73l4A9QiYGYoVVF6ZOlyaSarq-p>(zDH*3QcAZGc7@JL68Jb3z`Aq9r zzEGlc+VL?JX)qEN3=18AJ6gWE1HCmf>F*COLPc?DIVh)Dsv*+HvvqLtknL7>*)$U( z8lVqeEUG``@=f}FHN~`Q8(Rm+QA4tEIEh-Fps2W1V&<1_kMwmG!8`I$ckzzJCe%{? 
z=C@Uv@rv5$<#YguATAwO&6VzL)(`2*Xm%_q?19?fc9%1MH1Cs}?Su#;9h(Qz{MOoS zvy%-NRbE7`DVp zo7B2Y{pQrI8L!cYiysVf#vo!d3!S8&`7Aq{rG^Ra^w6LhYsAV4NQ$HnG>Z^dD-;Z6 z@}v@J42X@s+e&$M)*;g};<+AWq7i*h<9Xl#$O(!=u4i5C&4luGE{V9r6YEl_wz|GysEm~BeDYxn024Z=!g{`~yKeeb4{rCxDuKPqT zy=yk1DTK-UStK9)INg>)LnXcqWpd}!W>u?C;y|o)>5lCVs0{pc%{K5AJN3551CLss zIfow>Yn7hI(!Vi`Q9`#Y7(N8PSCP#VEL|G(4Cw=jYRyXeJEUBdt)C9@0YlLyd7M5L|tT3Dxwhg9^Zxv_xw?d;Xje zHd8}uP-}sn11YR{?Z#q}f7tN$I|d?JJK8Yq0*xSl=$BogrE2!k!|OdWEvAH2M(#j+*4Vdz^r^rENpUoFZ_7g?OKZ*SR>#LACOslNtVGUeBAr)a5V>g_@Cx)+GD zpZD>%teH*_Zw!}p=}2{60K)e&XY zVr&RKqSDeKvcveBUjE-&fd5=NI;v{>u+XTjulAFFLT|7}@P?)Q(dg8>bR9d>Y&=#F zet#wY>iY{9s;AEf^pQLkU1rxE_7^Visu9f*!OloKo`1GwK64`nyH}Rs{JVxDDJtvJ zmy8<2EY(|DfeWYvWS^a0_Gz_tf1G=v&L4_sibIp^kE1nnBBYwj#7SnO@)dHM5qC~v z>GS4xM`GaAF4{nhDmYMJ@2IM$2|8?Z6z&+j%L%@l4y8qA-TV?F2O{J**i+Li!0ZX9 znLw4?MWn6YUw(s{B+ybTl9F~4b1_cKyXtKMPCo$*qo z4VwOV!*{lM0$M0M?xoeub~QBUM5KCTr3{z}qNm~N>Bz|IS9A_M+l|+vZy2g|YOG+^ zwcnKOA?LTVGNtMLo6qZcKVGZNOj1RMP3BrY;J`$!W`Vd7&VW-$vPI+2tEueBxD1>! 
z>8Y}T_QxOPd{cjMp?ux?XwT-{<8_S!0a!)VmzO+S9VI#K?nPHnQ4V~MUS){^6}Fpu zm8SjC({myU+EJ5o{JlCq_K)tgUtB|Bx9)M-(gtdo^bz0AI3aDu=XT%a|lyw58R}E574#WT5k456Pf0v+x*T3s1GK%1ZriiPZwiCYHud5(T-#^BE1NVwLQENXks^l-f@(d;A zPrH{p0+Z9{Cl_x{>c0={PV9S?Px+YM=Gpq23-{OdKr`8rPPJK4S^Jet>S#%HXqvpJ zzyrf|Qr`uI<;G*fX61~_?=4BVJxIP9bb%`X5rg>F8yMTrcd8xq%JaU0=66Q*`3~5b zXNBa3S)r|bY=TQAHZA|q)&+%8Zs3e0VTj|k?rL5GFof?p376GD!^=Ah~Rg7jqd}x=<0b5sQ~{d zYmG9HdnrYTkP4HksV+$x4`k0j-<5>^kGy-|)~+#py|p)7KTUbd;JbQ-tz7s9XTqbk`mCq<>eM@XAG52UKE9RR9KVy7=e7-%bic{1zvxMuRv;Bp{gOnj zvezYf(kmmhYW->VbYi5t{}E(pIB+I0-Rv$J?wo1;s5`qT_M5|Zs-(`)`G5t{pP#a2 zXmS(_`1MZ-m`z{~&sy>3-HFQ9ST_Kyx`4xSJs^#POivE$ghPa9R!^!(f%M1G=%kMQ#6VOr&n0Um{J36SJwtmd3DqKeOHQO!a#JVUc zp6Xq?-gsBt%P-OOSPB<02w)rV&C;W^1OThaL5VeaJWrOeL1rf@Wx$>X^z*0FUt6;cQ@s;@2l?+ZQaLe^y5DzjE!7%$jl-mb7kRDmIUe^($EH#I+#8YYnA@ zd>A>$huUd`*MgclUpXQ_9?TRRf3^UhrQKJG%^W>Kl{3sK&Q$;@0=B4()dacjiSQsjO4=HgH8V{s*w!;`u@eenD zYs4^JSx8wnSHPFpRrozlpAiCT`C?- zNK6|Ub`z2VG^bi)vOO32F1SrLKPN9T+PsNPE-;jwrMiu7Vez>x`xX~PYWqMLnIbM~ zcI1|C>--~jhAW&?k>=L!J2;CMS9~khlDOugw%E16vR}c=zedx?ovqxSfTV_|Izo~l>dA6gT!E!@Ae;u+J%5` zYGQ65+Him8GPHnJ)xTRzAu?E{9>=N+J0eK#VBmSWUFH2z+AF6uVpH4#^z#k$7a^rU z!9b|72>$VSjPq-|rf%T@(7m=oQL>`JkG40F-Jl3rV#Vb+Ws#o~jgM?#$7)JP zvtEI=f7%~xy{B3!Hdl5Yk*7yP;!<-8b9l@&EGZx84-7M`o|4C%4J8wa6q!ze7fUl8 z{PoBY73L$54ZppSDple#F0WsV&ulq;`<$%jwL-Ic5tRtaWXS0=d>pQGKz!lF*i|bP zd9UAkaCAIckrM8}QJH@$GE*4016t(?tq=&y0|}c*!_Mr78R|~8C-Fa}MZMR4m6*t_ z)d2}BhKMTm$9}wbgqxDN<cBb$MI&XYMK ziZap7*35PyN_d39tWLG@ejyH>2p4NWdlpWVI}RjOU1g@0qOKn_iukQCxKl2+Fj85x zG+eF~%OaE2`e3g6+z%>|0*&Z%Q!VtGXbNVj>yKT#&N|{?Wi^YfxS$*grpELuhMX;<$RTu%Etvwgs2msr^dUp`V0GMK^WQei(LMI<3KZKU^^6&)s+_<@|fy&kv3h%^km^ zgY7lQ{q2CAedUgZ<2M-MOq@)@>Gk43ktocj0X|_$Jx((! 
z;tUnkvM@;XCwW`#wV3zyirhI|!{*Ak*(M$pedwJWL8DmuVP-M&AX)=hZk?^4M25wU z>f^A}Zy#9HF+Q3t@!xy!;<9()nvf*{g~-k&5|PLok=jlTuBuO`Y&OW42F~vO1yD-J z`rfK7HrI=+NME{%e4BCZae3Xnex8NC>?aQce&p%rogfuKZ|FRx-U>apdPKcCf8G(_ zk~xI5XX44pDY?(N4)$po3cj`BtJxvFZD2qv|5){; zUlHgkhG-{gYtS!AbSSlQxI=)(W(FPct0~)G`wnqudv=@1%fD9KAI+=7Zc`~p4%4x6IBCYDwwfRBLI1sq;Zs%~(QXYAVK^D9 zy0{~zC-17l$L8T1!wPJ=pOGRmQi1VlxoSHoEu*0Lu^gN-5$&`NsBCT!9D zXeHN16@r`5N1x0a$MVga)Ls4%%zRv^V0%5W8nUBwn0_XmIvn?meOk6$l%9Vs>)Uz=%8DkUGn9`LyR^_oTAAU)ZxtZ z!RNwa4b`gGfq2BVfBSv(gWu(?`^!F9nin)Hh5`%wYOmtd`o%j?(y1FzQ#_r4KzE439=^M3MdC>2o|=B-Ac{7LnQGbhk8&()^_z6Etm^pi9Fbr$SiIS>Ty;&0;bJ#Pg^C^%NBXg|Ct8WTM;3j z5b=mbLa(NuSbl0;`~h0XmGXs16qLNjr<%UP9(O98%BkriY507I9U+P#k|Od7TA13B z*yVL^sih&-hE4>FiBbzbA%d&QG$(5kD?g!fndH;eTc@WVF61S`Q#E#{K2by83#QXV zV?EH@p5o7S{4CiqFlL?gi;Es)qNUe{^Usk%H@in~*pHWI=T*>hD8tDx3RH%?zuuVU z>omq!dy{5gMU!qlBF(TLk=TkLi-wR>{R|lPhyqmYon%xSQ9@`(^p~N>YUoUgG9rtM z50;E(MwPL#ATJWQ$tv)Ro$EA%dkqL2vdMJkA$BA2a+1lw7ZZ(rDH|=8L_y#m`=IV{ z*Rh2ZpOJ{&^(@t>b}TMrr)`z+&``38JmhTlf(of<=s{y4pDoRO)^o#UeN*&WzN)2^ zSw7)V;?!X)5Z<;!sYwzI1Rp;^4u5l!BnS0-&VExXW+sXe!)I%av5cb6agQr%&HbC0 z321`8%DNY@MY1Jj(W9!L6DglfSPXk`%aX-X(8>u3yS?6-LQ;0%f}^ zT6vQUgAo5+&V5P`V&19UYq7WsXE0)0A3QMBWVtQ3a&$hMTReH_2la>zrqm1B2&L8r zqd8>1%U0^5Tl!F$v-f_^(V=V;$_y{lp|(nh)m4tS2<|f82$`r=_NCC+vz<;b=XV}o zn06z|lZr5*L;J@Qhc=P(?Zw1672j}^TxM_9F?^}Q?>!}>euCMafaL?OZ;~uM4dPrk zeCnc8-Cn&3|E6Yo@1|jEX$@|;KCnKIEHW?t@W)}aRm6!lALAU3NDObNE5{P;UklkZ zo%-*xm)eD}^j{XE>LeEdyOe=F)hED5_Bm%S@}Ar zAKef3@_FTXu4WRsh%e<$c6@vxnOBuy40X)alH`&m{Rk6S>oRkFS#YlL)5KhtvMWx&ICk zhE}JK@-|35I+$1)fWt5(nC9IS?aPPhdTn1X1wh2^bZq4DWHhw0iaN6_>o<&lBptA` zRN7&aq-ddc&778@)Km*5cap#L3qPHYj_+Hlpp`ad)0V6JX{p^E7s+LRh{yeo=0{t7 zGy7=Sq9?*2)x}8MOdX0RM|0?E<5uf0 zK9=h-1Z9n9S#ODj2c0}mHymWH^pQrmIPtea5-uc#<_EVp>mcK=23o5yLo$@U@V+vs zC=0=Bk8fhOzv4LQ#E2tRH*8K-HXEBtkCSf=tcMmWc+SVu4pT5m>aa?>HroJ9KL4@U zFlW250D){A_lh3COxaMyhW`WEyzwix&=x_1*2?AqkW;9q$$%$M@MN{i@Ogibs%Y^{Imr+dI#&bK*a zjC0+|AhMy;!Qt4Jx)=je^!{!uPBmJz+LDWQQTRcq>57eDB&C1%^D`0BzD|FBeG1W0 
z>SDmBP$rt$IJlyRlf}wO(0v=M^RJ;5rB>!!i2aoVP=J?A>3ZIg;!=4V`42kwX|}#w z(nN+I?v#JW4pPUfWx`S07yvlLAR@$yLb-V0!(Bt4S^N)%8$?v+$^06$m>tqTf<~pp zP$=LUIG7>sOM9+t{tBT)Grmg!VKrxS1D=&k6S zOH>bP67<<$+La7TF%|yomT-bJKB$sZ2kkR*nC4Fl?*FfPIrwqU3Y^U0yiq4(ar>pFu1-#8JE;N+K`)v0WyM<+X;B!eX6ssuu* zcrFdzGAK0Rw`z?}K-E6f0JQ9UHC*K%BoWCA^N8-V8;7ZI1KNq+aQ~TComCrUBYOwE zph42|NM&kYocJ^jDhkEJlRrTz%!3FEj5=Sr~hr=qqTKa*j@jDN>}hT@K$B{rRJ?R&H8+5@46lWu*0zYmlpI_-ZlZYZ3B z*%~N*0mD-aP4pu`uKdV1Op(0T#}$M=I`kj<7efMM2TzKq*)xPq8m;!qhIv|huW-;F z@J)sTfvs%8@h~UK9Fr9AIQJ12O}q0BH|G8rCO68~5ALOT0&451Ua;-r14iWO{n6_7 z5F&DGptUr07RtR=Csp<1t*}jDiR>yT`B9iEfo?b8jkt5-FNagNIt`WZTm7o_H9V+w z8Lz9`>(AEW1(j*SXt{{lFAQd5JR}H z$JyWj@+bVrxy7k}mEh=SrV8;3X}7GmH#1N)60_mhQt*a%2+t6nU2ABKVdg#9+h)+S z3}w6ga3hCI-*0!}J)-(c6f_huyPNYY(wAm7((d4j4W-2C2rcAzO6SByM9~Y%H;D2# zH*&J3Gaze&4h~9A^$^9CuCpY>C5xKLBm6;{y#Me zNl$*TJe<3>0S~zOSh}D0%Si<71rZ9dD`IU(4Wu;jxy-%sZ4K*6_xx%l<@3W*#HYnz z7S}-D(E$Z3-)cPNAq}O?JCBfTuNHqmfs!B|#)}R~YnDn~jlS<%5K#}$=Lo8lj%2|- zIeSzI`y5;oR+|eK!LAZ6$9Taya)PSPbn2KbMX|viKndOz#wcn>u>2XRygBB#euIAZ zy~?Ro8A@GNY1h|^Mww#a-PpDFvGJebIXa3hK7T);_M*1TNsA=+vEXaIRcd?VFVwXV zx+%9B`u18vQ*G;17jjHAv=APOxr||mrS1E=N@_%ATXNyRtO`95@JGpyUG5a|@|rsX zUjF(vTl_>zh#}GxzE0)oMb)V}Z2S-K{C^8XR_A4|{!K9pFjk9W6&8g)+xRPYr2TQM z|7_+R+8v~g&v~P>IH+B)UVB^4`Y)(kq&yidBtzZ! z*5PWo9lb?&E4B46U1fdIP5R{B4f%gyEcJQ(Kr{{*Ko-fzS3Sa+TiZyMl7WM+AY`;< zygRl!FFe*n?g2Rt;7WJ=U=lM|EV-gLP-JGfoT;PECVE#+y7cBhBg2vMVYBmnjrH*a z_OUo^PvL8#EeJyVh)Kj|FLOQO zIS+=R_zK?ga+BhDIkfrSs(Vl`P4Oe#1p|evEuRxO)pwijte)-0Q&FN|2|q=3rf&Zi z5WwGaZ>y6rh!n^NLM0Xd^b(i4ziQ{Y3l~)D{;dV@`}0f2xZELGz^>=XKXz7zFbUV> zx@k+Ou(K*G=E0g~bu~5#=1LB~didO*ca?&2w%WCwluhJI-5Cm^a2VZ|G0xF3!l2!} zaN>C!9A^27!T6fy@TQ?M!`b$rV|vzACJ|GbRiM>NyjM^^PRgsic*s)K^nn{uUk|d-~hof`UmVd5QO9xz5hX4V1j!bxJ?i^2LH~&%{gdf_phz4wIBq z4&~J?(Tj0;;f$9!baVRpW`VdlF$s5J_M${jT>dctxW;BcNJ}c}>2zJ* zuG_HSdSd{*IbA&FzszrYzPNFzZh1kJboDYB4HIB9&G=N7`5-4br22mbiJf@v&TxD! z+%7(7fP|0^6=Z(u^7s0|5dYHoi}q=v@aF^4d8-Rzy8EX-^Q5xdK>Zmm&hL|&BaB;N zCq4(pG?52hA`=-Ut~E{gHd~84zaoA+H?>nPKJi(! 
zl#E^==kCUM?a8=x*qo8)@q`O-rAQ!cIFB_^7*jaFNf3{)p40614sVbgdyT4J@Uo-Rd;_2LhC&|LKpR}bNkvB8) zQgBYFA%^(t;i!t@dHfD8@9n`P3HD$#^|zTRmCYM2hJ4EVNSq2LLjdfSL4r+<1wa-u z^3yJWOi7d)j9M<_-d$Lp)IA2Ow2=%5Xh^WDk2(>?z<>`cVN9s(Q)`6n6Y6t{IKB`Y z6I`fQXG-E7T<8J?H!}xb&uJ0^6zUN+xINvBgYbAc{)IS}(wd$)B6q)z->Q;)Fk&N; z`{M1;@pHv1z6FcU!QHxd1I)RNfVPwxemXC8*P^OR%3R7xJOp?s{sM&BGDbXdcZIlxN zS+=1#fIZ zIe$ zjuP%@<9cfg%MxvH>*Y){xeH z4yl>~1rZ~l4t>&UC|fCJ{2*AeY1M2BLPtbX*)Bv+SvyfvO#iTG@+iWe8&xmI%V zcSL9FreE*4zm<`(2efqphFK*vsR`ar;m(i`HGZr6G_}d#4tB&F1(*EDegYdND({Lt z_p8qT@_N1Hqx4!>Fe4-)s$AtHD+x37EgG%vMC$sP;h38Up}`&Dhh|2Y2*TI35pTqk zT-I-&4n=wCb!WH>ovEDXfKBU+CyRz+#CeWso%0k8X`1RU6CVMzrluhi6?s%e3s_q) z;K3ht#W~Mgo^JQzgK9}$XXTtRlcr8RKJKIwV4Ix&FqC*G;oVbs>IokRriTN77zq(+4!s>Z6lv=Et# zF$VoHkJfv(>nsxKkvrO7*DZI@zxS)^<36Y=qM}|Lsm#BDAn66CL9w%dIryE`hQDNM zrMxwUlppbEUg(EW$5ZS!UTpEG%yB>)kQhVB0)i{wsKrb}+)Zgk_*PbEl7PM@#4`m& zF}M%}hj_b4_e|NAtwQxkEfD!8n{M=rjbX)cS5e)63P(4gA@(2-Z~>gI*2t2Y2o^O>-p8+855aFIlQj-a;s#ebR*( zOYek#hY++-JeJrnDR2-(h{))!ecy-Rjy1%YJ31pAA+FN#)SeEdaV4(j{;w)_C_Q56 zJl*~3F5($Ma8l!$@UO#SO7aK&4BYoC&(1*{9!Nhj(ZW#JP;JQW{uEhuuV0O=?iXFP z=R^Zu?yWcBNkYcDwms>>GVasYtR4M^#Q)_Ky#@4ZL7r&-r1eP?xe$c2^SgbRW!&Gc zi8ZYg1?RV*_^@F;*wx<<@uB^^KG%~15!AKO>mfmhC82+xoA4leA8}D7pL}w{(4j;m zatPkP8F-!QUSm*%65d=KpyJPoJ+B}FA-W{HbRV^M+OGs4$Zv3lMc%LK6qDpNpC-@l zq~5Y%`AcL)up)?8n7lo^Hq!~|4Mo?$U(E)1kGKAM;&F)iP}#kw*I!C%Y%Pzxm%9#z z>lyr}wY#w}1CZQc(iNba17EqrkHmn3T@4_|{Z+`5rn1`{SOVdXv@=0}MMtlN_W%5F zH~IY{!~zW`Nm)33SUDK=_i()FGVaCTBH%2cq0hTdiFv%I3}aU*IzBV zbdMTv-|)pyqatP0?kIA~G?7nE0RRPQ0ZsCVCj=#jYP^f?I5w`mP7m0<2kt=(?L%q{ zr02cpHmT2T5a;F5CO}7vWZrbNvUa*$E?L;?*F>SQXUu{_8QcfiP9$iBrAhsILb3N; z+{#Z`;@cDI$F6-%8CfzFfwNTo5>wAmme%Wr`2R67qOaAh~$gpqHAdA2}4R3=4nn+DeS*q z`zFS-e-57_)2H^~rY;9D5kFgpD^b3Ky?!jiu4Fh<6G4RAI~camG4Kx_pa}s@M0V5& z6b0pmkQ*i1ck=R8|3Zc^L%-#QF|b!0u#xD`ijFy7fZhBwxa3nLNtHetuLuA5{h>*?_LJ}=e^yEg-mJoas*#8Km!5S#0^KeRUaLQj{T#w7r=bfzP##qIVOSzl8!#2w zCZF6%*9HryQ5v8?or}=L;eIx961y-In$JvIoIWS;g7+qC@_Q(jSH$0#{44RW!snx< 
z7`0V8LuW_Z3F#UDPGVtjX!0`-T`PkDW^uR8b+Ju6z|Qo{gO8PfI1y0uus#>V8%bbt z_{k87{!3frbJ6Uj~o z)Xc}c=O?4w8wl)$Xf0cZANVrUq3&0T!TIxt3r+y-8d+NdI#jH9vLNyAnN9D-pTgXA z$DHWhG^McpJe?5U!MW-@i(<`4ZFbLU z+7pw^TEwmOjAX5LLfAc|ty2IS_JGDc(jlBLW4t~va0ZY3{f>s*_}R;w%dOi3+I}); z;4bTMmlr)r!fJ{^_0pU{Eq8D5!hP)Iq6g~DRoQ}quNvq!ktgrylo8lDoUOe(JQdbA zwqtdwMx`D|1bw>@3%^4d)C0sy?Wr4|3MnS zR`597$&I@?`9z>wD}DXkW$(q^ROqKEi)K`n(D9;DWx+QE$a8z!ADqA6I7`z6m&OpH zzC3-wWUGtj4e*9^pl9{iuwj}Wd}lKR4GRfPC;bFSPED&Qm4VsoUS}0OQT<@~r9e5= zEW(qsN9?jW_SC;-KT)-LkUa{o*e`?tt*!8C?%H|MPh;!8%r;tm2wq`P-ht8*9iNWk znE$q^7oVtRx>Yw{$w#{$))$iZ5}n2{#&F%dj&hY#Sml<6ql59IWH4}oSc5!0{>>E< zt5E7as5kk2oPbnx-#3EtoHdxf-*N(IuWyDaBBtLPJAyR}+d8JgR@uIbr+Jhp&)A1O z|s20_@$5L!&_ep)Z>?7$dzs`Kk2 zRVrGK#sp}N#Gg(h6(8~(?CCi9LW2pO-*TqP?t8)@S~x5XaRSWFPf z4>OfTt5P41v82Od{=D-2fIyX=bZ@`}|BcGl8QwpMUTDaz@;|UpiEE-j6i=o#5B_$q zhAu&J-g&8ksN;oIL^)Ys8K?fH~=%xTZaIF44ABjAa?t>J}!upA2H1VSKe(T%qX zC)eN8!4ZV;>a=_1UqLYvG@loczCTiN<1v$-TQfSuG=b#(G?}Pe+}gZ3S#O6jGxk8t zZEug6m1qn|qzQy^*DiTR-qvB3CL)>l=w0*X9GjI}4%dWw(28gZA-F&N2rK*5QiNGF z(5<@bHDxO0K*r}&tRh2h#M$W2+z7}( z=zn5_{x4qrCFXhGZ`53Mt~+?K=TpVh7Y(?{0OADypEm)1+k^lx10^^MGv-sDEardh zR5wO-zakjO@NjNRcnV`UzQzANhKHD{lYo9%5RBW{>hby8r&X^<^sg=x7b?sR$8z%G z)zu&0;=f_Ze#^pzN3u?`CV`X^jKDi?pxr2tWqb}t=A)Ro@Jx@X#gl@v-jpVR-R0-h zwekG|KOwhBQ-fRFJL55g{`;ig=A_r4XVoE#tr%qr;r!|(6CAp(VlX4b%smt;bFBju z2A+RdstyZ2jsk6BvxCEypjR#VbzTSkCJa0;X|H2+?g6mdJlbh|U%f!j|2&*96A)%ZPN4%&4j4-lZKL^ViMPb zurU6tRDcou!gtEzfy>m`+getYfDkGPrwiqy6XWAzY^%umb{E#OBv}S?iScl9BmL{L zq*bAU#|Jk!pwEIt2!NOo^oL~w5`M$$b=Uxqiq@dGn0>G`z;p%mY_j~ulwY8;6&WQ+ zq1dj&I4nXYoqtlpI2@}=e&CRCwDJF{*;*MF1yF2Y5r#vb1xcGL|5}|ZDeizGg>D5R zCdK41RW(LUZ>HdjaZF=8{j<9gJ=C}dn+yHwfZ}+yQbdyd{;s_cz|`H~^jKtk9L zeh}nii@5*(&X45kSBvUm>F@1by8kP_(6mONx6<#gyV(!5XuJ7unZKz-=omppws9N6 z7`cEoN(7`{+KJCq!chK7O`p2f;E0^Gwr;6VU|QPQmO73iVzGzi(Ghg?5D2}GZ3=^6 z$QkJBaXl;i^9Kiep*pvfu`ArxI3c-fBsNG@?~sb#^Mn*llyji^gqflonWA^zgOWR+ zNz2t2p}0$kf?*LV84NSu3X4cmIS|pGy3(&)zIB)?V}L^m`UTe7d2~PjcL>gNX9r$5 
z>`BiZ+^54X@d+?(-#-~4G9sb771z@r{^q|O%FslL#VSW)^YrueL{g5jiF%M&*TL?l zZk}FlahPG*@_7()IegDy2mPl(7PTb~9E-kU3l)IhG3M95;EjMUkpybc`{Q-O{(D== z;Ght`ijaARGeCx8)BZuI^B@O(BuvlWv*)x2Z}^$mXj1%5Uo5$hZq{ zWz_)or5~Q_F4qL2pQe<2nHt_@mUh47-TG|Prd{mUCK@pVQqeBO$`9+EU3946VRiX| zNE%K=Jk`-@02HqN1VI)#HVn7l$Z7Ci``lTD01Tp&%rp4FR7{&Ca|UONJkLGv?w5pV z$dTFp)1z?^rk6;~6GvAC&x4W+OX-~o^D8nm^S`*H^VzlaT#moa2yoG~2H+g(j|uAu zb-lcpFOaqhUX}nAgUIxQOk5k{p=3tP>e#{M|IgF1OWa5rpNA=qK~6n~atDfSU$(!W zM7cKt@yKQwkSkF+APCCGna?H8qzF7qO+Y{T#=gQo<5$8<+jOD{EU!m!zf^hk0e3KS zeBd&{De~U9%VuguR6FJDcZ+`Pu0)%8byX{P2ee5>mnv8xBkk>PwkRWei_`aWEXtOx zPb{EIU|o>8YcFAf4fB0-&n#{a^3)SR>5^Mt2L;+mJK8=IL|_U*u9@HC#b_<< zF?$6|GXeIQ7qQRg=V6VvK8EqCH<_iSOZt}ZQegFgx|K6%P%u-94S4zta zljAjQ?>P#$mqXbTgb5*PcErp2fk!hOQTkDbj5UJM?n}H9eT8DA8SHqy$^?=k`{4nP z{sZJZ*B8ASU|8+B@V!lz&aEx>+~cb+e7@I?(vF+CjAWGZ;_(9WKsdQ6h`Y%`rHH`x?Dsd~vo59FGqnn&RjN}dSW$phz8 zU@>;RwoI6l9@Se`lo5Q1JTsKUt@)EFb>OaYuFw8C_K49MZ^M?0z^z!Zx>ih=$}~$H zQ;x3qu#CJ$?JZW*tphEshg}O$>pYI-1{zD(-{B6-j-l}5fG6F?N{|7^I z3`JTkWQP~Sq8@m!j8VvjD6&VI1pZ_^MNE7Xs12XmwYeUb8@x_G)(PQg2q2{fpb`rv zYwG%JAs*n(nx(TvL!oOAiPnSM4q7@bmr@85Zw9Tnteq_(0uN&l;S_~kwr16XEUuZ* z+R~EDY_2hw|C>;K3A6bSKNDlBaW1DTCUtYmf{M$W z9wrQ7^x<*_2xf^A8bN|+X(y<3Dpg z(bZ3G9t8_BuCx<)9mg2(Kh+a#6D!M6S6%OgJfKGL*#rUsYW&eMY#%c5laUYzHkt31 zGAiJ&4PtIFxF+&M4%H|wia=}=(cg{!n}}&C0e9$=-`^5QI#9nd0&0+4qmNmL5jJNA ziT5*L_;UB=dNM@m1@uUU2mZyGx+CGbI=@edbA{+7;7EzHw>Tqc(6x>Eqo7A%(~*<_ zzIIOCx1!Z*pY?YDWp3Mcr}0B0X?_@LSMLiM_cdKh5DKlqxfLy!ayXACN(mtN{?jRcj==>iS0BGs}^7MxU zBJXy11Mxojn@VgJ2zuj2A+zc#Zp@D=WLl9oaI`x+~ZrguM5ktGNJYwh*gm4^4$7TGY zwwnqET##y`J4qR03kb3==hhp-RFA+oYQj4!W3>eL^9V?b6!)LfDj#k~EZz!<57jw$ z>3-UppRiG}qWf%D?n`oYwAVjg!t*cd+nu8Id{k6BSnwp1lSE91C9%mS@>n$H8!;vo z%?r^yI$ZKKP4sA$n~GX2OYWh$a4pnd&@Dn;;jZ2wYRxw+GDu@vQz5OjAMbR{crv%g z{D;x!JQ9OrM2Lth@C*8xB1Y;y^>ykygVPTnPXg+d_|N2$c_Li8fFAWEVfqKyDSz)H z8hPj{!K_>5isicWi^R^P+B4S^gb(g>-NNWXP|7?GC2DFb{uBT`ZS<;8GabpNDfrWy z$REl0ZWXMR0ehqpX7dch979dy6u)Y`Z-!Hz69939;e&;NqKr4841|1F9{_WZRCS8Z 
z{W(Nt3%LtQfidQp2hwqW>b$@&=qVFY;)+#n2a8}55Z=6a5YuTr1fr=RK_sTs6rBeC zt)e91WVst6KasTmrbsblMo9FM7{mAlPo<7QxgANQks1=dTcIZ-jr*qe;m$=3LbZ`X&qfaeI|i~xeE_}12}2{ zxnc-{IE=N^qtxP3>g~i6SB+uZ*sv^S5{6KJXyT(WgWaH#ImILN##oD4%1OtO(DDG% zt|&fj6;!dKR*L97EK=cM9z$d*6qh}o!S7IQ(`pKwg!8<++B&5mXi zSB|N594PE?d@pzrUVGC|?y3m#$=Ql%;yP?mZ91DV>r9m48phy`@gxz%WP(eg5&wq% z8k+qSy(M!Xhp|>KtU|`)wmru!mO)n78kMj$e2yKz_@H1#IjQ?RhN*PuHGn+HE#RTPY>%rn5aJxu$&R` zb(ezQX{aOHT7{o)bA(v%o;66(yWD!CVpWJY5KAOV+w@b%-~f6a=%!ypU!``p&&I8Z z@&-a+LB?;VIPY~tyt3Z#XP>)#UMnkD2^BQaX)kSBRB^pXz$&nBJYU$O4ba@&(VzK0 zo^KzHogPDD>Jy^lw!!Si2PpvNDI^6ppobRjPcg^VSc)&ITgF;sU;F=xJM(ua_y3Pu z27|%acMS$(EEAF;+n^FBsce-sMMtu>j8Y6UhKQ*wg)nkTlI0v_X{s@a5)Q38IYPFy zjqOCX@9VD5^}WvLFZlG6>*~I)d+wR{@_Id=kLUYV7(U54x-j@)b)Mwi^&UL32o9ko z_&2^pL&jF|gxnWQ0lJv<*sSoCa3y&D{N2jLtiE4)k_@jHyg(1$Syq|up(Dluu+m|Q z91C|Sf>Pl33~e$7sYm3~nfw+ApAh|MLh8r6r@_uxbg&u4hD28Cz!|>8aqWsD5VD)< zy6)(UA6cJU*Lpb|yB%9a>#=ZN1D*Qh@)mY8WR6;kV>N$vbZj?%>Q)#Qsir>YN7uF& zRXx1fJt7~KU7$r04aZn752ebW@{9|N_mp>X)$d(t*C4H#f8TcRJb6HUqLn>$kf85# z@69wd=~kg6QeN;y;hTx>zx)Rt_yI>;pt{xP1axDS_QqziuSCf4BY!o>(pj5#XP6dO zrLBr#bVsp^UIfvUa$IG5KR|w$BuJN}M=&X0!Pdx#E9jC7Y;7x2QYEWPj_3i~LV0I! 
zniQ3Oo$s_po~VZSUqm&bGNpb|l9UL+8=JB2tO_T6hH_E`@*_EpLQSI+-fjS^g1-|k zkY{uuo-=gbF(F_<*iO{tC<=DD%#;5{yaZ;fQ_1~v`Wp0QG)&g~Pj zY3RP>KgzpiCg~EEFG3fg_3$oHmKIO@CiUCcymoIGnq;ESdLD+c+>pi;l|0s_a!zC$ zD~KVH0V)LM&_zEqU^MaLg6D*87NH;6;{|z1qx$O&pJUrNBww%a(mkey1OSs^S4PG`8~V8c3c*7@GyvUxPyxYbOu z4^MJsishW83E949AG@3KA=7s)?q!zNrYzONa*C$0*VJBV^jy3N%Cg_YCU!nF;SuG98s3`?EA?NcQ=`7sKKo`>rx2l%2EB zuZi>}7n_X+vOn2;>Coo#f6;c4P7!(yZ>+p-t3WPz`at0n1pSU2`ug_%>7@~U4rodG zSP?84UF!VE-pd-ZmCBfvD|37V#{tSIO3{4LA!(0qr-mE%i}YEZ{(dxJ`jPU*Pp0|2 zr-a^Aagxb>u&i&;^GZT9-H3{cV} z-=czxAQrlwP1f8P)vw_ExZ1DY3F?nAdAEMP&JJ`S6ByXEIRZ}R(EQX(BsT`H{LGOH z;k!Bta33;w8ZCR-4i{m6D0!}Rok~}midDb2<2x%@gLp2!1J~wxT9!ecQ=Cc;LkFVu zHNvE#eZl%qHC3l#LyV~HL-T^+#SUr9hFEgH zP?1}bzt(nRc*9VC(ZM=Xl#`7{+EOFIsDXiqJv~n)k=C!ZvshQWDHB@1yPuxCflFN+ zlhqs1!P7o+b*j)PZL~l$ftADp$n5Lt!a>^8Td1bk_?riEdYyUWCSGSvKWLww_U67WQ@U!Gow+b&D zjL0dFzmfPKz+BaAzsv8OWRJp4?}7~WWQM||5I{C<;UAke{4kfVUFW++Hi9U!4fy?0 zP~-9;7vfgq-(39b0D4RJH>)4R%ayAH_lXPuPw(L?z>YR;WDuq$y19;Yh0=vb}Thej7YC*h7P$C3fO}n z3jzeIly?G{cT&(AEskRv$RWU?g6Dz2e+)eyvgaW@=Z9CfiWE%tdNp*Lab4p$SqVW% z4*ql^#Kf!4!6AAS=xOwz5XY#Di|uR(c_@mUc<^cP-oARlMg;1B>8%G1UIrE3cyI#2 zeBMBbHRb&0RagY89w->@!k-i^h8F88Pb#+bE$RM)|2WYIi(+3Cw*`h}2n=SE^_2jv z7_onk8*|g60%@-QSQqlxwJ)l?-Oa;$s$H1gypNz9o1a;xA`x@7U{JG>V=0hB7!n{e zb$Vq#0TxiaZjge-4D9w7=x$6$9(TlAqCwx2IIyvz8kQEuc~MdbTga; z?FuX9i*O+RXS;=YRY%_55qH6B*+UDZCj6bbenYlymoIN;BRI;r&163q7-?#8h?hmZ zEMOItaZ9Gpg31rp+hucfvMesGZaRrSM8oBvQ!rZV{Xs_d`jCn<^lz$@GY4_PN9R3k z%EV%69npfF)K+IyjtWYaF21_11fQi|QVR%;VQWLgOCqg$_-hHwN4wOon_ zE%6?ZF}3Pi&l-Wn6xVxautwmA$DPZ zIrk&}joXwkQ!!(zWVp-a=)xG#WbnUw;SQSlStQ2;sTs_KjH9fit7$i3=XOWj5|I`~_V6%4IfeFMc|l z`z!zKd+ER%U1o@VXD&EOtJ0ZDfmj+0$MZ-|nqp_CV#PVcFKpGFd$D`RW<3s)5EyH9 zE82KOh+0@=*0DXa40MmmxUV$RE@3TFmU#Ht6+rGfPu)qF4STdsn+e7?%d3WB74fEg*l-SSf0_ZXCS50aeGTk(mDn1XIcY6_%a<~% zR2GnaulP4;miFjySUC(&E>eTDD5J+0g9zURDdmiUo+BwtaceJH38h||n$NQO&F&aj zb}5Q7l)fxb$ud?42-ssKK7c;4oas(#ZyDbt2Q5aEhk3JbaAwO)NywRP8HU|5I#Hk; z&+s40+>QHtAp@x^$U@9VoL(9A(%lAvwepUXRETd>IuHo0A0LzxJ#QT6_laoW&H}$P 
zOpRP-D0BSyEcPXav@WJ2-{&N7h!A)nht50@`#cnBntOH)tj$Emc9w-wxQ!|HFg!zH)uP?1CPO(=o0qy}y zxrU%kgEG;|gF)20+|-|(DPoGsSGSoV3ayiX-s7DYHi1c(5|O}haq_l&@lr4eqC6;` zyYewK9vvg9*x9RVWdu3S*AjZ~V`I#T>Y)2s(e@yCnm1lJ!EP^=l0_NCee3co{CffC zqErIWn*!qdY>WHVN`Xt?z7~K-?hb5=uuuAn;dJ!*w*H+aatRS|VqbRWnA5P-S`5#V zeF=6tZ|k@yQVm=3@DL1ur~fA0MpS@jG1e`c<)}n0Y#ONm=7Kee zdnZ|#HwBkhjoL`>+VX_zj|6mKO;e%_4Mm!x)owDB>54A+&evB__HNyW&nN$hWYLKm zz`pnD7Ad9lf*87sp)g`i_ndmRP&t^sY|3AOzO(`~&5SC|lk8&K*}KL|v5(V`X8;!B zpjq%nhzbTr3$@s&-3wg1{Tz;Ufe-s~9#KUBag#@jX#P0&#G5au_d-&EP^RUg=#aae z*~|65_KT`_0Cun#p$Ze?apsVV;0F&^8+-ke+VAM?kiI^^R(K*{*w|>zb_4bALF#)c zy-H8IP@oC57l_M>zgia$kXfo@Z(<>a-OD_GFb$)cYam&^!`BbxfUW}+a|y$TLrRFW z%sO{F8UCZ?R!t#!z0a|Ax2I=11v~k>9`5UosQ72~O=lmh8hC^Q_ojf3{HstuS33wL zl(nd{79hd*(%ujQP-f?n5KVw2*VFGr(5U(X^Ra~9h+#*e<>{{_%B9#T{4ShJuV0kr zH?hv?XS)2_Ph$0ABsJ-+X}*#px}JhqYS#L3C^jn1KVvsRsLne))Efj(%4DE1U3px% zuLY?kplBN@e{|qsnv^#UlO)J!UgvPeU$X@hD;uFzc)2IXIZ&gd8_ux#ICuo|Ic?mG zpR*tXX2HekNA*8ufn)Ue&^&KTfwgX{+2Q&*klMF;QF`Sb*7u=Mp7V781-Km*siICE z^|vvQpDk;A`r#F1N4Z5vL-xaNb>-$H*IXE%hc!+fR`$13F?&aM{{{}epdqzoXx^!* zbl*K9AZSBuFl=vjhNoVKilo@1c*}**SBsabou+J_$2sALRG~aCF|#xP>xHEtm{^p3 zPIL9+nDd@vO=?cU+SNqg8e^o5okXw+y-wEBm{0B5-S6ZAD6))SJTJ8!DNqUsT-)Dv zE3gerjW#@I!F~E}LPP~sgy65Dw@*`)6 zAga6*ygYE%eo~{`ysh^@QzV3Gao6BM_n$4g0zY~Gm1MJocQ>trTIxj3QV?3qd(D}h z1&0i7R_w3zWT*To8Fu2-$&f_EhJYgR=7=WjK5^Z-=NQq17RbDN&{4KP+dh3hJ~0Y2 zLP&Ofnx<2t_8eAy-%mKdEGNM+nku$4R-rinVc%W3PvG{&k#^aTbm5u{VS@!VY*q e`UEFSTNA54>DxYRfWr~Dt1_|EPC diff --git a/examples/onboarding_guide/causallm/README.md b/examples/onboarding_guide/causallm/README.md index 2e077b6ba..e7ac3f362 100644 --- a/examples/onboarding_guide/causallm/README.md +++ b/examples/onboarding_guide/causallm/README.md @@ -53,7 +53,67 @@ This guide walks you through onboarding a new CausalLM model to QEfficient-trans ## Onboarding Process -![Onboarding Flowchart](./Onboarding.png) +```mermaid +flowchart TD + A["Check Transformers Library +• Locate model in transformers/models/<model>/modeling_*.py +• 
Identify architecture classes (Attention, DecoderLayer, etc.)"] + + B{"Class already +Implemented"} + + C["Create Custom Files +• Create modeling_*.py +• Implement custom classes +• Add __qeff_init__ methods"] + + D["Test the model using +the auto model class +and validate the +functionality"] + + E["Add Mappings in pytorch_transforms.py +• CustomOpsTransform (RMSNorm) +• KVCacheTransform (all model classes) +• ExternalModuleMapperTransform (if needed)"] + + K{"if all test passes"} + + L["Debug & Fix Issues +Retest with test pipelines"] + + M["Submit PR +(Follow +CONTRIBUTING +guidelines)"] + + A --> B + B -->|No| C + B -->|Yes| D + C --> E + E --> F + + subgraph F["Testing Pipeline (4 Stages)"] + direction TB + G["Stage 1: PyTorch HF Model (Baseline) +(tokens should match)"] + H["Stage 2: PyTorch KV Model (After QEff transforms) +(tokens should match)"] + I["Stage 3: ONNX/ORT Model (After export) +(tokens should match)"] + J["Stage 4: Cloud AI 100 (Hardware execution) +(tokens should match)"] + + G --> H + H --> I + I --> J + end + + F --> K + K -->|No| L + L --> F + K -->|Yes| M +``` --- From 75065e968c34198c9db4a2686a514555d4461e82 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Singh Date: Fri, 21 Nov 2025 17:37:51 +0530 Subject: [PATCH 22/60] Added Decoder layer class in Qeff for granite (#628) This PR introduces support for the Decoder layer class in Qeff for granite, which is required for the subfunction. While there are alternative approaches to achieve this, implementing it now ensures future compatibility, particularly if we decide to add CB support for Granite. In that case, the Qeff Granite Decoder layer will be necessary. 
Signed-off-by: abhishek-singh591 Signed-off-by: Dhiraj Kumar Sah --- .../models/granite/modeling_granite.py | 75 +++++++++++++++++++ .../transformers/models/pytorch_transforms.py | 3 + 2 files changed, 78 insertions(+) diff --git a/QEfficient/transformers/models/granite/modeling_granite.py b/QEfficient/transformers/models/granite/modeling_granite.py index aa14554b2..62be5f54d 100644 --- a/QEfficient/transformers/models/granite/modeling_granite.py +++ b/QEfficient/transformers/models/granite/modeling_granite.py @@ -17,6 +17,7 @@ from transformers.models.granite.modeling_granite import ( GraniteAttention, GraniteConfig, + GraniteDecoderLayer, GraniteForCausalLM, GraniteModel, GraniteRotaryEmbedding, @@ -173,6 +174,80 @@ def forward( return attn_output, attn_weights +class QEffGraniteDecoderLayer(GraniteDecoderLayer): + """ + Copied from LlamaForCausalLM: https://github.com/huggingface/transformers/blob/main/src/transformers/models/granite/modeling_granite.py + The only differences are: + - add new args batch idx for the CB models although its not supported yet. + """ + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Cache] = None, + output_attentions: Optional[bool] = False, + batch_index: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, + **kwargs, + ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): + attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, + query_sequence_length, key_sequence_length)` if default attention is used. 
+ output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_values (`Cache`, *optional*): cached past key and value projection states + cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): + Indices depicting the position of the input sequence tokens in the sequence + position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): + Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, + with `head_dim` being the embedding dimension of each attention head. + kwargs (`dict`, *optional*): + Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code + into the model + """ + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + # Self Attention + hidden_states, self_attn_weights = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + output_attentions=output_attentions, + batch_index=batch_index, + use_cache=use_cache, + cache_position=cache_position, + position_embeddings=position_embeddings, + **kwargs, + ) + hidden_states = residual + hidden_states * self.residual_multiplier + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states * self.residual_multiplier # main diff with Llama + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + return outputs + + class QEffGraniteModel(GraniteModel): def forward( self, diff --git 
a/QEfficient/transformers/models/pytorch_transforms.py b/QEfficient/transformers/models/pytorch_transforms.py index 62a873b9e..c7c9d5e25 100644 --- a/QEfficient/transformers/models/pytorch_transforms.py +++ b/QEfficient/transformers/models/pytorch_transforms.py @@ -63,6 +63,7 @@ from transformers.models.gptj.modeling_gptj import GPTJAttention, GPTJBlock, GPTJForCausalLM, GPTJModel from transformers.models.granite.modeling_granite import ( GraniteAttention, + GraniteDecoderLayer, GraniteForCausalLM, GraniteModel, GraniteRMSNorm, @@ -268,6 +269,7 @@ ) from QEfficient.transformers.models.granite.modeling_granite import ( QEffGraniteAttention, + QEffGraniteDecoderLayer, QEffGraniteForCausalLM, QEffGraniteModel, ) @@ -531,6 +533,7 @@ class KVCacheTransform(ModuleMappingTransform): GraniteModel: QEffGraniteModel, GraniteForCausalLM: QEffGraniteForCausalLM, GraniteAttention: QEffGraniteAttention, + GraniteDecoderLayer: QEffGraniteDecoderLayer, # GraniteMoe GraniteMoeModel: QEffGraniteMoeModel, GraniteMoeForCausalLM: QEffGraniteMoeForCausalLM, From 8fc86d6c536af1ad3273191a106c29516ce17e26 Mon Sep 17 00:00:00 2001 From: Abukhoyer Shaik Date: Wed, 26 Nov 2025 13:39:55 +0530 Subject: [PATCH 23/60] [CI-FIX]: qnn and vllm downstream jobs are disabled (#639) Signed-off-by: Abukhoyer Shaik Signed-off-by: Dhiraj Kumar Sah --- scripts/Jenkinsfile | 38 +++++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/scripts/Jenkinsfile b/scripts/Jenkinsfile index d9d391d47..683ef5018 100644 --- a/scripts/Jenkinsfile +++ b/scripts/Jenkinsfile @@ -90,8 +90,8 @@ pipeline { timeout(time: 60, unit: 'MINUTES') { sh ''' sudo docker exec ${BUILD_TAG} bash -c " - source /qnn_sdk/bin/envsetup.sh && - source /qnn_sdk/bin/envcheck -c && + #source /qnn_sdk/bin/envsetup.sh && + #source /qnn_sdk/bin/envcheck -c && cd /efficient-transformers && . 
preflight_qeff/bin/activate && mkdir -p $PWD/cli && @@ -181,15 +181,15 @@ pipeline { } post { - success { - // Trigger downstream job only if this pipeline succeeds - build job: 'qefficient_vllm_upstream', - parameters: [ - string(name: 'NAME', value: "${BUILD_TAG}"), - string(name: 'QEFF_WORKSPACE', value: "${env.WORKSPACE}") - ], - wait: false - } + // success { + // // Trigger downstream job only if this pipeline succeeds + // build job: 'qefficient_vllm_upstream', + // parameters: [ + // string(name: 'NAME', value: "${BUILD_TAG}"), + // string(name: 'QEFF_WORKSPACE', value: "${env.WORKSPACE}") + // ], + // wait: false + // } always { script { try { @@ -201,8 +201,7 @@ pipeline { } } junit testResults: 'tests/tests_log.xml' - } - unsuccessful { + script { try { sh ''' @@ -215,5 +214,18 @@ pipeline { echo 'Cleaning Workspace' deleteDir() } + // unsuccessful { + // script { + // try { + // sh ''' + // sudo docker rm -f ${BUILD_TAG} + // ''' + // } catch (error) { + // echo "Failed to delete container ${BUILD_TAG}: ${error}" + // } + // } + // echo 'Cleaning Workspace' + // deleteDir() + // } } } \ No newline at end of file From 037e0c4e562db4561764549c9fb11343a0b25213 Mon Sep 17 00:00:00 2001 From: Rishin Raj Date: Wed, 26 Nov 2025 14:25:16 +0530 Subject: [PATCH 24/60] Installation guide for installing release branches (#637) Added installation guide for installing release branches --------- Signed-off-by: Rishin Raj Signed-off-by: Abukhoyer Shaik Co-authored-by: Abukhoyer Shaik Signed-off-by: Dhiraj Kumar Sah --- README.md | 6 +++++- docs/source/quick_start.md | 22 ++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 8972e5b56..cb6f32382 100644 --- a/README.md +++ b/README.md @@ -93,9 +93,13 @@ python3.10 -m venv qeff_env source qeff_env/bin/activate pip install -U pip -# Clone and Install the QEfficient Repo. 
+# Clone and Install the QEfficient repository from the mainline branch pip install git+https://github.com/quic/efficient-transformers +# Clone and Install the QEfficient repository from a specific branch, tag or commit by appending @ref +# Release branch (e.g., release/v1.20.0): +pip install "git+https://github.com/quic/efficient-transformers@release/v1.20.0" + # Or build wheel package using the below command. pip install build wheel python -m build --wheel --outdir dist diff --git a/docs/source/quick_start.md b/docs/source/quick_start.md index 9358f9c4a..f15d8de2f 100644 --- a/docs/source/quick_start.md +++ b/docs/source/quick_start.md @@ -221,4 +221,26 @@ Benchmark the model on Cloud AI 100, run the infer API to print tokens and tok/s tokenizer = AutoTokenizer.from_pretrained(model_name) qeff_model.generate(prompts=["My name is"],tokenizer=tokenizer) ``` + +### Local Model Execution +If the model and tokenizer are already downloaded, we can directly load them from local path. + +```python +from QEfficient import QEFFAutoModelForCausalLM +from transformers import AutoTokenizer + +# Local path to the downloaded model. You can find downloaded HF models in: +# - Default location: ~/.cache/huggingface/hub/models--{model_name}/snapshots/{snapshot_id}/ +local_model_repo = "~/.cache/huggingface/hub/models--gpt2/snapshots/607a30d783dfa663caf39e06633721c8d4cfcd7e" + +# Load model from local path +model = QEFFAutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=local_model_repo) + +model.compile(num_cores=16) + +# Load tokenizer from the same local path +tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=local_model_repo) + +model.generate(prompts=["Hi there!!"], tokenizer=tokenizer) +``` End to End demo examples for various models are available in [**notebooks**](https://github.com/quic/efficient-transformers/tree/main/notebooks) directory. Please check them out. 
From a380c7a9746188271abd50bf551bd73180d41d07 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Singh Date: Wed, 26 Nov 2025 21:31:00 +0530 Subject: [PATCH 25/60] Added Continuous Batching (CB) Support for Subfunctions (#642) Resolved compilation issues by adding CB support for subfunctions, ensuring compatibility across CB and non-CB models. Signed-off-by: abhishek-singh591 Signed-off-by: Dhiraj Kumar Sah --- QEfficient/base/onnx_transforms.py | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/QEfficient/base/onnx_transforms.py b/QEfficient/base/onnx_transforms.py index 7ebe6bce5..1bc483eed 100644 --- a/QEfficient/base/onnx_transforms.py +++ b/QEfficient/base/onnx_transforms.py @@ -11,7 +11,26 @@ import torch from onnx import ModelProto, external_data_helper, numpy_helper -from QEfficient.customop.ctx_scatter_gather import CtxGather, CtxGatherFunc, CtxScatter, CtxScatterFunc +from QEfficient.customop.ctx_scatter_gather import ( + CtxGather, + CtxGather3D, + CtxGatherFunc, + CtxGatherFunc3D, + CtxScatter, + CtxScatter3D, + CtxScatterFunc, + CtxScatterFunc3D, +) +from QEfficient.customop.ctx_scatter_gather_cb import ( + CtxGatherCB, + CtxGatherCB3D, + CtxGatherFuncCB, + CtxGatherFuncCB3D, + CtxScatterCB, + CtxScatterCB3D, + CtxScatterFuncCB, + CtxScatterFuncCB3D, +) from QEfficient.customop.rms_norm import CustomRMSNorm, CustomRMSNormFunc @@ -113,7 +132,13 @@ class CustomOpTransform(OnnxTransform): _custom_ops: Dict[str, Tuple[Any, Any]] = { "CustomRMSNormFunc": (CustomRMSNormFunc, CustomRMSNorm), "CtxScatterFunc": (CtxScatterFunc, CtxScatter), + "CtxScatterFunc3D": (CtxScatterFunc3D, CtxScatter3D), "CtxGatherFunc": (CtxGatherFunc, CtxGather), + "CtxGatherFunc3D": (CtxGatherFunc3D, CtxGather3D), + "CtxScatterFuncCB": (CtxScatterFuncCB, CtxScatterCB), + "CtxScatterFuncCB3D": (CtxScatterFuncCB3D, CtxScatterCB3D), + "CtxGatherFuncCB": (CtxGatherFuncCB, CtxGatherCB), + "CtxGatherFuncCB3D": (CtxGatherFuncCB3D, CtxGatherCB3D), } 
@classmethod From 3dffc651c4ec86f95a9fdef9e9a857d75fef2cd2 Mon Sep 17 00:00:00 2001 From: Meet Patel Date: Fri, 28 Nov 2025 17:09:38 +0530 Subject: [PATCH 26/60] [QEff. Finetune]: Added logger and its test cases. (#644) - Added a logger which will log onto console and file. This code is similar to existing QEff. Finetuning logger code. - Also added dist_utils which serves as utility code when dealing with distributed training. - Added logger test cases for sanity checks. --------- Signed-off-by: meetkuma Signed-off-by: Dhiraj Kumar Sah --- .../finetune/experimental/core/logger.py | 170 +++++++++++++ .../experimental/core/utils/dist_utils.py | 33 +++ .../experimental/tests/test_logger.py | 233 ++++++++++++++++++ 3 files changed, 436 insertions(+) create mode 100644 QEfficient/finetune/experimental/core/logger.py create mode 100644 QEfficient/finetune/experimental/tests/test_logger.py diff --git a/QEfficient/finetune/experimental/core/logger.py b/QEfficient/finetune/experimental/core/logger.py new file mode 100644 index 000000000..a1b9c771f --- /dev/null +++ b/QEfficient/finetune/experimental/core/logger.py @@ -0,0 +1,170 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + + +import logging +import sys +from pathlib import Path +from typing import Optional + +from transformers.utils.logging import get_logger as hf_get_logger + +from QEfficient.finetune.experimental.core.utils.dist_utils import get_local_rank + +# ----------------------------------------------------------------------------- +# Logger usage: +# Initialize logger: +# logger = Logger("my_logger", log_file="logs/output.log", level=logging.DEBUG) +# Log messages: +# logger.info("This is an info message") +# logger.error("This is an error message") +# logger.log_rank_zero("This message is logged only on rank 0") +# logger.log_exception("An error occurred", exception, raise_exception=False) +# Attach file handler later if needed: +# logger.prepare_for_logs(output_dir="logs", log_level="DEBUG") +# ----------------------------------------------------------------------------- + + +class Logger: + """Custom logger with console and file logging capabilities.""" + + def __init__( + self, + name: str = "transformers", # We are using "transformers" as default to align with HF logs + log_file: Optional[str] = None, + level: int = logging.INFO, + ): + """ + Initialize the logger. 
+ + Args: + name: Logger name + log_file: Path to log file (if None, log only to console) + level: Logging level + """ + self.logger = hf_get_logger(name) + self.logger.setLevel(level) + + # Clear any existing handlers + self.logger.handlers.clear() + + # Create formatter + self.formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") + + # Console handler + console_handler = logging.StreamHandler(sys.stdout) + console_handler.setLevel(level) + console_handler.setFormatter(self.formatter) + self.logger.addHandler(console_handler) + + # File handler (if log_file is provided) + if log_file: + # Create directory if it doesn't exist + log_path = Path(log_file) + log_path.parent.mkdir(parents=True, exist_ok=True) + + file_handler = logging.FileHandler(log_file) + file_handler.setLevel(level) + file_handler.setFormatter(self.formatter) + self.logger.addHandler(file_handler) + + def debug(self, message: str) -> None: + """Log debug message.""" + self.logger.debug(message) + + def info(self, message: str) -> None: + """Log info message.""" + self.logger.info(message) + + def warning(self, message: str) -> None: + """Log warning message.""" + self.logger.warning(message) + + def error(self, message: str) -> None: + """Log error message.""" + self.logger.error(message) + + def critical(self, message: str) -> None: + """Log critical message.""" + self.logger.critical(message) + + def log_rank_zero(self, message: str, level: int = logging.INFO) -> None: + """ + Log message only on rank 0 process. + + Args: + message: Message to log + level: Logging level + """ + if get_local_rank() == 0: + self.logger.log(level, message) + + def log_exception(self, message: str, exception: Exception, raise_exception: bool = True) -> None: + """ + Log exception message and optionally raise the exception. 
+ + Args: + message: Custom message to log + exception: Exception to log + raise_exception: Whether to raise the exception after logging + """ + error_message = f"{message}: {str(exception)}" + self.logger.error(error_message) + + if raise_exception: + raise exception + + def prepare_for_logs(self, output_dir: Optional[str] = None, log_level: str = "INFO") -> None: + """ + Prepare existing logger to log to both console and file with specified + output directory and log level. + + Args: + output_dir: Output directory for logs + log_level: Logging level as string + """ + # Convert string log level to logging constant + level = getattr(logging, log_level.upper(), logging.INFO) + self.logger.setLevel(level) + + # Update existing handlers' levels + for handler in self.logger.handlers: + handler.setLevel(level) + + # Add file handler if saving metrics + if output_dir: + log_file = Path(output_dir) / "training.log" + log_file.parent.mkdir(parents=True, exist_ok=True) + + # Check if file handler already exists + file_handler_exists = any(isinstance(handler, logging.FileHandler) for handler in self.logger.handlers) + + if not file_handler_exists: + file_handler = logging.FileHandler(log_file) + file_handler.setLevel(level) + file_handler.setFormatter(self.formatter) + self.logger.addHandler(file_handler) + + +# Global logger instance +_logger: Optional[Logger] = None + + +def get_logger(log_file: Optional[str] = None) -> Logger: + """ + Get or create a logger instance. 
+ + Args: + log_file: Path to log file (if None, log only to console) + + Returns: + Logger instance + """ + global _logger + if _logger is None: + _logger = Logger(log_file=log_file) + return _logger diff --git a/QEfficient/finetune/experimental/core/utils/dist_utils.py b/QEfficient/finetune/experimental/core/utils/dist_utils.py index d647b73a6..aed88862d 100644 --- a/QEfficient/finetune/experimental/core/utils/dist_utils.py +++ b/QEfficient/finetune/experimental/core/utils/dist_utils.py @@ -4,3 +4,36 @@ # SPDX-License-Identifier: BSD-3-Clause # # ----------------------------------------------------------------------------- + +import torch.distributed as dist + + +def is_dist_available_and_initialized() -> bool: + """Check if distributed training is available and initialized.""" + return dist.is_available() and dist.is_initialized() + + +def get_rank() -> int: + """Return the global rank of the current process, else 0.""" + if not is_dist_available_and_initialized(): + return 0 + return dist.get_rank() + + +def get_local_rank() -> int: + """Return the local rank of the current process on its node, else 0.""" + if not is_dist_available_and_initialized(): + return 0 + return dist.get_node_local_rank() + + +def get_world_size() -> int: + """Get the total number of processes in distributed training.""" + if not is_dist_available_and_initialized(): + return 1 + return dist.get_world_size() + + +def is_main_process() -> bool: + """Check if the current process is the main process (rank 0).""" + return get_rank() == 0 diff --git a/QEfficient/finetune/experimental/tests/test_logger.py b/QEfficient/finetune/experimental/tests/test_logger.py new file mode 100644 index 000000000..0af0c8b51 --- /dev/null +++ b/QEfficient/finetune/experimental/tests/test_logger.py @@ -0,0 +1,233 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import logging +from unittest.mock import patch + +import pytest + +from QEfficient.finetune.experimental.core.logger import Logger, get_logger + + +class TestLogger: + def setup_method(self): + """Reset the global logger before each test method""" + import QEfficient.finetune.experimental.core.logger as logger_module + + logger_module._logger = None + + def test_init_console_only(self): + """Test logger initialization with console-only output""" + logger = Logger("test_logger") + + # Check logger attributes + assert logger.logger.name == "test_logger" + assert logger.logger.level == logging.INFO + + # Check handlers - should have console handler only + assert len(logger.logger.handlers) == 1 # Only console handler + assert isinstance(logger.logger.handlers[0], logging.StreamHandler) + + def test_init_with_file(self, tmp_path): + """Test logger initialization with file output""" + log_file = tmp_path / "test.log" + logger = Logger("file_test_logger", str(log_file)) + + # Check handlers - should have both console and file handlers + assert len(logger.logger.handlers) == 2 # Console + file handler + assert isinstance(logger.logger.handlers[0], logging.StreamHandler) + assert isinstance(logger.logger.handlers[1], logging.FileHandler) + + # Check file creation + assert log_file.exists() + + def test_log_levels(self, caplog): + """Test all log levels work correctly""" + logger = Logger("level_test_logger", level=logging.DEBUG) + + with caplog.at_level(logging.DEBUG): + logger.debug("Debug message") + logger.info("Info message") + logger.warning("Warning message") + logger.error("Error message") + logger.critical("Critical message") + + # Check all messages were logged + assert "Debug message" in caplog.text + assert "Info message" in caplog.text + assert "Warning message" in caplog.text + assert "Error message" in caplog.text + assert "Critical 
message" in caplog.text + + @patch("QEfficient.finetune.experimental.core.logger.get_local_rank") + def test_log_rank_zero_positive_case(self, mock_get_local_rank, caplog): + """Test rank zero logging functionality""" + mock_get_local_rank.return_value = 0 + logger = Logger("rank_test_logger") + + with caplog.at_level(logging.INFO): + logger.log_rank_zero("Rank zero message") + + assert "Rank zero message" in caplog.text + + @patch("QEfficient.finetune.experimental.core.logger.get_local_rank") + def test_log_rank_zero_negative_case(self, mock_get_local_rank, caplog): + """Test to verify that only rank‑zero messages are logged""" + mock_get_local_rank.return_value = 1 + logger = Logger("rank_test_logger") + + with caplog.at_level(logging.INFO): + logger.log_rank_zero("Should not appear") + + assert "Should not appear" not in caplog.text + + def test_log_exception_raise(self, caplog): + """Test exception logging with raising""" + logger = Logger("exception_test_logger") + + with pytest.raises(ValueError), caplog.at_level(logging.ERROR): + logger.log_exception("Custom error", ValueError("Test exception"), raise_exception=True) + + # The actual logged message is "Custom error: Test exception" + # But the exception itself contains just "Test exception" + assert "Custom error: Test exception" in caplog.text + + def test_log_exception_no_raise(self, caplog): + """Test exception logging without raising""" + logger = Logger("exception_test_logger") + + with caplog.at_level(logging.ERROR): + logger.log_exception("Custom error", ValueError("Test exception"), raise_exception=False) + + # Check that the formatted message was logged + assert "Custom error: Test exception" in caplog.text + + def test_prepare_for_logs(self, tmp_path): + """Test preparing logger for training logs""" + output_dir = tmp_path / "output" + logger = Logger("prepare_test_logger") + + # Prepare for logs + logger.prepare_for_logs(str(output_dir), log_level="DEBUG") + + # Check file handler was added + 
file_handlers = [h for h in logger.logger.handlers if isinstance(h, logging.FileHandler)] + assert len(file_handlers) == 1 + + # Check file exists + log_file = output_dir / "training.log" + assert log_file.exists() + + # Check log level was updated + assert logger.logger.level == logging.DEBUG + + def test_prepare_for_logs_no_file_handler(self): + """Test preparing logger without saving to file""" + logger = Logger("prepare_test_logger") + + # Prepare for logs without saving metrics + logger.prepare_for_logs(log_level="INFO") + + # Check no file handler was added + file_handlers = [h for h in logger.logger.handlers if isinstance(h, logging.FileHandler)] + assert len(file_handlers) == 0 + + def test_prepare_for_logs_already_has_file_handler(self, tmp_path): + """Test preparing logger when file handler already exists""" + output_dir = tmp_path / "output" + logger = Logger("prepare_test_logger") + + # Add a file handler manually first + log_file = output_dir / "manual.log" + log_file.parent.mkdir(parents=True, exist_ok=True) + file_handler = logging.FileHandler(str(log_file)) + logger.logger.addHandler(file_handler) + + # Prepare for logs again + logger.prepare_for_logs(str(output_dir), log_level="INFO") + + # Should still have only one file handler + file_handlers = [h for h in logger.logger.handlers if isinstance(h, logging.FileHandler)] + assert len(file_handlers) == 1 + + def test_get_logger_singleton(self): + """Test that get_logger returns the same instance""" + logger1 = get_logger() + logger2 = get_logger() + + assert logger1 is logger2 + + def test_get_logger_with_file(self, tmp_path): + """Test get_logger with file parameter""" + log_file = tmp_path / "get_logger_test.log" + logger = get_logger(str(log_file)) + + # Check that we have 2 handlers (console + file) + assert len(logger.logger.handlers) == 2 # Console + file + assert isinstance(logger.logger.handlers[1], logging.FileHandler) + + # Check file exists + assert log_file.exists() + + +class 
TestLoggerIntegration: + """Integration tests for logger functionality""" + + def setup_method(self): + """Reset the global logger before each test method""" + import QEfficient.finetune.experimental.core.logger as logger_module + + logger_module._logger = None + + def test_complete_workflow(self, tmp_path, caplog): + """Test complete logger workflow""" + # Setup + log_file = tmp_path / "workflow.log" + logger = Logger("workflow_test", str(log_file), logging.DEBUG) + + # Test all methods + logger.debug("Debug test") + logger.info("Info test") + logger.warning("Warning test") + logger.error("Error test") + logger.critical("Critical test") + + # Test exception handling + try: + raise ValueError("Test exception") + except ValueError as e: + logger.log_exception("Caught exception", e, raise_exception=False) + + # Test rank zero logging + with patch("QEfficient.finetune.experimental.core.logger.get_local_rank") as mock_rank: + mock_rank.return_value = 0 + logger.log_rank_zero("Rank zero test") + + # Verify all messages were logged + with caplog.at_level(logging.DEBUG): + assert "Debug test" in caplog.text + assert "Info test" in caplog.text + assert "Warning test" in caplog.text + assert "Error test" in caplog.text + assert "Critical test" in caplog.text + assert "Caught exception: Test exception" in caplog.text + assert "Rank zero test" in caplog.text + + # Check file was written to + assert log_file.exists() + content = log_file.read_text() + assert "Debug test" in content + assert "Info test" in content + assert "Warning test" in content + assert "Error test" in content + assert "Critical test" in content + assert "Caught exception: Test exception" in content + assert "Rank zero test" in content + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) From 31fe21ff6c608ce9a59178d023d83a8c91f1d731 Mon Sep 17 00:00:00 2001 From: Meet Patel Date: Fri, 28 Nov 2025 17:30:12 +0530 Subject: [PATCH 27/60] [QEff. 
Finetune]: Added component registry and factory functionality. (#645) - Added functionality to register dataset, model, optimizer, trainer objects in a registry and fetch the class of given object based on configuration provided. - Also, added simple test cases to verify the functionality. --------- Signed-off-by: meetkuma Signed-off-by: Dhiraj Kumar Sah --- .../experimental/core/component_registry.py | 194 ++++++++++++++++++ .../experimental/tests/test_registry.py | 167 +++++++++++++++ 2 files changed, 361 insertions(+) create mode 100644 QEfficient/finetune/experimental/tests/test_registry.py diff --git a/QEfficient/finetune/experimental/core/component_registry.py b/QEfficient/finetune/experimental/core/component_registry.py index d647b73a6..7744d71e6 100644 --- a/QEfficient/finetune/experimental/core/component_registry.py +++ b/QEfficient/finetune/experimental/core/component_registry.py @@ -4,3 +4,197 @@ # SPDX-License-Identifier: BSD-3-Clause # # ----------------------------------------------------------------------------- + + +import logging +from typing import Callable, Dict, Optional, Type + +# from QEfficient.finetune.experimental.core.logger import get_logger + +# logger = get_logger() +logger = logging.getLogger(__name__) + + +def get_object(obj_dict: Dict, name: str, object_type: str, list_fn: Callable) -> Optional[Type]: + """Utility to get object from a dictionary with error handling.""" + obj = obj_dict.get(name) + if obj is None: + raise ValueError(f"Unknown {object_type}: {name}. 
Available: {list_fn()}")
+    return obj
+
+
+class ComponentRegistry:
+    """Registry for managing different training components."""
+
+    def __init__(self):
+        self._optimizers: Dict[str, Type] = {}
+        self._schedulers: Dict[str, Type] = {}
+        self._datasets: Dict[str, Type] = {}
+        self._models: Dict[str, Type] = {}
+        self._data_collators: Dict[str, Type] = {}
+        self._metrics: Dict[str, Type] = {}
+        self._loss_functions: Dict[str, Type] = {}
+        self._callbacks: Dict[str, Type] = {}
+        self._hooks: Dict[str, Type] = {}
+        self._trainer_modules: Dict[str, Type] = {}
+
+    def trainer_module(self, name: str, args_cls=None, required_kwargs=None):
+        """
+        Decorator to register a trainer module with its configuration.
+        Each trainer module has to be bound to its args class and required kwargs.
+
+        Args:
+            name: Name of the trainer type
+            args_cls: The arguments class for this trainer
+            required_kwargs: Dictionary of required keyword arguments and their default values
+        """
+        required_kwargs = required_kwargs or {}
+
+        def decorator(trainer_cls):
+            self._trainer_modules[name] = {
+                "trainer_cls": trainer_cls,
+                "args_cls": args_cls,
+                "required_kwargs": required_kwargs,
+            }
+            logger.info(f"Registered trainer module: {name}")
+            return self._trainer_modules[name]
+
+        return decorator
+
+    def optimizer(self, name: str):
+        """Decorator to register an optimizer class."""
+
+        def decorator(cls: Type):
+            self._optimizers[name] = cls
+            logger.info(f"Registered optimizer: {name}")
+            return cls
+
+        return decorator
+
+    def scheduler(self, name: str):
+        """Decorator to register a scheduler class."""
+
+        def decorator(cls: Type):
+            self._schedulers[name] = cls
+            logger.info(f"Registered scheduler: {name}")
+            return cls
+
+        return decorator
+
+    def dataset(self, name: str):
+        """Decorator to register a dataset class."""
+
+        def decorator(cls: Type):
+            self._datasets[name] = cls
+            logger.info(f"Registered dataset: {name}")
+            return cls
+
+        return decorator
+
+    def model(self, name: str):
"""Decorator to register a model class.""" + + def decorator(cls: Type): + self._models[name] = cls + logger.info(f"Registered model: {name}") + return cls + + return decorator + + def data_collator(self, name: str): + """Decorator to register a data collator class.""" + + def decorator(fn_pointer: Type): + self._data_collators[name] = fn_pointer + logger.info(f"Registered data collator: {name}") + return fn_pointer + + return decorator + + def loss_function(self, name: str): + """Decorator to register a loss function class.""" + + def decorator(cls: Type): + self._loss_functions[name] = cls + logger.info(f"Registered loss function: {name}") + return cls + + return decorator + + def callback(self, name: str): + """Decorator to register a callback class.""" + + def decorator(cls: Type): + self._callbacks[name] = cls + logger.info(f"Registered callback: {name}") + return cls + + return decorator + + def get_trainer_module(self, name: str) -> Optional[Type]: + """Get trainer module class by name.""" + return get_object(self._trainer_modules, name, "trainer module", self.list_trainer_modules) + + def get_optimizer(self, name: str) -> Optional[Type]: + """Get optimizer class by name.""" + return get_object(self._optimizers, name, "optimizer", self.list_optimizers) + + def get_scheduler(self, name: str) -> Optional[Type]: + """Get scheduler class by name.""" + return get_object(self._schedulers, name, "scheduler", self.list_schedulers) + + def get_dataset(self, name: str) -> Optional[Type]: + """Get dataset class by name.""" + return get_object(self._datasets, name, "dataset", self.list_datasets) + + def get_model(self, name: str) -> Optional[Type]: + """Get model class by name.""" + return get_object(self._models, name, "model", self.list_models) + + def get_data_collator(self, name: str) -> Optional[Type]: + """Get data collator class by name.""" + return get_object(self._data_collators, name, "data collator", self.list_data_collators) + + def get_loss_function(self, 
name: str) -> Optional[Type]: + """Get loss function class by name.""" + return get_object(self._loss_functions, name, "loss function", self.list_loss_functions) + + def get_callback(self, name: str) -> Optional[Type]: + """Get callback class by name.""" + return get_object(self._callbacks, name, "callback", self.list_callbacks) + + def list_trainer_modules(self) -> list[str]: + """List all registered trainer modules.""" + return list(self._trainer_modules.keys()) + + def list_optimizers(self) -> list[str]: + """List all registered optimizers.""" + return list(self._optimizers.keys()) + + def list_schedulers(self) -> list[str]: + """List all registered schedulers.""" + return list(self._schedulers.keys()) + + def list_datasets(self) -> list[str]: + """List all registered datasets.""" + return list(self._datasets.keys()) + + def list_models(self) -> list[str]: + """List all registered models.""" + return list(self._models.keys()) + + def list_data_collators(self) -> list[str]: + """List all registered data collators.""" + return list(self._data_collators.keys()) + + def list_loss_functions(self) -> list[str]: + """List all registered loss functions.""" + return list(self._loss_functions.keys()) + + def list_callbacks(self) -> list[str]: + """List all registered callbacks.""" + return list(self._callbacks.keys()) + + +# Global registry instance +registry = ComponentRegistry() diff --git a/QEfficient/finetune/experimental/tests/test_registry.py b/QEfficient/finetune/experimental/tests/test_registry.py new file mode 100644 index 000000000..3e10aa820 --- /dev/null +++ b/QEfficient/finetune/experimental/tests/test_registry.py @@ -0,0 +1,167 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import pytest + +from QEfficient.finetune.experimental.core.component_registry import ComponentRegistry, get_object, registry + + +class TestComponentRegistry: + @pytest.fixture(autouse=True) + def setUp(self): + """Set up test fixtures before each test method.""" + self.registry = ComponentRegistry() + + @pytest.mark.parametrize( + "register_method, get_method, object_name", + [ + ("trainer_module", "get_trainer_module", "test_trainer"), + ("optimizer", "get_optimizer", "test_optimizer"), + ("scheduler", "get_scheduler", "test_scheduler"), + ("dataset", "get_dataset", "test_dataset"), + ("model", "get_model", "test_model"), + ("data_collator", "get_data_collator", "test_collator"), + ("loss_function", "get_loss_function", "test_loss"), + ("callback", "get_callback", "test_callback"), + ], + ) + def test_object_success(self, register_method: str, get_method: str, object_name: str): + """Test object registration decorator.""" + + class MockObject: + pass + + # Register with decorator + getattr(self.registry, register_method)(object_name)(MockObject) + + # Verify registration + retrieved = getattr(self.registry, get_method)(object_name) + if register_method == "trainer_module": + retrieved = retrieved["trainer_cls"] + assert retrieved == MockObject + + @pytest.mark.parametrize( + "object_type, get_method", + [ + ("trainer module", "get_trainer_module"), + ("optimizer", "get_optimizer"), + ("scheduler", "get_scheduler"), + ("dataset", "get_dataset"), + ("model", "get_model"), + ("data collator", "get_data_collator"), + ("loss function", "get_loss_function"), + ("callback", "get_callback"), + ], + ) + def test_object_failure(self, object_type: str, get_method: str, object_name: str = "non_existent"): + """Test failure when retrieving non-existent object.""" + with pytest.raises(ValueError) as exc_info: + getattr(self.registry, 
get_method)(object_name) + + assert f"Unknown {object_type}" in str(exc_info.value) + + def test_init_empty_registries(self): + """Test that all registries are initialized as empty dictionaries.""" + assert len(self.registry._optimizers) == 0 + assert len(self.registry._schedulers) == 0 + assert len(self.registry._datasets) == 0 + assert len(self.registry._models) == 0 + assert len(self.registry._data_collators) == 0 + assert len(self.registry._metrics) == 0 + assert len(self.registry._loss_functions) == 0 + assert len(self.registry._callbacks) == 0 + assert len(self.registry._hooks) == 0 + assert len(self.registry._trainer_modules) == 0 + + def test_trainer_module_with_args_and_kwargs(self): + """Test trainer module registration with args class and required kwargs.""" + + class MockArgs: + pass + + class MockTrainer: + pass + + # Register with decorator including args class and required kwargs + self.registry.trainer_module( + "test_trainer_with_args", args_cls=MockArgs, required_kwargs={"param1": "default1", "param2": "default2"} + )(MockTrainer) + + # Verify registration details + module_info = self.registry.get_trainer_module("test_trainer_with_args") + assert module_info["trainer_cls"] == MockTrainer + assert module_info["args_cls"] == MockArgs + assert module_info["required_kwargs"] == {"param1": "default1", "param2": "default2"} + + def test_list_methods(self): + """Test all list methods return correct keys.""" + + # Register some dummy items + class DummyClass: + pass + + self.registry.optimizer("opt1")(DummyClass) + self.registry.scheduler("sched1")(DummyClass) + self.registry.dataset("ds1")(DummyClass) + self.registry.model("model1")(DummyClass) + self.registry.data_collator("coll1")(lambda x: x) + self.registry.loss_function("loss1")(DummyClass) + self.registry.callback("cb1")(DummyClass) + self.registry.trainer_module("tm1")(DummyClass) + + # Test lists + assert self.registry.list_optimizers() == ["opt1"] + assert self.registry.list_schedulers() == 
["sched1"] + assert self.registry.list_datasets() == ["ds1"] + assert self.registry.list_models() == ["model1"] + assert self.registry.list_data_collators() == ["coll1"] + assert self.registry.list_loss_functions() == ["loss1"] + assert self.registry.list_callbacks() == ["cb1"] + assert self.registry.list_trainer_modules() == ["tm1"] + + def test_logging_on_registration(self, mocker): + """Test that registration logs messages.""" + mock_logger = mocker.patch("QEfficient.finetune.experimental.core.component_registry.logger") + + class MockClass: + pass + + # Test optimizer registration logging + self.registry.optimizer("test_opt")(MockClass) + mock_logger.info.assert_called_with("Registered optimizer: test_opt") + + # Reset mock + mock_logger.reset_mock() + + # Test trainer module registration logging + self.registry.trainer_module("test_tm")(MockClass) + mock_logger.info.assert_called_with("Registered trainer module: test_tm") + + +class TestGetObjectFunction: + def test_get_object_success(self): + """Test get_object function success case.""" + test_dict = {"key1": "value1", "key2": "value2"} + + result = get_object(test_dict, "key1", "test_type", lambda: ["key1", "key2"]) + assert result == "value1" + + def test_get_object_failure(self): + """Test get_object function failure case.""" + test_dict = {"key1": "value1"} + + with pytest.raises(ValueError) as exc_info: + get_object(test_dict, "nonexistent", "test_type", lambda: ["key1", "key2"]) + + assert "Unknown test_type: nonexistent" in str(exc_info.value) + assert "Available: ['key1', 'key2']" in str(exc_info.value) + + +class TestGlobalRegistry: + def test_global_registry_instance(self): + """Test that global registry instance exists and is of correct type.""" + assert isinstance(registry, ComponentRegistry) From 5e9d760da5a3d3d39ee1099756a92042e01535ed Mon Sep 17 00:00:00 2001 From: Tanisha Chawada Date: Fri, 5 Dec 2025 15:07:40 +0530 Subject: [PATCH 28/60] [QEff. 
Finetune]: Adding optimizer registry and its test cases (#649) Adding a Script for Registering and Retrieving Optimizer Classes The script includes: get_optimizer() Returns the optimizer class and kwargs. Additionally, there is a test_optimizer.py script that validates the functionality of the optimizer registration and retrieval process. --------- Signed-off-by: Tanisha Chawada Signed-off-by: Dhiraj Kumar Sah --- .../finetune/experimental/core/optimizer.py | 25 +++++ .../experimental/tests/test_optimizer.py | 96 +++++++++++++++++++ 2 files changed, 121 insertions(+) create mode 100644 QEfficient/finetune/experimental/tests/test_optimizer.py diff --git a/QEfficient/finetune/experimental/core/optimizer.py b/QEfficient/finetune/experimental/core/optimizer.py index d647b73a6..d4f82cbeb 100644 --- a/QEfficient/finetune/experimental/core/optimizer.py +++ b/QEfficient/finetune/experimental/core/optimizer.py @@ -4,3 +4,28 @@ # SPDX-License-Identifier: BSD-3-Clause # # ----------------------------------------------------------------------------- + +""" +Optimizer components for the training system. +""" + +import torch.optim as optim + +from QEfficient.finetune.experimental.core.component_registry import registry + +registry.optimizer("Adam")(optim.Adam) +registry.optimizer("AdamW")(optim.AdamW) +registry.optimizer("SGD")(optim.SGD) + + +def prepare_optimizer(opt_config): + """ + Create optimizer from config. + Args: opt_config: Dictionary containing optimizer configuration. + Returns: Tuple of optimizer class and its arguments. 
+ """ + opt_name = opt_config.pop("optimizer_name") + opt_cls = registry.get_optimizer(opt_name) + opt_config["lr"] = float(opt_config["lr"]) + optimizer_cls_and_kwargs = (opt_cls, opt_config) + return optimizer_cls_and_kwargs diff --git a/QEfficient/finetune/experimental/tests/test_optimizer.py b/QEfficient/finetune/experimental/tests/test_optimizer.py new file mode 100644 index 000000000..e105d5ddf --- /dev/null +++ b/QEfficient/finetune/experimental/tests/test_optimizer.py @@ -0,0 +1,96 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import copy + +import pytest +import torch.nn as nn +import torch.optim as optim + +from QEfficient.finetune.experimental.core.component_registry import registry +from QEfficient.finetune.experimental.core.optimizer import prepare_optimizer + +OPTIMIZER_CONFIGS = { + "Adam": { + "optimizer_name": "Adam", + "opt_cls": optim.Adam, + "lr": 1e-4, + "weight_decay": 0.01, + "betas": (0.9, 0.999), + "eps": 1e-8, + "amsgrad": False, + }, + "AdamW": { + "optimizer_name": "AdamW", + "opt_cls": optim.AdamW, + "lr": 1e-4, + "weight_decay": 0.01, + "betas": (0.9, 0.999), + "eps": 1e-8, + "amsgrad": False, + }, + "SGD": { + "optimizer_name": "SGD", + "opt_cls": optim.SGD, + "lr": 1e-4, + "momentum": 0.9, + "weight_decay": 0.01, + "dampening": 0.0, + "nesterov": False, + }, + "RMSprop": { + "optimizer_name": "RMSprop", + "opt_cls": optim.RMSprop, + }, +} + +REGISTRY_CONFIG = { + "RMSprop": { + "optimizer_name": "RMSprop", + "opt_cls": optim.RMSprop, + }, +} + + +@pytest.fixture +def dummy_model(): + return nn.Sequential( + nn.Linear(10, 5), + nn.ReLU(), + nn.Linear(5, 1), + ) + + +@pytest.mark.parametrize("opt_name", OPTIMIZER_CONFIGS.keys()) +def test_optimizers(opt_name, dummy_model): + """Test that all 
registered optimizers can be created with their configs.""" + config = copy.deepcopy(OPTIMIZER_CONFIGS[opt_name]) + + config.pop("opt_cls") + try: + optimizer_class_and_kwargs = prepare_optimizer(config) + assert optimizer_class_and_kwargs is not None + except ValueError as e: + assert "Unknown optimizer" in str(e) + return + optimizer_class = optimizer_class_and_kwargs[0] + opt_inst = optimizer_class(dummy_model.parameters(), **optimizer_class_and_kwargs[1]) + assert isinstance(opt_inst, optim.Optimizer) + assert len(list(opt_inst.param_groups)) == 1 + + for key in ["lr", "weight_decay", "betas", "eps", "momentum", "dampening", "nesterov", "amsgrad"]: + if key in config: + assert opt_inst.param_groups[0][key] == config[key], f"{key} mismatch" + + +@pytest.mark.parametrize("opt_name, opt_cls", REGISTRY_CONFIG.items()) +def test_registered_optimizer(opt_name, opt_cls): + """Test that the optimizer is registered correctly.""" + registry.optimizer(opt_name)(opt_cls) + optimizer_class = registry.get_optimizer(opt_name) + assert optimizer_class is not None + assert optimizer_class == opt_cls From 92e4436df602682f423bafd2bf23ef122835815a Mon Sep 17 00:00:00 2001 From: Dhiraj Kumar Sah Date: Fri, 5 Dec 2025 17:39:32 +0530 Subject: [PATCH 29/60] [QEff. Finetune]: Added Base dataset class and SFT dataset classes along with its test cases. (#647) Edited the SFTDataset class to enable custom dataset loading. Updated the dataset.py file to only enable support for SFTDataset type. Created test file to check the functionalities.
--------- Signed-off-by: Dhiraj Kumar Sah --- .../finetune/experimental/core/dataset.py | 251 +++++++++ .../experimental/core/utils/dataset_utils.py | 25 + .../experimental/tests/test_dataset.py | 528 ++++++++++++++++++ 3 files changed, 804 insertions(+) create mode 100644 QEfficient/finetune/experimental/tests/test_dataset.py diff --git a/QEfficient/finetune/experimental/core/dataset.py b/QEfficient/finetune/experimental/core/dataset.py index d647b73a6..4a243c40b 100644 --- a/QEfficient/finetune/experimental/core/dataset.py +++ b/QEfficient/finetune/experimental/core/dataset.py @@ -4,3 +4,254 @@ # SPDX-License-Identifier: BSD-3-Clause # # ----------------------------------------------------------------------------- + +""" +Dataset components for the training system. +""" + +import importlib +import os +import re +from abc import ABC, abstractmethod +from typing import Any, Callable, Dict + +from datasets import load_dataset, load_dataset_builder +from torch.utils.data import Dataset + +from QEfficient.finetune.experimental.core.component_registry import registry +from QEfficient.finetune.experimental.core.utils.dataset_utils import ( + apply_train_test_split, +) + + +class BaseDataset(Dataset, ABC): + """Base class for all datasets to ensure consistent interface.""" + + def __init__(self, dataset_name: str, split: str, seed: int = 42, **kwargs): + self.dataset_name = dataset_name + self.split = split + self.seed = seed + self.kwargs = kwargs + self._initialize_dataset() + + @abstractmethod + def _initialize_dataset(self): + """Subclasses should implement this to load and prepare the dataset.""" + pass + + @abstractmethod + def __len__(self): + """Return the number of samples in the dataset.""" + pass + + @abstractmethod + def __getitem__(self, idx): + """Should return a dictionary with 'input_ids', 'attention_mask', and 'labels'.""" + pass + + +@registry.dataset("sft_dataset") +class SFTDataset(BaseDataset): + """ + A Supervised Fine-Tuning (SFT) dataset class for 
text data. + + This class handles loading data from Hugging Face datasets or custom JSON files, + filtering out invalid samples, and applying a prompt/completion templating for SFT tasks. + + Args: + dataset_name (str): The name of the dataset to load from Hugging Face datasets. + Ignored if json_file_path is provided. + split (str): The dataset split to use (e.g., "train", "validation", "test"). + split_ratio (float): Ratio for train/test split when only one split is available. + seed (int): Random seed for reproducibility. + json_file_path (str, optional): Path to a custom JSON file containing the dataset. + If provided, this takes precedence over dataset_name. + prompt_template (str): A string template for constructing the prompt. Variables in the + template should be enclosed in curly braces, e.g., "Answer the question: {question}". + completion_template (str): A string template for constructing the completion (target). + Variables should be enclosed in curly braces, e.g., "{answer}". + + Raises: + RuntimeError: If any variables specified in `prompt_template` or `completion_template` + are not found as columns in the loaded dataset. 
+ """ + + def __init__( + self, + dataset_name: str, + split: str, + split_ratio: float = 0.8, + seed: int = 42, + **kwargs, + ): + self.split_ratio = split_ratio + self.json_file_path = kwargs.get("json_file_path", None) + self.prompt_template = kwargs.get("prompt_template", None) + self.completion_template = kwargs.get("completion_template", None) + self.prompt_func_path = kwargs.get("prompt_func", None) + self.completion_func_path = kwargs.get("completion_func", None) + self.remove_samples_with_empty_columns = kwargs.get("remove_samples_with_empty_columns", True) + + if self.json_file_path not in (None, ""): + if not os.path.isfile(self.json_file_path): + raise FileNotFoundError(f"JSON file not found or invalid: '{self.json_file_path}'") + if (self.prompt_template is None and self.prompt_func_path is None) or ( + self.prompt_template is not None and self.prompt_func_path is not None + ): + raise RuntimeError("Either provide prompt_template or prompt_func in the config.") + if (self.completion_template is None and self.completion_func_path is None) or ( + self.completion_template is not None and self.completion_func_path is not None + ): + raise RuntimeError("Either provide completion_template or completion_func in the config.") + + # Call parent class __init__ which will call _initialize_dataset + super().__init__(dataset_name, split, seed, **kwargs) + + def _initialize_dataset(self): + """ + Initialize the dataset from either HuggingFace or a custom JSON file. + + This method loads the dataset, applies splitting if necessary, and prepares + it for preprocessing with prompt/completion templates. 
+ """ + if self.json_file_path: + # Load dataset from JSON file + self.dataset = load_dataset("json", data_files=self.json_file_path, split="train") + + # Apply train/test split if needed + if self.split in ["train", "test"]: + self.dataset = apply_train_test_split(self.dataset, self.split_ratio, self.split, self.seed) + else: + # Load dataset from HuggingFace + db = load_dataset_builder(self.dataset_name) + available_splits = [] + if db.info.splits is not None: + available_splits = list(db.info.splits.keys()) + + if self.split not in available_splits: + raise ValueError(f"Split {self.split} is not available for dataset {self.dataset_name}.") + + # FIXME: Add streaming support for larger datasets. + self.dataset = load_dataset(self.dataset_name, split=self.split) + + if len(available_splits) == 1: + self.dataset = apply_train_test_split(self.dataset, self.split_ratio, self.split, self.seed) + + self.dataset = self._setup_templates(self.dataset, self.dataset.column_names) + + def _setup_templates(self, dataset, dataset_columns): + """ + Set up prompt/completion templates or functions and apply preprocessing. + """ + if self.prompt_template: + self.prompt_func = None + # Extract variables from templates and check if they exist in dataset columns + prompt_variables = re.findall(r"\{(.*?)\}", self.prompt_template) + for var in prompt_variables: + if var not in dataset_columns: + raise RuntimeError( + f"Prompt template variable '{var}' not found in dataset columns: {dataset_columns}." 
+ ) + else: + prompt_variables = dataset_columns + self.prompt_func = self.import_func(self.prompt_func_path) + + if self.completion_template: + self.completion_func = None + # Extract variables from templates and check if they exist in dataset columns + completion_variables = re.findall(r"\{(.*?)\}", self.completion_template) + for var in completion_variables: + if var not in dataset_columns: + raise RuntimeError( + f"Completion template variable '{var}' not found in dataset columns: {dataset_columns}." + ) + else: + completion_variables = dataset_columns + self.completion_func = self.import_func(self.completion_func_path) + + # Filter out samples with None or empty strings in relevant columns + relevant_columns = list(set(prompt_variables + completion_variables)) + if self.remove_samples_with_empty_columns: + dataset = dataset.filter(lambda example: self._filter_empty_or_none_samples(example, relevant_columns)) + return dataset + + def import_func(self, func_path: str) -> Callable: + if ":" not in func_path: + raise ValueError("func_path must be in the format 'module_file_path:function_name'.") + module_file_path, function_name = func_path.split(":") + + try: + module = importlib.import_module(module_file_path) + except Exception: + raise RuntimeError(f"Unable to import module : {module_file_path}.") + if not hasattr(module, function_name): + raise ValueError(f"Function {function_name} not found in module {module_file_path}.") + return getattr(module, function_name) + + def _filter_empty_or_none_samples(self, example: Dict[str, Any], relevant_columns: list) -> bool: + """ + Filters out samples where any of the relevant columns are None or contain only whitespace. + + Args: + example (Dict[str, Any]): A single sample from the dataset. + relevant_columns (list): List of column names to check for empty or None values. + + Returns: + bool: True if the sample should be kept, False otherwise. 
+ """ + for column in relevant_columns: + value = example.get(column) + if value is None or (isinstance(value, str) and not value.strip()): + return False + return True + + def _preprocess_sample(self, example: Dict[str, Any]) -> Dict[str, str]: + """ + Applies the prompt and completion templates to a single example. + + Args: + example (Dict[str, Any]): A single sample from the dataset. + + Returns: + Dict[str, str]: A dictionary containing the 'prompt' and 'completion' strings. + """ + prompt_text = ( + self.prompt_func(example) if self.prompt_func is not None else self.prompt_template.format(**example) + ) + completion_text = ( + self.completion_func(example) + if self.completion_func is not None + else self.completion_template.format(**example) + ) + return { + "prompt": prompt_text, + "completion": completion_text, + } + + def __len__(self) -> int: + """ + Returns the number of samples in the dataset. + + Returns: + int: The total number of samples. + """ + return self.dataset.num_rows + + def __getitem__(self, idx: int) -> Dict[str, str]: + """ + Retrieves a processed sample from the dataset at the given index. + This method doesn't tokenize the input items, it is expected that the SFTTrainer will handle tokenization. + + Args: + idx (int): The index of the sample to retrieve. + + Returns: + Dict[str, str]: A dictionary containing the processed 'prompt' and 'completion' for the sample. 
+ """ + # Get the raw example using .select and access the first element + example = self.dataset.select(indices=[int(idx)])[0] + + # Apply preprocessing (templating) on the fly + processed_example = self._preprocess_sample(example) + + return processed_example diff --git a/QEfficient/finetune/experimental/core/utils/dataset_utils.py b/QEfficient/finetune/experimental/core/utils/dataset_utils.py index d647b73a6..11e2fecfc 100644 --- a/QEfficient/finetune/experimental/core/utils/dataset_utils.py +++ b/QEfficient/finetune/experimental/core/utils/dataset_utils.py @@ -4,3 +4,28 @@ # SPDX-License-Identifier: BSD-3-Clause # # ----------------------------------------------------------------------------- +def insert_pad_token(tokenizer): + # Add pad token if it doesn't exist + if tokenizer.pad_token is None: + # Try to use existing special token as pad token + if tokenizer.eos_token is not None: + tokenizer.pad_token = tokenizer.eos_token + elif tokenizer.bos_token is not None: + tokenizer.pad_token = tokenizer.bos_token + elif tokenizer.sep_token is not None: + tokenizer.pad_token = tokenizer.sep_token + else: + # Add a new pad token + tokenizer.add_special_tokens({"pad_token": "[PAD]"}) + + +def apply_train_test_split(dataset, split_ratio, split, seed): + """ + Apply train/test split to the dataset based on split_ratio. + """ + splitted_dataset = dataset.train_test_split(test_size=(1 - split_ratio), seed=seed) + if split == "test": + dataset = splitted_dataset["test"] + else: + dataset = splitted_dataset["train"] + return dataset diff --git a/QEfficient/finetune/experimental/tests/test_dataset.py b/QEfficient/finetune/experimental/tests/test_dataset.py new file mode 100644 index 000000000..ca2fc1450 --- /dev/null +++ b/QEfficient/finetune/experimental/tests/test_dataset.py @@ -0,0 +1,528 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +""" +Tests for dataset components. +""" + +import json +import os +import tempfile +import unittest +from unittest.mock import MagicMock, patch + +from QEfficient.finetune.experimental.core.dataset import BaseDataset, SFTDataset + +SEED = 42 +SPLIT_RATIO = 0.8 + + +class TestBaseDataset(unittest.TestCase): + """Tests for BaseDataset abstract class.""" + + def test_base_dataset_cannot_be_instantiated(self): + """Test that BaseDataset cannot be instantiated directly.""" + with self.assertRaises(TypeError): + BaseDataset(dataset_name="test", split="train") + + +class TestSFTDataset(unittest.TestCase): + """Tests for SFTDataset class.""" + + def setUp(self): + """Set up test fixtures.""" + # Create a temporary directory for test files + self.test_dir = tempfile.mkdtemp() + self.json_file_path = os.path.join(self.test_dir, "test_dataset.json") + + # Create a dummy JSON dataset + self.dummy_data = [ + {"question": "What is AI?", "answer": "Artificial Intelligence"}, + {"question": "What is ML?", "answer": "Machine Learning"}, + {"question": "What is DL?", "answer": "Deep Learning"}, + {"question": "What is NLP?", "answer": "Natural Language Processing"}, + {"question": "", "answer": "Empty question"}, # Empty question + {"question": "Valid question", "answer": ""}, # Empty answer + {"question": None, "answer": "None question"}, # None question + {"question": "Valid question 2", "answer": None}, # None answer + ] + + with open(self.json_file_path, "w") as f: + json.dump(self.dummy_data, f) + + def tearDown(self): + """Clean up test fixtures.""" + # Remove temporary files and directories + import shutil + + if os.path.exists(self.test_dir): + shutil.rmtree(self.test_dir) + + @patch("QEfficient.finetune.experimental.core.dataset.load_dataset") + @patch("QEfficient.finetune.experimental.core.dataset.load_dataset_builder") + def 
test_sft_dataset_with_huggingface_dataset_and_templates(self, mock_builder, mock_load): + """Test loading from HuggingFace dataset with templates using mocked data.""" + # Create mock dataset with dummy data + mock_dataset = MagicMock() + mock_dataset.column_names = ["text", "label"] + mock_dataset.num_rows = 3 + + # Mock the select method to return individual samples + def mock_select(indices): + sample_data = [ + {"text": "Sample text 1", "label": "Label 1"}, + {"text": "Sample text 2", "label": "Label 2"}, + {"text": "Sample text 3", "label": "Label 3"}, + ] + return [sample_data[indices[0]]] + + mock_dataset.select = mock_select + mock_dataset.filter = lambda func: mock_dataset # Return self for filtering + + # Mock train_test_split to return a dict with train/test splits + mock_split_result = {"train": mock_dataset, "test": mock_dataset} + mock_dataset.train_test_split = lambda test_size, seed: mock_split_result + + # Mock the dataset builder to indicate multiple splits are available + mock_info = MagicMock() + mock_info.splits = {"train": MagicMock(), "test": MagicMock()} + mock_builder.return_value.info = mock_info + + # Mock load_dataset to return our mock dataset + mock_load.return_value = mock_dataset + + # Create the dataset + dataset = SFTDataset( + dataset_name="dummy_hf_dataset", + split="train", + prompt_template="Text: {text}", + completion_template="Label: {label}", + ) + + self.assertIsNotNone(dataset) + self.assertEqual(len(dataset), 3) + + # Test __getitem__ + sample = dataset[0] + self.assertIn("prompt", sample) + self.assertIn("completion", sample) + self.assertTrue(sample["prompt"].startswith("Text:")) + self.assertTrue(sample["completion"].startswith("Label:")) + + def test_sft_dataset_with_json_file_and_templates(self): + """Test loading from JSON file with templates.""" + dataset = SFTDataset( + dataset_name="dummy", # Ignored when json_file_path is provided + split="train", + json_file_path=self.json_file_path, + prompt_template="Q: 
{question}", + completion_template="A: {answer}", + ) + + self.assertIsNotNone(dataset) + # After filtering empty/None values and applying train split (default 0.8) + # we get a subset of the 4 valid samples + self.assertGreater(len(dataset), 0) + self.assertLessEqual(len(dataset), 4) + + # Test __getitem__ + sample = dataset[0] + self.assertIn("prompt", sample) + self.assertIn("completion", sample) + self.assertTrue(sample["prompt"].startswith("Q:")) + self.assertTrue(sample["completion"].startswith("A:")) + + def test_sft_dataset_json_file_without_filtering(self): + """Test loading from JSON file without filtering empty samples.""" + dataset = SFTDataset( + dataset_name="dummy", + split="train", + json_file_path=self.json_file_path, + prompt_template="Q: {question}", + completion_template="A: {answer}", + remove_samples_with_empty_columns=False, + ) + + # When filtering is disabled and split="train" is used, it still applies train/test split + # So we get ~80% of 8 samples = ~6 samples + self.assertGreater(len(dataset), 0) + self.assertLessEqual(len(dataset), 8) + + def test_sft_dataset_train_test_split_from_json(self): + """Test train/test split when loading from JSON file.""" + train_dataset = SFTDataset( + dataset_name="dummy", + split="train", + split_ratio=SPLIT_RATIO, + json_file_path=self.json_file_path, + prompt_template="Q: {question}", + completion_template="A: {answer}", + seed=SEED, + ) + + test_dataset = SFTDataset( + dataset_name="dummy", + split="test", + split_ratio=SPLIT_RATIO, + json_file_path=self.json_file_path, + prompt_template="Q: {question}", + completion_template="A: {answer}", + seed=SEED, + ) + + # After filtering, we have 4 valid samples + # With split ratio, train should have ~3 samples, test should have ~1 sample + self.assertGreater(len(train_dataset), 0) + self.assertGreater(len(test_dataset), 0) + # Total should equal the filtered dataset size + self.assertEqual(len(train_dataset) + len(test_dataset), 4) + + def 
test_sft_dataset_with_custom_prompt_function(self): + """Test loading with custom prompt function.""" + # Create a temporary module file with custom functions + func_file_path = os.path.join(self.test_dir, "custom_funcs.py") + with open(func_file_path, "w") as f: + f.write(""" +def custom_prompt(example): + return f"Custom prompt: {example['question']}" + +def custom_completion(example): + return f"Custom completion: {example['answer']}" +""") + + # Add the test directory to sys.path temporarily + import sys + + sys.path.insert(0, self.test_dir) + + try: + dataset = SFTDataset( + dataset_name="dummy", + split="train", + json_file_path=self.json_file_path, + prompt_func="custom_funcs:custom_prompt", + completion_func="custom_funcs:custom_completion", + ) + + self.assertIsNotNone(dataset) + self.assertGreater(len(dataset), 0) + + # Test that custom functions are applied + sample = dataset[0] + self.assertTrue(sample["prompt"].startswith("Custom prompt:")) + self.assertTrue(sample["completion"].startswith("Custom completion:")) + finally: + # Clean up + sys.path.remove(self.test_dir) + if os.path.exists(func_file_path): + os.remove(func_file_path) + + def test_sft_dataset_missing_template_variable(self): + """Test error when template variable is not in dataset columns.""" + with self.assertRaises(RuntimeError) as context: + SFTDataset( + dataset_name="dummy", + split="train", + json_file_path=self.json_file_path, + prompt_template="Q: {nonexistent_column}", + completion_template="A: {answer}", + ) + + self.assertIn("not found in dataset columns", str(context.exception)) + + def test_sft_dataset_missing_completion_template_variable(self): + """Test error when completion template variable is not in dataset columns.""" + with self.assertRaises(RuntimeError) as context: + SFTDataset( + dataset_name="dummy", + split="train", + json_file_path=self.json_file_path, + prompt_template="Q: {question}", + completion_template="A: {nonexistent_column}", + ) + + self.assertIn("not 
found in dataset columns", str(context.exception)) + + def test_sft_dataset_no_prompt_template_or_func(self): + """Test error when neither prompt_template nor prompt_func is provided.""" + with self.assertRaises(RuntimeError) as context: + SFTDataset( + dataset_name="dummy", + split="train", + json_file_path=self.json_file_path, + completion_template="A: {answer}", + ) + + self.assertIn("Either provide prompt_template or prompt_func", str(context.exception)) + + def test_sft_dataset_both_prompt_template_and_func(self): + """Test error when both prompt_template and prompt_func are provided.""" + with self.assertRaises(RuntimeError) as context: + SFTDataset( + dataset_name="dummy", + split="train", + json_file_path=self.json_file_path, + prompt_template="Q: {question}", + prompt_func="module:function", + completion_template="A: {answer}", + ) + + self.assertIn("Either provide prompt_template or prompt_func", str(context.exception)) + + def test_sft_dataset_no_completion_template_or_func(self): + """Test error when neither completion_template nor completion_func is provided.""" + with self.assertRaises(RuntimeError) as context: + SFTDataset( + dataset_name="dummy", + split="train", + json_file_path=self.json_file_path, + prompt_template="Q: {question}", + ) + + self.assertIn( + "Either provide completion_template or completion_func", + str(context.exception), + ) + + def test_sft_dataset_both_completion_template_and_func(self): + """Test error when both completion_template and completion_func are provided.""" + with self.assertRaises(RuntimeError) as context: + SFTDataset( + dataset_name="dummy", + split="train", + json_file_path=self.json_file_path, + prompt_template="Q: {question}", + completion_template="A: {answer}", + completion_func="module:function", + ) + + self.assertIn( + "Either provide completion_template or completion_func", + str(context.exception), + ) + + def test_sft_dataset_invalid_func_path_format(self): + """Test error when func_path doesn't 
contain colon separator.""" + with self.assertRaises(ValueError) as context: + SFTDataset( + dataset_name="dummy", + split="train", + json_file_path=self.json_file_path, + prompt_func="invalid_format", + completion_template="A: {answer}", + ) + + self.assertIn("must be in the format", str(context.exception)) + + def test_sft_dataset_invalid_module_import(self): + """Test error when module cannot be imported.""" + with self.assertRaises(RuntimeError) as context: + SFTDataset( + dataset_name="dummy", + split="train", + json_file_path=self.json_file_path, + prompt_func="nonexistent_module:function", + completion_template="A: {answer}", + ) + + self.assertIn("Unable to import module", str(context.exception)) + + def test_sft_dataset_invalid_function_name(self): + """Test error when function doesn't exist in module.""" + # Create a temporary module file without the expected function + func_file_path = os.path.join(self.test_dir, "test_module.py") + with open(func_file_path, "w") as f: + f.write("def some_other_function():\n pass\n") + + import sys + + sys.path.insert(0, self.test_dir) + + try: + with self.assertRaises(ValueError) as context: + SFTDataset( + dataset_name="dummy", + split="train", + json_file_path=self.json_file_path, + prompt_func="test_module:nonexistent_function", + completion_template="A: {answer}", + ) + + self.assertIn("not found in module", str(context.exception)) + finally: + sys.path.remove(self.test_dir) + if os.path.exists(func_file_path): + os.remove(func_file_path) + + def test_sft_dataset_filter_empty_or_none_samples(self): + """Test filtering of samples with empty or None values.""" + dataset = SFTDataset( + dataset_name="dummy", + split="train", + json_file_path=self.json_file_path, + prompt_template="Q: {question}", + completion_template="A: {answer}", + remove_samples_with_empty_columns=True, + ) + + # Verify that all samples have valid (non-empty) questions and answers + for i in range(len(dataset)): + sample = dataset[i] + # Extract 
the actual question and answer from the formatted strings + question = sample["prompt"].replace("Q: ", "").strip() + answer = sample["completion"].replace("A: ", "").strip() + # Verify neither is empty + self.assertTrue(len(question) > 0, f"Question should not be empty: {sample['prompt']}") + self.assertTrue(len(answer) > 0, f"Answer should not be empty: {sample['completion']}") + + def test_sft_dataset_getitem_returns_correct_format(self): + """Test that __getitem__ returns the correct format.""" + dataset = SFTDataset( + dataset_name="dummy", + split="train", + json_file_path=self.json_file_path, + prompt_template="Q: {question}", + completion_template="A: {answer}", + ) + + sample = dataset[0] + + # Check that sample is a dictionary + self.assertIsInstance(sample, dict) + + # Check that it has the required keys + self.assertIn("prompt", sample) + self.assertIn("completion", sample) + + # Check that values are strings + self.assertIsInstance(sample["prompt"], str) + self.assertIsInstance(sample["completion"], str) + + def test_sft_dataset_len(self): + """Test __len__ method.""" + dataset = SFTDataset( + dataset_name="dummy", + split="train", + json_file_path=self.json_file_path, + prompt_template="Q: {question}", + completion_template="A: {answer}", + ) + + # Check that len returns an integer + self.assertIsInstance(len(dataset), int) + + # Check that len is positive + self.assertGreater(len(dataset), 0) + + # Check that we can iterate through all samples + for i in range(len(dataset)): + sample = dataset[i] + self.assertIsNotNone(sample) + + def test_sft_dataset_with_multiple_template_variables(self): + """Test templates with multiple variables.""" + # Create a more complex JSON dataset + complex_data = [ + {"context": "The sky", "question": "What color?", "answer": "Blue"}, + {"context": "Math", "question": "What is 2+2?", "answer": "4"}, + ] + + complex_json_path = os.path.join(self.test_dir, "complex_dataset.json") + with open(complex_json_path, "w") as f: + 
json.dump(complex_data, f) + + try: + dataset = SFTDataset( + dataset_name="dummy", + split="train", + json_file_path=complex_json_path, + prompt_template="Context: {context}\nQuestion: {question}", + completion_template="Answer: {answer}", + ) + + # With split="train", it applies train/test split, so we get ~80% of 2 samples + self.assertGreater(len(dataset), 0) + self.assertLessEqual(len(dataset), 2) + + sample = dataset[0] + self.assertIn("Context:", sample["prompt"]) + self.assertIn("Question:", sample["prompt"]) + self.assertIn("Answer:", sample["completion"]) + finally: + if os.path.exists(complex_json_path): + os.remove(complex_json_path) + + def test_sft_dataset_seed_reproducibility(self): + """Test that using the same seed produces the same split.""" + dataset1 = SFTDataset( + dataset_name="dummy", + split="train", + split_ratio=SPLIT_RATIO, + json_file_path=self.json_file_path, + prompt_template="Q: {question}", + completion_template="A: {answer}", + seed=SEED, + ) + + dataset2 = SFTDataset( + dataset_name="dummy", + split="train", + split_ratio=SPLIT_RATIO, + json_file_path=self.json_file_path, + prompt_template="Q: {question}", + completion_template="A: {answer}", + seed=SEED, + ) + + # Both datasets should have the same length + self.assertEqual(len(dataset1), len(dataset2)) + + # Both datasets should have the same samples + for i in range(len(dataset1)): + sample1 = dataset1[i] + sample2 = dataset2[i] + self.assertEqual(sample1["prompt"], sample2["prompt"]) + self.assertEqual(sample1["completion"], sample2["completion"]) + + @patch("QEfficient.finetune.experimental.core.dataset.load_dataset") + @patch("QEfficient.finetune.experimental.core.dataset.load_dataset_builder") + def test_sft_dataset_invalid_split(self, mock_builder, mock_load): + """Test error when requesting an invalid split.""" + # Mock the dataset builder to return specific splits + mock_info = MagicMock() + mock_info.splits = {"train": MagicMock(), "validation": MagicMock()} + 
mock_builder.return_value.info = mock_info + + with self.assertRaises(ValueError) as context: + SFTDataset( + dataset_name="dummy_dataset", + split="nonexistent_split", + prompt_template="Q: {question}", + completion_template="A: {answer}", + ) + + self.assertIn("not available", str(context.exception)) + + def test_sft_dataset_invalid_json_path(self): + """Test error when an invalid JSON file path is provided.""" + invalid_path = "/path/to/nonexistent/file.json" + + with self.assertRaises(FileNotFoundError) as context: + SFTDataset( + dataset_name="dummy", + split="train", + json_file_path=invalid_path, + prompt_template="Q: {question}", + completion_template="A: {answer}", + ) + + self.assertIn("JSON file not found or invalid", str(context.exception)) + self.assertIn(invalid_path, str(context.exception)) + + +if __name__ == "__main__": + unittest.main() From 57cb01ad3bb8aafb41ecd97a3f92887a9362a90a Mon Sep 17 00:00:00 2001 From: Dhiraj Kumar Sah Date: Mon, 8 Dec 2025 09:46:03 +0000 Subject: [PATCH 30/60] [QEff.finetune] WIP - Adding TrainerClass and tests for init checks. 
Signed-off-by: Dhiraj Kumar Sah --- .../experimental/core/trainer/base_trainer.py | 8 + .../experimental/core/trainer/sft_trainer.py | 14 + .../experimental/tests/test_trainer.py | 246 ++++++++++++++++++ 3 files changed, 268 insertions(+) create mode 100644 QEfficient/finetune/experimental/tests/test_trainer.py diff --git a/QEfficient/finetune/experimental/core/trainer/base_trainer.py b/QEfficient/finetune/experimental/core/trainer/base_trainer.py index d647b73a6..6d4cc5140 100644 --- a/QEfficient/finetune/experimental/core/trainer/base_trainer.py +++ b/QEfficient/finetune/experimental/core/trainer/base_trainer.py @@ -4,3 +4,11 @@ # SPDX-License-Identifier: BSD-3-Clause # # ----------------------------------------------------------------------------- +from transformers import Trainer, TrainingArguments + +from QEfficient.finetune.experimental.core.component_registry import registry + + +@registry.trainer_module(name="base", args_cls=TrainingArguments, required_kwargs={}) +class BaseTrainer(Trainer): + pass # Just using the standard Trainer diff --git a/QEfficient/finetune/experimental/core/trainer/sft_trainer.py b/QEfficient/finetune/experimental/core/trainer/sft_trainer.py index d647b73a6..f85da2643 100644 --- a/QEfficient/finetune/experimental/core/trainer/sft_trainer.py +++ b/QEfficient/finetune/experimental/core/trainer/sft_trainer.py @@ -4,3 +4,17 @@ # SPDX-License-Identifier: BSD-3-Clause # # ----------------------------------------------------------------------------- +from trl import SFTConfig, SFTTrainer + +from QEfficient.finetune.experimental.core.component_registry import registry +from QEfficient.finetune.experimental.core.config_manager import PeftConfig + +# def get_default_peft_config(): +# """Get default peft config from config_manager and convert to peft library config.""" +# peft_config_params = PeftConfig() +# return peft_config_params.to_peft_config() + + +@registry.trainer_module(name="sft", args_cls=SFTConfig, required_kwargs={"peft_config": 
PeftConfig}) +class SFTTrainerModule(SFTTrainer): + pass # Just using the standard SFTTrainer diff --git a/QEfficient/finetune/experimental/tests/test_trainer.py b/QEfficient/finetune/experimental/tests/test_trainer.py new file mode 100644 index 000000000..d82a7a339 --- /dev/null +++ b/QEfficient/finetune/experimental/tests/test_trainer.py @@ -0,0 +1,246 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import pytest +from peft import IA3Config, LoraConfig + +from QEfficient.finetune.experimental.core.component_registry import registry +from QEfficient.finetune.experimental.core.config_manager import PeftConfig +from QEfficient.finetune.experimental.core.trainer.base_trainer import BaseTrainer +from QEfficient.finetune.experimental.core.trainer.sft_trainer import ( + SFTTrainerModule, + get_default_peft_config, +) + + +class TestBaseTrainer: + """Test suite for BaseTrainer class.""" + + def test_base_trainer_registered(self): + """Test that BaseTrainer is registered in the registry.""" + trainer_list = registry.list_trainer_modules() + assert "base" in trainer_list + + def test_base_trainer_info_structure(self): + """Test that BaseTrainer registration has correct structure.""" + trainer_info = registry.get_trainer_module("base") + + assert isinstance(trainer_info, dict) + assert "trainer_cls" in trainer_info + assert "args_cls" in trainer_info + assert "required_kwargs" in trainer_info + + def test_base_trainer_class(self): + """Test that BaseTrainer class is correct.""" + from transformers import Trainer, TrainingArguments + + trainer_info = registry.get_trainer_module("base") + trainer_cls = trainer_info["trainer_cls"] + + # The decorator returns the dict, but BaseTrainer is the original class + assert trainer_cls.__name__ == 
"BaseTrainer" + assert issubclass(trainer_cls, Trainer) + assert trainer_info["args_cls"] == TrainingArguments + + +class TestSFTTrainerModule: + """Test suite for SFTTrainerModule class.""" + + def test_sft_trainer_registered(self): + """Test that SFTTrainerModule is registered in the registry.""" + trainer_list = registry.list_trainer_modules() + assert "sft" in trainer_list + + def test_sft_trainer_info_structure(self): + """Test that SFTTrainerModule registration has correct structure.""" + trainer_info = registry.get_trainer_module("sft") + + assert isinstance(trainer_info, dict) + assert "trainer_cls" in trainer_info + assert "args_cls" in trainer_info + assert "required_kwargs" in trainer_info + + def test_sft_trainer_class(self): + """Test that SFTTrainerModule class is correct.""" + from trl import SFTConfig, SFTTrainer + + trainer_info = registry.get_trainer_module("sft") + trainer_cls = trainer_info["trainer_cls"] + + assert trainer_cls == SFTTrainerModule["trainer_cls"] + assert issubclass(trainer_cls, SFTTrainer) + assert trainer_info["args_cls"] == SFTConfig + + def test_sft_trainer_required_kwargs(self): + """Test that SFTTrainerModule has peft_config in required_kwargs.""" + trainer_info = registry.get_trainer_module("sft") + + assert "peft_config" in trainer_info["required_kwargs"] + assert callable(trainer_info["required_kwargs"]["peft_config"]) + + +class TestGetDefaultPeftConfig: + """Test suite for get_default_peft_config function.""" + + def test_returns_lora_config(self): + """Test that get_default_peft_config returns a LoraConfig instance.""" + peft_config = get_default_peft_config() + assert isinstance(peft_config, LoraConfig) + + def test_has_correct_defaults(self): + """Test that the returned config has the expected default values.""" + peft_config = get_default_peft_config() + + assert peft_config.r == 8 + assert peft_config.lora_alpha == 16 + assert peft_config.lora_dropout == 0.1 + # target_modules might be a set or list depending on 
peft version + target_modules = peft_config.target_modules + if isinstance(target_modules, set): + assert target_modules == {"q_proj", "v_proj"} + else: + assert set(target_modules) == {"q_proj", "v_proj"} + assert peft_config.bias == "none" + assert peft_config.task_type == "CAUSAL_LM" + + def test_is_callable(self): + """Test that get_default_peft_config is callable.""" + assert callable(get_default_peft_config) + + def test_creates_new_instance_each_call(self): + """Test that each call creates a new config instance.""" + config1 = get_default_peft_config() + config2 = get_default_peft_config() + + # They should be different instances + assert config1 is not config2 + # But have the same values + assert config1.r == config2.r + assert config1.lora_alpha == config2.lora_alpha + + +class TestPeftConfigConversion: + """Test suite for PeftConfig conversion methods.""" + + def test_to_lora_config(self): + """Test that PeftConfig.to_peft_config() correctly converts to LoraConfig.""" + peft_config_params = PeftConfig( + lora_r=16, + lora_alpha=32, + lora_dropout=0.05, + target_modules=["q_proj", "k_proj", "v_proj"], + bias="all", + task_type="CAUSAL_LM", + peft_type="LORA", + ) + + lora_config = peft_config_params.to_peft_config() + + assert isinstance(lora_config, LoraConfig) + assert lora_config.r == 16 + assert lora_config.lora_alpha == 32 + assert lora_config.lora_dropout == 0.05 + # target_modules might be converted to set + target_modules = lora_config.target_modules + if isinstance(target_modules, set): + assert target_modules == {"q_proj", "k_proj", "v_proj"} + else: + assert set(target_modules) == {"q_proj", "k_proj", "v_proj"} + assert lora_config.bias == "all" + assert lora_config.task_type == "CAUSAL_LM" + + def test_to_ia3_config(self): + """Test that PeftConfig.to_peft_config() correctly converts to IA3Config.""" + peft_config_params = PeftConfig( + target_modules=["q_proj", "v_proj"], + task_type="CAUSAL_LM", + peft_type="IA3", + ) + + ia3_config = 
peft_config_params.to_peft_config() + + assert isinstance(ia3_config, IA3Config) + # target_modules might be converted to set + target_modules = ia3_config.target_modules + if isinstance(target_modules, set): + assert target_modules == {"q_proj", "v_proj"} + else: + assert set(target_modules) == {"q_proj", "v_proj"} + assert ia3_config.task_type == "CAUSAL_LM" + + def test_unsupported_type_raises_error(self): + """Test that unsupported peft_type raises ValueError.""" + peft_config_params = PeftConfig(peft_type="UNSUPPORTED") + + with pytest.raises(ValueError) as exc_info: + peft_config_params.to_peft_config() + + assert "Unsupported peft_type: UNSUPPORTED" in str(exc_info.value) + assert "Supported types: 'LORA', 'IA3'" in str(exc_info.value) + + +class TestPeftConfigDefaults: + """Test suite for PeftConfig default values.""" + + def test_default_values(self): + """Test that PeftConfig has correct default values.""" + peft_config = PeftConfig() + + assert peft_config.lora_r == 8 + assert peft_config.lora_alpha == 16 + assert peft_config.lora_dropout == 0.1 + assert peft_config.target_modules == ["q_proj", "v_proj"] + assert peft_config.bias == "none" + assert peft_config.task_type == "CAUSAL_LM" + assert peft_config.peft_type == "LORA" + + def test_custom_values(self): + """Test that PeftConfig accepts custom values.""" + peft_config = PeftConfig( + lora_r=32, + lora_alpha=64, + lora_dropout=0.2, + target_modules=["q_proj", "k_proj", "v_proj", "o_proj"], + bias="all", + task_type="SEQ_2_SEQ_LM", + peft_type="LORA", + ) + + assert peft_config.lora_r == 32 + assert peft_config.lora_alpha == 64 + assert peft_config.lora_dropout == 0.2 + assert peft_config.target_modules == ["q_proj", "k_proj", "v_proj", "o_proj"] + assert peft_config.bias == "all" + assert peft_config.task_type == "SEQ_2_SEQ_LM" + assert peft_config.peft_type == "LORA" + + +class TestTrainerRegistry: + """Test suite for trainer registration in the component registry.""" + + def 
test_both_trainers_registered(self): + """Test that both base and sft trainers are registered.""" + trainer_list = registry.list_trainer_modules() + + assert "base" in trainer_list + assert "sft" in trainer_list + assert len(trainer_list) >= 2 + + def test_registry_returns_dict(self): + """Test that registry returns dict for trainer modules.""" + base_info = registry.get_trainer_module("base") + sft_info = registry.get_trainer_module("sft") + + assert isinstance(base_info, dict) + assert isinstance(sft_info, dict) + + def test_trainer_classes_correct(self): + """Test that trainer classes are correctly stored.""" + base_info = registry.get_trainer_module("base") + sft_info = registry.get_trainer_module("sft") + assert base_info["trainer_cls"] == BaseTrainer["trainer_cls"] + assert sft_info["trainer_cls"] == SFTTrainerModule["trainer_cls"] From cc62a782a9fa8635b734ef6faac95040e5b74f3c Mon Sep 17 00:00:00 2001 From: Dhiraj Kumar Sah Date: Tue, 9 Dec 2025 13:51:43 +0000 Subject: [PATCH 31/60] Minor changes to the trainer class registration was done. Added tests in test_trainer which also check for model integration with trainer class created. Some of the tests can later be moved to end to end integration tests. 
Signed-off-by: Dhiraj Kumar Sah --- .../experimental/core/trainer/sft_trainer.py | 5 - .../experimental/tests/test_trainer.py | 467 ++++++++++++------ 2 files changed, 326 insertions(+), 146 deletions(-) diff --git a/QEfficient/finetune/experimental/core/trainer/sft_trainer.py b/QEfficient/finetune/experimental/core/trainer/sft_trainer.py index f85da2643..3223c5966 100644 --- a/QEfficient/finetune/experimental/core/trainer/sft_trainer.py +++ b/QEfficient/finetune/experimental/core/trainer/sft_trainer.py @@ -9,11 +9,6 @@ from QEfficient.finetune.experimental.core.component_registry import registry from QEfficient.finetune.experimental.core.config_manager import PeftConfig -# def get_default_peft_config(): -# """Get default peft config from config_manager and convert to peft library config.""" -# peft_config_params = PeftConfig() -# return peft_config_params.to_peft_config() - @registry.trainer_module(name="sft", args_cls=SFTConfig, required_kwargs={"peft_config": PeftConfig}) class SFTTrainerModule(SFTTrainer): diff --git a/QEfficient/finetune/experimental/tests/test_trainer.py b/QEfficient/finetune/experimental/tests/test_trainer.py index d82a7a339..dd855433b 100644 --- a/QEfficient/finetune/experimental/tests/test_trainer.py +++ b/QEfficient/finetune/experimental/tests/test_trainer.py @@ -5,17 +5,28 @@ # # ----------------------------------------------------------------------------- +import os +import shutil + import pytest -from peft import IA3Config, LoraConfig +import torch +from datasets import Dataset +from peft import LoraConfig +from transformers import Trainer, TrainingArguments +from trl import SFTConfig, SFTTrainer from QEfficient.finetune.experimental.core.component_registry import registry -from QEfficient.finetune.experimental.core.config_manager import PeftConfig +from QEfficient.finetune.experimental.core.model import HFModel from QEfficient.finetune.experimental.core.trainer.base_trainer import BaseTrainer from 
QEfficient.finetune.experimental.core.trainer.sft_trainer import ( SFTTrainerModule, - get_default_peft_config, ) +LORA_R = 8 +LORA_ALPHA = 16 +LORA_DROPOUT = 0.1 +MAX_LENGTH = 128 + class TestBaseTrainer: """Test suite for BaseTrainer class.""" @@ -36,7 +47,6 @@ def test_base_trainer_info_structure(self): def test_base_trainer_class(self): """Test that BaseTrainer class is correct.""" - from transformers import Trainer, TrainingArguments trainer_info = registry.get_trainer_module("base") trainer_cls = trainer_info["trainer_cls"] @@ -66,7 +76,6 @@ def test_sft_trainer_info_structure(self): def test_sft_trainer_class(self): """Test that SFTTrainerModule class is correct.""" - from trl import SFTConfig, SFTTrainer trainer_info = registry.get_trainer_module("sft") trainer_cls = trainer_info["trainer_cls"] @@ -83,142 +92,6 @@ def test_sft_trainer_required_kwargs(self): assert callable(trainer_info["required_kwargs"]["peft_config"]) -class TestGetDefaultPeftConfig: - """Test suite for get_default_peft_config function.""" - - def test_returns_lora_config(self): - """Test that get_default_peft_config returns a LoraConfig instance.""" - peft_config = get_default_peft_config() - assert isinstance(peft_config, LoraConfig) - - def test_has_correct_defaults(self): - """Test that the returned config has the expected default values.""" - peft_config = get_default_peft_config() - - assert peft_config.r == 8 - assert peft_config.lora_alpha == 16 - assert peft_config.lora_dropout == 0.1 - # target_modules might be a set or list depending on peft version - target_modules = peft_config.target_modules - if isinstance(target_modules, set): - assert target_modules == {"q_proj", "v_proj"} - else: - assert set(target_modules) == {"q_proj", "v_proj"} - assert peft_config.bias == "none" - assert peft_config.task_type == "CAUSAL_LM" - - def test_is_callable(self): - """Test that get_default_peft_config is callable.""" - assert callable(get_default_peft_config) - - def 
test_creates_new_instance_each_call(self): - """Test that each call creates a new config instance.""" - config1 = get_default_peft_config() - config2 = get_default_peft_config() - - # They should be different instances - assert config1 is not config2 - # But have the same values - assert config1.r == config2.r - assert config1.lora_alpha == config2.lora_alpha - - -class TestPeftConfigConversion: - """Test suite for PeftConfig conversion methods.""" - - def test_to_lora_config(self): - """Test that PeftConfig.to_peft_config() correctly converts to LoraConfig.""" - peft_config_params = PeftConfig( - lora_r=16, - lora_alpha=32, - lora_dropout=0.05, - target_modules=["q_proj", "k_proj", "v_proj"], - bias="all", - task_type="CAUSAL_LM", - peft_type="LORA", - ) - - lora_config = peft_config_params.to_peft_config() - - assert isinstance(lora_config, LoraConfig) - assert lora_config.r == 16 - assert lora_config.lora_alpha == 32 - assert lora_config.lora_dropout == 0.05 - # target_modules might be converted to set - target_modules = lora_config.target_modules - if isinstance(target_modules, set): - assert target_modules == {"q_proj", "k_proj", "v_proj"} - else: - assert set(target_modules) == {"q_proj", "k_proj", "v_proj"} - assert lora_config.bias == "all" - assert lora_config.task_type == "CAUSAL_LM" - - def test_to_ia3_config(self): - """Test that PeftConfig.to_peft_config() correctly converts to IA3Config.""" - peft_config_params = PeftConfig( - target_modules=["q_proj", "v_proj"], - task_type="CAUSAL_LM", - peft_type="IA3", - ) - - ia3_config = peft_config_params.to_peft_config() - - assert isinstance(ia3_config, IA3Config) - # target_modules might be converted to set - target_modules = ia3_config.target_modules - if isinstance(target_modules, set): - assert target_modules == {"q_proj", "v_proj"} - else: - assert set(target_modules) == {"q_proj", "v_proj"} - assert ia3_config.task_type == "CAUSAL_LM" - - def test_unsupported_type_raises_error(self): - """Test that 
unsupported peft_type raises ValueError.""" - peft_config_params = PeftConfig(peft_type="UNSUPPORTED") - - with pytest.raises(ValueError) as exc_info: - peft_config_params.to_peft_config() - - assert "Unsupported peft_type: UNSUPPORTED" in str(exc_info.value) - assert "Supported types: 'LORA', 'IA3'" in str(exc_info.value) - - -class TestPeftConfigDefaults: - """Test suite for PeftConfig default values.""" - - def test_default_values(self): - """Test that PeftConfig has correct default values.""" - peft_config = PeftConfig() - - assert peft_config.lora_r == 8 - assert peft_config.lora_alpha == 16 - assert peft_config.lora_dropout == 0.1 - assert peft_config.target_modules == ["q_proj", "v_proj"] - assert peft_config.bias == "none" - assert peft_config.task_type == "CAUSAL_LM" - assert peft_config.peft_type == "LORA" - - def test_custom_values(self): - """Test that PeftConfig accepts custom values.""" - peft_config = PeftConfig( - lora_r=32, - lora_alpha=64, - lora_dropout=0.2, - target_modules=["q_proj", "k_proj", "v_proj", "o_proj"], - bias="all", - task_type="SEQ_2_SEQ_LM", - peft_type="LORA", - ) - - assert peft_config.lora_r == 32 - assert peft_config.lora_alpha == 64 - assert peft_config.lora_dropout == 0.2 - assert peft_config.target_modules == ["q_proj", "k_proj", "v_proj", "o_proj"] - assert peft_config.bias == "all" - assert peft_config.task_type == "SEQ_2_SEQ_LM" - assert peft_config.peft_type == "LORA" - - class TestTrainerRegistry: """Test suite for trainer registration in the component registry.""" @@ -244,3 +117,315 @@ def test_trainer_classes_correct(self): sft_info = registry.get_trainer_module("sft") assert base_info["trainer_cls"] == BaseTrainer["trainer_cls"] assert sft_info["trainer_cls"] == SFTTrainerModule["trainer_cls"] + + +class TestSFTTrainerWithModel: + """Test suite for SFTTrainer integration with model loading.""" + + @pytest.fixture(autouse=True) + def cleanup_output_dirs(self): + """Fixture to clean up test output directories after 
each test.""" + # Setup: yield control to the test + yield + + # Teardown: clean up output directories + output_dirs = ["./test_output", "./test_output_peft"] + for output_dir in output_dirs: + if os.path.exists(output_dir): + try: + shutil.rmtree(output_dir) + print(f"\nCleaned up: {output_dir}") + except Exception as e: + print(f"\nWarning: Failed to clean up {output_dir}: {e}") + + @pytest.fixture + def model_config(self): + """Fixture for basic model configuration.""" + return { + "model_name": "HuggingFaceTB/SmolLM-135M", + "auto_class_name": "AutoModelForCausalLM", + "use_cache": False, + "torch_dtype": "float32", + "attn_implementation": "eager", + "device_map": None, + "use_peft": False, + } + + @pytest.fixture + def peft_model_config(self): + """Fixture for model configuration with PEFT.""" + return { + "model_name": "HuggingFaceTB/SmolLM-135M", + "auto_class_name": "AutoModelForCausalLM", + "use_cache": False, + "torch_dtype": "float32", + "attn_implementation": "eager", + "device_map": None, + "use_peft": True, + "peft_config": { + "lora_r": LORA_R, + "lora_alpha": LORA_ALPHA, + "lora_dropout": LORA_DROPOUT, + "target_modules": ["q_proj", "v_proj"], + "bias": "none", + }, + } + + @pytest.fixture + def dummy_dataset(self): + """Fixture for creating a dummy dataset.""" + + data = { + "text": [ + "This is a test sentence for training.", + "Another example text for the model.", + "Third sample to ensure proper batching.", + ] + } + return Dataset.from_dict(data) + + def test_hf_model_initialization(self, model_config): + """Test that HFModel can be initialized properly.""" + + model = HFModel(**model_config) + assert model is not None + assert model.model_name == model_config["model_name"] + assert model.auto_class_name == model_config["auto_class_name"] + + def test_hf_model_load_model(self, model_config): + """Test that HFModel can load the underlying model.""" + + model = HFModel(**model_config) + loaded_model = model.load_model() + + assert loaded_model 
is not None + assert hasattr(loaded_model, "forward") + assert hasattr(loaded_model, "config") + + def test_hf_model_load_tokenizer(self, model_config): + """Test that HFModel can load the tokenizer.""" + + model = HFModel(**model_config) + tokenizer = model.load_tokenizer() + + assert tokenizer is not None + assert hasattr(tokenizer, "encode") + assert hasattr(tokenizer, "decode") + assert tokenizer.pad_token is not None + + def test_hf_model_with_peft_config(self, peft_model_config): + """Test that HFModel can be initialized with PEFT configuration.""" + + model = HFModel(**peft_model_config) + assert model.use_peft is True + assert model.lora_config is not None + assert model.lora_config.r == LORA_R + assert model.lora_config.lora_alpha == LORA_ALPHA + + def test_model_forward_pass(self, model_config): + """Test that the loaded model can perform a forward pass.""" + + model = HFModel(**model_config) + loaded_model = model.load_model() + tokenizer = model.load_tokenizer() + + # Prepare input + text = "This is a test." 
+ inputs = tokenizer(text, return_tensors="pt") + + # Perform forward pass + with torch.no_grad(): + outputs = loaded_model(**inputs) + + assert outputs is not None + assert hasattr(outputs, "logits") + assert outputs.logits.shape[0] == 1 # batch size + + def test_sft_trainer_instantiation_with_model(self, model_config, dummy_dataset): + """Test that SFTTrainer can be instantiated with a loaded model.""" + + # Load model and tokenizer + hf_model = HFModel(**model_config) + model = hf_model.load_model() + tokenizer = hf_model.load_tokenizer() + + # Create SFT config + sft_config = SFTConfig( + output_dir="./test_output", + max_length=MAX_LENGTH, + per_device_train_batch_size=1, + num_train_epochs=1, + logging_steps=1, + save_strategy="no", + bf16=False, + fp16=False, + ) + + # Get SFTTrainer from registry + trainer_info = registry.get_trainer_module("sft") + trainer_cls = trainer_info["trainer_cls"] + + # Instantiate trainer + trainer = trainer_cls( + model=model, + args=sft_config, + train_dataset=dummy_dataset, + processing_class=tokenizer, + ) + + assert trainer is not None + assert trainer.model is not None + assert trainer.tokenizer is not None + + def test_sft_trainer_with_peft_model(self, peft_model_config, dummy_dataset): + """Test that SFTTrainer works with PEFT-enabled models.""" + + # Load model and tokenizer + hf_model = HFModel(**peft_model_config) + model = hf_model.load_model() + tokenizer = hf_model.load_tokenizer() + + # Get PEFT config + peft_config = hf_model.load_peft_config() + assert peft_config is not None + assert isinstance(peft_config, LoraConfig) + + # Create SFT config + sft_config = SFTConfig( + output_dir="./test_output_peft", + max_length=MAX_LENGTH, + per_device_train_batch_size=1, + num_train_epochs=1, + logging_steps=1, + save_strategy="no", + bf16=False, + fp16=False, + ) + + # Get SFTTrainer from registry + trainer_info = registry.get_trainer_module("sft") + trainer_cls = trainer_info["trainer_cls"] + + # Instantiate trainer with 
PEFT config + trainer = trainer_cls( + model=model, + args=sft_config, + train_dataset=dummy_dataset, + processing_class=tokenizer, + peft_config=peft_config, + ) + + assert trainer is not None + assert trainer.model is not None + + def test_model_training_mode(self, model_config): + """Test that model can be set to training and evaluation modes.""" + + model = HFModel(**model_config) + loaded_model = model.load_model() + + # Test training mode + loaded_model.train() + assert loaded_model.training is True + + # Test evaluation mode + loaded_model.eval() + assert loaded_model.training is False + + def test_model_parameters_accessible(self, model_config): + """Test that model parameters are accessible.""" + + model = HFModel(**model_config) + loaded_model = model.load_model() + + # Check parameters + params = list(loaded_model.parameters()) + assert len(params) > 0 + + # Check named parameters + named_params = dict(loaded_model.named_parameters()) + assert len(named_params) > 0 + + def test_tokenizer_encoding_decoding(self, model_config): + """Test that tokenizer can encode and decode text properly.""" + + model = HFModel(**model_config) + tokenizer = model.load_tokenizer() + + # Test encoding + text = "Hello, world!" 
+ encoded = tokenizer.encode(text) + assert len(encoded) > 0 + + # Test decoding + decoded = tokenizer.decode(encoded) + assert isinstance(decoded, str) + assert len(decoded) > 0 + + def test_model_device_placement(self, model_config): + """Test that model can be moved to different devices.""" + + model = HFModel(**model_config) + loaded_model = model.load_model() + + # Test CPU placement + loaded_model = loaded_model.to("cpu") + assert next(loaded_model.parameters()).device.type == "cpu" + + def test_sft_trainer_train_dataset_required(self, model_config): + """Test that SFTTrainer requires a training dataset.""" + + # Load model and tokenizer + hf_model = HFModel(**model_config) + model = hf_model.load_model() + tokenizer = hf_model.load_tokenizer() + + # Create SFT config + sft_config = SFTConfig( + output_dir="./test_output", + max_length=MAX_LENGTH, + per_device_train_batch_size=1, + num_train_epochs=1, + bf16=False, + fp16=False, + ) + + # Get SFTTrainer from registry + trainer_info = registry.get_trainer_module("sft") + trainer_cls = trainer_info["trainer_cls"] + + # Attempt to instantiate without dataset should raise TypeError + with pytest.raises(TypeError, match="'NoneType' object is not iterable"): + trainer_cls( + model=model, + args=sft_config, + processing_class=tokenizer, + ) + + def test_peft_model_trainable_parameters(self, peft_model_config): + """Test that PEFT model has correct trainable vs total parameters ratio.""" + + # Load model with PEFT configuration + hf_model = HFModel(**peft_model_config) + model = hf_model.load_model() + + # Count total parameters + total_params = sum(p.numel() for p in model.parameters()) + + # Count trainable parameters + trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) + + # Count frozen parameters + frozen_params = total_params - trainable_params + print(f"Total Parameters: {total_params:,}") + print(f"Trainable Parameters: {trainable_params:,}") + print(f"Frozen Parameters: 
{frozen_params:,}") + # Assertions to ensure PEFT is properly configured + assert total_params > 0, "Model should have parameters" + assert trainable_params > 0, "Model should have trainable parameters (PEFT adapters)" + assert frozen_params > 0, "Model should have frozen parameters (base model)" + assert trainable_params < total_params, "Trainable parameters should be less than total (PEFT efficiency)" + # Print info for debugging (will show in pytest output with -v flag) + print(f"\nTotal parameters: {total_params:,}") + print(f"Trainable parameters: {trainable_params:,}") + print(f"Frozen parameters: {frozen_params:,}") From 613865962ea366c39d3a6df8e56e6f126a735e92 Mon Sep 17 00:00:00 2001 From: Dhiraj Kumar Sah Date: Thu, 18 Dec 2025 09:05:34 +0000 Subject: [PATCH 32/60] Addressed comments. Added the modification to test on custom num_layers for a model. Signed-off-by: Dhiraj Kumar Sah --- .../experimental/tests/test_trainer.py | 126 +----------------- 1 file changed, 4 insertions(+), 122 deletions(-) diff --git a/QEfficient/finetune/experimental/tests/test_trainer.py b/QEfficient/finetune/experimental/tests/test_trainer.py index dd855433b..b22d02bfc 100644 --- a/QEfficient/finetune/experimental/tests/test_trainer.py +++ b/QEfficient/finetune/experimental/tests/test_trainer.py @@ -145,10 +145,11 @@ def model_config(self): "model_name": "HuggingFaceTB/SmolLM-135M", "auto_class_name": "AutoModelForCausalLM", "use_cache": False, - "torch_dtype": "float32", + "torch_dtype": "float16", "attn_implementation": "eager", "device_map": None, "use_peft": False, + "model_config_kwargs": {"num_hidden_layers": 1}, } @pytest.fixture @@ -158,10 +159,11 @@ def peft_model_config(self): "model_name": "HuggingFaceTB/SmolLM-135M", "auto_class_name": "AutoModelForCausalLM", "use_cache": False, - "torch_dtype": "float32", + "torch_dtype": "float16", "attn_implementation": "eager", "device_map": None, "use_peft": True, + "model_config_kwargs": {"num_hidden_layers": 1}, "peft_config": 
{ "lora_r": LORA_R, "lora_alpha": LORA_ALPHA, @@ -184,44 +186,6 @@ def dummy_dataset(self): } return Dataset.from_dict(data) - def test_hf_model_initialization(self, model_config): - """Test that HFModel can be initialized properly.""" - - model = HFModel(**model_config) - assert model is not None - assert model.model_name == model_config["model_name"] - assert model.auto_class_name == model_config["auto_class_name"] - - def test_hf_model_load_model(self, model_config): - """Test that HFModel can load the underlying model.""" - - model = HFModel(**model_config) - loaded_model = model.load_model() - - assert loaded_model is not None - assert hasattr(loaded_model, "forward") - assert hasattr(loaded_model, "config") - - def test_hf_model_load_tokenizer(self, model_config): - """Test that HFModel can load the tokenizer.""" - - model = HFModel(**model_config) - tokenizer = model.load_tokenizer() - - assert tokenizer is not None - assert hasattr(tokenizer, "encode") - assert hasattr(tokenizer, "decode") - assert tokenizer.pad_token is not None - - def test_hf_model_with_peft_config(self, peft_model_config): - """Test that HFModel can be initialized with PEFT configuration.""" - - model = HFModel(**peft_model_config) - assert model.use_peft is True - assert model.lora_config is not None - assert model.lora_config.r == LORA_R - assert model.lora_config.lora_alpha == LORA_ALPHA - def test_model_forward_pass(self, model_config): """Test that the loaded model can perform a forward pass.""" @@ -318,60 +282,6 @@ def test_sft_trainer_with_peft_model(self, peft_model_config, dummy_dataset): assert trainer is not None assert trainer.model is not None - def test_model_training_mode(self, model_config): - """Test that model can be set to training and evaluation modes.""" - - model = HFModel(**model_config) - loaded_model = model.load_model() - - # Test training mode - loaded_model.train() - assert loaded_model.training is True - - # Test evaluation mode - loaded_model.eval() - 
assert loaded_model.training is False - - def test_model_parameters_accessible(self, model_config): - """Test that model parameters are accessible.""" - - model = HFModel(**model_config) - loaded_model = model.load_model() - - # Check parameters - params = list(loaded_model.parameters()) - assert len(params) > 0 - - # Check named parameters - named_params = dict(loaded_model.named_parameters()) - assert len(named_params) > 0 - - def test_tokenizer_encoding_decoding(self, model_config): - """Test that tokenizer can encode and decode text properly.""" - - model = HFModel(**model_config) - tokenizer = model.load_tokenizer() - - # Test encoding - text = "Hello, world!" - encoded = tokenizer.encode(text) - assert len(encoded) > 0 - - # Test decoding - decoded = tokenizer.decode(encoded) - assert isinstance(decoded, str) - assert len(decoded) > 0 - - def test_model_device_placement(self, model_config): - """Test that model can be moved to different devices.""" - - model = HFModel(**model_config) - loaded_model = model.load_model() - - # Test CPU placement - loaded_model = loaded_model.to("cpu") - assert next(loaded_model.parameters()).device.type == "cpu" - def test_sft_trainer_train_dataset_required(self, model_config): """Test that SFTTrainer requires a training dataset.""" @@ -401,31 +311,3 @@ def test_sft_trainer_train_dataset_required(self, model_config): args=sft_config, processing_class=tokenizer, ) - - def test_peft_model_trainable_parameters(self, peft_model_config): - """Test that PEFT model has correct trainable vs total parameters ratio.""" - - # Load model with PEFT configuration - hf_model = HFModel(**peft_model_config) - model = hf_model.load_model() - - # Count total parameters - total_params = sum(p.numel() for p in model.parameters()) - - # Count trainable parameters - trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) - - # Count frozen parameters - frozen_params = total_params - trainable_params - print(f"Total 
Parameters: {total_params:,}") - print(f"Trainable Parameters: {trainable_params:,}") - print(f"Frozen Parameters: {frozen_params:,}") - # Assertions to ensure PEFT is properly configured - assert total_params > 0, "Model should have parameters" - assert trainable_params > 0, "Model should have trainable parameters (PEFT adapters)" - assert frozen_params > 0, "Model should have frozen parameters (base model)" - assert trainable_params < total_params, "Trainable parameters should be less than total (PEFT efficiency)" - # Print info for debugging (will show in pytest output with -v flag) - print(f"\nTotal parameters: {total_params:,}") - print(f"Trainable parameters: {trainable_params:,}") - print(f"Frozen parameters: {frozen_params:,}") From cafb00c6ef28aaeb37314e08d7d46a87e35f0272 Mon Sep 17 00:00:00 2001 From: Dhiraj Kumar Sah Date: Fri, 2 Jan 2026 11:49:25 +0000 Subject: [PATCH 33/60] Rebased to update the branch with mainline. Addressed comments. Added a check for validation of a PEFT Model being setup properly. Modified BaseTrainer class to take peft_config and initialize the PEFT model as done by default in SFTTrainer. Updated test_trainer to also check for the above mentioned changes. In summary, this PR contains the setup for BaseTrainer and SFTTrainerModule along with tests to anbale usage of Trainers for PEFT Finetuning. 
Signed-off-by: Dhiraj Kumar Sah --- .../experimental/core/trainer/base_trainer.py | 69 ++++- .../experimental/tests/test_trainer.py | 258 +++++++++++++++--- 2 files changed, 286 insertions(+), 41 deletions(-) diff --git a/QEfficient/finetune/experimental/core/trainer/base_trainer.py b/QEfficient/finetune/experimental/core/trainer/base_trainer.py index 6d4cc5140..0a3c50f7f 100644 --- a/QEfficient/finetune/experimental/core/trainer/base_trainer.py +++ b/QEfficient/finetune/experimental/core/trainer/base_trainer.py @@ -4,11 +4,76 @@ # SPDX-License-Identifier: BSD-3-Clause # # ----------------------------------------------------------------------------- +from typing import Optional + +from peft import get_peft_model from transformers import Trainer, TrainingArguments from QEfficient.finetune.experimental.core.component_registry import registry +from QEfficient.finetune.experimental.core.config_manager import PeftConfig -@registry.trainer_module(name="base", args_cls=TrainingArguments, required_kwargs={}) +@registry.trainer_module(name="base", args_cls=TrainingArguments, required_kwargs={"peft_config": PeftConfig}) class BaseTrainer(Trainer): - pass # Just using the standard Trainer + """ + Extended Trainer class that supports PEFT (Parameter-Efficient Fine-Tuning). + + This trainer extends the standard HuggingFace Trainer to optionally apply + PEFT configurations to the model before training. + """ + + def __init__( + self, + model=None, + args=None, + data_collator=None, + train_dataset=None, + eval_dataset=None, + processing_class=None, + model_init=None, + compute_metrics=None, + callbacks=None, + optimizers=(None, None), + preprocess_logits_for_metrics=None, + peft_config: Optional[PeftConfig] = None, + **kwargs, + ): + """ + Initialize the BaseTrainer with optional PEFT support. 
+ + Args: + model: The model to train + args: Training arguments + data_collator: Data collator for batching + train_dataset: Training dataset + eval_dataset: Evaluation dataset + processing_class: Tokenizer or processor + model_init: Function to initialize model + compute_metrics: Function to compute metrics + callbacks: List of callbacks + optimizers: Tuple of (optimizer, scheduler) + preprocess_logits_for_metrics: Function to preprocess logits + peft_config: Optional PEFT configuration. If provided, the model will be + wrapped with PEFT before training. + **kwargs: Additional keyword arguments + """ + # Apply PEFT to model if peft_config is provided + if peft_config is not None and model is not None: + model = get_peft_model(model, peft_config) + model.print_trainable_parameters() + + # Initialize the parent Trainer class + super().__init__( + model=model, + args=args, + data_collator=data_collator, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + processing_class=processing_class, + model_init=model_init, + compute_metrics=compute_metrics, + callbacks=callbacks, + optimizers=optimizers, + preprocess_logits_for_metrics=preprocess_logits_for_metrics, + **kwargs, + ) diff --git a/QEfficient/finetune/experimental/tests/test_trainer.py b/QEfficient/finetune/experimental/tests/test_trainer.py index b22d02bfc..20af61e36 100644 --- a/QEfficient/finetune/experimental/tests/test_trainer.py +++ b/QEfficient/finetune/experimental/tests/test_trainer.py @@ -11,12 +11,12 @@ import pytest import torch from datasets import Dataset -from peft import LoraConfig +from peft import LoraConfig, PeftModel from transformers import Trainer, TrainingArguments from trl import SFTConfig, SFTTrainer -from QEfficient.finetune.experimental.core.component_registry import registry -from QEfficient.finetune.experimental.core.model import HFModel +from QEfficient.finetune.experimental.core.component_registry import ComponentFactory, registry +from 
QEfficient.finetune.experimental.core.model import HFModel # noqa: F401 - needed for registration from QEfficient.finetune.experimental.core.trainer.base_trainer import BaseTrainer from QEfficient.finetune.experimental.core.trainer.sft_trainer import ( SFTTrainerModule, @@ -56,6 +56,13 @@ def test_base_trainer_class(self): assert issubclass(trainer_cls, Trainer) assert trainer_info["args_cls"] == TrainingArguments + def test_base_trainer_required_kwargs(self): + """Test that BaseTrainer has peft_config in required_kwargs.""" + trainer_info = registry.get_trainer_module("base") + + assert "peft_config" in trainer_info["required_kwargs"] + assert callable(trainer_info["required_kwargs"]["peft_config"]) + class TestSFTTrainerModule: """Test suite for SFTTrainerModule class.""" @@ -119,8 +126,8 @@ def test_trainer_classes_correct(self): assert sft_info["trainer_cls"] == SFTTrainerModule["trainer_cls"] -class TestSFTTrainerWithModel: - """Test suite for SFTTrainer integration with model loading.""" +class TestBaseTrainerWithModel: + """Test suite for BaseTrainer integration with model loading and PEFT.""" @pytest.fixture(autouse=True) def cleanup_output_dirs(self): @@ -129,7 +136,7 @@ def cleanup_output_dirs(self): yield # Teardown: clean up output directories - output_dirs = ["./test_output", "./test_output_peft"] + output_dirs = ["./test_output", "./test_output_peft", "./test_output_base", "./test_output_base_peft"] for output_dir in output_dirs: if os.path.exists(output_dir): try: @@ -148,13 +155,182 @@ def model_config(self): "torch_dtype": "float16", "attn_implementation": "eager", "device_map": None, - "use_peft": False, - "model_config_kwargs": {"num_hidden_layers": 1}, + "num_hidden_layers": 1, } @pytest.fixture def peft_model_config(self): - """Fixture for model configuration with PEFT.""" + """Fixture for PEFT configuration.""" + return { + "r": LORA_R, + "lora_alpha": LORA_ALPHA, + "lora_dropout": LORA_DROPOUT, + "target_modules": ["q_proj", "v_proj"], + 
"bias": "none", + } + + @pytest.fixture + def dummy_dataset(self): + """Fixture for creating a dummy dataset.""" + data = { + "text": [ + "This is a test sentence for training.", + "Another example text for the model.", + "Third sample to ensure proper batching.", + ] + } + return Dataset.from_dict(data) + + def test_base_trainer_instantiation_with_model(self, model_config, dummy_dataset): + """Test that BaseTrainer can be instantiated with a loaded model.""" + # Load model and tokenizer + model_name = model_config.pop("model_name") + hf_model = ComponentFactory.create_model("hf", model_name, **model_config) + model = hf_model.model + tokenizer = hf_model.tokenizer + + # Create training config + training_args = TrainingArguments( + output_dir="./test_output_base", + per_device_train_batch_size=1, + num_train_epochs=1, + logging_steps=1, + save_strategy="no", + bf16=False, + fp16=True, + ) + + # Get BaseTrainer from registry + trainer_info = registry.get_trainer_module("base") + trainer_cls = trainer_info["trainer_cls"] + + # Instantiate trainer without PEFT + trainer = trainer_cls( + model=model, + args=training_args, + train_dataset=dummy_dataset, + processing_class=tokenizer, + ) + + assert trainer is not None + assert trainer.model is not None + assert trainer.processing_class is not None + + def test_base_trainer_with_peft_model(self, model_config, peft_model_config, dummy_dataset): + """Test that BaseTrainer works with PEFT-enabled models.""" + # Load model and tokenizer + model_name = model_config.pop("model_name") + hf_model = ComponentFactory.create_model("hf", model_name, **model_config) + model = hf_model.model + tokenizer = hf_model.tokenizer + + # Load PEFT Config + peft_config = LoraConfig(**peft_model_config) + + # Create training config + training_args = TrainingArguments( + output_dir="./test_output_base_peft", + per_device_train_batch_size=1, + num_train_epochs=1, + logging_steps=1, + save_strategy="no", + bf16=False, + fp16=True, + ) + + # Get 
BaseTrainer from registry + trainer_info = registry.get_trainer_module("base") + trainer_cls = trainer_info["trainer_cls"] + + # Instantiate trainer with PEFT config + trainer = trainer_cls( + model=model, + args=training_args, + train_dataset=dummy_dataset, + processing_class=tokenizer, + peft_config=peft_config, + ) + + assert trainer is not None + assert trainer.model is not None + + # Verify that the model is now a PEFT model + assert isinstance(trainer.model, PeftModel), "Model should be wrapped as a PeftModel" + + # Verify that the model has the expected PEFT config + assert hasattr(trainer.model, "peft_config"), "Model should have peft_config attribute" + assert trainer.model.peft_config is not None, "PEFT config should not be None" + + # Verify trainable parameters are reduced (PEFT should make only a subset trainable) + trainable_params = sum(p.numel() for p in trainer.model.parameters() if p.requires_grad) + total_params = sum(p.numel() for p in trainer.model.parameters()) + + assert trainable_params < total_params, "PEFT should reduce the number of trainable parameters" + print(f"\nTrainable params: {trainable_params:,} / Total params: {total_params:,}") + + def test_base_trainer_without_peft_config(self, model_config, dummy_dataset): + """Test that BaseTrainer works without PEFT config (standard training).""" + # Load model and tokenizer + model_name = model_config.pop("model_name") + hf_model = ComponentFactory.create_model("hf", model_name, **model_config) + model = hf_model.model + tokenizer = hf_model.tokenizer + + # Create training config + training_args = TrainingArguments( + output_dir="./test_output_base", + per_device_train_batch_size=1, + num_train_epochs=1, + logging_steps=1, + save_strategy="no", + bf16=False, + fp16=True, + ) + + # Get BaseTrainer from registry + trainer_info = registry.get_trainer_module("base") + trainer_cls = trainer_info["trainer_cls"] + + # Instantiate trainer without PEFT config + trainer = trainer_cls( + model=model, 
+ args=training_args, + train_dataset=dummy_dataset, + processing_class=tokenizer, + peft_config=None, # Explicitly pass None + ) + + assert trainer is not None + assert trainer.model is not None + + # Verify that the model is NOT a PEFT model + assert not isinstance(trainer.model, PeftModel), ( + "Model should not be wrapped as a PeftModel when peft_config is None" + ) + + +class TestSFTTrainerWithModel: + """Test suite for SFTTrainer integration with model loading.""" + + @pytest.fixture(autouse=True) + def cleanup_output_dirs(self): + """Fixture to clean up test output directories after each test.""" + # Setup: yield control to the test + yield + + # Teardown: clean up output directories + output_dirs = ["./test_output", "./test_output_peft"] + for output_dir in output_dirs: + if os.path.exists(output_dir): + try: + shutil.rmtree(output_dir) + print(f"\nCleaned up: {output_dir}") + except Exception as e: + print(f"\nWarning: Failed to clean up {output_dir}: {e}") + + @pytest.fixture + def model_config(self): + """Fixture for basic model configuration.""" return { "model_name": "HuggingFaceTB/SmolLM-135M", "auto_class_name": "AutoModelForCausalLM", @@ -162,15 +338,18 @@ def peft_model_config(self): "torch_dtype": "float16", "attn_implementation": "eager", "device_map": None, - "use_peft": True, - "model_config_kwargs": {"num_hidden_layers": 1}, - "peft_config": { - "lora_r": LORA_R, - "lora_alpha": LORA_ALPHA, - "lora_dropout": LORA_DROPOUT, - "target_modules": ["q_proj", "v_proj"], - "bias": "none", - }, + "num_hidden_layers": 1, + } + + @pytest.fixture + def peft_model_config(self): + """Fixture for PEFT configuration.""" + return { + "lora_r": LORA_R, + "lora_alpha": LORA_ALPHA, + "lora_dropout": LORA_DROPOUT, + "target_modules": ["q_proj", "v_proj"], + "bias": "none", } @pytest.fixture @@ -189,9 +368,10 @@ def dummy_dataset(self): def test_model_forward_pass(self, model_config): """Test that the loaded model can perform a forward pass.""" - model = 
HFModel(**model_config) - loaded_model = model.load_model() - tokenizer = model.load_tokenizer() + model_name = model_config.pop("model_name") + hf_model = ComponentFactory.create_model("hf", model_name, **model_config) + loaded_model = hf_model.model + tokenizer = hf_model.tokenizer # Prepare input text = "This is a test." @@ -209,9 +389,10 @@ def test_sft_trainer_instantiation_with_model(self, model_config, dummy_dataset) """Test that SFTTrainer can be instantiated with a loaded model.""" # Load model and tokenizer - hf_model = HFModel(**model_config) - model = hf_model.load_model() - tokenizer = hf_model.load_tokenizer() + model_name = model_config.pop("model_name") + hf_model = ComponentFactory.create_model("hf", model_name, **model_config) + model = hf_model.model + tokenizer = hf_model.tokenizer # Create SFT config sft_config = SFTConfig( @@ -222,7 +403,7 @@ def test_sft_trainer_instantiation_with_model(self, model_config, dummy_dataset) logging_steps=1, save_strategy="no", bf16=False, - fp16=False, + fp16=True, ) # Get SFTTrainer from registry @@ -241,18 +422,16 @@ def test_sft_trainer_instantiation_with_model(self, model_config, dummy_dataset) assert trainer.model is not None assert trainer.tokenizer is not None - def test_sft_trainer_with_peft_model(self, peft_model_config, dummy_dataset): + def test_sft_trainer_with_peft_model(self, model_config, peft_model_config, dummy_dataset): """Test that SFTTrainer works with PEFT-enabled models.""" # Load model and tokenizer - hf_model = HFModel(**peft_model_config) - model = hf_model.load_model() - tokenizer = hf_model.load_tokenizer() - - # Get PEFT config - peft_config = hf_model.load_peft_config() - assert peft_config is not None - assert isinstance(peft_config, LoraConfig) + model_name = model_config.pop("model_name") + hf_model = ComponentFactory.create_model("hf", model_name, **model_config) + model = hf_model.model + # Load PEFT Config + peft_config = LoraConfig(peft_model_config) + tokenizer = 
hf_model.tokenizer # Create SFT config sft_config = SFTConfig( @@ -263,7 +442,7 @@ def test_sft_trainer_with_peft_model(self, peft_model_config, dummy_dataset): logging_steps=1, save_strategy="no", bf16=False, - fp16=False, + fp16=True, ) # Get SFTTrainer from registry @@ -286,9 +465,10 @@ def test_sft_trainer_train_dataset_required(self, model_config): """Test that SFTTrainer requires a training dataset.""" # Load model and tokenizer - hf_model = HFModel(**model_config) - model = hf_model.load_model() - tokenizer = hf_model.load_tokenizer() + model_name = model_config.pop("model_name") + hf_model = ComponentFactory.create_model("hf", model_name, **model_config) + model = hf_model.model + tokenizer = hf_model.tokenizer # Create SFT config sft_config = SFTConfig( @@ -297,7 +477,7 @@ def test_sft_trainer_train_dataset_required(self, model_config): per_device_train_batch_size=1, num_train_epochs=1, bf16=False, - fp16=False, + fp16=True, ) # Get SFTTrainer from registry From ec845cbf0f951648a14cad06215f5c2cb5875ee1 Mon Sep 17 00:00:00 2001 From: Ann Kuruvilla Date: Mon, 1 Dec 2025 12:03:00 +0530 Subject: [PATCH 34/60] [QEff.Finetuning]CI enablement for Fine-Tuning (#629) Split testcase into functional and loss assertion, and enable on CI Reference metrics data is updated to latest. 
--------- Signed-off-by: Ann Kuruvilla Signed-off-by: Tanisha Co-authored-by: Tanisha Signed-off-by: Dhiraj Kumar Sah --- scripts/Jenkinsfile | 2 +- tests/finetune/reference_data.py | 334 +++++++++++++++---------------- tests/finetune/test_finetune.py | 224 ++++++++++++++------- 3 files changed, 325 insertions(+), 235 deletions(-) diff --git a/scripts/Jenkinsfile b/scripts/Jenkinsfile index 683ef5018..48fb03fc8 100644 --- a/scripts/Jenkinsfile +++ b/scripts/Jenkinsfile @@ -25,7 +25,6 @@ pipeline { pip install junitparser pytest-xdist && pip install librosa==0.10.2 soundfile==0.13.1 && #packages needed to load example for whisper testing pip install --extra-index-url https://download.pytorch.org/whl/cpu timm==1.0.14 torchvision==0.22.0+cpu einops==0.8.1 && #packages to load VLMs - pip install /opt/qti-aic/integrations/torch_qaic/py310/torch_qaic-0.1.0-cp310-cp310-linux_x86_64.whl && # For finetuning tests rm -rf QEfficient" ''' } @@ -168,6 +167,7 @@ pipeline { sudo docker exec ${BUILD_TAG} bash -c " cd /efficient-transformers && . 
preflight_qeff/bin/activate && + pip install /opt/qti-aic/integrations/torch_qaic/py310/torch_qaic-0.1.0-cp310-cp310-linux_x86_64.whl && mkdir -p $PWD/cli_qaic_finetuning && export TOKENIZERS_PARALLELISM=false && export QEFF_HOME=$PWD/cli_qaic_finetuning && diff --git a/tests/finetune/reference_data.py b/tests/finetune/reference_data.py index a2a5438f5..c94c03b0b 100644 --- a/tests/finetune/reference_data.py +++ b/tests/finetune/reference_data.py @@ -13,206 +13,206 @@ "llama_3.2_1B_config_alpaca_single_device": { "description": "Baseline for Llama on Alpaca single-device", "train_step_losses": [ - 1.5112206935882568, - 1.2211230993270874, - 1.9942185878753662, - 2.093623161315918, - 0.9168124198913574, - 1.2125635147094727, - 0.3648962676525116, - 1.6231939792633057, - 0.8259601593017578, - 0.7741442918777466, - 1.7359141111373901, - 2.118462085723877, - 2.061161994934082, - 0.8256913423538208, - 0.8088029623031616, - 1.761340618133545, - 1.6828027963638306, - 1.3538823127746582, - 2.0672550201416016, - 3.1532647609710693, + 1.5110896825790405, + 1.2206485271453857, + 1.9950776100158691, + 2.091615676879883, + 0.9182446599006653, + 1.1993569135665894, + 0.36413607001304626, + 1.6241482496261597, + 0.8270177245140076, + 0.7749958634376526, + 1.73696768283844, + 2.120077610015869, + 2.061460256576538, + 0.8267984390258789, + 0.8105809688568115, + 1.7627557516098022, + 1.6819559335708618, + 1.3528242111206055, + 2.0654125213623047, + 3.156151294708252, ], "eval_step_losses": [ - 1.462059736251831, - 0.24527676403522491, - 1.046107292175293, - 1.6403586864471436, - 1.395291805267334, - 2.8664817810058594, - 1.035412311553955, - 1.8670039176940918, - 3.8079662322998047, - 0.6516809463500977, + 1.4607517719268799, + 0.24302150309085846, + 1.0471211671829224, + 1.642044186592102, + 1.3949533700942993, + 2.8850066661834717, + 1.0366586446762085, + 1.8661959171295166, + 3.81632924079895, + 0.6577113270759583, ], "train_step_metrics": [ - 4.532259941101074, - 
3.390994071960449, - 7.34645938873291, - 8.114261627197266, - 2.5013046264648438, - 3.3620924949645996, - 1.4403645992279053, - 5.069255828857422, - 2.2840728759765625, - 2.1687355041503906, - 5.674112319946289, - 8.318334579467773, - 7.855090141296387, - 2.283458948135376, - 2.2452187538146973, - 5.820234775543213, - 5.380615711212158, - 3.872429847717285, - 7.903097629547119, - 23.412376403808594, + 4.531666278839111, + 3.389385223388672, + 7.352773189544678, + 8.09798812866211, + 2.504889488220215, + 3.3179824352264404, + 1.43927001953125, + 5.074095249176025, + 2.286489486694336, + 2.1705832481384277, + 5.680093288421631, + 8.33178424835205, + 7.857433319091797, + 2.2859883308410645, + 2.2492144107818604, + 5.828476905822754, + 5.376060962677002, + 3.8683345317840576, + 7.8885498046875, + 23.480052947998047, ], "eval_step_metrics": [ # steps 0-9 - 4.31483793258667, - 1.2779749631881714, - 2.8465487957000732, - 5.157018661499023, - 4.036152362823486, - 17.575077056884766, - 2.816267251968384, - 6.468885898590088, - 45.05870819091797, - 1.9187631607055664, + 4.309197902679443, + 1.27509605884552, + 2.8494362831115723, + 5.1657185554504395, + 4.034786224365234, + 17.9036865234375, + 2.819779396057129, + 6.463661193847656, + 45.437110900878906, + 1.9303690195083618, ], }, # Scenario 2: Single-device llama 3.2-1B training on GSM8k dataset. 
"llama_3.2_1B_config_gsm8k_single_device": { "description": "Baseline for Llama on GSM8k single-device", "train_step_losses": [ - 2.250276803970337, - 2.3231687545776367, - 1.9379945993423462, - 1.5981022119522095, - 1.9867562055587769, - 1.4573354721069336, - 1.8969658613204956, - 1.2177824974060059, - 1.6489791870117188, - 1.5380687713623047, - 1.4025083780288696, - 1.5301083326339722, - 1.6858205795288086, - 1.383747935295105, - 1.7968919277191162, - 1.4075607061386108, - 1.6447738409042358, - 1.2807793617248535, - 0.8450672030448914, - 1.5795941352844238, + 2.250361204147339, + 2.3252110481262207, + 1.9360781908035278, + 1.5984115600585938, + 1.9874038696289062, + 1.4579044580459595, + 1.8975679874420166, + 1.2175723314285278, + 1.6473736763000488, + 1.537960410118103, + 1.4019465446472168, + 1.5310447216033936, + 1.6878201961517334, + 1.3849903345108032, + 1.7976438999176025, + 1.4060133695602417, + 1.646375060081482, + 1.2835280895233154, + 0.8465587496757507, + 1.5783095359802246, ], "eval_step_losses": [ - 1.7081595659255981, - 1.719305157661438, - 1.153528094291687, - 2.0051634311676025, - 1.3372926712036133, - 1.3009852170944214, - 1.2207027673721313, - 1.3452664613723755, - 1.329830288887024, - 1.307450532913208, + 1.707140326499939, + 1.7226355075836182, + 1.1531383991241455, + 2.0035903453826904, + 1.3362350463867188, + 1.3013248443603516, + 1.2195535898208618, + 1.3454742431640625, + 1.3299248218536377, + 1.3073854446411133, ], "train_step_metrics": [ - 9.490362167358398, - 10.207969665527344, - 6.944809913635254, - 4.943641662597656, - 7.291841506958008, - 4.294501304626465, - 6.6656389236450195, - 3.3796849250793457, - 5.201667308807373, - 4.655590534210205, - 4.065384864807129, - 4.618677139282227, - 5.396877765655518, - 3.989826202392578, - 6.030873775482178, - 4.0859761238098145, - 5.179838180541992, - 3.5994436740875244, - 2.328134298324585, - 4.852985858917236, + 9.49116325378418, + 10.228837966918945, + 6.93151330947876, + 4.945170879364014, + 
7.296566009521484, + 4.296945571899414, + 6.66965389251709, + 3.378974676132202, + 5.193322658538818, + 4.655086040496826, + 4.063101291656494, + 4.623003959655762, + 5.407680034637451, + 3.994786262512207, + 6.0354108810424805, + 4.0796589851379395, + 5.188138961791992, + 3.60935115814209, + 2.3316092491149902, + 4.846755504608154, ], "eval_step_metrics": [ # steps 0-9 - 5.518795013427734, - 5.580649375915527, - 3.1693549156188965, - 7.42730712890625, - 3.8087174892425537, - 3.672913074493408, - 3.38956880569458, - 3.8392088413238525, - 3.7804012298583984, - 3.6967368125915527, + 5.5131731033325195, + 5.599266052246094, + 3.1681201457977295, + 7.415632247924805, + 3.8046915531158447, + 3.674160957336426, + 3.3856759071350098, + 3.8400065898895264, + 3.7807586193084717, + 3.69649600982666, ], }, # Scenario 3: Single-device google-bert/bert-base-uncased training on IMDB dataset. "bert_base_uncased_config_imdb_single_device": { "description": "Baseline for google-bert/bert-base-uncased on IMDB single-device", "train_step_losses": [ - 0.357421875, - 0.546875, - 0.98486328125, - 0.35302734375, - 1.23828125, - 0.60791015625, - 0.44384765625, - 0.791015625, - 0.7861328125, - 0.51318359375, - 0.50244140625, - 0.90087890625, - 0.8818359375, - 0.86279296875, - 0.6396484375, - 0.49267578125, - 0.97119140625, - 0.7451171875, - 0.798828125, - 0.7080078125, + 0.390625, + 0.51220703125, + 0.9208984375, + 0.4052734375, + 1.1640625, + 0.6533203125, + 0.5087890625, + 0.76171875, + 0.63525390625, + 0.50146484375, + 0.5439453125, + 0.947265625, + 0.89013671875, + 0.80419921875, + 0.6533203125, + 0.4580078125, + 0.92041015625, + 0.7412109375, + 0.7197265625, + 0.62158203125, ], "eval_step_losses": [ - 0.634765625, - 0.8173828125, + 0.6044921875, + 0.798828125, 0.9072265625, - 0.7177734375, - 0.59423828125, - 0.69921875, - 0.7109375, - 0.7216796875, - 0.6064453125, - 0.7041015625, + 0.70361328125, + 0.59912109375, + 0.66357421875, + 0.6962890625, + 0.75390625, + 0.61328125, + 
0.6806640625, ], "train_step_metrics": [ 1.0, 1.0, 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.449951171875, - 0.4091796875, + 0.49999988079071045, + 0.49999988079071045, + 0.5, + 0.5000002384185791, + 0.5000002384185791, + 0.6250002384185791, + 0.6249998807907104, + 0.625, + 0.6000000238418579, + 0.5833332538604736, + 0.5714285373687744, + 0.5714285373687744, + 0.5714285373687744, + 0.5625, + 0.555555522441864, + 0.5055557489395142, + 0.5101010203361511, ], - "eval_step_metrics": [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0], + "eval_step_metrics": [1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0], }, # Scenario 4: Distributed google-bert/bert-base-uncased training (world_size=2) "bert_base_uncased_config_imdb_distributed_ws2": { diff --git a/tests/finetune/test_finetune.py b/tests/finetune/test_finetune.py index 300ade704..dc9acf1ca 100644 --- a/tests/finetune/test_finetune.py +++ b/tests/finetune/test_finetune.py @@ -21,7 +21,7 @@ from tests.finetune import constants as constant from tests.finetune import reference_data as ref_data -alpaca_json_path = os.path.join(os.getcwd(), "alpaca_data.json") +alpaca_json_path = os.path.join(os.getcwd(), "./dataset/alpaca_data.json") def clean_up(path): @@ -34,7 +34,8 @@ def clean_up(path): def download_alpaca(): alpaca_url = "https://raw.githubusercontent.com/tatsu-lab/stanford_alpaca/refs/heads/main/alpaca_data.json" response = requests.get(alpaca_url) - + # Create directory if it doesn't exist + os.makedirs(os.path.dirname(alpaca_json_path), exist_ok=True) with open(alpaca_json_path, "wb") as f: f.write(response.content) @@ -140,15 +141,7 @@ def assert_list_close(ref_list, actual_list, atol, name, scenario_key, current_w ] -@pytest.mark.skip() # remove when it's clear why diff val_step_loss values are observed in diff runs on existing code (even without PR #478 changes) -@pytest.mark.cli -@pytest.mark.on_qaic -@pytest.mark.finetune 
-@pytest.mark.parametrize( - "model_name,task_mode,max_eval_step,max_train_step,dataset_name,data_path,intermediate_step_save,context_length,run_validation,use_peft,device,scenario_key", # This parameter will be used to look up reference data - configs, -) -def test_finetune( +def train_function( model_name, task_mode, max_eval_step, @@ -211,93 +204,190 @@ def test_finetune( download_alpaca() results = finetune(**kwargs) + all_ref_metrices = { + "ref_train_losses": ref_train_losses, + "ref_eval_losses": ref_eval_losses, + "ref_train_metrics": ref_train_metrics, + "ref_eval_metrics": ref_eval_metrics, + } - # Assertions for step-level values using the helper function - assert_list_close( - ref_train_losses, - results["train_step_loss"], - constant.LOSS_ATOL, - "Train Step Losses", - scenario_key, - current_world_size, - current_rank, - ) - assert_list_close( - ref_eval_losses, - results["eval_step_loss"], - constant.LOSS_ATOL, - "Eval Step Losses", - scenario_key, - current_world_size, - current_rank, - ) - assert_list_close( - ref_train_metrics, - results["train_step_metric"], - constant.METRIC_ATOL, - "Train Step Metrics", - scenario_key, - current_world_size, - current_rank, - ) - assert_list_close( - ref_eval_metrics, - results["eval_step_metric"], - constant.METRIC_ATOL, - "Eval Step Metrics", + all_config_spy = { + "train_config_spy": train_config_spy, + "generate_dataset_config_spy": generate_dataset_config_spy, + "generate_peft_config_spy": generate_peft_config_spy, + "get_dataloader_kwargs_spy": get_dataloader_kwargs_spy, + "update_config_spy": update_config_spy, + "get_custom_data_collator_spy": get_custom_data_collator_spy, + "get_preprocessed_dataset_spy": get_preprocessed_dataset_spy, + "get_longest_seq_length_spy": get_longest_seq_length_spy, + "print_model_size_spy": print_model_size_spy, + "train_spy": train_spy, + "current_world_size": current_world_size, + "current_rank": current_rank, + } + return results, all_ref_metrices, all_config_spy + + 
+@pytest.mark.cli +@pytest.mark.on_qaic +@pytest.mark.finetune +@pytest.mark.parametrize( + "model_name,task_mode,max_eval_step,max_train_step,dataset_name,data_path,intermediate_step_save,context_length,run_validation,use_peft,device,scenario_key", # This parameter will be used to look up reference data + configs, +) +def test_finetune_functional( + model_name, + task_mode, + max_eval_step, + max_train_step, + dataset_name, + data_path, + intermediate_step_save, + context_length, + run_validation, + use_peft, + device, + scenario_key, + mocker, +): + results, all_ref_metrices, all_config_spy = train_function( + model_name, + task_mode, + max_eval_step, + max_train_step, + dataset_name, + data_path, + intermediate_step_save, + context_length, + run_validation, + use_peft, + device, scenario_key, - current_world_size, - current_rank, + mocker, ) + # Assertions for step-level values using the helper function assert results["avg_epoch_time"] < 60, "Training should complete within 60 seconds." 
- - train_config_spy.assert_called_once() - generate_dataset_config_spy.assert_called_once() + all_config_spy["train_config_spy"].assert_called_once() + all_config_spy["generate_dataset_config_spy"].assert_called_once() if task_mode == Task_Mode.GENERATION: - generate_peft_config_spy.assert_called_once() - get_longest_seq_length_spy.assert_called_once() - print_model_size_spy.assert_called_once() - train_spy.assert_called_once() - - assert update_config_spy.call_count == 1 - assert get_custom_data_collator_spy.call_count == 2 - assert get_dataloader_kwargs_spy.call_count == 2 - assert get_preprocessed_dataset_spy.call_count == 2 - - args, kwargs = train_spy.call_args + all_config_spy["generate_peft_config_spy"].assert_called_once() + all_config_spy["get_longest_seq_length_spy"].assert_called_once() + all_config_spy["print_model_size_spy"].assert_called_once() + all_config_spy["train_spy"].assert_called_once() + assert all_config_spy["update_config_spy"].call_count == 1 + assert all_config_spy["get_custom_data_collator_spy"].call_count == 2 + assert all_config_spy["get_dataloader_kwargs_spy"].call_count == 2 + assert all_config_spy["get_preprocessed_dataset_spy"].call_count == 2 + args, kwargs = all_config_spy["train_spy"].call_args train_dataloader = args[2] eval_dataloader = args[3] optimizer = args[4] - batch = next(iter(train_dataloader)) assert "labels" in batch.keys() assert "input_ids" in batch.keys() assert "attention_mask" in batch.keys() - assert isinstance(optimizer, optim.AdamW) - assert isinstance(train_dataloader, DataLoader) if run_validation: assert isinstance(eval_dataloader, DataLoader) else: assert eval_dataloader is None - - args, kwargs = update_config_spy.call_args_list[0] + args, kwargs = all_config_spy["update_config_spy"].call_args_list[0] train_config = args[0] assert max_train_step >= train_config.gradient_accumulation_steps, ( "Total training step should be more than " f"{train_config.gradient_accumulation_steps} which is gradient 
accumulation steps." ) - if use_peft: saved_file = os.path.join(train_config.output_dir, "complete_epoch_1/adapter_model.safetensors") else: saved_file = os.path.join(train_config.output_dir, "complete_epoch_1/model.safetensors") assert os.path.isfile(saved_file) - clean_up(train_config.output_dir) clean_up("qaic-dumps") if dataset_name == "alpaca_dataset": clean_up(alpaca_json_path) + + +@pytest.mark.skip() # remove when it's clear why diff val_step_loss values are observed in diff runs on existing code (even without PR #478 changes) +@pytest.mark.cli +@pytest.mark.on_qaic +@pytest.mark.finetune +@pytest.mark.parametrize( + "model_name,task_mode,max_eval_step,max_train_step,dataset_name,data_path,intermediate_step_save,context_length,run_validation,use_peft,device,scenario_key", # This parameter will be used to look up reference data + configs, +) +def test_finetune_assert( + model_name, + task_mode, + max_eval_step, + max_train_step, + dataset_name, + data_path, + intermediate_step_save, + context_length, + run_validation, + use_peft, + device, + scenario_key, + mocker, +): + results, all_ref_metrices, all_config_spy = train_function( + model_name, + task_mode, + max_eval_step, + max_train_step, + dataset_name, + data_path, + intermediate_step_save, + context_length, + run_validation, + use_peft, + device, + scenario_key, + mocker, + ) + + # Assertions for step-level values using the helper function + assert_list_close( + all_ref_metrices["ref_train_losses"], + results["train_step_loss"], + constant.LOSS_ATOL, + "Train Step Losses", + scenario_key, + all_config_spy["current_world_size"], + all_config_spy["current_rank"], + ) + assert_list_close( + all_ref_metrices["ref_eval_losses"], + results["eval_step_loss"], + constant.LOSS_ATOL, + "Eval Step Losses", + scenario_key, + all_config_spy["current_world_size"], + all_config_spy["current_rank"], + ) + assert_list_close( + all_ref_metrices["ref_train_metrics"], + results["train_step_metric"], + constant.METRIC_ATOL, 
+ "Train Step Metrics", + scenario_key, + all_config_spy["current_world_size"], + all_config_spy["current_rank"], + ) + assert_list_close( + all_ref_metrices["ref_eval_metrics"], + results["eval_step_metric"], + constant.METRIC_ATOL, + "Eval Step Metrics", + scenario_key, + all_config_spy["current_world_size"], + all_config_spy["current_rank"], + ) + clean_up("qaic-dumps") + + if dataset_name == "alpaca_dataset": + clean_up(alpaca_json_path) From 22390a6b63802174feb9aa976381af918590a653 Mon Sep 17 00:00:00 2001 From: Dhiraj Kumar Sah Date: Tue, 2 Dec 2025 12:17:44 +0530 Subject: [PATCH 35/60] [BUGFIX] Patch for issues with export via replicate_kv_heads script CLI for CB (#646) InputHandler has changes to create position_ids based on CB batch size. Signed-off-by: Dhiraj Kumar Sah --- QEfficient/exporter/export_hf_to_cloud_ai_100.py | 2 +- QEfficient/utils/generate_inputs.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/QEfficient/exporter/export_hf_to_cloud_ai_100.py b/QEfficient/exporter/export_hf_to_cloud_ai_100.py index b769680ef..2547d9db3 100644 --- a/QEfficient/exporter/export_hf_to_cloud_ai_100.py +++ b/QEfficient/exporter/export_hf_to_cloud_ai_100.py @@ -202,7 +202,7 @@ def export_kvstyle_transformed_model_to_onnx( batch_size=len(Constants.INPUT_STR), tokenizer=tokenizer, config=transformed_model.config, - prompt=Constants.INPUT_STR, + prompt=Constants.INPUT_STR * (full_batch_size if full_batch_size else 1), prompt_len=Constants.PROMPT_LEN, ctx_len=seq_len, full_batch_size=full_batch_size, diff --git a/QEfficient/utils/generate_inputs.py b/QEfficient/utils/generate_inputs.py index 7d07db530..95474acfd 100644 --- a/QEfficient/utils/generate_inputs.py +++ b/QEfficient/utils/generate_inputs.py @@ -68,7 +68,8 @@ def prepare_pytorch_inputs(self): batch_size, input_len = input_ids.shape inputs.pop("attention_mask") inputs.pop("token_type_ids", None) - position_ids = torch.arange(input_len).view(1, -1) + usable_bs = self.full_batch_size if 
self.full_batch_size else 1 + position_ids = torch.arange(input_len).view(1, input_len).repeat(usable_bs, 1) inputs["input_ids"] = torch.concat( [ input_ids, From 5a8bd0b125b4f036587d83e75de85207886a8066 Mon Sep 17 00:00:00 2001 From: Rishin Raj Date: Tue, 2 Dec 2025 21:13:14 +0530 Subject: [PATCH 36/60] Add custom op examples and documentation (#638) Added step by step instructions for adding custom op in Qeff --------- Signed-off-by: Rishin Raj Co-authored-by: Hem Agnihotri Signed-off-by: Dhiraj Kumar Sah --- .../CustomGELU/src/customgelu_aic.cpp | 23 ++ .../CustomGELU/src/customgelu_functions.cpp | 74 ++++ .../CustomGELU/src/customgelu_interpreter.cpp | 33 ++ .../customop/CustomGELU/src/customop_lib.so | Bin 0 -> 16440 bytes examples/onboarding_guide/customop/README.md | 343 ++++++++++++++++++ .../customop/custom_op_config.yaml | 27 ++ .../customop/example_custom_op.py | 86 +++++ .../customop/example_pytorch_transforms.py | 54 +++ 8 files changed, 640 insertions(+) create mode 100644 examples/onboarding_guide/customop/CustomGELU/src/customgelu_aic.cpp create mode 100644 examples/onboarding_guide/customop/CustomGELU/src/customgelu_functions.cpp create mode 100644 examples/onboarding_guide/customop/CustomGELU/src/customgelu_interpreter.cpp create mode 100644 examples/onboarding_guide/customop/CustomGELU/src/customop_lib.so create mode 100644 examples/onboarding_guide/customop/README.md create mode 100644 examples/onboarding_guide/customop/custom_op_config.yaml create mode 100644 examples/onboarding_guide/customop/example_custom_op.py create mode 100644 examples/onboarding_guide/customop/example_pytorch_transforms.py diff --git a/examples/onboarding_guide/customop/CustomGELU/src/customgelu_aic.cpp b/examples/onboarding_guide/customop/CustomGELU/src/customgelu_aic.cpp new file mode 100644 index 000000000..ac018b91d --- /dev/null +++ b/examples/onboarding_guide/customop/CustomGELU/src/customgelu_aic.cpp @@ -0,0 +1,23 @@ 
+//----------------------------------------------------------------------------- +// +// Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +// SPDX-License-Identifier: BSD-3-Clause +// +//----------------------------------------------------------------------------- + +#include "CustomOpAICInterface.h" +#include "stddef.h" + +extern "C" { + +/* The AIC compilation target supports an API similar to the Interpreter API. +Additionally, threadId, which is the AIC thread ID, is passed. +Kernel is invoked by four AIC threads with threadId equal to 0, 1, 2, and 3. */ + +void CustomGELUAIC( + const CustomOpContext *ctx, + const int32_t threadId) +{ +} + +} diff --git a/examples/onboarding_guide/customop/CustomGELU/src/customgelu_functions.cpp b/examples/onboarding_guide/customop/CustomGELU/src/customgelu_functions.cpp new file mode 100644 index 000000000..f0ebb8f89 --- /dev/null +++ b/examples/onboarding_guide/customop/CustomGELU/src/customgelu_functions.cpp @@ -0,0 +1,74 @@ +//----------------------------------------------------------------------------- +// +// Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +// SPDX-License-Identifier: BSD-3-Clause +// +//----------------------------------------------------------------------------- + +#include "CustomOpFunctions.h" +#include "CustomOpInterpreterInterface.h" +#include "CustomOpTileConfigHelpers.h" +#include "CustomOpTypes.h" + +#include + +extern "C" { +bool customOpVerify( + const CustomOpPropertiesHandle *const opProp) +{ + /* Refer to function declaration at CustomOpFunctions.h for usage. */ + + return true; +} + +const char * customOpSelectImpl( + const CustomOpPropertiesHandle *const opProp, + const CustomOpKernelInfo *const kernelInfos, + const int32_t numKernels, + const char *backend) +{ + /* Refer to function declaration at CustomOpFunctions.h for usage. 
*/ + + /* For AIC pick 'AIC', for Interpreter pick 'Interpreter' */ + if (strcmp(backend, "AIC") == 0) + { + return ""; + } + else if (strcmp(backend, "Interpreter") == 0) + { + return ""; + } + return nullptr; +} + +bool customOpInferShape( + CustomOpPropertiesHandle *const opProp) +{ + /* Refer to function declaration at CustomOpFunctions.h for usage. */ + + return false; +} + +bool customOpSetProperties( + CustomOpPropertiesHandle *opProp) +{ + /* Refer to function declaration at CustomOpFunctions.h for usage. */ + + return false; +} + +bool customOpMapTiles( + CustomOpPropertiesHandle *opProp) +{ + /* Refer to function declaration at CustomOpFunctions.h for usage. */ + + return false; +} +void customOpDeallocateMemory( + CustomOpPropertiesHandle *opProp) +{ + /* Refer to function declaration at CustomOpFunctions.h for usage. */ + + CustomOpTileConfigHelpers::destroyTileConfigsAndMergeConfigs(opProp); +} +} diff --git a/examples/onboarding_guide/customop/CustomGELU/src/customgelu_interpreter.cpp b/examples/onboarding_guide/customop/CustomGELU/src/customgelu_interpreter.cpp new file mode 100644 index 000000000..bdae3430a --- /dev/null +++ b/examples/onboarding_guide/customop/CustomGELU/src/customgelu_interpreter.cpp @@ -0,0 +1,33 @@ +//----------------------------------------------------------------------------- +// +// Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+// SPDX-License-Identifier: BSD-3-Clause +// +//----------------------------------------------------------------------------- + +/* +* This file can be compiled separately and can be loaded using dlopen +* Compilation command: (tried with gcc 5.5) +* g++ -shared -std=c++11 -fPIC -o _lib.so .cpp .cpp -I/opt/qti-aic/dev/inc +* for example: g++ -shared -std=c++11 -fPIC -o reluop_lib.so reluop_functions.cpp reluop_interpreter.cpp -I/opt/qti-aic/dev/inc +*/ + +#include "CustomOpInterpreterInterface.h" + +extern "C" { +void CustomGELUInterpreter( + CustomOpContext *ctx) +{ + /* The interpreter implementation is provided to the compiler as a shared library + (or collection of shared libraries). Each shared library can contain multiple + versions (flavors) of implementations of the operation, refered onwards as kernels. + A kernel is selected at model compilation time by the selection function. The + developer is responsible for compilation of these shared libraries. As the interface + is C, the shared libraries can be compiled by various compilers (GCC, CLANG, etc). + In addition, as these shared libraries are running on the Host CPU, the developer + can open files, dump results, use stdout/stderr for printing debug messages, etc. + This makes the Interpreter implementation a very effective way for debugging the + operation functionality as part of model execution. The signature of the + kernel (implementation) is generic, and fits any custom operation. 
*/ +} +} diff --git a/examples/onboarding_guide/customop/CustomGELU/src/customop_lib.so b/examples/onboarding_guide/customop/CustomGELU/src/customop_lib.so new file mode 100644 index 0000000000000000000000000000000000000000..f1013b1e32ae1d4f418ae61196b3246bc24025af GIT binary patch literal 16440 zcmeHOeQaCR6~9heNTDPFQdU}^rt3yOs7cbaG-U;+snb`Mv{}=j@=?9I_DkZ;ACdjs zHjIvr3`7=?y1^v20o$|=6=@sW)Cv9=Q_5G$gf^I_snR4C25632SO_008>{y__nl+E zygGp~X&;hrWS@I}=i{Dx-be2BJMZ>Tds~Iir=(S?YgLg`D#(H-)`|vUYt^~w$*z{|YS~Wci<~T>JY5|l#d(VYJUB#l-KSU+j;2{))%UC0j%qF zJNj|P>twq+4#Z1s9#5!@_c_?$p@Vlo3?b5+6cu` z$pcfrj(L(_)wJ^Z2UldCSsK6NQUB#Hyf*jB2Y*1Sq<=O(N_zUJjYp$L)629^WYaU-p=?p&vyX9 z?`Hf8kNzR3EmqZPIV;`=J!&_z{twwtp98Bb<8>ZCZ$eIfo}`0`w%;MD8;IDR z$JkE?~w0ND9MC68`czA zBb#)+jJQDf4+Qkl z05)3Tcsvye+g68_NTmmq6V#?qd-vufj$41)!ncaYc4zEpWW|a=CKXsiY{cnpYTvw} zb;k~KRiH`fzS$UP46LS;q5{``1+M$){+ttsq%Y%~lIzZ1_I2EvE7f0tQ6u>qEGIs@ zAT~$)mird}e~|n6K|b%RJa{ML!611l-v5OkyUxMs9>qgzAm_s6UQB`s7ta4Wwf-p= z?!I5o4{EW#7gjr6{q1+*=eqEvE?lNc;(8Y@_hQ1=x^TJI5MFNO43sla&OkW>BFo)!j^<3BC%v~Zam|DN#h-mSfFRN6oI{T@E+;S(O- z^6)!_H||{@M&G!1_Dw{ZPucS@)i<%5r>V}#kJT(XfN0dyOrhVP^;a(^vh1`mG+`Wl z^E%_`sY=85obl3WyABSXWCvCGv7VYmh+FI_9|zXI0kO)i=r#t|-%4F$=yiLpF}(iQ zU~+RX?_3mL&&_@oyl*FLr`V70g^(O|!_FAmf9eTJ@Xn_T(BMfw!NyQ_VyOG1G1M_; z42Q;yd*?M&(^$VZhDLL*p3dj*9LdhX%ohPYv6NLVEUJtYRX)JJ_r&ZcFpPS>I1UX> zkZb6V1`VF9)d3EN5aej?3Ji46<=2Tnhxo^g(a;19apE>(xO>7FdeJyKR$=&vR2oB1 zA?O@&LvTdShJ&?q+-eI9ypR7QpJ#r)TR5MFexWGzyM;*LXzno*4u&RtFrP54360g< zu?ML)2KVO_9WRUxts!VljXsFU9YgdZi2gW5KfVP=2opF?6Yz~piT{`r|AZc6I9Mf7 z000C3%P5M`(3p1gTYNj31}51(=5{#C@L|*$GV#X`X2`@-BpeOpXqXe%Vhm)n)2}B< zxki$Q^)+$XG$KTLJGmGPkIv1HXrKryUzz)5(jtE-IA_PeJ?HCJ;}7(${8qUpqznn2Fe*I zXP}&cat6v7_@Bx^%jQ<4+)r103oEbbM~L(+bnZkxe-CIJ^a$u}pf7;l`Bpx^5u2U| zK%;v9pU-~<^uoz}{t)P=L7xF_27L{52T|Bv^maZ!3;d(+YwxOPJ@2fFpJQZ4em~^rAa4CEQ{ph5*7LNTCDBgf&5Cy_3tSnuP_Vtb(p_BgMAa^y)($S zLw?f?avSmv$QL0FCF9>vjPHKP7hrxhZh4y{C#c-Y87OCqa$j)P$#+H)?YoQmEV3^|HX)xvunLk#oJkM? 
zVm)u7gG@V_?qb?6@}Oh6skL>Te`)vbY|_s98yW&lf%=sVS2p*cNM7r9piCDj;GlgujP75!7pcA&TGN1M#rZvRz}c47l2=YA4%I8JKNI}h^S&&y=f=R2o=_6P=6mynZ5DU?r_|U+NpHL3}^SLNDAfR zDB+F<>Pu@-v3<-8r_rJN8Qo^pi#)>uQ zj$gBFTT4gC3~kv+?--bP^TZ4p3>q7^(Ht`=vrpIkD?K-*!&c_T8@IG{;Fnj^#JdRM zU{m|n4K3~F*0#2;&~|fs%ZB!lIZX+?Ipx)apu5;z>h(+4+`EeNXMUgI-+gUhX73IE z((4WMzJxPJF77YR(en~Q$xIa9Gx(60KKdIRGrUeBq4DbyrCvlS`Lc=Fo$(HY3SIu_E$P~k)j&fuDcK$P0g?o37naOX(ihEU>G_pu;am(mgqw@=-sdq0oGh4rcM z0d(B<)4iuB`n-Gx#XshlM_X|6kN-{Y+i4 zmvw3vFx=V;_OiYfD(h*e=*I4ZRX3wYYg-v#*0Vwvv!=u^dP4gkr?s;1vR)P%WP6IA zbR>R;)n0VSx7f@2Tz<-e|nz zKkKo-+YbOu=n-=6YI6Pv^c!?&??Tt0ZnBS{8w;9OUwEN^fKClQVlVras*h;@XV}uY zEB3M;m-n3IeP-Ez%eZnJX`e$=6nlAJZR{$5Xny@&{1SVi6A-!WRgiV-9V#rNg?xzp z`w+P8gKQ9Fd(rdS3r`zN(vkRO-(gH+Ph+_oJ;Nr9)e9oAUs*t$j@Y}YoO^E7ee7_A u1Eb$Z=$<9>k^9#iU=q9Aer=P(#XUk9S9EE+>`~F8LsvT*=Xne~tokP!#V}+5 literal 0 HcmV?d00001 diff --git a/examples/onboarding_guide/customop/README.md b/examples/onboarding_guide/customop/README.md new file mode 100644 index 000000000..e8e523b70 --- /dev/null +++ b/examples/onboarding_guide/customop/README.md @@ -0,0 +1,343 @@ +# Adding Custom Operations to QEfficient + +Custom ops are hardware optimized implementations of neural network operators for Qualcomm Cloud AI 100. This example walks you through the complete process of creating, registering, and deploying custom operations. + +## When to Add Custom Ops + +Add custom ops when: + +- Replacing standard PyTorch ops with faster hardware-optimized versions +- Implementing operations the compiler doesn't support natively +- Optimizing frequently used operations in your model + +## Understanding the 3-Layer Pattern + +Every custom op in QEfficient follows a 3-layer architecture: + +1. **ONNX Script** - Defines the ONNX representation that gets exported (which the compiler later reads) +2. **PyTorch Autograd Function** - Bridges PyTorch execution and ONNX export +3. 
**nn.Module Wrapper** - What users interact with in their code + +--- + +## Step 1: Review the Example Template + +Before creating a custom op, examine the example template to understand the structure: + +**File:** `examples/onboarding_guide/customop/example_custom_op.py` + +This file demonstrates a complete custom GELU implementation with all three layers. Study how: +- Constants are defined in the ONNX script +- The autograd function handles both PyTorch execution and ONNX export +- The module wrapper provides a clean user interface + +--- + +## Step 2: Create the Custom Op File + +Create a new file under `QEfficient/customop/.py` with the following structure: + +### Layer 1: ONNX Script + +This defines the ONNX representation that PyTorch exports (which the compiler will later read and compile). + +```python +import onnxscript +import torch +from torch import nn +from QEfficient.utils import constants + +ops = getattr(onnxscript, "opset" + str(constants.ONNX_EXPORT_OPSET)) + +@onnxscript.script(onnxscript.values.Opset(domain="com.qti.aisw.onnx", version=1)) +def CustomOpBluePrint(input: onnxscript.FLOAT): + """ + ONNX implementation of the operation. + + Important constraints: + - Domain MUST be "com.qti.aisw.onnx" for custom ops + - Use ops.Constant() for any constant values + """ + +``` + + +PyTorch's ONNX exporter uses this to generate the ONNX graph + +### Layer 2: PyTorch Autograd Function + +This bridges PyTorch execution and ONNX export. + +```python +class CustomOpBluePrintFunc(torch.autograd.Function): + + @staticmethod + def forward(input: torch.Tensor, mode: str = "default"): + """ + PyTorch implementation - can use ANY PyTorch operations. + This runs during normal PyTorch execution (training, inference, etc.) 
+ """ + + + @staticmethod + def setup_context(ctx, inputs, outputs): + """Store any tensors needed for backward pass (not needed for inference-only ops)""" + + + @staticmethod + def symbolic(g: torch.Graph, input: torch.Value, mode: str = "default") -> torch.Value: + """ + Called during ONNX export - maps to the ONNX script. + """ + return g.onnxscript_op(CustomOpBluePrint, input) +``` + +### Layer 3: Module Wrapper + +This is the user facing interface. + +```python +class CustomOpBluePrintAIC(nn.Module): + """ + User-facing module wrapper. + This is what users instantiate and use in their models. + """ + + def __init__(self, mode: str = "default"): + super().__init__() + pass + + def forward(self, input: torch.Tensor) -> torch.Tensor: + """Forward pass delegates to the autograd function""" + return CustomOpBluePrintFunc.apply(input, self.mode) +``` + +Provides a clean, standard `nn.Module` interface + +--- + +## Step 3: Register the Custom Op in Qeff + +Add the custom op to `QEfficient/customop/__init__.py`: + +```python +from QEfficient.customop. import CustomOpBluePrintAIC + +__all__ = [ + # ... existing exports ... + "CustomOpBluePrintAIC", +] +``` + +This makes the custom op importable from `QEfficient.customop`. + +--- + +## Step 4: Map to PyTorch Transforms + +Add the mapping in `QEfficient/transformers/models/pytorch_transforms.py` to automatically replace standard PyTorch modules with the custom op. + + +### Example from the Codebase + +```python +from transformers.activations import NewGELUActivation +from transformers.models.llama.modeling_llama import LlamaRMSNorm +from QEfficient.customop import CustomGELUAIC, CustomRMSNormAIC + +class CustomOpsTransform(ModuleMappingTransform): + _module_mapping = { + # Activation functions + NewGELUActivation: CustomGELUAIC, + + # Normalization layers + LlamaRMSNorm: CustomRMSNormAIC, + } +``` + +**How the transform works:** +1. When a model is loaded, QEfficient scans all modules +2. 
For each module type in `_module_mapping`, it replaces the module with the custom implementation +3. The replacement happens automatically before ONNX export + +--- + +## Step 5: Export the Model with QEff + +Use QEfficient to export the model with custom ops applied: + +```python +from QEfficient import QEFFAutoModelForCausalLM +from transformers import AutoTokenizer + +# Model name +model_name = "gpt2" + +# Load model - custom ops are automatically applied via transforms +model = QEFFAutoModelForCausalLM.from_pretrained(model_name) + +# Export to ONNX +# The custom ops will be included in the ONNX graph +export_path = model.export() + +print("Model exported successfully with custom ops!") +print(f"Model path : {export_path}") +``` + +The export process: +1. Loads the PyTorch model +2. Applies `CustomOpsTransform` to replace standard ops with custom ops +3. Saves the ONNX model + +--- + +## Step 6: Generate C++ and YAML Files + +Use the SDK tool to generate the custom op package structure from the ONNX model: + +```bash +# Generate custom op package from ONNX model +python /opt/qti-aic/tools/custom-ops/gen_custom_op_package.py \ + --onnx_model path/to/model.onnx \ + --output_dir CustomOp_Package \ + --domain com.qti.aisw.onnx +``` + +This generates: +``` +CustomOp_Package/ +ā”œā”€ā”€ custom_op_config.yaml # Op configuration for compiler +└── / + └── src/ + ā”œā”€ā”€ customop_functions.cpp # Utility functions (to be implemented) + └── customop_interpreter.cpp # Backend implementation (to be implemented) +``` + +--- + +## Step 7: Build the Shared Library + +Compile the C++ code into a shared library (.so file) that the runtime can load. + +### Reference the SDK Example + +See `/opt/qti-aic/examples/apps/custom-op/basic-example/README.md` for detailed instructions. 
+ +### Build Command + +```bash +# Navigate to the custom op package +cd CustomOp_Package//src + +# Compile the shared library +g++ -shared -std=c++11 -fPIC \ + -o customop_lib.so \ + customop_functions.cpp \ + customop_interpreter.cpp \ + -I/opt/qti-aic/dev/inc \ + -I/opt/qti-aic/dev/lib +``` + +After building, verify the .so file exists: +```bash +ls -lh customop_lib.so +``` + +--- + +## Step 8: Compile the Model with Custom Op + +Pass the custom op YAML config to the compiler: + +```python +from QEfficient import QEFFAutoModelForCausalLM +from transformers import AutoTokenizer + +model_name = "gpt2" + +# Load model with custom ops +model = QEFFAutoModelForCausalLM.from_pretrained(model_name) + +# Compile with custom op - pass the YAML config path +model.compile( + num_cores=16, + registered_custom_op="CustomOp_Package/custom_op_config.yaml" +) + +print("Model compiled successfully with custom op!") +``` +--- + + +## Workflow Summary + +Here's the complete workflow from start to finish: + +### Quick Reference + +1. **Review** `example_custom_op.py` to understand the 3-layer pattern +2. **Create** `QEfficient/customop/your_op.py` with ONNX script, autograd function, and module wrapper +3. **Register** in `QEfficient/customop/__init__.py` +4. **Map** in `QEfficient/transformers/models/pytorch_transforms.py` +5. **Export** model with QEff (custom ops automatically applied) +6. **Generate** C++ and YAML files using SDK tool +7. **Implement** C++ functions and build .so library +8. 
**Compile** model with `registered_custom_op` parameter + +### Workflow Diagram + +```mermaid +graph TD + A[Start: Need Custom Op] --> B[Review example_custom_op.py] + B --> C[Create QEfficient/customop/your_op.py] + C --> C1[Layer 1: ONNX Script] + C --> C2[Layer 2: Autograd Function] + C --> C3[Layer 3: Module Wrapper] + C1 --> D[Register in __init__.py] + C2 --> D + C3 --> D + D --> E[Add mapping in pytorch_transforms.py] + E --> F[Export model with QEff] + F --> G[Generate C++ & YAML with SDK tool] + G --> H[Implement C++ functions] + H --> I[Build .so library] + I --> J[Compile model with registered_custom_op] + J --> K[Run inference on device] + K --> L[Verify performance] + L --> M{Performance OK?} + M -->|Yes| N[Done!] + M -->|No| O[Optimize C++ implementation] + O --> I + + style A fill:#2196F3,stroke:#1976D2,stroke-width:2px,color:#fff + style N fill:#4CAF50,stroke:#388E3C,stroke-width:2px,color:#fff + style C fill:#FFC107,stroke:#FFA000,stroke-width:2px,color:#000 + style J fill:#FF9800,stroke:#F57C00,stroke-width:2px,color:#fff + style K fill:#E91E63,stroke:#C2185B,stroke-width:2px,color:#fff +``` + +### Key Files Reference + +| Purpose | File Path | +|---------|-----------| +| Example template | `examples/onboarding_guide/customop/example_custom_op.py` | +| SDK custom op tool | `/opt/qti-aic/tools/custom-ops/gen_custom_op_package.py` | +| SDK examples | `/opt/qti-aic/examples/apps/custom-op/` | + +--- + +## Examples and References + +### Example Implementations + +- **`example_custom_op.py`** - Complete template showing the 3-layer pattern +- **`example_pytorch_transforms.py`** - How to register custom ops in transforms +- **`QEfficient/customop/rms_norm.py`** - Real implementation with learnable parameters +- **`QEfficient/customop/ctx_scatter_gather.py`** - Advanced custom op example + +### Documentation + +- **[Custom Ops Directory](../../../QEfficient/customop/)** - All custom op implementations +- **[PyTorch 
Transforms](../../../QEfficient/transformers/models/pytorch_transforms.py)** - Transform registry +- **[SDK Custom Op Documentation](/opt/qti-aic/examples/apps/custom-op)** - Hardware-specific details +- **[Contributing Guide](../../../CONTRIBUTING.md)** - How to contribute custom ops diff --git a/examples/onboarding_guide/customop/custom_op_config.yaml b/examples/onboarding_guide/customop/custom_op_config.yaml new file mode 100644 index 000000000..06187e80b --- /dev/null +++ b/examples/onboarding_guide/customop/custom_op_config.yaml @@ -0,0 +1,27 @@ +--- +version: Major.Minor.Patch +CustomOps: +# CustomGELU + - type: CustomGELU + package: com.qti.aisw.onnx + inputs: + - name: in1 + maxDims: 5 + parameters: [] + outputs: + - name: out1 + maxDims: 5 + functionsLibrary: ./CustomGELU/src/customgelu_lib.so + implementations: + - backend: Interpreter + type: CustomGELUInterpreter + impl: ./CustomGELU/src/customgelu_lib.so + - backend: AIC + type: CustomGELUAIC + impl: ./CustomGELU/src/customgelu_aic.cpp + memoryConfig: + DDR: + CacheableDDR: + VTCM: [in1, out1] + requiredFor: +... diff --git a/examples/onboarding_guide/customop/example_custom_op.py b/examples/onboarding_guide/customop/example_custom_op.py new file mode 100644 index 000000000..f682bcebf --- /dev/null +++ b/examples/onboarding_guide/customop/example_custom_op.py @@ -0,0 +1,86 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +""" +Custom op template - shows the 3-layer pattern we use for all custom ops. 
+""" + +import onnxscript +import torch +from torch import nn + +from QEfficient.utils import constants + +ops = getattr(onnxscript, "opset" + str(constants.ONNX_EXPORT_OPSET)) + + +# Layer 1: ONNX Script +# This is what the compiler sees when it compiles your model + + +@onnxscript.script(onnxscript.values.Opset(domain="com.qti.aisw.onnx", version=1)) +def CustomOpBluePrint(input: onnxscript.FLOAT): + """ + ONNX implementation of your operation. + Important: Only use tensor inputs - no strings or other types! + """ + sqrt_2 = ops.Constant(value_floats=[1.4142135623730951]) + half = ops.Constant(value_floats=[0.5]) + one = ops.Constant(value_floats=[1.0]) + + x_scaled = ops.Div(input, sqrt_2) + erf_x = ops.Erf(x_scaled) + result = ops.Mul(input, ops.Mul(half, ops.Add(one, erf_x))) + + return result + + +# Layer 2: PyTorch Autograd Function +# Connects PyTorch execution to ONNX export +# Pytorch forward function is called during PyTorch execution (CPU/GPU). +# When running on ONNX runtime, the CustomOpBluePrint function (Layer 1) is called instead. 
+ + +class CustomOpBluePrintFunc(torch.autograd.Function): + @staticmethod + def forward(input: torch.Tensor, mode: str = "default"): + """PyTorch implementation - can use any PyTorch ops""" + if mode == "approximate": + return 0.5 * input * (1.0 + torch.tanh(0.7978845608028654 * (input + 0.044715 * input**3))) + else: + return input * 0.5 * (1.0 + torch.erf(input / 1.4142135623730951)) + + @staticmethod + def setup_context(ctx, inputs, outputs): + pass + + @staticmethod + def symbolic(g: torch.Graph, input: torch.Value, mode: str = "default") -> torch.Value: + """Called during ONNX export - don't pass string params!""" + return g.onnxscript_op(CustomOpBluePrint, input).setTypeAs(input) + + +# Layer 3: Module Wrapper +# What users actually interact with + + +class CustomOpBluePrintAIC(nn.Module): + def __init__(self, mode: str = "default"): + super().__init__() + if mode not in ["default", "approximate"]: + raise ValueError(f"mode must be 'default' or 'approximate', got {mode}") + self._mode_str = mode + + @property + def mode(self) -> str: + return self._mode_str if hasattr(self, "_mode_str") else "default" + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return CustomOpBluePrintFunc.apply(input, self.mode) + + def extra_repr(self) -> str: + return f"mode={self.mode}" diff --git a/examples/onboarding_guide/customop/example_pytorch_transforms.py b/examples/onboarding_guide/customop/example_pytorch_transforms.py new file mode 100644 index 000000000..591890c52 --- /dev/null +++ b/examples/onboarding_guide/customop/example_pytorch_transforms.py @@ -0,0 +1,54 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +""" +Example pytorch_transforms.py showing how to register custom operations. 
+ +This file demonstrates how to add custom operation mappings to the transform +system so they are automatically applied when loading models. + +For the actual production transforms, see: +- QEfficient/transformers/models/pytorch_transforms.py +""" + +from transformers.activations import ( + NewGELUActivation, +) + +from QEfficient.base.pytorch_transforms import ModuleMappingTransform +from QEfficient.customop import CustomGELUAIC + + +class CustomOpsTransform(ModuleMappingTransform): + """ + Maps standard PyTorch operations to custom Cloud AI 100 implementations. + + How it works: + 1. When a model is loaded, this transform scans all modules + 2. For each module type in _module_mapping, it replaces the module + with the corresponding custom implementation + 3. The replacement happens automatically before ONNX export + """ + + _module_mapping = { + # ACTIVATION FUNCTIONS + # GELU + NewGELUActivation: CustomGELUAIC, + # TODO: Add other activation functions + # nn.SiLU: CustomSiLUAIC, + # nn.Mish: CustomMishAIC, + # NORMALIZATION LAYERS + # RMSNorm - Used by Llama, Mistral, Mixtral, etc. 
+ # from transformers.models.llama.modeling_llama import LlamaRMSNorm + # LlamaRMSNorm: CustomRMSNormAIC, + # TODO: Add your model's normalization layers + # YourModelRMSNorm: CustomRMSNormAIC, + # OTHER OPERATIONS + # TODO: Add other custom operations + # nn.Linear: CustomLinearAIC, # If you have a custom linear layer + # nn.Embedding: CustomEmbeddingAIC, # If you have custom embeddings + } From 7c50f755a20407ba9568c048fe00666deb2baf5a Mon Sep 17 00:00:00 2001 From: Rishin Raj Date: Wed, 3 Dec 2025 10:29:33 +0530 Subject: [PATCH 37/60] Added torchvision (#650) Added torchvision 0.22.0 cpu version to environment Signed-off-by: Rishin Raj Co-authored-by: Hem Agnihotri Signed-off-by: Dhiraj Kumar Sah --- pyproject.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index ea3c3405d..8e179ab4a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,6 +44,8 @@ dependencies = [ "torch@https://download.pytorch.org/whl/cpu/torch-2.4.1%2Bcpu-cp38-cp38-linux_x86_64.whl ; python_version=='3.8' and platform_machine=='x86_64'", "torch@https://download.pytorch.org/whl/cpu/torch-2.7.0%2Bcpu-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_machine=='x86_64'", "torch@https://download.pytorch.org/whl/cpu/torch-2.7.0%2Bcpu-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_machine=='x86_64'", + "torchvision@https://download.pytorch.org/whl/cpu/torchvision-0.22.0%2Bcpu-cp39-cp39-manylinux_2_28_x86_64.whl ; python_version=='3.9' and platform_machine=='x86_64'", + "torchvision@https://download.pytorch.org/whl/cpu/torchvision-0.22.0%2Bcpu-cp310-cp310-manylinux_2_28_x86_64.whl ; python_version=='3.10' and platform_machine=='x86_64'", ] [project.optional-dependencies] From 19d84987ef0c0797991bd1aface0cdc75364b5c8 Mon Sep 17 00:00:00 2001 From: smedhe Date: Thu, 4 Dec 2025 11:08:14 +0530 Subject: [PATCH 38/60] removed platform sdk dependency (#609) This PR updates QEff to support QPC generation on systems 
without the Platform SDK by refactoring the module loading behavior. Users can now compile models and generate QPCs using QEff with only the Apps SDK installed. Background: Previously, both Apps SDK and Platform SDK were required to compile and generate QPCs using QEff. The goal is to allow QPC generation with only the Apps SDK installed for systems without Ultra cards. Changes: Refactored init.py and generation/cloud_infer.py to use lazy loading via importlib for qaicrt and aicapi. This ensures that Platform SDK-dependent modules are only loaded when explicitly needed, avoiding import errors during initialization and QPC generation. Signed-off-by: Sharvari Medhe Co-authored-by: Hem Agnihotri Signed-off-by: Dhiraj Kumar Sah --- QEfficient/__init__.py | 69 +++++++++++++--------------- QEfficient/generation/cloud_infer.py | 62 ++++++++++++++++--------- 2 files changed, 72 insertions(+), 59 deletions(-) diff --git a/QEfficient/__init__.py b/QEfficient/__init__.py index 33c6f5588..7f63b34ca 100644 --- a/QEfficient/__init__.py +++ b/QEfficient/__init__.py @@ -9,19 +9,49 @@ import warnings import QEfficient.utils.model_registery # noqa: F401 +from QEfficient.base import ( + QEFFAutoModel, + QEFFAutoModelForCausalLM, + QEFFAutoModelForCTC, + QEFFAutoModelForImageTextToText, + QEFFAutoModelForSpeechSeq2Seq, + QEFFCommonLoader, +) +from QEfficient.compile.compile_helper import compile +from QEfficient.exporter.export_hf_to_cloud_ai_100 import qualcomm_efficient_converter +from QEfficient.generation.text_generation_inference import cloud_ai_100_exec_kv +from QEfficient.peft import QEffAutoPeftModelForCausalLM +from QEfficient.transformers.transform import transform from QEfficient.utils import custom_format_warning from QEfficient.utils.logging_utils import logger +# Users can use QEfficient.export for exporting models to ONNX +export = qualcomm_efficient_converter +__all__ = [ + "transform", + "export", + "compile", + "cloud_ai_100_exec_kv", + "QEFFAutoModel", + 
"QEFFAutoModelForCausalLM", + "QEFFAutoModelForCTC", + "QEffAutoPeftModelForCausalLM", + "QEFFAutoModelForImageTextToText", + "QEFFAutoModelForSpeechSeq2Seq", + "QEFFCommonLoader", +] # For faster downloads via hf_transfer # This code is put above import statements as this needs to be executed before # hf_transfer is imported (will happen on line 15 via leading imports) os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" # Placeholder for all non-transformer models registered in QEfficient - # custom warning for the better logging experience warnings.formatwarning = custom_format_warning +# Conditionally import QAIC-related modules if the SDK is installed +__version__ = "0.0.1.dev0" + def check_qaic_sdk(): """Check if QAIC SDK is installed""" @@ -37,40 +67,5 @@ def check_qaic_sdk(): return False -# Conditionally import QAIC-related modules if the SDK is installed -__version__ = "0.0.1.dev0" - -if check_qaic_sdk(): - from QEfficient.base import ( - QEFFAutoModel, - QEFFAutoModelForCausalLM, - QEFFAutoModelForCTC, - QEFFAutoModelForImageTextToText, - QEFFAutoModelForSpeechSeq2Seq, - QEFFCommonLoader, - ) - from QEfficient.compile.compile_helper import compile - from QEfficient.exporter.export_hf_to_cloud_ai_100 import qualcomm_efficient_converter - from QEfficient.generation.text_generation_inference import cloud_ai_100_exec_kv - from QEfficient.peft import QEffAutoPeftModelForCausalLM - from QEfficient.transformers.transform import transform - - # Users can use QEfficient.export for exporting models to ONNX - export = qualcomm_efficient_converter - - __all__ = [ - "transform", - "export", - "compile", - "cloud_ai_100_exec_kv", - "QEFFAutoModel", - "QEFFAutoModelForCausalLM", - "QEFFAutoModelForCTC", - "QEffAutoPeftModelForCausalLM", - "QEFFAutoModelForImageTextToText", - "QEFFAutoModelForSpeechSeq2Seq", - "QEFFCommonLoader", - ] - -else: +if not check_qaic_sdk(): logger.warning("QAIC SDK is not installed, eager mode features won't be available!") diff --git 
a/QEfficient/generation/cloud_infer.py b/QEfficient/generation/cloud_infer.py index 5068c174e..652a641e2 100644 --- a/QEfficient/generation/cloud_infer.py +++ b/QEfficient/generation/cloud_infer.py @@ -5,6 +5,8 @@ # # ----------------------------------------------------------------------------- +import platform +import sys from pathlib import Path from typing import Dict, List, Optional, Union from warnings import warn @@ -13,32 +15,29 @@ try: import qaicrt + + is_qaicrt_imported = True except ImportError: - import platform - import sys + try: + sys.path.append(f"/opt/qti-aic/dev/lib/{platform.machine()}") + import qaicrt - sys.path.append(f"/opt/qti-aic/dev/lib/{platform.machine()}") - import qaicrt + is_qaicrt_imported = True + except ImportError: + is_qaicrt_imported = False try: import QAicApi_pb2 as aicapi -except ImportError: - import sys - sys.path.append("/opt/qti-aic/dev/python") - import QAicApi_pb2 as aicapi + is_aicapi_imported = True +except ImportError: + try: + sys.path.append("/opt/qti-aic/dev/python") + import QAicApi_pb2 as aicapi -aic_to_np_dtype_mapping = { - aicapi.FLOAT_TYPE: np.dtype(np.float32), - aicapi.FLOAT_16_TYPE: np.dtype(np.float16), - aicapi.INT8_Q_TYPE: np.dtype(np.int8), - aicapi.UINT8_Q_TYPE: np.dtype(np.uint8), - aicapi.INT16_Q_TYPE: np.dtype(np.int16), - aicapi.INT32_Q_TYPE: np.dtype(np.int32), - aicapi.INT32_I_TYPE: np.dtype(np.int32), - aicapi.INT64_I_TYPE: np.dtype(np.int64), - aicapi.INT8_TYPE: np.dtype(np.int8), -} + is_aicapi_imported = True + except ImportError: + is_aicapi_imported = False class QAICInferenceSession: @@ -58,6 +57,25 @@ def __init__( :activate: bool. If false, activation will be disabled. Default=True. :enable_debug_logs: bool. If True, It will enable debug logs. Default=False.
""" + if not (is_qaicrt_imported and is_aicapi_imported): + raise ImportError( + "Unable to import `qaicrt` and/or `QAicApi_pb2` libraries required for executing QPC files on the CLOUD AI platform.\n" + "Please ensure that the QAIC platform SDK and apps SDK are installed correctly." + ) + + # Build dtype mapping once (depends on aicapi constants) + self.aic_to_np_dtype_mapping = { + aicapi.FLOAT_TYPE: np.dtype(np.float32), + aicapi.FLOAT_16_TYPE: np.dtype(np.float16), + aicapi.INT8_Q_TYPE: np.dtype(np.int8), + aicapi.UINT8_Q_TYPE: np.dtype(np.uint8), + aicapi.INT16_Q_TYPE: np.dtype(np.int16), + aicapi.INT32_Q_TYPE: np.dtype(np.int32), + aicapi.INT32_I_TYPE: np.dtype(np.int32), + aicapi.INT64_I_TYPE: np.dtype(np.int64), + aicapi.INT8_TYPE: np.dtype(np.int8), + } + # Load QPC if device_ids is not None: devices = qaicrt.QIDList(device_ids) @@ -77,7 +95,7 @@ def __init__( raise RuntimeError("Failed to getIoDescriptor") iodesc.ParseFromString(bytes(iodesc_data)) self.allowed_shapes = [ - [(aic_to_np_dtype_mapping[x.type].itemsize, list(x.dims)) for x in allowed_shape.shapes] + [(self.aic_to_np_dtype_mapping[x.type].itemsize, list(x.dims)) for x in allowed_shape.shapes] for allowed_shape in iodesc.allowed_shapes ] self.bindings = iodesc.selected_set.bindings @@ -97,7 +115,7 @@ def __init__( # Create input qbuffers and buf_dims self.qbuffers = [qaicrt.QBuffer(bytes(binding.size)) for binding in self.bindings] self.buf_dims = qaicrt.BufferDimensionsVecRef( - [(aic_to_np_dtype_mapping[binding.type].itemsize, list(binding.dims)) for binding in self.bindings] + [(self.aic_to_np_dtype_mapping[binding.type].itemsize, list(binding.dims)) for binding in self.bindings] ) @property @@ -205,6 +223,6 @@ def run(self, inputs: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: continue outputs[output_name] = np.frombuffer( bytes(output_qbuffers[buffer_index]), - aic_to_np_dtype_mapping[self.bindings[buffer_index].type], + self.aic_to_np_dtype_mapping[self.bindings[buffer_index].type], 
).reshape(self.buf_dims[buffer_index][1]) return outputs From be976228d10764791dbee5f2d0ff03a6d3a273fa Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Singh Date: Thu, 4 Dec 2025 14:08:41 +0530 Subject: [PATCH 39/60] Added memory and time optimization for onnx transforms (#640) ### Memory Optimization Added periodic memory cleanup to FP16ClipTransform and SplitTensorsTransform to reduce memory usage during large tensor processing. Also avoids redundant external data loading when already present. ### Time Optimized ONNX Transform via Class Merging and Thread Pooling It merges the FP16 and Split ONNX transform classes into a single implementation to eliminate redundant tensor loading and iteration. Additionally, the transform logic has been refactored to use a **thread pool**, replacing the previous sequential loop to parallelize tensor operations. #### Performance Benchmarks:- | Model | Original Duration (s) | Optimized Duration (s) | |----------------|------------------------|-------------------------| | LLaMA 3.1 8B | 88.35 | 58.55 | | LLaMA 3.1 70B | 1029.82 | 727.37 | > **Note:** Thread count is set to `os.cpu_count() * 4` to better handle I/O-bound workloads. Performance may vary depending on system hardware and threading capabilities. 
--------- Signed-off-by: abhishek-singh591 Signed-off-by: Dhiraj Kumar Sah --- QEfficient/base/modeling_qeff.py | 49 +-- QEfficient/base/onnx_transforms.py | 324 ++++++++++-------- QEfficient/exporter/export_utils.py | 5 +- QEfficient/peft/auto.py | 14 +- QEfficient/peft/onnx_transforms.py | 4 +- .../transformers/models/modeling_auto.py | 11 +- QEfficient/utils/_utils.py | 1 + QEfficient/utils/constants.py | 7 + tests/base/test_onnx_transforms.py | 16 +- 9 files changed, 255 insertions(+), 176 deletions(-) diff --git a/QEfficient/base/modeling_qeff.py b/QEfficient/base/modeling_qeff.py index 72f5c050e..ef7e83adf 100644 --- a/QEfficient/base/modeling_qeff.py +++ b/QEfficient/base/modeling_qeff.py @@ -19,7 +19,12 @@ import onnx import torch -from QEfficient.base.onnx_transforms import CustomOpTransform, OnnxTransform, RenameFunctionOutputsTransform +from QEfficient.base.onnx_transforms import ( + BaseOnnxTransform, + CustomOpTransform, + OnnxTransformPipeline, + RenameFunctionOutputsTransform, +) from QEfficient.base.pytorch_transforms import PytorchTransform from QEfficient.compile.qnn_compiler import compile as qnn_compile from QEfficient.generation.cloud_infer import QAICInferenceSession @@ -51,7 +56,7 @@ class QEFFBaseModel(ABC): """ _pytorch_transforms: List[PytorchTransform] - _onnx_transforms: List[OnnxTransform] + _onnx_transforms = [BaseOnnxTransform] @classmethod def _transform_names(cls) -> List[str]: @@ -82,26 +87,26 @@ def __init__(self, model: torch.nn.Module, **kwargs) -> None: else: logger.info(f"Pytorch transforms applied to model: {self.model_name}") - def _offload_model_weights(self, offload_pt_weights) -> bool: - """ - Clear PyTorch weights after export if offload_pt_weights is set to True - - Returns: - bool: True if weights were successfully offloaded, False otherwise - """ - # Check if offloading is enabled and weights are not already offloaded + def _offload_model_weights(self, offload_pt_weights: bool) -> bool: + """Clear PyTorch model 
weights to reduce memory usage after ONNX export.""" if offload_pt_weights and not self._is_weights_offloaded: try: - self.model = self.model.to_empty(device="meta") - self._is_weights_offloaded = True - logger.info("Model weights offloaded to meta device") - + for param in self.model.parameters(): + if param.storage(): + param.storage().resize_(0) + for buffer in self.model.buffers(): + if buffer.storage(): + buffer.storage().resize_(0) + + meta_model = self.model.to("meta") + del self.model gc.collect() - logger.info("PyTorch weights cleared after export") - return True + self.model = meta_model + self._is_weights_offloaded = True + return True except Exception as e: - logger.error(f"Failed to offload model weights: {e}") + logger.warning(f"Weight clearing failed, continuing: {e}") return False return False @@ -273,8 +278,9 @@ def _export( ) logger.info("PyTorch export successful") _ = self._offload_model_weights(offload_pt_weights) - model = onnx.load(tmp_onnx_path, load_external_data=False) + + # Clear temporary references transform_kwargs = { "onnx_base_dir": str(tmp_onnx_dir), "model_name": self.model_name, @@ -282,15 +288,18 @@ def _export( if onnx_transform_kwargs is not None: transform_kwargs.update(onnx_transform_kwargs) - for transform in self._onnx_transforms: - model, transformed = transform.apply(model, **transform_kwargs) + onnx_transforms = OnnxTransformPipeline(transforms=self._onnx_transforms) + model, transformed = onnx_transforms.apply(model, **transform_kwargs) + # Add metadata to the model model.metadata_props.append( onnx.StringStringEntryProto(key="qeff_transforms", value=",".join(self._transform_names())) ) logger.info("ONNX transforms applied") onnx.save(model, onnx_path) + del model + gc.collect() logger.info("Transformed ONNX saved") except Exception as e: diff --git a/QEfficient/base/onnx_transforms.py b/QEfficient/base/onnx_transforms.py index 1bc483eed..945850c50 100644 --- a/QEfficient/base/onnx_transforms.py +++ 
b/QEfficient/base/onnx_transforms.py @@ -5,11 +5,16 @@ # # ---------------------------------------------------------------------------- -from typing import Any, Dict, Optional, Tuple +import logging +import os +import warnings +from concurrent.futures import ThreadPoolExecutor, as_completed +from typing import Any, Dict, List, Optional, Tuple, Type import numpy as np +import onnx import torch -from onnx import ModelProto, external_data_helper, numpy_helper +from onnx import ModelProto, TensorProto, external_data_helper, numpy_helper from QEfficient.customop.ctx_scatter_gather import ( CtxGather, @@ -32,102 +37,53 @@ CtxScatterFuncCB3D, ) from QEfficient.customop.rms_norm import CustomRMSNorm, CustomRMSNormFunc +from QEfficient.utils.constants import FILE_CHUNK_SIZE_DEFAULT, ONNX_EXPORT_OPSET, SIZE_THRESHOLD_DEFAULT +logger = logging.getLogger(__name__) -class OnnxTransform: - """ - OnnxTransform is the base class for graph modifications on exported onnx. - """ + +class BaseOnnxTransform: + """Base class for ONNX graph modifications. Should NOT be instantiated.""" def __init__(self): - raise TypeError("Transform classes are not to be instantiated. Directly use the `apply` method.") + raise TypeError("Transform classes are not to be instantiated. Use the `apply` method directly.") @classmethod def apply(cls, model: ModelProto, **kwargs) -> Tuple[ModelProto, bool]: - """ - Override this class to apply a transformation. - :param model: The model's ONNX graph to transform - :param kwargs: Parameters needed for specific transforms. All transforms should take **kwargs to ignore unneeded kwargs. - - :returns: ONNX graph after applying the transform - :returns: Boolean indicating whether transform was applied - """ raise NotImplementedError("Use subclasses for ONNX transform") -class FP16ClipTransform(OnnxTransform): - """ - Clips the tensor values to be in FP16 range, but preserves -inf values. 
- """ +class FP16ClipTransform(BaseOnnxTransform): + """Clip FP32 tensors to FP16 range to avoid overflow during conversion.""" @classmethod - def apply(cls, model: ModelProto, *, onnx_base_dir: Optional[str] = None, **kwargs) -> Tuple[ModelProto, bool]: - """ - :param onnx_base_dir: Base directory to load tensors - """ - finfo = np.finfo(np.float16) - fp16_max = finfo.max - fp16_min = finfo.min - transformed = False + def apply(cls, tensor: TensorProto, onnx_base_dir: str, fp16_max: float, fp16_min: float) -> bool: + nptensor = numpy_helper.to_array(tensor, onnx_base_dir) + if nptensor.dtype == np.float32 and (np.any(nptensor > fp16_max) or np.any(nptensor < fp16_min)): + neg_inf_mask = np.isinf(nptensor) & (nptensor < 0) + clipped_tensor = np.clip(nptensor, fp16_min, fp16_max) - for tensor in external_data_helper._get_all_tensors(model): - nptensor = numpy_helper.to_array(tensor, onnx_base_dir) - if nptensor.dtype == np.float32 and (np.any(nptensor > fp16_max) or np.any(nptensor < fp16_min)): - neg_inf_mask = np.isinf(nptensor) & (nptensor < 0) - clipped_tensor = np.clip(nptensor, fp16_min, fp16_max) + if neg_inf_mask.any(): + clipped_tensor = np.where(neg_inf_mask, np.float32("-inf"), clipped_tensor) - # Restore -inf values - if neg_inf_mask.any(): - clipped_tensor = np.where(neg_inf_mask, np.float32("-inf"), clipped_tensor) + tensor.CopyFrom(numpy_helper.from_array(clipped_tensor, tensor.name)) + return True + return False - new_tensor = numpy_helper.from_array(clipped_tensor, tensor.name) - tensor.CopyFrom(new_tensor) - transformed = True - return model, transformed - - -class SplitTensorsTransform(OnnxTransform): - """ - Split external tensors file - """ +class SplitTensorsTransform(BaseOnnxTransform): + """Split large tensors into external data files for efficient storage.""" @classmethod def apply( - cls, - model: ModelProto, - *, - model_name: str, - onnx_base_dir: Optional[str] = None, - file_chunk_size: int = 10 * 2**30, # 10 GiB - size_threshold: int = 
1024, - **kwargs, - ) -> Tuple[ModelProto, bool]: - """ - :param model_name: Used for naming external files. i.e. {model_name}_0.onnx.data - :param onnx_base_dir: Base directory to load tensors (if not already loaded). - :param file_chunk_size: Chunk size to split external files into. - :param size_threshold: Only tensors greater than this threshold (in bytes) will be saved externally. - """ - file_num = 0 - current_file_size = 0 - transformed = False - external_data_helper.load_external_data_for_model(model, onnx_base_dir) - for tensor in external_data_helper._get_all_tensors(model): - if tensor.HasField("raw_data") and ((tsize := len(tensor.raw_data)) > size_threshold): - transformed = True - current_file_size += tsize - if current_file_size > file_chunk_size: - file_num += 1 - current_file_size = tsize - external_data_helper.set_external_data(tensor, f"{model_name}_{file_num}.onnx.data") - return model, transformed + cls, tensor: TensorProto, model_name: str, file_num: int, mapping: Dict[str, Tuple[TensorProto, str]] + ) -> None: + file_name = f"{model_name}_{file_num}.onnx.data" + mapping[tensor.name] = (tensor, file_name) -class CustomOpTransform(OnnxTransform): - """ - Transform to register custom operations and add their function protos to the ONNX model. - """ +class CustomOpTransform(BaseOnnxTransform): + """Register custom ONNX ops and append their function prototypes to the model.""" _custom_ops: Dict[str, Tuple[Any, Any]] = { "CustomRMSNormFunc": (CustomRMSNormFunc, CustomRMSNorm), @@ -142,80 +98,174 @@ class CustomOpTransform(OnnxTransform): } @classmethod - def register_custom_op(cls, op_name: str, func_class: Any, onnxscript_func: Any) -> None: - """Register a custom operation.""" - cls._custom_ops[op_name] = (func_class, onnxscript_func) - - @classmethod - def apply(cls, model: ModelProto, *, opset_version: int = 17, **kwargs) -> Tuple[ModelProto, bool]: - """ - Apply custom op registration and add all function protos to the model. 
- - :param model: The ONNX model to transform. - :param opset_version: ONNX opset version for symbolic registration. - :returns: (Transformed model, success flag). - """ - transformed = False - - # Register all custom op symbolic functions with torch.onnx + def apply(cls, model: ModelProto) -> bool: + op_applied = False for op_name, (func_class, _) in cls._custom_ops.items(): if hasattr(func_class, "symbolic"): - torch.onnx.register_custom_op_symbolic(f"::{op_name}", func_class.symbolic, opset_version) - - func_names = {func.name for func in model.functions} + torch.onnx.register_custom_op_symbolic(f"::{op_name}", func_class.symbolic, ONNX_EXPORT_OPSET) + existing = {f.name for f in model.functions} for _, onnxscript_func in cls._custom_ops.values(): proto = onnxscript_func.to_function_proto() - if proto.name not in func_names: + if proto.name not in existing: model.functions.append(proto) - transformed = True - - return model, transformed + op_applied = True + return op_applied -class RenameFunctionOutputsTransform(OnnxTransform): - """ - Renames function outputs in decoder layers by removing 'Internal' from '_InternalRetainedState' patterns. - """ +class RenameFunctionOutputsTransform(BaseOnnxTransform): + """Rename outputs of decoder-related functions for better clarity.""" @classmethod - def apply(cls, model: ModelProto, **kwargs) -> Tuple[ModelProto, bool]: - """ - Rename function outputs in decoder layer nodes. 
- - :param model: The ONNX model to transform - :returns: Transformed model and boolean indicating whether transform was applied - """ + def apply(cls, model: ModelProto) -> bool: graph = model.graph - op_type_to_func_map = {func.name: func for func in model.functions} - decoder_layer_patterns = ["DecoderLayer", "Block", "Layer"] - transformed = False + op_type_to_func = {f.name: f for f in model.functions} + decoder_patterns = ["DecoderLayer", "Block", "Layer"] + renamed = False + model_out_map = {v.name: i for i, v in enumerate(graph.output)} + layer_idx = 0 - # Create a dict mapping output name to its index for quick lookup - model_graph_outputs_map = {val.name: idx for idx, val in enumerate(model.graph.output)} - - layer_index = 0 for node in graph.node: - if any(pattern in node.name or pattern in node.op_type for pattern in decoder_layer_patterns): - func = op_type_to_func_map.get(node.op_type) - if func is None: + if any(p in node.name or p in node.op_type for p in decoder_patterns): + func = op_type_to_func.get(node.op_type) + if not func: continue - for i, out_name in enumerate(func.output): if "_InternalRetainedState" in out_name: - transformed = True - original_output_name = node.output[i] - - # Generate new name based on key/value - if "key" in out_name: - new_name = f"past_key.{layer_index}_RetainedState" - elif "value" in out_name: - new_name = f"past_value.{layer_index}_RetainedState" - node.output[i] = new_name - - # Update graph output name if it exists - if original_output_name in model_graph_outputs_map: - idx = model_graph_outputs_map[original_output_name] - model.graph.output[idx].name = new_name - layer_index += 1 + renamed = True + orig = node.output[i] + new = ( + f"past_key.{layer_idx}_RetainedState" + if "key" in out_name + else f"past_value.{layer_idx}_RetainedState" + if "value" in out_name + else orig + ) + node.output[i] = new + if orig in model_out_map: + graph.output[model_out_map[orig]].name = new + layer_idx += 1 + return renamed + 
+ +class AdapterWeightsToInputsTransform(BaseOnnxTransform): + @classmethod + def apply(cls, model: onnx.ModelProto, *, adapter_name: str, **kwargs) -> Tuple[onnx.ModelProto, bool]: + transformed = False + removed_initializers = [] + + # Find nodes with lora weights as inputs + weight_suffix = f".{adapter_name}.weight" + lora_weight_nodes = { + inp: node for node in model.graph.node for inp in node.input if inp.endswith(weight_suffix) + } + + for i, weight in enumerate(model.graph.initializer): + if weight.name.endswith(weight_suffix): + transformed = True + + # Create input/output for lora weights + new_weight_name = weight.name[: -len(weight_suffix)] + ".weight" + type_proto = onnx.helper.make_tensor_type_proto(weight.data_type, shape=list(weight.dims)) + inp = onnx.ValueInfoProto(name=new_weight_name, type=type_proto) + out = onnx.ValueInfoProto(name=new_weight_name + "_RetainedState", type=type_proto) + model.graph.input.append(inp) + model.graph.output.append(out) + + # Create a node that connects input -> output + node = onnx.helper.make_node("Identity", [inp.name], [out.name], new_weight_name + "_identity") + model.graph.node.append(node) + + # Rename weight input + lora_weight_node = lora_weight_nodes[weight.name] + for j, inp in enumerate(lora_weight_node.input): + if inp == weight.name: + lora_weight_node.input[j] = new_weight_name + + # Remove weight initializers + removed_initializers.append(i) + + if transformed: + for i in sorted(removed_initializers, reverse=True): + model.graph.initializer.pop(i) + return model, transformed + + +class OnnxTransformPipeline(BaseOnnxTransform): + """Pipeline to apply multiple ONNX transformations in sequence.""" + + def __init__(self, transforms: List[Type[BaseOnnxTransform]]): + if not transforms: + warnings.warn("Transform list is empty. 
No transformations will be applied.") + self.transforms = transforms + + def apply( + self, + model: ModelProto, + *, + model_name: str = "", + onnx_base_dir: Optional[str] = None, + file_chunk_size: int = FILE_CHUNK_SIZE_DEFAULT, + size_threshold: int = SIZE_THRESHOLD_DEFAULT, + **kwargs, + ) -> Tuple[ModelProto, bool]: + if not self.transforms: + return model, False + + # Same logic as before, but replace `transforms` with `self.transforms` + mapping: Dict[str, Tuple[TensorProto, str]] = {} + requested = set(self.transforms) + applied = {t: False for t in requested} + f16_applied = False + do_fp16 = FP16ClipTransform in requested + do_split = SplitTensorsTransform in requested + fp16_min, fp16_max = np.finfo(np.float16).min, np.finfo(np.float16).max + file_num_tracker = {"num": 0, "size": 0} + external_data_helper.load_external_data_for_model(model, onnx_base_dir) + + if do_fp16 or do_split: + for tensor in external_data_helper._get_all_tensors(model): + if do_fp16 and FP16ClipTransform.apply(tensor, onnx_base_dir, fp16_max, fp16_min): + f16_applied = True + applied[FP16ClipTransform] = f16_applied + + if do_split and tensor.HasField("raw_data"): + tsize = len(tensor.raw_data) + if tsize > size_threshold: + if file_num_tracker["size"] + tsize > file_chunk_size: + file_num_tracker["num"] += 1 + file_num_tracker["size"] = tsize + else: + file_num_tracker["size"] += tsize + applied[SplitTensorsTransform] = True + SplitTensorsTransform.apply(tensor, model_name, file_num_tracker["num"], mapping) + + def _set_external_data(tensor, file_name): + external_data_helper.set_external_data(tensor, file_name) + + max_workers = min(32, (os.cpu_count() or 1) * 4) + logger.info(f"Applying external data mapping with {max_workers} threads") + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = [executor.submit(_set_external_data, tensor, file_name) for tensor, file_name in mapping.values()] + for future in as_completed(futures): + try: + future.result() + 
except Exception as e: + logger.error(f"Failed to set external data: {e}") + + # Non-looping transforms + if CustomOpTransform in requested: + applied[CustomOpTransform] = CustomOpTransform.apply(model) + + if RenameFunctionOutputsTransform in requested: + applied[RenameFunctionOutputsTransform] = RenameFunctionOutputsTransform.apply(model) + + if AdapterWeightsToInputsTransform in requested: + applied[AdapterWeightsToInputsTransform] = AdapterWeightsToInputsTransform.apply(model, **kwargs) + + for t, done in applied.items(): + logger.info(f"Transform '{t.__name__}' applied={done}") + + return model, any(applied.values()) diff --git a/QEfficient/exporter/export_utils.py b/QEfficient/exporter/export_utils.py index eec756e4b..fac2441c8 100644 --- a/QEfficient/exporter/export_utils.py +++ b/QEfficient/exporter/export_utils.py @@ -17,7 +17,7 @@ import torch from onnx import external_data_helper -from QEfficient.base.onnx_transforms import FP16ClipTransform +from QEfficient.base.onnx_transforms import FP16ClipTransform, OnnxTransformPipeline from QEfficient.utils import constants @@ -219,7 +219,8 @@ def fix_onnx_fp16( :str: Updated base name of exported ONNX model. 
""" model = onnx.load(os.path.join(gen_models_path, f"{model_base_name}.onnx")) - model, fp16_fix = FP16ClipTransform.apply(model, onnx_base_dir=gen_models_path) + onnx_transforms = OnnxTransformPipeline(transforms=[FP16ClipTransform]) + model, fp16_fix = onnx_transforms.apply(model, model_name="", onnx_base_dir=gen_models_path) if fp16_fix: # Save FP16 model diff --git a/QEfficient/peft/auto.py b/QEfficient/peft/auto.py index 99d64cc2f..e69aebb2b 100644 --- a/QEfficient/peft/auto.py +++ b/QEfficient/peft/auto.py @@ -18,11 +18,15 @@ from transformers.generation.streamers import BaseStreamer from QEfficient.base.modeling_qeff import QEFFBaseModel -from QEfficient.base.onnx_transforms import FP16ClipTransform, OnnxTransform, SplitTensorsTransform +from QEfficient.base.onnx_transforms import ( + AdapterWeightsToInputsTransform, + BaseOnnxTransform, + FP16ClipTransform, + SplitTensorsTransform, +) from QEfficient.base.pytorch_transforms import PytorchTransform from QEfficient.generation.cloud_infer import QAICInferenceSession from QEfficient.peft.lora import QEffAutoLoraModelForCausalLM -from QEfficient.peft.onnx_transforms import AdapterWeightsToInputsTransform from QEfficient.peft.pytorch_transforms import PeftModelInputsTransform from QEfficient.transformers.models.pytorch_transforms import CustomOpsTransform, KVCacheTransform from QEfficient.utils import constants @@ -66,7 +70,11 @@ class QEffAutoPeftModelForCausalLM(QEFFBaseModel): """ _pytorch_transforms: List[PytorchTransform] = [CustomOpsTransform, KVCacheTransform, PeftModelInputsTransform] - _onnx_transforms: List[OnnxTransform] = [FP16ClipTransform, AdapterWeightsToInputsTransform, SplitTensorsTransform] + _onnx_transforms: List[BaseOnnxTransform] = [ + FP16ClipTransform, + AdapterWeightsToInputsTransform, + SplitTensorsTransform, + ] _hf_auto_class = AutoPeftModelForCausalLM def __init__(self, model: nn.Module): diff --git a/QEfficient/peft/onnx_transforms.py b/QEfficient/peft/onnx_transforms.py index 
d31d35243..c949e028b 100644 --- a/QEfficient/peft/onnx_transforms.py +++ b/QEfficient/peft/onnx_transforms.py @@ -9,10 +9,10 @@ import onnx -from QEfficient.base.onnx_transforms import OnnxTransform +from QEfficient.base.onnx_transforms import BaseOnnxTransform -class AdapterWeightsToInputsTransform(OnnxTransform): +class AdapterWeightsToInputsTransform(BaseOnnxTransform): @classmethod def apply(cls, model: onnx.ModelProto, *, adapter_name: str, **kwargs) -> Tuple[onnx.ModelProto, bool]: transformed = False diff --git a/QEfficient/transformers/models/modeling_auto.py b/QEfficient/transformers/models/modeling_auto.py index cbff5be91..829593b0a 100644 --- a/QEfficient/transformers/models/modeling_auto.py +++ b/QEfficient/transformers/models/modeling_auto.py @@ -27,10 +27,7 @@ import QEfficient from QEfficient.base.modeling_qeff import QEFFBaseModel -from QEfficient.base.onnx_transforms import ( - FP16ClipTransform, - SplitTensorsTransform, -) +from QEfficient.base.onnx_transforms import FP16ClipTransform, SplitTensorsTransform from QEfficient.base.pytorch_transforms import SplitGateUpWeightsTransform from QEfficient.generation.cloud_infer import QAICInferenceSession from QEfficient.generation.text_generation_inference import ( @@ -2307,10 +2304,8 @@ class QEFFAutoModelForCausalLM(QEFFBaseModel): SplitGateUpWeightsTransform, KVCacheExternalModuleMapperTransform, ] - _onnx_transforms = [ - FP16ClipTransform, - SplitTensorsTransform, - ] + + _onnx_transforms = [FP16ClipTransform, SplitTensorsTransform] def __init__( self, diff --git a/QEfficient/utils/_utils.py b/QEfficient/utils/_utils.py index 1fb0311eb..131a7fc26 100644 --- a/QEfficient/utils/_utils.py +++ b/QEfficient/utils/_utils.py @@ -568,6 +568,7 @@ def wrapper(self, *args, **kwargs): onnx_transform_kwargs=all_args.get("onnx_transform_kwargs", None), use_onnx_subfunctions=all_args.get("use_onnx_subfunctions", False), ) + export_dir = export_dir.with_name(export_dir.name + "-" + export_hash) kwargs["export_dir"] 
= export_dir self.export_hash = export_hash diff --git a/QEfficient/utils/constants.py b/QEfficient/utils/constants.py index 1504bdae5..d5e632584 100644 --- a/QEfficient/utils/constants.py +++ b/QEfficient/utils/constants.py @@ -84,9 +84,13 @@ def get_models_dir(): ONNX_EXPORT_EXAMPLE_TOP_PS = 0.80 ONNX_EXPORT_EXAMPLE_MIN_PS = 0.99 ONNX_EXPORT_OPSET = 17 +FILE_CHUNK_SIZE_DEFAULT = 10 * 2**30 # 10 GB +SIZE_THRESHOLD_DEFAULT = 1024 + COMPILER = ["/opt/qti-aic/exec/qaic-exec", "-aic-hw"] DEFAULT_AIC_HW_VERSION = "ai100" +ONNX_TRANSFORM_MEMORY_CLEANUP_INTERVAL = 100 # InternVL constants # Fixing the feature size with reference to OpenGVLab/InternVL2_5-1B, OpenGVLab/InternVL2_5-38B and OpenGVLab/InternVL2_5-78B @@ -128,6 +132,9 @@ def get_models_dir(): QWEN2_5_VL_HEIGHT = 354 QWEN2_5_VL_WIDTH = 536 +# Modules to cache while clearing the pytorch weights +CACHE_MODULES = ["get_output_names", "get_dummy_inputs", "get_onnx_dynamic_axes", "get_specializations"] + class Constants: # Export Constants. 
diff --git a/tests/base/test_onnx_transforms.py b/tests/base/test_onnx_transforms.py index 7e3ec066e..25a3b15d9 100644 --- a/tests/base/test_onnx_transforms.py +++ b/tests/base/test_onnx_transforms.py @@ -8,7 +8,11 @@ import numpy as np import onnx -from QEfficient.base.onnx_transforms import FP16ClipTransform, SplitTensorsTransform +from QEfficient.base.onnx_transforms import ( + FP16ClipTransform, + OnnxTransformPipeline, + SplitTensorsTransform, +) def test_fp16clip_transform(): @@ -32,7 +36,9 @@ def test_fp16clip_transform(): } """) onnx.checker.check_model(test_onnx, True, True, True) - transformed_onnx, transformed = FP16ClipTransform.apply(test_onnx) + + onnx_transforms = OnnxTransformPipeline(transforms=[FP16ClipTransform]) + transformed_onnx, transformed = onnx_transforms.apply(test_onnx, model_name="") assert transformed assert onnx.numpy_helper.to_array(transformed_onnx.graph.initializer[0]) == 65504.0 assert onnx.numpy_helper.to_array(transformed_onnx.graph.initializer[1]) == 2147483647 @@ -63,7 +69,8 @@ def test_fp16clip_transform_external(tmp_path): np.array(-1e10, dtype="float32").tofile(tmp_path / external_tensors_file) onnx.checker.check_model(onnx_path, True, True, True) - transformed_onnx, transformed = FP16ClipTransform.apply(test_onnx, onnx_base_dir=str(tmp_path)) + onnx_transforms = OnnxTransformPipeline(transforms=[FP16ClipTransform]) + transformed_onnx, transformed = onnx_transforms.apply(test_onnx, model_name="", onnx_base_dir=str(tmp_path)) assert transformed assert onnx.numpy_helper.to_array(transformed_onnx.graph.initializer[0]) == -65504.0 @@ -92,7 +99,8 @@ def test_split_tensors_transform(tmp_path): tensors.tofile(tmp_path / external_tensors_file) onnx.checker.check_model(onnx_path, True, True, True) - trans_onnx, transformed = SplitTensorsTransform.apply( + onnx_transforms = OnnxTransformPipeline(transforms=[SplitTensorsTransform]) + trans_onnx, transformed = onnx_transforms.apply( test_onnx, model_name="test_split", 
onnx_base_dir=str(tmp_path), From f7b33b38b52ff221c4395661ac74c16cd9bff9f9 Mon Sep 17 00:00:00 2001 From: vaibverm Date: Thu, 4 Dec 2025 03:04:05 -0800 Subject: [PATCH 40/60] Adding support for BlockedKV attention in CausalLM models (#618) ### Objective: This PR introduces the KV blocking technique for CausalLM models where the K/V cache is read and processed block by block in the attention computation. Number of desired KV blocks are defined at model initialization in the "from_pretrained" call to export the ONNX with required number of KV blocks. As a result, the following changes are introduced: ### Changes: 1. SoftMax needs to be changed from regular SoftMax to online SoftMax where the running maximum and cumulative denominators are tracked and updated once each block is processed to retain mathematical accuracy compared to regular SoftMax. 2. Changes to CTXGather and CTXGatherCB custom ops to read only 1 block worth of data in each cache gather/read. 3. Changes to read_only function in QEffDynamicCache to allow reading of a cache block by block rather than full K/V cache. 4. Generation of attention mask per block. 5. Changes to eager_attention_forward implementation in the llama model to allow BlockedKV attention and online SoftMax implementation. 6. Wrapping the num_kv_blocks variable inside qaic_config to keep consistent calling style. 7. A new PyTorch transform to pass the num_kv_blocks variable to QEffLlamaAttention block. 8. A new constant added for num_kv_blocks. 9. Added tests to switch the BlockedKV feature on and off. Please review and feel free to suggest changes and tests.
--------- Signed-off-by: Vaibhav Verma Co-authored-by: Hem Agnihotri Signed-off-by: Dhiraj Kumar Sah --- QEfficient/customop/__init__.py | 11 +- QEfficient/customop/ctx_scatter_gather.py | 26 ++++ QEfficient/customop/ctx_scatter_gather_cb.py | 38 +++++ QEfficient/transformers/cache_utils.py | 65 +++++++++ .../transformers/modeling_attn_mask_utils.py | 3 +- .../models/llama/modeling_llama.py | 108 +++++++++++++-- .../transformers/models/modeling_auto.py | 23 +++- .../transformers/models/pytorch_transforms.py | 21 +++ .../models/qwen2_5_vl/modeling_qwen2_5_vl.py | 130 ++++++++++++++++-- QEfficient/utils/constants.py | 1 + .../models/test_causal_lm_models.py | 41 +++++- 11 files changed, 441 insertions(+), 26 deletions(-) diff --git a/QEfficient/customop/__init__.py b/QEfficient/customop/__init__.py index ff0709f82..35830aa91 100644 --- a/QEfficient/customop/__init__.py +++ b/QEfficient/customop/__init__.py @@ -5,8 +5,15 @@ # # ----------------------------------------------------------------------------- -from QEfficient.customop.ctx_scatter_gather import CtxGatherFunc, CtxGatherFunc3D, CtxScatterFunc, CtxScatterFunc3D +from QEfficient.customop.ctx_scatter_gather import ( + CtxGatherFunc, + CtxGatherFunc3D, + CtxGatherFuncBlockedKV, + CtxScatterFunc, + CtxScatterFunc3D, +) from QEfficient.customop.ctx_scatter_gather_cb import ( + CtxGatherFuncBlockedKVCB, CtxGatherFuncCB, CtxGatherFuncCB3D, CtxScatterFuncCB, @@ -16,12 +23,14 @@ __all__ = [ "CtxGatherFunc", + "CtxGatherFuncBlockedKV", "CtxScatterFunc", "CtxGatherFunc3D", "CtxScatterFunc3D", "CustomRMSNormAIC", "GemmaCustomRMSNormAIC", "CtxGatherFuncCB", + "CtxGatherFuncBlockedKVCB", "CtxScatterFuncCB", "CtxGatherFuncCB3D", "CtxScatterFuncCB3D", diff --git a/QEfficient/customop/ctx_scatter_gather.py b/QEfficient/customop/ctx_scatter_gather.py index 269ccb0be..c7dc8639a 100644 --- a/QEfficient/customop/ctx_scatter_gather.py +++ b/QEfficient/customop/ctx_scatter_gather.py @@ -145,3 +145,29 @@ def setup_context(ctx, 
inputs, outputs): @staticmethod def symbolic(g: torch.Graph, data: torch.Value, ctx_indices: torch.Value, comp_ctx_len: int) -> torch.Value: return g.onnxscript_op(CtxGather, data, ctx_indices, comp_ctx_len).setTypeAs(data) + + +@onnxscript.script(onnxscript.values.Opset("com.qualcomm.cloud", 1)) +def CtxGatherBlockedKV(data: onnxscript.FLOAT, ctx_indices: onnxscript.INT32) -> onnxscript.FLOAT: + ctx_indices = ops.Unsqueeze(ctx_indices, [-1]) + return ops.GatherND(data, ctx_indices, batch_dims=2) + + +class CtxGatherFuncBlockedKV(torch.autograd.Function): + """ + Function to gather only the valid key values from KV-cache. + """ + + @staticmethod + def forward(data: torch.Tensor, ctx_indices: torch.Tensor): + batch_indices = torch.arange(data.shape[0]).view(-1, 1, 1) + head_indices = torch.arange(data.shape[1]).view(1, -1, 1) + return data[batch_indices, head_indices, ctx_indices] + + @staticmethod + def setup_context(ctx, inputs, outputs): + pass + + @staticmethod + def symbolic(g: torch.Graph, data: torch.Value, ctx_indices: torch.Value) -> torch.Value: + return g.onnxscript_op(CtxGatherBlockedKV, data, ctx_indices).setTypeAs(data) diff --git a/QEfficient/customop/ctx_scatter_gather_cb.py b/QEfficient/customop/ctx_scatter_gather_cb.py index cc9693716..8a06bc2b1 100644 --- a/QEfficient/customop/ctx_scatter_gather_cb.py +++ b/QEfficient/customop/ctx_scatter_gather_cb.py @@ -139,6 +139,44 @@ def symbolic( return g.onnxscript_op(CtxGatherCB, data, batch_index, ctx_indices, comp_ctx_len).setTypeAs(data) +@onnxscript.script(onnxscript.values.Opset("com.qualcomm.cloud", 1)) +def CtxGatherBlockedKVCB( + data: onnxscript.FLOAT, batch_index: onnxscript.INT32, ctx_indices: onnxscript.INT32 +) -> onnxscript.FLOAT: + batch_size = ops.Gather(ops.Shape(batch_index), [0]) + num_heads = ops.Gather(ops.Shape(data), [1]) + ctx_len = ops.Gather(ops.Shape(ctx_indices), [2]) + + # Expanded shape to create indices + zero = ops.Constant(value_ints=[0]) + one = 
ops.Constant(value_ints=[1]) + exp_shape = ops.Concat(batch_size, num_heads, ctx_len, one, axis=0) + + # Create indices + batch_idx = ops.Expand(ops.Unsqueeze(batch_index, [2, 3]), exp_shape) + head_idx = ops.Expand(ops.Unsqueeze(ops.Range(zero, num_heads, one), [0, 2, 3]), exp_shape) + ctx_idx = ops.Expand(ops.Unsqueeze(ctx_indices, [3]), exp_shape) + indices = ops.Concat(batch_idx, head_idx, ctx_idx, axis=3) + + return ops.GatherND(data, indices) + + +class CtxGatherFuncBlockedKVCB(torch.autograd.Function): + @staticmethod + def forward(data: torch.Tensor, batch_index: torch.Tensor, ctx_indices: torch.Tensor): + batch_indices = batch_index.view(-1, 1, 1) + head_indices = torch.arange(data.shape[1]).view(1, -1, 1) + return data[batch_indices, head_indices, ctx_indices] + + @staticmethod + def setup_context(ctx, inputs, outputs): + pass + + @staticmethod + def symbolic(g: torch.Graph, data: torch.Value, batch_index: torch.Value, ctx_indices: torch.Value) -> torch.Value: + return g.onnxscript_op(CtxGatherBlockedKVCB, data, batch_index, ctx_indices).setTypeAs(data) + + @onnxscript.script(onnxscript.values.Opset("com.qualcomm.cloud", 1)) def CtxGatherCB3D( data: onnxscript.FLOAT, batch_index: onnxscript.INT32, ctx_indices: onnxscript.INT32 diff --git a/QEfficient/transformers/cache_utils.py b/QEfficient/transformers/cache_utils.py index 292fe0487..62cc71a4c 100644 --- a/QEfficient/transformers/cache_utils.py +++ b/QEfficient/transformers/cache_utils.py @@ -15,6 +15,8 @@ from QEfficient.customop import ( CtxGatherFunc, CtxGatherFunc3D, + CtxGatherFuncBlockedKV, + CtxGatherFuncBlockedKVCB, CtxGatherFuncCB, CtxGatherFuncCB3D, CtxScatterFunc, @@ -87,6 +89,50 @@ def read_only(self, cache_kwargs): v_out = torch.where(invalid_mask.unsqueeze(-1), torch.tensor(0.0, dtype=torch.float32), v_out) return k_out, v_out + def read_only_blockedKV(self, start_index, end_index, cache_kwargs): + """ + Reads the `key_states` and `value_states` for the layer for each KV block. 
+ + Parameters: + cache_kwargs (`Dict[str, Any]`, `optional`): + Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`. + + start_index (`int`): + Start index of the K/V block to read + + end_index (`int`): + End index of the K/V block to read + + Return: + A tuple containing the updated key and value states. + """ + # Gather + k_out, v_out = self.keys, self.values + position_ids = cache_kwargs.get("position_ids") + batch_index = cache_kwargs.get("batch_index", None) + batch, num_kv_heads, _, _ = k_out.shape + ctx_indices = torch.arange(start=start_index, end=end_index)[None, None, ...] + gather_limit = position_ids.max(1, keepdim=True).values.unsqueeze(1) + invalid_mask = ctx_indices > gather_limit + + if torch.onnx.is_in_onnx_export(): + invalid_idx_value = torch.iinfo(torch.int32).max + else: + invalid_idx_value = 0 + + ctx_indices = torch.where(invalid_mask, invalid_idx_value, ctx_indices) + + if batch_index is not None: + k_out = CtxGatherFuncBlockedKVCB.apply(k_out, batch_index, ctx_indices) + v_out = CtxGatherFuncBlockedKVCB.apply(v_out, batch_index, ctx_indices) + else: + ctx_indices = ctx_indices.expand(batch, num_kv_heads, ctx_indices.shape[-1]) + k_out = CtxGatherFuncBlockedKV.apply(k_out, ctx_indices) + v_out = CtxGatherFuncBlockedKV.apply(v_out, ctx_indices) + + v_out = torch.where(invalid_mask.unsqueeze(-1), torch.tensor(0.0, dtype=torch.float32), v_out) + return k_out, v_out + def write_only(self, key_states, value_states, cache_kwargs): """ Write in the cache with the new `key_states` and `value_states` for the layer. @@ -284,6 +330,25 @@ def read_only(self, layer_idx, cache_kwargs): """ return self.layers[layer_idx].read_only(cache_kwargs) + def read_only_blockedKV(self, start_index, end_index, layer_idx, cache_kwargs): + """ + Reads the `key_states` and `value_states` for the layer `layer_idx`. 
+ + Parameters: + start_index (`int`): + Start index of the K/V block to read + end_index (`int`): + End index of the K/V block to read + layer_idx (`int`): + The index of the layer to cache the states for. + cache_kwargs (`Dict[str, Any]`, `optional`): + Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`. + + Return: + A tuple containing the updated key and value states. + """ + return self.layers[layer_idx].read_only_blockedKV(start_index, end_index, cache_kwargs) + def write_only(self, key_states, value_states, layer_idx, cache_kwargs): """ Write in the cache with the new `key_states` and `value_states` for the layer `layer_idx`. diff --git a/QEfficient/transformers/modeling_attn_mask_utils.py b/QEfficient/transformers/modeling_attn_mask_utils.py index 4faedba33..629c10dd6 100644 --- a/QEfficient/transformers/modeling_attn_mask_utils.py +++ b/QEfficient/transformers/modeling_attn_mask_utils.py @@ -14,6 +14,7 @@ def _create_causal_mask( position_ids, target_length, sliding_window: Optional[int] = None, + start_index: Optional[int] = 0, ): """ A utility attention mask class that allows one to: @@ -40,7 +41,7 @@ def _create_causal_mask( attention_mask = attention_mask.unsqueeze(1) else: query_indices = position_ids.unsqueeze(-1) - kv_indices = torch.arange(target_length).view(1, 1, -1) + kv_indices = torch.arange(start=start_index, end=target_length).view(1, 1, -1) attention_mask = kv_indices > query_indices attention_mask = attention_mask.unsqueeze(1) diff --git a/QEfficient/transformers/models/llama/modeling_llama.py b/QEfficient/transformers/models/llama/modeling_llama.py index 73b947dba..fb3aed556 100644 --- a/QEfficient/transformers/models/llama/modeling_llama.py +++ b/QEfficient/transformers/models/llama/modeling_llama.py @@ -5,7 +5,7 @@ # # ----------------------------------------------------------------------------- -from typing import List, Optional, Tuple, Union +from typing import Any, Dict, List, Optional, 
Tuple, Union import torch from torch import nn @@ -113,6 +113,7 @@ def eager_attention_forward( attn_weights = torch.where( attention_mask, torch.tensor(MIN_MASKED_ATTENTION_VALUE, dtype=torch.float32), attn_weights ) + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() @@ -120,6 +121,80 @@ def eager_attention_forward( return attn_output, attn_weights +def eager_attention_forward_blockedKV( + module: nn.Module, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attention_mask: Optional[torch.Tensor], + scaling: float, + num_kv_blocks: Optional[torch.Tensor] = None, + cache_kwargs: Optional[Dict[str, Any]] = None, + layer_idx: int = None, + past_key_value: Optional[Cache] = None, + **kwargs, +): + # Initialize result tensor + output = torch.zeros_like(query) + + # Initialize Running Maximum + batch_size, num_heads, seq_len, _ = query.shape + current_max = torch.full((batch_size, num_heads, seq_len), float(MIN_MASKED_ATTENTION_VALUE)) + + # Initialize Denominator + current_denominator = torch.zeros(batch_size, num_heads, seq_len) + + past_seen_tokens = cache_kwargs.get("past_seen_tokens") + position_ids = cache_kwargs.get("position_ids") + block_size = -(-past_seen_tokens // num_kv_blocks) + masked_tensor = torch.tensor(MIN_MASKED_ATTENTION_VALUE, dtype=torch.float32) + + for j in range(num_kv_blocks): + start_index = j * block_size + end_index = (j + 1) * block_size + K_block, V_block = past_key_value.read_only_blockedKV(start_index, end_index, layer_idx, cache_kwargs) + K_block_states = repeat_kv(K_block, module.num_key_value_groups) + V_block_states = repeat_kv(V_block, module.num_key_value_groups) + past_seen_tokens_start = start_index + past_seen_tokens_end = torch.where( + torch.tensor(past_seen_tokens, dtype=torch.int) < torch.tensor(end_index, dtype=torch.int), + past_seen_tokens, + 
end_index, + ) + causal_mask_block = _create_causal_mask( + position_ids=position_ids, target_length=past_seen_tokens_end, start_index=past_seen_tokens_start + ) + + # Compute attention scores for the block + attn_weights_block = torch.matmul(query, K_block_states.transpose(2, 3)) * scaling + if attention_mask is not None: + attn_weights_block = torch.where(causal_mask_block, masked_tensor, attn_weights_block) + + # Update Running row maximum + prev_max = current_max + current_max = torch.max(prev_max, attn_weights_block.max(dim=-1).values) + delta_max = prev_max - current_max + + current_exp = torch.exp( + attn_weights_block - current_max.unsqueeze(-1) + ) # Subract current_max from each column of attn_weights_block + + # update running denominator + prev_denominator = current_denominator + current_denominator = prev_denominator * torch.exp(delta_max) + current_exp.sum(axis=-1) + + prob = current_exp / current_denominator.unsqueeze(-1) + + prev_output = output + output = ((prev_denominator / current_denominator).unsqueeze(-1)) * prev_output * torch.exp( + delta_max.unsqueeze(-1) + ) + torch.matmul(prob, V_block_states) + attn_output = output.transpose(1, 2).contiguous() + attn_weights = None + + return attn_output, attn_weights + + class QEffLlamaAttention(LlamaAttention): """Multi-headed attention from 'Attention Is All You Need' paper""" @@ -136,6 +211,7 @@ def forward( batch_index: Optional[torch.LongTensor] = None, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, + num_kv_blocks: Optional[torch.Tensor] = None, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] @@ -151,17 +227,29 @@ def forward( value_states = self.v_proj(hidden_states, **kwargs).view(hidden_shape).transpose(1, 2) kv_seq_len = past_key_value.get_seq_length(self.layer_idx, cache_position) + past_seen_tokens = past_key_value.get_seq_length() if past_key_value is not None else 0 cos, sin = 
self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = qeff_apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) if past_key_value is not None: - cache_kwargs = {"batch_index": batch_index, "position_ids": position_ids} - if comp_ctx_lengths is not None: - attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] - cache_kwargs["CCL"] = attention_mask.shape[-1] - key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) - - attention_interface = eager_attention_forward + if num_kv_blocks is not None: + cache_kwargs = { + "batch_index": batch_index, + "position_ids": position_ids, + "past_seen_tokens": past_seen_tokens, + } + past_key_value.write_only(key_states, value_states, self.layer_idx, cache_kwargs) + else: + cache_kwargs = {"batch_index": batch_index, "position_ids": position_ids} + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + if num_kv_blocks is not None: + attention_interface = eager_attention_forward_blockedKV + else: + attention_interface = eager_attention_forward attn_output, attn_weights = attention_interface( self, @@ -170,6 +258,10 @@ def forward( value_states, attention_mask, scaling=self.scaling, + num_kv_blocks=num_kv_blocks, + cache_kwargs=cache_kwargs, + layer_idx=self.layer_idx, + past_key_value=past_key_value, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() diff --git a/QEfficient/transformers/models/modeling_auto.py b/QEfficient/transformers/models/modeling_auto.py index 829593b0a..f3618cb1e 100644 --- a/QEfficient/transformers/models/modeling_auto.py +++ b/QEfficient/transformers/models/modeling_auto.py @@ -39,6 +39,7 @@ from QEfficient.generation.vlm_generation import VisionLanguageGeneration from 
QEfficient.transformers.modeling_utils import DYNAMIC_SEQ_LEN_SUPPORTED_MODEL_ARCH from QEfficient.transformers.models.pytorch_transforms import ( + BlockedKVAttentionTransform, CustomOpsTransform, KVCacheExternalModuleMapperTransform, KVCacheTransform, @@ -748,7 +749,7 @@ class QEffCausalLMForTextImageToTextModel(QEFFBaseModel): ] _onnx_transforms = [FP16ClipTransform, SplitTensorsTransform] - def __init__(self, model, **kwargs): + def __init__(self, model, qaic_config, **kwargs): """ Initializes the language decoder component for multimodal models. @@ -756,13 +757,20 @@ def __init__(self, model, **kwargs): ---------- model : nn.Module The full HuggingFace multimodal model from which the language decoder is extracted. + qaic_config : dict, optional + A dictionary for QAIC-specific configurations. Supported keys include: + - **num_kv_blocks** (int): Number of K/V blocks for BlockedKV attention implementation. **kwargs : Additional keyword arguments passed to the base class constructor. """ super().__init__(model, **kwargs) self.model = model.get_qeff_language_decoder() + self.model.qaic_config = qaic_config self.hash_params["qeff_auto_class"] = self.__class__.__name__ + if self.model.qaic_config is not None and self.model.qaic_config.get("num_kv_blocks", None) is not None: + BlockedKVAttentionTransform.apply(self.model, num_kv_blocks=self.model.qaic_config.get("num_kv_blocks")) + def export( self, inputs, @@ -932,7 +940,7 @@ def __init__( self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = process_ccl_specializations(qaic_config) self.vision_model = QEffVisionEncoderForTextImageToTextModel(model, **kwargs) - self.lang_model = QEffCausalLMForTextImageToTextModel(model, **kwargs) + self.lang_model = QEffCausalLMForTextImageToTextModel(model, qaic_config=qaic_config, **kwargs) self.continuous_batching = continuous_batching self.input_shapes, self.output_names = None, None @@ -1607,6 +1615,9 @@ def __init__( ---------- model : nn.Module The full HuggingFace 
multimodal model. + qaic_config : dict, optional + A dictionary for QAIC-specific configurations. Supported keys include: + - **num_kv_blocks** (int): Number of K/V blocks for BlockedKV attention implementation. **kwargs : Additional keyword arguments. `full_batch_size` is not supported here. @@ -1620,6 +1631,7 @@ def __init__( super().__init__(model, **kwargs) self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = process_ccl_specializations(qaic_config) + self.model.qaic_config = qaic_config # to handle internvl models if hasattr(self.model.config, "llm_config") and hasattr(self.model.config, "vision_config"): @@ -1633,6 +1645,9 @@ def __init__( self.model.config.use_cache = True self.hash_params["qeff_auto_class"] = self.__class__.__name__ + if self.model.qaic_config is not None and self.model.qaic_config.get("num_kv_blocks", None) is not None: + BlockedKVAttentionTransform.apply(self.model, num_kv_blocks=self.model.qaic_config.get("num_kv_blocks")) + @classmethod def from_pretrained( cls, @@ -2331,6 +2346,7 @@ def __init__( - **return_pdfs** (bool): If True, returns probability distributions along with sampled tokens. For Speculative Decoding Target Language Models, this is always True. - **max_top_k_ids** (int): Maximum number of top K tokens (<= vocab size) to consider during sampling. + - **num_kv_blocks** (int): Number of K/V blocks for BlockedKV attention implementation. **kwargs : Additional keyword arguments passed to the base class constructor. 
@@ -2379,6 +2395,9 @@ def __init__( if self.is_tlm: self.model.qaic_config["return_pdfs"] = True + if self.model.qaic_config is not None and self.model.qaic_config.get("num_kv_blocks", None) is not None: + BlockedKVAttentionTransform.apply(self.model, num_kv_blocks=self.model.qaic_config.get("num_kv_blocks")) + @property def model_name(self) -> str: """ diff --git a/QEfficient/transformers/models/pytorch_transforms.py b/QEfficient/transformers/models/pytorch_transforms.py index c7c9d5e25..21a867eb5 100644 --- a/QEfficient/transformers/models/pytorch_transforms.py +++ b/QEfficient/transformers/models/pytorch_transforms.py @@ -6,6 +6,7 @@ # ----------------------------------------------------------------------------- import warnings +from functools import partial from types import MethodType from typing import Callable, Optional, Tuple, Union @@ -850,3 +851,23 @@ def get_decoder_layer_classes_for_export(model: nn.Module) -> set: model_decoder_classes.add(module.__class__) return model_decoder_classes + + +class BlockedKVAttentionTransform: + _module_mapping = { + QEffLlamaAttention, + QEffQwen2_5_VLAttention, + } + + @classmethod + def apply(cls, model: nn.Module, num_kv_blocks) -> Tuple[nn.Module, bool]: + transformed = False + for module in model.modules(): + if type(module) in cls._module_mapping: + repl_module = type(module) + module.__class__ = repl_module + module.forward = MethodType(partial(repl_module.forward, num_kv_blocks=num_kv_blocks), module) + transformed = True # Set to True if at least one transformation occurs + elif module.__class__.__name__.endswith("Attention") and type(module) not in cls._module_mapping: + warnings.warn(f"KV blocking is not yet supported for {type(module)}.") + return model, transformed diff --git a/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py b/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py index baffb44c5..33a434db1 100644 --- 
a/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +++ b/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py @@ -7,7 +7,7 @@ import math import os -from typing import Callable, List, Optional, Tuple, Union +from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn @@ -361,6 +361,79 @@ def forward(self, x, seq_len=None): ) +def eager_attention_forward_blockedKV( + module: nn.Module, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attention_mask: Optional[torch.Tensor], + num_kv_blocks: Optional[torch.Tensor] = None, + cache_kwargs: Optional[Dict[str, Any]] = None, + layer_idx: int = None, + past_key_value: Optional[Cache] = None, + **kwargs, +): + # Initialize result tensor + output = torch.zeros_like(query) + + # Initialize Running Maximum + batch_size, num_heads, seq_len, _ = query.shape + current_max = torch.full((batch_size, num_heads, seq_len), float(MIN_MASKED_ATTENTION_VALUE)) + + # Initialize Denominator + current_denominator = torch.zeros(batch_size, num_heads, seq_len) + + past_seen_tokens = cache_kwargs.get("past_seen_tokens") + position_ids = cache_kwargs.get("position_ids") + block_size = -(-past_seen_tokens // num_kv_blocks) + masked_tensor = torch.tensor(MIN_MASKED_ATTENTION_VALUE, dtype=torch.float32) + + for j in range(num_kv_blocks): + start_index = j * block_size + end_index = (j + 1) * block_size + K_block, V_block = past_key_value.read_only_blockedKV(start_index, end_index, layer_idx, cache_kwargs) + K_block_states = repeat_kv(K_block, module.num_key_value_groups) + V_block_states = repeat_kv(V_block, module.num_key_value_groups) + past_seen_tokens_start = start_index + past_seen_tokens_end = torch.where( + torch.tensor(past_seen_tokens, dtype=torch.int) < torch.tensor(end_index, dtype=torch.int), + past_seen_tokens, + end_index, + ) + causal_mask_block = _create_causal_mask( + position_ids=position_ids, target_length=past_seen_tokens_end, 
start_index=past_seen_tokens_start + ) + + # Compute attention scores for the block + attn_weights_block = torch.matmul(query, K_block_states.transpose(2, 3)) / math.sqrt(module.head_dim) + if attention_mask is not None: + attn_weights_block = torch.where(causal_mask_block, masked_tensor, attn_weights_block) + + # Update Running row maximum + prev_max = current_max + current_max = torch.max(prev_max, attn_weights_block.max(dim=-1).values) + delta_max = prev_max - current_max + + current_exp = torch.exp( + attn_weights_block - current_max.unsqueeze(-1) + ) # Subract current_max from each column of attn_weights_block + + # update running denominator + prev_denominator = current_denominator + current_denominator = prev_denominator * torch.exp(delta_max) + current_exp.sum(axis=-1) + + prob = current_exp / current_denominator.unsqueeze(-1) + + prev_output = output + output = ((prev_denominator / current_denominator).unsqueeze(-1)) * prev_output * torch.exp( + delta_max.unsqueeze(-1) + ) + torch.matmul(prob, V_block_states) + attn_output = output.transpose(1, 2).contiguous() + attn_weights = None + + return attn_output, attn_weights + + def eager_attention_forward_q_blocked( module: nn.Module, query: torch.Tensor, @@ -440,6 +513,10 @@ def eager_attention_forward( key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], + num_kv_blocks: Optional[torch.Tensor] = None, + cache_kwargs: Optional[Dict[str, Any]] = None, + layer_idx: int = None, + past_key_value: Optional[Cache] = None, **kwargs, ): """ @@ -449,6 +526,19 @@ def eager_attention_forward( if blocking_mode == "q": return eager_attention_forward_q_blocked(module, query, key, value, attention_mask, **kwargs) + elif blocking_mode != "q" and num_kv_blocks is not None: + return eager_attention_forward_blockedKV( + module, + query, + key, + value, + attention_mask, + cache_kwargs=cache_kwargs, + num_kv_blocks=num_kv_blocks, + layer_idx=layer_idx, + past_key_value=past_key_value, + **kwargs, + ) 
elif blocking_mode == "default": # Original implementation key_states = repeat_kv(key, module.num_key_value_groups) @@ -490,6 +580,7 @@ def forward( output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, + num_kv_blocks: Optional[torch.Tensor] = None, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() @@ -504,6 +595,7 @@ def forward( kv_seq_len = key_states.shape[-2] kv_seq_len = past_key_value.get_seq_length(self.layer_idx, cache_position) + past_seen_tokens = past_key_value.get_seq_length() if past_key_value is not None else 0 cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) @@ -512,17 +604,27 @@ def forward( ) if past_key_value is not None: - # sin and cos are specific to RoPE models; cache_position needed for the static cache - cache_kwargs = { - "sin": sin, - "cos": cos, - "batch_index": batch_index, - "position_ids": position_ids[0], - } - if comp_ctx_lengths is not None: - attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] - cache_kwargs["CCL"] = attention_mask.shape[-1] - key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + if num_kv_blocks is not None: + cache_kwargs = { + "sin": sin, + "cos": cos, + "batch_index": batch_index, + "position_ids": position_ids[0], + "past_seen_tokens": past_seen_tokens, + } + past_key_value.write_only(key_states, value_states, self.layer_idx, cache_kwargs) + else: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = { + "sin": sin, + "cos": cos, + "batch_index": batch_index, + "position_ids": position_ids[0], + } + if comp_ctx_lengths is not None: + attention_mask = attention_mask[:, :, :, : comp_ctx_lengths.shape[-1]] + cache_kwargs["CCL"] = attention_mask.shape[-1] + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, 
cache_kwargs) attention_interface: Callable = eager_attention_forward @@ -532,6 +634,10 @@ def forward( key_states, value_states, attention_mask, + num_kv_blocks=num_kv_blocks, + cache_kwargs=cache_kwargs, + layer_idx=self.layer_idx, + past_key_value=past_key_value, **kwargs, ) diff --git a/QEfficient/utils/constants.py b/QEfficient/utils/constants.py index d5e632584..3752db40c 100644 --- a/QEfficient/utils/constants.py +++ b/QEfficient/utils/constants.py @@ -146,6 +146,7 @@ class Constants: MAX_QPC_LIMIT = 30 MAX_RETRIES = 10 # This constant will be used set the maximum number of retry attempts for downloading a model using huggingface_hub snapshot_download NUM_SPECULATIVE_TOKENS = 2 + NUM_KV_BLOCKS = 8 MAX_TOP_K_IDS = ONNX_EXPORT_EXAMPLE_MAX_TOP_K_IDS SAMPLER_OPS = { "repetition_penalties", diff --git a/tests/transformers/models/test_causal_lm_models.py b/tests/transformers/models/test_causal_lm_models.py index 321a466ab..ead636759 100644 --- a/tests/transformers/models/test_causal_lm_models.py +++ b/tests/transformers/models/test_causal_lm_models.py @@ -67,6 +67,11 @@ "Qwen/Qwen2-0.5B", ] +test_models_blockedKV = [ + # "meta-llama/Llama-3.3-70B-Instruct", + "meta-llama/Llama-3.2-1B", +] + def get_custom_n_layers(model_name): """ @@ -147,6 +152,7 @@ def check_causal_lm_pytorch_vs_kv_vs_ort_vs_ai100( qnn_config: Optional[str] = None, config: Optional[AutoConfig] = None, pytorch_hf_tokens: Optional[list] = None, + qaic_config: Optional[dict] = None, ): """ Validate the PyTorch model, the PyTorch model after KV changes, the ONNX model, and the Cloud AI 100 model, both with and without continuous batching. 
@@ -179,7 +185,7 @@ def check_causal_lm_pytorch_vs_kv_vs_ort_vs_ai100( is_tlm = False if num_speculative_tokens is None else True qeff_model = QEFFAutoModelForCausalLM( - copy.deepcopy(model_hf), is_tlm=is_tlm, pretrained_model_name_or_path=model_name + copy.deepcopy(model_hf), is_tlm=is_tlm, pretrained_model_name_or_path=model_name, qaic_config=qaic_config ) pytorch_kv_tokens = api_runner.run_kv_model_on_pytorch(qeff_model.model) @@ -243,7 +249,11 @@ def check_causal_lm_pytorch_vs_kv_vs_ort_vs_ai100( pytorch_hf_tokens = [pytorch_hf_tokens for _ in range(full_batch_size)] qeff_model = QEFFAutoModelForCausalLM( - model_hf, continuous_batching=True, is_tlm=is_tlm, pretrained_model_name_or_path=model_name + model_hf, + continuous_batching=True, + is_tlm=is_tlm, + pretrained_model_name_or_path=model_name, + qaic_config=qaic_config, ) onnx_model_path = qeff_model.export() @@ -488,3 +498,30 @@ def test_prefiill_only_pytorch_vs_kv_vs_ort_vs_ai100_qnn(): check_causal_lm_pytorch_vs_kv_vs_ort_vs_ai100( model_name, n_layer=n_layer, prefill_only=False, enable_qnn=True, qnn_config=qnn_config_json_path ) + + +@pytest.mark.on_qaic +@pytest.mark.parametrize("model_name", test_models_blockedKV) +def test_causal_blockedKV_pytorch_vs_kv_vs_ort_vs_ai100(model_name): + """ + Test function to validate the PyTorch model for KV blocking, the PyTorch model after KV changes, the ONNX model, and the Cloud AI 100 model, both with and without continuous batching. 
+ ``Mandatory`` Args: + :model_name (str): Hugging Face Model Card name, Example: ``gpt2`` + """ + n_layer = get_custom_n_layers(model_name) + + qaic_config = dict(num_kv_blocks=Constants.NUM_KV_BLOCKS) + check_causal_lm_pytorch_vs_kv_vs_ort_vs_ai100(model_name=model_name, n_layer=n_layer, qaic_config=qaic_config) + + +@pytest.mark.on_qaic +@pytest.mark.parametrize("model_name", test_models_blockedKV) +def test_causal_nonBlockedKV_pytorch_vs_kv_vs_ort_vs_ai100(model_name): + """ + Test function to validate the PyTorch model for KV blocking, the PyTorch model after KV changes, the ONNX model, and the Cloud AI 100 model, both with and without continuous batching. + ``Mandatory`` Args: + :model_name (str): Hugging Face Model Card name, Example: ``gpt2`` + """ + n_layer = get_custom_n_layers(model_name) + + check_causal_lm_pytorch_vs_kv_vs_ort_vs_ai100(model_name=model_name, n_layer=n_layer) From 66c9f9be62480bcd636c15b3cf335354d22f56a9 Mon Sep 17 00:00:00 2001 From: asmigosw Date: Fri, 5 Dec 2025 13:57:48 +0530 Subject: [PATCH 41/60] Continuous Batching for VLMs (#610) Adding CB support for VLMs: 1. Llava 2. Llava_Next 3. Gemma3 4. Mistral3 5. InternVL2_5 6. InternVL3_5 7. 
Molmo --------- Signed-off-by: Asmita Goswami Co-authored-by: Mamta Singh <168400541+quic-mamta@users.noreply.github.com> Co-authored-by: Hem Agnihotri Signed-off-by: Dhiraj Kumar Sah --- QEfficient/generation/embedding_handler.py | 158 +++- QEfficient/generation/vlm_generation.py | 10 + .../models/gemma3/modeling_gemma3.py | 119 ++- .../models/internvl/modeling_internvl.py | 115 ++- .../models/llama4/modeling_llama4.py | 4 +- .../models/llava/modeling_llava.py | 129 +++- .../models/llava_next/modeling_llava_next.py | 146 ++-- .../models/mistral3/modeling_mistral3.py | 134 ++-- .../transformers/models/modeling_auto.py | 5 + .../models/molmo/modeling_molmo.py | 91 ++- .../models/qwen2_5_vl/modeling_qwen2_5_vl.py | 4 +- QEfficient/utils/constants.py | 10 + QEfficient/utils/run_utils.py | 131 ++++ .../granite_vision/continuous_batching.py | 67 ++ .../models/internvl/continuous_batching.py | 100 +++ .../test_continuous_batching.py | 720 ++++++++++++++++++ .../test_image_text_to_text_models.py | 0 17 files changed, 1710 insertions(+), 233 deletions(-) create mode 100644 examples/image_text_to_text/models/granite_vision/continuous_batching.py create mode 100644 examples/image_text_to_text/models/internvl/continuous_batching.py create mode 100644 tests/transformers/models/image_text_to_text/test_continuous_batching.py rename tests/transformers/models/{ => image_text_to_text}/test_image_text_to_text_models.py (100%) diff --git a/QEfficient/generation/embedding_handler.py b/QEfficient/generation/embedding_handler.py index 76da7afc2..e07b5dd04 100644 --- a/QEfficient/generation/embedding_handler.py +++ b/QEfficient/generation/embedding_handler.py @@ -12,15 +12,17 @@ operations, separating them from the main text generation logic. 
""" -from typing import Any, Dict, Optional, Tuple +from io import BytesIO +from typing import Any, Dict, List, Optional, Tuple import numpy as np import requests import torch from PIL import Image -from transformers import AutoImageProcessor +from transformers import AutoImageProcessor, AutoTokenizer from QEfficient.generation.cloud_infer import QAICInferenceSession +from QEfficient.utils import constants from QEfficient.utils.logging_utils import logger @@ -37,6 +39,9 @@ def __init__( qeff_model: Optional[QAICInferenceSession], vision_session: Optional[QAICInferenceSession], processor: Optional[AutoImageProcessor], + tokenizer: Optional[AutoTokenizer], + image_height: Optional[int] = None, + image_width: Optional[int] = None, config: Optional[Dict[str, Any]] = None, lang_session: Optional[QAICInferenceSession] = None, ): @@ -46,12 +51,18 @@ def __init__( Args: vision_session: QAICInferenceSession for vision model processor: AutoImageProcessor for image preprocessing + tokenizer: AutoTokenizer for text tokenization + image_height: Desired image height for resizing + image_width: Desired image width for resizing config: Configuration dictionary with vision model parameters lang_session: Optional language session for coordination (to avoid resource conflicts) """ self._qeff_model = qeff_model self._vision_session = vision_session self._processor = processor + self._tokenizer = tokenizer + self._image_height = image_height + self._image_width = image_width self._config = config or {} self._lang_session = lang_session # Store language session for coordination @@ -70,6 +81,124 @@ def is_available(self) -> bool: """ return self._vision_session is not None and self._processor is not None + def prepare_internVL_inputs(self, img_url: str, prompt: str) -> Dict[str, np.ndarray]: + """ + Prepare inputs for InternVL model + + Args: + image_url: URL or path to image + prompt: Text query to process with image + """ + if not self._tokenizer: + raise ValueError("Tokenizer is 
required for InternVL input preparation") + pixel_values = [] + num_patches_list = [] + questions = [] + img = requests.get(img_url, stream=True) + image = Image.open(BytesIO(img.content)).convert("RGB") + + if self._image_height and self._image_width: + image = image.resize((self._image_height, self._image_width)) + else: + logger.warning("Height and Width not specified. Using default image size for num_patches = 13.") + image = image.resize((constants.INTERN_IMAGE_HEIGHT, constants.INTERN_IMAGE_WIDTH)) + + # preprocess the resized image + pixel_value = self._processor.load_image(image, max_num=12) + num_patches_list.append(pixel_value.shape[0]) + pixel_values.append(pixel_value) + + question = "\n" + prompt + questions.append(question) + + pixel_values = torch.cat(pixel_values, dim=0) + + # Chat Template information for prompt preprocessing + messages: List[List[str]] = [] + roles = ("<|im_start|>user\n", "<|im_start|>assistant\n") + prompt = self._processor(pixel_values, questions, messages, roles, num_patches_list=num_patches_list) + + inputs = self._tokenizer(prompt, return_tensors="pt") + inputs["pixel_values"] = pixel_values.clone() + + # Convert to numpy arrays + vision_inputs = {} + for k, v in inputs.items(): + if k in { + "pixel_values", + "image_masks", + "image_input_idx", + "valid_idx", + "aspect_ratio_ids", + "aspect_ratio_mask", + }: + vision_inputs[k] = np.array(v) + + # Convert specific inputs to float16 + vision_inputs_fp16 = {"pixel_values", "image_masks"} + for k in vision_inputs_fp16: + if k in vision_inputs: + vision_inputs[k] = vision_inputs[k].astype("float16") + + lang_inputs = {k: v for k, v in inputs.items() if k not in vision_inputs} + + return vision_inputs, lang_inputs + + def prepare_molmo_inputs(self, image_url: str, query: str) -> Dict[str, np.ndarray]: + """ + Download and preprocess image into model inputs + Args: + image_url: URL or path to image + query: Text query to process with image + Returns: + Dictionary of vision model 
inputs + Raises: + ValueError: If vision handler is not properly initialized + RuntimeError: If image processing fails + """ + if not self.is_available(): + raise ValueError("Vision handler not properly initialized. Need both vision_session and processor.") + + try: + # Download image + if image_url.startswith(("http://", "https://")): + image = Image.open(requests.get(image_url, stream=True).raw) + else: + image = Image.open(image_url) + image = image.resize((constants.MOLMO_IMAGE_HEIGHT, constants.MOLMO_IMAGE_WIDTH)) + inputs = self._processor.process(images=[image], text=query) + inputs = {k: v.unsqueeze(0) for k, v in inputs.items()} + inputs["attention_mask"] = torch.ones((inputs["input_ids"].shape), dtype=torch.int64) + valid = inputs["image_input_idx"] > 0 + valid = valid.reshape(1, -1) + inputs["valid_idx"] = torch.nonzero(valid)[:, 1].unsqueeze(0) + inputs["pixel_values"] = inputs.pop("images") + + # Convert to numpy arrays + vision_inputs = {} + for k, v in inputs.items(): + if k in { + "pixel_values", + "image_masks", + "image_input_idx", + "valid_idx", + "aspect_ratio_ids", + "aspect_ratio_mask", + }: + vision_inputs[k] = np.array(v) + + # Convert specific inputs to float16 + vision_inputs_fp16 = {"pixel_values", "image_masks"} + for k in vision_inputs_fp16: + if k in vision_inputs: + vision_inputs[k] = vision_inputs[k].astype("float16") + + lang_inputs = {k: v for k, v in inputs.items() if k not in vision_inputs} + + return vision_inputs, lang_inputs + except Exception as e: + raise RuntimeError(f"Failed to process image {image_url}: {str(e)}") + def prepare_vlm_inputs(self, image_url: str, query: str, prefill_seq_len: int) -> Dict[str, np.ndarray]: """ Download and preprocess image into model inputs @@ -77,6 +206,7 @@ def prepare_vlm_inputs(self, image_url: str, query: str, prefill_seq_len: int) - Args: image_url: URL or path to image query: Text query to process with image + prefill_seq_len: Padded sequence length for language model Returns: 
Dictionary of vision model inputs @@ -95,6 +225,17 @@ def prepare_vlm_inputs(self, image_url: str, query: str, prefill_seq_len: int) - else: image = Image.open(image_url) + if self._image_height and self._image_width: + image = image.resize((self._image_width, self._image_height)) + else: + logger.warning("Height and Width not specified. Using default image size.") + if "mistral3" in self._qeff_model.model.config.model_type: + image = image.resize((constants.MISTRAL3_IMAGE_HEIGHT, constants.MISTRAL3_IMAGE_WIDTH)) + if "llava_next" in self._qeff_model.model.config.model_type: + image = image.resize( + (constants.GRANITEVISION_IMG_SIZE_HEIGHT, constants.GRANITEVISION_IMG_SIZE_WIDTH) + ) + # Prepare conversation format conversation = [ { @@ -323,7 +464,18 @@ def get_processed_inputs( try: ## Get vlm inputs ## - vision_inputs, lang_inputs = self.prepare_vlm_inputs(image_url, query, prefill_seq_len) + if ( + hasattr(self._qeff_model.model.config, "model_type") + and self._qeff_model.model.config.model_type == "internvl_chat" + ): + vision_inputs, lang_inputs = self.prepare_internVL_inputs(image_url, query) + elif ( + hasattr(self._qeff_model.model.config, "model_type") + and self._qeff_model.model.config.model_type == "molmo" + ): + vision_inputs, lang_inputs = self.prepare_molmo_inputs(image_url, query) + else: + vision_inputs, lang_inputs = self.prepare_vlm_inputs(image_url, query, prefill_seq_len) # Handle padding for language model pad_token_id = 1 diff --git a/QEfficient/generation/vlm_generation.py b/QEfficient/generation/vlm_generation.py index 5eb91d142..b37fdc74a 100644 --- a/QEfficient/generation/vlm_generation.py +++ b/QEfficient/generation/vlm_generation.py @@ -88,6 +88,8 @@ def __init__( enable_debug_logs: bool = False, write_io_dir: Optional[str] = None, full_batch_size: Optional[int] = None, + image_height: Optional[int] = None, + image_width: Optional[int] = None, is_tlm: bool = False, include_sampler: bool = False, return_pdfs: bool = False, @@ -107,6 
+109,8 @@ def __init__( enable_debug_logs: Enable debug logging write_io_dir: Directory for I/O file writing full_batch_size: Enable continuous batching (new feature) + image_height: Desired image height for resizing + image_width: Desired image width for resizing is_tlm: Target language model flag include_sampler: Enable on-device sampling (new feature) return_pdfs: Return probability distributions @@ -143,6 +147,9 @@ def __init__( ) self.qeff_model = qeff_model self.processor = processor + self.tokenizer = tokenizer + self.image_height = image_height + self.image_width = image_width self._vision_qpc_path = vision_qpc_path self.device_id = device_id # Store device_id for vision components self.enable_debug_logs = enable_debug_logs # Store for vision components @@ -173,6 +180,9 @@ def _init_vision_components(self): qeff_model=self.qeff_model, vision_session=self._vision_session, processor=self.processor, + tokenizer=self.tokenizer, + image_height=self.image_height, + image_width=self.image_width, config=vision_config, lang_session=self._session, # Pass language session for coordination ) diff --git a/QEfficient/transformers/models/gemma3/modeling_gemma3.py b/QEfficient/transformers/models/gemma3/modeling_gemma3.py index 398259d8b..c91d2fe32 100644 --- a/QEfficient/transformers/models/gemma3/modeling_gemma3.py +++ b/QEfficient/transformers/models/gemma3/modeling_gemma3.py @@ -610,6 +610,7 @@ def forward( image_idx, past_key_values, comp_ctx_lengths: Optional[List[int]] = None, + batch_index: Optional[torch.LongTensor] = None, ): inputs_embeds = self.model.get_input_embeddings()(input_ids) B, N, C = inputs_embeds.shape @@ -625,6 +626,7 @@ def forward( position_ids=position_ids, past_key_values=past_key_values, comp_ctx_lengths=comp_ctx_lengths, + batch_index=batch_index, use_cache=True, ) image_idx = (indices1.max() + 1).unsqueeze(0).unsqueeze(0) @@ -684,6 +686,9 @@ def get_specializations( comp_ctx_lengths_prefill: Optional[List[int]] = None, 
comp_ctx_lengths_decode: Optional[List[int]] = None, kv_offload: bool = False, + continuous_batching: bool = False, + kv_cache_batch_size: Optional[int] = None, + full_batch_size: Optional[int] = None, **compiler_options, ): prefill_seq_len = prefill_seq_len if prefill_seq_len else 32 @@ -707,50 +712,72 @@ def get_specializations( lang = [] for i in range(0, len(comp_ctx_lengths_prefill)): - lang.append( - { - "batch_size": batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "comp_ctx_lengths": comp_ctx_lengths_prefill[i], - "sliding_window": self.language_model.config.sliding_window, - "img_size": img_size, - "mm_tokens_per_image": mm_tokens_per_image, - } - ) - - for i in range(0, len(comp_ctx_lengths_decode)): - lang.append( - { - "batch_size": batch_size, - "seq_len": "1", - "ctx_len": ctx_len, - "comp_ctx_lengths": comp_ctx_lengths_decode[i], - "sliding_window": self.language_model.config.sliding_window, - "img_size": img_size, - "mm_tokens_per_image": mm_tokens_per_image, - } - ) - - else: - lang = [ - { - "batch_size": batch_size, + lang_prefill = { + "batch_size": 1 if continuous_batching else batch_size, "seq_len": prefill_seq_len, "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_prefill[i], "sliding_window": self.language_model.config.sliding_window, "img_size": img_size, "mm_tokens_per_image": mm_tokens_per_image, - }, - { - "batch_size": batch_size, + "vision_batch_size": batch_size, + } + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + lang.append(lang_prefill) + + for i in range(0, len(comp_ctx_lengths_decode)): + lang_decode = { + "batch_size": full_batch_size if continuous_batching else batch_size, "seq_len": "1", "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_decode[i], "sliding_window": self.language_model.config.sliding_window, "img_size": 
img_size, "mm_tokens_per_image": mm_tokens_per_image, - }, - ] + "vision_batch_size": batch_size, + } + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size + lang.append(lang_decode) + + else: + lang_prefill = { + "batch_size": 1 if continuous_batching else batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "sliding_window": self.language_model.config.sliding_window, + "img_size": img_size, + "mm_tokens_per_image": mm_tokens_per_image, + "vision_batch_size": batch_size, + } + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + + lang_decode = { + "batch_size": full_batch_size if continuous_batching else batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "sliding_window": self.language_model.config.sliding_window, + "img_size": img_size, + "mm_tokens_per_image": mm_tokens_per_image, + "vision_batch_size": batch_size, + } + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size + lang = [lang_prefill, lang_decode] specializations = {} @@ -761,17 +788,21 @@ def get_specializations( else: return lang, compiler_options - def get_onnx_dynamic_axes(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): + def get_onnx_dynamic_axes( + self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, continuous_batching: bool = False + ): # Define dynamic axes vision_dynamic_axes = {} lang_dynamic_axes = {} lang_dynamic_axes["input_ids"] = {0: "batch_size", 1: "seq_len"} lang_dynamic_axes["position_ids"] = {0: "batch_size", 1: "seq_len"} - lang_dynamic_axes["vision_embeds"] = {0: "batch_size", 1: "mm_tokens_per_image"} + lang_dynamic_axes["vision_embeds"] = {0: "vision_batch_size", 1: 
"mm_tokens_per_image"} + if continuous_batching: + lang_dynamic_axes["batch_index"] = {0: "batch_size"} vision_dynamic_axes["pixel_values"] = {0: "batch_size", 2: "img_size", 3: "img_size"} - pkv_dynamic_axes = {0: "batch_size", 2: "ctx_len"} - pkv_dynamic_sliding_axes = {0: "batch_size", 2: "sliding_window"} + pkv_dynamic_axes = {0: "full_batch_size" if continuous_batching else "batch_size", 2: "ctx_len"} + pkv_dynamic_sliding_axes = {0: "full_batch_size" if continuous_batching else "batch_size", 2: "sliding_window"} layer_switch = ( self.language_model.config.sliding_window_pattern if hasattr(self.language_model.config, "sliding_window_pattern") @@ -837,7 +868,9 @@ def get_dummy_pkv_cache(self, config, batch_size, seq_len): past_key_values.append(pkv) return past_key_values - def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): + def get_dummy_inputs( + self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, continuous_batching: bool = False + ): if vis_cfg := getattr(self.config, "vision_config", None): img_size = getattr(vis_cfg, "image_size", 896) else: @@ -876,15 +909,21 @@ def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offl .repeat(constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, 1) ) lang_inputs["image_idx"] = torch.zeros((inputs_shapes["image_idx"]), dtype=torch.int64) + + bs: int = constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE + fbs: int = constants.ONNX_EXPORT_EXAMPLE_FBS + # Add data for KV lang_inputs["past_key_values"] = self.get_dummy_pkv_cache( config=self.language_model.config, - batch_size=constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, + batch_size=fbs if continuous_batching else bs, seq_len=constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN, ) if comp_ctx_lengths is not None: lang_inputs["comp_ctx_lengths"] = torch.randint(0, 100, (40,), dtype=torch.long) + if continuous_batching: + lang_inputs["batch_index"] = torch.arange(bs).view(bs, 1) inputs = {} if kv_offload: diff --git 
a/QEfficient/transformers/models/internvl/modeling_internvl.py b/QEfficient/transformers/models/internvl/modeling_internvl.py index 96c59325f..85c331aa8 100644 --- a/QEfficient/transformers/models/internvl/modeling_internvl.py +++ b/QEfficient/transformers/models/internvl/modeling_internvl.py @@ -44,6 +44,7 @@ def forward( image_idx, past_key_values, comp_ctx_lengths: Optional[List[int]] = None, + batch_index: Optional[torch.LongTensor] = None, ): input_embeds = self.model.language_model.get_input_embeddings()(input_ids) B, N, C = input_embeds.shape @@ -69,6 +70,7 @@ def forward( position_ids=position_ids, past_key_values=past_key_values, comp_ctx_lengths=comp_ctx_lengths, + batch_index=batch_index, use_cache=True, ) image_idx = (indices1.max() + 1).unsqueeze(0).unsqueeze(0) @@ -91,6 +93,9 @@ def get_specializations( comp_ctx_lengths_prefill: Optional[List[int]] = None, comp_ctx_lengths_decode: Optional[List[int]] = None, kv_offload: bool = False, + continuous_batching: bool = False, + kv_cache_batch_size: Optional[int] = None, + full_batch_size: Optional[int] = None, **compiler_options, ): num_patches = compiler_options.pop("num_patches", None) @@ -124,50 +129,71 @@ def get_specializations( lang = [] for i in range(0, len(comp_ctx_lengths_prefill)): - lang.append( - { - "batch_size": batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "comp_ctx_lengths": comp_ctx_lengths_prefill[i], - "num_patches": num_patches, - "img_size": img_size, - "vision_size": vision_size, - } - ) - - for i in range(0, len(comp_ctx_lengths_decode)): - lang.append( - { - "batch_size": batch_size, - "seq_len": "1", - "ctx_len": ctx_len, - "comp_ctx_lengths": comp_ctx_lengths_decode[i], - "num_patches": num_patches, - "img_size": img_size, - "vision_size": vision_size, - } - ) - - else: - lang = [ - { - "batch_size": batch_size, + lang_prefill = { + "batch_size": 1 if continuous_batching else batch_size, "seq_len": prefill_seq_len, "ctx_len": ctx_len, + "comp_ctx_lengths": 
comp_ctx_lengths_prefill[i], "num_patches": num_patches, "img_size": img_size, "vision_size": vision_size, - }, - { - "batch_size": batch_size, + } + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + lang.append(lang_prefill) + + for i in range(0, len(comp_ctx_lengths_decode)): + lang_decode = { + "batch_size": full_batch_size if continuous_batching else batch_size, "seq_len": "1", "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_decode[i], "num_patches": num_patches, "img_size": img_size, "vision_size": vision_size, - }, - ] + } + + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size + lang.append(lang_decode) + + else: + lang_prefill = { + "batch_size": 1 if continuous_batching else batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "num_patches": num_patches, + "img_size": img_size, + "vision_size": vision_size, + } + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + + lang_decode = { + "batch_size": full_batch_size if continuous_batching else batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "num_patches": num_patches, + "img_size": img_size, + "vision_size": vision_size, + } + + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size + + lang = [lang_prefill, lang_decode] specializations = {} @@ -176,18 +202,24 @@ def get_specializations( specializations["lang"] = lang return specializations, compiler_options else: + lang[0].pop("vision_size") + lang[1].pop("vision_size") return lang, compiler_options - def get_onnx_dynamic_axes(self, 
comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): + def get_onnx_dynamic_axes( + self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, continuous_batching: bool = False + ): # Define dynamic axes vision_dynamic_axes = {} lang_dynamic_axes = {} lang_dynamic_axes["input_ids"] = {0: "batch_size", 1: "seq_len"} lang_dynamic_axes["position_ids"] = {0: "batch_size", 1: "seq_len"} lang_dynamic_axes["vision_embeds"] = {1: "vision_size"} + if continuous_batching: + lang_dynamic_axes["batch_index"] = {0: "batch_size"} vision_dynamic_axes["pixel_values"] = {0: "batched_num_patches", 2: "img_size", 3: "img_size"} - pkv_dynamic_axes = {0: "batch_size", 2: "ctx_len"} + pkv_dynamic_axes = {0: "full_batch_size" if continuous_batching else "batch_size", 2: "ctx_len"} for i in range(self.language_model.config.num_hidden_layers): for kv in ["key", "value"]: lang_dynamic_axes[f"past_{kv}.{i}"] = pkv_dynamic_axes @@ -222,7 +254,9 @@ def get_output_names(self, kv_offload: bool = False): return lang_output_names return output_names - def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): + def get_dummy_inputs( + self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, continuous_batching: bool = False + ): if vis_cfg := getattr(self.config, "vision_config", None): img_size = getattr(vis_cfg, "image_size", constants.INTERN_IMG_SIZE) else: @@ -271,10 +305,13 @@ def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offl ) lang_inputs["image_idx"] = torch.zeros((1, 1), dtype=torch.int64) + bs: int = constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE + fbs: int = constants.ONNX_EXPORT_EXAMPLE_FBS + # Add data for KV kv_cache_shape = get_padding_shape_from_config( config=self.language_model.config, - batch_size=constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, + batch_size=fbs if continuous_batching else bs, seq_len=constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN, ) @@ -285,6 +322,8 @@ 
def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offl if comp_ctx_lengths is not None: lang_inputs["comp_ctx_lengths"] = torch.randint(0, 100, (40,), dtype=torch.long) + if continuous_batching: + lang_inputs["batch_index"] = torch.arange(bs).view(bs, 1) inputs = {} if kv_offload: diff --git a/QEfficient/transformers/models/llama4/modeling_llama4.py b/QEfficient/transformers/models/llama4/modeling_llama4.py index 0bcdf8ae0..7a2f687fe 100644 --- a/QEfficient/transformers/models/llama4/modeling_llama4.py +++ b/QEfficient/transformers/models/llama4/modeling_llama4.py @@ -1065,9 +1065,7 @@ def get_specializations( else: lang_decode["batch_size"] = kv_cache_batch_size - lang = [] - lang.append(lang_prefill) - lang.append(lang_decode) + lang = [lang_prefill, lang_decode] specializations = {} diff --git a/QEfficient/transformers/models/llava/modeling_llava.py b/QEfficient/transformers/models/llava/modeling_llava.py index dc6653db0..d5f5ee920 100644 --- a/QEfficient/transformers/models/llava/modeling_llava.py +++ b/QEfficient/transformers/models/llava/modeling_llava.py @@ -18,6 +18,7 @@ from QEfficient.utils.logging_utils import logger BS = 1 +FBS = 4 NUM_CHANNEL = 3 SEQ_LEN = 592 CTX_LEN = 1024 @@ -61,6 +62,7 @@ def forward( image_idx, past_key_values, comp_ctx_lengths: Optional[List[int]] = None, + batch_index: Optional[torch.LongTensor] = None, ): inputs_embeds = self.model.get_input_embeddings()(input_ids) vision_embeds = vision_embeds.to(inputs_embeds.device, inputs_embeds.dtype) @@ -76,6 +78,7 @@ def forward( position_ids=position_ids, past_key_values=past_key_values, comp_ctx_lengths=comp_ctx_lengths, + batch_index=batch_index, return_dict=True, ) @@ -140,7 +143,13 @@ def forward( image_idx = torch.where(image_idx < next_image_idx, next_image_idx, image_idx) return logits, pixel_values, image_idx, outputs.past_key_values - def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, **kwargs): + def 
get_dummy_inputs( + self, + comp_ctx_lengths: Optional[List[int]] = None, + kv_offload: bool = False, + continuous_batching: bool = False, + **kwargs, + ): num_layers = self.config.text_config.num_hidden_layers num_key_value_heads = self.config.text_config.num_key_value_heads head_dim = self.config.text_config.hidden_size // self.config.text_config.num_attention_heads @@ -165,8 +174,8 @@ def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offl for i in range(num_layers): lang_inputs["past_key_values"].append( ( - torch.zeros(BS, num_key_value_heads, CTX_LEN, head_dim), - torch.zeros(BS, num_key_value_heads, CTX_LEN, head_dim), + torch.zeros(FBS if continuous_batching else BS, num_key_value_heads, CTX_LEN, head_dim), + torch.zeros(FBS if continuous_batching else BS, num_key_value_heads, CTX_LEN, head_dim), ) ) lang_inputs["position_ids"] = torch.full(lang_inputs["position_ids"].shape, CTX_LEN - 1) @@ -174,6 +183,8 @@ def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offl if comp_ctx_lengths is not None: lang_inputs["comp_ctx_lengths"] = torch.randint(0, 100, (40,), dtype=torch.long) + if continuous_batching: + lang_inputs["batch_index"] = torch.arange(BS).view(BS, 1) inputs = {} if kv_offload: @@ -193,6 +204,9 @@ def get_specializations( comp_ctx_lengths_prefill: Optional[List[int]] = None, comp_ctx_lengths_decode: Optional[List[int]] = None, kv_offload: bool = False, + continuous_batching: bool = False, + kv_cache_batch_size: Optional[int] = None, + full_batch_size: Optional[int] = None, **compiler_options, ): max_num_images = compiler_options.pop("max_num_images", 1) @@ -218,49 +232,72 @@ def get_specializations( lang = [] for i in range(0, len(comp_ctx_lengths_prefill)): - lang.append( - { - "batch_size": batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "comp_ctx_lengths": comp_ctx_lengths_prefill[i], - "max_num_images": max_num_images, - "img_size": img_size, - "vision_size": vision_size, - } - ) 
- - for i in range(0, len(comp_ctx_lengths_decode)): - lang.append( - { - "batch_size": batch_size, - "seq_len": "1", - "ctx_len": ctx_len, - "comp_ctx_lengths": comp_ctx_lengths_decode[i], - "max_num_images": max_num_images, - "img_size": img_size, - "vision_size": vision_size, - } - ) - else: - lang = [ - { - "batch_size": batch_size, + lang_prefill = { + "batch_size": 1 if continuous_batching else batch_size, "seq_len": prefill_seq_len, "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_prefill[i], "max_num_images": max_num_images, "img_size": img_size, "vision_size": vision_size, - }, - { - "batch_size": batch_size, + "vision_batch_size": batch_size, + } + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + lang.append(lang_prefill) + + for i in range(0, len(comp_ctx_lengths_decode)): + lang_decode = { + "batch_size": full_batch_size if continuous_batching else batch_size, "seq_len": "1", "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_decode[i], "max_num_images": max_num_images, "img_size": img_size, "vision_size": vision_size, - }, - ] + "vision_batch_size": batch_size, + } + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size + lang.append(lang_decode) + else: + lang_prefill = { + "batch_size": 1 if continuous_batching else batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "max_num_images": max_num_images, + "img_size": img_size, + "vision_size": vision_size, + "vision_batch_size": batch_size, + } + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + + lang_decode = { + "batch_size": full_batch_size if 
continuous_batching else batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "max_num_images": max_num_images, + "img_size": img_size, + "vision_size": vision_size, + "vision_batch_size": batch_size, + } + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size + + lang = [lang_prefill, lang_decode] specializations = {} @@ -269,9 +306,13 @@ def get_specializations( specializations["lang"] = lang return specializations, compiler_options else: + lang[0].pop("vision_size") + lang[1].pop("vision_size") return lang, compiler_options - def get_onnx_dynamic_axes(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): + def get_onnx_dynamic_axes( + self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, continuous_batching: bool = False + ): # Define dynamic axes num_layers = self.config.text_config.num_hidden_layers @@ -281,11 +322,19 @@ def get_onnx_dynamic_axes(self, comp_ctx_lengths: Optional[List[int]] = None, kv lang_dynamic_axes = { "input_ids": {0: "batch_size", 1: "seq_len"}, "position_ids": {0: "batch_size", 1: "seq_len"}, - "vision_embeds": {0: "batch_size", 1: "vision_size"}, + "vision_embeds": {0: "vision_batch_size", 1: "vision_size"}, } + if continuous_batching: + lang_dynamic_axes["batch_index"] = {0: "batch_size"} for i in range(num_layers): - lang_dynamic_axes[f"past_key.{i}"] = {0: "batch_size", 2: "ctx_len"} - lang_dynamic_axes[f"past_value.{i}"] = {0: "batch_size", 2: "ctx_len"} + lang_dynamic_axes[f"past_key.{i}"] = { + 0: "full_batch_size" if continuous_batching else "batch_size", + 2: "ctx_len", + } + lang_dynamic_axes[f"past_value.{i}"] = { + 0: "full_batch_size" if continuous_batching else "batch_size", + 2: "ctx_len", + } if comp_ctx_lengths is not None: lang_dynamic_axes["comp_ctx_lengths"] = {0: "comp_ctx_lengths"} diff --git a/QEfficient/transformers/models/llava_next/modeling_llava_next.py 
b/QEfficient/transformers/models/llava_next/modeling_llava_next.py index 2e4848b6b..878d04a45 100755 --- a/QEfficient/transformers/models/llava_next/modeling_llava_next.py +++ b/QEfficient/transformers/models/llava_next/modeling_llava_next.py @@ -20,6 +20,9 @@ from QEfficient.utils._utils import IOInfo from QEfficient.utils.logging_utils import logger +BS = constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE +FBS = constants.ONNX_EXPORT_EXAMPLE_FBS + class QEffLlavaNextEncoderWrapper(nn.Module): def __init__(self, model): @@ -133,6 +136,7 @@ def forward( image_idx, past_key_values, comp_ctx_lengths: Optional[List[int]] = None, + batch_index: Optional[torch.LongTensor] = None, ): inputs_embeds = self.model.get_input_embeddings()(input_ids) image_features = vision_embeds.to(inputs_embeds.device, inputs_embeds.dtype) @@ -149,6 +153,7 @@ def forward( position_ids=position_ids, past_key_values=past_key_values, comp_ctx_lengths=comp_ctx_lengths, + batch_index=batch_index, ) image_idx = (indices1.max() + 1).unsqueeze(0).unsqueeze(0) logit_index = position_ids.to(torch.int32).argmax(1, keepdim=True) @@ -165,7 +170,13 @@ def get_qeff_vision_encoder(self): def get_qeff_language_decoder(self): return QEffLlavaNextDecoderWrapper(self) - def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, **kwargs): + def get_dummy_inputs( + self, + comp_ctx_lengths: Optional[List[int]] = None, + kv_offload: bool = False, + continuous_batching: bool = False, + **kwargs, + ): num_layers = self.config.text_config.num_hidden_layers num_key_value_heads = self.config.text_config.num_key_value_heads head_dim = self.config.text_config.hidden_size // self.config.text_config.num_attention_heads @@ -214,13 +225,13 @@ def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offl lang_inputs["past_key_values"].append( ( torch.zeros( - constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, + FBS if continuous_batching else BS, num_key_value_heads, 
constants.GRANITEVISION_CTX_LEN, head_dim, ), torch.zeros( - constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, + FBS if continuous_batching else BS, num_key_value_heads, constants.GRANITEVISION_CTX_LEN, head_dim, @@ -232,6 +243,9 @@ def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offl if comp_ctx_lengths is not None: lang_inputs["comp_ctx_lengths"] = torch.randint(0, 100, (40,), dtype=torch.long) + if continuous_batching: + lang_inputs["batch_index"] = torch.arange(BS).view(BS, 1) + inputs = {} if kv_offload: inputs["vision"] = vision_inputs @@ -250,6 +264,9 @@ def get_specializations( comp_ctx_lengths_prefill: Optional[List[int]] = None, comp_ctx_lengths_decode: Optional[List[int]] = None, kv_offload: bool = False, + continuous_batching: bool = False, + kv_cache_batch_size: Optional[int] = None, + full_batch_size: Optional[int] = None, **compiler_options, ): max_num_images = compiler_options.pop("max_num_images", 1) @@ -306,62 +323,85 @@ def get_specializations( lang = [] for i in range(0, len(comp_ctx_lengths_prefill)): - lang.append( - { - "batch_size": batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "comp_ctx_lengths": comp_ctx_lengths_prefill[i], - "image_size_height": image_size_height, - "image_size_width": image_size_width, - "num_patches": num_patches, - "max_num_images": max_num_images, - "img_size": img_size, - "vision_size": vision_size, - } - ) - - # Remaining elements use comp_ctx_lengths[1:] in a loop - for i in range(0, len(comp_ctx_lengths_decode)): - lang.append( - { - "batch_size": batch_size, - "seq_len": "1", - "ctx_len": ctx_len, - "comp_ctx_lengths": comp_ctx_lengths_decode[i], - "image_size_height": image_size_height, - "image_size_width": image_size_width, - "num_patches": num_patches, - "max_num_images": max_num_images, - "img_size": img_size, - "vision_size": vision_size, - } - ) - else: - lang = [ - { - "batch_size": batch_size, + lang_prefill = { + "batch_size": 1 if continuous_batching else 
batch_size, "seq_len": prefill_seq_len, "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_prefill[i], "image_size_height": image_size_height, "image_size_width": image_size_width, "num_patches": num_patches, "max_num_images": max_num_images, "img_size": img_size, "vision_size": vision_size, - }, - { - "batch_size": batch_size, + "vision_batch_size": batch_size, + } + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + lang.append(lang_prefill) + + # Remaining elements use comp_ctx_lengths[1:] in a loop + for i in range(0, len(comp_ctx_lengths_decode)): + lang_decode = { + "batch_size": full_batch_size if continuous_batching else batch_size, "seq_len": "1", "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_decode[i], "image_size_height": image_size_height, "image_size_width": image_size_width, "num_patches": num_patches, "max_num_images": max_num_images, "img_size": img_size, "vision_size": vision_size, - }, - ] + "vision_batch_size": batch_size, + } + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size + lang.append(lang_decode) + else: + lang_prefill = { + "batch_size": 1 if continuous_batching else batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "image_size_height": image_size_height, + "image_size_width": image_size_width, + "num_patches": num_patches, + "max_num_images": max_num_images, + "img_size": img_size, + "vision_size": vision_size, + "vision_batch_size": batch_size, + } + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + + lang_decode = { + "batch_size": full_batch_size if continuous_batching else 
batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "image_size_height": image_size_height, + "image_size_width": image_size_width, + "num_patches": num_patches, + "max_num_images": max_num_images, + "img_size": img_size, + "vision_size": vision_size, + "vision_batch_size": batch_size, + } + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size + + lang = [lang_prefill, lang_decode] specializations = {} if kv_offload: @@ -369,9 +409,13 @@ def get_specializations( specializations["lang"] = lang return specializations, compiler_options else: + lang[0].pop("vision_size") + lang[1].pop("vision_size") return lang, compiler_options - def get_onnx_dynamic_axes(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): + def get_onnx_dynamic_axes( + self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, continuous_batching: bool = False + ): # Define dynamic axes num_layers = self.config.text_config.num_hidden_layers vision_dynamic_axes = { @@ -381,11 +425,19 @@ def get_onnx_dynamic_axes(self, comp_ctx_lengths: Optional[List[int]] = None, kv lang_dynamic_axes = { "input_ids": {0: "batch_size", 1: "seq_len"}, "position_ids": {0: "batch_size", 1: "seq_len"}, - "vision_embeds": {0: "batch_size", 1: "vision_size"}, + "vision_embeds": {0: "vision_batch_size", 1: "vision_size"}, } + if continuous_batching: + lang_dynamic_axes["batch_index"] = {0: "batch_size"} for i in range(num_layers): - lang_dynamic_axes[f"past_key.{i}"] = {0: "batch_size", 2: "ctx_len"} - lang_dynamic_axes[f"past_value.{i}"] = {0: "batch_size", 2: "ctx_len"} + lang_dynamic_axes[f"past_key.{i}"] = { + 0: "full_batch_size" if continuous_batching else "batch_size", + 2: "ctx_len", + } + lang_dynamic_axes[f"past_value.{i}"] = { + 0: "full_batch_size" if continuous_batching else "batch_size", + 2: "ctx_len", + } if comp_ctx_lengths is not None: lang_dynamic_axes["comp_ctx_lengths"] = {0: 
"comp_ctx_lengths"} diff --git a/QEfficient/transformers/models/mistral3/modeling_mistral3.py b/QEfficient/transformers/models/mistral3/modeling_mistral3.py index 694ed4cde..89e19c65b 100644 --- a/QEfficient/transformers/models/mistral3/modeling_mistral3.py +++ b/QEfficient/transformers/models/mistral3/modeling_mistral3.py @@ -176,20 +176,22 @@ def forward( image_idx, past_key_values, comp_ctx_lengths: Optional[List[int]] = None, + batch_index: Optional[torch.LongTensor] = None, ): - inputs_embeds = self.model.get_input_embeddings()(input_ids) - vision_embeds = vision_embeds.to(inputs_embeds.device, inputs_embeds.dtype) + inputs_embeds = self.model.language_model.get_input_embeddings()(input_ids) mask = input_ids == self.model.config.image_token_index indices1 = mask.to(torch.int64).cumsum(1) - 1 indices1 = torch.where(indices1 != -1, indices1 + image_idx, indices1) indices0 = torch.arange(mask.shape[0]).view(-1, 1) image_features_expanded = vision_embeds.unsqueeze(0)[indices0, indices1] - inputs_embeds_1 = torch.where(mask.unsqueeze(-1), image_features_expanded, inputs_embeds) - outputs = self.model.model( - inputs_embeds=inputs_embeds_1, + image_embeds = torch.where(mask.unsqueeze(-1), image_features_expanded, inputs_embeds) + inputs_embeds = torch.where(input_ids.shape[1] == torch.tensor(1), inputs_embeds, image_embeds) + outputs = self.language_model( + inputs_embeds=inputs_embeds, position_ids=position_ids, past_key_values=past_key_values, comp_ctx_lengths=comp_ctx_lengths, + batch_index=batch_index, ) # Cast to int32 to avoid ONNXRT issue @@ -250,7 +252,13 @@ def forward( return logits, pixel_values, image_idx, outputs.past_key_values - def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, **kwargs): + def get_dummy_inputs( + self, + comp_ctx_lengths: Optional[List[int]] = None, + kv_offload: bool = False, + continuous_batching: bool = False, + **kwargs, + ): inputs_shapes = {} inputs_shapes["input_ids"] = 
(constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN) height = self.config.vision_config.image_size @@ -290,10 +298,14 @@ def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offl .repeat(constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, 1) ) lang_inputs["image_idx"] = torch.zeros((inputs_shapes["image_idx"]), dtype=torch.int64) + + bs: int = constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE + fbs: int = constants.ONNX_EXPORT_EXAMPLE_FBS + # Add data for KV kv_cache_shape = get_padding_shape_from_config( - config=self.language_model.config, - batch_size=constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, + config=self.model.config.text_config, + batch_size=fbs if continuous_batching else bs, seq_len=constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN, ) @@ -304,6 +316,8 @@ def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offl if comp_ctx_lengths is not None: lang_inputs["comp_ctx_lengths"] = torch.randint(0, 100, (40,), dtype=torch.long) + if continuous_batching: + lang_inputs["batch_index"] = torch.arange(bs).view(bs, 1) inputs = {} if kv_offload: @@ -324,6 +338,9 @@ def get_specializations( comp_ctx_lengths_prefill: Optional[List[int]] = None, comp_ctx_lengths_decode: Optional[List[int]] = None, kv_offload: bool = False, + continuous_batching: bool = False, + kv_cache_batch_size: Optional[int] = None, + full_batch_size: Optional[int] = None, **compiler_options, ): if img_size is None and hasattr(self.config.vision_config, "image_size"): @@ -352,46 +369,66 @@ def get_specializations( lang = [] for i in range(0, len(comp_ctx_lengths_prefill)): - lang.append( - { - "batch_size": batch_size, - "seq_len": prefill_seq_len, - "ctx_len": ctx_len, - "comp_ctx_lengths": comp_ctx_lengths_prefill[i], - "image_size": img_size, - "vision_size": vision_size, - } - ) - - # Remaining elements use comp_ctx_lengths[1:] in a loop - for i in range(0, len(comp_ctx_lengths_decode)): - lang.append( - { - "batch_size": batch_size, - "seq_len": "1", - 
"ctx_len": ctx_len, - "comp_ctx_lengths": comp_ctx_lengths_decode[i], - "image_size": img_size, - "vision_size": vision_size, - } - ) - else: - lang = [ - { - "batch_size": batch_size, + lang_prefill = { + "batch_size": 1 if continuous_batching else batch_size, "seq_len": prefill_seq_len, "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_prefill[i], "image_size": img_size, "vision_size": vision_size, - }, - { - "batch_size": batch_size, + } + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + lang.append(lang_prefill) + + # Remaining elements use comp_ctx_lengths[1:] in a loop + for i in range(0, len(comp_ctx_lengths_decode)): + lang_decode = { + "batch_size": full_batch_size if continuous_batching else batch_size, "seq_len": "1", "ctx_len": ctx_len, + "comp_ctx_lengths": comp_ctx_lengths_decode[i], "image_size": img_size, "vision_size": vision_size, - }, - ] + } + + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size + lang.append(lang_decode) + else: + lang_prefill = { + "batch_size": 1 if continuous_batching else batch_size, + "seq_len": prefill_seq_len, + "ctx_len": ctx_len, + "image_size": img_size, + "vision_size": vision_size, + } + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + + lang_decode = { + "batch_size": full_batch_size if continuous_batching else batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "image_size": img_size, + "vision_size": vision_size, + } + + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size + lang = [lang_prefill, 
lang_decode] specializations = {} @@ -404,7 +441,9 @@ def get_specializations( lang[1].pop("vision_size") return lang, compiler_options - def get_onnx_dynamic_axes(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): + def get_onnx_dynamic_axes( + self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, continuous_batching: bool = False + ): # Define dynamic axes num_layers = self.config.text_config.num_hidden_layers @@ -417,9 +456,18 @@ def get_onnx_dynamic_axes(self, comp_ctx_lengths: Optional[List[int]] = None, kv "vision_embeds": {0: "vision_size"}, } + if continuous_batching: + lang_dynamic_axes["batch_index"] = {0: "batch_size"} + for i in range(num_layers): - lang_dynamic_axes[f"past_key.{i}"] = {0: "batch_size", 2: "ctx_len"} - lang_dynamic_axes[f"past_value.{i}"] = {0: "batch_size", 2: "ctx_len"} + lang_dynamic_axes[f"past_key.{i}"] = { + 0: "full_batch_size" if continuous_batching else "batch_size", + 2: "ctx_len", + } + lang_dynamic_axes[f"past_value.{i}"] = { + 0: "full_batch_size" if continuous_batching else "batch_size", + 2: "ctx_len", + } if comp_ctx_lengths is not None: lang_dynamic_axes["comp_ctx_lengths"] = {0: "comp_ctx_lengths"} diff --git a/QEfficient/transformers/models/modeling_auto.py b/QEfficient/transformers/models/modeling_auto.py index f3618cb1e..91866e4c0 100644 --- a/QEfficient/transformers/models/modeling_auto.py +++ b/QEfficient/transformers/models/modeling_auto.py @@ -1284,6 +1284,8 @@ def generate( device_ids: List[int] = None, runtime_ai100: bool = True, generation_len: Optional[int] = None, + image_height: Optional[int] = None, + image_width: Optional[int] = None, ) -> Union[torch.Tensor, np.ndarray]: """ Generates output by executing the compiled QPC(s) on Cloud AI 100 Hardware cards. 
@@ -1342,6 +1344,8 @@ def generate( full_batch_size=fbs, comp_ctx_lengths_prefill=self.comp_ctx_lengths_prefill, comp_ctx_lengths_decode=self.comp_ctx_lengths_decode, + image_height=image_height, + image_width=image_width, ) # Call generate method @@ -2493,6 +2497,7 @@ def from_pretrained( kv_offload=kv_offload, pretrained_model_name_or_path=pretrained_model_name_or_path, qaic_config=qaic_config, + continuous_batching=continuous_batching, **kwargs, ) return cls( diff --git a/QEfficient/transformers/models/molmo/modeling_molmo.py b/QEfficient/transformers/models/molmo/modeling_molmo.py index c088158c4..7bfa58fc0 100644 --- a/QEfficient/transformers/models/molmo/modeling_molmo.py +++ b/QEfficient/transformers/models/molmo/modeling_molmo.py @@ -43,14 +43,14 @@ def eager_attention_forward( if num_q_heads != num_kv_heads: assert num_q_heads % num_kv_heads == 0 repeat_factor = num_q_heads // num_kv_heads - _, _, S, D = k.shape + B, _, S, D = k.shape k = k.unsqueeze(2) k = k.expand(-1, -1, repeat_factor, -1, -1) - k = k.reshape(1, num_q_heads, S, D) + k = k.reshape(B, num_q_heads, S, D) v = v.unsqueeze(2) v = v.expand(-1, -1, repeat_factor, -1, -1) - v = v.reshape(1, num_q_heads, S, D) + v = v.reshape(B, num_q_heads, S, D) attn_weights = torch.matmul(q, k.transpose(2, 3)) * scale_factor @@ -596,6 +596,7 @@ def forward( image_idx, past_key_values, comp_ctx_lengths: Optional[List[int]] = None, + batch_index: Optional[torch.LongTensor] = None, ): if input_ids is not None: input_ids = input_ids * (input_ids != -1).to(input_ids.dtype) @@ -613,6 +614,7 @@ def forward( position_ids=position_ids, past_key_values=past_key_values, comp_ctx_lengths=comp_ctx_lengths, + batch_index=batch_index, use_cache=True, ) next_idx = (indices1.max() + 1).unsqueeze(0).unsqueeze(0) @@ -694,6 +696,9 @@ def get_specializations( comp_ctx_lengths_decode: Optional[List[int]] = None, valid_size: int = None, kv_offload: bool = False, + continuous_batching: bool = False, + kv_cache_batch_size: 
Optional[int] = None, + full_batch_size: Optional[int] = None, **compiler_options, ): prefill_seq_len = prefill_seq_len if prefill_seq_len else 1024 @@ -725,12 +730,20 @@ def get_specializations( for i in range(0, len(comp_ctx_lengths_prefill)): lang_prefill = { - "batch_size": batch_size, + "batch_size": 1 if continuous_batching else batch_size, "seq_len": prefill_seq_len, "ctx_len": ctx_len, "comp_ctx_lengths": comp_ctx_lengths_prefill[i], "valid_size": valid_size, + "vision_batch_size": batch_size, } + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size if kv_offload: values = { "img_size": img_size, @@ -746,12 +759,17 @@ def get_specializations( for i in range(0, len(comp_ctx_lengths_decode)): lang_decode = { - "batch_size": batch_size, + "batch_size": full_batch_size if continuous_batching else batch_size, "seq_len": "1", "ctx_len": ctx_len, "comp_ctx_lengths": comp_ctx_lengths_decode[i], "valid_size": valid_size, + "vision_batch_size": batch_size, } + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size if kv_offload: values = { "img_size": img_size, @@ -767,13 +785,33 @@ def get_specializations( else: lang_prefill = { - "batch_size": batch_size, + "batch_size": 1 if continuous_batching else batch_size, "seq_len": prefill_seq_len, "ctx_len": ctx_len, "valid_size": valid_size, + "vision_batch_size": batch_size, } - lang_decode = {"batch_size": batch_size, "seq_len": "1", "ctx_len": ctx_len, "valid_size": valid_size} + if continuous_batching: + lang_prefill["full_batch_size"] = kv_cache_batch_size + else: + lang_prefill["batch_size"] = kv_cache_batch_size + + if full_batch_size: + lang_prefill["full_batch_exec_size"] = full_batch_size + + lang_decode = { + "batch_size": full_batch_size if continuous_batching else 
batch_size, + "seq_len": "1", + "ctx_len": ctx_len, + "valid_size": valid_size, + "vision_batch_size": batch_size, + } + + if continuous_batching: + lang_decode["full_batch_size"] = kv_cache_batch_size + else: + lang_decode["batch_size"] = kv_cache_batch_size if kv_offload: values = { @@ -787,9 +825,7 @@ def get_specializations( lang_prefill[key] = value lang_decode[key] = value - lang = [] - lang.append(lang_prefill) - lang.append(lang_decode) + lang = [lang_prefill, lang_decode] specializations = {} @@ -800,13 +836,15 @@ def get_specializations( else: return lang, compiler_options - def get_onnx_dynamic_axes(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False): + def get_onnx_dynamic_axes( + self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, continuous_batching: bool = False + ): # Define dynamic axes vision_dynamic_axes = {} lang_dynamic_axes = {} lang_dynamic_axes["input_ids"] = {0: "batch_size", 1: "seq_len"} lang_dynamic_axes["position_ids"] = {0: "batch_size", 1: "seq_len"} - lang_dynamic_axes["vision_embeds"] = {0: "batch_size", 1: "valid_size"} + lang_dynamic_axes["vision_embeds"] = {0: "vision_batch_size", 1: "valid_size"} vision_dynamic_axes["pixel_values"] = {0: "batch_size", 1: "num_images", 2: "img_tile", 3: "img_size"} vision_dynamic_axes["image_input_idx"] = {0: "batch_size", 1: "num_images", 2: "num_patch"} @@ -816,8 +854,17 @@ def get_onnx_dynamic_axes(self, comp_ctx_lengths: Optional[List[int]] = None, kv num_layers = self.model.config.n_layers for i in range(num_layers): - lang_dynamic_axes[f"past_key.{i}"] = {0: "batch_size", 2: "ctx_len"} - lang_dynamic_axes[f"past_value.{i}"] = {0: "batch_size", 2: "ctx_len"} + lang_dynamic_axes[f"past_key.{i}"] = { + 0: "full_batch_size" if continuous_batching else "batch_size", + 2: "ctx_len", + } + lang_dynamic_axes[f"past_value.{i}"] = { + 0: "full_batch_size" if continuous_batching else "batch_size", + 2: "ctx_len", + } + + if continuous_batching: + 
lang_dynamic_axes["batch_index"] = {0: "batch_size"} if comp_ctx_lengths is not None: lang_dynamic_axes["comp_ctx_lengths"] = {0: "comp_ctx_lengths"} @@ -851,7 +898,13 @@ def get_output_names(self, kv_offload: bool = False): return lang_output_names return output_names - def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offload: bool = False, **kwargs): + def get_dummy_inputs( + self, + comp_ctx_lengths: Optional[List[int]] = None, + kv_offload: bool = False, + continuous_batching: bool = False, + **kwargs, + ): inputs_shapes = {} inputs_shapes_lang = {} inputs_shapes["input_ids"] = (constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN) @@ -902,10 +955,14 @@ def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offl .repeat(constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, 1) ) lang_inputs["image_idx"] = torch.zeros((inputs_shapes["image_idx"]), dtype=torch.int64) + + bs: int = constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE + fbs: int = constants.ONNX_EXPORT_EXAMPLE_FBS + # Add data for KV kv_cache_shape = get_padding_shape_from_config( config=self.config, - batch_size=constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, + batch_size=fbs if continuous_batching else bs, seq_len=constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN, ) @@ -916,6 +973,8 @@ def get_dummy_inputs(self, comp_ctx_lengths: Optional[List[int]] = None, kv_offl if comp_ctx_lengths is not None: lang_inputs["comp_ctx_lengths"] = torch.randint(0, 100, (40,), dtype=torch.long) + if continuous_batching: + lang_inputs["batch_index"] = torch.arange(bs).view(bs, 1) inputs = {} if kv_offload: diff --git a/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py b/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py index 33a434db1..63e046600 100644 --- a/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +++ b/QEfficient/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py @@ -1169,9 +1169,7 @@ def smart_resize( else: 
lang_decode["batch_size"] = kv_cache_batch_size - lang = [] - lang.append(lang_prefill) - lang.append(lang_decode) + lang = [lang_prefill, lang_decode] specializations = {} diff --git a/QEfficient/utils/constants.py b/QEfficient/utils/constants.py index 3752db40c..e0b003422 100644 --- a/QEfficient/utils/constants.py +++ b/QEfficient/utils/constants.py @@ -100,6 +100,8 @@ def get_models_dir(): INTERN_CTX_LEN = 4096 INTERN_PREFILL_SEQ_LEN = INTERN_CTX_LEN - 256 # 4096-256 INTERN_NUM_CHANNELS = 3 +INTERN_IMAGE_HEIGHT = 1000 +INTERN_IMAGE_WIDTH = 747 INTERN_IMG_CONTEXT_TOKEN = 151667 # Specific to InternVL3_5 series, same token won't work for InternVL2_5 series @@ -135,6 +137,14 @@ def get_models_dir(): # Modules to cache while clearing the pytorch weights CACHE_MODULES = ["get_output_names", "get_dummy_inputs", "get_onnx_dynamic_axes", "get_specializations"] +# Mistral3 Constants +MISTRAL3_IMAGE_HEIGHT = 1540 +MISTRAL3_IMAGE_WIDTH = 1540 + +# Molmo Constants +MOLMO_IMAGE_HEIGHT = 536 +MOLMO_IMAGE_WIDTH = 354 + class Constants: # Export Constants. diff --git a/QEfficient/utils/run_utils.py b/QEfficient/utils/run_utils.py index c54dadeac..61553e7ea 100644 --- a/QEfficient/utils/run_utils.py +++ b/QEfficient/utils/run_utils.py @@ -6,6 +6,7 @@ # ----------------------------------------------------------------------------- import os +from typing import List import numpy as np import onnx @@ -276,6 +277,54 @@ def __init__( self.config = config self.gen_len = max_gen_len + @torch.no_grad() + def run_vlm_hf_model_on_pytorch_CB(self, model, images, queries): + """ + Function responsible for running HuggingFace ``PyTorch`` model for continuous batching + and return the output tokens for each prompt/image pair. 
+ + ``Mandatory`` Args: + :model (torch.nn.module): Original ``PyTorch`` model + :images (List[PIL.Image]): List of input images + :queries (List[str]): List of input queries + + Return: + :List[numpy.ndarray]: List of generated output tokens for each prompt + """ + generated_ids = [] + + for idx, (image, query) in enumerate(zip(images, queries)): + # Prepare conversation format for each image-query pair + conversation = [ + { + "role": "user", + "content": [ + {"type": "text", "text": query}, + {"type": "image"}, + ], + }, + ] + prompt = self.processor.apply_chat_template(conversation, add_generation_prompt=True) + + # Process inputs + inputs = self.processor(images=image, text=prompt, return_tensors="pt") + if "pixel_values" in inputs: + inputs["pixel_values"] = inputs["pixel_values"].to(torch.float32) + + # Generate tokens + output = model.generate(**inputs, max_new_tokens=self.gen_len, do_sample=False) + offset_output = output[0, inputs["input_ids"].shape[1] :] + + # Decode and print output + py_output = self.processor.tokenizer.decode(offset_output).strip() + print(f"Original HF Model Outputs (Torch CPU) for prompt {idx}:") + print("Query:", repr(query)) + print("Completion:", repr(py_output)) + + generated_ids.append(offset_output.numpy()) + + return generated_ids + @torch.no_grad() def run_vlm_hf_model_on_pytorch(self, model, inputs): output = model.generate(**inputs, max_new_tokens=self.gen_len, do_sample=False) @@ -448,6 +497,57 @@ def __init__(self, batch_size, processor, config, image, prompt, prompt_len, ctx self.config = config self.gen_len = max_gen_len + @torch.no_grad() + def run_vlm_hf_model_on_pytorch_CB(self, model, images, queries): + """ + Function responsible for running HuggingFace ``PyTorch`` model for continuous batching + and return the output tokens for each prompt/image pair. 
+ + ``Mandatory`` Args: + :model (torch.nn.module): Original ``PyTorch`` model + :images (List[PIL.Image]): List of input images + :queries (List[str]): List of input queries + + Return: + :List[numpy.ndarray]: List of generated output tokens for each prompt + """ + generated_ids = [] + + for idx, (image, query) in enumerate(zip(images, queries)): + num_patches_list = [] + pixel_values = [] + questions = [] + + pixel_value = self.processor.load_image(image, max_num=12) + num_patches_list.append(pixel_value.shape[0]) + question = "\n" + query + + pixel_values.append(pixel_value) + pixel_values = torch.cat(pixel_values, dim=0) + questions.append(question) + + # Chat Template information for prompt preprocessing + messages: List[List[str]] = [] + roles = ("<|im_start|>user\n", "<|im_start|>assistant\n") + prompt = self.processor(pixel_values, questions, messages, roles, num_patches_list=num_patches_list) + + inputs = self.processor.tokenizer(prompt, return_tensors="pt") + inputs["pixel_values"] = pixel_values.clone() + + generation_config = dict(max_new_tokens=self.gen_len, do_sample=False) + generation_config["eos_token_id"] = self.processor.tokenizer.convert_tokens_to_ids("<|im_end|>\n".strip()) + + # Decode and print output + outputs = model.generate(**inputs, **generation_config) + offset_output = outputs[0].detach().numpy() + + py_output = self.processor.tokenizer.decode(offset_output, skip_special_tokens=True).strip() + print(f"Original HF Model Outputs (Torch CPU) for prompt {idx}:") + print("Completion:", repr(py_output)) + generated_ids.append(offset_output) + + return generated_ids + @torch.no_grad() def run_vlm_hf_model_on_pytorch(self, model, inputs, generation_config): outputs = model.generate(**inputs, **generation_config) @@ -490,3 +590,34 @@ def run_vlm_hf_model_on_pytorch(self, model, inputs, generation_config): print("Original HF Model Outputs (Torch CPU):") print("Completion:", repr(py_output)) return generated_ids + + @torch.no_grad() + def 
run_vlm_hf_model_on_pytorch_CB(self, model, images, queries, generation_config): + """ + Function responsible for running HuggingFace ``PyTorch`` model for continuous batching + and return the output tokens for each prompt/image pair. + + ``Mandatory`` Args: + :model (torch.nn.module): Original ``PyTorch`` model + :images (List[PIL.Image]): List of input images + :queries (List[str]): List of input queries + :generation_config (dict): Generation configuration parameters + + Return: + :List[numpy.ndarray]: List of generated output tokens for each prompt + """ + generated_ids = [] + for idx, (image, query) in enumerate(zip(images, queries)): + inputs = self.processor.process(images=[image], text=query) + inputs = {k: v.unsqueeze(0) for k, v in inputs.items()} + outputs = model.generate_from_batch( + inputs, generation_config, tokenizer=self.processor.tokenizer, do_sample=False + ) + + offset_output = outputs[0, inputs["input_ids"].size(1) :] + + py_output = self.processor.tokenizer.decode(offset_output, skip_special_tokens=True).strip() + print(f"Original HF Model Outputs (Torch CPU) for prompt {idx}:") + print("Completion:", repr(py_output)) + generated_ids.append(offset_output) + return generated_ids diff --git a/examples/image_text_to_text/models/granite_vision/continuous_batching.py b/examples/image_text_to_text/models/granite_vision/continuous_batching.py new file mode 100644 index 000000000..22c4270bc --- /dev/null +++ b/examples/image_text_to_text/models/granite_vision/continuous_batching.py @@ -0,0 +1,67 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import transformers +from transformers import AutoConfig, AutoProcessor, TextStreamer + +from QEfficient import QEFFAutoModelForImageTextToText + +## For AWQ model update pytorch version to 2.8.* +model_id = "ibm-granite/granite-vision-3.2-2b" +config = AutoConfig.from_pretrained(model_id) +config.text_config.num_hidden_layers = 2 + +qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( + model_id, + attn_implementation="eager", + kv_offload=True, + config=config, + continuous_batching=True, +) +tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) +processor = AutoProcessor.from_pretrained(model_id) + +batch_size = 1 +## Vision + Text ## +qeff_model.compile( + batch_size=batch_size, + full_batch_size=4, + prefill_seq_len=5500, + ctx_len=6000, + num_cores=16, + num_devices=4, + img_size=384, + mxfp6_matmul=False, +) + +image_urls = [ + "http://images.cocodataset.org/val2017/000000039769.jpg", + "http://images.cocodataset.org/val2017/000000039769.jpg", + "http://images.cocodataset.org/val2017/000000039769.jpg", + "http://images.cocodataset.org/val2017/000000039769.jpg", +] + +prompts = [ + "Describe the image", + "What are the objects in the image?", + "What is the main subject of the image?", + "What colors are predominant in the image?", +] + +streamer = TextStreamer(tokenizer) +output = qeff_model.generate( + tokenizer=tokenizer, + prompts=prompts, + processor=processor, + images=image_urls, + generation_len=10, + image_height=1610, + image_width=1109, +) +print(output.generated_ids) +print(tokenizer.batch_decode(output.generated_ids)) +print(output.generated_texts) diff --git a/examples/image_text_to_text/models/internvl/continuous_batching.py b/examples/image_text_to_text/models/internvl/continuous_batching.py new file mode 100644 index 000000000..ca3e0ede3 --- /dev/null +++ 
b/examples/image_text_to_text/models/internvl/continuous_batching.py @@ -0,0 +1,100 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ---------------------------------------------------------------------------- + +from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer + +from QEfficient import QEFFAutoModelForCausalLM +from QEfficient.utils.test_utils import InternProcessor + +model_id = "OpenGVLab/InternVL2_5-1B" +config = AutoConfig.from_pretrained(model_id, trust_remote_code=True) +# For Testing Purpose Only +config.llm_config.num_hidden_layers = 2 +config.vision_config.num_hidden_layers = 2 + +# The original Intern-VL model, despite being multimodal, is loaded using `AutoModelForCausalLM` in Huggingface. +# To maintain compatibility, we load this model using `QEFFAutoModelForCausalLM`. +model_hf = AutoModelForCausalLM.from_pretrained( + model_id, + low_cpu_mem_usage=False, + trust_remote_code=True, + config=config, +) + +tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, use_fast=False) +processor = InternProcessor(model_hf, tokenizer) + + +continuous_batching = True +if continuous_batching: + qeff_model = QEFFAutoModelForCausalLM.from_pretrained( + model_id, + attn_implementation="eager", + kv_offload=True, + config=config, + continuous_batching=True, + trust_remote_code=True, + ) + + qeff_model.compile( + num_patches=13, # Set num_patches according to image_height and image_width, default is 13 (747 x 1000) + prefill_seq_len=128, + ctx_len=4096, + num_cores=16, + num_devices=4, + batch_size=1, + full_batch_size=4, + mxfp6_matmul=True, + mxint8_kv_cache=True, + aic_enable_depth_first=True, + mos=1, + ) +else: + qeff_model = QEFFAutoModelForCausalLM.from_pretrained( + model_id, attn_implementation="eager", kv_offload=True, config=config, trust_remote_code=True + ) + + 
qeff_model.compile( + num_patches=13, + prefill_seq_len=128, + ctx_len=4096, + num_cores=16, + num_devices=4, + batch_size=1, + mxfp6_matmul=True, + mxint8_kv_cache=True, + aic_enable_depth_first=True, + ) + +image_urls = [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", +] + +prompts = [ + "Can you describe the image in detail?", + "What are the objects in the image?", + "What is the main subject of the image?", + "What colors are predominant in the image?", +] + +exec_info = qeff_model.generate( + tokenizer=tokenizer, + prompts=prompts, + processor=processor, + images=image_urls, + device_ids=[0, 1, 2, 3], + generation_len=10, + image_height=747, + image_width=1000, +) + +print("Generated texts:", exec_info.generated_texts) +print("Generated IDs:", exec_info.generated_ids) +print(exec_info) diff --git a/tests/transformers/models/image_text_to_text/test_continuous_batching.py b/tests/transformers/models/image_text_to_text/test_continuous_batching.py new file mode 100644 index 000000000..2f33b7ee8 --- /dev/null +++ b/tests/transformers/models/image_text_to_text/test_continuous_batching.py @@ -0,0 +1,720 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ---------------------------------------------------------------------------- + +from io import BytesIO +from typing import List + +import pytest +import requests +from PIL import Image +from transformers import ( + AutoConfig, + AutoModelForCausalLM, + AutoModelForImageTextToText, + AutoProcessor, + AutoTokenizer, + GenerationConfig, +) + +from QEfficient.transformers.models.modeling_auto import QEFFAutoModelForCausalLM, QEFFAutoModelForImageTextToText +from QEfficient.utils import hf_download +from QEfficient.utils._utils import get_num_layers_vlm +from QEfficient.utils.device_utils import get_available_device_id +from QEfficient.utils.run_utils import ApiRunnerInternVL, ApiRunnerMolmo, ApiRunnerVlm +from QEfficient.utils.test_utils import InternProcessor + +NEW_GENERATION_TOKENS = 10 + +# TODO: Add CB support for kv_offload=False case +test_models_config = [ + # CONFIG PARAMS NEEDED FOR A MODEL TO BE TESTED + # ( + # model_name, + # kv_offload, + # batch_size, + # prompt_len, + # ctx_len, + # img_size, + # img_url_list", + # text_prompt_list, + # number of layers of the model, + # full_batch_size + # ), + ( + "llava-hf/llava-1.5-7b-hf", + True, + 1, + 784, + 1024, + 336, + [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + ], + [ + "Can you describe the image in detail?", + "What are the objects in the image?", + "What is the main subject of the image?", + "What colors are predominant in the image?", + ], + 1, + 4, + ), + # Disabled in CI due to performance issues + 
# ( + # "meta-llama/Llama-4-Scout-17B-16E-Instruct", + # True, + # 1, + # 128, + # 3072, + # 336, + # ["https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + # "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + # "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + # "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg",], + # ["Can you describe the image in detail?", + # "What are the objects in the image?", + # "What is the main subject of the image?", + # "What colors are predominant in the image?"], + # 4, + # 4, + # ), + ( + "google/gemma-3-4b-it", + True, + 1, + 128, + 3072, + 896, + [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + ], + [ + "Can you describe the image in detail?", + "What are the objects in the image?", + "What is the main subject of the image?", + "What colors are predominant in the image?", + ], + 1, + 4, + ), + ( + "mistralai/Mistral-Small-3.1-24B-Instruct-2503", + True, + 1, + 128, + 4096, + 1540, + [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + 
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + ], + [ + "Can you describe the image in detail?", + "What are the objects in the image?", + "What is the main subject of the image?", + "What colors are predominant in the image?", + ], + 1, + 4, + ), + ( + "Qwen/Qwen2.5-VL-3B-Instruct", + True, + 1, + 128, + 4096, + 1540, + [ + "https://picsum.photos/id/237/536/354", + "https://picsum.photos/id/237/536/354", + "https://picsum.photos/id/237/536/354", + "https://picsum.photos/id/237/536/354", + ], + [ + "Can you describe the image in detail?", + "What are the objects in the image?", + "What is the main subject of the image?", + "What colors are predominant in the image?", + ], + 2, + 4, + ), + # ( + # "meta-llama/Llama-3.2-11B-Vision-Instruct", + # True, + # 1, + # 32, + # 512, + # 560, + # ["https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + # "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + # "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + # "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg",], + # ["Can you describe the image in detail?", + # "What are the objects in the image?", + # "What is the main subject of the image?", + # "What colors are predominant in the image?"], + # 7, + # 4, + # ), +] + +intern_model_config = [ + ( + "OpenGVLab/InternVL2_5-1B", + True, + 1, + 384, + 512, + [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + 
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + ], + [ + "Can you describe the image in detail?", + "What are the objects in the image?", + "What is the main subject of the image?", + "What colors are predominant in the image?", + ], + 2, + 4, + ), + ( + "OpenGVLab/InternVL3_5-1B", + True, + 1, + 384, + 512, + [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + ], + [ + "Can you describe the image in detail?", + "What are the objects in the image?", + "What is the main subject of the image?", + "What colors are predominant in the image?", + ], + 2, + 4, + ), +] + +molmo_model_config = [ + # Disabled in CI due to HF issues + # ( + # "allenai/Molmo-7B-D-0924", + # True, + # 1, + # 128, + # 4096, + # ["https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + # "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg", + # "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png", + # 
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg",], + # ["Can you describe the image in detail?", + # "What are the objects in the image?", + # "What is the main subject of the image?", + # "What colors are predominant in the image?"], + # 2, + # 4, + # ), +] + + +def load_image_text_to_text_model(model_config): + model_path = hf_download( + repo_id=model_config._name_or_path, + ignore_patterns=["*.onnx", "*.ot", "*.md", "*.tflite", "*.pdf", "*.h5", "*.msgpack"], + ) + try: + model_hf = AutoModelForImageTextToText.from_pretrained( + model_path, + low_cpu_mem_usage=False, + config=model_config, + ) + except ValueError: + model_hf = AutoModelForCausalLM.from_pretrained( + model_path, + low_cpu_mem_usage=False, + trust_remote_code=True, + config=model_config, + ) + params = sum(p.numel() for p in model_hf.parameters()) + model_hf.eval() + return model_hf, params + + +def set_num_layers(config, n_layer=1): + ## -1 indicates use all the layers of the model. 
+ if n_layer == -1: + return config + elif hasattr(config, "model_type") and "mllama" in config.model_type: + config.text_config.num_hidden_layers = n_layer + config.text_config.cross_attention_layers = [ + x for x in config.text_config.cross_attention_layers if x < n_layer + ] + elif hasattr(config, "text_config"): + config.text_config.num_hidden_layers = n_layer + config.vision_config.num_hidden_layers = n_layer + elif hasattr(config, "llm_config"): + config.llm_config.num_hidden_layers = n_layer + config.vision_config.num_hidden_layers = n_layer + else: + config.num_hidden_layers = n_layer + return config + + +def check_image_text_to_text_pytorch_vs_ai100_continuous_batching( + model_name: str, + img_size: int, + image_urls: List[str], + queries: List[str], + prompt_len: int, + ctx_len: int, + max_gen_len: int = 20, + batch_size: int = 1, + n_layer: int = 1, + num_devices: int = 1, + full_batch_size: int = 4, + kv_offload: bool = True, +): + model_config = {"model_name": model_name} + model_config["img_size"] = img_size + config = AutoConfig.from_pretrained(model_config["model_name"], trust_remote_code=True) + config = set_num_layers(config, n_layer=n_layer) + model_hf, _ = load_image_text_to_text_model(config) + processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True, padding=True) + + n_layer = get_num_layers_vlm(config) + + image_height = None + image_width = None + + images = [] + for img_url in image_urls: + image = Image.open(requests.get(img_url, stream=True).raw) + if model_name == "mistralai/Mistral-Small-3.1-24B-Instruct-2503": + image_height = 1540 + image_width = 1540 + image = image.resize((image_height, image_width)) + images.append(image) + + conversation = [ + { + "role": "user", + "content": [ + {"type": "text", "text": queries[0]}, + {"type": "image"}, + ], + }, + ] + prompt = processor.apply_chat_template(conversation, add_generation_prompt=True) + api_runner = ApiRunnerVlm( + batch_size, + processor, + config, + 
images[0], + conversation, + prompt, + prompt_len, + ctx_len, + max_gen_len, + n_layer, + ) + + # For same prompt + image_list = [images[0]] * full_batch_size + prompt_list = [queries[0]] * full_batch_size + + pytorch_hf_tokens = api_runner.run_vlm_hf_model_on_pytorch_CB(model_hf, image_list, prompt_list) + + qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( + model_config["model_name"], + kv_offload=kv_offload, + config=config, + continuous_batching=True, + ) + + qeff_model.export() + + if not get_available_device_id(): + pytest.skip("No available devices to run model on Cloud AI 100") + + qeff_model.compile( + img_size=model_config["img_size"], + num_cores=16, + num_devices=num_devices, + prefill_seq_len=prompt_len, + ctx_len=ctx_len, + batch_size=batch_size, + full_batch_size=full_batch_size, + mxfp6_matmul=False, + ) + + print("QPC Outputs (QAIC):") + exec_info = qeff_model.generate( + tokenizer=processor.tokenizer, + processor=processor, + images=[image_urls[0]] * full_batch_size, + prompts=prompt_list, + generation_len=max_gen_len, + image_height=image_height, + image_width=image_width, + ) + + qpc_tokens = exec_info.generated_ids[:, :max_gen_len] + print("QPC Outputs (QAIC) for Continuous Batching with same prompt:") + print(exec_info.generated_texts) + + for i in range(full_batch_size): + assert (pytorch_hf_tokens[i] == qpc_tokens[i]).all(), ( + f"Tokens don't match for prompt {i} between HF and QPC output for same prompts" + ) + + # For different prompts + pytorch_hf_tokens = api_runner.run_vlm_hf_model_on_pytorch_CB(model_hf, images, queries) + + print("QPC Outputs (QAIC):") + exec_info = qeff_model.generate( + tokenizer=processor.tokenizer, + processor=processor, + images=image_urls, + prompts=queries, + generation_len=max_gen_len, + image_height=image_height, + image_width=image_width, + ) + + qpc_tokens = exec_info.generated_ids[:, :max_gen_len] + print("QPC Outputs (QAIC) for Continuous Batching with different prompt:") + 
print(exec_info.generated_texts) + + for i in range(full_batch_size): + assert (pytorch_hf_tokens[i] == qpc_tokens[i]).all(), ( + f"Tokens don't match for prompt {i} between HF and QPC output for different prompts" + ) + return + + +def check_molmo_image_text_to_text_pytorch_vs_ai100_continuous_batching( + model_name: str, + image_urls: List[str], + queries: List[str], + prompt_len: int, + ctx_len: int, + max_gen_len: int = 20, + batch_size: int = 1, + n_layer: int = 1, + num_devices: int = 1, + full_batch_size: int = 4, + kv_offload: bool = True, +): + model_config = {"model_name": model_name} + + config = AutoConfig.from_pretrained(model_config["model_name"], trust_remote_code=True) + config._attn_implementation = "eager" + config = set_num_layers(config, n_layer=n_layer) + model_hf, _ = load_image_text_to_text_model(config) + n_layer = (n_layer, n_layer) + + processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True, padding=True) + tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) + + images = [] + for img_url in image_urls: + img = requests.get(img_url, stream=True) + image = Image.open(BytesIO(img.content)).convert("RGB") + image = image.resize((536, 354)) + images.append(image) + + api_runner = ApiRunnerMolmo( + batch_size, + processor, + config, + images[0], + queries[0], + prompt_len, + ctx_len, + max_gen_len, + n_layer, + ) + + generation_config = GenerationConfig(max_new_tokens=NEW_GENERATION_TOKENS, stop_strings="<|endoftext|>") + + # For same prompt + image_list = [images[0]] * full_batch_size + prompt_list = [queries[0]] * full_batch_size + pytorch_hf_tokens = api_runner.run_vlm_hf_model_on_pytorch_CB(model_hf, image_list, prompt_list, generation_config) + + qeff_model = QEFFAutoModelForCausalLM.from_pretrained( + model_name, + trust_remote_code=True, + attn_implementation="eager", + kv_offload=kv_offload, + config=config, + continuous_batching=True, + ) + + qeff_model.export() + + qeff_model.compile( + 
prefill_seq_len=prompt_len, + ctx_len=ctx_len, + num_devices=4, + batch_size=1, + full_batch_size=full_batch_size, + mxfp6_matmul=False, + mxint8_kv_cache=True, + aic_enable_depth_first=True, + mos=1, + ) + + exec_info = qeff_model.generate( + tokenizer=tokenizer, + processor=processor, + images=[image_urls[0]] * full_batch_size, + prompts=prompt_list, + generation_len=max_gen_len, + ) + + qpc_tokens = exec_info.generated_ids[:, :max_gen_len] + print("QPC Outputs (QAIC) for Continuous Batching with same prompt:") + print(exec_info.generated_texts) + + for i in range(full_batch_size): + assert (pytorch_hf_tokens[i] == qpc_tokens[i]).all(), ( + f"Tokens don't match for prompt {i} between HF and QPC output for same prompts" + ) + + # For different prompts + pytorch_hf_tokens = api_runner.run_vlm_hf_model_on_pytorch_CB(model_hf, images, queries, generation_config) + exec_info = qeff_model.generate( + tokenizer=tokenizer, + processor=processor, + images=image_urls, + prompts=queries, + generation_len=max_gen_len, + ) + + qpc_tokens = exec_info.generated_ids[:, :max_gen_len] + print("QPC Outputs (QAIC) for Continuous Batching with different prompt:") + print(exec_info.generated_texts) + + for i in range(full_batch_size): + assert (pytorch_hf_tokens[i] == qpc_tokens[i]).all(), ( + f"Tokens don't match for prompt {i} between HF and QPC output for different prompts" + ) + return + + +def check_intern_image_text_to_text_pytorch_vs_ai100_continuous_batching( + model_name: str, + image_urls: str, + queries: str, + prompt_len: int, + ctx_len: int, + max_gen_len: int = 20, + batch_size: int = 1, + n_layer: int = 1, + kv_offload: bool = True, + num_devices: int = 1, + full_batch_size: int = 4, +): + model_config = {"model_name": model_name} + + config = AutoConfig.from_pretrained(model_config["model_name"], trust_remote_code=True) + config._attn_implementation = "eager" + config = set_num_layers(config, n_layer=n_layer) + model_hf = AutoModelForCausalLM.from_pretrained( + 
model_name, + low_cpu_mem_usage=False, + trust_remote_code=True, + config=config, + ) + n_layer = get_num_layers_vlm(config) + + tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True, use_fast=False) + processor = InternProcessor(model_hf, tokenizer) + + generation_config = dict(max_new_tokens=max_gen_len, do_sample=False) + generation_config["eos_token_id"] = tokenizer.convert_tokens_to_ids("<|im_end|>\n".strip()) + + images = [] + for img_url in image_urls: + img = requests.get(img_url, stream=True) + image = Image.open(BytesIO(img.content)).convert("RGB") + image = image.resize((448, 448)) + images.append(image) + + api_runner = ApiRunnerInternVL( + batch_size, + processor, + config, + images[0], + queries[0], + prompt_len, + ctx_len, + max_gen_len, + n_layer, + ) + + # For same prompt + image_list = [images[0]] * full_batch_size + prompt_list = [queries[0]] * full_batch_size + + pytorch_hf_tokens = api_runner.run_vlm_hf_model_on_pytorch_CB(model_hf, image_list, prompt_list) + + qeff_model = QEFFAutoModelForCausalLM.from_pretrained( + model_name, + trust_remote_code=True, + attn_implementation="eager", + kv_offload=True, + config=config, + continuous_batching=True, + ) + + qeff_model.export() + + qeff_model.compile( + num_patches=1, + prefill_seq_len=prompt_len, + ctx_len=ctx_len, + num_devices=4, + batch_size=1, + full_batch_size=full_batch_size, + mxfp6_matmul=False, + ) + + exec_info = qeff_model.generate( + tokenizer=tokenizer, + processor=processor, + images=[image_urls[0]] * full_batch_size, + prompts=prompt_list, + generation_len=max_gen_len, + image_height=448, + image_width=448, + ) + + qpc_tokens = exec_info.generated_ids[:, :max_gen_len] + print("QPC Outputs (QAIC) for Continuous Batching for same prompts:") + print(exec_info.generated_texts) + + for i in range(full_batch_size): + assert (pytorch_hf_tokens[i] == qpc_tokens[i]).all(), ( + f"Tokens don't match for prompt {i} between HF and QPC output for same prompts" + ) + + # 
For different prompts + pytorch_hf_tokens = api_runner.run_vlm_hf_model_on_pytorch_CB(model_hf, images, queries) + + exec_info = qeff_model.generate( + tokenizer=tokenizer, + processor=processor, + images=image_urls, + prompts=queries, + generation_len=max_gen_len, + image_height=448, + image_width=448, + ) + + qpc_tokens = exec_info.generated_ids[:, :max_gen_len] + print("QPC Outputs (QAIC) for Continuous Batching for different prompts:") + print(exec_info.generated_texts) + + for i in range(full_batch_size): + assert (pytorch_hf_tokens[i] == qpc_tokens[i]).all(), ( + f"Tokens don't match for prompt {i} between HF and QPC output for different prompts" + ) + return + + +@pytest.mark.on_qaic +@pytest.mark.multimodal +@pytest.mark.parametrize( + "model_name, kv_offload, batch_size, prompt_len, ctx_len, img_size, img_urls, queries, n_layer, full_batch_size", + test_models_config, +) +def test_image_text_to_text_pytorch_vs_ai100_continuous_batching( + model_name, kv_offload, batch_size, prompt_len, ctx_len, img_size, img_urls, queries, n_layer, full_batch_size +): + """ + Test function to validate the PyTorch model, the PyTorch model after KV changes, the ONNX model, and the Cloud AI 100 model, without continuous batching. 
+ ``Mandatory`` Args: + :model_name (str): Hugging Face Model Card name, Example: ``gpt2`` + """ + check_image_text_to_text_pytorch_vs_ai100_continuous_batching( + model_name=model_name, + prompt_len=prompt_len, + ctx_len=ctx_len, + max_gen_len=NEW_GENERATION_TOKENS, + img_size=img_size, + image_urls=img_urls, + queries=queries, + n_layer=n_layer, + batch_size=batch_size, + kv_offload=kv_offload, + full_batch_size=full_batch_size, + ) + + +@pytest.mark.on_qaic +@pytest.mark.multimodal +@pytest.mark.parametrize( + "model_name, kv_offload, batch_size, prompt_len, ctx_len, img_urls, queries, n_layer, full_batch_size", + molmo_model_config, +) +def test_image_text_to_text_molmo_pytorch_vs_ai100_continuous_batching( + model_name, kv_offload, batch_size, prompt_len, ctx_len, img_urls, queries, n_layer, full_batch_size +): + check_molmo_image_text_to_text_pytorch_vs_ai100_continuous_batching( + model_name=model_name, + prompt_len=prompt_len, + ctx_len=ctx_len, + max_gen_len=NEW_GENERATION_TOKENS, + image_urls=img_urls, + queries=queries, + n_layer=n_layer, + batch_size=batch_size, + kv_offload=kv_offload, + full_batch_size=full_batch_size, + ) + + +@pytest.mark.on_qaic +@pytest.mark.multimodal +@pytest.mark.parametrize( + "model_name, kv_offload, batch_size, prompt_len, ctx_len, img_url, queries, n_layer, full_batch_size", + intern_model_config, +) +def test_image_text_to_text_intern_pytorch_vs_ai100_continuous_batching( + model_name, kv_offload, batch_size, prompt_len, ctx_len, img_url, queries, n_layer, full_batch_size +): + check_intern_image_text_to_text_pytorch_vs_ai100_continuous_batching( + model_name=model_name, + prompt_len=prompt_len, + ctx_len=ctx_len, + max_gen_len=NEW_GENERATION_TOKENS, + image_urls=img_url, + queries=queries, + n_layer=n_layer, + batch_size=batch_size, + kv_offload=kv_offload, + full_batch_size=full_batch_size, + ) diff --git a/tests/transformers/models/test_image_text_to_text_models.py 
b/tests/transformers/models/image_text_to_text/test_image_text_to_text_models.py similarity index 100% rename from tests/transformers/models/test_image_text_to_text_models.py rename to tests/transformers/models/image_text_to_text/test_image_text_to_text_models.py From 9e3546bda0adf8c4abafa85e3788698c8c751918 Mon Sep 17 00:00:00 2001 From: Abukhoyer Shaik Date: Mon, 8 Dec 2025 14:13:18 +0530 Subject: [PATCH 42/60] [Jenkins]: jenkins Timeout increased (#654) Signed-off-by: Abukhoyer Shaik Signed-off-by: Dhiraj Kumar Sah --- scripts/Jenkinsfile | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/scripts/Jenkinsfile b/scripts/Jenkinsfile index 48fb03fc8..134770638 100644 --- a/scripts/Jenkinsfile +++ b/scripts/Jenkinsfile @@ -33,7 +33,7 @@ pipeline { parallel { stage('Run Non-CLI Non-QAIC Tests') { steps { - timeout(time: 25, unit: 'MINUTES') { + timeout(time: 40, unit: 'MINUTES') { sh ''' sudo docker exec ${BUILD_TAG} bash -c " cd /efficient-transformers && @@ -200,8 +200,13 @@ pipeline { echo "Failed to change ownership: ${error}" } } - junit testResults: 'tests/tests_log.xml' - + script { + try { + junit testResults: 'tests/tests_log.xml', allowEmptyResults: true + } catch (error) { + echo "No test results file found or parsing failed: ${error}" + } + } script { try { sh ''' From 71f0a646c01dcd0b42760afa401e5d930b670aa2 Mon Sep 17 00:00:00 2001 From: vjanfaza Date: Mon, 8 Dec 2025 09:32:17 -0800 Subject: [PATCH 43/60] Adding ccl_enabled flag during model loading and passing CCL lists during compilation process (#623) In these changes, instead of passing CCL lists during model loading, I passed a flag called ccl_enabled to specify whether CCL feature is enabled or not and moved passing CCL lists to compilation process. 
--------- Signed-off-by: Vahid Janfaza Co-authored-by: Hem Agnihotri Signed-off-by: Dhiraj Kumar Sah --- .../transformers/models/modeling_auto.py | 76 +- .../transformers/spd/spd_transform_forward.py | 2 + QEfficient/utils/check_ccl_specializations.py | 9 +- examples/performance/README.md | 50 ++ .../compute_context_length/README.md | 5 +- .../compute_context_length/basic_inference.py | 12 +- .../fp32_nodes_gemma3_27b.yaml | 685 +++++++++++++++++ .../fp32_nodes_gemma3_4b.yaml | 698 ++++++++++++++++++ .../compute_context_length/gemma3.py | 24 +- .../gemma3/fp32_nodes_gemma3_27b.yaml | 685 +++++++++++++++++ .../gemma3/fp32_nodes_gemma3_4b.yaml | 698 ++++++++++++++++++ .../compute_context_length/gpt_oss.py | 23 +- .../compute_context_length/granite_vision.py | 9 +- .../compute_context_length/internvl.py | 25 +- .../compute_context_length/llama4.py | 26 +- .../compute_context_length/llama4_cb.py | 20 +- .../llama4_multi_image.py | 14 +- .../compute_context_length/mistral3.py | 12 +- .../compute_context_length/molmo.py | 22 +- .../compute_context_length/qwen2_5_vl.py | 23 +- .../compute_context_length/qwen2_5_vl_cb.py | 18 +- .../compute_context_length/qwen3moe.py | 54 ++ .../ccl_qwen3moe_inference.py | 24 +- .../compute_context_length/vlm_inference.py | 15 +- 24 files changed, 3125 insertions(+), 104 deletions(-) create mode 100755 examples/performance/compute_context_length/fp32_nodes_gemma3_27b.yaml create mode 100755 examples/performance/compute_context_length/fp32_nodes_gemma3_4b.yaml create mode 100755 examples/performance/compute_context_length/gemma3/fp32_nodes_gemma3_27b.yaml create mode 100755 examples/performance/compute_context_length/gemma3/fp32_nodes_gemma3_4b.yaml create mode 100644 examples/performance/compute_context_length/qwen3moe.py diff --git a/QEfficient/transformers/models/modeling_auto.py b/QEfficient/transformers/models/modeling_auto.py index 91866e4c0..8edc1f3f0 100644 --- a/QEfficient/transformers/models/modeling_auto.py +++ 
b/QEfficient/transformers/models/modeling_auto.py @@ -937,11 +937,13 @@ def __init__( self.model = model self.config = model.config - self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = process_ccl_specializations(qaic_config) - self.vision_model = QEffVisionEncoderForTextImageToTextModel(model, **kwargs) self.lang_model = QEffCausalLMForTextImageToTextModel(model, qaic_config=qaic_config, **kwargs) self.continuous_batching = continuous_batching + self.ccl_enabled = False + if qaic_config: + self.ccl_enabled = qaic_config.get("ccl_enabled", False) + self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = None, None self.input_shapes, self.output_names = None, None @property @@ -985,6 +987,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: str, qaic_config: Option logger.warning("Updating low_cpu_mem_usage=False") kwargs.update({"attn_implementation": "eager", "low_cpu_mem_usage": False}) + model = cls._hf_auto_class.from_pretrained(pretrained_model_name_or_path, **kwargs) return cls( model, @@ -1095,6 +1098,8 @@ def compile( compile_dir: Optional[str] = None, *, prefill_seq_len: Optional[int] = None, + comp_ctx_lengths_prefill: Optional[List[int]] = None, + comp_ctx_lengths_decode: Optional[List[int]] = None, ctx_len: Optional[int] = None, batch_size: int = 1, full_batch_size: Optional[int] = None, @@ -1179,10 +1184,21 @@ def compile( output_names = self.model.get_output_names(kv_offload=True) + # if ccl_enabled is True read Compute-Context-Length lists + if self.ccl_enabled: + if comp_ctx_lengths_prefill is None or comp_ctx_lengths_decode is None: + logger.warning( + "Please set comp_ctx_lengths_prefill and comp_ctx_lengths_decode with a proper list of context lengths. Using non-CCL default model." 
+ ) + self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = process_ccl_specializations( + comp_ctx_lengths_prefill, comp_ctx_lengths_decode, ctx_len, prefill_seq_len + ) + # For supporting VLLM and Disaggregated with CCL - if "comp_ctx_lengths_prefill" in compiler_options: - self.comp_ctx_lengths_prefill = compiler_options.pop("comp_ctx_lengths_prefill") - self.comp_ctx_lengths_decode = compiler_options.pop("comp_ctx_lengths_decode") + if comp_ctx_lengths_prefill is not None or comp_ctx_lengths_decode is not None: + self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = process_ccl_specializations( + comp_ctx_lengths_prefill, comp_ctx_lengths_decode, ctx_len, prefill_seq_len + ) specializations, compiler_options = self.model.get_specializations( batch_size=batch_size, @@ -1634,7 +1650,6 @@ def __init__( raise NotImplementedError("Continuous batching is not supported for image-text-to-text models yet.") super().__init__(model, **kwargs) - self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = process_ccl_specializations(qaic_config) self.model.qaic_config = qaic_config # to handle internvl models @@ -1648,6 +1663,10 @@ def __init__( else: self.model.config.use_cache = True self.hash_params["qeff_auto_class"] = self.__class__.__name__ + self.ccl_enabled = False + if qaic_config: + self.ccl_enabled = qaic_config.get("ccl_enabled", False) + self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = None, None if self.model.qaic_config is not None and self.model.qaic_config.get("num_kv_blocks", None) is not None: BlockedKVAttentionTransform.apply(self.model, num_kv_blocks=self.model.qaic_config.get("num_kv_blocks")) @@ -1687,6 +1706,7 @@ def from_pretrained( logger.warning("Updating low_cpu_mem_usage=False") kwargs.update({"attn_implementation": "eager", "low_cpu_mem_usage": False}) + from transformers import AutoConfig config = AutoConfig.from_pretrained(pretrained_model_name_or_path, trust_remote_code=True) @@ -1741,6 +1761,8 @@ def compile( 
*, prefill_seq_len: Optional[int] = None, ctx_len: Optional[int] = None, + comp_ctx_lengths_prefill: Optional[List[int]] = None, + comp_ctx_lengths_decode: Optional[List[int]] = None, batch_size: int = 1, full_batch_size: Optional[int] = None, kv_cache_batch_size: Optional[int] = None, @@ -1810,10 +1832,21 @@ def compile( kv_cache_batch_size = kv_cache_batch_size or full_batch_size or batch_size output_names = self.model.get_output_names() + # if ccl_enabled is True read Compute-Context-Length lists + if self.ccl_enabled: + if comp_ctx_lengths_prefill is None or comp_ctx_lengths_decode is None: + logger.warning( + "Please set comp_ctx_lengths_prefill and comp_ctx_lengths_decode with a proper list of context lengths. Using non-CCL default model." + ) + self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = process_ccl_specializations( + comp_ctx_lengths_prefill, comp_ctx_lengths_decode, ctx_len, prefill_seq_len + ) + # For supporting VLLM and Disaggregated with CCL - if "comp_ctx_lengths_prefill" in compiler_options: - self.comp_ctx_lengths_prefill = compiler_options.pop("comp_ctx_lengths_prefill") - self.comp_ctx_lengths_decode = compiler_options.pop("comp_ctx_lengths_decode") + if comp_ctx_lengths_prefill is not None or comp_ctx_lengths_decode is not None: + self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = process_ccl_specializations( + comp_ctx_lengths_prefill, comp_ctx_lengths_decode, ctx_len, prefill_seq_len + ) # Get specializations from modelling file # TODO: expose this via the auto class as well @@ -2378,8 +2411,6 @@ def __init__( # Set use_cache=True to get KV values as output during ONNX export model.config.use_cache = True - self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = process_ccl_specializations(qaic_config) - super().__init__(model, qaic_config=qaic_config, **kwargs) self.num_layers = model.config.num_hidden_layers self.continuous_batching = continuous_batching @@ -2388,6 +2419,10 @@ def __init__( self.is_tlm = 
transformed self.hash_params["qeff_auto_class"] = self.__class__.__name__ + self.ccl_enabled = False + if qaic_config: + self.ccl_enabled = qaic_config.get("ccl_enabled", False) + self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = None, None # ---Sampling--- # Note: SamplerTransform should be applied after all other transforms @@ -2833,6 +2868,8 @@ def compile( *, prefill_seq_len: int = 32, ctx_len: int = 128, + comp_ctx_lengths_prefill: Optional[List[int]] = None, + comp_ctx_lengths_decode: Optional[List[int]] = None, batch_size: int = 1, full_batch_size: Optional[int] = None, kv_cache_batch_size: Optional[int] = None, @@ -2924,10 +2961,18 @@ def compile( """ + # if ccl_enabled is True read Compute-Context-Length lists + if self.ccl_enabled: + if comp_ctx_lengths_prefill is None or comp_ctx_lengths_decode is None: + logger.warning( + "Please set comp_ctx_lengths_prefill and comp_ctx_lengths_decode with a proper list of context lengths. Using non-CCL default model." + ) + self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = process_ccl_specializations( + comp_ctx_lengths_prefill, comp_ctx_lengths_decode, ctx_len, prefill_seq_len + ) + # For supporting VLLM and Disaggregated with CCL - if "comp_ctx_lengths_prefill" in compiler_options and "comp_ctx_lengths_decode" in compiler_options: - comp_ctx_lengths_prefill = compiler_options.pop("comp_ctx_lengths_prefill") - comp_ctx_lengths_decode = compiler_options.pop("comp_ctx_lengths_decode") + if comp_ctx_lengths_prefill is not None or comp_ctx_lengths_decode is not None: if isinstance(comp_ctx_lengths_prefill, str): import ast @@ -2942,6 +2987,9 @@ def compile( self.comp_ctx_lengths_prefill = comp_ctx_lengths_prefill self.comp_ctx_lengths_decode = comp_ctx_lengths_decode + self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = process_ccl_specializations( + self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode, ctx_len, prefill_seq_len + ) # --- Validation --- if prefill_only is not None 
and not isinstance(prefill_only, bool): raise TypeError("`prefill_only` must be a boolean.") diff --git a/QEfficient/transformers/spd/spd_transform_forward.py b/QEfficient/transformers/spd/spd_transform_forward.py index e82bf4cdf..4703cb18d 100644 --- a/QEfficient/transformers/spd/spd_transform_forward.py +++ b/QEfficient/transformers/spd/spd_transform_forward.py @@ -76,6 +76,7 @@ def tlm_forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, @@ -123,6 +124,7 @@ def tlm_forward( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, batch_index=batch_index, inputs_embeds=inputs_embeds, use_cache=use_cache, diff --git a/QEfficient/utils/check_ccl_specializations.py b/QEfficient/utils/check_ccl_specializations.py index 308c69554..0d6a078f6 100644 --- a/QEfficient/utils/check_ccl_specializations.py +++ b/QEfficient/utils/check_ccl_specializations.py @@ -6,14 +6,7 @@ # ----------------------------------------------------------------------------- -def process_ccl_specializations(qaic_config): - if qaic_config is None: - return None, None - ccl_prefill = qaic_config.pop("comp_ctx_lengths_prefill", None) - ccl_decode = qaic_config.pop("comp_ctx_lengths_decode", None) - ctx_len = qaic_config.pop("ctx_len", None) - prefill_seq_len = qaic_config.pop("prefill_seq_len", 128) - +def process_ccl_specializations(ccl_prefill, ccl_decode, ctx_len, prefill_seq_len): if ccl_prefill is None or ccl_decode is None: return None, None diff --git a/examples/performance/README.md b/examples/performance/README.md index 48d34d972..9308ce6db 100644 --- a/examples/performance/README.md +++ 
b/examples/performance/README.md @@ -95,6 +95,56 @@ python on_device_sampling.py \ --top-p 0.89 ``` +### Compute-Context-Length + +Calculating Context-Length dynamically during inference for getting the best related performance within each window of context-length + +#### compute_context_length/basic_inference.py +Configure CCL parameters: 1) ccl-enabled: to activate CCL feature, 2) comp-ctx-lengths-prefill: list of context length to be used during prefilling, and 3) comp-ctx-lengths-decode: list of context lengths to be used during decoding. + +**Usage for Text-only models:** +```bash +python compute_context_length/basic_inference.py \ + --model-name meta-llama/Llama-3.1-8B \ + --num-cores 16 \ + --prefill-seq-len 32 \ + --ctx-len 1024 \ + --ccl-enabled \ + --comp-ctx-lengths-prefill 500,1000 \ + --comp-ctx-lengths-decode 512,1024 +``` + +**Usage for VLM models such as mllama and llava:** +```bash +python compute_context_length/vlm_inference.py \ + --model-name meta-llama/Llama-3.2-11B-Vision-Instruct \ + --hf-token "" \ + --num-cores 16 \ + --prefill-seq-len 32 \ + --ctx-len 8192 \ + --img-size 560 \ + --ccl-enabled \ + --comp-ctx-lengths-prefill 4096 \ + --comp-ctx-lengths-decode 6144,8192 +``` + +**Usage with other MoE and Multimodal models:** +For various models available in compute_context_length directory such as gemma3, gpt_oss, granite_vision, internvl, llama4_cb, llama4_multi_image, llama4, mistral3, molmo, qwen2_5_vl, qwen2_5_vl_cb, and qwen3moe, use the related inference script and only change the model-name and ccl configuration in the related script. 
The following is an example of each model: +```bash +python compute_context_length/gemma3.py +python compute_context_length/gpt_oss.py +python compute_context_length/granite_vision.py +python compute_context_length/internvl.py +python compute_context_length/llama4_cb.py +python compute_context_length/llama4_multi_image.py +python compute_context_length/llama4.py +python compute_context_length/mistral3.py +python compute_context_length/molmo.py +python compute_context_length/qwen2_5_vl.py +python compute_context_length/qwen2_5_vl_cb.py +python compute_context_length/qwen3moe.py +``` + ## Performance Tips 1. **Speculative Decoding**: Best for long-form generation where draft model is much faster than target diff --git a/examples/performance/compute_context_length/README.md b/examples/performance/compute_context_length/README.md index bbc240645..9f1d29b9a 100644 --- a/examples/performance/compute_context_length/README.md +++ b/examples/performance/compute_context_length/README.md @@ -68,7 +68,7 @@ python vlm_inference.py \ Basic CCL usage with text-only language models. **Supported Models:** -- Llama (3.2, 3.3) +- Llama (3.2, 3.3, swiftkv) - Gemma/Gemma-2 - Mistral - Phi/Phi-3 @@ -77,6 +77,9 @@ Basic CCL usage with text-only language models. 
- GPT-2, GPT-J - CodeGen - OLMo-2 +- Mistral/Mixtral +- Qwen2 +- Falcon **Command-Line Arguments:** - `--model-name`: HuggingFace model ID (default: meta-llama/Llama-3.2-1B) diff --git a/examples/performance/compute_context_length/basic_inference.py b/examples/performance/compute_context_length/basic_inference.py index a4407b05a..4533c47e8 100644 --- a/examples/performance/compute_context_length/basic_inference.py +++ b/examples/performance/compute_context_length/basic_inference.py @@ -46,6 +46,11 @@ def main(): default=1024, help="Maximum context length", ) + parser.add_argument( + "--ccl-enabled", + action="store_true", + help="Enable compute-context-length (CCL) feature", + ) parser.add_argument( "--comp-ctx-lengths-prefill", type=lambda x: [int(i) for i in x.split(",")], @@ -113,9 +118,7 @@ def main(): args.model_name, continuous_batching=args.continuous_batching, qaic_config={ - "comp_ctx_lengths_prefill": args.comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": args.comp_ctx_lengths_decode, - "ctx_len": args.ctx_len, # Required for CCL validation + "ccl_enabled": args.ccl_enabled, }, ) @@ -132,6 +135,9 @@ def main(): if args.continuous_batching: compile_kwargs["full_batch_size"] = args.full_batch_size + if args.ccl_enabled: + compile_kwargs["comp_ctx_lengths_prefill"] = args.comp_ctx_lengths_prefill + compile_kwargs["comp_ctx_lengths_decode"] = args.comp_ctx_lengths_decode qpc_path = model.compile(**compile_kwargs) print(f"Model compiled successfully to: {qpc_path}") diff --git a/examples/performance/compute_context_length/fp32_nodes_gemma3_27b.yaml b/examples/performance/compute_context_length/fp32_nodes_gemma3_27b.yaml new file mode 100755 index 000000000..d2a4bf164 --- /dev/null +++ b/examples/performance/compute_context_length/fp32_nodes_gemma3_27b.yaml @@ -0,0 +1,685 @@ +FP32NodeInstanceNames: + + - /language_model/layers.0/Add_1_output_0 + - /language_model/layers.0/Add_2_output_0 + - /language_model/layers.0/Add_3_output_0 + - 
/language_model/layers.0/Add_output_0 + - /language_model/layers.1/Add_1_output_0 + - /language_model/layers.1/Add_2_output_0 + - /language_model/layers.1/Add_3_output_0 + - /language_model/layers.1/Add_output_0 + - /language_model/layers.2/Add_1_output_0 + - /language_model/layers.2/Add_2_output_0 + - /language_model/layers.2/Add_3_output_0 + - /language_model/layers.2/Add_output_0 + - /language_model/layers.3/Add_1_output_0 + - /language_model/layers.3/Add_2_output_0 + - /language_model/layers.3/Add_3_output_0 + - /language_model/layers.3/Add_output_0 + - /language_model/layers.4/Add_1_output_0 + - /language_model/layers.4/Add_2_output_0 + - /language_model/layers.4/Add_3_output_0 + - /language_model/layers.4/Add_output_0 + - /language_model/layers.5/Add_1_output_0 + - /language_model/layers.5/Add_2_output_0 + - /language_model/layers.5/Add_3_output_0 + - /language_model/layers.5/Add_output_0 + - /language_model/layers.6/Add_1_output_0 + - /language_model/layers.6/Add_2_output_0 + - /language_model/layers.6/Add_3_output_0 + - /language_model/layers.6/Add_output_0 + - /language_model/layers.7/Add_1_output_0 + - /language_model/layers.7/Add_2_output_0 + - /language_model/layers.7/Add_3_output_0 + - /language_model/layers.7/Add_output_0 + - /language_model/layers.8/Add_1_output_0 + - /language_model/layers.8/Add_2_output_0 + - /language_model/layers.8/Add_3_output_0 + - /language_model/layers.8/Add_output_0 + - /language_model/layers.9/Add_1_output_0 + - /language_model/layers.9/Add_2_output_0 + - /language_model/layers.9/Add_3_output_0 + - /language_model/layers.9/Add_output_0 + - /language_model/layers.10/Add_1_output_0 + - /language_model/layers.10/Add_2_output_0 + - /language_model/layers.10/Add_3_output_0 + - /language_model/layers.10/Add_output_0 + - /language_model/layers.11/Add_1_output_0 + - /language_model/layers.11/Add_2_output_0 + - /language_model/layers.11/Add_3_output_0 + - /language_model/layers.11/Add_output_0 + - 
/language_model/layers.12/Add_1_output_0 + - /language_model/layers.12/Add_2_output_0 + - /language_model/layers.12/Add_3_output_0 + - /language_model/layers.12/Add_output_0 + - /language_model/layers.13/Add_1_output_0 + - /language_model/layers.13/Add_2_output_0 + - /language_model/layers.13/Add_3_output_0 + - /language_model/layers.13/Add_output_0 + - /language_model/layers.14/Add_1_output_0 + - /language_model/layers.14/Add_2_output_0 + - /language_model/layers.14/Add_3_output_0 + - /language_model/layers.14/Add_output_0 + - /language_model/layers.15/Add_1_output_0 + - /language_model/layers.15/Add_2_output_0 + - /language_model/layers.15/Add_3_output_0 + - /language_model/layers.15/Add_output_0 + - /language_model/layers.16/Add_1_output_0 + - /language_model/layers.16/Add_2_output_0 + - /language_model/layers.16/Add_3_output_0 + - /language_model/layers.16/Add_output_0 + - /language_model/layers.17/Add_1_output_0 + - /language_model/layers.17/Add_2_output_0 + - /language_model/layers.17/Add_3_output_0 + - /language_model/layers.17/Add_output_0 + - /language_model/layers.18/Add_1_output_0 + - /language_model/layers.18/Add_2_output_0 + - /language_model/layers.18/Add_3_output_0 + - /language_model/layers.18/Add_output_0 + - /language_model/layers.19/Add_1_output_0 + - /language_model/layers.19/Add_2_output_0 + - /language_model/layers.19/Add_3_output_0 + - /language_model/layers.19/Add_output_0 + - /language_model/layers.20/Add_1_output_0 + - /language_model/layers.20/Add_2_output_0 + - /language_model/layers.20/Add_3_output_0 + - /language_model/layers.20/Add_output_0 + - /language_model/layers.21/Add_1_output_0 + - /language_model/layers.21/Add_2_output_0 + - /language_model/layers.21/Add_3_output_0 + - /language_model/layers.21/Add_output_0 + - /language_model/layers.22/Add_1_output_0 + - /language_model/layers.22/Add_2_output_0 + - /language_model/layers.22/Add_3_output_0 + - /language_model/layers.22/Add_output_0 + - /language_model/layers.23/Add_1_output_0 
+ - /language_model/layers.23/Add_2_output_0 + - /language_model/layers.23/Add_output_0 + - /language_model/layers.24/Add_1_output_0 + - /language_model/layers.24/Add_2_output_0 + - /language_model/layers.24/Add_3_output_0 + - /language_model/layers.24/Add_output_0 + - /language_model/layers.25/Add_1_output_0 + - /language_model/layers.25/Add_2_output_0 + - /language_model/layers.25/Add_3_output_0 + - /language_model/layers.25/Add_output_0 + - /language_model/layers.26/Add_1_output_0 + - /language_model/layers.26/Add_2_output_0 + - /language_model/layers.26/Add_3_output_0 + - /language_model/layers.26/Add_output_0 + - /language_model/layers.27/Add_1_output_0 + - /language_model/layers.27/Add_2_output_0 + - /language_model/layers.27/Add_3_output_0 + - /language_model/layers.27/Add_output_0 + - /language_model/layers.28/Add_1_output_0 + - /language_model/layers.28/Add_2_output_0 + - /language_model/layers.28/Add_3_output_0 + - /language_model/layers.28/Add_output_0 + - /language_model/layers.29/Add_1_output_0 + - /language_model/layers.29/Add_2_output_0 + - /language_model/layers.29/Add_3_output_0 + - /language_model/layers.29/Add_output_0 + - /language_model/layers.30/Add_1_output_0 + - /language_model/layers.30/Add_2_output_0 + - /language_model/layers.30/Add_3_output_0 + - /language_model/layers.30/Add_output_0 + - /language_model/layers.31/Add_1_output_0 + - /language_model/layers.31/Add_2_output_0 + - /language_model/layers.31/Add_3_output_0 + - /language_model/layers.31/Add_output_0 + - /language_model/layers.32/Add_1_output_0 + - /language_model/layers.32/Add_2_output_0 + - /language_model/layers.32/Add_3_output_0 + - /language_model/layers.32/Add_output_0 + - /language_model/layers.33/Add_1_output_0 + - /language_model/layers.33/Add_2_output_0 + - /language_model/layers.33/Add_3_output_0 + - /language_model/layers.33/Add_output_0 + - /language_model/layers.34/Add_1_output_0 + - /language_model/layers.34/Add_2_output_0 + - 
/language_model/layers.34/Add_3_output_0 + - /language_model/layers.34/Add_output_0 + - /language_model/layers.35/Add_1_output_0 + - /language_model/layers.35/Add_2_output_0 + - /language_model/layers.35/Add_3_output_0 + - /language_model/layers.35/Add_output_0 + - /language_model/layers.36/Add_1_output_0 + - /language_model/layers.36/Add_2_output_0 + - /language_model/layers.36/Add_3_output_0 + - /language_model/layers.36/Add_output_0 + - /language_model/layers.37/Add_1_output_0 + - /language_model/layers.37/Add_2_output_0 + - /language_model/layers.37/Add_3_output_0 + - /language_model/layers.37/Add_output_0 + - /language_model/layers.38/Add_1_output_0 + - /language_model/layers.38/Add_2_output_0 + - /language_model/layers.38/Add_3_output_0 + - /language_model/layers.38/Add_output_0 + - /language_model/layers.39/Add_1_output_0 + - /language_model/layers.39/Add_2_output_0 + - /language_model/layers.39/Add_3_output_0 + - /language_model/layers.39/Add_output_0 + - /language_model/layers.40/Add_1_output_0 + - /language_model/layers.40/Add_2_output_0 + - /language_model/layers.40/Add_3_output_0 + - /language_model/layers.40/Add_output_0 + - /language_model/layers.41/Add_1_output_0 + - /language_model/layers.41/Add_2_output_0 + - /language_model/layers.41/Add_3_output_0 + - /language_model/layers.41/Add_output_0 + - /language_model/layers.42/Add_1_output_0 + - /language_model/layers.42/Add_2_output_0 + - /language_model/layers.42/Add_3_output_0 + - /language_model/layers.42/Add_output_0 + - /language_model/layers.43/Add_1_output_0 + - /language_model/layers.43/Add_2_output_0 + - /language_model/layers.43/Add_3_output_0 + - /language_model/layers.43/Add_output_0 + - /language_model/layers.44/Add_1_output_0 + - /language_model/layers.44/Add_2_output_0 + - /language_model/layers.44/Add_3_output_0 + - /language_model/layers.44/Add_output_0 + - /language_model/layers.45/Add_1_output_0 + - /language_model/layers.45/Add_2_output_0 + - /language_model/layers.45/Add_3_output_0 
+ - /language_model/layers.45/Add_output_0 + - /language_model/layers.46/Add_1_output_0 + - /language_model/layers.46/Add_2_output_0 + - /language_model/layers.46/Add_3_output_0 + - /language_model/layers.46/Add_output_0 + - /language_model/layers.47/Add_1_output_0 + - /language_model/layers.47/Add_2_output_0 + - /language_model/layers.47/Add_3_output_0 + - /language_model/layers.47/Add_output_0 + - /language_model/layers.48/Add_1_output_0 + - /language_model/layers.48/Add_2_output_0 + - /language_model/layers.48/Add_3_output_0 + - /language_model/layers.48/Add_output_0 + - /language_model/layers.49/Add_1_output_0 + - /language_model/layers.49/Add_2_output_0 + - /language_model/layers.49/Add_3_output_0 + - /language_model/layers.49/Add_output_0 + - /language_model/layers.50/Add_1_output_0 + - /language_model/layers.50/Add_2_output_0 + - /language_model/layers.50/Add_3_output_0 + - /language_model/layers.50/Add_output_0 + - /language_model/layers.51/Add_1_output_0 + - /language_model/layers.51/Add_2_output_0 + - /language_model/layers.51/Add_3_output_0 + - /language_model/layers.51/Add_output_0 + - /language_model/layers.52/Add_1_output_0 + - /language_model/layers.52/Add_2_output_0 + - /language_model/layers.52/Add_3_output_0 + - /language_model/layers.52/Add_output_0 + - /language_model/layers.53/Add_1_output_0 + - /language_model/layers.53/Add_2_output_0 + - /language_model/layers.53/Add_3_output_0 + - /language_model/layers.53/Add_output_0 + - /language_model/layers.54/Add_1_output_0 + - /language_model/layers.54/Add_2_output_0 + - /language_model/layers.54/Add_3_output_0 + - /language_model/layers.54/Add_output_0 + - /language_model/layers.55/Add_1_output_0 + - /language_model/layers.55/Add_2_output_0 + - /language_model/layers.55/Add_3_output_0 + - /language_model/layers.55/Add_output_0 + - /language_model/layers.56/Add_1_output_0 + - /language_model/layers.56/Add_2_output_0 + - /language_model/layers.56/Add_3_output_0 + - 
/language_model/layers.56/Add_output_0 + - /language_model/layers.57/Add_1_output_0 + - /language_model/layers.57/Add_2_output_0 + - /language_model/layers.57/Add_3_output_0 + - /language_model/layers.57/Add_output_0 + - /language_model/layers.58/Add_1_output_0 + - /language_model/layers.58/Add_2_output_0 + - /language_model/layers.58/Add_3_output_0 + - /language_model/layers.58/Add_output_0 + - /language_model/layers.59/Add_1_output_0 + - /language_model/layers.59/Add_2_output_0 + - /language_model/layers.59/Add_3_output_0 + - /language_model/layers.59/Add_output_0 + - /language_model/layers.60/Add_1_output_0 + - /language_model/layers.60/Add_2_output_0 + - /language_model/layers.60/Add_3_output_0 + - /language_model/layers.60/Add_output_0 + - /language_model/layers.61/Add_1_output_0 + - /language_model/layers.61/Add_2_output_0 + - /language_model/layers.61/Add_3_output_0 + - /language_model/layers.61/Add_output_0 + - /language_model/norm/Add_output_0 + - /language_model/layers.0/self_attn/Mul_output_0 + - /language_model/layers.2/self_attn/Mul_output_0 + - /language_model/layers.3/self_attn/Mul_output_0 + - /language_model/layers.4/self_attn/Mul_output_0 + - /language_model/layers.5/self_attn/Mul_output_0 + - /language_model/layers.6/self_attn/Mul_output_0 + - /language_model/layers.7/self_attn/Mul_output_0 + - /language_model/layers.8/self_attn/Mul_output_0 + - /language_model/layers.9/self_attn/Mul_output_0 + - /language_model/layers.10/self_attn/Mul_output_0 + - /language_model/layers.11/self_attn/Mul_output_0 + - /language_model/layers.12/self_attn/Mul_output_0 + - /language_model/layers.13/self_attn/Mul_output_0 + - /language_model/layers.14/self_attn/Mul_output_0 + - /language_model/layers.15/self_attn/Mul_output_0 + - /language_model/layers.16/self_attn/Mul_output_0 + - /language_model/layers.17/self_attn/Mul_output_0 + - /language_model/layers.18/self_attn/Mul_output_0 + - /language_model/layers.19/self_attn/Mul_output_0 + - 
/language_model/layers.20/self_attn/Mul_output_0 + - /language_model/layers.21/self_attn/Mul_output_0 + - /language_model/layers.22/self_attn/Mul_output_0 + - /language_model/layers.23/self_attn/Mul_output_0 + - /language_model/layers.24/self_attn/Mul_output_0 + - /language_model/layers.25/self_attn/Mul_output_0 + - /language_model/layers.26/self_attn/Mul_output_0 + - /language_model/layers.27/self_attn/Mul_output_0 + - /language_model/layers.28/self_attn/Mul_output_0 + - /language_model/layers.29/self_attn/Mul_output_0 + - /language_model/layers.30/self_attn/Mul_output_0 + - /language_model/layers.31/self_attn/Mul_output_0 + - /language_model/layers.32/self_attn/Mul_output_0 + - /language_model/layers.33/self_attn/Mul_output_0 + - /language_model/layers.34/self_attn/Mul_output_0 + - /language_model/layers.35/self_attn/Mul_output_0 + - /language_model/layers.36/self_attn/Mul_output_0 + - /language_model/layers.37/self_attn/Mul_output_0 + - /language_model/layers.38/self_attn/Mul_output_0 + - /language_model/layers.39/self_attn/Mul_output_0 + - /language_model/layers.40/self_attn/Mul_output_0 + - /language_model/layers.41/self_attn/Mul_output_0 + - /language_model/layers.42/self_attn/Mul_output_0 + - /language_model/layers.43/self_attn/Mul_output_0 + - /language_model/layers.44/self_attn/Mul_output_0 + - /language_model/layers.45/self_attn/Mul_output_0 + - /language_model/layers.46/self_attn/Mul_output_0 + - /language_model/layers.47/self_attn/Mul_output_0 + - /language_model/layers.48/self_attn/Mul_output_0 + - /language_model/layers.49/self_attn/Mul_output_0 + - /language_model/layers.50/self_attn/Mul_output_0 + - /language_model/layers.51/self_attn/Mul_output_0 + - /language_model/layers.52/self_attn/Mul_output_0 + - /language_model/layers.53/self_attn/Mul_output_0 + - /language_model/layers.54/self_attn/Mul_output_0 + - /language_model/layers.55/self_attn/Mul_output_0 + - /language_model/layers.56/self_attn/Mul_output_0 + - 
/language_model/layers.57/self_attn/Mul_output_0 + - /language_model/layers.58/self_attn/Mul_output_0 + - /language_model/layers.59/self_attn/Mul_output_0 + - /language_model/layers.60/self_attn/Mul_output_0 + - /language_model/layers.61/self_attn/Mul_output_0 + - /language_model/layers.0/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.0/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.0/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.0/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.0/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.0/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.1/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.1/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.1/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.1/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.1/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.1/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.2/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.2/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.2/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.2/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.2/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.2/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.3/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.3/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.3/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.3/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.3/self_attn/k_norm/CustomRMSNorm_output_0 + - 
/language_model/layers.3/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.4/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.4/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.4/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.4/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.4/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.4/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.5/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.5/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.5/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.5/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.5/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.5/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.6/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.6/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.6/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.6/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.6/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.6/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.7/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.7/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.7/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.7/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.7/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.7/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.8/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.8/post_attention_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.8/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.8/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.8/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.8/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.9/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.9/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.9/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.9/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.9/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.9/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.10/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.10/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.10/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.10/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.10/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.10/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.11/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.11/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.11/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.11/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.11/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.11/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.12/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.12/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.12/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.12/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.12/self_attn/k_norm/CustomRMSNorm_output_0 
+ - /language_model/layers.12/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.13/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.13/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.13/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.13/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.13/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.13/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.14/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.14/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.14/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.14/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.14/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.14/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.15/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.15/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.15/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.15/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.15/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.15/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.16/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.16/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.16/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.16/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.16/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.16/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.17/input_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.17/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.17/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.17/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.17/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.17/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.18/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.18/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.18/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.18/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.18/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.18/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.19/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.19/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.19/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.19/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.19/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.19/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.20/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.20/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.20/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.20/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.20/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.20/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.21/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.21/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.21/post_feedforward_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.21/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.21/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.21/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.22/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.22/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.22/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.22/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.22/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.22/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.23/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.23/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.23/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.23/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.23/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.23/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.24/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.24/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.24/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.24/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.24/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.24/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.25/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.25/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.25/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.25/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.25/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.25/self_attn/q_norm/CustomRMSNorm_output_0 
+ - /language_model/layers.26/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.26/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.26/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.26/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.26/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.26/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.27/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.27/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.27/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.27/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.27/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.27/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.28/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.28/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.28/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.28/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.28/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.28/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.29/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.29/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.29/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.29/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.29/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.29/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.30/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.30/post_attention_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.30/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.30/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.30/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.30/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.31/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.31/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.31/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.31/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.31/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.31/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.32/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.32/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.32/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.32/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.32/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.32/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.33/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.33/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.33/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.33/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.33/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.33/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.34/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.34/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.34/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.34/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.34/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.34/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.35/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.35/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.35/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.35/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.35/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.35/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.36/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.36/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.36/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.36/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.36/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.36/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.37/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.37/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.37/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.37/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.37/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.37/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.38/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.38/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.38/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.38/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.38/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.38/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.39/input_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.39/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.39/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.39/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.39/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.39/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.40/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.40/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.40/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.40/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.40/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.40/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.41/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.41/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.41/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.41/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.41/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.41/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.42/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.42/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.42/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.42/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.42/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.42/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.43/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.43/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.43/post_feedforward_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.43/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.43/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.43/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.44/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.44/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.44/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.44/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.44/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.44/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.45/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.45/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.45/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.45/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.45/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.45/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.46/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.46/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.46/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.46/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.46/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.46/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.47/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.47/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.47/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.47/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.47/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.47/self_attn/q_norm/CustomRMSNorm_output_0 
+ - /language_model/layers.48/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.48/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.48/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.48/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.48/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.48/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.49/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.49/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.49/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.49/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.49/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.49/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.50/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.50/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.50/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.50/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.50/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.50/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.51/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.51/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.51/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.51/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.51/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.51/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.52/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.52/post_attention_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.52/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.52/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.52/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.52/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.53/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.53/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.53/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.53/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.53/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.53/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.54/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.54/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.54/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.54/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.54/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.54/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.55/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.55/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.55/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.55/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.55/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.55/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.56/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.56/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.56/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.56/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.56/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.56/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.57/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.57/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.57/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.57/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.57/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.57/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.58/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.58/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.58/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.58/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.58/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.58/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.59/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.59/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.59/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.59/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.59/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.59/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.60/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.60/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.60/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.60/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.60/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.60/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.61/input_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.61/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.61/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.61/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.61/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.61/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/norm/CustomRMSNorm_output_0 + diff --git a/examples/performance/compute_context_length/fp32_nodes_gemma3_4b.yaml b/examples/performance/compute_context_length/fp32_nodes_gemma3_4b.yaml new file mode 100755 index 000000000..1c8aa1c41 --- /dev/null +++ b/examples/performance/compute_context_length/fp32_nodes_gemma3_4b.yaml @@ -0,0 +1,698 @@ +FP32NodeInstanceNames: + + - /language_model/layers.0/Add_output_0 + - /language_model/layers.0/Add_1_output_0 + - /language_model/layers.0/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.0/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.0/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.0/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.0/Add_2_output_0 + - /language_model/layers.0/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.0/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.0/Add_3_output_0 + - /language_model/layers.1/Add_output_0 + - /language_model/layers.1/Add_1_output_0 + - /language_model/layers.1/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.1/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.1/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.1/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.1/Add_2_output_0 + - /language_model/layers.1/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.1/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.1/Add_3_output_0 + - 
/language_model/layers.2/Add_output_0 + - /language_model/layers.2/Add_1_output_0 + - /language_model/layers.2/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.2/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.2/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.2/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.2/Add_2_output_0 + - /language_model/layers.2/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.2/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.2/Add_3_output_0 + - /language_model/layers.3/Add_output_0 + - /language_model/layers.3/Add_1_output_0 + - /language_model/layers.3/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.3/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.3/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.3/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.3/Add_2_output_0 + - /language_model/layers.3/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.3/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.3/Add_3_output_0 + - /language_model/layers.4/Add_output_0 + - /language_model/layers.4/Add_1_output_0 + - /language_model/layers.4/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.4/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.4/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.4/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.4/Add_2_output_0 + - /language_model/layers.4/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.4/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.4/Add_3_output_0 + - /language_model/layers.5/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.5/self_attn/q_norm/CustomRMSNorm_output_0 + - 
/language_model/layers.5/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.5/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.5/Add_output_0 + - /language_model/layers.5/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.5/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.5/Add_1_output_0 + - /language_model/layers.6/Add_output_0 + - /language_model/layers.6/Add_1_output_0 + - /language_model/layers.6/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.6/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.6/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.6/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.6/Add_2_output_0 + - /language_model/layers.6/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.6/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.6/Add_3_output_0 + - /language_model/layers.7/Add_output_0 + - /language_model/layers.7/Add_1_output_0 + - /language_model/layers.7/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.7/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.7/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.7/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.7/Add_2_output_0 + - /language_model/layers.7/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.7/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.7/Add_3_output_0 + - /language_model/layers.8/Add_output_0 + - /language_model/layers.8/Add_1_output_0 + - /language_model/layers.8/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.8/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.8/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.8/post_attention_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.8/Add_2_output_0 + - /language_model/layers.8/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.8/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.8/Add_3_output_0 + - /language_model/layers.9/Add_output_0 + - /language_model/layers.9/Add_1_output_0 + - /language_model/layers.9/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.9/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.9/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.9/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.9/Add_2_output_0 + - /language_model/layers.9/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.9/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.9/Add_3_output_0 + - /language_model/layers.10/Add_output_0 + - /language_model/layers.10/Add_1_output_0 + - /language_model/layers.10/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.10/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.10/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.10/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.10/Add_2_output_0 + - /language_model/layers.10/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.10/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.10/Add_3_output_0 + - /language_model/layers.11/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.11/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.11/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.11/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.11/Add_output_0 + - /language_model/layers.11/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.11/post_feedforward_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.11/Add_1_output_0 + - /language_model/layers.12/Add_output_0 + - /language_model/layers.12/Add_1_output_0 + - /language_model/layers.12/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.12/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.12/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.12/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.12/Add_2_output_0 + - /language_model/layers.12/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.12/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.12/Add_3_output_0 + - /language_model/layers.13/Add_output_0 + - /language_model/layers.13/Add_1_output_0 + - /language_model/layers.13/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.13/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.13/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.13/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.13/Add_2_output_0 + - /language_model/layers.13/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.13/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.13/Add_3_output_0 + - /language_model/layers.14/Add_output_0 + - /language_model/layers.14/Add_1_output_0 + - /language_model/layers.14/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.14/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.14/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.14/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.14/Add_2_output_0 + - /language_model/layers.14/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.14/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.14/Add_3_output_0 + - /language_model/layers.15/Add_output_0 + - 
/language_model/layers.15/Add_1_output_0 + - /language_model/layers.15/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.15/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.15/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.15/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.15/Add_2_output_0 + - /language_model/layers.15/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.15/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.15/Add_3_output_0 + - /language_model/layers.16/Add_output_0 + - /language_model/layers.16/Add_1_output_0 + - /language_model/layers.16/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.16/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.16/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.16/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.16/Add_2_output_0 + - /language_model/layers.16/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.16/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.16/Add_3_output_0 + - /language_model/layers.17/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.17/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.17/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.17/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.17/Add_output_0 + - /language_model/layers.17/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.17/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.17/Add_1_output_0 + - /language_model/layers.18/Add_output_0 + - /language_model/layers.18/Add_1_output_0 + - /language_model/layers.18/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.18/self_attn/q_norm/CustomRMSNorm_output_0 + - 
/language_model/layers.18/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.18/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.18/Add_2_output_0 + - /language_model/layers.18/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.18/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.18/Add_3_output_0 + - /language_model/layers.19/Add_output_0 + - /language_model/layers.19/Add_1_output_0 + - /language_model/layers.19/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.19/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.19/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.19/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.19/Add_2_output_0 + - /language_model/layers.19/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.19/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.19/Add_3_output_0 + - /language_model/layers.20/Add_output_0 + - /language_model/layers.20/Add_1_output_0 + - /language_model/layers.20/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.20/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.20/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.20/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.20/Add_2_output_0 + - /language_model/layers.20/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.20/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.20/Add_3_output_0 + - /language_model/layers.21/Add_output_0 + - /language_model/layers.21/Add_1_output_0 + - /language_model/layers.21/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.21/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.21/self_attn/k_norm/CustomRMSNorm_output_0 + - 
/language_model/layers.21/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.21/Add_2_output_0 + - /language_model/layers.21/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.21/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.21/Add_3_output_0 + - /language_model/layers.22/Add_output_0 + - /language_model/layers.22/Add_1_output_0 + - /language_model/layers.22/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.22/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.22/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.22/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.22/Add_2_output_0 + - /language_model/layers.22/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.22/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.22/Add_3_output_0 + - /language_model/layers.23/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.23/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.23/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.23/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.23/Add_output_0 + - /language_model/layers.23/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.23/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.23/Add_1_output_0 + - /language_model/layers.24/Add_output_0 + - /language_model/layers.24/Add_1_output_0 + - /language_model/layers.24/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.24/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.24/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.24/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.24/Add_2_output_0 + - /language_model/layers.24/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.24/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.24/Add_3_output_0 + - /language_model/layers.25/Add_output_0 + - /language_model/layers.25/Add_1_output_0 + - /language_model/layers.25/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.25/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.25/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.25/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.25/Add_2_output_0 + - /language_model/layers.25/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.25/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.25/Add_3_output_0 + - /language_model/layers.26/Add_output_0 + - /language_model/layers.26/Add_1_output_0 + - /language_model/layers.26/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.26/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.26/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.26/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.26/Add_2_output_0 + - /language_model/layers.26/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.26/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.26/Add_3_output_0 + - /language_model/layers.27/Add_output_0 + - /language_model/layers.27/Add_1_output_0 + - /language_model/layers.27/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.27/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.27/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.27/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.27/Add_2_output_0 + - /language_model/layers.27/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.27/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.27/Add_3_output_0 + - 
/language_model/layers.28/Add_output_0 + - /language_model/layers.28/Add_1_output_0 + - /language_model/layers.28/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.28/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.28/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.28/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.28/Add_2_output_0 + - /language_model/layers.28/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.28/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.28/Add_3_output_0 + - /language_model/layers.29/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.29/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.29/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.29/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.29/Add_output_0 + - /language_model/layers.29/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.29/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.29/Add_1_output_0 + - /language_model/layers.30/Add_output_0 + - /language_model/layers.30/Add_1_output_0 + - /language_model/layers.30/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.30/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.30/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.30/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.30/Add_2_output_0 + - /language_model/layers.30/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.30/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.30/Add_3_output_0 + - /language_model/layers.31/Add_output_0 + - /language_model/layers.31/Add_1_output_0 + - /language_model/layers.31/input_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.31/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.31/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.31/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.31/Add_2_output_0 + - /language_model/layers.31/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.31/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.31/Add_3_output_0 + - /language_model/layers.32/Add_output_0 + - /language_model/layers.32/Add_1_output_0 + - /language_model/layers.32/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.32/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.32/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.32/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.32/Add_2_output_0 + - /language_model/layers.32/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.32/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.32/Add_3_output_0 + - /language_model/layers.33/Add_output_0 + - /language_model/layers.33/Add_1_output_0 + - /language_model/layers.33/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.33/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.33/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.33/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.33/Add_2_output_0 + - /language_model/layers.33/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.33/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.33/Add_3_output_0 + - /language_model/norm/CustomRMSNorm_output_0 + - /language_model/layers.0/self_attn/Mul_output_0 + - /language_model/layers.0/self_attn/Mul_1_output_0 + - /language_model/layers.0/self_attn/Mul_2_output_0 + - /language_model/layers.0/self_attn/Mul_3_output_0 + - 
/language_model/layers.0/self_attn/Mul_4_output_0 + - /language_model/layers.0/self_attn/Mul_5_output_0 + - /language_model/layers.0/self_attn/Mul_6_output_0 + - /language_model/layers.0/self_attn/Mul_7_output_0 + - /language_model/layers.0/self_attn/Mul_8_output_0 + - /language_model/layers.1/self_attn/Mul_9_output_0 + - /language_model/layers.2/self_attn/Mul_output_0 + - /language_model/layers.2/self_attn/Mul_1_output_0 + - /language_model/layers.2/self_attn/Mul_2_output_0 + - /language_model/layers.2/self_attn/Mul_3_output_0 + - /language_model/layers.2/self_attn/Mul_4_output_0 + - /language_model/layers.2/self_attn/Mul_5_output_0 + - /language_model/layers.2/self_attn/Mul_6_output_0 + - /language_model/layers.2/self_attn/Mul_7_output_0 + - /language_model/layers.2/self_attn/Mul_8_output_0 + - /language_model/layers.2/self_attn/Mul_9_output_0 + - /language_model/layers.3/self_attn/Mul_output_0 + - /language_model/layers.3/self_attn/Mul_1_output_0 + - /language_model/layers.3/self_attn/Mul_2_output_0 + - /language_model/layers.3/self_attn/Mul_3_output_0 + - /language_model/layers.3/self_attn/Mul_4_output_0 + - /language_model/layers.3/self_attn/Mul_5_output_0 + - /language_model/layers.3/self_attn/Mul_6_output_0 + - /language_model/layers.3/self_attn/Mul_7_output_0 + - /language_model/layers.3/self_attn/Mul_8_output_0 + - /language_model/layers.3/self_attn/Mul_9_output_0 + - /language_model/layers.4/self_attn/Mul_output_0 + - /language_model/layers.4/self_attn/Mul_1_output_0 + - /language_model/layers.4/self_attn/Mul_2_output_0 + - /language_model/layers.4/self_attn/Mul_3_output_0 + - /language_model/layers.4/self_attn/Mul_4_output_0 + - /language_model/layers.4/self_attn/Mul_5_output_0 + - /language_model/layers.4/self_attn/Mul_6_output_0 + - /language_model/layers.4/self_attn/Mul_7_output_0 + - /language_model/layers.4/self_attn/Mul_8_output_0 + - /language_model/layers.4/self_attn/Mul_9_output_0 + - /language_model/layers.5/self_attn/Mul_output_0 + - 
/language_model/layers.5/self_attn/Mul_1_output_0 + - /language_model/layers.5/self_attn/Mul_2_output_0 + - /language_model/layers.5/self_attn/Mul_3_output_0 + - /language_model/layers.5/self_attn/Mul_4_output_0 + - /language_model/layers.5/self_attn/Mul_5_output_0 + - /language_model/layers.5/self_attn/Mul_6_output_0 + - /language_model/layers.5/self_attn/Mul_7_output_0 + - /language_model/layers.5/self_attn/Mul_8_output_0 + - /language_model/layers.5/self_attn/Mul_9_output_0 + - /language_model/layers.6/self_attn/Mul_output_0 + - /language_model/layers.6/self_attn/Mul_1_output_0 + - /language_model/layers.6/self_attn/Mul_2_output_0 + - /language_model/layers.6/self_attn/Mul_3_output_0 + - /language_model/layers.6/self_attn/Mul_4_output_0 + - /language_model/layers.6/self_attn/Mul_5_output_0 + - /language_model/layers.6/self_attn/Mul_6_output_0 + - /language_model/layers.6/self_attn/Mul_7_output_0 + - /language_model/layers.6/self_attn/Mul_8_output_0 + - /language_model/layers.6/self_attn/Mul_9_output_0 + - /language_model/layers.7/self_attn/Mul_output_0 + - /language_model/layers.7/self_attn/Mul_1_output_0 + - /language_model/layers.7/self_attn/Mul_2_output_0 + - /language_model/layers.7/self_attn/Mul_3_output_0 + - /language_model/layers.7/self_attn/Mul_4_output_0 + - /language_model/layers.7/self_attn/Mul_5_output_0 + - /language_model/layers.7/self_attn/Mul_6_output_0 + - /language_model/layers.7/self_attn/Mul_7_output_0 + - /language_model/layers.7/self_attn/Mul_8_output_0 + - /language_model/layers.7/self_attn/Mul_9_output_0 + - /language_model/layers.8/self_attn/Mul_output_0 + - /language_model/layers.8/self_attn/Mul_1_output_0 + - /language_model/layers.8/self_attn/Mul_2_output_0 + - /language_model/layers.8/self_attn/Mul_3_output_0 + - /language_model/layers.8/self_attn/Mul_4_output_0 + - /language_model/layers.8/self_attn/Mul_5_output_0 + - /language_model/layers.8/self_attn/Mul_6_output_0 + - /language_model/layers.8/self_attn/Mul_7_output_0 + - 
/language_model/layers.8/self_attn/Mul_8_output_0 + - /language_model/layers.8/self_attn/Mul_9_output_0 + - /language_model/layers.9/self_attn/Mul_output_0 + - /language_model/layers.9/self_attn/Mul_1_output_0 + - /language_model/layers.9/self_attn/Mul_2_output_0 + - /language_model/layers.9/self_attn/Mul_3_output_0 + - /language_model/layers.9/self_attn/Mul_4_output_0 + - /language_model/layers.9/self_attn/Mul_5_output_0 + - /language_model/layers.9/self_attn/Mul_6_output_0 + - /language_model/layers.9/self_attn/Mul_7_output_0 + - /language_model/layers.9/self_attn/Mul_8_output_0 + - /language_model/layers.9/self_attn/Mul_9_output_0 + - /language_model/layers.10/self_attn/Mul_output_0 + - /language_model/layers.10/self_attn/Mul_1_output_0 + - /language_model/layers.10/self_attn/Mul_2_output_0 + - /language_model/layers.10/self_attn/Mul_3_output_0 + - /language_model/layers.10/self_attn/Mul_4_output_0 + - /language_model/layers.10/self_attn/Mul_5_output_0 + - /language_model/layers.10/self_attn/Mul_6_output_0 + - /language_model/layers.10/self_attn/Mul_7_output_0 + - /language_model/layers.10/self_attn/Mul_8_output_0 + - /language_model/layers.10/self_attn/Mul_9_output_0 + - /language_model/layers.11/self_attn/Mul_output_0 + - /language_model/layers.11/self_attn/Mul_1_output_0 + - /language_model/layers.11/self_attn/Mul_2_output_0 + - /language_model/layers.11/self_attn/Mul_3_output_0 + - /language_model/layers.11/self_attn/Mul_4_output_0 + - /language_model/layers.11/self_attn/Mul_5_output_0 + - /language_model/layers.11/self_attn/Mul_6_output_0 + - /language_model/layers.11/self_attn/Mul_7_output_0 + - /language_model/layers.11/self_attn/Mul_8_output_0 + - /language_model/layers.11/self_attn/Mul_9_output_0 + - /language_model/layers.12/self_attn/Mul_output_0 + - /language_model/layers.12/self_attn/Mul_1_output_0 + - /language_model/layers.12/self_attn/Mul_2_output_0 + - /language_model/layers.12/self_attn/Mul_3_output_0 + - 
/language_model/layers.12/self_attn/Mul_4_output_0 + - /language_model/layers.12/self_attn/Mul_5_output_0 + - /language_model/layers.12/self_attn/Mul_6_output_0 + - /language_model/layers.12/self_attn/Mul_7_output_0 + - /language_model/layers.12/self_attn/Mul_8_output_0 + - /language_model/layers.12/self_attn/Mul_9_output_0 + - /language_model/layers.13/self_attn/Mul_output_0 + - /language_model/layers.13/self_attn/Mul_1_output_0 + - /language_model/layers.13/self_attn/Mul_2_output_0 + - /language_model/layers.13/self_attn/Mul_3_output_0 + - /language_model/layers.13/self_attn/Mul_4_output_0 + - /language_model/layers.13/self_attn/Mul_5_output_0 + - /language_model/layers.13/self_attn/Mul_6_output_0 + - /language_model/layers.13/self_attn/Mul_7_output_0 + - /language_model/layers.13/self_attn/Mul_8_output_0 + - /language_model/layers.13/self_attn/Mul_9_output_0 + - /language_model/layers.14/self_attn/Mul_output_0 + - /language_model/layers.14/self_attn/Mul_1_output_0 + - /language_model/layers.14/self_attn/Mul_2_output_0 + - /language_model/layers.14/self_attn/Mul_3_output_0 + - /language_model/layers.14/self_attn/Mul_4_output_0 + - /language_model/layers.14/self_attn/Mul_5_output_0 + - /language_model/layers.14/self_attn/Mul_6_output_0 + - /language_model/layers.14/self_attn/Mul_7_output_0 + - /language_model/layers.14/self_attn/Mul_8_output_0 + - /language_model/layers.14/self_attn/Mul_9_output_0 + - /language_model/layers.15/self_attn/Mul_output_0 + - /language_model/layers.15/self_attn/Mul_1_output_0 + - /language_model/layers.15/self_attn/Mul_2_output_0 + - /language_model/layers.15/self_attn/Mul_3_output_0 + - /language_model/layers.15/self_attn/Mul_4_output_0 + - /language_model/layers.15/self_attn/Mul_5_output_0 + - /language_model/layers.15/self_attn/Mul_6_output_0 + - /language_model/layers.15/self_attn/Mul_7_output_0 + - /language_model/layers.15/self_attn/Mul_8_output_0 + - /language_model/layers.15/self_attn/Mul_9_output_0 + - 
/language_model/layers.16/self_attn/Mul_output_0 + - /language_model/layers.16/self_attn/Mul_1_output_0 + - /language_model/layers.16/self_attn/Mul_2_output_0 + - /language_model/layers.16/self_attn/Mul_3_output_0 + - /language_model/layers.16/self_attn/Mul_4_output_0 + - /language_model/layers.16/self_attn/Mul_5_output_0 + - /language_model/layers.16/self_attn/Mul_6_output_0 + - /language_model/layers.16/self_attn/Mul_7_output_0 + - /language_model/layers.16/self_attn/Mul_8_output_0 + - /language_model/layers.16/self_attn/Mul_9_output_0 + - /language_model/layers.17/self_attn/Mul_output_0 + - /language_model/layers.17/self_attn/Mul_1_output_0 + - /language_model/layers.17/self_attn/Mul_2_output_0 + - /language_model/layers.17/self_attn/Mul_3_output_0 + - /language_model/layers.17/self_attn/Mul_4_output_0 + - /language_model/layers.17/self_attn/Mul_5_output_0 + - /language_model/layers.17/self_attn/Mul_6_output_0 + - /language_model/layers.17/self_attn/Mul_7_output_0 + - /language_model/layers.17/self_attn/Mul_8_output_0 + - /language_model/layers.17/self_attn/Mul_9_output_0 + - /language_model/layers.18/self_attn/Mul_output_0 + - /language_model/layers.18/self_attn/Mul_1_output_0 + - /language_model/layers.18/self_attn/Mul_2_output_0 + - /language_model/layers.18/self_attn/Mul_3_output_0 + - /language_model/layers.18/self_attn/Mul_4_output_0 + - /language_model/layers.18/self_attn/Mul_5_output_0 + - /language_model/layers.18/self_attn/Mul_6_output_0 + - /language_model/layers.18/self_attn/Mul_7_output_0 + - /language_model/layers.18/self_attn/Mul_8_output_0 + - /language_model/layers.18/self_attn/Mul_9_output_0 + - /language_model/layers.19/self_attn/Mul_output_0 + - /language_model/layers.19/self_attn/Mul_1_output_0 + - /language_model/layers.19/self_attn/Mul_2_output_0 + - /language_model/layers.19/self_attn/Mul_3_output_0 + - /language_model/layers.19/self_attn/Mul_4_output_0 + - /language_model/layers.19/self_attn/Mul_5_output_0 + - 
/language_model/layers.19/self_attn/Mul_6_output_0 + - /language_model/layers.19/self_attn/Mul_7_output_0 + - /language_model/layers.19/self_attn/Mul_8_output_0 + - /language_model/layers.19/self_attn/Mul_9_output_0 + - /language_model/layers.20/self_attn/Mul_output_0 + - /language_model/layers.20/self_attn/Mul_1_output_0 + - /language_model/layers.20/self_attn/Mul_2_output_0 + - /language_model/layers.20/self_attn/Mul_3_output_0 + - /language_model/layers.20/self_attn/Mul_4_output_0 + - /language_model/layers.20/self_attn/Mul_5_output_0 + - /language_model/layers.20/self_attn/Mul_6_output_0 + - /language_model/layers.20/self_attn/Mul_7_output_0 + - /language_model/layers.20/self_attn/Mul_8_output_0 + - /language_model/layers.20/self_attn/Mul_9_output_0 + - /language_model/layers.21/self_attn/Mul_output_0 + - /language_model/layers.21/self_attn/Mul_1_output_0 + - /language_model/layers.21/self_attn/Mul_2_output_0 + - /language_model/layers.21/self_attn/Mul_3_output_0 + - /language_model/layers.21/self_attn/Mul_4_output_0 + - /language_model/layers.21/self_attn/Mul_5_output_0 + - /language_model/layers.21/self_attn/Mul_6_output_0 + - /language_model/layers.21/self_attn/Mul_7_output_0 + - /language_model/layers.21/self_attn/Mul_8_output_0 + - /language_model/layers.21/self_attn/Mul_9_output_0 + - /language_model/layers.22/self_attn/Mul_output_0 + - /language_model/layers.22/self_attn/Mul_1_output_0 + - /language_model/layers.22/self_attn/Mul_2_output_0 + - /language_model/layers.22/self_attn/Mul_3_output_0 + - /language_model/layers.22/self_attn/Mul_4_output_0 + - /language_model/layers.22/self_attn/Mul_5_output_0 + - /language_model/layers.22/self_attn/Mul_6_output_0 + - /language_model/layers.22/self_attn/Mul_7_output_0 + - /language_model/layers.22/self_attn/Mul_8_output_0 + - /language_model/layers.22/self_attn/Mul_9_output_0 + - /language_model/layers.23/self_attn/Mul_output_0 + - /language_model/layers.23/self_attn/Mul_1_output_0 + - 
/language_model/layers.23/self_attn/Mul_2_output_0 + - /language_model/layers.23/self_attn/Mul_3_output_0 + - /language_model/layers.23/self_attn/Mul_4_output_0 + - /language_model/layers.23/self_attn/Mul_5_output_0 + - /language_model/layers.23/self_attn/Mul_6_output_0 + - /language_model/layers.23/self_attn/Mul_7_output_0 + - /language_model/layers.23/self_attn/Mul_8_output_0 + - /language_model/layers.23/self_attn/Mul_9_output_0 + - /language_model/layers.24/self_attn/Mul_output_0 + - /language_model/layers.24/self_attn/Mul_1_output_0 + - /language_model/layers.24/self_attn/Mul_2_output_0 + - /language_model/layers.24/self_attn/Mul_3_output_0 + - /language_model/layers.24/self_attn/Mul_4_output_0 + - /language_model/layers.24/self_attn/Mul_5_output_0 + - /language_model/layers.24/self_attn/Mul_6_output_0 + - /language_model/layers.24/self_attn/Mul_7_output_0 + - /language_model/layers.24/self_attn/Mul_8_output_0 + - /language_model/layers.24/self_attn/Mul_9_output_0 + - /language_model/layers.25/self_attn/Mul_output_0 + - /language_model/layers.25/self_attn/Mul_1_output_0 + - /language_model/layers.25/self_attn/Mul_2_output_0 + - /language_model/layers.25/self_attn/Mul_3_output_0 + - /language_model/layers.25/self_attn/Mul_4_output_0 + - /language_model/layers.25/self_attn/Mul_5_output_0 + - /language_model/layers.25/self_attn/Mul_6_output_0 + - /language_model/layers.25/self_attn/Mul_7_output_0 + - /language_model/layers.25/self_attn/Mul_8_output_0 + - /language_model/layers.25/self_attn/Mul_9_output_0 + - /language_model/layers.26/self_attn/Mul_output_0 + - /language_model/layers.26/self_attn/Mul_1_output_0 + - /language_model/layers.26/self_attn/Mul_2_output_0 + - /language_model/layers.26/self_attn/Mul_3_output_0 + - /language_model/layers.26/self_attn/Mul_4_output_0 + - /language_model/layers.26/self_attn/Mul_5_output_0 + - /language_model/layers.26/self_attn/Mul_6_output_0 + - /language_model/layers.26/self_attn/Mul_7_output_0 + - 
/language_model/layers.26/self_attn/Mul_8_output_0 + - /language_model/layers.26/self_attn/Mul_9_output_0 + - /language_model/layers.27/self_attn/Mul_output_0 + - /language_model/layers.27/self_attn/Mul_1_output_0 + - /language_model/layers.27/self_attn/Mul_2_output_0 + - /language_model/layers.27/self_attn/Mul_3_output_0 + - /language_model/layers.27/self_attn/Mul_4_output_0 + - /language_model/layers.27/self_attn/Mul_5_output_0 + - /language_model/layers.27/self_attn/Mul_6_output_0 + - /language_model/layers.27/self_attn/Mul_7_output_0 + - /language_model/layers.27/self_attn/Mul_8_output_0 + - /language_model/layers.27/self_attn/Mul_9_output_0 + - /language_model/layers.28/self_attn/Mul_output_0 + - /language_model/layers.28/self_attn/Mul_1_output_0 + - /language_model/layers.28/self_attn/Mul_2_output_0 + - /language_model/layers.28/self_attn/Mul_3_output_0 + - /language_model/layers.28/self_attn/Mul_4_output_0 + - /language_model/layers.28/self_attn/Mul_5_output_0 + - /language_model/layers.28/self_attn/Mul_6_output_0 + - /language_model/layers.28/self_attn/Mul_7_output_0 + - /language_model/layers.28/self_attn/Mul_8_output_0 + - /language_model/layers.28/self_attn/Mul_9_output_0 + - /language_model/layers.29/self_attn/Mul_output_0 + - /language_model/layers.29/self_attn/Mul_1_output_0 + - /language_model/layers.29/self_attn/Mul_2_output_0 + - /language_model/layers.29/self_attn/Mul_3_output_0 + - /language_model/layers.29/self_attn/Mul_4_output_0 + - /language_model/layers.29/self_attn/Mul_5_output_0 + - /language_model/layers.29/self_attn/Mul_6_output_0 + - /language_model/layers.29/self_attn/Mul_7_output_0 + - /language_model/layers.29/self_attn/Mul_8_output_0 + - /language_model/layers.29/self_attn/Mul_9_output_0 + - /language_model/layers.30/self_attn/Mul_output_0 + - /language_model/layers.30/self_attn/Mul_1_output_0 + - /language_model/layers.30/self_attn/Mul_2_output_0 + - /language_model/layers.30/self_attn/Mul_3_output_0 + - 
/language_model/layers.30/self_attn/Mul_4_output_0 + - /language_model/layers.30/self_attn/Mul_5_output_0 + - /language_model/layers.30/self_attn/Mul_6_output_0 + - /language_model/layers.30/self_attn/Mul_7_output_0 + - /language_model/layers.30/self_attn/Mul_8_output_0 + - /language_model/layers.30/self_attn/Mul_9_output_0 + - /language_model/layers.31/self_attn/Mul_output_0 + - /language_model/layers.31/self_attn/Mul_1_output_0 + - /language_model/layers.31/self_attn/Mul_2_output_0 + - /language_model/layers.31/self_attn/Mul_3_output_0 + - /language_model/layers.31/self_attn/Mul_4_output_0 + - /language_model/layers.31/self_attn/Mul_5_output_0 + - /language_model/layers.31/self_attn/Mul_6_output_0 + - /language_model/layers.31/self_attn/Mul_7_output_0 + - /language_model/layers.31/self_attn/Mul_8_output_0 + - /language_model/layers.31/self_attn/Mul_9_output_0 + - /language_model/layers.32/self_attn/Mul_output_0 + - /language_model/layers.32/self_attn/Mul_1_output_0 + - /language_model/layers.32/self_attn/Mul_2_output_0 + - /language_model/layers.32/self_attn/Mul_3_output_0 + - /language_model/layers.32/self_attn/Mul_4_output_0 + - /language_model/layers.32/self_attn/Mul_5_output_0 + - /language_model/layers.32/self_attn/Mul_6_output_0 + - /language_model/layers.32/self_attn/Mul_7_output_0 + - /language_model/layers.32/self_attn/Mul_8_output_0 + - /language_model/layers.32/self_attn/Mul_9_output_0 + - /language_model/layers.33/self_attn/Mul_output_0 + - /language_model/layers.33/self_attn/Mul_1_output_0 + - /language_model/layers.33/self_attn/Mul_2_output_0 + - /language_model/layers.33/self_attn/Mul_3_output_0 + - /language_model/layers.33/self_attn/Mul_4_output_0 + - /language_model/layers.33/self_attn/Mul_5_output_0 + - /language_model/layers.33/self_attn/Mul_6_output_0 + - /language_model/layers.33/self_attn/Mul_7_output_0 + - /language_model/layers.33/self_attn/Mul_8_output_0 + - /language_model/layers.33/self_attn/Mul_9_output_0 + - 
/language_model/layers.0/self_attn/Softmax_output_0 + - /language_model/layers.1/self_attn/Softmax_output_0 + - /language_model/layers.2/self_attn/Softmax_output_0 + - /language_model/layers.3/self_attn/Softmax_output_0 + - /language_model/layers.4/self_attn/Softmax_output_0 + - /language_model/layers.5/self_attn/Softmax_output_0 + - /language_model/layers.6/self_attn/Softmax_output_0 + - /language_model/layers.7/self_attn/Softmax_output_0 + - /language_model/layers.8/self_attn/Softmax_output_0 + - /language_model/layers.9/self_attn/Softmax_output_0 + - /language_model/layers.10/self_attn/Softmax_output_0 + - /language_model/layers.11/self_attn/Softmax_output_0 + - /language_model/layers.12/self_attn/Softmax_output_0 + - /language_model/layers.13/self_attn/Softmax_output_0 + - /language_model/layers.14/self_attn/Softmax_output_0 + - /language_model/layers.15/self_attn/Softmax_output_0 + - /language_model/layers.16/self_attn/Softmax_output_0 + - /language_model/layers.17/self_attn/Softmax_output_0 + - /language_model/layers.18/self_attn/Softmax_output_0 + - /language_model/layers.19/self_attn/Softmax_output_0 + - /language_model/layers.20/self_attn/Softmax_output_0 + - /language_model/layers.21/self_attn/Softmax_output_0 + - /language_model/layers.22/self_attn/Softmax_output_0 + - /language_model/layers.23/self_attn/Softmax_output_0 + - /language_model/layers.24/self_attn/Softmax_output_0 + - /language_model/layers.25/self_attn/Softmax_output_0 + - /language_model/layers.26/self_attn/Softmax_output_0 + - /language_model/layers.27/self_attn/Softmax_output_0 + - /language_model/layers.28/self_attn/Softmax_output_0 + - /language_model/layers.29/self_attn/Softmax_output_0 + - /language_model/layers.30/self_attn/Softmax_output_0 + - /language_model/layers.31/self_attn/Softmax_output_0 + - /language_model/layers.32/self_attn/Softmax_output_0 + - /language_model/layers.33/self_attn/Softmax_output_0 + diff --git a/examples/performance/compute_context_length/gemma3.py 
b/examples/performance/compute_context_length/gemma3.py index c31b1748a..d9672b9e3 100644 --- a/examples/performance/compute_context_length/gemma3.py +++ b/examples/performance/compute_context_length/gemma3.py @@ -20,21 +20,27 @@ tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, trust_remote_code=True) processor = AutoProcessor.from_pretrained(model_id) -# pass HF_TOKEN if gated model -# For running the model in single QPC approach use kv_offload=False. For Dual QPC approach use kv_offload=True ### +## Activate Compute-Context-Length (CCL) feature by setting ccl_enabled=True when loading the model with from_pretrained(). +## Use the optional comp_ctx_lengths argument to provide two lists of context lengths for the prefilling and decoding processes. If comp_ctx_lengths=None, the model will run with its default context length. +## - The first list, comp_ctx_lengths_prefill, defines the compute-context-length values for the prefilling process. +## -- The process starts with the first value in the list and gradually increases the context length based on the position_id of the current prompt chunk. +## - The second list, comp_ctx_lengths_decode, defines the compute-context-length values for the decoding process. +## -- During decoding, the model selects an appropriate context length from the list based on the input prompt length and cache index. +## -- It starts from the correct value in the list and increases the context length dynamically when the cache index exceeds the current threshold. + ctx_len = 8192 comp_ctx_lengths_prefill = [3072] comp_ctx_lengths_decode = [4096, ctx_len] +# pass HF_TOKEN if gated model +# For running the model in single QPC approach use kv_offload=False. 
For Dual QPC approach use kv_offload=True ### qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( model_id, config=config, attn_implementation="eager", kv_offload=True, qaic_config={ - "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": comp_ctx_lengths_decode, - "ctx_len": ctx_len, + "ccl_enabled": True, }, ) @@ -54,7 +60,9 @@ aic_enable_depth_first=True, skip_vision=True, mos=1, - node_precision_info="examples/gemma3_example/fp32_nodes_gemma3_27b.yaml", + node_precision_info="examples/performance/compute_context_length/fp32_nodes_gemma3_4b.yaml", + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) messages = [ @@ -90,7 +98,9 @@ mxint8_kv_cache=False, aic_enable_depth_first=True, mos=1, - node_precision_info="examples/gemma3_example/fp32_nodes_gemma3_27b.yaml", + node_precision_info="examples/performance/compute_context_length/fp32_nodes_gemma3_4b.yaml", + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) ### IMAGE + TEXT ### diff --git a/examples/performance/compute_context_length/gemma3/fp32_nodes_gemma3_27b.yaml b/examples/performance/compute_context_length/gemma3/fp32_nodes_gemma3_27b.yaml new file mode 100755 index 000000000..d2a4bf164 --- /dev/null +++ b/examples/performance/compute_context_length/gemma3/fp32_nodes_gemma3_27b.yaml @@ -0,0 +1,685 @@ +FP32NodeInstanceNames: + + - /language_model/layers.0/Add_1_output_0 + - /language_model/layers.0/Add_2_output_0 + - /language_model/layers.0/Add_3_output_0 + - /language_model/layers.0/Add_output_0 + - /language_model/layers.1/Add_1_output_0 + - /language_model/layers.1/Add_2_output_0 + - /language_model/layers.1/Add_3_output_0 + - /language_model/layers.1/Add_output_0 + - /language_model/layers.2/Add_1_output_0 + - /language_model/layers.2/Add_2_output_0 + - /language_model/layers.2/Add_3_output_0 + - /language_model/layers.2/Add_output_0 + - 
/language_model/layers.3/Add_1_output_0 + - /language_model/layers.3/Add_2_output_0 + - /language_model/layers.3/Add_3_output_0 + - /language_model/layers.3/Add_output_0 + - /language_model/layers.4/Add_1_output_0 + - /language_model/layers.4/Add_2_output_0 + - /language_model/layers.4/Add_3_output_0 + - /language_model/layers.4/Add_output_0 + - /language_model/layers.5/Add_1_output_0 + - /language_model/layers.5/Add_2_output_0 + - /language_model/layers.5/Add_3_output_0 + - /language_model/layers.5/Add_output_0 + - /language_model/layers.6/Add_1_output_0 + - /language_model/layers.6/Add_2_output_0 + - /language_model/layers.6/Add_3_output_0 + - /language_model/layers.6/Add_output_0 + - /language_model/layers.7/Add_1_output_0 + - /language_model/layers.7/Add_2_output_0 + - /language_model/layers.7/Add_3_output_0 + - /language_model/layers.7/Add_output_0 + - /language_model/layers.8/Add_1_output_0 + - /language_model/layers.8/Add_2_output_0 + - /language_model/layers.8/Add_3_output_0 + - /language_model/layers.8/Add_output_0 + - /language_model/layers.9/Add_1_output_0 + - /language_model/layers.9/Add_2_output_0 + - /language_model/layers.9/Add_3_output_0 + - /language_model/layers.9/Add_output_0 + - /language_model/layers.10/Add_1_output_0 + - /language_model/layers.10/Add_2_output_0 + - /language_model/layers.10/Add_3_output_0 + - /language_model/layers.10/Add_output_0 + - /language_model/layers.11/Add_1_output_0 + - /language_model/layers.11/Add_2_output_0 + - /language_model/layers.11/Add_3_output_0 + - /language_model/layers.11/Add_output_0 + - /language_model/layers.12/Add_1_output_0 + - /language_model/layers.12/Add_2_output_0 + - /language_model/layers.12/Add_3_output_0 + - /language_model/layers.12/Add_output_0 + - /language_model/layers.13/Add_1_output_0 + - /language_model/layers.13/Add_2_output_0 + - /language_model/layers.13/Add_3_output_0 + - /language_model/layers.13/Add_output_0 + - /language_model/layers.14/Add_1_output_0 + - 
/language_model/layers.14/Add_2_output_0 + - /language_model/layers.14/Add_3_output_0 + - /language_model/layers.14/Add_output_0 + - /language_model/layers.15/Add_1_output_0 + - /language_model/layers.15/Add_2_output_0 + - /language_model/layers.15/Add_3_output_0 + - /language_model/layers.15/Add_output_0 + - /language_model/layers.16/Add_1_output_0 + - /language_model/layers.16/Add_2_output_0 + - /language_model/layers.16/Add_3_output_0 + - /language_model/layers.16/Add_output_0 + - /language_model/layers.17/Add_1_output_0 + - /language_model/layers.17/Add_2_output_0 + - /language_model/layers.17/Add_3_output_0 + - /language_model/layers.17/Add_output_0 + - /language_model/layers.18/Add_1_output_0 + - /language_model/layers.18/Add_2_output_0 + - /language_model/layers.18/Add_3_output_0 + - /language_model/layers.18/Add_output_0 + - /language_model/layers.19/Add_1_output_0 + - /language_model/layers.19/Add_2_output_0 + - /language_model/layers.19/Add_3_output_0 + - /language_model/layers.19/Add_output_0 + - /language_model/layers.20/Add_1_output_0 + - /language_model/layers.20/Add_2_output_0 + - /language_model/layers.20/Add_3_output_0 + - /language_model/layers.20/Add_output_0 + - /language_model/layers.21/Add_1_output_0 + - /language_model/layers.21/Add_2_output_0 + - /language_model/layers.21/Add_3_output_0 + - /language_model/layers.21/Add_output_0 + - /language_model/layers.22/Add_1_output_0 + - /language_model/layers.22/Add_2_output_0 + - /language_model/layers.22/Add_3_output_0 + - /language_model/layers.22/Add_output_0 + - /language_model/layers.23/Add_1_output_0 + - /language_model/layers.23/Add_2_output_0 + - /language_model/layers.23/Add_output_0 + - /language_model/layers.24/Add_1_output_0 + - /language_model/layers.24/Add_2_output_0 + - /language_model/layers.24/Add_3_output_0 + - /language_model/layers.24/Add_output_0 + - /language_model/layers.25/Add_1_output_0 + - /language_model/layers.25/Add_2_output_0 + - /language_model/layers.25/Add_3_output_0 
+ - /language_model/layers.25/Add_output_0 + - /language_model/layers.26/Add_1_output_0 + - /language_model/layers.26/Add_2_output_0 + - /language_model/layers.26/Add_3_output_0 + - /language_model/layers.26/Add_output_0 + - /language_model/layers.27/Add_1_output_0 + - /language_model/layers.27/Add_2_output_0 + - /language_model/layers.27/Add_3_output_0 + - /language_model/layers.27/Add_output_0 + - /language_model/layers.28/Add_1_output_0 + - /language_model/layers.28/Add_2_output_0 + - /language_model/layers.28/Add_3_output_0 + - /language_model/layers.28/Add_output_0 + - /language_model/layers.29/Add_1_output_0 + - /language_model/layers.29/Add_2_output_0 + - /language_model/layers.29/Add_3_output_0 + - /language_model/layers.29/Add_output_0 + - /language_model/layers.30/Add_1_output_0 + - /language_model/layers.30/Add_2_output_0 + - /language_model/layers.30/Add_3_output_0 + - /language_model/layers.30/Add_output_0 + - /language_model/layers.31/Add_1_output_0 + - /language_model/layers.31/Add_2_output_0 + - /language_model/layers.31/Add_3_output_0 + - /language_model/layers.31/Add_output_0 + - /language_model/layers.32/Add_1_output_0 + - /language_model/layers.32/Add_2_output_0 + - /language_model/layers.32/Add_3_output_0 + - /language_model/layers.32/Add_output_0 + - /language_model/layers.33/Add_1_output_0 + - /language_model/layers.33/Add_2_output_0 + - /language_model/layers.33/Add_3_output_0 + - /language_model/layers.33/Add_output_0 + - /language_model/layers.34/Add_1_output_0 + - /language_model/layers.34/Add_2_output_0 + - /language_model/layers.34/Add_3_output_0 + - /language_model/layers.34/Add_output_0 + - /language_model/layers.35/Add_1_output_0 + - /language_model/layers.35/Add_2_output_0 + - /language_model/layers.35/Add_3_output_0 + - /language_model/layers.35/Add_output_0 + - /language_model/layers.36/Add_1_output_0 + - /language_model/layers.36/Add_2_output_0 + - /language_model/layers.36/Add_3_output_0 + - 
/language_model/layers.36/Add_output_0 + - /language_model/layers.37/Add_1_output_0 + - /language_model/layers.37/Add_2_output_0 + - /language_model/layers.37/Add_3_output_0 + - /language_model/layers.37/Add_output_0 + - /language_model/layers.38/Add_1_output_0 + - /language_model/layers.38/Add_2_output_0 + - /language_model/layers.38/Add_3_output_0 + - /language_model/layers.38/Add_output_0 + - /language_model/layers.39/Add_1_output_0 + - /language_model/layers.39/Add_2_output_0 + - /language_model/layers.39/Add_3_output_0 + - /language_model/layers.39/Add_output_0 + - /language_model/layers.40/Add_1_output_0 + - /language_model/layers.40/Add_2_output_0 + - /language_model/layers.40/Add_3_output_0 + - /language_model/layers.40/Add_output_0 + - /language_model/layers.41/Add_1_output_0 + - /language_model/layers.41/Add_2_output_0 + - /language_model/layers.41/Add_3_output_0 + - /language_model/layers.41/Add_output_0 + - /language_model/layers.42/Add_1_output_0 + - /language_model/layers.42/Add_2_output_0 + - /language_model/layers.42/Add_3_output_0 + - /language_model/layers.42/Add_output_0 + - /language_model/layers.43/Add_1_output_0 + - /language_model/layers.43/Add_2_output_0 + - /language_model/layers.43/Add_3_output_0 + - /language_model/layers.43/Add_output_0 + - /language_model/layers.44/Add_1_output_0 + - /language_model/layers.44/Add_2_output_0 + - /language_model/layers.44/Add_3_output_0 + - /language_model/layers.44/Add_output_0 + - /language_model/layers.45/Add_1_output_0 + - /language_model/layers.45/Add_2_output_0 + - /language_model/layers.45/Add_3_output_0 + - /language_model/layers.45/Add_output_0 + - /language_model/layers.46/Add_1_output_0 + - /language_model/layers.46/Add_2_output_0 + - /language_model/layers.46/Add_3_output_0 + - /language_model/layers.46/Add_output_0 + - /language_model/layers.47/Add_1_output_0 + - /language_model/layers.47/Add_2_output_0 + - /language_model/layers.47/Add_3_output_0 + - /language_model/layers.47/Add_output_0 + 
- /language_model/layers.48/Add_1_output_0 + - /language_model/layers.48/Add_2_output_0 + - /language_model/layers.48/Add_3_output_0 + - /language_model/layers.48/Add_output_0 + - /language_model/layers.49/Add_1_output_0 + - /language_model/layers.49/Add_2_output_0 + - /language_model/layers.49/Add_3_output_0 + - /language_model/layers.49/Add_output_0 + - /language_model/layers.50/Add_1_output_0 + - /language_model/layers.50/Add_2_output_0 + - /language_model/layers.50/Add_3_output_0 + - /language_model/layers.50/Add_output_0 + - /language_model/layers.51/Add_1_output_0 + - /language_model/layers.51/Add_2_output_0 + - /language_model/layers.51/Add_3_output_0 + - /language_model/layers.51/Add_output_0 + - /language_model/layers.52/Add_1_output_0 + - /language_model/layers.52/Add_2_output_0 + - /language_model/layers.52/Add_3_output_0 + - /language_model/layers.52/Add_output_0 + - /language_model/layers.53/Add_1_output_0 + - /language_model/layers.53/Add_2_output_0 + - /language_model/layers.53/Add_3_output_0 + - /language_model/layers.53/Add_output_0 + - /language_model/layers.54/Add_1_output_0 + - /language_model/layers.54/Add_2_output_0 + - /language_model/layers.54/Add_3_output_0 + - /language_model/layers.54/Add_output_0 + - /language_model/layers.55/Add_1_output_0 + - /language_model/layers.55/Add_2_output_0 + - /language_model/layers.55/Add_3_output_0 + - /language_model/layers.55/Add_output_0 + - /language_model/layers.56/Add_1_output_0 + - /language_model/layers.56/Add_2_output_0 + - /language_model/layers.56/Add_3_output_0 + - /language_model/layers.56/Add_output_0 + - /language_model/layers.57/Add_1_output_0 + - /language_model/layers.57/Add_2_output_0 + - /language_model/layers.57/Add_3_output_0 + - /language_model/layers.57/Add_output_0 + - /language_model/layers.58/Add_1_output_0 + - /language_model/layers.58/Add_2_output_0 + - /language_model/layers.58/Add_3_output_0 + - /language_model/layers.58/Add_output_0 + - 
/language_model/layers.59/Add_1_output_0 + - /language_model/layers.59/Add_2_output_0 + - /language_model/layers.59/Add_3_output_0 + - /language_model/layers.59/Add_output_0 + - /language_model/layers.60/Add_1_output_0 + - /language_model/layers.60/Add_2_output_0 + - /language_model/layers.60/Add_3_output_0 + - /language_model/layers.60/Add_output_0 + - /language_model/layers.61/Add_1_output_0 + - /language_model/layers.61/Add_2_output_0 + - /language_model/layers.61/Add_3_output_0 + - /language_model/layers.61/Add_output_0 + - /language_model/norm/Add_output_0 + - /language_model/layers.0/self_attn/Mul_output_0 + - /language_model/layers.2/self_attn/Mul_output_0 + - /language_model/layers.3/self_attn/Mul_output_0 + - /language_model/layers.4/self_attn/Mul_output_0 + - /language_model/layers.5/self_attn/Mul_output_0 + - /language_model/layers.6/self_attn/Mul_output_0 + - /language_model/layers.7/self_attn/Mul_output_0 + - /language_model/layers.8/self_attn/Mul_output_0 + - /language_model/layers.9/self_attn/Mul_output_0 + - /language_model/layers.10/self_attn/Mul_output_0 + - /language_model/layers.11/self_attn/Mul_output_0 + - /language_model/layers.12/self_attn/Mul_output_0 + - /language_model/layers.13/self_attn/Mul_output_0 + - /language_model/layers.14/self_attn/Mul_output_0 + - /language_model/layers.15/self_attn/Mul_output_0 + - /language_model/layers.16/self_attn/Mul_output_0 + - /language_model/layers.17/self_attn/Mul_output_0 + - /language_model/layers.18/self_attn/Mul_output_0 + - /language_model/layers.19/self_attn/Mul_output_0 + - /language_model/layers.20/self_attn/Mul_output_0 + - /language_model/layers.21/self_attn/Mul_output_0 + - /language_model/layers.22/self_attn/Mul_output_0 + - /language_model/layers.23/self_attn/Mul_output_0 + - /language_model/layers.24/self_attn/Mul_output_0 + - /language_model/layers.25/self_attn/Mul_output_0 + - /language_model/layers.26/self_attn/Mul_output_0 + - /language_model/layers.27/self_attn/Mul_output_0 + - 
/language_model/layers.28/self_attn/Mul_output_0 + - /language_model/layers.29/self_attn/Mul_output_0 + - /language_model/layers.30/self_attn/Mul_output_0 + - /language_model/layers.31/self_attn/Mul_output_0 + - /language_model/layers.32/self_attn/Mul_output_0 + - /language_model/layers.33/self_attn/Mul_output_0 + - /language_model/layers.34/self_attn/Mul_output_0 + - /language_model/layers.35/self_attn/Mul_output_0 + - /language_model/layers.36/self_attn/Mul_output_0 + - /language_model/layers.37/self_attn/Mul_output_0 + - /language_model/layers.38/self_attn/Mul_output_0 + - /language_model/layers.39/self_attn/Mul_output_0 + - /language_model/layers.40/self_attn/Mul_output_0 + - /language_model/layers.41/self_attn/Mul_output_0 + - /language_model/layers.42/self_attn/Mul_output_0 + - /language_model/layers.43/self_attn/Mul_output_0 + - /language_model/layers.44/self_attn/Mul_output_0 + - /language_model/layers.45/self_attn/Mul_output_0 + - /language_model/layers.46/self_attn/Mul_output_0 + - /language_model/layers.47/self_attn/Mul_output_0 + - /language_model/layers.48/self_attn/Mul_output_0 + - /language_model/layers.49/self_attn/Mul_output_0 + - /language_model/layers.50/self_attn/Mul_output_0 + - /language_model/layers.51/self_attn/Mul_output_0 + - /language_model/layers.52/self_attn/Mul_output_0 + - /language_model/layers.53/self_attn/Mul_output_0 + - /language_model/layers.54/self_attn/Mul_output_0 + - /language_model/layers.55/self_attn/Mul_output_0 + - /language_model/layers.56/self_attn/Mul_output_0 + - /language_model/layers.57/self_attn/Mul_output_0 + - /language_model/layers.58/self_attn/Mul_output_0 + - /language_model/layers.59/self_attn/Mul_output_0 + - /language_model/layers.60/self_attn/Mul_output_0 + - /language_model/layers.61/self_attn/Mul_output_0 + - /language_model/layers.0/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.0/post_attention_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.0/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.0/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.0/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.0/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.1/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.1/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.1/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.1/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.1/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.1/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.2/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.2/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.2/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.2/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.2/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.2/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.3/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.3/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.3/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.3/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.3/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.3/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.4/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.4/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.4/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.4/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.4/self_attn/k_norm/CustomRMSNorm_output_0 + - 
/language_model/layers.4/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.5/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.5/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.5/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.5/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.5/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.5/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.6/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.6/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.6/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.6/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.6/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.6/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.7/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.7/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.7/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.7/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.7/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.7/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.8/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.8/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.8/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.8/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.8/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.8/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.9/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.9/post_attention_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.9/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.9/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.9/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.9/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.10/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.10/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.10/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.10/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.10/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.10/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.11/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.11/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.11/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.11/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.11/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.11/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.12/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.12/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.12/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.12/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.12/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.12/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.13/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.13/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.13/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.13/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.13/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.13/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.14/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.14/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.14/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.14/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.14/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.14/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.15/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.15/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.15/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.15/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.15/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.15/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.16/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.16/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.16/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.16/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.16/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.16/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.17/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.17/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.17/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.17/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.17/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.17/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.18/input_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.18/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.18/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.18/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.18/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.18/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.19/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.19/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.19/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.19/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.19/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.19/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.20/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.20/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.20/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.20/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.20/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.20/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.21/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.21/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.21/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.21/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.21/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.21/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.22/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.22/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.22/post_feedforward_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.22/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.22/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.22/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.23/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.23/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.23/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.23/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.23/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.23/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.24/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.24/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.24/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.24/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.24/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.24/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.25/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.25/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.25/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.25/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.25/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.25/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.26/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.26/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.26/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.26/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.26/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.26/self_attn/q_norm/CustomRMSNorm_output_0 
+ - /language_model/layers.27/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.27/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.27/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.27/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.27/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.27/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.28/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.28/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.28/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.28/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.28/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.28/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.29/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.29/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.29/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.29/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.29/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.29/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.30/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.30/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.30/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.30/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.30/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.30/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.31/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.31/post_attention_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.31/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.31/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.31/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.31/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.32/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.32/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.32/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.32/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.32/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.32/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.33/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.33/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.33/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.33/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.33/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.33/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.34/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.34/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.34/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.34/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.34/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.34/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.35/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.35/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.35/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.35/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.35/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.35/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.36/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.36/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.36/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.36/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.36/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.36/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.37/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.37/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.37/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.37/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.37/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.37/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.38/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.38/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.38/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.38/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.38/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.38/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.39/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.39/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.39/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.39/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.39/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.39/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.40/input_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.40/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.40/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.40/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.40/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.40/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.41/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.41/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.41/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.41/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.41/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.41/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.42/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.42/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.42/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.42/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.42/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.42/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.43/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.43/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.43/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.43/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.43/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.43/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.44/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.44/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.44/post_feedforward_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.44/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.44/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.44/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.45/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.45/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.45/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.45/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.45/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.45/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.46/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.46/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.46/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.46/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.46/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.46/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.47/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.47/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.47/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.47/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.47/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.47/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.48/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.48/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.48/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.48/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.48/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.48/self_attn/q_norm/CustomRMSNorm_output_0 
+ - /language_model/layers.49/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.49/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.49/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.49/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.49/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.49/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.50/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.50/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.50/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.50/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.50/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.50/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.51/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.51/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.51/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.51/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.51/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.51/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.52/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.52/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.52/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.52/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.52/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.52/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.53/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.53/post_attention_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.53/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.53/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.53/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.53/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.54/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.54/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.54/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.54/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.54/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.54/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.55/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.55/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.55/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.55/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.55/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.55/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.56/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.56/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.56/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.56/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.56/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.56/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.57/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.57/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.57/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.57/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.57/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.57/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.58/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.58/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.58/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.58/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.58/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.58/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.59/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.59/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.59/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.59/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.59/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.59/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.60/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.60/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.60/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.60/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.60/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.60/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.61/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.61/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.61/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.61/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.61/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.61/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/norm/CustomRMSNorm_output_0 + diff --git 
a/examples/performance/compute_context_length/gemma3/fp32_nodes_gemma3_4b.yaml b/examples/performance/compute_context_length/gemma3/fp32_nodes_gemma3_4b.yaml new file mode 100755 index 000000000..1c8aa1c41 --- /dev/null +++ b/examples/performance/compute_context_length/gemma3/fp32_nodes_gemma3_4b.yaml @@ -0,0 +1,698 @@ +FP32NodeInstanceNames: + + - /language_model/layers.0/Add_output_0 + - /language_model/layers.0/Add_1_output_0 + - /language_model/layers.0/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.0/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.0/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.0/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.0/Add_2_output_0 + - /language_model/layers.0/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.0/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.0/Add_3_output_0 + - /language_model/layers.1/Add_output_0 + - /language_model/layers.1/Add_1_output_0 + - /language_model/layers.1/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.1/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.1/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.1/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.1/Add_2_output_0 + - /language_model/layers.1/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.1/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.1/Add_3_output_0 + - /language_model/layers.2/Add_output_0 + - /language_model/layers.2/Add_1_output_0 + - /language_model/layers.2/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.2/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.2/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.2/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.2/Add_2_output_0 + - 
/language_model/layers.2/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.2/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.2/Add_3_output_0 + - /language_model/layers.3/Add_output_0 + - /language_model/layers.3/Add_1_output_0 + - /language_model/layers.3/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.3/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.3/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.3/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.3/Add_2_output_0 + - /language_model/layers.3/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.3/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.3/Add_3_output_0 + - /language_model/layers.4/Add_output_0 + - /language_model/layers.4/Add_1_output_0 + - /language_model/layers.4/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.4/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.4/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.4/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.4/Add_2_output_0 + - /language_model/layers.4/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.4/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.4/Add_3_output_0 + - /language_model/layers.5/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.5/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.5/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.5/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.5/Add_output_0 + - /language_model/layers.5/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.5/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.5/Add_1_output_0 + - /language_model/layers.6/Add_output_0 + - 
/language_model/layers.6/Add_1_output_0 + - /language_model/layers.6/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.6/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.6/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.6/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.6/Add_2_output_0 + - /language_model/layers.6/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.6/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.6/Add_3_output_0 + - /language_model/layers.7/Add_output_0 + - /language_model/layers.7/Add_1_output_0 + - /language_model/layers.7/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.7/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.7/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.7/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.7/Add_2_output_0 + - /language_model/layers.7/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.7/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.7/Add_3_output_0 + - /language_model/layers.8/Add_output_0 + - /language_model/layers.8/Add_1_output_0 + - /language_model/layers.8/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.8/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.8/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.8/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.8/Add_2_output_0 + - /language_model/layers.8/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.8/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.8/Add_3_output_0 + - /language_model/layers.9/Add_output_0 + - /language_model/layers.9/Add_1_output_0 + - /language_model/layers.9/input_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.9/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.9/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.9/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.9/Add_2_output_0 + - /language_model/layers.9/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.9/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.9/Add_3_output_0 + - /language_model/layers.10/Add_output_0 + - /language_model/layers.10/Add_1_output_0 + - /language_model/layers.10/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.10/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.10/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.10/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.10/Add_2_output_0 + - /language_model/layers.10/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.10/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.10/Add_3_output_0 + - /language_model/layers.11/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.11/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.11/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.11/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.11/Add_output_0 + - /language_model/layers.11/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.11/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.11/Add_1_output_0 + - /language_model/layers.12/Add_output_0 + - /language_model/layers.12/Add_1_output_0 + - /language_model/layers.12/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.12/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.12/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.12/post_attention_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.12/Add_2_output_0 + - /language_model/layers.12/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.12/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.12/Add_3_output_0 + - /language_model/layers.13/Add_output_0 + - /language_model/layers.13/Add_1_output_0 + - /language_model/layers.13/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.13/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.13/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.13/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.13/Add_2_output_0 + - /language_model/layers.13/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.13/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.13/Add_3_output_0 + - /language_model/layers.14/Add_output_0 + - /language_model/layers.14/Add_1_output_0 + - /language_model/layers.14/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.14/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.14/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.14/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.14/Add_2_output_0 + - /language_model/layers.14/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.14/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.14/Add_3_output_0 + - /language_model/layers.15/Add_output_0 + - /language_model/layers.15/Add_1_output_0 + - /language_model/layers.15/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.15/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.15/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.15/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.15/Add_2_output_0 + - /language_model/layers.15/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.15/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.15/Add_3_output_0 + - /language_model/layers.16/Add_output_0 + - /language_model/layers.16/Add_1_output_0 + - /language_model/layers.16/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.16/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.16/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.16/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.16/Add_2_output_0 + - /language_model/layers.16/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.16/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.16/Add_3_output_0 + - /language_model/layers.17/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.17/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.17/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.17/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.17/Add_output_0 + - /language_model/layers.17/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.17/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.17/Add_1_output_0 + - /language_model/layers.18/Add_output_0 + - /language_model/layers.18/Add_1_output_0 + - /language_model/layers.18/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.18/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.18/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.18/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.18/Add_2_output_0 + - /language_model/layers.18/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.18/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.18/Add_3_output_0 + - /language_model/layers.19/Add_output_0 + - /language_model/layers.19/Add_1_output_0 + - 
/language_model/layers.19/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.19/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.19/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.19/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.19/Add_2_output_0 + - /language_model/layers.19/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.19/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.19/Add_3_output_0 + - /language_model/layers.20/Add_output_0 + - /language_model/layers.20/Add_1_output_0 + - /language_model/layers.20/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.20/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.20/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.20/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.20/Add_2_output_0 + - /language_model/layers.20/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.20/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.20/Add_3_output_0 + - /language_model/layers.21/Add_output_0 + - /language_model/layers.21/Add_1_output_0 + - /language_model/layers.21/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.21/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.21/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.21/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.21/Add_2_output_0 + - /language_model/layers.21/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.21/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.21/Add_3_output_0 + - /language_model/layers.22/Add_output_0 + - /language_model/layers.22/Add_1_output_0 + - /language_model/layers.22/input_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.22/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.22/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.22/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.22/Add_2_output_0 + - /language_model/layers.22/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.22/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.22/Add_3_output_0 + - /language_model/layers.23/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.23/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.23/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.23/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.23/Add_output_0 + - /language_model/layers.23/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.23/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.23/Add_1_output_0 + - /language_model/layers.24/Add_output_0 + - /language_model/layers.24/Add_1_output_0 + - /language_model/layers.24/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.24/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.24/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.24/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.24/Add_2_output_0 + - /language_model/layers.24/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.24/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.24/Add_3_output_0 + - /language_model/layers.25/Add_output_0 + - /language_model/layers.25/Add_1_output_0 + - /language_model/layers.25/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.25/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.25/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.25/post_attention_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.25/Add_2_output_0 + - /language_model/layers.25/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.25/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.25/Add_3_output_0 + - /language_model/layers.26/Add_output_0 + - /language_model/layers.26/Add_1_output_0 + - /language_model/layers.26/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.26/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.26/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.26/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.26/Add_2_output_0 + - /language_model/layers.26/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.26/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.26/Add_3_output_0 + - /language_model/layers.27/Add_output_0 + - /language_model/layers.27/Add_1_output_0 + - /language_model/layers.27/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.27/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.27/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.27/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.27/Add_2_output_0 + - /language_model/layers.27/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.27/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.27/Add_3_output_0 + - /language_model/layers.28/Add_output_0 + - /language_model/layers.28/Add_1_output_0 + - /language_model/layers.28/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.28/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.28/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.28/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.28/Add_2_output_0 + - /language_model/layers.28/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - 
/language_model/layers.28/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.28/Add_3_output_0 + - /language_model/layers.29/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.29/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.29/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.29/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.29/Add_output_0 + - /language_model/layers.29/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.29/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.29/Add_1_output_0 + - /language_model/layers.30/Add_output_0 + - /language_model/layers.30/Add_1_output_0 + - /language_model/layers.30/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.30/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.30/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.30/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.30/Add_2_output_0 + - /language_model/layers.30/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.30/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.30/Add_3_output_0 + - /language_model/layers.31/Add_output_0 + - /language_model/layers.31/Add_1_output_0 + - /language_model/layers.31/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.31/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.31/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.31/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.31/Add_2_output_0 + - /language_model/layers.31/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.31/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.31/Add_3_output_0 + - /language_model/layers.32/Add_output_0 + - /language_model/layers.32/Add_1_output_0 + - 
/language_model/layers.32/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.32/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.32/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.32/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.32/Add_2_output_0 + - /language_model/layers.32/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.32/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.32/Add_3_output_0 + - /language_model/layers.33/Add_output_0 + - /language_model/layers.33/Add_1_output_0 + - /language_model/layers.33/input_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.33/self_attn/q_norm/CustomRMSNorm_output_0 + - /language_model/layers.33/self_attn/k_norm/CustomRMSNorm_output_0 + - /language_model/layers.33/post_attention_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.33/Add_2_output_0 + - /language_model/layers.33/pre_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.33/post_feedforward_layernorm/CustomRMSNorm_output_0 + - /language_model/layers.33/Add_3_output_0 + - /language_model/norm/CustomRMSNorm_output_0 + - /language_model/layers.0/self_attn/Mul_output_0 + - /language_model/layers.0/self_attn/Mul_1_output_0 + - /language_model/layers.0/self_attn/Mul_2_output_0 + - /language_model/layers.0/self_attn/Mul_3_output_0 + - /language_model/layers.0/self_attn/Mul_4_output_0 + - /language_model/layers.0/self_attn/Mul_5_output_0 + - /language_model/layers.0/self_attn/Mul_6_output_0 + - /language_model/layers.0/self_attn/Mul_7_output_0 + - /language_model/layers.0/self_attn/Mul_8_output_0 + - /language_model/layers.1/self_attn/Mul_9_output_0 + - /language_model/layers.2/self_attn/Mul_output_0 + - /language_model/layers.2/self_attn/Mul_1_output_0 + - /language_model/layers.2/self_attn/Mul_2_output_0 + - /language_model/layers.2/self_attn/Mul_3_output_0 + - 
/language_model/layers.2/self_attn/Mul_4_output_0 + - /language_model/layers.2/self_attn/Mul_5_output_0 + - /language_model/layers.2/self_attn/Mul_6_output_0 + - /language_model/layers.2/self_attn/Mul_7_output_0 + - /language_model/layers.2/self_attn/Mul_8_output_0 + - /language_model/layers.2/self_attn/Mul_9_output_0 + - /language_model/layers.3/self_attn/Mul_output_0 + - /language_model/layers.3/self_attn/Mul_1_output_0 + - /language_model/layers.3/self_attn/Mul_2_output_0 + - /language_model/layers.3/self_attn/Mul_3_output_0 + - /language_model/layers.3/self_attn/Mul_4_output_0 + - /language_model/layers.3/self_attn/Mul_5_output_0 + - /language_model/layers.3/self_attn/Mul_6_output_0 + - /language_model/layers.3/self_attn/Mul_7_output_0 + - /language_model/layers.3/self_attn/Mul_8_output_0 + - /language_model/layers.3/self_attn/Mul_9_output_0 + - /language_model/layers.4/self_attn/Mul_output_0 + - /language_model/layers.4/self_attn/Mul_1_output_0 + - /language_model/layers.4/self_attn/Mul_2_output_0 + - /language_model/layers.4/self_attn/Mul_3_output_0 + - /language_model/layers.4/self_attn/Mul_4_output_0 + - /language_model/layers.4/self_attn/Mul_5_output_0 + - /language_model/layers.4/self_attn/Mul_6_output_0 + - /language_model/layers.4/self_attn/Mul_7_output_0 + - /language_model/layers.4/self_attn/Mul_8_output_0 + - /language_model/layers.4/self_attn/Mul_9_output_0 + - /language_model/layers.5/self_attn/Mul_output_0 + - /language_model/layers.5/self_attn/Mul_1_output_0 + - /language_model/layers.5/self_attn/Mul_2_output_0 + - /language_model/layers.5/self_attn/Mul_3_output_0 + - /language_model/layers.5/self_attn/Mul_4_output_0 + - /language_model/layers.5/self_attn/Mul_5_output_0 + - /language_model/layers.5/self_attn/Mul_6_output_0 + - /language_model/layers.5/self_attn/Mul_7_output_0 + - /language_model/layers.5/self_attn/Mul_8_output_0 + - /language_model/layers.5/self_attn/Mul_9_output_0 + - /language_model/layers.6/self_attn/Mul_output_0 + - 
/language_model/layers.6/self_attn/Mul_1_output_0 + - /language_model/layers.6/self_attn/Mul_2_output_0 + - /language_model/layers.6/self_attn/Mul_3_output_0 + - /language_model/layers.6/self_attn/Mul_4_output_0 + - /language_model/layers.6/self_attn/Mul_5_output_0 + - /language_model/layers.6/self_attn/Mul_6_output_0 + - /language_model/layers.6/self_attn/Mul_7_output_0 + - /language_model/layers.6/self_attn/Mul_8_output_0 + - /language_model/layers.6/self_attn/Mul_9_output_0 + - /language_model/layers.7/self_attn/Mul_output_0 + - /language_model/layers.7/self_attn/Mul_1_output_0 + - /language_model/layers.7/self_attn/Mul_2_output_0 + - /language_model/layers.7/self_attn/Mul_3_output_0 + - /language_model/layers.7/self_attn/Mul_4_output_0 + - /language_model/layers.7/self_attn/Mul_5_output_0 + - /language_model/layers.7/self_attn/Mul_6_output_0 + - /language_model/layers.7/self_attn/Mul_7_output_0 + - /language_model/layers.7/self_attn/Mul_8_output_0 + - /language_model/layers.7/self_attn/Mul_9_output_0 + - /language_model/layers.8/self_attn/Mul_output_0 + - /language_model/layers.8/self_attn/Mul_1_output_0 + - /language_model/layers.8/self_attn/Mul_2_output_0 + - /language_model/layers.8/self_attn/Mul_3_output_0 + - /language_model/layers.8/self_attn/Mul_4_output_0 + - /language_model/layers.8/self_attn/Mul_5_output_0 + - /language_model/layers.8/self_attn/Mul_6_output_0 + - /language_model/layers.8/self_attn/Mul_7_output_0 + - /language_model/layers.8/self_attn/Mul_8_output_0 + - /language_model/layers.8/self_attn/Mul_9_output_0 + - /language_model/layers.9/self_attn/Mul_output_0 + - /language_model/layers.9/self_attn/Mul_1_output_0 + - /language_model/layers.9/self_attn/Mul_2_output_0 + - /language_model/layers.9/self_attn/Mul_3_output_0 + - /language_model/layers.9/self_attn/Mul_4_output_0 + - /language_model/layers.9/self_attn/Mul_5_output_0 + - /language_model/layers.9/self_attn/Mul_6_output_0 + - /language_model/layers.9/self_attn/Mul_7_output_0 + - 
/language_model/layers.9/self_attn/Mul_8_output_0 + - /language_model/layers.9/self_attn/Mul_9_output_0 + - /language_model/layers.10/self_attn/Mul_output_0 + - /language_model/layers.10/self_attn/Mul_1_output_0 + - /language_model/layers.10/self_attn/Mul_2_output_0 + - /language_model/layers.10/self_attn/Mul_3_output_0 + - /language_model/layers.10/self_attn/Mul_4_output_0 + - /language_model/layers.10/self_attn/Mul_5_output_0 + - /language_model/layers.10/self_attn/Mul_6_output_0 + - /language_model/layers.10/self_attn/Mul_7_output_0 + - /language_model/layers.10/self_attn/Mul_8_output_0 + - /language_model/layers.10/self_attn/Mul_9_output_0 + - /language_model/layers.11/self_attn/Mul_output_0 + - /language_model/layers.11/self_attn/Mul_1_output_0 + - /language_model/layers.11/self_attn/Mul_2_output_0 + - /language_model/layers.11/self_attn/Mul_3_output_0 + - /language_model/layers.11/self_attn/Mul_4_output_0 + - /language_model/layers.11/self_attn/Mul_5_output_0 + - /language_model/layers.11/self_attn/Mul_6_output_0 + - /language_model/layers.11/self_attn/Mul_7_output_0 + - /language_model/layers.11/self_attn/Mul_8_output_0 + - /language_model/layers.11/self_attn/Mul_9_output_0 + - /language_model/layers.12/self_attn/Mul_output_0 + - /language_model/layers.12/self_attn/Mul_1_output_0 + - /language_model/layers.12/self_attn/Mul_2_output_0 + - /language_model/layers.12/self_attn/Mul_3_output_0 + - /language_model/layers.12/self_attn/Mul_4_output_0 + - /language_model/layers.12/self_attn/Mul_5_output_0 + - /language_model/layers.12/self_attn/Mul_6_output_0 + - /language_model/layers.12/self_attn/Mul_7_output_0 + - /language_model/layers.12/self_attn/Mul_8_output_0 + - /language_model/layers.12/self_attn/Mul_9_output_0 + - /language_model/layers.13/self_attn/Mul_output_0 + - /language_model/layers.13/self_attn/Mul_1_output_0 + - /language_model/layers.13/self_attn/Mul_2_output_0 + - /language_model/layers.13/self_attn/Mul_3_output_0 + - 
/language_model/layers.13/self_attn/Mul_4_output_0 + - /language_model/layers.13/self_attn/Mul_5_output_0 + - /language_model/layers.13/self_attn/Mul_6_output_0 + - /language_model/layers.13/self_attn/Mul_7_output_0 + - /language_model/layers.13/self_attn/Mul_8_output_0 + - /language_model/layers.13/self_attn/Mul_9_output_0 + - /language_model/layers.14/self_attn/Mul_output_0 + - /language_model/layers.14/self_attn/Mul_1_output_0 + - /language_model/layers.14/self_attn/Mul_2_output_0 + - /language_model/layers.14/self_attn/Mul_3_output_0 + - /language_model/layers.14/self_attn/Mul_4_output_0 + - /language_model/layers.14/self_attn/Mul_5_output_0 + - /language_model/layers.14/self_attn/Mul_6_output_0 + - /language_model/layers.14/self_attn/Mul_7_output_0 + - /language_model/layers.14/self_attn/Mul_8_output_0 + - /language_model/layers.14/self_attn/Mul_9_output_0 + - /language_model/layers.15/self_attn/Mul_output_0 + - /language_model/layers.15/self_attn/Mul_1_output_0 + - /language_model/layers.15/self_attn/Mul_2_output_0 + - /language_model/layers.15/self_attn/Mul_3_output_0 + - /language_model/layers.15/self_attn/Mul_4_output_0 + - /language_model/layers.15/self_attn/Mul_5_output_0 + - /language_model/layers.15/self_attn/Mul_6_output_0 + - /language_model/layers.15/self_attn/Mul_7_output_0 + - /language_model/layers.15/self_attn/Mul_8_output_0 + - /language_model/layers.15/self_attn/Mul_9_output_0 + - /language_model/layers.16/self_attn/Mul_output_0 + - /language_model/layers.16/self_attn/Mul_1_output_0 + - /language_model/layers.16/self_attn/Mul_2_output_0 + - /language_model/layers.16/self_attn/Mul_3_output_0 + - /language_model/layers.16/self_attn/Mul_4_output_0 + - /language_model/layers.16/self_attn/Mul_5_output_0 + - /language_model/layers.16/self_attn/Mul_6_output_0 + - /language_model/layers.16/self_attn/Mul_7_output_0 + - /language_model/layers.16/self_attn/Mul_8_output_0 + - /language_model/layers.16/self_attn/Mul_9_output_0 + - 
/language_model/layers.17/self_attn/Mul_output_0 + - /language_model/layers.17/self_attn/Mul_1_output_0 + - /language_model/layers.17/self_attn/Mul_2_output_0 + - /language_model/layers.17/self_attn/Mul_3_output_0 + - /language_model/layers.17/self_attn/Mul_4_output_0 + - /language_model/layers.17/self_attn/Mul_5_output_0 + - /language_model/layers.17/self_attn/Mul_6_output_0 + - /language_model/layers.17/self_attn/Mul_7_output_0 + - /language_model/layers.17/self_attn/Mul_8_output_0 + - /language_model/layers.17/self_attn/Mul_9_output_0 + - /language_model/layers.18/self_attn/Mul_output_0 + - /language_model/layers.18/self_attn/Mul_1_output_0 + - /language_model/layers.18/self_attn/Mul_2_output_0 + - /language_model/layers.18/self_attn/Mul_3_output_0 + - /language_model/layers.18/self_attn/Mul_4_output_0 + - /language_model/layers.18/self_attn/Mul_5_output_0 + - /language_model/layers.18/self_attn/Mul_6_output_0 + - /language_model/layers.18/self_attn/Mul_7_output_0 + - /language_model/layers.18/self_attn/Mul_8_output_0 + - /language_model/layers.18/self_attn/Mul_9_output_0 + - /language_model/layers.19/self_attn/Mul_output_0 + - /language_model/layers.19/self_attn/Mul_1_output_0 + - /language_model/layers.19/self_attn/Mul_2_output_0 + - /language_model/layers.19/self_attn/Mul_3_output_0 + - /language_model/layers.19/self_attn/Mul_4_output_0 + - /language_model/layers.19/self_attn/Mul_5_output_0 + - /language_model/layers.19/self_attn/Mul_6_output_0 + - /language_model/layers.19/self_attn/Mul_7_output_0 + - /language_model/layers.19/self_attn/Mul_8_output_0 + - /language_model/layers.19/self_attn/Mul_9_output_0 + - /language_model/layers.20/self_attn/Mul_output_0 + - /language_model/layers.20/self_attn/Mul_1_output_0 + - /language_model/layers.20/self_attn/Mul_2_output_0 + - /language_model/layers.20/self_attn/Mul_3_output_0 + - /language_model/layers.20/self_attn/Mul_4_output_0 + - /language_model/layers.20/self_attn/Mul_5_output_0 + - 
/language_model/layers.20/self_attn/Mul_6_output_0 + - /language_model/layers.20/self_attn/Mul_7_output_0 + - /language_model/layers.20/self_attn/Mul_8_output_0 + - /language_model/layers.20/self_attn/Mul_9_output_0 + - /language_model/layers.21/self_attn/Mul_output_0 + - /language_model/layers.21/self_attn/Mul_1_output_0 + - /language_model/layers.21/self_attn/Mul_2_output_0 + - /language_model/layers.21/self_attn/Mul_3_output_0 + - /language_model/layers.21/self_attn/Mul_4_output_0 + - /language_model/layers.21/self_attn/Mul_5_output_0 + - /language_model/layers.21/self_attn/Mul_6_output_0 + - /language_model/layers.21/self_attn/Mul_7_output_0 + - /language_model/layers.21/self_attn/Mul_8_output_0 + - /language_model/layers.21/self_attn/Mul_9_output_0 + - /language_model/layers.22/self_attn/Mul_output_0 + - /language_model/layers.22/self_attn/Mul_1_output_0 + - /language_model/layers.22/self_attn/Mul_2_output_0 + - /language_model/layers.22/self_attn/Mul_3_output_0 + - /language_model/layers.22/self_attn/Mul_4_output_0 + - /language_model/layers.22/self_attn/Mul_5_output_0 + - /language_model/layers.22/self_attn/Mul_6_output_0 + - /language_model/layers.22/self_attn/Mul_7_output_0 + - /language_model/layers.22/self_attn/Mul_8_output_0 + - /language_model/layers.22/self_attn/Mul_9_output_0 + - /language_model/layers.23/self_attn/Mul_output_0 + - /language_model/layers.23/self_attn/Mul_1_output_0 + - /language_model/layers.23/self_attn/Mul_2_output_0 + - /language_model/layers.23/self_attn/Mul_3_output_0 + - /language_model/layers.23/self_attn/Mul_4_output_0 + - /language_model/layers.23/self_attn/Mul_5_output_0 + - /language_model/layers.23/self_attn/Mul_6_output_0 + - /language_model/layers.23/self_attn/Mul_7_output_0 + - /language_model/layers.23/self_attn/Mul_8_output_0 + - /language_model/layers.23/self_attn/Mul_9_output_0 + - /language_model/layers.24/self_attn/Mul_output_0 + - /language_model/layers.24/self_attn/Mul_1_output_0 + - 
/language_model/layers.24/self_attn/Mul_2_output_0 + - /language_model/layers.24/self_attn/Mul_3_output_0 + - /language_model/layers.24/self_attn/Mul_4_output_0 + - /language_model/layers.24/self_attn/Mul_5_output_0 + - /language_model/layers.24/self_attn/Mul_6_output_0 + - /language_model/layers.24/self_attn/Mul_7_output_0 + - /language_model/layers.24/self_attn/Mul_8_output_0 + - /language_model/layers.24/self_attn/Mul_9_output_0 + - /language_model/layers.25/self_attn/Mul_output_0 + - /language_model/layers.25/self_attn/Mul_1_output_0 + - /language_model/layers.25/self_attn/Mul_2_output_0 + - /language_model/layers.25/self_attn/Mul_3_output_0 + - /language_model/layers.25/self_attn/Mul_4_output_0 + - /language_model/layers.25/self_attn/Mul_5_output_0 + - /language_model/layers.25/self_attn/Mul_6_output_0 + - /language_model/layers.25/self_attn/Mul_7_output_0 + - /language_model/layers.25/self_attn/Mul_8_output_0 + - /language_model/layers.25/self_attn/Mul_9_output_0 + - /language_model/layers.26/self_attn/Mul_output_0 + - /language_model/layers.26/self_attn/Mul_1_output_0 + - /language_model/layers.26/self_attn/Mul_2_output_0 + - /language_model/layers.26/self_attn/Mul_3_output_0 + - /language_model/layers.26/self_attn/Mul_4_output_0 + - /language_model/layers.26/self_attn/Mul_5_output_0 + - /language_model/layers.26/self_attn/Mul_6_output_0 + - /language_model/layers.26/self_attn/Mul_7_output_0 + - /language_model/layers.26/self_attn/Mul_8_output_0 + - /language_model/layers.26/self_attn/Mul_9_output_0 + - /language_model/layers.27/self_attn/Mul_output_0 + - /language_model/layers.27/self_attn/Mul_1_output_0 + - /language_model/layers.27/self_attn/Mul_2_output_0 + - /language_model/layers.27/self_attn/Mul_3_output_0 + - /language_model/layers.27/self_attn/Mul_4_output_0 + - /language_model/layers.27/self_attn/Mul_5_output_0 + - /language_model/layers.27/self_attn/Mul_6_output_0 + - /language_model/layers.27/self_attn/Mul_7_output_0 + - 
/language_model/layers.27/self_attn/Mul_8_output_0 + - /language_model/layers.27/self_attn/Mul_9_output_0 + - /language_model/layers.28/self_attn/Mul_output_0 + - /language_model/layers.28/self_attn/Mul_1_output_0 + - /language_model/layers.28/self_attn/Mul_2_output_0 + - /language_model/layers.28/self_attn/Mul_3_output_0 + - /language_model/layers.28/self_attn/Mul_4_output_0 + - /language_model/layers.28/self_attn/Mul_5_output_0 + - /language_model/layers.28/self_attn/Mul_6_output_0 + - /language_model/layers.28/self_attn/Mul_7_output_0 + - /language_model/layers.28/self_attn/Mul_8_output_0 + - /language_model/layers.28/self_attn/Mul_9_output_0 + - /language_model/layers.29/self_attn/Mul_output_0 + - /language_model/layers.29/self_attn/Mul_1_output_0 + - /language_model/layers.29/self_attn/Mul_2_output_0 + - /language_model/layers.29/self_attn/Mul_3_output_0 + - /language_model/layers.29/self_attn/Mul_4_output_0 + - /language_model/layers.29/self_attn/Mul_5_output_0 + - /language_model/layers.29/self_attn/Mul_6_output_0 + - /language_model/layers.29/self_attn/Mul_7_output_0 + - /language_model/layers.29/self_attn/Mul_8_output_0 + - /language_model/layers.29/self_attn/Mul_9_output_0 + - /language_model/layers.30/self_attn/Mul_output_0 + - /language_model/layers.30/self_attn/Mul_1_output_0 + - /language_model/layers.30/self_attn/Mul_2_output_0 + - /language_model/layers.30/self_attn/Mul_3_output_0 + - /language_model/layers.30/self_attn/Mul_4_output_0 + - /language_model/layers.30/self_attn/Mul_5_output_0 + - /language_model/layers.30/self_attn/Mul_6_output_0 + - /language_model/layers.30/self_attn/Mul_7_output_0 + - /language_model/layers.30/self_attn/Mul_8_output_0 + - /language_model/layers.30/self_attn/Mul_9_output_0 + - /language_model/layers.31/self_attn/Mul_output_0 + - /language_model/layers.31/self_attn/Mul_1_output_0 + - /language_model/layers.31/self_attn/Mul_2_output_0 + - /language_model/layers.31/self_attn/Mul_3_output_0 + - 
/language_model/layers.31/self_attn/Mul_4_output_0 + - /language_model/layers.31/self_attn/Mul_5_output_0 + - /language_model/layers.31/self_attn/Mul_6_output_0 + - /language_model/layers.31/self_attn/Mul_7_output_0 + - /language_model/layers.31/self_attn/Mul_8_output_0 + - /language_model/layers.31/self_attn/Mul_9_output_0 + - /language_model/layers.32/self_attn/Mul_output_0 + - /language_model/layers.32/self_attn/Mul_1_output_0 + - /language_model/layers.32/self_attn/Mul_2_output_0 + - /language_model/layers.32/self_attn/Mul_3_output_0 + - /language_model/layers.32/self_attn/Mul_4_output_0 + - /language_model/layers.32/self_attn/Mul_5_output_0 + - /language_model/layers.32/self_attn/Mul_6_output_0 + - /language_model/layers.32/self_attn/Mul_7_output_0 + - /language_model/layers.32/self_attn/Mul_8_output_0 + - /language_model/layers.32/self_attn/Mul_9_output_0 + - /language_model/layers.33/self_attn/Mul_output_0 + - /language_model/layers.33/self_attn/Mul_1_output_0 + - /language_model/layers.33/self_attn/Mul_2_output_0 + - /language_model/layers.33/self_attn/Mul_3_output_0 + - /language_model/layers.33/self_attn/Mul_4_output_0 + - /language_model/layers.33/self_attn/Mul_5_output_0 + - /language_model/layers.33/self_attn/Mul_6_output_0 + - /language_model/layers.33/self_attn/Mul_7_output_0 + - /language_model/layers.33/self_attn/Mul_8_output_0 + - /language_model/layers.33/self_attn/Mul_9_output_0 + - /language_model/layers.0/self_attn/Softmax_output_0 + - /language_model/layers.1/self_attn/Softmax_output_0 + - /language_model/layers.2/self_attn/Softmax_output_0 + - /language_model/layers.3/self_attn/Softmax_output_0 + - /language_model/layers.4/self_attn/Softmax_output_0 + - /language_model/layers.5/self_attn/Softmax_output_0 + - /language_model/layers.6/self_attn/Softmax_output_0 + - /language_model/layers.7/self_attn/Softmax_output_0 + - /language_model/layers.8/self_attn/Softmax_output_0 + - /language_model/layers.9/self_attn/Softmax_output_0 + - 
/language_model/layers.10/self_attn/Softmax_output_0 + - /language_model/layers.11/self_attn/Softmax_output_0 + - /language_model/layers.12/self_attn/Softmax_output_0 + - /language_model/layers.13/self_attn/Softmax_output_0 + - /language_model/layers.14/self_attn/Softmax_output_0 + - /language_model/layers.15/self_attn/Softmax_output_0 + - /language_model/layers.16/self_attn/Softmax_output_0 + - /language_model/layers.17/self_attn/Softmax_output_0 + - /language_model/layers.18/self_attn/Softmax_output_0 + - /language_model/layers.19/self_attn/Softmax_output_0 + - /language_model/layers.20/self_attn/Softmax_output_0 + - /language_model/layers.21/self_attn/Softmax_output_0 + - /language_model/layers.22/self_attn/Softmax_output_0 + - /language_model/layers.23/self_attn/Softmax_output_0 + - /language_model/layers.24/self_attn/Softmax_output_0 + - /language_model/layers.25/self_attn/Softmax_output_0 + - /language_model/layers.26/self_attn/Softmax_output_0 + - /language_model/layers.27/self_attn/Softmax_output_0 + - /language_model/layers.28/self_attn/Softmax_output_0 + - /language_model/layers.29/self_attn/Softmax_output_0 + - /language_model/layers.30/self_attn/Softmax_output_0 + - /language_model/layers.31/self_attn/Softmax_output_0 + - /language_model/layers.32/self_attn/Softmax_output_0 + - /language_model/layers.33/self_attn/Softmax_output_0 + diff --git a/examples/performance/compute_context_length/gpt_oss.py b/examples/performance/compute_context_length/gpt_oss.py index b211ba914..39a5d48ed 100644 --- a/examples/performance/compute_context_length/gpt_oss.py +++ b/examples/performance/compute_context_length/gpt_oss.py @@ -11,26 +11,27 @@ model_id = "openai/gpt-oss-20b" # weights are not required to convert to fp32 +## Activate Compute-Context-Length (CCL) feature by setting ccl_enabled=True when loading the model with from_pretrained(). +## Use the optional comp_ctx_lengths argument to provide two lists of context lengths for the prefilling and decoding processes. 
If comp_ctx_lengths=None, the model will run with its default context length. +## - The first list, comp_ctx_lengths_prefill, defines the compute-context-length values for the prefilling process. +## -- The process starts with the first value in the list and gradually increases the context length based on the position_id of the current prompt chunk. +## - The second list, comp_ctx_lengths_decode, defines the compute-context-length values for the decoding process. +## -- During decoding, the model selects an appropriate context length from the list based on the input prompt length and cache index. +## -- It starts from the correct value in the list and increases the context length dynamically when the cache index exceeds the current threshold. + ctx_len = 4096 # In moe models like gpt-oss, since prefill_seq_len=1 both comp_ctx_lengths_prefill and comp_ctx_lengths_decode can share similar lists. -# Set the list of ccl during prefilling process -comp_ctx_lengths_prefill = [512, ctx_len] -# Set the list of ccl during decoding process -comp_ctx_lengths_decode = [512, ctx_len] - +# Set the list of ccl during prefilling and decoding processes +comp_ctx_lengths_prefill = comp_ctx_lengths_decode = [1024, ctx_len] qeff_model = QEFFAutoModelForCausalLM.from_pretrained( model_id, qaic_config={ - "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": comp_ctx_lengths_decode, - "ctx_len": ctx_len, - "prefill_seq_len": 1, # Passing prefill_seq_len is mandatory for CCL goal in moe models. Currently we can get best perf using PL=1. + "ccl_enabled": True, }, ) tokenizer = AutoTokenizer.from_pretrained(model_id) -onnx_model_path = qeff_model.export() qpc_path = qeff_model.compile( prefill_seq_len=1, # Currently we can get best perf using PL=1 i.e. decode-only model, prefill optimizations are being worked on. 
ctx_len=ctx_len, @@ -41,6 +42,8 @@ mos=1, aic_enable_depth_first=True, num_speculative_tokens=None, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) print(f"qpc path is {qpc_path}") streamer = TextStreamer(tokenizer) diff --git a/examples/performance/compute_context_length/granite_vision.py b/examples/performance/compute_context_length/granite_vision.py index 39b139bad..6dd38395c 100644 --- a/examples/performance/compute_context_length/granite_vision.py +++ b/examples/performance/compute_context_length/granite_vision.py @@ -20,6 +20,7 @@ def run_model( kv_offload=False, prefill_seq_len=5500, ctx_len=6000, + ccl_enabled=False, comp_ctx_lengths_prefill=None, comp_ctx_lengths_decode=None, generation_len=128, @@ -41,9 +42,7 @@ def run_model( token=token, kv_offload=kv_offload, qaic_config={ - "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": comp_ctx_lengths_decode, - "ctx_len": ctx_len, + "ccl_enabled": ccl_enabled, }, ) @@ -56,6 +55,8 @@ def run_model( num_cores=num_cores, num_devices=num_devices, mxfp6_matmul=False, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) ## STEP - 3 Load and process the inputs for Inference @@ -96,6 +97,7 @@ def run_model( num_cores = 16 num_devices = 4 ctx_len = 8192 + ccl_enabled = True comp_ctx_lengths_prefill = [5500] comp_ctx_lengths_decode = [6144, ctx_len] @@ -106,6 +108,7 @@ def run_model( image_url=image_url, prefill_seq_len=prefill_seq_len, ctx_len=ctx_len, + ccl_enabled=ccl_enabled, comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, comp_ctx_lengths_decode=comp_ctx_lengths_decode, generation_len=generation_len, diff --git a/examples/performance/compute_context_length/internvl.py b/examples/performance/compute_context_length/internvl.py index 827d50c97..19bcf4bc1 100644 --- a/examples/performance/compute_context_length/internvl.py +++ 
b/examples/performance/compute_context_length/internvl.py @@ -174,26 +174,22 @@ def run_intern_on_aic( prefill_seq_len=3840, num_devices=1, num_cores=16, + ctx_len=512, + ccl_enabled=False, + comp_ctx_lengths_prefill=None, + comp_ctx_lengths_decode=None, ): ## STEP 1 -- LOAD THE MODEL # The original Intern-VL model, despite being multimodal, is loaded using `AutoModelForCausalLM` in Huggingface. # To maintain compatibility, we load this model using `QEFFAutoModelForCausalLM`. - ctx_len = 8192 - comp_ctx_lengths_prefill = [4096] - comp_ctx_lengths_decode = [6144, ctx_len] - - # model = QEFFAutoModelForCausalLM.from_pretrained(model_name, kv_offload=kv_offload, trust_remote_code=True) - model = QEFFAutoModelForCausalLM.from_pretrained( model_name, kv_offload=kv_offload, trust_remote_code=True, qaic_config={ - "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": comp_ctx_lengths_decode, - "ctx_len": ctx_len, + "ccl_enabled": ccl_enabled, }, ) @@ -205,6 +201,8 @@ def run_intern_on_aic( ctx_len=ctx_len, prefill_seq_len=prefill_seq_len, mxfp6_matmul=False, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) ## STEP 3 -- SETUP THE PROCESSOR @@ -263,6 +261,11 @@ def run_intern_on_aic( num_devices = 4 num_cores = 16 + ctx_len = 8192 + ccl_enabled = True + comp_ctx_lengths_prefill = [4096] + comp_ctx_lengths_decode = [6144, ctx_len] + run_intern_on_aic( model_name=model_name, prompt=prompt, @@ -273,6 +276,10 @@ def run_intern_on_aic( prefill_seq_len=prefill_seq_len, num_devices=num_devices, num_cores=num_cores, + ctx_len=ctx_len, + ccl_enabled=ccl_enabled, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) diff --git a/examples/performance/compute_context_length/llama4.py b/examples/performance/compute_context_length/llama4.py index 534be8f96..8cdbd70a1 100644 --- a/examples/performance/compute_context_length/llama4.py +++ 
b/examples/performance/compute_context_length/llama4.py @@ -17,6 +17,14 @@ config.text_config.num_hidden_layers = 4 config.vision_config.num_hidden_layers = 2 +## Activate Compute-Context-Length (CCL) feature by setting ccl_enabled=True when loading the model with from_pretrained(). +## Use the optional comp_ctx_lengths argument to provide two lists of context lengths for the prefilling and decoding processes. If comp_ctx_lengths=None, the model will run with its default context length. +## - The first list, comp_ctx_lengths_prefill, defines the compute-context-length values for the prefilling process. +## -- The process starts with the first value in the list and gradually increases the context length based on the position_id of the current prompt chunk. +## - The second list, comp_ctx_lengths_decode, defines the compute-context-length values for the decoding process. +## -- During decoding, the model selects an appropriate context length from the list based on the input prompt length and cache index. +## -- It starts from the correct value in the list and increases the context length dynamically when the cache index exceeds the current threshold. 
+ ctx_len = 8192 # Set the list of ccl during prefilling process comp_ctx_lengths_prefill = [3072] @@ -27,12 +35,10 @@ model_id, attn_implementation="eager", kv_offload=True, + config=config, qaic_config={ - "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": comp_ctx_lengths_decode, - "ctx_len": ctx_len, + "ccl_enabled": True, }, - config=config, ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) processor = AutoProcessor.from_pretrained(model_id) @@ -47,13 +53,15 @@ ctx_len=ctx_len, img_size=336, num_cores=16, - num_devices=4, + num_devices=8, max_num_tiles=17, mxfp6_matmul=True, mxint8_kv_cache=True, aic_enable_depth_first=True, skip_vision=True, mos=1, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) messages = [ @@ -77,7 +85,7 @@ ) streamer = TextStreamer(tokenizer) - output = qeff_model.generate(inputs=inputs, device_ids=[0, 1, 2, 3], generation_len=100) + output = qeff_model.generate(inputs=inputs, device_ids=[0, 1, 2, 3, 4, 5, 6, 7], generation_len=100) print(output.generated_ids) print(tokenizer.batch_decode(output.generated_ids)) print(output) @@ -89,12 +97,14 @@ ctx_len=ctx_len, img_size=336, num_cores=16, - num_devices=4, + num_devices=8, max_num_tiles=17, mxfp6_matmul=True, mxint8_kv_cache=True, aic_enable_depth_first=True, mos=1, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) ### IMAGE + TEXT ### @@ -121,7 +131,7 @@ ) inputs["pixel_values"] = inputs["pixel_values"].to(torch.float32) streamer = TextStreamer(tokenizer) - output = qeff_model.generate(inputs=inputs, device_ids=[0, 1, 2, 3], generation_len=100) + output = qeff_model.generate(inputs=inputs, device_ids=[0, 1, 2, 3, 4, 5, 6, 7], generation_len=100) print(output.generated_ids) print(tokenizer.batch_decode(output.generated_ids)) print(output) diff --git a/examples/performance/compute_context_length/llama4_cb.py 
b/examples/performance/compute_context_length/llama4_cb.py index ea7c09d69..ffbbff67f 100644 --- a/examples/performance/compute_context_length/llama4_cb.py +++ b/examples/performance/compute_context_length/llama4_cb.py @@ -19,6 +19,14 @@ tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) processor = AutoProcessor.from_pretrained(model_id) +## Activate Compute-Context-Length (CCL) feature by setting ccl_enabled=True when loading the model with from_pretrained(). +## Use the optional comp_ctx_lengths argument to provide two lists of context lengths for the prefilling and decoding processes. If comp_ctx_lengths=None, the model will run with its default context length. +## - The first list, comp_ctx_lengths_prefill, defines the compute-context-length values for the prefilling process. +## -- The process starts with the first value in the list and gradually increases the context length based on the position_id of the current prompt chunk. +## - The second list, comp_ctx_lengths_decode, defines the compute-context-length values for the decoding process. +## -- During decoding, the model selects an appropriate context length from the list based on the input prompt length and cache index. +## -- It starts from the correct value in the list and increases the context length dynamically when the cache index exceeds the current threshold. 
+ ctx_len = 4096 # Set the list of ccl during prefilling process comp_ctx_lengths_prefill = [3072] @@ -34,9 +42,7 @@ config=config, continuous_batching=True, qaic_config={ - "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": comp_ctx_lengths_decode, - "ctx_len": ctx_len, + "ccl_enabled": True, }, ) @@ -53,6 +59,8 @@ mxint8_kv_cache=True, aic_enable_depth_first=True, mos=1, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) else: qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( @@ -61,9 +69,7 @@ kv_offload=True, config=config, qaic_config={ - "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": comp_ctx_lengths_decode, - "ctx_len": ctx_len, + "ccl_enabled": True, }, ) @@ -79,6 +85,8 @@ mxint8_kv_cache=True, aic_enable_depth_first=True, mos=1, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) image_urls = [ diff --git a/examples/performance/compute_context_length/llama4_multi_image.py b/examples/performance/compute_context_length/llama4_multi_image.py index d7c403e5f..fd513fe45 100644 --- a/examples/performance/compute_context_length/llama4_multi_image.py +++ b/examples/performance/compute_context_length/llama4_multi_image.py @@ -17,6 +17,14 @@ config.text_config.num_hidden_layers = 4 config.vision_config.num_hidden_layers = 2 +## Activate Compute-Context-Length (CCL) feature by setting ccl_enabled=True when loading the model with from_pretrained(). +## Use the optional comp_ctx_lengths argument to provide two lists of context lengths for the prefilling and decoding processes. If comp_ctx_lengths=None, the model will run with its default context length. +## - The first list, comp_ctx_lengths_prefill, defines the compute-context-length values for the prefilling process. 
+## -- The process starts with the first value in the list and gradually increases the context length based on the position_id of the current prompt chunk. +## - The second list, comp_ctx_lengths_decode, defines the compute-context-length values for the decoding process. +## -- During decoding, the model selects an appropriate context length from the list based on the input prompt length and cache index. +## -- It starts from the correct value in the list and increases the context length dynamically when the cache index exceeds the current threshold. + ctx_len = 8192 # Set the list of ccl during prefilling process comp_ctx_lengths_prefill = [5376] @@ -29,9 +37,7 @@ kv_offload=True, config=config, qaic_config={ - "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": comp_ctx_lengths_decode, - "ctx_len": ctx_len, + "ccl_enabled": True, }, ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) @@ -49,6 +55,8 @@ mxint8_kv_cache=True, aic_enable_depth_first=True, mos=1, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) ### Multi_image Prompt ### diff --git a/examples/performance/compute_context_length/mistral3.py b/examples/performance/compute_context_length/mistral3.py index 96ed519f5..3763fbcde 100644 --- a/examples/performance/compute_context_length/mistral3.py +++ b/examples/performance/compute_context_length/mistral3.py @@ -19,6 +19,7 @@ def run_model( kv_offload=False, prefill_seq_len=128, ctx_len=4096, + ccl_enabled=False, comp_ctx_lengths_prefill=None, comp_ctx_lengths_decode=None, generation_len=128, @@ -37,15 +38,16 @@ def run_model( config = AutoConfig.from_pretrained(model_name) config.vision_config._attn_implementation = "eager" + # For Testing Purpose Only + config.text_config.num_hidden_layers = 4 + config.vision_config.num_hidden_layers = 2 model = QEFFAutoModelForImageTextToText.from_pretrained( model_name, kv_offload=kv_offload, config=config, qaic_config={ 
- "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": comp_ctx_lengths_decode, - "ctx_len": ctx_len, + "ccl_enabled": ccl_enabled, }, ) @@ -58,6 +60,8 @@ def run_model( num_cores=num_cores, num_devices=num_devices, mxfp6_matmul=False, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) ## STEP - 3 Load and process the inputs for Inference @@ -96,6 +100,7 @@ def run_model( generation_len = 128 num_cores = 16 num_devices = 4 + ccl_enabled = True comp_ctx_lengths_prefill = [4096] comp_ctx_lengths_decode = [6144, ctx_len] @@ -106,6 +111,7 @@ def run_model( image_url=image_url, prefill_seq_len=prefill_seq_len, ctx_len=ctx_len, + ccl_enabled=ccl_enabled, comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, comp_ctx_lengths_decode=comp_ctx_lengths_decode, generation_len=generation_len, diff --git a/examples/performance/compute_context_length/molmo.py b/examples/performance/compute_context_length/molmo.py index f68481631..b5f1f50e6 100644 --- a/examples/performance/compute_context_length/molmo.py +++ b/examples/performance/compute_context_length/molmo.py @@ -15,13 +15,21 @@ model_id = "allenai/Molmo-7B-D-0924" config = AutoConfig.from_pretrained(model_id, trust_remote_code=True) - +# For Testing Purpose Only # config.num_hidden_layers = 2 +## Activate Compute-Context-Length (CCL) feature by setting ccl_enabled=True when loading the model with from_pretrained(). +## Use the optional comp_ctx_lengths argument to provide two lists of context lengths for the prefilling and decoding processes. If comp_ctx_lengths=None, the model will run with its default context length. +## - The first list, comp_ctx_lengths_prefill, defines the compute-context-length values for the prefilling process. +## -- The process starts with the first value in the list and gradually increases the context length based on the position_id of the current prompt chunk. 
+## - The second list, comp_ctx_lengths_decode, defines the compute-context-length values for the decoding process. +## -- During decoding, the model selects an appropriate context length from the list based on the input prompt length and cache index. +## -- It starts from the correct value in the list and increases the context length dynamically when the cache index exceeds the current threshold. + # load the model ctx_len = 8192 -comp_ctx_lengths_prefill = [3072] -comp_ctx_lengths_decode = [4096, 8192] +comp_ctx_lengths_prefill = [3072] # None # +comp_ctx_lengths_decode = [4096, 8192] # None # qeff_model = QEFFAutoModelForCausalLM.from_pretrained( model_id, @@ -29,9 +37,7 @@ trust_remote_code=True, config=config, qaic_config={ - "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": comp_ctx_lengths_decode, - "ctx_len": ctx_len, + "ccl_enabled": True, }, ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, trust_remote_code=True) @@ -51,6 +57,8 @@ aic_enable_depth_first=True, skip_vision=True, mos=1, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) inputs = processor.process(text="Tell me about yourself") @@ -74,6 +82,8 @@ mxint8_kv_cache=True, aic_enable_depth_first=True, mos=1, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) ### IMAGE + TEXT ### diff --git a/examples/performance/compute_context_length/qwen2_5_vl.py b/examples/performance/compute_context_length/qwen2_5_vl.py index 00f43a73f..20960b6a9 100644 --- a/examples/performance/compute_context_length/qwen2_5_vl.py +++ b/examples/performance/compute_context_length/qwen2_5_vl.py @@ -19,11 +19,20 @@ ## For AWQ model update pytorch version to 2.8.* model_id = "Qwen/Qwen2.5-VL-32B-Instruct" config = AutoConfig.from_pretrained(model_id) -# config.text_config.num_hidden_layers = 2 +# For Testing Purpose Only +config.text_config.num_hidden_layers = 2 + +## 
Activate Compute-Context-Length (CCL) feature by setting ccl_enabled=True when loading the model with from_pretrained(). +## Use the optional comp_ctx_lengths argument to provide two lists of context lengths for the prefilling and decoding processes. If comp_ctx_lengths=None, the model will run with its default context length. +## - The first list, comp_ctx_lengths_prefill, defines the compute-context-length values for the prefilling process. +## -- The process starts with the first value in the list and gradually increases the context length based on the position_id of the current prompt chunk. +## - The second list, comp_ctx_lengths_decode, defines the compute-context-length values for the decoding process. +## -- During decoding, the model selects an appropriate context length from the list based on the input prompt length and cache index. +## -- It starts from the correct value in the list and increases the context length dynamically when the cache index exceeds the current threshold. 
ctx_len = 8192 -comp_ctx_lengths_prefill = [4096] -comp_ctx_lengths_decode = [6144, ctx_len] +comp_ctx_lengths_prefill = [4096] # None # +comp_ctx_lengths_decode = [6144, ctx_len] # None # qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( model_id, @@ -31,9 +40,7 @@ kv_offload=True, config=config, qaic_config={ - "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": comp_ctx_lengths_decode, - "ctx_len": ctx_len, + "ccl_enabled": True, }, ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) @@ -59,6 +66,8 @@ aic_enable_depth_first=True, skip_vision=True, mos=1, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) messages = [ @@ -103,6 +112,8 @@ mxint8_kv_cache=True, aic_enable_depth_first=True, mos=1, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) ### IMAGE + TEXT ### diff --git a/examples/performance/compute_context_length/qwen2_5_vl_cb.py b/examples/performance/compute_context_length/qwen2_5_vl_cb.py index 6954d356f..fc330e14e 100644 --- a/examples/performance/compute_context_length/qwen2_5_vl_cb.py +++ b/examples/performance/compute_context_length/qwen2_5_vl_cb.py @@ -16,7 +16,16 @@ ## For AWQ model update pytorch version to 2.8.* model_id = "Qwen/Qwen2.5-VL-32B-Instruct" config = AutoConfig.from_pretrained(model_id) -# config.text_config.num_hidden_layers = 2 +# For Testing Purpose Only +config.text_config.num_hidden_layers = 4 + +## Activate Compute-Context-Length (CCL) feature by setting ccl_enabled=True when loading the model with from_pretrained(). +## Use the optional comp_ctx_lengths argument to provide two lists of context lengths for the prefilling and decoding processes. If comp_ctx_lengths=None, the model will run with its default context length. +## - The first list, comp_ctx_lengths_prefill, defines the compute-context-length values for the prefilling process. 
+## -- The process starts with the first value in the list and gradually increases the context length based on the position_id of the current prompt chunk. +## - The second list, comp_ctx_lengths_decode, defines the compute-context-length values for the decoding process. +## -- During decoding, the model selects an appropriate context length from the list based on the input prompt length and cache index. +## -- It starts from the correct value in the list and increases the context length dynamically when the cache index exceeds the current threshold. ctx_len = 8192 comp_ctx_lengths_prefill = [4096] @@ -29,9 +38,7 @@ config=config, continuous_batching=True, qaic_config={ - "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": comp_ctx_lengths_decode, - "ctx_len": ctx_len, + "ccl_enabled": True, }, ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_id) @@ -52,6 +59,8 @@ mxint8_kv_cache=True, aic_enable_depth_first=True, mos=1, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) image_urls = [ @@ -75,6 +84,7 @@ processor=processor, images=image_urls, generation_len=100, + device_ids=[0, 1, 2, 3], ) print(output.generated_ids) print(tokenizer.batch_decode(output.generated_ids)) diff --git a/examples/performance/compute_context_length/qwen3moe.py b/examples/performance/compute_context_length/qwen3moe.py new file mode 100644 index 000000000..b53a28362 --- /dev/null +++ b/examples/performance/compute_context_length/qwen3moe.py @@ -0,0 +1,54 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +from transformers import AutoTokenizer + +from QEfficient import QEFFAutoModelForCausalLM +from QEfficient.utils.constants import Constants + +model_name = "Qwen/Qwen3-30B-A3B-Instruct-2507" +""" +# For CB inference, set continuous_batching to True and add full_batch_size,mxfp6,mxint8 argument in compile function +# We will use prompt_len=1 for compilation for both cb and non-cb inference +""" + +## Activate Compute-Context-Length (CCL) feature by setting ccl_enabled=True when loading the model with from_pretrained(). +## Use the optional comp_ctx_lengths argument to provide two lists of context lengths for the prefilling and decoding processes. If comp_ctx_lengths=None, the model will run with its default context length. +## - The first list, comp_ctx_lengths_prefill, defines the compute-context-length values for the prefilling process. +## -- The process starts with the first value in the list and gradually increases the context length based on the position_id of the current prompt chunk. +## - The second list, comp_ctx_lengths_decode, defines the compute-context-length values for the decoding process. +## -- During decoding, the model selects an appropriate context length from the list based on the input prompt length and cache index. +## -- It starts from the correct value in the list and increases the context length dynamically when the cache index exceeds the current threshold. + +ctx_len = 1024 +prefill_seq_len = 1 +# In moe models when compiling with prefill_seq_len=1 and non-continuous-batching mode, prefill and decode will share the same ccl specializations. 
+comp_ctx_lengths_prefill = comp_ctx_lengths_decode = [256, 512, ctx_len] + +model = QEFFAutoModelForCausalLM.from_pretrained( + model_name, + continuous_batching=False, + qaic_config={ + "ccl_enabled": True, + }, +) + +model.compile( + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + batch_size=1, + num_cores=16, + num_devices=4, + mxfp6_matmul=True, + mxint8_kv_cache=True, + mos=1, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, +) + +tokenizer = AutoTokenizer.from_pretrained(model_name) +exec_info = model.generate(prompts=Constants.INPUT_STR, tokenizer=tokenizer) diff --git a/examples/performance/compute_context_length/qwen3moe_example/ccl_qwen3moe_inference.py b/examples/performance/compute_context_length/qwen3moe_example/ccl_qwen3moe_inference.py index d2fa208df..9fb4c4d43 100644 --- a/examples/performance/compute_context_length/qwen3moe_example/ccl_qwen3moe_inference.py +++ b/examples/performance/compute_context_length/qwen3moe_example/ccl_qwen3moe_inference.py @@ -16,21 +16,25 @@ # We will use prompt_len=1 for compilation for both cb and non-cb inference """ +## Activate Compute-Context-Length (CCL) feature by setting ccl_enabled=True when loading the model with from_pretrained(). +## Use the optional comp_ctx_lengths argument to provide two lists of context lengths for the prefilling and decoding processes. If comp_ctx_lengths=None, the model will run with its default context length. +## - The first list, comp_ctx_lengths_prefill, defines the compute-context-length values for the prefilling process. +## -- The process starts with the first value in the list and gradually increases the context length based on the position_id of the current prompt chunk. +## - The second list, comp_ctx_lengths_decode, defines the compute-context-length values for the decoding process. 
+## -- During decoding, the model selects an appropriate context length from the list based on the input prompt length and cache index. +## -- It starts from the correct value in the list and increases the context length dynamically when the cache index exceeds the current threshold. + ctx_len = 1024 prefill_seq_len = 1 -# In moe models when compiling with prefill_seq_len=1 and non-continuous-batching mode, prefill and decode will share the same specializations. -comp_ctx_lengths_prefill = [256, 512, ctx_len] -comp_ctx_lengths_decode = [256, 512, ctx_len] +# In moe models when compiling with prefill_seq_len=1 and non-continuous-batching mode, prefill and decode will share the same ccl specializations. +comp_ctx_lengths_prefill = [256, 512, ctx_len] # None # +comp_ctx_lengths_decode = [256, 512, ctx_len] # None # model = QEFFAutoModelForCausalLM.from_pretrained( model_name, continuous_batching=False, - qaic_config={ - "comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": comp_ctx_lengths_decode, - "ctx_len": ctx_len, - "prefill_seq_len": prefill_seq_len, - }, + ccl_enabled=True, + num_hidden_layers=4, ) model.compile( @@ -42,6 +46,8 @@ mxfp6_matmul=True, mxint8_kv_cache=True, mos=1, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) # mos=1, tokenizer = AutoTokenizer.from_pretrained(model_name) diff --git a/examples/performance/compute_context_length/vlm_inference.py b/examples/performance/compute_context_length/vlm_inference.py index 0920ddf30..876daa3e6 100644 --- a/examples/performance/compute_context_length/vlm_inference.py +++ b/examples/performance/compute_context_length/vlm_inference.py @@ -30,6 +30,7 @@ def run_model( kv_offload=True, prefill_seq_len=32, ctx_len=8192, + ccl_enabled=False, comp_ctx_lengths_prefill=None, comp_ctx_lengths_decode=None, generation_len=128, @@ -77,9 +78,7 @@ def run_model( attn_implementation="eager", kv_offload=kv_offload, qaic_config={ - 
"comp_ctx_lengths_prefill": comp_ctx_lengths_prefill, - "comp_ctx_lengths_decode": comp_ctx_lengths_decode, - "ctx_len": ctx_len, + "ccl_enabled": ccl_enabled, }, ) @@ -93,6 +92,8 @@ def run_model( num_cores=num_cores, num_devices=num_devices, mxfp6_matmul=False, + comp_ctx_lengths_prefill=comp_ctx_lengths_prefill, + comp_ctx_lengths_decode=comp_ctx_lengths_decode, ) print(f"Model compiled successfully to: {qpc_path}") @@ -177,6 +178,11 @@ def main(): default=8192, help="Maximum context length", ) + parser.add_argument( + "--ccl-enabled", + action="store_true", + help="Enable compute-context-length (CCL) feature", + ) parser.add_argument( "--comp-ctx-lengths-prefill", type=lambda x: [int(i) for i in x.split(",")], @@ -198,7 +204,7 @@ def main(): parser.add_argument( "--img-size", type=int, - default=336, + default=560, help="Image size for processing", ) parser.add_argument( @@ -223,6 +229,7 @@ def main(): kv_offload=args.kv_offload, prefill_seq_len=args.prefill_seq_len, ctx_len=args.ctx_len, + ccl_enabled=args.ccl_enabled, comp_ctx_lengths_prefill=args.comp_ctx_lengths_prefill, comp_ctx_lengths_decode=args.comp_ctx_lengths_decode, generation_len=args.generation_len, From 1a8ec9dff6f5c509fb9297cdd9c1519305160bf5 Mon Sep 17 00:00:00 2001 From: Amit Raj Date: Mon, 22 Dec 2025 08:22:41 +0000 Subject: [PATCH 44/60] Diffusers support (#604) This pull request introduces **Diffusers architecture support** to the **Efficient Transformers** framework, enabling seamless integration of diffusion models. 1. **Support of model [black-forest-labs/FLUX1-schnell](https://huggingface.co/black-forest-labs/FLUX.1-schnell)** 2. **Flexible Configuration** - Supports JSON-based configuration files for easy compilation and execution. 3. **Performance Benchmarking** - Implements a performance matrix for Diffusers models to enable benchmarking for each modules. 4. **Testing Framework** - Includes initial test scripts for Diffusers (In progress). 5. 
**Support of onnx subfunction graph using flag use_onnx_function** 6. **Support parallel compilation of modules using flag `parallel_compile`** --------- Signed-off-by: Amit Raj Signed-off-by: Dhiraj Kumar Sah --- QEfficient/__init__.py | 2 + QEfficient/base/modeling_qeff.py | 55 +- QEfficient/diffusers/README.md | 95 ++ QEfficient/diffusers/__init__.py | 6 + QEfficient/diffusers/models/__init__.py | 6 + QEfficient/diffusers/models/normalization.py | 40 + .../diffusers/models/pytorch_transforms.py | 56 ++ .../diffusers/models/transformers/__init__.py | 6 + .../models/transformers/transformer_flux.py | 327 +++++++ QEfficient/diffusers/pipelines/__init__.py | 6 + .../pipelines/configs/flux_config.json | 99 ++ .../diffusers/pipelines/flux/__init__.py | 6 + .../diffusers/pipelines/flux/pipeline_flux.py | 854 ++++++++++++++++++ .../diffusers/pipelines/pipeline_module.py | 481 ++++++++++ .../diffusers/pipelines/pipeline_utils.py | 218 +++++ .../transformers/models/modeling_auto.py | 90 -- .../transformers/models/pytorch_transforms.py | 16 + QEfficient/transformers/models/t5/__init__.py | 6 + .../transformers/models/t5/modeling_t5.py | 145 +++ QEfficient/utils/__init__.py | 1 - QEfficient/utils/_utils.py | 54 +- QEfficient/utils/constants.py | 7 + QEfficient/utils/export_utils.py | 235 +++++ QEfficient/utils/hash_utils.py | 3 +- docs/image/girl_laughing.png | Bin 0 -> 430404 bytes examples/diffusers/flux/README.md | 243 +++++ examples/diffusers/flux/flux_1_schnell.py | 45 + .../diffusers/flux/flux_1_shnell_custom.py | 113 +++ examples/diffusers/flux/flux_config.json | 99 ++ pyproject.toml | 3 +- scripts/Jenkinsfile | 7 +- tests/base/test_export_memory_offload.py | 2 +- tests/diffusers/diffusers_utils.py | 175 ++++ tests/diffusers/flux_test_config.json | 123 +++ tests/diffusers/test_flux.py | 448 +++++++++ tests/transformers/test_causal_lm.py | 2 +- tests/transformers/test_speech_seq2seq.py | 2 +- tests/utils/test_hash_utils.py | 2 +- 38 files changed, 3899 insertions(+), 
179 deletions(-) create mode 100644 QEfficient/diffusers/README.md create mode 100644 QEfficient/diffusers/__init__.py create mode 100644 QEfficient/diffusers/models/__init__.py create mode 100644 QEfficient/diffusers/models/normalization.py create mode 100644 QEfficient/diffusers/models/pytorch_transforms.py create mode 100644 QEfficient/diffusers/models/transformers/__init__.py create mode 100644 QEfficient/diffusers/models/transformers/transformer_flux.py create mode 100644 QEfficient/diffusers/pipelines/__init__.py create mode 100644 QEfficient/diffusers/pipelines/configs/flux_config.json create mode 100644 QEfficient/diffusers/pipelines/flux/__init__.py create mode 100644 QEfficient/diffusers/pipelines/flux/pipeline_flux.py create mode 100644 QEfficient/diffusers/pipelines/pipeline_module.py create mode 100644 QEfficient/diffusers/pipelines/pipeline_utils.py create mode 100644 QEfficient/transformers/models/t5/__init__.py create mode 100644 QEfficient/transformers/models/t5/modeling_t5.py create mode 100644 QEfficient/utils/export_utils.py create mode 100644 docs/image/girl_laughing.png create mode 100644 examples/diffusers/flux/README.md create mode 100644 examples/diffusers/flux/flux_1_schnell.py create mode 100644 examples/diffusers/flux/flux_1_shnell_custom.py create mode 100644 examples/diffusers/flux/flux_config.json create mode 100644 tests/diffusers/diffusers_utils.py create mode 100644 tests/diffusers/flux_test_config.json create mode 100644 tests/diffusers/test_flux.py diff --git a/QEfficient/__init__.py b/QEfficient/__init__.py index 7f63b34ca..2d8f72e0a 100644 --- a/QEfficient/__init__.py +++ b/QEfficient/__init__.py @@ -18,6 +18,7 @@ QEFFCommonLoader, ) from QEfficient.compile.compile_helper import compile +from QEfficient.diffusers.pipelines.flux.pipeline_flux import QEffFluxPipeline from QEfficient.exporter.export_hf_to_cloud_ai_100 import qualcomm_efficient_converter from QEfficient.generation.text_generation_inference import 
cloud_ai_100_exec_kv from QEfficient.peft import QEffAutoPeftModelForCausalLM @@ -39,6 +40,7 @@ "QEFFAutoModelForImageTextToText", "QEFFAutoModelForSpeechSeq2Seq", "QEFFCommonLoader", + "QEffFluxPipeline", ] # For faster downloads via hf_transfer # This code is put above import statements as this needs to be executed before diff --git a/QEfficient/base/modeling_qeff.py b/QEfficient/base/modeling_qeff.py index ef7e83adf..ea347016b 100644 --- a/QEfficient/base/modeling_qeff.py +++ b/QEfficient/base/modeling_qeff.py @@ -8,7 +8,6 @@ import gc import inspect import logging -import re import shutil import subprocess import warnings @@ -21,26 +20,21 @@ from QEfficient.base.onnx_transforms import ( BaseOnnxTransform, - CustomOpTransform, OnnxTransformPipeline, - RenameFunctionOutputsTransform, ) from QEfficient.base.pytorch_transforms import PytorchTransform from QEfficient.compile.qnn_compiler import compile as qnn_compile from QEfficient.generation.cloud_infer import QAICInferenceSession -from QEfficient.transformers.cache_utils import InvalidIndexProvider -from QEfficient.transformers.models.pytorch_transforms import get_decoder_layer_classes_for_export from QEfficient.utils import ( constants, create_json, create_model_params, dump_qconfig, - export_wrapper, generate_mdp_partition_config, hash_dict_params, load_json, ) -from QEfficient.utils.torch_patches import apply_torch_patches, undo_torch_patches +from QEfficient.utils.export_utils import export_wrapper logger = logging.getLogger(__name__) @@ -125,9 +119,35 @@ def _model_offloaded_check(self) -> None: logger.error(error_msg) raise RuntimeError(error_msg) + @property + def model_name(self) -> str: + """ + Get the model class name without QEff/QEFF prefix. + + This property extracts the underlying model's class name and removes + any QEff or QEFF prefix that may have been added during wrapping. 
+ + Returns: + str: Model class name (e.g., "CLIPTextModel" instead of "QEffCLIPTextModel") + """ + mname = self.model.__class__.__name__ + if mname.startswith("QEff") or mname.startswith("QEFF"): + mname = mname[4:] + return mname + @property @abstractmethod - def model_name(self) -> str: ... + def get_model_config(self) -> Dict: + """ + Get the model configuration as a dictionary. + + This is an abstract property that must be implemented by all subclasses. + Typically returns: self.model.config.__dict__ + + Returns: + Dict: The configuration dictionary of the underlying model + """ + pass @abstractmethod def export(self, export_dir: Optional[str] = None) -> Path: @@ -188,7 +208,6 @@ def _export( onnx_transform_kwargs: Optional[Dict[str, any]] = None, export_dir: Optional[str] = None, offload_pt_weights: bool = True, - use_onnx_subfunctions: bool = False, ) -> str: """ Export the PyTorch model to ONNX and apply ONNX transforms @@ -253,18 +272,8 @@ def _export( input_names.append(param) try: - # Initialize the registry with your custom ops + # Export to ONNX export_kwargs = {} if export_kwargs is None else export_kwargs - if use_onnx_subfunctions: - warnings.warn( - "The subfunction feature is experimental. Please note that using compile consecutively with and without subfunction may produce inconsistent results." 
- ) - apply_torch_patches() - InvalidIndexProvider.SUBFUNC_ENABLED = True - output_names = [re.sub("_RetainedState", "_InternalRetainedState", s) for s in output_names] - export_kwargs["export_modules_as_functions"] = get_decoder_layer_classes_for_export(self.model) - self._onnx_transforms.append(RenameFunctionOutputsTransform) - self._onnx_transforms.append(CustomOpTransform) torch.onnx.export( self.model, @@ -309,12 +318,6 @@ def _export( finally: shutil.rmtree(tmp_onnx_dir, ignore_errors=True) - if use_onnx_subfunctions: - undo_torch_patches() - InvalidIndexProvider.SUBFUNC_ENABLED = False - self._onnx_transforms.remove(CustomOpTransform) - self._onnx_transforms.remove(RenameFunctionOutputsTransform) - self.onnx_path = onnx_path return onnx_path diff --git a/QEfficient/diffusers/README.md b/QEfficient/diffusers/README.md new file mode 100644 index 000000000..40d45e984 --- /dev/null +++ b/QEfficient/diffusers/README.md @@ -0,0 +1,95 @@ + +

+ +--- + +## ✨ Overview + +QEfficient Diffusers brings the power of state-of-the-art diffusion models to Qualcomm Cloud AI 100 hardware for text-to-image generation. Built on top of the popular HuggingFace Diffusers library, our optimized pipeline provides seamless inference on Qualcomm Cloud AI 100 hardware. + +## šŸ› ļø Installation + +### Prerequisites + +Ensure you have Python 3.8+ and the required dependencies: + +```bash +# Create Python virtual environment (Recommended Python 3.10) +sudo apt install python3.10-venv +python3.10 -m venv qeff_env +source qeff_env/bin/activate +pip install -U pip +``` + +### Install QEfficient + +```bash +# Install from GitHub (includes diffusers support) +pip install git+https://github.com/quic/efficient-transformers + +# Or build from source +git clone https://github.com/quic/efficient-transformers.git +cd efficient-transformers +pip install build wheel +python -m build --wheel --outdir dist +pip install dist/qefficient-0.0.1.dev0-py3-none-any.whl +``` + +--- + +## šŸŽÆ Supported Models +- āœ… [`black-forest-labs/FLUX.1-schnell`](https://huggingface.co/black-forest-labs/FLUX.1-schnell) + +--- + + +## šŸ“š Examples + +Check out our comprehensive examples in the [`examples/diffusers/`](../../examples/diffusers/) directory: + +--- + +## šŸ¤ Contributing + +We welcome contributions! Please see our [Contributing Guide](../../CONTRIBUTING.md) for details. 
+
+
+
+---
+
+## šŸ™ Acknowledgments
+
+- **HuggingFace Diffusers**: For the excellent foundation library
+- **Black Forest Labs**: For the amazing FLUX models
+---
+
+## šŸ“ž Support
+
+- šŸ“– **Documentation**: [https://quic.github.io/efficient-transformers/](https://quic.github.io/efficient-transformers/)
+
+- šŸ› **Issues**: [GitHub Issues](https://github.com/quic/efficient-transformers/issues)
+
+---
+
diff --git a/QEfficient/diffusers/__init__.py b/QEfficient/diffusers/__init__.py
new file mode 100644
index 000000000..75daf1953
--- /dev/null
+++ b/QEfficient/diffusers/__init__.py
@@ -0,0 +1,6 @@
+# -----------------------------------------------------------------------------
+#
+# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# ----------------------------------------------------------------------------
diff --git a/QEfficient/diffusers/models/__init__.py b/QEfficient/diffusers/models/__init__.py
new file mode 100644
index 000000000..75daf1953
--- /dev/null
+++ b/QEfficient/diffusers/models/__init__.py
@@ -0,0 +1,6 @@
+# -----------------------------------------------------------------------------
+#
+# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# ----------------------------------------------------------------------------
diff --git a/QEfficient/diffusers/models/normalization.py b/QEfficient/diffusers/models/normalization.py
new file mode 100644
index 000000000..933832ed8
--- /dev/null
+++ b/QEfficient/diffusers/models/normalization.py
@@ -0,0 +1,40 @@
+# -----------------------------------------------------------------------------
+#
+# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+# SPDX-License-Identifier: BSD-3-Clause +# +# ---------------------------------------------------------------------------- +from typing import Optional, Tuple + +import torch +from diffusers.models.normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle + + +class QEffAdaLayerNormZero(AdaLayerNormZero): + def forward( + self, + x: torch.Tensor, + shift_msa: Optional[torch.Tensor] = None, + scale_msa: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] + return x + + +class QEffAdaLayerNormZeroSingle(AdaLayerNormZeroSingle): + def forward( + self, + x: torch.Tensor, + scale_msa: Optional[torch.Tensor] = None, + shift_msa: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] + return x + + +class QEffAdaLayerNormContinuous(AdaLayerNormContinuous): + def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor: + emb = conditioning_embedding + scale, shift = torch.chunk(emb, 2, dim=1) + x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] + return x diff --git a/QEfficient/diffusers/models/pytorch_transforms.py b/QEfficient/diffusers/models/pytorch_transforms.py new file mode 100644 index 000000000..d3c84ee63 --- /dev/null +++ b/QEfficient/diffusers/models/pytorch_transforms.py @@ -0,0 +1,56 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +from diffusers.models.normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle, RMSNorm +from diffusers.models.transformers.transformer_flux import ( + FluxAttention, + FluxAttnProcessor, + FluxSingleTransformerBlock, + FluxTransformer2DModel, + FluxTransformerBlock, +) +from torch import nn + +from QEfficient.base.pytorch_transforms import ModuleMappingTransform +from QEfficient.customop.rms_norm import CustomRMSNormAIC +from QEfficient.diffusers.models.normalization import ( + QEffAdaLayerNormContinuous, + QEffAdaLayerNormZero, + QEffAdaLayerNormZeroSingle, +) +from QEfficient.diffusers.models.transformers.transformer_flux import ( + QEffFluxAttention, + QEffFluxAttnProcessor, + QEffFluxSingleTransformerBlock, + QEffFluxTransformer2DModel, + QEffFluxTransformerBlock, +) + + +class CustomOpsTransform(ModuleMappingTransform): + _module_mapping = { + RMSNorm: CustomRMSNormAIC, + nn.RMSNorm: CustomRMSNormAIC, # for torch.nn.RMSNorm + } + + +class AttentionTransform(ModuleMappingTransform): + _module_mapping = { + FluxSingleTransformerBlock: QEffFluxSingleTransformerBlock, + FluxTransformerBlock: QEffFluxTransformerBlock, + FluxTransformer2DModel: QEffFluxTransformer2DModel, + FluxAttention: QEffFluxAttention, + FluxAttnProcessor: QEffFluxAttnProcessor, + } + + +class NormalizationTransform(ModuleMappingTransform): + _module_mapping = { + AdaLayerNormZero: QEffAdaLayerNormZero, + AdaLayerNormZeroSingle: QEffAdaLayerNormZeroSingle, + AdaLayerNormContinuous: QEffAdaLayerNormContinuous, + } diff --git a/QEfficient/diffusers/models/transformers/__init__.py b/QEfficient/diffusers/models/transformers/__init__.py new file mode 100644 index 000000000..75daf1953 --- /dev/null +++ b/QEfficient/diffusers/models/transformers/__init__.py @@ -0,0 +1,6 @@ +# 
----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ---------------------------------------------------------------------------- diff --git a/QEfficient/diffusers/models/transformers/transformer_flux.py b/QEfficient/diffusers/models/transformers/transformer_flux.py new file mode 100644 index 000000000..5cb44af45 --- /dev/null +++ b/QEfficient/diffusers/models/transformers/transformer_flux.py @@ -0,0 +1,327 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ---------------------------------------------------------------------------- +from typing import Any, Dict, Optional, Tuple, Union + +import numpy as np +import torch +from diffusers.models.attention_dispatch import dispatch_attention_fn +from diffusers.models.modeling_outputs import Transformer2DModelOutput +from diffusers.models.transformers.transformer_flux import ( + FluxAttention, + FluxAttnProcessor, + FluxSingleTransformerBlock, + FluxTransformer2DModel, + FluxTransformerBlock, + _get_qkv_projections, +) + +from QEfficient.utils.logging_utils import logger + + +def qeff_apply_rotary_emb( + x: torch.Tensor, freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]] +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Apply rotary embeddings to input tensors using the given frequency tensor. This function applies rotary embeddings + to the given query or key 'x' tensors using the provided frequency tensor 'freqs_cis'. The input tensors are + reshaped as complex numbers, and the frequency tensor is reshaped for broadcasting compatibility. The resulting + tensors contain rotary embeddings and are returned as real tensors. + + Args: + x (`torch.Tensor`): + Query or key tensor to apply rotary embeddings. 
[B, H, S, D] xk (torch.Tensor): Key tensor to apply + freqs_cis (`Tuple[torch.Tensor]`): Precomputed frequency tensor for complex exponentials. ([S, D], [S, D],) + + Returns: + Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor and key tensor with rotary embeddings. + """ + cos, sin = freqs_cis # [S, D] + cos = cos[None, :, None, :] + sin = sin[None, :, None, :] + cos, sin = cos.to(x.device), sin.to(x.device) + B, S, H, D = x.shape + x_real, x_imag = x.reshape(B, -1, H, D // 2, 2).unbind(-1) + x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3) + out = (x.float() * cos + x_rotated.float() * sin).to(x.dtype) + return out + + +class QEffFluxAttnProcessor(FluxAttnProcessor): + _attention_backend = None + _parallel_config = None + + def __call__( + self, + attn: "QEffFluxAttention", + hidden_states: torch.Tensor, + encoder_hidden_states: torch.Tensor = None, + attention_mask: Optional[torch.Tensor] = None, + image_rotary_emb: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + query, key, value, encoder_query, encoder_key, encoder_value = _get_qkv_projections( + attn, hidden_states, encoder_hidden_states + ) + + query = query.unflatten(-1, (attn.heads, -1)) + key = key.unflatten(-1, (attn.heads, -1)) + value = value.unflatten(-1, (attn.heads, -1)) + + query = attn.norm_q(query) + key = attn.norm_k(key) + + if attn.added_kv_proj_dim is not None: + encoder_query = encoder_query.unflatten(-1, (attn.heads, -1)) + encoder_key = encoder_key.unflatten(-1, (attn.heads, -1)) + encoder_value = encoder_value.unflatten(-1, (attn.heads, -1)) + + encoder_query = attn.norm_added_q(encoder_query) + encoder_key = attn.norm_added_k(encoder_key) + + query = torch.cat([encoder_query, query], dim=1) + key = torch.cat([encoder_key, key], dim=1) + value = torch.cat([encoder_value, value], dim=1) + + if image_rotary_emb is not None: + query = qeff_apply_rotary_emb(query, image_rotary_emb) + key = qeff_apply_rotary_emb(key, image_rotary_emb) + + hidden_states = 
dispatch_attention_fn( + query, key, value, attn_mask=attention_mask, backend=self._attention_backend + ) + hidden_states = hidden_states.flatten(2, 3) + hidden_states = hidden_states.to(query.dtype) + + if encoder_hidden_states is not None: + encoder_hidden_states, hidden_states = hidden_states.split_with_sizes( + [encoder_hidden_states.shape[1], hidden_states.shape[1] - encoder_hidden_states.shape[1]], dim=1 + ) + hidden_states = attn.to_out[0](hidden_states) + hidden_states = attn.to_out[1](hidden_states) + encoder_hidden_states = attn.to_add_out(encoder_hidden_states) + + return hidden_states, encoder_hidden_states + else: + return hidden_states + + +class QEffFluxAttention(FluxAttention): + def __qeff_init__(self): + processor = QEffFluxAttnProcessor() + self.processor = processor + + +class QEffFluxSingleTransformerBlock(FluxSingleTransformerBlock): + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: torch.Tensor, + temb: torch.Tensor, + image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + text_seq_len = encoder_hidden_states.shape[1] + hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) + shift_msa, scale_msa, gate = torch.split(temb, 1) + residual = hidden_states + norm_hidden_states = self.norm(hidden_states, scale_msa, shift_msa) + mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) + joint_attention_kwargs = joint_attention_kwargs or {} + attn_output = self.attn( + hidden_states=norm_hidden_states, + image_rotary_emb=image_rotary_emb, + **joint_attention_kwargs, + ) + hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2) + gate = gate.unsqueeze(1) + hidden_states = gate * self.proj_out(hidden_states) + hidden_states = residual + hidden_states + # if hidden_states.dtype == torch.float16: + hidden_states = hidden_states.clip(torch.finfo(torch.float32).min, 
torch.finfo(torch.float32).max) + + encoder_hidden_states, hidden_states = hidden_states[:, :text_seq_len], hidden_states[:, text_seq_len:] + return encoder_hidden_states, hidden_states + + +class QEffFluxTransformerBlock(FluxTransformerBlock): + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: torch.Tensor, + temb: torch.Tensor, + image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + temb1 = tuple(torch.split(temb[:6], 1)) + temb2 = tuple(torch.split(temb[6:], 1)) + norm_hidden_states = self.norm1(hidden_states, shift_msa=temb1[0], scale_msa=temb1[1]) + gate_msa, shift_mlp, scale_mlp, gate_mlp = temb1[-4:] + + norm_encoder_hidden_states = self.norm1_context(encoder_hidden_states, shift_msa=temb2[0], scale_msa=temb2[1]) + + c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = temb2[-4:] + + joint_attention_kwargs = joint_attention_kwargs or {} + + # Attention. + attention_outputs = self.attn( + hidden_states=norm_hidden_states, + encoder_hidden_states=norm_encoder_hidden_states, + image_rotary_emb=image_rotary_emb, + **joint_attention_kwargs, + ) + + if len(attention_outputs) == 2: + attn_output, context_attn_output = attention_outputs + elif len(attention_outputs) == 3: + attn_output, context_attn_output, ip_attn_output = attention_outputs + + # Process attention outputs for the `hidden_states`. + attn_output = gate_msa.unsqueeze(1) * attn_output + hidden_states = hidden_states + attn_output + + norm_hidden_states = self.norm2(hidden_states) + norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] + + ff_output = self.ff(norm_hidden_states) + ff_output = gate_mlp.unsqueeze(1) * ff_output + + hidden_states = hidden_states + ff_output + if len(attention_outputs) == 3: + hidden_states = hidden_states + ip_attn_output + + # Process attention outputs for the `encoder_hidden_states`. 
+ context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output + encoder_hidden_states = encoder_hidden_states + context_attn_output + + norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) + norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] + + context_ff_output = self.ff_context(norm_encoder_hidden_states) + encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output + # if encoder_hidden_states.dtype == torch.float16: + encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504) + + return encoder_hidden_states, hidden_states + + +class QEffFluxTransformer2DModel(FluxTransformer2DModel): + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: torch.Tensor = None, + pooled_projections: torch.Tensor = None, + timestep: torch.LongTensor = None, + img_ids: torch.Tensor = None, + txt_ids: torch.Tensor = None, + adaln_emb: torch.Tensor = None, + adaln_single_emb: torch.Tensor = None, + adaln_out: torch.Tensor = None, + guidance: torch.Tensor = None, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_block_samples=None, + controlnet_single_block_samples=None, + return_dict: bool = True, + controlnet_blocks_repeat: bool = False, + ) -> Union[torch.Tensor, Transformer2DModelOutput]: + """ + The [`FluxTransformer2DModel`] forward method. + + Args: + hidden_states (`torch.Tensor` of shape `(batch_size, image_sequence_length, in_channels)`): + Input `hidden_states`. + encoder_hidden_states (`torch.Tensor` of shape `(batch_size, text_sequence_length, joint_attention_dim)`): + Conditional embeddings (embeddings computed from the input conditions such as prompts) to use. + pooled_projections (`torch.Tensor` of shape `(batch_size, projection_dim)`): Embeddings projected + from the embeddings of input conditions. + timestep ( `torch.LongTensor`): + Used to indicate denoising step. 
+ block_controlnet_hidden_states: (`list` of `torch.Tensor`): + A list of tensors that if specified are added to the residuals of transformer blocks. + joint_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain + tuple. + Returns: + If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a + `tuple` where the first element is the sample tensor. + """ + + hidden_states = self.x_embedder(hidden_states) + + timestep = timestep.to(hidden_states.dtype) * 1000 + if guidance is not None: + guidance = guidance.to(hidden_states.dtype) * 1000 + + encoder_hidden_states = self.context_embedder(encoder_hidden_states) + + if txt_ids.ndim == 3: + logger.warning( + "Passing `txt_ids` 3d torch.Tensor is deprecated." + "Please remove the batch dimension and pass it as a 2d torch Tensor" + ) + txt_ids = txt_ids[0] + if img_ids.ndim == 3: + logger.warning( + "Passing `img_ids` 3d torch.Tensor is deprecated." 
+ "Please remove the batch dimension and pass it as a 2d torch Tensor" + ) + img_ids = img_ids[0] + + ids = torch.cat((txt_ids, img_ids), dim=0) + image_rotary_emb = self.pos_embed(ids) + + if joint_attention_kwargs is not None and "ip_adapter_image_embeds" in joint_attention_kwargs: + ip_adapter_image_embeds = joint_attention_kwargs.pop("ip_adapter_image_embeds") + ip_hidden_states = self.encoder_hid_proj(ip_adapter_image_embeds) + joint_attention_kwargs.update({"ip_hidden_states": ip_hidden_states}) + + for index_block, block in enumerate(self.transformer_blocks): + encoder_hidden_states, hidden_states = block( + hidden_states=hidden_states, + encoder_hidden_states=encoder_hidden_states, + temb=adaln_emb[index_block], + image_rotary_emb=image_rotary_emb, + joint_attention_kwargs=joint_attention_kwargs, + ) + + # controlnet residual + if controlnet_block_samples is not None: + interval_control = len(self.transformer_blocks) / len(controlnet_block_samples) + interval_control = int(np.ceil(interval_control)) + # For Xlabs ControlNet. 
+ if controlnet_blocks_repeat: + hidden_states = ( + hidden_states + controlnet_block_samples[index_block % len(controlnet_block_samples)] + ) + else: + hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control] + + for index_block, block in enumerate(self.single_transformer_blocks): + encoder_hidden_states, hidden_states = block( + hidden_states=hidden_states, + encoder_hidden_states=encoder_hidden_states, + temb=adaln_single_emb[index_block], + image_rotary_emb=image_rotary_emb, + joint_attention_kwargs=joint_attention_kwargs, + ) + + # controlnet residual + if controlnet_single_block_samples is not None: + interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples) + interval_control = int(np.ceil(interval_control)) + hidden_states = hidden_states + controlnet_single_block_samples[index_block // interval_control] + + hidden_states = self.norm_out(hidden_states, adaln_out) + output = self.proj_out(hidden_states) + + if not return_dict: + return (output,) + + return Transformer2DModelOutput(sample=output) diff --git a/QEfficient/diffusers/pipelines/__init__.py b/QEfficient/diffusers/pipelines/__init__.py new file mode 100644 index 000000000..75daf1953 --- /dev/null +++ b/QEfficient/diffusers/pipelines/__init__.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ---------------------------------------------------------------------------- diff --git a/QEfficient/diffusers/pipelines/configs/flux_config.json b/QEfficient/diffusers/pipelines/configs/flux_config.json new file mode 100644 index 000000000..73b92265f --- /dev/null +++ b/QEfficient/diffusers/pipelines/configs/flux_config.json @@ -0,0 +1,99 @@ +{ + "description": "Default configuration for Flux pipeline", + + "modules": + { + "text_encoder": + { + "specializations":{ + "batch_size": 1, + "seq_len": 77 + }, + "compilation": + { + "onnx_path": null, + "compile_dir": null, + "mdp_ts_num_devices": 1, + "mxfp6_matmul": false, + "convert_to_fp16": true, + "aic_num_cores": 16, + "compile_only":true + }, + "execute": + { + "device_ids": null + } + + }, + "text_encoder_2": + { + "specializations": + { + "batch_size": 1, + "seq_len": 256 + }, + "compilation": + { + "onnx_path": null, + "compile_dir": null, + "mdp_ts_num_devices": 1, + "mxfp6_matmul": false, + "convert_to_fp16": true, + "aic_num_cores": 16, + "compile_only": true + }, + "execute": + { + "device_ids": null + } + }, + "transformer": + { + "specializations": + { + "batch_size": 1, + "seq_len": 256, + "steps": 1 + }, + "compilation": + { + "onnx_path": null, + "compile_dir": null, + "mdp_ts_num_devices": 4, + "mxfp6_matmul": true, + "convert_to_fp16": true, + "aic_num_cores": 16, + "mos": 1, + "mdts-mos": 1, + "compile_only":true + }, + "execute": + { + "device_ids": null + } + }, + "vae_decoder": + { + "specializations": + { + "batch_size": 1, + "channels": 16 + }, + "compilation": + { + "onnx_path": null, + "compile_dir": null, + "mdp_ts_num_devices": 1, + "mxfp6_matmul": false, + "convert_to_fp16": true, + "aic_num_cores": 16, + "aic-enable-depth-first": true, + "compile_only":true + }, + "execute": + { + "device_ids": null + } + } + } +} diff --git a/QEfficient/diffusers/pipelines/flux/__init__.py b/QEfficient/diffusers/pipelines/flux/__init__.py new file mode 
100644 index 000000000..75daf1953 --- /dev/null +++ b/QEfficient/diffusers/pipelines/flux/__init__.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ---------------------------------------------------------------------------- diff --git a/QEfficient/diffusers/pipelines/flux/pipeline_flux.py b/QEfficient/diffusers/pipelines/flux/pipeline_flux.py new file mode 100644 index 000000000..511746469 --- /dev/null +++ b/QEfficient/diffusers/pipelines/flux/pipeline_flux.py @@ -0,0 +1,854 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ---------------------------------------------------------------------------- + +# TODO: Pipeline Architecture Improvements +# 1. Introduce QEffDiffusionPipeline base class to provide unified export, compile, +# and inference APIs across all diffusion pipelines, promoting code reusability +# and consistent interface design. +# 2. Implement persistent QPC session management strategy to retain/drop compiled model +# sessions in memory across all pipeline modules. 
+ +import os +import time +from typing import Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from diffusers import FluxPipeline +from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import retrieve_timesteps +from tqdm import tqdm + +from QEfficient.diffusers.pipelines.pipeline_module import ( + QEffFluxTransformerModel, + QEffTextEncoder, + QEffVAE, +) +from QEfficient.diffusers.pipelines.pipeline_utils import ( + ONNX_SUBFUNCTION_MODULE, + ModulePerf, + QEffPipelineOutput, + calculate_compressed_latent_dimension, + compile_modules_parallel, + compile_modules_sequential, + config_manager, + set_module_device_ids, +) +from QEfficient.generation.cloud_infer import QAICInferenceSession +from QEfficient.utils.logging_utils import logger + + +class QEffFluxPipeline: + """ + QEfficient-optimized Flux pipeline for high-performance text-to-image generation on Qualcomm AI hardware. + + This pipeline provides an optimized implementation of the Flux diffusion model specifically designed + for deployment on Qualcomm AI Cloud (QAIC) devices. It wraps the original HuggingFace Flux model + components with QEfficient-optimized versions that can be exported to ONNX format and compiled + into Qualcomm Program Container (QPC) files for efficient inference. 
+ + The pipeline supports the complete Flux workflow including: + - Dual text encoding with CLIP and T5 encoders + - Transformer-based denoising with adaptive layer normalization + - VAE decoding for final image generation + - Performance monitoring and optimization + + Attributes: + text_encoder (QEffTextEncoder): Optimized CLIP text encoder for pooled embeddings + text_encoder_2 (QEffTextEncoder): Optimized T5 text encoder for sequence embeddings + transformer (QEffFluxTransformerModel): Optimized Flux transformer for denoising + vae_decode (QEffVAE): Optimized VAE decoder for latent-to-image conversion + modules (Dict[str, Any]): Dictionary of all pipeline modules for batch operations + model (FluxPipeline): Original HuggingFace Flux model reference + tokenizer: CLIP tokenizer for text preprocessing + scheduler: Diffusion scheduler for timestep management + + Example: + >>> from QEfficient.diffusers.pipelines.flux import QEffFluxPipeline + >>> pipeline = QEffFluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell") + >>> images = pipeline( + ... prompt="A beautiful sunset over mountains", + ... height=512, + ... width=512, + ... num_inference_steps=28 + ... ) + >>> images.images[0].save("generated_image.png") + """ + + _hf_auto_class = FluxPipeline + + def __init__(self, model, *args, **kwargs): + """ + Initialize the QEfficient Flux pipeline. + + This pipeline provides an optimized implementation of the Flux text-to-image model + for deployment on Qualcomm AI hardware. It wraps the original HuggingFace Flux model + components with QEfficient-optimized versions that can be exported to ONNX and compiled + for QAIC devices. 
+ + Args: + model: Pre-loaded FluxPipeline model + **kwargs: Additional arguments including height and width + """ + + # Wrap model components with QEfficient optimized versions + self.model = model + self.text_encoder = QEffTextEncoder(model.text_encoder) + self.text_encoder_2 = QEffTextEncoder(model.text_encoder_2) + self.transformer = QEffFluxTransformerModel(model.transformer) + self.vae_decode = QEffVAE(model.vae, "decoder") + + # Store all modules in a dictionary for easy iteration during export/compile + self.modules = { + "text_encoder": self.text_encoder, + "text_encoder_2": self.text_encoder_2, + "transformer": self.transformer, + "vae_decoder": self.vae_decode, + } + + # Copy tokenizers and scheduler from the original model + self.tokenizer = model.tokenizer + self.text_encoder.tokenizer = model.tokenizer + self.text_encoder_2.tokenizer = model.tokenizer_2 + self.tokenizer_max_length = model.tokenizer_max_length + self.scheduler = model.scheduler + + # Override VAE forward method to use decode directly + self.vae_decode.model.forward = lambda latent_sample, return_dict: self.vae_decode.model.decode( + latent_sample, return_dict + ) + + # Sync max position embeddings between text encoders + self.text_encoder_2.model.config.max_position_embeddings = ( + self.text_encoder.model.config.max_position_embeddings + ) + + @classmethod + def from_pretrained( + cls, + pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], + **kwargs, + ): + """ + Load a pretrained Flux model from HuggingFace Hub or local path and wrap it with QEfficient optimizations. + + This class method provides a convenient way to instantiate a QEffFluxPipeline from a pretrained + Flux model. It automatically loads the base FluxPipeline model in float32 precision on CPU + and wraps all components with QEfficient-optimized versions for QAIC deployment. 
+ + Args: + pretrained_model_name_or_path (str or os.PathLike): Either a HuggingFace model identifier + (e.g., "black-forest-labs/FLUX.1-schnell") or a local path to a saved model directory. + **kwargs: Additional keyword arguments passed to FluxPipeline.from_pretrained(). + + Returns: + QEffFluxPipeline: A fully initialized pipeline instance with QEfficient-optimized components + ready for export, compilation, and inference on QAIC devices. + + Raises: + ValueError: If the model path is invalid or model cannot be loaded + OSError: If there are issues accessing the model files + RuntimeError: If model initialization fails + + Example: + >>> # Load from HuggingFace Hub + >>> pipeline = QEffFluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell") + >>> + >>> # Load from local path + >>> pipeline = QEffFluxPipeline.from_pretrained("/path/to/local/flux/model") + >>> + >>> # Load with custom cache directory + >>> pipeline = QEffFluxPipeline.from_pretrained( + ... "black-forest-labs/FLUX.1-dev", + ... cache_dir="/custom/cache/dir" + ... ) + """ + # Load the base Flux model in float32 on CPU + model = cls._hf_auto_class.from_pretrained( + pretrained_model_name_or_path, + torch_dtype=torch.float32, + device_map="cpu", + **kwargs, + ) + + return cls( + model=model, + pretrained_model_name_or_path=pretrained_model_name_or_path, + **kwargs, + ) + + def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = False) -> str: + """ + Export all pipeline modules to ONNX format for deployment preparation. + + This method systematically exports each pipeline component (CLIP text encoder, T5 text encoder, + Flux transformer, and VAE decoder) to ONNX format. Each module is exported with its specific + configuration including dynamic axes, input/output specifications, and optimization settings. + + The export process prepares the models for subsequent compilation to QPC format, enabling + efficient inference on QAIC hardware. 
ONNX subfunctions can be used for certain modules
+        to optimize memory usage and performance.
+
+        Args:
+            export_dir (str, optional): Target directory for saving ONNX model files. If None,
+                uses the default export directory structure based on model name and configuration.
+                The directory will be created if it doesn't exist.
+            use_onnx_subfunctions (bool, default=False): Whether to enable ONNX subfunction
+                optimization for supported modules. This can optimize the graph and
+                improve compilation efficiency for models like the transformer.
+
+        Returns:
+            str: Absolute path to the export directory containing all ONNX model files.
+                Each module will have its own subdirectory with the exported ONNX file.
+
+        Raises:
+            RuntimeError: If ONNX export fails for any module
+            OSError: If there are issues creating the export directory or writing files
+            ValueError: If module configurations are invalid
+
+        Note:
+            - All models are exported in float32 precision for maximum compatibility
+            - Dynamic axes are configured to support variable batch sizes and sequence lengths
+            - The export process may take several minutes depending on model size
+            - Exported ONNX files can be large (several GB for complete pipeline)
+
+        Example:
+            >>> pipeline = QEffFluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell")
+            >>> export_path = pipeline.export(
+            ...     export_dir="/path/to/export",
+            ...     use_onnx_subfunctions=True
+            ... 
) + >>> print(f"Models exported to: {export_path}") + """ + for module_name, module_obj in tqdm(self.modules.items(), desc="Exporting modules", unit="module"): + # Get ONNX export configuration for this module + example_inputs, dynamic_axes, output_names = module_obj.get_onnx_params() + + export_params = { + "inputs": example_inputs, + "output_names": output_names, + "dynamic_axes": dynamic_axes, + "export_dir": export_dir, + } + + if use_onnx_subfunctions and module_name in ONNX_SUBFUNCTION_MODULE: + export_params["use_onnx_subfunctions"] = True + + module_obj.export(**export_params) + + @staticmethod + def get_default_config_path() -> str: + """ + Get the absolute path to the default Flux pipeline configuration file. + + Returns: + str: Absolute path to the flux_config.json file containing default pipeline + configuration settings for compilation and device allocation. + """ + return "QEfficient/diffusers/pipelines/configs/flux_config.json" + + def compile( + self, + compile_config: Optional[str] = None, + parallel: bool = False, + height: int = 512, + width: int = 512, + use_onnx_subfunctions: bool = False, + ) -> None: + """ + Compile ONNX models into optimized QPC format for deployment on Qualcomm AI hardware. + + Args: + compile_config (str, optional): Path to a JSON configuration file containing + compilation settings, device mappings, and optimization parameters. If None, + uses the default configuration from get_default_config_path(). + parallel (bool, default=False): Compilation mode selection: + - True: Compile modules in parallel using ThreadPoolExecutor for faster processing + - False: Compile modules sequentially for lower resource usage + height (int, default=512): Target image height in pixels. + width (int, default=512): Target image width in pixels. + use_onnx_subfunctions (bool, default=False): Whether to export models with ONNX + subfunctions before compilation. 
+ + Raises: + RuntimeError: If compilation fails for any module or if QAIC compiler is not available + FileNotFoundError: If ONNX models haven't been exported or config file is missing + ValueError: If configuration parameters are invalid + OSError: If there are issues with file I/O during compilation + + Example: + >>> pipeline = QEffFluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell") + >>> # Sequential compilation with default config + >>> pipeline.compile(height=1024, width=1024) + >>> + >>> # Parallel compilation with custom config + >>> pipeline.compile( + ... compile_config="/path/to/custom_config.json", + ... parallel=True, + ... height=512, + ... width=512 + ... ) + """ + # Ensure all modules are exported to ONNX before compilation + if any( + path is None + for path in [ + self.text_encoder.onnx_path, + self.text_encoder_2.onnx_path, + self.transformer.onnx_path, + self.vae_decode.onnx_path, + ] + ): + self.export(use_onnx_subfunctions=use_onnx_subfunctions) + + # Load compilation configuration + config_manager(self, config_source=compile_config) + + # Calculate compressed latent dimension using utility function + cl, latent_height, latent_width = calculate_compressed_latent_dimension( + height, width, self.model.vae_scale_factor + ) + + # Prepare dynamic specialization updates based on image dimensions + specialization_updates = { + "transformer": {"cl": cl}, + "vae_decoder": { + "latent_height": latent_height, + "latent_width": latent_width, + }, + } + + # Use generic utility functions for compilation + if parallel: + compile_modules_parallel(self.modules, self.custom_config, specialization_updates) + else: + compile_modules_sequential(self.modules, self.custom_config, specialization_updates) + + def _get_t5_prompt_embeds( + self, + prompt: Union[str, List[str]] = None, + num_images_per_prompt: int = 1, + max_sequence_length: int = 512, + device_ids: Optional[List[int]] = None, + ): + """ + Encode text prompts using the T5 text encoder for 
detailed semantic understanding. + + T5 provides rich sequence embeddings that capture fine-grained text details, + complementing CLIP's global representation in Flux's dual encoder setup. + + Args: + prompt (str or List[str]): Input prompt(s) to encode + num_images_per_prompt (int): Number of images to generate per prompt + max_sequence_length (int): Maximum token sequence length (default: 512) + device_ids (List[int], optional): QAIC device IDs for inference + + Returns: + tuple: (prompt_embeds, inference_time) + - prompt_embeds (torch.Tensor): Encoded embeddings [batch*num_images, seq_len, 4096] + - inference_time (float): T5 encoder inference time in seconds + """ + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + # Tokenize prompts with padding and truncation + text_inputs = self.text_encoder_2.tokenizer( + prompt, + padding="max_length", + max_length=max_sequence_length, + truncation=True, + return_length=False, + return_overflowing_tokens=False, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + + # Check for truncation and warn user + untruncated_ids = self.text_encoder_2.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.text_encoder_2.tokenizer.batch_decode( + untruncated_ids[:, self.text_encoder_2.tokenizer.model_max_length - 1 : -1] + ) + logger.warning( + f"The following part of your input was truncated because `max_sequence_length` is set to " + f"{self.text_encoder_2.tokenizer.model_max_length} tokens: {removed_text}" + ) + + # Initialize QAIC inference session if not already created + if self.text_encoder_2.qpc_session is None: + self.text_encoder_2.qpc_session = QAICInferenceSession( + str(self.text_encoder_2.qpc_path), device_ids=device_ids + ) + + # Allocate output buffers for QAIC inference + text_encoder_2_output = { + 
"last_hidden_state": np.random.rand( + batch_size, max_sequence_length, self.text_encoder_2.model.config.d_model + ).astype(np.int32), + } + self.text_encoder_2.qpc_session.set_buffers(text_encoder_2_output) + + # Prepare input for QAIC inference + aic_text_input = {"input_ids": text_input_ids.numpy().astype(np.int64)} + + # Run T5 encoder inference and measure time + start_t5_time = time.perf_counter() + prompt_embeds = torch.tensor(self.text_encoder_2.qpc_session.run(aic_text_input)["last_hidden_state"]) + end_t5_time = time.perf_counter() + text_encoder_2_perf = end_t5_time - start_t5_time + + # Duplicate embeddings for multiple images per prompt + _, seq_len, _ = prompt_embeds.shape + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) + + return prompt_embeds, text_encoder_2_perf + + def _get_clip_prompt_embeds( + self, + prompt: Union[str, List[str]], + num_images_per_prompt: int = 1, + device_ids: Optional[List[int]] = None, + ): + """ + Encode text prompts using the CLIP text encoder for global semantic representation. + + CLIP provides pooled embeddings that capture high-level semantic meaning, + working alongside T5's detailed sequence embeddings in Flux's dual encoder setup. 
+ + Args: + prompt (str or List[str]): Input prompt(s) to encode + num_images_per_prompt (int): Number of images to generate per prompt + device_ids (List[int], optional): QAIC device IDs for inference + + Returns: + tuple: (pooled_prompt_embeds, inference_time) + - pooled_prompt_embeds (torch.Tensor): Pooled embeddings [batch*num_images, 768] + - inference_time (float): CLIP encoder inference time in seconds + """ + prompt = [prompt] if isinstance(prompt, str) else prompt + batch_size = len(prompt) + + # Tokenize prompts + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer_max_length, + truncation=True, + return_overflowing_tokens=False, + return_length=False, + return_tensors="pt", + ) + + text_input_ids = text_inputs.input_ids + + # Check for truncation and warn user + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + f"The following part of your input was truncated because CLIP can only handle sequences up to " + f"{self.tokenizer_max_length} tokens: {removed_text}" + ) + + # Initialize QAIC inference session if not already created + if self.text_encoder.qpc_session is None: + self.text_encoder.qpc_session = QAICInferenceSession(str(self.text_encoder.qpc_path), device_ids=device_ids) + + # Allocate output buffers for QAIC inference + text_encoder_output = { + "last_hidden_state": np.random.rand( + batch_size, self.tokenizer_max_length, self.text_encoder.model.config.hidden_size + ).astype(np.float32), + "pooler_output": np.random.rand(batch_size, self.text_encoder.model.config.hidden_size).astype(np.int32), + } + self.text_encoder.qpc_session.set_buffers(text_encoder_output) + + # Prepare input for QAIC inference + aic_text_input = {"input_ids": 
text_input_ids.numpy().astype(np.int64)} + + # Run CLIP encoder inference and measure time + start_text_encoder_time = time.perf_counter() + aic_embeddings = self.text_encoder.qpc_session.run(aic_text_input) + end_text_encoder_time = time.perf_counter() + text_encoder_perf = end_text_encoder_time - start_text_encoder_time + # Extract pooled output (used for conditioning in Flux) + prompt_embeds = torch.tensor(aic_embeddings["pooler_output"]) + + # Duplicate embeddings for multiple images per prompt + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1) + + return prompt_embeds, text_encoder_perf + + def encode_prompt( + self, + prompt: Union[str, List[str]], + prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: int = 1, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + max_sequence_length: int = 512, + ): + """ + Encode text prompts using Flux's dual text encoder architecture. + + Flux employs both CLIP and T5 encoders for comprehensive text understanding: + - CLIP provides pooled embeddings for global semantic conditioning + - T5 provides detailed sequence embeddings for fine-grained text control + + Args: + prompt (str or List[str]): Primary prompt(s) for both encoders + prompt_2 (str or List[str], optional): Secondary prompt(s) for T5. 
If None, uses primary prompt + num_images_per_prompt (int): Number of images to generate per prompt + prompt_embeds (torch.FloatTensor, optional): Pre-computed T5 embeddings + pooled_prompt_embeds (torch.FloatTensor, optional): Pre-computed CLIP pooled embeddings + max_sequence_length (int): Maximum sequence length for T5 tokenization + + Returns: + tuple: (prompt_embeds, pooled_prompt_embeds, text_ids, encoder_perf_times) + - prompt_embeds (torch.Tensor): T5 sequence embeddings [batch*num_images, seq_len, 4096] + - pooled_prompt_embeds (torch.Tensor): CLIP pooled embeddings [batch*num_images, 768] + - text_ids (torch.Tensor): Position IDs for text tokens [seq_len, 3] + - encoder_perf_times (List[float]): Performance times [CLIP_time, T5_time] + """ + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt_embeds is None: + # Use primary prompt for both encoders if secondary not provided + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # Encode with CLIP (returns pooled embeddings) + pooled_prompt_embeds, text_encoder_perf = self._get_clip_prompt_embeds( + prompt=prompt, + device_ids=self.text_encoder.device_ids, + num_images_per_prompt=num_images_per_prompt, + ) + + # Encode with T5 (returns sequence embeddings) + prompt_embeds, text_encoder_2_perf = self._get_t5_prompt_embeds( + prompt=prompt_2, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device_ids=self.text_encoder_2.device_ids, + ) + + # Create text position IDs (required by Flux transformer) + text_ids = torch.zeros(prompt_embeds.shape[1], 3) + + return prompt_embeds, pooled_prompt_embeds, text_ids, [text_encoder_perf, text_encoder_2_perf] + + def __call__( + self, + height: int = 512, + width: int = 512, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt: Union[str, List[str]] = None, + negative_prompt_2: Optional[Union[str, 
List[str]]] = None, + true_cfg_scale: float = 1.0, + num_inference_steps: int = 28, + timesteps: List[int] = None, + guidance_scale: float = 3.5, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 512, + custom_config_path: Optional[str] = None, + parallel_compile: bool = False, + use_onnx_subfunctions: bool = False, + ): + """ + Generate images from text prompts using the QEfficient-optimized Flux pipeline on QAIC hardware. + + This is the main entry point for text-to-image generation. It orchestrates the complete Flux + diffusion pipeline optimized for Qualcomm AI Cloud devices. + + Args: + height (int, optional): Target image height in pixels. Must be divisible by 8. Default: 512. + width (int, optional): Target image width in pixels. Must be divisible by 8. Default: 512. + prompt (str or List[str]): Primary text prompt(s) describing the desired image(s). + Required unless `prompt_embeds` is provided. + prompt_2 (str or List[str], optional): Secondary prompt for T5 encoder. If None, uses `prompt`. + negative_prompt (str or List[str], optional): Negative prompt(s) describing what to avoid. + Only used when `true_cfg_scale > 1.0`. + negative_prompt_2 (str or List[str], optional): Secondary negative prompt for T5. If None, uses `negative_prompt`. + true_cfg_scale (float, optional): True classifier-free guidance scale. Values > 1.0 enable + negative prompting. Default: 1.0 (disabled). 
+ num_inference_steps (int, optional): Number of denoising steps. Default: 28. + timesteps (List[int], optional): Custom timestep schedule. If provided, overrides `num_inference_steps`. + guidance_scale (float, optional): Guidance scale for classifier-free guidance. Default: 3.5. + num_images_per_prompt (int, optional): Number of images to generate per prompt. Default: 1. + generator (torch.Generator or List[torch.Generator], optional): Random generator for reproducibility. + latents (torch.FloatTensor, optional): Pre-generated latent tensors. If None, random latents are generated. + prompt_embeds (torch.FloatTensor, optional): Pre-computed T5 text embeddings. Shape: [batch, seq_len, 4096]. + pooled_prompt_embeds (torch.FloatTensor, optional): Pre-computed CLIP pooled embeddings. Shape: [batch, 768]. + negative_prompt_embeds (torch.FloatTensor, optional): Pre-computed negative T5 embeddings. + negative_pooled_prompt_embeds (torch.FloatTensor, optional): Pre-computed negative CLIP embeddings. + output_type (str, optional): Output format. Options: "pil" (default), "np", or "latent". + callback_on_step_end (Callable, optional): Callback function executed after each denoising step. + callback_on_step_end_tensor_inputs (List[str], optional): Tensor names to pass to callback. Default: ["latents"]. + max_sequence_length (int, optional): Maximum token sequence length for T5 encoder. Default: 512. + custom_config_path (str, optional): Path to custom JSON configuration file for compilation settings. + parallel_compile (bool, optional): Whether to compile modules in parallel. Default: False. + use_onnx_subfunctions (bool, optional): Whether to export transformer blocks as ONNX subfunctions. Default: False. 
+ + Returns: + QEffPipelineOutput: A dataclass containing: + - images: Generated image(s) in the format specified by `output_type` + - pipeline_module: Performance metrics for each pipeline component (text encoders, transformer, VAE) + + Raises: + ValueError: If input validation fails or parameters are incompatible. + RuntimeError: If compilation fails or QAIC devices are unavailable. + FileNotFoundError: If custom config file is specified but not found. + + Example: + >>> from QEfficient.diffusers.pipelines.flux import QEffFluxPipeline + >>> pipeline = QEffFluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell") + >>> result = pipeline( + ... prompt="A serene mountain landscape at sunset", + ... height=1024, + ... width=1024, + ... num_inference_steps=28, + ... guidance_scale=7.5 + ... ) + >>> result.images[0].save("mountain_sunset.png") + >>> print(f"Transformer inference time: {sum(result.pipeline_module[2].perf):.2f}s") + """ + device = self.model._execution_device + + if height is None or width is None: + logger.warning("Height or width is None. 
Setting default values of 512 for both dimensions.") + + self.compile( + compile_config=custom_config_path, + parallel=parallel_compile, + height=height, + width=width, + use_onnx_subfunctions=use_onnx_subfunctions, + ) + + # Set device IDs for all modules based on configuration + set_module_device_ids(self) + + # Validate all inputs + self.model.check_inputs( + prompt, + prompt_2, + height, + width, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._interrupt = False + + # Step 2: Determine batch size from inputs + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # Step 3: Encode prompts with both text encoders + has_neg_prompt = negative_prompt is not None or ( + negative_prompt_embeds is not None and negative_pooled_prompt_embeds is not None + ) + do_true_cfg = true_cfg_scale > 1 and has_neg_prompt + + (prompt_embeds, pooled_prompt_embeds, text_ids, text_encoder_perf) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + ) + + # Encode negative prompts if using true classifier-free guidance + if do_true_cfg: + ( + negative_prompt_embeds, + negative_pooled_prompt_embeds, + negative_text_ids, + ) = self.encode_prompt( + prompt=negative_prompt, + prompt_2=negative_prompt_2, + prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=negative_pooled_prompt_embeds, + 
num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + ) + + # Step 4: Prepare timesteps for denoising + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + + # Step 5: Prepare initial latents + num_channels_latents = self.transformer.model.config.in_channels // 4 + latents, latent_image_ids = self.model.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # Step 6: Calculate compressed latent dimension for transformer buffer allocation + cl, _, _ = calculate_compressed_latent_dimension(height, width, self.model.vae_scale_factor) + + # Initialize transformer inference session + if self.transformer.qpc_session is None: + self.transformer.qpc_session = QAICInferenceSession( + str(self.transformer.qpc_path), device_ids=self.transformer.device_ids + ) + + # Allocate output buffer for transformer + output_buffer = { + "output": np.random.rand(batch_size, cl, self.transformer.model.config.in_channels).astype(np.float32), + } + self.transformer.qpc_session.set_buffers(output_buffer) + + transformer_perf = [] + self.scheduler.set_begin_index(0) + + # Step 7: Denoising loop + with self.model.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Prepare timestep embedding + timestep = t.expand(latents.shape[0]).to(latents.dtype) + temb = self.transformer.model.time_text_embed(timestep, pooled_prompt_embeds) + + # Compute AdaLN (Adaptive Layer Normalization) embeddings for dual transformer blocks + adaln_emb = [] + for block_idx in range(len(self.transformer.model.transformer_blocks)): + block = self.transformer.model.transformer_blocks[block_idx] + # Process through norm1 and norm1_context + f1 = 
block.norm1.linear(block.norm1.silu(temb)).chunk(6, dim=1) + f2 = block.norm1_context.linear(block.norm1_context.silu(temb)).chunk(6, dim=1) + adaln_emb.append(torch.cat(list(f1) + list(f2))) + adaln_dual_emb = torch.stack(adaln_emb) + + # Compute AdaLN embeddings for single transformer blocks + adaln_emb = [] + for block_idx in range(len(self.transformer.model.single_transformer_blocks)): + block = self.transformer.model.single_transformer_blocks[block_idx] + f1 = block.norm.linear(block.norm.silu(temb)).chunk(3, dim=1) + adaln_emb.append(torch.cat(list(f1))) + adaln_single_emb = torch.stack(adaln_emb) + + # Compute output AdaLN embedding + temp = self.transformer.model.norm_out + adaln_out = temp.linear(temp.silu(temb)) + + # Normalize timestep to [0, 1] range + timestep = timestep / 1000 + + # Prepare all inputs for transformer inference + inputs_aic = { + "hidden_states": latents.detach().numpy(), + "encoder_hidden_states": prompt_embeds.detach().numpy(), + "pooled_projections": pooled_prompt_embeds.detach().numpy(), + "timestep": timestep.detach().numpy(), + "img_ids": latent_image_ids.detach().numpy(), + "txt_ids": text_ids.detach().numpy(), + "adaln_emb": adaln_dual_emb.detach().numpy(), + "adaln_single_emb": adaln_single_emb.detach().numpy(), + "adaln_out": adaln_out.detach().numpy(), + } + + # Run transformer inference and measure time + start_transformer_step_time = time.perf_counter() + outputs = self.transformer.qpc_session.run(inputs_aic) + end_transformer_step_time = time.perf_counter() + transformer_perf.append(end_transformer_step_time - start_transformer_step_time) + + noise_pred = torch.from_numpy(outputs["output"]) + + # Update latents using scheduler (x_t -> x_t-1) + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + # Handle dtype mismatch (workaround for MPS backend bug) + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + latents = latents.to(latents_dtype) 
+ + # Execute callback if provided + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + + # Update progress bar + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + # Step 8: Decode latents to images (unless output_type is "latent") + if output_type == "latent": + image = latents + else: + # Unpack and denormalize latents + latents = self.model._unpack_latents(latents, height, width, self.model.vae_scale_factor) + latents = (latents / self.vae_decode.model.scaling_factor) + self.vae_decode.model.shift_factor + + # Initialize VAE decoder inference session + if self.vae_decode.qpc_session is None: + self.vae_decode.qpc_session = QAICInferenceSession( + str(self.vae_decode.qpc_path), device_ids=self.vae_decode.device_ids + ) + + # Allocate output buffer for VAE decoder + output_buffer = {"sample": np.random.rand(batch_size, 3, height, width).astype(np.int32)} + self.vae_decode.qpc_session.set_buffers(output_buffer) + + # Run VAE decoder inference and measure time + inputs = {"latent_sample": latents.numpy()} + start_decode_time = time.perf_counter() + image = self.vae_decode.qpc_session.run(inputs) + end_decode_time = time.perf_counter() + vae_decode_perf = end_decode_time - start_decode_time + + # Post-process image + image_tensor = torch.from_numpy(image["sample"]) + image = self.model.image_processor.postprocess(image_tensor, output_type=output_type) + + # Build performance metrics + perf_metrics = [ + ModulePerf(module_name="text_encoder", perf=text_encoder_perf[0]), + ModulePerf(module_name="text_encoder_2", perf=text_encoder_perf[1]), + ModulePerf(module_name="transformer", perf=transformer_perf), + 
ModulePerf(module_name="vae_decoder", perf=vae_decode_perf), + ] + + return QEffPipelineOutput( + pipeline_module=perf_metrics, + images=image, + ) diff --git a/QEfficient/diffusers/pipelines/pipeline_module.py b/QEfficient/diffusers/pipelines/pipeline_module.py new file mode 100644 index 000000000..41a3d29f7 --- /dev/null +++ b/QEfficient/diffusers/pipelines/pipeline_module.py @@ -0,0 +1,481 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ---------------------------------------------------------------------------- + +from typing import Dict, List, Tuple + +import torch +import torch.nn as nn + +from QEfficient.base.modeling_qeff import QEFFBaseModel +from QEfficient.base.onnx_transforms import FP16ClipTransform, SplitTensorsTransform +from QEfficient.diffusers.models.pytorch_transforms import ( + AttentionTransform, + CustomOpsTransform, + NormalizationTransform, +) +from QEfficient.diffusers.models.transformers.transformer_flux import ( + QEffFluxSingleTransformerBlock, + QEffFluxTransformerBlock, +) +from QEfficient.transformers.models.pytorch_transforms import ( + T5ModelTransform, +) +from QEfficient.utils import constants + + +class QEffTextEncoder(QEFFBaseModel): + """ + Wrapper for text encoder models with ONNX export and QAIC compilation capabilities. + + This class handles text encoder models (CLIP, T5) with specific transformations and + optimizations for efficient inference on Qualcomm AI hardware. It applies custom + PyTorch and ONNX transformations to prepare models for deployment. 
+
+    Attributes:
+        model (nn.Module): The wrapped text encoder model
+        _pytorch_transforms (List): PyTorch transformations applied before ONNX export
+        _onnx_transforms (List): ONNX transformations applied after export
+    """
+
+    _pytorch_transforms = [CustomOpsTransform, T5ModelTransform]
+    _onnx_transforms = [FP16ClipTransform, SplitTensorsTransform]
+
+    @property
+    def get_model_config(self) -> Dict:
+        """
+        Get the model configuration as a dictionary.
+
+        Returns:
+            Dict: The configuration dictionary of the underlying text encoder model
+        """
+        return self.model.config.__dict__
+
+    def __init__(self, model: nn.Module) -> None:
+        """
+        Initialize the text encoder wrapper.
+
+        Args:
+            model (nn.Module): The text encoder model to wrap (CLIP or T5)
+        """
+        super().__init__(model)
+        self.model = model
+
+    def get_onnx_params(self) -> Tuple[Dict, Dict, List[str]]:
+        """
+        Generate ONNX export configuration for the text encoder.
+
+        Creates example inputs, dynamic axes specifications, and output names
+        tailored to the specific text encoder type (CLIP vs T5).
+ + Returns: + Tuple containing: + - example_inputs (Dict): Sample inputs for ONNX export + - dynamic_axes (Dict): Specification of dynamic dimensions + - output_names (List[str]): Names of model outputs + """ + bs = constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE + + # Create example input with max sequence length + example_inputs = { + "input_ids": torch.zeros((bs, self.model.config.max_position_embeddings), dtype=torch.int64), + } + + # Define which dimensions can vary at runtime + dynamic_axes = {"input_ids": {0: "batch_size", 1: "seq_len"}} + + # T5 only outputs hidden states, CLIP outputs both hidden states and pooled output + if self.model.__class__.__name__ == "T5EncoderModel": + output_names = ["last_hidden_state"] + else: + output_names = ["last_hidden_state", "pooler_output"] + example_inputs["output_hidden_states"] = False + + return example_inputs, dynamic_axes, output_names + + def export( + self, + inputs: Dict, + output_names: List[str], + dynamic_axes: Dict, + export_dir: str = None, + export_kwargs: Dict = None, + ) -> str: + """ + Export the text encoder model to ONNX format. + + Args: + inputs (Dict): Example inputs for ONNX export + output_names (List[str]): Names of model outputs + dynamic_axes (Dict): Specification of dynamic dimensions + export_dir (str, optional): Directory to save ONNX model + export_kwargs (Dict, optional): Additional export arguments + + Returns: + str: Path to the exported ONNX model + """ + return self._export( + example_inputs=inputs, + output_names=output_names, + dynamic_axes=dynamic_axes, + export_dir=export_dir, + export_kwargs=export_kwargs, + ) + + def compile(self, specializations: List[Dict], **compiler_options) -> None: + """ + Compile the ONNX model for Qualcomm AI hardware. 
+ + Args: + specializations (List[Dict]): Model specialization configurations + **compiler_options: Additional compiler options (e.g., num_cores, aic_num_of_activations) + """ + self._compile(specializations=specializations, **compiler_options) + + +class QEffUNet(QEFFBaseModel): + """ + Wrapper for UNet models with ONNX export and QAIC compilation capabilities. + + This class handles UNet models with specific transformations and optimizations + for efficient inference on Qualcomm AI hardware. UNet is commonly used in + diffusion models for image generation tasks. + + Attributes: + model (nn.Module): The wrapped UNet model + _pytorch_transforms (List): PyTorch transformations applied before ONNX export + _onnx_transforms (List): ONNX transformations applied after export + """ + + _pytorch_transforms = [CustomOpsTransform] + _onnx_transforms = [FP16ClipTransform, SplitTensorsTransform] + + @property + def get_model_config(self) -> Dict: + """ + Get the model configuration as a dictionary. + + Returns: + Dict: The configuration dictionary of the underlying UNet model + """ + return self.model.config.__dict__ + + def __init__(self, model: nn.Module) -> None: + """ + Initialize the UNet wrapper. + + Args: + model (nn.Module): The pipeline model containing the UNet + """ + super().__init__(model.unet) + self.model = model.unet + + def export( + self, + inputs: Dict, + output_names: List[str], + dynamic_axes: Dict, + export_dir: str = None, + export_kwargs: Dict = None, + ) -> str: + """ + Export the UNet model to ONNX format. 
+ + Args: + inputs (Dict): Example inputs for ONNX export + output_names (List[str]): Names of model outputs + dynamic_axes (Dict): Specification of dynamic dimensions + export_dir (str, optional): Directory to save ONNX model + export_kwargs (Dict, optional): Additional export arguments + + Returns: + str: Path to the exported ONNX model + """ + return self._export( + example_inputs=inputs, + output_names=output_names, + dynamic_axes=dynamic_axes, + export_dir=export_dir, + export_kwargs=export_kwargs, + ) + + def compile(self, specializations: List[Dict], **compiler_options) -> None: + """ + Compile the ONNX model for Qualcomm AI hardware. + + Args: + specializations (List[Dict]): Model specialization configurations + **compiler_options: Additional compiler options + """ + self._compile(specializations=specializations, **compiler_options) + + +class QEffVAE(QEFFBaseModel): + """ + Wrapper for Variational Autoencoder (VAE) models with ONNX export and QAIC compilation. + + This class handles VAE models with specific transformations and optimizations + for efficient inference on Qualcomm AI hardware. VAE models are used in diffusion + pipelines for encoding images to latent space and decoding latents back to images. + + Attributes: + model (nn.Module): The wrapped VAE model (deep copy of original) + type (str): VAE operation type ("encoder" or "decoder") + _pytorch_transforms (List): PyTorch transformations applied before ONNX export + _onnx_transforms (List): ONNX transformations applied after export + """ + + _pytorch_transforms = [CustomOpsTransform] + _onnx_transforms = [FP16ClipTransform, SplitTensorsTransform] + + @property + def get_model_config(self) -> Dict: + """ + Get the model configuration as a dictionary. + + Returns: + Dict: The configuration dictionary of the underlying VAE model + """ + return self.model.config.__dict__ + + def __init__(self, model: nn.Module, type: str) -> None: + """ + Initialize the VAE wrapper. 
+ + Args: + model (nn.Module): The pipeline model containing the VAE + type (str): VAE operation type ("encoder" or "decoder") + """ + super().__init__(model) + self.model = model + + # To have different hashing for encoder/decoder + self.model.config["type"] = type + + def get_onnx_params(self, latent_height: int = 32, latent_width: int = 32) -> Tuple[Dict, Dict, List[str]]: + """ + Generate ONNX export configuration for the VAE decoder. + + Args: + latent_height (int): Height of latent representation (default: 32) + latent_width (int): Width of latent representation (default: 32) + + Returns: + Tuple containing: + - example_inputs (Dict): Sample inputs for ONNX export + - dynamic_axes (Dict): Specification of dynamic dimensions + - output_names (List[str]): Names of model outputs + """ + bs = constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE + + # VAE decoder takes latent representation as input + example_inputs = { + "latent_sample": torch.randn(bs, 16, latent_height, latent_width), + "return_dict": False, + } + + output_names = ["sample"] + + # All dimensions except channels can be dynamic + dynamic_axes = { + "latent_sample": {0: "batch_size", 1: "channels", 2: "latent_height", 3: "latent_width"}, + } + + return example_inputs, dynamic_axes, output_names + + def export( + self, + inputs: Dict, + output_names: List[str], + dynamic_axes: Dict, + export_dir: str = None, + export_kwargs: Dict = None, + ) -> str: + """ + Export the VAE model to ONNX format. 
+ + Args: + inputs (Dict): Example inputs for ONNX export + output_names (List[str]): Names of model outputs + dynamic_axes (Dict): Specification of dynamic dimensions + export_dir (str, optional): Directory to save ONNX model + export_kwargs (Dict, optional): Additional export arguments + + Returns: + str: Path to the exported ONNX model + """ + return self._export( + example_inputs=inputs, + output_names=output_names, + dynamic_axes=dynamic_axes, + export_dir=export_dir, + export_kwargs=export_kwargs, + ) + + def compile(self, specializations: List[Dict], **compiler_options) -> None: + """ + Compile the ONNX model for Qualcomm AI hardware. + + Args: + specializations (List[Dict]): Model specialization configurations + **compiler_options: Additional compiler options + """ + self._compile(specializations=specializations, **compiler_options) + + +class QEffFluxTransformerModel(QEFFBaseModel): + """ + Wrapper for Flux Transformer2D models with ONNX export and QAIC compilation capabilities. + + This class handles Flux Transformer2D models with specific transformations and optimizations + for efficient inference on Qualcomm AI hardware. Flux uses a transformer-based diffusion + architecture instead of traditional UNet, with dual transformer blocks and adaptive layer + normalization (AdaLN) for conditioning. + + Attributes: + model (nn.Module): The wrapped Flux transformer model + _pytorch_transforms (List): PyTorch transformations applied before ONNX export + _onnx_transforms (List): ONNX transformations applied after export + """ + + _pytorch_transforms = [AttentionTransform, NormalizationTransform, CustomOpsTransform] + _onnx_transforms = [FP16ClipTransform, SplitTensorsTransform] + + @property + def get_model_config(self) -> Dict: + """ + Get the model configuration as a dictionary. 
+ + Returns: + Dict: The configuration dictionary of the underlying Flux transformer model + """ + return self.model.config.__dict__ + + def __init__(self, model: nn.Module) -> None: + """ + Initialize the Flux transformer wrapper. + + Args: + model (nn.Module): The Flux transformer model to wrap + use_onnx_subfunctions (bool): Whether to export transformer blocks as ONNX functions + for better modularity and potential optimization + """ + super().__init__(model) + + def get_onnx_params( + self, + batch_size: int = constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE, + seq_length: int = constants.FLUX_ONNX_EXPORT_SEQ_LENGTH, + cl: int = constants.FLUX_ONNX_EXPORT_COMPRESSED_LATENT_DIM, + ) -> Tuple[Dict, Dict, List[str]]: + """ + Generate ONNX export configuration for the Flux transformer. + + Creates example inputs for all Flux-specific inputs including hidden states, + text embeddings, timestep conditioning, and AdaLN embeddings. + + Args: + batch_size (int): Batch size for example inputs (default: FLUX_ONNX_EXPORT_BATCH_SIZE) + seq_length (int): Text sequence length (default: FLUX_ONNX_EXPORT_SEQ_LENGTH) + cl (int): Compressed latent dimension (default: FLUX_ONNX_EXPORT_COMPRESSED_LATENT_DIM) + + Returns: + Tuple containing: + - example_inputs (Dict): Sample inputs for ONNX export + - dynamic_axes (Dict): Specification of dynamic dimensions + - output_names (List[str]): Names of model outputs + """ + example_inputs = { + # Latent representation of the image + "hidden_states": torch.randn(batch_size, cl, self.model.config.in_channels, dtype=torch.float32), + "encoder_hidden_states": torch.randn( + batch_size, seq_length, self.model.config.joint_attention_dim, dtype=torch.float32 + ), + "pooled_projections": torch.randn(batch_size, self.model.config.pooled_projection_dim, dtype=torch.float32), + "timestep": torch.tensor([1.0], dtype=torch.float32), + "img_ids": torch.randn(cl, 3, dtype=torch.float32), + "txt_ids": torch.randn(seq_length, 3, dtype=torch.float32), + # AdaLN 
embeddings for dual transformer blocks + # Shape: [num_layers, FLUX_ADALN_DUAL_BLOCK_CHUNKS, FLUX_ADALN_HIDDEN_DIM] + "adaln_emb": torch.randn( + self.model.config["num_layers"], + constants.FLUX_ADALN_DUAL_BLOCK_CHUNKS, + constants.FLUX_ADALN_HIDDEN_DIM, + dtype=torch.float32, + ), + # AdaLN embeddings for single transformer blocks + # Shape: [num_single_layers, FLUX_ADALN_SINGLE_BLOCK_CHUNKS, FLUX_ADALN_HIDDEN_DIM] + "adaln_single_emb": torch.randn( + self.model.config["num_single_layers"], + constants.FLUX_ADALN_SINGLE_BLOCK_CHUNKS, + constants.FLUX_ADALN_HIDDEN_DIM, + dtype=torch.float32, + ), + # Output AdaLN embedding + # Shape: [batch_size, FLUX_ADALN_OUTPUT_DIM] for final projection + "adaln_out": torch.randn(batch_size, constants.FLUX_ADALN_OUTPUT_DIM, dtype=torch.float32), + } + + output_names = ["output"] + + # Define dynamic dimensions for runtime flexibility + dynamic_axes = { + "hidden_states": {0: "batch_size", 1: "cl"}, + "encoder_hidden_states": {0: "batch_size", 1: "seq_len"}, + "pooled_projections": {0: "batch_size"}, + "timestep": {0: "steps"}, + "img_ids": {0: "cl"}, + } + + return example_inputs, dynamic_axes, output_names + + def export( + self, + inputs: Dict, + output_names: List[str], + dynamic_axes: Dict, + export_dir: str = None, + export_kwargs: Dict = None, + use_onnx_subfunctions: bool = False, + ) -> str: + """ + Export the Flux transformer model to ONNX format. 
+ + Args: + inputs (Dict): Example inputs for ONNX export + output_names (List[str]): Names of model outputs + dynamic_axes (Dict): Specification of dynamic dimensions + export_dir (str, optional): Directory to save ONNX model + export_kwargs (Dict, optional): Additional export arguments (e.g., export_modules_as_functions) + + Returns: + str: Path to the exported ONNX model + """ + + if use_onnx_subfunctions: + export_kwargs = {"export_modules_as_functions": {QEffFluxTransformerBlock, QEffFluxSingleTransformerBlock}} + + # Sort _use_default_values in config to ensure consistent hash generation during export + self.model.config["_use_default_values"].sort() + + return self._export( + example_inputs=inputs, + output_names=output_names, + dynamic_axes=dynamic_axes, + export_dir=export_dir, + export_kwargs=export_kwargs, + offload_pt_weights=False, # As weights are needed with AdaLN changes + ) + + def compile(self, specializations: List[Dict], **compiler_options) -> None: + """ + Compile the ONNX model for Qualcomm AI hardware. + + Args: + specializations (List[Dict]): Model specialization configurations + **compiler_options: Additional compiler options (e.g., num_cores, aic_num_of_activations) + """ + self._compile(specializations=specializations, **compiler_options) diff --git a/QEfficient/diffusers/pipelines/pipeline_utils.py b/QEfficient/diffusers/pipelines/pipeline_utils.py new file mode 100644 index 000000000..24eb36f53 --- /dev/null +++ b/QEfficient/diffusers/pipelines/pipeline_utils.py @@ -0,0 +1,218 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# -----------------------------------------------------------------------------
+
+import os
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Union
+
+import numpy as np
+import PIL.Image
+from tqdm import tqdm
+
+from QEfficient.utils._utils import load_json
+from QEfficient.utils.logging_utils import logger
+
+
+def calculate_compressed_latent_dimension(height: int, width: int, vae_scale_factor: int) -> int:
+    """
+    Calculate the compressed latent dimension.
+
+    Args:
+        height (int): Target image height in pixels
+        width (int): Target image width in pixels
+        vae_scale_factor (int): VAE downsampling factor (typically 8 for Flux)
+
+    Returns:
+        Tuple[int, int, int]: (cl, latent_height, latent_width), where cl is the
+        compressed latent dimension used for transformer input buffer allocation
+    """
+    latent_height = height // vae_scale_factor
+    latent_width = width // vae_scale_factor
+    # cl = compressed latent dimension (divided by 4 for Flux's 2x2 packing)
+    cl = (latent_height * latent_width) // 4
+    return cl, latent_height, latent_width
+
+
+def config_manager(cls, config_source: Optional[str] = None):
+    """
+    Load the JSON-based compilation configuration for a diffusion pipeline.
+
+    Supports loading configuration from JSON files only. Automatically detects
+    model type and handles model-specific requirements.
+
+    Args:
+        config_source: Path to JSON configuration file. If None, uses default config.
+ """ + if config_source is None: + config_source = cls.get_default_config_path() + + if not isinstance(config_source, str): + raise ValueError("config_source must be a path to JSON configuration file") + + # Direct use of load_json utility - no wrapper needed + if not os.path.exists(config_source): + raise FileNotFoundError(f"Configuration file not found: {config_source}") + + cls.custom_config = load_json(config_source) + + +def set_module_device_ids(cls): + """ + Set device IDs for each module based on the custom configuration. + + Iterates through all modules in the pipeline and assigns device IDs + from the configuration file to each module's device_ids attribute. + """ + config_modules = cls.custom_config["modules"] + for module_name, module_obj in cls.modules.items(): + module_obj.device_ids = config_modules[module_name]["execute"]["device_ids"] + + +def compile_modules_parallel( + modules: Dict[str, Any], + config: Dict[str, Any], + specialization_updates: Dict[str, Dict[str, Any]] = None, +) -> None: + """ + Compile multiple pipeline modules in parallel using ThreadPoolExecutor. 
+ + Args: + modules: Dictionary of module_name -> module_object pairs to compile + config: Configuration dictionary containing module-specific compilation settings + specialization_updates: Optional dictionary of module_name -> specialization_updates + to apply dynamic values (e.g., image dimensions) + """ + + def _prepare_and_compile(module_name: str, module_obj: Any) -> None: + """Prepare specializations and compile a single module.""" + specializations = config["modules"][module_name]["specializations"].copy() + compile_kwargs = config["modules"][module_name]["compilation"] + + if specialization_updates and module_name in specialization_updates: + specializations.update(specialization_updates[module_name]) + + module_obj.compile(specializations=[specializations], **compile_kwargs) + + # Execute compilations in parallel + with ThreadPoolExecutor(max_workers=len(modules)) as executor: + futures = {executor.submit(_prepare_and_compile, name, obj): name for name, obj in modules.items()} + + with tqdm(total=len(futures), desc="Compiling modules", unit="module") as pbar: + for future in as_completed(futures): + try: + future.result() + except Exception as e: + logger.error(f"Compilation failed for {futures[future]}: {e}") + raise + pbar.update(1) + + +def compile_modules_sequential( + modules: Dict[str, Any], + config: Dict[str, Any], + specialization_updates: Dict[str, Dict[str, Any]] = None, +) -> None: + """ + Compile multiple pipeline modules sequentially. + + This function provides a generic way to compile diffusion pipeline modules + sequentially, which is the default behavior for backward compatibility. 
+ + Args: + modules: Dictionary of module_name -> module_object pairs to compile + config: Configuration dictionary containing module-specific compilation settings + specialization_updates: Optional dictionary of module_name -> specialization_updates + to apply dynamic values (e.g., image dimensions) + + """ + for module_name, module_obj in tqdm(modules.items(), desc="Compiling modules", unit="module"): + module_config = config["modules"] + specializations = module_config[module_name]["specializations"].copy() + compile_kwargs = module_config[module_name]["compilation"] + + # Apply dynamic specialization updates if provided + if specialization_updates and module_name in specialization_updates: + specializations.update(specialization_updates[module_name]) + + # Compile the module to QPC format + module_obj.compile(specializations=[specializations], **compile_kwargs) + + +@dataclass(frozen=True) +class ModulePerf: + """ + Data class to store performance metrics for a pipeline module. + + Attributes: + module_name: Name of the pipeline module (e.g., 'text_encoder', 'transformer', 'vae_decoder') + perf: Performance metric in seconds. Can be a single float for modules that run once, + or a list of floats for modules that run multiple times (e.g., transformer steps) + """ + + module_name: str + perf: int + + +@dataclass(frozen=True) +class QEffPipelineOutput: + """ + Data class to store the output of a QEfficient diffusion pipeline. 
+ + Attributes: + pipeline_module: List of ModulePerf objects containing performance metrics for each module + images: Generated images as either a list of PIL Images or numpy array + """ + + pipeline_module: list[ModulePerf] + images: Union[List[PIL.Image.Image], np.ndarray] + + def __repr__(self): + output_str = "=" * 60 + "\n" + output_str += "QEfficient Diffusers Pipeline Inference Report\n" + output_str += "=" * 60 + "\n\n" + + # Module-wise inference times + output_str += "Module-wise Inference Times:\n" + output_str += "-" * 60 + "\n" + + # Calculate E2E time while iterating + e2e_time = 0 + for module_perf in self.pipeline_module: + module_name = module_perf.module_name + inference_time = module_perf.perf + + # Add to E2E time + e2e_time += sum(inference_time) if isinstance(inference_time, list) else inference_time + + # Format module name for display + display_name = module_name.replace("_", " ").title() + + # Handle transformer specially as it has a list of times + if isinstance(inference_time, list) and len(inference_time) > 0: + total_time = sum(inference_time) + avg_time = total_time / len(inference_time) + output_str += f" {display_name:25s} {total_time:.4f} s\n" + output_str += f" - Total steps: {len(inference_time)}\n" + output_str += f" - Average per step: {avg_time:.4f} s\n" + output_str += f" - Min step time: {min(inference_time):.4f} s\n" + output_str += f" - Max step time: {max(inference_time):.4f} s\n" + else: + # Single inference time value + output_str += f" {display_name:25s} {inference_time:.4f} s\n" + + output_str += "-" * 60 + "\n\n" + + # Print E2E time after all modules + output_str += f"End-to-End Inference Time: {e2e_time:.4f} s\n\n" + output_str += "=" * 60 + "\n" + + return output_str + + +# List of module name that require special handling during export +# when use_onnx_subfunctions is enabled +ONNX_SUBFUNCTION_MODULE = ["transformer"] diff --git a/QEfficient/transformers/models/modeling_auto.py 
b/QEfficient/transformers/models/modeling_auto.py index 8edc1f3f0..16a809c96 100644 --- a/QEfficient/transformers/models/modeling_auto.py +++ b/QEfficient/transformers/models/modeling_auto.py @@ -124,21 +124,6 @@ def from_pretrained(cls, pretrained_model_name_or_path: str, *args, **kwargs): model = cls._hf_auto_class.from_pretrained(pretrained_model_name_or_path, *args, **kwargs) return cls(model, pretrained_model_name_or_path=pretrained_model_name_or_path) - @property - def model_name(self) -> str: - """ - Get the name of the underlying HuggingFace model. - - Returns - ------- - str - The model's class name, with "QEff" or "QEFF" prefix removed if present. - """ - mname = self.model.__class__.__name__ - if mname.startswith("QEff") or mname.startswith("QEFF"): - mname = mname[4:] - return mname - class MultimodalUtilityMixin: """ @@ -701,21 +686,6 @@ def compile( **compiler_options, ) - @property - def model_name(self) -> str: - """ - Get the name of the underlying vision encoder model. - - Returns - ------- - str - The model's class name, with "QEff" or "QEFF" prefix removed if present. - """ - mname = self.model.__class__.__name__ - if mname.startswith("QEff") or mname.startswith("QEFF"): - mname = mname[4:] - return mname - @property def get_model_config(self) -> dict: """ @@ -869,21 +839,6 @@ def compile( **compiler_options, ) - @property - def model_name(self) -> str: - """ - Get the name of the underlying language decoder model. - - Returns - ------- - str - The model's class name, with "QEff" or "QEFF" prefix removed if present. 
- """ - mname = self.model.__class__.__name__ - if mname.startswith("QEff") or mname.startswith("QEFF"): - mname = mname[4:] - return mname - @property def get_model_config(self) -> dict: """ @@ -946,21 +901,6 @@ def __init__( self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = None, None self.input_shapes, self.output_names = None, None - @property - def model_name(self) -> str: - """ - Get the name of the underlying multimodal model. - - Returns - ------- - str - The model's class name, with "QEff" or "QEFF" prefix removed if present. - """ - mname = self.model.__class__.__name__ - if mname.startswith("QEff") or mname.startswith("QEFF"): - mname = mname[4:] - return mname - @classmethod def from_pretrained(cls, pretrained_model_name_or_path: str, qaic_config: Optional[dict] = None, **kwargs): """ @@ -2131,21 +2071,6 @@ def cloud_ai_100_generate( ), ) - @property - def model_name(self) -> str: - """ - Get the name of the underlying multimodal model. - - Returns - ------- - str - The model's class name, with "QEff" or "QEFF" prefix removed if present. - """ - mname = self.model.__class__.__name__ - if mname.startswith("QEff") or mname.startswith("QEFF"): - mname = mname[4:] - return mname - @property def get_model_config(self) -> dict: """ @@ -2437,21 +2362,6 @@ def __init__( if self.model.qaic_config is not None and self.model.qaic_config.get("num_kv_blocks", None) is not None: BlockedKVAttentionTransform.apply(self.model, num_kv_blocks=self.model.qaic_config.get("num_kv_blocks")) - @property - def model_name(self) -> str: - """ - Get the name of the underlying Causal Language Model. - - Returns - ------- - str - The model's class name, with "QEff" or "QEFF" prefix removed if present. 
- """ - mname = self.model.__class__.__name__ - if mname.startswith("QEff") or mname.startswith("QEFF"): - mname = mname[4:] - return mname - def __repr__(self) -> str: return self.__class__.__name__ + "\n" + self.model.__repr__() diff --git a/QEfficient/transformers/models/pytorch_transforms.py b/QEfficient/transformers/models/pytorch_transforms.py index 21a867eb5..07b9fe7e1 100644 --- a/QEfficient/transformers/models/pytorch_transforms.py +++ b/QEfficient/transformers/models/pytorch_transforms.py @@ -197,6 +197,10 @@ Starcoder2ForCausalLM, Starcoder2Model, ) +from transformers.models.t5.modeling_t5 import ( + T5Attention, + T5LayerNorm, +) from transformers.models.whisper.modeling_whisper import ( WhisperAttention, WhisperDecoder, @@ -417,6 +421,10 @@ QEffStarcoder2ForCausalLM, QEffStarcoder2Model, ) +from QEfficient.transformers.models.t5.modeling_t5 import ( + QEffT5Attention, + QEffT5LayerNorm, +) from QEfficient.transformers.models.whisper.modeling_whisper import ( QEffWhisperAttention, QEffWhisperDecoder, @@ -808,6 +816,14 @@ class KVCacheExternalModuleMapperTransform(ExternalModuleMapperTransform): _match_class_replace_method = {} +class T5ModelTransform(ModuleMappingTransform): + # supported architectures + _module_mapping = { + T5Attention: QEffT5Attention, + T5LayerNorm: QEffT5LayerNorm, + } + + class PoolingTransform: """ Apply a pooling transformation to the model. This transformation appends a pooling layer to the model, allowing for the reduction of spatial dimensions in the output. diff --git a/QEfficient/transformers/models/t5/__init__.py b/QEfficient/transformers/models/t5/__init__.py new file mode 100644 index 000000000..75daf1953 --- /dev/null +++ b/QEfficient/transformers/models/t5/__init__.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ---------------------------------------------------------------------------- diff --git a/QEfficient/transformers/models/t5/modeling_t5.py b/QEfficient/transformers/models/t5/modeling_t5.py new file mode 100644 index 000000000..f54201465 --- /dev/null +++ b/QEfficient/transformers/models/t5/modeling_t5.py @@ -0,0 +1,145 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import torch +import torch.nn as nn +from transformers import EncoderDecoderCache +from transformers.models.t5.modeling_t5 import ( + T5Attention, + T5LayerNorm, +) + + +class QEffT5LayerNorm(T5LayerNorm): + def forward(self, hidden_states): + # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean + # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated + # w/o mean and there is no bias. 
Additionally we want to make sure that the accumulation for + # half-precision inputs is done in fp32 + + div_first = hidden_states * torch.rsqrt(torch.tensor(hidden_states.shape[-1], dtype=torch.float32)) + variance = div_first.pow(2).sum(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + + # convert into half-precision if necessary + if self.weight.dtype in [torch.float16, torch.bfloat16]: + hidden_states = hidden_states.to(self.weight.dtype) + + return self.weight * hidden_states + + +class QEffT5Attention(T5Attention): + def forward( + self, + hidden_states, + mask=None, + key_value_states=None, + position_bias=None, + past_key_value=None, + layer_head_mask=None, + query_length=None, + use_cache=False, + output_attentions=False, + cache_position=None, + ): + """ + Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). + """ + # Input is (batch_size, seq_length, dim) + # Mask is (batch_size, 1, 1, key_length) (non-causal encoder) or (batch_size, 1, seq_length, key_length) (causal decoder) + batch_size, seq_length = hidden_states.shape[:2] + + # if key_value_states are provided this layer is used as a cross-attention layer for the decoder + is_cross_attention = key_value_states is not None + + query_states = self.q(hidden_states) + query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) + + # Check is encoder-decoder model is being used. 
Otherwise we'll get `DynamicCache` + if past_key_value is not None and isinstance(past_key_value, EncoderDecoderCache): + is_updated = past_key_value.is_updated.get(self.layer_idx) + if is_cross_attention: + # after the first generated id, we can subsequently re-use all key/value_states from cache + curr_past_key_value = past_key_value.cross_attention_cache + else: + curr_past_key_value = past_key_value.self_attention_cache + else: + curr_past_key_value = past_key_value + + current_states = key_value_states if is_cross_attention else hidden_states + if is_cross_attention and past_key_value is not None and is_updated: + # reuse k,v, cross_attentions + key_states = curr_past_key_value.layers[self.layer_idx].keys + value_states = curr_past_key_value.layers[self.layer_idx].values + else: + key_states = self.k(current_states) + value_states = self.v(current_states) + key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) + value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) + + if past_key_value is not None: + # save all key/value_states to cache to be re-used for fast auto-regressive generation + cache_position = cache_position if not is_cross_attention else None + key_states, value_states = curr_past_key_value.update( + key_states, value_states, self.layer_idx, {"cache_position": cache_position} + ) + # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls + if is_cross_attention: + past_key_value.is_updated[self.layer_idx] = True + + # compute scores, equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 + scores = torch.matmul(query_states, key_states.transpose(3, 2)) + + if position_bias is None: + key_length = key_states.shape[-2] + # cache position is 0-indexed so we add 1 to get the real length of queries (aka with past) + real_seq_length = query_length if query_length is not None 
else cache_position[-1] + 1 + if not self.has_relative_attention_bias: + position_bias = torch.zeros( + (1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype + ) + if self.gradient_checkpointing and self.training: + position_bias.requires_grad = True + else: + position_bias = self.compute_bias( + real_seq_length, key_length, device=scores.device, cache_position=cache_position + ) + if past_key_value is not None: # This block is where the patch applies + position_bias = position_bias[:, :, -1:, :] # Added by patch + + if mask is not None: + causal_mask = mask[:, :, :, : key_states.shape[-2]] + position_bias = position_bias + causal_mask + + if self.pruned_heads: + mask = torch.ones(position_bias.shape[1]) + mask[list(self.pruned_heads)] = 0 + position_bias_masked = position_bias[:, mask.bool()] + else: + position_bias_masked = position_bias + + scores += position_bias_masked + + # (batch_size, n_heads, seq_length, key_length) + attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) + attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + # Mask heads if we want to + if layer_head_mask is not None: + attn_weights = attn_weights * layer_head_mask + + attn_output = torch.matmul(attn_weights, value_states) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.view(batch_size, -1, self.inner_dim) + attn_output = self.o(attn_output) + + outputs = (attn_output, position_bias) + + if output_attentions: + outputs = outputs + (attn_weights,) + return outputs diff --git a/QEfficient/utils/__init__.py b/QEfficient/utils/__init__.py index 49f0ad30b..3d6583f85 100755 --- a/QEfficient/utils/__init__.py +++ b/QEfficient/utils/__init__.py @@ -16,7 +16,6 @@ create_model_params, custom_format_warning, dump_qconfig, - export_wrapper, generate_mdp_partition_config, get_num_layers_from_config, get_num_layers_vlm, diff --git a/QEfficient/utils/_utils.py 
b/QEfficient/utils/_utils.py index 131a7fc26..26bae7a34 100644 --- a/QEfficient/utils/_utils.py +++ b/QEfficient/utils/_utils.py @@ -12,7 +12,6 @@ import subprocess import xml.etree.ElementTree as ET from dataclasses import dataclass -from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Union import requests @@ -27,9 +26,8 @@ PreTrainedTokenizerFast, ) -from QEfficient.utils.cache import QEFF_HOME from QEfficient.utils.constants import KWARGS_INCLUSION_LIST, QEFF_MODELS_DIR, Constants, QnnConstants -from QEfficient.utils.hash_utils import create_export_hash, json_serializable +from QEfficient.utils.hash_utils import json_serializable from QEfficient.utils.logging_utils import logger @@ -532,61 +530,11 @@ def create_model_params(qeff_model, **kwargs) -> Dict: """ model_params = copy.deepcopy(kwargs) model_params = {k: v for k, v in model_params.items() if k in KWARGS_INCLUSION_LIST} - model_params["config"] = qeff_model.model.config.to_diff_dict() model_params["peft_config"] = getattr(qeff_model.model, "active_peft_config", None) model_params["applied_transform_names"] = qeff_model._transform_names() return model_params -def export_wrapper(func): - def wrapper(self, *args, **kwargs): - export_dir = kwargs.get("export_dir", None) - parent_dir = self.model_architecture or self.model_name - export_dir = Path(export_dir or (QEFF_HOME / parent_dir / self.model_name)) - - # PREPROCESSING OF PARAMETERS - - # Get the original signature - original_sig = inspect.signature(func) - - # Remove 'self' from parameters - params = list(original_sig.parameters.values())[1:] # skip 'self' - new_sig = inspect.Signature(params) - - # Bind args and kwargs to the new signature - bound_args = new_sig.bind(*args, **kwargs) - bound_args.apply_defaults() - - # Get arguments as a dictionary - all_args = bound_args.arguments - - export_hash, filtered_hash_params = create_export_hash( - model_params=self.hash_params, - output_names=all_args.get("output_names"), - 
dynamic_axes=all_args.get("dynamic_axes"), - export_kwargs=all_args.get("export_kwargs", None), - onnx_transform_kwargs=all_args.get("onnx_transform_kwargs", None), - use_onnx_subfunctions=all_args.get("use_onnx_subfunctions", False), - ) - - export_dir = export_dir.with_name(export_dir.name + "-" + export_hash) - kwargs["export_dir"] = export_dir - self.export_hash = export_hash - - # _EXPORT CALL - onnx_path = func(self, *args, **kwargs) - - # POST-PROCESSING - # Dump JSON file with hashed parameters - hashed_params_export_path = export_dir / "hashed_export_params.json" - create_json(hashed_params_export_path, filtered_hash_params) - logger.info("Hashed parameters exported successfully.") - - return onnx_path - - return wrapper - - def execute_command(process: str, command: str, output_file_path: Optional[str] = None): """ Executes the give command using subprocess. diff --git a/QEfficient/utils/constants.py b/QEfficient/utils/constants.py index e0b003422..613d7049a 100644 --- a/QEfficient/utils/constants.py +++ b/QEfficient/utils/constants.py @@ -144,6 +144,13 @@ def get_models_dir(): # Molmo Constants MOLMO_IMAGE_HEIGHT = 536 MOLMO_IMAGE_WIDTH = 354 +# Flux Transformer Constants +FLUX_ONNX_EXPORT_SEQ_LENGTH = 256 +FLUX_ONNX_EXPORT_COMPRESSED_LATENT_DIM = 4096 +FLUX_ADALN_HIDDEN_DIM = 3072 +FLUX_ADALN_DUAL_BLOCK_CHUNKS = 12 # 6 chunks for norm1 + 6 chunks for norm1_context +FLUX_ADALN_SINGLE_BLOCK_CHUNKS = 3 +FLUX_ADALN_OUTPUT_DIM = 6144 # 2 * FLUX_ADALN_HIDDEN_DIM class Constants: diff --git a/QEfficient/utils/export_utils.py b/QEfficient/utils/export_utils.py new file mode 100644 index 000000000..eea92a490 --- /dev/null +++ b/QEfficient/utils/export_utils.py @@ -0,0 +1,235 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import inspect +import re +import warnings +from pathlib import Path +from typing import Dict + +from QEfficient.base.onnx_transforms import CustomOpTransform, RenameFunctionOutputsTransform +from QEfficient.transformers.cache_utils import InvalidIndexProvider +from QEfficient.transformers.models.pytorch_transforms import get_decoder_layer_classes_for_export +from QEfficient.utils.cache import QEFF_HOME +from QEfficient.utils.hash_utils import create_export_hash +from QEfficient.utils.logging_utils import logger +from QEfficient.utils.torch_patches import apply_torch_patches, undo_torch_patches + + +def export_wrapper(func): + """ + Decorator for export methods that orchestrates the complete export lifecycle. + + Responsibilities: + 1. Prepare export directory structure + 2. Generate reproducible hash for export configuration + 3. Setup ONNX subfunction environment (if enabled) + 4. Execute the wrapped export function + 5. Cleanup subfunction environment (if enabled) + 6. Save export metadata + + Args: + func: The export method to wrap (typically _export) + + Returns: + Wrapped function with complete export lifecycle management + """ + + def wrapper(self, *args, **kwargs): + # 1. Prepare export directory + export_dir = _prepare_export_directory(self, kwargs) + + # 2. Generate hash and finalize export directory path + export_hash, filtered_hash_params = _generate_export_hash(self, args, kwargs, func) + export_dir = export_dir.with_name(export_dir.name + "-" + export_hash) + kwargs["export_dir"] = export_dir + self.export_hash = export_hash + + # 3. Setup ONNX subfunctions if requested + # TODO: No need of this variable, if export_kwargs contains classes (refer diffusers) + if use_onnx_subfunctions := kwargs.get("use_onnx_subfunctions", False): + _setup_onnx_subfunctions(self, kwargs) + + # 4. 
Execute the actual export + onnx_path = func(self, *args, **kwargs) + + # 5. Save export metadata + _save_export_metadata(export_dir, filtered_hash_params) + + # 6. Always cleanup subfunctions if they were setup + if use_onnx_subfunctions: + _cleanup_onnx_subfunctions(self) + + return onnx_path + + return wrapper + + +def _prepare_export_directory(qeff_model, kwargs) -> Path: + """ + Prepare and return the base export directory path. + + Args: + qeff_model: The QEff model instance + kwargs: Keyword arguments containing optional export_dir + + Returns: + Path object for the base export directory + """ + export_dir = kwargs.get("export_dir", None) + parent_dir = qeff_model.model_architecture or qeff_model.model_name + return Path(export_dir or (QEFF_HOME / parent_dir / qeff_model.model_name)) + + +def _generate_export_hash(qeff_model, args, kwargs, func): + """ + Generate export hash from model parameters and export arguments. + + The hash ensures reproducibility and prevents conflicts between + different export configurations. 
+ + Args: + qeff_model: The QEff model instance + args: Positional arguments to the export function + kwargs: Keyword arguments to the export function + func: The export function being wrapped + + Returns: + Tuple of (export_hash: str, filtered_hash_params: dict) + """ + # Extract use_onnx_subfunctions before binding (it's used by wrapper, not _export) + use_onnx_subfunctions = kwargs.pop("use_onnx_subfunctions", False) + + # Extract function signature + original_sig = inspect.signature(func) + params = list(original_sig.parameters.values())[1:] # Skip 'self' + new_sig = inspect.Signature(params) + # Bind all arguments + bound_args = new_sig.bind(*args, **kwargs) + bound_args.apply_defaults() + all_args = bound_args.arguments + + # Use the model's current configuration for hashing to ensure any post-load modifications are captured + # TODO: Replace with get_model_config property of modeling classes and remove the if-else + # Determine the config dict to use, preferring .to_diff_dict() if available + + if hasattr(qeff_model.model, "config") and hasattr(qeff_model.model.config, "to_diff_dict"): + config_val = qeff_model.model.config.to_diff_dict() + elif hasattr(qeff_model.model, "model") and hasattr(qeff_model.model.model.config, "to_diff_dict"): + config_val = qeff_model.model.model.config.to_diff_dict() + else: + config_val = qeff_model.model.config + + qeff_model.hash_params.update( + { + "config": config_val, + } + ) + + # Generate hash from relevant parameters + export_hash, filtered_hash_params = create_export_hash( + model_params=qeff_model.hash_params, + output_names=all_args.get("output_names"), + dynamic_axes=all_args.get("dynamic_axes"), + export_kwargs=all_args.get("export_kwargs", None), + onnx_transform_kwargs=all_args.get("onnx_transform_kwargs", None), + use_onnx_subfunctions=use_onnx_subfunctions, + ) + + return export_hash, filtered_hash_params + + +def _setup_onnx_subfunctions(qeff_model, kwargs): + """ + Setup ONNX subfunction export environment. 
+ + This function prepares the model and environment for exporting with + ONNX subfunctions enabled. It: + - Applies necessary torch patches + - Modifies output names for subfunction compatibility + - Adds subfunction-specific ONNX transforms + - Updates export kwargs with module classes + + Args: + qeff_model: The QEff model instance + kwargs: Export keyword arguments (modified in-place). + """ + warnings.warn( + "The subfunction feature is experimental. Please note that using compile " + "consecutively with and without subfunction may produce inconsistent results." + ) + + # Apply torch patches for subfunction support + apply_torch_patches() + InvalidIndexProvider.SUBFUNC_ENABLED = True + + # Store original state for restoration during cleanup + qeff_model._original_onnx_transforms = qeff_model._onnx_transforms.copy() + + # Transform output names for subfunction compatibility + if "output_names" in kwargs: + kwargs["output_names"] = [ + re.sub("_RetainedState", "_InternalRetainedState", name) for name in kwargs["output_names"] + ] + + # Add subfunction-specific ONNX transforms + qeff_model._onnx_transforms.append(RenameFunctionOutputsTransform) + qeff_model._onnx_transforms.append(CustomOpTransform) + + # Configure export to use modules as functions + export_kwargs = kwargs.get("export_kwargs", {}) + + # TODO: Handle this in the modelling class QEFFTransformersBase,remove from here. Refer diffusers implementation + export_kwargs["export_modules_as_functions"] = get_decoder_layer_classes_for_export(qeff_model.model) + kwargs["export_kwargs"] = export_kwargs + + +def _cleanup_onnx_subfunctions(qeff_model): + """ + Cleanup ONNX subfunction export environment. 
+ + Restores the model and environment to pre-subfunction state by: + - Undoing torch patches + - Resetting InvalidIndexProvider flag + - Restoring original ONNX transforms list + + Args: + qeff_model: The QEff model instance + + Note: + This function is called in a finally block to ensure cleanup + even if export fails. Errors during cleanup are logged but + not re-raised to avoid masking the original exception. + """ + try: + # Undo torch patches + undo_torch_patches() + InvalidIndexProvider.SUBFUNC_ENABLED = False + + # Restore original ONNX transforms + if hasattr(qeff_model, "_original_onnx_transforms"): + qeff_model._onnx_transforms = qeff_model._original_onnx_transforms + delattr(qeff_model, "_original_onnx_transforms") + + except Exception as e: + logger.error(f"Error during subfunction cleanup: {e}") + + +def _save_export_metadata(export_dir: Path, filtered_hash_params: Dict): + """ + Save export metadata to JSON file for reproducibility. + + Args: + export_dir: Directory where the export was saved + filtered_hash_params: Dictionary of parameters used for hashing + """ + # Import here to avoid circular dependency + from QEfficient.utils._utils import create_json + + hashed_params_path = export_dir / "hashed_export_params.json" + create_json(hashed_params_path, filtered_hash_params) + logger.info("Hashed parameters exported successfully.") diff --git a/QEfficient/utils/hash_utils.py b/QEfficient/utils/hash_utils.py index 948b72e6a..68ccab0d4 100644 --- a/QEfficient/utils/hash_utils.py +++ b/QEfficient/utils/hash_utils.py @@ -14,7 +14,8 @@ def json_serializable(obj): if isinstance(obj, set): - return sorted(obj) + # Convert set to a sorted list of strings for consistent hashing + return sorted([cls.__name__ if isinstance(cls, type) else str(cls) for cls in obj]) raise TypeError(f"Object of type {obj.__class__.__name__} is not JSON serializable") diff --git a/docs/image/girl_laughing.png b/docs/image/girl_laughing.png new file mode 100644 index 
0000000000000000000000000000000000000000..9e58da61dfdc75588c86569fc769291a3c65d964 GIT binary patch literal 430404 zcmV)7K*zs{P)%;Km+LMeXG6d)vLE}xo6((tl_R7;U01BYbv`s-_3i^ zJtr3T&p%%x0{_`R{f}}e$XG-`0BZ~}8-oDI0Elb=fdKD`#50R%8$3|asLWPP6u zAo}kzF!&$+7XT4~8PUjJ;mvTR{K3EoVC36K;bsWPgbc{cfc|3w0AK<{1j7J;27HqN zm>mNFFd`rr06;Xr>@NXe1Z@Q%fc(Ww=${5)KwfG)*78GtQ-H8!Ib022g@fCP?QgUDWZ2*9;%nxxEt zXo!fJh+1PU$Y662MFvE)ewbC#$j=pve3JBz7{QObA0@tL=q1p{bNEj#0&&zz#0Pp zY*P=)-iR_Y05W=bSJR;uk${nXcM**xA|OUYKm+7Qh6G|+1kn$!K`|Sw=)pp>WWLn; z1|U$;=-6E^F!}@)i6c5HN3Lu`Bip4xZlm?xQvbRAD{ev)%?Edai>CGZ-;Pz)SvQ zVzs9XXgqo2IMe~Y7(uL;9~kyc@`{WIZ1h=Py;v+g1)$^578#CC!@DFPa0m`ugn@PH zgF=DV41XQ!g&so@m;`l@fq)RgcSTeH0SFi!WSFF!Fee5P&>#|`(b*$3#*a4|3=pou~e6bYmB=%B=Z0mB?egan-{g%i)-83C;-28r476X}hX zYM=oFN|6j205E95s8T5&cZNw@$cWzV$*q(Pur^l7xDG><`|jOT(m~N4;^R;$1BP+H z4%JZ$<*kF>91R);cT}U~0lV)szhL#eU!jb(nd%YHod zc_z38;-KhIh)D!QV}wE=FcUBsgUpDj1pp#$TXL;42uYTRPle9d1TjUEq)FEAZxV6a zv}LteET@2wB#0&j;HoMOrp9uXrwrC6wr!Y!(OLjbOhQc5Iz#{_U zRKm;z%*158>1JWQXsj1)W00ANReFVMsWwDMv!m=R;vNPE1pR_w$iQ_NQk1Pgp;XKf zye01_l=MMVsoMaN2_bANG90;B=XvM_481?B@aSy-FaZcz>^jE!f}%Sb7@^ud?L&7Y z!kvNHvutJ%8mdF;#|JyTG0cp}2CR-C2v)7a4SAYkSioNxp+Nvr;gyG@A5f9%@Ckzr zd>7GR@Y(}g=h!Dz98e4lI*hEwKrC~xiV8}8;Mv1hlq~VJh=GV?IEZ80i?`SG`?ncq30|KcM>n(!oiilv0Fg-cey{y`K)h_F%9OO9y)^(Mp#+uY% zn&oNJlvP~;x7Jz!AG$HbM9z6=PEreitj3<0O(X%x#6$!Zfx)tgj%}EL$YRh~6S!&! 
zl*nh;kzOy&CVUp>Hlm5TT7nRMenPasc)H0LsSzRq8}(y!Qhbn4^$Wx;2ow$FJ{?#J zGz3ostdWCs6(HAk=N>ykZ=Bv|^C@&EZ zSLRl_Cwy77R{==K1_#0^!pY?24FEm$B0&!l_!cf4$btA467BF&^28)yC=wMgh>Aqa zV0>U&KlQ-})E^GgtVkHIx$c!l!00L^T5=3*U*~=#Y3+ z6C<;Wfv`oBEYuIc2l-`YjLyFa@*T8A4TsKOSD-_Sb(&RLItEgMVr*g^yTI6ik=RE-iBY+j z%nky2XfrXqN*WCU21F(@28}h~s3VgQEV{s(W;XEGFhQq|Rj^kWe>D6U2tRk;l~|8n zE%V7LNU=Aya-mtF#37cf%N1qMpC5V>SM_G2IAIHH*xR~yh+!{ zOc*Jd20>#XTVaqw%sP%e`pASdL$zGd5`lr30Wh(Khyj5l;Gr2)D;mn$R?wK>$}FQ{ zqQoE>rD!193a?n9W}*euVaL4j3si2Cb0=K5&@Aj)*2i969a(8m^4ojFtJJ9REQ=^k|wF0 zYugrxTWb*!jIod+fCbE|vT6a)LY@pUb!Ab`XT@?^JLaZovm`MvXxrAEn-9*p?=R7VV2m=r!3%-v^zruY{X?w)aj!cT-iK7yfLD2w| zHfwMwpeTgEvQl*xlck{QsVk!ILKvSQ|4}8oqJF}=bKTpdefM`71 z^DY3UP}6~k%!CG+)sTSLI?Y#UEC`W}NkUW?Kt27~0D*ZQefB>lV6(iT)2LnNm8^vS z%m7GqQp7@FFL)}@(F4UI6>f+(inEG{%$f`pX_6r_VbD89qf->q;Z4B_k0d(y2;8+y zoi>Fhba+#=&#T@+1nD>&9V-w_*%OKtT7pWYuww^1_C>?YAT8_I%s2#bSgkSSd_ES$ z5dgJi0SBVD)iKPPMpuN9W_Dt}k*IXSmqrGU(K+&>?3me+NfI=c8JeaotE#FhU`&(L z80$+MM#HhS1_6!5K{9Zy1GIHhv~^QdO+$sXY2Htafh4ntP*={mc03wdV~oMX*epqV znQN-5ESCMj(Ava+t*WXi8?@vTy~ZGdL9_^oPZLXS%|n9xq5p$WYQap79N8o>Hc8XW zxt4*9wPM$OvPfhij!a;VD-1z|7doW3Bm4pV!>z;uE&2w(6Gz8E1{wvO=CGCaC@D>W$*I zh;Gmp8THU0)&e#}q09m&VSs~Y#S#?sief~?6(TzmfM|ScrQRkoDpeBb1bsV<6G6d{ zK`&v56&w1eRoGSlF{lV6Sz?sxM_&!aMoc;Zdnymi=xb7Rc-U7oKw_A&DUXhKwk*cga5FUSS2zNN{36;l}p)DnwePvV4W=M9>bj{AC^$P1$MqsYD7#elM! 
zphn0+mg1GlTp}5f0a77Vmi}wkcx4FzF8@JP>oEU8L^Xva#$Sa#^j$UT34`~9DGfRO zfD4f#$&__RTWuL=%?Y!1Dv+#B^@X-IVmdJR)L0m5{RAQ2-wQpm0pqh$zI)`zON%e; zwAQw5i-5*j0xpZ9EGhyp#`Xt;EXjd`Z z)YGC^04IY%9|7CCF>JDIQY_0}e>@nhmsL4m%(KKy);8;=JUct-_Xq3a&Cy`3s20oO zylu%Mn8W}ZM8oXMCt~g-*jt}k)cH~^V`jjrt`S|5qzKqHZJJsHAaceCGl|o}@gJR7 zMJ0(-sysxs!T1fGAXFbhrENvVI`S7Shw6VM;6buLm9^8~t~HPsf@mtG3P7=;;npJi z5DkwK27iA^6?9t?2v=+opyERZ7ZCy20Qvy@%Gf?JqP#k6Iut&&!v5PBJnEIcgBl6; zK^UKEYOE{E|u+gCd(^NVu7_0U!dgA5jy&;O`Y`uM8SxDnd3m zrr}V00#J_NQA^x1!oT{H|4`E@K^(k=MfRmW!5IKVpVJfJ5UUUoj{}X-9~O2g6>1`k z>dcD60>GitQb$v1K0NV*ATbw2h*%2BPn8^L(oEir6Rgct3l}B|F-~fCK0A}GeNr~U zky^<6GQxfyyzm6=P)zVx95l4s(~gY^pDDgSDeZu(Lad@=m?Q27)nZ~}5DXF#g3O1n zwu~O=3X*6VzdHh9nBqX8JjvI9`O+jIk?2z%00!9-J4Y@}GC)`^%et-8H1G8XNoIkO zTx-A*xwdiET5F84hKLO#qG{Wf7>c4sOxMOc)~5L|T`o=+^Yf})rb#-UOc2qv*tRVL z=lxNZ_ZIW{VtIa;m?XiLsx}83*%jen_=NMBxeN^S@)P!(!v~{IPOf*N+;E`AJYbthr`da6FzkKC(fa@AZ1aJ_9({ih>d)HbGz_0AOPb zN*RGglv0bTs+n-uTVn!~CO{6D7K>?1ji)-J(YiIsd{Nk>m!t_82Cmv>Q4~|>T0ldL zwR6VW^|iI$AU{1hDav`4r-?PdOb!sOS1XMK`HZfZbAeseLK$a7c95o-wYDhc))-?% z7oy7`EKK2DQiUSTs7%NcE_L`iQ33@Bh-1&Fp}%NtBBlT{vC;|+UU5gXqx!{%Kv24n zoE53%kn`+9e^=h3AIzaJ2>?k}2*~@AbwyyJAmcv#(7|1Rfax7@4tlS{VwB!=AUclG z%6Nr8ImY`#Sx5lwK!#R+{8QxpsPdA$k+BU{Nl17A_zt5|oY6KYD#r1NEHGe>4lt@c zmTmx=Kt9ROwOCsAGynjYk<_^-QFrOHF5F-yPL%)2!=$WRF(pQRK**lpzCtmEW`kaX zDC_>Z^TSH4bVxfB7b*w*jrAXe^A0$IMq|Q-F-;^ei(xEL$+iEg2|%lN_g#(oBkf3l zYxE17Rk$!{0>mrDY>JO65k|2iu^}owhRHHnqT806E7<`0~`jbD2 z!9>Xx50}Gh?VKhjgG&&NZ)iqvvBb>A^}rw2dpv$~jtJ-$aCBSpt~xWX)Pb z%;X4JmycKrqE`k0$vJB?U?wIDrfr-t#sDNq0#wyyT}(@47>_2K>stgco6d<{Sro3V zOp-EjT^W-YYdJAlKc6^aKscEm5unZT>2lFBPbS+8*fb?DTk99-vUqR)Wk>TB!7(g% z@pj9PTvgZW8*8mg$vKa1PdIa|Ivr_&3UW{o{T<)bFR!@5G3u!FHhh&;DFS>-e1=I& zm^$GK=-e`r58!S~P(&+&KKh&_gnO<0;L1%^R&{ZfcdCM6l7UqLgZQH3;zYX?K`LS; z0GRFrBSHpGtlKQn&G1su8#t_Q2zT;!7Zuq$UF(!)crz;`DGpgDG!*nIb|ToH5ax@K z9T3ro&-Jb}VCTybcNqY|5)&9^PoMm%wysqccWi}K$gCSkeEbMxWf15rs~Hf!9|sfx z&=^$ug9{@tQqmub0en+K05{mNfDYb% z8|AS%F(`G$1l-g$BKpF7n^-i+$VB9t 
zCQUPgX0a#|lV<(=?Cg}lS&L~hFxabWAi_Kywyq>>5XhLu7YDwUu1N$6q6@@fDI9jzp&w|!qPNK4tI`Os7YL5P+{HK@G{Z{pRtx-^z|h;+JrFgPCO|L> z%*Oh)uD%{rWBF1iEzrjTYV508iG_NT37~@H?`@1yZw`TX53Pw01gkF==zoldV1W5R ztJO*y<>`_s@Q@@kuO1L9)XM;O3>`oeEDc>mn;oK`UD#c5S_9Z4$fBaib68s()|P}T zRf1w;Pz^@pCKx_2k(&dk(8Tt~sSJE4T*D9|!om22?gt6*Hxn85uBb$2LQ&o7536yG zY%sLVzd|6VgTaV~*a3laEm%V&H&DFyQo*s=D#~H|iD;^W#yMgGjtPz5#la$40B9YR z%c5;)Jl^Q{@}_Y`StK?w7R$1*F7-ff5WzrVlDevmu}PY`)+K4?=dERORgok%>7{j9 zEf-b4*YEd6#iFn_8T1B&;SeaR%C@M>;b>@$!PYux%Xvi=^?F&B_X&)1TogsWKk4@d zOnh`Y&w4#F?P5Ly*Vbs9gPpAlXJ?0HxyZ7_S|)0}Ad+){#7xX={aR%(AbUvx0unQT zVRoJpx~55N;-L2IVa2}-M2XOExRwW|R0E*bJK^MJQ z2c815hjs>lW!5|^L_bB1%nD|!5Tmy*fqq1P6hl)bJnb0?%KMR8uMqaA9?^LYd&77@ z)MOv56o4oeqt>T&jG4BWqaQDxjsM-LwF{2gN_!M5bVfMF%H%d7;0GG;0-u*r``*M8+LLL`F|0{;1+cp7FO;U#m=(HL;7YOvJD6j_P#ivgIV@wl6|AWmGSob3?aUZi9 zlkzFeIE&$1Pk<6 zQPb(@$fDYqXsu(*fymPw5ueT|vUhrj*wu|O7QwVF6=lKXvNRu01`N%zteFiWI&y8@ zQcFpO1`rX6ks62AC28W?n#dYsoFg=b*b!6fTAG)Sx zM}%%MovyEKo&mJ3vc@o5A_8PINDPSHGXPBV|%>D8Vv6TpV)ugWlJ6A zCX)in+?B7XZ;ndMWL*LR`kZ0B5<7*mL2Gp`5u;U(okZb;q;e9bKw!{c$hyvkRlPwF zPpEiYiws^W8Nv8Eez9_1!o2@RJu-!0r^LJ5JD-dlL(6A9bYfxPNVqh%Y`UTzFj zWtC?=VrpH}@AsK_u_)OIE?JuZ0~$jJt#f`OIh!O78Zf4x<*jS5u~k(iX@75Lm)dr| zoDcFrTUV~7#o~;S2ZM24FB!=gliH+i>c%xTF{9z8!L4GkSk4RRw`rcG30H0D7ELct zcei$?XH#R_B+F-uWz&?-HbuD{jz{g1ilWSVnK8udh+JY5=bSMKvm+wiWWk7IqKyo`J~|PqFd>-}?75#EQ26wB7Bq`b*X(7G z8sAuTYFVI!HD(#F7o3k!B{oU!5+I#J8=r6;v?Um%s-q}+XI|CXAc`{Vz9AU+O88m9 zt{~%`Fpi>sgzk@a`Ujtu4r@dDsjgYHdP{1u}^taiR*62nY|D}v34(*l694mt3Y z&d`De3KI;5&o z9Ydlw^^Ia+@_T>y9gqWG(MS8eS%Q-k=QB9)-d+NbFUWANW#%+Z>ZYAer~Te2%QFBv zKRfmNqqQw1iDkyNY5+_x&%MAi5fM3(oeCXs)6_Px%!@Qh(>!bHrq}DIHc5xWJjtv{ zT-)SHPL8vrUsKZ#t2V7V~C(V`sEBvFUWNICDfcONaeI zQIzxfv?+_pc&o1FdD2fNc@2xYsSwQKe9;;8ceOR)_MX?49M2U(-<5hAzNmH zeos+lXGG*x)RFTc8`P4vRiRe3K*e!Y0t|ua$O}T?fQ5{#kY><|RY61{+TE%1%$vJP z#*7AtG%cjG*K55BmX87%+2~E>TX|;+zgUG@xxd#lX*-K^DWse_I1-{& zif*BDCh#0o3`ZqAS|b<9Vi-`{viu^Z1yaNy*=|EX`1#jTp zLSb~D;*b=OA?#9pHG`o;%1weqh}8>&+_C=pwN4b?NH#+Fd58FSpkHlySUO>1V^Eb) 
zur-8Vl?FhxC}H}QLBJDaL!arVs#~QHUjGkJfY{s6O&te}*ZC&~R8Wt~%ZI8O0)j82 z4fZ0slXs#46GN|r>*H}a)7asetD(kvw z^IqR3wy7I--0StLsx^lF`q=)UXBf+}F$N48U$AD30S0SrV$-H=Y|LUx1YpvopnwrZZ7KR-bf5?r$PpDfSm+5n}58nzF58U;-Dve)v3 zAYh9AthyXEZMumeB@7cGKHrl>ZGe@X0AxH^a<;Mh@f>FY{3ibIf5rCtWar%${S z<=>9XQ;|9H9MmR8_7{T$QL8ZMQulsgUk>p5{|LB+DwB>MAT$aC)zceCl!~48o8>5P z)6Y7*vtvOJFvPy1iuut%KbvCQeSesJAqe|T>mVc{0a(i9xU(lEhEB;N#eq}V=#T~v z4%qD-K%)2=Y`hK~c4yu9<_QUi^~m}lj#L7Kwg9jfS(XZGX;v5ubuU(wWknG5;jCQS zKP;Lgz>c|wIJ+V<((H~^1Avv>#R@&sY4(Ok5wL5fq-Lz^!1)J-*ouAwL-DIxVrFkX zVgyHT%pFw>=1%RkM-1Kg6cLa}cCFEvCU@`THO?sOBIdej(j=|w5`;~LNU^k5JqF2> zF(BwZsUrWetQ(RPiKuPc;b_p*H85vsj%bWUGzqm$Q#V9Z78SK*O#*-|6QD`b%$g)2 zf6@!2Y1X!FKkqx|T56LdA%{)TtdG`)`CvXP08HCdP2IL_ZIfh}8*6NCh9>K+_b1C` z#b6GPj)5&PCn*)vS(YVPYRhFY==XNEw;n!z&@L-9W;`A*Z91Es)meW$T5DYm4d-dT zu{Ozi!^&ad=Bdegqd~9NpH5HC&W?h7e}61O2Go6qLlot(6)}IE7Hg?X0T!xOq1xoD*IienL~ zuoFB0-SBvY7GsQDhnsZlu7;)okl889AR41hHRPElM|Ks}5z$9}KtpMf9(G+ih7dFG za7awX@1iEmUQ5X0s*+9|NewfC7A3~8vrq>Fzf-U*A47nIWh+oWDP&|12N^i_r64Mx z>Imst5B9`W%SA%B)N^I6Nr+xg>}{}5BzPX+*&D$w1Biwlf^cY)Ai8pD20Ah*QjKb{ zq9u`MB8Cpf?{cgJ6|*URU?6uOG)$*ZFN>DfLwzUX9nl}uoyU+O3?>wXxoedzNw~o? 
z4^haDbZYcj~iDTj0`*(pwz{k+G385xGQ*B=_D`E2SKs;bUX91imN ze37L#@8$DFS+|rX1LUNxt8!81z1-N%v(stbbHl-CHlI;hHZ7$@YwN?}_|%xRTGoze zJXlX`vRIx^&&~(Kp)uAGA$Vv<6)Q&L_whwwbZyH9oO6DgBQF&!@F7zr64`MK6;`<; z*h=P^ z0CWo9!zw2^3(Z+Kp zVZmXN1j<#&gRbgyMpGEvB+!$c=!Qd*I(S>Xs1K zRh8$xrmh_kGZ|x?bN)%PVIKgolhKo`COa2iI0mpL$+G@pah|1V)A|x1ble()&Q)kF z5Kqr%%wW@OGM*%R*?c-PoB~s7Qe;dsn`eWvs;Z_K3{q>7x+=0PXS7mA0KaT zT)2P#!E;YPU1rPixVN>v+q$N0-LkBRz`6GP?2)k+ZQ`18eQh!vuJy9awRCLI#D7AObP9j#IE+R28a<6|LR}=0|cWx&KcuNu6$ciBSCW1Lnr_R6ZyK5 zaFr=gIbW>TLKs8skr!@#3jVc;!!2rr)9NEAy?B=KnA%B&@Nn zt`pe_am6oo6i3KdP-vxBA`faim|1oR69OZKFuFbzz8Ho`mBM2{d@u$<)>au!djz@> zjlKzYj5-lcb`$&IZ3Ypdan2F3O)Qg~;|L5yn80tp-s4KvGX2C@zfJB}4=Wn4o^SjiH@D`E?;G z2+)TlqCCedWQZ=awtr=^f_8dKue49zMT#mMFO~lK1Ev)7y#hM1f8@Ls6y5U6O!Vuj z_HxQD0(vwy#vj6E41`mvR{9HwjB(N#$QxQ#=Ad?1nnXJ>-rtjS5X3*!5$b#`=!@jV zR2KZUxBj({(nQ{)OND^}Tu`4Nii|-1-m)l(J;VS4pfRp(3|nR(=KztI{m%F?9)JuC z#NKhY4D4D50C}31%Q8)Ka?HR6t#hq0kR(adR87;6b2dq>v5CpbvN4b)Hd`*+)|slT zmdnDsl4pwuWp3Y*8$hH5E5LdiRCrp1-(r@!rAyvM6`P zyG1oCi<6E1==l76nNI=ibg>xr(>&=f=JVRQ$#}ysE~eFBG~U`?J3l`?K6x-4Wke1D zjIl&)3>dVIokd$N7l|>BT7VQ0U4%N79aQW`#o@>ry{Avg1Pq30R3;q-6##^P2bwLn zqCj$=usq&b3;;*1;!Ko3P=sZzwllGLPT*7a8Bia(Q@H7xb z(;na`1lhE|5ON;+UJwNiv5*yZ7{4yvBY&oVcNhE+us>QFM1+uK0k2HIco&Ea#6;?Q zDI`Qjr&Gq{Lj(yM1|meue$~6N-dG?aG1;)}WksYohG`VfI`;*!KfvAzP-%_-1{MUq zUIkds6H<={yvZ>_Eh4`SwwyKNqBKf$Ax01(@(k2?j@Q44t*ZbZ=K(sm#ZVBe+J!;r zr5jpm9r9yA^gGM}RK=AEhL@cdh*lv3nQ#OZ@Jd`P2n~i>oz>Iz#MlF%0}nsW8EXK* zSi`_|-6ToEL0&y=)OtU1guvcUByz}%#w0dzt|cOJZQC~1+N!E;Vi=&PN`suF3EHe~ z96KOn;*{BN3&rB}{B*Xnz0J(UatQ#|+O>_fwrK!3No-lvW!;Q7CTnX6lOce!(`8jL zw#|4vE~}C`sq40-gzA=3N^L$GO#l0&vg*TL~5N$jjVlh4h%3+UDt#Dps8x-9E-*8hID-z&!0(j>P!8$!9Lb_qhCqiI$B~t^fZ%R;d+7JY z@p*XzOvokf`Y7J2{6t2fE@UD`he;y%ehUE|LV`}EnhLQ2qqh^p z^6dyFl$803r$cJ!q>w~Qkyzjw0sLk`LaIZ(OO%W5yr4K97)pXV*D6t#OgRLt(KRa| z=@V2g1|645{fw|WH;k2xCCcI~AHwu)rZBPr)=3G4OuvS`I_8h{aq4vV|I4vC9a5-N zr|a^maYK@Osh=z2gmKn<2MCDBSj&WHY+Y4pp0#c3h^#eD+gNLXygSUqWY7Q@?~ps< 
z#2_Qqb&baOWx77ULQDp1)3_{2>dINyHI6fK^!~Hf`HhCB%ZPd5gSHq=HDHp94CR-x?FxTM3lXdrpA{$YT((wj7STO(+IdKY5bcp2SV2^1*<)Rmhh=huNpwheIlmoEl zGlZ7VimZFp_2gR8D(p9;QF<#(Pho~x&c+H}6e!wjKv+CvMGx#WCs;tw0aZ6aeS%J- zK+R;Btgu9bpRgq!JAgGSi|WF}u!Ck?<9C3O= z3{jI5y`w*%gC5{Ma3alute6LJ=Ll3%JH|leRAk%%%HD7&4a7i#n7tf9>}Lp$VJJ8O z@fI1G!C3HzBce5R+oVZeFPF^NwyddjslIST0B93KL_`b*%fj!J0FK$(%vXpSFwRl0 z*8_&sg)?%jFx?6c1to*hmGYs10#^!V670&JPEsoTtE%SE|8n2bhi%W{_Y zd-bd`sUadJhc*G`B(-hZdg8{+M1)qHbWaRb6BWWk<(?7(N&a(vVfM^&rH|VQU&>w<`02nYBl;|-qfJ2b|oFk3zaLlfn zaRdS7Ug}|R%u%6tXN`Xwg2MaFnEc8nKq8c#9HpRE&>2@e2sMEIBMCU86j>T2(P|fz z%y#%#2aqHBL#IzMVysVO6s3&!)Uk?hDHL;<{eT=G_E+ewKbn_-3>xw$Hj2SP$H8Ok zVwBuR^AM!yFLXQr4Kw>x0sh6m{<}V-1ptV~qR|;b^gFXFrPc(nCSpL6*+y%G!&s0{ zZ+OJl$AyG2#yCkx=}3Cn8Uu{h7+>~dG#!GnC=86&!yrIhhbhRdHbA2Op4R&+#wUxS zd=4Z=(nEx3)d)+6RFD~b{dEZFiWSo#RST&74QfxU=1^6USc~dc%rZkzTCZ%LAcd_z z0^LvtRHr8nsdfW_!5?!SSXGiH&N-VTj;OAi;b7vZoiFBjZvfnqYzYB~$V8#*##jS} zoqwWjTYt1KFmuyZlktRFt}0?@7PDostcSzVa4^dI8F5SGnyLi{{eJ&^dhW<=ZfqYN z9iPsoTiaI**=4bCjcpu`H}Z^oN5_W@c03vmhA9HQ_wGASU3uz<-~Zv~-uV3cAAK@e zU!SaRq=`X<;UJr=4KD8P?mv3ea&!1-|MgejEZcH>WAEx2>~jnq;P`inYn4EXzFa zjV6P84{l91M$7r!kD+T@wAi}VT5OsI0s6h3YpHe4Z`jVv%J^C9fjwUXbp^bZh6DgW z_*BS`OoPf2dc9RqQ8FHwg+5JXQ(04>868FiN}rZ;zm93)OcF zA$&i)!5jv~*Ca*gNcdFu=Ih|UqB%vBC#jJ1gpUD;NwAT_Yv4{eMa_+vRHJ9{`rUK2 z5=M_=iJ@%NqgFz8ngQ66WFI~BlZ5XyJdeQ436R7z7}+rC=|=z{)%^-Iz!Il{P)2~7 zDiF&gK&Vx&p;4?x2z=^Cu+E<%6zwnrk?cf>S_LaEB5L|K(s_x2L9D*?^-w}CJP3B| z>J!lJqc{2ncgg}F{=`YqNey}%q(qevWMmOyp(_AunL+d%LewTGB`7R()?})NcsS;0 znLJ(e>vAPxqz9=4jba2a-p(Le067S(ZW{z_s|FCz*hN{4h9jnVf?3%%&XF|+e8NC= z!&*!1nyOAPbB=79FfcIH4H<;tU|d&iF|YDuxR@8znBDD5fG*3eqncc60DHYGNwTJH zFI{>1^zf)EXm{_a?F)GC-q8mie$v+T>T|D|JpJBZ{OIcL)v_oTML}(qWy$aU-q)wc zb8{iRvUBC`jXUhjPk!>=`o>O@rI#=5Yz&8~&F)lphxuS_vavjy9X@<`dUi%v>Ya_P z)3c+K{iFUMGl@NT@c8oOE7aD*^`6Q5pI*OV?ZLHYde)lBVAv2eL~Kki@1LI>Z*H$| zZEqdzAD^F|Z0xSrt|DlSHM8ZsKgccGswhXpNl{i!Q(NmTCATekN{R--0J}~mFj{u+ z*J4n6k^GgC9-(vm1Y3{zO_CHt|4mZj&+1PDhyZ%yVHmoF2_hT-T1VK?3seR7AnJn? 
z2Eobp@_+_I%_fA5ucBu_Mu|)!b_y(&zc6*w2ZV7j>t+yYEl^FBUoWh05&>t*L6XZ=FF{MflOlLSnFJ*L(*j*9%OHVgqSMNBsIEyLmwks;>4|ok|r%)TOkY|Kvmw z&^cmdg+j%>oeqMhyC?>NqZCBk+Za9;2cMXs)iZAi3=^?ILriI!Rb{>8Wv@S)&d$*q z1R%uLwboa9I6#1wsBsNAHioUunx<)8)3$A%<;LcVqDf4)xxV%A-Xm&xYjd|K7HMwU z+SP4cEGuKou-~V|EDAb5JDQ!H&lj`tc;n&0!N;H6eCG1Yo7>wT{Nls2a(;1l=hKgG zj>nUO!^7M6?o`XhwWew8oyQOV;E(>+|Lec`$LquO_2KmNc)ovhe0+48f=N<->#bLx ze)_394{lxD**QNudj7>1KK}TFS6+M-8y;l6)5GH=%bL1AKRDXEc!^rz7IwEUpHGjc z=cnrim~n0E5-!W4oUM#2}elVy%XbMe_bHR4dM`Al(%T)i-%Z)4hy#vN z>M}~t!IU^@xLBEK)R_pZtj;~iiWy*5pBtFN25z9PfD-m_1eA!f2KFJQ);eSvF*H)r z44@X%!~=KqU_vc!3UJH5To6E#g+W7n{!lC4FvJ_wu7utx7C>jDJ>~@7rtvh;J1EIR zG0)xthknZ>W5{lXjBoVU0|bJIKyc*(p6H42%;1#p%HaFuKl(&*YgjX=o|lnU9%DZ%Dj;pfgcImZm~N5C)%RjCb{JKbiCks+ z`adP~poY!A6Ju?vWI?&*Uy_0-(Ox2gt~0<|78bOf)pZTs49cUL=3guE7k1+{K_3Oc zh}Mv!G)bDK_THi~7J!ZEm5XY;wsCrPn&qiY468@s93>3>yx+REY1(0bR4wY|qDZnd z>t#BJwm%%U1>L)OznACRn_IwOEHJbL4$)@mFilcd)4{<>)!?K1hbbnFD~E&O?uCof z#r(m|#}Dr9TTJrNVE^%9v78Sl8>gpJLe8?@>EX#A{NZZ*X*j<)qsWI%@s;q2E zE!7A6ho|%Xv~TOG+}YVH%B4lj``O{a;nmAe&8E}o{BV71?R+}(Gj6RRqAbrE*EVg7 zwxbb3E)}0S`f@?}fq|q9i$k1*qlJfnI^fk9RlzY3l1|Q3iri`d)4LqH;|(G(s{Ju&5ZIvYpizia zNq`t)JrW`qBddzSlQo7QY9gxB8P3o&R;?3*5p>M|PYG~XXQNZ8Vd6XNARuV~&Cq}F4v4OFfDUQaNDiyPF#za|p7oI~K#Rs8 z8owY+0(|mtgYkIERgA_U)9}RngXAhY^D2rJ#6QfUUu_G}4G(JHB83U)9<)P?fO9Uz6oH*{o142Q$EPXwM*T_CmWyI;Q=8a?9L~;X zr-$cPuUy^S-dGleb5s?}s$M20%hF*}ZV-ZSht)t zbyHPUl4fX=EKMHVKkW5KXXhtLmW}ekd^TI(7(9A(`#=64{oOma?tb#wy@R8|+WeF2 zH>O2BUCeebZjbtd_NcDQ=bpc||7icUmtXnC&wu*bE3Yhz=Hb1^gM9FK z|M4qtzOl8vZLKYf*?fKg-asM0xV^nGUf&+9@20&`*30X*7!1d=^BEwVPmf2V!4H1= zXIr})+%_8mj ziZ8;o7$mRD@|BLTxB=XGUxNSvQi3lb;Ej_1V;@&T)L^*hgMgh=zr;cXDf&4-<|!5DA|kBChN>r2i^`R~v<1eZ`_M0icIh2rQ7pZV2Ao5Rst2=YpOhlZN|Z z2*JBSorttYL4(HQpb73iMye|9)47fi1k8+1_O+#eU*4BtU^x*?9VLg#?7dLx8dN}l zu=IfSKm+g&kqVIhPyLRt2u)YC1L!7Day<;5n0QXqKoM3?MOLSM=ZqfQux#NS_hgrv>?7vAEEn2>9~9w z)En^gExlxACLM)SH>g7h-T4({B3JRIy83sp%bA?K)EmImCozjj_bu3+|p|{_B z`SJeaM-L7jJ=)Kbr+dBZ!}mUU@x_<#KHMK<)yCRj@2j8xs~@~$6KtC1tU6ok_aEGT 
z*qrrVeDT@4pWR6kbMNl${-A&3vzwb+Td#iZx%Ynd-nW1EH^2MmfBvm+|1L96CgZZ0 zE#`%_Gc@UFGy-6Pq;{0#JpqV5lSZQCpsGh)vAJ+#&s>xhZj zu|`8u)# z*sZ;l4QEW6s9U|*OSjjU|-a}jz^zOBf5KCL`YRakK=;wA_Pp<+o~C!xS0fpdV6 zv4qfpcu|0hh$_C63qxooJWiURRW*L-iNF}YaMzzxsw)Q-EY$!A9BA$}%9UcWJcwi` zJ|-qZK*a1wf|I^LCD2u$Nbq~OGcz-h^CnDl*&uHdv6BnKmsssxybXJvqTr{cFvzl} zE&#`4?X_pp4*=PrpM#P0h8BG4y;IK`$AX!3BO-Yw2cpf?Vc{_^AJ;MHx`pUT#XwC+ z-Ua=OO=5j6aNGwe!hRS{876NiiTu#Z9CSPcB67rzWYl$FJ(YlPE1f$>#I19!Bfl;Q ztwCd$&{)ghRmK`)n7FCx6w~?HG&5;0%a~h4YOBUI?R+-dSYI0s`mS-%U{kn$I$9rX zCYZU}uW85Oa8MM>^RpR}rHbp)IqIFCxDVdH|KQ$vo^Q06+~0pp0Ou#?)?|ptjhUv zK6BVsZaJO|ZhiLg!NJ4fcsLkNoP&NpKR-EY%hG^#EjBG7qBZ8?g$u=Exp(2Bu}SM( z)wFC={$<4pT=xI~P*WQY|;>TqXl9#v-yL|4W|jLL2=+$}Rk@ z!esedYukm>3&@9TuOIeK2GCPZH46eMv%bp(~xnT(GP?%13&N=F|2xP!0tVHN4fucIA z1r4|v0*KLL4f7|jq;HYY#2=5yUEGxC``*ZZtpCNyCfPazXk|iz@mUo>3R#rkI4EcD z+5(w5O@gSNi^@Du8}vY@7IYR=q@DcdoXCzCMdzq>USXjs3UGwXK-gBT ztC<~B>#`)-SYI>9S(>`Kv6xUxOt7}LX-xL`@u6#ps2z@ad7dJhs;tnMq9`BlKQ79m zsq3a~Z{E6H%$vSluS+~Vt>!a_w%;4BrP&}KjDPXVcZu4QgQJtfquI15mNi25_|f5@ zKOE%!v-6|FqvOGFaOKLS^^NuAa{kS4eC^`iUezq$eC_qcbh?<%_AXrhw?F*J+UE9e zeB+Dxz$AIn6tjQtkN(ESuy3&0-Puh08F6}Yy1f4BXEn7qKD+*fx8GdM&QojVr_;$` zc>B|vlkp_YlVZ8pfB1N3cc&`KVqRLrVmW7WX=d5AO;s9Wva~mzOp3*FJQ?@;y{f9% z7?Y&r*jiJTg==bJZkQ!j#6+Uf))!o^Xnp+SPsAl6jVbj98{Kpb&DP`oqBy7 z@(x*4OkiN}bXi9w?#(AkDt1Y2LGXIi3MmzTf~1E#K(xH_1ES;7LT{=ad3blua{z&1 zvEqS=2i%C`yHb#v2>g()1Rz92zY-&czlmb(j{(qWW;|lFyw;Eo1%k|eYQ$e;toOyIZkjvWREEsSATgNOBHKSUpVFl#rs1oNf7dZz*>RF zWc;ak2!Lp;>17!ZCuzcrP19sqkGMrN0&P(96dO{d*k-!UVr`4-fl5JzxLFX>o;y(y8Kj2ZeGkUZ1465{a^j^ zCvSi5?VtU!{`A&6-~QdN{rSK7>(9OU<}crS?0C?|J}oT_a5GPG#n5Ao@?ZST`tx01~4PZxv3p#F>L^va&-ClS?5AVBaTC;g9MjCYKsIK3;6b1D< z$t({D#@By`KhY2~0{Wna-++vP(VwkpxH||&L!cPcl?mi%KfewOnc4UQt7!Svy&G?))2Hhgk~$1^S08Pgkr66XRPVAvt0;QIPyL z-5zK$Y~&~3?F8Ec%IN%)p2;fq9f5&OFyI}EsyG%-5EMH?!tSF($B{cQYd8qPt-Rj> zn(GMmh&qH5OqOu)NU6k>5(F)IyI zQY5Sv^>V%}%VIj4ot~Z@ot&PYo|SdATrLkD?-xZe=nsz%PR>rI<)Yf&*xBCRGsf0! 
zP1yFwy`q{gidogvdlxR|dH>+>a8cJ=yBEsVJv=x2!Jb-aaPD^-pgS(`Y=nbLX?RZBNfn@+=?pCr#51 zha+mqHO}@&Rn=tKu%C~%H~01*9VSV_qlc5e`dj$65wRYvE)wGhNM3_6 z##+BkzD

)_Qb8Kqi7T?JpNqT{l%x7t4|yAS07$sFHThHy8Kz26=D);lcUYd~fga?)JsHq)%_&S(bc0bEnhl!T#xk zgF~>UX=*RDKW7NzM8lJW5V!GoflKXdI$%k}k7ZyKAQ9G~uP?m%2PV6A<>HNxe)S*xgMaYw-s3FcuYBo^&p!NUdu`|a_ph5#|9|$s{GXpM7f)S% zx|a^V``zz;`72+%_RPh)t)IR23^)AX-h<3$cW>SA_eO2)Ah9JvwzfuOhW*i}AAh{H zKDN-*%SA8iO=qWx?Nw#lHf^5fuH|+%H^x>~+Ys*VUESE&25_lC1CZF%q$vS4ZEI5F zqdRCUG57oZx~hzD<`B?e(Dg!Ip~7T&WB~wSPm+Q~1cPV*44?rt1$e+t;gSPQ8xgky^+fG4t+^XX&aqL*m@h*`fYPmHWvZtk>g3LU@^ z0R%9a2r=XoB`)P-GSVhBSJ5^Rfvma^G>Cl!jb|`$K%wwWVjp5r$xzO#w`IW|jV<}q zk%ZvLF}FVQ!=>U{i+ ze4sEA2i8S)9@EvmTB+WTmO+bLB8TbNkBHIX%j}gx7r1fY)1ZSJ`^HiFqS+5WA(7!+ zhy!+hRQ9@|v><0;TxjLuFr^?54#p#JhsfcDyhG%AYU30&)SikO`~RF{*D?{XV|G9d zkONn@u69*fH+AcpmWaP=X!Z>&>v?uOOn(%vL@-}gYnv0Ht1DNeevl_nBar^ z_m2(_(ZKq~x=n0VRBhAlT)42ayL)nS`q`(q@^rAZesO+M&Q7Ykw-2f6PtWQH4^OHJ zwzl@xH@4@C_ULGKb~aBiLBx+fxn4AQaBzNbc=E+BzrDG>y(p?1H}0OE&bGHVo_Xr> zy*r_y~E1&<$TL}2huii^j zc<$xrzV=&RdHnd%ARnWH8y|hTHXQ73Plm(v8(;f!SuU50YB{gVMKd0(o$Q~Uot_;W zAKkuld)&__gMKxiw^e(5e6+WDAxo`6mnBKt)Q%XfB_@A3LfblP)4knGS(3C(>)N(y z8wPILhKx%R%fM-#drh=ST9l>F+JclUf-(>G^lD|PIV;_fS^;5dN=ntC4Eb1@c8r=u zAc0}$GqK=!DGabJQDc&GNa~!Dgz#A@v1S3C5=nY8^26M%uIW@L zkwSKN^Dtj2sGW+HQH&kZjH?}yD}@rta7nj0Fh!E3IiwgH{_U!K_$-W>I8mTfS@2r% z3zZHBUf7|G0CCG8{lGV?vtn4$3%hkuQpGB-nT06|I>?wP=L}|5;}tPU6N4CPUPGS) z9tslq=>+3ls4!IliYeSdd^Z8V?x=Ntkq8So)^JH@q&vA;1-#W7_E_}Uh5JrQK(s*= z$73mUO|wu3CVf5xiH$Tv#8DiA@ONM+9Z6Bc8{8`;bim?6gboEt6dnT-Sa?s2w5km;G8(kjSOkNNOh69EZ{6b=hZinmO_E@e5MyGp$#`ut zShrv&gUKKt8OGEm!_nmM;20cUyYlqz&R&+K&b3Wdr;z4p@BDOj{ga#f50AFicP{K* z{^+CYj}A^BJeVFlnjao4fBxP_^JV?+2Os|U=fC*nJMYhCwaxmAs(k$TL1vS^-MtS! 
z{@9uH#+^qWef;Ug%X`DM;rV=eGM|3$um9~=zW(KxUwX!HNlo$S!JR++dw=-P|K-18 z$XOcLH@BW*$oxyK>^`*R>|LU*5hxOuje)}t5`ofz{U2UvyOsCV6 zqr*$PYwP{w{a^j6X}F-K1a622gVE{f$z*MOxPLGn4g14CrO@WLt-2Za(0ygo}JGS5Fu^q7MR&JgFJ1^B1LSgCP8b#kc07fZ82Y@X-eXu zcXYp_!W11}&u3+?WdID(WL8%dKq}&eSU5%ih~!Jv8X(-T6Q}g0eo{BjMAqt}8Vr6y z-262Ps<@Q?h)@rpo_&k$QG5t8Dep&=iS#9q8?WHvjQdal$^J)3fD{ZKsCVKyV6=9M z#vL8?rbv-ZwJ)L^zzXJcRIGC)2dpHD@Ft|6SbY=!bv1b$2xf44l$hx)bvH-m!2LOh zL8Nmst}FJUOLc?s4GmC|schV1zrqNCBA^xvbkQszOo(l>4*_w9RcPup2=Eg#ssk5H zN-#3aASIq1_eaOQ+p#cX!T1GMs}JqGrTPN`d65{z+o@XwDiB_D17rqJhH!<@4V{^8 z|D<^kp7ZNM6MQjdkjx%)hsXf2WKi^p3?Rj-!HwmReCQ6T|1(xh2dUELYpPovLPRvy zT3`3=O9DWbGN6nk7-JX+ED%vy*2Qv3uC)Lc_bx4`3m|IBW;rdX=F69!+PiS^V1K`= z3MOaJ7`AQ0i)rZ^7!5WrUwJx7^V_!{^s@EiqvGag`*&_1o}V^rBx%J5nEGx_O zo8Nr`xbaZ(8-hFnkHX2^La{2du|Jw&gr>!xc-n@HyHggQl@t`+YTi?9* z@ZoSYsET^AnDqyJa&1$U^Vw;+Ja5~!Y+5pwfL&WMRqJa*cI|w+=x2ijv()ynHKX2u z+ct%yDeDB&wrPjGQC&68Ib;2Kkf?MHWe;*;dx&5FEP!Fjuo~1gHo;o}#STR9QCxo) z>z76XS~>90e?#w#J~?VSH`kR?%*-UKNP#gj&7cn=d=%p9>H(;3{hIECxUo{5j=n@M zHY%^0C4U*MpkZ`w#0R1-8Hh){Ap;lf1Nh1hwGhg`LR2oaM{%k9l=!E%khsp5RhFSEXyh)#{=%v+W@dG`U#K(5qLHH(CKhDoUA2*F+b~w2x5(Z zPAG!zDq_0!-#7Cl|i(|ShUn~+mLfqR?Fddm?XA8=yB_o%kts9 zhuhmbiAjoCG3pI>Hn)!t4vzMZ*DoKLdhh7*;UE32 zzjJoFyng-Wo1g#uC%=04`OAAxU)+6k^R`9&=?{MJ<*$AD!QES5ctmWYV7tmL};7?xDI_YOKcH7V2trDlDA zv{rn$KPxp(Vfk78G=;AzG=MZ3D?xU;dbO|Acuvh1cS}xjIBCP%(0FWGKs{7yS zGdjCO$=VuUABmk|1+fskI@;j36xTgIg0=#X-{O>YJY;yJdm`n)vYZb%7W{RlOvy-> z@-W2RS-I0ZHDsY>aTWkT&$*WB)=u0NWv5pEhm`XnTVFH_hLNo?h?WRX&(3yscBUt% zOk@ytc6JAY`uu#pK3?11+-vIgtUNzIo%QnGU^J+Ts;L{toMnA#>ZaxXBrTUUb8_|C zGyQ!0^B@0YwrGs)PsSUGEn8~)S^wy4T2zbAZhpG8zJ2-9Wpdkh@7)~?$De!c%b(r; zcs~8nm%jYfAAkQZfBB1_KmX#3@BZ{xzwxy%r<|Oe98J@-F3U@o_ilfBRi7scB8=zKa~%;$qizL*vd9^%^O`kj0C@7%ck%(bT%{n^^)+H5vm zUt2Gi^ECr`FTMtKalZ__@ zAw%PR&pJH;WuvUibOQ?OtFAo=k9*Qi@CpLUJ&6!8CdL(J1Kvbq-C0=;p;xjc0S0#h z5JPOwXq^qJa58gDqlZTZ!Hs5_m34PXupVm^>@Ic2BDOxPDOYR3+T9rQ;+3ZZge^C* za*g>T&zyMuB zA=9ioyM?=pnC^;Cyykw##ICTAxpAlk^uw 
z=@@$b{)G!WJKJkteCyRWUwz@}D_72rPe1th!_(>Pl~+H1R#ZR!u-H=ck3e5?dM;5>Ez+TtW z2Wh^4Hajbpj}8tlT-`f6KIskeyq|3EZePD~yC!FIyDTdpNHH0td9y4Gfi)Qr83*NT z&TW$*)Wx!@%h`N^Hp#M_9Mx5oC25xB(ym)V`N6K z7{MU1Y9KKHSYxs*&3k#0SYUR}HBD1jRns(W+cr(xv~AnE)*n9!fY#U~u{N=3YD|J) zf&E54Cb21^(RBgni|RS*^$NTT`UW_UltWS8M6?c)sj{sF>AC|7DMI+W#f^3YDFgsS zji+?AH}ZGY4}lvgL)TaYcs=rOcpt<&@xUCNj_6864?XbWRXmH$h!GXfwFMJm0SE=h zAl7!U?!nEH@4-O&g${OjCb7nP5QC0`7#3y@W+$xI)1d$jwUH5PhYRHY#Lojh3Zj1NzwjutP6_9uWPVT zeWs{%o#6`#_Cx|aSjDJ~p8XqR5LwGpJv~G)h>5X@7cd_j=nwK>|5G^^gTvW#g^XYk=_Qvo0;^WV5JeaKS z+`IRvDC_-$ll_Ck`}ZHt=Cj8S9wg|Vy0rE6FTMGjUw(T$&aU77@YPqI-`!dN|nAx zS=7_#UVh0j-M@GD!o|(|_pk3iyz%NAueMdWwJ~}8VE^8&&p!Xf&;R+K{pA zc($0G&&tp4JmMsu&gP5d{Qjf;^F?`by2#T0ARj-xe{^tkUbk4cu5}G#GnouqR};8p zF>Bk3sZA_b%PG}Oy{!9rKkp67(ixkqt*sSB+3$@IY}4>$u$d+O^Ydw*_n8R{qA`tY z7^L_e0S#L8RhnSXuU3lT7MVm~)&oz_pfzBONv%l@nyReJWl=86x~!VIX&cuzZC%xM z-IP^TR`s$dmql5WWl6V)T|OA~{m|T|YgJfS;&T8R>vAqyU1oBc5hvNtBwj}@ET>l=xtqoanRN0u@#S&K5A@xz zy4uIjJl_G(V6E^3sXSA0QHfPof^=mDF#!8BExUt_;`;f_Q7C&3QsMVNG~W86?0YN4 z6KD(|8e_dZ&^=TASHUC}3=+GtD#~){4PVpH_SS`W-+lk!(f;RPIbVlrH3NK3-g`H1-Fp0|-~Ef$x$)ZAfmGR9Odl&ot7q0Al|F8e-8(;q#G)pSx-~8>by!-Ai z*EY9_u%hb0-Mh~}_guN0FOH7ieD$Rt{oC&^msLL5JTIzOKL3S9Ro}jGV{dbOa`f=d zXCDrFX}g?SSQg8Zd_Zq~>Gcmjc<1l^!@muAdvrFhT~ZZp*vs0oIXPP#&8qFI*KR#J z{N+bCe)NkEDo59zetJHeAws!aQtOTm4-7P^ZC$f)Zn2yl7mL~U`ewDLTjA(JBY@3& zgDmeiEiD#BUAH85Btc`@7ykPN-quQzCy|UIFx6#+%vqZDvMfsz0Bl>TtJ>Iv0gScA zSn%Qjvb|zR0(j#D5YBhxCu)XF@Mv abv$uGyHH%N>OM(j-Zj}Z_k9*Yan;qH|C zP{gx6R77MX(SIiL&L6DGC)_x`&wO{wwYp3x32TW*2+qJE1^5?(-3rNJ)#dMwUUA zGe&#PvFuUakHHY;?xz&qR40KIt`yxv!*!_S!%naxFk23A$fU2sB{5v1^gaSQF^9Fz zGPGTbETHVbg_>r~?t-L!!g3Ss4u#$EqT2{sg3TS5F!-n;QzXNp#iyWc?s&7**=kGs z7REsaPqh#c{qnA^t^p$u0(mQ7ePRzB7_{vCDLV-w?TH7{lzc(m~3czOB z;OOu)&jy3x5EI+$^$ns)j4?Ruj~3IST9hfKr-x^EZa-cdU%G$y?0et)NwK7}lf~NF z`qisfj*j-PUcGWYoweNFxqt8OqlfQ)^!|hW$7Jl<<^^l}^X2mXqx-Yz^h9>FPw|@B7Kdzgmm*n%a<*iRXyK?cu@#Fo=JG*bZ z`uzUG2LwckS-Y}(`Pwsg?>#&|K796>Yt^zG^m}ztfAF(kTDP2^JbL4`=WgHqcyDX% 
z`KPZ{^>VU4E~_OVp3mo;*k$eRA0D61s`;`_(|mVxrzw_9Zf|FIKAV=yC4yllb0Gdm_(ETP-!=zdvx*Ra$< zx9ArGq)VC9-}n0}`I|Uddcn?x9rRcsRkd7@o)qVm->x$70FP{#Ae^qH>L136 zHBJXc29lU%83sdGA_@_^^oz(Oygy-#ZS&i!`kX$7?S5Dr85|D@Q+1;h*!66D^)jfl zA{(Yl=z~F?f*b`zLOIyU4grcEgpz4NVs&DT^dHo2tE2kstrZu`JBg?@_1N8{MGBO3 z>ml_)8A6*Oyq1)mIH0aA399_q*jzT z5~>boSw<}}kueq=5j$gzbFE=({2>YeXUAvdqT1QMV2o)BD$8cPHi6~RF)hoomt<94 zFRH>C=%u6G?F&s+H$+^UrlJqt|KQ={qqa49)60q{l!l% zKle;s)Xve_(jCp_SyH6N^patg^&dYxzJC4I&h`Y`c6N3=$+Mduefsrp{>IOK{7Yo| z!@u!+fBn556ep)|zx_s@(>p);;H%$w8>qUpxBK|u;Pk=cCV}nU%UiowUwV1*=_l{Z z4rkY{KC>)Zh1|mAawUifn z-Z6+LDvqWg7iddNXb~!;#*Sem5NcSMV{l1rVyMj3W@&QD19phBp_e2K*sm<7Gq2sz zP<@q2g3g{lffKNPn>Zx&LCqfPq>Ge^42q4Hs4cT!Pa{Vlc^l=32K@m$UM`oZ&CnP& zY>Z(dMB|;|LEcA7PfyPGE?h2)^6`U52iYQbWY8N`)pC7pqu0--rwdo( z+3|U?ERRlRO>5tM|5iF2>~3Akd%cH`A1xQl+zw{bv%}NFJj=iSjj#X7cmKj>HcJM} zx@laSqBQ_bwaik}%hJSRv6!Z4b8Eix*6XG@+t^vJ9e?uPhu`|@*Q!PP@kbw|7Juv8 zUrsYR+Q0wW>(5Nq*NR1PcyjdAOV5wix7RN`RnC^5eejFJhY#Cnxw~`G_LA1NAAR)U zwTsWD1pSSz|K|Vs&z^q%^(FTI=|B7DZ-4$vpWeJxRn>5mVOwu)jC-lQba8t)$bqX~ zmJJ8PlhebEjg8&iZA@`*)9~p`APBA!J`9Vn&fGkHBGZDinb** zvJ%$&@+wDiJf5SLP$LUcPE5hb_!1xqB|Db*u0IhaVv2~RoTCK*M&#SenF)beMH19w z0RY0$9UbI{Zjqd{#N%${VG)BN9K;uRnBsxhAOER=jvtJG42WPr*R$!fJ4Q19n0oK_dVEk|Y@wWjGe664wfcTFlFcG@+R&zy9SaO*; z76FJJPKOWqsWo!Iqeu0X^M3T8H)J&N$X}HXnI}{~Ln$__j7-<;`F|)bt_suSukOAr zN3Xbs$aHUy6l+@rc#-pbIm_5_< z6+n5OrzTw#OL7j4Ns`31&L)-|Az0@)O|tFv3&3!8I}K{^6tj)Av8RxxTppoX@63Iv8$lZch5++qXY^;koA?Kf3RT zUVh`PU%mUw53hfi_xr#mNpeP$_w!z6QUh7S>x1E>Kl=33>s(EL`|UTYV*2`*UW2lE z|J`?9e&x0Cc>Gu2|Er4^wqAVx+J#G75ANN3_S)6W^^I9^ynbP0V{E-8M{9pfX{>I;~a5_J~ymxtTeZ8#DlO$&(XXy3UUd=6}X|E_+;G{noRV^*5 z<<{P=Gk9`-Je~~mepuS=SXJBHG3^00itpB%O6TO1~qM z*97N?2&}a>$+E0p6w9J2Nw@2CUs9*pB=R)O&K@Sv*0#np0 z*qc=}TIy*6Itx2w<%s~D6I}wh2iWM?0LiOGb0r$AIu~Fg+)4(@XEY*j?3!58qD<}> z0M8E{k@rFcQA4QBqD=k ztF%MGw?Z`BIfSQU7guI*Mf$n{9~D=Ad6W2I{HqU#mZJXASbAXT}h6$0Ayf;Lf z%}%DX=`728bw%Z}+MH~qV5+JzDI_Kb#zEdYIXxW?x979!?%n&uh=>pFJ|0gtj!x!{ zb3gmphv{H!(%yJDu9gcxcEm_{K0TW)s?UGU 
zp77?{%Z}ZVsZMzpZxG|{q4Wi)b1xg{^|1U{6GAU|NU`4 zd*>HFfAy8;b}#KbJh(rd&cE>0FD**izI+i8e*VXQ{qXwzt5>eP^TEfjy!`n;{_elH zytlV88gJ~4OUVB0r=Q%~pOsbl#kW3pdU~8^NuDLeV!oU&48m|S+1S0%&rEfGG92W< zP*%$qUw*dVx7)*wwe_9T=``;Ts=C2+;1EV@li6~-BoZCS}zW#jP4VGApPXwg21>JTA$_oH1wm3|U?5%{l zn{1eKX4%&j|H*&-ZDa&AfNbQX^+2~*Ivfq5u-AZEI#F@3J02DU_t7ZP2=rgx^!HOg z0HdP-{@~*f`s9-R{zW^P?4}3ow5C7v2Ox80{g9{*r5D2VI!LcKkCK(Wz#5$>da{auYC4FL3 zmhED(V8FU+%98rMQ7`Yix?xAgK(%O#vdEGY$QBi!pBMM;Kit{9aOdXjt&Oc)x9(^8 z;ObM){i}cVmo7`!Ha1+{meqV&ROw*Y5I=mhpY`*mX|C)J{{G+m8|R0QzWco&%!_7w zW2dSXiN)=W@gPYK4jv|nxx9OMb2KzeXJ-%BC)w}(<`;WCesK5RXwd)m?|tY0```bo zTX%1NOoo>)UK;cU#`5Oocw=pt+9XYLlcdwdi~ue@^?ZwVG#M|;0uu&=qtla%S1#`C z?)kM^O;ewqJ?;%`-b;!3=x(diCBM&r+S=ICQ^*S(W|hMtF^VlfpS zg~Mt?4Pb$qL^t*sZY?xm(!PESAo>T@qX_w1cJH_`Ct-hBJf!Qn?Ay*uiS-}>BJKY#b9 zvM5R>hiJec134$@U(mP> zImaxVk%En?KK>I6<5U1yB8?CrGe}FY6P{@aD5GFHb|O5$-kU*x4zSl7V`Nh^ zg}(^NEY)kM@d1oNuE8ncUP9&sC2;NR0^-)JdgFA5X z1nXL5mhqNq)jDm$aK|tdPaqORz4kwTgV?a$hi_=u8bddH6(&h?hyp3N#Ex`;Sjnv# z+Rl`OOA)~D1p=&lZ^$FjKY*DD|MCC*+sqPCliy)nWpwqq5&Kp~U4cE0g1~AF?5$o9 zrc=L)ViP3rCo?Uj6v#g05}=<|#5?m9+7%6ECmMq~)869xb6io}WMM0asEgpq=#F@P zISgx?m13!l2_ogIHG+A6GEOIY27wQCeUVt0Ez`e*E$KAAj1q^umQJXJ=Z7TULEAwbapiAnM*r+m#*&3XXj5nb8R}Ge)Q?5#d-OSZ~gYqfA-^7Uw+}E z_iqqafADwy_Tkxd>%!LKhmU^o{huY2-1y|4>8*WsZ+}+Kt8)2w{+&Pi*MIt5Gu%8n zpVe)>vAI#z?XoB_$@`h<=jouI0a4p5HYO8maWv@2t;@TX|$az$0G&}_snd?8(|2iluLXe)R1@BVW

`-JtOgjgR>atTGAd=XQ zvR8-!KdmYZ_@=9i0AfnX|0W;+#{cTS{2c=b#sIMKA!gsJU}VFPbPT7=gF0l|f6$y| zBq08`Q6BAQ(L`i7;L{8Ue#0TZaTSUIFrA6h$8ut6Qe<6x;BQq4EhWh5+YdF zHhz?hF<>mUWNd0M$!y-X)U@s6{m19$XQT0$33j$GRBe?Qe`=LIJD(Y_qyCz!c{rF< zwR!K|>({S89!=K!{r;VscT<~OyL#!yr=QKs`Mfy)JOA$A{qW-tzW3KZzHs5$Vp$rX z=bnD%e0EyZfs>-P=&M)okyz%;*2WKZg`^kGr-Wv{kxpDn0 z+gR)EZVaE<*?xTY`tI)LOV2->ax*nSw zZS7px+56)^{%3hU*t&RO=jw&Q`r7fq(VzYI-@UkZZU20EvTSePdoW)v^0YUfPoH}3 zh5ggBn|JSD*uAi`xp8)O+P0J+H*J|%Ga8MI;eMWPY^<5g)OFoUvwohFYi*JopUi*f z+uyl)`)1j;Tf2K~&GzQSx-Nj*@!DF|QkL|3c{*Mjf+3SywhYi3s+k%`HGxY~9F0aM zh1vY{^zr4Q!Z5sZo>p!IGbqN-9VEFOa@%i80>G)ig-b;) zxWc1FH<%>g1KRnodZC4#n-dj)Fsj_nU%=LeGQ_ukonbSu_k=NedSRA z@t=4#DDg*txUEAdH)0_CNB_5P8qmBeN?O|+01Q(o9qt%W@aU`-d&K~ta(Q*3Kq?pj zB%O(J05ST|q{n%)atBWz(C-hf5Q-XSQ!AzqktB+maRy)_2=-I14pPuz2wLc*Gwto= zY6h598p3T5RiU~?{o(%NqiAfd`)rv4qZX1ucil36Xmvw$1VnUgi-^V`J7Oj$*FSjg zlRJBxd&p)u>i2u;?(W`VdVX+tP_?vNw)t?}8n$WL6wA}&hjm?D-P?Qlxff0jkEhe~ zQIAq%>Z(aIwA8-#+|!HM^z`(>SKs>lo3FfF)#dlU_ZPqMmCrBg+28o1f2U^q=YR6O ze%gC*?{+WiyQ(}rI>`qEm*O)oJ^#}4FMRUe`!{afozA#~WL8(pqFozrJUY0a_j+qv z+n?ROb9{P!d3UE;EHSZ{FI*&U08(R3)0WF(W)b`OXfVi=#8ySIv9)>U?t`y?<6DQv zXZIf7+1|cTR&7fJ#`b#IQ&%pxbu$=G>bgm?WNmFSnT*Ec5nC`eaZO=UN)pOaG!~2H z!X(@u3>xQ(X5pxDt|4~YI~R|R_KUJ)Bt~jo%fJ>5A{b-YkppgB>k-{K$H1 zh@L*20hx)7HS(vLhT!vdtd#0b3<-4uL;hu4tPmMl_%pcI8j9Dih~9-(;mW-`kQv_K zc#G(KsMSOW?;~{l_=peTwXPdrfr7x@f&DUD)D5)(nEV!-A(j9NLG`|%E$YNGKz8U+ z%CA8-ltGo@2_{(WiS&ypZj=we_<#Ao{wA^!c8NB4GU@>6@R{J@h?^%&IJjj#1gN3| zx}%{Cn*?_ZFsqj!8yjdaDBL^>DuT#e2aXY*tm)KNs;v?^B?k45`3V44Qx*nAZ9?cg zKqw{F+=p+pj$4pNSuNLL^V)EIHgqhG(jzIj3T!DxWz}REe;ziGF@~8_YmKlR21bJk z7(*?SV?=gD0GOp2wNRDr^kjbb_WiA$t!JKn77R9R)3go&ZDP=*ZB0nPkQ_dqHnpWT z`Q)QpCnt-d)`0{ z;NbMq?nP_NDl7t=U+T{{IDpO^V#gB z*I%tlI@o`7bb5$39rf4F=cnV1jm)GQ>ubY7f3ch=X$tHZcr+f}f3W|ZzxlU6c=v<% zKlu3SQ&(KuUb*trC)cmg4_cwjE2+F(J_au*Pu0cRPLnuoelj5Oy&MO7rDDA+{U?NlN~s zTeD(!)G}~%0yN45zOq90hv+vJhh2IFh0Bp0PI>{_omyO8(>V5jF{3i 
zZEI&RA#iosK6-d?ba;O4scRQ5UUaT4io%*C%lZZl8?IWa>(*6l9_`n1ul4aw%acFT_RIX{)C!^6|P%IZ4_`o>^3oeul<%7shI`FYjMHV3`m{`yxQJiOl!O`*I@$uQkiz~~^czpcA%dZ$?`-9%2 z2al4(T)cSY;p0b(MRDokg}uF^NghbHo5di$=#S9{9o}3yj^--QC{00#Gj#3CrgudPk184%A1kS{0lJD= z2z>(psMQe*w6gY1KmkMQ8!!4MY=pE#$lc`0BTe_9|jKq#h@^% zQ^shzj6>WJ*1Bcx+yJ7h)9FT-u<#HQANBu_cFc^c5$lFT>NRosZ*g0#BUjpNk zQGRPH1OsRo&{a&19kh3D-$OK4pSm_04ybj@WtpaVVsd2522W0p8Phx;FHYOVlBd)5 z!w+v3Rej~^wa>2KY8#j4`T2A@nXH|joIH4R_riq>7k9R@EPeR!-v0h0D&U3Zo`3M@ z{(N!%*5_ZJ&deQxvU->9)rcV zzWl|5!^2NL`V7&ubz2sV&8@L1f??+K^XZi*)+9FGMo2?X`T;9qqVj1crqM~2YH&+ zbye0=OdVnATrr=YFP77mTF1`Vq;(CVv03VnjjbTHK&^9$7kRwFn zn6oTznx<}QKp}J}B+o+w1uRs4&YfMHV~D`3ht>QqhFVVs(BfmMJA+u6C&dxQC_D!T zSzHPZ(RpTnM4EEe6))UtliG$~cL)GF<(M(@9EmwH%g!!Z<`2^aS+lZvssQsKbF;?G?YSH&L6| z2|a0h3xvhYSwJ0aB~D%5BwR z`w>B(=>=Kq>o<-9l=`(N96dGo-sy=)JwOE3XeWYuBnC4E9mDYJ*CII&GaXn=#{=kE z92J*<`w;dpS)uyMRn+=-yabHal=UqG28`DK05SQva8=i7l6YB9lY|L`e4!=BNn!zP zRTF|mPN}6&Z+w&_$#X9}$K;T?Y1*pg)aHh=#iA&ydTp{^R#-KBc(7a)@Y&6Wr>D!T zmpy*;uxEQfR5i_bvN4%#K6w1FmnThG9iJZbvgDQ5UR}=SkM|$V&gQ#2SMJ=q4Qcb* z%P-gS+5W=^lW`xxBw0QgjkdRT_8%V*;n{q8e0E|8)+UodKi%6}yR^4E84vH@{Ipyg z_YGd!yEI-O>_2*RacA2wOeW*~{nMSz?bFl!qFziUgG(2;`bkok_0!Ki<(OKwvsp1e zUyRpAj@-t^W?91F{-ep-xM_L1C=Sj}cP?K(KRIoSCeKr3b9Op&#G}dR#?6~opLzQH zY*ExjS(J|+9dg2D=}ejqhQqhM@C5??`7eI8xwUn)zn`T2G|l?MjN4`~$olz!+hjZ$ z7EOJ=m|wnhwJnRRmu9^lBWX{Xw5yGZ^GxiP5=s z$y_YU>GA2ovRW{LO$;C`$^`=_NrHfWlQ`X?(lxFoiSKjUIsoAWP17{47106j7J3xz z;97jm-P&$ZGz0`>CS$@f9l1b3TsE*)l}88*;yc7l11}sD+&@`&z6NH{)z8q?T+o1q zBAD)ZrR<)S6Gk$<%B>`$8s6+s!oZbzMa>0jAu9KZbFiYoYS%)KW%RtNi^=TP4*l9~ z7+8m;v+-Djb&jTSB_gEz#OOyPbIH1Dz(0XPQNN4Ted~=baEfGJ!-^b6Q^ZUP z47877mo$vR$(b{mRt~7cT5L zs;i=Io3?STH=Hcz#q4}J7>@e+m>Evbs>PCxUHhx=|DtZ~*7nY=o1ZPG(_WGqi{r^; zFj&8F`_}gEg~fcjzyH`#i_E=2`qZUM*DhbYbNANGyAPjx`sLdXK07@-d;Pf=wnx3$ z>CtkrIGN8!!y!4^+T1WEnQU&hh{s1Kv)QaI7we<@;fm)N{F91ach{=-LEp0$o=)8%BeHl58+j!!OMyLxoI-|vslXS4m2 zqdRvWFYEfHSKqjO>)zqX(P%WWX*wSEi40LQpPr4!lQhN5_ADliYc{tw&KL9T-Q6rn z*|lpMn?{$y*nkHG|nljG-(UsNg 
zbar%depuA=yqB^PYmiBj!dQ69Kcp6aoU| zvc>m`n_=8^O2qbyx}Q;I~WxYN=*!EN5Xn56QJ3zhcRos z_0)^2D~SryorpPuRr!IrfV81qfkRN*QOfNDLi*0z)j%V(6~8|J8*}WF zu9$*S$)b+7VaK3AA{0mDEe|I8WjAEau(}Nl^D2ZKVwNl_K&qX>(FfSY|Gc<%-HY7) zK&rB%{;~DtiE!r(@@>VEy3PHNxP3ir!-#Dp%gkj5k|wPf9Y1LsFNGlU*&IMus}&&< zlcu?Jsz@v`OJK*SM3&IqUT?noU!up1)9$0li6(O<@M{=&Y0O` zq8Nw1+n?13)5)xzUf*1a#N&g#d&kEbgMog3F>UH5#$an_fu^&GaddTY`Ss(k>(<@9 zd)K+i&F$5rN8cTMs3wz1JH5WRzFn>+)2VCgb=M+74#tMkQd zaddR&pML%iO!dc~eY{=|i@oC)Z(n`&_{p>9Z_(Hg&^kMtPx{U)`1N{y|G`J=)%Ev2 z`@#?cgg0+ronKsj=R4n>%@&C9_~7W|-9^`T2ZzUZ?%nzD(RYaO^5*vH`l@Xvi-QGQ zV;nB#v*~nBhN`+E)DQ-R0l4q_%gf8R-LMY9lVdjQgA&5v2Om8%N0HTfNsLGsf^V7z zfj65C5(bGw2uPesRE@_;E5Fzsz^-4lG8td*-X;4zT z3OlrQ%XX8lBMy(1KJQCJB+SvYPm5n$!W}5}J_!l8zF$U#a`2Hy%Km-2z9|?guh%gI zC_%soQ@Z_=zw9{fYOEOAia!@4lLi||fw&YDl=%W!2AyQUPDTd*$N%xK7zvq*RTo4k znE)s=jM8Z(#W)=uiXHd0_!OAwK-~Uwi~yjdG_&SJt^v(m0t$p6yP85<-XvZBF@Q-G zrG@rN-6dmT!1l_HxW5x4mj6l)Cp!dN_rCKyKl;HBtux*a z;ytr7+;nS?j>*}oa_i;#>gpzVBSUk3vH9D-`_Jg;kwp{iYF*fCEgtsyih43Pj3gezX%tc~IQquJ5^ zouk9UD1N;LB8|IL5+4}bIXqvLyx;BdaEtLEzRCirlCXR(-0?>~4nolO&>+vIy5L4fryyBPc0liNQg0b?~OH8HYYoGP#OhF@IG=h$&MrrX$LGz1Jw9svN_t_)H$)e4SaNa?A%NX zno6feem}oCxBlqvxojQ#j0YKl(vIG(0s@e&{|UR1#};39d{)5JkvFnFJAkC+Jhopm zMJ=h&6SW9AfacCJkoRMm{3Ui&CDOqX=`VTW0cc780)WccHm0D8|A+tce?e#n8Bv)i z833Ye$BA;Wa02#}gG`qp*AFNVQ?YP>qh$cu;esF`BZ_?0-^1bY zLhE>YV5buO9_+qwbTY=cB*Sr9>ErX`1{j&H9iIRJN2;`(_fEu%s9C>o zOcf-9kweJjeP9~RmXAr6EhyT`0IK&YhU)XP%afCLpMU;68s~g-#mWFc6xESJiEFC4@}j3Z?;-4jiH;X>!I^# zEE)D1qDqW0#%RD*jq5j^?>f|8HGX*i?(+7!olX_Za<#g-y*fQv-MxR{tnofrLq>60 zS9`OzX5HVL2R}4zU9r2py6O5ApbndWAXVi+V(15B$Qk2@<^FuOKb-~d+3}q_cNop_ z!NR#_J*;MnNmEU%tF5z1{5X=5v6Zngc*8cv7$mA{Q`ev{^u6~3DaJVTL$|tJc?~zW zHs22$c0p|A7W?~v5G71!dwU1_^ZDN1 z{$7ZlP(oZUHy7tuZ#J9j5IP@vVzQM3(ilU6u%RD_Ss;WMx=lw!qTpkQB3ef8WpUZ8 zf?0TyyqL5^w`s@icHk$gpvEL|MksY2nxY1Rq5`eu@H@&>07|6@-LOf4zUFEZ$xO=D zg&VtO=~G*dBzF`_z99mN|HJ?3CrpOXq5@MEXC?xI6b!;x9ZUpVdPTnmHIXWcjf{ z;^o!VyXERK#0@)E)HKISMV8B3L<9rfreiiyyhIg+sHra(1XN;ic(vpw6Ujn|n)OXe 
zAf6X}(cA?>U`iEdnjL(=$rmE+MElz^bz=h4|m zgdI6INNt2kYBXKLEaH(IkTMeww&6nlF%m2zo{m&ghU4rnXnvFGR)T7)%Rd@iEcD;F z{&sHRc)A7uw-M7$^f#J@V#Y@AV0(V~ShQV0NSJE}(zy^}8r70KhVi4*>P+DRB8DMY zXH$NinM6PVLR3cMY;}Hm_0=D~`SDNwL@_e6K@KsjRzqc5B-3|3##q&C*(l=k=V#|v z!_(*IrrP%dUEgj{@%8IhLmyVF<%b_WoHnzUuU{fzZ7a*w_3}a#VhABbuaS_0hBRp^ z#CW?}U0&aOczksK_~_}2Cqy)xOs%z(c2XG|qhBspK1RcK-S>pRisCmF%Y(xM145)t z$I}I@y!$j0| zo8Wy_*D-oS76DYFr~;6pIBUW%XpCVPRFqIu!H`?8u9r7Ae()(=*mUdbtDE3GqUoY1 zS2^bh43M?c=6bn0IeB~X`mF+cpy#KjfTY54GiW%7(DMS)JT0Pu`w9jXLy$ zM?wYl!856<3}FZ&AqHYH#zfJWha;+}0)c8C1g52R3k#ej15o_5anTmv1%MH0J1D+v z2Lytm(+&VCsZ?)t#Fz~eBNG4&!+^yr{%2dDY_+joLq@xhBLE$l6G-q>+%_EX8UV*3 zj_(Br87X0hUMUzm!t6F27;RB;p+`?L`{`Q)P@t?(6b0xK=~2?ZPzL*yBaIPQ4g_>O zc!K#949NC$${Z0XTu?@K*}AQ|W9^DP8*x9`Dk&E&y+%solV~1<3jld0avC|TfPexb zswVoZ!xN(jCBz3+WrB?Rszd;oWaV$}7k%Fqw-~$j6S!90k z{N-1Fc>L3!{?r%9GPX@6mHMy>(l1x*7&XS2rXd<*LWrthteedCXY>7=i+6G& zj~;#U3%yEk;<|vp~&42?imdf{j6;XRly(5_i82vFl-EH zTiY1CAtM4!-E6wvR5hq!j^J5QnaEZrSOk>rIf=YSj-BF0pB5 zpWHkA_+anm=2k2FlfU@hi`Vb|(=WdC5xs=di*?)H?>g^n_33Bd>H6+=x$e5IZknoT zA}EqQ+&ggAy*_yba;fSc8lW zfpvU+c@aX##^iDCfQS;KSZgF|90pJVL`vH%0ZkhA1fZZK869@`=_w| zz!C>2|Mfe8&|!l!I5B*u~u9^Lqo=FJ{Qw)I$=yNIY{0w4tn zxk7o%u@p-S<70r^h%ChfW6l-<5!2Nq(!%$rwJ~0G$x=$H0L8FsF%{dpwA~1W%Yzcv z&`!Jpph%36W25x#)BYNRffAiy`tB*Sof5yyoJtf%OP30H(-jmEM59wp^{2w`*qp&c~1D)A`F6FJHcTJ)h6oranJAyIHOTMG%9CVP36ohY;3% zpE*tevTTh#n9MetW!JAic<@2r4c)MgVW^!mh?dY}i<;&&G z%a?Bn)071@wV5sU=Ld`1WqkeSq6@>x`Q^p+^>;t}^zF;n_dmF6h%YbKi-WmAynpZE zqemZw0LJn0@x390$1k5<->&W+-C>Kis_xx;xHsPu2>`&3W{VkFv+k}1x{J$q%hgps z^c&v=4PCcvTc@a9*Y*8i*rrv?#vtMlh7fWUP!bL*_$E{wvNCW9uyB$E^C8vv5RH0p zc~vGGkP?23U`QAW}OMo29(YukZ*1^377=jfhi>UmqLQJ?`zBg2@uEO@p{agRWv#b1>j zWZ#KxGPsx7@c^I@OOgMG+W87#?&8gGUm}g<5Gs^ZKsi(vg)Np(HRFDfk_8b}&ElZR z{7AVn_}~8be~N%)2vCrK0GJbEC%6Y@VoF3|M!lp;lXC-_HAJ`M4yG14k2vvTitB7~H* z7;oq}zbt*Ba(0MV29}jbgcx&#oOTzH3J_7E@0kb`xJY7+A^8p~nwN5AT@)X)RA5w9 zBw}(%?%9)PFQ2^rH-G)t3ULTw=!Y1ht6E|^3;_fI(HKj}>wb9h{Eg-5*~#(`zkeQr z-3-!B7q4HR08mJtb1AQD3G 
z1G1_H6oII~Y>hEUH`gcK@`eEW!G~@`64$r4WOx|D;CmqG27h{TvDx(JXVC+EC{9v|NG_%?M(eb(;>~vw<_T8)3v&r=SqkF&l#qZCrullZcrg?Dh{@tSoi~Si8 zP1@OHzW3GR$Io9rKRP-hS50U0>7;#dbWbFx3C(;~Po|Z#K5Qfo*H)t-!UoK4eBM5(xHFjt8hd)u zMk6F9Hf)%Yl8d4uF-k~O5XtTU{d_0@?a!Dmki^GKepa3En-$gaX(GuhrCo(xj zh$3o4ln`SOAYs4=Agat`!3$7woTH@T%~HAvS!W_;5-CIN}FIwMvqGqx3apLTRkvm%Mnl|n>PhzW)Wd#W{yFakccJ3-~|z4j5=zS2mnBo zhzLNSq8r@^L?RN2O;rgXDKODDUq8M$Ise!H=HGO~CQ7PVG;9n2B{OfWC1MqGeY?@ z_TlkQ|NJLkfAj3k+1d5#N&uR@PbaS3_*Hi~eDcwUZC!u;mEl})zAL3xGbA~KaQ&mInZ&%B(*|e@og)lMI-6zi< z5D`d~5eNieEa#)NtBCSYlC2+j zf}mqelT*9dQWiV2{aH9J+_8dVDp*9zv>DK-0-yp?dWzEHCXFXfx3$bF2Nh5P5k7hcGltccw0{+n;v-YFqs(K3R2^LMp&g2%u`wbQ`J>+iws*8^}P=v zLe}jOLSm#S0;ty7RD$8Enh=4&RZd0xF!aN^sw)u>3Zjf?9)J7v;^gXI{j0wo`b|19 zW;Ui~GJQW-W6-FpY>0Atc7A!=9Ug!5{K?7h|M0Zpy_fIKKKuN8SGU&}7w41d{OfPN ze)Q<$tIKO8e)PekZqr>{zPMf85}Oc(YTK<=gZDtj551@=5?~Li1WHJivxZS2)^!EZ zi}Y<(H|?}$0tpARxitnb#^9OtV6iY}&n8eJga}RTEK^grQ3e4(GqyrNBx9^gJex62Vnj;qNQlUQ3?R;0MU@zP@0pp6HOz=$7-RIvGz3q?Kv1~` zKrm|bJt!Jmt7<#h+Xz`(g`szK7y3>`>$>W@9zdP3o4Oi?uJfBm5AS!~aB+F{%^$vc z^zp}&xs_fcG*_qBi+1|%*^B2dU;Ncy|Le~`{rEfI`QZ8UlW!ism`>++?%tct8YXjn z@$TO7VcUNH^{aQ??dtaG;_%L$<@%O59vvM|U3GhTxi?=Bu~_ME){AzZ5KhkCEjKqz z24XK5f>Q89@Iz&*{l#8YJ0C+o3<@GqLhwWsB`P2jiK>7y7v-F?jB~JF3Xuts3^S7g z1n>Q3P!KFK5yYrK#UoAZ6Xy++hC9+u6L8jb*-;k4vcUk7xLiU>g~AVwPpa-he~GT4 zq!7mAyr<2D1%PoodL~Ewn!j*vjm6?_t8OJAurwzFz^J8=@t%scf;>X1Gzn)c8b3E@ zBu2R*#I5#DHhJ6D;0!;-RUM(cP#5X!ag2m=;3wI~hteQW-dJLbQYAX(#!D#xNX&$i zXs!y4F;^uTz|<_3kq-fAv`$RLQ07BRwuRXS5slXi5EK;&AvL2Rl1OGp(t1#UD2SM) zsib5~Q~n}ZxUo`xMDiTnY>R70wh7ATrkMe-3*Mt=Q$jZ?F;fW!VB8X!`6;1RK18ON zXbd>YTHJ~HAjR5%nn$+g20H5;D8wk<2k!^VwyNr6GORJyB;Q)CbHPUtB&1>R2-IzQ zjnO)4CzU`1g`xA-Rc~LPUSBSM_OrhnhE6pih;vqiLWm0Jtm`*j)wmF3x$OL)lX~wz z{o`+*KRavP-q+u}p6?%ATwlL?`<4yeTwj0xi|^gs-t=8(taS1VHhD|h>|Pwmn3K>!qv?+A~2x>4udB{ncU2~GV%KeP%0F*Q?1Gt zO3h2W2++9eyA@9hM%Nyo$VYFj z#JKVE9#8u)DQMv6L{r3BU^zm=`+C;&VL5CS8$1vtt)B-SHaH&GcY zK>;LA0S&Y$|x46pfEAzm#hS+CBHmgEiBcUg-nN1zJtXo7HU3|C0M>cAfzjZ 
z38slK{mhh%lrn%3g8@J_#g1E3qor4%+`Z94>Q>9Mm&m7DU;Yd=Zdi z7*yk?Uughm+IMf?J^S|cPk;6^l^Fa0pfL(ca8+ZCWiWl;)pfn@Hp2!&bcULL{KsFv zdV6JQdU}4n*gIV8ADy0`OlH%~s{8KuK6~=*Q|Ih-HodvIX>995c>eswy}S3M+q^wF zolfRq=#6!~?_x}%qiFK1P&Gsq8zx+|Eu+k9V65Y+v6V9hsb&U6 z0<4|2mGwhp!^qS$jWLE0YF9C{vCdgb%*nI2l?4PM0}8~3C>5I%0I)GJMo>hNy}vRc9@>O)smH8&Fu(Z^`oc4nvDX5BAW z5JK?XbYd;HJ_JLwS*`z@|IL5>w}1PO>zno4Hz$AfU;nG7q4U$XAO7UC*dM4LPG7xg z3^&dEPk-`b_f>fD=FMyhFQ2bI{NUkaGBchI4`y%Qo%LaOc=y0I=IZ3#%(__hlX_Cm zY67}lEsd+2X6pMc`tI)Gz1!~U>h_}F1Yk1O4Z)LPV)BDm5OzipRg?^g3M!^q*vKp@ zF|jv!j!AYJA*lcnV5(Fe!&?ABktnRFK!%NVR$>St5Fvo1S(30b$72NUf*m91?fCzt zP%y>rgzqrTSgN?{9yI$dlr(5D>+`<2h8`-*hnWZRzzDoq9Ly8pK$T@*6KamNE z0&$q%yM6sgEo}{+;?B3xl5f~nV(rior5i}|C+vQO5oELk9VJvKfkpoAnAHGFE1m$T zs3bN?$G42onoCGk)uj8C{p9jX{^aqblYA5I7>ab1uU)ojf5^;O$WL?On&j0B`A#<&<`Dg+P(5=bM_h!H`84}S30 zRUr=R)w-%|Gn*`LHs5~z_%HtAFH=T`5Re!FZRJ9UBI>ngWBN_cSb6V#uz&Nnzc{~G zJG*~+aY?2*IK21AKYm*^tfHTP_luwZ{Fldf?o8^&RaM^)XJ_YZ99y&OHqO<~xz+lX zgnB=?+OT1XU=R%vYHVCJYia}7)^6G~&f&Cjle(&`oisHe7=w2Yj*yTns)#i-sVaj= zz>JKH(}`my05Z-X5~vtki3%Vo5s)!N$fN{F49Uiq+G%5%iH&hF_DM{?=$p~ddj$b& zEi)UcoCVaBLble$5G-@$Yz$GNCxX&v3)VOlAS42fQH61Ke76~z?)KVf6j8^-B8p+Q znDqT7^uzV#(kPt2{PzB0esO-aH>sTl6Z++~JpS_8$De-qcYpW)`}~XVU)^4>egERc zR}b&ZLw9@q?)9DfAHI3@_V7N<_YNrx`v>jm>FVtK#rMAW5qMdxZV6!%;vG6#EapUJ z7y@$Rs`+BhS66Qus_p!k==$~POMy6>PX*&<=*SpV8A2dtGUP)*$n&d%#E=S*6*#q$ z3#2l>lf>3i$&#h9m{_#uHH3igd#E^h#usfWOewLu9ly!@;nR*G$*?JV5%Tndd@hJMu8j1($LpVi6)k2| z-#dB%`f?Su+~o0&k2fThC@?dCMv0{CeZ+)PhLe~VA`GD?Br+6yu$4{dRaLd`H;iP^ zSjS1~ZQ4%R+S{Au?|<{ZtJE`5YY8>ONt*eUnX0y7o&RQm@tYHJjk!53R zYm8w660KbogCxtv&M-_qbB<#B*OT4_fRPQU|%2F>L!rGesrosFBi=TY)=`Vlt`1R9w4J zvkxEc?cH2n#^v&ZI|uC5->mlUKA6-qh>C!l_3FXhdly%yzxc<$|Iv^Cj8u$a$M)j< zIt;_!-n402=bWN324md6S&0TY-rt|?P2awI6C@aBXF!n@f>Np#AY+}XFp@$VFB4TE zl|>>VG3CB6%ECUN%2Wg*AlYhC5K8`_YK(Ec?kZ>7rXnPf7-Ps}JpfRWb}x)f9&DO9 z)s)#9w_@4WTdrigsbD8?&w6v|l^GWQbH1jSlO4Aj$0B+d-DEBPZ!FRN*4ZyEz3!mF z$Yg-R$VZeEZ2sKsD9sT{MiWvHV@sK6afM;q_KBFQKSq=k;vH(0%u%+s`O* 
z$w*eGU<*slZOQbMmzE%h03}U_E+!lR(r`$mL&5$!I=K z0U&}B{Uz2Zo%zn~O>ZyJ3Q{>{zGj-Ep9E?X8HFmAJXQP|`88 zVupg|H*R|_lDn>3V^Xeswmr$-Wjh5p^nP`_nocGv5@VE9@Pi;C8U+N zKtFW7@A`hTUM(#$safCjU;pv(_rL!`Hh>gVdSdLmZW#P(y$-?8rqjh@-qaHWb9>vb z*7zU(m;bcx@w=b?V7Xo+;ZJ_@CvRW9+iW(MS2rKrfAr-yUsknaA`Nb9NE>Sk(ejbvTbRLxX@fvh3}QsVf?u&o>!BLId?>NPdS zvbAg&nHAA{&rC@pvn2MA4GTz2mY+yf`+nH0H@@@YMPeq7i47S8%!3zECU#&f*UnBR zKrn<2Smeqzvqd$XV?BX-Y8Qt*-H)d7K(?xbk4;_OxpPz*dwj51OxkH<{^Q^L`=9;n zrxRx%+&jKGUw-e4&n8WE_V)anKYn$5c(A#-nc2EuuV24=vFTUnhuC#83=i+zaSV;I z6KlTx!*8!HE`kr!*#e?@_xj|;)92Cq&FV&Dbf&JGg>y4wn;2s2nuo_9JUqI;KizAp z$!fV&k;*t@7yx37Q8a3*0TESYMr%2XdX>}7?r$13E#E&i#sr$V}^9R;%OzGHzx&0WGsa*3G88u z2AaDsRY65}|0bd&=b1-BJX0bieJD^)IopOYRb4(9uz*iWLl)YH3I>iwWxO6+CwS|j zmV`vk?iT3Ver~c$xV3qs3tGCjVfU!Jjxi*B%)0~-G6tvE4-p8#Af`S-P5g8!RL5*0 za!LuYr~u(Ma|2u8#_<#ZV9Yc@iSS^@WEb)zy#w>jFP00om%Y#~+e4LKwry6p4MozI z8p8%Cpub~mMRTESFgo5p}oQOhOBC%{A^k_;sqsv75qK~WSm z1WyQ*6TpTD+0f7hAYi7XS;ma&^H@G2({h!E{+Wj{tXJ=>EUwr!A7tdeaUax!?A3pf#)!R2BG@C9iuP%@8 z9KCw?rt@9ry9v*_eytKWn{^1@S$lDLPDBTbecyK?emZTZZDmR4leV>X;<$BHq8iV7{e;k)RPdSA{l1@AVoZtAvP8XnQa+g zN5sU$3P{ANk%*W$Xk=re2oVDyAOQ#xMeo^=u?AF~b4=)mp>l149$Oh=j4@Po9b@pK z6$2Y1!4HF1QDky;&20PidgwRpq&CjARimLBR<{#}(r>^b0fhDDcDXdpt-Ix7TFoa9 z``*8N^8C|JAFWo)r%#^z@J~KF+~0ft`ee20FHdhDeDKkx8?38wy?S{6;PUkO!Qlg2 zJ3{rUi@iNmc>Lta>zA*h;z!MF+O*4?bB*xzAHKZ%@WCWvJ()Js={jtd7dI;EH=TG7 z8r6rR#ol5*dvo@-8#*$KNXBr~sH&=B&?Kpbv~6g+Gn1r?GGfM_f=*Z*aO7kR1QByN zHRyW1vDR2;tg}&M=zU76ZgQqZvp$b>vh4 zGSi#$*~JQ|>K4{VDFgY=aXno)_a@?}akGgCLk7-BB3PgUzX;I{RJi%*|xZL(V~NpS$7_{Ly3)i7rL zKg{j`l{f&APzjMK1#Ut+eEF#IpTfFP?Ptt=m(&tKNiggXN|06-s%}H0UFFq6BSL9N z&&+SSszmAQO4RYXWRHI5zHDj14EHe^N0Bl_sVL;7`zxX}R#E8K@`nmQkcYUcDk&DW zG@1J>dIzd(SS1caP(>tS;;ORF+UTPM?KWMMAQA!57{i1Bf(p*sl(MNNlgo>n-~aZH z|MF))+icd%L`(qSj70#0>|6~>iqsELgk9a<-1h(Y_kR#=jA@DNo7YbsJp6DFc>Lsf zGnoVh4dL|a>?c3|(c^Ev+FvX}90uc6_Zs`|~ct=nO} zF}5PhRP)~cLDyd|Z#HqJG z#l?h}Dzu1DAot`avNQuh-l02W2_ah}0Gn=Tn%WqPj#MQCj{xZ{Sszr!{0xi~&*&hI 
z69p7=Zw-`fUPu@%al)1w*yTww$G-!h(<0idFo1eH@qPo}vcb8W@Im51}Z0RbRYpj_=kfC7L( z3ga696+xADm>hyKXA;Zt5k-^{wd8p7JX%d$52U34fGDb*`hmf45;{l)zlf>Pw>$<} z0irNHJCagmX=GDViU>v*x!8#W_q*a~$*?Dzm;8&JalzTr=f8PHP>RTk1e^Z!h9S`rAZnOFv92q&jEXVF)Uj7rbqs-!tgTN^FMj*WKmPS!|5ul1@2aLTWR$>J zmKcUGbem06w}7_m1FH4GJ$dr#x4(RBblQ2D%@$9dzC1cQtef`f^B2u@a(mlV&a759 zM@NUN+vTR)+`n^wxmpup*Y~IyXGJxZg_5X070HaP~wMv*L8Mq*l})e4aa?PlE_9337t)gQn7CW>BN zU4Q)O;qvlC!e%cjwR2)Jz3;20`QiuP z|MIIZ=T#-a|HJ?MZ~w(#{_OHb4cC4XR=Au^XWi|st*d)?A6^)^Iy)IQy|Nx29bR2s z4#Qw9E!wI5;G@f{tA6Mq33-t+VvLd4sOIMOLZ76KlAX7$n_KpFk}k~2-m=>hHH4@L z=zWY)LB$$Vxk^=i3}Y0Z7nLaF9)dCchjBXrzGIxiu_}UHYCG1RV1dBl(p@TQ&1xDY=?)GlxdGoeQ;)8^lbE z#7Kl>Qq3I`b0Q518HtHfCnhI(ppp>S+J^j3DxKyzbf6*#v=s*0{@Gc5*;0(YK%vyP z^*1>axaH+bLZ?t;`N|=wmL!B00|eQ6L*{?7<&Z-2X9iGUOaN%v7Fv=7`7}VY9$}{K zN+Ktv&j5lCgv7?EYPVURo}4UiZzKw#X(qF}ZEfX5b-mtr??J&wZ=A8tA&_&;o7Zn& zK70AopZ)a9uYT(qTUDm2YEWEnHc^#`>$<7y)|z@g+rM-C!SzM=cmMHM$9Fzi1w6R( z;PUFKABO2{ad~xfdDE{3v2}ZNd`CqvNVnNc+o`MS!N=8l9VPbOt3n7t0epx-qNtET zVk7{|lbtwn*z7-Ip90wUX}iev#fjk-jleEkW7NbmjNLx>WJ=cZ$xMl{jf#6}SmHK|{9 zeUD6i7|3!LeCIt7`6xaFQK^~gx95 z^z%6@q0ckQHcty`~F=O^z7 zVjKoiJD=T~?cFuiL?g$j)9GA2`c=5Syk)>U2M7DpDItJ}Mv+wfTC{sPiGE@ccB3qB zva@lEz6%1=t`ulueGw@g3shBN3cX^8F^U*#awEEeN>)Ng%q?9C=}whMGyBlv4wgdO z6f7^hP>aK?J1^KtG?e#^EkL_8Z!Z49vah#0Ixn8oC@Z=(Cn=Y(gC0AdG@7FS?7`xy zLpl*H!AP+myRYBkcd-`5OCG)IQt*-=MS^^@DBrk(r-(?~2@Tn2&j-?!$4wMMhIj-n z$6=y0?a3G-$ozlS`Jv+Qb7p}Qp$vsDshe{8B~07`JVI(DDRz}g;vh9uW!fZTe|AU; zoj!(~<=ESwqwyPK!cvz7n9cc4##qNxZQ}EjZ!g7)OX=(w&A}K?6yJVasmTS>-7?d0 zouw#7CCxL4QJitiX6Skef`pE%x^397_k#~ZP)&B#umMD3Y;4u{9Y{ZI=g*(L+^qc< z-~Han$(y@(@6Kj3C{Y3eF}Wyx)wccML$A|z@5S@0U;pyS-Mb$?e(~h)o$tJQ^Y;1k zmtTDGql@dC(~IjtWVzY=%Rl=UPrmsrKs;E?ZkB6nDn#&MK;S4kgb*Zd`VLf?v)BTH z5@_4FNmb1!&7GtDnxS>obka1HbCq+>TI=e%VK&wnkZ6sSFDl5JDqg>>cNADzF$6l`ebjm|N7O_Kl$^YzJ2@d;Qsw- zJ?Yo$%X8R2I+6%&v#_oP^v*mrm#%dY5{jGuSqdMN!!9Ghza99F8asCwJUo${{#R4QNdFFhXvTNkO#mp za`Gi}P+Z7z=EZd_G0Hagn2)JrPO>og`4Z<^lCimLq`S7ENK(ETZCa{#$RvxTvAtX0 
zRnjfN63?V#Dk>=FrYk5at&}nclFOwKRLBrn!%XOyY}&AdOkfZVlObS2!^vC#W113} zEfQ0%E=cx-(o?zAmC zAY#?jsxtIF0x%*XSYw&cT4M|%Ln^agEcV~Ler=5V?)Sd;&DUQa>>u1Yy3=>v_3iEL z>Lv!yM4N6&ghTHYjIoo~udo01KmHoI={Eh*{{G3^*RNi_czEx_o14v>H&fGfF?Fpl@QrDSJjPml`5&C3IrdC4KiEjm>39&DHZKF zR}pd%I;Ic>a#JS{&?6ruAYzk-p!XPA8M*^xbJ)nf~_d`=5VIs=xfrmv2um-n@Ky|AP-qwRnAU z1z3^6XDPXFxjuV!GM~)fy?JxA*mp$zW_5dg4N-2cZiwx{ z!M!`j_s>r+G|F^R?H$ZbRo&h!naB@4BOdP^PU|THF=R4ZLM(o1CM$>;pk({94JfQf z@pMuxK9-`9%u*M;9)o8@RS`dgzVAgMv$57%XA#RQMn@}qW?A3{Msj9+1&*&v0%v0? zcO-pv>t^fr`4TutitHut;_?6TJORxz$e9PuG38b>SrM@?MMZa^T!|4h#>J&M9gVZCC~;YFpO-- zFfySrU)LHJrE%>6Cv{U z5lWweBe^q1DmhfiUIOi;7D{B44BPfP?r3Gh7Nb>@oJGE+DKyGPB)_}d<9s7QQAH)y zgBIGS#D-&*pcJ-BHnrPWu5|VRqKd{SRaM6r*Xwl-AW)dmStBAbMC&XuiAapG@B0|N zg804%(AqU`-@aR|`uXDEx4-_?!-o$H@upt|34439>9n0p+PbbMZA)Y%QVjO?I{e}v z{#ctCvu&GsxAw2!zFiz1@7=k3vAnG(vtRx0ceDAVc6@gBZqiO~H|tlgUk_bhyK1>y z-QF%$HT9oKh(qr~7(^sp@Vau-ww_gP*3{F+IYX7TRb`De)>&hWvz4`$fEXDSj7ibI zv(~Y(Yzz|n5MvY~vd$Xo0LU0?j8Oq%V{L`V##R7G%t#!gAhIeTasnYyO`}QxFoqaI z6xCECkwhs0F$tVRiPqT^(D%U$C|esP+Nx4T??Yv4LKA!xQDZD34MQL#W1V%Can86( zh=52i&CmeiO39+JR5xI2wJkSuH#@8rM_}7tbpQcpJgP8I#Yixl&6r@?RL2MVe(0y| zbl9vf-<%vR_LjG+Cr_WBpPfP+9^QYjT&~Y9E-r7*t%K$2Or={)XV=$P*7Ci3_t%^2 zX6l|i{%SF;CYApF%YQuBo7dJN==tgE`K%7Z5QgE=2Ol5qA9w3P6|A$f$*i*V@@5@_ zN)!Y*Txbkiz&ka|5GoNHl(eA*51BI38$+*nk4xv>mswQ(BLO zV~5cgWCxuy7nC0yN&Kwrt%SggOprj^on%v6Ta@GR<_((d1|48xFah!0Uj$=H4v zQkEt9khW!Nv|;Ap10q(&-dtU;ZdZ@K^PN{Oo*(Ql7JIYVVqy%nlL;^gLR1*MSGAxO zal2XLlW*Pu))W`ndaBsO> zUESWWv)wQ##t?ms0SL)hP+;cT)swneH0`2k_9o4II+--hY%-lqrcG5hRlS%lnyQ`D z({kCVsvB2Tu5x4;iIS8sFrXphEFg)3aY@1+1yBJ)h^e3pkWf)YiA_2l5$Az+QB;(T zG0w6v$TX6+kYp-j1khB;lfpOzWHyKtLIj{RBGg)&%Ds?CM2s;(A}|G!5JFUm8YMe~ zDx$V>DVj0PMMN~tBL#M18=fw-YPE9pY}(9bg6buZGy6w}^=#72rw7MJlUcJjo3>5m ztoihlPf+{g{poaKPfy=%)~mc=zD(AHPzKS2q`L&R&|j0$07gym;{8 z{ZGF5u&Sw^ng99U|3Bu9xwB|4E?x`75C)C?#mURcz_e*q^!wlYp|$nJ`4YvoEeQ8t_Pox|h#WY5{k8izoPWGLxJU~0%tq~GXcraKC`rel;6D%+KcI9zhT z2p|j2$K=_7Dl!6T2%+zXei%lgJ30BK;y#;!;-YO?pEAlMgL@%*$}WULlw?L_jL35k 
zpAn#J_QWQXBrr;m2pg+RMv@?HYEZJy`=;AvA=>WMAJH zvcHBS)k==s)(L=eHx_NHPMT zM6kxWs=mEhT4Pl;#>i}pafpzb=|GW@HA3uIK5V)^twm%*Tn`>^SN@xCUOj#K##%d_&8L(7Z=bwcd->?m$6(;evuCf~oW4Cj z|NQeGSmy@sjX@&q)@x@?J81x6z3K-q%w(8jNLx3gWd{n*up?{?&zpMI){~}cnyPMG zU0cI!n3*W`ywp|Qwyky6SRR zi4>Gld5@;sLfg4%rSG?RJxLQHoesqqQ|U7`n~Uda`NsJz&v^K)OX#zJ4g4A@BZZbKY0Gl@1_mEc>ax#{^zn4E8L@X`rT55=OiFCs+VYAm;%`4=3$vMG8F_Wc zw8{=|o{~3IZW9qQW2R@A5->> z<>eAq0O*_IVTRN}zm zHEiFr)o{z6YPws61uRiwZsAKglw#5m#}E?6J6RTFOp@6MDfd-Xl|;C?zIB!BhYk>o zVFa;k2r&AH1gUEVh>$q=7*)MQW8K;L<<-^AhaY@6^uzVl^})fRtE_9AKE%HB&ROS7 z(===u(GFhvz)zl>Jbv=(VE=HlUQO%Sv&XNmuU6A)R#|uQ>g{?NPR_2Ko6aZu-MY8L z2oQ%Dee}JbO(xgZH-KE(CVls+Zqgo3#T}ETo;UTZc9Y62rtP$Cr)@o(woTPI>zpyp zT4QX})^*bWU=%UNQUYURt+j@Y2oNzSil8yhSeurVpt7?9K*m~EA#uKvkoFHqR7%t* zva?k}e-RBnCe9!Mzr>ifc?dx@bzcx=A}*#rfS?jpLx{{ALL}l4;->GI8B~M!hKw`L z7=uX08fFSHChsIUmc&K@lu4L?D1w3^HKyI$ujdC;FY3Kx)6TJ)&}7b&MKwL-W;&Vg zO=pY6!QtW2VO5aRwO?ML z411HQ4F3G`^pj6NH9*m84AS-e-~YouXb6XU3sTuEm)GYPaqz}9Q5mWFlOO+?@51tC z89G04_2GQKTW?eaqG<1Z-|x@&Kl%8(i`ib~YEy2NVYbXEp_720OHj_lh-LF^aWUl* z3q7NvD(U$mzAzFps0Br1D!fNhMTv>rQH>g7*6`oum=bv}#jeDdRffjmAsvH&0^+#y z3zRHj!2~D-6=wIQygf6)rQ3h^%jyojGx8J3WMLlLha>GXnwDLMHq$h)O#-O!@{WoX~m6UHz_NJ^t8pAzSiZJJoFJr>;PcDgw_t>j!wTP0aWJ!x;Iw%DP zSmK|N6{4M%vE0jnh)}2?08K(8g#s#qN4Qn@@NYr=!?dx|JVYg1Rg;Ml^f8!bqNWPy zJXTmwshXI2R?qk9>3qI86aXYMozG*CzFW;EEiqkOUd;}V{-^)=xBGYQY`WX%!`1E8 zyOTG2v&HTC#oRS7AAeI}TwR^rz4IUpyjkns;lrw#5!V1_e{uMSU;kP*n`u)!w*6-9 z`;G56);ebE2mi^V&%zL6R7QMo@4>8{8?uOOjH#@a`(PnTnAi>d3`NjG(~8 zAb@~OOl4~n7Zb-lSVHG)3E^PUV(Gww4{X%WcD-W;1`ZojZYoDvj3+gK&TJ8p9q zm}sKycPd2Ki18(k%2sqV$5ha_l;0#~Y1EP%`Jxi%A!1UDO!r_X8B;uia&yWJ)-8{e zOwabNjvu`}vPe3~s%}!KBN2#Z#2A7CM2QM&n2DI#r0!f|4l#nJ7H?HhW{YV-F_U41 z++`^$$OwpOs(1d zU65;(Oa$yDva746o-gJ{ckfW;*2~q0AKriY_C+(BU3KxFe)Ws_VsCwOxn3@jc|Knl zW17nL>)W$;FPg@n#7{r{;mPIAgO9#jxBC$)i8QI2MP2>w=l{fjO;y#kySzN@Zf|f1 z26*Vgq@CZp`{DJ)b>Df%?r8t+YUR;Xz3A!H#d5PG#Pw=9^gSpt8Sq_wj1SlZ5f-78J7onm-W!+20fs6lXDc znmHAT2&wF)pila!^hly^67X}W`Y$k~*T%NjDxLODrncP6@gD!P900RHa)zBT 
zmQx0p&=_)t4U7An6>T8#z3=h zHY1cLYq;Z+jAc)dClcr7x8+ZV?v4Ma6_<^RLC! zBY*-XQ5se4`$1Hh4H7aZm?P0>i~)eY>)5cXs_Vr_oD0h&`-GHdkP>D0P*anWLJA87Vu&M}5Fvm{6pfN{#i}YX1|LAdurb#9 z5E6Yfc%O^Lu{LNVq67e*frH`A;t+>FRDz{Y|2g3 z%of&FP179g9ZahU#;|DXdk070)pz*#@!M`SOse|y)r*^}^JZFo=d+J*muGwP=9{m6 zH=DFwciYtN;9&pxtH+PN^T<`^_1iaw;r{)5d;5#u{qEP7XQz96ZS8ny`t zfDaGv9Uk0W-*&6p&E8^hcz8_c>U!2R6Oqsl8zyY3b~fFstG0HvVGBYMLA-GCh>5a5 z%~<+hCSVuhVuxleuG9M{iz6YMlD-NcskkDMMW8yCW0ol;TIQHbnP9HP3?$2vs+3@? z_SS8;R*?!A{}i`zMDQI<7%@NfBvHoy45)-#jL!(4o*prm%G{g6xnt5)Dp_L~Ykg87 z{I*OJ5X%VkLQ-WEDwEZ70%k<994$aWbKb#B!Y#~Z1WsLhTiRxPF&7rcq)H#oaR5^S zb!JMcz3GrUUbAkQmE>?~F`rvEO?PMH0>U^WG#TYB0&f>bd3LNEE4Z3G_dL2N+tqP> zl^0}w7N9IvC}Oi&A_Fn&@QEsqE#Q!eyCid!!U#d7bdISb1d`*KIFD@@1+<%A& zllj#9elx76(}e^Kn|{5%B5fep*RL*q^rwH?hyGW;{?(6u^k%r?#)^*?`OeXCKX~s&AO?xHGTsk}czu1{whe)@LS?ye zX4X`bx|%dq?W`d-Z0f4E#Bq9|^j>$LJNz-QU8xTM;E&%2}q#Q3t$>U=bFoC&lODFU3q=3l?G-K^X zt`XYF{f;@{Z2~%@(wMGDA!Z5>kSN8Qg=)!$Bm1X`d(ZA5AZGu4G#tg6Kw*ya@kfbM zOvsXy@e^k?1_zq|m(}keUql_zEdfr+MeOP(jhxah?4$v3BqC|CiwYqs(3Y0ivB;Tc zO~92quN4%4lL8rt0x`W;*ptl&(-=(@2YO_q01=3R7;_c>d+u{~AqusV$QdnW5U4bp z6#8wKZp8BCi^b5a3RJ9jHpRtkl{aPwA?MOZ z-=UZ^-QJi&E+*bT1QAJ2Cozc%ATpVYi;F0_SnN4#gO9hDt9CNCb)D*b(5PVS{W?Uy z88$-*H`mL$nG@HqU!DK%H{bm5`#-LY`}yDh!{?uUal0N~y*oR(U%j4{qxVrFAf&LjE%h}8AHF;)S5@IZ;G72|Bi4N+ql8KVmBI9{oM zCq-J>4Frubh7bY3Fe8!zV4A>LiY_uVC=@4-P*a_4=>kzO)}*wA#OQ-3OcMF3pvGD2 zoF77rk(nhbATpD+77>Gweh7$YUBzq-JMU%aJy}bZZQBB|sVZ`HKy;I-vR!-6O&?4&Z-4y5?}7KBi%j*w(Y?VB5PH)7)swH<)>TXo zA3QuiKMm4<_|g64`ugjq-?-Ks9?ctP|M=UlZf~w@hY-7dvmzumR*9#Jy~CrU5MsCP z_ZEAL*}UJZ{V)uJ7m>udg%DP&)y2itX1!sirmowzLDj+^*to*s#IpSold4?K}i9?kOP5!(_LL$9UmTOh{I+xY&uk&)>HM;w$q#2_4DU%fBl=U zzWDrmDDaDa_@@W=ACPmarT5;xe0y;PyOf&kpEme$(feegr=s-|kJF-`3%XPo7#s;kObYlz7?Q#DmpS4ja%0ogJ!XQdsY zD3O^l^_>^$j@ek_Qe|P;uPCM10uhZh03>NXjLGfe3G0(TA_|CTjR8obp0hB0GO{s= zxFW%>;qGkH%Vwf*Yu_DwY**P3Qh&*rwOo2q$u z_kQbmR?&xd_b*N_PEM~@eLoC+zuLTg^|tThw4Hzdi_f2Z_4`jA9$mhB={GlhciYr< zK5ehhPG;7A|2v+dFCtmG4@1kj8)-o)9>vaG*#1gy&*i9 z?@gKs0XkRt7<}-EiXA_J$H 
zaLgPc0*VR&0Fi2*&7+zIJd%hYkcec(3>qW25*8QhoEKQuu|QCzgmjr5QZ|f9$}iE+ zs@Ip-AAS62vsriDx}CPFK*+P{cjBIKYUHx{Pe&0 zDT4d?Km4sVm9vw#r^~k|>(vHk(**)Dne}RIYy0gtFGAG$VsEor`4}q$(FaZ~AB0^$ zurb8A=?CXH1`nzs4#Z%X8tZiM)>>wCRn;`jq@B3RPA9W8nTW|Mf^(H&1E|KhTliGq(rCHLDVKO38qH`c70F)q1 z20)WUHspdXPIpT{K?D&Ig_9L$>ie#)8f&ZIJu@3?QaczkvoVAe(;c_gR?f$W#FEBb z5yqg55WEnRY9O-2WPq4$@Sc$RzV~4u<{)aCMu?(96=#D69yYd~h#&T-(Jq9i*?s~>1NYrZ?SiAeI9*i zo91Bu=;Hc}tg>P6`=+T8A%^JvV2vX}HY_qAE1)2NraCUrr23wAl}eKRiK7I6-j>BU zY#-i|F|CgOk0x0_bq>x!~1Ox@9kw(fyTro&kZh>ad9vLlF)v*sGClIz@ z8@I#`0^kn!0!dyro1`55ASjjWGiP@`*Rf_`#>~2mB}$aOYw;mQA3t5HoJuLtPjc^b z4g|BggOS5Q&G#M^OQgmr6k^CZ?b7O;ynZ$%_e+SNxl6(%RWK+`&wxZ}Y*Jb@W6@!X zDbtE%CImFrAQA|Os(@~~?)v8T_|DzcYB>yjWt{{`dC?HU?ad7WIO_m4ND!34$6*jr z7I9yF`4qt2yZ>OinEd(|Kkql|#r$Bs4yWgv+YUM})9Iqyth;`_*xO%sAqp7grZ%hF zi#vyh!H2zr{i?2g-yiMo-CSMHrc)73%@?X_os%fexo)$;s0=u3Cbg{$Q&U$iSM-Yl zT4Ra1t{O!064)4H4H1EYh$IFGbF&&NK#ajzlf&;6W{WW9`i7)DDq1qMB7l%$&ZMNR zpfQR>4IwkrF+@-ZJ|wxfG=M5;jd78vHEa<%gs6ZbIg1mcD4<~r0Da#Tz3W^Z2}Is| z1yBSwj6{R?M247+wMay4kWBDuOeKQM)>x+k#yOJ}FEE5?ts^oa0$Aq(g$&qA$k}F6 zx3kH7flLPn2Ya(-Zn-fW0lxh5+2Cbwu^;=*u)5rzO#03G(@#D-eRH-yJu<}0)#dD9 z>V23@W;fSMTQzr&KZudO`S!)V2OnSG+!&xo4?g(auYavWZ!{W$)|uK?BD%kSG_9x0 zt6K?L*Aq6j+YD*F54}$gqO+Ee5Y!NK-3CELBuY#~y@KG_F;krV(nAZ1Bt;u>RM$xp zmy(|@v1opdfkJvlwtAprnpYHJ_UqGl&dl~`5#Gs8V^-?V4qv*DWB(V7bUus?yGoq3 z3>bkx6DXNfL7$pbv$L!SLWP|sR4%V!&ebjja1z7;fo5?9E+1Mbs1$h@Tr5Urq^~jC z#_?SlzEVLAm!vLbDb|rvp>j;X_mnYmL^1{wBP%{KM_63sgx0B)ld?h&=DMCFPCx|~ zDMukA?k0k1lnKP#FEgI}NQ@WWZTy)a9AJkR8=C;K(_T(;jC$z(*eZussobWh%FJyrAhPr(a5Zww1R-WdUr1sum1Rr<@$qNFgc^Be8WA5HmX@ z1Bj|9F{@BBQ&rW(mW&IEz|K~!l67@kO{QE;Y&#`elXbSL4-XDMymxP}o%hS(`O}kE zFHa8-_f`GlufA?-*A1Jy5ARNAi}TZW5ANMPKYNR)Rb}eB?uRbKaCrCdU;e9~dLyg2 zv6cJs>)$W-r+4=Eo;?0qeeZA2`}L)(+XxoWHT43)NTkL5U^-biQ%j75&}}xG^+rMj zkzvz2<0MAR$x}@3rIh)JTLdl|Xp-iu>=bKa@jkQ5pT>V>=05W~iL{ea44Nh_W-h3V z?I`R{ks?xnG`+p_d2Ne9a}Plg(JX@rOD3*Vzm#CTC{!lgd%se#IJi7weMZIt%9tX- zQF>w9RZE3zDKrxncDcCDBNUClm18D0VN^-j!Orx#x)sMxQimnYu=7#`$dr2_(ka7l 
zF$Py!NDDHrV^S#2`mthXe)CExvD0?J!Hy0 z-=8r$*)?(H%H}*@v6GmYy~5K`QTjcrl+#dJYZB+$%QMRWd>6ja}iZznuE^dB`TpHDB=(XcD5gS zB#a8k%*H}4;xg8_6rhTrBupZXNx6tv5rH{~L{K#tR3tF~fMiUb;aRyLA_`;-xynr^ zFk>%pz;$X3uH}yB)e5o)13<$!mS^02q zbZ>8d5cEifH@ejs2AAoIhKqh>0;qNFXm+@fyP%Xfpz2O3Q1?MuW zzU@)s;^K1ZaN?30r1DDIg^u+5h>cs^OIXK%JldZOX$8xRgFL#qG-75URd&q47FUbc zi=$7S-JcQ!6;7(SNH|8@<4KN=a|TaLh*hvZXFiImXTD@R$c!Udzw>+fqyPLLTRT?# z|E$SbaDN*wjD`*ie3zU}$qi&Ld%S$RCUngAmLrc1bu?Ot{0QYQOhva2k#>{YyO!_0 zBHSDuaZ18uQ4T61t6YqB$#?*$s`sICl|*s2y1l+_>*;hd^Iix|3NEd6wyNr;s+wvD z0{}E_)lOS>hO8sB>t$HohIel+y7h2$d=x_(X3k~c;`m2!+6{A|womz$~xkg~x-M5zov?Q=@* z!q&_a79~C`K0}NV0ht*I4RiXGaqjZs64 zNf|VXKu$F$zmfr&*&-0LNo_j-ido|v0kYSW=%y&j*1Eb@1SCd7ku)+vRdYKx&4Q|# zZRd46H6S0{ncP{p#?tx4)yc`l{f7^k@tZ&X?(Utv^Rsu;c3Rg~iQzYsA$fw&S&%Ky*tO1F)yAzX(vu)WeJ$8P2ca$4#0~BoisBL zFlb1zcFr&(pf#p#rh^abO{V}+G{y)3K6p_nCO=a*DV-)NBjjo(c5>raY0MZ0?QLN_ zk9STjUFBUx1wu2Xl~^MO3tFm2-^Uo;{w&KSB^)y<77w)m-wv84n*du;1m>}!b!2g{ z0R8A6V|Mckq|o;8<0Cl;%6{NTSQA1?g2IT4#f^r%aWWUQWwvrgI+G0qBjS}$hcVZf zsd*SjUuEOGTe*azp(#94@orOccx#n#i=?^58^#mLv4)Omm=u@oNZw(JdP*FYwqP+z zV_LpQfNH+yFcwH{uM=#er)*SqxSbsmsU-Q+Ug&FXp$Y9{S8eM?5N#wJTt*DaeWDg=q^ zZe?p;?9XCU-^IF`R<_-2x=%m(^rJ@~t=7wa82;VA`(K7G{NVdPz=&V|;qf2;@WRd#$0CuqDrN;3lL+JRFOLh zyJY&_7*!BW^V}0#T08~Bj6@=$8lz-NE_)c1ihR=2N(wU~lGdCmh7bWTsv@&7CT(s+ zGVClnMBa28HqN=G_g(~0F$766L5P$D5sa~HED%dJKL7wR#uy?25*b8BNHL~GVg!?n zL=t~RBv)04sq4m}apb0^ombO++f19LsR$d04Psr@YO8Nwyfe0~xqkfRHt%7xp?*BgFE{*0Y&-Ziyu6G_2Sv<*B9rPA3V7K z^2PI;i!=0He|t5nC)QYGn$M>(^!xjJcMgw+&A@7h)!?``z%qpW*&YF^g7+b%b5tQp zjGzV0OLpdBAyiv}MGl==-sDY<0T zJRe}kHjIQu@sqP-i)l_t>NL#luaw15sW@|4?HGY_5q!$@6+wlO>UrN7(1^6Kh#WlSA> zxVpIRyPgftr-!KS>#v_(Uf=%3zxZ=o)vw-O{_YP?L$o1i*Y*8oEkT^Eu5WJHR6fQK z{AP7qIlgn}aPMH@edzjru~^I|?ZN&YBLe8{%{8K0!(r$o28jVwYv&A8V=(^Sr| zVK!tcYmFg9@I@yl$)co*F@V5^>$*0E*%$zfA~8xo3^GpIKp-CDSYk2?KuE@##OKL_z=@Tf)L^mJOCncj1r>| znFJk%jpRBCR=^}qm(?bSp=3BI&8BC943dC|Q3QYllnk>qfEY!QIRXkYab;XxwG%MT z)GfIx0@}JpVu(7a++sEp!Pjrjme-r3gM(Q+dGq|OAHw|L;NbY)yEiAk>-HD3Z@>Na 
z+i$;Z>&c{US2x%D2m3F+{`O$L`2DYcH=E2q|LlwNvkNw^->iS~lOMf%^?GxC%OI$M z*+iAPow7Bd_xtlh8I;jhu0a40+^je2)l!0}2<566*3w{+vh*p@ky8RNPSr(89lYrm zmD0^SGBX6Z@He-SLC(uUlJ?JF0R))zy&*;^$d`#7&|DCl@dXRdGsc9Yk6T>6v6v%^ z6=Vgow1v0c8zb2=%Y+fLR$AuwbD%(3!9IDAKnZ8Er?0!vF@^%8$G!VcC@K{@G&PJF zh*3Xu$KAz3ctEigB}&3#Vap{<01R6ZQq9?rqO+29DpCRGIB$_gvIvm0py+6&GNHe1 z#)E8iARntdjb&^Ej9D5eb()x*Vl8#VV|=e8d6Hsk$g=%99&CpT1H?2cq}=xmwuxbY zV$<>^9`o#_S0IdKJAS-qK;&i73_{5>Hri0Esoqq2hRkrYI4omB&byjfzaC zlllI9I%~ZjZkM;$t81^Z>v}|J>qf$W8W+=~N&%L2uyQB-4$!Fv%+NtC4hkis^=+~uD=AEuNB1VD*NmnrM}=L~NO_>!`ZF+?n? ziLVjJDxs=MOzCJyd3s0?N%}va5Ji$NNzBY9iV`y*4L&H)5JD$-#e1**Jw_U!)sk3lclTete)fYOe&5I8cD;^(SLc@>JpAb7^knGQo7FW}Ts4g#&Sv}GOI^(# zJp6dG3C2zg*KIqUOlP;NWfW1x5Y_vr0*RNSOv{YBdYg1j&N_$|vE1D(?{+?zWTw90 zmZAzu8oMMQ0>p;cgbj65y0MowR4OhE0|| zU^dK@1<^7il$n5(CWHhpxekZ%J=c>8_eA-0O6)QAs1|#$!)##Y54R~vfDA`tDxthF zG53Z0D5S{NBf=4wM{`>oY8X4Ciy0`tW&=^UBiaTYnhPegjn0*+T7s$fjYBZg5; zC{sO{n2fP^uGoMqUtOZSk`^v(8B>|^lUErbATw94IzB$0O(z=F55XBXo6ir9578nJ z7-I~vGwo*Ciw8plDhSkfzNu$y+{@>0A@X7|TP&v5R+~Y7{!hR8?33?&=hH`57t61| zdHeMBE!B%&aR^~Pn@y*)rk*IGq5@OhPVXGwvvqxWb+K7*3|Sw1h_YO*u5YeljLuqX zZPQEufs7q|@F6sHWtl4HCT-&?H=Q(%t1U4j8V15NOB<4@QxKJC4I`u?&ET93VW_JL z5wkd(6oD3^;`c!;j6sx`u9~KzdnU>g$5Ax?zix~G0RR9=L_t*JJ2IuWgcudnx(Z6q zU4j@S%tk~ZMj^^B0w9!PP(wt-X7psEs1Jf-k^DpUj$?AiRC^?GJzQ!*9O) zwrv{q!@<0XVfpsWvyVRc_{(R16!v$H4{t8d7qh*K(-Q_C)>pttRb}dCv45zl_wGKh zuI@IW?;??%PY(_bj-#mevFm-17=lk}fZ(Hq+={*3*9s%=vs1^J|I?iplo&2)y=CSH z5mVzA0IGnsPDCVv$mm2OW}&AtYh{NtLEOp{ZCykh!|%cu;P`OP=Q0;!k|=RLW}5>p z(2-s8tw%rpp6v`3$9zl=ZeNoC6h|+fMp^*N|81T7OuXzQ72flYcYB;imTSiZj4vB$ zpNz!g9mw=d&cKggizED(|FRTna?}X7GZscZ77~a{7Ua{y--7~-Eq%ofzD250VTa-hk(eq0L`xx3GU>XTHrr-go^!nz$?eVtfnpvgsYnP8LRwUQVLWb}x z7eHxr!MH343CZv_*PF>7W^0ULwkDUr1B}H2z(jsCbnD*x0Rckr!{GbDS5++%+ezEh zt$Ixss_TZK4~QfRT^C+Fdl`e=yL+#$>b?sXXUkvz^0$>S-~Hs#X1Tez@Xue~##sAa zhoLhD4i<}a|5uyM`NdTXXkEMM!`az|M6GNUeCXCgjLOC_+sZf;?fYJ$_#ql&0l|Bp zdNa?@&P23rC&pOATv<~)lhoLlsji)MHcIr~Lxy;<#y}L`Z5S~I4-)h4mq>`w2Z#co 
zSw9f+icI&z2T33sRQoVM8g-3CNXb1!L={NRRtP~<4V%92Q_V)Q0vQ-mW`J_pRg6($ zWG2y6BNRi7#6|&QjEX5ZKp^gYP!Y>EcK*XSVp+&U>#pSe|vLv{nNkvm%sb% zZy!FqclPQz4y*O@cIdkw{mB=vUq7BqoI`(faL`z@zCHs9Ap~2uy}*2Sw3y8OdUbdI zsIBUznVz3r-&`-%tHlbHPyk|@^#XqIi7Wv?DFhAysX(54g_ud9Qr9`s#sZKP`o;`O z+JWP&Rsj)cC(|&5q4Q|~0hV_9JbhP3vIFt}xa5H%5OJ#78uxZWH7ezRh#4pf5tK~^ zVJd(yqe|MSmsvLb=oexsKi1JILbMjyt_GL@1+iyq; zW0GMM<#_K;n(sC;bhvX8J722sV<~(?qOmwXckklJA&hnuGTW1E=4d{*Mq(82FW!M7 zm9#-VK>F|^*+JW%xnH!T$hnaIjOCsddaU>sOj!^uadn^q5(PwK5D_rWAfa;=h(;d` za$Rw^S$3N>6V_D|d}MYArfFucU!0(rPd@p0GHa^3QLg{+)w9dX>wo#PzwkqN^Xlx) z$<4)0-%jVNo8_#jTWfEwua>vVwDo)p%gyTLo7bue0+Xs~n$=;d zAzD$j>pc=Sjbp;fnHYj0Qc*&-Yz!l+nmiCRmH#LDR+Bnx=m!+7*fAghY7`Vnvg;)R zLm~_zq=KKMEei@NNlPQ;+^D!$X=W^F8WKjt*5s_cLP|~=>tc)&#TaYZrosecEC9wR z5+$vNw9tr1QLS^TX~w4-=ZG1R`Y@ywA|l><5I~?1W8Zfo5dlfn5~E1gFix2a$SRnr zjwxu!o>$_&k%B0YL3Tw6UcP+v=;8J0No~Oz zIX>8L>&fr`@W+Gu56{ov%xBX#uV2W}GX#jeHD)#3*3IPZ(S3&ogSO3NHrq#V5|DwX z&9teS#x(?l3L3-@gG6OEfG7eQHR*Qbs7?`-L_icpFf;W@b#V;m%e}~91g7Q!u+~i{ z)8Iqb4aPVTO#)L0ly;};7R@uw1C&6H*%lDQ;)m1LeI(@K!O( zjwyv4lZ~34JGP{ZNhqxn3GcMH|GK>|$qW>4oB)7|NStmftxAm-RCem)@8?dKg(+%o8_LrHYm+qtZGtE~lbC zlyST0X_R}PZ9u~Ptz=Mw)51s*mTAaYKRT%$B$6||B?gI6ovUisw)MoYF^uc=m9ZM6 zU$0l7#B68ty~~SR@8kXZcU{F%Vg&Q_<@vWy-~R9i-*21h>U{O`<>mD<0J^^G7qb~? 
zB%)!6NddKK8)M9DKHCf(F#v*b7K?H$MD4rI55960C|R~?`nQCL0wTgp)^OzvgQ_10 zL?xz?H2@$g%oJnLC@I7N&=?{_QHex}>5Zx}jqO5EV>kvu&00^XaGQu!C3)(}>_?Gw zMGdhbLx4#un3xK$kjDVz8;;0^t#iaIBB}FBw+ytZ=0E_5LWpSz#~6voFk5RxVvIpS zDp#e@GcUHRIgx2lKXl&r5{DRv|36v({iA7?r3Yfi-20l(h0lz1l~q};-AvDLra2tC z;VxGope5uW0SiG38DJ&ohyAZ4{j$(51d-guu1IRgC1*HfchB^+DPNU7e0=^j_nvd~ z!@chp*-%z@Rb)hb?ls3e&vTyRn2?OwLAuU{;xb*wQKFa{ngXgJu?J@K1vpPdiLMl4 zEGk!zj2yXwe8Ijd$J5FFA(oY^#=flf_GaU{tn2F4>xbuX6j}PjqBDK@wS-3f$QfJNj(Sv8p4&TgI;+P!Xzjylf z&2wm5*(?jv(z#%LIT;@w95Hgg=_ixDvKsZhmcAMl)xmVX_I1G}n=4#FNTy(d2%In( z9ZF7BP%#5EBZIse*Ayitzc&Q~i8&UjDh#Io08o$WYE*|1Lf@ybzN3hDlYS{4fuX5p zymr8HFk5ZrII_E6cYy8C=nuss?&>+(75sd>;IucxqGj7dBldl?kaE?sO-CdzUv8gv`79r z(ZR_kR9YC$!^YTW<>u>r)BP_ny6f`sl&c)zzD`%Qld&#=wXKN5^|@vn50YbL=q1Xy+HJwE?PVvUipk zRW$TLq9CBD)m7DYZRvd9HBmZ1V}}w#+cb(MgveSR67}8z zXpF&Bz!Xfgs9!bVb1@{O z2h)PF*|eYi@@wr-wtc%=R!n=-(bdh{dR*@9&(6+XOVqloFJHeY$v*qlPp7kK*S2+4 zSm>Yno4f`2Dx%XR!~jpMQ6F|9<7F)%?~YLf?{VQIy+uGoDRPPam|~R(f&1 z6ou7hyI8DDur4Ok(cZ!SN$DqO979ZqpQ*(dMOD<&p)^1lG+P5?e^#` z_Y9naWwmMg>2Kwq^fEgrDc#8#3resc3^09y&4$hihXx^s{+v(KH+N@i7lz+ClsiD} z?pX%NZI`%mQbWu+Zb;FH!+deVtp{*0^#Z$MBe##RD@=e0kl7JAiB=R9D2(d5DoY@U z+Kk5KWHMeZ*W^lcuI+*=3U+S2TwlDtc<<4p$+(tas;pN2?5Dpe$bJ9g54WrB)$MA& ziHC=eZ?4v!oq>7pV~E?PL4YW+^hFHe=-}}7W>ME85i6XB#I}M)6$K#Xbk%N~O;MH# zXl6u~n3d4>qpFO3>phjFH&M?7pa4=9&N&89RT*}Ah>CberjnhoO^F~GG2AglYAP~Z zI3S|Cs~giWQPztXz(Dz0uGt9_voc&iHWYM}Q9&Zk0-Q5d@K(^MtfFp1$|!@#6LtfcB0Ki)v)x7xT@<+4W}G5cqLD zolf__Q2J+uY3dR(SU*&e1O4m0ud32aH=A_~(G1h-$=#eK48|&$ z!(}eXhRukKcRIsV4DJjN2K0V-;I8D)&HP<$m!urv26v!A_W_*LK7(#>sQMveS~(~J zB*W3)0xMGB9ooqO4}Lf142{C@ZXg5wkZRa>o@iIk4#js4&9Em)D=Xpveckm%`INj%{O|HW%8t7nzI!Pgpnt@>@9dg5qC4dJ-RwfR zt1@`zj^ig&Dl?W@;A)dtLyVuZ_eI(_GtvVx?UB&s%uvjg&81H*Yz=m5WBwb z%xt~h6s~BtP2rtND2l@S!Vsfsh`nRlcH5?HB1BgfxkLu2%1UAwRkbH}0F`r|sBlcf zwPT{D#z>CXOaU#;TZGuf*kfAJnq~EwmPslg#KfQETP)3OCFjnx2Rvy6kn%sN)JRuX z9?HkO)P;5<8~GQ|6q19`PQZ|!Gu_bXnpQOc8`g^(0--|R^~qI*fhY}8ifZUl1SG_+ 
z3EORGTaXyrR=QTZHZ)u98mB@QNh4eIXbkGi*ff39qk2?F=FmK&_Y8#K$W;}OM#XHu zIy~mtek{w5NF4KMR7_`BSAmhLdhcNWV76Dd!tv=7NBB?43Mc^NI{>7h|Ivm5YRjC zoC82j3qjNGz#TM<18SQQ+8t&UecYyUP2wO4mDXJjYuE8}_LUf!g4=9*cL- zT`J?!qlT<9>}&uB17rlbw9gr4z;bq(#D*Fzf209?O5e!8KRgB@Ipq_mLB^KTqcquv z!SF3WVz4~iY`gKcAzS3*zdNLe@4Uirs)n`$;jU-dJ$-uT!|?`t_o0Y8MeDAMNdMWr zKESYH2mn$<8_L!6Aa;c6n(ek=2S|P2veUAvKq-cBw10H@_F}PE5*JTPBO);av+M;2Xo^gPX-&L=N$%;G9HuQ^1|rIim^hof zBj(y;s1K4V5h;%o6Pa@k5LGl|2>H6pHz2!2(Wzydur!ctM8Hg%4Chps5P~t2nP?DC zX)q1*1R)bKnTiKw5yBn`tnZOh!=u37z(fVnAt1RVWg+yO5>W);y2^s3y2ifa!o}4x z0ysw_I_yj6&^!>vcyij_&ew~@=E3R1-~8e?PoAEVvv#vQK03TzcI(@lvKmVaubzMR zqu=>mPh7AMef!;Kzy9EpPbRMX@;4Ww@nmyzIlW(s;_1`(uWqm2d-T!Ui&xd&Y%-m` zJ$tdYe^@|eNJUxo5&`V~(b0Obh}*`**cY`UCF}ccxm@2Y=j(QK|Kw;onYDcrRiniq ziU@q?%VufMqp2F@@8$$-Mu_a3cOD7*K6Gt^h^7c>dJd5YRRPj{mpQ_01-~OLiHIE` zQl=2noE|0H=Up+4uoLIxIGVp}X*D6H^_G^LjqdJwx~r`3B2_Nia|ojB@t!Bohk6)? z-~k31H^~t{J$nis1_-!Ye3o97Xjhe|Y6su>VZy$K*gCw?-Jbg$Zg0CtJzy~@{iyCl z_cq`wc~mR4S9hV^b{}8S02vLy<@z3N7y(ExrH~P*yCkqeU9A%rbC< z2&M)p?Pic`0IP;XfH|cMceD*dYr~O+calI#r|+D?K>(ZI@o=zG`uOc+lFpXxGRH2v z4YmI+HJRq}|E_09vq*?UX@vlQVcIvyh3ngnm_z8sld6v~v?0cx!1niN=Wj0`J$Z0* zeN&ee7k;+4x7n<(Z?Es4KIwz~)h|ACW&O$bK3vWh+fCmEsK-bD^8fp9k!EHX`yNZ) zthdd!L4@&mx?a=4<|o07%_%e`a)D7^qPFR)nXq?U$(Li zeP4NIG7EtbJTo(uzJjD;0zgEGJv*NpJJY%z#TY{D3tvW&f*m3J5t3_+FmqO_ zVLlf`l!Rx*Nw+1##ujw!L^bgiJTw6`#1uD(IlSwz24rLS=A!!;BNSnVf z+g5imddD$pSyd8(1~d~TGwF#)0jThvS!3{35kjkG+HTTjV+jHPQKP99RSoGQqf=F? 
z3Nt|R+A0+#c;}b^fZ0bub|v|es;VeQy}Ok49L@1)tlLhyFgiLZ99`dDZ=1!zUL7CZ z`|PiO{rCUj-+%kvtM|Y6gK5Rv)fLsn`%fQ#@!4muUc9+?a(Z$8;^6S$c6ojI_Wb1Z z(FadIxVpTO^_q`+#J--+=9_sRw}%J&FJHZU_Tdi?53LJ9`rcPHkV{7xVtoJcqqEC% zazz(|3KT`@3-(31-Y%oU)y@3T>7k06g{EtJiD~*GRYJMR7s+k^fMtUjC?KJlhQ3ck z0wJgJN<>mM9%CeSxmyRYben1VDVQn^+I>;cC`d@m!`)3ozyZj^0)xCV3C)M7k;86M zh3t@qd~>6bS@I#pUDn9~VW?p>KhG|HAOPsQpvCZgyo)`#G{0LL4k^3c{r2z!dnY(+ z4*5es+m+%wpalm87j~t7&L;ZKQ$y-K(j$kVt0B*z?c$wTYO)Pc4ba>!dcO0L=@rz{ zXbeC)T=`w5eW61$LWnxd@X_uN=O}NC2mq?-eg-v5pg*Nl0SRmwuA_81cRg566bO(l z%8_VCE@yCH7wjROX~1M8n-dTWN)a4VX-Z)Om^Dl_0|2J1WBKsWFbGP6uNfKwp_!>@ z4txwmd5+P!VzX|%XEU{4$!I(soxOfzruR>e6xzL`$#$`w)(4yATrEths&=E7&#%Az z`qhtq{6o`Fc>m&dS=NV_H>AFP zB2rUkRy7ex;&Ecqg#Hk!s%NKBOtNwv)3vRNfS?d!BtlRPq5#&IstJOXr3;~#U}^V= z0ZP>pSvyyW9TJ#CN7S`#syT@<#=a;#02Fo2>_yd_i)yZ@V~l<@rqWYc<9G_ixbPDL zR$$|b##Ey98YK2bS&#RQT7Z6|t{y*laDO|$zPWt!-jfgh^xypS*+&zu&e*Elv#m#Nsta=ewm3{1EV}-8)z!xPM^nJTG*bR?h} z+uPe4&u%uJI>+bN*NO;8G3E)eyzkwN(k3lHQzK$>NXRAtV2+DittKxi#|W6lfOC{Y zG*ud~yHvWdbIBn*ihyMv3jjcYn0NIxrpyn98Ntpop;;`1DKsL0WnBo;5H|1P_6`gg zf_1Jia?y)0a2jd9ajF?}NxCarcNKGPBZfo+?>zTi=rI_gf3A@;$&ymd&P#iUQo{x` zgPrE*&XA9WV0^d92f$1$U90)tR5Qx9bjiYYIMxYnG6ZyL@8B+ZgcyhW8Fug0b|y`b zn~8KFb@=_1y7GEtH6%pM>|RQxDwamR2@r7S@t$&0eo8eXq|_zQUFsZ?{%}llPqyo% za*~80WuoQAXV(A>hkfTu0yrR7u!H4(3jjo<1Sq7>1&9dD970I4USfVwcVD(`BN&a43Hu;1sKI7D z9y2UA+YK?BY4*rRBEGv7i8u==h)hjH(=DT_NQlI#e1im*A|$naz6;gB5DAh?0}>_6 z=+to!hy$gFW+^xfIQr1B@A~!uCe9BUuywFhrzUJ<2+s(VEDQ5Lu;X8I>=*(E=W(3G75a!+75TjI%5XmvANYq#{d+!)PBJ_P!vuKk143lKYd0#l6 zR8eLM#+C?ML`eQfjLa+&3E9+;FvdXa)hsb?YN)Cy#EXKcqLMiSz!+lYiVY~8PKBY( zGDJ#q0I7~sBQZ108~^~S3V_mp3MH98r2Z07K@lNsyE9cWL1Yy{(`cwM5~0N4nFyh8 z`qCFFv2E98kqJSx+it4LYgH`gH(j%J4302tn^9SMPuttuEt;rEjN8pRww)(e7DZW< z5~D9lU%BaY+%`?ut*$R`jL1)C^}*ik(PO^<$m&D$BV$i{C+v&W$h*ed+`1b$h00$L$f7=*=V|5EPDD1@C#SvU;A2bqrcx)6Z~DXqg1 z$Sa`?3<&e!m6}SBVeeNaYSkz9^zhh~u|{jxX=JKcL_EW+={6>H_ZU-4vI3^Nt4U~ zC^tBmwYE8)1ERCx01b_RL7L;Ro!6_0DI^|wkQu=Mc){Jnb3_F+LKK4}I^5+cQ%zL- 
zE_T~4uV_Mo4Bi3c-a+|?0zOynJJ5gNM|Q1A-Uzts;&S7+tN(MW0+f@nC6URlImpM& z2C5)o7hsAEQ6)x6hBGNYk#j`o3$F@M!gw;-Zr7v9wApNtc)GtAL+ttxVi@@`w2I`= zY;W&CjklZl_SMC+_us2a4T6`iuQpq_cmMR~zxstQs)6Doj)|4aqD(%Ymh>1&PLq#k z^1Wf&ZQ~?O^;7?bsJFI&04EgqB}T!_}+)B zdGnk3;_AKkPJi~7pZzC)^0zT`5L(9S(ZdHN@mB%kPM@Swl9?exqDXtNEacd$?z2VcR@e#ltTcz zBM65w@m;)!V7PJyz}N;{;~iPZ9gL8pA(&Buk8I!_5fi?YBJVEf<~Cv%9e?Z3?!__) zow7_yIXU7F`e@DdzUBKR>##KeFzGm@8zJSJA*dyREdZKQCZq2P_LK&|2v9ZC8D^TI zpxZGCyJG`8wNMg0<;4y=5M3dA`3@SsAzk8dkOY*8R5ZyiExjv1N0g&DST^_vrR=tt zr>J*;Qf^;{1Dx~O@c+A01@I081iO!&`?@SieW&a^z>VnWDMgT>$KD3pukXh9FG}AO(u3}meIlzh?8zdTA!J>)@F!%&0IwGc_RR>f7 zOIH4vqraMhF(RiK@01gDXsa>L`v4fwkXH}^llx2RLx-j?p}I-EiHXZXR29;@iZ5lg zlvY$N4-%1!_Ax3!+ccpIhldAylWF0J5KVM8nzii)n7cN%?Z%;>Ovj^Y!k)}9 z#Li-f?Y6x=fAy<>Kf8W&@a&Undc59jFk<0KsHSeRAEkpHN{Wx)f3jN7zxndTcz^Qt zTl*jX^`HIt$ImX_ygGjT-rj6)dAowxJ$v@(+izaBorNwwxPR2H*X?#&%x1SQUp@Hf zllzC;%NNg|{BXYmt`Cly?c%y!Iur+Z`SSJskA7#h-uA{tJ?7YiKKi2YMcAy@bvY`r zTHdZl)9TU5``*{rtE(!`X{>u74KX1|QL~7XF(WeuCiV{MlSB57E`&iwgh;tifT2npWF)z; zOffA7+Z5;&h9C(5rkX_}ccp6vm4}Fv%f=j5b5%+yf($Co3>*W1njvDIiyp#Rl5eV+ zDahdKqY3~xm?v85zmo&G4oyII0LqoK4j;)7A>FB9MxZ$Z(5|C0lx%Dc_|9QbLqwOX zZeei!95U7}(eL<&+_^xWB_rJRI631E{X=F=lz;$X$BPq~fEj2yxp^O)4SYa4C<Pke99f19ho5CyJ9TV{e8{{CQr zG14!8`B@+Qe7XJl+wUfm`_jc|(K`$Nf(=F5+#&QRASUnct?am zlv3-frUJ|kGlObGoHnGVI+_`hB+NO7Nq=nsnAe`B>L6XR448Zo6f7BHM2V1SkUVZl zaJRvT5uJz-qERBUEJjJzj3~L-N5ZuDk22$vfqNnpQDREBqKKrsJ(+JOUD3epXW2%K z0fCq?6-Wps8Vpop2yKI)8Uhiwo0SF%UAJDXnr2-PHkibZYqZrzHQi8+K zcntkwwUE{P;N$O&PflY?i`#W+>grJy4}h^*cD|mz|G|6MZqLr=UzcYE_Hbn)ok0WvI?n|^cq{-eiz6LcFd*Dp^WJlx+qSS)Tgw-@!J54yN@ zW$B#T-mc54tVdtWc5QCUBo7NSM0So7p44Lj`gF_cWM1aB-oDCb0iHC*hgsIoFv`1P(jA`N^*h;b+Zb3uRu2e}Ij3Bkc3B4F0KfWbSM#V&^LqS>HPgyGt{ zgS>Yj!VoqgbAhO~d!fW%rYwSHfOi7W0p8ffPtBc8F7Q)`AMhWu{QWd*t#|h3Y3V`= zys&E_23*`Ai(v0Q+^`!U73Jxk1yiFWa>`*jItw3g+(2$7x5MvM%Q&Y#Do%7xU08j?; zy!YGfM#=6yc=XK|pC2CIQy^cJVp@#GU9(Yg^YzW?$?@&g_2*xF{>OjwkLr?5WwG46 zIlGz6o_zP}49JbAdx5vj?TU=rJ{;}uuNIrq*G~gtW 
zcwe-lo7*|CGl1>3L&9de#VtA{Q1Ju>mu>9&Cg5s49#zX`$v}Iv$#h%QWyw{wcesCe zyg!*vTWR~yQHVOfx%})e>-nS6y{G%r3ZqczLP6`>0I1f&9`z4yF6VEr9mMsktHHC6-aj~=eeuh`{_*er!MY3ASC`ZM$uIxnFMj7w{z0I4 zy}UjeA3$w18Zq&7Z-246y?^@Y)qF{=Jep27tJP%xV6D&vL6ym5`sUS(O|$M4_709p z58LI!EIxbk?EL!tW_~3qA`z7!#6E;xML=V=#b6@lf=~g0Ou#T(-J4htRg(cRr+i=K zGmDrEfbx)RMkfHldsq5`k~?G`_h#=AtLhFL-(eeIL$iJt43p|SLlN)70z245CBc#~ zM5=s6r9#q%_@Bf3T?B%xZ_KWEFqEfeJJcF7@A%uygQ>zG^4aZI7R@heY6(zD^IEEw zmP@Odh#3qkmJIHk*mAZSd~Q-zZ8UrhXhXvUfCCgy03tRFEdyvacy%g6L5=zCChTR% zNIMS-OBH%P{zEti9m;VWX2Q$>jFH5YDbep_W=v?J&Ir;Tb-l~X@9cHJ^ppgNq)D?T zm@0CBM3oPJb_mUI$kQc|QJcGJV8Hmm00xpdt3T6Q&a_REISdIU?MllCVAphrR8}Pz zh@@JT(m52-5c|rs4*`+dcJui0hthrgcf9UL6* zk1Fo^@aF8|;P9mB_-4B;#?wxsB)^&%x7+Q5(|fR9i%8RS$H)5`1=O7L0>-5eDoB`C z9Xlq85gj|nqRND-W-+F-82Z2fF$4fjL%5!uWAe`NZ%x;LDGau_8EjG2B*7#`W+y5| zSzEFQ(sW)h&vO!yb1Lc5q$n|CJ7$hh#fS+EVu)@iO>7W7Ad;yH7%>5us-*q^fm7zl z3OphtHW9<5X+cmkG$I$Hq|znvj2=NWc1^>kp>Mj)dfTi`B7j+py;|W)Rb4LDlcHR# zmaFy37ZoyZyH0@GZQpdQ8dhad)?-Ibx~}PaU|KaDKy=O*1s8Q$dS)b776Mj`Dj*5k z-^_2fUE`~=D7h#K$D}cC&t6$*tApdVZTf9Dt!K<^W$E1>X?Jk%{`*hY+wStk<@(S5 z{cjEr4n~y%HB?|dDGyKfwr%_M=f8UR@c8m<@0*t|_Kx;!b=5Q*Gkf#-Z|;BpN3q<_ zZ?BHZ(t}&AoBiX*+rBrTy@SJ-Z(h9r-und=2F%rDxxJV;-aFWz-&{0pvs^A-yn6BY z^!}rVN2}GXk)58N7M?cSrt3oAcV$^J_(iie5++WfX%RsHi)x_Wc`($VHb}Jr;m%1X zv;7Gk$j+~sD^N|mwD*pgOf^cBm{}lZMgoJKIU_)JidM6%)lPGniJ04k=67R%E}rvz z77m5^yMK1oGu#0oLv#SkY7hm>fHuG#DrCdp*j-k-OAAA2G8>E%GNv+YSrbVhkEUKX zipB){=#T)+B;zh-X*b63HV3SKm`dBVL&?xW60V*`EqF!D>Q}?u;3w%l z?p`FJRhskiu3v$nYk|ClG?$Zkm&n~pV#Aci1{bGX93Pbw&jDQTmIdTMGDoQb^fiYuJ*VRSQwjDA>6+@Sl5@~!l8z6%vm%+3#2@Ea5&Q3Am z_qjCyj-q0bo!8u4AtaDdBwt9o%ZY)|0TLvFzyOFv&1Kb~Fms}%M8%Oa)ffU1stTf0 zF(gunj+hWd1t4t|5K(o`xioAd(KAKW(6tC6v17ElaMFl`PWsMbfT--;cDrr2P3g-J zgJ^WbA%+ATsTZCS_5WL9=bz&9<&b1jx?C*!3~`iaSAHmh3zg z$gWpGWONRY`zS?>qsmV_+xAwpnNAPZ+MT`l^7Q1i7*A{~Ef9@Ir|+Mx zyZD=zZ(c6&XP>|LyMO$H?c%L27Rb7H^6>b<(O19zY&8$R`=gI9=POqgvy-En*KZ!) 
zyWejXalM}I?IUgGSFexw{)mfyvzi^8t~HKEhqHs@ZQEA$B!s|RjH(gzZ5Nvdj~_n& z`n&ylaxmHpefRd|+lPT0@KZ@YHu7?1W34nuo6zuxqXTJl9AcATb! z-_hqLX5dnY+lV%hU zR76cN4U^@Gs&~PjL|*Pnbb|z@TdKTuD9?9SE9@ei+nnKKJD(`)ICv7Il;Ep26N1BMuEHB@qed414lerG4sfkLu`w$rk5EYUmbk@eTT}MQ0 zq^d-y61%>0?9fz9%(N&eZ2_aanZh?d|!Sdmnt%bej?n45>HW+dJAU zuCOKqaO}96Y}?+9#&vn`!Rg}iYB^s-?H`^#{OYr>9zHpZ7Tzot&p!IJfa3Lh$%Qk} zaZ!yPP$B!7V>8q*LsVlMB{KvkI7vtxFM6l3grRWQ{- zOfb}-1}I5EjsUQWu#t&~xhRSRr>jV=(-8>Mj+49fWRP(J9gv}+SRBHK?fk0;C?WkB z)HL`G)?{hQDqVpfnSe$>RZHgCsdOFc=?qY%Or)Y|ZBFvuRfDLS5Fs@X0}+d2sR`IE z)Jv83aBytk0J7gAfD)k?5P~QWrUnZ@#RgM6M4%M15D*NRvN#6Oi3wPykTDI=TaKhV z&`OdE`>+^b0LwC!Y^cNW-tK2 zU4t_`KjfMs9pXEDFoQxi)X@1%OKz8T6H3`NEKQY|84zpsC_y1p(7tP(^JyF@L@@(Z zK_bzJM5chCX~6^ls1hNSu0%j)R%Bv3J~~`&SGVUEhsS%Kk&&-1FF*V2`QhQQb9{RG z;LY>r&h4r2%d?Bi<>Kh*0Xo{OR{%y#-aB;AciriO2X9`T`O>#-KPoFi3^AHmTDne@ zz;i1L-^309nH-3cBQ+5bc8-uhttzS&tP7w5sbF-*K1o`K?Pg5Ch&wz|3;@ls6R}K$ zdymLw3`oR@wl5N5uo$}1dozuFuZ{tfnESq$(0lJP z9B3AUq<+Hy*f|0eiDtk=GN21-h=dasVSosUI%EVOjA-N&vn5CrUOJ~E<8&_ zHmJ(7YukEKfnu}WZWimK+5U7kQ>0DPY&P4{x#@IDK-0-&Z+~|0{>fxI9@UfaWU7d+ ztcZ!7D+?~TFhH$q_Qhya0U_5@8co!It5QK->_B^Im)+{>rr8{fss|5GUw!u3Y&1GN zxu^Xm+{A-N_kQok-+TV@=GA%l=GR~UnbA2hYF#=KA{f_`!W$Ub)FZNlaz2UR>^Uw;|4Tjz-!j3%SQ z(cZM2uA1$9wd`Ww^*u2;c8NCv7(^)u-g(O&R;((V%B!Lz$7(8KA@p}WJEj{OnAnI7 zDK8pBLp35+P&70#Gb2@rX_%3mV*^X}wTiz2_RwjLs+r_T2H`4Ogk>j;^5g9qecZW5Y?Oj zhHgo8XK|3puz@U7B4FlZnXL%PFhK#7l?Vta32kUNyl9FDXov(Zrg`L?HW7x!o|Zr; z1GA)6+AV4OEw%)PPqi!mcfdMgat0RTwpC}qG$Q(1s%`%!|S5z)G?@y-#GsA@0-N?2D2 zJs>)lwpU_QL&LOrRH8Wc8b#C)sp}d8J(x{3bka9LVN#B7*DHze!6zS{pT9miIz-3g z{fWPgzx?%=LClcarrU0})9EY(tICl;zg#T~S4vPUaI@J=MiroKHXC#-K@llNDToN6 zDt))j=ndv(Q^kzEBcd3jC<|sxGi_-P+ipZNUsFhVMMmm)Df{vQ10*;_!~>8c zi5jF&;}y4?8Vr7D_m6v*E>gU|Mc+Q2e7$Wz4^{Z-rw6_T)dvmYAVOAd7}9Ez0;rk!AIS{ z`PJg$a=kba!uid7QkUO-^{bC7_u(g>eEplhIy*oA^po%Z>aTykyt;k*$tTy>m-Cy; z#r*ui`_HOkR!k~BJh-~vP`_sM#b`fB98LDVeg4%0?~AHpB2fW`X1y`clhb?a+w0kE z{PyDf+wYz~`}osWFJ3-*@_`Yay?ImBl`mb__dO~q$hsj@=iF>G8da5;EmrGo)5I9n 
zBq0FA`J$2-hnXyO&N=72_aTO+>qI2=<#_>IVhbQ=4mA_gRJo?OtfI)d-cFULii#@5 z?E0C;J}mph7$hJ%VLLVy_h2cg4rw9b?0QEc?mC8C;X(!|4DOskEnkO-iE+(ijCzMp zDC9Xx80z*+4J7OIgzAWBg7~AT8i;_X1yRXskxW$5jhy1HB$ecV*WhrhNI_H(5ouVE zfItKaMv6=d2yA4Ti9sMR%^DsTM0b`)mJgLBa7@)CVO3d~l;^w$-O`X|cjb4s$;>Z9 zNJD^TUHt;cE0D5<}h7$|BWMBg^ zy`!sBx=wi;QrVASM40?6(*>xa&Xs-NGkXIFLDFDUS|gPA%9=?O=aYfK(C3-8oc98a3&S_Nn0JqK>vb}?wz z2Lp{74GbCDb}JeYAoMY9Z0WoHV0x(5`N9q9PR*oGF7}{lYqc7QfODPzL8C|{0uu8~ zx%&ZNVrIwe3{VZljETslt!K{rN&tP~F*7w}hCq&pjW{(12{2T#e9{S$R}vanR1oQ7 z=(;9${d|5?mj#FrU|m%pVkYD`YTS|?>>mY*&by*0W7ioVI}V{U0VEVKLeGQ%iW!ql zV`XWAUQ*5yjovdvNf(j?Exg0JXtZZy)2^eR^F;xS!J-uf`T`x3ufcgljvh{r!XM%Znz2_5Aku@pQAi z8BO-4)5&~wGg7MSiC|@pPfs7W&9<8Ci$q^c`Ysf{LJ9qL{mJ(}{r2mxP7aTZ@cFmT z@15NH{L9aqe*5I{6RP=OZ`91g+2!qcI;tF3&UI~AFIVi`WHQ~ar;~ClDw}4zZ5sho z)l@wavl${`QF`Y%hOpjjqDTUc2=jiDJ7wptQdiBA!0a2As6tG_!sKj2h-MZ|Q4+^D zVBq%$rG70aBM>05U)w!)Af}YN@o}Rd2+Q_L?ABin;^|gE2r% zTddN@LIo$$hP0atW=TguGcuL)6<8vscleK*?aKcIbpht?9Fjc%sAgYAbA&{wkYpfe zJ5AysGc`?4fJACExbs+QUv}|4<8Zs}$tkVwyqdCLDMfzFK1I17fec9}1YamiUM#6d zz%+^3kqM#_lSm}s)H14RjIk^Wkw8105F|$v5YZ3<4ckjpfgA;C4Sa~wHNAI5XJ+8P z`tp~b{@{n_7iSM1p1%C9>Q8Z-d$obT{-GOq3m{}3%o33lSX1m=s+tq5>Y}Z}iu2)M(?9xPL z>`Uhz;kX`o&!g$c6`nme>jgU(L)W+6Xf#Ut9%7fSY(oXqD2fK?JQx5aB2Hq|R76x7YVwJ)WWC z7!|otiB&ajW2i=TFU@>$bNb-kix)4EUEyb@_VLFbUEbbY&u_aB4vzQtrghVY+xcSN z$H62XWZ5n@#7wta|Eq97u91~4_q zzCs3~VrFTjspgK{Qu&-wmmK1gB7q=7ySd2Q-Iiu)rTsg_{E!BQpq?G4V2C8cfYq*z z9g4@HdW1nYpX$g#Fb=s4Pd9=pBAE@NWH3af{KKKwFi3lKL@bJ0QvM7Ka1;w-Q7sKy zqzsUVgA|I>C6*GVWeeUMFm+gC$!0`pkja?93yAb*aK;Ahd z0uzl?IG;9MGgI~m2a6I5azqG-n`TouL<2?yQD3^W3@57yki09LD~P?J148hH0kbOr zoTwmsat?@rSfa!z>88>UMWoqo+ooNv7FQP+i}_-`UYiLqfkfxoGnUR{;hMHZ6<5~) zaJyZNMk5I=dsmG{o#Ub^RjsV+dEZr4?X4iEoYRj`rm7PHq-c;_u$jp*MiEG!6KblF z2|%DIix>pajmDM4=#89XnvB#G)XHiPfsr^w;i4qxM3LFE^J+kOa+ORqDw!Hm8ml9a zRPtz$-h1zuP}03^Touk0L~c}+RFv!sHSGFO_%RS5p-9V~$x|=NzNY;L-VjjHnu--d zMDH77Z|>2v_vg#)^P69M_u~B7M;~5Xyg3-}xz75gIsfiu>E-x%`u6SH!za&XXXkHU 
zelw}v$??6L_2S~~>%GaOU)?Of`)2y=N8{Py#cju%RXv+_n-*NTZu+Wrj-4k0?Imuj z!^7LF^RjT$$>j3-nuwb1wvo-r!-w}CzxU?#+fJ}3{K@{vfxLNjeR+9h#|LHMfP5e0 zVtrFqel)JjqC|x0cr+Q0W|PsTYj0PJ#d>WBOoSTC*LSvQ*YDq@) zn3GdR+OtM4DQJ>;l5w)Eq4cf+Q@fnjV}OAfF{T8NN1qk%niNYIn}LdnLh1k{nRX1H zC)L@*Uw0`H&5VdpQAzW<1_Co<&QnB2V2%J13=II9S&Eb*2?5BGsR&u3`!dFyUuKx{ zhD<>JcIY3nJ!Bz4)&r$xQY`T#001C~Bv%~LMc0Zr2% z9Ujb=i@uBKs0(2>t*&lw#A4HSo~yE|`VbvM({zp;aV0 zZfhkK#R7}Mw|(DrZRP8t_iWP%5q#+?Y7&5&SE^6&OCv{z!H!4_YKgI zAt-3vtQM{;)nZxIUDE&(n2koGUQ$)4j$KlSrOJm84AQ?94GDc(YLf=f*?EUf5xl3q zjjC2wt|*JXYfZE$SQJEreSzpy0o8y!Fne+;W=Os$)KoNL8d_uGbR}wj>%n$QVU+=-a{W-?6!Rf&3axa?w>q7fAi%S z{d>Rp>0kZ9|K@)gR`L91Gnt)S zym{L#SI9iun{C@*3J@ZNCN6If{po}E7OTFBR!;ZZZG+T#?tDUj`*%^@I06->GsGXhf+ z95jlEl1GaT;T;$HT@Dz+K=R7Z0I;*&# z&EI8AXc+f^+$`cwMwFyQsz#W$R)T;5Xj%Hck7Zf*eP@8+SaZt3?Y5mvMqS$)CR>=C&xN7Z+>q zN+iErFP+EfY#O3%H;qOW5ecy>ixAqj-8jNkv*l6Q_kCSgbzQ1yRrna=sH(?x?Jcx@ z4+zAV8)76Q+caI8LQ2C`o=}Ji=ZO@MjGeJV=Ya``4ag7`fDwr~siM$1G(vW2v|O#$ z>m{0^;cB_MzPh@+xHvyQTdx*Ly4$oZCHFW+0$ntl*w#k}GhdARuIa;eG?`YOC3cJD zZCMmCcJ-+2V;>|EW8bRAFrLf+G4x%t-6~mG)gktX0LZAo#8G?4jybLO10;|b5RsA( zt3&`C6;<1UNn4FZ+l>HXIjUUYB?N>#%&I`52*4ICX1g*@Z8#|8#O(`jey0+gDc? zZ=UX#XjNH_-FUA;GwfN`H*Yq_ljG^$NfXUEDyk_HH|ynOZ%@FkR@X=S``!B1k7h+Z zUN`ODWWQZ)1ugX3dNO(a{EL~x4?p)t;3@*F{-Z{dhY2;K9Skr}ti+zkPFl7D7}^L0t(^EbcZsf`OzFsR8Qo9z>(4 zfE7l&*zRQk!F z3x?}p7zi2`huuxA!;S~Y<$4nSi+~xbViXlYF^GbqCW@jEqDnL|i>g5^ub$Qn%1D+j z?vIyELRMlwS}QX@4&GeJ!7sVV}vs99jG016<(h+2|oi(2Y! 
z(|pgs3c$|64lQ_i2fn5%Ghv0wb@DHp(cKiTIml;{=#RtI|@2DpfiBZ_h04b)C@w~^(5+lL{ zG0iBdM2&>x7}!w(g+yU;L4&E1cSTW(!g|w0vDx0iW+mQp;ayeu`DT57e#N}MT(`cc zih8ten#-FT=Ze{EKLmBrL_=K{pitN2cH76kDO}pg=%LvWMn$Q5WnM9z7H zKB1t5)b)Zyg?En76OpQC0u^EgWag6Cr^m7Pz9^AAATc>pWba*37-Ao^Z?0t1F_%Vn3jBGVn9pFLaa5EbVw8~9C^5~S5P%N-vZx>!Imj+1M#v0` z0y?ZM>FS1jpWpucug`z>%g_JOKl`pI=0M z?~@IJ#l6X3so?l)H2XHo>*1!JsFQ?<_N8kVb=P$pjEC2BR34`6P zwila?5^OfBAV!RJRgnQR5ZKLTT{N9v;jC>Z* zXs*?FMZTrg<~!$6M1bUJHZ%u$aAv@!6Fas8lXftPs)0%J)=YvQl&t&#Q4FFnf+}Rh zCHD#%B^BYIqK1@$ZuT$AyaX^IB9a=4q-DsK@)@C|h?+#wiM=pD1w>8$XD(<2%|Sf5 z4kdk68URj1*NLFeU2YhpK|>RehkYR1fdD|3osy!lrP9jG9Far?&h;DuqNLvi03jpJ z2of@Lat$H0JFsuq&0q-#+|5O#H$k-HdG4}r>H#?0BPjr+dAebA0wmkiru3E)-cVYS zsV4D&L=}-JiU>q*xDrs*EJz4}iDMrSt*Wc8XP##X(=Ojcn1Qkb~R2`rL_QZ}Iu_Ncvaaq;O3kDOy7y-<|NK`^>3I=CN-~bDgNK`6;(oBAjXEArFR$K=4~`YI zoZgQ>&1R+27qii_S(IfxLJrNc3ER=^@N#}#RqpWMc)ncSE*mktyuSV6_kZ;I)r-%+ z{`I5Nhi@;=LeRs>42Ys>57fnOxn9Q@H=8XYAP5kpIj_1b<`?TxU7j2s02m@YzIX4z z@yTMfxS8KJT^o}{gF{1%YSINHOvZnXSWQESV!AVdKm-NN;&4qvurRo`z&jxrhON9g zJfyC8mjH6yu)*Ifn?+~LC@D3Eu3!)lraS)5FwSh4L`t%0%#lCy(wK9JWs*6YDeV4d zJ6t`+ywNVkG|7+*A#&hVv7Jp&>X9;|uoJOBIzW&ZU_v7TQ&L1BKt-ns#Y7cgLSw+R zK#c(jU9f11!vYXU&o5~XVGw}E;p-jvqsh}wEUW5az?5)DzkmS%0b#az05HQ;TajYg z9A|@Ug?FkR<~%J<$&4)Rl^E>2v+8e1?3z0O1HiOGB>&EU{SCHqJGurC!6@VYIy{ks zlyy;MC0=}$RgE#G!PoVAHJi@1+m%IND8b5azIk1PN^~fF%(T6PPlSm=Iv!ccxkt&O5ILfaKUYqJr2n`!qI{%4Y*#m68Y# zE__ibz}xe)%d0C@Z2Iu}^_v&ZU-W%nl#UGA5EA?XW=sr;u~FS_w%&0AW)eqLQ95>n zrFS54+qLVBm9-mBM#Lzg7gc87Y&XQN@MR1=LRFMS-)$7A4@`lOIm8YyIZ^B+~>PHvayKmFM+|MUOj|LXF3esO*K+(#!-cEWI&0`2zTc5lYWI$Gu zw^&RmAsIyLU>2lzTAL)w2Y~Fgl9P)8m}L?;p+G={BE|_tCsZ>gOb%$uB%0`F8kH>M;E+52HY$}Hih2x^FF7dGzw>t*I>++q&F4zc>fui|fnDWGW&D`}?cy7MQz!GeRwja<$${P>H4}sHmc= z?fUfogRYCAYY72O0n#=VsH>5xm1UtO1Zh7Ddhe}oO=HQv^2KVon(j>rG~GnrJ0xUB zN$AOiH=+>4RU`1_#e8vjd!vB!?e^`Pw=Z72XtrC6%FGf0#LOhYvN42Ivei}TnX0mI zj0DE$7(perAqrt#mPJuSi|nZH+tFyuRSBlmWTa8L*v-ZhQ0n`Z$XrplqEv}$kpZLh 
z?20HoA(^QXgBeprh`{Jv0f@+4R3$MRnqy=Sg(sE3>P!WJiqY5DSB1$QYpe8{DL`8P4AWQp(V@Cwit^3e2kyJ%F8)NBVJ8^q^ zh%MJM6EpHXvH)XpV}8r~KCsza*4ga8fml0(UlyBgGBU_G%+I3n@7lC+*F0puB5%rcwAk0xt71yD3K(>$e{wg|lQaX~}@ldl0#(nAwDf&`<8 zsbf+CL||2BATu#SGDLJSBxkn_=}R)H(1b=MA74`$#+Q>cDANGhUtw6FlCltHP8kh3 zZ+byABv7@a*icDZW78TB69Y=-5NY_7RBh0Q5klT^JRo`ly{>oWPZ{n9NCO4{yA5c& zuM>?tKxU9#=ac~RWCK|myGYxpkr0ewaP0uHF7#DVNQ~&5CWBK#B#c23sO$PHS?goh z#o6@8QAJY3Myqo4)mN|Y9pCHwX1=)DthU8yI$y@NwW6-q>vdg~i^cNMgC}i>>-E-5 zHtl9KtI4yNnnB-3$Ig*oZyR!ij`|Q{2!xZmESH-tfO%KM5XYlp(>2LPhrkL~wCy&= zSXKqI+jecxaha`_fQg4=g20Y^f!>v6t?16LZeP85yWFf^ync0ld473zfdB`4GcLe8 z=h&rnUS=9%04YVHs+fi#S~vz^=hHUIP*tV(6-WetqVUsl>^u;Gs7xl)(1$*BlhIT{ z7eZh8LN$iIH!?0Au!=>ETDsa)kXco{_dcQTlqT~jim>xVQtmUcqoQIWV?tkeGYx$R z7SMx5>#ggfb;0^zA!tk{+3E7iQ#y(DO3PCb2}Ob=cgEOD45q;(l4wB=0LUYQdA722 zj))0FBuH?TFUQqre^yQ=#i%Z4)6wInw)e1@94nRrg5d7L3bC1?5mwXc@Ilvat=pCr zxAQi}rmWoQ&_^uvJ zr>76hx&71Qs_^IEKEMC)$@22D+&h?5)#cgS$>Wd4NB@7;fJb$NFGo}!Q-cAZ=SD~ zi;*u{Xr|89heu76w>lv zG{n4qX7IjA-jHVBo{ zpqg47;=TwB_)h}1a?4}GewC!E1WLO=DZ^)mgLD>PfMZM{`G*@)Mz&Ig#h3(JMx>S( z$7_~VC1T!eXgk0L@CSzzk6>nmDB1ZTX$Aoyk7Xl(CXz7FiF^g zi7-1eL*^7F09iH9rX<#(s>^|&s)ug4=#hybZ)a^ysUbL5E}5rKJP)5u^d;vE&<0h*~G*|?fc z>dAJy3EGWE<55*c32N%=Q5QOk(Q`@25@S^s2%43CDiR~JHx=>)&~U)PlJls=$j%uc zIrhGE-V+l$@;)a31dAa6A|OYB5VY@2j1@SsIS7n-7q^HgLb4T8P*cT-7F9q+VnB@+ zgPAx-h4+(jJt?cQ^kr3e=CUe^@uZl}%A*6Q3XL(1C7Yw5GQT}r-(JSBoSfX!@lzT< zaOHuSGjfP15E)QG5zt&&o;-%ocb9L2Y`~8dw}^mPK6!d_HDCX&-+%TO|L))Y@BR<} z+b5qq{MUc^=l_@g;{W>m`M2M`_&=sU4?^3W zy?lOna=ctD>ao4OxVm?E+%?+z;tp{>SsH6D%b9~{rB>DA3muQ9KH0I+157A4KNLMqkLT2!$i z7DB>l@35MTA?OaT*-3uD-igVUe}BgpW;)n*00Zty%ls#U`=**e$eq$Z2|#sd0x}$u z-{P(dFia3oHUUlBR(6`293*oE0=9$E)070!u;UyDZ=&54o9&1SL_-iYGE}tW+?#Ay zjni;Il2wZ16a~O(KmZg2LrZ8>9#0S*`T!VK77SCngYNU4HV5+y zKnRA4L})oXB>{PQgSp8OaDW*fR7=u7!sNh)mU^`$v8pHuQDTfSLc-P%F#&=pM+3{B{QglKKhz88xI z>9#i&i6YT7f`N&krgbJNeW$UP*p<$Wt8zS^?j7un$D`U8wJ*GPh{!}mT|iMnU1C*0 zT@eDh301WfrSonC5mr~t+wZPky{T?jqr*#g@&HGt>Za-{AeZ(X5V<~dzL>h%Y1~HW 
zT*caLHqp#ibo#-g!{^$>XcXNCB?dPwi z^`xHE*Q@DdGPT)qIUiM{ zht<(yu~@H{-IjY*H35VWqXcQYezVzJ;`MAgxqou+hoAoN?Be3(o0o{_D()?C+r?h0 zqJDDv=z2N7S2^)K;Y1S@0sx?tmc(T|cNd{;@W!!0@ol^6c*hnFNYO5kK*oy_ zwDL}@9{Pb?)F*-xhlrlPRNN&T*cF@r@9bk4UUlFb1{Gy`e%&Rafsok6)}-S>$i73n zz?z}2-KXAh8A<6ryet3!B?~h^17H{=QedXc7{M42&73Nlp+X)+&RN$WIm6_kwWt{k zfwU(;5q9>`nA#uQ9f+K}5}2MKm^>w{BIJ1YKoLP8LrcRRmNpb94qn@t@P*uvB{bg* zhC$Z^4r*TFsHS-bZ5k+tWL}?`rpfosUD%v(( zQI&nyVKPJ#W$$8)j*InbwZFF~L8S*lm1t%_h1)wmy!hg~dNhtA`;*z_+nX49v(~;R zW-Qo^t8&wHMBIg*$iIF2dNi4g#}fizhgDTLWF{9yiM;@X5ZPcfuC@*C&nBzaXKa8( zRat5%N?(8~p)nF80gxC3N&8wIA;vyHRUo1F&Uq&0qAbZ5=G>+aFJ8U< zVK$i-g-1qE(I|ul1|F$!t}Gmz5z=^6d+#UXx@4}3ay%M2!lK}`Vw?zc=Z-ZQC_O?24kUSZWBdH!~_dkug#Tu@Ftx)H^axO_NDACgztMv-7@G1!rUm zssg6$xhM-Rn7w0B5l|70eGJleVcYgGY6!89f+`kca(A*QV1fWm+c5#D_DvVM)}k=# zbW~3#lj&qy*L7W2h}MP}6);3FCB`x^d&gTWx&?aYs-oPx2hP+J7%En{|71M5SKPeb zT)wz|_1Wm=oAJpLKK=m457o`cjgt$Q_cbcH`l#NQ&1Hk=sGKTz5v}i+C&yo3zMcQ! z@BZMw{a1h4oBw-%`%nHq|Ls5d-beR;@<%`UtDpUcFF*U{{SThqzBr#fId)IqI|csL z&wk3~^zehdQQau5|L8r1Q54VxRfi9N9J+C2^k$3IVpP=tE<~B^A8W+h_3B_U zIXa-OS-(Ad^ZxtKUZ20%uGWV~N9$Hr>$NM3q9~`cz1MHv?jKGMX5(o+x|%PWb`w;J zx+d?Zv!W=g^=h$NE=AK95&lw~wU`b%H5HVyYpHvACoZlc#xx<8Vl(>Uw-huK5P5S@=<=;#glMO9H zingY^HiGi@#Uu>MmeINV9U^;b#8Utpl#>~2vB6$KxreJ9jJqkcF165N{Nyj)*Pk>A!VkU^#blucXMASqH8IcHx z$*ZaJWl@&f%}Nt~prMcLY;P7~zum4iSXqvUii^vecI}>jI~PTER+dgho3@PtUDqEU z&ALT%c5(jn{Uwy!QPe*MeOt}bstWje0!9UWXD&x^;JQ|N?QzTw@{jzQ4N&yrp zEksk5P?s&u2e9!={QriLBO!wIO%g&1g1#{OrAExmq`i4?lSH^Iv`T_`TzQ z{6G6o|KiX8$3Oi2pWJ`=?DdN;|MY+O*FXNFA9Zng`1DD6@8JDUo-NKV4p%20-NB@a zn=5H1|_;;E-|2kZM$uj^Q!ds4j9#0f3GNt#d>{leQn+qy-)Dm-fX6- z(#K8P{`&LZJbC!|`yYM(XTSNy#rX|+2Wqp?6zX=CS3p!C+t=hFUz~R5*ZPiHO0WDM`*m zU_eY+){{CHHDrV|DxF5im@<5m!Tlker%eV5Fl;!3fx907FNM4m0^d1^!;-Mv@hKvb z3K3Cqy~Mm|J|lO-y5N+^aA+yiQWFAcDh(`S{7GZ0qJ7&t_Cb0iM53_h1hSKt!Eeewyp{$3{kqa8KE|;Wj7E4Np;G^>)b`k?>j}%cPU#@0M z`Q9V8q9{ZKOh6HoBnBc+gANqH07wlev0Le8U}J|w=-3gW5d*S>m~3MclBs|qA~A-( zV}&$85LFZa$;H?sV&C<#Yn$~(qST|xmlc)8vg@~P*L0n#_FY7T@o41PF*Aycy*G_} 
z6RvzIeYsl4VCLMU=(qE;D_480i>6tbpH!1+HJg@?KK2LiZI`RJSGVI{>&co&M^MZR zJh;*Txu{g4pB-v@fi^-OG1p*WeE;O+{(-;Ud^|e1UN8Ukzx((9`G5AGJ-qkyFaGSO zfAf$2@ZjYB)!FN>e)ai}|K>;G>cT%bJ^c7R>6i1jUmtz=z0zp8Zf&!|Y6L}nxHmp~ z{_E3czhAjY-v-BidT@IA`kSMZLnb^tIJ$WC?Ox?alS#APg24~I|Ko3;f6;~Y!v_zq zudmzrYF3YXiK23TarWTRgU`QQU|IAMHR}FsbaXJgw?A8KwyS0>LE5GbipFSU{x zq5#si>HBCBay%g11>J$F%^32oOdpD5fZqz_JH{BY+69InXT#+1;DfWX7ywJOjb*$( zxm<##VmI%)$}p7kfh|wqeDdSYw1W+_ zNrKG=H*ieCASAo9Xc7Zv%s~H;1^^V4G5L_Bjm9Z4AgHxXGa8N56vRsBEJIG961#0% z`l9Pv0Er>YrjxdfH#bY~%X=sHMy0=9wxypmo2#Z3Dz&WYdQ{)c7wjw1Se8ZXJICae zW7kjW3PmNv@pz0_-nvaxb_HSUDbzC-)8yrlWCH z)(88e+1}oCx;Gxp*3DWB$+4IsJNC>77DYrv1(GX?0V0J6$$vC2s0Og8XqILM(*np$ z^P!n)jM2;p!~lR9Od*etD>IoXQCdzwL@XkrSfB$_$5i-&$U{+p0WqNSp$(=cqDkp% zYM5TfG@*YcY7t@=`nGA-t;NXb%DSxU3IJD|X1i^-p$}@VD7-I6^=LerjOxM@GoWYi zjthc=tPCoj6eSg2SW9ydfX6b}n z4X&CYdNpUp;47%6#q3_U?nCTdQL8jO+MgXvA0F-h)vsSpwg2M0`O|;-=l|$W{%AYD z{_dNvrziK^{)3;}1U`z5eUZAOFqYIe7Ts>^EQGZ+~-sJ-ofS28F}f zakFS4;Al2RSIrmmK7_;LleceQj_PsKhShR88I7mo>E3j9adp+H+^iNwU0p2~o4yk> zN8~_WUc7nw_K*E6@$T2wMLka6iTxqJS^BXp>%-ZHI=fh3sC z(-3NaK$&t6G3SHZ5xA*q;j*pMnfpajPRQvnm*>qMG<=+0m zy?ZCq$qbO%5I3RWq9}+oaYBSDg4v3Vl4pyGp=U4xGSwJ61%NcI#*obQNL86F`NAV0 zBQlakNw&a{xOvTPxq^x;OpcsGVlzzF2<25ah?w^Z5<641D3;asl&%K^LPau)5|Rwt zM42dBh%xqkAm^B!V@%7=Lf?z38r9`w>U}+))a7JURRw#pzyuKE;$~jf1xRQXtI%z` zFGTAxP4{MplY9FI?RNYA$Ir$OA63T>rq90jy_19f%U{sTFX7}NjHfWVUr(ryp_QJ) zn%RnC(g*Oa1eoCDSo>{=OC%9gzkj4fh zFFyF;?=Jo2Kl@j|{s$}h_@krkS8uBONB18;`Q~ST^&Z3F{z>Q?4+5cw(3aI`)vTc3 zdLFl3Qx|S}aCH0j95z;gpc&WWVls=(7ORrAZQADO_%w9VZa2%-+1RsV*Z19{`;XpU zpMCi3qjt4=@#}Uwu8BRycEHDn9 zLYnPSlTk8ovgc744EzlcWhki)0Mmam_(8T?PY;lcVe$b<-pWXbkX*r1z&A7ma0E2W zQCn&*5`~xMB!Y*`U~gT{k!UG)Yet^G>nUA`F04ZV?gzFtmbbI$UCgq60XXt)5bg zpb60G`%u5 zA4QDU;2U9OhD{?%u1UY`LNF_zvR z93CF*&j_Wess|75@9obHj}H#^59@lw&O7h=(1+NSMNO_SH57F~3cyI_lDb#b#1X41 zCca;VyicCK)`>_`ys)v4eGfoLtSTxdB8;Q}F^U5KPSBAMIvi%E@&YL!6^p7&E={SZ z2mupfl<2`kg@83^=mVe;BRK*>L^8u<{Gdr7jI635^f7j-q3{K$v2#&u)wE_%xMDJ! 
zI(AWXz1&=GZ@Ol)oG)*#uj|TB>iW&AH`D#uXjD@tD$Ni7@OKI<{=0wrU+*7I|JVQc zf5F=Sn_v7lO&cCPeLCBpV7ImTl>^05IXbFKlO{Njxx3VpHqqC5WdZ!9j)C-w{N#0I+XXGJUKp?OsYv2;ojr`BYpSg`QB_Vno{9I zjIZ9leEQ(MPe1(NFF*gX5h?0X-!<2lw|mor+ts`aQ529!?+8dpw2+D>efDru4{>7< z|Jbg!O~FKWgJWP~yV@TB29vP?1sbT@T~bKqH~FzUs6YpZJlE`LK;bTGp`<8nIE*f* zz@IAe6z6D=H_=_ol66ct%roVEW4nzsaF@^!l4W6f9>iSv=8v^Yj)%S8RE18`pw+sU{>tgr$FTTCJxHS9! zk@cVLnr+v07&yaPJH7dwQ+4W8)vcR>3lanY5Tr<1Bu$DEWm~i?O0rw(Zny29(EVGw z?Jw=_$B!(xgDgE(mPLvLNe}`-q>FTMbLBeujeGAE=Ijq^?e`SA3uFORb>6Vk3}cKr zrmDznIvb4|!+gEIc;mJEZ@l)#e15>DtVU%$nis|f)ga2YG`42s7{ws5!5AU~NbqSM zTSanpk`1FIkpdE9+RDI88aWE603eA(i6{aRHAZj}h$9i8KnP&iz>Ne#4f9|M4@j8W z&O{?cAE+t~F%c6nvQ6?ZW(*$E+5@(XCa;-IjY1SaHZgW>zuj$?;=}r~FH8xFRMdSC zYfIO|a(#8ax;nkMTyB>jSQj>Wf4y24bup^y)77eUJ__8PO)t-%zlbq}aDH+A@Y7Ge z`{(bDExf)cDu)n)WjQ&j@6AVz(aFR%qblCLi*LU({h2SAqX%$!M+;K0szhu=g{;MD zTsLFC?St=vM04l0lb`*im43VZ_|e-BZoPVU`lo;X>6d=)XJ5Sd@IU)||A+tkfAjx- z<;|ZZ9)IxsdiJBq2alfo%5VRwnI6pEdgtow>?c3|@#nwtmEH1ME?$gAC)cM>4v+8m zaSiP$)pvsHx_&bmH^CH_+s(MCcdH8`ggC02Q5UG5oh&aeT=}?kLxFoKYink z*Or^LZN_A%velcfzx|{4f848-qw1<%7i@m=_;DL<9gPD7`Orj*%o6pc|4krgpbMTsise+AIAX9foXzzifEfbOY zrIaT!2#hxX{=T#y67NmNw+CqUf6VFhMvkR9%cv5Os4}1mq!o`tGD?>-=X}hsRYXiq z~#F&z|VYAt+pTBs4NJU{MbZ;{ERR*I)U} zXWkr-#-mYf?Wh{pwkQzUB1Z9`5MyA21R8xvBkqa%&(K<)$4Hk!i8Mw5i8wIu2xyD} zfEZbFUyz_$PPerSF$sS}1WRI3A3U&y;4l_I2BHF>wZk{PCPaj!Di*dpjLBYf2rJDdu3b=E-e)Fx9`J`k}5*J#xJ3oIG z-`%yl=G8Mkd0>w3^5Pzr^AL?G>Y&25r0K$F7y3=>`%&4L`)^K`m-ysSd-?b`fBnnf z{NNA%=+D0Q>%aIjk3RbG-~Qdd`rrQjfB5CE{DpqmeeaWpjp=5`KYpc{)KbpS+)02D89{%Lyt^r zp1-g~Ij!vnkDirpjtxY&y1ILEJgM)^$J0-rJYKi0C>EpgdbuRvJ4Z*Sm)Dn@wLv4% zcWqx93!;W8hRA6@McxaIc^pS?P@FWJr9)Ydcq8V4EL73#GpwkC?Q?pgJf=I;*%|H0 z6aeT@R4NQ^ltcZ#e}Bzf<}fM-`!R(aBXsXplPsr_`Tflnt%O=5A~0ZH(wj#VA?x2k zlQE{GA4Y)8*Jk@Ql|eS2K87d-dd@r9kP%FJS{Tx+5x_7f#}gG0f$Son`3SSFhlb1+ z2hb(!f70Wn%BqBDfFOJ3CJkr;f--?kkoS}zLE->F01=pyUMo*&;efQ|vqRZyI;S7V zYT3-9579rpNI(%#M3SYnf+CVc8Fo771OsVbb7D)hU`6Pi zTTJIyyNhu%(x^TL1R!gI2${nBh=5FJs(QQGNi^H-ZZV&$_;$0|ZS?%b#?n**6^5(Q 
zy57gAn|9MQP4v+R7ebV%O*4|P9oNn2`9)=`;I*tq02BfsqIbS++qYl6_w4bf1{oBJ z(%Pcfwe4coR0TCv*;EB-EDA%(rmO%6t;G;y z6hzbz2(+}OEKF~yM+Huv=n!4+wjD)eq`r4byzKm@>z`a)uX@*e?R|7eKCLB*(FHF+ zK!T{2zzB@3Z5St(r=`6;Y2JS2o2DhiUR1sPd59#Ff}R@=aJGqVp~zjONRdtd)% z&C_@8AO7Jt9)J1EU;f@7|M7qMfBHMW`tzT;y7=_Y{m(LXpMLPsKl#Uh^t-=X6^P@* z@~xl!?1%5Z`_6;AcV^?=>dZIA)Q-#q$q1>VwZdIQ`*! zx9`8TyVPyJT|WJ^8Xo|g(PZS@YTI?A$-Gy%{n|TEK6$@J9*wHqc4bC$6@N6H9na@a z&QA|#^NXw1X5CJzQ6EACe)i%jL>-s*XgZm{@#eGhv**{BR~O6K!MqLqtc2HIxwCrj zO1bJ4uQuz&VxkhWtpK3PVB4vx0)(3IXdQ~aR3`3cfQK?N1N)e4qX$Ycwf?#GNHj7e zm>Y)j5*4yf^d5fLzuUk)z)L=%Lt025w})zSc1Q%(@YA_|O|W{N_7ceRUj!hc8P3<- z*-&aVKyxdjn&El`LYB1CMIa_LO=khQ3>+$dSHraVKL{o3O-Dm$;IWx z(ZP}LTSa9eKp!S&tY;y{BKF3---munv)I=`LGCWy#v z33azymPJvOr728MjT*PR^{7O{(Dz+gmwp!k&9?9EAKiNV?1}eb*LB3$bxsx5t9A5I z6*}J`RmNB*7}Z5YEDNiOh8O{T@XmVzD6Q4DcfEhMK3i{h#Ko@dgx$T9+ppcfdwlEG z;jP(tJgV!lwS}=(MTkj67zu|Rp_JjVG`J0hjY=ejw6`LWw?vxEOh5n@LFvAffCOSv zr)g9PG5Fw}bKc#Iy{EP|U2!2$jv^r9LO?cXltoB225;Dc3ZgP$@ScIZdIqG@c<+1X zs?w4nNhzP0jTP^VA%Z^}WM`S8g2@G0v*8BAGVkU_P-0nbOn;lj`uGI65>Z zcMr-+$zoYU(HK)e)Cu53BZ$EAV*T`^&Fb8GERhqtMO-MbA`vr`P)$M))i)2plNQ8VjSSEei{lj*akPm5Zb=|ZB;Z{78=FPrA->bxq- z!WxV4;QqbK<#iiM{Td%$H$&(ja6|2p*X{r!HS(oXVmq9iTkjqNSQ%yMNNK#bBB;44?gaLma0xG1? zp8*COlr(!?e*S?ywvYSY(kik9ai%}N6nh4oc_6EDwV8r@nn=jr4mpIXdm!(#2VzPFiWo%@2_cR4GC&rg=FRE^hKTmOY*HYlz>9!@hRslOYl`ri znkP+jSc;0k0LiB!3Hvh6MW`TzKxyv_q!~6y*i$-~9L;gh1th2-brv^~UUk?4oYAFW z_-jDVhKA~82q^zW;&%7l01miNN`UDNHMdi6qn^#6T$wg3#Ng`>P^8N7s| zFlAMCy|bnY(F=fO?t5p9^)AF9yX_i8g2tjKqDT-D!Mdyq=616-W^}&lw@yPuLcMi3 zUv73GVDDU0l^}6EovEk~ff!VLQ;egJDjI_jaqIndw^2N_C3lekA;dVIj;*y41Vs%f zS4D8%aN~Vn8s2oChzOywg*B`YH|+*d39*ZzM_zB+)`iZsWyyE$-g|KW&h5j+Vm4VU z4yw8-Dw~QEfrt?($?%ZULIy=leo?8_?R}pR#iYJSB@;@(H0do6V~o*D4ADmyf(t&1 zM2$+QfZhd24(tLdnD-9Fh$O))5s|ehkyR;65EKzLs(=_~0AOMO5S1w6y*DA)5YUz? 
znw)*&;5uFv2A~R~@owF2cP++P*0$oNMt~46SDWws=qKmb*S%NUj3(B`C|<%VCr8%u zs4x>-Osi(nG?leYX&Q?qY5_1Q?bw2MI|@BB`d0f!#gv=QYkwXbWMQ-4VJejx859W&YwT~*j}y=%mP`0c(xYN09q{&M-g45-4Xz`oQ>|idGzSX z`RRM}%D;8L`S9%e>DhL=IQsa5j}8|n-~0AAfAN=p*?#8--~PtC677HU-~O+E=WqSR zpZl5n^ILZweel8aPd^!TyZdi{=HlV|ljGOg|G-?c>%d}W zc}&vhAnt3{6#Nv4_UpqSBSwQ}e`sR%0WTBALqNyjKZJX;3(QPF(x2{4sZ-=9BuYzw zkvMHx(DWduQ<3JW4I9eP=nyysll4v_@ic`^QB@g|cK|3~MwL5(5sy zSKFg^FkF(Ggo*oOA1+IZ{sfrmmi(%WspT&Xkb+V^iTgLYq1|;L9$vl~bohZJ8v}$G zBa{e$F-l%iM*yhI3<}=I=>4c}R+~*#SOzqOQH9`rVG7sxby0u@V~fqEjgeOC)hj2r z)c0NAb5Sg}UFRd9scX~JO&1~ux?ZkFO#=#j->=u(5TvRqkXSd>dc6rDG|j|qoJufE zu6K1^?RwYu-SzclQ5XTSB_EXYtLr5q18P}X@7t&m5R5U_Fe9RBj4>_jMIi6ux^I`8 zUC>xn#jT^+d|FK>)zPiRY;s_W8jzFx7Gg98Llk3dh(c&IMj{l6B7u|3lxPfM$*P2^ zC?Y7R#u%b=Dls@GQG$=XYc+wDM5G#HP*hQgq6jwS6j23Ck~amVG~4OD7!#AEGxKsH zh@zrVB9H|HObI}=?;SBV0HBZ-iYg&+Di?tfQN5-nHA!YT9sViq5-tzxTaw zeg8)r=TGh)&!&sn;k>F#QWWsEpcn!NsZC)3D{B~`Vk!s?D8zu#b-NB@FOj*jqjA+t z&GcYCIk;u#3z$x^D*Sd0J|bH*HI-v1OmGf}oCulPm_m7sv*x%t+-=U5+wH_QY%9c= zAptI+o)j^NMBlG97Ulfz`1pQ3`qYKZ8+RA)KiT}@*Z%zP{*7Pz#-IK1-}#%r`~Jrt ze)8V;-g)hn)5ni~@ZO7^Gyme>e9IF3%x6yS+&?*g{`BcbpOlm8;FZ_5-L;R!)#=Cc z6V-JLZNRdwCvyfmJfZWakH_P>tV{K>yWULaGemNsy>)QdhtPUI8`WBs0Di)YZo4Mc zvaH?md_JGNj<5Xa!;dyw*R|cQ?fUlWWImD@)?I()UF)B`a%=J0$=zE@tLt^=`h#XF z2yNHD{@}i?%iZQDr8Q+?qiUYT*)x-QT8I(~or}t3nQ#*sh8U(flu-^9s+yOH?jfT5 zUoYo_pTd{-Q)M|QXUaIqH*ZSZp~fBZMd~eZ-xj3*#-Gk7LtP4K%Oyyr8;70*6bLb< zQG>)74@GU#>!XrDT9>qskpt^XKn%!e3}pxiVV?j}m^LOEaHgb{&khWPY@UCMproj2 zgf<7`L0_7LKpE2)5CYM?1{BgV_JJX1N`Mm*jkIyeJ`l!KE2!ccgO}pu$ z69pKLMpxHcQ=0X4$HaAAU95LNG8xy~%}#%0%%I~RjjRcmck)kSHM+4W8&Iv;x93sF=AHj1o- zDS;(FSpg+9-bci=BU=C!9#k%$m^1k%9@!|+pE#55)zrRkiPSP zh2`ip1drmzCDE;UJ0b*q{OIFHpFTQROn&FLe-p}5BuWUxz=*zgs)h-bj9MtzSSG_L z!Hajvg54lk=E|6Y&2(JPM&)Eu+lsyK+Vy3(xpI%V95>uFFd0)>Q!|x%0oBMfV_+h{ z==$Ic6;q{x&1gI?g73WVt=kzOqDmBi7|jSqNWK>g!Fdkc+`W7E)w|oDe0(yQjGOAw zi}k}NSFgYG+5gwy|L^|Z-~B)Tqks56&VHr-kN=~;$oA_WKihtC*8VU5?3;hff9ciR z_Q8X@*H;(MfB5}Z2}dWdGDV52_1VM8fv>1+H=AlQl^C(9N0UZehovnH+Hv*l>C=0! 
zKJZaO-%e*o+s)?blSd~fCuPlNPoEc6g{Z22F<&fKR}JxNckg`o=&_I3#ZXzlx;{Oc zjh~*a8F3xtgBO>pu6^_F-Me>>`mTNU;;M@RrFrq&m`C$bYYL zbJA04NM|6#fP@+}hG+|$e6*c+RawHo0ufV?XtFoc${0q%w(k**iUt={l1-Bm zaR^aWyu`9DH|w=q^+&gkjNxs+RVa&sccdZ_fmNimHcc=f3aB!osC2IHL!6Gsi`mGc zj_P7I9oKbbtuZE--%JJp2uNZi!WjD~k&z9M0R$EB-gj-g-bCL^44YlI*{zGRLN?w9 zGR77amn9n`B9YCq>lMK7+HSY&y55BzZDGp_jYVtG7(|mN`2i6bD6%mjgs5pnYQFna z6jd;JHVMX*F_77?u}S~QF^b#mzBb=>V+^Y}MAG29t?zc@syaTnhl~ZmWL$4n z>vq?ou>clDIB^D9%{ewL9;7WHF`b>kSDg zQg>zQ*(9!;KqR)}(W02o&3FoB4Ml@x!*(2eOO}9&2$l+4m!l9wA~Gr=dI3ddQx}sX zWQdy>ch@{>ip8v%Re(i&mZQmpjhW3x%j=8zeBN!>EnHSrwKH^ec|IQ5!=uG!xlvVX3Tq1Q zT-UaTvq>)}A3b~$F?23=XWPP-lV*IeT>2PWr5I!Ik3#Q`501u@al5=Oiegq91r7dq zv-PAv$q^qpk);YZRN+s7I5z{Mxwpq5Qtx3LfPHws0egmj&LK2IFZum$F#c2>CjH6F z2+;sp4}wOdeH%b~N^`)BFjK=h|GZ3G2xRk4f(*~g&|#!m*t~CmP*ZG7AuN5WDx_*j z@*8Kd4-*kEQ}%u)$eAhqA_M?q$Owq;jg5x$Ho)wJB7`h4YDyKNd8axNqJmJ;Xk@o% z(d6%$pkf?AtAwa0QynHucr@mfkpP*p$PC3mn-6DBRa59!g)D>30Ra(3@|c(2T$JI} z3H^WRV!=zz1Kyncq%sHZeNmLri?y8g1ccxVYokO^U}j=UCI?Xj3N*$Lk?S2JqGGZv zUS3_JA|teID*}qV+j`#vQ9*{u*xm&|^xoNm#-n<@-4tc%)iuq?^{pif0^42N)Z?~m z3(FeGhfr0u^C8vd-ELQx)pos%-uvFwrCnaT5WJ`uYgC08HAHJ!L7fjp0fyM1QH9P0 zW~xgrQ0Gmv7>%Y)GpZ^L;=2x+Y*~b^D~l>>$Q__W2$6^klJq_}x4FLFELVN!I`6Bx zuEvw&TPM@Sf~@hrCu@tUQArztqW5vT*+lQV)som&b?Evw0tjl55c@!401T5UiVBIN z0%NJUIaC3R3bMaLR8<6-V+g<`sz4AmB3c#nLD4xNRPQu6P_V|Zf;B9WD{HkVW6^{6 z68zQqdGCF2fej;(558-65@K1{q9_beh+RR(SVGb0ghUy@A}F&ZWDo;F(f6zNVznX= zE|68~vZzYyT+{;j`q=g!mj9-m%0Di=rp==Z+;U;eGX{Db$t@wv}Dc&5EgY%!f&pFf$+4;Oduoj-oK+brwRbUYouIDK|~(M@ObtMvuoq^Zqv zb-mrm@$s>#45C2-O!f^L@KTrcU&1xgpWE z?@vzd+-?rf&#xlJ>0Pulir;!x1_V5J35a^;Z zqkyU?AqolUpnO3j(kPi!pafbajVWTbCmVt|0Y(5!_L`8{4k98VW@JNY8w_Uq(!KT+ zkiLl#XCTON5!32U%q9g@~i`{y1pHH-;Gl0l+0>{dWbvlujV zj0wysgZH3djm=Jcm~J?Lq-RtKK|=6IwA<}UYYiI>(DnW0`StCC1A`hvRH3W&=Ingk zcS42~R6u<2NX7+URh4Rt!9i6<2?TO|eKnp|giuu_vk3yKQWzUWthEwjh<#O5j7TI! 
zY1b=pT|a4>Gl1xWcaoH~#*l0s4alV?m5oY>O3VaGptUvgraqp}r&R-9&?-VS1Oy&| zK)ggyh{hNVkwO5KzVCO-)q1_&tT#nHy7kI~$-(h>z8Fm>eN-nBLBjUB>)plmHLB@d zXNV+->s`0qLU5^KVZ-8s1P`jj9G$X3G=!vv(bzI-LlC9W+Ncbn<6^0NKR zy<30w!%u+Bd!IbJ`@{Lqed!B-@JIjd@BSZu_cNb;```Zlzx><3`ycQA>Hq%TC)@F) z8BLD|L)tL86A|eTjOX#0tKPpEvL1W=v8Pm zo<-kXpIsiEG`ALqoAp|QTB6PBdOV)oS{$5TT=rdX&ReUK$*kSAMOhyl9PfM|gL`mt z$MB1@i_PKUu-ml;=-%x+;q=+2?V!L2v~}U>*`+YuzIC{m&#sqOzHJeqH080!+AV`$W?ebm{L`s%E zK%g->rzi0|5$6OzOo;?eNo>e^L$_iKk$@zkmG&@6qJLA)C5SObz(jha$%MV>1Q9}( zgbxG? zW~xf-THjQ~s4knbD3}W_m>3Dr2#_EFhz8#=8wLnn#~|X}ZoA%GU-z5NmiFYe*XDQb zO^%KO@TT2eU!3oD{btv-(OJs`=)4o>+HTiwx54{<*Q!MCdk~MIkIsqnwjgF|nr1W} zPv;XLV~T=|=|k*$jbtp5N+cM#{iN+P3IM@JYsoOPVQWpg#y&Xkz!s{eFeRh0z!h>S z0YF4;Yq%iaGZ3p#nY@nXI9L7@3QvneW=|dbRAje%o~h1sMpn0H_LU0Z}7? zN)#6)CJlLwPsXQRMt%BV(ho^dbdJ`vMy(JS(#DU)KHBakJL`NYPcS2SqD=_ zuo@M}TehH)(Igv2RYhXav=2qnG6aZ<<-|_z?3NeSOpZ=o-CeK2#xK47;N4HpF1P*< zzw?v7_c#CA=iWO1gMaoD%A_{V#pt=U%ycZ@apV5sJft%Y)tSVs?0YbN*ODS5K@d#$1n7#TpmdwHb}; z@l25mTdG38+m(~atg4q67t{Gc*R?R7*{U(d)xl(XG@4{yfC8XI zoCqQj$=xCV6N@MkdJO=|!(9K%II$$J%>j`C1*IEAcqq_c!V~uTP}+z6)QF_Kf*#Vo0GPNYOWak(L&Onl^@&Y0KG1mv`yqItGwN*GxlN*hit0qS5jnU}z)%SSJD z61cxinBP!Rl27`usvNCXtHP8bF-f}^8S=|0Nzjl;l8dA&Vk%GBJ7Kj~m)DE=%otJ) zBH%ono~_$98Y-%~8jtGrdMk=i0hriW*S5(@qbzLK?}(vpiVz)wwp~liZrhbrBT-z} ziAZ4^Q15+k!6ReT*tOks)D)$SL4c6i#1O?eg#bX-a*UWba1pgdTBi<$B|~7D450Vk z76vsWa4&czWWyFUN)Sf$z2C0a*VmVpd3x)3zL=Gx*~a;Y@4tU`ak<-UeBVoeYBX9L z9b!{07Khz=r^r#D4{^KO1sB?`3lcyb`o7<8`?jsif)SeSZqhrp#mU{1qdT|J@ag69 z^5R-tfInta^A7*-J4(h@@mo=B~=F?iP@5^6~r0}-P&P>s-z04$*~!I!?% zezSXV+6MvMvgP@~yr{~ybuLI$?0fIpzNp6&w&S3ob20iDyWlzvq3>fK1T~gLVYx&G zW;Q1GjtJ;AtUBHF9TC=bIX_rbqso+)tff(*xiGdy+n}wfDA5+ehL}giRLkdyo)Bb5L^2C6B~?x63=*}SgHS?Ip-`rW7J+j4&V;iLEe_doi^fA$~# z>JQ)jlkIjnS=_!_U9C3TzxSX2&OiJYzxUnuzWbBe2Y_VEh)`pOqATfm4RO8<5w(Ct@ z)aqSRFmrKn{`~OxK~!Q}&1MUv@ad;NDUG>(bhuh?rqk)ur;p~dIfby8)_~%iN8qma zYv*XS9-Y6Kj+>(5?XGQ_x-RW!U%%hK_t8?sDH5ZqNM?Q02$1H0i&^Ros!W5?m<~w( 
zfd6I^AtNBLFYESrL#`PIh5lX=0(-LsgaMMsE;uP$q|zU6s#ri!9B_tAW4<(9n{3cT zlA{_A(`xx^Mp5P*?kKs`BW1N|T5Q6UyCKL<{zS=*h0ga;0914D#P?#&5ENfvLJ9vLVR*mD4Ys5yD&u$;gd zRMeyaN{mEKNO`kkz7)yGmkASgCz>yX=-`}3LyU(Zsi)MJ=7TbrqyWmq5+xZ?u(2XB z!OAHyC$W@jn!f{y!iJ&bEsmB^*(}__0`g~ZuVdT6jj-UK&I$?FjZkqjevdU z*Xx#%C4{=L2(sC1tTiCY2v^%Bvm$AfAS&JkTa29dRYO%#i^PadW2h?YoJ&eg*Y|DL z9n7Y6U2oQ#;A6?efGV*tyxBQxxK&L>KpYSuV~jOLVG3&uAz%y;90t-ZL}FVMKoIhD z39Hb>)q1_%9Ua}R>WYZgK0N&N(b@I&)oRoAoyIVoO%4x_%Cch8?RvRucTUucD$?3H zHMVoHLlYQPv}mlLQ7k$_1obhVon5|vb~*m|lVASDFTeTLTg_IG3T+)RZF#mzMu>rMn0R0ue8DnSt(bJW zST=Ti;8!oO8qIEY|k0Rm;rdpL}}u)vy2gtG91`{K1EBy#2*L{l?=TzW4QC z`i1+y`h}nWmVfut4?mtavv)suF>Z>Vz4!Y0lZUhEY<&ArZRnqVIJ$igTeh=Vyz181 z&#T#yYnRi*ThMKq@pyT0IXf(X4VMksx;A##F1yfGMS1$->EhO{-EK49tcgpmEg==g z-aWpzU9ToZvzDu}C?4E@weNZj0U}vrC4z#IrR8qhcWZF#pZm<4lX}$eV%NqO?(F#Z z@YVI^gU6?&0G35kxI`oqM34a?oTDx%Vz9CIK=sv{$jDSdaTm0PvFXB~|d5 zoP8-p?3vp=>zhkV8aM$Q=tdgqejHi^$eAETBcwFVAb^03kS60{ucA+;nTh~|M^TEd zgxP;SLry8s6M)8qe`#K8lfM`d0;DDY2MQJlm=u!}GXek_W+X_72t_qHCnMyVKAk*7 zM9F#{RmjrKbQ}bfFdGGw8-5l9AX6^;@v|G~KPOLwVg4&+#(ZfKQk8fWyqQn9Y2s3M zwby;7f7ZPIec%D$Mi{C9X<>urX^Z5dDrwW0;6G%T1C+vbCZLh`Y9Js4V5)%8{9`l@N}+`fJL_;@@TiR$&`<-?C3pI%(FF>Jkies;cV`(4+D5I5U40s)Z^ zaa0u;BLOx|Z5YM5#H(+v+pAUkU%m3WcY%_MK>-PjHEcO4vX}`J2uPy}Mg%Zutg#>f z5fxN@5C;&1kr+WyqXZ=p6a)ccg9c_rH6GRT>H5h-w_c%(WI8I!2-Yrp*EhvuW@<$B|bKmG30yNd;G z@#)7;UMZTUE-%lXp~gZ88@o6>c=7nrbUy99f+@QYjH&C&dAGKckqgm@7HpMCP{z~g z`Q?l4W;377m)B>L$$>4Z7`pLjOcGgf+dFHnPM<$MI=pq}w%hHtAnZd>Mc4QBsN8n$ z>T2BpJpA zS{lG=94LnY@5Cf`r0n+$84rKCx~0qkq!Kt+;G&u!rY!UX=DeJB*r$}6lI$0UXo&Kx z)KDcSXK5Y4o9qc8DES|f6erCUWH1IO(N!rKWo9E$4w_`JDabS7pae~IKW4COXxT7D za=^6bCABn|*Rx~~hcxmAx$yyjJe-#*d)Z&qp;t@iRR^RM?=6O(Dwa%#*A>RV@sL8B#Oh^^u4<&`atfWQut*nOdzVg^GUtB>s`vs5kge2YC$y3W*g9wfW;dK$!~lx{f3tnO@qBX`;Wob%Ivoz5W22dZ>BeRc=xo`>nrv?@$<8mBV*Spi_ z^=@hVHpb`*6s4--x-B#Y-*%zxAt>ib9Mu>gM#F54DQuC3CRNlJWJ1-*NNlX45hP=& zEZEvY6rGP0RGCFEDW1Ro5ND z@Bj0EHC>!sU7Y{xKl|}-{`^}%{>hJK2e*%2xpz3OKK}Hh!)jKJN2A%Itm@0tXS0J7 
z4PDtZyXAT`U7WvoI7VMJ)z!AG7I#hAsCX5qs`16;#httN#^a`Kue%*ql`(Z`5UZjh z<_Guge)Q=l+s$%*Fn{#oMcaE@6e%PW||0jRYMq7pGmENt0!9TK7_rbQsh1a8>UnE1|N)pR=d z!wFV^!Qbj86KI0?^3}i`pfxd(v=1>^l7j#VK^WLfdMO_y8?>B+009N_pE7+(plNk5 z0C6sJlO02Pv>(h!7Ly2+DZq4s zQV`ed1wDLjN`cv;2pBNt)osZ%FTX3%fZ(U~;wc@drqZ%1D^T%17?XSj024)%I|(5& zY3FzdZqiKqZ66{a7)5nHGSYgzBF3UBL}TA~eHVzv+s!gK1;DB>n{B_@tea6IBGz*2 zL+^S)EsAn7o`6E|zA6fkWOM}pvTHXYAqMZgk04|jBr{#8=z1Siuh&asP*kRqal7rj z^UTzD9a=JMnF@rCky7?0M1xQ<8i?tMQ4k^(k>G=203aWHh#@%t{Moar8s9y>{pkJo zzW@Cn2jprpu~qZN+ixA+I!3||K7I7?*|TS7r|Y(hgfZ%_?~uzN+6il7eb9#JP!VIW zNC*&Ou!f?lx`=`$nZ>ku^VR#G|LmKuoZLBm`h+Q6FH}KhMpR}35H^O=uq1*t78IB$ z#t?!-P$pnw3^6H$&iO*EMGT%ef<}>O(J-dH78nIBmpU?BkEYW}IDdNi87W+FB*iY*FcMpKY2 z$rKSVc`Xoe%!)ZgKtfK_OIcR9CoppbgNP6!5Luoa`HL~ujcv-+_3rp^`k4nO@1MD| zwY?O4zU)rdF|_gGgCGC)U;6w%{k`v-YW$Ojo7w2czx6wR^m`R9;rmk!?nF^Q>K6tMl)%Q+rpI%8#a zX)+{$7$P%Ajgq}Gb7&sMI|oH6gM3PF6hKK|qys~M1OkM^d`6BP!v(zQ@^I)D=p`OV zqac%E-flLT8AzJXf$vRqKn#&BtAI5HC<;hv3LgUibZu7_bak~F)ni*&gy=&IL0xoF zjA%ceG;P=ReP?Z{LTH%CsQA8fqoxVLGuYsyD2lqauJ8N4A2%cC0|K$NyKYyO7C|+N z1_dQ!ilQiy8Hjd$8$@<(HyV$e^WOWWs!D4;M2V3A4H;6%XAFfH4Kri%1_40;L&%h} zaa8d>w43ee^A~m5)V6x`@h8{U>-piloXo&hlf^9;@Vh_$$*0dAzPP+x@7f^RhTs%b zNyI~3GcqGog@F*U?W?FwX>Da8iZQk-%F1$Wi|KfB=jiy}{Z~%z++qXU-MVYrAf649 zVL~pe${G{JGqEZVF(Ox`NjG#+7OEOTRE=d}2tYzeTiqKhf{WyWI)~`6tW~|S3>pzh z2!Jqv#t;l4PR3k5SQO>z#bfdvg9hg%K#bs<=tEQ$YncF;SQVoS+0V`=p*V5D$(1No zmw;%DB1S|(AT~@)Oq2*0RA44A!YWb4K$3M65)m^bZ734Mo{EHl`Q2+XKs4_{!J@JA z+iv;1n9Rz$rhpIb%s+hm<9F`f34yP-mp^>E{L8=a^UvS=&b>R$U;E|H|KYbk2weT^ zuYEe1O@8hxU;g?Zee>{m`tQE~(fmtqnsxW|$&34=dj|)Ti?e6TCm)aReXg9$$LrB< zbv`}19s5l)UR>{XhvmGP9$u~1x2p-K@i<~3QIt_L_dagc-E=Z0rp*6Z_&ppsZ|O&D^roK-0bqrh#~Gjoh#m>kNbUP6g+Q1oD4b%K~4^*vL( zUoMW>Zb*l4zlU~G#G^qpi83b1Jeq?`=edms^%phMUB4B0`d#WlK;Z8B|Is*9?;Z znNcMiM$rH{YbpoWMIpoa!{P-TFe}x(qa70ueKS*`8R|#KAWT|~hQoV8J{U+-Xad7* zMsH(S{jSh<)(g1~uvSrQbM41hf6^%^X z_Z_I1!nB($AchbPOYq_N_*RU;F!atxu+Lvy>^h7hMOk@VL@IDll6`-!G5FHV^E_R(~wuTus 
zy0(`P4YPAG1~1|fA^?I!(WnpsgwR?x1bJvgB!q~761)oNcC)%VzaEvNx^6b>jgmb$ zzDs3&z3VQP>mNP({?ixdkDfo@2B(HyOln~U#JaE~WCRxDaluqt8WrWOgToN~d@)5; z^h$l2qjLjW$cz6iA6>K+g+lmW_ z6odAz7j@CQ=%NS)AAJ;w!iBA>B7~?CQAh!aSfU_elxUcfe<}=Alf(#-1cNOMAu%%& zl|>205E~&TV-*x45u(Mlg8{jJJHg7$+`{H8x-S?j# zO{?cC`O)L&Z`?ij%Flh_!|#3P{^8xf{1-m^FTVO+Gb;b*fByZy{nx+z%4hChy*Ryf zw0QVzbzF@eK3g`2)5`A_O&OP$c&n9qGCMl{^oLK!vzg#V$$Z2jcR@i7TaD;j|JsAM8Q|>f#apkw_U!CD_;xm_ z+pa4X=3p@suR*Z~{K+Q|AKbfpbg+2kl?R_ZbRU2E=oh|t=k*8oUTin6Z6gX2B_)VP zAp%uG!`rq8Qe?`mHJC%nQ2*U{7{B~W_x+X1P&Q^0v;9A&Sd?w%hpIo9{5Kd%wjw6V z)@Z3%&q#Ok#I&9_$4AKy1r(zY@jzx~ASf{ji4;r-RH_CvnIh%DsJThZg>}~1 z@4LyIv@l0t$W|wqGYsHhx|Hmaa`}<4NmR`yz&LOp07}F#2$NKVkW>UAt(i~feaJZg zq)L0U=A1p#n*i=JKsx!ueoM_lDA?1vnl=X$N^%rSe}iewLc)!5)ufP>{;6)*JMVeMT3YE1Rp9}_I($l zIM)+{u`JOyrIp~?-8R{st(NOiRrhT-8CBa|t3kTHH?~6b`?gFq-`lLfs*EX=qQ)S> zi;9TpUEjI3cm1vT0YH+=%ZrQa7whLwE|Sk1w7Ffv;TaFS( zRn3+)LuKUr4kACKO`b)q33t#)=uM3X;=5K!GpZ~$Po_X`X{L63s z2fz8wc)Z?RUER6!$|p~sRn@fHy26^GAnnGRqX1uyS|KiVo?)yJ@kHB@FwU3{C{K?^XGH<3QNB2JZ=)?2N^OL)Gj-Nl>`97^- zHN+Bp!BiGyh;qGM5wWIBoVd+F0hEU-21q~z(h5bD6uVT0iQil}N($wu`w{fvq%&dK zGB%8>Dnatu%Go4A|C9tSdm2FZ6hanU(hyYh>Jo*daR3EFJkXrOJQHLvoD+UD1kwBr z07VrgflH~5&mb5M{m1_MF};MH6PkQ5l&KEFeWR3B$AhX#HC6kN?G*=@W(eZ|`&5X4 zkd&=TH%&kSA_m$3ASam&jKQ$q4S0Z6vKPQDqK$N=2KXOKtm z_UYgThXVj0QAMJz?~zFXq6iy~lA3akqC^H&QCT&TA&F@&LZZf9yR(JuTW`r;uC7K^ z#Y7TA@M3MF>QvdV88dVV>mn{cSL49;(WzWvfn6a5no2J>c&X~#vbzV$Sv8fQZ z>&<#Ro~HhTj0t_8?!WP<0Ri6!QfB1f!lbIj`4B>|#7NG?acuxVKvYwbhSYEwLdzy8 z{jISC2r>HJfwhKBWy|q+79$4X$ET;?e)q@EuddcXohbr9@1qz4$PvX58MY;JNwk=Z zrnRjNSrtUAtvy`KCX=SFirU(y8Vj%>8k#!)7!(l5FsC+v*(gF(kPsQr8U_(-4Iwg8bfHskf*O)hVWD?m9F$5!Kh4Og z#mp@!h?hu!q@=)rA$l({fNI}$q@rHM0ih`jC389ndHxqcnNUST2+SPC12SmZe4Xp4 z)IkGc6lKdqX+tZChysBK<=uKuHk*(M9$$P*4 z#n%FDUz|UC_3rJDpFEq6%6qQ9RZ`K!?dj=gvWN{I+&Niao{|L6F77TMN>nkUhBRJp z&JRaa7OcX?j5gPodDE;8&6++pka47pY}d1 zm(CQ$cC(b=#v{{{A%Lf+&);0%KR7s=jc4=8{Nbk$XS2ntcVGGR`6Y>30wPUz_AYqm zq9GenyhFlVybS>|r_YS5zyPuXXq18cy-BnR08uk)oo8!dAOrRdSq`r^c+5?@-jnu8 
zGIUe+;~vOMp)0?T1|PRv_7As@;{NIn_+nP;V8*5rU#gk6%r{6zP4-y{v;D&!Ac6sd z2ZO^($rPmVrPGiZk@OCUj~WguDP)L9L2h)f>HFb8R1E+e3|7~B?u=R5wTehgti z;qy54%|n30B=F5;(w9CYrwZH;4CnKhAExXBP84I>7X}JimWAsbL>U@0)MPD`(Kku< zrJ$N@K_$jWh)g~NMz)sQZQB&1?RIzfc#5nsh^TUDSDQ;9lUlCz^=i9WH+2z0Fos;; zSyM3D;Ns%wps5-_?z+wZy6wjGZCMmu-!m3{+z~J^aS^ zf4J+TEovYU(YEbC0U1K@0x}-evr)Mik4uDcU0KAYw6_lqPi`IGIyjt+Y8Sfob|n%G zT5F2V^{e%Iw`~)P(fhvZy7hV^g4ULGQ#H+aJf4(Q$%Z5ZW-LrWgw|LDAVY|x5eX?R z9m*vY5nGcM{fJ12(Gq$WnHeIA7vMl4UkAicT`xDNVl+-$Gn*flR6FP301(KhIF=Z))N#;QcF+?`1 z5)={Dy!3?LJPr!~Kfe@`y+Jl;VFdK4>R-;MVUKC|{=hoqad%N#G{>b@IO{RbJ z?oYba{Ijp!yZiRNAHM(Ob3gktNAvaa`O2yN#t$F;`WIi{J^er!gEcRfyXm-?Ze6ti z3SC@Y7_!jR({bH*YirrB1-Yz8vk0c1Ot#xA=(YgN)DTr%RGWUw3e3h9m6EP6&vxy0 zb}&aa+g&e6RWn1RdNNz|cb-4~Nm0=HV2hTqJ%Hfel zJX{=o_ucQjI6FIj@aDbS&mUhr2^7DUqqH>me>(u17Bhm)9x1~7`? zp881_dVlTHlM@ApzhNe0c-yvYOjd^y;sBpP#tKzMqDBHE64U*|#ENRz#1IQpwY%OL zOUx>24g08VyMFfUg(+riSVThrywCZ=XxX)qa?*C6I4~XUM`8aK^sHFa4_>rlM(J%IwEfL>i+pIuyT+g(*QXw0tNu2$=|cZgI~ z<^14aad0r7&qm`K5d>mcl-6)zEi*F_NF+i6QrIhT6p?IU3uBl_Lr_ssjiMN(u$G8H zP4tnBXbgyHyIn+pee10Nh7q8LxW#TMt1H>Fve)AAbCmU-;SCqZi-$ z&Z7-g|Mt({|J9$r_mgK&7ESf!Y<2hO;Mvpb(b3U(+O)e(tIsh^M&t78dRZXW^W=Rb^wwW~xq>onJ23)u<-Ru3g)r1jN4Un$fr! 
zjl1@efyM=OKBCaU;$R11=e#i#V|e)V`Rn(ur{%=Q=wo>J^x^$CKl8>buXO$Ts$X?p zTnsMA5ROt%&%nLzV@A%U0s{twm{-ThfbzdI`a496)ZQWPn}q~w;{cc|4EmwrYVA`2 z!R8E1uJh4#Yx&wX-XJ!h+zXm*9}r62g>m8iC}4H~iRM)TF~A`P2#^ z4!KML&yIeciqWVc8x-qkeDUjl;mvQq`|xTExSoxECEs^-ml zc`=?XV)PQ7_%8GSv6mpbr7vvd+H2ppwke``TaHGfsrt^^qHFtlGOi{IQ<@jg9-W__ z&KA?MP!2sYd+|kG-nxDJqmR!P^9iiG-ucCJrlPHe)wU%|n>K8^{$jN}cyJ3XnUN6_ zSF7{+sGd~i#oD_Ny+X3%QH8V(I3d1!(I5_@{u?=s<`JTd2@Y~g7#P|i?G8J~P*sp9 zneu0A`P@s0B7tVj`AgKz{?fgKrrbnE8pJHa#U0?FJQT3kKMory5%>4CzFeKdz7o#6 zD)9!w!I#j(efgeqPZs+qfB?}zF29=mde}Eanu~5o_00gF5!__fbZ|(SYS{N02$&2U zaj0c-;u;{JJu&?YhOis4g2~1Bhhd6kyt-Macz7ftXbSArdhlbek@^s1iX06i6W|ZCBmZ*=9aH ztSTG4w(X8=DJl|tQj8NZB6R((t_lSx$}+ipu(5y`BqEVW003ibng&G6fFKePur3== z>0B?MF@~}R)%jqUlZkW+iy+Ym#TcD)bzu=n6%iPe@(nt`p@;&~%I2A#edHfM#x96Q zKm%AqgtQkgI=~o6(sGiR7531Cf(CFyIn;EA~9tLtGszn0YF5>MKEHFiNTBO z*|2(F6y+dnfSV@yr8x3ZKG19)rkaEfqN+%2ODLPVnJj@u&2+t7Pp2ajyDz=-#-IJ@ zyxpxA^Qm}qdbPTBcyN2(th#=))zzt+lozkwowc7{KR)%xhd8dr((AhIcP&4E^6c>S zH>>+65c?3l4`DPJJLkLgrXDS%+oD#s7#H({Roh7n68kPHm1Q%Tg|3j`s%F%=fQAb* zn#~ux-F1w?b!}@`rG~1kw(HBubaHfj_wxL*uIqVKcWwXR{)z9xvfiq-XWQ|5v%b8z zcE{~>HZGg08Z}qfX9d(vVGN-rEsG{;Y$gYBA82kUfB^v-_R0;okf?$To5N6FKGlhw zR3u1F#;R!*S32#$_!2;e!~YHZ%uk^%$-&{L8HQZM?~C<)--o$Q9*Xq*u6Y+`D*IEc z)tBE2^GDpQM!^9U&)U%8z(M!p(4dg~yVKk05Q>NBpMI2vp{!Kr=L6ZJBttDfe0xk& z()->BXy_a=g^=oOdMS=)0aD(Aqc;};kSK2{NQ2#)7Hp)kcmP02ZXcQ)v9oR|_jh?> zIyZl!nyj_b_cq3eNJ_RZr|p|IE!AQwY&bb#0}3;G$-a9e*tTtD%Me3d7KET8&c$up z0hsxGE*iVOQy^6?Y|-`}6k`-?4H;ykRcX3*Tb70yoew?+QH@cG!ulA4h$6PmMbVO3 zRG2x0Se9kyocBQhM585)-gUd3G3>kE6lS&E6t!*JEh-c2H3pbx%(=C2-h)c- zWx3p)UR}QO;FY&Wv)O1on~bL8WEXqrz5YX(B+n`J;Ev?WFTDhR{28K|*j{ zAH0{|JLjXID3#?{y!Ni^1K47GG@dNxi}`#pTg;nrO~$Z6V&K$%MO1~ta%n6Bnq(rV z5+NisPgFo9X(;n1iI_3T5F;Zdq5}Xl1obKbNdV9&Fp?!I6cDps7(fAhA2*kJ^`bj_ z?68?hkUm5ontJ}Id%kM>&90w}$G-2$DfoUgZpQQb zJ_MG2yW193*@yV?VbL!R=v#`|3aXfi%i&DrzlM|?MQ(bV+%gR|*g9}K ziRqEFpZw+&rA&y3YK#p&Bt!@SqC{&<&fcl{2!Yr{i3SKjMG}@Yu<{CB*CmNju=c1{5cTg8B#dde9_V+>3LBChY)kPn@LMD@=5;NxgqG8Cwe 
z4flPU90*b~N=TNiM2!LnQkFJ!9RcjN?PN4w_1+L_4AxrT2Vp==tSMviwxH~{2Wi$% z0RW;X63yrH#nBNNGo4H)RXrJxZDFgbu!RXBpb`Qp#29?H>q6(md4l9L?PKrT&CWR& zd}Lxn1ydOjw%cyg?!Z*G7`<|McRZcXCzHiuUY3O^En8Lu0k0sUfen>~0RYQP3O4V# z7RYPhl9vzX2VMo4m<<`k^gJ*zvqB(3i9uCj*Fy!ImdzuG3Jq;xdPG4PxJ0fZ)#dnB zSsUB2Yx}Ni$#s?}mHcEaGl>LMf`E{m=)|L@-8+nVK1u8a76VFeT#{nJpx1_Z;JI0@zR{?lp6ocI8D;CZlAqtV59TX!OCyA0HmyI(v3K zTQoLyzx>6|eDtrsw^^<4-M+hPmmfd5IQZU={%`-+|Kh*?fBgO5|AVjHy?g8WdUbew z3oUIgx089jmcE{#9o3BP{OQx%ubyBm%h3WE(M|xC5`EvcS{C9}ocEiqnjT%B?M#e@ zS$xaoI3nA+?plm%s;;Xtba7HoKL7bIf9+5I-P7~) z6H5YcG?|B0*etK0Urweoqh_<-oy?D>vs+is9+%Tm5I?F4!-Oh>Aaj5$(lVn>Q$2sg6?dUNXxJ=maC(VvR_H@RkCM{0`9I9yO1-ZX&8` z|34M-1c)k<+FKoX1J$8^(c$oN$Q$lr9g+wFLgsphPtZNulz&M+>qFxgL4mzvxT+e%Ox(IQUB5KhEUHY1kj%M-nH0n@t12<8MkZ1T%nUJi z^au0B^7@($8^dJS^{`p>A;hL;=NtkPuu2HN3*G^Q!W2w4fL>i)k49tfIsjlc5zrb# z0uo}BXst0^fcDmqN+bfq z@V$!H7|=_JXl&6#+;;A|UoW?A=halB(fDvYnT|%2W;|}jlTkCWMNw7-FaRnMMg+we zBeKa(XH19!Y#Bm~5eP|%C}@ldOiF~tP+^!YCMVbA^Cuc55DJ7CTo*~04D_vb1^E&9 z9*U?EkxZiWl8F{@VW-D*J4Acn9kQpH;_~UE&FK>hkr1MQcqfEt3@CsyC?Nxhig>0{ zMF2U9L;)s1V9_Xokkp+6HY%b?^OKh17zHjoRdp&9MK#T|CSHJjk zKYZ`eyYHRxq~u77C{c=nT^C@)n_bHdu2+{Ak3O2~Zu0h`u#>2^ncdPTtL17uog7aV zWMFmreE#ZNg=tos1aM~KpsDT-0s$k@j+FMV~qDtURiFhqjb-nKYIQC ztE0LxyVipSl^kgW5HU)WG>((d0(lAL#~ej+h=9!RN|YgAWMSnVH%UYz5dom6lA?fe zc52HJFKJjvhq@mj#fMZ}=JNPP`lCom9x=p5yrFdgAr}EJ)vO2u9s>x&4I%@YEkiEa zqukk`Je3?GDJSgBa?+1Q1tcXTBZKA=vfTn|c9ly_(Lg2d6WB}OI$$o$VGjiD=N$Gy z9}|)Ns&2N#Ux-EIe@&b39sF@_L*>!Rl7D8}H$vS?Hwjge6esUcv5!df;4k;<|%mJ4f| zs;tYps_McRX7VAr=v1S04n!zM6E%1b&VeZUpsM1vZ~M(=d%fIjop(TJ>T*0ko-8J_ zgHb&h)lFFxrm$$JC@ot<1tp&)W0(MgBBmV?qC{-c_Tl_sph#2~#YGU72y878Be8&h zphg)UiAW-hN)k~VX~4kH2fvb{DzF|=GX)UPfM_yestQm-amWWX&tsva0i}0GClBB! 
z-@E?FkE_tLY6#9FQdwjVT`mGdA{J#$WHVb;K%%_Q0{~RgovI>H1ep`_s8K*wJO(0Y ziEI(s69CjCh=_*LcmWT}zMq0Hh8l2i2uQ>;2#X>qnyOxUUliu-?8Td(edp`n_}b@w z_QB=y^ml&yEC0{`=}-K&TcZ5RFaE+;fA6cm_ix^PSRSleGRKnk4D6$F%e=nTFlq|3L#=}+V9Hg zTtTbRWV2c~bz^NQk{1s4eYcp+Rivt#cGE?_n~bO1tBac93!i)I>)-k=SUy{=8-vkz zz{apCE#0|w$coc?Y!E?XQCf|$scKS4md`Qm!A@}thl+eaND?$6LrI)qamh%*zM_Xc z1f)9n!D)_kF$w?@K@>qE&FgxHavc4Z!0ErP5 zM|F)z+qPe>y7jufckeDU6SIpxLPUhJLwE!S7gy3Me#j@nGz!-sIvM-REC*Y>$f|oFW^hXh8Vg(g-NCa%ymJx`N z*XwmzRsy_PZMxO<+i&0d!s`d;%iu59xZ9l^EIxX&e6eZ?@JpY6{lkCx%}xq8F6Oi2 z=jTsoj<4Ohy}UR(T#OHMfC%=6S4SGYkTavBiCv}ibs4@h+Y;UMPm-IscbOe6qcUW=0cBEdQPi}`Ydlfmo& z20k^V5FFfx_Q25q1nubxRUH&cl5&9T-~EQf%`eJ_kRTJ01mUN7W)J|QS@}nf*!MKB$Lk^S8o7(nJ+PxmeWh8JKWyyH zr%H!`1W0tFy__Z_IbR?oh!XetNr!3YWLBEeZ(3HPF-8#~W}*ZIMjte) zA*QnOy-UDY2w~T`pu)%`LLdMF zOb~~*GO7|u5Jdrvgar^v2&zJbfz-7CXbgaaCdDSyn>+pgFAb#seIqqVPrd*u5UXkk z9ub$T)xoWk%k{E%;I7Yq@tu=z{ooVF<8S`yH-GilfAwtnjVCX*6~Y6JU;LSqcR#%P z`0y5QovQ ze2NC%`K4PD5o02?XlOSO0FjU(X%F|)x!Jc`_vU;lDi7wRH|z}b^)AOs{D!x9AbjOd zD{EpG4cWq^?Y_RM%Ba#b&9-xA7Z(v6k7ADN@av?4!L#0>q{+fTp&{D_JZ7 zB7tGH1QwAM*hMLr0U?SKYH20J2%yXaiZKEza_0GkiPC9ce1w1?08t5CkAyZjE+v5PrR}urkQLS! 
zpdroQQdvSV0&bve)Ktb090j~rqS&<(qn1ELNM_%IbT4=6FU6KrjV3Y= zqX?knwNao-NK6vFAwEApGnFk5PCkD4^vH+1x2ngB@=5DI^Z7S_?+?HFtH1G^|KR`g z``g2Cd43JM_RV{*_)e&HDz29Cwe{y?%Z&Zz95U|#cCcN7a z7*Gx&S`N_epsKjEfKsr=epSp)+P*JFg#-+tuj>N5?RUGX8TocaB8T&-Yj=!hv))ab zfJ#-E;;3;JfkKC$*_tjWLCwDUip_D-w#Ux@w8od2g2j_jbVY`P` zNOK#2L(c*)9rq1>hA-v80i;VAS@$ZgmlAi*5a~e~dZm5;ag+CP*h_=?cN(U)2W)M> zp#TYq7^B1xoJuIjig*!82w7Fupd2Mck)BFJL;e7m5JAt~i;f}=n+1aZB% zHukOWT8R-rh?z}J(2PPN5IlsQ#Y6DXbqoQdR}G>esvuJ&0%jkz04t%T*h1W3uoR_V zp2zDKD1Bj#Rs|FzD8^EaaWsjh=K4T+1XzL+7R}u+{L-WEzAMXf7Ew_bqXc3x5Rq&V zk%&A~$;^T=A^{N)aPmu6Pz(q*20>&33z3-!yeA?c&&FVk>Kqb-M9`Q9))Ei(Q+GBD zFll}UX`oD!?SuhOkq8Vile!Rh-HYYLSAOG{fAAf;UT+#}?%g{4^FM$8H-GCl|M+Vk z_%6(+cD>V&A6^o4zxG$YG@p?xF}=_ zDkiqrw%fiYTPa$B?Zvav$=%6(cK!U>Y&w>(15#vT?5Hezbz9Y_3T@j$sn){jix-Pq zcga|xs_nw%aPi2Z39cc zY1^hL4Rcg5q=Oigh!lpxJ85|k0SPcFA`(%eY!h%a=tos&@`ocI22JC?(Mc^!z(&R2-GAJpUar~hgSJf=gOt)U#OLR0@-zEEf5go#4USF>W zoWN%SO>S_qKZJy^!_W<6GdLK&sqWjSfk+rmS!$EAfD{Q;X`i9?X9|%NBbUg@X+ zJUBPO&~m5_s2(IeMa~cin!JJ-36QKYmf1y*h}Kwx#?5)b(9m)xCFRWkCg1+rF>H<35NevM7qgsG<->GN_h1GyrBq zg2ZVXGK6XQI1VyV0jX|TTva!lWfuk26tPReHqqv0~g&@1l&M%i$+yDf&M%kEhtYwY$m_}2r53pQd za|i6S_c5@U-oL-OdV!)6BzPYYoKq^83&+MHTOb9sN}@nOj0A`VNs>>Ys3KU=fGWKP zV+xAIL@`EdEgFL=gen?IqsFKjfs76h0NzwasisZ4^9T{Lc_C-~x{=IwefAIdjA`%|)W!2hpWb=(OXk znOOB`aq!W@kB%1etE)|Euqi4p_~L45N29KH$MyJNdU)Nf5&PwC(~N6IrG!tzfT|Em z90JQwO=bor!O?pNgR<5ncTj1^c*4C0rBDh?n#Dhq|B^5T-n~nLW{Gc}ae4`e-UIsk zYdD0NeRZFp`Apd1An(znluz$35{DW%^&BDs$%{Qr@(NQsOPAypwda@AOc`t2<@K(&96<4V5)+uD1$fNaK`g{A|@Y!0eH%s{BQ%@z+wp5XNL$;#hAR? 
zo)9^0Qb4tqZDA-v452dBa(l(Z0Ob09w{z>&W;UOe7JMJ-V(i1Ys1{{mjcvC(=Nu8Q zwyldXfY{P#3?g`OeklRGbCx+cUsv_&YMBCYRM>TGh@mV>Q zLKxQ#YoY|ADuyVSH-@I}hcs9*=S)ClS&I!hp`hAB!6F`zLL6v%2UatALl zwmU^LnM^gz^`eFC20|~ckG6uMgxO5SQyQ(z+{aF(?4)EeI$1O;h#`322OoR{6;u;B zII=}0Y{f)lD>4GeY}Kft5QzvdAT#CpF(4%a1l}VU2my#O2*n8AtM8Fny+;!PWavWE zT{?8+`LV(DY^EwQ?fg+i1WU#y6-vQ&v+ORWlJ-_=K^!lG}XoPPd)14Ix0z3GO`1zU^Wfx;HM&}^4e(aO*=D7n68!Ch!?RB> zg2ao}@|9_041kMi<|lE52tY`nN`Qh{@2i81B^9e_(0>T=xyaTueuW@1WRHC)$D2|< z{}J=_fabZHVNe$bl!ox-;F%4a_u`+us6AJxc@FJ{b-;ZP)Fju*h`*|c#+X$s=^av~ zn+ajeg|(!FFx*91JChbCr->w0P%w!@P6;h_0HUB7*2;>d6wA|hCf#oo229&&QbR9* z2%IeWM2w*!prc||Lha*zJ~jJ#JVoK*WbJ#60XZ6&7s70Lk;b?IkbxwjU@6 z0pLjc_YfpbA0r|~U3t$4Kul@V2!IkQ|9{E)ucpbCEISa}!rhMu@yV1|m1=y;c!SSmtcR=z>f03?{(`(`<&0og+5UHuQir<$RjwWa-7r z>zqc_^zr#_%94w|wV8+zQntdJOvE;g$8F!gzrS-`m?gb@ag*lA$c1|ox;8Z7G^Lnl zcHV_>Je@ArZQnM&aUbs<1t4eX`aaFEYdb2-ePryNm`*VQu}DToQ8FTff+@Jb7Kc(sV=NW1vJT$CC{@u?=`9OjRr6!HXvu1K~aFKN&%Y(HUMK{$jQ`* zokcSu-?TE%Npm0r$L2GNkYqps6+kq@;_h7*Bg;utI|m4~MLj61dGaELOw%;1I)C?m zr|Q1{_Whf$U;N20zWJ~Jn}6|-{?VV4#_Mh^yXP-2{0A*iU^p^gH2pvSm%sWi{}zrKHX z_YeN)C%^u?zx5pUhm!(69#5$7ynnvE+43K37THbn7tCc@x6~D)0#&dQkWc}z-eLxb zNG4{?gl1@nP?m=b85L2nY!sUTAeZrJ<-LI!EhtKzELcU60hFj%oYYwxJ1!jpRtw1m zr1&hIFS0y;Z^ol4$ZTq7t1>8XP_=}F!bZ&^rsbX>15nX|eU+mFFqKcR_@{ZoqUIo? 
zh-I^ZiqxS&7l- zk6z%Nf)_3K_R=DpdzU5sD%AbZIvcO^4q7ym%s`eTjEg2;Ny|`0S+M5sqGM{t%atT1 zCS)p=Fak(*1&}NvTrZ^>05gr#sA|kECYi?g@bLKZ_IA5j<@w~0r+J*S0J95$07NEJ z2u%|O$gE}WfK4nkK@mjKX0zTOc5UN7+}#rcJ9f^W#=W8yV)!(iwlA+J3#dEnkCFw5 z!7Ro(Fg1?VlIBbVDaD*Kd4#1g!eRkY6|DgQ921us0F4Mt1zcgDGLa|r00c@>wp)?C z_r$!&&N31ZlbV*1fEkbj2WTdQv}9p5g*qN`U|^brt1W>c7!X()TZx&7sNp1BF!H0%Ny0D&bH$(Co6Xr^jf5Ld}bb23eY;2fJN)j=KkWWuD*Q{`Om=P<9a|M2VQ$Kmp( zQ%!LmK?KGbz{ME7r}fRtc|4^QL*Jx0uP-hi#xc&*#p^eZAMb%h(H!iF}XABrK zBH+d4$xi3{~~Wd0H#YUE~szef(l~M*eUO`QW8^f zX9NIXLMCKHCg5uCV|130F-4@s0j7Aim$;>+6<@0V+5^COAo@Ao1;MP=$qEZS25R&# zM8}-99G)47s;a3%VY)-zg8p3Z2oURUz}kTrR#qhz;75xX9Kfe(YMm?>ZsmfcXx(my z1w)4NN`d|fQ9!M13saddSWz)E14ON|Cs;@z%RfuUQ%?9N%nTQ4)_UV(i5FnS0>%)j zm^f4vt>X5I1S(c+(U!5#f*oNM^hYY=7KLIpUg(~TR9JyjvH%jHY6i4I1T_%nn{pRP z7VpAo9=!JmHVyOXG^*McU%pm}&bgR|#(SA(2Z2npC=pQ+cTLGe`=;xgUR5}_9B1#n zs0`zDyXte!L>>TRO#7$3=a95)quRP_2qdOBrx+X?F&Yfx>Gooyk{tQtGzIVGDLQr{ zModzWF%@Jm5OyRYj=f`Fl{5*75ebNB_dYmsTnai% zN#@xk6M!M3Z!Bj~GF8fiY7Pw4NP&qZC3c+V)OMX~H+=CLwlAnz9e?|q|NZ~=fA`_< zew|Knj9>sQ^GfjfDdOqz65ZA}4Y(9hXJJRy8XWKUzyECms7-60GB6}1OnII)M~TXs z#YV}w$svS5O)Esqlww3i5(n&=0~&&>_Z4LzYtR5e8G+Cw&B6%t%)Z5;BO>e7b*Dz4Z-7(Uhd)#yKR%vfr(@>%-&6etV_hH`i~E z_utx_3=Ek9<8*r7zIy8$pN>y%`?5%Kp>LgQPEY&byWwfS+H8ID&->%)xW9dII}OkC z;i6w%-S6&$$Ib0)jj-CjIi5bYU6Vm@%G>oS6OH3|9LIGRw(HGqn$avybJH|%n9qPQ z05H?CXj<;b!Z|=hV8Wu?hznusGj4bR*2;VpiW+Cx6{xO?RGx_vO_vB$aUT5a6#)1I zJd_Bz{NDuvS?cmy>Ek&Z+TxE=F`t6-6pbaV=>HPurOF?OC;%JSB7tLP)VxgK5TH!> zup&l=*@%%D4Z*3@JE~l*GG8tUps=SZA0M>R>?_R)OD#-@B`(`JWszaExT$;{t0c=J zhg48wU2bb#kfPf4EHS+%%W{qkKIMl3>`)QzJhO=^&<0c)LO@uZDxpq*pduDb2*5<% zaS?U=be*c5WO)S{(bQDbJLf$Y{bvPGRR!zX285=yA?T5q<5)P(SqWOKPWJnWtGX^ud3!JQVvP9icls0l|6Cj+jdy24pHSV;W5~#yDr4GvDviaCoL1w}CD_Av0;tDFG>g znSgWX*}2eZvm);K`yuYbL!JT^3OouDC^CD z0vZ8<0TEr-uKRwK1P{Btq=-Nd@4s7bPpcO1K78jw)AiRcU%&b8+Z`C)-F@HP-duZfm5 z$-=BKgPH)$@bg92a^OWDqU3s&y<|9CoE}*+fcZ zUfDlTOfbQUVP^S!0u^H*Rbwg!8K9sAcOWQ^98hN`1XU5JwrF_HjZj&eTF|!CXHkK?+_&u%=af^HOw2J!+jUA50*+6^pZ?_MBqLJ)^nBVKN5?*=H14O1 
zO;4ulb!Q3$9Gugvo@q2SL&?IWOJy!H)Rx6_m04yi=7jZ3nE_#SD?~(cOw0vh)@7}U zhzyZ|h}o!UtQPSDMyh_+IhPGPEjJTkxzE7V6hu_a#86F>q)~IqanclXiaE}jX3$i$ zK8XMzn+gNj9BH0hj8n{GreD2#_~-w_zx(aCe~9zwP+zRN|LmLBfARCTSAm12lyk$R zMX6nrM`JXckMIBa|Lgbv^8fZ1KMNS$DZ<)`<=l6H zRa)}ze*fX-?N9&uH^2J$=8KHsG(Y2yGJIl(wg?S(o9V8<>H_-Tl3+k4lb%mg-%Q}^Npr(>MwX1ms%YD!Akg%zqD zhkesDHY5b_+)VTFVzW7|*V8y5f_Hj4JY-Aj?P}P6=tH~e!=~#QtnsV6cOTxqeSMhs z@ig!E`*^)=8}CtDhdM_`TFDg=H6+$a9mziCmmy{Jd(Ymo!-A*cN zO$)84h&nC%b6EZsmlV!O0)_x8N`yIQ1yz-tBY@@{HO-PHnTI^jDW*bR1QQ5^0452t zGALw)Ddur}ej4r%`Ty}(zx;3i?cY4lh6KTZDLm|_kN3Yj9uNQ1|LkWkS3#%IKvImF zZJwHW8aT~xJnfH%zxZeWN&r-47$-%OEJQRMC&q?- zxVXIBo$R+i-2Ks4*X{8D7Wvm|F*x}-tA+z?yoPx`|rNH+O|B+K2Xb=dXHe$3-vV49AONtlE_SJY zsM3`{sVE{E0jL7=@`R`{mpt?d1}w6P%ymo6004xUpkBhl)+-ZcB?L#vNM`1V8A;4c zjh#<(YC?0AsqNY^rqeV(@Am)nFaK$hF*LMl`{(Ba5(#K%n^ImBbWpR7!|2&vY&Or& zkHa{y^KIA9b5elYtCxqvA?B2GLf|Md7ZXabIZij1TTgJg+8)Poci6Q}Z%E|$>hg*J zyRKt^)9KW8-QhHluIo1}|!Nr{_28CT5iB_|>2MeBM2eaUf5- z$$V&6E61QPp3L|8_?$&WGc(5+{q`b`k0z7mHV#v0y76?-Vd~Zw;2MMkszh7{HrD#G#91ozz)~c_pfXaSn+9#H{h22?3Fa za>}NFz%j;l)9)UR``yXA#y6Db>|LXl5nvjpX_}hGr<4k9mIy`jG!N^|cHOVOySod% z0m80rkB1!*O?f`e69P!i+wJ;zJTf^eR(|XN0RR9=L_t(FPAP%G#b%x6Lu%k@_cYF@ z>-FW~w7cA_rg(B3t}iwKhQN+PPN${~B^M_VWG>?zL}YZ1T>|jIGm~S-L3^`55arZd>@^8QWumANgf3r6w2nJ5n8>~HEHhi`2 zuGcp&F50fe$K6D0zxGrt+#Gq0DL&q}n`>pqVQo19%sJ*+BKS6IN-57V#|&8!wl~ey zn^kC=er?VrBy>LJ`YlsgHa8b9zW?z4kAC*` zumA4vo}Qm>USGN3U>K(P)UP&-#i9hQ5U5mZQBn~hETDL01ZfTY)jbjo00pXqhw(WI zgVp}vjGkNJiqv9%X$b0qI^ns}qM92@0d7k)tH21JXTM9~TN6$3H7oyHOv;M8s1>F^ zpqXYdRr@STYh?s2iVRkbORP?n5lNA_PURM93O=uSxuxT%ti00x6nP9a%Oaw>fLDd~ zRP+U`&P3{lKxtm;oIpfuq5T+v0+?7K;4otaaiGjD3YE>uGHN{&l`&HJ56g(S%v-A5 zb&=cvgW?{8YL)1vs0iMZ8YqHe0t6&uA~2xhB*j$+!H19}AzHbX-f@n}5jiFkaRlC> zS>|%+rd`veDFyEUZJzTu%`acwrZjBUP3J=r5wUs7n#>fNCd`tHQoCtNY1(dH9Zvf> z%WBh^MJA1Dyu7%Gs!EU~6R|@cWCp^}H1F>2F4nz6>)TG%=BStJ+wr*Hta<`N!q&M} z+h|npTFDS|#*|boX9Z?6GPQEcddFodRQ7z?l^nq55mvbe9HMh>VLOv45R)jdXH&I8 z>Ls)+3Q(?c8Aes-2oT9v 
z2d^GTA?N8hynpxM!;c?V&c9t-ObJCBqHW`E)~l<{db8@bo4)J(x?u-toFgRZJqL8B z{RrVH|K@M|!@gNvBAV(v$B1A80bC=NlFk#3nE<=qH=S=GWv%mo^8A$s zLTicvQ$wzfk%o+FCL(}j1cczU#`n^Xl-Y=6F#*e>g=tiq&`R?G1QS35T*fSC;;R-B z3`elwQ6=VB(SNsUP*BI=R16oa;9q)H@hG4WovWQ?{#BX*H3LFYHAK=+U78jz5rP!P=LiuIC#w_$bkUn85n^P=aI=-95Ji}Jdz-BAp41#gHs zG)>GY&S|q+HH?kJ9PR08y!c|Br}s((-X9M0%d3mKk9XVt&8y3ckK@72o)5dXU;Wwl zfB62i8(;t8CP+@Fxo_KN+$loSHNFX|DGvLvy;SU>^W)=DW$@_Ri`TlRwENM!!O`r< zVorVEO_D-K!?Xh(TodAqS%_VOh;8fk$J}+>96OUtj)yb>(GW+U!`)NtdzX^_`2KkP z;^llA=Xl(#Hq&%`^X2Oo>GIp}z8lljHBHQ71%44zQP0kx7NCWUOr;epL8>rdv?4GC zC79dFNbMvcn!cZwY0aiY+5-ZKXlz=P?uz{g0l*qGMgDW!+tsx%k zdV#5EWo+A7mumSM^|(vu#Tv~ki&jC&mPs&xLiL~2#r9eaODj-Vf@WGWG*)o_jN2of zX9BSN4FCW!f!^$Fp*=Ys)5)cma_kR2@Nb~AuhEP5V3QfhzS{)k+}@( z!N77+?m+Uc+;mXT9y0_K$;)s@!HQXeY0hA3Su6|WtSRPskaBRU`0%uIx@kk>JQ||Wky9*@83V|VvZM^zKsczf^WrU@*tGaNWmjD2{J(oj%5b&Y>^;GkQwr1 zc}8jowU{_Sv7;gcKrw;3|5bnv1VjZOLM9@B@iYbJRr1@{Z=N6a!y)JUhm@20Kuqob^!o^R8?aX z-?Xl2rql4ef9}H?K27n9m2chT-@wv#V5};5E zSuCNyfY@iGd-=Cj=0xp0mO-vAQpai_mHMe-HPES)F)c2$0X3_)LCqj%B2mRpYS0!F zG(rU;QYFB`*+3^Is)|Tb1WQU?SpNWnMHZr*n6rTap8Fbvs<=>p1edW@m4m4$ajoOe zc4y`LFO1E4po9yzZIQ~TysqWEV%;9LGli(O30U4!#-3zUGjY8{h6`O4t9TKVzEFrO z#!3WY>b-Z&ph{E*o`v)xL=F&4blvyI<0xV|WoGMFPU1<`GZmDavPe$WaFcV&DVw49 z>>RDv{r$ru5jp4PdE9K)Xu4T%RaH{TF`4M5U!g(Uw!@sC4@b=N+qZuVWKcwhM4F~o zu-$Nc{o>{E@chSLy~=R{(411rx!d^N?r<9CB+?R-XPjdir&NI>J`S5i2@xkWHUU)z>%mTs?>{^|#VNkHx?p2cQlw<2gzQ~l%K{9R5i=2Ja*eZu z@pL32^bMqeLJK-0l^yIQU)M)R7Y21zMpQ&%7Ln61<*bh3;o-x7`M><1|Mh?R9dU@s z$a@a0rL70@KJLEx#qIw7yMA-w+5OG0zJK#_yI!>o+h!Y(O-wl86emPh#c>?E z&DJz^-pxa5x;3$vdB&S7n|r^yo1VWNcMr{rEnyy>?!)U#0Ay-abBd?7>z<~mTX`TK z=h(IFWSP6|dc1$QxV&VC*ffUlczC+Le0l%)@x_Z*!*JSeHU^e*CIAQ8 z0N!l2?AICH_2pF`+pKvUCQ;2QWm@SRn<_FP6dE}y(z0!9xFFboKtRQ1yKG;VDT3NU zH(a8Boln+*Y8mz}e||;*f+{gtO43>jSC$Vhv-~A~+Gi1a;Y2Q*SXO^|LDub@j_Q^! 
zs)-q>opm~ExGc9v$y|uIjA1LvRFVr+w;L@Y<|bOI_Juc#3wfg;Yn8oJwU!owslj>S zz^eRSoe>o5pq4Gm- zd^ny)H8dz`OkzwFIyI@J16(#>@GO;D^Rj7C=CCrAWn`-Zc2HAdLNv$KRZ&D}VU89D zP8^dZd46TP1petl7OOsIuK(gX6Bk!O@P!at?}hi;qK`H zluQr=(7-hc-j%L`iN&HIridoy8YTzx>F6D?_Y^v1N8}6{s6Ds8b&tKqE~7f4bXxSO z+s?NwJJL8a3L^i%|J8rkzWV034|}9$j+*k6J4^%@+oqrW#nr`n_wbbBw!2+7@bY%^ zGni%OZcE*iazb1l%NURJ6^Xz!D5zS95aZB6!pT2 z?3*EREnb&$@;no+qRR zrAu>Bf-F>m*8&igN3jkd%7oLj$a5sq`;h1s)s2qNM zsYp#M!y7aBei+BD zZ66*Uvg+D5r}5af4H5KhYo_xQQ;w(M7@C$GpW>7yPSe;q{L`O(b2>bRra8{>?)k92 zyipO&@x`lG!{PbmWgk!@LIxC7-*`YA#%SOHlXEVnTvDE62WpPW8k_=|S)sP7sE|r= z6saf33cH^Gv1$~SRR#dfDk|Qy6--pisYY0EPep+8xJX6?G*eXsK@cl@TZjJc{$rfdtJ@1iYX}$(41^dAo%aHeQBhIm ziQAy_3@OSqU^{?oEYH9VP^hAdHFFb~mR4D;mP%pKOkTcxNv_A^qYG{r_f6}9rzW_zZ{C0V0e!POJ-)hlZ3v-lA9s&$-hTOXf2ZI7 z=H>M*1=qFxrjc*&AEHGdbqbnsd&n&Y6ng>w-j-*k-U;87^4AA)wM{ z3rcX7(5>;d#(hSpaKn-hN-(6e`q-x+sU?n9^ZI3@c-e%t<#4Q0Z_ZhuP&11oF)c&b zs$5Wl=YpTulF(%B0k;PKz+AK-t@$FAuk?J^%-miDhyU_N#!%)hJ z#!;E|7O@}4tT}NL%&M#vEpjI3J`pQ}4G|0p)D%V7`I_3%5K)R4MwvGufLc{#v!e1Z zh-NCFQm97?il`uprc~r4CFeLxjyb0!+7&^3AIN*}92z?B%tV}r720OIef1hOX-=w1 zR3t?}FlO?>kh!9+MwOK&h{ctfQHmBLvG)c-&H;cSn{SjFO`!YHdm%rg3xK0*J8pYy zIrxUni)13sB4!Yi6xsopSdpAkaNet#Afht|Y6-y9)SD=qv5GivJ~2C0`*`<5vZQDi zge{==Y^5(#`t~Dt4F*S+SP}CPR5WwQ>K zpDXVL1On6obJ%$;25pf}S&j|>jQK3cU0=gGPgX+ULZ@@yM3Iq@B>+Krp5Yc<)d*D=le!=WWy z_u={OVHl=v)#RjCH=95F_?Qi*dA_||r8s>Oa{|jbV;2mRz(6WtxHLvY#N?>tc3s#2 zg<7WKd;m}rDSGNywN$K_AfXA7)0|bHEK7r_sg&WAN;1islgcb<);xn`%UPuiw+LJS z@5u*t4v82U0Ej)i07#mR4OFwu(KIdr>W7oFrY8}}5&?LZl8eCx(0j=5qfQTz328DSVW1NtwRAxy9iE@nS z9EviN_bkBFHUTluF;fJyi&w9W^y9nV0V+8rWJg}J5@U{$KoK1~NO882{i+AaC>dlX zoxx_b1e$@!0JZMf*U=i3^~=)M0s&KZb+uacOcrAr=9pQVt~nmZzG+{+c*R~n-rt3$ z{p}Aw@O=EA{Mk=e9tiW7Uw--f4`+E=7Bd+9jRTxNpnHS^2bDk>i5Dlx#p{ko>8lFH+J7 zXW#`4fC$uz$TOBns;L&i-7@%uPcan$4T%(#k<7rs!cxN8;aG8chXws7!s4>Ug{D~p zYw4VQmy(0fELn@R#~*mB)E102!IO3uAQh@{?-JI zrH@#A&&!qoE?3ePE^68AMaCji>Zr<0A{a3>!DmrY^WLLX0k~p$sH!oh%d1OfKAmO* zJ`Tfry>2^y_i$vq0YJ%_+2>>?HV#K7k}N5v;GN{$cHRCkk_)~K$ML|<9S+A;w|0(? 
z!$|=4!?4+G=9s2=ez9&-oC0BMFwQe@^YQ7qUtOByo9m0iX^^Pi`0hL9$6glbrH2Cl0-98t@H;z{HNY zptOco_ZK|^00r{8l8Dhew3#p@911$G zW|EMdQ!l>tU}1fInevP8zWoE3(c7XU0KN%`Xby4~Bt^y)r|Gy442a-z3o=>s;0M4) zjmSAGY-yw-wGL#6mCeZPK}U2xjgu$nyH2y|X?|$>U zUwrkY<@ocjeli|+<6*ve(@9E}vd-ztuV0Si2#&?#+c&qrZe0k0JwxlE?Ib4Og?QYz z%^LG;=(ySV@Swwyx36&hW*lM&K4-ucu?|LI=oTxe0tcMgxiS%x-A8 z^+>}!x505h8|`#Dsw6_p8M6Em83D*=v(rMEtAjV7Wi_fq;fnbzL*-9XXsZPr*8PQ&0|1uw z^>T;dEjBRZa2rQ z>}kDT(Y)IGN37%5TGik3Lq$;RaDoh zs#*pux?Ln$OS~jvWaolMc1EP$H~nh0-EOZh*V`32^3J=~qbF=a5gS5dBoD;BZu>NY z;Jn|BQv?>Avzg6?Mp|4sOik3Ng=8~_tj4M?XtTY(P4o2ayN`|>y+TLCjD(n@aA0Ss zd2&oLPkHpN^%5tZ2jl=Unase>OiQPVw)n7_8YmzEGGqlLXjZ`m4@L+g39|OR^B#wo zg0pwu{~>35@#=+uA^|xZ=dte_LTEg8eYfe{S6{sT@BX`g35>{L7{@V2bSNS)$IBNl zhyoH1!x7r!mN2POw~onRI5xLGr|p}=_rKZrF;1uTK8AuU5UiD--6Ca zC>ns zXq+tIL;)L0Phd-!u8>k$DT6XGv)WoLQgbEjmm<{+^t_~S&LW?{k#h56h31O{^D>bx zxEB=>i*pz934*H6e4V5gyrvAP5nSz_KT8Z?rPi+ImSqlp4$M&VQJse1G8wL={HMHH zaTZ&)Piyy4uzj*p*jw3kEa?r39Q6Xwe0n=A*v*2OA>g8wZl4tQzzob3sSco#sP+w? z-j1{kZV<7!sF^CefTE=YD8iQWG&}SFG>#J!d&W3T``vyVCNsOd-2Cvv4^7iK_Fc=0 zc#xy0CXvz*0Hm&IV~jbeWc4nLhf#7e)lJv6p+PfA$#<=(dUBR6X1Tt3IiH@p25+w} z+55xe?(Xq1L+ZF~+VK6m9~F4BYF9Q~ZQIk+4g}iZCFvY9pi_;W$RovByl;_{ngD7} zsdeazb5W_%5fxNas+CJwFIOQ`RfQx-zTiAXbz5^P6kwBrX-P2;b&OzI_Bj!(dbpEW zRudsD(0`OXsl*(kNKPqDF_Z5?=*&umQ028sVSr}<5&?+})s#dPw19_10OSZto>Cz@ zcepBsRi6&E$d-lR0y$K0OwN1f9FYrc!`{0lG=1kn;fcdiTQh?<7FPCc1-*aUOV zjIHpbQUVE(lXsn)&<}b##CeJd44sqg%sjSTMo?rEBUFTB;!IoI7~@y-6u*7dl(HGvJHst0|?X z>7O5-FaG44={SdG?R|@yA&+W_n=U2oQ4L6-XV;lUM%ARj^>IAn@XV`ALFRCghmX{7 z+pnhip=rW&8sc~geJ3Izgwx}Z*Q?=i>{p#;0nn!P!!$;jrlehHIJ%q#nNv<4eb)T` z=|k78Hr?jSo3~HHY2ui2T1J=Elf2CEr+M_wS(>Q?4ul0hKS#-0G#V}ueCY`g@H~Aj z$>ALTD;$msS*C(BMY6uCmRUtss(8IEHG3h!FO-`SvP-dQs>MlWNduoy_0NXAjHrcb z4+|!9{-w)cpd66_o>`p>qEyTgN&;B&VG;I0q|%lUvSHEDE@}{G_GBZN*&gHC<y5274Gm}9!WxgO^!rj)Y~dM@k12u+|gPu_V2VUMDbnUBXEBZ1~L4_AHL1(KAH z!<0-z;J)ql`{V9#eEa&frt$h_!(>_H^75wZ`h7eM^L(0TU@|euah#{?)oMNr#-yqU zk=Q<0oh 
z#3bi9N*dF2O7j@wJWP`swq4JmG3OA8nGwkl3sP@@Tt@S@6jH=RV$+PUJ~WC1h=L}d znZcDJ$}?aQ`XDru;KQn0HQuY3i5h?_R>+~|03U*HTh}zrdPS|bHh_2LJ$hCq@E%-K z>uvz!h}eLP$SRb378wj3(yAY)r0h+FSPi4-4242PNX{WMc~6ad`}LRKr|J85KQ!Jm zBXNi9Se>LS#DrRG-V27SDFV&VMv%l1O(uYLv8b-Q+;tsH3mS&NP1ml_5LMC4QiR#P zXnbO$JpYgW@*n@|$Gh~nyIeP4zq+1>)5GI)fBEv=-Q&f!o%WAcSFeBdtKShZqi=(2 zd+(d(`RS=^dr_u~>vZ?nbr0k9I8LW`4?lkWDqLM%y*yrh_w~CZ?|VK`!;hxlXOG(W1QrNbH;~jyudGl8>jO_vD>Ja#`mj5*qKl%My$H zkZ3}Lc^XA+oaR|_*RAILa2`U8WAUyN-;kJWHfu(la%6T!Zi*?1g26bCmlrqV>DV`2 z-}YA*mp^>C-X`04)cHNpnV?^w@E|?+JLI=)?2Ejm070AGlxGdpAHNhh= z<-lY@5D<;II36RDbDjY}Br{;(fQZf$dUM_pQBee8B`>%FZi?N_96>V96F3KE;E9_S z0z=z@ZvY*TLu5k$-)5ir@(RYeJ*vo&ogb&!InLPxjM=gGYT3{eWJYtUrkR;A(APit z$@l;1x9>i@Z~Nd|A_oMhESZH}=|?RmK~*vV6UfPE26SMJLdGJ=s#Vh-&wVczBP-u@ zYbLL9M9Ew4$@7cOX-1^BWjOBk9pM`BtLy9W@ne#_Zkx+1|J}!Tt%nycUi>%z&wq`E z>#ps)aR2d%D6BRsZvEr#cw@%C1$Iar=hJxHttDbu`EET_4|Dv&s#6$vjuL`HIsMG{n9ud32xpl2`0 zS|Be;06!nqE-~lRf8$cu!_o?zZ{9+QUV!Kda9d^f)S{-ovXnUGuNDlH5Nz3qQd^n} zgQ9d@-UH``#Fpj-s|8r8l9^Q(a7ts(QGU_^W2BN@ylp+{_V-5y1ouMMP!M0afKv zGO37UL?Z-M0aMAQDOWua$(nOcF{c>QJjF=>nh=nk0y-cn6?@sh1VZG}-GWuFS5^T8 zDa#55sz!k1&=C_MLtsEcV|Jb$0Xp{1aV5>?%-~eS3_#Trks=wgK%<7^6d!iSag3)S zjRRO_CMv4{o|=9`E&Iz0zFb@1^UV#cd+b|aWO5*ejA1PjV~lyTYE#S-S=6k$-7U;! zAS6cTfGL|`pf}%q{o(uXA9lO0YeL}Qe9p>^DOk$lOXvq>Psqh~p_-Ut-EmX^K4aMB z`C4vt-fk~Fxoz-nyYAQ9VVv47gs^F@ZvOE2fNIyP{@OJg-`pL?_F@}jzP{M}-EZPQ z{?ETqr2Ujz7glZOz(VjJ@9zHmAN&N#zkL10^!V(%?P>P_D9$;{8S-ShG77^mw0d%@ z+t_v%n-rfxM|38TEKhK{$8Dn~O}n-<#QE6ueM$omWSuMz{`(-TMbs=``Xt4LaD-QHlngnRiPSn#8{52 zU`|-2rD{7=;GJ^JSeu&C1XQS}DEgG~NFCz=6z{)99s84ZQppXq4 z*Jw25>FNH-6Zc(6$&fTOzU^CPr>ZfD7ywad+N4tC0h8n`s-R71cgK@A=hz45?;h{2 zuWnL~ZP)LQgJqm^?pGIcoK|hv^t|c$#}DtHcY77E49+(nAD^04-#4Lmyz(?3r-z4K z@a-@cpLrT(4(rwNbaY5SguZ!rcm!f45Hm4TLq#Yy7Uwd?K#@#LL^GQ%(o?FMRaqpn zW7UjET553vW-<`sk_?KPL##K5W0y5EA!I4=9fGlA=g1*DVlrYR@0|m5Mq~(LSu!DL%!~#}84* zJ-mN-_2R{T_xz9l!OzBFFKp3tp7!5-`Qmy7KmGbAfA`z(6W|xO7pryHKR;irTJnGl z$?~hKizCu{-Hi{Eq|D5qiQ)(=*Id4I58wOgfZa8R6}Xk~DGfv8Lypn~o}S-z+Z*u? 
zv@vr@^12BrA4RjNq&RSY$sm2xlbR3iblMw3j7ddP%Fg@KFwH5hS6xyhN0K8OHtamp zfH|4Gd;DR$akp2S`{Uzo9<@k7f&lPDgr)Ii{ZBju{T5_6iFr#$pIpv8mbi;%+k>*Ks9XkpzllFZ;o%^mmntbv4_P{O=a<$><>Cm*@$B&0U`m--7-~HvEz3fBttKYp-!>%L7 z+_gQTef9O%9Q+XH{j}@9{<*ZP(6;m5R_oPa+S~MuXx8=Ia2lVmxdiWsn;dO8p4zKd zb4($$&%38~_XBR;m<7v$xOC6pn7#9o)f080apbzb6^Fya9XLjZ!TFSO-v$wjDVhl} zFhSp~L4eT%V9Lq2&F-{&clY7zw_jX#{TQbNwL%2|)vU>osIVK)5r2VrDw?hTKeK^P zkbM~f>!$$#*qK^ik=~_^D8Nx2)K+v_7j$AF?Ju!-2}O4P0XWZemp}_xNO4fgYtS=J zRFAOASX49!&kPz|@_<<}L9cwF&ueEQ05DgwA_J%y^Em=r(c&aP6kFhqLPdflL!Nal z5J8uq{E6RL+JYsvAeiw2%r7~!e3CBTAMiYXu)1_nbc+D0`=Egp6W{8+hv%dDbQWP5 zsRI-<^D+*q#x7QUmkq#7l$g{Yr^wEm5o#GCX-?5O=ZSOFw(+~&Jpx=^Z6&4BR{&6) zW^B;ARRWQ$agGR%iZ8l?spgama^l!c<9xZkk(k%r8U&dsr8viwM4G1WLZ5tddv)D4 z;r?-V7-CWoRNuPqfB&xWH0>V$^MCT^miO(d{o(#Y*Y~QqV1mOu_5E5iIAhPg>D#B{ z5gcb#F87dH${E0$LXsl%r=>5#Gw@hQ{i-?5%-+O2qKPm&1Tz(Os8Za40Hnyuf~ks% zXa*2eGtFR%noY7~5lLnuIZMu`VH~D8bRD@+*f=WUT_AE<#I)$@7Sd*|3Sfabhzi1l zXv)Z#Q)7TR>5$@a&Znp`N=!Kls8|w}9Gs77-uA2e-IJySNdlnF(Ic>`s}i(oC`m=J z(9#hJ$)gVr8G*{Wj3$+RipTu{9nWcA#{o$pMb_fXmji*!xe<(NDGLDUoGc5PBAQ8- z98EH+1`0%e%(Ch>>&xqD_cV{wdetUXbY$L%BSm$jh$1<8H3U+XYVD(92V2-iHJz;%3KJcb!G^*UU%#5Oy`mJG?wUQBC7=-F%>4eB>nn7_IM-x>bRHuTdz)1V&{rb&o zCZE%sas)6iZJp;L_O8Z&j!+zbcz(RTxqWqeyF2WTIUAKl^o22kN{&QC)r@NFTk68| z{Pyo*5wP$C5kJM~8f;8I6HZz3h81VT#ZP!anas|r&Yw{K5`GFuVO2S;RN-b?98R#P zkbM@`l#p~mf-0l|SZ1`JbXh8_R8Xl}>((vU@?HZd8v=&~X-UgsK|R{@;hsCL<tSU@EIbHw}VFqNbHo-MtfFU3+M55B8Xvw&wqHWUx01+K_&mTX2>{s1pyE+`7 z0dSnB62+RvhY)5bfGA=_41h!wQ(Ep5YC@BvFxa|lpC0a;rk$ptU-wfKHAREI?ZG_aN#o*%lW_LI+p_BOMfAWitbW(hL+)uOhEn7~Z>5kKM9L9EiaXgNV z=ir@zjdMgI6?LA(EUPJ?m?@xIv640{jI?3^M4%vsqM9rj0(r-X#aBWI%+zu*9wb&# zM39_mp*L6=R~n$H=Db+dWiXMPQcS}zJU{QVTEm`P0AgfUbViYw02hOLET(tPaa~VA zR1qxgq9~!Vp^o|a;W;11Jd8TzJkFL<5y@pjR5djrH&4S+K6oaPgjoRCBW6(~B(Q0k zN`3+~bf_XmMxGfxd0=98j=e)fbX1nEEXw@haemkV)dpLE%ptcSsUi8wWMx7TG&77j zO*zIw?zC(mL>R_Abb!8Tb+MUKAR%_1fidODn@3P~9*ERbazgUtBx@p1W{PLGYb(gL zRveZ*H!V2d_A3^dZV_3PFnw5!$O{ga_uo`3qyHz~?5fAyjD?q^@V 
z+B6FDJkH~dzwp5wj)zxYymH)fvx$?YltTzA7J`e@7);3b-aEAfX#^v5ZD_V1Pruo$KoHqGB2IG(&OeXOzIBq#xp0b8;~P&9=aHGc z_c={5MP?4(p&&W{v(tE5R<~f9;`@&u|M;7)uQuD^@gZpjE_4U=!D|r=KwXIDpGcOq zNU!0gY~tb>MOZM98f$C9K)`bZtHIkU+vm(-S?a{kU}-4aiyE^&;X_u-;brJRh-!*; z8-_|As_$8(nGw%xUiK*}7J4gH2LIWZ{%qV`@s2u}HAE;KfR?$lGqveC@QW)7%%AZe{6%zxisiYo8 zy-qa=d`g)MkbF)e<#IutkZ6J5iWIhiiAwM+SqNCnRK+)en8frn4*TQ&tFOKaZIHdT zq1hdV&32W>nVIJpr+J>^Tr{Vcop*j3Q%WhP_4Wv?kwAfA5vP4xS0fCBRD$Z9bnklmZu!AIm2w-f5{IB9% zQD0X=FjLTMB4TD)R5F-|q!edKDd&`O+U<6yaR{rl@w!6X`VG@&)yMf8B)gToUtj0f=D*h6jOo$h|oD?LULn_$_QMy(9t7! zFKpmlaufpysgmZLJrIalHdB~m7O~*l?3-eh=@HdX6;z~YT13)Ht6t1XOVh?+T88t> z6THkPOYQ_^O8+c$5YAAbMkmlyBu zPqWe&uWx?-<~BZjd%0cx`2NSA{ro4}%Zq6|bnA;~uxeY^bvezf^KN&zc=6^i9M|(` zZnNILdVacNi0BBL-swSxJR3&3F-$s>!Jfv7$0cXQ0Q*Dv2aJ|IF7u_1>d zxyGv^w^OJDXDy&w;1@$G#U)sOQq`%!+scZ7Er0{S`Cn&x=ECa7&j~-D6{(-S2`gK@ zXdpn5^!~Iu@OgJq0{fyHhlr-1K~h+~S8>6_7m^P$FDOpMg%HnaW|?C+00A&UF8jTu zF#$BIb!D;0Wt>0nC!{2cA7z*9LLrH?2#Cuou6H&x^_WpfGE0E;XgoO1xer>I|R zOa-KcXSJNdite8~vGSrVC%zW>R>q6<$e_3hn*kO|fU1!5g*GmdRfGV>;bZ`g9XkSb zr(w!sZP$rpQ}Mw|-Zd_q=1Ihjqr^Fn!z=~><_M5LEgOKSHIBz|RLz$+w<)EjY1lah zNJ*Za_r}DYMCVtpu2#X_J?s=BEogoPida#tboNHQ?jU|D3VQ+h$v@OQPa=JXJ(?BO~7dY6H@{c6%!&R zUoW^?F-`ze1XIbNT10Y9MKu>?2op8Q0AiZJvPzbmrg41Q?F7&_Er*6fAm@t0N=ZUm zSCor35dfg1j0D~jlaU&woQH9Ed>$Tl={V)+ z+s(`W_J8>6nDEP2*RL+zZ@)iuFE5@Wt**9)^1I*Mk1;v)Ig?M8rdg-s{nP#J&wg%u zjr)6Ezl3%>4s&~W^v%i)&9HA+p6*z~c-(ESH`6@8bhNfZ_JB=yvDG2&?w`K+;>(mY z=IAsXPP>5XBH+YSstHD-IM2+KHIF$`w9b1+6nqeqIZs`?DrF5ajVZmqyZ`CeU$47< zHy`cOHf>=!W<)g9CHmqy217xpm!c2>wN~UtrA{`m>K}ZLJm*MM`-KHq#-(0eI*g?! 
zsJPHMBh>e!o%OxXy~qL$m9eLqeX5d6CBL*4fLN#;bqKK30?brf1h8UU^9gdVMRJXV z4!K7B8c09Uo9g!wIiJCL@TJs+np7%=wS1wvCtRl@fHfr+nv@#xd6-)-4=(>v^Ijpl z8Ss*e?X#0r;hSYc&h+nLLFK%dmN9WnHd>F{#ELZ1dE*vj?f(9u?>p~EMcS?f#O>up95hLE-h+W!%vp;%-|8ZC z5V(pAm`Wygk|Q}(sgH&(7nBfGlnPF&3M!x(O$oqI3no?X{Y{IJJ z5ZF5)URE9fi;vaP(V1znsWC8>>;x!@JROFQkEf51@o2Vq(8nIhG<*Ta;=swE-od5WQcQ81y$s(2cWiT zWke7#2#&?_2}uDRaLAGwa(8)q+P~j!FCTyWe)Hm|DNle~Q=Cu7(7iNZGxGg*8uocs z2B(nOf#j5?WEA`Dh2tT3Pe@sbd^62+-}X_YaTJGf-2?&+%)9v{lFg*V>Zf;-w@P`lHhiROk!453CtlW zm?$uD63uEkOIDF$FlPc_<<3;i9y}HyPe5{oY-Xk`sZ7$)v{Vy_NT5<7Qd0y$%V4S| zXrMBKsH9mWr8JMz>3BK-q3>E38uq>jKOiD88yFK4A`>q}3slJ_!j8yPkEiM5^YH#5 zKcDh(fEe=>hjEf5rF-+v5n%A1*-!HXVo5Wos>orOBqlXuA{V@9W+rA5(3tXY91-%> zx?io@P1mDX6UaNqA%sf0t9rug){b6`xc z+Ft$s`}>q}y`^uydj0Ev`>VG%Zw{xE?^mH+udl=ySDS6lz-_BZT;s<$6QRs0P)LV; zV`ki5zxX0f_eg;KDy%Q$)NJ~7Jl=ON-;TS7zPlL4W24EqN7saYJ5$)KH!01kMr1K3 z@4`IC3r9`k^r)ulgLAGKr`b6trcK+}G=nKKGvP1{gruTLQ{h1Y=@5rMeD}LQ`s1HB zKZMz&wEkMElY*5O9m!GvSDH#ONQBQ~?V0dWqtP;Mh1v$-|6lYk3>`Q}cC|&@=R6qr z91ODaKed{z^MmuxovET~OZZ=)DS$=18OpTN>dX~@7Quy~DfbgJakCotn(^Xb_Lbr`s2=LI_NL_q=ajbWB{WvVcG>$LyL$MZ6Cw zMUmJx;oXlv5``x8NMe9<9J_vfJRVnFdwa1ZvBU9%j-OA@{l)e3@#I4AVt?}X?aO|3 zxc>mXKtjK8d->wq?|w8R1Rm$-i&a02$CSh=JReS9+}`Arfy3_fWaLbgshGB-0w}_q zEr})3D#k1d%REX|&Dc~_6fv6_34sBzQ_Fl-i!!jXy)Iy2S#r6~6%m4>EhNPo8Bs-2 z&S{#5!*Fm-@J%4+*?VL%)kXQuN~y&PCW>5njgH90oDcisyLZF8r}^=aPchH4#@R$b z)Uqf66KYCx&cg{1Fs4LAG0x=^%uI4_d=SuSOwN%Sm~)}=FK%u(tJU@O_RX6sAOnY% zl^wdq^Qz@_V-6`e5J44*lZmiLZUedos73Ib?P?lQ^TYk-G~S3lM*04E|1=GQ#S`d$ z%DYke{oFWex?}USUtMfA>oqb1@@lneoVOeinSF4&v&5Sdou2;6R&yNipfVca)u zKgBeg`p_MA&rlT+6ahT5Cz3g3No9yjM9jYFS_M0fgY!YvkW39tj2IQ={^|b9;m!5M z)ejFJl}iLDhG4oln3TA#SZYBtsRb>Hej+Fq8bXbea0W#F9`aEOO)xOj63hW@fl4YG ztxGVn&*QskdBfsSut3-4KNh5^%%7JE+v>^rRGC*o&L@jXL@?(H<*`tG$|$(b8R*<8 zE_5PSENsftSa8?^2rTqVsPrJK-?toMoucS++*JPhyjEAvJ5)@mGyq_*nAia~z~@9)-|^?I{1Rn10?Ys1=%PQ&Qf?S>fvhSMZT5h#mzqT}gw+pl9xA^0iI z>-7rI_J`xE>o?3a&*LfQf_a=yr?209)dV-}k6CO^qD1|A{U847-{0K4u<`U~fBL8M 
z?s?lb!{Ic}Q`4+Pqe8yAzWVNm-@ktI_CI}iD(dyBmIV%zw0(;?GNNP3IRcR)Ws@Y9 zvgQ)~%e}hX6OIVATxSI%6(J)-K|+y8E=Up4#v-O&u|>qE=b#1SX1b1QaBwhUu}2yAM>13l1{W$TTweF%a#pU z)l!rmASW9R<79v?gy5XxwrPl}VY2fcixC|f5<3^13rGx5^g@AwKonHTWBI4>@T^i~ z6+SWl3=qh-+^m${oFzEtyPoExIUk<)_xq=BZodBbaD4aSA$a%2moGou-97A1ry;++ zY<~9g@(;iK<=0=l{c!&txbK|<)0lOAu?0u>PmfT(es4R?xb1eHE*(dtS@3ZBgYhBILRShQuY(mOxmSpR5Ypnd55GSQ(r| zm7NN_@&=5oo&={a- zx)#N&GR!tGMACAF3i4AfRn@`3@`|czDeBG2i6)}uuvAs+f(3zSQ8yz~WnyA<%tVx? z=&1Yn;a*g(Z>|sl7~54RBE(Kba*nHh9rnRQZ!ftcnLokM^ex-+uQov_WzP5f?&P0u&Q8Oe%BADPR^!SwytJbO=UZ zCJJP#nG^vF+OMD_k_cHO6$q&C4ahSyt4BFIS`5~<=apPrug-~JHq?rE6P^DrH!6om*y^E@U3GKH9vSWc3W zikc|4ZMR-;R;%^8YgS!@j=gtn6PhlFWF`oK34sGpi*D1?MIT;X*`@{0^XWJ}e(XK5 zr_{CW%a^p;$Y^@nfo19fb}L(5(#1>eHgSKDIOmSGFIJbg+tn`Ohv)q;woQ;h_osOT zBWBAoN9_PNn{Kta5-~LkZPT=&ik1U-=b0G+L(>6~53K<`nF|M|cA>&?Y&*ES~U>GAM! zA6F~)v$wZ)c>d`ZU$@)UfB4<+zWn;<5GeF+IGtX;ew`r2IRXM`nx@DcOi$i90Mu!M z-4ouvb?YlcNT+9LUmCOXZah3++}<3Xj@{Nl&eL%0yA8k$uD9%bx5{}RTq`-NX3^BG zE;s8HBd0kzb~z6*$ENWa1ObsS5Cd{x??ZSzKC6mzlmxO$HE1zXMP|OczuRsvJ-RWE z=ur&KgqgG=7zF??RbuHkOe)Y(Yie1j|7DC>m$J(JUq-HHEWgB&k`)X}lrMqMpm35) zVO8vWm-MjI_eA5ur#98XWD)0Cxe%q(ADzIfa{+`M@E%inz0wcI*3YKA$@3N1G=PtL6I3}EKjM9oYD zvO<D;s_$B3WJ$uat=|m%uI}F&ii3_|1jR&$NMKa9P<x{|E21`dTqB8K&_So&$T64}Zh92fz=SBwt5y$Mk&t7Yj z#-WE-o7=-MS?|4VHk_~{20d~t=QioH2M^!>=)J4AFRI!#O>M2s`f2MNSqor*@^UtV zWRi#@R5E~Mj72QIgi4ACh76#&*I2_y=oN5UgsM6O{^E=0>#N(3zy1xbOwaMS*>u~t zzwoV(*Sn2x>&bk&+aCKa9J{zb9Okq3(ZkceyS-Svzus(p-7Zhhr>oXE2Wt2GVez;k zNSE8(mQMK#4*>ZWe-8knJf-`0a!R3h! 
zGC(AtgYfpSYudIu4x-sc9GJ7j6N;fn3zU{Xft7*&C-x8{ zwveF@7%TcBxrVaVQ3b{hfK^Iv$PXU7f!sd8$PNSE140Eqf2V>(1VutZEhd`=2y@{N zgkTIn*-)?d63v2_XsN_=zOo}xkmoI71ZQz9uipV*QaBMHDYbc4^%MeROc;`loXd7f zEqI>D$YB)!2L+|vz~m`}+!H8CHV_>-UsVO5ycDFI?7}|g5k1Y_g?t4;8>L>LDj8Hj zKoMnTK+G{LH)klAvlOR9M8c9{YN{Flm2tm44E?ZLEmP=SZLOggVpCVD3@X;Rd?Z~T zIRPpW8A6*P6HwR36y?zMA3l8V_VzaRakiLP6lLj-LyTf-TuhsZH{L+oOv2mSB;>31 zPyg%(Pu_dl?e|aLf1J0e7_9Ero6GfXGG7MeX5wQSQViR};pOG_>BE&HwpIQ7%`5M# zAp`*S-kMAX62+8j2w;&s6c$+Jo~p`h(6~BUR8U2P5dg|SVjfG$pP&IymLg{;7g4bk z&3+Tb%0yIHWC+8dJJyqhah8n9BiTg4ECxVhvc9&IjfMb1mu@%V?cM(Qo9_BH9=dM7 zANnB3oU|W(@X`7C$$EWLx0Uymbq>-%hKw=J zdoU(fmt+VCjdf%U8l%}EHL)-Q8dVk*1VPZk?BqLp3>Eq2G}ebM><>LwlP|6}*Kv4r z^=4-EuYKeF)y4Ub{`mJFz5n#(+w1d_)AjXcK5g0R)5q^w1A~Zbs*2#7pL|;tKl}WP zlhfssXU~==t+G06!I*eB7^0N)aNMWRh3+t&`WQA9B^6g!&0*WsCpL*S#scEuaA?k+ zZmzDR@9J5#S>G%kihxm5V>LzJLO22jLmrcaE-JG%IGfGhZf^}@Wt{^IqP6p(I}oBO z^f7Jv!|h?WI9aG@VsVBpCtC`aEgO0d`;@dWj^AjBsN>feh0-xh<~huF zNq!vrC~{3KkUJ6rsUbu}V~7xnq6`U&YsF|5h$DJ6@(Ms`$z%|Itj(#=959k0MMR}z zkS=cm^BtePWy;o=GQf7fr>s!BP*~qG-j;og3Pz8G=_buyziJ3oK=VcnQ6Lmm1DHRv z65{X0rR8W!my~}ZB~_MbGSSi8r>yTOKvhBOa#EDhDUi@Pqaq@Bb$K%5mW|Y$;N}Kghel* zAk1V4R4`A>DUauwbMGP|WtbjSvsWcZZlgsdi6mhKjgmr2t+hxb3M?|JvjEC=;>b}I z1!B@e7v5YRK7W0DeKTx#afrhZk^q1e#6CvBXlsA6Ts%IzSj|pqUj>yu#$hk7#R@Lb$R1PLHGoOoZjBBDGn8u&INBeEU zKuy)oT0lZutMSeGW4Bv}Fbp6hQ4DtJK;FP~5{yAav#hFRWm?zoH6a@BOo$TsWHNm) zTfFz=;o~Qd$nbPAX&T?OwXv2tC;%BtWu}a*^+XoPkhNqT8cSqAK{)T`Aj#ygC>jA_ z%6pOXor-zpJYUix%8BgJ*q?s+^7VQD~dZ)(SZ4WnBmlw;kl)}{N zgH?0+@{9STwN3N()r-gPeL&6*w{f#OEKVM{)^^uK)*%`tbk%0N zw_Wd=iSx~Iy_q5>U}LTIl?dVTG+kXw=xFA)>#GI#Y636;dbD*rS>0T1If-{o=tGFX zM4C+-bQf3ayJ6Q)R+R%Z0+6(>n%#N>9>2VN+3`?wa@Hab4sFjtQM{{|WrzuZ4t;lG zPMk48P*qu!2u8?V$3nmK0fh#=FOcs?u4H5Y3T9(+8-Rr~$YPn1Adt+0mf|v_5ps_~ z-Fi`-Ree>dG1uFTsJYwS9EZclAAa!Ji_gCEjjy5f zbz`izSC`w}p>xh-k{Ac;rfhV(@9uWpbT;vo>xX!~-jlT=Oehv@@i)!+%%UbylMfJ* zKxB!WSU6vk?+hIP2+$alXHZ4hlF8?PTqPu7 zjR^&eAy5WEBm=@Apd9hor<+ax_Ud?b759fY^uxfS1c-;O3ld#bE#{MYQrF(~Jgoa& 
zE6LZsZfspmP+^eNoSsZSemFFwRpr{+v<;)J$TfYs{p{z%W^KN z69WfBjA~J5P4#rSdjH|YM<0K=ws4B+xmKiM2$)QTK1yee%}u{3 zBvDH=q}X>|>zl*SXC9K75pcA$(W2=ufMl^;D=5bJC1J@1Bi4Vv7AdS zJM|~XsPV}byV)N-V?Hb@3e2K_nZ(wyGQzSlGPA!KTgigdf=OjJP#oQ}G6PJ6L|{hq z=Zv4>D6GV>f61Awz%HNwU`Kl##DW>!3qSAAgci6ar?z)KN+43y{W)im>Ie_q_Zg%5 zrc4s$W(zo5k3s2N?s;M?K#?Nl70ele7*oTz{ZdUy9rp|8OQI|pY0SfdW#hS_G3}4u zm{hoRGDb>jmK38W1E6Hw`gR*bn$D)I>}%T{JIj@Gm9o~=R25Yvgu#I0q@)%_Odi!2 zMWDXxRe7BL`A$tLE747n4ci zL)csNSn z6eftzPEMX(oP7BHlLwE^lLbiFPFoOh&X0@nkepE(*K+{mI6vOj28{uL#GEU85hl`c zwd|;pNT#$l!>-`SFv-7Ymj2=@&1V{N3#q$UV3?cMbvdqpv;v zgFpPk?|kEzu5RuQhhcGYG9>xoU;bP}af^EeDdbm@nv0b3?p}Ud8ZRH`RgYvYk-0|vdyLexX13~f)>am^9gqfME%6Ab# z(HQ&1=byEciA98%ys;Lcu3U&Aj{rsyV~nb#A)YTEhFgvr@^bRTNdY4Z0vx-}`znLP zC7^O4T*n%@XgIU3DhtE!Nzf6%81au>9FOflA$0XVMwb7Um|ftXykQkYl;s`;%A}jh z{TJ`ZfWlCIh5F2&@_lOp0Pl=WDRTe>CPV)@4diORj4g~hfm|I^8M+#kl8PV}i`u`m zkfnndyB@@%3I$MuC`B)X!fFiOQ&B2Fq>_*6o^a56w}-s3zO*V>D)a)Ml~(G0&9P?L zkCD-Hi6!^CSwaGY$fBT1gq)Mhn7~K!B-7|+VLTVSVoaVmf=2m`G3Rs6iA9C1;~*;X z_U+{dUwg(XC}=EIzCL!xc{}Hr3>xR%{%}a3`>sz4j9{ISp=_|SXwlMaHaiUasB%7A zq&Q3_RSE%-h9R7toe-qT6A{FeTwUF+?`E@kUHR*9{MwTb2nLWrqVM|Q&;{}}85;WG zWVwuzF4x;l7p%jR#XN+xIdlR>6*c=aBjqhX%uHkqk+&#{Q7|bdFj;kxl4z07pp>c} z6f|)HK{7T&VG3aAo^i?Ba)3r7m0b4|Xc@2sx$k@PhmOW(2Dqp#L?j7A0#>D@NW-Cv zciZ^(E?sYqx7(ZTVVA-#3@Yr5TQ26Sc{{6Y>!KsDzFJ)TpgQkDh@7HDB}=Ji1(0Gz=@qyGvyOay=#DW?ZS!=o{OHNWgNGMSKYUiL zrZ>keinHob#CnIy`5v=2Z@Q%MTSwOBWyNGo?oCAn6-~Jv1+$5 zA%opXAjR)AAj=lo1?43 zFkm}Z@7dM~A|G!cty%gMTaw8A4tCeLXcfV>4TC*yg0*Mm69NoqSBAYajjzCZ;{|gD6%@^wjbllW}Maxs5l3WxE6?ALt0|0Cx zk;mPVH7~a8n3Y* z2n?&a+wax5$z;|KLm2vLb8&MxZVv+sOls$y{o>8#ZeTS)MgXPu6?3S3#hg+SYpk_M z29p4jCW%4pz!JG!f>M^|K^gZQFYfFIlu0I*l&`1U4O7kshRQWjN>Ipk3>gw3AQS`^ zNi3q8QbJ<^HS5QDh&Bs*6}r1^_v*I4S`X{PW^>pM!+}GWxOVR3;$n5SXq;M*x+0VM zBM0MR5hwO4ej+}zENPsCM3(g>#%Ee^*$gvOHb+vVN(jHK6 zLccy9C8kG{*?X(gXAd8*o<3YXdeC8nM95^EvxwFbBn1FNn7v~WOU-YMHO8PZV2l!hUE5a9)j#^_Pc9xl+6=oM;&C`WJbCzgzy8~g-+R^%yUJpB=)J|$vt>UVCK?@q 
zgpRC|rS^gM5aYhasykMxjcNNEIjA5$OFPp`wbEfBT+-|0e6Gz548*blvH)qor z?+6yn{2UI)_0^^K-hczf?Pk|Z=4hZh9GbdH0?ryndl>p5rY;P|7*?^?Ck~gl>&n*i zdNSgg%6-s`{>10A(1R819=@DMfwOZfc1qS@Hq^NaG~#J@Q_%gwi-n zg?c|HFhUWf64kNHFQo8T_>X-+`C-Wu<*Dyna+Jn^nK^}GfJiBL|J?szj z)(@e#23f^{b(Tm}ltj4?v1jcDj*N*_6heyDT4pu`Xv*3IP|-vRYD{i8v)v~lfG~^* z=cv6MaS5cn$IEDkdH9kv@0VBRj5G>~YT~4dqQm~MeRCt5ZrJrV>+Rj~*mD#WgVwud zGMg_RoViNs)}5`|xr8^Lf2vioHNmu!b^>Ue0jf>YZSk#z`HZcxhCrqA9*D>g0{|gg zbn~gS%_fG%RThDaA?FgPb&f?q5+I{eh+1h?Sp&eCL`#R52J`R#&G#QX__&$@Fyql?~Dm@-ZcgQ z4I*mZ*NJ(I2aHir%=;C{W>A30C}xBK35yE_g~J~~_9 zMg!P)VYl6ipszeA$D!}io{&;VL6f2}&aO^o{ci0wAb>=O%pM8A+Uc1&+<*mBHGrGg zMI!IO#BQJVH}>SA5D7Kp7u*FxZ7O@VTHNk#tzwJ`1?H29gjhS*cl|+zUS$9{hD6S? z!nRMtvA?~3@!o^S%h~MEA7V(X97FWZK@tEn^nEjHkHH;=j!Yhi8VTOfgr+o6+3yd_ z9x*7E!2qiF$a2Q%iLfXe$NAulDg!`vk0`vODvt~RvxoqTN;a3xC4bgm6eggcJ%G8r z&YWOTsG&T{XS~v>Rt)JDvoYJSa2XHk-y}+ zQ7TmoIZNjQQa~>hLM?(yl+q1oCdxPVCHnoo_A=<7#c72FZ+Qlq!+V6L&(zb0=Sp~7qN0BfL zr>iAMnog$HR&jl7>gmmPXRS35-+ypcTe#b7YTriX*SFWcS=`>!}bNMaz)H&cwP11pORsoNg! zu6KNl>-F*aa2O;B%gi`0CLLpU7~0uOeSVJIg%4H(OY|V zcKXTJ*N$x4sv?J!!+UP;toLe7h*8m)c6#i3Yivx)$0Os>khR7W5usMjEaoTwm;duW z{{7$jo&WjY`j!9kAN|9NkKPy1tCv^*_5bz%{@?zS-}vN%@BA z<>SW>cWL+Qzxmti_3;-TJ^33SeDq{CpDpLh_Z}}FJvvabVP8d$%&eKC6XBHaU^GO= z7;B6%j%=&8M7nK_!) 
z&7b|nU(A<_C-1*^wLf-;{rSbIr_N96$4?&p*^htl^{;!=)j)*( z5DnQuC8#E0>j+g{Jt1G|anODTJXO;wRcNL}d+(~_=595c_Msb&w~+&aEYIeKrK+o& z-Q8`sOV)^HX9EGm{o#lz#>n=#_BB0v{2*PvK5n+6m{g1a5RwwGE+umEj@||5|*JmEdzJCUCEg-pJ+fR&4H*HSLg{Vs1!}ty~zb)3<<%$d3)8& zrew_Fc&Mip5(r0CWoA<(+on4lj|V;MdjgY!s2~R&>s_9-?VH!Hrn5PSct_sb6ry+l z(D|&LR<5>$XpVi@?zhY3!x#sNX|`$}o-F$PRyeB2&H9ps`k2<+!*@Ua(7^G*#o6!v z$&b6F!s%o-+aC^n(wNj)PYNVZ)y=UxHgz3RjI0W*h#+i`d~LiqNl`Viq#!YHui64A z2qRb(1~r*fyk`~5wT?wqm3{QajGlW=(u_<(u2PhYQB`9tk@e17XAxlNdn<}Sswe}; zKBmCKaTt#MzUSNZe%%c_NzA-3ZaJB|s6&6;ukQ|Au>5klSAvvYHE9RyrRiV2Y%CqxugBTU9RX8}w?dv^@U4pnP}8ITt9+K~$}?Y6rZ z<7Yqp^8fW;|9>R#iy!>t`@i;U-~P@w(c16-$)Eq^^Q&c5{p0`BKc39oxBlukKKtzR zKmPqc`P1+J@zs|v|KZ>L`+x99e{{I+zw_w9fA-18Uz;r^Z3ENh;YT0%`DA~*vksjj z4rshb5Ku-#*~`-qWzttvS;e9%NRqr5vM_^1f;Uo-s;FqlS%3TE59Z7H&H5&#)DOCNZ~{r|x;|X?l7s{`C0F~y)z*;?T@TVL zR?U1K_E#MCRG(6E!w}7K=A5s+M=&4=YEU5@x44_qqB6FodYXLGOy`o0%DSAbo;;Sz z-THXgHqK3}+8CFja7sYPL5RVUamMVs&U+t*u$3JkoYLuPGQYjsAQ@8d*0hyRF>Kez zW?IdvDh)#)la!dPpo#`nO9B{6)pDxG_*op7GvHI45J$3I2n~)t5ch8yi(+nv-l;bGg z$8`m@3JUj@{gA!cwe&%S2frsxGv%vaO)qFh-wcGjMNIQxtw0XhU*!ParOUA`80n6p zCr3n)vdN6|IKYTO88v!e^_P)kV&VLnBY!Y5^Tk9PK>-O_v~UJvk^le@0fdwoQy#+r zk1hqdyWKo}_CW&njjtLnB4HSubBDuj+Dt5F=n!eC0N4RS%YA$`O!~*-n7$@!gjl9oOyh4Hf{Xo_R`sUf9%)WeN|0f zU%&a}gC~`fX*0jNT|d9PiOF1?Eb6NI;>9bfYe>NwgP`6zZ|zaFvVMEm0bod~C`pn9 z(^Tl4Rn)!@oYD}7J`6()t$3CsY|%Is$mSU$VzSB&#|vOg+&E@dq5|{fpAZ5W)MB8I zfM}elDxWWNjuCS~7IoP7;TU4j{h?oP_P6`P`p|FVfZjSH@BCsuUpl|Oz1a?j-Q9+R zPNz$8c1xx?e=vWx#kNJ`d{qTeATw}O@6ELBQmB-PNP&kCopma}%+^<=;3{7=(=J?B zicMWplDDtl{P<6Q^ybT#FJHcT{`qIOcelfKZ%|i@+JjzRZf~!@AB8)Hv(>7aG`DxR z|BwIM|MT(tPwv*&KmW<++wIXZ{L6p!oBNxazxm+@zxv&8FNZX@*0sKU_GI-}$BQeE)ZU_sM$?KK%H@zx>&!S6Azkd2@0yYfM}$PXEInegE0V zA8q&h*>VCvFJIi;?&B}~%3uG2@p4nVB}j`bfK*$?_2bC|Khsk0F+o%kigYos&=}( zdi!R&Xdhjy1j694Hcq29(`LSyA2BlGv~tM60>*kz4nPI0@n(BC#1u|XE}ot}dVPJV z0x^cW;~o%15+L}>&8M@l*`&xw#Z!R`=2D_~364cpUrO&%*Nl>Q8f*Q6|BPYhz6XF3 zg3842{S1=8I}YHh(LX=4$Ylz|U+Qc`r2_Hi%v6lpGR%7aTR<3hzG%rtIEL6U)D`SY 
z{wEc9j=&aal#*ph&p1UpX2Ejb7doP71ohnuVEK!Ls-!4Fd1p?6sPbsay+~(#7K}a5 zSYhhe7eLMdnW)VgSj;~ZC=28>1O!>(k3=FP8du(1LB4X>&2)|cfe5IY;-?{qMpT%^ zAPK7hvd-S^w$}Q#ZTm3z##v{&Ltq{Zk%)Njh)@M$VzNe1t@p=aV3CkGg~Xidx;-8b z)7kv?cI&Fz8aH(N>9ir>nlqH$NgS~A6-2D`RAW;2+Na;)nc~WZ)1uE?7k04 zj7X{wAD^Cf``g9IgJ1vcKkC5(rt|aD&p&%Una^+cdxJ=zNy2iryxZJOCv}KJVjaB` zz@j-0q3LW@yUG|QlUTaQ`#yA$ry{l7$Mz8-q%7@JBBLrJm5(Z_luy1Ol4!P{MIc0T ze+elFp|NCb)3!)p4M9>&EW$hl5N1ht>;3xBU+)gvA#CFi5o&8%=UnCX>G1T)YO<{0 z^{f84t1C~&VAVup>$YAzeK;t4W{6QTnU9K&TvG#*vJPDcg5G8fQhzjIg^TR*=%ek8^ zo2lJ#6JhSEbMSxjo8SG`NAI`Fp3G!6HLK;5Z+_FvCeqzn!`WspmuvmS`+UO)fhC9pm?S$X2=bb7P78B#odaNg~SPhRRJrEv=5BHEn8iSn6#D&kyEg1qLZ{A4L#UQ;O$oC^9hbw`QFUse`Y!bfJTfe6CEfe%w}Z9 zsxqR&gi)APE9YVwoFODhEY7>bVVj|O0~nGRYa;Vze_UYGwpHybj*?go zLkye*MhCsZyqK{h0Z}#O!^xw3N$CKlFVzY15QgwW44`tOch^&;R(3tjG$@2lM&k)6=g# zc=B*QhrVyTTb!Nwv(@zL&nC|v9U;WSz;QqoBJhKy#DfAzw`^2uU=^i+;t}>D_h&s)oQoBo3*nj z2uvz^cYFQxlgA38n!5FN`qAU7moHa~+Gqqo7P7{Iq|_Z#Bww|m?-0<}Q?d>S$a+pu zBuPpd*j7dfOx2i4YXI8me7k!)t^KU6oT1I#TBptB<1K#5?h^%21zQc zR+@oBjB$I|w@tH}&vu7H&?qE~A}T4QAfbs|)m2kf$9~9mCij+nIEL_Cv)*qFK>+5_ z^#MV#_}`2%UrMMd?!q}VmT;D9{6g*Cm&@bLbzcgPP4-vNCj=~}o`AXLrP1&i)fmLA z^`zpBgQKmEj%v>=fypE@+y_h;gK}wiBsV=a=cH2iL-uDbJa670GaB*~9H;>D_x!TR zJCc9^IeS$BWdo+suqOw8BtX$D{(*A3-kq0%+b9?!B9pz<2muHL1cBuKX<6l}fJmA# zeJVUc&deeuCyHYJY=}?+g)^N30LY2afT$)GZz@E*xw?r%e{p^h!q80TbybHrsHm&G zf}-N_H~=UQfwT&dXU%NC?IylC9utBU5Mzxsh6F=5#2BWN8YNkaplJx|yss*kIHVYf zXf|8Cc=ei7dT?xKQ-UA;>~rI-tK0o{-&Uq++n1Mj z);kea&~`G}cgM+W(hq|GT9bF>sv%+kA=3#{-y1{TI+n471=|KK}3n1F?3K z5WDrw)rX&aHw}C|b{~BF$%~gamv48!@^_y6zyHO5n&xe^247k4aMHHSd^~gk*;HOQ#UXG^0z3?{a>0Wn2R(g9)ZD-r01$fX{}QWTHjII~~(O)sbtWD9dt zRgfaD&$9CJfI^xUlp|NY_r&f9^^DBfC^dZtq~sqE0*+-?X;BbCkrWN$xaSK&vc_}7 zp)zTl8E7mjd;Lnf8SmfGN^YF{FP89XbAg8@FEqfS=NvPZ0F2|v_rOj08|8%xc<@!j zQq2IeY8H!w>ZlPdk5tm4pauC#r&aRPSZUt(KPB5L0Wu32QpjkN+^=cQMjRt{dJpH7 z*`?C6jNldvmxe|(B}O%fWR2VJ4!)|<5|KrO<8btKg=j(_&|54MwwMNM%n;IUH#lz%0;jm$>>gb_y4xMY5N4Cf~#L_;D9lB&uQVoFli 
zcC$Mm7|qMhv;^_o8}?Cl`))O#S!+XxgEK>nDXS62VkKjsB{*!-c5ng`&a{c0M z3Pa=Vq;4ia?P{e;x3{-Jm<5!TnH`ydQ%GzO^A(#yLV%jk3(V9k0WNG^lL3*5qbIZZ zgLZzlm`+#I>0(;7&15k*bv0P*Ay$+2yqVhBRF~5S-}uk?&Svd3tDee2!$=T5D>uFwW zk0x)#4oBOrT-ETPm9=b)hQSy_i?(vSjj7+-(AjzcKww)z$I5=<{iF#)Z;Y|l)pc99 z&2kmG-CgC3(@LvJ975vcn#pk=jQ3;>88&AbHA5@9XO^7?$#Wltzv*c_bg-x%}V9%6Cvl9v9YP z>4WbkcQe6T$lm-T9H+%`MD|AkQehrS4^g5&6kX5w%VGpHJ{s~IgdfMuv8Hi1ZQ34 zTSO2}l0}?(b7jWYP{|q*W-f02x!K7KbvFJ%E@^CJ21r$Sq%+85St=PoV(Rxp3^F~L zgc#8~u-G35Q8k2dNOk3mMF|W5mTXF$v(wA%bz<)OG=z9K_TJgP-=C}=^xa@;N6t8F zLQHi%zh3X6@}h0LquBM0w}^au^;SZB^5pzvKG|Gdg%Cs1eHi)#hvVJp>HNvV6~wT< z+rNH$S66LF5{7WGoDE?ZQqlhzgWg&Man|+SfefVxDY0`k>p(b)dC0J8A9b@koS!V> zF!VzR%mR`J#|=W3qUXVmtO6AgGGwh4X3eId`Bab1fid?kC4j}^DbE5B_^PkN(+}Ri zzP@1)>#G=}pVkhHZ(A0$gw~L<8JN|88gq3xoIQBDI$NGES1Ard9OkRVBAu;ouiHuR zE=m^(-0Lr2?}sn{`rr9G661P%^}GN6xBlt>@M{>*k+I@{&9s>qORjPn!f}YlL*K<@ ztmBj-OJrsPh~N!WijbrtV-*{qnb3*#zw+TXJ~@53u%_}=NSgY?Y1=O5GpH<1ef{Xv zOeP%q7`j8ZtL8H|oh+U{>5Z{Z9=VUd#ued^Sb@kOqO}$bh?;C^r5pgj7;CH_aR4=h zL}ZM$#*nd+xkdqNh=`bTqM|};QC=dRw><$#grVcRE8X4phyCql?<}q+)0fXbuYCRZ z!Nb?jKjW_d+Ix>@E%MQZ*c{q|{;Pf00$G0!OxLC~%aNRZ{%ps%{oip9BQ^3RV=$u(!-JCpr@6BKS(fko{8o*iC zOxmVZj+_|53~`uN3SB`%bORUSH5c6npHyz zCyUkH@p!$y6NGv)Y1&p16lpP;J7=r2Rg~DLIP@WeU=4y*h7=?yc@_mjnYm&HkYK=8 zzO9@ehCZLx>>Y_?4#*)SH@Q$yel?IYG38iH1<5HhWmwdeBVP)6wkgB&0(o|l3sPO; zb&lyoGD^I11zhC(Sd>PHh+|ujadD&qedOSa1wOhL`xGZP`O2k|0r`TecD|*WtMnp`BNf8jc zUEfqqRoB*91{^|2kwL?BQlqy3D5^t>WSAo-PKGoLq4Mqiuw?- znTH_|7!^`g0-aPfhfsM(R9~;}V(b=^>hZ$hQh;o4R6f`;wrvVvJ@#i*&c87h@}ea|Mf8=gn(EQh zN4M8^bLY|7Nz+c2)ogaQUH4zSctJ=GX{Rcx?CjNUJHNS|`D!^?efj0{vj-0+lV&oV zcgN#?y{QzuGt6}J`ue~7=l|x{|NZZtoy~7=FJF9l!vQ^lNfP#*n@+af0NOw$ziLNEX%Yu#Eh0n_P^CmO)s?en_4K{P#Ybn4rfA=P`0yJae&iVY^<8s*29r5R zR8n=8J<+l;4^P2@T)yhAgPSZS56=8*rIXgpX4OYulgSj*ap;B=1_ZF)IZ;xIK>!ss zmoR9Ju~nA+kTHe~VTOnh5S5idk|tvQ-nlyTTvzzn+vjKB z{C5AJz7L4;*xAL@&1PKoq2Gwy4$Vy-BGLR_OM6eM8H}@##rO)dQuB3`0MqZM)qhd z8s>!Fgb;}^Nn#dbh?%yB<3ypIPMvayAqN|ztoSUVPb^&fh;9NXXUm;pNvTu@grUfA 
zlrsFHqg=OuDA^!+tdKOzcOa8JWqdOKP6b9W^*HF80Tn32HX}u>1Rr*z;<9^}* zM)f9+&1&B6i$&BU@3M$~|$oR^~*em#)tGce|q?5MVZFkO9eLtkY z2~r;tBNAGbxR}kmLpPaBuWxpB&E{^qLyPY}dsrL1 zJ`RsBp1i%;0y$27?Bh2-`G7>$>;28TJ4C^{4&9LhpPkKzzVCyGkTYmND(fxT!@i$1 zjTFlz6;7&P08fxhDcXyjU4%KAROea-6J6Smr<50KjZLIZ#bwJp1fodDNp>!w=G7w2{J!E*UzaWXg6S+hK!E;Q<7xp1cL z5BmVtCdH_OvDL{8+6E^UXU(9Ihk@pk#?$2N1g5Q2l}_jG^c)sTLgmO3qajplBN|)P zBxq2CIY>k#vc@=D8OWp8=nNp{AEGfpWb(}-BlVVj9f>5*T?nH=P$d8o#^DIttM2Mm zI2`}rKm9P0lf=(I`z&E!l!7(!Ikm{<@6Q3WFlvZU_N38%cg2u9Wr1T%~*rF_gyZHD|afQ(}d6dsZF?2QSd zRkDtPx-lST&_>4MIg|g{JZL1)?u+jHjh4nE|7pAr+fekT1#>Ck^FEdqosQg3kzr0{ z8QS3z2g(bK*OnFz;2yyjC=#L(EXoDo{mAoZLNYc?stWJ$sC(___~83KAtOFz466ui zVay}BIncc0D<7R90KjZ^0iY@=h*~u$rJl{LP#(($h%tfQvm7OXOAb|%{mW29P_xUB z2uqIt`5ap7x_#gE{d}=t73OTi#FQ~w7DpBoVn{?D$i^s@s}8#(p*ePGyEzKFK6KN@ zi3kE2pxAed^A)7vebw~=5l>H+{b6Gb4Qbf!j(rFxtHpHEZnx`;v-3avvp+VbRc8)| z{pn(Ux?Ejfy&^K}{l1=~@|F`1 zk%6-^7Zt!F5J+W^5c7VvVTc^%cnF-dOx%sS-8>Bd_emZK2R1GU%WS)5t%R1*K~jZc z$Z?MVk}=j-&BUNPKY#f8?b~WHMI!4gsrl%W54<%$`O%LL+r!Yu2-+qFfIbdyFK_<% zKmG8p{la%{Zr=Rtr$77N_x{Sazx~lxP1p{bP500K)xZ7Wr>~O*0CJ$nC;}d=S6zac zH*R)u{)b8Zd#9x1Tcww|s;IiX5bnbR}#;1T#*g{ALP41j8^Q&+jV zu>#BrR#S{BF*=ii$&!BrA!l6WZC#y%fEQoZ;rdW$)xcfFE3v$m-EA6GoQ8y!FpPpp3F}cpML)O zILJ3Y{?+9R&<;~PB0n+khC5OXdxLmAG)l`+A^vB*v6tDrkIAT!*`1S&vgsS)pS zps%EXoNF>+UD82*6)d?86h>QUB|<9BM+%~tO~2c2?n5Cf_~#%|v&=F^lIlvGUWSpou5 zjLEnvZVw?PYaJS`n<}K}s_O3M&exVxznC^L^eH6^wsrL}+)h`s-F9z`owe=R^5phv z-8A-Yy@^rVc5=PhIl`}h{gbzEUje{oyX%KwChd?y5M54Z`@SCp1fV8Dk(1@gX1m)Q z;=|RHKq^9Tgb11>Q3)Rg#DdN}%v)=WVHQv@l8|h7?1w(a$m%pDu^C+~nK@Vgl;Isw z$Xmm-;E*OGZJPNQ*4&v%{-gi_fyf|<_Z2dnpP#+Gyoxc#5OJzxsjB?5kDd|Wm!E%r zxY?=50&rGO?}lM_*k51WJb(V`{B-v7pZxguhVbP4{O3P?`J=!5;ZJ}5qU$Y?saYzf ziPExakHc{QnS%Lfe)`d)_YRWY`;{lBj~+~bmd4MWo0+OMPWpb8JLjOP9IyrPbyK$!-&A4^dc(!ZT{#IL7(#l_vm+i|Bjqe_&@2kzC{I#(UGPFYDz8rkXLYjdh@TBsOqh zNs$Bz1?0HjyjsrZ>ZW9;gRIrm_42`FdA7Oz^3Cg4kDqOsXt+x**Q`8ju1l_TZ zVv)d`5K|&Eq_95jCT%@+)L5gem9;f$;s}W`sX?s0*CgH;?`!8R5^j&jT|WpSl4X{H 
z?TZ2sq975l0xB6S9BHv+$aX-b%ohYKvyk`Keugyi4I)&q8c|K|E94havFct`)!hl)Rg8)FKjRXKF8-2^DELdSh)O(4yj+J08 z>a(TMJ$RpUir>>e>rR#@wz5GuGFwY5 zY6$^>oMGmeqOU5BcC*th$w@Zqy3lDeDq^|PlRJ%4qDuI&S__lLz| zI;0^C(YiXO9zhIRK==Ie##mrw0x+b?oIopsnV<637=Q}od{)nDms24NCXr-Z*N4Ml z7zR$YlIE*KIlGbpl_9(0?1%4=$DH%Wn`y@$3@0;nr9 znZf)7h!i1-SVHv9Y7Ge!4QM?85Q;Sxl8~{k@^#aoL`P!MP?0!a*`^|Iu`=i^dIyM{ zlBgmQ8KcgDv4B>U2#prCiAowU9`JY_ub&^U-VBHSkN)h(wsFR}{U)TOZ8M1>_{Psq zX49p$4gU0}KRD~>Mulr*^s~sQr``z9dXj-Fb5SFeFRqap`Dm2a9 zXka8@u53+Y6>xg8dVBfi*~McX#55?whFq|z^EM;` z8{&Ep9sv;@Aq~mmFvzK(;PZv&WGF1`y;ex!t2jvR4@xOaBaMs**|eA-uRbid(g*>K z7;LepQ~{x~Gx6QBb<)wR?!G)PnohwnKj~L6D9y{ea_2vq=!{@hIb=JkC-uH?A3Kqf z4z=t!9v8{KtK!EnFH&-!s_K30m-}3GAMD?Kt?ycia?0}Hdv14%pQ#p|Tp4C7ZQ1=R z7-JMvM3tFS*{f$knOWX#V^aV$quF=PNZCM?kXWKa)9nsqsBRkpU{!0KB&mF*49Bi( z>t#$b49udgs%~!IJ~{btx9M3SiFEzILWyP4HqNBBYF@v&L+`r&cs^T(l=>(NW-^&1 zj%3hRE;0`>I>Phw6Eynj#Y=U5*Gt#+%V~RZviQkQe`?6ytk=gNvv#t%xdj04>)Xw( zHFX$*G3dQ%C(}<~y-`bOK~)G0Ay(EV6{v|qOi_)nBd856r;~{TL{^a)d0^}Shdylf z-4HpkH~mx0!uYd>j zK|sK$U;-rX!4St-TPGngGofP3piwMK$ClJL6>!1 z106UFVF)p$SFhhLPv+K=MU){3YD^5>zK(laEtGh^l0UL65_5=(=uF zS#L?0$lL>wc_-~xZ9RxgCZ|-y*s_&P_#PA#6*Nkk&7Ck`RfNV^?`QMbbhpdv<17&o z5E%g&Ak80~o-EIAmajH%ZoB=V9zx#_2-36hWa|0i(Ra?lTS#&~TbUhZ!yV{7NUu@=c$Z+$(POcYU#Q4G_nP9cEkq^>u&H^zHV3|(;Efr9r| zI5}sEvn(;oPSWI#1Whi2YRPEU0{5o1PZ3^;N|7gG{cL}Ss{7BsnJtrZ(9 zBnci3vc!auG+5GVQrp&J?M>aNsg$fTi>P%*t>J7?jD|(Y5+D#76#$MDd+9dA?R9^9 zU0MH=&tC_{x~=+dKOA=t&sJam=%JPV?d#+IurcRtw(E`| z9_O>R?+=^J^~vG{k*rl;JBXb$C)ND)uz6K+=)0Yr%+z^An_>`E-W~kua<*7_=eR$* z*&J0^1XCjG{d978vu>s{Q_W7!KQKg*@%H9wIhnPy`Qdmsb#_)aA5WfotiO2u@~%Im zlmLMNh};kdQ0N2K#*?wf{+POWIQB!7VF(Hc42CQrX@VrNYK$eP((A~t%BW;l83BFJ z?0A^rfb1KATC_a|??(ViB*PL(<-Q4kf|(%USi?*4nwHG;YVlLlLN=5beD8ozN>{uG zhfuh1sH*pcb-v&3bvI>+Qz^*CYJF^EK($O_*^E445&n(Vm;fruqqcwCUtbuDd(r1T zUJNYvNe@KI!l=>=6m)159(^SXl&6#gjA+V9AjnCw?7JB!?H~^*;Fz!wg%v@`7ytz& z5lKT}B5SNOmb?ieFtb4dgp{N(3o$z9P!ySxa^DTplZk+$Aqy@Hy>pJx48u^@4JaOV zM+9t}Mg&fl=llJhgAPYM90HL?0f$CaMD6}~NE|u#&pvo>I38l;+Te83c+jZXDHHoX 
zR^HdG-|V;j5D6`ZXq3;+meXnblTY88deR40w8r{<*H!H_Bpt%g)*eMz;-e>zudnY8 z$1LwfP#Ge(L?YlVl2Ieztzkq`(v%)7>eg!Qt!5FFs1r`2b>%j@-M;G%$I3gK8%3g8 zKt(vK5Nx(z5-r9lkmbp6{~1v6y<&@MtpOF_(lwGXNfH1UXD9RNX*k{PcEF4x)*G@W zC8!!7Rg%j2M~~`BbNR(f+=3$@#IyO*0yRzRfIzh(S~jh9W-SQ~YOTI*=BuT*PFaze zSSxEll0>nT;d6&dBJ8ZqI&mJNMd-UDACKK}$g^0i93ul;RYd}3QKK-3B(%mlaB;ha zaBy8M^BPZAW_GUAiHw{1@|HBn*toDIlvG@*6wuaQ3DLO9)e?F&s3-vwSf~y7Nn>j# z&IviN4#`SP8P*^mMWe<6Sui%Q&`<(Q5&9j4P1;;-UOo@&4K!ze_!mD;sE;1M*Kgkr zLpncQ0>m#q|1|W62M;g2GuGQ`(o|J@dHI|xIK6n9kiY)%CqMktAOFjL{kw|x@sp?L zXY&W=ZMQofj@>XMAgbEwu{+j_74V=b8M4pbe-Bhx_~c^MOPYgGQ!}GTlx|Pq=*X&{ zhX58~NQ1KrFpX;`!{OQ!9(S8|Hl0mQR;#oAxVOhc*zH_%s-j|ysTY0v()S%WGh3Xc zZj-{%h`#vZ^JkxY#Cv z&}q$f4%wa)L0#yXO|}{?V)Y_x#sa-?0Z%ZSUx5@ZHB)9P#p5H(ZZcb$wdQz#!vN;z zVSWWFdsPWRb8n$sYN&V4TLq`T2V;;%geNB{f#P9_fPgGx-Hru!FM(4bv6#`^yDCdBv@D7t=BAZ-b^M{z3w^|JnTanf<<|7c5;1lz20}m*4^=Ne*R#6 z*bz0xHJkm72z!eX<@EII*oC+2gAipKtHc@wyU2uK3|b^d;0+{*2EkZb)KzVa1yw`_ z@dhG+MKOq7KOFjS?EAL1&g3h`u_Odi5Hx07R5}W@#>mepjRV6)|H;BAN}vQbYqZ7{ z4N3-GMyPHSPPA{)+0c_5<<$T%RCDvG%QfjA@yQ|IhXP=*3JZOA9?6=F+0^{DMfe=yx5$mhz zu-`SyDF8Uz%qAy1$Z$MtZm#Bwle@dy?Phy&x@^3Cc(S9t{NFoqb-5+~H2GOdb=Au5A{DOdpLaH(f5djbs$BEk<2D8)>#)|bT zu*bbKPyW530+Ui+f&$q^ab$E$1{n+e>|{glSk>b2Kiaa4MSOlbiU<)E3!m|KSYmJX z0V;?R=fyP&Fd|(5Xp4?t1oIAr5ttWIB+5!0021bmPbuG1fvE3PM~K-0B5R~Z4xk_| zTv%V#acbZm{6QQ62g*c3X0$Wl1LL+4EN~D|84!T+)EB+VS4_D95jk%GU_mlSn3Aff zQBnYFj7m{8r4$dl!*nu5LIp9#A!3Swg+awTFRcABw7$jc2d=;p5wRco7~rry=A}V~ z*ccqTjztBvt?HCg7xIQu7EV<)5iv;Eg%D#uzc^p7ZxGF~PscvR7~Xq&-X9O2J%63l zi|TT^u!t$9(+8_B-`<26+olGEM~@#|zrDM>-2+id0EmehK?hZeoK1!+YaF7tXboAE z+S|5vj*J0SP)npN-cU4JSv$nI-F0WvHiaPGqZMT(QG-CB$_(QufCw8?K8=wv%KohP z=pSS$g@TY7dpp8Bj6x3UNfm_;hhFozfk@WX`P!wFj5kQ&YomsI^Y^lpBJEICjb}SkJ?&T7Mm_V38m7HZ3C1bqH8U$ml zBT{da7%e!X=m;DcSLc`@F^aG;R*hjL1eX#TLSqad8Woi!!-n_Q!{zhc+gB4)UEOZ} zI~Xf1`i_0tElvsv}|m!A#F&%W`|tiirN?sl8grym|70J|5LZ|cSD^z6aE z`OV+_lfV4pdw=Ke|Jfh?&aAd!zrWdctFJ!@DOScWSIfou3MBwAkbosev{~=}{4aj; z^vQdP_4duxC%qyKoPm?srEFPM*yiB=iKG1FXwG@vYOo8 
zY(jtCBq!Ysv=`Jg0H&SJ*RS8UQ!hfz;sF!M?b=y$^Y+bbe%{ve{q|;kb#r>Qm^96^ z$LB2J=C~h3G)7Z-V-*qmKJE`gucBl!RxT`@SXqFKD6pyu2TrxMoP-gC2#TKoWE3<@ zV_1L)Av+5YWu05bu&F>QndT)x8o_taytr`em31s<3;NIRgrtP1xgWs7u9gyC0HNU9 z8DA-Xcl?XFlGd_yQoce+yt4wm?=$kcF3RIvSS%Cr(@DQF6_8Dfa&@kX%w>ylPK_dv zSDJ|cGnDyJ5fNn;7ELLCtw}@_%GTL&T~|)cUp0(?f*&D_CnVFrR1{TZ{~4AUxiT0q zS`8rPl}bc;k}^*`0U)8V##v*X_10KJ){?Pgvb5A7k|HWp)*tqJYpu1`ISZJX5iQM) zqPKp(*`yGi@s7MDXTbD_E+t`w&31b@98Q)C0ySV#NU9JyvT{}X{eCZ^m9GrweA3p= zW!XH7wsi~2H@8To8JB1OGJ2O(BS6ox>*Br-#~6<>MUkkinEh`!UnCZd1@dG)kwGLh+wH#V<1jEM z&U^yLq)AW|Rq_T%P!;7YILP0YiLZO5Zw5&fARC5=fC!F4Fi~Yya<*!!cG6H;XOi;v zF$E=KgaB)AX6^jZ>HNuQeKNHZM-`G2v`j|OLXxDUtcf`ZC*_FD6_R0=e!!G4vBpTE zBodB??zj)3L&C%15Mvtp(04Jhq@;wF$RV1TIEftkZa*A$!(pEeyL8yb{hkk8iB1S+ zo~PD5nEFRcnOFu^g9>a>L83AXb3OwKBEl-!89WI97z@tlf5lp{j+(}_Q`64;d{r%0 z(6r)fCJV+Ql?alReTg9V5vT?gECR-!`mO11!}W{1FMei2gyjCkZ~XpGUIa4Mg6Ob6 zu8${8EB*fZ?VH7{oh@4d?S|v)%NtWo_Feq!#fwir{rr31`xOcN&;IGZdG_?fPe1+n zdVTobFMnIZaWSny({8;HPJMT%D|bBZ3)sB7>yPU`9=FH+2jBQ`f7nfDbE;>W5Xpif zhs0gS`<)_J)AqRAS`D$=8&`vAd$ly3)~$=(emM5ilT&rN*Rm5>*>z?{W z%!|2lbqiUWo~!;`_@6s^%c)>DRK2Xua*jfMpA=(WHiqA+uiJovzLT$I4kGBD6z{qE-~UeuyzT=S9Ri2g2k@ zMVhAa&L0kk*oWD2Zh$9kWr#&$h|&-H^W|c{yK8(s#JG=LL_T@2x>|4EZaTKLfB>?0 zbhq7AetLU--MY$Jx_G$S?6$8jcj!UMhLCbr%;f?rsI^IzIiLQCX*b?Ca?plZ%9gO2Q`uCF^XZC`nL7Q?lMB=1l3cs&Q5c4PisG=o*h>nf{Ib?oH94mU zDk2!IF$#ndjW=YJQu0QXU{6MKYsB>V0iHG@xJel2v_^#`(OX$^!(zV z{P+LNAf3%lfBW};|5tzM+(BG#j~8FRxVzgtfARMH_nuZ&Eh^TV5Tiv&n|0cE8&wc@ z{_x>{_>JHGaCrgE3^ZAXhKvOEs(jqwv^71yvvq^};p_P1x@bv!)8iUX+^Y z>DBAc=KFm!-2?Xo3}BODGnpLsSMBVCd8nH?&YAn|Vlmw9wu|$#*0a@Vy6Hsd0dLdxr96bkOR1miY6!%1!a$o9hv3wi_s|#6)-0mxL+6GBbBQcXAj)SWWBcbg|j(UtjOUfU&M*Uj-PAY(=#| z_F#gf$lz3>Xk*C*2u@NIAwvY9Y7iN~Ay$OR+5OP3*PDygJSOIpgcE{cPKb)eXKM*X zqoD6#y|RVjUI3x0DguTK8bts}oV&97rYdtPL}ZN$P#LNhHe~a~PlO_jgybC{8e?7U z!?a3=!>~Q*5I7_bf&i+C5M#<#rV7SE5=Rz+J=)ZD%9>)Rsyc~8)FhB35_tGGPXujp>?914O4TcfYK;8!YJZv~8zuzh9r7Oh0(<{oCywfHOpA=a1fQ z4%>*+<>~+GfBpAgzkczX|L*tx>aTq7-~G-XpU&KW{ty4&&Gids>*e{AA3y(NB(s<- 
zTh~;^Rcfb4H!=^(4L__2_^xKYFCB*omnl__-1hu zMXWUvL=(7rj#PKY<9u50c2|Bsxp?-`=Js-XcjuM|n>yiOOd|lzd>*>X)Nfr=3wSp< zZx;u@-D=vreEG$*XCI!NK6vvcUR~{*dN!?^XXh7-e!6HEFVyajhZthy6cckE@Izt+ zRxSZ82Wk~k03a63(1>W+@>`JYQBOluG%Aqi>hWGQfA0~2_tk&KLNorJ5t|&&RFx1% z|7c__O3=JNMeVPyc@Cf;<#~_^M@k@1d6j&llF|H;Z60+uq{ZSCQALqR1hdAc z?Ak(5G4w!f&j^3kdqOPQ=7B5M&;nUgVw z2ybp~m&=8S8fVE`!Kn#HpV@$j4VYNC{Rp&i3b4;q3l5Ds8A!3LL01t;l+t$-* zy}mnShY4%U-Q7BdxR}=a{cU}|G=O9cAcd}9OlE!8lfk5+A3Be6v6$Yxx!xb6sv!Z6 zeZ8m#;X~v^rkzbLE>2#2`SyB$wAMsMU;toFj6^KO{ZB*^r^=AA#EDo$MMc#b0yK#w z46!mqF0T<%V~A4nL<|5;v)Sx6yTf8uN9K@{BV-k2C1X$kITu5qlx@7TdDQq1j=6|X zK5y=>4R)8ps2B~$X!gf|^m{z;vus_B_&xe5` zYh;cIMNyQ10tbf_IVJ|iNCK$B$|#{roIvfw8zMF_aq#32RINB`6k&kqTkog7X$(}R znoY1GvuNz3P8C&)IyHtWt7vR9;iO84MmY@_O#)I>C7XAnNf;pZ5{@9zS!5w&opF_t z6ELDm1Zb_qgvOex0Y;Eu2tZjS2q#G?6M6!OXsmIXF;5U>)_$j(o9^;;Sg&j2*yz9i z{-6HjO$S!L@$vin>z5x)mS;}S=QHd(9lG;}XQrmRm&Yjj;NitE44`DeeDi}3yStm; z`@=u5?d&$_?Q!_8{&)Y&?d7XgttXR{;n08f*%uEUo=qmz>FELlRS3NSh@Q?n>zxcrq-h2Q2c-&xP!|lzyst=ouB|!z0s9jh2nOoZHH!sglovNbo;998a z_VlEGy&Lws-Sy?{?9sGYtv6q`+pX)57=uzWb)}(Ea>9pxd)1zPSIns98*=!^zc~*I&MQ_3ZKcC}NV#O!fYQCwIG@2qO;GRZ)sV8X<7jWo4eTl+y@E zkvXuYdw68Lq6_mihEY*uy^r)*G!calD`1AgsDi;#6cutZ{|`!Gp8Iw!hKm5>_JLgB zmbiZ(exdj@pkfh>Y8l$SU)7;yD(zi42;(%bu^DUyLCp?t<7TZ~h5!Os5Y#-pNg2(B z5><4p@iR9L1&zwJXI^|iYWYXvSc}B7{6KZwo&d!addyV0{>w*5<7v82V2A*ql>KjW zpMTE-BLZr6orm$%j?teGNw_#$=kYfH6c8e076LQ|g@FiT42hGkD*z-zqHL_OmY9VI zyWNr0Oxo6z93#fmks|>~0u}1}bUX%Ys=nW^Rx4xh*dIcK6l2p&w(BhsiVkP1S?y>D zMR0E*^5a-L%SRu~}M0r^f0tt}^BNBuxX+q_}SO**c0g=dBa8A&G zh%q230y2gp9@o3q&t-k-G%EQ&`O!~*>rel(Q>t4tb@uXh_t9UuxbR}7^OoDzHm!eq z^?KlNv)+98t&g9-cqL7W-CYrlTiCFAD*{&)W1ul@1|c)UDzyAM8juX48U zj@#|};iE+}wS9kR>XR6SVv;x4yW8ESJ2*E%G(Y~~Pd@qh(P2=SRCiZz<8A}c&z5se z!3d|rzMi#EO#>@v5=rFk?DTY7No4k zgpnx55~&OLt>vZ4e@jdpN2)R&lYv&e&jdP_-KEkU3$Su4VHObqLXq&~tYFHJ+WkO4 z0dGX_I{?7!GF~>bNZDqPL$yI-(J?Z=%RB}4%CUY_{*;7f5CBn1*n%ES*l(uoOp*Xd9ELCq56&NHQW7voj(iO4eKj10-Dbzp9S?`yad&ajc3r9* zA;4;W`sV7Uu9}!wjj5fjD>_?ELw}f5uG<|B+ug;($J^~55szIkwgSSl)#`SA=m#VV 
z)?kX!c~@2S-FmxRPUfrm%a<<>eG)_kVg^99mU6F(K;tG`Ks1S^>w|B+b3`Z(Detah z)ub3iS(LEsdKVFsg8mUKMsZ{A)tb=?QK8yyWA>u>IEzV`kH!8)7dxY?Ya zE`Rs8e(;OG{EK(jH`d5zyL0LdoX87NlfkQ#rA5bobET5)Ak&j)$HPH>)RKr z_V)G7+pAaj^u3SZ{DkBGpQ``rwk%1`13?q+TODX;wP*nf{Hioje*wg|90T6-!s!%ADuGE>KuR3xIcQ>GVfj*zsCnw%^sGRP(}$_;gHU^DoW$y}UpB++RJSFXycJcT#w&D(Fk4PYS*&M1jPKvO8I z^03)6m4|(|-yialf_dFA(%t=?sl^zIde#jCIWFpwIXO>vx7&;5Tes^?Q#X0nXbf=( z$rF_&#TY+-{u-R~MIDC#DuwsJxLln?;k&za*9S2(VqyXW6$Rv^lH*AxkbMkSie_C3 zNZ#dSelK`1s3h1eh-xO~OCm%lstTrH#98gG&a;4R5Bmg| zFT=^nbYvUHE`Mss4HSVVVlvmGd=^Zeuj6EZaGVd0WT+mIk8xHQjk<<7f87rrKyG?h=z-^Gsh?* zVB)+t6-395MUxqC1`g&`((1+Ysg0_IO|O#I7H zf*Cc=$6HXKWB_SidWVD{Xa-q@z?=}HK~3z@G+9WdCIwN$;7gaTu73Ed&(1Hl7bmk> z=L_!;U5e2mA#u)#13(qY^jN-(zzh*Yv&A}(xEW7G$k$n(Lsha_MyIxd*dY?3lVp-q z6$nSrFCY22AdZa!b3Q+m3<($!0Xffnst91Bmc(Mpn`%ha;iyt&BJUXx1QN0-Y7#RM z2WSc@t8PRI(6X7%%zIaQpG_yG(6=2Cy0SD)8aiY~An-9pPz7|}p`k=F6&2>(ppY>u z5)lIsO;V^FZi|+(Bil?1@2t~NT1c3CAs`+nU{D)&a`$rg{Ht1o752N~5B}tbKmFBL z-q)4I58it_^t%}C{A3v=aLM5K?9qeegR8TP2kY&Ab+%%5Uw--dd*A-+-!xvvYJv~4D#<#xRcDJVwE|Vr>8Z^#lvt56v znoUwjLAE<#+*xtLjZFWo(-+ps&>ysj_aKH0wx;c)2U zX0sEiE-s%82f1Ib6ULIcESlwVzHf(BUAW?G*A>@**Sl_?WC%l+lZ>WPLjvylpqO_u zjQX;X%*sL>%)0@m9`+5;d4xd_$fhbj5~&DZW5-5z631FR{&5-+5WsTos7=}-o3NQt zIyY*lr(n=gj059Cjr#perqYzbh@(n%LX{@4X+mx?Adk~Cp{b-#8%ReN*Z7C2Tc2L} zo40BD@yB;&A{VCLX&x2Ee>HyT&9@%krtvYevjRt$DH|3hblIl!c53RUtca1W8R>v= zAR;8@(Lz8-`Sd14&S^r(cQ?15T|VJ+UaMm!L2U$57p?+I-QM){9lk?=DggIY}iI^Od<1A2}evQXJ(bvNN-rRLZO*BXr z$~nfuVd-gB(yZipjq?iU6)sAxeJU_H2q?j%6B&|5)dV)A7)@eIYN}>Hq<|qwR1GPG zl!hU!*V`0C1yoc;&@lEvQ({Du#M$4e3Tkl(no`csK=xeLu4>SiNDR|ujlcv<=n;s? 
zO@fYurW!-86o8N*e?UBnOelz#@ z*2M){sApB1!s7gFaeg)^hUjnZwvyn%)r0es`In!4^k4tWf4N_`pM3fm8hz_qA3po) z)5=rh*`uDHo+;30Up!r|&ZsDN`);${bX^NX&0-0T*W1mvzW3erdSgtVef8Dq>NII| zj21%We93MY0wG##F{ZMpEr}+Rgb<*1wRHQ?9dNO%PA`><#bVKry!zyq^=u(QFzyc5 zUx6Id5|9Hw^UW$LP+IHmv%+row@<+ri}Md=i^q$_S?s#q_U5o(UtU}lzUqc>d%wQj ztT(&0Ve;Ni7xQ;1$IEk7f7%?&-fsS+| zj^h0(lP1?4oQSZgxt_>^5sychG?qC29gNg51d*)Z7!x&BbSm!S1TaBE zV|mu;ug0@0vj-DbZpVKBZ+<_ryEvji8JfzEn&%)InE)8y-(!;9^hP7mgreb_wLK#m zk@ZE{{f2qp9y5b!RTM+$ce~wkwO}SDC<+e*ZF@NE4^>r_Wtn1>BuLE80|1aYhl~=3 z)E@f6mzp$m!86aBW;vh5)c3=F2z%!&rD$OJL@9tErA0GSk-}B$+ugicc71T(8G^EZ zcDH-onG~}_8V*BS)qM5n!ToOc^u;wGnMm#$L@lNurij3t4?%N?V+JgaEeK6hY%tsh z>7pbr?D(U@$(g)fStK;)P- ziXd|kIJ6-tg+yIY-~!MqFc+1t8s{tarRIwsjM%ZSz*St%sAzz_B8`ECBNhv0CI(`r zU_=O}CIZ={N7FJj0MbOm9`CRFr(edq+mec|lYjRI|Ks2M_aEISC{E9Bwp%3V4*Sr_ zqtn%stIOCPO3%#z{Xhc0qC9_au|8}*|LV(MfA-P&!^@w4{Ht$#`&)I@JbL`(^{c}# zK6_28(*gLWfBlotwX>pJRTY`cn%RrjFR{SyeE*wIUwl!`>#`}DKFa#tw zzk1Xu)yw7C+3c`;1tHbWTf5ucytZ~bV!rIk=Hs;J(7>(RI0e(Sx|l9Fnwe6_j1BZI;_F1#@$Qv}1r|b ztuH8~wy28r-6lvpoi9Q>IET*jp-Znfoluz!hIZ3b_RiZ+Za0VLHyd;gi4nk52$|3b zSu`3L0E2;wAaE|M1b|s$JiY~_2GQgYlBojunMYo(cvAaYW6m z-rXF2@$}_sRaB*|sEjdqU*vQIF+gTBFmzxjs%ESz2vZ;?0+?hKP-bcJ%$@PU@p2s< ze};%jlFPFjBW8nTPL`#t%N794>==xXkjFdBCxnbySVo2kK(lcs(+~(rgpfcDM99WT zoyjFh4sx2QSFC_h8Y&fQN0mejV@b3h*iU|MUlc z{73)eC)?hrB(Cf2Ag43hZr96sd3mCWtDq;#=K8bGPUa_O8wsh~-q)){V4~pW=ja-?VYfY;yMkdK z_phy4)bqs^J~_0zw7u_RcXRi$tjez2pPgMu9AfD6g?6)9@1MP>>-y|$Rae!!46qmYI{PRG?ne9t2qm}pLqFq0UD`EpL44UCwF zZrE-%tJO(a7Tz%tG5Zh#XsA4a37EF+A*CdS$Ud3f+}=fzM0)yQ6{PKB&s;Xm%8Wxy zCIG61_o@m6Q35(V3>|yUlSJjX^!4GOVL(spB+k6O_x3{pd;0p?s3a;FDL44u^Yp3_ z5kN=6Ceu)+hEq6_9cMH|CF<3-QFcSUAM;StjNfazuG82_#qLPxTsH$i>j*iEUMnz2XJgREb6bYsxrb_vv9oq_1 z&4V|QV`4&toa-?vfB_JZ-MBLy2@F&;9Wy9$yhwKQClv)vDx#AAlah#9G8AMXr|bot zQ4xfKaxd5mCUDU>0i!RJ7<`e)VOjC4#-{emWwAV=`4Z=+ZgpOqoL8q8TGYmc0cG{6 zilrz3+0(#aS4Jr~u?7q-9BGOLz0?^NXt!a_H(JA#Aq?Lwfg}4@is0 zkKVbxzIQc|#~5V4AAa`h&&%1&Ed2ey|NY|^)#b%1bldO##y1d5 
zkwFO!y~F$4*QaM^pM3hs>g4SF?5eKkUw;1T{QTkOZrg>S9Xg5Ou->?`EaxY{UL}|f zL3?0xWcRl(p()g0Xge?{mnXYHyO=H>Jl@~k%*txEx~v!T&Fg2_ZhcNYg~An!2!*&= z8^lO7sl6)z3uLw&VF-lC`5MdvW8OkVIZ5W&m5wcF#&TdB z3vl9(k8s4~U562q7!QL{B#Ov?tHk~7OgVtOMLu)QW7WfB)f^jLn+&WY$_fCedR%{w zlXwR+kTfE)Fj33^j(~IVR@1Eg$#=9)IM0!*&5v#)P?O!f7ivdTg3qKrW!((oKAr;Ml-9b z3Jlo0n9}Xd?d8=~S^3Zp&M~2_mWvQ0fDBzv1w6WA4~hDa9=&R#$L zYB8VpVL--RyL03;#e)5;E*r+XyY1@YOf;EkRnBs$UCb7@+rwdqzVKma7nOVX=n)sq zkAC@yy1Md?L77HSPc#{rXkcOlB;u5eh-8Aq5E-Tk)FxL()&I6o^KvMdMI#6TfvI@%sz&S>T>f;kAun%T zHn67UE;E#X&gMay7{;-%`YzCf+a=Gvn-hOnss6CynR%a_s z{UBn7w>LN6{@(Y^^owVozx(00U*Ft4eCNS0e(|fSUc$CbQ61ZAQT@(uecO}$>=(Z} zTb|T)eZIQ5I62vEuJ7*E$i4mU`$-Le_V>5-e7?Ke-dx}O)BofD_?sX9ysTF9lZURH z-@U#$UoB^)8}_@GUwyHEFL_3SJVd6 z-F~;-?y90*ELPs!&F#zmZp*b>%;#5^D%<<*zU%h;Vd&?soHupWZ+XwFrE3<;5M&to zp&KHqphEVa(zuffr#LbqAVenQ{3SFdq>Q^}c_`+*pIrT?ApQ(WjOPJNAh(TeE&u^2 z0qPVjJz*mw%P=*2`R_AwL^+On#7)Mb`WVa%$7DK%4EWOo59EWy#xEme z$o8&Lhm)5csEuiiX2)9KHGEw*bHCn`k#FI^z1i3XhI@15mS@GITRv952}`oj`JciF-{^}1W-x& zQvtAPn=_2-7>trJ z0t7%}u&kfsZ0XF9E&zDvoFgz`gu*)mW5Uuq@4SJrBUcsRn0(>u3LPpi`NF&hB43yw zL_=42EIcFVkR&A}H187&yc{`4sPOS^z2iV{_5ew zT|1o2PdB$ah4#S*Z@u;Yw-c6kyWQo*)%v=B>+Sb{@a3O8e(PcEIs}eP%LiA$iQ<8R zE0$-&Zab^E+iVuAOL7G`f*4RE6_vC@$rKLz!qvW+V>)oTJb(OX^YQKW&=$VjgrV;S zR~8g@hnp|TC*Ov0Cc*jUOay9&VZU3Rt?Ifw>|QtX)qV#jC#$pbi$Qu#DM;M!+rw`A z;Pm0FY})-!Q!4A~Y&jD$RSIBakdSsk4}G`ohA7I;msL>}#bPnH+59kcZ5%>O`BKGW zFahC64O5ob1Lvy?M!tr53hbZyS{n^aBQS$FD)+~smhw7VAO#!kXE`7mBtx2*7gyqO zRLKG{wflTK3;$+02{|+AfKpNR7)col?VL^Ng6d(HXWsz z1i%a{nL_64N2|sd|G*+*21MkL3_y|+q4$2b+h!#@0;yWp^)aT!d~T@3(D#Aa0je)t z-?j+o9f>BS5|Nh8+3VeA=sLtQgeWmG!sXd~X!nF5Do6ksogfg2m9NZVHyoO}R*hlk zO*BbR0ADn(Z+BvjJrzz*E>9f0Pd~KY6{|Dho0I z3}_#}xO;SZTD^M(s)bh|V?u}tLAl7bKDKd*9!idNmfT+-`=IfOh$Kd*qkD}2$w)!Q z`wT`=D`n3L5rL^Gj7(dOOM(2tjhJ&bujXNg2mshIAgbh$ECR&5p&pS5iGhe*=04bQ z-guF(y=+t_?+_t_hl*riOl(*Jk|HZIdr#t*KSIB_ zzTa*(Ejn8+xvuLdXl5Ys;^b0>HnjW2qP~9ds;bWi)2{1(=YRU$FTeWyo%i24Ih#Lv za(;Jn^ZI`M&;K9)=d1hS)qOu(&dJi-56@ka^V501g|-X7{`DvCJbCZ4Uw`%Cciw5{ 
z3tXKI?cj=qDlO)#pa1Y@fA&BA`tSdvzxy{o{^{@j=GBX5kKX(4-RVMj*^vX)~Bi zXvg+Hp8-6!|1dGg2%wXs6vjYMm@?R4s+>4Npa_7FlbnqZbX>`$1TacJ$H>Kul4oU& zO;DndW}Iehg3U(LfT$AE-!eqOB%vE2C_;lAitKW4nWqQDNmvayHU2n)3V=tY(2+vO z9p0qEMCKzLGxqy78lZD83#Mbf-SllEQuKxw#yl|F#Ksr`5m*L}ah%&NKk|`w036*A z28syCnDbJJ$a|-v!_b8=ESiN?o;;%a^ zIGFqtd15rgsFsp-3fq_6C$D>r<_QZ1O9luDAR@Gei5wAvK~lT#^@krleQ znm2Y2s>VF|XveeOj#sT3Ff$Mlah3@p%V1-vk|fA{|5)03u3-5Novd3k$x_x(V$%ZFV^v&G3-3Q@bDgF@(r9ijJJlKO7jy3&<(G4sCT{t#m?DH(*^je7@Z$xKy} z=^)CEOY(*HRavTON-0Sam0VsX+44~l1Oz&A0!OlQWQM2uHkHj$7c)|2iU?>r{${FR zgfRK)K@DMo$}{ttA5%wtDauiH$%vYlYh?+BjZ81#*YrDC_CG4NAy?qs`WPURqshi) zQwt_pI|_#57Mjc{k2)heR(~8({>+$9hvDe@9H*pXJRDAl5Ex`Ff9}S=z6E#$lE!%i zjxli=?$04gYB}k5q#cgaj{zX4=B8N3kBv~0^j+V!ho-3zF^LW#%;xju$o}8_A zH!tg=aEPJrMZlL;H>CBhBh-R{3N(?@pS-x?geU@RgvushVnC)Mq|Aj=01-iQW4Z~syr)7D4GW|Kfnm(mg0UEkJ{(0=iwWX@ z((7;_8%%=tU`ce}>d;a@q^`exdG~w&^^gDZZ+?A%@=on==!QNm=Y>VNy1e|$pM4UQ zV(eE}GlH(F;PI0O>+7dY(=^Sj-GB1V`|tn75B}tPzx%tlhr18I^PQjms6`eK#N%;K0J# zn>J0YT1V0M-g)$szxnHTpL}4Q7}9KY0_--s`_t3ML~5xIhc$9j)$^1@b9o|jMBb@( z8*wGgP7TSOK6voqZ#=zyX_n9gV_Ls@Sp&w!zhJGMZM5xMIO+_LMY3_?qS3`(>5GP&?)%9nWz6h%OSyz6PwTTU-y#8om` zJMy$y+mj!9+U25C`5%?DqrHCw$VQR`j1d_}kjK$dIaieI3s)4x9*L1X03$kf-nqhg@5_?eAu=)N zutP>J%d#lT+|@BT=Y3w@3xGtLvR0T6)W_CaO0p&^C3oS}iwi z->bd2TR(W~!P)uhvtNIFdHLY^vzy!d+x>QHlIBhQ;Oeqzn(LRZ=XD*rc=q7Y*$3|# z^6h@JdUy$(+Pmvt{rsoCX=cmC```NThd=z${r&dp?9vOi_nUd$9Jbr@v-6y1Lypa& zV?b9RIagN3WGJc$B~s|y?VW_)6qEV#>Ye)R3W_qNxH_HR-#_O9#nPc)clR%W!GNGx z`Fa&nf7o1Cg$tpJL${dKPrv*OMEgU}=uTH>=ci}2FWE3f0R&e#=Sw7a*zQu-p3Y~h zMN>Lg5c|SUdNTw7CITXI4wwO1k`5sb-JnT4@w{$kRaF;7$?S<7qC@nQJBE=MU;?Hi zZ=6+$M^FgHgtZag7*B%Sy%6U4Jhyja6Gda|oAol;+>+6!=?5Vmzn{Jan!vaWAfFjo z~72!IfB zt!K_kP?&-=0H-wo2vdL&;n&Bg@fg7IILb_dO6F-O3j{~J32hQrfq^ASecy*+AfmD= zeNmX9B<%ZPHk%VsKlFL!L5Kr8%2_~ano`VfkYfVCt_z2@-|bspl>5Ulq@)nve*0p% zs*1YmLOk>#sr0c|0A^1HXNzSTf_GFmWeUSE^oYPjF~)}%7k#%oU(FGsFPi7q>vfxu z;PUhYBn=@DF%xFDKt&~)CY7WqiX_uy)6%hNErX5-;n&t-VAfn96MJ?ZkO3W{ 
zBl6x?Oy2v-xx)L>`;yp`^GJ@!5jn>Np(Ah%&XH$usN~2wc8JHoBFrHwDGDMpJ223s zF+`0?Lv(-)YAIR@8V1mmLZ6X%85d_db_GsRZW$^7pe)=~6V!mQ#k7ZhW5d=`OV)$- z(CxeXI~ay^7{2`EtN-7>{0|>}`dq0x#I$MquGgY~^RpF`m1X(-`ZgjG>EnyD(&XxN zd3An%cYoI`7Rdh9%U6%ye*ep7FMi`Ue(TduK7Q-12S5I+|8aSF^6<&Szxwfy1%x~U z+iFp~|IWh)m&@6#D5?rTtCIe2zx`WxclYKPPGYYMlouB)mUx?#0C4JjVF9edmF@5w~_HVTx@N<-}09ZKJ$oic=rFhVt{`Gn z6OojXnL6hx@5{m$jvb*RWJE{k91-JG`hf_TbMaJ~y4Yibo9oi3gU3V+Mi2c6M&Za27Uv>A|zab(GWQe>;Ielc{NB4@PD^!*Kbn zvQd--W~wqROH)-bvlv3(wanDaXWn~JOEH-$I|6`a-egA3KuxuD9wd>esf1ww0LPBN znPNAjwu>>@yjc+P?d@i}+f@ZUd3+xG{i1I6>uo_;Fh~qcD3V-(i{<=qXc3VZxxg^Q z(2E#Fu|vDP-ya@5ex%4L(PrOU!n66JE{e@|$LtBwF@g#L5Re&|DdgRZYK#m3*&$+P zA_9a4AUf(~^1EjyIC#75-~IT!ZR}=x~v^G zI7+p%oe*bFV9r4%{|J(`#Vv}}#eC7=J6!E-uKRh0oSB}fs{ z!TOyJ2ZsWXB=#}(X0hF@fBx4${%`;FkDfle?IItdZ8rx}t;w!d3q-iz#39IL+eVQG z56))Zq-`HxoL8QoKKtVFJCAPfUslWF?*6W*nxdS?o=&TkcJcD$qG^_i{kz}(MucI% zJHQYgpRX!{et$?oxyPjSt>~E^P1aA6{OnNq@UOJAKgY;;?U@JiPkR zU;cGb%)rc5z%?zJrlgw4lSfsx6e?fH&~=A35ghv9=ck^DVK_al`ontI->1X9w42!P zeN`xz_~30=T!5=YhUKchzj=+4IK;TWfqq@EGlBW}W7nLbYa}HCUCipu=62SU+wH3` z+;@kyMD>1FHK+A#RW)-%BG1482<%)ln@fleu`Wu->*VHWm9&TJ7Aiy`w2~O~SMtuZyG~Wgs;-+F31WoyJweX_#agR7I<`?YANs=6woMv2TI+_GPz_LGC zO$AN!%$EhCNRVOr5l2U4Vj@Oh0A%*ej-6-ck(f9)`?>3I%$|tzw&w}69oaTjHPOtk zQ2t5Bf}fJ42&O90B;}dfR6q??O;Q>L83vHVpbSbDHFOe((U8ED`;h!XjyJ`45NZMm zAT7kLgpH(DQ*SDa04emF-Jku@pZwl`{%@~twu5;nDm6|C>Vhs$XNdItX1(dGYG%GD zW)(g@TUd9unwKecH`mu_K&Us@x3l^DmmmH5@BV||{rIbo-~ZOP+xCEnuB!j?M?bk< zZ-4pAFS`9uJ8lSz6jOI78+!Ns4}DSnC6-K{mIFU31R5Aw|Bd3_wmPH z{Jr1(-QD)^)#p!bNOfJ5b=uuMcix^p|K#jqb-%g2IKN!2E+pV;b*8AI=9@+lR0M`z z`!*gr2GQ8EwGg(H22G~EdGO8Og7Q?+&E_+SF|_-V0Q+6sUXwFpM<-WMpGWW^bSboi za(QtXhSoXUZmvVvhoP^UnJ*h4H(#vIPA*TDXJt{cb9K|4oSYFkP?$G$Rru1g$d?5GWT7Xa70hHNQFgl>GP)vn(l!iT z7SQhxt*EVcYi6#?k_baL9Jc#*cL;r`iW*&Ux89~0XLbGH!Nq#B7Bg@hLFz>f*)MKL zj_0$5L~%&5?>)QN4+gZ|cel59%cf{b$3)Mb-)*+A?NZ_C>U1gMiEgJ#5|jc zs=R6ZbF7d^j_5E=A_+jUI>JQLv>uC@a*H9RBO%Vr&ap3u9S~=TpPW;hretPUc=k@s 
z$T{|IVr6rWhpK9(qB>gG3}h785}HYfc_Pt}ED8!}6ixskVv?M8X)%x~NJ?QClYZ6k zcwduqa@6+C?VN+4Y}Rn;F>1-<+DA)xm0aC*K9DV$zh-rpbc{`A}Z=KSrePoIBv z{?@~w`q8Ifww-lp>-X?r!`0`-`)a6x()x>nKRSO~cMPQdjt^x6a=A(9c$f-LRM~4(oeQ z6b|jM-`mieAyiA(oB|dp^eE9WFPA4wh368Gx~|(F_WS*g5G`ry`-*%`g=Zf^O39LH zRER+;c1^*I>T@fRTWQM+KiISPV{?zpE(c0FX2(nbmJAKb!dHd^}98tz0)Btnhhe$=%+Y-hWaAsp&}Q<)@Wt=!tX~VJCbu0!@n@EsLvZ)XV}i zFj-#4kO>V0W5itDa#lKz5lG{3G$Gg{7(_&1kVHaENfIEcNzw?U?7@}J6_h^!Kpcjc z5uzA^i6Hno)5V=j0+_$Trp%Z#VYSpyISqe1A^F8wX)+VYk~|TKY0}J%2-0*}faTd4 z$VQdes0ILF07PbVj+{d#%PIwC1yD6)rqLWQT20g-Nv5@9QppXFNRlKp3aBIwL+pD= zDIB_??JOixV};ZYIs^;BQgZnnX3Fl9i6KKILsZjfsmC-xXff@fyVc=dW$&C3Q)>H9 ze)x-j{r~y5AN~BR{lSEqPB{|oyKcR0y`!S44nqva#+St|^WE=$<78E- z3|%+$Lw|ey>h$zvb6Bf6;~DbIRSS;{hpm_J^S}Ag)q~5cw;#WFdDHgc;_6~`x?C>L zy=%&H##o|aGoN*#r^O75Vo2hA?aR5V7kwYF@T_wC{ONMp)U&GH?WG?anzJG7@2RMu zI?=^db^f@IP?c`Cxfu?d5-sl6n0hb(D&{8-$d?2OEk+4MQdf11iP^W^p&$CBu`Wvn zh;3^zQc5SYdRCSx#Gwxbd(Z3;tHKw~d3N4$E*i*&49G|Xj>wUl#!2?%7?^>XT=wCk zp<^yfpHU4&%PTS%iN?~L@kW}Obe!5)r8flch~^&=q^U*5N%oQtgVA@Ed-7cC9UsjJ zc|;j|-%Ot3gsZ} zE~Igx(Gk^pb6y-%JoC#VBeZ;Smersv=S}etOsc=fC*zi=A!)4iQ2h z;}C~#FtuR_LyAFC(ikPC6jc>+9JE>n)F-|ssf@ZHl?ak7|2GAV(IlY>p`rPfb4#>OoG{Q95&4%w+5;vnm6c-{x2}o92TLFWlJfB*!QMl zYMJ*4T^G7Ow!I9&LV_e3^HVjVY`e?TmgUzAU|>l#GA5Kgr8RV~wR??WSE3<6+O@y_ zA3y$2|N8e{zgVw#aZnT@BMPw>OJX)#EsgylBqWBQ-}%M|pMCX0i7rl8%Z9)A?e}Mk zDgkW|ZQl>Cu5VAS&I}QaUcPwwjrTsddGRW=!_dXAp1s=ip-s4J?TgP}kXq&A>a-^I zT+G>5vwEgMKKty82XB2KZeFa;$W`l`yKb}j>gluhzV*%Te($@#{~!MS`|rFR+WzX| zvTgUTU%y&iUafbBi>oUU^}bXv>02ak$YDsC#u|oXNl7`h!|jV}4N{lI(6*!zEkNun zNVLYCzNgK_>BF~tSv0lp)~_*k)^5}GnhDJnyu2u8CsAbR52DdDdPk}WSrt?yNJ59eRfQ{^2US8WywB1RLPA0=2|cPMLLe|Ea>$J2h!K$;0}|!i z5DfsBocBfECPr{1WATXDPT4@YMgrwt+a}b7XmT%&3@3ffTQD{{U~rV=&{%ZG??0AJ z&dASnNK6`}yyt~z((Ftv_0;80V#=Hq1Y>x>SmtTs5GM6<{+Bq#1{qFTSDao06S#@! 
z9koi6XlcZ5zs_mE1n-RJL?)aKPyhu;bJK(flpR8nGAd1g&Ux=0AY_d{<}`>gYF_)H zPbm>mRaJ7Y-Q4E2Aze6)B1`H<}{0Sgw3s9)^yca}0!V zzikyc41%ac@bdLd6x_F6QL@P3+%(5AJ9aJ;lQinqb0o4G<1bK>B7k$wF@vd!I418H zrgba11y$3$F^p(rff3OFV${NM$(bcm9NB?fGINWah52A62qu8R@Y#L(_ka3V&({6A zlV}i95|MuBlEfr&h(kyr#6fb8uOfNbd5WSbNmNaeQc6jtlxYc3W3-e^l1ea%)30Qj zKoh8-DNRyq%WM5iavDUwrpI0b#+a0RS30I6O{JPQQyNlUJApY#b=(n=Z&uBl$wnyz zfHEQjS`rX|1QL=ALHYnOk|wkW7V}k_$qWKPP=kaBnp9Oi3Cfn^Egqi7yD!82a~ie; zNjBRb|F=K=kN^6=><^($Iv6CRA*MbIA*Mbll>W2VcQ5aE(a! z#aUB$IP7=*kOXM9TA}l<^!MxC!^dyUPfizSC%1Ro58nUq_U8V6v;O9Hesc(Rdb+&7 zy?y)PDTLw0(-(cvhwr?<+jUjtmy7!B{A77{;!amEKQ&-d+g{%x@mn9f|LLcnJ%9TA z^6F&h+u!`{?|?DxBy z+ePJ4zwLLokOsD7lBk>)m+$E8eDUCMxjJ2*KiF<}vD;fb>~CK}=**bv^I~@4s|6Ab zU7Lm$WGFl~vvPT|Qox}LXylz2wL{X)=s5XhN5RWj0!Ng86i;SV z;>3v`&z^CfKwv`1l?x02oMU!`01{KaMR~eq z4J?jaaI*ZJ%m5@hh8Q8CSzc!nf%L)ShcSgd4j~N8&N&ZY-g6iRLMn@L=wsjZF{ZMt z=FNi8t=Bsdy1#EzvZg7nu1?C*BNRhQg9ajVY;g!FMDMWl1%NGP^Pvl#{rY~b8oQzI zVpL0x41wA~T;Y6yB`Zi|V26BreRF<(qH!qPcwI?~JWfCgNl~Y0@=@zEs@7wQW}4+M z2wA?bo>@h5VaylPDD50s*4!jOpP+BIvtr_+@{vC-n8>RG(9pyL$<%}i4Y?Dz-rK+Z zlOI3ZrRzi7cEe%l`=RT@(Dg$K5<-eGg*a05Da90n932BWp*-V8DXK)Xq?%NNYE+35 z2T4J~AStLsk*E?)BWS`*6q^EMQK@7dm#L|aK@xBbqJt^XWt>kblRH8~S2{QB$0;Y%}sKy96s|Yjz1i&N~0fMD1#e3L( zmF_;Z{mW7ZLKtrMfAp{a^MCp`zdsBdsfb)e@_n)>7E@BBR&bl(E<)#AD`KGE`ry51 z&z}~a&sL`>{i-Rqx7VIvyV*XvdSFTFqM#(J`D#8}%x9e=_d_nnt7 zZuW=a;^Of~AAfqk-JN~#hSb)vakl2g~4tK8zP13-wFeMMG#ssE7u1~7uL)0)x3et507&KKqt7=EgM4q|yu5_++o`6t+ z5&am|OL-d`GmW(ij1f4CdYC3`P{+P(!V_r}vy6Rt9uAIOGJeh8h=!B8BhLuqci?ol z!jS|%ehJ5lH)_4J?kAhB7>R%xnSs!<(TX{HyYe>NF@S2cbLF;Q$C#ho+0*fcQ9FLG zfr05UqyK9ZvyPgdeB{ws_eXY@$G|*1el;`B(`i8yc})a7z5FA5H1X!S=g)n6#=HSk zMMW~he9Rb5l41w|=$y;-FQt?#_e(zj0RR9=L_t)|u|!BnDyn88dN}M2lpN%x2+q54 zssrr%A*EOr6(Ixg5K>W887v;UAx1gu`_RkD$ytg+RXMfTEM{sfqF~lkl`kASRMilB z)3{vL{cxDiW_=&b7>vyfL93F@44o&U6o*;UbbS;A}MntNR-8(D$0(TNmbR9D6cX!9A#K+2nwp%-HirnfT#fXcXxGJGGhjsChy&p zYB-t0r~D_$Po-76kMM8*^e4}D@zu6l@7jGg9NK;eVF*ETe;+~~0y58Qkg{0!jTT7N 
zBxy=94k?8cgTz5o&~ZiNY+`FQ>6q+nH>Zk6NnRaH3-A^1p{o>^0{_bv8SL@rmsxB1d!NUiK%}!0* zK0JGMqfp)*dh@+3QV%hAwUC}hbuxOSNhwc5k-M7Ob zE#|8~`29b6|C`^qd3C?m`LS6Cp``<#xo7>lEv)#XV#wrK} z7bl5lu3j;fDOpNNNHHWxsV;pAU3b{Iq5$u~J8mlUt{2&~?XDZp6^rG``RQ5ZoCENL z1#{_rmLitE7$ulwNJgCN8;xq2sq32-c<~I_&>3kb0j*N;Sk8}l&^U-*m(kR?HzACvrm+L^K&_%u)7;fY=Rv)znE6GCS{yN3RDG_I&_kFc*3Y zojR;Eu?9e;_2$0oyX%{EH^d;Jn3q*kr4;wuyXP;SBU)2e&UyAeYdAv~oF^nihl&u@ zqHy0QCdW+9@h}8Upk{T`?AmreBw$w6SyQapCMM>mQ^QA!JHK$@( zqJRj@4gfO^%g$w<)5g*d3SwwR$Vn4)wh(ib|#UyD6LrgJB3>rg9F~t%5Nl7zhEHR}Z zF{BV;3^BzNLrOs;ND7jIN>GieQ6z{2(O?$Mf@y@joWKM?ffPu`sGVacfq(#pHmZy! zFQHFZ;umU6HUj+_wMW|N2iq|H((hGbNAg3^0yM3~<{H=;~k;bB2n< zVISUqbdkFLe$zDzclmI6*xfvSd=X+_mgW8Zn!Pu`ZF^Xroj$+2DNd@yvN%8aGE?^JN#~>fxj4>vD0*2#3Q~0jMZm zzutK7@9wsreERIc+4*n%=6BmyH;!CYSDTyLZ+`gg``a~$I&-VVNl{iwupeSkmCi9( zDlplwjeQrAQsMhUH|%z?+rw}m)X?uOwS9LD7yh~K_96lxNrae@nTX;rFw5tKKExr$A;l;u#+cQjA}LAC zD}}QhD1TigNs5{swh~k9L+E1YBn>IUK`Ep-NbDsI`D{=LDj^RCpb0FQM2%4;YD)P5 z^CMHP_-LS_BA}Mr5fI7dwG;+T(IkNgYC=;>c@1*Z6ywmxFoZCq7|araf(e5%B(|Y) zz@Ux+(NgLW1xII10@=ZEgWDJTSD%I5>)IP4?XGWs??3$KFFt*ilmkMSq>mb+03Z^F zn1I|MB8EVI7K8x$)d%l>czd&X`@IjIzq%`Dv-|tQlSdb?KKt}+QE%_p@4fxb`qf?P`_#2& zRxD17ldIz7B8uwJ|L`w=c=GUJwV3;A)-?0K{=tvl{qXH)PhXTx8M|%c-2IE^zVMsv zc8IoJZ(hB4F@ysVRAtlD3wE_D8xSi%afqnqeBs#0CkpuKZpvr*WIX9XFvv>#G2wjYY9SpL1 zGsE>``3K5;Bu<5ELg9}(#-re6Ec9>wcS7^O*6G7!z?xwF=@dDt{7DP>kQ=&Th`W7{=u$d7 zeCzbPzwwQ3f185d-rN_iSk311hMT&06$a0YNHN8xt_Wq`l*6H~s%p3G3s)F{XkRay zT~uXnVh9!n$qRwhgaGS66u)!F!PvFi_uhK4y}KdHGh;r^g_wt!qDqNWcqb}tHBWnNrDc|lBkZk zA-PEhGZ0Nc?0^V!wwiNhiYx|d{v z@|Fk!Q7|RM6a$(8p{eAEZOaQ{0TD@5Ooa%TortPp$zD_tV~py-kU`BQAqS=sm6#<0 z5?I1CK-$6K z51&5$tD;_&4i*lsE|%9Xzx?gr`d-Bagh;B)w(mC8$*GkKw^&Gkn`f_6FWU8?*B76C zx@gXBY5nN&gCG3p$7iPt>DH(7;`!&V_J=#q!#nT4wO{YQ{dd0m{O0A8Z+s^pRr7Nw zPb`{s{cv~JG-*-K2+Vuu3r|ROQSCxI++EkJN3mNIEz^F>CzrUm^v&fkL@2!{S}vDe z+jVy@4o^Rwee01HRWZLj@Y96Sbvu#W`Drt&iWE#zR7G}vx4kWv&DqHbXsU}ML_cGH z&=`Ykq>X)?H?*3SH8l(Gu6G>)igN*?ji@K`h=b%5=v@EMP>IGYwk$e>V{v3605uyc 
z7aRxXNp^E2bSH93RS`+e2nfKC$P{!;{y`=Z(>%oHrfb}&h=@dE-e(4P2+V+pF=>p+ zF)?79VpBHBSr8dQvT;=bB4(i)eNEg$h-79ir#_F!DNI}A-|)VGF!~54N?;m<#*m*$ zrph>hJY(gi>8wEnv(fcnIl55+02yfBG%*a5wrBjI%rpWL04AEl+VWVOk|Z%O%t0=y zxi1C-W$*H~ZBdCa<|^aJS<Z;t}!r~t8k-&wMzZcL#mYC&!H17kgO z7$X>@Cr_@*l3u-9mss~Pl|?h3yNk0Kv-!d;7OUI))*#zMfY3bqvY@Smswp>{b>XEA zDVhs0)s<(IaZf!0=MUa75q0f;zdtxvQpJO)h*U*A3=s^FI7)P)#N)z}AtvvAKSUxj zaH0~TRE2ArrXM=XOH{_#J^~zfIx~9&H30y;U9So4;$;5fu!{zHhou=1j|(XQAn(~7 zxnV*u%jzOiGor!p^{#t(Hv9DD{rztL_-xfH&x9-IW`+cqLh_#2vGYzuFz4*BA~Jvh zjr(2zK{R2W6&*7cYQpBh)Y#Zu_A(pgl0013$$$&_4CBPoD6B1r)xImZwW zu)p5D`ZRSn%c4q${V$g^NQH88n9I|}!-okBEOh((?|u6l z?Y>>EE`RaUkH7yrAJi56>SsUw=l}e7u1=b;yDJxUx8MEd-~H{6e)fYye>i`9)%RUL z3`r%%=nRHl%hl4lt;Ri5Q8q=p*@nJ#1v_@z^-VLcch@(C70dT85Q@Iv&-6@vQJg*; zx-Gh7W@TONHup_g^fzD59(~LF1HXFMoW9$wzigWM=I)s#znIO|=`e(@^p4ROG!DaJ z*2EArrGmIADhK_b8cfto5Jlx=UM%JnGU2wpJM^f@m(|qMj*<0XN{+_{IGda>H~eEv z3L+qWjX)ab<=jqB^80b*1{kk$Fv$CX406Y%CZk7&}UE-?iJFrqQx5)BA!(afx_cZXfJD=c#ZGI0i)=Ed|Jc1RA9%3LuujhCYCp%w_f!oqn&n9b@w zh)80`3J^s+Q{jCd20}+f3h3B_qJhO2s;Y8QgcwDXxe$peLWD;rcz)vyV0~x@mD95m ze74Ugy}=~41k2OFl*-M3<54gTm7uDMco>Gwfi7mnKBUiHUSFKfPG|M9A+;D|tP2O^ zL{&{P5^9pbxeTi29l-!-ImTQw%;ZRf*pVipv7nWvEUeJJXp@B6yo^JVkptGn5JzPo#gptE_=-wkD1P~p16P!;uN zb1>$RV(Hwz+kXGIzx&D4*Wdh&Z@pM=hY)5>?Nj&U>U~c-}?5hV?CoeyJx>_#V z`wc?1>vvC9i|bd{j~>4jLoDWQ=zBjeTs2GE7lhrgZ_C+Yceg>uyZ!yLS+17zArAd+ z*TUqNErHa0Q(_^qU*qza~Im&t{87=tH}HiBErdQkOVCon5?h*xoYq9?QeN zuc}#DX|l*5scVap`xu+D2*{#ZmDRisbydc$HMB0JUS)tn3YZZzmAkT%)V4hcpxDTf z=4%KE^CB&tY91j1DB@VA(PV^!-ms|INu!9^2j+d6*y~L(Aq~N$KS5Ocd|8I~>&wBX)N>69z^2#*(sw~N{=p|Y&M6k80zm{3JhQ6hF)P!>SpY-Gp1r9_ zQf4d)r%6>5iDHx(<80oDM5f${2oVurO44?{^M&_Ce&4+F2skZg2bstpKpX@H4UgV> z;5onbItz z*LFkK_aO`+gfR3m3?eb)C?N?^QcNL9j48ySkHgS+ZRqDzeOAio-xZUg!dAmK-v#Ra)@4olADt$`n_Wo|WTiO ze)BsY)J^p}|KRWbn}7T7E}uLqS1UhjR;&5#tCxUMRwbf~QLB2n-tLRpq8lOt$;papBy&@TYa@$6{wuWZU(( zo5Q*rcHO?I+~f1*!_%cxVKB!zQ3i=eSK$<$b`;V~KAExJjQaWf#p(RWa_vd9iTtwhERv(QCz~Bfyp(i5Hk6R&+$G|a`h~MnV9H)USKcyr4kb(ZOF((_* 
zWHTEdju6ObzuQ$sIj`qvyzdT4BLE^YY9@}sF`KCO6#|Kv0kY54A8cATW@Fq;9tMoq z72cy!h!XoKG1f)JM9x!`NNB_uL+ppp9zyBsvY5G|)`08HUElRhU8g8fK#cmXFFk+r zn;#r@yJge#vAe##b_HKtt-Pn(+fCoK?>>3|laF6kRWZcm$YxCm#z{AzShym8Y$2E{ zIL71)a*SY-F8}~@jvyEjrI-rm%F;2aiuGaG9}dB=S+4qFz~eAy1Yn|xjOwz8QA{i* z<(!g_6hbh>qVNV1V<^1$z8K<=Ew5&Ib-SrSA7QiKUtX-ll-MOvNovOkO)$W`ieUWw za1`1jpou9?iH3cMF-7$b7>SCI^k%c^x^`aIi&?PxdxuVzLB~8K5cf z&`gLFkwA?p8AT;$&RH6q^JoPkn>r*`iO6gKL@pylNG^}mhMb$4aVaZk9)NSeYK%(L zz6&yBG!s!tU}B~clW7zt)r25G2$mE{)C90@Y9a+BCC{!>BNSGs)#SQgFP1{pRts_FiA-6m<$>g>{2zClP8;aO8{ z_HLKV5J5>rqPT4@frK#3y?=PR5Rpwch?$WAk($zEQ_2W79*gFvsF>`rN1$Y6H!}uA zfJ&pqYTU+adG{0;B4^wJ8ANj?GccMVD4B^djl%rgRnydUe~nQ_$f>m=`EPt(&F8C| z+q)!@JY_j#o*+iQ?+8U@04UEJ`3?t{rvw>a-4O!HqrliQPu#TS!|51Noqvt;qmeX6 zB%U5%999rBQ9HtBlUltfLv^SQRlm_VS+R2Ah>NDF3+E=W4;ySkXM^Ho`km@4%BU`nXG-Ru#(V@@j5!B5n`$EOGZT{02BPeVHs`%FHHDN642d{N3NaC4S$T>oGUNauBKEFG zDJBBvJgB53>`Ry5DpdqXDZyA^Vc)i=GJEoP@%f9E5CA!;j3_y0+}{9fGKh`gQHsY2 z0TEEd`XSCr2aE;=#0s|WLLXum%30|f@2Y}J&xNCc*}0s+Oyu%bE(AbkCYR3w6ca=g zin*C0BImPAgMCpDvG)c0f{;-dkQ5W-SZic76a!+9(?Wsld&xE%RV6~R2%IH9iO>wu zG^H3JZ^bc?I2d9|1Cu#YwdkM#cA|t_QRx&-#>XEd^c?%$>u25Wrmoz@lSe7p$A9yS zAO87|c6Xg7G=pRynpp~`5}6qs-fy;jz(XIqL8&R2#Fza3d+&e#{MGe(hmO#6?%caq z51zk#=>Q!mgIp|YP2u|H?w|fIzx7vt{a4H7Vt04<_`${X?QOwclk9ebYvBC+(U&i7 z|G_``NB`wN|H=RQPyhati*NkZ@BMLI3?F{yombCZ{4f9be}4YeuYKuuef;pjJCd}T z&0@Rz;+H>v=kg&jcKgkDzW;ZgJ$?3PfA;79`~UjCefIO8onD*)YuDY+mjyG^@!_3dVH@dR+t?iK3paJa5l@0%;D*&?hDO|=X|yWQ{0f~uM^#rEz6 z7Zsded0yGjnk~%FnzP6K%eHU?-q47cQTE$cU!A^n1E&pD^ZDhI_V%N?S#;fDc5+Va z%ew0O5LNFt+vJ@vEtboIAr5jfuTS=MyK7?<$E<{!ECQZi2LtEaa(TMm?z*8zVgj=iF?*Z=RUk7G<6_|HR09~P3^yYD0IFzH zF3{2WJGJXaWzq3;>aanD&SEd{r#!nRjk` zI0Ol5W>KAILdsEmB7z2a@K7}#3kQg#JUy8yAvD)0k#s{}H}&P& z`Jo-SU~9!d>!valBr{1Vu_z8htO}c?2z$qh z`$O9_&0;pYzP_)DGKp5Rs+WO@+pb$UE*&T4AlV!xo(ofz5XqbOC8>yNL2gLBLt=K4 zaxw&gYKQ^`zxmy7ee}f-$pH{~_LdS6iKwF4xWFg_nt21{-U5s@o6-bK4;tVp=Xw4`H8TT&_+&eewGFZQF?}y*)Tvma?lo_5ExnxT*}^s{uRrvbs0_P{Ii<2cW?fW5_v1dD0XgF@$CCEG~=0<8%>SQ3u 
z%*N_yHc5BhoC-LF?W<|7wq#S2jfnY}GfSh|NC8G$aITZgIZAF!S{u(8Vn!;<()(fv z{dTv>NdA~JH~QK0vmUpQ9i!6JbV@HZH`e>CgwqM;8f!P1X7=ojl5|wCer*L9PWjD8 z%fpEC7S_yQ$biXjcKU9SlgAcZ7A{&sy6 zBo+<4_0H=2yz#(4{nbZzx7(Am<=JJ`%y@qYRWl<`#8fvmt^=Ty71y=*#O&M!@{6G=me-eJL17Op6` zbgn8($BxOed6IeDDUVZ{l>sOy4amgKJI5}sl3|LPB#~psnsUT=K14CIS((W*112)F zEFz#$#%2-&%z-*7mdlmYF;%Z`JTHj?f3D?gZaEG50V`}Own z^1-faq3|Dn{>AV9y?^l2zy8JI^6{^J{>v^xRaFlz&sUgEP8Oek{`t4R`N8`BwXe8~ z!#m&mcIfuO3QNfq_Swf@y#K+se)xkQ{j-1e&;GA}@vpBQU)8nmyYAON{mF0tlYayf zrI-C-yI3QQuToW3b-gIcCP)%Uf))ho*^Ip} z9BnqQa#(bGce5!!TltcF1>rPW#m%xge-ds!b;t>k%ZAD-r1tRov*r8(Pv3!RvAlZM z-@oWKH?i-zstf^3Zw}@s3-skMbldH=U(CoA2;pQ_oy;3K?1?y%Pbw(}KspQ|3^ojk zOb;(EAJ?-N*EjpFH?w4lKwt_+lOA1>*i6T$Wl)>;u1;dlY{AS$T8|BTo)q$O_pw9C zN=rl&8?klM><-AZan{QrARbu)m@ps%CK^Ekc4bjUk?ZT5Oh=oFisH0OK=Z${QC~Tp zNnp%$1g{*7bVTNeDXC3@oLuk6hUzS$29qpgA8_!{0&xMq&Xz*I>=$GQ(d zoL>N`K{j6?;O?;R+y4CgtS(D0G>n1uKsbk=A*Q55Y=yuTWX}2ZrtJrN`SP|Z`N36lc{W>} zEI$3^7cZ}O2K40J2eUahvvPkph@}vR6k{Bsi8OUN>I8^@y+J4opG8T#eY+?MVkd+_Xz>r8) zz4rznsFG5K{}nKELr2LR*(enXqFsnh={;dSHQabB=%|T9Or{!bi1832kvW2rD?)k4j`pf_Nhj&k3p%h8c0E{px7*I$9IZsS|FNS`*AEL;-sTj5GJ47gFi<|rH zpnxb9;)$mUQ(2bRcRN*Vn&$fTYZ7w|*VlJ%z4KPPZL7N8uJ0Dh+1<L|5o9pGpd(C9J zTVK6+_4d#I&0k(zoWwk^w-4U`;PuPrXUE6Bp8W3b{eHjRFhVn%_Uko^0O;kLOXFPI z$CLL@u%6ng(P4Y}-Pg+x9}+V&q2|>0&3q!hW=JTgF|k5zE#&B&H;qf(?&|s17xQCH zYvLS-Xiv}D+m|^DnFi5Jj?co)oBhozH~qGm9w)5*$wT5S-IgWC5RI|Tr1o2ThZZ$dv*OlOPfw%aPB03#zMO#%iz;8tt*X*tKrAdDz#o)dJ+M5>d36+4B4m}~mC3|Vb_)h5)gMNw zY&jSr5-M0?W@D@)=G|_;-EG*IkOhc{gc%VirEG|#w5buBDo`%mUpLAIWAFg@lxy%v z(Z_=h_^^-@A&#l6WC(c_qv65)2*!~fN7svYRunM8Jybkupvr(ol2}`1$%J4Ca?IX&6hu`q7K6#s$^I}H7Td~>BFOE4^RK?FMoY?*Z%aAPdtD3@@At02r&U8K+fWf z1>+W6mR>6ptp-Bq^lahn;11#<;>; zvbJ*8SVM>)QaEJgcA+AnOkJRiWSFhB)>`MB_ujB&Vq^H8t_C`)J%~|7bIwetqhT`u zXhBblIZ+l#A%-0LZfBTLMO6u<@zxqnpm*za%n2`{?Y7p@@#(R3#!n`v4<0q=7wRlp zoBQFLzx}(v`jbC<^X-)ZpOBDoJoX8sn7}X?+VwjE+YLEHMPy@b*Yz=EGWG3df47PC z#53kflGnkr7nQs1ZZw!hxN_O4NrgiNi%V~-Nx1R^x{r>{@$#Hhu8^aQn&{ zJ9N>{PU|78%>zKJnrbp@js(`*t`Ea74A(c;jeqE! 
zQ_$tKnbp<0i%d)kNQMAxWmTY%aaP+d=QN|~w3&#?cHb4-7{iRH#a|LpMTEw%jB!W3 zzXu2i-`l<&QXb(44m|f&R}=_YBqK2+mb`Aw`EX|tVk)FDghCzy6d*v$hAlhmm^s9} z+wX=Lhz+P3B#>PE;&VAxi>fHjq_rwa2xYY~c%U>L1~sJ77;e05>!FVofjrwo8Z?44sG95klLOd-V-!*aH4YJ-G1X^84v0}0za zFkuNM2~ofK?sZmp`t;)C51t})tK0o%J?!@VkAM2%bm?zzC8kjRj_an{Z`N!K5zS_^ z{ch`?ok@zdXOt+J04rqP55tkFiZG~iCWb_%jaeo_!>E$ICu@p5gdI{e%sFN2ObW3` zaCcqz`t9ZU(VPHtjFxEHIPM|DNXEt#Gw5Ve_d`lajd7MakE~5pRcCDRMyHFL?q{8-U$H3 zF3=e_{$tJI6$?f3nB`FSVKfSY0tz7zq6`>wN;w07rcA_=hQkX+5M+qB@AYnfCz??; z4t?eM$3OYe$-{H>u0B3Biz8t}jMDC2efi}t{`@arzqnTQ$QB@rXtDYkV$Q1QYyt@) z7BEFM#wLM~bx4|tgTQLvPbOy4H0$+UJ$24n$*|k?i;15xo-dlqtJlvSJseV;EGF~C z&E@4kLZruJWc^~HbkyB{{F%j4s#%S(19 z4f^ESXCZ*EYumWtX7$Uz`dj0h#q4Nv^ZLE_9{<1o+kbZZ`px6VkKVj`F>5MFUD#|W zJ5T{e1B|4OFKI z94#Na{i?nC##=zV}4qQ!?&5aE7Wr~8Y<==VSeU%CPW-7{53!YGUa1RW2| zLRu~-RD!xklZ}yCTHJ|>0*(m@y0%rxr)Q@_KQJ;XGO{8nsfrSVvrZLKOoiqLAPgt~ zDW{y%fmsWPXf5x%6naf5p@Jn-H&vDxhmZw9sj8+!+ozOBNQFa$%hxxrUtWpEC(jm5 zomac=`l?sux-uuH6X&#Ud=6=}w`PqYNKPrnqxtOm_AbS+zF9lAm34&?l(Y7Os3wDI zNDWc(^T;C&^k#u!9Sl!;MeA|i~WW+YcS_A<^Hf+ei=no2- zRANja=WZCfA=I_2ykkNrhMlSfT`Y%P70i-DjKdK2&@vG*8Nw0j8k3S1JJFaWiUKhJ=FtqrwEd8kl87L7Lyy)B`+h*3G?lHJ zx0knznFCQqYuKi=J(^FZc|Wbp7kAzC!^!o{n@>M`hD5t|@8)wtWwupSPbQPq<$l`O zX)`@KJ|;B#KJ-JX420e0=wv>bO^&9MVp+$A+r+%M*seCFQb1W>-+lhY-}x#1;+KE% zAO6GNS>0}4e1COxvaHxZ=>Ow?^t=1-zezDTWMqEy@L3;1+sEm&hI)~^9x}2H&%gZT z>#u+N$%hx~)!XU$X}jOfW{o9HeeeBjIX&LoT}jGB7;`jiRe%f)`@J`gQjpCx*~YoE zBq2t0M~^h_;{F;Lg^4P+I61#vUCytc^SMvFKtG#4{IK1;MGc1SWa?)}%j@+E88$M!f?ei~(zS;DZxFjA`F? 
zsz_|Q{g9K8MI%PtAztn1ZQAs?Ukv$MuIjDx&xXVJtu0mS?Iy@zwW@4AX__I%eb=TO9a~^bLqg=C8)D9Eql#E(owEwq zhp1V>W489|Z@(7;Rg4H_}+`xF@2c?T(>i1h9H)tjIH>py?_{Z)u2D@1{iMUgV(kh1_1 z5i3hhF+>myS&~vpc}PiBqsBhw`O$JS46F5BU0WvJ?z%LQ5b~_4E#&#UR?-k44B_^6 zwOIVlkW-)1)OxFCbGLc$@L|dV#v132&(6Pl@rIjOy*OU4y1Bj&>WdiLS7&E3s< zvo_8~(Ptlh24L?tyYuPf`up#0SGQ5p>DjSkf~4O)e?2`vU$vX3M~fpDj~0{X-+uGS z@BaSo=5C0Yo$t5XYBGCsx%%mU{112AV2Ji_zW?f1fBn&;v)A7}uTN)01W`eN2)ElE zfkRP7le1Kf@B2gmWY{rvU3Y!`rVgFISui=BvLQ}v)LDOW5xd*%&70Fl7j4(AckOa< z)Ze{zcJ43cDWgl0ck2O@ z0x+_MgiNBjs9cC$g1l}!;?3#t(PBDX@7nSlm(kz>N=405R$vj;j9OGp2ge2^L}S<( za?UZ0a8Q;6r3r|NYKb{aDQVVmF3}Q^1pvrs$gnYn$tXZd8AQw2US?|o9)0}*OIb!l zRU{-OHH0FlfKo~@9z2~7XXJvp6cTsIG%eg89f2Sm8T>$0IH&jIqJuNVXi)d>4e|#m zz=(mBBVLggqY|Jf;z%la#}^uP#EOQwTuwyPT1x=i&0Ykobtxq&KFMqZ5D)+7s!Tle17tO9fe{dmb&80F0TQz^NC-m;$r@9#?JuvdHoKT}e)8nW z@nX)%ch|eytF3~*|Hr7R9>v`yUTT$GLY64I; zT#ht3LpOwSm`$Q4WfftllLgN1 zw!6M-SN*n`PT5*t)gFDf@0)s3`D*CX5RrriOmKnx-HsZYw@FYyKvRa%6PK}M#gQaRIp?g3 z<%`KFNtOf2`5;RKD7gq)pior?GDH>)p*3inGlbv_Rh5}c>a(-ugR`Tvv*W7qQ6mAl zY0b@4Qi6Wam~Xy+@mK%uuWzn5F>%|cq!c7401+6n1Q9@F$eMEuL6V4)0%g&ZRe_VF zK86Pu=dZ4=(0cFcei%ZG(}_W(w%=bYoim-W^{Y2;tHx}0yV;^9gGo}#p>C?zSKmK; z^k~~}&W|58O3rfQ{A#sXZTsG=B=OOLAI__XCyV)_S>L|aoRyS)^YHx-yLLZqW+W&f z<&dtg*Z-&g!~gN${n7u|D51fdyQ_M+{7?SJ|Iz1v{QtYXT36nZGZI6VuYbHY6-&{5)OX_lndlaaeI)!Zr!8@-}b1K21$_NJY z{`CtM+im*khc%&3!dy)!r_W-yyM6O!xj0pl{pyyGIrO^Snn@Ma((D}l>#p6B<=VTm zv$HP6Aw~i;#&5fBwQC#e4D$K$d>6XQU7L}>7%a9tj099!&N&<7A!FMOcXxNQ*}NZ8 zA3`z86j9;>k`D)Dr#!97)wd|)$1wGra>kGf1*a6Y@g<-jG6r+y!|zZD6`0KU`AWuj z(wGMXWM)PlZ_#5##e#v00xB?)ma|qFY!H}G1PD>eTs`tUM^}gOpi88xhLEL%A(SOW zw4~7H`~wXzARdBg?mZsHjU1p581Zn=Eh`fOsgArsRn_7KR2|DGmJ~0t_WhfQzRy02*TjM6+U6R1}ev@_M~#n!4O$n5`k# zhkoDg3~TM0oVD-5Fa&EXf*At7eY;w>eY2c=^2z&^w=bW+dh_yazi%%dUz}bn$mn!B z-E23av8p%(MADQ~*Y$?E>-R*!3KL)T>+QU$<7y`=SrRgIA(5gWsj9Ck*e6v1F2WEJ z6>AJiL`Vd={GF8q;H;V1T9uSyU03Zctu{MVwH8lKj@PSg)-I%!LmqNqOH6*d*-UHa zE8B&TG#GD_q>!a{?5we*3hBw&$!4|hA_xG00ICuWIj0y6qD7h3t{+O2dzLJ!V2n*U 
zwIMM}?VV*yNPQ~oRse*MAZExJ)Ka;c362twau6Fu?joa=bC(GmG1sUu`DpQlM*?Wx%htS`yUw-}lZ$JNXz3ICF`UnXqBPmGC zIYk{pRx(JGRRKj*5Q*6hIrT9Fkq8hqEtbdI-P$msF)53PSaycMk|9IOsXaZaHR|nl z-^?a`-~Hs%$Caay1O-?z=HAuzZndGBHm^7PU7DRd`QkUv-+S-bzxw%aE04eTqo*fx zT1@@T<(sbGo}8ZS`xL-tG*_$Z2ag`dZs*zjn?L(^pZ)l!Z*SgSZtl>#zx}(vefIRh zkAC+1zxw%K4SkQ6x*S$FZzI> zYf(V~lq6TpxlH~5AY87$fTF~NDtM^j7yf7di~;Ny9o;N;U^c)hmdwjA_y2VkaJ~?CDNR`uC{UE#uH01;JYk}S?T1(lqfp|Z${3KBB2F{NN;V{Gv&sG257 zj-ve#Rq}i>b=DQE(YZQjSZ}w8kTMP-E*H~DZ9tMW%!~vGA-WtN&HUg0(Z>?HhQTvh z6aqyRKx!t7i-(VxA%)I6W0^q#i4-tpMPjyA0NQQ{!(iFg-orRb!H{G~8bqT&227&4 zr1a{zKNF*K#yZ1>083B}-n*%ctS~H&yFf(78fK0$_x;ch{SZTj0<4Na7R>@#HHk)r ztR*2pvmr1H){-@2W(NWogVvHUPU0hkf5=#nZC~G01E_@7na~#}~^PTO(*Q?!rgRE6$E2}Jf9#-9(udu&`VVy$+s?^Qi|LNb~q;&G^y=~v2cPc5j zSFC&Kb}2}G{KU_WL&BJHGpo)YoX;1t$#jZ@O1$m6%k}E6>lNW>F*{$*J+tH-W6D{W znGR;+M5XeO0(M>Ry|X+*%w?u9Y>HbDj?Yyf9#5}C3BZdhRCrY0woH0aYNKy+om`i&lZiXH=Fj&+ne>Sn>PITbml-W-`u>p z+(y;KNqzQU62iKw46#HJ?wtb6p=ojAVJpp$|z_9g_o@7^(;> zL#gi#s5it2##lgdm16=)y<;L`Ae(_;ys;Y_y5KJa+1T`&`+N}{s-?r{mI4ggT`Bh znDei{d49Fto}3=DLbq>4!8(t`8I;)|FqQQSIcMp+mPF=tZH)mbC{XN^#*A4|GUTKP zr~>yafx>1hX&VOuGL9+INXC>W5?baWC}RZ6RFvhiBw!N#X$%BWNtDDmY=Uv-(UD&om%KH4b--g@v zWPW`8_UgT7kN@f~{>{Ua`T6ne!NqyI-gc|@XnK@Gnk|+m7w@fFYnRW!`K#BjvD-tp z`+L9hx;DP1>qlwmd+#TcJfsjbYHMq=smdOw>0wWnVh4gRB zJ*$lA1^os^8p|x*13L#~<(*H@$V*iq9p`i+`oTs*<8f>}%mTt8P39iyq61P-#HD$Z zA#H4Whcmzr9=GVj#?jueA!J5l*)bZX@jIl^P>u~-Cd-wge zks4NOz?MfOBIjIH)kt6%0pn~@SB-I%v(|Loemb2nQw+%&PeAj@6d}hL`e6_RTlp-~ z)RWbE({|zNb}f=mj%Me_a|pxt-@VS+SCi)H2M?zUS68NX&M>Oxrm1sI0K#MlZ66Xb zId6=?zUu_g*AqoxOU@Z4Oi4geImKIJ7_spdRee=;F%-JHW3r5h!0d=^NV!Dbz6;+Z z7S(;XH`Wd@3@J<})Af2&Rn^JSj4?Y_l|%ru$;^8{^kKX0h9Ne!pVpoY4>1DhKm6UF zKAJDv>ucHX^KN58UrRh+)Xy%Cy@4$B@^ZD_w$o|D2r&g9WJ|=Da|9`g#UO~lEDHTF z#1OnOzVbOkQq00hFvdKlPiIkpoQqDfSO|h4^469+(6|Q_eIb-_sKn2WqXrY1!|#AF ztArT)5JFBV7we${zm`yB$&y43JizETToP^mX zQB@jZB7)jGO{v}Oj%G(9G7LQ)xY~vcBjpqtFF*U~@BZe??-uiFc|5<{Ts3pM*=_1+ zZ7UlUUR}NoIqcd!AZDR`2&a!8bRlf^+rRwlzaiAmK6o^nOplhw`~B+r@@hUivQ>3@ 
z@o07VI>&r8Kib}0FP29`rfmneH{HWW&tATMdH(2utLoKy|H)^cT)ug`->!f2>(6rx zB15w<&AjQ7nCZ;Sk01sD3Ee(*`}N%oAfKI{9nF@j*Eb$$_4@7X`sVi9ILFpmTZfQ0cdKD_yR4~w^V|I9OH4asIRrCg``Lf?zx~aZ zZ~N9XlM^NCn*Aigm9TOuV83TxN@D-+m`<`!24o9f+vL= zk0Z)5^7(NTRT15%%9CLjS@EOJimAAzjPYM(U@xzcgUGksWXo55eC#W(>!Jh=``s2$`z|uOVE}6yYpsGZW93-mV1P&mk=g0$LoLyFcKl$>JT@47$y{hw|x-*V`u-ku&W4dKyZHX-t9czV2C zOuQ=HZuYjS7mF!_LX4P`V`PMsB0wP+KtfHZNpegvrD%|x^D*Up*M}szs1UQnoD`k+ zBb#p<#jUEUUDPU~YOkP(y;rHdS8cWT4oVSw*DgwpSk)9IM*CHJSIsok9zkl?2=DXe zAIJ~Mb3fO8u5&&Ik$R{EBeLA-@l17g%p}Q7-hWH=@4nurV{6F)DYLf@{p%~|ARyktHzV?;yvqA1A1O5*YriI*$H9Slh^1_k7rst z(htBPtCv2#K%V4Axc73)8Kv@=H#R9zpTano4D3YY@QPS~pG2(ANtKlM;5usNaH`1N zl)sPa>a|T4T&>BIP9<|@e&*mlLwvl&{(q78hyT_hU=fLbypYc8_ zbFLmeUc7lUec|KX9DFox6CHW9(_cIv{Ws$_idYKJ$lCXs%$SXkHuG}OWAWC$U<9G# zl$k~b>%s`2puP42WFxuflf505=)j!t@W?N3b3xVZlf4=3^%i1}f8nUEsi;>2qm<)< z>vdPW8zj#H_b^HI?r+`Q95=`I_Dp^^aTD7{AI;;6g+co+ITkd8(D3uy3?%4{FIuOx zv3aE@FIc5xhBlYKUAFyxkh=+D+ z|K>M_^-r%zv>ACdlfw!m=waw!+C&3!S;m&H#J}~nFYe*+?xoEq`~9lv!_&J@0{c4^ zqi#A~QM&rii{*687sfNtQiD&E^}XfZ6Y>8xAEz$Rg7R2JZuuVK7qX>aKBMuwsclK| zS9G^n8u*Jiwy$@ z90!U=F?9-isD*$1<7d@g$jdHynZijf#OSEZvt}ZjUJF*qYjiSsey=YF4Zed!x+D@A zwbS@~B4Fma9{JI!<<|GPqM`t_E{icX;5%)xb*nz9N7Yeu)lK)DR zfk;ckyW5Zp!KefJq|_D!T)J@f(a)k+5`His5`!6u@aO^&=P@*FX1B$|V&D)uRwle^ z0k3&6FkWKr6~hZcJPw2LL=`y})vZm&uuuP3vIdk@m@^C`Lpk)nOE6}Y_yO$IuJ&an zGt-{}G>)mf&clXpL4gbEF9Q3&f8H#AGv5FY6|v%+%PPvEo;CmSzJ5vNoop2g!2`v) ze7ooips=d!rd|Pu7q&KW?YOO z$^p73$qE~S()`j&C%61G?QjP^GfkxdU6)tet7j|thee$uTJ)}zPZ|8jgL~`qHAuUV z-O97Sf-Dquh78(yQe8@a;U+M0#vhk5*+_-T%pGMptO=>4; zaGT1FN5Beb(`Y`d2k*e@jt`f$avoBN(%3U(Ran2(sNST6DJA}tCXl~KQ(Em=W7Sr8 zq8=juc6_DGjB!`7O@Z@shHz?&XF5=$sPQOWUFPX=238(=Da?~g{^!p!o+GwH;W}M1 zt#96Z>F3vuT%YQ$P>MfPECY8T-%J+?hgjxvXif(=_ePzPMuvQLZ~aArA3i|>brdU}o_*78#iIemrJhKQz5>PTM zgNUS$%3nui-U`#%<5At}oSEx^rJ`oHGF5R)GoW+i*>)q-qL0f} zdn2;3!^a$;&VjWadq5H9I_3ft7H2i{-#apKZtziwJUcH+E!C+}C8|Yw`!qH}^ALQ~ z|3aGF3%cupI^N!s91ScA8g&v54GO4NoLO`-yj~9r==}Tg>BlbA8Ccyzhb;mAI-39l 
z!HCy5xTwh{sN)&Dd4z1@cWsX%Q5n%<=jVGjLc&W2_U+Jp@=jyxLnZ5_f-@Zh#38ZP z6i>a)Q{51s8Ed`ixmgl2Vdb+Rir{ma&JEB3+#1T+xtml0A))W`<4UN@1$Zn*_sVNM zVdhEHwEpjU4me+UCbl=*-p%&n0ET?c-CnVeEvyLSSZ2JKP$D{%=|BwQ0FT1_snZN} zQ~`r!xl{JHHZL(ZoB3?cFK;s8aB34Z69TMFI(YDpra>&|GEU)N-`zQ|d6;w&<0u+0 zVu|1g9$0Q{epKVW=n42RuF|#^&b<&NZMY`yJr^wApI?Rr^>!u#8d+v(`PYsJlXhx2xL zYkJYxnHn}(WLLMCSo`6j=*Us!{TxJqS||<|(upkgw^g_sfV>yxLVS$)7wRn-h1<*z z+C;X6cwVFB4`p1|8yGteJPj^zeA}xV8=NlYJ}7UP*Zk~wu3*;F^V^Mov7*+{lhT8b z7mGlDsO#UYfU)9|@V_}*JKt@o;9RPudKrd0oyC$|8pN8AIG<__${0D6OHY@SLPVfn zWVo!COKb7m^UCf{R8N;}cX#)&{l$jc?f1o_pO6|7EhYACuvFc@o%&arKjWu#xbJPZ z&#`wux?E0yQ$TnVEy>oS^cldPwk&Pv_J`p!*f(113z=LHqr~o#C6IQmc4Cj)H%VdM77#t%jClXzkC@S zL=?|%xy)9Roe@RdR9=u6+C;b*>eIbO%`xPt6+n%~Nz&TeJ`)45nK^T=`UlGS8C$MD zSPY5uJ<6Np9=$dlUn6_tMCUg_ooz1!k+0n;x!*r>eLQZ7X=s5w#Iy8B^RozA3DV-! z8l%As$Y9RVpB7s^e^=FYw~{cjKMx`=A1iS9qN;mPG*IUj(LP>}&I)?r#`jP z^!v19Do?0*U(qB%YO+g-ivkR@O8h61H;6(3GJ?wci7gAGtX*cuMPJFl0j7KG>Tx0$ zb6d+QSIu{rJ9O{A{5v2BNT%Y{mW_Be0(B@K_6)xIGbVAnHocoOn34W2r5M#(&ka&3 zTXTnsl>q@S;m1ZE>}d3Fxv;B^l;)nWn+@FN@U&;kg7=)RoL>;Soq4p&wmW=XphTeB2r;$o14BtCljZwe9$Qru~;Y#uWJ-lEDl@sh_Jf-yBP@z4LT`eG$+2hHmf= zSfIh`Eb$L(w(fJAbP4BOya1!upY?Mc+W6HF9{MxDqiU`HDY-m01l8o-#a~}ML8nlV zma1I;!;cadV)|%~`F9?1^Dyh|lg(3+cQ+bB?;q)C)~+WpWT|h>@SImopETVJz53m# zs+9YgC!10Ew-?}@N~#)naqKv5s;hU|379|6s~0G&c*2KG0&_px9GOqe1-PMg*&&bg zXAZ-uWQzoqS3b)30%@Sutu@b{f=gGd+||`(2s9S>T{X&N3&-K!z2|e5{tQ{UWR_SK z_GHajao&ea$IFb$X>~^?yBmD#_n!aI#8a8?2T9#;66J7Tq(Q_GI2hVHF60Ur{vF^T z@dNix=OO}ZKUWES+^}>sO!bo4C`tOx_$76s0NsN=Y#T6tF_$$AawpTsNxFA7w?Ugf zY9ZI+R#gYr{zP92eln}Obb+q^NJJH|zJGaSFs~LoVA_PO1U&`6gc?DL1H|7^=Z@^w zI}In%>g1*wd;<++isL)?=RVtPc-n`T5_loPl(w;vWoDWi1FIwl-uI%_pQUJ?{Meqb z+-?vYpJIxb6mbt$(E8)A{J6vy=Uwxqsjs9DSFKwcfY=WpBMANV` zuj&vy)^pr_W~sB~>`Ps}>^>x<2uT$JAP5?c=V+dkvI@U^3^$y8^eX%gmjUYYM%tj8 znwqh!auLV9w<|Zox1)D!3cV4ywc?dXD;b&MtGmCHQALs%Y!nXT186h{2gS|ucQ>^b zEW&y<<2PHE4;d1?yVqz{P(^TfJ*3nSQ97DT;m6}gyE%)~gdFp~TGf8$8j{**w-+Do zc{3ZrxehlXP3LPFa;nG*Xbpu7>6}zGMZc)n%)Z?efh5o^V;Dyt+H+o~C;fM?`A%te 
z?wU`=bcI@msqiXQv3BUG$>&#F2{A3!R#_y4Z&IA6hIqq6{L8_;pYimt1?9d4J1@xD zxCOt_c$renR32=1zoG7`zg-`YU9Q1Gsy4F*j>-nU9`(5xO!PjjIeRs&^VZs5LH%~J z_u@YZ5^Ke1BEarl=X*{D8CM*dFoVw53p>%6b7Wd6XCC&oDcCoR7O(JuU zZA@nqMmu1Rj6|wXr_c*^2ACe9CIZUQ0B`+ZNkAPWR)?>@~3!!pI{FIi&olu^F3vxTC^-S zl1RY79o86fVykn}_(zZFvflC)J*p6y0Tygr7;6}4K-0K{AS-S6>N_*QRIk|dR41`v zYeUHq%s17Q&eJlUx=GAr8bA9{L0$)uj{5g+wd#1Z)AAG8pi$K6$x?-Vi1mC%dJUVr z3~K^Iz@hR=kdom$TGyFfDrxK(5*ht^&zOZp{G;j^=~vVR2u=X<(^+pQH8EnXOiJMu zELLW6k%)|@1PkqV^g@DydA!~xx0zJ ztg(})403d&S_dF!FI~oeMT~!5Omr75o!D+UI1fRwsKuw&J5Zp;b<OdA)Ubcz_D;kPZJEQ?laISwdsQ zwLLMxMg^~?3dZ%`t}Bc~-wS4>r1qkl>+6bHbc1c?gV7Eh;t8l=v#)qioxR1EdzFZQ z_4?lHo0I;_q(1TAk!*IjgqfEHlWg7X6pN*D@mrzY%VVxHW-GWNKMze~MX#ALAywVv z{}3lN*t`Ax)7Adt2EUhvd!6>0enXv@Yv_Et&9(p=+qj1h=^*H1Za~6xKfP)>y4`U^ zY84+(daP~;FI1t1kR30-$Wp%T(-z**D{{&Iq}FBlKfELVM!QN&@PXCEnQuMvG6?U* zKph)Jz%SU%@4?UCC@*jCi_|KD*Gr)VBHGN^xJL3giEtO?zQnFR!7j5-bQXX?e>#>dKs1_t|w z|I|s3J{oP7M4k^L5R2K9!tbC95;Jvvbc`4b+&@abG)#hO)=Hu5rQq;U7cdgFT%37E z1%|XAEKyxqUuuzcz?m_gCKe{jqCqxK%fTf^Uv%icuZ>hwbrDpd1er(h7Z}0g8 zi>u75?@#qg6RY}qe6Vu}vYMf?Qns?t|EhSm1xN|}7#hBJ9$kCe&9i=4h$UdV8Bc*W z!eE&k2TQ>)tt|fu5QSo(Nag@_27bJ#4&7rqV`S3p;4^}FC4=*MgG557g-}da zYhFOmW^pIHySY2Wlh2jk%uQv$%A-&qV?@PjgRJ&qY6kX#Bgrk95%zt7jFwJ?n7wa~ zP)wAUha-L8nv-M1!cm9U8ILG)E!{np&H!J(>dZw$b+rEj$|q|ov{`2;7_m|pTMKha zp}NAYTwg1Moo@62ZdM%D6kq;!KMn{DP43YmY0q=u5}L?1y91!giG8;-4V?X%b9y+^ z!6P72Hjeg~>fLB166qyYh?}*$OY?E>JKdd>KV0{g`66uF3z3$0^_Y{K?-(7>>%F=< zD7zh=EDk^K_vw+3`1?2H_V2|x&KHVsZ^zACEMG)P7neojHbXSHx1m2%w@}T!(0@Qb zgw>-$sg1;pvV-eo;f%PSRQ8ZEtGJv_unr~bd%4I!KEG(cNL7p8yQ|qLXYtm|17)Vp zKU}S?R6x~>r4KirY_VWkD`UA)?WEAl=;L*6h1=Z-w(b=lZ*<`Iww|yPXb&_#3=fmR z3jJNF5PtJooRwXeFUhCp+n1-~A4sCogE=g+Qmsm4qfW^jO2S=})IfDS$~vEy4x5`y zKgP>KL!(4X3re0aJCbEGyx`r=IxjQYv@V=htYFcibTycwCSu$CZ(Uq=Nm?J2ly>pc zP=ZcBOC#-5tc{nzGdVhXe7e&-W+v^=zX$k0VeS%W61dv+WUzw{qZcZjt736 z@NCx9JB^PE-A?~*Iq$D0#N0m-X5yFmvs&Z(r}iDc)oWrAQP^pZ?vp7Y2Yq8TKmf?f zsx(+KvtNYg4teW>^EB8x?ZI}^=Lsw%jG|5cZO1;%K&U6oMpAb|`ZQ_L($z8bAWET7 
z0!$8Fk-_5Eou_f48$+0aECt;a3|6;SdeGkMN5jLzN1i>_*0x?fmxu0Px!XUu`>xz+ zAAesQEH3tii-t5E_izN}Yz_J5aMby3GAo-4)#PYF+tCsy>)umPV2l6!bpq`hsSVEt zLEb$#o~LhVlPa3XYTtrtKRDlq=Ut3he<(;=U{X;Xkme)&(QWv-Ri4g3uhP2#O8 z>29^(2}m9s9GokRVhFyLND&nypvh3!i-Nt~8Z5hYGSoc}sUH!2@QQ}U_eW;y%2{!H zNK@Km(N#^xqzKURWCvoRexlq^%mi#pmLYLNn(WEOJC|X<8lDpPj!3^{L2y{%r#|yx zRiq{Y)sud+ALH35BNQB?!qcw1?Uzy``V`8>il^`5(TJFfz?L8eq+Pp(AKC$;v-&pF z#Kh!#O4mu=#YE?k|DTd4l3oIs4@VQ`lXJXORQzBn{KBJtb6cSS_8c0%JnpcLsup`Q zk%qPKBE-3hp-Az;`-PIS(v5{UCn9zUz~qAmzevze<*Gcoh&oN0{ihLqQ{%k{ACNe) zKzS_%6V`gMNuY#yTxM;M#X9c4jA!SGW%MS4qqK>A= zl<#FPeZi%AMO@XijxQ}zgMKt&PF&DI2Y<*EWaWyaI2ar}O&*m~1x(N&4$m8v`jQh! z2A#YAq4_~?SlkA%40-pao&=0i_McKUhiIBI@mJ-3JM-Tad>|rD?EQqc)pznUBe*Q? zn2d5XCcW(Lo52#pZS5kY8GO`CI)cJ_$Vb-|{$JDbI5WxmHS4}}^p9;K7HJFRpbtsM53zaHPV zgMUk8lWny2C%%HVyS+;*o%2rLiF|HU_e|H&-Qf$^QX++C4(LtrH`aaWGvD%X!8Mf; zUq>f9zEl2}$)+HJ;3=OwJt$^riPL~FOBXGkn5nB%S;q)-duOd}EEG&Zvb^XpRY_2g zM8xkw{@EUh-Zvyk>Hp1e*eOAVvlsm+#OdHT^y$CN(NVz4{{i0k0i>cxG0c&b$+h5R z#c25C1gj7gXbMUm$X&QH=g3s;Lh!1;@FaPlWHQo`#Gppw)-2ci;i`U15K#$%g#o8M8c#n~eb;h72!&fO<`4e^ALw(u~o zC#Cp3-7G z067dymW=Ehk>TN;OKm%Q@|v672|Gy)Qs=QiNk9m44$x)}7v@kBk|#5{21>>kScUEa zU7Ojzdt2u?N(F!2ljX#+3LoWUKY+e?cbwv5*Yh>v_%tW_T;r~9y@pcZdLwS(Tr@D{ zbpY<%6086ys%!g^E!Ht$>#+4wV)P0{?^SX7xL-bPK#_9kW>JUTRZYAf&}a9raMc$i|I}@r-&l=k)>C)!jHKo30%BDe zLKVLTsQ{~~B;aM~8>5;oauQ%6#Rq|+2gsh+d-7zjVJVAaR;s!cY*hMm4ePo7EAdpv zisf zZrIkSy)929+jtDfgYB3~ja1V-y(Jk)7}IJT-A`v}5oJhKjp9p2|0m|Gmiw8WY8S?Z z&z=-ZK#gdb09+yrSR0#;>hsq2JboX^2_}!lRF61dZZ$EH?D1}BPHj#-`60pYg3oF6 z$<$GhCP{guBh}Nwgcha)vF2qk;X`^}f?!T~R25ArHjiIsJk}}HXKbN`8h;wg7USKb zx`y6F^xoaCN6ljL%@S5A@BUu!8_e{p-dwpnw-F$+mp@BOdc+JAw|rVY1bSJm4>~KkvCf>^J3FED zGk^EaQYe?B?{WZJ>T&&O|B)vN5;Zz1e%gzhU6Duw+}nuDwV4e0%j5QyMRy1HN8U|H z-R3umP=~;LVOw49**HWFLqdOC0G!{Ek7~(tzUOE*@&kRmAHvEaU*YzzLErIlig3kb zG3KE1L?hj+7n{AM!-PMjTLh3)R~;L~0K z*O=-HHXj=QN5MJUr(<(9Bva0cZMnWaSbg%HRx?_E6Dy+xQUA8!?^e=P5qW$JS^boo zuQQOPPL%kdjN)E5p0Ga0HwwS>&~Gumsy(ktPAPlY<}F-sBSL1U&R&zevrhj&`h&%hr#<-~7$yE_TR?<#Ef@)CB&K;Vph^mF 
z*wE}bi6()1KnOy*NRZpaBtkg5_wH~OH7bKLuAu}9DhI7~@A}Jb7vZS)zz_f_mA(G+ z;XPBETPbgy8%%;IAo$AGIm5l(*V}Mb?4H#;Vt1cME%bWJe}{tvVwRYzLT{%>r>}$u zcHT*VIn{Tzmw3|oPy@cG{a(6D3 zRNk8PN@pe-(!+}g^^%u0m407AF3Nb+_FJHjKX&vowh@Jo$~lSLQy%7U@I3<;3z32S zNi$qc(%i_(zmpXqXlS0+!|VCfhJePOTB>*b)DL~jRB3tZf8;7 z(=-X7*B`g&3|=@0LG-^BWUs3l(*e3k#QVPybLH@DvUB!cP)2VHN8hFZL!=~Prt)%_ zks$)nWALtlQHk;6k+*bRI{gf-G?x-lUx{NnPLZzyn+PJaY2cp}edP{n*AelFRD0D( zQIWy>URLV*wjYB?)QdMT(#Dk=^jxkL2X|kifsVFZ0tR!yUp%|jKZ<=^qk)8WA(u5t zdM^-c@gQsO3qAQ;egE*wo3MGT9~=Zbk+K7!~q4!(1GyM|1t;6_&l)I`lZR z6DAb1>_>wG0)o5#CIB%$`?w~#Z;Fd zws^D@6BnrAN`zVi3(eDJH}bdVXd!*y_o56tZ!#K4`9vj>B4{6|tN+GX?I`wiU`a`_ zq)=W&JjkyAv62-#Ah;1l&X^>--hr{XP!#BPG+t#B`t|e)?cjZWM&ZPl1Q$DWEKE;^ zcIr$PVm^>&W03=tLJvZ}^0R2;R=J52v6YJI&&KH-QdM_6kUo+F1qGEb<0gVBT9r34 z#?$66QnP88gM((t55jwwDZNbZ=_@zZGZ}ZEwVTaf2U|TCN?6Y9=m2Vx5X-^5a3Hk~ zV`ysj0er&IK~dpHtAms8xM$q=5@Z=%i(UTPZ>TG^Gw5I->VL_T7Z=VZ_ak~M)6$PPxEn6uQRoTqT*gYR7%2Iu^Nb?zCYDfkx10nBI zD%|dCB&2SKJ6hL|O>g6-_P$45zeZ|Hh5_ z8sNcs7UEfQ5;%FC`(wS6(m9w|KO)zAQu^8cV=y~p8?*MzMYz$JRFRddezOjbq2XO>sCml*GS99iO zCgYMT&=ptbhZ!po;bDlYEzEYzUe}{HG>=j3sLSb747Mysa@>JHbAG-(q&;Lt4Pic! 
zpK|HaZRgWBeRPCAIPhPDB?xGNY;0}hBe0B2Dg&opSZmyR*{%0P*aG(XmQU1)&HAqh(#~iakTuc(kgU#_n7w1JGy@_;Ig+kd#vXsg{VYACs4TBkM3RkCtY>~&6 zK-ddCD!e`$Fd~=Pzep$s(TMAt{ygXk*Bsiv>s5HLgQFKn zJS@M6<|6GUy!v~Hr}jc#B8km-A})RZ6WtEObZBN>1z(S&Z%+hEs9E0`J6&9e=qH*~ zush=p+B-d-ffVloHih4*Bx|mjQU8tdO2;Bn)z~TW@jWWWnQZ(Q8+N;<>+NQWN&b9N zfy$5@U+sMlb#=2#`<=pJlb1>{V68EZfld^%Sh*s_509!(2R=u|xZpKsgR;aTE6iBp z2%o(`wT^BoEA?ew+W@EKxt!+(;1_nKWMw(KMmI+tomB3gQbu3YVCA~a1ze%}&#*Y_ z{y5XQ@l+d^M#nXf=GZSvK5fiwTX4?8APR##sP69UOtEkzHXtZ(dI;9s~VqFD96(bG47T`6^;qt>-k~(CW}=if*lE!CmMCPs510Dzd^; zV>XAO871$acu6rlDb<|d&D<)IwM@n&{0A}k78*y8M^=udCwV9D(;OWO)0`c@7S!jK z@qy^p)imbvqP}^KVo2-$&Z&zgkZ8`)2=@J$`#Sw!ErS#I*^q%D%pQiHsgh~0spIbbFAbOgP4%FKT>|uXJB8kwU-GFLd@lhKK{>RCz;=e)#G}y_qc{x7apdRyZHslI2e)Ng_yEKa2VDb4(-ASZhu%%hLzVb+0(aWtgZABQ| z54pnm6ELxvYPIK^WzTrF=#K+byBmeOmAlBY77Kwg=<(utQ=^BG)d-RTLBmHC2w_o> z3JSv2m)&_CD(Ac|s+z!~UM%ieDP?&=y>nJP|E^HnWX@b1*K}}U<8=@T?(RUcI#3w+ z2%FWT(gK3SinA`fR+g8oKe!p`J>JeJPQ>=D(PrqOhhNLtO>7UsgpR7(b0&;|p~y`4 z_xWB}PnZ^OY~i092Y=w9xJo(=we$FS{w=Wjj@c@6vO7j;*gFQZ7B`jGc5$ z@!tYICh8v~=f^Gx6nS3jHg`%+f0ey2M-^!TY61TZf;eSEPrIML56ZjeJ9OJmSay#B zpU#)z@`dF+ytN9;bd|qSVHpe5tW16X>S=Xky9|=MD`I#4^=?W!K4g5Yp;jrv`sRhM zeCF+$uQ|i?lkU3!20eTdc>I;+yWhuYX`u!dqIi@`yDAR}c$Te6pT?_TuL)CkSrUmq z;_34#*O{s@mOXov6-(h)%l!!B^=3Zdm({jpu?*+wa((JMGRWp}j(7sTp|c$G(nW+L zP}EZY%_XQEZQ=N1Kh5mT^R1=~Fn$&ISyq8L{tM;Cfa=c! 
zE%rZ}n2TJp{R<|UuwJRWgxTPA5hcya2V5q3kEhrCSNWNhhj{am6k`2U!6-Q}q4G+| zJ75lk7{=qGcyt)E1o(b9Y71-rf6J*d^y;SJy-t>tE1jB?XfVvcU_pOA5jES>jp_>d z>_+yQzOQZU`GhplNI9?Cc8Mm}e?PzCo2bhgBS@KxRTFX@O-)EtUOAFz8XLV^{jFbcR7rT93eR3;8EK0E;;(te(CdE7!X5~@T6mR9fvFC8h>`v)S^{Y#Q&_VX)j&I z^+40TP!3sQsEI>xXIvnIdsmqIMl02F7bxS}gV#b$QNg_-&yy=M1K27*sV6OWE_L=h z*+LY&sxjl}D!62b6_-jvXOA3GsgcORo2SHv!=ezYkg zWxZqLJ+Cg8#5quU6QU%dSZ9md2xJeT$Mc%@d(F3yOU~zCqk$mn7L}IY(pZxn zIv91R0i;GeC8BYA`Y^V>iLZ<2-EvXP}gCXJ`c-gim z%!bkIF;`FX`L$qOLKF7fX60k%Wq7w8whb8ZDuLyMjZ7qNHe{l(a%Rcwa>q?MhPbQd zLEn$Xr>=Si+u8gHe8F03oLjH zk6R~~WX^1oL=oCi!gaL7X&ao@n3KsR_VaI2<`-|X4Rs#Ckf1Yz`_r(getCKcPDPM$ zdUP1+2Sa*&hgb(6e;Cu}3F<-tCL;=7*L(OQc#47LRE&-d;CDls1GN6h@y;Vo4dC=e z4v{SI#AiWS9}jhJHm_G%3u|Y@ibYvj@U(B3%#q+s3Z0IK~Zst=YQ)-QjQ|6sr@{Q$A^)p9(* zBlBAD4xU*|wSoFaMqLI!w;D@my9g2_>ZzmJcL@H1@-!+dg1-(y&h#5!<8~bm@!2s) z$6qOrklR3*QO{^zjE2Ler?VT1yxW|N4v38OB#W4rm6nS*L0@3y!u|q6c;x90Sz8F@ zv0Puw^`@iS!>oR}6JBV{&8-n))($OZ1k~5!Vk8RVp>TrF8}k5c zfuWyS{Dd`%DS5>Z+ss84wjMdMa_el$+x%+MD^h&D%5L+CLOm(h+<0)lyz*ea^=YGI_T4 zGZo*T+4!@{njzA1*jt@!3xkDu5XH+{xPK+-Ic=%(FeZ*th;s zBUWJKONN|u+)$B!n3p>}0qbR^GnhecI3TH7s(j-jo%0@5*hK^{U96rzaN?>7K8mL89qzrwW z+;golaF2KptzjU#v-(r;LnY-Hbdt&EeqtNA^9QZ1|h82yt<=hMvR39QBm zpUQ+?7(Y8)21==$(Qs#F#s);~TpzeNE?z)|NkJpQy~Vm*b!<)~ep8Qyy_anp^LFAg z-X{~$g`RBfeUD3V#g;0BhZ9r{H9b_8Rcgp`FPG+gH>Q#wTr3eU02yRZf%U-Wis@o{ z!f~CH6>PCE1`Yxuz(X)fZQ!L)<$!~iFVFX7CNR_wB&n`5kh$c_Q~V(1s}#<)r!!$_ zly7*hDpe@%$@3%T?Ofp+$>@M-&mofPsz+4aK@-B)m-&H%!7)o@lk6#!L8#dq6|6b# z*&`+I!xf~?lXFqqudbRC54aCo5tQ_22t*-%DyYt3#3Cp#fc{s3vPnk??PrmI)wn#~ zu7-9C9v3o3npeayt>aK4<_w!T-|1{NSz}nHftG5DK1&5pS<+CgvJQZ=NiASi2d~}>j2t~ZQii^R9>FI8hJcY%%u2}n z&=i33(P*}%rZtlNjr_cmxP!TJ?{3-}UyNIHfPgA(P;b;R_ORV zxj=zNQ=MxJo(%u)OyhzDkk4{glLd3UoVb%Jjkw6Yi}utFwJTg|dkV_X`X;YitstE-kdn zg!t>&2c_i|m&w0$55cqsGp8sS8Mhw(!)=|$UEX|ect4J*_BZu+;F|tLj?7-DaYI4B z_VlzSyqA-Tjtt+>O6!l>xxl@$QPbp2A*srx)YstM6tNsZuS+_A5}fU&y>-YerhNRX zFOPwu{Ax9oEYWOq6`#T+B(ROI<^le&^8+?E$EwX#@!PB&3%r6B@^7IJ*x6?zMqR%u 
zT&8~^W1%FpO#gy~_tf%j30$;Z_YCH#bw}Dj@`Ydj<{i=7yLFR|N-QVM^dAZ1G?ei; zM>c8_C@UJ|&K}Zhc6z^njfwMVy;y7K(VotCPn~8syly%t&+)p5hy$4PMGF%yVgx%E z=qS`-B7;6~M~>TVSn_UPO2D{LmP!Q4vAMKss2$ddYcQc9iGO zYD017)!$|z0|(xZjCgD_t^t%rVGMqylPC)(ufs^}*rvyM4~HnEy83v|cim>4Pd7V2 zNRQTojKwLElw`~xJBVvw*=){&IRRATnR+j#!0>V05#75_65pzeauLzVknMdNu+X=) zKuuM<$jkA~1DoS0pmrcl?l{P?!Jbs*7srhD(0&Ngdr_y1waoTc9`!wAM=#+(PcNtp+&kn`b7(HLs!n81SVB zIdPYB(7vFa{_K)FAGa4u&tF?fz(XhF;@BN9aRmoHI+x3x*=nZ0w#W0_qo;!nbkFXp-==SfwSb1)p3V%IYY$lpVO7ysgq1OXU;g`n|w3qw1i>1-}`W5*MpLv&j7h{zP%mLjuMfH~BWMVX@ zzZ_LuFXUPAsV=o8`3UlfmC+t)l=;GTvSJ z9={YPEkR}s!S3ayifcDl%v;o1r+=PPDmW3;e0bh$+ug)>jaa#)ek6Z)g#_9G2NkaV z9_1kV{deK_j^2*bhSGmt+5i0E@iP^P1ld#2{b7S(RHJ>{K!{CQ&M8#SGojY&v1;H> zto=+#DfstAR;rpPu)5S4OKxyC9IJ}45U5l(z318u;!F7NgJo0ZfTf2{YSM63!3-52 zhTLF2n7t|)Jlcf0HydV_%Lbgy6WT97aPWH2J1$zhP{i9QL0UEbP&)C2K#F8ul?A!* zS8gnbmMeW4O8~sHX8G*_M$pJE+1D@%&IIor6R}6q650619(GC-wXf2iaZ*F57T8_2 zEIL7_ph2Qd2kXpmLF+Gl>gFt>BL=R&!ayGKe`lzU-vojf4aazsMl{0`43f$}B$M{^BFv6X=kEqB&lmOue-^Q?Yd}!!#kVm;i3b0uTpta*t1$D%?n1*ksk+eYH7x*YfqJFa!)6-1a1>EiiUeg;@a(^PS!5#wOaROG>7*)|K4nl7NhE`*9L}XFN8fD<*zoN z{eOXy^UW{X=pH*CAKUIF+n%7>(ao$gA+4QruMzi16WfQR$@@9}#KcXJvkVe8`S`dR zfQo|Iwj(zaPNIEgTGfF`z#p328qA8hfDmvCo-Ict|yzuIx> zWKv@a#En#MEvt?%WdJnl!afo6y0DQ_^`{DdWgtJzFBilT1oQr%eZoaWWpyzW4PBjipa1rHcM{ zoVVTsp8ve(8%?htwZ$on22)_{n}Bckc;e@?|=GUK!q8GWojBhi3%~ z?(!t}h83mA>aYX^Z`?@jE%gd?R){^&QN1czzcx$Ezi+0hjO7eheYJj@n?=rU;b>Be zd1~d@X!3?72c-Oqy`qF0;pD9Dh<7(F{MW{XisM z^nCj*{OwS}!e=MOR>4YYI`$8I8%K!E0vbFLMGM~1vSSA3;QDjWh)!6Pa&KG=( z>=GF=$22?@#S__j2cl;7Xbb`%RH@1A;#&^P4*IE}?FNgFPZAEi=P9m-zcdwh9t8Ka z<#c*m$1AG9Afr?cCi8eZ^+g3C*EZ9Ffw(sJJ^2UbWun$S-FzNXnMT7)oCCqIfpCSR znXu(G3FFYmk)OL(`2ocBVN zy_bI#j7Q7f)e~+n_iN0ki7d>@Y-fLV6II!tO`9O3sACUpkGSmYMTJ0@lJ@}xc?eC~ znK!;zp;b6F{zJE4!hzd6e1l5^8H|TzyW5$;oger6+;$ckPiQh(VvEs2AFN8cE())9 zXE$|w{|5;{_P)Kr##O7 zB9e71=7)C};TAot8KUDdoOOJL=XZ5?#F9pd?nnkLMh}JYS-^XeeBct@7x8jd9$Ux3 zoAclU@x!g=eyNdn{}kIt8BfAFiAqTg3?XF6<*SgxoftqQr}B0MjiHy6Rn?T88L{vC 
zn1ZieKlF;Epb9zVG{jyhryR4yBr)a~Qv!tjes67+avJ(RgfMiyv$k?xV!pgtx%%k4 z@81IOPk;K;6ywX+uU4De4?lf+^56&zk(nqp$Wx?-sj;T9c0$I5n1>;C`>tz;6lLf` zj)@T4-EOnl5MqpR7(&ZE9L=P1_hKUt1DMrPp5CvnB5MoL)gj8np@&scbC;+HPQG(&&m<6f@-z&CS zBMCu31Zph=nN1?H!3aQ4;C%48Ev>|Oooav zA+mK=Br9ZNO-{LQdlgWi{l44n+I_okrghac!_ZlzdVjd0XGF|NCX<;ct+)GXI%)Uo#>Lr$Hk-TCi-)hTJ95*v zZ>|i0uT0zTmPd=@*Z*Cqx zJUd#}yIueF*WZ)k@nZh!`&X03tXHpo_6MK5e*5a5|Ixo>K<_LnuU2;-fAaCr=AliK zYUW)v4C!V)Y+Bm1dE2Ic{inakP=l)-O#b?c2BD@JD~|CyCM!`~N>x|M{zFw&jOmYpuQa6W)08IhDIQ-<~@&H_V+# z4v8T-q$pY6K)P2tGhy1 zI{8gc*kP^Z5Bqsfw}3+7RM)A~Cp_=oYyIM_ySM8}ZLKjPaj_1kS4GlkHJy*AM#VSl zet8YOM^S5xDq1&+5fvTU(iNsEXmfrpiTCcmbA0cu>t*Bnj(E-ewNY8ETPUXU!w1HV z`nGM>SE*e&YeNv29^HEQE5|4Ir$xP6k9X@)0i=l)wD0>EBcSn2FE5u@ZE#g}aIiO@ zjX8NAy=Y=hEF4o(<)n!E~xVyHe z|MrGc1>o&fn0VMr@_+Gt$DHo~WR@7SF(`?0A6Z+N4Cb+mWuUarMccwk&|+Q^`=V##+B=?UvDuD&X;8wd~&XKRmrHXb^uV8MbwBSsLX*y2C*i98X{G2 z&Z%lqI8gyaYn`!B6c$vO6B#P%D*x=;rX!@fDpQh_;;5|hb&@z`az`Xvv7jsqME&fG zC)M%M_`$ulC=*gl3<$O$AYhfqtO%wom%d-DmJg0k-n(WkAUB&}N%5B?Ji}u=J4=n5YnvBvsET2(xm|y0wO~GR+{_+=z;XjLq_AA`+0i z$s(aB#Kg=BNGWgNEZKt~sw^t3DRCM|r~EOtUSn%*;mEoivV_}a;QSu4b-WXyvrbh* z@VQznCEl!>)TbC?-*2^r(8F$5|0hLk1s!P|0ly$MMKle~H7WPdiV$D^y|YBn34o?SMZ zw(t9TXENTKo5}`mKY#qO-5(Rn=IR{KZG}!-LUydUdt>;V(b>^}qT1?r47%@b&pBDDLd+ zH=A|vez{nG^zmm0w;r$^^=P_~B(ZP%cH=>v0HSsK#|PDPvToXLz4m8ki;q7JUwm$t z3unkWD@L1s8Ttjp7Wzh-rX-7qWE~k&-&7!B*$9l>$-DD=?`%Sd%gZ`kPf8_$)v|Z> z-r=1$7%2IEdHxF78&@$F=J<`{H{RRbJsKAyhg3L6)~Ny^SO5v0&@P+s_~rT4x(A|z z{hd*1A$dpeg zHAUA2V;r;gAuiiSvZI7W1Vm}o^poi{#)wFL9|Z`F@f?cMaZ1)u-*v`dU6oZ?r08Ra zB2hR3fUs0$1*%0+6wWa-0*&gaF=jTOiX>x+3?zxH8UqtjSvcm{wOv&>0xk>d`&L=D zjdUg^78Xu&d39Z%98cf4*AN1#wa6;j4wfat1`&uD&FRIWRP)~P;r_^gq^wI8VFCbE zYm7k?LM)vvY{q_=2r?*#2pQ=UfGF5`R&s)-k4cdfDKfJR_}_L*1?B{*#^fkAXRXbq z7KB_$4*<7x5W{$0>`b$Ryi!EJ&>gkH&$J6){R(5Nhe{B>pBx=N{^Bt>noesD;p*&Cu$s>IUp#*{p3mmHvl!a_ z!#O&>S}ktB`EcDu1o`WK@Oxg`(YQ(>Ia2}HKmWIXR$BMrFMe_N;r+k$Y1peM$^ioGzMQeEP+$2e)EaJ%9Zi>gwRm$#S#qeYDm!!S^vD zpard+v*hl)`L-*^i|Y;)+SO)pc7E&jF%eu`oOfMcPp6Z;xhq}jJJ_t7$B)l`_M>$E 
z+*z(hPLO-Q?wV_mp8YlVSH?h!NuY4m+!|Y1)V}9LIzGPjjo%eB|MIg(uzEEvwIE)v z*9nWAqdR~lB6aH(#Q+Kdg_++uxbybT?%{M)r|3mfRXS@21$N*2By_!Qp1wL;cYRg5 zMDB%kE@$WJ$g8O+QA@O37Yw+N4X zb*|<&5)c1AvhZ-~s(|uf#K|E3fXnMmv_yV=!vO8*!0o|S00*Foltcf&7W=uCl;+`| zwkoB8>lWoS$X@ej3c6iMG6kF>4_~JG5Sz9Y5hRK+hUgU~rI2l?);ed4rrof3=CD~V zOIM*9Q9%MxPMk!QQ!t2iSsG*d7`oU;j;gUNENRl1x~9nuQ!(jyvh(ugc~#6`yf|x` z{^0Q7_|D;SvszqV{N{Im^>A;`P)%-5b{BAhS~)X9Df%|7mrb|vzE3RRWAY(zV&D6e z60jP;6a#>!7&n_uh~APqq8L0GDqN9OI%Qer)j_ruJjS{hSQ3otap@c?I7@{!!Vy7@ z$${Y%6K6uQZJP0TYzVuqT`rd*To<-*wrN)ii3}-Fkdf-_{0EExcZ zDF6alLedXE{&capq)DwM&F7=KAWO(efW;z~hH5e;kU{tQVwDo?mG$A&5r8Y45(;7x zNht}dA)5lDLSd}3z~De6LSp7D?9GOdt`8wfS&l-CP1pB*;yAeF@oQNqxoRfQMnpNw zfQHmW#H=RB?b$G}1SCnlk6qvUKE_vi8Wem)z& zeDNg#G^=%i6cQLWIzL-LjPrx}bnGy)5rwv$&8NY8XG^Nax8Ho{+3SmPXI@Um?RuGf z6Ey{(RiohC%d>MIbiI^|YyZ~UZ~G7`GxD48y>EZ#M}PV=j&?ksui6k4oU=dq{tue# z<$N-^cmIJK&B~&B_W0SPoNP9IoA9%zr>EzaZ#=xee0_TM=-I8?M{B<{MUfJ=F=AoS zSyAm4&Fkl{9GX|pUmhMEAK$vuE?22t`OVT2g2ujYnA3PVn;jmSx=JZ=*T&`I`pKjA zi^rz(C0argmAlji-lVt*n*|7{EqOH8Vw7TM(%NvVV!1rp`Nr?;+<*7cryq5%9@W}f zlo&zXxR~yg?B+&es~5tSn=C*{B?2}eoHG2a3qhBySoN>WLh?}o(8EIQ2lBBIJE3x04QdAFPu zCzg~3(48e~Y9zZ0a&;6BfDwxhzGBV7wynrAo4&I29|3Ss&EZhGZb)vfh#NP+SH?9# zgs=IXhb*G6Ob7tHEqU6K*_!V?5#f{uPUC9~2n#3X)oRU&$rxtteV^N+Lg=kCgmS&O zh~j;0H|w>tZaf|fL*m@YZbD4!rin4AatJ=97&+u(o2n>K!FRrIyU_a>!l)WGt96R7 zTBa|*Jago3pBy9jO?$O>Fn{p&!*aIg#)o9*fTQ4vpfXtWo%g-(yU=wVXf_!zfEsJO z@2xR8ymGTxph8L-f(S$Dij-p0w@L`kZTco^YWfb0VNhE*G#P3xGaZmErl?{Og`=|M z9;+Nhkk6i~Dnm>FkefMYQ2Wur5rg2Kss!n*%VPaQwVE{-1i}dqzcAZ0A$u&S&{Ew7R?UiTpu6OO>;bw zHAtkYF(zRpAY%-ni9WZtE6#Vl||uW^gbjG&Kdx5 z&KgUb&88|xNw_Fn4BnBHRyC!j*)XWFF0qKh=U+Y*B3F(-{^h5C?XUjM;%ZseMGzu4 ze*Nn55 z*KN~qN{h>@7OlKpBzHgQ%hr5+ARZ(xc$gX^OxgJeV`bIzf>7)C%ZXX@)&vxd0 z3{BTX0aKQ)tjcoi8~OBSpExu`Ae^`lg+XPhs+tI6j9uG?m~2(<-9A=l0;j?i-1N&A zuP#4-jBQ(~)&^?}24akjre1v`T@S2U*0vt`_A)J=THQ!eDQ44mzkBb!Z@oHw(OkZC z&|3onWb0Wq*>SF9-^I-}NwmhP8O@L1-aWWGsmHZ}(kMuD#;R!7HULtNOV7O6w69)Y zTwh-7Pbcrb@s=6L 
zG|XrNksvwJC@Vb&Me4x$a+0n5Y4{46Q%YMdTtpGnd4GL*;oHsoAAC?x54_BZ(JiDIVxeMCjk3fT^S31SUSa}a1B$GbD_Bxd z7BPhw{Cd4~)+Et&ww32nx!eYK;p~Tv1qSaJwRWX@7zh0lNqbYZO^p_t# z|Mbf@-@3nCpHqR%fTjdn^n&YNo1jhCDe1-e83-CvEtc)Y<B0 zuzUMxcCuH_#?D!Y(O+F%KmOdmeod=uUS2qr$$Z}sh1iO3v{{j`7L^-M6-1haLzRSL zCgtsSZ@u-6#cHuWf61Hc5{;#CU>ffp)|1(Kc@>uzMw5brdhhuD2fO>nd%JT2Oakbf zk`>hu!=`DXau0gd_s?FxzP!BH-`%@^_YIXCTdO{#kT|LcLg2`f89Ggp(iR%Z$W1=> z^WBgaf+0;rawtQlev=3f1}PCy&3byi0Zz9Rv)tGPRJRrck!>pHjS4hB_1o-e-1e^A zxPG_k6kDX|D+$p}JjL*Snva0|Z*(i=)-5QMUvl!*b+`SR^VbWgqT+qu^(`}(WkCi+ zIi&!gZPTy_8dL#cHiV@sKtw>=ZcWbiDH>-5wC!5v6k}8Y=Sl)fkz-74*AkK=Q()=XX0)=%1 zs4PWck$?=aBt(cIfv7|Mvw!pbzKgrJ?(E(^E~}y}ZQ-b{in7QItVybDu|s`+aak7S z{-~}9N@ENtA#%zOtE42ZD7+7fWQ{AF6J~2o*5;ssiV#u`j7uUugqZi>loW)FQH8$q z%u{bzU%v@Z9|II6`bVB(=Tt|zGtkxT3qeScZ@|vjT}|89aORh zt#H;F3cf9zu?7L89*qoHgEX$Ho$0g-$+!{;MWgq95>>S8K6D(TB3bvt@BiiB{)4|# zA+1)vO_CHQvq{%B>*d9rJNp1|v4Ur>u0$Q0VmhBbe)7d+JUM@LS&w%nlbJJ+x|OD$ zj53G5_oL}vg5uLpU%h;>S~ubOljrY!<3Y2y{_DT}jo<&mKIukI29I8{dv? zKPv6X@!|8wUjipo^Wle|+PePLZ~bP7=H>Z?DT~Qy^xMDlt?{HPM=lEYLD~?XzC8Q# zbaB0kKl}OPyLTV#9Zsvlpdex4J6^BCy6s(EqOs1FNapp+v#ytMz1OUL(=@_p$ds-m z01)nVTX1xy03Jo^FyWOL`qc^H*Hm)b2 zKn8XX=F{0YaacF2q}(O%L6)8W`18jvPG3KK?38=)EKkA%qn2RgpM}a+YVPN?q2-O>a<#}Xz>z(&UlLIod%I=_4eaj(a%@+7BC<~{QGxV4> zZU@P}Z&L`u-1$JZU_}6%SYnC-iUh1egegXYG^81GzBLgQ4N*~+F(tHi-FAt^Sz9=Z zNUkXI%H@tEx&=P?Z3O zkR8cTo~FpgSOqG}^56W)pD)+z$^Opn;qG``5UFY!)kWdZpsJ+O89i%p} zDb&^>00OZppd~Ua$re^YV~WN?SvY1k#^xV6;8r>ezzW;f0VGb5W8e4LXu-+F5@onK#H*Hx9H;&OMVBIYf=u(^BK z5t$fcX$s$Vg>k|fLa+{Y=aa$}F`=QdFgEpliV+lxvW(1SRf%HjdA>jW{Nqpm*5CU5 z82zh@&1%&jAM78@$0j!8%1-8cs|cq{ug00G98Idt`Z{zgM`WBEPp6uqL+zTLY!w4@ z0tI`$y!!m{m*4!>w>lplJ^J#yzy6KGk^KFC_YeNlfBsM3eD4;kefsjb*R~DaTkpMj zes(45fT75T4sHHemdWouNO<-Mu|BonYd5w^`=>F zVo3FDIy>AO&FAH$HYIV|tWTeAo~;kTOl1lY<7UFyD;r z!woRRDoZ{{Wx#K~F6`fGFF}VT0EPqwxl!>;reJea>0s}L;l$P<>TBDzsvuu6Y528S zf9w0b*$m)DU#d5CyBV`qIwhMqXZQJ*rMG>(moI=jk z1D|4Gf4(@a#}%OgBo>V^rWA!G#()M`B15pwq~uir0ora;y4uII-t^AYuU=gI^rs)T 
z!6)Iv!`=C;s%le@#|O9XQ8}$A2h14z6oaagbB;Ml5{Yb$0aXwXmJou7h7j7eb=Ibs zB&jP(5nXRK=T{dYB?X9y*XtD#c;8o5-SxqT7?~&YX_7>a6tr*-0Sp;dVPP^_*M(@Z zg-Mc_W9bS>qM$@1oUAp7C<;hqol7wikSj_bLQHabbsc@Mh5!M`h-yr1$e@BDEM4{! zfT|Lp%~?CfFbf)UeZ6{ddRojTdxv|YQPK75rr&g_3lc;UnT#2#g1K01y3if&&1My9 zO1xb$$xt>Q_!!Zc7$Xv>a#6TpLmV<`QFX{j;p~DDLByQNSr~^(oPdmLnr^XN`4B{a z2(7h-4C0W?oNqJ*NW$6ln`Ah?nI%8x%#vbu(JCRj!dhpEC@YtI@O|h*@G(U|L8E}f zV>*b7I2%?xv!EeWNGt;I(Wjplww#V;ZQH*-ebqG0WHPY@ktHA?Lj&hQX#p zG8(xXjVmA$0cH-NolZuJ#WHae<*c+;0R$?G!Wm34Aad9BckexX_Wbpq{>i`o8~@Jt z-u>Xj`>07L;~FIF?2Hj*XK#G9*jP*5V(FZl&vqttJsQR+TH0mY`XQ^@4dNy za43w=o;^2MI5Ym@@e@Kh-rIlm<;&%&y}DdXCL_>ji1+qJzxKgUM!zHqJC9i2;G&D&aal*2Upg%bebaj zPB0kv0vAy+NaRT)Sh)X5x02Mh2`GW0fV;ww5Aa_=w*f^36s1wgii zlS3NyO$z~Sw*TB@v$=XYPk|oB6D?lHK}Jl z`qirKeR4%zj*2^X4(bx?QFZU(oBJmZJFfdcDd}K$5Q(AlzTpsnB|DJ>L^N7LWG2wi z_jO&jU5f}^*B~luiYeM62T4K<0YF5AtXa3668XfV@u-i%6fUHg)to6w)*YspP}QN1 zO4s$RfLLP;S;Q<@Cu_-A%($Q>j4}Ao7li{PW1Jzk-fYH`5n6DiCBmj@in1`40K^tX zKpC_TeKr;A05)n}S4i;3|K?As-Gkc?@6GlmRb`5iVG9W%fhuC;gdh%$@47zvab50C z3YJt<1#eZtAZUPs_eEJSGm>S2vaZ3PA(Q1jEHc!70jh$JA>WsXXsjVb5k-SQT90d2 z6q}~)`;e;^VH?PtnF(gh26=Bd09VbbU3hfwZp5zUz&#tmtD>V|-E|YmMR9S(4CqZ$7;L(Py8zdbChYxMI#6F)v$J=EYW-$ikY2t% zi%~!PF#HrJTjt{)%$mjfAQl-_irE0C&hYk`Q}^qPfm`DvOGT6pUtXqpM%%I{I$>*fz zK$UGC&o<40Z{VQeAXgO?BvO?S0;e=2gKk~I+YE>;CcpJk!VUfpgVs}(hcW;H0v+lj zx9M^_jZJBX+i!PvgTEk8AL$R-~!7M(ZrGzBuNAi7LG9r2wFr7 zEKww(h;MsNqDnxPRT=oz>sR}GM-~edFi0XP#gKUwQ7#LIph!w!y4FwXoxW?7VRwFT zdUkQKSOuV!Z|*$2b8xb=TrP}pZ-3+0(M%MI7_@7A3K1kp@|xHI82}u5SwuCW3JEwQ z!_ci80$8t>2(Y+b=0tiRii!K+y${YhRIJLXuw}{{T4WS;##k~bG7=W04bh_^QLxq; z#LBvYYzQ6+ZDEO2IXO#~$XR0`2jYn%V-aHru5e}LkiZ(-G_5lQAtjCq+Vy=|4s-|s zvTD+Vh6?8l7(_}*L^W|L$Hkxg;744ICi{Eyy_rQ#F^WP`wYEr+Es-;}axVDT^l?;I zcTWydVlqI4q8ehf)|IXtyjso)r>ZJAMe7JD4?~=lK3eR?_>&tANKWy-RiR()I%iZgk?K~+u6L`Y}~ z>zv6kifSDd&N<_Zv90&Ea827@Ew8833Hwx3Mp3MDh`=mmX^r9Co%zMp6)UJAJF33_ z{hx@q@BjG^#yj)LZ2IPV?_OM9iS)M)cdxFXYYkE9%CepoKDB*l7mKTMe@-ako7ilq zsQSKt^2JjL+IQ*2tFt!vzxlU*|JI$|(Wrj;dU5Zq50-8C@lSr7Qa79J3ecPHJ~+EL 
zolSQ?`|QcbkDgtx<1at?A{g`J)yt0`Jxc(eeEw|H;k^eZXRlws{qWw&$$Y)I`isB( z@$*;b#@eIf-BD?z=NB(dAH4I8vy}nGF1yHm$Vd_(4ff~xa`tbhood=usWpnn0k)Du3NaQAEHEY+`+Aa}U z$=G7rxBm9w$?d~iDqK2;NTbonkRznBbmXWSkI|OPw)y1Iqk1%c^WhuTk)kk%#4&PG z5Fn7DLSpL(22qjR>L^2X*p^e>(y3X*Kj?Nc%#Og4h(J_hRt2f%3rU9yBY$wcNfOmO zsO9j7EeZrfG!_sIMesv2KtA_#(+FL?$K(AJZQ({7g(EY^7~1Gt z-?au2fkD|Ip$I92wntV?sq4BByo#)rYa;Z~GiYEAQIf*C>%pL?7;EcMZJZ696fkkL z&R8O6OukJ;BqjDKwV}_Kq(hodriKjYaGZcd0}R5X;w%BGB`l1w#;Sr3p)89KeBsQf zth!B8yPBgC*_5Q~`=Mf;j3o<#26MNeBWqZN48@e{YW#yA{~{_$;p*9>bk3*d zX!@iHC@BIOW)KlhQDWqnFw2%WRb^2XW$0tO=|UHn8MlGtWJw|8so4_(qL4&GN`3Sp z1ZEzrD#B)fu|z==rx=q6ppv0H>=RM0+r~Ujsfd7B=d!J3z23BazcZblzIr(xk6vG# zw?6Fe9j(_ZLoll9JUNLl5ixUFlqp7MERj0rLSzNQLy#`_-DWbLAX%WocU@VSYE&i> zAmqfk_>;&^r#oFAt*w02sKv{(e&^uu;P(DcfA-aw!VmUV5b zq8`!W~=jJ7~>qw#z?-vdOAf!kGBU-`wUY%Z%5j>col z{`I2|X|wca&z8?Whh}M%3rB{u?=I8&6>vk^TO<)DIFst2-oG>7Jy@K*>@S~N?NA{> z>1KzMgX6wg#pS}5^=P_-uDZIoe(&vf?j9e4?`yJ!wFaqlRaLl2U6sy>YG8eFcJ=6s z$J6PgE}W#KIa+ON?@IhNeU5N;`3|V~5a}i&NrgcLl(&V8l2xAsN(dqJWm#nT%r*f! 
zzXk?snT0`R7^hG{IP3HVOetqV+_Xs!(Vsy1<$(x91UKP|+wOV<%^S+sXo2Akym1F) z`WJ6DmLV-7e|g)_j`MvmP|FGmnKTC$RUi>T0&-?F8$}-o4FCaAAKIkh#j7U=dpq;d zRAS79G=!Kq)b+TqmSY?hBjuzKt}d@r(5fE2cy)Sqe$|F3q#u0q-QB(MrdhQ$5Yuu3CW)zv6@4n9_8g+QT?DQS+$3yF~o z05Y?&wrM(ML1Xi`lYnmW{_^*j(6(JL))i%uI2NTb#;9m+Hpxd=u7S+IK&5n6g;}BN zdr>f!R-2V`g>yEg=&W_FAY*(8+0@D@86pv3j))Av`3y}dfeNaM#!o)`cyw|+J2arFQLneCW#8o{`icthnC8eq?+OC~UC#yv}scTC*8%?}V zIcIKMR-5JO*5R#BKmXhq=Zey}LXb>NP2axp=KYKF%a^Y&pFDXn9Z!GzuYTuh`SQ*i z`!Alq^ggytJKfvuVr+d^mHOhBzc@a*1z^yTa*7K3d%G#F9z47=sYeFspZ?Q-`di=o z-u1=$U;Xj-745Bqz3IpS_j?DE$De$5eR|b3KE_l{r`68<<0p?FJ^pgGyB{K3S8Y1I zT%~8{*FX5-Pj+?>KKjLn`#ZBY-@5zuJNHJT0u8cq*ZZdL*KPOw)oWu+)I^57Z;g>^ zR1}3Z&X6NhSX&gLuv%?=A2`K!)4HlUx^=s(Yb7#e#R5XMOEId{)5&!AV7hZ)Xw2f8 zCM?$LMYAMB&e)3=Pb>DWT`r$}wtD&4Xeca@MBi+}D%DnLksu;=K7<7Tr`5il4P zUR_+BonI0X!gdxP-Wn*;AyG$fM(4pqyKVcHj0|lD<%9&og`vzG%F-%G?>kmj7!s88 z1pPG?QNBxt{OOGOY~z4*_#-L}0~{a#4RuVAtLO(pAU9qR4Sn=OxHN7D|NMJKh7dFt z|7xyu*?t`v4aaj|Ha?D6r@oA>XhzF%+FR8~iKA11NP z=)2zcK7=5NH3~7HfFwpDQ6>W+^eXI#g70JU%gur;h8WATDvU#bzVq318iGdexHeF#8kjJ4L7 z%q$TJOMZqTryH*5Ms0?KkWt4RTTWmt)^%BrD(j4@rkI!+hQ>^@jrG?&KIhi}tHc;X z3^ApzmTQ&FM&NdU$!^9=-E2ICB%`9T zh7iP}+@UAO(E-?BeWv@$k(Dr>9>Y98R7+dD^WT37UGjx>_!- zFLw8KH&<6!j}G=vQi{!LRnK;3yZej9^-uot&u$+d+&?+^?ce>L829+aE3Dk6ZNK@e zA2jRrTkqWO*9+!ot>L7rw*BdcA63t~;?&tDYI*t%M!cD=a-zXWb7OGYcC9j%VW#}BLdPP4p_%>^lgTAvD- z-KxfW{dEI&G@qX^+vRe-x?DcI_r|Y%wIxIV7G=a$)1O^0tMSBG zJBa3C;B#-loh&UE_!?@@3@U)b(F`aGtT;$P0o6DH5I`nE-*qCK&+&YwBB-bUsiF*K zT>0-4!g@3E=lYc4{-LeLlLq;wX22+GMRQ-U0!n^kIQ@r^-@#6%U;RJw_%FkO09!an zRV01oGe89d8A6;@L4iO((E^Z0PLV^1-Ew(-@BV!NjWJ{`xd14^tFzbV7pHH(^`>vz zvUI}HAOY}tb2YBYX1VmOzrMV7rkYK6&(5x{E;gG!vBcl}-mmt}qHQ)&;PB49ay03? 
z7=4J|6Cg)M5l3hS!C=a1n9jK9d*MJJsDcXX^-@7sn{#j5R9`$ z=?ci|*TkUUY}vFO0FFmBvp0rvNKWf}Fo0H7rLHPvPzIMpjs%F93nPsy{_8*bW8e8V zzVXh(_un_UK$x|3RyaB56f_AV5-Dnkhdc9woqDUj7uKv#R$~nUfg&n-pNgUwW_1-a zAc}eT&Z1;ZG3CJhBobM2cq*t;PV`j0i6JDyqOfIAxGd&T007xa_;32ce2AHs%V%&B z4k^YVlt~jar*xCTsGuwiH|?yFx9`N9MAe7Xww{wbfBI@?w%hwn;v|6QS67JS3|W-v zs4jByH4t;kmme97609?#l9GfVoM2Lo_I7pvL?D)x1x4TWhIBR_S;CZrj3rz882ZqU zC$p>7x~eNcNfP_ei^NfFZ{5E2FaP*2BHIUVzPW7tN1uJUTr}^$_ul&A>h8(yM;|?z z?HoY@NA_s%5Q;pMSaN z<@nxv=j-t3>9gLm2W+`$d#1W$sK&b~V!z?tox{Lk|L|ZmnI%P2 z6iC*(%GlD{q9|)}g((VFVnDJ**N44>!_j0qnvSPCJJb2R8jVNOc~MS6!Y**H$hMr` zx_fZ{&BHry9o~8K)*J86kM36W?#0W~Uw;2zpMCTLxcD@kezJb?s9#@K1%ZZseF=UE zaSgEnFs7VieKa|GSd3aY7;SLet}Vewfys8PAVz-Qqr+zIb)^^3@3GgNJY3 z-8-C-tx!iL)-ETv02&iP)_#K}8AE7>jzm`8YA*A9EV5OC0tx^tux9rcFn|c~z?Tho zhQt^YgbaG$CJ6vSG?Y~%2%sS8R;{@W>ip^g$iY`KaKeD9I>>tl;9n^-yO~VL7lmer zXVymKF&(qqM+PYW=G@OE95+Bv9txE;16?q*m5H*V3{x#&j*`nH2W=6jA!P!%Ynmjy zv%A-Ktthr#2%Hi~7vihamjLPho!i-qWL?p8P3fF73KHA4ZF_Ia>ipvB>62HDkBjBv z-o4}f{mI$+3jr|Yc)owUY5LV_rOeLQ=p*J5EM-lJR2-oNjGa$D4o(A!IZLx^8(`k7 zSC$A6gZD_t%x%{zU|H6NTvgT170cy1AGQdX0~lQCQXUUEIp+{*x!ELPLG&>Y$ZR~a z2$q~AvBnZ05jtx)B{HfSjfEsJXHTZgfHMMGK&;bhm@B&Rk4PW2&)=r zRRv*421TNxtghPiKmPyx^EgVd zjx9z6YfNEnt~S+e#BSF6BOw4pPCjJ$wPu#v#~5M?F~z_k#290YDWw!bpHkrLAxTMM zlo&(kIi(m<--WJ;zVpu5x~|X9FJ|+d%f+>j>>uui&{(H7d(!d)tB@0liqe8|S=y{` z9!)BiG@s3f8e>FD2BU^9lBKTeM&+n33xiaUrqCA7gp@pk0IDd5II2q@+S@0)eIH(} z;pdN@-h2CD=fkH@&n{N&+wZ+?g!Xrio;|znH<1IB#?GcwzgpHiqtmlfwnaJKLuQjg zw`q1}yWOTcIoWkKEv~O#JnyP{a(L_L@Be#$=LdiO{mZMRf_?G&A|;s4_CsVPIz3UJ^PMELT(^MEmNg?rR#wdw%|$U9jiyCCu4g-^r)RU> zJw-63O(|97sIa5LO{cSidNNb8K`168G`6#&`tYGWcw=<(;MRNJy!-ZhMO`h=pPzpG zc<_bM{lg%_VcZWdQ}jt(B&J=~vF1tgmj(hz(INz=MrXO1l=y{RhVfU)1$rr_&p zpDo~%E5354SXQPXf~uq#0l}3PP$CCK0pu)<$hrOlurde{WN-tZ0Lm7C8o(vYr#C{@ zm=1#!D2eDc>2oXe`ARyfTkinmB}In@;B3?RIemXl4 zq1P`jSId60+RP^PyKg_-ESCTRL`S#p?H--&m=1`v!fKr|jtLyTSTN25u03H2fP;1MzSK!hRn zRaFiKD0> zfph<)!v7`i4?(!`PyOWI~I z<^-<95|ac@ed3rnMosLyKKdY>3g?-Bg%HO8{??sVo1IE%u(YF0jaVFnQA 
zOg1_gqL`vF6oMy$!kL`hwQd?$Iv^m0u4&7vtVY%At4mv&x-{bZx}dr=MPW`aFHGr_ zAVxMo6IW6fKX~Wiqo=PuxMyem#l`Bu8xOml&n^~y-yR?zGxr+ynXhwaQ<}Sn4oViUK;S0f?uB-Ysrqx|DbS%sH#?P2TUQsE)iYoMG5%<6`!)vYaT3*&#y0=Crw@eLNVWq=h3Zd{k! z)QKUjeYjNe;xv4JKvck>DT)LF1+-zkT%CUK&36=f<)~3o;K_8-B_9z!{PdTzor%Sy zp`Db473|uu={+k|quFxRJp1zHded}$`1W^xZ9bozoxVOeKB()-o#Qu0h>0MjVK!?K%WQXjQbX9BW*zS73}S zT#_^h&J&X%1u@oy5D*m!(4Z(RS54t65k#_p7*dKXiVB3HYOP~t=Ui?n0T@PYg9xl- zXlU0FZQHJ_N*1o_B5?qO;C*fuLsI8V3?5VnP!-WuNrwtMSDM$$%m45H@4qdk_BX%% zjd*nl?Iwk;s7gUF)}kTLaTEL|^y8|!y|ZHkI5E+%SRi7y0*Z)r4h*(kXN*-qw6s+~ zCqOXP4xGOrGep)T$ch>G`U=%S%%Bh=<_A)lv;K1^YsryqInZtE1;-r!378Ux5K_tk zR1!jrY4FEKPC2kwML5PGa$Q9L8A0a0K$9c^R%C_LG^-Gz5G>Z~YCP(FZw<{SV`42# z0ZIY@Xn=@ChzJDAvQzEq|W_G|B#mASlH^qw!Tug3f1ma%F3u4((W z1HwsFQ}WXi{PNtEWUV6l)$*KE{Pk~t>yuAE?Yrje_2s|&@BHD5r_UD){>4Y1%=V|> z`R=b@uNJ1XuddD>zj&e)_YMye@0GO_JEMb#llQ-O>pQhx%G>&-}Jb9HXQ+Ja|MK}UPHY4?_!o{Y-b;e0-+MtvWjJb&@aPd{tD-#^&Dd;jFt z$=>dCTo4%%WHzMM5rE*3!wImB;mlL-K;3>Ffz*LvMZ^r*=d7)wbY!sied3hg6I5vk zYg66A^*3rv*`CkCRv|ZMy$(nB7T3ubR1O`@xU**8+SUk=!J_RduoWfg_MXYpeNHaA zxespOrwnG@{2IP$(A;(U6)}xpB~%k3p;)V~G=V)8`0-~S-nxCfw>yo#HAoso1(+o! 
z9#5-Rub$tzd&hU-V7|v~ln9%pFU>ebIls6%zr0>;np?Mz-v8j8>&2xHu^#W#)7{Zz zZ?o|+a_+MUp=VB8mz^LXE0aa$U>yN+O2K=dqBQNMs!AVxj9yi}@7J3(D0W@n_hD3z z2(>Eh&V066uRGtDRhgs7vbHZfyhCJb3lOqolo?bbbKCl=Du~DxuBxk;k|+=nkpUoM zNJOl4LuX-1gs?N88G|7PK!`CILkN%(XO)4;2dIK1g{TFQq(B5AMpbpzDxk3iT9bPe z$kBLQ{EL72FFya`i*NttZyw*i>z6AGA%>oXHQ!Z4Lbzz!CRNP)I4^78S}uHie2_TR)WJy3nioNer&eIhMiDO={2UT7|gq%_yQxX>62Tt7go$tHT(^E8Nx!O!7 z)0EQn@)}e@*;)*tb45YMkhO|Hgu*PGa%6N-I3k!$XV#h!Vp$YOz@i~U>ufcy)oPH` zg$QJ>7fUn*Mw80*X3>RM)b*xq1k_kp7gdV=Z~fLc7mH=nrCx~1KKb%hIW7bP8tvNd z-kT3j?%%q)ju+>v!WNUdmcFmbN=0|->DjaArYK@!6`agRKlsakdw8pMML!zD-Q(E@ z@4feqBlM#Z1~ z?BjBNP*?ju{^6tdzxm#LZ-++HHJewL%gKCly}lTYik;~+^kKbO2`ZvnZ8}@lk+m9+ ze)7v-w22?Q{f(nLZ-6U%&T*?nWJ>A;3{Xj?a>bO~sE=aH>6b6gM)O@EgVwQvfayb8 zt=i?X@t&O>IaeCzSR|!b*E6IsRfkX>YH{dJ-aq`+-#dEe+vRkZ+opf{@D-u z#WRO}SX@ZE0>3~_oD{Uw`e66=I|;3C8u1%a&ml?_EoSAdcc?xVE=E;tEs~>E-~Zy{ zPdZ*-adHy;hmF%X+_Fibl{rR1_4wh2Ze3Rw_q!{A42!@37&W8fwo0O!YrdG z>q#M3QZxA=#Fg4d4%IF2oT|`Sc&Igsj>|Aj+IxwjgJJ*`P@p(!CH2A*zrts)%X= z^_+hCqaT0!Ti=>YM*?Z67&Mj$3+pzk&2;B@Iz0fUzDcQ7$F`<2uDcgcUWg#De)Aje zvhS}iE_ZhhN8|m2!-v+4tg&soM&OisB9N3~@FGe?NaicdN~5HhqIZsx_#|PvGu<>x zG!UZieHT)+g=yMNS(di2zHcX!aZ2I?i-H{>7i)}RW>)n+up$eTt~3hHfOA$98t=(j z>&T$a#$%2#kL{$YDG`8jD#(<^7L@5*nT~5zNGSpV3!6+36T}!?>Ds-nq)sfp9?v1$^590t-ljpZw(UKl*2XboA}t{X75Re~1>V>?1IQ2%;be3G{ls zHWrU}cWPtuqnv*n1o;y)2so<7DuX`*4LavCP-BRQzz|tODv;yReMr5JNg*mjc0J_E zUKUUxr0mEAL^39;XS16DiI^oLPL@pesf%ce%!yTntN}w&r0qH%L!LNTrT4+d7-9?| z`Vc}2A%y6o?>!1|O!#V)7;DP9Oq@7HOB7?U zghnZ6(HSHaNQu$d#&@Gh-E|>Gh*7O^1m@)EWW8GX5Y|oOtShYZzRxUL@O*f9^y%kc z+Oi~DGSaiNMK3TL)rJ*?DpRFiLI78duP>HWQ6^4BU8e+XBgymSqfcl1w;Hu44<5Gd zDy^6IkB?qIeg4gN-f7$R^B0#)q$z&)+waUq=GF7(u}u!@5O_M>|Ln=jR?XS<(inLA z;r+H*yQ=)Nzxdf3@4mZpwEq`B`03HD+h|-b@_Kpx_6P5+n+-Z*P(ib4z3=;O)s|@6 z)y7D%GuvM+J8D6IGKy0h8l{d+&Eq{CY+G#&8cQhM+J$sP4QU?QA@n?(Ix> zX0zFRzxDF@(-(jG{qMg%{c=9CZ{9xmwfEoryMOpM%d%K+Hju;2^UOYM>IjHAJsEO% zeXd^BEW#8(NSqX8JQ>^4^uCWNW{ovVQcyB@qt4Gc&RZN^b+|mTg;iO;B3}n79Ax!P 
zjy4d{#J~bkk%@EWT*hHCFa@AnkV>}7^*rN$rSZ?v6uFKmhy3KWCD_&ukm7cG02CrX zR3boN%t>DZE+!c{`iZ}e*1U6H>#~GXlRTn96IZ&@nW@l?}J}S zF)bJEdf_cq1y#?!c=q!7sWE2naDH^M+jmO@Jvclzww_K8P>qC?`rcA*=0MtJWarch zV@OzHjM+LzK-QS$`f5~{!FM8zh{jkE4I!>p>y)A~Hc2YV62M$9mVl%xRatP#whjOU zSC)}kkvbo$x(qSobOct|^q~t;n48tQa?bZ2i6XOtaEg?v9u;Se$ww!PbJiNmqSjaz z;lS3I98iZuyP`>|%fAAZ> z`Hj7?shu%okWfG)N3hhT0A%LV8L6fu2x^cBh?GbPSgmp12T?J`qGZC{ebUu5zWl360%+8Fa*rCXJC9;>n|fVaPsHRMdL3vK$& z#}K!9v?0V0hhMq1C?pmEPU1r(V{=m&8RJ88MOoLCbH-R>$N>Pe#2|$$0nu2S(I*i| zNxctk*JdrR$=Reu0hy;pvooLfUDx}FX)S ze0hnc=z1?m>pr}A`D$lehRvq1?#U-l4UG18i)P(4>wa~;@qJGttFA03lcQVvuU|i> z@fddZ$9L|{cJ@C1NRv%;@6~1#zD2W|KNkjbbWq~>NRehffNPiC;NBb6)G!N z%%|hoWIP^?N>{awfAs0+AAkHYqTD_?YTDJupM0FP@BnHC0o)c-$;>FCaApW_u+Sr_ zYK+lX>&l|@UE4OSDU+;u*vFfg{;z)BC^m0O9JiYLAqQ@d>dAl(!Powl1(FgABpS-G zl~rymT>x9f-q!22rTd2KX2=-blEo@pRd`-*6t-bQxFsc22$4v$Hz+3z>97-QX=OkK zVgWK%5veSmy?Xxg`Ll0+Q@gS|a#D`Q8685^}2HFX{oviY7<0#!q(!Yb^@XyO#2MeF-ujoob4DnfwQ zmsf8MK6sN`(-9OPrcf3ZRID{IMQfb`vSieS zvT#1T#3M&CAd-><1T;j>7zMCp4Q27-?EHW6zy9C;tN-jjJvp3x_t)R9P%2j%G=>Zz z8glC9APs+v+73@q89vJJ9gk^B-F0tvJ4OoUZ)SevybCp#r(&hlwO zB@}|FoK%5G6qvPbeLWgoTwXY56LTNDPu@5{R8A?xkg0ESWUNtzlmrlb3)8M z83Kd#q!vIHNKr>s?c27ltR@yt);JJzwOEm%E<|v~7#9x>jCiSZ6?%ui8`M8NF zY6%bo8R&~Ip1k$;{l0Jdwm&#HSYBM+y?gND%U8Cj`radO?D}G~1J+E(lWA2!WHZ`{ z$&4xZ6C-I_Pm)ig$tbEsQmBkr z(=+=-g{u?ea@93W5GHFZAPP#Cy54s{5;+2bu~ax~tVztQVqCdeH^sPSgp^VYL75R4 zK>I!{SN*E)JYJj6p(C$*?{m@pTXt>6XnHX0vP;z^@sG0dyNPs#^pM5EM}G<{cm-RVu(KdQfr3C}~ps_>)Hp zc=z6IYtdTDx%Mfh3fIy@ZiB4P2X2# zol-bw9Xk|S+5q39D-T3d~mq`#?^^?DO__73j7 zbMMQSPrH6okBUG1dw=+cfA8=9^5ds{hxhKi`Q(eo&bifc-TOXj3?d%j*{jp*Wy=cI zRjkkj-$$bL~*`m&}M*!s*W9R_{tySY9 zqfgNm77Ze3mXJm7NtKi_33tSy((y@g``z6;@9*p%ThZ>~#qz~3uAcocEgyx&WANu7 z8;-pwLbY@7<_A=af)55Grl#GjRPDi?Um-WEY+V{NDxE*QqEz9&L1P^bZ%$=9Fzsaw7r$=tI8Iz z(R^NKBr=9I>H{|eu0>Qw7 z=aC-)h-d(Wa7$MV^3WlD6hHtHLLwF4bp{Lq*}DG8haXkb@qBkoB+LO&t)bnW*?PGM zea9)(rF;4O#ZQ0wNl_L@$A^VARbel#E>2GF*rFDNswhJ2S)#FI462AE_I*zZ!W?{v 
zvCq;GGFX%jjS@oFY^LLJ;fl6x5lIn@HNE$3+psWMGDM(Sn4;MU&z=>g~2v!Bmqfd+yIZq~8E}U)a1d3mEN;Bu@#F}ibfScaZ{5k-D9QdX>nCXa& zGq!LJ01~s0!TYQS8kS-f3CyAZNJ^vtNT`U$7Jy{HR<1H=CZmzE)@4O#2r&Da361xW zMY5+RhFI9rSZ9kOG59`$V$+4PuE>(-lvHh1sDN-HYXDVLJxkBRDn;S?uFo%y+|7A- zeAq`hJUaaN)29HoX*T1L1xo9tJJ`R~t#~rtfAsjp>DBe_{?5JQqw~|WSoExB(|Kk9 zTUN8(&87!Qkh-GAax&h(eQ#8k&akcAZJziBg=7N1LvTh?^KAAS2nTipW7?=fm}KxmYZPbu^kx##2J$ zoXh)_il)fXM`du1k_T7p+`ebXcI%b+b>-NH zz;3#K^k!Yn_NKFv%yc?(&ayCB8&hh#&WC^s969S3WNX&VahrfRtd9gau7^`3L~9Lm z3c;%ZAz|Qb1k1n)>Ja2WFl-WtiaJze502d%nd#PZkd0wkUL-@uK>oOSF3NWSt1@ki z_f&Iu(4guW-q@zRG2jsIqo^!{U2W(s8$?AzG~F<+W!?jZATYRjy>8pahW%x0KFD-- z2%ed(H7bxI7tRrZs66`ov!mmKsw!OJk_dwoRk>bmnspocSXJXsKYp@U_@m>K!=pVy z9@Rx%jgL<5EY{t4yi?fHqLKX8o)*wPkDHnqRNKqIvAi{*&`Au2c82ZtuvW8T+a1Iq>i1{a#zu!a_0Q(TgP)NOX zm?RK_Gd6@^jK=5*xG)5w))WYaqZo2v%Rl|`r~lD^{Qunh<~M)$uYJ#!npk{{edry5 z3Y)^czFH`uq-^0JQO!NNtU_dpvSL+0OerByRaTs$s^r$KA#uc*JQn4}GuNf%WC&47 zl0-G6FlgiSMmjyr8TpSl{)27I1LZjckzk0!f{c+FGk6EcU{2{2Rsb@_8nS3Ik!%%_ zRTHx<%=O}`o{geJHA=?xK9aR89GSCqMpcurElgdPWNAw}q>Qv8kuiof)^+I|C1xes zw7sjUw(Y$SWmSNoAS?!3-%e+xZ#&;dGHyQKPcemVy*I(hX!PVoGoH?zElXE*y}w$w zqurzb_Fw&3J)MbxHU`$jp+DR&`lhd{I`1NW-I{9GR#m%Lw&&+Yf*9w8#=CRo*seC9 zXcYHqI^G$LoP6=c7xRPJ>FF7PlG?MEePS4oDrfQ5tvd>Ky=uOE^1QC=s;*PwPd@z& zoxNT!eGKot{owq1`RLgz8cqN7hkyB-zx7+Y1Z&2GnUn?Y+hb48l%}1KfL~y7R&0&ilpm*0|hpk=y5=Uw{77<+ER4 ze;T_pfxc~;6l8LE7qDpCCiI;EC#3Ptt+L!jp@Oh<);e^B^FDYVIeSf1OzwvO-4;b` zB|wyU+I;dR1vUlQf`F8Tb(=$;qaXnIE957e>}kOI^Zc`ASih#HR~_~OP*vc;7B*P3 z_@*>KZsK`Zlajv$V6jbN|7cwiqQb^ZB7D86<;fGA1XSISIrJ$*Cbk1xZOFrx4mrg9wGQDfy;Z zAt4gk!bX-&@2PNQRmGTcSX)fFmO%pZcr;2OTI+04234@+tdqp3kdMng#NG#iOp|gxP|7ZWlZ~n?R|Jv_< zXK!k&q`c z0LCaH10)eqjdAEajm$A|lza``9PXK9z=4bsB{W%PjYK(V1v0;+kY{>kh>=4`IpKK- zF&UmTL_?yQ5@&&Feu3n)B0#j(U0<(8<5A4L0RYB27BtS4bzz*#R}!IGYm76jz#@ns zDT1)%-Z4X<;2U2W2Q0?WdbOU-roy^et(`Thnpg?!tJkN8hkG#w>l}b%AO&sKi+d;2 zS1(S5z_|((gp~{dndfKAPo7_>D_vc`es%u*^xDnJ55D#0tMjwqV__X@A5!Pp&klAs zs}13qMa$F8cBtSI@A7$B!UMV8a^ zvwAY^eNeE71=-pZV^fXHsC#b} 
zZ~fNfy}zP+Z?!mKh9dd?^^@k=M@6_Ubu+38v|}j86t zCWWuAwL#yy{p3`Kh?X2SJ)E*IkRe2(YB1kk!}XQPaUues!dhn?(J->*0((U*YIl9T zK(u#`?_Rl8kw$S9y$^lg@9ge=@%Z&R=zM29-KiH>SG#++_KpvonIe_-s4U7#RZ?P2 z?5s6N5}6c{Y$o4xszxSzQK4%aRT?n6;J%@?ONF|xB(k}yDsgCHlxP?crZ^<`N^hN`GawDbauW-~2_pTK~hp^Edz2@BOvcPd@kS#givbmaFFF7mwQ2iV!rZ zv6S&20PMXNL?um{SR^-j#TX+eYn+c>B#|*B$egTkS+SGJ>6nq21_Vn2mzE+ZYO3UGC7@Fks$#} z?ybaa^i^4xp${P>V~i_ojO=hQ%7_RE5UM0rQ9&SMR8U0;3}_^4d+!xAB#&sSqEvwB z{hhnFFJ7I-5KvW_jVYM9>AIcWT}w|TMQcmn2VwD{`;FiEjmKZUsHf#((}<+LZw#e3 zA3ps44?jNMt=2)Dt(d>&XU<`Mg~F;zA}Xo0udou#MJxjVo=eP7?C2g zqS(EzjOr~lIYm35KN6ltamo>+Pf-xj!j?8`NSDRHzkw8mhP^7L(vglI`DJpo7Rp2D~t_&e4LQ+jgt4)_9)k5m6MPtbsr=reS;bZ|y zOQ=GKNs=oaB95xcr~<38mRSs~BgHASFfuXKf5gScZz;Ji-G4kOD@Qq#8v9O-PcWNK#HLEG#_e zg>hgAU>Hnz&_SypDxs<<3lpKq@r>DsuPUP3?q5{_LP7+at#dIFk~J>B@KhBIk+E63 zz>!mu!Z}c#k7gI=*R~k@C_Zos>TF?+mADP~MYOIUS0tfL7j0Q8f^+WT;z}6|T0;CJ92!q@IY;kyi%Mk%iz_-uII?zH#^cUu$EmNY^jEeDUbX z-ne#RtQN6pYUj9*v)$c#T(YI6?^saDd3JyDXTMC5EMiFBlC|X8bqB1ARonEQiptdU z$InmC7K@$zgXgba{^TbgZrbqb;`)5CW`ku3JC(xPAPgUcV;p3tJ_uBbHE9 zDY8Tb>*hx%>jby&KD>K!G;+=&>`W(BVNq0*gluBWx~*(U**2-<=d3hlHknQ)lMuZ3 zUR9M~J8=&VG06UaEW<$s-FEqc8UtJL&(Pva!_Kkg0YovoiV*>TRgy}|)EDFwX=G7V z%0Y1i2q1bh%Wv&ngPv$00=B;ow&lEJe2s}7e$Uqu4sO&z;wgM|aG)t6KuU=SGc=U} zp{#St8dU_fh>%z$Pq?D0894z(W38&be)aOnlgDJJ>lziAPA2Qsa?`iuaQ9%m-dvAH z^?bHBneGMVvaAd_Yb*j7LQsj`=XiOJDSI?>`waqWVwKdkD>PcwMdFl0$BeNtrndKq zHHJ7HjYmZZDupWmsA+r@1`y$3L38h}b1tTkQeqCqpdqk^VvIidO}|mh@EkzSb}+8< zAx5sNDu=)#YPP2-AhQDKbTmfg(Wq=f7gc=p)*z_(zPHvT$&;xfs7Tr}KOlmLXbcs` zst5q}-V3uOP?n-_NJC->8LFHaSH=GBZtr~n>TvXY$^Y5E{n7vGKmN!6n}77r{}2D< zzx?0*zy9<8>Hq%!^n)+HEJveyRMq3Ms4Q6&h~xy2L6c}IOItYSLjXWa1{Cu08Dku* zCCY{f09gc)jC0wiCKHur33LAs{r))~j{rld7t*qyqv4x%Vr^l<)G$ zoVCx@cw~!W)3k}9uEw!XoOIJ}`s7_%6=jvN(ZuYE;-g=F_Rc$R^&wWHN$1n;JGbl7 z&UdEIUcMAVA^5xn3diBk3zUyjy_3|=B=vxoI z@2;-^sZZz;4^MVOf7Lf@s&?*w<2yp``sK@t(nlXX`t@&ryY2km-p<9@#r5T-ak^Mu zOy|?(x}D8t78#VH*z0w;zP!G*KXWR-^1-h>dHys3GEfLTB3D(BSjn2}<#PA%Ffdf3 
zdNSJuq$KJ&DN;y@jLSTEh(c6A4uC=vI4O|=c0Tn{Qc?l3MUAFFvWmot3M60`%gucE zxE@asO$ZSHbK7iDy3usxDw|Nc-nXkpJg2@bo!z$kC6MR{v#S-sup(j+6`>kOGv(@d z_s#E|yz?7DDq`gIYVq{3tS{Lwz3+^h)bm@m-t{3FGNbw7?!o=4J~-UJRT~#}6l#L5E9^8e@RKc6hwlKeo-#W^P;u1ZghM#)6>&pBxcaFSlBELB#b1CSQxSV z!2gV9G?Jf48n|l&h+Xc2#FD$f0E3yHY2E#_D4AKQu84>u+?^jz+^h%YODoHKb?c_M z5q{i%{u4kzRN49F2uY#y;pFh>j zE9eBQPwh0~?cPkZ4nn4Ab;VV48P!k}Oi>L8%%(G7LXLDIO4Ji&(t+Wv$AsSQh^IMY z!uTs>x3^@e8fYT|Mq75X!^H|kJrSrf*AN1;DWwdw?hjO3AA$?jW}yCH#E4K$#846Z zY_=KWuRs3y;^Kl39fz-c>&dTyRH6D|TikL>Nq1IBBDm0e z{Mj>f=mMX;I*ZAUPVb(bUA;bkefPo1;c0idS*vl{iR{MNY!;)fj_+sH(D3@j*9VX> zX;>Z~9v{AZ@%117=zH@c@~iOkkALCXwolS67wtT(x7(ZRo0l&ypFDZ{c;&msnd#zS z@i#yD>Gyv3TL+6)C5eJ@Ko<~;rJ+IKQd*``%~*Ya5W2}OP|bto1X_?lP|=Z)q62bZOwb*c)g$-T?>+dzpZmkNuC9jR=JnYp zKcnsW!WT3VZs+&kpDpiRK6^oer$>)dx;|RtL5A| z63sF8>+QBsqiri)OUpSpM??;>%D1W+(fVfHZ-$z;6u|A zJ2hZ-DW%PNv)ycVyCJ1)Ruk`UnaK6z1yPd%IhCmunPTN(&nu%z54m5C!K8MngzG)L zI5FO)c#WKME?G)RDdm!jN|6#%N+}~U0GMd)@pt`DI~iuKvGLn68nPM#koP`IDOQqZ zVDe2+Ek0DoCW~?GhaSkl>1^p^+=al-F_J3<$3aYh7<0*(_=~^zcYpeK|IWM( zp5glHLRGJ?w%$P-n&)4CJ@1-X13;L?R*U89v-Ozd(@(y7=YuE5C+_LfPn(YKoF07r z^~;=fv6#hi9Xb+%C-1(!y;;wiMechTb~=vZm=BNdU0zpxbW08-lx1X6d#&buP+z#<-L>R&p!M3==j7p4H6;JvuDp&i&^UX zdAnH656{l7lYpW#qA_J42H?7x&r-xd4flaX!2pvMK_KTu5y=yUBA|jrEElcSpScO4 z^PvT#7&9?c>en0_3JCt?sN259UjU-0z0hXXn>fH!)`WCYnEay6vqt zYx_Qxb~V(A+5?85Ql&>+PWQ_Oi($xKuaO+)L4K7^o}avnr7qB-X`>&AaY?Df9D)~<`EoXly>reCh}l5ft}UfFA5zZ4n8z^@QWJuKMbY(c%wp`E znyBi$X~$s@5fyRFj?pnPaw(+hogQDDUkb3b-BxX#v$>E` zL50RU5lu0dl%-TQu;!A-Fa5X1CoEB0G{I08nZ+ zn1L8%)ucL>ltt7amr0NU2-WpSMMb70A)M5O(;E|4N{{T{bWg~vdP%xf!ePJ#wf)HTg|v#DYlLI~c6;D&x2hcRW@&&8r3laaNN86vP`t3eg(b+2MMrY7)e z(K<8D0*WcA3Iy^xDk?hW*=*)RSoitkUw!d|?|#4ayju?ktD_gME@xd6`|;%H0HmBA zpFDr@@_M~n9xRJNFZAm#UvBg8_Ir=M{OofycyRB*m{g2r^HtyXB}XG@R*RF%zWYaa-+lk; z;)RRXc=O`?;~!&u&1l**{^VX*E#tU7I57-zXHx_X=UDH!BePi`-YxjK1mW1065RJGJZGiR^ zoH|t9_Wt{}YRXWmeSCHH*-yPnQ|UL;N^00U45{g4^=$jNvNzl3{kK(7sy%i^gi{&uO=qxA*Tq1j8JQ=Z3KQ2Rk!WQDPFwQgXlBfGHDKA%d1d 
zi2Y`R2J^1VsbD=n5w35p&(ANUK=4h|G_$V5O7kS$A{itd=3n4Q*Gsp&G&RI2hSJ|km`K*Rm^}|rb8g1jdt|__9 z+L@zzzCzao$;Gh;z#@g6BSKR{sQ&m^iu&N0C^%OgsMeb;1wW>200fZZZaHhVyDJ6j z+WE=p-Q8~Ikd9YJY9N4{U2g!P@lE5HVY!@DNf+@{dl?-QrC)1%rqM+cm4A=*3B6iIcS%&aD9EAYFmvn?P}M98NWp3CYRm+H#NLP2%nX&= zdG?F!k7tj5=h1Kf@$2jDu)bLEaP{=3vU`!^R+wmY+8x~mH=i$#9I)($g>UDf+dI?7 z&HDP{;{1BEKEHmw8Md2#yY6>UiZbG45rqJNT)`6C)8(O(+iHv#ydl>A$0*SBXYKc) z2K*A&6x9ijr#%*Qd&2B#J2p+jW>UplDuk10zP9{tY~UvJ=bLfBQ&9&ZM1m@FsCVEu zD4zs?mG9w*36O~tO#wdt{EK(q`H+zVd#bh;b&)}|4~@|w_OoSML$vGuyVCWla%VnU z9vmE8UtE$Fg5=OOp+mC6!v%oefAHY9KK!lYgCmKOVm>%Hs`A_V+u%bLxj(EaST0X3fQvz2Bt>o>TQqHblS>n)NPjc76SM(>TuBMzt7Y zD#bB7#KtqD0!q=`&D!;PM~;9D5Q-E5En1vsW@5(*CZ>k=jguIp6z{pHw6o3#7065- zVras}%?J1wFTNcNmwaGtQ5=404T#S)Pyx4EFy$7_9>-O zR7GpqXdJsK?@xssE88G()R6;u??Iv8u1}5@ju3=Qd^=ku(8A_?BLcaUmyrmf-wP<*QexN2j2bMQ%lM%n6-uyAA*xQ!1&Ow<;N? z4SaP*@M2O-QqD+hgd7?}&LYk?fHcNLj?Dmx3=nII#bknmfSijUvUA?d*!iog>(#*_ zdRNrcsE8m@9qmHf_$G`g60re{Ijx6&sIFU8RGZaLVk}iLCXeK;=Jh}!RQ4Uu?v?N$ zJp92szxU^_H*x*y`MhjifAZr7*JN3MV0ju&9v(h^-*qcuS7I4=Ns>viF~%ek&H9o@ z$15Y7nG@A@MLnIS)pb>()t;=fYpNS1 zfGKLR^7QE!-~RS@K+KUNGXkqWK%LeBF^+N8HdJvW#u})LNCv8C;JiR7Wo$clv%dcL z(=T7W-UZJmCnqrv4<0=@x^rAg0f1pQAQ6%qQv@(ZWPs|>(S)EXQc@ZRkV0rE)^9cp z*oMwZafpOgO73^#^XIQjnb9@gJK_|FaojEEi#W!TN*WSXKOnW&mlw0ye6!tDWPy<% z+<6E_pjfgjT!A+%w4(>5-pOzg*)8{ZH>-SZ zuCI~kaJ9U?zKU6ZUE`a^Ig`@(fQEe^fyo7jl|KeV%vCdph(t)pIp>t3ilmf72zB>F zgdng_FDRZlG#(I&TFRQT9oVO7?D2ov_difQ;6!sS zQaKN~Mt(`1D<-=kv#J`a31I?OL~2&G6allYYj3XC091j{5U6Q=E)f`WJrydUM71WH zIp<3$DaRzEnjnCPw7&c7qc4vS4u^hs|G~YKviDvIZgxE|I7hHIKm-aYmog4VN6ojt z`@!d5e!bodtDk=oR?Se#Fzm*D=ZGA``SZ(r_a1)s>t|$L>TPwf za%~gGSjJJ$U)mVe^zgxhlcUo-#@8=j_1kNvk<50RakiN6uGSyC^WJA4e|+!$p@M~u z=Zi(#&9cFI?1A{;_`nmf*~P2#7)#3K<*SS3^6>Qd6vY}JR3`0fHw%tsySchqUsogh zAr9@V1ps#3HXS0HVG%=e$S#-SLX$3BxyqY=hop(M2n{+~;s71s#Ud_mb zr_Wxz{oeb?1jvYv$(bM-n;{_YhM013&Lc4~DZp;%H=9iyhdhpKMrPh|#jB-M%YF8~ zQ7ev_T|@0L&K~0NZ@u%o|H*pP{`%%1!1Y(ZlVt>?Ne$}n=2lRwCbh$=M2tzCQ{z8wzd1rj 
zh-PF}XNy>e6vw+^`||no58wNcRhf_gYc$H_^a~VIS}tc@+wAYssqL%eym3tDuP$;- z1axsdetkZyldet5698lM!#X-_QWEb1kRbw^S`p1fav5VxB_jhdmOSP( z5ZJt(<#E(pKvhy2cKy?*Pc_@g!ATba8z8_i^n{rAD^4J6x`v%Al5$F2*Q__!-7H*P zUoKY*1tEr6*KRl4dSG|6Za4IG;1z|GO3B%IN-4#n^{Qn8N7@G0hAzf2OU}7S(ZF;t zpLd}tsZ33`DWY|)H#5hM*^4SOl^6p#PmTaV1ShUOkn@!rX$p?G-)&c`rJ$yw?7|qM zYF3e1)3$AMetD6#fMM{#v0pR|i8w+c08}3WFaxn6N2wYxx*tNYr00L?E0I)SYq}>j=&&xAxP8^n)$pN#{PRh{7%z`oB%jM z$G*$QUwk15Z@=|`abV(S=VuAXhh>aOQ~+qx_s!gY{`C3jTMvKz`Ps$IPD?&LUL3Aw zIpyJIy!`6b(Xs=?n9_W{08=RR-3?_nUkpQHc8lfw=K6XuZ`iO2Ar7NSAu{J&Kg1LZaht^es-965 zlwANLVxMy%a$}6nF*~*i5T{gM0)Xntno?>)tA;UUB1WW?lY*sGs;G5L`Q^*A(+3ZS zk_0iv?3`DH9E%AspcdJ!cWmg9=S?^7<_D`~+Xh5SV_$}yjyp($vx1Ttp&GN0_ceDB z(1BZ8bHaDtd-n%FT8{>1PSbFGhU1OK-i%W6c=GnilixnR|6tbm!?t;FaC9GXR?+0@oXpD}&RfbwE9qZY%v1LT|V6V_F|Kaq({7C-pBF-4d3n zAb=T(kb#JRiRuA#I~Cly>&w<*v`< zT3Q{hipJABCrwkKaS)MVNUC*0*>Bg?(9AKa6f2TqPBELAl&mFnjVBN^Wn|4dZsWz- z)%tpS=j8P0a5ZZ~{hYynZh1QeN>xs=kj&3dyTq8f0>jzt6zi_{STh@Gku zlY)8g2oa4;QN*;UDipCXODtN{vT9Mw)hb8~V0z@J*0|l>%YYj-m20ON^;LH50keeEswVutUcpC6{5nyQ*sD|+3OcX&GqHh`CwX} zJihe}siD`9wJdPtVEm!lV^D+0cSsR>dy}!O$ck=~MKn%kuA+!h-lMp#ED<(;D z)`Cd8U5`M{dr?K=`dTnML=;g#Dk8ur6NU*cgxcQ{(;l0yZs+{`+4;~5xfa<2dt_#I z2Ego{3!v!8o0pt!C+;6L871fB&jEvJBdof&21Ou7WFy8GFP~?T z)4O*oxL?zS9WxS*G41+ZwTL7DMMm}vh((GSQGzjdy^F>P4dDPx@7ZA3S;9^aGNm*#GW#K6v)@`STa&_a8lS2=|Zg zBDjw}U!$KH^U1*qawLE;Mq)R_k_|KIC!c=${qKFpGk)@$Pwqc?K=bD1_3P`+P16N1 zK~kQz!>Cr|;e$I};Lkt%Qhb2ZNALgPfAZwpzxC|tr+@Igt~f~SeT$AlAcHJAuoV_zbY@V==}e4<>=xvX+~M0N zkACaoYS(&p@#ghsKZe(zx^&5y6+O-F&ki5V7Kfc{j>7Ej@?bTafufmt=TuZxh`>1{ z1V&&mG-H}dIaZ_13GWBE-Tc0J+tVJt@i`IQR^a`0|3)7G232GWK!^^24cL_GsvV8U z$fK(sw+f_HVzdv~sG~$(Dd40|n7+zhsn9@rXfih;vgnn(>g>#YC;QUyY1G@R3rpH^ji>_b**DYByO@(eF<|dv>M z=8}?Q$3)%{BRb@gWHDcEcDuG|vSjBxF-rwWz?iUV*Gn#-W~y~<;T?+zBB?=96Twt0 z6-{EPXckLZO3_jz7s(}0Z9Yt~U*F)NO@z-qA#K`RRkreEd@d^0->v>nu_9We>@$*7 zOr+-D8I>rrraL%XzI^@yf#MK-;NX2J$$Rg-ujZymz>FdVkYY|q42FSS83)Hi01Bk& z|NDRa7k~Em|I;r%|0IL}K-+EXcOwGz!)`I}(l8v%7mc5P`Q^)|>4sgedHBKa{MJu@ 
z@k_sO;6vN`F1Ypeb=QTIQX2vys>m#a>sK%S-j9C&!}s2LeQ`#?3;AePz*G^7UcGwZ zn&6tY>AD=FjJrSsSbwv0*A|@a*2v-M1gUedp+8-gb`AJ7(rR@Kf;_n)bNBj65}M zlYc|KeBWUAl^8VHN#AZ>E6!7U_KJVPM5#fTGTx_O@026Vh)h%~{<*SAgW2gzxH_Zq@w~UPa@_zcJU|J|6fUEBDWXSl{*I&-&i{)a8 zD)m4kLa{>3ZQBwWfteI`PR#%b3^fZIn!t7&*W1x|eCO`L!Qt%q@Q4u+94J-qmr{~y z5iONK$`xZ$H3dLq&oRb0jQgD+lu}X}E1mK3@?yK&0it)Vo<;+byNo7KT$vt0-85V3lxmLiB~Q29SVNFcD= zZJiJ6?M@7q%lW~<>hkK^fRf5?7{SO9pB@~V+Uu*USX2xh2L?pYoO1|n=r^r*l`7x} z8Ec{whzKC$l2*%Q4cqn3mzY(AnR6-C;RA8PF`2j$C~MXA!HJgI8;DUZnu{i}qL370 zv6M9zEviMNh!s^albUv_x);C0Ev~wsohME-0G8@CB$A~X2k2gJW1?16YCv&KUd^SH zTuLfxW*{ebPA=EiF-6~mB1J({$t4#sWADtM!b-X1#bVygW>uTYL_T;?DWa;-G>56P4n?!3b>wsA*XP$M?HK9htCxs$|M7#vyGKRs>9g0H^{`mYY2J0K*{^={ zG4t%^VvVuK;Tm&v&fmTNfC9`<4&HwJ?qB}fzh1PK4)v10HDm6OG+gpvkRdVH3blmES|W=yQ!EJLnGWbwnd~8 zS&9m=tEH^Ek%<^06R{d1yBcI9MQhBJX4`Fs%k|B{os)tpV7VkIW9NM(J0db6sbVf_ z227;6uu(|@Mx_V<0|GIb6@zS=2tmXuhFV3r=z_U9@Z#{v?_|Hc-jt>}m>(R?yXN`F ze?30?IoZaAKz_yHXt}!AwX2nzzjOEThxZ>nJ~%vT+YTv^bI6|Ap#udlAcH#H5HeP) zf5RGQv z>I%dF6wEn?h|aN!niwLmb8Xv#q3Sd>hT!qm&=x=eD<3o#sf?gflfn$7O603ygW0WE z=9ckLT_Nfja7z`vW&DVmsOC~q&M~E!Qm))mfGS~2CFh*gH07L&m|D&;1U@=BJb!(r zYDCz!p$Se(cF3TRvr55~#SoK31M9jD2~$pHh7P%qG3<~RN5XUqTa*FSF-hy9?}o1{!n-g}}1KrRiLRDr-d1mdRayyuHo zmmmG?*N2C*&DGlZ3&9aN=Jj@$a?E*bTOS%HTExTbfy4;h>XZpHW3{7ZeO}n`~ z@1K8a$uJ-;+ZqmTOUQ%C^D&I8x`$=Tq)!)K9 zv|mrtZI?d{B!K%BHdcGfq?`@Wj+WF}OC5@#N@` z5KWW-BxO(mkuoL%h%udAUVm{`2DST-?si>R%x4I|7!VlLN>MQp5JjuJR|EuARO2eR zEiBGx$Wj`D!|L`Na~33Sd@7llO|4#GszpjcAT^xL7ez|W zIe2d-#866MS8WOj3~+KBvEV%-*TAnDVef;lOkXinAO%tYu}adf(QQ)G0j30^Qr=h+ z>Ru9H6U&H*U_T94UitpY+=oz42Z)tYL`t;{v?k#U4X^H zzw`FP#}6O;^k=_Z-)s*LR-2pa-+KSUpZx4+AAaY(&%ge<7d||`lT#+p!^Ki_Vb4I= z&gTZ0#uyx=G7hQy`qQrpaywgo_2u);W_R!X58C-E#v%1FUJdd284}G74_2qAtNHxq z;_~e4ryzansF**!`}o<{Uw-TNKKPyQJzlP`=`iMj0~{YOuP@I$37MLLbD=0Cfy;HD zkp*$Fm@}}M`o;mk!Qt^{Go)li5IGDfkEv9%2Qw>DoO2~>$*PJ_4Z=)KRYWE#3?c#% za~vZ~wwO>Id^iX8aCBXWE5_?AuoO7>VT_E!5oja;p3iSO*&@}B5AfU@{@3T6;#=#znx&w3pMUwqaJjtnO0Yuk!ErR77OU_{C 
z*r5p)3C#UA$H-%XjKGaG4bJDSckD2baqJTm07Xf8FORO?kw#_?$sw6YNm)xt<5*Hu zlTuQWm@MB6+nZsFOhpo+A_#yqO_+6UHC!XYQgXd;b18q>A&xOH?#8@LNtlu-00W{YGF3C&?Glhf1W6-m z(h>>OOopMaN0dsgn4k~d%$Tv{?1J~+5mJni+0DDftFxD5inDGWW31R{Qz5M3|NyVV7h1a z3o)At4cE{ItoayVu&0WuRD*wnlvB)ER3sP8a(Z$)?BcVpUy2wz)*>oevWVDryG11D zT?ow{@3gj^HO))`hLo?@*C}T}n9UA;_P4*3G5)9ItP#n`V}C-fVWKcTS>23@&MDTEAS($$JQg zFV4r!raXD;$=6?h?wpNR*N3O~UtL^mce}jn1LDE)(O1vTUww6{qa1c^M|ktmCyQC< zm$Uc3`-AhFtN!}sodbU|#~*y}{jb0NdK~2R_!LUHe{vWAG;5LKtFSg(5uGpR$HzxV z6m!m+sR2PT;!^ke?8x~TlXE_VK!k=$9*vb88(=Boou9%L40OsrB_e7=SJSPTxia*@ z0D%BlYt$T(X!Qv<(X3VgP>Oojyn1oY+}(Tl7Px@iwv z$9_u=7K{18av7K!;-kf4=0oeeLv#oZfx*c3o%JpC6X13Puv#|XY=8|A_gj_Qj{FVz z0V^Ey#xQgb8{yuiVN&xVPbUFQZ5je`weCT5L{qt}u`G4nOm$hyfD<9~b}4Mrp)jcz z@Ro&iYx!?zZ%9PbY+l2)lNKM`=bwMdg!k{>O(~t7zaGc3T&_TJJ=e&gXhr~M+}~_j z-D2h!D{i|mn|FB>>b8fn5v79-+Xud-AVV$MYjz*H9Vd1zcr7PAd0C)KgUruA*`!39tlhp2nQK9R%@ zo#T{B8yYiheXs&;m{rR^5D7jwcHTKvoy``W!)CXwNBMTxwM|=c37$_+PcC0y6LD>R z9l4w`5T{beIWTD0VUd|@D!Am>GZ9!}fO*pdqNCMHN=YeWl@J0@?Iu7&6M(ep7}>d$ zOUk+J8X{CHreacB&jjF@0Ws$S#MP*_2&QaVY|NU9RTCfpFjWzyI<6BCp_vjO*0d== zPvn`ZL&byw>D0nhjox%m5F4`Sq;fX{k?M&hH2{){!3@AibD{>e}8-GA_-zxQYN@80SA0YQh| z?%|_*|M-9Tk3@%ytBdR&&fD*Q@bND`K3yzYghzKzZr1D5(>r57bn_-Kv<@JrKwMIPFmD>NGRAwS zcNzR<(>oW4ob!IO-4!Wj#W50CUTX*!MjEjwKe8eQ{y~5GksK zm_*e8i-_}#gx&||0=b|7A_mA{72E(b%UKXu&2q6%zkb#o-)RnywT@nK+KM+6(*XZ8rh_p6jMY-_DTU3ca9%^Fe)$-mQu!^ zA@8nV=y-_%mIp`3{_5iL`qk^a+jX8!mj`zb4o()UJIjNkuDvs#AGgg>7dpZK5;qXw?DfUA&zIM`Qsn5UK}?P*grG%pE~rp7#;ByF zaJ~Dbq#PORW~b~8<;_gM)Dfu&B5k+*db=G{%DE7M_cUumOnqpnn>p1KV-G0KaU}_a zCV-h|H}B@6Fph=Eo6>AH3&BZ>1a@#RbB=D-HwGpqITrv@Q|DMEcTF%+1ItB_G3V%= ziz(-f>&=)&hp`6`vqT_{X}8^K5y#jzUeyr&e)_{K5wT8`xEp${#8yPhi4bO8mvg$_ ztbvGwS2OR}F|X!}S=+sQ`HDPiEp=M?YR*xpYf@rhGzZFPhX)5KXCe$u$T6y9RB3}- zF6Zm@y1F;leGed1sLe4klcFSQ!*tp6hl#*76W2s zO0}3E-6=K__DVyXhz`sXe+ehG7&6su`^2)7J*9Lqnl(__o99kKO|1`yn#w8yX2(ZI z$A?FGOiWC~q83vQP3yf!Ge?97YMOFv+jinRfCcBN8gj6|zS+8F@$+AQ@?ZX!|MKDE zhkxgfzjx>E{H@2QS7&Gc?7#ZIefH&xdAs`h`SUUByYIZ4b3|1S*alAHI1Al!wuCG! 
zAY;CN|NhzeIXU<7SD!_&ejEtwop;{et}nrr=1%+m?|gVYY>Sqd`X8U39NpZkFRm}g6c3M=r-$8q))0uP z6v;yz5wUUY>x+vqrM8_T1vTnsb5$|3oH8Tl6dT`^SUd-&>WApOZ))-Cx^B1IHcjKb z2QVq33ZRHeb+*g7fK~@zQ?-~)Uw-xU;gfg5tTTkB z4K+Nj?wMmuDq^K*iVZWExJp$-LMF4CLTakTOb{os4eSH65OK=jrB3a_z zKRLZWcda8NA`?k5qM4-eowpvND((HR>=uTH{c2#pcKwDi#AKmSX|pytH6sH$rfSSX zRhnEAnGCTO5vsQj5C9U9V`8^=_Nib}tu+;RqG^Z*nEpAu`CZ*^tEaEMpIr8DhV~|$ zmC&G4z6=40hH>n7!-Izp9a9=&;~FUi#WYLfL)V7u%XJaA9z;-Ahb=%^EIU92Peuw1 zDJMi$H8DWPH6IgHDj&wNV*&&i`=O+wrBX}e`sRAQ*_l$x$p=q_!7~A%X%l?y8wtTP zduCGwv|P%RSpcvbdh!eiF1QdFL7anG8y55K?CjMTW1V7C&Qt0vlNG5P2y!l!QUpYm zk#Z`CB*j1pjYP{Z^v;o)vU4?5t8E-35>P*m)!wdc+9tGPjHO7G84r#3tByABUk0VL4yCe)T%W(G;pgiV>@ntYZCfjNUP#zed0+u38Xez_u_Hq@KQVJ1y=PLaVz}T_bL_?^_t_r4LrTXDbxd1An zqA<}q4P?qFLUN9X*;NB7s)8Bk*g2l`gE;N3_BMx5WnYs3Y~mxF)0>W&uGz@o-9?aX>U;g{Q zd368b48!@e7vpAk@ATyH+i&^R;wI-i58wLg<;(5RkCNC=R#*oIhjBYHK+fBT508_M zu4$9NSoCt2%pLsVH&4I)t?zkkx0k#AdcC@L@7~)FUtC>C6RuJmV|w`RJ3s#0pM(%* zj=1ETv)A|UJ`!~)%unC@;ONeypa1I9+2Q>^{{0`0{f!ohyNC*b(3vz2RW;1!m%Aa% zSL*#UxotzIHCP8N!;$4!V~-)w)ZF2y8F|f4I8)L4i<;C_*ZU*;0Q# zTt3AT)#%Zacke%Z=$SyQWzULfH#kFYRQhpqd42Zmd5Q7S{Rhi-j!9fG&e|Aa%BS!eYJYKCT5oY3*t2e?KW|H~-8Sb{=gTy$1SW;>lpkPqjZptB?dy0|64!sR z-_}t1uOB!5N~0r5GK%#)Uxs zXtBCDKNFRhN-pYLU~-HoIl(^uq289Y;qBTkrN|Cb8B5N~#iDDw#bWLpUtC@RATsZ* z#?1_vxoY>gHWNfxjjI4u6%F`}QS%gd;2bo8ouitUQk{Hs??zihOHrvHJyvfgGD83c zt6T;~Af){XKtz@D%{5<|Zr`}(c>Vto>-GeQny^IQU{C-eG8$Me1qhFij?C=r>|9mX zH|zC!t*Tuc2rf;E2ZD<-7oafLuWV*P%{PB-}`kn9m_MQ8O#AWEW|KK0| zXD5e;oAu3aKKlHNFJE@eVz=IX@!1z$a8QOPPwtf**PHcbbCYA5HNlfScyN4vd5(_z zn0KXI4sw(E|Mma*Tl9;~b^7|#R|?LMzx~}Go?oqBJ%4@Y&S_5lkN&}*hgrwIxjeht zUavP7m&~O|oZWpuv&Efz4^K{x{_FqK|M5qE=ZBlCXHOnK2*6s>ybB(zA|fg4X2^H% z-`Q+$yz|a6Af%jQ8fLSmNZJj%dTF*zI}C#X^ux%^V3>1Z;@PYd6OrP)pB%~5M5RcX z*jiu!AR<}>Z1RJxdCmYpKs@(zB73zrskKauDX2BDd^kINB<7mg0??I| z7$o&qI^I|rL+B0`4~S;LIYN!YaCLDx4m|Kgax9uv?{1x6adDgaF@Fk67-;ZOwxk)+dyz@=K!{eF5$vjnyR(;T-omnhFtOXz` zRtg)UkxUzZafBwNYeE~sIQ0Fl-)?sdB&8h8myIVgWg<1{W-Wk;lpzl67^`^~VU}Fa 
z-L7eKN`y$H1ZCE`VRO?4M?`JcJ%9cp=j?;eNdR$-vG$)tASQJja*kD@s8;!cnMK8I zlB#+*`R789|V5|e6)DS96&gIHCGvEECfS{Ho;Sm{-az=)x$%W!fNwn@VUMQW(2 zRbr^>1n2B+b8*rL6FO$+oMT5knXF72nEiFn6VB=Cev9`<6HK%9t@>tqW;3i|!o;p= zTQC%Z)xkkOjtFq)?w#3emL+f2n=)nxgld|K1IB7y3Q$UJCPuS*_Ctz~-g|O$x%u0_ z{?(UXp8xo7{`QA|@Wb!^_P6dHu4WFsW-f!1c)FUOE;=Xa{@s(epFHR~?>x-r{(}$R zdUbYI-Lp$E7n(usa#z*@AAR=K>gXtz;nmBRDWz_4lrsI~uYUgE_}=c-#pTnlzw^T% zzBqq%b$R*t;e+4&_;0D-oPF^rc#l4y*Wdrc-~Hs1pE=O?KYXj?{^aCfzI0^iXx@~* zZ-XnPbWJOohB!D1j+>HYF`xItmI%abNHNA#-QzubP#|Pxt_q|v_023G0}%M&!D>!B z5tCN$zak})7wY!O??v-YuXh#rbT$n0GPtyJ0IP!*07+&KHX==OK7p zwc+l;;_=<%dv}i=*^3unUEW;EWMN}O{0(d8eVv_J_{q89_AuC=T1dzUlT#KGO;`Kg z#(iRqB2cyZxh>8#(Sqt`VA|v|?%hQJC-K?jSyZQwJp~*0ObFX^yV3Sm&(j%y%RrjY zf@w(Db6}aBZ=ODVwd-@!c8q*=d3ms0Y96=ijVdWHFtuHSs9onpvze33mY(E%8`+X(4dyDlJhvkU6!~VHfNV-LmGf^)-?yqWr}?pe2hceHs1S`lYt;= zU2BS&^KP@-jbk*it_`5pIEQA7#Vq9!5oWXQ@bI7?`^|RC-o+RJafq>i#uAIwY667H&X-vRVjJ9+qPZPwvAb({3+F7UQq?-W z2aYFB!K919I>d`;sTuFZR4S*qUX%3Z+MmciOjKK)y#Q(ABuxED9TooLj%wmi?~&ok z%|muX%vQ~+m;iAc$J5i}ZrKKKts$-~8>*9OJh?e0+D=-94QB-M{<&Kl$VD z{lO2v`+L9lgNyT<%ggoir!SZDS?h8A`f}EGug-7C&9kU)8Ua6of3Y1F_wM}FU;XU# z-h-IpZ+`jfFl)!0AHMxo9Cp_)pPwEd{5SvdU%1x&@!$K?ZS1XhjP~RI_FvySIow@e z-F@#}>DsH!=1>3ZPyYA+hyUIG>c9A>XRptnzBqgE=<)V?*D{l6=Y#j5>)Q3^Izrs0 z-B=P5qQR`0qjDQMEd~b8xgzR95R;T*OwmBRb1{y!C(Sw67E@JIP6$;4!0hUg4TMv*pa2M9{TK{@2q#ZW@;JZayTcN5Xy(oZCh!E7 zw`Wg3YQ1i6uKaBI_^tOjv}W3bWt- zIT>2jWZmK`6TR(LA5cLd-3tNs=0JGc`A=&ws)xZh*0=lN7pg3IkJb>9nbCfcTjycg z%L<5iGVI2>PTq@uaq8@*@9PZ$RPW)*Tdsl%HaQ|q3^175eu75FE|BPU>P=b>6h3P%#!&E22o`gOegsO4oKxXsY2f zfjd|n^!;|Txe*ZO+-}>mqm%^^Yoad$1|Pr-L^Z`g*fd^MnSImDE^pT0xX&r8ZBp8n zIBH_=5UB~C)Pi?p23TEgG^?URB0F3y(l{I(%&x9304&9%qO)$ElWh92n=SgjfBfJ< z?nee>CQ(K9Ss>+-OAaAa@JCFGX%Z#pU1(A)KoC=8B*)aXjTVVRtcDJvB2uaYnwn%O zI2i|m^S;{f+?tjEBC$haLR1m&2*A8^h~PatA_j2G6MJ|M+{-?zVzO`-RjC5Bsb?qL z_cpxeIPAIAt_~XPcrwqT+lzVnZ|Z?y5}@uq*$KJcQd9ID*tB{>R1Bar)RU-Q9tJO zb|1D@$bC#;Ak0s_LKhrjvw7W`TCQOqvr4a z;Sc}n$3OqzgKz)E|NH;TY12>!3McRV)=lB#gL|?YJMbTV_*=jJ^fzv{2s1b4;m)1Y 
z)$*X?al73}#7dZQ>E^SPg`F3hsQQK^Vn|#u2sNpx4#9f@0)Ws2H3c$eQk5~MK92o3 z#GGyeeAp2?hP??|%sESS{s&_$DhB9%@ZL9}@!m1>7zgLsdv?xMuZUqceDV2LVD#?0 z?=4p=L{2fQ=r|04h=I~LI3y`t{ z=d9L4GeW9+sTvZmb~_C0@%lYJ_2&QBgzfCnP=-Auvj;t~u6y4wHDOx#!n78i)(eCP z%-?`j_xip1R@coFj=x_6PpV+UdV|_@3cQi**FNm_uBuNyDSK26kemaeo6WXc%xxP=!nojN*TwVn9Dvnoe-pmSXnKW*Vk8V2%hQW=#bG~ zJbSJxDaDd9J4VD@L=>1kA*w19)|@XeAv7~dQi?$=dAl2dd@djYVlYa+>9?+N+rFPK zIt0^FoI~%pYL&&n3`1~3f88`Z@8+i1v`y3a5FA+1B3&D<*Vm68KNc-#ug?xvt6jfK zMUbd2U_`ZPf{HK!8l)7R_c0e$oXt8bBDu<6?O?G8j#YBodL|6cgBc-t=W7&WZJf=l zx;{uL-n+fJ@OJt`Fys0#M@-d~3LT+mL=yy~diCKgx`?_@idWU?{xq-}$%y2g^Ugb8 zDNejU42W(SK{RzOd`p1dU;l3a%<8+M0DxL)K#a^BeABcoF{|kHdc9h$ibzbEnYyN1 z%oZwn7ts3bCrUTWAvVxQMF_#&N=UW zH|w1D04P#oO4IH~O;m}U_nw^tfMFcRF{+suj4|aRW9{J7V#&oyk|a}xAs~=s?YHsO z%h$uEXQMO@-ciX(Rd)R@rNN{$&Z8+KfEGkkO*XOA0roN`00!Xwr`6U3%oI%t9q_C> zd;+wn8NNj{mXs}ujnBUR_2%MphhvGuY% ziTHYR1uFB-&l`XL^!W1ic@sj6CB;%yymtneRSl_#-ZEjT9vBgcx$g%eCdcR;IVZ_V zMlg2ZtU#cEf~A0C=e#2_G+|RVYn_XIG{LT!&1bXYqodt!7o~^+v#Xrqw((=%KYH}w z^7ZBU`B@X1IK*jLi>RRNIZ3V$o^j)li%JMxmW*g)>f1Jyk`SSkJfF{2t7S@w$q`Z4 zHiQ`aVb-;^f2b&5PIb(ul(YA~UY-a@M5abWr4%r5%)z-yodO16gfQ=Bf!R$-0+ZI< zrs{7}GsP8D4EN*=6V+1tGjz<39Wn25MRJavpH79^A<|y4Uh#)I7BI0X5IG;3TAL;w z1v4-saR|+Jw`)Qx#cEP%DP^->6VkoYJImQ(eSNJ`9zT8Nciw*d#jk$$pZ}x( z#b>|x<%bU*+*!4;+_bBCpAs;)ZI}Akb*doBSmA|aYg+o`<@5^ndxX8VwrX}GOA&57?YxjgTs zxVHCmt6B=0MmNA|>jMALb5H0~#Yf@x8=}3d9D@=Vp;h-bKp@yB+Siu@>em4TU_|$3 z7rfv0O`Bqb`b#uZL}cfY{MF^U45gu9(HicyTN8!0ZB6rXwOTBfL+k;;5xRQL0hgjB z7erCZ6=gv*WGIsBK(O1ba~vDjTwPrkks&6;8dqqurk&5`OwRi-rjdymsqx;iFH#I4 zrZMF_pD#rXh}uQhbiPPBSS|X^`rhf$`sU(hed(ObsqiGErWjKeHGp9lLlaB^nUNTX z>hlz9(KAvLxTFLoW{PC0C641}w?V|dPet(N zrf<8(REBXVS>Ac)-RIAqr;>wji{^ILRcEX$8PF6pl{BWY32xc|A;pr4mCytcLBysB zgivz!&M{Mrv6Nz>O$ba>Bzy1cNv|ps*UQXJH59z_(@TvLPrqIrRfOW1gJbWw3BJCH zO$ewm)fQh(njJd`~lXEp8 ziK&*A>bAIJ8}N_wHS0PBA`u_^1udeBSwn7psN$ zu6lYClZvsU#p>V}zx-tpGcc3VxUg6*hnQcSpNFm+hH)6j#X;v7RrKYHm+!yxA_B7x#}(DCQR*e-7lAM+a}e^WNv5eK9{e z&Y5zy?ak)iqkDHAKm6-o{QBVJ@YyH7!m=}s-qfP(=Wa}KDhJSzxCn8 
ztIz)QPk-;#t7jLNmsv;*>ko*8LmY>3*ES&#=Twj=sTEaG_1-a|ih2%2Tyh@9-UkPO zXcSztw*{LTQX-UUz*cc)#{G3d)muKNvWIWg#Hy0RkjQ&g%cZD7wG354AQA-tE}}&& z$9!{rb8>p>9J|J`!X7MjQ6#Y`izgy`8_Obpq{Y3ft$;OxuC|TfL#a zK{I{xS9O0(w-4JtIqr>bDpv|_9m&8 zaxt;sL+~B}h-i$XV^>s&IOWoHotOpZC5x#!PhbLIUE9?51WcLGIjqBtf)y3Vtfh!Z zH){Z);`bHag&Nyph-Q_d6M_%kd1g>`M4p-VTa!t#z8^34v0%1m@$2p7S2@Zxn8kVT zLSW~dbJYQH8f+`jSy#l)Az|eNP20067Rjm_006lftpN7@uv)B`$-tPX2^F8^8ePy%{P8BBYemwcXEu`KyPIAFht(ap?1q&R<^SXq&6P zZ9*GJrF64yyIHT6^EmDt+}Bls(<+vvy_s&1;p0*l1mXOIWyE$AVqV*H2`9&1i;8Z zkR>y_5;L0SaYTSIj`R6kiXcFxIa;-LK+_r$3ssM;)wZAlbyjN`Gcf`K5{hU_DW?LJ zQp|)%W>`d%s3M|w3OJ6D*>88-)!|`iS`h&SXT*w7j15pk)5xZvQUwMj=e>We!lb>o zYS*Td6c?@Dxu(F*IB=K&Ac;Amu5Hl}DWG#0+*N=1{QOJRZ5%Eb^2x!`tnCnNzF35| zaZPK4>{(Ew#3H47jN@buJt63~R!Ozs*Bx1PNZRA~u;;DT{->IK*54l7GIn53nYcYO z>Q1`?LQn~Kw09WYp9|lp{0O)2w-o9413raiTHa;E;q8sPTq+V8Y+a{7Sj zxC3G!r|qFtk*ZCC=!VjK+!V9eyd6In6-$Z^~2Us0l_g_G0g&62(-R--m{dv z?bl|-K(4N?7PD0v%VM^eg=W?^UE`a^g}_-7J8YY#R`7~F5TaNC*3i0?V;vLYIQIQE zIJcND7W389XHSdR@!_3*7;d)LK#E{*WEKXhnj~gPMKYj?Nb8$h=1hvwvqJz?SS}CO z>+N#6ni^k5c34!i$e71AgjomE3;oj3{X|uCX}2#a~m20l$@EE5giej zddI;BtHfTWoKqFrl#*v%$HXEX1%zn zY8u2;RJ3x&rxd%}H@xL6Fgx1wnQxDW=`U%j_}@&FtTKVx;y7j#b&i?H5%>MT&MlS; zDO%N*-Z=m&LDjxQWXDb0gbB7sJcT ztK;Jn&(sD!I9P7`-F&uiKsm)b$ET=DBnOA{e%!GO!x#ZM<-*Q~rUkQE(=6K7Id^_N zytqog`SQGEw|l)=g<0%Vzg;(nD{!(~-+c1X$KU$ycXs1$Hs{UFF#O4%{mHXuUp=}1@Na(llbh?;zxM|}I6Yo|>-~2Fb3oRpnk~l2jcYp= z$wW}Hh#IqJtZ@{jl%iGuB&ObbHj{>Zzu7sYVYdUbO4DLyx8KlMWkOHO&H;3PH?v)wQN&~~#uI6TOb*pp-Gf+w)*%0Koy$w^WK z5mY4rRWTKrRssrdg4%Jh>La+Tp;I+7+X5Ke){ayXXtIW zU5Bo_f9D>kRqxs&1t!ZMGca_fI7P45M#uIMVYT0xq>oegUPZNlw}-`ThyIPr9j1&K z12DQR{WR@+0g<3)OWvlz?8Th>(O`O?PK9X?GJ!(OJju!?s?Qs-g555E_gSp)4Sm3y z(r*(#1PrkvR&~G^b%c&E#a=+>%Y*CdttmQ3CFic~R;!~D6C-IcGGcbF;i6eVkWs*t zphyzSL)sx3ppL`#;{5DzwH(Kxo6Xj{&3ryvv`yzjAaaOkn&ZfX+2->+1c|_O4l~Ct`pND0zg^}4Vhe(UuTgD zmsfm4)c_`wZa|1BHf_7>Bbs^Vz+g{s@cob!X}7ys&KznfDSGE;ZBr`M70CxDxp>Fk zyTil7&1M@@LdX4>z+lR_AVOk8W9AAa17R#Fmn^n4;}!WbA%*j=$X9p!F%VyAg4U*W+fFcI6OEEO~@sM5K7UMvnn=C 
z2qB=MbKT3ct1rKP^})A3bdLJi&zH+)wg_#LbKY&Y2=d_Wojj(tSzcbP`xF&5YXQQx z2_BJ6O!B;KQ?i8Z)o9A$%V#gno}Yc^+rM@8;#tXYd3@TB`sqi{8@FU?R)>efu>GAM zyw`>FZ~o%v2Ho|mXRtkkvYp*I62*HD?u{k>(VzXv|Mmaz-<@A?yRQB2cRtwNY!>rC z#NK%^GZ5!dN;1u8*fmX#sR>?eqG*W{&`MI@zYDox`ZI{-9O(VF(4l_?-1V~oxR1&m3z>+$K+m*m~?>B-Id zT1;~q9osnW2&C~o#}PqQORT#AO;j`GKmg3@_4KBBo$!p>w*fhrpJeXDh`b|J%t=#{ zqQt>BjWK@u?DNa@Cd}rgl;Hhxwveo(Soh0rA4vnMv`@#GKp%ujpl+sS#Jq2>p>A#8 zr~~Xxgzr?P0nm&Pr!fLyvbaD4(8})w1VXdQX#vwI5E8UD<@;xxgm5&yN)_~(Ua|eO zfVfZp*~3-#4RXa)_jPjcof1O!qP1#XF>$!xthY^xv6+XMqI03`W`qc;hsQ^+UY&!| zVzKg!jEV$4IBt9_$$2)_O5qj}(VR;(DC4-jx_*sNxm=XZuGiOH(|V%TxyEtRcoi`d zB9M|6^VUpKNy89LjS!vlU=|v$psE63DUL^n2Y2tCq%;nFk3i$t*S%u!E;LR}QjRI5 zns12HXvWjD0~SjxMZ_Q%Vebvhd6(68F$q~ej&&j#<4EjMHZk-*NG_fYpa2=Mww<4M zO-^yPm=k)l>Q@A>FJB|_YI(RBc4B~lt#1iQMXP$NL}X%FD& zo1fKfF=1u6Rk14~jl&RpJ&uSN)s#Iu$KG?-wcdrAv`0kj*m*~UzVXPg?l)@EcI_|> z&N)JEoR?e(n8~G7x^8u~-hA@OCm(+B?pu%VwjIBC{`6+EdHCRwidfEfP7ZQOO}iQ* zE|-VSxl#%t#-VSRO3s96qA82(W|3%{(kCB(I`5jpW%JST!|e?^22Ed@5-% zTgDs#pzn8G*NK!;a@BUzo{281wV3H%1zsCPLIPDnr~*F#%O#I-EPE5&2^drX+!stR zDH?odKv^~A^7Q$O5Zc3|gE;QYgiJcml9B^*>_XEKVQ@}MVt|t4#GE&!>e)H9+qP#O zAR>w?fCE1_ZiyH~+TdCjG+By8MTgzad%sxBKl$RLvzv491cXONhs*iwXn7ETQ4B;u z0fC@;B312+?%U^zgE2eju_`1)Rb}tYvtOxB^dP`nJ<8&LUHdKczi+f)e@r1wIMc*Mvi)D}k)wT^5Bou3zajX>A+evYDrL~2A)rGd zjA<-6b-pPnhrrBue0=!nXCECb7oHhRnx+v&_U*PGJO=_c5iJ>1(9{#Bm~Pfrq4D#@ zB1bXsXRpt@Zr1un(ij|=763_-1+3&;N@NG#bBa+xx~@gUB8KEDHzUQk+wGb*9G@IX zF4Z`g5XKZmRBEPO&S-{4V1z`fQltQa!L;G>!DrQ?K+dOJCT=JI5?`#>9Kxm_kx5mD zmg@o^S$h3;uPVDFKt4H6p?Ac>iP$)ptk6`Ry` zlXnSMGsRni^CV+OoJc`LQ?;*F>`1pCGjZJ|fKe&M%;vMM@=8TTKmg5qA3_N19ZdU} zx^@EtHD!m3yK-|m+5sl|ivRpy{@d4Q>z1Lveyyn-K6>l%d+!#B zZO(u6z282+y#CvtetP!e)%U;iPJ=mZueaN^W42OMl|36O0Hjh3jT{PSosw%D6FXN* z)*^sBj6=>@RK_7n(cqdo^p{c$OtjRvhuU4$=)U@^o-{;iW>zKPDuo&Myh4TjfkD;V z5n!L#Q$&@$10n&~tTz`IXQwAeZE!gbjwv`6&7v91N*Xn1DbafKW_};UxpGg5iMy4T-eHsi^i<;>eW66mCOeE*w z^76bNw)5pYm8?cL+wHE8v#!gzXH)|4-ib<#qlk>vq9?}Pb_-<8IGfGZn@x&2rCf3f z&Y2bfTOBNhVVzvysot_+BTwDRtI2Khfq|h`q 
z=q9G**r{sDdBQu8Jp>|G#E8f$Sg)FM0nnxifZ)UAe3vA4vssox{iQG@$~i zF^)cXCdb6pc*F(^boFypgV=kL7pwvNdGi6lVFIg8qENGX??%MdFCS)B{cc_gY!@T0@S)v{w! z-*tYq9F(6vyS{w2{rp$I9IkJkee~Js-J`a1|LVW_S4XSU*RL+fg~P-4;r&GyWW95r zef0AF(H+Wz4m&VdK7Qxm^zOg+&QP3vWBket{wn1ga#W;f=y5$NJ@ffFq0lA>Pdfr5BwbxO)?`NNHaa? zL6D3hMTioKfj~n9!LIJ=>Z-1}GP5%C=AB|V!`|+`20g5OB8zEJS=Tb}jf{x1kNaBR zcsX8-+*$>ok_RS~Y9$S0O3=0BfjO_|RiuUyG4oJ{WjT%G*rblO;uG^ zQr|T^tHJr7^;^qdF>>zKiagyII2^LS~-;OHWo zv1Bq?7HL92g_D_dnSu1PH=n$?e!+T?52nqd-EJIAi%mD>hYSiBkDL5BRK zSG&v1Y<;bst4o!pT@ubdvcrCGN|TQgWg+j0V7G z>6V!-%e)?5NT#l0OlaENl)=YbhFs?PxYa$5l$J8|=O`J_62> znG>hNoS8EK@oFMtBBI1ZksTZ3HjcwciI4#aw$lWr38^p>A~UAUDJPF_BWKDv0>_DS zS7D}7(xZnDO5w5ORvV|(r4M;P$68O@X}sKBboC$pI9^s?#7JZW-4fUrW7S-LUPlNu3*AMZW=VEn*0hfVTc~xFVk4a%|uK( zkr8Mja1|tHMpt!(yW7KQnYULL-J14_CJBTQGJ7U)saj|63%O=ri-}E@0&85u34N6S zIlvlp&s;7}-2u^6#|*nMB?43L-BU`BA3T)W>T*!6&tH6ebGUIea#;3v1M!px!XQe~ z93v_Lz>$LEw%#TBtl@Jv)%pB9GXSeLh+vKJMvp` zS0Y04T$nlaE`SiQVPc=Q!<(C%-r>=sx1N9U>Z1=nLidt+x0`COlt{I^DFLYTI?su@ z0rM-Fcn!w+B`luwl+HJPmk&*+tfuw}3&Zg>MT`Ci9#wl~IOHG_i`#21Jk>h^8 z+U^pAN-rrVre!(hoR%g?IM*2vO&yU^PT_dmAMQe$ZsuW20tZZFpox=#pXQ^ef*TX5 zbaQXL17qz?T)PM|IY8^OwANKNo8qoaFqB;9<)=UTS*zlJsU$NWhTK~f5qEPn1d!H( z84xuQPnjX=J5UiJadxD+iX(REM7|jcIgq28j?++EU3x=+K_11R=xb*>J!-EBm5vmM=Kx7pTZ-c8$~ zOs{XQ9q{9ifB9R#_02IuKg!`yr;E$$+d~uC#8m0B+ik!ET$cHiQew)8T-%%??5j+- zi+y?{fA$y8Q<>g=_ub^Mo2Hkae)d~`<+l~=>9eQv>so4ey-|M2mnS1(^A zhGEDqC)Y+OCJTa)D_EDlFuGd@Fk5x&aqNPvHt{&$2mm2Z#>HJJW6oTNqWiSu>}CXb zx!oloFcEF9-h8&755qWZ#?g8sx9w)y?51fmjni0$99(yhaDLX({L0ZEfjWNQ&wds_ ztzTZ8{_%^bv_N!12hhNKqWGPG$dMonbg-&Q*ZtyrX`HbjSjGJ45XjM4){p44E&{8% z|C|c6zCq8B7To_8!bm!k2eD}1L&Tw&CjxgT#LSo&%Q%QRbE>m$2G;rX;K74WpMN%O zci;Htx9)EC`~7j4w!_7ihoPIATkkT@GaynTF>Ru?>mt&{eWrx^jJ?U*x-;{*UqoG4A>P)c!ia%7~D5KsjgT zgn(|^dygRp(Gzoy>P1dVTcp>La&HP?p<3yp4t_eFa?YpYGUQ?gy$K@EwRbTzA5$3< zgG0Ed29~Sl7D(V(&JYE$ z7GOY0SyfU>08WWT1rSr>l1oZ!isyYa3F5h_^c-RcXE}1>gh=rx!vB?WlpXLrJ$45D zqrf?b(j3p?xx?YG*=$S6gbePPQ|44s9&$;XIHfgVH$GFu2LZSlI+RqHDKis7*oJc% 
zI1Nag%OE1HHB+6+@bJL{0{~3p_JRQ4eD?a;lgD@ayWZjXr?16moHnL5O;Z<`=f1mq zkV=`h+ocO|y1IOrIUf(F*8RoJ-Ld2C46pZgceH=@kN?I0?;rlrlLwEc?ePBBzB~@e zxx9JRe*UvhdhNT5?Kj?kyc@95@DKm#_kQ~058dj;j%b9s+?6!E^X{YX|LLFn;lJ@W zd4z}09{t9*-o6?uYaI#6b)FBY01`UU+#3WpUbuTRf++bA?M$8$a^fycRD0{HA|iKp zH$?GlSZ^&>e|MuO54W%fsCJ3KDLo5e{u|)_Zp_a8MOh6BRR!oEUT` z2nN}~{N~MdqV&!ezVPzZD-k~(PSzT#3^}V*wZ1HSAZz`^1mFrDzL(%eef<&+nz*9_kHc6JqpGwTVuZvgSY@$xbGm(X`=(pJn0A{~GNQF6t%_9hZmtnXy}G2r z@^bD;j#=9vQry-B7yOLu0|5A{JO^L3odh9ajXNM#Wm;X9Yj&eL8r)C%uwLf0eu|Yb zz;nI)nN+xkfQ<0`1+UnY`6?w0$q#_6$KLkqLT2ot_t>&ecTnZ0n&YXh$l&(W5L|94&Cska!ZpMquNopH&K_oM* zwK-^)7CPdR^VQ{UGYuwI=bBSWlsIKhi71tl!9i6oFD_k;3C%PPFH>Qn-XsE>x+oAv zFfXCc^AdO-A=Tcd%{0#o5vm#iiO6Q0Ky5c|>%8o?yOMKGDcre%0-8t367()^o+yQM zGB&|;+Izy)1%jsT<|!3IO2lj0Q8bkTGSM*PQgY6m^BVgWw4A#m(%KrYtKc{QN&FQI z=$=L3^PRx7mc|eDG|hHqL}jPE+#5zNR*hE5He>E zFv@{BxheuVxFJm2sat>W@ai<*O`FjH?r!g5XxQ$y*SBvTJh~irW!R3*aDS@3TW$67 z@`5v;mRg2UOjDVv%FW@n^{Qe>w7J}zYK1(|v@uS<{OIPFFE;kPz zUHY?09M_`23>}-g-)3{NniR*_UqK_&@*0-{o1QbvfkCa3W?}mP$nCh8X?i;DDkA1_b18 zDe-Fkag2b)-kVt;hYY4n6kR;=5Ju%E0_Kze05rBeAObgyXGcYQ?;_%6A$APs9}~CM z5Wr01k3`ics7Y_&t1Waa<1c>lqr>g>(cIKeWEn`v~D`E)Wfgmc^hx>%d_^!o7X#m!4YDY>|4TbASDFwgsC zInB#aRHBvG;ePtRH&vbW_WuLx%USIc`2Kkb2w~8#06BnY1iBvA?tnnSYncbd;p>GK zy^?eu^TCMLn_!K7p!Fjzne)(pzOEc>^)A?2$qH+__e#~Rnj(n!hZRbK`&%mrzI(@j z=Lx5a%mvDr=0!wMoys&Ux}0PIre|-z_2$hhCMbz(o%hH6o9oxN`8;3EcOo%GYz2{6SI?u~Emb%QRV!Jt>4n)*iFQxR>m`Oxh@9{$@rI?ukh$u2Sc<-(D8o3wd228~1?r_XGkK@S^h-0SKH!3d8_tW-5Alae0=cLoS&UpC8ecW(!vV$K`Rp zTmVqrqL6@cin4*tX1m#Ly0qPPceuOTjGF)?hGFVW?{4odHruOdTS{Jbgi@CN8&c-i&FO4@+$)L>O`b@Yd^gyPaz{OrO2J^*mMNH>cJcpz__% zer~5zYjw)`pZ~Le_M5-;>)-j#cUv=H7QOqQ{oOzK)9-)3Ojn=${N{WA?f3FH2pSCe z_3i7&?|cEe{PTbE5B~69`-30+=m+n7;oZOS`+x0QU;7Hkg5aNh_I&BB)poJFbOkhI zBWpcj3=vxAC8b1+W*Pw@Z;c_=w$>b>T~tyU8OcW_gYv#ZYor3*-aDA-Vg_3?03 z1#nRb5W-CeL0WS&LI_{U>O}%Tpxy=D5WFo5n%DVAZu{$NlYV#iSxPVrnLrrbdhLBd zFx95g4Ad~LAPkF^xggltEezo3kbozoVj`dtc=%WnfvGfe@l_4A9`j*%Tf6aW-H9YNosfir5jvxj-vc%fKQ07EP$G8_B1bd? 
z4UeLmQs}6H?}IQR-@A|l0)kZ(glI&D@qzK-=bVStCFdA7xt)oLRqVg2L(f@@aQ>IU z0iU6(82wihLX2a8z?l%8NC5?yYBNk}zu#Xz+~t99ZfSD}H*Kc06 z<#cy@qhhu8({Y)Pr%mSFh6nadDZTY3txCJTy`JZNDa2stif%a-YbI4ipjS~50PkIh zu=o1#!ERtKoHoNIGxu71uSi}>L4zR|={-Qvbbf+2;}|onnTO^)=LBYiK8>TQ1kaO8 zI-QmWmsg3=-Il6Lh2yCafk{uqW?u5-hHf?_z8DLejb)J5GqbskTi%E@sw65O6b2guu*E^qNyX)@7JBs+yTg%GP@#%1BIT z*2XdC6eWfP0AZnuP_1((ACQBot({!41(^6+?ijEts0tvMX-w>a1)Wb=WWvOl3+Iw@ z3dv3i1}&ugksgEVa|9tbqP4s9EKQCy6eKv?P(mN!260Q&dD(2Xk?9oGE+wzsk_fBF z6z?;!0?#^+VGh8Ye9hxqnFi55FNfV`d$_x0PML>g?iZJrhIX6}yUWdXH*Uu9csRJH zm)AErk5^X@Ztw1ziUVfOFJ8WQ^!Nc2o(}WX#YL}6Nhvd-`%uc|!1Jm zkK4^{(gw9Zd+YH>Kl|yw_wWDv|JT3wU;e?r@`uAX{ipxgA8qr4zxCID_bNegu_h&t zoQoQEH|7NHQMzFk!ggHGXn@$g+-*J`d#`Td?(@9tw!47-W2U{=trMXK03^Njs;+7h zlbndGQzM_@KL_m+RY9ieW+obcTt(c}tRtFfcj<|sw3 z;MnFDx39bCIF6fPx|p^@%HYb3q19Po9RpqEl%a9DhfO_tn4`hzSA1H)tCl1}tfG0_ z5rxSB1VNF^yc=}$zCu9e<}MI@2w? z&N%N`M|)ml;OsUybHtxZTUY&05Fe)CWgIT9HeK}g?#@h7Att)mT>@BL>i+t6%Hz$O zSIzjl zaN^!t&WRH_cyN$yH&aRxf5P_R?(XQRlyL51HRdSey5{k-{*i;K9RAa=H zIo^(S^^qr~RMDY=@v5@D4dUu|t2jF|G_u)n<8s%WNUU8Z3Y=?2J&)g_lS z4#V+qDy69DI1KY~_87{ZL#IaX&>^boj&Q{2WT}obotr zRJD{MswowSy6hC@U``wnLu>R<;!HstCl1eAe0Qn_5l#R;OcN4xmF;%R1Vc%M%l`V- z-BV7-!`(Cut;w{zI4<^`Kl#CLeEZv~9o&!eQifeh*+3sWd2o_r8OP9W6Z8IXRHN57 z*G94 zzI}30TJP5f`jbEV>5Gp(q1xNs@$0|!?eG8mFP^^t)qm^X`aA#U|KLCVSN`q4{a^f7 z|NOh(`RLV$pZsfo@Rv@fTTF1b-*0voZw|MMbw~*~xf4;52uVN$AU7}v151U6X>d?v zY^^4abpe=p4LdMJayN~BE0b`0JBC%*LlbFk8X8a&b=3&!3#JDF7|}#U)Xk-d$a)G% zZ>A!>n`k_3Lrw|l^~)FFes!@q9d6{fw|QRnx7Zr=C8LqnWF2)@Il3<3V%k^m#QokM zu*zOiX|k4ABf&VN%gs0xZmmieLSzIFZ4a2D&e~q>Z*ETe&9oWIkT?%{L?BZI2UP

(*wv(UUE0Ql|Jh&Zh%{o69gpI1t3` z4uI;g^sa#Fq~aM#`%>6V{cxItXi{yPX+lOr)H73Mt_s%7YHw!NYM)A3U9Y^F2+dv8 zn<}TY%=Kz_X=;g5N(|tK;}IB}w%QlwlsTCiQ38P7cB>-jI;6w^L(cBj!*f7{gi+;c z>LJUyHy--0NXO1kjF}k_rkvC))cj>AlvWQS^IGVbI0bE+N*;!hD5;(0#zgVier2Ax zcbcrMKorT)sy#T{W6ZTn?=nrB)+-?9l+9rnM|X4xH}Z;5ny-DR>-WffZ)!BxloKGN zoMPYA?u7j6)tig$rGsAI-b`hq*2_3-ciU4vJ$m}+mmhyv@}LIe?!l|OyN^EjI`qHm7vc7YFH9-(Yszo{>t zzj*c0$1M8wuYKWXKlzJ){qOv(Uw-nU8NPUR_v7#X@Gt-6Z@j*_8y`K}OZwrDKAP`t z>;BC+4%3i*IsLExcmM8(AN=gYAAE#EdH98Q{-6Kv|N3wLTYs>7z`2O@W!wzSDkMDV zEEqZDg9C*fL)Asp)J09*a!#BH!H1y`x|_Xz`Fb42-l}R3XSc_hgBa*&NK$ycr19Js z>IkUPT|LA_?$+bH7eRC;%$x$KUiIZ*fMzaTYVS*Hb8EAyWJ=5NM7azpNp0jdFuQi^ zRoV%(Wrj?|?vZfeD;Z0O>E2f57G4x|TqTq-nQew)D#cBtiK&Od1dxcy(Vc9uc73{T zt|=E4F|$%qB$cfCdRV)(48qFb>aM0?4hSt~kS_H3gtP)SuqImHZvZR!10VnjU=C^~ zWNVm~pA|Uf!4gpL4~FQ(@rTzBLXRPp!1?@NUHRcN72pUF51&3mOs+oW@s}U{Qksg`+~wx> z@bcwn1HrS0R~hxe1dNk+$PV?kakkXU+@^L}acxv9z|E zChNApJ5HMoA)C6XnP_mEA=ccClXfk+^d44QG_%b(_FnJ92x1d*bwYM`A{J2q+>D#n z8UUI>1XwSvs*0*3LdwY;iPF-fwmOYGkPHlrEZsvFL12kc#KgdfIb|j$$6h-k=3H9s z>t;iYKyE-l0Ej>ipl+gVyBlwBuYuiBOUi9o+$<*+v(`mS0bwXvduOKIc0=fiU>b5F z9C9MWNYF#@x-5v4IH@W)m}!dot(EyFQ&TlO$8efD*coj>M8rH4BzA zym@rc^z)Bz-+ueq@y*TEZo7Z;*%#k=aIqae`RI90<&)>(Y-z%49B`(Gy}@YHvcwt=7nZ%Q>Hphuy`dOG5(bT}1$(w;rdV zXg4r->#aq4KbT<@O8~eUf-xb6&{HHa1@@rQdu?E5D!ne!E74j&PedX;&__^n6=@Ys zHxI6eu`MTbbrUYh+`4pCQ?sa)MrLA)lS@O%2FB=-BM^|fbum#Q2DePK9fz@SmueAC z6%=wv#@N;S?Q;0|)n_~orDOz41hiH^#woh;&S%a!WEJ4+7_~wvgvf*h&g6+8aC-_z zWeECXxLWJ5%-w+2>73M~fyX0PF~XDp!#HqG4gx(R{_ACOzHk871{lYCX+HuW+KLCA z?b>G>!HSfwSpPXc?sJ~o))fKK)WFfyQ_cq7ZQ1V9)eh&`j)&&zt^y!x%XBs7O*ziX z^_$z>W;cyf?@b+>=w~lq)n$46*^{xPF|$c?*Kyn}ReRT~%LgTAFe9hTIYi}zG!0`; zNvw;CqZ{C|thT9P$ei$0XECeNMYYzJayHYH^U}Jyi*+?o6IBO?kR^0!fF5W4yv)qG z*4A1St;jfxeh%PM?PbWwB5TT&}o^!swzIkx@Ag7GT zW0~AR%yQ1|j)dVKUNh^@j`mOl1(6)wMi2?XZHZ`C9ftAlZhv+4@HiidaLijoI?SiH z-+A};aBBv;&BgW2tvj6RvOga3P|C33as1=&{K;?s&Tld*q9XbhqFttGQ~Ltob6YT_lh98;eEH?Ce*Mjh*WUX$4%^N4<;$DTo z{_LlBZ*HeCefbOTe&hZ3?vAbVgCG6;&CTlr85GxcxMghG-aLLZ{Ne9^>)-qL{+<8F 
z|NZ~=yMOgB|2O~Wzx%`A|NSpNd-U|-My0DkNuzfIiPBDC@J&i-otjD2OgkfhTjm^( z8JHuO1042;%gYN@@iiH`yPK-4iDYi3N(d@qqEfr`ZYm;u4GmHWDCVpPj?+X%8j>S{ zxvKTvYwb(#%{@RxbobUOGimSF*RShwPl)CMCeaVTOeL3+Uf%CIh7zv#TpE-q7luP2|k&Fdv!t$zog;fn| zZm|cP^Aw%}Y(W71%gaoxN>bkqw=FCJe4MR>m4gziqFI2@PSuP&}$zJ4v{hD@d0?(ceEfIO9aTu$HplRx|I-~8rg#N}>pO|-U) zX@dyUIJKs?$9Z#kA@1WiN$;1Jmr*`*cRcLtX>R)b&E1Q!+-F1|EK?BL;u=0e|_ojiw{1%y1E((o;|!g-n=63dfMaR&6l6x zH-7EOSHAwmhfkicwSW1qfBU_s8>l5(^c*g8DBBwtf6vdh!Nv#XI+>Rp}WR2?~VjK zM^{3-SH1u_pgDL` zYvQemno&vsm^qKR5V=T0fJBUdDoVt@Y8%X15A%M1IskDLei2cydaINsj1O+Bk9Pgb z?<0Ni{ucnjmaVGG70<`}E93k}J<2m3jJkJm=@Fm5mhGrnH&=IcPzODWL=9J`?m1Rs zWrOckLk@8bz`Z&M&J=}*h8NGKvo*Z#K3EUYSNK07gF;3E?zq(2rRT!aj-S1CsSYq5pt#1+7G)()${>hWa!~_89{`#;_n@w{u zFmrKp!ptc((~ejjUfk9n{q*_e!)JHb`*DPvj=%SN-#DD;r$2t4@FD>}dHUq$?&X)? zdvG!NpZ?jObbufJ=mS?;o<~)>a+Eja- z)dWnCzzG1PiHJDCsh*ryMl^KI!6=-W)O(sNfXx-(kCXOuvn>Yjxbm+NVd`f$833Hq z7kwq(T`9z*UA4y`868U=l?bwGL&1VK!1dRLpx~=W^!(4)b;H4Mg@j@iI9mY_@SfRl z3oX~#L9p&n5b1R1Xp7hhMFbH!CwF`JcrzBieslZsvp0PaaA#x_C&O_ZYi+%TV~<|H zezhIO$5#&zcY8n-KtpP^=Tve^$HQSh9+)uYgqanri5%+z&~wRGR}Yrn0KAmJ%;xz- z1j{m)5_)tWhXDa|%1!#=bX1EJ1fMnoBJ|$7NEbt3F%5+X5t_+3B$YmvaVSMZ2pP^> zQYk~UbPc(<3NdrS)`Te|FhThKp=4IGs_H;)wj0JwI8Z_ZCQi&|0$_;1>k1V$oXA91 zSC=YEiA8m(E#(9r*_}?90;xx&+GMF60EaS!#%>q}aAYdIn**r#lv3+0CkDW}^pX+- zx;dH^CKJtxQxL^KK>{OU7vaQe!S#AdIp&DoBj*HrS4s&G)ZGz8K@Hqd%@SuslBff; z&!4BY#sbM#y+1OuJF03RKMrd{5nupl_wFzzUJJM9d7h@tFb>4Y9n8Ip01+|;n7T$4 zuJjefC6YK1rL*6~WAKWiGC-VbJ)LG|9>-Bk#_dMkmZeJXm%B@;EmTAEGE-t6hGDb4 z+3)8?YqzB}2Hp&tGH$;6y&n^oum0LsZ}+#|OhJyb z?e**0{Ez?pA6&n>xp{S)cMrfxns6S|wE0{A`rkQRAO6LE_s75WTfhAmKltF|U+#bR z_y6YY>24gep++9ISw-lrpNNC&qLtc>f)-4!&Oi}QA0cZ@{Nl6EhM|~=o2d4vcvk7X zb%+hlRi!(aN%xiES>t|VU2qi>5$Wb?A}Sr+2+=H{PS42^&B4ZDlqPLi5)-1I<^#BA z=8KC9H{ajhw0Z=ToN!Z^bRiIC1L#0;Fq)YlsGGQnuO0;eQ;}I!y10R>dhY^2iD*b= z${E3H7g6I7U6>Ogr(_OY1qhsdUXEdOj=y4-_?!t|7n{!o6Ze{#^DYwBgjK`2ibU^o zrET3K96;3^Tus%LL{SYi!XQ-@RAQhATEopi9Rmf^SPNj)1u3EdD1x1{;a1;)gT+N1 ztTNE~@8~hpdR$y*TL-{B_ZL 
zY0Y*iB@wxGa6xo1*Dhn3;^Ysn-8g190e7bC>b*6h)S8%Dq|`D~N@S*hPK46Cdvw{b zil$T$y|!7jHmRXOT55%mmP!TCF>|d;DrK%>;BGx7Km;=ZwCLM16GH60uV7`A0|4Zd zhn(kS=EQ2=q-*a(VMb8xOf-(8n|JNvE)Gj=sT2o4%*Qg8&|e;o`*E1e3<p5;!8RE3oWZ>;7&J0MoPyG5`RV+U{=m0Jz;o9orZLo^u2!e}XY z?{TU`FYemXSv1|f*_XuM__Z%Te41ary!-Bd|AWl=lV5y%dG+A?-}~Ob{5O7Mnj90? z<{y3dJe6XJ-g)=!Pk!->hZk4#y#MgWKlqpa>TjucC;$Dw`8WSh|Kb1T%U^!)t;Y|p z-@KWIF;vc*af_~ulE?r>tU8FhstBQ{Oo+y-6nI_#nW?o-NI5aWI^aj&P-F<4r^nAR zZbCQHE?uO@yl>qKHJmh0>S@retmwFl<2d3Sf>1 z4pwD0)Z;SG^GuBEv+Ww2YFH!J9~c6-Q!KJ)ZBN)(BKGHVisJl>_#)Ry99%SHD<-CD zN+zfR7KRBoRf+pYRwad*s=Jv=Y+-&bS&Z9;)~ty2>^>jptL}6C&!6wHm67-rpMksI zi$?>?4H75rL1s2LVs;1ZnmDCw?|k7QC4BMXW)tm%~;5lbxY9d`E1d#x)Dseli zi$sHxW6ndX-NAuCUCq6zsvC2PT7uS1)c}A1+-w+ffQF`8a*5!dXhIPY%A~I9a9S2q z0KfqK*lpxHQH9nPBK^DKksEz*OyKw2?H zB1$~hMa9{^ ztHY2Iv8#Oe;ZJ_!H@-G7xVFt$hE2u<-Q~E<$f=4bLI+1CGo8wiowONZI#m7PPe0jg z9-Z!5%7xk9elos!bvn+cUw-h(`(Jzi?RVdK`Ra|!e04GGwio-8eE9K;QnKoDx!WCI zzs6<$8-MMu|LjNKfA)Cujjz7<^N&BgxSD?Rci#UG{ty4hZ~WTV9zEFQ3~_X~+A^1c zhlGUSi3!YO;R=|<%$jsDYqh%uOF+{ws%T<%Q}Dq0!OcQV6;;x!h@5f755NPUhPBSu z-B%SUfCc?4(nW=cYVYFijKGjeKAsMT!#<}hP3GeX5r!ehkV>F{?ud}4@!>l>J(#7> z^#IbKEzl~O;=N_{>|FvNz&wt-ad!or#I3tkkuDObV?r936QP>`u3dGiN<_HEQ|Pi( zK(Q}!R=~n1EMOT;i>B)VkTxJ-KCkT zca^Xhi)xRby1SZ%^&Se!(olu78ENIDZ4aF4d-n z=SeN*EF@GF6J=y{N}S8cPoC{|JNoSTo6}Li(#7U#%ve@b3ctAI0ng01I+#I9$xRaBGM^BkNppgww$oA}yh}{(xYX7pjutTf+M1co-xDKMTuwQqMxm%iLdl+l z#2lB}0249+!7vskG?&dV&WHKI)z#~lFAr~S-g)~i)hfN4St!bz>NssM@u{_Lep;54 z8Qc@`-QB@~)$u1k{qV`NcVB;YGapX#;qd-f-WfLh=&gr8``JgYp1=C+`Rh+!-yBc- z-}`I7R_D|0QNRC_PXM{s1*e=R{NDGzn;$-U|6A{W^2rB}t~L)JZrb zT1=}LYb!wJ1Z$VYdY{zVKTp1kHQ`rqmsL5rGQjY8JjSm~-tiB8m8h$#hKpK7SD43L z%(`h)AyMcGRkU~M(z~jth-ep4v##3JL?ph7n9k3Vq_}ogR3FTK)q57?44(BqYv9Cc zIEt6~Y(R)>-|x#7EbKXE&}w&w2+_!tT{N*jdbD}(y$7x9>z7CG1uRMPwzO8|w4Xoy z_~Wa~i)T+CnTexuwnX}CU;p~2&tIHGJSQVanbmuw21JwhX;}{QNmY>u2)i0G57X3o z8^&SUY}{cQ2R8`~aso_9?l26~;W#5;W||U@i8FJUzo-;-bc~k3G!Er7x4Lvh2H<&F zQX)o5L>HUwl!t^Vr@`GsOj^(9!tBe7t!Uc}6PPnNIS_(7s9H)H9Lb$jl0yPQ2O^d( 
zZn7Q9fS4RmH4){MOeHaE?*QJUpO)(Gkyo+ZZACO}u0$*jbM3X8IT2z`#wdVh=qFhe zOjRz%4XJE$VIp%tN+}PCQ&hxgt6}emaf!}5zu9VyHn-Y3AQ`|?tC{aEFOSR8yNiOT zi?qkdx%V6fa&17nchadj%XKz1y>kD6e_xkR7 znszA~QYk74f}+#*y}wCno&-A!FpS_3yi=)Fm=CY{{af!t#z zNr`&|aMqd76JeicMo7dak`Ne_yjwk5Tfi(aQ_hsQd+)k{DG@~n@oF?Pi|JayyIWIr z#3tRqbIxN*n^FcMa#s;Tbam~bB7jKDDm_IZxVf3GO=Pf|zdXbb-~`d(700ZB>ssY| zR%V{_+|H^|Far}?`;w!2R@7S8rrJciDJ@N!NF4OF$YV~am++ftZ3$({^e(_eh>=5~Lo z&A{B{G#}%>>C%UBAmZaZH|Y`T?&!lfENx!u(OqhtiK)(wkmh+|g3#>%U?eLS6NwrGOIp-Y6hJ%}WYpu5GV4;5x-crSh(88X9RHeCt8YE^UP%}f^ zFH1Ka$3lQZDGZQO677cOZl*1PZ&F5e)8JS@T@kUAVhV(q5T~51$>nah)S8H6acqi8 zTtywwRMwuOoTkmBZb)D%iO{=cO3WAo1rkhCZuK;kbg`Mpfsh~sJbxx5-32%jWj7>Fu=>Tw@6~BY%xM++0YaF9h#6B(Lr$3}sklm5m>;CfQ?(y4iU*Fu# zr}^RKMw>~~obqu#S{hG6&)*y$KYZ})$+oK8wf4!!FD`fE;qJ}j#}7ACxq0*A@@n_i zvnQFM9_vuH@4Wl&fBK*O=dbsNyVL%E@^Aj_?LZ8DGctf`7m=pg=F{g4$Y00XGLl@@XvNFt}Jdo~Du`_@MXN`;6Y$tI2_&5owny(w#h&68d0O z1Hd4BS47_p+&E=SSsc5%sP zp7jTaK;)~*3ysh|4}Duv^LU?Y9HL!hRrjcfX;*8aqTbcx*IliPQFo~-wM!MN3QfJL z*N|()?%>un?hv&z2Oso;of+p9p$nZ6t`4vBHvp^P2hIwASeJ-1F@UyWf8wzEW<@1J z8dVyii}YG666KsX+w$z~?OShM+*}`j{KJn=d+I&i-7TMe_TuvD;@5ud{fo;9Tt!3; z#cZhy`10s-^K0+F|KlJ0q&LlFsx1oRdqN&kVWP(mpAJJ_mKhz$z0N0h$vGX4hma+R zNyL_l8gm+lGK|CNbW(G0*lxG2wOD!>yd*Y<)>eVF zDe-Bp#3ZH$-g;l=WnPw)6B1IQNX~J0L`UYsTp@8z917i%srQaV<2Xg|va1q$iquKN zrq)D&LK+0!P%cl9w3uIi$hle<;zVwOv3y?2%U zVLz5a;9&jHgDZ4kWYO4gVKtgKI7WQGySu2kuTcT>Qd^g9CJtg=MUSm6y*E__=xXB7 z-J9wneG!pRe3*ykNazD0vvC4I-cBrs4s~Zl)a* zw+(R%(Pal5zT0P(1)WhB3a#Jq1_~!L~%)6ib#V;N|dkSUvM}PG5 zAAJA$x4-?3>$@YQ{JlT>0a_n3Z->pJ?e_UEf0`0Leeh70e)ZtdVeXIKdMl0Nr=Q*Y z_!po4z5ne0^>_Y_zw=-_n4HFxN=i(q(o8!O@3y<`b~kP|sU$}~H^tt&KW!BviX0C6 z-Nml8ihvfr6l5kgvo$*gkQt*ZFw~D6RieSnV|dTZL6FQ`0GtVt!QCVfdBRv4IML~N z3M3mL*i)NWM+C-`-P*%XErcQ_{ zW4ahNQ%Z$k8cI%V?!9*ak6)N51C^BG9AD}0l|_SJjb+2tPP>W`%p;sm&*Z7azANrx zCZ=8DV?Fn4liIa)tzuNw7SXC!MVGEi*V<%hvNWlpt;bEUes`f93M1Ky`&L!CD@m5^gMxaDv3yRLFqRW<2ah7p<71i|WZc<1SpuYKjqAO7N(%iM{xxzEdD z=p_#fzz!MmrfmBnZ4nhEw2SQ~F_yxJZYJY2s(C5Q8OEs$g%8IAIMv$6adbBX%TXqU 
z#7wo;X&hXYn5NC7+9OXt0;1!j5^)14V~HQ>sm>{7cQ0jaSS8}#1l$QxO_`|GW(J0M zs47NkW-e+S$-xx>)D(%`q2$a+gkEwnH2~<^)wN5HNnFKLBI=+N!yCwYYKu5mM231kAMe zo>Ec~F|EC~u42&D#Gs4CQ(L>#rc0Nu+QgcG8g^OBA4Of%#av`PN<>%FTI{TGlEzyz z#v^xF=8DXDC^6opXV83 zdZ%b1&eBCyhjE%;qB z`pV^)w;2b9`EWADoVJO|m0SO;ckZS{pk7Yo4(yXJRJ^&{q485KmYLg z(|6wc@Pp@Hc=}eK>)ky3(?9ytzx0>Cl~aG`ooD;~a`W=0IQF~Kmh#WO|Ksewo3_i+ zFE6fs^k+YO>&exZzx>wY$CpE%>dF7!-}_I0@Av+OF2+dXkT=u#;Ob%;M|WDM5X7c2AG|>*wRFBJ2y9~p2Oyf39SHp(Y7$B$YYlDRat!x?{f9dRJUPU+e zBrA|RD1`fp&KV590{z?BG!P;x(U{#$+{C=8Hu2W2sWF?fD`x-y(=duF7&yF~&;#QRso)3Co-InKb{%j3i&CRj2pEX+!5V>!Fn1>-C zLIA3BMWkt*q8e62GsC0Dmrve)e5&oU7q>tE@RL(p-h1zz?Zwo4txIJpT^xyX9@D^( zfD@=J@4Wr=;iHQ`|G^KMndf|JvOk^76y2pQ<1h@RM4KZgT55IIX)I>SoU~h})LR>d zV%l4uQ^r!tkOp!x)fgqhh7r?fQ!8T%!85uOfnki9kWy0B#EFnrrxB1FAaPgAWk4XN z9%)40=UAio-&rRmqTTK1t-E8hXu8rQb zOLYIa^z{#@e>~N$yX{8$X}2BPGBaUoRoaqr zesNcyAMFTb_vq2>&2_JRGmSs_;JMTE*-KrH*AFj?IDYR3AHRNn{TuJU_rWhdoo)Ex zk3X6=r5*1!yWQzH&v$R0zV+ZIAO7OKue>++<<*;4uU)C5i9SSR4o}C(gM5 zVHX7eP7H1#T7@%<3ukb}SCNmxeF#6?AF;OTEPRz(*jf71O}kk$?GiY@b+y{Hb*o}a z*V>Ilpo{lzty`_y#G82SR>i7FQ)^v<5SE~&LoW(zPAQz%koe#E|01yu@beq$Ea+Wh zG|nsF852>BdT6%uP!*S;bals4av2MNt2q&k<1h{Q^w~oy=`Vi#!HZY7U;N@1o<4m9 zu1HvhNfqkS!2!WCb7X}R=s40>zVyZIZu^s;f7pd~mk$%C{p}q%F7sk)z4erFn9_E; zt*x5tIFz>3-F6y>q2$t9M@L2iw_GR{>M9Wmj>v8vhjq>c5JgeVnc38OYr`-wrx;oz zp&@Y!986kEglJxJ?kb49KkP&6M(pEI60@n5%!$|(h}i(eX>MAEjA$k2lsF{@LNg#v zj@VUm%9pzL>(Z=tuicw~DE98P+0tyTeQtfO{Z#v@wc4e&-p#wXxQluKnDN}IS`%@A$P(Kh z_rowClLM-{sH$6w^30G7t&3~G2XH^}eQwG`Qgd@?O}dJO%fa2uyI8ATyP{K~5_wjP z#6;k}%rls!#AdqGMb($3*Dgq@iy;CL3?(Ca*UFrN{7^%2-=F4bdv#bkjp>j7^!s0U z?@JFZHxGB4aZD+VpS^f9ZN}v=X9Q;6j1wwur(LUd>gnUx?ZwTK^4R9KzdgSF^sRA| zKYjUXvqPzeuYc+7F=O}i_y7Js`(r)AQR?;|!+Da&%7L{;glXdG(UPMRmIy z$0-5e>9ibf-qbgr{F{I4U%HxxS1+$mr`nD7`(>H6RqsuSF%PMX8Ngd>(lpwx2mu+y zx|tAxnnXG`0Pb(@9zA-Zs$H6z#GDSU;2NUm5aGm25-g~l9bs$Snz^YW_!`US>Jaq+ z9l=Y^gpd+TYj-y{ZqA6-8@g$0F5)JH&M9-s#ED?dK{izp3+Y$9;n&j-+`UIFQmhDo zXaKeM0QNC)M5mRjih#2TCZX?hWl={8>zaA_inY>at42vE=%uA>#fJVp;65#XHBWv 
zS)>}{{L1=bD5qD-B9bY@Ehv4N;;nG&muc zcEbAj$;HiFhs)v7(+A9+GpMP!ALn_ThH+xmYG!4aA~gwAGQs;_`U<7=-S7Xft8I4M zaWevfm>v%Mh?k6Bl+83*m%c1xDhx1`jD%5PI;0Z0s-=|u@zhlud>9H7rcAENt2qvr zr2zmjj>E`+Ln-KC7FA|Si2ay z!*K@poO0{p2&Q_PPmVs8LVzZH$cYeg&gNEzGNe=ztMq9aLMt0-4MY^8UlogVXmxKQ z$Q%dp(rT31i+EQ*E$vkMsr6%Ri^yF2(ls8=roJ?-qE+S8+9G}K{n*>AQq>mi%}fEB zN`Rs25mx5QyqFskn?|henuMsU9%c1=t&pV$R|28TTi=cb5>-sAWZP@+wRaH(#GG>P zk}{Ve1Awh*OW9n2sCS90&di&1BDy;q6@10p)WvdkBkQ{b(qr7f)@Bc`0jVODr`dpjTM?&9+0 zXP>_F*46u8-o1MH+1J1R`xn!e9H)z`?|%P{EbzO(^QBjBZa;f*GavgqU;4ry zf9Ic=}ePSULj zO1EZiy)U&MkTnk}r4;z2qhSI{Xkfv|N2Dvb55utEANIG0-S#3gN2*oi2D(S+fCsH} zZ~oHAk26&h(H_>KNDYd?89n4H>S$(GmqmJS%Mvh;N=wX%Sz70SlC0}}8<(2~{bj-o zC3D0WhpPdRXzl&B-m7~=KwzRID&o4-+TBidKAdg=HD@M=fz!w(amqOjrIei2q>DJY zx^?R-tbdNxIqw1I+?{CsrJxemq@Q~Om7RYyaYToZN5pEkF4n|a*CjXru_i`ctaS|( zwX2Ch>l*)h9i!ajya9yZ?p~xC<;C}6p7Yzs&&$B)`9!ON^mFdVe0>#H)0(Pav_XL~ zBdBZZ+HJ^XCpvx80aZNfg2RrczS0a*-Gy@Hk8WkaLNF znV5%CGP9~ia;mE)qL>O0-OMO)ZEd?5cHfRM93jx4twv)h6b1ZF7t9UzRGT zrL`{cOfOxQzW$M}T1C~oHfc?oNbOBb?~VsfIg);R7q!@+fsoxWcw{^4h|C-@1=@O^ z_SUJ#%%q8^sKuO{Q%L~6)P*oSsMe|wvYFRbL%?bPh>572P7Q!^&aKPt;%ZsiJkRs8 z5He#jwIP=wC-h)+NWqX3{P_zxP*vz0UoUU%t7!>6oUs zA3xcD_WJJj_Tu5?SKfW=qaXixzTLlgdGqkmdsh$M`{c#(qvzNEKunx*K&$j$QPf!LRb9rhq{O}TWm!yBTdT{=#3{2_N3@&^5vz1lbBB}? 
z5r)feJzUL#&|1;!=*~7{LQ@TqlB$FuF$4l#<+vQwlnKMKlMq}w6OO}>bEY-Oe~oO2 z6Fube_w?_5?9Z=!r{|KD6$}apEa>1s!94CsQR`++ZRx$6b@e9R#K_i(#(Ota2wQ*} zYW&??L=0fnSHS1i19GDkaa-RO=WGf`TFrRpwZ&Hpj)VIec0P38HL}706&(oJmEXMHFSTti#>bEHAN-APNxi#y^V$H^bVzA8m2sq8;L}e(E;*Ny z2{k7g5)*o2*cN^;reFW+R|x3`Km6HYZj^>;*i2=EhAN%G`?6Tq%!!b4VaGL|(A}MJ zv3l`fr&foYn5b4!QFkXow*U!XsWpOf*$!7%tiTny&!wKZZ2_QPG6a40zr7ju)| znUX6ob4pkWXC~1|PXIA<#C`3Go-zY@?`;^;+RQ~blwzvGF!ol_bj*d3Lz?F97Zt|q?^O77@U@BjwNqY9?+&Mre*TkhzW?RNSDVb`_4C_5|I@$t z@Ry&xJ}fttZjS!l@BQc>{#XC#3va#s?D0b;$SDyLIBHi!NEwka$i&bP zL|whBHWx+Xgy_a@Lt=6x2XxOVh3}+ESMaWZlbl19;fymmMBw1Mrml(nD^GC#EglKd z>!c14>x{a$RR!ctqPD*g!geuWhf31Y7YkX?y~s)ZrR^0nbT$q&)(+W z_~zBOf9u&VKKQ9BZFZOAG?|Hc9|q0?y?pU}Gfv|$GO+^^vl+PAkn!=u%~!wtrExQT z{OM<6Kq=)?0HL;-*rzGiy7WjFQZR9JV8mf4y=ynUJM7&flyn~RH$^qx~`wP&Wx zoH!$;0FzaX2$|W;cDrq;n1^8kCt?nyGBFK_w2Om_O4lAK`R-OqHg_U&2>Cd;J5a)u z4bYJdP>qH%ta_}qHDPxKZ~=EZo+)x8RYOR?F^zLoZE?!sF=_9f~FA<7o!K zl#)4o_UfdVmR27=xO(;K<+G=c$AM3W`SDwCeeh`&XEAx}-FI#d_>ce5cYg1${LV$8 zzU=#adi>~NDw{D~{`k*+^6KM{F0b-CUwJ!ELyxHRoQ?U|b?$yR_83oS5@j-pGjT zfCzzSBA~0B5M&t2I1WrqPH2d3Ia5jrfokhZYh4;KAc8gVrc)WFVPFC?Aw~mJILj=3 zg+@Zh4B%0i1*@TDy%K_I_50^?)eiZ3f1K@FY92kEVxW#{7+yaM8g*e{ z>+5qSzJGN&4uaJ`H-gxoj~N zLpDf`9Co$sG`#ir!580o_xk$wlTTmP?m!d}cEkeZT+HDxA1U+2<;64%8EN36i*Bdw z_3M4%p)P$WDH4G~3YJ%aaCU&PWa)Jr3la7%1d$@{L?mj&NnMe_y)zS0B1~#fn;HNy z1CoPvHA*NVg)_Q?p*wYjAcL4`nPnQLDGlW4ii&V*RZPShldH^xHe@atFSet!qIIBI;d+VURBFYkmVDaY}@ga}K@GvMlC)JRQ{xm?;;` zSquWc3>}*|9OrtRm)iQ_G~XTO!*MyBmU(W6Q=Mz8UFX*4rSA_XL{zi6ws~nw>vLpza1BYvRo30Yql5B4=jIiMR}d0o2x;^trXU_rtsZvjUnU zaz4z<+`G6pl_o+tot8?JQp(dXq5~r5Jk&17TFbbp>WXYYOYJ~7P6L9PG-OPh@kbxN z#4=u74!bEyYd1IhTyh!GZo9p?xkDgVP*DWB-NT{Fu~!GbzP=r|V> z5<57hOq|>}ap~Porv(Xf$wU-hAMJ(!V=6-`U>+kq6RD|mS>w)DbQ_2X(FyV8XU`u! 
zeyAqk-dY#w=OrK**z@VJq6@1eMpm#5958HAt4K?<tt7w7J3Qqx=TI>NU_(Tx3RN-b}VpJdVFr?xhDGa7A4k{*w-c>Q;KEw+1IONPJ za|$OZ5q60v>_}X8T+h{efc^7zU`1p;&wcZIy+Vv`a6XPBm{Tm6Fo-2v)Xac_H9p@S z5Rnb4VW_}+oxd3b^5thq=gI-VSk{dowc^Pc(p~I?H~4>e4qC0}Zx&JZvB3#{2R>fBkpA_R(j*xZb}G|1>e>Vc^86 zjMs18kS72(peScYLXoc0r!hTwc=?6*-u>*=i<|vkz<3}|2@rBhnbUTAA!_@({WxR- zNQBdn-E>OZ%;|VoHrriojT6UptnLcdUAk5`BS2RJa{%nUFUvxddY6@9RPQ3gINU$S zoU$qaP!~%nMXyvOh?{E3@swa#@zkvK3WPVe$5IMACO`s0U^6nKoCX9C5qAxvL!0Nn z^zCmyef;?4tJlOS82MIvPALI`VXbNoqQZpHUckiC#oUg|Qq`M^qa#DNemI@1>!xIp z-dc^~4J5of9DtA$xf>xmNJ`XNyO#;9J?#_^ueaVgMFyO!C?Kk8E~N|u5#@-eGc&|8 zO=Xz!G#MhMqyV++TzeH=M31%YPxUa@dG8|V}#H|9@gC2())fsR*{ecQp#$$Kc24d z_WM&i)Ou{KiI%(plsQa~?&Enb2^^LtAEdPbH1JZ5qe_ z^8fP>f8)2mrM^tlFb-4BgMm4E?{c}j%n?6n&=nRX3o$EX72Aw?zdxLgM{O`6`t9Ls z?>=SryBXeGpB_JYbp7ImtIVgn&2IYi`7fFKrr@g=pX@F-cl)^t5@kXkN*+qeiE>VH zFN#=gOp_bA_iv)KQC#C=AN?m_3Kc_b*m0)8zCB_Ack}mC8S96CL(4& z=X$#XtAKX8y*b_9+?ImV1|B@#?yh#8TuI7A-~9F$zx_M!eD>;R$989^M(*T!%$rTA zr~Tc{D@HO?Tg#ykk)!5iGnKdBdb-(ee)`deht{J~x_32o1{zZ-oa=JhOa&0a&_oH% z`qjmDX~&#%T{<%_t*IMwa>PUth@V9@m(scr5~aMfMkSrIaU9X6JSIp8$T=CRNIT7Q zP8r>UjAeqMFaiQYckO0kii9FU%&Jz#>9BO33S~$c3={x~QWI_BfXvK*It=VupFVu> z+^IJg1nNPIY*x?6YO z&!Wd6`A8dlyi>OGjm78R(s)`AxVeYrM8BIVD|9AWnut?+LvJ{(z`3)-pX$C z$ka<=b&t(vo@aL-$D-O(A~jcpV{Ki1v)d4HU5;OP_wjs?oBe!J;5=PDm=62vXHTv! zb{i4>;(K38h{wZi@BMH*eDRC#9uKFShU3ivjslA&M{+Do32k5)7@0ih$c6`3Yi%|Y zN(6{G6=F6sHDwO(iUWXlNhzD%%UCo}fhdAcDb4k?*^EZewVR!%;P8bRM5!E@5&<$q zJK@?4tOBOw#tcZ%BT>%7Z-qH=ua%J^p|X^WtLze_tE)00IuM8ROVCAiF_iPs?1*S! 
z$OvGjoe;x+>gMD+Fip8^hGEPlC*Eu~$h25H)n%>=x)vq^XthN$xrl%}Go5?QV1=zB zNi#Zj?*pIlUQna=C-8mlvY$OH;Af8*xSz9kJ{RLVxC2E{*V>y6Yi{CwpUdZeI%1nG zEHxUh)mkxp|2Z*6euvMAfw-w0$OC->GQg-)P;hf~GiY59m@tL)VHie6cGU#LoZNA# za(8#UyFEDAw8`7c^ytZs7+ROjw0&@~yS(Dx`YT`EJV?)9eY`*2VInijiN-N+cAM)r zuMll036W|S_t@g0OCK^_U2WcZ`>7**^y%}eK8)K$c_lB%cb6B$JTD6%*510b%Lm)z ze4ClweOc!1W=p|ki6S#1B`oC#gm?%bS9V3EO?ML%;+>E8Dt zt(HVV6LoD)ZeZ%(#T*D6!Z#pd(>O4qiMG}pp|<%r-=&01XlAOC5`W>HXFvYYUz8M; z^bl{coU^KIw;PPwJr%|@mO<4bAa*V9&E?qosf!b)!pNxu?Uy<)^KKffEs2N-b7BBf z?Zc4W6wL-^0tk0Z>mtGaxy6bA2*lW=Mb%?sHVt-8MPy!PHvpj1sWwqZFvM=w&4Cjn z=Alq7>L7@-%FR5#J{;d1mz$;C)pon|>v?&7JY65>*LSC5)n?F~mhQFtTy<%7nETS& z(wYNzwWiWVx@t9PCX2L1q^WiD$a;(%mDmQvOccaGow|Y|E07rW?qYbFTWzqP<>l?M z0pBk2a(}#Q_0Z(@FdOpS@#qYK8#yHFl*#}Dugb*x0H%I zB!=4NuYC23|L`CGH&>Te+VpfhEVU7HnTFomQmbmui8+xPyt>)XwaxSCbg0YexEW|1 z`A`iKyxAW%mk)mS!E>fG@0XOytDDBP;6ur77NQC?FtL(LNC_h;?_5J-DKRok+v)j> zPrm$>uPn=)asu%9K{2zNg&!7HG4+aM!&-?RQEILlDqo9q4xIMsU@?ZTJSYGFn7a5hUJla(Aa-!=9aM%yIVE&g5iyArnZxm@ zBAIbJPD2_^Tt!=Lp)y`=>9`Ki>p>r#Mi@RXi&Wk9K8Q==-lO7YMLs}iQi7dbYqZz! 
zB!F|4HG)$jPQ+=gGL1A$T*0lg*g%iv ziUyeY?5f~~%sCvGDdn6pAu%OF>a~mLX|AuYZ_VxD!;8y@!^MLkB}|D-eLGwXDIxX; zkEY-LtKV{74*Q$^-9F}sGM3%Njxy|TUzd!;4g`jPDb+5~w4T7;dHU$BM-TTmx6eQM z?6jZ5(P!?QiaVw>i0E3VEPXeP4ti>*NbTQk#wfg|6h0MWrq)GN%T$msQ6#}-MC`R@ zPHvWSaVJ2m%e)yzS4Cn2WX^;HV4N6{MV*-w(~t*OCk$c$)gctR5k*?#=|lg%*gwp;6s+}y0S&cw(0)YU zqPh1)*VvS3k-t71j`MP{*`S-6)h0w74|L{~D0eY%4g}242FK9W2OJ-6Z&h_bAWlOO zRZc4zfQW~~UepYIZu8vgo9mmKyE|v>t_sl2L*#3Sd6;q@T9@0y;r4X8KFx2Ar|VOF zbDUr9kJqQ=(Dlt}d2?E3gR0QfYS*qd*H(L9dLTu835m4X+*qYpZ z?|p_CGjHCh?!7gjfGV;80we&k$!@7dnP_S~>JMoBdwP&eX5xd?Y9pIugCYqKGl449 zaK{{DJi{K`eXXSjKXHpn3(HK442z72bB_BOzVG)D?Q6Mtd~}Yvf+0I|R^uQN1PPY1 zuu#!TfwQX;9+w$Ogc#h{t970iEgB`IxN@hG=k03qc>nltznq_6vfyUbp+TRzzUzp= z(U5tZW>W;D$HN2|!KG%aRkf7uW_5d<6hQ%%!*Q`GFR2Ua_08)WMy$(0pj@A?x2w1; z`>yYP`T48uv#ag%3kYCtwE}Yh1mgCRMP?S}5JKeGWXC{G0;MjcE{@Qwk>H?a%+l)1 z24HF`iDtUcYV9Bk!oT{(&%Xct?=ABY081{dyls@UR{G=9xYbW*rV}_Z3NsTuWmK4h zF7r%Gx#lLM6s8sdK<&_pf{Y|==v6B^fEl2pFp->s=h`O7^|WeMvkLuixxM@__FHuY zhEg&TMBzTAzVD~wG3Nq6NKD9HETIHsbf~qqxgt1Wi^wnqZwml_Qh1)Et=~#LPhi6~ zF?ano9_fjte;QD=%AUZPkqMrh1cu;DAOsW`149fH1ei?NB{&C1Y73bY-m@W^>4}l+ z?$A`IPkq2??(cp=dp-%1p7zxCBv)z(_?G+pWVr+a;;a?OV;9V<$$@Ltl55F+fB$eC zkHe-rzv|B}x^55(SWR2!QZyT6!};>P;Wz%?r_(YXcjIzgz$i&S46Dud{Qmx7UKR&a zR|>+@V{h#`ak zUJ$L+LWI>cpCU+*1B+0K5{MgG2LM%TN)Zu=2@p~cA?RWP_cm|KW9+;2dNa?5`@46~ub&O;VLwjW^NZJS?@F!P%`gnH<~&bRh+Qd~ zb1}DSRRs6DnYl};U-fYr=b=koPvep$g~$E0KD)Xfivh&Q&z@iHkH_n0@7D^qZ|~06 z+dNNw*FD@kLe+P#Uw!9$AB<(Gvdp+fc zF-AhNle-05oIo4?000;_{u%&0+}xd?UCiT%0Bsd)Pqds9RRRgX1Glpt9U8LUwbs*> zKonwVHLqH2+X@jRhP*7`fW&6N(s77QWlso92##i8&gQ4X^(m7J!QIPT<}rj$hAWC& zXRcO^tkj&#lFJ;0hLo77dGy+fIa1j4LmvWyIcQ^vg98CREp(gC^$CRMPsM)o)4D&o za!+UfDew9z@=h3;+Sl()zzrY@N@UCs7=vI4L{T6J#DF0{j3DC7E^N)MjHrml^zB`A zN=RxhwO07I=wNp}x!2l$0Z&U~e4;|PSyRIR5CalgJ%w}!iM1#=G7&b*u6xP3F12R3 zeRsRtJ+8O?*;ThX3lg0Jw`qXH=!8z>)PiWrv-h7}y?C)dOvhu+lbT|T!*+Wa`_*w? 
zfW!fFuBML2r7EGM*tyxN4sQI@9L0sf3C5C_hJUGZ z5;U(U2nf|J*V4RK&}Ny4Ez^&Pz%(#NiFqmJP->l~(W_~#OhPCQ$bpDKRhM~YVE`D% ziHMlcPHO}2LOk27&(F43*O%L~ElC=S&gQe>-SK$TazD)vrQDbDSoC4Gcaz>%I2t_8 z^I=(zOFm9{oa=sC#-ekDvA|+97N68-^X#~&Wy7)fZmzp*N5jYBy8;i{ZzsK3?91EZ zME#qwyd9UDV}5nF+s`&BJsiv8cz-yWd)0av#~5M|A)r9gNMz~`L|J{xS%_;beb=QF zmw9Ys%*Wlmuz&L5i(2#D!<_+6CBM19`{2_LDdF*WFgvAtt9qY03E?o-Zs_lK3lC{O z?mzhO^0?d21CFAE`KX#kvOuRYUeDUV%&wlvX zm%sYOfBR4W@xS_i{ZD`Jhrhde5M$gOA6M()^7_1#$xLg>Oz7?{Q@7#x7y-=~z^b-Z z^T{)XKxppsGB+W3m(sBA)hm*tw~WcAt$-%^t)&pSdsVOQsD^5YPE0~AyB@i1B+wVZ zf`m@7+g^umowK?*5d{e`rY@!JX5IHmOU_HKwT2)`WX<8c>pNmEm66=N%k??e%+S+w&9>l3=s>ft{qdPu_i~ULn{g-~Z0})zx&& z`~9?k9Ni#=xZR%bcSo%rf=FPk+NDm_-JJFw)rwIFr47rNrHiSSs?|%b!j#J-%*W%T<}9Jq>Q4__U`!$8TqJb4 zC~=5wzD|Kfv}z!UKtqUT8dE9?Q?Au4MFBTDuyBgOw7AtqGHEl@ahgJq^Yd-rbq&LxOJRvbQmZiwvnb$HbewWv zan02BLx{P;l2w=kGMF_>S|duCgH~;FphjX)Q>_)(vL0NQmi1bO!SdUJjTf&1;s7?o%$x&TZLW3_#; z(P)?LVJgR557V+gF2`}1<~nCP%;j;+hq)XU-_Lq?n08Bjn95OcGTAMDDDW`XyGd`3 z_4RJNIo2<3b~neotNycZUfoXf+uh;iyE{(9aW2QX&c%R3RYT?=F)+K;VdwzNwT2)F z7(+miaazW)5O5c|v+emZkMlG|iOamdINx4fUcP&Gb8~Y~5LW#FP`796IgfyjjzhmP zg<7@h(&j3CAEr4MgIV=3*Tds}=(|_nyduK;hx&sbd>4yt`tIWo&auq@%8XpWH;>M!~fFHgna6tT|m$Pj;}Ai|yBXla z&1FbkUob zLSAYJVb!ftOi-%;u2Uy~i8v8y)voUWs8(NcVU~uw(o)p)IF62xx`YIMN^X9bCIdJ{ zg%MV_-Tt^)tzDa4(1U~q3lU*qUgm`yk_6C`N}*b58>mLPAfP7eGi#GHLLx^;LIA32 z%#Dv7+jD~uRJCm;8p^+44}IVF!*F?d5kqiQFpEsW;9Aja6$dZcal*=4e7ioo*j&2t zC%qk)mk-BZyt{n>-#{S0 zxj#Hsea!V{Twd=EFCTZmeD~(4@a4^$pMCxL<1&8v`t?*QhQKj~7#%=DFtut1U|Q-b z^_Ghunw4sq5ww;l;c(dJTw+YBH3WWmc&DC`j0LyrKC;~1+>dkV`mL#yMf13vZ&pA5 z*~`Ew#!e`@!_ark0*LMwLOPCFf_(kW>*0KJcsyKe&l&iBHyvk~mg(|pb9{JHr`^Sh zr~SL{eE0oB)-QI)8TI1&s+L87cKhk#gXeGV?{DATy?y=avQOf*i;<9+1Cbyx0(mtt z5cggL%~WgKm!=d#WES)`7FHAC6GfPiS~Sq9WOQn|rbtbhL&&Pyb?N0dF9GR$Kls5? 
zv%-^i3@NZ!H9{g5c$&}nQx)2X4NZa(LyG|nO?vw+rIMJPv4Tqo%#0M8x#2XaMnrbU zhNN~?BMwb2s&ygqQYs)j_N_2va)r?ok1i&Z0l0MJVi1SHVLNN4UPZBxHVoe+A{ z$g#9RP>Z9mUrSxvF1Qudb|Q+|w;((Nw_meD>4dXl24-Z&5GV-7z$s8<>>?(CC{8I* z6pGx=dWZx`ATmVpzz`UPPtDwE)-ZDWH>Ou(9LKIHk3^Hd%mA5$mqy*S&RNlGSS!05>4h=#(a$XOzE2Ot8VQj20f zdp2A?yBPYeAV#aX&imsI831X?i>eL%aCWx3yk2e2hp%6K zZAdA`NNhE$Wg%w5C?Y5ZxTrP599V-ZKuYPfv`0kbQmrM2q`qsL7YWS5b1hTOY7j#- z_o6jME~VD$2t-b<&Mdhs2$f^Yc>q;qiql-G!m=zUmpxF1)c2(nChAgH54{^6_tW{=cHMOmDRP*{%*gHX)!Gj3;3sJon)x(OOPL)^wGuD@ z=VFKBB*Ln-R@)y(ASa*(Xh|V@tyUZik$gS$kwuuBv#jCCn}xC!_07f8v$dkH=BX-1YKelLWdbo7MXI>T0vv9(LoKS8oAmy*>ltoQs*C zZ_d1eD%8p(>E`iaT*lOO(=_i6N0LB6a)CajGP#hbSqidRZ^o&7@bQ^DjfZ0re{qq1 z@YyB7;Sc`V?|$|6{{9HB-#*NXG2%u#Qrd3MmTB%|_th`I_`To$2^KwNxC0QMU{XzM z)8ZytEf`{mDKrhp3H#^>fFk@YYPIQYtlBAc(9PjA4Mub#41pb8g1mYA_U`ufM}P0f zwU(UANk6KmisZBf1~)S#{MIUU3ilxb1TEE6g~O`fppsStAUuhcjaW{wA#1?E#;|ID zmTFjm>?C+X05C5T>LTEp&2ix2MZfwW_GiM85Su}QDW%v44ovKB)hn_SLLa+}VRN|} z`pDdZ29TTXrxB_Gv|o5m6pu!Suuu^~G#5IHdjqX-6u6zEjq zM+gBZ29AMapcE(xrO>Vfh>SuW7(`GQnV;e)pR7r3W(VljQVZM++Bnw$o|LM;&Mbz; zdP78gy3)U)gQBoG1GNi`(IbkaYWGt0@S zEmBa8ysbKit{)G_d07DQ*?Z4E{`8}lZ@wwD2osprQkF7n%b6yWAlr3!zUi*ED+BxL zn^$^5ZI`Nxc_wf<$)*BJa0eDjDI!wQ-1i+i6ay0oVkrv}j??6>DMqc9i;c^I z%qgWtP)L28m$@}T2*^Y+1kD;l6vAOOR8>MElDV21lv)uD+$D;tw=mI?%jI^%ZVX^* zNGvHHrqPi}c$$x;EK!Jv7tLDos!OehP%jA3$0$UnWxfLyir-HzREf!D`V`re(<`_bCXun+g*?Y0(g{6;}ErW%zYg3_z(x%}OmI zK}1}=meS;mfyK?clt5vg=KXkBv`}Cq+V79wyn6k391qJhEsGGJZMN%v#eu5lFvN$$ zL(vjaR5vZ<{pZiFHk)AZ-sRP%-=w%XOm$M2s#UhHe({E(M-50A4e z$8f*f@20UIhTV90eSf!~7Iix8j>mCabwi%#lFK|#;C}P&CIlG|hj(w@U0+ogv# z=FN8P;7y=ns?1o*(xnubYp(m_F{F5Td3kkp324mJrx=)a4~N%p-j(XH@6yoCwH%In z0A<0m^RwHV`=JYW_d6C*E3pr|{e!u!SA$l4JRT@QDS5Nn9v{c``DR)Um+xIo$N745 z_T|sNx;{^T@CUy=?x!F9=r{i2FMjc#|MF*l^UXprV(2O$ieIc!E%P^TzxfA$_`ARN z2mj#hs~ZGX01kpkK+b|JAcD-))FP!8)hf)bhBmiaYt2QAmQqfH@Mh}8lc)s=nD9xa z++t7M%}gsIGLwUKDem^WpZ)ZwFJ8RA-L55yok00uPb97~uabqhhG>OfEl6+h^uv zmIG4^sY|P&U#B$mUDv#v0Kmk8yb9e}>{f!P#Z2>3P7YiLI%NW*oq!9!rUSV*X1+D$ 
z;YkvT)?94ujnN5B7y?5(%!A;sNBt3EbP9qgVvzQ4Pj5_ZO)Lyd?dS*QU%Tv@8X5zvs_Q4Okt6B8j5N>f=C!?`$fA41%&*6V&)mPNI?>#$y}&o(b#y_6KY6uU09 z>0tHy-7UFt=6$ulCBTIMAqQ7Kui5+fl5=1xSAx+LnEh)Zq04N4L^ zNzx$0W(Ip%1ugr+xYTA0+oR`$a^RqJ|+8y@eJexZq1K|1j zMVGqGFc9FocQ;F3VoD%1E_qs(oBLZyVYlDkKkRRB9$tO@x{PC8=EL!TgqySTqW*ZC z_H*4G%l&@3dpJ}8jtPb4B|m@uzN+{A@Z$N4hsXWfn>#h=R>R@2t2IA9+-WUlB`^&` zKhJY>HkD-xh_#gCelMqX)l3x;*TYJfQV4aaghb2^Mo8<`CYR;a>sR-8kK=JX9>(K- z1c$1gx}hIdhw(TsQ`d*ClP-nBxNC;${cgWrg(0$Q{qA?a7dV2;IM;De47gMyp}YHg zM@7Nne*ZY{9(F6Q|J#4^yZ`2Y`M0aHbo2J%?0oyxtFi51By`7d|8V#2`Sr8wVbwWU)#6}k+T`me4I_YC1D6>Q znkf>+Bq<7uAVXvCD4L-HIglbl43UTl>GV55_C$B7&g7<@v?BAC`HV$H{7a z_WV6yIxJJ@`+n$C>T{_Nhg~r@6a^s)tJUg!yBUV=asS|_?hgcA;Ea*Clnw8T8l1z4~Np$Wka5rxQA17l0E2%K`U zqN=WpWNIlz5uTR0*+)$c%!WS3D05y6umT$6ejHQkVn|@-rLKprj}ipt<8iy$#2AHX znx+_1QWzAVn}5vHEXGLF8S^KJqR7f!`rv79IHf_=A#QO zpp)w3ygbfR21AxU^)Fw3^PAuQ!MmHcm)Dn<7iY))emsn(VCMVXL(U_jIe4GCK(ZZH z_cwQlFs!<|EFth_wVIFPs_%s{C1J#6UI1~NM`Fo2_ro9|lw1z`LzmLS!{cFpNFgyx zh#ev`rzNWccEfs_ms0C`)e)4AUy-@bl*Q-ALt|KtDgAOHG;Pd@pJKmYmF<%Y|$ zis7)^d9kt-H@v@jxZjP>UwojY{QPhK3$ynr5IDIHq*N1g8miGn1YsHpFE9?P#7P1mGsPU;udY=FQ7*zNu9&E-p6PjiM`@ zRFdur&|Z0HU_=OJ5~OvYrKp-o2qa8_nF$bxKx=U=4%NN5Tf>98BLFjZ2J9q=*k~p& zjddw8Y?h*0ip~?7t4mzHx4HPZ-(0CfE_qs}Qo$gIY*M%3(3PrY&ccEioCXP-)c2_m zA-PlQ0GjXiWRiwo2NSh@fIm@!+9BI;y8upzM9_dMPqjO+ppXkgkkeZV5QCF2GJy!j z#6d{7T>z(6K-hyI69^;WsTF8qDDc(?AUsj>@YLde`-|Er?OQAbArK;ysCtXaVj?DL zM9O(A<9=>Pm~O?_@14b7I5Njzj2r?mpwMZuM9^qWBuK&}%t4%RuC|-#?%>A)hM__A zf^ZN)0IL;DKm7QkoBR92Tq>MgD0Ep^Zy@nJK$=zZXSF0|ZAfp^oG=QwSl+q8qT3(h> z82Ttfit~89Jm0RnB-C;_G<6*dF|%6B!#EWdK|(WZ5s|qURdqDtYSxkz=c0!(pR%`0 z@v4EbYE^?d=i~8s8UuoX8xte3StFh!k!Z6(HkC92AXHTcS8G;eYfmgfuTx2*e0&42Zzs6hl(g)Cr)nz~Ok%YG6*p4kr|! 
z6STDC<2bX(`Ps!`KOV=q3%wOxcdNWCwG@#kv5TqW5Oxm_+f}Die*5M&CW$c;69SBf zr5nQe#rZhf<@IGs@qD%U`d1G>`Qcez%76TmU!0wZ8N7K5*DpSLb(e#{Cm);*0uCCw zzFVz#$K&Pm7yC*7=C6ObS)Kjw|M@=~qG(-$m@p7{V2T{NkfMYT#I!aVL|_&sW@>>Y z{_W`4%~Tu865+{5bUNT#HTf;t006LgY?zsli5LKh+kQPQ%kuJ@Z;pqnTGTTW5~6@p3jtMkg9^Uvckhaq zs}DY-a5f(o)C_gDT0u6v`Z#UBlln^}Uh*`JvlU0+F0H$MgCy?wgaeL4N{Km%^f7Vs zw^@_+pCX7)ffPt>U4cO8P239b?HtYhbhDm5=yU-fIaBk3c@PW%V>li16avM-!X!d% z+#dr8`jdU&q$dr6EGL=Psp%kSBb}2z$-Z6o;^~6Ildq_4jhTsn(79$WSS z8Q^iw4wO3)fwOFIO zEk%o&fd>(*r3;aef!G|EoI?nVzz8SbqMK?nI)c{PUfiwG1_vT`b2Ue1WGYo9Mj)!H zZ9q_}KCw<<7=}R7#ni>fPLYEy6%ndxKdg|ceJ`4GM|{zB?`^kNs~(r-+2*{H6eO-T zn`tcLZn}GSm&Z9T(|W!7+9>X%-V#LRRcw>MQb_Vt`WSHrIdNS8kq3ne5<9_s!J(BQpqKSfU0@Ueb>#i0{W`& zf=~=B=I5Oxvg3aD;{EMX=e|!r`!9d`fB7H(n?L;h-};k3`5R*RF_-B9eCm(%y)df7?tk2f#bvN{}i!5kv zMe{VfDlq5hA!XCy+O{6S%i@Sa(E1L=pjPDLn)L+;~=Y z0{y4HcZ9D#|NPnI_4mK~{bA_aeEh`KL1+RwB0_g23Qhe45?K8sQR%;$> zo)I8OLJsEan1E6Y{ia*3L+lZ^sayAbWIlz2!-?W>s>h&dCfiZmcwKy=(&34z>UheE zK|1vW4Zb7+MX;>|PD|fFBm^Q9gj%LmU~HGcX&Ko&i#`DADW7TP6NEt zETd@wpdlHWu*nSw#S41%Br&wHpF=4=9m_J+)v$hceR=iX`LIa}>f{_+2*XJe=H^HQ zNbTA2>y`*mi_atO4)WEj{L8No50e!VX!6%6sqfSri6uzM%X+=$Ag^D)Q3Gaa<-%dV zZ{Mz#%I=Zq`LnCLhX*HFRNL8Y=#oONOAO51>H=gYKr$c-Ey7&@TrH*Fl8E*B2*k$m>ZUV?Px5lm+JBXNw z5d|?U8K$W)3o}o1H6Sw+LaT+*0Wg}Zw@!kt0nCH3mSTV3$J?tN23`qhS z3Q3Hq);#omQ$Z`AQ`m>*=ls@?QB95RwoC-I358o(lZNh%+E>2gVZ=$(iWHn1B}hjwi5h z3wr`0Y{fX>w@f%ULI5T=XzS=wl`%9qN2yv1PNUAp+;`oJ_b$^QoT_29Hg%J5GD4ez zDuP&5twtdrJ0Q8ca3H{3VLZayH~R9Me1BIiF=vEz>`@&^17WCDt97xPml<8JFD~D_ zd86yK1P0Wu>zQRLOAMhUKt!PRX3a4j#+gIcv}#0h*d0ujiLF);F?V7X7Nn4u(x;9b z%%GGyoUNzhNXU**Ri|7Wa7bMcUXD}M3Pii(<1sH;OP?Zu2ML-BF)_d@MM3R{RaQGJ zx;Pfq4t>gtBjpOmv7Bu?AU-bhuL zvRHFn+)cVpYe^w2Qwbt-u1z9xdh(c^LSIjx*{dozFp{b%)~-)hE0A|xBBEtkn7M&T zYRxqlC!FUAO_Q*@hd`Un=I!mx`R4pEPTkPCs7bnVdx`Yon5%TyMK5%>xQpy z9=H!FbdS>^66Uh3x;}9H=I-@0O~PCscdONq`tO=5C$PU91bN9>#kG_IqsJ~lgrF)K(3|QVRXK>M z3e*1a0cxfc9v499dG)@7*X40qY9J6vn4TN23^=+Y*2Nanr7Wrx+}#Mk13Fa)HA-PP 
zqdMhMjs;Vaf(z6N2FL(Uhi%jN!$~;ne)`U#U7RP%4mbc2)`s0e)OLBc0AWHkLnI(D za7uw=X!~9gqSJH@@mo4lv&k_NKi$Oy$Vd&b#K?>!EY!LMAb6S`H`*i0jc%)&1|c(&dqBn7ZiOH-bivJsy;2SJMi5- z-QC-?@Xc<%oAPXK?PenGx>byUNn(fqx*FoMXV-VP@0MjDA{Oq3!3nD=Achc`kO(iX z&R@TK(}V!379>rF1s!7uHCF=4OKuGDsx{{vghHTND-kD(kph#LlQ|Vt7L1IpwN!OS z;-K2fe(hoqAu~tfE~OM`m6EFo$0hq=E()Zoh@|GsvfCe~sSaI?jAq`iR!dQJLKfgq zOpo(qZY)%-?hnUW%>kErUak7+xEnea^6~JP0`(~fK$-Kp@6nB(7CFSy?%o)LxwWE% zVC{#>fwGlcvIrx1DH>vIOAHZWVMi!cB?PV74(${eT=5BT^`sW?TI;t@HaEp5DM-_7 z#1vb?B_OnTwUQTRYE$M?aty%$mZc<#LrRRuOiRsS2ofm9NEEu&HiLJYRkv9^j`!n(pBZBeC{BTM)p1(h-rm*9a)vR5zfF18yj#hk95e|&eCs`QHV`s;`9fA$-H_w%nn;IogeFMHT@ z5&~_{*5k6E41fK%UtGQSk@P90ei&BEl!t!RrO1Ra1Qrg7CD1A0wxzwE9%W}jbSH$i zuC!Vi0nw zUcP+!@|#z0-@GlQFjF&Isn$Ht5AWXIy?pif`t5kXE7Mq)?4}~z_i^ZZ2~k4F5>V)L zt|0;-Qv{>?H}AYy>eGvl-&4}XWhz~_x>~QV0AjA7G|=!u&Ohree-O7HN_S2ot#_z( zS@Jl~hg?PgGecJv=ksB8)_1EA`WTa(W-%gxPpLG`h=tAcK-?mlsMSOS(7f3UNPvRG zfJ_vaL{Jb;>)yaoNEn4tq7eGibm`ZthQ>8;0H9yT?lk7!X#j_fX@7d1Qd>Tsws>vk zLv%6(oLV+!3R9hgIWU9zn2S|D9wv8u@5QrjEiS610E!uc`DqG9Xe~+@xp9xA1&|}v zs>rcasET(7xVeYp7`+%7-|gmCkNans88=<_b&okh*1FYOa?T=U=~ib8&Hg7)Psm zeR=Wr-EFN)Ek&5-no)v-Q{$7=no@#Nnz@gd0IXFcwU%+5hN~?yO!Kns1C$~}OfHNC zKs{HpVv6p8kdRMEkkf`v+w<;ZwxQ*8X;^iK!*ShrA;jJ8ei-^yTpf-F6h@+X zUI5`@yV9)VVOMe<`c8TEphs}f5>qwQvWx0?)ke>l9?$7Nhn z!mqym>PJ8R?Aba#>|RY7e){ruw}@=Ihr9cK@jv{tpZ?wCc#}ZYz@(7yu_)1U4Wi+9_8D8a+bW z`E5`8765aQ6d3*VB6I^JFmNPkpf^AOFyXcgKi$p#zKi;?QH-WxGjh@)W-GIvp28b?DxmjW=-hC z%p&8wtWw{Se^z~Zc765uxO;wi?%*|7t(uaMAPbdR%zzk~rIjkC$|CbTU0iK$9}Z{d z+qVyo)kwjaQ4sqSb1v1@kxYw%Yb|}>fg3TZnxi3_kb_rdUep&uW;Nmq246cQub56JnM;I$y48*53Awvn7e*8PkDW|Sq+0a zj8jf2&9$*8f|g3e=6dp%BQ#K}I}}wRGQeCF078`GJok~IEHR|^V?knYUvg2k)9_c? 
z>WWkq37N1dL0i4y`UDPXiAAR-*K1V-QZsTRbn~KC6Lqx~Rb}-MqM=#UqCV#WOCI{o z?c0az%V$x_lBdvxT1z+d`(@gF^;I9_`s^$TS#eTO;lpu1#HgyvvY1v~776iTw=g0A zEOq|iqYn*q_izv9UaM7A(^c24hk;4%cMr^RcXxkwakg3aR*Giry7cjj4B^YLh) z!gP0gd%oH*(P6)T@7eWn+^J!Ri4d4sp{SMZW>w0vSr5BqX}VxDbq9%o1*9@U3>+aq z&VC$A>Vnonpdw_|2qOnx=6R~8oeD8^Oyc^T=R+W1<{+{^jDPd9`|tedM~~m!8Q`Xa z51)5n@VCGCTAeq^o^4Xz@Ozit#by`}$Ft`ze*VRuJpayT|LM~;@HLdBc{F(ZNo3BuIC0qu++1m>m_IpOsiOPPtflRvEmnkK*ztVOi|g1ZD^La(Pqp;;B-6eJ0+`ZNrEil>xq<^as*fa*J$H@}_WKR+H48#~!5fvjPtFSBz#x>Wg%d7MAHIS;+S{8|+79}D^0aq{p zsDzFHnkyIp5K?eZRUnd_?J)W64qo5M!K6~)8b#>KHhzxmt7aoz-rM<;LMDK zObob~T;{1P7grZZwB*tc zLk#3BNY0_D*%bj}3^9`F;#QyR8st^ess~4NB$1-6wlijlLC~ssotJsN8lp&_I`^eI zENaLRxspH#GA((j3Zg7Qf=Y~!clR;I7~^;xlkjxEbF`&oK%R1)mb6~4A8zj-5A%B6 zMHU1<9CoX2xP2HaD3ILU-3{yYl(S89NRekYM`2b_A_lKjs|1E(u8zzOO3X;ofQ|D~ zMB=Vv6D@M6^+0|!FLc_F4H7Y?!38dJ>00Tlq4h25=IKlwQtlLe8msv3BgIz%X? z#x5=MWM&e`%?SY1Sa`Eq=N7|60E7iBu-B!yL5#6@tp<6X(W?XkS3?4p$Gk|+8|kOx zL5nU>=jz$n)SbW`Ig)A-3e!0CeP?R7H+Mle&!v{i#1dtDwyvcv(`;DO-4QOXuMu&x zxpd9r<6)fU``zwwe{+3yH68Ww`1t8}K6&%{RUkTBZqVx7hrsS|2U+yS+Cx_+<)?7 zeJ+RZee(Y2fBTE?efHe)`0n;@Scek2U%a{g;7)z5zU=3&WT|LFHW``{{$ z$N2vBHUf*Jo4fIdQnM{0cl&X*-8w-(^re)`^UMA29??Y*o5%;OX;w(^G!5_*B@Dhj z-4TgNm;pd*Wun#{fH|=PqJdGXGEaL%!c)Q7Mn(t#PUI~^3fnpWnodMpayg(@3&hL> z<{?NIBDsdZ$dH#g#lhTC*Ht70ZXs?4fE=8}hz(Jg8JHEy|WkJ&&t)1q%XYXvyVxPj$NoDN(eJd zUdCx$%0g9pkwFG<>Ep1ebt$<3HAICIdRxdnaX}k*83DkJk%W*@2mu__tbu_5$bkvm zT!pErD-hAZz!?a{nIdy!7NkIJ4S?UiMF~+t;+QBz42ff;*h$w(7coXofuo=>$kRaS zNjKyu$OI_J2!M(Vj7sHLDT~eEmGfj);r#sU>iV1mHb*fMx?9U%HZuU!n$3!PMQjE* zEQ^-eYIZla%FE1;JAU)d-@ens(agB(`qYIG(!;U5-7RwlGjd04)@pCrHU#Qcl?Ybr zetX_O+`pTrNv#5d6D@TyQuV4@QeZ+@mYlOrOF_ofy6^gc2*M&FRSgM=PZzY-+I3yd zS&}^Lk3fNt1SyJiF~BKznat4*j>p6KW-W+>RMi|YhA?!}MQLlIr7pRad6^slS*npK zS@j^e?o)`Vn8y%e3Sd95Pcob}_f^uqu|OR3XRSwz)p&8Ew$k6H@>FLPlQ zVL^hb6`9=Cv;sgYKdmUiNm>A=t(sG<2B?n9q7p)>rrNwIrUNxp^&xh}^3%xAi!>61tFs)tHz8yc#oCGaz{O;`)OR zKbogmLRwT6d|pbe4jj_3rqF%;=I-UqeJ1ABYRNU6FP_0**=zWMqv&x=(9o2F?!Y#$$XX7KFl 
z`SEZ}p-qFaDDwaT002ouK~&78l&tRAbSblHA+#u5=FG-WYGUYxTx$ps5JTItEZMby zmw6mj-P}VIB=5SkSr36})pf2aF*N8_t+T-L`@j8Sfa$k>@IuQNIBvSWg?`e zf41G`oI^;v!?=_jldzD1QPZ_BQcDm42X86XW@h*V2Rl{t&C{x8THzF5<@h8nd@{Ad zlW^!H3c_~QyE~lN{NQNnPb$IF|3R${Y0ab=G#z0B2op$TNdXX%i6n4HzyXMX14=*% zr{q!zPE5#6UeP!RrZZUvFAq1b|NDRX&;H3D{*!<3li&Ht-}~{8e)zrX%k9@+fAydL z>@R=$#jE`|fAiJn$A=rS1=S45C>9=|dmgu+g!KopzfS$OPeT`?gHL77byh2gAVjOQ zUZ~US1eAy&U=R$EQ<9WIiXjD#a^k`fBgGIx z2r&c+%y8oHfjJmqORZ@#A3`JtCL|zcZa|o32&xThK&&T*&8aPU8VW(vaebw8?qb*s{oVcD_Uvp~77-g#7@`PyWhza6H|zeGLlUE~l-ZpUV27{? zBIY`mezS>yl_gRDdQY`dOIDMiPrEOkNW z)q2A*MnJ9ev`h{*%~J{qfaayRR(I}(G%s0eb$4V=NqV8>xJN_@1YlZ_NS2bP+j}8k z@UD+#%0(9vDLH#PCBYd_yRr4qFXQp*eDip?A(C1wtUDnxG!{Z5H3Q&Mr!$gyEXH!S z*)E3((Q93pLqxQ^eER99+Z3k9*VpI6GWnl>@%Hck@WKF>yo7`?_J8-){jyM({2%`Q z@4kNhFjgEo_$R;hled5SbK5Wd`@HVx?agkpJ!gE}hRw47)#Z!fKHsgoPV-^`Lkj9S z3L=2C9c7by0tvCZ0kxDEKm>DT;uG}aWcjsPDl$D8OHU&}qLawb%-Y)ETY!iIAQPa2 z!4qlTn8=>oVuaX;8f`F9T@y1Qs%fp>^|2v8Qw$uFoz5FJbs+buBE%@QWu#RE1mHIP zho_CSIaWO$YJU9elMi)%IP0I;<;IORtE<2KtH1r#H}76Ql!vJl$I9^sF@?=@Rjpb? z0>}zEB1Z9wG=VPQw&Z0#PV+bk%4!t|A$79#A?xg3n)S@wgrd1Y>z3Tijggr=G6f=b zpvDMeG=%D42yCim>=nR_sL|R1y{R;bL5PqXozNYslY_PWF13cLF*czAs@q>r-Y*tu z!EJalYaldLH98`>o-z~@2M2H^tu7=Lcy~YMxtyJyA)>35r8;6&@4K{=(NqzL7)YAr z>GUO?0y7D%s;YAgN?}^$?hbEm?C#DFJJY)8d}j)CUP4G!?dI`t$X0t4A%p5_)}CL8 zYPIdwvAVjy_u|Eyx9{%nAJ*%$5D;BWomhw@uysRUwCQ^>2*nV_{oc(b%D7}AVh+=? 
z5K7k#33sDH2A0LkG>WMKhG=8GHyQvx2X1gvmH_c5ojpXZ|$#N|oLwx)AV3y1+ zE2P-XRi|3eK}Z-ah2X{Vv7{G_=!fwLfnUG9bqAIJ#mu1;9S=)?z9Ixbdf1QeUu_5= z&r4*1gmua>(0&>>>(#33#${oozUvLWEH%bhY8E19a&!^aYNl#xB!t37R>in`)-5>T$v7!?TlC08-B~(=kjA(N%u}fV)fZI|OY`MNZ8}|=^fRN@~ z&#o^zmM?$x<$AqYau$R@+@}zO*G|Gr4&^7%^&{gt1tfU%SAtW9$$R$ z$*Zrwv5futd|Afzs?Rf4=>Ouceksh)FJre}|C|5t$J1P|wwLqc-B%B<|C`@?{@HiB zn#c3!SFdhw_XYpspZ)c-XD@d9-OwkknGgx>@?z^{iKz!L1Y;m+ie5woBl2eWYHEEm z1qW4gcQrLtFhgPy{#NzhBt52v2(5c)Hk8v6z}u+5O}_ygRrP5x0N_ARo&#;PwFVht zMg(G1Q$Qyn1IVSs*b$LBRJWEF)*fg-soex!z!)46qZ%QJBQm;`@%GIxR>|&e-$0p% zKBOR~_MJ~Yeg46RH-~A``qk^Vj&+>J>u1kCy1LGaj!;V>$7&!rgy9+a(U!v5R%@K= ziWt*`ayph(O(u5Kf9(Bvc?YH#O8I z?`)1%1Oyae0Ru#`X3uWXW7x2+f`Ht7UVx0;CU`{bC%}eQtw46#^&o*m`{D?QPI$|< zdd2EqOA+e8vXBr^Suzml;(5*?%0f%6R&q@txteMTL|!Tdbgi{ykwhp8ND`gWtZ6sm z-GklV`E+D61fW_=p35}P(3>Fb#_4`qE)sBevKS&U03eIbGlEKBCaOh4l&ka0H?MA1 z-3A$_X$-=MSV|VAuIr|8VF|}^ViGz%T`+VZBG|kL2lZUF5@9ZdFdA63ECQ-aOsg&p z045>qrb#&FS`a8j5()eLkr1bOtaUnD#o!92%n1m0heJpm$5_<56l_s+G}RCyA(_M_ z>kvcV_totFINpyYUGh2#xSL^!?oi!xtuZBJJdR`aO)tVygM^UeFz<%ctED_GL)Xb} zU(K0Gn7#TqEvX9}gb_76IP_fyuvBxAP_pkDOa_|g^qEaM+cbl%);|r z#MA+VnWzF#8^<9cF@H;d00dPx@D_yIlA#>P+(L+TUMf|0ELGW&*uZ?dTGv{(XBz~3 z*zX;EvstHAH%@a)rm|W9Y26L;@u*W_>PyLB4hZ*;yFNt`dK||XVp-uYMWJ8 zYc{P0>?lLGeYn2?FGEP`#(hMQx#p7ddUIyAF2^y2n5P_8y-Tg?@9y57ZC4^;8a1tY z02@}rVYdf_AhP6znfK#iyE+3THzULlWQaWj);wNaZN9m^LkR|6%}UKnaaUpFTr-Qr z7@>Npb$>ioKn1L&vg7am{_p?gU;X8{!1anh`uIEhaeA|xfA=@uTPN^w3NcbTd(eK# z;f$x>|DE6Y*Z=O1zj!tN!$17(hgbJsfBxnF@t=OTOVfJQRh`pjy?6X~|M_noH3F-; zhR7x7A*DJm7pv8DnAjPC7)cPowDsAiA&(gv5P;T7Or;iZG_z7l5K+)3>vDHuX#;*n zv?d{MKnQ@ABmML_;n%#+W_CpchnAO)LR56|Fajb5Mr4ox6aa%G8IT}@7Cqh_@4oRmfAQt#mhZ1&+<>g_AKqxud0P6O%$!iV z7}e`M9{@}d46H>UI9ThFnK;l1gmB{YBQ(AiIv9FWinakeAu(#>qqd1I01%%@IwBI7 z8#N~c4uN8z5HJLej3N{X+MquW2xH`vgE+SLkPA5xHm2MuL;#JzkT8IAUMdm;0ux0E zCFgmZT(vA&OI^n0xSOqb;G`AutjlC&b~i%DMzpqKpo|zC!nEMS0bkwLH~017QKzvk zv({QwYqdh;rWwF?hw1CLxBI0QU;{2yi`prc6I@l@9Dx|Q%wTb*wL~^cxQq5MYm8qF&2wb&RGXuy~TcCN>IxRC0)KayYRVBc_>kYK+HkMq$ni?Lp 
zXvw7w%4*I+v|X>+p^u%gs3UNk)ppYn35B_q(kM@TzbdX2<8jWzs^4rjRsC_C?+%BH z%gfC=5tylJ0d%SA~%u6anB;>h5ru@?u~tOibox>eDpe-QJdI3Jz!e+Lyu# z5fE$rjqiQtwp?sh&o0lOKfl;)hkn(g@SHOz=7`h0f4IA~vIs&F4nYibspa8tsOn}u z%`+mzh-I0^W)?~{*9b#N`E?sms6tV2i+byy__t>%xrLrd7`x?#!l zSFc}RU7r8$Pk#JIfAsINY7E?U-NW7Si?40~^wW>7eSQQjFW)}W@chlg1Ze;C_kYut z`ZvFLHy7w&c>BdypM9`ecCS}~5|Nc~bF&;_c>TC2^~AJV^$aMC3?}HEumF*fI0s^e z775VY`i?E*+zO!~sMb~6nepYCp6{^_6n$uGY6)tj5!o4fm^RJD3`zUumpkoSlA=H~F~<=tQX_22yJ7k@_x zrUktURPlmd>QWunY4|9vKHzk@J%9h=@}sUlLm<_LOF0Q38JJPX0H5x=6Pg7?P^2Bd z5YQPxsBu3raKpSIxBUU51P;OyNFs$mK`00ZB6#xdNDvN$(iS<=5OO30EEEZaDKJI? zArJ&c;P%0dA3?zAf=-N#j@MS@n#Q^`}A4@)he zm1~t{fn0p4Hcv~=Wf>Q(R%e(;xV^2f-_&p3)VH^FKh|j}rPRFS<8jP$Tl-lEVb0}O zUw(7*IPQ-*=X#RMA|oObyEUaOu`m)>tE=@8B0W6b<-DjVMOH8%@4E!#)qKolwb?ZA zr-31}8q{jjGA|>ZlzuF%TLLsZRAlMWGxHFNk5I~>OkfIO? zamltnOj@Z_JB)MR56e>Kxd1u^%up;)Ty?u~++LiSW=>&O>f{7O=Vu!*LqIcc3$!Jd zTnvC>j1TuabzqTcJcK}-ReyVTLxh2u&?QiaC`9wJKp0UJqOzl-j>6 zrOvq^OIr05gk$0^ELz{(yn8t8V~Vj${V))sYIUrSkMH^<2+E+~%W-#~r^o%{O_>k# z?*5`nm;I&#x!PL@s z5+k?}`YMw6N%BrC$C8V)8rG`IqQkK5H=ErwO?f#U_lMp14}S8)?|%B>U;g=@4t;F& zzAwMp$6-4c+YI>8vt;w}_I{qj_UE_dfBEw-K=S4F`5*tqUu5)u^pF4W^DlpP`3(QX z|L%96pVPL7?WS7@t=s<3|Mt%{tv>zk_wL?3fI$chpa{0=I;$STphm>avD6T4h78}L z-w^=8+_cu}ZcEO){l38!0Nt#`=VBANLqp!5R>h|q;shXg+6jL<>qo@4(rgy7mLPdT z-!yPA5&#HMAYw*91F*hJ5=cD&-#{S0OW48kzwafr*6RM4B*lWu{sdVP~r4 z;oa@aFIGYwLIOTM9)I@JzdIf$B1HDU$Y7CCKn{oLalb6t|Lo8Ith!bJwW|4OdGFUK(sZ+d-b@PELDH)|C<00FcMK(Kue z?fAvkoN+sr12y0$2MNp~$bwI4jNa;#Hm?Dswgdn|2W(Cbw?NuU@)t(r^jQddjWVhCA_afo&z^_bSm0H$8e zrv+wBw-5TwyZr91-reVEtZL=}IoF1ciV|6PoaV#0ywv>7o9&DBd(ms37?4gA3Iua2 zbwMN)WM?-oHK#t#nsZsImNcw$E?R5jls!B?5=qx3BGX#2h3&-v;4!3{OPhL0jOsuf z%>W5g7m-bb0Pr}^K~}nCM++ek(KOFswn}k2OwTT_Kuh0u$K$?RZ^tD&a#afwLrP0t zIEXFP!K!9-OHsNMyRIv>rm)#fOVwz#4k-weuw*j_F^5v>wB*&g>sQ_Jepk(h7zvJL zp8FW1$Z>zTxVYROcY+{<=Ey{9hGgcDVn8=Hs9KAvyGJ}7F$T~W{vpQWT<66Can-V9 zGnpmz=B8$I&VZz91fW_|m!^-K4}sCtwV=>^SCF)l@aa z4hfOi5sDVAuI3e8SOhUi>{iJE#{GPDzOFUT(|Eq!Fq4RER_nuYuXWx;kwp8$?!Bw+ 
z<@Rj%@VI+CUT!;ubUYkf;yf;!b@KVZwrqwjpmE@Np4NTecl}lIJ0GnE`S*W&`#=7l ze>~lN_1XL9&(1f;u^hGk>tDP+*1-atU0y7C?$h;tH^x);C{!)63-dIuh7O=2p@T6K zG)s{i0&&ytnKks`aa>B#RnLS#jem8bzX6f^DLMS)D|#CJHx=n=JpJt@@HA%nHDIba zpjvy-&L{$8qK1P5^N<8wSF1INATvq~p#u^;-CgZJR76%Y5W_!n z=DRKcd0)H_X)STfAnr5r*n{aZP4k1+jLdBdB1jdSPmU08T{0svkbp>&%b)a(?P_S% zrx73^HP$WyAjxT(|Lu|;LHM-q1#skMtfmv%6W!4pfb`U+H5LpyIyTU;VUtv%IZ|^R z01!H;86j4+Qz02*2*HXHV-seXS1=M0K+SaqYKr(~X>wJlZlxLpbU-cOL;yUFIOceJ z#MifQd#?|V`8ZBBd(}$BwN?P^SF54#2^|+M)plOrJUou&Y8$wbGpZGB5?aPuO9%ms z+&!>Vt->rp*u(MeNVFu0(_tjyH1x0DJ#07IB!uS7gv5$SL=F}t9CLM|mIRrLszNYw ztD%!QFDZq?coY_JCm;nQ0@YfpGKXc!L)SAng8?Wf%vScfOp0pQb$!<2;4Kli@6slQ zx|HXujr4OqO#Q{xyPNw8Jk13FR7;A{G^;_;$_VN(%}cGy96{u6x9^{wJJRtuR)y<} ztB2!}z=tlb`naFx+$Ofg=4l}UOc4-Mmr7McP<3@{V_Mf*iy<`nHP)(t;(#Hr5NWOM zz(`H!qL>{D1Qsk@y#%-!O+2g;ziQFTed+@Wb6l z-+AwQFZ!D64){-h_RF8XEZ^+y`lIju^-up!y0BjL(=kLAV#;~k3{+E$yJmYRvty@J-70UXfL?OWVu3mb=qNU#b| z5Vy8hGXp>ZuY}}a&^}wU_oJWa{O)wx!nN5gV(a3X;Rf9|!{cE(F1dwAD(LR8 zBM~KY_Lv${LbNPe$Nl~e1$X086iRii9z#eWq!bkz$X}48R@?1I0E{8s-X7)*tv1q{ zyA+15I~)&$7z1}*5@BLew-`f{_h?ZfR%@X*sHGynQ+OW{3gBt8QcD%$F7bNPAICj2 za|lS_?&_xMh=AsS3EkWKo8_mC4dGk9Y{QZ`YE^Jot+kX=vubs)T5HbLorp!%idv~Q z&ZVjc2}0sPoApJIgcKZ*L%6+v+)wk{hr2Idzg7wqQdSTBAZZw<>GtNrNr+EoLtd^KBug<6G`10$Q(=_gnJ7DNiw_dGd-;KH4-0v&UqUHS;@7>(p z-re0-wPCdh5+5IT=*}1rV9qCiQl4k8)eNS&G}U>Tm-WzJo^Qd8BslnXl|Fv{>?gnZ zy&rt?{yG-%IZuaW$$#}1fAQ|k-JkyHU;g=@|Cj&gzx-FbyTL5jfBWvA{kPx0eD>o1{xARN=)7Ob-~X+T zak~5G|K!Ktd2g7P``0&b?jHBU#dn4m-+S?czxVcGT5VQuZ*OAira8BBzG?}PL*O9L zMMigmHg_O2rzdAAm|In=W(qm$@i^5|Ll6XXvzFF-vQ{A?H1PguzTZxFM0}#|K8^lQ zh9*bw6JMyI=}+|k(>|b8x-Ari34^dO)v_=XAvWMo>S72&h%6E$v}d45ha3%>_!0pf zsbD?ceD!zJe)8iV{pd%B!{Lk1zkK`ptrj~!J4ceu z`m!oarB0=e^ZfZQe_1B->I-U3O)@KkQzPhc~K^<_97VA*HlhtcWTA4pxW&iBz3QRMkM4&@B^-JEVSiczAgJ z?0WyGRg0N1hh-{Y*mbFBZK5#;F!N$%shWh%O;Q{WE3{J#(v7C`T30f!p&2$G2iMxU`0mK-p`MOKUu9mN! 
zoli?S%q6Rm(6THSnC!Qxr_Xd6|Wg2!aSB zCKka8ELvwu6e*UR8&NNaxT{t}B1hC(0|5Y3vw2Y@WFa+mw`Ixw+O-1lX;BA2?qKeM zluKrk_8M(I9e1l%kx#b>60vY&1Xa~8L^G8jwU(BbZDb*I#T3jhjsy+2(F{w^$jcygfTB#cu9*m(QL#SYDRFy2J4?a>#j}r^C8S7iZfP zSc1HM^D4$zat$FY%hV66YB1)dI+U8P&M%IK-PP4It=e^65K@4z_HR-LFRm|t=STna z-OanNU)_KB{->{By?S?hlS`G=FqeFJ&OiC>AAj}g%Rm3?pMNjc>&s_9{O)JJ^}Wx^ zG9Dk_NqOi(L9QwBV)*gL@xxC){lERoe|K9$vHHF5T)+M0pZ?>YTz_&M5_S3TZj77H ze(Q(-*Z;@A|LHrpTlo6=>ha7G=R8(HzKt2|x{n(Z6+j zfxDHerT93N{c*m$*tT*?8WGZ4OgzD_f1S$xHk&v}K-*UBq!K-4p!n%!=q(lZ)GxXR z5inRX?OO^w5$XLvybp5TS?z0WyVX7@e3HAy4~nzCPYR zTyC$1?IqBwZ(hC4%Aa-XzTf=*KmJFrzJ4{2^J=|)_2y2Dj)x<&8XzF%T9$%u-oD!1 zy@C1}hE2D5|Niz@b-ClRsDp(Bv|>VXaw0$paWGZWqGb_6t`JzNDFLZzqwTkOsgX89 zf2+>T96^!D-0dVrKMjPKPEiX?ef8=7ED&y|`Kpn|`j!F+__grbVIwyuLfUyg95R zS=26=IJqgIoJtle&{pnXj>v&>Spco?`{Vw27{{~A_sk(m=wo8k)B315fH(lLG*lY4 zIR~OvFQs-}Lg3?ZWR6o&k2KANm{UqbA?IU^Nv*6`9qC;1*vke$4e&7MhvQg`hy}hyX&l_N$KAVy!g~`#P#&VPO&O z9`1&S>-((zt?&DNo$BX~)lV@kmbVRQD=*cIj=$pnW%=ha{S`K z$>r6@>uxsn+ZaE3@4caoUwva@kIZ4yY})?m_42cyed|yD?5{sOPdIXi2lZ#}&+X;2 zzx!)ny?Fe={a26L$)b0M|Mr{jfB&7cp2o@2|Mma&fBu*M=HFJM$~JxoeS^f3r{jtY z45_c&n2br8D_?Y-LXAQdAp#Le%tOlEAeYx0G9HPHwRBSr12@%=s$z`2p~@7EK@bQ~ zRX)vvmBkb7s1k%b5}Z=`trmtkh$<7=vSCtDGT@!9YmcR0OO*(vyuySn5m~1|XsnhJ zO6x^Y4H-g^$m^?)^c_gmz&~g0gEo>s96aF(D=HV zO&62Nf`&m@f^?jizyjHIl!xcbx`vkyD`$-X%sG2ww3N(>hzKEYN@P8QX3i<5qAgnE za@H8rsHr(8Kk*2F#thr7v(F)|(KD#m427Gv{XHK^c`7lTq&aJGYLmuFlP5 zesR6QX0E`?^_EQ4j7HEdopnTpIeYI@WC6;ooN+c?gl^4@gJdEYHNMZ1SebLvjJ)rx zGljbZphT3Et-&1G0vTh-WCayw!a@esvZjrYgaHKrsjh6wsbm(7wJ}8Nt*DS<7A|;8 zXKhXyX;%}&nakE!0q`a=s-VV@h)Cw~V&R=>x4lBCl;jk+yO}$-j z+O`+YL&)RBe#Cs$Z8;2^KHa}}cRV#i-!+p7BT3?}4HX%pYBoOx<0mtNnq$|+n8#JM zTCJ^h%v?D_6+%ogl5w0RCw9ggbhi!@k9pXhwdknH9BrJ6%&KLqg1d*IFIA`nHSCv|@k0uJ>cF)u?*syO`A*bVJlTziGSaQ_FErDw8J(g6h|zm7Fb)!U+Imq^B=gV;0C!}1}3Bebo+Fx zj(@Ab?&MuSET9%85D->jk-{5dk-W?K382buhb}wWNVW1tC5a>gwH}RC#XOlc?RG_= zC}0daPl_3gGH7BMLSkWK0I9U93?YV;BqnM*Sgz8hO?~9H3pr&3sJx$ynx^r-w!W?e 
zwU05e5ZN3P=cLSl6ogJT;p3~#>9%WAN}S4coVWn_4I&Dw6o)&&s2OqQl0X?vCWsV6 z8r7q|4`gl3F{{MPfEYrk>pG^)TwIGpKtt%q;}HU~Xx)r6XF??f6t!d!V2A;YY1?+v zOi|PzOve)ZAZL85JWEnDpF@~t}&JyLEp+G!@LAj|bYp|#>Dq3<| zDy18{fL?SL-vne)Aj+&t1O`Kl<4J=kT|X!z?R>Z8rir`8nwTnQ0n}PTfV_)Gi--~$ zW6I`gcLbDMLV+(eP2-%k#-x-G$rx)4d0)qrV@#E=kboukL!X(4Fk}{H-M0N?Haj@j z+nY^6ayx8tPKh(CPAAj1KKJ>bq^uDa64Tt-T9F zNGTuRzRiHHZl-&)e(2k7v+dS^0)}kkU0q$RSI*lQyUN?jd239{*?X6BQfA>y0G!!5 zXN}oxx5m+SSnn;SacBv6GHS-7s_(W3i@8x9LS7skDr()BuYBpPFMs~c7jEAHc08TaE2vj=%MrZ@l%{m!Eucwrb(w*>>;#?N3ggPiNJC z_V>Qh`M-e)k`I{dd0c+yCAF@CSXu zgTo_log%VoQ#(}7DOz%5NR3iOS#p+KjQF1($|4d{?uU4_T9e&1xfukrbN26aXTVRJ z*R^cU@Wy$9r2v=cQ;uAr8w=~snWBZ7T8NkksAPx?DFW=!_-F{FpxbjMQyBJ8IfunL zqonA;R%9Le3XBm!ELeoppFR9&eR;N+Pny}tHFZk7x3~B7lhf1FYd;>JU#))r^Y>0q z&X$|&r)ST)+}V*c-U&d=IcCYCqS$rA`T5zyU%V@+&x&?3r|~$0M9o6N5P>-5HimWI zEo10PXL3qeSPY;u_6D~qs6~+%g^~*9ESby6tisByB4zzjwgkIV{8P?$$x=|c2GFNm zj&gRFNX5{j?n7V}YaJkxs|NAU*Xil?uxz;>GNe`k7RitTZ5lNykF)M{2NKQnP9leNKm{nPx!P=z${Lopp<@t0C6vZHj)Nx{Rj#&BTeG@eIo~YTEi(uM85bBX zudnCx*=$sC&d!muCUbEI2@7n85QQ_)wi^(r_SG)aL1fTKRs-mW2%xEau`L>7cMdav zm?9AtFR!wcJ~%0H&9Wb0=(>SGt+j&4qM&HVloAWwxqUEmvaVPnjkQ)piHOjELYXIp zRi$`=2#~CCafm5oXML$;;GO#y4Vj#oRR}PLM1sK5w%gHYT2m172(j;a6j`s9d-KJ3G@dW^@7%k0c5xcJepENp z*>t^J0qZA^9uGqRAYV0i?%YMdFa%33bOV8|*Q;*3O~YWwuo96?IhO>ch#F(Tlvg^f z+d&#<@{5wq({<*Dt^DqGrw^9PS-loDc86^Zx4mnmBy!%@^(*j@#8X zr|#tB*~R(k*!$Z@i`%#MZtYEd2rta)S?xPsguszC$Iz)} z(Uel?hIR-&ivWR$sDK5ktuuhO$ZX}t%sC6koLD#uXWW@%Fg&x2}uzIMNtHpRkGxibAdmyvZxknBXbtbW&CGN)?o=#SZBzh zfEWk10`gK%P1|imfHT*%Bg;U^Iy4TEkXcho2!JB0ED9l;%|Po`has!tj(TCRa?TkN z&N)ROATrk1RW+`gaY&gNvdC!EIPcJSu%jXS%Z;9|avO1oT|-*niYaH&EMS}i!j#!pH3Fs_t+hF4L@Zhmk!6t( zA}EmJ@&2N++|=e^K5c5BIg)R-L&}22c?C#W5{ofLa+owe3>~m8rqjLI#2LehL(Jpx z-qo@r>rxgXD?pE*oo3F*2m2N&giu$Fu#{^b8k(Tf>s;{%5LV8n z5GwD8H0MNwqEZ^+a^_OvXso^Q7o!%(_l=<-XGl2_8DmIPb4sc=l9BA-j^*lTEFobL zdCE$Vh;G1e1xaF%h^2J(q#;Hi3^B^?0%BOW(1V%dXi^g~p;%*?Wr#6^#4L-wJ?Cv* zH%O-Kht0NApwV=^cd%F2b&4qt!(=of(9vidV+xV4u9tm3SnIox#M(r{s`J@GBztEi 
z%l_iv?BsOZG^ob-hHQ1YTrQWZZrhzaKe2|64-ayP^XXie>)I{XYqGZFcZ#{C@}4sf zA(AywMU3gfKo(q8_1#ccO$ftFFWmp+^BCPx`QYhWFYSNjmA!*1%*WLB?FS!Tf9==z$8)pZ z_ILK-AAIADg&p2}aCmj{WHGgy?RgMnPdP0vj?2WKH1{TiC6Sckd@M%q3@wWUX;l zK#&j!a>36NgACfP+9!|BA3qvbb<<2PSDPRF%a56 zIcIY2YPtUYci+7_+w!0WytTHRpGcHh1r0j8Y}Y3j&)I!{ODlacH4d( zxr@1H$VoCv>BGXTc7<0@ycCz zY|ats0PCtKZjv1EgNDRAGntGQi^Xg@Z$@JD?mkKSTu>Kv6=kkCO==si*4?NB2oeX zG?^uHX;9(q(D|nBhCvYojFL+XtmwVp-`}&=h7fZuNnp*w0uWh62!SjZqr{18UkxDw z8M2NA6wq2v#-jBZRE;5H7K{0GR1@cM?NxG2A!He%T&}j0@gAaySvGC2Knh5Nqq>Q0 zKX!iPX*#YDp&Nqtwf9xq24iTmX-kCt{A#`4bVrAK2MZrVUwLm#acpW9&LZ2ctH({s zDP``4!FpGi3z@mVKnxNYV9tiDh?eu3j8T9xZWEF%>_@^+iMzOKZKcwIoKh|n@+3?| zDMdnb&Xx8WV~nU6qS8Bt0II^A3;#>XT?tWCm{~-I6d89J#ZnY%jWuLajG}^uOjR8m zA5^vP`wjpAOuOwlWn^8gSCN^#t4HII^XcXFy6a-jIpxV{?5tTVCevwC+VbXmGdFhC zq=}MjRc~Tg$I$0^wOzJD4@#SLYb)nRbvT)6>Az32?U?^q=gAB6Xw6$w2(R?}`Lg$S=oX%EfCr1af%HrkK z)n>CX76EiN86V8%Uwrd(l+(Ar`R!?=zyI66a%X=Q+H276wFk}b|L$8;7p_mv2%)L) zcYfpLue|lzqj%nU`>n$-e(`}9Y_C>7{oZ%}?r(qfrMq{kdc5iIpZza?d>!kSs=Kef z;2QVtyT7Qb>WiQI-0Lqt*qcosJ^WZKMKC=JIuA%G<=yjXEL)$ww^#U3~jtR{p6jWe*BB~rp@I3i?7TN zZ_oDceCaE{`qKSZo_&1!;MLdu@~^-B%lF@#&qfHEvxKZ6a@&TJld~ZVVlk5jW>H{P zKoX)pr)TG<@4x@v^G}{ga7g>(`3qp{A%~cUjJXSa5@r=&RY;(~tO*I6L+?ywt=rl2 zML4T+ncZ*3|I8{`iV&czBB-Bw_XV^N!i&uFspO*|RGuUvteRC4Xa?o-vy@A?W2v4G!m2=LS^X?^}!jc68ihI)5RuiVvZsVz^o}UfQgxN7UNvvyem~8tB5J(s`lgQNI|z< zF93;|0hFkVeHZ(Xqku3oD4~xt#e*Pn6)1g8AoKD^`hCJ9xP^_p!PoH+{a*??S>&`%?Vh<7&Oi#(6g(R_x|`` zp~5jnGFBuTYeI@cN~`tOlBvAQDW;T-vEG$oKLQDoig433%-MPCoZX3ufSDNyg+Y|d z%Scr8#-X9AAw<>}qj6|FhfT&7|p_EH188V2RGZ9&9Q_dj< zBvZB#)_X*>hKjAbRMj!dFboP%s-(#oL3LGStW{8J3@6^UecN@j`D}lGe>|#FN}Q$d z1CSma9o4?>y0+_k>)a4|ND&MWnr_=Q;~EGyo4y{6fs6&2puX|NKw?bli-7?bkkkaCqz4)ANhdt2bYN?RS6m zGynKM`n3;#_&puA_g>g{7401@?%%)tzx~gDb1epc+wi#j#Oy=yRL}h6v7*3Nb&p_`{|p& z;0^uFfHPQJaFsRQ8fVEGZ0gEawy7I3&R7Rzl&l~!naCN9kWrvWJcQNd=K5?H+Nc;Kuzw_avs4zV^n9ui&#lSrizW(}WLpP+DtnWY?Si`N>5W0+)CKQ4u&*DJB&# zEP8%r+UQqQ)V`ZQr*;hmI1YZP={a>n^N^uuX9g&Z?SI 
z`t&r{9npY`tRtX1Ee(r*f47cRxcT^!Cjf>L(@-WB-gN;eE3j%7%>adt$BKm!$_gx+ zRkCPCQbl3OoYSYDizR2yN`Uy5lvBs0dqF0$~lFBV?ae`T~$@iIuuPIwCklo@!nbQ z@7;ZAG?^6i!jR;m4NtGS(`C5cgduC&_n9@bq?BV!)5$D^1W49-0b+)sPbo!12r9cK zW@}>+jzGJi&pBrmG$!PjC0RqDpse2dLjEdG6U~xBuw;<+_R%3ps+_GW>nsrhvvytF zc70XXNIHa`S*@!OVAOa}0pUs0EGDzsx=~fHRvRQjL#yqO)NBTJzEWUoT^4%!{Hom! zlV${feMr_;#yC(QV;Lwjq=Bod5>!{Yobz~8+Z(H}F$5@xPzDa~ogpHkoY^`ntVMdg z(PhCHhhPYa(3Dpp5gN1>(HSC8gH-f1W*%a%0+tLZYL;@zG}c(}0Fgxu0jO9aYv@yl z_im73RY4IE7E|zAF_0k^ZbtREX*ek$*vge@u<{-OedU{`-e1g|wK+!usJ-XJ=cgCI z68g}0L)V4zY~reBI}E0(h-{1r(TpeK%DCM1&e&T=w;#OlU~lhmGMSkYU16Rt=7-0} z1SA={J`N#ehCU3=5Q!|N6OP$>-*sJ6k2r&OzQAsPOdA3VD66nkmER6wwpgsUTSL|x z6WZ0mbYe-Ig}b+IRV0?v?OO+gQX-bO@7{_-cesCG>d{Z$d-$i{`NgA)?KbNVzW>2g zhi5r(Ne(OK^kKd>(^W^=XRk(TMjl*C4t+!tL+$%6@)_L^L z|J@IAb$fd7(nlXXs*ODT=&3Lzq3cci(W8%_K7V@m-YsUhxZbt_pI$Dnwp*}-Ad+I~ zb7y8$sCvT4d;A09GI}NJa&e z6$;yG=Tw365yE1ifXNI|$+za4od{s{f2}ESmZca`$ z=Ed~zl{ep3^6QpyH2=waAO7UMU!GrFuiG_&j(qh`|EquUU;LB*W940r9EOZo*lRYX zyk2i4sWp`?`vzkP&>BZ%lj`a9)uShmPcF_96PVf2o!0|Y-M}G5G?WA|o(pR>_|#I-NWq|s*<5#e*~0mT&z5ny4GCNlidn4n#`zGk zGnD}5EJFw>rP*{!XjCBOqyWZLA#q||Z`;g(yO^O@rOaZi?_%$~LxP;rq?x9eobd>l zQX<6B#Ls4vVc68pj_S&i%~`uHCQ(;6-Kc7aA?4((bJl_+*mgy9+$1s^(l+up|ZfYn=j+ zDVhMNYTYynNJNr(7g^cWgi&epV3w5fF!V(n$T=y92$P|DR39JTnoOpIrg9@tL^6xn zBE$d){V=GiHO6~i`$`i}s*xj_G!>#QFIPjKw*9c#b^>a=S+16e706c2Xgr?GCv%6k zw(jiN^X+zhaeZNHyV##E7BfVM!u=38o3^eiYrHqj)z#7%<1N(=y`|Y~mQ!ABT14uH z!5DMnWg4>}E2@G;SNUeS*}BnaV19aX;;Uw}SxOp?j}O}Iaxohn&Zc3zHYwgdUT7Q| zTX(DO!7HyC*L?GvKL}A?d+U|abZU)z;b3(A@cH3rbpP(1r_U~)KHJ2YZ{NQE`s=Sf ze{%WWPp?1!=7U#m&2KN7=TDzrZ}{Qk?qB`;AN};h^}qR(?*(yhf9|yxZ|xt=C)3#^ z0j=72e!ZPc7wc`?g>bprk{?yoGz>{IXc7yA891r3NE2V=bP@FG23S+T3ih@{coVQ9k zyns?n&LG-$wB=d2|xYYg0O`m2jo3W{n7NEsvpv#5Xy#*nYBR!^Tjef037 z^?DligZQORT8*&Ue=A4RiyIjFV zvM94;7FJ+@-N-JIi)$QI^wWV0K#PS_m{>A%&Ya4p5M|b!IkIF`29=w=E-MthDKl`E zlr<-giBpUrl`3umK&O&b=4S5m6GO=^N^I$7(d-R7|-Wf zGocvO&=1BsTaA+1s}?^w?=LrSxz+P^UJiKGrGcT3Q2>%i*AJzVd&t9hHUWc~rELch zVljvnQ8h$G^az#^tuc|45UH_|g{;rQcA 
zty!(waWh&>8iT+wTjO>cAfT9e*>0<5l(VpCN(EI-s=%3v%A_W)v;uGjk+K?4DPG?l zNtRd^k_r_A+cZ}S9H)n9$(wb$?6e{lEiJ~yy;?;8V?DS&2?GWAR^Yhhm zdwI2D)FFi|k^)z@IX}B>w{2zxB5(cW<;C&gVUE#TON1##PO6(R7{}wQw8+RX|vZERb>ng~S?#R)H_pvGU{$Swlc1fJ{aRp+M|~00_#O zOI2?`Hwwx(CpxPZW#h*9Sk%a!jTBX=SV3h6fKyQ_ObSF~v4|xNqr0v{aFr#u+rvx9 zI;889^B?`_C;!_&|5uNnK4X^C)8~rvNB{eu{_KN?=zWa&!GqU-{-gIUPR{FUeE;PK z^P^kuKR)AXM0MTNl`ZRRD30yC-LxpkAwyPF1Vm#UXhAj5(1+9KXOBPmtk_19a@%yTweJII(1w#1cibXl6+&ESd#`%SA&ub7IXY zM?qE;f#M5F3Mz234N%A;K*Rv7n({91gJb|vm2Iv2w?_47bat|A*GkYJ7!~b@t{-|< zWObX~U9aeRnNwB(;;en2`VbW`vPkg@5|B04d2&9f3TlW$-?d%aj>psaV!s|ujCI!e z7!m+9qp=`vLfoc&J;cWsn@`TR4^LOmRzs&QfIDAyF;iq3g2X6Y8#8M=4Axg|-={2= zn)Rml^%$*JB5SRR77+Q6qM}L6&Q`)2V=}hdbY0F2N+P-T{$PK9=(gU_Vlqx4ND||$ zB3*B`#y26R&2|H5I0K71<0@+;4wbd@>BKv?TCG-_b>?84C+9bP9|ZcCjIk(WjrXo( zzRc6JtG*9Mds9;G`fXJ?GMG~u)gxnQ-FDk+j~_da;@^Pj){ z(yKi75#_hP{*8}5c>Mhz{A7P`zPwtIAu)8lSykgI3HwpwnksN;Ls)fu)uujUI|Njl zvxFh1m{ZI{2r+YDU3IaI>b)lfVJUWDV;%2oK0s(l2?bGh<%24MJLVhQARu(-3Zvay zvDkrjQC9lf#6WrP4k*Y_O0ob5u&NtZSH?R5jEPguAu(r-U3m9r@BHt7^k(UYg2d~$MeaqY)`cKgok*1=!?)we$W=t(o1R!yVEiiIG95+}*L zyO5wLr<98FNeG~lnVB_ZNm-aNXX*RlYI${X^0?isa>6@zU*9`?F;f*-HeI{!H>-9% z@Q{E9j?3-(YP-7HZZ@G0k|J=FoHUnnJ8@2&1%y>IGb@)nfv6NOmPi&!0xXhu>s~G9 zoh-sBM`B?vG&9L8nKcXKEGctfi6TifaW3W&X3pi~%U4nrMFoQL8DdH)r+7%Jp#d@kA0UzUmDCIWWH`USKVeg^p~6P>}oh! z^L2pdSN)TV_StppWwcFZ6C^3d%$X6fN?D0q+XXZ(u?#7iGTtf^Xbio@!aqTS6k}g| zmqRG7wVc`eDhz`mxpREDUS6gUoOh$q$a$)~Va}VjubW9-PnYXX0gZJjYcm>G-W$y% z>2NXMTg(*TN-oV$8|Mo&{`yObnCd8 zOlSLh^ILnSGRoN^jK}r;dw2VxV@Y-8Ll|;q0yKcuSO5|LLoUS(rOO%=LC?<4$PhC- zYrVB9GW3H(I}Afr)y5h`vd$@jHI7p@gai`f07_Zm`Q>>%nVnx=j>nVG^Yv;qo;0J{ ztuCKC%(oVMo6TxI*&9`mw%3aqUwd%tySdHu6*9Ns<(DIn3PZa8uoatNSuE{HzgcHjTt@kM|Z+_LWzLx?%V z$gG=9*AJ z9R*lH1h_N{mG*hr-5pDUthkC|7V?eSDw7?ue*F042S0oFmmfVE?;ZT&@$>5F@Zgn~ zX)+O8&G!z!{G~6==d(ZfU;ZUz1>fAh_tKAk^6t03`~6|vqUgPQ_f%jA98#8DT#@! 
zFvyU?s@-01Hwh%EMByx)xL^^5RiucnWk#1`#$kan?`M|8IjXV%?}A&BSaMO2X!qOz z=JF9c>i!O}$P#aUQ63MUT7$GWkVKf3i!qgNzP5q@auEX(^De$utl(Ul*DN`7J|7*` zd-KuqeEsCnMc6`RXCpr|=t7{Y6Bb8MUu59GkaGVGQd)MpEIjMOi_gmku|WG%>jA4 zUeCspVQ8DC_LWi6&31cnwW_Ld+YNmPKrUqNbFh`Sj@s?@Y*HO8M&4q(-Lmp%G!_wS z?fK`Spnv55F^Sbh&8bhe- zsx*&V;}A#_UtcdDe*8#8jd5K&0HJSu;xwL3L}9u&Zzhd%mUGNGbweLwvX%}H4m4#8 zhKB?qj;t|W5o1c*wp9=!84Mom)kaSl02PHV+@l~b1BTX;sf4Z{Pu5uX>oYur+5G6r_X=( z%gZ|t-ulzO{@KYzzZ!HhKls=G?oZzT`21_%_7%FDSF12y-2T$9|IYb(_~7yBlhf6Dh^Nc*rzamD983=mX5anc58nUTPYKh@4_>t1 zyz}#SzV*$&VTQVL&Q~#H1@hjPcr9}lz1iz)nM<54F9IUXstGu14BT$E&!0WLxH#>) zAlw|?`OLxb0|qmsIOLR7BQvY&&Z=ahXvmpW1yxWPltCC(cA=~Cb20OjB`^zf<}%7> z(JY#z=sM-}*OWD9iNuauJKN zC<7KrIVoz1AqaqoAQ%=l1e1~5o4H#@v-xatd47F*vP=PMKdIa(MqI7&Y9-q)_Ctt6 z%pu1VjmER-bUYe0RXu7((|S~)pEQ&E_g;GW<=6KBK|#L0_m8Ht z*?2OU&8N4H4v!A@?jGOTn~rboFJ8QTJQ=z1sGjf5Ci5A1H#<7a#`YO5+il0i%?Z3P*z0pF|h!OV9HG75GGiA|lhzQV-5dbu1H_Mfdq4K!c zvE+a=mux|1PASEhs;WYSV*H1jk5=7v1C++07-D(J5#cbzA%w1P%PMT!ZV`!$>HA(4 zoU5$$LkOF8lX6soX554r`k_~VA;kT|gE++X^*Z+X^zm7{={M`GuWGR5o4Tr-Fr@Kh zniYg)=z7SS+N`MAX$`PV~prMqavz7n=wGTtgx$T1> z8tbgBRMC6yyk+6ZY`*OVCBJIB>3qaIbepS#z1eIu3Ek@M?b+VMAN_cn<@x#PKmEV_fM_&3ONGkZ-;F@MN3UN!!G$E^ng_Vy*{$a@}1;*(#iN@%biR zw0Vh+@TXivSikh{bzxlX2Y7Za@Nv~o3bVx)WX8WJQ`D9HWiVhf>PN8ps~g{XGasiIPkab%&nuR zkI%0!`@W+T>YVE#nm)>=>y^dsdZv-cbY0(fEvHmFH?Hf-*s*ux+BMb~mDG2RsA-(@ z=)EFFLs_Lyaf=cl4<6Yo@DNhphrSI%-}db=gqXM+(l+qP)pnbp4HA_EZHO8(Zu+on zhc57@kIvRH@wV&H7$9u>ZZsM($CSB@ek>X@mxxMEVvMbuN|l$ZYY?@B2zt1G6ozzm zezj@ikj0J0At!+XzUMx6&iT5oG>7~5Z##=k?c43TuKY_cKFGoV5IHBnZpcICcIf*U zthcN6#ygYK;4G@jYQ4!hAMMW?PoT(>z4r=Q*Nyk}wi^_T^A!>qV4{bQnVIYh%ip3`AgviaWc!FiP-*Rhf5VT`2(C?F?kcVk`jKLfb1m_NuB` zxT>q1^QT20a5L=djjmK~3pueeobu*bxXUpyInKvK&$-nzSjQNeveksHFgJ;X{{`B$m@YS;p|NDRY z$7b)&qmxyD@TFh<(snzaYD;x4l*l#*nkscrvN{$XV~J zCa?rhTkov`esa;B_4s5xTm(Gtd71552k$?<+%j&3PPh4^i_HgT*C(A^^l}|w6?7fA z&l*{WB(Ydf$^(FNOo@xSpei@l0nW_KC1%9JiF0CRl&G8pLY5)RkfBf5W!(f`20m}o z^ER9Xd3e?S^wH_R{^k$=@LNCm>7(#O$p>8U^-PsWcvdG_8fKBy=A 
zci#Nc%U}H3(cOD*zV(&}Km6#)55M=b>+7v62RX=#ufA%j+Fo@rN@k3anTtUjN}>f( zi@gn0bIPA?CW?$DqH*B14UZo`zP>y=efB&jcX069Z1E!6MiCi=K~hel3P_~@VaI#h zbs(2-0x4O&0H;Zo*{#INoXLy#BB@0RhPLc~Tl>ik*z@Rc~spta*MP@;~ z8RB-Gl)5|ju`r`mMTra$GIQDXSi1vEg93A8HstFf6d{o049X1ZtizgS2Td4K8z#?A zyAOW(%-Tg&H6dy@U{r#v$_{0F^gn z956`Mkpboqaytw`Si0?yVi<d)HBq1Zq!rGW~zik1a8?rM7 z6{@N+WOK|cNwX@m2q4lBJ8LUnnXcQ8s!AjqgAzGQwr)%oy1rG#s&R?C{l$!h&zG0$ zt~(gd`(6>LYdh~v@wxy~I}DBMXU(X#I$w-LxsSu0J9ksgT{~3P^g)Isn}IV(NU5o7 z1W;vTKvhbi<93M5cyKhI&zo)BX3myKL{cf>4BKtDjT2{`psFPV70%fh!#OvV-)z@J zloA7gv4jK$h*`Fq&2&0J#J25ysn<~1y*ve#!9rMKh?&z6h9UHsGk_q7 zv({H0$y{7q4Sn!cHJi?2PMqZI^r9MVCX=x-M1YpaS*IC77@V;ZnGnX~N!M-zhpz7y zdxxroU_$6MCm_|Ft@kM>XHB`6aLyh9QDX|m)TMbxL5wkw**ljq7l-j^Jns8Wa@t=^ z5VWd%yY1Y#?m`SXgBTg|;rt*Cn^9f8c<WhsKfQbJ&iVPW zs;bR)Kr)qcZQ8ie;`w@Yx$SQsO{Rd$&E{&|uRCosMMScd@wKtI=KRyg7kks{Xf|Fn z^M^=+rMX?(qE5{^J zIHZ^}+v+)s)}z^v-~EJqbJpP(-+GmN{pQ>Ee(USM`O0Tr5*sVxTet4mdh&1oAevQ6ZtP~ABC0?))ij`pJNN(Y*#N3)h*a5-sVbjy8hWgpM?^!Y896f(T5G{* zGj)4Mqs%yTba@$n{QZw!c=@%S?a&4eVnMP>9AZwKRn-un%^V>m6m|r)_s)QD9F944P9)ec!HEG4e1(L<54Vm{P2Lg@B1;W$PG&)!fDI#aqWQ1s;aX zf=J{lRW3B8n3*{nZw;}4IOj8~DthaLBlAGikTn{#ZMVi)OQ!F(l{415nAp3ju3Xo5 zT^o)M?=fN)tej+z&^b5DCCXGG4IOEhjg9zA~k@vX)2^ONV9t$-V1 z$SjCxkW!2)V9-?FLxycX0O{3wJD*SP9PB?m*~AdXdyDOMv)%UEkgiQBytRcZIdrgn7rQhn#ao#nPQ)4d=5_~AProZo-#!SRDv z{_P)qdll{By_bLf%a4rnZ@l$d9OAgnU>FWh^v+YDthZ;c&7NFs;?estZg`?Jk-f~#%6X%m?$5s`6qT`LMXhXSIx zcjnP`Z?CtL#vM%j-niOp>gl-lMk|B0H&%p{kidDPBIN~`SyfpO7?cHb$|@*?eFz~% zW)y3etF7~u^`0Z=jIOCh)4i7t_a;sK;wujpx9`3G|M;)3mfHx9n$h0TaUX_t*S`L_ zH`kl9^XDgr`v>-hJo&r|*6ENk42r5v!J$mKI+^0#b6myd`G_hF#3E)Egj?@iEDAxtfeGhc-AjIyiWB zv3Ga5Y(+&;000P~eDA=2aJrVC%fy7JtkzmlHEK%=q8#y}iYT;$Gw+~4CAuk~nK=n( z5u2C`=2I*h*%b|xoLz}xGYbHfayI}f3pf?|)NoY-oaKv_Q$T<~nM#w0MHLvrKt#T3 zid~BnCq&gySDrOhW3zWyw<{jDaC$X7eDr)Yzg1gTQ8z%$v1cS!fzlU2&lV-w0 zl$;NbkJp>EQFY{UOdL{ERik?J@$)COtGlibDOaN=W!Q$CR9`y2L-D$?epdT+y9qJQ zCL`2j5LmGb3D9?a3Mng~0uk9f^vcRvh^VeDwLw%@tM+nqb^qScPu}}DWX>$sS4bEn 
z9?WL3yRz0<5>`RNoKrKlEJB0?WQ-NSa$e`mRaM2@E21+t4gtK@O`T9xv$3`6&1{c` zP?ba#GOMU2jta=k)>s6v&c%?t^D)GNh%O_{&Z4X=D&z5Z&6{F45M|oc_@AU}5JSA`n0fsicj{Q*+z3r{|~DxJn$?tJS94a84?) zx4##|PznW0P?EAV##!JLwspM#Mel21NqGxO1O^0*8fRQ$MG+z;EO&R+Qp=pOA>3Qc zx?wP2OWUd?I{*MuH}uX~he*zDx9zwdr(wNrw=W$Z%XXPTSF7d0UVXfO(5G&1>}PlO zfAIZx@9*2c_tm@U>}-44X0?OLymqMR)?qW5+v8jN>z;n_-qW9bcztv*`ogb#?f?GY z{12^IwBzmR>2=&bc=ff_x;sC=W}>&g@aEpZt#AI#ca1ky<(%^={7t^x{iGdWD={XYs|dOL%ZrlEfb>i7Qp)o3CnmSMAp^dESU)a zZ&j|ZS~QeCLyPNLS14d|m;O=G-rrmP~J5 zyIK=LN}OW~NXC$L1_83}A}#2w!~{rL*dnm9Q8X+>M5an9U7TIc#?yAaoy-@ry<3a@ zTUS>fpfLbYuD?oJ1~6qH1TNaUqTF!sitbMUqHKUf#2_M)s2D;41_0I_2()a`aw%99 zNuoB10*V3vp&}BiDg$ci(%ezH%QFS2JW0xhpga{6a2L{$LK@=o=nzmsVorw0RaOvU z9IQcS90D0*nb}wyQ&LewSU0+V=t3`>K`yVa+GMSpOezZ#uOjO%#F%+!7DiTdzG_B| zGpad8&SK1D+ORmnab+=cOi@_apiLa>#sLr+Z=F*}=fsB9gEKBaKD zn2*Ly+x58{0MJ-x2#hrZ#F@Jx4k?a&14Klo8-~hNs)$I=x|9--^_2}FsKTfjHFe!? zH?^-+C8Yr1SfvZeTUVjExH?Zss-|kUOR|nk-Nq0Fh>&wTtL)xsFbsGgKjs&z5C5HKku|xnEhM0zAoKcp(>%8*>P*qh~HlRU+kV80m_Dlh~Zd3aJJUlFTVQ58xI~n zy`XATgWYs^>+ZoP&(DT7tv5qR_~J_s1kGQ4=X-)Uw4IrZG-*tEI$Mx-fBqNW{{3%! z{qIb||rEFpq%))<>IkLxjW z7S7IkrV1qog6Eq7R-19-8fOV1_Cw)3Tk8~6ky0)%Iubx;wM5QZvXv55qj5cH+HLpR zYY$YkYrB5Cfx5c=!V8GKcOECx_uu=4G5#O^!{2{;e)9O~kz9z{b%9f8yfzLXCr%;9 zXb^2x*Hr~d2)iI=j-sWCi;N+VTs%%HtkyYEOj)}woTrOh$A=f^C$po&@u=QEyz|M& zjijUsNW06o>`sl+5LduiK&T+YxvawjvX6L6k!ds;FuVf=J?!*o~{kI)foX zfqgT^lvkTBr&tUs)|qBB%gXH#b5a$x)-9&fzVCK2d zOaMkD6Nn{<2)b+86BTJ1zuBg3-$n8L(CyEs7grZUH_WHgoT7784B1&gPy@RvBv!S? 
za_$WwCuU^;U}5iF7eY+QdY6L0?vbw{gt{*MevmVpGLY=HMP*S1##o5u#b1z)JCjeb z$r4eHL6k%liFU05Dt+IN$0P4uiYYUbb0XlZEv8Yk ztQt8w?-4$U`PQ7sHyAiwguLZ24hNaL*@%3oac3s@Oc<1f3U5(u0h2z=NXTSLB+l$}&;%md^#QN}= zS8ipECr_SEXQR*m%GW-A*8kDJ|LF(M)?fb3ufOW||LBju{nJmrS%cnt_5Q=B&-Z3` zFHbLT9UPDAscR-wS5-B7^ymqaA*4}b$dG8B%_j;Fwr0KVfBeHA{@Ry+<>&8xc>4ST z$xmlZjM+P%nXfLd=Cj3SyJZ$4LSvD1h{I$&w#GW=#1u&xjB_bx78Yjfi7F4^ov~6) zoW-`=TwM3Qs`h5L5Y1}4Ht6Py#inneChP2axi!`qZ|3to$?5RmkSqbwbUN1OCr_SV zLV{ObdQrOk@RtwwrXy?R;`HQezw&usQw;sR{a3#J>%aa#{eS*N=;QPAt04r11P|`s zf8my^M&7y4y!K#!zW40;^YgQ-)90s?iXfABRaWTwP+-W3ML}}xI1Glyjc>g5F=yRL zH5gMy#1u1;C1a9sRw>leT?u0;yde{V5SVR>XIIzri;Kf;>u2M|;%-$<`_v*b!cO@q zC->jdEea$>iXLCecvW{osqAW7@Mc2`0tQsH2tZH(5m`jFSXL=!jaieFT|7tTm@}89 z{7uD7$(0H00CI}OsjYfL8QnQhq140fK9VIP0gy6B#q286OzOn34{gk$EEPkHX{Sc1 zscCjl-@d;$-yca%IRwK>IZJHEUhf=D?jB8#_nXnm5v-`8!ojcMJ5tU4M^M6_~M4AJ{)_gFM0WlkJ@RkH{oID@rE(qu`DML=|ZWPH8c zbZ9ChD_U}{uI;utXHea?ZQljWI`M9ATusL{kX>&2$#g~{i=L_m}{8-v1eF`YU~DaT<5*URe{?jKpB%y~2#n;XE6b(RQH?5f%#k~I!s*Xd-P zBcPJBbly4Z`eCq!oV6lij3qL{*$`T5ZZM5h8Zi-zb%#R=<)&K9(O7DyRSc1;IPWrx zDwM*~k{7wD3X!4j0l-%t5J41gH~^qGkVsHg)?LE@2`eWSg_tszV}=g;`)sRsU1tG-Ac!p?&0Tc;HS#x4lK~m9dtWl79 z)L3VBDOCXPyfScR1to*V;&!_&HjlcljCF)W<-$cYWoYbFS}`FgWMx zx(2|^oS4ZP0YEa|J0f((BTxu|(1wtVuQH&kr?dUrZ0bzaa**@&&}BQQwUfPivVU;= z!rtLsbYt>OJ)QZ-s(nkr~iB3kt@Vlyb21$mx>7sPcKx;sZJ)D{^>#FN(`kbSwtYJcoijKeHH{-jLt}jD{rkeML1T~l$aI!kU~zCZ%R4N5JNlkWGz^Ow)DM-cg9(W z!(fed#y0gxL`V_1TnF9B`9&748*d%9eWw5-DaN5`Y9;JBgR$djGn@G1#l)d*w;Nwo zo^Ua5LJCnJF|r~nZiX;q&LW6t458Uo?1d1COy74#vm?sPn?7Ez+P#DQrtu1pSrJhs zYi4h(H^u;2GA!bplgyS#SS(p%ED`3EGqdxa2xA-&V26w#Ds9u=8iTvqxPtc~GGt8g z4wal>*$fnnXbE^Bl5r-cBw7+(xU-5P5t1>cv|Si7ghl~Hltn}`QF%~6E+iub=dAP2 zJFCK=(r&gxH>8-HbIO8>C3ju!_-0xCWQw8nIyV@|5#YtO=^R#Q~1b0Rv#$f&k* z83c?9fU{0TymLgLsv%|t$SMUrEzHI`K=jt85E@_Sm}=)VXKM&Rjj@PEM9EUQzcc5$ z_9?}bvU7Gk9V@VR?(*t#G@Cv>I}wG`Po8}J*MDVLFJHKK_wwTO_-OoFzwxEbX7i(; zef-O_;eYyn|MCC#Uw$i@(K6T%o^^llC(q_DEVils!#{cW(bFZlYJM;|zI$BH7XSJW z|6)8lc>9Z=YrE~~#YISIF{eGtB`ZA08dN{`y-#{?R+z 
z>-P5Ez6IEBwtYX;qsCS)YDQwOqfFUI?#(0Ak$WR};$YH)WcbQ^C!#I^8NRJd3||(`}Pr`UR_)+udfs5 zZQr)TaBy%iYR2R7q8g3-q<-gvUog?VmtMHMe17YA|LfoQ&E@Li-~aKy|HZrS7z2BY z>D#ZrF&T~1kS{LQu9}^kt-tx5pZ@Sizg%y+!=w4_TeI1$-aD8Z=Zve+R|*)0VONx> z7?LD3wyOHBKYjj`b4Ekf*^puqF7P5iFqX;@jR@AHKo zZH`}h@$I^qAz1{Vk~l>y7_I++kyqY{0K2Jtci}FFFzk+I-Hl5-no>ytWY(NDM$TC? z1LX{)~=aY?Ed(RMkP^rrWgLhB+7ml{E&KIGWT~nD)nZQklwuwP+01 zRiz??XssKKCrM=64y#okvyh?Em_r7KFtlx#b7~qt9*xIMJsH)g?2S55$k75;-i}7z zdqYIs5QZ3skWyl6?PNTue7$M6fDpK7LK;~T?>x)%@x&se5Ue4AQtLC+O$7ikaUb(~ z+XJF80H{~Xm1I@Um3P+p#IWf?O#Na^xAvMzth>YRu!Vc z5mQlTZOLR2s&^;?B8AANbybOC5?Z&x5#76WNQlezvKlq4+SJ}7IYUid8N!@WY4{<+ zlvCzRrg*?)7zSf#)HDbnl0WV35EVqF8=a(NbP%yj>u3moQs%_D11X?_aQ4pBjR#aB0ujlHR3?qrtSRBs z=jX1rue^LWZqJU7rYUaj+&fye=MP?b5f#tY{j+uc!8_*%_g+|a`8z*6|M7cQfBzr- z_6x7S{DYtVq8_;yUw-vhe)Y@y2MYlE=;LP(A3u5Y^x2Pp_Vd|d|FzfO`0Cew{c5@G zgWSG#KXf6)FdEq$`<%kj{@%E*NYe4q?CAJ#JY94HUtC?j^|?2j#^pGOlyE3!fvz8x zn>MS~P1AMlcH5>TSp?CLvBDZcLMU!ULZF=U&_@t$n!4+{(P)IIr>Ccjy?I^xA@udA z@|8O}JY38d&f4i@baj2PyuOs2_V;I_QGIZ@x3|A=-o+eG&(DX@z4Dn?W{a^JTo2t_ zU;N_X(c<#_^Rx4#+sDR_7Du->>y3B#=4W4?%*OpNEH|6Q{;hZ4{rHdm^gAD) zEr&z2`<# z-!!U{Q%WhOltiJbYhOF>sUoNmDkHP8n~mJ4LWewZ)Ob@lHyw{HssVA9rqg;fwuD^Q zKE~J$Aqit43Z#%?i<(U`MoJ?H?W=9M!hbh?+!Mt@WZ1Lr5`jW&m{7A!0e_%Zkn#=Z)8thjvrM zPC^qC%Wm9KRLMD3Rh?2a235HoS|G^60&0x`g3^&#H#Mmm&BWOgdW(n>Q*hoXKuQ@v zt-;3o9J3|ESpasZ<8f1?O4w{GLQ=VZ@Ak#{Iac+CxdqQ%{Gefm1sS-}}*zKYn`F1>UUs#x-{jZr9}YC-eR3bkvNUt19o8tJU>#{rK7Q z`!But;p0!9ojia2GcQjj&3e6Vw}W>zictnb4g|u`5y;T@?Y2dLzVBimVu&%OFeFv* zbpwDY<>_n|<1nr#F$T_Q+>BBV7Z(>@+o?iMe0h0IL`_{!XOroCI%>u$+V}l>y&jJm zYw2Kbzg=&vu~k(K!|?RkN!3hd^Mi}!>PJ6&=XbyH_a1%xL2W>-GB4<7Y#XAO7_HzxwviE|+q8vbJOv(}_{x%sK0` z(+gMC#@@6Q0c_W8-wnd7oN8}J-t3J=cV_d~j_w?f=M!I5&KqN65=5&CIp>seRtY%@ z0vHs;q)?KQ<(qoF?z+u7ZdV$kqq=V9tVY3r?$WP_T8a&B3^|Is8b1Bhf1$hE0GB0k z$xoN)WzLdf7J-~KC5TzGU}m_HfEbD@lrkiN#E`P4EHO*Yxi_FO4g(hsK;AVcZsOqV)b&ffQ#ECgG zLP;O1GsYlFj!v_Y!Bdu@B@UQ|#+ga&$D``_XmRJ(-pG;;ouRN$Via897u_XhTQxst`7-LY;ZqqI%qmd^7 
zF=WUXl|)!rDVP!qvq2z20RS@0;H~%87K;%g#27_bRgEF%oT_+dogqV5bk9-|424G` zg#<^%I?OD91VGL?Lu9NG)%A9ZWMWEySlTU$>CB*E5kv!o3Ys$$p|6}2FbrsB#$BSu zdgCiwSI!%x>1aF~&kh#*2L}h0ubH*$2QpM%lh!&yLLy;qstQ0^qhUn}Y89_m+etGGeSY@%WHOqPOJDh& z&mY~ILEk-pxPg?XlX`z|g6M@tcVBvN)wUNG*Pyy?+jZMN{N(X;Z#=~K*?$hvhupshZ9^msI$y!qMBOea34 z&~95$Kr+sHM2MjuRo+``i3rFWiy{|i=d0`0dbtDv%?d!v>(%ml8Dbhjm`o;JKh(|W z;P9yL`XPjzvoUruo+y~QZk(%A%A?WPTFWew_4;Z#^h4icX5IE7qD4~~>#%WVBz&i7?$2fibqU zU>J5R)N%@!Tte|ul)zWvK?}yLiIo+zV9Ic_17J;AQ_>;?r6j_bMPqpkux5c96==8c z)fsL7qSc%>E0a43n|5;%+AF8=aMp|~tT9WwsgxH@EkoB> zv>Yk6`;B>UG=J%Eu|KXIN)D|`QpnajB#a@ZE)APDv@IDlhU%&+%TMo|vyL-w+BT#N z#s*H#xw@$k#Tb(UkH@1BdKCtc{k=tq!`X7#VRCWkU$-c4%rps!Jw9`aXss z)ZWgg?%-gGgzIg`BKxz+{%m55-S$zbVnJtI2&wB+h`H1*s^((IGR6>qC1c3u#GvY& zQ(=R+UT#@+JZ&c9T7`3psA5URwXq1!IP z6vY5qGNsppg#nb%eCh{v6$qKr-Q(Ny$wZVH*i_bg$DAkQNzQq>x|)q=A?C}g%PGwIZLQ&43)M3gXBsdF_I}xc?A_o zIWCvW*hdgsReM!^>v;b&Rx6`GyYgf}LfqY>!5|vC>C!1rhN1%PXh0x4HUt#5QfAG( z8|%x|pVICZSb!UT?R28Wx0$C$~Ns=>I z)(s>8DG{P4r@15JD$O8(ER9Lb95o!xYc!@ZjiIqOWB`XSgfJ+fs;Fc_UVdGC#a2;O9Gjc?$E?%v>EHJL<_TFgZ&1$v2xY)EqbXNAJjxdK96DZ`EM9BJ- zL(G|(4H_hp%%yFKR6)4%RtUE3#sE5J>$)-qIp=POXv}2XnCl@9!IBwQhB%MvDuo_^ zI7?ks7OgO-P$`B(z{~Li1t2r3}E zZc=c@uC|>qE@nYO-Sr3)B9>IaZl$HNn>K{Aa9vmZFc5&T7D4f*g+@s@bC#GBhm^?V zkP~NNk(@s5N8i&5Fs(L;%+hnbcvEXkrr)6`cGAAa!hm;Tc?F3&z0V*Bi)%P+j} z((SuP@BHY4-};ST{}=!3AKtrrcBMPJf4%}7w+F)4Q%onqqhQp&hU?sAy_M%*k>LCOc=!Te+5DB8r_xHZ}*WdiL zul?qx>&`Ei&3K$uV>Vp~&NyShI$N}fe&`8sG9G2-9AoH*(08Lz(=7JRQ{s3lk zjA^@FZ?@>HbI#_R*X!-|_4?rO&{(r+H`B??S$}=KT(18ARQ=hLrb%+%i@AGj@AhrE z)z&@T(=*dEzyRbSK!OC1WQRw*l9_H~M<(6L&c8{c4oFEP4nbfB7|dYy-n(nf2hXYG~J&dca`Qr0WVj5g9 z$LZ8o-QWE5mxhKk&A=rfBpIAfYv(G?bemS zBzbpvAxnRCxse>5w?F&&PnOI+)Jh7X)>*+c$9Y($B{8#Z+KxBNPVAGaTlo;Sm&-gK zDL2yq#_UhMCYF+8p@KwZ<1VcvV~QjKQL%Eq_H}Sguq#8&&E+4p{#lFz7IY9~1T|O! 
zlt}>S><2!}K+Dz(2&5n)NXQBZP|{SjH1Gw=Js>|~p9}btSdf(-&j*Z2&W8g7Gl*ab zl@TtHUhwTn!{^EHnL`9;be9sGFDaz~1ghu8fQmJQM0^g86cI6GM1`F3ZD*S`_?p2l z5mG|Js%|!$i>FNX;dtzir_O) z#0Y2%k%}V>#zcR?9gjm?IAtk>xb4Myo<|}xPRV=QwiP z1cQjG##jY07M(MoT(-_EVu*5z&JrqEGUc!@SrKS3a{|&N3C&p*Y7D7L>6#FpwbDg4 zdUi7~7nDktf^{ojpc*m=L_`Y0oU5wJDSNWZX>>voIn8m2i>O#{i&`l&vtm*qXTST= z_x|8VKmPF_{5ZuFmwAa12ndO@6r8)WwmhB+1*y)6sF{Vu*fNa3l#=tlpN6(;E_at{ zj+<_6O;}={V~k6Z%)n~NmPgQ86H^rC*Kc06b#Ms58DM5+Yh2cRdwg&{EOA!Nm%B|< z`P1q2yRW{y8|Saz91N|idd$<7ywyvAuG<^Np*T;wB z@!_Bu9NEfw6q%;Qh7faNVL-KnV}Dw$yQ;Ad5BJXbaZc5G{qAA^^5whlfBZoe{Ql4b zfORgje(!sK@ZOUrz_UeT$yFgV-aG4FzI-j3SF47Xs7XNWfB&nW8C#{4bIgzK8qHbu z4@X4`O}*W&ZnnFXty>H<mWmYFYwQMpn6?(Ltds zcY`ckSK;R3qH&cK(u|rz?O$xW4*^;vXUHHF9jIc_*yAJEI zKlZ~xBwFj1Wx2h-@B7)9U|p@`lTylXfB!&636)a&ai4bmF%r0wEIK9 zBsm@Dlr*t+t4&(sJdc@qn3k$?-qE})eN3v@1pD-28>~5=PUM>k(xW{xD<`l_n~ ziiqM8^Ry%cLPJS-S{7A6eJsq02uQL*8S@(C91Vgmu%E|1i6K+)(J3cH%vlhv2*9Ib z4gs0jdv`pYjCCm|!gHKtDgUjtXOU=mT@@lAQxbwX$4HM*tpWuq)kjJTiF!ba98@dB zE`+)YA-J4#j-I+EfNarck@nG_7x*9_Ex(*4^rtzxuf$K@jVl3!#2? z`QtzRh4`ysIIHO`}@E9(GUN? 
zl3nIGEwf~{Xb2DlQc6Tt1VRV}!`HOU#pOl2YS9oHY`S{0-CA!`OxBrD`MRld=1>J+ zRjhn-bA5ezx!J4{z*}?bANI%FBKhl90YFlki>;kdx9gQbY?~(MXe^Pl$9a5vJc4yM zH#gt={&%}o`;(vk^wZBiQH6H3{dv1!~QgmO&!|cf&oh!mxzYuImVRc zd0ed4v=uB|)~;5et% z`s#E#tva{rLQKoMhkFJ{oX8l?GS8Vz$byIlM9L5z2r@Ge8bd_L%1Go)QLI}ePy!$P zyW6{N)pbp+D#Pgn95-!+AinaMnG7{`%?wFk&WVgQ1`U8SBsr6zX`YJOw>TO@um(|C zwPXPS0FZUI*wjnJ2N{#ge3-~s1Aqj^lHr^|&yK|6hs!w=8U!kc5rgJY*YsF0A*8qz z?zE~{X9)>Z9a%})aJJ0bx~_n9jHzGd(>PBHs6v)5G?%MgZCx;Cw^|2dgQfMl`~FAY z-)*<6&1$_`wO#Yw?|$ge77}7%nmg-C^&&ZE&ZPt*V-RUsX69_1HP%>X*XxxiuyETp zP17}XLqsWOGC-s`P2TyELZgbueyFNSSqX5}HC^X@pt$6|pRS)=UR{Ziw`SNqLb=@}qumAKH|LmXt3u_%Aa7z2b9+BpGV&T*AXuYf3+BjpaEjio6 zG-Xji0%O`$>np#clqD-58Ec(Kz^?69tF|AOF_S zfkA;sVgh&(l~D(X;$O6hyH|{vQp!S~49)fEB3tYgkH#eDMd1 z05FTdBai!^+%n2xpjtXhrTZg*icpf+mbe@I;q~3CIc4WtXB!_vPU&#m&-1dx zWnN;+yu>(6`*F^Y!M;26ukH_- zZ9hd8RRAAcT@;;lSd)Jn#RmdX((R;Mx`&i76zLi;Qo5uCq`N~vnvp7ljh=*rh)Bbb zA8rUpNlcmK1Ox=$z59Q=u3gWbeedT!=X?%CR!Ydr)QnXYJ}t&s)IvR__O|r0z2p;q zmWpaDs6og5&h-4BKNqR&r)Z|9=wOwc>5j&gdZm!T+379B@>LbcY0qlti`s~@Bf^8# z;s>nW9a#K6S&_M*lT9mI9$PLK_9%H8G+G*>gssUWc}F=VcfMXCv@>p9!cO^=(kjQP zGnbZ9i>+)2WEsXi*1K!&0UmMrDbHfjB(6D@Lc(OQNIf8A*AbUcn7~r{I-HKB`2{uXY=i9f1Cz4ziye=FC3^?QJHy^_ zFJw-E&_}7+sL@5a4)NgL!h!}Cxyo=f2jTZbh_7GvuOGqe>xfH+(RnMXxT!^ ztk7cCyl3-aS6{}wm%99N8#PZrxOWnv#9aP?&W}I)`(1df*wApS1~&y8@$T~*SX~+1 zdu4jH^{BPC?=pOBp?l?IXH)avQ`?K>%NR6c#J_#iiSU$V$Xje)s6} z?)8Dydsn48E96SGO6#k8=cmrs)Dcuhgkmoy;8!wzvo{DZH z3^^&J>p`(L>&n%GAmd|{)4!dKjqB>0qnFidYMhs^nBvb?!Zy|Ym2>oh(WGAbvha7S zN<6WD?!DXTu8ywtdKG9@RV;L0dP+O}BUkj#UVoHB8e+*$N+sCw#Z6oF=p3a@QM`>< zmz!j~zPXseN-~p@-#7yaWr%js(zl{J%^^kdreRJB6)TtMFf>_G?$>xgk)WICdr@6( z%T+QhmdVj7-4C7kVlLVq!&Nk5=D!S5t8r=x++Bd&EqOO=VG1kSkH^YGthn{R&!B2P z&x(ZN=u-ZF_lJ1M0Q{o*e_zVZ2;FL%#;5C9ei)z;6J_FCgel~5azONwS^TVc*=@3s z%J>x&X5YKB&zANC#^%286~E%q=}58*#07EvP{3fZ*igtT=vUiL;mm0<^i;44FgGn` zjVJiGuCQw|<*VZY0>`<^=<ATVPLKu56MDxOJECKDO-3R^Z!@y>lrPppGji~Wq)|XB#2W`y!DwRLow3b zh0Roep|oZ9ggOygy3=H~2`urRo~kQjde}dr 
zYigeSy{&oCgSbz=H+C%p9Cf!fpPmNzd{Mz`Q;w*A3@(j*GnH#O(L$##8|1GP0v|t5IP)`XIsLG-QrXc|Ml(xecYd@48~qj3_VBg1(kMu z{1s@^={KF1Aywtn(mgXAbkL%9IDoz#d^oK6w+dlZ)c3v@oFAdWe+aucbFwEcBrXSU zFvLFQtHUp|{up;%qr8&uSj_d`Lt^(^K3v2k0L$(>i0Iq%sd}9BRml(18cifFJ$&QM zart4^Oohbr9(<7_{4B7#)O_;hNcwvIW$fQHBZs?Rdp;BEH&^>qEw@Ks@n^*cNm)YB z-i|xoFDKR-6*tH45l$i*@$O`9aI+ zn$2gh&OnLo?B)ejhIG757v>&o$Nde}@I-hA?aLW>IL9d53Sso;KN9GBJp0^471h8< z{>CO64V)uPA^BNWoPVho9R+YMt9K=#b5mT1rKgM<7rIkuOPE1A@sGVluAcv>Tr!>H zT_|eRzdAV*qUZqWmimg^!)(v9BSlR1BJbu@S_)6p`bxKiTU#&hb$L|LrG+<8@$Pl` zb$c7Sf2K@o>)`v=(kHT@9E$P4&bNKP7fCYcrb0`Y?oIDS+;dSy6ZpBHP>wr!&HYw|l;c*uLh5_#L*ot@jlPJZtz9ty(ZSA^E35upy+7s*&4HQ@k$ulg!keyb=$#?EEhz4lN85hb zPb|1ZT>RLL?C$&SFRcwpSoavXms*QZE5L^K^!8};>RNvOEcUi%xv40kJTy^y7xJ1{<5F{g0z>0{P`g`E2qWXH~<<0uz zZJ=T?fQ3V!LEmi1#+Qa~B9Cz=sJuP`IO@znK_Xjh6AXcXDHzKZp|=2HA^<*qHZ~H7 zO@I<52P~lCLV?>rOhGtq(ab(78N8msRmYHwZowp>M(>aP zSCoe45PlNc+Y?Mqj;)%1S8tpBAx247+D)c$A0IgLC6&oDDmir8Vv53t`mmRyw;4UY z^!i~-O4j#c)lR40yg`5LYHT4+@6>!81~wlp$e!vgw;UxeIGnxlUhoRdZmfnoCr|TI z*Ejaj01V`pXf<&%O-zC~yQg8j%TbOYz$H{*vHr-%%=*MYF29@S+c4 zZ$0M^X~r+*CGJdu0&+h07@|7s{qCA&7_c~#al$jkO~lGf?VPq@1EB=Dx3FE)AUD?R zEHWMyT&P`WXpt*yXE9|*Cf6&lZ&xYQJ%b4!R9aW~+ZpvnNFJ60O4gf%O>T?JzHX0q z%f5dv(vlb8OPO5RKKf1QEusY;D>$P1Z4W`>3pL+0eFUBCoXK= zY!VSV#P@%l;jB@B; zr9aX565$D8)Y@6d=+!S9jB|K zZ87rh1LlF4RZD z3J_*{Q*s8m(foF?N4`K2DeWYcu{s~k6ZNdGp%CRxLb2&46DUj021jMo#%JKaRDW>W zxpM6N!I*cIyaDHfC|ol=8e?5zVsmnGwXrw+XUZw&^jvvO#%2&eJhS?UX)9<%z(k@V;1l2@sN}o)%*5wEZ+63eT45i&B?6$Ov$e?F zq*|N=t>XEAiiLjd&Q1wX&-c6smi@u5R~P6^^HdC#c}+nxM+yK;Mc0r~TaZK-d(g4r z?r3oH15y@iCQYAtiXN7VANtXL>|TFQIIrOsQrl*{ZRrv-**fLmSZEL{%$re@e#`5Q zAEPx)vY@-Ar2-Qc^?t=(Y0tJ0JmYQS&y%|8?JNW7Yg|%%l|!1HI^iJ{%xXiwl@!oh z?}vz7dA|B3P_?-00BB*AN&9oy|LjeR^{A4*hV1CL9u_p{*omyyCs;mMdLFv~@pD(i zxpOhPp?1xyN+Q7S?!pRE3iCkutZ?J9Nd;RHv=(m$lNPYAfoNHA0=f$ z?gA?>UWa0k;jONjxyK`;`Cw=DN?~h7s40~Rv6D6%3nBivag15wcCJnBP_pa^{`fp0 zc4OuO{ao=3Q>s8%$)<|?oT{aDh0&(pJz9WAzC;FXr;+@O_nNd=7Rri=21zUvAm6)W 
zZr}$XY{8zi2(km#Y*M~D)F|tb+tAhg`($b7BV8tXQCVI5y)2fuhh^%K+=TKvxyWZ+ zjk0dEMHs`Ec}BkXB76s+oqP2@VS7Gy;g!g2E)PpN&V+|do^s%ZM=4rA-l^bqo#Ry9 z^hW!-ei_3pU~JakXblVv5((Lt(IaxH{he5r3`An_!x9@$3uDA6`L)Q+ZS_hE~ z`dqYE^BPfn>s1(yL%p?x$2>Oq-)}atal-UP%50`1{9j2JApboM^ZvCo44tE2yRLOP zj(kH~nXq>Q4vWmqln#<1e{4a)0Nv&}srBp@VR3tYB&n&&q{o}CG+pEtAjW}@u}JF@ zu(oKJz|=zX@8~NvP6v5Rw6rLfEI4=8^*Vf^bE%{HjiCB2>-F)hjdz4Ysgc@>kT5H1Mdwqw#FroOFI46!zkqd;{1yPFZ2D%W<+469O-I*5l<_=id@3y^hbtK`jeJ8LAd`<&S3TSzAzRWqKZv9M3}ykb_FI1B&{02C z;rkq1jF=SQZ}>VC9xC7s9gXdVLi`lbulT3|h)Cf$A!7Q||=MceAQ`YNP4_W08D{nX5E#adkP>Z;~IfArCokIFr#{xf^V5}420 z3A_xlPZxfrkW+KE`EGT;sT0fO8+mzZfrnrbdOc%w63Q-z@;^v~aA z>*&+#3~uS^rKMfES2YKvIGF92OlxD&5k7rIde@64Hy3eNPB+y-}I@-{)tP^2tDSG;&z${H@PnWUtU70D^ zkI{Yj{S-YVF$_9K`iE4Us#xoz`7;_z7?TYOOidcu9_nwhOV56gQiv)dP0F|TH_2{T{D^1YDL$dF?sk^c3Z>+F+hHxh>| zs@>h#c$mE7Zd_N>)wnB^(TN7JTQm)H3p(6WTA?cnu`l-47rWmUn))%noeOj;{%G^l zd~ktKLEz@SS}9pf=!n`jFsD7aBU^wgNfcZsZUm{UeUx72+JJ=36WtxXD;1aF#KnNh ztTnzRsEhg2cM0ZedKTj4B6hF-1c;77DI(UwyYA?5NWLA6Ud)<6|Mx& z_DTyPeRKWXb|JyV$yy3+1|cw2gmpswxcTWe%@-8Y8`>Va%p^ahtz3@hMPHhORRUhM z0n#%CbkaXAM#5{{Oaah$TD}k)a+qvtx^`(vDJ1>7YXA@TNY4I3c@g_eOHZM`XOM*Mu2~7T46Tz*^v^dcejsj529in z@qy=TOvg&`gLYsD`a{cHt=w%KGE&iaAOw(}sDSRL3j>b1b3GB9adlWkB8@bG@-)u- zy(h#Wz}fiN>H1g%zS%LILCeW6d2v!Oo2ykr@s%S9TW}=0WQp1 zcnJP^Qw>@t{5L$!)#_FUhgUfNO zRXct86fgfG{k>?8f-Iqo#G_aRWi}r{f`2_2=8m))_+; z9*=CXY`%s4(ZplZjb|F0{Oaui93a z8#8j@WXJGXih#lQF;PHa5*IUa^{P}s@lgrTtgP5NklpGb7^f!sx31qpSG(g4#jL)* zcy-NsWq4umv^e0Mk5x_xdMPM(6G*k;(pR?g=LD&IB?Ik1{CpO;D!|KFd52e`hM`F@ zK2^ptp$O&HfmMB+toO5t*@Iue^H8Ml4XLh#vqZW|w(BpVn4ve3|JYQE%$0KXtB#o2 zW6RIvHj~VzOte~ zv7`OJ|Bi#_o2^zdLwxYm;Ihu=B2PXho^0LASM2)6IL<}RB-u;e0Tx%y-9(nO=E9lp zBPm4&1_r}57!pyZfogJc4EHWv8wbgcv-#>&W<0$&K}XO^l-MIzQG-YPBvxMwkG+4E2uT@8 zb-WPL^2|Z9JmdUMJMTQOr+tVFePEcxCBLI0_3OoNlPg)AjWPyvqU&)wL>@py$zS~4 zns>O}Ljh{4y05gA*M)7;zJ=ajX#Uz$_`=ZxZ-i=D$|`Cc;063txkFv! 
zRY3h(JduF5(XR8KcUZHx_gAWHb!M~(d!C|_oA7y*@8Ez>zQ)We%zDkaOMKMxSduN1lZBt@vXr z>V5h{S8a8kGQhpJn$Qux68xm)qS0~R2mAUS+=;>JS@pHpwwAkd7P}WrPXVdyDVq_T zB|_Ww3lN9f6ItDDh?U;1K1k=@_Y+;hR-LyRNBSEB#|w9rTvyH6SMhRFOcowi$(XRI zhB0R~BLmE{k5;4BZq4Ge^M%2}I_O?i%T*iJAabot8|dAp2?LggB*2niyE{uSIL6$& zmqyi`oLYEIDzK{hqLI6gaTW72UyUk5T+G0)UeQ^&fn_M-^;hW_5$Qs9g1weMy+7gX z@~9g|g?38Id%4p-Anfhyjk;|+D`ig=rlH?#Y?^&ZzMr9*#T6bw7&=?}(+kz80W1*T zY^y-6|Cr&$uG3p$w^|EX4~O$PSW7vz@@nd4%$x0hwp_t2rx z|0hC8Do&$5Nn3~kr6ucs6vuLr29DPFY>U^)?aW}mP2NreXBso&Y((D6&7gb;M=1Iv zQ||hs7xv0ITi?&F+^JYh zGvUC&NmU=dx##1s*Htk!Gt%qpI2gOwqU6*c-OWRgjwM{~Z40@nsl%Fx?I?dQH+%kF zun#(Q`roeA9PRau0hI6QES0g!j`t)=c`mMM zHG!9#95`9H#!hl>^n>a4YGGZz#8A{y*Zr4DlHT@p4CtxvXYk>xmB8@(Y3j5|)VKb_ zygI091mqO{AOkbnfZsi3>-^XI`zJXqFQ85o=L$dzDt#)jG95~}Xt64G5aMa3pDwyI4HJA4Wf_SFSQcmeFLBMlNNj zXSDEb9YbteNgnS1zV{Ly9q5skIy@H!SEQ7yehlV?R9qit#Job3S(R4QY-npt6HWn1t)gjCP(y2do zETRGnjw;->kn7Q#U4E;IH#7JRvT>z)y7HAx_lGuu?bpaW+|Ayc1)Fu^gZfq9bat7M z&CE7q*zgIsY0jlz#)bW69G63F2nMO7*Aj6CpyTDJV zwE&f6X3yo3MZb7Br5cIUYUiV=z0zk zxqA_bqwGWUE%Zir7vk(4FBj$Q@Wt?TQXSopZeHC2}Brb;NkNDQLEXsK|vEA6L&woX+lF!0gkt7(sTE28%f7Bh` zs?)|Vkx{9*+Eang!kI|7hOOM@Q5)?25P5#QHyd|7A$Olz)%8w(INH{Cf~BpfRotw( z0^7^$Gohj_D@ZXEH6^|&=jlelx%tB8B7F0DDy{qG@E30I@198aX};_$ECaU2fqV<~lBSUff34cK>t``%}|kYSzAWkpTe2BB$@0 zYmz)o9RqNhz9p#CeI~n=Nt)Cl&Rd!A2(0TcM$6Ckruer4`#V)Geq6MR*!*U_uX4r8 z$jp52ij7mg7~`#EH%w2;iGuW4kaPQdTlVmT`K~x%A9SzxQ4*9?%7J{@Xkf8Z=?lz6 zmL4Q+08^PQU|-jJ<1jA?%9$!)W!?ma8WZ(*ot;gf34#A*}T9Ye{@a0xkB`P zPrWA{i7%#x=q4vPX`RjXT!F6T#a`no2c|=+-HZOro=)UE;wJlbwkgw3 z0=Q4<;n$XeGbwgcFvbm8D%MGpnF2;)AMV@2VDY~RSyL-BGbdr)4gpH+%aYWCF_&DF z2i$aN^Rfo}OJQZ~g0=xx>4?1gpl>{XZ(W@q*q zGq!g5H|geL?fPPB;jGnZKiuJgNz97kiO1zk-_dr(dmgF@KPf5lKNzI-f(e%RQ3HK_ z)`WM1&$y)j{aZvKR#w!(Z%nNfMQJxnQQY)n44)$Vpv8aMGz|&Q?(2{gsrYsk5kl{k zv*oy^NbrI)O(7(OR1gs3fA52C|E#26z0D4U8dFi(xD70(BF@*Yy9Y~0&(8_C21`iM zNtZ(>R`o!8UV*pVg?csIZ*yR|39GEU5R?IO#{@jx{4jG0Ew&)3LK`W$dLJKh z9bq?YlM5_`bWx;Xcap<3gYqL+VPSdi@oJN`5v8$8(U)a+&M@{0xJfnXe+L>@KlV2S 
zNrig-p+qEfge`b9uM^v|-W1Y>7+1{!p=lDo`thcD9liV06|}o`eC}WW3s-$TEIs(| zPxo5%$w)=_SdfQ@XTxbr*iTcckr$qL+e$^qz`%?DTDmln6(kuvwMmM^e}8EyFr^s( z^dy9dF`gEor|Tz{Y2WlQQ^IDSdd)mRD_P47Q^Q4Su1Hxm&Szul8J}(D`jvr!n$ac; z{FDY(@r45nR`~Pnk2OHzGmq$aA^&4l`FK-uVi8n!Y&L`FsnkZzHRv5;*5Y)Jqbyo#Gb3PG}VB{|NfoFn%o@CGi7nQy4m3zqQT&=K*Bfrc{w}W1Xky(a@xF~=4w@anyhdKT$i7N z!s4@@_@e?_0|&b(PlLM!WQr*HQ9I(IGf%fzRYm8U!X5e35JT}Ne*(D_I2DVAF%Vg4;rKja496Nz49^)1BLD!1|A)viIPQdV7EH`-2wR<=0X z>rv8ZjQ}k=lgOo&=-smXY?kbm!}kd$&*_53y$$wAzwbq5lLkwUZ+M;vU_?hU`Q}h2 zv20;(s;_U)a;3U>m??Y~&)Ul0%qR&DQZfDD5J@Kt%Kl7+Y9&S0@RNKEr8Gjl?=`2r z%KPy5n1<%>#{8KtLR!gs)~Uazr@uj5lTK&a+ZYO%%cE&2gbCHHOLPcm^BYH$@dH}0 z_!*+1{#ke*jUeS{+(qBoKn$TcyV8YBYsWsN1`H1(iX1a2xH3V8d;b8bI!!Q-{#t<% z_Sm;tK~+=~(N7({3Aqu8`+KGKcJOAqH5Ucw=~VW$uJk{+V|8%4DXlSZ^Dj0sqST-M z5{ax7nLjXr>{V&5aQa?sD!e>GnP^6w&)}?0R+*jBy8IIdzdc_i+ELO#BSALisb9j- zQFra9&jWYK#coxZtG?wRxo~&Z5|kfNdF!5$C1=4H#IhgFMkJqMZQ*sFeh-j;anB_J zHI)1O2hOiVh*-T9dg)ealVJw?XWHsB@A+T#YC{9ItXXv|8;U zPK%5pj6rq7pvTrJXPWgS;FnAAWCGidy_wXe7o08BB zbabBM;H;EM)^<%1Q_jhf@Q^c}WJ}hyy%0=%ey6PQe2Rj-eRxm2!wl zH7R%8MYfDnh*U?bCRwi`70uH7-+syC(PdX%s~sw>^vtIWDN}*$QxgIWh#LiU^SpTS zCnWD~r3fo*LW?;#D0zC|4(Kl;)zkWvBZ_y zGD30nzVCL0!gLT5z{A~jkGQ6=I&d(W`mcArpYfq8flK6M25oH7#Ud}3rei2qN!N>2_d%Y-?wg&7V`5xu)AHB zEdVwZ`YgxFKU7In ziQ@x3aK$4g#LljIR#I3>t^=3W*2n`O^D9@Y2VtG^=p_k(`>Q-J^)5u>jy3-c&&FP# zZtq3SN+ta2iR=01XOTJdUcrcA^YOu2|Je}H^h@l^vks^p{1$t7#nXRvk#!~&P+k*L z-BnR>b3AY2v^tC-WX;q=Wa!TVg3DJwL|l5too@Q!gf>g62Uh*V*-cs=;bU)3|01Q= z)MAcS_pUTJ_}_S%RIROi$jz9r0sLBO;h}hL86jN|bNKYTnsEV}rJk%iW59r^{-;3y zo5W5E%X~^QOQ8^REB={mA+81mA}(fZrUS?)y6ut1Ov7L;rQ{d0fuj zUOUePOwZF&$7sqCJWv#$I0zkr4BMZ{-bolNccwQ{BQcX!&OcxAY0DWA9Vaj^b%$q53f2BCRkQPccAx+nZ6G8RC=7UU{AcH!z@GUBv(OrQHo;EXG7fAG zrS!^a?0;2C4hG$)PUJ9f1IFv(*%;i>Zm#7AB}elMty*Mz1i!nxORM(H8rdefA>5H3 z^(rF#cT3)tQnoOS_Fd}uJ9mH3HsgOV#`uyryjsNl8K!!6TMvRVYa``-v43(4P{@6w zSy23t)s!Lgh8Zn+>2go%r>VLKHB9%KRv!Sxa3z7gCihL^-=q?!Sla06FfuK*r~z(^ z@%S&IdSyDrI&}>n7{QhdZ@xdwjA^z>pYxhfVIE=n47&`)8;q$ 
zbbKO6a&uf;oHsjYN0j6tDYNt*<;C-S{=$8+5CjO^sM2Fi|aBdmjUHmSB`|JvVPYn|MWM0ag_0S97`=z2Xd~;Zx zkFhpHi3aKl64G`Z&y>gy`2pU_R8C0;E4WLMJd79R$8s^sV#P)IY(3=+sQAQ}T-38T zCJW|5{>W-s&A|kvWO2Q8u9K^fpE-njOT5^q9Cvu<)HqT~f@yfkQGM{M#O3_3x2ljp zIQ-UY;LFFp&sd{(1t^*0L(v@fMJc_gW=%>(;g-C_CR+>D%3Lw{2?abtVU*6*33KfX z>qcHwff8v69gF^np_dU&eJ3TYtIvrUX%naZi=B{RyZ8V{6W`d7vnHjD zjuUv_s&M7YwBC-|6{C=XCRm=~xKD{79N{uikH?INg{NBJGuduNwP*T9%y?3#;_xX9BHouDa1^*R5 z#NZJv7=JWl&bLci9S6hE6bG(`h~S&UHpk`w{}!D0`@uoxIYf(V=KMR#EcygWWB5>1 ziHq>-(L)vEhg7Vrwf=Vj^2V=2g)K=eQ-6JhC_dV%mGZH7)hF zBq<9L)cAj9vCqO3rwZau=?7y}a%z{hfvh$U`@@sLZUTZ?qtNc%-@SMxWa!>qQ~f9V zbRrer#uukAF19_Z2bXvhQG}hoS!t@RI{$ySXJRyoczmKEtF+@q=h%Fkl2V;X{nz8& z>>%uIW;ZiIjXnCqeZLjWw??K(T`P}*^gR0v-%|;gnVD!T#qk*7{>%~W2{kjG{w`RBX>1nx_w{&1QS>vmfz6I~9B4|9AoFs*j zC0~v7^XI1w_o(F~!oxW*1)OLneaJKzI`I|KOj%ZzpRHK-T6g>tccJ_(8=mMnjjGWh3g2cZ z6a0z_6yRf#-*i_KHoDn3_+jz4(#p}k-!`oCR7E4X{(aQ9@VqT5gmhQy%{0qE` z*bUU|j}8xr@$0`0Uhn_B*yC?QvH2ay&cFu=FuI2*eT^Ij&a2E|QgLpQ@M80&=8}JxQ}odZadFf4t%;ZnpASW9UcKU>hi`Nb-qi7ku-b-Kz_zdVAKokY z*xSA-V=29QX%csW4_F;JxcW;^^!^QAuAfI;&TnYPWFzLCwR(uRQ9j} zAcscq%gQ$9D6k~t6hh;c6FSy@1|b%rA9noeMTQO3eOc{cufTbio<&DR3^0$ZMIZgT zXBe;(!yjcnGp&_e)1;l4kgaS*!%YU(O$>SY43J}hxVvcI1oy7`@yF=~?{vMvb3J#p zQk78Ve$!?m7ITGB9$g~3p$Gj`t*YSLizql&H96p-HcsU?V<5EdESJI6K9~&`oeLxH z#$#1A4VjAq^M!U`9rciKb>rIyK~Q7Z-<{wEjK9O>9s=RuJli4jM1OU0FEWLCtv~wg zZ1As9gDQ!{p@FOD+n-IAr{!u#@7R@RF~ zD?8t$bS8c=E;2FATVy1n=I9XK@Psy!=&2PE77MLi{RW}Gw6R6H10M^k8@lpr z263SdJ-TnbiI{*pqe(-uw-0s5J+oz#CzH#Axi3d*L!~4*>0SR6_4T({-GAj0tx~1! 
z-m7XjoRA*AY=rhEMiO@F9V87r_%?4P^{($<4qfQSGZ_7NOKA?C}M_+e_p`fmZ(nSBQtT1D~J;c8ZUXK8wsDEzy z4Rg(t9ncz0*>~>^x3ep(`#>0qBwoH9hCT}1KVW8p z+kX?&96M>)8}s71TL8z?9AM{c)%q#rTM#*KamGzZtt;X-8mDfFfnY#c?p`uu(Y|STv z%7cSSntX!O%^#68VnTaf1|82}@ova{!YTTCTVZzcX>*qBolOPlEP_UlmL5VseO^Hz zKNOo1@75HOt|TNY%(1!Kd%6lTpkoNm9qz#+LjACk>V41$TS_(uvX=gR4g!;EIh!)r z+7=k@^WepHYg+8XHdzlgAz$6XJO(nz5(A8nzmuZ8*MEs^0`cj(jVB~BO6`Lx&}oHd z0wP7`DdwvoNpLPbcsY;k8~#5)WD}O)YE0-)|HJ@KYv)mPhub_r%y@H}%yVY4Yh$qY z(~K=xa{X2!T@%R3D91@TwUb`Xai&vb0Q6XCq#K^0lars)2mof~9r(p3TvEU0*_!J_ zR&_f+@mHt$Giy83fk$dZcoSYq>yJFxp#|v-nFX@+AJGlm*ZKP?R@z150=v~HuJks|UNqH}Ohl zSfmOavqYc^62>uJ-}}LMd(3m;=a>0T+{QF1x>rFvt!wJv>E-QO(JwPFoH8EeY~Vte z?s2=OkeTcKHf|W}$F!@X7iuqXc!ZIJhNW9Jp9Ltuuzg1k_l%O!kmOwsp?CG$CPe~K{a$d$3m+Bmdg_rJ->5ap5nQ2}>v;yVmxM<)Y zQ0mkppO-Zb{>lWI9(XlRfO2$G}JT?WgC39xW65w?u?yatLi_nDB7G!{}v zTpgc2q1h$V3Vg$?;^OsvP*_`K8Z{99%d}mfln#gbu1=*Lf#lmM@2c+1Qxt zLtQjabnFpEp1W-ymY#(zubi-w$Cd9`w>owEEhx-%Ovk?oEdEy=;$xFYvqKvv#yKox zn51i?Se<)D`J;)`IL4TkdU*w%_k_x3PsW{m3OfDzjkcg>+(oo4F*^|32~A_jg;!Aq zNVVk6y$==eM;on9u`ySF7B&A}_{S;7 zUX4rV=vMg+82y5LS5pB zTo}i14*Fj%nR)2W6%51AShHZl)t_}2d%F0Ypa&B+a)76dcX8Gl3R+5y*1=jswy*W6 z-49p4Cg>Sxe~Vmpu-~sD?r@v!Txkoe{5t!-HDi%24w`WXBmUisj?bDpFR!jQHu%17 zyx4V}tjjAdwNP>k&M&^7_qN{KLcflh7s6`#Q2x~}qEV|@3NUf}&%+Mt2>JWt*M6Gj z-@pMMn;w-FhDJm=#^-^_(MIe|x8`}2uMZnV3J@hz!uD3?KI%=jcxYQiXrFEPN@NOF z9G7+#n+p<8Ji*?E%owQN8?%^gT?Gh@vamh9_zQrc@E%YiQK&6>yV5k{3zr=EexTGg z<5AFO-!IUP;--g5ZVY~Np+8(Rvq&=$=cik{3QpaI2qb_%9YmcFBGqWP`M6$5EMtfMwHZ_gbl4SG#k?z-kCltohL_@DWzryULv# z^JGwtEnC-D9EbNVx7b-9e;_{_vWG9+-Vf~W@1=dT;Fp!G@4+$F^lJnqGS(M96u5_k zSQ97E&cA;qAaPu4C%PT{oo;h{Z) zC4w2*0$UEtmGxrY<)hDO(jeW?ouF=in+gTvn=7JQIxX(cM%3DQ4Nt7HwAx-&#h(-9 zR^X4pbL+T&Tjq9oC-p7{ap&EG+U$mGl|v~zdVyqD$18i)H~51Y@Aqpw79T{vmOGcG z3H61}&z{kya`vAn_wCFqcN5)Pxx<1~tAp>)_h$$=WJLvnU`ufBUvj(ycb*1u4$HnC zT7SkWDfOa#bxWYyJb=-{C9O-Yo;3J>04G7%zSdOa5`hD&pw8MzIa)BzprtI#s=-+n zS*B^V+9ENtpdbK!^wIa;d-39MIvtOPaauYGPoF$1xry^Ky?c9?a<&FT6-xNvx?7KN 
zX|0Q#fxz3~Lx?HP%UHW=US{XjAZnhw+6zx1Z2s^MKMtrLe)!^QySlrZH|v#iE-{nf z=3-X`R|Wf5fA#ZmOv9ve%!yTivG6h#kVqlVd8s>pb-lWIa<$%cWD!*NcMo5G@y$HO zX^M)-oHg@2#LVW^+x`8X>e{C{lB3Kz&C&VVSVsVP$$d(e%($F1F8j|ueR_F0KI~sy zy!iP2?_Il=nK4e&aL9F4^QH=o3mrxe45VrifRqsc(ON_S%xVajRRIi)Y(1SN@* zwMyt*5Y?uu$9_PhoLLa856ql6=ERQBIcJ?mB5MqJNQtZqMw1M^BjgNnmM}tP{me50 z#u#&sZEf0a{qFAWaGZ84x@ukRsqYtq1OUc{c}_{h``{cYW+Ymc7?&IxI1Fo=-8R>zhjgolkwYX}Zdj$h*6{%iYF1 z=hWUGUJgtA-bc^g-M=%c%GnZCzPcYr=jwTi2mp|~wwvc(S(>_jIP{!STZJxotKyvz z5mg{EF=glLB@S>_^B@rdAr}S-l8AsQl?Jc~66vGdlSNAWFcC?CnjoEdQ=oJf33Itv zBw<2J#)yD*UQ~u*|fea*hFP({Su0k!t1b9(WjX)FHn z(@*~0|M-W^x_NWb(j8* zb-U!4mKn5Ktvf{f;_3DN_OPw0_ji{&axG_6ptebg7nNurdi2I%HuEwnsIU;BBMYF2 zYK*Czt&DYODj;$sNDQK=;BYdQAp)TSkS(1)S(cea_xYhnyU<#)7~?!IOG;@Nr_Uo7;D{%%uxzON>?5T5p`U$K!t8t+S-8%WktB$NQl_ zG~4dIXU`GlRU57@E{vqE?fO%nIbU8~%*(tilf%Y?isGv`5C84I|6R5-rEn1B^3s~zw z6;aRSgGd67%L1&Y*q>rf%l!H{M^7KX6;AP^ZJ3rZs3%ruD;Fx7j1dD&h$zU`q5x!8 zMYTpnfGntjL9hl4DFK+XdLI=Y@t&oQ6Ts%nKtq1(LD{iUu3JvEGDvj0F%;RgO^V5XNb+)^58s#<=ZT1v&J`X`a@r zcC)VEzJ2AKsl9vg?0P(%`qOE8GE6V{9doPN(~Aw_4_@Z905}H65o3oN=zI zLPKg-tCgVa$J23~yH#fmdS^*RNmy=F87t z9}X`MhXXG?&+O#14Ez2tEt9bp2t*4YrDVZNVXY96wHD9Au=D%52_$03lmS^>=KH(5 zX`TSodrwFJCJWx3j#(j#R4$O9RqT&@?|t2MhiRB%wsk#c-adKy`B!h=ym?(Y{pf=i z@4xqS-O-Dy)z<0zH``}d+qP~0`(OXKlG=0>4!0kWY*pKG7Iecsj1h`Z(ca#`iJAydW%c+##VI$)@`0% zv=^OiYYASekbsEXx?MT6&N*A0R%8qrYY7!VAb<-AS72A@gV@HmRowtl%#t#fEkX*8JRWmOpx}dVyVm=##D!Vw z&=|79no|muXHDKZ=jhd&uPo5>tDDN3>&vw#84kA|@^0M(N7mAAy9I<{7?h#1!5QyE z^|PP-e3&B_g9!@&SntSMV{E~nNzR@uWcJ2YK2%k0ts{c_``g3eba%f$9*5(pPbr`K zC`gB4=wlk0hZM&p2|xj1g~|hJ7LGZy0;3V@U9+09nr;(o`}be{?mz$Huit9_li&aJ zH(&hTg$flB$T-ZJkiZy+SOUVx7_2)a~qgmgCambwlU5s^?xSrrMhh$;b|lLCug z5YYe-LTMFRQ9mzuI8FBtd&GyfTXSBDi?kp6w8Uu``@;k0NEUNK=PCmVMgWnVlN>W= zOP)nQWEsZGbvMVkA_O8Id@#*$yicA}j%Zjh=LFUg0JM!u5xp}hJrZUV)hHl1>$}w| z#>iQmccPG3g);&Z1y*GhAnM1dat;B3hy_*6l9M3?(57xOBt$?9Rq#tp^OVS^`~AUM zyX}0}RMX*JSb}%tY|e4XJkF842Lv#17?#cDdf4BGx?YyWI&#j<H%O?fc@ZB_T<(K~mVj;g@Ib4osV>rFrRwfA)u5=%cVNZ551#N`}ApQQ*ajI;eb 
zXT`z~5LF~Bk-DG)O4giRg-BT9#+X^8)Fvb>E;vJ6w)->4TF;8Nb6-f*6rQapORnlF zrp2qZuDn`~QOOvIsW8i}voWVb-!DsMPBZ6u9*@J&$2mzhhT^i6#jGO67?1*hBvoZ* z0w|(JJl9b8Ey@rPt+NQ2IZMt+mQb8EdC4MRO&FKNLc(yp+YQ5zP^UN@P9s{6&V2gS zS6AEJ)AwI|`NgZeEYCl>>2GiQyZg;{x3^T=>~N|a55x4?SN9JG`ObHKIOE}qZ(a|_ zwEE=h5Zrj&du+zjoFj#%z2A>kmRIlI+UDA}W>(t|Gu2^^u>yQ||4@ax+g^V2#mj&H zKmMP-`qeLvvHu*te7@Z>u(8-~SIaWhu1+Sa>cgqm3>VwW{^7o^GUg$dGQX%ilu{N) zHZmJjLk7ffUXuBsX>W)%VClu#8|f_1fbyRO@|tL=K% zHl0P2W1OccCnj>sG?(b#$~$AM$Yd-fO<5o_8zlt=!f}}Dx{6DhrYQtZg>RQ-NokxW zw5DyFY3z^3-rv18JY0Y8JAxvL1i^(`2)r{KS-Ic{%l2J5AwYBnl^a`*1Ve@42zF5#8QnF{j>l9T2)f;F}XRKVNU&Yq{BM_9)?~?nIz_AoTmN#ewl}L+iJ{9KY2o9 zDfl`w3nK|R>z%8X6cwQgHLEaZ5RggRu3K^jYm+1(um+bHMWk!%hkY-~BFT8v8Oa$C zOo>IsSW{w&hH(Nt=TeCR8Ot#d|KiKn-g{tv@#LBS6``ryDuh0!PrmqO z$+{vRmwB^Z1GBf55c_!|@+nO!;Eb~dIW6ct5T5!eCf&3lfTpyl31x{{YqF@f;n1I0 z5fG6GO7t_FgMksD6#F7<)mTD65Y8FVT3ZfcBL?viaeGc1Es-#20Oh<~MYE8x0vve; z5(kh|axRFdC_e(Fl{uG!DTHd7Q{SH+4to&>Bv%DOP4n!G%gixGV@>8vh5TSFA&PL( z*qyHcfC4I-6%-MRxrJ&WCC` z%-?3bJ3!8ZP#2~Ii>A-`{wJH!Pu!ke&_P$_Wo`*#K9fX}pJYDXt;=Ek#)+sHdpqztg;&^;``|jyQ z(}wDD*KIac&Z8j=l)`yq=u5n#ztcuCP)=S(1A=WOt9 z)is+{*ELOD1p{#V>h+tKZ&GGa@xjl+&ij;8lDy251#LW49)%InjZ=-e2WoFZCM zLXnK9#^N{+-Kw?L5Q4Lzs;hQ;VbxP@#(7@LI)s(?-IBx*AX!pTWhCIU7@Fv@KTk-8 z(3!&MKq6C^x+Nl@4A7wD#EX3Ucv(G@S0U#zqoWM!kn~&0rlNwPBC{Zxvon+M+YtZ( zkRc) zQ1ozEBrlc>0;J4HSw&q4WJ;nv3ugtj&dh0X1~X?*9*{UCRH&Ppb8-fpqclZUBBU%S z$pUzud06r}FU%^E%Pi5E8WHDZk}Qg7$p`_9pfgE%PP4bh8@t5ZcHXEr#3Ivrvl%(F zK#bX0-EKMs-H!{p_RtUO+HP7Ch?hAUYmIRQmuD@-D1aD3u*MF<>|97m0VpP2k^tJw z*`kEt1YnvY8dMNY$yMGvSdhZBep2lL?xxT(Mq%n@a`t@)A z5C8g4lkjm`-reu(rnQ8|pk#@8CNe}cFZ1Pgb9?Bk;4ABZIk@1A1u*yb2UNJ)Zj4GU zDRRbIk<#mA#<8R=pyrec@#*X)DW&vTUQh;0MOKNWpC-VFC{iZ9j~oBD1lkfv|44-{ zc^E*r#F&d)5j?eW*<&8*Dw1%N1kgdqtMBtvRYQm7$y)KVxyp_cxHegzxqFoNnSs8EDT z3;~otfiP7-nXL4A&GBcuEk42-9Im04e2CG8nJ{ z167J!GASi_Mk>)OuEGWtmeeV;aiba8891Kjx$8Y6F<~*yB@vRDPE%yZlIGBQRdK%J zz;vFbQi_@alJ_AnRsn(cuFNHP&&+Kb9NC!1#&?=yDYCgdD^jc!25dufwpni0i_O`h 
z^WMbzwx6a#E&!8no16D{-+uGmH0EJU&i5b~Q%;ovBnAv>Dq8yBiJ)u!`Pq88=otXj z?mpbTe|K{@4pWkXsE8?-T#9FG7&ohS*?Wf)ng*bIcemdjC8pwA=e=XX;GC4)_w6*t z<)RNhIA&FAf(OFmalXFnixgF3-w|@ttie2C1`{Ty3RYV&`dAsA1V1$pl9(A!rb(zK ziRurt-*6i7qe)BA{pN#ailRUQpjOP7tC6gdKf$s(G6E%c8oxSngv4~>B~-@&5*k#B zf)j;O$=@Jcbwd>k1&qL^tc6o_DT=7ZVVB|zfaI}^SxU;1OG+ldgw9b)nL~)FR6tWI zGdqt!B4PxN9e}DBc=nAGHBr)n4nj#eW$oIA(VWA1N-hLV#d{|?HQtvZ27pW|28JaI zGB>`N=2?p&Afp8zl7fg3yTfnNAaGXlUq|0V}z} zL{EGICi)0NJb^eIGZ4m@b1Fm_V-!>&6UP}%$xTygeS3e{FZ#2E?}|<_)1yb%zy8(N zJ-742aCzna;D=v5THvdfU;nMY`8Qi|Z(rN`^3l(J@$xT!`Fb>L&)VPr!B<(*?S6hZ zs3g047tC!k=Tjb2Rs=T>@!7@o zi}m$c2{%9a)$^<8yKnAjUp(?n{`G(IAO44L-@dxN{cyfGi*xz#=B;0KImSscIXaF* zHqm-_6Pj5OA)=~IH^yRSXi;_Dw|%<+w2U`{cWSaSBktJrSdFPjB3p`%iA z%Eh}AF6cPy`=&$UQUnaAY4X8$u5--eIIq@xc6K(+Q`@wSYf>D|B&H}T#E^2HQ(X7( z4h@{pRlOF6IQpC1=+(Q8F7Wy?b})#keC7VI);jT@+BMneNO zM6UEFa>R^$n#cfFKBSsDcB19u=#Buyv|4nDfLzr$Xq5)3Y1Mt!;C4Lu%WA?8GX6#s zaheCA{id%!(ML{LVRFPG08-RM6;50cppPkXD2g%R95H7?bc9(oi-={2c@jxJc*j^w z)ikC81~C^yOgYtB@8nhnL_`q*(6(t!i>M;={ryAW0A?a+O5-p<=$&U*Fo}`F2AX-E zh|x38NgOi}sVKK3VlmCCg&3J!y*wG2m}{_g2u?xg6kDP>zd%574O_M#}JyPVY3pPXEtR@C3)wvrp_?xO@B&K8U1qB}g?w_WQUrvkgfahx&$J-#?wwjm(BeEs@& ze(U>8JZJm$t9RUX24YZ#VGKU>Z4+ZG#kzhqPHBp^?odl4u(}R;I2?d*vFPe336aDQ z5u!w5kBo<5Py-}3DNKxr%%?C1(37B_@KmT%t^J8i#Z(Z8_ip_o7Id$Bwj3+MWiO97ZLASRGGQ)O^$hGGsw9#t|^j1;q>SuAYrkBga!!2b1dN7ySs-+*Vhl*`$cfq=jYqw z!*Rcx#_{>Hr#jBv;tcWd-~Gv-{HK5Kj~ac_D180(uYBVVG5+%9ySMl8e%5h5gm2$? zhVwq`Z+7f$*lwSGcDX<7_Jd|6Cja)s?VP1+mSbztyWh5|Q zM2D3=6&Mjci)j{9wV<%cW}5Z#<@ zaxNT#4{g)-U;pygFJHbo4pUZ}Qbuy#1!DBg zskEJU-b0G2#-R~I1uWTeRux4=#IRmHS+5^Ujv6PhjL3!tq%}>e9=y(-#Qj8sl>m=5 z2%1%efXxIdM;uT;elQ5hSry!!5RfElrp9Duz+Cgb>%C!!h=Lh`8fSq*N-mYPoO9hoAUUUo1q_f0QA-KInJNRS zW<>PfSDq9Sl4CJJL{Tld0F=hHNM4Wu%X)Pd<1Jwcq@WgD%N(|G5)~w8mYJL<0|W(# zrD}o{SfylQMqtfqReOQ}0N#63HL{$G_YJop=h*tdWE?^fS**fxI5OHeB|sF_Tx7j! 
zh+vwBm~+>y2qEVTNXX<|6SpIEu5E%Ds=*NFA=z=BT^qb}L_kEZUcO7&y1pYOQ@yy{ z^bF2((o#y^EV_$zZz2ZNFPGCaW~KM{_f6ZEX?SscwG5hLI^UcA{D8|a#K_(Dka8r@~Jbhqf(>}n^Xh zjSo^Scp^2s6bGc)CFsB@x>q0=tW|~W$j?>f2r^j*MhaMJ7O;VtE zpIx8JA;n>E$(}zyFM0ptzxuJn^tb=PAN>#i@Bi`a#b^KO-@LNEZI=E1m=5<3pMUXm z@fCZgAKq*`iI~gVm+zjOFP>dL+3xo9FmL*ED|blke*eB(ZBow8VAXb?J$`=O_hHHx z*H=s5jK}%x`ugR&*V^ct!#b zL`LVB2(S;0YeE4Sa}hNV(`jlvQ^Cc>#r|&0vmlWPq?8u@f)P_GF(n^7AU2Ih!fBcu zdqG206H6(FU?BwdY-S>*lnmAl$NfBKlcHKSo0W5X91g*G6R2eGn9A$dZ&uGAJNDNX zPh22o3az6cS)uK_oXgAaULJP)ahwbk&{WNNZp>$~q!nIo0P4th!CkCYR~P4-&6)R& z0nSroz^7N2MRaKS>gwunIEdI{)s#3n(0QJY$6=acoX6|S%V%e2Up8m2Z@2I7x8r=G znB^iV+aC}YeNBy1Rj(TylE(j(`0eV6%;O#b1r}!b7W8KJRq)? z8!#S$uP$d$fPW~9-|wgFmVy} z+!Qck&Xr(~&@@35ik2)!Ohv@iMF)_Hi4-$X5fW=UUuOfwtZf@K6^$-91b3Xq;N9^! z765ER$LThSB5EKiIS=Dl3Wlb2 zt`SO?=NrEMpJNHG9aVdoHxkpWSqfLW~=OqCH&LlrZC8Z20MQnd`PE)u{~A{(A)*QZs1 zf{`OesH_E7TLvGzl|*87R*|)Um?0ZNU9_m03W7n~HfE4htXX|##*S2#n1lD89TA== zMyj>nC8n5ilT#CdbFLc$+3)&K6&zH_u=dBULUr50C#zLek}RP#dWsKg_DI=+%gL-v4e*R`RUQ;aHY*Ln6J5Q5Jl4zLU1ykFj@!#qU=LC_Rq$%TXW&R0c)l+tzN zxnd`Z?>YnN79A+6X;CpSHg=9ZvleN+&!&!O8V55&g5z;mEmp%}=of(qn-HdXn&)9o z9=2%ndeN9_ z5mYUvRI(w0sv==!&P*vHWy5lEqV{P7IQXmWI~l zfDOrj4JuYL5IO`;wf6^710;cx#Hw(Iz?jgPuq7w6&oh{!fg+l!s4AfO%C`cPWSgrE z5@eHk9wbjiA`yu~9wH(W5eSql1w@b#t2IcrR_ zNGYO4OiYJaeHTbWN+UB6nW|M!I0H7!h8T+|m}n-{IZa|V&9fB68l@_llLCzri~qA;eMOrKppX+kIy_gyzk<1mbie$f)@oYsqV zDk-aEEs~S-&H(p^K^0F^emgaeG3TypBo_e1N^n&rW@Z)usYxCc93(>MYK+gRj3Gvq z-)w*lK&86O0aUG$N{!8c9e>;_Yc)Fp01=C5DdHTLENbTc313exhzKC&l$lwxvUBwn zoM(}O4OO@MG|i4NrG!|;;!>$+YC`P6Kt)R_r}$kvff933Fi?U0Fr2xCEBW&Ii+69| zG;On3cKe5&O8oNEi@)*1uL8>LW_A1F{deEJY<(E_@{j%(f5!*+fBA3!M4QWR{`60g zdYNZ(ECk#xa*3kw_IBbQ|NME+|L7n5-nYN}@jM=ep>@99P2+gCHEvX=lCzqv`i^33 zfn+!4;t&|R&@3BNCde4z)rapMQha%P6R3Ii7n|j?N9Qk|UtK@GT%WC-_lRh!hvN}IFE(pJTeeM^r~TdT^|vo~`(sL}y;v`n zy{KJ1ek5uS$DPzjGo9dc>snQvw~b#RA_2eP#G~{4P`bnsV6R()B(5D zT+*r;B{ehEs>Gw36V|$62#Mslp>b z0ARC5a!>{03`JB$O}(cgQcn)Yyjq@P4O0ev{Or2S(>I<);3-wmy*fx 
zDdGYwmrQ_6Y@pfL13U7&!=#dyt3I$<;L14EZ$KcS!L?GwvDCAs-pzFGrvY|Og z!Fef#92s%UX`07H(-4Pom=L(>oPq+z%kz~-eE;^%dbL2HoO9#-bZ-#X&fd*$3Ujy=kp<5ceU zM+Iw}u-`u{nodPOe7GyBh$YTxbN$JfCT3@pi0ojQVJl5)I@L=5C9+| zJh82{CQUIBm>C!|=TbJ%MUM!6$GCKKt}4xp6c>IF~>Dp4u9h!bKqvnpNx zO-!kZ5TX#kp%L7M;hKmF&P><|Cw_r8Ah^8E7g`D*=F zAO7ku|KczHr~kA69Q*L=m;1N(3AkK6enfOX54$F`%f%`VaX9APgd10G?rsU;n_s`& zTwXqU^5|%AvrXAt-!F4<&QU4(dbJMd&X=ooyCCDx^uuxgaQkl6b(_`M)#k}xfBW4R zpM3e~?BeR|f)PX0kGm1nCr!pI8dFK*Jd0{6xpf|l4A6P+iL=TqDU*2yEy4($Z^TSg zQ_kw7>AH?XTN(uoO{j{oq9x|+i2J^S+3MXNd|2X2K}#v1FwGOQ%Tiq97>Jn8&NrG> zMP2U!A;pMDG35|CPq107^L%{&_U-3?^aIy84u0u^Z(3k(m&>+a0J#qzZtm~5;~2*& zk5fe7rX1%fv2#RZl4G3L%MF;Umfh9mrfYrM_z=7qxX|R|6z6%f+FYGq91i<8Z*PD7 zmWLed005A~NklA$+i{v+eEK}4>1?$!v?O?Qe>@ChPBS|rw!ZO~n|8Ab_lFr2 zAC6Npn^VfF>^L}J=bJXD%5fNS);Z>)n#*ihjBL5txDd2pl|CXOJ76Y4WMo7~Y8`jP4RK%&&Nt4r>{`cx$P;@c5`$E5Oc=8fa-GkDsHq}= zh@@N;K-EO0Y1^i4o$H!radvrS&M6}|4js&C$})>3b|@kxi41sI}cvkY>@QUv~ZS$&;t+)uvx`YKla))^oM)Q<>s~ z3`lsK=h+|w%}E!FRZh9@TShd6l)*V4V+l1~*tBbVt)Mmnf|C!M$)Q0T+9Fnj09aK4 z1VKe4gpf6^FBa>wm5Scp-bs<=Vl@s^n)0e`E|y*6cpAs^)dd3Y563CyVVI<(rkP|fP@!Az^k%Rp6i9EX?_IZ`tf&AFHwRiOp5nw27w3n5f~Y@Jz2k&+7|<&@cl zTyU6VoXhR)y%p;kV%`7k-~Iga%T5o6>&^1%($6R}`O&lQ{r)H4|MJnZH9I%wa-0opeRiJAqULFy zQk+$#{xxMOMHKLaY%G%F9EqJF?)DGN*7tpkahQ&C98GebW7~B7YRS$qx!{7TiU<>> zoKns?7bG&Xd7g6?MC{uh0CFxlr)9r%$bH*Ok!emv6$xXCbDX?$muFYrg(4Xp6)9qJ zdA?}^?+@FUQV729`mXJX0+<`O$5-FHEJ*;VsBmzqR#HN!p1*PmD-=6lZThAOM4qs1 zLf>|sZ@qFnjwo_6gufKcs&FkBj?+!}-`KK?QKDoR&U#-?lHT>@FhhM#X`}+3& zm?W#M*8P`Xytuwtfz9LL&bzqU1R^mJsOk4eHRg_-FGXT3B9>B`=R?k;sjgR-!S_}J zK#369v3DFiH_VN59dk?E5_im<_ zWJ6HGS>c#<&Z26dh6u={r=E>{~fD#Zu`vTM42&N1bfO65Cd$q7`Y zm=u%(Dj=$<#4K^nafoFq{y5tZGlB-sjtJC32tbh1tRP6(FBYrKxdJ)gU0yz3t2 zcDt;4dw)Aja}faKvrYHpdSiL)T3Rhu?>^kz?~h@zo^!f5JHNSoPpDn*hH;qZIhTx% z1n}W7n$c>pB$Z9y7nySrB;R%Yda+tAmg6*th!mUWqzcRq$REc!=F^D&6kSI(L6&k# zjvX^HfQfqVL9G<2!Rv_NnCsoIB7nerig>H1ecb}!#~|^#r3Jux`a9>Erl}nOv#SnX zRZ}$sAS5kXiWCtvkW#dWsi~Q&GC4ST;H{KGLt%K_B%7f@RSY}k 
z3J8-@OtsqVPdhLJVvePZxr{0M&^qsT`}-gL==;C_yI($hcy-p&AN=UkufG56-TU|3 zAyL!+{O7MzlIusSr;pEn{O5o1$AA0pG+yQuzx@1j6LpTCKlwB#J?!ruop*o#?|yo* zadVQdzq=a?EBDG_&ib$~hZ!D@X_(@hn>VY~YT5Uzb>DP;wONI>CFJqAbA*qs9yM(T z#$`+^A0Dk&PtMOpg6|Mi?hCM*)cLZA4IJ07b^1l<;(5; zR*R@wE*euQlD%_X(>TWp()0~^XCTXEcX59H_{rl(j~^{pOXt`TmU*7{2a9p{@bLQ0 zyTgzl4s$kiZEz08MYX3dJ_&7;O5PujSxhR|S`>i|Y`Ivn z^JXMsISC;XyX9)#_H8cnJRM6MiDaFcXoE(FM4uZ zOr~Mzn1}t)0LKy;3{TnAtXf2i0T2g66fi=nBF<`9Ns0vg`euKA(V|a4O$Z)HLfgng zT5h`iZfHU~i7|!7JMaB!v5wPp91oY**ZYSDW~Vk+Bq4Y}ulUM1SEgGJu zL38Ze?l`8Dvu_$hnsd6@ZndU9RLwBaDb}A*lSmQv972fE$k4YziU^o<9ss=OlnW85 zsHp<{W-Sc0_pBKVr&#D3G;XS<6)e;CovC{7%|4O`a;fLKV>YlF+dxc#859&0Yl5Q! zSJDdr8ZvX!wrXl>&UtX0(#ad-oFirf6D=`C=bd>L6($nViVjyPbwr4WYC4y>?fjS~ zr7o5f)I9Om-+p%W~{{r3Hb+aor;nhaB1tQPO?9wZ-My}fPQZk!^ZPpJUWql-&N!<17@^PF(U1B2U zKvfb{BMr4?q*QS}0H~N66frRr0Z|aKA|{F=D24_kij_lYW=IZz0i7uWdQfNP%}(tG zQ6Th04W3pLfzT6%nit0y2phvN6*nXsqog9Sq@pF}m~t{u=L4dvArr%Km*%O=Q;89Z zc5QRM+Bl=Y4$KH4=P0VBt}zH}o_i`;K2DPq3>dS)evG+rDOP={j@dc#ft#*zj)MQ zF3a`$IPHrH0;jA7SaN1CG-%pZRltPF920t!?U@1I_unM}bq`@``tOigH0O6Z#RclT}A zovqHtAu@a4H225jVaj*Aqi9*SLG#pjM*!nE1?R8N)>o_Eo5gtoAO#wx36P7J0nV{Z z$&gsoL@np+*h?12KBtUGrIehdq83Gjk(n6`RHQlrDoEK3OzZ>#0|QVd1TbQ#f8aC& zG6MjSBC4g7A|;DtDXC;tC1OVo4H)^~7_|IhyH7XvJQ_S2W2Ji3Nh7K=tuv)XPbQSC6~L+0(w zzxd|OySq2*vj)B0?r)2RzP~gI8T|b|Cb4mzgKy(Fw_Uf`oXue3+{<@w)w>T54+YIP zO%l1=?ZL4z-ftiF!(kZ4{qC^8-<4FvG-*yyvSl@`&Ql`x-pzU5P5WaSfdSPh8x`@u zW>&PYCjcP>74SX)SWFWl_KOxkrS6T?Otq4KM0A>_EX6mCstoh!*g5aIuFIvAQsy{^ z#)%X{2;Q$2OG2FIahe8|f&h7*^E|$Idc9~|E^}yt5r=;1+7^lCVYs_{zu#?R$@klx zA^NuQP3ybREfzj_M0Cy}Xwz^TI5=nhPd6&yW>R8uU4xz z_)ngHvRR&)^4-lg6%EY-T>xU_=I(y@*{|NbzsuV_Z4a~?tXO+jS8Tn9d;N`NNjm`s$&nW6I@h?L2K%YeaNtqCs1E|L4W#1`)5fqiCVRkjxrVgV)HRlxPSpX@Qyk6I=O*bB=X)XrD)S!V# zX<9c-dms4fd~vyH8^`U^_&Z;HLKcYZhfJk*1SOezFl#K9{2)(zid3AI9cKe_{-0il(yC?6&WKJ_6<&@DZI3HXHJ~TcMVhDbAILiLqf&?YET(0Qk|3%Y^J|A!Ga-7o%3pFEO1me!vKf07%7(SSc#C3j`u! 
z1SbVm$$(EsKaeV^5#Yxb!5G0+Q>%_h zRx<&}N|V4`Fc(Nk(4cRTDr#fZJXxl`g{#>JXM9c1`De$fXEa({)*j8SM{ag$6Mp0y2|G9kWYt-ps0+KE^nXBYS2?{h~`bF8V&s zu@rIa2$7g_%5~~eEnsz}T+@VVR77P7)GoFPk6cLe>GE{FuwMr5KKqf-U0Na?zE%IR)6MgwK56#AoYG8+=p08rS!} zmOZQovk-#tMzKJ zYQmxgCIa`};@NS6S`bw>PtZYZu&vRLbtK4{ftr zpN}zr_xk2`Kkp|P6Yj=@)W)Q@w+|`DX+FkzBr>W2pM@BkQb5FBefxUYjniQt97tBx zfFH*e%mKkUZi$*Y?gwyy48T?U4nV4)hN_@yDxiA$sy%uo?}>sVLRCPs3L5@M?W$>8 zRt33eAZn7O5P3$eL~kTD&{HfIp5huSBKj2mb_#v~1Oy>6(RAuL$UQpupFHb!_tXBq zFgK!z=;wI?)3yOID=K*!5daWZo2wG$rt9Z2ynpv@_plA#8=~Sze{RilHc%|0nG6^a zjZLAl2nb2gW|7@6T`ZS506am%zNaGDQ`-bj1+{IOhJ=EQ^I>Y?Lq9V?6 z$*Zn)-UAYoxB8cbFs78$7JV=+U&pfQiLd$=UD(n)yN!Bjh-!1n&7dnVTp1n&X}Du z5iOEco3>HZX_|s}DJNzJrx7KHDiJWD6j1{LWTIS7nbFk){4t4B(YgA#)!;gog;Eg?1-Fq;5k*O77{j1L�z7R|+9{`~8!CI8+Jzk2ifyPPsY>l?qj zdsr^ZfBujDlYjYVKl$!1C%^yXvq$L4(`Og|;$ObGetLF{bclL94kEBVYc8)ZkGs3G z4%W-%JZWe)tMdz_#bJV9y!x=+&*NM~!Fw`QAHtZ@`PrslEf$OA{&@WK#h3GNT=_+w z^7;BKG(OF9=Y39cJd8RQ_F+sii4H0UO(W&vbs8gx1ahVZMvJx+bqn90cMGSAMrbA? z2@5sewrs(gl}q!giPd41kPtwxa--EzG;FHt~*1MRlk@4kEa{=?lk3pES$n50JA zP~*6uAbRh7+ccr!r;o2LH_OekTdkMmT(#r0&ZoG){cwA~J0>MW_pm!iNvl3Qxjyrr zkK^#+ZmS$96d;E#fMg_h9A@SY85-|J1&C8oljsQ-tMi=Id-81qNS-{A+wDh@-SzdR zZM(oWk5e3md>lfzBJ~U&fPk1QHJz&N(^W$Zo(|jkkXCzQwa|kW({f5=SF529jL2G* zb#(etN8m^qjVkzs0Bh1TA^@s_NL7o9e$-i*fkDOGm>Sf$X|>aFY=#wJX+{7HMubd& ztn<5{aQ*o4!~S8lK08iP5ELv<6M?Q5tKdALzuO)RanW^v zdUbyB_QRXY^GgGBOp9fA9Cqw!xm=}`?(QB`Q4JpU2SwfwV@>Qq08_0q5Cy2EK$h&i z^TDYJpeaa7F{YeSHnpy68XxFXtP=uADI%hxV2W^hqzKSdY97f6CVcv*8pN!WQu#9# zV1#r^z(ue+MgRa)O=%V>_OTuXwUV=ACdSSA`q`(?+P-(*&C}eqZQpkQSc<4>sjFTo zSnX+m-aDwh1(P9UDY4{YrE+bE5DZK-rqV7J7uQ$sK73f6of+XI^s}$OTV7mkkK_Hr zHY}K1_~XC*2WdQR&emUkej%!hzI*xd^|asr@!xoM-s$zG>zdGbzwG$=qswJSZLme( zwq5h}cdvf%+dpE@i{(<$9S6;Y=9oeVVCEc$;I1ytJhNlxy5{P|^I1g6-S2mEPW@t0 z=}pfcKfk`XUN2YEG&OC9j-tw;OuKX(WggQ|tOx)g3$T%+(6;Sz*{wGHY7x4|xkcMC zf}#k6bxm&lyZP|v-~9CFufH~?^?Kblp=lcD*-WJrRdddn!To-lReaL`p{a-zQFE^8 zy2Wy}^p2%uM?`=Q*)cEsp83=if+?Xsesp=hZp~(uNT!SBq6uxcSa@blG91TYOi4i5 
z7qG=-V&ImR6NRnxCRw}=8`EOQapwe51{nx^U6zU#WC zdGhG`t1rL!(QkeA;>DxQrdN%Jn1BhWfC!493e^v6QG{Y8_!9 zib)pTGUrlMVi9KcjsansMt077UqAoMM1ZM?cQu1Z*pn&L{5(ct?+^(+tj?D=H?J>y zE{aI(*t*7xLC$5nFIVT@IWPg|$OjTlSp^F^MnW9N$cT~zjM?$waAd^Gv&}e+UE?dS z4M68OBIwiWs~G2-+YiM4Zg*FV#HeW-5Ax)C_5Dwt-+y@9xG)v_#Wycq@Z@Ni#?9H| zo7+7T3Fv;e{rt&woMY2=$GL2W5s6ywFPD88)8hKPTlkmne)h?yFC=9|?tDw+_QyMR zAr_f)LFaNVQ;f_+$SFoebW8wiiwi2*5;{$$LK+ z0Rv((GgK`_keD1psdNA81hZ3}10W&@AW}{_a6~l#y%O*0om2xns`a;&f+t}qI?qHN z0S$rK%% ze0cQ`&0-c1^9_p_ARG?IcW-Yti=LE@!+5#AI9qInhb_(d>GIMe$6*fLidr)WGN-2N ziL8ZI4&zi}VV&lfC07De1|`HyRuFyD0y}b!RJD|4(-_ks&qErqS@u`w{l?9^9ibyF z$KmL`GXQp;PywlEAtuJqH%-&{9J3T<=W;1ojM=A>n!s%c84bX^_j8=pY|*xAno3zM zS6XRGys7|noyavbg-?vB%hoi8PA$7vSxq1}&J-{1F*KhAk9 z1zdw@agO_AEJhqWxll?T=7Ld)n7AbhvBaV@#Z@KKRCa#VlpJ-(6lb zw|BeS%i|}X6O!dLS79>%BN8DS@Cj0JitMaKj#PPqifU0UCRwd!Tj!#pA_j({RzyU= z5g4E$0@nbfTD>FSZ$?E1U`EK*Ed^Eos9;s_XQz>;O0E43pyrAiRWmWrN0O={p_-T# zvkW#jSRSoq(YmY4=FK~O|4!e%D-T;b9H)5_k4`a|G7?QuObaWF)8KnVbqR zv6?DKF_T;zxg;u5oPwAksfr@h={<>Qk=!<9W@(PA#Rizq&(G1hqA;g&7zQ|v4qVR4 zT>eP5?mEE!sGzR6xK_MHlT-WlrO?=oLrY<;=i-G2V!dN>}37*i6M za#}6=lE<&U{GF1A<9@q(_Sx5O-ozrCi_86C2wnSbcXzixdUjakY`G+)Tr_FAJC0*9 z7hu^1r^PvFyVZ|>^|N;289}u)J^;Y&{k;JLa?u?6j-1~fj&n)?NCZp_U?pWALN#-x zk{jnlg@}R=MarVBdZ)36`_w58pl+>n*&x+-vrD+q{Ng{Ld0>JL_kzmoAo?TOpbsbwp%q7v0TMP zNML9HqLxeX&LKc4S&I}c=uT59N2tu+`CKwEEtiWiMia|Izxwvuh2zcH#eR33=lu0| zufG53>U-aR{`k>n|F{3=zlga!K5uz^|L_0)^MC8#?&kT``-ks+`Pq}}#pcoGpZtrL z>s5bse*Wg+zHP$!<=Hq4FFyGsjt{5-jCNDpO>wc_z|CYLC8c87yC!fGr+mbT_J&9dsp=N`sz5(B~P7ew5XB0-R}dtX0sj-J9NxGXsqnnTyiELQ&lu0n#wdv$F9o` zBSCQF+5iA;+l^qmc^G6|^g8D8u;0yTE+$L_=F~7aZ=^`HKD%hUHi@)d$Beh{-lsYF zuDiT^bba;c!_Cdj?VD*B8PGG8l9S}k;w)=oM`?;ht(c6%u@8RPEf_%aESb;F&X|L5 zy3n*x^8R7ByW6Lv^HhogGMB7ctZ!RlAOd!5YV0wUF~v#B^2PJ#kDfg3`UN2uDNH0{ z!(lue;&Dt5yW@60j^oe-&#H#pE!%FhO2=u=A|=mB3aAo!W+V^UQi|2x0$H4^8M;7V zbBdt}ibjpUKMbOBez7scVTg#+vhA-gE}N$5ngx_Bc|=e~7C>fia_WE#YjaGXR&!8l z*1r@{0hOAxtEChXJss(YaMf#JR|1v$Wkeui1XL@a8Zlvpsw4tPzy>A) 
zT-hF~g2+^}Tuv#n2&MuEW}u)|NnJDpszUM3)~VtGEyk_(kw8pn=w)wZbiaKAfSuMX32w|!Wz&$@Og zd2&s6bN4W&)UP&kkv4ck0LQmC+q+|QO|x1w3-2+FXBQXuyZ!zcpFF)Hg22d3$Kxm> zKtAT81`HTuJPbp9SX8A6K@C*1fsx~rtq(v%q)6MeImZ~M^Ru%ObIC;joFnH#-*x$T zETYb{n$NiqCL*jof#zIPiKs};bE`fxHLLs!3L%6Lik#9IKF%YpPU`BNp+%U;0CEw4 zRR5TnA`%$1ZNJ{EyS`5;ug^DnJidJW>i+&-RZ~i(YKNJK-6?1B;|!{lVrsQtsdWYs zh#3Hyk!Xpzuyg%#r55Ly7ru@2-1SY!&TO3KxW4@UU;O%;Uw)XcE*D>X|95}>XaDSr z&p-LwKYILTdw+Kr-+c4kS=0Q%Z-4bLqtBBT&;Ip~FE;$_ z`PFK*LIl_PcI8%^m6)ZJO3tFid2eKMj_Dz;mJ35gb)P+cn!&(1k8A}bS+=rRZjQsA z3=T0xRSJy)6Ss(rRu@YIXlj}?jS}0?7o;H>O$KAGFGz2!p z7#MSzRV!%GJRxWS5bs0lo#wQ@ID2&cxa(Hyi_5O>MDlTae}8*_JWK@uh}i|#v}KBc zovNX!H*C5d8QV6P%}BCX_GcHD?P8_IBI*#a-!)QB)-ncy?Z!*s}PBR0Z*WJ1XTqjQ6sdE@b};Fli)XF`l2<|ynq@47Bi_; zF_GhsSeaVvBLXoX)MNgX671&Nfal zG$EG~LJ(0y6EP8G$3(0Oh%6#)+j>NktSV+IDxg-#rFd+8XuK~emL!1a9Oo?J%>dda zfI^yu$s=Ij_&Lr+fY_g~odC!E;A2K2Rofq@=a0_fG$ESz+_pT;$L+B1m(AU78?&In z<=L7sfA;(;O~)?y0`8Yze>+U&{Bm zR8y!BAULm*ODfBLSxRE06r-6*P2)UOD1<~6&ZT06h=v3Z9D_*>4aJW-=HE;|MT&^& zDMrEwQNSv@y)salonr=sI-CJOX0BxrA~DxF6Q5sPJp1&M*RNi_fAf}zz4vh(PMjK3 zHDRb?C3ehAWl{=yXJ`WC+T|pAzUkv}4^S>IH`k9J|F{2- zf7Nu&>ipSidx$(&Nk=MaF`bZ5fi&9#@*r2IM?(|oVlXb zPAOyrYF1JT-i5{^DtZ?jx1Rde2gj?`*~R0hi_IA`ftKy=*08M3*Y9udvlc@zBSJ6v z$O_Xiw8ThA#EaISU#_noZC1;Ez23CV!cnl2gXd}7-rnB4di&;Xw}1QLpKud0#N_M{kyDZA_No#F>_c;EkiAJE=Q8bK|_D6M9%gQ%Vk<_wHnI zVy=lUWXR+UOidKTz{JE(N+eYi0s%E@xV~5ilJz32J1)aa?c#2CDBzHJo+d{H8II!|n)TVDnWv#~ z!0hg}$D6}Y0N1M}N?vd&Z?~zI&CNKo{Vs4s`X28UAwOs*J2bXg^ z#nJ%Pq#!b>Q~H{rh=3`jCOF4_9FD8yvTvJd8i~j=GlLId7^j#^%$XRAi366RDQ9ww zQYSECYDk7afJg{PXsU6F)|eUAabqP$f&n`hLI8wu9J{s^fs^J3&3h*zF=g+4Nky~X z-`%OI5w17qfr2T3T8_CEOLZv?SR+2Qm}V)=p_W@E*TY4tkSVlbn)gJ=0E@OoQ#Ihw zJ{*R=F*h2OwD8m|)_?ZnUlk)3+$@$C7wez=^^ZUMq`PdwZ+-S?xOw%%7pv!g`^)|I zh|Yg?;cud13IFP!{|gJ)~IE@+UySVlcym?oyPpI-HTQAf;mSZCaR7(sG zZ%<*UK+tS@Shylw3f@c)EQ2G8mcPMWLYMrJ+v63|=2+%A| z09vOkBg?FXiy1&3NAND^(l$-sFVrl~Q^|SNF2{NP2Q91KF8@W9FNn)ZSNg1BcWe3U5Zgb0JP-7j(yW0;^o=qhhIHkwf_3iqkeU6;D~v9 
z|KaZD1`NBl3n5Im`>3{R-Py%z90yAY&HA<<$Nl>3?E29e2ZbVybL+*5!I7a044UWZ z_~zZ){S=2(vT76Bv*l*H-&vk0@ZE7cjCZRgK6$iy{-W)guvmKL6oM_5uIU_F#rIl? zssK{@!@&>7DfnPS4|}?~-=|_R?T6#+n(+DezI=Rnaed+2&2FCWq@?-HF93}hh)IC~ z02n?hZ;-0k(kfjE%~Xn-T2YBP6)Q=TXsM-n9bgub8oB^RjsU2x))2661JHmFL6L|A z%|I2A(E)LF2~>EBA{cPsO35fC=M>EcCKiE@SQ#=?KvPgPF;Im9_48RvJTp99MP??< zz)%o1pb)V(u<88c%kbhc-+XWJ?zY_Aqz@m`{+@Eok`zt6YZOUTiO~g)F&P$9%Szz1 zX@XQyAgUA{ha5E((Di!RIVd@@#77rxz+gG8T+?rw7MjC2T%MINWtDW;4~_SAoTX-J z!bG0fKC(*?F~x`g?AWXjqkTJ5bXXo3{4Rbuqam8`jw}V7vAB)_N$86MfJxjV=b?3{@P(IIuKcPi<`F}!jp^OeKB1umWw_Ob4ocOQOboKOT`;N05vJv_;9|y(h|7~FYn*$ zAe(k^wpgl3R5@bl*6WgoG3R5MqDC{R;ih;Zvem5$AVA7dnqEKwtS}VfQKx9oWYCl) z7cH5c%PF@F$CUjPSIecpc8c`q$zxH8!xVyV*sqtX`RekEFTYfyBqc%KA9qunib#>n z(fAV6w%c8K&LgxP0;WtBdo?vx`ea1TDL}yTf+-_{mds{{A>L zi^bu7+jR@?2*8}@M^}%bWRrQ*b<5UgnO0r*=57fSNg45meE(EsZ}StYzH!52a5lAbD767>#h!G7O)hXzS(_yN>{F|LzE(Pl-rXd3X zLr~RvykkXF>dB9$3IGa~RWAsrCs24*13EJ$a@FJ+0yx$-*k%JyAJbvWHpmq45>sbvkg!-?J`GxxZN+FtXADJ zm01XB$$6JtMP!DtNqDLop*7L&9Wo!Vk+QqKSgI{ zvso6|si2vx*9(bp8f6@2M+yKj<`CErvycG+2&e@PX_^;(uc}1km^^cE4%xkU_WY9< z&;I!zkv0liuW6wA5cXKhDCQ$S2bCT`uNS>SC6PcRz zVR1K1(`=i|t9d%M0Uw>Mf~R55_jh{(e)9N{V~SH;b*qQn{Wwj)%{0!46q+Vw8OEs; z6DX&m5nN0{Gr@2}StNzOXoadafXErmHB1IfKbtTRd5~q1?g0GKE%DD!_5)iX1 zrLgy^=GaTlr&ZS}7zjj4Xu?UD3s6u0kH}uAyfE*5(}b!P1ppPzR!mh<3DE^V$5{2F z67&9Wn40n8;-YDql@O{y2`9 zqB9!~^FR9gf7nU+#V>x^F2Z7c;ey{k+;o9{`tx7@!Qc49@8106FTTEex6(GvSpMaI z@JBzmc=YsYw|jT<>fih0zxfaUY^e=w7~-6=N|{E-=!27zgJ%^O$KmO-r&m{(!FeQ} zN}1-l@naVnmE84xN$%;3r^}0#6y!M$yW@wO_i>)v*0)^<&W&-JQ!-EFNx{HKMVk=% zj@PTEq2OD8es$HYHi|$X^E{?JNsdg^FFR9dg4bLgZf?H#{IkCA(lmv}2MAq5i$yyh z$K|T~;?vKsudYeeAccTAPH`IdyW#F(8fTaa+}}MM_q!(0`izUSPd?c^y}o+(v|TN- zqmeKgW;@xFs^hRm{-P=Z89+uTHRVtYxtihXgtFnRpf!XF2~PNS17@qK&*(q_fJ_lU zYcC7t)KrboxN85U?t6>mT#94`s|@W5&8ssYvr5@O0Ccry2B@)Z!d#h5XaJ!05zb^a zpOL_7R0MwX9H%u3WO*8RmW9&T>lzj*v)*#<@Gx~{0CT-M7K-M#nTGoTd9DS01EqI1rQfC@P@ zfRq!01?P~Iykl~Pn2XIR4RJW@5Qu{338aYl;H_jtLG~%nP2)t0cUVfS>5{%MVPfXP zC}%D2_YXN^KL88viD~8BA2WwiPN_ieqBp 
zIviLoS2@K}v~E_J$s5mcCZdv46FNppswN^@$+Q(-q%}&2YG{P0p_w5&t`Z0ZsiPj1 zx)Ro$vw@Xds{0fXb15#kQgYk1O%tYZeD&SSloKM3!|+!>{`2WDsFXO*IVUYAWEj>K zLxh@|ZK_hT6d_^)aEJv_0fu2>#^4)vE{zj|=`nU}|HXgyw|Wo%^iTfn_2r65?{4pR zV_bF&>Xv8AfAi;m`Sj_hKmX=cQTZ?a_-}pp%YWnk;#YtAXaDwZ{{651?(hA<{{F*1 z`M>_3fB*0NhyR=Z$=~>|{+oX`T(tkufBg6UhkyFtwX4rBA6*Y`Zd+otIhWkFT`swl z98*adoa5DMgJy?e`Z$uc?RGa#Bm3jK!}~z^G3C(2A^KmA2JeHKT%sB!;RLr_OCW_G^q!{ThYT=bjGxft*`rIJg@9D*ZY z)z*7#+hG`UE?wKNSF2}F9;2pdwph);Yn32rT)XGVW ze@YlvI@t&5WBgierjQ7!QcMvHv@Ty&0Wh8fH(0aM5v}qa1q_S|7@jf@6>3?H=tVQY zDU+}iDY@PPQr7{eg=>wU$J47-DYZM9X)0>IUhwsW0ia&{i1i#NMn?Q-s8j^{D8)Xl zomC{~WTrj@L?lKNsm?H{elvvH4G~phgaQ;qBp}1uUDsa3$UY9DO!+ij74OurI4*s# z^9?+CoTd@)9`x;79Y^>6o$U8EN1t+OyPg_9r|evC9*@J03DF>?()OLHwcWxsUDndB z8sKt&_rXE=?9*qYfa;tAsdc`++?>TyMnh!kxIs`9B!JWC%`8dA%2;zDrJ*4-Ly?O5 z4UmF@9*4MCEXQe&Y$cUA=X!|kCwRIJQG+cHM`wusFqEuoW#<=`qaesTWYFo+Lhk&M~ zSS&il<8paEo6ght#k0$cv(^3G?QzHe?rgO_TlRrX9Ri1&hr2_}ZFe^8jsVy-ZI*Js zKN!$y>x3d|!3SbTL`6hIPe4Zl=eQIR0})k$B)Re4Q~;f*v=oFYRmH$GI0r;N`6?>Q z5}VMn&H<~pVg1^4+8=?ae)xo5t@<1JPU&%qGz9Tl42zf8Z#@b zg8t+G`0pOKxBu#AzYL3zl)An>jN8?wMReJWcyYf!CV2bBXV2chfB9#B{gXd@@i|%P zd!6=otN!ZoYsR?4rB@4A}+jLTNit*w}_xTr}Iwe&t0$m$qoIM-l zSf(+S=-be={W0dla9A!D3*UY3#pi$YJHPwOzy8VX+xK}c`}?B{9P@1E$T2Wa)4pL^ z2JcWey}!OV3n54m-$*WVjzimq_2SWC9De%KpS-;rHtP%a!{?uUuBmKq9}fH7x^c_4 zNpo5(mY=@(^u^~dn9)p{#+zB3XUTGRcf7lwiurMxkH_1#o4)*Hd3_~MK7)QEj5lbB zxwg4%V1`0O1cp?L>iV=95!#7jeVSn+LQN9~LI5=r!g{6}krR;<+PB^!z+i#^Du%!( zvAv?I8M0XoUa9GbRypjacD$kqRYhwvj;Cm#`tQ2ZBs$Hq?WD8*I21zQ)3^_SkQtc> z0gz8qol5FZL)4nNf})~H42+ma2~QC3%2S0JCyFRF_pM%u^&80& zJk5u7>s{;a9_|6G?b_w~jL`zYX4PNhvmvF$+HH6H05}eZ8f>9zjAUpiz>W}3*SG%)@mTuFjVGyANM}`T1Y}>{nAR=Gv0e#rZO&F?8YX z{=FkdYR-lIF>QyrTWnmjc>Ctn)63;2Paf@eyW59d+ik$0b=5H_4#jI-QOJMD6V^SaS?v@=fC>F?|t>Z|MGwNzy3e`pBCpAkN)t} z<1u&LlGEgw+t!cM$gUZu*^K+XkJD^`O%oi#YOzdt1~rx9nouMG0Xf=_$6-7gumPY0 ziIPk9?2;z5imoLyL8en9ZyE@Ypjc7GN(d%MP4M11Eh3`**=Cv&6?xd+{n@|$(-)7P zt(Kd9wVvZ_rRX%8Z>H@%^kF)3D%mYJFCIVj%(*0TUdH(Oi_gv%tH1iozXX6eMgu`1 
z?_Dgj4;%>3&laD5a{l?JPoF=3-gOHmF7y0w^Fec3ulxOezgnNgUw`}X@UR_3%slzf zH=g*D7tem{(-&ubKaIP~%kxh^`E;{BOL=C8gqU*7Id@@wfA`He%{O;9A3ps0{5<{O zi}SC(Shnqi{-7{{raIM>;=dbSV%^ySVcn7fA$&X!K~0dT z*8c!im9M49Lk0#z%1#mYwxi>ng1leF{{ zAz`PPE~05gG65nm5!V1tJk9-2gghWM1*=;FGlP^PR?oa6L_ktRLpDG~&?*tF{MFOu zT9ujL<2_Jc-N;V!0|XBNa1rO=t{9*Fd|#u1!~${?R2od5rJg;eIC%TSYc7 z@$A|r9FKF;unWNy!RT-tQZBh^=JVA>+xca)zmM4!Nr=!ad(E1pOmhxxnC6H8s_HzY6qx|j2&nJa z47_I$1T{ybs*Up{XBF_|$jY*BTkjq{dbS&m?>^iKpl5gU;XMGenl0h(?(L6$>$72) zw_{Rn$9X>D^ON%nEa`YS`p_j&(z4pD zmtAweyCdJ--tT<3Aa>(n*S1aHwtz6DG38vOfI)B}OJ1+TAODR%`0F44{Qb>C)4D0)`J#=}ezR<2ku;9X zI>k{u0l|88wi~uDZ{FuZ#k+s@>*24y{nLN%UoU?9i>rs-&!2z#;y?Yz|51Nozx>J1 zxx4;9{%`;1|Mma@&P6jd z80J~QoTC!chc|CufBoHOpMUyh>VE8bKiTduiae1G<|;m1<;RL0_{o}s1QQAE8>&M5(;fkmXpYNX@ z_A?SZJl_AG{@XvNqQCsr&#azct|-^*&1SRNAVBH+R!3=eJ{{Td+{TCd=llE551;<- z?MwYv|A%ij8%cZsnr1x`YtBnS9eM&&N@S5=;uyYH0st^3GB;e*(1_p&MvSn4E=znC zL~xdSG*2z!{D6DF!{2-HIYKuV6dE31 zgx9e^$6QW<%*W@q-V=ZBnaI%Co4mh}m(R>lJuPka?^#oN%*VXP`cntIc+5UXQ17nr?TSkMp=HrHan2 z0h*y`lf;ZjXu?DYk*z_5Fi(}3oG5cSoyJX{nbUUN%`nG#l5?6)(id=pl=}PU!z5sy zn+oR)XgQ_hc|yeVbiN+&_G*hLA3uE>HaiiUYU_uRk()Xac0J#0%Wk*(PyhMv6`6B^ zdEBgr@pO26eqQ%iQOlWg`S^U;pEY&;)BZ7&zq#A3m`~4#z8mJL&hvP^?zfxm`Ft+j z(3H+oE7LjzVCxp9 zj)tmANX${+h=gcnDYKia3Ug@LgNa(SLxdhF(zQ*1*!KgtSyc`1jUXbqIVC0{CW>?r zTfi?GKY;>Pwy`IoZ7343155I-gFQvM*s9*&G~5RVV`y5aD

JCj59SA&dOGb*2NC!t^5#wy;?uOGmce|?q;!&Q&*T1($yKJ$pFe)Ne`wak z+dR&0loA7>HND<#x0`+#(t0~=ueavNMDu)Za+Wskwp(cW;`T*_(wf~r?XPZMKR-Vq z1ERcod409pynFZgcC#H)UUjQDuA0tG6wTdywc5V_^yT4jf2yDUkAMFC?Hd+pcJlaZPX2+$Kbm_syMX(cde;l&3WM&!!H4ykF0$u*}Yfh&- zShyS`GZzs=bfJIML@hX8L|2oj(0~w!hV8`{#y7eF5;LnSET43ITmXpvn1EcD=-g%Y zG9wN+<+2L6n;9WG`bAFUre-dnPK3k`)WzkTiV$mf$kJ>ri)+4~Re;X%RJVQ!YtlbQO| ze)xEQcl~0wT6c5JiF9tK!(rX`-D(h~mzWfA1)6d>pU0_Ah+?Li6ET_qQ8H_JNk8^B zS4sq6RlQHd+%=JIy*0pjJl^isAaXjj()WPIMcgeBwJLp2r)tb8XKZQ)S_-PVBdQsI zBgk<-9p<@tqLet-ei&-4wboa!u3h$hck}N3=jX?9wY?eDwtbfoJ{%52T-!YKiO`VK zr{|gbo|toMFL$fE?W#{`oDSpUDw+Ixw;jjxT&L2lKYzJ*hvR;a0GzPvGZO9(XQnhu zB|;+5rO}0=q$}iTUCGks()aGxni8NQ5xJ@{VdBID(=-<5(>Siz8{&ML$Ds_X)$n{c z?e~XPWwjn$ZJMh)m`Ld}E6uGTF#@P+IJrWf8S_YhIpM>K#~d$H4+5$PI0AW7MIv(u z7jdl>0D-_%Q(;adP0r&P%#kRiRINIIiY&==5wq^->X^{nh!UqHb9I1m8cR3yrGNMC z{mKTY1fQ!u?T?6@6DaUzy%uVcy7dNs z`)>cQPJVTBtCW8GQ~kgHPyf&V?*II+#^>{Y|9|!1AbKW_~^QQzl5 zWHwUAxs`dE9Y91w=*>haB{4xFaGIuxS#wuTr&B4NJ4{u^+J;;JueV;#{Xng&h&xZ(rQJe(~bP?F(=nO0HAWW>>d& zsnDulz5dzvP)a33Q=R8(Q=P^ypFdTp&xeD%^nEwY6`67_oN_MtYPWg&>cyLz{@a(= zo4!|(VV$Pqc^=OQ&bg@0ecwMn><`Do?d{Dt)vMj^#~*+A^x^00uC&AS;`*-3xEnTI z?$!NpI2b7b5YW~7##O(3`S|YRZ~pL~zqop1`{NHHkDPR|VKGF*Vic;_5}X{Tw$s9Q zennG^5DmT_km!!C1Vli*Sa=r)OITHaf@v{~q84{FclQ8;+}+L5!eFIM#LOV#`XdM$ zoJ1sI$<5*>SSaEJS8`W#cT9ef`h1s2;7-t zDP@0{TT?Mf3@L#dsMTRr$~>P(B_c3mVly{@t`yQb6e9Pz*6Z7wr{nzaeA;c+(RW3^ zckA`$>Y5Rr4*T2dyT|9_ zBnJ=?cSIs{clFw2yXs5Mc)^HrPW3!bHkDGgTX6I9I1fYVyUxt!DiN+3$o|)7HQcsL zY>@>GC@K-pY363e>}onqGZE*K5TJ=9VkBgykc1+}A1eT+lEU<-ri7%biAmj}I4`#1 zWo0l2fVtIGS&52)ghz}J`hImf9N&F-|BLUxx!qm;#b5r17ccG*@#*R5W-~yW2@L?| zI*+Z}?S>|r*~+>btL{(ZYA7=9yROgO>hU=4kEgfa|LpvJ{5OB`x8J{gv46hbt-N$M z|G&TZ@Vob){=dKZfBe_~&7b}3=RePh)|(E};FGOZ0CLP@2joJ~Dt!st8z0~1C9Lr`H#3vinV zV966(uQv)h^nEG$?aMcBU%ye&@o+9>NC}+x!Lvs(`dsq1fiGimFPoPG` z3u6oj81NB;VLOz%H@ormEu6>h(>;B9 zq%ZsV%cnCrv}VWSzHV2A*c`>6)%LjGQ{vtAwSylXpX#|b^7X1u3HxD48SWn+>Nr(b za7?)XVUvmkCf1kY&eB?yK`0(&}eSLL(b1SW$&L`k>80Yu*57pgN 
zSF1s_b^S2U?fH0Wh6WdxFOa%LfE_N(y>qDM0aR_NcE@OPvMBm;QzA+^x49;!^E9r8 zo`}u0a7oOmOGqrWrra^Hdj&vqB_dD_PX;V4@i7W4$~6p+*pflCB;tC2^h%N35NaM( zML<VRbq`tIU)zm-O7~I(IOi81?1L-NW(u z;cS2RyTAMO>sQBiuR8wvmoJXzA8x<>#lQZ4|J$oqSBLxg@NfRNU;pCOb{NPT!H|(% z8zZ@E*QM2}5TmN72mugt-*=)at#u`@S1W1SS`->vqJ-)ll8BI_E67w6l3B~SU^L>T zgaAb3h{P#T($+F%vDyzkAgDydQwAVrPKkfC0Rvcfe9+Gj(5c zn$?H(?wi!_);Ih8!@GCyr`inR>EStdX`E&zboCj}!_Z%EcGsKrrl)UTzuaxFa^E@1 zG}h^SRBbu;>N>Ug@%gxaK3Vf&)&1-be?HCgcDG6F`~7|26>V-BQ!_^dXUfrhyz`H@X1fi0X(&10S;b3+^=3Go zraE(WU-hN%go)2%YwqGk2&!X7AG)MkSHtl6`D{jnXr?mB^`^U8rJh~X{^8xnQg&0F zn&`{T3q(Ag=IiS%k%C&6$~?~Kycrk^t-GZ2RQg`j_orj-`d6=B5y87JpD-6n`KOPc zpU+c242j9Cp&Npq&gZ58NM;I1ss zPxI7ug@~jzRZEFarz0YnY7^s}bIvX*qVRPR$0Z685#52Ai8%oM1wQlz{o%x%Qe^T_ z&dj84b#Bo?7@xxucTSivIYf3sa(5&&iSfcC&>}#mSa5-f6C+|u$wZiFYBjEqKmBKa zVlw~D-~1*e2IHs4Co*6_W^%`|$|$CQr)lc?Y%b&ErRUuDdYYcj$DhCX{`t#0Hu>4B zoAVD}rbds`>AM%#qY&lQ%Wq!&fBwzC*;Pv^cbYNBT5l5=(@Q*+at(%jUErgj3aj9MjpdBQmd4iF0% zA|g^L#XUqE24IZ9fZ%>O9d_Gmb{fy)+m~IhOXOgR<37;^7HS$zq{M*uCG^{brszo zj`K9VdHuGw`Qzsg4-b#Wshy_w`Qw-Cm#^+_ub%hMH&?suX4v(szF$wx%6g5N1;i0k z%B349^-cEGrysxkEw7Ichx=3eOv%H_YAOzt7|mKFW10e8@&VXD(JrOOzAL`xUOm7%&lP z=5yEKs)ui`yYZXuJmTY%)cJ?mZkcW%5nCBgb*ifFDdDi`PIcV%1D^G8JU-sv&w&?k za&{zdbuOuxLP_KxC8y&&l|J3=wugs@1bLcj>4s@OVOq`Lo7{K9M4*Y_csQk0j-x8U z?KXF5qNItSI+}RNq^c>w+~(O@S>s`v3_$?q^Sna%_SMaHXuI9@`;Ygh(O+#=pB_GR zT{ktoKYY<cy+`xhCp%yOmE36192mx=etF=fgbfpZ@WmPSgJ3`AaE9ogbc_ zPxD;*o=Zk>leS&0q7g2aG&KVv3xl@-69=@pWCgepB^60bOG51uG7@EqhU6ZSAvYvI zB(oYs?A)3GuGgDVx>{!<77ZkaK{QT&W>$@=iRB0i5(0=W&+XC%wrtxNKJ5$XmROoa z8>EtZ~x}6 zew%WVW(}cMxw+n)j!&k7M02(C)Yg5EZIV_~>H(W7>~^c$u|7PvhyBE<``f?!{WouJ ze)-MIzxs#IA0CdkH`o8=!^7|1z5f^g@?Txuj6Z!muJ5k;^~PW(?z(li-bIuA)R9t=4bzm;iM2wkX5m_h_nkgcP>m;*66>G=@3&>^{-EFt4pqfD2 zhgNC{pt5Vl)hYTuWqhxt~P6M%F6wAg{1R1PUll! 
ziZmA)xrZNr_}h1ne{JdUJlz8{cWg~kPL33-s%rr9DTa~-RuAGdBsEKBIb6;~Ktxa{ zis3rQ;ETU{pKuU(BQGwDf&hl*5b9O~*8o5Rs3!z+13a1)*~MCz>H&@`XHnqB6w^{>X-tNA9`l79s zoFV1?^FhoLv3oOZOjt^C#GxymkrIjuF(!(6I335fS$CsUo#)kVH6Nc#rbGzLt_r}J z({2qdoY6o?3Bj1LV>UxFUiX_S&qhhr5#ee}Zb#xG1|J_!+pC-NJY`P1&6R;3k5lQo zkYd9@nMbQ))iMVJ#2fhhtolerV1e$8Yi3eRuY(--j^go2qOv>~VD2C51T<7Cf$ z0z?%FWk1-0mYV58Vq9!0Vg>-}h8)ir#VHw&W1TCwbzPCBUEd?pJdf``d;m<2S-mNc zn3_r^0y9H)BnCaO!!EupmL4i3;vXKmHh(7Iujh!8obJuqaV=tXlANGW(1 zA~NShDdx#}oVnzZl1T%DCf3?GoyV)ItHGA$E zDihH4?&^F#CSo2|lo9}n)?uL6FR$)yuD83Z@F^emPnjumcmL@@%@60jh@PhDcxr&j z&19Z`_T8Ir-@a&7#<2~nb!H+Ind)Kx)OUR;10ah|_fNn7@#9~g>t})yNZ~S)n;J2z z8jvqXHZCX##qTLfRooHia>}_I5(3)S0t*P4a^&QEjk{VDmw^f~xS11x#ZBRkzC<`I z9KaIp<6q5Kkqe}%D5?MyS4v>xuF%9)LDYPSm~a3v^TbH%PN8X!r+OKt!OY?kLj-rD zrCgWEIsO&yaiz8-o0B7@oD(4#)VcPhTd&qWySj|$kpYQ` zkPpX`8;THda+`O%mFt<&>ol*1b)Uh_rL~ffoRL(T0FX9a_mmM^)$Pzt)3g~@pAI89 zY*+c_s_#3_Tz>!KPqX=e{`C0t=I%=BR8_az>+`uCkI%!aGEWl{h_&O1u6?yWU7Z%vrUEmkEgQ(&aF{GVl0W=WV0E@aRz{ri|MSb^(j?tE{eo} z`1$kW&sXc4&E|*u17+UayaGG^^t<2u*}weLFQ2>5XOLA#zAD@P$M+AGOFWk!`(Qsaf0ab#7hihzN96|r`um^#;`{Hv>$m-vr!VJm zoTusb=5Dp#5P<7=m{0G1++SZ^Z@1f8kI(xDZS8nCy4h*}1n6p7at2UyeQ~?})%S1S zzPNew>YJfoxARn|c|MPQ%5IdoAldWdUW9ks>uG#=`});tRY1%4cX!{uy*uqE;4b%T zLIaoacu3^y)v7DKsGZKw=lRp8``@O{s!1~^LXP=GBO%}sTwz;JYDGcZ@g zu;?$CjhUN&o$o>2-4g;KLL#6A_;I_Oyx;(C21xE;?#z)QwHmf3&# zs#XcQ=(wNH7mvs#@->-tjLR0;vm^ZpSi zuXd~Rd|0izR;y@TZ&p(kWZd<`X0>{Lex9eP>&j}=KRkWDzPe)SfA{|VX`aHqkeEQk zv=$!J$N`WufG~rql7p%Es=2~;k zoD?!o)#@?dm}(O$y_*?$?g}xb zlhnhTp77_EktgZD#c zDr%NFpQm%qIT1s`Fds>4c#$7UYuW_V6Q%WPrL9$}4xSQ49zBr(b9B8?PHN7KeyMfn z`XT2uj#Hg#N?EnO`{@VKw%cyXP{w+GxPQF8d38R|SG&#K?do`V{>y*+?{Z4teD~(| z&C^^(DP_v6O{ZBhc2$HtVM=A?;@+>)C~6L1m^)an_|VK?v6#0eL#BS! 
z&4-bh6Oy}bx_ml4zr5YdYE5ysS)az^?bY4+bacvnw?2;}px^AS>v*nhUawc?D5|Sf zKc0@Qjzh`7)I`d#!ZQ5$;mgC})b*?EKDWut5Nx*^=6WVh>U5r^iV2RTKacbJ$I)N;WH{jN>?uXQH&(Zfk2z z)kFb6q%kLV7ir1C%(mN&m})2?m>e|p&7xv%fQ&@M#LS|~DLFbIG-)WYVnTdDwKX%( zxi~oGWU478M3V>oXZjkHP4BYfsfOea*mowW(0R;2EwXJgeg%(dJ(dj zolj%mm7E>Ob#7gk0nmNgt+^0`x_Le+^ z)0|2fy6!kWC&!Yg>q=6X#=|_$NNHI0swz#EfOPkHoUk7NL0Su&fQ2U?(9}Z_xax<% zgpmoC8a1bIiJB8r%9&Flio9+Da8+-us!8bGn^q8ahr{z@o6pzR*Y}@3OjFAlo66i8 z0_?U|L*LbQ{{8R%{-<}pzrMLr)&0{G6ZET9O2mm)L%)CCzr5c5@h^Y*>gw+9=5B8D zJfF#%NSmiQm(J0g>2y91!(c8i@2<5@H&@p=8G^V~P?Dy(A5?1UN}XGsrmNi!F*UKV z9*@)G4?q4^O{C6ZN@*cwqfUkp*fG}LP=u02J|`}WXm|nC32{k^1w(|;jG|d+>Vr|G zXmt4s)Wm7o-gPzjzu&CDROKh@kFP(3m?!9y#wtio_%4i;wBAzbtx zcH?Abfs+J*7L(4xYJkTgBfju?VCsbIa8bDu1fA{ZOIwqIx|!M~f;0+4T$iAfnP{sk~Vdh7cW>F9i&itffc<4#|`UVdd(s>HvU- zsFo6fVM^p~7(jj_R3Ic$m zm;$JS00=}zVQY*uNu!cDp=vcTEFDn+Ge;Qfxb4f~`PlaZVx8w!m_Vf@N(_<1Lx9r2 zk_QY9s!YCH^=v+mx*E2h?jKU7HqV6;x}C=H_Vz{v+B6m7l2e^vuJ-E9ZXCyyn28@A z?}uUFoT`dDmtpha^TVfyLn%dp%mg_hdLjgKChCUOm&fCIHb*uyRcE3o<+?;SE&4TZ zOH4%39mq|?G7!!!SbXf^IFIdNQ50iS7g0h?%(V(2*14rbOsp8ZuBAj(Bs}4MY4XZB zBM_Qe)J~d_Lu|<*)**`S3=af8#w=^%9{F5>Z7!0y%*5-sS4>2@zU%saHLU79K0hCr zII>pQoH><}rfCwDoKoT}BIcHJc4jlHwTT!4C1L|K01;8Q)v6zlV}uo%Dxs@L&ZX=6 zX+C!)qeI6oB&uV{!>}5T=d;f1n-y&fn!}g<$teK=p_Nn!q}JMXxAREOFJtu?&h-9uIpiNAe3Ej<{IWc7>D!H`U+?|L;8aQxD@q;8n zVn(FkV@!<@6X%qf6A`gWBSHf|pU$P|<3l6F@pNo$GJ_v}_=jKo;*ZW}Ti@R9t~cv- z|Mb%*V((LT+&|pkKW;WFZF5P$T+Eutv|jh$zIpRUfAB}&zkYMI-k4UinG=ZCzUwG; zO<2!Jf2NO2uPn3ZB!usBN^Aco`Dmz4Y^?hKf~-Qu(-#yHA|2@t^?k(PsR zL9Fd7m&sA_2xw*^i-t6gL^W^#RTo`g&L#GIkzs&E^WvfuUH)=>>X(Zl(nAAhF#t$T zL3qI;ws)tnw7~+hYwQ4F_)>`v2pDhB!giz6Kk^u+4hvBcdkdtyS&CcB%MB^%s7$LNO5+4*Ca|mIV>|O&^SMXpz48m>UqD~yz zAtVPtNKB4OJa-+@4I)FA)BVTx^t3-s(^x0U><*{%RHvGN*uhlAwB(`+g_DaVLor7H z26D>haWo`KR7$c|IkTBLGf*DuSnG6m^|H?U+g;zMc^u{Xb{Om2CCt#8a85Lo?u=6| z<$RhG=jkvL!OhjKj!#$DgPA|=&pCJFG`@ZN=Kk?Z6#&f7`$uHoZu;@OpX(G4?O3PV z+q>~RtyaTowF(LbxqSTaupcKgnyO}KoN&`^R7L^^x0G0=&9$0C6JaKIV`gHM=pORr zE+9k%%!zsNtec9sde9)?{oZL;NHR9wx 
zhrEYFlTMtN%^@X@94vQG(0?R=5K~TZ?b)Rzh8#6oDr19TVx~yXB`0u9ImMHcFiUNt zjfB{hj(~}jkeDLc7bRY4KmcH7@BoV-r+n5kA)%A0*D9+nz)l!SH^C^blzRXX^-_j$ zo`?0W*l;?XOzrCGT6KRqO<;b#?!nWLlCL@d`1tUAm`zj1sG#cRRc)#^s~huJ+u<}K z=Uj4E3ZBm@Z5Rd#BAcK!VNN-v$j=ZJQ#CO+Ck9iEU0+l}+E_~I`%;_0Wl#;NMR8gZ?@a3SFc_c*5mO>WCkM#r;^6gY>*t& zc6YmW1j6-d?Vtphh;MJNQ_iYb`jrul_3+`tJ8Sdq`X+U~fr+&Sr^mzlQc6o}Ab3g!~kfe64AmaqhbP)~+JB5+FwbE9ZijFXu|TThXRA4eiU6lWk(iU@|w zGi)KSjG5mIM8diO0BmB5NJt$b)WhisV7N0TLh?u&P1$0G46? za-Bqrn5vqoUhDt>vF;Fs6Tp{z)d-DDF2uLZo z`mxR>XU@IVa++-xKOD~6)i906pt(0g-u4ZYtEh@GLLzd5zVA&(1}uz+x$fS)e%v2Y zDJH&N^&&F>yn6NW;psGsZ@X^jhiQLu^Td2UpHF$tILLyaF)eJeM^y>9%1pNHzld7h~Y9<=xt6{#;KueGoDRryO+?aWu zCT&7Q5m$wjR0A!XyF}nxQgZO7Kn{ttl*A(QG$roVtL@=%06Gu-U^+VZ`TTTucU>~z zz>`7+YCuiBHmR*`0Ev?ro~s?sQ&ZU9+$2I#5v}XhN&%Sk`EZ?kdJiDk35X z$SH+mNSZic&Iue$waFZ>zq=ukii&7_$>2`NNEkeR$vN(LSVk)ni5FJnr0dmyMCJyN za!E*F&1;=p{V)FV-)y&6vK_h_4e&@e48(z39qsyi?OEd@zSX53=k@})h(!`=|} zOnwPuxg-%0fQuTEV;XNG^OXm_|4lF{B#`WR;#X8 zc(cu?Q_H!Vt1_WUb3`-gGdiZz>3JwnGJpJZ7R8~Iq2w=L9$vhDAh{gw~XD zA(hNzweHsIr>Fa?>njI4oQ}B+kNe~PI8w?@%}qHoC2p;1u8FWKSp}PFYjOAjxFjZK zasXF1U6c-y>lbgr&OrChj3b3~6!EK4 zJU>0>Qc9PlH8)@;yU6as(@(_GqR;{XiHTJe098#@-HedKoJ=WMn^eUVa)Kr{o<~T? 
z2qiNio~DVD=Wb1ex27E9q`|`BiG_NilwxY&G0D=RNHirxW(If8Sw&iHxhr@P>BT|B zDFdLIJ4neH5t_)5dQJ!k%oKU3aafg-7ZwyzMB7|TDW3drd^(?xbDbXdpQUP7xa&*l z5}*^O-R{a%4#(4Kv)--Nzx?_4Z(rZNz1iLF)~Ee5;e=uuCFW`=mqu>0%;S7M9=oo; z|MdBA*yogg_~|EA&4^#_pMLqv;rQ^WU#$w4u5^8u2w9py63T6y5AQ#|Q)|F+R3R96 zs38qh9UN^*Y5vFG0u3TWQjaW!#OM|=I&toWx*s?Wr?4yn<_vPifRKNShf3WCM%|RD|%n~gfR)YjUL<#Jxmn{6X3zklc z+#42>0&&5s7A239BSg8)Mci!;m$A$vACCDSf!mZ204)q!v2v&Y1P>i4pYf3v^-_HH z>K?z$mPI6(D7eIZnF2E|eSi*T3-zJyqORs5YDS_c>P`TTL_p0_#%uutWd=bt_to}S^jH}iz4%Q+(xAW0KP6gM;jPR)H*L&}J3Vu`p_2@`aD zzO6MXjNB>m?)vKCr+a4Ht~02WbupE7SERMZI=6PSTLX->36->3=kYAH*4Hn0qSDOG zd>DF_i5XHVzy002lxV#k4$lw6P`*4qU2QvX%UlW*GrhQd@#Fj7BQjz4<>8|^DC8zJ zF%3gsYbz-scwz$6oYO1il^>y}k^JoVNnKMT4#}eLY z4u*-t!c7USfw{NVO38?zrYRRhYO08gh^iLhhlJ4`8n_=^)WTnhqp%ksYS<_(PWf1q zIi(Ohl&G~fb2lP3H3p6t^(J-y{JgumKJ1UCc6)QnOvmFPGKf?y2+uGRM7A0@sC(BX zB${ijO#m@P5@}e@QUc^$I+WSXn9zt!9FSUT;NEwoitW$ywj0XOw^`?UR`K;_yIHNI znTq$_dYY!Of?Z0b>NI6eGEJP(n$2Saz|;9WH{qNEf(Q4Q*>g??65lB`(AH?na<_S|>wZwx z+G^hq%;cuSYEW$oh8$5*Zs3^{7}xn+t*uw9-PQIypEB{VDc^qgay{ff`t{E;dxgd! 
zR8b+KlyYl2j}?JX_3mnC0OR>|b$vCR+tdEuOn>#qe|-Px!$17yZ%)VOpZ(%{PF?EK z?)C+y0sw|pY&uWR56@qm+`!EMLLKX9=BBO^5CdG!)dijp-%ez}1i- zW0=LzLwQCK>v=g#;qtQxd}E3KB*efqRMi9GHxvbT7lo!_qXo0*RR;@>+8rZC`XY}s za}jkz*RZ`)q?RVMUNwh9O8v$#P;>!^|SaXeoi4ltQVRsswQ(&!8bBW*t&cuXVs}lBy)A@GOSHt~r?)u_dhkkf?+GFZg z1rN<;v8p;`1~qi-I87$ZFy#L8$43*$DXIEY>-FvJkMHk~XSlxZ32ZxbUJVh|EGIUw z*5<3tPP{q-Vm=(Fs#x7i#(ve+X(qG62>}qiAG(xyuC1x5IWe=NHWk$b0hNR_KE*u1 zgp35HAu3mm8VW~%XxKmmUDEt7+zx=bvqMgKLRl2R3vC+#4@rY^bqMn}y>K94QHD=>7xG7KT(T<@+XRi5iMcW!R3 znaSJ}Ihdu)O$CWnMO6TtiIF&Ea<}m6fSBomAuDJ=%2S=Ci36x;${Cmtk#Y+B2N01Q z5ILtfuFQR$#=h_Sejr9>GSi6gR8v4WPa~L>lICed#Pxc8IMpU~SPib)s_r&BbglE8 z5bAkKfWQ3ZFJ6E1a+LX(-+Xs-yBnV$6SS`DnEbe(R;w+M!PHW=X&gD@_domjk3aqN z>ecPxSf}gh)$6PM^V8>dKW$ds`*;6kGnDx}AeWmL-%!bBZd!Av>ht07IL|ZXfLT0L z`(|zittxn4l$49+)4v|C$bd-9XdWIIa>wk*4n#<9ZZY>quaPhOC@jmwMS~Mih9O*X zRwChY*{j3B8)H5OqyfMHY=!_Bu*;%<2fS!5Vk$?oXcV~Y87cndmP~?sAiGFW-GK>^ zD13Qj0skSM+j5LC$} z_untulZKHYcwIm@kI;x%#mpTMF8vf}m+xy38_3CZ$-7oFF;oWtQZZ(BKvETe&dAJD zLerKwGl>Bbv3tvu`W~-W{Wmv*yy7o+^AGP2fA?ws>8LXlS4?hJv6`*>H6;dzVYNla zS|u|gF|vz_st{o=d78&kN>gjC4zF*9P50&T`1<800*Y(3jCOmyuKRIPA8Rc+rIgIt zdRX;oJI^OKZEB@Z-I+vKv+jDG^Uo1@lala{`2^*15G=e6|Xh0ErWuI3q&}=WVbpp%I|q zT-+9_A0x;RE(Jdb`qiJsq-u!~5jg^)JYY~|MrKqM;?$bvWHHo-*Hldzk%*RTT6YAG z^r0{b2jS#_u7ZCBsmyGtjzKavPAQgMBI3l-ga~VulFKmkM9_D6oThO+&-3JNtu>Fx zDR*!n;*{9IYinKCsaZ;in88}Dv$~2ZF>9TWa!N^bRz-KnU8!}}rbOIKIpg8f)*I|P z0>Z;_RtDyr`dnK(9#6MdyUg~yKjnV)be0)_Gk*o1n@HCWiD+|m#a&haXQa#oSW4$e zEQZjouXm?;7T4C))tCstnJ?Wgi*ecjfH|1~aZ)uz9>+1t9vqwtFN?hyathBsAu(}$ z&sC*OjhI%$%A;Qau+DSNnTcwPYG>*CPTlKVbIv&@6)UNm)#@yXx$C;SyPKykk`brV zxpdw2Zuduj`0G5R+ZVfE|KYC=r)QLLSanUBid1Pfo@;Y+>W9_m&tIrd)u+1`H~al* z|1@pZ!`;pG{f7^apFgD+S1(_$GHL1NuJ49z)>ubf0bx8J-@p5DnkRMxbi&k{5TPhE zF*O25UBu;NOA=a~<3Jw#6R3J(RI`-80FontGDBi?0j337A#zX!Fk%9@r1ZpKi~vDa zB8FdbVOJsq$I*fo6?3Q9GgJW(+|dk{a}p6EAqe8EzW8Nfq3+a-$q68dk~^v^I70+1 z$I{^t9zP^-LQz{F62Hij(1}RYT$DMf*dp+AQd0+x8cL=`^9SGtN{;SJ{V@WnI4%n8 
zCCCS3EsAsz2r$QWi39@h%jOYpH!kxY0lHX-5JLK->hbfqn510;7cy5vcb8ZW5+P!3 zs>Q9NB$^4J5HJHM0+f`M66ICqq0^GrKD>k%efRzK@cz^IaMTYE(-D1N&GO1*1j8oI zln^2kfz3o!v{ltM48vS!HSvZZ&Pa1@Lq8nOQ%XEeI=8v&l6d2kb3z4Z>VbT8nNX+o zs+XosW<%Yq*Y^+m-F6tOzyG{v&OLjNjJcdn(=;0;$%*?^gbSFTkEg!$v-)Y$^X!Sm zfa^5%om_7=Qs+#lZe1ya)S9$t7XlYELRAYIFasbt#l&J}DW^q0hKrU=;{hguB|+BR z903ziQ+I^O4r68&Nx7tqY6b#khR8(NR1l(qJP3mzcw9}9h&h`(r)1`dQ{rTcHQg^Y zszLU;iuzKi1e7T*4_G3CNI@bNG^euNZI}}xK0Z8k-LTtTPt&J5)x-n{wbuAdf^ISo zHDEm3RB{m!KxB$4OmKH%B&L+Oq^wQGaZ0468_uV5KnZixzVEcnU{G=T4&|H^D0m9OC ztyY!AwyLTkf{2Nefe{iqBuWT4jbr41hC+9orfAwBq!33gV)i(j0GT-G<@IK5V&JY# zL(G9l$dsvr(1s(q<2cTu?qDL2n7EWu%C~Rde)H6`d8ODcWJ*^M5)Je~JrJIhZ${y059|Ln~-w>Ldt z?R#qTD6MU`CFg#w%FMY8rca0Sjjwk~j6VygXp$=w@h82o1?xf+!}BD!&j>+1Fg4NWfV3nnfB!NU8w= z0Wrs4rA1te00EYnIjTg6D;R{j57gX%7R8i>j%eASN=74a(^4=wF@gXPfF{Dkh+x*b zVrhUL`}Zd z_jAH=KY?SRv{}>r{v^$=u2zS3GzCyig@>F^`~7xPu6D!eJTrn?TlLv2eSSVw;}G}N z@x0rt4l<>Z6X*3%=J|ND+n!ISd2X|L13I0n0-0*b1)WZZ^X+B}qU6%|dA;fq^Z7KM z$6AIps<^4)qM!=!x7KC`%$S*(NK^?tF;n365!@g6{bf-jq{aGSrb~S!M7AUWx+$h) zZizVMR7L-BZ~%`ixr^M$(F~D>p?5$|Ow7|<(JgYC9U#2pOd-g#h2sQ3UUZ7UfVgC) zV{%S>b$yLM`~7p!*~GLOhOR5mPtQ?W8P0;l1filcgC!mZTCKQbk{b~rQ7WtwYpf-v z1f`C16KzeI5@JR0loCKTn5Vf7U1Ia=ZU20l(fM#Zef#Fyr!RXnxV_yzJw3X6PS`=7 z-OVvEMbclGv8CDmaop@m!enaERN*Gm+*)f@wbmL{2j-GEwN^tri-4fP)(2m~MrNv) z_>~CNh=C7E$r@2{qLGA3h&g9wVy1bn1lae5kfhb2AD$i_O6d%i=#az=fZAG08NhSS zt)1t&A|X04XH&brxiV|}=f{8XXMY4BFJIk!|IN3%t1Hxbynh6lwyU&Sm0W0^=9E&` zb?5UrF^=QNg>PQ&#QgnFpZ@kg{f4nH<=twx?#p(Yndh6k^?KXcrr*7IySe@bQqPbO z45&#vK0N*O<^HFXSlwEah%hrpQxGw=$lrI75XiVGEE&ylUZI;~N>s&jiuT6^d zkM+?Rh?iXfF>;T;+5o`ZV{XUT@-(FK3ybCs8cI-&&jA*_XQ*n-MHlgnyVGK(_28~_ z3C{xJg|;vU1WV?jtsz%(Kv)WX!^J>EO9wO}MAe7s*JUN>af(E110Z+z;Egfj=1c(r z6=As(;_z4`6o}PUOydQ zp6i=&{_t>qo?8`fRnL>u&Lt8kT)-{+Lac!4cIePV>h$oqzZq`3E^(sk-A$XPzU$j` zXkw-3LNtk)LlZUUuFE;4c^CIiA@{%+)HRY_`YS@4I zl!?rttjg#2j}1%eikzyBZ92Dvup6>l>kBue&DEePK-koYGhjk=1PTFi(K8MTpsm(*}yVoa91KMr9q1;CVu zDW#lqSBO*2ZJOs+?NUvQ2&nF=rlyFTGpjp@r^G}o8f&+vMAAfce0tcU(>RUOJiYm5 
zON`goH{*Fc^Z9f-hR2uy5lE_(l$&ZQi{*)s+<}P}3>~BK2tprWra7E(0?+&7)q2=J zA5*cCiW!K}m*>+rFR%Ndo2GfaS&a>k2mQ=Xw>LMUbJtyG=EHO*B69X-Za_rL2yTd; zaz35US65q8Xrb^GNF^g;V($Avr>Uf@W&n6@)jYZZ$Xr#H7DEJ#ykDd^`VbskV^fb9 zcqHN`0$_;5yjUw@-%>G-4o)N@%AEe>kN^1N$M=8#_rG0lHj&7rswpLlUU33*?}r`< zMHL)Zs})#1+&}&9eER0QH~06C$Mfmcixt;SYZG?WXnMCYuA%$Z94w+ z?jKI)gHyadj({>LB5MPau$ZJtHXKqB-g8MvhI8ynbyx^ zeV*In{`h>HG(+EY+NPX)Rg^Awd9JzbwyXR3-rd*hfk+W;8V|dxv|guiFXz*#kXc*Gjq^b<7oD)i`$0o$tL5X;pCsm!NG3UI!zWw;H zKb{2;Z*Eq{<5;U-U3EjEc0Ts&ZoL|?pwxD*a-Jv3MO34d5u<8-iKucUBJ+|NmTUmX zOr|X{5+M`K%{Y;YVggqKbtZIo6{kd|h6&)3;7Nq7I;2n`N+OOWZZX%G2UHrEzNzIx z?%t}XIwdyqlyh;|tnqX@H&FoKM5a*&W6q`~fesN7IH;(RmBJ>%%qs26=P%49WfHah z;mNJd=lbIhKfZkNQlz?BDOp9$ESF>wqHjb1Rii{;-bBn?-3&}zkTInM0NR8TrjkYH zaT;IVY`TmDPE4YXoU7F1dFFMZg!6Gs-HsEV#?v>izkm0`KkOeLZm+j`ZiB(&{oM6~ zO|zS2=F+jW+8Xa$y}r6)?$yVXbKfO1OPN#Rrpk#|tA3m&PAPFJDZ491Ei00WmXZkx z+?xv*N*!SZKqn>ykS3+iDvE?9=kR18Le7aeQa!Mg#GIH(nsi-3_v8KV|A zRaBT6TboU&AT?2NfJ;K9Sq$?LNC|F^gu&S|1q*B!fsDH&IxIRjhX^AFH&{$_MuB_t zqG7YkaUICM8PH|mj(qX>zYt)_rM&?C0n?a52($q_q>mBx41gB08NdjHPyy4pnVbj+ zk(gB>sb3|U(2c^C3z6y-+R_EsT@ss@OAZ$OlVASeVqvglcUW5b7M2*6*iti1Dbi8U z72Lt%!i;DU^I)1G@&zK+f2ZIYkZD3e7A|?Jp0anhTW@}^UT58GR;1H3PP^SUB^tVpP!M4=4CnJ~ zYW*-E#e+sjoH?~7=NUHJEqVI%=^haiF#`7e>To<9Kts1G9(E zU!*l@bIK{FZmM%mSzSVa(4--ecVTJq1yWTta3)fNXk=X^Pt1^r3=vbaW=^^5Rg@^H zt2Pkv$KzDW>iTMXn2&X?MA$?#=aduY42I{^^E?3)YON992tb5S%XDfPo@x8Awg^TDMxMxw%s>JBAkHSLD3nWkyVx!9ukk7xn7FnkbI zV4+FRIjL*nWNMrfApr&d!5`kgKaS^=a;Uiil`=ChCxpH$<2WJ#fVFvafIs|$Kl<}O z|MS24kN@H7W_|zRexA=SUcTs4xxU@rUT@#O|ABGZ+}+Oee7(NTIZLfBn)?FYyJ1d* z;GaKzdjIY{c>eDDw@*)x*H@ci)6I3dzkeQ9a(06yeZ-en z;Bve=f;!NpLLh$GSJ)%|p2)EZEh>V$(qc_twlQ#mC4r6>$AAS!=3.8,<3.11" dependencies = [ "transformers==4.55.0", + "diffusers== 0.35.1", "huggingface-hub==0.34.0", "hf_transfer==0.1.9", - "peft==0.13.2", + "peft==0.17.0", "datasets==2.20.0", "fsspec==2023.6.0", "multidict==6.0.4", diff --git a/scripts/Jenkinsfile b/scripts/Jenkinsfile index 134770638..d878076fa 100644 --- 
a/scripts/Jenkinsfile +++ b/scripts/Jenkinsfile @@ -22,6 +22,7 @@ pipeline { . preflight_qeff/bin/activate && pip install --upgrade pip setuptools && pip install .[test] && + pip install .[diffusers] && pip install junitparser pytest-xdist && pip install librosa==0.10.2 soundfile==0.13.1 && #packages needed to load example for whisper testing pip install --extra-index-url https://download.pytorch.org/whl/cpu timm==1.0.14 torchvision==0.22.0+cpu einops==0.8.1 && #packages to load VLMs @@ -69,7 +70,7 @@ pipeline { } stage('QAIC MultiModal Tests') { steps { - timeout(time: 60, unit: 'MINUTES') { + timeout(time: 120, unit: 'MINUTES') { sh ''' sudo docker exec ${BUILD_TAG} bash -c " cd /efficient-transformers && @@ -86,7 +87,7 @@ pipeline { } stage('Inference Tests') { steps { - timeout(time: 60, unit: 'MINUTES') { + timeout(time: 120, unit: 'MINUTES') { sh ''' sudo docker exec ${BUILD_TAG} bash -c " #source /qnn_sdk/bin/envsetup.sh && @@ -162,7 +163,7 @@ pipeline { // } stage('Finetune CLI Tests') { steps { - timeout(time: 5, unit: 'MINUTES') { + timeout(time: 20, unit: 'MINUTES') { sh ''' sudo docker exec ${BUILD_TAG} bash -c " cd /efficient-transformers && diff --git a/tests/base/test_export_memory_offload.py b/tests/base/test_export_memory_offload.py index d1b7a4653..f63b18f1a 100644 --- a/tests/base/test_export_memory_offload.py +++ b/tests/base/test_export_memory_offload.py @@ -27,7 +27,7 @@ @pytest.fixture def tmp_cache(tmp_path, monkeypatch): - monkeypatch.setattr("QEfficient.utils._utils.QEFF_HOME", tmp_path) + monkeypatch.setattr("QEfficient.utils.export_utils.QEFF_HOME", tmp_path) yield tmp_path diff --git a/tests/diffusers/diffusers_utils.py b/tests/diffusers/diffusers_utils.py new file mode 100644 index 000000000..305116c03 --- /dev/null +++ b/tests/diffusers/diffusers_utils.py @@ -0,0 +1,175 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +""" +Common utilities for diffusion pipeline testing. +Provides essential functions for MAD validation, image validation +hash verification, and other testing utilities. +""" + +import os +from typing import Any, Dict, Tuple, Union + +import numpy as np +import torch +from PIL import Image + + +class DiffusersTestUtils: + """Essential utilities for diffusion pipeline testing""" + + @staticmethod + def validate_image_generation( + image: Image.Image, expected_size: Tuple[int, int], min_variance: float = 1.0 + ) -> Dict[str, Any]: + """ + Validate generated image properties. + Args: + image: Generated PIL Image + expected_size: Expected (width, height) tuple + min_variance: Minimum pixel variance to ensure image is not blank + + Returns: + Dict containing validation results + Raises: + AssertionError: If image validation fails + """ + # Basic image validation + assert isinstance(image, Image.Image), f"Expected PIL Image, got {type(image)}" + assert image.size == expected_size, f"Expected size {expected_size}, got {image.size}" + assert image.mode in ["RGB", "RGBA"], f"Unexpected image mode: {image.mode}" + + # Variance check (ensure image is not blank) + img_array = np.array(image) + image_variance = float(img_array.std()) + assert image_variance > min_variance, f"Generated image appears blank (variance: {image_variance:.2f})" + + return { + "size": image.size, + "mode": image.mode, + "variance": image_variance, + "mean_pixel_value": float(img_array.mean()), + "min_pixel": int(img_array.min()), + "max_pixel": int(img_array.max()), + "valid": True, + } + + @staticmethod + def check_file_exists(file_path: str, file_type: str = "file") -> bool: + """ + Check if file exists and log result. 
+ Args: + file_path: Path to check + file_type: Description of file type for logging + Returns: + bool: True if file exists + """ + exists = os.path.exists(file_path) + status = "āœ…" if exists else "āŒ" + print(f"{status} {file_type}: {file_path}") + return exists + + @staticmethod + def print_test_header(title: str, config: Dict[str, Any]) -> None: + """ + Print formatted test header with configuration details. + + Args: + title: Test title + config: Test configuration dictionary + """ + print(f"\n{'=' * 80}") + print(f"{title}") + print(f"{'=' * 80}") + + if "model_setup" in config: + setup = config["model_setup"] + for k, v in setup.items(): + print(f"{k} : {v}") + + if "functional_testing" in config: + func = config["functional_testing"] + print(f"Test Prompt: {func.get('test_prompt', 'N/A')}") + print(f"Inference Steps: {func.get('num_inference_steps', 'N/A')}") + print(f"Guidance Scale: {func.get('guidance_scale', 'N/A')}") + + print(f"{'=' * 80}") + + +class MADValidator: + """Specialized class for MAD validation - always enabled, always reports, always fails on exceed""" + + def __init__(self, tolerances: Dict[str, float] = None): + """ + Initialize MAD validator. + MAD validation is always enabled, always reports values, and always fails if tolerance is exceeded. + + Args: + tolerances: Dictionary of module_name -> tolerance mappings + """ + self.tolerances = tolerances + self.results = {} + + def calculate_mad( + self, tensor1: Union[torch.Tensor, np.ndarray], tensor2: Union[torch.Tensor, np.ndarray] + ) -> float: + """ + Calculate Max Absolute Deviation between two tensors. 
+ + Args: + tensor1: First tensor (PyTorch or NumPy) + tensor2: Second tensor (PyTorch or NumPy) + + Returns: + float: Maximum absolute difference between tensors + """ + if isinstance(tensor1, torch.Tensor): + tensor1 = tensor1.detach().numpy() + if isinstance(tensor2, torch.Tensor): + tensor2 = tensor2.detach().numpy() + + return float(np.max(np.abs(tensor1 - tensor2))) + + def validate_module_mad( + self, + pytorch_output: Union[torch.Tensor, np.ndarray], + qaic_output: Union[torch.Tensor, np.ndarray], + module_name: str, + step_info: str = "", + ) -> bool: + """ + Validate MAD for a specific module. + Always validates, always reports, always fails if tolerance exceeded. + + Args: + pytorch_output: PyTorch reference output + qaic_output: QAIC inference output + module_name: Name of the module + step_info: Additional step information for logging + + Returns: + bool: True if validation passed + + Raises: + AssertionError: If MAD exceeds tolerance + """ + mad_value = self.calculate_mad(pytorch_output, qaic_output) + + # Always report MAD value + step_str = f" {step_info}" if step_info else "" + print(f"šŸ” {module_name.upper()} MAD{step_str}: {mad_value:.8f}") + + # Always validate - fail if tolerance exceeded + tolerance = self.tolerances.get(module_name, 1e-2) + if mad_value > tolerance: + raise AssertionError(f"{module_name} MAD {mad_value:.6f} exceeds tolerance {tolerance:.6f}") + + # Store result + if module_name not in self.results: + self.results[module_name] = [] + self.results[module_name].append({"mad": mad_value, "step_info": step_info, "tolerance": tolerance}) + return True diff --git a/tests/diffusers/flux_test_config.json b/tests/diffusers/flux_test_config.json new file mode 100644 index 000000000..7d0c17d55 --- /dev/null +++ b/tests/diffusers/flux_test_config.json @@ -0,0 +1,123 @@ +{ + "model_setup": { + "height": 256, + "width": 256, + "num_transformer_layers": 2, + "num_single_layers": 2, + "use_onnx_subfunctions": false + }, + "mad_validation": 
{ + "tolerances": { + "clip_text_encoder": 0.1, + "t5_text_encoder": 5.5, + "transformer": 2.0, + "vae_decoder": 1.0 + } + }, + "pipeline_params": { + "test_prompt": "A cat holding a sign that says hello world", + "num_inference_steps": 2, + "guidance_scale": 0.0, + "max_sequence_length": 256, + "validate_gen_img": true, + "min_image_variance": 1.0, + "custom_config_path": null + }, + "validation_checks": { + "image_generation": true, + "onnx_export": true, + "compilation": true + }, + "modules": + { + "text_encoder": + { + "specializations":{ + "batch_size": 1, + "seq_len": 77 + }, + "compilation": + { + "onnx_path": null, + "compile_dir": null, + "mdp_ts_num_devices": 1, + "mxfp6_matmul": false, + "convert_to_fp16": true, + "aic_num_cores": 16 + }, + "execute": + { + "device_ids": null + } + + }, + "text_encoder_2": + { + "specializations": + { + "batch_size": 1, + "seq_len": 256 + }, + "compilation": + { + "onnx_path": null, + "compile_dir": null, + "mdp_ts_num_devices": 1, + "mxfp6_matmul": false, + "convert_to_fp16": true, + "aic_num_cores": 16 + }, + "execute": + { + "device_ids": null + } + }, + "transformer": + { + "specializations": + { + "batch_size": 1, + "seq_len": 256, + "steps": 1 + }, + "compilation": + { + "onnx_path": null, + "compile_dir": null, + "mdp_ts_num_devices": 1, + "mxfp6_matmul": true, + "convert_to_fp16": true, + "aic_num_cores": 16, + "mos": 1, + "mdts-mos": 1, + "aic-enable-depth-first": true + }, + "execute": + { + "device_ids": null + } + }, + "vae_decoder": + { + "specializations": + { + "batch_size": 1, + "channels": 16 + }, + "compilation": + { + "onnx_path": null, + "compile_dir": null, + "mdp_ts_num_devices": 1, + "mxfp6_matmul": false, + "convert_to_fp16": true, + "aic_num_cores": 16 + }, + "execute": + { + "device_ids": null + } + } + } + +} diff --git a/tests/diffusers/test_flux.py b/tests/diffusers/test_flux.py new file mode 100644 index 000000000..6f4396a20 --- /dev/null +++ b/tests/diffusers/test_flux.py @@ -0,0 +1,448 @@ 
+# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import os +import time +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import pytest +import torch +from diffusers import FluxPipeline +from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import retrieve_timesteps + +from QEfficient import QEffFluxPipeline +from QEfficient.diffusers.pipelines.pipeline_utils import ( + ModulePerf, + QEffPipelineOutput, + set_module_device_ids, +) +from QEfficient.generation.cloud_infer import QAICInferenceSession +from QEfficient.utils._utils import load_json +from tests.diffusers.diffusers_utils import DiffusersTestUtils, MADValidator + +# Test Configuration for 256x256 resolution with 2 layers # update mad tolerance +CONFIG_PATH = "tests/diffusers/flux_test_config.json" +INITIAL_TEST_CONFIG = load_json(CONFIG_PATH) + + +def flux_pipeline_call_with_mad_validation( + pipeline, + pytorch_pipeline, + height: int = 256, + width: int = 256, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt: Union[str, List[str]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + true_cfg_scale: float = 1.0, + num_inference_steps: int = 28, + timesteps: List[int] = None, + guidance_scale: float = 3.5, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + 
return_dict: bool = True, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 512, + custom_config_path: Optional[str] = None, + parallel_compile: bool = False, + mad_tolerances: Dict[str, float] = None, +): + """ + Pipeline call function that replicates the exact flow of pipeline_flux.py.__call__() + while adding comprehensive MAD validation at each step. + + This function follows the EXACT same structure as QEffFluxPipeline.__call__() + but adds MAD validation hooks throughout the process. + """ + # Initialize MAD validator + mad_validator = MADValidator(tolerances=mad_tolerances) + + device = "cpu" + + # Step 1: Load configuration, compile models + pipeline.compile(compile_config=custom_config_path, parallel=parallel_compile, height=height, width=width) + + # Set device IDs for all modules based on configuration + set_module_device_ids(pipeline) + + # Validate all inputs + pipeline.model.check_inputs( + prompt, + prompt_2, + height, + width, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + # Set pipeline attributes + pipeline._guidance_scale = guidance_scale + pipeline._interrupt = False + batch_size = INITIAL_TEST_CONFIG["modules"]["transformer"]["specializations"]["batch_size"] + + # Step 3: Encode prompts with both text encoders + # Use pipeline's encode_prompt method + (t5_qaic_prompt_embeds, clip_qaic_pooled_prompt_embeds, text_ids, text_encoder_perf) = pipeline.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_embeds=prompt_embeds, + 
pooled_prompt_embeds=pooled_prompt_embeds, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + ) + + (t5_torch_prompt_embeds, clip_torch_pooled_prompt_embeds, text_ids) = pytorch_pipeline.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + ) + # Deactivate text encoder qpc sessions + pipeline.text_encoder.qpc_session.deactivate() + pipeline.text_encoder_2.qpc_session.deactivate() + + # MAD Validation for Text Encoders + print("šŸ” Performing MAD validation for text encoders...") + mad_validator.validate_module_mad( + clip_qaic_pooled_prompt_embeds, clip_torch_pooled_prompt_embeds, module_name="clip_text_encoder" + ) + mad_validator.validate_module_mad(t5_torch_prompt_embeds, t5_qaic_prompt_embeds, "t5_text_encoder") + + # Step 4: Prepare timesteps for denoising + timesteps, num_inference_steps = retrieve_timesteps(pipeline.scheduler, num_inference_steps, device, timesteps) + num_warmup_steps = max(len(timesteps) - num_inference_steps * pipeline.scheduler.order, 0) + pipeline._num_timesteps = len(timesteps) + + # Step 5: Prepare initial latents + num_channels_latents = pipeline.transformer.model.config.in_channels // 4 + latents, latent_image_ids = pipeline.model.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + t5_qaic_prompt_embeds.dtype, + device, + generator, + latents, + ) + + # Step 6: Initialize transformer inference session + if pipeline.transformer.qpc_session is None: + pipeline.transformer.qpc_session = QAICInferenceSession( + str(pipeline.transformer.qpc_path), device_ids=pipeline.transformer.device_ids + ) + + # Calculate compressed latent dimension (cl) for transformer buffer allocation + from QEfficient.diffusers.pipelines.pipeline_utils import calculate_compressed_latent_dimension + + cl, _, _ = 
calculate_compressed_latent_dimension(height, width, pipeline.model.vae_scale_factor) + + # Allocate output buffer for transformer + output_buffer = { + "output": np.random.rand(batch_size, cl, pipeline.transformer.model.config.in_channels).astype(np.float32), + } + pipeline.transformer.qpc_session.set_buffers(output_buffer) + + transformer_perf = [] + pipeline.scheduler.set_begin_index(0) + + # Step 7: Denoising loop + with pipeline.model.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if pipeline._interrupt: + continue + + # Prepare timestep embedding + timestep = t.expand(latents.shape[0]).to(latents.dtype) + temb = pipeline.transformer.model.time_text_embed(timestep, clip_qaic_pooled_prompt_embeds) + + # Compute AdaLN embeddings for dual transformer blocks + adaln_emb = [] + for block_idx in range(len(pipeline.transformer.model.transformer_blocks)): + block = pipeline.transformer.model.transformer_blocks[block_idx] + f1 = block.norm1.linear(block.norm1.silu(temb)).chunk(6, dim=1) + f2 = block.norm1_context.linear(block.norm1_context.silu(temb)).chunk(6, dim=1) + adaln_emb.append(torch.cat(list(f1) + list(f2))) + adaln_dual_emb = torch.stack(adaln_emb) + + # Compute AdaLN embeddings for single transformer blocks + adaln_emb = [] + for block_idx in range(len(pipeline.transformer.model.single_transformer_blocks)): + block = pipeline.transformer.model.single_transformer_blocks[block_idx] + f1 = block.norm.linear(block.norm.silu(temb)).chunk(3, dim=1) + adaln_emb.append(torch.cat(list(f1))) + adaln_single_emb = torch.stack(adaln_emb) + + # Compute output AdaLN embedding + temp = pipeline.transformer.model.norm_out + adaln_out = temp.linear(temp.silu(temb)) + + # Normalize timestep to [0, 1] range + timestep = timestep / 1000 + + # Prepare all inputs for transformer inference + inputs_aic = { + "hidden_states": latents.detach().numpy(), + "encoder_hidden_states": t5_qaic_prompt_embeds.detach().numpy(), + 
"pooled_projections": clip_qaic_pooled_prompt_embeds.detach().numpy(), + "timestep": timestep.detach().numpy(), + "img_ids": latent_image_ids.detach().numpy(), + "txt_ids": text_ids.detach().numpy(), + "adaln_emb": adaln_dual_emb.detach().numpy(), + "adaln_single_emb": adaln_single_emb.detach().numpy(), + "adaln_out": adaln_out.detach().numpy(), + } + + # MAD Validation for Transformer - PyTorch reference inference + noise_pred_torch = pytorch_pipeline.transformer( + hidden_states=latents, + encoder_hidden_states=t5_torch_prompt_embeds, + pooled_projections=clip_torch_pooled_prompt_embeds, + timestep=torch.tensor(timestep), + img_ids=latent_image_ids, + txt_ids=text_ids, + return_dict=False, + )[0] + + # Run transformer inference and measure time + start_transformer_step_time = time.time() + outputs = pipeline.transformer.qpc_session.run(inputs_aic) + end_transformer_step_time = time.time() + transformer_perf.append(end_transformer_step_time - start_transformer_step_time) + + noise_pred = torch.from_numpy(outputs["output"]) + + # Transformer MAD validation + mad_validator.validate_module_mad( + noise_pred_torch.detach().cpu().numpy(), + outputs["output"], + "transformer", + f"step {i} (t={t.item():.1f})", + ) + + # Update latents using scheduler + latents_dtype = latents.dtype + latents = pipeline.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + # Handle dtype mismatch + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + latents = latents.to(latents_dtype) + + # Update progress bar + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % pipeline.scheduler.order == 0): + progress_bar.update() + + # Step 8: Decode latents to images + if output_type == "latent": + image = latents + vae_decode_perf = 0.0 # No VAE decoding for latent output + else: + # Unpack and denormalize latents + latents = pipeline.model._unpack_latents(latents, height, width, pipeline.model.vae_scale_factor) + + # Denormalize latents + 
latents = (latents / pipeline.vae_decode.model.scaling_factor) + pipeline.vae_decode.model.shift_factor + # Initialize VAE decoder inference session + if pipeline.vae_decode.qpc_session is None: + pipeline.vae_decode.qpc_session = QAICInferenceSession( + str(pipeline.vae_decode.qpc_path), device_ids=pipeline.vae_decode.device_ids + ) + + # Allocate output buffer for VAE decoder + output_buffer = {"sample": np.random.rand(batch_size, 3, height, width).astype(np.float32)} + pipeline.vae_decode.qpc_session.set_buffers(output_buffer) + + # MAD Validation for VAE + # PyTorch reference inference + image_torch = pytorch_pipeline.vae.decode(latents, return_dict=False)[0] + + # Run VAE decoder inference and measure time + inputs = {"latent_sample": latents.numpy()} + start_decode_time = time.time() + image = pipeline.vae_decode.qpc_session.run(inputs) + end_decode_time = time.time() + vae_decode_perf = end_decode_time - start_decode_time + + # VAE MAD validation + mad_validator.validate_module_mad(image_torch.detach().cpu().numpy(), image["sample"], "vae_decoder") + + # Post-process image + image_tensor = torch.from_numpy(image["sample"]) + image = pipeline.model.image_processor.postprocess(image_tensor, output_type=output_type) + + # Build performance metrics + perf_metrics = [ + ModulePerf(module_name="text_encoder", perf=text_encoder_perf[0]), + ModulePerf(module_name="text_encoder_2", perf=text_encoder_perf[1]), + ModulePerf(module_name="transformer", perf=transformer_perf), + ModulePerf(module_name="vae_decoder", perf=vae_decode_perf), + ] + + return QEffPipelineOutput( + pipeline_module=perf_metrics, + images=image, + ) + + +@pytest.fixture(scope="session") +def flux_pipeline(): + """Setup compiled Flux pipeline for testing""" + config = INITIAL_TEST_CONFIG["model_setup"] + + pipeline = QEffFluxPipeline.from_pretrained( + "black-forest-labs/FLUX.1-schnell", + use_onnx_subfunctions=config["use_onnx_subfunctions"], + ) + + # Reduce to 2 layers for testing + 
original_blocks = pipeline.transformer.model.transformer_blocks + org_single_blocks = pipeline.transformer.model.single_transformer_blocks + + pipeline.transformer.model.config["num_layers"] = config["num_transformer_layers"] + pipeline.transformer.model.config["num_single_layers"] = config["num_single_layers"] + pipeline.transformer.model.transformer_blocks = torch.nn.ModuleList( + [original_blocks[i] for i in range(0, pipeline.transformer.model.config["num_layers"])] + ) + pipeline.transformer.model.single_transformer_blocks = torch.nn.ModuleList( + [org_single_blocks[i] for i in range(0, pipeline.transformer.model.config["num_single_layers"])] + ) + + ### Pytorch pipeline + pytorch_pipeline = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell") + original_blocks_pt = pytorch_pipeline.transformer.transformer_blocks + org_single_blocks_pt = pytorch_pipeline.transformer.single_transformer_blocks + pytorch_pipeline.transformer.transformer_blocks = torch.nn.ModuleList( + [original_blocks_pt[i] for i in range(0, pipeline.transformer.model.config["num_layers"])] + ) + pytorch_pipeline.transformer.single_transformer_blocks = torch.nn.ModuleList( + [org_single_blocks_pt[i] for i in range(0, pipeline.transformer.model.config["num_single_layers"])] + ) + return pipeline, pytorch_pipeline + + +@pytest.mark.diffusion_models +@pytest.mark.on_qaic +def test_flux_pipeline(flux_pipeline): + """ + Comprehensive Flux pipeline test that follows the exact same flow as pipeline_flux.py: + - 256x256 resolution - 2 transformer layers + - MAD validation + - Functional image generation test + - Export/compilation checks + - Returns QEffPipelineOutput with performance metrics + """ + pipeline, pytorch_pipeline = flux_pipeline + config = INITIAL_TEST_CONFIG + + # Print test header + DiffusersTestUtils.print_test_header( + f"FLUX PIPELINE TEST - {config['model_setup']['height']}x{config['model_setup']['width']} Resolution, {config['model_setup']['num_transformer_layers']} 
Layers", + config, + ) + + # Test parameters + test_prompt = config["pipeline_params"]["test_prompt"] + num_inference_steps = config["pipeline_params"]["num_inference_steps"] + guidance_scale = config["pipeline_params"]["guidance_scale"] + max_sequence_length = config["pipeline_params"]["max_sequence_length"] + + # Generate with MAD validation + generator = torch.manual_seed(42) + start_time = time.time() + + try: + # Run the pipeline with integrated MAD validation (follows exact pipeline flow) + result = flux_pipeline_call_with_mad_validation( + pipeline, + pytorch_pipeline, + height=config["model_setup"]["height"], + width=config["model_setup"]["width"], + prompt=test_prompt, + guidance_scale=guidance_scale, + num_inference_steps=num_inference_steps, + max_sequence_length=max_sequence_length, + custom_config_path=CONFIG_PATH, + generator=generator, + mad_tolerances=config["mad_validation"]["tolerances"], + parallel_compile=True, + return_dict=True, + ) + + execution_time = time.time() - start_time + + # Validate image generation + if config["pipeline_params"]["validate_gen_img"]: + assert result is not None, "Pipeline returned None" + assert hasattr(result, "images"), "Result missing 'images' attribute" + assert len(result.images) > 0, "No images generated" + + generated_image = result.images[0] + expected_size = (config["model_setup"]["height"], config["model_setup"]["width"]) + # Validate image properties using utilities + image_validation = DiffusersTestUtils.validate_image_generation( + generated_image, expected_size, config["pipeline_params"]["min_image_variance"] + ) + + print("\nāœ… IMAGE VALIDATION PASSED") + print(f" - Size: {image_validation['size']}") + print(f" - Mode: {image_validation['mode']}") + print(f" - Variance: {image_validation['variance']:.2f}") + print(f" - Mean pixel value: {image_validation['mean_pixel_value']:.2f}") + file_path = "test_flux_256x256_2layers.png" + # Save test image + generated_image.save(file_path) + + if 
os.path.exists(file_path): + print(f"Image saved successfully at: {file_path}") + else: + print("Image was not saved.") + + if config["validation_checks"]["onnx_export"]: + # Check if ONNX files exist (basic check) + print("\nšŸ” ONNX Export Validation:") + for module_name in ["text_encoder", "text_encoder_2", "transformer", "vae_decode"]: + module_obj = getattr(pipeline, module_name, None) + if module_obj and hasattr(module_obj, "onnx_path") and module_obj.onnx_path: + DiffusersTestUtils.check_file_exists(str(module_obj.onnx_path), f"{module_name} ONNX") + + if config["validation_checks"]["compilation"]: + # Check if QPC files exist (basic check) + print("\nšŸ” Compilation Validation:") + for module_name in ["text_encoder", "text_encoder_2", "transformer", "vae_decode"]: + module_obj = getattr(pipeline, module_name, None) + if module_obj and hasattr(module_obj, "qpc_path") and module_obj.qpc_path: + DiffusersTestUtils.check_file_exists(str(module_obj.qpc_path), f"{module_name} QPC") + + # Print test summary using utilities + print(f"\nTotal execution time: {execution_time:.4f}s") + except Exception as e: + print(f"\nTEST FAILED: {e}") + raise + + +if __name__ == "__main__": + # This allows running the test file directly for debugging + pytest.main([__file__, "-v", "-s", "-m", "flux"]) +# pytest tests/diffusers/test_flux.py -m flux -v -s --tb=short diff --git a/tests/transformers/test_causal_lm.py b/tests/transformers/test_causal_lm.py index 0810ac6ba..3eaaf0f69 100644 --- a/tests/transformers/test_causal_lm.py +++ b/tests/transformers/test_causal_lm.py @@ -211,7 +211,7 @@ def test_causal_lm_hash_creation(config, cb, tmp_path): @pytest.fixture def tmp_cache(tmp_path, monkeypatch): - monkeypatch.setattr("QEfficient.utils._utils.QEFF_HOME", tmp_path) + monkeypatch.setattr("QEfficient.utils.export_utils.QEFF_HOME", tmp_path) yield tmp_path diff --git a/tests/transformers/test_speech_seq2seq.py b/tests/transformers/test_speech_seq2seq.py index 59281b73b..bc53cb539 
100644 --- a/tests/transformers/test_speech_seq2seq.py +++ b/tests/transformers/test_speech_seq2seq.py @@ -141,7 +141,7 @@ def test_seq2seq_hash_creation(config, tmp_path): @pytest.fixture def tmp_cache(tmp_path, monkeypatch): - monkeypatch.setattr("QEfficient.utils._utils.QEFF_HOME", tmp_path) + monkeypatch.setattr("QEfficient.utils.export_utils.QEFF_HOME", tmp_path) yield tmp_path diff --git a/tests/utils/test_hash_utils.py b/tests/utils/test_hash_utils.py index fefa73973..b7a5495c6 100644 --- a/tests/utils/test_hash_utils.py +++ b/tests/utils/test_hash_utils.py @@ -41,7 +41,7 @@ def test_to_hashable_float_nan(value): def test_json_serializable(): # Test with a set - assert json_serializable({1, 2, 3}) == [1, 2, 3] + assert json_serializable({1, 2, 3}) == ["1", "2", "3"] # Test with an unsupported type with pytest.raises(TypeError): json_serializable({1, 2, 3, {4, 5}}) From 2f8c7af0e6f70a1a34b3412072c3643a7cca8bf0 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Singh Date: Wed, 10 Dec 2025 14:50:47 +0530 Subject: [PATCH 45/60] Subfunction fixes for KV cache transform (#655) Signed-off-by: abhishek-singh591 Signed-off-by: Dhiraj Kumar Sah --- QEfficient/base/onnx_transforms.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/QEfficient/base/onnx_transforms.py b/QEfficient/base/onnx_transforms.py index 945850c50..bdf7bf677 100644 --- a/QEfficient/base/onnx_transforms.py +++ b/QEfficient/base/onnx_transforms.py @@ -19,16 +19,20 @@ from QEfficient.customop.ctx_scatter_gather import ( CtxGather, CtxGather3D, + CtxGatherBlockedKV, CtxGatherFunc, CtxGatherFunc3D, + CtxGatherFuncBlockedKV, CtxScatter, CtxScatter3D, CtxScatterFunc, CtxScatterFunc3D, ) from QEfficient.customop.ctx_scatter_gather_cb import ( + CtxGatherBlockedKVCB, CtxGatherCB, CtxGatherCB3D, + CtxGatherFuncBlockedKVCB, CtxGatherFuncCB, CtxGatherFuncCB3D, CtxScatterCB, @@ -95,6 +99,8 @@ class CustomOpTransform(BaseOnnxTransform): "CtxScatterFuncCB3D": (CtxScatterFuncCB3D, CtxScatterCB3D), 
"CtxGatherFuncCB": (CtxGatherFuncCB, CtxGatherCB), "CtxGatherFuncCB3D": (CtxGatherFuncCB3D, CtxGatherCB3D), + "CtxGatherFuncBlockedKV": (CtxGatherFuncBlockedKV, CtxGatherBlockedKV), + "CtxGatherFuncBlockedKVCB": (CtxGatherFuncBlockedKVCB, CtxGatherBlockedKVCB), } @classmethod From 28a4b663fa8a9bee1ed067ebcc9c5f5b33127ede Mon Sep 17 00:00:00 2001 From: Abukhoyer Shaik Date: Thu, 11 Dec 2025 15:20:21 +0530 Subject: [PATCH 46/60] [Test]: subfunction test moved to qaic Test Stage (#665) Signed-off-by: Abukhoyer Shaik Signed-off-by: Dhiraj Kumar Sah --- tests/transformers/test_subfunction.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/transformers/test_subfunction.py b/tests/transformers/test_subfunction.py index 36cfc0ce5..6183e1282 100644 --- a/tests/transformers/test_subfunction.py +++ b/tests/transformers/test_subfunction.py @@ -44,6 +44,7 @@ config_ids = [x.model_type for x in configs] +@pytest.mark.on_qaic @pytest.mark.parametrize("config", configs, ids=config_ids) def test_subfunction_vs_nonsubfunction(config, tmp_path): tokenizer = AutoTokenizer.from_pretrained(config.model_type) From d6070aaeefabeab67738061773f40041fb1c80bf Mon Sep 17 00:00:00 2001 From: Onkar Chougule <168134249+ochougul@users.noreply.github.com> Date: Sun, 14 Dec 2025 20:10:56 +0530 Subject: [PATCH 47/60] Prefill+decode gpt oss (#608) # We should be using disaggragate serving for GPTOSS model for best performance - GPT-OSS model has 128/4 for 120b and 32/4 ratio of total_experts/experts_per_tok - We use read all experts only once always strategy in prefill-only model - And we treat weights activtions meaning read only chosen experts for decode-only model # Prefill-only model ## Blocking default behviour when `prefill_only=True` in compile API - NUM_Q_BLOCKS= set number of Q blocks in attention - NUM_FFN_BLOCKS= set number of blocks in FFN - ENABLE_OPT_SWA=0 or 1 to enable/disable optimized SWA. 
when enabled we will be using only valid KVs for given block in Attention reducing MACs - prefix_caching is not supported with this mode ## Chunking pass `enable_chunking=True` and `prefill_only=True` in compile API - Optimized SWA i.e. reading only valid KV as per diagonal attention mask is enabled for this version by default - This model can be used for prefix_caching by passing `kv_cache_batch_size=` in compile API # Decode-only model ## Retain Sliding window length of KV for sliding window layers, default behavour when `prefill_seq_len=1` in compile API - This reduces the amount of DDR used by the model - CB is enabled for this version pass `continous_batching=True` in `from_pretrained` call and strictly pass `full_batch_size=` and optinally `kv_cache_batch_size=` if needed ## Full KV for sliding window layers pass `retain_full_kv=True` along with `prefill_seq_len=1` in compile API - This uses higher DDR as we are retaining ctx_len KV even for sliding window layers but will be reading only sliding window len kv in attention - CB is enabled for this version pass `continous_batching=True` in `from_pretrained` call and strictly pass `full_batch_size=` and optinally `kv_cache_batch_size=` if needed - This is enabled for the usecase of multi-turn chat, where we will be running prefill-> decode and then use cache of prefill as well as decode combined to again run prefill, so we want to retain full KV for sliding window layers NOTE: * decode-only model currently fails compilation with `use_onnx_subfunctions=True` so avoid using it * 120B model needs NPI, there are two versions of NPI one with and without subfunction both are uploaded here, pass it as `node_precision_info=` * It is advised to use `use_onnx_subfunctions=True` with prefill-only model, otherwise the compilation times are too high, with this the model is supposed to export and fail during compile as it needs assert sdk, so user is supposed to run this compilation manually by pasting the command printed in 
the error --------- Signed-off-by: vbaddi Signed-off-by: Onkar Chougule Signed-off-by: Mamta Singh Signed-off-by: Onkar Chougule <168134249+ochougul@users.noreply.github.com> Co-authored-by: Vinayak Baddi Co-authored-by: Vinayak Baddi Co-authored-by: Mamta Singh Co-authored-by: Mamta Singh <168400541+quic-mamta@users.noreply.github.com> Signed-off-by: Dhiraj Kumar Sah --- QEfficient/__init__.py | 23 +- QEfficient/base/modeling_qeff.py | 82 ++- QEfficient/base/onnx_transforms.py | 4 +- QEfficient/customop/ctx_scatter_gather.py | 1 + QEfficient/customop/ctx_scatter_gather_cb.py | 1 + .../diffusers/pipelines/pipeline_module.py | 16 +- QEfficient/peft/auto.py | 6 +- QEfficient/peft/lora/auto.py | 4 +- QEfficient/transformers/cache_utils.py | 121 ++++ QEfficient/transformers/modeling_utils.py | 3 + .../models/gpt_oss/modeling_gpt_oss.py | 626 +++++++++++++++++- .../transformers/models/modeling_auto.py | 213 ++++-- .../transformers/models/pytorch_transforms.py | 38 ++ .../transformers/quantizers/__init__.py | 4 +- QEfficient/utils/export_utils.py | 60 +- QEfficient/utils/hash_utils.py | 3 - examples/disagg_serving/README.md | 31 + .../disagg_serving/gpt_oss_disagg_mode.py | 137 ++++ .../disagg_serving/subfunction_120b_npi.yaml | 27 + .../without_subfunc_npi_120b.yaml | 148 +++++ examples/gpt_oss_disagg_mode_with_chunking.py | 137 ++++ scripts/Jenkinsfile | 2 +- tests/peft/lora/test_lora_model.py | 4 +- tests/peft/test_peft_model.py | 6 +- tests/transformers/models/test_disagg_mode.py | 192 ++++++ tests/transformers/test_causal_lm.py | 90 ++- 26 files changed, 1805 insertions(+), 174 deletions(-) create mode 100644 examples/disagg_serving/README.md create mode 100644 examples/disagg_serving/gpt_oss_disagg_mode.py create mode 100644 examples/disagg_serving/subfunction_120b_npi.yaml create mode 100644 examples/disagg_serving/without_subfunc_npi_120b.yaml create mode 100644 examples/gpt_oss_disagg_mode_with_chunking.py create mode 100644 
tests/transformers/models/test_disagg_mode.py diff --git a/QEfficient/__init__.py b/QEfficient/__init__.py index 2d8f72e0a..b507363c3 100644 --- a/QEfficient/__init__.py +++ b/QEfficient/__init__.py @@ -6,7 +6,17 @@ # ----------------------------------------------------------------------------- import os -import warnings + +# ----------------------------------------------------------------------------- # +# For faster downloads via hf_transfer +# This code is put above import statements as this needs to be executed before +# hf_transfer is imported (will happen on line 15 via leading imports) +os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" +# DO NOT ADD ANY CODE ABOVE THIS LINE +# Please contact maintainers if you must edit this file above this line. +# ----------------------------------------------------------------------------- # +# Placeholder for all non-transformer models registered in QEfficient +import warnings # noqa: I001 import QEfficient.utils.model_registery # noqa: F401 from QEfficient.base import ( @@ -26,6 +36,10 @@ from QEfficient.utils import custom_format_warning from QEfficient.utils.logging_utils import logger +# custom warning for the better logging experience +warnings.formatwarning = custom_format_warning + + # Users can use QEfficient.export for exporting models to ONNX export = qualcomm_efficient_converter __all__ = [ @@ -42,14 +56,7 @@ "QEFFCommonLoader", "QEffFluxPipeline", ] -# For faster downloads via hf_transfer -# This code is put above import statements as this needs to be executed before -# hf_transfer is imported (will happen on line 15 via leading imports) -os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" -# Placeholder for all non-transformer models registered in QEfficient -# custom warning for the better logging experience -warnings.formatwarning = custom_format_warning # Conditionally import QAIC-related modules if the SDK is installed __version__ = "0.0.1.dev0" diff --git a/QEfficient/base/modeling_qeff.py 
b/QEfficient/base/modeling_qeff.py index ea347016b..2c98a83f3 100644 --- a/QEfficient/base/modeling_qeff.py +++ b/QEfficient/base/modeling_qeff.py @@ -60,6 +60,7 @@ def __init__(self, model: torch.nn.Module, **kwargs) -> None: super().__init__() self.model = model self.hash_params = create_model_params(self, **kwargs) + self.prefill_onnx_path: Optional[str] = None self.onnx_path: Optional[str] = None self.qpc_path: Optional[str] = None self.qpc_session: Optional[QAICInferenceSession] = None @@ -204,10 +205,11 @@ def _export( example_inputs: Dict[str, torch.Tensor], output_names: List[str], dynamic_axes: Dict[str, Dict[int, str]], - export_kwargs: Optional[Dict[str, any]] = None, onnx_transform_kwargs: Optional[Dict[str, any]] = None, export_dir: Optional[str] = None, offload_pt_weights: bool = True, + prefill_only: Optional[bool] = False, + **export_kwargs, ) -> str: """ Export the PyTorch model to ONNX and apply ONNX transforms @@ -232,11 +234,16 @@ def _export( instance using from_pretrained() for re-export. 
""" + # TODO: Hack for retain_full_kv, handle this outside + export_kwargs.pop("retain_full_kv", None) onnx_path = export_dir / f"{self.model_name}.onnx" # Return early if ONNX already exists if onnx_path.is_file(): - self.onnx_path = onnx_path + if prefill_only: + self.prefill_onnx_path = onnx_path + else: + self.onnx_path = onnx_path return onnx_path # check if the model is in meta state or weights are offloaded @@ -272,9 +279,6 @@ def _export( input_names.append(param) try: - # Export to ONNX - export_kwargs = {} if export_kwargs is None else export_kwargs - torch.onnx.export( self.model, (example_inputs,), @@ -318,9 +322,42 @@ def _export( finally: shutil.rmtree(tmp_onnx_dir, ignore_errors=True) - self.onnx_path = onnx_path + if prefill_only: + self.prefill_onnx_path = onnx_path + else: + self.onnx_path = onnx_path return onnx_path + def get_onnx_path( + self, + prefill_only: Optional[bool] = False, + enable_chunking: Optional[bool] = False, + specializations: Optional[List[Dict[str, int]]] = None, + offload_pt_weights: Optional[bool] = True, + use_onnx_subfunctions: Optional[bool] = False, + retain_full_kv: Optional[bool] = False, + ): + kwargs = { + "offload_pt_weights": offload_pt_weights, + "use_onnx_subfunctions": use_onnx_subfunctions, + "retain_full_kv": retain_full_kv, + } + if prefill_only: + if self.prefill_onnx_path is None: + kwargs.update( + { + "prefill_only": prefill_only, + "prefill_seq_len": specializations[0].get("seq_len"), + "enable_chunking": enable_chunking, + } + ) + self.export(**kwargs) + return self.prefill_onnx_path + else: + if self.onnx_path is None: + self.export(**kwargs) + return self.onnx_path + @dump_qconfig def _compile( self, @@ -335,6 +372,10 @@ def _compile( enable_qnn: Optional[bool] = False, qnn_config: Optional[str] = None, use_onnx_subfunctions: bool = False, + prefill_only: Optional[str] = None, + offload_pt_weights: Optional[bool] = True, + enable_chunking: Optional[bool] = False, + retain_full_kv: Optional[bool] = 
None, **compiler_options, ) -> str: """ @@ -360,11 +401,18 @@ def _compile( For QNN Compilation path, when enable_qnn is set to True, any parameter passed in compiler_options will be ignored. """ - - if onnx_path is None and self.onnx_path is None: - self.export(use_onnx_subfunctions=use_onnx_subfunctions) - - onnx_path = Path(onnx_path or self.onnx_path) + onnx_path = Path( + onnx_path + if onnx_path + else self.get_onnx_path( + prefill_only, + enable_chunking, + specializations, + offload_pt_weights, + use_onnx_subfunctions, + retain_full_kv, + ) + ) compile_dir = Path(compile_dir or onnx_path.parent) qpc_path = compile_dir / "qpc" if not onnx_path.is_file(): @@ -426,6 +474,7 @@ def _compile( "mdp_ts_num_devices": mdp_ts_num_devices, "mdp_ts_json": mdp_ts_json, "num_speculative_tokens": num_speculative_tokens, + "prefill_only": prefill_only, } compile_hash = hash_dict_params(compile_hash_params) @@ -465,6 +514,16 @@ def _compile( command.append(f"-aic-binary-dir={qpc_path}") logger.info(f"Running compiler: {' '.join(command)}") + if use_onnx_subfunctions: + + class FeatureNotAvailableError(Exception): + pass + + exec_command = f'QAIC_COMPILER_OPTS_UNSUPPORTED="-loader-inline-all=0" {" ".join(command)}' + raise FeatureNotAvailableError( + "ONNX graph is exported with subfunctions, assert version of apps SDK should be used for compiling this model." 
+ + f"\nRun following command manually with assert compiler:\n{exec_command}" + ) try: subprocess.run(command, capture_output=True, check=True) except subprocess.CalledProcessError as e: @@ -485,5 +544,4 @@ def _compile( logger.info("Hashed parameters exported successfully.") self.qpc_path = qpc_path - return qpc_path diff --git a/QEfficient/base/onnx_transforms.py b/QEfficient/base/onnx_transforms.py index bdf7bf677..16697cec9 100644 --- a/QEfficient/base/onnx_transforms.py +++ b/QEfficient/base/onnx_transforms.py @@ -95,12 +95,12 @@ class CustomOpTransform(BaseOnnxTransform): "CtxScatterFunc3D": (CtxScatterFunc3D, CtxScatter3D), "CtxGatherFunc": (CtxGatherFunc, CtxGather), "CtxGatherFunc3D": (CtxGatherFunc3D, CtxGather3D), - "CtxScatterFuncCB": (CtxScatterFuncCB, CtxScatterCB), "CtxScatterFuncCB3D": (CtxScatterFuncCB3D, CtxScatterCB3D), - "CtxGatherFuncCB": (CtxGatherFuncCB, CtxGatherCB), "CtxGatherFuncCB3D": (CtxGatherFuncCB3D, CtxGatherCB3D), "CtxGatherFuncBlockedKV": (CtxGatherFuncBlockedKV, CtxGatherBlockedKV), "CtxGatherFuncBlockedKVCB": (CtxGatherFuncBlockedKVCB, CtxGatherBlockedKVCB), + "CtxScatterFuncCB": (CtxScatterFuncCB, CtxScatterCB), + "CtxGatherFuncCB": (CtxGatherFuncCB, CtxGatherCB), } @classmethod diff --git a/QEfficient/customop/ctx_scatter_gather.py b/QEfficient/customop/ctx_scatter_gather.py index c7dc8639a..7b15effe7 100644 --- a/QEfficient/customop/ctx_scatter_gather.py +++ b/QEfficient/customop/ctx_scatter_gather.py @@ -136,6 +136,7 @@ class CtxGatherFunc(torch.autograd.Function): def forward(data: torch.Tensor, ctx_indices: torch.Tensor, comp_ctx_len: int): batch_indices = torch.arange(data.shape[0]).view(-1, 1, 1) head_indices = torch.arange(data.shape[1]).view(1, -1, 1) + ctx_indices = torch.where(ctx_indices == torch.iinfo(torch.int32).max, 0, ctx_indices) return data[batch_indices, head_indices, ctx_indices] @staticmethod diff --git a/QEfficient/customop/ctx_scatter_gather_cb.py b/QEfficient/customop/ctx_scatter_gather_cb.py index 
8a06bc2b1..c15b60810 100644 --- a/QEfficient/customop/ctx_scatter_gather_cb.py +++ b/QEfficient/customop/ctx_scatter_gather_cb.py @@ -126,6 +126,7 @@ class CtxGatherFuncCB(torch.autograd.Function): def forward(data: torch.Tensor, batch_index: torch.Tensor, ctx_indices: torch.Tensor, comp_ctx_len: int): batch_indices = batch_index.view(-1, 1, 1) head_indices = torch.arange(data.shape[1]).view(1, -1, 1) + ctx_indices = torch.where(ctx_indices >= data.shape[2], 0, ctx_indices) return data[batch_indices, head_indices, ctx_indices] @staticmethod diff --git a/QEfficient/diffusers/pipelines/pipeline_module.py b/QEfficient/diffusers/pipelines/pipeline_module.py index 41a3d29f7..6d9243fdc 100644 --- a/QEfficient/diffusers/pipelines/pipeline_module.py +++ b/QEfficient/diffusers/pipelines/pipeline_module.py @@ -102,7 +102,7 @@ def export( output_names: List[str], dynamic_axes: Dict, export_dir: str = None, - export_kwargs: Dict = None, + export_kwargs: Dict = {}, ) -> str: """ Export the text encoder model to ONNX format. @@ -122,7 +122,7 @@ def export( output_names=output_names, dynamic_axes=dynamic_axes, export_dir=export_dir, - export_kwargs=export_kwargs, + **export_kwargs, ) def compile(self, specializations: List[Dict], **compiler_options) -> None: @@ -179,7 +179,7 @@ def export( output_names: List[str], dynamic_axes: Dict, export_dir: str = None, - export_kwargs: Dict = None, + export_kwargs: Dict = {}, ) -> str: """ Export the UNet model to ONNX format. @@ -199,7 +199,7 @@ def export( output_names=output_names, dynamic_axes=dynamic_axes, export_dir=export_dir, - export_kwargs=export_kwargs, + **export_kwargs, ) def compile(self, specializations: List[Dict], **compiler_options) -> None: @@ -292,7 +292,7 @@ def export( output_names: List[str], dynamic_axes: Dict, export_dir: str = None, - export_kwargs: Dict = None, + export_kwargs: Dict = {}, ) -> str: """ Export the VAE model to ONNX format. 
@@ -312,7 +312,7 @@ def export( output_names=output_names, dynamic_axes=dynamic_axes, export_dir=export_dir, - export_kwargs=export_kwargs, + **export_kwargs, ) def compile(self, specializations: List[Dict], **compiler_options) -> None: @@ -438,7 +438,7 @@ def export( output_names: List[str], dynamic_axes: Dict, export_dir: str = None, - export_kwargs: Dict = None, + export_kwargs: Dict = {}, use_onnx_subfunctions: bool = False, ) -> str: """ @@ -466,8 +466,8 @@ def export( output_names=output_names, dynamic_axes=dynamic_axes, export_dir=export_dir, - export_kwargs=export_kwargs, offload_pt_weights=False, # As weights are needed with AdaLN changes + **export_kwargs, ) def compile(self, specializations: List[Dict], **compiler_options) -> None: diff --git a/QEfficient/peft/auto.py b/QEfficient/peft/auto.py index e69aebb2b..6c7173072 100644 --- a/QEfficient/peft/auto.py +++ b/QEfficient/peft/auto.py @@ -253,7 +253,7 @@ def from_pretrained(cls, pretrained_name_or_path: str, *args, **kwargs): obj = cls._from_pretrained(pretrained_name_or_path, *args, **kwargs) return obj - def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = False) -> str: + def export(self, export_dir: Optional[str] = None, **kwargs) -> str: """ Export the model with the active adapter to ONNX format. 
@@ -291,10 +291,10 @@ def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = example_inputs, output_names, dynamic_axes, - export_kwargs={"do_constant_folding": False}, # To avoid merging adapter weights with base weights + do_constant_folding=False, # To avoid merging adapter weights with base weights onnx_transform_kwargs={"adapter_name": self.model.active_adapter}, export_dir=export_dir, - use_onnx_subfunctions=use_onnx_subfunctions, + **kwargs, ) def compile( diff --git a/QEfficient/peft/lora/auto.py b/QEfficient/peft/lora/auto.py index 64fa3f61c..8ff8335f5 100644 --- a/QEfficient/peft/lora/auto.py +++ b/QEfficient/peft/lora/auto.py @@ -327,7 +327,7 @@ def _init_adapter_model(self): # load_weight to model self._load_adapter_weights_to_model() - def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = False) -> str: + def export(self, export_dir: Optional[str] = None, **kwargs) -> str: """ Export the model with all loaded adapters to ONNX format using ``torch.onnx.export``. 
@@ -387,7 +387,7 @@ def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = output_names, dynamic_axes, export_dir=export_dir, - use_onnx_subfunctions=use_onnx_subfunctions, + **kwargs, ) def generate( diff --git a/QEfficient/transformers/cache_utils.py b/QEfficient/transformers/cache_utils.py index 62cc71a4c..faadaba6b 100644 --- a/QEfficient/transformers/cache_utils.py +++ b/QEfficient/transformers/cache_utils.py @@ -46,6 +46,7 @@ def _get_invalid_idx_value(cls): """ if torch.onnx.is_in_onnx_export(): if cls.SUBFUNC_ENABLED: + # TODO: should not return 0 remove this if condition, it can hurt perf return 0 else: return torch.iinfo(torch.int32).max @@ -681,6 +682,37 @@ def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]: legacy_cache += ((self.key_cache[layer_idx], self.value_cache[layer_idx]),) return legacy_cache + def write_only( + self, + key_states: torch.Tensor, + value_states: torch.Tensor, + layer_idx: int, + cache_kwargs: Optional[Dict[str, Any]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + if len(self.key_cache) <= layer_idx: + self.key_cache.append(key_states) + self.value_cache.append(value_states) + k_out, v_out = key_states, value_states + else: + position_ids = cache_kwargs.get("position_ids") + is_sliding_layer = cache_kwargs.get("is_sliding") + _, _, ctx_len, _ = self.key_cache[layer_idx].shape + if is_sliding_layer: + kv_position_ids = torch.arange(ctx_len, dtype=torch.int64).reshape(1, -1) + self.key_cache[layer_idx] = CtxScatterFunc.apply(self.key_cache[layer_idx], kv_position_ids, key_states) + self.value_cache[layer_idx] = CtxScatterFunc.apply( + self.value_cache[layer_idx], kv_position_ids, value_states + ) + else: + kv_position_ids = position_ids + + self.key_cache[layer_idx] = CtxScatterFunc.apply(self.key_cache[layer_idx], kv_position_ids, key_states) + self.value_cache[layer_idx] = CtxScatterFunc.apply( + self.value_cache[layer_idx], kv_position_ids, value_states + ) + k_out, v_out 
= self.key_cache[layer_idx], self.value_cache[layer_idx] + return k_out, v_out + def update( self, key_states: torch.Tensor, @@ -747,3 +779,92 @@ def update( v_out = torch.where(invalid_mask.unsqueeze(-1), torch.tensor(0.0, dtype=torch.float32), v_out) return k_out, v_out + + def full_cache_update_chunked( + self, + key_states: torch.Tensor, + value_states: torch.Tensor, + layer_idx: int, + cache_kwargs: Optional[Dict[str, Any]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + position_ids = cache_kwargs.get("position_ids") + batch_index = cache_kwargs.get("batch_index") + invalid_idx_value = InvalidIndexProvider._get_invalid_idx_value() + + # Scatter + if batch_index is not None: + if torch.onnx.is_in_onnx_export(): + scatter_position_ids = torch.where(position_ids < 0, torch.iinfo(torch.int32).max, position_ids) + self.key_cache[layer_idx] = CtxScatterFuncCB.apply( + self.key_cache[layer_idx], batch_index, scatter_position_ids, key_states + ) + self.value_cache[layer_idx] = CtxScatterFuncCB.apply( + self.value_cache[layer_idx], batch_index, scatter_position_ids, value_states + ) + else: + self.key_cache[layer_idx] = CtxScatterFunc.apply(self.key_cache[layer_idx], position_ids, key_states) + self.value_cache[layer_idx] = CtxScatterFunc.apply(self.value_cache[layer_idx], position_ids, value_states) + + k_out, v_out = self.key_cache[layer_idx], self.value_cache[layer_idx] + + # Gather + ctx_len = cache_kwargs.get("CCL", k_out.shape[2]) + ctx_indices = torch.arange(ctx_len)[None, None, ...] 
+ gather_limit = position_ids.max(1, keepdim=True).values.unsqueeze(1) + invalid_mask = ctx_indices > gather_limit + ctx_indices = torch.where(invalid_mask, invalid_idx_value, ctx_indices) + if batch_index is not None: + k_out = CtxGatherFuncCB.apply(k_out, batch_index, ctx_indices, ctx_len) + v_out = CtxGatherFuncCB.apply(v_out, batch_index, ctx_indices, ctx_len) + else: + k_out = CtxGatherFunc.apply(k_out, ctx_indices, ctx_len) + v_out = CtxGatherFunc.apply(v_out, ctx_indices, ctx_len) + v_out = torch.where(invalid_mask.unsqueeze(-1), torch.tensor(0.0, dtype=torch.float32), v_out) + + return k_out, v_out + + def sliding_window_update_chunked( + self, + key_states: torch.Tensor, + value_states: torch.Tensor, + layer_idx: int, + cache_kwargs: Optional[Dict[str, Any]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + position_ids = cache_kwargs.get("position_ids") + batch_index = cache_kwargs.get("batch_index") + invalid_idx_value = InvalidIndexProvider._get_invalid_idx_value() + + if batch_index is not None: + if torch.onnx.is_in_onnx_export(): + scatter_position_ids = torch.where(position_ids < 0, torch.iinfo(torch.int32).max, position_ids) + self.key_cache[layer_idx] = CtxScatterFuncCB.apply( + self.key_cache[layer_idx], batch_index, scatter_position_ids, key_states + ) + self.value_cache[layer_idx] = CtxScatterFuncCB.apply( + self.value_cache[layer_idx], batch_index, scatter_position_ids, value_states + ) + else: + self.key_cache[layer_idx] = CtxScatterFunc.apply(self.key_cache[layer_idx], position_ids, key_states) + self.value_cache[layer_idx] = CtxScatterFunc.apply(self.value_cache[layer_idx], position_ids, value_states) + + k_out, v_out = self.key_cache[layer_idx], self.value_cache[layer_idx] + sliding_window_len = cache_kwargs.get("sliding_window") + + # Gather + ctx_len = position_ids.shape[1] + sliding_window_len + ctx_indices = torch.arange(ctx_len)[None, None, ...] 
+ first_pos_idx = position_ids[0][0] + add_idx = torch.where(first_pos_idx >= sliding_window_len, first_pos_idx - sliding_window_len, 0) + ctx_indices += add_idx + gather_limit = position_ids.max(1, keepdim=True).values.unsqueeze(1) + invalid_mask = ctx_indices > gather_limit + ctx_indices = torch.where(invalid_mask, invalid_idx_value, ctx_indices) + if batch_index is not None: + k_out = CtxGatherFuncCB.apply(k_out, batch_index, ctx_indices, ctx_len) + v_out = CtxGatherFuncCB.apply(v_out, batch_index, ctx_indices, ctx_len) + else: + k_out = CtxGatherFunc.apply(k_out, ctx_indices, ctx_len) + v_out = CtxGatherFunc.apply(v_out, ctx_indices, ctx_len) + v_out = torch.where(invalid_mask.unsqueeze(-1), torch.tensor(0.0, dtype=torch.float32), v_out) + + return k_out, v_out diff --git a/QEfficient/transformers/modeling_utils.py b/QEfficient/transformers/modeling_utils.py index 5337b44f5..47059d8dc 100644 --- a/QEfficient/transformers/modeling_utils.py +++ b/QEfficient/transformers/modeling_utils.py @@ -188,6 +188,9 @@ # This is for supporting different seq_len for different layers for Sliding window attn, chunked attn etc. DYNAMIC_SEQ_LEN_SUPPORTED_MODEL_ARCH = {"gemma3", "llama4", "gemma3_text", "llama4_text"} +# This is for supporting different modelling classes specially written for prefill-only model +SPECIALIZED_PREFILL_ONLY_MODEL_ARCH = {"gpt_oss"} + # Define a transformers layers to QEff layers dictionary # While onboarding new models make sure to add the new layer maps to this dictionary. 
TransformersToQEffModulesDict: Dict[Type[nn.Module], Type[nn.Module]] = { diff --git a/QEfficient/transformers/models/gpt_oss/modeling_gpt_oss.py b/QEfficient/transformers/models/gpt_oss/modeling_gpt_oss.py index 84552aff4..3efe890b8 100644 --- a/QEfficient/transformers/models/gpt_oss/modeling_gpt_oss.py +++ b/QEfficient/transformers/models/gpt_oss/modeling_gpt_oss.py @@ -4,6 +4,8 @@ # SPDX-License-Identifier: BSD-3-Clause # # ----------------------------------------------------------------------------- +import math +import os from typing import Callable, Optional, Union import torch @@ -30,8 +32,8 @@ from QEfficient.transformers.cache_utils import QEffHybridCacheForGPTOSS from QEfficient.transformers.modeling_attn_mask_utils import _create_causal_mask -from QEfficient.utils import constants from QEfficient.utils.constants import MIN_MASKED_ATTENTION_VALUE +from QEfficient.utils.logging_utils import logger class QEffGptOssExperts(GptOssExperts): @@ -42,8 +44,8 @@ def __qeff_init__(self): self.up_proj_bias = nn.Parameter(torch.empty(self.num_experts, self.expert_dim)) -class QEffGptOssMLP(GptOssMLP): - def alt_forward(self, hidden: torch.Tensor): +class QEffPrefillOnlyChunkedGptOssMLP(GptOssMLP): + def forward(self, hidden: torch.Tensor): B, S, H = hidden.shape T = B * S hidden = hidden.view(T, H) @@ -78,7 +80,62 @@ def alt_forward(self, hidden: torch.Tensor): up = (hidden @ W_u) + b_u # [T, I] # Apply GptOss activation with clamping - gate = gate.clamp(min=None, max=self.experts.limit) + gate = gate.clamp(min=torch.finfo(torch.float16).min, max=self.experts.limit) + up = up.clamp(min=-self.experts.limit, max=self.experts.limit) + + # GLU activation + glu = gate * torch.sigmoid(gate * self.experts.alpha) + intermediate = (up + 1) * glu # [T, I] + + # Down projection + down_out = (intermediate @ W_d) + b_d # [T, H] + + # Apply routing weights and accumulate + expert_out += down_out * routing_weight + + # original shape [B, S, H] + return expert_out.view(B, S, H), 
router_logits + + +class QEffPrefillOnlyGptOssMLP(GptOssMLP): + def forward(self, hidden: torch.Tensor): + if os.environ.get("NUM_FFN_BLOCKS", None) is not None: + return self.blocked_ffn_forward(hidden) + B, S, H = hidden.shape + T = B * S + hidden = hidden.view(T, H) + + # Router computation + router_logits = F.linear(hidden, self.router.weight, self.router.bias) + + # Top-k selection + top_w, top_i = torch.topk(router_logits, self.router.top_k, dim=-1) # both [T, K] + top_w = torch.nn.functional.softmax(top_w, dim=1, dtype=top_w.dtype) + + masked_logits = torch.zeros_like(router_logits) + masked_logits.scatter_(1, top_i, top_w) + + # Routing weights for each expert [T, E] + routing_weights = masked_logits + + # ────────────────── allocate the output tensor ───── + expert_out = hidden.new_zeros((T, H)) # accumulation buffer + + # ───────────────────────── Expert computation loop ───────────────────────────── + for e in range(self.experts.num_experts): + routing_weight = routing_weights[:, e].unsqueeze(-1) # [T, 1] + + W_g, W_u = self.experts.gate_proj[e], self.experts.up_proj[e] # [H, I], [H, I] + b_g, b_u = self.experts.gate_proj_bias[e], self.experts.up_proj_bias[e] # [I], [I] + W_d = self.experts.down_proj[e] # [I, H] + b_d = self.experts.down_proj_bias[e] # [H] + + # Gate and Up projections + gate = (hidden @ W_g) + b_g # [T, I] + up = (hidden @ W_u) + b_u # [T, I] + + # Apply GptOss activation with clamping + gate = gate.clamp(min=torch.finfo(torch.float16).min, max=self.experts.limit) up = up.clamp(min=-self.experts.limit, max=self.experts.limit) # GLU activation @@ -88,6 +145,165 @@ def alt_forward(self, hidden: torch.Tensor): # Down projection down_out = (intermediate @ W_d) + b_d # [T, H] + # Apply routing weights and accumulate + expert_out += down_out * routing_weight + + # original shape [B, S, H] + return expert_out.view(B, S, H), router_logits + + def blocked_ffn_forward(self, hidden: torch.Tensor): + B, S, H = hidden.shape + T = B * S + hidden = 
hidden.view(T, H) + + # Router computation + router_logits = F.linear(hidden, self.router.weight, self.router.bias) + + # Top-k selection + top_w, top_i = torch.topk(router_logits, self.router.top_k, dim=-1) # both [T, K] + top_w = torch.nn.functional.softmax(top_w, dim=1, dtype=top_w.dtype) + + masked_logits = torch.zeros_like(router_logits) + masked_logits.scatter_(1, top_i, top_w) + + # Routing weights for each expert [T, E] + routing_weights = masked_logits + + # ────────────────── allocate the output tensor ───── + expert_out = hidden.new_zeros((T, H)) # accumulation buffer + target_blocks = int(os.environ.get("NUM_FFN_BLOCKS", 1)) + block_positions = [] + for j in range(target_blocks): + block_positions.append(j * (T // target_blocks)) + # ───────────────────────── Expert computation loop ───────────────────────────── + for e in range(self.experts.num_experts): + routing_weight = routing_weights[:, e].unsqueeze(-1) # [T, 1] + + W_g, W_u = self.experts.gate_proj[e], self.experts.up_proj[e] # [H, I], [H, I] + b_g, b_u = self.experts.gate_proj_bias[e], self.experts.up_proj_bias[e] # [I], [I] + W_d = self.experts.down_proj[e] # [I, H] + b_d = self.experts.down_proj_bias[e] # [H] + + block_count = 0 + outs = [] + for block_idx in range(target_blocks): + block_count += 1 + qi = block_positions[block_idx] + + # Calculate block size (last block should be handled with remainder) + if block_idx == target_blocks - 1: + real_q_len = T - qi + else: + real_q_len = block_positions[block_idx + 1] - qi + + tgb = hidden[qi : qi + real_q_len, :] + # Gate and Up projections + # Gate and Up projections + gate = (tgb @ W_g) + b_g # [T, I] + up = (tgb @ W_u) + b_u # [T, I] + + # Apply GptOss activation with clamping + gate = gate.clamp(min=torch.finfo(torch.float16).min, max=self.experts.limit) + up = up.clamp(min=-self.experts.limit, max=self.experts.limit) + + # GLU activation + glu = gate * torch.sigmoid(gate * self.experts.alpha) + intermediate = (up + 1) * glu # [T, I] + + # 
Down projection + down_out_block = (intermediate @ W_d) + b_d # [T, H] + + outs.append(down_out_block) + + down_out = torch.cat(outs, dim=0) + + # Apply routing weights and accumulate + expert_out += down_out * routing_weight + + # original shape [B, S, H] + return expert_out.view(B, S, H), router_logits + + def blocked_ffn_forward_block_weights(self, hidden: torch.Tensor): + B, S, H = hidden.shape + T = B * S + hidden = hidden.view(T, H) + + # Router computation + router_logits = F.linear(hidden, self.router.weight, self.router.bias) + + # Top-k selection + top_w, top_i = torch.topk(router_logits, self.router.top_k, dim=-1) # both [T, K] + top_w = torch.nn.functional.softmax(top_w, dim=1, dtype=top_w.dtype) + + masked_logits = torch.zeros_like(router_logits) + masked_logits.scatter_(1, top_i, top_w) + + # Routing weights for each expert [T, E] + routing_weights = masked_logits + + # ────────────────── allocate the output tensor ───── + expert_out = hidden.new_zeros((T, H)) # accumulation buffer + target_blocks = int(os.environ.get("NUM_BLOCKS", 1)) + block_positions = [] + for j in range(target_blocks): + block_positions.append(j * (T // target_blocks)) + # ───────────────────────── Expert computation loop ───────────────────────────── + for e in range(self.experts.num_experts): + routing_weight = routing_weights[:, e].unsqueeze(-1) # [T, 1] + + W_g, W_u = self.experts.gate_proj[e], self.experts.up_proj[e] # [H, I], [H, I] + b_g, b_u = self.experts.gate_proj_bias[e], self.experts.up_proj_bias[e] # [I], [I] + W_d = self.experts.down_proj[e] # [I, H] + b_d = self.experts.down_proj_bias[e] # [H] + + block_count = 0 + outs = [] + for block_idx in range(target_blocks): + block_count += 1 + qi = block_positions[block_idx] + + # Calculate block size (last block should be handled with remainder) + if block_idx == target_blocks - 1: + real_q_len = T - qi + else: + real_q_len = block_positions[block_idx + 1] - qi + + tgb = hidden[qi : qi + real_q_len, :] + # Gate and Up 
projections + + wg_col_shape = W_g.shape[1] + wg_num_blocks = math.ceil(wg_col_shape / 128) + last_block_size = wg_col_shape % 128 if wg_col_shape % 128 != 0 else 128 + + intermediates = [] + for i in range(wg_num_blocks): + if i == wg_num_blocks - 1: + cur_gate = (tgb @ W_g[:, -last_block_size:]) + b_g[-last_block_size:] + cur_up = (tgb @ W_u[:, -last_block_size:]) + b_u[-last_block_size:] + else: + cur_gate = (tgb @ W_g[:, i * 128 : (i + 1) * 128]) + b_g[i * 128 : (i + 1) * 128] + cur_up = (tgb @ W_u[:, i * 128 : (i + 1) * 128]) + b_u[i * 128 : (i + 1) * 128] + + cur_gate = cur_gate.clamp(min=torch.finfo(torch.float16).min, max=self.experts.limit) + cur_up = cur_up.clamp(min=-self.experts.limit, max=self.experts.limit) + cur_glu = cur_gate * torch.sigmoid(cur_gate * self.experts.alpha) + cur_intermediate = (cur_up + 1) * cur_glu + intermediates.append(cur_intermediate) + + intermediate = torch.cat(intermediates, dim=-1) + + downs = [] + for i in range(wg_num_blocks): + if i == wg_num_blocks - 1: + downs.append((intermediate @ W_d[:, -last_block_size:]) + b_d[-last_block_size:]) + else: + downs.append((intermediate @ W_d[:, i * 128 : (i + 1) * 128]) + b_d[i * 128 : (i + 1) * 128]) + + down_out_block = torch.cat(downs, dim=1) + outs.append(down_out_block) + + down_out = torch.cat(outs, dim=0) + # Apply routing weights and accumulate masked_down = torch.where(routing_weight > 0, down_out * routing_weight, torch.zeros_like(expert_out)) expert_out += masked_down @@ -95,6 +311,8 @@ def alt_forward(self, hidden: torch.Tensor): # original shape [B, S, H] return expert_out.view(B, S, H), router_logits + +class QEffGptOssMLP(GptOssMLP): # ------------------- Gather based, weights as activation approach --------------- def forward_weights_as_activation(self, hidden_states): bs, seq_len, _ = hidden_states.shape @@ -142,7 +360,6 @@ def forward_weights_as_activation(self, hidden_states): # ------------------- Gather based, weights as activation approach, With Seperate Gate, up 
Projections --------------- def forward(self, hidden_states): - # print("Seperate Split, Up, Gate Projections") bs, seq_len, _ = hidden_states.shape hidden_states = hidden_states.view(bs * seq_len, self.experts.hidden_size) @@ -172,7 +389,7 @@ def forward(self, hidden_states): up = torch.bmm(expert_in, up_proj) + up_proj_bias.unsqueeze(1) # Apply activation with clamping - gate = gate.clamp(min=None, max=self.experts.limit) + gate = gate.clamp(min=torch.finfo(torch.float16).min, max=self.experts.limit) up = up.clamp(min=-self.experts.limit, max=self.experts.limit) # GLU activation @@ -404,6 +621,283 @@ def eager_attention_forward( return attn_output, attn_weights +def eager_attention_forward_blocked( + module: nn.Module, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attention_mask: Optional[torch.Tensor], + scaling: float, + **kwargs, +): + key_states = repeat_kv(key, module.num_key_value_groups) + value_states = repeat_kv(value, module.num_key_value_groups) + + BS, NH, CL, DH = query.shape + target_blocks = int(os.environ.get("NUM_Q_BLOCKS", 1)) + block_positions = [] + for j in range(target_blocks): + block_positions.append(j * (CL // target_blocks)) + block_count = 0 + + outs = [] + for block_idx in range(target_blocks): + block_count += 1 + qi = block_positions[block_idx] + + # Calculate block size (last block should be handled with remainder) + if block_idx == target_blocks - 1: + real_q_len = CL - qi + else: + real_q_len = block_positions[block_idx + 1] - qi + + q_block = query[:, :, qi : qi + real_q_len, :] + scores = torch.matmul(q_block, key_states.transpose(2, 3)) * scaling + attn_mask_block = attention_mask[:, :, qi : qi + real_q_len, :] + curr_attn_weights = torch.where( + attn_mask_block, torch.tensor(MIN_MASKED_ATTENTION_VALUE, dtype=torch.float32), scores + ) + sinks = module.sinks.reshape(1, -1, 1, 1).expand( + curr_attn_weights.shape[0], -1, curr_attn_weights.shape[-2], -1 + ) + combined_logits = torch.cat([curr_attn_weights, 
sinks], dim=-1) + combined_logits = combined_logits - combined_logits.max(dim=-1, keepdim=True).values + curr_attn_weights = nn.functional.softmax(combined_logits, dim=-1, dtype=torch.float32) + curr_attn_weights = curr_attn_weights[..., :-1] + out_block = torch.matmul(curr_attn_weights, value_states) + outs.append(out_block) + output = torch.cat(outs, dim=2) + + output = output.view(BS, NH, CL, DH).transpose(1, 2).contiguous() + return output, output + + +def opt_eager_attention_forward_blocked( + module: nn.Module, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attention_mask: Optional[torch.Tensor], + scaling: float, + **kwargs, +): + key_states = repeat_kv(key, module.num_key_value_groups) + value_states = repeat_kv(value, module.num_key_value_groups) + + BS, NH, CL, DH = query.shape + target_blocks = int(os.environ.get("NUM_Q_BLOCKS", 1)) + block_positions = [] + for j in range(target_blocks): + block_positions.append(j * (CL // target_blocks)) + block_count = 0 + outs = [] + for block_idx in range(target_blocks): + block_count += 1 + qi = block_positions[block_idx] + # Calculate block size (last block should be handled with remainder) + + if block_idx == target_blocks - 1: + real_q_len = CL - qi + else: + real_q_len = block_positions[block_idx + 1] - qi + + if block_idx == 0: + kv_start_idx = 0 + else: + kv_start_idx = qi - 128 + + q_block = query[:, :, qi : qi + real_q_len, :] + if kwargs.get("sliding_window"): + k_block = key_states[:, :, kv_start_idx : qi + real_q_len, :] + v_block = value_states[:, :, kv_start_idx : qi + real_q_len, :] + attn_mask_block = attention_mask[:, :, qi : qi + real_q_len, kv_start_idx : qi + real_q_len] + else: + k_block = key_states + v_block = value_states + attn_mask_block = attention_mask[:, :, qi : qi + real_q_len, :] + + scores = torch.matmul(q_block, k_block.transpose(2, 3)) * scaling + curr_attn_weights = torch.where( + attn_mask_block, torch.tensor(MIN_MASKED_ATTENTION_VALUE, dtype=torch.float32), 
scores + ) + sinks = module.sinks.reshape(1, -1, 1, 1).expand( + curr_attn_weights.shape[0], -1, curr_attn_weights.shape[-2], -1 + ) + combined_logits = torch.cat([curr_attn_weights, sinks], dim=-1) + combined_logits = combined_logits - combined_logits.max(dim=-1, keepdim=True).values + curr_attn_weights = nn.functional.softmax(combined_logits, dim=-1, dtype=torch.float32) + curr_attn_weights = curr_attn_weights[..., :-1] + out_block = torch.matmul(curr_attn_weights, v_block) + outs.append(out_block) + output = torch.cat(outs, dim=2) + + output = output.view(BS, NH, CL, DH).transpose(1, 2).contiguous() + return output, output + + +class QEffPrefillOnlyChunkedGptOssAttention(GptOssAttention): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __qeff_init__(self): + self.rotary_emb = QEffGptOssRotaryEmbedding(config=self.config) + + def forward( + self, + hidden_states: torch.Tensor, + position_embeddings: tuple[torch.Tensor, torch.Tensor], + attention_mask: Optional[torch.Tensor], + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + batch_index: Optional[torch.LongTensor] = None, + cache_position: Optional[torch.LongTensor] = None, + sliding_mask=None, + **kwargs: Unpack[TransformersKwargs], + ) -> tuple[torch.Tensor, torch.Tensor]: + input_shape = hidden_states.shape[:-1] + hidden_shape = (*input_shape, -1, self.head_dim) + query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) + hidden_shape = (*input_shape, -1, self.head_dim) + key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) + value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) + if not (max_seq_len_cached := getattr(self.config, "max_seq_len_cached")): + max_seq_len_cached = 32 * 1024 + cos, sin = self.rotary_emb(value_states, seq_len=max_seq_len_cached) + query_states, key_states = qeff_apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if 
past_key_value is not None: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = { + "sin": sin, + "cos": cos, + "batch_index": batch_index, + "position_ids": position_ids, + "config": self.config, + "is_sliding": self.sliding_window is not None, + "sliding_window": self.sliding_window, + } + if self.sliding_window is not None: + key_states, value_states = past_key_value.sliding_window_update_chunked( + key_states, value_states, self.layer_idx, cache_kwargs + ) + else: + key_states, value_states = past_key_value.full_cache_update_chunked( + key_states, value_states, self.layer_idx, cache_kwargs + ) + + if self.sliding_window is not None: + attention_mask = sliding_mask + # positive_pos_ids = torch.where(position_ids<0, 0, position_ids) + ctx_len = position_ids.shape[1] + self.sliding_window + ctx_indices = torch.arange(ctx_len) + first_pos_idx = position_ids[0][0] + add_idx = torch.where(first_pos_idx >= self.sliding_window, first_pos_idx - self.sliding_window, 0) + # start_idx = torch.where(first_pos_idx>=self.sliding_window, first_pos_idx-self.sliding_window, 0) + # end_idx = torch.where(first_pos_idx >= self.sliding_window, first_pos_idx+position_ids.shape[1], position_ids.shape[1]+self.sliding_window) + ctx_indices += add_idx + attention_mask = attention_mask[:, :, :, ctx_indices] + else: + attention_mask = attention_mask + + attention_interface: Callable = eager_attention_forward + attn_output, attn_weights = attention_interface( + self, + query_states, + key_states, + value_states, + attention_mask, + dropout=0.0 if not self.training else self.attention_dropout, + scaling=self.scaling, + sliding_window=self.sliding_window, + s_aux=self.sinks, # diff with Llama + **kwargs, + ) + + attn_output = attn_output.reshape(*input_shape, -1).contiguous() + attn_output = self.o_proj(attn_output) + return attn_output, attn_weights, past_key_value + + +class QEffPrefillOnlyGptOssAttention(GptOssAttention): + 
"""Multi-headed attention from 'Attention Is All You Need' paper""" + + def __qeff_init__(self): + self.rotary_emb = QEffGptOssRotaryEmbedding(config=self.config) + + def forward( + self, + hidden_states: torch.Tensor, + position_embeddings: tuple[torch.Tensor, torch.Tensor], + attention_mask: Optional[torch.Tensor], + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + batch_index: Optional[torch.LongTensor] = None, + cache_position: Optional[torch.LongTensor] = None, + sliding_mask=None, + **kwargs: Unpack[TransformersKwargs], + ) -> tuple[torch.Tensor, torch.Tensor]: + input_shape = hidden_states.shape[:-1] + hidden_shape = (*input_shape, -1, self.head_dim) + query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) + hidden_shape = (*input_shape, -1, self.head_dim) + key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) + value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) + if not (max_seq_len_cached := getattr(self.config, "max_seq_len_cached")): + max_seq_len_cached = 32 * 1024 + cos, sin = self.rotary_emb(value_states, seq_len=max_seq_len_cached) + query_states, key_states = qeff_apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = { + "sin": sin, + "cos": cos, + "batch_index": batch_index, + "position_ids": position_ids, + "config": self.config, + "is_sliding": self.sliding_window is not None, + "sliding_window": past_key_value.sliding_window_len, + } + if self.sliding_window is not None: + sliding_window_len = past_key_value.sliding_window_len + short_read_idx = torch.arange(past_key_value.key_cache[self.layer_idx].shape[2]) + read_idx = short_read_idx + torch.where( + position_ids.max() > sliding_window_len - 1, position_ids.max() - sliding_window_len + 1, 0 + ) + # This is a trick to export 
with seq_len
+                read_idx = torch.where(read_idx > position_ids.max(), 0, read_idx)
+                k_cache = key_states[:, :, read_idx, :]
+                v_cache = value_states[:, :, read_idx, :]
+            else:
+                k_cache, v_cache = key_states, value_states
+            _, _ = past_key_value.write_only(k_cache, v_cache, self.layer_idx, cache_kwargs)
+
+        if self.sliding_window is not None:
+            attention_mask = sliding_mask
+        else:
+            attention_mask = attention_mask
+
+        if os.environ.get("ENABLE_OPT_SWA", "0") == "1":
+            attention_interface: Callable = opt_eager_attention_forward_blocked
+        else:
+            attention_interface: Callable = eager_attention_forward_blocked
+        attn_output, attn_weights = attention_interface(
+            self,
+            query_states,
+            key_states,
+            value_states,
+            attention_mask,
+            dropout=0.0 if not self.training else self.attention_dropout,
+            scaling=self.scaling,
+            sliding_window=self.sliding_window,
+            s_aux=self.sinks,  # diff with Llama
+            **kwargs,
+        )
+
+        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+        attn_output = self.o_proj(attn_output)
+        return attn_output, attn_weights, past_key_value
+
+
 class QEffGptOssAttention(GptOssAttention):
     """Multi-headed attention from 'Attention Is All You Need' paper"""
 
@@ -429,8 +923,9 @@ def forward(
         query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
         key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
         value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
-
-        cos, sin = self.rotary_emb(value_states, seq_len=32 * 1024)
+        if not (max_seq_len_cached := getattr(self.config, "max_seq_len_cached")):
+            max_seq_len_cached = 32 * 1024
+        cos, sin = self.rotary_emb(value_states, seq_len=max_seq_len_cached)
         query_states, key_states = qeff_apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
 
         if past_key_value is not None:
@@ -511,7 +1006,6 @@ def forward(
         residual = hidden_states
         hidden_states = self.post_attention_layernorm(hidden_states)
         hidden_states, _ = self.mlp(hidden_states)  # diff with llama: router scores
-        
# alth, _ = self.mlp.alt_forward(hidden_states) hidden_states = hidden_states.reshape(residual.shape) hidden_states = residual + hidden_states outputs = (hidden_states,) @@ -525,6 +1019,97 @@ def forward( return outputs +class QEffPrefillOnlyGptOssModel(GptOssModel): + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Cache] = None, + batch_index: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + **kwargs: Unpack[TransformersKwargs], + ) -> MoeModelOutputWithPast: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if (input_ids is None) ^ (inputs_embeds is not None): + raise ValueError("You must specify exactly one of input_ids or inputs_embeds") + + return_legacy_cache = False + if use_cache and not isinstance(past_key_values, Cache): + return_legacy_cache = True + past_key_values = QEffHybridCacheForGPTOSS.from_legacy_cache(self.config, past_key_values) + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if cache_position is None: + past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + cache_position = torch.arange( + past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device + ) + if position_ids is None: + position_ids = 
cache_position.unsqueeze(0) + + # target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + causal_mask = _create_causal_mask(position_ids=position_ids, target_length=past_key_values.max_cache_len) + sliding_mask = _create_causal_mask( + position_ids=position_ids, + target_length=past_key_values.max_cache_len, + sliding_window=self.config.sliding_window, + ) + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + layer_outputs = decoder_layer( + hidden_states, + attention_mask=causal_mask, + position_ids=position_ids, + past_key_value=past_key_values, + batch_index=batch_index, + use_cache=use_cache, + output_attentions=output_attentions, + cache_position=cache_position, + sliding_mask=sliding_mask, + **kwargs, + ) + hidden_states = layer_outputs[0] + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if return_legacy_cache: + past_key_values = past_key_values.to_legacy_cache() + + return MoeModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=past_key_values if use_cache else None, + ) + + class QEffGptOssModel(GptOssModel): def forward( self, @@ -578,7 +1163,6 @@ def forward( ) hidden_states = inputs_embeds - # position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None @@ -708,15 +1292,15 @@ def forward( router_logits=outputs.router_logits, ) - def get_pkv_dynamic_axes( - self, - ): + def get_pkv_dynamic_axes(self, retain_full_kv: Optional[bool] = False, continuous_batching: Optional[bool] = False): pkv_dynamic_axes = [] 
for layer_type in self.config.layer_types: - if layer_type == "sliding_attention": - pkv_dynamic_axes.append({0: "batch_size", 2: "sliding_window"}) - elif layer_type == "full_attention": - pkv_dynamic_axes.append({0: "batch_size", 2: "ctx_len"}) + if layer_type == "sliding_attention" and not retain_full_kv: + pkv_dynamic_axes.append( + {0: "full_batch_size" if continuous_batching else "batch_size", 2: "sliding_window"} + ) + else: + pkv_dynamic_axes.append({0: "full_batch_size" if continuous_batching else "batch_size", 2: "ctx_len"}) return pkv_dynamic_axes def get_specializations( @@ -724,10 +1308,14 @@ def get_specializations( batch_size: int, prefill_seq_len: int, ctx_len: int, + **kwargs, ): batch_size = batch_size if batch_size else 1 - prefill_seq_len = prefill_seq_len if prefill_seq_len else constants.PROMPT_LEN - ctx_len = ctx_len if ctx_len else constants.CTX_LEN + if kwargs.get("prefill_only") and not kwargs.get("enable_chunking") and ctx_len != prefill_seq_len: + ctx_len = prefill_seq_len + logger.warning( + f"overriding ctx_len={prefill_seq_len}, currently we don't support ctx_len different than prefill_seq_len for prefill_only model" + ) specializations = [ { diff --git a/QEfficient/transformers/models/modeling_auto.py b/QEfficient/transformers/models/modeling_auto.py index 16a809c96..008147c03 100644 --- a/QEfficient/transformers/models/modeling_auto.py +++ b/QEfficient/transformers/models/modeling_auto.py @@ -5,6 +5,7 @@ # # ---------------------------------------------------------------------------- +import os import warnings from pathlib import Path from time import perf_counter @@ -37,13 +38,20 @@ get_compilation_dims, ) from QEfficient.generation.vlm_generation import VisionLanguageGeneration -from QEfficient.transformers.modeling_utils import DYNAMIC_SEQ_LEN_SUPPORTED_MODEL_ARCH +from QEfficient.transformers.modeling_utils import ( + DYNAMIC_SEQ_LEN_SUPPORTED_MODEL_ARCH, + SPECIALIZED_PREFILL_ONLY_MODEL_ARCH, +) from 
QEfficient.transformers.models.pytorch_transforms import ( BlockedKVAttentionTransform, CustomOpsTransform, KVCacheExternalModuleMapperTransform, KVCacheTransform, PoolingTransform, + PrefillOnlyChunkedTransform, + PrefillOnlyTransform, + RevertPrefillKeepAttentionTransform, + RevertPrefillOnlyTransform, SamplerTransform, SpDTransform, VlmKVOffloadTransform, @@ -301,7 +309,7 @@ def get_model_config(self) -> dict: """ return self.model.config.__dict__ - def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = False) -> str: + def export(self, export_dir: Optional[str] = None, **kwargs) -> str: """ Export the model to ONNX format using ``torch.onnx.export``. @@ -338,7 +346,7 @@ def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = output_names, dynamic_axes, export_dir=export_dir, - use_onnx_subfunctions=use_onnx_subfunctions, + use_onnx_subfunctions=kwargs.get("use_onnx_subfunctions", False), ) def compile( @@ -588,15 +596,7 @@ def __init__(self, model: nn.modules, **kwargs): self.model = model.get_qeff_vision_encoder() self.hash_params["qeff_auto_class"] = self.__class__.__name__ - def export( - self, - inputs, - output_names, - dynamic_axes, - export_dir=None, - offload_pt_weights=True, - use_onnx_subfunctions: bool = False, - ): + def export(self, inputs, output_names, dynamic_axes, export_dir=None, offload_pt_weights=True, **kwargs): """ Exports the vision encoder component to ONNX format. 
@@ -626,7 +626,7 @@ def export( dynamic_axes, export_dir=export_dir, offload_pt_weights=offload_pt_weights, - use_onnx_subfunctions=use_onnx_subfunctions, + use_onnx_subfunctions=kwargs.get("use_onnx_subfunctions", False), ) def compile( @@ -741,15 +741,7 @@ def __init__(self, model, qaic_config, **kwargs): if self.model.qaic_config is not None and self.model.qaic_config.get("num_kv_blocks", None) is not None: BlockedKVAttentionTransform.apply(self.model, num_kv_blocks=self.model.qaic_config.get("num_kv_blocks")) - def export( - self, - inputs, - output_names, - dynamic_axes, - export_dir=None, - offload_pt_weights=True, - use_onnx_subfunctions: bool = False, - ): + def export(self, inputs, output_names, dynamic_axes, export_dir=None, offload_pt_weights=True, **kwargs): """ Exports the language decoder component to ONNX format. @@ -779,7 +771,7 @@ def export( dynamic_axes, export_dir=export_dir, offload_pt_weights=offload_pt_weights, - use_onnx_subfunctions=use_onnx_subfunctions, + use_onnx_subfunctions=kwargs.get("use_onnx_subfunctions", False), ) def compile( @@ -2284,11 +2276,30 @@ class QEFFAutoModelForCausalLM(QEFFBaseModel): _onnx_transforms = [FP16ClipTransform, SplitTensorsTransform] + def prefill( + self, + enable: Optional[bool] = True, + enable_chunking: Optional[bool] = False, + retain_full_kv: Optional[bool] = False, + ): + if enable: + if enable_chunking: + self.model, tf = PrefillOnlyChunkedTransform.apply(self.model) + else: + self.model, tf = PrefillOnlyTransform.apply(self.model) + + else: + if retain_full_kv: + self.model, tf = RevertPrefillKeepAttentionTransform.apply(self.model) + else: + self.model, tf = RevertPrefillOnlyTransform.apply(self.model) + def __init__( self, model: nn.Module, continuous_batching: bool = False, qaic_config: Optional[dict] = None, + max_seq_len_cached: Optional[int] = None, **kwargs, ): """ @@ -2336,6 +2347,7 @@ def __init__( # Set use_cache=True to get KV values as output during ONNX export model.config.use_cache = 
True + setattr(model.config, "max_seq_len_cached", max_seq_len_cached) super().__init__(model, qaic_config=qaic_config, **kwargs) self.num_layers = model.config.num_hidden_layers self.continuous_batching = continuous_batching @@ -2348,6 +2360,7 @@ def __init__( if qaic_config: self.ccl_enabled = qaic_config.get("ccl_enabled", False) self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = None, None + self.hash_params["max_seq_len_cached"] = max_seq_len_cached # ---Sampling--- # Note: SamplerTransform should be applied after all other transforms @@ -2372,6 +2385,7 @@ def from_pretrained( pretrained_model_name_or_path, continuous_batching: bool = False, qaic_config: Optional[dict] = None, + max_seq_len_cached: Optional[int] = None, *args, **kwargs, ): @@ -2435,7 +2449,6 @@ def from_pretrained( qaic_config["pretrained_model_name_or_path"] = pretrained_model_name_or_path # This is support models that should be classified to in a different auto class but transformers load them via this class - if model.__class__.__name__ in MISCLASSIFIED_CAUSAL_LM_TO_QEFF_AUTO_CLASS_MAP: return MISCLASSIFIED_CAUSAL_LM_TO_QEFF_AUTO_CLASS_MAP[model.__class__.__name__]( model, @@ -2450,6 +2463,7 @@ def from_pretrained( continuous_batching=continuous_batching, qaic_config=qaic_config, pretrained_model_name_or_path=pretrained_model_name_or_path, + max_seq_len_cached=max_seq_len_cached, **kwargs, ) @@ -2465,7 +2479,56 @@ def get_model_config(self) -> dict: """ return self.model.config.__dict__ - def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = False, **kwargs) -> str: + def get_seq_len_and_handle_specialized_prefill_model( + self, prefill_seq_len: Optional[int] = None, enable_chunking=False + ) -> int: + self.hash_params["prefill_only"] = True + if enable_chunking: + self.hash_params["chunking"] = True + return constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN + + num_q_blocks = os.environ.get("NUM_Q_BLOCKS", None) + if num_q_blocks is None: + block_size = 128 + if 
prefill_seq_len is None or prefill_seq_len % block_size != 0 or prefill_seq_len < 128:
+                raise ValueError(
+                    f"When prefill_only=True, 'prefill_seq_len' must be explicitly set and divisible by block_size={block_size}. "
+                    f"Or set `NUM_Q_BLOCKS` ENV variable. "
+                    f"Received: prefill_seq_len={prefill_seq_len}"
+                )
+
+            num_q_blocks = prefill_seq_len // block_size
+            logger.warning(
+                f"Setting NUM_Q_BLOCKS={num_q_blocks} used in attention Q-blocking for prefill_only model, please set ENV variable `NUM_Q_BLOCKS` to override"
+            )
+            os.environ["NUM_Q_BLOCKS"] = str(num_q_blocks)
+        num_q_blocks = int(num_q_blocks)
+
+        num_ffn_blocks = os.environ.get("NUM_FFN_BLOCKS", None)
+        num_ffn_blocks = int(num_ffn_blocks) if num_ffn_blocks else num_ffn_blocks
+        min_seq_len = max(num_q_blocks, num_ffn_blocks) if num_ffn_blocks else num_q_blocks
+        if (num_ffn_blocks and min_seq_len % num_ffn_blocks != 0) or min_seq_len % num_q_blocks != 0:
+            raise ValueError(
+                f"Got NUM_FFN_BLOCKS={num_ffn_blocks} and NUM_Q_BLOCKS={num_q_blocks}, tried to set seq_len={min_seq_len} for export but, "
+                "seq_len is not divisible by either num_ffn_blocks or num_q_blocks, try changing the values."
+            )
+
+        self.hash_params["NUM_Q_BLOCKS"] = num_q_blocks
+        self.hash_params["NUM_FFN_BLOCKS"] = num_ffn_blocks
+        self.hash_params["ENABLE_OPT_SWA"] = os.environ.get("ENABLE_OPT_SWA", "0")
+        return (
+            min_seq_len
+            if min_seq_len > constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN
+            else constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN
+        )
+
+    def export(
+        self,
+        export_dir: Optional[str] = None,
+        prefill_only: Optional[bool] = False,
+        prefill_seq_len: Optional[int] = None,
+        **kwargs,
+    ) -> str:
         """
         Export the model to ONNX format using ``torch.onnx.export``.
@@ -2491,6 +2554,33 @@ def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = kv_cache_shape = get_padding_shape_from_config( self.model.config, fbs if self.continuous_batching else bs, seq_len ) + enable_chunking = kwargs.get("enable_chunking", False) + if prefill_only: + if not enable_chunking and self.continuous_batching: + raise NotImplementedError( + "Looks like you are trying to run prefix-caching without chunking, this feature is not available yet!" + ) + self.prefill(enable=True, enable_chunking=enable_chunking) + self.hash_params.pop("retain_full_kv", None) + seq_len = ( + self.get_seq_len_and_handle_specialized_prefill_model( + prefill_seq_len=prefill_seq_len, enable_chunking=enable_chunking + ) + if self.model.config.model_type in SPECIALIZED_PREFILL_ONLY_MODEL_ARCH + else seq_len + ) + kv_cache_shape[2] = seq_len + self.model.config.sliding_window if enable_chunking else seq_len + else: + self.prefill(False, retain_full_kv=kwargs.get("retain_full_kv", False)) + self.hash_params.pop("prefill_only", None) + self.hash_params.pop("NUM_Q_BLOCKS", None) + self.hash_params.pop("NUM_FFN_BLOCKS", None) + self.hash_params.pop("ENABLE_OPT_SWA", None) + self.hash_params.pop("chunking", None) + if kwargs.get("retain_full_kv", False): + kv_cache_shape[2] = seq_len + self.model.config.sliding_window + self.hash_params["retain_full_kv"] = True + example_inputs = { "input_ids": torch.zeros((bs, seq_len), dtype=torch.int64), "position_ids": torch.arange(seq_len, dtype=torch.int64).view(1, seq_len).repeat(bs, 1), @@ -2539,7 +2629,13 @@ def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = else: # HACK: create common function for this including above if condition code pkv_dynamic_axes = ( - self.model.get_pkv_dynamic_axes() if hasattr(self.model, "get_pkv_dynamic_axes") else pkv_dynamic_axes + self.model.get_pkv_dynamic_axes( + retain_full_kv=kwargs.get("retain_full_kv", False) + or (prefill_only and 
kwargs.get("enable_chunking", False)), + continuous_batching=self.continuous_batching, + ) + if hasattr(self.model, "get_pkv_dynamic_axes") + else pkv_dynamic_axes ) pkv_dynamic_axes = ( [pkv_dynamic_axes] * self.model.config.num_hidden_layers @@ -2548,7 +2644,6 @@ def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = ) for i in range(self.num_layers): - pkv_dynamic_axes[i][0] = "full_batch_size" if self.continuous_batching else "batch_size" for kv in ["key", "value"]: example_inputs["past_key_values"][i].append(torch.zeros(kv_cache_shape, dtype=torch.float32)) dynamic_axes[f"past_{kv}.{i}"] = pkv_dynamic_axes[i] @@ -2569,14 +2664,14 @@ def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = output_names=output_names, dynamic_axes=dynamic_axes, ) - return self._export( example_inputs, output_names, dynamic_axes, export_dir=export_dir, - use_onnx_subfunctions=use_onnx_subfunctions, + use_onnx_subfunctions=kwargs.get("use_onnx_subfunctions", False), offload_pt_weights=kwargs.get("offload_pt_weights", True), + prefill_only=prefill_only, ) def get_sampling_inputs_and_outputs( @@ -2666,6 +2761,7 @@ def build_prefill_specialization( batch_size: int = 1, kv_cache_batch_size: Optional[int] = None, full_batch_size: Optional[int] = None, + **kwargs, ): """ Builds a dictionary representing a compilation specialization for the prefill phase. @@ -2688,11 +2784,17 @@ def build_prefill_specialization( Dict[str, Union[int, str]] A dictionary defining the prefill specialization. 
""" + if prefill_seq_len == 1 and self.continuous_batching: + exec_batch_size = full_batch_size + else: + exec_batch_size = 1 if self.continuous_batching else batch_size + if hasattr(self.model, "get_specializations"): spec = self.model.get_specializations( - batch_size=1 if self.continuous_batching else batch_size, + batch_size=exec_batch_size, prefill_seq_len=prefill_seq_len, ctx_len=ctx_len, + **kwargs, )[0] else: spec = { @@ -2720,6 +2822,7 @@ def build_decode_specialization( kv_cache_batch_size: Optional[int] = None, full_batch_size: Optional[int] = None, num_speculative_tokens: Optional[int] = None, + **kwargs, ): """ Builds a dictionary representing a compilation specialization for the decode phase. @@ -2790,6 +2893,9 @@ def compile( num_speculative_tokens: Optional[int] = None, prefill_only: Optional[bool] = None, use_onnx_subfunctions: bool = False, + offload_pt_weights: Optional[bool] = True, + enable_chunking: Optional[bool] = False, + retain_full_kv: Optional[bool] = None, **compiler_options, ) -> str: """ @@ -2870,6 +2976,20 @@ def compile( If `prefill_seq_len` is less than `num_speculative_tokens + 1` for TLM models. """ + if prefill_only is None or not prefill_only: + if self.continuous_batching and full_batch_size is None: + raise TypeError("`full_batch_size` is required when `continuous_batching=True`.") + if kv_cache_batch_size and not full_batch_size: + raise ValueError( + "KV caching requires continuous batching. Please set `full_batch_size` and " + "enable `continuous_batching=True` in `from_pretrained`." 
+ ) + else: + if self.continuous_batching: + if not isinstance(kv_cache_batch_size, int): + raise ValueError( + "Please pass valid integer for kv_cache_batch_size as continuous_batching is enabled for prefill-only model" + ) # if ccl_enabled is True read Compute-Context-Length lists if self.ccl_enabled: @@ -2907,15 +3027,6 @@ def compile( if self.is_tlm: num_speculative_tokens = self.check_and_get_num_speculative_tokens(num_speculative_tokens, prefill_seq_len) - if self.continuous_batching and full_batch_size is None: - raise TypeError("`full_batch_size` is required when `continuous_batching=True`.") - - if kv_cache_batch_size and not full_batch_size: - raise ValueError( - "KV caching requires continuous batching. Please set `full_batch_size` and " - "enable `continuous_batching=True` in `from_pretrained`." - ) - if ( self.model.qaic_config is not None and self.model.qaic_config.get("include_sampler", False) @@ -2924,15 +3035,23 @@ def compile( ): raise ValueError("Currently, sampler does not support `num_speculative_tokens` > 0.") + if kv_cache_batch_size and prefill_only is not None and prefill_only: + logger.warning( + "kv_cache_batch_size will be ignored as prefill_only is set to True unless this is GPTOSS model" + ) + # Infer kv_cache_batch_size if not provided kv_cache_batch_size = kv_cache_batch_size or full_batch_size or batch_size # --- Specializations --- specializations = [] if prefill_only is None or prefill_only or prefill_seq_len == 1: + # TODO: we are handling decode-only case inside prefill call which is utterly mis-leading if self.comp_ctx_lengths_prefill is not None: # Adding elements from self.comp_ctx_lengths_prefill to prefill_specialization for i in range(0, len(self.comp_ctx_lengths_prefill)): + if prefill_only or enable_chunking: + raise NotImplementedError("prefill_only or enable_chunking is not supported with CCL") specializations.append( self.build_prefill_specialization( prefill_seq_len=prefill_seq_len, @@ -2952,6 +3071,8 @@ def compile( 
batch_size=batch_size, kv_cache_batch_size=kv_cache_batch_size, full_batch_size=full_batch_size, + prefill_only=prefill_only, + enable_chunking=enable_chunking, ) ) @@ -2979,6 +3100,7 @@ def compile( kv_cache_batch_size=kv_cache_batch_size, full_batch_size=full_batch_size, num_speculative_tokens=num_speculative_tokens, + prefill_only=prefill_only, ) if decode_spec: specializations.append(decode_spec) @@ -2991,7 +3113,6 @@ def compile( for i in range(self.num_layers): for kv in ["key", "value"]: custom_io[f"past_{kv}.{i}{suffix}"] = kv_cache_dtype - qpc_path = self._compile( onnx_path=onnx_path, compile_dir=compile_dir, @@ -3006,6 +3127,10 @@ def compile( aic_num_cores=num_cores, mxint8_kv_cache=mxint8_kv_cache, use_onnx_subfunctions=use_onnx_subfunctions, + prefill_only=prefill_only, + offload_pt_weights=offload_pt_weights, + enable_chunking=enable_chunking, + retain_full_kv=retain_full_kv, **compiler_options, ) @@ -3197,7 +3322,7 @@ def get_model_config(self) -> dict: """ return self.model.config.__dict__ - def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = False) -> str: + def export(self, export_dir: Optional[str] = None, **kwargs) -> str: """ Export the model to ONNX format using ``torch.onnx.export``. @@ -3225,7 +3350,7 @@ def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = output_names, dynamic_axes, export_dir=export_dir, - use_onnx_subfunctions=use_onnx_subfunctions, + use_onnx_subfunctions=kwargs.get("use_onnx_subfunctions", False), ) def compile( @@ -3573,7 +3698,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, pooling=None, *args, **k def get_model_config(self) -> dict: return self.model.config.__dict__ - def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = False) -> str: + def export(self, export_dir: Optional[str] = None, **kwargs) -> str: """ Exports the model to ``ONNX`` format using ``torch.onnx.export``. 
@@ -3601,7 +3726,7 @@ def export(self, export_dir: Optional[str] = None, use_onnx_subfunctions: bool = output_names, dynamic_axes, export_dir=export_dir, - use_onnx_subfunctions=use_onnx_subfunctions, + use_onnx_subfunctions=kwargs.get("use_onnx_subfunctions", False), ) def compile( diff --git a/QEfficient/transformers/models/pytorch_transforms.py b/QEfficient/transformers/models/pytorch_transforms.py index 07b9fe7e1..4ba6641cf 100644 --- a/QEfficient/transformers/models/pytorch_transforms.py +++ b/QEfficient/transformers/models/pytorch_transforms.py @@ -265,6 +265,11 @@ QEffGptOssForCausalLM, QEffGptOssMLP, QEffGptOssModel, + QEffPrefillOnlyChunkedGptOssAttention, + QEffPrefillOnlyChunkedGptOssMLP, + QEffPrefillOnlyGptOssAttention, + QEffPrefillOnlyGptOssMLP, + QEffPrefillOnlyGptOssModel, ) from QEfficient.transformers.models.gptj.modeling_gptj import ( QEffGPTJAttention, @@ -642,6 +647,39 @@ def apply(cls, model: nn.Module) -> Tuple[nn.Module, bool]: return model, transformed +class PrefillOnlyTransform(ModuleMappingTransform): + _module_mapping = { + QEffGptOssModel: QEffPrefillOnlyGptOssModel, + QEffGptOssAttention: QEffPrefillOnlyGptOssAttention, + QEffGptOssMLP: QEffPrefillOnlyGptOssMLP, + } + + +class PrefillOnlyChunkedTransform(ModuleMappingTransform): + _module_mapping = { + QEffGptOssModel: QEffPrefillOnlyGptOssModel, + QEffGptOssAttention: QEffPrefillOnlyChunkedGptOssAttention, + QEffGptOssMLP: QEffPrefillOnlyChunkedGptOssMLP, + } + + +class RevertPrefillKeepAttentionTransform(ModuleMappingTransform): + _module_mapping = { + QEffGptOssModel: QEffPrefillOnlyGptOssModel, + QEffPrefillOnlyGptOssAttention: QEffPrefillOnlyChunkedGptOssAttention, + QEffGptOssAttention: QEffPrefillOnlyChunkedGptOssAttention, + QEffPrefillOnlyGptOssMLP: QEffGptOssMLP, + QEffPrefillOnlyChunkedGptOssMLP: QEffGptOssMLP, + } + + +class RevertPrefillOnlyTransform(ModuleMappingTransform): + _module_mapping = { + **{v: k for k, v in PrefillOnlyTransform._module_mapping.items()}, + 
**{v: k for k, v in PrefillOnlyChunkedTransform._module_mapping.items()}, + } + + class SpDTransform: """ Apply generic QEffForCausalLM forward pass to extract `num_speculative_tokens+1` hidden states before computing logits during decode phase and extract last predicted token during prefill. diff --git a/QEfficient/transformers/quantizers/__init__.py b/QEfficient/transformers/quantizers/__init__.py index dfadc00ef..dc2308e99 100644 --- a/QEfficient/transformers/quantizers/__init__.py +++ b/QEfficient/transformers/quantizers/__init__.py @@ -5,6 +5,6 @@ # # ----------------------------------------------------------------------------- -from QEfficient.transformers.quantizers.auto import replace_transformers_quantizers +from QEfficient.transformers.quantizers.auto import replace_transformers_quantizers, undo_transformers_quantizers -__all__ = ["replace_transformers_quantizers"] +__all__ = ["replace_transformers_quantizers", "undo_transformers_quantizers"] diff --git a/QEfficient/utils/export_utils.py b/QEfficient/utils/export_utils.py index eea92a490..638f55921 100644 --- a/QEfficient/utils/export_utils.py +++ b/QEfficient/utils/export_utils.py @@ -5,6 +5,7 @@ # # ----------------------------------------------------------------------------- +import copy import inspect import re import warnings @@ -40,20 +41,19 @@ def export_wrapper(func): """ def wrapper(self, *args, **kwargs): - # 1. Prepare export directory + # 1. Setup ONNX subfunctions if requested + if use_onnx_subfunctions := kwargs.pop("use_onnx_subfunctions", False): + args, kwargs = _setup_onnx_subfunctions(self, args, kwargs) + + # 2. Prepare export directory export_dir = _prepare_export_directory(self, kwargs) - # 2. Generate hash and finalize export directory path + # 3. 
Generate hash and finalize export directory path export_hash, filtered_hash_params = _generate_export_hash(self, args, kwargs, func) export_dir = export_dir.with_name(export_dir.name + "-" + export_hash) kwargs["export_dir"] = export_dir self.export_hash = export_hash - # 3. Setup ONNX subfunctions if requested - # TODO: No need of this variable, if export_kwargs contains classes (refer diffusers) - if use_onnx_subfunctions := kwargs.get("use_onnx_subfunctions", False): - _setup_onnx_subfunctions(self, kwargs) - # 4. Execute the actual export onnx_path = func(self, *args, **kwargs) @@ -101,9 +101,6 @@ def _generate_export_hash(qeff_model, args, kwargs, func): Returns: Tuple of (export_hash: str, filtered_hash_params: dict) """ - # Extract use_onnx_subfunctions before binding (it's used by wrapper, not _export) - use_onnx_subfunctions = kwargs.pop("use_onnx_subfunctions", False) - # Extract function signature original_sig = inspect.signature(func) params = list(original_sig.parameters.values())[1:] # Skip 'self' @@ -116,7 +113,6 @@ def _generate_export_hash(qeff_model, args, kwargs, func): # Use the model's current configuration for hashing to ensure any post-load modifications are captured # TODO: Replace with get_model_config property of modeling classes and remove the if-else # Determine the config dict to use, preferring .to_diff_dict() if available - if hasattr(qeff_model.model, "config") and hasattr(qeff_model.model.config, "to_diff_dict"): config_val = qeff_model.model.config.to_diff_dict() elif hasattr(qeff_model.model, "model") and hasattr(qeff_model.model.model.config, "to_diff_dict"): @@ -124,26 +120,25 @@ def _generate_export_hash(qeff_model, args, kwargs, func): else: config_val = qeff_model.model.config - qeff_model.hash_params.update( + copy_of_hash_params = copy.deepcopy(qeff_model.hash_params) + copy_of_hash_params.update( { "config": config_val, } ) - # Generate hash from relevant parameters export_hash, filtered_hash_params = create_export_hash( - 
model_params=qeff_model.hash_params, + model_params=copy_of_hash_params, output_names=all_args.get("output_names"), dynamic_axes=all_args.get("dynamic_axes"), export_kwargs=all_args.get("export_kwargs", None), onnx_transform_kwargs=all_args.get("onnx_transform_kwargs", None), - use_onnx_subfunctions=use_onnx_subfunctions, ) return export_hash, filtered_hash_params -def _setup_onnx_subfunctions(qeff_model, kwargs): +def _setup_onnx_subfunctions(qeff_model, args, kwargs): """ Setup ONNX subfunction export environment. @@ -166,26 +161,22 @@ def _setup_onnx_subfunctions(qeff_model, kwargs): # Apply torch patches for subfunction support apply_torch_patches() InvalidIndexProvider.SUBFUNC_ENABLED = True - - # Store original state for restoration during cleanup - qeff_model._original_onnx_transforms = qeff_model._onnx_transforms.copy() - # Transform output names for subfunction compatibility if "output_names" in kwargs: kwargs["output_names"] = [ re.sub("_RetainedState", "_InternalRetainedState", name) for name in kwargs["output_names"] ] - + else: + args = list(args) + args[1] = [re.sub("_RetainedState", "_InternalRetainedState", name) for name in args[1]] + args = tuple(args) # Add subfunction-specific ONNX transforms qeff_model._onnx_transforms.append(RenameFunctionOutputsTransform) qeff_model._onnx_transforms.append(CustomOpTransform) - # Configure export to use modules as functions - export_kwargs = kwargs.get("export_kwargs", {}) - # TODO: Handle this in the modelling class QEFFTransformersBase,remove from here. Refer diffusers implementation - export_kwargs["export_modules_as_functions"] = get_decoder_layer_classes_for_export(qeff_model.model) - kwargs["export_kwargs"] = export_kwargs + kwargs["export_modules_as_functions"] = get_decoder_layer_classes_for_export(qeff_model.model) + return args, kwargs def _cleanup_onnx_subfunctions(qeff_model): @@ -205,18 +196,11 @@ def _cleanup_onnx_subfunctions(qeff_model): even if export fails. 
Errors during cleanup are logged but not re-raised to avoid masking the original exception. """ - try: - # Undo torch patches - undo_torch_patches() - InvalidIndexProvider.SUBFUNC_ENABLED = False - - # Restore original ONNX transforms - if hasattr(qeff_model, "_original_onnx_transforms"): - qeff_model._onnx_transforms = qeff_model._original_onnx_transforms - delattr(qeff_model, "_original_onnx_transforms") - - except Exception as e: - logger.error(f"Error during subfunction cleanup: {e}") + # Undo torch patches + undo_torch_patches() + InvalidIndexProvider.SUBFUNC_ENABLED = False + qeff_model._onnx_transforms.remove(RenameFunctionOutputsTransform) + qeff_model._onnx_transforms.remove(CustomOpTransform) def _save_export_metadata(export_dir: Path, filtered_hash_params: Dict): diff --git a/QEfficient/utils/hash_utils.py b/QEfficient/utils/hash_utils.py index 68ccab0d4..10e6686d0 100644 --- a/QEfficient/utils/hash_utils.py +++ b/QEfficient/utils/hash_utils.py @@ -56,8 +56,6 @@ def create_export_hash(**kwargs): export_params = {} export_params["output_names"] = kwargs.get("output_names") export_params["dynamic_axes"] = kwargs.get("dynamic_axes") - if kwargs.get("use_onnx_subfunctions"): - export_params["use_onnx_subfunctions"] = True export_hash_params["export_params"] = export_params export_kwargs = kwargs.get("export_kwargs") @@ -69,5 +67,4 @@ def create_export_hash(**kwargs): export_hash_params.update(onnx_transform_kwargs) if export_hash_params.get("peft_config") is not None and not isinstance(export_hash_params["peft_config"], dict): export_hash_params["peft_config"] = export_hash_params["peft_config"].to_dict() - return hash_dict_params(export_hash_params), export_hash_params diff --git a/examples/disagg_serving/README.md b/examples/disagg_serving/README.md new file mode 100644 index 000000000..fcf665357 --- /dev/null +++ b/examples/disagg_serving/README.md @@ -0,0 +1,31 @@ +# We should be using disaggragate serving for GPTOSS model for best performance + - 
GPT-OSS model has 128/4 for 120b and 32/4 ratio of total_experts/experts_per_tok + - We always use a 'read all experts only once' strategy in the prefill-only model + - And we treat weights as activations, meaning we read only the chosen experts for the decode-only model + +# Prefill-only model +## Blocking default behaviour when `prefill_only=True` in compile API + - NUM_Q_BLOCKS= set number of Q blocks in attention + - NUM_FFN_BLOCKS= set number of blocks in FFN + - ENABLE_OPT_SWA="0" or "1" to enable/disable optimized SWA. when enabled we will be using only valid KVs for given block in Attention reducing MACs + - prefix_caching is not supported with this mode + +## Chunking pass `enable_chunking=True` and `prefill_only=True` in compile API + - Optimized SWA i.e. reading only valid KV as per diagonal attention mask is enabled for this version by default + - This model can be used for prefix_caching by passing `kv_cache_batch_size=` in compile API + +# Decode-only model +## Retain Sliding window length of KV for sliding window layers, default behaviour when `prefill_seq_len=1` in compile API + - This reduces the amount of DDR used by the model + - CB is enabled for this version pass `continuous_batching=True` in `from_pretrained` call and strictly pass `full_batch_size=` and optionally `kv_cache_batch_size=` if needed +## Full KV for sliding window layers pass `retain_full_kv=True` along with `prefill_seq_len=1` in compile API + - This uses higher DDR as we are retaining ctx_len KV even for sliding window layers but will be reading only sliding window len kv in attention + - CB is enabled for this version pass `continuous_batching=True` in `from_pretrained` call and strictly pass `full_batch_size=` and optionally `kv_cache_batch_size=` if needed + - This is enabled for the use case of multi-turn chat, where we will be running prefill-> decode and then use cache of prefill as well as decode combined to again run prefill, so we want to retain full KV for sliding window layers + + +NOTE: +* 
decode-only model currently fails compilation with `use_onnx_subfunctions=True` so avoid using it +* 120B model needs NPI, there are two versions of NPI one with and without subfunction both are uploaded here, pass it as `node_precision_info=` +* It is advised to use `use_onnx_subfunctions=True` with prefill-only model, otherwise the compilation times are too high, with this the model is supposed to export and fail during compile as it needs assert sdk, so user is supposed to run this compilation manually by pasting the command printed in the error + diff --git a/examples/disagg_serving/gpt_oss_disagg_mode.py b/examples/disagg_serving/gpt_oss_disagg_mode.py new file mode 100644 index 000000000..fd0d5b045 --- /dev/null +++ b/examples/disagg_serving/gpt_oss_disagg_mode.py @@ -0,0 +1,137 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import time + +import numpy as np +import torch +from transformers import AutoTokenizer + +from QEfficient import QEFFAutoModelForCausalLM +from QEfficient.generation.cloud_infer import QAICInferenceSession + +model_id = "openai/gpt-oss-20b" # weights are not required to convert to fp32 + +prompt = """ +Once upon a time, in a small town, there lived a young boy named Alex. Alex was a curious and adventurous child, always eager to explore the world around him. One day, while playing in the park, Alex stumbled upon a mysterious old book hidden beneath a pile of leaves. The book was filled with stories of distant lands, magical creatures, and extraordinary adventures. + +As Alex flipped through the pages, he discovered a map that led to a hidden treasure. Excited by the prospect of a real-life treasure hunt, Alex decided to embark on a thrilling journey. 
He packed his backpack with snacks, a flashlight, and a compass, and set off into the unknown. + +The path to the treasure was not an easy one. Alex had to navigate through dense forests, cross rickety bridges, and solve riddles that guarded the treasure's location. +""" +all_outputs = [] +# Run prefill +tokenizer = AutoTokenizer.from_pretrained(model_id) +PREFILL_SEQ_LEN = 256 +CTX_LEN = 256 +inputs = tokenizer(prompt, return_tensors="np", padding=True) +position_ids = inputs["attention_mask"].sum(1, keepdims=True) +padded_len = inputs["input_ids"].shape[1] +num_chunks = -(padded_len // -PREFILL_SEQ_LEN) # ceil divide without float +padded_len = num_chunks * PREFILL_SEQ_LEN # Convert to a multiple of prompt_len + +# Initialize variables specific to request +# Calculate the max generation length. +max_gen_len = CTX_LEN - position_ids.max() +generation_len = max_gen_len + + +qeff_model = QEFFAutoModelForCausalLM.from_pretrained(model_id) +config = qeff_model.model.config +inputs = tokenizer(prompt, return_tensors="np", padding="max_length", max_length=padded_len) +inputs["position_ids"] = np.where(inputs.pop("attention_mask"), np.arange(padded_len), -1) +inputs.pop("token_type_ids", None) +inputs = {k: torch.from_numpy(v) for k, v in inputs.items()} +past_key_values = [] +for i in range(config.num_hidden_layers): + cache_len = config.sliding_window if i % 2 == 0 else PREFILL_SEQ_LEN + pad_shape = (1, 8, cache_len, 64) + past_key = torch.zeros((pad_shape), dtype=torch.float32) + past_value = torch.zeros((pad_shape), dtype=torch.float32) + pkv = (past_key, past_value) + past_key_values.append(pkv) +inputs["past_key_values"] = past_key_values + + +decode_qpc_path = qeff_model.compile( + prefill_seq_len=1, + ctx_len=CTX_LEN, + num_cores=16, + mxfp6_matmul=True, + mxint8_kv_cache=True, + num_devices=1, + mos=1, + aic_enable_depth_first=True, + num_speculative_tokens=None, + offload_pt_weights=False, +) +prefill_qpc_path = qeff_model.compile( + 
prefill_seq_len=PREFILL_SEQ_LEN, + ctx_len=CTX_LEN, + num_cores=16, + mxfp6_matmul=True, + mxint8_kv_cache=True, + num_devices=1, + mos=1, + aic_enable_depth_first=True, + num_speculative_tokens=None, + prefill_only=True, + use_onnx_subfunctions=True, +) + +prefill_session = QAICInferenceSession(prefill_qpc_path) + +logits_out_placeholder = np.zeros((1, 1, 201088), dtype=np.float32) +prefill_session.set_buffers({"logits": logits_out_placeholder}) +inputs.pop("past_key_values") +inputs = {k: v.detach().numpy() for k, v in inputs.items()} +st = time.time() +qpc_out = prefill_session.run(inputs) +print(f"time for prefill_run={time.time() - st} sec\n") + +decode_session = QAICInferenceSession(decode_qpc_path) +decode_session.set_buffers({"logits": logits_out_placeholder}) + +decode_inputs = { + "input_ids": np.argmax(qpc_out["logits"]).reshape(1, 1), + "position_ids": np.max(inputs["position_ids"]).reshape(1, 1) + 1, +} +print("pos_id for decodee", decode_inputs["position_ids"]) + +all_outputs.append(decode_inputs["input_ids"][0][0]) +for i in range(config.num_hidden_layers): + if i % 2 == 0 and decode_inputs["position_ids"] >= config.sliding_window: + k = qpc_out[f"past_key.{i}_RetainedState"] + v = qpc_out[f"past_value.{i}_RetainedState"] + mod_pos_id = config.sliding_window - decode_inputs["position_ids"][0][0] % config.sliding_window + decode_inputs[f"past_key.{i}"] = np.concatenate((k[:, :, mod_pos_id:, :], k[:, :, :mod_pos_id, :]), axis=-2) + decode_inputs[f"past_value.{i}"] = np.concatenate((v[:, :, mod_pos_id:, :], v[:, :, :mod_pos_id, :]), axis=-2) + else: + decode_inputs[f"past_key.{i}"] = qpc_out[f"past_key.{i}_RetainedState"] + decode_inputs[f"past_value.{i}"] = qpc_out[f"past_value.{i}_RetainedState"] + +st = time.time() +decode_out = decode_session.run(decode_inputs) +print(f"time for first run of decode with KV as input = {time.time() - st} sec\n") +decode_session.skip_buffers( + [x for x in decode_session.input_names + decode_session.output_names if 
x.startswith("past_")] +) +pos_id = np.max(decode_inputs["position_ids"]).reshape(1, 1) + 1 +st = time.time() +for i in range(generation_len - 2): + loop_decode_inputs = { + "input_ids": np.argmax(decode_out["logits"]).reshape(1, 1), + "position_ids": pos_id, + } + all_outputs.append(loop_decode_inputs["input_ids"][0][0]) + decode_out = decode_session.run(loop_decode_inputs) + pos_id += 1 + + +print(f"time for decode generation = {(time.time() - st) / (generation_len - 2)}") +print(all_outputs) +print(tokenizer.decode(all_outputs)) diff --git a/examples/disagg_serving/subfunction_120b_npi.yaml b/examples/disagg_serving/subfunction_120b_npi.yaml new file mode 100644 index 000000000..762703d58 --- /dev/null +++ b/examples/disagg_serving/subfunction_120b_npi.yaml @@ -0,0 +1,27 @@ +FP32NodeInstanceNames: + - CustomRMSNorm_58 + - onnx::Shape_1033777 + - CustomRMSNorm_349 + - hidden.127 + - CustomRMSNorm_27448 + - onnx::Shape_1066066 + - CustomRMSNorm_27709 + - hidden.131 + - CustomRMSNorm_54808 + - onnx::Shape_878 + - CustomRMSNorm_55105 + - hidden + - hidden_states.259 + - Add_348 + - Add_347 + - onnx::Add_1034099 + - hidden_states.267 + - Add_27708 + - onnx::Add_1066358 + - Add_27707 + - hidden_states.3 + - Add_55104 + - onnx::Add_1209 + - Add_55103 + - /model/norm/CustomRMSNorm + - /model/norm/CustomRMSNorm_output_0 \ No newline at end of file diff --git a/examples/disagg_serving/without_subfunc_npi_120b.yaml b/examples/disagg_serving/without_subfunc_npi_120b.yaml new file mode 100644 index 000000000..ec6cf034f --- /dev/null +++ b/examples/disagg_serving/without_subfunc_npi_120b.yaml @@ -0,0 +1,148 @@ +FP32NodeInstanceNames: + - /model/layers.0/Add_1_output_0 + - /model/layers.0/Add_output_0 + - /model/layers.0/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.0/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.1/Add_1_output_0 + - /model/layers.1/Add_output_0 + - /model/layers.1/input_layernorm/CustomRMSNorm_output_0 + - 
/model/layers.1/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.10/Add_1_output_0 + - /model/layers.10/Add_output_0 + - /model/layers.10/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.10/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.11/Add_1_output_0 + - /model/layers.11/Add_output_0 + - /model/layers.11/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.11/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.12/Add_1_output_0 + - /model/layers.12/Add_output_0 + - /model/layers.12/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.12/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.13/Add_1_output_0 + - /model/layers.13/Add_output_0 + - /model/layers.13/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.13/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.14/Add_1_output_0 + - /model/layers.14/Add_output_0 + - /model/layers.14/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.14/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.15/Add_1_output_0 + - /model/layers.15/Add_output_0 + - /model/layers.15/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.15/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.16/Add_1_output_0 + - /model/layers.16/Add_output_0 + - /model/layers.16/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.16/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.17/Add_1_output_0 + - /model/layers.17/Add_output_0 + - /model/layers.17/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.17/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.18/Add_1_output_0 + - /model/layers.18/Add_output_0 + - /model/layers.18/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.18/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.19/Add_1_output_0 + - /model/layers.19/Add_output_0 + - /model/layers.19/input_layernorm/CustomRMSNorm_output_0 + - 
/model/layers.19/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.2/Add_1_output_0 + - /model/layers.2/Add_output_0 + - /model/layers.2/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.2/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.20/Add_1_output_0 + - /model/layers.20/Add_output_0 + - /model/layers.20/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.20/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.21/Add_1_output_0 + - /model/layers.21/Add_output_0 + - /model/layers.21/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.21/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.22/Add_1_output_0 + - /model/layers.22/Add_output_0 + - /model/layers.22/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.22/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.23/Add_1_output_0 + - /model/layers.23/Add_output_0 + - /model/layers.23/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.23/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.24/Add_1_output_0 + - /model/layers.24/Add_output_0 + - /model/layers.24/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.24/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.25/Add_1_output_0 + - /model/layers.25/Add_output_0 + - /model/layers.25/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.25/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.26/Add_1_output_0 + - /model/layers.26/Add_output_0 + - /model/layers.26/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.26/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.27/Add_1_output_0 + - /model/layers.27/Add_output_0 + - /model/layers.27/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.27/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.28/Add_1_output_0 + - /model/layers.28/Add_output_0 + - /model/layers.28/input_layernorm/CustomRMSNorm_output_0 + - 
/model/layers.28/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.29/Add_1_output_0 + - /model/layers.29/Add_output_0 + - /model/layers.29/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.29/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.3/Add_1_output_0 + - /model/layers.3/Add_output_0 + - /model/layers.3/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.3/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.30/Add_1_output_0 + - /model/layers.30/Add_output_0 + - /model/layers.30/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.30/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.31/Add_1_output_0 + - /model/layers.31/Add_output_0 + - /model/layers.31/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.31/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.32/Add_1_output_0 + - /model/layers.32/Add_output_0 + - /model/layers.32/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.32/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.33/Add_1_output_0 + - /model/layers.33/Add_output_0 + - /model/layers.33/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.33/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.34/Add_1_output_0 + - /model/layers.34/Add_output_0 + - /model/layers.34/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.34/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.35/Add_1_output_0 + - /model/layers.35/Add_output_0 + - /model/norm/Add_output_0 + - /model/layers.35/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.35/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.4/Add_1_output_0 + - /model/layers.4/Add_output_0 + - /model/layers.4/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.4/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.5/Add_1_output_0 + - /model/layers.5/Add_output_0 + - 
/model/layers.5/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.5/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.6/Add_1_output_0 + - /model/layers.6/Add_output_0 + - /model/layers.6/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.6/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.7/Add_1_output_0 + - /model/layers.7/Add_output_0 + - /model/layers.7/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.7/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.8/Add_1_output_0 + - /model/layers.8/Add_output_0 + - /model/layers.8/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.8/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/layers.9/Add_1_output_0 + - /model/layers.9/Add_output_0 + - /model/layers.9/input_layernorm/CustomRMSNorm_output_0 + - /model/layers.9/post_attention_layernorm/CustomRMSNorm_output_0 + - /model/norm/CustomRMSNorm_output_0 + \ No newline at end of file diff --git a/examples/gpt_oss_disagg_mode_with_chunking.py b/examples/gpt_oss_disagg_mode_with_chunking.py new file mode 100644 index 000000000..363e2806c --- /dev/null +++ b/examples/gpt_oss_disagg_mode_with_chunking.py @@ -0,0 +1,137 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import time + +import numpy as np +import torch +from transformers import AutoConfig, AutoTokenizer + +from QEfficient import QEFFAutoModelForCausalLM +from QEfficient.generation.cloud_infer import QAICInferenceSession + +model_id = "openai/gpt-oss-20b" # weights are not required to convert to fp32 + +prompt = """ +Once upon a time, in a small town, there lived a young boy named Alex. Alex was a curious and adventurous child, always eager to explore the world around him. 
One day, while playing in the park, Alex stumbled upon a mysterious old book hidden beneath a pile of leaves. The book was filled with stories of distant lands, magical creatures, and extraordinary adventures. + +As Alex flipped through the pages, he discovered a map that led to a hidden treasure. Excited by the prospect of a real-life treasure hunt, Alex decided to embark on a thrilling journey. He packed his backpack with snacks, a flashlight, and a compass, and set off into the unknown. + +The path to the treasure was not an easy one. Alex had to navigate through dense forests, cross rickety bridges, and solve riddles that guarded the treasure's location. +""" +# Run prefill +config = AutoConfig.from_pretrained(model_id) +tokenizer = AutoTokenizer.from_pretrained(model_id) +PREFILL_SEQ_LEN = 128 +CTX_LEN = 128 * 3 + +qeff_model = QEFFAutoModelForCausalLM.from_pretrained(model_id) + +decode_qpc_path = qeff_model.compile( + prefill_seq_len=1, + ctx_len=CTX_LEN, + num_cores=16, + mxfp6_matmul=True, + mxint8_kv_cache=True, + num_devices=1, + mos=1, + aic_enable_depth_first=True, + num_speculative_tokens=None, + offload_pt_weights=False, # Need the weights in memory for prefill-model export/compilation in the next step + retain_full_kv=True, +) + + +# Following command errors out by default, the user is supposed to run the printed command and provide the generated qpc path as prefill_qpc_path commenting out lines 55-68 +# prefill_qpc_path = "provide path here" +prefill_qpc_path = qeff_model.compile( + prefill_seq_len=PREFILL_SEQ_LEN, + ctx_len=CTX_LEN, + num_cores=16, + mxfp6_matmul=True, + mxint8_kv_cache=True, + num_devices=1, + mos=1, + aic_enable_depth_first=True, + num_speculative_tokens=None, + prefill_only=True, + enable_chunking=True, + use_onnx_subfunctions=True, +) + + +inputs = tokenizer(prompt, return_tensors="np", padding=True) +position_ids = inputs["attention_mask"].sum(1, keepdims=True) +generation_len = CTX_LEN - position_ids.max() +padded_len = 
inputs["input_ids"].shape[1] +num_chunks = -(padded_len // -PREFILL_SEQ_LEN) # ceil divide without float +padded_len = num_chunks * PREFILL_SEQ_LEN # Convert to a multiple of prompt_len +inputs = tokenizer(prompt, return_tensors="np", padding="max_length", max_length=padded_len) +inputs["position_ids"] = np.where(inputs.pop("attention_mask"), np.arange(padded_len), -1) +inputs.pop("token_type_ids", None) +inputs = {k: torch.from_numpy(v) for k, v in inputs.items()} +inputs.pop("past_key_values", None) +inputs = {k: v.detach().numpy() for k, v in inputs.items()} + + +decode_session = QAICInferenceSession(decode_qpc_path) +prefill_session = QAICInferenceSession(prefill_qpc_path) + +all_outputs = [] +for i in range(num_chunks): + chunk_inputs = inputs.copy() + chunk_inputs["input_ids"] = inputs["input_ids"][:, i * PREFILL_SEQ_LEN : (i + 1) * PREFILL_SEQ_LEN] + chunk_inputs["position_ids"] = inputs["position_ids"][:, i * PREFILL_SEQ_LEN : (i + 1) * PREFILL_SEQ_LEN] + ins = time.time() + qpc_out = prefill_session.run(chunk_inputs) + print(f"time for this run={time.time() - ins}") + for i in range(config.num_hidden_layers): + inputs[f"past_key.{i}"] = qpc_out[f"past_key.{i}_RetainedState"] + inputs[f"past_value.{i}"] = qpc_out[f"past_value.{i}_RetainedState"] + +all_outputs.append(np.argmax(qpc_out["logits"])) +decode_inputs = { + "input_ids": np.argmax(qpc_out["logits"]).reshape(1, 1), + "position_ids": np.max(inputs["position_ids"]).reshape(1, 1) + 1, +} +for i in range(config.num_hidden_layers): + decode_inputs[f"past_key.{i}"] = qpc_out[f"past_key.{i}_RetainedState"] + decode_inputs[f"past_value.{i}"] = qpc_out[f"past_value.{i}_RetainedState"] + +st = time.time() +decode_out = decode_session.run(decode_inputs) +print(f"time for first run of decode with KV as input = {time.time() - st} sec\n") +all_outputs.append(np.argmax(decode_out["logits"])) +pos_id = np.max(decode_inputs["position_ids"]).reshape(1, 1) + 1 +loop_decode_inputs = { + "input_ids": 
np.argmax(decode_out["logits"]).reshape(1, 1), + "position_ids": pos_id, +} + +for i in range(config.num_hidden_layers): + loop_decode_inputs[f"past_key.{i}"] = decode_out[f"past_key.{i}_RetainedState"] + loop_decode_inputs[f"past_value.{i}"] = decode_out[f"past_value.{i}_RetainedState"] + +st = time.time() +for i in range(generation_len - 2): + decode_out = decode_session.run(loop_decode_inputs) + all_outputs.append(np.argmax(decode_out["logits"])) + pos_id += 1 + for i in range(config.num_hidden_layers): + loop_decode_inputs[f"past_key.{i}"] = decode_out[f"past_key.{i}_RetainedState"] + loop_decode_inputs[f"past_value.{i}"] = decode_out[f"past_value.{i}_RetainedState"] + + loop_decode_inputs.update( + { + "input_ids": np.argmax(decode_out["logits"]).reshape(1, 1), + "position_ids": pos_id, + } + ) +ft = time.time() + +print(f"decode tok/sec={(generation_len - 2) / (ft - st)}") +print(f"input\n{prompt}\noutput\n{tokenizer.decode(all_outputs)}") diff --git a/scripts/Jenkinsfile b/scripts/Jenkinsfile index d878076fa..8f95c1d98 100644 --- a/scripts/Jenkinsfile +++ b/scripts/Jenkinsfile @@ -42,7 +42,7 @@ pipeline { mkdir -p $PWD/Non_cli_qaic && export TOKENIZERS_PARALLELISM=false && export QEFF_HOME=$PWD/Non_cli_qaic && - pytest tests -m '(not cli) and (not on_qaic) and (not finetune)' --ignore tests/vllm --junitxml=tests/tests_log1.xml && + pytest tests -m '(not cli) and (not on_qaic) and (not finetune)' --ignore tests/vllm -n 4 --junitxml=tests/tests_log1.xml && junitparser merge tests/tests_log1.xml tests/tests_log.xml && deactivate" ''' diff --git a/tests/peft/lora/test_lora_model.py b/tests/peft/lora/test_lora_model.py index 00a4216b7..46b33c60b 100644 --- a/tests/peft/lora/test_lora_model.py +++ b/tests/peft/lora/test_lora_model.py @@ -222,7 +222,7 @@ def test_auto_lora_model_for_causal_lm_noncb_export_compile_generate( # export start = perf_counter() - qeff_model.export(export_dir=tmp_path) + onnx_path = qeff_model.export(export_dir=tmp_path) end = 
perf_counter() export_time_0 = end - start model_path = tmp_path.with_name(tmp_path.name + "-" + qeff_model.export_hash) @@ -237,7 +237,7 @@ def test_auto_lora_model_for_causal_lm_noncb_export_compile_generate( assert export_time_1 < export_time_0 # test compile - qeff_model.compile(prefill_seq_len=32, ctx_len=64) + qeff_model.compile(onnx_path=onnx_path, prefill_seq_len=32, ctx_len=64) assert Path(qeff_model.qpc_path).is_dir() assert os.path.isfile(os.path.join(os.path.dirname(qeff_model.qpc_path), "qconfig.json")) diff --git a/tests/peft/test_peft_model.py b/tests/peft/test_peft_model.py index cc94467db..c3bb2f140 100644 --- a/tests/peft/test_peft_model.py +++ b/tests/peft/test_peft_model.py @@ -178,9 +178,9 @@ def test_auto_peft_model_for_causal_lm_activate_invalid(base_config, adapter_con def test_auto_peft_model_for_causal_lm_compile_generate(base_config, adapter_config, batch_size, tmp_path): _, lora_model = create_peft_model(base_config, adapter_config) qeff_model = QEffAutoPeftModelForCausalLM(lora_model) - qeff_model.export(tmp_path) + onnx_path = qeff_model.export(tmp_path) start = perf_counter() - qeff_model.compile(batch_size=batch_size, prefill_seq_len=32, ctx_len=128) + qeff_model.compile(onnx_path=onnx_path, batch_size=batch_size, prefill_seq_len=32, ctx_len=128) end = perf_counter() compile_time_0 = end - start @@ -197,7 +197,7 @@ def test_auto_peft_model_for_causal_lm_compile_generate(base_config, adapter_con ) start = perf_counter() - qeff_model.compile(batch_size=batch_size, prefill_seq_len=32, ctx_len=128) + qeff_model.compile(onnx_path=onnx_path, batch_size=batch_size, prefill_seq_len=32, ctx_len=128) end = perf_counter() compile_time_1 = end - start assert compile_time_1 < 0.01 * compile_time_0 diff --git a/tests/transformers/models/test_disagg_mode.py b/tests/transformers/models/test_disagg_mode.py new file mode 100644 index 000000000..6358940df --- /dev/null +++ b/tests/transformers/models/test_disagg_mode.py @@ -0,0 +1,192 @@ +# 
----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import time + +import numpy as np +import pytest +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer, HybridCache + +from QEfficient import QEFFAutoModelForCausalLM +from QEfficient.generation.cloud_infer import QAICInferenceSession +from QEfficient.transformers.quantizers import replace_transformers_quantizers, undo_transformers_quantizers + +model_id = "openai/gpt-oss-120b" # weights are not required to convert to fp32 + +prompt2 = """ +Once upon a time, in a small town, there lived a young boy named Alex. Alex was a curious and adventurous child, always eager to explore the world around him. One day, while playing in the park, Alex stumbled upon a mysterious old book hidden beneath a pile of leaves. The book was filled with stories of distant lands, magical creatures, and extraordinary adventures. + +As Alex flipped through the pages, he discovered a map that led to a hidden treasure. Excited by the prospect of a real-life treasure hunt, Alex decided to embark on a thrilling journey. He packed his backpack with snacks, a flashlight, and a compass, and set off into the unknown. + +The path to the treasure was not an easy one. Alex had to navigate through dense forests, cross rickety bridges, and solve riddles that guarded the treasure's location. 
+""" +prompt1 = "Once upon a time" + +prompts = [prompt1, prompt2] + + +@pytest.mark.on_qaic +@pytest.mark.parametrize("model_id", [model_id]) +@pytest.mark.parametrize("prompt", prompts) +def test_disagg_mode_prefill(model_id, prompt): + # Run prefill + tokenizer = AutoTokenizer.from_pretrained(model_id) + PREFILL_SEQ_LEN = 256 + CTX_LEN = 256 + inputs = tokenizer(prompt, return_tensors="np", padding=True) + padded_len = inputs["input_ids"].shape[1] + num_chunks = -(padded_len // -PREFILL_SEQ_LEN) # ceil divide without float + padded_len = num_chunks * PREFILL_SEQ_LEN # Convert to a multiple of prompt_len + + replace_transformers_quantizers() + model = AutoModelForCausalLM.from_pretrained(model_id, num_hidden_layers=2) + config = model.config + inputs = tokenizer(prompt, return_tensors="np", padding="max_length", max_length=padded_len) + inputs["position_ids"] = np.where(inputs.pop("attention_mask"), np.arange(padded_len), -1) + inputs.pop("token_type_ids", None) + inputs = {k: torch.from_numpy(v).to(model.device) for k, v in inputs.items()} + cache = HybridCache(config=config, batch_size=1, max_cache_len=CTX_LEN) + ins = tokenizer(prompt, return_tensors="pt") + out = model(**ins, past_key_values=cache) + + undo_transformers_quantizers() + + qeff_model = QEFFAutoModelForCausalLM.from_pretrained(model_id, num_hidden_layers=2) + qeff_model.prefill(True) + config = qeff_model.model.config + inputs = tokenizer(prompt, return_tensors="np", padding="max_length", max_length=padded_len) + inputs["position_ids"] = np.where(inputs.pop("attention_mask"), np.arange(padded_len), -1) + inputs.pop("token_type_ids", None) + inputs = {k: torch.from_numpy(v) for k, v in inputs.items()} + past_key_values = [] + for i in range(config.num_hidden_layers): + cache_len = 128 if i % 2 == 0 else PREFILL_SEQ_LEN + pad_shape = (1, 8, cache_len, 64) + past_key = torch.zeros((pad_shape), dtype=torch.float32) + past_value = torch.zeros((pad_shape), dtype=torch.float32) + pkv = (past_key, 
past_value) + past_key_values.append(pkv) + inputs["past_key_values"] = past_key_values + + qeff_out = qeff_model.model(**inputs) + + # Check our pytorch implementation + assert (qeff_out.logits - out.logits[:, -1, :]).abs().max() < 1e-4 + + prefill_qpc_path = qeff_model.compile( + prefill_seq_len=PREFILL_SEQ_LEN, + ctx_len=CTX_LEN, + num_cores=16, + mxfp6_matmul=False, + mxint8_kv_cache=False, + num_devices=1, + mos=1, + aic_enable_depth_first=True, + num_speculative_tokens=None, + prefill_only=True, + ) + + prefill_session = QAICInferenceSession(prefill_qpc_path) + logits_out_placeholder = np.zeros((1, 1, 201088), dtype=np.float32) + prefill_session.set_buffers({"logits": logits_out_placeholder}) + inputs.pop("past_key_values") + inputs = {k: v.detach().numpy() for k, v in inputs.items()} + st = time.time() + qpc_out = prefill_session.run(inputs) + print(f"time for prefill_run={time.time() - st} sec\n") + del prefill_session + # Check QAIC output isclose with QEFF pytorch output + assert (torch.from_numpy(qpc_out["logits"]) - qeff_out.logits).abs().max() < 5e-2 + + +@pytest.mark.skip(reason="no way of currently testing this without the assert sdk") +@pytest.mark.on_qaic +@pytest.mark.parametrize("model_id", [model_id]) +@pytest.mark.parametrize("prompt", prompts) +def test_disagg_mode_prefill_chunked(model_id, prompt): + # Run prefill + tokenizer = AutoTokenizer.from_pretrained(model_id) + PREFILL_SEQ_LEN = 128 + CTX_LEN = 128 * 3 + inputs = tokenizer(prompt, return_tensors="np", padding=True) + padded_len = inputs["input_ids"].shape[1] + num_chunks = -(padded_len // -PREFILL_SEQ_LEN) # ceil divide without float + padded_len = num_chunks * PREFILL_SEQ_LEN # Convert to a multiple of prompt_len + + replace_transformers_quantizers() + model = AutoModelForCausalLM.from_pretrained(model_id, num_hidden_layers=2) + config = model.config + inputs = tokenizer(prompt, return_tensors="np", padding="max_length", max_length=padded_len) + inputs["position_ids"] = 
np.where(inputs.pop("attention_mask"), np.arange(padded_len), -1) + inputs.pop("token_type_ids", None) + inputs = {k: torch.from_numpy(v).to(model.device) for k, v in inputs.items()} + cache = HybridCache(config=config, batch_size=1, max_cache_len=CTX_LEN) + ins = tokenizer(prompt, return_tensors="pt") + out = model(**ins, past_key_values=cache) + + undo_transformers_quantizers() + + qeff_model = QEFFAutoModelForCausalLM.from_pretrained(model_id, num_hidden_layers=2) + qeff_model.prefill(True, enable_chunking=True) + config = qeff_model.model.config + inputs = tokenizer(prompt, return_tensors="np", padding="max_length", max_length=padded_len) + inputs["position_ids"] = np.where(inputs.pop("attention_mask"), np.arange(padded_len), -1) + inputs.pop("token_type_ids", None) + inputs = {k: torch.from_numpy(v) for k, v in inputs.items()} + past_key_values = [] + for i in range(config.num_hidden_layers): + cache_len = CTX_LEN + pad_shape = (1, 8, cache_len, 64) + past_key = torch.zeros((pad_shape), dtype=torch.float32) + past_value = torch.zeros((pad_shape), dtype=torch.float32) + pkv = (past_key, past_value) + past_key_values.append(pkv) + inputs["past_key_values"] = past_key_values + + for i in range(num_chunks): + chunk_inputs = inputs.copy() + chunk_inputs["input_ids"] = inputs["input_ids"][:, i * PREFILL_SEQ_LEN : (i + 1) * PREFILL_SEQ_LEN] + chunk_inputs["position_ids"] = inputs["position_ids"][:, i * PREFILL_SEQ_LEN : (i + 1) * PREFILL_SEQ_LEN] + + qeff_out = qeff_model.model(**chunk_inputs) + inputs["past_key_values"] = qeff_out["past_key_values"] + + # Check our pytorch implementation + assert (qeff_out.logits - out.logits[:, -1, :]).abs().max() < 1e-4 + + prefill_qpc_path = qeff_model.compile( + prefill_seq_len=PREFILL_SEQ_LEN, + ctx_len=CTX_LEN, + num_cores=16, + mxfp6_matmul=False, + mxint8_kv_cache=False, + num_devices=1, + mos=1, + aic_enable_depth_first=True, + num_speculative_tokens=None, + prefill_only=True, + enable_chunking=True, + ) + prefill_session = 
QAICInferenceSession(prefill_qpc_path) + prefill_session.skip_buffers( + [x for x in prefill_session.input_names + prefill_session.output_names if x.startswith("past_")] + ) + logits_out_placeholder = np.zeros((1, 1, 201088), dtype=np.float32) + prefill_session.set_buffers({"logits": logits_out_placeholder}) + inputs.pop("past_key_values") + inputs = {k: v.detach().numpy() for k, v in inputs.items()} + st = time.time() + for i in range(num_chunks): + chunk_inputs = inputs.copy() + chunk_inputs["input_ids"] = inputs["input_ids"][:, i * PREFILL_SEQ_LEN : (i + 1) * PREFILL_SEQ_LEN] + chunk_inputs["position_ids"] = inputs["position_ids"][:, i * PREFILL_SEQ_LEN : (i + 1) * PREFILL_SEQ_LEN] + qpc_out = prefill_session.run(chunk_inputs) + print(f"time for prefill_run={time.time() - st} sec\n") + del prefill_session + # Check QAIC output isclose with QEFF pytorch output + assert (torch.from_numpy(qpc_out["logits"]) - qeff_out.logits).abs().max() < 8e-2 diff --git a/tests/transformers/test_causal_lm.py b/tests/transformers/test_causal_lm.py index 3eaaf0f69..72477d56a 100644 --- a/tests/transformers/test_causal_lm.py +++ b/tests/transformers/test_causal_lm.py @@ -14,10 +14,11 @@ from transformers import AutoConfig, AutoModel, AutoModelForCausalLM from QEfficient.transformers.models.modeling_auto import QEFFAutoModelForCausalLM +from QEfficient.transformers.models.pytorch_transforms import get_decoder_layer_classes_for_export from QEfficient.utils import constants, get_padding_shape_from_config from QEfficient.utils.hash_utils import hash_dict_params -configs = [ +test_configs = [ # name, max_position_embeddings, num_hidden_layers, num_attention_heads, hidden_size, intermediate_size, vocab_size, additional_params ("gpt2", 256, 2, 4, 128, 512, 127, {}), ("codegen", 256, 2, 4, 128, 512, 127, {"rotary_dim": 16}), @@ -36,30 +37,43 @@ ("gpt_oss", 256, 3, 4, 128, 512, 127, {"num_key_value_heads": 2}), ] -configs = [ - AutoConfig.for_model( - model_name, - 
max_position_embeddings=max_position_embeddings, - num_hidden_layers=num_hidden_layers, - num_attention_heads=num_attention_heads, - hidden_size=hidden_size, - intermediate_size=intermediate_size, - vocab_size=vocab_size, - **additional_params, - ) - for ( - model_name, - max_position_embeddings, - num_hidden_layers, - num_attention_heads, - hidden_size, - intermediate_size, - vocab_size, - additional_params, - ) in configs +test_prefill_only_specialized_models_configs = [ + ("gpt_oss", 256, 2, 2, 32, 32, 127, {"num_key_value_heads": 2}), ] + + +def get_auto_config_from_test_config(configs): + auto_configs = [ + AutoConfig.for_model( + model_name, + max_position_embeddings=max_position_embeddings, + num_hidden_layers=num_hidden_layers, + num_attention_heads=num_attention_heads, + hidden_size=hidden_size, + intermediate_size=intermediate_size, + vocab_size=vocab_size, + **additional_params, + ) + for ( + model_name, + max_position_embeddings, + num_hidden_layers, + num_attention_heads, + hidden_size, + intermediate_size, + vocab_size, + additional_params, + ) in configs + ] + return auto_configs + + +configs = get_auto_config_from_test_config(test_configs) config_ids = [x.model_type for x in configs] +prefill_only_configs = get_auto_config_from_test_config(test_prefill_only_specialized_models_configs) +prefill_only_config_ids = [x.model_type for x in prefill_only_configs] + model_kwargs = {"attn_implementation": "eager"} @@ -144,20 +158,21 @@ def test_causal_lm_export_and_hash(config, cb, tmp_path): @pytest.mark.parametrize("cb", [False, True], ids=["nocb", "cb"]) +@pytest.mark.parametrize("subfunc", [False, True], ids=["False", "True"]) @pytest.mark.parametrize("config", configs, ids=config_ids) -def test_causal_lm_hash_creation(config, cb, tmp_path): +def test_causal_lm_hash_creation(config, cb, subfunc, tmp_path): model = AutoModelForCausalLM.from_config(config, **model_kwargs) qeff_model = QEFFAutoModelForCausalLM(model, cb) - qeff_model.export(tmp_path) + 
qeff_model.export(tmp_path, use_onnx_subfunctions=subfunc) hash_params = {} hash_params["config"] = qeff_model.model.config.to_diff_dict() hash_params["peft_config"] = None hash_params["applied_transform_names"] = qeff_model._transform_names() hash_params["qeff_auto_class"] = qeff_model.__class__.__name__ + hash_params["max_seq_len_cached"] = None hash_params["qaic_config"] = None # Create parameters separately for hash creation - bs: int = constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE seq_len: int = constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN fbs: int = constants.ONNX_EXPORT_EXAMPLE_FBS @@ -190,12 +205,12 @@ def test_causal_lm_hash_creation(config, cb, tmp_path): ) output_names = [] output_names.append("logits") - + onnx_out_name_suffix = "InternalRetainedState" if subfunc else "RetainedState" for i in range(qeff_model.num_layers): pkv_dynamic_axes[i][0] = "full_batch_size" if qeff_model.continuous_batching else "batch_size" for kv in ["key", "value"]: dynamic_axes[f"past_{kv}.{i}"] = pkv_dynamic_axes[i] - output_names.append(f"past_{kv}.{i}_RetainedState") + output_names.append(f"past_{kv}.{i}_{onnx_out_name_suffix}") if qeff_model.continuous_batching: dynamic_axes["batch_index"] = {0: "batch_size"} @@ -204,11 +219,32 @@ def test_causal_lm_hash_creation(config, cb, tmp_path): export_params["output_names"] = output_names export_params["dynamic_axes"] = dynamic_axes hash_params["export_params"] = export_params + if subfunc: + hash_params["export_modules_as_functions"] = get_decoder_layer_classes_for_export(qeff_model.model) + manual_hash = hash_dict_params(hash_params) assert manual_hash == qeff_model.export_hash +@pytest.mark.parametrize("cb", [False, True], ids=["nocb", "cb"]) +@pytest.mark.parametrize("config", prefill_only_configs, ids=prefill_only_config_ids) +def test_prefill_only_specialized_models(config, cb, tmp_path): + model = AutoModelForCausalLM.from_config(config, **model_kwargs) + qeff_model = QEFFAutoModelForCausalLM(model, cb) + if cb: + with 
pytest.raises(NotImplementedError):
+            qeff_model.export(tmp_path, prefill_only=True, offload_pt_weights=False)
+    else:
+        with pytest.raises(ValueError):
+            qeff_model.export(tmp_path, prefill_only=True, offload_pt_weights=False)
+        qeff_model.export(tmp_path, prefill_only=True, prefill_seq_len=256, offload_pt_weights=False)
+        first_export_hash = qeff_model.export_hash
+        qeff_model.export(tmp_path, prefill_only=False, offload_pt_weights=False)
+        second_export_hash = qeff_model.export_hash
+        assert first_export_hash != second_export_hash
+
+
 @pytest.fixture
 def tmp_cache(tmp_path, monkeypatch):
     monkeypatch.setattr("QEfficient.utils.export_utils.QEFF_HOME", tmp_path)

From 418ac4f5451eb97242a56622427c75320542e8af Mon Sep 17 00:00:00 2001
From: Amit Raj
Date: Mon, 22 Dec 2025 08:24:35 +0000
Subject: [PATCH 48/60] Updated tests of onnx_subfunction (#668)

Update tests of onnx_subfunction to compare the hash of the .onnx file
when `use_onnx_subfunction` flag is toggled

---------

Signed-off-by: Amit Raj
Signed-off-by: Dhiraj Kumar Sah
---
 tests/transformers/test_subfunction.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/tests/transformers/test_subfunction.py b/tests/transformers/test_subfunction.py
index 6183e1282..006337eee 100644
--- a/tests/transformers/test_subfunction.py
+++ b/tests/transformers/test_subfunction.py
@@ -5,6 +5,8 @@
 #
 # ----------------------------------------------------------------------------
 
+import hashlib
+
 import pytest
 import torch
 from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
@@ -57,8 +59,14 @@ def test_subfunction_vs_nonsubfunction(config, tmp_path):
     without_sub_func_onnx = model_0_0.export(tmp_path, use_onnx_subfunctions=False)
     hash_0_1 = model_0_0.export_hash
 
+    # Test that the export hash changes when use_onnx_subfunction is toggled, indicating different parameters are used
     assert hash_0_0 != hash_0_1
 
+    # Test that the exported ONNX files hash are different by comparing their hashes when use_onnx_subfunction is toggled
+    with_sub_func_onnx_hash = hashlib.sha256(open(with_sub_func_onnx, "rb").read()).hexdigest()
+    without_sub_func_onnx_hash = hashlib.sha256(open(without_sub_func_onnx, "rb").read()).hexdigest()
+    assert with_sub_func_onnx_hash != without_sub_func_onnx_hash
+
     compile_params = {"prefill_seq_len": 8, "ctx_len": 16}
     model_0_0.compile(onnx_path=with_sub_func_onnx, **compile_params)
     generation_00 = model_0_0.generate(prompts=["Help me with this"], tokenizer=tokenizer)

From 0af4ebd7f77f93fe61803aa2706e6b1452e8ded2 Mon Sep 17 00:00:00 2001
From: quic-xiyushi
Date: Tue, 16 Dec 2025 21:06:56 -0800
Subject: [PATCH 49/60] Extend on-device sampling support for dual QPC VLMs
 (#597)

**Overview**
On-device sampling can significantly reduce host overhead and improve
inference throughput; however, so far it has only been implemented for
`QEffForCausalLM` models. This PR extends on-device sampling support to
the language decoder of dual QPC vision language models,
`QEffCausalLMForTextImageToTextModel`. In addition, it fixes the bug in
gumbel noise so that it correctly simulates a multinomial distribution
for random sampling.

**Implementation details**
```
class _QEffAutoModelForImageTextToTextDualQPC:
    def __init__(
        self,
        model: nn.Module,
        continuous_batching: bool = False,
        qaic_config: Optional[dict] = None,
        **kwargs,
    ):
        # Omitting unchanged parts
        self.lang_model = QEffCausalLMForTextImageToTextModel(model, qaic_config=qaic_config, **kwargs)

        # ---Sampling---
        # Note: SamplerTransform should be applied after all other transforms
        # are done. The role of the sampler is to just add nodes at the output of the
        # previous transform function.
        self.lang_model.model, _ = SamplerTransform.apply(self.lang_model.model, qaic_config, **kwargs)
```

**Usage**
The usage is similar to enabling on-device sampling for
`QEffForCausalLM`.
``` from QEfficient import QEFFAutoModelForImageTextToText model_id = "Qwen/Qwen2.5-VL-3B-Instruct" qeff_model = QEFFAutoModelForImageTextToText.from_pretrained( model_id, attn_implementation="eager", kv_offload=True, continuous_batching=True, qaic_config={ "include_sampler": True, "return_pdfs": False, "max_top_k_ids": 512, }, ) ``` --------- Signed-off-by: quic-xiyushi Signed-off-by: quic-sanising Signed-off-by: sanising Signed-off-by: Mamta Singh <168400541+quic-mamta@users.noreply.github.com> Co-authored-by: sanising Co-authored-by: Mamta Singh <168400541+quic-mamta@users.noreply.github.com> Signed-off-by: Dhiraj Kumar Sah --- QEfficient/generation/vlm_generation.py | 13 + .../transformers/models/modeling_auto.py | 134 +++---- .../transformers/models/pytorch_transforms.py | 4 + QEfficient/transformers/sampler/sampler.py | 88 +++-- QEfficient/utils/sampler_utils.py | 91 ++++- examples/performance/on_device_sampling.py | 13 +- tests/transformers/sampler/test_sampler.py | 330 +++++++++++++----- 7 files changed, 463 insertions(+), 210 deletions(-) diff --git a/QEfficient/generation/vlm_generation.py b/QEfficient/generation/vlm_generation.py index b37fdc74a..c603a60d0 100644 --- a/QEfficient/generation/vlm_generation.py +++ b/QEfficient/generation/vlm_generation.py @@ -36,6 +36,7 @@ write_io_files, ) from QEfficient.utils import LRUCache +from QEfficient.utils.constants import Constants from QEfficient.utils.logging_utils import logger @@ -313,6 +314,13 @@ def _execute_chunked_prefill( prefill_ccl_id = 0 lang_inputs["comp_ctx_lengths"] = self.list_of_comp_ctx_lengths_prefill[prefill_ccl_id] + if self.include_sampler: + for op in Constants.SAMPLER_OPS: + if decode_batch_id is not None: + lang_inputs[op] = self.sampling_params[op][decode_batch_id.flatten()] + else: + lang_inputs[op] = self.sampling_params[op] + for i in range(num_chunks): input_ids_slice = lang_inputs["input_ids"][:, i * self._prefill_seq_len : (i + 1) * self._prefill_seq_len] position_ids_slice = 
lang_inputs["position_ids"][ @@ -338,6 +346,11 @@ def _execute_chunked_prefill( chunk_inputs["comp_ctx_lengths"] = lang_inputs["comp_ctx_lengths"] + if self.include_sampler: + chunk_inputs["last_accepted_output_tokens"] = chunk_inputs["input_ids"] + for op in Constants.SAMPLER_OPS: + chunk_inputs[op] = lang_inputs[op] + outputs = self._session.run(chunk_inputs) if "image_idx_output" in outputs: diff --git a/QEfficient/transformers/models/modeling_auto.py b/QEfficient/transformers/models/modeling_auto.py index 008147c03..dc03ba82f 100644 --- a/QEfficient/transformers/models/modeling_auto.py +++ b/QEfficient/transformers/models/modeling_auto.py @@ -9,7 +9,7 @@ import warnings from pathlib import Path from time import perf_counter -from typing import Dict, List, Optional, Union +from typing import List, Optional, Union import numpy as np import torch @@ -70,6 +70,7 @@ ) from QEfficient.utils.check_ccl_specializations import process_ccl_specializations from QEfficient.utils.logging_utils import logger +from QEfficient.utils.sampler_utils import get_sampling_inputs_and_outputs class QEFFTransformersBase(QEFFBaseModel): @@ -719,7 +720,7 @@ class QEffCausalLMForTextImageToTextModel(QEFFBaseModel): ] _onnx_transforms = [FP16ClipTransform, SplitTensorsTransform] - def __init__(self, model, qaic_config, **kwargs): + def __init__(self, model, qaic_config: Optional[dict] = None, **kwargs): """ Initializes the language decoder component for multimodal models. @@ -733,7 +734,7 @@ def __init__(self, model, qaic_config, **kwargs): **kwargs : Additional keyword arguments passed to the base class constructor. """ - super().__init__(model, **kwargs) + super().__init__(model, qaic_config=qaic_config, **kwargs) self.model = model.get_qeff_language_decoder() self.model.qaic_config = qaic_config self.hash_params["qeff_auto_class"] = self.__class__.__name__ @@ -871,16 +872,16 @@ def __init__( ---------- model : nn.Module The full HuggingFace multimodal model. 
+ qaic_config : dict, optional + A dictionary for QAIC-specific configurations. **kwargs : - Additional keyword arguments. `full_batch_size` is not supported here. - - Raises - ------ - NotImplementedError - If `full_batch_size` is provided. + Additional keyword arguments. """ if kwargs.pop("full_batch_size", None): - raise NotImplementedError("Continuous batching is not supported for image-text-to-text models yet.") + continuous_batching = True + warnings.warn( + "full_batch_size argument is deprecated. Use continuous_batching=True instead.", DeprecationWarning, 2 + ) self.model = model self.config = model.config @@ -892,6 +893,11 @@ def __init__( self.ccl_enabled = qaic_config.get("ccl_enabled", False) self.comp_ctx_lengths_prefill, self.comp_ctx_lengths_decode = None, None self.input_shapes, self.output_names = None, None + # ---Sampling--- + # Note: SamplerTransform should be applied after all other transforms + # are done. The role of the sampler is to just add nodes at the output of the + # previous transform function. 
+ self.lang_model.model, _ = SamplerTransform.apply(self.lang_model.model, qaic_config, **kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: str, qaic_config: Optional[dict] = None, **kwargs): @@ -1002,6 +1008,19 @@ def export( kv_offload=True, comp_ctx_lengths=self.comp_ctx_lengths_decode ) output_names = self.model.get_output_names(kv_offload=True) + if self.lang_model.model.qaic_config is not None and self.lang_model.model.qaic_config.get( + "include_sampler", False + ): + logits_index = output_names["lang"].index("logits") + output_names["lang"][logits_index] = "next_tokens" + inputs["lang"], output_names["lang"], dynamic_axes["lang"] = get_sampling_inputs_and_outputs( + example_inputs=inputs["lang"], + output_names=output_names["lang"], + dynamic_axes=dynamic_axes["lang"], + continuous_batching=self.continuous_batching, + vocab_size=self.model.language_model.config.vocab_size, + qaic_config=self.lang_model.model.qaic_config, + ) self.vision_model.export( inputs["vision"], @@ -1234,6 +1253,7 @@ def generate( generation_len: Optional[int] = None, image_height: Optional[int] = None, image_width: Optional[int] = None, + **kwargs, ) -> Union[torch.Tensor, np.ndarray]: """ Generates output by executing the compiled QPC(s) on Cloud AI 100 Hardware cards. @@ -1294,6 +1314,7 @@ def generate( comp_ctx_lengths_decode=self.comp_ctx_lengths_decode, image_height=image_height, image_width=image_width, + **kwargs, ) # Call generate method @@ -1576,10 +1597,15 @@ def __init__( Raises ------ NotImplementedError - If `full_batch_size` is provided. + If `full_batch_size` is provided or `include_sampler` is True. """ if kwargs.pop("full_batch_size", None): + warnings.warn( + "full_batch_size argument is deprecated. 
Use continuous_batching=True instead.", DeprecationWarning, 2 + ) raise NotImplementedError("Continuous batching is not supported for image-text-to-text models yet.") + if qaic_config is not None and qaic_config.pop("include_sampler", False): + raise NotImplementedError("On-device sampling is not supported for single QPC multimodal models yet.") super().__init__(model, **kwargs) self.model.qaic_config = qaic_config @@ -2196,6 +2222,8 @@ def from_pretrained( If True, uses the dual QPC approach (vision encoder KV offloaded). If False, uses the single QPC approach (entire model in one QPC). If None, the default behavior of the internal classes is used (typically dual QPC). + qaic_config : dict, optional + A dictionary for QAIC-specific configurations. **kwargs : Additional arguments passed to HuggingFace's ``from_pretrained``. @@ -2659,10 +2687,13 @@ def export( dynamic_axes["num_logits_to_keep"] = {0: "num_logits_to_keep"} if self.model.qaic_config is not None and self.model.qaic_config.get("include_sampler", False): - example_inputs, output_names, dynamic_axes = self.get_sampling_inputs_and_outputs( + example_inputs, output_names, dynamic_axes = get_sampling_inputs_and_outputs( example_inputs=example_inputs, output_names=output_names, dynamic_axes=dynamic_axes, + continuous_batching=self.continuous_batching, + vocab_size=self.model.config.vocab_size, + qaic_config=self.model.qaic_config, ) return self._export( example_inputs, @@ -2674,85 +2705,6 @@ def export( prefill_only=prefill_only, ) - def get_sampling_inputs_and_outputs( - self, - example_inputs: Dict[str, torch.Tensor], - output_names: List[str], - dynamic_axes: Dict[str, Dict[int, str]], - ): - """ - Updates the example inputs, output names, and dynamic axes to include - parameters relevant for on-device sampling during ONNX export. - - Parameters - ---------- - example_inputs : Dict[str, torch.Tensor] - Current dictionary of example inputs. - output_names : List[str] - Current list of output names. 
- dynamic_axes : Dict[str, Dict[int, str]] - Current dictionary of dynamic axes configurations. - - Returns - ------- - Tuple[Dict[str, torch.Tensor], List[str], Dict[str, Dict[int, str]]] - Updated example inputs, output names, and dynamic axes including - sampling-related parameters. - """ - bs: int = constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE - fbs: int = constants.ONNX_EXPORT_EXAMPLE_FBS - - example_inputs["last_accepted_output_tokens"] = torch.zeros( - (bs, constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN), dtype=torch.int64 - ) - dynamic_axes["last_accepted_output_tokens"] = {0: "batch_size", 1: "seq_len"} - - example_inputs["past_repetition_penalty_buffer"] = torch.zeros( - (fbs if self.continuous_batching else bs, self.model.config.vocab_size), dtype=torch.bool - ) - dynamic_axes["past_repetition_penalty_buffer"] = { - 0: "full_batch_size" if self.continuous_batching else "batch_size", - } - output_names.append("past_repetition_penalty_buffer_RetainedState") - - example_inputs["repetition_penalties"] = ( - torch.ones((bs, 1), dtype=torch.float) * constants.ONNX_EXPORT_EXAMPLE_REPETITION_PENALTIES - ) - dynamic_axes["repetition_penalties"] = {0: "batch_size"} - - example_inputs["past_presence_penalty_buffer"] = torch.zeros( - (fbs if self.continuous_batching else bs, self.model.config.vocab_size), dtype=torch.bool - ) - dynamic_axes["past_presence_penalty_buffer"] = { - 0: "full_batch_size" if self.continuous_batching else "batch_size", - } - output_names.append("past_presence_penalty_buffer_RetainedState") - - example_inputs["presence_penalties"] = ( - torch.zeros((bs, 1), dtype=torch.float) + constants.ONNX_EXPORT_EXAMPLE_PRESENCE_PENALTIES - ) - dynamic_axes["presence_penalties"] = {0: "batch_size"} - - example_inputs["temperatures"] = ( - torch.ones((bs, 1), dtype=torch.float) * constants.ONNX_EXPORT_EXAMPLE_TEMPERATURES - ) - dynamic_axes["temperatures"] = {0: "batch_size"} - - max_top_k_ids = self.model.qaic_config.get("max_top_k_ids", 
constants.ONNX_EXPORT_EXAMPLE_MAX_TOP_K_IDS) - example_inputs["top_ks"] = torch.randint(1, max_top_k_ids, size=(bs, 1)).to(torch.int32) - dynamic_axes["top_ks"] = {0: "batch_size"} - - example_inputs["top_ps"] = torch.ones((bs, 1), dtype=torch.float) * constants.ONNX_EXPORT_EXAMPLE_TOP_PS - dynamic_axes["top_ps"] = {0: "batch_size"} - - example_inputs["min_ps"] = torch.ones((bs, 1), dtype=torch.float) * constants.ONNX_EXPORT_EXAMPLE_MIN_PS - dynamic_axes["min_ps"] = {0: "batch_size"} - - example_inputs["random_numbers"] = torch.rand((bs, 1), dtype=torch.float) - dynamic_axes["random_numbers"] = {0: "batch_size"} - - return example_inputs, output_names, dynamic_axes - def build_prefill_specialization( self, prefill_seq_len: int = 32, diff --git a/QEfficient/transformers/models/pytorch_transforms.py b/QEfficient/transformers/models/pytorch_transforms.py index 4ba6641cf..9e021851b 100644 --- a/QEfficient/transformers/models/pytorch_transforms.py +++ b/QEfficient/transformers/models/pytorch_transforms.py @@ -301,6 +301,7 @@ QEffGrok1MultiHeadAttention, ) from QEfficient.transformers.models.internvl.modeling_internvl import ( + QEffInternDecoderWrapper, QEffInternVisionEmbeddings, QEffInternVLModel, ) @@ -404,6 +405,7 @@ QEffQwen2_5_VLModel, QEffQwen2_5_VLTextModel, QEffQwen2_5_VLVisionAttention, + QEffQwen_2_5_vl_DecoderWrapper, QEffQwen_2_5_vl_ForConditionalGeneration, ) from QEfficient.transformers.models.qwen3.modeling_qwen3 import ( @@ -757,10 +759,12 @@ class SamplerTransform: QEffGPTJForCausalLM, QEffGraniteForCausalLM, QEffGraniteMoeForCausalLM, + QEffInternDecoderWrapper, QEffLlamaForCausalLM, QEffMptForCausalLM, QEffPhi3ForCausalLM, QEffQwen2ForCausalLM, + QEffQwen_2_5_vl_DecoderWrapper, } @classmethod diff --git a/QEfficient/transformers/sampler/sampler.py b/QEfficient/transformers/sampler/sampler.py index 96846e712..fd7b87dcd 100644 --- a/QEfficient/transformers/sampler/sampler.py +++ b/QEfficient/transformers/sampler/sampler.py @@ -24,6 +24,8 @@ class 
SamplerOutput(ModelOutput): probs: torch.FloatTensor = None next_tokens: torch.IntTensor = None + vision_embeds: Optional[torch.FloatTensor] = None # For VLMs + image_idx: Optional[torch.IntTensor] = None # for VLMs past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None past_repetition_penalty_buffer: Optional[torch.Tensor] = None past_presence_penalty_buffer: Optional[torch.Tensor] = None @@ -47,7 +49,6 @@ def prefill_path( positions_mask = (position_ids[:, :1] != zero_tensor).view(-1, 1) mul_value = CtxScatterFuncCB3D.apply(mul_value, batch_index, zero_tensor, positions_mask) past_repetition_penalty_buffer *= mul_value - past_presence_penalty_buffer *= mul_value # Mask out-of-bounds or invalid position_ids or input_ids input_ids = torch.where(position_ids == -1, torch.iinfo(torch.int32).max, input_ids) @@ -59,6 +60,9 @@ def prefill_path( input_ids, torch.ones(input_ids.shape, dtype=torch.bool), ) + + mul_value = torch.zeros(past_presence_penalty_buffer.shape[0], 1, dtype=torch.bool) + past_presence_penalty_buffer *= mul_value return past_repetition_penalty_buffer, past_presence_penalty_buffer @@ -103,6 +107,7 @@ def sampler_forward( attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, + comp_ctx_lengths: Optional[torch.LongTensor] = None, batch_index: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, @@ -112,6 +117,8 @@ def sampler_forward( return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, num_logits_to_keep: Optional[int] = None, + vision_embeds: Optional[torch.FloatTensor] = None, + image_idx: Optional[torch.IntTensor] = None, last_accepted_output_tokens: Optional[torch.Tensor] = None, # (batch_size, spec_length or less) past_repetition_penalty_buffer: Optional[torch.Tensor] = None, repetition_penalties: 
Optional[torch.Tensor] = None, @@ -127,6 +134,9 @@ def sampler_forward( Perform the sampling of next tokens on the QAIC device (instead of the host) and return the next tokens and/or probability distributions. + The vision_embeds and image_idx parameters are optional + and are used only for VLMs when supported by the original forward function. + Args: last_accepted_output_tokens (`torch.Tensor`, *optional*): Output tokens accepted by the Speculative Decoding Draft Language Model. @@ -170,20 +180,37 @@ def sampler_forward( Sampling parameter that represents the random seeds to use for random sampling. Must be in [-1, 1]. """ - - outputs = self.old_forward( - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - past_key_values=past_key_values, - batch_index=batch_index, - inputs_embeds=inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - cache_position=cache_position, - ) + if vision_embeds is not None: + forward_kwargs = dict( + input_ids=input_ids, + vision_embeds=vision_embeds, + position_ids=position_ids, + image_idx=image_idx, + past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, + ) + if batch_index is not None: + forward_kwargs["batch_index"] = batch_index + + logits, vision_embeds, image_idx, past_key_values = self.old_forward(**forward_kwargs) + outputs = dict(logits=logits, vision_embeds=vision_embeds, image_idx=image_idx, past_key_values=past_key_values) + if position_ids.dim() == 3: # For models using m-rope + position_ids = position_ids[0] + else: + outputs = self.old_forward( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + comp_ctx_lengths=comp_ctx_lengths, + batch_index=batch_index, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + 
return_dict=return_dict, + cache_position=cache_position, + ) logits = outputs.get("logits", None) assert logits is not None, f"{self.model.__class__.__name__} does not return logits." @@ -224,17 +251,6 @@ def sampler_forward( is_prefill, past_presence_penalty_buffer_prefill, past_presence_penalty_buffer_decode ) - # Greedy Sampling - greedy_samples = torch.argmax(logits, dim=1, keepdim=True) # (batch_size * spec_length, 1) - if (temperatures == 0).all() and not self.qaic_config.get("return_pdfs", False): - return SamplerOutput( - probs=None, - next_tokens=greedy_samples.reshape(-1, spec_length, 1), # Return sampled next tokens instead of logits - past_key_values=outputs.past_key_values, - past_repetition_penalty_buffer=past_repetition_penalty_buffer, - past_presence_penalty_buffer=past_presence_penalty_buffer, - ) - # Repetition Penalty if (repetition_penalties != 1.0).any(): past_repetition_penalty_buffer_selected = past_repetition_penalty_buffer[batch_index_reshaped].repeat( @@ -253,6 +269,19 @@ def sampler_forward( ) # (batch_size * spec_length, vocab_size) logits -= presence_penalties * past_presence_penalty_buffer_selected + # Greedy Sampling + greedy_samples = torch.argmax(logits, dim=1, keepdim=True) # (batch_size * spec_length, 1) + if (temperatures == 0).all() and not self.qaic_config.get("return_pdfs", False): + return SamplerOutput( + probs=None, + next_tokens=greedy_samples.reshape(-1, spec_length, 1), # Return sampled next tokens instead of logits + vision_embeds=outputs.get("vision_embeds", None), + image_idx=outputs.get("image_idx", None), + past_key_values=outputs.get("past_key_values", None), + past_repetition_penalty_buffer=past_repetition_penalty_buffer, + past_presence_penalty_buffer=past_presence_penalty_buffer, + ) + # TODO: Frequency Penalty # Temperature Scaling @@ -300,9 +329,8 @@ def sampler_forward( ) # (batch_size, spec_length, vocab_size) # Random Sampling - topk_probs_asc = torch.softmax(topk_values_asc, dim=1) # (batch_size * 
spec_length, max_top_k_ids) gumbel_noise = -torch.log(-torch.log(random_numbers.repeat(spec_length, 1))) # Gumbel-Max Trick - y = topk_probs_asc + gumbel_noise + y = topk_values_asc + gumbel_noise # (batch_size * spec_length, max_top_k_ids) random_samples_indices = torch.argmax(y, dim=1, keepdim=True) random_samples = torch.gather(topk_indices_asc, 1, random_samples_indices) # (batch_size * spec_length, 1) @@ -314,7 +342,9 @@ def sampler_forward( return SamplerOutput( probs=probs, next_tokens=next_tokens, # Return sampled next tokens instead of logits - past_key_values=outputs.past_key_values, + vision_embeds=outputs.get("vision_embeds", None), + image_idx=outputs.get("image_idx", None), + past_key_values=outputs.get("past_key_values", None), past_repetition_penalty_buffer=past_repetition_penalty_buffer, past_presence_penalty_buffer=past_presence_penalty_buffer, ) diff --git a/QEfficient/utils/sampler_utils.py b/QEfficient/utils/sampler_utils.py index 6fb1b326f..0460eeb3a 100644 --- a/QEfficient/utils/sampler_utils.py +++ b/QEfficient/utils/sampler_utils.py @@ -5,8 +5,11 @@ # # ----------------------------------------------------------------------------- -from typing import Optional, Set +from typing import Dict, List, Optional, Set +import torch + +from QEfficient.utils import constants from QEfficient.utils.constants import Constants from QEfficient.utils.logging_utils import logger @@ -56,3 +59,89 @@ def validate_sampler_inputs(session_inputs: Set[str], include_sampler: Optional[ ) return session_includes_sampler + + +def get_sampling_inputs_and_outputs( + example_inputs: Dict[str, torch.Tensor], + output_names: List[str], + dynamic_axes: Dict[str, Dict[int, str]], + continuous_batching: bool, + vocab_size: int, + qaic_config: Dict, +): + """ + Updates the example inputs, output names, and dynamic axes to include + parameters relevant for on-device sampling during ONNX export. 
+ + Parameters + ---------- + example_inputs : Dict[str, torch.Tensor] + Current dictionary of example inputs. + output_names : List[str] + Current list of output names. + dynamic_axes : Dict[str, Dict[int, str]] + Current dictionary of dynamic axes configurations. + continuous_batching : bool + Whether this model will be used for continuous batching in the future. + vocab_size: int + Vocabulary size for this model. + qaic_config : Dict + QAIC config dictionary. + + Returns + ------- + Tuple[Dict[str, torch.Tensor], List[str], Dict[str, Dict[int, str]]] + Updated example inputs, output names, and dynamic axes including + sampling-related parameters. + """ + bs: int = constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE + fbs: int = constants.ONNX_EXPORT_EXAMPLE_FBS + + example_inputs["last_accepted_output_tokens"] = torch.zeros( + (bs, constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN), dtype=torch.int64 + ) + dynamic_axes["last_accepted_output_tokens"] = {0: "batch_size", 1: "seq_len"} + + example_inputs["past_repetition_penalty_buffer"] = torch.zeros( + (fbs if continuous_batching else bs, vocab_size), dtype=torch.bool + ) + dynamic_axes["past_repetition_penalty_buffer"] = { + 0: "full_batch_size" if continuous_batching else "batch_size", + } + output_names.append("past_repetition_penalty_buffer_RetainedState") + + example_inputs["repetition_penalties"] = ( + torch.ones((bs, 1), dtype=torch.float) * constants.ONNX_EXPORT_EXAMPLE_REPETITION_PENALTIES + ) + dynamic_axes["repetition_penalties"] = {0: "batch_size"} + + example_inputs["past_presence_penalty_buffer"] = torch.zeros( + (fbs if continuous_batching else bs, vocab_size), dtype=torch.bool + ) + dynamic_axes["past_presence_penalty_buffer"] = { + 0: "full_batch_size" if continuous_batching else "batch_size", + } + output_names.append("past_presence_penalty_buffer_RetainedState") + + example_inputs["presence_penalties"] = ( + torch.zeros((bs, 1), dtype=torch.float) + constants.ONNX_EXPORT_EXAMPLE_PRESENCE_PENALTIES + ) + 
dynamic_axes["presence_penalties"] = {0: "batch_size"} + + example_inputs["temperatures"] = torch.ones((bs, 1), dtype=torch.float) * constants.ONNX_EXPORT_EXAMPLE_TEMPERATURES + dynamic_axes["temperatures"] = {0: "batch_size"} + + max_top_k_ids = qaic_config.get("max_top_k_ids", constants.ONNX_EXPORT_EXAMPLE_MAX_TOP_K_IDS) + example_inputs["top_ks"] = torch.randint(1, max_top_k_ids, size=(bs, 1)).to(torch.int32) + dynamic_axes["top_ks"] = {0: "batch_size"} + + example_inputs["top_ps"] = torch.ones((bs, 1), dtype=torch.float) * constants.ONNX_EXPORT_EXAMPLE_TOP_PS + dynamic_axes["top_ps"] = {0: "batch_size"} + + example_inputs["min_ps"] = torch.ones((bs, 1), dtype=torch.float) * constants.ONNX_EXPORT_EXAMPLE_MIN_PS + dynamic_axes["min_ps"] = {0: "batch_size"} + + example_inputs["random_numbers"] = torch.rand((bs, max_top_k_ids), dtype=torch.float) + dynamic_axes["random_numbers"] = {0: "batch_size"} + + return example_inputs, output_names, dynamic_axes diff --git a/examples/performance/on_device_sampling.py b/examples/performance/on_device_sampling.py index 6cc72b715..b4e1f4e27 100644 --- a/examples/performance/on_device_sampling.py +++ b/examples/performance/on_device_sampling.py @@ -28,6 +28,7 @@ def main(args, **kwargs): if include_sampler is not None: return_pdfs = args.override_qaic_config.get("aic_return_pdfs", None) == "true" max_top_k_ids = int(args.override_qaic_config.get("max_top_k_ids", 512)) + np.random.seed(int(args.random_number)) sampling_params = { "repetition_penalties": np.array(args.repetition_penalty, dtype=np.float32).repeat(bs).reshape(-1, 1), "presence_penalties": np.array(args.presence_penalty, dtype=np.float32).repeat(bs).reshape(-1, 1), @@ -36,7 +37,9 @@ def main(args, **kwargs): "top_ks": np.array(args.top_k, dtype=np.int32).repeat(bs).reshape(-1, 1), "top_ps": np.array(args.top_p, dtype=np.float32).repeat(bs).reshape(-1, 1), "min_ps": np.array(args.min_p, dtype=np.float32).repeat(bs).reshape(-1, 1), - "random_numbers": 
np.array(args.random_number, dtype=np.float32).repeat(bs).reshape(-1, 1), + "random_numbers": np.tile(np.random.uniform(low=0.0, high=1.0, size=max_top_k_ids), (bs, 1)).astype( + np.float32 + ), } qaic_config = { k: v @@ -110,10 +113,10 @@ def main(args, **kwargs): --repetition-penalty 1.9 \ --presence-penalty 0.8 \ --temperature 0.67 \ - --top-k 54720 \ + --top-k 54 \ --top-p 0.89 \ --min-p 0.6 \ - --random-number 0.26 + --random-number 26 2. For non-continuous batching: python3.10 examples/on_device_sampling.py \ @@ -130,10 +133,10 @@ def main(args, **kwargs): --repetition-penalty 1.9 \ --presence-penalty 0.8 \ --temperature 0.67 \ - --top-k 54720 \ + --top-k 54 \ --top-p 0.89 \ --min-p 0.6 \ - --random-number 0.26 + --random-number 26 """ parser = argparse.ArgumentParser(description="Run QEfficient model with On Device Sampling") diff --git a/tests/transformers/sampler/test_sampler.py b/tests/transformers/sampler/test_sampler.py index 9335e1d91..f9aa35312 100644 --- a/tests/transformers/sampler/test_sampler.py +++ b/tests/transformers/sampler/test_sampler.py @@ -5,15 +5,17 @@ # # ----------------------------------------------------------------------------- -from typing import List +from typing import List, Optional, Tuple, Union import numpy as np import pytest +from transformers import AutoConfig, AutoModelForCausalLM, AutoProcessor, AutoTokenizer -from QEfficient import QEFFAutoModelForCausalLM +from QEfficient import QEFFAutoModelForCausalLM, QEFFAutoModelForImageTextToText from QEfficient.generation.cloud_infer import QAICInferenceSession from QEfficient.utils import load_hf_tokenizer from QEfficient.utils.constants import Constants +from QEfficient.utils.test_utils import InternProcessor sampler_transform_configs = [ pytest.param( @@ -24,6 +26,20 @@ 20, # generation_len 2, # full_batch_size 1, # spec_length + False, # is_vlm + ), + pytest.param( + "OpenGVLab/InternVL2_5-1B", # model + ( + ["https://picsum.photos/id/237/536/354"] * 2, + ["Can you describe 
the image in detail."] * 2, + ), # images and prompts + 128, # prefill_seq_len + 4096, # ctx_len + 20, # generation_len + 2, # full_batch_size + None, # spec_length + True, # is_vlm ), ] greedy_sampling_configs = [ @@ -35,6 +51,20 @@ 20, # generation_len 4, # full_batch_size 1, # spec_length + False, # is_vlm + ), + pytest.param( + "OpenGVLab/InternVL2_5-1B", # model + ( + ["https://picsum.photos/id/237/536/354"] * 2, + ["Can you describe the image in detail."] * 2, + ), # images and prompts + 128, # prefill_seq_len + 4096, # ctx_len + 20, # generation_len + 2, # full_batch_size + None, # spec_length + True, # is_vlm ), ] random_sampling_configs = [ @@ -46,23 +76,74 @@ 20, # generation_len 4, # full_batch_size 1, # spec_length + False, # is_vlm + ), + pytest.param( + "OpenGVLab/InternVL2_5-1B", # model + ( + ["https://picsum.photos/id/237/536/354"] * 4, + ["Can you describe the image in detail."] * 4, + ), # images and prompts + 128, # prefill_seq_len + 4096, # ctx_len + 20, # generation_len + 4, # full_batch_size + None, # spec_length + True, # is_vlm ), ] +def prepare_model_setup( + model: str, is_vlm: bool, num_hidden_layers: Optional[int], prompts: Union[List, Tuple], spec_length: Optional[int] +): + additional_configs = {} + additional_params = {} + if is_vlm: + config = AutoConfig.from_pretrained(model, trust_remote_code=True) + if num_hidden_layers is not None: + config.llm_config.num_hidden_layers = num_hidden_layers + additional_configs["config"] = config + additional_configs["kv_offload"] = True + assert isinstance(prompts, tuple), "For VLMs, both image and text prompts must be provided." 
+ additional_params["images"] = prompts[0] + prompts = prompts[1] + + if "InternVL" in model: + additional_configs["trust_remote_code"] = True + model_hf = AutoModelForCausalLM.from_pretrained( + model, + config=config, + trust_remote_code=True, + ) + tokenizer = AutoTokenizer.from_pretrained(model, trust_remote_code=True, use_fast=False) + additional_params["processor"] = InternProcessor(model_hf, tokenizer) + qeff_class = QEFFAutoModelForCausalLM + else: + additional_params["processor"] = AutoProcessor.from_pretrained(model) + qeff_class = QEFFAutoModelForImageTextToText + else: + if num_hidden_layers is not None: + additional_configs["num_hidden_layers"] = num_hidden_layers + spec_length = (spec_length or 1) - 1 + qeff_class = QEFFAutoModelForCausalLM + return additional_configs, additional_params, prompts, spec_length, qeff_class + + @pytest.mark.on_qaic @pytest.mark.parametrize( - "model, prompts, prefill_seq_len, ctx_len, generation_len, full_batch_size, spec_length", + "model, prompts, prefill_seq_len, ctx_len, generation_len, full_batch_size, spec_length, is_vlm", sampler_transform_configs, ) def test_sampler_transform( model: str, - prompts: List[str], + prompts: Union[List[str], tuple[List[str], List[str]]], prefill_seq_len: int, ctx_len: int, generation_len: int, full_batch_size: int, - spec_length: int, + spec_length: Optional[int], + is_vlm: bool, ): """ Test if `SamplerTransform` adds nodes at the output of a `QEffForCausalLM model` to enable the @@ -70,45 +151,52 @@ def test_sampler_transform( next tokens and/or probability distributions. 
""" # Export and compile QEfficient models - model_w_sampler = QEFFAutoModelForCausalLM.from_pretrained( + num_hidden_layers = 2 + additional_configs, additional_params, prompts, spec_length, qeff_class = prepare_model_setup( + model, is_vlm, num_hidden_layers, prompts, spec_length + ) + model_w_sampler = qeff_class.from_pretrained( model, continuous_batching=True, - num_hidden_layers=2, qaic_config={ "include_sampler": True, "return_pdfs": False, "max_top_k_ids": 512, }, + **additional_configs, ) - model_wo_sampler = QEFFAutoModelForCausalLM.from_pretrained( + model_wo_sampler = qeff_class.from_pretrained( model, continuous_batching=True, - num_hidden_layers=2, qaic_config={ "include_sampler": False, "return_pdfs": False, }, + **additional_configs, ) - model_w_sampler_qpc_path: str = model_w_sampler.compile( + model_w_sampler_qpc_path = model_w_sampler.compile( prefill_seq_len=prefill_seq_len, ctx_len=ctx_len, full_batch_size=full_batch_size, num_devices=1, num_cores=16, - num_speculative_tokens=spec_length - 1, + num_speculative_tokens=spec_length, mxint8_kv_cache=True, mxfp6_matmul=True, ) - model_wo_sampler_qpc_path: str = model_wo_sampler.compile( + model_wo_sampler_qpc_path = model_wo_sampler.compile( prefill_seq_len=prefill_seq_len, ctx_len=ctx_len, full_batch_size=full_batch_size, num_devices=1, num_cores=16, - num_speculative_tokens=spec_length - 1, + num_speculative_tokens=spec_length, mxint8_kv_cache=True, mxfp6_matmul=True, ) + if is_vlm: + model_w_sampler_qpc_path = model_w_sampler_qpc_path[1] + model_wo_sampler_qpc_path = model_wo_sampler_qpc_path[1] # Init qaic session model_w_sampler_session = QAICInferenceSession(model_w_sampler_qpc_path) @@ -139,40 +227,45 @@ def test_sampler_transform( @pytest.mark.on_qaic @pytest.mark.parametrize( - "model, prompts, prefill_seq_len, ctx_len, generation_len, full_batch_size, spec_length", + "model, prompts, prefill_seq_len, ctx_len, generation_len, full_batch_size, spec_length, is_vlm", greedy_sampling_configs, ) 
def test_greedy_sampling( model: str, - prompts: List[str], + prompts: Union[List[str], tuple[List[str], List[str]]], prefill_seq_len: int, ctx_len: int, generation_len: int, full_batch_size: int, - spec_length: int, + spec_length: Optional[int], + is_vlm: bool, ): """ Test greedy sampling with QPC compiled with and without On Device Sampling. """ # Export and compile QEfficient models + num_hidden_layers = 4 + additional_configs, additional_params, prompts, spec_length, qeff_class = prepare_model_setup( + model, is_vlm, num_hidden_layers, prompts, spec_length + ) model_w_sampler = QEFFAutoModelForCausalLM.from_pretrained( model, continuous_batching=True, - num_hidden_layers=4, qaic_config={ "include_sampler": True, "return_pdfs": False, "max_top_k_ids": 512, }, + **additional_configs, ) model_wo_sampler = QEFFAutoModelForCausalLM.from_pretrained( model, continuous_batching=True, - num_hidden_layers=4, qaic_config={ "include_sampler": False, "return_pdfs": False, }, + **additional_configs, ) model_w_sampler.compile( prefill_seq_len=prefill_seq_len, @@ -180,7 +273,7 @@ def test_greedy_sampling( full_batch_size=full_batch_size, num_devices=1, num_cores=16, - num_speculative_tokens=spec_length - 1, + num_speculative_tokens=spec_length, mxint8_kv_cache=True, mxfp6_matmul=True, ) @@ -190,7 +283,7 @@ def test_greedy_sampling( full_batch_size=full_batch_size, num_devices=1, num_cores=16, - num_speculative_tokens=spec_length - 1, + num_speculative_tokens=spec_length, mxint8_kv_cache=True, mxfp6_matmul=True, ) @@ -211,8 +304,9 @@ def test_greedy_sampling( "top_ks": np.array(512, dtype=np.int32).repeat(full_batch_size).reshape(-1, 1), "top_ps": np.array(1.0, dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), "min_ps": np.array(0.0, dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), - "random_numbers": np.array(0.0, dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), + "random_numbers": np.zeros((full_batch_size, 512), dtype=np.float32), }, + 
**additional_params, ) model_wo_sampler_exec_info = model_wo_sampler.generate( tokenizer=tokenizer, @@ -221,6 +315,7 @@ def test_greedy_sampling( include_sampler=False, return_pdfs=False, sampling_params=None, + **additional_params, ) # Compare generated texts and ids @@ -233,24 +328,28 @@ def test_greedy_sampling( @pytest.mark.on_qaic -@pytest.mark.skip @pytest.mark.parametrize( - "model, prompts, prefill_seq_len, ctx_len, generation_len, full_batch_size, spec_length", + "model, prompts, prefill_seq_len, ctx_len, generation_len, full_batch_size, spec_length, is_vlm", random_sampling_configs, ) def test_random_sampling( model: str, - prompts: List[str], + prompts: Union[List[str], tuple[List[str], List[str]]], prefill_seq_len: int, ctx_len: int, generation_len: int, full_batch_size: int, - spec_length: int, + spec_length: Optional[int], + is_vlm: bool, ): """ Test random sampling with QPC compiled with and without On Device Sampling. """ # Export and compile QEfficient models + num_hidden_layers = None + additional_configs, additional_params, prompts, spec_length, qeff_class = prepare_model_setup( + model, is_vlm, num_hidden_layers, prompts, spec_length + ) model_w_sampler = QEFFAutoModelForCausalLM.from_pretrained( model, continuous_batching=True, @@ -259,6 +358,7 @@ def test_random_sampling( "return_pdfs": False, "max_top_k_ids": 512, }, + **additional_configs, ) model_wo_sampler = QEFFAutoModelForCausalLM.from_pretrained( model, @@ -267,6 +367,7 @@ def test_random_sampling( "include_sampler": False, "return_pdfs": False, }, + **additional_configs, ) model_w_sampler.compile( prefill_seq_len=prefill_seq_len, @@ -274,7 +375,7 @@ def test_random_sampling( full_batch_size=full_batch_size, num_devices=1, num_cores=16, - num_speculative_tokens=spec_length - 1, + num_speculative_tokens=spec_length, mxint8_kv_cache=True, mxfp6_matmul=True, ) @@ -284,13 +385,14 @@ def test_random_sampling( full_batch_size=full_batch_size, num_devices=1, num_cores=16, - 
num_speculative_tokens=spec_length - 1, + num_speculative_tokens=spec_length, mxint8_kv_cache=True, mxfp6_matmul=True, ) # Generate texts from prompts tokenizer = load_hf_tokenizer(pretrained_model_name_or_path=model) + np.random.seed(0) model_w_sampler_exec_info = model_w_sampler.generate( tokenizer=tokenizer, prompts=prompts, @@ -301,12 +403,15 @@ def test_random_sampling( "repetition_penalties": np.array(20.2, dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), "presence_penalties": np.array(10.5, dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), # "frequency_penalties": np.array(0.5, dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), - "temperatures": np.array(100.1, dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), - "top_ks": np.array(54720, dtype=np.int32).repeat(full_batch_size).reshape(-1, 1), + "temperatures": np.array(4.0, dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), + "top_ks": np.array(512, dtype=np.int32).repeat(full_batch_size).reshape(-1, 1), "top_ps": np.array(0.89, dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), "min_ps": np.array(0.6, dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), - "random_numbers": np.array(0.26, dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), + "random_numbers": np.tile(np.random.uniform(low=0.0, high=1.0, size=512), (full_batch_size, 1)).astype( + np.float32 + ), }, + **additional_params, ) model_wo_sampler_exec_info = model_wo_sampler.generate( tokenizer=tokenizer, @@ -315,63 +420,120 @@ def test_random_sampling( include_sampler=False, return_pdfs=False, sampling_params=None, + **additional_params, ) # Compare generated texts - golden_texts = { - "w_sampler": "Raymond and my favorite color, alongside reds or purples (I can’t have them both", - "wo_sampler": "John Smith and I am a software engineer. 
I have been working in the industry for the past ", - } - golden_ids = { - "w_sampler": [ - [ - 21380, - 322, - 590, - 25448, - 2927, - 29892, - 19963, - 2654, - 29879, - 470, - 3708, - 2701, - 313, - 29902, - 508, - 30010, - 29873, - 505, - 963, - 1716, - ] - ], - "wo_sampler": [ - [ - 2259, - 7075, - 322, - 306, - 626, - 263, - 7047, - 22055, - 29889, - 306, - 505, - 1063, - 1985, - 297, - 278, - 13661, - 363, - 278, - 4940, - 29871, - ] - ], - } + if model == "TinyLlama/TinyLlama-1.1B-Chat-v1.0": + golden_texts = { + "w_sampler": "Aiden and I am a freelance writer who loves to explore the world. With over", + "wo_sampler": "John Smith and I am a software engineer. I have been working in the industry for the past ", + } + golden_ids = { + "w_sampler": [ + [ + 319, + 3615, + 322, + 306, + 626, + 263, + 3005, + 295, + 749, + 9227, + 1058, + 12355, + 267, + 304, + 26987, + 278, + 3186, + 29889, + 2973, + 975, + ] + ], + "wo_sampler": [ + [ + 2259, + 7075, + 322, + 306, + 626, + 263, + 7047, + 22055, + 29889, + 306, + 505, + 1063, + 1985, + 297, + 278, + 13661, + 363, + 278, + 4940, + 29871, + ] + ], + } + elif model == "OpenGVLab/InternVL2_5-1B": + golden_texts = { + "w_sampler": "The description of this picture would be as follows:\n\nAn adorable black puppy is sitting on a wooden surface", + "wo_sampler": "The image features a black puppy sitting on a wooden surface. 
The puppy has a shiny, glossy coat", + } + golden_ids = { + "w_sampler": [ + [ + 785, + 4008, + 315, + 419, + 6802, + 1035, + 387, + 438, + 11017, + 1447, + 2082, + 40608, + 3691, + 41189, + 374, + 11699, + 389, + 264, + 22360, + 7329, + ] + ], + "wo_sampler": [ + [ + 785, + 2168, + 4419, + 264, + 3691, + 41189, + 11699, + 389, + 264, + 22360, + 7329, + 13, + 576, + 41189, + 702, + 264, + 41199, + 11, + 73056, + 22875, + ] + ], + } for i in range(full_batch_size): assert ( tokenizer.decode(model_w_sampler_exec_info.generated_ids[i][:generation_len]) == golden_texts["w_sampler"] From 86efef696f58ae86e5241d33e7bbbf5b6d7d6c56 Mon Sep 17 00:00:00 2001 From: Vinayak Baddi <68580231+vbaddi@users.noreply.github.com> Date: Wed, 17 Dec 2025 22:40:02 +0530 Subject: [PATCH 50/60] test: Verify ONNX subfunction usage through model inspection instead of hash comparison (#670) ## Summary Refactored the subfunction unit test to directly verify ONNX subfunction usage by inspecting the exported model structure, replacing the previous hash-based validation approach. 
## Changes - Removed hash-based checks (`export_hash` and file hash comparisons) - Added ONNX model inspection utilities: - `has_gpt2block_function()`: Checks for QEffGPT2Block function definitions - Added explicit assertions to verify: - QEffGPT2Block function is defined when `use_onnx_subfunctions=True` - QEffGPT2Block function is NOT defined when `use_onnx_subfunctions=False` - QEffGPT2Block calls exist in graph nodes when subfunctions are enabled - No QEffGPT2Block calls when subfunctions are disabled - Maintained functional equivalence testing (generation output comparison) Signed-off-by: Vinayak Baddi Co-authored-by: vbaddi Signed-off-by: Dhiraj Kumar Sah --- tests/transformers/test_subfunction.py | 67 +++++++++++++++++++++----- 1 file changed, 55 insertions(+), 12 deletions(-) diff --git a/tests/transformers/test_subfunction.py b/tests/transformers/test_subfunction.py index 006337eee..47e49cf2c 100644 --- a/tests/transformers/test_subfunction.py +++ b/tests/transformers/test_subfunction.py @@ -4,9 +4,9 @@ # SPDX-License-Identifier: BSD-3-Clause # # ---------------------------------------------------------------------------- +from collections import Counter -import hashlib - +import onnx import pytest import torch from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer @@ -46,31 +46,74 @@ config_ids = [x.model_type for x in configs] +def has_gpt2block_function(onnx_path): + """Check if ONNX model contains QEffGPT2Block function definition.""" + model = onnx.load(onnx_path, load_external_data=False) + function_names = [f.name for f in model.functions] + gpt2block_functions = [name for name in function_names if "QEffGPT2Block" in name] + return len(gpt2block_functions) > 0, gpt2block_functions + + +def get_gpt2block_call_count(onnx_path): + """Get count of QEffGPT2Block function calls in the ONNX model graph.""" + model = onnx.load(onnx_path, load_external_data=False) + calls = Counter([n.op_type for n in model.graph.node]) + gpt2block_calls = 
{k: v for k, v in calls.items() if "QEffGPT2Block" in k} + return gpt2block_calls + + @pytest.mark.on_qaic @pytest.mark.parametrize("config", configs, ids=config_ids) def test_subfunction_vs_nonsubfunction(config, tmp_path): tokenizer = AutoTokenizer.from_pretrained(config.model_type) model_0_0 = QEFFAutoModelForCausalLM(AutoModelForCausalLM.from_config(config, **model_kwargs), cb=False) - # model_0_0 = QEFFAutoModelForCausalLM.from_pretrained(config.model_type) + # Export with subfunctions enabled with_sub_func_onnx = model_0_0.export(tmp_path, use_onnx_subfunctions=True, offload_pt_weights=False) - hash_0_0 = model_0_0.export_hash + # Export without subfunctions without_sub_func_onnx = model_0_0.export(tmp_path, use_onnx_subfunctions=False) - hash_0_1 = model_0_0.export_hash - # Test that the export hash changes when use_onnx_subfunction is toggled, indicating different parameters are used - assert hash_0_0 != hash_0_1 + # Verify that the model with subfunctions has QEffGPT2Block function definition + has_gpt2block, gpt2block_names = has_gpt2block_function(with_sub_func_onnx) + assert has_gpt2block, ( + "Model exported with use_onnx_subfunctions=True should contain QEffGPT2Block function definition" + ) + print(f"\nGpt2Block functions found: {gpt2block_names}") + + # Verify that the model without subfunctions has no QEffGPT2Block function definition + has_gpt2block_without, _ = has_gpt2block_function(without_sub_func_onnx) + assert not has_gpt2block_without, ( + "Model exported with use_onnx_subfunctions=False should not contain QEffGPT2Block function definition" + ) + + # Get QEffGPT2Block call counts + gpt2block_calls_with_sub = get_gpt2block_call_count(with_sub_func_onnx) + gpt2block_calls_without_sub = get_gpt2block_call_count(without_sub_func_onnx) + + print(f"\nGpt2Block call counts with subfunctions: {gpt2block_calls_with_sub}") + print(f"QEffGPT2Block call counts without subfunctions: {gpt2block_calls_without_sub}") + + # Verify that QEffGPT2Block 
function calls exist in the subfunction model + assert len(gpt2block_calls_with_sub) > 0, ( + "Expected to find QEffGPT2Block function calls in graph when use_onnx_subfunctions=True" + ) - # Test that the exported ONNX files hash are different by comparing their hashes when use_onnx_subfunction is toggled - with_sub_func_onnx_hash = hashlib.sha256(open(with_sub_func_onnx, "rb").read()).hexdigest() - without_sub_func_onnx_hash = hashlib.sha256(open(without_sub_func_onnx, "rb").read()).hexdigest() - assert with_sub_func_onnx_hash != without_sub_func_onnx_hash + # Verify that QEffGPT2Block function calls do NOT exist in the non-subfunction model + assert len(gpt2block_calls_without_sub) == 0, ( + "Expected NO QEffGPT2Block function calls in graph when use_onnx_subfunctions=False" + ) + # Compile and test generation to ensure functional equivalence compile_params = {"prefill_seq_len": 8, "ctx_len": 16} + model_0_0.compile(onnx_path=with_sub_func_onnx, **compile_params) generation_00 = model_0_0.generate(prompts=["Help me with this"], tokenizer=tokenizer) model_0_0.compile(onnx_path=without_sub_func_onnx, **compile_params) generation_01 = model_0_0.generate(prompts=["Help me with this"], tokenizer=tokenizer) - assert generation_00.generated_texts == generation_01.generated_texts + + # Verify that both models produce the same output + assert generation_00.generated_texts == generation_01.generated_texts, ( + "Models with and without subfunctions should produce identical outputs" + ) From 358842edf61f10110289af7934aa3d7184eb414c Mon Sep 17 00:00:00 2001 From: Dhiraj Kumar Sah Date: Thu, 18 Dec 2025 12:11:35 +0530 Subject: [PATCH 51/60] =?UTF-8?q?HOTFIX:=20Testing=20the=20Finetune=20base?= =?UTF-8?q?=20CI=20failure=20by=20installing=20pytorch2.9=E2=80=A6=20(#661?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit installing pytorch2.9 for FT CI test --------- Signed-off-by: Dhiraj Kumar Sah --- scripts/Jenkinsfile | 1 + 
1 file changed, 1 insertion(+) diff --git a/scripts/Jenkinsfile b/scripts/Jenkinsfile index 8f95c1d98..e9925dee2 100644 --- a/scripts/Jenkinsfile +++ b/scripts/Jenkinsfile @@ -169,6 +169,7 @@ pipeline { cd /efficient-transformers && . preflight_qeff/bin/activate && pip install /opt/qti-aic/integrations/torch_qaic/py310/torch_qaic-0.1.0-cp310-cp310-linux_x86_64.whl && + pip install torch==2.9.0 torchvision==0.24.0 torchaudio==2.9.0 --index-url https://download.pytorch.org/whl/cpu && mkdir -p $PWD/cli_qaic_finetuning && export TOKENIZERS_PARALLELISM=false && export QEFF_HOME=$PWD/cli_qaic_finetuning && From 8ab31aebd16027a06721ac470b67954b8f1382e2 Mon Sep 17 00:00:00 2001 From: Sanidhya Singal Date: Thu, 18 Dec 2025 01:36:06 -0800 Subject: [PATCH 52/60] Add Support for Guided Decoding to On Device Sampling (#624) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## ✨ Add Support for Guided Decoding to On Device Sampling ### šŸ“Œ Overview This PR introduces **guided decoding** capabilities in On Device Sampling for `QEffForCausalLM` and `QEffCausalLMForTextImageToTextModel` models.

### 🚀 Motivation As outlined in [this blog on structured decoding](https://blog.vllm.ai/2025/01/14/struct-decode-intro.html), structured decoding represents a fundamental shift in controlling LLM outputs. Instead of relying on post-processing, constraints are enforced during token generation via **logits manipulation**. This approach ensures: * **Format compliance** at generation time. * Reduced error rates for structured outputs. * Performance improvements through optimized backends like **XGrammar**, which can deliver up to **5× faster token generation under load**. The constraints are provided through `token_bitmasks`, which is a Boolean matrix of shape `(batch_size, vocab_size)`. Here, each element indicates whether a token should be kept (1) or masked (0). During sampling, this mask is applied to the logits before token selection, ensuring that only allowed tokens are considered. By performing this operation directly on the device, we eliminate host-device transfers, reduce latency, and improve throughput for structured decoding workloads.

### šŸ› ļø Implementation Details The guided decoding logic is injected via `include_guided_decoding=True` during model loading. No changes to the model architecture are required. ```python from QEfficient import QEFFAutoModelForCausalLM as AutoModelForCausalLM # Load model with On Device Sampler enabled qeff_model = AutoModelForCausalLM.from_pretrained( "meta-llama/Llama-3.1-8B", continuous_batching=True, qaic_config={ "include_sampler": True, "return_pdfs": False, "max_top_k_ids": 512, "include_guided_decoding": True, }, ) # Compile as usual qeff_model.compile( prefill_seq_length=128, ctx_len=256, full_batch_size=16, num_devices=4, num_speculative_tokens=0, mxint8_kv_cache=True, mxfp6_matmul=True, ) ``` To disable guided decoding, simply set `include_guided_decoding=False`. --------- Signed-off-by: quic-xiyushi Signed-off-by: quic-sanising Signed-off-by: sanising Signed-off-by: Mamta Singh <168400541+quic-mamta@users.noreply.github.com> Co-authored-by: quic-xiyushi Co-authored-by: sanising Co-authored-by: Mamta Singh <168400541+quic-mamta@users.noreply.github.com> Co-authored-by: Hem Agnihotri Signed-off-by: Dhiraj Kumar Sah --- .../generation/text_generation_inference.py | 16 +- QEfficient/generation/vlm_generation.py | 8 +- .../transformers/models/modeling_auto.py | 5 +- .../transformers/models/pytorch_transforms.py | 8 + QEfficient/transformers/sampler/sampler.py | 13 ++ QEfficient/utils/sampler_utils.py | 15 +- examples/performance/on_device_sampling.py | 43 +++- tests/transformers/sampler/test_sampler.py | 197 +++++++++++++++++- 8 files changed, 279 insertions(+), 26 deletions(-) diff --git a/QEfficient/generation/text_generation_inference.py b/QEfficient/generation/text_generation_inference.py index 7da2300d6..4fb77f272 100755 --- a/QEfficient/generation/text_generation_inference.py +++ b/QEfficient/generation/text_generation_inference.py @@ -329,6 +329,7 @@ def cloud_ai_100_exec_kv( is_tlm: bool = False, include_sampler: bool = False, return_pdfs: bool = 
False, + include_guided_decoding: bool = False, sampling_params: Optional[Dict[str, Any]] = None, ): """ @@ -356,6 +357,8 @@ def cloud_ai_100_exec_kv( next tokens. For Speculative Decoding Target Language Model, `return_pdfs`=True always. Otherwise, `return_pdfs`=True for Speculative Decoding Draft Language Model and `return_pdfs`=False for regular model. + :include_guided_decoding (bool, default=False): If True, enables guided token-level filtering + during decoding. Only works when `include_sampler`=True. sampling_params (Dict[str, Any], default=None): A dictionary of sampling parameters supported by the QAIC backend. The dictionary should contain the following keys: `repetition_penalties`, `presence_penalties`, `temperatures`, `top_ks`, `top_ps`, @@ -394,6 +397,7 @@ def cloud_ai_100_exec_kv( is_tlm=is_tlm, include_sampler=include_sampler, return_pdfs=return_pdfs, + include_guided_decoding=include_guided_decoding, sampling_params=sampling_params, ) @@ -442,6 +446,7 @@ def __init__( is_tlm: Optional[int] = None, include_sampler: bool = False, return_pdfs: bool = False, + include_guided_decoding: bool = False, sampling_params: Optional[Dict[str, Any]] = None, activate: bool = True, ) -> None: @@ -451,6 +456,7 @@ def __init__( self._write_io_dir = write_io_dir self.is_tlm = is_tlm self.return_pdfs = return_pdfs + self.include_guided_decoding = include_guided_decoding self.sampling_params = sampling_params self._qpc_path = qpc_path # Store qpc_path for later use @@ -461,7 +467,9 @@ def __init__( # Validate sampler inputs for On-Device Sampling self.include_sampler = validate_sampler_inputs( - session_inputs=set(self._session.input_names), include_sampler=include_sampler + session_inputs=set(self._session.input_names), + include_sampler=include_sampler, + include_guided_decoding=include_guided_decoding, ) # Fetch the variables from the QPC @@ -628,7 +636,7 @@ def prepare_decode_inputs(self): decode_inputs["batch_index"] = self.batch_index if self.include_sampler: 
decode_inputs["last_accepted_output_tokens"] = decode_inputs["input_ids"] - for op in Constants.SAMPLER_OPS: + for op in Constants.SAMPLER_OPS | ({"token_bitmasks"} if self.include_guided_decoding else set()): if self.batch_index is not None: decode_inputs[op] = self.sampling_params[op][self.batch_index.flatten()] else: @@ -795,7 +803,7 @@ def run_prefill(self, prompt, generation_len, prefill_logit_bs=1, decode_batch_i inputs["num_logits_to_keep"] = np.zeros((1, 1)) if self.include_sampler: inputs["last_accepted_output_tokens"] = inputs["input_ids"] - for op in Constants.SAMPLER_OPS: + for op in Constants.SAMPLER_OPS | ({"token_bitmasks"} if self.include_guided_decoding else set()): if decode_batch_id is not None: inputs[op] = self.sampling_params[op][decode_batch_id.flatten()] else: @@ -1067,6 +1075,7 @@ def __init__( is_tlm: bool = False, include_sampler: bool = False, return_pdfs: bool = False, + include_guided_decoding: bool = False, sampling_params: Optional[Dict[str, Any]] = None, ) -> None: self._qaic_model = QEffTextGenerationBase( @@ -1082,6 +1091,7 @@ def __init__( is_tlm=is_tlm, include_sampler=include_sampler, return_pdfs=return_pdfs, + include_guided_decoding=include_guided_decoding, sampling_params=sampling_params, ) self._full_batch_size = self._qaic_model.full_batch_size diff --git a/QEfficient/generation/vlm_generation.py b/QEfficient/generation/vlm_generation.py index c603a60d0..b3e03f253 100644 --- a/QEfficient/generation/vlm_generation.py +++ b/QEfficient/generation/vlm_generation.py @@ -94,6 +94,7 @@ def __init__( is_tlm: bool = False, include_sampler: bool = False, return_pdfs: bool = False, + include_guided_decoding: bool = False, sampling_params: Optional[Dict[str, Any]] = None, ): """ @@ -115,6 +116,7 @@ def __init__( is_tlm: Target language model flag include_sampler: Enable on-device sampling (new feature) return_pdfs: Return probability distributions + include_guided_decoding: Enable guided decoding in on-device sampling sampling_params: 
Sampling parameters for on-device sampling """ # Validate required parameters @@ -138,6 +140,7 @@ def __init__( is_tlm=is_tlm, include_sampler=include_sampler, return_pdfs=return_pdfs, + include_guided_decoding=include_guided_decoding, sampling_params=sampling_params, activate=False, # vision components need to be initialized first ) @@ -315,7 +318,7 @@ def _execute_chunked_prefill( lang_inputs["comp_ctx_lengths"] = self.list_of_comp_ctx_lengths_prefill[prefill_ccl_id] if self.include_sampler: - for op in Constants.SAMPLER_OPS: + for op in Constants.SAMPLER_OPS | ({"token_bitmasks"} if self.include_guided_decoding else set()): if decode_batch_id is not None: lang_inputs[op] = self.sampling_params[op][decode_batch_id.flatten()] else: @@ -348,7 +351,7 @@ def _execute_chunked_prefill( if self.include_sampler: chunk_inputs["last_accepted_output_tokens"] = chunk_inputs["input_ids"] - for op in Constants.SAMPLER_OPS: + for op in Constants.SAMPLER_OPS | ({"token_bitmasks"} if self.include_guided_decoding else set()): chunk_inputs[op] = lang_inputs[op] outputs = self._session.run(chunk_inputs) @@ -803,6 +806,7 @@ def generate_stream_tokens( is_tlm=self.is_tlm, include_sampler=self.include_sampler, return_pdfs=self.return_pdfs, + include_guided_decoding=self.include_guided_decoding, sampling_params=self.sampling_params, ) diff --git a/QEfficient/transformers/models/modeling_auto.py b/QEfficient/transformers/models/modeling_auto.py index dc03ba82f..88f2f29b1 100644 --- a/QEfficient/transformers/models/modeling_auto.py +++ b/QEfficient/transformers/models/modeling_auto.py @@ -2251,7 +2251,6 @@ def from_pretrained( logger.warning("Updating low_cpu_mem_usage=False") kwargs.update({"attn_implementation": "eager", "low_cpu_mem_usage": False}) - model = cls._hf_auto_class.from_pretrained(pretrained_model_name_or_path, **kwargs) return cls( model, @@ -2347,6 +2346,8 @@ def __init__( - **return_pdfs** (bool): If True, returns probability distributions along with sampled tokens. 
For Speculative Decoding Target Language Models, this is always True. - **max_top_k_ids** (int): Maximum number of top K tokens (<= vocab size) to consider during sampling. + - **include_guided_decoding** (bool): If True, enables guided token-level filtering + during decoding. Only works when include_sampler=True. - **num_kv_blocks** (int): Number of K/V blocks for BlockedKV attention implementation. **kwargs : Additional keyword arguments passed to the base class constructor. @@ -2443,6 +2444,8 @@ def from_pretrained( and ``return_pdfs=False`` for regular model. - **max_top_k_ids** (int): Maximum number of top K tokens (<= vocab size) to consider during sampling. The values provided in ``top_ks`` tensor must be less than this maximum limit. + - **include_guided_decoding** (bool): If True, enables guided token-level filtering + during decoding. Only works when include_sampler=True. *args : Positional arguments passed directly to `cls._hf_auto_class.from_pretrained`. diff --git a/QEfficient/transformers/models/pytorch_transforms.py b/QEfficient/transformers/models/pytorch_transforms.py index 9e021851b..b978b6193 100644 --- a/QEfficient/transformers/models/pytorch_transforms.py +++ b/QEfficient/transformers/models/pytorch_transforms.py @@ -242,6 +242,7 @@ QEffGemma3Attention, QEffGemma3CustomRMSNormAIC, QEffGemma3DecoderLayer, + QEffGemma3DecoderWrapper, QEffGemma3ForCausalLMModel, QEffGemma3ForConditionalGeneration, QEffGemma3TextModel, @@ -313,6 +314,7 @@ QEffLlamaRotaryEmbedding, ) from QEfficient.transformers.models.llama4.modeling_llama4 import ( + QEffLlama4DecoderWrapper, QEffLlama4ForCausalLM, QEffLlama4ForConditionalGeneration, QEffLlama4Router, @@ -325,9 +327,11 @@ QEffLlama4VisionModel, ) from QEfficient.transformers.models.llava.modeling_llava import ( + QEFFLlavaDecoderWrapper, QEffLlavaForConditionalGeneration, ) from QEfficient.transformers.models.llava_next.modeling_llava_next import ( + QEffLlavaNextDecoderWrapper, 
QEffLlavaNextForConditionalGeneration, ) from QEfficient.transformers.models.mistral.modeling_mistral import ( @@ -755,12 +759,16 @@ class SamplerTransform: _module_mapping = { QEffFalconForCausalLM, QEffGemmaForCausalLM, + QEffGemma3DecoderWrapper, QEffGPT2LMHeadModel, QEffGPTJForCausalLM, QEffGraniteForCausalLM, QEffGraniteMoeForCausalLM, QEffInternDecoderWrapper, QEffLlamaForCausalLM, + QEffLlama4DecoderWrapper, + QEFFLlavaDecoderWrapper, + QEffLlavaNextDecoderWrapper, QEffMptForCausalLM, QEffPhi3ForCausalLM, QEffQwen2ForCausalLM, diff --git a/QEfficient/transformers/sampler/sampler.py b/QEfficient/transformers/sampler/sampler.py index fd7b87dcd..5c86b6355 100644 --- a/QEfficient/transformers/sampler/sampler.py +++ b/QEfficient/transformers/sampler/sampler.py @@ -129,6 +129,7 @@ def sampler_forward( top_ps: Optional[torch.Tensor] = None, min_ps: Optional[torch.Tensor] = None, random_numbers: Optional[torch.Tensor] = None, + token_bitmasks: Optional[torch.Tensor] = None, ) -> Union[Tuple, SamplerOutput]: r""" Perform the sampling of next tokens on the QAIC device (instead of the host) @@ -179,6 +180,11 @@ def sampler_forward( random_numbers (`torch.Tensor`, *optional*): Sampling parameter that represents the random seeds to use for random sampling. Must be in [-1, 1]. + + token_bitmasks (`torch.Tensor`, *optional*): + Boolean mask used to guide token-level filtering during decoding. Each + element of this tensor indicates whether the corresponding token should be + kept (1) or masked (0). 
Shape: (batch_size, vocab_size) """ if vision_embeds is not None: forward_kwargs = dict( @@ -224,6 +230,13 @@ def sampler_forward( batch_index = torch.arange(batch_size).view(-1, 1) batch_index_reshaped = batch_index.view(-1) + + # Guided decoding + if token_bitmasks is not None and (token_bitmasks != 1).any(): + assert spec_length == 1, "Currently, guided decoding is not supported with Speculative Decoding" + # Mask logits where token_bitmasks is 0 with -inf + logits = torch.where(token_bitmasks == 1, logits, torch.finfo(torch.float16).min) + # Prefill past_repetition_penalty_buffer_prefill, past_presence_penalty_buffer_prefill = prefill_path( input_ids=input_ids, diff --git a/QEfficient/utils/sampler_utils.py b/QEfficient/utils/sampler_utils.py index 0460eeb3a..82a0843bc 100644 --- a/QEfficient/utils/sampler_utils.py +++ b/QEfficient/utils/sampler_utils.py @@ -14,7 +14,9 @@ from QEfficient.utils.logging_utils import logger -def validate_sampler_inputs(session_inputs: Set[str], include_sampler: Optional[bool] = None) -> bool: +def validate_sampler_inputs( + session_inputs: Set[str], include_sampler: Optional[bool] = None, include_guided_decoding: Optional[bool] = None +) -> bool: """ Validates whether the `QAICInferenceSession` inputs match inputs required for on-device sampling. @@ -31,7 +33,7 @@ def validate_sampler_inputs(session_inputs: Set[str], include_sampler: Optional[ ValueError if partial support is detected or if user intent conflicts with QPC capabilities. 
""" - sampler_inputs = Constants.SAMPLER_INPUTS + sampler_inputs = Constants.SAMPLER_INPUTS | ({"token_bitmasks"} if include_guided_decoding else set()) count = len(sampler_inputs & session_inputs) session_includes_sampler = True @@ -96,10 +98,9 @@ def get_sampling_inputs_and_outputs( """ bs: int = constants.ONNX_EXPORT_EXAMPLE_BATCH_SIZE fbs: int = constants.ONNX_EXPORT_EXAMPLE_FBS + seq_len: int = example_inputs["input_ids"].shape[-1] - example_inputs["last_accepted_output_tokens"] = torch.zeros( - (bs, constants.ONNX_EXPORT_EXAMPLE_SEQ_LEN), dtype=torch.int64 - ) + example_inputs["last_accepted_output_tokens"] = torch.zeros((bs, seq_len), dtype=torch.int64) dynamic_axes["last_accepted_output_tokens"] = {0: "batch_size", 1: "seq_len"} example_inputs["past_repetition_penalty_buffer"] = torch.zeros( @@ -144,4 +145,8 @@ def get_sampling_inputs_and_outputs( example_inputs["random_numbers"] = torch.rand((bs, max_top_k_ids), dtype=torch.float) dynamic_axes["random_numbers"] = {0: "batch_size"} + if qaic_config.get("include_guided_decoding", False): + example_inputs["token_bitmasks"] = torch.zeros((bs, vocab_size), dtype=torch.bool) + dynamic_axes["token_bitmasks"] = {0: "batch_size"} + return example_inputs, output_names, dynamic_axes diff --git a/examples/performance/on_device_sampling.py b/examples/performance/on_device_sampling.py index b4e1f4e27..da9c5b43b 100644 --- a/examples/performance/on_device_sampling.py +++ b/examples/performance/on_device_sampling.py @@ -21,6 +21,7 @@ def main(args, **kwargs): include_sampler = None return_pdfs = None max_top_k_ids = None + include_guided_decoding = None sampling_params = None bs = args.full_batch_size if args.full_batch_size is not None else args.batch_size if args.override_qaic_config is not None: @@ -29,6 +30,7 @@ def main(args, **kwargs): return_pdfs = args.override_qaic_config.get("aic_return_pdfs", None) == "true" max_top_k_ids = int(args.override_qaic_config.get("max_top_k_ids", 512)) 
np.random.seed(int(args.random_number)) + include_guided_decoding = args.override_qaic_config.get("aic_include_guided_decoding", None) == "true" sampling_params = { "repetition_penalties": np.array(args.repetition_penalty, dtype=np.float32).repeat(bs).reshape(-1, 1), "presence_penalties": np.array(args.presence_penalty, dtype=np.float32).repeat(bs).reshape(-1, 1), @@ -47,13 +49,12 @@ def main(args, **kwargs): "include_sampler": include_sampler, "return_pdfs": return_pdfs, "max_top_k_ids": max_top_k_ids, + "include_guided_decoding": include_guided_decoding, }.items() if v is not None } print("qaic_config:") pprint(qaic_config) - print("sampling_params:") - pprint(sampling_params) # Load model with On Device Sampler enabled qeff_model = AutoModelForCausalLM.from_pretrained( @@ -63,6 +64,19 @@ def main(args, **kwargs): ) print(f"{args.model_name} optimized for AI 100 \n", qeff_model) + if include_guided_decoding: + # Ideally this should come from a logits processor like xgrammar, but for the sake of the + # example, we generate a random bitmask + sampling_params.update( + { + "token_bitmasks": np.tile( + np.random.choice([True, False], size=(qeff_model.model.config.vocab_size,)), (bs, 1) + ) + } + ) + print("sampling_params:") + pprint(sampling_params) + # Compile the model for inference generated_qpc_path = qeff_model.compile( prefill_seq_len=args.prompt_len, @@ -91,6 +105,7 @@ def main(args, **kwargs): generation_len=args.generation_len, include_sampler=include_sampler, return_pdfs=return_pdfs, + include_guided_decoding=include_guided_decoding, sampling_params=sampling_params, ) @@ -109,7 +124,7 @@ def main(args, **kwargs): --num-cores 16 \ --mxint8-kv-cache \ --mxfp6-matmul \ - --override-qaic-config "aic_include_sampler:true aic_return_pdfs:false max_top_k_ids:512" \ + --override-qaic-config "aic_include_sampler:true aic_return_pdfs:false max_top_k_ids:512 aic_include_guided_decoding:false" \ --repetition-penalty 1.9 \ --presence-penalty 0.8 \ --temperature 0.67 \ 
@@ -129,7 +144,27 @@ def main(args, **kwargs): --num-cores 16 \ --mxint8-kv-cache \ --mxfp6-matmul \ - --override-qaic-config "aic_include_sampler:true aic_return_pdfs:false max_top_k_ids:512" \ + --override-qaic-config "aic_include_sampler:true aic_return_pdfs:false max_top_k_ids:512 aic_include_guided_decoding:false" \ + --repetition-penalty 1.9 \ + --presence-penalty 0.8 \ + --temperature 0.67 \ + --top-k 54 \ + --top-p 0.89 \ + --min-p 0.6 \ + --random-number 26 + + 3. With guided decoding: + python3.10 examples/on_device_sampling.py \ + --model-name 'meta-llama/Llama-3.1-8B' \ + --prompt-len 128 \ + --ctx-len 256 \ + --generation-len 20 \ + --full-batch-size 2 \ + --device-group [0,1,2,3] \ + --num-cores 16 \ + --mxint8-kv-cache \ + --mxfp6-matmul \ + --override-qaic-config "aic_include_sampler:true aic_return_pdfs:false max_top_k_ids:512 aic_include_guided_decoding:true" \ --repetition-penalty 1.9 \ --presence-penalty 0.8 \ --temperature 0.67 \ diff --git a/tests/transformers/sampler/test_sampler.py b/tests/transformers/sampler/test_sampler.py index f9aa35312..26cb6fda9 100644 --- a/tests/transformers/sampler/test_sampler.py +++ b/tests/transformers/sampler/test_sampler.py @@ -16,6 +16,7 @@ from QEfficient.utils import load_hf_tokenizer from QEfficient.utils.constants import Constants from QEfficient.utils.test_utils import InternProcessor +from tests.transformers.models.image_text_to_text.test_continuous_batching import set_num_layers sampler_transform_configs = [ pytest.param( @@ -92,17 +93,41 @@ True, # is_vlm ), ] +guided_decoding_configs = [ + pytest.param( + "TinyLlama/TinyLlama-1.1B-Chat-v1.0", # model + Constants.INPUT_STR * 4, # prompts + 32, # prefill_seq_len + 64, # ctx_len + 20, # generation_len + 4, # full_batch_size + 1, # spec_length + False, # is_vlm + ), + pytest.param( + "OpenGVLab/InternVL2_5-1B", # model + ( + ["https://picsum.photos/id/237/536/354"] * 2, + ["Can you describe the image in detail."] * 2, + ), # images and prompts + 128, # 
prefill_seq_len + 4096, # ctx_len + 20, # generation_len + 2, # full_batch_size + None, # spec_length + True, # is_vlm + ), +] def prepare_model_setup( - model: str, is_vlm: bool, num_hidden_layers: Optional[int], prompts: Union[List, Tuple], spec_length: Optional[int] + model: str, is_vlm: bool, num_hidden_layers: int, prompts: Union[List, Tuple], spec_length: Optional[int] ): additional_configs = {} additional_params = {} if is_vlm: config = AutoConfig.from_pretrained(model, trust_remote_code=True) - if num_hidden_layers is not None: - config.llm_config.num_hidden_layers = num_hidden_layers + config = set_num_layers(config, n_layer=num_hidden_layers) additional_configs["config"] = config additional_configs["kv_offload"] = True assert isinstance(prompts, tuple), "For VLMs, both image and text prompts must be provided." @@ -123,7 +148,7 @@ def prepare_model_setup( additional_params["processor"] = AutoProcessor.from_pretrained(model) qeff_class = QEFFAutoModelForImageTextToText else: - if num_hidden_layers is not None: + if num_hidden_layers != -1: additional_configs["num_hidden_layers"] = num_hidden_layers spec_length = (spec_length or 1) - 1 qeff_class = QEFFAutoModelForCausalLM @@ -165,6 +190,17 @@ def test_sampler_transform( }, **additional_configs, ) + model_w_sampler_w_guided_decoding = qeff_class.from_pretrained( + model, + continuous_batching=True, + qaic_config={ + "include_sampler": True, + "return_pdfs": False, + "max_top_k_ids": 512, + "include_guided_decoding": True, + }, + **additional_configs, + ) model_wo_sampler = qeff_class.from_pretrained( model, continuous_batching=True, @@ -184,6 +220,16 @@ def test_sampler_transform( mxint8_kv_cache=True, mxfp6_matmul=True, ) + model_w_sampler_w_guided_decoding_qpc_path = model_w_sampler_w_guided_decoding.compile( + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + full_batch_size=full_batch_size, + num_devices=1, + num_cores=16, + num_speculative_tokens=spec_length, + mxint8_kv_cache=True, + 
mxfp6_matmul=True, + ) model_wo_sampler_qpc_path = model_wo_sampler.compile( prefill_seq_len=prefill_seq_len, ctx_len=ctx_len, @@ -196,10 +242,12 @@ def test_sampler_transform( ) if is_vlm: model_w_sampler_qpc_path = model_w_sampler_qpc_path[1] + model_w_sampler_w_guided_decoding_qpc_path = model_w_sampler_w_guided_decoding_qpc_path[1] model_wo_sampler_qpc_path = model_wo_sampler_qpc_path[1] # Init qaic session model_w_sampler_session = QAICInferenceSession(model_w_sampler_qpc_path) + model_w_sampler_w_guided_decoding_session = QAICInferenceSession(model_w_sampler_w_guided_decoding_qpc_path) model_wo_sampler_session = QAICInferenceSession(model_wo_sampler_qpc_path) # Skip inputs/outputs buffers @@ -207,6 +255,12 @@ def test_sampler_transform( model_w_sampler_session.skip_buffers( set([x for x in model_w_sampler_session.output_names if x.endswith("_RetainedState")]) ) + model_w_sampler_w_guided_decoding_session.skip_buffers( + set([x for x in model_w_sampler_w_guided_decoding_session.input_names if x.startswith("past_")]) + ) + model_w_sampler_w_guided_decoding_session.skip_buffers( + set([x for x in model_w_sampler_w_guided_decoding_session.output_names if x.endswith("_RetainedState")]) + ) model_wo_sampler_session.skip_buffers( set([x for x in model_wo_sampler_session.input_names if x.startswith("past_")]) ) @@ -220,9 +274,15 @@ def test_sampler_transform( assert input_name in model_w_sampler_session.input_names, ( f"Sampler input {input_name} not found in QPC compiled with On Device Sampler" ) + assert input_name in model_w_sampler_w_guided_decoding_session.input_names, ( + f"Sampler input {input_name} not found in QPC compiled with On Device Sampler and Guided Decoding" + ) assert input_name not in model_wo_sampler_session.input_names, ( f"Sampler input {input_name} found in QPC compiled without On Device Sampler" ) + assert "token_bitmasks" in model_w_sampler_w_guided_decoding_session.input_names, ( + "Sampler input token_bitmasks not found in QPC compiled with 
On Device Sampler and Guided Decoding" + ) @pytest.mark.on_qaic @@ -241,14 +301,14 @@ def test_greedy_sampling( is_vlm: bool, ): """ - Test greedy sampling with QPC compiled with and without On Device Sampling. + Test greedy sampling with QPCs compiled with and without On Device Sampling. """ # Export and compile QEfficient models num_hidden_layers = 4 additional_configs, additional_params, prompts, spec_length, qeff_class = prepare_model_setup( model, is_vlm, num_hidden_layers, prompts, spec_length ) - model_w_sampler = QEFFAutoModelForCausalLM.from_pretrained( + model_w_sampler = qeff_class.from_pretrained( model, continuous_batching=True, qaic_config={ @@ -258,7 +318,7 @@ def test_greedy_sampling( }, **additional_configs, ) - model_wo_sampler = QEFFAutoModelForCausalLM.from_pretrained( + model_wo_sampler = qeff_class.from_pretrained( model, continuous_batching=True, qaic_config={ @@ -343,14 +403,14 @@ def test_random_sampling( is_vlm: bool, ): """ - Test random sampling with QPC compiled with and without On Device Sampling. + Test random sampling with QPCs compiled with and without On Device Sampling. 
""" # Export and compile QEfficient models - num_hidden_layers = None + num_hidden_layers = -1 additional_configs, additional_params, prompts, spec_length, qeff_class = prepare_model_setup( model, is_vlm, num_hidden_layers, prompts, spec_length ) - model_w_sampler = QEFFAutoModelForCausalLM.from_pretrained( + model_w_sampler = qeff_class.from_pretrained( model, continuous_batching=True, qaic_config={ @@ -360,7 +420,7 @@ def test_random_sampling( }, **additional_configs, ) - model_wo_sampler = QEFFAutoModelForCausalLM.from_pretrained( + model_wo_sampler = qeff_class.from_pretrained( model, continuous_batching=True, qaic_config={ @@ -547,3 +607,118 @@ def test_random_sampling( assert (model_wo_sampler_exec_info.generated_ids[i][:generation_len] == golden_ids["wo_sampler"]).all(), ( "Without sampler generated ids do not match" ) + + +@pytest.mark.on_qaic +@pytest.mark.parametrize( + "model, prompts, prefill_seq_len, ctx_len, generation_len, full_batch_size, spec_length, is_vlm", + guided_decoding_configs, +) +def test_guided_decoding( + model: str, + prompts: Union[List[str], tuple[List[str], List[str]]], + prefill_seq_len: int, + ctx_len: int, + generation_len: int, + full_batch_size: int, + spec_length: Optional[int], + is_vlm: bool, +): + """ + Test QPCs compiled with and without guided decoding. 
+ """ + # Export and compile QEfficient models + num_hidden_layers = 2 + additional_configs, additional_params, prompts, spec_length, qeff_class = prepare_model_setup( + model, is_vlm, num_hidden_layers, prompts, spec_length + ) + model_w_sampler_w_guided_decoding = qeff_class.from_pretrained( + model, + continuous_batching=True, + qaic_config={ + "include_sampler": True, + "return_pdfs": False, + "max_top_k_ids": 1024, + "include_guided_decoding": True, + }, + **additional_configs, + ) + model_w_sampler_wo_guided_decoding = qeff_class.from_pretrained( + model, + continuous_batching=True, + qaic_config={ + "include_sampler": True, + "return_pdfs": False, + "max_top_k_ids": 1024, + }, + **additional_configs, + ) + model_w_sampler_w_guided_decoding.compile( + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + full_batch_size=full_batch_size, + num_devices=1, + num_cores=16, + num_speculative_tokens=spec_length, + mxint8_kv_cache=True, + mxfp6_matmul=True, + ) + model_w_sampler_wo_guided_decoding.compile( + prefill_seq_len=prefill_seq_len, + ctx_len=ctx_len, + full_batch_size=full_batch_size, + num_devices=1, + num_cores=16, + num_speculative_tokens=spec_length, + mxint8_kv_cache=True, + mxfp6_matmul=True, + ) + + # Generate texts from prompts + tokenizer = load_hf_tokenizer(pretrained_model_name_or_path=model) + np.random.seed(0) + sampling_params = { + "repetition_penalties": np.array(1.0, dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), + "presence_penalties": np.array(0.0, dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), + # "frequency_penalties": np.array(0.0, dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), + "temperatures": np.array(0.0, dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), + "top_ks": np.array(1024, dtype=np.int32).repeat(full_batch_size).reshape(-1, 1), + "top_ps": np.array(1.0, dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), + "min_ps": np.array(0.0, 
dtype=np.float32).repeat(full_batch_size).reshape(-1, 1), + "random_numbers": np.zeros((full_batch_size, 1024), dtype=np.float32), + } + if is_vlm: + vocab_size = model_w_sampler_w_guided_decoding.model.language_model.config.vocab_size + else: + vocab_size = model_w_sampler_w_guided_decoding.model.config.vocab_size + model_w_sampler_w_guided_decoding_exec_info = model_w_sampler_w_guided_decoding.generate( + tokenizer=tokenizer, + prompts=prompts, + generation_len=generation_len, + include_sampler=True, + return_pdfs=False, + include_guided_decoding=True, + sampling_params={ + **sampling_params, + **{ + "token_bitmasks": np.tile( + np.random.choice([True, False], size=(vocab_size,)), + (full_batch_size, 1), + ) + }, + }, + **additional_params, + ) + model_w_sampler_wo_guided_decoding_exec_info = model_w_sampler_wo_guided_decoding.generate( + tokenizer=tokenizer, + prompts=prompts, + generation_len=generation_len, + include_sampler=True, + return_pdfs=False, + sampling_params=sampling_params, + **additional_params, + ) + assert ( + model_w_sampler_w_guided_decoding_exec_info.generated_ids + != model_w_sampler_wo_guided_decoding_exec_info.generated_ids + ).any(), "Sampler outputs with and without guided decoding should not match" From 56e8b1098bf1363ad0fe2784f9cd37323b9e5196 Mon Sep 17 00:00:00 2001 From: Rishin Raj Date: Fri, 19 Dec 2025 14:33:18 +0530 Subject: [PATCH 53/60] Adding memory profiling (#674) Added memory profiling tool (scripts/memory_profiling) that tracks memory, CPU, and disk I/O usage across QEfficient workflow stages. The profiler supports manual operation marking, child process tracking for accurate compilation metrics, and generates 4-panel visualizations with detailed performance reports to help identify bottlenecks and optimize resource usage. 
Signed-off-by: Rishin Raj Signed-off-by: Dhiraj Kumar Sah --- scripts/memory_profiling/README.md | 199 +++++ scripts/memory_profiling/__init__.py | 53 ++ .../memory_profile_llama3.2.png | Bin 0 -> 922164 bytes scripts/memory_profiling/profiler.py | 729 ++++++++++++++++++ scripts/memory_profiling/visualizer.py | 604 +++++++++++++++ 5 files changed, 1585 insertions(+) create mode 100644 scripts/memory_profiling/README.md create mode 100644 scripts/memory_profiling/__init__.py create mode 100644 scripts/memory_profiling/memory_profile_llama3.2.png create mode 100644 scripts/memory_profiling/profiler.py create mode 100644 scripts/memory_profiling/visualizer.py diff --git a/scripts/memory_profiling/README.md b/scripts/memory_profiling/README.md new file mode 100644 index 000000000..efb995815 --- /dev/null +++ b/scripts/memory_profiling/README.md @@ -0,0 +1,199 @@ +# QEfficient Memory Profiling + +A memory profiling solution for QEfficient workflows with manual operation marking. + + + +## Quick Start + +```python +from profiler import QEffMemoryProfiler +from QEfficient import QEFFAutoModelForCausalLM +from transformers import AutoTokenizer + +# Initialize profiler with verbose output to see detailed memory tracking information +profiler = QEffMemoryProfiler(verbose=True) +# Start monitoring memory usage - this begins tracking memory consumption +profiler.start_monitoring() + +# Mark the start of model loading operation for memory profiling, this will help to create stage wise partitioning the output graph +profiler.mark_operation("Loading model") + +model = QEFFAutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct") +tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct") + +# Mark the export operation +profiler.mark_operation("Export") +model.export() + +# Mark the compilation operation +profiler.mark_operation("Compile") +model.compile(prefill_seq_len=128, ctx_len=256, num_cores=16) + +# Mark the text generation operation 
+profiler.mark_operation("Generation") +output = model.generate(prompts=["Hello world"], tokenizer=tokenizer, generation_len=100) + +# Stop memory monitoring and generate reports +profiler.stop_monitoring() + +# Print a detailed memory usage report to the console showing peak memory and operation-wise breakdown (optional) +print(profiler.get_memory_report()) + +# Generate a visual graph of memory usage over time and save it as an image file +profiler.generate_memory_graph("profile.png") +``` + +## Configuration + +### Basic Configuration + +```python +profiler = QEffMemoryProfiler( + sampling_interval=0.1, # Sample every 100ms + output_file="my_profile.png", # Custom output file + verbose=True, # Enable detailed logging + enable_cpu_monitoring=True, # Monitor CPU usage + enable_disk_monitoring=True, # Monitor disk I/O +) +``` + +### Manual Operation Marking + +```python +profiler = QEffMemoryProfiler() +profiler.start_monitoring() + +# Manual operation marking +profiler.mark_operation("Custom Operation 1") +# ... your code ... + +profiler.mark_operation("Custom Operation 2") +# ... more code ... 
+ +profiler.stop_monitoring() +``` + +## API Reference + +### QEffMemoryProfiler + +#### Constructor Parameters + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `sampling_interval` | `float` | `0.05` | Time between samples (seconds) | +| `output_file` | `str` | `"qeff_memory_profile.png"` | Output file path | +| `verbose` | `bool` | `False` | Enable verbose logging | +| `enable_cpu_monitoring` | `bool` | `True` | Monitor CPU usage | +| `enable_disk_monitoring` | `bool` | `True` | Monitor disk I/O | + +#### Methods + +- **`start_monitoring()`**: Start background monitoring +- **`stop_monitoring()`**: Stop monitoring and mark completion +- **`mark_operation(name: str)`**: Manually mark operation start +- **`get_memory_report() -> str`**: Generate comprehensive text report +- **`generate_memory_graph(filename: str)`**: Create visualization +- **`stop_and_save(filename: str) -> str`**: Convenience method to stop and save + +#### Properties + +- **`peak_rss`**: Peak RSS memory usage (MB) +- **`peak_operation`**: Operation during peak memory +- **`samples`**: List of collected profiling samples +- **`operations`**: List of marked operations with timestamps + +## Operation Types + +The profiler supports marking these common QEfficient operations: + +- **Model Loading**: `from_pretrained`, `AutoModel`, `AutoTokenizer` +- **Export**: `model.export()`, ONNX transforms, PyTorch transforms +- **Compilation**: `model.compile()`, QNN compilation +- **Generation**: `model.generate()`, inference execution +- **Cleanup**: Memory cleanup, garbage collection + +## Output + +### Console Report +``` +QEFFICIENT PERFORMANCE MONITORING REPORT +============================================================ +Peak Memory Usage: + • RSS (Physical): 18.7 GB at 14:23:45 + • Peak during: Compilation + +Memory Statistics: + • Current RSS: 16.2 GB (Delta: +15.8 GB) + • Duration: 185.3 seconds + • Operations: 4 + +QEfficient Operations Timeline: + 1. 
0.0s - Model Loading (25.2s) [+8.2 GB] + 2. 25.2s - Export (15.4s) [+2.1 GB] + 3. 40.6s - Compilation (120.8s) [+6.3 GB] <- Peak + 4. 161.4s - Generation (18.7s) [+1.2 GB] +``` + +### Visualization + +The profiler generates a comprehensive 4-panel visualization: + +1. **Memory Timeline**: RSS usage with colored operation phases +2. **CPU Usage**: CPU utilization with performance zones +3. **Disk I/O**: Read/write activity per operation phase +4. **Phase Duration**: Timing analysis with duration labels + +#### Sample Output + +![Sample Memory Profile](memory_profile_llama3.2.png) + +*Example memory profiling output showing QEfficient workflow phases including model loading, ONNX transforms, compilation, and generation phases with detailed memory, CPU, and disk I/O metrics.* + +## Advanced Usage + + +### Accessing Raw Data + +```python +# Get synchronized data arrays +data = profiler.get_synchronized_data() +timestamps = data['timestamps'] +memory_usage = data['rss_memory'] +cpu_usage = data['cpu_usage'] + +# Access individual samples +for sample in profiler.samples: + print(f"Time: {sample.timestamp}, RSS: {sample.rss_mb} MB") +``` + +## Integration Examples + +### With Existing QEfficient Scripts + +```python +# Add to existing QEfficient workflow +profiler = QEffMemoryProfiler(output_file="workflow_profile.png") +profiler.start_monitoring() + +# Existing QEfficient code unchanged +model = QEFFAutoModelForCausalLM.from_pretrained(model_name) +# ... rest of workflow ... + +# Add at end +report = profiler.stop_and_save() +print(report) +``` + + +## Limitations + +### Disk I/O Tracking + +**Subprocess I/O Limitation**: Disk I/O tracking captures parent process I/O only. Subprocess I/O (e.g., compilation reading ONNX files via `subprocess.run()`) is not captured due to Linux I/O accounting limitations. During compilation phases, expect lower I/O readings than actual file operations performed by subprocesses. 
+ +## Compatibility + +- **Python**: 3.7+ +- **Dependencies**: `psutil`, `matplotlib`, `numpy` diff --git a/scripts/memory_profiling/__init__.py b/scripts/memory_profiling/__init__.py new file mode 100644 index 000000000..dc1377d0b --- /dev/null +++ b/scripts/memory_profiling/__init__.py @@ -0,0 +1,53 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +""" +QEfficient Memory Profiling + +A production-ready memory profiling solution specifically designed for QEfficient workflows. +Provides manual operation marking, comprehensive metrics collection, and professional visualization. + +Usage Example: + +```python +from scripts.memory_profiling import QEffMemoryProfiler + +profiler = QEffMemoryProfiler(verbose=True) +profiler.start_monitoring() +# ... your QEfficient code ... +profiler.stop_monitoring() +print(profiler.get_memory_report()) +profiler.generate_memory_graph() +``` +""" + +__version__ = "2.0.0" +__author__ = "Qualcomm Technologies, Inc." 
+ +# Core profiler components +from .profiler import ( + MetricsCollector, + ProfilerConfig, + ProfileSample, + QEffMemoryProfiler, +) + +# Visualization component (imported on-demand) +try: + from .visualizer import QEffMemoryVisualizer +except ImportError: + # Handle case where matplotlib is not available + QEffMemoryVisualizer = None + +__all__ = [ + "QEffMemoryProfiler", + "ProfilerConfig", + "ProfileSample", + "MetricsCollector", + "QEffMemoryVisualizer", + "__version__", +] diff --git a/scripts/memory_profiling/memory_profile_llama3.2.png b/scripts/memory_profiling/memory_profile_llama3.2.png new file mode 100644 index 0000000000000000000000000000000000000000..e91c1d04a3a455ab95bb13155ac542e194b86b65 GIT binary patch literal 922164 zcmeEuXIPWl)-CF`fr1TD>bBDnY0?!Gr1xGGrGyTl1p*2Jij-{ur1xGz3!#G`MUY-X zM~svZ2sM;I!d>q^d++aj_s>25@5%G{1TZA;yVja>jycAdywy}!JWI<=OG86*R_Vzj zZ5kS8RvH@m#5rd8nvfz7YAp!gWa>=Jzu!F+c`T4 z@eAJN7v=rk77llDmkoq2a#){X62FCF@CZgoZ}x z(L-JD#N{auZ&Pg2;d<|zxUsUELKA|Lo~)AJM=R9g2T+qvPgQDz9c$KU$~%rlL~exS!Zn(UihB03PX3l3U2Cl@8r$hI zK)^Q{E)YEQz_nUfo((u4FW#8&$gS(o?cQ-R%9gx5?``vd=I$3(-U4hp`;^^~b?!pe z%hE{kJ5q0bh@;KrMgQC+_~Iqu6zkEye&GGs-YaDTW&+R8emuvQl<&Bz@8}yRgA+m9${=Ojmpm+cKljzcP`RVW7@05S_JOxNewoM7{OF+c8Vd@?H`>6OK=U|be?J48NXAogv6_47r8ox<&o z2J>&IzQa0%Zub7b}?dy(>XJsJvw;vu6RZ|M@PyhaiuUC0wWBK_l zg%BUttp@aByN0jhjHuGJhev2n)9S))OFmq0`NVv&nCq=z{Cn1du6q}wFR&5OR6(*| zeK9S~OFc>2e2@If)*@tK%KJ2NyLEQv^E5QEBO`;TSdAF`@PRTL%87YB?t}S$DqGB{Ld+_%JQ<*WZCH48cn;-C|uzKpP|ozmt{Y` zV{g1wo8)6G%=K`GFCop|Mortr@}=T_{1f)k=p5PQo*kpZm3j(!G(*xOyLi~f3VP!; z&nOQ54cL#q@^)pZ3!=Z#TNz^H zV{WQ?MP^PbIaQ_dvbFZnp$f-onkLZj9lVC`25GtU0*U?%!tYRBY1nZJobZvJ=6JDy zQV!GSFVkvqb%^BJWSE9|kD;}tng@^$pZP#1PNS_L8{@Vp&{Nk?O*g9{o|&iK(4;8M&q+)d8xbL=hGWM*R?4Q1VQu;eK=w! 
ztDBA6k*H}i^Jnkegqp1UeM0+3Ut}P&2k9*5J>gNh3`EXZpSiZD@*m)HTkzJhzp~O_ zi%}KabnUM%UzYxC^F-pZj&-6ONlJ$xFOoOP_baYT8p&39RBKyZ_q!Kanw?u}ChIp> zqS3wj*Ci(C(jpa{OvjUmTuTCPK z5$b(n3hgXeBxs4Co4N8^C z($Fl^hc-0gOOj>Hmk4R#dUun)t!o)4#^v6KR#R93kv8s@t?b47eZ8lNzf2G{sa*?- zkFWR5zqcHEZu)xh2jTB5N7ELQ6LpEu(C9=m8tn_F|0J@Q9BMu|BBa;A-uZbgXKJrZ zGEs)mh_2O$V&~p6I)vUK1e1}fAmW(Ja00;_OQ4~-s|6B_hIGTbk~CL`t~l=c{WL7R zq;MK5|BCKk$6XR}Vi~al&uZUZ$XmNkLvw;n9XVG`d3_dK#~IxAaq*aO$C4{YpD)49 z3TRozBPLoEU&FeGg#YiTH~hD#|B8n%Eto{%_Y)WjuriB;*}A-XuLlvk$GTyFs>XAmiUsnNZp?u&M9YusDJVOX5; zAMaCNztKE?@`T~IhT`f=b!;3{*IC*Ue7#CGzODLW^v5ejxqX($M;&7Rj8MzxeA1M0 zzkE4xs$G38*y@jsy9{gwM}SP?CXs~jJ~XmmIF{jH*_S3DU8~XSe}K64=rtK}zv>MN zt9OLPMbXR`L;q>m*RN3^g%@js?OsS`qiOm8@|XX?(s}T^G&Gjqs~jIziW;BV9{aiBY0+AS9>qsv%tC99sb zX=ZU^GLewB8+~!OyUj7?#Vx$XPLYn~%l+2o4Osk~OFU+OK0*b)lg{O|!vGes$wTAU zWuKcai+i|ROINSJ0|1_Tn3(IN@1v5FRSD{L(RZc(x~<=CJr3dG?8~l9_6KP%w>U#_ z56uU_YDtC6u~mCKaZtltIzMHy8XTD~kN=81a~UII^qz&8Vif(S5&Pj!y1lEcGH2r?=?M^ji%P9gqpMOB2F3ZjJEZ#{V#I%A0beE0dPG@b_jz z&z3*0|5H2eW&+4eo~yj*w0ft{dT=Q(uf`2gGoRUN?`E2LT|Q_IE-~CMmbww6&W3`4 zBF4Qt&tAw;4M;pumuxw%zNaoyJ zYB%`;ehjH7NI8+oYym>JlMNMf_kJ0(ID;q!F&{!VuI5~(p3EkmS zZ}i`R9Y6kCc$MR$Cx7T`sDZtl^gpoI1WIfbcy|V^ z)@pZ^>p&o%rjCdnj3#%A(D5C9S{O)2-ng~4nn-pwUi-dI)?S&_N>J%M zLMNDhdBcGy^goZLM>bT0`M58h)v0xxdJ60q8yx)LzN^%9r!^z~2qM1712+(ix zt>O$O)I0DMhEwjWr6z?g%$DUo4rhYsLpAmxwZ+mJWP5;GOkgJCLutrc9_;MrX~F~a zV_dW|nx}|jlGlt^Ed*OuD5~&l4_}d&Yxc;i39&}giYTP!zKVD#neNXnvMJ1eKUH+W zz`H&rR{ayAo`I%m_k)4G{_hqa8oY;(VwywvZwc{hbC`>$2>5TDpsY`$Yn--k9zVH} zQwefr<5UBHn6&^?va>0wc%@`59C_9pi1t^gQEdOazy9-#y@yNxbd@6yuOw)EpG)iG&+ABv zy|?LOl^l>Lc^Zo!zvxcNCoF1N*+d~uo`A@Wn^4QmEo^8fI155Hui9=M`o+xwT)XK* z>_9gC3Rm%Pf9GvRF?2y%hJhL7SlO6E*eU4XRk>YAhx)gX?heiQ`+mK%0Vhtd0l<-4 ze+w<4gUv3Rr$_H>7vuzJetCE#jQ;FM=IE1d<%q$u!slj!d}`;fQ3D8LDUHq^D^uKd zOPY4>?TwMX_4%vd>I_gdAnJwxccGPDjUsUEhptcRK^o_>YtF?#ytRTw>8f9B|^ z7j2KZc&;ha-Mb(xDdIE~h6O1+sn}5uR1`zoF`v~=$dCJ2@%xQ0ZnEIQ#u3@diaZJM$_7B7AeWXnG&LW*OqUCa 
zW@kx@<0>`2C$2=uw&9fE9Sv&Cs%{PuO)9kO$B*mlRV4185%wS@OFGe?;r_iTyD!zf zz30=SOO`^Yy|H$2pVg;#t*mBK1V<44y{xj9~Jr@RO`1wY|j;b9BDE zI-(p&JILTfmUGP^KK$G-0FWkav_W>!$$`#>iNPBAzO1vcs%Yq^^DKE|cCN;s-=_wDywGksJ2$@V zRPSGDd?!C%x37+@e3TprpkkzlX3KAWZ- zMysPQ_bL0Pg1tW?&^q&LWS$QiP8=<-&DF1FjNo>1FaSumhP>i-U%)%yXGVpKcHoG* z1~&Zp&T3Y5L1vzrj|d+K%X|RChP@JfXQ24E=lIkztLyzFwK*v0($x!v;eSR4%uVjo zd``Q)lW|~kpyKMT>$+5ZWrhiK+-fb8+5ch#+Y?y`pahu3#O20uxn_u}TTGa1TKS>8 zv`v3Cj#Qp7bNXm9o!dFWpMn~$?o7e{ZgeBr8_Ez`Q-ufPml;JW3-Zw_0AfQSV>9y- z{MSNH?88^I479q7jggLAT7a-SZJ?}Mb-P{BPC+>w)ZOsvJ+fw^dJbq2qYWm~1IhY& zm7aOo^&lG}mlmbPQZV3vT)33lmkMaVlU(<1KKZ8gR5>D26J#qWegW?XxF<%&r3uol zI-M6PvHjaeMDb+tICJKR=jCG+00l#pwkfs6o}#J?8ZvP9&^cB5IWQAlP-;e!{Q;0< zWWAEw(f2zzPPPilg)!>tBQWhG5#Jnc277Z2L1G&D|t#H;lKoZBhpm*X090U0^J(_l@0vQq-Z| z573Pj(bu|F&AohUv#32{fzPB@cZmfnuszD$Gq1x~Ciy+(RMpd-UxFBVWZK)UGyjkX zP@QX|PZ-3r5=j5_gzV3~?!y7Io!5}juD#Dt{5eg)0ylO&!otrQWiD&tn71-+XGfm$ zExrreY+lz!`nFudH9)OW+CG*Av-zooF(Tg<@7LaI;!ECoht;^Uy6TJJb@!PN(HZ=7 zi7}fgGS+-!29s1YQhBs|XRYk}qp5n^FqH{~*T3Ow#$x&JZ8jB4QJ#7=Xw6^*I!1P! 
z&76kX7(tIQ{@I`y+ndZ&<~SEsEbs}GlNUzcY7p}`am{D-WfrE%;Y?zcglel>ODakoUS$EKnuE3+&#SK$4 z^wbpgO_GxeHOvGJx_B?`QvagttCH>@S7Tn(SUru@Xm?4D-qESq@?BCn*|kI}RrF#; zfBQ#2!0c=hI8M@SE%#p#@g&vMH3E8k3T^u*Lw4atOL*J8Po-2Oi@KdosJgBg!CUX+ z235}xKwDzJ%+D723MYk-;*^hh!#dj1=7w%kjj}CQ;rF4#oUqY}Mpc-Dg>YIC;N{fZ z6#D5gw{^x$NJu_zU5lL=FRq@WbF!0pYvAp@75;oJ6^+I@B;bulNaaE#;w>FTkp~&& z&JO~7afN^aEwm28N{UFBu9``x?eHa0Blooak}uW`Basw#oM=XCr?^bh?P>`|3O}tiweGi~0(?#m*p!`LgyF$xev1TE z@Y$gK8wQ_mCe!$9SeOu1yNNEyZJX;g13iFKQc9LaVO&P|Xg|-IUtPYEKqBWL$m+vJ zrcCc|CS)4rxk9W4(Ny?xUVpN*Ct|~J1awh=-`!fHmgn1X?75S=yv&Lh`z&$di$h;+ zrFY)4gpZ$T@3cR6@p2(wO08a+ziG#^m!$equdfCRIxdABr6}VU5vw39!%JQtj2W+X zZ^(nK4Dc~HlwE4VaNl_xP`9eALNPDQE;8xm5a=h^6oA^)z2ToOAB6Tq)DWv3!VKiv z3%}ch7+V2?GZ(_{4IgUG@3; z&RNX%F{<$#xf#WDk@Sn%@r8XXwQRKlusWj8o_2%VSfLKEnX~EizBG$s=N-xjr8P6x zczf#f0B9(N74^G4tIxpzlS=jfQOGW9nn@pY?)@Hqo%}Uk+*s^J%baL&t`lKkJ>NPh z*JWy$pJunVI_q0E8>FQvrA+@5Yar zpN?k<8^r56JHEKqTMcrWLrMb9S9$yYm zw8`}12nl_&H8|9E{nt%1=^OF{>*rUFTe=(`^2v9zd0x^bfP)C6w9PmBRRLpgO1nqw zRg!P=Abtqql=08wwl59Wc%Vc88xt7*o0+8V2Pca^aO;|vWW=0ZNpb8vs5mj?QkXb- zYutx@3~Oj~k-aBINRK>06_Ow}?;O42{^E-^{VG)C6?;P}iNV ztq7Kw#oG*wIW7hO9sBQgo^K>KOi|mrx$P^>{hIXv`w0aJVEpr!Uvnb2HpXg@4=dZ> zDc4x>vN8AqT7ARiJ=Hb!!P%r@COu{)(Bp=7vH*2htI?ZMgzb#oRORm`DfMm*bLor4fc7m;aS*rW8qEXbV&j;V})1pfH3pma(MEgIPrb4nY(st?HDl7D% z)DprcYRzG|>nWHLSY_Ov*p{wmslVTT*i-qA8uE#)Mid8DNKqxc{r=s>KCoqIu<#A= zVYzxQyf6;=dDL99=%i=TfL+kDoEF!m$!a2y2B=k%7TBKT%-+JJoW!rTQOh$r>cbU_ z2?A`It~#MwwR2-!@o^XIb#sfW^5jE#m@A%_*@O+$f3SEuIX~}Xy2HX`S^fOgsWOMJ zB2#KUbL8~`MZThmM9{`9q)Zn;Fa*FL9o5hKk0WGByZo_o^^?P6Hdvm$XvD2XZJ=|L ze}rAF@xsHSPCl#7C_}5rBIZiAFPZpC1733RUGMTBgu7-Pd8EwYWVvHO^bi#sFcr{d zML7NeU^b8R8lyY;38bcC9Y|RUm34_$+^)|^HD*|r$vPO0e{j^BA=Z$1t^ zP9Q$k*~%(lLe$A>KrlPSSvva$zYLQl@J^TjE*mg^HUu4>{{IV4uT7D(7qGJ^px{ zO}*YbT@_T|C3nI+fx=T~@~8ns1;dJT5zspkyuYb`F!m)w{(#D)B7M!5YKJ~kFA%*K zR*PLNI)FF@`g?L?l26RY*#i8|#RSm`)XHG~R%mW}%!gx4yhIl`3;_|P> zkM{H7on8Z9z5Se?U%$#=-y98;N5GJG7Tu*g@u8>4x6ZH3G+~2#3-|!G_DGwI>{2pa 
zwtoqoA$4w1ztKmFc^TLA^eDaY8Bu%yyU96a1#@Qk87+-{xbEW#oK zrt&%J-M%`9os)Jed9ZpoL5QK7Fk6J26CF1uhG)~#%RW~b*Y0nP-SHxXqF76Hh3Dl~ z>W`7f>6{n8)bFt~CwTwgFdQHgxTOtMN7$_J-c?*E zQ;bxJ5=ertq07oDZlN@914vV z%?+vhnqIseu68EQac?#UMu;-VjnX9Y=JX(toS7hIXNUUY9W66ITc#FhC4CQThM^At zT{tPjAZoE4#qMe^E1zeJA5x@Nn^|}xQ0;(#gp|tY1LbA%B1k%yAO%s9(7n`SUZv@Fc_qErI& z97FA<^kq?aX$(<@0-HDup`CH99~|}skUA*sU{`ERqW08A`z&@+`g2l7=k%W#+G{k* z9B!Edog+?#T01q{7vLC<*u6sBkC za3G)8LLV6weAcXER?16ui+Ivvn8Pz=AXQBLvrNS`)Z{M0Ss#`NSOnXZ3gO4W@r=)l z0k!>JdGlG{^gPP`kHYg7#!oa;iAGYxxe+Z~(uDb%7M~#coOSmLK!BT)khTB1B7r!$ z6`ZrPNDyFOQy%Sep&;ywS}p=c0&=0OR|jdaw)awwd)L;v)zhfp$yz)4M6pU)NrTX)4nS~~Xus{01Qi_VO?Lg+ z?wvVSc7YvUGU;IY?Xthu|4JzTrluqS^8&!7#xRx(S5gz^BTY5i^Cg+)4V`KSyIN1Y zR1pV#Q0?JCezKHy;X{E-53Uw?G7T>hWgGL0oL(nXcXB~1fS749K^i}B?3*285ak%r zz_4!GF6x@Ay`xg%q}|r`0v|^gnUR0UR@%k3x*ux#*WS?RW!p3r^tre|w@zIh!THVb zyMY3?BU7gr%2TIvriBq?i_w0`)PMj4s$u)*5fP7S3zrwdQWZNDGHW|nk4oymEJyOt zlrxsWLOfkSwpK&>m}k_A7tkP}Ur8pd3H~abl!G?V{A>X0JEDn1_nae#-1W9(;qwbwMN>%fu9c)mM~P*xW_%5KYhBSRzi@A5)d#>onXM>CcD}Y9 zZ>RUvlKoHZ+jo=%k~jyY8S3XfXYUz|v8hui4_*-|U6-Sc&+M&#g1lDt0(G+^uIm8a zgfTD(!pi61S>sZgZ;C7OTB>X}K)2Ja9^qc=>`X_0rf)wsfvM1Oad|kPZtcVoU;o=O z&C8N*<{FLYJw0=^a-z?o2=JwS<#~KdV0#I#T)!`X!s|vj2gyQta5lg}N6V^KqkYy9 ztN`RLAd}yn9J;6cPy_JNh#$%by*N#rA!MD%aYLt#qrhwPMOk8?ur6F`+U@AvLs&@zv2y$>L+J$XrVp_Vys z5z)#EKxnF`WXf!GY-^p(su}$kW)QG%hK@^Y^4LPZM@(RdD72KiKVG?`8c)-n>g(RT z&4K|XZ``j_s$%UK%jWl;^Ohe~UKKTnjwD|*jQ;qn0tx&E>wCgjmxvzW)LdY(JWeW7 zaMXXEK$-rxu%^MW{WGETQfy8SlUsx}E^X$?&f)^PL(2_B7hn{-mEWR=L^0`KR>Pt! 
z{d~6#-^x%I>4$x)M_UayA3433IUF)aM`~*gZ31^C=?geq{ws@2O&<5N4iWg5$r9Sj z(G(7D=b;-X=@h2VRXus3k|T2v<;ZQnt-kmLxGVs@iZR%hz7#r`hiY_b$_Vxr5N2<1 zhPDfHXJAmYd5BmV-fFPSZBd~CVpO#=pcjp$&98sq-;UuM3?8!8}&`!@-GkI9wm0ep` zl%w|5#}1_X6eKLFoo)`Y?#U(x$Wyl69}~I~BuR01JbmZT6mXQI>OFy%M;mhZq+FU1 za9Zj+dDzfxb37R!iN!{-`5H5Ceqm0{9o}TvPcd}}#;e!NaG5Uu2mGgyV5-TNR-@^)327qUa+#&0gi}-|Uflc^s^(%VWri9Xs z)#L+PsDMcBm;_ozNt;=*33QWXH=WblV~Q0$~}k3hylDb3WAJITPS z7f`)Y6{#nxB!OvLkARluNySaT1rack>IlEa41h)Yuoiyul61(6(`yBecOb8&`IwiT zd#Hc;l+daM0K78f4ti;K`0^!hYWMcVKdrGc8jvi+K(R{5Z+pTy=mI1+W{aJ%n^X69 zisV|wdISZG561Dzji-cIRP6s~_}L}$^ZiZ5U;?70*8*6wJ-vVbzQ9PgSy+2n?W${n zv*Z0-9T~%u8(!eL?`HRCtQC%~Y2rkIA_Cbu5BTr?puU(->rdWYc9t|LlBpdN(!}s} z0I=>w{Zs`^Z2&if0;$XQ#UTXi>zI3&eL-6&vxdRyE#n?|J97fXJ@l|zZSL|hI)$I_ z_74ZD3GuzM==P1x@Zu}*`=p{+h9&HT;vuNE{Nc;gP>UU<&)ffm;{;tkDd}ve@M7@( zZa0+qZ%FEnNKqsA!9|n~IRVUULSaY2%HYLD?~~GpKA*ynw<}9`&wQb6X}X4=oE+CP zLmmUF4XomDsnRLK>Totp0k=QG!*}q%W%V8REH6^3bZ)IxbgZ=fr zf5TxfRzD^a+e}j~rLEZjyAfUy#lEiWN@DbAP=vM114^{SNirPs_cB_{y*coOp!0Fwvap3Q<73q|= zSn4Ll>r5O{M~g zTdM7gkJFSN_TWY(Nx*q7;^DI@lT+Xnmy+e(;y$l(r)!cPzlpg^)Q~NmD0s*f)Z%)!z<1L%utt=%gh=x0G08^soN{-z0hmtr<6fEk25uX3^ogMtl!76+KLLgrU2%Sz`% z#N)!EPI=f(0J8C$psHY;`ytV|afd%?emsO-^!-9s@Y!jeT}~y2MpuQIF_$N;=DfUI zO2^MSj+_55E*Z{}@~!x~@Kk;QsGKCwe-{)_?QfQC`fn|rkGhJ9nkW14NI~r113MB6d4#1-j7O}myGRAtLzu7l_16j zYB3kSVC%OZBB^=x8O#TzfVu5ucbEP8Jq5Dg{w(*^>-#|=m>IBw%8#*-5r$Dj;M+?9 z=9`qs6*-^+nDa>(0m*#Txo0@VbHTTWLCNiYR_OL#dtwJ0#1i1`+ZE8g{f09x%7~5# zaeE-4q>$YDy)=BcG}G9p14ghdngHVf@WCE+93ClJ+maUHGf#LV_C4vK@{k^2WFSLa zU2Vrc9?vQ*J%JbSD^*wy^TD64`MwtS6386+4rm#MDHGEI&lZt*FnYu3@;beTfk-0m z`4xiwEtla5XE-e30Yao~L;9-2{Y;1l1_zR?xK|EMsCe-~ z1wX^<0jkE;A_wr~pu-IxHx=d5(z#&8yCeRg|Nh}+kxF~tSEO^8jOYk*7)N1>w+dje zU=#HL0qzy6hq4}-hWjdn`tkO|mG9-O$+LDqRVQytV_{H1kB81C( znWi;&IDh*RE#ei~DfBp_8}_ER>rRnb;{+SM%I4>eKYmvsoXL2>D=UBQDs!O^eqDj` zOTUW!l4tIuajv0LFkvUq{p@~+`WtOnIQpE)ah!;HLf8V>e3fT`-+hb6~V6 zZ9tQlkobt3NkL=6Tlu*N<1JiHec9^C8LzAun;h>KGi%`=vOSSDxN8x;8#hOw5HMk( 
zCwCK|!2*b@=p`D@5z@Se+H~f~1H*J+90_=u!+in9&oS7p<>gUgm};En+276r)V!@Z zA`lfitA(4}-v(+6(y~=Gt348R{fzz$M-D)W_1>1cVcb5QpxPYdSK$F>0{5WEtgl3xVvzTr)2X-a%)n1l%;Q&bd?mAAXd*SXM z9Ak5bz}}KRuK)RRKf6G(q`m|FJtqH z>nF6=zaRRLayJyx7He}73!O#;%w>sSMhQB-{uo?lgEvXef}wSkm~JfMm2f=##JEGv z(;i9rN6w#xjCxr`=(c?`Dx6GN4NSijTBJP$3HJ{q|vV>0a?CGiQOoN4@`CdSP3)`ZY5gU5<6qJ!ujl0 zJ1671%fL_r{By@g>oCM^FeCvKT_J^?`+#LBoB-6ql5^J>H*TNHD?ANdIxFQ~@d$f0 zBt->G?4dyArwX!gCDXd86E;n0^D9k8_xYk!@cB@ z*BpQ%0nU!;tE0cULMC3s#A#ghm$o`Lo-)5ji*F{_MNiS!{84aC4nS`SD{ofAZ|+-v2M+YS`XfbS;qM># zrDsnWzaqtPjRd_^{R&KFe78-}E7EO__LAQxXo`Ue9!fDlc&x2Q?Y_|jGm84afBv&s zo{^C`6BrHA+go`+hK9T!vLQ0PKn!>TSS((&TG*Mvq<3x=Qanb3AFM91TuukQGqZ#* zkd7V-unG*X!?1^j`v@F(#Z5E``QtTPcuJa%PxKi1!R8;hK=WuTg+cWJ| zxO_F-ZDLhL0+Zt!5m|%3ZX|gxaDw@M#A8im@bw~zG zbk}8)P2k^}K#iG$dAauNfFr7K%W&@5ca<3~u#aGTkn@J9_UxBez_Zp5hLsq-@0W;i zHf5s+2Sgp1@0&;@LvM46Ah=IurSU6uDFaJRv?$HI(GthBU$ho?|n@m%-aDI z--nFmA!o1L(gFh@e!=VV$K0;vgUKM^-7TFi|H@m^ZNb~|q^~*rw~W;$kosUNM4d#s z;6i?D&WG=61+XHQ zo15ASWS3=LRIi+2!WsvZQ!?k8zAu(oPRYoc0F@26E7b*OuYJhO?lA}Bhyx%}R?v54 zW%P#&K@B7zyRuD~nWg~=p4~{4#f``SgBHAXNI0wY5w!on-F6j&W0)`@q~a?We(}o&&%ydQLSfzUKQ; zg`0MLV6+X)@U4ruONllBYi73+(jI9A(+&r2QpT$nGJy^1@#_=cW?k~ZAmIw~7&O$Y zq*Duqp=_O9ix9l#jnbaEeG9MTnJi*QD!uH1sz=LG7~Fdn7=C6S#mPL~r9wuCa{iDn z^w0#va>aeV?@x679zk|hkwq79^fdUUC=zDabRBe&6O$tcXdIIwLU~v30E02GV)qXe zK7VoN5!Y?rM}S%zM(;p|Ly*MMpV1w838@{R8yP4Zy03!Uhz7*KRg6(`lHWJn9yG%F zI3%yM06T{iPaJ^cB9EQMemaJY^f5%t)j&n;h_ zJOu7XU9e&4ZJrW+>Ur-yigFd;Y~d9LmFa|2zpASWB9QMzzUVTC>iICqy+%?Jq(leN+w8plKz z4(kk5_*;jx2d#rT<*`zznsImAO0OH3>juN0Z`u^8wp87a3xwkmzqL`SctG5A72~TFa|8CI>w~z%KaT8#CGyZeP(!{S<7^19g0op7)PH%KZ=cwuW#y2;%A_q5n4ukHFK1GAxk%KMo zF^3i=OVnSp0839wXBc^_A@A%8f3b)Y6Y%!58}#;r*Xq0cn4k*+^V;0^gbl^t_Se{~ z>p~6hs$-7`KWrV$(b}(6Y-tI227o52!2@?et?|)%s(?kkrUnOJe=ml$T=x`3)2F4R zbw#EOR_SE1)*~kYfPdR@2VAo}B>yr;xw=|^aAonfNbSyMD2mUELM3Os`NL~k&WV7> z0%{lrMi6mGFVaM`#YFzCkBW!&J99hTj5-%G&`yY&k?tqhbS0%5>`_OL9fc71=-N3D 
zd-kLuVWjPx>af?=;!f?xG;gByGhiP|Vf?t2vjgFBV1^i|N}*GWd^gmN{oYS?3~L#x~#9J-hw#tXe_X3zP2TG+c&!(e<1X}NcXGSed^ zeb8o5NQ}HPFtgAo%Y6&5&KF5rHFfJDi zp2CN}pQ-mm{K6do0uUg0PRVOzN7;a7&G#zE;1kCei-X1WL-3d9vz(a#ZGkfLii4&q zUcM`qmhqjFf}{igb+T*J7O?xlkqg;b%3%nbBLrZ^g<9seR(3XZE=-*k=6kPObvN*P zO6b2Z?^BXsy1rIATu%BDEfgx%W>D#gzk^~E;a~KaUiF&CL$U+y*~+15>@>zNB(iZW zq-$b_L1L@9)ohmu32#hJ#6e%Iq{c zrnr@SX5p!wU^3sRWQ=q`ovuJig}%3j+sb%#YJ*xYJNh!`I#g7_0WjYn3kb<51G8cc znA?MZ8$qBh4S?+o%HdNi7f|X{1fO1uX$y7tZy;68i92}8eQI!}JS$>OUxPeS+kwoK z-QgPw!3VvB<#Ga~uYo}^v~a`vFK|0#vPW#aQ7TE!hG6n1f16q20Z5RB0D_yD|J$7{ zI>G`$l7*pKIHhqkZQ}&ySST0aNh{f34;_u6m7TM%)O+{402qKa($_vvR zADga$1{KoF!N~)e9)DOAp9M9Y5)Ql7c7P#^)D>MC%eaN0I7DYzjX(`W@IBysUQ#jO z8>)>}MYfBmxmdn=)+`66`SigMd~pCfZy#{qKGm%on*9Wp=QH)7!K2M;GrBU_Uj=(Z z?0Y?&=HbikY=X7T5>T}=DE08>#-pJJc4s%VZK??o-IgBJwBFzAC=vrdp)c1GCgjWy3_@SO(?R zpR>=PZI!yWvvzsV%3sy61TaB7A-u2IFM1L`qYUkUT05d1z~h1eX3?M*3l^~mMx#@y zK|6j9BnL29ZKezo_5>Yly6!{`uaK4(U;NJj3ai0XqhF2Ik3Zl}>{}!P6ARjoNF4l9 zb|OO*+uKw3i_|Cv*mOk;!^R)oxRGFJplk~W>-cXsTHE%e4Okl=V%lo_soi|eeYacw z$bQGyr95zF
Wv;ivPx!_fQ%@FnF$UaDohk1JkB9L{{C`lL3w?LG(9M5fWdfoLo| z$F>vMBMF9eAV1h%!&jhH49;|4(G13^%0v;@P=q^3~NOHO{l)%$Ibfx3qNh!b><8pO!bTQJtfy8U;f0iIv3hy64C_VQ+4e?+rTe}>^XEX+d9jA~-ot^-8@@^EBsb=T)_ zmoF7^`9`6AEDe&WC6Y6?7Qk%X@M{(442DS9Cw<4=_JIUOIVjdMhOLE`01FA2x-^2U z?Biy-1s0C7PxClaFh)LRz@ACoGm8boE?4;-1G)})UwnPMh>IeEzq-*f=MMoolIrlS zV@!2($si4Mvs`8{I``GO96$9gha31YEfCfzt~9NA@i9*ik=+#)D%1neC%FF51e1!y zwTFf@QUW*cK4|pqWiI+&4L|=38p-SLT{X^v#)eTTMWRHUu3A?vQ`8!1$B|QXtOB8& zFH_+*|3`3&TGpdbLZh>J_BU58ayNZ>Ut;{W6Tm-qZMSHh5FZ=sWi?p&%XD{l>Odtb z(kMzQNPVfo^r^;B*S4fMSzO7WH!%~~L@9QwcJ*=0_wp; zKtz=za34TTHJE*0ik(ouLfYK;c8@X6jGSJ|2r0}H$dcqtUDosoqGdMaN%R_=LBO?6F8%4YN=gW&zyA5-WmL*^wf5m^ zie&f>GQV<+)1?F6;A^gH`v=u;Mxt9Sio>(^d({)l_LTh=_;Uzbz&h4Oso2_rO!VX? z+Hx9pjZ4h{Z#~f;4)*%37;-Pa4WD1-=3LY_jvZ-87oZAl@BzShIt&m6=yQ`CZ3kIyk;Zkjc~nlL_{qCVW8~$#N6rfA z{M^D#7%W;v5#a?X0M|=z%oaqmdf@E~JNhgsyW7#+FmJ~Wcao<1u(8R^ADNFoUfBA= zDC9vZRAT3P$$F7JU7*k+HSSv}dHmhU@bT?2HVMuD!_`?pMcu7!A4Ne)6{JIuknWUF z8j(=COLFM00Yw3k6p#)PDTyJaLy+zqx_f}3hK_H~dEe(f=R2&$S}vcp9^pTG@Be*Y z_jUcY9?YH3Af?W?8*L11n@u3}HT3B#R0O{@9BoFr3Wop8K@PCxvd-dg)w`NTHu_(g z1>KE$K!M@{54YCu)h|x)AcOm9uC5}=eU73Li~PVhL*w7-mrer;5y`naEnMaZ17e|O z%MontUJYBf>g#21I3_RS<_q1ADq2Dif4RVYGN|cDzRR+E6fhqfK6l+%Dii8U1E0PI z(0f6QUT*;#meN%8vT>3N@|Uw_y)6HHWY;f?I}e=3l-L9fW?pIJK=D8rY-G)GkM4I= z!N_>+tQorAETXFfPzZ5=o>_Vo-{{NQqntxLt{@TriZf3cRC5tZ-1_rNstg5Ly%bni- zAbH2IAybL?Lr{>~T#|9p>miCO*J zP17Lg{)*D#i!F2&fx%fDgg-%q3LT~bep>(;08tMWc{WO}$;Nq!Jb8l0I$P+NMu7K? 
z4+uaK)XnzB35#ETC^%|Ru8EhTQ%V-C07k}Qayq;gb*7f}H8aIGrZjZ{3;#2@)4`#{ z!NnPMoNs8)h75qVW*hu{T+o8Z=1h4j;arnL=SAu|z|rL7O8BIqr@l&%4D+sk<)Ux} zeVTr{!ljHGJcgqG*pN=UQ)gDPW0C@W8(Rcu`?6J%Xp-{wHP|?%Uu|xBC051Iax17Tc(vaRcUB zz&1{`83-`0N}cKMzwtS0g_EJJVV31KJ4bHf`(GibF$f8ZEpcm;oC}~|$rtqE=KA?x z-A=f9a#4>C(O;fthWQe-Rj8#uA6HhT{arE}1P*hXzmmTj<3Rt5(mieUe{CmOZ{JoT zYu;ZDPS4%G9`?sG-L6?)MUSs9p%;h?IO2mZ+@~;lB}l^MjJ;2pQUY)BZDh6Xla@H} z*6;{vJveJ-+LW&<8U$bpPZ5yy21V`p97hj2dboOUaWYf-7Dd)?j|mpseC1SjNhU7( za4Jek2T`V7r-m1UpyiJ^hR((PwZ1wqEC}J)^2_`tl7BuPp~Q|7c*EZJYP^>ij)Ru0 zUe8`R8cIr7s;QkfS`|}z*%Q& zxaENqgT@hje~$s2yCD0hr`^{(m;lrLxh~=S@p}JH0lxsx8ubO*m*Si7ns2MFu;$R- zl(yJQplR6pLQ^E)pEgh(7$sFD8b{dBBdH^T4KSqE$~MJ#`SGQ&H`qgu{2n}8hcsY* zM2ly0OL|w97x!oP0w|Bx8**2jo-VBXA}4=pil1QgDV1f-t09WZl(I{WD2#3=I}ZstocJr#$J>Y1t)fN<{7HwejA zo9bL1%%~0>qCw}6fu|OS08b4h@7JstHqh2)Kt!es%JsK zbd^Cye|LSRa%L5nl+#G5$V^yP3IOfqG0AOPf46CztE-RNh^fgWn)~3BQ9dkVf2={X zLqB&oyo5Te3nru#bp&##^WYD+T`Hrw?emMnfsf-jmP*6x<9#EZj=F0u6N7C)JVdQR zNH+7MjB+(O3k6;c|1Q@A*eStXK(Av*lL(}QdXow-@CH5%Zx|KS^t>|J20vsgiH%`g zQeL>u;n|UrLonPY5FFA*p9Rd|e}hBF?&2#AgG`DXTpUcuci0PgwwBv#bUCTnlqD$d zKg>*a35D3R0n8kj1Z~t+v5$-o%>Np|CyLy_&6!Wirom%Oj3LV!C`y4vWHc%g&IJI1 z5H7;sY1$=Q2XE@&6MDkpV3odmI>P32Rd{jB@Px7$>Uo5}@wl~;b0oSV^^-M6KUITM z6qiwh$wWzJm1EuDuvqI#qw7m(%5g$+kz_h4EzOT6=mP8VlR8|Eq|~42dcfZa;ldc< zq`a=UynlV;?K-lA+QS}4{5dHa3DN2w1)V%dO|uDezj#RZ5~NNBK_~)MI_7!=KY_uO zGHN$t;0dr5Z9RhEv(^R(*e#-5;LTU%VDM{s3B>lqWLAroHV=lm`E=IkQ`sL_pa-2cVnqFOGA<9raET zz`F|4YR=DJ8=$2n6jnxM;@$(LHm7d(;4+j z0)TV8N<-~c44Mw+Jk*oPo*I|OWnA8wZ7-z%Ov~-WLcJ2?kG9OK8tnZENs`z33@(I2Q`h1 zw@M-9IA7AlC6;_Ww412`B$Bw7(J^&kE|Koeg+x zzox%tv1wkg4nR|-`HDQ}{e4P?Qy#HFs0a{W1-C0r|@`XGJ!*75|5KfK6Qq-P$m2h zyXOH_8J~5mA~q6;0hZ7nNXwkrL9N1DrK)vUf;~8iUena%N!D|LvDJ+)oHa-vPwXfT zc+{c$Nzn5Q?I_#9h&zj5#zz6em2~}!dH@Iijam@za~!<*0Oxm~;w zeP`P%>oA6^rXDLB+ZbI16M$neZt^effsm7Sb2zH0Cm;|tS`q65eb%R4kd7lzBA~%R zf>+z1#TFl{M0M>~FxBrAR$ARD&PcPNnn?z)VO5wwz?^BJv%ntGj8di6qqq>L+<$lv zA2hYa#>SB&XWeBA(1%9O2Wh}g{OPh$W6=b4`ipdJzSqME)YsPZjz}#*a$5N70M-8} 
z!`_Dm{#!5j=^BJrMA3KqiPpsyv(r5zZtjf|gJ(Cc+Wlz631L)cK)wHXWy>Kc`Zatv z@+hHG`W~3G1EX!klrZBCd&Ti}Qnllx8#r1iE1fH&H`+vf!JM-7?@zvYn0mX=3tLwFQhR$-*?QYY4TGMMrnAgb0{A7+It~0o^ zW<-h^Ar|4isc&$$R zX#>jI^JSPW8g)V52ftE@IqGB{8fwpoO7?j5b14Sl@BFfJZkQ<7r2BB| zd<3d}&QO2w7FfE#1O*=DJAxPX(0D_d&5&y%?ZHD^mYK$^0f(9P>9V3i$M6=b{d$Cz z>Nr)a$TmQ5Dt$;V?)NP!b_9M*^hPcn9j8VEFVGVJ&l?v6{2Mqn+HVm7Tba^bcll`` zBZNdh6A*p*CPts5FmTyOpp6c4=1EJp8lrYt_gx)>g9?FmQGk+3P+Mb(M1| zT&p8nAR=8npQ(ULpp(%;6g@f?V2l3C>-36@=(3|lF%`gY9eUjZJ{HOCl@5R>)|Msgfq_HvCeQUK@geh_Wd!?2d6Z?c7kCurr z;KXh13UPIkfoca>j+TNRo`ryh9pE#_tLhwnduImXawNxD&#Wn|4hD>FvuNRjFvte6 z@ONHTi_ze^H9toKL0^NC>3aUJ8mgmf{ zFc9t|Dh&4o18M!c#f->Q^C)crU5Xx*LoX@7YiQa{oIaC5;-jSl$XAH#D2Vt0&cF}| zgBnaph_Jp!$HV*g*iDVjoS=I3bu3Tp6~ zDomW?0K1-QqbW+h5kQ%z*@;b?-2LMPZ>Ib7L?`uL7L}vy&o9+|N1wNxCj&O>!;jt_ zsTzm3W4-E5_a1=6b(q{t|E|-9mwWmC^=y)~;&6-Nn(X^14WFAt5|daUl=8~z;4K}< z%X9SKci_c?)gti|DUGs6*DzHJvWT`1;l=^GVi;LE5~XPDyJ`lbRE2D1wP? zelvK;U|z@M8j4#o$==8H+`wSIQ-FQYdk*Lg&t|Ys20)ZET$r{7NQ}{1ltv>4c3(5n z2WA$XL@Pnkse1N&lbjO@2M9iWRy{k$J6-CR&iF3;XBQNuEF&-GbBZPGT{E5B0CnY< zjAX9~NIE^!IWkE_`C`=|^>Q|$0?^GCEI*-z3Ooi7Q8kcV6RVH~1lPBQb>1dOode)4 z;sD?SqqBL;x&yx}1U>{X_{7h?BEoS?d|N>l5%_WW;-}}!y?*VVt!zFmE-Fn4{)rYV z{&o)C*H(xGdF(;HE-90(_hE<@0D@+P2of@)DUj0O_LuI{DKK}X1)Ld>g6%C@r{Ib+ zp5`@s-;!-0U>?&FScFY8%RjJG8zuk1jfudr2!yj3oop`M(_Qj>T$D&pFj}nd*l!+72kN1BY0GzhSR^w@$CT` zMFS)t077VN?)i7%o<$K7bW3cEeh>LbqhEU=R@;;b-i;%3)3CR=ZFT_X?57pI_A|(} z@Y@eY(sH-RA{%nS`ka!?*H?&Q5lMp7Z>>SneE|(_ zY8>LlkC_8bmhvU;Uz>n^2>`?qxK$mxe$-mzBnjaJU;?NLp0?D-VB?+=mzlVq?!lRg zq&?wL!vNmBY;Fj`MIa<+XM|UI(fiG{;TW0<;0nOc4fwZntAYbm97CY5qB{1JFc`8n zz?dGx5HQ{-C)_$A`EoS}7SS~#xD9Z}&Qao!qZ^<|0>^6mg~;20^JRRhVuo z2oEK!BJru^Wp1lht_XUssS2B}I5`GTbJ@G79W*>%*|!Qds*>;v&MU~x)N1`d!o;7a)Rm^L5a zgp;Xgv$s#5Wp~K2ayR=PLLBuIzvh91I1AobQ6mkY$DjQHr2r7EY=DCX3@#eZrrv?x zfZQhH-Zck+w>LpgC_v159O<#5tB2f+Gj@EQ{}=@GV=ZZlsetYe?2MZ==GmN8fVP_R zufBX;9z?<2>bvb!T-JY3W$nzODJV*Z`~`@;;zHb%Mx%8@ao3o)_^m^iZC>>^T#86N ze%aXNEV18BL?pea+8%(FlMTV59H4E0BCYHF9qM-+9aG0J>r`}YFCr*O5Q42H 
ziJ&sJLs12^@t@rO3|CPd0hNQpPU}2+V%#+CJ)C`UNjl|JX5dNas#*j9&j9uj(cy~f z#e=~{bOnqu=yZ}|f5BC>YwDpOtXF%nli(L@Qq4`jIfLK04?6k_B45DF;{qSuXK56VVG zyyafhxok`;hKzv$K5&y{{g8t!vh7k7EhvCK8cfkFviaAC3mGyVyR+L1N_I&27Rn6J zu;^9=5hiRP6*%nwOh1`z7iC)T&-9 z?m2kHYuA!1@WgGPFdZi^emEJ_PZ!twG38M%wFaaN zG)SX~yqB1oq;S6J1m|DDw%Kuev0#8nJReuWMuD=+fOmTn+(W>LWTT@WJ@t*Mx`U0i z5#V5)X`x#jB&aJ=g_^>yJYpW8sB+Z?vvUejAAq=Dtd!W*-ELYm)B2{Hc$g~_QfIjB z1Z;?a-m;8=$#&>N1i-_`icYI)K*x)-JWcxTLOi-6rB%3Kj}AWS(mjW z%Pl$S!&bCM=MCQh6HOZHIO6Zrd3z^v;Eel`Iv9RGrd~_&wbB|FI!+kBSkbfpiNZJb z-IPgkP64*LEVDkLe)PZtFc>NUaDtX>bx;_96R5gm^q&510i#a9Q%1$YBWq{Y0B;q* ztwZpcz1p~=FM-Y}cUT09SF{n4rnO}+W@WSn6|PidbLZ|v9N?n@Fd*RE!oVyDeQ5!m zIfETD@O7afpG|*i{{*3rs=>*NNO$a%pGOn-`d){Q6~+m3q{g91e2>lP!XGvQu4OQg z!O<`uK(NsPxnK+0)vi@QF_7Hthr@Pt?enj$dMwGnIQn?9@=T+xWN{LZ6DVc}(7eDY zn>j2)>ZH3D3i009=IZ+n4OQUQjGyowU2WUC~|={^MyWO`KHPDdb!PXUY>Pda2h zUMR1)!MF2ddo^|qK@j71(L$xPBiHH$4K~SMVXQ3swuw=>zrEg;iU7f@XLRlakH zVX)a;SusV1kP24hgJ_sDsi7)uksW{fden(k@jts3y`5)X{@)F0FJj^z*fcybRXTZw zMWFjEnMv_039cqaYw6ojp=KsPV%rEBI*p>hg@wT;s8xUz%_<-|!*-FeDfbmoVsf}- zBJpcAl&5pcYUI^o;t+3qqoeN^TU4(2nW(7Lhv1=wS8q5~_c~5;Ym0jPxP1zog1N%8 zUAGm?i;x=vb5>eX71M3)MW2da^Mk##`*tC^pg1LMvvM!)Yj`#?ylOpLo&A%0klr&b zO7y_hsobnsOM1qjOFGY?>X>{_MyK!S9pn}}kcNyd`oT>OrZq1b3jhf?2kP|ie7AU+9OA6 z5WN8RvlnwS2YAM4+?6?ZLq8s9IDwq^3%F{yZG|2IS|8dO32eY4`L3CY=)tp~h_3cP z_h4l?Zv@;zZW?WRC7`1CqQ2fAf+m(2IXM{e0mtYJ7&WJ)7gMgQ&zyqSEI}xkz3&QeAZ@)p$^gT@dS-3&X<*OT(&>Hq=(ie{@ZLy}Ah@s?@e!8=A7mZyR( z))bci9*7HXZl$-oh-778?DnMt9}fv^1ONryftgt z%w(1Sxd#&!nAX&x|NC-%@A(-AUkmg?JwZO4?<#fb9_QR895~c6?ZHJGc%)BYCq9yU zv>a%@I2h6plT&P?x1}Z6mRyx5N4`ZRzHfh`u6*hq2p4+qLA7nYf8UVEI>`d2%l^ah zK&+3quV5u>-sX!JI@n4V9~#{*Nbr5ydG`X08gFOrp#lvl#Y9jWHu@rk>F~VEWH=r{Gg;6v!df> zq-)`mwOW2Er7>kU-kx$;^c^DwkYl|;U96LYxTAZSdhV+80+@@z&-!|Risd-$d*&fk zu?CH&x=S1inKCxeqM$RsR}7*CpnUT8HeLr;#h~==eW`KYeRnA6$^<6)0OLE7RIAlT z(PmyFS^Uz-jvn?<|1n4-JttiQ^Kx862Y`#@g&udGCE&r)_g#1ZcaFQ>>uSynTbhdV zec)9DI7t}xPc(qZ)49ll6ut#4i)fYasI|_c{R3_e761q5IgF#D 
zn82B7D8Z;nt1qq(D&&SKE-_F^Xm}b6%A&sk4iprG03)y1QIPCbSgWf^B5fhJ6jyms3#8;43%cjN~Mi6qotvw-|7OKauNA%bNUUV~|2uFfYILvi1H)neivBK-5h zxx(QWH*-1zzYiuq6B42nFl_4E5SQri>Ky{m)kaj=NoZ!?%z1SV&zzYTSRs}nYdX8W zVnz3>^u#25R~ikyHmV7!YJIeV>5cEd)oz;PTadm>05I!%IQ6wa$+&&VU42$xTEWSf zBraV-|6CS;2;IHs6L79YGa|&OH=%%bP*fl(}(ESl7?~f9ld6 z!>_o1_T5~ZWofkzh|h(9@FNJ?|8n5F!PHU&puFY9A&j>J@4a7WzfQa%`Eo?e26mRW zYyH>Em+{j+1sIGE$jusrb0o1%&n`}iKpX|>>s4UsMo>w2EE-TsYuOS@!Y9MX=!0W#PZ+tGiHKR<$BSs6G`Qd1NH>L@$v`5TV3 z)PEUTK-XF6JQ4j4N`PRSqk=Wv9r6ksbAA8q@G@H`iS5K^UpzgTI19|_U_0|TY_Q6) zAAi$*jKzxpQvvv>rco6cLHH3Qo>ErzmBp4Q(LfYF{0wn+(ngfsZV$#SmGsZss%P;@ zq`ok0f}3FCr*v2U8Ex!VJ8rsVxqksX9(!?h_pSh6#-^4$B*3og=rMl=M&p~AQZo(Y z3Al_14L91oJS^~Wa%@b^deCmw7UgK1Nz4`GVEjdWcrHd1y7HA8O;!rOkUCt}1inwO z-)n&ee{~x94Sls#1ghDqkX*U%StK}ly5I;l-*Qb~x46>pLeOme|2 z%%-O`>zM~tArvUQiErBGBzZM0dnf-Xr6yW4jsN?lVydiMM$)Z5K9gB2fh4tHv9_+9 z@sN9!+R=V16)tIgEx-51ftZ1ct+1Z6aiwp+R(-G8mglVrg9DUPT$n)ha2=@+G!cpY z2^Gx>5ARCafVC*3YEzrp+%7SxhjvrjZLvn~;WPKi98k?XUh4ne6G&q7K6ftXk=1}1FfgeLNf#(aQFFR!9L^=X0b%6aw99 z6quNnJ>Y%)8sCHVVu5dvqf>#~p$fD-9vvGE^Vii_-ZYTEyInv%0**mB`*e}>{=G<0 znNYL4ip%_E+Kdbmk-IY502R{t&<-0h1%EAE84aWIkWcqNm@0j<1J@)k{d&IK0Qn~R zB~yCcUQ{y^O}d!9GB4;9=0uAW=u`}sVS`%KbrYmwc^<%h9QBLYa&z@@)SCvuT@*?u zJU?9*6>=cUDJtKL4_s|1+Mx-Q9qa{#=>ZMiTa~;4hpU$phRXnDB{Y85&v&4C${(xV zO`O+AQYig*RkfBe)S+pcWH=b$q%@5ij(Mv(xW|h#L-dJ$`#9jEnokDR=Pd%P1h%bO zyQ;9UPNuO@Q{?5|pBeg8qu_Bekb%so_|oGvUUHbtf*Jsf03hG|CHfw%j$gsXj(VaZ zXFE4(-}twWflGa!1U369S-ZWx1*W{m*n?{I>!w5z}QV`7$qAqGQTi7%E zEPnILvS!~?NB&RbUDsFgg=8sG?8IX`R6$Oh#^ndKr^9WsMdDp)^pv|KFK-7gsq8Za z5VRbuBd4Q`#eV>bIMQQj{c`{6eyNU{C4Zn`$fHNZCd9WF2Eb)w;kY$N1l7iBJwz9Y z_vocRx?coRfx9%Uy=nI!X4aaj4mrIo-p(kd&|xs69qRGdlH)!tvrnGPEQ2pCfpp1hyWsdWAFi^OfW0B&=BN8 z1jJe7!kEDjfvLs(Dx$_Uh4h44oItvv2rPAhj-5@Rc~yn)Cphls%f4dCuGEq#T8c5( zfJG_KaVIsLdAG>XYu&KYXM5wobg(7yd4~7lC(9uv=5D#UjYsLi20wwz$lHrw8Umc? 
zz{6dXVoE}6{tc8i=^YfM7a%uL`}?s*UHuhsssC@Nqc6i=#4!8iM=lib_yM~fMZ}>Q z+IxeZjqkI`$LSAkj5oeU-%fHg;RJFPg<#1)R*yA1T9Gdv0WxtIln-Gu3t zcp+=u@tOyjBkmkq+m!w)ZqChvKnw&Rt9P9{)GC`!vaKl)>5%Te;;`7P7Jh-y^_zdT z1$v_dOaEUpzn~26z2K;tHPk?$nlr%#LGe>hN4Y*jspln(XK>pywAJtQL+P+l$0?M@8N2$*b0Y zxi_e~two+Kr1#|L5F8&6uVvpW-y`23N3;Ij>K3xcZhtd1x}2-l5AXX6ZofoNOsKJB z4KpP4E`K*?YU4?lA`Rk}Pl5=P!^(rm`?V#i&2o}OeNlUqRuNN*Yk#;AR+i7$O#NV0 z|GI-`#nRbXwOllWI!f*^QBV(@+_mBw#FO0ylSjf9W@3#U=DJY%sB4GC%iG9upV?&u z3SVa(*ph}7As?h0_lCR5-r!OlXcs>8ZU3@OjMjxFec&xZsWHHp{R14Idwm%VW~cRk z48wB5>%Zb!GIVTlA&p@|uMj9NhfI&hW46swi#}{17hqh~KcHpyZX)lqC}u^YI8*X1;;*guMrckg@;iEeyiWA3D~PYK`-V{oWn6+M&r7f*WyH zfROw7jJhv-(5QhVv-*64MN-GEb~qT;a))Jh0gKwdhBBV;dvvVJL4B4Zz@-Wd?Pqd3 zKq5oQ*<7Uzyl+ai^2etH$wttu3pxl*wO{zzXsUd91NfS_fzF2aG5nLLk3L0 zZ1Z<&$;pi`uimJu;1?k3LSlqDK+|p_4mlpTKoA7 zL>s~V2k84toEEafN~r$#epqM;QTSLOzrfBc4NWKlB*n+JS_7NYiD_wUscHvjcCL|g zOCO-Gi9vc#Pk>Pq4WyXHx;t?M;#UpatbDynrYN2H;n{T$y1Dpb38kG!mn9e_nrMVG zu`t?13}#Yj>nFHf=QpWF-Hag6H+FDc*u!1i+$l)D*=-8mP+eVU;*O4vt&hN`0qV5TU`Fs1i^UE&*?ey2Q-^q2-`OWsMyBpQ| zq~IRJyVA^(NhzLjIwpxOhntU|C1+2-enikd_BTtT%{)_fg$>0T{~!awbB|sR_Hy=; z#H8%i*|Y2DGj?kv2&TMI9-@LuwtCePxBzWvIw zq%ED7CwcZE1AAIn~EjoHwTwjO1{ZaCa5eL(f?RdPx zJiz{Wq@YJ;re|l?g-F7XZN~^U>)w_7U8Cucy0_#+PRR4y#6oUns_vAL{2q&L z^0eD3HAosb&8%T}b0+!11>#`e#sYT=TREnqmU7Tb{SB6pgaP9@--3odSND|9S0W;u z7jk5t{$DC?=a#BTdarkKT(x$7#yfTr;7{uGwm(W{e{ko|&K{Wv0$gcnl_h|?t*B#sWeT!riin8EXj5ED-`RC%lU$2%;7hyV3e)6z^w|9@ZS zsai!XF{9RgZ->~u?GW!Df`7kcSP5VE+V_Xbv5u)2-^Qo9$11K)r0NE#TwPa%EhlTUy4<2EXW(njzI&wp-1rg*E^f70CQ z2PN)^#a)7;t}oJV(MPUQs>|LiZCzlD>T&c`Y^-4+D`H;yk7{Eg zPi%eb)SClTM;D73%S&=|@IO9k@s7`teabqh>X1()XZysX>0FjLK0Oh>IgKXn!u$R zwHX;djqy7yssr&e3>)qDI$X7*jl%kBLMKvF?qQH|>pRzG(Fz!1T5nKGsn2ukT|`)K z(zd__Ej-Jcj~UWGoEL{)Sd95Y7;^n@3Z7%DjZNO19S#pmhR6Fq6%CmooMpF1 z$X>j6ep_fBbFwkYTG$iW5!SYpQk=26%Ok+Tj={!jR;ncRwJ@=7FTtmSW6p6 z^YGKijqse(1)+CV5KQLx{UMm3Sv|EhI}O^Sr~6Duqd407-Ri(QRfK@{F%Q? 
z(F`}93VKPtZld%yYd?gIq|Tkomw&`YS!WlTx>bB@r7fsy&avZr*VFYeby+EEj5S6% z1*3~e|5|d(X+J-mxjQMXEZ$8C{fhBo?W8#up7*}Q&}V9~%7j!U+Y!W7qUgCedsiWr z{$_vkZj2I%MI08?y!B~=IRxW=qgq(^D=VJ`eUO&bDo=p=z0c5F}3mPfvx8`?h9%ziAU?JZrCX|L_OiYlovZ&){cXMz35bF|^!{ zO!t%4e!FyuTw`sF-s9VtnL@sEb$!F+e|Bv;G~MJ}NaD%a-yV{!90Tx*FMo{=`?VNL zWjUGr?1C@0tFXV0|NT2#ixqqXq{{_dyDg#dG>Ybip?HJKH$~~oLn4ax#N-{7$;G#- zq$zEd7=o}A2&lHm_%q?wFY_of=rq%=obC;$*f0lm>(gfETQtA>_Bf}S^EpjZo!{3L zXK}d`%sZAbP66V9M-*KVu02yeJl4PI)L{E>EzFhT_q_#m;X+Kw;LsLKI&p?>?!>zD z2|9s6LVC7(fm2^SzgQ#8;3yFuId0E z%jF$libWl-`W4x#a)Nwtis0`I!*4BJa~LxJLMfW5USLqnMZy$8Bdife9C{N;6utA{ z4i#jpJ(Of$;MUW1H@we`|97_k&mUauU7+gW|6bsJ4p)UVHz-Bumr`&j^L*7}@k`bny{O z-0OXk%wk)M%@{S5|Lt_EjLQ20VHW*`_@z%#;a+pGDc|pJ7dfR6qtv5;327AdgLnJ{ zC%?lxpXe@oXoorWdP>xdgkC+uOtBQrmZGbnbJEh_#*w#)+`<*(%C6V(hTCOmbxV$8 zyii9z_+{b8IS`4T;<3K2Z^^8vXJxxZ*&f+VvR!EVP@HM>dn+r~9TQ(^yON_E9epK= zcRM|*S?$I!!(fdh(=PL~ws@&KJqFJiPAp`wq`91nM%J8`tYa(M16Q7V^TH`5i--ee zsG#-r0}>+eXZ)y96y7TWL5%l~uoyVMalcRZYvy&Mpjnm$l(~RWUb}$E`X(cXIJe%8 zQJ$sk+0P%|uOtW=iyHEF(vu(w22YRc5o@{9;QzT}Ff$km6|j$K(J9Z8jwa$0>35aF z6D4MnT{@PX^jcce^xBVf|9t--R!mj)V$>#@Blu;3oXYEG#YqXll14Rmkh2ZsxG~u< zMX=$$8-*#fou!!x#ynqdNKmVIpC|+#?ZN`sRU+hkKAm3BwQ4<|6CZmY)%`cbx!@a) zK}-UBZoLhgq|sa@3OuR`!elW6d}XEsBzbk!u1j7cku=HX`o4i@l5o+LoF6LNaDys_ zCNPzjG`PvAj>?Dd&RB#cs9UV$uzDz}a%=mFu6B!E94|F0Otk_F;VbIcs>?dD6>4NA zEB@d0Lt^lBc!@%7e0Y*`2{f8FKR(FtzCVnx-xOL>lg=gm<*e%OS6%2#`z_3|%(L-hCH01o zzC~0q!f()sNC-Xzjse=Bq?IB3h$ZN+O&zy{WU5t~Do!vPrF1?z=7joPkk@Jt($~G``bv zCVzWk)2li!!eDd3cBsx^FZtEQTzbDe;t)JbKHYk?zLkf&unt<74*S_*_P}4%QmEF5 zzIBq{$P+xKtbRJ8#`9s$itcJI zem2oN0Zsljg-F6L0%_x)Fx@DAc2%psofiMxP2hc^NTqUXn$>a+B&Sq9R_gA@bZAzb zbm7o6+4Ya!mrT7sPp8AqleccIPAcL05QkzZS5|Ii1JSTgcl{Zk;znfFcseqcW4NV| zO!&Ww02zk<(Ni|xf3M7BwJP?`PZ^(rhu0~%r}uW?$CWpu068_ZbaY~ zt}zdnm!il*XENWgl)kmhnr+%cOZAWrf=w(K&IRIgvTPxpTknT|8-ea%v~{eS zgmx)vJI|zR`MU9t_|yG+3R^sE7C-757u_G>M%fV{WkIwCN5{=#_+ULNC}v}3Yt zftQUxUg&-Q%GDS9&|~Aloy+YsX>Pq$!;fuPlzUN#gr^Ga>$^2tKRXucAZyhE*xv2Q 
zuQ%6AkO_jF1jR8@5AOIG%zZjh5%7W45d_owadOqVx zN#z}3_4A_=EebKa4d?SVDVqCVVG$@YdP1{YJHzMwz0kjM4m_`fUqJ4ly5l6iW^6^D zJ+?2>ia_goVG$g05%a^o`Li@+Vpkw zEz!z)q;eWErvBX>rF${4^@4(-(!+j=yCMf+ZJNS5@ZCjQ>$1I(yi#ESy@P7HFlyBv zye!)d@|ed|zcet|_{gqz?ZxGA8_$+(9s^Hzo3kl|kBja$PTRzZFK%pm z6Sm&?IChxfrAX7;TkO%!J4*kxius9n{zV~&NOTCYCx;2MR@@L)=Sg$gg#;vRdTHOCe$s8`hW#LQGMn47GRu6{ zqcI($CwPC>qQ^DtJIz|9rz4f-NN<5o{=qIRR^Zs!w`b4aCQE+n83fTrD|1aC)O{K* zK0XtXOV9}kWR!z6JtTCL{LmNG7}9a_t(EnM;^$ZS(bO}iP1C9h1cqJ+8R|#@&z>pT zTS6q3pQm=a12s;&gpa+629#{fRRRm09W(M`$9gXoshKSV_LcAL%RAWRtvIW{bo)*_ zZq;)kUL6BocSoO}z3-U{-I$4|(B5XEGs8Wf4NFMGxzScwGJ07^A@T|jz zL5^tsN9vtChHK7km0ObYdhE}uyB`zU__xLtdOq&N#CfVA(Rzch`B_M_Ihh(By7aM#pky+$yEsOfC)9u*&V^j|}k z#SfhkUp+a`j=Oxsb}|J%w$IUBUe21mneNt|w+5BZjsH@EKnYFO-J2=>VDNOJB<&?b z>B9OT7cu4#Me%yKBvCpbYw#U3-iCi}Fjp}kJ6LY1=RKJ!^t#hk?degxYs!}{q4vQj zG?UJ(O9bYr5`)=EbkXx>T704MO!-j;WxD67ks^d^_X>H-?sD4K>s^OpRHHVW`a9N;ZvTbm8D z0G-i-u~4y0-pe^p{GZ7xkQPYU3RHd}>wt+W?;FWIIC!L4b*f!c^KNKi=*S{7B%sa=FUkRozrE_FdnX#Q?#e) z?iKmCQFfA{=6$nXdrS1Xu3pmqe!>T4(r<>R*lcEse`#Hw7JWQLW#fKVS8{9-O9s+T zJ+zDlnf|LIy6w3#aZcH14DmP|1qY4Oz2n%#$JBv|Pd<9$r2K5WT;hZwmrtQ^FDNZP zPs_VIlX!Y|6MAo>*l_;}(C|%epZ-a~R%6S$PjJgN`w`ee3yyU^g_4|cnxT%=GKMD@ zjkWBgtl39cmxj`oVy-2(xu|}%)c{XPZ`{0-ySofe zV0W6d^EUilzjlhQuaYy!LcyV^p@1h#f-!S=?th7B|MuhK`|FxqVn-_2w>OMV(x$bA z&gyM_xV(EWldJ;I`x%c@ZT@vI(=k7EOv{TE^H5c$-1XX3zuf%A_!*xhfST#vZ6o&L z80u#(<4>Qf&@*+uj}SQsOz3j`fq7Nppd}{33W?$01?pep*%r;^*;clLkeXyc4g|w7 zt*~{Pd247n+j5I<2tR1^6c)ZLqq&Z;>kXy7cG=;5v(5#C$z6YosKWLju=3!&%CTaR zIXT?qeXe&gxM+*pg|xraWte)3?k`+~yqOtf^fu;Sioct5+Vwo6i{fH0G8Tw9Em54w zY>0EKX1lfm`Ct2|b1p*&Q$+dlL$JFdU&j7~HuzLv=~X_O*Vlgn;aKwve6pY>%{(XZ zCMv3%()8iOAKPqT&7Qd5cksPPyR=@qiTUnT^vLbLHI!hUe7Y_(^2Snwn%!lB5k?BgA1Oh@}&$~|O?7CpVm((TNgk>tHV@G~K9VeEYrB3uYf z(WZMY`girWGzlg8V|ODQm~n?>OMOr}h}m25AGo6&C0C?j(qrdM7tqL0)F_QUYkVOq zP^^d?Zs*Gvne?PlN|n#r%Ga0weSZAUpGL?r|D&m~*1#&FXlk&d+x79!EnHi~Fe!b$ zkA$vz?Us+|#F9rb+6;9@;>h`xNaBeK+%f%mUsoN;ty1A;uC2)in!c9VV+!1ur_Mcr 
zuZ67W9?9pcE%Yc^E?#?vSwtWy1ecWtpkD-ZovR^T>_A4Vp&H?_Zb{z~toQq=r?vbs z)CE4QTfL6Dis^uA%zY~RZ8THYh*tJ8H54Gx=ANQ&hKE0|< zO_&7mpWrJ|Xv6tF_t0KE2vg6BrhXgJ_Pv5Ds zEGRo7g*+GgmfcUQ>lIyGYe31SJ)qo%+oln9uSWVN+vhtWV=#H@#G(M2r ziNo#x`pN+?%ua1Iz$#oQJ!9%F!sXou^9o&rakhO!^}#-&GZ-H@7$yb{I^TZsTJX8V zGyA`M*s8ui_p8-0{LlO8fZ|g6%)(M2G$VZ1AT=VYCz+us5~BJ{?^ z+!2UKKN<+ZvMd(Zo;sR>Kx)bw=%STFL}~Z z%hrXrr(-xxroJma?0quy;ox>h#*A^~&q0g?_@h2(>*<+q8&IrRAd>uM<_@>fXDEKd zFs$?z5Hu<$G{?SharC!axV|;O;+~*VNwuZ6`FuW?kCkXQ9Qaj$l@Yx1f+YQ13|xu^ zXn(aVwsT%OT^aPM#?dU8mOghX%*O>khFQlZNd*BJ;{eZ)3cqKMo}_L(NrXJicyZt7 z&dcT;`FZK-_Z0(QcXCsB>`MO_3%TBw02RA9Y2TNQ|3}zYKy{UFZ-Y`Iol1$KC?X{# zEv+Jrl%yan(w)*sh=53!2ugQIN{h5~H%fQIx6jO-JI?&?THma@E3B)TQaq)WsdWB~x|U#aU*ChKtyeFPp9uq&9U<+O>pFa<<7Yyzyq*Fg48X`45dTOqKvUCyL`F1ZJUFFPg^_GzBa0ODECxlO0* zGuusOoFVzTmOyM_jXYE~VGOiioB-vM<$^}-e{!qI!#anjFMl>^+?b~sI#OXEQDtP; zH@MR+jFHv;j)sJd2Hm4=OOc;z!e%@2IZr`OHmc5`NwdB7gdciq7_y>)nyjdl>@qCU zao5B=wf;ciM|t@pubOe+?a@lw^X_Agk2VK)g^6?AtthJtdGS@qK7^E)*j1$B*BngV zLcr{2HcoyyyFt=&gJQn*m9yo>R-vd3FQ*Vc>YcpPl(KRw-2U6yf? 
z>-We1Mj@ayGzgUM@vmugxd+E|GS}=icyC_Ly^|W9 zxcB`N$xhXAF_@IMDY6$YaZHI>b5q>FsNeqvKy|2Hp7O|dBYt4_Dp!lgYlB8Q7G@kZ z`8uqP-mCZ1Jw;Ge&G5EkvyHBBB!9BM^hERI*2|^(jd}Cdc3i`Xq5R~x=lxJ>&FteY z#9NH1o=k9O>(i;nSOTkAC!6}#wmdG2K z^X~B&GQu9M29lk5=qq0I@CL~RsaI}f3uG9rZ5dgFDkRs&L!Do^>qVK#i)k@p&G&|k z(EGV@w?Fo_SE58PK>5#nn0a3+A{WrmJQmW>%sQkS+Wa#0IVp;HG#Ff1PbVozzb2OpgxfHbviUsnHusw5i z%6xTBr_c}P9-@MmK~E60ps=2tX|ClH_g*fvL%L_S!FxLJ6-lLIrOf8ws_sR%k{yCm`=bkH?;|~q_gSIwrTv~H znj~{RQsDwfmv!ukFPKgwlx%lXK~HUt`2yO3<0)p!#bX^;RC5Qb*sQI!!-T6YD|2K1 z7rAsC)p4G^!o})0?S5I3k~ZP5`6Sz%a~{9z(?)w_;q%b+PdB*+JsJqyTbFw5(mfJi z2zY1T*eT-|LOaW6MsRch>Z#wP*WheFuhNNtI(f#(GF>;`(*i}O{SH;; z#PI3jwaajBH|95Z;b7yLq@DXTCHx>1OGI6awujD(ka8B&JsSg+7x&oK1cW}-WxA`k zrCsaqjgJNc4I(!_e$#Q{5&9dxhq+U}*><2HzM@WliV}oImnMmJRp2g`F}xZ!?tKp} zFO+PGQ(jygbxx3@eeZA;?NZ7+(Q6pIl%|}GlL7eQxPSASEW7#&&?TN;Gr1`)lD7kSJ9<{ z;jWhjDW)w!#7qJiD-`eZ6WbI)VJloe%vV>_z3xRu>9#c`fBf9=0`OATcYM~4!ra@adK>l$i+AE#S zC`>SSXukG}AWw+P0zz*{D}sc*+*nRjw>3!b~3w$zyhQdMckmyVuKk?(KP>Bs#|aR2eh=;O0V zg1^Q2;3d;G>+aE)4Nq_Wa?uQw0<`E}W6tx;3%8ot$KaBWh^MxOhJS3rI9_VsYQF{) zDwoANdiLtw-rz0Encfjr{jj}CWDn-fqWcNB+OepdkTAIG^ApuLqG0Msrnk-5D1yu{ zo0{j^6Ig#-W}`L^N$`)oG)0o*Vq4A@x{3D zZMsGvJ2#$LlHS(m_{NX)M4&q-Fb00Skr35khV651@8#`=_D+{`uQj#wEDPH`Wji>m zcRx5@m@0EJkC9x6%j)lZbbof2c#-O^vwk&Wx<&Y=^8mSy`-?9i78O#x^3anWyVrl6 zJb!=4<7qxg4f>lD4oXhTRdVg=xaWS^! 
zqM1-(b`AkOTT4KFrtY)EXd(RsV|%Hw)zfIg*W`hj`oRKn15>*@Tmgj~Oc*NP%miJS z{b(okrSN4+9^fxWh@>h^FFeJz9Jt8$+)t*@4!^=SD-Ijj4#7L&<5^GAm7hr~$C2NL zi2&U4{kEHYH>G>{?Fu4SP2BR#G&_Kz3ZYXq{Z|ou+K_;yr z)m4l!@8vd4xYQ8xrsK16?wVAtyso@e#K4}Y;%f!e(eisEjMn7Wz4c%pl5xzKI*UYS zpouoD_+syh3bNmrRyY;Mh{U-bekJmQcc6`&#LmyGs_eGl1yFE|==*&OH3XsbU3#;V zi&hNH$F>*?&`EQdJL}HetVDLHybLHy-Z`8K+@sb^NSuzWP1dO*%Ve)AU1cTwxMhz; zf{jjgU2-bbYOjWvSLmU}ekX9ZaED~l20zhs<#GZ1zt`v=kGdZxZ`-2&4G}mF1o9jO z!KDM;rsdDR+enk&5{eJL1`g3QU(6};r`XBswpYU+E2yH-R63w0Mm;otq0G;5dz|r zr2t!#n@zWdcGLzsTy)2sbFY&WI)YR_f;_ad)hlpU<&i4w{LMg9bI!{?mu>dj(=L8o zUeFHhewABONvTPL9eU02)k)EQJ?U#l0ij@5y+$(o<>e5*0iMVR|8d(!xv*?U*(T-yKw31;~mo!B0I2yREinhq1m0{GdJji;z z6)TB%%bCG!^+9h#OSyjer4fHS-MXB(3afwg%)#at#RhM zwDdrSlw=5bXPP$qsR>YS%eV^(6a$$5OT zWd>RlHYR&DB@T|KKWf~dxA00di{pMC`<` zqdWtt!|boLmLQy6TLu)5ln+?Pzuux(GkdvQXTN(1g!hd@w;zwt-J0gVnRkDBvVXMG z7DhujyQCoY<+(0Bh;nmFcj>e3W%@z?WEeOym7ui#fJu#oGR_dT$1eQ@gx;K3ev)r) z;%O}m+ht!#W@&);#R>s_%-U!*7xww3a5f~B zf6S`t?fB3NF)2T9a&j4$N^iBtO^553pYYy?fs8kljLdJ?nXX~!V#JReX)GouUH@Cm z`r}bb^sC>^FV_PV*m`DH{FW{zJ&{hBn~Qr-Q0dald*@*=94;{w4@zwI^{sXv#naG3 zffe7cIL9_K{DLgugWM_|5^oZMrTiv9!D{dtW&ihG|U120_Qe~&frU8=sbltd)$eN6KQ0NE~kJFa=d)YZ(- zC7#BThJswKFG9iI({ORKcRHU^h4vKt4s8Mmg=M;-VnLV26~oa{{8sfw%XoFx;!mhl0*zM$;S<1UjI`%48nJ z_3rSY1tg;aCNATY6n44CL3C;W%=*{t6ZBu4Ey0B37JrxQ|2(3jmz8|*_bQ1YGS_+& zKweXXSC}H|qV9LD7K-V{a<=K4TA`Bkhx4DyKRFzKjP%FW4^Oey*awkD4*-ETeTtc7q)M4&fF2_eX6bButtNoAb4g9O$l8|{UywrZ(7d`&+ z+pi12IoDL^qq^lWYb%6@wn`++O>#%wsSVt|l>5Er4im+hb05*IA&v0bEd`1T)J{N- zFS>U5GXXXPWPsuGaFtUu;hVYIo1gvrb3fVq*R2NY#3~ACNzcl3c|>rldFbaFeW#$KPIht_D8j+)|D)Kv3ha+5uGc)l8a3wa83IGNZAf0Q)!NWTf~YJcg;&W+Vi z829w!48gS+`(`N~0LlkRw1+eUV4ML5B|>*m;;r7Xikpd~q(MXZz3FvlV(I{I z7R$S!2-51l9@ z`ZVR@&LkBo{2p^nRP)zN64%T3IiQ_K5Y6@P9}ulwA)~Lb$FID_K=j#|?D*g7X7+_* zX)n&-sNmqSfj+C&VGr-|=Jd5hw9j`Qhcvvl&#IT@>WH-qb~ieF4{SbnO#vz_{;h9U z8f2e<0=PbrYt}G#z_8v|NR-h#30Bze(pY=)=-~rE=D~LHr~!@b&g+(5=v*gZ;lHZ$ z!d2WcAw69P6fe6b)Tn3O<4;>f=3Z@Z+0B0FD?{(&*?u22hq;a&!4sF)xOXRa#nlSV 
z6Bn6~(v%x8gB|1z&`LW>Di^TD^2@p@F#HWbOd-mXVi2qzH^fCY_{YG;lVPp-6s` zxem2uzZtfwOLy6Dx5tWkxCvjnJwLDfe1Em@`uC)I+pV5ATo)+ZYOD*i`peqOiOE0Y zKb)@~e|JzQ{T%kJgviVJ=F|%xN4BDfiC>Ygs=hv#!{aIpJuNJA9Bz`klU-5$28}Zy z@zV-1e>YqzF#Hrrw`AKnQBBq@?0aG4CH&L&!f^f2MDyk>Fb)!WtE_5-=H7{)Q(h;Q z7Sh&=@iKh*)a!f8N&{y6oZn>p!rUD%|8z20P@J5#`c+5B9uQTsBukdfk)S6F}^y2jYt z`i=%)iL1H=MF2pdj-vTF?CJVs3w9%UfN11T1M^sUu<>18aH{Atm&*ZaxwyX&WR4GC zR+i>)-KdO!va<1mDKme@=Hh97Itd6OG!uT0l53$M-=7a0dT;@Md;eImuU$MEp@niAuPTsmKX7+zfY!GW*XQyeKOdpV+!^N++#XZFEQ}UM zg?wGSIuZP9nUba;?l%vp%sLflynO&}sD$E!GIKXxXwrP`r6|DoTvZ{P{V1UEQ}lS^r+w>+sWp6pRL;Yjgktz)g!#>&G0&u01~3;nz04#V=V- zj9`L>HrKBKws&NIAu9;`pu!~mx-M3G%)eB&f9lf-DcIq6hpSW0kG+hbrq-|3&e8Nu zmDb!}SXH3PTJo^Z{ow(%5cYvQ25!QO;i*ZR@~QLIk;OaT)J1W2dY@UE6hX%~_AyTE z*mZ!Rbp~4Juc2`3Q_Dn7xQg(Ad!>ARcfG4*=4~_L!5OLV)Xfm~`|?hI255{&BTiLz z?)xrm!fs{O8+SC=SzahmHA`%zH4g0;6`)JTR_GQD-_%cp9v_s#MwNGSv=4+hSJPz; zfuTb>+UlZvl<}hU2nVy0{?r24I)g8IA|`T-q^gPW9=W+K8)~fk?c$d~YmJ|^1YAKb z+lz5V{4i5_>S^Yt#R{lx`MFdeTY3%6#}6c`f4W}=_x-dEUuX}(t!K2)Y3_gP#9v+d zopC@$J(36v)lCEM7Mx}e1B?J`_ct`4r>@Ype^R_+jy{Frp_MJ7rEyEypy8ROGCz^Xd446f zyxZ(}nRjR8hPp%TAN|VD{7hOr=0y|D_fOnpO*PH9|AaWsVo1PKxX3Uq331VB&d4AcoXxJ&S_8T79~)i;4S=6?0MAuo898;H4cMZ}%2j0>vyy&!EjqI`1&EfeJp&1G-S zXiE^l{J&>RvXutoZP%v`ur!0T7wA^FZ5{8v%pmUIK?9q7xF(CEgp0qBTYehW#MAIl zkOuOzQ!YzVe&O#i+BEYjPhns1vL^nTtmiTAjooFu4pglr9Q6H+EW#@hoGE3=#AIKN zCvvs2zkZFlW!5U)aHw{HU;AoQ%DEFn$<;M{!k93u?Wxz^Ls19nt_2i`oY+LSIySL1 zr55#x_gQt=;XXhH^jY^>JQ2PP6bgdNma~l|{bD`0fEWehUNwQn%XVm^C5Y-sL-aFr zV-Cwrkw(9$zw+JKfs2$BsgxJ4VE34^3bCC7!Q$4VKrsk~M40>DavlZ>=y~u{Zbn^^ z&gk&3=iX2f2g2%nI0L*X=mtSjn+95P96%7wY#x&!^#ex=C%?RawsEHTQjv*^;SLo_ zZC;~qeX!;|@WpPH2rAiZr>cBaPiKVIpDbNMSJ9kE`FVrP;Z_;Q?=uw#KlAwFZyEm- z1_SUTb!YkRaldzNKc`CdnKITmn z4+T`9$?L@ni7bq@?oKq%lJ@FK-(%xIk)5c(82J{v{=*YHqA0MYLkaY?a8f_MD1AM< ztRjzy=xd;Efj*b9Xu)#4TfoDRz+m(?mU-eTbQg4VcU6@aHs^)wJinIKHfd4Kk@i0a(2pGC0d^#mO zI^hS+-(h?9hu%y;V`H0Br)1hkG!pc3V248cEHbLkH{!;>Z34MM=Y?`*v&nli$dypN zTy6Rp;-+M`n+lf15MlO1X=T-AJXLDEjL1j(#y?n%`mnYJu3l}p0znRlj!8gcHZ6MW 
zdZOnr>M;~Z+&-4ExF`uml3~msP{4F+&5n*8yE7tKjX2i_Lio9botU7h4x=LTpUv~= zJhQLhPNq-!Bujt>VV5a6QB0t1yP&mj3nx7>U?jO@%t}a@Bx&M63w%}n9VU#S?yJ?z zu_%zvg4GNrJ5h1TD=|)f-E8_I18*)Hd^xZD<@_s@ed8w}f)Zv7ToWAh*nbn&naN|R z3r3>5$?aE0FoN!Nk9ze|yJoD=h#gHns1kh_)~WiX_d=BfZhf>hzI!|c1QyGMQ(YUJXf~aauT}<3 zTB!;m>GD?}{INe{rfl1A1QoPj<;tIFQbPv{1WDL8R>aYasw zK!V*r-nw(+4#9=wQy1Kx548|Ob6T9yb4V_Gw7u(8j6G8AdI2OE7G>My5Mg5EBqiHl zS~)KPGDEi@t5v*V!I03IpwHNpw|bhla+(+rg*rmiDd)ByU|m44FE|%K7_k}lUd4@h zf$ZrO3Bn*2bqgAaK-6$d)Zo$!yZ0~fK(JBm)c9hg^l0CdlWyQEs#62z5#8MPgC@Wc zP%yR69?X6Op%W~`T|fAOz>deUi^StcP>&DuBM&B%9@pI(6rHCPTT>NFV zmG$3vPvsmWB@pQ*r8F^?AjHsIbEYxyPl#o7+xhT5I?cGH!uBrUhbj9grtN3YHt8`M z|Ez28G$XHaHG6{!k(21;fWMom-yUuAq~%_u{qN^ViGHDMgGE#Abb;gchEzX@tIlW6 zC^w%vRjOY8>x($HVLlIjTT)X%+b0$IXDRNsp*`VcZ{+hKAPk`0d2b7@6<^%H{{26G zc9Z`jnCD^+T{ul;8J5EEv-f6rE=NMn@)PWf#T9=c$d;`N-|ql(ZaX*V~0siRW)->mqb&z6?{{UoG3)NNb7 zK3d47-Sb{66&=y3#_A1Np8U7}OxP{eyphKfmr^pzD~E&4XkrCLzW6X z?=$B=iX_W9e7bfmWkcX-$awn_Q%b2e#5DH_+*y==JLJDTR3!X|nXISGRh|sUF9zes zko>IP`K5I!+wS*|u?bq;IKS3SLEt7IYw=c!32QY3bR;rK? 
zcHa~HWYnbR{OCq-c{S^pRNrs==HDl!Tj}HO@5+zZ!eL#73rNlY6T%nSzzMZ~N_n&a zbf<8tf>S>4Uu%V^jRA{QZ3PPJG&lvu89p^9AIdzm2C7*C{9N|fe=^l642`QcU-fO1 z%H1y^ptWg39d@~gF*9Jr_dZ&xQlus9=0O9m#Qc58@WM5?pOi%inEpWw9h9)lyU(hW zuKxX5e|seT{GVq~{12v)SuC{s+9%YN>S4R%&8m9DVOyz6{nyiipKqnEC8td+cJl!5 z*POcb=U;UHkOzY=KQDAtwVF}}jy5vaQVLg{rW7G)?r77I%hc&9lDxhc_uGm4`yq$G zEmuvHzutk6u=@a_+%(lD46qg52zE~YK;wS8{UPe}UxzIvpa@drpgDn*@aMCni3i$c zC0+b$a@%OH5j}_Nk86(pumb0=XZ+)5%!hxZT*4KgI@f5IQsoO{Je)}w)%J`E%&b)U z&*~&3lt!zP@ByxA|F4&ep}%NsceY{Fuo7NCL<#vFm|lPmRGe*EuuOnDgiyS%_* zERKFS)eWf;)!ayq)#Mk=dD)e%QcwQ+`eCK6KvY26^Y@oKkBtH>r9W-JgaGZe&Bzh5 z=ep)1-pE(|AOEF1e7v%B?yqYfel~@JFGh1uV}9xGUw>-!N+TvJ2oUtq-`@#5&6ter z&s^bgQM5g2VKLm+^S`|3FirW!otU2NX9 zPl*9c295~oKGJcR;emJ#2nuLTu*f0ab{(PY+_tpKpPdJT!kGkM`xBQQpdH2x20W;@ z|DDsLo}pGY^Hrh*?w{C}Nc1?2iU#_O`}$&NSLyCzVf%a?5W%kz9$b)L{5Y8iugk*e3LJjlO!8$F^_~69H8~vUkl(}&N34>ewy1h@N>x2 zL*_jyK&k5eVJx5`g{?@yI{<_aPKp;!!LPSg(n4g#n`h^$z{B8=y)LTZF9ekF8*<7( zFsNx|mi^2owsn{sd-hOI2^$^oY|-r-po4&0>s*lu+JP}4+?MG$;T7(K3m3&ClvrVn zRVoP(cF!sUs}P%Tie|@llirZu!IF@@-s|WHG;|qya;f363avae!m$1^Vk6E z|4b<9{;)SOV_W9jXa;qybiwJsq8Rfwa*>xq_!G>eb&yo*=3=!|e3TEyB|DW;U3*)~ z)1onad#2W=;;kOn{#%Lu{w`9rXXr{rb!ax{Fqe1*-T7I#NWf8E|8Z38QqeK5fV4DZ zu7n({g}{RiZMU^)j&LEaALSQ|viys()EU5+;sKLY5AhPh4+Bma&ny#;;Kn|A$*nob zhFDzrqDkeY51DSs334;AwgmJvzd=5!4>_pV2!$hrLh z=(c-CO2m|_diL0PeAKK%RA7cm{cJsXF%)z0hiW9DA83y$tPEBnrm?M5uTcC%J=&)l z^7=xwQlF2m(4F4~eGbWM=%}fG_pS*svT`~te1xN(#a#L{oHP{r#&H(^mU=i9tL~+$ z3WEfii(%9^Z}{L!-+-@*9JrdjNQhY8F$qFR`fQEzB>p@!de}T{SNC_n@!ydO-7wGp z*G2osuk2W7B5(h7>gcNk!$~3dIURURQaaD0G~xNJ!>yA9LBVfm_DHD4A8`*vvEg|7$U~xj^ilw*y*9&>#fO+d61zIW!#k`gov){EXp@@JJ+gC`Qeo9ZCx__qsv zbbVzjL)64Xrz&Hd9wP}Oo>BNJj;`8bVjP@1vDy1W{i6H*na*9NMf(+0DG$>J^czFR zdHvm28Fmh<<6e0jmD0RwT-`pbrVngqb9Eyt-&HQ1`t0uRZGV&2Y|({2)AiNfEon

wR zd`*Y#+0n)Pj}QOP|4WJX{vG=MaYi|wSGtrv+8tC0RjR+5XG{UxsMryRPcYkL)bPmN z${qc*dMToFS5{0cTw5B6#~r#2#uJkVNpeIU>yEJH`dEPxak6`iUnN$CW<)X25BRjg ziyDQ4@%wYmep9nCa}lt=V3rDgp}v9hfG)*&dgADtTzB^xj4>&KY{dGnFca;lDN6`I zAUVHE3W+{0ov)=@W9MIwZF&AX0r}^Z?lU}q{%>>_uwyCC?#5~+QC6eXa}NyK+#!8V zpOAt~(FWe|^-zJdYUcAu5;Y?4zD8UW7^wV5cVRDg)jZc-WRq1+DSIWOHY#d^Lr6%waysqs#~L+f`}ou> z3Cbr#8NItr_71Coy_$h~xRLsL-U+#m{_lUi|7fxQsCY)-pp{2ZN_7dpN>3U7Bg$^m zg8zJbX~2VSCkS{sXJeAp8q_RaIE^s18TW;Na0O%r(dVg|%|Apu-2iD8_pK>H@Himt z+iZ-#zmb6hEP|Z)s1O9GNnL$=hppktMa7gikUEAbDGlgHbaLsQH8Qy1sE1fAoW^^Q z00ITIi_`2K$6527Gz~tL({5oe;2jv*la3wyC?KmvXFEC@xT#%R9vaJ(a!ULceDtPS zAol`qe&cPU>ZNOJ^;oHRXTmZ0O2Pv zW+rX=Hb`ifL7!QTkp91?NQ{H-9|`wAPSVGSfsnrG9O##nZ;OXI{7Od`QKQLv*W!Ga&dbu#?Jg)fR~hc;d|&zT=myAwFt8*K?8w^M z39q5(G%!kj7GKLOaV7w}E;Kx{5g~A3_$!toBngi|e4f{1{qZVtinCoAumvp)|JGcCeOdMf)*5dukvf+go>(?QZvMNP=cfZ*(DRgK1v5@9FYQ@1gz*=9fY)0d|xAohbg z8zBBke!uU?Z|c{!9LhT6WcSGj%>LK5dVNNl<6pI_I|69kue*U{W-Sj1icj)CG6SLH zSb7UKpCW)szbY`hdxUUO+fl33edN5zAm4zs*?U+~X>1DP1>tZ=QEZ*q9;BS7fo#z6 z)VIbKK2=ZHIT)O-VE*E@0D%LNX*c?ylqz~X`gGr4*zdsi!FSo91~60Xk5eA~0spB$ z@B$ttJV80TVXHK^5gK5%8F06{6%I4OmZ@<4nGNQ&c;a zz{kWb5>UPPW#_D2tw3<))aOkyO1$B#82ZecyOC0OF9N{l1coMYI~)HjSBfGNB>Dq0 zN009gKr-crdQkQ*HPY6 zQ~Cn@gqaVBL!pz3KUj|_l!tT7%faF3!m&-S%NmKa*epa6s~n!i-H(QWa&HH=UD9%# z3BHJV?lfhhx9*MsbA9MHYT-njbF)0Jo3p) z-qL*XCJ;%oyJhvyX#hZn^ogI^-Oh{u>&zP5{=Yp!#Mb82?QTy|h*s}farRu!wgXp3 zZNNKo0BFg!wce{&*}vQIE#sl`24mMbG5smP)Qx&u+>s59CTTZ*4U(|D8Zji5p|QoH z&!l#v-~@Gk zH~`53H+o?Lx#9lT)4npWz(6Cxf})C}W|lWdEyozyWvYbem(Y=en-392QzDGeez~6U z8DC+Yp4l{TQJ(OGOC3mH7%7F~0qI)A*n65XO9-F%)TdB&-9<+`L^T9^+BS;q!UN@Q=z z%FuzqJR)gIWly{m3A%c?gJ6o95~xg{US5h9HL$^n>C!8zeP#^;N$oESi1v$QDqI_s zeGQ+BNc8+08bW`V`}aK<-%mMvvy0t2IX;rfVatIU??-XykOeOYxUt~d-x=jVOC%TE z_IuF`uU0YIS-hpdCdAgW;*@QNiN|R(?V!jFty5ReOR<4B7wAV?tUoDc5`Y59+~Tp_ zLJKe7ycwkGE#2e$1CRy1@ev=HVJo&hkltKQzDy2>NlSX8YN0DkvAdq4(p93!&ItND z(v`b*d4n2CcGTirCz~mu?{Niq4K_Hdu(Qe$T)|8GS~cT!lgfCxA*7cHX~tbUC!;&{ 
z5IU^jn%Bm3RR2MRSmKu@A6dj;WcB5*K@KKNAT(Wh$B9f6y*+#cd$2DsFiBdki`MUd zmkO$VH}nvaRz#eMeXF%FvjHThSCAwHIS`~}Xf1&7@&ki4LZgAdb>Deo)S-ep+z=Vv zaB8<=rLnUX+Qp+!#yJoC_~VkWC8~1zUdu9IZvMkHR2yPZi_$}?33=mI6TikO@5Xr# z5!C+>K3fBHAl?3ej}mYZ>}dWMJ0e?3e>_TC&P%p-g^{W-A^_iyRr$*{3$Jtb&m0(} zGpXM;D6BJ~7U{RSTg^ay&j!ZsX-Bc@LKu$!D0+X(5(RO3foWU)DIko9~Yq?-?4;{ok*Xq?D zFrpH$2l(O^In{OYL$mmXsAND@2T}m-2(Vs~q=FF&Sd7>pj954$BAzRsXnOQ>{-WU8 z<7+}^kS2evZVzOpYAmVHFFhdXWR*NdyvWI?z>y(u8@w>;OqxD&)Xl3SHW2Mh3OxXC zF*bxzp$5Dm_!SCumxNy8nT*Te2UXFG<>ls^r0w^527QKCg|}nxpm@l6;Z0!(%+q`8 zsj1_TrAf3p2|VV87s0Q5iu1*K+qwo!y@;70*xmi*E5`7Ja?waV-_tlL)X9FIXqu6= z#FE5Id#+Ar^9K$x)8*az8;NwBDJ8KmyaRkVUq)BKs=nUQ>Lj0-4UWR5^RXAjSI#s* zx&g%!FOdiDhb?}Rsl%*bbgR?6>PLBqw2HjZIy!I-Z;*4>7x$rMynP8S_|~I;yCRUb zB?jjovcMl0D?B7og45t08VZ&9vIvY`xSRT_IX>OG#XGbuc62!ECUur+Goz$Q9vG5J zEMrWe+5+ncBCsWpB`nlAufS?kk>vnBr;7mG3QUm+AT$o8gL8J4br_ZId+$2W&R`}S zf6>aNZ^MVogLYld>XRCMq3$4)Ou!Mae_G)Jkc`s$baPkm*!buI6!S6*!9rZXLLmC) z<(5#pAI_pNw@NjYsA=8Z*&Quk-ks%H(*K)c)_fC2LN$9+Dn<0-0JTmP=>@Tse?}kw_ zo|V~E1BZf0Q8QeuLYXiH0O`9o@1s{&-NlE9qWkIKZZA^`MJ2OLfEA;~qLL{-j4Osg z0%D*F!r@n&?;*I2DljuwHw=|s2Gs%P z>>bYyKW8r8QMSlP7y2^Wv1jQ>e7b4e`hMUA*mt_2n?ZZxu!;*?)Tc{fK4?Z zxS)u~Gp{}g(m1*Y`;G+EY{A=<8vpjp%Z&SZqlU- zb=Xd+q-@mA8Q6*n%j%3tE}dDLAy`uz!p5m>OXG!mnqT~wQpcOIH+r9GJwpp5qLYuH zYNZuMisHZii@EzhYNU+$<69MeOc_g;`R+}o)jx62v7Rd1s5c&?h&JD2zDw|pp2Disw%1^Tibwhu|oF~h;PaBMBh0Hcf| zeh}=rfb#;G+@jVOco0BX_Of0Bc7U=cv8s|3U)HO*cT)$92#a0K@=y^!Xm*O@=nAmr zs5;}Ob&r0=@MkwpsGkXBh^cre!ZfA}I zJ0kYNzE5Lg3mJjzKJw(%s;*_?#nhivcW2hV$V`Eu4_u844~A>_U@VEY{kzH56VjAU z&X!WJ2=9+C9a)Nq<&CJp#5<6Qh~3Mq*vQgc*n-kKjq0k-1Ux!BT(|;&0Gi&ra;1Je zhU<9a+l?;MQ9g6o1--k$uwdzUEwE6AF@oMw;K+WQSx@$rfjzHmnO6cBe*~k#IK=nV z#DSP4Br<^*-xcb8!Kb$YlNH;rRpUE8cw>oUl#vt6SYVgAFD0{e4j_Sr&1rP}eowRT38~lbJ z-$X!3wY#bkL7OVDh`z3dBI5#Sz!n4Durb_C0oHWf2~GB9x;jTi%_oLJ$l#l|IW-c! 
zFxBCwS2~jTMrIm7=+8H?>8KN7?5=!~oeDUaBXD#^ML|qWe=Obd>y&3Ncc|7#?*$FtK*29el6amG%zbJN+QhfTVasm)pn%NeQuS z`XTw#J;MAjn2hw^4>-y&#S>&fP4gpY^^WIyiMmlebGDy#C^uk+UI2o;yE3rr<<*iq<|TwBhj-edhHAnLW$D_qZ4=)O%I7a#juzk*I-{OBlHuh?PzB%e|+aKqA(9i!cOW56`3ob9zKQhH!oiC)Wew68x3VyK5^ zcH^jx{2Ab)=K07}`@vT}tvQsU=kA03tBqf7tzi2SLczgcTltYaitNX;=0S`}I9cZS&gGXy zA)yCTpN>L9rrM+oufjNZI_tO2AAE%_i*qs2u&P0FBvDsZIySCAqR+5k8pc*Oor3=d zR7`VB6UX6CFc)ig`H$bh=e}6T`@t8tT{fr|86}NEeO{pB=I${A^WfyJ;czb|qg>B0 z-~7y8R;#3le_1C>3>DxBL|=0{0#hEjH@=LDjPQNYwKo|rBC%-`a!(%WfydhX_P1J? zYL!w#&PsI084M%iOlCv{eDV9Mrbqlv%(KRL!nJU1LiZWqKAv|DrW#f=w;KMZpBhay z4F;Tx4AY8_%--*PBBh@@@>;iG$P(zcyf5mfODj;&P*`U~`~b581i!S)rVs;cB7a`F z#jDJ>ohJ5^k@eH7>~js?I=Tgh9RjCCGuPD*n+5Gp59USCeiqXVMX>;>7ckrbJ%y9l zn?*;%=D?azM}Ur#UG*cflg5NgrT^=KW3QV%XfyrSKAK{UtR(j_wO3PY6aukkcmUV& zO7Bn!jLt#Lw!sY@TUYvo0@nQy*bALLjCUg=Asy88d)Ys!V+NHi`NZ!p6}QPPrX0M} z2whpNC=KMi%ygd#OUw;@)T%!%y2M{r!xe z1I%F@xz!DhsMuEMQ#X3;1!D+ z^E$XdnBpds8f<^#b{9UrcGZtLI|!1f_wAz+!;B!fUxlWW;lt0u*nWy076wAkAUI7! zrN@yc5T%^v4ZOn>YembxQ6+inw%6esAr|gc851{Ht;T;oR7lX1?MRnrn6kj$|Jihq zGEer!<-@DkYz3DBt`&@ zJ={V?2o8p;W`{RX<$B$SpiaYp)_Cx+d8co5!VW0iyuObin#icrC6kkT@^o3|cy^_e zAftEx`l6i4U{^tr5lmuC3umr0z6%AAf%lF980nuLeTE6ZLiV9m7=)-g83T6&gBeGHqNLAkb8M4DZh73d!k~9_5)^oF}aFoEnL#l2#ui2so zB6bb8ecAZBVrn>hv?2=&_A(2G4KlVVWWj!tWx!#<&8Kgw;mrBP6&M3tCNKc^jX}_d zH8Fq0FhBb7A82$t^k6@2DIgQLnkIe|4LO!V8_g~8SAHR06@titE{_AfR`C+CGNa6ge zT)9-APAk<84xqu~Tpc9+nGb|xlx89$Ov9Zj&Py%?_;p+Z6R9gqYiF$OVdRf2z7N~U zMmX`Ha(9sX8slCIFqf|ToniTuUxGgtq)uc4WNd7dPqJoD{e*uA{ z@7z+G=lPb91mmC-TpdI4=a5l%F3M|e=aqTlu8Bx>q`kdFv(Q>Dk@+N9&L0mWUkV3} z7!B1fy^JkKWCzTgL-QWJlou!#EU?afw`+dC@N~69AY7#H2 zHFot%3Md*NRUA*YpQMP?(GL$Mn#t#_2%fRky>5Ot#eM4=J{UT>RuSZx(fmldXbHa& zb4V+O`;_9AbfrH^Ra@~->nFB&Nhd8d}La{9}wdkYb|)&mWf|LOom)YT!($ z-Wt|Ir0M-p7BUsb>tSqD{jYECg}S~pur=u*JFis*SY+9^*k>D_dR+v?-q(-cD#_WUwbvRFJ!Hz++75Uq?4KKWw?72 zIAvkh+~UV!y&rMK!-<-WJF?{wMr2QEIjUqIePYyhT1}ktq?I< z{$x&Cbpo)==>}CxZwR-otg2{3m`8!v;)Zac-lwUnkMMURqId^Xiz~2?c-olRjzP^J 
z*xJ5Dik;DPpzFC2!jliO!bDtu}p}&ZGcvX5FaqYod#bR=nfa`*UM!VZ(v3 ztc}_j=5}xPs3A9Ka9AzjgrtRw=3AAOoOgESq{RA{w%CLT)(f-!DrsPGjmOx5s&6K; zd*caciZ}5b=v9?*KIw{rLOAKptc?@g!de&ol>rO6IG-jAG4s~$$1-k>V!v=Z)Io93oS4RJ zQLrW=w@P7Ak+#bE?jH$r&rx!I>ZnBS7ss~F!V@xnE$`W0CPrp*otA&G`MS%kpAYgL zl?RC7iYeYJOl`C0dws=rlJUV6IplRzS z`BEpsX5K&fQT;JziO}7Z@^VrXzq@`?85_XF_hzYAgm(K%lARq4e2)GJy8ryGJmsIh z@jn2B-a}mcuPG6?s4|u=S;t1 z-LN1cU;zRW(%m85p-3pP^wPD!E=$J}OMUmgXTBNFJI5KuefCjd_J7B9{p!NM6iNd+ z0$d7-LIiRh7(hEb2W2IJ{j_VfqH#wPjwEfZBbqmiW68JF ze129v@s#5VczVFmiNkGy&y~pFw;%~^Y5FWqd)oKm<%_rUK*$KPe(%qUG&VZm0j+s7 z1H+cvw7hrvbCA+p;2RwMIt2hgxFj-(XLqtYBd5}2fEAas@%wkY1yD>QO3R(znP%T_ zadsXl)T#F~rM_r!KFeDmZzA3PHi}rq|IUq+04l`QR`Z1E(!XCoVjN53_;*L3Q=@$( zdkfIPnjrfO3*_AMfJZ>ZZ>RzirZgu>&!%e$-C)-faqA^u_(8=C zrlc(bF%j??GTU)ifU0mS=;&*XO9Y^^{-9>wQ)zh|z)9YnN#f~g(L6%9ZU4N{JHJ0jXz@IlDhNw%EIGO<;G!8a{lz zM-O6(KQ-6h2DvsMey;Aw8*u35bfr}LyMTHI4QXFulseg)a@FAI)6iER>6l(y_DhR$ z?!4>v(*1P1QH%ZUuN(zHamlJ=TnVHm12`54A0$dFE!d`d4!UbFcT9ph?Eg}YEZ-hE z;*tF?ZrRqbzJk?araiMo5CVL^&!3|dWKkf9n&F#Jq&N~2x@PJib_T)%QNo`cW@ym) zoiHH^VAY<%#{@uqxuuG?+oR^iSlO3X8({3NP;;?I*Ul`*B-1d!831B@z>U$mvt7s4 z_p21z{XHvu6q@*P)svsK;Ik;et6(t z!~sn}ddf}d4gh!wXvQxqrWzoX{6t8L{o#0aG;NRoQzWmUyV8GNMq_~%{GUvW?~7vJ zy4h|5zk3I`X>nZoVwjpf-QGJMz>I<R)MTl#a}I}VVil#f)Ptnjy^xZ!-<)cZgbLeqK|=2#(jm?7q;?SlpMK`_fM*noJH9q z7E;dy406usB@F6NNS_pVeaVUOjzaR;ei}=mnFqPQN;Fw8YHM3#dAmdFdgxUVnwz;A zSC#M|{8G?}f5=wpXunR)hX7rpEb^=uL;=#@g7&J_psIUi`qG{ljg`+L|1Co(EA3zc_UP6mlxm*Rn8W0=mHeDNoPu zf&3f_qr9Mn4v(=Z=Wo?eFPy3Bdlt^VgBvu!!rceY9i)7BhI4%0g-Xsp+Lh#;1(qrt zP0y+RS>}0xG|+$mSQ!YxKnU+0pw-Ffep!$Sf@<~-C`!(noX&+2P@ zkb1z#{?OjYmp`!9?gAN{h4j1J;-Wa4UX`CG#hgO_#~<-1Z~4~lbJyI0{e~*Q9fvmQ z^|d~{zR8V;kqkMW6c2aF%9S5A#)abp!ORC@Aq6&rZy@LwVMBvtT5fhZ#l`0`)Y1Xw z@%QWA<T>$A2qtRb8?kR$-@z{Or+{SyT4GY9a$aTrvA43IgQ9OUNa zZlm~7o!6=El3KQd#&)L4i=?lX$E!OE#5^=^_yp*CnNtom0P8Z2SpxabT{CiE;E#k- z!O#gAHl3pMs^LqQCuR@=I6?!{QZh%Dab+YfT_#$U?ZJea3ap8$_4w9F!KjBDB&ope_e_6}}aj z$3L-GE#+=>EI!DfV#a^rB6cO5?p+7(eF0F1j(mQ4pH_G;THH;_N9adirpB*KjrL3p 
zwWb|Lu))6mho#;qbLw1|cX&zgE3GClWk2djiRNWE1&H|SNG~(wTRn*fGgRwzR{#x9 zPWGl6#P$GyRDqVc8D$KD@o2z+g#qSCV16RgS#Q8eB&@n|;UmHlHL9a9|BTuWFhCkm z0j+gjBp`HfGtSWZ6F6VM{I>VNCmKoB$~+D*3K#nnhokNCUU`+T$dH@1Z|Vt2M9sc_ zem1#JSF#Rh%j1kMLqIjdAb8N9a3;WRjLT6pif1|z&HM=1v5#PUBk!j>4v9{{)A19q z$;NlLz{o0Tv)IPgr*u#YT4A8nnrV4N0fvC|_?ZEH#ct}D``7#AA5Q1Fyc(bY#RB`T zFEX_?*bG>2coc6ko(&X9fIQ0a#3+_vf4`Qt~omvwG_sk;6CPgryY3NGg1SC-Z+4~fXneE-jFF-5X-sNZLUe9#n z%d|H!6KA1cu|4qF0r@oL5#kg!U^pa*C0}!GfPmHqj6V1w(C=<84gv|%>V3(8_s7_& zSeN}?M$^ubx0YsDFK&A2x$S0suq+7184_A(JlSS;kLiNa4N=^B_gR1*55Vx1$UDOh zIJnSQP*~?65y#QmGpQGnKan>FL)<`4JNRaHM%&=u0Kh1UnjGFI48kQrNcjDlOX@1qKa?g6u9E=GDYKTTUA#EUa00<0V=W$7tmCw!uZ>dU=H+5j- zkT-{wjiPW)dC<@*Q@ghk@Ng%21j_<~2Z)i18gD$69>QQ4PIP~m1{_EHVc(SGHNkOc zN6tw=BYZc+X*38YotCKF@-PS_1jX8Y&n+Mg6fV6MF86%;cWLrUcj)2(bJ;Z-paI3n zh~d%=#3c9D6TrxWor$DJc;H&z`P?6h7|7RPb=hq8&twl(fRGnVcCe5N+-w#AyTHqVA@*Y@ zcVMZ0X-sR_!2hBg0`ptlgY~$hSkCr6(uPuQ4&!HE|enE6f{{{K; z4}hv|uvo`JO6qkjnC3wYNIBn`9a8+aG@plCG!{Rozr1dX_Y7d@<58qpuB|x7J4aTq z9!Py4dz8fXsfLfAoNW;i{wVi7{`1lY^G|ocEFA8-?__k3?8{_&kIf$fGkI1rdl^W` z1R08&)9eoj13bSUhgkr9$xCOBuA}kQ&xAD8u$lKa`yyRu6?OriK$O@Wm?nMyp5b*o z5cT(gE8?lw{+K)1pHQ8AdXKdOnPXunU(ihhC*}6-ORqq%c|rgE(<%J5kjxJQDk1QJq-)($vrYAC ze-pNl_+51mh5_+j7HOX`R9v{><*uT5GqtSB9)qO-xW|Mpm1#+nxk48I>kDVFTixFvW_Tdjor!S+KCndMN0cmbIf zwUje3N=JzqH$XLO3*0v(Ze4j^w#BRRGPktF2mP!wG0GW4(?^JYB-M|$Y1TK!Z|cG9 zOp|}SoyO4npZW`Myxj=dGgsJu>aWR(E)i#j2&FDwqrZt2QtF*E!Y4npXC!e-{r~=z zwf6kXO!B|P-1klt0HTp>!?FaX#8^1(??u~RM|WMa{{|3JPFNGgC|{bzqhxjGzj5Pz z{p#6rK&=re*hB)y#oIeZ2TPWf{PCC-(Euu&(F{_ujcmPQFv;idiWd+!YGR0!x}AI2 znZLZS4`~6?=9?h(X(8(d9*DkSvfJ7P8CeLWJE2E2`rp7wE-A1j1R!WId2pdi^SK5v zTSfgf5q9p4y1kD+brMs$MI(*6jfWdDh?^|a=xfKS6bazi%j?>!R6qSn3pT|Fx=FrX zdmGEOG;dVQZ}HOkHl9P}8=PcY(W2|op?Q)Xh?aKYC3{8!MxB4fKXDV?@u^(3RvZDo);!?I{Y-m&EJ$?0pMHr|c? 
zVn=MR?l_6BH%_&^4uC(} z{~(bs?O`+$0wRw%y~3A|`t?6E;~Tjc8*m!?y+Zm zlu~V$xfQIDhNVqW0^tER!{;+U1D7KCQ$v>Cq4SFnkG7ccnASaTR)GE%HkNw~3BsSt znDn0G4eA5uXI0NBJMYgMtau+pJsGM$G?fv&H8ZKi1Hre!s7)vkzk*q!fMS49TMsU> z&>&S^bhc<}CvejZG^e>0B44O|@h@+6QcDBc9JN z8m7Qe{TeW+U`YXSd}MwJG9G~&rQXNuK8T5Y8T#P=IMVN_T8*q5mHyX}E&!R;Pviv? z(?dVV{mu@}^HBoV3J8@ffFQfVmD9`|nM`?!zjyxqe4Dh!B(bMG6X>)xiCIlTrBke` zBj<^BJb=5$7w@&{yXyImKoAPhRuH)6-2eglJW!<F$!>@4H7V7E5wGl!6(X_B!WS~L-(y3 zNLR57Rx!|IjHzY78MGU=?fLiGN!q){5^RKc z?4^D)M_)P?-#>h$qF9KxKp9jvbd!6|y}bv9{!yGkjjU3*ySty3J6yo1GNeK(gLgeT zSoSL3&&7uFjZe@>{J8t?K0L&$*#8&^xpnpWy(_wS8F!7cdyZ<05uZBjDKK*yMR9i> zho3)q@Mu1pB=qXLE`Qa@&_({!HQSKV%Cp;1lX2{x!{sn~IL=r(m82S})a*XcN2NQuNwU}e>tT6|s5hs=s(Nezz3*0XXsdfY31ep^Mm~FL3$@HLMm*|yqcJD$foj5&zle(%L$}U zSm{znP9C7^p2hWG3P5%l7(z$qzxt7=_xf9~HWb~XnArq04n(UA3qc6kJH_9gg^Iro zd$jjppq4_@Qtt1jJ0(doU)#c&am4Vp0M+ZX>0qWhUB4(N{J{8hEwa0U&q4*c!9P09<0^$2IrW2RPTY)EY17>*nvaU zPpd9sg8z$&%PRpX{(Z_fyNB|e3=jai(EomKeX;05-Mvlopw_$f$B2VxwL*b~1n4Tk zU7l4-HvNr*nNwq?wfm-{NP6+1CD(aJ*6J-Vk!Xvjlu-l^Ai$pqX~FDa;4^13w3Pk5 z>r)w>3ic*2I(I>?6MwlboNBuzcj0T!YC}^C=5^@;tS-yQbKnJtAuaj)r87USG^-nc z#<+OGrREGS{I@{+3LH-D!_~IH%ivmk?+kRvKo|XH{SWr`;ffvk>9MLzr_gx>iGTtL znotmxp;!FcOEZKcHRQ>}i}&hy?XX(^@$S5zXCt$@>S+6S=Y`J_zWb+udp>Vq?;c)If`q>-3eFiEVg9ZGPL<0%t7*tn-i_1mt_t23?%UGn{&w(#&!@1fP#6p`94E{>d_sG6rCJ_Y66RBzv zKWcpX+!sLFEHIuQ$bDSCmKivF^)^tS|CTkR+_3ZjMnhn=%zya0=Z!3w`S*!N9A^gr z^C0gFB7tlkXN;}m>^rb5T)ra7{jL$lYy)*=Po|?m?|#kQGUvxh(}9@nQ{ZLK%2pE0 za$MB%K3&hZdsL5oFMu!^fcYrF2Ge8*%;5PaVE$2@UKg~-brn46OzJsWm+37{B(cR3 z^CE%i{8H=G@Q9}9hl*dHJ+#dfd4QHny_)-tkZj7#BvVN_^W6g_*E+mgjErhCkSc?D zI%(vQJay8KCVm@h8{L>~WTS0)%6{|q=|*JwEOcA42D?wQE+*0TdM4xB_KIKDd#m+1 zW*XWt5+07hl%iIu@-f<8y&IwRiy{&jHuIteIz|NpPI zrif2eOnfA?jgE#oYC?+AIkjUatFaB*$z5_rf38EFx@Df5hA%RAUU9ZDR5CItu6i1? 
zM}6bv3-}fNkr5HoVEwKJMZe+xRV*^-gok%7=(%()dyvcrFQTlCa;~ zJ!qQR(WGdLdu%-{O+u#Jc`{*W6&hZ}3@Nf3%}bJ-ZF8*JYj#Q%H~%0N@XHp%MU*E9 z@8GCKv%pF(;$1r+mu_Bu-Uj%@h-o%bKGWhgFf?quB8`HQYv7zeZrXYgT%_H)#*&7ASAhvG4dE zX3@w`%evUI;c*afVGT6PBN^sEMonX5koX3qqWt7d7%vw=mH&@&R%ghuLN+*qG ze7YXY`gDC`ihPN<75e7ZSoQmwmRf^euW?i^v43@qP|Inzce-&AX*fpn>7Uc}pq7@G z4u|8OJ{KJO`2`Y}6NJkmNf-{LZT^+6^9btcBNTKdDJ=7On;rt|3%1y=8rw`8|OWO~8AhaHr#z>Hhx;{h#08rmaq# z_S^mQ=Ac7AjpQ>jD#y1-?7|s7|mvqfgkYq@B}B+lHU6F2>Z%k%F1GOfv>RJAdGw(FI4I|cZbyM8Xb|K@P;wdHHo*bP_NmZ<&d5}Ub3$z{gTef@aq^gwMv zVbgq6QC*b8(flCew7HL=GcVZqPNMEbY0X=X-R82dyyix1WJS^f+>!b4tBBQx7X44(atDlA`w_;_F(l7v() zhjE&{+9ZnsLq0u!3XtJ9n@p%4&a~LJWH(kyqz|MHHXJ4WX${vKpe(ct`716Mq^=t; zx1eEhLz+nei6qSZ8EIz5og|?GAE1sq#1?MYIgY{1jB}69x7fI*4Pr4T{~UU_RpgfI z;O+YFd#LMmcIcSG4otG%c#fdc&tsVuFa8=2(OW_;kER(}VwTaFO=nir>@focj*cNF zUsrx)%9f?@K94G$ORIiDtBk79nf9DKU{@R-+1^kQb-v8X08hS0DJpf*Q+)nRWTd@J zL*OKDJ6zd@e--zMZ9bdfT+FMJ9|4{UnfEaZysak0EWQDghs&JQ8*CK>E(5vZ_ zly_`%Y~K0y?;zRm<^7|i?$XQ<^ZXO3s68ls-0F!yW2HXgG|u*U+bd%3{_QPtappAC zv1zqOci`;t4vpktV7#h&`7B$M9anCLmfE>sXC{GC-WE2n4{eH9_#We(HaG-Gt>}dYe z=(pOV25a+>kI>yLbl3KutS+h!B>!6GKR1&BzP@6784Ma!qe>iU(7x!N3+!Yki#4<* zl8AM{-Pj186u6)P?U|%3@71=Fd1%*<+fy>&2JigLF-AGCC3pr^Wi&a0>0!b-)W_=s z2c9$iy8ywWY$Oeq{pX0w&};BPyPRLtcn?o0?Cz~GxlIhotiAt`;c!gr#GPPAsi-`9 zB4>TU>F9xUU3v2L&GXAws%Wz!vL=Vvamo6C&)mtTx%F0Sq6ERR6e_e+OhUQpECp;M zZ@;e!Y~5aS>Jo!!gwmf)Q2AMJs+~KWkhj#d$bIziPE%bv%uiT5&WxkhAHL{1O?IHh zG=(};)tU;Ci4nz2&u=*HA9Re>$vud)A%Ess?YA{`=W^$5G){ ztN*v{%_K7lkKB{`D5KiuvJDBn->EsLv`vFR1hWNg>FHOZwad1vithCwbHwJ{{E)AKfK=hQd1IBe3@hfY<{W@r%w zTP@XW-OaMG_ne`X2jp4=apGkrO?6V<5f8cisSPncpWbeDM3k_;ave;Ft0@&IKehCO zR8>{(8vd|dmja(uJf}xUN3Odh`+kO#4Z`oQzwLpVH;?Id!N9iHS|yPzuj}Ao<<;(* zQ);7;r0H6SjyRDBop#JNDru8hD}24ABIKZaB^lH67cy1f##@>eWB0KO6DLo;mKl}$ zeH>%)&msUX-)=oyxckprD4oB1XJQt8&$?-()adl{c%h%%r@!UIWK=EO_DFpbUiT+B z-bHR7Ui*R;9&c^eF=X7ZS08P-yA7r@j&Byji)L%=iR5)CK0PTFe4Hw6r$8>=XZ*5t z-gPswarMi%f{pRZz=xYBvw4O12m*(n$3KglBW!nT&kb9P@wDpA 
zRAQz>9Wa~jQ4U9ElFm*FRF!TD(euT(S?-K|>^QZxHOTyQHs>Wwc7F7!6pOV`=G!{C zn_mZkvKFE9zOvs=3EGD1z-=bRqLthZCXO0aps!a8m?FQ|a@9eXA!BD#y+uviX(8l!@;4>x; zLY-&^jdp~+`{`DZ^U$QXMD%B`*u4f-X3lBN#0A;jy1y1ljVmQ~Yz{-MlRD?xv^zyo zok$7APrtb~?QF*uh7YePyKa2v&q}{&?lGWz`*^m=GTTWy$eKhP?fZ8x#Vn!c@l%S1 z-Bb8@9+|zkv&UsgDV@6yom|ZGiu&iX4@ABZ2l+u$5mSHPy`7WxO|&K{9bAaHN`{r( zt0gUQ@8&4>L>5c(OEeYd?6(Rc91tYs@~h}6%E-WJhh)rf*leUdEM4TWL#A_gQKdv1 z``-D~dj5BtMpH^cuG{5qGHe8W(iHJ=}`Zx8s`*gX2?A zYP^;(u11h5dZQXuq_^uJ?>}n~ynNPpllJOAZ?z1NjTF-okeEn8Uqm*E76fUgA*S-n z#f_kZA)*^MhBK!vgZ?&7jeoE=VzLXr3YnQl6%0VAixikpIpQh2)^7vPjM4qe^a1{# z9dqX#Jy!?+R@0L<2xLt7G9=#-X;Phc&{B;>Oa#r^Oxo2NPmmySU_p{;1hG(;#-UB5QXY}-+*dl#yFP~2-R?nfJHaZBDk zkFO%A5ceIkr{HxPBlvcXB*(*Hl2l%vIiJqn&g3oH#x6CLNxdAo5oc2!37M{BlAoLN z5-I6{dUMQAW7?je1XAS1C{@1waBS!sz}J7wIqljL?eU}ABQb1Qh^TXO`74ZxZ&i2UXbB&G*nN-C~F~84I;E_N(*>#@#MIBH!=|&h8VC&F@59(p5p& z$YYzkod)eMAK9cYS`(L!ZkHt{Bb=uER#Q_W(S+yp=Ubl|sHf{5TR}w9QjZ{O?7jQz z?)fUCdJG23D9m}QRWdjU8y64QM7K9>m-&c^S^v5}5P7UfY;NNzBBVm2T?R@j5)mS+ zK)+2A6MpxcY-C3EOR@CeWTpyQudDfK`c)mfm>X*g$WHez@mq6VhZlU`x}eswo*~&s zb&{I0tVPVzT4qggIifmHOwhKtsSE*Jq|$S?P`H@WB~a2nw!>j2Tzv4p7wy}*sPvGB zvPaDgKZk6tjC?Ii6XiQ1rn{C$!U-QCk~q~*yb-_b z@LI8zJBe>&)`GzB*OUP~iw@I2r8`|xN!(q>0*)b}X0AGh^Q+OAo{K@9Tt;RmM{jE6 z5TYAJHj|q0je4Qk*V>lz!*r(@&=2PbT5o*_q&7}UuN+X+k9;hcdvBvwpx+@$^3R?L z0WJR0@l0N;`<82DThL_V)@X8rimc8`Xr#T~Mv>}i%X(!h0WExW$&fWN5L0^Ze0mWx z6DxlR|2q6`M#$q!qG2&Y*DQxRc^D7alp?dixXD<=UT7F)_cZAr!Ajz^zfwL z;y!XswmZs8Ah57QL*k$H0bah{THX-e{I?SGOYF_pTO03=alY``9bMcLxk=CyuXs%f z^C@H@`m$syZ={=O<6UGVB$gmvvYJY|?j}ht@Sp`oGNUuV|Qthr^SaCdU5Ep6L63EI@=p>OX%ieuY4)F|39bjvl zUMb+dza5a_R;xm@!^%(hPbOOtErRcUMyRxItbP+-fuWo4?Kv`z|}M{m%;Wi@+3 z8jhMq(!=rT=vuanY7G_>tGY)7`T3jE{2ufiLia|a&oM3e@nVewSa1!A+0|DeBNBu} zPK`ygvNU9S-Dv2^cIq*qS}gXvwf3cRS_##)+z)g(e_|N__hKHb+D(r9=dBDqF^8Fl zagQf$2N8Upe?}KgCZcJ>zcp8zN6o1)Wju}pWmw?Fl9FUwpx*eZ1of9~y{tWnw(Kde znsOsfwVc~o6>A~ub+@&g6#{zxFkwZ@Hq~-v(g&!83r6-+M7_SA(c2`uc;UBo=h}s^ z)50&8I^*mhB-knn-6zC$V#aCkQbiJ>$uG<$B@hj*r^|7jWSUs6G?|>wUAylTB;l5 
zrpH+uFP;y*&rMu8JN%R9(Gik$t9SRtO894H&hDt6vV6osF)5+fJ2}OzC_@?lK=R<> zpqW)vdqMi~n`91?`845CMnr6|iYRo3c_SoKcJht#EH)*Dt$6Eg7Dob|@6%|pw|lla z@ksAxb1aXUVcZxoa(+g)6gJ!TqhlYF`2m51=9kwsp^Xl<G`Q;)(jmn%96zc{u_~WVAq=Pwy>A-@<3*?qvlRy5be=( z_MnovNb9oc1DG$Unl>vOjbP1HTA*(hJwI+BvkN6w9A?%k7HCwj$ewy09t>GO5*G1e z04SV^A@boueg5B0&-}L52APOO{X8yDR+zc|=xS14NqK5vth`v;lXFn0)54u5<};Av zqNk!Sz;y_^d66`luOv6Z&k!sDV*f?Fm(h^PGCaoBu89nfwk3smn%SWdnZ zc=t_+!FCwQ;G-I`SbIex{IgMfAoh$&J0>U&n_N@k*GAIvQh%E zI?XlQtuuz-yd~6qmR8$6HS*wNF410ej^`;FlgcvhCXla{phCBW@@H4f)PK%!Io;cc z84|7mz2R7`+yiC(A}7MNwHk3uw7=OX%vAsT*u~?udPXudU{|$ITk;d^hQ^_5T(1p= z{e=u2{Wh(@<@xDJ$x#}bk9)+0Hqu?;u{j&9H~BXUWx{UKPdQXV%X~x)eFs>vlzdW) zC{%~Qw?EoW2~{%jTrX(@dBoqWFSwhqk^`Jo!5;z1U~&5%hA>Nu{LRHIcJkPMvi8yL zi#ll*Z#kb`2l=yUHGPBWg*ss|@1?u)o<<}BYkA=mY*QZxGglQG5q`#jdcOacGroqEP3O)xS5CQ}_xycxG9$_Inq0D*#X3ssWuPB?5nwDMCvDJI zxP!buI(s{5d!DkL2#;PKF=h-5z4*(+6aI25QxpQ`gH8;*>K5_8V~Xp;f`k=A%i zt5tg7e&s&A+T+1VOC&nYxJmXjSypt7(WENu(Dkgj8Q{^`?Oo%x#%4ht9}zIc+g-#l z(W^C#3jkm6%;+@e=@sNpC5Zj?orCt`O93102+UL^kW2EuI_eb0=`D~xzo~TceY>zm z-1EXYeb|h=)SK22o~HlH?To)Qqa!`~fA17L4~oF)d;h$5V5rwex#+WBT&6*}gNDY2 zp&6VZN(TNJ%vpD4&fiG|oPm4+==)Ds3x&BHBY#)hdzYq}wHd+6=X1~d7agmi+D(pz zaa&OLknFGT1^w3#cH7!YoYTc+JEkV_1mb^qAd4Li2ndpS=-o~Z*t^YHC4Ww|GA9WY z7%O{^S_%NjHuwpqz26l^$*&>p=hQ$=7v_fh&<2|J6-~rSm8!yz#xjNj$c`llUm~X9 z4~D@LYYdvShA8qAp#XYH2N!yuja^?N#(hzn803(Gzpzo%nl##`ZAw5B0P5>4(`wTw zaiSOY&ew)PAA$&Uh90P%AO9j1$PBVec8`awOLNq%ox++oSw}hOtb7AaB&_N$F2?2L zF||3MJ|1D>sI#hxvYg2)ERLipOqXbTWCdZ3AI!fzDdJ8y&2_Me(9Mm_$od^Eb0fMm znQW>`e8j8jFH(r?Q!a%cX`Lf;mTQBmEceJaJJf8y9b`J*O8HjrtC`9Xk`dmEX)T+? 
zuAdKc2g1JA_xE94lJl}EX`!30z42t9d)M9mY(AassbubzIzCsm+>yNDcP*;d_pCYg-EG#6&K!pK`1AZ=;BHu-ZChNX!euUoFR$xGF(u|7nV z36I;k*tHH&N!QO3FC=W10O-;rYtyq(_;6dPjyo}L)m8;yuxkYu-LTTHa4YoQnj|tw zMhM5ULDV_NE+w0cS%|lbN8~VYfX@^Opf)7WaP%7nD<;`!Z)8}0lpnE?esGq*`UMWY z)GfLqXR)Z{$hkTgrE!_Gsi%_ecwb(sXTP7n9vT=qFkF$VpwS!3E`Ajxokzqv(jF5a z?{2Nt3d`P3@lTP$!1RXw&K*8>0ocL;1Mj2}a97BUWY8_#Jy+7Xe^B^%A#NU_`#4R* z#Nh>~_Nqb6YdW7|t>6&v=^;K*J9<9?*Wr^pPnToHi0xodY+`UKI_v^JFe=bz zahI@`bYUji^<0S-egP;LY7N>^Kz>37xaNJInVrLyOoJfx0x$#0NGoNtK$PeC{Anb7 zu=1S{(&CnKkWr;fn~$r`z&kg2?FZ%cQc+XA)1uz* zs_ym<>;Ujxszs?S;E%!05gt>++&l%9^d$bWS(l%iHT~GB?9VmU|DX#$6vFEP16EoJ?J5s~99 zmQyZd;>Vm-EoBDMzJZ~EyGu%{blPRHVMk+%0Eq)FtqQ>v8_+{Rk%hTavx7;MLMTrT z7}9lA`k(U$ynN;@Ds(wa`p=sW<)wK2Q|OIaOjJqUss5^XAGrUQ`rK*JP?fO;iy=&U(tPuEDMA!R_aGsDxXZb;}pUtduz)D9H6sx9RFS)h3B zuRxje#~Tk;*kH(bJ4(Z+PG24AX8NWoA-)J#y4F;S-mFm3Q?X$y>aYMOEpvjS%#O*J zH}R#3aS8^>Bf;9WnPld403{1_0Ch%qhQ?hR`H-lg6ub9B&5=H?7HTOGP>|JD$#Vj}84MsbOz) z``)Z|XBux(38jB7=m9ErK*gKZNmh(%p08e0#m-zSFO-0l+lBNBR)U`AO8J>nLQ=LX zxPa|%zN`VC+V)1V9+Wl7)fp6yVtxz^s``y9($46p^70X&TR`hkeuG2o)n$vxA*JJ= zW^1U9zAERLqx_Nm+RBgfLP@urj)bm5&p8JG zbQ8ow)C@<+f@NboR=z}Ik;DCUnhckx9FgeDMxT&0!zL|I zsSLL-Np#+i7a9{l?~YBAeG+Y|HKN@?BUmC;i(&Ov>-~^Z%xdj9cJemIig%MhN%J!R zwSecQod(O6C^rhe4Aa|kCRs~`V*D(WYks-fKmiG&Qq1ztSpZ(X-Ewif$o==tXOZ7- zKpRiK6k^dqza?a9!&fVX?6)tG9A+ zfW+303t3r7DT>+on5~>hKq3hwocd{B73v9XdP=})jEXUG6w(R!;K(?gp~9kCcsR^$ zYFefPl`fYG3i)0P>z=S#$ zy@%&##1m%n*vi*m*UNbCHL5hI@_S}p1Jwf~2ZA_FvTOje$g9dsWV9==wH^Psx6y{1Iu828n-2V_(<$r?fBE@s zs!6tZ*$iOCd|$LxzmpY)3)s60Hw$kOMfHwG@rcQ!p8>aDvpo6(*V?ptD|1xu#!$LW zrJb{dft4S)Cm!jMGH;nNu9{ z1hfBHee^xi^V}Av1klBcSW)&f*5-sJABmDotwV6gtB-Fc;)p4})amvUadUl!?#vlU z#liJ)I4mg-HrF<|Ym?qM{sCa;%K77T+9~bQ%fPpV0oj~0aj|Gs%74uyz8^}T4R2y1!LG$*Q&Ko>vWMQV~~9Ne=oC# zdZ13<|Mxu~xG3q3bu>cG!<*)~Tq0dwx)F7K#kYz33~9j&6|{ zgR4)+AWfQp&MF-ApGQ22letGBvOpw}bR%xr`=n9Lgf|;8eVSyDUD+6K2UG;FtBwGM z;j#YCj<515{9CXUcFvdI%F?2;qmR-!a~-LQ9;FTW6xGZ?RXH8dzuPi7Z`@kd%&j%2 z7PfJagCVAyxhtWSO6C6nhImDucO}w*y8|_j%$<9(n+7MJ-ZG&nwMn+4#Lk`Y3)Y9X 
zv^BZxJtaGy4pZ>z0;Hz3XRG5)pBd~JbCn>j$M@@vZ{3}^FWF!%??fa1SCvX5PrZ*G zJ`2f6IQ~neb&!dW_x7e~`KxN9G6YKa+1VSf68V0p92xr?%9+cC8%tQ}>z0|&4o|}J zXE&!f&Aj*gjm6wI+Diu56klE`AE1mq_ig%>a_{`0BDTwG($Ckl4p&ZXh9zJ7Q&L{$ zO=}qhuunp-4$ljVKg9Wfu;%u01}yJAu@(ZUHu94eG5=o+TTa*heL#4451B7sk^l47 zLxNTbXE(V_uAT@`C}2J-tG#OFIZ7VHl~L)UzRw5Xe{zr1skqywZwlrBZ@f3 z)#roWT}?Y^tsOWXwj=d4CAsQVxpJ-*W;Z@4MVFQ?n)50@0-auik#Zdt>8CV~t~nv` zUR*ZEeX=X=HWv=A?nPL1WbY~~sBrG9i%=tkQp>%^T$ykHnssx-PWw_b(*iO~);P%P+GJm-+VL=(z>VkfOJCaPWCrz`ajh>iMP{ov| z3aTc!5m(4_?D%@%1PbeDtViXpr=JwADMZdRkfR9Sd=GW_P46{u7Y9uP5BeK#4JYMXG zPJTfUi|_|kW|Zgp;l2%e${^dlQO%|gfHlIYyv$a!-UE_q*{2V@{aQXhxii9ov~(st zJ4^xkq^%aOv?qr=st!2Rkq4HXV`l0W^KLHWrzLS_L3YK3fbihO;BGc1`*xI`%#Gwx z5>8P*7gS5KP50rJn*TuAvg>z3A7hjIF&5MW&dJ7*FV56q##8}xX(~57+nMR-Bx_T-1OlBFFoMaptnP?;A%lH(;rA3g8~-)>yNCsBy6I!6Z_{S zizV}89z8(XV`s_S$LaU^ek*}loHm1KS-{GeZ5-!v@m2;8Xh@2YnP$ciL`D(&^r5x-7eaVY8e4Kox zq=h?hty+stPHi1NdpQo8;W$)F-GD$tdFjI-<|TqTsdn2gYna1<*S?S zi(UMAC~(ChH^lycBI0}vtpz$YE49_`q0&aS9F}oMZ|x>P2z&1KZg@CftLUxMo;7TLZ!spSPKYh(MmiS;Wad|0ybsHZZ(svxsNLjF+=~COkJiUy6x6m?+xBj`bE- zH)RU0J7|+%uLh4cb^Ynx0|#(HgUtilnZzMK+$l;x(f2A1h7fsM(R9E;h^|Y3jS+17 zYI>GNf5QM(8RpW@M7r(xMXE@%GlfSYjY$RD5$@ug`9mrIb%7IfE};181P$!uyoESp{iyN>Xa&E{}zyl+9NkGeWzvfh~b%{K+qaIO?_IT?p+t+1%5682Zh|# zD7pfGB1MZ6YC*HyZ`^#d+V^M()ewh)kRk%b`?~RP(m7Z zK?F$^Y3Y)dmhO&0U0?<2k{n8UKsts{5ow0*Vd#c|A%>p!8F%-)3VyHO@4xq-m+Ml_ znHkQUr#^At_vblF@P$rc%XM*psvl3aMY$Y{+dW6tACxtF>1Npq`;-8RDdu>7V&?Od z&kmL>rI1c|tKypNMB)yDwAvLo1wMdC*g`zxCq|W{V(ufe(&Vz$mr9ux-T6P?lpaE% zMi=+ASNV9Q=BjQ|7YZ!)(G>|wK)jD>j;G4YlTYF$0FYjpbYC9KFzD{E-w9%k z5{Rn%L(HEHT4Y?9HT#xa1dG9sda|dhhjViTj8xPtMlgjoC0ttxqOW>R#1dlt#i zYF-BSs)dO}1P~#1Pum=-4%XZCc8~K%n?5Yk;ZG9P8tR~WcM*J*aek$hxsTkJEsFiR z1RQEI0r+X50iA~QRQ>&FLhYIiNjvM2Z2ro%p@Umk7YF z-4TNoug?zvK~A;2s)z{Hyb_DjcnL*ifr7s`H8JLo*?S>L))$b!;nih^_t%BuDuc)F z&g~`y>3{{Lb4h3`nV&=^%i(wGR#PfRmxnnI?M!Z5d__kyVCJ#r*BmoP>A+IkUSiX9 ztKe&AM#q7qZiM8q7PyW8iv3?#Q++Pc`z5 
zyt|0(wj;lT>x6t|Wk4krtfuwrVr0GSE79YnJk{+ui4|w;G>N5)J!}~0-$0eylsfXGXZ{2#xb^EKr!w~ey?d?-YjCHX7HuME9Xz$vYewDF#Za)6&TGS5b1A0{Xx7n2r#+jY1T$} z=!NgSnj25~S~8!f*TPR=U$pM7gKKMcq`9nDR5@(H>}%Vqq1g6N zTf{zBlBo}fx%T|2)(d<{2cXIIlPjTY6*g7_DS~z7J`IEGu%X==GccU})FG@?iY=zJ z<17_-7gZZRrAn$_voON((1;aaa>%?6#*Z)KSsNLvFFTw)D6zXm!Gdit&Wlv#CSDyyi%>W3cTwZrt?#n+`}-u(2Gi6Bh_%uKS4A{H$#Z>PiN`7 zDczTEx?1%NTH=k`c}_#VKhqr(vjCO?!posQPu>Z-m_XzypH7&01M12+=o!t@vtbzz zU(b)yBbX^K{eFP*s0;$R`Od02qO&a_7IetEHqdfsxb5;i2YRo(#70XZDT)vs9(i%B zz85~Jli@L;iWK@S&f?gPVJtQfmpd4Ui;wZGN#JZPA-yp7w1RW2!gS1LhJ%=f*z*r! zabO9Eo9~8X(04E-TFU?u;8U!1d+fVcC`2PA?wD?-0n5DAfKsi8et39k%opp?y5!V5 z74|NL#a4@`c?|VLc+r7A{a!YY>0~N7lA=M5V2f3M>G=?*MREa&Uj9(`LXEEJDsPWl&3T8*~}4 zTtI#e6kL68>zfi(2NX;54=OG#OFyl!Oh_ht8NG1P@xb>6jTNy}xvO2>#E51Q0Ov>h zhVy&^McY4L1wQkV93Up3V`?uD`O~SI$6%<`B8!w`BoexdqOC|49f9ns%+U=7rdeMo zn4?zK>Hv#RbAQz1{zyMg&ZGX-)b9Xu4mz(-!p*Iu?$HCs8B8vevn09>R4%Rxr98)u zf0gCd{cc~wpJNrW2$a~{qw{s6?;Ajr@Ef0h29T_)16K|(MP}lajD6z^v@Q?D*DPLt z-W~_sb*)VU=p`@g;goAU8>u_qz^Gn`wU}7^iAr5-+{_!O!(yeEKhTFAPVP(?>j5N@ zze2;RY5n}D5;9eUM5^9SWLsAksC@y_8?vUFz69V)iw9a^$K&U3RirmcO2}y9(^jAz`UtomtkZ z^i$XA_aD*@ae0Hyw96H~1Oy^F=H~*5#IO|mS)JZk6uy?re)~4qUH|k?D)x6c{M#4< z{4D}{9i|A#WNa0P_fol*f016F78A{A;L`bWTc#9gdHRMOIGinvIai za|DymDT3=4{E(bBr@!Ot`2$Bcw{PQm0s_xiW(~U`Wbj~ifspypGl~nfdfmT;kER4% zJ6^1&I9SWKa#_wf!Rwbnq!%X4#XC0)yO^+D$#l~NS0Z0RJ4k$o%|*ifMN}R$ZqL z-_#%d(9nP1y8;*R%#zS@BI?*pAhtW@iw2$_)j;Pu-EE%h%Xb{04KD_?P#B2G09hF| zWNHb5urJ%38mLdHK)VT|dr6{+n^^PD zH#{UmBfR#c<3HAtu3R1PQs0WVg1>Lvc9#r|u^W`ZD8%2!e)!1qBoG_+{Bd#TO$r^j zN)I19gZS9uR4?7DW;-;yJ7^D| zqv4mCaEOYAEb&?TU{IFzxFVGft(-TiX3DRLL^n1Iy5iNj;$AN3G9p_3z^H}9=GUgf zg_eaI!*Q9a(b)R-Ia=YnrL4809?fs!+&bLn^xUfp=I{8U+$GY?X;r=3zpU=ExBjlj zbM7JQuhDAsZm5&q&$;N7F!Xp~ECiLk(T>P0n$jxA*CL2(y?wklLMNeJmGMaLcdl?z5(Z75OA<=d#RbA5v@6K$Q5P2`gj0` z;y2;vGqYKL8K%q3+i=Tj%QSInD}Qo1%+>9t)(q9%#}d$!5@Gd>62YQ)gVf0-^LFB& z2R63GN(C#guD^Jt?u?A15MJ>ffZ#MzZvOCgLmkZA_`kVZ0)kYx{q6BhDwR>E#;bn1 zl-k;%*-W?;Ut#Yms33%3fASIQq}(d3DHlpyE 
zzd)&P=T;f%wC;O88q#zT4k!8d3%~pA%;0vW{clwVG{x!{e~u66LBa@ZZrraiAZ2Io zUQmq3m)Id%kED2ouBm$;4llzEL zm#k%DKdLJVvZ+oavXf(?Q~msE-P?ij%$5><3g0!Rrop0^E1RLgIEMIlb8H8zA(&8T zlCwTyPbPV}o!2!TT*TQjblr%rpS{8&+34mVYlTBHF;ub1;Kb9da^N;LHI41)=^=s_ zO)*xc--$a4PPgNB_`O(a;!+faz&`PQsL;cJp`liVFf|+m6naj;j?R8iE(Qqmbg}zFi`(rpL}!FEp%!HPwH_sDgh+~#=8L@kZ(4!xqNy10#mJr zL{f$K1DFkzUqlxs^U1<`s(_YhpiQqF?-h_FoTGg=R6wL-&yglAd^@Z?8nLFL{2?C* zGeM9G_?Z`X$XNOSMU8QUB3R+&5-^;{gW`}CHOCcf)CH@z{!N%SwAsT#`8Dsww=b0Q zL%3kK*zzkRcW_wwnB#3SFkf7rnjf48c4{PTa61v>uD16Y(f)hqz3eM|1CBA8Sz_j- zBb!y)v_4AUvgBhM3W>i`BdWUmx#zS$GK->Fh;!!BAUXSJxBE~5TGj0}rl9(|$C9^l zChuQob6-Cc>dEDGWh{t2L`QJQ?Z@dQmJN#7zo!p&oVfB$fBkppdFx!@e`f$7G48HJ zh$sMEL>0iCTVAIQ!Q#c)08oP<+I^r?T+UA6yRSv4AjEEw4C3xL=K{ez0@lpr8bx~x zcQ|EB)W9e`yjuBnf@Raa!niA&PsZ%Bm7(K42dx~Oaa!7JjdamdJ8kyWCjO5~8et`g z3FN3^`PV-1(#rK>wvhK}Zh?PlQ)ppoUTz`ewaxdAC<#gD47|(7k1v+rr}aHH9f^2< zF1tg~t1Br;vfmqvu7L-?*)!{mb?dK+c9Dl1*Jg0Djg|TD4ut(Ym=}SclXEe9-n$-$td}$SN&(Wm-MkXtxSR+Rr@SYT& zn-&tWwGqh43^ihLe`vk_iztM&_BGEb3%-f{zIz`{14 zb;?^HR%m&2KW=f^I*nUes`WvAVMqC*k-JHYPd93@*#)mTJnB>d_f(pgNUm4I!tV6W zXkaLd%hBa%{8YmHca!}xZk{qI1gT?;dDzT?KQ0HW4&D%Lz!aV2fO-_x#ub;cim9!3 zev3Tss4TX6ufT+p4R#65x;1y}rAo+jPKAisEf&n_oHW)2o@RQVNm);vIUoOe;F(p}(pRwZZbGb!En{2_Z?@} zxk7!}s&@{FI&{Lv{E1clq1Crdhn2vnh*&lTcsD2CLVwPcvHEn`;EC77WcBq{RldB= z&6Y7aa^IzqvITL+ddlXn1EM#0;-l9szq`NCC8u@O$K2(*^Y?f2&+PL{X+a`SxjH}N zxO~NiXg&>NpXQm1q{_e_$5M8zICBRqshrHwML$vHBpV-F+V9mFQ~M|-nz1*5#CLiy zq1xWaO^UZZmVrzarnyVp4eE=p14|ox-FTi<2Zkw4M(-0P`Bo73t-imjT&J1FOebUI zscC93XK?W1UNg*II;7#yV|0z45Ak-B)#_7G-8f;gNjxIE)`?4HqDhm9GZ;OQJDfnF z*H8Z6()s4dlj`g$+C&Jln#!k}LEz0HpXg(}=PC;RB%$HL^;(gms`VoH^;?%~b8pyI z5*6-OS=!ZbwO>tSI~WhaK%*J0St68XlEu{b-kF!cU9S_RI7J)%m%FM0i50c3>NI3s7Hoa88G?$Q z-UL;Sg#JA3Y$jE@GR>nJkK`doF(za8SHCUv{I5|WlxJ3oj{6sdXh>IWv`bG+`>2S% z{eaYL-fE(=wGmF#-@r@e^>SUw@`}mqLc5So}S0!q_l^V4d<1vR5(Uk((L zxlx(9Hlb2OFy+Nosor%$(i;IFA(N3S_;Nw(;wTy0$|Ygab5WzlQVjUDOC!qs!&y2x z{Cy=X=%7Uv;CDXM61Fw0Qc!DK<{cje@O#1L#FTU50w|xJL-kM~Ox3#a>dqMKiya 
z7CdU=_ZCl?Vh}3Wlo#R>dLf>bn~p$n;4V1{aTI^L&3CkMPePMx?Z~rE$$j$?AMTio z$@M<`nAH3lMgHV@IyBKe1x2Q@F3DhbGKRNtcgy4=3$i4ooaP{t#%)6A>9RpZ zHMDxCd7-@3lY5V28{TD!9ZS9z5rTXFIPKo%I2HX8_!aQV$*OxEa)${&zX4o3$a;W_ zqwefGnnaEMd_hL#*~xvGh36*=2IQXe#iR(Gi@W(#&6T;U3y^5T3b8QAZZ6FQQsCN1 zy-ylhYwRG%FG7&K65OGDAYaNKS9?<`N&3`!uGh2SHk}05-&M=(RHE{R1Im*PisCQ` zVHprG4`4RLD13MJ%O1TX67^2*IC^=nS?6SjjLFpoVU{THW@_hML9uoMJTTQCB8zsxUCPYlCI?|1_NOnRGT-fSuDLZR=boU) z6^zitGA>)GHcHFA{k4*tH&sG6I|Uw|T%E7DPdiqj z#Yu)^xNbww+4U;J^K#=_1V|3&lJCiph#eP#sB&pN_WK+D=d4@)RJIEM83k{w!TZ{o9Rn z;Zs|!8~c4aLn~3jw5-?KbL?=hncMBF1%;tG6Ztjh@q*72NXkysULsxoCcjwMb8E%< zL;-cCN?!vP)LzNzU@Ym%n+HmSbjME;XI$yiz-SMy`&=6VOOVy7qxL1&Vy4&I7pXxF zM=k@tGZ&ipVJqvdKW-Yy*`~?P!ub35#-Qz%Id<4uE#(o@y0;P%GKfR7PK}AfqGaIhb+~Mk)i9N$6hT@xgLTcExqLw^ZYgsvM{yw)`9{6{H_G@MU(Hh(i;vpFl1L?8= zd49YzDI$D|ZEun}>H^!G!Lf3+Y&)>FK1YVum=drjd=RwV0KN&iM4nl{Ezd@ZQSq?wA`%2LU? zQM+7pmwR6^zUWh!uSUSo*wpX1@xB2}Q3HFin_Hq&O3w`W+sf3=P2fx|!CEaD6%~9o zCfmOn2e-qK%7M8Y-}J6zIV#YV$#NGYJIB8@zj^Rr{p3|iuch{tOIjfF)7s9X57t+o zVy^FA_gVWDF=>eD6wn8ipDUm4{4kgAzIrNg0TAXJ8?BPY+aoYoNy1GERE0!w=L&^h zvD#}rA?i-U3$O04V<3V=S*$0Rp}9n!jP`?;3ypWxxBU~<4?Ay~@ZWQG6GGA-P51G5 z`%?JM){t5e_eBE(W9czDac=>H>ZWha5@+Na4y(ySOv*K#IqGekdOzHj!Mvf$Z<+R& zv3jH^T+n@$jPAbeIAe_OR$#JCabLpvNwPO{3I}iZ=2kMu-cK9Ae}<3x)tT9-6$e&C z0zx-2sv0&pnAEWn4Ow5R4?YUE78*)@?#thr+Oi1Rj&tWbVhXuh5 z*io}f;LiiXRzH1Pj`!aT>sCF}=bjY##iC35Abb_v7$KXtKK`&|&MacrY)m9_obw5c zsP}^FNM~oH4UKZ&LVdR5Aap>9yELb&aVzCxqZ+1sOx~V%Iq`gbMMfsr`8X#e=L?CLdL3GJCy64haNa{zXIo5=)%>Y?o)204DT?`9g@ zPx-^ewDk5kMiHo64~65-8ua9|J`@0zIT(WziHX*}v7~!jm?%>IBj47wrK%4z>JvJ~uN*^Y)6W)mG=Cg6PIrgiw4phLfxj6e76 z2oh}*!X6VPdFZ!|E(Vb!ejKL>u>JNutPA*&ie)`R(p#-=<<<(RCVtI0@j_}M(*NVGPvm5~IkUw|g#ghQ%4FV7P>WD0HMNqzY&|7RKlPhb7GX;bVLbR~kNzpm4 z!DKp-KWAwjmAlb6^sZn>UPPfjx0$uxDlnaWLd6@wH6kBUV*CfC$3F1Vvy|FQc9G38 z738Qrc~)96Olmt*>HDXJy}Ne7*b+{;DUh%EI{k1KE^eN&{&gx_s1p|SpRJBD>Wr{0 zVpt?T__!9*M6Pubd&H|Kt3CfWF)%7(&U}SOw{UfP<}rQY0Ua_cvdBkq;*YC>99!}U zS<@>G;6-0K+0Q}+E3Z3_&h6ikkFx76Q$P$j57DS_B={G4UIb~qqxx>=^Ma|(HjtAy 
zZbiHc=CNrn?v4s;;owp~kE_#GG1TPkE% zJl8J)VyF;4_8>v~WYe4vc`P)dkq*~cx>78^GT@D%tNGvH?yF~V>LL&pXt|(jlsAz= zHMX6dH~TS@b(A3_+}_HZxC5_PA;&MI<$3GyN|t)|eQX>5%+C})Q~8k|t49gBHd>ax zllpuoyXJrm;OReDe3f@N9}*N?wMQu4Y{r@L4d6@m-MugShs-OU$YQuoU=>sH7_f{Asexq>M{A+c$x& zZwWHBtjCfC*^Ru7+3)r-LKpPD;P%n|$cs<~SXjHdc1n!LRltO8W1NUYaG3?ihb27b zg7~&I3R!wA(goe|(Ep+J|4{tmbjs6xC_#^RtAMc5C!9}aLUml71CBJHIngl(VB`Sk z#hbc_IXl*3D|iNz&N^JD2b`d%^JkL<${Pui7HjxGnbY9F+*zwVE<-;tI3(as@WoB8UU!sc2z#@!k^BYzU7jdS#y7~ zxu%21M(KmEvDF(}+1`{K3IaQ24mb24d~U?ZCo-W5qQKY67xsptR{~2pZs@7jM3 z{nM2B3i<@TKOx4t-I0MDRwkR>ZQ-^4vjal}VmKZQhN&@}hNu-3Kq;%jJ{QuIP%R7A zMJp>YqTg=vw6~dF*fNo#m+e<$2b2K_Yf3koZm~iY*8!s%tJ$Wm0>I+%sh!r?V_Uyq zRwl3qvM*R?kd0mFnIvK1feHbB^Z!M z+c4F=T<%tWt6ezmo-nP{iyyD^7SYp`63BM1ZoUO{hV0G3d2d#D?n4m8x938pc zbK?2aY1{G5o8KKS5}q;M-@X6eN$T}m|L;cs>yH2R=>K|;{$NVKu9Yj)IANk&-0=Q~ zWpFl>`d?Zwd+jvtdCP_gwI}A$tf=6X^mmT>V~Iq#$k+H)b$-EQy*pU(D+JOy{^PF+ zr9GWX`f(Y*%>t|vc}G-!dYEvdRQ=7#D|)3WU$g#cf8;n5!EDNJ9jY5imxYAwD$8(lIes)IhyO7C^5$N1EQeMvN%D6eU0fPJ ze<61^AEmygDaH(JT;&h_E=T~w&fu^ug8$##_E~!5>({5o{f~m_!RYeJ%Fbd&*d5q? 
z?6{MPi{OFg)WRTvUj|9h*XZ4^ zQ1Z-Y)n8l2>*Wxu>))0=ebcMd zO=oNxlD@+A-6w9b{0|o9Ot3A5q=bPFx3PcqIAWK$OUs^=aA<{)1lf$_eD1Q*ibnxl zP)nIr)1n$+Ka~1V;v&V0m~a})7#bp0Wu1(@!A_9!mz2!It>V;MkiZ#H!~kM~QWW>m zCF8e|PI$2Tt@QF6ttD&;>yh~5^uvOcQ+O@I7%Xy5-o!~XA!&WRi8{;cN% z0X2}U-`v8jG_fDoB6e6tx**rDmJc{*7J$w~n%+<3=QfzwjGCk5)lbRmaU`Rn=WY1l zU;qeTiEG?yi~iXlHU$KBWA#Nc)ya;n*+fyf!d^!ypnq=p#r7Vb54H&{`F0t3x!X6<+3R`X#NmnFKXjAy8ZLYwh23O(R9cA;Y zByRwVbi6@YE!!qoU~$q6#Upsk(3S<&5i(x}H{1)F*fP=mzYICid$4yHu4?W5IPcWPtxhaXofC_4+MuH?!f;hR{#e=-r}cC^>t5|1pZduG zDs;$IeOG?}7zIJ{mF%LOgqi>C;_rTYik&tx4=QEU&@x(m`u0+S-hE-3Q`z;5rSDlR zlJjd-{OJypyi3jUrvg!tG?%iI#>5Wq;?cb~UQRKZZ0@`ML@7RLpNbj^zK9*QdHfpO z4tp8qEBQhd5p9@Y0kfK)YyZdUp=*OF0C-={4=Uk_p$7Y!knPDD{i2ya>Sa{t67Q;m zjG})#(15jOd9BP-^x8-WiRE4s8IU)u(OeLm)ANp@Z=X;Dax2mW)~fldGK~Qs)xqbM zrsLTWnyKlC2^LgYyvzzn(Fhly?0~Q62N^wkiM!#b6=9cvrpI-I_`82D)#<*QBkG9H z{*dGmRHz3Cmm`%;8l66Up(UyqAN8XcyLJ(KdyR!tn;ZSpe-rF=4P2)%nie*O?9z+Z zY855;y%|i0yQctGox3P!&~B)?wgvrN(^RZh+)T?q<6t_m{@r9Pp+z7`dAq%oVWgy!C(BQ?Q8P&(l2=%Ez z(HP}hVRp+77^t_T z>u&=*+aiotmy5!8UeZG{Oi^!jH)k&&6$2SuB^I^7BGGMD&^0Q~b0u#Ueo6Ao0&`kxmFT-x z{(B-FdOBk7<`gz&ZWA-tZ>#D0^9A)46kKO24yx`wL`tb&5yG_bgc6QBE8IYofFY(#aV`YPsr;SS5V?wh z`6MkhdhBEx4feNSwi5TW^p(Muy!@CemndJPzB%%)J|u~@`fv+R8&lA#{lj6G-6@7Z z0r8&+q9EXecraUYG<*v6Q3?@DWX{6ae<|#K2eD>)uwxsQ?=b@fYMYZXYh=FGFoCUR zyP|s zw2M|ny4h+NAqp8n15@0eo~SP5Z(KD9H>?q)rb~hpYSVe7BK=^X17xawXY|}ywm&yY z(R$B$lpn32*$$?MW)tt0Hx{qgA5#eIJn>KQ5uOuNU?g4DbnYb5D!<5BDOHFestzOu zd>MoZFuPt`<|r5+`$5k1kei!8oVuIIj{$~TXK-4_4;-xOOKT0nm{h}-Uw)p;TV-z? 
zaM){^N}xN^f!}6SkU(j{z6kk_u2ZL((`f<)Cx#fdY+vgoeR%smx@)r|gGglS`KS+& z4CZz7UL_Yk{6uOcY<#Eera&g+M@~QhEWf`kQ^E5A_m!Z$KheFK;S_GY(ASJsHWqA~352AWZ;ZT*ADe?pxmn3z4#P^;?UMc}3Su1#_MMPJ)!q;3!!-jk z&54Bm$=YJ^!xfw|2>3-}OKjD^Q`FQA0RK+N^!2Vc<>jw!d?ox5-x2;PvnwuSlw74b z0&~LQ(b735z$Fg!0)t*v0fG%#fKG6gkLfz)(f49M0HOX%|6r$-MS7==gX0?f-(q zZ+_nu1nxxuG0MYDZ~`l_N&PzEIH<0a%~YO8@^{4k%Pdv|7(#`&^Z*S8tPUXao(3Qw z8)9yr4HXvibCq7>qYa9$N2%}xw}^rFmK(kTpx7m!0#zsXx(*&aTq@@(5IuMhX>u*5 zMZ{h@aDk*q(P!O*am{0_!#KfFGepCADcu#JbBMJ-y)$?9OwRBxhd`;&N4Y^o4SX;M zz`FPE1sa%?$8Fp!$|@)Z+-0GsPQ3*$P`bRK8k>LXb~Ky0C8^07mCWeA6b^!6S-Sed z#;pL0HW13u2dhB9w&(R|+onopvR{9kE0*}5@j4lgCp^VN8jh=hbX}qI>R1_z*9F$O=W1o+-XYaIS% zhg>YM9h=X5AzoM#RMFfPg~AynCc|t7hS-$d9u!8FgbpbdYe<3mo3w?LmggL&#`N#g z@JnZ6=Wm4p=BD+J4J)rSprau3G9YuAb{7Sf z6u51Sd{Q1@zG6V`aWubTd5rY|E_Zw#%zOc41@B;t)m$$=&nzhiJU9$wpTG*UxvW9a zYTk#j1=wc;1A=*S{WQ{^^>y#LN>hJlx1!6Fzb0UE&Fq>x#aI=Wsr~gIH$(oc6EfC` z#h1re#hR_FkxX^W}%i1%mif)j0$NCVJ_f zMV6vYHrRVpj89rhcvI>(W}xU;@VJ(O13;OeuX!-bt4?m0&L#6OWcU)t$q{P$J{^2P z1&ERL7CvpA(g7~V4?+$JFU<|!mUMy8YumqQ`9mi(g8IVG_d)qkNn>tf9Wj)9X@B?J zr`5r0prnBYv9yzI+r4xE;|yI8Od5zBIR#Vy)AP4fPxm!JX6ry@L+#ZpMi|Ju)Y~Dn zYR&_Axrf!&?PC7uI6J8eqyqzN^;S}WY1{!8Z1!#5<)V&|`C|`Z zo-HDifg#tYMjG47O+{!0!A`JfJ8A>VClqZlZz-Wc6PIgnqO{Sz`M1L`Lz{T&T~57m$Q*D!gT5c*_hgXIdJspIKlOvD1l7&(~yxwO(qM z^__5bk*E9NqFz)#le9@kMZ`h3YqgZZw`)O-kVke`hr16bC<%f=q)JqOj;)n4!gggY zguMK8uNm+^&NslOCLY)I(tsRCb`_bd!HqcpiH|iQ0{*xY(jYe#mm*_#^hX=7>r0Tr z0iN^q=y_nNqw$s+Y{>9HNy4C3qTJxPmORX*h7MIQ0u<|6fU}?(oUKFifEt@#4~#xp-&r^3-&Y%Fa-5OA{S1h$OIMu`H4!wb_&ZB;)GKH zA#c+KsB-0=>vj=_36w^Z|iOq0=BK%NR+<^MnxL1L?gYL~iL+uv5)U zF0kTFnU#n)QqAa>xO|fW8=G4ctNi z6(}fdM^2u%=wuKvWr?eJ&nXwuupg1m9^1xiROTI(U8%D`sr~Myl~`ex*ookbK5DIX zP6eoj)j;5E%kK=7iQa_VRg6m@oUjBMc;vjh_tpha=@4c9d6BgF0-{<8kWZkFMtW@G zr3VO^IYCJWqn_Ayhp`M|AqWp0hPB&Q;@FV^V;M*7Me189TvCiZ%bA(v$ZD&lU6COP zc9(^L4Z`v&nCROp;BdT7j+*I7qmo66B7Q&E+>8UO)9nIOE4ywG$=q$kd zHoF9U{3tCB3W8lvP))!i*M`UC05(w4`wEC4Jg)10^?wwW>fY^=O>S#SMhGtOxe{@h 
z3jJ-c3P6yA0pkVqn~`1d&zV>66dNagDymKW(3;J>_HH&7&066S1r(Udc%4+*HA?-4 zH!shD7V6tC4z?KdE;93r0Z!UftVZ0-&;ec! z7+k!G!PgHi)q@Hx_1z313g0;o5bQGvkw=0WDx3pztB|2%+a^p=4B*S~g#?^OrL66~ zZJ;sG98F0Tsy<5dfnSJgw~)`dZe}WWQMx_3Kz1gp3C{vGRi@3xcwx5{0N+0Hc13gvWFxm+ z>2EjAwaJQsLJD-13??)&dXpLyjeHfC4;@vCwShMGzP~90k{ns8l$RZZh=#}bNKu#$ zRj)3OJ=sJ;bau*tdfS|>elw9KlogazxIEjspv5yQe6iy158nP zd}@CY!$eef@bX-#qP$=j06jr5s*IJ55Upk(8p6tS#Ui9V3+Vh=>39PW+}PCHHlc|U zOx2cWYiFdUW8aOXJk0}}){lV7I|A@IP-xzL**^;|H95RDJofQ2 zRi1Gg+^F4_*hvAf$!jTA2kmzF3=+f@V584sU^o^H~K^U28W-r^;?#99-q-jOUw74MBT*fTgd3T|i zCQzN9@c~wM_@j`#=TB!0ndxLlmrC1ZMV6zF5MVXoDli8VGZvRNxi*~!?VqmeYtNU` zSmdhIT`<+h@n>@?YF%#7{L*0&*{D2ybad<$z1QOIA6l>V0tmQ&`GsX@zv9(l^wx@E zn_gv!?qR=)p_|w(E2O8yL<-!Zu4LXvZ~H=p@H+($8D;J#ky9(0i_xj-XH_lABh1>p$-j7d^p0So83=Db}Xhn_nAe1ab!8KOJz)Qg+jzoFmuRls8A6Fyu z8Ryo9)3+_`C@8yUNhqODqIkI#KWgf;H(Ktv3VpvYz*#g4YO4DP=8%;D9Y|jY-0r?F zzwDmN4&LcE=`nm^NkfzNhlo+RAeX*kmrjQ8q1ccV_pqz9+l~!0U2s#dp2nF{HiEX6 zh{(*@Y&x|`Q*m9X=_eeU}<8ix>^>ur)TPYU-dPW?wv$O_dxdc@ zrovin{Y!ZX-D`G-&Jm6cOd63urCUU$5KnK+?ruKQdKBbY5sxVDX&V28^5R-RQ~HJ% zt;%$xTYE&(tShvu;K{q%?dWr!2AQesMKN|yg6R)CwNCPR%;e*EIS1C97+JkGg-yBj zGCy2CWKYF5VQfaU10b6pHFq-~&)FbzR{c?}6<%KPT`(nuK%R!Zrz61&tdpNkK${dq zLlWA4xfJu;uPieh9>!-nKK0-!gymuGh({Kcp*ZO69FB#%xf%EYXmrw~=vOImET6JZ z_e^3!MNuha_bsj6AxNU}++Lhg(J3_xxi0v3n)E=TbZkvlQIY#bU_d~dWw~cD!`Bh^ zOK6TyWXMM9|K9a>KcXbABmYQL!Ruza;f)wyUv*=fakctRoz^SYxc2v2;^Whf7y?bk z%JoX-Vrn1WzwzQ>iR4gRxq_Y+eY(ls#g+|h9Iu!`R}%I8QXW$=U$uQHviA6ReN4dm z#%7FN=~FGsp$H~{$CCDcncQ$RGjESiX7c(ZGZID}$-p;Kx{&@!0->+Ay2e~cGpSMv zu54#*rIl_er;$&c+gbt@zC9KSixfeXl4!#)HWMnbqdg^m9_k&H z6?5n>HBa&XwC?-hMC6eC4T%9t#s3Yy4|49ii~SuXei(THJ^!^^{CYh-S}$}Wol~Dt z>o7l|d$YV!hbKMkHENE7QNRemo_VWh-w^S(xQ^yid+l9&M)xdlDCVrRD}J%gv4U4f zZvWyg8}7qIo0u@_&e-}|@A_H{7hGW+mLr-vEjB@DP zOCLVaU&hnZ=B2?#D~aageECu$JONOp2;`Azrxou7G(|ndTR5d->`o@fN;Bjd-W^Ta!-X?tlA62 z#KfWB*|J?fV+?V>0>2G{mia_y5@`WP7|1$*XS8g428!xA=yJ^AKl z?543mQ;`txkEWvSKTSpDi1odurX~uZY}IUqn0}JK|HwX8kYN=sdVC=9`?+(bmoAHC 
zJSP{WBv7*1@PhnzT|fR!FDozrjYIbc9ysi-A@e1IuUV`NateCyz3=Voi;0RdPT<{~ z@Zxiv&7sG(AP_@#4JUaN?6FpdyKAyfo~TGl)?l`_NE6Ma08#muI+8f9k zqk~M#kkASbi(@-@Qy?ezkKaFg__0dA&hy|P(XMXzx4c(gU&8thc2+Kb(Sy4V=-4Ul z7)cQ*?Vp}dse+&>ppPbUf2GfElq30xz>?m|`IwjOYyt}65VhW3m2x+HF{nzgDh0FxQ7G@n&Xcugt}rD7MFPvJZ(S-m~d1MF?jYH-~!U z@DdOxovGe=;9^qzwpmoPkHj8=6Qz<9Nsp4j;QC7;@u5Y3E)px2t*1-Qu7If*|+r;LGH-}OTTQqrLlii3&ZW8_Si&Aop z%N!bQmnZf3aZN{OXJ-#D&ZDOLeOgWqi^Su{Ek@-D%h!V6B_(I%C5l1n^}VM&RM+y0 z8kCO@cIx_-m+mJkn3|fhva`!EE2i{=^|I6Wk}yGdiNpmxp~sJM%&&1?nqIs2Q|jpH zUiVvGe)HjWL#UqVSWQnbewt7r6x#7;{*HuW_ju8KvhA!rkoT z-?|r3@!PHz`~GC-B$%uXA_lP5^Q{W&G_d2mY5j<+z=5bYpd$(TS^SwYzAk*fJcMm! zyT>TqA2E!L7QBjWigB3zS5D@faPboJW8SW~+1bs%?^(%q@Rea_MEc-(U+eV zc2(xDZQulzs)6s*hpaUm%qQ9r)gg_F@gu)HUR`QAkOymNkv26oW4nF(AutL9K@{w| zkAkR41I;=Q9>HDFz3fG`d8w(Xj#$dE64PpII3omtyRuNAU)MG#DsnJ9vpkR=r(=G6 zuu_!e;OtzM;&tM$vP0mm3XUH&e$utG7mSIBNXyJ*oTI>?juf(0OU*k~G|aMSy^dbX zE|L@E@t=krbGVZBBd4&4Vaph6+@CLW(k=%lV{88N1O$8`kg%;z!p3lpYfhc< z1|DbUz8~9MOq{`>8>-VK_weAra(i**uJ`9(2EYT2TXw3e?`u~$$7d<{=)8XYI%{mO zNGJQ^dV^wA_LGPP$b;&GBvBv9>Or_Mct&xSgS7NtT|#K#O6*8IvQ`mV?}H?|nP<>Y zf3oLPJ+`1x!3;gBO!3B`CT!2sI*bbSR_RNMMDN5w#aqk@VE92KNN=|)9GO1euBM7lfG11N|H64Ko<(m5a^(k0zU z*MP(j0}S80_1yn=)pO^06n%i1J!`M^#xLF}zR0t*;AcTjkjX98sQB}?z8N}x-B;j( zPaWNM&B5?!=)h^nk?W!*fyQQPIUEMjD%NLybQSn@y&_OR<$GDjZjOD^{u|6#@eSdO~=| z6r`nJY3_Re{r6l(T5Ru!ThFyDH1iE!o@05Z+pEH?{rH}b@udrYj&D*^P(-j5een9g zv2{dA8SS#wTT3cBp&%I=T5|Cp&E(nWqR zq^+J`)y>C|pCYckqMl z?--R&z7}jQ5^ZXKTwq-P8YAGmmi$GbD`dbK`&#=X)-COvBoQ36ii z;wL61LZYLyyF$jnQ)Y>OENCT9Q$0+t+5Z5(Uo$S#NGGC|snT}s$2pWtFi$M!L}PRZLP=O4Y%kc<=BY`MGnckd2B=`yS}&eWeyw42DOc>y||fpNCdN zL_||-YiL|`+@Zb0WXf4#3wg&*i>o zI9_BnAW3|Y>s!Biu-(L`6PSSl&DGV_rk0iv*7j?jo}Pq9j;1?c1PHhCIAZ&5D&PK9 z5!>UZ9V`|+9T$v>n`#J(Zc}_zD>o%Wkf1${49w^WA~SojGc{1yLI?jhVo)>T^nqV*$qhEgp(ha zrHr9OHSW!3FVx?$p%V3|Y;gAQ@K_iu%DVUZVot2EYq|YBiMYPXN+I|4);`=qGrRp# zf8On_Z&+;44%%fNLMR+;>6b>aJNyD&5&L8_VVV{_IVB@16GE28=ek-_!h)j}X7+n? 
zG}9=$(o8Xf+%|ldyxU&F*5e)JQwi60h&WYiwfky#SSv>@Pm5VKyQ3VtXwSfw+rIGwg_93Rs9HR@LD__iIwrx;wbE~Yv5zkH$aDXG2j_zCVFVqM(`b{cER*Og6X|5X3~6@irgs1Ky-Dz{(MT2Lis z=KuDIO7#>~W}qlOZpp#*7ArTma`gc|3Wu%ulw_3P1l4d?#fo&#*IHjG^fflN2iDfs zIGar~nRnOalkNrPejt$*=0P%_D2v)dmliTQc6M22)!go;Q0AT@Q&nN?hNlv&lFe40 z=Yfat>h>cz@elLX&3K|b_NUM9ukNmtA3QzA)l%CYx4^(h`za+XC}??8#M#-|WxuFz zV}ENblyB<;e&1y?;^4_wQqj4(gI!T5a>GhzEFtTEDKw^G;N&dmHidFzb7d^8WYk5m zE6p=ooW@n_ytbWuM&E%sl&c$qp1O*Xk~SHA{TK(#09w=4VWoFTy6+R}{FJMU?%lwxjX(x6>&iQo+cbL$9|9HnlY|VJ}9trkW2G z8$(Wyyz!_!jBQsPkH=fYoK)gHXZiEYJ_3T_sTNVJwEzuG=w*^hswpTTdhlR`PH$SNeHbza9QLA*T! z+J`p51Gm2QgY8=Qfbl0JzG9YWY-+6AgmuYBpSGz7e@Ur-s;irSJ!IKcxl+xp+}{z= zi_%36S)lI@m6$WD(!k$cF^%td!WcwwTyZQj)%+I%X%!VnMKa-a>dFO6E#|zj#oi1kfYNdttmED!%Z&QCFzlH0BRnH8;=JS9UTI#QUwAkLVwy3c=G!Vl_3`{nj*MbJew_x1Hn8pZUj3n7~@b#Rc|RLJ9jb6v5S@kJVb zH?@I*0m8`-A8vA+b*DFApFVv`bn+x8=B;JH{D-H9Q=L0IJBf%-GTQ|X7MX@8tWOB< zE^%t)J6R``a9fUQ%|_eiUgR-J|Mu4dZ5TcueOeBx|lPBqG<~x%2r8~k}ql)zFh^D8f$y;QkrMsV#;-zTzS03FJ z7q1u*-)npuiQCj9{DTWZk#>c>A{;WMv%k**Mky$r5K_ z1;M0nBU8B|?AfztQ;U;X+1V^HeAZC;W4>f6Glxd)w%e;i(Y}J-%GD@*lAwcj4R`un z^ZXe0^CGHDsAO+{zX$E=SY$r*pcA{m$Z%Z_atmZ(02jLP`@4niyAD)>GkEULs4AS_ zOk2dH0_n+GGI#K6cVm8eT!+Hffou-Z+bJbj5tpcQ!WhV zM12mVL=|HO$ty%e@X29p+L@@Tz05=nC8&R(*EkXWK4BE?YC62_N_O_F;?&@$Pj}G! zy_#@$^lq`z(cP65SuGuPJvgk+a<0TBLS%pP89~x|o2c&Ah+{R<>T+04HOLPZ7zI+h zPw6RcOF5{7iR>8oPy%o@tLmo!505wcGX7bwxM4Ph6M?*Y&i{ zC^8wy>|TT&tntlKE=z@VCLNvL(9m$EeGJZF7^`OH%1j%pbuMjd9B(_F83PN8wBv|<3DWIAO!v-(E*^*N z)7@BLMlta7m&J?N&ZI32rxdtsEh|n%bV2HrivKd#nPk7tTEYZqUcX1ogEWWb;fy#B zocwy#LDk=`$GT7b*5qjO+t6hNCVP%n>L|^8g-wg3da>)6*u|#_sbNbNk}5O_g-I0Iej%({`T!Ut@>@)Me~q! zQwK|}C%I>Ei&9#ea9bdmcbW9RPY}V))jq=&n-3M54Jc^aNO+dG>5QVr|ezvyZn%aw`@* zE?M|YSzrD)E_xoySeJclpLEe@naC$iA-qzjR5?zG!Hx+UEHuulSgsTU#EGR86O*x$ zu8JD<-i(fjFeWm6n|$l}(Z$#kXQj$b31^FqyY86}7IvDKj9e2Htyai6oXPnc`p~vC z5X>!?LPQm8kfAJoImuF`VJ^yWs6whN%R_b4hZ`Yc_^CpNg$UvmwsB2qw;t_m@#*4c zH-d|Fo+r+pqu?HR(!K z=6WroOcdN_rKe)cKEyH05RZFcYkmhI(%ldC9qW#g7(RZ?L`PSrL}^n;oI? 
zLW;CCu){~zXdUK*QXL(F}wV(<_;NkSTIx0KUz|4hO zW11(MOX|72d@<13D**t!wzlS{<+(E*ZVEkvMVz4BoLos?p?O~$8K2!;$K{eO2LF2j z7o$EyMI%^jsT!WEeKLmT+_`JD*QLKoQ}UWCwXe6WJ6U?-chZ^{m&nCX4C@}VTMLFn zmo%z9v=i2ePM(L@loZKh4myaZk-vG3O#c2T=1bdnoh3M~dg{HD!*0fSLf5F$DFVKe z6z3>ZvZfvhW9<{K{~Nl`&H=@Kv=*hRUN6d)_)yv2+EbmmtKXu88KC`Jw{BTr9+eCg88)B8jH0D>Hn8$0 z7!n#feP%*AWJ1IUa>}GpNO3Q?1c1Sg3(VI9B@N zJq~wm!#M%`Om&XpsZ9VSuZo)5yDwk95Sgq<$7FsEh)S2Dh<)^7;I{z_CZ~K2+Jjl} zpa!wY(#w-Pmo>%w(U)M{Y{RkPO%GG8!kvaUw2h4XYqUl>DMi2ZEU*PLc>vA6y%<43 zvF7iW`G%8|=f&BC1z0!rXbcJTr32Ui7zvNwd-w^HV~`h%{efr6&W z*`QiqnaS=DrIKD=3#HkJ5#Ap%wmH*Uzsxp9bz?r$dnkTooFU^nkwIT~Z}ExsmF$XTI=51MpYA^f|+ zl9l+U&yQw0u83r6mMqoW^c-^gY|8==z@z$6wVD0=f_yELj0{G$r();)?}n3JN*^eA z9K*w|JXt@IW822BdYFwSe!YDq<;9B^99xIFxqgeLNqltTuakuuFaFiIz)}0=0U@b+ zHisKsT6AzNBR^HzM_)YdfbhnhBWM1&$^CE-dx)KagVz5tg^z(65x#nL>+cCZ^Wdwh zXHY<{{qh*6fo{aRw~Ld5OkEEw27j>B)TEp|^mI4)?OU12bF7iB8E+Ho;$AssQ1ROU zl#!fLcAihE+ct^T=;&1c`g(Jrti9d^pF@WZlb^Yi{q4o^y3e08WfplBdMZjT@|daR zqMLD+4_RrG8AQi)GK*|yTRMjXTLyT4zJ;vQozqhM>}TY8zvr6n@}Ack^;C4(a)Q||?6E%J`o z6?hzA9I1p&JRSgDCz+*+3f8+|(wiM2;INbq4MB$OY@5koVTQQt;mqG=1f>UE3Ko`v zRrRk{l5V(j>$*3KQMdnWNQ>T`dl`u=eq-#QA8{L^xGe5-N|&awzC>&HFoEABAGz8r z%Lw6(U)B|5eM<@z4(1d-m(%74*%^Vuk%{D5-FAk8IwK>uhP^1!#Ceb9J_3u1Uj25B ztI+H+8+pe2`oGhrXJ*Q|tKWA-77R?v3S~e?r@9bfYAX9(i&*-EzU zDe5vGzxewUp*j!@C_*o0zcDAT=nhc#LT~H(Wm4D8B}OivQPv;hr>0UHj#3^&74F5Vo|veSMqT+f_Y0@EoVsi_mD- zn3c_e)2B}Zj+!6FwlBeH)YjcbCFmCu2Y3RIGbWvh-X_qUFPE;fX}hh~)wOKl*R!BD z#XTJKMR>|q+{DT6LI6<+2*?H==C|!mmC+2K6xA82a6~0r{IT+@v%{<4Dd$1AuTsiK z-)NHEjQGpGNmFGzJdGwndU-0hjR@ovPjmc+GZQqWDrfaVlo%?pM7=3+Aj`J|90T&E z=H=NK44?qoCh++?kWHK*gtHa(>3C|-#>{#+os>JLcum$J1ImB3vnw#9Iw2tiftu1< zW+{CVq}b+B$e+WD5Zn_BFM#dn8ZQw`a#d^%AM z-6_{*Y7`Sn?}OB@N`rATTbJ!OryAei&yQZM_F>|9)i7^$A4)zF-deULhnG;ZFg#uF z_U*ddXuaThfIqNtDdGO@K*^cgFOJHH9#pm{X_h$T0QTwT?k?=o3mB=kNsOT5>Kw(k zIMiF;o3%JyIVoA$b$xN~lYz$_Gg4BN(IWDkR3dKo1s&eib|6{-`dGIwIqpK(cL6V7 z$^*M~PuDSqcD^G)4BEBl2>D3Ptik~GvSDkqWwr8%fc;70sT0J+p*<#{Blz~B>UkXx 
zkD`Ct7BvY#3<3A)`br&yQwluU%Vc7u81Hsu+KEQpn+`R!^p~6#au}h6^hr z747~bWNb_vO(_n0+t%!M&k8yhl(V!=c*orQhjuUV+jO7Bls*|Q%>#TQ`?6T>n-PmX zKk{a5b)d+XP0Y-jWD3sI#ip!Wg*$it&ak-PbLGm_(06a-Yi z)y76A7jC68eT@|iDX@DPJ)mxgyS}%_H2?bFmg9jc%ER3rGuWDf$MBB1Y(%eA?>=y- z7*Se>Xai)iunS4cpyS-bi)qQU%*(I-B(*f{t3peePx=stjva54q<;{5Jz#%tue;b> z6Da0#L;o}lrr;hE<&{zQjb+!B#me<|-jT4kZ<9iqRmjS7-76f}Y#o<})%D!NeB*g+szDNJ~r=cb(|H8~ksvZh5@PfC@e?ANW+>5TuMEcIHT zhpPYiQDTms?6z#qnZK1DOZ{{CnHP+7V)M_nseV>1n3k4ZZ-~7o_vV=|$$Enh{KSU= z7A~$kVn?-2G6w?U;TKi@?-${_UIq6-L_zu5U_JWHc$3))0xz2r#3y>kpPr90IX5;q z!(T~%@+R?@t!F2@0sRFcp;akv<1^*LDBAbRl`Bn^CAD8WKqiq5)q>)PNznps(UM`k ziXjHT(q`P}#Z-c*GTRxGmUoNfm$bO?g00t2VduM4$2sCQ_#$8{qd9j3=YzU z3-;Ufu4C0DM+ubZRkC`fO(|9y56N{2Z+yGThBB9skSJdneJLp=#o84MFfx1fUa(58 z*1XLSmUlQFprjCE-4Gp+iXdBA5dw-jf)j@eFb%`^$Je%_cIMN5Z0bxQZ^Eie!Y-dq z#ze=M^l|t~&dki1OnxR#(TRft60YkpP^%rva^ps43bfklbHgs!Mbi&n%`!zTEz)S< zL_xtt{w0pS)b`){S2+u~xUwu&rd9k2Nmtw8ORKBYLqjMr`W}}Z`yI!{uX!M8`27K& zkg3c(;=aWfjsup0Tkqo$Dj}zioP7>XPVAm-yUFO*7%&F;hEgLo%{hk4U$fHCYDK=@5D8uI~oU&T9d74Ys^&^xCFXfM6mT%RwzNt+S($n!g>8#Yb>`E z6n_?|(By#vqq1&&&Clof4*)Um=4!b(9zL}aN%=UdZ**L&y5+XDCrpT z+{N$$!is9y^;B1$pa6e*l=-0e*vy9JSMQ+qzmKifW8|vABK6%S;vMamnN}P*I zP3VbKB} zn7aq0CMfo*Z7m843Y~?8rOsk)!JaJDcBP?0F(GpiINTS!8nl;L73^Q)OhMt%Uiz(rD4dZ`B4^lt0>lT}hHUuyb zuai~)@rD;1vEDVuO$Lrhk>3o9h{&k(KR@3oPTkpHit6msbx#?-a>)BNwQ%BjUAJ4R zq!p5|X?j7>Fb$GEp{tO9+l9BXJ@&DxifHS%*Gz_TwO=kiU3j(f`sX#j88KB)r8N5L zpfOJ`M|`X3UY}yaJ4&*S-9r@T{KdC^Z;S~DoIckE3SG(S$pkscXC=_7M&eiNaR#4H z*)McUfPhdXXRwHjf+C}(g#{qYCu5^%jVgU_=t7A`DpW9okzCGx3&3M7i_{~qrt5u$ zI1zV*S#UByWiCaU=5}rZCjBIc|A+}>nQ~08CK)AV7WAp&P+%%K-6>sLN96ilHHVKC zKB^}H8AFoNt;^G=AvuKWIx9O4dqgqzGl`1oavFJL!rsmh^Cem6E-QdSHtYTS>jP#x zO%3-|0p;0T8qJ{+bY!}5IpOwh_VMSwr#ib!#@H?lVI0u>hJK?i>oPq#nyNK5HTh~e zsobjC_Mtw+73KMwIo1HST9i$zptmPZrs8Nk_YzLmqI$j0YbTRn1Q~>`XhEHfz$RfR zwOq_lal|1PYNv5sm*p~}5fM>NPQ`z!crv(TsIIdNvs6svs-FV9MfT^P=^&PE8FcfiD5wiA5GsI+BA${iKW%(6e$ z){>9Y#J_4eWteO<*O9=7=`T|R3?{VovF)PBHF|neP-<|&=^!E{Rn1b#>B(1?0v?i? 
zi&n`4k8f&i>;#CY2#7fViY3d+%2I^^^*Kzs*PH`thTrVVCYN+P$LC!naRq8|D;6aBcekKSC3FSUYMvl_C>Ha0fWC4!eokn(_%$IQxf z4C3XYXuD7rNO=M`${+*;E_Fem3|vyA653huWM*Pgi1WmkHJif$?F5NQWk;wwG;rLQ z+Q5yex3W)$dUH~TpgKqRaqMEBYet11r`z`N{rBU?Fk{v2N{#|CaBuD;m3GQ6x2NFxqS=%B=lneK1l|J7K`F|$2^?Zri}|U{5ByzBvcxiWsZTA zY2}^vSWtJd)!d?gaH)^<W7sWeF@9n*<|LF7T!%d(Xb&AF!QpazJ9S8dHw}+R>RA!dXd{;zV2FOwx z^ci>KssmrV=yu;4K}oz#b7C<&Zulrq&943Lmo75{vr5$-`y3d+NuWFy8tSJ2qJZ0e zL9l#&CW`#b8HHT5zP`R)Bi+7mw|d=)G5LrFFD$eS@KW|5A=R4Q1&q-1KuVci zpuA(Kw)Uf8kw&xqJ%IN%zBIF&0@pY_cr2am{}*xpd7J5jmj(u!a2~HYX5Cng9ExXo zHdp04KGYCVh$w`8vuelu?rH=RixS-IKHGL-RIM+mam^kfZaO^9e*D8z1(YSU*rU!{ z!k}AI)p~2C394=T&b#N0@s-XR`38B#W&>BET2+=J0pf`n%L+%xNC>2=PJvZbRo@#+ zmks;Kbxa8kQ=*F;j{(f4e&5dab~*qPi0B8qFro>yU(Poj;gkn4qX1pI|-LH7&HQ~5@I zGndu)Upu?I748RK3r$vG8>jvtAf4|_@|7~5ZVJ_z`vDp#9m(SC8e)wn${V7RY;Rx! z=hTr#lOm^ooKplL-e7hbkUGcoXmi#kIoZhqWN#n5_sIlsF6xWD*x@SVPB*n!Sn2T) z#6JNP9dat+n_|!_8{M078CL`z)07p}v{2Tzq+9XCr{xB^a9o#}0$@?!<@45K6?Yohalc#`k0?2S>Z=p7L zIeVZCz_y?y@$zL;Q| zO5%*LM4Mj&QwR4djxnvVY>$SZ9{%TS{rLLf)4UbKY@lxF>OfjPFlwFXeb?x$H;eU6 zCC0DBxBhL6pAsv$5f(<1#6cTl)-8vs-i*xH1d$K}sF=i1mH8ZlViZS?9zC~XsSyfa z0g1K%?X%HK2;<6XBBuoGRHMFcPc`)^8+I)1b)7||duW|#Hx9K1{mg7`ylI!nc zT3`rJf-B`xt0z`*dNt3UMM9^CI8DaK|HLj0boq}!SD>W*>szrp*DPwIHqU8k4 z%0v*X9}kP>#>P&NvrBXP+sI7zvPLog^;bN{&_5|;{HUw`GmgjZsY`KT_ep%~H@R=$ zCbwz5PpB=T<`WlJzlq%VabRQl2g_XJGG^XX8xKSD@mN8cy?-9*e|l@9Ecm|2OscR( zG5+;oufh!0A~QN#3p0owQ{r@c9q}2W(2TG;f!q-q8XDn)y_1nv?M~)nRp|KGn3$Yn z2UstV8j81S7JkzQ(IbH}>HmQe!w96os#*Nb#HZ`=MPBpaH@s#4ysq+_tDIsHV{R!e z&SMhJWpwNetfHQT+SVzzeFEOl@%7v8B5(3rDO`y8Xl1LCEjT2g@Riv{i~jQr@Fo5> znFG2!nm<+V?X~Y8#H2*YUsg6)X`hzySnZI^WU5TmS}Y3z!#&umI{Pc8xXmcbDLVm~ z5}+8r&t~%C{Pm+~KB#|)h|0<)Tn|}oHRI>x5Gu**VOsRwpW?JDeMZS0QfIO{Jo^8y z;Q#*RRdCzDAlae}t72IKdqs}**C)P7JX1Inie3^obi1MW`qQ1jWz1L>Dq8jR8@lq? 
zf8X`UlY9RC|eTBaVKBoMB@REbC_}2fjF4p~iRUeBzorkUSBE zbIc+3G9@2UOa9++?(e_(VW<@M^FSCclcw0<#1?i@)!qqj1&=B?9?zbUyJ3$Ll2S0$ zihg+XeTV(?vj6?XYwg$Jyj~`sgJM!z%Ep*Xmw9bwrXU0A>Jk!{-@aUn3Z>)f{;^sA z`#Jve@4(;82__$n3m8p1Nt^c8PV#?I5H889OZ?!^F5i%TBfmFyvdsN|+&wSxU*}oe zB%g&H1Pv+#^560Y@&?I$|1VJe^L{hqE2)M;@s0KQ=0g(@cl9F~H*k4ti@jn#dQNth z|5=q^zRKnOwlRqsMaj}OWYd@(dx!#;O>-!7-gNdck3aJ_P-FSu+ewM!SKK1~(9((3 zrfs*>d{4ZD4WCdkijGCArtg(Phu2dMH}`+XERf4fcY|P0tzR%;H~!%dB;Efo3MfIk zfA;)2`RUUMP+o#&LK?8Jp`4#;`t;kEplsQGT%3Qd<*gY{Ajb5_5vI8&J)$a`cPvkD z{d*&YyrvUG&YS#u+mnHiSVMU%hSPy%X|O0cFDMF-!r`N*IzY}w4qCtk+uYI#A}T?L z>=ySbkoGQgCW%M%{glH!R?;bVp$c;$=qd2xx3Bq;(yF-oyRgEuy5G$m}pjZtJaxlf>Y{Jwk_{Tt4xc)-$uryY+QV z@Ak_sHA=(|^XCnISdrINVugDReS-jt2AhvL+$ve;*>{ zv&c=ChPBSi%i}ne)b9)wI&j_Z*vh8PfEM9_pu-YFsX54WE^-*Wkh(7~53L(m|J!Y_ zKiLT4ZTdHtRdDn)%$Ac!;mDZx-$xe0vwdC4?cm=(dm0%P04J|00o)E2t%zP1`mHom z6j}v9(%_IcZ{p$gGr`zO-g)J}-=|#Ng}5EN_>A*bc&sRi1WBW@D0bBmeX&8* z%4IH+=1UAY2n0lKg5Q4));P;F0A#_kcHldTY7o zJUdDp^mSz=?|((rn^-XUj1vM0$wf#4ke?IkCz>Sw$_0_&x|Qw&7qj;2!~Xcet_XJi zH9Qf`8k_jzRF3L>NuZRQBBB<{fZib&GDv@hqP{F;_2PdYubwkc#k|f>O;1OB24eNc z1cd)v+i$d!s5h6#s-Hb&!tv2GE?niO3hr`Xa=YJlCVb%R*%0&+JFN7wJJ2!$J&+q zF^&1aoN}t`ebE0t20b)z4CtZ+9djop z4|Wb;y>a6P2nf(ayUEGPh-?XwaseBQU}98MthJP$?|+H}M(nk-sP~usO9c7J9SpW( z78--D@~_MA&RVFULc(LimmK0{+)<{gn6LKIs6)g7mKlqQPQgFCDa?FM&s| zI5TlJBq->zZ*2lJ-Cz~S1mU$wNBq_D!bVzQi%tU{l^bN-}Lel(-=)5@ufS5Lmp} zRq2hCnI16}z-9e++G?6*2$#d&E<qtOcTXYh-?${I`9p@Cttqap_CfA@^BZXf^ZS2g(ofoXXekukumgTdtu0V8W z5iL%C`&tV^foMp;A_3IGgOj`TbaaemWS(9rw7|X}{xPG*t7*oH54yV6I?m4yzKRYN zNoOiNd*f)o@C>(s3=w%t! 
z{Ue(8F-Aa5hi0X`I8La`sqsg+*0tJtagwxT_yn@E9wf+8D#<$dc1>^LA4JrX_&(p- zJ4`^-kJ;q(Ao=uvZv7WO-1-T9S%Ph)IwQ;D=nMiclUfVM2ItRx927<#<^}6x>dNnW zVEEKYBlKi_plgM9fh6Fll>m&zM_LKQAQa%q&&bZ^nDSFVdBT6WAAR27h=~D_kx`uc zy_XU%%a_Y9bRX5s>rzrctdJem}HQOY=fxXz`(#M z{s#ggHvq6*xdX-|lcH^ybA~T8Hb^SaMqx;b>TkgoAGW(wJ$FkJ`bh&GyYYykc2ZGTUOIH zKcY646Xw740JItd02$zoTCBgtdJ^3H141LLj$*5Ro>Rs@u#6HJa!b=$0RLbUPc@#i z(+CMNuJWORVx(!lFdb~isgZibHv*_2Z*+Ar7qFIIzH;T`5G%3^B*0FZ2pr$JSE#a0 z4FFir7fjH=xCs_;=-bY+T@`yr*zooanUcTI3dy3Mf>tE1ntf0}K)x#byzkO9M&JqOd!vtV4;dlurk4kH%m+ z#LOG(vcSk~F_Gq08mG)CmD~eXUJ~+v#i{=9$J;|9e#!$@zIRl*^%5?v9*~A z!yBpIvlE_1Zx@EV#gyH1b2C62c)9s#(Ou2lcu`%6RA8qZ-A{c<+vWqE^=B~YNH@3fJ`sszK){L*WH%!w}({}Drb^+-SZg0I^ zrbO0dlGq(z=Zw_jwqs6{_Ta?=4*iznsC{_z)&;2C5aB1{MOH7U>Ii9kD^pUv+a_2J zObFSfCA8^MHEvA`>M5A2wOl3)l4wa3R#7AAUI5eaOQ27{jfvCDd!N_cuurRS1=g=9a*Qm9KuLlwt3*@+Y|oWhD~H$ z>HzM+n3=%gYO(M4d6i7uucI zRYC9ngERj3?=tw*>3>Z+1TdRbJq!tf{`cv#)w+EgBNtbplBsI0S_XuDMNFhAij~W3 z&JLW8Thi~+Mci?b3%(>&OgH-Upbq+O{&%SHKED87G08mvKAR~9ZQItOR1FuG@-TK? 
zouzU=7^q{7KB(CxoJ3fSeRg!W>3(Svn9_kLwy)VP8T06YDMgk2@~u-~L-mSe`RlKn z;3D*U{aOW~n7d?n=YxPi2Y8e=26tt3{5dIzbJbJu6KWTsHSP}J9Zg3<+4{W{4Bl8~OJN8huj z*FwX4T=T?Pwac!87>8*Zf%v`JW792-L1xY{2;h|$!48)RQ{3kp9NE1bn3xj#B#vra zcP&+TtZrZ|QPuL%9WLXAB5J0?qQ?-Jz=WjY^b`g@F-!T~hy`x|SVIh1C)<~Q+yOcJ zRolM^ym%=#;gi*%EfQDX6WS_yj6gz`??CooEMzwqnxa$T(rVJxfBn>mRc%0uj_77; z9uBsRbP;y3H^Y5*^KP!X?dOPKyCh1ZQLD)SBt1{t;cYP+FvV%O&EQ*zs;>Lp0d@6Y zT2WC^;q8v+AW?j)>cZ&H>_gJirm7oix8NwJ>%Nl$j=Z_)T$6K$e7z*QORXQt#0V(E z2dH(vJWEe2feik6MZw7hwNGFyx68&8P^gI$R)3I%>INFP?gX)8flD;No}>fWu~HlL zBf>Pn=&z6C z`}+i56A_*y2GfJrzrU2%ML#4xSJwTxa*L3lo8=td6`7*h(+|40LKkf2Gi%s;Js=cU zK*_!cgLG;m;m7cdV&z?Esr+wD*a|Ggi=qfhPXwL?5Ga_g#EiHxF zlDEO=m&&&%JX|KReJ9sA&av&?0>+u1hDP5;$h)0Z zt`X#AcEI~Bfb*R-9dbkaLDVP@H(W2cm1?4&`!3lyU%dnEMhD#_RhDM1+4lN@ngPOzHRY7Yn?9SV!0l=1y z6_|7v%VuMlx35sp(4b&gT1J&`br`_ z{<-$ILf;-s?lw*~ZBMxyy5U6$wR!GbezX!y$I!q(v$7`0jHWi-5{sB?!<2X0Fs<;!EuB= z9}*d;W1?0 zRw*+LV>X*eT3Z>`{*rMsvCvUxeb3qZ`t4e>|4^I0WAQ(d$$z%kKVO;ZL2QI=@x&)V ziVx-E1YE&u z0fUyH7xNq;v2sQ_Ht?h0^%=jP_A%W9?;L^OTPFpvZ0$v^^f`i*CN9@r7Nps(+l&P< zAjsZLr(uA{K9*Nnj|I_kypF)8X|9(VaZLx;`Jem@BV}M(nDq%lrY4BZ+P4P;9GI$n zdCZ62eMSgn7$cK$*amD}+J}+^92S%^i(>OZC#BfXAAjpP44L5#jJ>hmC5x>&w$l8# zAk?G^2=yxWU8DgrA1;)jal4+=ZDfymyzJhSsrZL6uqZjP@_fkbSF5IouQy}gynFZL zqWHagE_44Dcd=Mqbi>bYHGURqO7*xKZJ`d%EGxaC;1oq!59Lx%F|^#yrj9d|E7%{X?}+K{S$0g7pyHzG0&`K5o5x*^JpD4`;bL>ao!tt@eP<9(yt{3P%>HZJ zE9_nrjkrnE>ZCBtrDB>WrftidA+n2lM7Z9MVrlwZICoHTDxiurCp#5m9EZ-k;i6GHV zNlA$?0l(rU5-DRf8@STinJ6J#k?fG{dqpdZiRSpIuvb{ zYtzRPe)X$C9hH6&(_H>jqAG`w?9)Nyj=#IM;R(~N5D(kcK%wlVY!*c{2zy{Kp9z+4 z_7cnp)P!L&e*S{#&=}A85}uU4%r3gWkpzRu>_Hb7f*Xc91E~_UE9{h|3m$_gC+OMp zXXJlgNRM7JLt6UT+!zg9mjcu%`46>Wwgz<7W4N*UE<}Pi!H-nOj{-#!LE@~4wvXaL zEKdtxQv4GW6GLEj!bpXKF@A3cDcfl!#@yBea2t%;7Xs)9Fb=}_?MVO>OKCG;mJ@Ql z_A8xYpv4WU)9-|SXrq~3)O0gj9q~M?Re(9DHEIi;TdM!$<*XYR^N}Q?2*v|#hXDeJ z)`o~*@wS*)XmoTk$e!oHc;A;AKuIUTj@}C%EH+-OamB=%50w~WGnIpBK?K~)Uahsy zR(7W~szeU*7aZ_HmmU+DID8(X+!JK1%+~(a0zjM8o_l%E>fOO%$qlok?C1LBp-cfi 
zRCjOA5En$g!D4&U8XP1#B*@{~J!2X3>I(~%_7|TX(>mmbhfBs;{=l03 zRD<4r<&qIHOL0E0++##Qcx$P^WvB34Xrj|%9)5wD-E_3lIaLFkhDV91vu$0XlTvTo zAR;DKhH)t*Gy=(Bvyic!X<>947Pv%$-W>Go;ktGYz-vZDQgXvu9C4~OjE@~2O@j1Q z?y_ywWs8B4ii(q;Ww`8~bhax3nqo}j79Z!S^EeZp>ToJS(#Io5j?96Ez7rb!lO|H( zY|1T##^uw!nuZtd1(|{)iqaiQt<*tn$1&~9K26yOpbTGM;#as%EsCdxDXLCp2au+b z2pnMoF7=CiPu{eNuDEdryp#vqGl-6%T!tG1pNg_+yjYy0fuD^NaM?&7GP>xK0L%CU zyP!qL{V2~oOWL29l>eMVCE9 z*SqHzo}Fal!g!X0lnv$+A_M2jC;SqC8i~R0PrU^dR2VJ7t?PR5Yl1~g>gCodSap#o zp=iF;AB7CsjJPcbxD+T}-LJj-$E-;K*926C`$@+`#6h8$ zDy;%18ARNguU*wen8Vn^X?rrYH4oge)h0dVFvzFcwW?HCG@xRa4GbXf$BWxVqB3AE z0ppF!x4_%zvXNJh5DMYg^V&+P=e4Y!#^Sc#Y7{TUb%dvg?kAr(do2^1n!3$x7+Ul0 z<7?Xa34iw9M}eXV-UW6lAaiM)TE(V- z!Mn`2N|T7KvI*K=5!=n}_$OWL+XS+L0qNVlW#}*ylGh9Of-U$2nQa18ld1cHxfZmA zzkHTCfr^@OpRhr9hKh>Cvod{a%L&z=`G6Fo9OiqpGd*?B6&Y0=1d?pAvXM4GI(gRpZ4Cq9vt18Aoc?H6-7hDlXd%J`4s$Lk3^X zrtUQgaV#go-a60glqub;3f&uXevNWLune&*!-0{i`82lxoIDT!9{|UlcX+ivj%+s$ z8q=oWE?^N!ON*^IBe=df#xOx5F4U45M}w|QA*N{jIz-Hs9vYHjM94llsSEPgLkE?^ zEzkV9#7V#2Kcu!3%YATG&?YNzr$JJif;QP0?DYdTx|k<+R#Gae2d;um;h@F;OgIBt zNfw+ykNac_?oM66txxH(CuLBv0+(h0?o|TJJXUGn`K2<#y|>RKyL~RqI79;9pncBr zE-1kUlHt&nZOb2e8bfPQ(8}^=%8r!!fdD}_+54QeHzXZRPnwGX>oWjp(7Mu|L$M0- zyr@gg4_{0|&kbhu~l3CA!r2 zfhjQjW7=l(7VX;L@X}HSo6?{%cr|{o1?U0NQeR_0;|}GL;uUCC{gsU&%V0n2MejC# zGEFV}4m7r)ZW`NJAM>pe3j%2((4m7vO{XHTN_)*|E-gQFf2r7x zRVajMO?{bQ0s1*OX+uSv{ge1jJB$@R34Z;a3$L>?wpPgwo0flON8>a2A5ctkwPdDb zI1~rJ^Mi~SS`nX2uQ!GUnsB-AAxby!w4tEHBen0Kc9H!_q6YrOscr#_e$~yPJ&Cs& z841~7)H*wp&@RkU{e|DV>neE7ERLu4720}YU8AfbpI;9xMf59kg&)aWvAt^qtVYwqG5ueI5#1C;O!( z5G?=pp?QVr&10TnDC593q3A8*XKOVu_t-MZE>G!L*8NZZU1u}kcP8h%AuxgQl6z7Y zzh&z!lEOAA&#aphlo#bn3n4jxIa6yo>6QzE%v{*v3`5s7qs0sDqn(r>(a^=j^m7bkzG93Hy^*Xd>fz4GY~(hMB1l`To{ z{rlU^P*bRxAZ9RlX^weUz^?jKx73|Gvm_JYhGsEG81ZdM(wL7m3FEe@vw2~? 
z!8sgM01adrk}X=<&g`7rFl?asao!L!jKXrHSvZ`ho9}2 z`<(FCgNN24x5Yd_c=XplB(1l^XmX4;@y*POxlS+{u7y5I2quUmV_YwH%>vsVnR{|= zc>B4#xn_4Xf%iPDxO9E=eD}6EQQ7XYoO7<_&>-(%k-hu}+XaVWH|@Lz(wMpQ`*49UhO~sQwbmE5RHPRm4z75XE z&ER)P(m{m@SU!T!+qI7$KjJDVnn^&}LrqQ1kfxkob5fBFw7aN|jsQiRAgKIAcYE~u z%Fr;*hTRJq;@~aRP7a!UeiAT~?3WFW0SaTWvERvWOx?9N4SO*HK#LE9xHg+nw_O~; z^hlHpUL%@|u{5Bv20J1mpi_yYvx8TPp$*WuO@>Fw8U0qwnD~epJkz2Bg zkmN$4aOT2=RHz@IvqdtvYsrQC?g=`{vd~^&YKf=r_L)iW*!XvA^G~~2#Fki+kCL3H_oBg zW1q#s$rC4{D7Z>wD9$!;C8F5$hy0mAt+q5MN}5+eN_s;=*j&p73`^hYU9aUm4(<(APuSMnpmEn_+bos>fnTWxYIgx0+1PpVq^>a4)=|T*uQ|)(m0R z8MO>f(w`hrl$VoJhA4G)y+0eKdwGz-G!k&&iT&;ja>&qYy3YZpTwHCGHHnRnzmogO zQkA8)xfuzvnD$TC_3G^YK}%`6bhg%NZ$Y1?eJGHqKY>x}$F5S+tvdWK$`m5%XdX(y zv#BFVNgB*qEtcA$D+KyG(#r-LuqXGWATW@SjI&(oPzG%lqSKkdeU;i7>$8AW7=!*f zsYS5BLxP)To=K80V6yu!U%w3f5kfo3Yv`A8Lwx*f&4)_Uj+zT@ z<8UHDrf?TZ!_WfUHcb2I%e_0sBz-*CgaKLKblp=hkacGg(6%lNM0E#(_)SyD`8q)v z*a}9OiBoiS zzJ#>Tm-OrZ7^(ZtY&TyYz1XG#IF(l_r%q~8K zbe=)pdIemXe>!V}HzcgcHi$+b^;H|C0f5G3xtuK5u<<;|FVx_)W=T&1U>%}F>`A6O ziTA>V;>w^!{C^)ouRzA1-Kf;6wJT8}$<6Ao1!#W}1^(|Z{rql8{_eHCw578ahYQZ+ zHi6X`3rk$j@)rg-)hM=)tXDZW((W4Va)_2rw**7+(Jpu?UM-}_xGS|VHqJm@<54uU zS29+FnQDIL;>xHg1V_90m5_b>agg@uWgI&Gs&M

Kc!`TfesZkEw33-@CEdfQcDO zd=SI^s`Jdo=esg8cH5h)KlF%TM<2iPj7aD#?%p;K+PrI$G~ueX8~`D=y{EZ3<1XQX z*i+SJv9^3xgBvT1!u}o;iqs9DumlsRB(yBR=z<%o?8d zHbva`)5uR1ulw>)DWKoH%*-5HV^7Nag_p__0Or%I58hL|zc*b;*%>2E2`}X67==z#u4$+?13{#>!e6L*IyGGbCicnCe3(oD8m3 z$zV7p-NZ3(e^CH9`A9$tF2{iSElDlg@I0`@0qFxjf4i+^T@tYxz=JxlG=iG(kII@P zTI}IsenrxX!efH0>9VXMBtXY62u1-t3Et0bfP7cs5Choar*l8r_15dZzt!O~?t{k- zxFT4=Jv@Fmv>^j6P3m9&k5Ao2CZ+*W=;UY@d-ZAz=KE072!i1^4-V*lfitN*6V$e7 zBYemvSE~7D^8hAk@kp5%6>Yd*gyC-gK{B#nlSn_mthBX*It)XrX`38F_|XE9jk>M= zE9__3z|1Lk@>R8qsk&)M(`9cSov@OsYEw7*2>Ns5KAVGR=^F{J>N-dT{dO}?9Qids zk2<~fHvRATo>x2ZJ4viW(=JY`#~+LrA;q?(-X1w}NDt-$ly13m1~(zf^l1Vz9!&d5s!ro zfyB&#=FW6j1}2CZpa%)?5{#?oq>TavSip8a)P%vI$U~UHNRBP`{q&E&s)GG*%%@ME z$VC$z7W$H59leV9nJWJAcs;WHbgj*bNZuZh6L#F~H1tkS7>=$i890pS_zfs=#iQ-^ zynGtZ${uwd-%zO+VuX_poA&n~@#`l=y{^N%Z!8Lzd9N;yZEZ3;TMAe9x>DXzmHo~2 zAoh6cIh$kfyt2XF6VdG()W_;r=&K`wI3qm3NM}$pG zRt_lLlRBA^#@rkYkYnM1g?x*G(~x5G=Q*4`PS$(mW%CkJZRl-HLk@j)P*BiILSlM) z7p-N9E?e{pO3Ax)lctUY8Zs*5~WV4OF zuEK@+>qYvvl5BXk3P2|L?@tlZY52CStx`bf*H@n5Ih)~n8k@V~JLOqg%F3}z?t5fH z+W|@!ygS?a=bf}iZpt)rG-1{Idgpa?f46al8iuymAIAiAM_*dZB&67^Rp_vO&_%AKYGq#=|i|$N$L~`mj zHGXsUbN-`9!n_eKO?a^dYFfHRzHjj48yn>lDSO6f3;yx2tbXU|KO$hFg|i^P)b>Ke zguAURHiwtSWM;vA!%08ZSwA+ep2se+4JO8P-I%JEK`zbo?w5#kh>DlpEXllCeC3II z(&TQw!?dsEQVaFe`}PT?!wfOK|J{MZPSo@^_f=GamBsCK%g-vh)<_mW-C?YyTJU{f zsIc0)$1kAg!dk^<27e(jB_jiwtYsNQeO6HU;ln^>@!CA$!P}o*Wy|s^B=wjH-H(u; z=MKF(baD;fkIHA^v#{i|dYoiXoRlPc?AWoo+OL$#*|y6q>I60RpEIe@N80$F%LxaRFiO7Q#NA<#>7H=BCGp3{8--ZfWf3YZ8xwK7m1J4dg zQVzUoP>%zyZp|F-@SIDjHgQW@yshWD9-rIuHILNQ)zvpyZO;zSvMlMmY@aT9bD>chQpE_Ymu)35XDIxP{qu8=|N}H zB(m1<@<(5;Yw%bRq@elcOZTGE3>vg*>4FE%Yg(dRz!LvC7Ubi|0!qwKM!0UXXkTD} zs*VQKai%ELLl+m9Tf~=`Jmw^w9w;|LY)fHUOV9#DvDhE;jgT;%dEl@<-THFBXJ}X$ zQyuKqQxWophK5pV8fIa{PaM^54P)N*6K&w?&1=ywsO7p~pPZc9p|gvpCPj#HA+boL zI{`bi{(@n*#Tqm6OaTt?D_1xd^PD4VG(|=Kxr!u^a`Srn@y!W&T3AKi;ErWF_K!x> zR?eVjJBqvY#59dmDOiLoE=<(aFfsF*b`4jl3A4TGT#pykIZf)`NPOMFdpz3=6e^z?g9 
zoSmH?hORbnUCvAkGvYB+5b#zzVbCz9^}zaT8NM@P1}x>W0x)Y$Nw}8wQ)M9rb1qA5_efrlLk);PRG&J& zIK%RkfS)YcH|+Fv#wsoOJl33Htv+CgmuidU_F;;JbmNY}>W zqw>9{eZ|w`L^r27vuQRh~UK?{Q#VH-+}fVuqHwnJC5Q3(AqMGlvdL$JEPTQiW%$zaBn8JpKe-)xdCakZW+-&}lmOAmJD-GazL^SXefpar17 zuynZ1c9@)GeBh<68gQQ>-emSwCOJ*C=~8E*b7;IBpT#W4#aYJr8Fb zj&iyn*?pR>1Gzy9kep0YV)ja7@<{g3I=idX^1UyQ zoF=7RysZLnrWcDB1G?uFCf4yK%VXG%p2SGvdcx}VQ)_Hc zaPV9{X4^Y1H8xg?1WR}Di(04a>_t=7`+r^toKReJ09d)*Tsv#Z|- z(Q%0ruoe$2EeLC#{&qMwj9)_iCH+pS(4eFHTdja@~8`|4-ynetS&!YDWm-~#D zsi}ERzz{y9SqjBt-^VL2_?8*H8L{;JsGG5wSv$ktmIuA__=sxpN!tdn!qTXtCoH++ zmL(~xj&`ZR4Hs<=F0t?#>R9g{qMlxFPm5;uqQ)dBR< zo2+%E^gaDC*1Ii-s5Kw6X~458b00#Rn!ej5X2Ll*SFeNjC#3^yn28`H8wUr6X>o*L zx2>%$Zky|AxniEhfDn2%NFxU@wrV1;fB*=ObXupxHr0r$)Z$1}&LkUHU|=JDwE*-X z(cllx+EIOsf$+%y%L|@Di^c1c{w&s5FmFMn4I#^aNd~b5DKM~Q ztp$6+WFxir3LUQuL=zBvlf*!{hU{jN^O}F3B>!Yn>0JtHP8nl&%&xbkDQ?y`IXaqA zb)$xfat_=jyB@0O!~uIu?e^`eDd0L4Q+H0La!n8kuI4I#Js`umj1>3p+wj%RE5f1f&YpRnt7hHSft`v+d_$;>k>?$t` zZxc$mWD#zjQI{0Aa3Ox24+v?*S&bDv0)k}I#eV=kD#OWA&}SA}`tC!4_!eOj+Gi8X*%uQePV>O@kTQRsLwC+US3mrh1lRZ` z!PSMlI&EyEXBRZ0(~1N(rTBvfJef`4ti;zM-IDi;$25YX{2_UsrN(R(#)xs1EdP$) zrOLhG`wpzfFziVU zZQ;A0M_=-Xu_c#?)1F~Z@_;Cr!e`w!KE`jAXxDO)xZ#NDrWV_CeyHO3`glluk~i2w zkG(#YxA4PX==<)Tp2zvgXFKL>IRlf1ACt0(@?%L zS}-Jhws}{!7Vdh%yIW}rPmNh;iHl2^8S)h~Wpkejrvig5sw11-*oJW7?o)!$RnC|z zn#*wPAG~&yxC|u=Dw2>25z@qYo)j;OmZ8N}YOZEB@1TfQxAkAafWZhTocP1EGq z2~bYNKq@=!)KIGwKV3XTh#1486>i^t72&$*4i15PEFFtbn`(n|jg4|QuRVH19|M^Tqf$zxJ7)VPmcmO|@*IMc|WvJj&3PP)IL z6XCs#v3x!F-_ynD)(bWdJzZ!+dV&^ zf}sSnM&f&gWjNTraIE@NZ|zW*MZv*-kK!!zS^hKE^nM&A&KrsU{3#EWl?6?0ah)n~ zDr_ff!*oxmZLZ$&rB5SG8^!EQaOJG0<&Snnf_}RUpPK=cBx!GN@1Sbvp(^n;ikUTh z`U-fm_)LUrSq!vnge{EDy`BW&aoGm^Wq|FhNoUJ|MZVbHHkUC}3iyE~w-j)GW9>x3 z-jBZ*9;%F07Fuh_Rt9V$IeFuLI>6h>$ytiGtEzkmHTCuF3=BIT-H0hVEEhsA%2U9Cq){+oV%K-vEZ)8$|pAepG1wjor|VP>@k~cgf0^Z2&u|6ERn@UO#L2p+8g&{THJo8my4WkI_cnkb@R!|6S#r~LsG{Kwa}*DENv zrkNOs#$RU z@*%qF?wQuxC0>A=Td^m+(*8cRmXcm}y&ya|j3Xc0`GV(Hit(V3yKhfkqDnw4A+K>x 
zi8p#NnBOE+&dtUBiT4i2!XD$LL9+c9WjVAXgXRU3r}!^){E*D`!EP%kxrLBF4eh1oD_+rM6?ul<~!NP ziWOe$^q)UUr5|U%_TDFLcJ{m|W|>7k#6)$|1elgr*x5y1XYWU>8v_1-!{N#opf6H5 zutWQci!kg|NsvHz=v6HeX1UYEt0Ucex393vn4g4k*DiAQ3TbEAT^bb)3$oa;&?&nS zSzBQpll=Ae8iT4nn=3xZOZiiH&t}JYT3Vdv&|bQ5!s~+ryW3osg@ua~5?be!A3ZXK z&K*=+%JvGm0GW`sJ)i>6^OdzJEZU_Lf+LkfgWn8zYRO6IK|||TL46M%aV-ICN{VO2 zMj`Dm#F^3wI!VCdU3C&UFPNze?7mhYrz>(#t{8(?WYD+7p`DF)2__fbZ7(JWeaBMK-G@$|}e(9n7QoS=z&%m)3`tJ2vqS=IYX>N*U;IT9ag zRlYxYUek{X^Y6guAr@UwpbG3>*_m^YasvUY6~J;up?iW*`OzeV$J12Xb&Sz zV{iUZD3HL+$KmTr_f){dEdDWf(g(NInYRL@ksePV9NgLD9Q2fqH@fQ#vZ8_{@|*PFqw8-+fAP@J3K z@WiDEy^$?juiwrXWmQv+HiKaG3iat`gJ0brMHa$co=zKJEEJW_zLL$drg8OuD zyAlHbc`AYsY2XTHNnQPz@Et)n($NBq#oK^t?s1O9?BS=ZPba?YSSAqhn;3>;7Ms8r zV<=gziwJA6e*Z7)FTv;#|C!*U!DDA>c$)lE_9V7_>F zF#?gAN1xQ5Po1iym$GKX57lx!pT5kqVMd+;nvdN}JALywf_?RxS?=!Rv@!m-i6*A) z0udT9k?3LOTb_t0O4Y2fM7P!1$zjn1;ca4@Ww3ehP(b?UJJbJ~H0Ghi87vRHLs8LV z2&>f8)VoM=VouyV3|d%5P0P(8*z&L9(uMjWoXE|MwnhAam1ZNA{vJmRLz zSw6N8!Fnokr~|2koxg0JD5_r*kTHgN+~e0v&pj?c#k9q^x-VmB8@JtSPo@ajR!hrh zd&XeT756J`R}qMT_`E^|ac34jUCC(rr>>qp9Rdjs|GGg);q#qbXB3@FwX1Qy^rFea zOFMa?=V@OUfFtt@h7#^W*#}&LNm&3O@6WN6;_LS*n+}(T{6fo4_0#j2=7?EUQHcVu zNc-=?;tNl>`$eIPbN=-#NOkf{?cO3Lb!FqL-?1##B8XwBmC2@_KEXZiOV4{o-7xsb$eOVHJ7b`)TP`g+C_}KLOY3ganyA)iM@ZQ0=I?r_mn2bA zq1Hn1-L9@mV*i2dqvqU*b2>Axechz?+}0leB*$x73hgCPLIY3HL{L~R8p~g=2P;ty zO!Ngil@7UR>gq;|q36=dVLXTC7s%N*hTpenfHAxbKwd{J+$XN;e5f9qDW{WlpEkE^v*W9Q{LRmLh3l&(cDen8hw*ifChH0TG5{LSn@Y2X9;>k}%j#(r#O zHhxJp9*mWK?|sCjn@@6gs>!bk9lhXUy~fCwhySwPb6wa-OU=Ha_qULb%xV29aW`TJ ztgVcQi=B7dJ1P{&78vBKM59x$ylwjP0{;3HemnIpSivlry&eTB7p0_+=FzvJ6E_Fk^-ifHQL2 zUs+Qz6~bdzC67?qe-c*_ch?%94?WU#HkSBQ^+AnGLmwpBLRD7#56A_&wi}(cG?&ZClj-}jYN*T9G#u{ z7eU))pSxLwf+iSG?^Wb0UJK#mK04vPa=b7r`?O`)cWZ>lY1xN*8lO137#>siqhhIMS@wVx$9b3xp>{(QSlH)!uc;nUpW>_ z5D3w+QgmS6#6qxrHtph(56NK#T>ris2}@{yN)>9pz0C)U$NAgY+UjP#ySV7FEa){cJv(U5ttWj&vP4B^4i&iioa zJVA(t9>#HBmwXkwaqV}E))UEd8C-60MqPB7k-bRno9TzuB z0F)bY#E$Q8KibtLC741=0^QSvnWO`Vi~na}`|Afvc=TSPp*bQ@Up(R9?1)mq1|4Jj 
zr2M4q(%lDx9Qmx>J?3+RwTN%l@l|t`a`99@@8i(4F& z!&dJwmY*8%etZNm9p3lUE$zmA6hU=4sJ)Z+MWbl_OzD4`2`$;+^khHNUswcu?uZD` z`X7-WPk|Trw~xnI2!=0-Dqkx^?FxRed7S3Pr9&TWns{v912v7OkURU7rFrNAffDg1 zxva`e!!7j*)#xFDNcOG=s<3xO-f#-lbgfVF!{;4pByIUAM9Wo_h;qtgUX82tG@9|- z#B$lNREdYk-~NYxemi~TUoCzF-Q8Mq`Q*5(0>~!%->?k>*R1t&uphx7-g@RVQ`!gv#6KM*U3YO z=^IOd4|{{KhN(g&*UufFt)i&;AG9ow^wstCKQq8}QJ5t{5>3Q4#06yv(Nx>W{krC9 z&RxUWWtrDkN8;1_MX1nwD#@-}l%_5#?hhWVE*id$PAz0m9&F674Gam?WQfP0A(Ok_ znJabV0yhVy7r*uXWRca5glwDa3-a8_$GN%BPbM6YMU_ocowdA?8viJ=v7^sjR`DWr z&9#oSx00xX@(78H92=cDR3Xhq@y8RTF6H?@Cxr|xfSYQoBpyyOex-^ZhG-M zIYOe%G_)@VXN;C|cKuC@pup(o+c%(Msl#yOOFblPvM@qXj48p>@Ne%+u(b!RGqC({ zy#-_h(?typM-?LZTNNke7+NH=f2Pf!fBSD!m`l%v#oa8ps7BD_wVd0gfI)My+-j_S z`*$IywgAzkD}g(^mIEjFJNn8n2#yS*Rvhwp0yAUp#J$;yug^w-|d zG|-UmSe$z&6C`M26>TAcioJBK25GA8KhCpI=?4K(uXFc`e2-@0^2H*n7i-yeBL&Z7 zeFrATB7QyDe|)`|`#*x;xMk2$U;P6A#frf2w4G-3!y63Sv7y@PQ9o`s*SN1+wGj)+ z0>FruEbxzGdI0G+V>yIGcrfA8#IQE3U&(LGgfnP3u+j*QuVH=H8;Xc|6Y)a7d){U9 zBhwdH#j#Sh`#oo;OS0^WXUdgrY@FknklhV}m}0oJn3lys*-y#t7?2rOIaBGym#34i z-5b=8(fTd=Z*SqxyYMsp7nK1qza7TxcrUovn))o;W|1%3RH$$AM5%$P7BL;zfDf?g zC`Ebs4u?B;;zeSr*--ONagjE!xeY(FRCX$u9FOlU18;`v*--4?UqILp!-G6byzJGD zM}1wJ!oLN661k=$NuCAm2c|-2S$_e!bEgo)Jv<6ZSL^4Oj*Y0870d5+wC!+ebz`4? z*o1`gKcDIU%vhce5v%QHFXSnX6Rw<4oTSMPbpA2#vxr^|TKHf&L?6P@$c7S{tY=ghnYy;i#qhdDQ24o{~f%4KIB+co1w0M3MxMf?r`|;2_BhV$=XK)_r?iYEDhOY^Fvk@Z)BF4{omCPz!~VJrtIyXJ zl#m$%X_)k$_(}fAiLc#x^c_S?iw=8M-!=s!pWqZO4)S08caOnx^S~Xk+CUbXTCNIo z!zZ`eA?A>>Eql^In1AQ+@k3QL*jicAjMoiOa1Y6!d9N*^O-N zJ85ul%0S?HMx+%)rDKm4Q*t;278aFsyH}Iq4wZ{LP8iLIi8I>${VD(Py`tBjh54{x z+vM7u0FX#V8woL8EBL!a*NJ-z>8aiMTsPEc=p8$Xe*GKBB3ZX*SybZ3<$$*)HDwvN z(lhO;)iN<8d<6(442f=qRa+W96rCC;0ovpKYI4|3uf? 
zH6|w}(=1(=GV?!dttMGK^y^BQQ`+(yz8 z9!MXTyinqXfg)2v^~-(X?6KgxwewE>ocBk+tQe--MD`&BFFgu;h&|kPsLILQPSz(&a#2OAkTqu;|3fsIIJMj5aVfIZkaP`WQ@g(& zCh4n3KqjHAXe1-od}?BH+13mp5jnE9t*kQin9VjWP9tyoF1VDzkOkqtD#vweGfL&z zIb?=RGAgV&g9S@DQ_1s#xg#AoSkRKr380C$ABnD?(n_tUcpeM6C+Xu+rq{bjPM2uX zW~r@{;GWT+zO5u}JQy$n#v+0aAFmPVZc3)TE3FVjTvWqxY&?^^mtNFs(#2I(H2PBT z)4QYO_vP&Q$4YGY>Pk7-l^5)@k*0}+#XgN4#Htu*(Ek>jt(jEGxvfN| z=AK8lpgr<3!?*wR?CdPa%YxYr;y)PnMhI9&kW*0jj->v)vR_|V$%~g#>do`an=tcF zsm_iwRI2Sm_>~ml4|AxlhsHHn8K~ufTdR%O$Dj8z&HvYtkM7By+n9bPAn+qquP(qO zZ*P(ND2?t_R+O-MSRLChRB%`@->SWQIMTO+Z-%R$e=c&#ymS6~T+x$083hJK|DtjQ zOV=7d6Ncye5Yu9<`#twRxqVeXPUC>*1u{YpGi7P!%>9mJ!yc&$X<5f&yQWXO4?O4O z3SM?kA&WQ=QFDMQ)Md#Ls-N5UxeJ(cZYs&8d7XCFxjwsG_fX@squZlzGS*$Lz+m~~G zXJj3~f0#%=NXT(o$RIvr@H`l=2WjMm&>LA&g*2_=Vsspu6>s_7OSX<6B(5y{nGukJ z^+hdGN+55;VZbTLt)G*x;1}%r56tz~y?C7YYbwR`3mvQzc7#_jPC!;sRC0vVTV!VD(zVAh?tb(7OCuk`NPvW#B3 zZX)6KUG&?vqlL#L2bnGIYBQ84{<^b&eEq)&A4Tb6trQZ9{X%vqL|0c5-krwlg;^JE z28!Wd_ju7-43l&_jIvMctm;SXYLim6s}j6SyMlCC`8;e6Uz~)tyD(QhswB6Dsvq## zW7Mnq7j5?E6P8H+zeso8zA>#&qw_IA)!50;M9z&&_m*}f20Qsn3g>aU`nmT;bWi-5 zqi(3zNa0;$&W0RG7C#49A$fl z4Q~I&D2#G*8{Cf&H6WIs&vjzTsJyp};=ahL1dTcr0atLWR?L#i{ES%!YUk*`;F)hU z?Ex<+&3}(;V~!z|P3`+^+}GE)zT)eCmoy!anZ%qVDGruK)S`tu0IvaD5&Ss7Q7$kv zEOcCe2sq6{pvgn~J_amiY8M=;mr}`gfbrjsblYK)g(>&=u@~-bAlh?vdW0zY3mukV z$i7kcd#d5%z*wIs#)SIxEQvpAi&GCKJG_IGfHb6xdo}L_&y1w zn%W}+4GULG^5PC8q!76}`7gNcVULwJ|FN|H+7h2lmDDT4L`C18R@Z2{AUgwqzXpKa zek7ut#ll*a^O_wfvK`GL1-}C9ws++9F`C!WSc_tg=#6-jpEK%V;3q$-FA+?eOhIJ; z&5zs;*U{PpUgMUi*vU`ty8W3s(5`A7)qhG+2E+_id?ZCcUe}tusbT`$Cz8$!EJ?=l zYN2ZXe8YDA*;TPb8Xm(~eeGJ3_1*5;l60<7Gd=KHboOS0@oOO1yr_R%F#WF~LF?Bw zeEZ0bzZySwh?>ua=5>SMh@>_KLu>RaGxa!KKFZ-~u$iZLZL54w$~))xy-|W{2CyQm&j8%by4SUyl~PnSKP6Pe~HV$0E{ZHc1HAp#;rRMQ7*2 zU3@;~aDKD!l88d6$PJ6)tSmGLCw1b-4C8Td-UtinJb_PnKKi$pf|E~yO&1$ zQ`(2BURh6tWw*14_4oL*Iswv8bb@W$n8efjQO_jf?GL|>e)By#gh@UiY-FCPJ%Y2L zzMgml;UZhJGq z66n^hdmdv*e|CV3e$6`EWv!6~W+HpJ5sHU^lcg&vN+;V^2&UY?$ptZWsei0Ay_;&V 
zrcDy~Q@c7DB8nh#|9S)x47Q2<>cjURKP>%zHfIg55E9Q;IUP|&{Po9HPc^bJ>X6E# z=#P|1#z{;**IKSD)Wg@q+2j9Iye@c{09EtigP}e{CDR{oY5lX>`cE1}SZswzU6ZMJ zPtVLKOYQBr%i`};ek#7#+A@0tXYIbwWc|;PK1YOXs-0nVbmcvyd_U zFZvVvy&7aXoVk(LtZxZ49%L<&B%_=KEk3L>Al;r9k9LgEfgvn|aotVe1)rl^=_22| zv%RL52ixjy@DK1ZOL>&4s>=p?e(g3Q%kLZ<3eZDusb!7)Lh@U{`0h3m$WZhjw||V<6t1_@TD17OuQSOm zthsLV8i8?OH?#y1cGQ1X(jH=y_fa2XT;8&GPT|7jY)=dRs(C%O&RPf=Yb*voFQ?o$ zvLqN+tVK#~{MV|AE1ikIj@Gg&+6R;4?>>K|2N4ZdPqpmcGwidtd#d@T@lz&F5AtHZ z#*Qy3_>bcVA1x>lGJ~#!E2w$}vdy^aR+x&qpXz()2?rlwpv)|-_R<&nw#}KqUfpN#I5Gp836-ec>^R>E2s45Cr z$dWK0`A*w{g9!iz~|Dv&!5&m4ef61spV;L26GnAXT=h1NBh^B z+QGp991htHjHLJOsk^y70|j|H1!uv1SFsgYG@;3oWGz%)O?jaw14G1R;2Vs@!`ilm zV2qFfe(V5cVo{O0|GKaHjuy$}8u(c*4o?t8hyHK&iN&jkkS zboHMh6Wk}r@3MR453usv*GA72gWzLlwDt4nAEX*Amasehfzgm@54xyo>x>I4+$ zM28jMno8GK*!s;LSUsGE!}k+2=Ug3%guloIAMiA{I4~l5)+lbSBl`9c94$)wo?(5n zI$(}irOw26ue;u?wzx+ge5q5P-PV6z%bzxTuV6Ff@*exj>e4T-iodN_K?_mBJdde~ z$q@_iKP1!dK_~IQgTX^c%vKc_E(wWN&o+ug9ig2?o#j+L_kiDWcY=-1c4f56)XXe9 zpfOThJWkKywyf-1I3`@=)owc%f*NR0lINXW6_tu=9)s&VMwIt7V!-=PAS8-XP3zIg zwUyzl+edbWW&NKzTwVL4{9%MQ|J<7_M&zv8+1=xvQ6=o&bKQke`Cunm-L3kK7U7e2 zHUo*`e`b4cJl6I1x*{9O={J6U%-fQZixn0kfY13^WquGYkIZ;EI(QIKJqXRUyoo%O z@l5$007k5rPS+kDcP764k5K5_%534Ds8Mg(^r)AM@uj2*AS^kpcm-vFxZW5P3v6_b ztrdrY7J%K_2yDIppWcT&_k=mV%!5gR=(W?XOm}=bdcNwtz=FSGYZr%xi`5-Ok^H!T z(;qjsu8yrYL)BMO+wuLL_k_!4O2OuX#IKj|S&*MDy*;AtPMEaN3x@p|>>=^bJn@n5 z{VsYkcP|DpZ~t5Y(CP|<{FT>HN-b;OaYaJuj{uYHR%s_o6V71LVi_qrR_Rx5Rcm&M z{>%sSi>DD6otJJ?T$n9j*4Ra#GL$p9*}scAusXW875?9nU|h$%6mjb zO%21aZ-$ByN$wAhSV4eJAh36VHXk_EaUcz)I&(I;pg;yFTo1vTAm5uRJvy2+V7g^D zi@US4yVfsybo7aqkYlAiw0#S!~RhZkAN@f%w1UAV^z{`pQpOS4@ zP}M#QeW%X8v;~(sHYO&fxG{%^V7eovrlhBI3l05e7`{K11T*q%M}#o#dU8fqW4XUx(tQ?(TJ2#mY$t=cRHck z)_D|3Z&Ndcd^+++C-AfCS#uF%4f^vtC!AcbJQcO{9ci0?Kx8+#S9$4f*e*y;JoHjH z<%az}rdT@^DGd8*tx6hW;?aUq!(UszF5Fsdy6_;iP#k?BI z3X()O5eWho&R6@Odq0x_`A$$7E(o?RYVO?YXfTeSB{4cda0`Un3!A`p7)tn+AC+yN z9av35%KDSEFqqwi40U{$#;OYw22hW}saXNcc5WOxQJUUa)mLQIs$nI#th8Z8GaQUC 
zg4waz&0@va>S>O%ujyR#v1#&CO$rO2D1MJzQue%H?}^73y~+CW#3-Y&s7QzuFuT0C zN}^tnEnw>DOozWIh6{};lP1pq6E^R?;7}*UCrC_1cUmON7hnXvn$FfJHC;Ygi|`H1 z)jz`Ov^CW+4*od9?~$j_b7kI#e1ARD6Oxi(&tWP4gz!n6ee0&;cUt9y;~dIv@qAm1 z+C5V2FNF}_zIL{lG6*skyGneTU$5bka_+AkIMCpu^Yzvu*a5B__&BTMFG7L_A~Sa% zd#jgx*yH@|nAEY~Fjn2ldk0LjnA>?t**)<0#XDFniojLO9O+bIfv`RBML^qqfZse( zKx1PA8?-=^&@>$#68q?{0}fAPl<2;<%SPKihcjInrRFdIVOj@?ugP!ds8(#4W&l>l zW)&~rT4U2xzup3|tLJ(*W~b^wj~_Fg*i#u8uhJoaSvdxzN}-4`2!4T)kqrImm$^ts z>IKsK`M>J@BozM1VNXiFG8AQMPZvoa?~+*k`e(8F#K|7};j)*r<3aJu%v%>mv|7kC*Xo2u}VBuB<5>fIPU|RuhxlNFCtu?hzJr&d}?l}IEyMDK~UR&Dc*@NA~ z5y7BzU}F64LQF_toIb)jVR|@KnQLsJ&=IeVCe_npq&zJZVWP}D#IYGGUSv|mlkS<$ z>bug1A4+BY+J{y}z*EXWh_>q8rvQ4Nt)4g;Eb=57z(Na(ih7;{JAA{}h&ulxVVxH* zVBu1v9W|uAT}gI-Jc!@Yx-UMnp`jrT`=Z1k3(|yB)6qo*$!FAqb90o2)X(SD{uAZq z!s4%oU0Ch^Ig+I*>xhb?V!V*gAMw5Y=g6u@2L<&ot5G*(5u3HeSw~p1u!-z>J5zj{ zelzBF3H+eq%$#1nliGThVtV-yIE*DdT}c}HTn}oq$F-Z!e!*0|1HQESpLsnDT0&nz zw`mqDfYBnmF3H%Yf6@4xxcvVaC#01CE6?CTlUGR-C?)=EbN1YvDKb6&jRqF0`pjxS zV0|KK6OGyR56jSN$Apy(CbEcYP=PPy*xXaniXNWqI$k^&#x zUvF^@CMCdAQjZ&JK2Za^Zy2Q4DR9>waAxg6haXkcwcIlV7Qb&GFn|YewLoe($>fXc z9@4Nq*qClbfC3J`EYY0aOeC@~etnrXNZ2eH1`Ezxl!$fj;Llj23~sGE1waW5q}G$B zFS0>dtPeysz$^4z#pUDeRcuYH9WY0=RuAQ9i~%(&r)Z#VkpYy_fKkluD<~!5tIQG6 z;6c@^-rd;|wN`(HcF&*_FtV8dL0Kg23#o$8iH-)J1jOTTYyPFak`hdgFwytZ0S)|2 znQTOoE4Eb_I8-YDY!u{im4IUuApXz-33NeSo=R0!)nub}4KPM!EOo`&V`BJ)dww+A z%ewr2$nUcdKDVE*vx&KydTjBXcxFZRKf@&c!zWlw9Neo3n|hd`Ecfpr>dMdk9bc&I zb07?CTZ) z?8CZf%>{Z3(6x?O*dGwBI;vcHoDAe=cDqMnRh0@vSCNbjBqCTWAL^(TI#x2kDaD8q zl-Rgo(rg-Hm*`PjtgjOeWt=VvI8FXjou$}*^i9GY9r0>A=le13Ul)0%&4Z?R7!4}H zj9z)oK=h`dqB(wv2YF}lTMci4?vQ?g)!$>+}f59keIxtXL z3JG2aXf?`NWh1k;iM!fglV@1!2Q3Nqlcr8MeiK~Rul{#`48R{fQVi|PPt=}W(*f;p z(8`RScR{jbHvKFhim6F!kMRwOib0>fdE&ZmdM+J^qp&5mgKy?A(zXzuKBbwVZ!W(O5gP(hwTat*6DgKk{ksSR%imGA^ z1%#Z7-RqJ5YJyPV-2rZcaalNxTN(H6P6gc=Nvr%sgDRrUVzZ`0+v@kVS_;LhWQ$Ua zT=B1EKBhMRuV5lzcC z>i^xG>^ls!Haq};;nAOTOUuiE8#|v6bhJcTx8ZZ<;I74k&gVDzsngd+LQPD& 
z0K1Bb&q=NR8V$T4sV95ZBv3_`3}7mfwi^;P5;#X|NZggTa4}KQm6^=|i4E_0;Iraa z0m*m6jR(^?6BT_C<46(^MiMKkG6)Q^xum0Qq_(EM*(GL@W5A;;M{db`eWMg?h+X?D zLgo>xhse$d78HN_i%54sg$AZ3k6GD5a6Ck*#u|W0*TPfadlIxXzmr*2r2|$2V3f?6 zF~L`Zhcpd=K+G)w#3~;6e$qg92f2<0U~pXk25<&@B8E$z7;n{^#GokwRu*7*Vgx6{ zUfBbgXeG8AyuH7tv(mr9FVfa3=##cW7)m5b8|iAWF^VZD+fi%Q z1+7x?qm0vEl;C6=xPx~*rd1R@_E%0~c7P!coA1yMo+!$Wo_?U>0WM#$U5Qkw=hhK-zYMcJ*;^s_;l5`F=i50{00n$vI<-1Wp{f$S{@Hh6kV8`F zX4QA%_BSq%h#3Yy*%6jakoWoQC>`((y3)Bu9Knxu${OgrfN~$yQW}ffewK5=KVGu- zt!!*CQT2+(6d5l~8q*vdCqb&4%-j){VT_B-yMGi< z!`5ViC@_MT2n^wa#bS0Ep27u~<)76gNj=fGO-|>}w!Lr=v|4-_n_t=#l(a1F-u{6$ zV1B)X_o_m521*x>M0Ir;bNd6~Y(FOQNw z65cS18U5<|2O5PFIbwt&V0Kx3_lC5a z_eV#0ze0Qn_H-VxyuWK@T{p^7sOcRzQJTRAeplR_{mrzpkpeGBFXp-*&n**39Rq8? zdL<2hZn!n_+d)_U-e|8Y>5{G8!Ps+&^kiYwS+g-3tX4iI^_@-$(%Ps(<7i4d8=ReU zU<{1j(y$mBSy_^=d8%KfQQ|0HDDH&*!?}J^F$BB}haVnAh0;4>cOFrbU5+on-e*DJpwQ(S3Wv4j0ZUV3?O|3e&aq+j4v+yEkVws?>#@M?3TR%1 zU^;TX3vB!wN4?V|BvF{lX21bAe-Az&<>A!s^yZ^?lMot+U5FA3H z4lH-aU#-Fh2l=;@Cj%AR?D29<4DoU-NG~c0#MuLkML&5c9j9mtOWVD@Ha4Oj02x?4 z!1@LQwxvO}a(Gza6u$gDIi1qip2Q%ZCrG0P@3P6s!r8V8@+0XOU{`kkNtU#Eu*j{o z*W(nZ3OJ3ShJc(Z7K8QKT}HtuABk9WTnX4#HFY(Tf4OI4|M&$kH|=i<4)v$E(Qc3{ zY%2UWk5gXH&#mC!gf*}q9rx1h{k>$je9Qt)l-L^9>Hxm2n8>B%tWt@bY-)jLnw|)# zU(NhhGGF2%dup?+!_wyF#^@*6XwBJKo{lG4wkFxt%w<7NYIfAaKDJ$Ejg>%Xlta-u z_60!)gc8>&i-dBj!LddTmwIqp1#FTlj{`#23>sXnN+uWt_}Ux^$X5Etf~SyCKPj`# zqlf30iY^d80D>;8GG|b3PBBuPxo`v)3<3;9lw)!wA;uw?MBK~_4+<~E;JEb74@?AD z@}Yq45wYVtUH7uFI7RLuo3v$IQ-zJ<@Pbz**m_Kww1Nz*>}>AGP^iGVvNCb}cH2tS zGr9KrYx0|$LqRjMIfd>upuEuj9Gz|Ue!^}Wk89u0ti}T2W?zwf8hVuN!=zWPL^T`b zesrQ4$ntQ6DF^R}h`CvOys&6x1H=wcSp&h;S#7HXRck820V#95z4k{jcS|RoOp{61 z$3PwPykaaTKu4_=38S+jEuTXn^!EF?fd=|7vQCR9nP{FyMK8yoEIY8mqIc!B5~-Et zk*$U6*3(M9k39E{vvP9MME5;@O2nwNcTB=?#_P7(CfTgKzX{YtqbLL=4 z*y}kh(y`1W9yK00CpR7M*`XFxgr{p2o-M<{taC zqGW9PgGl$z{s%0%(B}zVS>QJp}xx zJar^ibA*F?9jvfzZVWJRe8S~bDA~HJI6iF57Ec8yh}?sHoxDPXW|i5zF^rv53vFdM zSo#`)9Y*e!KhI6Dw7o499TBa{BWbgdXo;%)LW5L4|13EX`><%M%?<%82go3ae7y7vxwczdShw(sI7Gi4ii`n>{UkhD*) 
z3yAbwr});@wby|OH5prQ4C-Yv@D~o--xYol(Ds=T@_+z})?42X9%T6$<*$w-y^*~O z;(TV_+iM|m65Go(zJ@~W zOsY3}4nwh26?fem{*5C`4V|LU=vEM_>AJ{@@pkLostrcl&Pja~gSU&DZuxZmaz@I? zKR|zpUDZq!#LF1LX#Aln*pdYsinh{s)>b+7<|g$he=FydGJLefC0Pm?LC+}x4)4v& z%pqGXKVG*CzalbO+U4y&`&svS6U9E=AynlJ*S zq$E&{eZ5Uw1r$<%8c{_#O}m7Eg&=Zo9G&8o(hCNU${ql}zw3+Xk-?y87!>BU&1+nsna2X%Un5ITuF|=>LE^IlkihvuQBt~FjM=C>VZ@}0&-stTC zSv^==rJoHIs)f92CY!uI{|7HPR)A%4G{4*qg*Su`W7v2PRx)#_#=>*ppTS3tO%{Ty z-0ZMI4fx6;T%Cq}K^gclDK*62(EZeKyUpPNP-E>8eD6S}XkTlMtX|e>4KE`}&3Id^8F`nd?n+LHV+6ryM9hz+rvePo#BA;w zCH4((&Q2B2HrL4pF5UAiIe7{Xc>NA(dH0FQ!#}zHpau2ci$JS{3M%uJP9panjzvIp zs_o;$;t4euJXNx8q15E1cRwXpAsbbki;>m|RBIt0yc#Kzm8Z<;<{dL!ALQ$OIGzUBodkx}_1!M;sy;fYUl z*ZVpDdn*3T1Nm(C~jJ2Bt1F7mb@?cbO9(6BtCPjHRC&ZV@Nh`-YR5rn!81crGGs|MP(BmFOlth%Qs1I>ykU4GLhzFQR5|F3~w zZW)@j{##W*)4*V?nb(tVeWps)Yvu@Q3=+^mW^JOjkr8u^S^^T`6n#Bf)6P>6*mW~9 zZgsi&1Mc8oJtTmt{rml{aJ-0~5M;Ew5TmxHC&nGm3HJ{~x;*;(e@edLoAU0co)U%7 z?Na#|avuleeeZTxV>7x@wQ2K)3N}!u;4P~D9rjv4BuiO$W28FMV<&!)K*;y?RZ=00 zU<{W$8GQY5P45&hMp`BeCPF&lzx0s7HJ&k;#Bh6D+tJY8{)Q`YeT3ho&jhYD z+>C80{4aYCv9);h=hX{$AZp|qe~p1td|pID;NjMNZFOn#upUM1HCD*lKg>87{da6;eI`uWm6#j4Z%B#YhA9Fk+J+{|o>g zW7~nor3^%pGlJBg%jBWj#?wbEwLmeX^e0tSrKtT_Z*ojIw^k0PjLB-+eV)}Zjq6KG zG22NOsMO-dCE8%O0S1aYgKwZEdj{a_l$->O?YJTK%TK}JE63q!$yfnECslq8XAn`k z%BB8vB2L}X+8WugypF2TI6XPY#Lt2RwcrrIl>oFBf5m1z&dGh}7nd>=LP9BFOi!Pf zl;}nR2xykZlzS#1urQKFVL;zCT|}VeIc|X3Mab0rvB#p`l=rtv{Y+X27;yzP)))sF zm-U=%m(E5RZ*-uo%Q*i2c=Y(E-21DifH_jDRe_@ZAgh;QTF~m$SihmZ02n`h$UKOJufed*UkXpDQqJA1e+LBeqhz{OmL zJHAbZGsgu&7zvi6w_V!#T z8O4n>>YzoAm~zllCjz!oKT_T^xVUMJXPVFmYz`o}aUY8cexo$aF5{dF1Q#g$1?P1i zU*CV<&^kCPfeWK^s$VDKo3(XtihZG0ZcA$t`ZMD1>vJW)2R#N9VGeBMrX7Z0&ZHhi zwrsCpV%04)MK#mq3Kox{b^lG?273;Rw{Jd&%{^GmW{)*fDzfJ(?Hj!tp-{htkMW5g zEe(r!63Jd?@h4jurtKmmH#@KQ*;Or9hUAdxi$_5#+ZV5eu-SHbn-LM=9cAavzTr+M zECW^vb6~e(YSb1{!m36}I; z6y%LO#>tJS(_Jq0Q zW9D)O4u8-0@uY5LhmWbtw+T*mhVp3*E_StK8Fdt!xpmQe$H5Ve?OlB}y%nQY4K}WB zt_!+rSu!adn=0=^Byk>~8(Qi>f9;wLaMFVL^!)etF;LPhQxKanHe4obzwZ!m#A`ax 
z7lA}23*W`c>We{*Y5DE{y?len$01s7~6_Q+_R4U2f4qTr`xg^a_($-Y}roX2m( zg=Sqn9~?A^o?@V-71}K2em`2(E%|M>rIx1N%m9-hytG?DRHd~d2#HJMw?bHIK3lO6 z96Ky4n~}Dj>lI5rkzx)?FtOY_lP**&ZS)0>Kgw2;yu9m(&$9hl$ZlUzqqVWWk?I;(aw9ho8;O0_0v9 zpL;vD_j$o8XeREkr;WF1kvV==G`O zwg`C{$nPZ#CI^tM8N6M7HR>cU&yUuU=vKVk=(WEkBOSQ8y)Yo8(2aRakEb%>VA+G- z$ZqrbGa_UcZPd6p%Ia0B*M>u^<%FZ1 zU0CkW_znG-=;-(z~t+ro(NjMaN%#WQ>S|5YK4PYcF!Hu?1ObS$q6 zVb0S>3tu-{6`9~PXgoP2SNvNhGxwv+CMw`cbzX-;>ej(G*CAPH4;#%XAw;Bv1-qY%K<82XcRcmI&qwMXsqMejd@zx0lJ zdSt2h-ReQG{Da`Y9g$cVe)Pu zGMA^knfBp(i&Ru1G$m|f60EEhBBx!*?@2Fj3KyEm#GHJeL}Vl-dU~AeQ+7Ums%7={ zb@od(_P!eI0z2AuWtRrSI454XyVh{wVTwfHPbo*CV)Vr7K=YrAF1{QeT>4186baAQ z6@ko8+(^Z;vzHE+f*35r1{-9cJYVb_AF1Q)%@(9{%25a{fT$c;*7(H|WiPWt=a zm1707C8e*NwAI5XexuTR89*ZB^Zztw5mNG1=GmXX9a%V0K}vEs-vtKfe5rl(5;mA& zEsYUS6z2o^svXGi1`&MG04H0tv*U`wlTot{H_j4@M+;4|oB}Zs@6eOuFJ+zFc5n4T z&jgVDCw3J4Euj{G3EH%2YhAlh^?)vF-DapM@XHtX-~DY})hmnaoNKX(tGR)o5dA?d zX`GDr8enO>h|!slqiecTXBH(b;-@X2RoAv4cgx3U8}l$sP~^rl za~NM58zRQ*A(avmWtGjKcejy4G&A#+4XPjU$H3^N8kn1V1eE?IZS_^*@D{eS=M3rn zsj(<0*oErxKJI5@^t7)Qsr`z-WAHt_-ckL5#w_zHtq08KJibep%TQaP%XY~kExK2b zy^|q0$v;za6b|1Sv$+s-(6$T(#nVDmqT-5NmP0{@4d<5LnA1p7wdRbh%UGp@Vn8v~ z`n_!5K*asZX=+L}Va0ZG%C?W50!(XD$`u^i+h1Tx)_MV@7mnfZkGqCMmMBBDQ5_|*5D+C^`;WXLngoqxAiw=In(d;Ei23Ru)j4%xGjoalk7}?4T&rMw|pcDy8t$R z1fdv$%aNBKUh10}9T~YZ2C9wnd^yF%0(skZ_C>qhH4K~H!zHHjfQoqJ z-}F+yQX#IX{a-nbIWm0c@Y2=i(i=e0%7+4!`~;xX{2CGeMDc>5eCj67SI5w>036?y zv8rU0gMc$z1ZZ{!&;3gKC&lb5U;Vt?NZcR%Fa~r)as`^1ND|pr)h1g_sMQ8&r3DfB|qRDyVnz6W#-V~mc zA1PuTk<|?qf0#%4*FN>S$zmWoxSE@@wLi4qnKp4Blp~uqNk3-#gi*8PB8qQcGV{F@ z&_DO^chM0v0UH{XC{A{>w!e=3h={d(^D<5E{#^2XnO0j~haOMVqX|XDkhAbdh8LSe zntxDiD2v9VeN!N8-KI-C^L6WGmE`)TxG*$yAAMFT3ZYtcM=0?7mcD5-gnEmnzr|*t+3%F>I6ilI*9M7T>7{2kgfl zEORvzHLthEJow6r|42vjOu9&%bNe9l;;@Q$wBeRXkT_bw;-&UC@E1z?x@%9qm?~X< z?B>exVd(VH0@;sEj{BhHE!9v`>2zD1nkyZuUhI)fIm}Q^TjIpMyvtT#HZ|8+>Vl|p z?C81u#YfVIKgW*MSZeg+Ft!(+#f$bRkvu>g`KaKl#+A`uI;yzS$3olWlHD0upWRl` zw{=uwjN7zOq8r$_1fw~v^=UhG9Uxz_u*M`wiSS$g|3EB~*7mb#Ku^O>BHoV 
z-x(iDlK`*QhSWWQ*O;TJx;#}>7D103G@ed~1PsufwwCW?Dw|iXKw<<laL_8 z9Ais@20#~$BerWPw{LQi5y;aFzz-$klvO?>+c#10kBd^_3D9LCtmix9%oMF2kAJqU z7JOlwV5L0%xhG%c(hgpYxK@S3ZxSH&qyG%%@q8t-CG_HKxmcspK^>XS6c9+xAVao3mva+Z)aAJ`3|1_h*zF@OcPfMZRK9k6vJb_p4v^ z>E+*jJSXKP4A&#vaABsEIv5T>r}S*OplK#M2DVq`7JbSRHENRCkS|$V5md`xx-p!S zv!=;Og!zM+XJ|M>TA`@dVvCD6t*T1ZR=aA{|Kbt)3mT6>(9rB@O4ZkZohS*+P->Ho z#MG9M>XDX)Y!+auR;exkCzQ~ zryOfQ7Y~uO2{2>EAkG=z>@;i3U6P;w>7wH&%h@L3u4+*E0N6RgGUALctVg%%pK}7U zs^thJe&20#Em>AMbMz8l=iT#kt`4TY{h#M09XeKBm4ft}9@X=%F^~RTsI+V^PB-f0 z+t`HPrKW5v{Zsv&l=V}2qz%$ukgmbl?I z<_7dp)chLRryr0Mn51CjxRuG9g)0}=FELY#6u$D747DAyKJ>+bv4*B%h=X(}MeFME zbq}_0xt1|z-+QG*Cf!~sr6|tdmyD4L5mno-vO8cbBQO2n^(2IB=k^{Vf_-h3!Il5s zNMM6MJKD&5Ja$Pt!mH?mYvTtxD>{VWo84IfieIa`ZjLem54)YExY()6Hl*mo*;btI zd{ps*5WK78C(B}bsu_svkn~n2V=PKwoyTBS2obT4TApOX96i@#>C(TBNqg6LXF-RO z+R8;5ZW9!{65%t>0^hqZkmWC* zc13@mOc|qyrWSg6hqHuPB=V$v+XTKzxgzcaos z^e5zqi(K;6UrCnIfZtK6UvCEhRW>NAMY|T{PqyLib1iSaA!VHBUW-GYf^?0nSUN+J zQRt3Zf`y@%@3_Bd5K#fm5L}BK@*vUMw z8k>SXc1$^lu9k9aHYa#??Sm{7kR>|J?{=VcN&#QFF{g2q>(FU#7Oxlj==IDc%$PnE_vKt5rgd{4T!8)hTtA z<|R#;$N&!)d%F+YKy%O0)r*WfiR?7;>|@O~yJ5aXA843q3YiuduUyRCnaDQ&u`En6 z?LGN89Ft04VbU;Hr6T> z-n9?3@Ca$nGqT-7Ls$QKL+YX){80SB?-5@or@G+D(bDvmRLZsw^>cAov!61E7YG3b zTd{bm{6>cF3i0XC*DC1n4DSy=5UnA3I-YQYgj5a!Ssc zxn=J3!^A?aayIS{6&Bo65n;)8(IU#T1RDJD&#RL`rF@e|uf{{h;v&OJ%f1sRVnX2N zL)P7owpmhA-yq(&-C90*FT?OA;8ORjwjbNPo5)kb?k&bl^BJ1Hs)0Jec{mwLN`~%@ zIh{zHHUKA%cI*IVG& z7(hYy6AyXfC4+WxCv30$-W@dTh0pBxmf3$^uhNd}z44;ZUY#-jjnA*s!aOA^927pEb&bS<0(@&Z-k1zS8m>I)jw7qPYj zgAErX0qYc4^vM;2upKk;3YhYo&Z?{&E^oB#S~eH=YylSALJ+d}7uy<4q$+{S0HCjT z;`*}c`ABLBQhH##h;_Dm_kWc~(_*CKDgk6FBlZ&t_<{5yF{ge50kNtFgnxUmg{Xw< zYiVV&`Jc*xG&$9yNA#x5RXxB3E$Y2B_YOH(VGRerh|eaOl9Ufa*kCSAB{BhSyyiA>S?r3e4>U;wlEM!rOg1zjFy739Xl0E?lO!2IbwZL@IWe?e zYSodFyJK)Xu#NM@sFaGbj_~)BFyOPHJgcRQ$71aq+#t_K#dy2xl z^-3v^83#482CbL9oG?EnmZ-~mz<@S0YM8l+Fzx3K)SRp96<8F(Uyd}CCyB!OmOy^n 
z(E3D$`96^}R+&9)9}7l-r5`^CqLflk)~!i_y3oYxYH{K`b@Lb~Kb*KzGN2YyAQ~s`C?Oa?BJCJM-X-KPVHWD05*NHlt2NNynERbq@khu|4t}2+!3+M9K zz7A;ez5wQ|Isuv|FU-u$I@{fU1HWA{hzncAgD@U277m+DYXgRny(qTR$A0_kWnWFb z=N|z`r`@pU>`61^D@d=kFR3>H8;iD*lGRihcw{_g8)qy262f1iLK_+yX0kv$|EvKq z8$gckt_=NS&*Z;$ITGx2As(}#1&g?!rrCm`4!Hp?&HM69n%`mNLo)ci!b|ubCMM<{ zPXFx(T3;~2EP3{KoO2Ta(IP)v9GmCRpOUYH&9fWc=)3^F0Ec`{PEM?_VHXCMOfSCD zmP0YdZyg?4fcdEZ*Co4oev~=4n%|3dN;u=LGDoNzEo0w;tF80>?EM!KN-}ha(z>Y6 zkx>_}udS^rie^YE!<(9QLu;5XV@gFVxcZxoQ>$AA=Ud6e!ynMA!mD04PRlT3bE0ar zXnndkCzIMb+k(CMJTjdbl$=j9uu)wfSV~_V9KWmn$nfqaX7|Ry5xMO(#u#%6uKJeP zIONY&x8GaG6wa=EXWhd0@l_Ck`TI}Q;f9CR(^A9J$XjdnBgKy6fsYb>)9%R}D?C~E zVA-r<#xlN?*n9|2)|y8vZ_!>^(tBSHp_4yqamV%bILp1E;s{+2zWFN0U_0Vr&mskJ`QH)8m1el|bC1 z{#l&hJR2cRk%#tXyIQx;vx6IfA{QP?%1fWbe)5s@HDTWK*=5M2rIPpPTu`5Td!rpA z`SYI=_P>YZwHC0L1;r9-k75kJexW%sjsxwu6{I{qJ<9>Vw~F7(aSET9#nuFwU`C6F zmU|N4T7nqZ16(YyI)OSn0xbq)22~*7Wq5Ef!zo}lQTm`^SJY`n2J9c|K0ZGAg@waw zBL#bVLmKwL2NhEdtPF^S(Bw807=s zy6(CzdDihm<-AbPX*u|AuWTQP^8v!ZmHdKY_1aYHR39ZWD&|tqKe={i4j8{ez|0kj zN%h3rlG2}OQ`)|tC+Fx~?n#Yxmj9^tm&SUs(7aa_8>fCqlq1bgeSwoh6tJp$lEhR= zc!{_HbM0q(xj;e%HY$>PWUlqxWVuhMi62%6h;ZaGN&(4B&Ye`VylsW&pp7zu)6d7%+S&W8WE>hIp&dpZi$PW z`t}`m+jcGO>~+hmg8W}cxJkl#WHV-$wISn>~{pTm7 z$eM#D!rm{9sc$gY1(oMOyhpMh;M<7QKcH;RKoDrFaQcTYel_Q|ic`Z0V#b?@tHsK& z!Z#WZ4osV03sFr`1szj*HPbMO$K&zmOe}A+g_AD2GwjQM7r|WX(US!|&8-fn1sN7fyvImi3=s6;@pzn%aJG z{t;jmDpvJO>Q3hdjD(ZCNhtUz#yOxO(>d?zz*|Z(5R5ZwZ546H{Vy%zYm*;N2|NY1 zpYBRP0^bM1ZyuZOF@QajQ8Q_{79Gx#Hdezn(!@w8&9qUXTUto~cfFl$;153v&;}1! zE7ZgDI;H`oeVX!%^=itik(0qn#c{%VyEzo3OnzX@KNT%2os_6ZV&)@l#{Rz^L!z?f-Z5o)O4EYI$bKL&sww5;0=rr z`6E~DJ8vnzr4@BiXd=0fqQ`1htV^I@N_dgCz7SP)rp-IRT`q6?c~Dk>=tO*z+xJNG z1wziJ{c$)pwtztAG}wyhu=m%^sxv~&56`RA^gVUMC;~b%TP)Uj%4+7@eMXb7$$N3? 
zz_|}km3!bNXplPS;v1SI3eTqG>HpLi9h0i{p11qySe_y|Zf&0f>Fl{;uXG*V;q=5W zpYhdbqHw8#8dMHjSP0jr{(uJSL-rne(lO-MQ%_xu61ZU9{|s<%w{3Cl8;hhtra+jR z4OlN@$|2w+4|Xf};Xoic#E-Igtw6kHS@;Z_0~=5F=7LG_PAq*3yr#tb+vgQSe*fz0 z8>`uDP$j7$Wt4n<5gUY)100f!wLj>Tz(&f-fGUI3GSCO`cDYL6$B_`sGT#_4lfA{fHmEA- z!Q2`B#Mqd+2iWxRn3Ay^xqyPqt-MNs^$HYr6a+cY1T0siddO@7h6TT2HD7Hq*bGSG z%~f!Gk@Y>f{14oe89jsY(k6q_*;C$A8$xX5aUVfCvsTO$^??#e&bs_^g~)Br@^Q^Y zS7lgF=SQk~Ow`i~;@v|v5F^97u_&T;bnf&B9Q@h%4)AIKPdL!Dri3kV90XrTe%Y7C zQHa{&~WTp{;(fvHWvOawOJbsJ2ZEq#NQKg4B;W$im+GqPV$-D!ASwD+{ zrd8V9bF1~8Hl!2!CDGvwX+lKT$_<6^YF4P}2YJv=R3Weo`I)r(%VSm8y7k7cNl;bun&&xK0>H+3gI8;Cr> zaUu(VsO_{>B5vEd+BV7gt~nFkmQreoVc@x_yZXfA6a|SeR?xhm2$Pd_s{|$;dmFQA zb=K#(Km|ogrlA@Y&-;aOn;KjBZDcI%lGb~}gs<)1uppPBME8yK=H?oR2c*BJI9^VB z33&I+m5+B%y5~886EOW0D%ro}vO72g{C%%_lJ)XW>agA=g%aUpj|t83f*hk3lkWB5q+R z`ZcDB%-stx1`dma?2{^2r>W0vzs~J?j8wAw&S6>d9V)5GDLVQd4_RA>4-}vxRvODY z<2*Q5^Yau%?+I#A^Uq#CFp;djt;;6mQOj!3#OruPAK0f;6UCI2*9`Y^g9DPlB2|+r zs}yhRNHQ>rk>yu4BUF@`3@keOUr$j`3UmyfU$|i7e`-W}r66$L&hOEBKYNYYO8by< zenZovK4@5wY_eE+suczQU}_9jJ5+EVW3P9=T29)$_T*%PRT;T3TA^eQ%$>@8U1CG9 z{F}ULK>=P~Q+|g({y0MHe@pk>!m>zD_Ji4%Izl-$Udq5}U2<(D!5O=k-ucuG;!Bki zO=mN+cpv<|WKBi>bHJVEf$1Lbb!__sD#J{`{T8MOaA?1H-6DIn)etl&JE5baTRYwr?DrTm&Y|H@mS$Cr1mU&dcLBmE?A0Uc?!U%+ugx(bp}H6xbf0|bNKNF%8IbUm*PF)9 z?K}P%xipCGP5Gt;i}|w)pr`mx14S~5ZL8tt9iO|d;}rY>WbU=`7~C?j$stkqJVKS<_dd|a-7!O=W4RcDlVggYn4*Y!@eAkCDN!|sHAy4|$F1?TQTI3V{R$oLfzFy?Us34Yg96z!{p@Rc zf~XzVs7ZRQQw92SH`fd%_xg8?4cyWu_qYF$ipS4Ywo2PSRCy2@SR(bTD5#-Ec}bkS z#*ZR)Icy_G3r4s0tBO#S!?RB?tWeq?0tpK`W-kz+Of#rmO#?2zZ{mp$*G^d2c>u@( zM8M;gdtFHmWU=a>RD=#yjUmgZCms{u2IJm5`DJ6byP8*J>1&DkQXATX)JjS&w$Gs+ zDbyqmFXL9Dd&qE8*=^u}Qr!o!SevkAH5Li@AS~lZGAZ&fnhR#IeO`632b+~aKp^m7 zE6Hp`rjxwSV8y!&EODUz!-L%0Oabqp_N`X|hk!!8anbDZT_>A?<`)|y`NVDdb-492 zlNd0zdv}Mj+ehH=q>)fWV{0pDxJaPiAuO>srIta`ZU} zt4HD6^h9bgFp&leE|j;VCP3L0_(MpGT)4nTR-T33L|?1vz{Un#DfI9eIHrJ^66U+h zxeVBlgQQ+&b7PIN@)}1lQU<14p%lH+J^$0!6&r{x3}bofvNQL+$W2CYMOR*O0TBp+!RZr#-E&Bu1g*=NKA9pM;&u_$?xH1ev 
z{CG)w;cr;jJ0_q21@_N10CZ#2ibQ4g2ZKyL@P15`=dW#3@OO+-iohUpW69obXid_k z8bQ6g1=^Sbslt!h;)H8ekvADopt(bgU2l%Ne$VG+lN`i8Y)51WOdWg?3%GuVU0>3w ztd;{G4l~1=E|Q9GPy`PacXs%FW>*AQq~X8p=m@Uy164yR6fE}|SiIVYqzMAxTBOEe zhhsAL$HGZVJy_^?x}Vep$i$GW`iqmoD7Q)TT6f$Ydjcd+TcE8USk~k22Mj$S-&>8( zj82UQ&Qa6Tz@Uv{b_x>zkstwkA-XwZ|aoeyXL>o%-f zs^FhiC>4Np84y3fKP3fi+CktT9y5T)f)(T(U{ZOpr%hj$U01>Na-aUy2AEqUe7s_p zmn|`!5#do%zsH zlY0&K3{#WEl!D1YjtBUoy^@0iB_d?x(bWqph$tB6F(Y&vzt?xLZwCy;DEqC~Nl$?h ze`Sui(37l;`;j1wx>v*pH;mO_DD+_of=_Bqq=m$64g#kbToUU21yuQWEwkztSm8A9 zrSRQ_10YwCv)OiS6<7qHe?@2A6EOy*O7Rfnt@9{r?x?)(Mz}8=;^Yt%a5!wXQ)>iB zC_`<$p0?OGj3~lRT%-}H1iP?o+1_~PnpHLiY<(-k=~%S#)ULZf+D#8wd%ZjO_fP~ladhK^>WJZ?o2R1EX!chU`hv~mcdXEQ*<(&0bT~lo^2Ju z_tCqsp5#enAs`(Hs5)K4~QYxH51|R%muzW7hQxD5YsJ z9S%AMii-&U%a4_Q&+;pb1)S`oD(yz{=k`e_h+VW{n=TcLOz!_j*H?f=wY6~N$;rs%LAQs^`f-8X*qTYm?0!Qod7~4RB$2_2=+m2* zm?^Xc`=#TOy+hZM1qKSTiltTK=&dmhZ3G6S@BP01P;@{SgG_g&Dr~!#qlW)AEnjB< zU1Q+vY;>3of164PkxE``z_RgyJtbAc%2*4)@}`?Y2CH2vUcC5(M&1oasZZN1q`a;7 zmeDLRwcu7XumAyb#K>S#Ao6X3e&w#dya%)47>p2bO!`a1Wpb21DKm1?L$&XM$g#Ar zqvsW;?Ot?k9D@Tt7@g!Wj$z}JqJ(!>I`wTn^4Dma;+C&ue(FkTEx`k7#6zNOBbyX}Siha0lgNGNTG9Y^0=m zW!{$W3*Lc^yE8gjwIugxMH51PI*0!_I4m^F)xn5hlw9mw#Rc}!Q7x5BO70g$RfVcQ zdu)(FK`BQL^8`v?GIhgHLKV10B=lnn8LUpUg#}rv+|3Ai*`?kczA_+CkkPh{@V(nE zL7nr1ZMI)SQ||b%Uuo<%1v{&#ppz~_K(li7!Fq`+0@+?my7&FGmQ#w z5*6!=GsWjh(uz^Xq9SzRi>0Ams4)PV5+gSsB_|r=9&&P_!}U(q96R59*+38v%9R>3 zrSI*XpaJgtfT4w;HkP8?1j+qO1Z%!Q@s)hpZhi(0FPyoYLW)$Vn*r5;p_oNTahL)v zePj9=2Ydnr|K#tl7p~?>fC$0J^q{hQ`ciTa69v{UvA%YaaP4ze6h=#GVGp zLa)Ewq)3HdWxdCsk?5pH%R7)fGR>-5Rw1q?mZ6|H$+x;P%H446@=-pzAr-fxxlrG% zY%6$Zj6ZU;db*&crdd8+oHeYGL#)@6k(P~PHXx&#oloG$B&SX= zl6kc6ES*b)Wpv5BUr|<}M#89wW`PF@ka3TZPSzA9$sd1iDA}}CL(#aAN&CX#2*RPCc}u%fY@kdLEo_m~?F&2Jf7M4;Y?s-M#5;t)eMqarq31;ou*cNl!k-R8=szyn#@?@%oxbH;Ch)6erul`QcumhukNAQg@^y84 z9)>uY?P7~Dj#865&Hwmw0anAon+<{zB`q`R&d>=O2MM`HUh!*HR;>B=hPIqal1pE} zC$dFDw7G5axr_3|y)|mm&s8U@YX(%4B!m>!S(huMa)h@E$F`T(30j5IO-=Ck5|Z&vK1RT<*_YhZXVZgX516gah&cy`n^Jz*;^^Gub?s;(q@q1B%F^; 
z+OEON^tQ$=1fJbkp%!-&0`eZo9K;d*3{rv{%x_&uSNFuT`!!OMxy4{TtnZ3rt%N6E zcCieJxP9P;^zB5nj1f5z-`bloQuZ6?at67IismAFk&irla++u`1gssim`;=B8mA^> z7PAfTAMK}%clftf&y2h|G7G{!h%|eII68trw09}9-EQcG{4A<*ff+ray|&smj@^dk zVQJ2o4CaDKfpr<)_=uP-E^=>>=ZK}s2X*Wv2E$~{Gs{^r@fKF`QQKf!>Z@*2?J0F> zlC&{$j#bAhzO1&fv+h&7^CP|6TKZg~&=i<&TeAs+Uw(5%92qz$#aRqIG zI`~*jtoP5k^7Pu7wZx3p#0!z13$qJVQ;cXD=i7f==Vt&meL0!K8Znc)NtY*zpTLZb ztREX>H)O8BeAcL2$;K8WQ2TPLXN8Kss3bwLq^5+!kT0+{p4RVf5~-y3fuobt6QHHJ zM?}zaiiWqwio!gLQAmjfLqubF2+sY!M1fUouCEGyY+|0< zb;eB4BN7&*5c_cS+{Kc~dci`+gz%+kXWP+62(t2wHc!e_(Pg6baFo|&Tn_7z;0`Y1 zMfVxg1g$I&L9inxk8(O^CTL7(GP-9HUceunqn`%9Z1u$qT)4^_T`~}-@DpXOf4GFG zR4R+Y9;zk7@6>4U>Ap;UWy0Z9!RdN(jccCb3f;&JmKBprE!cP*o8#%218mwNeeHKS zx*w90EBHPXw1ddaeJiarLz2B_!=>Gs!b6as^L5-KK9==`r6n|Vw!66@%V~daJF7X# zC5-k^G(N%_1Z(1%KWmNqQ#TUVe82uSX`CG^LE$<_ciyfupN3;2(vnl?Fc~GnJx$JgQ#3AYyyuYw_GbN- z2)%G-@rC#MU8%89Q6oAo9bC%BxkUqaGvS3>ZF|gm>m52>)gzY$gR^1hO>S=LiTHhD zC%hFxu-vN|OPwnCEnYBXbB-!AIAW@O>KpH1!f8VDo{^fUP5*R!tHNC-dH=@GvFKhe zYd%Sl9d=1}PwauUcV(lan!&-rh?%T`P|mKe)g;#&tDa?54)mIz@*nZR_$3)(iTkQiGQ1GYD2Upyh;`)2HBl- zG@`1?baJU5z9|lEDC@U7_}0o@80iWMS+u0cdW8PSkTysEtk?ICC`ay-Bmeahr}Kr) zB%lxYiN`uOi-NmY@pN6qR zoQRnylUs-AygJHT^y@Aj{-~5%xECr-M!o(rzTQ3d!x5#c)H!j_<%$>7n$Ge+S#jj2JJwPI&Etl?<9~OD_1#f zo7R28pg8-_qg{H4MapqIM`aM#}O4Tj@c(8If#D(4wEk&N(Gf?Ncjp}RVHIS zB@STxUTuA=+7Gr8*X(VtB_7pv?EFbPHjGm0@_SUT{fm@t%J`rpXyE#@+XqoQ(9k){xjztM-KgZj}5Np_3Wr-r5Lq8{Yk+wmNoI#Q$8+-|r@V_WZ3R z8QnAbiNL}6n>LLHv+XW3?<*`K%V**GI-U;&I*mWGA*=M%4l;X6s~-1s+sD;M-4kmM zG#M5T8pL+Kg1^}n5y95O`lk8=gV!}wqlBDepq#J(yOx*CvDp$M0Y5=K)0FvLtauU6 zDB;!quLHd|zQ)us67&Y3?jE7Ay?q`mDE2y_{2U17r?<|JktCA6evXPI4iv8oZey8Z z6nDEPlQ=9=E;pB**CfZcz0V%=^aqFPZ~9W@u92b_Z}~c^<3NlsIMo zeU<-u^Wz`=k~fQV*;gtK1f1f>e?)va=jFYVGA3VgPxluKWh}q+LyK=ueb_lW&Io+$ zhM>m9vJ*UW2^Y%+{_R##g-^``y~#pLp+ty3;`jqc(={Z<`To>&~7rNBk&hYY-&kA=_^O@Mpc@@@v@|1Ruf2t@I&P05*P zfIXQT{~LsOQ&%M1C9$_wfVjdtE=I-ZF>%-YrRd@=f+g<=67ZmUhkD<!GUFZ(uxyeT|q2Dd@TQ)T@&0sh4eVFU+XW 
zxwV1@T5i^KR?|}#N#hf6Z6a=Q=&~JLwv$c2N2}n=f%y_0_zF1PTqZa;^R15)_^AGM zkN|NL*{*X8`oZa(ugnbNJNyv%YsT>82#z7xcJx))b+^YF3tTmTry`k_id9q?�EWT&XyR+wtqs-cK%vbA(1euX7carMe=65ggF4tHIW;-s+uhOQ7d`G?1 z3`Sao3j^uMkdTb;i}KS3NzaH5y2FWwxs}2s=TZ%7h6c^G4DXB2`M0s?ZZD}-lsw&T zRm~WS=94rMZG9GogC3ATSYGyzBO*l#{LZTU)7#lz_S12xZ}Rh$%Ky=~fG_;;{7;+X zy(hN8o`_zg-&DZusG^{dz@);Ye#sF|ERhXbq2az?u0nEwxuReF-S`;gg)zUsS0z_| z$lI6<>4GLvi9qe=*meFL(r@GUu0smupqXO#tqGCD%mV+L8e7h5dZ`c6Ar^#?`f?Ru zBwQ?~w)QqBwuj!}d0rx3$4}8E*)oe!Q(dw*BJLV+6Yl- z`(60OZ^4=+XzrGFzdsnzNDQ*$^#|diw#N8a+Pt$DnAu)iU`|@+>Z91FUf3=rManp?g-^GmSt8oW zPKGn_(pWUDeQ^fW^x$M|!pITJ1kUG-hyAD4gzPQ;mtfk-cV7KXdg+fr*<+KDv2n2_ z@{HEqqZ;hF09>cUUj1`k_or|OuXLPiln-i;e}FWTYBv5QDJlztMzxB{#d`X|6w*sB zy@ZQ&Pr*h$>iW~}ojmjwkL@xsqEZT9c&9zSjBtBln>4pKy1~zT$mL`h?m0B_XZLTZ2gA5P9vtc3lKKE`5 z5%cX>qd`-Hi`Ef7K!nCk*+@zQ=C6oP{UN)A3bp8C`OjO6g&prRP!SPjuS8bAo5u?V z5_zPn2l8oFh@^{RkMP)Vke&L`XA(Hwg>(q2?poE9Jrxbg^W9 z#b^7HQJU`l>&Jw><5OHC7JD4fbMiymY)+iWGd`At?YWI+Z1t$@?Lr>;IsZPE>1#K* zHovn+hRj$Z-&{Vtsd6gD7~Kj_dB}nJfSOKJHkaYfSzMMXJXx5L^K4;!n zZChrM(zaEN%0%0I_pOHM4@-Nz|G82Bdb$`GH>tN;lmGe`I>kl0UV5_@B~gXwrS*E- zr$-pL$6A=z7DIay2%>!Fp1R&gSloWV@r%qzI$teSUkTwT`Tk;Zw-?r#77I?I^5waB z0u6tYiebjAZ-Ptrl`2BAWagJ6!%MO#FXkvL0&f#hf82R8Fx zO@+=^sMzU6MfAJpDu=H~v}Vavf7TcMpqbwsPw0!E5O_l2Kp z{tGSB{>ZNpC;Q`^cyXw_mxk%(R@3iBcabpxQC||!w6%q zFua>0)ij(<{m`n*By7kFJDg0=%b9PUy+8@MKreJTivT?Y_f~B+QHAy>>Mp*yl8-Ur z>6V=K^JvfBy|r!H`?2}2hN9C{2>RfE{`)wISUBt6`WpxA(2UUa*8{!TQ`TIQmORkm1)wb>kl3K1rw^(7BMog_!afAa0LuL_eOmdx$J51v zTK|r6{IgJWrH?kvs9)OU`3sj1emB~~(#%Xf*O}$i(O?66}gE$TI(Ks>+uw zqWV#Q}^9PT+uoIAsu9lO7rGEbG?!7 z6Wh$;(|%vBz-&r|;*H|ePiG~sf1Fnj48n7hdT4^4DWlUpdjnb zxHotDX3gIr6mOS*4CrGq;s0HqeU^zca6+0q!SqG!WZ&$I>=SPMNUNBM0 z=^wU*NpMdgxpJ2y%H#>Rtb9*QguB4d)AQJ@d_S?bGeR{7PQC0of~T5q{M`HSM)oiK z&ucyT+dWx0%px=}FaanEj5p^x(3&2gQ@s#*iT%%-{brlnksJ=+&dVSBBcN64x>XQs z1kK%{yU>LN>^9;+TnFR>`E<}*7-L%c=OW-{>RsQS{IdHP?bW`8t&*DFE$J5JNJhds zPw~cCX1$1%Yog#-H1 zL;lO<)}EnXd3nX!g4c1g9kBhZ+TYJZ(pC~OC!yry67PYf7aHP^Qr)00bFj*(#O-JV 
za%ekilQuKq2A;g04U-)jb|cAwfj5uUB_(}Lt9SLIEt{y1;?T1mEi3AX&OgEY;G8kXkFH=`;xS9Xo>D|Zn%Psq6hDZ1JFNxbcCeCssdul?R4y$hel z{FlE;N&mz3j})Su>5UQXxfL0-G_R;}K0Jp4ohi5NJCdU@8ig|6-uIqU(oplQGw`AS zzB6Vl&TDeQwK%F4A!;Qx70=wb40x~tPUg5_yE;cMe$>K((wCMEWAt6ocuW5MGtwvb zs$tXQ)r}v20YU%A{Zg&%IH19Jx(gJ7StuAJtbyiaWH|KT3dTT94-;G zVuV24qL9uBnVx#^Mf+Mm|yp|2b^<4=GS>PGA4*0UH)cdWqLoR zm4sa&=RD~-|0D=g%Jl}#lR2VcWvG15cTP$2jLY4N(fH<5v8C9J+_J+ncGELScyXH9 z5gbsJC4Ti0i9P@KdUC{)Va*u>Q4C5 zJ<)Z?s=WsOBglu(u*>wE=GT!eL#1lCMKO%VUzj8s`})0mf}K<5%~}xeE)$j?e7Ip* z9QQ;f&d?TjNk)pWOACND!ezMEE8=&<%Z-qXyj$}GW#lF7m!=QcjL!tn>OuK4|`P*%(8J{PX*x-Wii&bg`3qXZ z+Cuj0_${&g5p>zx%sY<_5`gd2w}I@az`KoO5|oHH%v6D~H5c@2(xCPAMIba~R3EI1 zp{-LM?g?#?0A8`BUHX!9*H^NxU~ys&s#kYCpQ4B(*b1fbB#ELv$S$1oRju`pe|u$P z%JMGFZ&9V#sB037eHzq{Qc)b(UQ7k~=ZbAGv(NpggSst5No0F&TG?Sz0bba<;`_06 zMNYKVD_O~VBlDBsU0>5Yoja1I#-brh&J?*@rvMEBr3u7Lc}aR)lp=p_t_vnMXO!gh zlydI=ey}b+ceJ|Hzcwif&#rlSTie?G9*G4TXd) z2s6e+LpUrnB4Y3lX)Hix(3I^<{nZ*mKZP>Va{`*DWfc`e^Yin&bKTsq2v|P!)!W|W zuUS^5S1tU-U%emWWCQxA??-0l5`7E3$G)tk!1i}bR?BJaXn$p)1n01x-*p`%xm~}% zW6MfA%xrtEoC1K)Oi(uWyf!^EjcAYi2Ak$>KQar;mbRSUKuR$R(YKg5y#;N&ZQFMY z&z$wT9%Qr7I}cpfe)?@O*%=vo7b!f>2nM{H=r<~l1zkW87S-CAZR@MU;m?}%*$fjF58Y%|R@Qan7a&=htmpjWhR8GLC^&SOl%u50Wx^*;@+z z8vbmD+JbzsMn?5W#%u4nSedG&>@=+B{7d@=e&NzgKYUt3!mgO;G|g6_DXgFJfYdEP zuT@*IWUgO;!wa>N0vG@(hW@*ijL#q~l_qtz;OYdrBwTY~Ugm0reDcE9W)bf+b9p^G zo06<`@=K^x@ocH%f8x#sb74oU(FYE{CO3Xo#n?BsHg)5tM(c)e5a5&j#l>>6&D^Vo zCt?k|gWAlsl_R!{ix}>q&2aF1mtVne&OoPP8CHP?8R*QWirq0*3+qVM*xpzOiHRBQ z)q9o=xBmDLmJKi>fI$i8A(84wft(#HqWd8+l()E!c5Vd&IE(B3S(CW@eA(j_GpXGM z5n>zpZf!y6h*Pa{EK`vY-njx#KI!a)w#n@UR9N#a51xNV=(T% z6Mw~ws)kcb8-*xZClu{RLa%O(Pv%*!M=%>g%B*IQKLKo9GUiI#NPw-2$9Vhh({7?V z#y4ACrG6P1G%Zn_a-hxD9q&bw@tL7o89nww_V^ZPdBvqwRGxvrLa$;OJT};iJg$;L zv5SDyQujv#*UtD``bk#f-e)q@C%4%@$!5Rl^(6uSXfBN!HNkb!hHpWR2daFD$G8e~a zjZ%S$`tu$7&IF;)n_>LNDpXPCZ-7kxxI6PJqestKlgY6*o@{WSMNuxtZniC~H%zvV zeuwXwcG#&FStf%L-2K?LuwP8KzTePe$lD%uQu5>9`T}deE=NgIGe=3wMF{H0x%R^ex!|uCtP3cdavozu`77G#oVvq)BHN^3KZzOfh0)Ke&@Q}{cC~z 
zf7cW4$pW=MV&P_auqG@Eo69n4Wv|`?!Jj68_zHyf*X()rR_kz3YYn6eo`h!I;PVlt zUhya{F=!)c`SQ4qKZa-bE*3r$OM*$P!^}MPv9j}Ayq9q0vKT{ET560lQ^sK!h_SYE zoAxodp^T%=zhCYM4Pyjr*R|-!hge{`Z#%CSwtLPC^lec9!Uy6$*HNhT6W>ZFzhvAJ z!RmOh)=*^r=c(w{6G3_`{41&R)sdb!tgQ@c#o}O5+gL#baKy5io`BEU{yXBM7tlW- z14KyH{D`r=fSJNKZahU8qLX6pM3Rgql}1~DG@1^# z*+pk;Tshe<(K&>vzA|lDh4x|k&GtzO`1EB*iGSO z8pFjiRd$;7;tDdM{6;pUUl*HlHkQ(^?YUxpGvDe-I)aN_=|gS&^U|tn4#ofNGg)iI zx^0EK?l$j(e~DN4dOYq4*TgEh^<0uDa-aZOh&O^k#tG~LNIi)e_vLi3k2xngSk0;y z=VoShLFd6`qNl0ri#4%%!)*-JI}>bK4=TZYP|TQAY5HlppcKS>pVS;3vf3_|ekeXh zW-_2kQe*OeQ&LjS2g^zJY<8t&YK^&CI`IrvIs}b6?3=iNfj-g!s0Qz5BxVngH~QRt ze1B%>Is@qQ?j0uj72Dv;;B(tX)e}2^4W-kh6Lqwzzjin$f|ssVVrrs}-@XRcK@Ix2 z9l?Xy1bMt>mX^4<_~%bOUzz;IM~r#=5%jTwR6Y<+mrn(XS%WSndS%H-N=k}1n5M%v ztJaAJ{9!OTilCvw1){6Lui9_jRAglXp&ONi*tTDrY+FfP{Wbpf;s7%;0{U5tpo)B4 znwIwPk%u7kgFHd~{JwxHwm;>X$mASe;&n5qbguEL`8^mw)_T zp7)-f$Ea4G9JQoZ4xc4sydZjkyGv%*FJS=zxP8vA*8~M4QwK6n^glWCwVwG#(=qFT zKB8FnIa%(vNT!9{XAubdJzb5w)=871zAHaI_l=_tG2Gih6lVJ1-pbX}eD_=w2hMp6 zLos2@idJk40W0wsIh%))sRh|$0&fovoH>bniytM*t?xPymY8X&h21E+ura$Z>a=p1 zONrII{ho&1k_{JfF43e!OiD@#(o@)6bN8aHTFYn{82Td3TH9$EYO>bX*LxolUtawY zB*j`Ii&q4Y0(3Xd(b|{CnDcRYIy$3TmX^@?N|*xzWoQ>2o-7sYLPz$&G@|7*gT^|e zTg?nL?X*>q0{`?Sv)jq%{*X5`wDCps*CMOGN$c?kXJeOIiYhB2#$wBKz z(vM2Plp@MF@on!tevz?IXkT&o9;-{q&GkFWg4)G_=nI@?;${v=#B{otx{t@OjZN{a zE^6Z}YHv z#>QMysO!>5oh0P2>%L+UN95^7Mx{vc^OQ=dFPucGal8Za>bQFcGIeD+LX)3zv&C*&&uTXw#3t3!F6o8z(BwWBHFQ~+{seXdP&XX(#58GKU30Yo@%{e$o&(sie+s=VweDw+; zNTx&QWuJy+*?qHuSm-TnezgA)*iYyVccuJ{fk}St5uTwOGX(o2ehvJ53t0sTgXm7$ z2$p$9z*vow3DWWFppyg>ipFAK>6@8sIf%GW-E!oETiNVm-4ILDTfyG0CEYJAM-^qC zu!XnOr*(8-a6Fu$K#PK=as;y5C;*SdfL1B1oiMj%nKag-KD?=|ZRNU$pa?0i%1F5l z;N}A;=E1S+npRRz-`w0}%egkEe2M5yc78sS*pA-_;ndl;9%uXJA%I6GEn|3{GN4Nr z5LpVabps%cvL_X=5WN{mNzZ`!qys)LblA|x#Z!xXd(rbcE3dD{sx+`jWU*H$M8W1( z4r1()AXxpT&La?5QTQ}pd_rM;@*opNn*@wn&FH>VZUcH#+p2nIT#qH@{psSCMufo! 
z;TX@3yDYa!d;~fIIIaoe`79)I`ALldd`H!k=-*H8`YzH zW@MB;EW0tMeDIsk(%XtT*C zcVA%dWyv(El}huEhb&qvYyQZ|}$&9<0c0qmoR5<(KRh7)qV{sQ8VGo_p4{QHYm@=U;pw|{2vtrXv`GO&0Yt#ybx8Xg3G>VE5Y+% z;Jg=wH1>vHZq3m}kbWs~B&N3jpKJe|7lqM`(+bpypFxHHUC-S)9lZ6fY&CWX0Jk=f zIAsCR;%&LRzR}Ag6`A1vaviMJQHp}l54v2yw>x#qxJwR zB=mfu{EQI@2$wR7jQfy5QhXg=q^@%~2P%bz0gtLSWqEg}f(UpW3zv>$WwRty&$@Yh ze5Ru;mspFDpbV7!yIf>V)=fZ<$7l$A|ICk0fdbcYz>C;I67TH~D?oMqU1gkMM4)yz zyEZEU|JmN9ps-|E`tSOP9wye~xO(;%5R#ILst95Bi4%`C?`C* zPneZSK}AJNTY%DHZ@;OTr~_#}Lk-Q`KBm@Rzx=!AUne3?r-%Q;Q%G{1THnD7K$YI} zoysbHbOwW~5JIb7CSYdU9(M6l&)F&JZEI=qaYQE<(Q?__IU4X3LK`M!G&N(&XYKl) zVERZHwk<&#&2S)pPp7Z9mxi2KP|)*QM}OYr{k>{=B_%ZH0Qi6fun2<#0^;Ai(L^g5 z16|;RW^UVT7nonPI~H;~?ANpDm!#`YlNg z8Nb6dztGWRE$5^6t^fnPu|iG)By<^W{2ShG=qLj@B_}ERD-WIWfq2Vguv7-}_duy2 z3*MoutXy}MQ#Cp#3uJB&DJl2dGyjkar`fHHqHB2+MzA-aZ3RG!305M{SgW4LCd;ag zi%&|TK+kUwMNI*>;8$sqJsTA@wWtWGOQA2{o*}rx;FL}ygG8my;%ViAz@()#Y3iD;69M*>0O9D|3a*QZ5!(v&ZlQO zwK9qJtX#{m^^`+`XyS17)rsj5a9ZEaRYcT-a%uF_unw6UG~UHQaDe~PQeL4Hk@~!jsDIlQ;bRDcbYm$`7{zYZ-@Ef50h)#oVc3pgT-MWl7*vK z$Cw&$7pPyFg?#ffGXnhyVhg^_Y_AYV6ZX0 zry=p(b1|H0+W7f$Iu89~LMKcW3hrov9$NMrdThT7CxIj{`WQt?*;JIz(>5;5^XV~lCAXtS!d+G*JI&t% zMVFgQub&4(d{upX0+scOdq3ZmJPU`y&_e9 zq4q0Pc13d)D+iNekILksO6xat1YLPYo2d;pJqk*Q)~JCNPb^W?_95^oAtBk8n0k!L zP4i{9`2MWfet9@IVUT1Fd5P%h^Aw{Dtr}MlG;vD@UtEg+a9%1TD5$GfZx@71ke_IC z+5sY9Gv>0L4u_d&$Ud{Qva&(-D5=eW%#KloDQr4=M9o2OUG}%$k5HXFBCtH^snzQE zw$Xdif#i-r+aUP6;dlWGxnWw4 z)DtK?I^{Sox9zWP2fGHb^Dz}}@zB8fp?C~;1nyYoUHb(~07*k&I+B^|p_s z{EUZY=TWk-un=z$Pl6qJ;W($f^XBr+^fvxu^*#+JiOk^~h}6-BK=S#{RJtn<6Pb_+D<8$RVmDKRux6)xFEJtQ!vbgM7 zH@|)D1UAeXYTQWR(dz>vnhPtWJX;LW3pj{OC`Gm($4)~$Rs^;Z_s$*FUP8v&J+ZnR zjfw;=NrCZ*p z(ODZGYKl_`?RcW)hkwCZ-+X^{|1uwO(MSSQ~wRWJ2?{lk}wPX6R_bls~?{^ z2hGN#$f5(JQcd^7&oQ+ba2c$72Mmk{3w3%@rAR5MsEojYiD0+O8FgHcSQ@Pws#%86 z&w2=4*s|GJB}GL=`G%v#0x48anAVFx2=#1wRSb-qy3^6U%blH_g?1~NEr&>2h=?{I zzp`z%-63+^lirn@8D}8HyE=XjO1ptcAlryc3X<&o6+jN8;b`R^mm0%>7`G!%vFpvC z(}x-c_B3SdA>E)^nCtfH3VN>+g&Jo{EAKWBG4u3Y+&f#L6p*PEjCDII8;yF5E^izy 
zv6&9X?Jrk}xVcP-;@kR+4A!PdL|RW`^+C;gkcnS=;#suygeU>8zIVCZN)EU@fNn9G z?@V@_T`DPZ-eo#22iK+JEzW_eoAct@yV0DL8ZBLXKO#;V?FIyNr?;V_YILYEf5WWC zp(`b>?2TqsLQZ0w!ISo3I2DWO@Hd$iT2)BOi!`@EadRYRYHF%<>9}I0x{KSa#w5HM zI2J*RFkaM4Lbj&*FgQGfvW`(+MJ4@kf6M656LlhF-x8e(mJ7C-#l_dC`7RL%@=$H` zd&}I>&P~+xRh$B00IPjnwL24O$rTMXKzS$*0=uYp+L4tREyU!2QBX9R_xSM*eg*Ra zHWm>_JJV>fNhHKcfq{YdxmxSpZ?{1`zG-@*;WUmGxXR}BB9DEV6^8}m6UA%8y*XE8 zSXh8N4f*8jKNl!P-VG@!${TjPaR}uUG;Ow@&<+X?2p>prbXT5+{@1Njgi=Ond22Zu zu!1O2s`*kT?-%Xz^`hOZ>q!-M&OT#A6G$x{?Y~c%4q9@^{k~ZEuKT3uxfG6$+3j=N z@ETrBsH{|vb=eBnYh}!qh)N~0`jTH@ zuf>27r}L0Dw|!R2*(97fs+WWXzN%nrYik^O0fV>`&Fh zz!(zEj#B|5-6dxH&Ca&AzTg#SpegZcT;|9i2TTQ&c6KYBq{pjHhsZvhXEq1*U*o~8 zMsB<9B!Y`=q7oAxo{p~BBMXhnXAG3o-Y9E2u3ab_ApsYTnwM9j#$m1m9U#&V8R+WH z1BlqT~!$gR6JC!$n1te~+lH+8G_{D@y1IH&sgWixC zR!8rOMUE)MalX*!JM+01=AxsWB9U#|A6!A-2csPek!~yGi6wGcmL(RccQn!OVH<*g4bN+%jJ`0aWUABpv z?E&`Tcp&AbsHv&hUXG8C51IQ^VIPug=+}cR1mfsm7xrwY?gnBe-84#bbO90&ST`&S zZO$0Ra;ZY9i@3d#OFlXalA!dHfR|{{ymvAYqWe=4t_`xq%AvwfUp3MUC4`qp2Wbl-NwASE`1$ePxy{xs|Ky3s(N6y)&BHmpaJN0J=^pe?nxi(>zA z$RT!zA&L z>TnZY_eJwKuT+#|=cc)=t$ltSKV}0S$Q3cSpSI*e303{Nt+iO*aw0 z4;){qWbSjheu)vrt{}v;e))dD2#qX#A(da2h9@{DEn1{jQdOZ01QR!vSWafa<<+I$ z`$G-|fBeK7uUUi2qMRL7|98RRtwAGjoB0@fT); zVz?3XR_vqJ1@L@37KdExc+rCf4ccTL*YlXG84$xw&a9CQ4jvo!K4J_@8D9=rA*d~LD~;Z zoz*-{UouL-W=PXsHXEJVp;N!pfKK9qb3>4OlfKwwNFRTDZa_|$YLOE5XVS(cTeTTT zveRd{ocBC?;mBKKOHv7_m)jjW}2;<&SCQic2jXn%r~-W zSED~|Z!A+a#~NMQ_?h>Y!0O3&&@XYuX_;8d3qb++L@7bfNsm`_noFUy!?o3~Lq1v! 
zmXB*KDTvMr4O?=SsjM$`a;^5|Uf%i8D*+*Dn#{Lv_hw_=p8RNtWORL_SLr&k_o2iD zO*t$P<1GGTK`NR{;&4ypturiRVwA__UMMOy%(eeVD_vUQV02pG zNoM3jWCoYtCz^#fGcBI{Ov&|O;dedXw7!*?iwxVjk3J%f7_mj*e3zsGkNo81|9bfl zq1}f;_XId$^7X{%t*D}%ExY$Qr-S9_CIP2z(*KogVu?yJ>hA5M8 zX@J?-7{P&@tSr%5v5znR(Yv$~&ObkH(N98f0>PvG-g3qJ(6vIN089y1o7HcaYV~5F zbiQRN=396w=d_{xcgr%W7zpmTK(rjNi{C#|L9RL7hX%n5*0U){7(i>2cTp6k-cX5uUcbOimVtDL` z>c>aazGQgZGCuT3WL$`%uf*_AyLw5$i814z8b7~&n!2#_DVDD_TufeOLG(XM=I2ne zZGY=fQFSpluXX2#H#>JulJ~^V(x6(OljT%dy>sWZ4ExU$us+jBP5s0QHv~7^_8?oW zB%`u&n8~jF38qk0Y)7I<^Srg{jAEAZjkPVoi#Q*V_TUYKu7MPVTA_=Oo}T=WSm)g1 z#KbZE&AZL^$JH(%HX+r$|1s?6=j?9+Kvn$Nh<|r?H*^gG0i7J`s0bvYnd9foY-(a8 z>L@F&48)ark%vhsD*G3DdwPD#OPQFMw5vO+I*RWjTCXzjtmOV;Z3Xs9l1HBB5Bq+dm@}hmQY)EgkkTS^w-*RS7 z`3pcN6i9MT-#&gUCMJejZ)RA4#Z;oVZ`H=D1i321Kc`JBaaO7&CBJP)ugw)B`sn6* zbsA{M%Uq&geSX0B!H~>a2y?r&r$+<}7zI=rv|g&|30kI+y-xd6AQ))$Wj)+;(eY#U zkXpQxK7CJJs-K){AhZ3c2Qv5tdansTmkctn(8s>^dGt%s79l&lfKIWx`_|(BWZe-k z)habm`{#L{zFX8aFu-h4P<%$}dJ7$ZTxh|3$IQzIy{z4bdZ=PQ`fK^X`GyVWBbl1^UgeM+-$6 zc#bRGhAZsM4mVTzefBLPPMGlFIS!ViYMr|Jvei<@4!24iHv>diUj4Y_LumIEn}Az! 
z5{FOOVWj-$bg8X}AS>F{G}0Q8_P{pWU7tpyaVe-Fa)-gnLsSV-5$L=D3%*?@)3cki z5TNBi<-2-7r&RqB@A@zD^7STKpu>+HSAyz|)*({KakE1N?SG;9yuf6x+w+8)Gj#@{^0-$SFZ zvnO?UU|mXl|9Z4u`SDvl=o?k0tRT9aebS-sZs<~`9ov+_TikrM?dAZggU`i!We4x% zSrfn0zH5n(?I5nzJ>*!a9(jVn{nv9%P>{Y(#8O~jwxg-NCNHCTgC#5kSFI**kti;>c*m!!){!O)4OM2FoR-#MSm=V;_@B@*I+Ol8CsBI)9 z873X01VEu+Z&Jr^p_O~Nz22vn!~_W0X%(~DKjG3QDQN{zK%Dyj==$n_sMf7-48$Px zAS!KhL;R>lu@EM*Dd{dY94A=Dc zeT6{?0u=n=Br4w*X?jEF@Ikhr4|FGH$(QM<2nGGHSQm*I*y8 z0x%s#SVQyO`{eH)&UBJSY&ArDXWNP6Zk}XA1ecbnsYBvDwLl{aaLkAj_dzL25S#%C zzU9*heIW)mGg3>bxXX`9E;J&51U_SA10!*1awrZ3w>Y}N)g z?z?_2;F;7ul<9>6QL^lj4PY8B{flYHug|~BG{p9NWX%Qhpy1Gs&mKa^?UCHdGs`8s zh$dXGfU*DHxF%OQIZZ_?8?J}Q(+A4A*{ezK!Nn!1f&^;)>yGA40;J2GO z`~m??xXi)9F{eqy>gQV4yf%=1IVmjgc~ zjF82`HvL^)?N7M~j^dY41FZrD?zsi}g4_v;CUQG8?5{B{YkgNED2(5SCS>*@i$1@O#ORoPZOX zvmJ=2S*+V56o<;i@)>w7LXf8xQPcr#0trii1_FGFIW^SP>&KVi?U9062ehG`$Rv^e z*9)yH0MW#)Dlm^KsvDIS{ipR>;FFd_lPPG@U^)VRZu-SVljBtm_z*{zzo z>Y?Rk(uiheyT9F1jm5rrTns--na?hFNH_nSyAfDf$Wlkw1uAtfXk;{`>!wh#Q8FPn zwsi_j`l4SvG`wfYzpe7gY3NfbYJ6*AdHV$Xp=89g{qj>p)jDnr@S1DhyN*Ij84E;Rl zKB3;#8TPA@0Z^u6Y_TSyWqKvo5V*}=i{-jpHOm>$*B*P|6R?g3Fc^t-uM}$yJ0|$} zDMFwG;0!-4v2&>u&3yb6*tj7`c7%baEYL1cBpn&Bh>4^ej2*)8PI~2NM5#>FoA)W{ zpbrqk&wRY2pwI;eGRrU{9UFC!6yy@T^m=$tkTarTIqnMSO?}!J%6tpomG$KCU{Bp@ z&@>5u3s(kuS!(@i_zV&Q?| zb9ZXUXF5jn5Yya{n1qF0Z6HpCf*J@^7IkabriBWgz9P$y@Z2s`r8Dx|QARbi1dFDf zkEN_tUSAk2Hg5@7pI$(hmMU01Q_uZ!qrrV~XieXqSNuJ$+Vn$obn3^~;=)q|mJa~pwz`^%kDA)@P$4~zhb;J=Wu z8mDF0){-iV`gJc)IhA_-m3-g zX={y6&l_2$KhX{biQ;-mqfECx>2L-~Jz6fL)w<%ZX?iMSRIWCG{6dqJ?Q2K)s7Qrx zsqpWA+yk4eX3iyCRB2aip+35mBNlPwSycw+DitT#BX6mnY}7_dYytEz@d9Tt1nt zU!e&;VAb6@`izc@A^{k8Q5FI~m9n3*cYC#%LE=w_!L_x$Rohrk51)h<*^QHYRE*p& zAr9J2r3%l35r6#nklefw^DAL8&V=%)%z%V&b`k=D@ckayI2ZY?8kCB#IVJBv)dIj1SY5{#n?rni@mhe zx9-+}gj3XUBJHU4-h-fwh(gXNB(1c7ir$LhposvWX+YIfn$0d9D#$wd>U+pJSv<6O zkNzMhtx~ctmvSkNr2N-rq_ASB z5cmB+>;X9^p{~o9`BajTKa~vn!*WFVba3h9V%)#pZqk>if(qZIBc^6%I#UP=S+j+e zf^Fm)wh(BuHTX#a(kM{fi~;m4O!yqhB45?>|A$Pf~A8+m!AT);mQg!J(S0} 
z@&-!~%V2^Pt)B}&?nmqx4==B*6a`)uT*5tR5lIXU?;`wBa(4kSw9 z>tLEAY%$;aevS-v7DlsKF!@prJO+f~r~^kfy{kj6-wHJ9_3gE!yaJp|rH>iA4VAOP z7Q2=6l!UmI83&Ib%rTe(2Zf#w6q)FIOgv3%2`geN;@KM&G2uKDXdGy1;hTb8PDzjw zmzxQ>5lBrSj(6d`2}6H*f{=Ca78fV6yBl;n8L8Gf4fuEHgGcb!0cA$yt_U+~fla8m z;fKq+kK{i$4HYi#gP#eYV0qUuThv$s1b_Swu=5_O^fL|LIg80Xd3Q=lbm=A)voP#Zp$(IFoS!w}M}vKn0v;XMtb->3V7uH{k10#25N=n-H9?3h zLcJ-U{*cX#E@ewpmu){L&0<9Fw?jyL%$&*Bh|t6p@7!sZWs74fCB%6Rc>wu1$aLW| z1G~JrVAkY zeC@uqc*4h=p4D+C0Xz19Z064m%j9)Nvd; z9Q=042Zhr(mg+>yu|EnI5WoGNDwvfd&o*ap zJeBg&U$H6W62Z0kPI4KOu(#y8W*KQg`R8Pn^=JRov7e!f`f$1c7|uTGMg!K!4(k6p zqfb&(twbqD)X1RlpS!>N)oJb^)(4M$SE7TQA=Mn9kF99|o8EY|UDd696jbfVC|rt0 zOkK=tRUJ4ysvYgfXh^@p)v_t73%?d^&*!$F1qaKEjk4)jx7g~at*|M9Lwq`=uGyUB z8+A~U74o_C^8#+K1wapU*=^e%D}2V^8T-Lk!BhesB^0g%txJ40L-smWRNK&JGZ{vu z338mo{ARI5QD&#c<8&vwYwTU7Uo+#OCg+78%}7hjvk?K$h8UObK3ip%JaD1dol8Bx z3>V3X=;b{D#A}b{F(ZM@5Q+-;!)fTb8%7<#kmJU;Z{L8@1FJy$BT#OTAk=KtMIgj# ztZ#c`hf9@0i$L5DlU&ZZkW4JP6f@{Hbf~;?$*au*m!W}2t0_Iv+X4LN)!arX!eA~p z^f8ST0Zk3kLg1=zfL^=J7NS!{7RzcS5G7)?UFG!-P?jRZ5Bp(X7?ABstEQQKmY3?n z2OdvMm<&Fcj(L!oUIl?OZMybV>7_%SYKX=iXno1Y&fT^GVD~2`JoV*EdB_k$m>Q5n zjNf=0i~|OecC`Sq4M8U6S@9_WLqA)3CCtq;ijvl+9S$0C7*q<*JD$s1v3vCCbb_*x z6k=Ed29xT_%JI-u03Xo+fv|u6{P`u2$#$Sj1-hQ6Qz@k7;m>)$gXjpAleb3K6T7rI z5Ib2h@SEVbb_6MFOOCaY(;l$Ax1RO5lgWPHGWVl>L6kpQ16DW!o3Qi49ED-*`-KBn zO(N{XT+0x<3hbYm)H-%El73S%H-bbB)N1gH;EdWY$UML%&^Adb)lhi~w?2=St3EwF z=kIj0JpBjwa-Lh@BON@_lUAG#f9?;C_{-YvueIzCgxu;9&V-J+`e8Ierb(|MD69{= z#knpFSZbG8?y`b&E4Llhx8xzQ=ke0$)`uNn{dC8;4J3SfQM29xvjb5h+32lF^gdv0vY8m(n&28zP6=I?Ww#~EftqF609bHCRVN{1G9dA|= zp&W+;KQN@h_z)Acc)?db-w$;LF*?8pQ}VIiFLTPLbYM_NvG*-dR)##LnX70oI?HD4 z_zpX@R54t;e{%q1rGzjE>Fq0zb@9VIZejopwE$Ku@qvy`REj*1Fs}vV zyapaWsN+!~4PVIj^68sd-bY^r%?)ZcB^vnEC&_KSKom6x;!h;0=mv{1lr+00GGzXz zP5E5#lN3kbTe1{i0viMK&Rhb@!J%__i@DZ%MZHprTCb_zW)aFCHe~^>J)O57_fpUN zEKqxkSh%dB`!b7C3%;XT(`e|I`96GkdEU7md(CAr%hYqV@d23m53-k%&Zt)0m3~bd z#2m1|rGMs2Sr2J#Fh_Q9I@Bv~uxW)3FmF)x{dN0)|JT-ZxY`E{G5gcoKGw}VIceBl 
z*h17rghwFXx`t(g;tZO<49=RdJCoDXLHYuaiyRCf2)=gFFPBFAJzW7?Ma+Zo@3SBW zk%W!uT36pOB<-R`d@0GU0(Zi8s8T4=7ST9De^&}xL^%hSHl%us^_`*@TdR|pgMdH7 z#svLyw_mHfD&F_1gvcsqxsg`DnG-MPRB|WO$v>tstFosD2vr}U1K)U!Q1$*4VUzar z+`fjKeMJgTQZ7{N_%i#fPCV}H4A)aM@11`(b^Q48nH{@D(D`U3R__@n+pE^6! z)nJx$qGKr_=Q(mqLCz6h{++_BgVzNl@t9CK&NlF+LTIUoNwSg{?j1Bp)?0B8Eu+9u z8{xLw0>a>Y}LLKdH9B0h@qz0*dZ`(m>Y+cp4P*{4Jh64lv5;ARxp8tAdwUN?d6pud=7IFjOA1ExY4T&hKF+mGT)(M=F~E zB(vJySi*zq=z=UPp4JSFOkT%{`p2vy2K-rux$w~HP9bvia=m9SIt0N`iqEqByx7)! z4WId%4FD{?IUuA?h8_=c#|-*8YxBp9AS%UkE5;2@Vm+UgrXHOV=XB^LJLX*>+rH-q zg!jTUSQ_{#D$GvZTRa@&E55%TrIk$jCtTNzdS!bTH#pV%i<+6X}>eyW-m}R`YPrkUbOnwWm zDSwGN9|m+oKa^+k5@>I)wmqRe(9rgp>t^3I(6ljg-^8qir~hOoo=rQSe4*UA?Tl1E zj$9x<8&@xHd5~Erxj?7X#fUON>NgKb%y-Ql*C1#A*E(ScU%1kR_X)ecf}8$M{rG{L z>PX*duoZ5Wm9q&zE~oXgpmAmR!qVoL-XYTE{ryRD8s6VrkjpU3xVn}gcTwMA^hri` zb{i-!Krze@=W2RMiY9;qzlno8dkB26$AhT$1hdoEr^oPEdp|ai)PTvG)q2k0Z%K`v zK@Q5TrKdOF7+M9o4q}}9a{7rbw{KxuK#k6Q>9O^UmTeA9A#~KGJ_3BG@B57oHP0r| z1~(r7bkDGN#~c1p&_FhoalaejPwoRbdw^ZnT(J;(9x)z*<)W-@nGWeX7i4GYXJH5G z@ZYnqGuO}$h^nO<@7q9i$cl$IeirPip0PXp8M_{Emy(4$N#V8O-MeXq+hcaXGtWlg zK7RZDq=IE``oIK;^r2!Ay?ajjm6gh`HsG?SdM#FQz19fo4y}6#?6Vk;sYqd1h8JYP z@T&v-5Q?1Ys6ow=3DyliF8%n~Sg&rhcmvA*D^p}CP%Bv@r_IuJ3g#r(g})YY#a{+! 
z-w5_8JYy5TtstU71HnbF9fC*2ZBMXy0<*xj6bscm9!%Ee^Pzom5L^06Thj0scQ*i1 za5Nu)sbWEjkrdeXm=Wrs38Er6jNOajEwI1;+?HIdu-$p_$2cycMbkpdW5(%{7*7YJ z@LXeWJo9V6h2lSsSs5c&bHT(1T<}nYs&Dv?KHAR9BE=eZr1K@-36d}H9p=_II0XoP zeT=;vQt7hIWj(vtV&vt{BS)e}aZfz*)K#PVnM|1< zorGhu@%9}$00rd&3{&btfBhn>)5`YJRZHDTCD zozTu^2<{=#Tz%+VP=rBvPld+2#LgxE^`5Ta-LVi*Q9u)C=UtVYJm~VNJU_q6=?_)X zIL+Cl`P9Ck0fYYm1ir|^pxZ^FraHS}rX2}{K!zh z#mhf#+d05>fKzMc%H^PQ1r;&JGB8}E7muQ z{82_Q%Uvvu=;d6Aq-wbxEqZ}U^j3IuRMwfz$U_;_$jb_In!kjBMBD_=qg<;0Mg_@2 z4g&EpkkM{6cztm=7rLNAs>^$`*a0S8mgCy0gSx2o+9%1rNHTZCkgrk+No?y$~L z$MV_5M4g?!9IB8G2n{$R#p_~Ww0&fMtK}<9OlBN2-(Bho>2UC7s6Hz8ts9%&I=INC zQH+ib@g&`(7yvK`g6Cz2luuIZt{mi> zzdvOBrKrW!5kaiq_O??CP%{hQx~bi9VOr{{s#jzkA>n%Idvt%v>G(%Z#V)^pUNQ^f z#AdroPgoecZU!#y2WKO=RHAdizy9%u_FQ}S_90Q#S4vWIt4sR ztj8+vYeg>X+>2z164(5}DSg8l!Cc9lSwOn95Gf~hwn%2^m8oe&w zWII;@m)JYm7Y%#Y02U)zx-oWLXkYGCS4|Y+diz4 z_cZB%xOs0Og>WYP{c`%|bht!RuS7Lz$}Pz@_WY_tJ@x->5&Rh<6yl8$js_^9`sUEP z7cZja`XfJvY9^dzJqmei;|m>1 z{&c7ZF#+;_|Md~i0=UXuV`BptAOfRa?AND#Dz>!Pl7Ow|#CNmdCjeSL|(=V|fLG|z(WRy4& z`Io!)K-xS4GDb~w7E9Bd+0DAY2f0uEno#+Ff2YmrjCSz7@=jdH(ou-%*<}*LTv$*` zZg0?y%8~@J`lhC~Zzl<#xTpEJ`_o9Gz}@SV)Os5oo%wIfCu*8X@^LorY2-6lff`)Vo^k-!i39G-ZKf6NO{aUYM zGqW_Z>rT4Hq_kY$7w7pJ8}|177vw7A-#YNq3U+Rw_KUEv{_p4<{>D*1IMpw{;T@x* zyK-EC*6_}4^ETz}lzi`}9a-I8fh(Nn*#vQ*;C+`PQFLE%`Zr-58>FcM3 zB@WVaB1|7qd#vv{{|_lhXz?f6#FH-IpHEN8VZF%8J@h|#YrRc3BA_@AoM4q>CvsWcslkMl(OpQKWZcIufzpt~b;^p%%LFn3~`;bQ`g+ z=)Ugr+ax@G-%@1C(A|P2-vrA|6R+4S=YAvy408XWN}6IjpHP&3dXnYlr}0Sw?LSxa z_g7;rZQZ2Z0>0CBw=lI6n1mZP_0`WZ&d2cB#lL1yKf2gX_UX%{!%Y9jAPeW~C0NJW zyaO~pvivAlB2(o11X#8()0ov^jfy|it|GrSzIM}0?hQ~P(3~%ll5tHPfpw*)kw2Ju ztak=hWYxulOToeaJ&QvA0@;%EUxwDg?{J9oT9ZGE*zKpy;t{Okdge@>5<{kY{Ik!e zl0?}?o7yD}(Sg*PR}!QS(+jT#DQRK~abC2)uR?0mn;=eN-NT)4+S?K?L?s0|f3&)% zkymow+bk3MqNe}7=Yw3oA#hM8L`kUBDDwL<7}@x2w|e$8FE(m@ClG!a>}b{Dqv(4=o=_0n6^ zyCCGTNiwwMt(&xv9Z`ON@*47r|MemHa^_UP(^G+MQ@hqnWRUsDxQ-^Lj#RiIG$%n3 zs{kRrT_fA`H^DOV3{q~P^J}}YE){oz8DiT8IN}Tn5=`&)-(f 
zIJD4Z4NXi5=gTwBX$9SxNGU84BW3`}pSqL$q+ilMR1lL~vJ#M;g>4R_!d|!FK)!u5 z<3d0-%m4SjN$Zk&2KbA-sRiBHhL!6Ig&1;vHQNk776tV^MXXf?YA#z~75!%7uz`h) zf>-#ayljfe&v&__PVwzlrFMSy`L1DI=W{HO7n7_Qn*b``5?)zs>6V`CwyqBJxSYJmbmTTG6A`ZLPQX^175X?;t+1 zw3_G0z9t&lq3mM#_0l=be|>?@b4{aj_O~bTU#sVc_1ZgtY&k84Y(%xXCoGLe$_rj1 z*2Fe@9Y{-|LcvI%LkTcP~$}LIb-tljYY)UGcg`KuL;K0}hEq;Uei0 z%5wz=B%jJFxP)nP=--dznJ9TF&BA-z($X?p`$X#~WphiX0yHwgaXD|$(}kS8=Yyz4 zn>QQr`6O!fhrk_`7JGB;S4hATi^$f!huz2v#=qrOP;2om{Nby;A5GKTGNOmr>66|3 zOh>X(?g4)i+oXbwJ4a4(mNag@Jl?PoB1Ns@>FJ4x5dZ#|ISVwLB5?L=_%NDT^pZ_c zREB=G(vX>~^_se-2Bu;$TziUAE8Nr@EERy@_K~lF^dvdo|GNOy2f>U>i`Q_VejTqu zB$cf?;jExm1zw$RaG&o5ciw;X2!AAk&o2m9ixs2z!bf}a&z&b( zYt@aoRur3}so~$AN0b1)Zdvyg(#WdyhY)NHyC0{OA82IT`FRC|V~gxRu*;^%Z$>?) zisUw}CqC}E{>08{Ca@tB=F>hq?sp9cpOXrTlHZb-r3&^+z`xBN)=H=tHq5Xp z?eQhO@sdHfpNSkLE4a@CIHluFR|#3W3(J3|j48hc_Z>W2|3?#qTuR#Cr&|9jt5WG) zj{f*egUuV6t`UKHx#A|{k}K+kqt-!2e_qWrcD^19(MN1qW@Jwzq_*5T=%gtN3{RbP zuJ4&xy4w0&pTz9WSZV!AqHPGjC}1enTwsSSC5q~SltS#q1K<1Cnq;+t8{gdwBkj{M zp$*42=Gh%t4By}UKTDHif0f@BClQ-{&c!3saFa~P?h{{OE^m-cFj#^D7)vWj`5#Y6YpZ9=36V^hjx&QahOdY z0f*dKN%IMtiw=K&O%|37{l60X;KCa^`j^aSXgc5KovBT!X}J|d-3AeT4{}bB{edYn64Mx8n=AgN3o$jJQn;p%3WdHl zGQ^a2>S&7E&FJT=zjicd7ie$up(#mwd22Ptu(p}Vu)9I_>?~h}xxX~c+~1vmQ}GBx zGz2{uU)Wt)|M6KFN}2Nmz=+_e%^A0Uhe*PKLSaZ9bdFksw37YTz-u9THMl%8O238E zrd}PUyO*R6yKMg&LM~RP4hFp$OYXRZW#-REfL3^({G|e+P z8Iu2TybDL4O5&WbeXq-xD$(gNDT=SZPZ5S2zgwiOt^Ol`h8?Vk#k%0L?k_LZcca^4#Tn4N@mLioa;)D^Lp3dQAX zFWS@}J8NU%ux>*V2vSQAU=H|qgO#B#Z$g~apn*?`QEt;RzNPIkbybx~Mf=+isO`U7 zl@Ky6ztV56L!AM63%3JW4l;8ixxK#yozqh`pS{kAR!^PX?Lq$Y!cTibo~t%U=KLN_ zG1LYkPyslp-7Jdx-WqQ~A2>=QhzGtgK=C=?Sp9x&cen31h^fo0F!#+2tGM$82#`JJ zEo4%zi~}BBRoLMI!`eHJfAH^*ce?t!k;zqB5m+qMHrVOobgsX8zV@Q?%;Xc@cd-+l zG;C{1CbP&T`3q*adf^Cbf1pi?5fhI+ESqNx`X1zxwougJ+N%@XOv~@9(@I^u{^#gC z_>I>mR2vLToNaGs5GB0UnNPX+>^#`URaMN({&Cvov1IS*hc-uxIF%ptXlA=;W*0Ar zSutn3sQX!Avby!zo{vdNs;mup*@uRN#0(6suS`>xnH_y((>(H?{Lu_zhRbYn0IazRs=y^zf#2L`L 
zAfFo2dwyC}+@PemIqyt_$9ao-^>3Sd)It+79PSSn|B-ms8U2`e=jrQ%9!+Ge@8zdAc#CJBn8|^KDGJ58Pah-)JiR7AWT+j zVXl^sn!Fyvz8rxa_XOyMg1^hYx!Gq_)!D_1aLjhMwS#`xmq0~RsVREZdz))$hLwvlE z$7P2=!)tV_E(LLtTyEgh09^h2YzENO0fwjRh}Y4OH>RcljaAU2Qx@49q?`%N*tE1X znLs0V@*NpfaL41b>4gmhahdvkxI4UKcR{Uw4&^AmV#E@8sw?YYLz)!>w+YV#kG55q zZr!?8W^Y<6-4U{;r?qAsR3A~8K2`yXCStAlIG?? zMoU#pl&m(b(Z=fO&_#dSEDXTDy8|=-%?J2&Qkd%}(0|d{GK1liC)JO5AO#!NCJ=e*Fn>=ZVsJ)6g zqrvwKl(uRBr`o*@%mkcV7I9fM+gW`(JzSAeRKy#@<59p_J|Xb@*)z+D!`ACT7<

3V39X~PM`U6|yTDQgsLKnCxC?9J=yVCCuxn~WxyH{xb953>k^o|U0EJfrm^C?k01 zd`WMV+*-Lyl^Vaw*Ze;dR7!xghc+{Tw@~3%d z=apU-Pphc|wvEd%7AQ{$4SHvjahN&OCeRYv?#@QsbH`q1)!WfmwMe_Q#Z#Jyg`)Y~ zNCA_(`(52+o~zu}@h-cAOalE+d^QSOnEN=#8FWt1uESI1U(X2grMuID{TyK7(&&_& zcmn|Ja73@g*+(=(TE5kbOrpgkUy>2H3%^yF1wj+;gx}jp%$6&Dx;0f}phPf~bI>g_8&6EzTQ<+du z)@#Io?=P5}TaEoB+p$s5CyN3dPxvOY2)3w$p$Pl@*+c6c-EX|Xq9&!FQehT#&%3_0 z3!W($W2^NVe0Pkb0&V!=Gn=z~2dz2P1)GDCFOE&-VZtAXbrx%Um@G45I>h1_9om=? zmrrMC&8;x|Y!<`o=XLKS9cc-?vUk!9C^*t!rT;z4lsGp`z0jT}@EB%!_;q$) zto%_zQ7AHAdq91E$qTCbjJ==t)pj?sKHDO*_0TI{79>>bz_Z%vtWMR zgh~U*IblXp#f?2qiZz_VnSSPd@2}8F9-(ENZM7ZunzQfdv0NX$ANN2?`XY~0bf^L- zXdR;iZ2~Vz4M_;-+o{p`Rp=! zuCqw+HQC;vEWtsX-|s+J(#0U3;%^nCX59aN{Bf+VBRT86Hl{}6ESuQI$UeYwR_Lw4 zAr0}D`OHSC5=71X?!$+yuU~JL>nSpee*hket0gX~OQ*~s0TLQUc?9ra^v8fPC9^Q; z04fV8cGilxDre4uAd|fK)y9T94l`^BT4LqWaa6k6bn_i~d3AaDmv^4O2n&nJ27^&6 z{Q58=-!i~(f>=vQlvqXtA?qHjHOMC+S|@gOQz#4|Xz)|gqdIj}85}&gDWe@79gD2G z*nKv?9Y^d5tv8hW*nrM-9YY{>pDrxnE3-XNchGrtcbHR_#y#0 zG$~jDe;2ynUsn*Jtk*)hE~MKV0!@d1zW6|*-wpz%#A{wL1!};%i#e2e z5LCVmfkIuMA5m1{< z0xrVJ%41>{>I2+rd~VvO6Qm{;)z@A z%)Kny$lLWR;&>%C^689P*Z92{NVWL;?(cr_(D*GSi2ug^tSWYC{vd=}toE98EU&}^ zkuD=Mr&?#cPASCb?(kyt`}axU#BbI6YZTPQnF#$7G&<7w`N9@;J@w90llzKMb90$4 zU0qrNF7$Qh0k)iq-dQU8n3(7c4)Ekfb~kT62I|gU&^w2MM(%vYpk28hECHS5q%<&c zCh<_GBMettrD!tJ`dcE9WAdx)TVUj|b(aqVef&Gy+ffi`bh!9!JhI`tXf7`!qa4Ah z&kMN%kS#>vNi3hP{JulT^#gjCum;$;?Wvl=*Mk>@99Q*BR}ad?T5M%rqAOchK*8ZZ#&FF*3_fYxY*bn*|!%r30^?I-q3dgA_mov1nCX75Up@Jf=dma zK2TZN)bga_Met%uP|GR#-lJqPcV0j1jG^TY`-5>-#6qTPS`;4j#^?S5r5@*JvQeRq zr3;%@Pxwvg9_Q}P!<1d8PNbI&Om^6764N)TqZm@VP(_D_sp64WY7G^%2@vyDTVdBL zM>FH^lV1IwPyQ$FPdCRPTn_Iqfe3@4l(S9rVQ53XeXf-43MErWdk$M+82kH~|uGZpC0A zN$Wc3F;?dofv%8KGkPE{FCmJvdZP!5hZmK2&^zuGU1lp@Ga> ztbuq1sAMC~#~JqiZXrfy@-JZW>$C*T0yvq-T_k(&gNI%5R8JKQJOictt&mDes={Y|L=7H2bz5x_<2* zc(xr}K+Z>h`4tkU!aS4k)>%eR7^g;v2 z3yB@XeWmqVX?#RI(z{$kBe7}){$N6{3Wi5(rDrzAgV*PDulo7MU_iCnnz{&;H;sEu zguZXNQfzH|vfN$+iMvovL~&0#Hp6KUle6^*3<*SSkq6Ij=%C9p+!T;8 
zrXp#@uuypWz!|D=_t*$)fYdJx*ysD!{Xt91m>kG&+rP02YF1zz^Zg|5)@-#|eO@x9 ze&U8f&wp-nsySIPXI}C|C113V+&cc^7@4H!{USToxb(7vFBnhH9TR@1rI#a8eK)5n zh(P|0^cxr&at8YM8D+knqm&Y7>sGygmM{KyJnPZJ)Lw+WK|l2uj{Z*&Y7r%m*>*gH z{dnSVvuG_4XdBnBU2`1}tXiJ4K7(fQpE4RAs)8dYAayWBff0^@__j5_FCk|PkbfZv z)Vfj4$d25vqM|a_RpIt&OI|@iBULe`5??@VMqcTghZMX($`Z4!z3L3k8L%nA26Ry| zYN88_8%62px`;MD#=_uhUDdBvvz_>ieUFva&c-dKmFF)AN*5#F9p5%NuaWM`*MS8? z){xU3yj9(N3WeH)PrE=xclg53b}2(H71{EJMQ*x)lV+3aVON@&KlCQMoMPYaZ!DcU zad*&FRY^?9sp`%LKHmj~Pz9(1!VGPZbWk*}36DHIpILG<=ar5a!>rcAw-lE+#4}V3 zNZ9tLo)RVuqqNVsu}_Rh{3Dy|1F59%|7tPlSqi%pclxfZBWj<}L((_@wH%c@(6B3N zq0I_sftC|7Xh$>ai*lZAkH>-14PiE0_b7rwA|pOt^4YUz;CHJMC+^dQa@m+tMgu0^ zoumD69|a9!APtS3aFKm3ao>SILA9K;g(w6No*FEkON$P%_wV2DVizKr`IjyXN}+P+ zz^c9oP-{q#(0xMw1Uk1nN=iMKYVaD_7+7<_#05pqM(Xlzc1pJTDgN9db8CRd&w*=IhpS-~X zEfRJEvezfi@v7g+JRVmKT1Su(Z0e^bm3_^O2?;q0ytb8dADSH{r{O(L;*FDHSj<8*ncc4Qip+XS4p<6%f>GwARb;id-jG(n+hm=)nJG3 zDz-iePgk{%7yLV=LyIgsE`qdF7U=%h?T;K}+Z!~ZWg8&X*7lijFN>JK z@Dhrl2H)T3u*FdKyokDaZwQ*#R8#SlRg`oy>)n{{h3Fx6Ceyv2t|ja0RNB$^s42DR zy)*L;ro;_=e z1U4l^sH$e`boo)2)&{Mtf|}8?yD<{c_YP0?gSNOsQ9>Vd)se#M?aw z-+AfV9)bvnunqaJm|TuJq+3FogW{Z>h7aZ9KDjk(Q9YjKA3RbPYIXvIt<&uwX9Pdv zw}Tj^PdaM15Kb~OW&+WF)=g{M0HQ|^T5UCrR-MWMcJ@%Fm}le6ZFH&nF`|0Rpq&=1 z_G}FJ%52%xLRJn$(6frqcPYxsQn?S_dlwO5y)<^qz^9HzTs+OzdA?T#mP8vk8i@Kh zO>h69Jkr|FXT1vYT#L2&I1s4lUOnCLBwY6?==FZ#Xt)$*On6KA`Hu4`rS<2Iht&iUpYWgGa4Q!PX;s7+G*>F@t6gUYe$L8}rCk_sk>T`u9iY zY$~hY>a57L4W_)AQPd6rZv!)O zNc^fhP*uYY?BEu{iZ(L0xSlQGG$l`%91VhLC(AEkU=8CJ{f65?Myj+vzY%7fxN6N+J1Fb*z4!NwVg%;kx!3CMG6T>x1tXzug0|oEB^hOGRLI z(FMs=d5fi}$;;th#XO#?X)D7OT8jn-Sh!D!u~)n(g{&~7`YBz#3vRZ1bp-B;xXu6i z5pt?kV#5cwC?P4aeRZ${^n7w0a&q9x1-^J6#ECQKBF&g-oYlcD6}<1WfjXMWjCPxO zQ$yq%!5?6i=)LS<(fq;CnLs75zf!#Q)O+TmzPE1P++aR((E*9e$aSM4wzKZrys*o_ z;mx@qQn{yB)Q`4;5`y>J5}z2@Rt)AC)dumY@JS;h5>R2aM)6rcG;l+A zXQPNTJ+rJzV%cy*4pdcl%yt6f8OhE%P^svu8E%J~y;!xfP+`@Tq48ED;J%$yD{|^l1U-+RmbbTe>y};C`hB0K=xKee#UY>L zbac$!KANRK3U7+xsmok^$w28wy8b*NTjd zjUmGLFTK)`>uH%^)Oa*y;E(c^<9euji&?-Nr39J;cuzI0F~Q$ 
z;<_aLvkRP?KT{GVx76$Rr2OIM(A>F7wL3#&u2Jk zEA@tGgP*xt;Q3mmUghYGQwlSE;;XmzeY$2RlX_gAuQ6L|+IER~6$r8X?7rgYE54_5 z2P1NWsm7LZ?T8%;{RQ8=6yp?QL@-&h=13$ta+de@-UYdj z^>`o^VwGdoxbc}*^9`6qB5ob_gmqis&QlccK-BXErB8C+-jx^QymG*SE1jBI_)mU* zO)%%wskC?lvMXEQM79(f*e#qP46n3cJ_oTJwvgCyUtUDqmb8$jula+Z3XF9%B69S@ zpxA)*fbB0=lh_f}MSPi`zj$#$V}HLW7{Sc77iU73dc4#6!x~NtaXC(UUOtPMq`iV} z2sUbD90FQrq_;0L-%&<3W#J~h{#`~oU$Iyg_jEEp{ z`g{pQj!nev7lsMyP&la@EOdfKjn{hW3PiY=!$1OerNa5g9|YVR>D6_%I7-i*S2%;L zTvNFSk76&}99=g-16j9&9(7oQ&k|sir~qe(VhrZCv@~X2%OHI^&a1gY!1z0*iw~Ah z-=j+-+=EI(d~06K2iishAHOmp-jn76S5@yD55YXd$S9$pu%O7J!3+jAs!r35D&TqCio0;} zVg?j=NOh2EKO1^M7`xi>Xz0XIPUOggsZ)(HQV&3$`&Qn9AM~+5fBbl3bPDvom`7KH zQI>xBy9GGiPklSFZQOm1fj1h{1;j5fF*~FEp~kh>24!_q>6XxZ0v8jRj=xG>a(d@% zx8F(I73QpC!X|gP8~de#pm^^(H=Nxqm5S4N9J7lsAB=#f>WJIKT^QB)C7t^$rCAzC zJr}|hJ4V{+prqZFc(281UL3Ulzqh(h+r^{_5<1lk_VB!U^9Du4 zGdhpnI=Q(&;On<su4R z`?&@z9DkU{LD*?`dTLCHuKi}AX12~GeY7~iYjC4Zf}M}AxZB5@$2Ll-w>7D-8T^TM zwDa@x#kauQuOoguxaS*;H5SUZ><~|mP_UCp8>dNXGy~>%I*|2za5@7#@ z;SSX3pkI94f&w2@j1pKK0gNE^?3t|ixCYEpJCo&3BF{sQp3f^5_g{OmwX1YO5meEy zYBe!-r{?69o|vnnCJOHz3eH~=VMAjGSCK)w3$ANVO?Ti%-CLzOa3wMD0pFe;F%Y(t zl?mn&!IKA>@WBQhyMS6z1kGF%&)P~$A>ZUmi%avwkL%c)YeO%n@?EhExvpSFQ(J-* zMa;s^GK~CLJ&p7{pg^W9*W+w)hi3!_&w)A|-IC}Em_ZbIc3BaA)jBH;gYjLC$IA5V zfKyA?r+`Ng*Lbn^>N^MmgZ^Un#ickX-@Xl@%V*9)k`Z}=$D0#Yf%?fQDL?IKe4FcG zDuQ2yS$QkB0~k}JxEFhY53>2r<_Dc(tI0weJ-rXelRD&sw`fl)1PdV~5gU*)3Vq}C z1$u)3DX6zEI2>#`483@!0^=}iipHn}7|SUGz;^cR*)JtKfS^EE9>$ZLl$`v;_BYOW z6*%Kh;xs9a@|$?>4NS@yIn%|!f1Gi&HSRl3Be5drZmO5ktp1{G7ipg^M!(aD4|NF+d`sB?d=_OwgH9!+!Q}GuxM^JXD${9j z&%Of?B^lR-^DAqnOn&Ti z5cN1CQj~TKP8x1o*I5ei z|4rD%To>1%iG+2}nFNY`I^FOVOuSX9;kz^ps$ByXW*Waj z*lz?4_?qPsA9P$77`eQkhCa}{IiCu4!frgWD&j8` z^zb;;PfkVcR-FH=l%p?eJv_vHmX?+jlz$vUJoL1@fc3HG36;idS7rof<*&0!XkGQQ5#`0=Al&!8K{YBiX- zy@I6_U4;kC4OhE5d%f%E!uoxw`JqV>f%~v$)RstGh7^}}h=$5U76cF=G$64cr)ojC zYUeYk;BvWC932M}gkf97uf*AdHrMZuwY)R3?3`=9EaL3!83yQlo!qouVF$4PW6E{S zHK@WAx7;hXM1RlJ=V?7`Vdb)O{`QNm1SyJdU%w&)WAAq>oO_Cb 
z>~Z6SFIGc!p}iEdzDcb~l*wTcG&w~F4t_HHFIanrbI_^pWh(5Ly7Ui76e9Qbbdu|o z&hUQA?hSVB*!vxaV&eCh2{9gXB9V{`a;*UN?7Kb=gK)e95S@FmCR0?*2+| zK(aZj=D8d4A$pszy<$V!6PAYwoN1otX9rfAJ1Q1g4gtR7V5E!*X5hO6%!0%2n+a^x zU8}RhUj5Ph_8Bglm*V^ux~k_38ca*kk1`&Bq!XDMRr}DbFd@|z1r3WbrrUPzZsEh) zQ7|BhW$YA}N&p)<11v~K1w5$&9Ln{eok-Cdwgg{Ph?Da#eSmoXcWD`P=U9Cfg^o)< zHSWRfPw0c|UioL%7SNck5*SU%${s=!v$nw-2`ODUQ8V|EP`>9+pC0sN1phFk#ti2& zOabd);Rr5v_I9Thf*M3@^k`@q{y)0D0xYU+{d;Uwz(k}}Boq-)IwS-ILAoVHx*McX z2}MCfx)I5tB&8Ju6eMIAI;Dq_7&^Xp59i+hx#!+*p676IW|-M~?X}l>;}_Y3m9!*&-2#ZeV3ERBs_x%Xa>L_Ouzq zH*DZr0D{6Gve>_DqlrH@fB-9i2QTG3=ETL##}WaBrQCbtbn| zg*v0%&(KF_6cw=C%q>hEWZVu;&`(VBGG^>K;C%~#I@Ll|@1;Aj2C~lyF}cI6i!(mNwS?=R6^*Vu`GbebM@*3+XooTyiOCM+fyaufbF|A`i+lN;U~H}g^fF6Kv% zt*XVs2sYg$C0Ra(M8_@sOqwb!u|E?lYbWfkMIL?JkV2pV3zA}(5;9$>byx41nj6Ha zv}oE;L@Q5tv9b~?^ji2gZh+U}jYy7e5DJuXe9Cr*0ec!xm zzirt{JDu<~X^b*_7R0PYP6HZ-#)dBS$mQAFs-DC*^I%p=s&@p_AIFl+bj>9BU+RF3 zqheDD)6SXbdb|9P!;(x{oNSwUzvkTj$mE6R3O6X!X-%M{28gbn; ztzKz==sGpx|AJKfq^hD~z8b8GG>tz@LqVDNGy=4M_!tY;(UqH-LMu?Yg(_GkNJLD7 z2IYw#yPypMsc#9m`3c5*JXqQUmlk4R4fE1(?U#eUGP63HP523h>tk#EGLMl+l9A6? zTzH=-)n3=&j5J03(jKF{KL*&NBTh|cDyC^J7aME7QFAXkL6iACV0q`j?(T1zfWW+& zkgsvI`z)2hi_B{irkJk>g_8%J*)#|8-KI|mwIZ|g3N0_Wk>VgsSLgw*pPUzfflmEjJcC-;*Et>`+OKGBb@eId zp6J|9&@~PS3L=FGp@^x_ZH3I|pbig5fm-G2qc|uNlz_!HfLg>sDkiLNj?|TE!+)7d zOM*8m`$F+mmBV8A`2m5cxs|!DR&*RBm(V57&~K1EkbSuY)_$^x^KM1ZsuV3oHW-jC zT&K}3b9lA8Tu+6>3Tf7(^?{(iA&dj)6UlXH^YKQqvhi?DWi1xE)uxnE~tMt3;@=!B)1#=I0f z!LGR+Aw}X+Sl?vGbsXN+2hpIvef#K&l_y89$=gaws;?gDIQC2|9SgFdctmMzMGgFA zA;2mG6K$Z3v)J+C_QHF?GiSbXO$RD#KcJ(wDTH`BC^6AWNABOXU!$t$yko5}c3C)^ zLC*2xm)Moh*y-7NYJ@nbR=0pvn+V;fK4NXbZQd~c0gbY);MS*>Nn+f>Nn*LJeVG7( zVq9A|-O`S18HxATEyk^yJe9(}4tA!E-ziI}_pe|?uCGX)W3fo&63I))C`am_|ICX1 zJdj=@3A}!jNLNHs8)C6h>@Gm<@F1A!XV8a~HQAuie6ge0+|M8sdzL8QxMa<|zt|Bi z9!PzrV|SY`)|q$M5Q=XDi5}UH3J~L>&mhp*o;Y{@JZI~dB&dnxIzg~CLza#K8@+_8 ztgKn@L;f`zIu_~C2e?%a7>(F%FTUWK+8D(H%3})bi&s__plpw5Z(MTQUexFkcIO4L zE9V5YZh1)H@U4^aYVH+##^2uxd*hnZs{1g2rHzNcWwwIyQ8L^+!Vp3&Xe=j!R$AQ9Xj#! 
zgb#MFUJ5X?9HPUg&7h>{

pHq{(^wKqffVLV#iV>%;xB-vP`HaLpU7N{&^$1!2*t z+Ec=yhv)GT@}E|x?2CvnpkUG0Ttf2_)gc#!NolCowBj?04#UJpOFVyJS#+30vOUU! z-_&l8b>upk-UTZ1G=+1X<5**k*B;>q+O~NuxCd@!mhWziif0clxYgcmZle_oc_cB_ zcCvtn31EsCw-oKVn+wXhDF>?)Y5UH(MNBj-+>&w9T7 z+&dx=_H0cW7ysII$`?cnuh0z2DXhqSNdt;czkmNOBhPsOeyxAFYiO(Ms|`K8O7cz+ zH@2SIu2$k5dQCE(=O$tOSu00;pjz)@*=p}hp<9=gOu}wunpbC56cQUi^J_b23v`B= z1(=&KK>-2nqjKBXAm@i`+8u}5%gk#wMOoP(dH^he*0av9<-Il0Lptw@BGnh<6F<~! z`qq$XWU1NrsXWow$FFd-;-CV$a-7V*R7<14C=hl7I*&h9^cj#QB92OkE0SXYQO}7` zZ0jNa7ca> zFd&U9;NODdHpEd|h3*@65N&7y@Flcqd`bGA7DspbrYGIitDwvG@lQ1YY=9cBopzeJ zES&X6K*y1DA!mCqcrP#MLAkbC;8aFZoTh)7H0N2U zyfPu*l*3GD^fK>Tv|)0ecNdezEtC&kvq*=XhLXoA7Ba&$r@1~*yq1BEGE}%invIrkmVjZLw#}{! zzSmhFl;xMqBpTF@6?8DLvnx844=}$;n<##(`s>#(ojGX7GiHo}r{LNN>X46sXIZ1- z9djz@!Z)!&t4(;6xCzDW1@qD2t1me&EJAm61%$xVAw(iF6L|LE;WBCG&z2VO+CH)A zReNmpa$a}w-UgF^!&G9|&{MGl&!tz0X!6`pb?ZVtWGxt|N~}PR3H#8yvS~RUwEbku zUF@|-)&5VOd|(Mb9DNk_4~#De5uo=AxERIq6@zs;~WlEEVwfdiJ+u5I%8 zVyqAtc~K{v+>aivY@d*7!hh#^SZJXZG-Q~`C>S=WKIB^3Y5S|p)4|$NMux~S3bd#0 z@y%{7f4<~uSMO>mpghH41q!W=mXzU1pb8>*+avA+jT6ZNz*}<(S3ihFvmo2Lezjjp zX)ZiEI&CoI16>D?AOWiB{U>j${PzV)=1?nH!XSP?166Kl;9w*)WQbxsHiS=OGm+ z6%#NhtzK345p_J{`K?~Sc=;4m1iuOl#G6HQ5UHO4ySpg0?qDoj*Tv(DI&-Nf6d|6M zr2vx_OnqQ{6&9_!rR@nTJiq&Tp4!mM5xVDt3-GhzAlMf}qyT^drRRgHme6$njRhYe z{-=Ku!OZ$aZwnk~WiMorj(&8|NuCZF+7_>_8as>JoIp7Mx0oixWw-i(lKsTbUx59< zcaO@MfN$AdtO@>7z+2->GMtwoG?azxx(pH*M6GOdr!M`*>2y~C`J!w!kDH8);%eHC zDqs5cA^u69uH`=H^1P*KD7$xV_%k|ax;el_j^xoX(zt=#*Rl%(3ct>LO=&%Zs-fd< zPwHzh#=W}SY33ZiP^T{@F;PGYa$w`!_y}vYnZ;K-j_wVrdbyX8YZAnO;i1kWAaO4) zNm54#{}Mmrr1>u>ZmVAI4lO3rjR8=I&xCdVw)hjSrwAqg%Dvn#v=L1yN0!M=es+Hf z)OiTSdNl!#f(U>S`HAP!#fF4b>gtP%)+pd9jay72oK|zmrr$7%5wzE z(~Z#!3s>v(Wl&hqsciEIUnsPkp?L~vSyoLAqA1Aa$^xkc+>MQVp`*?{9&T%!D@Eo* zLQ@_Z+4i8(l31byje)Yw70aPwmZ>jENFGuo;5`4^SW*|X{K2*X5qPKn0YGd}W7a6P z$e3t}LytFw|CkeE_cF4z{WM>_pi{ZFdkm^nj$gI_qYKM}gar5q@D1ai+31HKKRgBy zHaxv{=LLRcvWo^6#lLu&=a?6110V$q%b2*hxQJoGMRgy{++O^C z3_*B)|3_7Q5Y;4x$FMP!G5wwttWK%v6hgK)A3S`xY-Vh1%%i~Y6)}qh=fcX(mQz=u 
zvsi37aG@rkCT)Ux2a)B2&TCs=515t9>1yYYwQJ0V7t9cK z!A~Uu2Wwl$p3tT`uyy}vq(Uxu+4GO5d|z|s;x2+SsWd-a^Xv=l;cp{0QxH+%-R0L$ z(u4&096Pey6`*$!p!T4pJ@fn|D^ZjA{<7D(^sv!ksN?>P>yDe+^`T78D&4_MrOa=a zvt{|?c=F6IW*$cQh}ubioR1C82XnSM1(4((d zJl7C9W;%1x2PhhzL+9J22sH6y#)is-%>3vW=;_lPXSnZL>Td*$-(4sv{u604J&^WR zt#8~8Jk*QsS{s8hrp_D$1ddZ}pY9fMkwft?02VnMn&JUgJ-?u0D29nCdz&>TICu&Z z5fMYRB4lrWYGVcBha;77W%usgoANp5_Y}{foll=`IXhUXxwW>ihNHS|r~JYk-3}SP z;8K?am-{!O++9GyA1jKBE3X(5hD6KS#!}=uFl)RKk3axDtoV>%FYtHBV@u*dV+s^m zZdz>ZCyAYdk3LJ5lag)HUqQyz`&4&hnyK)_NK1tndJpTf4Zv~-4CdBGK34l^5V+2@d31qg2* z1+K3EFkdy3xTpRzLHRY{&vtmtRok`#U#<+wl@{EYvOJ6iEma{s{voe5?Y6#+dA9{e z5RsDQ(CV&2nAaE9O=3P~lL*q(Hitd}LxsPa8Jk_$7w6Afo za81n}e)2Q&w$2>*ewo9Tb|6>SvbRfT4ni9eD(;)~MJgcMW*ln^C9kTTt>?Y<6EZOs zy?_g}ve70Cd}6F!Ba_}Kn#|r*(`ZD8SI2dl0ZNZP?)xCsFVz*l**z+a(uNj$Yy7fg z$)zu~jYpOSqfia`E8k28goKoA-;olg_c&R^Ob}bR`2xVx3H%ljU&muhP;FeD&gCD> zB%o4zdjHZi(3{M|?!_NV>syN?5H|ic=pu8nLgVM5n%#=FZpL9ZyB@M}o6*3E+qL-O zm>a$l;t{*c`fJDKqB(z{cM#r0Qfw?^CZo-(=Z{AxK-{|47a?^>^;ixBTgKCWxS^?h7Xe=21ja*keLq%Be6ZGCNT33MzQqb9 z&~>$-;ilsO{eqQpcb?a`GOVjN7^ik*w>Ft`i<^yd4&g<6X2%*jVq9G}6wbxw&hUH@sw z+yPWl7ZaaqNnD50ZPN0E$1wGQRN>G-&K#;DPg(T=`Rn29#VqA*`9oR!E$qxy5e6>1 z*~T+Iw^RvbykJU7-Z3|leYC*;T?mmmkkt2B6HbJ*J9ii1lLGcWGFH|&8EXdyO zH+6H!cNg|>m4cwE*WcfNRFzDFT-*KA*|R}+3ZfxuMsV{fDGc~QBU!q`7hprxfha5< zN~)b|O4XT$fpiyGG}70G+;VlcWaQ-5%yMVz3Ab>(yCQ1@y2Y^| zKYs3McR zsg=_8_bVL_L=PGf8~@|!wYey<1La&D55zkGqTmFs8x`R5ZPR@`**7 zewErA3vNeLLvH1U2g^s5>aBtCXg+;Eo(8V1R8 z{n>o^+F+unBkB0q6oSNh%~tjRlje0BX^xVeA@#Y7@CsdkW(?N#lI@e1|>*2gc2_5*7w?uy4Zm zs@`%nGBT_2Ij-YTy}k7M5@?yLXlczlSHiwB)5a=po$i!`??7A-}C@YWcY_At}X-}SuSH)O+ zxn;-=2`6}{DhcgXw0N{eF21K`)LrZ^zJ%Fd1&E5hbACO@!&Qzg<#ISc9$`__%&uh#=>a*w~`A z$Fm?M(emR_+zSB~4()fOuk#4Revt=U^w8`h-9E5?Apf_Tpa#X?UP`XMv!wPH;%&s05^+M$CY!5k0 zcK(7HRF-=`+a{~#Xl3}GBu@tvaEJr&GO1GLhqhNsq6M6T(zJmsBs{czO(vRCGHn~D zjI^}_-n>zUq7Q&EGE8;7vg@#-X`uG_5lp0x7PSI9?>aO_n1d$(ou>p2ey*!OmEA=v zX1Nyhz3F0{eajgCS`Wn6akdjt(X05b zc%d{*#JO!MdLWf;>}YR69KRRGNsU-P^n5tGyQRP=ycI-{dzNK+#RT%*n9vo 
zk$9f|MB@aPdQ1t>hP2(9vC;1HrAS%~uuA~|R+}$R$ZSF>ujuht_gqVQMn+t!@J>dH zM_6d+6x6w}u%2ha^f3SS{axeF*_2BZ6lP)VQnt2>pxFmdpkMVQRGj(~#5({p&`ELt z>JL8@Uk`bDoFu>b$1H(!{-bx-5z3Psn@L{*=9~r!(x$K@fP3l!Vh#puSiVGEjdSPD zfsX7InaEpruf;ipKwXA1=?P(GKTi2M-R_}8Zn`l~E&MQ&UB#uQFeopn@UzLl7Wx^K z&A%w*c$L#@3ul(=Y#YAsHwt{!W_Fwg2zyx*T79u9>26gTN!4;!g9%C4#{kK+E*f5o z+lq5qS`6rnY&qZ+hnEq)X^{HB9g$f%zJCd2{QJhbYxA(*lY+6CXDXEyz%pu(&aAY( zIAsRy9vyweDH*_nUGQISSFuDOWeU~aQ>C~jq@*xy9FG+{C6w-a@~w}2XjqtOZ`}p0;)(nw zFLH1)Td-bvnvQ$C8{1@O>x3b}3v5U|uI zxxv8pDUb`rXfx9I_<>+wdC=c;+cuoFV(LR;iGFk{a>K#&??eZ|?7`)}6{3_+kD>=_ zv;qD+uWGd`;q*}P7dP+BI{viUV{;W->$UMvzNoVc!iaQB&5~)DDEJkaOw&-RM?0rGAcOWSFj9_M-0K2dMR0zrFRBtsB>`|JhtPeePWHr_uT&MRJL^ zZ^c{|WrU^)xU?Z^>&l=<2R9!w&M?muz)2O{ZUIUFpPy9{EjT*umGU~0Yfs*nVjG!~4{ zmV2Q~QCW#E~V<2YJl(*+jFnwSf(T}0>=fNE=JW~;uL zhFNnNu>s35F(7mWfK2<{Y>lED!*~cn5wc&OWeGD>*WZwFX9J=Z4PYz;JWX#3dg1(` z?~_%Tm>hGc0@6y(ojvO?dU-FspoL!`9S>Xo3Q|T#YVBJK)z?Y{)+f&(Yv<15TOOtx znTr$8E9O@F!a|}y@a!#LF4fBM?u3d!HUiScL7*Pj#6Rmq;YW+f(!Ht&x0d6Y;2I8G z>xrMnA;dOl{>QI=wjDix?wnHY&Y=--S*p3p$|?(U-@HS2f-vABJF~59opDMN=GHo& zh76^n?9^JUG#sO`c~Vp%4tu5z%&Z|8*@m+AcE!2=vL1Bnt)`0JxVJOC>Eu)>pVsfA z?aDZZne6+0?#Il1NTWiC3s&2bQp)WJwYCw=){Pq3snhUH-r?r++%v~6-?VnjvVDqW zVBj9Z6AmDY3p6CPtB>{8WJA0)!(+9Gvv1m>^n#g7Mnf|YY52FQo5o9M7y&x{E4tNY z(yhpCl~?xHFGAaeABvx5we{K1i_y=8`Wwf4w|rFMGXy*X#LgL{K#90yWJPflTJ*4k zW^`w%)nD=jd3v>oB?2?x4|TY@=bRAyDF5E`lhD1|!(>Gv@UL+Gt{G^oQ}P?6i@&AF z{`x{LxVf`pa)3fu?>XdFXt3DRz@&Nr9k}zUueG`)02V=No6~)rD3C&+SV4FVP&fcH zqz$}yTM^MO7}{fmvK-Vz%4V0ExZKxY9$f(2s7mGPO>D;Ie4lmElR7)gfifBYQoV_Tt|jjr>W1Fk-}FYIAf#(96}G3 zNu;$Lu|2`#m=iOY6F0awEY=Lo{MY0x?KrG?XpCEW3;rt50g%1Yvz{UvP>*%>oePit zKr0HtD@F5~h6rt~C?S2MYrH=a0Mm*X7EbEgJOf?w^e#`Tnp?!$F5@boa32dhwlFyQ z6b~hkXPBBsySEtZY=%5MxXXjthN;1SICfLR>DBg?2&ERKyl+xln%NXT^ zc%~90biKH+Xph|^#;jNPBlPCJ&zDD2X zoX~lB-!MV1Ut6t`e^w|lHDANk8VY0|Yo<#Q^HW$<3$b9o2^vGDTH@vdZc+wxz^mJJP9~TyJo>0`D$7+-G+J z|EC1)v1xv(go6FXd|LWM_DkNJz)2yz)5neuYVd*X9xts!iR_-X4#Kz0f(DL8tkZ@f$U}v#sbus36i( 
z7k@mq5a~v;)T0GK+JF8>&FxWng$%}B^JujpW3{*sW8K3C>13SWfzpb6xZmTVQgOd* z&!mgZz7V59eu(&e8XD3l+bY~|d#L4JarXCUSY%{+i*{K$Ukh=pY7pHa zcVCC0D+rm1nXBe@LI8$1-2j;0ajN^ShwGXGIy5wumF*_bZj0>3r9g;6*<~x{jlSF`+X>Kzpp(L?{X3BGP0o(b)9D|_{0)(%SZ!gyxe9|0GNCSWSJD6 z6B}kHe0_a204nM<-vtpB??zQFe^tJEwO9Gazsf4UQ;Bo(Ou@gPd9!Im{KvhWkIKB_ysV}0?Ua{P*w*^XLfqTOakP@I=RqWt%$K*qP} z&aCERH|v?JHep*$HBk2_^{BFY`{XK38LTxU`8PUA&=` z9khJXVT2RN;Vv8*`5yO#9AaK)5BF3|jYG9Kk z-hy64GZqZ%g94cc&3xu24wnp`84#BPs?HPTkF1yaS=@aO zqM~V_E7TLrO8JWc+xpq@4?+@cT&k5=U>i(Y3!f#DC`AUfZ8l55raOO#lUdYXc}~62 z$W|7}g2*c-T2AN}1t`9O&aMXJ-~aItdAhUP$PBB_uxT~a#AMV-q6?r)R=GCs7QYI6 zIat%bWXvK;r+OxqR|u`-RMw<3WIp#w%!xRC+Q&tl9?i!k<;x2@4anxz8NV zTIx?P3DG^z(Dr013lWiba>hxKWrXT&1~u3+&^*KDXqTt5Ub_O-77X}BeLzAExLD>m zJHqIgtQH$w&T*_TE2D}5NG@HbIo0LlKDqhaj?YtVR&h89rUGb^&RLL9D7%AZs7nYa#{fSQg= z7<~qyT7ao!j=6C7u=f{Qbx4O1Z%;1EUQVQ7x7lp%nGB0hDr-|aaCPvI@R|XB1=L;i zMF}iE)WJdqfuZ~%j*N^)i&)$P0Y1hMfQ5V0Gu2pOWc1lrR(_K729Iy`C<;1!x0K>m z@g{6gE2Sy7>-lqQP>_axBsx`pVe zQ1aP*wBXi!-wDNcr00rQIKrgFuMob*`^)+8aORTqpHIGl)&sSv=Fcemg^U+OR0mN{ z|AHiJHvGi*Ml1afNCLG};8f$iUgWTXXk6;Xql=vq#v{k$*tlAnNoHbixE*3FB?K-V zZ>#lhi#blzH%k6XJNjSeg8bk6G}x#Vfe6gGF%GX?cSJT25RK8CrXel7qr?E=&Q_S| zf%;U{?xwj6lv|@GaN4^Zb!EIM3)Ye+!^ z(C~`Yo;zbUKC3Ly8!I$wrUZC*Yc!vID#SYzDi86SgijP1CcOXvmW@2#`IZOzib^Q& zqV?pVU^AR-DH5UH19i={xsM&6@+V;i}WHY+hj8;OS1 zeWB7(Axv_Ag0U>OTDS^J~zd>hhoRb&;S){s_=RO3L|B$~_>H~S3t zi$Ccv#Rea-(x&WE%>tH&pZnGMv8e}+X7~4J4=0&_Gh;*(*$PNC629v{&x42yE(vq( ztvkM6aq{cB#{6H69#Z`&?AF; zyop&keX|&C-8yuU@XPO z1Xk+)^Bf5OGWi9HEh(VOVg4h8L1#pi>xDo4-iR2s525Zr7rP=HUHV3ttLu|S@dP_k z93FyR7d*UAD5+ZII#v`%4t4M%atV&+F{iTy>IsmPjJ%)Ig6ePEl4|Q@2Nvs2?AR|| zDUu5?4ecU)XVR1C)GGC&2G^gHROV?b7F6xggy+B7s;AU$-b$hDDvh1zCtW8z`JX?o zJSa|OMKxoZ*s8OjGc66(pH-jj4r20<8;oKZo0=X`(O%}PnnUA z)z)|Q2%p4zG^;lzxt3u(T;B+rm8#M(%T=FD<5YLA8=1z#>g~6oiG< z;eE5E6g#uf>wcr6){ka(Cz>24BmUOYyFy%SKz^g7down6yaK}|`x9>&^N~oPgy4oE zqLhE^G_Qf<%SnfFvH$W{hQsl`5cuf&a2HslXUYj}-0RY^4}@I9oa9nS)*;`_^^@l_ zOS5~673?)tni{@+n*wAp_F97b2VIaLNZK_foX>D%K3$r~nn!8+;7mzCVE!F8_*mhF 
zl=|t`QP&&oW>04MMg6l<4f_7-Oo3=jjSAQ|pin;j^;rj-JWHhIhBL7rJ5+)eBB?h& z5I*$oacCd}VAxIf15(0s|Nemko!gEZ{QMeSI&Rsi(or8CvQ;VrZ&K*1Sdk371=$tl zzMHBuf8EUgK6b({-feJ?9gqa~Tab#)f(`&K+B`=AN(~jF z&kr8Id_om(N$EGzqibiE56+R49vkBkWg8PQQ0+dS??FgYZL$Ei6U^$h)u0h!t9EJ9 zjgh-ZJ=H_e;wAGTRN?uZK89((Ujv2({BO0uaJa8^+gvmMq~n4bvx@Qr@xw~O@k>Ep z(42+`O|#W;$HML+H_cz-(42g3fXwA$v*sI^ZzKO7YiQzq*valn%F1>Yb?Kmy!$**% zK-6UTVZAPY>%cwL85PSeP8kwP&IhON{P70+-c0k{N=VU)PVAf{#K^-JISt31b~fI- z6tOCi0S{fLVV%pcn0B2UCGYh@ptzl@w){t4K+!Vr&%<5D>@)p}wwCb2g;-GQtl76! z@cBNGbx@dYut7l8UQKSck9Alwn{aJMMsMNPh=MSd9@+l@DGjUF(7cCpH#xzwu_iB} zMDCre1gc|58wS+5Oaacl37}p4a;q=^_`{$(6hHo)goYh0Lup#j(~%~3Iq;I82JkA% zpg(o7eSawtdHAO~MRTP$7GK0V$}JNWmiE2hsO|^5m}0Zoris6pN8cF~G0G(JjJA>r z&SARQ7b@ly$dYG+nAWtIpb#Yo|r&8Jlw z*2U?y*f8|ye37I_uWt$=i4w8FQ61HsH~aCHg>csYo?+hl@37RDkzGCno)dWrY)>JN zcPs|7{NQc<7}{hQGBvFv=A6%>&=AuA6*{Db01_G|(2Rw0!$}hX(DG{h{#_naeh}ys z7{e*UJ~F~W0Nr})cbnUHPJ2Bp=`54R=Rrv{a;ziRF|l^a%93n6&p%liq-5+B#i_!0 z2R0v1111C^nHrvmr~SdQmn61aGC2kEj}8mDu0Qn5DafB9hfOUQKG#w-TM}zkR!=Lp zw?bb7^%wZ@hZDmq8Za-{3rkOEWdnW;BKdl(fe+B$wiU1EnIsNFo8}Mg^ z_gv}>bu43=TWqGT7s zazgAPgmjDgob!yJ8eN+%=rEPme2nZIp-jNiKscL>o4qdI^Q&=B({Q#Y&|*=~K$=mJ z09L;86g01L?(z+Uzbin-<8KcMmKeS346IU*jxfYm-4`eb?cpw+4uB;<gMu(qM z?D(%Uw7F!cw){9Gt&e5(D6mRccx<>EXG+FT|3j67Mh*yqtIUAu7Y=GMxlK+8(K)SQ zW_k8I5y$1>7>BO_cIT$4`gjGSzVz_+l!odEJHXQjeGJC|V{x13vNYUSqK z>4`)bU6Ik~BGX(oH|3II8D+^)Fq!`A3KPZk#H>xfT8QXkm&}+x$32!qXJO({QcO8m zB=hvdQBr;OnZ(cvZtjeC0Kz{K{ zI&j&(vX8zokKYtYpim}f!=H&Auqpd59qfkqQGpaDCdPL3!fwWEA{u0NB&)-L&L@<8 zN3-#9OTFMvK*!pWC$m?;-w?&c|Hxaqs6)D)4?DFrVt80 zx&ZW*&%jvnsXAIHIX1T7wzU#gzWq5*xS!I={hL*D@v&Aj`eOe#)W~n%$x)!M{H-bu zKlni=zKT6U(^>XW;+xJ_kV3}!Df^uTafS;8FOauByUhTH)5y$<`4IOAnM240Dd-2* z3U-SkG5Oyv0umY9?RFkz7U)6k!qOlHnbOSW$FB4i--DZjx3ztD*6t^tpU(5odxAo3 z7xWSD=ovT*?qS5U`y(bftjPF3Le!$O=~D3I>MVb@$WcX~E6?_PTPmPkL`_Ilkmvq-P}l9=N_|UuKGbBhOj~CshV)Wr>I;8- zZ$Dl}8G-0%L@T*Ec3iGsF@+)@*)uVbmyxL%pde|2K*j-k4+T#$3W^M%!63yzkIfYY zWo2ch2!cjBulUa)Cy~~lS{Y*Z4INo$a-vJ>nJpf8*7{L%$HYgwU)2*sd5e?ll}x`y 
z()BEoUYlD(#uczk(f;7PAvCkEocuG3B;Zxv%*Qv}dO3t;zQXbcYvB}^3JX<#^29G` z|8EzIf+mFstCfi?HJ*gp?i}wwid{i6q%kt5;%}>mxQjwG4jI3lRBn*#pEqCaxW1OaRLFf{GDrnB=-O28hnHmoOCQ=b0sEZ(p?*9?%2{YpiGmv1tJ&G zd$@asf)|o%@JsPisK)>2UWAG+$ZSjEUi`J`+AKSMVM9n0xnX_8>?}fCW-*dp7cpsd zz9f>6#^G1js`s=JDH+&l4{0EZ-N;~YnTI2an*#Hn#Q>?Vmva{RPMfSASbO5vJzY>> zn%bXw6M&c3ZiRikJ<#r0mv>3p<`Bxn?$bL`&1oiN1%-m1IVZp*F)=XQ2h}ZrysF&2`>cyy*8`8Jil%~Uk1l|WEwe%h zV=7Vl;s}H5)7~dxd;>+S#*LtPh^FMK@gr_JF1>dvvqW~boqvpLw403PT=HEpe-}K3 zQAR5}N?vzSfpkEqDlI;;N_Oj{RK>BtfZ(Letyc$+)7)^5a<%EbA?VU6f|r!D*v|kH z4R0_xoA&lr0^e1(Z^KcvK^3IK#dHZH05JZgKMMDU<%wP&e+K=MKnlTrbgSjJ^_zbS zhDCV4)z_N=oNq8LM{B|GsSXXw=KQO`SHGs-iB$k8jfp@?(?Iz*r_#o0nm?Ef*vNt; zTp>!yFsBz>z+i-NXH83(Ad0EYi&QT1h1-MZ8A5e~!W^iYh9YnX;3T-zgXBdxt9B;H zSUrMr9)1PX0yLn$l^5^6^doVN$9>&K+o{KpHiJ+Dv2nP5!ycEzwCs4L`r86KWJgAI zY8g+K6bpj#XyI7Uerxm=M0a1md@1khAEe{KUGVu;<^h|mzocdnG2uqmB~mW5+?2dz zspgt{k*HjppuC{QHO@wJWLC5-Z39;vP}Sl3Trd2x=Jlz%;_Mv>uTb|`;=L~Zn09(- zrt?bd-Mcfm_MWY>>ZX^=1)Rtm{(2-Kiy72a-0&2vIhxQ?(;-_kaQuTSxL<7g!OVLC zn8|hb@4p;cf%kj2L8@Wm4N=)lBXe#C&_Hvhrc9a)p)}}6Oz~m#t+0;QR^QM{4}u;t z`WcyY|Mdma@4>X;Gn?9Cef?Q3FE?PIWV_Pz0@iEj3rkN^-w=gFwj5aVG@rTdp|Dj5yIFpO;kP;G6>r z{~HoUbsGX+y$tm}X#^UQ$P3EZJO{9>wl~`jdZ#DDszIJ|Teh+2&IF< zYq`ApNO+S^+{R?;t7^U}s=`tkdKC0aJemeRyQdhKB?kEQrZkd4}$$8 zWMAY5!suLa-&nJ26?ssok+y#qJ_elgE+vWa`5Gb;mi+4ff-e{Hrka|WJ0)3HOXs%P zNU{@e(36rZgxp9za88|YXVx^#FEGAAg_ifz{x2&%k%y4swtWZLt}$#&Q$KIl7uz|1Y0`+R7N~S~B|;O{H=mH? 
z1JA@j6u5}6{QhH9)7cLCdy9uhMowYWAM2QJ^It8zPdGwRq}>oaz1)0e+{*d;4B@%|Jj^ybK<{(r z1HX73;eK%4C3JkeCyaV>EC!aQ?^rhFFh_QVqS-ATfOc&NkQv5|tzv)~Di_BgJ<8te zoAVgLr3ckcl4W`h&OKR*s|XlwD{92;p;OX)RGH#^!u|Kkuv1WzM}2u=nrMW9ie_VVRRS4+^V_t2Eb$SgaXLC!{<5&Ah@KAadO z@}9E)Q&(_Bg*M!;OUFcA%tdaPg8RSlO~O`bL{yrEAf#X;HDNrOreu> zLIsHig}r<}(9P23TttCV^1Uu)h@xHXN4SUXisfg(#mZd&$;nWnCzWbY5V>KwF@I-g zypLR*M;4X1!rVD}4pC3jVIqv4@THM4NXeQ2|Rz)_q ze>@UX^o#E#oF?)HUgj$w>dci#maCIxw-v;MME-BPPZrL_o@2cl{}>Jp?C znu@&nACE*KYOuy=Qyd!~pMG^M9%v%dAKwFBwmNUl9=v2WK?)M>wLN;7^3GG62PJWU zmH@OX*HZ!32@1vWv0GZLx0~vCcMUpa@vGZ zzKjp8p-joRr>(qp#Of(o{R|Q4jlK9;_RrOqQNszq{N{iA!(MNHC>j`B4gqK;#P+-v zM7LKiGBzknkd+S%?zekIgdEA7_`&$3&TE-n&)?#9@v$?eN$HX98^JX88|wSLz~*MmZO7(L8$Qlp$oC& z(Q?0UGeAfJ3GYOmJ_?XaBr|qSD{n*h6w6R{LFrc_Fu*OpV)DuD*}-RG zpb9u%K$tD3*&*@LTLoI-)PVC49#;;8Do0SsI9d^ifb3`lxDyTMoXT^`=lMZp5(gpX z>Pjw1A95J|Z2NI7f%?zBQ&AEikhu-UXA*?1+rB;z(TH%2A;Sbmhs{J~3Yt6!>1Dh( z)2lat}^$e>f=Wrsgedg*;^a z^)rg2$l%AjS6jNVT7i_#(DW&ow52S-v{`8h~k%g2)%Q#g1PH!{rXKbVYmk z)2sNaz5j~IkJ&3gEG-&Np-}iqhwvLUBOcoZ-IbGv;94k&ii+y* z+o$sSK_KPx1C-|RU~2M%^R0C1ZO`qe173SOMY1<(q(J>VLv6SXjB~~cBZ)r?fzL%~ zNJ#teoDByO^LXnIeBFLBfUgssYh($rrhZ+LIedsBq)+>o@q_%HtS$}%6Zf}H{9cdM z>h9S^9}8?UD%p?@R^~{pHR6E&Hsz-qoNQJ-Z|L@R=9~skncU%yao9;G78G_%R-JEN zrr@FPG5F5OfXSM=mC5*=gE%38fdn z47r47cqw{mr9VZLE~>tQS%K)k1E+|Fea ztPN5T859M%Sy16p28q304`A{EMjgI}?TG}`YxJ`C|G_jcZwmTD%$T58YQYRFt z;|3#gt-Xm5I3dLPeY-enmelNlwAdQZ#cS;2y3x9+l4+ZMJ6+7~FA})VvjHQIQxaDi z|G%*nB*}Wl(W#ywk_glw=V)+df$egeRa4N$1fpKgh=<_X04k>sW{AU}Dl^>)dgTBI zNVA665mAXcf9A}u^rCOyzFoh4TLDy-;;W!Ax1aVUo*9b>oz7t+?}U`2F|@q*XR^vy zF@O!eE;T#q_yEgA$3b@PD^h$gI|r^`6Q2aZhmE8+WYo>7Y~P@5-?{>J0%^qg>->10 z`Ms`=Kh=cG+dRmoSpek3M&b5%!?b9NMR3ZiQK3X$yYh{i1WcSuo6E@mM?C3`y3Nc| zgDYRC!a(=;BUDQtWD;FoYDmA0ZUjMagc#6TKE^=^U>qyvLxdz(YQPTWC z-vAl)3J~az{>bQGyX7?@n^qu+L%!5I{3GPr2$!cTsYWB5owb*Q!0hZ?|5=j-~oI{Yxl*$M$&6x>uf2uy}Y(ciV ziQKH5x{$k0=V%5;Ig^9?0?4P2+K0xr0NbL~Cy&8X+bB~l?I@}*7x11JhF7djxg 
zgN9VZ;GnqA3bV5|1SVU~YSNJw(Euu1$_;~Y=qS)>t{!q%=-X8C2%cV>ua8aM4D6e77E?Qc+LEt*=ot~C{JfRez4%sZF3nMFzZ!x|d= z^u8OtH|%bcq?qGOb#t9Mc@&lC`yk*|TO1fMQBsWyAgB`1zgzPxQlvUH;lFRa)T|0f z9*HV#eLf>co;NBvZ9fEi{fe(6=Om)uGeZ=ZJ-$fvzw0RH!Ja(>ZnuG_qk#xN@tVa0 zMUewyTtJxvP1>#k*=9%MpOI(EYN)O-Gd~1lgGvw>T$nb;qy{1^eMJ5JIP-fb^!>(O zy7kx_`j|24gd50&qDrR#vk-|i{Vb(&%o-ZGYyX_!PoIA?NaMKiN>EgX8HHkz`K#as z&?5qWpDeu0p{s+eH(crBK53luA@l0k=4H;xDwqp+tM}<2BVWoBYQHEVI{(VmFG;<; z;MViMlRj9OXd!R{V4=`lf2k4-?yWb8Vt!^w)UX`{Sf%vAK9VrdNguGOB>^?22*6CC zRb3bB$U*$npD`=z_s}E+u&X$@gSF175Q;S{wXM(}Tqf&dX+UP)NvceK9d2SVr`o(& zhYr1h>7^t=hmpmB8daEQEHZsQAWu-0_M48T3c)b(Yt44Vfq}(*#qU34NPL*s6ICtP zkYBy&@}*bnR-S(P!~MM^Y5+G81nDf^E9ZYKKPWRO*V1wT`kF;OPegoN%z@-MWhSC= z0c0u(D9jXVk{J{NGm4CyXW(!UVA~P47~+*Frwb@qiVw%XA{HyvVnV zO#F(@vHiN&@FwK(&kvwXzVb6R;8qu-psBM(+DYedJuOe#+5MB&v6E7@YDg{C+sLr? zt9z4Y>-I0Bx>6A@c)w58U(=Ouw)2_Tvu~%v{;S1XleR|&adEe*Pd;fuRaG&q@dw<7ueXh^-xf-1O5J^H{5j+^o9}VmH)(b+EkZ|)o zDB9SuYSJq|r|41x6ih(KXHY+fxB>)Gy*YV#dz=@5HUg^Ua_iug7yz}X3`xK=uJhuM zl$^8`ffNlW{Wq5pF4+rZb3hwQ!b~th*<->j7N&VX9^#b|oq82YytcmZ>7{Rr#QP&N zaEyTO@*~bE+uJ7Hfxge6tyDdY8!WNgbMX{82F)A&ntVg~pT2D}^0Ov{DV<&t&GRQ*xX2azGu{~q!1Lb5~bsf>%kGDHLv zFIDf&ZcPsh(f===hqUhraFl<43OW?ppz;Gd^#!He%2Vl5>x*MU%fs2q3GrO_tw2m% z(&u1*t6{#P5H##D;48})c7TMo)N;c?{iNT4kAenhQA9(Q2*@x1-PZ0d(HP@oeg1m^e+}}(^0k7UeS6^6NxEFBas-L|N4I7v}sjfo;YYg$aN2rpgf_j z%kF>HbpeR5i$@1H>vHT8uV^~2F*4`<*(0EISM68lgX6cwbgcNMqlEcOq&0Nsey)&M zQ<^fmAj6FZg6ZSmioBVT&i>>2ozVB?`rWRw2Crc>KqtASW-W-4KcbGw)%3JreQuy* zc^JX>b-wdLrq*vrzEWWa=$g-d31)IAQ)5SNX+`3WJ!A8JGr+e+InH^P1Y0<}N%C9& z1qqSpZ-R;-ROLf3Wj$bW$zgM&5WHYX!-~>?=w?*a? 
zlYy!Zq7{V5Zq8TWfp?CCy2QlB?%kM)IVH^Ric>R!*@~bXz^~3}Y;4qzts1d|9I_mw zP^B_dn}Af0(AykHC(D)igqAk=?zY=&A0gKgv?ZIwkLgJR!GeAL=waw25P5G6kuPV* zVjV@lw#uS!rm#$UCwsXliqmbezf6zi-TBB4}+8O#wJ{# zi}Tv(JoXSW1fQ)es?KVB_msZmQT;z?flvmDe;6!eC`AFn_n=V^_5dXILV3*BkS4}& z#RO-o2B3HM#wqf#)Svc3R= z>g!(&s=sp}ztya*e;2Y{Z#|3Plz~4hWQSUP36mC$FtOqNN@;TGx%5#ZzFLF7tZ9CV z;Lfjiy5$BK&da_1H2T#V8IgZn=Y&Bu?Y;ZbIAq3Bi9>e(1p#3YtfHWx;M#ru>=|wC zV#ZTb8pH|_b(`z|i*0ksUL5i)8f?7OftY+LY~C(X^((q{YSin7`ynE!J`s+L&|66k zbq%h;%Xcc!%x|2-gnugUpIK_{q3j!)n>5*oX_6(Rao*>8^+uqh3{z-xX!@m#{E+yq zKg-AZFAH!N{ER&}cFe`Ao%>~#Jm^X)o0+>9zbZkQCA}^3^1e|hd$u}XgVhi!;H$Ja8!MS@RTBw5pr}{kz{(Zz&Ut&L1 z9Dj5CvgT5rEZH^tu3nIF{J}r~bX@i7F94t;?tJU}pS0M+yXDHapCpY{AfNQvYNiCT z+0EUwT4!hU{~&z$e|_;sn;tvPuM65}m{L$j4sv6HljYydslD-@RpP^xeT559}f5ez*Mt6RioCZq}&XdLHLs|1YF+MB7h~ zn0mZ-bJ968^s+cTl`EG1eD3q}U)$%3u#C@+Kl?ps_jkE-=boH$#%%dq;A=CS&`QBD zNlf&~X?cK#O=353b6in(PCh1y}FEB<1^nh zpJ+F;fTNj-tD|?|O;A4b0&^`QXcN76BD)y?uT=A0WC{Bi9?bv<<{6JeJS?1lq! zza-4k)JD6g9KZF*wah%FzM%CkZ)M~c{+m=M%(q0s)xW+rZEfpb)<)j^$K}sDLTE5# zEMrO`9#P?Wu(7d$sM%t5;SnZm5q4E6+dYf1L--~~3{>BU9(V1}ANZ7`%Ag44O^K3T zS?KNT_Q}mI^x%D(>jHue&CI7K9_?*)xX&F(j;sIaj4aLafTWf=@hZ7);$DkClEf+- z>>A~e_ftELPd{*=c7LN>@*jBDzZ&WP3FLAUKqaQzV*&g@+qHefEvhc!RJhv7pr39+ ze1Zl&gC`uA5up+-s&nD3pm4BH$ znCxP6#Wi5YvM6rIlCybL?vn-E$NE8M(sFw z+;;0s?p@@=16SX7u_TYZu|6yDHjlsIBcVZ}H_@B&aiLh3NF0sO!O!>pqowd8t`r$b zBpJDqK_oi;y0m&|Xy{I}P<`6Y&Q9VcJ}PDJCRpc6BaZ!0n9qfi;CuL=AWyPF`{6C> z_PUU^(uSp12hSmC!k z@s=p*;L{+!=ZHg}#0;kn2{B$&dXF?~)dBxVe^ET%>)^S%;;_~FV9fOVBeqBWJ6flt z2ob7@>*q8-M}(_2@2dQFHwdrJdBeWm30V+sAo$OE!)oU&1pd8jgyNs~ zVZ7#Ri0C^r`mC#CAUhHgt=-3-5!Tl{eJ}- zkza08F5nKMB@#hv3p9e28!N4F5xlZcW_vKUew{6pUPOawcev%@)>;Cj=mXQd_v^>! 
zRHT6vkDs%$Jv1>gDm=?)Ieh@n0eRr=j1p=0bQaMyW8_vQ%5|)-%P*$7w2%#2Xv$i( zM@TXlGLsh8a}+q%eNT&76iwW48Mi`5u$rC6Huaq0t+Z%c#qqiHd`R=m1@?Y<)#}$& zhx$9|^$quv+!>1%&sN1Bbnp|O8jTFGn5I>%v@mDOIwe}vgXbykX)qQvdK3I+5CK+2 z^JNlF+Q8%PF12dFJ*n(b7&?+e#_SKQL?R;8@op(SMe6`7Hh3CGUlX1V^8VkKRJXZ@ z>meNDOm_;;SQx#^N}ANlqwCEyNQnqYbueFWS{+K+o4(*R6)M!<-yeNS_cRX*58|83 z4=y8jX?@8n%TqX!MVI_QPPK5D@i<*%W>7?i&fwzU(Ed6#el^L>SvhFw9ImXfD1W)O zoU5aRnPrGZq*?iQI_HF@ATEVNDJ5yuX2jM?4S5fdLJFhm77A)rpAs_PYh}F-(7tQp zG$emD^2Lj>JpsL8UKDm)OTS|4!FwCzVWS0A3&$6$CCZu|t0gbCt;K(K{R@s8`+kDH z{l@UOWR}1C`tP5%r9YoqMMPRqKfXTKY7Ha1 zMu@P5%3W>7Q81Lmr zhh&|NJ~rmsbwt{;Ws&jI-PX%)NRZ%80p_PaIl87mzN#xBbN?IL>wuWkn0wp_=(OgN ze6bXR*P+C=$A9{riTwhB6-!c~SY7i_o{v0hQ9Q>tIeB{ioGZKWlJFF*?o|^*j(yQj zZ2jLoLw=A_g}UmiGaV-TWw&mf@1&Y)f)1WzE-ro0lbVKeiIi#jvj{bJiR|oERucjl z6_*Cj#&2&TLBGUVZm`?azKOA}0e|OE{ta)J(4Ou|<9wLqN@6?|6nE~vzmY9sb&*!m zoP$EpTyOOR-JGU&dIC>kQQ6l!`xt1Al?VRPE!X+LsM|*RiK4Hh!L*Oc7)#bmvzJpj zQ)GoSu&@$ul~n6>BVoqRJbq{|5?k?Jgk?9oPKU65(G^D)$9>5zEM)ph{-Sq8)c`4Y z@t;I5U1~pU-FWuxLdRQ_QeY`!)o@@N4a#={N{Svio+4Xv(h_rU5b`AQWn;xlh6d* zsers&ZXLBx^VUgEe`0pctz<4hdNd(YppMLs)6Ky(VO|d5#^=LKMlP5zmYdH$Uippm zFMg2O5$6*x-QErn=&wlk|GuOcJzd}Z=Danl>ZgxjuYIc57><+J*itV2z~9!=LR>p- zYYwt_3T-3quDYpq@>IVZFl$*Inv+hiNfP>%xVJ?5WU82GfXJR-WB zByFT{_3{c)rG-owd5ZDXZ!*{MO^GY+4zt1U^iv7|Ai`ngf;vnzLr219%qDHjJ0_xz#AB z!=~UOT>Kx_$Z1(X*}=z;Dd}{!rNn;Uog?z+&MeQo*_WeoYuNIr_l2%6$v+b?8A1QF zwq~tMjEw`NPh&o({lIQ6O&A=2vSJbF&SF8_6fOx>>DHdQZk|pk@-_V-scASygZ90n z$ZhG*YSdMh7xk6`An(e5>V{Gty2hj(ns7!U{L1 zR2A&R%@cOW^NtaT4&v&mP}_S?y>J_3Hm>^AqNhtjBB`l}ZRp(QA5-o0ynNGNl~{qy zS+si0qGUFT{la3RYgN-;(`?47A;G9m%{aO^{#!s}k-B==Bbh)oP@0HScqARB)pPx) zMTE4Th{!7*%*D=Eyey^(-{_Z8ZD3i|^!8G8zVe0Pt9fvT_$S1NBUuRJCYlJh&_m4^ z)gXmS@nWC1^$soQ)|El+EoMk(Si6nIX5hyBK%3WQ$q=}V$9(fwy_wX-Z`C1&9@BgF*?8i z%NEwfb@mgvy`k7p9rWrC+{b56bY-h$BcB$C&}Xy;u-1eR%x~V)&dp}Nol9V?ntz{O zHA)@}ji=slisYdlg~eLeL8|$aOWJf~l;)pQX~Ix!OMN~?I+K@Q)QdRPfQoU_oM>(g z$n3w9_TyYSPTVX6YFMT@`mQBFK`B2F!y)#V9zzxk`ePuy&cV*AoR{D3b&LqRq+RO+ 
zVlo-FV2ZSP-CB!=r3aQz=i=$RSEftSez=UdUY9=9(Yrkrgfro}W|We6HT@K_U#mFG zeV#;_E(wpsv^-{3#zo9iik5GDSD4ryzcnBbc>HII9usrRz^X)xcuR=2@%x-=CGl)W znFV7)&!o2gD0ev!pa)x)(4YVQ;z@znBXwl-!fFUt0fFW(8@^RTsx;0GThTeBDTLAGPKuXn&(ELrvu{eiqed97mSLT4-2wE}Kdh@kUx~TXeK$9g>fw za5j{v@(MoVFr2Zm8=eyvM=NOFPcIlfv+g=r;=408VkVHA$5jlmVE-a){}-S2k9;k2 z*ky+o3QLke%7||Yif*R|8Y%|Jd(zp($dn(%gdX?5fV9FT`tf`?=(s8!nOs71q~vTme9oH=b(*pLqwUB`udAZH$TA{AWtnY+ zqI}**CN>6@Ss(w3Cd%v^bABspa?od@Pc4`^(|quZp4eF8?z7(x`5b7v@H;lDA-jW= zer)1ZT|Wt*w_B+LWrJ6GJ~PYQ2;RGK1vC0k{$yP}^LzwhK2(o9DA9&J6ru}yp?;Q| zf9{s z()znvJ{MdZ@G#!F_&hFKI1g>|Q-7CLRv ztw=ntPuq}nhealStxjQMnufQQXg1_8~)C##!3 zJPwYOcw-apyB%A2R{ARP^vaWYWfeUbSI*j!@NDtHHp;N0uOIE`2Iu%0RBNiX9?YvLcsttBsIWU;TDKV>X=!q)6H1FMN@w z-AU-@xMG%sGnX6UMebiZBgopOOiEkUOHZuRNuQyZ6ViIG4TdzBbU0=oBVxAEdR~Wf zlTq>P7Ab>95IXI^uvX7P=8%tbdqg7@f(@*ns|oWYCYK2aq9{6X;c}1n%hT2~=*hr~ zgfJ3X;mFu2;SYG(e|O>UA65h3F;9Pls@mpyF-qCw7|%O;F-X=dNUYt8Ja&??z_zGb z)(=1AR|u-0j~ft()xT@1Exd9L`+UeEk(!A=%57NfSlIl`=FRC)UfJ-=69or8er7@M zIHFEnft7oPpLP3~qL1aKu90*5_q{XZu9R>+U8_t2F-k~xGzFNx!|oIzi@)<{sy!|YO87mP13#C z7JII)=(353&#MAM9#O^}0lpO^pRKOZ_6 zC4lit>9<^#7m3j!CURwfmTlhD;E759AXz;gMX+{TFB}ez&eWt$U@YU8}&eB+YU*yNf6jS9-G$-|a3h{7OdQS!{D- z(D{QGye~?a$I2cC1Raja>h6p~z=tl|2`LW^kJZdG4Vz#qDylS2Rj+mFXcZeT>YX8S zCHZ*WKqyJ*`n3o4b(QoA_LhuHqSLf(G3Hz>L%Ne`(y}{~jTM*3Mn%1q4jLPfT_H8W z=64Y<%Yo!_mtG@UNtfFgah;$0vmIf79v9c{b+4$nu)eVNG-a1A|KgXG3)i+4v~+Z; z%(Q-v5AxMNIlZz{8~7v-zsmMSeE(jih<%L`YJeggI9eOk#C{i^+T@bXvJMKy?v!gO zpq7YSo@RheNZIPfeA{lJ$ygk#ntyJxmp%!8Ul=#S^}R8p<31eDRq~mS)r8- z)-)Jm-=&4E5BGNJJt1Rb`i=bzkWESGXw~$x$GDTPBarIFTv)yc+9JEoy+vPD5c8RN zC|5+hr~C;AyOYuEe(Vsg$w=5k`hx1QZLrsq<#Kw9YE zbsin_j(JJw7jdwN3rEPAi$i!9_vLIz&fD18^+S*=3&{3v-w{Wd$p41t-H|i+@4&BJ zZZN(3{wjO#%enzm>5=lI%=dGr9==_R*vHm|Lf6cuS&3B?tiunDmAQ^Y|A9J)dLv*e zwY*0S>op=1%19W@DEEHO(*U@PO;IpS+O!3ijMcIJHaQi@ff%8=x!AG3B6uz6Oe82 zx|am=afkoS*Z5+(wpXodUz>u;Zc#oPGnsDCOz;s7I(Ci$IX8mO6v^{tz|OSu=K1l& z$X;R<{O4yPI|+Jb0Q5~)gKtCRSJ&~(b-q?%&crxU)Jnm9e3b1cC{mMb@S03AlwgQF 
z@b>LA#+2f|eSSjqpx3RLBf#rh9XrxyDJ*4)f!mt;V}w`rzjj%ippxO6WgtM|fHv(( ztc|Sgb1CtSVfFa0SMjDJgZyto=*{ke1^x6$Hw{NiN!mJKh-qQpqh#{kaON4 z^bhB-9+=%RY%ty#$>~;SDrP@`+bpJ<$UJqZyC0dS_*|3N<#&cWTW8nZH=N~*t!<42 z_Jk`7I0%spauO|ualjT(q7m*2bhP2 zp7w&OpiAUIxuq`(PiMA1Y!*HX(GPoHRpch3Dw@MRjbt?&S=YD*zh2($`xL@&;J7cy zX4P6*7huUvbM59gnfHk{EcbKqb=6MA#+Z_!d;Anv*ks~;Wdp$2ka|}pp5PB+8`LVA zPFK_-5ZIv%JcQd-#=;6OKQhku(ccKRp=(4f#!-#>)YjjZLB??60|Z9^GvF9g9(QZ` zVd1fw)x1x|M%$ZTejnnoNW(&B=S2X?L3i8e0GIoH_xwouv+@?J&8uln`sl@Y{-?qb=I_?&q~uZp=4;PNPVW z3UqRMtYp#xfj?eSc|KvDIVI{kZ64)dRANZ1!hf0oS*6kqyA^b~)F}Lp3V%57R_E09 zXFbPO385#boI44JgEg*o|H6jyH$%XON>m!k%E~wRURu$hA%ZVp`H2SH z1CCR_;~*d~T51*yx^Qe}O1G}4U!my-t_yvFHxA8QXwqQ(<-V}s5a;Jsnha3z)&@Bj z`$rgrp5_10pknLKCEv~81_VUTTGER2tUY>C!rZpm_D7ld?6X-Gx9##5Wd-sV)w#hK zE>G|ga6@fYIqrnX`7XV9_gZ&2!h*xdFIMxI*1RoNHH!q8EjU-%twMAsR<&Qc*ikJb zs+_Ts)!=eyml36>9dbsH>h=uWfBsuVD9Y_UDIV9ZH)mix&gh8r9p_xhv_Ve)>{$ct zf}6}r0WW^_FUr6gY55o9sFJ2%TMYq-)d0aKPLAH;ZEwu3@r_}?4hUs ziJm6A9GMB{hArsSL9*!W>zHfEuQ`hd42Z_=6Xb;fD@7k>e5>%M~%AY1326{M4W*aDc9M#SjD!zgI@?42YWJ#bc-ZtZ2ZtP05 zL>3%VqhsI#NpcYAinf{9*Pp=e zT@rYb_?VE~=PKi8T+!mv@+A^kViwM+S6`|v4mjk=}9oU=k}>%UrSE^`1B{gqU!Io&IXM^3s%Ew6pAsD-aEocg6V#vLno)9 z1W?~Q2K%-3XZAaT$C)lIWS&fMo(dyoVNsl<7U7QLSJFJDL!Aj&wEw2_mRkC)Mtm!r79XB|K z^ewfI4E`YHHNAAeQK)y*D$ zeQa<8o0xw50(qxyz+&GK*44RvdRqT!EuV`8nA!&QgO$P26D`t;Kca2*ALnp&+?pM~ zTCP*QZ#npiVELfKU@(4yKzE#RYHMDXp!Z`2sAATw z;)^}~@9Wk~P9rT#)m%GH-wyRQkr)5CR@sJoj;79!m6!&=S-{>=M)CL&D+on$VDUjW zm@%bD1tyRJ@LVXhA46mU1nq_+C?103JlqGpO)uJ$iu72qk_qBkX9Ue5UjGLKOX9`6 z)|YV*(JD5&qRg^$+gwZW9cSsq&FD{smwjqkRk!B&iXQq#7Z&5)nUM$@{Hk=hGm?*PAQZ#Hhg}i{OJXe;XA= z8bc`#kPX&7YJH$A+F4>Jo#eN%esjWvL<@|>WcgcEjJNcXng;)RuY+pVR&O9pcQ?#_ zq)Y}BLAtCP#M~F;YcS>@`n#H___N2JaNzW?C9MAlPM~t{6`J_-$O*O}GQDT~i{D9U zBQsz4T-vBu%gIC4vd+ib6$eGc=!+^e-{kzWHOu;U{eHoq^@e48^y8QNSl^ZLx8x}! 
z^e0o|1vN5uv}f`Sk*(-`8JCfS%Ujx}qB9QT%e;)+@f4!wA?BXJoJMFx4e{zd2S7&u=Y-<@V#aQ?LwTmpcUtB+h5 z{R)FOaGzZ!m-CD3=fvDWzd70-D22%`0oR=?kb8BVn|`~qq}Z{Ji&Q?_7^DPYz4O=k zMxKxhwRTGyiltqFd1|$w3l5#5^)ELOz?E%taxZ@ zYPMGf1s&_I_Pu@+8LClLF!iy1Rq~GZlPRXz*5oucR^Y_a>Y}sTYo$yEUlK zJ+=5>XxxNHk0oRxCXZ#LNh3mch|y79G%vz4g4AOD#^Wb)#@t+Kj%UEL&eGn#5W5em zn{T{~ke&uQ0amx>w_mn7Gl7!{`8=Pd(rIWM}BFP?qrV6Rpgyeep`n zf0x25d(Eg^_5dGC)}HP0%vzE$o{2RG3_E)fpASS6_@|m$TKdt&Wyo_a^tYcqNaEAx z3%czc<{e0WHtQtA&dAqJFNe;_#?#2L;(8uTliNm7QDqaY9}bIAYIiEcvIcKn;?%WA z+UDQIGliFxyrJUWpov&b|GeT5jM~))PGRMdGh`%XT#7~Wb8sH_ASpJ8t}U$A!B@Oy z5b0$bt8`nPRzUK*^L2^`5k+OFtJX?Gd7NwYs(EWfv^`{ayrdd(6-AvaCeW!yTjG7> zHbRdoUhR7Rq@&!`$&Mrz;kGU)KQc{8uz}El2_tmn#>?udjqeF` zQGfdC%Tg@%oc@0TBx#)>dpIX{hKU~3u~qP3w0690z`5wJ#e>PN3I5VT#n+T&sy`9H zB=x&Hy+~S33SMf1xy3`}209`=6g|~sF@09hb>b}L8igRMr_l3HLGzRm&u(rgn@(&m zP!vvjb1jOq^!0w?E4wbM&=SycvLiB!Cq+!}jof;-$1L2|=!O$|P#LR?mM$(HnrXVA z(L5y`Y{)Y5EQnm8(n14a9^r=u3FMyU5+sFAYO8$yfzRm5ll%q+#hJO@yPY(nV#@-A zIe-e28Wf_JS@wGR`>*o?RFROak;%b6Uchs8y8Qa}7>kdM{#Wr&mzq8ZEQetnWn=m2+3}WmIIG$bu8^!IO zQvZdVuvXhTz0CvpFK_vY6tLghj`?h#d|rWXCBcZu%Bt{Nwcd=!)HpmutL;pEN$HBu zt@b>Tg4p5hm!1V6?myYw3j4|UqPk_@OHk^W_+s&w-6Yb zUtKv6vQ{(xsrlh5@rd40n6$pCcNQU|5kKEyVJM+CNaK4wAcLZmphVuqnj(hVYiz8z7=R zdHMO2Gh7hih6>He-AR1+!XCfBV{C&AUBu+0QLl-O1&SoA6VThxrvg{QPtx6@=GN3c z-9NcLjboOIZ-I1AdrVTH*gCF@s@xp2kv zG8z5hZIn-sX(+oANSMf|=sVf(n3?{U-6>a+aa?N48H82lTCVMv!godn$%n$st`|pd ziqiF9&zeC>M7Durofcw+w*yTB66D!2K2V4D^~+^s_@rzHf*W@>pLYO~2dmoQ;jo$u zrHkThQ;;Rue9nB=tK2+|aQ0cxX#3AW+oo9`G?)}hUKKE%A`f^r-PE+Yl6MKc`tInZp zV^_F57&cX_MwHOE4>UASR`L$&+tJ&1lC%_I1k z=$A*FLDcX6X-gvgiGULF%*&FSrQx$HElfim^=k}uphg^-uKeN6K^Dr&9}T%#t_S!n zTd3%lyapjr&!3&6PU5FqK?wSpF}tN!u(9Utgq{ ziL(71aoYM5Bkqf*G*D>^4t6?%Le?U~e-l~~{=WOia7>nF@J3(7JtZ{@1=D~WXcH=< z$A@!$W9@4UY+e1V zKUSB^;bPIU%c?~Otb~F9!jy>2Ox5`uk@aNl77z0Ufg@lv{=dr^JW-%}qY5R^js?aNP-Fl~^=&FUEE8SunU7MV(Gj2#Z9AIiNL(Q+5{XT;Rp z5>^u3E+N;~^;~dn{dm%MCJ(kSi;ExTcA)Aibgi`^Jw5fjpsQX;v&o+^H z|1&Sp_44(xu3678+pDTJaxx!fXhqJixcCiSQg~o!XgHqCMpy~R0D;*Q@%8InD57>A 
zSbE7y4-7vnL**+}1XP#`rI+M4O`s~c6^B3^L{jyC<+8N4I)(1<^2~3vgjZhD#2jZ~ z8M?_HHxqG|gW~Gc%W*#IJB}0Lj;Lpcs|CJV0?jLGy)-j$l)WKu=o ziqqTSA-tuZ*&9mIOzSw;d8k!%+|DZB@-Ip+FED&FlNeXhDY(IKn~dlj-Mx^x9AFn8 zc=_4Rr9vv4t;`uWWou;u*y|5*QMNf7FEgF}Gb?oDD7^2tQ@I_k#R zE?8eK(*z^&k^sb&hKYH^)sHYDnOd0BgH^`5dDpl)=mn?aKa`~5wklOyKZT_Exb>eU%P^JhnPR3Doc5qUF>jRS7bg@7#a4*? z-R}d^wzIIHJ_+>ImcpU*d?O+5+x#QMVqtS)#RXc!Zfgb*;UJ2T19Ec>ATFz+RpFty zs39+;X+%Z_S0c)b1AG^I zgXFU+IpH*^HQaGobE}b$5h&3AHd6|}ydNKsrtW~EKJSZb8awE;j{ukv7Z)z`&YM94 zTrayRn@Ut!7GqywNd;#`XJ(sr+gHho2-eE_%}0WX7lMo=Re#Y3wEKH-K9CD7!cgqk zd(UqT72oI(6ekNo&#u;+0m0|@8iTqR`Q%e1-IQ{ydWU0A?IrBOR+`KyO+D`z?V7});ohJ!3ld!qo3pBGr znZ%g~$3gcPVNxXtrD+zdK_mThUwVgo8)F5pmTowpIP1)IwN7VWK^iF)OLd}r(KjPq zit(rIiM;vug=PU=804jVDZ~ZwQyI@en2O&lM38}CH}dn@%QG2y$YQN;I8>#D-4`OK zcQ(u*rsf`+rxT||w zgT*Gr2+c7&TehO2!p6!fg4cf6q8cDAeF#x(b0FA~KkIB(Ma_ECkajkh1y}oOne$81 z5VY=~E3Uxb9$PU}Sz@1Z>|9Ldi5xBmx^8RHd^jQA>%`7?D)07oDGHN^+{HeK;Tnl~ z*c9X-?wDsuwjA5?44NKs{3ouJ$Gd=AEDRQJW-+O|5qw^8zyBzPl4(%`o>9u~Fzzrx z(AhkPV_~i$RPvFuf=O2YyEO25LuYl=BSr~bGt#*dF;S=5osz!#wae`i?RUV0A5XbA96JXwbv%!6F04Z(v2LKhI<;J1rzrv2s`!ants+Mw zQ3Dx0K_lp^)E59!=VwCH&4vdeljJm-@i&6$!I&Un*y)U1yu?Y0o6}<<)k8pwh#n@L zg?8Z!sICoEk%-^isiXEq_Xubn#nDJgTZ8vb$}8LkqqM5Q9gFp z_S87RX|gtschxF#$YeD!N`}+0Rp5o$^;_cPy@6y&l6UZ3ef6EOVIN`9J+SH6zUxtqA?(?I_X+t) z3J$N|JvRy+eH5X3ZdnK~N9Tt%DvEx+glOb&GE%oUliw2GeI!UPoa}DwJgg?j%m>_X z?2Hn+dg>Ugxrerj4XsOPBJ=K*sGa?M&T@B5X*t4w0mi0!CUMTo8Q~u-B9v~EKe~Jn zzu{cEu+@C?8ar6wL$+&QC^@E9(xi)C*vUX$&#|kofp+Efy}}Kdhl#xS;{<=zU)Uf# zaeaL0l5^dg?Q8YTTg6=R1Vq%oh)46Y+IliA4|C2u=9e4H6d~g@T*t&mi>GcqGBZZ) zoSD)Bl4t7Ql?4fDVkvs*0@eYRN)TwbP5&Z2D{YBzcOms5NzzWfeVmAuq3Ss?`Q#Dm%?xP|H_V;KW^#W@S>$q26)JhywJpd zUS1I4jG%_F-FnV7BZbz zlTp~I8JSuyR8$>!;zI%Y_(m!=+l_%eVH;mT2K(K? 
z+Z}qcb!!!P&i+MdI@T``M$(Q>bSAm}t>4devb%zP4O!Vb*7!%GKUqm1^gL487ay!| zo+-9X@)PiR1t*p^g7p)9KT}@UefCW?8Mrve>wjF^rwCb;fa9P&KHb_W6lsu3pZ08X zscVK!iKtu!l5b#R9)=i;nf_?t?$OTmOB{nqv7Z(qVIifskSVTOMJ4ARpCOwaS}q!5 zim~s!!nw-lXg`b&qzszl%L(1E&pVvz{krgV&(|Sig<;D!g$LpuliPWa6Cu%Ym}puZ zvwC+z&@pmEOO_T0(Z=Gj~s93E<*jA0EB` z{bsv+-=3;?aYs_D@>z}4c0v~?uD|QTd(hob!B&644VO-NZEk<)#)oP}fBymbyP1Gq zjg?;ddoZL_ofd|9B@Xu1p3*5xH#T)}C*{md?$wJDrP>O+*UEO+efMe<>`+-~6M#~7 zOKpU6WSWe@{{=PE!|&HerI2+Al&B@pWd$9eH1&cg$uLkEYfTgxdaZaonHPAXvSA8jNs)U^L0++E=548!`Y?WW z;t2VgXyVhmJdFwWFRWTr)LG>E8V|2YKtg5DDa|Z?=h)2Apb`f^%RpdTa2oc&U*lF{ zmL+2()g?QJIeznYNOf}RW?C*l5gNE{v>&?+dX&d?i<(Jho{=@VKPyz2at2H~81lJ7m6acmA?kz8S#R@=@+4m3~=9jI-H z${uw1;$3^<`S~K_8x!Qam4%kXyXS>*I_i01+{tMKC;=%Xk<^4H@V#zAy4yzH@jy!A z%Wc{&!)j|f29c#9hGuP*AzWadJ9cy{VT5U%-F1Hh5GqY>qqVbNf}w zqn13|-{n0<9BCLg$x-Kq7kz=h40ND9e`){#6Uo~l{20juz`dx=QQLtAzXRP= zi(}uKf-)EA{2x^8;4YC#B^%XAV|-Vd`1yd(cpg~d&PHv#>|cbx@7U79&gkLukex6u zB;MsB>1>amULtu=aB!KefZ8ZyhDfoG{$@1FK~ilj7lFpUM1=-kLdHzuHY{qz`AzRI z8J%oMg388gse@L9sU$hS&vd2Mp#*HO`e*lhrW|6bv^)%S?=Skpr(Q+MuI#O;4;oXz9r;gDa1 zd*U9#N!ky=9<>*XD=Qj*!9v|D$0qg{NC)_zkP`B_yizefZ<}0d)D>}Fw^y^eBc-IK zc63e`2_v1Asyc;h-l9Wxx_?|aT0{Z>EI)pW1YTK?CRiG;wjcEX0BC)n*jKB$oWDk} z83zih{V2Ej(Q-%l_tYBa&>qz@g@YN2f)dFGY_ohNLe%bZ{S;3s(P(7hjfs`kv zeV1sQC&Yg#R+f`4eQdNKE$zLBU2mp>{p0~ib3q7`%G*vahQ6f-OvO4uf*u3T0cPuT z=KzvboE_(^46_xEqqz$@Nrl$ZyED~uVN@lMOAyWcDTk#N2&whohtC7VZ4yLC)$uxU z6SBicD!-lOXI049)t>DN@4kKTthaU$zy<#zv%wM@pOu2rvbr;&){s^d(@jU7h&Jjj z)R1oFkgtQG#feOgxa*QJ7dLmx_U!#|)p5IUGVc~2De};yZeHow4Xy3EE(Q@?TtMzNDw#k>P zs%L9LrcpNJbdd}EI;4pI?O`Y`?bEKfsw)C5L#6Y{ZuGg`3)y5`2i-)AheK!tzp3LX z(r#U`AxBO%^Lqo6Q^{jS6^186xfSZT$y6D}Ywmv}JWte5{_PL0UpS*ulBa~92y%i% z76T}zD~_BwAxj%=ppax>V`KAhOH*Het{M+SaufrS@HP z1aIVgyzbi^lVNnpB-PH`wx$+vx*NW$q+TYIYIML_5#0^x;lbz?%f;HYTe^2Rq3H!J z%wbcK{GyokJ{56D>+cV;QhD@EAy1&tV`=yhFbriqPqGJ+%a{fA29g8y?l#8L!ahgu z9x`#~M;?5Cg5AVX@cXjGKNN7iuk|`eCub5%I9jF3?j$S;t?noOqNhaqz%wn^gJST@ zy5}NB|3tlgJM6LqqT3I*-Z*0Fqy5h-=vO;i%p8U>^LKFYv#)d5o2t<-n~GSb@1wuU 
zQgMV#=CS2@Wvbt-8-w;OCi%5a6Cc-nc;Pd3Srl1L6hDzC2+r(G$r1|j1 zN{hd}-fIRyHhkx&o}F;jltPGLeIpRwU5Tkx=`$PRc*nhwB)(TM?8~z^g3maLWHQxO zPI}P8SbV2+H?o(ErWG~^%BlWfW@T3Nd>Z^$fA;)GWUTy?GcIGpO@q6okXQe>${COy zmmoQVLZPDVfoGiTLd|2mP>qLE1&mM~FiXnJLlj03`q?@wc+!p-Om4BncG2cQ|85uG zN@s1TdE^bty~mb88%1c~EvmUy`?XqmoN7fAu^K%O??NB(dEdUcGaxqK*Y)%m*+-#) zPwaNM_`c@ZveztYCP-KV^|B*}d|;15x{mnr$3|t%8^I5?#{mC9hf~`}&mNh#hre-U z2={r*hktUSqr;_Tv)hE=0{A;zw!$(BpZ=d)5cqHQgCL8A;VpibR`0XR&QB5&M2v?# z`d(nVY0ezX=`?6uJRO-CJ@Xi_mFgJSGl*QR5re|wnzurOQJF%ps$h292+&$jT{Y7y zv*+ynF_v-g5m&E;+DM%I)epg!xvIihHB{cupjVy z7OX=T&xo6IIMY9mO>7-E8}d)at-F9E36Z3u=YG{EzGBUPTAu zoy)_pZZ#@Flw7+QXsipRPi?*Gp%@luQ%UGGxI=-;H#`V~5O>V?;7d$FW00ZQ-7i;P z1Wm>@1wpK2XBTp6<1+BlGBqq>MmrkEREYVBMD&MpO={n$93NU%;DfMuI}N36wzXc2 zodP3%oC>nZz7#0L^R*v{BirqFGlQ6-1U^}1Q=R2y@kj#hLX+VSd~e5CI~`ATDV=Uw z^;RTp1^uyb+-2OQCV$5En}RLcyvZx}U+fqUO5ELF+Y!Ehi884|Iw1HQQ7`*nS^y!8 zV=YH1?rRNCN7>y_be}z6b88tFSW^XLFJWgSGU@#GB@$lAVHu@z7WX)L;>8>IxKyHP~CyRkq4 z>5vBLZjeSrQaWxLr5ovn?;7-pect`%57-r$d*;lXbFTP>Bjy76RPp#fzv7IQNK=^! 
z42oXMCKx{IYzd=Pm}!sW(*RsVfs-6X?j`5Lc%bV$rp4N;*ZhXkXV7cbYScPYDhvdQ zgQUzP;B}#*t5c~PRbxFCN=g7|_!sQwF!hbE2a{0}3RTQEI}|}1f#*C_;OO?eFfejy zP_Vp#+Y{Wb)AeGrQOkBmDbj&fn~}mBue_ro@-EG?G#{IFiB?y*dpx-e?R&w9=YCzl z6R#G1y4X6GL#d4tpM6i1?m2$W7S4f&##=niTUh`6%XMiw6rUG(){;_r7vT@&ncrD!?W^pQEm_@cI9hv4{W5U9_{*Zhq=| z#ZI>Z+eaA@{#N8ZuvCbP?RuQBS!;AsW*uGU0;8{S-&^gxeV1={pS!wxTK80CM-a7n zIOG;LEY-p>#t?g-Gw*kSw?bgJ@R`eKVAvao`^yPW@BHsy_`qYZ$ro6S^)+R|?pEet zQb)ZZuo2lYwaFb0VB1uUaKwZjr1xfv5x`4A)~POo^YeFK{B^|Y>jMe&?xEO#2uXX{A>O8z=U!F4I`T3Z9=CN*;!ByB8xCPtzkFhFY-|`Jfk3For-#- zv7*}P*J$k`s2{mf^(<9%GlOn~Xzt+|u*^=LU%PJZU6&YvT9xJ@HEJTg$sYQR*|N=O z(zdbDz}V4FWNZ!gt$v)2CiAtNLI8en${h__TwW zgtu!B7U98Olv>ikbS)KfYk~oRYjJl$)8at;v=8$?w`+)jROAq!*virPAlPp6c1?2bQ&*JzWL{hUdQhe7{Jy} z_VZ4R`)okwVG~kUMMQNMKmd5d0Xm4Z82b6Eov*O?(?Nc^r2IKtT0&8VnPIn{WZ7H* znOPer$B@I9`ZpMA^7o!KG=5J6yT6T8D%}u1qK<8@3(Z~{KhEEZ#_Vf4AfGsshQmer zctfwX&{Zh(*nV5Jjc|I1e?^1{bo1gKCadjDmM~F-9QQ&t>-PWXz|?7P8!75d2z#`Aq1xwpLbjl?YsJ45zneH>dCbe>cjj5`yStz`CRP zBNVcXfxuV@FuR-Y+Ja`<8!DtQ)zsqIa^pXxEeX}u3eg}N(#S`3sq)dQr+_Dx){@q= zX5Q1AiLEjD+_cZ^ObCH?JbC)4*g9 zpfNc<+(WP(pnIADGV@u9i4UM(N67s2wHIazH9PWAc~TclY&R4<m~w z3Qu9?rCYvO{UODjWqiol_PGjzFk<(fhKU9 zcO&kP{*j>pa2C=m=v0I4{Y@U*d}fRW|K)xzy~5?n0M~iU@;x)O!n{;zj^j0woB_9b2Wjh~$9d+F zs2Fx>eUZ;q{Sd~W;e5${(uefq^<83rBwr#~*YL66w0sE2ANj4@X}1uBS%S^(MhhRe z?C^)e(e&wxig#FMGWT;mME(NA4jh|j3067Q_vr49|8qbdImhoO{nd&%2Rub-m9slR zWe&mXgUEpiJ4zQqgi#c=9uOH|G(2e ziPEj{{i)v3vuAg1D~fMqO8Br==;$9`M#g{WGYCMYGcPT4r3@=v2zluJ@kMqnNPhB< zK<&rf>HSLiv&)G>@Qb70i%hmNYns=HS*%P%#S3{EKDLu;YyE2xd&JZTI3;sZ3(kL- zHJy4$=)DKLzmMR_=;)6)C-2u&@&^_25o0a_v$zXv1@s^9>%?dP4F{n|o|fs?TWB}r z?!#wyGWanAKg7=2c(1g016a2c==yY$q(?>M;$h?>!2QjDom0i#cjd_$caP5mfWD7F zjoTb1kBu(4@sp=`VTjb0@jgtS@gqz~kP@5y%|~E+ z=AYl*K}aXHdr_#xvf3jv34obEZF*iIem`_}!+kv@eh!S|!g@!jcwSj@W?vw66a-KT zYNXs|SR1K#cVZqNRAUT!V-PeeZK$iaNfo(;wL2)kv;bS)1{%JijR2 z*5A5lf8f6dI@zQT6rp3@NQu!ZDTNmJ&TKd{TjFX}5e@=~6EyiA&#j2@M5QBlFogR9 z14t|_Tdi*nd7;x>Ilo!5LHTTp3t6|hIrRLLSGZQ#ZfM+X&?%A9{tGjMrYBHaQ{fcp 
zRIJrWYe{wG38lae(ex{li|G!l(5utJCFjLFduCY;LC^d-5-waiduE}Jc7CT%YGStk zv_D5%V5U*+qC{?5bOx8&=JXevZX$6lOz*(Oib6VCa$7@WZXa#-D8v<+pcwd$cPB4l z90$h}brjNp9x=*p3;YZZ3t91geY$@dECG4;Fu~bj0Oj7C7xaN1}V3S()+?I`-lK=F9vQiB!n` z9D?qUs2D+%1NxA&mWbA%;P6&vX78bl_J{m72GW5)T7}MMxE1a)@&k8xV5dtrVQvS^ zWC~_&+`*Zl)rwd=C?U}npn6<#VgaOmdI5a>4M>*d`31lr4rHd?=!}dq9Fo$S36EJ# zn}R(O{KzGBxEy?rkM$-m5w+aqX-u8mGeomaDAgYJkNHLHE5LsL4TcoSh!~>H*LIqN zlC%L*%Vy|N8%fW&fo1`lmFq8AS6zcb0fzCEao7CnYtzWmAItih7|2gjae8rCf&q)^ zvjJeYg4}8#8BiHq%KpFnJYm~3QEgb)Z*RL%0I~U&LUczwwX8a_m ztyT1OAQ{(Ct4D3wgOh_X(Q&IDVrWAN<__QZGJlimuK3B?Jb3%QcIYviV$eN^k52Z* zX~Bt!YW|Fkv41dL^>d?wzELbyPtWy4yW-~vwb3~8es_ieo*OVrT`@s!e2oBDiUCCd zC|qiJ>>3)JQt||OR4W$KPZGp;C!9#^+M8+sK=Q9*Vh2vsPlwz0sFzBY|B*2qn2b~t zSDy5wN)`M{*u9nrpw(Dl%TsTFp=8!wG5`86uO`U7uYC(3(tA3I*b9L?CvaGHtEvM^ znHmsJ%AsPT5Kvi27#afimYZ2XXMsOM;LwWl^1&z+uhV*RIHOK33D7eE8%;w`-v=Hh zY&J8GG4PpgcZd;% zE8;>rkg6#qS0klVfjrZ<<>g;m{APxKAqIoCUj7#N#h>$baK*^N@iK#gJQr_f@AiH$|gsi>fk1m;pg;O&tcPN((}3O;-780TL_@i^ojudE(RBbBUZZHJwe(fz>=_f!P4 zlPF82KT=5726ZpQ#^`xR+-NEaUSz>-c9QHaJxPxFoVEi%e?RZ7_r_3L$0Ca2O;^{6 z=1_VNouBw{ZRzBRcm^L$l0eQy%Er1x@?ornN=8Nnt&m9`%01SmyyhlUdwgc$Q|VQt z=D~N)OoJnaMf;@G@Kyg(o;ZOFCQ0GF$g7D$0`4YBEk0);HA^_70!2?x*CFR!XuS;R ztA{viv5o!6>^{7Sy!oJneFj1`@nAi_m;XrmGDPRTk<C0 zM*ia52UV+=pA!&57yzjs0`sdIyvi1dxQp!;`*~|Z`uBi4lLxbGh~-zKBp}gj*Ge~l7W|6U^Xmd$sU46G7H#4WVrtgI@^;xNktaTk z;D{S7vb&?BxAi|5f$&umD+j+RCQ%{XY8{bywWP0I>zf|64qVk-48alGQChd`As6AD z>fOYsFS)BRO$AS$jvqMvSP%G<%;q7mE*pegfiejZ3)us)a@3>Jk3jV{i|qYVMhn9m zjN-8u{5g({)cVueg8fi4QF@g8U^ z2=X`obvP5di(E1*1(T3*#IVA}eEgG@p2XKpL)EUbnk{;_qeUGQqM|8A>Ld(nv6V!i zoTAuyOz6%Su!wy3Ki zx7o=CplREUbJ%Q!6;LcMRPXez zCCNW5FEP;x!T-O+>0X^1x&ay-?%*Wye~+B&2x@7Igx9gt!4$T3>S0hqfo1`T3-LH_ zwM;yJ#Z<85jTs#tNGh%pT1^#8<312qjXQ3MSZB$7yXBmbzZ4Zy^v}K@}Bk z;$HdRlyDj07IRKvz@sw}!}q?t5yE%;GwDwGe+L>3&H4!dQoP0x zmxuN{7|!O#13!2_t9C!@bpePVNg)_2<-D!f250KAMdyXX457;^NUspoKrauJk6PRl|EWA;b6U{( zpYhU0+GXXr9wA$noFzY70bAxei$%r{T{jiWpnGo~CSL8sZHI(LNl~#kbN1|+zuZ=A 
z;w1;RLDcTK>)6Y4S8g1~F_ur=cu&AWv1}tDAz`~*f}${Or5HiN#CiI$LLCH_z8^Tr z)A-?0XW;P2&(05q36E_C0>$z!K_7HJTb!^D#iMxR4eqZ?#4Wve^=lVJw>AK^?`+P| zRr*2HdbDXxuUWv2G_ws+1r>ir zrmAf6?d*2beMW0DHk!R)V<$OTy?v)~o6`hWP~+?{4(HSf&uP`@e?}z`%JiLx?m~^8 zn13?6+aOrZtRCJL@^UnHAYOgG0(uz)nTxK^+oHmL?E<(3_RdEKF(STr#b!qv zgQrD+FE{~yOTK-$zmoUwj!0m$Z-#;NgrN;L(oO}qK2p|{ zcameWWAg4yOtQNTLi2C7QK_}hkyU8B#9rt9?Dnon7PB}?$>8d8+m<47w}KDO-XF0D zUCI4u8NNgO4{N?B8Mkpl+XT82R)=rzm1QL;mg?UtS*io zZf;|&VYn`=r>U%SdXQU-9qfNfH!db)KEFt^ZoK92OE?LUpaYl!Ugk6S^x}t2RwZSn z{Q8wf$6^%6L&ggm0{m98FKAC^#HQlst1pC@Ny}~m1LKcp4KhOiIp(b9LX5$2y zRqrkjO`S)d$`wE^xO*s&Ij>qf;afz1V;BJbo_Tk zoEens<<@Cj5q#hF_se{)-{^q-ZX8gC3=7$T2~lWO{YIFrm7Qy?#QVjL-yZUUPo|t} zVc8hn+#xQBh#JCnw&~)zG^|PP&Tb6^ZinXa;s1;(nyowFOPpbIhj1a4|L2<1Y`a_@ z&B_%}fW=@dp)8)u*@2(D@Oj-h8}Cu$49(s+{nHn_Ylc0QY?b|!XhHa z7+1>7N27i@BJy6&)l;>osU!H5x?!NyAGxlnSC=?>ctuj+G2H{zpTY3ySkk*0g~^X} zcsyvH2*tiO(?_A*E5~)>6iRCaq*nL4hWypBk(s(XdK25raxFL8qk2;*$kAjkf`Eyz znG_-`+X74T(ATL4)wW+p*7kSG*i+BH`mds~`>%}1OuCkxu531%+lW!X*Dg7dbZpo+4xJ#?IyJ#LUV96jGCUWF_R|2jNft%$|g*xto! 
zw6fmgo!Wt5S?k?m1-G^Wyb*<8|6RV}PEKq?%m8$ms2Z)F{`HWepLKX{Izp!(lqHB= zh+{)cfsq_{AOMi zMA5&bDOzB2uQkcPBOcM|g`2r`rN~QZalfc>-oTfmRm%K9JOzL$!mGkZh_K>1M?3O* z(Kigzv#Dg~*5Px1JehA#8bhb}>08CHhB^O@%@G+7!Joy`?s0)aUr#F-{~Dn`-^~!c z+`0WTFFc|9`jg-e4w>K-8|kVaOZ%^p8mEotc zRUt*hnkGuJJF}XtV&_|*@#UUfc;*S!AXIqle|Jnzjn~6c$;kc6=|*=;8UdjDN4G`N z|IaNW|GRs|{?x*#Ejr>x@?DXrTGjBdLMiz#+px47Z7&VR9cV*EcXbgSI&*|IPd%hg zTR_>qXIlB=uXPb_!LNx6uwITEypIr`612)vr*eIMJeI|jo>Lx)~prV?#(r( z;A}%0+3tE-s%+f^@(vW6=(w9vq<UMg}Xi5*3Kj-Sd*w^ z3|@Ulq={;>+dIjRYq9dvoTT6l=OjhL8RJSiK5lQ zIp2P_;$*t7;$Whg#zzBriV6967JSLT4~f(3GRaPL(lTh3_(Ug1T-=ve^>*=wAkR;3 zgiqsqM`OKayy=mssPd*}pAWA;3df%N+mWEZ$#M(C zJPgIz<%=TMjlkxZuQ}gRx0MfDlWXrvyO1kb(H17Uios=o@E}rGCINhfV zagiEpgr@bo<4J*IA>fEXrS<28Tg#h6&Mv`^hy={%gq@$e?L4cy{2owODZe@DuZim8 z+ZjoM1+cLcl8J?I4fE#R5wKNz-I#-MnTDELd{9|M1vOKbi0EC`9!@h37h{QYTTKW@ z@4!UrR0eVO`P6e6e?@6ee0|CbL8^7@x%qAduLW#uQ1{9X6%@C{H0NA$TDf7SOhCe% z8ztne-rL)s$vLoo`zPc0aCVrBzhjNR+&xzzV-<@udND&oewWhKzkN#L4GOcJ^e9&3D1w z?P8FgAEMnu;%u;Zo9|Z(;i%&_{yEd*THqlibP|5`KR(KN&yO;j%pGONqST2b8&1>2 z*cwaxg|;35Ll1?QcMqj*k8dbvOl$Tj*>K9m42581@^!84oQ>sG%S)Ao^|X0MG?v#X z`21yfd?>rV&Om$|i{a*z)YC;662nHjTWEqVTu`igm?%3T8gh8BAa(hCIR`Oh<-L7u z)Hu?Yv8|(SkXp+q&njzajdqd4=Rzm?7d7enN5IV@GNaM#npAPPDA|uyRhlSGoA)sH9+Ttpma9?SpZxSLSd>fXCrv=#ONc)hdNo7qHrQM;aRHvABGE)KSSOT2(5i zcsMoRU~sIrUE`HjmUf%>L6cDfQ79!q33&7r%SaKVN>XBotG0gJFp{jbq$Cv7_dEXd zG);RbALWosbW<6Z&t;vP-4x6Qp1+rmFDyNGijLqLB*#8K7kmkwxAD&Bc=~`OGK)_Q zO{+=%Wu+wjjAP?*TL#GL+%zglgtcI)6*8;Jo>f4NPP?Ij%ngY9&P1+UkhD2NWWSdU zuhah?f`DOvT~)qPv54gHE%X?6sEo_C&fL| zt*bAd)ubz9(UZRSN-kAu!X2E`=6s1H+R>U zYc@tch2F-#sXV>aa>8nmJ>xeQVIzgy!W{+y{lIpz$?8&nED|5Ab5b|CH(OF)?Z>C^ zTq1!FrOM|hC5BjWBDX^<;+ghQY}(aq8bzbiyMGk?((KbXRm>*Oe|1^@JuaQ<2vlNJ z47^GaY2Hz%*;r>t>dBh{a)qBHcq<{=qDBoEAa7Rr<6YOA2M(Xv%zSTFn0~W?i1z~9 z1$HH5A<#KmP$?P{Q&Us>qL+u3XbVbtEGsNGp+s83P_sLs%r|RsvTdP&ED5FMco1l4 zLOGeNBQH>Is{HwTYrl8GUsT(q(#Si5shRy>qCU zc)KZLN~`uusQy=1R19YeLTVa=gOX={DWdS33!l;6Pz_sB=n-4+D-ar9w?|9R`t&pZ 
z<>&fV-{zA%Wo_+DAD`}R+RL#%ll6@bkEZogE}ylws5pmKzCS{sH#Jl`HfBww|2_EG zMElN16OW`BPuYNzVQ2fJzm!zM=WWsgXe_5;zp3lb^}b&j=D zUA_OFq_>h!;#{ZSpBYa>NV4kk2I3~8t`35#6%HC224y9ErvscQugTX+nxVVpwFN#m zNL3USEjt9zZZ%lNyl#Yu{_@!~r?L#lQ@Z@-_rlHT@8ferAT2z4cQtW+{i%|2XfYz$ zKgB?7t|-T}cG!FwhmtopJsrsckZ2o@xdx<{h%Ca0kR3Ekw?Iu@4GJ+YVEOnG<9@V{ zkY4s1Jr2b{@MOe6N2APK3Cz8~dI}_N0O2@OvhC`!m{VrETD?Q@njDlXWuR{BFyyt} z+|r8e5tLt3Q=?>euLJAs85(M%gA0^knW_Zju;D5)a|*Zs68*NZq#^sM*YZi%@Ay5w zho2g0@)08q_lobDv$2wRPz|d?p)`SF>`s&%=V#ApH>wYYSymzqIddHJkWL9>+@{ob z`C{&8WKPj-O+2W)VB(*h+Z)CrI=jn&mf$w{7f%Ycyfsd|nMR>mB}vx9*@>1?b@$^J zXpQKe{!b(;%-iwVxN^tFt?ucM1V3x_$wo!rTmtjh`{z~PYg^fA=BWJoyJom>R zTd>A%vbq=(@uGA^Dew$#lur$f~s^StW{hw=;R2a!0?*1PA^*wx(pakof_)U&b|~u%fF_&Fmr2-& z%NF)7V39tq6oAvm8F$UN)#&BEGtn^o7;q%?^dm@!cdN8Vf2^4{!*UcpxIA$DgDHsnMxHP*wi;LfIrNx*KieKqd}$DcQ8k7JO%M9?0hWTF{=RKVCs0^5O!d;?gz(uq?D z&2?3{MF)*#?5MGA>H(bWd8or910hD>op{ z-+DSxkum+x;n`mRT9gSqgP*LD5RWmyt9>g4-EZ*#-u1U2cWpgnU@h1QutoK)!yJvW z)KN0HG#M~}Gvf{&(o~nH1AQ_nn_by`#0`!imiHk?49}bVM27EWe?9Vdd*Eku33#g3 zya3?zPq`N%LzV%Zx!LWf$ORNCg|2<3esI}woLtf-Zk$X#yNkHFkT>a@bM02J*aokG==h}xwprfU?>j6P z&v++D_vdO6tvYqMJU!TUd%qY#r*;G3Za!V;ooIOTs=#`>CG%is#74%}*7oZDn!|{d zSlP)x>vh-QS-lA z|8VB9i^MOv9U+vN;9GT)F&(SQi~Mn2jQIKqNbTW(f)#iAtag>76|vJ)5Z}(u(ja!0 zat>jy9`aufV@?JgM|7vBs&H)QTf_G&J24bqdyR7dmvVd&|UYa*n&8 z+?W!DNWyU9Kh5N-ZcX)jfkHYH>C$r3WQv-`Y!$X;`sm+nJySjHoRZUb*QEVDVA{-c8js9-HDsE_)svI>Wnh=1TEVDs-%}1p-hXT#BU@7rE&*jESBGDZT(uHNdSz@w0emn8IElkDCyP3M{&nr4_$t4$tJ>8)jw{|!yP&@1H;^N{ufgS%g zmdBR}FEwrzi#RDJLfg2 zyIN>T!$omh*7v|#Q(*!`x^hD z{jHAC{(d|&Gcyo;FLeYzhY6M1eI;RGRt}EF^4T~d3&*cS6H_5F=BgMMCZU3mL2%F= zQNx-%yAR(KaS0Wt(v$ps0^FAL*17`Ri)TF|4pwUg#`fAB8nV@jo_>CLt^mY59igRW za_ry&opv&$e(tSEgjSb0(?!p7A!fXcQi%ws$Z~%U^V_pIiB}lJgt9)pTQ_@;927UR zeX*jIy~kk->mB~6M!)uAL%DwZXsX6=c@A5#GQKm>S~*&};)#IW0!PYMN*Q@e>$veD~?w7ckjYZ2m^TsoRw$oIitq zR7hvC>mY1^CbzQ#V3kR5iS_kmtl?xRp9&rTHOh-8D)=PYuyZw}@o!~(;4G+M{s}?A zQ>aoGKqw!TpIBBkO~00icn{0W#rGu(L* 
zlVoAcyfvKubb~tHi$CF#=3T4*Nn<6*G48tY_Gkrq+xL$r7?1DpuLlLxL%W;v(y;L*KRkme$vffVcM)?a9T%FAKAq+`WNmJD`mZ&oY>*nH_`WUx7H(`mxI zNI82e=k+CGJ;Y-hSxbg%4wI9-weP{ZXDZF*`n+l~J7uk-RukK4{8}R2N1TJGvDu6b z_tWY8@vH9zjwL^V>izcJ?~*e9&27`7fE-{-BG4Tr_KZ)w-Du)YLC8ebe-u#=1yA2#Q*I5kOi4$8yBQg}y z8U_vG`MULCF(!z1DBEIvy^_OYnoi3?QTR40|3lJq>389mFcUGAra#&yoN*iWNPZOL zKvJ6=w>NQqvO}Gf!(WGK;}$K9xK}HIFQ#v)KkxBYgSRo|!6lNGqIUFF`OnM^qcUaz zGMY9Hhu?gq{VqwpG7`Lq#>CHCW9P6YoLMoiS+-gfZCAh9Y_<7BflvvVc30CiZG&XV zM7kc5h}9iQs#NQa?WPBi!% zap4DwJX=kJc6r`iN$CW;$5R<91F}AG@xF1!$Gy#Kl^yEtvdEnJy1JBNGtt(;Tj6-$ zH#5JhcS~lB_Cs-=2HEh9_=@c~Lzp0WxM;fB`-4&M-%P$Ts&?&Sx!HHhEZ{_Y1#Xdw zZlS|LJTm{f#?$Yoqfej~E+Rn<(B~n?U!O0CRX~rYA>lobhA-v5Jw4qVx>f!6@Cww| zIp*CNve@;|J0fNSved1{XS|<3j=JXIdi<{Ieqshg338E(d$co+CNf-^j)f|7vJwZF zj2AAG>~9T8t#Js{aM0}Os?VXL5f8-;)ljZD8xMO9+pa3(=1k;%;K?}_f#ImT#YXmn zO>C4q=I-CF6A5q5leZQ=Ia;CJGH!{G_d%?mzwSZDgj3jb4#EeRzE-NO^{3Z4P?y-- z$6Cc>N;NJsAS+zID5F8Rkt&s|kprLb%cXhk%A|fHnKthrhi&@eEtUWrr?%?`Y4N=I zxA}K^4zk)k;7#M8PH}AYI8JrUUCfFf&3OCv1BabFYlPN5Y)^Gf)EN6wNya2j+9yx{ z`#L?XL{J#pv?;9F>EVne({!-tI2rZun9b&2dqrUnY?_^2T)Y z*s9o_)ycgM&KiCi0#24fwKk)bkCa>_-1>DR+9K6=qTs522(m_aZ3o5p``AE)_MfBf zvk|gOjOKot@bTjJ(9qB*hIt1Ih8|M8HSWvY1xIa?N{7|lOT-e?TYm7bw8}Z^xCmxZ z2!g#IVEf&#g>KO)L*^|X{>@JudqL+~QbPi|H+PX2H@d=IXHfiLukd@T%4%a=H5)T| z<*On%K^0K87CKHE`F_l~8xYT@I80niWl7b_E#!U2mkO7d^RQM)M!5C0b=8)dBq3<> z)unG9^~r4Upd89;3?sLC?>_H8T}wD+588+pYU!C%XZ0<)Ov`vJPOFeuel9+7D0O(iBchs;y#%rK;uO$tTLK^;8kB z8;Dz;ms};L3X}H+2LTI*yEjrv-34teo_#EyrFc#QMz_b+>Nw3ws|qnx#Qd58NyKTYIP z63Wt})y-`V^pAGQWsGu21l?K1W>>q z06~d1gj)@4C{1jLSHbi|8J9m)-UmwIu4jRXqnqvII-L7uttN-aM_rKa!yEOH)IN~1n}I~wpj?K!YGKmSf_g5Y=869uduvaLfRC*yP!XN zH7`}3ddz0KaCb>&HNd6cA}?~4h%*#|PA|}t#-empcjOH1% zv)ezx_IgE9$oT@t^K(B}DH%R=eVRb-Dln?$9O|OJoQs?~*|O7EpfhGKsCQ4a-K2`eKjV>05fV#;)VX6$;4Y!&wp+C6^uIK1y)4|n zXCT+xTc}}^fn(va?c+Gg5`LNJ44S9a$8t>{0bTVPL|}nyQQDDL3`vJpU91 zni)yhr4|V8zwm!HnlGx2$o<`uzDdrk$ev;ULzEKTq7iP@wZ)Xre;_WRBdWtMlmu 
zt0NqlV*)hB!-v}jWR8}+FlMY@baR<$U)>?F*(kNitVt>{>1~j<~Xa~Y_DEqQ4#9Wq`{cCB60Ew4PNa2&o#n3a+_!cOZ8#k`^dPQ5ZT%>es?||G-wYMe^2Afe5bY0kS6R@!{mU?VWIxC zaN+jUX89Rj*FpXrnd?D!#i67toLn7gkz{prc()f+Z#3?Q<^|=Bv{HqQ~VL?u2 zbwq(PZhr(Xl28BX=puwaKRQePWYS3~0Vq1P5Xrr>n{IfN)J-^wi_E}J8kUoil8 zB-12Y1{FIfjNm7>z$9T(3K! zGPKi>0#_4lU&3~JHJ0B-W$^rWM$u_ha|3Q>$B?OYAq$CLVE5;)!g4Y-m&$n6{9l)&$guikH?7LIQoiN)AXS%;sLWT+w!M^d^^??2h1>Ynerh&O z8e>%7hc9qBx^JV-6L2cx(Dw$-ZqffPFaH@Fe3d9zl10mac)+%~H#|J72P79H=U1(? zH-owg$r=zNxe>fL)MN|ka)YiL(+dgqvji&mC2l# z!^631HAeq*tKpW9Z+M|5OkMHCtHNCJdQc%wSf~h>WLH_=@!e>LM(^4(n&Xw}wEEt6 z(x8j2*NU=mDz@acbHjMH()+GU5oZd)UEEFQZK_MW6XyCu@5!GFKmF$UVotch6Lymb zk#LM7Axohy5~f}b({u+CdU|?f^zn;;Yt3#eThNYKvVh>6EUX?23cEkNV%O4C#tGt`=9`(z%SRAo(!$f7G5=4?@95vy z@JVi>Z>niCxyc{-B5&O*Tc)3%mO<|Czb~z>6XU2y%8!UT<;hWdVfJ`n(#jrm4fncw z?=fCM^E}F$qVBgCS6~V;c3-Hx>AdR~Zpgr63a9(=Bc^69-*LDo0!QY#94=jG+S^Ch zosydlE;UsjWPpC(>Nu_o&-(t&D`=f)Sf&F6PM>oneR{E521veq(3dOH_1Me~Z@@!_ zO{i0m=6K|*t}PA;)!$;^tHJen+gY{0;zcFg26)617V)f9+bH#^;w>xf1n{lB{s4q0OK6fN^$$wU3Tudw z*Vwn-EeV0=z0EcV3rWgbUdij!9~CWgXG$B(8YuIsdHC19V7~^6TGE)FhQm$&9Zk}1 z6N1_~ny?V8c)8z^yKdMHHfQL^T~3ZWm-CE>%q#6z{QN^e;XC zO!4$upyoHymyO7?g-=&zuEg69y<<-KHrw|XK9T+M7(-+s6(O=EcI$xqvxF=&Dw zL&H2zM?+*3-|Nd|RMeVI`&@))-Z|++J0qqcMtb7bs8*X47{RBF!!S@BJU600apfmkRjfOC)oY9AvnS^o4h;xY6{4pa#sl zzZ;^Yr0t4tmxLE5Hc`l_?S*ODBH75b_=EYQpzbCPinC~Q z9Yge$kkT2d%zW&@>pRuNNe@`pMfbj34DtNm)fD|Wklmo-tCze4vr%_4;t}<9=6mE_ z4aoNNT8?BaXo<;$$7H`p(ja6-&;5hg5dXL1+RU|(^*8yz(4Hg;jq6kc5~7vup*I9| zMiyeES#fmgSu#QUOXD~>eR(k)Ik)ZL)u{VyOoQujkCI3jQ%=lW3VM70IqTW*;a|ui`ws9Sq!Naxs;*<^mfRNV#qI zGASpI>N|r*95Wn0LI^W5YE5xNC`WHr%Hk1%HIcZ0zk-pl0^T-{d4~&Mb939=Tm&A+ zb`sNLM-i$c_cEyxuD@1kKO#9C*;j! 
zhG9f%$J3)A77A4DE%?jykeT&Pf#9AXtiU=XKKu8PPgY^TLN$6@@O`MWU9gd{u@5MV zag6=);Cgz{)aZC2(e@fD;(yGBFum$-Z)Z7NldWch$~I(!dVp{&(Ea>UwbWEC#A|II$4QBf=@0{jdR4#@jr0-A>zS2UdZWwyxD*UVJO;# z#By7v7eY=Xw0~Bi-=o+-^g`(BTxzsA;rtC`KCgIGASre^z41Z5Yc7Qs{MOZg8Efj} zx$@S@Rd;I--h3;uHrLh8L|&25k36NYqL9>-LwmUJ|xm znGP=~SOl@8v8L0&kJ8x+GKf7~SJpY5mDkn`!RR+yZ`g`6*=;A&U>SK zQ!_9Ge|9r6_L2um$O2J^G?yhAR0%dxr88r{-;t>He{$h>LsCy=95)MPVS%VapK~vL zKIyqJ`a(zzk8N?&fR39ELi*0r$SOg)C8g$i#W$aiZ@6jxZ@32!h&b5X#Z}?7h`R7h zb$RY^ zq58+b2mXTc)8M*th{kF@ENh~GzLH3V74%_~Lnn0Hk2-&lbs8kP&m>>6s`qQ&I@JOr zOmMySLWsm^S)KpChqzAnbfRL4L-)cwVWh|QG{xIvO4RfhDLM_pnbz?h$KxRRPbw=% zpEM@i=y2Tjr>Pd$O1Jxw8})O+G9MwpG9OUmj5yyVxd(;b1x-X#gjF=;??UXluTR^) zm|Qu_VFMKg7Vt8ai+AAyb#wOXdU;P$`I0QmP0y~M>5=qnpP|>3oNB#1b?u=y`^pGW zgr=34=}=!k^q4)ZmP;v1@%+=EP`(^*yQA2CdH+5H%iX7f=m|9~7qB@Mn-~A?4#6}w z_Yp7y#MY!k#mF*PPG4x)A6fyK7OVz(^D!A~$Gy)G36xZ`F;rVC2|NK| z-V3uj7LMtbyo{>JjK-~1Pgi<>H-F!EDi)iMOA`KgSmN)RkTvk%gNt^a;`IG}HU&#j z<>XJt69;qr5%j_#WiM!xW!gZA0nPFk6z2HG9NG&FTQKQt)4M^V@HDQ@t75S)wPOVq zjV`gXY3oYU#*4D~m9!Xrxe!jQ#_xD&|g7$8J zKs8UzrmxGvD8<%%mu&CKli$BAngKBz!78tbQK*l<;f0GN3wZ`Vqu%pOdg$00EE(mo zCTpYV9_~dDkKXm~HpPP4AxQ?K<+{eANY{NtUwImasnm6jXi_m7-jy`3|{q0m#- z+wk#wl=Th$im_901=P#_-+6Q1ng(Zp-zY~> zW{sZAIEO@ZW)J<@p3wm@+@@_xhT9v-0;o|?S;c=9iNM`eMg%Wke5US7-;knIeo~3l zO(Idl$+CR1HeF{x79%>zmSs9R*X`}^tCTq`tOd8T?yE4`6aRh+^&ON2r8zHv+*{*1 zzU4^%x^|d#{!U)vY#R?0DYrrZF$((0?z10?hPLD0Bnd?60HVlaeyU5(T6o z&(3U{`c_hV6)W=_2Hi`e-&B$mk7$}OLG2FUcP58q)%8{6+u*rr!sP7Rtm}&Jsvv*m z@|^3K&P@&p*liu!4;DK19C!{JM{%Y7;lPQGg${}B;=hOeT*)Pj^>2%DwPc$|I=6@Q z&;9*wKV2`(=B~3midxS}^GBF9==ixk9D6tV)XuR!CU)L?xZc@$YyCLzxQ;|as|IKe&-f+LjzG6t<)KEcTb?o*Bst5(7+Jf zt}Z&Rm_P$MuVKP}qc?@*@9p>{T;(U|>OrTlzG^;Y!>kdnp(5FBRY) z^Q!?<*mzEnV@4~9ovU(UDo(DbVk`r`kovcG1YS&zw#%N5NKvkFsdb*yap#Z+eq!Zu ziZY9GIojMB0!?X-EBn}4fAbty49~Nw@t+>KO#xg`L6^FL%gOPWYQio3bzi{W{&+lL zSuMd}3TrdQHw*GoFYE0}8$4Y(*pa>Eq`u5U4Z!^%Je>4Xj%%X42&EDmve-;9Q_`q$FYBv6avl{ZP@}JFWF> zjJB*7R2xBlg=u_q*N*~tTP}wtT8{&WF%oan 
zCt#mb88&AYR!Jw14PH}x38gtm^>C#Obo`pnGKDluLi5_6dHAS@MwZMU`nQaz*1 zg&?@$PIcF+id+<$YCTB!iPSu+P76xcZf&lgE%6%TG3AvcDC+|GGf!h{%eSfnPT7Ai zerk>W%ioyB_*5JUcrR?;z1ug*;a>x>!M-r@4ouwe$u9g|Tt z2dSm1ev~qNtMj7T{Ix%deY{yutiRCn=>FK#TOk&fciMHX+J=fpa2{swu)n=q!QTDG z=m2%J-ts|%y{Hx2nW;mr-}eb(yOqs264n>;li7cp>quVH@SXC>og`Sg-eDKFoqt=s zA7!<2t;S#dn`d&G+jMRJkpFn^<|VCZV_#!UIQ4pCL(a)nJB}CrA|Y%SrtiuE8Kf&| z_*vr4A76=;r+ac)vWaAjfdG&G5me9l3bTlp;#(^>UW&Qh3`K5mGrp(pd)QemmY&Bp zA}ASv8<-1~01D8Y+4WeC_jGAk^S`A(Wj=a%x##$mXl{V8`50GRp%54e>O~fi^xi~% zdTbX}#M9$Zhd=Hq&XNmigJvd#AzP5|Rq3~H1*IB@R$y1?ox7L*Y^iBvEj*h<4lTA59`m5D20XSp$BwQ-cVD$DQQId zQez|uB-{eXj!@d9x__0(q4Ja2i$1JSnTzF&ZhV?K?`+y*yZK;xdEN-w!^3~St2@zg z!X3G%{;_j3)8AL=G0@qIdx@LW(&{jz98%jE3xm7a5sTpJp&&ox6-Q> zHS8{_fIrL@8Ui@Sy9Ybe(?Vh6%J$R0CxEaXK;DV}siqf)*brJkYVnReG)VPXKxT@P zb3Cc*?iHTAb1Zm9>m`GFn?tsbIsCrZ2nd*{*##6H{1bO(K+cgO5Qx@u7%mmw!PV^u zTC7+0HxYJs2$=P{UGe~s@E2nbqU!n3By7T@)-Exs+_#s9>=ad5%PrUNL{IgmF1wTj zbg=|EQn~S2pKA{V;7tOH{$7pW{)klLO$QW^YP{lxfx{@4{U|D&7p+hsoIFsO>3P;* zl%Zg>ZewHLA8U$f#auo5=>OHushVd*!=0B*!2d`EqX3T*@rib0n8ivOf@4j>h!ghj zdV5eTHc|@?PqoebG|jd~#~a_6JMMLO$*rB#%A|+zRP#ZfYW!9nXExtfZ_P$mAeZPC zhx|{(yfc~u;)63l7^hyg5J78MA@JWCK62&dgk0s=} zPs{(N&3XS5oK=PYJvAij5a&>>pO5cy!gzR?NG0FcT4`C+3nPo;`T6-gW^niMES6%V zo2zO!!!}kMvtIHvKgEYp>g zo=?c0YBeD7B518Bam{yp_NQ|Jen>fJqU%qxzJVq`qlaFhN8Wd?pb8(V?KiLQ`t31u zv*qrpB?jVUq`N>hCl$;q>;C+oeQqp8Rn z-FnEJ$UvX)l43D(?_u9IF5V0CDkMz(LOI5i_rUbojbkLZMrdW^cEDAa^!76uGlidVsoqf(W?k68ifxoC)HJX73U) zRuZ)cnAdHJ8mz?nMC24>?;76i$y4l?<T&^d=~B>;0&qTOsIRpTr;OegoMwt z`0C}hZ~k6zi650D+-o$gz31n<8N1#*7U6p|^5V11ke*!e>tl>k40MRTc)n9CJDYzj z0DtI=?U&b;y$Fhj{Vb@hpkPt{F6C&TgC(dKK(YWd#)0*MFwvd6TJ4+MD60k1rV1=Y z-L(efJ^6sP*Y1Per~aTIQlef|SBk@Z|MRmTf4L)xZ0*+&8K(@fkskYyYujoY0w10b z0(MERkXMEQhXD`w9q3o2;&5y_J&G}Up&;Mh7+bGp9w&{h-}v5@L6omsmPaZhnKxS9 z;ZToA3{_?d*LpXQiDVy6t|=b9*C5bjKtDdRI%X|oW5RIL_=2VzRF@B;ZhUqi1);>u z5Uqbva6gbYx1D>*YMM+K2&f7!7D7#&K>9b&X5G#q-6tspgF0g4Uc>Sr&Z_+FL2-y) z59j;c_8i`{j0$!TpFBs<>myOXgNw3jXN$Y@c@ub72H)@tHqnWnmM%R9ADHEqRz}zF 
zT*J?3PuG^YUNbet)*j%@VQop@V&4sA6ubfRqZ&*Q3KBW6s`mEwBLTZ)ploW>=XXM< zHE(qNnqqXo%E8XjF^t6J3ALcTch39{jD;|d0@~`_=t1_hlNuR%iI8683g*HK@Cw^t zNDiZ1&<1L0RdscB)t;ckt9>MY-E*DslJ*oY0HWL`4%d-~i9yoUCn1S$a1Mf@>rW`W zzwNWVo==Mr<-EoWJjQtiz<5VP-unFNu}}NnDCB->(3OlH;<>Tac!>Cc>Pl$(Ev;;o zRaCvzqN9D-0fM=?WQz#i?J5xIfk%CZINfFGI)&}~*&uc-?VBXW=tEVa7B zCrXU=yZM=8L8{l3gdWqEjCnVcLqdY{M*gTV1B2jtJ*~o5=6Z{j7>=61pL2n-E&O=C zqoeEW=(@pEn~cxESt@PW_1}d1RR@SDyYfp>r<nTkDM_ z6Vpj4;YwBRMgzxXnL-4b|jul-*>1YYD7<7eRt@e|GmYW)8u1$=@y!`@r5a zu%G*Pd#5pRl;;-M6Nr6fwUV{Or_PPw)eLANK)hu0_U1AVH}`ORFy1?u-s1qRn(CAn zCj^*!Rm1e#r%&0WrPkm%I!Z@pG)lJH#uehjYB)Ael;V#teyP%jsxlcZ&4XW1Hik9Q z*5t^0P0tTY4cM;P6n1f5?W_ht~?>KEePjqQcd*rc=2HrEo*HA{EJ>$2nmjB@eg6ibjIx zPSzDdsX05al@EjUG!#FmfE*XZ$ymgk2K!d=dB(l)0bDWk36oUHRklY>@8OFL^yY8N zBdR%az^6Ny-kHh8yl=~@l~7*e}AXPB)Pf1)T2 zh-x9jpMhx9KgtHQTo1VD0xxW|rZ*AB{l@F~_aZ>+qxj!Mmyt{Ro&hNbwIn z5)_A<)3KOgsHNkioh?T196+%U^fZY1!(0)&%Q(`0JI*+{hmXCb8L!ZJtsD*wpBqB6 zZLBLUXUP&{#ABLPKr*bO2=6S$pwBDSgm6IGmS|^%n&J}gvtfJ)uGR0z&kdd-ZIkGC z3?^4aQSXiPd>2S)KywzNW&H2ETt~6A!aEM8j9Du5ua1)3&yCycLXjSkO>drt(1NMi z*;iSYtOXDJkVP=(KaapM_hqXAcpe~R)z6nSJRSkL1hU*Y4G$_GKH8$_G|%_C$o?lPTGgU+cRDEj-J5xCZu7iSCp4zy*p=q&)l6<7F&nXmO z8qq|Y5{HgQhI&fLZWMEygDP~YC^3O(O|sc9(g*)nU<3Q$Hz~&<2ISE;^WU5nI0rY643Y&Vz zN;ax>Sg+?s$^_L`*d!R}kkvP0+`>!sa`J==tQV*Y380T?&Y0z53s_C>Ey^#c@kSJC zK%x=9ffCEs((8^;eD7JILJ;MP8I6%|f<`x&dJWbaa$m9GO1? 
z8J)x@14Y*Ay~{QxO#B(f#kaIZEy1DXssCBOnlM}!dO+HN%cV+T(OzU=(=GGEFVq}L zHK)dQOa-k!FWmOXhV2Vw7rw8Xw<);I@_5Q-XHAj4v-&DRoRu!#j?^rGPIp~Cvl%Fe z;Au$!Lc5>Mx-O@i9-dudNSfW_ZNApIE>YieNUP|^SkHwQNKMXD;7*|niMIUke}5fo z>QJKhf&|XU+`|C5{qTpjzH^{@9N(T)p`)W>773Sz#hm!XsP^n>?E=TM=(RyxjJonx zXZcE(W-jNrBWdk^!Xz|*Z=pdBZ}In?_5T>8XJq{W?6iL5eJv(ZRvkJ_CfjRY8nBY> z#>Zd-8l6Xg2iDpDq8Q^l8?^)7y4e)0>y5b&_PL;8)2xNpov$zDToIAz+W=5inIHyY z?nR&W_IFvbTtB|1-!efaUrzkqhtU52qxFJYQG~K^zz`rTvT&GJ-80yCm|D_q4!Qd% zhO+8eMNtjXZ6&ucui2*XuB#}HxS*a_?tsHXp*dF=C^JQ;7mb84To3V za0YOcgxQR_L!4i@)80eio;XwMQ=>$8cLZgwZ9-ZR>N5AW9?xB{MZd!l!fIY9*&hIF zkAN-zmI!O!@EB{XnU}in+KbHQub=6Iut0Z^h`i%fK&H>lN2Yb}f$@WN1*`Bn^>+G* zmSC-V_g~2Oer}hLO3y`m%VGMJZb!cJDkd>aFy4vY*{j8L@_4&v^Q(Y>7@H#Z?_K4K zrNcSN;0oOfJr$*^txMnxfo6oJ+7T?l(Q;hFqavQK-lkUQpKdZuE>XB*CX@T~Q#9NG zSeIDq7K2gVUb#Pa@gXfo&I$-K24WN`&zDW1jO3du?gI3v3tct1G7MPGCO=>Gpa)WZ z_Lqpn&SUDW4!5sD^8-tU#0!mwLj`m6j2tf}+mTh|uVW!n#Js21>bIcK(gd;Vzg172 z5`7{KhEms+PIcPvXze69kBgCeQC2B^0E5m{4r7gI6%|8Zo`clN{pBm+^h|BX7!bfQ zI3$XxEd;GN8@G{ZsWeb**kK(ADh?Z60b@?%VppT>&jz`Zg_hoT-(Pl-gLD2O$~iVb z7_k~UY3V+cwgvk);0RYM>{5Mxm)pPb6<4N3dj)5-94Uj3#X==GR6&n^st^%F|8*eLSxBV-*k0MrGSC&-b*52T?F>=B z+m{n%ioU_hA@DlA% z)9U)4Nz!mu!L>{0fnUCx;`i4n!ry`bB%s!3kXQ+~Rp1$0c_ZBfK3UWCarDX4WKkv7r38$emvsCEMe}uL)W^DgOk
R;jM&Q)b;MAFSmudGG*~~lq!k$fXn#!KFxM|{wSiv&dpMqoaFKJ$Df-{(l`-yT8!r8jW6u!qx8f?I(!k6v<*~g76&m^0N@W z=5u$rz!Mj+<3L&BGX1ou_2x(VK_IL)yJH#9dQ@p?Tc09Pua{X3rKgRA$I;AfymIJs zo}2keMkuyb`R_2Z`MVIdkbZJKu_kZUmjTtm!k@9R#u2$HZsO5RkbQuikynP30%x#X z$SE9Tvi^03@YAD**wYF?i3q_4o})MmW~51iIE!o^Bog0mWNo8)qnj{VoszFNl`ZbV zmnLV&mnLqP#Xz@@9AN!h1p+${7@u*zzyUrExSr2JQNG$PBXCWAsocM{d*5V4jChi?i)s6AJ-PU8^Q|NPRx%_=wru$aawrDH3j2FDr}C4v%SnVrH2mpb=C#>k z$vF$59%6?3Q0JU+=!GY{e$!e-H6=CUj^4r49#Wf&N7%lki92Mys&iIVy$oGduC@s~ zbDm7K8$z7~T71b68e2`Yp5*dhJ2z6|WZLQrP@xcam;Z0UJKtw%o2O>(nt3if*bkEi zuq_2O97Rq(_N_?Om_>7F0cR3MV}JAKh6ZOpwR!&ljX}^4mOrmA@)n>#?)d-#zt}BY zVPxK`sLLAsU~@yF5_oi=uA`%q&hwK~^6wSi>lBqaf8qW^YPM?<_II{Fm|&W}iC>#H zMQT!)i?@NN_L~(7YXM2(NZtP>NrZo~ynIKX$*W~X5^Q5E%AiuobW!Nt*72FQ8hSJ% zV^2)H5RGhO>DTo6!`8Ozx%&TYc0Qs7q^#8fZf*V@{O?4fcDD)&XEn=dzmyzh2JHi4hn= zA5`@#7~ved3xMh~C~I8Ddpp!G>h&9`u1GAla;HVxbZYnMY!})VhP2nre~lYFThs>B zFI1XMJx{cVsmn%<)!9*Zc#E^H#j;$UgI&D|Qx{+~Qtae|4c#B;`iiF=J4@@#mx%K} zaHYh#Ns;NYsHZbLJn&B=6E>^mLK1 zJhoXqN_^16SzsLD*y9F>AH%Eat*CN!K4M7Xw9VBH;$~NsD~1uPAgw|@B%!| zKl4;dN@t0@M)}an;2nT-bP1`#KFKfg(^f|wm-&3c%LlL&#DQk^elM~t%}K_AUu5w` zb=$ud(Z|2C&Km zhEuBKM?9ivWTB1U&mnWw&t>!(t;<_vdHeG~bZ_kMpV0F*LvkQ9f3N?5a0UYe2#z1T zx(_Ahzu;gWsZ81t25!eG?1A|^3vbCOt)!>kRSk$1NzD%h;TY|fD_>=oF{*M%C}I%NCzfav%G$k-#<1<|8N2i&#tkvJ!cH0loC6tzS7E#!93St)(v> z+ML8}dZbUSH(m!PvYyNsj6q#~S}yOImGyY(yL#)LpH=LBi5CZ$WqSH|3Q6{PF)=YR zvkT(6%}Wk z=+>|d&DiE9W=nR&-R1M2;7U*cgjsIi@P(Wi_SAX6Ika;79iJ+b$FxX}avjG-dP=mQ zB@&Wr2nvdb8J(*&F+|@y4*{c$AjYuJ=*h(D8I5$HGPxt-2agqJ z9^8bl)oNRSK#;2%vRMcTnfs(Jc`5Sf^n4^V1q7ly)biYnE7DiMkL?|qKOZnMyYk@) z)h&qvq{Ly_3$g=}-4zf_Rs0j?t3C?C8NY2TS0seOl;6pdcc0_hC30cYxyW{T5Zw4E zXD>}d2RW{_m#bnNMZ;coQ;)|GE79@nt$0P^Xn?3 zO9jnUbg;Uqk-d4ilYPEE@N7BWl4kZ?69Edt6QOfttP7aw+K9DO?kRrL$peVw$}dNz zvvK~=FEZ?B%(p1xQQ9F_VMQ(P-he$>y4oKMlsn@&yoWKQ8EP97Rj9tq!Ivp>=a%Nr zb>vKH#aw)qP#WPF+VcOz@kGXm5olGzj(TC3c?zEo6eS21;Z*tk&B@(bsH>aVoY|MK zknWaKIJ$B!&!Vhkd{ZGK`9d|n2R<@mZoypV%e(jsfuj~a^UpmyiR!&I2s#~@x{=>h 
zoSvV4Y}X%*m!FlO9Or%~;WF2;s0|X0or&a)Vk+&BnRjn(P;+Y&%}GUG>tlP$F6~`W z@+XTYnj$Z8H#>!Dc`g0^;o|% zaS&CrJ=+=yve^N2TdJupZVI?>$yb0`qF7AhgIHvGl4t*&2PBF}4`_r<49Vv!K>Qh` zF>n+N_ll)9w!V||rfGHl<&?rjyR-Y3x6w9dItuzp$v(b`-ct+vuOgxbmmeYu_s+jB zHJS{-d#Bw)fav~f7yIKc#drkKsLda?!ehZo-#9g+2b#SXkC#zm$?rX`PJ&>Nv2E2X z{!INTR!n<}#cmqBmemmf+A}U$>669CuJ`9r-vKyT!G8MvfT5i~Kz|joogtGVw}T-&V5{N2ocUaI%kT%))g1O(VoB=t}$k5XB}Fdo8UChD0LO?u;zeKe|ute`9_)H zoh&LisqjO9h2gXnfh$|y_#B@}H6!nA4?eZfPoXNowLY^^on2-TWhoW!f$gl%D!tX+ z_L11OOikB`t0~EHUV1|HMmi69@>w+FH8M^r7SxR6{`1dLisAvdkcw=g0p&aRa$Gjl z^8KUECn%WU{)24Z7^vloncH)1vym%J9cyD!JsqB&p8p&Jz`9Tzp<4Jb$Gn+9wz<$| zU6~S7RD28ZjVTCo?L!`6}7>WQpFeHSJ`P3@Sh0v_2+jEis=$4v+ zsDhR%bYXCqZg(f)z+_c!-`b-?tGwF$dPY@sJ-wI8qHPqZ@A)KvIM{b;GqVxw@i=qn z`azltv1UjQoCeQinSupdmx9imPBsXh3dhQ&PD{sP4S2Z*DQD(&hm>qwb$dN<`1PJP z{pz)7p7g8kzaeaIbLyLi2an8uTIP%W;_*jXCi2PQV4+1|hd~+V)62loVxWBDfYIbe zBOn?of-w{WCH6+`+kin}zX%qxce$lNB-{amVMZe}IvDh|abEENszU}4gw1#lqG(+u z$Ui$P#}|Eqsi&dMP-W6(ky;4_Rvv36&*5CgNy7}=y8Siau{RkmG2gd!2GTq_PWT=$96 zE9QGtY5)!o6-pD+nS;m}OpCUe1peX4J%L;H&d!lq+J%LzEqbQCWZC7J3vO=9hz&M; zZ|k&P^?IU>>)`4-hRk-a=j%E(#}{sHn!+#FoUe#(bD4b6FPmL>|?HFen*hINluv|nEs9p==|&s8F%*hb~z zMUCH{fGKQZiJ*|A9AZ}%MysUz@=c8tjR&uJpuauvI&=L&d+uH_DR+w~19C;a-g0sd z33vf_EkrTl!2<~629jXcbt*j02VcR9r1*uqA4k=*l-$CuZA1w|Fksa>9zD#!zhrST zQu4ZrpB&v~uriJ-TEB#O6t7NRTVHrG>|9xNM<`6Amt5SgYy=A3j-#MBpcr`S$e;a) zKW5A&mupm@ztjy4feKUKkOj7+MoV+FGtz`Q3nwccDFjBS4XXVnke0T3hBAz1p3J$% zu`b){((^SYs2AxGtaU3SR_s^g5G;p14sj{B2e_*A8h^=YC)RB=iT3>^UKoH$VcVc@ z$>O{Wj$X^9v}H)s5WBT7R?O>9%uRrbxN9nhB3W))nhRq! 
zRrvNQNE7gcNu&YiB3~++x+$Po963j;#YHw#*sN_$G5)2OlfM0CzDU+H>d*Ggx9s+5Rv}j1`sTzaoFv zumK160(eX&aV&c7NnsZ|=@qW_h;BaG*ZtGe7k>T^9ZV+K87RrWjt)?XXGOuVuErt^ zCmRxjgMgT>9}n?1(rYWwA^b6JrwXCG+WpsxaRA8H{}_C0t?j##ld#Sv1qocqYmyA0 zyvNfrNUaPDMuyA^C0VvVM{cSMN!h@OtIQUHc@aNmT!&DqxFg5s@x3x}9jR*?p@>Cj zBOx~F7=}t8N#z7LHpM8apW=2@oL4JEKQeJJFb7`AYC(Yc)EbT6|ABLZLx=_z57LOt z@>g+d;$#z)XE`&;Fh`m$=P)p{AdG#gSxZVVE~`li@~4A54ecaQ+Cj~g-A{%HqCV}& z2UIa!FtOS3b`k|UWJ#i=gjl>itK>IYQ*;|`N}=Db4KHs*wU^Cx_QK&)vVSIDE0>?mfTIbb&DLxMr+K z{vn9Z;k&!rJv>QUN5(-Qp{saZNQgp4*6W_*BGGbe-)79$i&XyGum{Wj0+?3KPt-19bZu50RL!$S@ytMO56*o(uB6* zG;!*>n!El#BNNUPC=B3M55If-@1Ty8CjmkV`V3C0a9VQv)1vFP_I1PV2dLECrB+(> zL!;OBQB%X0-bK&Uq}I+Lh1^%++W*t&l9D6Jr~Ybmg9ht%Z#g1#tiT=Lk;Saz!n${9 zke%dCw5E!(1wL!aBkvz~D2pg^UiD(-t3^+VorKY2U^w(I`_2Jf!0389FNgvw@hgzf z;)!f*l^$o$H7bnYP4PWj z>NLsnuLUa-c+M{cZ3ny5t_zoacBPBb0>OWlnKTD zK7WhtymtBsMiUhnw9(JTiARfg)IbiZjEJ0ckuSbyi!v}ZIPk9WeU+ak8hD#O4wl3B zRV1#0V2z?n0zyNa)4#mZRZF9nE8Mik+oQc50cvJJ_d*2Cpe!OQL+6(n$g;P;gOQ-BbV{dV-9>=)NU{<3zG>6ou1o7jQz*Q zl{V(BM12i|;f29sUW%*?5nC7(dK+waa*=f}$4lHcp68?j`j5WQvq=!NK*?Dz6BK>! zCEgplO?($yBh&^NZ$Z6p!G$mYp%v$qwKKW`HXx1QaP3ly59G28gK_d3-3=QOE1`~6E`;%#@B0Oxz(8HOl}$kZ-UV}9AGnc$z37hYYWlP{z}9mvu9%(Lnx8kR@!F>P|~<1-YARj-8SI&g0qJrgytq|i?o zbVs6B_Z?c5Xcg6w=qC>#dyVU`V_6fT=4%Au7@Og;@jJ-U9VpG0DYpWn4zYo2N6det zfvXt=nArB+?RIO3T=ppZ23lf6-)TWh*|}L5GHA;IqN&dBF&JsgG*rX#mX}EJN2Xw? 
z^(q(!pKbX&DBrl((&#zLuioUkd`nf86TA&h7e@i=EV6A+20%QEwIyxq0Yqz?reAg9 zYVuUel^gl zF;+X~Ths~ftmGIFGhqXKqLtBHhr~UQ`W=ORMZfB=grfsuPN_M%eWQmP-n{A>oB3(6 z3qQXIduMx51^g97qNtBje@>`BDb3b`$kIawHF)0!U>6VTd~Q*{qkUd(a^plRo+r5^;puNK=rqZ0`_u z{T;v<^{eGjoEuE?Y$A}y2u+Z2E8Ud=;CwM`5!;K}ghjMc;bArngX=14`DMw)o>#UB zR1pLuCl0p8w{0?aZ&mM1HIR}BqX%ku?M`MSr;sY`$q^gFDfv$#3f!)v#F$1%D{Xsb zed-8|kMWAwAKyIy7Y5vpV><2O_wzX`Wi0#mU6`_yU`VBqb$i$NO;?F1ZJ2_o(UpneN0@Jo5H zsI9#8)^qW5SenF@_4{znD4M=ObuK1H4iHAI>r1+s^edD4?ByOZ>1jMuH)N@F!9|&3 z-ReAL#sb!0ibKeS=O{6Dxg`c^rP2(~ckQP*)BrI|JR=*CEL*Yj7z(TUlQ-%h>52Ie z(;UTch`h(ouu_o_W@dfKOu1X3E~rYf(AMJ0ds=!a<;ag zK=SVPth$?CXm%v<5`{H-YurqNbO# z?i>LWrX}4>Kr+t>aG_7~a%tJw-d;rnPA$!r*ANQS^@Fh~OE3HN+-OCm&C+YW*nMYF zVs|b4m!3V-$k>23x$i4Pg}iHXnZrNxYmqd$zVWdT!e{lJy%dx@#}1$GM1|i`_MZ4Y zkdQg)#%vNQ^zIW-QICF>iKy&gl2Tt34U$oWD8^wltAKFv%dQU`Pt;Is|(IDrb% z%+SkT%#Agwqb+%VIV4+uzFVFa#hRa zkPsM2E>{a8+9nv8GE>>(HgG+->3T57y^eP5jsKu%PFa##_YzA0I2ke|kzPkv#vfd; zuQ2%X?qe`wRR+?L0)wUNWt-*PeiXern9cv<9p7N=am*5*4QXdxG?uK05?iwH>Y^(u zdp=28S27Tf1)~35jz@w}*e4z}epuy%Cd|;g-DhlW=p#@MdB4uy`ir0fk#_RQn~788 zmg5O}H|LGTm8hj?MVgAG7pVcPL%HtGtyCjAy%YKoaf^@nfkzaS*Dx!M} z9BuvDNwH?jwC7%o;gqKD4w8_9u>Ry17!}u$3V!S4G7*Du(O6BMdlnohnyU~znb+=P zGQKW{LA>xiQN8-}H)h+C5M?Osz(wJfckzV#%|18|jrWk%w1Ifqw(3GuzV2f-JBrL} z&TWS2@AGsy;6wJatlRtm#xO{n@sJ{DlGdEJsMx$WpQvNxlil(tGZX{mzCb_~dj6T~ z&O0yiR{}00PHz^iI$L>#GyBJx@tWn&8_Lp3BjB-~c$Gi+UXXM2U6qRn!P`F9I~eEI zHu)H`J}EO|WTK(srF~8K6xN^oM-L8X56AvAA6X9~M662RBj6o$y5-rkOwl1a8rkzz zc0w6F{|)nX_QRRNR?NB)N<)oA5a;5Rf7I_Sg8Pf|QMABTIG8eWeAgAbmozHkQL@k? 
zFAfQ}LZM5nqTVxSKvNlsKf;47$*&54r!GtMcTX(wyrR4evA!8VX#nzOOXIZjS%xROfm+SZo{VVuNzAyU5>Uxie7;h|%g^$6iLUo*M1$O*bYw zHO2jH#uC!WWs^UUc^!aWWw08-27Qg^3tYWG6^%_W%r8y8@`0EB^9a0ukbUx1M)VCB z>Z5r5+^Bc(zM8Gx!ie?S^_nvt^F8qxX-H238JRU?Zn=Kp&unBH-)Jr;HnUOE2yc+L zRt?&_haps51QMko61ksV29#VjnvUHHVCwtqSaoA{D<3G4Ck%9%9J2B>DQ9e6x7;`a zjak?Zmbb{uXc&~iSt~=4pY>N3`XT7HyTTblCy74o*DQ)Hk(TBh8Kex=%Xy%Q2-rC{ zvf1BX3+tFdYtyG%aYP)UP+^Lb z7);DCU_8=8c7u$pwX!-zgk50oa8n`;wnr2~(MI{9l*51yi<~=#1Kmhj&T1l54D|I^ zcU$G<b@zj=6()JetYa!DzKlC=f-@h^? z-}^Cj20XzRLA+F}H-0%4zG`@O9o}Dl8-{ckVrKCz8J>i}mPtZkoicv-ZTc-Flz@qj zA7_T+x5*ZBp9AG)Brw=A&V$v68e}gCHXI13iRZfW4hVwD5gRugB1(cdp*7}#1iccEF`;9G7(Pfa*u~PL4lUCGoxvnB{~~Cm8cD`SMx7KK0!a|uwUr(?_$js( zV3r(wJ9va13G893a6tLY1`2t-HZ8|x9UXG(FGLoM zKaT@fCXOC}65a7J7O;0HuO>gyOg6Z|q+$QdDM~f7o8IG2Fgxv2ySzo7Wt~#8_k4Cv z5e$4qcFqP|ENy<6wz-?B`_=tBC+;P4?=1GOV>~XF;uDhe&}j$o}#SGBzz4CTRuVwIncInGquIGpWlkL z9A%Rq8e%hIupVPXD;*2ijjCE(M!(=D3^`xouVihz_n7*{yZN^F=51yQqGSFup?tAH zoLxw*3$e{|Mf^(cJ740IJ@k-3Sf9;XFn(z91p`;rRzbN>lG`=Zk-7w~VKXEOjFZ%6 zqpEsyUnU=Tdq*f4h7-SckFN$itPpsMJtuPSpC|_CLmz^(ua7VSwG7SHrs=h7G#l&3 zh=?VmziwwWr)arN-Wd`BST59g-Bj_N3Qb5mk?VBM6is-_HAeEZ4!!Y}1S!GT6o3DA z-~}$~e&1;T?<+)ddjwH4dd!D~EXQ(jLJ-{-zSxBrpt@1v`@xn+$qOD7rl-uW>FIlh zLAY5@tA+?93Sb|^+7(;9Use?4&S}e7RSeS&6#@&|&qT(F-}Oaoo>2=}WEuWD8pUP8 zU267~R)SRic!jWODf`Ou`Z~LQW<=zS6v#F#ePLJ%GKB^zRq!y@QkaJUa2!$`6?+AK zVNpb$rtQJrGF5w@j1*yz7%s~njt@{OzwV=gvv%0pQH*S|+@2`iqms8qHdjNE{8eLY zlCG=l^ZNyvK2*KAV!cI8*-Nc%eoa0L(Oy+M4T{`w3X0M!%F`Zl} zZs%NbBaBptb9zqP|8$s@sGXt&kDIA8;$C=PQqmia6a@zB>v-@JAzQKA*CcuA7!mVp z;}F``(~; z+(08KQnPa~p|EaePob>MrRt4G>GKpa3gvM&VCXe6xMz|wxCg(Ol?*9xNT8U9DT4bK zcwiVzQl^3yedH#QuwBIB!G6Yi34vjo3o)g55q43zFhdvF3)M4IJUB|2PWWEXD=w;h zqPfF0QE7xM6aPH|caYt=HSSFMEuofo@Hu*oEIFKghKKhbImP-3@jUDmhS8!y#VA|b?Krg|75bm??J@F6C|`^ zXA865451+u#9E2~*$|Mlw%tHb;F-CSBm;JsK;rGz<|GQr-tqMk{SC-U^C(0#^fVsR zdPK>Gq;B1RPemz5ge3!Ipdia3jXquD;VyL;JjS)Vfl05aAqS?cgB&0nX-}T5cIVX* z2prCc4~SNAMP~%mQ$@F0<3GMlMF<9D!VkiSC~xok&yk0uKIf-sUo0~Tyo;odL20>r 
z?37bl3x4Gq;v^)GlnNp=2{y8J83${=W$3|vZ32oCH66BV%L_?hQ(IS zR}ljU;lH*lIq{A~$y~X?TU7j;)Tgm?CLaxl{sYtY-o4kk*uoU^t~*AAVJK7heTMdM zu%3T({!LrWi-EuX?l*4D>zXJtChYu9Vtr?a#gS*UQ;x@^i{$vooggc|BT!p-{ClEX zJ5uSMj$vH3ZA?il6NnF$?asNRN+9!8HFFlXy_&DS|7rHx@Z{duQO4nQ&Mc$PsH0_$ z1Ia$M1bO|jyWwRiCGXZKraguZW0o(AvSiFt`L6eCP5-?gDgTX1u0MIf5hx5qi5U`t zcX=h$!hPvExZM1AmQyg4EDqotC11}DSD~GKK@v-;CS51$@8f52xM=+=*_~(?7x}HZ zbjpB+>WO`#AxOse_cO40g{*_=g<_V|wA|{FGdd`y#Od#3Yvw&D3Q6zey(?eRdH_w& zjgey5BuHPTHeFwc)He6?*~SNq9!0|tz2C=be#-;B7dJ%4=G;8hJlrN{NP*hxe|bOO zVdDB(7Ne?iwz?x#??p!{wftIBM(j98zT@Jm!0|x-yqf4{NA2kYG#_ zZwx%|7^Kb&3GoEe_T*>YlyKV=KPm8sTkr0W&%i?t?YANO-A^ZrpnQ#!M>~zq?~!3~ z8180DDW5ML@OdS1u286w@BzdQ?=wLqm0HmAtaqT72W5SboCMSDFbOL zzSEbg$&WG3w}ZK^(jHiZjDV{6p`m--Oey;WR420E%U6K)>+j;zv%&dcHN zm{#1i5wfhUzR;^Jq!}ahWM~ZK-NErRNn%vW$X(1vIC%y5BT)VmOUlMLyH4z}OZs zlf+8M%p-q&L2Q>+l ze(zR@IrT?#E%q3h#5Po7FW7eKw!ID4I#Ap9CA1Tmc=JrmT}5uk=?`M!h2V+eEAvxiCO$7`uw_J91>+frzR2gv_QVyzHi~1s(5Q z5)m1qmB3qjMoC>hhRHI2Tv^V3$(ts9qcOjJ4&?G9p%ve>z^K4U#9zb#^Cf8vl>@d1 zH)2zijY||4L9n-ah)v+vOBSauKh_g)vCFX=4LG6XwYu0@S?5>pZ*mhNFt*>O{`%y% z)PMmUmDHnL+b`&`?IQlQKM|KpbKB|6&1_d>73$7BnH$nS+v5EjN7pCr{IikwTB3$0 zMg3RKxrbxlJV#yjg*Ie0(y`4^Gu^F#N3c#=Z6#qWCxghut%yB&@#C zxNeDJtGASfCP-3FU1E5?lPcDm{P45$Z0%m+2LkzQABf&dwBv1bzjWd4xm&oqByeu# z)5>eYwt1n}%&qx;-;m4CF5$mA&_s-(R*A%bGv+^3=%buvb`T80op?*5I`c%yRREB=Z%lX%pzSh=y8>>lR zPg|gV;2f+eE#++Y=Qte?d3|A?7~!+#Bqec{ZXriweGp(HzWEy+yzRP+K0%MF- zy5yk@OQ-LTuUWgnX=2nb9q!`Ed<|Xoa_q5CNtF*bK+Ae;(pu0Lzw&K?C#{KGnU?cW zPDii(6<(JAe@a$}2g$tO(R?5DNhX_p_p0xA(rw=rcuz0H>p8aPSpRNh(q6(ThWS`K zQ15S9C*%FHea4@xB_mpy5&k>1V-^JYxYcT+2(H? z%yQ&}dm}?ztwB}avAhmbed>G(NbY3RFBik?i~2t6@U3s}#GqVxZ+bplP&15OMA1au zy#)X#cP713yX`-*i@n%>x%|-5CZtpY=i=$#N5%6fCoGDmz~S;g_XmDxQUh{+qA)NQ zPz*=k7)*|^dy`4&#H{W)dG;~PfSQ9gh|vfPihP>(iCu+R=_D z5?;Hm+DGfAO{9(wV1pLNn6K3=WO&A%Zn~XSULblj*XGuyxSUG! 
z&QOQ!jOr14=+b*`$!F$dEh-|Z0i8+=UVKMA#F&<-dV-TH@zNBQ^X1PW^Sn%^Ld9IS z^z=g4`kK>7v+<_+Ab2RkV2P24X-20xJScN{K2t$__>q3&x8Ru9B3asZI!qIN4iC77 z+qPOwuw$W}%K-YtHdms@xF4piVO}LyXcp@0jQ<4(F(PCqm#Nq-%fz`||1(28DlyRh zbb$n9W+3gz89RkOG=M!~Is|FB2E+iN1r%YRM6&lrJG8c~V??a=q)bgwRBET2-p@o4 z>&~18yN8~&F@B>)PGAH`TY#J9pXiabICE`sg1ZUE5^Qg@y%#MoZDtq|!H3hFc|#Rzy5~qlkgy#74rfa=tc{G4b#e6j~(C`t5$moI%^UGp~7=TQu%KU`EC4 zypZO*qorWa*(x8cd%vyYL;_Kv*H-n!=H#X&{)(>mLWE_X7U$qwgddn7L@it8UoNU> zt%oEC{|xT)z#LuE$k&2?G@h2o(unlDc@8G-i*!Zg<(}JrD3F9{k%y|(^fqR{ooN!&%f`QKOAT}74-c@io z21fFi$(~)Fy|pb7GaE$W141TQC&}yA1k8<7@(7GF#wswkY$mE3r}sEZ?vxf5c`a_b zI*m@m)!>L?s&d9pz+HP0PwxFcQ$~JJ|C824p+K+t2DtSIP66Kh6xrjE)=UwyspGs4 zr}M!pB7)n7EqqD8HZAGYTbQIpgP$zcBbnN`6vE=NlbZ9ebsSy<}m-~A&Y>4Th9AFT6P-*s`5Wk&coa6)nj-*xXZHLRw;w^$EB;;x1CQGV19pq z(YRZ>yM;lAkLUe<7mwS;<+UZYC7S-T{bzPDX{qC;^emH01{Zsgt6-HO&<_n0Yt)e| z^7k;>MF>Eq>|G`9jt6T%YGH`r_uJmzougaM$mIFa)ek)*uXNV%kAFs99b3OwSE?Pn zB&@jJtiMSUJtOY@V=>pKq`NnF&H63cMeVZ9!mjk@jA^6iUGm>?JaBHQo7ue02V7co z;TF6YdxZ{^ihj^pp+SR=8CX1lvs_wg$BWOsK2dBeJ5JvH99$beNa}z=G{xP6o!;G zxfm&9tqa_8Thj&w_YAPn z^SNaY&Tm=|VdvUAtql==fr3iQx2q2el<$_Ri264B*}o+(D5&M=xx9M54O+9`K$HHw zZMXEF&ll)Iq`g=ehSI({Swzy*49vU?7q_%}E$SrpsDgI5RQ2zK7ZKA-Mr>8ZxD;gcD zSa5K1W#m8y>{7_do`KL?y}R7GzvPeg?otFEQ7eUuv%6O=o-YtD^>$`=<#czrHk6Fa zpax6u;*XnIEdS&20kyBa z@7He0UTJU~kX%piw7b9q^n`hxu#GIwKnSxhIerX+xN~-0cK*+wq>L?!E~_Yv;X8Ko zxj-Ri6_Vz)=jLS3Tmm1)i(`gr8kxl0_a@i3y5G09;w60EpgeNNuy+h7fCPpoJ z#4?8>lv>{0v;Lm$4+o-lf0$J^H|&6WTkJMraOlFR5ad_;>EO_igsIUpX!rNUu+~+t zH&+akjZ8kPwmdMZFvA?}e#W3f2vR4nKbg{gz7N=u)KF7yVa2rQVS>OxPD?q|0bnqi z>`lbEAODeu;wtiacbs4e8F;F42oDE?g&c)toYbXu#>Jc5HMTvEcbm-IKuPB0$eH{_ zgaynV2;zx_Hv;B2%xehcwvtSRSATf$;t;f~5|i@4P|02c>dC|N$d~**ng$W7bk)w7 zH3~on^9k2+TVB64z;-UQbh+aVP7jM7pa41$`;O>SK9y|Dcb|KN>(V&Gg=Tw7q;+fe^8uOxqd+65dvvP`Z| zdF(hi%#`f$=7IDW53eRaL7ZvIYbtj)X_-E%qW_0PaF`s0Pb_1qwQV@5o~xVpw;|Rx zY+P3HE(M-Ih)A`My|1Lk`ddy5yYGWPo2R{sqYeus6R%(@cl~90RP+%03_{oLnbNdF zn+bPRB$>)V@5$Secm$33bZ>SChbr5KMQ!o7mLkq}bR?yygSfosK=y3Px7+x2*v9=Q 
zOf$3AB;h9M^D?Wsk0m9Q9S3%9FV1cEv;Z6L>dAvXMy@*c&JNjjJYz|Di^^tU19v@I zK6@F(WrXYJKNThDtWh^Vk%>Aj?!hcnQdZ&s<{t0?F3;5{EOwk5YvSsJ(((oe zc;+>C&$g%gvt5zNZiDI9GuvY>!BKMFd|xIXpK41^d5aY@yJl5%F<$Jho=+Nz0b_GU(dQHbSoz~>cWc|Lqx@=rVz9Hxigir zNaR~c*t_(QkPmEYV-y0;KRkA;Q`Xlx2td?YmsOGv{ee57=gEhlTzv{IQ9BQB?B(qL zyqOC5+9Iq|E;}lF?-%7}vz}WD7K@gA5()^}Cygl>uVEE)oE0s`+>))od-ChWSTpU3 z3S(VgUAMHQXuqygomM{=ZHLH~$M!uJ9-eyOb%pNLFSqYiW6=6F|8`V8+Q%NDaWZBI z)3VVBTre7aLUMw3!Wa8vVEvFFwXk1j>P)E{mBOCQ-m7IgCb<|#Mw1uuSnUC7lwyy# z&^_Uq-dvZ6xk*O4A8~fAW420mp*`NmJ0}hvK%FFo;Ff zSk@QAL#;pp3CJb>VYfw!pJ!wFmd|)X-{2XiM4DyYi*R6t+trsgJxUfZr@gNH`;krk zfI}C%ct@H+jzWoSGR(7zN7l4_zc;+0Mo$L@9M)p zjh4Q6y$K2ZUt`>6lNiMLnK&neNw8Ll3Nsjc-IPd3pGzPJ? zZLcR`<|uQ)n0ZP>fuYE{p7&g$a7L!u;dCYjk}N~r`oDq*<<@wI`e7ZeH?sYlc73-T zSN`92o>B>JwNG($_cbkofOYxQGb+F{;#ezN(vBA%FFegB;kH~Io2J)PKTW&j?X}<; z|0#)8(6~K$pB#CcRrQMRe(hC`D~rq5|D|3T^wzlUw9(9Xjf=I&#wVs{M2@TzMyrtM z9FRCSXBRez^4UzQzLD*ji?@`LzNkR-t>D{hv(q#DN`g1yKHFVzRLnd@TS7L!TWf); z*d0AzNj#bj?_Btu(ghNFcILfHlKOLsX%v2r@5i+G++?oGkVBM9-r2Xw#p|5aL8|Z=K<})r=*^xFA46i zM8ifnQ}qurU~2vkbIuCzkgs0;m+{FD$SVfd!1xAm{ut&^PK%Mm29Ue^;3XuaT2De- zkEIHCZ+Yt>G0XW*-?;5M0;0<{4`=_^Ki`H@L?{w=#1!sp7OTA<8e!V1TavyED zIq%3%2&#jc5ZN1^&jp@F(z1Vs8q)0(R#4rcim6Ldz)ZwRvjL3CEbfyz#yd9|2Vk1w zF`5;BXOSf6g3M^|M)l2^k$ZY%tXGr7XCk>}9eP}!Gm}InMe#(6AHPq5y)N@^G-p32 zV5$A7@6sO^<_UHw!o|>NKCOAJq#B1j5WHcQo8iA_KyUko#~1r4D|u&Q-Zvf0eV%;} zCeimrw6J9PNA5_Vf#aVz9_2!5Rcv(Hs$=+N%6NS2R8;OP>0R<@|_`t{mlmqUM_ z65ymP5Yn?ZimQ!}Y&@SmJ@bd`wJ_BLYhRgpGM9WS>dk-VD==axw0xO_J!tRS?PTUlm-P?eR#LoV`?X z6!75KsZF)7Un_!N^f<&bj--jo!+SY(VpF?F@c9&)%qOMH+3qk5nckH`V{m_`vpn`U z{^y>-4+wk7WRRMMh9jyLFl*+}bN2);HDG7CWYdje{ruZ!kaEZ5bdBXx1MSyvqADa{ zSzqTC@W<7z^)KORlmpaOly^0$S`^3MD17a8A1yAr+OXd#n_H2se?mjM2rn;(o^Pw} zBT4AAu&KI&{RRp$Qak`6FUiedx&>}PK2qiKvBGy19FWfEJ|Q`5;~GK0nYx|Vvk2mS z{gLUti~3Sh0Cyt^6(S2j7L9KP&iv8Jnl>9Ps?H7pVn**m7kldV3j-mn$F4I*0;ak z-@_1|x8|26_6I&U%T8rHAd+q*quCr0*X;dJ;Tl(Ihg#dKB(~Uc zE^D>ebv#yg#vaBf?2KQ|DK8#jlc1fvcN!XIR-xJZxEKLRXJ+B`YYI{ 
z038*}5&wl;^*qLmo5~sS;J+6sD~C6ze5Gq9&DYb<`&*3{!O93f6-!r#tX$0ZlX&&t zJY-wo#a~i(AQH4;LGRc+mNd0?)Aholde(}@rnY77%X~-x0LbRd_AQ$jlOCC_7_>M zgbT!f$UlM4ze=SJlRTIdMr-au)ok1ZIp*KPU#`cFaj=;NIxc>4{EgbKdN$4>=cK<5 z0Lw>5`(7wNw695%uVtJI!S)t$(3S7Me@w1-R7v*x93!Nr$A>gCdILF_mL;GNi`I70 z#N(Ln6&BhSA?owI=ld?$jn1d>hcf%lmzmC}v(oo*VG+p^9!tq$WQ#j`FK_0Z;MMBY zkHmC$2SSvNzaCZjeB2SzAih-~T4Uz6GBJ`maNoC!%Wp_NZ80X3q@A<~S#zye?dw;R z|DAcoLlI!cjLS=#)@!AB5`i0lhle=er~2%CamXj~@7rx%jcyyl*i*ez8aq#TZuVVz z83Z{7JDne14xos8gmdT0cldIf(v$H-t?WmR@Np9o=C{e~X9_Rdtht+6CudM@y%gCa zo?CcA1DOYRs`)+Z5XdHlD0-~AZPB6+_@4KsN8P7js9l<>ntz$HeTB+D(F^*2Ol-~y zKF`OFP4C5|@y9!s;Vk@fYFlSM+4sP;2rY+mk{TBSrA&b>>)x@}?;)?`DuIZJRU2{QDou>CD-CM1@O^`A^ zjk838fhoxh$y0Pz%Xd(jnp5XU#Yc1+GfsdqC~Y*+tHD|)55wUztzEK6+@e+L?yA3p z9K@4cw%E1&`M+!3(MatH^kfl@(0L;YN^~J1cVd`f)dkK8JJRzL$DOF!n@d8<(r#{KuFSLQ;-^2_B)0~pi|`W4GYa$H)`MbnMOv=q^`07KR3@X z-|w23Gg9nNtpGr5!N+!6Pt(Wkld#FuXZP(>vV#Sc;+U>BJ%& zrmUKs;;Mrk5Iza5$$^Hu`G zzvnadW%h<&`d!zDqCEkztSJSEEeQ#k8;%)rgWft2JI}KyMFiyG$J%g-7dC z$TA4kinZV#>f1J7k{8#H5Qxy=z-YyqOPw$3BGMHSfBkXiS{PTu+;=U3QV)d&0=yu^ zOFm6Ft;2p9Bkk8Yrx0yPY|4-8`k<=qKyIxmd_P&U+ADre z7}km|$)i^Msth-i7yJ~w*Y^)jR#kM2r~)cIRnF_AV_Ijj`pqQ|=XXcq$}!gyf=R7p z&pPiU!5kAJQFOhWCp;vd@%$Mj<`%4c2ATGvi@?YTyp z;0sfdm+C*mbv@$1XVa+z&$9lnMd&8N?!vjwgPC!K;Vor@hsx7<8R_15%DPaVQ=8TLl@P!;pK;`@ zua5~B8BY=sEYXSaOpxMPS)da;Z;#fycGot&Dhwq0kkT^KF(dWoM*{?CjENx>&S>82W8t!4vOJCJ!CEk%EO$g z=fJKp2YY4*TzdN1(q?jCbh&p1hQnjp?XRHMXMoe7 z8jqy9{zrszQa4_^rymL3rTCSN{1I@r#HyqHWfZIUo}63n(?=Km19Yu*z3Ptr{K)tV zyzF`#AXx1?Pog^YE?+07XDQHWCcM$r9D(u+zZ$a>#R>5EtIa5P!BgqC6CiA|oLVul z({Niu`$5WxMvRwFfwl;j@4;5)GR~_HPk|f3F3{>+K~|3?_0xnK@5C&`3pnqSZJMraW&JERslnnF=JD-^WG_5tg-Fu*&q!E7 zHoN1!wBs*_1x#+& z&2F9Fi}aBQMlcBBHvi3uQU?p~r2~w#S365Q9%^yDz$<8w%iesY@}6j2MGJD40ak2L zHiuST?@<)IS5Y@Now!JJZJ)Dhn&peiKc}WJL_n9E*h&*WkGV5k`SIQ z#!g`C&(<*?FNP~cArm1d{p(gjU!8^8xKV7nZe=;|)L{V)Dv^LsZFOKfO>mj!!HZ2L zw@(ps;QR!;c~;OqIgTI@+f*$}rz5JDp}C+ud;yRCI@k(bRk8z{wr}`-&!7(y6={-N zWy`toogEx#eIl!DPSHN><-lhoPiWHOppdEXE&q2>H)~z#ccTxr=!S!nHfS1(=Vo7W 
z#PFzI?%bzNJ3OLW-;EnaDob6L<+Xcc%q-)iDO7mRWSx-%Fp{@;OgLnu_K$ z1Bc~{^`3lOT^%nqo=L%T_GObqn0e~xFVoCoHl}lUUyTMwN6&=EU(16cj8}_;q$x@( z?sw>NTxZf1GdisjmPhZNlH_ zV93O~^my7zTmnBK4Q=i5-4MYI@TG;1PK%9tUXX&;!d0yZcNLN%jxnZRKQahd!&=smSZ~ z#Z>mfn|IPB@l3zgVnpbaY{rwuhk0X%4tVQ|P!wH-O2#n%CV%Rf4gdc& zICbX=Xx=rCkiUw%Jn-a+B0hWFg7a$0@^^LH-csXH)nB+efJ!b^$`jChs3U-5jUf~O zlR+IF9rpjg12?+`m>R%EB?`W!xwLkt6`*_3VO!gRG1l;W;oUckzq51{Ee`EOc`t(C zs+^TUdpX8BgFB3vtM)ess+w(R%!8l_O=3PprAQrp+OIJ1W_FwKPkwV#$Yq3dH}$P= zSS2j2U_55P;3%0==s7c)+&%Efomf{x7!CH~IIM$vcGa4zWyId^MkO#M&(&v@?l+k9 zSXo6d%~c{8CHGEPV0xipR|dv@o9<27weA1R{O2FBkT1!szWYd$H@35++32}Kc;c0= z*OkcQdM_Vn%oE*`ichD0pne+YY$@Y%a}yZ75N2E{KQJ$DIDKP`YM*%g@Xc?*L9>r7 z+@OzHgKd+T<#6?$K0F*|DpOlL=(MHn?81mvlXQ|HTnIL(awo>Ub5XYSXotsc@?C-9 z!L4-3K-d(I_d2%`3}+;@tS;H4E1*>z>neVYh)#w6)omQC&>MB|ar`@s{XB z`E|c2uE=h|c(L0KJ}ItM9*DIN6<5oplO)?i$exIUm62m2BVmOQw~*y)m*UvJoXZS+ z??CD9kON3}4f^Hk`kMWuR{XnkGbi)rHUr0w+YEfrf(Gk%aBmhWu4X%?nQK9kc$C8)-9TJlX zNGaCsb=h}|`hnF!B+$WBf%;R~63o?j8EOW^qgn~W2Ro}KeJ5*Hq5=t?m^M#?>U6JKn)X4S9 zs>%wI&~se7?lAb*%j#8dQR)u^WL1gLpHND9pT-!AO8@o8EvzI0mzgp#?B z%V9xN<7z}}#+zGk5N?C80N{|epI^EIK1j%6{~jMr8 zSmE3kaA*+)R$gI)-$(yLpmUg|(8Q2X@eK}AF;x9{B49HWA*6;A{)5Fcyz)O8C+(zh zJuszQXq;o&Y~lOJZ(?V#0^kzva_`wWm+JXaU&HTS?Zx5NItV#|TO{BliF)7v@oYi7 zbrYd9;gFJaNI(GIjX+rQ>(E*+($owub{rz^W^nhmg_vd|>*cDX?y-6+^paq2n$7}V+N%;O^FdHEd>lw;gS-1f)bcwY2$Ii zenS>hvkQ{uGV-dDt2o#_C_nwFQ+W58;WF3P|5t2mX=fMZhi&tVKz1=`_{l8CEO$0q zWaAFD)gCK@OUx15&AQIt6V&+1*TSO>9IVUM?2E=&yF3G%fRrKRKH3W)>5Tq7ErTL) zi{}diO@zS0=Js%&O^$YHel2o*#;w8A=lRL1bkT6myFMR+fd0aqYiMCnq(`3SzIdhB z-Bmt?9U+P0?I+%>zm6ac0uNftjw0pEUL7b)1S4FO1>mB5>?t2EdI;oZSV(76dl&(0 z+ID-Toj*XDW*D3cANT*p22q=9dFF$EFLp?@6Z;WKY1WG3IknFsE)Easf1?2j6Ajpl z9+TmIL*gHVnFg#qll`s5XWuWMAp*I@?rxj7LG0KGmRzv;i-^|u&;{DRM!C`$2Oi5Z zkw221E)WGRA17Qc)t&L7%#V!A3OzKHNPj9d#(L3$4K477r$FiUxxpTWY4Wt1{hiyM zW|NmRU|*An`F<4aH#EkCKz=IAj>xOze<@+QpPZq}$OWN9MbI6Ps`0Uq1uhWLEMfKj zL;U*5(K&?Gq2-O{#4o%QmZYDFQvDX=@}7`=6#8BD)ool;d8(2ivXdm`y1%lVi3XbQ 
zo&EX>+Ky>%ohDf#q+Jr4cB%6@*nyTcZ6U{noSneOQJr#$wuyix#(u?~3yPRfCsWKk zSZ0>XIP;9=w^B!(?dnMDXUj;J98a6`CyB~9+P-Zy&ulHks_>XW%y^95BtQwf6);p8 z0{r&rRCz~j_dTr};@1qk+zW7(HM}6>zWlk}rocRulej0h4r!Stf;;5kkd+bySrPmj zS!sH3pZ8JDrW0B)zhQtY@Xkzvq{Z+Xq+DRM5XMAAyY$ZeJpARKL6wCo?Z%jCV`R}z zWV5}G3!|qFgAPt`zp$OCW)<_?ySh#(US<6dsw)7Sot-_CDsi^@61O!)PdLWVedhI_ zP|2YBCOR(qO;B}ozo1~f0_btQ@6ve?(j~4KLKw)raU%^V*!)NZK*n2D6!~^#VFVMw z=)Ysvp{CjAXL*6jx1^ZuUH`<`g!o|uV*_U^xVmtVfvK1p1nV;|I=ZhG}|PU<+g^P@dh#LmO9 z)Bg|(6pS_eJ}ZPYxjQ#JTggtN0QHn|GA(XZk52pW=k(3Hi-RPzHBba~N4wcau~wol zyfd6$&L4zEltX?^B3`o@?$dv;D&#KX(khmkpNc0p1qF_|h;d?AkNAIQV#J{+Ujan{ zS_i0mjQXqhA6uAAKfd3%Iazu=K*8`3NVGqzG29|82njK3=U?TsUFz0aJeK&vTj_l} zZ=nqcx4I8Hm34-}zRIuH7iwP8-SQ7X2+#x&kSPSiGTzUyvu41_Rta0V*DHLMBhW zIO*s8HlIhd)<%nO7FOjtPKnF2w>cZ`l*G3{yVJIJ?C^h)krhTK?ArW)GQy18y7e7q zhep(sFXCm=h&X@AyL6kc@6uaJb#S!Cy4_K;*lWP| z!jUPRtxl3mmO6|-{Sb7wzOXcZ#6?3PhK>1|S3&)3ggW#<(~x#}!rzZ8+HQmG;g|Ko z(&%mmT}4Juw9`v-vc3xn2K4t=#S3ffmQ#0soX0dn1dQDD`|dW&siS3iSF| zCu7d~bazNDsUdtLZoSM?MC^)v+rO6KK8~kZ*w|2A-(Zwg z``X`A!2Kx*D}2-K1>}OFkA@?Q9pP-*xj|N+4idtIg`*Vd8J&IQ>$e? zD*H$pS=sn?lr3r|%LDmBBW(S{?F>Nh!tAF@)BSLj0EdqTx2Y`W-Ayi8c<;-%h|{ef`* z?mAPF<5*|fK_CfOna3~)Ik))qKoQTR?lAv_s@(+jt0I8au425`c0TwR+@S2TD)c}$ zR0u5*$^@)mS*MMl&MeA)>Jj+eo|;Z5<3>i5~w zt0|)O*Ibaekct0$d7faqB;A|#a^GL#n;b6S4{>)Nq2_`KPC1VH2?#1V zk>wsG^gu=w@xsBANjHW*=7K^j1Ha*_iSSRj@oEiOb_DhpO?z68CyEQLyePC^>kEgP zky!#Ta#>x%$iV%32DqllwoWW6y1ezsU?X}ouyd>OK{o#C1mEL{GP{ANu=aC?^LXl8 zX!AWB?MDt-I<}|(z-xkcL!DYznB9S!5q@h!26)n_~8VJ z#OfQK25*#UwXL?Z49Rxl%;Eu|=cfqEVs{Ov$t7g%lF8CCTeXTIqUEF|gG|vS2N5~^ zGZOL<6ar5{eMEd;0oy`~&L5kCAbFYa@ghhazfZ(s_f@0Arp2wdpOYfNGX#w#mzS?d3VSsRx&k!xU1>)VT99 z3f(>DcldXVfjfC_SKM8wnwern_4qxq7)lB_2qHG~kuciWAr;2tfTO^W{IcfO@2XTA zEY>fMPo5Kf^blAW-d~}70=)R#oyD4lyZTAq=46)CDQ)<{#E0FMZOt6R^ahptxO65! 
z7bqE1PX8z1u=HQRA)pJu`A*R^M74F=Li306)bo+{&bi?t0XR>i<}e`Z-5sFTK%S_7 zhJq`S)~UAbZ!e<|%K?N)j@bN;)_FJ~&Yff9CU%5m2_okoB+sqf9~{ltl2sKM{FQ7X#+yQsM9Op zndKj6rKXH%fkQL!LzB~!kG&gx`@#9^y1bPE&Uc2p_-`B{?O;7%eK4y2aI7}SqSJJO z?0>7q7I-%lnZz~&4p2P?6?~~~quA@aBzUbOV9q zg|0qJHgXy4=uUsLek0*O*8zTrfVM7!>TzZcvoADT{f;qyZ6Jf!aq#5^H@qzfO{@T3Sc<7bH;uY``I)Z8%eUMcz6U=~k7 zS6FWFU9I1o4*rQ>m;!#0R=fK|z+rL1GoF;&4sd*t`pn~H^Wi<# zk_{1KDGPtywCbc%P5K+TUE%N``R`xo(wPda`E}`|Vu02zNe64STnPW84P28P@q=Ov zZlCd3B_<|Dl3KN`ke8MMfLA*kijml){EiTST^F@VJK? zyf6cjYf1dm+j(u*3XKuG=hju;2*yV__0Naj{au|v4h`We2U0e_2lfQ~ID;B#pprA6 zX9(~B7aK%X$JmihQ!fr~+-WVTqFzh;qIiQJt}59?$v0v;jloX%j}IUbcs7cxk6%II znRib_VpREj%XExG#LG=YK#Dpem(R9#OVbHbExV?P(o*wU?wAC3h69r4Gm?NyELS9+{+%Ta zierY2sP>79!YjFt&2ys+xNpu?ey^KC-erQo+arY}ub5H<{uKzVBe69&j~-O+E&~S! ztXD!fJ8STR_JdBk_^6fsr+p=RY9>ccPJO*-A+(k+-jBnR`TlL*wKcn_S`i*5BUEBQ|Ni_O@bb5 z`<07-=)%4%zwGi+)TQmNq$STq2YD+!B#c#-$Az(AP>$TzzsFCt1Y$RX_>?*Y1j*po zS(p)@G*niS9kqG^-ga5Fi=j6*cl;!@4=iyURo=VEYLnCB)3&Lb)$)cig$g0h%TLD2 z-T07HAy2SJJbo}HJOhe!dSFt>o~*AH2~S<-=x{Ao$u+Ca`YL z?{upNR2tldRG|vf<9pm)WjW~-uyfUZQ2PQrw0pifv*I7*Iw_hh436rwYMp(%tZ-%Y ztSBZN5u_1_AmN8J1mK`BZnF6w`bW#~iPrHGEHS}+m2`q`IBb{c>oKD!4mY zBkcoB$v=_G>Z@{U)YjL3t-iG02v7`GXOyO3ZAPX-!uttS9CR`~yhj}m)e zj^O-#TAwat(gBjDcG>wMs{7Dk@w{7G;UVI|Vx&+O-w)cC2H*M8Zt-W^zmp);EiMtr zmDz|)>k%#h_YQS9Q=>HWg+`A0_u!_5Wj7RB1Bf6E9zZA&aFTeoob)^3{sm*M=z0uE zk+Q!KK>H1jASRuz6yv*4Vs}rS9<<*%Ok2=^B19=a$Lhd7S+s2EGppLbx2_uZxQ<4$a8_mh|BLK7 zNWHu#7HSF5V}7zoApScbNgCRrOIrGI7B$d+dWB;}ekw8(w@ zXMF18p8A7ZaN^NN;=|vbcoGn&IQPq~uemh}-xFGV9TFn2S~d`bo#4A^J}mW=sR~ZHV#ZgL%Wre*)bIS{5;KG-NIr3 zg;vhFDE-kwMnE!D^;BiFBKs;rbtr!8VmKr=ODs@Y7|^|(ck?rQ2fEGU_zJb~$9zPH zknV__secHhPiK(A_OxN)GHfs885RnBT|&{Z8y7tda(k}GT=;Udd#OWJXuhlO|8B1m=Q_Uy|`fLW?C6JrxJ?ycGuJk<& zx&+t|*!Fj_7Zyoe61RCcT-FNC66M8K-;#yYae7&0R|G0aN_CU+1*lcrAX_ z`ZZKkAVQ+%bJI-GBKIAVx9;*mhfibgnR96&CdC4|jiw;KoID|;?}Y^0!IFUFXlC^5V`h|78*(?XZ?IuB^?%vXoY8rjr@zZemDWM&8>@RCpC5HAeS%m z?In^F!2+>BB$$RqmGg#k};J&af06nI#L-d|rr 
zflJVcsT0cJArkjK>_;>BUd!UAK*cykhe|D+d344asNzWAjI7Rm^W{Zo`6&?X5F(vO z{ZD12KW!Ad85;6lBfBEovTGTc_CJFKAc2UnwaMDNiWF$x;&NCEbz0NK>}XLMKflBJ zVkg6cS3aHJBt${^1>B171RoHtI(F)1p0(ckAYWqSNIxgdK;$?=uh}_pYeq~r%Iv2v z=GlNNz*WE{Yr(Q+p27D{!ClyPfZt1atzJqFuwpkh(=4Bq|at8e(Vo?CXbrvuv zSdS{z0M-?W4wNbxBqzB9&@WbI+qm9ZBEiS4Lp9wtL2|l9qi0h8Oal2q6w%Zmj$LDX z*2DRUSUUVb>Jx+G4Lec0;v>_P;+_vtuY5Q@7@0uvCXXcLoV}QjIxz=}iSuJgfTScG zZ1}P{!6*aUF+KzD4bhle)ol$jvU!GjW&@uYvg@_gZ-a3CR`G}oe*tNZOfx`ea#_xU zv#iK|vicHoU%Ijja!hCOm@4hYK{%sdYHvks6PYS3X|B z@1-c7nfLi5Gw(g-Mzn%%bw5VgG~~mLn`JiBcsfho3lH7WmAx{xVK~wjCu$p@uI3<$ zFo<-Ky(n20fe*e>rjW~C%Te4#DiM%m!0tQcxc>~A$3_~8l@}O#fo%=6y(tFzC`2WU zI@ICoG^`97J^d$SzH{)4i@o+0lxLhNXT6r^fXh-;U(z%T!-Cg;6p?f#=peNv<60FrPOU*TtHJ%m8PPtHRZr*Jm>~ZIcWXIC=euRQ2)BU@%)(x z*{{m!S`mUy?DK~M5Ay}CQH1|5k*ruN;!EklL{X}Gw{9(>-hZvk)|~w1h1V8K%Z0;z zdI?@)oh7(7oNm7JZZg~0X|N%6OcR!i(JFh`my=5KDiKT!g2?$GKXKcY z7bMC$U;~2U1G&ffr6-n75q8;Ym%-znI|7|Z~@ut(@vx++*k!3B&%Hxlx?Fd z^b??Wx6^FgX-JoplX?*Fgv4h(NERXKP!`+>{xhP{2LxtN`1xL8)_1dQlrDcbyQj?L zI>~jQy@O`!p5*n=hYb0e$Ljg3Z^{fVtMhsde|B%_G}Ln_e6BVApY6A46mitk^ImTO z=3W0VVsR#8zmxycVZHNOJ`l?~rb`k2w$<(YtC_Cfp)UfhTi^+>Jq9mBM6F`c+;fPS z%v3fWt|DHCIhj$^>rVJ;o8hdf;gv@iVi4k*J(Ip7&%yVbyjS3R@>pgavF}m zma5#{oKG(CHQcJS`t}(Z3S!epNEQ)2b7r_`j`)-sjV%22zvB~dmQ{319>^iStc)3q zRBSlM$D{qCW1(w}*bQTGG=g}=$X^Quf5VVZq<#WNyC_)=o?B6?UgI-#$J0GvQ!)YV zB`k-0wTD@WSXLQM#!`Y6MzQ1jmE8s4m6|zO4A%LwN3uN1QOj-ui=2ZQeQ=R@Y9KnI z8TZ0^(UX+(r{~oOLhV3t2}IPSr9@}=?|KZp`Kr*ralVekrG;H66 zAbIs!)Jz$hE`>eVo>SpndoaT$sX=9`FSbIf|EeMwL)TYDqz(P~$G5aC1SDdZ`}+FG z(x8Fb%&NVg7p{Zx>UbsTu3og*{zL5K^r3#;dZayK(O4l|#g^gsst;rl+G+NGCO9G% z2nHDRNNY_&0sf9W@QW5s1zaH@Hk-UHIrPB@RH=YlM@5h}E;J|Htba=~eZoh*hlW}t zBu6bcsiG2ge^NtVd7Cc?%*9!<_Aaw44I6iurE)fNq=4BU2QmJba!lIiY%>eX{XBc<1i5Q15dwbj0)o}g8+;+ zDU;%J)U>dZI+z09&&<6LX0qL!etSsB9`eKb^=F;-p^?HEjaAht+MCNl3k?l~_~6eV z1qv3Xne*19(I40v43Ts6U+1f>zAoneZE1ItLG(-#rc52jRJl;A__u8(c?@TaSilwm zLf}!yv`*J;yw<9JG@Rf~il<+(DtZQN$PT;XZw_=AOV-b4?sso+CfyW```k@xj4UK1 zcEpI^afYM9t9mt5%ddsw*BZFmAMjF>wd&j*q@?WbCB&q2+Bv#62* 
zw~AD=HMG>w@jcx{?u@8qybNoN?*P{W*z#v*xf6ivVJwL(eQVEOtoIp;YLE5`uo0~X zt%_K_z@$ENM;Kej$44fg;3E`ZOV~}*o)=8ql{`#^7IafYuF6lvCtmD_x;OyXz}XXA zI3c!^LK{z)C6Dz9Hc(HR?$)mgD~FNlJGkYK*JS%j<9~N|^g60jo+474(nGxr=F0I< zgefJudelxq}-vAh*fHL_Q zwUqt?uiO=~u}zwiFeNj=&D)J7R>lSLNWR{B2lXd@FhrG+9KXh^&>LXzGl$_`hX7c$ z)=EC!Um~RMIDMHq-w-_y5tZ0hI_Xg1Bcr=tCTh5Rm&J?1(G%sqy$%2Id170kIT>5I zr+F!+>fv??B72HtqWqj|wn(I!KBgonSrsiz_r2bVEp@V-|LD6gf7^aRZQf4%zMi2z z+9wcX*vUG_KLXoY&}ro$ho85pz`2`opW^&CAO`Cz3LJgul63NdKjIH|M1lU|fK(J{ z)tK|_7M9YF#ZO4A^`>;I3fuK$0q*L+9EHkYYt74Ws(_tor18Z-&}?sy|V1+fP&7YII)>7Fh5`NvhDpQXw!~)$N=k zj4Z3w=jms=%Yd#-hE{Xco+^n;fvr!9U zQwpNY+^dKVAD5e>n#(&@2{TU?~7GV3>?4K?CWGNWo1I5tB`pTW=$~rK408GK0*7B#k+CDJ@233cV zl?48JRx!dWrx5Z7itFOJqOfC$2Skf3eLXLl3y7dmk-Y6;@>M6sRBArktuZfS#l~p(y**=>be!3;h|6uNG9wT#{&U5{H8;bI)9LqgmXeX}CFbR9^jSmV z={}H!4iY0?XnUG%lKdmU)zhymoN)+3?@VFamHJrs_+Z8QEl?GAcqr4e>(jL=C>1+u zSj&3ty=LJ6oU2E+0;iw(ZVGV}7kOHUhpqsCo-~q-cFHabW@yl!Y#H^t6VuM+Xq*-3 zJxxI;G@n&Fjwns3bbAWj*=?!8gTO+ z$fo`up51#uu`OY^Vf+yD#x0la!O;N35a9-9q-^n2Otl!i$%vLEuuJ9Y!GdnTPTeT- z=Ex`o_cLJDqjx7R68Oa{ScQ%p^N`9#!24$tIH1+UF=!}FREz@DMT-|(-!SHM5*DTP z|LL~i&4wmLWm}q=5r@=(8pW80>A_lrR9KsWVN5<*V9D$W*| zed8b``x%o4^rm~Nc4g}$M)ojtQQUSV>XxkQevIrUxq5wKj@UAa^;#so%+!NSB2rXE zdvy{VsGom3Z}v~mAty6rgWUlYCu+R&==M0G1|+i5j}IW0UwmnRBS21cH!tjl&EzAnPOiFS59G5?=@4<5j`LjVtfv$nbhiXA0McWxMXphxbo`K@k8_MeM9m{Oze6jVCIzGl<;tg zFYs%3{@i1(c-!w)G0-kxh_0AiGoGW}m|U!=RyCo1W2fVcls1Xi#V?HzY78takC9Z} z=92B~8!ky5*E${V<_cW1P$DD;`V^W7B-h7bn`B0JUJ4PMwjS=;n44qrIl0@Ejbc_i z^4S_Dolgv~((4|dO+pQkAiL+=+tz>K{9dGMr+eio`$P(JOzq~z{R!npiWagr5|4bv zt0DTm8BN%^&ueN7B}m152^LYDnm?|kI6d8q{6$jV%dABh(nxRkjWGTkm{rmzjT_O;^EOx{j`5F>J}CvJeR#CNe2nZWT$8!`P8LX zi)LJ}o*qT(V7UaILYLbQy4>~`h@Aw43ql73qoVOWe`pRJM?ksJKeoJ!*2bpv0U_Ai z7&7?!8_7lc?n9f;Q0i0BBU=^UzJ3^?YVofZQbABKzj#l5y}N-jol9FP)zb&3c<@;z zf0W_*v9$E`Yl{`zi;pVZT;~V!Ibg=jV65gPq9^~T!e!I(9jO6yJ2ocHe8BmN7xNb^ zn4}J5vXETy{T$1x3{M5y-IdY))J{YV=OhT8aYlA3b`B(GI{iW6iX;gy=N4b(IrYfL 
zsb|XKgfjC-s#T#GQQm{FlHJz*41Q&)C#uclw5YcSWeA@n`BpLW44y4pp1`FtIH6zXTQSEHz8+uA7fmi0)0G_bz7? zjr;T!V$hb+xB%hnZa`lUN0g`)_Fb*AN(^d45!rCsKnoMbEHoa72b0f-t+`=Fiq?3v zibza!m~ApKdX<b~P8txjDJ zGiT`XiC2g=(x}L077`%;q_>}M79Cw3c)KdpyB?AV{0ZIq=OtZ4NTXa|k7`b>)BNYd z|2=gQXn5aS*xSYjg7~rJgS`w0m^{~ue*_tunWYMd1F-w3#MT7Szy!OiSmmpBh1%GO zkZi=6FST+j_5MA|ir+|mdBLB{?I}jfb>H*$wjrd!$Z+N1lTiH}8g6z*)ICd-0{rT8 zHBA+HaG$t+=nHZ=V?mdKAMYpv!l@;Q%Bnm*$|EL?!_n9{Mzzv@+{bY3sPl9g+wF({bejR9D+a%jU5A@y+daHbnfR6WkoGlCsLtWoOp?rEh6i6WU_qI>1$*afhOLZP?7`{l?{4|M?> zC!{Z7un>g3nfi{PM62&DH$px*2N$(+bjK=fa6oAK6GyG5Q&i^bOS{oUN(hzSVKPO) zroQGvH?i8hW%oA#h;bBH9L-BbJCC-;&G@K_*n0^by{dNT5JN)->g6UZggn{Jma140;&ZG4B(^nB+oB+|a zJ_NopXbHcGIKwysWQ;>h(M*?i(7Us02H@*A+}Ip?%@=Wov!0IeQeDx^Ggff{h-fvQ?IF0-bq4o&WMnBlE8XLpj#fgPacE8HfxSge!v6_Vk$*hSnTm5jL4l90zh;c%DL&vW&=1E|*$ z>~nVM(uhuUL!{Ws=M zGJCg`<CPYFm6IGf zPyF)#JTA8ap5Y%qe>m|m-~|q+n+yK4kHChcGAl-7Yj3@h*?E1<+{b=4Ik90TF!tHKFRqIm_Z&wWUD4ZP+A4#&kSIWbpTZf2(BQ54y94Y#Aef9S<(34W!+2-n7Jpue?xllXB&}+*K4I{N_>Aa+q^$#5tpu6*dV$-q|`a^F^a-A zcpUAvr;DgA9+?i27!cmM+;a7hE=));3fVaAfLOw-+Nu6JGw@=~k`wH^>YslVK%XZ*mCFf25_m=v0el} z9)@oQpyS&qCtf~2guor)-A7h!x52t`b>G^;Wvl!W3##QW(t#-f4Pm~nDXw4Ck+SK&C%aL^NaL{;-it8Oe)fg& zmnXa9j^6QFV`QuM$|z!G8Z9XW!0n7Q^vNo0+B}B}(w60V%M|bK7pH>IxGJMiYJ~)n z0~%yJx@^V9=hz*#SPJy{PVg`hA+mT|8RJPs$ zlf2$Ez9cWk?E#q1gbf`=Q;;1;cXZL&6eJ}t%fvG5DJ~=o<*IV}oh*lwds|q2{;f0& zN*bO_>75Kxm&;O7+W_t6rVom1o2dv)U27BI3sWpwFX)I%pnZihdgyynr$`8-rss(S ztzipB>zN6MMo&iPIk;_lL+b$6bgDK14}0nchmO?NS5`(C1L38-kgJ( zn>#CmX&gd>I?msG9Ou+i4Kz^M1ut#nuLg{JF61;JqG1S0Rw3=Qc{gyBoLfc3facm> zJ4QU4q`=F$iH(THI<&KP=_OAI<+);jSDaI7a4-+|;Ao(^CG-*Qwe2WQbtx`Jd^;z} znS{26->xb;*Rs_ep!}~m%8Rg)E=e`~Fu3XjRGq5joa0)T(0*a0L{qCtZZgfXEi4?q;bfZ}?F3vIIz?0W6J&ynrI#Y)-7_ySI8?OydB z;jXK(VV5Sz*=(XJ;t3V{VAc(uZ@_ESd`kEiB+r%6UcEz8!kQ}Xo7xc!emJB8N) zKrR)A$wyX1lpeebJYGlxp>Hs1Aat-tw5{u$PAGq+ZO09=Psw2$k>|pA{SN&G=Jt1# z)hh=CRbSu5w%CLQ2RfUXarw{Cbt#6gmeRywWKF^~z$Lm+Fqt|`6S3$fD zs27sE3=dpLrA=IR3mT7{;F8HrbG|so*+g$=*HCZkNxoWMeiOuKMMPS~mfF1)o#%%K 
zbrNRjHZNSAf1-jd7g{yn6A~XHQ^d!0r3}U(LZg^}n4voS7DeX#Q zE{WwV;y51{9D?$O2?|cB*w@PjUrQ`Xao?KpK5{yFR}FdJJ9#DlSR&<9iR7PU6zIYx zjEYPad$stBI?*lLS=<@2EiuF`xZ%@yH8)?T$ z?pfkT4t0!lus%q#{W#tpp7vr8nUE}nUCcu-dyeuV5R?HPM`-@Y40opU@Z9)iF02$U zZ&99UJjO~wNvSg{+5ngqXTjPD<)>Q)FP|o;Nxc~iHB_+fv=}`VW=h|+v$h=I*BRQD zX_x%qd~JUjo3D2s1Y?sP8H4!v1jyjFy-+n z6#go)I&MnLBpa_Xe!s-89R&O)82CoVbK2qvHYDOZy6OZgT*i$;S_}Suj;h zInq>SReyo7^JIBe;p&;sy6a{RUvdD61i=mtAlZT#S;^073BnhebbqDL)-wl55cji) z65EGmgt#XvgU%;6?HBOxpQtMj&MiM{NhiD$$MQm@L!SBvg=!#X@XX+P{#3yeSFtH| zyU$<4dy$-jLF70}yDIz>>R-|NE|G>NsTOueZ9{UjAY&O(RVP+G2!!hAd%c7WOz(RU z$I*too-c7ANHktlF6xepeYWEIV04P%)#!%*s5ksQ<41wm;W?dN{Lt6;d`c|i*^JIM zhcv+r?A4`j(=H1LaM1eBIEbBoq{t zPltlMJ@s9{LTWqza^$`mMN;7BSO)>u(^yoS1IE<9vUKy+MyGFpFKE2Dy*z|lOkN$d znfllyb0sHg^$Y-$MinOAAoVQ7Pb%BOxDkp}{AJ1;s8ph|T@>|tsJKH=71JndwPAdE9e(yOR!#W^jH-JkB4BXv#hB`6L z44MeIIrP30BH;ff!DLF-V)q3I45gm1_?%;m!33IR6ko|Go(z9fYHv|CMvn0&<0@
Cdu-Qcuv@pWP(QMB~`=uT{X&j3y|szkb=&u6s}u|7rwEwd zA}{8sN|RB_J_WJ>gi=cGW9LYJC-J!Me<%$t8*8&*YUDh6!;_cA$AH#Yhx5{I?VF&A z<-NmZd%q>MI}as3;&h6J3ri9f2fG+IHE8baW9P)ye^B|pyb&ow>e0bQP4;F5%Qd&H zGv$EX0#oQB9TEbjDGtiMK*Ti`v7kkOGlRA$(c)@U`3t(Of?=CBQ6e56$btcMX3XBwDMzffJw>m#s>0UMU7aApV{*6Tx6jCN`#a3jv{wPL@Vemyw*(aB)`paReQ~1b2=k{tWzPS!7$w-^7z*= zreml!(RD4C85y+CyULvjLVueZ9~1V+8|&?;D?aeJsWFJJ582c@?ewH;eo~%2X2VW( zye3K@$=s4r;BM+%LBFZyQl$Mx4B^YW2o`g1H^fJ2+HcZrsuUi$ocpCC%`AwM3frqo zI(9Xb$T0TVUor7m`C8o*Lad7K68+oO_SbA)-nNHd#i{o&igfj=tc%m5D+t+(f=aqe zMdK9_Ryk~K!cSav&MZyxqit$(gp`pAnBjpB)>pwR>Hjj(|2!j5mz9^7R|hZO)<&yC z#Tg_l(t}d!fX8nrY|LHYHvlnvJg51(FY;6nNZo&u65dAd)Ol*EFj&`7o^5Os+n z8ZF!hWJw=3!A}`vrW3`(+g?34X55z-!t0mEPlDv1;RCZYYn2Dol3&}7#Y#7g-Ond&niJvazmOT*Sqb>EFbF+=&@owcUYn&?L#? zDgi_)u0_tg#iDu3wPSQ#y#*0~T`qxh^>`suYy7*H;<_^Y&wme;0qdE2^AR?!uWVWc zS&`yuW2?842{9#whqV$tEst-*S|GmFLvGs(*?=5F#NY6DTuntY@))%;c;TnN=jqq` z_~7j3ddTMpz*El@Sftne1Gg}{9o-~4%Kw5B-!lkqXk}}MZzPw^Z%kMZDrvIJ<(6s& z{Xi>~BkiPhoh&|z4^yJVo#k&ErPJIQ zL|Cm7+oT7wAaHus%if?rg6>kyD264YbvNB}e$;s=)DmnA4P@y%hVPm^; zf7`LYS%=zv^PT%SuvS}fbkijDhcCE?Ldo&<0fXHDpfV^*!4z1^pDctZ_|7PV(2Rl0@@eF=K06_~*vQ zE}7Xap2i_CvlZtwJ*$;5XYJ-Ap2U9?PWO&6=tGN!pUZ7;%3Tc6s&8x*Tgxu`5hQc` zkSEK%7hW2uSBla%xVcXuW0J@WsYry}l=ltLr!AV#X$~(Kwr*t97Uhelf;RU4NZ!KP zOvn1T?jxjg!hyCrx|qj$70NDS=IqV-9rNeP*?ie28M$<*)gnf;r{&pm92c;Ob)-73 z*|#{5+wNf?k4~nlwC)zPt|lxWAJjy7u}H+x3foTaJIs9|q~D}>R^qn$EK+Xp(aPDh z*x#E?Lv!MwZWQVpk5~8|Ktc;}i z3pbi)%6UN|K|kcu!QNQ!=Z2FzxSLc0T`eJ}Yq4U*^~iPm`%qFH9lP$4yZ|i}-;?ee zwS)|Zat^Wm&9+7aG(+5@z<-unELichKv%Vy&T=@hdq7J|>+S7L^q4A{%Wwn^$`IiO zd;G=^R`LG41xfc6$xq6s+iS)g8n>o!D0Fe6f%9%}?dMpni~F>8J>$iq$^Oag%{h)O zSk}hWchOX4QG+l*I`3O8%{0O3`@P`U)VbXW$Mx>124;6>SrO3V z&~$cv#1n7YF3--Q7|J529-Kp^-duH0n$mqEU4<~#`wn+G@tVjVZ6sk221(Z%54D+d z87IdF++?G(-kNw9+5I8~E`Y~Hh0XIBuV|adem`Jf{pX?_zt1%G8X4f=)GXk9D7xBJ ztKFnv0$oJ{NNRV=4vHNOmlD-X8)%!) 
zbd;*As|ywoQfTMy?)G|Pq{E1MQ6RWfBNU8o0k7M@NFgP;w#+^}>w7Ay8JaQMI)(H>m30pSU-7A%3<* zOZ{%%B>Cz)Y3rFUWX_5g;RJ@?3uAQ37F)!-^U{NlP#{d|cKT(*mB!WH>w$qIWmowY zOwOJB$|9{mMxQbq%EI!5eJL1d$!{c<&o{PP`RuQChY-ZwSH=bK8(6O&f+VE)sKkP^ zmdxh{&4>E_EZB@y3Y?Q7as%I_YBlK>J^da?-@5#T%kTsl_1D1tja-b$fa(`R=5Gfm zAE-TMYG_xxG8`j* zdhb~EB^ma5@sWH!G1uOK?CtJ7WoD&e)fM5fCq_)nJc*lX$OIc)N9V?taB6(%V%f%Y zORD~0)!t;ovA-zD=QHa_EuFac=3Cy?srybc1Wl1V7?Qsnn3+ECsIN!pOssK9tpsap z^iC{%iN{3^G`ny0vBMPp?!!pl_4je>u3h@YFy6vpd(E9m%%0)Sirl`f%ieNyAOP6+ zq}g*g={E;cter)}rhPavyrJ5eln-@NMs^06mfE=cHF~t{dlUdEFtn@WBjOJuQ1{w5;fLg(nw*9lx-L3|qr2&i7;TN@p3VPML&cXGy3}anI7rpwh3}PX_5UF?N0$9VvIldD2^Yx52PDVWqSC zrTuL6ZT}1Ws}^?Y7LZc;5%bA{!t!ga0yw`B8n<94h?oq~yYogkNH8A{*>=ii0}NK< z*8Gku5WVpdjTqW4o>DvdY3-!ixLid!OGTwUj<#HbdQj$CB}yZs&~>$#wQ)YSeusFr zBu>+HSRPzCyd2_NYqNdAyhB4_62Y<5e9EQQB4r9^OVt49daWqvM?BC>_Ra`GY}ggrL{Qr7sTP?8fvB zcTFRYP3-5hlXayds~L~7^(8R zwvoNH!bXI7%N8OHXP#^XfC3@tUD+kXl9*vp@#7a)IZS|f1y;g*m`C7VW)JAzJsmR- zeWG9+xge+W)QK}n)F7w%;LVyI5cHj@E6somb*)@dpRMf!edAHa6x*YWPKJ#Yj-&n@ z<85v`-@?Rm_vJ+nf0W^f>end}4WwdMyz7&65^H!E_hIT71E97Z28!t3UP)eew7}4o zD-vh_K5miz?ls+1Rdox2C5RP z3BNbasPLszKvc5UQ!!=FElKCPCK=t;M}BiFxXHTIR#B6j0~taR0o^HPaW1#zvr~0w z7k7&ji7!qvI_b{=2V9l|1X$;T!gz+c#TeL`hVx3ToLx=t;*yN#PEFOQcaE>T%ylLj z4p}|scz65?ipF?1!OC$+uIs60QtVmH?Jx(_myLr768zuNS~SHD_$<@2)e_{j=; zjQK!0W~jnn50%`U6{EiEN|?(mI{z?2-0XDf!FwU#$R%c>cYX9ZT^)?Uom{}y?J zTlrieCQ4Bm_cBEfg+;wqPc$=a6L-74llYF)H{QnRp+?KW^>{mzl1trKF(z#-ASE*` z@8iws&nRSzR~j65cbOa8Xg9m=y_eH|L4aY6TxDG5q$QiVS@ew#%gvzT8%8DUg2*jP zyXsFP5FE>0J1NRHl0CVn8JVtPrSVNr1MM1xnw zQ#l$v6#Ej@OFdLPr|)yS6@4ePpxfT%=5ZgM(1%aEe=QZ?J;lWQq8972_$+aNp8k%P zqQ!*{PL`!<1|i$+l4}IkQFTUU$L>u9CXBVB?ziCrOPl!teyzSxy5H+qVy()S!)^R* z;1#XPPQyGOogr6QlKPh)%E()jAHIq^*=G+<_?~X=@5P$EB$%; zlsf;D;+-RXbV5|!uYmmyI6yI-s5PBAkXIq|%9udXn$*8((15 z%C$CRqvv8mg` z9jEw|n{w7v3S&!t_K>>$2oiA1s4s*s2spBRWUXPARE^i7SwCrJ4@*;V-mrCL6bs3N zFn|ux#q)Wz46V5BK+IL+%n_#h)9q8teW!A_w#UWGfA}$}QgltQ*BijFkA>U~A&P;G zO^e+=9VnF8-u>|xBsB0;h*balIp#8t9e8WeHd1cgCVCS7t=t*eb@WOsO+A3jz&8AK 
zDhwBq<_%zUa-9X7n5Z`Q{wFjI^XeIDq~E8rjPSRHobgQw7V4uz3ENyw9lrVyDFJ|n zUrykLE#1fbIZd7HUse|dR`$kPCC8%|-OPvFl9~9F$>>aX%77yl?_HA>gEM>>cyj0S z#4Vz0|Sl6 z8Jlvv({#BfIvvC*dA4LgPcN#c`P(z^jjyh9{=+ePO?Nt!U-3t{*Zd$(iZa-3U7@`v zq9vHuD}7>t*@rdSlxbZ~-Mw=c8Ws2dZh?qqyYKHg&L)Qn?wFuEhkdVWFi{fEnwyuG zN=|I|L1#f)*A6}#RJ1qbL!q{=q5?r2DeI&=9u!ZqDCJk{NqZb<_^8;I24*!NoEjX4 z2`w$#@tTd0chNMt37cvMq)hIc!7}PQ-h^<_P?$btYrhWkMWL|7A3|+||Z{-CFF<;2}Ah0y`d#@p%aw{pznKZ#5BvZ>uhLIbJI^@jTF(`=6t#Bh|Ku zjO6=gUga;ih7h?rR`(*Oh$!|YKYiqsH~)E^TjeIEvnR!koLRXlpI zCuS=9j26vC6n)>ozFbi6VGyB8rZWyoq)JBRec9jI8`$Ux_b+Fs^=8f(We`5%d3y8xY!cNkB)xZA|UR6W^45zEsY2XR**XXTv?0j-nGxwqnMroS*b zY_Z@EWHBE$$WTqijo5RZSBHvkwuXm|sDTe-d0b<6ZT3_J`5;+g;z0EMPt~aZjW;0` zPfhLwHZPl=$0oDQ9F#V{R{QZ8AUZ58`n7AK1g>=v>QT*Y^Lb_nOOdfb8XyEz3s1Z6 z!`@;Yr={49z<1GS$c*HoWeU!VwWuu!z4uB-AA@7^K0T2E85QGXZ^oXU!sah=%6 z?A4;3x*jQRl8*rV5LL(xW#@d?wVW+?IttYwl#&h%d-YHkhZXNerTCgXp@bnmHakSr zd81CQLiu2_mBi|6IR7W{5r^TWM?%i`pbqN@QwmD-zTf@vZJGU`sj)!4`~Gaxqt&6- zk+bBDuOqiT^{1+=rYOjnYhy2T@M~L*#~rjgBc(|xrA3CiBZ$+X#A^T_-yiil6p?-Z zY~QQxz_@tYgK2Y}m9HM}z7R#5Ve6n^YInO-xl)OY-sf{pTDVT{SV^Bmu2G%)_8S@4 z#j$@j{mPC^uZ0O-%cul9==n*NY&%_d8z>lkU@uxIprQm&A(2yB-6!qIEkoU;)0H-z zF9=4Mic75;yi>15NW)TuUOJo&NGS00kNVSHl`~w_^_v;OkO+9_s(pka-Z=WXsPY%q z@_&HTy0h$zi@%cytV9hL^NVe@Y?pwLG%5L6#AK4)H$V?%bS<17@4i}}ts7dOe{i-e zETLcDGxeeiMJ8qO7|d5w1FoA`A8%#m%;b@R0Z{=4e5syu^~h#o!2Xb--a4Wfw`W;9zKZ7{Sjo;=HgF);-!gBJiv| zX5kRZde{GC9SnXssqsG03lXV5*=up#+-$e*Ecu%#-nK9{_(kp%{CTkNxcaY|WdiKU z0u3%^EuKaDGwF3(=9v+h-IQ zdHju;Sr(e5tp}`gowP2%w^;GAGvEC#f0^cAFRa|@KdNp#MuLjksC$utb5u&q9ZJeK zDZafbil}z^yTLhWB~{whc3JzIJ~lWSbb$!<*N7h9rGu9#Avl+&?bWE;M9wtV*%QNo zXN0H@$DrEg`)Kf`))Moyw6iK-xF_a^_^I3-rb=oGy4FyoHt$+r%}7-1!J?{eq-IWU zR~#{rI8Hvb`Gfi(3>=&@$pb04iUE`#?8yPbDdITddG+N&w|q<&6>l(f^VCCCUU*8Y zKR$dLndzvQGF%+Bkr9(eWC(GsX=?;%;fs7@Ybz1%RN@2a(k)W>Peaa%dZ6y|L_2SfTYpm zSqzBxs^MQv*^^ThKp^*?y<@lf5z)pF1T%Mu#*Z;36Hj+$o7-p(x_~6yw`rpD&bzHP z<;He}KR$iKa9VyuZ=oaqgMFj>_UsF&9~feP_uCpPx%P5UvO&Jn_@{hF*vw^Cu4!!| 
zjZGnW=z>*Jr_21k+Q^EPe_SYIc+_=$=xON!_J5un^>5_I&-SrqroRp`_3eZR)|xXb zk#3;X=hRSBOaRo0Tj%Lk(3fim5a4%On@tlkT%=lr`cU5mTed*nKU+IzV~iZ2NGSMo z?c~&3H0J}#ZLeiV^X0|op&hp}8{{6%wa_{cn&Z z!JQHKgZrfgGp(P4>s|nI?X%lHv)Hn;bi1|Yn1B5k6K`YKz*T08-C6I|p<``{zWM(0 zudN_AL&(g!3@4FD!(Z?hMf`wx!*RwE{LBJDykav;LOSL}+t9x9*qUjAT8e)Ru?(e0 zmCt^y7fnZ^f`Cv+?kiFX6(JYmWYFI?ussQHMMH86V?HQMyFW2%83^~!cK&UmvNjtB z%hIfqIy8)JOB(FHM7$>?&_vLwMHTZlU4CzO@C9!dRDRV%YtNpa59W-e%D0YGE{~SD zY0IW~H(B@f)Y_`%cwl9@klR%Elw>h4;pIcUl>Sl>T^Jkfr77s{UA52{DV)G5wdNFU z&0j8OLn4+y(`pE-Seu5Uz~9S+AKJ;`Ia6xSsE%MH%0t9PGU@G9Y^+w~_&0 zF*)?vZ}ryQyDs~L7VY(GTYY8oeiAp#!cCB_#I4d7Uv`;VJ2wux_rDh8VGtD}N0Ja` zWIyjex-)dLVg4<0W^|Z$Xn?Ad{a7YZTnSaA@?H#UH$E~3Kg12{R)R0C|xizO9r?}hLm$%wTKFTez z5^_Zh@01iIr+@j^I*5N%DPI=-UH3TFba2rTPlNXc)RS>L?tXXuWNgJz?VILo97pMA zxkAR>%Lo#`5=at?Y!KKs;u5}=A!a7!wKhBIJqb-iI|`Z28Xt1PNr!a&?R={htZipT z%>s58*GW6w{mn?Dn9A2sZcg0-{@JRIbggj5?+%xl#g9T&@ZaOs^FOMn2tRs;2_sy? z=RT~Tu!pBEaX{FMBAx== znjF{P65ek|%}W$pb|3^J_=7LQde%| z@ZesuGOP88y=jk;8>zTnQT7zY)%5aH4S3xJxTrQLHL=7#5m^m-OZ~uJ3RYi5_08hD zd`IsYV9F6s=hp{u0SptK%Oc3WZ_v5u3il+KMedUxs{8OHOAdt|N-d&-*KKVn_acWW zvHwT%{q-zI$apW>&0foDr>tfTo)5T-(ojZi`QkJEF&X`RAD)jLUC&o(qqfZSI(5_4 z^tk7POhKbB2iemD#)<2PynIdosPHeNUjP+;W2je@CVXMecty%*zR<@i$G&8YxQw$s zQK!;wc3O+Y5?bFy(O=g4bsCa>dr#TjwnLir`^Sr4F8Ifwuu5QNeS&c-dQ|<)sJpom z=D6p@E_fX`wo(>r3VdKCLN?YQ#mE0e1&Mh|QfQO$ce5v^>PIL35d(Qo3;bafOb5|9 z&;?hqF@9)Hc$w3;KZTh#w@ zp*{wcY%nb^P03u2RT3uRIzmkhDWK26pRN)syC|09qQ8qMSHX5Oo8ynEzNSJOh9Lrb0d5F zC!(}B#HUBO39I$anWFp;qxf^P%B^XWKD5V-N&AfMH=wNdJr_iiX##i)Gr1`Gq}fxV zUwZ~h&1KETs&^U?%yG%EnyS7n3KlDvfxmb9&w~>@_`Ky`b;0A){U}A3g|WnOn*E8M zNBP#yjc(8uBt3;V29jcmIN{xKUh+LiX>}D_B1f2!WHGe$eaA?c1aF4Xfb4WTw8l=l z**!^fvNVD;@#3w^a@kA$jEAp%iuT*G2$j$rZ;xl?@=ojYK$`2fWU&!dF}(K@{y(|U z_fu60<8P)M>M-tmYa7e>3_tB{&xYyuEScIb!!>F=HE8Nmwtb_W)EUGbCgXY!)QN6k zIEjh2J(e>-$x@37z-BfY(e-t~@~or>$x;>~kf5R`G! 
zKt?>cZ?5>C5#SAt5s9w*cWDX>qRa?Ngr$hlR&EwEH-t~w`}Oln_(-GrDOoYQOIDQTa1)*jqKJ7kDHwo7quLn9k)t$ zV^v(3g0x4~s%2JP#ry#Ye2diG+segY>E@-gUL#-$d=AconCXF<{tL@9zBL!xIKNl1 zhBW9FXlZ@sFfQjoQ@FP@c8t4tea51mJb5O;%4PMe5!cK?H?ymg-74t=ZS0uz|7r9q zR{Nkx7>-$DSNb>5qwZ|>3-ojV#D4qy+$tG1Cl(gp@q_5MKlY#4<8l3KxTvs>+i)PS z1xNg|0x@_f@R23D#0(oJS5bOpja)69KG$2ZtH#r`Fy8yVx{t^cg(k(v*JL6sN6N)B zD#xBdJ*Y(o`-AwUw%?pAl7brfd(1h_%>qYE9h!FDztu!UP`RIkoYW6kvl?mPKl|2T zaY&Gmz|R}Ft(K#H&2^F~$LZI#6(~T|n-HgN0Kb+jT3jcKbQnE8H4X{p;`}o!p);H8 zZf(PClLLQ0>@JcNbMt5RpK`ZuMjgP;KFZ{@rbnT1xH`qz9II@VJ}bd`-7lI*K&!fa z^JDS~{OS}*p{3P2p&DjX`XnLrR&yNAZ-|P<=074_|F!tJ_msA$xePR$@lvz*WVdb?}X+*O1LH< zfH~%oemJN_7PFpPqw!U87l_~Q!9G=no2ti@k4H>5A5ZxGaR;wkaPOb0ig8I9dIJ@&XU=G@H*8~Exb06; ztfxE{xP*Y~ZyiK*`uI-s^EPNyu(o|T=XzPm9KOwCF~s+Zje}m%%?AiC>+j>EN4Go) zMeZ=b8qeq#eZ+Sfc0sSQs%SUWx1#$wz(nw&DE&s_rg;*ZoUy7q8Y&PjZ^6!O*3Dlj zgH>G7M%-aSs#?_9cs5#Yx{J1K_)`T-eUrn4QZ$wuvLy(m6fe0%i!MYB3O?!!HJx(X zz4-bGWjb@T@5AuX+yPjON3zNIGn7Fb%Rg#>F6906TZ0F7(b-8xZFf1C1@nk-wJmjB z>F(nf&D$a<_umn_uHdito56&v!{=4V`Znv>`BWiy8ql`_T^cYg=Yw7?DW7NIs*ah*o=sS{VI5pfil|e&?2A2Oy+rm;lgy1D<|W zH%8gnoEPAq;2circs8xw*)-sIt?Rg}M-&?`#`O@zOi8NV%uN_w4~Q<^@f&XlZI4mj zZhRDjS{E%B729d?(obTs_5Re1(_E`7C`TQ>#y&f@&KJ3Q%^+jy`g?kaW8I&SgxDiF zWk}__e5v%}_5s=E;CxARUEuYhcroQbUMIJ^&_Za>9c-LY*Y2{o1j+ReEFB`5%@&gL zSx76;spAzHZc9y@0raw?e;6HnLDieZk|#2_L@vSnqHHLa;ybEB03?Mu{hNCmgQ=f_ zXbNlJOhecwE1aUdXppt4RAD@{8lT8Ddr-v%clSc~wWH7+N~Lln-u`{y=7fh1lQ5W7 zEPv)9yuzpZxuQY>TovO6YAip|_$5%inZ1otAg7_7WQDzL zn+=y;G~1@%0~u*%2AC=|uqN(&J8`d4G2t@lzJFXlVpZbxxuE<7l3N9Q=%wPE>^U#t zBa#od6>@I~w}2|;W!kIQ3_KSkEUR()@=&)#{s)E>2;Z%fV1?$FNJX)sfwO@sqrG%` z42r$`_g4d?@f8)O244YZ%&gp*|NIw)^&f-c5seFT1%CaBgffiiaHg2E)YouBQ>19J z`o1L{pRskbkzC@$1XgbLAJ;O0IyIIAd2ok{mcDFw2{XTfBEewI;skvP*KBAk6E77~ z=4s?w+(QeFJe_%Oy(-9hulTXvuTxDydK=iW-cbbZ~zvbJSSq} z)rrYON}eGJJh4!?cpJ4UeE6X{x3JsHld1aT#+E3*oP+ZT(?^!#OgMK#Afk~`&XOJ4 zEYw6d8&z)kRk?MMb3EJ4gtg{}b5CZtFJPq!n3E#f2O({%Jn?Jv+0N~zUq$AT6T)*P#M8@cSw; za@W%o6XF$4nOG4s>zI!sN&zs?H$)Cb###kz04L$j|4k94kfs60Nzrrrj=ckY#h4^t 
z$+*u-et(gx&|u7t($nFDI&o89{m^ckSnJ8y;m%ljWO049XolH-PjsdZQff1g3f|isqO&jID_%+PM^5ew5msbJ@n}5hI)y~AkSW47x+l196HoH2-!>CXepC(>Pd zr>@@Uu6wa_wqql{1TqJX%mOe?gKII4XH(0uFFeiGg+UrKCf70(O{Nxu9T45HgqeEW>+XYM44QF> z9zEgnMM#R8mnP@5TD9#BzlM!D*^vZLY_E|wTnywx_lB;D4%)hJE=^26h9{KU1Svb) z8zP-?lQ-MywB;;)Znjwjiumt`d7CE5n;xZ+PNowJjxBi&b|V*ki}FjxSK+o}E*4{xEGu8O0C3eN*+td9=w-FLBB?bP-< z(H$Sml8L??&ayO609KqiY-c$QRn|v!Y)m4`9Y_B9)u#5i_n+EoZ+fyjTW{)?rZa9U zaTu6uYKl91{7gQ9<%IE~l^=)8uQLm6Z43807)y1Oq+mLJ&TC#5d=kWg1et)FuJPag zV&adOdaH$mpuZ39v$FHYuXq@A4af@P)WnxxK=FErj9&T;3+5=4d@OS8NWuc*;<3s; zHQi|m>mfK)rfbZvq>ALq@X*Yz^{!pB!A!)t1%LIs$$C3iGoR!uH&&z+rh9rSx%^D! zFzB`sh9bvL4{~6OIX;>K|2lI28$HhBL+=4_8uI-HtMM?nU#v`JBT~`y?1ADx_zD5) zaA`~Owu%?5ci%1g0lB!;?d$z^dFk%My^3}h3TAh~%LB5_H+Dr)wvbZ+htzl6Pr`hwVnOX$Baij|g7+uetV4zEZ?1}hozdjmRAt;g&;S|@^sJ>kHqSQT!+o)tF-eASY zNKa^X-28BFP$-LUYO9c#*w&|aje;_hva>b^VcSF1*GLY`JVCE1x?_~sJ`jBoLZmc( zv2ccqMFoP61i^Q+Q>GU4d@L*+w5g&FGef_4c^p?j9Pi7&%#w*nY5+85%hqKa8Vbs{ z8)x1(oS;jw^aY7HX?4{iN0qyE$83!1@#T9j0ou48h~-I;UF-a{pgXKzKTzV!?Xh1M z@dpPxWoUZa4)5cC4_$M( zV+~flXFiPci*3JfG#!kBJVjbT3ad=_!ZY@~Fzm|biIY#3l$ zn`t?7aFso7MEt6@{_bZ0O&aZk~!C`eVX(M9Q1MZf_mQl&;jr1uhf zumMsBsZvFHm);YUCcXC(AW|ei3;_}dgnu7kzWd$#pXX6$eDLICpIz2kZ&~C39=ti) z;|^s)1>PD7T#V+q~lf> zc8HyR&7*CXdYNa(u0UBX+WW%)VHa|MLxT@GY;n^raBrWX0n;sIF8SQT_9jQP!@uJVz>-V6{{flIqcL$l4*#Ctv zL*6@|+cJ%gA1uX7%oSl{0`ft16-?S}L%&l4`~FFF;$)aqYRK-nzB`6j7LXM1B?`wjeF|PsoxXTDT7G}KZRecDFcgZFRu#(u*yT?0u+>nD z5ioZ}+&)A&TFXGrm1=5WRn6DlD`B5g3>^i}*Q08wS-9^elGhFkJ%_iWVl)8S0+7Le z1)CP0BCzj8O|9Ls6wRSF)BpzQYbx07J6fo(1OeaVi4f=$PJ zfpWzE%AM}v$i-L#MC~BwS(?bhz+pbVQOXx#-j_HybYxLHV0u^DszoS#|N0`t)w4&^ z*RFF#GzJ7WR8NwT>ks~(|`&dPoGY@lX0VNm7s(nhrTF%%+{C5J8YC5=oR4INsH&}v;h_1>T0-4Bcj z{_D!T3=JO^&j*gs{moORQ1$jSh-^G%zj!*D4})sNFHnK=Dngiat`K=36Bj7|obO^t zrY8gZBUG3fyyRsueXDb>-%c)qCC^&+~6b86xE#@py#JOVm7DPzUo4yR7K9`dQUIl+zQh#zV>P}76Jgr zI{H6-N9xi04SmKN+DM=1F}f=nmLi#UNQG<7N74W^7kI^%(gc zu&7!u8k`x=tCUX#=E)J4-qGw4q}5sg?R2-(ENipGCG&HD=o(0iJsv1CU0E#PVOrj+ 
z?oh)W(wd74CN29t82kxea1bK-KNk>o{SkH=ln=yZC`qR<+eYM;)dZ%?$+@7d%2Ok; zf&tf*17v~{WCnyLQXdXmJb)@h&Ou`1`3mB9q5W-bS54p3M1zfXZls@_Kh zN>2-SvK~i_=twD_JEUR?_i%8`WeG_0fEC#m;i|-WbYya60}d1nu|0+dIcaD>@{3E7YE=vOTM$8bwr-q=qhcP zV9Na&`RxBhiqSOiV^sA{IL!o1?EdC(VWa>0Ad86BzBn_d%~GSi`_sC$H=0MrfS_2n zED$?~s#{MKgX1T^Z}BppV{6xZlpd%>7MFj@@uaC0ReKXzxwSn_-x=k!?{9mkLs+c~ z?jwBdgMi}$oMMI%4#;_p9(dxb|BMmzAFhy^$FFUxGprsjS}Dx}&qoi=b7~B{c?I@` z3|<~X_vPj+z7H_(vGpZ^4%@>@*vzO%eGv<;2BZvaAY}l?t>fcZjO~zPRiMVBbYA)7 zlG|onB^GS|D09u3iWY@k@c{*oD^U6{W3f59utnhe;z^ScXwg~5tv^TH#RR{F3a(T% zIsn)qbpGFXEF$XI6E31uwYV?AS$2sVl#ha>GP2XP&M_;|dB^~dC}sgMVc*U9#na43 zZ!lhOU}p{zGej=-@Y>k3xOHega&_mIH?FnSZqwZT27-NZ^7L@Rn@<;z#8JZg39Px6 zkL@&Xa79xpA*MU{;^L*0BIt=uh$WgfLL`z-;7$ zep`T<0P3m=Yvz68HcRqZpt@G|Gvr4Q)8Rfh!zg0z2{S~j0y>lV0iCI;#Y!|)PA;0B zeRsUw-5mpXOkGc{0k^H#{A+)Ow7ivMN{|8^?bOC4oeQ0fSHIw%{}Hghg=>eN?>`q4 zSbLf8AaWHud0|h`@CtdZJxW6hRI+j|3q_4I2E6F}n#z#qV7R*;y(s_p5xa_tUjL4> zKBRIfb{H-@LqaWyh?jVPh_SpC=;^~Do)!y;R&Jr}N@2>A&wfRZhx z1eS34&+o)EL&ige(gF)TOZErJMe;W#5N2J%|E#F5=ZQ~i2bpP;G@vEfD9dA7_)Hsg zlgi!KgAbmjcf@Q)Jt9>}=0o&Ff^6TpW za*JzKCZYXacsL*lKYfDZAp)J(!PTJU{JjqNZd=E;u2}l4hF<)ObzBhAz5Ko}RqOm2 zF7G_J#3-Ug*4(SkwO}D_U|wxzL&X%oT-|tq)-Ie1mX{k+P}P3LLaeM4gxEX>a^=Eg1H=c1Mo(0fTs&p?o=uN270ED z#0yADWJEKg&t~2VIbVyd8B!D+X#oJbz6sC4k!3u(gwvT4~3nxsk>V}R!= z9A<=pfwyB@;__v}X6yF6{+B++PXQXR_8q?3xe;07ThhJ{E_D=HN#J-f{xn!w>hSw##|C-BEj_q+g zg;}?0?c1zzbSIt7$c(xa(oAh)i#`qW-BrHvyR0j|?Qo?%*FuZEma`0blKOuokk4%n zHch+qs5t^{e)vB5?GT1&`|L4y(JLCj8~F+;m_T!F+5IT@fwQW?p9Xf1aCpBqm%a}K zh>YR3J48FIFkKUG^?!(hLBp?nYzWDuq@w&lS>J(X!4A^2;JjvKQ7~WvFVci&>2_4# zGuMal8r-eCbDxm;1c@a$6ca(;8Gqu-}zL;L< zj2GrHb{tYpsF)w?giu51YAEIel5g)GQ@k3yAapjSxC3Iz*cVKn?+8y}N0H-|F-;yV z7sbU&Uv&lL71rc>n3{PxvybE*ccA|y?EK2mb$(uDLTT@80y*xWnsohR;W3szk=1Di z_FaITkQR(ETV0w=Msd8_r}#M!s1(7kx&l*BFZAkWnlLv@UxKF)ySYImww}I~(*GZ2 zX+s|fnHgGV9N6|9h&x*lltNFZ#{@Lw$gIlN0Mxen?zA=R{GM5=oaPjIs=83F!{VLx zUd4>dhkFd&YY()cZ@1dc`UkMA9KO9KQV{I6*;NJrM6zE>yGPnN#*2?EX?SgpzD{V6G{5yojxSqT9G7cs)Um)>L7}L*jb3L~VIg 
z3_K4Y$(d)*%UNdv%h$&W@DnL0pGgor=5T>T#gn8C@HC5SIrGYktpRBFPyg1Y#F`3? zDukxpe&sZS!juo+aFhZAtjpT;jr_lfNt#pO zoGk>Rd^=6>GPn-D!hMx$iwj<{{yf{)|Lq#xn`B?;=?NaA1{|WG80&k-Kg9mvAa5`k z0PI{Ui_PCHzzL?Z=))+kNd}*aLSwx~< zxO7%=Ipxj^iP`^}1cEXl2uz&m1$*I6i3WAnahRcXHpjE0cv`HCZMuq&vQo=KyCi5W zx*EdF=FQ=|+<(tY>k{;`sP0_m$)QqwX9PgZ`Acd|<~T1Y0EM=!rf|qB3D!7h<(@p3 zkzL3P9Hi(AINiGEJn%dMvi45FU`0U}&1g8xT?3NUjEvaW*mve)d~%uqq^Wj6anQPo1`M4InN{SrQmrc42|ki`vN zh>(%lDj=KUC~Jk8Pe56-D)4TPB5)ppw&K33mk=|{7O$rHfk@EFR3yr4`- zEF34j<^8~Gvw{ymd4c(2E=$U^G8gYpwrPj*@?J^#E4!{r9YRniRets;$-v6b=#~6^7#j>7xfrkh*WXYKhIX zY-q+2UP;UK;KO1-3qW7Q@{uSIu#Hx-1u7-C;8=#fwR={nyP6aWmH26KfsPRW+z={7 z2~awCVCR9zS?2Unib5564R{T-&S!x&e!~v&LK*rZ;AM1hl)ULpMN}`gLHI^W{Ysf} zO!qYx+`t5ffT20uWw)x(;s;5#ZfWC(#JQVL?7Z<9mXmsy^Xq9ynp2Yl&p z#Ns#7@bWn$*-S@LiHujRE8D&zYWx1=Mz5w|y&-6AWZncgsv&u?NsOQdeO;Rd8e%GX zP!VSl$w7E}&w26G)?K|TpZo=^=AwezF%UfNFrOVL<~W^C3e`0{53xSQ-yeqXRFRp7 zS(gr!b>aRy>zX>ux_sxpM0)8-1l(8RqHKk^Hrv|DmUVChgWW;Z(+K)#3>W;8*q~jN zmH<~G<&2Fn(7V?w7Z^ovcfKYCy=$d9I$Efj`a-*w;CYh`yiWY63$ z?gs@PF}Hbh7#mR0w*CUO9WL&3$HPIr*}pFFX^Ff2BO zl_B{K>Z^1|{M!Hy?R&4*{T-Wh5U$v9(#|rFsKm*=ni`faUf4E*DqscnS0sJ6KLy`J zLF92ieizWjVq>!fz+g2;Fj&K%D{L%5D5R_Q$Q}<@c-=Ld+e0_)br*5rvYFc}VXsN- zFqEjB9(m>65wQ;{U5nZD_YX)`$_EWz00rwu$-1Z5O0z+!jJ5FF3ZUH8j~KC_yyrdo zbl39^-xpPw%ig@k0b|;kp@+}Lo|?5i;bFiR)1}`3O60B`l%FcI9&<8$9UhtCW0I9I zon0}GhPJmmnMi)nz$P9w!BqI4V*1UIDF6wjDoxAx2wmXUs+GJTsD)0fVd?MG1TLZ2 z*X}>JSx-HKNF)&7m5ZgpBw{C1KHk|7?A02Z_L{xwt_DH)Mt{HolccQk2?U>2eb$>W zcbK*KB}0lDxKTZC_saPTnk*7g68$sJyP~ED$ z+z$aUlj-ykXzBHaA~oQ#D`mcU!in(G9}wmZuKPr=o18jRsk?~D%1*D_g1f91J>K*n z=lE9ss$g#P2M!+ljIQTC&&b}sm)UHO zTz@0xReU1t@B?Nngb$1`;^*;-Xk%72+~~w`a`V!OphLaTe-!Oc@_`+|3^Q#rI}-n; zY9$KeX)GJL2-KjAm1=d1YbuhbKwa_Wf zOhd5s8zC8Zf`H!Tft~h9UFc*5m&3EpPty#z`mISwWrW$DA_Hm_sC4^G4cR>eQV9SL zlt<%l$$I_`_{?>3>nvhoTp3j+^Y@x<%Vk4@{38$2%xoyl{4D7+|AEg&7${GwFC5$) zlwm7OpCf<-3t&xBmd7NBvh_ERzZ+_*O!qPcJ%u|v3eEo#p0o|UresT0urBbwsJ*xz zTVi1uT!n2@!KX1K``+)FO~^|LL!mGcqb25FEy;Uo_Mr$Qx!8T|lQO2V2zlpnH~1;? 
z!G`UZ^Kj<8gZmTgd9gu>?~HVy7GcbqyHI0xtGNfn&*c!G&*3rkxpU1iizyJmyKuTT zIF|nd5D$Z*7FZ_lp%V#G4jo9Yu+w{82YH-mR4dNgOx}C9RUp*)a*z1+p4T6|UQA58 zdy?;)803cYFoXzIK(9nMA{^CY!yl7h%VM)1M%S+=zQbCek*SGAFd|5%N8f z_fOwSJU@`BB6)rDaF`m3fA*JFd#-_;JU5 zBe;ld$CrMW{aov!1Nj+C)IpYBBq<={Nyr^7w$HAHBr;wnc@DCw6x4RV*lLv?Fig24 zn3p`ES0xQljoX&lWW##es?_;cz?(*cE=Q zIy|o%=*#Uo3Q1d>Jx#XZwjoL6EnqU|xI6g?pX;m^gzc8*3QG{)bBGn2Pmg-he9r&_ z!+A)pd006ckR6iewp@DvpCvgQ`ZdCSjBhu@uw#SxT)ISWy4Kv6c4XWB!VJ@8n8 zD4fLU-}k-ex?)@YCbY6u{`~C$=I-}ZPKNT#zO$O8Z_2%T^o6n%qoJMKCChLlL!QB3 zQ>HB%{4cof3;{<0o?-i`OFgSh5Md5emvI&Xd27xX0ty*w3dK*^+7jk;cE$pBXAqFg zw8&1?i^z}wtaw&UZ!*o?!KO_!NODA$ecuy@DYn9F6|xbM6q7%&9w%cs?Q5jkGq#Fb zfz+cZ0Oe~hwX@rOdPs|lG4@Gc-InfboVi?#x>C+o+~D}2n*awSj=u6)iwHE9B6cYX zw9>WG$Z}6PNRf30cHquM6$cT_16n~MZo1tlYQ%P{J3;tE^Wm4FQY_KD+zFiGN~J*i zxIEM6RsEBYeyqT2tCehLNSESH$S8L2-hei`J0L=5E0q>DY}0r7dkM0knS2A?g+_h$ zp^lzVQLpy%?7ON*p?8=bt^MTX6biB2u}3puVjMUj-1FVbJqs!J-Mqb%HFn3~Y$5iA z({~meA#4&i?GCToA7W?~?ogFjs-bO}-fJxO#TJeVi_x0{OSRy|&cOWJe=Yt6v1_2o zXO8oc8n$Qk@^y*uN*-r{D5gcy`%#&zP#!@<|Hc(tqkG>iE1h^7!<7-}KT@7()_k$t zdVLU?0uoC;^K5Cm+b;kghDFqRWt$vsH%><@Q);_%OMRvXReP7RTg!uK+P;SwT1;lK zN0K(;Bu&SU;j$vLDptdA(mOVj_$GqjSA-Z~JONEvur-J%0XZ=}@9K?%fsF&OvtJ{J z3i~aG>_*{_dl?#uci?j-@`t0|(D$oTz7%7mo5IY#KOH{`YXfc~r&+npS7UkXCL!b& z|C&_FjuHaP(}UrH)9>Ul!Sh7>QRCi^a^5$d%&r*BzTWDFZp(9V zq94BTP3Z$l`)(JlPIGSPL_&oCwEF`eG`uN8l5dlL9W{;a!KRD=x>Ko2>zU76n?$(G zg^?t${vXajw~CiV)z47@ACk?w9J9aX%OkUs1*5)H{M5DedOE6pxvS&NrT3TRgC3jy zq+gw^L`*iMBI>MdTNl-7#oIZ8H3E8b%;D5cSvY;>6r{B+Z;~XZ%uXgltL53n5RcB{ zpI~uK6Q1p&5C|dt0H>7kid#tHQfaZxuR790io*xFM3=nB0I?YQ~K`;$6qhj~geX{ZH5RqNRZ%4y+wO~us9h>~k{RMU{f()H% z!?t6tRNA1P4}j7ifL0w5Gq@wC_13$_XfChdXR%su66W1?U}sU~GevlByZJSB`KgBE z%+ugSK=g<0#u{MuYY7pfvW8bG5k^W}C%0%7LB2vVpl#ZRn|qC_rzxhOub?Pn>?||n zemlaN4?g1(76a;OAJnFhH|^Yie;BQR|Ngvn<>@)vi7IFN$^DI((K73+8ltGj2cs>< zO@GUmYC~Tqxwmnr!>Rx970f(yJE-IpNq}&}^ybr$M|owD7e8fZxOnR+dzuwHrUv^#g|0~Kb0d3{K;biX znD_I&s=ee1;qy2W0v)`4kBkwRb}~!6W3Ph?#y$tHfb{)1b}lm@Ii#LkOIz{bvC0Dp 
zIuK=XSbMOup-b)oiw^+~4&bfv%g>7W+@+wtO3|UxHyY_tp)B=M-vqmN9GPhQytAwp zk$a|VnA&ie0gwGnZY-H^6me_cn`nxMP6aH?)e$Q8ciL3hJ>QElawLG`JbtmF^*D2d zTV>-P(0tORY*vVwx79(7(h^IayBRvRd;|rf)@CC61Y@3vUAUs?YT($wjwwFzmms30 zOw4vRk&&ga>AT*&xenUGaB_;BECn_!Lk3V+t4FIqAXL9f5Rl9_@#)eMoU*cq)^$)~}zj5I$UDhH^l-W94G~hK~0>4H!rie+&TX7@NG2=I*b9wM1lH^d=jcscG|_! z>=95kwU;hm3dzX#>2!pM5UEAd^axVRSyN9~_2TA$9?;J$S0|m|v;0|Kh^jncZ&tVW zK5MHGBGUOCztdNZ`wsx@XF8RnCdHmoy@-Fdy;5;n+85~bAugZ+zw|xGOo@GOVXx1U zksCx@F-np0WN)lFd@c0#UZcu2zIwoXPXhQ}fz0PtQ0fl?lz%?R6B0$)Ef<8hl(A`# ztbJnj!l2g>QpjQ}Icn|k<3A+Ky1(b|cPg;Y%v$ISKdLV z(v6zBISDIz@GF+xn*%^mvLc`bC36MN7ApxrHNf>V&iA$RqQ3m+xgqI#&=t|hYqwra zI7QbvlL&B?l{)vS6It}eZx7yJLxjnT5A0N@ctCOM@bx{J?C5?*zyGTl$Y5X5g~|oz zY=zg&z8KX3XBQhgrKRzdd+M#oRnVgoh?$ijzyN1zgk6GQF@7-Yhjbq6yt~`~hNUk|;Vb zRB}Sj30eP2ZV^*9IGGba>|FUcnFG`{rKicwl9&{n@5l&-l%B7?scKvTQs~Qe0^W-H{8m1{uADU;E<^rZ1VQJ|IobhrFevct zv%y0yv|rko?DVFEw@33uqs^`Xi{2-Fzx#n+tBH*A)`e(eCbUB{34!C@SV8~nb-D9} zcR}EFDqMCXRajnW>O}i)mu(&<9O;UnUW{ayD1%Dj--;$#Q9DJlfjF5Rn^~-JIv^lc zo;W9Lk0p`D69}TR&xTTce%|M7wML>D?+9@R^BxpFCni@MGx}&_|28*9(@ zcYo|b&aL70#(ntvX+?Sj9$}`LD8(D)$d-C>b4?+3``lV^nBjL&6i=H`-@_#=OVFdGmm3R5xdk zD4(?dJz4lVjQoT7fy?~rdpJP`DC@PCNZ2<)FNAQvwHQE)5taDs#6?Dxsa}PTEFi(IAQxC8sY|1UF;9*Crl1(VzpI80M2Ed4PMp`?}N#*s~vwS?@}#fD{)b z(ZJ@3R)7VqCv*xJ96?g;FZ8RX|Dd1k8FnrSwAvfHs=mBf?fLPv*U}g(@B=6`XES}a zop{6fCh97s2HNkhQ&vS+%o@N2=QFi4hm)D1-xhC~az?;f!F)3-}+X$^o(v;btah|DiT|ac7{7Lt!Cz5mSfvPy(3%MKD}f^2scZ2HI;+OfSH9>b8^u_gaKT z?)xj3*jJ5URh-COW!VcUJG1d&iX9WfH>`6$`&Q!L7l>xW-H+>EqREpr<15I?F-YdP z>7`dT`1=z}eX>cNnk54{bcZZkyyuO7y$L}|bD1y?+bBcx!!z#DZKYkRrFxB6l&^kE z6iCAkhoAst4V6ZoR)$_%{WPm-3}-}6($42B{n3;wAWM3(JC)STP|(k8utnWcDoHE{ zc2u4ZoQ<$Q@E^wpOo4Y%Jsj#5IC@DaA75=vp#YFg2ULaY@=aaZCT2LS0iPE zy}N`T-h0`}rKUq9Ov=9S?oAjsZ9ywOYI3;IjHM7q zOtU}2SD@p9sv(sV)R3i0KJ4vqxk^4f+})^HG{G3d>qr~!W50qW%HTAt%PtQlQIlk? 
zT|&Lu))&Jw`JRO&j?ssFa;=NJ$@dN&nOV};A9VT@{rs2m7j{F|-P<%EM!)wlpLcqN z<h5^6~gHODt49F{0qmGpKvQX*4h0Eh6qOrhFU{9;X^PY=*qEKIjz zSW_s*yF~_UK_Itr*{rglerM_Kyk7A+b;S?(Acg&-vBJW*+$*A?p!{2{QEGc!rP^&* z6$sFiTp!j^b~Pom`gT7b7w2p_v-T@}bMHw(WsZk|CQ0WTa($ev$wj^TH|z1*WfQ4p zFFT#RC!zkQg-@I=FBA4B|5QooSRm#X7mQaD9I<3>-xC@grWyl1t7OKBl*!%utzQKW zW+P1!v@zTf#CeX?z8EVpGa|x3OLN*hafckOK($7?++)wVmvgs?EgjPQmi?Z zVwpl_p-Y?P7;1_iub>BDWa?GmnVIYVBmMDv=P%g3^$7JyPjugusEf_n^k1$sz2jY& zPR_#j9WTqr*T;MGE^}@!aLJ1YTk!glr}ywn1j!fM__GphUXWgl0+#XbV_He^ zf4s6v;0Ry%od8JsgN8a;Q?4tTZJJW3S}`h1p^!`u*)H7JC=UH`?A{z4ILkqiuJ`6_ zRtr~|7a%;-CJZ+^DH&z*2_uP|IKpGodOmrb30Qr#4=zW%=~gf>M(7*N!Y?i_ewA*> zL6(2i0B$tZ(s?7s^^uhom$vx$)Ak(o8W#pDigpM*W}w+#c@nbYmEi>^}ZbU-*qv1FzH)kv z3x4L$|1Ia2A|vFodZ)C5pEl{MEJc4h1q z^LTX17GV~GFBW3@-A`K4NcUefn$S^SL`@Rh2ll|*3zOa33LTV$G<~{&+?8UW%iW>9peM~b~5PXmEcqzL8$`X*fH9k9ZR zQl7eO6eSiFI%!Y+XS=4AR8sAzb7OUiLZ!%_mb*p;d%7j8pf*=jcv3*j@B!bsgy4!x zTEmW&Di7pyoKY>FXJr<1=2d+vJ*;peIY`wBg4TxDjTc>DnF8KWfa2jfdHeAi6-~I! zE=*RwF)YK8zvd{R2n;+x&FYYJFo%8W23OwsH=SFOU83UdkWN5`X@3 zqc8f+S73?M{F--ukV*gU*cNR#Yv#c1j*^J-9AH@&wD4N-(E(!^vRv3F?bPFy_noYP zfXbh-yVgq;uomlp;Gq1(xzM`8NU3u1j6h(!#BUWEvHd%gPEb!yOgjx$yS!VmdP+i| zmhI0=eWWtYfo**p10844{IkWuCGyy|v@P{X?zlZPeXpjholom#kFqhs8$-i%+%NvS zpT3-r#-MXlJUhB3{LQVA3l4)@q?Y3q6pn17{iNdZXt|}FHDAsaOM)@3#UXomq)7PZ zZFx{X4jWE89Ol0O!;vQZU{32@KRPIv`tvP)@X6LOty~22OBJGFGHw?owAwViNdV8Q zSF^{ed~5v*sLZ^R)9(1Wz!(z@@X+5%zX^ud@9k5We=d#dyVO=4q1TEiK$^NFN>6=d zNKT(A;7iGPQp@Lml=%L-Wq;-d7Y+0BE3Lf6*K*Nywgy-_0P@a8l>};V1v|+FsuCt~ zTl)LUWZG7H-?acotZ7gAsRoxjzP;)!>b<>ys|<(F{1F}X;`u)5wcKV+nvRlqhk|kG z@OW5)xHNFkdN6?@)414vZMmA?LX|&$t@MrKK*as@@1kkjn}xPkz5xaoJ>&myR(R{Y zyl881Rjs~YOD2q^XgVtuA)R$w7z8IFT4=?_V;Te9xs@K%>rxud(gi)&IOM0l275;5 zZ0Xr~7IMmGMD%z*m9~7zp7yt*N~&Br*+V(5|3@O_O{Mj~8Vw2sMg=VJ>rla6QKT3q zYxZwRw&7UZJ>W*+wOZ}Cd#FysoPT-5THP6M4?8CQq_bW1wHzNYc=(r5g?sl;251d9 z{Ies`>4Uo&U?FA?Q$J*914+F@mAg&0Ccmn1Q^vM$J!1Mi&D{%8kuQXGXk*_{;faA`~`f;%M4BIb(E6ZYCoI9t*&-9 zsGHS&R~)ov&MSnukF5idk=?}fDibVczJiScI?@foKJN7J&4(g&Pndg5mth7+>sLqR 
z4Vc)CX*(^cJFR|fT{(yTuw!n%vDoKn_3d#v5$Rk1VBo3Zn_z|TXeKrTT45_I>zwj_ zMhPLO{ACQmd!yYSC{PA<3^3?Szb|j}v5QWc1qDUQR>&`3meRn%^7>7|!HBW|i#k6! zet8#o^vl;yZAFJ5Jk?CYzxd=cA!|=B%%7k&>bZHmButME6!eE`zl2|F^k4%a93jqU zdBnXUOvpT4?;)rW@i{Xv_NW@bau;6_!8 zsnA*hTh<^w)u2P-VDB(#REw6ws-U1CEXFiz7+$-yExML>O)R_Ie;n8$P~;o;cuO+J zTX%${1rGhLXTB~&GX9H|z_NAptBKsoXffx`&dv+U?l#ntC{EC{WtQL8M$gegT~w}Y z{`AR9M+r=81u$<1Ok)E!$m3^aRCExsx_`03F(9Rl%Q_^lTmF2}Py-1GA$##2w6|rV zZ(>8jTYM|p*r-and8xT3DN)E?0Z8O`AhnId&tH&LlpRC#(Sf)mn#kTTs-AT%oA(RL z*yEYMlQp6^g6l*sRkB45gnI5Pc+g%9Xklh%#{I|${E^0XLGxU7n`Y%K5b<&ke)Jz; zD$}5?x`MmGD}Px)#s_qFdZRfvh*>DB46lCsMc22?4E_7T$$RomyH^J0jDWRgmVixB zLE)r{&)14KUuw_i^(`zD@&aar`~@<`$>$^DAA0N4=uW{zm_(u!MJ}XA3U3skV4)A5 zrV;b@6d!)n7?in{W62)+T8b_$<#iv%>?yZ=dekLf875Otrk=E6zt0*XJEY+XUgr9s zYxWZC6Ftp4+vDDob82cnD82gZYziftL2r*b6j>3W@hC=~TTC(KsIcbnOwaK|mAtXbfbjsZE}1*2*h)mJ`ypJy;SJnyJlD=>@Q zf!HD=Hg+;-O90fN2Y9pODSylkXukkpAQ-}J~2){qa+SIS&LEBn%GNAUH;qvc4 zORT7r!Hal#QO}l!^z4|d<@J}DCYr)k+1I2p*;eiv_d*?mK#JpCx3;VK;@4z1p}qE; zj}=fBP*bZ&Iaq)!7jLbb#NH9p$zEv>3vlfAeAayoaK-ckHIo%!=$($l@(n)fHp{=Z zEw$QTT)%1YGsE^VkxNl$k2>V#ua}%Aiky3;o)#L7NX%0`j z(AN((<%CZ1hYo7X8aGz!+HH5nD147DSuQr1&g6|h9>W9+>Km~R>CKN>-5g23t2#Og zu@?p_3P6wopyo4ls10Ql{k)ThMmP#| z?;5SJooHf96h28)PJJ^YhL5jH?~xYw6wP^Pp4WfWUAIW#9Iw09qxrLXKTK9fn;Udd z5uGYhjXX)^*N6oTXy&GL1%mgtrxahnzRAaC@G|k2bmgYXMR0^$Eve`6fil?+Gh1Nz z{wU-Fy)iBOnrp#7nmI_|Oz8OL$8y1+^yLj%F@;&|q8$jYKUiKNq3(V!G6Z!dt#%7J zF9EfsnBlMOx^1Q8_{++9BSW`)2-#XUkq%d=G?g{Dm60#*{_^@?>6lUNj->p0f_g@Osj~^1k~%l1@xU?1nNiiponI}7sry0> z-=hqQlSo*kcBOE5)DWyPzgY7%=qLn?1uxDGb*O~_U9^1DDAv^MglXV=tZjLxLDA3X zQAx2D?4YGAo#V1&DJ1ruttt;49IYrF=a)?KH_AA#o#KwcCNumw7M1W25syv_oEs^^ zcnvaIXpyUhN+R)CyD59=syr^kTbDNMrB#(x2yn{Z&}Ilw;gSbA`NLwq4$YUP@V6>dtX_Jm+$&$&pR4Vya_w-nLT{LvPHS(8P6^+_{-}8V z?M)-J={6h#d{V^-V4Mi`ivT2gZsXs;&i#{l@)g2DS1{;mE$OxrIFzT`T2ML71p1Tc zzHo4Garl|+we|tMH-!!jN9wAQye z@JeGAy(-h7lZ2w6fm~%lWTKl_t)P9YZ(YU;f*a3 zGn)=!(}GBl4!^;^PF9}})RQ@a1lV}uJ0=U1)g<`W5U&hO3@X#K%UNOt=TAMwxchT^ 
z%x-OQhV6xvWS=~lk?p?&x}9>#Yf1dM>q5=M)Cjc+sVNN)k4>%+%iLroN$ATHdTS%H z=o?QIX^P_7%hq2HiBF4}2d(uKD1`RZ3-@x{|0Q5G1?TFx=~9=bBCPexLHFtP;Ffy@ zOVTxn#;wz*4$Gwu4*3qp6+b|IvhXz{_57-!IpFs#s1X+E067Qx*2(XF7ubEcyN09K zN&Rj(&bbQpJ@Q_2P6>v3bp+)8>{hx+E6uIu(sabB{@lE+yx1ZMbJ5>jol?7ea+1Yj zsTTr%8vdM` zztv;szo{B0$!T1_@~&#-C+16$5PrKOTYh)IVg&DZ%WAYN7=LQDGCLzUi z>?Y}IPXB!KyEDImI zg8v>F7M$;34gexEK;c zxE_R#902wP?Be@_qyb!h2q-xehK)V*5^1TZ0nDy2qOq5W&GbFSXeodtIWE5$d&(cJ z@WzpY<75C&pKkjc5^(v7yCg9m0B)dw5Wn0NPsfn#ZfH1pzDl#w755x8NTK%%@CT}( z(z)p@(bjW3DM8YF$CQ*8Is*aauFSp7Qu)hA-9|SZnOZ)!i8>yjSCf3T5e*f}05@(9 zl5Ya(>S&!gOsd!wzQSDC)McG~h3G17_H6|+{%2)|(=_djV}M5{T^5_DB)^2C3Fz#w z!o(89Ug#wQ@$4W**XnWdS)%KnYkHl+FgdM+AV9wuLI4eEg&q$w4V7RfC(;L zvcGWLOmBF|<4z*zFtVe99|E)(R|9w}WmXC5IRPmUczVRI@k@I=onX|yaY=Tc7t}4w zB6+Q$<>1Q59~=es74z|W5T_9V$!wvF4Fh}<;g_5S3!HNH#9tcKQ6kJhOQ(sgd=Xp? zNjiU-hkZ2OXn-sk?p(2ooRR(oT$nxuFE-;)vn8ZrYj^R_TcsX{V;@d5emV2Gw`OmsD(j1pGW*(+q>HJE-Ff~wjtS6Yc zo3v0<$6aY)oWLlrUt?x}R^nP=mxHU#t?8EHNZ#WN59$o5!{zo&=;*#mRAadkeMwEK zr_!NLNk`o@fieU~=ejoyH9cNEA0M&nhIV{jyC(Lv-cJ0w<%GM&L`!5d(Z0$p+!$d~ z$Z2`k4N^)@}1rAfQnL5?l) zhUC*H@}iV!+uvtIZ9NfXPdv}`eLm`xT_28x>bZMWAty%5H=`LI|4Hkqg5{g1<(SnJ2V zB~>`vVxw)`r58kvYreq9la70H%!S(Tdf%T>&dEq@i)*Di*q4Blds%@H-MHIOBbGt-CX!@nkX``aV-gq1k^ z9?L%qj_TmSAAg_%I(}TaGF4%&V2?B@qUjc5PwWrWA@{8B_5V({E4-(3Yc%8TR<_o4 zgsL60@)DvRlRfQ=@lA>|=x@y8$9D9L(6Z;6OR@2ZBRsz`rj6m#Umbt&XcfE_=oK3~ zy9adh<}XbSX=juq({zVe69*r|#;;D6c-;hzXc(<-aBW;C?GBi`7=(U3q>_U40^XZ; z?(4UD(AAUIW4~WnU^D0XG21_B`sn^==>6=rlda)PNup~s?3PaF)G`cf2OfH*>ijQ`;Js#m5j z`toJ`nMh0F6$Oo{#Z9i8&P#C*;? 
zJY{&-Di>pR6dqFzUj79vAzVgQR&3hH)z#H9(bqrswdeoi+5U=0;9&OLhd0eRvQ}@^ zd^Bmv$6*T;i)dJo*eju zIn+foUa^P&9gV=s$VY`hv)a|Jku@#el47QygJHeyX0gV@?A(~-bhG6Jj(lIqe)O;RV1>ErQ(ow;T2(9 zna24;S#1T;CBE~%R_Qv$WB;T*PVn)TpIjy=PEg-l9=vJV;Q5~QsKQO{?=DryITGV0^>cJc=0IgdCDF>5hH%2N6_`m&|6Y0j5579( zQmWjr(oHFsCDZWar+1f^rQvMLulfR%fE!T>(PY&CR$-otIkF^QD${4R92&65a$0?S z{X$I!ufvafX-2R^*czInN@jWK+oN-l<+GTCbN-eh?wRSlB6ZZr@*QFdIjoysXau=| z^2Lh#^tgJJx$>sE4CmFaF=4)hr6omjdZOz^mx)MH)n(03HCVO}3NE-lBhSWSqxi>7 z@!as*#LHmO7evE&Hgir^wqd62KC3n5$RKJ>6fV4$n~Dli5Mm40@eT`8?9y+GYbehF z9n05p?(M`9rw0cGmB90EFC-|BEE?IB{G_6y4tvB=m|$O1@iOk>#B|mn16JhtTKsY5 z$@z|qOSc9F27I{x_QO<2S(ip1k(QvZ*vo4gLRr*Z8OfuyDYz>m<^H2mq}RQ05GLiK zf!HdDt$8|`8ri+$x%~ab55~NQkuxviG{@+lw)VZ|bMc1is#H`bVUkH^+E}e4@f`RY z=I})_U)SRBUv8%dR7X^dqj&PcWj5-NTXW4!&V{nmboW239IF4v z)q97-wY~4d&Pk6JN%SB>5Tf^57`;U&dM6@EqJ>0>&gh2GyCDS8`;0!KGdiP; z_S+&kpYQw5b@^u|*?X_G*Luo*-_NsfY$?8x0Qd~P!KKCP5~b7M{G}eh&uHjhwgeRjq_V}2acOlx#aS5I%`>=0$hwSF+^u%&MGueHW)1mKqco0xY7j#Q8|4~+5I zOnllP#DbK$JtT-AUwLJKM&u|tMpet9&fbDlL2Rt$Ap7B1$V>D3?UkyXK2iYi0Y2%t z6RV*t=*k&mvj46atboAv4y}TNzq1)n;67sZ^>?&VYC8C~rGaOd<|k?^+a=1V2gf>V zY0=9WR0i>+`y_>PPVkH7_zs)0vX>`;MQ^U!T1b4OFfRC7I+#6jm)M$*p2fB;Hv+n?$6Xmt!T%lQ<};LhL|~ProlDk=*H7g z=Pkq7mU7ZM^;z0&?MlGhq#4_3M%e=esK5U|-Yrwb?hMIsKB2z_%YdbR3+SnWI(aDR zhYe;2yv{V+_b^*xXC+Ho!x_Wiq7Z!IH@M!#btTiOc8&JXY@8&q0#`iijUdGo=PX1+ z3hGs5lva`H&|6thgr?MLAhgg;nVvT#Tnmr=ktPd^_;%eCH)(Md@sqz>z>)wMF6}lW zfb@Iqb}~|?iY4`~Nz!0HU!FxBi|3E5pUE!Lt#+&P=(T~g6p!BgzlP#6%Iu$Zjv%lj z$EBjdquLG2kgW1r`5f!F;oN~CWH;C}$hh^>gc%Z|+`Gz7BbMQRvf6>^ec;TS3q!ma ziyr*-ZIJEj)rv|Nrr6{t#QRHL0uz)ZQ|Kz$3+N*&DCDvOB>j&@=e>5Pd}5#3If8AT z>e4CZl}vApL{vV=93d>C4um4XOtJK}q691XnlcoQli~&`+BXZh+gA>JY0pN!O$o@2 z(j4`BZw`&Jef`_6?jFO?*1tG;TZM*a1%4NY{)YT0pYqE-^DDrbM+RKG_3-0YzYpo0;UyedA zw8*!4&=Es|d{_iypryM-(WcOFd9;uyYE4V*tmfR`p;oSXa*cO_V;Dty!#bn zRcfCHDpAPeYw?U|2tIkpHW>J5503N}Ub+f^?k$ImIo8CnrEn#bIW zqdGUza_ljWqUozUG8CgJITPrmciG(sQwNN>-A3C}vc8UswOd^Y|_- zwoOH(z=Dm<4K|?f=4GrYzK0|PvLT^ywXx6i#1jum4?f?qQt)z_^su>c6>6_MU|eUA 
z3F`8e9{*hRUzUS2meA5_widF@Lph^HeRn84+Xd9LyBbm%wQ;?dfT9PrfM^zUs9f2(tv_Wx&apIzS?(F9>eY~O}iLAG*KMEoRjmkLDLZ5Nkm{;e|a zS3nkEn6I<)Hk_G1GM$*n@q3$y_mSDsl}0XQtGv)a=}qGv*K;Nt-_WA^NVBia83r|0 z%PuS1a~8$zffqow*~eb&2GRaA3GcX~{ebheeBEa}bAbcImF0#eCK6Nq&plHFou%X1 z^oQ2g3Vcw=qoW!I1_qby*_Tv;PFWd7nQ$w}zg*C=|9M{=FFQApj6;;SM$`H6JXqdB zL58AJRm_&@VmR6?BGw_|k*RqX_|!nZcWqhxJk;-ys@fW9R>n9vaL0eYa@o}je%$^p zDELRMbldS^0AI)=Z)>^f9VG@x5$P3XolZ#Me?HCZA@c(eyZ(KJM#Gks_CM{)*mK<8 zTa+18a!t%}49=@eW;T8pvus}4R&_l+&~wv@A+e&+BHxY<`N_*>z}GCWt2?P z6@6Hz04ak9EUdBNL#`(5dWKKm=DppQR|y*GucAL2ZS6-l9!?FUwETVHpK5Ff@V-es z$w0%J43LKYzD;36T<8kw;1S!W{C}ScsG03f%_nNSm@&mhoJ861sv}lPg9b8Kx~+U? z=TU=O@OkqIK9a3Yz6!b&4CdSXGFn-Sn(g5ukfzS)_MqK7>t83-1ok}_oj<1f-KhTR z@;5bm>oO>!rlb?0VFgM3>fgxK;vM$-Z|HX3YzbDp_KSyHyn5Q;6^Cku09u7Q#LDgD zR>1toxAY*>*D6}r^#VuRX(38-$0R0|pIIL*(P!})j=ItX6&4&Z&nj^99O38F5XSv^ z7#vi3yW=W)#=)uk2xu)EE!IZ8Kr(+KuE6hmB1O;HVVmGqF2$Gy)YhHCas;W>$?&yxE7DGp9@ZHh`l;oBv(SIAc9!tT#7#bV)}$XUSPu zn#lMnQlE_|_N$SzA5K*wU(Bi{fY5W;&i#yw=g11u?cr0b5UIe!sw}AweaGJBsF4JC z4v>D(EzY;!?f41Sb@cOJpzI&Pe*^}ibw=^y&Y=LeqJ<6J85dX%gOv&n4jDn1LO1qD zE;SSRn`E=jlWI7oNaV7O7Etw+FoOVidHT`(cTmly54 zrm;J39vda$QgY=i2+n%hu#9}fFu%3Bw+}glf)3Oh2hzc7LCmkF4k9OIpAR8UEvKq> zq*k_DXsZy#)l_+9;UOyeYd+tvZi$2QiryV4hyYsrR^pjH{a+^kuJ>1!=rDFin6UE$ zZ&cBJp@mx%Q*pw6eyUBU`5j-oyr!SMO;YY**CH>Uc~_%HtMQXFW`5xu_a;HmMjb!( zl2Xda<<`EhAy)+1$MAhPC z&*X!a;;OR(^pRbDUV9jCO0eB=j77&6*LW(&fHt7CtIGfUT~a_Uxe`c0?1#BLbo=ke=<~Fvk+ita$@z9P(7ufCLZxUR zV=LKUpy`-Kg$b~4>^8z>aG3gDA#tl6Jm09K$jGzibHR1oAkU77de`B;?ch&W*9v4C za+(FYjsI>R`TMIEhaYZi^4xD2lGoIzCl`hHinHpG@Qrji;r7c^QwS_n4Mu4;3HYvv z7siU!18`#q0bwv{P>>?k*nKg>*L1FVm3^URBSpgELZ%`_qm`4n%ra!1y=l1LO*JmYiDd?X5#G9aORf=vx-eAsR->cV9{1Jz!V4W(Mp4#i4Q>09{=E?no~Z63 z`J7iW3D(g`A|`;scLcd~!6*lJ>VHYeOYNfpj}1zv?XNlr=h0W?Wmr!@I(wi$%tdz@ z(R?9Hbd0z&eZjw(TwK2%O6fGir~1R}`Owi1%SmzX0RXE1tcd1DG-r@7U*Uzne@REePbR3mmX+#TW#U2z7AN~gfg}Hhoa6U}4L$1C zjtqo3tM3anqzsXh+tDbkjNYnR(?U6YfnUUla*YuE9^^Y(5(`nn<=2v%r3i@s5-v@|!cnP=IO5{}D6-JT?efgU^01Ym9X8c;&vbo@5o~O2pvR`Z- 
zlH)z%VjvZwms6Hy>FIwY4l^4W_1+R2I;XjM_~7QptV{nFz)B}n zVy>aynb$r?)MkZq1?Ka81_=vxWBo!D{=3(^ml~|e5Tc?F$cp=F^m0Lpk8P0Mw&8bF zYOLNbPxAd69}Ht>t|m0M$0RiUnwZ8Otg?75%}GpK$z&3pZL`3vn5HbZ_VeJ5b0&+o zP3h~YZOt`~pJUHiO<8X+Wr$wv&QN0J{dbJo-kQ>z?%Caf(tp8y-re|IRQY0a7=^Hi zTwj0X_-FGPsRIq=Uu=!q@aSZpBkf8{|HALUjO_<wvcG8Q=k*4JR61-7nCsCMTPjh9<^{_Q}kSBO3IW}vlc2&YHRARAC#$%?O}rBX;M zL)0;NvQ;nVkL|VCpWRz7RIWDlkpdVEbfu|DR>q=1lPUP(Oqd;n=N@i$O_vI^2Eb4h z;9Ej0a{`I-?L1A1#gE8uf8yc_s?I=$WNS#9?pPk;{!PJ?Zm&||c=cDft&+FjHTb(! zfb2hsQ(Aog)0r10ph&Yl)$YSfBIZf!{hST_OV>j_6K=m{3x zT;2a-#U!0qo-C~%DIujVBkt6pdUonQ4MkGV)^X{Eg6G zCA4yvgn_Pm|A$Bx5nYFHW@;&#kJ9_o$W41s-GS7LGdR#BjI@uf5W{zUbmFDYk&TKw z-006BOFCa+fsSlRQFf*OI+3w>{P(oLsn62lZXGWHx(4oQU2Ni`eZ>!SeBgh6wv9B@ z06^>$`o<&ZPP*%sS0>G9tip@t$6gV%l1oJLx7_0LrX?B2 zRR`tOmF-pc1r~Tz{msq87Q2J+b;#vXd>#s*C)*|CyAYE-Rd}ELMlLE()~h(BkpQnZ zG5$;r>5`=={&teK@vWySuFYzD*JB8cPn&T>JA_I{Jh9r_N#i89V}EE$ZS^)?E!Qqo zlQu??xgv;3^ib@4?9N$_jl+skz~5#6#gY|s4QIl*go$`bQ}PJmx#& zMImW%Po(j5g)^5+183}aNly_)d2DK=@@Vf|;OfiMVckkXt12H%--q$Ft2A*AdQolN%LAX6Xb)X>B* zo_>Le7m>oAkEXEoa2&{tMm^#8>+!ZY@lb2p#wPJ{$K(kY*@XL@j&|%91eS|3ky{_n zBprlAF;xV~EwkxwSbqC+H=}YOVS2HpdFN4q#sBBErazt&@Y%LdqY8X)@X}0oPrG+bP9EX4O@atvzR6;tl0@tV({nRXJHq^?(lR?%-SNH(Mn+ zAp|}S_vE4&L`SZVz#2E)rX7vEY2-tlsyDw4x^Pho^?l{}z*OO%*8o3TF!amTZTl1F zzf*d4iIk0fNCsUC`6>J7TVTK(Q*%UG;_m&t*rHphh&`=1=Js2Y2X3OF1of~?_ z|Ghenw++G9LYQfK?4HWz%;_vJL!o9*QA;M0;hY)$H!o&c2v~reF5&@cD~KeHe<+z^ zuRbs_OI=d@-;kTNsA2%3q=ZdF_zmi13co?w*or#$igIM;5*4M8lC~Tiev7@;s}|=c z;u?VUS{Of-LCA9*AKar2*)C*H1{jb&xB9$4k5cjs&z=?MT|{>UnSj*dWh1kz&pUi0 zdXm8t19uH?Mj43?%Ayd=5vZ#YS-HC>-t7q=h529|MP00my@LqyNCZY?y=V2m{9ypz z^+E=f`&u>&%^Kaj4gZ_f=JY*Vw5tT*qXd*b>>n{N1%C(*p7ON>9X^^Fte#Et`*2uM zNb`z1?hN3BDjd8NZ&|eS#)cE|@uW-74;IYdm4SYSkRT3T#+>&u(yYDB|MR3qdjOsG zVoT6op&V0aLjHFEI5In#x)>ON6ND82(0odfM7HiNU`D zfY`SweV1a!&eXB(iAcfKf9H#GOAYA9`rQJ;f@UK*fGYGgerf2l1f>>pm1Ea47@3ln zSI7@n(}AJ>ZUC=#n+j#7tclKwfhcu4m0fT>^#`-?eCj77 zEpDGc3bYnl*mjFF8!tv9$-(^L-xbMhky{COsi+(DuJ3B5T%Me< 
z;k;A&>8P8?%JqTxs($|WP+xcVSnAQ#Y$aS`>by;HY*bcG5ng9LCqk&D8Qnj7tc;|A z#Y#o|Vn=2_!+omYny$34!A#I-I0=Y*lreA6-W4g8`Iq7(GXtzi8b)Yp9!OYx<92*} z500$Pq8L@yR&jCLuu&YtbevSlKs3Wg_ zMo6%5^p$q|p|CE%IeD%3w3MB14#w}((D(p$zSLnQ?X2?*JIyYVGrw#Tf-g5TB@ZS{ zX@DADth?`j@jp%nWEA^316Pj&Nz6({*3S4PO^XYZXXvMYA-O&Si zB;At32C|C}9>4$NDw`Q3ON8IeIJYoArp%wsp1c2No&ch#y~4s;YOCom!EgS29^C7Z zO{Y}+OFv^z^#864m$cs)*cKdu@?9>PFvvExW-&-C9Lh*zJtneVHe3E9CFQ_dyFoxq z>}7|$z4nw7AaKC|@3e62nbK5OFsfMPaWUl@{-z^FsKQl`8$yYO*H(gZm*O#)c(%0w zi@ud_xLU$xQ-A|qH9T#L$4qWDsGsC?EC@*#n=X97xJ}U86tS`A?yqMT%1h~Opty2Y zqdjiPU)7f3@U)i~A@5YiRF?w-VmHuvPGw1NirfE>F8QX5w%FuuC&L5zqg%_uJl}; zy_>vBY8pG0F})Upznw1Z_F+-cQIFWssb}M1U!dr-zo3Av#_xn|*y!zR9Z~bv2-ZJu z0hn~7+f-v*1&56Xd4A}C+}PbjTq-9eb65o_i8{6!C7MatI1jQT|LB?(erYFT`EBuS zn(FK2O-+8kuy+y6b38}uKB3_NocAtbYiKBuTG;zlzX5ne>PbPgr4;{0A_Bh3`Hgm+ zNi7C&Qmaj(rww8!BUY^0WxTn*n2Zgqp`oY7tvBu78g+f(UNg-4xTmP6pMgqp((sq% zC0AG0;aFDy5t}*DGGrxxqgdRi^TY9B=dF9M^kgDAKh;Ju+#fXY=Anb^NNET&NPi8l z142g{gNAfccJ`p+EhXgEAj_cxJHq`4KI$A_Y%Q2F8k9Y15|Zv9P$lC|J;v5yY1yxH zwfgqsx2nZpla}Jj{{R{p^!Md{Ex>uIW-vu+3?2?^aOLk?fb@(O&7lK^Cuy|u1wawu_2nwFTZZ> z2>>H3*Fv&l?s21*S_Yxalftq?50+tW^$CA=K}iN6drvd7gAw3t zQN!+h=p(vbfNUk{zd;<)=KPC`lthcSIuV4}!<;U@0=TJXDO(FpPl$#iKW5Fm-LSW# zQu;{-$v1=65agFZ+Pl!MtM7mt1$Bo3%8K`?LQU!Oq)V}O96Xza{e}ZtX-$viy!Pwge0)32ir`L|5EEqn5XIn#eD0SB+1(oJzKcT)_C^4}isYES2t2=TF5`0#-M$YXSV z2MS3J^*VH=2h_xZoLb{#9LRlGMe<^0y>3it74phJao9ewb8(8>uy}tB$E7p>*c{+> z;gH-uq{{mppTZ{Jh3v>;|7WqI--3umU3Q&h;wSH<`81T#^*QNE&)2<~{J?OZ6EF?v z&b>Xah5gvMnnD|^jTDMu8-o6|9DUsi8z&82nRzZ+yD|h7-8KlyULRf&E>{r z5h^c%K5*_9@&mSdY7+9)ni(l*WMH3)@JH(CEMCBNpCmRnwa$&4ep2p*@$hUwESB~M zLkvGxP-|!u55)D6t#0sdAb%A-pa_!@)-t-B8sfYDd)kZGSJh?2-)|caKcr46JKp)O z;}mcQ-TG-SkH>{pput6ZGYIB`6Ij{yIiN*F-0)^U*b~kgi5u7Sx~jyL7@@A`W(1!V zQcul=HF>BqG&Mk%Npo3ktJ6@Nenw|OY2TfQ?fnrG%R%PF4y{;Jbem%Um5$g3*6F?Z zGhyoQrX&e{oYH%RR?}7P(lF67^{{*(RvGT{fQ#5Ho>U1q9CJPwEN*kG)dIpW5Lc=X zYOPCNzNldIOun}%r?5wWBn$vy5{k$9GPZ>36y`PmeTCx^f|R|hLadDIstVc3oAE`z 
z^gTkW@Qd^lHk_73+9u@*_9hiiLT;rA!wU1)UhYM>(3syvdW|+o3P8_PsS?PMlMWNP zQKA#$-m{~4HXILvTiJaNi?E$he%U59o&HvMH$#sRARqhTRM=Pcw$8Rcq3FA~fiJj# ziv|~tJL)F?muutT1ozr`=V0dDsfR932>Z2M=v7%=e)5CLq>b$)qS6dU*nM|7&(?J9 z)t;dcdHgv&PYMf>co7_F1aX604BoLH-%qujj;8((<5p;fa zvGfHsq{ck?1!E_r6{n8H?zR=o)309Ik6*hcZAYOtUoqAf_TS{&7X+l8S@J&-R0=;E z`{m-O{SzDRL?zE+O_WwvVYwVLKmkILS8=}%M|%6Vg8cbkCVw%^xYFO9#sna43if! z96b`W=J~fXnG8el@5C|{yZORDeg63AUc_AJgfYW+^P64TVr(^$Lzu6@{<1?sZJK}; zT>-2GuyVLk4IgQp+L_%IM+6cE&IdNJbo&Q>W8N!{GK~8AJgsV7rt`pOdBj}bK-tRe zi4K<^khJZ~iX5g4a<*-~isWoNEH-`UOjR2hjTUeUSN+i$5w#FP+)fHW$kCk4Y`GjF z)Q%4wljq#EJIHz?8HvMcjt8QL<~62*n;muax-D7Jv3{fk$-+Ua-BI4!OyOO9DRW%A z^P(t{KqDVYJj*-cDhlnftW68tsypPkJ#@s##du%8yXMb*f{`#>=3|kEODqJqUttZ3 zi;Jq``$3L{t59Wj=*rwsVj0}94i%WeJ@rYF7zjv3K7VhUetUZQUd;Qn;(QO8Hh4Jg z^a6EJ7ARWpaNX9;uhLKo|J2VKb3U#g^2Qa=Q2AdReM}rCCqGkx2|$4zQ@F_`Gp~BK z1?b59{V(Xi^5$S>E&J@8g?Q(pX^x&l(2Q5B*rhY!SNVY2vja{&d7UGBUe7VWx)WL$ zIuG?n5pcBsT)H?nyr==C)*klDU~*XP^!Wc+UZV#P5b2ApZ`kiIjWxfv>V5yqYXZOb z*6lTitEIvS{pW+aF3AZ{%q3O3ee*|Dvd%!$+5Q7;EdO+Ro^I1j)zrt@iInNscy>#q zS>`I{DjLJ9mlY0>G%ycQ#2LV|De7Y_UOPJWq!Q-`=!|Y~x0ly`k6x#XIvr=D6N>Mw zJe~%znLqS825_x&Lj{p(h6AV8@>-yw+DPh%LCfcvlHBt`Wp)zWkjkxp{Lu?f&(JD< z3aihNRet%TeDrdAg{zUU>P&9u*oHeB&qo$6bkgGZ%wH0w)j>m4Drd$qqB3qP&2gN) zkx?s&b`OL&xDA}K+_>GB9+ZtWm!5@cLZ}$PLwWAoGe?IL>K6xjo}GFQydrL%6Pwv9EX4G^!!%Qm!|0 zS_TN;n#>hLsw2@S4x)Kyy4EzfwgmCdfZHl_%&`t*-b&<5UzsE5`3&nt4C_vEI( zCh2F#|9NM!`nzXf#}K%%Vi`R(Iy$h|!inx@HmZ4^9k)Lho9#ymdD?GSfCrqe?b6{S zR49J7RE|xOF&MenQ`sEq`v|Bc?u>0W@WG9$y~Nu%rBUwP{{UjnVHp}fgZMZkbI!Oh zZ;ty^gCStMhPRy#m7~@S8+Sl}f`EM`Lp>hMtjqm`RfJHKNv~stakCD@zmbff@jqK7 zc+Tlg9h*k9F{1Tv1a_9ve((ab8p0AxdE=Uz()o=4q9=APv9Q(_G)!bK@Mk%{C1#s7R8KurP$XpXUh&EMdi4^Qhwa( zg=GOsh1|=(r*dC9^SDxBP=}p9>NR5+_$y%AFeM$x!+7 zem3*Gv`KLpT$5|f+L%|}T?asvh@%$ZTR-TE+vimQB-(^X3$XIQwxXI5Qe7X}Z<$Lx zpR6{_t8378a~O=adjObVAjkV8(^3v1>R4F&aW}2d=SHkUcBsw53CMIO&rLb}Co}1U z+6ojYFH~QW>8$1KsvjSxuGzgk6#MjSHP!-?i}C@f!6iXo3f-L*H`mD`Ub$EaW~Y3S zYg`QW6>6#}0F2uCNJa)sx}n6xdDK5TJO+&RN8bF8b8C!-b)Rm)2H 
zyQ*3p5!JnxX97afPE@&>z1BrWaVk{)U|ZY_Ac(To4+)4;w*}k`(Z1! z3R)N>%CmG$Y)U9@px$0EeMid07nRUld}p(~c{>KJ^R|Zb>4|QDC%2BF|JlzlK>FB_ zBW(iKR*L^h`S~_*W};EKw!Ar})gSvxZV0gRm?iY#8u3Zh0q?yBV|k~1=iAoR;yGID zlLhfxDT85Jar;N(g5+NcQI{bpK^571Vd2yf@2y$`#^t#EpWj%pKjw2Q;(Dtz`;xoL zD%45&Z-;Z%ooo{{uFblMy8bFSKi_#E;! zY=*28XaKWkM|G*+u&ebKD{!|{a<5FjTI)C^L|4h0j|V+8=6~554qqLB%}N&PsQB-# ztae2&W=M8vb|L`6a~B{yC!=#Q0Vk0`k+7v~<^W0JS9f(5iZ2hFzx*^W4J$J`TqiIt z*8vjZk>eYz&>R3ELyV4A09j0k1p|+8^azl^D0JI&nfaRg8eT@3NG?Fft@L!BOFEI_ z zeIl(ie<626K59o|S1L~g^JX2z$5%Yu6iE1LsYyxOXr&_CQL|&k*FH<2v_kuN=AefL zY^qx6OJ_n^0GFP&-sqR*Wmgk>x!>tw3~nBi$7ICGWvPG$Vi*YVA~}JzZF9H~Y5?H9 z<@0ivRX*{lqPZ+A&wyP>E#z6dVLB-LQ#Qfyn1-;L$Iuf;lEX5y{4UGttCOrLzGKZf z7u$21T${-W1=?Sf*${>Hc$MhxT8;p#J!2X--?wP0Vo)DxRg;3e8Ktl!)YjZmeimh1 z8PgXu&O^a>(6ZQ8H|%qs&y5ee5YC0I0Dn9pzU)wcMqP0g&|ZeP1t|z4W9VD3bc$OT zajTj*PG8%P)nK-MZaiqzj&}Pz!il3qFg-d)ThM zW!LG*XJ<+L)0ZGnQ`mJti5ciBVeFoo=t)qs<`gmb9XKl@fVK!QaXF2{re59nC-jS( zd#u@9ZB4B-Vy-$g$4lI?i>hNOjc=p*uEpOBLJQOZy7poerzY3bp4~=shQxaXd?I(# zbvit_@PPxNs9^+W?PG_edKg=|UzF&I#KfRpyQPrOEI<19Is(w0WPDgaC$_&_WE|cn`1N1Aj<5?m<$(AS?r5jI@|m77>*=aRn$RUJ-QTbjsu&m%0n{pm z5U{(IgCKM6K)jOSdUbJI`b)fZClQ@Yy^>?WZZLW&$-RVZ%%5fh3vE+IKBo7Tt3Xg+ z%U5Cl@^h@9?cT2yq;1;g=xB!`pCnwf9q8zIA&c+Ez&T*?;I6xBT1Zv)&rfKf*k##d za}TNr)Y6q`1%8suXioa`3D_NRe&)>X$mD2Aobxm;dX|z)uKDIRi;AP3*g_tHXR*5g zN}2CxPa-7G>XJ3~vCrtA3X6fjU|@WF)i00A5F1UCbxje(8BI?oHQ?vi$&-az&$D4d z6m0UK&k}99$Q=MAE}c?}fcH@PVJiPaFG7lrBI4E8&^oT;&s!3+{O=yYBE(;Zob7C*iTB2=Y#-TtU1AgpJ%^RB9k1^7{wuQzC0QA;TFN7KZ&(e zxD$rX{-=8Y`*SHwU(pAnKSat4ZoqGI@ceh$67akwI^?GNo4SWgi^teg?qx~fz;w_D z0v(ob+XER*-s|B&<<-S$Um$)E0kH4#xpFEpoR4GLO;sa0Ag=LOxn0Si6hJS|?A^+c zRI-(ZS6-&(Q7I|I^A^kdgzOq_c!&arw7Q{EbK|2Mt-bc3O#}DFx>z~iG*(o0X(lu?ks2*kf4Oc_2+e_tWsMCQbFLJWqNao1X3lbTSrpw) zW2QvP%9MSdw_4j20VvhQ-_4RMMX8iDGI{7 z&;_^plB@lJQxX7dJ|oN5o>%nk<+n!MfNuYmAzTFrISf5_)GpNzB?Gjk4=0G4WoX3m zqw@-ByxPta(aILSDUEd?&W156vj|XcJ;MXu_$$<=M70(lkZMhX^H)Oetdv!T=UxhX z{r7%`)s<*_vfrcph<{r6B3W+hVQVmSVX~|_IVLRgj&E&hi>X8yHD2^H!ui#uI1ujC 
z9k+z#AyGn(@(t@t(St8tTYgBy#DEyliSJIU+?TOU|XiDGWJ88h_ zr8atK8hY=%Ki|z*KGA>V@-ddRwuusbLh327Zker}LTlPOFXaF9rw~ho9;hJEfo(<>S~5Q-UUpj$T_OC|SMJSVVtnDS}lY z))4-2H=f=g9kFkIZAql$iKC8-Yd1G#=~Y^_Em)e}s=xph>#L@{)|!AxOfjK)J#JBF z?-@1>x{04KPzUJ>nrwCof8UXl5@;9pT~;|)Ihg@zcvQ`k+=H;|81s>Ry;}R4I(TV; z6ghUkTfDiVb}GSkhimwvq=Yuq!7}~b*+ekynD>}j@ULXs>hS5YcD4nNOd2nj2EJjd z((`K7q;9g;wM~u+O<|)1hjvm&C|yxgg^TxjJ-xh6yD1mmZf6dVmv`jPj!YT%cRG}1 zyMipH8f?Ps$2f}`hariI?L0udS5=yoSvQGjcue-FdTX-a;wZN3R}eN(LNqX`iI(Dke=2v`1lySs5pw^JcWi!h$z}wo6=LgC z+T(t|^7Qh|!BDsL34N{}CiPvh`w<6avX?)USOMllWy)2y>F}vxRB{T8jD|!gODIF~ z;sPd;3pmjv7>c~EFTVH#|Ab9e#NPjj@c=<#r*8Y6uy&06pti=zK^)@bgu!4}j3msp z5^p7HFRT!|zB3Hyw7R|KrjpBM9=mVUk>6^zue~M5Pw9M z{1drdVgbf$53VEabyryHUEP&mQ!D;EC{7;Od7Kk<(P{n$#KW)XPh20 zZeXIml!J@itH9%LmPf*+&n{-T5+>C#5@{C_8sbjT^1Q>`*BY(-g@;1gP1RZ^4_jE&G$Y^Lnf*%v^qH+ki!yw zmomLm26S=F18@I$ZX+Z57`i%4tb5$0_&rVjw|h@g`QiAx?#>;4R1s-d1LlN_xcrnY zlin>_NZ3}+_gfBAVIgvLMe$Cuw5KHC5+7iE>lWDR;qv8+Nj#VRBpe9Qxc z27sPqD3u)rZqwHS+#HGeB{}5k%3B0Aq);Yye56F5S=I#{r;?U*2&AU2r>7DjRp~r9 zufT+~s%;5F&#YO?2bNtsox0z!?AMdUC=LG{k_?(=$0yJs^vZ9j2IJ}GM-!`3AE>Go z$IQlKcr|fLIlhZht+P)R_ZueUh-lE0X_Wh^@S{FLt=H)&r*386Rq}<}O8$ch3i=R? zA8V)nbMkI)1y2F_&5?cvg`5SM#k^0EzgP^4Z#`JN_6~6p1ruSy2$2%#x^Hi5_-yKq zx%00lATYu?F6d42jpRpz3P92*Oej*&rjc_Tgdan(?9j<%N+EPRNbWS|T_%H=4YRNT z;_Jh#fdH4@Z~qQohc#uh7ZlakeoiY{O}Zm&B}A5OyW64#3vO!EeCAAtj&TZj&4>E- zW;#l>a{l4{C!@voH+x)cJI}WE8X51pHYlao=(8y5Lioc!ZPb@Zb#qF%wrzz; zW?)fI@fAmVK177K-{cl()O4w#rvmF0hOMspZT#vIavZnKt7)7H;I$uLOy#aP9>~yp z{NgQzAs?v_&x^DyFo2$P^!2TlmOEns11aCWks!NvI#*ynEqW;p()G^msPJ}@f?U6! 
z5p_vzP0?;fJyXkF516EhX<`3{i7O@g@r2eLnwUpFe!0FEHzkFcuZ7TeF2EHQ;LcDj ze=@o@lhPQui8RhFFj>CGYfNm!yWc>P1BQF6VFiuc^Op9-9TIsz40Tqo8+x97o7|>%#fF8 z)eo4yyXr?5!KFl1+44EFFRbVIHCy+pSX^o|48dzF&5<+pa0K9`?|*@nkG53r-vQ(2P-iliY;JX*Eu=@J+JF5<_3A<`P|v@B1$D)=m+ zm6}4-d(yql`&tQNT#DPjRFvfJ;d&j70E@8fjcaA zcU8x1oz$iwy7u-ibPVC_=6mxZC@n-auuozS8s>IMLBn80{GXhbK{^+zyw3~zZe%V{ zP~K*|dvZJ8P=SI(u;CBqZ(ZgDJ>|%Y7SOhG2C$Y?B+Vohg#e&;S0hZVheM}6 zqYlN7QPiUr=WSH@qkyXD_ow;IoH zftQ-TveZ#_e}F4(pR`9r2r~jSV~T-xk+waYr8BWe(_NX~YT>HW2_I{9#K{w#{w*W9 z;7L;p*owF>=9#FcJskCFoafZ%v*OEbSI#Vk52XM{7pxFdG>y?8BWr4}kzR6;8mj`1 zXVAI*Le$_)P6DKADn6=XQMkT2lm*L>R#@7KtNa(Cm0)U#a#P}e{1bE?KQwP7&I<{cs#{vJf?x|~Dr-yzQwo^Nc~N{&uw$Js;$UB0wNYko(|Z2Ie{8G!Wo^vs zX@IdS`Iz8yyM{A{b&9cP8rC&^Vl&&F@$8sLOwP#c7&Q>3*DfD^;)% z_|x^d=sKI+7e#IiN^#`?jfkouo-S;+k=t!g+ZxxmLt(a@&ztt@CWZCI-(*z5CCc@znuqBYt=J;VL&$u9oYBw9`n&U03%?vyMl6> zrSCbmrUf`;?QMsPGfjsb_5-~N#Ib9RzH1-V&)B+xLAQykzaIu6gWl4X&tzC|;B|a` zp1a?`MuU`%KMt^$H1DpX&?ju>jBTaR5c4hRDXP%UzkQP{+a2s{(;Y`y7Wer@saGR>=rEATaoP*=Lr0bTLL?j{~aZN&AVt8!XTcx?VIP3*7}uz`}mu}zo5XwS3Ov@Plqqwqn{O{ERA`Kme~yLtdk1u zTCqn%#^^(1r(Xl}AM?V)0d^~9aELA4+B~X)!ih4u`>-e#Uh~>J_>>cOCdrD4q+FiK z^O&cNj%dt|_+@fi!&LJ`7;Z6%6i|EFhiz9N#YDujk)Wa&GllBx)T%d925q9&M|v#Z zi#ZKEgej-IE-G#HDbOsGv624ehmt07o~*xBf9Yk_fi^>@DwLt*03bu@G><`U`72*6 z-ZDf?|E}Hz2HpG_s26A+m^J=C&`F;C8=hjqsd!wfL_-$z=VE{%kFCfxJUWrtKdWB9 zqi;wzO|qUS%hyPCWj!fhY3IsD6d-G-O1eGZSw^9tn{&>0`T z4u+&o(YG==jYSFrC=hIw=QFO?Xu z+SspbtgzKJu(Vab)|9J@lb;TGfBZ76!wo3gSUoyEPV82crUPuBM5O6M*GE9vIZ(i3 z4Xn4xjxUMiO0o!RbMtHgmB#+&H|F80Pu7@;Tq~cxjhylIkZJAKRym4!;KN7B-+x(-^^(<%@^-Z2G8>7 zppf~;iv^Qg6p%dh!$9$@7q^R8sni4;w&8B;Qf)_N+aE!#Tz7$TAHnvY{V1*NqS&ov z!xNBSQg_EJbMi@$Q4$7V2;V#&$lexO6Pgf89H`p)kQu3tSFV%B-W1+knO-y1Y*D<= zO>?h^NT-T`N{N)Tl%yctp`suu0;05_ba$hqba!*4OS<9P7wG6b&*SsXyVke9zrK6T z=<%F8uGrV!zx~_$+V>=3j;BOT?Uo0dw|f<1L>PW`2Sg)o&*4#~-?(AJ5FEuh< zc_)X8{WakU8vE|p)O8K5h(JtY_TDIgB;=Q{;E!5-uP-E3LMDa9AMu?t+LhpBq8d-C z`FH%{y41mRWT>pekh~1V_PX03E$!q{%J|zB?Oz7W2lnEI4G+r+bIL!{&iZBj_Uvki 
zhZ>7dtCnYI8$k$11WE$ASG$;Z)9!&=@O^nd^K``~yvXaEotH@P+isa;;9TYBGR^#i z6Dr*_u4P4)HCb|PB*?%2&Cc(kEE`}5xnMdi_E*_Symi)qu zQ)EqSRSRS31+uSFT>4D6lnch!3tkzMEvqQXH|NB~ftxkT*@C;76|jure~=USBIg3| z(|5J7u^g36G+fCZb7`Kzd!{;hjoI%L%j7#;&BrXCjva|m!+KI_(MLiaEqPB(=X3eG zg;Uji52zwzLQ5@2s{{xyQQdX$gr{6TZ^f6wxYfdREo&*voHp;9@MAAi3hBf?FB4QL zPT_R>zD{Eo_jnmHf9z%PYk#lqOYs?y8J_wo+*9KHC2j3Hz6Vcs4yDn_=+ygrLkci9 z1)p95x1Qgetz6&{^JQC=LO>USJutc6V9nehvz~sP3KkFtM>teR+oHcMz6lli%qku~TyHky zj$06pYo*fvdXA9wZH%QlO$g7Wi?v4&_td8QS+IK0Z57^C^rF5LCr>D0aP_M^$|V%p!^yXWrNC#a-_WYUMKmT-7EiKdY>8sSj6;xb+#Iw5Cm;9?QX-KC-02OGxN{p zg`eG7+dM{+KdQ)Cs4;u$xt}Q2usuQIgVZ%G;RL1_mf#?j#=D$xm5q3s38uYMzMcl5 zm$p5DluH8Y!FC;D0Zauuus=0240M#o-$Mcg={OI}fN&U^A_d#m6&s4sLZ4Ur{ zw<;haeOoACgx@(<=e^ZJ-HRJn1on_`$0Nmx^pgz(YY7(gY&YXG)=N)NE!n78=936B z9Ga%SEXZQ0ykuSf{Mn&r2X=f#RUWzuR;lQ%J$vSBK_geEUC=x`aQSEw?3G&Sx4r{X zXSZG{iEXCFLGCp%0x7Cmx>8(JLXbW!=dd(Q&_yFuV`jEdn2@^?5DZ%a!V@&#Sv~E@ z;hwlst_^$UK!?-ShA!Cw08`G-j~1#|Y%G?C6h!I-jnp#F#Vy;4(gj*-0VN?W%2yBu z1UwI!?1kC!us_6AND-Z+3aQL*6cTqfqV`jsV1!(a#Pm0@WcLY;f*NoZ)f3}dMPyFA zcmorfp8UE^mog5Jrn&gY_58!1_RHi9JWk?vcsD*BSab03rKmJlA<~Bw!n%;{_co3e zG!bN@Kq)YZ8nZ9M`*F?>d%f^us9do7kd|}W$qiuiJz=)I>R8ttgRtfa+lK4)MbxTq zdQ2>%hP=Pl`}yka4|30aeA;6kr^hceipe*E@R8(lpOY;Cwz;x09bv>Vu|f98)q+g; z`p=C+PJO>u5Gccjuzehlt&Uvd6|*l18nAtAhTrZ(ad6R%QSe;Kr&*czLtQT&2CFW1 zU8kCd9a8QRPTPmD!V)JfL?&hXmk;xp%*Aexctb+>vu86&>E|sI)E;en#AL4o=27Uk z^PB1qZ{1aIA5Hr6+~Is`-1D*cLv7elb>{WmPS-(ye@O^W+U@KVh{=dFb7d0#dGVCJ ztc8JtuY*7$K+u$t1Fm=NHv^PLExBVYTl@ld#AZ1o3CZOx$&5$^f_k!DFkkF*J=Gu| z)KUtsow~33&)sB)fQvI2!$Q-P?E}(`H4cVvHhF8-?W?O@44zyLzW2V%qKosCnpJ{r z=mAbm*ei{;e|_1!TgI*~vR$ab%UbPpPtTH5S?@2zEF5xECQ!ITPY;NZngC{?W z|CkeOW&(zRyw#BBYx-SflXv`yhxB4)mEMV3t&u{}!L~+AHv}EwNM^i2Dx{ON=GnwV zS99fN6C~Af*vl8Kl!+b09=?vJdtmwKBfhl@!#JkT<)*S5oh(%4ragn{7N5(}$3MNM zAn~N<{h3MZMO?2lqr?&2%Lc6|dlFMOt~Tm3n{QP#BGY6&SQY+SQcm=O`YYpLNina| z3_I?3mZb^z`fBqs$6ynMH0R{_=1_oG9YZDU@9G^rqs`~hF4yA22??7C>i>Aqn ztG$wAA1%(3w{GgvD?+VT>D1eIo){*)mh5!()DcqiZkW6mXTMmdUN)VV9(0#8P(g)p zs`N{y4ik|iQ`c5xcgUE 
zdGcbK8NPKVi&A|VY7Z2iOmJYMAyQz~F9>?&J8!GWT|^l2Wn(Qgr}X05vstgTI^iSD z8g{e{I~%Z7D*6zO`DNulQzSByNvI);cpjhn`J~gc(UZ!;A5=0FD;h1&#zqGonR-j~ zf_74&HsQVNH7Ql0Glb+sJ$%oO#Z>7CKbOK`P-wq-DO}0t91lknBz!igloLXwu^1K0 zEjr}i?#z4DPk3LET~~@9vY7kW{q=PC$AbESmG0!(#`4#8N%cFDyk@FzKg}-Vu!(U+ z4jJZKpGePqojIahZMj56j8#I)e*P9-#YF{~3z>xiOQBkx`bcIoY!n~ozmwOpDX_VA zH4AD58#apDFVdbW5$)vA_sxqMeXiIZl9wJ|q9Bwf|Lr~RScEd%g9q8&9nFHn}H{3&`j54PhJWieW=kp>VT@ zV{3WK%iOlJO@8fNFfVIP5Dh$?neuCHm+7aucg_9>_YHHE#1p@j`f#Eq*|=(dE->0s6pF`riwAcLVYIxgNYBl~rue4CG+JGUOt2B23t)|NOuy}c{{Xf6#W z7QeBy_zd!;KDY%>61Z1HN2hyazB^uBWNtRYVgHRz-H((qB#LCSe zofYCNuo;C&{iKlxSObspv#=>5Jf}51q$oYQx?0aQ*As|sU053n{sd;m_O}?@TbJjY z5<`RA0G4|i1h!b7CARJX`9t0Fydcjx1GF{7M7y{|M-8xU=b&9QmL<*N=hdVu9#V+6 z1@|*XPGlsKz!m!YhbII$1TnqiciIWhwF7k)8NmK}EWc7?sUBJ020Dd`PcBsVqaZr6fdo$R0QXI{*o4j&M$8{oMf^F>+b%*$x*u;z}YKk)YnRX=s% zCdtl{m%tD4yWwzjLp>L-!n?&fTT~&sp z;9J$#RNp3k$6v@|JmG|ME4H(GJn_Z0MdlvQ+h8chU;jzA&$+T?JU>MRwxrNABvB7r z&D(*M1lzp6DXF#CN>Haq?Ki#3nV`>mlhWc!RoB%wB0}ww8+LIe!c_91H0?UCsmH^Z z;*Y9c6O62ywAN%i#UP)ic|LJA$G}V3MgwvrO-3&{GY&43yPGv zpl8xtM4Bwi!a9EZTVY&jD& z`e(de@~yHAP!QFzXHhzOeD-9v=i1*<~9V-0u z`AI)~=R)HKrMj0?I08Kfs#GHOMZQ~eR=hOC%KfmT@6Fi;!uLgs9`jEwh2^|C z6VWp}F;BBxQAbCES>u;u_@24ayGxPd5zQYq*tJNze~dZ%Prq#|vTQxGRguJ-Ii)vEL zFrgJjRKBhGh(7_0C~!*?Al;LtJ$DJJ<>W_Fm#++mZd zlWNZMSDyFF4cpVoUzr*t!HZg&Cn3rS3`;yKGfYU*TBAl2Kn%g%EjD7e&)(h?O|L)s zZqX;oxNRowR7gU^K(W2x`uFsLmjDnxUe$g--=q`qfZga+s&aOI zGyuLTOZ)b=QHAdNJq?_C?OayORc61-9q($6z!@%XYf3sDX7GaDGx8N9+O-Os$tDx| z)A;*U&+JXF<0mzD2_}5_54Muzvu`d{m=mbj6axK`uAH434b(4Ev%t1B!)8hx{PUNt zMl-uaKQinZIrzH2msRG%Z_^j5G-kM&92)=KN!R z!lQi4vFhvMJCE|!vKc8?^4dlm2m*R$`{wIc*u?3_nvCTYgm%p<>c(jo&>8&anr{5D zN70WZmc_{~`EBqiFx7mfwQ^V~NNQazy}{tN8?uQW3@Pb`2;t%=Zg4czbsMDz3=`3BOA{9|X2Am2SD&>5<_32P;{fc8TV8ym>gE-U+YsYhYh_z^j)b4O~f) z$u&)yEx6PZXB7%X1({@|SxtPj4KUUF-xo*e7(V1G_u+TomId?2vVq~rL`U#tPp-q6vcdbHnUqf zv9S@aH^m_@a^IP~Vgc!DCM|EUI*uv+Tq`S#&X7BkUylef$j4&#a;sT7;_x}oOB?#a zhWt(=V#p0dM>e+k;_G@xdf1Jr%6(@4dR@^haPLSj5-Tpp1>>~iZ 
z0&qD7%IhmL8C(c%9e<{2H{NOT>OHKM5O%)y?c1W`Jexy$>aO(?Bm17T&DtC+!P2Gg zdVMJ+xx>0lM?NiB+yCIDx5XwO=;Cd(Gg4AKe#c@q710mFiHYatqO_K*Zx+R{6hCr( zOwpCy-S1-EO%!iP%cp4}?%~9{gS}X-bWm%_kD#tSG&z5sdlu>lB{1+<{MeI^b*H!6pd%(DK1!U|D$`h_P$D1u2-&MIpT7`)^k*@lPpSW@f^}1OO;z^Y&z~Kz9$%HIr1XaPtZE2vbOvupoVl(5xYv8vt6VpBAT8z}X}nORqyAbZ00+^MH( z^9&0NH0s*=<0qe0uGhVO_U7nSZ|kHg!SN%pfwij<2;5xM$_2k+_9=tGB1hI;4!4tXJ`7%sBUsimcY((p`L7^YvB-caCUtqH; zE;i}m4IXt(&FhJ8qVU^jo`2hfzZW-rH`qjUYEP)ppXs%bXw;u76ce9R&v&BfK0`l! z(pl5bT}`YFEb@%A0V1a9HG5~5RGCefl4!A*UgPxZJu~9m-G$jqB|lE4-tyuo={fY$ zjJ8y{TTPbSk=@HR+7&laJE)ZmIH~cCn0b!NwwN6ziSQGV2{EMgY_+~8PM@sB=m)u; zuwkagOvBx`4#FQ_iX9=G10hV<>!PR*wGSlPCAf!fzk-M`Y`|xyOjpkbUBN{q*wsfN z(GFDJw_Z3ZR%kz9e)qP~32NOpdG%RIy#kPph>an6!4*CeL8`<{|gCoZs4|dRju26YzvC^;~|tudSuys z1oD@WK*&v2yje=p%xoJ4DtHQh`4PK1!W zyF50X@z3)(!-1EMYVvS?Yw_ZKSON!bCmf#3#r~;80v0yoUeYe?Tt29heUgcH<;o?~ z&rTUVv?S?zXFCUv@8xA5jr!ccbFgSz${~^OQ`1W8zGqbgs;8!o8rjiTNX?Qa5^&5p20KobD?B`Oh)gkmTnm`^-A(B1R*?ei1FLhmDj1C z6gwj_R!y_gXd^j#U*q<~es{)CE$>5oRR@^8kGFW-jGPyFn_?-|UW+46c8dko%!dtt z*4waQHk+7((|WXagXMTPckgJyc85Xl&Z1mxLL_m>XrG?>_}8{U+z-X&3-u~yB0J+0 zA+dzypZ_XH!>qah+LzF}r2S`gaKInl9Pq{OAXHFZ@`_57a!Vof zKK42Dh1#F6OQ23!sd4i*7=KB$NlK_*xe?+Uy#6P7ns00gGUz^^;bo?6C)&0?4$T=m zCA2s@>mUX1^59>{Umu7soE%G}_t#Gb4c%uk>#?4&K>^!ca%UC~LT!hG23##qU;S2_ z_i$E*`uwAdZ`d{53=?V|VCU}5C6?oXT{Xx)oq6decfQ(a3MJdmkMK{ z{Meli=4*MB$AjrE?snnhOXHG#&ijHd_{E;WG%KCbqjx&&=ke!}6Ne@LB&5kV$fCF? zBnWaCJ6oU=3jSSY*5&VA4;Ubf==F){;>odhfO0Kd(GbXh$fYR+GYS0L7HdAlS&u;d zf-51!d4V|0vtMhR`m7cn!;V9BoErDZE!gzhZ#BS37j*?HYpnGptPjd6OU6I#5<%%v zJC-GM>uC-`o{DX}nxX)UHLum@ zuCJ^|mROeavPYygIT_Ssv97AeOLz7^6^tZ|+C?r`NB2fa^>6FtAt1{>n-#pRI)c!) 
zxg)Z)<^Ik%r#M|Qq~+a-h^9|}qsOkI8WqQsGXm+9)V5wG;yl!$x>lGWotOK$_uOF$ zy;!f#B|9x}rn3Y}|ACz^5wnRWk1PE7@cicBFp#)_D zyQ9TRj%8b|y#9)J>IMHR$UDtQEXw4VvlTU5a1{xC|~*z$(pAR za6i0aFEY53V?kz}z5@fbq$FgaGrOp*h5y|MessJEoC4%)2(ffFEVjubygoea3uRb_ z_sKq4899gZA24)TGMtt1 zOZd7wNZJ%A;81=bVe*@+1m8M7e(9cT-9PC$(avX~NJSt+gM?B#4fOYiZ+YKGL#Huw z0T{cu!bN_K1nlt^!vgi|M3sz!ktMkEUN_EJkJ249?PuW<=H&UV63~8*Z;^rA#Cpv+ zZWV~?)q8uoyMH!2WUJG1mnNbtLu8CyMFwRM>hiddymZO!y228hVwvtEK&oAib_GGW zXapvYjIrzrP5a=dZ@LL}ax4FEay8?Sx|?GJR*%p0Uhcjf7cACjXxD?Q4KtOEOi%9-O?QrYWm|V+=PjxfF5MmeY|nevP(h){8@BElO$UhR zGUFqmguS=w=PL*y&eL7~s*BN^q4HlN; ztyH1xQNE)~c{f=EG|VKK5-4n%X<2w!T4#s$&fWc!HOx-(oHeG@b=&B|yCZU4XTSZn zyXE{cGwJ;EA18;60u8aXX;-m0ExEAXB`gb;PC%0<&PC4@$i3b|3;C>>kXMXS6MOBJ zQg?4$>C0AFQA$%%v0V-qt4SK$Z1rU-?DwCyloLO)5C;`6yeSn`niG@w=iCKJp|Hc^ z2mK$>x!bOuV){(1uw^3W_*~nqRSL*KWl?ANjUm{ta z*-y9!B^y0nDUa^he%FwtRLSJ$@Ni$!!on$k3PRa?b?fQU&z&zZ+>0!haTZs|seL!Q zKglMcJ&w6R8FN6nSI`irWzs}5Vzyp+axLRS?f&fi{P@{TOqSog?OLI9DTLxNt~W@_ zdV13CC0W&y5mQjKZ_{CdFEXJP-}gZ|G&r#!b)1v8>IzOJ5qgNc@5Z-+Rbg(IFOUb!$Vh_o6>`m61N7SP`rh)vAbRq{Tl? 
z;*a6|*H6in2qT=n*Etk-`R6hN|9ak!fA1C={dngQ87!KAa3trT2VDGSV*iH=9BVxh zEA+|C&#Lvder95dQC)@>9w3U*G+EQ;a}a{rarGe)--M1R?&x@oiLI?g5Yg z;ezi?xnT5@7XS4%fuzX5KEeDgLiEei9cw*yhn(>r&c2D;VjdROF^#yR7T@Y(-Vb93 z4|MLuD^ju2eDa}aIEw+2X)hM({6HCgCNPhHNcRpSvA|q+;?O%HSO4U#K0*54d zZqA-yH%l}5H$1%~_b>YmAK}3H*(m(fy?^=G(Syv|$|LLUe>aokuK@$GKp>F`UC1h7^y#6yDI4- z`xoq=?B*|T{!Irzy7|wRDdpb4{BM?N*sT8kX7+zLl0Li3qv6kQT_wC!Ei7!kI=ORa zcWdHyrfgqgV@irL{`b%s>KF5UE1g@oc9_5mCGp`GW>w59;-Xg${^+r(z&#tYPAFBG z)oULJqHA*xw%*8R<)_Cx7p}bok25ty`s~C-d@}fx2Cvrax2>-BLYWgMPT0{?qr2RX z0H^LJjZJvI5R!c7bSp2POH%vs{s<_mY-zl-Eex&b`P(`kd)}}-!K%avTbxUZSM2B* z=;1x0)e8yvE)k}svYbN~PZ_y2?>4uAde{DVT>jt9-L3!C-2JbF`eSmLtl!}E7uxUr z1^$URVI4rr{eZY;-^*7LSu4XoX5qhlI`AjbxYwD8$1U*w%V~f8ptfr z3|4*=jQ{I||M21HWe}-FC9A%dhu%^Wc`Sxs{0}Gn+gBeyqG;9~!WUqK|Lw@R5}^motYq5Vk)|NY$W-&}Tq#oeGKllh?Kj@Xlbs6mWjIgQcQ!@G=hzu-WwV=u!` zPQ>>+|NGyUKO#j!s6Q8a7)mpg4jS*Go_{~@Z{Il9(xEEi+VwG#QHvewUgO2jj#Ozd z1-yCF0Por}U8rTFCHGp%voy1^*ZlmF+NMa)_&pn#iGOe$AbiaK@#T6kL17qr!C2EA zrh>o1%iVn}!O$+wezU?sQBqQ}fKJiP{k*z}z)%Ae87cJrN25x39<8J>j&W1@qRr}} z_?4oaE$Ncy=XEqS1t_-0gqCJ3iWG2p7VqE+caXL-mhF&MKm`DJp>x5^=eq5a-@S_e z+6ThZFbn*pD|U8YpHj3$@h-)@8cdwjU76|Sv|fxZFE77cURzr`uH~?-weW#F z3+rE<`4pL^3TEw+cvnB+;=|xo8j<$e%oeO;__o!jyE5X~H#ea;Od8)tT8Lj3S*7-k zejAMa)}mgbWXmwqKM^ggn|I&$|AE!;#>@80IeBGe`of~=%5gN$tl?__u{uZm-eFN6!~78Hza_tZei(8d>rxlI zb9d!!Hs>m++^NKZI$nPGIL*F|gM=z)_rc?Yt_t#^;y5?dB6L)VUV3${h66)Gs(pMwtJ{i*rmyDcv_# zW<%T7?YGuj1NL_K{D&uY%;~WI&XqkhG;}XhU#?gYy>zcig2jG&b8RW{XE5-$Yqb@L zaX#S~fUHWu!BJON=Lzq(jPi>x8Lc~nVbN=2zWHCeSJ~J12UbRRx{hQqI|Bm)6BAQY zmAAjY|K+}HBlUtw`2uURr_A)6bJ6^h!Dw}9+j4MlG{SpV<<)1lx|JmE-fg;h>igKo z+#a6YUfG&X+MfB=$&akwO3pT75RglWQXQm%8w#$8-MVr`@TV!o}=dTw5yLK|w)I zb0f8ZlhP;P##e9Mx^?MZ@_ifs@9k@OT=f0*aLGJtc`R65Bs&kGS79ld2DAGdyI`;` zsVd6L2bg=ldi6@)L|v0GxIQ{M+67jPuZh${cBzaP5=!ZY{k&aShM{c%aI=f= z#vAx?c~0g{r3x45p)TF8SQw8A<=lM%*6O}TIA{fZEgK+39^io6Cyrev4WJ|T+wMH9 zFmt3$wdtkg<%#2Dlf-B%=7+0Kl#jC>{$(#S`LLbu9|U0_P|7nGeWL(EDbEpGz5;It 
z?KAs6rtqx4A0EPJy{#`#2VM^0vM`XJ9IXwM)%zY({QJ4^ulp5IgU6VE%bU(rC|-Sv z%X`ymy2_7Mz4FxsKg`tB)L4I!!_(FuEbzZv&y0iZ9Cm1RLbfR2atO1|T`4K4_vyY@s5C)H(r!G9!!?!s@b0<4UHa2M zk{`mF7cGT(B(Du*OClA2%XDETXEcNuq0$#dZE{*=uPzAFjB=Sxob2D4u^>s< zOZoh%LEk@yscIU+nrI1z*9tS5O|)1pwaU_t!Yg#WSKEuWPk}^uz`Ib(;dW{z&#@0p z8k?A4y7ZDfWD?|A%qzd&wyezu4d>ASt zjWtGKdQ9p)KpnCf`?cz|Cn^Qm%;X$WN3Aj!t9yd}E+h=vj|9Y)k^LZg)~edxLV}N? z{DJ}k0%gne`=axn9U&-B*J)N;n;!<9cRWEp!xfZyS;IU2b8KuDHvYdC6%={Bj`*yOXrCiSF>Lc5|fYRBAOnqvh9u&vnZz zH{n?WZV~U_pNDo4py!>UmAL_S-uLxuK{tFlwBi}>e{>vcPg2p))MWW=|KY<2Mp!|s zi_PNdy}4%IZ{HG;ahbb=7gJ83H`R37#`XcoFj~Lbp} zA?Qvhkz_}}piG+8Yp#&LI^+gxGqrUIj3^Umbs zRK$Adz&P731opdMsbfDkzpyZwPXLen=s8tJpyGbZhqCM-h3zpI3^!0s@))puL_(xS zH-e!3jai2MoYOg=pt3h+@-U4;6dq=iZPFyr*--Ya#byk6j~>tf&gm0)j8^ksP*2t8 zzf`s=*H>1)D8b>nRL8foIo4NT6TE`-PKO-$JVh=JI~)CnOw0|O;`J5)uRlt(AVSjoBhTH0-o8@zE@q zXNYkMpn*o?AN^B`%KP0j}tlVe^KSNol`YLQxK@ zjwDSAH88jeqUq!4-Kl)7m&h~~;qX5A}Gs#{>NnvmYWon-r8ykniibM6g5{{r{L_X=4CMssBZ6^{>+-&@1-dGh2*_Cg&O2F5YY z-3A(k+1{A-LaTv#dN}?N+$QV+vd-tcxS5vw9d-y=^+EGDhR@5($;st|j$^dnTt)6E z%_yu`awK8>JKHPN(fiIiek<6b(4Z)@P|+ z6Tm`ycH5$l#eAxx%DE3sjIFmj2^&2!oOpO^YilW>JvoPszEAxWDHG^dnoJA4k9C^h zh8lr^5CRi5-1Ce;nNTapNHAlr+MmJ8q>h8?^(DEx92Nj@%il1nXPU*JG2jYz*u+p6 zm0+{o0E?#P#k1^k?dgHC5StkyKx=TqC(Y81BMT1z7p4FdjkrYR&x=cbBO~i^SHyDndgX zBQ@IM<ki!eP^BSh2#B4D(hiq%-zb<0ua-LZe!icIXYrlf%}e zN`%ACc568w=!`jNQmS!~pWmq+5Rwzb2c)9_BNWbg7@oH+8YSsp6-3IpwXnRnxOn&W zS02mpUj zU)zs!FggN+k8KcltHc@Ct67|{pnSD1n2V}M@%UWPK};g(HU&TAdL}n>4mj=~8g!=c z^jERxDEFeEAW$-uAccjFh=T*&h{c%$U-m+clKt8+OP|znJmj^Me;q8Lt$6nA8Pfk3 zz5}hca(AK=fFiVm9u*(D6^JdcIjfrFuWYSzl$iOV>h@d(g?R2Vw8jJ4{?0ha$LB=R zTCHL1L)Mdc-rnAL{))!8il1r{(8>EY?H7f%gf{4pfn`ce>AZ8J6cEvI)VAUtGK(HO zfKInoki2Wtn1k=+iyoFF(cmYm{5JW)aFriIaxK}BFwltLa>{X|&y6mBz>kNAq>0q5 zMZymQZ~+;7pSG3K6XNH&nKMC+&-sLIzAta@0HaFCWv&nQSE`N&<`6;5_clm2r;dV& z#l*%X0#b5yabX{S?~}PsekZnv;G5&ig^TU%ctBcNxv;h58iHK!*)pcSQEOF3u+|Xv z>3%P`J6Uq|Av{P@yx-lRLnkPu`o`{( zX^Fy>Sgj;1x>CjaWhA{puA+j?<%@d)4GZ6z_BtIp5uTr)Z*03d=qyxZzoo0>6Bw8W 
zO?kz_CJ%ZlAM}tP20#z8@UPk|dWwDgE%5@0)ZCr zm@_O~dvNmPNh!}fi&^ptG7H5fJD6&puSy{VDz=P=j~vlcl3WTJ1BB+%)6*m0zBt>@ zhn(puBT(cbZ!VwFu|tjKdfIJ}M~6=*e-aN5Ii<69vo>aR!-bd8j{vW%zz<@M23o&7 zfJJXlU*Me+1d?DC<0Vcg9>kt+hrZd{ZvX0AnPseO@XscJ!{a9uklaB2hrxQ-X zFYbv;DwJ2x--CF5wFo zv){?wCxyJzrx& z3w$Dl+6*vk<*TuySCHM&(9(*^S5XiC1DllnGRTe$hjNOV00NT3OX$X2fv{M?iU}6? ze0;n=&O-@$7cZX?Yup3pz%29J?TbsPb4*Q3QwO(0#0q&;j*&Boc{8pc3__(l0k>3) zD=#cnjs7L8IlHp@pZH?O>9`3i(bk0^jo9*f03*W%dseNb9#DVi<(akw1<)%kgev`M zVX>neEW!2QAeKWAOHWB55d19x&zCfeSz3G>avB;MQY0!%q#wYa)|m!s zC+~A@Z33ykuWu=EQkj*?b&l|CM}{ZhRcFJ>Mn3e}v(K?f@Ktu!+b4tDzx!FqCc7Ls zmlwE~i0>H&E}09lt}>p;yt%i>%Qc_g^6wf?hE$|$>NqFk-`74rOFXL|nL9hvvXWm( zgvj*dw4C*cINEv%V7E!Mg3V-}4x9glf)$xb_Yv2Cqkg;`94wvD&y|(82tnL)ly~uG zLoy7nG*A640UE8Vds>IMJlhB}1n$AjHyhw%A@e1gn>k^1J6G#kE06E{+$#a=Gm^c^ zyF{fULn*KOu3$zvYg?y?naL85;kVHfA$QoEdjmZ9jT^8&O2N5Rj!Fv% z$o@{{nA4B%T)+uCAZ^%jk&X_cEf69};6Q7_#BWWx#G5A;;bigF;wY21>}4PzH!+34ou5 z;^R6nx@Cw%$m%f`2KW};W$R(aD%qQJ2bz(wu+)jX%P?yB9Ai!JHxi9)iw{wif?q^z ziu-R80L*^K@y$rV)u(_M@xwrL1Vt`njK0~~5eWNv2s0Eg4<3C0 zDBnXbG3P_x(s92TxY7dBb?fM4V?yDkBBk zh&9c4xJur{zA$lp%5TDIH{d79WYCjcwZo^|63sr9T#{M&QwM|*3*2D^U_$_mh5C5Y z2R{>FKHYH0INt~Dnl22l-8%EVT~OJg0wxRWXr+|LEHbSlBL=5NcLCsOJ75moLV+4c zv050Z$~nXTyp&>RwF0enF1X6qlmthbCv+T`=HM-@)%i~N92Cs&VYv-&6+75%A2Cw8tYVW z0GB*y{+n?NZUhme+1b%cD!@M{ zhPbe(-d4WVf^IEZa37aj8%FsudZz~WI%o#`tSiO#SLYPqIID7C!gzoZ7VQB5?M1->H?t&SRDa>hZAAu-4C;z=75BKIp5_+D~rNRG!>v$ z8;cxv4#Yu{8J6SvB!mWw_g@1_;W)78B35>-3*pe(g7SNN1}o+*Kvcy?pZ-VzAG<0C zg==cr`bohC#GCZRYO#S!>;fc;Z@Vl{eT;RVWOoL536c5rh>nNu#E@D{buc0zFmH{X z$KX0a{F@nk#c4WrhocXo&bEt! 
zP*ksK4q}7hu(~16{wmRKG9l}DIZT{8ko#3i2P3jn)e9swayc%^r(ZmK zmi1xRNy#WNkHHdWDy2L?0INX%&{jYq#I5=#z&JjungbG+Oe@6d!ELib@YocFkr%ug zPD?{`Tl)?f9t*1uIk<-0bEPNzFj@WfTo5vS)xyr_Ha}rpPfCU=l#u)b?6*+C_5O}N^LefqtKZM{_ zPQei@b9(SAUSvM(mhnU!Ke#MnI@65+)~L%kr+}fEiJ=Vsj-Dz^J(I|*+QFPOH8@PA<<|(%*Ivd#K$^a z$Y@EKE!>D)Dgxf;+!-Mlq)gYC#9LlJ2IU>{c>eMs^AOL_cb^I5WS7pYpp#E`f%t-c zU<(@Xt6QY@tApO5p(-39Oq#cWu;2nL3Jg2fO!hg|85E_csR{6k`8GcKpn2xN2jzQ5 zEuLpSJp^eQFCfo`xq}zE<_K(iA{VuyC&xrjDGgL#_GpOB@exnH?N#}92#iwVaE^sl z8NulE*l$eVnzR{uL8Z1E#7C?hI-&p4AFM@!XAwACaWhKvhb2gS(%LU|L~o5{rqeo zf_MNLD5E}C$>1Ja%ZCU=g|w`a-{ln$G_J19hj|INk%bH+howycfqu2(9{3&nq4;Q{ zTA;{J?M_&hekYR{&r-{Kn?3x{ArCAI_&$6t2%6Vr=(f@r6t0b+xZBei79jK37aRC# zD}eJgDmx{6LseBZV-CDwqphX(3Wzw-j6yKNJADv1CvX{$li!%Hxl&&WkVIB}6QbT~ zJ=sQwOdz;p4qn{(T>}wDO~_`Th(c)zER$^syC&z*=oOH~x!_sbsB#t{(cG@obDP8! z@HuQtwIX-}?2djhxq7+#E%|;3(l3p5W#|oCetPl;B3D~hlRJS8%pP*34S)asz4468 zIZv1EtqrQ($2s2kVe5qi1w{Lc4@5iX&E&#HZlJw$4DA;Jjld5PLS+6~AcMG-Ar`ZW zq1Yh=BO2#E3C=h7=GT%&C^RdW0J~8jN`O4O3eap>0iobM_n0<|`eDG@mKr*2bnBX! 
zLXfr1**IPyi(U*A%RTU(hZVC7dWR7D*pI`08dM}aDJ<+XBHJvdQYwzO?LfMod$Sl9 z-G_F}p0R3#(Nu%z+94BQYwf2p+Ld)iiP<0BPY0=MdxL`Zl~kdmWo2dm#^{Ig@u3Ms zVT}`QDi*zFheX|UO(4mk0tPOr%ILv^2SH|Saz<6uQBrMkkTin^x5pCWv1?5_P%H&Z zY$2F2F`Zr4o~)NTD(HBq?fk7N(DX(fs?KHpg^M3LH2sw_Id?!qxPY%$flwAM@9O<{ zr99Tbb%=lxBmObL#t+J_=XU10M1NRhoRl1 z!rVgW4)=%lB@|y^08QqUgTiD+pZ*}Yu}kLnS&ato2IK&A_?e*K5BOlGkqkENdDKvw zz>aN~RyRU}4x=QZlkh{;gbf}`7WMV_uYhTSmc3OM{#JNwa+Wo z*VcL{801`724pW7j|%qOA2o)lDhC`g>8B3e7$cFIP2m%*)NiOv~AmbBINNO|$1(}e4y_NxjVuoS`=W8@IG*I9VS?Cmqu_X{w zX+C&R&eE>vV{D=aL+2PQZAXkF@WWX+P+4>a=gz^mZ{O~w@_=6-av8-dP;UT*87w(( zgJ~}XCvx*0;@QGppr|)mXpImNa$18aDy?~Y1u+H+y;8C*`2$0@2&;E1(Z<+iVb-9mT&<#6sW?u(k9-F0oMHnFvvq~ z3WgrAMlaL=0l|n^JV@001ffsFb!4xN1UBmefj^fPx^+p6W7yXz2e>wruYe$z0t*Qw zG`S&SGVKf+w*v#;88q#6CYsp3LEMIq(Au3+NE#qQ3o@e}{6jR2MgYPDf%PKE;oKeY zdgKZMj!KaA1h%)LBSl@(74djLV3W;3ya;i;8H~ta2iYxm8ht+F+!9B>hc58|x9KM< zqQnFFHDu?%rywZsC@xI#o~B#+`~(YRshscxQKmyECO|+5qu@iX$2{%lPJK)4c{V_ zq^piXyNI*JcRd|*N&p=OK@cSH z!yroy@wK<+vq(q5 z@^nK;3O;w!pcPC|LWnkP`JUbL1~Mvj(yUef6IXN6OQPrTXP@9ugjZ`6Kghz!3cE*Q+N$ zFaPRCtp975x+?M?!-7WKx^WFxQ3S=TTD5Ko;TV9u7tP5EqXb&P3e$~yl2coicZ8Wss<5j{T+mVZf%{Fo^PNhfUmwk96!5Jx)IzgA~KoO1TQ z&;qb+Z1SM9F-wRTgVdA`ms5XRK~fATQS`?#mA#7*z(H81k7$B0pZZisN_XDTfH&X; z^8j@Px0|Jzl!YAS5XBZ0f;J%Jv$sVIr-vefxPKj2!_f|fpALQ6BLwvB zkhC3LK8f5i1bIE~;U!0`FJ~2z!8g%!>p-WH!dd#Iu0LG}N+$AeOPnhK_6rFC>i1|D zXCHLZh8W8^nct_Pk&2@VdUV~Ry}GJu2&{!+{*%AmClXqXvBABs|)E0q6q* zc=9Kad$(*2)ydvnf{{VYWGiDS-&QCEWL zfPaU#y+KLh5cVR8Is2ncfJS!yP25msczU}#8Q1?|?>z&WJlno;TU)JEEJGgn=MQ zYEuvrufX=agRCqFo`s^~EkG-yP^oVMIutaJJ%KRiiBzpze;Q7g&AexGVa#U%NFQ)8v8*1YQILLhq?VvIT`1W+NvilQ2 zsf&UCJ`IO-gPn?$775UmwxnsQqtA!FJyvN7s$)bDyO*%xjts&Ey$@Fc!Ti61x(lEK zF?f>*&?dP@r|azn7Tf`dc2s`s*}wlj(#b)yn_*e%1pXNjBtb*SJ3kC?aHZFE2+2at zFLXby_k)(la+JO zmlkNy!~iA9-6apS2+~ai!VN)`hEkE(Zx@b+0?{0Swp^P)Ieh`iw~;~+86W_^EbB>F zSy?#(n{^Ng%Ama?(nDkvKnw~%s>lFDSWkqy4*MZ|24T+7w~O9bCv+DbHt3Cvb8tDp zioC}Ocn%;*R%$@V9D?>o!AsDy^i}2>*M0fR=S_Z14MCeW5HXR!+iFl(R~N}wBQ5NL 
zq!D9@#5W{Y|5?xpzoJ^$ivt9&n=}EC_kBp43Mj@_fVBKq_3A16jSPUV5nkg3I@dB4(Q13+pk~0Mlj@lWXdS>fn2x8qn6W!E)If?hW!vzfM9KU@I)a(tOr(R z+oJ}fwCj$Ez6XY9H$qj=io`2x<>disuLXyR0P8`(7$QJ`K7quvIY5D1*hj!i)wSSO zlr5yG`MK(+s;y!&t2YPmwfiVq6>5y_1z#d;efibIb?~5}sCLNwTA4;{XvYOx-fcLkQ@}?l_o}nuRE~bPHibW z6`b1<wSj44Nrb#M1cJ0pL#Sx%w1N8(o?v8Q;%06TXXh%n$hXi;z23P#2v5 zTrRXHcPKdN7ub`vSXj&~tceD2O4u=-jw{JQumuW5tx#Om+p#0TRLke9t162BGLWuu zLADP>0{M9ynFaY&Bk=p66hp3Pq-lk$I36*SlQ8kub0T)bcj-dur;H|oaS~fNRMH|pw zNcOLfF#tdroy8%z6KZTOKtT=Pi`e`N5?h{6WVkIRBSFkt=)tWVK+$W; zY`^;D<+)b%Ro(Ej5P}d=lZCJ&2=GT*(p9LqIt{AMMv$=?N8%tt4CZL_T-=aSB*elD z(ln9+Aw!u^n)*+(%=RcqGFA@jHtjf@(}z4CK%kWwV#IpjvWkJ$hD_3#LP+fDLZTHJ z1hqoIL1ZopWA4T#haphiMY6p^&TT2~q(&qkM}U!7Sz-seTS&SZkcL7A;;Lc>(eG8x zlolgv3BVinY>h2j5O|nTQBiTsTdB-3eJ7Gz!$&F`D}8zLYrbl&GqZr2rK>7?^zA@P zQcTJMYMrK5XTSjlKzAJkI6xMi+m#8+u?RqOh`rOwy%{oaf<~gHpgr!ATR^%<_=0oo z0J(WdYmyMw2@r`@(?r&LBs_wEdkO5jy=B=+u+?4>!Z`V-g2=U?B0+mw@qFE_MV_Dq zeSXYS#3=qR6*>|M>_0MaVY|>fhEK0FVREfUv$TjfLcy@czB?i4c2jiP32+KXRMZVAo)ac@-obxJ$r5gDJYlq9w4P7 z5c3_7J^-MuQ-H*;whACEg7E@KvrBXV(A8J4v7*QIZi8cV2QZZ= z%_Hyz7T6uAhEaxd7->nIMgkP%>;cV|+;B2bhWbg0a>KPzPEC1zG?c6ovz%L{S_=?-eGxU6#f zxpC?~)awD4gj^SBVIvv=aYk@Le#sT|F_MJ&Ra=|v?Ew{u*PCPO>k+R3L8Z`-2jC?d zihvpi(N$0%-H8q>VBA)X6vsEKI_6_QF@#I;?{Qg0Z^2J%gIV3Yh@0JkLbj$@(?C5q ziU90WWz`_jH7*S#HTTdiK-P0bd)SC5m(zhsM8Va>akt^?=PaRmc>DJ`Qr4TNuK7f1JIt{2Lk3u75MjA9K@Jx z*@B>W#4Zg$omZe_4+tV6Fy&bNh){@3xTzfU%E&t!fZd+L3UxrCSwXb6SaQt_2pn`0B^MVqDdbAq%X2 zz2iI$Bv?V-G5Dum{##Rb&@|u`M_@HXp+%Mzin=<)yCbqW5GX|l_yH$RKDPb7sohsy zEChwG4W)`o&?%foz31cZH_km}F53{F+f)e%a4~@Q?dT@daSA=NVwh0z=5dz6IFhb- z8f-u-;7TBn{_{sFeOHg8rRY+JBCU>tXb1F-(GBmiKE8py1xhC9hmrFo3(4VCRaFmx z=6Th30B(-+eQ@W!-km#5t#X1I8+l@+Tl+!CfpC6|yT)vmJXQ`FCo(IM_z$%Q5j0Rj zjv)zI2LyK@)4f-~-%?QA4UeSbIi`PuEC#wkry_Ul zBtz4K3Dqul?^Q-c-e`YyfbJ6B5+)%O(jYXh`T|ZM3JR6f;3?z(3-ME^E`JLG5;a23xWEt=#I)lg;^&Z$}d;<^GbJ27Ua02PR*b|7Z7--bydja2{`J*5a z^skMN!zHW+IjAy_8&-uingQMyok7&D?KpJlL`MZgLll*DLu7q`@^1hsG$taqaIAw# 
zo!@yDY6Ya)kZVUYA{2$h27m)lMVVk+=ZG}b6P)E04@9P;PLtN-wl~pKOTl<)@~bxJ z%AT*nSmtBY%L8rECE5v%e~kAqFoh)qSj$M=_dMF)*wbv~L zynSQH9Seeq7@sa0P-OT9PsApFkw##=O?sH?wHJ?dEa=+SK$_KYK;jJKD^O)3`aNdA*qt7&znAQT z&;aQEr6TfK3Kt+%B575o>blLlb^yhW>8Q9ZsKvqFj5c<2hwWrB!9gxJq?dt`h8{va zxi@`hZs`11OG+^YjiC7ab=2fAgul#I6TH%!#TF1MZb#|~C7EtcJLFwzAf!SU9U2oZ zKcxJ`4!d+J6|TVfQYV8#FX(mjdQ5X4XLdZ00@)+01~E=xDzp-agBouOB9)N)0$iGe zQK)~AIRRzY+F(vzXI*-HQ~PhJXy1bpOkdOlvJ|pJLF+5s(*Ex@A78`(798<6F4Wk& zfCETl*;o<|a027DYS4dV{Yd*)YZl_KAV})SksxNHj@Zh!z%poA8(uk!W|GHi##yky+Jz?8q%j0LPVY60E;i%Y2Zr_ zePyqIa4RpuA`hG`$;L&XJC=!{fTm9lk#`Zak@oa-UmjerOHh>Q2>|nmQ}36IcGkW&zYd*yt82<-!Rn4idiXR383FF zXLDg^W|jGa@=M6h=)^0?kS2E#7-1Cz!&$Z=OcNw3??f%mo<-G*PI4602`Y0VRb_2$ zZ5Ms_^l#8m50`XX|8D8wB&~Y{&zFy({X7k7GFSJ6AE(K;bWv|p1B+_IESyp?crRZ; zTP1;jDJcCARcAv;>M*0VfAJ6lfHVKQVF1IqfoLc6MKRvUE|EsfM1Jy!5+gsnl84I~ zcx#>HI3#x*KBb)=fIYg>%i>c&*ftgW1+|CKA?}&n zg(Ah)D#&C>Lth4#F(QsvC&-l5L4TSJW>3_dKxw@H*I&V&|07C-$ifbF`nZ`rDlQUC zMSuS)@+ii94eTE^!RtEQX=!;^m)8%h?Q&c+9+U#gOdl9Us^idAWNXb_SugH;7UljZ2w3k_ zPx~QY4W&s_hqcM9rP+J8k&(^ZCs)}n&%^!r89zZ)jlkH}gfU_=78>_%q4Za3!_hH* z&3MoA{5-(pE5@>u2cEs$jCl~6ZV7r}M4up6zZw(89@M}8uc=bK5;4rA09ZO1EGpOs zGY>(jN}x%Ap5?+Sf`ur3h$Q2&6SH&x)hbrW8j>b1YztMe$-3;y!;T))B;vc1d0dh0 z+p|-vL5Xt=^L`6jdRY}{H^v6QYVwoIu|nVtTuWu73qtsuSV5&VT%{G5E^`-ZII0T| z_8`6inT!l8zgj2bzkeB7uBF-_?@GXih+ku~B{}X#e%yit83@qO0Hwn!s)Ha%R8#`D z{cax{mA4*N)W;4`nfI?z{Ce(9?#v3NVUBe>wpD7=0d=>h(%;DA|ov6X(vX^xvvAP}CIYC~*uz`_V7t}2y8^;|;AYuc|mHN4hU@r*^(jzlZl)jpXfJ_KU?gAJ$9AUW#)xO9_ zr;cpy5CYClEW<&1P_OO*gVFK=TM|0-JU#DLii;XUtR7)M7a^eINUcW99^)N~q+&qYee1v{_v0lj70`M0w z-M-t023sI?oQsVC*w~C&2%WHI+-;P^X&}*Vxa)*7$bF1N6)rc$l+E&yDFrcENY{yk zJGQ#_&^~n0hoG6CDhXW$8lW?_jK*jHav}o3Vk~ZT_6>rvkjMR1d)Ry_;sh`elCa`| zjD0I!grka-iyp1Y8_Z zAghQ}!%pW{fCYA3@Pll!AMx20LwG%8@PppxN1XDnc~Y9GPDK z`MS2;^fj>ej0K8M#$D8TBl^>75S<-TE~j`m$$3E^d>YOrFJd1IBt&cg@-F}tWLN|B z`Z*h$nghB<%}>u|{JML0M$awf^hCg%cEK1g7p5It{61h|ypS9N;>*B3`z2izwoq^a zo)(R86s4hQnA0E^m+nUs1*vV$AX?n%oU#Rhz|kWW(1#7M${CTs>?v)AQXD~rL@%hs 
zA`m`{s;*VO4~QL?sl0=gL--?*0!>^GngJDozyS@Ia)1sy8tCm2%0G2!qSE?m;egOsA_t-IS`!TdgG3sm6(pPyi#Rs&1`Ip2?YW3$o3Pe4YH%zF0#&H#%PfzUz}uJHDP zvJc#%ub@^i@$f~4a8WT4vmAiiU0!A(i+=yBuL_h$bxBjU9{=o;HES}5(VPo};3AA8 zvj%}n#n6|!pErAnCSAC?KP?GennA{#O%fUvFXsEeqXL+Bj-4txiruH~Kb;PSV%P@D zp-v+e5l98%joTnLqnZH}u4FqR)d>Wb=uHDrC=PTnh^jB2A~S$wU6wA5O7&G0EMgQYQ{&i zmvW7CzBoNdN~uGL2f$C$y-%4!?}uT1R&)OoKY0w^@Y+xLeN%RodQB@ z&x2F&YCceY0$wBbti;_J#tos+pGlPIc0K{c=OD(OYo(d@Ro(Pd?CX7T6q2+9I2^nH zX7ykjlMkGIgNpGbHs2xI2+wTACHzEWa67tsenxed5GP~(LhE20`W z=&lCAGU{*0KCrryUk5&QMdt~UIG!JomMmeK0(gUUqv+m z&QJ-oMd}r>Z>S)zSyz=|s#?8)O8<|DV@ESjAf{R8@JdTcE&)Yz2M966LO(PEbOr1< zQHX1<1KAtRg$>ll5df+;||fsqAu9taf{VaeJ4*OeKULWu9B5Ros!;iLu%ufT%q zi>d(li;(`^rYGD%eY+J2SWqyU2GE15Y6Q(OykN^A0KE}c1CC6k3$cE)Nb79}Ax4xS z2@4xtwuz=9!Pk8HD`!N{{PNC(4tB%0BIiUOqY0i+F;PbeZIDiCU8q{l`P5_bM> z7$z3~9(uyn(M+J-31*Wa7_90{(8x8^n?U_4hG-d3+n<1Fg1k^5NBjv8i`xi={d=wx zsNmNHatuyG^hc&1V4`Xf%>{y4F@#DqiUdJ7V7-CE-Ht8<(rY2$AE>=;NEL+W7sMUZHZz(aF%1qy8$~A7DW^j> zh!74mJB-!0n*HI@AYBCNxgZLi1eF20#vqu3vp9lI8I(s5z9i9wf`0B4xB`&xu^9Li zrNG%ZI7@WOXms8*5LB<=fG&ZD50PLX)&gbNHiU1weF97;9f6XYcKr5XW4B|txCxYN zEvf1%TCn0)DMsjekfMIQV5sGss z;LV6Mg~NvjKyh^Yo3-oS1${-rUR(Z(PrsJhg?2jP13<3{_OXi5C-+ums{V)?Liz|; z-d?Y^AZ3NhrBx5xKey=)vgrQvFK`1CB3b^wwV9|30j>NykhZGlAx#$mYpykE&`Lu` zqYYo(*{^@OCTY$8`_t7+^8YT=-z($)*pgVpP4)*Q*j}zw`M}T* zmcDuP%3}oEJu%%<3^1r^!Re^R;K;_o;@$77FP*%5hF~l`XSnD3ns2s#J67AW&=@jv zozgN%be=C8sv}_R|FUZjrX?p5X<@Xx8rpv!?0^2hyyky-Zqb|d@F0!<`h$P|c}?r+ zhc6J5e}4Y&Z|1)FUmoN0Ncewy>J;|c|MGZ{(Y61}BX*8T{{39i|2mr0fBxS)RbEK& z7nkX%P*mT;l%*H0MCS;0Pvx71(2qBVvY6&!IJN`699zs~z8udf^tpP{llOZ`d$Fyy z>?2_HCP+;eS$!K8KM_W?Oab zux8QU4;m)MgtA(P69@JwDM_a>SI|CR=zp&tuMqaGc+h4gdZ;7aEFa4WiFPn#z4y9crU2sT&Lv7eEe_)!vR!^Z zp;AGw^G&!XHmEm2DB5D81$O(P^E?BNxh2Kmf!@o6jjVLSebd%LUu{wN^wBB~2J>y-< zeF^T&>~d>>oS?U_??9(JZ7hT{IZX~dR?@Xhb(LIpo++1%`PAH1GiMWIP@_^gLmOsd zSHkh`qI1qG6Xh10#dPu5$Zg7>vG3=5bA);E|9*@wEQtc;v=EIYt`VMFMCiX=e)oHy zXm!F~!sFf1YwhEAh3X#mE^LdkrIuu6J)WA$u4>ocgR9HDxrOx%SHieM@#k_PWp1sp 
zy*O@?H8!eAlRnU_rcEz%*3DSSZ`bixd#bocyxNP|`28bKtCEWE9<+t!u5dHgNB7Fh z3p4`Hmo(<{y=F0)OFsDU#`v>Y>pgXx>fRK4Sy?qz1&y0f z#4V(lVd~w(&$ga<-~FIftlfyeLgr{<5|>S9?zKyA^Dr`8GG+dro|g8Y&-X#md2^66N6e;^ldF%eb0Fo1lI_gCNgW zqmDT@qE)b@X&+l^S(#M)ielxQGl#YC?l`4e($H?UuB2=1eu&^)*M)teho5LDe7+;s z%Hj;HP3hU;fiEniUdk zz9yp7SI0#ynCjWAtzy+&((xOYFBbihj^D-Qw<-O;{sOi1f`|8D&@l1kb&8mF^nI)9 z9Ov)FRWMdF(MGaHbVkDTc6oh1e{kppP9`U8_fD<6SqFLub|zUquHAlj%_rZPT49;S zutzs{b7Y$1WCZ?>G>zu-Wk2u4>3Gwq6b#+Et6JQ0!j9WGL|{xUYIvxTHB5rqX=)^# zp59EnXPJaX@F%(9pdb1Ni!k_;v+5*vp@^((SZ49;H&<>#d?yQBVaqi{sB*K0NX_4E zHuItfG+AZrwz9(C!{PTQ#Mh8+Fb>@lk-_l{Cwe1Qm39_8H8y-!jb5`WsLYZ5JHD;a z!OKdX;WG=fuDe_ODOB5mKS?PuPwJa5W2T>}j4#GQ#W+V|8f~8?V((@Wt?ixBWpvzfcm_C3@0&$zfEa=3lXZUk z;H-R{#kck919=Q-d9CW1e}^XTob1mRVNGr97psV_Nt0o4j~;gSHfqldK&y7VoI=nd z+}Zc0+yJ|GsV$^BduLdPjON+j!?X3r>4V?C31~^T?dqlRa&w%&{36`mCPqmQ7spsc7fAi*$Lbi-^ zT3pThKU+WbD)5%PSH?_&M;6!23BgM9W(a9&ZhSf=Y(J59wmwJlV0u(AS~9KiRXJmV zliK%C2l#`d-i)x&3n||MLoP_ zrxio_%9WY7lG4?ZiJNxX*}-0;iz$|Iuul317-pO!?CL@RuVd=8wDqJpV|5_ozc0g^ z@HzGv8YRB^^-MHmXnsh=elj$p)2y4ylv0;SajWw&YX9z06&#slSb^dOmX3n`nXtLy zp>5o!kyR~~hCCx&=;EvBV?Ul42+Ck-2QW=UK2`04pMw?|lEI=(OHGG|$-%g|p- z$}*rc(_R2YqVDsq(kQcD!N^9ut*{ZPzk2ylyTIOtF$# z{TIuC==li`at+J9T7n+Do4*AkbDCWJood+# zsTuEJSg70%o0&hI6U*>)HMA4tX6EMh98qj=_q%nZY$R<0inN~1_tuPVrt#z|r)$#E z>hnC`i&9<_d+2n!+TaHhe6bi|h5tSx`i7xl(TahXgI(NYhz)T`Q$wqgn-t5CR8$ON zoz9Q=0?NR~e6hb83Phsq`KL=9y?ETsyHebUr%QNzsEK5DpNe;?(AzHS<1FLj6myFd z)gP-7e0pY}R46^H+{H!bpvJzXMI%^;YpAw?<}Taq30}QYaA(~rLtGwchngOC4nFK- zHS^jX@|JYF{!`BuN|dxnsJ7~`38#JiHR{XBczk|X{4Sgo+`68CX`?*aRKO+gnMRAv zwi@$mDG@jW{z|LsM=}#9-@P253gZqBT~Bt6 zf}(3fsp^0`HL}-4xktI%gS_=w*^~_Blak(tw!WPd6Jgl}8*9;dd0S&_3uGTmn+|=Y za(+UhHlO7lQj-TaNHTF{IC=QUzNC5m!-Ic1zUv_}m*PE%e5Tq5BBi1b!!{b!4}PX0 zKT!Y+t-45OUX?WK@Qv@Q&TUPiSrTF%W?kaelS%ixAKi?YmrQ^tl%{DArjfXE2FfV- z{8;9U)cUlPO*`H#ld|;ivQwB4#(dY#kX`-$AKq+Rh%e?+7u(b059+k>%pp631rYtd 
zXhh~Nes1DMcQRKjTqbR!M#3oH$VwA}>^D3<|MuE3%7c$t7eC1KlKU3tx*MaD%*b%xkzt76^eQ(^Ig&1t3d*`#+7uf%xn1&CUTuz0TrHqUb?$Hf*m`#174~w!U{stFfgKE@9LnkPa23+L+;M}Fxjv8T zLB`Ewx62q9?DNF9?u{5^w(1;QKE^6e!|tzSOH$ej3p&kxd6m@USu$s1VYra=aG0Rh z-l%T6&3nbFNK00AQHXNwIyJK2_7)*=GT`^1i0((6q+v{d>So=@(2$u~oKadx&GI2Q z_@F`OCYXoal5G3e^`4TkNOw;$wZl8By*A)oTTO&zGI|!p4+bm{DL1G0k+t0-wuYTt z5QR|ISkm@=@LwgDDU$r^;4_S#LERrXByL$9_SNW&mFKKuR7O)sX}0u&O?;p4vBR9P zQ!Q0KZLY)BWeQ<)cZT?4t~8Eh!Y*I$-rn@&g#C8!mr2KtM{`s>Ll{D&JxYZykp^7Y?&iz4VoE}L46j6+Y7`sZFR%A)q zUs*kXosdzNsuq76)_UzaWje=9%yV83f`e&3YcrgRA^$|sIgapUT7Ba?C@R(S?M39YS^SYR)uifn_sHhJEY-#ls#%rv-Pu$ zT>1jv{ItErAgzY{U1ML^Nl1Sbg{YW`ZCDyBUfM|0w9uc+i#8Hev7S99-QQ*Mt*G{F z^^k*hE-tdRJ4T1HR>eHOe}a11wll7qRq%a7S?R6w9&^F&p2-RsJgl>U-a($_L@ylf zlKK~82hP`XzDiQdz1_9i(k#Bcm96hu>jYhJH8z@OpX^5Dl08Bui-xjiF_O-UPlx;@*>k?p zQk^og!cp?A#_<9hX6s&|tI0|D>&JDkbVDy2QLabo+1>5im|0nQsL?ZXL%H-ecCMZF zCEuCH3cWx}Cp~m%3S$Htx%_5d|0<+ug+wo{bKpqd6OoRhKh*HDchL}Z_+))E@z)Xs zUr&Ju_GSxZ)GwN&EYEy9&yAlrWt6P2yOP`E_*WsNG%CJ*=C36kt0vCGkdYL`>>TIr zQi;mlsqK{6K(nQ<7=tj0qwwBqq!s|94OrMA#8Tf1b);;!N#_lH*nb>Ey z8>jX%lxCe`AH4o3Sg0j&Bq*EL%ZPbUiMS%Ov80-+-i5`jx4bTjFGxhuEpZAn7UKrA zkJ9&=oo6fW3@dOteB=qeBW=M!{Um5t8Zi(ac+S z$-Tm_WJ-oOu`GPXUtObe9%fogRx2bPQrby0>{V*lK8Lk6=cr?Qu}wj$0Zy!*UU?%( zDLoaom!Ch{>K*#37k})ynsURZa}+WD0o94Qajt$)NvJH*iC#cj5Ys4_f6t<}xI0fu zL9aR7Q~CrFFgNu)_q}4*S{2&nJGxEFm_kl-5H&6CGG$RO+M>2}OX-9Y{x!fBV65gmHX|^e#?Vf(`bp|&6M_r#yck0<}omZx^V~e4hqbOJ#ZfEB`YS* zaF8_B$FFTwA`|8Vwi*~vE_!;9vupht9{tb6{a;w`TJnqe)gKDHS+;Kt*T*yK{w@)L ztP%YTY93t(4h%iKR6Ne%t&h2yc4}Qrf#$)bQDHF^yVGtY6PF|$gF_up4|#%N9%m~q z6%WnW$K85%Z=~E_9iW!hq&WRB-(eP}Fx{=#x@6cxde_CdDq-udPj z+5B+F1J+haEp3T*NpG~~+T+XJpIt6tY@ld0`qgX+z%*75SU`n;u=R4U0?vN}v10Hp zoOHBG*vK%ZrHnG%Zqq!KfSFm4aJC1CsGhwd>Pj zcFiq2j=qj;&v@&xg28fRQ(+bwH$kY0Pv-y5!NSibt{>MHxIsHhT+kj*MPXK(zVCot zOz4ynOU1!G8O*PS5 zrC05|r;YGpe5<0Jg^3Co!PmZ_`ppH*PYzNl8Lpa(mtOSE1b-a!ju2OsQJq>Vy_j_X zf&~t~CaK)H4T_o>F2US0 zf!`BF>R05Y` z8Jfd3D}>t=8f5#n!}yOxd0tSE)4QZUXRU;P&|B_W9+<*3*EZeHWzh~;+I)LQwlB_R 
zes;#@%97^dU$E6TN>hp@M*}~c)zd?`s1P%j{jBSchjG?0I>+5Rah6KnckF41MCvYn z+*S{&rTvk|ET0dtfWf2*Ehhg~=l`>naa}xvN2Myn8bcN9>%fcW$2OBFAUXORAO5Yxy{vVruLNl&Cmu#xNPmg20mAEvaIj z0HevK>q4u_$4b^IEinS5*vqaZEP874X)_i32R3sVZQnm_34Q#XCu-?$3QKcS-#_j4 z>w^-a_9udij*C<+^KGj7hy+2GKg}GXaPz5*5Nsmz!AHR|KTOtiC*K8cFl?>^Rk?4%?)sB772!VX<}+o;#+ zxO@viax)R*1`SXpv@H>a9$QD3<8r^#sq297tBtKyLffZw+$C8vmBc;Bipl-pslyy%T!JoQMP#MK}W#+W23fw^ES8&_ zTYQXSJ!!0Y^$O()shlWs(_)w#@S+cWJLO9cuo8~$n^2OzmN=#4X>NJktaxIJ^p6F) zAKuIw#k{GFD9^X8hCR5%z^FU=R?1H8qeVxCra6&KS;24LrJ$zskjQ~E2bXlb`_p>` z1=N|A-NQ>wu--l@C$l@Ii^mV)3}^1)1hjvR26O)8)I&ub#4sr0I`FuL0Oat;DASVq zk)fv>nP;OKr}krBJ4=i$?zlG+&zuz0?14I8deravqlVVv@;uSF(snOSp^v$UN{TgC zt)}mzXRd80UCrT`NTEX9fqm_<*44sC2HYcTqdxb&N9AdKooF!m>emIOk}Jn%p0`pb zXy?^c3x0Q~u5+EM0v&)$$7x3SRlAuRQE}OiBmPHhywJ&DUxa5n*Q2p9L1o|4)Oxk; z%5zOHQu5Ank&(D&n{eBiFaaZVBPlSeQlU8zqHkrxd}g>uF8v2>oy}MWW>ojPm|e@+ z;~c&CGWwOEiCX;I2`XKX#l$zN2Nw1FyAZ3eQH1SRe4!r4kt=UYhH$9c=L?B(`4apf2(A170SxMEqb zyXJM$Hnux1MAN5$S;2Xy6=uZgp?fTjjuUT$m)E*4Cb@fB|5iTFX4N}SNOE6TTM7Rv zuCZVBaDm%+^1qI zQf{C?W$EoqZ77wkCZas2+isv=xA3B5`;_a(5rDC})M-C8D_z1u`1>nGZL9K}pz(a1 zO`!jVEiBz;KKlqmz0l|4aRSsM-mR(>6*_U8VGmusrUvVIbd~=>?Aw8p6b{ooU%cF4 zgGb@z6Y3N9=ezzTAdghSN+-5eEd4Pw7H$!SE9BsJGwiqte=i9r8~$g@KaXiWx)3zi z2G#dCXMKvP=m|nzp5kjKjt@VPvb%g?ZhsX`iOfE4LWAarDM-`co@X+jIiA~~owRa= z+34CjCr2r#Cu_Vv`S?_TZE6bVW90V%icK#E^J$xR;U}+NaEPoPyneYRPo|GF6Vlo$ zJ(_cOI^ImX%X`+zVq=->RQ#lmKC3D}EO}){yGrKMMTmZ)&86d&0ek%ykd}L9_C3I0G!;TmaoLtJ5@xT5A;pV`$nsm#r3|@T@-H7%WT`1 zzup)?9b#3l+SPWm_As-NLqJlg=KvAfUtC${+}%;KU8_8zUH5odjPtI-KfUKprCQqC z@b5XUNJk_#Ue-1O&g+mv;T6ENndx#DO>DQoQzjgw-z`+$iIA)=Pnf#^JU z^~EyM)k|5<+n|)1M-PPi&)0>Yr2lc`T$`GG1*%kb!0)t{jPeA2D3)_0+`m<0R5LAj zPvzK3TH49>ij{cEU;wSWU7Mex|8)0IcHEz{c#Pmp#dI9>jo%qTv~dEto~(Di&=R1(KE&VnOi4q;Ypye~WVk+|F_O$#|ol?&53w|8OS*FC6yP=PJ?C zlWjinii8TStE3QidvIkkMWE}X&q^vD69uf*p`mh)i5hww-^rmNSu5+lWfXNdsik?` z(nnaryxPgEVsk(jfZP`RY;Ke_tbh*}DJ~2>5`zpYKs+xcxtY4n-`w*`*Q`-lZDE>f zmrq}?YxQyuJ+)`!vgKQN0uUK{&$gHDAEN%vfdD(|{KEU5LA;WcC5(s)hlCZ&un*`Q 
z`e4$NE^9mZr*n^RTyU0cRNzv2fcg_wl|_<5hE;{Daa(<{7_BH7IIE$x)P~Sut?0dz z3YUCKI*+kv8cRoK295j;o_*M7yU;jPV(;7fp0-j~P|d|h7RW=b#OGeOI*9ocsAE~Z zvb>R^NmB}&v`A7qu=HXlYyEfgm(2*5li4!_?#g^Kg&Ad}g|nGSU5LG!Z@&`aJ>o*4 z(Nk7>A5sK0g*Trs#6BnrCYDLjOhZR1N8XP!zJZXKJoSEle*TXS9^@x1_vESb4R&uC zIMd(5&C6QpKqHgB*8;q{C!Xxd?;m{H3KE3cQK86>$*POJ1)1+z+{VwZ}p zv=&SMkQ)bkZoNldSdCOAtHafg_{Y2qJvoJ2xE$Gk4)9s>N|fXLTtgIA5D_66T|^AL z=+I!PsJ-;DG4K1UW}NnWdEwUz57Hd%NP*|Dk2Ntxo&0&TVYR*=ELN&QnC7qy0Gbd^ zPj#!brnTo+Z?Z29W|cYT(dy2gcEn_l3Lq8KKH{(Z`n zr=qkXO;`vd07E5eeTEm9EmyRExZIv$5$4&j(VacJY($dSb(98-0I}+u8@g-8y)mBW zd0P47hwzS!!F;M&ZTPZ)!@6lz(?KV=2y9Z+6_%Eqo2rQFK&x*p>%4;Hq~)hUt&m0W ztm|9mVLg2~Y~9<|0k8=n?-eDri^ytQ6?#lI0GJrJw80x zdcWjj9OX6<4=5Z!#X#s*D(J4IFlAs~jc=>Q`c1lB{becf&{J*_wfeRg@PX~}XGYrg zwl>SsO&H9ujk{5wwx6ZN@UR7<7KLMS+N4;HPbc10$5c_jWWGcz4P;cR5P$ybu!BtGzb;) zJF9gLUpeE_w*uOfmxFr8`>mYFF)0hFL*n+iVfyCS8@J+SaW4y_kKNGA_%IzWQ)*X0 z!eN;7;NjpM=g~*7x_XY?44%@ad{fyy}+E9C;9i~%*{N+>;mDxwYc|X^o082LEfuEB+pLSUzP|s4KZqbbc6*eb#h^?LHneNpa!+r2} zLZ+3c_kpoi#e%CLL%G?Dw>}7zRPof$eH+fxVg@R%P{aa#)!`oESOIe&%Mm zK_>Td&JBEd*rA`%K8^c<)oqJS(VbgQ6iTe#&`-u!eVCYs3AIJiB@K z-TxQ{eHd}1Pr9bR?3E|@*D{zD^V$|FD@4V;MVCH&7|M)_U6VBu2Eg*u0I7Bj{K`rx zwQUJNz0EjG$wiScdCxjM7n$nesbQuzNWa1~RSpFKm06TuJ@x*b^F7<>0IZUw!=FEg z?`Av?Zz><{-Za7)GawuomtRBjY&AY5pizq*q+r-$c3@*=iKS@;z zu&>%IES@+Sc(!Er1xU5xmrM^ixO!9C@yYT}&2h|FTgfA)5bdodq;t}3zx|gvjFpV{ z`7joap=t51s#;(Qqd*vjVF#Cqd?4zGKYkHllL)dIRuyPb);tbwN$f6P8xGPEn}tSK zVn$c)_9HP;1cBpB|3bOMOvwe0nH)mA#ZQf?q?uZ(LqO~V^)5hsj}Oh=Z)6wocU>rd zX&uDAJ>=<3qX$r)%v5T9@*`qS-#^GW7hvBR);}5FRwgh?@ z^}3pwq&oSak%u#GS$BDcZoH(fG>G}p+7DM&6hJuzOiOdSnIxN&Zlmhvv64j@Tqv-q zZs?)pGpw<0pZCi7deh0pfBuvYbip@?*yI^VDvihbPK@P`<)bQ!k*q4dTr zbFtDqSi(*Q`R3J~FDPTqLC9gX5b(LkLTUSzMLjYmvn>3F`Rjq_|r(x6a2 zA#AFp3zDdIH#eb&D{pC9draUSPc3qBJ=mJwTjFNv&63O$_k8}y`=I!z9+KFAM}^tU zbb4I-=>s5}P-2>@``1F8rSqsj%;;=BlB+0?I9#(L(X}Jmt<=JM#f2i$RyZH>7*km7 z0y+i4F6EdQzuscWr?{3utqSk=Z@f>FWg%4&y`awaZyonFu(Lg2byjqq;ctoJm=C{n zzGpo4R-`@<5*&Bzuv*PXRaW-`j;9&aE7DYHqqON~x!Isua{Q;8du6nfT8b1lWlL~w 
z3PtowsSe8KJHaOaOkO+x2b)#|z&=%ML7qm}1F*yi zMoxJG&)B7lVYTNh2B_6x_1wDv5gM+XX=IZ6ioyxI?!ux1i4n;T_f9=psnMTb;j2%5 zM8AGRZ*lx?gWH4qz1hW$x_F-9(<38U)bYGNA8F|`8JbWX2On9~ah8(B)vzu^KE%X6 zr?BSpmFa%CJrfZ{SK-atd540i)%Xa~f%enO9C^&5LBRft>5^QR4ZUs#o;LOxf)f)T zcNe<28ftwioPQ~{UP$gmo>}`so?W0!<-)Qn(Lq^XYbn3$<*m40*6X1gjH2yf3KR8R zH$E)qW$BFz|KfI2y=c#iT-11FGC z8b8iuJ=l`Ubpr~`Wh<8?Z=NryDzh{suba`BogFDzZZFtsZdxCsLnMd*fx~+bA8mEC z(`rKYbZy3Rg<0S7f~()|Mk!B$G4bb|4EjS5F0!iPE_0MZrwW3_s)aqSD!d6!zIV`r zxeKTuSBF0H$4eZAD5-W$yK0Z#TD?zZ))LhULDSu?vuYqza8johi5*e{T?4hDW+G{s z+=v0%O_f!hb;V8*SM-U}p5gQA}j8^Wo4| zSE7-2U!?;vEA821p>o5v#pygrSLmdu7&h+6c@(gTeDSOeN8*JD2wb_<9U zbpaIjo<|7Z-7L7m@{&GsEv-24B9IJJ*K!7##g6RXOi*rs$PEJLLjUAiz1s!{7HsX+ z-2$O;Z`+57Qu1}=UZihiS_pzzSC4wXPsY%c7WC-LlG68|#fkiX?7eqbQ|bCPs-q+0 zs4(g%3IgLOpdupDq^pdeAOu8uQ$Z=A2uLTyG9!*s6r`7^h?I~-goGMm1(6b3LMJL! zLO@CqT9Wg`y=P?h_1oX~*SXGh&iVLbOa|7<%Ddk6)cd|4i~=*(P(9$^KK$QH0!z}xC8xL}IVpaJG0ZQg3mXdcQXGo}(I z!mifE3Fq$p`v-eloqzY#U++Ag$=+pR2^u;Vt!Nz&GOvUqs5d4Ld&kZ!`4YlLCQ^*M znv<)77}XM&ZYr9Xzr9fbMR|wx>Mv)WzufNAd8WFoM>#s)I2hNpOLM=ehZ6rZ@6mba zqwEP=&h{ckQ*wDf#oKs!8H;LFsD2;}*D%4w;naQIX}w+KlGr;osvf6MY!*r$S7PD$ z!2#Pn`zFHp%gKo2$IG0oee{;HTlky^0zthQ8iX(N&S6axq{I$1x#b}|DZWtd`{Ov^@Q0V>K83Nw^S%|qWoiTC!KP26 z0QbW{IV`>pDpy&3pU^=-dp4p4n~2Cfy(9pELEXv`s)re@%*Zd=D3xa<=Zpz+{M)U* zSZnm_3TQ$$Jdz#43E@*9}c+1>=S?_$~YtCn1*+YN5E-vWBY1t=CKX)rJcL(>Jp;WGXRoRxG zHDRkx$2%~lf@F~TLqpRM!tj;e-;P0yG#(snex~+5D6CzoL6h=&^=ZLs+Q|Ec$tpc4 zhuQl??2fOJzVB$O{Lp(jmqyoaYJvuxTnfL4Cxnhw5llIHPg&`3w&vINoKUN!m-=E1 zd)-rI*=qgolmQN7wV{YSwdjV`EsS?+*j}YBXIr6xS1QyKH#R=zV3omoP((R7GvP|( zdQ!=PeCsmaYVfjFGTeuj#y^-TLP#>we6W*N4i5J3gLogOq1pt+I@v(? 
z2i)lR1DzwnWgWm48DQ6ne$ya4rnGlO70bdeuoeJsf zQjosR9?cIyP~_M=UlAiCaZ*?_&n;wWzN!AdA=bzhAhfAlje zY=-~jk{z)}*Tss_Wkpz@D5KEXHu=L9Cc|vcG^o^@>$c?F`C}HIAa+)%;7{V3XnT9( zhb`Cv+9KytLxMIwUpHlQ4(w7d%H@l10|Q14m`gQ2UdHMSSpci{5TO|D_(52uy1&UI z(v;g>T&r2x_l{U7BFYLq&q{A)T1zVI-^A|qdP1&rN-RKtZFov0F>a%*szhy+z1@hF zY3&jbeGtCL#Spfu3u`z943Ea}P>gE>woH43M_Ut}@Z7g%5)rE%6>&l8^pVRq-7td* z(8AIi8RV*7=8^keNCZy!$mv^8P8SXg#`MYo9C*BnYS${)D=)Kf0cu!)7Xd7T#dqk- z)P1X?UK=H;jgM%jP`iVdNK)h6GMQ3oQ6i5oyDwiYzR&r_5834eHcQ22AHiMj!G|IX z=3XOm$?vyR56rs-U17@7#@C7N2c1X2kY$Eg-Pq$>r5e_>RLINb=;~EWh!ZR=`C5SblGuOQai!@2U=h0pB zZc1!DS_)zM#30R!n?Eb<#@b|J&S(_xAvlmqmy1XY0w>j_;#rO6)w04znz(NE7dOh* zhQN-OuM;%e6YD0;@O&q>d1kkgY$t!W=3>7T#wf($utvq_8(u^Qma~9}Q|x6qoQRYr z=luRkH{4y*CMEjyCK;<{hGK{bslRMjl_GlKGLy2j#_YgH4z7ZkK9qpjOEvXIEzRv8 zR{MN?D4ikisdg38uH8jctRg;$hoynUX%6Ts15M3!WYOH%fDsbO4N zZ-i|U-Bqaa#P?Y2)Dxg|@Q*xak9;O{IDcr#DYHTDqHvX28Zj=UqVSTzxMjQ%&Ivb} zW;8BSrC22mES|RV>9Oz(Zx@{JeEyvvr1gfag`+xlM=7iqsWevl8ILT03zb4YG<}Kwx}%m;CN4vgON@ol31A~c6xwKTfFJBR0*KexKM`xnqS1o zywxNVag?l@idj6@NzGFu(!G6;fKx*Iu>}%vGjllXYNO zI7w$cyx1S5f}*`4E~Swyr2#>;U{! 
zmeccmdtP6flQw3cs6bP5E*AV$g$sguYqFHFJ~!+VZKOf5A>lJMrE^cnGLy7>Pl9R4 zDN>J0_w)<5iQ24;UVzO@Hk(4pWjc1)rZn_IZ?D()FRITb9lpB_r%B?Y>}Nj(ZewPf znlZ4-b>2-T)F4Fp+m_92no?@wlZ)-c#)GslGf}B-K4`w>rQT8_I|LH&D}w#~UP)Xb*UDwgnB%T=Smx)Xi=?X-HKwGCA(pL}@$ z+pCu51+2AAx!iEmr6AO_mJaZipeCASW4(bQ?3wvET_V`X`|)0lM-Z+qM;o`iUZW8t z0R39LlgicKX83-xj7S^m+V$~8hwBDDa1@|WSh2{y)Ta)sgS$eWq_>?-Uo7MBBIX?u zh#HOyybTnLBfBb2W)>I;GLfnaWAVSQ?g&^Krmb;-SU9)|Z?soD+sUUI3OO5g0pACa zFLkuzdC_v1sGsb{!AYdazCcCdWJSU_^DWx>oU_w~`p1Ak$3-j9unLnE;SGxyAy;l) z_8Po8R$(r}@q}Te2Ts>qiFA$YPB$EA;65qjrV~n+EEJ+QkV}M}d=HVk0k;>=+pGCGW>LaJLoKNLffin5v_ldtZ8cKb zN~)JO(ijO90wu`enQ6&*I8u1>&cmcs=oPS)`y_i8HrR!u89uCk4%dpE=JoI`KP?9@YF3M5~Jo zjYI{G?GJIW)SVlDropap{|tQ*_JUuj&vS&rY9|{W9(v6Uz>EqQ_S4cA{dS|TZe4+T z!4NfyoO|sDd#tPiDQ?kg5-chAmzQJ`nu5o>c31&4OfcV|=yaA$auLMIgx|U^`xKrs|?`TC`qF;@6*oUXJ)I9Bu-rK64jyIbMHzil~JpR5v zFw=qw-Or5WQ?BtYh*M4!J}rINuiWbSbTE-QSspOVe240^*)UqT#jDg~n@&oh>bAU) zokz>HFDer8h|+I>)Fx0?&!T0Bit+p1}9i7-i185 zt@p^4D>IVC zpZMIyJqm(mcQX^|3NojrO3K2A)$Ks{j7>MnnXFh`TztHT8fslJdV=gGodvwlNSAYJ z7q1fCwN3-gfmVl_?j9%VejaDoU?GL?E_oGHu1%iz1|Mn@MJd>?IovKAIh!=ncnUDT zy@NQS^UMH2o>4BhFhU%jI#0K!uog|w&b2EjaC#3#w1or8zy3Vy>GtRydg)L|tNC)( zQ@XM?6`y4p0!6S*H=;#>F|71)m!yvaq)_)_Y)b+MJKNpDKQ70~xPPo3C$3RY$FY7~ zGd>@7aQqE5CmV%TpO!j$bfF~;@_947e%bUUC;5TIOx{N*k$F;z=QDTKr^y(@^WQZX zVokd|b{aLx3;@m+1>t9n*k|oN$HWws*|9%h@+5a1ojW51I*@kkRnJg}eHWZqY3z&Z z^}Bo-DGW)p9jiDx?o6<5QIAgeO~Y&xgIgND2P=(o4AwH&?{i4H+Nl;6L?@stYBx$d z^Fvtv1lvGA7hN5!Mqv!rE3$;`TR;bp#8~5*su^HnHFBT)jx)N zCu4r%s55)Aty(apk0&DnDbM2y>u#$hwo@=VSCO7;^{2DPm++eEnqxyy{Z3wbe1Oh) zT(c0A_x3K7L6s|*UOV(h~(0NDV7oSz^d`YGjo75kGA-jc*XY-8(W0H*Kx(tQY!b$3^c zhit2@R@MAypdJf4I+k>g$J13jN; zKhy1s9*tDb)*DBhtza9IgX43IbK31cUMzFs1bk?L{b;c56`P||Y5$X*pJs|gf#6zp z_G)a#z>Ma2GjgWTYc9Or=Fr4|D<^q2Pq%DYG3{_oo_i|_u=~yYQ#1EvmXW9G$Gw1} zy&q#7w{(8}AE{RyW;zTmnL@|TvrF%)n`e2=M>>R$a@x|VF^<>?L6;LPP9=CS4lNCI zt!lu4H`9!K5l{890D|lcwYXy!JGAPbYRfuI-S;lxHe3&(B8q}D{d=!^M>+WnOaQ{L zh+pFCWeh#PwZ?39s_DU4o=r!eQW#I(fiXIOF4Z02%aiz;u-2NBrGQECL{XOf#^(cL 
ztzZ=3@WOE`jVu@ESWb(6!|d;ybC8|j3A-axw%lvYhAcEr9~Y^Y5FlpVu;_3OyDncW<+{V;frYe9J09$suf%7WkfkftpymvGFSs(kfdU1ymrFWJ>AOEQ-}&s(FnyN; zDN8`6bE~FgzR`$Qts2|962n01$x(H1EPZIt11PW&d2m?nbt%+x+`97H%%MPPl9-YW zr#4wBZR%59r@Bl!9=)tAxmP~xel;g2*K^)AL^e_S z;G;uIW@gC;@#w~3U-DikM?2_?T|R@OSBF&Kf)SjfTHT>bZgZ9O4|nI8+2a>GOw-#u<5PubCd+m4#iGr`qn7LscOGIGhkP&eo9yx4dF+rSygqzCOsB z{MpAG<|MDKVYP9HlNEkH5m-S)O^E(+q8`2Dp{7s$7heU%i?dT%vgWW=FY3o%a^ZSZ;qmgYk=BCFVU zYe^<`UYECFdiKVh@QlxvU(Fx;Tr2w63|%StL&LzB65}BCXX~q9_Sc7J zK(O=t5=%spHPDO+j9$|tx-Cru+gHV=-z&%l9)Ey7qdi$sarN=&ngV-XlzfNiu5^B+ zW(uiKF^vD`P}+K0EzWB`e_->`j*Phx_S<(vXu{}-4ni#Y^{nL_~F$ zLU&ea@FM^6V~`VV(<=V!T?-0YGo)T6z$tmL0ioKttfx7{a?&PPy-nRz_`RrXt|rpx zyJ*NxY8K`mKnG*%t8OJ4-if4|rd7Z<9EJ|hEW(=Lg5)jMqH~`8M+%X(?ruFp zTh^*UURE!XAV1|tUOjgQmf^ZmboGT{pAZ)Zex#K!TBxbB!r~;<>Dizifs9j(twN?< zL84r#X}~B!M?WPdv~EK)9H!DD=O=1+mE`%9`K_NU+(Js++sP|dzHR_4t8kw{$`NWf z>zz^hgA%HPcpnj;x%(@PFB(}Ym@H$b(6wC@P zm@x?G{ipUVm-o+0uo zJ#%kR^+ldUCNS&(BFP-JjCYq&x?o_n=KQUgQ)WEF_2)jv0?8Np!tf|7YeDhHp5aa} zb>mOa%hc552YVK>Htn!I5tHpbDDCrp?{x!kcc&5550}cEg}C0%*NK6Yeon>nKA)6i zYTdjEGG6n%IS{t6Qu?O>Jd#c52$)z(%wbrf`f3Wx2ukYxkJe&4W?r?|-=)&IXYgLO zCW<+oSZj^*<}*!5?d`wURCaskjT6~xEjumsLqk!6!DUGPz0!kNr_K&Xo!B?;{@kLqNG8hWTY8%I^hp&NQq!QEuPUw2>+!gaoek%=YvYMoD)4W2co2+CMLatkVQ_q&_ZllhG(dIeR<*xj&?xN9_dN=OO2V1UFs zOpb;lyIO!|v)~R?x8K`)on6a9s5;(m>w7$?`9VDc*6gUrpkkf1#In%KCxqw_FA-5Q zs9Uot0gnYz2$SX2!K_2kat21$+EHgwWXPbKEv6S4!**y*u9|&oU*gJ)t40 zb&=GGe$GOki9lZGfOddGPL>37<6m}S)vCp7cLaV>f4%p(=fmCqaue0G$eQBjKU0D> zG1FJrS#sQUb**ylth5@SScFc^WR!?$Ayz>o%jkUUk__VYQ>~rTP2-0XK`A zysF;*@)^@+9mR#Ncx}?tQ^q@9PSu>P0{)@whd`@m09ohCSYi7I$HqqydrZp}D+IM( z#HDi^PdH7^kqc?4q3C++s!k1#l#LvPcQ!2B;_WR5S($k=V(q-{@EsS~c8tpc4?54n zMh927Z$r!>YA6TgvGRk51_f(J$m3uKgp+(S2iS^IzKxoRuH* zsp$5<-hcVA4xL}K2qj8CiV&*NEEO`@ICMiL5i~KvCYqaLX_7uw2=(`yo9O>^%!%_6z!-<0jab3qE zpw-#tat@2T7@w2y5)kitD!Uhk(WQ(lv$>;*CHj3`h6&JH8|k%LW<}?&_sY=;Fq+9h zsQOli1Ek@3y(o?8PEO6_-FFLq8|Yint(3&OY_+k@_UNvysue9>3DT|qHwd(Xa9Z-+J zio^ydfx`swkM5hRi?gyeyA`D_o#4TN$aP5cav^c^u&)^l?29 
zeplF<^#p!ScvlO^Ke;cLheU5O&B%l@-dDu}P@Gw!?!L9r4>rQ=y(7!S9~a1ti`JhfItK|u5x9` zN8prdeI0or)gDn{QHN2++G-mTTu^#pW!ywyH`Xh<%9X4WTiHGCTf zF2>(k-3)a)=sM##An7iHo^>kh3>Kz~=;d5{mYnt+UcYXTySL1#a%KrssR+2u_MHQ> z9}h5NPO>^m%Wi#xCR}qq@l8yv->+^>B8vl99@riNl+nzn-#NeViasEt)@EV!YgFrT zJ@aLRoy6eh3wRn+iDySN{JlMIFGF7!Lb}B+s|@|%T%oj=JtdqcBLHAXPfrk4HybDc zZ9kLlK=f0#2TS8GNY;liFnz&xo@!gd70tZnQR@EHB7#}c zP@|X}m(em^tcA5!Y1wA_RcVXXoVOR)wL=t!V^j+J==5tgjnW3~WV7fFJYX7~N*L?e zGH7gnq6PCDmN%=BuzrxKU#p<2`zU61VKCut)V;<`dd2iZ4O=Hdy{e&Kji}hJb3Tnm zQ$?g~!6+jUI%(>cZ5$$A4;e5B+n=O8Fn^$eP~gU4pRX?qnvA*ta87s3>B*ulM>O=^ zfEE)H1DN@WmRs^uh?+nz_Aebal(NA(uLqE`6BwtP1E-iLsuz=He|}D~D83;uXERRQy-^Ko=I`@b^JAWoZj1P>esVCX){I2THl{QX4=T z4B~iVkAooFahkapKWz-GJms3ui-0K((sLuXF1}j)^ue;Q(=6@WcVhy64iH8f!covv z|HJ)#$zws3Wms^BM+Et){n6!5f7Hk8TYOin?F9zlr9*-Y`p5#+5;&7RQK`l*#P*l8 z&S7!E6Arg=79m&W2skv;;8T!~;Yd~eQFiM`@t>Y)gs>dPi@oiH5<@ktv5-!_Q(4*e z)aetfK?nE0%-E_`Ep`8(n5Ty~j3yGux#4R*?zID59KKS$zLK%9>G1Iy=*cBOXZ0ge z&O|)r@Z$=VM6h7eRVAu5nd%h{{2k=x)dWTKJE8Blr;nL>N9`(h_}Rv`tuI;}ns z@nGG8hLibs_J#H#?QVDbwN!bx0I2}zQB_aE#rnrXd4go`D#UXDh&BZu7`29~=VVOV zG8+_29Fz`?UbO0E*%*DXFq!Fc8fK3QOi2R@-M$#N3>4gCYspU)e*%BW2Dg6d9nl_* zelzA(7Y@Ah>M0?rjD7F1IL4GeN2$N5VIxV9jwV;Au5~&}u;-Q5`)E9yH~^M#VJRN# zG&)JvTUhK(01*)AUS@_60j&d$iQ4#sP1l)#H5iuY+_{u>542!LYi-eY+a8jfAt8qxO!XPz zQtR^2M7*2`MNf0tM>YmOqPM}g{4Gkb@46Ybohyfpr?Jz|xXz6*zuZ=*2P&lN%L9-- z5fld5wf(?qO5y2LEM|_^O^Ql-TYP6%gAQNlrXK;gZljp24A0`o#U?z)1(6D@h&+P) zXp}-X>$Y3n%>=~|#JY{%Xv*6SCLdl~QBFHY;(Lm3;qHTK3NNsF@cot=72gctS0ze` zoq`;d6V)!468I%~1J=a|nmVA_$EFsy0QdAkG|H06Qxog!3%@eZ za=+eRrid)K-I_}x9!p5RG^`iVF6F=oss9v!?oDQ^<50`Aw>~LJ824>ZF@sD(JEXb* zA?u=9Vb7X3X$VX{00HL$9yhwP;|{1Ga`1Z|&L=@Y7Erg1<|POYo|*3-4t(~}n#fF1 zRD3B1RM;A(yso)!W|90HkO1REk}xRRv!|dH)(~ak+<&RxG=arMOj2MTC&2OrVaR~S z0$H~z+`TVAAlU=`f&jDr5ESadG|Gy?2Ues4^PxS+E3Q5>HE+yhz5?1P3-`x3apXlO zO`~)2YSR=}*vEa|@W@F!CA{ydGZXWXSZB4t&^{#bNkIjB>V;jq@fD_ot34xRq?BZk zo}Qiv{1iRCZ;##F;ctchxfr$D2barjKr-EB5YStQh&^A|*eI5Be^NbZ(G01%S#`g#Cmks-Dh%C0oWoI?E08{@y)% 
zQh|PcB??NHZEjaT>C!UjzRxZ(r~Il>Y|KkF3ZmBKti+!L;_)NiZbu@*)9MTaMS@u; z+87&jB^=XnT4Z^HW}ID!@r!gVXWa!Z4|@`2uewh* z9?FW!gZfOpYlu9c+89xwis2kvI&k@tUKWv^ovX~eOAU^U;hBxi*N9|g_FeHM{&YvE znIlMbQ#>vOIZ^Xyp8NyDc{1@d@2R#>qYap|She5LP}C@DPmEuF80l+c73iLPtqwTn zCtNkUo>vhp+u*@`HU zD;`%YdsmmdrtiCu{qvwwfiV9C99@?F0F-x_O+!Y=&VfLu1zqd)2;lbUIE|Apq5p~vdqZAI{ZYeOGnDc_1ah)HypE3M(-v55u)EsG@1iMJYkg)y7HZF+L% z3XP~{QbbDaJqrJ#XSM05nufgJJSge4hmW{d_Se+oqk{zgy+=GeJdg1*lsJvkVn+*V zG6RqzFY{7MGw#3rzL#K@d-q$ONw!gK`=9Rr_yPT zZjEYL z1Kn5516mCyJg6>SA8HrLAaW-#XWuGUY%rehd=sG+CMe{Z#1`pjVW1$RKFvn>B$x%k~e}D}~yUBvs*2Cew#Qm{Q8b zIL^H$)%AV;@LVr%?HyggZ^RxzRDBGIeb&2~_FeS$CIn<)kP>+fPKKNq{VrN->rDDy z!jkRGSQfj#PEYU~n{nKw)rKxE>77=+h^jxS@6neUUA-IV;jxl_GOGxM!RicWxc+yLU?zoicjx>wd=&v-34@a`I|K2dz3IHWkGbzSv z{3KjK8a0Q}&#ME~2G3Umy%X&5Ex)$+t9Km%C9*0(xs^|i3CP#t{CY#H+8jyuNdus3 z(_b5x_CRh>(z|=XKYd9Sz`4Nm$dX~Y)cTkWOeeXq{9~oclx(3@{IR4#!1*1JQ!;uA zJNsYE`eFv(SF~*g@=|(LaBXrGdy@I%TrhT4Ev3be+-wtlr;K72CJi8#V)8RHQkYOSD5D*L!Pr> zhV$a?NhH$!P`wcJrRB*ED(!!?=I-_f&r(znNbMK7kU$OfVr9@)8vKNDVwF^P`QT)D zC>wgBtt3G*(%kSCm!TMCm622$a%t&L$D z6T)rQyX5$*_wI*+et%$5XSGtdSNx==H5mZuXLh*Yf(4!Ga`&n>XGa}{z8cgE59jmM z#*P#Gw9oj_?}DIltQ=0x z&aN*UOtnm0V>Vth7`r1Yax9`=!nJZLl^r@enjj@QQ8NgTR0zR2*>Z6}qdgkN1Q;3E z*tYYe&2(Gsu-=qHjc7t06w{2*wl_#xUSFOKkzJHJAHUszWm!_Gm*1q<8%5oN3aRTX zvw_~fcVV6i0Cm>DU|jFhT_NNq3GGQh_pmsut{RYWNY&i6SUdE4d)3J7TI@~Qf-umk zWq#T0=i~UyHITBUQSD+=ZA_=flC%xIz)Cj^J@Yy~>8GuQKt2c%y{hU~jNP>B?5ksI zHLl`7kaT+!lt6Uj@%m#0E&`5zS&55r)NJlhyU|`D?s`BZ;j~z(6B=|rvoqWvq>od; z(m)t*;>xkXIC8V0PxKDL7^vrbwk(V%GuBIGwAa+p#Bb4{tY|zSXdtztzl?rQI0N?~|sC|BUH?`IE?L*!;fWB!TvF_*?8z>>|#JaH< zELDiqITr9v61ePEKu|^V4cnf35Bo$@)Sy^~8U&))u30{*i~@S5i3nMYPH8o05zII{ z#PN$QOkC0Pm7Y|HdJ>OrpsxLm=DTGidGCZWRRyjTz_XABVJ zdJA{m$W2w8qgk!MGaDBq*WPjJ-xURt1&C!G$ZFBUC zFp|qdl_Am6@IppY8z2I?e+KB1`g9jM2nkuD2LyT{mx-YGQ#f@L8E2|DeRpzG&A1}u zKzdsNo6IB_flmx<6MjPf!`gQWMg@6wj%P`(|%nF*W%VUGIo3b`GAVwfGpCl#qL!qUu zwZ55=%XxqsoSQpi?GxXh*608B{gjw=a_0QUxY&x>-Srkz6OBc86oXmwyR1)&^<_4u 
zIpcz(*GS0Rsb+qpje~CFpb}1R(wVUT;|{imml-dOgS+qqsoDmkYAiN7mH4dsby z?Q?u}V}Q%v^0Q;SvgCu7FF;%pII}Cv&oWf<`D={3*nohI00oA}iOutr!!7IdNBz7G zQE7;UbFOthPj)Jm@=^Wi{qe^19qq)~0xzf^ZskJ92tvJ_f!1PWF_bIl4Qpu-p2V~| z+(`R&yC2?gUUa$sS0Uq>y~L5~wVA--f|{W`mtmWppxy>t-!*2&qERT%2YT-r>NB`PA{ZN>dCCd0r%!5XobD-CA!JkrMY7%|* za(E(C!~sHOEf(Fglse-M+7zHC&eR_OQn*-TmFl`FuO=Qig2ai469KPXK#dtR(`n%> z!1IzK`7b?yKR40KyTIqQf*4J7^l_-G{2HqqQgH{JJ$6~xjsO|dn&kDy6)n%qP zW+W{SQfcEd6}k(b^Aas7cIex0$IpXQ7x`>RQLpyJmXmeX)5uLSqbf^1aDB38pTR(ebBmXv1jq( zs6nC+R}mA7=If@-oo9AtfTL>)K13x4>yNuo8~~I)uK$of8VVS9f0=_+IS`eh8`x+e zTUy*kZ#IMx-yVvUp=fB$u!gx|&1e$u;4^8ak8?K0mt;9wH*PGL4}=tl*aJXHVqk$6 zKXbFEie*S;GH0hP0(bVFre$ccb66GOD!$!7;yh^Z|GrX(yFJE{A!%C9N4R2HQ+)I*Sj^x z9GL>ZGm(y<@3cvuFB@sCDwG0?j|il^?ii*yfV9RA!7b!Jc)?L2a@e z5OkSpHU5JZLUX7^0CT9(qrOnakb?rCCPrMTh>WldtUzdej)9K4RO(eRt%a@i%iSQ> zhNbC;nyfz)IwhE6y#TU^F(54_DZ9fy@wfcczPVw|dQmM|;6~RrPrnh94{^4dlyRQ; z?O|`v;CtS?>jVXiSmgkFRF0|#IAH)s?wYyP9FLx>GTHa$q<+41Y4J!`KvutiC?&>q z+SUZ%>PqYmorB#`lv?Tqmj5O4QsW*X5`%vHJH@-&iCE0lF0Og;2oL=Pf%)8J=?pj_gpL7K(S^KE;?MerzSh}N- z;ysn9=&F8jv_m~2>D~~H)g6)mV!<;N z`pM40KjhhEoeT!ZJ+N=#Rg=el<`gTJF<68RCs`Z%tlrJj%rM|PNyAm zwoRYl%9y8H#_h<0mg=$&K*eX&2dM3OQc0LN#toxD7| zLsfNWh4sQLq0IC7NA-Go!~=|lO}eHc6w+s^!l%1u>{9h|8~s|BXLfq{9x}BA+TU(- zruTyw9)5Su!@tTWbz@bNq)4^zm#FE<2XQSw zp}Hh=+jp^{mZpNhS})a7fPjtBtLD0jC>QSbKse(&cM~^n%2w; zZAFJz(G7*x#R(h|~%S$R9tO)fW za#pn4x|FM2-&@ngo3*oIKV^QI_zVLY_5yR`#xEm{`*y3XSx1hfl)gO#G@q0yI&5Go zdOcA%ZbPa&Rnw9JAOTPv*ez|Deabn!rh0_Rr$h{&RAThTIx5&9Z?;{Df@&j#4%tAd zp{6Y8@~#|VrOix%u-+_k`bGfy*>aCoc>k930Mxt`6}xdVht%DkNXT?-#%x2)7r8OX z5)suPzYFy-yk&b{GPC1(?r=KHESMozHf7gyL>|nRmwm~4|KRj+H7fkXdd+NQ_Haa3 za`H_7Q4f!>?Ij~!DHLWWL#NxLs)iR6VCCY{n2{AZ4T}G`WC$e&*eMKUtJe;y8m9C8oqJw%<2*c&XcZKt>DrCFX zuUh=>;}5G^cGOrt3i0d+e6&}S>j2X*3ayu(q7fYZV&%FO((%_sJtn_Ng|9jm?ZnLk zl-B9&q8&eXqyxu`5m^kKtXmH2zfP|j^F$y{)lB0-2r=POgO*;i6$LA;4tTHbg?n)_ z8@@f&t0W$x5r}W^wP1o-+c^##CKSDvx(^x~z#ZyYI^_JIFVG3tX^G<5_^8I4&Ww+r zh2OX3Sfcd43mr)O(oEnZ5KT=_?_m%CFa$;K85cPqnejraWP}y1g8kpfTdR(_um3jk 
z=REDp-0R_>k9NX|E)c5I89F;jhDY2&3+xVS0DKeV)`S`;3!v~>t0gHLA=5rzAv{-$ zKM>=9yA!r)=lz)81KsDbZbM@;Q}A?+o&arcfVdq$3HZK{82*|>8%iE>WaUJ)pl>po zt6mhk*LKW+GFfeGHz4956O7^$LWFBVrh|Lg6Y{`j4|!2nHbz4D|Ni|GZX39A-4=iE zjM@vG2GC&_{z1Rbr~$;Z-R&iR{XP79ZSM+%+C!nlH>e>asKRBuHc5goy{aZ%RgFD& zX2mhToNlA67@PoC1chw}eGCXjfV%kCBjK(BYpK3<>U4TWMEKu93jFSiV8t>&obvJ- zPxoIEJH^pFqzUxiFBVRLUm*=zc&t!e5xSrNZUedHPN@~QYL(WhZ#~0RsLq#pdqJNA zL_0tSq<$hrOJkri13&?35DOpiV%<8kS$Iu{4*O;dB~s2nXaw?t^$+Ahyz6!=7$CVN zihB1ItYPEInNF3ze=EtYTEv6Q0E??c*c*x`yTo1d6>5~m^^ZCoZ1@{`aMiPM+X%-n7V6Aj3$6;HPP4m* zS3I4ou+LW3j8@#gf4ww!0N-h1fLh)X2D<8er$HGNy=pyfi4g*jAC}&kAgw8A7X^38~ZVPjj;MuB&{PNG~u?op`!o( zK-A5DIj@JlFtxr(Dx|N~w`sC!D*M9gTS4t#4~D<4xi~yZ^On5@_KOHo_Sf7o`1RVW4izCNaV_R5I*CdKg1QD|DV5stNA}O75}v) z{}1`imVZQaEACDQAvagH0U%LdV}Uv7eF@Rgp1Cbv`URA6X!`Xe(q8FmE`@b7aA;>k;K@s%}#Rs5Qb3HN^bX>Ip+ z_7=y@9rKSKu(Gf~I%pTswUD?R9Ery1w65~T_LJ<1B)S(87j#2%mrvHlZRS_5d3W!* zzBZwXA@eS+q5Ah@JE)_Bn=7cJOFO5u5B(WA+r$;dRt^;~n>kfG1b=V$s^A~2tgMPJ zzP)2xeDUOOzX58>5Tl1$D+WujGI;#^Dp#%QDojdBdfNz^BuU1^p#7^l5>D-X41a=5lE-5eW-%P0AQXY)`=K@SyK9 zl9gNr47b7cmFj+jlB_)R*|tI;Iv2YdT`*WRab;#^=CNg-*-n(uFI*98@V0QV*14&*vUhoP3bwS?mO{~{K!NhGJ^YyDr$uM?3 zCip;7AvZ#H)v6b9|7Do0_ocH1=dx-tca?4V{(Rjz2KU8cjSU$3i61g$|EpuU=JUTA zA=|ZXh!_Mb1D{IDGl&*dhH2(nE9h zgZ9>b9ZkTf45jSPde)GLeVFAvXBL_KJYX=AY7$_|s+4n+y^f_q3&b-9j_oDz)BVefpfNQ)enWW*FXWNw_M z@Z*LJ%$WU3O7X%G=>GhLQNLsfM(AMd(b3GnPyD4R#zCfV+j&!GdMe@H|KUfye^0#5 z78@5-5ZE*KxhZ0&v;X`3d2cR6OKsN;pUYSAqZiptH>yzIBqH>7`c+RGR-zYXhV78&OqH6om__HTg8a=AFL%>|LN(DK?U~(Qgw^N z+2dCk8Usad^*Ny*g@Dz)Ul<$Q9W^kTN}HF zi1B)hW2VkIq{ojRAN_D`jqqXkTmyc#ipK4}Plkcy({u;Fw-Ek320s=M7?_ivzppZg zxt$!rjpOr{jGUck{Y@R1uR?fn`FMO>TU*;$TdoN_)JKEC*;^M--~@7J znL9R!ABwTUTAw_9`t-&fhkv;xDkZ_{AtIA>z3j`cTsV`Vu9ys^_yK}R$|2>eV=dMZ z!dR#!{l7iOIg@aewk4NME3+^d8Q~I1T6dH>lfy5S+7#Kgs{i3_1pZk(q(8`}4=vy+ zhfMYv)@}d!=QsCu8(bWEcYnG7fhv6IUvTaK1Y^4pI@7;Z&wsl6bz@deLM9>Yp6PEG zBcWD!^17i(^1(kD%Oe`fzJ2BSMMXt{)#c?!&9qsiN5PCfe)jCJbokh#@{x38TOCEf zo2ol4pY?#|3;|?3n#XkUXP~%mtEOrkrc%M#2u(+3#AJp4(0wEIr3LtoFGj%a*xOyY 
zbg7!tgH%S+i)@KrRuCW=mHGBx{PN*tYgNdUbw3goS^3iY2ezdi7Q$UDG}$hcC}p>I z_xA1E=RqU?dz1a+pRUCi8GXGRRELGp$vm^Ho~Nk?X`(p=c)V|pF-c5#OYSPw9KGC> zo*bo& zGw&7r!JF3J*`^>N#jTa}d$Y~PakG4O=*4A*fae1HQ3HvASn#bFdj54N>|P0}GT1ot zqxyoK!pCQM!d?F^q~B`D72V6fSNptV>4*N?X@s(%ZQjQT6n?njKI7lsfE(yHq`Aq% zrg(BX@yMdk(;L#fm-q`*^yS|-u}a&`{98|ozlHor7|OePA4s|b+vqK9+8u*+@$ulf zi|g5U@7^68%ZZQMyvOhj#1^h#k?<;)TOXfP4xZ>L=W%*n+2gd$#k+qVyCvy+TLEAG zxRu@T{{5#2#*(w)Ds#va20z_ZiW`j6KGaj@hnd7EyBhXgZ8x2sD52KEPTCgI=WmqA z?wo`5>R2z?1A)({qLSu2Ys7R8?>UtZ_DvcBp7PI6GunCH2a*fx03^#eOYi77%u+2m%w$nKR7lv3d*z@l-zlIwq zvMV{#!|E-I*F!Cqnkw6cfBIAYB7b>dCrU&*^t_~12`d5|etSG58RBJq0VbK@;{`#l zZvFb}mOp=+o9x%tTc}6WZqn%ciQ5;5$Xa?zzY2d|m#Xag2?D>Um|dFB2cA8;ecQLc z@~t=oRpm^4)ngli^Jo6jWR;RN-m%+*Cp@h%(%gyUu?gr{ON6f49`Aj>UX0h}-2S>y zbUXKxzr6t`DB)D~w6L&no>NB2+N?g8tAyI`TeC2-L;2!whF#`wZavHu9z0n-7&q2i z=2t0z-4hF8pb8TOZvTAXPex!_Dar zH^pk@^``%fsSxfT$jnrjWd33&rB;Qili13wM?@Wwj+*}%`r*3BCt#S}0T=u>sMATj zzbXU~FANs3_{sxd>+I(b{uOKdz25MmOG23B=8)UXv`_we-y_E>T^+30Je)S6)drq_ zVZ1A8Rm#fHjR;_=Q?Mj*mYc#XT(iWWO596A)o<1+M{b!HpqKO@i*5ImbN&!kYiiR>v@ zV%zFa|H^*Yapf#h@2C8nxc5ixN*fMUy(t5KP(Jqf?vW7&#EU^@jdb_7ceYJ~8PLp` z5r-|Tj8i*riXy1lYO!*`{(9Y;_BGkf*eY5gE6hW| z1D;7eyc(_}$T{WMmo^Afj zK@!OH4_(59WRQ0STtzL!Js6rE*a`oV@kQ9P*?`-&?J8OTe;i@Bj}CWzD+IbtJwlsh zZ|UqHzAEGOW}{u3p(+y(g~&@!^o@RN%+MfMacpZM!}`Dg??f>cnnR9!cg;(CX`q3ZwSD`x?|1!kU0RAj$n%_YpZor;jG*%`fL&MxyU{8-*6>lDX8Oc8 zZ!9VmI15S?Y`d~#rqXh`c?Z)C3u)waI+<`M4j_YLiVH8Auy87K_x~>dpk_mFe{Y5kO)5S4 zW>a*wMeSoe{<$K~%YVcK-fo6KP11+|&uycrb@-}Ti~8n?RG1*1wtKA@(X^7h_6Ya8aZJX_wq>@l5PBd(Uh$|H50Rc`#H z${64u=thmynKGS9=Af%51fD243}vL;86Xk)#gAvXaBv2n@gI?fulF}2GpK>*Lk|I} z1Q$tu<#XspL)Ps1=YshAe{FeCU2Zlhb+hBqX_1G!?l@aKRxLXg$u27k#kEmt2KIF@ zsyTv!usMe>nyzXZIn^F?M17Bm$yC3u2NXR@CA)ehYCfXe2Ld>b>7r+bPKwm*c}zFW z>T4JLlXwrb$G3DZ!{4<>Z(GfOt_`^m-!cQMFx7yL#YXK)d?9i3Cq$yZZ1{oqx4mA} z?wb-lXl~VAV1-7Sus8)^b~Wn#KmC{vrC9{9nrQoFnDnTFe(Pyexi3$&TJE03U;qZp z`=1^Aof!zK<&)@vjSu&|6~G&_<$m{#W=T!<%M-cpPUO|UxEaj_oyj>sm#`b9bNIut 
zOad#PTqVqN=Ie}Q;;XX{s_jc>I%01pZr!oBa;ei)ph=N#t74^IpqpvZ|0%mFkTX%B z(5CYQ^zH3w`Shi!u=XMta&F}L!Rb@&0DoJ~==7%Oq)OdFDt+6Z?wRI{O2AQn8e4&bIrg_$-BHidp|9K%8+$-asuS=@P!K(VpWO!XdYK-w-fk0sgB3=GHg3ay1T;Pp*FgU`6Jw054h}cDJe($XWLWtPR<prlPl8&@uzd}zi-O}|^utwb3(wnrplRxbJP zSAsQ;hx(nSmer`LI3LpGK8TA`k5O#Sx9NoW0S}1jjkE>mL^ognONFMjwY5VvozG^x zUs+~p*qlEUnoc?dFT~>Fr(Jm!2+HhX(3c%L)_O*6!uYUcfNvSZL0I}mp;Q*Y*%*S1qDXet&`Ay`p;C1EevgpLB* zQIP=Ho5)vaF>d6!Lw9f4kg4GN=C`dH1_oSPw{GQPaBy&B-yV9a+n?$-^Y>Tz=f}1Y z4<9}>gASaKtd*ERkH%5EWuSHG$x7*HCgJFE*DnC$5U3UfSsW8<=pERYNPdH&r+N0+CPJ-e_&c z!7nhkQ7*Mn4sC{2N9gz0ppyz3DEGdpbGG4SY_-dDmzY8Uw5xEj*`Xs;_DKF&s97d9_#3?^GH{uks>d|i<544Hpls4Pcm0? zxR%BTKhwRPWhRs2#5X^vCJG4Dg^4<7{L++QHP7hYtl&*0$U2X{?wupi?SWn)cTd62 zJ;gP{ui%yEl@5K?dn-{>QdyNBTe^XRqoX6QTi10*w3`lw8SU;gb~i>x)x-$G4I({9 zUxXx4*4NRTjOu0Y174?d2(?S)20G%8KANRr3g`?p2`X4t6Sxp`krvYnu+-GF)}TrL zx;-#Kt&@K1-By7Lg~D8!gWHLQQ|ny2Wo0AE;83l227%gFuR(F-ni)OxSa^2;09gbm z`YwZT`iJ)=0=h8r*-#2ru0+q$OzUn~H8xMb#!fyxCRM%XiF2~W(BRXiW(sTK!%Rx5Y;MhrDS3ST zM5uVBC#)VEqAJio?fOTDq3FN8&?~*a#4QMo1Te@e_PNi0mYIJq6j9z`lRwFq)1;WL z>7a4_x(_`i)$HxD%*wT`I|K#ICR*afZc?XFWAD`m_>siu(oC=P6hl}&d=GF?FQQoL z`SO+r5RUn}7tOUh`NR*T;&jum>Wt0hzba?#@>?fR5p%l1siL<2KrW7?(F^t^9vZB)d_m_s%-OgZQ57~UvRwD3l) z#nBE+qt6leNM5}~1i;%mpoM+MuGNwbm%mqJ>@IwAliSY5E#GCu(~4cGaKBt${(3;l z$5bs|Qvgm*IhXNW!Z9wIYa_FztvGiEV~)~H^gX$ajlb-?&H6HVJ?I_lspF+@B8C{n zgTC}CfPWv`D=oEYIZnifsn|y6Cd)>(lsk*V2tn2TFn^gra7Ye|jDl?21 z3eT^G4j4>+3^$cQfLD91mqtfNt6!e+-)K(i^wRJ!F~07FO>5o=Y*p^SdbA%SRn7FQ zGf?yxzamPti+BBVF~`R4o2mfcgwp%m*BRLvZg-5g%)J5Z1T`Of5y^nX)<>O^-l$iv z9`N^kR_s2fd^Ft4RdZ-+IpIRQqED1G>Ddhqj*#A;&LBvw_9D7y8nazLLiw@K^xfgR zOT$|Y4#H46@M`N?QW3Zw%^X`-!!UFm^G@fK9oN*El33lHJXwF{Mx9V>hq5fo`v?We zpM(|oW$QSxf(Crak$vafX1O@%{Omfo?Kfs~-CRFE$;9jHVcJhE2UzLpugB)4Yafj* zsa95-`q09eg>GCHpCdm|2?dvLe8Ef|6ED#WsOQN#P**27;Wzgk?X);YNDA|7f?q9A zc|I}dcj=nlC|#O^bE&FFbEAyP4?j+x7twF;IP&5l?Uy&h{=fWg;Yxd@xFoyY=REuZ z5VEZT_uj)!<0lv=u1=_Dr(R;I<4Vnu!jny?a9oQf00Qhq|9l>Az 
z)2WxJ3CRP#V6fu-{q|@r?}pV?Sj&@yTR}KC|3S$TQq=h=t(@!0G(Kr7Gd5E6v+oSg>>Z{d#Y46_G)m>Z`0VM83 z*?6qSG^IvJ=M--e2Dq(R8^i2ZJB{z44Y|EE?(_2H=t1BIZT~i$%1Si*Y^;;pYzIu!VEE_GOD* zJH77W4G5fLcPEKIkW;kLD+sQ>hethtXu+p)q7excan_{u?46!J<}o0Pb2|YLw{?LV zW!+S(qO3d` z6{oqOkRI2$b4#n|un+9$W#?VRIW#CkzZ+Klz7i4<%22+F?zb(^AJtk*M-A(Hrj$U^ zPB)dXHti96c=2MPg~2AMg)W@lWP4DUhcbtfeM*gjHej(KvK&QN_78n*zpQrt+}vEAn709TDI^hkZ9!+|wV>u)thv z32P*-E3W(0N6JbFO7+Do%sco<1Pe$Wg{vQ$JyvlyL~CT-82Yv{S)3PeNabSG5Y?7U zn7Y@QjFF3+k%b0IMACZsT(!Z?m{5}NIKXftgj~|T7$t_Y<66>4toz&m*T^yGc7|V{ zr-b#cbB7+f+$bZpTFDNS**oq&8(jM;Z5bF22AljYXypT-9pIzSDIH&3p5lZ_oy!~= zLw<7s&y)7WUpjyxQd^GkcuQ%{=vcyHFv5|yeb&BE+2$r3m)LywEsQo1TNRg*4NPe~lVG9#ly8?A=oxTNQ{98p6#O?-MDhsUQRqR+C+MT{Oqsu4t4NvV6@%&17vl zv9r2+ERqur84QN9`s4_uSfUZH9^-bE!0l=@4QoBh6B^*aTTng6qUq}LjQZScpOtwT z7&yhL=U)r%c2(RjesH%e$=4wks}`iU;@`9~tQIAh+2O5VldO&PG)QfOUN0*ijg?7! zJ-Kdn{Soh^4n4hagX4Sc(#+UcL0}VeTXJpYv`n)!F1pP2$;!FUmeo$RT_Pd9LIGc~ zr>E zWo)8Fb#--1fuP)g=6pYDSg!TLq|GUt?<>9EAT_F#GREAri>GM|j)vPWD%ym)%T0G? z9{sfk1P$n89mOGGoo~P1nK|#86FqoY9os?nX=1!U^>#aSa<0~=x%`<3i^c*Nij z2Gg9KcT#R9PwUB(JvASmrlNDqb=I`FxzX4yc(i=%>N>+fUw@BMDOK2!axe_tSs}sL zk`g(7eu+cq4^-Va!_ExEvV8gSCEJQJOc*0+!1yverc@67^VGnWJV_Q{9y6E&%fsHl zWFayJkB45nXyuvV$f%)xP~)kYJyE4D!60AJ&v()(BU@>B8C|B&6q@=>MGWGrOiD4Wkxwh*d=o=>Dd~4Uf)x_T64yFFR zQF5*iPZ*bAq34F1So84dF>|2*S<;>*jcJrWusMli44iZ|T_`In+q<1L{&B6#ZSWFQ z6jYNfUv9PDK4U9Sd76#{ng=|`fZc*)Ws)!jRF_K873j;48y2J)c(-SyOaA&BS}r-d z8vKr}97_*a+J!kV$!*&O(zDiAKj3TDxy6?J(zEo1OuSDFcbYctc`0=in?}h$s^~Fmg;U{UMOkq6u)if$%e3+BOL^N|gLe zUrBrB$mNLAzBHt%vnMk|v!OR`m(>MEQsQRA!y{UW)%6CslPo;kQnutGhNxdKAVGF_ zt%X#A<>l;G-wDKeFVDILix`sR2YeyRUe5Ju{fkqDYF$^<)YNQ3h_Tf6cv2bJo`Y4;uj0PE9 zP0g0iCfj+l{Q8(h<8orQcZ&?hfdRhv2Q*jYE0^$nK2=@quJq zIiM5>y&(MoqFuRZbp85u)G+LAJNMZHsCN)M##g~T(c&Q? 
zLBWvRp|}A@fqT0T?&eVppMP(M5hVo3P}cCXjBq_I^ORJ<+0y-H9Zxz8a|<|Yuw^Hu zhCgZvvMMSHCQ<@p(VF=C0|-8@PSfRu=ZZkF`K@meV@@QR!}A9<9)TClp!Q3v$y4sh zOuiS217zDUsb_k+FaBbmPiqhbR6foo66kupb)9y`XvYYO{sV{0; zHX1iu?6DFbW)5VZGEBp9&LEKxhJ--jVcLkQa%=$IZ6~kHFSD>}9N*2Y?;_U5YyrTV^vkw(_TkWU=w7RCgqiG4jP%hYaM91nufJWOFzJD2fGTNd8u0?v6L^-D zi(f$L%ag@&1HBBRr`ak}2nX)O#Ra}rv`eJ8E11TObz zo}YwUP!TMW>4kgJM-QA9K49}bi`|n8%C7}usjlbYfSUIYBx+{wzazZ#CL1f*64b+a zQp8VbxKTirmr;yo1UHfjstM%MiL6)JZ9izFa6Ec)VLg%Z>9zLJVe+zSnn<9`YOYA; zzjp7doqgs+T8A_lGq^e`jZ`9N!B*1)2Z;0Yl9-T(j~_oC*Y6QqU7q6v_z(}OyR=T` zNRYHW=`D|#5A6;5lMVY6?X>y`B_$a5IS>R*o#{n%#o}bQ1Qj5<_f1U>q1@6Rnwp5j z7^e$9YfrRdfiOaYN?_RWU|^Q2;i^cNG5Jna-(fP$3;W${#5W}sfaYeOoW(f=B@7?K zz)lMSP*i%fF^cnIm4`AC*AyuH(iFU*f&BX(>4 zvwqgK>uCmB{J_*SL%X&QFpg$W7!hP6<_E@Mn(_G(D_Wy>V$~_s9BR@G!HqcEACcdQ zc_6^A%IoU^Ab#bPRs!_4WC6gT2|!>RK58uhb(?@TFzTb=CpTQ>fx9Rrj8J%Vt)N~f z!JLT41N&10Af6ep-i_AG)J#%%Gr!F3YyRfTz!iIdqD*2mm|sCI75D78!S02zCiOz- zAj~T6d}q)Alh>xMJ^q7!lDnsl3OZRES3bF5_XFp{u3cH7p$liSu82EdYWVcB=W+gT z0aEwbl+%v{M6yhE4*0G0V-&VwUFJFoH?@f?bJ$D#rt8^l+r5=%#-4TUdbEef^nr_} zW>5(`k9FqUYG#1C^0e!1ERh+Zd+P7i06xxoHv!ZtP|c<3bykv~A4LEf(hwSkJ$?$y zW1r&Mr-l*)G3?|%c^D*vZId?cfMW*B&A-<*EsTc&#SB=Q2`V(adjQ?DYQ73e3dc|1 zZO_xZfqwqVmy-(LC#8d6=ROP1v*u+eBNmdg_l;fTqx0_0eCXsQKmH_f4r)TQ!cK-KP0>vEhchCvZo7rgd zOrB+Ru-KmxiAAJZ{Idw+rys?Q5|Z{&=5gku%$~+3wC*uLe`Z3StNwsA#vsomHiLDT81l}1jA{m-qjknj9ouLS<*8qd>| zIg(J2$2voqk~+9{C~od`s7j&bdHA<19ni>%xB#W70ynxu0f2m@MB<4Gy%3K8xZ3ek z6R$%Bg%gL=t6XZwY+vIghcyWUciaA2f$!OHyDGG92Gz$)y#4)gyTH9=?VHp@P0Wq~JC8bP(_oLU)yJ(< z^Bq!1V?`9lvuDmO)z|LRJNC=1mA9kqXt(^=!!SDlXoxR*K0*lmJXE6wgmt6rTp1F} zmt;B&lEj9y-btDN4&RjJS_^vIYliJedAOLU#E0B7C3C7us~Ua4q9YQZHj5t!dr!b!FeuHBe&;@TYMdf5JL=9(!L~0mo9SKjQyOw010I z0?vSj@By4c?BL6i5#8et_j%j6xwe^*)j}jEQ*l-!6hesNKqNH8$V(OlQt@E=>+QfjtRf1^g{6-)YJprpFJQ68U$`cO5G-k`V8%$e0|4O z1HiJ7pxHo~r(>h+*oMAImDdrwifu%-*Kf5oW*B6+?$v9~jk3aWARhYcJOQn4Bycwr zAY2st-QTfDSHAsy#|Vtlm$&mC46XG50(fL~DwFv84o*vXqC}%iTXRE@J{iKbapR%{XU 
znu+dKTKXMt;mTO*ylKAaN=bt4+dJ0nWJk4>4tD#){zsk?&gJzhBYh-#*!K2(8LP=C z_p(^j5$-9}ko@D*o2IQM>9{4Vs|INKD>^Khzio9~-@=BK|G3x%+#YSfeO(k`$}2l; zv<01-<0^e-4b@PKRdmK`r0(X2;)*c$P4OPQ{IVkXg;fN`DQw%x`0;nyd zkO;-4sQI_8J2o_Z2$DiNG&p{d(7L_-g> znH<2fnxb|TcwW>UBONKh6Cn8w@$myzzMKIBvAnDr@JY5e#l1R>dGSV`B`81}do+9h zwVUSiYAVmw8y>|3HH2SR47x|fVhIeb%34m9GOBH*w*xj!UwggCV&0!yQC~uM?A@K0 z_us-X~3pPEI1suiP1y+djl=n}J%i!3)Vz?p&T%0e^>NtE1HY>9!y1lQFzc+Sjzp zA56uRiqV88@3W;F@jBM9@fd#c_pGfCws6I&&EEex_Wr1aJnd(hBO0Zj!kbE|#j(|o zA0J>vxZB?!SvcruuE{gUg(xj`qBTf8vI_K-!@!z;wvVT_wyp|{C5SOZs$crdsgtY< zpC19q!ssorg`Z<30ml#rFX*HwO@2%Xz;>Q_z*!mxdc^@pkW~}l21^Hn$awFJ7WC;# zpd;H*-6kqAj#$<|*O)7UuiU5LuwZ^Z5eO9E#exe?|NM^-IGCZii(rBct1$uWe z?kY)cb`sz0!TlpWIo<8Hy7_fQmc1>Nl#Y5zZEZ5g{kDpco*dsltMcD}6qMp`GWK-y zhRNybcGkA@l0si@n&Z`T-`?l$64n{aKK|Q-N#0(WM9U;ig+vqf02W3-F5PcoCs$R!geTOraaOjrQF?HI1-MT4&`Op@7c3%@8@u{PR)aCY1TjgIAz1)d6Q&;NBaHGdWJ4d$jcudy8ddHH6a=v$LWiBiXjw z?QE;96_mhz#p4-@+^GC;Jz4!q#P;W0)d4w0^)$(}QmtRm#5e8kcRV{X`bGW>47~Hq zu+SHGTtDwXoal?g^;Yh49U^dWIn?HS`+d63v@O#o116tx&CCMbjNDm|w6%ppU01C? zG}PW2z@b+0CFYL}a>vpTP}H@t_%QKR#R3lvm#c+}{U?rFby|_GVA}*DL(iSWIbISJ zHXAd+Z0F}`jvu>EQeeeDr(DqDQBfJbvc*#|jTI-PuhTOuu`O$HJGKCWiJksAvYnH*r?{9HK zg?=xNZOPeYQvp}9c4dhxu-Eza#+qFjKkwf7<*g2WdkwlFrsbrGGZDj;3qm~{D%u%A z<86|Hg&h+Ojs1oN_6;9j-QSF#yp*JhJ9A!0sc`?kTVbtAMdC?j@0^olGn6?u|6f~J z+1JUK>JN+QJNfRz-ABw8w_;Xi6s_D-{EP|}UFl9vf{$xgYZ5gD?(JybxprRp_2iG6 ztdrWbQ-;jnso4L(({nfkfB109sf>en&vujC{<(0_|J&+)TiOvPo<1y_T*_ayW*BHe zQf<$xQ(NxCj405I>%Kd5#ROn0-w0!mNAqM` z>MV>oJV_BgokSwx>(gF^zdFw+*U6$`eeUA?eeRV1S*K>Ms)77-<2BLeQ#;#RBa{!T z#p-rlvG=WfC7fUO=(kurdt;rikNH8Ved5}=77BAW_Jc~SZUNn%ZYeD) zNLR4_|NYQ!pSR74U9ySbZ@ZGEc0Lhr>=x9u9EX`Xm1k`%0H~?+{j1)LaJQbykFO8? 
zAFD*lpyuJ1hM2`{3Bi&@yp>+p>Q>2QuDc2>EOtrc!Iz^?TdwTVGS~e7986dwZby%- znJNA3So^W+E0One9OYbXBC=lT)=l2J_9~XO!ASLg=csgp%m{t6ZO&uYR36z9G2TR# zEVb*7SPjbP@nj6ky`_`ShqOjctL;tH4!jVqrxyEvuMe`9u|j$NZXfH>Ntek2*}8!7 z0l$Te!RMnetfvO-iq+2HprYse=i{1T=%3EwUQ(Hb&cdNRdmW;Gm6~i3G8?L&u1*;C z_leOS8QGox>m5xNf~ajwKZ)hv-ep_29ea=Cr3ZW@Py>uWCAXEC*jBZuSce!zN(j;C z01!%}IBbCvc#jgwDJqxz78>-6t#w?Eiv)qt^cYCkGWTY#AiqtKT6dElAnlE5&bELpQPRNz2!KmR8cr)& zd+n>L(nDaaLFT_Q7KLoxo38D+)$bR-F)E&V-DjaAA^dscS`R42zSgod6~rLPy6V-G z*`m=T1=88E(S}U-hC{&)_ahJd&m(o3u?2Rpbn^w1W?XG1o5wD%I^0z$QjhAwVPYqm#2>Nwhg^MLGZ)wY; z{k`2tfN%_9$BD9E0FR7ljsgA+4b5PBI|b@ZA~x)VaZl3QXFOnz@UMX{;pqhbrG_zM zh;bdLLtf-Bod)~N1PKtbqSbI4v)M9FgPCHOu4oJa<|BK*85`3;;JNl{qZf7YKdxyJ zK)ckOx7>CrtVph%Emh%ly;`}$T=3|JDS^;)Z7kd=nVD1y8_$axuepIWIm1WN|7+06o2Ru2D{+AwT*KIyZ@p|Rn%17_%K=-gBg2>nnT6Lrh1-OujexVw{J1GeUQqBp z8{n^euG+aVXuuTtCR*(}P+U8iu)Y0R*x>5#ZHY}4_zXC+!uNB6iC%V;tQ3ytu_?$HivV8jdhO1$eEUldfRM zH4zD4{^-wUnTLc}01~IT+rm|m2mRLs6|8UkF^V&JE~liWr4^UEo2q|XSs0mHr>SDG z=qnt*6q0m=?q$Zp5cgP(SWJ z3zjL;BQeYRF0uT?H*Q}#$t!jn&?aedIBSH9Q&6#6SqVvE*CddfoUgv32m{LuHX7& zsq-c&B_VMv-@`_IQ}HU?Ymq23RA#AWE!cMx`VqAir^?3itGZejPaJ;yuf=?FXy}2& zyL(7d!OEp5FXuY>WY&8=*%5Hc?B9?1iMC! zFCZ_vtl-4PHSH?+av(Ka=|~}q`enBXjN_^fMpTKP(s8f4Mr!Z0b04RWoLj(QD{{cr z`~$ZS7YBHMgWxv48U59pYaz}BH1vV~M#PceY$2oKOO==aUB3@hL4SBK2!5**GQMp| z1xF>1p~1E@@lpV?4uH>0=;ZPwP+BgxZ}L9@jockRN=-8R>>SjTZo5Jia-<)MWzkA4EJ}CsnWW zlqHBiDZPV?6aA!VP(ne@NNovp@>Uxu83=uO9F}J9btP6eNzH^gdgPE{u_tIH@G>6S zP!1v3`bpJm?xy`A?9J)wQ*AB3mF@rQYX9-Uo$4}ha%NjK$GXt7&awr8F&d9Y7ue;~ z6>Q}@^Xik_GK&jS8BP1v{RUUuBAw=I8!{%|ikj|WKcsf4&|$qU#(b`vYVFe43l|c= zNjnO8EQW}5{-+W^XoJ)_!+p+;0mz4|Q+Ibeo4OJ|aN9KC&s4CL;KLX!TA1@AB6E`X zi~8rS>4u$%Sd~~!P>Za1d1dVni5epQtu4@Ots(Yme zT(rRNAoXF&Q{<;T~`)hWCK6WATH8=UrznY+Ig_D+-ROmE~ns|Y^+@??CKtYu-J{)5o zW-N0mpG6cjf%-g~c(ODa?SKuf9hlywxuwAI&(Ue@hx1_)Bt~izqpg2IRz~1=DNWu? 
zju_hK#9?q{ION&{;?KovqPkSmd4eo&0$CAq*Uv7kj43Qva+%5;V|vvLnt@z=qc;J6 znW!(N&a{e?u(Wix2fPKI5bDqKlEPM{6J7}zDLq2>+X)|s}f&@n;%!a>geT1 zU-X~Hof*}X9Pt!R5vwV2w{yk)URdaH)9dhfd``~1$Lhq*mKj9-L2Fa}9MoK#h0F(m@CRq7sl30u5-#ajB~9j1S{-5=HWyO0G&aOm&aK}z zUt5Uwi|+*_8dl`;;iJbNDhF~R24Si`In78fF|!Cq?Bs1kQ(_`Z| zK=1F!3_C$6WxA}e-c}-~s0nYbWy3>Y+}pX9L9$|5R^y&2{C=F1ymkX+4hRY&Rc9Bjus=rxAnH2} z--3Oz&O3$bh&<2ZhzdV`*m~-xtEY$mo-~T?ysM7r)m_K@aCU}a*=Hwd?Wuy9>g0kG z(7#3dOx6;9c^;zbcM_VMFtJgZhQKyfWvsk>!Vqdc*wy9t+e6bTC9b3-F#p$I#isOelx z<&Y!ox{z5{7GpyyRGp?@EFKMhX(Y=E273s2Fky9!)F>58`8qOOgF1AfSPasc?OflT z1_Ixa36{lL;g~(ZoLpLN0{0iwiEcXb<^ew}tiAE;q?~+)^#LJAB9C0Z1XZdW3hGUho$&K(HPUmpF%t|yP+mKj|h z3%)g`$ASo@^VBTfLi~lJ4_fYU7Ck@d=>#K?QdO;R)SfV7uk)Z*0tjwuX&$T?z-+b7 z05`M`j?c@DJire?;#W0T00jkyiIBF?$^0zrTHU``qm&@v4%6yg@97Z7HKb&R>fi?w zKE!UeY~{=wM>funUQXXKYp(S^i%79&zAjtq5Pcor)6OoVJ*a#!VoEOInbFTr|BZ9B zIn+PT=9*c3i$iWd|7|l-_OP+xd|)hX>y(Eh^nLBhSKR_oe%al&waX+?uhvIP2=^<3 zuQV10?xkSd8ra+~1?7XJi_d+4(pmKNA}2WaAL2n0L9A5IWSlqhqEt0Q6t*1 zBmyG=DEp=h=SYqPA_Pl5-=d z`wp{Hh^4cHa%!JM|2pD0OXnJG^9sD-vUq^hdV`y`FQKp@QWrXXWh}}+BGLT@0~~M* zpnVkcW3DB{_^>F^hS4%LCb_@ddD}vn+6}e_{_FZJXajZ z7XlTJ5kiSzmmBM#9c&hker~S>4qV42NqA?gxpGd_{u#i32N?s)8_6(Y$l(etV|*_< zhJY(M+E0SC6i{+nuty_RLi(9M@SUvl^Py8y6Lx~TgrzenBZiu1fWb0+@-L(7OIPFy zIYeHyE-6Q*axkb&@)@4OC%IkGxQtxu{;cx9^hZm$+n25O*ch~*bek69nrRk}yfRm$ zeUziH&&#I7SYLl1nck2Y2NLDWv*^-R)~EzuDvY5w`3;wM=jNt}3dTZe1VoGobpF_J ztKNRgHtdF5t%m4J^Cx^tcu*tXHx2~ULNtMjW-RJ}O{U5i68oh|Tcn6VcEG`CKj!Z) z^p9s*15z{AbXC4y!XbN(5`98FuBdv-4MaDRHggR(Tn!9V;Ax8$Mr2ue3QUc_t5_wP zK|bNT8X8l%bu5MxM_;UcE&^fUUAdO!VfKDhiJ01> zwpMY@jZCvuM~LY_9{j1|nAOk1=3^&}bNB}}fk+KH<2@H;Pp0-vu>|90mO9PBHZtM1 z?#mdi-1auyx^z5N^yc?Gl3&ooszSFhrCa;YMfAAMwj2cO^Jf=&d#6bt9N$thzR2So zX6mzl#M~J*G%ECliB`}9161bgHlq&JYG7XsqyFYF{v8|I%fSmxr$lF&HG@b~87=1U zp)Hv*M(!gSSl3sFLM#@zIVap;ZH&O3uJ!T-ZQ0sn7?}{vcS=ew1B9lODOmF`oVMVQ z7m&R6MHKoGIC}=ys--8(M0L~jF>H#9SB%yJO)F7I4taGcbA%!sjfZyK!dM7)NW)+F z!d4v)*Hc=q{62?;r3J~?oS(JH2mLN!mtL%xoSy#+eCgjGFVi6n!Q*pt{jQfIszKJb 
z*O1QX9h;&|KlJOa@(a_sEWt`hE(z+Fe&1mT?}FVE)UgOf=*IVCs24(USy@bPg*7nz z)LF&c_Ia1zIVL2qa?T|g!+p|oKGmQ}VYbU0LY%J`5fs=} zdZMK#k=UNV^y|a9`%l@~n^#-h#|Lps9tF)cIMAZKXSv+_J#R*O3)w&`6k-jEp(z5hwRwG4wj*e%tK3No zQ8Zd|V)Y17X&_AwX1Vt!3(OlR4sO$xi)UPK?2WTFO4I(RC|DO5R)AM|M=Usz}_fnx!4t+M8ZeJ=Xce4%VEwbcqz6jL`Rv3NmRkIiP*cwGCaej|a zZ}45auAr-41vSX_Hbcz38W5V8+rRpo3%CD}jMS~rRY_j;#-Xer{Sud3V2p!aJyo^} zw2bS_hQZVXSlO@`6*w~0U+jih54Gc_1;;;hrk#I~fATf7v0yo!WVs$r9p&Mg0fE=a z)P((K0aiM6$;zp(*MZCq?coH~#Cx_q4JRmAGS{LI7d2x;mId7C1bnObD#E*jR67Ku zv|IH=>DKZqZAFJ6cn*!jx>e|Tg@gFZ4Xo|n_jxXu&8NN?x-63Z3jDv0OUq;dJ!8)s z$gz$TnDHQ^gdP$J1fnRSq(w~vkMs#aK{@CZ!eDCuzHLQ`t>T(tHtf9iK`QH(Zu;$)e*NAKYf}T>;GDMWCQ&1_ z=&`YKch4q9J2~k(U%c>eq{}=w0AU;b(vHKg@4xfijal(5&R=yBj}Y-7260Io(v+N? z$bloH8rt-MX;jGVht{fG7 ztRtY9A>ny!2k%4tc%P72_ix;Ml5ZbR=F&;&h3*$Kq%TTpBDpGhoO}iM`2KXhs*0(r z*|9ZGWG_dL-uQ5GCP^dK%K-FAy$IK+1ow_wAri+!{*0ZI34jp1Apb{}C163o>QSvY ze-7!lr})TV?^Z$gWuq0QtSyLE=G>@@g-Y#Ot+?7{&b41&nAgw8-xb2aP)=(nybB#f z0Cp_C#2-Fk-<>Z3Ul}BdTiFQL3LJH4`?z3YoXUi$(=xsH)$6mmBf4ou-D$45A!rM6 zoJyt*72KNXIj0yJ55d<`xi%dhYj8{ zuSg$)Q%d?FLgl%vjV|2$)Y{^5ucdk`C7>GoL4VHko4-Bj%d?FVHf~O=nTVm=Mmd^S zD?av(Ja+&fA8uCOtK}tB#xW`5qfh%8T$A5dSzHG>J8}w|K!_DETiJtI46{yBsY+pn zaa>l?FF)=0Nqkn6Qb_LS5-p>dwR*SxBxr3PwK6lG%dMx|v6_h!e`>oV8IcNi0#rw3 zE~U__A$WCZ&6qwL-fTj$t-C*R84#vXQQBAE!ypZI>~pNKqvFCq$6PqzWrMY)l`*P0 z^ss7;4~EVI^;G|ZX$b@qO{O7Co72SOx)Ib`Xs#j51{FH+Ts9afASL|SZ_Vnhj%PrX zz0ut#(?%D~g*VP7`ix(6GDZEvj+wYjtMZ2m7!(B2@zHGJoW7YokcSZ1ZfU>z} z3~~no14WDTlx-TogO{KdvDcqEP&zOIV3$vy(^UGz>6mOHA)Q@j5fOTTq_xZd_Al*4 zt=}n?k__&CNR@*gb!|i-KZz+aG~l;>3WoS7dzkKIH;j28{BQZ(2RW#RQ;|kaya!Q= zpgOWJHV52acf^`Zt>N>wjdW=lPg_n_?~E$7Z&~a3NH!&dU9hutW_WnCf8jNirW3J$ zPpTLjv34zzACQ&Y&7oH(-HSAZK5T-M@;vs8Hkod`TJHSzM`09^2oB)Y$<&D&42Mg$?i3xvuqnqHn@|Jm-^pz&F_lk52N%!^?9fP1_A|ee|q9$dp4ep)=TXU_o zQDShEc+Ww;3k8#%iiQw#QfHNESV%Hh=tPOn8GQA5vgx45g6gViQiok_rvN+Q!Jm5v2fk-9=?*cRt-O+{Y2lVQ# ziY;7EMjmgwgKe}tawBxc?-I8A1AfJ!(@#ExU5xe-&-5>}HD3DG;QzG*H=S14y>)9y zXQHWL*qgmlFxj2UpD+Ibh0)P^^h5(;lJ_El*)m|F;uVV|A*k*|`2j&eutAPA*YIBy 
zI0iuz0J`KgGf>#uc(V}VrZ#}aY!Axtcu)RRg5p!%FhJdU@@`E~A@x7RwF{tY@K#2& z7Xcm$X)NVW?A2q$6t?Xgk6BWbu&jS?ZPXO~;<|zJSUqGfrgukgS+2Lrgh(_vm;^;d zOAWV!@GhtV%{fUhG}e9Q zYh+~9Hy$oY%!Eqx^eDiNKmYn|%^i7MJK9uRYsZkgde+3ZvN%wl1)_@YjSwBE3tfE>jmHJHun8(G4@-q*m{!f3hpP;N_)c2Q5vT~eM@&K5 zs4mdfeoQ*~_%hcX@HI@3>pD&R6o6ZegGncK#V{4O%*PF8z;C`kAn{S!2?`WU8zlq! z4xu7z3JxjIjauZX25~=iy`p)sA51P{5bTA=qp0WM*2E~?xuQt!ORD9-BF)<|hVe^hzn38o>0aixt0^C&3_ zEhdYhL!;t5dSWlCnGg7VWv1i`brFXQ!hG@7>L7ubr8VIg?yzPkyQ2?0@+%P;HCzeu zE)Y5oqI9Xgp|XSsV#B^`6vP@Mu6Fmu*QptuY#H$qL;}c>h5c zke0oY39$Q|XQ5_~rL#tm2ljqufeQOy4{RIDvh1k2kdScVjpYlpaY!kHX1av&t&id6 zH9d9CYPw4mxtNeU&BYXXjw-gWzPDb7xDWBw#n~u(^%ruIOjif$cM66Vz-=f5%da1j zuB=fiv8)4#c^kYGk@+(&SYEt81K740###w@;er|sAu(xfw2)lq6@$l1G|33X;ebxB z2KX-`zq(_*jTrN|nY28(3=YF%;gQP-XBvP^>J3&|=$ef)w}hLmN>V$_8eFAw1-+L2 z3kebS51&G~FSb&whL;`G^nU*4^yRr2U-^;rLT(=gkkCe(NC+a^QM%S~xE7fGf0Vwi zJ?)oGr?pkp>|4#<4vU=Qy&&m3^iLyM79$nH4yM+b3^zrKB8^^NJQSO1Yz;tB5Qb<{7L%`aCJYxk*-7==;fT zr~|2)D`x<`kuGYg3NzUYv4*|}c6Sf9nsq@Q3q-KB&Y*x=^iKe%fA9(d_ElYJ;nLdd zeI9N;X;90^eEl@UB@j^u(JMSckmk~F0{rIMpgTE?!k=yj>8+GFf%OA%4xgJ` z>o3W!j0iM&sIe)m-Oio59Iq+nW^0M>8gGt^C|7g76r-rXkky5^8Za%N&QYmhgK==t zI~6+SS?f>b>i`F;E7^m}Vp{h0wW&Qj0W8A)$&D-Ll&m=Cl>K1)pqUVu+oG9n`fZH* zYrSUKc>;8P*fjT0Ke)8qccx(OweHT}5(w;7Z^e5&^5YEAG&X`N`xa{=B&uPY|FNNW zy~})RP|c8tu@NQ0Lea+h#@rwM*q zlSt1F&JnaY>VbNDt?BCEfoiG;B|dC8jJ)a^xkk|%;Tc@hz4SsZ-+*p!A7V-d|MB(jHrWx@?MJ1>OF zM%a;I`cfj#qHT%6AaD;08=X6<6%3&=fM&PM^scXV7J&h%)dse86>xm})D>4Oyy}xi zg6n9=On0sI%b#y}Aay*o;?>o=a4d~ z!wBF*7`WV%CzT(-kq+jR4egRF$)khQ430t+#C@Z@Q}$*I-;TW}2oQYwOMl^?+cI+D zg?h|h0fosV__53JbSGyX@|Y@u?*JTE_5lNI4Knm#cz*RjBN$-wbHT`J3a7-LLI(kx zj*K9{JZ^lyV4Dt|i@+eG(H#W>M6p(Rjx_B68+5~+5>K@Z&JNTL$m zpwFjH1SNE$w<$@3wn(7>G5G*L*gac?L&DACFEu3#{gYt0N_%7Lc^Q%7+JCDruc;P* zvHSTi*MC(R1dgDzc4{vNM-@<4Hx}DjrBZHnl5plRZ~Uhe+yJ!AOS%t!(IRS%Qt-tS z%nPt?EqGIb3Io2T8d5LKRu{R@=?oG$n837e+S@>ebf77i(h(=7;s@B@hUxxN6u0we zW5W+Ta*RbNe4=Gp-QHk4r#0&6wbQ-Yo&h@yu?N2uffLBQ0TRvrA7S4eh~@tNuR6^m 
zQY0%`Q7I)mtBg_^rBY-ZBwJ<)(W#IbY1l+Yh3r+RY}v9$dF+ww6@J%sSLbt%<9vV5 z9}18AzMuPkkL!9}uh;7;f_x>Q)d=k?kC-lWuP+-%p-ne+pYWH3|Q6M z5{I-DFklwAH5S1vGe0$acn;*zbY=>nJJCc6I=X;or!dI$M@V67C|g^nlo*a;k;D})EZ21Xx@i&Pd0UpqXI2KEU&8hCFn?uoE z{XRyB!(Cl}b<1!vQ3TxP)d3#wp!55l=~zh3$g_%XcqjK9;zJQ(DFflvF1-9RC;3nR z2t8r`1!{e$Txigp;&{Q^%;e?{yYTf>kBOo}nPc@h@YnnGY5MfwXJwpB47I#gtQmPu1+ z{G-t5-EEy_*V}0n#V5~kU(R@4J*u8%bssAx;&aszo!_X~=wGF$qNd6eNF<~}*qhe_ z+Kh}mru53s@66xL_7;8nvOfJr0vOU0hXhK6&QN3GXz0>+!5+xEP=^cxojzC)7_C0v z2H=j_TLZXD(z?IpO6$@JQBx&?#c{#Nt?|L9d-Y;Ni_mSlXw2N zJ1#}AN_j%K`L>|a(vn_ytZ#?tjcY{pF~VEy0;iUK1U^-O1&a`t*We6``6Je~Iem@X zZeScm1>WENHIvsAlJ|tsg+!>_n-{aV=w>>Ox4|R=P7u;*#$4GttMtB3-d9%ij6X8M zm^;(WqXYUrYp)TQ6y~46&;0Yc)g|zp8u{*@D-GUrdq2r(4Mj}t^m~$b>QN5(MP8I@ zqTzWDhk@9>i;1FMwniVbksCbM)S+i}_GnwtHWwv(wP4(Rfh8n)`kM!v54hnWh4qvqQvY2L$;Bv2H1 zDm-smt7BaG;>E+!p7)!-Y`dTDOcwQaiZP=}>YSDlKZGi{qXH3uXlaB^8zM8ytdAAaddaWVk3I~SQacHU22_U$ME=el8-+pGi(aq$Rrf&_ zh)f4qXP+5Q4)is~Dw8+cji6-B=rJP|!@wX=HI#5#JnEuyy~&oDy?YUa6JX@IzbBOQ`V4qen&F zpwEmL>G~zJmSnK}bp#7lY9YUtxqHU7#8osf;er45ax$qvMdzQt=Q1LG473STt05>2 zb-wxifGh3=Z4J;10aqe{>;CX}dE5Q|;?BoEUQ1DOwDQ6EcBL29yZ?4j;=e5k=-mpD z5Q4QZqd({*fJZ7g2FgMt0=;$$z=5js`4i3iAZveK(fTrHG7cLCF`OgB?+1P=XSN%i z`K)y8vr^QntuDN&UDnGfURr@^#!X2(4(l1i2udqS1TW$Pv!yB^L5xV3yn+{uzMFVK z`iat!1O4(2Gshh+VdRv^N?hjOzBJxS*~jGyrNBanRFXq#0a?rF@^vS{IxC=q$C6b0 ziOl>OXhHQnx@wPOcpm+$uNN!UGIuf^*}YpK0C;|zb5npfRk4}!s+jfC)# zOl{TmDT1u6${o$|PEa^APP14b4x~MCnrr{|dH?#an#7OFn4~)goW<`N4mf!C{u^z} z^I80p#-7Y%VlY=}3!hQ^CjTN<4&ug-&-~Q9`Q`HAYKil+M2Rm<|u)-<5 z6YTWj^;Knop9U%Z_TFFr&6_F=7Q>S}Z7)N4!=5iGUAtHAuk~8$ujxWh$!NHXnv?T- za$QpSN%rQf<@8wM@M*u4W!p~yGqcW#F`DCW>aJ^CpCq3G*YMx37XE+SO$ZD)eggG* zOVF2m)cpKp`%JE#@9zG$_i7%WJ@EKFB?Z@p{m-`hY+5hH|F>mYNaz<)Rd^Y&dRLKl%ia1aXjU+q1s8FDgpW0-ST=%i~8GmGb{PPGHzsg zke|<}yxGX{YV;PpE0uHqyFMn4VEdh7f#uu7w~}$?RNPu;GnEX~^f1Q8CgO8&UEFHh z?)p?$K%!CPU-bg#H>bB&i_882{SMCK!o7{gZw?VWron1I#`@ zVBiz!gCfQQo}W|cf7hTDbIO<9`Mhzv2w&Tn@OX2xbpiKAGB-U43WbMXl$M?tx(dVD 
zZ_m&1^=^!UvGm-8B8$`o+IPodm$OBjEDq}3=F)@i0x1^BtH!w7GOWhCT zl3NsB4E!%#X<%4TKl!ON)siz??cyt-d6iqv<^lCJ`PT2tW}?#e!nEHg3VJI2%?yn_ z9QSLmT5WJjs;+9>f$5xXV4&q3(8?}Fy1{31OGxTvAwFAhS(n`!?Wkvto0d%gCzcq! zi@RU!<*jZE^5iR*_0@frmL{Mv)BAqM5xvu}1c^AGzaYE&;U1ez#YHm*IEqz^D+ITB)ROSGr{b3nHUy1uf&0Ra#qOMBun!V}v@`pi|btox|*QVX( zhAx1cd^IV;HGB*PIia)bozD72#m&dnojYF;-=gRh0EjOwtSd^F9~Jzdv2&^W^9A;< zqFGQr(PUn`Y~zy@HrU9dnw7;uADB#)C~W^Ox^k5pPkB^gV)&u-!*Uh@{E$VZ46;$m zu8JLu4 zBc)tts_!OWekojvj9l&!4b36L8}c4I53$NpG9S=~y&-J#ui?o%yN=?@-nr-pXD>*$ zKUjW8oW}gv<;W9WxPCcLbI8`%I?{l?iH~(s*vjeIrEF7&*4cbzVFx)GYlZpOcRCn! ziad4}C!M_D%kCrRd&2jafF6t`F3O+l`kx%L?bn``z*KntDvUv`wQt$D&FfKec4KEf z#lDCB!{O{M5 z*k6M?^M&(fcP1eH`;@(i(3Z;H5^gNWB7HwEoWghY-;jjYG|rn}dcgZC`pb5ohuQqc zhjmH6G+Cu5%6dPwoJ^V=E^Iuo&8g-L|0`abAl^OEJh6eM z&XnGF+DE;L-^>)9s82T*@{^4;kWRC;OiunBe!9Tv(?wO^;(gLwG{5APl3!#gn0JoY z7A*=pl$W@&LX%43C7Ee3`{t&a+&F{Gi*;ma?XF+=&8t?;cTHS*k+$zFjuF2*rxQ`& zLPmD-;;9TJvn#MN?3Ej1;Q6KPC4TqoOBi=k+mFTaa{Fy57_i4<2O(OMBJ)}P@QJ^0 zO8U71a$UAmzC}9n+n~~pA7kg&0e}75ed0&rsPvk>C8%SSzuV%T$gXa>d_SV6#IJY# z^)J_Pm^)6LjTz=T;Izk$HZxe%)A``MIT2hZptuzqdp{r`pb=Ws>3|oSH*|A?Ms{p& zmk;4F?wp0I=dC*O=xY0NIxcaFZn@7&{)pdbgR?!+?I^iYoB>NHg4k_03q zDdXmpJ)Dd&$xh)@EYf0fzL6h9_giAaE-dO60bhR{9k)9pA1b(kpI54r3T_^fB?ezv zuf;%~vs8D+w8l7b-@iT>{2*+@ZxoL*h$eE_nhXA%#vmZgnarAo;#|yBQ99+WleK*G z3=E{S7E^ZseZ`Rf4a~cnh=i+IYv3{0Wg3UvmV$Y#V7-;TJuXg-un1g!wlL|9jC;?3 zREMw>BHV70nvA25N~tmM)&-|orf+68+$fd>{hc~N{c(!X7Hk=@ekv!P3L_|W(S1&;?r1v@WVJ-=d{ z;~k$Ordig=C(jmGUI;}QlEl)u1PO+tXH=A^U?`44t#4&LboLJ~`VO5F95h@0dGPzH zXGx7~AA|xOOdPz=e7Ak8efIJ}c>Mc6vwr6o7@CEPf1Zq*VVYNnYS zrdj<_8o@aEo!*(qo0xoGtT;kyoSPGPt5)xx_G;Uwb9y52@>hPNn7pqMKm0Qeki1%7 z7wJ*%$yc3Ki3~=-+7g8NU9!;$Q#xL7dLMDFPv@*YBS2hHLKyHb8pl74+O;(Q@E@PqHo z4MBYu`vnzz0bf2S|9zdN$Ig(TfHr1?hWBUG)EJ;CA~P}7puJX3YGIl+CK3lz*w?P5 zI6vXQaz(BRxl_wulEPbs&b@~x10@gagh09r5-PZJv~iE2E}IYN2W=74^~-qYOf$YX z&wOBB+~Vo!dCUb5BFThNm7IE2xOWq9>r-W>OS3<1vnzCDN7j(d3kO;#9`QTaRg z4bLT(_1Djgl1LMvR>hwa`eRK;heX2sD~$3=jWLgG>hCor8yE++-MdSqZ2!1o_(o|A 
zF8V19;r-B_QGJMaj2FC;gcc@hNw8Axwb_I3p9nu~MJtrTu_ao)T}gNE+{>Vv`bCk+ zpPT5iBT5RFQfHHmDI5MFW%9^k7=LT!Czt&@%O7{<+!(4)!2?SBaqOp5Z4e zuZOKar`_5TYDltSIlBzY^}li;;w(-U=PnJc-th)WnArXaHyP;W#3bjN4^Kk}Mv=$$ zHzX!(Q>y(0FHVw*y^d$kX??j zc7nceA^;70uX+{F$0s0A2)$>w@Z^5CsH?A+m6DRePGo6KdV$!KP#e$A-ZZDK8yFA( z-3>zSgA3asBuo0A-n|>>w9xbP>JV%o4&P4Vhsk;@m%NtIuYdlS^Ig#fx0==Oi7Gnr zIsf*B%XYO(u{rcoEI?W{6_tTt;NvI*qFEi9CW|lhD;9x^UiGRM*RBCj)r;USvVmoE zbabAOuSoJuB%+1YVr`AqrV`qYu$l62Lo*nxh9s&SWe4`)^5q+LL z>=Uw|m6-oUxgjo1$!qAV+{+P%5j&7>tH8G96J@|IS2GM^CfKg9<=qT)>^Rpo;Jy#O zr@Zy;ju42iiC`fewR~904E^54X%35IkrbtvUqdE(MG(C(m2nloQ_d?cjbX4@M)nhhe|Xfr6@ zm_w_`0*E64kVyd}1873RK)MQ5HH^SmSp`g*sWk!QBOWc}+)-uY7-e8zO6(JC)Bz5_ zq#zK?vUv$Tn-oP#2-d$K3aO=cny6D&EP&oB3p3F1rsH@5wu#xr+Rt|_#)UoTv`lW@ z*<1fArV^jVF9H&au$i6EzY80+!Z4s0piUIN5tDt~4yTGJ^ow4jeye(+i&LV+V5uc{ zLG3c=ToEM?!q%D45wr+wWw+zUj(FBtUT%&3%%I1jS+-bZTzWe_2o8)*yrJHRt#b3B zm({zVaSzZXR0xqgW(HJp11!H|Ss3cXxns8+@S4)fK=W-Yz{OD51>YgM2|qf#Sx3|(A%qX;AKKZKek9j`k4Ao%Y3Ti z`Sb88>uoG8Ex*qcNqi;L?N0XW25C-rARRM2wP7Z3xYKW?L7n#*a$4+ZW4It9U;7j+ zA5k<5a{J06Y@!LfhzX2DS)jlh185H0s>cj=^=g1U$ruQORHJ%I)v!@rdiCTCHTY(h z8Yl+v?ZNa$gXste^Mj_KT%FB0kK-8h?rJT{QK#=izpWQVme>p4C#C7t6gU@qWMyS( zd|D5;vFhPzmfKQvr+n|pxvT%Wt^fG+_4{CQpQ?VyHEiS(?!MWawE6$HZ6UlE_0`+zw zUAe6*oQ7=EmFtyTt)SjpG+M;W3<8S2hqJuH0=6cGh69s{!$oPKMlmQgwQNp!Gjm^yTFTv_53n(g|6RXBr+RIzafXQ{CoF9BWgQj44tR2*8@iviYP!a_D zSPW_Fn@x?4J)o&gco7McDzNpe>_0Ns7jcAX9umIZ6tL8#htzw}+7o*DmKgwLR473S z0~M!u7{oeP6@`WFbCgtc`wOv~trQy|tdM;7xdA<4H40g=d!SiP=4ab9u!TNR%LnTp zpF9axJ5%h3DYkPOJTvpBbEe|e3k<)&{J z-7Dn-V>#SgiHLLgtt>rr1s1q#`;SVbrlyX1n%T2qt3zxsi{AGMwo_2x0|5`GYVWJK zI8Q}brEqN%f@Zrqun&8}t_yAiz`P(q?=UxP(FuZuoMOFXm{jaG;F;f_+@P`-4>I$f zHfPI|%NB9~paC9=j-wnHtQHLcI}(I;UO?x?g&Z2z9v+49fWbw}&F@D5K$?NYuf6*_ zbTzFoP=w5Agq{;34k#I$nB>AHfwk&xF&rk6YHxpMG<+EOFc#pP=b{Ox_O6WoFhWvb zzW)yig8)JvhVsWCi_$&j&{I(cN|p&d061Jg3V+-tBZcK4zDx%?&$-MbEX^f=DqDvY+JFhi=j4b- zi`mb7doyU{_!0b*#zG?A0@FG|YR6fYfaJGLPj|qF*kKH*IlI_z6wj%AjAyS(OS97% 
ztx_+R&Mpp5>1LM8&e^XSWuUu$Hr2R7k1bH%vKb#Kxmf*ec;tNiSCf6@YlCw5ELxh3 z>z7Dtstm_Plmw!&yp~s`!?eNawr{x}o5{4@pqEaKeua(1I-eL; zme3p57z=2ZO-By4-G&*t4m`;@^ihx(XqvT013EF*=Y_J%u_DZE=!PK%f;>XN;{XOD z<&b#}*r#&h+Is_oR-04E_L~zd-fG%+m(p3XB zue%Z3W@GP4#8a|1Gr0Bbpr~uv6`KahuVwSFl*gvgKroiZebRVR)S5xibP{8xyu6mz z2Fm0wWBRt7Xxl5#RwIxpOy#rNKgP_MK4$BwQdeVo|Df&+0osJ;67p)}E&}{lBW{tR zR>d9Cga<2NY?v+kcq(H%^5P`>6Dc57cy90^swajHS{(4m?``&MqNe#eyI#v3f)@tC z?)GF|CY=0Qeem8q$tE~=9XWFdr2Q>R?IH{`3=ZN}HA^JZ$`wojrkOro(0oWGO&}!luH&W2JDf zbogqSST4|Fb)*+t;v)W|+Y-rmJ@b+=Cy+i!$)O9zAql$rnHVpD0jmL216~v*V$y6s z9@yal9no1a$`pd7E}QYk;t$?l?Ww7hdkiHIMVHKx^<<>mc0GvxlM!uNzdW1jv8TyR zHxBQ)`aAR9z9V9H@BIQ|uj3nCn7)4v$}v9MR-g}mjITbm2w5)=DPZbG^B8ETL>iK5 z4SLcGvr;%z-WGN!d7X&3Et!*rmP_SUe)P<(d6Gk|qUm6bYfOy*gU4>Ut#ZE70Z~Ms zX)hoYV7v9KU^8sG0iflPt0AuCUu1&Cnve7bSz?2XuYt5G+5nAq{;oN53_qqZwElqJ z(Ws|IYvDpzae!LJ?Rw;E1{*M}myS>fcRi>-7!6g}!Upj9%>WE7D=0ThWO|KVU|QtP z-iXA+L(y&GPWW)cb~W{UFXVj8mR&bLv?BOadJ&dQt-&{wab`rA38+~a;r$E4(s*++ z7CWGQ7IFX4!5@H&ZtQXpZ5a%U=^?xYg@QR$fdVFmn-2_}42U`dW&)VUzX478$dd+@ zO*ya@=VJr)dJz2$(_m9-q#z2DFxeWht+eYD8?jYv1wFSp66qmbla{^=Z7oM1r@!Iw z5*di07^j3LzVL}eV+2(zYPA74wymsgN9+L2aW#s9uD##LG$;G1&e(cCd}c}WbL?{L z!w)TdZ5(B5=B&@2p<;Uhaog^PCDIjVFE?AX`3f{ll&|`**LR2dVPmp=A2lrn7)&1e z$$mN~DX3g#|JzlW~!%Kv^))B6@%B`1948RZ6Gj#+|jFK$&2# zGXI<2qtxdE%4PY?_S%4MkZf&YcJCG?fQ2AkH6%KBmT)}D#|&zl2n5}XeGnnK5kLi~ zNN2DSI&klpdIBJX0%(Md0AvwFWe~8+=@W-fECkUxi3D1EXz3HXq|v%-8l={R`_o%% zn%ngPO*-v5<~5+1ZcYmc8yOQjl;(VMgDU7^hmvNXJKl7R0c~EbIAhGq5xMKl%(b+) zrCN-W<&Bfe1=Pg61F?J)W_6{eM?X|BC6mo$+`47W-dAg_o;!TIzBN$FMa08smTk{{ zBma^&v-Ep;{e5HbV>*4mggql!SVxz}$gFs3_1cO=5R%BNx&rI7ww%bkJwKV>YmaH= zI~w#%ccFk^?yR@E}2-8_!MXl6mUIf1qX0uBfT3s7y0R&qcQC7(FOXmxAyrDk^u zG?>lb`N0_)d#RPVnheVb{u z3o1W@uxtEdm0$=DW%>B{Qqo*DGi*OH^jb>85--haZ%durRl%b|bgSwecK;UuhoJT% z`a=`@9OOP;fbgX|Q^*)o>^$kq4o?7lfhtkKR)DRwSudys7#bNx#K#|q=IPiK3c+1h z3NRj<{`d$Z%eNMF5RlczHuPd61j}YHYw#>Rc}WMZN0PZ%pf}B~G*6EExQ)T7i-iBt z@_T4iL=h_0id!mb)k#20@`AOH;++%(G^peEJ$2wdIN^~-JKcG+yhy 
zg&7!o)*`0BRudk^3PJy*aI?1<5Tb87pm;zsy~gWHN!v;8<5T_uM)u1;Jbv#-dBpy) z4+ad^L5&Q2om1M77!Ga`rKYN`tD{E6YbWBcF3h(wE4eP2%tYnF1)B{#2o-T%gc=oHnU8vNl6ZsNzX%n? zwUzq7t*C-9mIQoMd0bZGj!IF%^d};IVl+b=fuh;oP-e`4s9rL)f@hF_Z4RVD5wWqc zkR$;LHwq9%_XN|myj8(o4!uV@=zu3?Xo505^hTVz;q@{;YcJQC5WydDKVPg@~!K@UW>4!j-ARNwTa>s=T+@NbU0ja=cz3~zq zD-N2%F5DQX>n1mudaJ#;VSf4AyS2g&hzH?*H*94OF*0}$FqieiGFb+6E{%aPB)lL0 z{)N&}-$Vt-yI@t={=+%eRxVp}P5&;oYcWRg7=`}pB?f*mJzfx#R9x_J13pEtYw@M+<5-zO6sJ%P!-YZ3Z#7r@f24QV*q zRgwxl-CngDV596N$!lY^1}}IL7m|b{;aIqzxR8~#2`rCs5&?> z@a#);Z$U;@x5>>~*P+L$-h2$_AJO=vY`)ljOBJk=DfNRnNM51GUtRWXY1rE5*W}9( z8f$+z*&tes&15+(L-CTb(2?mgY=yaD!n($)Y`YA@0$HrXmtif^zNWuW>8Q>7cs(W zU=A@5wHTV}lI>6pNN&BsI_UKg zcU#J&_jD~Ej<}hRSH-t!&ec5WoNUl!e$XKDR36&fVOWda-nPudmx#~=`DMehYqa~nn+ zXn^B^O+UbUpfbIpdhBXI6yZXGv{}GyJOI2(sx4Sq!$1qj$B2`PId8ST3Y{j1+B&+qR*H_F#>yZTQh;E62()8FwFSU>hnWHV z1R^I{JMvhHsF+0bF)Nkf}aFGjyA%Uz9=Jr%wg2meT`FK0<=eO>4C+aLsL0 zRDtxjuVEY@*g{ma0m@^R8-h4#*kQT$aKY6sb6)u@%dZN)ce3!?W3!67Mi$!W2KnS|&xsgS|ULyAH;(mLmHuWHXtoT|BtgSm5K zS8yRd7}HI6vsV71fUQR0xxWM&e?P6H@C$A{AH)Re3QkJ-Jz?Px6k;X&Cvip_fIalg z0~s%^4%j+U`7nrrbu=xII6_H~8hjlM!f6Y=!}t(_#pM~*X>pdRcPaS!`6s`;#JQw( z@D%U^n4TbHu>9qu0Tul}p`yHP;z07T?UF-?0~=p9M&}ZR9BbZ1OhC8@zjT)cI4BO zor&QW83zy4I2|}Ejda9TV(Er)1A2B8KtO{95jkJfv1udD_bCx7fmpRT&|B7k^&?`5 z0BFeFhwb=wQ3A#7mi(;rWGsho<&RS81AquTjwiJcwg-=kFPeYm=j9^4QV=Hc^|4bH&YYJEgA98+jbsHn{A z63oSQx$Gl#fud5@ea`!xj+_#oCm~!L&c5AUre&@N?4f9UhHbn4q+-%$R%4kEzvr`z|)-`%n}J%3+sN zz2nj>(JaoKDD1=q&mO>2G^tyoA%O>b&a*VL>$a47sh~%!eBO%5QMc4*rTYOuoC$)O zble}usH#~m#wQ5?!JH+wA5Q%km^*Ra zQ;_O`uWcbf1CGFrDR=sW!A)M3Zx_Q&2yq!~H5mSbi3w!9`$tF~hdUuB^8(Y>uHS&c zRx_txJ=ThOE-uNS!b#;q?^Uvcd_b*`vejhx50+XCJn`3)6cXRR33WY?!p2&o0KS5% zQt zqoy1l&lBmtZF}Ayrl}&AY#=>VK61S|=ssWh_8+o|xjR2XI5%&M~Alu}u71>GY=GNA6u}hbxbvKI` z(0{Q!2$9MCtwMa#sDMRzc@d-KX=!QTBgg`c1&#ed2JL>C$x3IkMq33hp8A>T!s9@W z=GI2vD3fM2)2QOzX5t-6%!#H2WZN$t6EN{8PRpK+oLtu=upl?qGxIJd-d9%IGLX{O zxLB55-3!<4uyX_kQ*%y*x%#zuI*-Bhp@`H38NLLDi~zD7tu=hlhHu?sAeGPi;~95| 
z0CaUwX`uc%U)uaz>ok`1w>GxYmcx0Uej`(hFqJB@OX3}VfA2yreWC&JL$bUFyRYfH z!iaW*M@^kgjz@8R%`R`Z)6de0lE!LJ$Ek-Ssl?KOzxvxt>%A>?8vpgE&I!nD zHISazVqd%a1ZS+PYpJp0=(zoD4ud9TRJor>=uJNO&i46QKsNAgtuHo4KJ z*L4heU+wXI5p|pUcd|{7$&K71pS4vcQAo4|u*K51&fkN-Q4)cPOI$XWyse{bUi7A{ z$J5XIS(a0piMKeh#KM3jyJFfBTM&;w;gXgaeoPjgCMVqgm@&v|+)_?&Jfh&+87iH0 zhDMQ7GUw&LrniyYcKi4w5$%>eYn0W(lMM2mqXb(%>|^6S7LA8;W*n4+ z%L&*!$ZDuz*x}#8nCp<=SiR=LRFdl{sdXQ z+uqo3p&0CN$;hcNZ#lurDz9zXr)0`_Mt84;l;sWO`Pbp`>rH3b?(gUt+$-lR>nqn` zta46PJK_@jkrN96_4!4Cm5<1le_1MlyNd=diYr6 zev!{ueJMf{E8ecR97J`Xh!2w)PoN|2i#wDT_0VqY`|z;cLw<|wAlnx#qDH_La1Erj~V%oL%}zaROhw>R|~}MSwvzDBn=t+ zJ}G9mZ3;pxK)bsk8$$Q{a-s5<>aPI#XD?IuUvm#R$ zqa}#$g%J6^(U9GKzD_F?&AkEZ-HLe|P(@?{nrEot@LDHM{tgWdB+f^BblIA4XXD*o z{_+DM6vqpWb6im6GBx!=*z+rdzKsyqKo7}<4jn*YM(fjatR)R?q0H3@MNcoRG(6_f zY*$s8M|VP4GzvZGX#4{LU{f>kD%N9TZTo(8UTcVT-4#l)`j(%k|lZl3t*-l?UQ?JqHXGvuj&?%whEaz*r zVd4@vP^0lG^7Sx|z5aGkJrN5`K-*x376d~NEzzt8GJr(PNIM~G^8gM*Pu-SLy()qf zCv`5(DKC({cG{YpW;Q;0e{RqDzgB97V;HE?iXg%N#+aPO(as^oY2vml`||ocraq6ig!0KBxw~k!9oVA8!N6`qbfQXXZK$A0x{t96 zi+3kIUkE(!D%!-@cZBf-UGk=|dKF4`=vSsbxo9~_+QpbmKp zu*$c=`@Tr36S?M`8gfO-d>o?GzpX?NCF_1i5y}CGpRjqYykG%h3;w60%07p?_HEf- zXLC8+JHE%H`qpK@QXeXa>_LX|%;)tIjU7*LdvLma_QYrT?8$x2#q-VovehDP2iI|U z9#zFYY(y!r6|7v$p;zN^U-q_jl&Sj6)aUvl4iDdPW@W{AfnZf3O$ws&sSl6v0Ac!@ zHZpF4kYDyc!e}J8B0mKvf96Qc0UCOn!8aw9FH#QS3m_2kWS7%MSQ32RGQc-j2fB7F z*xbTHyTv@n3%_;wJ5XblIYWV-%2eOba`a@D5ZyS67N~DFYUU zcWKq)c`;XVzJOpVNZ1lA>lCSZapzK^NGhLYW;+*6Y0@TwT{ZvpdM*cqe%>pkenJ5F zV4G48tezmp0lF{WFyd00{}z5MHFyLvF+2OuYnWZ3+{7pA#hCfY5*JP-)=eAJwhPsH ziYAm_Sm%yETK-ZB_>E%oQKT&kTlVC``dPm3k4QHB5qMqX$ z7p6NT+84fFF9OUQ`&&Ew3~qn=tuH?VIUiJiS^>ZAlwzJ!A{J6tSL_Q7sYvEbuf!4^ za3MS)xhWV-$lb{xVC?G2ASesDAWSwq9g@(XcBEYj%{G8bRRDiR2br+@>D|h$sNo`$ z9KDhZYO{+f1HRnEm|lJbH^E@oADm6qwV$Zl>>-MEWPVwEKUfC)@YIopt>&m#YBrKxEGqK5sdg5owU0^@*yfM_UCi&59GYPLCBc8nVW{DAn7=%KDhwqbnzn z&35v+tLG%uhZn57hpC&cGg2k?e$-aMMJMHn{I~=VWE;lb3aq_M%w#;_-(M#~^H{<% zYWRG!9hpi#IJ%x_8S=}HJ)~XozL+~xw8rtst{UNJ6ABi*sC9@Zl%=k;?uK3U*QO=& 
zOYe$Z&uZH@oeBOOMWWLu*r!{CoX+DPh{(FI4J*xZC*P|siedVN z=VjR)16p1tmasW`T+DhMx)0BObAMp@($hCm)}?YGw9yaph^O%PkAnX{83G?ie~ONY zYBPg~xDN<4E#_E#9lx7M4iLu(U$C3j@DZxx_z%=427o8Y*=7@I&R_#e=>XGN5zpdt zZ)RJQRMtJim?kV&w=49z{J;7p5Z57={DD)6Z%H`zp{JGFduIW`G*N+58aJaljEHmZ z2^@|*JRF&@KU~CN_i(@JN|Wp$HF)l>i+(I-X+m^*x})5D>9Z2$R=Dv+0a%?~{zBOT zuApRH2@f7^!2lu?jpr+GnpXEnEr zEO_EO3J}f6rN@eN$!x$f@bVWScPYdWG6Tw&B*4Ahpzv0|n9`<~D3|T21+=5)po7oS zkZyypLkBGZkYN4KtlQRP(?)w7)5~9BU4SV{p-Y09J&6Ii%dUMp4pNWm6)Rn>L`pJt zpjPO;1^EZ*nfrl(4~BbAO_dS1G$AaH0Y-V@+=`?85{gFF)k9UAmNVGuEQ$&sUIF2m zoG*p<*C3EG#c8I0tRBk%loS|1iMc-v(qvR5JLM?{6|Tf38-d%K?=OYwKdzh5<@lg3 z51Goxp-81rr6gVSIgD@k=7$=Dn*eJ5#PW)Z6<}_0q!6yM6~iM?pAj59uI?+C^5`!4 zN{gZ&^nkoVh&EFb9QFen&`46r?ESs0vD=jPVUvU4^8{ijHFX|5)}71nytcYz`Shml z>05dK9oP#b<^?mryUoAcqfLW;iB{wrOB;ZR`V|GHpeU;sRy*c_!m7A<0vmg0a>M$( z?u(ZeN0Q3}?_5h0Rjv0_O*Q+q@W>pxS?4HlyXnW`+o1m^UQWEcH|vUp z#=cGWOmLJNHG7sUhzXF9>qQ*j>`lpNqlKwU+Ax&pU{pBez@Q+%d~3EFAw<&;&SRTd zU%&-jO4cg?=b$EfzB=9_G#E_bjiYQK=7EydNPjkr6rX=J!ow3OJ%&K`4*>S>KheA;3?Q^s*}B*J z5w3>%kL!qi?-7KL|LjTl(b}y5+TmEv)$(~_?-A>Ts@P)!hj9U#JIXRry>_`Z9iD(J zuf7OjULo0_zDVIF&)-Befehp^R+IUQ?XyX(bF?6mQX7){q&}apI8bHa&1`q zRqg(N)b#*m+n@$k?k+jRoF>p>FuPTAKs}I6GqeJoiwj{8xpG(sPQQb(#u%3CPyK53 za>Vs4`%;R`BxC5Xpe_zPqxs1aVP5}=rA?n28m3FWSKaosg4-BowUKe)|N8q^;o;$B zz3{jjID4TLsYCQalX042j|;_F$8%c6U zY9j$kS~_xAK#)n$wDm$IWpJ_&G5j{YH(j@?~uX*x>#K@)?|cQrQN!y=r1h zpw>My@dI$MGi}^ERZT3yEC|^6PB<`0-9tBvf4wAm5D=QH1k#0Ht?9-!ple1MX$CN* zJKv5tm>soD058^_@<3WST88kP?)_$a^fmg1w z{IpxK^j4E&&QLnsU-oN`Q|s$U>4+}0x(hoPWP8&M^I5x_1eMiLa39;id|2YL? z$jKQ5xDP;2rXAw;H>9n|nGCDd`;=6UO3C@AIB$0Nc8A#my1PuvS69)@_rX?}_xS-> zrO+CkO}XZ<`~va&K7Y7K0t#q=;jjbP;L=u1B7WX!mLI|8zCvQ@dtAu2n|?pBvMM8! 
z`Y;%_V(&h#bg(POKP%DMPG@&80XA?6lzAvqhcX`XtiIi8pmc-6g(nAEhM;`Q(Qk9+ z*(3dUiD3_}bG^II2{;!9>fpPC)+W>kc!-BIw|7euxd1uEY|e2WE+rS!OPn|amcO`;p!3FU=)RFZ^YwZz17bml zobJ5Y7Y_B&Qo4sJO=JQl>E`{Q*Ft817upN}2j%cC{tto1kr>n7Io56l`sUVl=sR>WafZ8G)(=1krtP?qxf~` zv2Y*xLz6aK8Nqm553h!+m##3&cgLo0mlL>R?NkNE+cGKkl2*6^7) zNE=h2bIIZaG$KSAGONn*lD@s{Mr0q7@i?Yk3p>?6E2aO&yV4a^{l40zPi0ujB|ft! z07X@9muWcU|2M0jMAuIL$5>{9VUwLwUKowg9k5t4Tt6MWNd2bhH4(XKJpdZTiB6^I zO_}2V5&y}fs6&h0M8HbEWo;V!+twleP*StE{4eny>fS*t#tU@bP>ta9y0>Gc`$mCt ze;DdMJCW0YW<4M*(2oA;j0pwcO(EIg;VekLM=KsQ4g=vA(GVJ4E&x_7mJvni>4Hg?Ik!$*3%S`m}!;~30 zqfbb9LL`2^f#Kl}9D&4!@dS;xB7|N7jDQd^?z8%cs4&nDi%1xd<#bcJYV=&j2XyB` zNO<1%HFFy6;fgCgct7ZDB-IKU@+R;$1N<4O!>`oR<5PKHz4m2qhghko1tO*!NOJ^s z(|zD%0X=RM=ym(hKIr0Wt)oz$>xQB?5X?|#j>ed{VgiR2m{K9GNU8;vT89C+xpW}u z236e5u;8z;WLZ}QU^+8aCQw-_=GB&BpbYwjZP?^OV?Bi!h@M7)S9}PlRkgzzNYAT) z9Bd|Uz4#W?+rY;9N@bk#rr2FVeY%FdW4aGSlS;*ti3&e+%71)jB~uI-khdr;{RR{l zsNqlLH;&=hpbzoj=sg~L7VH3K+G`6Arkx` z1hSte68AtSPXh5*#YQH>je>5jF~AhXOT}?^>&B;o!_Y2frhPi6fki(SY~PW;grs7~ z+%KjYp7*|eG}sm?WvByyp9L*KOIrf)hg2rfes&B>n5GV#Lrj6AJR*BV;|@z+~tEr&UZG(hQ~cUh6})o;`#Q<^6V0xTQfR zAdC_^P((c%Zw18w2?mN0Y=~lLVKpV>2&@qM{!^Nz8yTA6){-nk=PxsYJU_0Z2#ui7 zM*@wY1~af|ik)JR()t|!XdY!@@#`KxQV1!C0rCoHH5t)#YvXp}dQWN2olSV}+&l;Un75)r?f6dH zNU-sPX^T$b1yIe7NE$28J$p6CTvilxwxcztc&zC~CcwVHVNj!SR}zfu8K^yfYR0Y) z3HIIl%t3q)%ua9#qlJU%qI(ng*|d;^!9YI?TdNz{xiHzd;sZ>Cx@@-xW^_T0i?fGp z$GXtVqON(VHgu5>i|1gfdRm+(t^=0MYn)%s!-vkxD}&)nLjws?%H~Z9Y=RaSKP!bM z|C%0>x6xTH_UZ!?n-z!;!?$$Bfh z)4uE|J?@@pyEz!=L*eVG|ND}6<>weIb(CtP8<7W#s{GfkD+T4v3oxz0Kx=869A{vr z*G{pl&$cZ)TLj+dfjiVFlsvmUg26~0trMuIm$`1T(!V=(NLYDAMcSn7aV zOAgg|$dCW*jf;yL9?Jv+lxI>MP>2)>l3;xnA0Pjjew{nO(D^{BR&Bu;h%k`3=>?Tv zG?=oSqX5?@4W>Y_s{tasu)}nRqhsi&3)BSou63<=TI}Y#{tw;n|1$u0ehoxT>)BS! 
z*Z=Hw077xLz3FIY0mvQ!9O-W?EG+!fXa_eD^}`xzQu*{yL}evTxYTvWWuytlBv7(7 zm~$(|=etstM!g*;E}5uBG^q8Zbz}>bPnmPT4UjxKcFX&KQncE=%RB!gMT$oB&Ji>l zA`^scpb)J;Q03?WwhSY-XkBn1^d;P-DwRu3vzp29&Nr1}jVMw8v;r*FCkJu4GzTGE z0c0XOQQV7;JxE0W-igi|Py>2^v4<#F;xv+kH2_l$no?^Yd;G@GSYi?OX0ZHQJo|0? zhhKP2$7|pzig9t7$5g)j$20u-jn*|#<;db}Y0K(&i49F9p?Fq_EX8Bp5OsA+*Xka8(;>+X*#g~wK;Ew%6Pn#ChQ?bRcB6ZrlJBL ztxD1-as>#-E6@rTA~)4Hz@r!22}<#hBQr;RQ{q z>y<5gqg~9`Tuot0v0O=U8`=qDn9NX`#PLmj<8*X zi1rY!6g%kX?i12jP&^cYKtmw4oUqQB%w24lfm}p=%2rlZ*6lIvfy)<_gTa%as!C}H zwp+tjDPOsB02V8P$n3H0e95{j83*Uul9X^OMYgsSAtx?Y+2hv>X1-nQ=|#APUZ+G_ ze9k?-;q=v8fl*X;J-%c2_+*cBs-D*<>Sy4|SL*-V^`^MX>awcnnIuaq)=eKs`(t_B zkFj|AB-&SBJfLE0ipiN%h3-xmW?*Bsu%{1mB%72F^@YI|eM1Wqdt`}kc zVH$cLT$h?;yirBzmHwuCzj?I`Z2<}`V@EM~i_e)GfJPOEuZk)f2vvRltW}z}DM}|P)3YMrep4a7ZEcn4ojZ3Bf4e}wA%L7L z;Clc$^WJHGhv5DwZ|uW|Er*{KIf69#{Hu|tI$p8koW=TI{hViLZfl%BFn6B3MS0gtf;2WlsD{o?TFLU9V zE~5_7erdkL+m(x92VbTr9T#8V$#*gjTS5WAI}G-sBxaS?9=ggKg|8|FJNCopADY<* z{#2+{xY{Q0q^tndhBwNMG`8J4#=^>qwkQ~q(4d`zdp#2)Be&{KX6A&<`Wlmy)BXjZ zWS$cmPN_9)?%8a12IcfXr-V~Y| z%fTYILf`jk&$`dAkN0@DYC2|36`hTz;>V2L`x*q#q`Wcwv?{(N zz^vw2e!Pzj8lpb9-&A`5>7c|7=>YQsz0K4C`%lOCmXJljG=@|`4=oI30IHcGnV0QP zKP@&g(F+J_D@57|`$L_b<3GX-v4j4n^~~aW{5y(K>t`#{{zPWY29f{dLfN8DTEWFMeeM#P>3Fem58BM#Mmcgi?bBv(c`^CXH^e?;LN409 zN6~-Ndc(W@^Oqo^w+9!Lpu`G6u!-QJ-Xx>*kRNN=n`0w_Apv}rkdUBn3~>$$8(&OA z_l>z{-RK2SDDwQBEE`+u^PSNK^brvS%pvDCoicp>G1txDkFUsdaO!!6)2%HWk{Tp=ZE$`+k7f} zqaRylUgp-!`G0(U2RxSR|NqnOWR%LxC=?B3?@_31g={J^Gs@l?B0_fdC?i57dsJ5T zW0zSr71^8r=XxG>zV++;@7H;8;&IRGzOK)BkK!a3uKTp*=qY?T<*{8g;*N>p0o%@6 zYv;eqx6M)A6&JpN!eFF;fRhtcQPn1u3Y6WoN3xzqBsiE$2&y|VP*bS(AlIQlwc1bf zWpjya+(}Cgw}8Ma2?L9qEyH-VWj+s_rS+m)6cgkkdD`cw6Ti|~W6vS1Lc~HdAh%(>cdy<@wl0dL*3)M=q8cE!JTi5o*q?Y8AK!V9=`iX~|x$d#dYAnvnyj!Bz=C)ApoB zDFB^K9sMA}A!7;KP4Px1@J;or3x#k>XBGG^TBMw@I{shNuv-y9+`K#x_i;*w2(FE= zon^?OnVV=>@R6**eAbtqVRp5j9X4!o0_*iITdU&4)TDhP)AcBAdNauz@isNF1 zbvg<(?-Xdk2jEnGyxXG(!RpQX_iGCvCh-p44?~Xf>lu*wg5nY#jMBljk1(MeFpO~+ 
ziCP6RlTneE7pNi>q;^HHtXc%&ZzUb$o0x=1&A%0g+zbDqrl+Sz9CSpFv!kRzLwOoF zu$<RhzL2>R7Ls`NmSyGE$)?f(#eI6WG$k zVYoKrHej_I3$#z`o3lsZgv8yK~D%Xlsa=fc_ zroB}qSb0S^EMn_o_}V_;%9Jlnh>HUFxK87X`|u!t+mx@nGWv_{ajKEgIS7emTiXn- zOs8#q86Jjr_ZoCLe9>AhsfSp$dNqN^Z+7Yed_4-(qa^hZ_j&;G{In%%$o(BaN$$Fd z1AJyWiMPbKBQ`Mv=ea57na2I?gv{T|(TdOrYC8$bFcKxal*HXo0!j*q)#?uuVTn;M zIKWkGu9t1{qA84mmNte;6wHf`CH-Eqe zG_6o0#1s^|`TFKF>?%KgRFjZEf|S#M6FM5os!fH45cqyEAnnX$=Tf6?B-YV4m&D6G zmTbm}Yv5dqVQ$NH3_Pxw?5SUSghSzP;~Hyv=Q}H=urjiLZ3IEnDc093`E@`hMxqC5 zPPajhsS!-$;J|~Rqvwx=6;pB0ho5|QQeQBV5=Lqh=p{6KKootuit;vaa4&K+MXZcu zhd#pr5$gBDo7MEX;E@AU-8B2(&%|nFRL8^o4G7)k17>Xi%_&fbWXN(^UHF)5ruo>< zucHL~I#FHnVMPuAhO2$I1i5!h$q_@dQE7Q6X84NX}!NEbBsbnP-;G$j&5H~Nb0VVzsl$J%I z9Z0eT(Zd<8!esK|D?c(7rdz>!;s6u4&w_LhZ>~i@?T#HgP$8)T-Wb8D9kB<6A42F5 z_qtucU!UXXeMSQ!(!b}=SS#qrJ*lR16T(U6BfdXLNV!L#Ktvf2{i_sBq_i&&W}Wcg zp@;=)SoCp07tH7VIip6Q^<_6*E8(rbrgO=JnA$|1Mw_8^n*H+MM-Uvz3-OYtq1`Ho z;Dav|;~ejK#Oc^nXmGzme6^#mBkm_RNH(yRPm^r?O(_I-y6(|}c2($|wg=KQwDexY z#pk>ulG;Ua*{8mYg$mbN7WzV65?`N=!n;2=MUc-G{tN{k@C$ZKF+L~$L~?|-(A5n- zd(Aavo?5tuJK0ai;6G;6v6)Z92)Ba7yN=!Ra$qv;d$P5_wo&s1JMhHuD%&_q`xQ9+ zwT&2q-gu)7q?remCwi&C?Cat+ITw;b-g^ZG8YCxv4G|%3|FEHne;uqDl`dIXy8n3v z=-*Ex0saFH)t#!&w>X$nu3)jx$Nib4I2ekJGQP)0)$^;k_QJ=mxn3gRHb1Ipcvby` zjEU>t?kQ%>8j^LjcttN%uTgq;J}7Aq5fLF_1@v-|7rT#XMuEX8S)&yc`8K^&|eiKml;} z1`;eL+`nJJV~3=~WkoCqxlqQg_KYG91z4swaCZjxHbG>@H@}WlXQv5e6!r9@{vDA&tnryy-hYd6THVnivYoeClt$|VS zA~_3_)Rnn)sL~6O@+hYxwG}4O# z>8=fs>sedpOs==~)@}?c$@y#Hd#Sj0g@li*=uCCB&|gtPnIwE|mHRx-S5~8iRPRh! 
z_xjf-!CY?9EJ9;X#EBo}JTLf`Yt}sg>ciJr5?k(bEK-o+V79a34R^&GPChf$4?n{$ z_t)73cVB_tr|f(Sg}-WUZf>7!=wKoBUZp&8-j-yt zb(Uk?;^G7YlnR1Lkg!^qD{*5K%i`nT*1{ueYW~&773Dd|NXjy`HjKMK!vA_tnrd)23&q~?;!;xOEl5ef zYNg4s<8|$%q2m~KNHvg=bAjKb7t)>f%^leGJL*nbHLUMp!)rqS9RMqaX~N44QR`dz z-Ftv;R_62N%a;TjfBuoD&=nkR12vWXlo8Hnq7$>ep+!A~ZksfYCgGlYXm5}G>)jkG ze0@592kbyfRtItYWu<Te!#U_JoczIVqUqA%Uq}Uo>zrHy_*O_$@incBjt2K&oHJh$53rA*c z9D&*q0t_@ewgbWhhBu@&n^q`?Gp(8P`X>C48wbN#IK-H&cCTAZ&U+?h&Z4vzVFq?@?6gk%<{*xXF~quW3Wx%- z#`6D{Q5OKts0`GK2yS#{Xy2sk2R-`7S*T<)4eEy=IHW})6{t)q0B?j47#;X4fNpd^ zT}>96^2SWm@0q7Q)%)A@hcRHXOxaUBp^};E5V;>ni?@-l2?}vh2rN!MXGXNrFx3xh3HwzSvJ{CJzL9kr~BOVp#P!@RC);9c} zIU{kyDVfht{{A>1z4uaQ0U(@Ja7kTv3Rm3%^}DFW zlEi2XZvG6>+#k;@JKySO*AfbqJF-nP`Tth_hU-FeGMF(b zYy$6I4}Mc9gE|VAwl5_=s?uC(?Y&c zzap@82ETp7`U0xfEWQ#nH89q%&YS~jCWElyAAhF}kAd5@d+vvx8~Bjp>7J?(W|&_b;<$5;xqG6rl>REX1rK~+l=(I6aU9SXjK@J9I~ zt=;KSw~f(hKf7WEi2aUe3TeyuUx!1DT_YWdND&XMIhgskU`MP8f|4Cb3>ozzYeg0i z*b=&=dG!=!&;!TGUEBfjPRNvsUBfZ)E6ibcmb%9F@O4mbgy+#*1pBd&8U4_5lG(B_R^5rN(vh=@oH zfEMA&qK2}yy0XJ&oMAQVa;}>Qr7N;A?cb@+ul(diJc`ig^3NX&)Lmx|{8~zuW%_a~ ze$KebXYRNKC;{ZGtl#7pRoDrW#(x(AAPOpgGIG&?UK6Nz8ayW2zaQ5QAU&$=Q*-ti zl7)l_4&}z&Q}_D<>WUL_nVMpdvq)g33s0cvTJE>^yRNfs%(w)e6pkP8PIlBs5xHaY+SI>INlc z3UY89+?=M{Y{tg!NHW58p17sVmuxFv#h(<+lblSIVVF78{=IE^S?CL@!2h2O6%h(Q zqNaa$o`t_x$z3tfPY7h34o((I{Kk{lb4R9fJ<(WWjyn?gTy~$llMIIf3p!8A;9k9y zK0hXg<(jgqd6Nu_7qh=qx2H!!$WS9di9OBn{~Ju?-GHO3tVSrW>_GskI5pqNPA0h( z$cS1B$d`ml6qYQUt{EXHyaqBVeU;6F@$}83^-vBj!X&f*5#!}ts(%(m#Ks~9j2%60 z7efRa1FiLV;->!|W}$Mu_BY4@Uo3qjxk!E4b`9EQtVfO>CPCGr>dO~tNKxk@Fszsn zYys>fA{uX*L?jvafTtk3aOEZL?zAgqJsf3La2v%OYJ~F|sbvYAN?W|n-yT!E)A?}1 zHtM7WOyZCS{5y&NcFWP5MBbf!Q>L;SK%F9bx4Lu9riX$QE>uinVQ?pH_dxfA55y&W zIz&ODogVU<(_EBSxR(3VHS;<&MYQF8&`e}hI;1KZ!LLSjh0}#UHq*S?_?S4m!48Ar zzs`X8qz)3T0X=| z6yRf<)kF`%BU026DuG}AtFi@aM_@C_hG#e-t~UOwi3jLS3lHsx*iyCWBCO^8uq= zIiGu;2!r6g?L2hUnKgLj?CkgfxZ`qUh7()Dkw?*iKOkGfoqMd7Mz(*aP(Rhgr8EE| z(aaGNzGP%?&({ZQZM6^N;kXP2P`r}w&b4-mLXV?v((T5L1m+y0EAUpDw5RS~KWv@; 
zLN_n4Dt|mDE2mz-_*lZ(=K5L~BXK^#|2z+OEsZ`wBBfER!#&y?(h<(Gc|wYzWvyp* zQoa4bo{-Y3%ge$FS1#Fg^1X?A@cAVCnL8^Ep&+1itZa2G?+_w zpNaNj9_muX@dO&K(;B(fC`k0If4xNO#&=_N#A}`S(cG(N3=6t60r~O-W0oGy62Z>$ z(V+{8kv|H!G0_asPnYv>ea2MGp6FyVS*zP10TpI9i)Mvyh8@Bo)6Ax;?kn5 z5CCW+c`f{qVkP~;LxM!k6DR1A=1w+<`){o3;c*RjC6PDBAy@jsn!e)radPr+%cXNk zmE&chhq@Ua^73S&$Y|`u%^j{Mn#%MR|8b0vXs)q?`@yh(vd?aue@*{rV_PYK6Vo6vfr3#Wmcp+mh~#$CUms#e7klPKC3nD?r;>{)JZ*BqmBHj{Q;}fcJw@=SGl4j zBpl~j>DOCTB?wt=DVSk=&vFzOW%gKmOb6P~`i(7G&|qk8=ri89VK}We-kIv#T5xhLLK@78(;+Un1==>?Q}N z>V7r#tUp&;eGoeB|Jj6xN{bj4GdH;yZW&#<(>X=I7~Ax|7-JZDB$4xk>c(OtsP(VO=&PhS4Y&hbZKO8a=hV24+8z(mJ-Zv%;DjL{uQ z_zT$TFV1uoX|aa+EEbOn?4nGcAhCiT6YNdaI>=rd}IcjFVaPK_0O#% zKrA~S-^b6f56n43Y(r&zI^>hqd7~FT&e^PxNuCiY9!Ho*G1aH8^1|Fa%W{Q+pHE29 z2O1X)wMW_Q|K3$H#aL!{MlLzJRe^Y9;o%rzh8Kl~%2x%fW?<=4+aL7wy;efWB^O$C zTkpEilhJg%-)f;fVP&TR8XDF#_y~FKJanX%-a5IXy@Z6)fe^i>bn}aM%L*Yc+4!K+Y z6iedMY64e_g{&Ybxu9|8|vhtp1zyG@hhdW|5~0OYq9nO@8q@~vE`Ta7f8QKEHoksAU` zv=Y^sA30{}HKE)-l^MD5i3d3wbr>!GdenZzY{&uf49P_k00u@TAT!ALW7Q! 
z^;g`(|LSR`MAyQTRs~UEd6DHIoS_L$#udptpnMO&UO4PVTe1kZyukQ#`;ON!++SUD z&3ma3Km4>vDPT=@tKx=3opQ+apbJoDoR_QPRA?>H4Y(kYJ!8W@Lj_2ySsOJkv$ujLwYfUMX&j!zcCBoGHQvMn=pCYl^H#Ij8iXt#BKf z6}+!bIbJrORH>Cts4cCAYkw=Nva3xwOJ2hZw5%*GHJ-(v7YWRHwaO3B0Y=qeUJwKQ zM>cTl{yk|PkQASOe<)U)D^!7O3mk>P(9_>o4Gq_`>XpgYS67nWW|Rx)?zGyYF7g>)M*ki*zNR&xSRVd+ zYtlU(4?QX9U&^mgrp58;op z^>yxL)|Xm;mBKexUs{raf9@cujtNUv@N(ViH5V6(OH)2xW*`cEHNYyUG~o%)0xcN*?GV9IXfQ~&df zM^Z=L&wT25VLbs5{(t_65XCw_wCGAHoWTEOOdGV}K8%3Yn0Z$z;EJF)R)wITkmjH` zO0Cq{p*6<84{!SJ=vxv0kn@k(zL$X|#{hA>VH1dX8&5osip_z~ZR9jm0dj^4c{m$j zFeYPxK73d9Xp-P?Hgt=#5kM$TnfrQK`%5Lq$^Q2RFa1I&g?vILBTAXbe=*<| zMkB;FZKOvqedeBxiSposM}O1?$7tJk6So&_^(<*X;z#AhVG$^mYJpVyZJ-6_7MvH5 z>%f^z$9+ouhr0Gv_s^N-57W28ivpVViXKk;413XGAp$aQhY>I)Gz{DlfO=H|g{TLy zA|lhCeOC#7(K zjg!`TZ!EcQqSUn61vbIKHnq}Qu~$RU$^k)4cXki28aC*kF)QG*($e3IU4c{E67SVrdz+)_xtDYA4Mmua7)(?PYyGD ziA}7!b@+8Tu-w}+y~5)!yj~7-TMoOr_R;z-6w3urLte~E<@RLs@WAZ1@$!;qBYJ-V zmI^O~Zb(C06MxnvDF88i(f|YI8-KYQL|KBSULqA+yu|rnPeCW1xe~_&G9HSE3s)Co3f(ij zcQ%yys*ouWf>eU=V1&E}9Y|f^{6xSD+d|#NI;q}4U8Dzwk^4yMK#yRJwJWQuy}TSD z#qV8`k%IG~Z{rEk?Su_G4Fz5IWeenM)FQ=HiI9fK?*Y-)NJx~9i6f1fDZ~eUUhxjDq5ZnwxS#nsm&eGR<60ai@78`ZzxL>n* zVS}xsb_$%X4A!?ezM!4D_WwMAi#0Cy+k2YAe>MQeB)5B}d_uMk2p+@0GJ8%Rs)pc- zkQ0AOKyHM#duVh6VUC_Mbvb4d{13D*em)Qm zWW1A{Re{1pmw7GvdM56SiG6Y+T*bX~U97E@quV{Hn*L}gTl;c>q<84n#E&N2V;ub8 z5qL_3M@XpWz4Vwe^p$GbuoQ5Z6b+TpzQ(4=XH|+P*zkAh=Q+Xh@#UQQd2jr`$Lzs* z8u^Ek(nUUfYIOa$wXezj2e69bhO5=2_6JMXm^O-}Rv#0Q3x8C_<1`u7S5Dvj=Olgs ziyH*kUr<0mfVV$z`ro`dD4-&bt413=Q)lJN9Jw)fhWsV?QVRV+4uaQ+|NZ^izN=$8 z_@R&1G%s0N*QGDs{s>X-5rS=Z&PP+A8TPrA#a(eLtH(dD2KPPAj&x)%wMiQf{-@T! 
zwAa4_y>IA?12)49E-JNta|tKw<~|~wmj9|a!bbX^R^#F}iQjwn*1l&JiGDqMDfH|C zO@^l0fMBjic>XgeFlEYxG#}!^b=Y9NZ4+!=E_fTyuX3n1_^Q@#Cx#JFk2ofD>DP)2 z*A*cvD_enlS`n}pSRIt7-he^P)iB9DwKu=BM-A_|4SR@tr{&y7%`N{l|M&AlEQyQ_ z>6??M+k)`FiDR0F(z8H0bRclzzXm31-gm9oh^)vMWRN^D|ift81F&8ws_?{PZj~lZi4X9e44+vXSX*i|N z9G-aY8%i|Vy2gmZfA(V=RN7zUy^T*wnLiMI;0i zKyA*%O42$4-lOOEi9ab7!j`&m2m0Ga@*#eg69>)yfHxIhgyk&c*F<9iZ<7WT$aSG^ z4=yr5LaavEQMG)_>qT|X$^q4T1{biTD$Vm-*Ep)|BEBgN(Ttq`3+~{PHou477;E2` zU(T&;2`9`&Z9rJy5~vaRq~m1oZ2SN+5ws^M#C>0)X{xX@>N=OP zzUVnNJ6C{KIQfib{dO5CTf-5Hr#iTKk#TZWc*B<5Ts<6bxxi+CGyq5PI9-agHyy>A}5PqaBc{R0pswwN+P z+jJ*;Hz|H3V6$Mbn8i1sk|CFRhfi&q)S<4|Q(BjwVCSYWR&xjTT&;Fc$Q;Wl zo#du|G>KEe5{V@XPtM0rvJ?8ko*8#uGlu!>5+_(79%7<@#Fe17^rdxu1s%B~u_HBuAVGo8+5!sX-!#Y4Va?r4JPEt}&%2g&Z zb({Oj_t(~!qa3OTysPfqU&ZILj(62G<5;ruC>P$LlTuC$ClBc?g!M=6#vlF7!VB?= zy^e`_6YL9eBu7Nb)gzCiX(qGAnYb4+yi}#H4<3c#Z!PQ;w-r`D{v3D8 zg7GC!3K~E#hd`Zg@7Ng}Ktar=Hdfo=xLH`q%z?IHmc}e#K?x*fL&srgU zm1>#XhkjSbJ)3ZcJcjpT`k@o*;#*@1_dz(t<-{1L-2{_cjMOo1sET4f7~Hyb2GyW& z+ytmwv!6YCU=OvW7&RbW5h=koj5oQfrLo29W2>bqwC3VSTDWd}Jw)Im%Bj=dmDM3%fO`)yck)j`0{pS#DksL%@$VX(3uwrDTTM6oh*azYlnj~_n{)*d~n;2;f5K8r_SImRthz!Ff`W2nZYjfo@HV+dFq z`x?;WSU!owz_fHLCY~i&R_RK-6nI%OzfFvRd-iZ)hYJC9s{G&XzM{hCC%^7KqVtL6 zts?h2hFi>OIKyc0*{KGY_PjUpB0`-+XCzkz3V>&;s;iI8^!Q%Qa+%bMb*KaJ*=)xl z|A19b0{Z3Q{fnSeDRFp)WU-&JfK7*@hN$0E*9C($QfDL19Jyyr9@DgczDH>5p=^Dt zr}J=LFza2!hX@FMdG646z`u*kzfj=T*G0`zn8BuyhD)}QGX#IAn~A*4p8;cPEDIoM zC%}K&UqWDiWYdC*EOkWP+NwGf3m##c=(Yy<_a$@|}(4{B*ya*3tXtY2we#-$IiRybu>bcvsFEIom$-7oj2{0foN$ z;#jlp+)98#exW#WB$7%HP&5OavCFb+V;4oeaS{*hDyo%;x|HeAVCtP)GFkX_0O*F& zBctY_ur#5@vi_5`wFs|jx>S?io-*wb;XRn%V-5DV?N#oIznRqhtf;b+^Y^#!O$!75 z2XUi&sqdXA0L$e8fcP6oNDSR6um_{N2nXHGIfc!`{-E2|HiN)*2n0L`MwPz$8Vp^a z8+R7nt_lF4s^+W%Yp4xa_6pWLiWe7*4f_P$2-54<_ZwgI*g(YxZWcQafFg4AwlIN`)a{H5^zM&OmYFumwp7n zdoeu!wX+%!ccNc;fQGEuztVv|#m;Suo-%R~tPwK)5t)|!n5#X70+Z6$(NHArnIuDS zGjwqzYsA&o=CGg3xmnX4?gq8g7nHOqEH#)bGPDOqdC6d@YEBp? 
zk6Eh!XgHpL>KWl?-6GVY%gaD}3@jZe(@QS?;gIEzueTn6$8!x~O$O3**C*O$bT{X9 zs}m$YiKZ;UlyD`-xkKGhGo&|mKkEcwek7KY`Sn~Ob&#v>ID2t`Ov!YX^u_F|L+;N-O1t8hxns=*ng`TL) zqZ}si_*p{6cBd8^p7r%&H0Y@aZ{m$V>ON9SzAwwH+i&3Ad$s9=ru1!NM~5?Nq*uKA z)5vEwa$y)^`Q~rBVkA$TI5B|CqlXrqmmEQ#qq2r6Hp;=Ryu3UM7^ewY2yv%khSVtl zc`T`i01=#>Ek*@YtD|Jivg8;efhk_h*lx!afW_-1<2p1&MzR_ zPoL1K!mr@|V$5iU<9^g+KWzT+$~acze)yjeuU*P$%(=xarEi4(#vVm2cjx6g-53p5 zWSyeKr^t^yQ<`Uhsc+gX|P*7)75?eS-Fy+r(7ssQ8qP`bxvK z)9;G^0Tdl0R_X0m2R*d>tvbxuUbw7nfCzU6?zI2(^fYkt0OLf1D7|0%ra+=uHOCp? z2^~2u<14_;vjh@vyS6Gr+q}PmFx}%Fm=Y#Y_PYBKb33q=*&E?$21Oc++E!2G{ZBbX z8SvYl5C3ffi4$zIYWla#3Go2cXYbP)UdXD1o*s1^ko3XxZNAn}htSG#2L4Q2&N|2q z!BSvhQUFQ=!2J1|kw$W7o;v29K_NzrXy5yN+v0dsqU;C?jRUEeWQ$Ro5qpM$(kPLoNF zVW!UlgB_n)R^Qki8s(|5DfqPM;&s2W`!2>IZcME;hSCX7?6Ow00<9NS1MXOz81Jhd znZ4aW-oh+P;MYhH0Oc$^PZ|2x!?VWEk9U#=vy#v3>TKC|_INDY;ietSN6^j1hScwP zbNSQi`Q-5CFypC0kLgF3O0J4|o*W_w2-UIEn3=jJ5_`dwwqJn=(;8BA!nCSC_nv9? zNOaDDtkLxgx{)%lmz0pV$5L7m#RAY`+@>AKm$>_tOW4C508r^D_6J!1;a$Wb4=HZW zgQ2c4%ERls7r!rK1)@8S+AXS9v@vy$&#@LNkw|9g*>lmy)~)rxOg?^u);ICyYqFLZ zqBVvDczk|evdEP8U>aI^vrn(otrJBFZN>T|-h^IUQzc>!aq z!r&sIED>o71#R?)wbygryF7Ac9+*Wy_iCC5vwZ#=1+9gYnfl><#*ZHl`&8X2c>cZ% zJ$UO=vtn>t!fpLzD>5FnRFN(R+Sp3=yPJ+pz1el~FX40sVbUWyyD-->@A+K7jH#%^h&AOe73YK> zNLT8XBbN$~{C$FQCsu9}zQ1enA^yh*Y4fBRLDE3$t)^`z4LGgnRqn-(QvB34aG*=< z4xW5Xvog_lAH6;EU`2qLOEriQfw#w->jjJD2OO{!-A4STO`lHBmC|D7v2qIk16+EG ziJmB1&)X!!f#1Bqt*8p=mOm7Qb;ZX!)b4wi%B%>GsSJ7zuOcStU{0%insFp=@fV_^ zvhWT;I}5sA{iz+s*^BR9WR`TFp{4Rt%Q-6Z2_v>&diUy$3sW2V6K)Ho$HH^vp+|ol zy+>aH$^7-khia^O7&0%Tm$|x#zcfgGW>!dY% zH^PNc@2Zxu(;7O(me@De7}=Upg33jE7V5{PrCP(dO3~P*k-J?+8LQ`8g%|IK7Vuhs z5o0A=(%Lh2c6mCBVvKDgN}%OYLZu|Z>W!KN+-QlQnDV*1F~B=7r3=HwhpeKdQat^P zPr-8!uSd;WS`4hQPhdIuaX%lT$fQuwrFZ+QAPzXPA%q;ZHE1a*Ul^}KgWWpEz9qpr zRy`f0nL$Y*z_>eMJmZBzert)#qdJ!{;qO5|N_&PjAwgvDop~gFJbrI$hg6_tGXo}? 
zEv$a+$WwWOBN1zwd^We2a~ejL1fK?%-6*N|4(d#2YVJ&aHx{)L7(Pqi+MmmKeACyu zkFHb8d?m^cC;RDceiLb<2q@_4a?H+d1`ccKHo_e`d@^Fsj2#+;ai-8@57PHrJs)H& ze2}B($iB1vLo0UzhNL%oPakGmoG)M{MX-;Emo3AkQC2SI)90pI6Y2(;#i z+$mhE_bdn|AYm{c#QZ|^Gtgu+>awzpkYO#*pIH5=Rg?r`Cvi;>v1_z0Am;1P?DC6Y zEmm^ysBx89`hdB+xmpct@U;3S0^jT@JE;N1$^6;b`=r$Is$4xoiYI3Z?!{M4TBF0o=gU z;1^H2n5iXgRm={@%}cS|C0Sp0vU-O!ypU*ETHgjy1KeDCgJ-x7A!V-AW#I^7NpOI7|)R!I!K`7n;zY1jZq+au)zCm zfBPl)Zf#tJ!?y4&gQv&1?|j6Xht;!$D;jUK3c)`w;37M+9YD2aZ#1ktmEi*M@dK-MlSt(%i#aw1P@Q-|4#^w zrfLBYnu;vk1Zcv~CNT1HK?nCL`6Av5y8`DEf9A=@bRpj(5D-6Qlzg_Yd z6s8X|wY-kO>Ngbg6_02)Wr8q)%&%k`f8ujVFudwVkNg^5;FIQA2=Z~7E6ymZS_8EN zuhSX%^gz=WVX!tTL@c@SyO&BPKyb?kwN`I~T5Kf}Ek#)8PB>L|WQ?pM47z{M zOP-3h@H;ZpCxN39?xf>7_fjXEx=4LnqWHU+g%6bV1^WOpS^@O`tL)ZefT;xw^D302 z=R-wkqOS>Qup;BRVo?0|1qldVr1(&^@V#3P>h_3!iEEU=OlKyBW{JE#=J<3i`Rrwb zSGh+$hB`%ktb13y^kQ0{MquJdIYxJ@yuO|;Nbc48rxSSGY{>X%d!QLUl^eSRt&@dwUO?+V$x3rST#7U!2`+k6t}Dr4AS%Bt^n`@8OAZPXx#0 zQMr)(p+zvy>jGL%M+m7S7W(L+Y;-9s_Tzg0_>>^68}kk8{FBGGiz(>~yJg_A(n zrVn@G{zM~HrzK~?6pcF~61M>tJdi8~5-U)Tpy&H&V#5>q0FY*{Nf4b*cIr2MK3=~_ z6FF(2rjEfn%MntihF8R=>Ps2({DDh~&Q$zRnjRyIzZPNCZ%0&aQo18FG^iV#Q0nMJ zm~7*GmKZY^quwtVti`43xJ{=fx2CF_O6d@$-msPJ6det#TYv*Mc}yKesy2AABzpG! z*^n=FJb!;?xvCxin8YrQS=eo}P69P^XgmWZ9bLoRk=+=LQ~hrjlXnnM<*{Lma#J%; zv#C#i@eR;&kYe25cPMMH#w)%uAVc6x%OM@I+ijdCz02n9JI9~M3G@EmpT*3U-&!PS zv^jnK7bXh2M-nFl?IvF?skxQuo;CUx-FlZY3g=72I0(bnKtvQ+!~i%WI30gzxYjS}_-3>Rnu9cCAJTHYcZY!? 
zAT`(&8>G8==Eitw!$)qCKR{P;s)-)oy2+?MB55_)-2INi7;@d1jRj8lR^vZG>tSC3 z1{2Qx<9$95JTMY7vuO+u$vX1`YTQ%x1?2zSyb37{IQMR}%U}q;tYc7F8stMM zrKEPW;T?8>G}5I^8J)1;W-l$WcM(?($_bdF;hNhMcb{vZ>1ssdnr1{2346NowCCK))YB;vze0ql<%Kv|wXpXe(f z6~S}@i6TXkp?u#iy6xD@6GCpRhw1k%mu#lXJUkk$qrCpSM(0nc#^CY!2fN!{=$LVL z3KNZ1J}f1Vc@G83dr+a$?8~f z3|Zi0fy8zZUmpkjsX5bd(;r9-NgWqP>)N~Z5fw9z%vkD@V&dXKmEy`vE%$uBVaBeL zZeLENznyYQeS7qulga4m@4jfJ7@WYLLmZszgd%i)=3mB(`UcCJY6vVxCFizii=k3X-1x}l&x)pZ zUJL5_Y_GzP)~y@n?(oY6iTeOw+ZX9__lU$RAFz1UzrmNU8%?Byt#eBq-OLDPQ?7Gh z&TgvE98?|=zA(N@25;t*fTPh9`h4;%dl`SnUPb(B%HsBWYS*?mg%5$22-SC8LPElv zB5DD#UY3WW@1}$?*mH2{9kJijA1)rlSO?l}_6D*mgQmsH-b~J|^TdBs1P<%4Bm1nY z#Lr1Vn5h#mgjygax4sA`>kU}X^aj%AlPdU)#z z2XrXXI8u^G9C#kh#iue-CYd$_{dO3!(bIOKhM$~->Imh(nOL&pe_3z$FD79)SZSb~ zZo@M2bV863J0T`VnIZB=C$-K_j9G9;z+q+c?h8BjH{0tVmi@omnDhMHg{S}X=pG>C zfZ7g{u%m{;Zm@ckKMXZ;Tm-kkG-h!sR?@OkdA8a#(-u=U3_T5cMYXbFJ*TczW)r zf(l02bE9Scud%c~c>c?M80waZ=foO9Czr)$P)LPzT4Gk(l5^*l$BAutsWO)D-^Rox zlTt19ofCJ1iM6~jHd)(JCwtS`wX44hoGOKzA|(iA&A)D+KgBP>^WEZ1h+W$H<+Ba8PgzTxMM&0ezGcWve=Cr1`Rb5 zOq-#zGySC7=jMd0=N6jHGn{_HC(h%7MakS(^W=^CU+753a%8NypwICXb-X>ZpL zjkhsp2PF?UisO`GyQX(w2s+TS`Wd^lcDI)7o#S%}XFN}mdf@}opTm?Uv(I(+p&}}! 
zYUj($5;JLZk~7cT{3wn*@-oi5PJ2iHaHFG4#fE22I=<`cZVSxl94*~94OXowSR>VE zZkV{;9?pXcAv~7480;ZRhDw{_WZ1vqs8;hUPH z9W&~AxNx*Z`q;UUKBf8@o$}QKm=7s^b6;pL+e!aEg<%7B5`Pbk^%}aUn`zz~-A%u| zIK`6}vz)5vHRRnSIkSG+`~AbDTZL?Q!qz*z1bK6cx$xJ?4c8fc5#3(0Ww^a6DTiUU znZ-H_xP?Az6(TL)!pq=3j3S523#HgkRnoLa`rAL>>l%u8VzBik^-naG02*Q1P-*Q@ zU$=%@&sxCR%$|Ur^~g`E(gXoT6EY;-vA3k;aC;Fmg@sikV}rJQYvyn7df=9uN9D2m zA^A43)`sL+!$;yOQXJID%X1tEEB5d93D^y(?<(x|CX8MW-7PXCPJ%-F`S>L@v%*YM z*M(HM#}{YHGga~?Kloj8b+xhjieHqv5>Q;Ar697bf2*&U|DmTRPS*f5`0zNbgzEO* z8~Jgd4xxDeyzgD{*Dn^g3q$36=3#0u$Y|W1`klM&zL2u$`i}LVUa5_BLC!rg{&E^# zmsS>>V=p}Z2F}FA<8K;{44)5ev7Wju9q7f#JFhrX-S25miMtN03*ha-L565)W3h}5 zr?&tcM<)Ps)TCMH0OL zF@g$#fD0tRm6CtmBAU#u)m^P#%yxn|N(Hxj-q1H3u5QU$D|uRUAyTzTD7eRNwEkVr zt3^9zIfgqmeR}ghs(QCcuJ2$;Bf;&%NCX(MLr5bOrxlINS?K9o_R-Ob0AveiaDoCx zaNUqD2FZGVfTg5_ByPuueSMyI?3@kPu)^yH@DPi*1iX@B)^F}@83(BGuaiUn6UW;y z=BpT~epkJ?^Cpyh~fGzR>3%4aZTtYu{WN1H*dW*T{4UCIC z=t$=Sp7iuX!Vu2&>6-5_f(KEDv$?tXYN&#LDsEZP@HKy{JGILZn(4DAB|h8|Yu)Sf zdjQ+0`gQ$YVj9ekFN1R`*RV1?84pbUa$t=1E^^)ke;+RamalV zFP)UR31(QJ5Hl#s^HL<``6OAnFEP|13RREwe_VH<+b*_8YG<=`9O3H|I)=EN{QLr; z$pYsGJBPUXGkHoU^q!@s>4{w8=<)AjxvPx(%-h2dW`2XNkNK^he0F%8{6;BlLn0Z) zGqni)e?;l1@)e5#MWiMhC+)F&&rX;--1m(LsAdYf=JN)Zymj)^W7?{H{5+_DZpk8n zytTt;?3|Oj%>MR|mNKV}i?kjt+=cZ=7avbzLB?L-V;(@N9)2@)RBHy+NF)Vk`oT5c z0m1W9+s59;UzpmDar7vTpU~(3KPeSK8?2!(+D1_Ih0@ zpa+=me~!EP?tQ2Q018$o5u@Q!V6FD0AT($rkwjMW@cDoUr_oT?zBFL>D5mGgilN+~ zy||ir=<5r}vD2=f|FXvRd#U+v+X4MGLi{AE{Cl+8f&)Ft&RDFE6r6W?;)bO^WoTZOIRrv65fQO@MDB!0|To8!Tx1= zP#luqMJSgc>e3C)Hm75z9~;ZemC4V|wh`*aS}bTFNw37HH|8*yz5WN0BOqr+W-7zx~)RISfg zeRoB0X#whB^(q)KjZLw+K57EJqouU6l{9;paL7?#qo6q7IfZMV5Q zatRzwA=!2TgFSW6$nkzBeun>fm+-?-mMh86wziAm#@9i~Z4%mS;VhQSzS-KP5fGv= zD^otezjR#aHJ=C3?CQ;?K9_2#JDqkLo6c!P$xgPmiQ1LBju}fTGM>C9_Ce&a$J?EU znm)TzHw`?#xmo`uHp|z)e9131>s{3eFMS5SofM3W+Yat?R}58be0jAiuD5-+Szq|L z^8ETbd#|ucyGlE0QL3X(Q?+tDbM@@ z66;9h$mWg~HO=<^ zfl1lA9cQ;$zjxd{vU4Y~$2cedUb1YP;#+wV5wGc6>t5f|zMa4N?4g=@YIi`J({uA~ 
z#tA=DiF7A1ecR1zujY?TkvqJTX^Gk5@((=f$-?)d6(}XpFZ_rkq%svWH)9&49!XM&#GFm2Gu@SJjcaI>~Ros-(pd1CX?s3oRD%5Ec z&-3Y;d2q=}elDQei`p)5j8m88%9os77_Sc;dc`N>jNF1fTQQg)*@m7eHv7GVO3poR zB~y1P9^+>6p`I-!Gzh(Nz?W?1-pq4GdlkSQ{yc~O`7X9RPBBlILpI)dRE86XGq*#} z>6(g^*dC51IRLk!Ro-G_e-Hh{#ZCS@T!XEfnQEh(As-?vZKi+jLyU`&H@I!Bc(y9E zLnHV~Hn{#^TlZ3R3cDPYdt`c8>5?5sxb|gUonsRtQ-bDeX%I5g7AUp#ijsZp+&KD- zz6ay}i1fr!W@)eOFfX|0pHXUy?tHY;oEN?SmZ)>}XDuk}>YQ`=r@O(+A88B0)-Us9TRdu-pj0{#3m3!lPq z>CO2fD<2BoO{M^sqN?t*7_ovxngbO!dof2(*nH=e&|W1n{P@HNtN~ACf61CN(0G0{ zg72Jw%;lHE%5n3*gL=1a;eeDzQ(WYgIN6xMHZLQ#__y(V-O|$Co{j_O0y@@y5NNC} zm|eBg3aTys8JV~rx+{L@!sTMp<8QBeA5#!!omimrqs`=Jsv^~`FBQhP*IeIv`NRmn z()~ZWt~xHNt!s~ffJh67QUW3^UD6>)NC}7_-5}jH1JWf(cQ?}AAl=;^(lGSUe244B z@4nx6-}8rO{Qa0S=d8W=I%}`Bp69``UAIjUWox)#`aU=FA=a{6kacxhm!W^*)E|fL z_j}-eF#p`qw8r30=Eyf5ibrZ&ba!`D&RRolN+QF{$J1MaH?14Jj)!gyXbltI%qS`Nv2uW#00C)RDQ>li%+X~H@1 zNv+w^V;x?6-Npn7D5wA{t##j`=d**w%nmwaed^G&lSQ>-q`^|oZ^b9do3rQN8R|jZ z3`USRClKvUv9KS7pC6ON2GYl*W&gV ztfCb{%5|cdVb$%H^Bstw?o{c8!>m5PX0{tJGAU7Y03^G~_wIzK0Ts^Mx|DA7s z+s~wg7G*>E!shIpcB$TsP+Cy!^Xi}w9@5SYB?hy-wH-i91zQyB%*c|FZ4+bKuKd!j zzpf{vjK?#X?p*lkx-CrDq7c7J0CM}P%vniqkIIsXe(>zCx02f`02^im8sKa13VdCr zZ-B1lEM1_z!*3k}AvI*rlT;7|6p|4>z@jeb2m&EILJfIT#Nw5#*suEQ31zgOfXMyC z`RBQtLB3HoSRu-cvTuBq{E)fGs8ZfkBIOgJ54C z-qRJ;B<$_0Mr_B^N8hu4Zy|t?ZVDjteP9Dcj2Tyn?aTZ!SHpIC873u0O_9)#Fl!;Q zrXAjIo2q%J`$#2;cmUunz!VSNMl3wSoFE0cWiKOiroY-f!dq|dZJScB;=6ZI|GZO8 zxT=fCqPzT)Mp+Z#b;^+)O(8ugrJjIOHVcI55_00;u)oCSwb)qXljHcz?o?+d8Q8-| zIevLVy8aiL^&?l z^ui5mf4%ZYs@4S*Cz!G}ecSLf&hNDr7QjORLu6Z^Jf&3HL|0nnLx1~fx-Vi^a*9%S06mAuW zlV{OEkZf}dj?;7W1bJ-_{76L*GNRvF;;vg?z!x3ZM4@jRM9jg3Sz zkz8gPc2)-n3rm*e)6W8#{15N);6QX-MjGrEL~tNqntf-f+q%#p`vjqc$}_&GXK+~m zHe^(dtQTH(Gh`>ChnuSu&CIg9N?R1KP{g;rnuyk$zMFs0=QuyytXOH}ka#i5A``H) zuuGU(&Z65{B0|%?4CPMr{|nUg*ZoO! 
zK>{)RC)F;2_>h8M(3kfQ^v`p+M*Pj!y-FMI999YrrPf&VQ$0M2!OULopWe*iBsEH) z@Xr17VyXu0pxij`3{_FVlb`jI97-$1VSI=WP5C!rP8k5`JAuE=D z*<->QX+3{fj;R6D+Vwv+#D9Og2?2RT!$O3=^`$%G+hlS?^r^Rx%BV}Lng#VmfzIc+ z{D~qvmg6s@4F*>lqtnh8NHG5$2b$wQ@RVqTu{yvDe5a_U4qSd#&R32p0L%%7-HS^+ zU;)X#i=;T*-%TCoC)?T#BKu%Flo$Neb*o3@8$Z`=#V-m4%)aObn}hn-gt+7+iH-kR zGWh!rWx**IUf%}Qe!2@MP+)W7aBx!PHId2?Uq_|=NIL%%EIthjotC}Vko}w8WWhN=E-O@qTt@stsv-&irq1rRZoNvf zUzMd)-OV-=Yh`{L*u7E7#tY|Dm$|Ku7dP%7f9>sSdjB#?t^-8inG)Lj1YaQeS(i|a z^L*lrd;luJIpukBYSZfY804QZhO@GLO9h0#P^zu@B!AX*k;WreY8TQVEwX+hY8OPI z(iWz`4QPz7>Ayx^o)Z{WmM2U*Wk-fmPCtR6wfcfc=Cc$Y{_&dL{^N(jUpg^e>j^fO zTV-#zM$$Us`0@8sD_r^N2L~!9frR3W`bsnP zY_=v1@b3r8Nr`!rwohk(N0tyZ1=i!IwAU%<_hE>dPh)NSm} z{8TeL3wFs=Cjz!AL`YoYJBK>_Ml(~{LI*#KU;*!g?I-s?`p>fjgD6Ie)|~Y74_z{{ zjh}aK+R9sd_lC2@aVnuDF^Y>JcGm>Sb11l+>1zgRrN~oJb~&EX_iKL;fZ2;D0CZH2 z91l7%fS5^g_GHS)r^XQ#q`XX)WFQ)7p0N+Y1-&5B|2SIDfsO$sxm0VWZ|CLk#bviJ z+;(CrI1BCU@AD9p5YcrK{V{P{(fL^886SST|NdaX_yL^3*UTu={PO6zn`${2Umjj@ zneQ-J|#xV&?=q=BERU##Z2<5&ZM-U0awQ2T33 zKP@<)SCW$eNcZSYG+qFoT7%=xo3<63uIzi{tIYgd3is8bL#+DZXr*=4?aS{o2MNIc z2=2OcUp@cC)NtuIL;!hx@xJkCK>{E*FCTT_C~2~3bGa?RKnF36D;$>3p_1#m#V+!^ z&$IpYocO`|dGr{3SDrf2O8x6J-?&ewa4P>;06&WA1hwv`l88tR1qPwSG(%-60-VpX z9SM8i&`Mya%_Rzf6DvP~_RHV|x2BX~qY9P7!4{JQc=~lGo;@zHJ8oI3ATNWG&st6q z-Yc+`U|w&7lE1E=bGL(b%b>K3S^6BnMs92AK|qy_3c@FtsrCmRp9=M4L#+j#XyHj2 z=eMP~Z$O~d{A8~7Ltq{Q@wuz;oV7G(?`PVM0N?biR*z)R|ezqxLQdl5_(SZF9Q8BFLc5;8!Q-wF_Yf5Dx;M5 z4vdcaw|m86W%;DN(4@}|WFA872I1~}E57me#ghBpN`s!C!WYLGYoxDP=PQzL0WrxV zj)L<>?F6gw4b9{xYq7?9^$}lup3)mFx!`&Ft<4j%-HHtZA_n858pDxy+N;zYUZ4yL z(snB{<^Sw<{5=?MhmvVPx)0u7VOS5dB*`M@ZLV{Q-k?3(mr0or>7Ty{GSLz$IvlVn z0;E;b=Y2=%?#HnE&YU_~qPRN5UP3jPr27v*Atd=?K9Yd`sW@-BokMbk(}Hk>uhwIR zH$@+H7g)$>gHaO=XNyLQ)??S=vrQPVY!}}5S3+kA%^N-~jXwJ(|M>Rp`3%xR@cqKR z|EasPGFdq1&yTZnShWe3?5|uSV|K2^;Vrvgm@WKD?@?;y9;A`rVq_5^P&vYSwUIJH z_9t^%q>G;>_Q35csfA%lt6Gw!xeGifGV1!bqr32W!shs= zJx2c1Z?^?d(ycBZ;WCPrBk%Flg0p4WNm-`voH**OdTr(Z*OlCQK+p4!xB0aG(!kj={_a!ejM&JQ3|2`jUUGFX)L+#N?{ 
zf)qBTP(=!D;N0ICFEBmbbj=#C?TpIn%*}o%7Yp5yr7NtJnoPWQ1T@lIH262u!*l^A z^_#6@Cwa^6z@d-B%tOwif3gg= zKR^{IVS4{Pi=%2f&G~3M;Wkj>O^Psnq1^U-kjcNk-+=v&D?*axOrJt*C_y2(TAf5O zz<`u?%(y*YUx8Sz;t9;EL?YDeQ)OVz-2U)h%L7~GY>eCnbRG(e z4g9X<1Lexl06B5ow|8xrfL`6W(?%oE$npM;6Fi;0FeiuloaA-|;?OT?LVt(v(}V5J z<@8~M(Oc&0plnLx(XP=uRltJ;sIUfDw1|I@9ixN%Nu{$tb1agMa%w0EmkG9^6Fxt< zIDJo+P$B00#q{uMK72xF7=gqK&sOzi^uMJT-5!MqL7MGkUr0R?*^Sy(*Udj4jg?mx z-+CRRU*cGM$$oh-6P}NGgrVX?YL8{xqPHaaCt3pbm^3+Nm9!t#Y$K>L2ygv-sWMgp zhH)v^#6qXBywyOKe*E4Rz5(D*00|}7ha}2HKcSi@uhuSJ?mPftiVhmR+M{?lK?agl zSCU{vbhA>A6!QFti`mC!2xBb?*Et&`td2MJ=% z@Uj8jp0wNDbn%h|&qr5}u-Pp-eYOyfPxgXGjJ@NOAxwt6Wm2PI7-C=k7~_`+5Qp56 z=UOc`J;1^mswW^)F_Dw#x@%GwWR#r-Lt$X^{ZhL*&1!)1oF#^m(K9)w3mb-LHyj7- z11ZS19ACAb;hZ43Y2!cZUOtEfW*s~{*rgvstN+ob1P+O(iyvwb*D1Fu)Y3Jn|IY&X z>%SqS!F;|IVDH@eQ8=H^u5_wLIcyn4p`!~ZSZ7Z}s^RlfPc6|tuyoV18jpUV8<~o% z=YD$%{Wc2ZCVn5?K&dda1}HVUYB&Q*LWxr@8i2S#r*+~HpcE}vznmpOJ*3j(fx$;r zT3Q;@MYSm&~QmrUsYv- z$K_9%`^2YPXsVTw_#Dt7?U8iZ%uhzjdQ3>DK|xKdP6F3 zOCSEWvKvKkQwnwoog_es2}ORsg>-wz6Zh1dL` zQ)YIIiy0uBa0|3RhjUv#x=hqU#1~z>qKwXy| z;Q!xTk@2l;;lT_LcOCAqaA`MBUL5AGVI#+g%R_pkCMnsB7Y7HgfH17A4$kM|bCTMO zu+Qg0wkUzK`}cxoSKitjNTgD)2hXT-CN&&8|Kx?kJ8E2f#PG;FG`%*yx6ZsPAZXL+ zazoYF%I2Mq$$=(EZ26enhk6GMB!K7l*pr0JJfWgxrJ>X)rplUzh?R6G%T_0i&XBHM17|xh|c0gY7@W0KXPFn|_i!8gp7H_7k$s^BR zPZiN?)0Q!$v7MK3l?$i16?rDn=VH^|<-SUZYL<#7d;$LC_cI>{#a);J2WRW(YMr0| z=$t{L;WNPJLN(RkTB}nvt^#ykMxS5FfyE_XInTC>|9QDPrd0Jxe#kT zn{N!*nd8Dw7ZIUR9BRCh9xCLNY0X|8=h3XwCJe59;4W<0pySC!*?PUlyyfcdE<%jq zyB-9}*iU^;`|l6s=KHR;d=Ognoa6H3VtQ6vD zLeLceV&!b;tq&{!*j1e@-~c=A4f|XEc!nZY1TUm>ChnWUwnwIqtflu|_SvOmvDV`# zXt$W+1@6#n|L^E#uyaJ9fBb;1L^iBr$6vvOn$5U3^%!=7qGa5cHIqn&@>zsDGU-_w z^2r%f?AzdHou(DfQnx6~L?UuhkWMS3@%eo><4oL!(GecE)W~ z7D#QiqM-w!Vc5ndnzjq}nAtsf8 zBT4mVd;cP3Zsx7p(aHC;oDS%d>UG2+gBz<;Q@BICb@%aP*arRseyxV6R$lW!7m{Y} zC$z?knExKeMuW62)4yf=4Vp!8SWqdMQmZb?n}D^Rdw& zXF6H$3{zh7=mK1-Z*q_yw!Lc!k><~A67YMVV!DCFar1d}pkSauu&EXKKp;V$%)5AB zBbpJRU$E1Cy2K19l+g3Y8pQPbh^YdD%fs8y{=kum8_1;--qt?AXm00fFnG?%y6mb& 
z@_fBckx#BA28g}?-)saZzv0qHHHs9Mn-Y|-UDX;ZB?zl=c_jre zJa)fsaVA4TKkqdeuDsvUfjpa1cx%pO%~LVWMolrU&Q6 z?SjJ6XRuB>6Ixoz)vpFbc^$T+pCg7RbuYU#b0M#=16z>+>2=0@XAHQDcv|8{h|4bg z;qZxLGV2DTDG(q&tRFg!b>*RL6)sl_Kk|0vBPTv3!hMHhTh~kRjYR!T>djI*oHBa21Oh|pFm(ic00^L;M zHCH*BS%2|ygMf6JZm@8s@z)b$Xi-nB}DSsLrtDTu$ zronR8+F(~BUcv`bHFW*Bp97Ln$@CRL893}OI>E1zosil4xyXB+L*qH%=JVL|i4Nw^ zO8Yzq7D5-B?LQeolg`clEh8y#?zP7-x$U#TQUmzx;rP#jcP%- zIXn1ayYl2>mHQUUT}*#MhX8pbX+{TlkiUo4jRMH_V7~$MA=A|h0b0SYkZ}3b^2h<# zJ?r638|D}oAU;M4Aa!xD3sXJ1izO4?8yH) z(HK9?s!7CApwEh@BzhS4XAFkG1z>Zos>i3nLB85dAIuwQWUcv5JEW{FDM~hzpJk7n zJ`%s5XPQO&0N93!U>R%C}FZ{Zn;I|Q+?Ol{v<(;_6HqN7KihJYZ#L12mr)$aMqPiQ?fETID0kU|#>eMR1o#eiRfR@oz};5-Zk=|sT80V$Ru{b`T%r6*JE`}luaw~ z>;d%mCiqr%b+i`R3AhYy7hj^Z;L}6I}i6rn+EYfk%0IfW(`* z_JOVOON*JJUb`K>oLn{%vxdaESZDa!8`t``BI&#wpH-VnMSY#4T|dvP^HzOa9c;N? z_nCE^tBg#!9G~->&w2m~Yb&V{9?-0YC#UbL&UZN0myYbQ}Xt#;c4ogx*MuoXNb+naIF^4XWvX~i`ud& zJjMr^eU3R7^Mo6UhEK6B!Gm9RvBP#r@N>rJ>Cs>}6#CHf7n1Rp%1O>uR`K4k`3c&6 zG6tjCn$bgm_rV%?k~y^;`|OFi*OVnXzKhQw+3@9S(tc=|zQMTtLUx5+m(3!VFq$hr zZXS!3MqVYXD|~^E=d5x2>ZO>f%T^LHq~NP5P7ho6&`w$A6;jJ5uzH9gj6*iijGGZQ@! 
zNOilxmC}7zaz3zc!Ms8LojG81KjW4K>;I@Iz0Ee)7yxUi==CUl+Icz6RVA!Kv1>AK zND?xg4X*Z({=t`Y+jKe%zG39K6q&hHMxu{f+nYRzHMXAd*|s-{ajyWUmbH1=e?%)!sfu*smM8ZTd*~@ccwbv;P zB$MOR+jq10{~m6K0kax|&MsrGEt@2syS|#~TOFZdjL>}+Gw&KDyjdm|%<^^1=Box2 z@%SOkMfese$yF;cyab~x>d_tY}4vzZ^0O$jBt(^q(``#}}9!dJTNn2n>V_g ztZ&Ywuo9RkF(z*R(w*B~2Ic)i@t79uz>SGzl$+aILazxoT1Fbtbe-J6-d|TB5df?S zqZsz|z4=BP=vC^aLG04@Y|2uFQST>QqtaWgK;s&ko~!;Rr;-#ioSE!Z`l2q86cP2c z=@mKw%0YU=To?F5BP@=U-CbMVhbb$U%fbCCRVr6}Q*3}*v{6rpcZdDx4Oa^moDPBc zXhUXMXzT#r52uld3er;}5>{GmM=8kp&X1$wj{|nKV3?us;CVlvl%Mao@rFYh(ZXW= zN!9|M@ERM$A2lr(bzzpX&@@AJu`x_pB|ZNse^3q@Q|I68TG57e$zB;&6G*>LnY1G(SNUu>ks751{*1RHuxti?On42oL6)6XU+^nL@ z-wazO^oUd^b>$vPDY@r&*8a7iF|ic+BzJbrA+=XW{I{hBn836!1cTLI*B{GQ11;ab zqIJG~ze^Wb0o~9&09acOP)1>9W1~RAAP&F2g!KTOw7&q*UkHE#5{d0wd(oNAkKtVO z-L(PWTMgGZkI?8kd!j>)d}QeUT$)-a#)!6+bGh2mFfG){8U%XsDxk-kasBlRbDq+G z=;tOR;lf~-Y6bK?dsOp$F7ea)QzC8R8k(%gYGpa}{QwQBa6QDp$Ugh`dK8@II9u^6 zM{RVh=u%pnSSbrF=2XLW_pkK-SU}p3X2ukAnk*G7!z8S&uu=wAEzYFO9cp7#tWl^1 zlMM9`kIRWq=B2q$osmZHi%q?!?dofmjO(j3Ce;gf7M|p|-d`>psMb-O0>Y5O09?GV zt3a(5aY@MTdNAc?fQ$?fkOKGLBq5)!xGzrt!n%os?{4?!LG$Wr`$V)a!)ain13pLK# z%KO;Z3&Jiw-o8b@nc$Dwiyg5tObCdDRe?*KG^n38XgGnB%1{?vb#k{X`!^smie$@7 zyjahw3l)Q6+)V1dT$QPz*8MiWkc*#-VZ0xKmCit5=frGhH@kcoHEX;MlU0p8NF&+D zne^IJ^dCN4>!rMCy9KdmbM-(DNk{jCxHZZ`i?$hTSD`ABb#Ix=i=)nGg(cMQOX zP=W7stI&;38M6LuNhmk{k;aGf|QoFFUd`Y9^xXq#%xM^{1bvdWW z%LmD4skt?ed9R3m0(~-fsh46FKeP=sQDtwJ+R7(}Tj(t>@~k?(m_+qNTsS4{hT1Y0Kh1Zd9>zoD}5tvHaXwwoX-njO5YmLgeXY zBM%iZZ{#T|uH2rSw)2`GnmH0#D$R#4kE1cw2^^pcg5Nx?3569WZ`?7%Q7jXSe=+&` z`c~9$6gxlHF1pqF5!kdlx&B5RXNH|9}J3u1F~>!`3x(H)b-g@OzC-351E(H~`TWGSb5GX_u+g0K2~$k!1| zN6A&*CTu)k_VYp|GGK%GyIPCQRo=WCotSNw)P&{uTQAA#^^7(4`*Bm!n;?`!k@(%u zuZ**4h=5Aw-SG+wf^=s&i5&I9Zp#jmZG8PM=A>ev?to>=g~EGi|KjeifNe+Hg8N|_ zFZVZ$iN!}7lC)&nHr&8ezoT$h-w5!)-WsOam^0obKPG$HH_C*1 zKR$#d?Uli=n=9*gIv#^9)KwBh_^Wsf>uK-_i5XBxL|L91t>QvW#gjOCUziQu@eQ;8CZ4+X(}+ehl+<%PlfnuBB!rtJ#fNJKFR2ZM=AK 
zYPS<57sNQMb6BMc)<{45TCn1uf-hFs8Z>8yt#6lfM^jgLen)3vFjOWQfz~70B2r7VSityxQWBZp)a=D`P!~7}5lQs}em=#R;Zd@)$7;D{4 zW#^>p9wE_%bzk@_zrU}RcDD>inAyYy88UwpDB1!r% zD!~*X{njQ@wzDKtzW!(9&<4KgKhQ5KfOp|l6TqwW`cA<{PhVeDMAQeJLM!QXTOIc1 zJdOV%t#|F=S!#$GU65)a=1Nnd#fmx%^0ubAStRF?;^>S4p~nb5=mpZ}0L@;7MfQ(P zvRL8LdfPQi?$<@6rf9r2DvnOqXAKNze$I60PMw_1E#-m=*+_Ah&BFbzDt&EeghDxJ z*QN|-=B~}_|5_`TMfra5xaN|0`XbxTY7Z%1BwIzKS;6FWDY(TeME4oRBU)pQYmLCc zSGc1xyi|dPjP6*CwHJ01|uhqa}2QKR5&wo|VwKRxzN3xGChkzv3`{zJMS8k0s|garQB|&LWL=zOiL(Emw{@NeB@7f z=BCh)wuYZKK9($L{AC?V{#4I7)PY9rYk`tW?iTm@DK@IW=!J)H9-n?nkJNhvy14xBe%+{y9;}O%hK#HFC8i@-+u_K=N7BGkKo;`$Se%M-f5Cp z0Rpo5L_~w}|W@O~1}m5Wl-UekCWy+e|`%fJaZu>QNK(Y=~tP=QVnli38y0 zvz8NCM0e9HNxtA=>Ff#Y!1Q13<*rxZsE_`)j);sXrCYTzBfSqMT7+a1PY!S5bGhQ` z=)G_ubGZm7u?jPk7M=6)m}6DpQ&Ry8-Y29!`|{<#PvdR|afslhDv-0ckP$qi_xK*r&h@nzVo%dYosQrd z)h30xF=97SWL1dMcvD<(@aD!{m`3Sx=jh~7a_Rv4-yTm>2xwt;&mGIXS|Yhg`3ye) zbqJ7~i2=-oOvq@5k6&0JVNG=aN^m2;o+O=lx&X5F-@|11xC(H5K zqD|kLNo%!D^+)D*x(mls{&B(o_(0W&a|w+s3(iFusZ>Y1S+DI3oSPXomOt>^{J2#~ zO_8mZtg_cc3PUE%8bL$GMR794p}(1FaesuKd>=8>8Qry!>=51onX(`kS%)2vs}8I6 z1p97)y6F`L&%PVLBJ^a~*l__>L81$wGNjH*bjpnfUu+)6PSlAi6{_>DMd}Pw6fj9O zpL8nh%abJc2%k?B)7mZ?j_-%;*A z#vAy^_+WUOar*3lLw7F$k~n?b{wklZH?w-MW5RdRpcE(cxb$14%b0sTR&2^wbyW1% zTV>>BG(0*BEHMD%*YCrv4vVu)ga}XsvI69v*XK~_ZV#pVNnHt#-+S*U20iBNO41NaW8=DichEGRk2c`_;l*> z!8R?CD2F=9!B%7ky!y!}1HS7x#mW`#jO2+gI%v1K8I`-o8rBtEllAni|LeL)09;78 z$F+K*-U-U>haa3}FFE{5@wQ$yYG^^>AO#uiI}+EKJ?UVWZR#32I^wooPln^EYWj7_ z5pvKw0(VLQDw?5!W$PR2L&x%$nR_+TnT0p@(5Lj0Ta{oLnmFRHsUv#^Rf*@@k)XfIIuX)AOmen-fbVSQtDNa8czxf3 z*rW?6W;9KD^*ln!PNxO7BnoflgXVvd@26mn&OZ2b+7~Qo+w&%P0=@hAL?db%K%|v6 zUUPV&pZi(!5R9A_>chnKdfw#l)T^hfJURL_5V6NJ@H9szSno!F6zeR)w;0fKEV!KR zJjjhBiMrTbh#JqEXZBXY{X8h_40fdXOhfM9WP;VWUmoA6^Rfz0KJwSODa#&w3HeOu zYE56>`?P-H>1#<9B+n%yBdp>J)6p8(r*hv6Bn?<+pq683Eqy+HlwT5M_1$gyUiZ+?KuCl z4|$PpKLV~)BcU1dQvt%Y-WGa=!-o! 
zlYD`JsH#0`TwfUFiSCu5*xG21V;FZoHGy+@$E?eTy{cW}4#wRreVe`Q{*0atEsw@n_e7Ez`CY+^Zxjh%pS#UW&n?V$bXk&w4 zPZG(M;brHMRedaIT)@aSw);*G$g!0Fs6rtk_UpnlaPH!^pb|ezUEsro#WNX`pw(hH z_swrxiy9dyt&jnz-maGz-hXP;|FEUoSKq-2>p|RNk|O3_<=9k9LN(n}YToPegPI zWlsQ=)p( zCvLZ&WFe&NY?T^7jG-oM3yn`PbF%r|NBg?K^?Nm^yYRVsKwk2-_Ndl0wBE8& zV*?8+S%TqQ&vD?`c@5s><=Gt};xb&y@r9K9`gfmy7%;3Rd(xsQT>N$>E%KKfd6fku)IfpwxIquw&^Vjb3$_10ec-Mq-;>2Pd$W(Y9+(%yb<8yar zd_Z>kSaI*hx4$7i_X*~Xyn^Kg=5`8+h=@oZHYaOq;Dp%c@>wn3JySLp{^h1!=4{s# z``~%s0`Pb)7s3CgKf^`u;GbbaCiDR5XN2wcco!g1vfAVhf0w$YRr(PEaGFX>^PVW= zOl;@$Tie+w?KR$lfxh&t>hoMoOK%^J5RW}?i;iu)lDf(gc7p^kM zxm9icB}e0Te;}$M7A_wcAR9ZuFqlsCOK-buib?MTg8+|*e`)Vn)}k`05n;t+p)glx zX9<*mpCOb-KjbUKQyXOSTR13C>B(j5pW<=yk>a%pP)Jl~$mOD48|sjRRfT1obaMo- zMh0b|HNX}Ay22yu&=bM5A%@MgYV!fU>D_{5Jc*D|&*cs#%dE*-9;9+G9e@{AUmzG= z|J)WSNNO~)P@44$_ zLW3be+F-*rBRt-+{h3ws!>66k4C22Vs-GWG+r$gn2@tmLnaVrw7|2gmsE=O#h|UX+ z+Oy1h2Rmn=S76dpTqQg4Gt>N_z-8XsyTJ$yK6(mTgd6J zib**oHiHopW%rkxCO?*su{dh6TJpYLcI8Sp**(j8^>*@HdgJtd2$iP+`)gnLK+ce= zuMT^7;URT4wQAK%-UO@O1ZE)|4F^Goce>{#F6W1xM|V0$tK=l-zN~TY7c7DP64;!5 z40#LbYxavx}^2;XkT1X$n$;fwM@sawlW&fGtBndnt$JuzX3NPPy;ylcCfEoj7 zA@Nr)rs)u!Zgq@K7MKZ4>P0anr>I9_{QR(owzksmd}saFSoT-{;zkOp&m`#;r+WZo z!~(Ymn~BCl*R~#@t?c5>_pe+|BUX3It!%xkwa! z>__dJeeF8uC~W5l^XZoe5+Mf9Wi!I3@lBJ`fX` zzG;@#iI8IyrwRSh!zn1y>2A%NvG{ zAkraC`7y!p2j|s_rlJ(SF|+Oxs4HKoU7$!OuGKGV!oKqdGa^fA;7f(>3FeWHx0gT5 z%~O5s>fwAiUrE(5l4|btMOgodY5r-sjLhMJ;6bA-?t062r6l<>rrE=i!<28N7iC$p zrB$LUs@gXZzeH(`f7*c4?>~QDdik^AH~$ro8XnFDs#$9 z!U5{|Yb!t;3C-JdvPh8cC}yBj27DOSTx9ODLSO|nGWXoIV!tG@mJ*up?PVHGJscg# z0MEe0z8UOHy$&K#BkR)u$Xm*cJ>dYC_cAZ^Yxl$S1b(NvWXf!ET$;QU&EC!N@j1%! 
zO9)LuRKuIOQ;@h`aUsrGor;70G18$8MQvezIgxB&ZqJ|h`af(f zt1WjKGJR?0H0mgqpfrpZq~Y(n9WkY*45(qsKmGKzp@#-Adk?Jz6&PZGi|Lv1^`|c& z<{q58O_JEVd!NE8N|*D(ezyo@C@Wkxgh@j43L8{zfMy^0JC`js8RGjcL;ibd{~O8I zH5+7(Gc)sR4!eN8i)U>o)KbLC-52s4iLuYoCtY{h!n35j_YwIrx+9Tt zR{U!^hi@D4^!F`_leJxQ` z0HS9Pit+(c@H@AE+7cyvn@C?|0%9=&cLilfK=zF$NK|L_EJETfAK(PKbFvb*@PTAn zRIm#a5LP9fP_v&~gsJ4eB029A(*>lr6IW<6R^ds`+z9PwEj_gEYODa^jx+f>8+uMS zenq=1+LV}H50LcsfEy`*YgCcco4Gh(rF$2sy|_Y)n4qcFPLKS}rFfp~a7wHM2xu>5 zJ6{Cq$mwlA`JsU$i#UOba~xba;(wDUL4R%M>l-`6(7waBE|A}b<+l1JbIiPN)9Fn= z6aS`1xUx$4KsjVkKO6@1gDzMKnZX*GEfX(Qe2u0T)??d8>-ohi_?^^LBW!P9nN#tQ zxBpyz0}ufHzr)SS8b*pXmN7aYV$4Egv(=j*Bmmma=Sw~LZweN%Ez?&oYW>)qS#4Z!c8t`wkjRVSa*dA^fGCeTGT&kcKwVH$##;z2I&YoST=he*zUe4OTcj1gOY1D^&l+ruonvUfQQK>u zG*j4Kf*Zk@WsR(O=>H)d{KZlDk(s6eZq?T>zshc<<9~deAj74Z_~xq2wl1>k{OX$h zfq3h*qG&uoist{`HjO_dn|2IUg^Z7scu%Vzt#h)LJgy=zdD-(FCXYMSGU3D_A08q& z7~gUqHpQ>KYofK5q*>FwJskrT43lX)3#0G&Q)vIE)w=1Wi4xrok#lGK7b4@cC$A0b zqvkq6aoF< zq{h~nE6{~G{DmEU!FnZ#;|&^{RvCYV^bWs*!#JC zXvE*Whs$N-;2Crc8@3&(sX6$Ls){2&7&PjFmtlHyKoBxv=T_vj-BF*|%k(r0S_ViJ z3XbmF9Q1RTJj0FsD2IVkU`Iu}EKDY_#?1L~m8e8cAB+Kh*hOPO<-4FCHc>A2Lx*od z-1kMhM^rboSvLRe?hJ+TZGjXl<_zO;0HijwVgkA=PWp%4apv3s5d<)mUY6}b&F zUG{0YpAY{ungFF#S9wgWWaP|>&@_LEtkqYWHze3^V9!+SEi2;1B=ay$SuHH_7- zEumE_=m3ODrKgW$f$ol=RlBTC(;uHTt&RJ=cw2z7gyIi^&4-3CDkJ_EZU^`yH>#g* zL!@puuyejFa84@y5JnF>{j@H=jcbmbqtHJ5O?H_sG!V{5sImHZ^C#CTlvY%#Dxzvf4s_gYw%BC8wx6wei*oc~k}G0S?@Z_FJ(9)h zzj)asLTXqCuHO%__+$=^z7_x02A+`nq^3c^W#^F4A(}W-=KFdXR!vrslN|nWF(kCf z)Nh6F-U|Ob|B6sDMNnO^9^0im?qWkx(4p`hr=~+PSa;U5JIuunNf>mMs1M+z>Itln z`iX#p*4IAAU{_2Jwxx9O@J*F9w&Wfr)26~Q^`|nZZriH4Og|d_NSXAT0uJ~cE`T=z zzm$mE4lwtz?CsAUfS26%3JhohikCkJPq=nTsxu=K9>%H@%TqJOe zg)SkkS3h4C*2{&`0aIws56CiDmmlr zy@=|{8$X+*r>}W9!e34ZfiFk1uiF_cif?0~SJ*qj>ZWMrkf-~xfpsbSJn%lEi&gi; zFaNZnfS=wdN7)Qk42}352~IV*CgC_MrZU}Cf_%JJR=vk!aAL-w)^;6js(h{92dN;h z=u)guxOBXpJ=+VS)~p&qB-4dp_G~CdEr-{kR3Q;8ALu&ZYrBD^&A0w=nCsUsvay^x z)qBJlvU?M$X764tl&=Fi;51&Eu9SEF4bbXz0?_w>FHAq@g;fur#j6{AdPtY}S=Z7G 
zJC8}xL)r>H?rZn?q0>9^-hvoQkkYO@0dI@<4vkD<)Fz3G_!{^c5@@c9ft{ua))&-v zzf_csxMqYRwae&X^+&V`8%w^;BHB-dF}*rUuAyRn>`cJ6z}al`P{oX@0bkJg8cBF; zhcBjjN8@cD(LRa&o*P~5)xv7i|FHEIP*JY!`?ifDf+!%JBHhv@ozl{wz>w0?F%r@( zDcvRAIVcE7_s~Q4P{UBe!2eN@vKwFB)EfgX}f zhvpWEoM$Q;Awe7`A6?t*q0YwhZprSZE>K(^nN zW0Hbbx*{TBQ6d+cvIwWB{q(=@R)_Z?<=VV@k5x1pd%gsH721u~8UZs0H&d83F07k_ zE@sLDiEHalY>R|FWL#`QC^eI$HTyWWOs82?`7A%im04;(KYKHsRA{TR7#;P!%w(BV z#t9hds;akCJ|Fj2nJ%&fv>jQJSGC0v1GM8cS%RGmptHOrP;VCYkA=zuWN};8tdSxV zqL0MA;$RN677)2A1zNsHcSE|sHW%i0SJj~G+JbiRFWF7y%9xj}|9L9^`_>JQR=qyR zJ~qdu@t;w05dVnD=E!n;du?ZguIfPJ(u*1bT$b!qoVyws4MqY5 zZPe8#0R-lHgUqsiom7fRSSR?fOw8~}2Ed^k5~)>L$6q%lUL*pPXs!=LJNX)hIOcTQ z)Yf4Ks8Qi9fN%BD7ZX2s-6y(+CcAc=90K*6`vDFAwNE@jw`VlP6GT27&N@;#wJ#`Z zcH5kzYoCp$oaYOFJrWYs#Xk;0;#b}>icp~G63MR7$>#dkMpsW@bxz@SP2boxdstiB z`aPiHX`qB_{ebqd&Cy=Ncip!m5kwmmggf%QW~O^A`(Hs?UR~V&K1SB?>3b8YU$>Y< z=mwxI3df-xcQ#ak?Pq+yMHfgf4!!1uF8Go?`%s+%7Y#k|A*p(rzBa@V4YY)_xoUS5 z9CLRcO&6oyQKt3+%6e=?Y(?ymw6^Kitd~T?1nM4~wYv_0%6kV`F16>pSFt zdRD%9J^|=}%Z@qBwtBi=9#EH4clfyGp*jrqM-0@Noi3%u-$4tW*1Mps!E!tLGcyI> zs;hQAGfsUtGUur|6I`qz?iSD?Lz~x#Y3zzOwcfzUv_)__?_BNlz{gf1y-D|)Ze1@G zaZB7BPPd|}wGi=-5;G+hEuIwikkk>!@pLJ;_smkQkCTANU7wmYr&z>HdQjKM0?Meq z!-YIiYjdt}B@G0sYIz6UplZn6p<&9{5r7j-7Q85riJKaPwBB`+?kiOcH?Na--VrF; z^^ym+>KBMj?or|ObL#lRD~4Q$4sh>rsg_&);i6&k`H*t&CBNO*b+%Tg?{G)^jB7X) zCIVfasgg{0pP|`R6+A`COI^(7Sq+5kPBBl$49U%z?xTO^l#;_3oS_>6jGF?55b%?? 
z%27g*fOKW&6fIp7tSJH2(k$3{n5in3F<;Ii(f!!r{tG~IWPorrnhm@60K;AMXjatF za^LzbmX7wW7?j(IzP^uET*3CNT;35 zk_oMJEl|E|It-m&5Ov-5B>|GRNBs6zK|w#GBqcj{dyk{qSRs@bY#wZwD+SkF}@4^Y)-qvd~3Cs$D>FW0Z{vEUR z*@g2a5epaWBA3w2GHk!?=5Lb^1GYmf=Dt`6dKMB7pRLCUWC6UhA_u-W;a>Zfmo30@ zA|{6P@V1zmo6}!z&$>ZMAN^wU;h6L!s|}my^?1;Ma*jqxrKP!|b)Bk5lbu*o90~^q ze`C6izPq|^<_UzyDM+gEnQ>-qm1|W%ya5=%B{Y*$Du{eJ!wW*jd2$`Z!G2n*S1U3 z6s$(p9d#)LWe8VXA8PIZjVV-drHMLKUDlS8OmW-&U$uzEsj&eIe%2kA)+nOKPol+4 zF1m%F7dC^ud3v8gn8)t~$%-&in^8dw(EGj1wi`ZC!AT0p%ddVO1uV6}-GbHgy{7J3 z>+5$0fl%DpLkKEktDaV;_@s8?#2=kBXX2c==TfZe>sGcWq->h32 zTK9P~@tVvxYWs&C=nCIXX>MJl+jV~@UbC8kB?@iQv;%&-E-F0hAYp1@goitl zKvdyn;2)3G3iffC#EpBGmU4P+$QZW4Ghv-_!Y6$!I<|uz^`g(D> z-D+BuuimFF3cQ8iMdf*0@L!~rob&U)*0}!A{czBy9es5w3~$sK^7#egncsSF9AZdH zuF=dSZn~<)KGN% zWQm85NRU@yWmNh1UOoDJ*gEKP^;r?@lm-rVpJbpFamS{yYfHXVTGSbEMZMl=?ieygjW)Y`-nv_AV+ zPdPZ$E~%Z+Yg;`d78ngZ!LeI@eNheqKrA${S*u!7hz$Ju20rs@`g%j&(OG-2-@K0M z-tzSQ`sMt6S?B%85t=5usZh5kJ<^L!ScXILY^`_)+k*pDM>A{+Hr@GIP&1<=Vt)HU zK}luqt}((P^bbR~`Hdd|b0;)<69@+z7}c+tglYloSE*%~r%CtNMTmBpdYPffH!+2L zI~~27UKBYjl`|Otd6creZe6i( zoK=$`VSA}OjogC_w!BS8>~}t7sAN_(8kWH|=`2z5a*-|Gvlm`B;?*gjeGiCy`=|fRMx>^_}Y-3HfIy+>yFm63O!; zG*UJa<_!;Y-@}|U-b!c@^wNLhVrpHd4&#o#w8oy`6?z%pD%h#-KK@?YX4WIQ@w)p8 ze&}$#ajowHK@R0Q{|xi)PM$dq{(f<_(P=Tj9|(1^iCJ?sqCpuUU6Q@0Nl3FqEmcbS z`Ilp(JRlaZ{2JP9pfF$3T$exdC*Xb4WUNts@98gI$obpz3n*ld&;e0|F9(e-&tF>r3lkTbJ*(!pgh??2_(_bdCLju`hWq6!a_v z(~BZ-`fw93(*T%na8Ey4G=Zk=@HCxwy!xr9sJ`(I3my#J4!-XBKI}CtLS*KeH_V^l z7kmFHfqaaglTd-+a#QK&tVAD{K^9U5^PtP)6q>ud@u1#b>uZC!o!c!nMM?@*f;`xe z%+fC_!8`hfvz1~m+1u%895#wrVlq|Y!g+s40=eA&z{&+*!=u;CqgM!s3 zUtVnNKh)3opR4rG7b?1y%^yx+g$CCrMtgB|_OhRn+G0l%GzdQMyDR7`P>ws+4Y0{k zxMk!QD~*|mkni-OeW2SE_pr%40=gCT$A&$!M9XC6!z6qz-s0*Fxq609Dq&AIfoH`DDd2Qu_No1CnmrBWv8v;5$ zv-oP@Ba!`!+q!+d)>re*1QOR=7VvZT{JPRa;89tc`4hYmHs8e3ucmRpXAFF+)d-#-ixf8_L`sq#c_74!Kz*{}Z3rjDd<9!;>@XGJ&&<9%q3C zvA<4=e_VI6ca7~8=?-(g^G}KHz0y?%DcUgKnMjS7D1&HY+1w|{D1%SGDzSy=uN$2< 
zC)fYBm=vnxm~A4#a@>Q|TG3V`dFIHqW`$J^}2U&_w$O(@`uJKl;RHc2p2@m-f-f6t*o3)q}u6v@ReJavz zsCxb#WtI1=VJ;5X;~)urMWybXE_Pp&Wo#eekE;qd$Y6W#YFf4)4J6kVG+jO7n1f24 z1npkHs65J<`eN5SGYAk<3k7Er? zwvbY&7S=)9{V-~%V6FoWnXSKzy0d9l?|;n_SyJ;%HH>cbDyB%cB=ohF1fsh_-rMTT z;dBA{d-;k!5;<9@)v3?Jo&G@jedE;wj@Iw50&$;e{!N|wtoFy)>MWqHRw__FU#me1 z;ms#e*yDeB>$7NImEBX!JobDQRmIxc2Xp~JPjQiT&ULwyr7t5>a8}i!DVm~ zX-J6B@(#w$02gOV${(F~9F?oDtZFrvsFp|Of)dpiMZyXidDO-;KjXaWlMOARE8Xlqd7$r(o?T}2&5>Na znJH+MC!U4k)c<07Z^2Hs(GX;G#W0PcyMA>M-61J?qfx8c{Z@{}f+5PZcaf#dCaC=V zos4HxX6^WXqaksJumsys>xq|T>#nmujA@p)_xS7SdA}YTt4T%^g^V{E{ChVA z#sq(XUhb)rnb`W2tLD5=pjoE!z3bDv{KOGuvcJ?y&YQ{19I1I3Bz^fH53^}Py)UZF zHX##a*!edCzR#S%V5y#-x4iQ&DXtEJq-JfOqJdUQ)lFjs)496LZklt0!VhSd(A0Ja z`v83vk5+O^Vnb}fU3UK6ijHT>=9CsOZY^eoUnnyi)U(ls74xs1AZDwhlSR$|nP6Nu z@EP6(#qX6s7~c$85`U%*#4Wr)a--@aLItuRz=6Gs&277~x0CCAk|I?u#5u-8G_w(v zed4SYfLv$%yr&sp)8oWTSWAi|Ow56D9_yuNOTBoXO#OKpe5{N$5&EIysqG53U-2} z!svk0$C~>eI6-{=c6DW6l>+-LtX3pC?$&MzEF@7|9V&CF>rKjA4=^Ox{tnwX4fH7v z*%Or%5DFYkpf4x0?5y>Z-)c@pQHgb5&Ud>)f43hrk|SGRRbMZ2v}Wzpu^XMk`&=by zuq61)%EL3AQjTYrwVw`d2|Tmb?*>JgR|E})-Od<7<$Vc?j{o3h$3>W3Jh22ddig9kR#esL}a0L}92h|6mR4*J06<=b&Ulvu@NEABYKL zjiSa-pfrnzP!kFk5%F$(XJfVG1|l>j^T;LitealnuY_S;wBxWzOxX1G$7g)q9#fWg zp7n?dY)G&M>n3ou6xyv7u*D5BuqDpqxVnMMaIw*IC=krZ`(3U8X^lGM+|lxKRj+N| zz-3+wbU_D;MpLwCkN;@pM6?dMM_D9u7P7n#2_9_|T<$+*ea64az9n)hH6p5bs{_$i zYfUP_|99SmWzt}qkJ2w-Tq#0?Jgf=+Q2E!qwm5);|K4aG_|WEG#UKcx5iXnu~DIGEIH+2)`)@7i(j z2J$&cXzMdl$gc|X%$-wh`UZtE8MZU-Jl5C*O8dl#O&Z!vkzgrGoT7~~yY!|Dmak9_ zm9_0Q4|t!Is6FXyJ*MFOtD$m>l`h|I2K+WcMcgho`|?pI?$BKN7g`J+9ULPX?Kjn=dLJOujRpe3I}o=<^a zi@`>ftdIGmVx(BI%9OR0LhernKB_K~+$vz9UVgzB_t}a!DCk$RGN{t$v-6ad`C{}o zT6yQ!6}suPgYuI_gR&jk9O~BJQ}@1>Haq+mSc>f!L#sfu zvwQHyo0yBNg36t)0&G25XZXDD2MKRrfQQ8O6)#$*0&1*n5$^Vqi@)CsecjWB<0WV6 z@oZ=8pD5-HKW+wQ?rv5PdF)MleAJ3%!NMEpYA|0PD__5?)nz$4zHv%-@2lPosM^q_vhMg7QCu*-FMieQo+eW#KO1JqinZ&8_!J5=9pQ(bK^WUtZ}o(0n60Ab z65LZX#)B{}LJ9ER1)OwRN@&8Bnv7I$L6Z8*fH!5r=?#abevqK1*6kbj=Isd$yAlT! 
zbuBO>Gl#n0ss(QV(kj!%q0%&k&ygE{xpfpFVxY8?zqqt{$fVHPAQB;jzjMZQwU2Fx zW5TL$B!6ID@m(8U3cI?iYDgu6Tcm<>dQk)0Z%`ZGA$0XjT)b*sE&MDxePNR?b!PY{ zD*ESi{Qb$eH$S6fyN)Z_mY`~rhJvXxfAdbFdN7>ccp$vpXP$#skGt8o=+G^);P)@f4|P35fh&%4Nc1N1Z1j)5sOeK!NhdT;0qQ*v}^FM*`# znh~h3RpxJDKlA{M$TY2B^aav)tk2(0Zd;aj(UI|l3C)+4>M3G8Q;D3vaz9sz!CsVF#!}wwl%E59}BDfPInEdu_w5kOnJ3 z)k|!{HX~8PtWU7ZS<(3S=--LoZ`?r>@anN%i=RY2*$MKzA2Qad<+T;w zNWnMA_}XBBBy%_hWOH0X%+ia|e0PE--TDim*fuz)TNv;pYE9RyDBDdNWgKM%HYLuv zPsht%7&gGtlMOjIb~ZvJ=i;NCrml46@;~lWi@uF=Zn;E&i5&lQwmyH4r;WHn-|~cn zrw6rgDl!cLAc;!>&y}K;!53pU!lLbfqy+4m{&J_v@Y&KZJgNxdH* z1bL#W_ODXL$(uly7=HNSGtKJ6_4?#ibKC0Hc_-~}r+F;qs-_4SHYto2IiThSAZMWv zuIbcBn;%&UK7FOPHtO?OyH2tZXWLKLlhzMa<0dO-_1ryL0R^*R3-9U1{&~Aa18C2c zEbnqsXCaY$@5Ic3nYi1$f2-Mn;g_~%vbX5}-0FY6NLD9uKG8^Xc{Mg5`M{eto`?px z>0>9K1vjOx+`Ik6!4}r#yVRbgjo=`-8w3PXA0GhopYnUQGLWqU(kh*R%^ z{7rH1o=z)rB%`kO>8sVo$YXJskLxv%ow>`DT5>*oGg=p&sk&czy*rXPU&VqGwe>E2 zntDQJKX~%5Z3cKhZ(iNp{RaSrr0=im0wA{DPu+N`GS2(WZ}ymw{8m6u;y&n<&UNMu zSoFz(F?1^XX@K$>gTl-Hpjl1c=kNuXL)CLMS~_H@1#>H*__JM}fJZ9j3Zq(vsmMK% zRYrU8Lyy2bi`x(R_n}rW)*^r-e?h|acu`hl9t1HDd zOS~wBecismkos0VJ;ZD6FnAjvV}`njU_P^MTuY1g|8tg zvH`8};ri?g-b=NYlYU*hxl)=S!waW6abMa`HH>Q?{wT1qNZxu|T zdHqqSVkM^aPfkyZaY|8$$j$v!d2OX1FX21ZQac5uO(I$EPR!0qA7s=KW4Pcv2_wbM zV;mG|?2R5QDnB3`(k4_!l5Am;kJeRWQh6L79bs}E6>k|&`-gl);KNg;MfqeuH8{^_ z487NYqjfGm$VR?-3CP>a(ypJ(i0I$V^4XZk0fxl?ee~%6e2sZ)NC**rd3t>X1EEfS z9ef6OR`MdK>7ia#8M63ZA)l>8A34^>)z}~VTKW!OZseadFjsgj^k(^6JOwaAQjtPO zJsLA{vcw@0?)alm7_Ulc(}ofVa$+~(d)bp;B(rwBelZ^zHY}D2`Ii7)wR7<0klX%% z6~$XiG~o3u`;>95#u4T2!#Ku7!rg2{4E1@JPu}$F{HH%}*wqN_=;fNL0_uff;+Zy? 
zwDVX_$6Yv1z6IPu#(Ca5xxVO_Q^Zz-qE!O$wcv1=o$?3nY!M}k?z`q9vYkHSaK?w@ zugs-Q9I=i$OQwQUY7lTWJQETwAbaoPexZQS0*m@TI3MOX<>=vz2wk-YX40@lVZFG*o zd&*-~1Q#o(Fu|k(V%3a%3ghvV%-AJo5xmd2-adQE-B;DmR6Vl5kXDg_{ERSx2Cd_x z_}eyrwpr7Nb}C9=Z`8?eit9s)J^*Np(E*IkrM-EyR{_F?oQ(+N#2WGncGkr!d1~9K z?+LFxY6j3#jz1sKowXHASpEL>!f2Q9B%!{Aww7m&wfxSu`-88NxlHk<=Q1X`U0MDS zLRO|F*#!5D7?<~O?$1WWfk4Qf0ogIAmsI%O=9N}{KraRgbU&lM*B5)C454v?od5Wg zuk~s^5{N~>3Lw<0p~QFfG-FWjx4_TmozK7qTlm#ouPxj%QB+T7CHVHDQ%1u)%MRQK zEGRqtu8<^cEDVCML1!JJpEPs^i*-8`x6~m!0$b^qoLlJ!*^1xR2n4YlS5b2rs%#$<(UkarvdyM7 zsXK$%`h;=QcSJSl32bM?53r(OlFB!xJ<2bLi`A`d;v=&`UcrR}io<^me14NN{3*4a zs*U2$Ne7-m>SAcYYMi>!BB7bwJa;7GpFR)p9n+V!wtj<;?w|Z-NowG}btirk5tLxh zFZJXDahcT0g0}a?)8-wQsiuq)TN!|SI+~>8C2YkPC$ku>!N_^d1fc~^)H}x6Nt}8a zq=a9MO=iT}NT67BboYjamBY808lX|t1;-$rrpte4mn7&=C`!@6&7?A$=}#sE$;{>D_C4!Es2}IYzVz$M zjlRZ>bS6MymlhoSNr3G9AV`!s1HB8Z7e{LnE7qldIafc<)DV8M2fVy^uk7h{LzWia zR{B&fx3%0+SjJl!EGewQ)wl&kos1{=5H1k>mEVx$Ax``)!*7d;~W#^ryER6dJeqG8CI0@iff~+uZrG83C>P{1K zoa+zZBWstC&hm;cn3P{h=#z=mx}N49_5QIn>b;=AX(^N7)p4g5ZsgkjUXv=ABPxdr zc5c2!4i4>ag^Q0cY39w*+x6~$qhlY+PlDn|jREG!-1BWC3I{l;)}^xwWG%GvKK8G+ z{7KC-r^`hhnLdZoOkKp`It%Br?+9++!hXXua|ls)S-!ze;T!0fykxO|J5|Z)=aSlk z=u)*-3{AKR4PFP0Jw3n+Gu`qM{jfu=7p^OXF39BtYamcpul_(yIoGc4fj`&>E)PKEAA&XAv z%d0zKg(^NVhrzFSHnhN?gQk<~RR2tJJ=q~jo!=s*SD!vE3GX@QBgM6&>sl*e%8()J z4D#{a%~n{6i6huYo}i%I^R5QlYN65N_FXVJYGb!nf`!%9WJ8^L$n((Nle&<1+j0A` zvNM+YWV+^&>wdo;>ar<`!k@+AH`aa5=qITfW&} zrZ|7zVZUO%x&2qc#Hm_X1FP1$WkRDCJ47jNi=K{tO}*@Ve}o}~0W>BSm5m`1^fCOxE$v&I;f?GerqeOL)gg{k9i95W99N4`Jav z**muQBd2_y8X6;VCvF(Ps0+W|%?@J?QBbOoh_8C>f=NwZfw^-=2@NuJwnB!L)K3O_|AV%~{4#2<@M^*NG) z!-WMCG!|9XwvY=ry~qB!fp=)r?L}1luuSc-28^${nis@O(HMboj<$k=fWydKikNGw zo>t)|L^b&OQV2rqnGpjlPc#FFg|BMIN;Y%wj}ecrz?z`NR*ETAS5<9OBeQB9g(5eq{8HMQP?$J70rZ5Sm(f6!4z#4!Z^bScOAA8jy zH*=^9BqK%jInh-DIQa(^OXCL6P`59DcyzC_u@L&S(TSW;qn@U=+b7{dUt^d%3aU_s z1d$Ssom1_(7=K%N%z#V<``8oGQp87?J|AKJbB~dFeA{E&q;EE)&N`i7K5inH1oYndgxIV2Bu}JimNd@i;mX- zTIYeUli6;^=$`Y|YJ3@LVNIFbCb4og*WXi?wypU6PI9gKdaYi>bHsh4f 
z>pC0ZQ1&n$z|_$zW+<>!S@;p~Nf487E_CVdC~@PMO=VZS;&Z0-kFI8pB#CF)3B+jf zzpD@Nw6&9Zu`v}t;ULgk_aXT)j#|^;2SD5b>dub?5cBjOUo!us3$!dbrA%{bF8=5) ztP@Bw?P>3~BaV;sRqgg_6?J+qtC6k`4}~eg0(z3hB+SIW zxHL`DQ32Gr|9iw~OQZFhX^V%xO4<0n^o|W<+;R+kMrr!3!&_T@M&n%{5npbT&2^0M zh1O0e7|kuXb721a2khF@^6IErn0N{Is`{XBIZ9g8LYC!fS4NKQq1?0|xU+E(E9cY* z1n|i`>@+!BKG*JaU%f{8igo=ILX9CY$sGe8PtVw0Um9jeA!l=J2DCp$Rk~t$r~Z?i zQkpMa>0-a{0Xz-yvn?5j)Quk4H5%53A$lg(xZj3a1>6s@)6+VD2lHlKbO(gK)G}nw z8n$o}+IY1t{`&!i>-wyS2-?lSLV|t#4-~dKk9jexvA>7vGtW-;_}zz^<^xOOo{x4- zN~e@TTaM}Y-msXVPd9EoEdk$4v-Tr|o|y?|oOY9%huZJ9D@87UC(}2A=(@D*FY0cGW?zHgw9hy%-fTSXay6_k`QvqS>V=^t(WmDhBrhWL)@ziy4rqM^=S0ctQ z8sPQUKD1FC_1+5uwQ9thXy}kIkS+ryYEJB*vW|B>3wzE?F`jHt@<_MdmxtnG^yrVB``!y0dOBpb|^?;krK-+&NY}GtE6cs$kOgRYqdG! zT@equ`1DzkJ$MWC@qq6`Pi|48Dp|HAtR2F!$_^XJdjvIrxx4XXDS%2$zg60_IL6NN z;z1bG%1&WBT)4BP8+Rsg(L>yl>kR!NolOysFK%&ZLAE=)f>S^<1j|UYz+ImiR%B{< z&=Pp^Hbz%vavzwm`F{^eAFX$uMylmkt#TKuk$S0Zu{jsLuO9U?6fVuY!sShW$o))X z@#MKn+XC6pyICT>Rg!>JE)s(fL!&;g6NIGJPIeQ^Y&eLIvhccEIRlL7G08(J>_4?$ zQyukL;^#!b!H$vd>2VEok|OH}3V4gLl|=&zVd+<3bltH;z=IG`5y8)Dtre7~b=e=U z`C!)M&U2X}0Vc;ks)+FB?OUKztp+H;$qfPditpVt+dxe_Q9nzWep1(tGt0@dioE zXZh;(3CVk$#C`nr59cztIyVNlTX5ZzQNo>*ih2CL&r+QbQ&r)B{QP+0nswrB)HHEU zuIFVD=XD@3L-)XZZMgVQect8gqi`yFS-L3*Gt*_=7BC$3$5q;D^XGkM4FFjLTU*5@ z?cHwJaLhG5DZpR%vye6QsL?9)brTT)YJHFJFqh*(7@93NnR z`IgxF*fzYK3OA;){<^3i7=j^yK`hNSCqvNT97l|KY4vGOYQb79yzBS`Cin2^adhMWl_%$5imV16e-NDV zg%lPI?xPXbjFbVX$104vo~IrfcxVyN+Z$eDM;-8+6MSebdX_KV z$-i#)DOFRoF5*f8U<(bOs0>_LJMZq~kq$cCm4&tmCM{R?yzoI5&h*mn#F$O$aZ zX@;pAN)JakO@`X164L(WG1eI=SD)`LeF<%pBey+4%@rBA7NpGO1l|b};#qC!i2X^w z;?Bgz*yXIP3gP|Z+5YcK@OgxDo35}|BZKaQbMt;Svy$wEw|6|hudjq$eLOvtxCs_% zXiJMDsQl3b^mlH8L6ma+%PJHDUFm1|0@Ki!<}`R79isb)Z&n8(0LDqxyXW&o03 zH$v|ilOah*;=eIpLnxeEyR{3^*#h` zi0~`l-v4@4nR8DJ*D|=;2IRaD{CfT~k}DET>#{iPHssJX$inG8k$5-i3(#cS+6Rk` zFOZph)KQ8xEQvHh(d8e~I8 zZTcbrh;V-i9PtCYZ{w)PT8gCdTLsXDy7BHb)q2bjF0jso5XMU)6qpYX9u(G(-I?6n>uo35xOG)54b`~bX9-|DvOJSR7Hfg-yCw~bw5ff!1AQVGI4mG6|?(qKS>i*Y@ 
z1cJ3`FuN&uIFmKcw^kCfOCEAv!l=Herk!QYe#SgR-}iE2ZR2B^)xz zwIbP_U8CnWy7PwfsxP{CMf!7~J(rCTd@G;H4<(=zR}tneF1=j9tejhA@^uAygGAT? zmA_~`lI=b?u`~x33>s=J?Bj_HnEdB<-Z%S9p3qOCPbNAoypfHiH?=9CM;!>3ctd6nN={D9 zI$^~lXV>!dWU>BmzD~v-%WW`>!D}2ufycZ^7j&!o?4#ftf%NG@9TD8P@d1uX<&+$(W?O?kDwaxlJhy)i)F8cZ20r4r*AkoIPU3q zM^ob=hnVbpIREmyI3Qf5xHlg7JDVie7$ONQTQ4K1<9~Tq0N!oT#kVk^d@9j@4yulN zU3TpePvyp-+)~_^VlxWZWFq|?KS&NWt`+Kj$q<8*(~}{kLD>0} z%UT-(9E8v^r4YagMvVl<4QQ3eoGAFnDf&Lsn8!qaSTQhw7C5JNz3szzGN- za6*;BPJ}Jz+;nnJ3nx<*jZV3XN?R4oz=(3m;#zHlQYUXa<(snEOl5lwozut4mIho6 z3GY&!g&i+whi!ZNp`x!Q2CRb&)du)2FE>eUKp>6>yw~!%m5SS<=*ZW$P0Z7^dJi@t zZrp`cPEI}NVJz(CQ+$JCl|yMJLK~WOHV1CnDs1G+HOFPs1PM7-10j_t%gRVTvyV2U zc~wr;{az~+H%2siPT{X!esbkb-OtSM9Y`uEFl&{3n8V2wlSZl{YKKh0mo6B;UkdK! z&MthBFtz)p8&*XCCcwp2I6dlmND|u5o(-qDk6!pwIRI`0de*VmYl}blyV+R=H#p4_ zhGg?Nr-!Wm1lqS0YjV+!-3%4wb^#()bs{5-VY-a&Bs!2En$P=6zRT8=`53BpAy>u> zC%!p_=THwS6u42Svk6M-mb()|3q>M(=&&MDD?(}P_QAeYmwrTR|POwR?^LnE(nYWj{M6o(` zWP8^8;t5h=wOeDaK_`pfoxV*1*k-Q2#J^Zw#E&<4Hn?=uoF+T;+PwHTcy8|WH&#IJ zRz<1k#W@bufpiIqdp2#7;|01@2)%mn#^(N<%ITw&r$h=di1w7D4||9dvX~hSD>y^$ zFx8XWJDiU{U{-l@Gel87hmR33yuTG29H5&>#4NMFjGWons2B@hPy#4-MKCwZ)l=j< zUJ=HK3?IF3kfW5!0$i;^_-_MyNbr)#mpL}$Uiw@W9kZsGC$x_q0TiQWLlsoby$Q~o z-t*90r{2E2nD<8HvYr$px*yM; zC#0cmiNruLA5ay1JvZ{STW-X`U`GbD)sS>JP;FIqW1>Dg?9WJBZ_q0Bqh;=kOpzMj zMtSdLn?V{A)v<54_yD*ttJ=SCVS16RJdCtR-BQ~8 zY%S7P^D9aOI~5aIbRcri*nSGnQE{U+V9c|Ku>O8mP}zjcF{W!7;e-+a-)m2M%`(Wk z2b5Vi(#Xhjp=32$LhI!>Agf*B^t4whYd#_9W(U0{g=C;g%Ht()gg>PY{1sp-vI99w zxB(+=U^ijF``v|C04Yfh-L>DD($07H-nVLF(y0~V!WFi12O;*Sd^oPdzYivJ`s40W zgccT3;W<1`d%@E4vK+%CPF|B)#T@LWQ8s>kLeuGpQN3A=0G8l#IoW3sF7 zl4Bvvn}`dRoI770V8w0uR>rg>*kSewMMM+z#TKD_sBePliGe7LaK807{1ov88F3c| z&@aTUKq1u>_&1vD(AzCoyHCV%BEog)7wJ9_hjgDQc`JqaeG7q=J`#3KKfCXLy6W)u z{cJ`}aN!cVI*tA-Zrkn-E>t zRRpTK4K}~h1Wb4AN%NxIMz29S4vJUH)H<%{^Y-yhIx)V?xbHn4U~=Bwdn&|6cGV8s!j4;!pE(i1hmjS@qMvW z8U`DXk^HQpo4Pv#-n+^B=x_&#JD~)b8a20aqvMpbE+wm`mP~eY;!A-tn3c7IK~X3| zEdPgn2ak~gKz<|xFGgp429;pCk*ZJL`_c$PiamSa3+z6iY0xzz&}7IT&)j_n72QSq 
zK}9xYtZ_${x}00+gaMeN+Yrs(InNG4)(^4H7sXb&4w9V0uF4!(;1Ea7RN33ArX>&x zW<0Y-ev|x))2irocdKf?4v?I&_OYVll>No!iHq@EN!|(>Wq?vVAHKIU8J=l;YwCgi z9+>e$nuBnJT%STl_CFI*|D2ec&poo=yu;?vi7e{5YJQGRA*gW=*EvlySI4GNE3z`> zPb$|-52?f%Ef=HJ%1&6zXgqsV>=&a?F-B^otLlE}mF@}8*)><+m-+K#cd>13x7|vh zju&5!?ro}Qo~+uf0)Op_NPK{S8F0?Nf~EC{d8#z$MGU#b599a>ZMWt2Y|ft6gT|-^ z)Q2B;f;5`rU7z2%_Z`KYV3Op#J^M1_h4H;dZh_yXX3cQ!SN8v6r-rr^>j{ZVh0+6~ zfBgd1Kx{cC7O|k*TIS{>r{5icqYZ_jnalX&&ns}SM>N?MX*(NuER9az&O0ChfYFjq za?r)gXwe?A0ZS^BRqw_ z9$!q^O-G_GUmTfWd7I0x2(TXrWM;xd6TTH@eY}}C`F{@;LmiiWMO~P=e%1C)N{#ZT znB3tZJ~fI%_S&@NgfAHC@lGWiYg@F_A`H&byNmrZ6Ts0c zTBkxLACdM-I$Is{@%-nqy=nVvGq)s2I~{H~96f%;GdLU=g9vSy>r$;~NI3|+l$;+v zT{lo^y+vKyR|f~RTz9E@?kedxj2Ov+R&rKSEJsZwbnbkp!ZJ`exRlH4*+soL9V$fQ*WU6HkPbIS1Pc3{MXbNj z71&0Yy9{tFyNT4bQ{l;vo0!UxpZp;s{LjfFulnAtmLL*Lgv@#;w?l#-qD67co)Rgz zNw>=rqiaK#!T#NFINEAZ+Ph?v;Z9IhrnPfa*51`Y7}J;kkdu3~Zdilb9%>8`86OTm z0oVxOi3u_qp8Of%o|lbZOkrZt(es{|We}fz*{qVdqvMX1g@Lr!*^hGVvzH7O2Qs^q zxy*MhuxL(>)DRe;X+Upw_r;~ZtjVjjKz{DBMn1QDr)}FG++cEI1UL(LpHM8Zi|n{5 zN(un>ow2rik2JO5`&y8^>wx8`&}qG7IUjSYbOcxKOYAYcqHu=7PCIb;^D-cfuhrlc z-Qy5;`e3FjsR3H@FuBJac#GF$_Cb-Yn>OPtkEuxcn;E>AxJmcl0D|N#%vF$V#n~d& zJ%Y===SN$6%Zv1M!}OmYV_B{h2a(i|QHbG-(aJhd{K4&!Dd^X%dfp5SWwgjn&x)0F zw*j~DXxT||#lXS0y+y9=Uk+*;W9`)U*Wy3(gv*xDO*;IWg!&d9kn<*gVU1ry1jT6D zBq=H6KFhxMp$mzpVXbuBPvyUuwHT8I*(Uat&^Z85m)khXi#M~lZjRL(k~Qs8(`=j+ zy|glDg@(Ku8ip8e&0dxN(kz$uK`#hk(2+-d_^@9Zl>?tQ~cv{W9qUopt*VT;S@d3jERV5 zd4KokT~l|J6{L0G?0qc?vW=fvIsJW^a+UH)$y=(0r@<88F(%+f>d9;H3g{K(R#}&5 zb20t$ntU`WvLm!5vj1zvPG!!%di^_q=O6Ud4O?0L{uUR6Dqer_>g$?&6E|A~1z2L+ z)qvMy&LK5lk*n3NoB_kol>8w2y>OJX5w<>d8;OH-gq#>L)JU${no}37etJD_V#rZn zy$6t|gwHbS8!|YS4$GUasEUN`b3Pd!hYgzig#WTx_#tIQ@CjjA|Y`4R4C4!j~Qv!5hK_pyG-hE-aZjN3uv^Y_kG7v3Hc zxXupRr&)r6pFv!vF-w_ru+CKEcV+T{f&!;?h6T%*78(`okgYiMEan*4N(rP+=-epq zabsMZsN&Z4b~^T-qiyUDP{F6pv%t2j{N&4%>7twL)IBDt1RCr-wQUc7$T{ew^-%rv zQkxhMJZUs@6Pl4uu>g=sH9gOIAn>$GzMcRkBn7%z(9%}k-EUdoeaIbW@j_mwl&&u> zS>Z~w$ks4r;20hl=`Mm(qGRTP!!G^As*eHBjPmzgHxnSos8q&VBITG`)t@yW&PtFQ 
zDgpQ>@o*C1!0lgPYSN%*LEtWQIK#aSKxF1L>U}_vjH-Qn^N=4NbFPVC=U%j zi-4W?+9B9>T>fqEy;b!NSmPLusZQdgVTV71o5S^~G}1I17--8Lu~TCeb3+a!tXVH| zt&whT#VO~t366{1oQVMk;@bkiGx+yG$$1mVr^)B^*awO|oGHB5LLU?M()`)tlKJWr z-3**XF71e%hh9@8M?QT%*|wXf7A)%ZU?XB)FFg%cNy@L@jSY!0lMHZsrHStS7(o0g z2Nbg_7s{9KJ?mUYwEIqmi5crkhW}E??u{&v6tIcWz4$>3Xl{W70X{;`v>?Hjz@R2~ z=*a4iVW;*exLQ>3*$Q7OzVrJiX2=;0P$t|Hjdi(R9y!D9EA$z2%{%I!9D#4um|mca zqNfqp8$Av(jN%Vvl|g8U8?fhtNG@mClWY>MQ7^b14kT&fF32HKbqeF~r=n;MQUd}< ztvn(LrT&Fz)%*ZF9;6>Ru{V#}$RCrCCm%W&P6FN)B}ToB=$RSMY>22B3aACE%>(~6 z%NtT_9G_I89L!If5nZBjC+HbDm~*Oo0{FhWhqoU?LxPsAuf*2JSpYU14T7D&NJgjO zgb~J@?l9+!4g9Pzm=!nN*vie|@ZW#+oMrsHH~$UP?th=hf38RN|D)@xqvBeYuVX-h zCINy=g1fr~2<{%71b53o@ZiCNySux~Ai-sD8#K6EVDR})?(cHndu!cq{^>JoB@J|) z?ylN!}sqNTwP%a>ZM3v(1ba|Efj+oxtOkRd6HKE6kTNKu@Bfl0PH#!ivxfjp1o@0JXbhB z`7b`q&}jndHf`}$k`EJQ>8+hbtN;}SRH4^Ny#FGwa}3o=f*Q@w7-=3cNreyNr8xrb z8c?%89)QAHm6kSc8Ado4Y^2wymZPpAWq7St;tWk0GZOxS;oj0WEmoSD5rywwA08*# z@pPkchhgpVz7r~Gy0R}^_Kd9=@pNq1*?i?`wj7^aJ>IZW#ln@rWdBQ%w)7+Im!C%t zv};F4|7OCB6ls-os^YerR42}fVbN{DZRsaBi1pg5l+nhiOs^t6R}`sa-QL{L?y>J3 zw0CF$WvwTnX}3Pv=!e5~N)I#Y|3PT;=~0@Cv7TyOkE46ONKz=87(5KHJwI_XnsdIV zxbjnW%@96Vd<#TY!6!p+D%FW7Hg_?9^8QLVc_uaowKs^6)jym1LYXJ`O3Jp`a^b7_ zX~o;f$>~HYY!>DcFHEuGUqB%l5Gn1fBc!OU}?(<6MH=D8So{6DD*$(J$@M z>S}8`T>Ac&;IU75Ezq{c$>zdE;Yb)Y`<*XFd97-_AkE@d8?{ojYTh103jjlO&w>Zn zV(**kOTU!nUThz90R?!u_Fh5cyV3e3&xC6vy$Rkj_&pv$1CPR6r02qG%?a5=QV}xzWLhH^Hx2#a+u!H z(w5f6;-cFO!G5jt2I*Lp11pVEmK2Kwi|on|wyu7cynIK)y%~A!FRH)jIeOjivD$NyVYR?ILS2QuKvItS_a3#xcblWy2ii*|u_SlUDkv

QJd2^`&^ZAMUx8gRQYr3^72Bb zEC}5x@5A(jbd0`T#&}E#cp6-K>{(`vcz_9bvCKSRXUcABVO@~HAQZX*wY;5Kn=mp6 z2H4CR)<7#|Vz9VK#`JS2H}9o?6v(J0h3nVG4ra@4y~RFc{b?F73&QXn6=MlYL-X^P{Gvg|X4yq8f?deFUZ7Qk zj3r&y8AJ1nz)pIJ$%7*T1)+k?35?C#QCwd?#fTcP-0z6qw;U-r5BvMS$8RUh9D+DW zZYjP}bT|9<>IK7MnA|bTx2B$QbV8@~la1GLzbj)M1i(pK9lMtq_!D}2_XI%NVU-fW zGxRA6PCIH}P4a*06jT0*VGK2_7B>7sJVEIDdgSEqm>>iS5!&{xlh~+iX9q&j)v9A+ zov#MW;zlf?wzq~nTMFH6%kGDtIM4;NyWzj!RPZP|8K(U2vlXa&7y${WwPSu`3v>&f z!&}6`w)%G=mw0YY4d2^Hgx))04;+HWswQ8JzN}CAz%vn8#Au4Pr`tdhWNpea5&Ocd z=*P{#L>JR_8ls41N{XDYv`k{4zThY@ne7?SAgPhBiu8}sBV{A@c&;VbkP zy4J6~Rq;|Nm1v8r2tD191vhf{x)0WkvED7eH_+#DFK|9Ivvx=@sPr$0!;G;Js7P-12e+4JFE}oKq{lLk)Q%cbmwq=- zLJ@%GKkRYh{5p@ozW2~*N@coDm(#95w(+1{Xv`74+P!Vzc3?YRXqn&DxlpuhUlVGW)o3TqwhWgp zi%OWE~v6lHt)^^7evSmU4Pb330v3=CT==Sng-x149f@~^|y$aGfgG&cU z=P5i+Pu{sT3+J~5?)zo>)G*E4=YupZO7jA@I=2LEGo`&9O=beW)edfPytL1}Zoz}7 zXukyz#d=@ITi7_0Ex9Y&&kc>c40~pBv%f?fx@<8@7xA zTC76sC_~@Y0MzuZ>pf`QG%H_fhqr1GLh03Z^())L1BT+u6wZ$>+w-*Npti#+yR#b& zQZz=GtJr6-Rgcm3vh1eeWaSNHT9feax!(u(q&H$)0w^{xs*7twY2ak^Q(OyJ2v$i9Bl^^{+`c(WR#@MK^@>$(xHzQj=jkfO=vC}OF6L^aJj2^eJ1TM8CQ?%+}6U;=Jg^koGVd33Q0?-)`gFYV1{rW z@zzZgaIH+$hNw4|ncs+5U9s(wA0qNVq8$%8uAt5`!-+l!p(8@Zi`ASn4WbLdHt-^m zY6~;x*k>u5^XbZY{bsP?eB$qksbCg`J*m9ybdT4x)mB8N9%V)Zl6Onh*jG!_4Uvtz zj;p1N8XngBMtNmN%>QkUhi3y7w$yVeoVV#sX%Wlx-ZCyf1=CZ{<}CL>Byu1h)73CM z*EG=KAUMA#a;x@s0uDO%A%TTDf*`KRHlw3<1YwM1r=v}S$3dVfM`(aPJ0)v?$TrVb zt|Ni{x^r$$ zZ6gf(RisQA4|`oN?{qTV*^ckK7jDcLiX7M!pz^5l<2tRiN1qr=wMLR#e}g9>{yEq# zO`iMHG?Y#}efG(zi^OE^AK`WSHQQfb{~rmNiaG!MjgVFb!wzVMxPMzK6F zNaQ;EcHYK?i>skREw&?kSIi&SnDB(FX#XfB_$iZ!v7^4HNcHli-@+P_kKN1O$a=U| z>EhX|+U4UV?+ zU1R8bwNGj)U1pd3RKLr%C~ErkMC+nB<_?->F1o5C?!yZ|EhczyS-V*W0I=`Bd`R_D z*O08@+Ebg--j@BEJ28*si=o-f0wwT+o~LZbmrs-WZK2 zPWN8R_vD)_i^pqC+bN*2mhCVE(4^QD&IKRGq;Zb-| zUVW7%fI5GFF#_Ii9Fhbj*9=eWHXlk4thn)vqz}fh?5wjxS_V=Ku;6`S!!(zs(86t% zhrjy2*Y!OOC@hRrDw)DAzl=p9&jK~=6}C0i>dwtuQGE9i=+Vy+=S^>jI#->XNTr;c zZWZTcec%3bz}ZRF?qi#3w>7bDOzV7@Q28${tt^5sYBI2z`MT@$F1l?4cCo71fe{Hs 
zU9vg9jEBsaYNt!poZH=*F1c90RlD83ziJP%$DJ7_Hogds=}pHyU0sL8E#~jegtiQ z;izAvg&Hj>My3=SjigU6%-l22)UYkzL@jhdwvz3^gk&W*04Zve?@F~@|Didd0$;Nk%{Wqwtmz7K2Z{LYzia!1HVQKUXKAcaYdUhod6|b}% zdE^%awG<{*{e5*(;)2PoT0U(x6tV*ePM*qj9e7ajN=_vJJsmS*v_wtvNR9GyGJgP# zKFcPjStLEC&Pv=AH_Y5BJcZbMz7i#NJ67?*rnLjA@~%t*Y3q}dhk2LR$ZLgkgJimL*kX{>2u+S!{dzHp{ZCank3VRZEI^@O{sPkdu zmF%v4xx0b?iA-9E(fP1gF#8B|;tQtcanc_^;FW@sj~O?ym=NhvcG5|9TGqq^eR`eF9s*vx0NAVb8m@jW>WBQ18jFa9A0?-s1Fe-5s7X+RXM^&o^*S(TkZ3V z)7od~YPvlvx}!(>xb?naUtb%V+_VKCqD3@6hiZ~&1O*pYmGatv&UTCF9(5%7_y)j) z3NEd6Tz-5&BOApj9UQoMUY@2{L_4=Fcp#Nm#tL=kO0?d5yq9gvv{=H%dl_XtrRqYa zN5nN4+w{U7bq5=}tQb!sBkr6l95jteSd1T-KB1NA66S!;o7tY(;%x~+k_gYT654jt zy1W$9Sc}bm#Lws!Zz<=jn*#*Z?|2ju?i_>m`xe~6?f0zRZ&jm_y%%z<(%LianyWdt zn=LLqA7sP-V-O4VuCr=^*l$w3&1s`y*b24Ufe!hd(+^SU2BqC21-e=lI`;V*Zbet<6W@X89{|$Qd`~^l1l)!GT^sN~= zXyq^}TXF{P2bQ{vq6CpJBF|Q$V~W)Tp2{L+V&B vt4oWFlnLA$@bRk4wo|R_n{` zApcTTfj-wMVYJ$3I9PiUzW^s?Ahud)3Cmk_@tn^6dm+lSbELbHUbHT3$e=y1icH@_^$oJ6>VX*n z7|fvzID0c1hT&ylBTOfBi}D8#@ZD0%PN^m@wHUkSfrZljb+)geV@1|i%i61ti51G) z{W!wp{x=)6tF!I0WsnI;@KJ(7-QG#1p1W@H)n-BXC3b6Z=#5Iry|R~XyhTAVRQGdE z)nDXI1Q_h7)2v@|tLD$PXNEar&qxl@0>RY(eVAXU2wwAkD=nzn3({+jYtM{C%v8WN zbhYY0b3wFbveRu1cgTmZK}Z)Pu?twgWjtGR&U4Z^ta`c;Z))fjs%8uwXociq(YjWP z>c0KdQ|RP5frT8c+A1a*$vV?7UDqD@b5C+O@v|{*{Sq42+7B+2%w6VUEwr*pMX-*> zA|;_?>O?Qc&Bn3{;Bm4ZtmLurvE^SdI9r|vdS$}3!V#z)4X4JDa?WE1#ypXq+5D8B z_h6Z4(!hIdR9sM>vNxA*k->d zNcW@ax-4t(jZhyCi2nW#Kmffq4Yf1{%g>!}FM9|;zYnr&dI3=TC~T*td=tBn*WIz$ zQQF=4nD;f8{vNey3H6#kOU6@d^2`gpp6 z3<>*LqHKLDQ3K|6Vdo_*4#Dg*>n(2+FPetN8-Jm!xXio8;uV_$Wj6kvIN{k}y1NsW zfHkCNTzYcCOd!-DJ&mtYsycHLjfolI?LkF*^=5dbYG-4%c`f>OSJBVir3lG{UXL&j zbGfNDDujONk`;j~YrGdNyDx9QR&9;gM6Q)YF~>Ct4)BRByIHs9@wh^t(8zj=iS9_y zuLxIG@JLR$E5G{bk#w_PK3Fq@n6Im?w0Ck;aP%K|l)ss<$? 
znM2p&_aVtGhly6E2!@#I(AS`?6rQ)$E!?*~?B%8#!L=Fyw9}ZEeAh!#?AWDy3e%s^ zm5t;bu4f`(I%+(7{`fP6sjYi?)?FjlnG})Gb3)pmG_NZ7FEV)kYkB{9WIpHJi)IvN z%#MIXF<OOjelRglDTSh0F+t)oJHBx=-VATnx*~KMn z{pIoonY03Mg|8Ae;kU~g8sD0pzO?$qlvti@zr6oSW7_YX2L5(*@nSDLsi4f@k;*}# z#)dGOCz>w=XHlXEU9qu1gt$n>ykW2*%rfIn5(*qN6APyBJH=VW1Wmmd$GXcf`JmnB zh$-r7hl9xsp`ZLb?TemzLnfJ9zpE&PliS@rf40R98P|(h9rT4y_~5h zoFA^ee^wQE_}0&+#-EY_8&eNXd5cmq5bRSrQ<5EPOoJ%Qowl z`tqS^qeENhDRjp>*UmrHb-%cytoFWKTw zRgWv8t8Tw_wI1J-q{)F{rsIh4d~LyZVFB?AMLL1mpE9o5wWu1O&8q~-MHjNSNFRz;KZY5l9S&*Bs| zzZ|*$dkr6+87*S@^V684&4CccDyK3!3u0h1=|89JElPuCyj|L`rEI8&5)o*~as0i( zy}5Gecuz{~<7`^XkRK!u=nUcB(*ywh!*wRrdAOIuC4i^>_r%dJxg01S$ zKgRDTT0(k$5W4kQ(e?K}_s&7{@6~13pBE-}SNBhZva#oz38;H8GVh*{YQd%< z+~u=5NThp~WEo}^@GlC)FP`)!vDK!)6V2O{=3%>NQ7^b+wUS3Dt)ipX8)$4VI8}{& zPmMm;j+cT974UKCV41FqbZx5@N+F}VOh)(-4 z?m;PshNWX${zzW1E$0DUUM~Ki*r*fl-MqzOgVsf~pM)p#+@~WVcgk(Y?-J?t^KA9xlsZ3>KTS4g?ow1G+iKVni zJKp7Dij=Y#DV&`4Q#n&)n>Ta>d6)Q_VE)VMXN5=sn`4a^PNlcAOvWjD zxqfTwadr!+8f7#6pr^^2{KY*X5pjBTnr#`4kozXB2zNs})|shYJ}(i=qkFSTf}JAt zCc5!}8!qZ;>Y7y9e1UnM&$^uE;fgk6cM~uq5e->4-8vo>NTb&3Sy4imQWJa|fzIEw zg2f9qS&1xyXT83>~!`Y_O<Im4EPUjtZrG`hKsq-Te*%$7(@-_`iJ-Lj;8ctv)fO9#^#UVe5|;z25Mj%ek-%{u2|%z|Eav{uLK z8{5gxH(`~h4_rjrBPn33*?*7CnHTA_8bHI$RVC(BzB^veGt*V~ID<4iS^g|WU?6tR zC!NkiYD{e0VrBq-pZ7Z*DYW6IxwaLrPcum?)=3b{Ae8^y){DSHJ0~>}1#XWb5 zBOga}XE47)_40iTKp5wA!*9J}w5l67;~VuUrKxQrIaA)KM!~-7*SOrIsgfTX#w?n? 
z78b#TE;^Q2jg#Mz)<=1G4Dj{_9;gv+^FE=%YKmi!xKsa$o2`CC8Mg$Z0~74-CUgkK zw`YEqeugf0vG8APYsK6U1hR8*)Z(_np?`)+`#>8!dbIm_KAGH(>VW5(HR8^R#F|5o z>MLUPP<8S|-nQd(Poylh9ko5Llmj^^9TVL)WQm(P8a!*8qI zan>a^d+LQa$7NdzTkl31H+3n);BqVQudWhO9H|^f!2GbE!T`4pt38ePqM7-<^VY%X zxR)PV+kP@VfMrXYeFz6Fv-1Z1$Q&-K$C!D=sL|`3{*JZne#CQ6Ay9v)Nr@8@T3pjF zvX_^p&0kPqH%s-O9sQrD1avZ?{KVJKs@hn8we)6Bvm{N=qaM)KJg{Coz7C4@1jd6_H zpgkbSTtc6*%sd=JU!2_3;U?Sa6vd8Vu*GAaZ<|3w_!8b)as`10EMzlRk7PA z=zc%ZhquH_dPsR1jD2s|-aQ=mf}g;xg7`m1_|H>8&v%GZUf5W58|QpWF9oP2zU4gn zc>nUcXn*0&bCE&=oi|4V$+Z_3Bs150%ccGb_rrh1U1f|cDhe@Hd_m{)|)mOdWg#@!Xi6y-k z=_CL?$}2{juH}=N_tuIw>_}>Q?>;%Z?Dnw6Wfw%)Y!JUwGv$8jX$>#+Vs)KWd^H^* zFk=mfVV1WXVLLBAQN8jH50UE76RS^FyZ6jIzW>3wTr!%?e^wwP`+T-$H+6a1zD!*s z!&=kjrmQW82^0ylUT`yxF=u~`S%CZD4gHS|0DnG2&Dv1&8Khp}?&A~X zZV)p!Q+1oxY73Q_pS|jS-J+HU1Y)*92BvN-+aHms`CSc{yuT2L#fI>f-8NcF9jTw` zG}}|mlp2Ujt}$6bSf*AGL5wMd`44>7w+}RA8c*6|ip@zEmAy&va9H<{ned`ad3u8I z%mhCn0IT0TDQ1~1IsV<%S8WSTyP_J!{HSPuWhT}JTEd*?)6K4ifSwyyT(3K{Qm#o` zQvdnJO84=W0B_n?av8wt(ZwXX2Ccm<>Tlc_3$AxS{dJv!YEN#3h18GLuJB(2LGE z$^lI=|k^8BOKyJpXj?-h(~e?c*NIo!s$F zPdPUsi<74m3KKX%m?e`=Y8RQCD0gHay-r@KdK#8^D1}E*L zGxn?aM{nQ`xx0c9vlh};Zfw>a|C;k*z?ubVDJz$9R1sX)nV#h%V?HQ-#cd?b-`skA zn6ADx56w+DiQm5v(7K=VBH#K+mvpmB(pSQ`kzhFRbSj*We+@3z@0O%Hq68u zl1a}8CIeDjoMdO+O?IqW{U6|E)Wp+bp+`UHZAWE4H6nR3y-ZNn(9Ay7VL#G!tL1cS z-_r8orA5~t;1S~E(?4*y87!{|7cV>NLwk&k#;Hr|M<r3&F3wx?CAaD_76xwn(Q{=d~XY720LRmmFTKT$FkQR1`**aGed{>s_;e5daFvxXL zw!dOBGH%5}{5ZreLV9E(Ht1xg4!Y><2xZW;i;5J8QX)=TPd3aq>hLfF2P8j2Y@Vx@ z5q!E?cKc<@!F~nQ?%Nn+a@2inHj7%K{m#lFX`bc3H`v2-UwIXa)Gv}Uj=kb4$1ZO+ z72_Ox)hut$S9{uIfKG+)1g4Kkz{$=%T&s1x2c|DA@7Ltd-ekily88XJdvJ^UF9sJf z%kK}6g>RfZZ;nIm4rUKbONh2@hIUW?qvG5Co4_LGbB$znum^CyfdO{Xvn-1RvGdU4 zM9{N3)z*}RsUqXig9enFyT@J8uSH{Yx_Ji87L}N=sA5X{-d}0b+EC@`jtMH;ONNst z((aW?UTVn4B3=E)n(yH`j7pgO_6yIL;Q5oP4$q@a{sa2?rK+OBqC3)($u=$6%Ka3g zB0D;NVUITzk{;V&h(+7+Qx1Jko#LZANX^AzI=%~!iLugI`X!D9p=;UC{54y_AYxO1 z$`z+xUCHW1?3^t^CZz2;JM}0&6By<{b{Td*Q!wK7_5Lz#ZFbNqlwIq5yK&3461K2D 
zu#ZWBGfQVV#;UCiE!#`uPpaS-GE;cE%G)n2>>t*!SOw7Y4DDDiBvO8Qs?Wf}?5Uee z*^;3~T^2xX_^M;@57sTUTGd`^J_{Xh*=y(;d;i*o36}GHialFx;^xZx2kZU!_+zO2 zT6na>RDJk*;3o;kun`^gr}1MkZJ>y#1d-VE@*5YYMh-GBi1DrtNT#s;E!0dpOg;RC zn&i810Etd}@?G1#BgOw!Z} zcv}j0Ju89`I(@vD68b)TuPOlWxN(EqZ`WkyD;b!KjOL^i=jUU?IV)0P=JX+t zK5nU*==&IndtY1rDGcHjUkjp1W|}7>egroXgk&E&NcHG7#2?bV`S% z`2tS1j&W_JP)`tIagKbmyD%Ws&*)j*+Guf-bn2xzWo(wWboP?GH=aS52<86@H}j_M zs8NHxn!;$Z*-yCsZBK)D%M5RXXPf~A?<|Jnw5-+|11H^gy$My`$ZEYf0B?3Cc5VzX zET~%!EA%0M=s#2-fOrbASW%Z;t>3wzU@K9^Q0@7*oTaD+%ZzMn3Lbt4ubqyT72Ob# z5--676fLLUW!>bK+KQ22C#)V8^R!qzGla;rG}>GUZ#04ZVy>}agB{Jqu8ilp^#im( zuXA;g_8zYKwVw2mS(0Y&Fx%8hIgTax-^x3-n{b^bkgT$pf3^Yuo1ZK;r>b&a&P$2h zW1QGZyol{VF;h+fX;Y9)9+g|RsC7;wZ5Ki9drd1DpaKsr88j(cQx_GSa32XUpMwv` z)?X53x#BA3*n(LU2$|jwl9>!}ENTl;6Rx&IV=*xy|4};(R+50?DSztZ?TE3?9X_?pj@&x8$d$ZTy8y9h6xmosq; z0=iX6y*R9e3ZuI)C-an1b{aPHS}p+BW;u(P^Q1TwMur)3>eSf}R(cJ4o+(&-JT4H% z&tBU9sKj#3CUxDK<*>;hT6T_CDf?oOS!U}me&cNJsJ~{WnF$}b3zhx|f7kUrJzPn2 zvp#l@Akm_W_dB)}zO7-L$YHIidZW`{#(X_+4wcPxyh#}xZf)(_BIK|sQJzv$NmrY* zdcjTmGYo}L8a%gLLp5sqZ`jv!U~ zZ@m*!ck60wq2y2#i$(EU)4^bE{U?DvJ#{8z-lC!^sJG((BWrbNF~!c z;w_PxXzEj*Xd7TYLr-i0?P1R%A|S#>T5pxMVrNH`5t9!~B2?4v-9OjQea{ZBFQ!S( z@v){|?%@FW`^^R4_z)h<3tVTtxnI|(n2A2Jyx@aNS=1SA)otihgxw}*N^UE3uNu_) zO>7{G;k&#|N)o>EP>4Es(~tN3-ki|2?#s##x68(LW2vZ=jthgkj?v6#t8Fz`lw^i=NfbON{uKqqqUGQ>dL*G@cEpg+33WdRlS zlg;yOt@YX~SY)O{SS3r|4$+JR&l@WDn!5MUVcNoA2{Wy_k#BjWeGK;1XAPa<|7NH6 zwHCyKpH-e6PJTp6{(R?1&CeM}&O^}?Bk3{1vEjWYE^0CIvn7(Ol$xox)I_k)$aM4d z(s!@962=*ghkVTUB{cY&_>Yi_7+&UVi%spLB>E%KG11x1xjhvy)iL4%`SGf~8pUL+ zJK9RlCz;U`!6_TmizuriQeTuW!e>168^n08uwi#oW7AwMkrSNOznGuXTyy`q?+I@roh3j*clU=o?%!C(Q zt$R4=yGU#4?t&M&Z*T4R>gu$?PGn!r)=$-Qw}~5a)%jj^@69ok5+lf)!H{#{ z1H>4zyTeeagM@#0w}=XaD(XS7<#kZ4{^36w_r6P~t2o%dC zUERn3Ib;qkfslNb&K6#)<#iUguJ3BhEEoBB^-P{OPuNRtDP*%}?m52XL}A-P-to5^ z@x+ba0{!+ATgpI)uZAwG#~w;kr&Z#r(MN}JUYLDYgj!9jC768dC|P(`xT{qiXoyriYr_8^YMG#NTev={HJDK?0OPF3KhORF`#Uw^L}CJK?< zv$P;|$&AYC#zWnZp|LAG?_ZwQj&K((?FCZ+oZzwRO?bWKpRf~l>76j=)&))`)JI#p 
zF7#xU3<@)$NVGqsrU(-t9s1x&GE>ER*=7PhH;s*J|MIkYT$H0}LL6$1x4QdfLGe=E z$7idPa;+hSaqA)>PBBAjhj>N~%{#i*uQZNDx<0{0a1IStjk)Xmf{->3=8@G7`xy!D z{U6Kz{pDN#caJo1Q(y>}*h|T=N@qkP{@!^#f=^2-Ig^glfQdn8i0ERQ2PS|H)4;5*PainpdE2IcPht@AmvgZJzD-~;?qH4uAaL`;YPjCW)x_OQ3R`&*0GL_; zqT|xmn`|vNm_pbay(l9cUyUtvLS<(I=j7|eF+@3qA16%RIZWSE)(!l`9OO}3`^0`K$2@h0aZG-A;$f~9awWhS6;@glr?5wh!3wX1_ z;z=}a^0+9yxD^~j%e2&r=a=JH5E6ddd_EAT7OxqWYqBylY~Hg8M)3QXyeOUp+IG7d zk)wU_e*eu#f=*(bKjbVpZIGh2nqIBGt0jbK!ZapLspXzW%%4jF7B<-<9hYC$C9!?W z*jD6r<2>NTb2V*Y&A=Uq+G%`XKw_JdXrj{7NX-k`Z4CRoY+QVs_FqoU?CD5qb4I!6 zPV&?8xjsX2lIy;}8pEw{`#1k!EGxfau1of|F>;e)cQBp=kwCJuT;lOnjFDqqMwuhVPkYPPd3CVWiuKmtg9{0l-}ufvwM>)dkj zH~bA?pvT~HE)icnP8v{oF52#X0l{Nq5Qg4jlG5=KjB(e8VYxMux&}Dpz{ddPuUHUG z*Nfgo0E+ee#6Fy4w3F)I5iJP7EDtOnJn*B;NpjchEL16!do(c`*acj#2CDOlVBAOdanG4nN%`CxEfl5C9dLh zJyxD3A^ag&o@Jf&-ea@rD!=^{sk4+{VPt3RwEif{1P2ii5?uENb_Qe(rd%CvZRk7X z^3E(qx?Lsf%dI|ALn@K`cEWpSko4P=+SE=IO}A82Q+e;it(uB3gjS9NSR{lZSsG@A)%+bc*RA=ki8xVXn$QgJt$v4`mn92G;Q5ibGF>BXI??~_qp&t z!|aFg^fh+65JTGD=d>OpLag@_*B1U~X&J8}n$UrsaOB!bKRM&Y04qn6pBo=c1!wE5 zBUzG|Uqh3I)2M%zl&T8pJ=D7)#rEPrVs?r}V@cs^@7y!HX9LI4Ef`!7c+odkWWRsw zpqIn=GMqi?owulq3sJwh8W&h=Ac3sm9p^SRDEav-En z3Ytu7IMmC#QE%=q0p!Sw*8VlDwoB`B9UKM1e1iRXk4C~aaqNER)PLV4290RfOR(2d zIR}HDP&y9VXeGCC}Ip(ok!PeJ3O- zrAnl||MKG{;m*eM@K8F9L*oFPO>$LRH=&0*D5r7t*Z-0-MRVZsdF;jJeH}jp-?TQK1>3a?b@Cae>Ao?C;7r;y{S8oMCSOx2LEH*0-X&=wZs!=bs%0H z9sS6>)4}F%6${YE2S%Y7S50VzW6GrvS3ZPvu$Xz{lfW#)%93))Y822v2g}PpS( z4v$$(KYF{ZBQ#Q>ZzfFxQqQw|wt<(9kEgghNVkJlX$_GH2ap43x-4aek)H-y&=_81 z4CE<(m%8_!kV|`%CL5Jn zkCTLyP~NB%vCdu@M>8h{d-)N^Su2y}W@b71f&NIM1|TvQ`Xh@6ZY$1K`pm~y!`Dgp zOhS>Bll-xK2;Q$ThYWD<8a|4x(j^4Wys&Y?mw=}hYk~$p$%Y4U`NqVKG^4~W4)vfS zI$uWK+tBTud#vr|of4Dvd`+9wU?(g!dp~AkKt8uUoU)jHvt>ZJrc>6LEbMlWTlI1x z1DV(C@tT`mkj;_Cwnxa>22eA;_(wie`RD4- zGv>WQq;igEW53O+CBRV#W4jg`yT_e>p|PNXsLZt`UwA&twL$DAHPWbZ%GfHsgw17f zCr2WQajmHvb+h|+?x1<&Xr2mBg;|w~cJ)cxOutZD-BIz?kgo%V03Gb|@!S>B>4kpr z#op3eucM8QTX9-Lcg?UP=|Z8E>90FnC+|%5FElX@6LA{s@gMoRn>4NsuVQ(tSZm(p6+oX*ZNfY6aAZB@0*3% 
z#cfF#Nw$%3W!BY9L67P8;p=IjW0x^8iL-@!4;QQDo>Om=*@z^_pZ^s%J16jKn$n_$ zBGfhUU#&h!(^lU7*|fuSTi=%0Fx+i`w90PdJsea*T9#wS1%r+0qAOopvqL&B((1Z# zuQ>tF_pY~mNmolawYP>~@}s61H20E|@wKG8fuli>^UJsilte^g(%b)1y(O0pBq+IdBQFia4-@kiL##D6Qk@rmjMC6U88 z3dw11&E3zM8RIynK7os>bzFd^iq)Nct64lvwmiD8fppX1jv}w6r&dqsw~5Ik&;A*< zBZceuQrJI)?cm(P1grOYe%7<@r@bI}lWlq8I$20jDM9Z;M@wU7TT{<@TLZ}XnI)GO zx}>hIq)Bqv0{td%lGiw05>`n%>z|G)a?jdQzvO`UMGV%meFJb|C zd4fjsoowgjU&k^3r ze(PF899##^%7XY-4L{wBj#m?vRE;^|O@a@V?*i#y<;s?v z9>1n2fd5PGco=glQ-(UPR_32@>zlH*ua6HA;=lbYWF=X|w@+?M`tg9Z>h&gkUmv4& zQ`bJbrj8y^?Tp3g)`aXKdF1*ViHTr|M^b&u6tYGbgwj{d+kDpb^UZk8=~)gw3N-it z>yY#%`UFq8S@2R^YsXa1Y%gh%CI(wtO1Qv6d^XYooP-cGnS#VyX}-Kej%%EVD~aW~ zNx}z$TFpjMKrnWCp-VPAf;3^28e%l9((gLtA~?FG&~M)u^ZJGS-AZP#(=S3Xv%3|h z8YhT^(P>KhX1@bIa#rc-=6KO=UT_YjshzGNM6TzRC&v!D*IA_$PzhW%@c#!5C=0W` zqKODeitQfT(yx!)Bc-r2Ke4K0d7r12SeaZz45Q2?wg%pk^kqgWr`dAkG*c%4oyOtA z1oHW92L0UrMu6HWh5uasq^s{fVG9$)hDD^ZM2{F~4$sZ7!(#s*U2h#0b@y%$qX>wU z3P_8J(jnc9f^>Ix$IzXU3K9<8NDU3r-7>_G(m8Z@=kR`f&f_`XbFSaZKYK1KLLwcCCFMy_&1bh{wWV%S#83#Ef~uZVm4YTot#orF|}8+}YKuNi(u` zgKP^vnHa^h;AFDTKO7*IT$4g9rH;~!d7h?dwQf+AAEd+quG$tLlW@zA2G9QC8$qJu zJtND;dqZTt<=tNdQHT_torJ~G)&HQb)|D4_%l-1OJfvP+pRqMjFOHxY(tVxzo;1cA zRnl~2FCnRyXH;FdU1J3og_d+#v7pD5&Y6A6UKhwn_^m$AWiC}St?{t{WmvqBLn6)i z!nHItH|QltBR`&ov;=Tp0w_X@{co)$uy%8F^U>6iZM-`hdp(7&fpSxVrfD|Lua2oH zC4K8$!8P~mERzIZEwR$IZL&VRHy%Uek2o!t#~Ta7=lh=kly2Go&T0NH6R`P2klTiD zZ;|(xnRS+^6z4YkFH7TI=b7I#6&#K4XV|DqWDJIQMB*C&q=Kz>4t8e7%^urQ`+2eI zTYC!|8*A6gE~M`Z(Eb-qT~hg_zb_q~lbVe{Z9A2u820M>)G4hM<)>{1em7}8uMTix zkMRMhmEE{vl?_QdgV|MpR+J9|qNzB}YuNxTxX9>bp(X*t`CoOLu5Q9AM;cS66? 
zs9h#|YQwEaU)_|+2Y=kuz;im|7761SjtAGLSR6~F986VWh83I7ICCv+I39<1MwsEF zeOy)gQ8K9&K73rFZ^wx@;8g@3l`JTFnSC-)n|cMhnG`V;i8h(BPQE!luwGiZ7tkNJ zz$sF#E0YprDgN=yQgkExJ!oW}mShYzRJAl5c~PKdU8t%^YPu*pQ&fZvm(YbS5!@*jLbBhEh%`zeLPCkBuQZQbp(4umpqn{uw^Iy>>tMD!b83!ANUn5}em zFXtWjz-FE?2SEvRqAkm)TWH=r`Zc9M9C+49l=e-}o!LQj>LS1|g5FZq7dbcz`^7jAfS4^OsOF}GH> zld%v&QgGseGFq_-0R#hW!T}l_bXp8N8;)qvUTCqd@}0afansf6#Ca^{_+*!X>MT&r znDVR{!ToD4{Q3Tm=l$@%B@_O9l;(^UI z=1fpr#tt!`9HCnwRln`|tY19q%CTJ@tFJ#wX5#R0rfK*1$Lj+3hB3YY)=^&%K-dD+ zPz3e~N|r0@46*~C+k}K6yfS|s8y6V`xg>#Z7>l@{N;iFUB^FTTUUn~MGbp@G2?hkI3!#LWB$u~{Fm=sUkq7Dr8h7Q z5a5_TiD*~(7StezN8Wc+?A8%DZwkx#$Xm+5siov^o;a{=^grd)Pea^TSL3tvKsz-~xCqgkVxv<>m^48<)(juvqU8-8awSW8B2lMJEq=%L9J7 zTG7D`!x=&a((XJ1P%o7KRNe%yqx)wn{sEE&CMin+k-MGWYc6rv&OtwbN5 zpUg~+k7a~2d@4G0gsBV#Ut5x;a*K6PVKFO&?x;Gu@Kf;8=qau&vIF1ot)Fk>EdXWl z=_JdUzUryU4#|A+c%?wrLYr5v#XfN+SuvlnrS_=NL1{*;sw63BQap}o)oP5dV7W** zyWou5Phn5{0CF?)nl~>BKG+X$Je4Zo%?6BdTsz*cV0~5JbU=gG&;ei_$fW%LI51So zy-sOc{`_Vx4X9uG^Nks?aa1`kcWa!Mb}dda&_#DFg1Wu<(DD=K* z_brEkxqHoOJKs@atT^LNu^l$Luf?^#n*LZY5+`EmL3DClqpdBpA7_Ch)D*Oe<;87{ z#Ad4ok=QO93OH4xqz`#L_Zr7={5@X8`^Ffwn8xZjFYiJ*1&YI;O`0lsNRR#?3YcoG zw+FB?CE~3G6WIuI&v`_PI8e&f&v)9)^0MzwK<+@`(lb@gQ9#&pac=>J(L zR6GNnTB*IT)`v+vhk4L3BR^ULia}Xd@(QI6w8A?=9hK8*MRsJkT#~H_*Zp(wBIYZtOGtK_nVyEHjxZGLZOPXIvQMM$3`L zj6RZg&iPU+8pxbU)+l4EglBLBTFfQqujm_t6&zB9(^Sb@838}ivi$1VV$ml*OQEgXgfWThy5*X)73!H zKwQbq2E`b?a1P-%02?})H5MuwJFkCjOp)dgC1USTIZ|%iG0V6Pxh%lr`uD9qGlc-_ zn3A9_`nmscAdh(GldMU)?8c70pk9r@4z<$es3-|zoC}h0GZmoG|+2)_6(;5NW~|X z)#Www{TO#>VichqanJDuFhPy3>OxH-w}FB4)vl}eLebtAj*MB@Ws_wFiy zzMLTMhMaAaZTJM@P_M*yvkZ{LBEFQc$*|!b7)%pU5+u^#&R!H_uy#WGAuB_8qqi(j zXoaP^r%p(a?YBbC{qx<-C*8I`DK3C()Q9k@m+@-;MJ}d0LP`&JaP0pr=9?aHSHz6k z4ec`{y%KzF;-W%_5Z`^BL{TdO22dXst+^YvuDcL#@HWKk%__SU|hkdEu;uo9=FvBC!v6)F3%S(Bcw57*D zWtl9QE4S9?7Gnc+!V36LzWu9ee+=5X6Q1Rf9nx>@pwV-sxiD(!E~~4$c@u{o@B$TS z84H<5r@jiy=q#$sXeYS~$~_>ff5v>G5v@?B`9N0n44iutwjarUdQ})``FuN5re3;6 z9@uObN^Vhro!se3a!0AE*lumHdF0I(&EEPLqBf**#u9>mO-(`@4X=L-FO+;(~S 
z?&Y$xc64pJm*Yl9(lVSlfgUyCdHMs_8sSBS5l>Q}-(B>?!k&slDL5H^0~V=6B;4oy^{Do#x79S};T{h02%{@ENBY($sTQw%H`s z^bzcA8b$IBWJbUnA6}^(iS@8>O68G@WY)d#+gnaO5BkeY=T@~fSYlXM z1~a~8)Vt2=kcSOV7@7Cis!qlswpmOm!<-#&Sj-~K`cjryhS3RHn+fx~n+JiUYfA5#SYx8IP%oqE3?r5VHUjOtZ5w$IO4$ffxWLSc= z&U4{_C#=o)!5Kvecid5%P@pZs8OOFeZiyz-{VHO#->vn>`UxIR^WH&=OwqxWJbTQ&pex8c(3ki4w^~ag#F`-d!p|@Qq5T@8OTJ?}?5Git;B`CuNN@76TH=+7` zkP=U?2YbDzF+6tbxcE4STbho9fosqv$G%5rgxaiMEQ3`E!TRn_E|rd1%Ql7Hx(L=0 zBJ!?;f(O<>gDa&c4n3?#QmbeZMm0F8JS9)$`G(1HO;1FWu0Au+ijNsS&5_otQVw`% zoLkO-^o9xmZU2I>u30^3;UEyJ0zIahY1DU}Ev0+>>@V`}@0|P@4Jzr8*roF=+N6)k zx0|1VST=S!FmMU+bfQPSs&byRO{a} z^OjLWd#wlE0;LL;dZh*A&U4hyw^w_PM#*`_6no{iU)nMqfDCDs8{zzSX!DEk>UFkX z@BaM+;i!HQXz$U)i0x;9($;C)>m4qr5enc3PIByfw-@DQhu`nGalP28<26%T?z6oN z2}T-piLSGTeC)s{Vl6nOEYPR5HttjVp)+ACqrDp9KjohbAOCW+J);gQu7D@#$j_|N!v*vf#u6eU5_`IEkpM9D>PNWUt~gxtBR=;k?qyJTUa%@ z*5vtVut+@Q+lWo?{xs2(H#0yCD>R*!AT>x1{?AqV`}i|QaK@4AsvV-le=`p06wGyA z?+RFwEu4pRM{`B3Kc(e#7Nc__%H25*^K*e47 zSsKCWIgDW>L$B7_*K$+=zp!|>-zL3)U?I}XMbg#K9BleGO>UFeQ6z-&lm^+csPHvv z5))}1%&f#48S5-q53+C{X^Rk`VibpN1;0EC_?Q(V7m?F`M74H)=gHE-ht^?RXtZ~U z@sXKNKO_9@X&Fvf_c_=nrUBKxoLTvGT(LITvC@oIS*K zrU5R$eX2d4;OK$5Y~D6t3->sV*Hf~<`s!|~RgvL^{91+BLB7NO7v$GdOPLon+6=sr#SSLqG&~9KsV(xBG9sv~ zWe$^99aUBDbbw|ztuq~` zH$6k3)ebugchdm!8+PORLk@j%6gDR5m*#o$|8n(Z%Ce06h^hQ;_7f*LFTD*fHH{Go zfY^)2(1}kw#-9w?(}jeIZDJFQ26gQ*-TnI2}VXrD=kFe1@(0T zyz+<`aIU~XYFjb$|Hz3L|5L>p*pX<-n+Vib*(tzSO?4NAj>fl|xAINSm5x(h*L#XM zgl*yo>z!upRc&HO>!JfU=>dR#WtqdEr~h7RS|sP4|YtG~6cY?C6Btl7Mk zW-fYe>DEu}V{+(IS=024nbdsrO!HY>nxmS^vp5cjrBHboy{4V!l6{=FJhK8Us76>f6wlJj1q=VFnT3iwPh9+^lzQ^28doJ^|ige>gqN*cGIP&@H9K@ zI&?~G&Je+4X()R+27NgV{q%l3=aTuQ6JnGteX1*qlkCLk`qd-gnydNW*W4~T z^W;_fsv9ciHIbDFDnbi%Iek6Hz3elv9tN=O^~Ys$#X=OVyRo8YGWA|%R0v;rfFu^u<082 zcEu%OsX!kSb&b^a3z_Mz5103AerV*az1M*pk^07OSljRBbI$h~&i%eIYRo&*6Owv* zWjag8G;u|(xDZ#$36>Nh#@bfIr`i3*mSswQMuZ)1Jctj0ME+(lx0uJ6Q7L2UswzOf znm@YP{p@>lz|1~WwR9~vqn@?*zBJ-uXXM~qofRHA>*@d}-@G@PQAzbbA5F2-;s3VV zI#Pq*80ATFAif|ylTOkdW^&&op(b 
zHLN?3p0QoJl0Z9;E8Q$V%-t}l-HRSfC0(fPywi36adTMFxwb7!#;oi&ThQnWIqRz& z672%l1|h8%@BEEr`i`jNlPb>3rU1RqG~pW;&<;&eYt-o$>dwdO5mNlofsMN2XlnfY=K^-UEr;xL*S0jp53u zlOtsNz+XzI@)IFwEMLeC@FLAH;-^y za0f378KWb63DjG9=Tm$gur}0kWnk+Ck}MzS$X5xjr*WHKJQAL^u>ak5s>Gqud)|?L z{ZZVEYL4#oq-`hEG|bQEXVb_qbzW=BlejLizTY?X8fifZy2VY85P$6Ohdb|~1U2pu zgG2L_Xz9l+z^~q)F*91uUR9#9{DB9qZ$7&sLeG1EqSI7to;yAsfiO2;Yt%`=_~(32 zSzYoEn@m-j8>Op9dc-&05G2Mab~}CP_SgiK?YLx=A7MPyn%kg^^V@0M(TP(6QZc8j zW=1f&L%~s>c+T7Uo+=2G-{~N)+I!Z^O6%M8G}gxN?w!XVZKht=rZMwK>+1CliwrIg z^0duF{^nZw3<%=2vxQDm*IMizF~~MX@U+5vwvW>f-)UaU)#vacyL(liM?dQn1x2O} z0A=(0*)G0YDaMs^I{kUu!;#2{xK_&Jx82)Qm#g`Q=Be<#d}5s>{yB=+d3mc^PjIp;L5?A(K~<|1G?E?5 zvU_3IB(^|L$5IiaX$)#oZx$CJ-aZY@yBD;^Bc?FsTJHP0JyJbaVG=C67;;mJL@vwK z<5gF68xPh%)hLzHpbFF2yxOwUc&E^N9oVsPUs93myI2<23Ww_Z=rwZbtK(Qj`sOHT zb4FuSz6E8(JW+H!8TUbz4<|=P=GiyznoVSaM#j?dC%=8VpYx6hVD}mJL6_tH@S2-$ z4r3~AM}lju=c6x-W~H^}bE-GOO@mD*L?JbrKlgqOW=l+m$)!|g&^M>E9;{hT2G@L4 zn~xYC>SE^R@JStWB}*19jJkgN|F@_Q$qHU?=P~Ipu~6 zfG?=HwV)HSI)15n*d9ZeVK0fW#uR_=KxZw$bLL^^#&m1Fm36F#9IK@?A;G0jM?fM_ghoirl(Mo8%EZKH?CZfzG8e$zy-RyaD(w9=Og zNURr@=nj?#WY9b6jmK;>(GH1^o<}`PKt-Wq&wSbYw#AI$61aJXeznBw zKu@=xZ-BGP-o??C;x5Nif-+9e1{~!61PBJ!_yUu z1d8l-WwRJfT1uX63SUi$AW|Bl9+R$%%Q(}V%)YT!D0s^&d9=fl=1>}EZYIuh`p6_| zl2t>#g_MuI+}yk5oCo7ex*k23*ubGHXh)w;e&7i_l%sT_8qQ04B6mE6L6i!wHgjD~ zZr$+U^J6BZ=9n^b-PKrZ;6yPPJrt1N7-GR`GJCVZKkH^MKf!SfJ`}t>_PcAuXU6<< zXbDFDNH7`@sEZZ(Ax=FUAM@l$F0B^}tSYgqJF6pEV$pmFq-z7Gh*v z*)P zNZ+_SdXDny&&T{OAkBT{Sis!B|*%kM`puvoc zQC=k`2$dJSZQM1n?i~1#%+;mi-GiF2O#q}|_dNWpix;bqJ^g8S?*e&v)--pRkFMI5x8ZV~z-{9lFzjL^_c4e|6hdu3aQ(-mgMQx*%yy2ok z5(Zm1U-C$Je2AXB_B~XskKpM$XO8T6U=Ff{FK{q?Z_p#x4Bi4Z-{ zvw6xrilf<91$SWeEQ;I!y+bujRzjODCNq_yr*3^l{so~#vT&F44{v7T`pFW9^-v>;ZTae#1f zRTK#9CH4uA8(XCuj=jKcgBoCDIe6CSY>slonM%~Z&t3hQy}tv!Z3>+1$oPik-fP${TfKeDHBet~)B%v8mJiCeQ1*y_oF&{0?86ET8`MttfW8xvG#B&zK6w>eAo{ z=+xb0PeRcv?;h5)#SL59k<;L3VAWj}?Y=Rtr2;06GpJ)i@;}?g(|lW-=TI`a<@j_B zi!KcP{hH?hHpY8^VEH^%g<8Ps{h?*uuEQFmGDhH973}{C{(tU0RK4}pqbduD4u|gE 
zol#T+F_GW<>7on4^HYdFmQl*)w< zqgO0KFL31ye3#f`y&hcciIh|O)}`0xa>%?u@M;=VhD|T8r>7BnL=i$OPboL zq;{XmzLK(LzNgIIK1Veg>QB0$R!s2Ha2{rCS3*%_(s;#mc4QKT-{0;0cUOF7`mx(Q zU<#A8r}wD&Y1L&Hg1i4*zOa7Nopi~~yH~6Cuvlu|v3e*Vicx-)>6Yl*vcJga)L&*y z+r%dT5n~djXT$;VJK5HKv=Y7MoT73#9E&@lD_I^E;uOq6+zrM!e{E2id`>3pz0;T0 zQR*&WC4@WTlI`kJy|fFnUWJDfXvBM0ZbK_Y)mDrYWEY1|nehT`K(@4`VkV{mneBo1 zGG4D@^ON6e)cA028k}UNQ-8bjkxrVKx3t1iYc80}g;pq_KtEH-Gd1SK>;4A5tEu@x z&bR-YwbFUS1^Yn%5`B4GIR{g-rjgTbkl;F;N*)O%4=9^D;f(Pm?WWw~y?raN!e-ms zu6VQ}z@uFp_3xa0wyYs3b46)`j*;cpCO`0kC%nn9#BMDBggXUJl3|{8#5Oi5bJau8 zWFeavV$qxO67^8A1(V1?-lB?hvN(lS$vp;bJIb_tNkl=S^eq>jZ9awheL3OGf||w` zzPPJ>{F0&K>s;h(G{4NAXJTVGZ?^At^VyeL!8+kn<}^`U84-NR0@q-%(9!?lT)a4Va$JCmX^1hq3OIQsdHRWXY(&t&(yQPEUE z;|;v)x1f%aePFSQ0()!$IbI5BW0^1g0e5}e#Ns@H;#_I zDE3sYmzFing#K<4%vEg^nXrj(t^F?puhSI%wmSkz@ufBRzqhmvSBnns_OB-V`@<^Z zlP~xY()C3u?RmEXnOu-U`!Psf(y!h;)g#toYOlMp>$caW=5hi0_5v^R4Dyrso##D& zMzMwL;i60lgtI!%Z}NU;=lCxnC{@7hp}?SS&x*DmELjmhXsOWDlgS>9ve;GSEC9#M zBO?l1=yLnc7`5#%GaA3bzN9jf%sSZ2n*R1N5l)d|RDvACTkmM?trj zNfNm3vKbp$9Z|QT`>)yDRt^;l1(AzV^%@|r8?QOTh-rNqM;6`@TS3^A9;Lo{@LC3xpSNlwMN-KgO zhKp(uwv4N|)e1PwS7?&&Q{F+GV5!Z>R)>pSKTKeyZ>k7U$H$=E+PsAi-^4g^4bQQ? 
z@?4{B=s$Ffsj)7TSz@imui0xeFUIMcNnzF*tlNrJ+88<89$>MpU%JTJ*>l-2ytCDm ziF9^EG6Zx4)2OA9yil^!m6WCuHZ4u{AuC|7yRqNO&WKT^m=)j_ymL5j6I0b zOF%+H&L$%{d#Gs#ft?jmtR>+Xa!?;a@L7=<5uoRcEKhibnHhX_KPsD+{OS)%)&I(RD*YuTFMR zx8M;iwy`Y~H)m_2k@L|>v!zLtNQTh0_K7u3{T*vE?z0**QZ`A%5LETD@ndVo@y!7V z*;cFYXi`tr(j5Z3zl>Y=(#Zk7$85J*xpeUcE|qFt#yF$#Ws1$xp5jX%y8uWNbn1EQ zIMC)HXOjMe2{hOGuq_9iEL8A;mm5xR$bqSUW_4?D2MDMpR`np*%~)Dx7NU@Qm@giq zyti%Hum)Gp-#4?)#;({_w4(koS>NAYC8s$9vVzL}o@#sJwkp)P*AjS z{4kQSH-Ltd9!Rf17N`K8&3aAIS*IW8}J z33Sg5{Ij4RW5HDoUe9i;p4nAXzq+6GVI|4RD~^TZmmY3+3O)=f(VE&2KI@9g*|%=) zUq75&MfH+<8LprejX7wh3aaOy?Prvf|KjytngmdqnM7>qU*x@TTR4j*^skGI>A1Lf zJkn*O1n?1;DSq-+gRxraM?(oLjKFv4;c%62c-*t7tI+0|o7pWMwD zWqY9wzVx8L<^ul?tA<+eFq6{gH`yJ(trkPlK1zPYJeI2DXrVSW7<)67W6@Jgb6~eP zwWt)5UA<&_hOx9C4xRCpcc7N z(wPmNkhZ8_;^G{R=hV2De62A8rl{R*nq{q#ALK&@O|TTCE8Ub2n|tlOasWjzGi2)b zSI%dFSzMDcuL!2qjjoN11$i4?b8DA;Gl~pej+kl}MMO#k?sZvZ`zgqHaWo@oS`N5x zm=DlpgRU1gtL7bkD@Too!b2^^@9x;N)_XX7XB_yp=BSr==CxaOx-%s}aW6a}HR*9@ zZXPo&Av>Ivt(DPs(UXdOt*t>r)}9@E+@7TgQ|RI7HLA-kHr#{i(G%=*WoN<#6*Cg) z=g-oFD(|JIaZxvt`*`$2HA^{o?ro2hFPJRd?@9T(`%C$|J&(=JDsE~lE;a3vG{JnH zxAD(w41e<~L^@4Y83z-9WanIQFyw-EI2-)Bz_XdYZ>k17Po#IqmL~+Sz3dk5_&DaD z=JoiJq#v|3@Esv2I!^WmsLD(7D8Gfo6A7C4X>BYR)f{|xnl_bkHo6e=!DZqmwJnI? 
zO7$o;YCOyIM~+n`J2KBE=)APioVYL*Mjha*lugQc+W0M;>1si=z|DK}Zk~l)c=t<3 z1o}fv@O^8hLcTxZPJvkY*qPefdnX}#@?Ja1;)hPF!2B>ZLSDnS4!d_WvF-wW`!#{s zT7vdaJ6q5s8cN4*8z?5O@@aKKbqAV^Xmi$cOF!F;(xk}uvptCP9RGu{h#2wJ-9{Cw zI4_>3C_gbhTB@2B?#Gkt{!6;oM(ZSo8AzstU8S=y*e1bF#H3D{H|Gn| z0epwddm$%5=@$dU80{`i$V;x#AAmXxKr<%<&{nf}xPN45iVl!;F1F2i_vGbCX%Pi^ z3#gdQ8~Y+FC<6nzASVHI*A2ZS_HGdtiF#OQsMmQtC;!n|J#AWxjA!N)`_X=BT16vV zP~Y~Jor>TE%I7WP)BeM9Bqf5-X8d8sf-YsnQJg?kQ&4 zK$P7CdTi*pqREaol-gLUnfbgswdxn9snM1>{h?J-H%>c)AS{f;yl`9UisG>Fau&=w zDO~Zc=|kr4a|!g_ifc?(6Bdj{dQesPJg@axSP=>7sHCmzuj7mKV6vRZ=JL-OGB1(R znOCpThcPQu?|u_CL2Ka9qGBxTM^=bN>Z4G1@+fIRonhVFBegSIV|cHWY+fmPYADGh zDuM+1@X+2_QvIM~#iq05r8AV8|4hqH@PZK2JQ{%$u^rDu-BhS!uey2Cd{A4yq`Ktj z3Yuh~WFl7X#=d3OBvWc^J~ ze7}8Tb>4X+|K0zMkeG4NGTf>&$=3IFh12nD1YRt<0NiarSA|f^tmHPFHEH>DU*A=C z`fBeF)Wr_!JI3o)ae2HwY-tTNpeh1{*~mQM0M867;}$gDva+Xz+(h67xK+SP!`Nc* zuug#T20kwNHXXZKVhYf3)+(kbD%3{Cd@Z0U`oEhBe7^k?KXb~ixZmyp3Cr{!!IreG zXYcUc{5j2+LiP=bU2^G3zY1O~%edKBEo6b0$k>kyUTc`-7k0mYtZOzcvNP>R*P>(g z%0B{5}7h|ls`jT!_64Sb=~|lP4uS? zCe%!fM0m6ClONvMwr0O%R=z*s-RUHrt_K59fE-i3w~pXIfekc|T2TnHH2m2*w@TIX z?tOdCRX$g>VjGaQ1=Ir+Q?UqvtBQ!HD7nFQ9{F2|uJ!B{z^Jz>pPWteGFa1Q6qPm* zo3=<|+K`;|@5+tZA*&&32iDS-I0(&EKIfbR*@meBM2+0X=)d?3aqhWFYm8 zkP8sdUN`$5fw!Td(b6l%AcE$vl-tL|tqsGH7`(C*Pc~m+tNcIPX_qUcF`JE~~)#v5G#Z3TwJkfmB2d55sq9R71|! 
zw0{)JViwz0wBzN(In3}!MpM^m4IW>ua%X?Z`~Ca6&hoo!r{nePQDxEgkEE(YiM8Bu z)B6=;To>}S6;1B@hJk2yKug@%H2f`o!r54Y%LE5u5^;*=TXIVtY&tp{y36Uyc~HYe zV6^9J$>vu%ryug$+n}2_BOmf4G@>bk-ji5=2`EGtiECIRe_SSoRT`!}B=JhcJZ>-kzUFjw>4-K&$9viKn0rY0C_iz@d?zavcmFt&aPzx)#vw!v7DlR3v>TH5;I2 zo*2vnET+rJi(@|v9n`OQIhtE%;;%EVfpmK(Fuh_rfqhj)jOtVVP^{5QIWFZvjA(*=n#$`y z`nRWGX-gk%7}p+UXBau9ql#HBxS$Xf+-_{(@6_`tNi?d3g!`n2Lv;*1-{?6ZG)#-n z!y(L{7?1)1_OcYK7+W(2B!p7p&0&EV>TB_%Oqnf1iRnzayWxCah2LTtG2S3|(DTT* ze)f7~?aE8`g_~?yriU1C;@aE&A!_3*&7CA-DMEVbWsVPp3m3mGYnD@2mOlJlvwz1| zEGv1B_J8@lBIl{1vVE89opz^vYYBl(-B&}LpLIg{ z$f}cfp5W6p-qQ8+Onq0Y3kYoLR`Wcq&brZ-s()M1Tyx|&l47Wo06EF_P|pkF8b%M8G_8;~YvunI@ z5+fiQBX}at2C>iC9D))>65vWjH$0H?==k2@FW31|64hUBWX%mVIXr^^HkNX4|5#;d zvC!XCe1caeB=a!ep4WK2S@Z3##h}Ma-2Z#&ZUa~1%**>4pd&zzJ#%Vly|-?f%OA+| zK*Gy76F{z6KXall~;Uj=TIM>OR;9}lJ8SR`q#r*SC`a{Q1g zdT(qN995Y;Swu(5L$EJ?eHY3VaLfjOVY^|rlFJjxK<36i#Sye{CS#0HlD>lah~mF} zS(~+kk8BQu#jTh%R4tD&t(cAVVF%@@0)pB$8CjK)^Gz|E3QA9e9370Vr)LPKpBnG} z`VuLdO=_6YOByT4Wp82sPur-cK|=39;R$Cr7Qg+N7Ea$RN!($GmsCBQiY1?jCEq5% z-z30)%=vWF#Am>JXEbmP%kLums_`=~Hx%Rh-|On%iThmg!(vg6$Hv_CV>A`ry#OHG z^g>$?6Cl)$-{v7B`ul#j?XX|m)dQfY!nG(Q>NJH{Adyem<-_pN`w>J9e>Vm?WtPie zT5Rk6%rd;>^U3zdq%|zyfOdwwO=@pEP6PM|i00O*0HD8d4-5@=jWqDpdch+A78&|p z%ym{SsdCAe%7>idfB444SU@>WCWExX5=ISX+^HU4>@Z9JHX97rZ4XOcxIYf!HiC53cR+~j4=rg2>0+>$K9u+HRKu$X7_l)dFxcweA-7*Iy6SMX=667)eT9TP0#iY ztfY``lP=hc<&;h9?zP;Jx5jPQV#(e_YMWh4j_nYTn;-X$Q<@CC+B;L6{Y_m6FHkUb zR}uZzPe{)ZDMTZ(iN2*Vpcd*;=#8KLU-lKK$+A+P-KeCOS*Rl4b?}}fCr2T;FPDei zx*}L6fUebAy76Wk42=IF(e%$!{S-A7Y9)QvGCAd7O61zZn1&<&{n}xpKhy}$kbiaQ z#&MsdFHO!5bvV=n-`w4|zVKVhK+hBrozy4A)(yj|-kLv5%>q0f;>MrKd}R14@jnk5X!I~)TE;2= z?w=1OY+{sXpXuH^k5nu#Rc@1?@N?L$xa7!bQ_FvrItVmn4vKnnaJHn>)yGbI@n}`o zhT}Hp7_8iW%Vw@@(y5;a@D@(zCdZLT0Uvi_`bGsuU_`gJHgHQuQt$kF5x z?)17$zd74_lH2qV8vT2wKWk(_dNu26B2REtxS1-TCU5xf5&JuQE}^!(4Sfq?CAy{X z!e1x5$1ZUyZ1M6L=*wDfxts->mfx|Z3T4)_+^QW)m$D{hVa>RwPv}n(p7s-=->qW?_R{_l z>KW1BRxHepf5O&fs0qq7GthzVcG(vVo)$kHVWx$i&&+e_8i#sCL(;dJy!<}r@D7{q 
zgv*+ty|o&HBD_dfc6?AJL+OEorsE*h3U!9jH~;4Igl~T^abgi%m_z4olRNf z^kWjoxkWe2>;X+sHmOr-%t}wj?28kzI^4XRbf`T$P7mPRH!=>h$kS^uS(Y!QTs#*! z+FdMDjm!SssYzidY0R^59`(`TETg;XKt<8An;#JMtNyldDX6khe*sS&9iK;rsi{)r z@s)QK8D#W6UjK$e-d7}ZpTbT1PkP~fH6R<#S{8z&WtjCZ=OlSyJt>KH1kF*DIV8NpNn?YFq* zU$QYQrwT!YKJIVSQXPn=Z&kC8@FijU&6ZwrKYv&r4O9M!>n{?se6RqzHG;W=_D2$| znDGDI03WPm8i16k`v{AJr)gAcDwpgo${{ta_<-@Nlxo7>sCxHwcSE;me@V+Y#UF#m zHf4q575(()#7_SFw&xDucTO#$fud5h$0NuA8m{y8m|syq^9fr-S|zUM4u$6Zn>9u) zm-@UCE6V*6{fT3hiUPTh_EJU}x*tC)jF{resMx0`y}1#|^|26szy`{gXmLDOUs+6C z3QpNC@6qZtp}2J-m~$}IuIxSN9;9CH(h9VHlKOa!Pj2#=lKMxgUjCHOs@@!*fuL(f zzsu@OkHmfFly^Z?Lsy0raK`IrWZr|T{pykHp=;C4=zg@+lieZDefpTJEnX5y@L3d|0@ zs!z68U$*_Rh0jX>MZsHRwmp~WiSQ zW1x&5`fvi$;y^<%Kor-6LOg9puMChndX}pH0OeHW2Ll()0V2Ow z8~vCCqY$?%m*diUU02)L9BFhNoe8xu?<2)tYiXD&z)OTP@8;VgEr*cZ&4gFl<*ml@ z{Tz@Xv2;AJ(EZnmbKRIa4X34Wv;CWJQWin#i&e$xX^$%}=1D+)>=U4!!d%blhhME$ z(hXE0JnwnEyGdvzPLAy0t*d>%9N&iB>W(8^Lg(t9<_2we&36u6z}H^-NJbF(0w2u* z=&QM=>li%a)}n0{>p{((;&tubUqna7APJ*_ySiq6DA-;Brsm=jPR2qU48z_+rkkf> z0(ZnR3C9_qMTcM+2NFLyEZMd;ui*Il@J*{jY zzg6IIEE%HjmY^wA9LW$ z@K%UONdI0OmzJcyao#xYJ3pKA|1tF*P)%;pwy++$2q+!}siGn((nX|;hzLlNCS9c| z2vI^0AxahLU4&2sr1#zuK}BllAT{*RLrs8?6BJHtSZky^vYtoB7oH z*=TP8XhNU!a>HGX?&r=Vy6FC;s= zwqNLl5*iljc1lpDNb}7Chm1->gFX6~f7om8bn zp}C89(&9W<5(eIOejpcixkjOQ&h9A7i%|!JL1kt@6@7(iDgXivT28{T65t+_1u_Nu zm)boSBsk@XC(i$67axj$SRqWcIBmE~vnq*zFFW)TKbWqu zJjC6iBRxF8-%6|P7xUuHZ@wYOl4xIS24X7|d*DTcNM+WdjzY{3gKR)A&jeho`aWok z`#LdH@Au25kbhFf+4xexX=ULf*r(m`^U~=J$_EtDd?*>YYC)Cjcn@w2J=sksyVt8< zMI^!YQofRxgsOKk(Io_KthL|RA1mYeEojTkn$)f%)+1`Y6>|^@7vqe^PiiFZ8+pS@ z^>E)CB^ef^x;Sh_Q)5icS?TYrk;!dl(pG*Cdvejjm0piGJF=eTiZnXszRePpml6Wk z?7_n_Ea1^KTMdAiaL8xQO4`cD0+T#1S=hBYxkX=$qJk0!rZ#(M)FJC`D1|*Tkhb=H z3YHdoa#Lv^ru414QTh6y)d{ad_L2=Y5qRM19hmMnG_U1({H z+MHDZvFOfgaj)`fm1Uin77q9cqk~nYH7V=hHp4F>pN}!r`q;VtkM(7*sm3|_V&9q4PPdT&cqVv51?CHQUB2bYXCsFs{g-FM6koI7roTa_+M? 
z<^ZQR-8*eH^?qJkOKy&i{C%p%TCZSoW$7D0xMKlGq#3YYb5=d@MPl`F76MRY3@CDx zNiip+{xku67vId-5SI4a;o)HQdyd8EZSsy-v*QanuqXAox=&)8v`V6#F`f1AH0|v) zrYTO9XeoD^A+WCt7Wa95sS_xn``^QTboxx$A{o+z8rKH;mloZ@;kqr4&N`@125#wm zH6JMv@P*z+jQY8kG^zsEW$b9BI%4ckdUk=Jfg1wXi|b6l1(z6KYY%&!4fAQBWru&w+$w1ZnfP^It})uCwJ2X5D^Rh0U647kWl1T z%OGx&#=DLj`yQ)~D)!XJwLBOH!Swx5{bA1z(MoCjsCIPG;=YJ{ZE@es0Z4Eh6-bIb z)1+jIcg(*$h>68yQutik5f}2I-;{M^QgLzT=3l9! z@~i@fTi==@yMqRwK_6r4;~W)_-KEpL#h0z8g5_kIRMtTH0=(a(HOf?hd#_erlnI_o z^oNE#hL-8>g6H+Sx|}tdc#`PI$xI4Lo6`)|g`~$-dKm+k?K#}@lV*tICr(}Jo>BY< z@a6JTYn~Dn>Yy9EX6W6i3V8xy4dcC^k;?2~3-Y%5o$oUTYb43Su!=}w2fO$0;0@_g zVGg#&Cctrs%vam=)`HzAJUqVEa#$d-I=T1Q+vIUczh@T&lb$$L?m3Tl9_jSnC=!j*&?qlUDqVR)5Z2m9og(B6~!b+Ns^p zOzZ88ed^%6#X0AGkDv==s}I$|r@>mVHsSiDB^f$_k>-1gFc4&_$`wnRuaO~HQ)CON z-EBmYm8s!c45VFg@97qAv2-qh7h*z63|!;UAk|kp!l_-3iUeoCpSF&k1Vldo2#o>h zM1%ucY~J0!16NxJ-7hvnuff4P%{`GuQJE$lSUc`(YvIy1(>6fHyW^v z$#aVRAP$}I1>t?to4QKGutM$=PXO~nqX2*8cr8i|nKFHoxV8Hga4Q3EXo1)nW;<>g z!S1R4M5sQAtL?QgjfHMj&p>|5i?L8BbKZ^$)G{;ON#RP=RnvUv6t5ZLCrpN-W0r_} zv(%VA+6@oDsl?jiE>#_=D(CLYz~5z1-|6tPVT1rr({ z`R{;u)ZeKuGHbtay0jXm6zi)~gSzdoUDUlA+ccFVm@efx?i$t1vyKxE1N5oF;I1p7 zW*`2>Sq;-kcR1l$_h`}UmZufvJK<`?-J-h4uWx+*9 ze2K~QkaDgEaMtr%l`Zu~PR1MNSoS?u;}r3LSoO~E>o%8e^^lJTv15PP9MF{QnL4N9?$ru_%N*ezMd0}Rj~)Ds?7g$768~jOd55}W^A!WC zF$Ls2-m@$76+M^TOPVc#|8T{mf4-%med?~uV#vlIh4AB?Es|F2bc@9h&A2nqm$%iZCduZhtoWj0*(y^bpv=4$;c*Q zmnSHU&!+9MoLPcGQWQkl!(MLl^p-)XX<)j5(7V@WhcK}u=N>woykc{CHv0+Nycu~* z>@2Dd#pOF{W*YC=anbCp`4j?AritThl-nZ7PaocJp1?H!26K%sw_9U3(Rh$vAX}gn zo+uph_h!d`P|1~`a*xXJ*UT>}VO{3aS2d1WQ)EO}H>MgruRYK4lo);%XS!8iV)w#5W)*?Q~=X_XW>;qOpV;?a3N~{1l?mZnt?xQKrP@ zn|o5(Q}lzwpNpS2p{g5>Wt-2sMww~E?dLOdzrgKQN~t#PE_DXLyOahgQ<7p11)Uo= zeH`9SJaivT`Tm5o>fBgFxGFg6{YtiRjXl5gqg^mDGt+tUa%%eBCS}2rDE@fq{P1T! 
z2;@C}vAqmcH|3-`A~i2_W9P?yW}^2!r%3& zSRj9COt5`(k1>Nxx&E#xt+O%hl*_5E?A{TsqNaS2JJZZB`(2FOjkwmh%kE4nw$`zF zTuRj(=bs-@*Hnt?fEDS;Kjv`6Hg8;K;pcc&V=&@vmbRlIK{7H%a9A=l>Q5K#=2*c4 zaYBMbPw!EQl4F`bn}LUcQ4IH7&=EzAcfHonKnk~T-zLPX*~0m_!}dPD!DY(^mcvhh z;!)#~iectT)}Q9Gs@*z@FZw;(t@{%={-&-t2PyN2 z6gur2e6;?>?|yY@XZgm%La8CvpZmPU*gC(O_IkGiU89neDUZsdfvs{G1;`(L<-FT2 z)KGa;NYcxTsp-V1IGyZN*%7Jy-j0IOaJ)X4-^r7!ot9F-Mh7V$r`s8;vL#pAHMPeiY4 z^6GV|!KrcTGFDHHVO#Y0)s!kfB3nL`WIo5xv|P1_+9?81$Yq6PC3!Ifj;|WKRr2v; zf;pC4VUOB&;O=5B-<_yB-lghM5@g;s)u9TOvWT6E2dRK?E$8u8Sp#~P-QA^L3lIy~ zI^Y^;Is@qw%`9U~mBbuY{Ew%SJQ%7%z1uzPG=RE@DUi&%T6yn4e106}2WqV8E7SsR zhSh@Y1GuAm*S}|12ckV!F7CK^Awe8M9kgsrthr*4v9K*uy3sS-6%micKTTqqTYh{k zz6^#rB!$Bql}Jy-5o=l-?%tzQHxQ#rV?i5$2nyf1HeVWam^K;Eo4-8}*K(>DfMU?W zH3s@e;n4)>=xRATjSQ zQWmiKIE?3BZIW4~Yb3wn{8&gp$hM^kk{ciI*(0(tDw|Egigh0v#12MJ)ky8hG`Spd z^uAdVQ_49hP%Z9r`@Y3lfqm5}M$e1UZS)Wqc$7KMYAQ={oPCX|DQJU6wJUl&@F2MW zEXv&lZZP*l+$M!}SDX?@_D(zQW1Qb29;mkU-OqtN%ueBYORco2j`AsfvQQ17VOf zQc9W~vHN^uNTvXi`x`3O`6PM4%#(K_l6MXvUBm8@-?c++d!oM}&z;$=b z;8S%}0T{}_F#Kuo4a0uy#TPR$Ne5jznumZH{;R$N$x1{XO789Hsa<%4his8IPucVw z?=QOh%Q=-Y&_uh-RX~}iPG`Elt-LC?k#TGqaF#g!9A^2wl5el0k~w)%LJJo_!DxC+ zM7EsnF-{uxojVOwaP;g2IX?hWH!Y9R9Pj}x+wcGIjqx_xIdC+TiqK9}fs32Y;y3ky zT)8wi52@`u>nIr6cI&IE6bksb91(X}be2asZ)u*`B@262;V1s78;t+aV?DXS#(CVx zbE-sf4Ve$WyoU5BV>P8Qbz`V@OYGG`vCx|*=jXF+kuz9}6P$ap8LrigF7PQ{!R;{} zHj2g|RuWput72WKcYb3+Cuv)RCye;g? zrj)K&oK4`K&L3u@4!?;0g-2ZDyuR73k&J0*f}6Y{D=L>@_C2rjD}z#C3_v(6#Q*|czph%QNnz~yQv$BLvXLT0)VZbXKy z7zXs=hrMRscN|GN74DQYsUTA}h?wZP=l@kKGx21DW#3cH-l$7ild^$G!fKxvlZXSsymIpoAH7B_2mO@QqBS!{08? 
zBPZ}9t08_a?_jl~(!15l#=GUD;MGDXdBcp{Zyioen*)U)ekUkmcY@Y_yJ#|t!5>x8 ze>O55pfZUV&)!U~3aQGS67;pa+*03%o8>er?Rt+O4Fp~Q|1F#WCwj*dAGba3%BFB( zPXwS5csPXTIP$L|ruX=F?ob1sIdpNqq)~fA9xH$t{ZE*%7>_WL!XCS=d-lJx1TYJI zzzwb}%&?3Tc?w|)z|9*7AX;pPVF-x7mj-_>MT867M?ZP#aH6%MCjP;KD6bulCQ4Rg zkClqCyP1FIb79MLJ${>>fN$vT&STX={7Nzqq_Te{HaSUmHYCEDPqm zqHZw>eePI9@b{eZ0Zz`zes5R|4xQOgCNz{NARbjBjhxNV8D#u%#?qxr{-@I$0+v!X zPp#}IfHn-EMUORAxQ07k`RIKpFBSvcw0);w!N2YVSQf`Uo0<`&4@rdDeCWKTD=6LT z#i(gqr3y$h${o<_$W7oU$%8jJI+;sFnnhvFmv=q<8WRzFkU}VF0>E?qAPDt$2Uzob z`3_&3GJ{P-0V&)HGu>$+;<6SKG;X}M*~ke`(eO^P%4z$U>%aT$x0GYkabWkjCgKMS z1jA}bqxy7Om%p6&4bJCYaBV>GmVBRjQsw~zf2REIMrbXS3)B$mt~~Xnyos>o`YShm zdi}hUEK4!fwN5~)g7L_+Gz)0I4*MRdRP!DB93K-bi-*}L()E9=&WuxzJ(7n7ftZW( zjYPnq#nv(5839&pxfug=6yZSo?P&1V3YixcJZ_u_eyBQgiwoG}iFQx{)M?8i?1U1~ z?_ni?MYm*rU2l86f}_fVqSt6X2#6h*c2=1P=Jq3|e^~z&oClWX^8^Tr4N~*mxmPC- z=!=`zau+P!p?y?dxc-T})LKj_!=E9- zpH)K0arWJ1=c)ij$`ES5xaCEwm=c$7`g3ceH3ozPk2w1)b179Cr{HQxlNLA_`Eb&N z|GqVlzRLNtkg6==upJI^8Vx6{uOvs`K0Pi7Rr&-St)e8d6&@bcN)v;ye5(#V#>!g& za(uJYB9`sPu`PLxSYw|m^6j5@(*jj_~9M;_nDCxf=Z|8%z&P4odd z1fXA81hADHLIfmws@w#e+iZI@a$5P%HgVmA6fAS1KhU@Y^psIX|0zKn+kscZEDKt> zjq7rt3&(_F7F~~a=Q=@*lXc87`B^QeeolBY%w8%s^){KjDK9TpT?bSLmdY2>6&6j1hw>I}1R(cO3HN^{B}vAxq^4Vbta(Cl>&Xgj8>BBhEMSOuRiIkIcsm z*S0qv8G~Wca)Yxh*p`0%2XoR{bY>*Y#s6=H`?-=L|hYlP2f&%w~(3m z3eY8y7Hp}diM_Y}w2=(mtQ2@YIIdBK{-u@av)$k7!@dLnevqeHNFZGA*5a8;n2Jhi z|IE|qvWoV-A3|*un;p(rCrk}1rV|waYM;|qi5&%ybgG#ZP*;f^O{~Rp0oxY>au;sL zVU{41UNr5O#I=*GSgdFv9{8s{;u2s(9Ge^1;8^L z=g!?Slht-YklNL_vy?H#?`I4Y^%hc<1VDrE5cF?k;TC|mvH(h63-}Sr>@akH$A1B~ zL$DH+N2^XRju!4em*Fh9>S2$$cT1rXh;apbe2GI8qti@-1;csnp$rm->FY=FtX_LbBM$$!5!_JRU37%a<$g9(pLQw5H*k-ocA zpgZu4NZJEw08obe$-ThmbFa?j{c0W;L)sZsk~%8Pt)xM=?oycp`fC)t5s_LU1znx= zN;)xVNiAIU76Y}2{{!c6{pzJ^2u$nUF+tJW1mLW^kR<3*ojqzl*HQ63_Q&1^H-4ZK zu&4q!WgG~5k{=+ZNWffAIvZvd8!@xHw^&t~0|HwuJr zN}7h3XAG=>($VfxzYt*b!-A`=ToZ77Zbx14MZ}KK!r}2 zFQ&r5!R2GCsk9lYr|9$`R(-W@R9F;r!xoBm<#<0LP-Z&=3EhQxB3%)yXo0}n8v+=8OS9dF}w!Eh{0%A 
z3+0@gXM8q5xsuR0$rHSJ%M;J@*6Fc_hxwL_!~DsxKow<#KVf%Vpa)Ko2~d*Qgu18# z2$(08?YZ^$Wre<>pOt&=|sNLma7taMC@DaoBGy=p;hxN`q+ zW)Sq;G(kt{MC}MbhYzNKi1JGzFi-&7$xE#iTG#-Tx+Fet7@-h8O0vV%GR&HYDf69m zMeGTV)zg7s#K)+Xb|J z$KC3J;%4%nq54&O5#Jpuk56xneHEc3ePF!N?^%z~<}lcpi?joei~P1qJ7i%Gi$hTN z4h~OFaj$M6ylYA&<5l;)IRBbj$`acf!UWi1HVccDDpk%Hevx*#dpyOfdi!I(39>`?f|KizAjQ`T`7=xESIZMmqpV514GP%GYK9kKKuJVfG*MSZCQ}8gE9Erto3;6V5=i zE#A4OQ`Y@1qhQB@x#rhJerjWE_83fgN^#meox>h%S$?SsldoCJJ|2?CR04L z?v}jA+V22j%zpq=`8&`{t$mhTJO=S|otJOCpfF)O6%3S!5{^OY1~CM>RO)8v`zL7; zrIPyCv1Z6?W&6ctp(s|Q$@e$tO43Q-CMDIEfp0O*a}E?mxe8 zgVo%S8uS9nSC|v&E%3mmmd`+IXb1?JsdXi+WUqeM)BH$oNlAVUN)dCog6pPvqxfO zg>W*`RhMt%I2TdO{~EMj1+j6+sqchiR_Q489r;E8=9v4FJP1}Lz%QD65S2iivfyyw z0vVl91F|WA=-;KTSc{`Y z8@0%KXmyZ9RRcr3+`bZaMXdON!s?w1RG@)AYEr$yMje}xYB9BWAFf3;ec`Ooo0`jW zd0t6`oIo9Y{5m^($$;pzM0tYNvt5tQe$|a(7!6XD8=Wp8^gt3lf3tv$?K^khfoA66 z<3!10U!RdL;sey*(}+Ljt`@|P2el0lFXG}|DZDo3Jm;x^NB*Z6*#kMBKI_hYtr366 zG}g|P&^b%YEGCaBcNLPo7x#U5t7407JL7FlL$8WFdl7j>#lKyahNk<4tJli(gk?3t z&=YGgY%~SOHiar!`~C2)ZCoA7H^|jNmXs*v?#Zv^$SW0H;Ig(23`tX6D_#F$z9O@5 z*F*0`hYII*VuO5%I*CBEell{Iwpf=e*J`ssj&Vx81K+Sf2wlQb49?|7WqSAK_Iu3; z2Nx%5W=^@Ns?7ZDyPeAQ<;TP3P>vh-UX_>hMosOjLVhfNxt1>w(DFi@GwBK@RVl0& z>zro9-tu9aBmzM*+t>)Z*{76Wjn;gHD#Uh`@YjpuI4_^2|L+IR(%a3s@MV%tEBQ{=p}8erkVwi#(C^H-!F!ZXDyO%`3O<$R+Xb z+GE;Vl6bCZJ$iwzB7WFOvVU?@<8S#E1s%0HJ(@{4@Z4bt=!(6uC*{y<(khVKRCs-7 zS>|*|t~T!D`EQ8eG-K`MXl37cG4mItaJS`_j}I0O|KX|d?8wWeCj5!P3>Jh0udB*0 zy-a9IWuMY!Yse;B#0087;w#&43%Jb`;;IqLbA;D?ZM6TxNJWxMRx*^}j1RaEV=s{X zG{WJBo#X$`%JgzvXt6Ip_NVPnG+B4bnUg8xv#maO^jI}CQ;3B}<;|5qnQczJ9vyG? 
zcOqfvK%E$o?H>KFH#-sx+n%dxjNJzDC(xuzgZf94X_xt^|d2UN1 zHN9Rvcw0SAIzjMC_iK};ARPU^9@TL2Xy&Meq2oFn~Ha%3}e6LF|7njPrr9jq1qrz)X1IA$)Tr{6#HXeB$ zEZ!*_)BFbgnW6_feDg_4NOK?eS@m27bcVGGe^f>hv4dcrYBrl(A0#$NPJ~>zjp?NS z!zvpvJMl>dAI!tk&wokX@>3o2qZpk%_8deC@up!G(_M49KI=+4S{3#y2+YBO^)R>G z7gx^JM-Pi%GG0E7tl1o#Ym#{_@(-_`DNDpd)+kk$wNn~zTxs7EY}vn%Y(;No^Q1p) z+Hict?QuQ)2_qd+e0lFZtx(j1RE2^KmA;9j#H_dm<(|`d!8#rSa$$1G9Ua#mKPx+1 z&1G1e8CMo0oncJ#uiI7x;aMp{V+J8D{-r9WT}Oj9H*&>RJn}`WWRWUslnU!vfwO^p zQ7-C98+Z1XoX$O9i4aJeVsw*djN%`$VR~)Jk{59 zEulXdb@+yvuD5ax{k?7o)r)ef%%S=A>C`8=a~p!-YXt7Q)3$_OtN%g%d;|T-rWmTR zb4~uQ@F`wBySJJ`bejH^0*vlII<9YjvE-|6ka_vwV&2k+#Kkt`r<%>6FY*yDHmg_z zx&O#%v>#t)6ck`k&v-24{-W`I@blDwFY+w!3IF;ZN(C1gIi1_?zL6pO7iM{W#I0;@ z_Wc9KPjO~0Egjbwmvl})b8IdhmQqx0Ce{|LJTiFoMx{4*LHXBt!{*b{ch8(x_py?D z3nPNUxZD36qtm(E$V|#}#7EjnN<{cIt46x1W|@ao9MycO3bBG&=d{A~dJ&Oj4m8`$Qq%- z@+KwM?r!d4)&0@{H@eI;bY^0}87H$F`@tjIJ~S<3adP?$Fi3bKWY#}eRI}0`4kvONcs82orwSo0Kc*0dW4SSdl@HD>#0GgCav>imG+uUf zW%0Ku{YKbR4O*P-qrj%ZmrSVN zTpt)c=&HUGI$VDxZR)JFB)#mt99;hv=U3~4`BphsU#{(O5xx1rASRbo_gshluNQ+c zplmq5@wb#FQknSrjp^`PfpK@F6(ow+Rhf+z^s)LVe1uhh(YBV-2COph_KC-T=pE ztgfp}8C@U_hc8|m_mLi#Y37Y!Ko`XQlK4NaHI>G&IFcZO59Z=gS%acI%f}@8rbu!~ zi^7-mYOImy&oS225RTA7=72*s3cFc4^^rARr1a62QeWrGSxe=xO=-v;A}RI7avb{e zIFv1ggBisTY2y5At`UK`sq~I01l%H1++$IzC>&{Hm+7dv@!as?%|lU7iYk@S(N>bg z%jd&aj6M>(#3Fp@p}xYmFs4!B%9!T{orJtA)-(k>EQZ<_I#-JIo*olZb71qY47=4O&Wzc`?&P<*ml?-4!z~lh_`2X0(QnXh><_w|%Jc z`Bd*y=Rc}WKofY{k3op%l{m;S>}YiEyLtOxVEwnuP$4+W_nW2HjFki0kdhDNhia?(!^%Z z#ZtVEW*3Sd;NB$fCITf%+Z?%cHJf#fx_QkJ{rJAqd5Pga>}Uo9Syt*6*j4mUp(Wf* zA-DgA_#Ii>3a!tOKbYu7w2?IK+qMhD#C)iyH`F*cYbN!~w9i}5k>s4LH(@P3doJd* zWmw_$577P48$4Xr5kW+1K+)@6b42J7=(0!MLl|;wEVw+DC%mjcfMfYF6HC3w8{Lz5 zUOU`K@bKBnmx8qE65CdRA8}scJKuer`Zcff+^#Sn7ww~D9fd@+W_ObEsBt@1L%qWhX z3P@_M$I>dT@MPK*w9~#TmA@;i`!)5(VtyVrh$$qx`@E#b-H$rS#jYKCCV>*8&-(Ve z#&k3%W^IUtG|8#7l8A4vtpZL>&SN>FuV2aiSP)@;64GQTxx}d5I=^LSe0w&!7$R8X zzEZ<>W!(FUZqkdbUMh`0xJa+9ZtZN9nDXxRzS$j$xDd~g3hn3baN3^I 
z<796Mn9Eb5$RLsCB%L`>p<&>B3C!=m|0SyYu2taVPj?ua*nWY7`afxK8yhP);S$7J zD)j;E^nUK9>TaVaERIgRMB(K0T^+Au*778C&Q(t|*#%Q3&<{5b)qBF!m_m#pt#gfj zBR8k(@XW6jwgkh1$vuuLJ71q=^S2Y3j^M@Tnx+2O`Nj+K9BnCoDGcZ-8rXlGK&{uuP@GReZiWF9dBWSQc-vR`Ch9;Y-F_d9&cJ6(8bD3pK5?vg#gEa z%J7x)c|A$=zRPz|;l0h{94Wuf=r zk)WiZ;NpigtBdXTt(woz#w7W0xK0^!rF{-Y%nwpEMdwa#Vrd@&|9;c*juQ%kOyw#auTXGQl+AwD}=gkF=UKVBEy@hnl zpdZ^t{v?0bPtCO?_fOf4yGOpQ0$*olEuwiOpQ+l|w`M+#AM!iYvsdrk>Ti42rYpVL_QQC4mw0KW?wqE7J zc%N(Vac6sU^*6`HE;S=pHDCLi+kX;&{yU5iJG_m&`6?sB?>lus>W?MOY6D6EW_XvhFMUXOa>q$)?>G@^q!`2ZEoZlNo9E?LQs>dQ$C zqFWUEgK;fRk_ms1kx*-wv5H2%Q9Y%Oj$2f-5PiQ`iO69=2>H3TI>9eTDi)~pQ3Hl zG!U5IZ3hzdUjOu9j~&jbX6f~uCR^K_dVThR^w$pjrD!s88WLNvfHy;uU;XD0{(lE> zvcDfsLQ}Xf2b^vkEq;#Kp@?Hin07R5AVG({%;MkXU)st=X7@JMsXH2snC+*bt~YpA zuVvJv6p)DZs|5*N?a7ZaZ$duAJj#0-T=Ig@{IEba(CeoQDASCC=^u-28YI4Za!GLT z{f~~_L(SWU;&|7^^+$XQA41LJFZp&bYAykK=xHGs2SPZgK+YwhR?My`>D;#9#8!38 zlt;V)A?jOA(o2rl>~wXp5qFY2zJB@0emAu6ao5xDHbXrZiDfS=i()6=)|ZWF%Qj>5 z+vR4eEv|<&FZ^2djg2_VXP!oO8|$b_buPQ$7g67o8v6Xj8tLn#@!02k=2;8#?wYyb zyf}-RuIOx`pus`A3%B{Q3j?7NX)C^TlJajS6E|r-8O14n58WVzJ?qBk&Q@E9L13M3T zg{;n~e0Wy$=vilcP{9W4Xbc^*{D+IH9PX6sskyUXeF9P-g= z*5__+YN46_6&<}UI%E}pqb37KG1e0r{Kb`kq07OE#8%`S!6eo*9q=}$Ow^)X?Y*We zIo5|334z`oT#Cs>d8~0#ibl@wmDgUc_8u%LS3`xl`oh+E4YxBBuMIw&DBZc2dgHCE z2|99>;aAS0!{vsxmBR2?(v!{7jAco=+fE5jmSDsTODl#X?{D9&+Kd%F7|N0u6sCD zacI6`DuRRwehh_DY#w4MX&6{AKi#qD1vMv!mEGr|;mcpmZl-*`JNVuz-}XZ=A2A;m zjE>GIiXw++&T9SXb8`FLP%o=*WQE$t{FbRMC%3x<5jKu|f<-f4{(at1l7;xH=0Sd@ zS)zib%ZugoP{Y0v8*DS#RzB4gCD?1I(nm@eDc^5+om#N2n{e1`=cBj8UB@28g}+xs z(L)XVqh@1D2v6^K?Yp(3-?SFE+EK`;RByPQgZaHwG(-t6Z%YdQD1r5@Fj&Ohx{ZhT zyBBprACK{=(Rp*PIXDIs!4v-_KVN9_?A!yN^FVAy@s_V_UAQ>17?j zf0L3*Q$o}q^Dbxy^j9mCZZ~Y$w+ft&t6hg*7#2`F(s)~J5~8{Exg7tDb~vtC#@KKm zF@DLSV^6!qK;N2|^Y-j)tH5{@TMwRG?JhmsyU-g;c``cr(n9=~(>i@?x?lWP z7lqrNoMVQ9UtKn-SIFJI6t4}V--yak5 zdBBzu{~$8l#1X~5zJuZ*z2T2HD(jTjH*uvuqD(5e+49^JqO%OWEQw-NPNNmy?zVrl z=9%$6JqP#k)b&iu0iklk=y&Z2r>-g9%>H*z<0=yqoAuS#oJPgFpf%IK({e)8uXHxYrzfQ$X#;d#vmukjB(URUHH 
zUFVTC7oEpLK#iBMwK`}iDaO1(cleX>@!Wj*uUqY7N9>$O^xvEW?Fwbe!sSG{Ml4SI z5Hda^p0~j^`z$ZYn^Z&mj|+E(eVh7!8e&$mHQOc@g-#E<59o>pz+b~me_9S_y2cj{ zSXooPx-<6I{OO|nm~y$H5x=O=?!0DPanI{XLhWM3+Fma5CT2a02N~?hB0qjr;c`iv z>xedcotg?Mtfb~9+(e$xvX1h4XSvk_jpE}(mE-)(q=}jE^Xos$-zd(dU(C0sYc}dp zK{Wnv8q@N^>zwmc2)*|G_)VwpB;7IWf^6M@`rpsA{!a)x>krPZq-B&Nu4Q6b4hdD3 zq*reI;^YaJu=SL_jDPF>Ztl{ohQDo5KWyVWUDW&%%lGI=rK~wb!|(6Mn=8_tNsXtS z*!1g^eY30J;z~!lothQt$#i<`&E?-ohL6v6D*dC@$M$`Y4U2RymVnabmD9-J2s_LR zUpAa_=LPRf^PDEFdxlg+OfukZQ6F*msw~;Iz-Uw{v~AB#ceP^4J-g>q$G>pD=ii72 zLmx{|&03nZS4C6ESv!jbtGBBiP(RM9xg(nrs%>^PW-F@qTwCOCPw-_!JB>ytgTi7A z+M_|c>{pNk^amO2v}-==UmFOPL(pTSoZfJxQSxSp88JVl-mao`96!w9=qWsY+7TVT z4c#iAYw^=B+w|%jkS^Sjm*P0Hy7bS~#RRb(#O*30+6Go*@7(yKSdQD5AFlqC>)nZV z?@LQZRS$hSaGZ)f-eEM=(w=+o{vEdn=VJPMmXpl35;c!iduhDW3i^(bN`6<*L)>xZ9?IS%q-76fYBuV-fklz{I z67h7W-?ErH9O_*T-0l6g1PlDvjXkEI*E=z4^{LXUmDo(Q|6S(^*n{lR>!v9Qi=LO! z0mltr_th7Ay|Pv4EL&tM$sgIN0*co2d!s=@8CUzA#u;E!Pe?i$cGqUE|_XahZ3NGk_x{cE;drsX+?LVEyBQGA` zRm8J=S#;Gfxl^CBzX4AyD$Vu}_X+i|aI^gQexqJ{C7-kTO4A#(Mj5=}F(+YcmSgO+ zDzm#q9Vw;arlDBK=9tHrNEhS`?N0B)ElA|9Ymk$})xs-#-PR2{dWWYE%k?9r`&Q)>vidzM z{1y(bY#h9`@e#{e9KR+y0m0BQ(AtV}X-eL8hn{los+f|wI z(xWRd!6;paW$u{F4RrreP3gb1N4bh@1K!Wn9us_X*5Wek;_U*8$_=l`IZtW*`LeT` zz>E|oKn(6GDgwoTB`CorD(|=mKQBrBv>aOY_VT`(;oI#MEkpa=T%X=Ad*qiRy|IVU zKs7ly`5&pZh4Cluf9GlJj8+;REuoqpAC7!qmsS0;816oz!_|jNb!(QX_O9IU`JTnv zj9E_^evKL3>T!M+wVDUnAataS`#HR#3xkXvy{KB-3AwFc!tE41C4Ot3g_2Iuag&!} zScf!-q|>!3>WGrxkEy04xuL$FrXoH1D=yY&=f+$cJ7bBr=zXBU4|U4871@+-KVcmr z(gpu%N6L0XYOXH%!sP33d0%>8B2d$#-%J)5h1Qvj!h364cjyM|apQk9|4iCQ!E3te z?VmdZ&hqE`a=6ZGl3jZJ!krWxK^ad;hEBPcEYb{XXkOg~`v zP&V9=Kc{%;MP}9<@Vj@#$*EuJ-K{$?dlI85R?+XW$h^lu%x;H8udYr?Ry*{Jb5|C+ zv&CdYEDD}0-Zsb;_04XqTK&yLW>Jx#mQB=%6f40exx}HK=$6RJNg7xV{aydP-mP=M z{3p6*lnusfEWVzhRqEPnl;)rd<0Tx`CYJQQmpHakM-Px)C+>CK$0JUi(_((@NA4}# zt4V*vTJXe_=iT3Pvi0gTbpw2r*N=OS1LLtA;0x57C0oaD3+5qZa*0n zZG|4CrYYg%iTjM!w{lwaGp3)@v*8#VZP^*QA55-Tc0ctWSWEbMmizAg**>)(uH+A? 
zR#6|?iY4CKO4dqYFR+^1jXMfl3?%z8ojJ33RCebHU3nE_c~#xj1;b|6yE~zuj?w1V z`}Je@@+3|T^PFr?w+wD9<9LvLp zEJfFKu8SEYX%}Ym<`gY`wsN~L9j*%Pvh7uR@NFG-3lXUrE*Smu!eQ7RU3DQ|XR4A- zHpX@D$NQ4Bc~50A8Y!h1$Tj)!tWz&}?+9mWu#E^Z{*x8fe}s%SsMPy;;T%*5CiBSk zOa2_gkCv4z`xHBIr4frrOwvlj{4@OvIlCGEM0_qci$CoQp@zJjcQLqnl!`xTKnHhC>RxO|60>e9+KFrVL zKCtebJDO?n=RsDny3{RaIPj+ud7K;8pQPi)QZMo_JexOK>j_kSK(WZgo1KqUy}iAD zY5C_HThUy82dw$5<@4GGaho^*;8VPM?3V|-0yAcN^R@ zB(iY#1HDC;J*PPYkX?jX{Zg^#A7$ivgWScO#XQTJyVaowU*5c<8?DQAYWk-lFglU1 z!)Wc)sVPgDZ!K&Rw9>Me`KRL8&Q!9&wK!!Co-IlFzMQHci5`i2OA;tADH}&#^>qSa>9M%I z)VFWWIWSnqe>uO!HcZ_1C5_axRSEA+BxV&htu>TK&1Gp5=9)e>S(2R87J2ufFolb& z4dd;@*QyG4RQIc%Q0_iYl8S!NY8|AQ$J49U)Q`~mV{iA>(pc8|x81NV@hQ#yeY@ZH znwW}r1=&WMgsaH#f!<-=fVteNJG{3a1tl@TcTL_m=b24cr!|axm^6zcW9u{H1J#4O zoE!8mb(PWB;d?F?xx<2%#$DTs3@36n_Duhpq@~GpEQ?mM?eTKJi7_bMu91|g9_VY~ z(R;H3ELQUsrUbd??jrj&S|hSQs;HvYU4)?Z7dodjKF3TK9<<&SqAHQ}jN4cPO*(RQ zDB1x$V+j_x7(=?WW$0(Ab6%m!%U7+Jw1ltVU=#3fAifoA?i4J`8129m@3L6=HVosr zUjKhwy$4W}(YNjmNR_TM=|x03g7n@66a+y)nhHn<={58wRZ2j56X`t!X`xE*Ef7jn zdW}E`kP!Itzvte2&i!UGlbK91$$qonz1Lprd46m0)v7OK^=+W}>e<-*WvnyqvgskfQVN>G{aAfLQCKB=~ z>|S`cAAuZy0=*0qzjBj_H3~U=I4rw|IdQ!}o`-?2#MT&qzrKptEbtd1i|*<7!okaf(dJB;Mx1n9}__xId;HFgeC=oF20d znN&xw$}+lk5MlMW-n^r-5?eYK51b1&?h%$dxqo|M*!PalwdV_=r}vptwzh$?tJUk+~D!I+?$*;G7k5~#U zh@Yg4g?(O+pXW+*YzM{Wl(fh3J2&Dk-FEMj%4f6Twq%&-0E zT4;NGu}CfK*Rv)N6^O3LU^}S-)2R*&zwW{QK23QtCD?+63(eGg$9R} zt;uC)WGvP2mh@+fKb@T?dxSJtd$*#gvoU0=z+FA z^zB=GRg+z{ubT%2u4V;cxy~AnD_&daV$$@Uz825Yy0g5jm3_b0u-QKH8xAI~m$Z1S z<7|mF-CBv>%lObaR;cqPD2tLFOeF_7oS5W-!tByLVHw6+?HZ16d%C(huk_r}L&L3? 
zd?un?Up+Wpn)?=@CM8lc35`{5B!+)(EtwTrU!g5PjD-w5-m_q6PIs9%;Zp8#U`izC zvEUo26|C`2 zB{#%;*V-7f_VpsVPmlfOu+^Z+-i6^>XEn3lrm6Bv*Y4&GO-3c@@{sEt?wlyd^;VFz zPM|?X+0P}vVh8!!?-RJ{aRN?9MzIVJtRIwya;x~ShCXM%m1`#!gb+Qt3qO$OM{H@E zgB9`DU7DbB4fW-=rC;~sE-3_pY&H3eX%4nn{Z4232WzCc70!$h+yHHle4cRGu1pV^ zq3pA*;2o*4pPiW%FssUI>O`YPf2jH4xFt*Dd!mRvv9kaJkvGZXYWDWo?nDJBBDV4tL>Fhh0dPaE==Net?~ zvQOpx;l3=T>PKDQMOTh4FCI#;GYj_ioZpj%0x<7Y(!wz_F_!21e`=lM1Q&WaUt3gV zyC?<1u>4;;Nd=yi7JIc)@vOw1TE4pf=jF+4nT~+@XzaporqW|59(V5?i}E*Rl_$m2 zqJ;IFY)^Rpl;DuOt?JXk0Zjo?FUm>40((uq;EC@%8H_8Ma%E>>t8rop%dhQ;T$T1i znqV~xPukO12r#{jt*TTQK7v7)w%{ub>*o;JeRS{CutWr_sN8}PQ2&>UX2({uOP2oF z=p&)^(~=M?d~fRnyU<>_n`XIH*)*Knu2obEH$+XIr6fl9BBz8oPJ zO@5Evs+Pn`FXU^3ucJcRrL1g$Fk`q(5S47;hlxi)OmolJ+dHuAV#Zn2k8fNVR_2d_ zZeyK6H-lYP5B`aN3=^XIhJGauY`MtIFwkz##&RPUU~&k$0&>d(|mfA^2{(hz2^h4T0wbYE1M zs!Ed3)~yrc#Y>$`%}b>}F*$fC8xj}u2EMRMGT&+>N5w)APTJ(NieUpqZQvVYemhl# z5g6xsgTlu@nLa2O;H&w4C@_q_Z<+IBoY*w_I3tVxk~r~y%#fjNV#((Tj+n(AO>~H` zJwjHI@%i*QhcHh@>U*opqq`)q0tT}ZHiv%kt|Rbst7gOMwqsxpC{52Av7ZPE6bRx- zJ3G2hx>s2=YM6`ly_hfNI=AO_lHOE2xiATWjm&0upP_!X4=n|l#wZ+f%Vv&(UWSE$ z#x^8ZvBEcQhoNg3hH1BWpJX{+VRE_UxP-07{525xt6krH_TT`lX&=@1;IewK6~JOD z3_Ek0CA?=!>-Bu^s?z$-O4O_doF-^vc#9oNojUaCG3Vzy1nxRGVX~4-k1u)hN+r|) zce`Ib%AfI<2=dYl4{jLc9qxIf{B_{yeSc-~TDjC}u+wxJ?qm^U-gmpKU|ecr=rMlK zi7bwXw73(Qi`I0tLISq60!7m}jy-lr2ET+>^vsJ#hrWY4w)C%63DsmGKvue)j`}ql zxmKFmltEPRZwIbHis!WUg<12AP1m5Ebjl)O{)HZ;qq_7@9OxVlVmMWvN?7*kCbNI1 zep;R9B(ANsn+;T?=rBWVito5)G7=?T$SU`i(DqeBv4x4-`=QidEvD9X8%(Hv+X@P_ z#^n(T5R!em)xzUv9$cT<2lfEnq0YHmOX?lSIqKLA-%xwYneK#Lu88V5na)u6a!ud& zJxlH!9&fy}#~LfHtb&|mtKKUN7mSCAzz_)TRSW{HwPou_2u#5$e~)jTcU?RUTsSG zAu;um%nmvyv&3&fyC|W5Ni#}nwzW3tdBdYkoz>>uxEjS$`OG*vpL&+dBgjnCo3|_= zD;JfyXT`q!lRlFr$UOz$Q6reG9q`$bh0NZ(Z-1Y}Ma)rP&gm0ij%7SQ1_KHMnH zE8cvu9D3Wdt-0Geojra9x@y7Xow?NFgB11ntU@|>iknCbR)1hs8eU}!U#@hrrfM*t zWIxy{q+fqpcJ6=qL^Qar_|k19jT{=oe?EQqK z9u=59X!k8l+k|sG)4;+;hpnSW%`H(8ocV-}0b`ZQWx-7i&rO>=exTTj**;tKdJfc_ 
znCHmm_&V<)&X8%dX_H-4`oby?f6MpDa8&b&A&jbLWshTa-ssURY9s)CSQJdQ#m{R|O-({eLb_n3*`0`S`=->)@QSrRO-%c6_b$mB$uJzS2^0 z({7o=zSVZcadrCH*7mo1#aOQX(M8cl?RHPSUHlvix|}eraWnDK$&uz$FcgM_sJSbT z&+s$8B0?~yXOxJqN+L)97wv-?!#ue0jdxIF=zcG=%B{{_3flZfvkPd%b1jto#)4u4e`^ z@SWt)qrsAt5(>!~-&jS-`oY5TY#ErJirDJM=MH*SxmC(YXiVui#3X zz|DayojIs+R^Xq99kgDfv95(jvVZe!oMg?m=agRjy@Q2VV5e?>oKNh;jgfv{#U18G ze9@36ZDDrL)OVfS>%WHXpeP<}s5-(rJ|Ifwnm2awhTjzmMzPzhNOI>H!3R{QzqR;; z2yf*35nl&`Irggc>}|k&1ubEa_P^hDwR&C@NpzQsi)E}P=(DskvTIVw%3Bglx8)VM zz>l6LZusvHZ9Mv;c-aL!Bhm|F^YT`zF$>C#lH~Ae`$CcS&!A&%-M1{;YxiK4O}r6e z3I@Pk3k@METPm3CTo)Br-$RyzRfmo!E|> zUp}s0(`qBbX zG{(tGNvHBPJ|Bton_8}%cgrBqT1FJ3nR`vlD@XImbE!Oa?4cKH{gr~v&7T^$GU zB!!|((^7+k8^1+^fD+Z;iWCE^(x$W@TJ7_lpBB4yzc|jTolrsw#w|}FaV%6RZ_Q&N681G+kBO~!v`kq=8mX~^u{8ADFHk%Sc@k@`S zXEn?CYm!5+CM4f{zq8-8;Cvj!6q6JP@|0efVd^(6;@^=d+x#Y&bIV8a_CB?;xlYKC zDVM_8N7ZW(zE{8x-S(sO&G)JEPvnNy4;;Fj6&FSxXSoN5h3XTkT0?RQjjdk1XJ^-S zDn|il;wzTaQeK)o-gC)2$gW%1Yb`RpBFH8B{5a?HlLH>+&zVn7_s2%}y&@T-|sN!>pJB>ak7w;{)w5B>+`ahuAtbB>PfnQNp zqQYiXry~w0uZ){FKbjy|0WtG~wK56pm-2+Z^J-RH7vN9k>Q(s}LhYF9_jeU%D3S zjqtF+sKEab3;jnUq^r8y%Z3Z=nP_~28Kc5Op1(qc5|AN2Ww=mJEK{q7rb?Q|oYVnl z4MYXDPz=qA@tL233LqRHnBfv!OWC;D=J)b+!>@CZAwdgMN$_>usySJIkRlI`U-}%@u`X!!zUCN8KpH4XVe6 zPS&D0C}oG2`Lt#2YoRvLlU9qjfNVAa7nfhrB$p@&ybsF?X`;KOwqm{@ z!G{Z;eOz$+z7N{u?xyZD);38l`>Z-kET-mpT0P!lPU^zy@YEw+xbn(2eGpc#YLEHu zW-_{73>xt6$5@v@3%SXXTF`u!-Zok=e1SxKqc4ED+)Hq-{eE_^8(-LKj+gv+&CJ-) zKl7ACc1k&o^{a=RRxOn9;ga9r>Bn{rOcyO*=RnXtWH)%PK$NzW(T<0w?mgk(7k499 z{#|)3fh`H!=$ETrzzW3r+((rBYAs?b5=$j@d7G3%M0T+GcX(&CT*_$MgA}=y9gO;n zGz~baZxX61^e8;J=P}V62C>JH`VSxJ?F8OuW9E2l8!ASH* zio@5;%gi5~_zp*&&2Gjg=KV{H-yy=OYxynLO_62gx3P!j11O8+;L$sx$4o|4;MLZ{CT%;cFK%hVK-JyZkyEE!i@*pQ@OnM%`KmOT!sG#61|{ddolJ@=a#7WZ&wj7 z(45ar{fRH->haSWQiH0Z_ek<+_OosN?plki8Qx>YlDrFTnk-g@*^U?~WiUEBi*_mB zFst?hcoTNn^Jn*Od5Bvdu`o!!0YuFZlRD$i2KT0b(BV`#HSHJ zwvemhbaIXrv0|>sy5AHFcZ-mW6x#sEpvsL8C5d%ge$P2i1XU2twOG*qxzpxxG6yLS zk@Y2KC}OAsH5=bH^G}P&J5IQ{u&*9p5~^<<7waKbZ@-rymewSeIGiuP6pTg#O%5iB 
z;LMx(amJ7Z2g8v%#aQ?Dqxkh{x~;&fc)neuZja?Aq)?QK$1>Y3cs<42J;Tr2t$O3< z&yWjNKpY}tktdk5zIAEQoi+?<(_5!3MU8j=Vx#d9FL-3;!&qk`LlD~bG74s9%ddAa zykt|8)S!Gna3cvr#qI?V`Wiv=gJY+ZQpUByz5>s?mY)N4t2LHttYHm$m6}ZOlHKxH zZg{Z69m-$op`5`KT0Y$N;?Fr(fp<9iuNp#v$-eg4MIDPs52v(fZF`>Y+Fxb93J^!* z)^A4%?fJPo?tL}%y&yNui26Uq)BvIYKh*|bpfLopDa)ZK2 zbg?e!hh)9U&<|IR+A;Px#d2dxag*cMr_H^w%&YNdj)P6HorYQe=D=g zcm|NZEuV(zd^pg)zIoMK*ZD|P;}cjR;YKm|I5NCxTJ?Ht%wl)y`(nLIvSF2ia41E4 zJUca#YxbJ8c;fPnR1dEC7YhMFwL@-mHGK%rm_vj5&bfo(dToz@GGqSqrDn7LzoT5i z_G`kHe8p_QX3nV=T%CM==`l|7!c@Bcwm(0AP4{m<%N|(eZ^#KZSoVJKx{kJZwkv#1JfD=HEtz)4lQikETFPZTUJeiEcoTOROAJ^3 z=(D8{@_sZ-E*x+9%O8`Yop`7(7p`a5E??Rzd4gQZP%aTqU4eW zoBa}MJ?$94Z$VzeKmW984hH4u^jDTadmluo$B8kZB6lNx+~`ZRCLceUojQEhXQEAgOpn9EQt) zN{y|!y^CAO_noLVS4+s8ZZdhzlYxiK-(y#dM?Zu&ErZEKcK_kaKIhU9CCk>V?!;(S zmKbUS^!y;y1Q8o06+SPIn{<8K4}#PXogx+;BzVpNwc%eiC%}@LoabU2jpPM~CmNf# zqFJC#*-TR8&=N2CWvVqs!eSy?oxeLjHSNa288afYeUxdtF{2IuiDFesq#NFt`7A6y zUr5X5luHD?=5UkQDtUixN)@!C`U@%MwisGC@P~7=_dWMNAByHwt$51Oq2XpA@;BXf z+X$mrY@e;*@v}<-k&=~EnLW8zZm)D-eh3WOapt2rQ8A1GCb%bV@jSq2&E{I%3Z*&p zNlMwJWIfQkh@6>N`LN3HO{psXn<^;K2BAjte*XfYc1RwEw^`<;ZpRN#8EB$kr^L2t z_{D=kM))7bD-Mo&*eazPeW0nO7uh}dw5@_*Ri_{-#!nIJcs-MC08ZzYarRJGuq)f= z)q9d+gpvx@D)RhLS74S-o;iaf1-{H%)ii8Dyk&L$#m4WJShZFPbD7RUE)NW=wkfw!$QirM~Us>C;6 z>V*ne(Wum?=@xtw1GR*mfxf75p?v(oG${UhYlV^U@P#CD}l^?yw*ES*K$P6k=htq zz9ps*r4-Lvp2oNB)891SO-k;?TODW#mI)0B@H8~Qx)s0!%U6> z%mX`8u7<=~6nVV_x71~B{q}!8$L0WK?imwl{cBOMXlIdCo->)nw;yeAWh+K}ZsBBQ zhY#1`1x)hkr6AH;{u0Dm2N-Wx1v^sk6*e@PJKeKcgiTjP02}5Zeq2S` zLv&1cynZz_Yu@0P(zms$q>OOxM85lVi*MJ{(!YF-I?CTYCHGOrn07P-k7w`c&32gW zWy=Lj@?MGS?52-CvUdyOML#|&PTIe)v!GfVzOseLI<~v&`kkyw z8n7J)V~k&K(AzbSuwM;L;j4}_;m3LRtDaaYC2ZN9yBt~p15bW>G}ws9qL)NW<=b6K z89@px1~K_9ubo0OTM_)9@+IG1W26fDFJhKo-ZyRhSmZXTs7}8um|ZA8Jn=^PaV1K^$E3ZVm-+*WM?TRITn!_xjIoRG$3>*8)VH|yNZnQi+pw=!%|XR)6MUxWE#=A$pU2I0Jb%$L z?J>R*A#Q!_M{os@NUbaOwVGxbFQBf!XkAcxy9m%`0WJ-Qhq48|5?z^&h4}ug467G; zE9@~|((G1OT(;rczb$$d(7Gz<)K={dt1KlkaqQg6POfsRvGkk%5Z1M6F*iKs*Yh&2 
z7K8G)vhEJ&-V?LS16BEa5n0lNg zC1%G-{c7mJzZ6EdmLv~GE84Gsod>en_1gC6ty9Bu1_I_V(3PKYP&M%DY;4u1Sfr_NLE3SktZ|+cBLeHjHP!qzU@{#4~aABssb_igU z7vi~`&kecXvDa!3#I_S^qMZl3_~%ngo6517+x9`y_yN{Y$W0dfEvxKmGbqHg+2IYi z!gk(oQB-g4$dD&lp^e6dgs_U`e!(9syD6C6IoLd;?hT# zv?iBF62YIKZEmkdgEFke|B{lrG44xzrYgNGRP3bVhIK&=QQYdJAdg@U;=8}f_@2<8 z?#PGt{X%}=njw%+i40K6c2gyK-0_2yFsnP1_?#~<$*ZP6TnD_C&Q8Vebg;MF|=d{kOj3zZP*zzyA{d%*C-ys4#^+h5|So zc}dKh2U-dl#ilH(FaHW`{t}{_cAes5t=5q5HvKGC@gVcMqOl<(iiAN91CNL4r2=Od zmU{eX?PpJRe-&O3C}gNAi;l9!JOMTt=`PTgOxRqfaDXDrb6dei;Axfz5B2FJTcwmnT}A zeGM|q&8wB-w?tjvP8GL!DaVtYp*exwQqu(v z7&Uc~o(DSqv=cDiiO*T&GeWc*5lc`|8mGb5g{#`}zyj~hn2$kQ?M=z*1OHT?t7-wtUE8>nS@7(pRRK9hi=7`oBJ$p)`T^!5DsV~I9_*}(}*jzh5 z?uS*!?{l6Qb0tzP?I*PPN-z?Sk;|1o0v9K|akMQ1?n`|=3);GFU3fa5xfQT2%q`R^ z*T!OPDdn}YuCI09tgzL_PcXI&)e60?U}T`iad5{h2FFe59~$j+j#q-J9LE=N*`CJ0 zSxG4$;n$o1?+I_^fqzpvHK8EiuzJ5PCsAC!je{6+y?av3*8ZCk=>bzAYk^!W+i=}g z0L%jXShfx*@$h7uh!YWL^XG_bivEFVb5K^hJ1O17Ksx>d>{XbQ;+f?=PJc7M@x|?F zc|7BJ-|!ii$0Go^=>M=;-EiVO;cSs_v<<@sVSIE1G#@-XY6J+c2d@8scdgX!!$}Ub z9<8iK-6uGQOg0E!`NAfM zsG`<8b2$s_7vkO$;9Cdl^#gF%O0&+dSYu@a4McG=ljJJ>AV&}6w;;B*8rktcF(u9;KnF zwxKJ{UUN@;k}qrVG?qJy4t(EkoFOdTm~Ow8Qxi{TA@*2U;K6qSx2>teMpnL{a@Ng1 zROl9H5GWHkKg11 zwzlukJuW`xpu1+b)>`kXaQoq-^Dwk~uOUb6=T4!D$1Pb}zAsP5$A5OG`KO#;^B!Wu z3oV)&=6ijdqKM^DUEI)?3)5o8waLG-$5Af&4!(81((|=Q6X0I1_qkuckAizW&t?@Ud7^5NOM-9dfR^roRWJd6G^S4>ZMVS`o=dc=w z{IHf?q3sm`hg#2lz`of-qtIv_Sh=@3AWlC#QwIZtSqn&)$?R8vLK2KmjQV7Xm`D?~ zU?NQok!Pw$9?qA0GW>EWrP>nn4!{O===Ya|P8bGDv|G`98??YRO#d4ebTOvXwr^SV zG_@ZvUC~gbKXd9mCI9UUlNVOs-eDZ(Jiiy^P`?sXa50b@7c0yQ7_Fa|&AN4&~a4ashRB83$FWvfS*VpwOQqg6KW+;l0>+V?i zNB^1J;CcYE*6rg!z6)Q>WLblO*zl-0SvO`M{yOR1G-i1}EGMex$Md~EZU`!}%_$ zn=F$NMC~v{$-=zsTvotjD^UCXF?25xKZO z5lhE0(LLs3efDy?PXcydpet-&9)~)(l5xHa5$`+(-Q{%i_=HTK$G&joF&t$4Gb!Rx zd$$F~D$Qm2y0elsW5ar}C$JA5;z~l;=G~wt&NV$gg?QmcuZ^BelE$w|--)b)eRY}c zWQKVi_>P*qR^F;@*mvHCnmMb-qc70L~D|ed-J8r=Nv_I?{M&ejdf=BXr 
zl}lnf%E;7b$l;zWL=O#~+T9}y=AI|PLvIudlK^3zMW~!3j-}cI{n>9^7!*OE&ekl{G9P_?VPCLv#vD@j%>Y)iTP;6s==dG?=QOI%_|InS~Z@1Z?y5Fd0_xC^t3)!&f4R0%i75R>S+3nW7 zJ!@}0U+e!qvHzEcZit>-Uj_#>5TUk<`^5Hot2i3Ln~WFeWbvI_G`afHKjvN%`sPv;GI_V_nML4DeIhyUni)@^Zm?pS};v&b*oyF0_i;|o+T{9Rpbz<>g4Pn_3I1^q!F^zvhPFrX03i3gxKJ~(!x-LZaMAurC zHhOA&cF6^oGzu@g)57~2%79DvFn2$mfn2jO-6Y<970r`>zu(B#FW}MJkXkKEDl2Z@ zTjPL7fSyeo27mINr>%`nmr^on$t6FGmpbx$%<47C2!$(-oSJ;KeM73><(!T`=36)R z&OW5kc2%CdMD|v%EsW+uyJy?Ia|SG5KTF-K;v*rSv?s^<_$8KcD`@r)XM;0Yo9J)( z_;05=TRY8VL)w3Ny+!!qwx*38=t~SAZ%))Rf)-O1Ukvwk>dR22UHfFfKm@mct5$HF z1U}Ej%|v2nBR^S^9+3_d30TVJp-VortbjzU$DM_l8bEn5d@?GX{iI*KJ~#wpe~l}T z@hYV(Zr~0=;-TX>9UEk`LhMHVt~Q>me@eWNr@CHoDdbB9lt;mDrca%W(xOK*erf}1 z`mwLzb3DkU!E?%s-ro2V$N!kRp5~?A^H#6bZ5`Jz*EhGV{v0*bb{B#(RdZRS;o(OW zs~1+#E{@wQkb!b7qC#Zd)XyFJ^Kv5CN8!Se+Z&GYc*npQe8aD-8SE+;&``-CRkXoL zL=QXZ%pVEsvWiI&3?RV4!0x@9UIC3D222Y5>Fr$SJDGwNcFRVAS+idIj4R)P#c=8= zQ_OfPW%={@;)g1CQJ{3cKJ`1x*h(85Ij0-fd~s@Cq1jEJ=^G;TErA-R-q8Ct^VZw; zHBrGZ5x{Yc2wPqPB#U-rQ=yJ*{|ayKGGr*Bl>>cKrvwc&T{*IHn%S~9CL3cw{Vf!ObPhiOOYDCQL#}TKQf{1=Dve1YAJ_m#omjPX z>m9U_4?AyX4@KCFdCaoRYUod7^-I$?xGcQS&z;4Xce$3{(@@A8)B6Do1l7t2g`Q|s zG-55#GB3K^pbBC2z0JBn_^0V}hw43sZQ<@C1&KpPQRyf2STV-BwTpCF_e>p_Nj-*$ zZDh3NV;-daEl#&)&G^fHK5(-oN5o15oo79!$Z{XM^2+lb6}#R|!I43*K3&@7id#+h zT%fsHO#+;j)Xfy9Yhi7>??T^g^;DiQbt9#BCd|SnMD8ujI%u7i4)pr67OrWV1>|q} zz;LXTklXe1q#{h7AV7Y#X}59j`X_&ZIQGE?$J7&D-(S$z^>^TWJv(zdl4GR+tQ3i2 zo@jQm_XRVH6S%od%vOK&ugVBm)s;+fRD>4_4LpO721|jn9c4z9nUp?Kd8QjQ%K>0gP z^3>n{5@+*5m8oR)i)^O*rWNpj3O09tPs>Zk+_f3@{Jvi3q@o*{Y(xCD@~=Li3c~tc zA8566W7}XNEt=(0JWkU!46)09T~p!ez1=v?w^*e z1hR}$f13F;LuQq(cT_kq@BJi(a=u1JYHr$8wk#1jNE6XS??7XwP6Z#yb9DKYj)e$x z+JJ_#CYzdNEsUX;tJukq&tt&PjiwH4|%?Qz5j~^H_O)ufK3X-(E5rl_B zFeh1o+9SEbV+BX}75MSa3)zJc!ky5+HXrv=?-}@=6bY?}ZW@U4raBjArGV7(nWas85WZyTHk9@&OYR5U+ zGJk0Pw+;UPKH>eAnVQ7uhq9$=8D(^H`fBzP&yVnkWV_MtZ%GKKjO%t9}~ zh8lTKSW4ZNMg!69aGAQzxW257a0bPMh0iw$aK6}%> zAtf=5DK{iod(ox2Y%f3A8bF2X@ejh+9%?BoIypixeqqxGG@CIti)|_NV~Z?bZRg#= 
zT>y(6F1BD-K>5KhvN$m@Qb{pTNMYsyFN?s#itr`A!>{lBhcDuxZkNVSOL8_Vwjw=e zez9l#jqSj=t9E_L*i4zMuyxHR$-WOerx#9(VO{+48dq~-ef~ztNn9pKc_uD})z+Az z=DnHsV}&zXz2WeX_-(;mb!8HgIRW*}6FXToPU}v}L=TM41a|O_dYui*I5#`^qPb)( zXF{bdV=NQLsf+#9h7|_Euked!Ct%s?WxtMf;6eLF%tfCC_DZE^$#^%}p^o#`BTXjP z%~e8k)F-;fwgd)WsoXrQI0lM`I}^d5Z(d;;6A5FOna22PN~2kKZPqB~#k2lyC79nD zrGU5ZB`0;RHeHVrDT%XW%^z|NvjKdQJN<7ILgP#B@eU0Y)~Hg>g0ZjfLF`!B0cZMg z&-Y?yq#CP_ABtW4taS}isUAa0s`##0n>s$r7OgmgCg%9;#c$uMh{+fK0iLOtZoI+w z?lu%&C1$3SxjUK_Zgi_kZTLz^ayD+HKNa*%#IfzEO6EfbMfIh6*+;k`|1JL!J2ZpC z@{8TghTWGG%ik3R`akhcG^p?}{ulClKjm_z|ChImOqe052FqoDK){Vujp*sbTWpsR z89jFYk7uHR5uC4K0*n;#^@o#6y4R&bqzcYXLBJZzDzg8PyRqD-Ay}_*ANm^{N_>iT zW6aBQeo9dYBG^6-8m|h5H0G=PUb%(wE~L`=7#tXFrS?@u>P_lfii~WbqPROFL#-1I zClmfEppFx=Rvus3*-c zkeg*dsgQo^S~g;#{BY$u$h!0Obfh1Ek1{lCbfy8jXKMP0s8~*Sm-EH}81k`)X~2_L ziR$MRi@1maVfr~>e1&>&Y$`*~oZ8Wb$ZI!SgdFKDb?!9`oSO!o48)|5ZcAxT#X)N6 zCdV~nvHby1&!(Jq3eWOo8IF47ZQXP6ym2*KOZ@kMPhwNbBGNVmeLrU!cVh{z-^vJk z)9%htCp6qdf#Q72%YJv-&06DS?bNNNEK34r{j>Ki!x$MhFo}g2tfcu`Rwq^$4mCZE z!5JG};7KQoZ5P9)W&<~IcVsZ>o7bwVL?LyLMS6ov0zQ#5z)=swrQi0K;~i%>t%HRZ^ClIn$ANDMt79A~m-ewOSe8y%wjHhhVu?O1Q+x=&W`G3mOBEkP#qSz!ig*;{+129!=)g^%%8{rOoh<4xa_?5d|m}nf2oW7^=gQbQcGk4?*LTn!^>O^i`bg# z{cND58jril7K$L#b~c3F*+yztQGtTM>^j<;byH{>u#dR~7Zvg-pzimpPj{^P&W?}f zQ4lDO7!%Kuk8$!XamoM|-b-$BC9=660_mUj{38nc0$C!1D;hZY+0*`C5>w9I=(i6Nk-vIAJgglgGN(_=?lgMGN0$ykXRyDJ_Y5a- z4iE}^CY|z~4am?qW&!Q!bl1QvU3RRpGT)nG^$ouNCcZR{p8$tE@GDX(%|A}}DK>s6A@yOT5L_cUH1^j-x1 zEBMruHMH7~RBv0400M$KAq@TC{C5_U`mzsoFG#COJlS53Aiy`5?fU-QZw}C zX;?it@pqXvR7K4+MV}x!;z4P+Go|f5__FjO#-J1tH6D}lkQMC=ty5FxT4X*=DQ9k8 zvc|uB_veKIIqk<&kDKQ-!#HlO)?q^!b^rdRfS$5zuHszU zaq(&d4u!kt$GLOS$tS5mEGu3zyn8>=nNr;+D=4_OU07^(eprk!l6j^Z&vq-qneyHT zQu-xiKDEo1$EB}H4KITCUVP;}pDg6EUskw2%SR{tc%I^oXlB+}9D$9J;zZ0K&$zn;v*`FqE=dJ3`K7W3!=~Ml>Phh! 
zS&X-EhzkpFsK9u4i|I*si`8X)i}9xU*!YgGbzGHco`WrUTon#nJJV|UN>sA`45i)i z#Vyfd&i8b2$kpQ5;bP`joV9#j1##b$$u-y}O&4OmJ`h;o2pv!J92tF7{5Gz7k(OOK z`d@hkdX7a>A*+>$-#5%EFINqRJ>FM53u1fWnvVNla6IP4^sRH%Z+oUz=k!bBI0zQ* zJ5?qMw8Z@1jq6rz#QmMYG9Mn| z@Qnz1_*y(K%Gpt4`jYl7$N6U2^I4;>7gIv33kE?6F%R{Or@ zd8o3W`5E;%(l)Mov~DAOY-5~sGR)=mDzJ%vO|mp>bOy zqtynwdF^&ctNCQn;C?tX!H!bife?Rdgm4Qf#L0`pMTcjiWglCezlrw6T_7C(`>SmR zjbSu;0A9Uj+!ZR{A2n?(P(vKMhM=^nl%_SPCC5}v%D1Y27q0tugdobj(bP zASPAs-q2V-yH7vUeyWc%PvW&QGhc?)S>7GI8k}!oTu3#oD_LU>gmCgXa1P@tb_xma zZz(eI#Kdq=U`M#ahG$ki2l`=6>n-oC+p}o!U2BJR*k97h#e8%rwCTyo=u##6ab`TLOhw&XB~l?ykfzN#|P6gJ$X)*VOc zhLhr(T-;!QaRKX8iqe};&0T6%7U$ie>DOvqIk!qCr^QuEa+M~wDSOGP={IP9 zKKd;?Zy048&*wnHF8}Sk6-NOl+3d=skwO>lI)A3~@?my%r2F${mup9~!J#a)_~0JR zdihyolD9eyH+fJ%;Y>SP3YV$EvBtHo4kSqR+3tM~gzQ${msChQqTN?yn1zT<<7KmK zjXaKQ;S-T!*=0msBlMWok4b2z-L(R)G#988NuK$q6*XCs_S-d--YN1(KF>NL5-ube z61hjnWnAZJkL+0*K{SaBG<2}yX3p2LH#B&%gI3Sk(J^QOBF8!%9c0yJzI}z@}Npc^RobBo_AOl1m|ortik7l2b#W<0# z!wygJHV)^U0OD2v6bHxuJ$U|~fY37SzeEJ$syb&6dGQJN*j{sq4$i;7&H8Suh#w~&Yw`&dGr_}f!^wihuijAPr}O{|2EUg znsP%tKE`HT(2lMNxwriZ&2YSg6IACVEMe$Eb*$%f9wqoBNeP8dN=93&m_lskB{NK6E8h1s`?3NtVo^TykVy^M!{@qL-fWlO+h zUi2i#u*M@B3z3Hptx60b>1pYWR%gZ1qWcg7#6LrKeJlE}`JcOj!Of7V9ph+5!>84_ zBQ{fTk`N|yc$|`Q_(-VpeUFpH@}Xqxx;51r+uAm`5E6h1s+!-L-ei^+6Z#U~h>gr6 zQFfLUn6M1PBXGB!J?C{QmlSUZ&iLNjG~~O1?qL(f{5;|>yDY4ZRHJ?dr&2AWu z@P`2aD9$Dr$8)a5|F};fJpx^DUxH42U&^RlD!{Z+3O42;)7!zOXHS!rA#H*9j!Fo6 z%|hm<{a4|GdW!voWV4+Yae&@zlvm|pd3IO3#zQE5FWzNx#pH-WcIJyqcO+`$p}cNC zN0YqZ^1t=CdO$6GN@<7!2sYp~Ef#f+(c-DUr+3wC^wH$!*{kE_QRz7lO5VRT7&ar6 zKm>H(uul`GM)KrOD-NX3{ttu<`uIyyskF0G)=wT;RoE4!BNboZrD`l4l6*Y(Rsp7A zvL?JC<_o#N!BMwVg>5NDg>BftEspYFw)kFZC=P}9dq&J+UI3<}EMOv;ebhS)9WC>o z75qC?LdR%<39>Hj2VwFA0=Lj1VUA0Hn!-2 z2~CTNO(==TUQR+3u${ybDM3(tu>~cN07+eG8*0xpw~Wp3*J@ktUROB~bXc=XItzlu zLM(m>b@GOq-wqu34kuA2$tq^U+!x<>le%p|pUdY3_uT1A-;EtWXGzEc0Oqn^Lg24L zJK8lX&K)z2LWcBB!B!WNQt-gXq2}|IV+`Z>!aEw2p_=S}j)Pb%d`H!(k;RG7j|`Wm 
z)HK#|&U2CF^g`!(f)v-?cKjF7pt2fY2MTJ&sG29|KE*6iYg+0yQ}FQJD-Ai&_!soNr%k?f+MjqivgFh7)vU|5VP zi7pBFaAVE-!6l-^`xJ;h5Ym}`Vyif~_&w1!Q6O@+d#C5BsV#O_<%LV(^Q0p+A+~Eh z7JowE=k6sJ^wPP`{5y$K{aq8HDAs%Ryp~DyuxYYl$p~n2n~Vo zi7mRvkPT}@M+YJ+XLUx~s`yk3q$Dh%FLa{{_()&-N!TnAbCu_~ghGaQ&bD-Y(7~n! zVRb--md`~P^Za174{!&1`}1K?h1@ z;x&dv%6PIa`tmIo6xYkoi?*8Y_ki{g{;Y^X%;C0Dd7q0Tz}=Hz@y>n}Hb>7>!G;oF zpie~PysUw@dJ>a=1U_|!5SsJ6HYGCB7{3EDbn^8NnUt6SY)2~sbjnL$Kkfr}*4q|w z__xSwckd_C(P?Q`1KS7AF)^^W`$lX8CscEnyoi z*|K}v$mphoBQ{cQ2TCZ)lrsm1I$dFmGg~5Go9hT&#y{I37xPl}7UFvlh&|6Vkd!|V z!d7k$=@HJB1R$e&=a2|2@&wnS1Y4lLW)na#2c#+|Vf7J0GQ5X%`=gsX<2BN~qJm`B z*sMnS+F8_jwvKX)HEgp2w^G27V(Q+;7YG>JLR@3LIpzFpO!jNR5?rL>?z`_YQA)W> zeOiw!S?ym;@JTp8sjMnQDiU<-La0Jza(fDM(-7v`DtI-PmetUH$lq?0jYI*|HxrGi2RwaX|l=eHPN|Y}EFNUzlMi zxeno(Gvdbr*r!~*3owCuUhB!Bl6)2EyTg>Wd~Y3GA!5Ik1zws=*ix8#7goNv!|h9w z<$JQuF7}{S<`~|6sm-~0Dvh#<>j|y#@F9V{-P!yL@9`Vk1$6kWO_H(siX2si`9iLb zMhi+={fxhDzkM2~C5NmjHI>$-{4goZn-{-aOVGZsa@p%GT?{;2sh%4iAOf%ZSO~d5 zuCbT+{QW7JdG(W;6jiV7=UlBCWWl8MT`F-iV~EB z3gm(`&V$^Ov`Md-;}oebkPBVYjd$l(j{ox7Y_kZi z1#5=#h9|Hg!PAzUC=h67o9)uE)L#TsRD9wT5Lch*+o;7p5Ytyt$~T8NJdTf)7;bI6%kD0|7L?Eu~b+AQFu{##QN)9$}3PnEQCI?ch2a& z#v^?{EaNcwboj?j z<+JIW+*jQ}`4;tO0uy|fbVwV~u!5hU5hk3rQyNYTH_7f1(*MjS<9;ZQcv2KljNYJd zlEUy?{_ZO%IFng^nvAT899oP#uuWBB7-CD8alc6E=*jl3ylwS5+hqI8$-ZDL)vGmQ z6n#$)!IZMJpsoF%dI$3{uf?WF=6t{MRL@Uav*c{5Uw;t$A%iS$#PrU7$Ay)g5dz|{ zES7Gxf-6q5?BU2O>zpR$Px);c+h+8nUZxyo(m5mtb?fVkYuk!)O6%f{_dKQv3Fw_B^wQKj@kFTm* zk1($-?IM952296t_Zg3H3>C zIlc?W1Al#6La8E(MCR$<;K#drOCwCsFyKTup%9F~t7!~)n@G%yi;b)|ym>_4zB~%I zVHS($EUPWwdC!D)T&YYV-OV`PYD&KJ(`HIyOkqoqqmBC`&kZX8b%b3tvrRq{*ygQ-^+8p}5E27Yj#Q}L8@VE(sjw7C+DxQpY{!PZHQgUAF5~5KSaQML98rb z<&QX6aZq;j&w!dL7X2C+M?kjf0MPalfU_@)25navshUQNp>Qr`FSv_+T86`N%+cd7 zdim*ic!X|gc2s;dL}3^)V2YeKJFHH(EP)6; zj&5=K@83z596Bp@(eSy63+pC9t`K)uX6UK5u$=_mx(F)M*Ujs3c*rOBA+yEb3(prT z_(H-S5gb7+A!Vpl9ga#x5SS0_421nxeGry-CcPs&=*%8RX(;~aWeQyO{r=3O2Wlpceli# zYN4#I66S{V)_(Z&Q&yC;P()T!@x1tCq&$SpZSGo2?)iuMrh+^7E#a+eCu|z;ZF#Cv 
zYP}Zmn;M<+Kv1pkqW9I$ed0+n$6Y4wvcEpzqG&<}{S6JD&r{$J0o|$*VXks*T8dFw z@(MqPSEckPOQ#u8L>F$;?}?>N@TB<4p>UzwXe%rm>`#)_w{#(isNME;vaYGR!1jKn zn4+kf5lma7A8#W*FZKC9;oMvMd^yWnwS>4C{N>rq!(=MMPfNvX{=NXO-du$iU&58R zqmj&fg7cDeW;b>u_`|4>WpN5z@Y-Hx=^=Ba;g~C%xN1i$(~qdBU$9Q|(48bq*x!x2 zlugLorQU#Fisd{HisWha$ktQH-RoudQytH&5bnF%df%ey7-gLsS;=)zJ%&~XcPRax zH{mo+#Zqy$;)ZhWJ@n$oLFf86t!~nUTH)v)+{7F*AP=&xgk%4V^C+eJTfT+RvC>^e z+-Q9M1V|y2-Q3YR%c@eJyO3WB|GjJw7PopH6;aZB7nShU9SjfF%> z7|W>$H;XU$dSZEIuT}8YYmbxE^ExxNN} z5&d+Hf3f57hsd8KeSE~Lgm>Hg&w9nqT||f4U8IivmXj7Oe&I+xSB$|&<@tRX#eve< z9pQA=tf`TpJt@D?o*G}*E_pv7*jJ$AbHz~(% zo_q{;_bjVB_=t!C$Ik~Zo)tH>^_Iua)AvvDeb3o0>ST5^w$W;&h~i8BAd3@FhoV<5 z!A0TA;So|ntz9p9@kW8F)UwHA=Zi8QH9dHpRS~*84~YBbeOXHH{ob#)bAB)}QEk&n%LpLT|Fjl-DC6Oo2;J81+G*33HXbVasfI8R- zM)J-F5$4!}x%=YeLADf)UQ^3vI>Q0ij>>ZATt`vufnX~d5Wj;-&)JDaeS%Q6$`^Az zxL|hSfay(9wFPrn(BR6cS|2xQ|K1Dxd-w>lYZwmG# zH(wLaS$3rr;>6w4)dMHkZ4iPmV@^Wp3``HhBP_)#n%im z$4Uu>m>LMOz)Ww>O~Lt1lyaWaigJtFQ(1t?i8{dnj)DUfzeR;_m0#40SX{YOd~?H1 z*M^6P8N$s~cy`vFIY+6m+{CasH7=Zb>NC9E;Ng9rb@QsloeVizB(nefTSQ-raoPDV zpJE68P1$dOUOlEa6yc}|tqD7>0q1MK=jH!}74r{);^VA1lMCDu2gr^(2?;{RUW=2Z z=!m109o=(%B_Qy5D4o~%;r`Boha8TJjmGZhDiWmK9Q$-B`n8f0JtYAnGTuaxS-HV; z^>dL^M9+iKXP%)b<1vZVl3p(aA>Z*0Vjoa`GBK}7UXwC z>4l7@+l-3r6Emm?Gc``$;f-VU|5g8xc>l8+5_NFT4Q5|cUH0R$t7wO&L!@aoVTXk~ zN-Ql``PZ~F4R899!*o26-{E20|MHV^xP2ES6d%KZi+2-0q&7y%Nqy`nkDZ2abu=_& z8ZX!JwFpA6baw(GT=RhpKH!k)>gC)o#w0_vbYj1(E93bRnkKT~FE>YihGbNf_nCfX zn5oh;#wy86m*PO;`RcUjAw^3RmarAYdld_kyRPi^X**xJ98mvdy^)dSYyylTiCdW_LZME2=vmo`3qDuP%RJIDjQ4paWD;oo!7 z7S?n3e#PHrh^mfNF)?mS^Tj}9#oN}tijH#Mf`Gpe6I5S?4cD8V`uwJcy3YHsYR1Pd z7c@4VSFbAl?9v4IFW$866f9rvZw_KVk7}KDU3%pza&_qpjL$|Jw)cpB-ryIv+y>8< zx70ORT%Pv6n_h=OH{g>dRPLFnP)4r@{dh!8j?O0*d&c)DrE1m%#GYf^byTFmz58CO zFS7LE?!Z_#uWZI(TZs}00;(Png8scEk;6{yR@pnYbVG6FL4)mR>;}=&`c-riCi=(x zLk_H$0=+|DOH6qfPLO|_WL%0s4Csfoy?a#<)bq|7ma8rqPH_E?#M3H&Kb^%Ind7)H ze$e&fx?D59rm9iDcDM~$+}dxVCxvayJg`vKbQNkzr>HNfx9YkIFj@yR#N45CwcX4V 
zi=MM8ioC7cn(cj2SNPcn(!4(ByC_&2c`Xh)c2`wOCgs4|ocFRq4Fmv)kGglGrJ0?4 zIK|kq9ND5(wPc$|!4{60)(4b6-k5B=E4{@<-u$}|%3Ex8X&c2jNlDg*3PK*blQaQSb5HHN7FKWK( zbh>Sp*DrE_a+7nES;1@iGX$=tI#3^GXF zDYs~-qe~8|IEgktvxMDxC|H~Y`SZy*9HRv0SL{Fp-&mgt8tbdKGfB8ZO*0KvlQtm` z!2J2n9cgjnn~AYaBZ;gbf`~{BJ+xr2q+dVo7>Dn~uMq!~i{;JBp{J`SpIQ8EY!cOt z?X25}5Mf*JN(}lppDvKLVyk_;wo!S(jM4g9ykkwM6F(FVmrlN+@bf@Dp-bm<4`gz* z54(KfJ&A(R>R&>K0|XTs0yt&v-kf0nKw`*g`HX&O)oN};H(k)jfKe?8kOe4KhBQku z7Jzl7RYInIK5WS8Ye4l?%Ilk(UK9VQRQrt>hKyerEg0`b9FCsICFDx!RP57Nf=t*u zYWdHQCz+MitF!jTFWK~@MgLWo}JHpIBUYU5?! zFrs?ur$}Mjil*r1dn!>i_|t#IC+A+xQhCh25RRpFj}c5I%VrN&moIx!Vkl}Xq*C_C zo;nr%Ac6VA#Yz75^j*qAV$RR*;n{>#D6X380DbL;>1VT&9UPQ`4{%eYv^dhJzF=- zE%6x8GG5_h5zAYSCg_+MC%z*n)cw)E6Fke3EtY4`<)J*gIsx^MB*{dy0BV$MM7r>R z`qx;k@aJL|zx(5!bH;_?fUX8**c0S@u#F<80K?{O8iXz!MW+H!NHHH_sR%6~Qh7Yb zP&b;@Oxo;VG9M8)WEg(gn=#Jl!qOMc{p2>zQCVk`sCS_tfG`QABSn630d%v1pRiQF;2rKt3ljNxx(JOdFHtpB7xmcc-B} z5!$lf4kTu}(408`N=H{9?FLKMeE3T(h5w64UO_jL-LNMO{U~iH^)AVH0(kFaXR6Bt zU^q@SmPUb2-D*TMqe$B4r<269o776hTS$H8cJD9)mrg#ITVZ{ll@O|yhPmJ*Yd9aK zP@_Czuf4CmO4G#^3D+IWqoEjyW;`1Vd1p=FU#8$3g_KB*Ne~%QYa*+B)#s&lCUn_= zO@RzP2!0P7KC|B*3VO+IU8JwR^UzscmvF@M$6LRm%+N#sE$L0w`D2Wh~RIN33_h7EsOk26*@2vlkKRMYW76Hnf9VY zfy?3+8PUEjjjw%U!suP;9>sGI^hI=!as8B0v-p^B)T+%qBJ- zMvR^$IO=3Kf~=08QgOb5z{qnLzcY5Znu7?@arCI1{US4Sozz<)f9>KFqX!!vTG^FS zy_tCS!O`LRi`G(fi)x6XkVh5?MMbiS7Gi7y=Vm4)NS!1WLt&XjuF~Wj(HzpNCp7x` zPM0^DJUnbpOM>P^n>V|X>58?HoNDKN^n`=>%GJIopHuw5BxV2CQ+yQp>Dp~M3tT4Y zUp@0aug+#=sjsZ`7rkE>?b#H4@VJHf-Bo;CX%9f}UiAoM?w3tMc+qMwKfD6l9`J}4 zDv(eQbT`8fG?&Z6)KCxL!f}t*bL@~q5`!g(Z8Q`}q6YdS(z-Ph{|MURJz*b)9)4-c zCyr@l+9@|UYb7-O=Doc<-k+Z_&0h6rm1&ak18y)_sT=`UN_701D@;)QH5PSWF2}5D z&)3%E^WLlR(Z=_QQax(fNO%Xk9^?Q}iKvV`V-mh=Di&_a$n~NReZBJx4~rk$?X8@9 zEO{99aV4H}|Ej#R+p_L&gcbQ3qu$^VnLep!rTI&oG5q9?hUnIkhov(Q*C<2cRPWa@DiVKb!&*z4J*z#wKV0Yrm)^R^bnv z2@{JE-Lo!ykiyOgl@ey+hZJqceIzg&$K|iZIvU(S6W%#00c$`cS!#k|&B*p+T9&7X zGB5NPQ6r(m(w{I4Hxw~2wp|%@&wpzpf8dfzYwl{?9G`Oj(AVEHq*Jpuw|n4zCNr6R 
zoi)Yd>6jz`z0?d&EM7_(%-uOxalV?;eReFh)ziFlE|2jbLJgH+j7#`BF-$PAt9ypz ze6jq#)BXWyQ8+-o&G8Oz-g?oW-(GY?CKhF0XRsYbIXb1?r8-0%cJ7^3SVGSJlxqFf zG$Qm*UDFeB@9*?wz~}t~VfaDzrCLL+bHaw9^SD}esmd(9tZIlD1S^hj(`|~*K$7_; z?(6)f<0ZRme4B#9CBN$pE!!0Xigg$uZ40@^a?H?vm~=GCuXfdKG@0)#{n6ECj|FIy z*h(|$*ZxM&{)iN;^H-oHp5(_G%{I<_!Rz?nIq?7WcK>H*^>5}T>*L~1IDiMfz{(E0 z+4oyKKWQfd_h;T%uvHCk>4F*^A=kKeQjZf*BGSnVBI;N=mw)O=-O1^f;#TJtFkOZM%oHMc>_A=vyRf5O8gMBx#;a zqU!b~&U#XtT;otto=MgxX8eM#G;u$U&NVgi2R7&$H(}tYpGP_{BQD!rge5Lt4_3G&5PS&nZ^PfiXUjFoI@$x%BE9?4=Z1@?Uvfo?vfh8{3 zT5tSFJ^EucK6fG`i8zfSe{G$=ibYdA{azl&(I0FLr*bJouGQEey&6lE=+`x_fUl|2 zaI>C^uW}YIA*EB!gO3H*Vq*0^9)p_;t3hEVX}#pP%M0W&{RRbx!H7yY{vCxd=%XZA ziJH%aZn^c9U6u;Nu4_}y2s7-exx6~ObQ<3h~o6Gxc2PGtD;Tw%fZeWec1 z{i}$L>{Fg9-0bu+8Kxz6 z(@Dm=pv)1@KLkkjF7hnMMXol_29X_XyEK2SH^wT27{ z*}LO3CpV{NEf%dr%jZ+$PjDu`P`CKkuMBbRTON1RCDqr~!p&B8!zkoO#m8{1{SN&V zE<9K_1Bxnio3y{@#i54jJ`QCIhCr1mq&>Io_t!Q8`<}V|N?Um0ba@Y(n$d=RrRxLS z6x)7z}QulT*o;nVhJ($J-wq3MAm>y49ER4tV89F zG2;7P{pDVc^DuT`fcOb-?cd3L%}RW%Z__AmopAD6R%m3<;=Q1*HQXM`gy1e_0n5I= ziEl=mI|J36``sG!ebZ5=jB!Z^{AQKcw%ND}9Np3;v`sF>J0f%yHI_Zoh7A3dq(!_9Kc zi1!BP+d)WnY?cA`PBPV9f{j5T?gKCvqlBEH+D4Qr)c zY2oY-tPIq7JF1sTQ=+y`p;*$!i|#E$bDc zH)ixLzIbe9K9Hi%uK`lNZYOo<9UPfx&F_->WB_e?cP9H91%&wZAJ z71KhfgCWpKn{zj?U(U0gX`JVA$Ec+F26(Vg&PbH`I zo2Y~9E}6D$`G&$@)iZ{fX7g#Odb(g51=t-_ht1fPEKmfCxxuEcc!F}wN4y(XYbq!@ z_|Mehg6!^tY0(d^1w*neDk4Dcn%>hjy@r^O%pwVVTsD69c)cyfZ*~e3_X59* z`dKEop1z%^x#=O1@j=(`Ovbw@i->pOR{RYuWbXAu4Sj3eY#L|C zkkh5fWRLpKYn##-0ALzZ4~>R;-D{_0Q}j~$8`KOlb&;T_jk?G92We)BaL?b2%PG$+ zK1*IYrQOnfNY?w2ix{GxG>$HH9O@8bI89euYqEcNYUMDgGSCCaPN_-+_Rh8rjD^nS zdvzH;K4=m(_E(0*!75Y05StmBt4!PU{(Xq?ErtlaSb}XCPR4rn)zS;bWsCXRtp)I< z!Q8}@2O%_rGb*sad)u9O5|W?{7ClLe<1VuCf^@TdV%r49IJ(N?(dYaFO8ECs?-xax zPOwi_Y`m+gYJuGxq^s^B=R+e$NlF&K{)@r;|8!g4h5 z_VDLT$Y%Qf;bDXTs`UHv5u}^2T5!IIA|t|HA|h#nT>)SsQqPpiHv2NKu{PoE{fXIM zA_cQ37PLdFp%N<~Q^69KG!j{qnL`H2PhMXMk4_ReSo< zDZ}mX!7bKw`Q8&|-@V$I!+sUt>?y%n@9JGyezMfT3>_S^S)#BnNI#x$5U_6&4e 
zc5TBpjHS7N>p|Fsl-dd(;)RgPTpf8-Pn#Qr@I4Njt~{hSSKHO!bu2&iSxunbd^m*8 zR{zVxTsZO%DCNc%mvXP_qo$_yMxl|0kIv-&4s=tw_2a=L;=bNVNd`j!y&WUN$*Kg%%07i~8xB+D?-Gy1?(Y-fh3PB#kb> zl<8zkoe_mtISj@*6e6B6&>K)RSrocQVeJ+QL}lI; zx#Qx)i#AJoB{j>#`=!6`i*;GPxPaDS+K=x!qHRkUv-vYInQDXm?-YpgvcyXYJYoq6TGvRsFjJd zz6xdyes9%8el8WQ70zgd*9SqL>SvDE+x+3~!SG6BPz*Pk)5 z2$$-|7GYm5T>a4hO8F!k^^IAwG@qE`+S0Dm!_$UqIKEnLO?hG!Y9ddZV0yz}T>TLB z>6V>vv84jRDjB#m#jivkQv-O|WQXYW!2)UK3n=Mk8KO9sO=$oX!q=#}BK31m5z zRfu;PgBC7zON%w!&j5NmQ&Eur#4>$Krh+9SX&%gmh-S{!r5x>FW`h6ojrq@&#zx_I zgt2=d7~owReaaC<*lzF=!q=MXHvIf#eQ3<@4j(1DIdS2{hkZBXMPxc~dm)M(z+AdS zlWC4w5>WSA?Z01}`%g?A$&6PUF$4+fC=O8px8s-W>=9o|J_iWp&7a=pzZw;KQTq3P z^?ODhZb!^#?pLtyA(Z!rp|8N;NkmIH#(}O~9``9?j>^Fsx{n8E%fW0dS=`q2-3>>! z{KuKr2Vy{N|95ssZ}bvt@-6CV)-+>3s&~Mw&z>3vhQK2WJokR^jzL)A%~&vZo<^Ut zy8+@fK}#2QL$Lf6SB)%F!#1E)2l+X-^&dvjP=ik1_^?`Y*-E=)HHZ#F5x@rZ& zy;x2(SG^~b7mfVE7D(dgzCd*HC^3m4KdDjL@>=~u*Rb}@hx*nOwPLVytY>lGED+~0 ztMlUnA*?O*mp#l5xl!8bHmmc$RPBb6yX4 z+yetea_^Q#i9ILS4pI@G(4@SvUgW(Ny8yT^LnP!nVVU(gan4<%n?&tr{3I}E0kE{d zjk><|8OzIA(?&DTcn&V4Tsgc*AS*7`#;38;fg(WU<1=-QYHA`)**R`DR#;K|`CPY% ztJEf7C*i!)2Z$8c+Tx;buK%7FG{KO9L$hivp7{4MvT*F1*Ue{B70jf(B}A<#D9-o- zmu8mNWwGa~x$XG*Y%bs=*#Sk&{9*4=i$b-3&V~dZo7bz~X8j3{W8~YiL9sUq|EgRD z666dCT(h`$AcN_+Ag!*BI7M97Gk(@N4HZ z!nG~M2TP94?Ok~;)%FBiv5gFa{jgZ@Mz!*6H9a}9;dV}!-pF1Hv0*l9$N$3t|Cb;6 z=u0iTmvjDf`oeVDEx^&?wD}NBc7_360&PG3QryHYoUgOb{@$m-gW-Lipy_#Xqw2SIQ!ILhgCnb2E0DLRk=t~@kP{d2CGhYsP;Wrz z0Z+eDPmu&+Yrh#OlXTyogm@$gJGwdWI)7Pu@2VFLSByJ-YhS>Be~MRd8<{aW=f1B6 zMc-FD#U3-)YicO0ZqKP3{w)f&NXbCkwGhs9E>h@N!#O74P1zTK=le_m*)EiX!YQTgfwZ{gSg3nV2zvSSAA<3{Yx}bshG4;zCOFz^hyqG{_d}>H;HHzCOR7#yS z8+dqlawYRaO6GfW3_k<8JiNa*)ki8yvU&Vb?Pw)I9oT-;uh)E1hKx;pfu{&9!N{Kz zKe;dkAYK+-$j7;R3g@w!ab!l;m)J}1jXHtHX}ZKj=122@=9w45Sa~Taip8BMI!RYF zo#%5RIC^_tM9v+dDc2+QJHCMNCBC~!zx8Y4e>@sE;;fDD5!!-tMX&0o&vJ^KKJATB z-9HIbM~kJa6j2(k^bBgt`qY;c{Og&Nup*BMEHbIm=*A^!zECxY_KJUA#sA^0zdF6^ zH@T4Tc1t68E8}98vI5Sr!j?y}p0u+2rdoLiyVC7tL3?0pjWD0mWuZYy#fBv9|SMuGDumzM(-_a*j_{6Kjv=>-1d>wMQz~ 
ze}6y{(@hlX3;5&Wz(Oi(smEzk#M~k70;Il!ek&=X?uRACqCWb`?sDAPz(Pb8h`Nmr zM;>UU%mX;4ZzhH%SKji%j9jsXcL-U>&Bt*VgT;WLJ`bQh4Bu&w%v{Al3m6x50U?g%2KTwmM$s z3k5uc2VLaR*41)dZf+h-z|RC^>nqRJ;3S@5<4$V8`8daP%e^9J&Q;v*AQ%uuhp(?t znCTs~e`!iqu>_es3HASRL+WaHq$C;`V1G+2VWp`lFC zK<0e02w%k)+S8?fk{n*A7j({gF#I6}-`anZl81c`mY+#=KF6ye@aJn!A$8F-HQ)Vb z`6jB>x8T9@S@Po}t3PBh6#nRuQ-AO^z&ZMUh7!|igNygln;Y=kQd!D4Iv+g<;L|QY zGmxBYQ~6Bio|PVtMA~*jTXkYZzyG(4&wr_W|9xH%8G*{r4g0BJl=<{i4v}`iK?iXp zGlt`Ml0#qGn=C%QAirzB6kdZF!I!*Ws9mgfS);lt&!f`Or@n_gpw3 zfTcnQg!-y@tBNZ5_m@5(N=#^%+j^QdYc_bJ{y^sPm=gqK)Tp&O zEt2BcE(KYPz=@%Rz3ClozEHIAz8H|<=Wifc6h_9WSPltjdJqgu=yhJ^Jpj5;)J1DM zU?Y%sKPrr(FNiXxCnY2t;l2k{U24G<;0N%z2p?%%5G_rSe3-(p z;gh}nU60HZDWR5fDi3)y(|aB2Z2A4C%3yJjcx;Po%8Mr+9Gkx%Ea_;Z6mQ{==vALU zH0H|W6Nf0RlJGQ)Dn7cbPnr3<8B} zJG<#dfYF4Q)2L#3(}jz_x`p=xI#5V2+yS?iz=>Yq_=HChEE$p|P8fhyqk@k$_QQ~T zkJPtM>s^#mKCl-AM6>Ks#rqLbss*&QgLIu|&_9cf%M)P;2$HH@_<;G^FRgMM|GH?r z^~*IF8Vx(4MK3>^`dn^4EH!I#xBF2+wqqll>^d>}D1xC`S{+#LDM#$2g<2?9tAflj z6GKvZ1A|{m-rO^o3eRaRx__7rm()}knHNsQ9Ia;z zz`h%Z@h}?Xc5|7CM_JUuMd4n%4$0NUjgK(e0SSqLL2-r~Jh#9Teu1c!!B^FkJ%6B^ zAeX{ke~)j>CKnLrLC0^~J`OMChHB+b1Jh|XWCV&KgUX~M`X6s>eXqX>E8x>JX?;1B z##H&4aKno4cqTJ=SN4F}5tpLscW;<8fdm25530FJ(qh6bZt-SU!O3>jcc z^0R2R?1Kd&G@^xznY>Wg6p5z$^0F+fy3M$p^PM4`J+mBG+XwI8SQ(&@Brg1rZPT68y&A!dZz9TC#QPCX#(LN_Nmf?jFBFKb8KHriauKbn|l zyvt|R$UkaNxvk;QS&(-s5UBuUuly1rI^oZTul10DiL>@X2|sy|8un@i8%WT?Ymh$2r+lhi%1)YhJa> z{N0%bytx0clD#B2+-JQ9(tA_p99q5BJV4%p1f*Fm=d{{w_)!Ob9nBV7#!I15b$TCC zYlrXWUmgAx9h`8x`~}P;N7E-2Fr&q4WmrgUuZ*K%Rt9`2rOb1lQJ{VGsrgMG%jps8 zYBVkasGFSlSf(`*({6^dn1(J|D;?Xy?vCEf``;52WBz;j{-3rtgm{jQJ-FtG?@RG7 z+{{`J_J|@WP5#-lx!itv+IlJePW3RA5z_Z8asYI@V89gPK1Z2O6Jfh1ue%BSOuAPn z?|Mau!d`)N#V5BOXD>PyEHUo;?enT1T)>s|8I-V3f2~zSBj+ZtjaOjV0bkgobMYxW zo=LVn_(?d=eKm#4WHLWpzGYAHz{YOFAoC@zEuv&&kJNKyJez&b=7B5iu!fShe5;$) zMwlhB_o)aVug#N-ERPzF<>xyp!ok;Xu;0C4CfIy+8u~!-9f)4)2bh_1j#?_Vq?ANv z(E-I=*#vD?MHkh4Hqc!>X&_6-2Hl1F>N}obd9knP;dGn7H~;VCpN-Gvck;2{^}0PV 
zp_(JtefTY2QcSF3zpE%qwRB9Yx{1Rc@-I&|@@a-cSjSI{!zg?2u1lYjAzsP13nooh z8|hKY7mIF`1cZcRR{Vj@`cMD#e;oAvHqF#MuFIY)ON~fyaQzCXdl4XW^?nercL)Yu z;U~>6l7LP?Ef5jgm4LFL4Bbg%X{e|Ed-=j1=IHqW&VImsJDYxx>Nb&HR{`Jr=N1G= z4ezBW4KV>Ge4pdS?B7QFDsb#I&X=|*ufXm+9!tQov2Vb~R^>vjIxWPeU2s9LVi)?n z@~5Ox1-}H>b`Am8(lQGBU&g7r9vU zV}(bTGM<_}&D=-fofw~i7n^c(&>&{%IV!X8*}phO{`}csD*+KF=kg3K zlX+hzBV5LY3#sz*yzSG<)E|R0LaFT;L9aGzhf@D}q$i zP(0sE`5JFI?2d-`KU(K(bCs|KeaDSC!dNgpXfc~OwhX2Q;}F^P)e783xgI$JhI&qq z0N!sZLQO-~Z~k_Zur$LqN3U-W(&ee~ie&~~O9Up?|IzM^m_0E(DidA(P_iD?RCT%m zaxa)4TKEhVHG9YR4rH4Ri`rgSfBNM1AY?VN5YyAOECy6YRK~^AOnENzpwq(wz&9+Q z;^CWCg;gh?RZ|%^>)g-Ybj>m_5#XY@5EJ zPn!_-oq@7+3U{v=E{?WpdYp7AsKdB|iqHCL+fs*1JOhT) z(eA+Q1r=-dMJ8A}ny~PFh4hr=K}jPUNX+IOoAnTD{)oL6<>hZD3;RGsz{nZw z+w+?q$~@kC#0>;r%qLH6TS)e*8Ak#4l`vVSalI?7?!y~RhOF6Lxisn9SD{gWj^vS5 zwP+_RZy%g@*BKl;=E z5eQ@SNXuvjWn_TiIg*63ab_`eULNKN&oAHtM`ZGnjdOAbnA}hcDI~CJIw=${T`Y?G z--a$e{e3;tI1t|Uh>Sl6W@PpE#^%53hj0%6q7UwoT;4D7PC}G`0LjmLy)F2BrrFW2 z(F*+`7FT&6)zNA6zCKTgjkE#E$I$3yA3~-dwNWpE zS>@iN#h}-QpIKrr+3ZQHT0(jO);I#Zt(Xw8`UYem$(q#ju5qJI$)IhqqmR>U8XB+` zYNGdxLkihvbZQyGSCV*xXdXz_X)JH_dB2?W1l4*Ra{mtR=~JXjH%7&H;}SrPDihPC z)oU6O#!}AhUb!rEAfF`mo*F7d>7 zw5t2*N6ZS>;7L1I!uy`N4L;0VVV8a3oKZ4XrFDhsPrjCsb^hMW*(^d7u-3~z@1#YE zZcDQk7;y^w)#SSy*nAr^V;h>G9lCPK9Lz`)6iBBni}-{2{+r?wD-(G1gS#h>N`bwGP-+21 z)iWgMBnv1^z3np(7`2-eyYF&ZgJ~^S)|M}ci_H`n*{f{j<~G`2f~#pPzWcKn5E!vU z0+CNQm^U0{F5C^fxaBo%niaqQ)GcTjW-)^q=DL{UQdo`1rJZV%S7j;1E)oJlY0}Tg z^4^rGO@tPNHP}FcGaAdL5$?t%`Jej+12Y^5D z6d11ohRRYUzN0%J_1)T+3T%}T{-Co=?z@kn61zD&_>mh2bI!79uLC$>lKbNF`@81BEZJR1>HV{dP&!k(QnmDvnU-DG*D<#X`ahw(_ARhOeN8zNprC^P#QI9 zq*bjnTAJr`JwTZ$zOVbbuKQW9vh%S6;q2(?-8e0wUaIu& zzS|Ek#~vo^~$dK^jo&4%svNsj`nQZB0>_XrfP)tatg+ok;d zhuz}%69vtUR&x|(nI}btFJpLd<-^@=oJT+F?ETpzZS`gQbq11XXC9T6AFwBlGC!&D zzRFU3g2Qco&A%|2_CvP4eP`d1U(~}_@#~cQXe{Hg0{UPv=p)1Bc(A{x@b!7y;(JG zf8nEm`>7-8KRv6h`&r}+&rj}2{#BwD@}qfCA}QxyWiJ8Ler_|nqN+fJ@$r8qv|kbRTNfta5e&gKu4?U{nj{kllm5m$*D zc|wI0ix|trV4P>txGQY?j`Eqdy!M>_)dvbbUlh^J;v#V_UT)O2KGGE@;jJ;+w}r3q 
zVfvz7vBS!WLb~Htma(SU*|nm(fV8A zr=uskZa>EP2Q3+fj%NEp&(1vxc!aN+Y_x7(D!JdQB+G4;iP06up|O=)o{!Dgga0Zr z*5P|AR{mqS-wLIN)c7=qrT`%Uwziws**Oo-&SX7w9MKCA->;O=l9kV4)!>A)QKR&~ zs28mfb}v`bK>q@*D&SW?)q5gaet_~A@pHM9Gg+!jI*Naz4T`}wv4mlidq(jm|3Y7U z>Ez-09FO0!eTFAYYujDb_3U(2E2GgipnyEIwStiH#y1ehotI7w?c;Trls%wvC@-& zT5IJZx~EXees`p9K#l!*-MXcty;Jt~?Z*p--m+y_xVsxmii+XrOSe_%jKH~+yr9&N zCxFF{re@wHUq7K$z0DD_lmV*b#CM$~zwXalrkqu2}NnjaHg{@dk&U!Rwq*S148U9o8<)b`%Hr(@2Q`<7qxUBbxhUiNuS z#gCE_PQ$C{#NpjvGu#B&75>2D}X`|y(#-(CcxAXGc=8Zg1qd2{9;b3`~ z(D1t5dZFiqyrj=)MFuI~DHYwhde7sp3wrnD*J$o-wne|07(CYJ zTDgrYA?@q&w+@fiI{t0)|MonBOmso z2^Wee3BY}U!zCG}}^JqcAqFAN!5~im#xX~0_mHCsB!1I0AcUheHg{g7RqPQEo)fINX)YAYlq5~3QlRbSKbCnpr z^6YB=a_Bfis9oKb>%(ycfHKYxmoKl?T&ytKKVep{>&pD8_68a@QT+7{<}NXD_#r7} zVJ+IEyN%Eym5PnXvnfKmku|rbuz&SBL;o0Bw|&8-i>5!Ww4dK`(bTA-DrIEDUj=FTtXZ{B|B zd3*e)U2`>MdmJX+Gn4raR{o~j#o-#$l!7Aw&m^6rr3NO5Z`LY!Fbx;7*k$$niu zimwGTzq)Z{RuQ*%2A@WfVupzfvxRetrbCFGZk91OukOL0ek*ALT32zdap;%D>5Xe~ z6lwa+;a=Ci;oMDD$5L)uIKEoKv1Q8_C~F3rBIxh_kaC@JBCuzwWJ_AG+^vti@6~5#;4zG~N4V!pq z-LU(3(w)&d@tg}RxpLD2tn=-Q4f&r&{RUKQ@U38dV@G3Lhr;$uEfe^8edcf1WS`5= z&#E{xMkwZOFVirdk^J$2b5r#r7`|&~8lQyQ2J0>g+SqMk_~Xmn+G-w~7kW2_-zn@H zS=?*l5vjPxem%~|#UVHLWww2Tg^WK9P|X!Jl0<~Ylq^2In7lA0Dug0e~!T9wZ^_t$9;~J^9mpPLFr;F@4A=18*bOVx!cK~ zZrG$%HG&FE2|KzcjyHvV!x@yoq>C9`H$+o4nWtRsm<{K?OiMvt6 z(>g~StDV186wOsoi<`VQ@Lm1d{kE{*W$HEh9bG04*mk&gG2U#Fauro)Q@8!N*v$X@ zg{JOA1>dzr%1tZl59?lX53;aYDz4F1DQ3U37O={lxKPtW#w5NB7Px zd(>+JHt2lhmU(=Z8S`|^ zH`}}YlD;*$KbUESUA%kI;m3>`?~WF?P*=cg?Bb}X-KAnn9UiNbyQV)6${*9}EdCg(sd}bZBk!S9j$RH1D`ewDoE2O$ozFy6&nH$gHdUz>=dN<;&EWmOXnEel^V3jzw8g>V z8E?;RkG)v)_Ud0+x}YPSr%~LL`(#mesxd=8Xz=&y@e49jK3RpuNgbeU4K zcj@am%8L{Qu{B8v=ntsQ47`2$hiBC4BMZ*Mp3`+nsTzu4;JuX#IiQSX0@xDXek~%Fm5Q8rS*q>*NVjb^#HmNEDRGf7ImwfJ~aQtuJU_D}=<20^}B_t7&rz)T=ahA&c$BO=`hM3*>+`c~E%vmpJMHBe*tziKR`-n- zd6y!!GlRO%hw{gn8103Q7O%`SkMDSTx_IsE<&xWWt;r#|TlB~NDCQG<^ni)Gu(06k zR%D3MDHQ^OxANZ1b9W-rQ0- zxFiz4ZL4?WGt=A;A64xOWojRl-F5YL)P~f| 
z{?{_%r?=~u&%_J9Nlj4BYA#p2IWw`-H#^$m%vgD+UDa%u+^)ON^`)7ZudEre=DC#- z(APNgI?69gTp>Ttm^#Hd(za9oVw0!rG&gg=S)GMX&I?1U87oiOTLcGjF$PaodJEYc zm6=RrTfO+sSWuFY77@v)HxkK|W1IiDNkhKv#b(Au&8D_HW~Kqx*ShO;E zC1h0Cxq8BU^unvUQ#b2*cnTNIc{*D2=MD6!^%&=#yr%hWh3+YSQO*8NB=t`WBWNp-wtVk zuXj7w=Cmvu;nnZp8{D+LW}S?dVq2b?QxmUvBgZzzCYR?M+L{?2dd;;vopWYZXcIG% zUfBA>OS`y_HOca?wlFX}+;;rxPj*%!m*085SCvl6ZO^f$It=Gf*)z&EsO@JI?{F}% zoqhg#jZf#oM3#zo0{=3PKkw%IKh7J*d|b!CP_v#_$a&@9)pH;yYO5+M&Ax0((#m{R zD<~jfdh*!!Z~5n62S!aDde%SG%rtM$Rmh8uj(+7F6dcUgo-SNS^Rp<%5fUFBitjC?bSTZFY%*v4j$=EdHFMQjZ>$JLq$z7xZX?xCK=MU z{Xut{|F`#%pIz(4kPdc#9UYBP4A>+(bAR`_Mtop(r|V>U7$?hry{#r65rz4oMQ5RC zi#4^%tY*e>w0B$k+W-3h^jqBhQrCItbj*C?Ig7^yb`8%?r(S$}x8Kg(+SM)IZTx;$r|bVw~jK8)9gZu~u&G%duO_|LfiRHZ1daxa>_;l|RlGY3$3J9nRZ& z=LoOPgjTbb46hh}DU-bao}epwzpOd9(z3<)$>9qZF4#Jk2l3NhE?rwM z?Ox)|Wchlvczxe&BTS=*4Q7ppcnArKuZYQ5yW8xLCB;v?wEUhx z!tAqW&$@bo_54Mu_lw-$rFpWwGt;zbs>;6b{T12||8>cbdwDTS8S3nDXC)cN@Ad!q z{pSW(bj8c>AFo%h#fmU$ePOmsGmTg5O7n389bH}Bxv#-RzIH<`qV{k8>!Js4?U49! z_YpfgyEX@kpJlskQwhm3{R`#ggfo7ettf8tlW51rw;b7Zh@^R-*fpKh{r70wR(Xr^CokkmbXeBD+B zUr*~QNj@pt{`z;d|GB0@;y$j+xVn->(D(EECuFBY=jZQl$uwn|pBeX=pX`+T`t_@w zjl=)(A^+=_47(3Z_^Ir=kt_jQ?YXu- z2xQ_;lfx0U>D{4Z$QRUj?s#rfzF$1}A>9D`FZIIfTSoYg8KzBwGh?4OIFA(Ams)QR z6EpM9>TnWZ-gR~zhq$@#+}vCrcEn)O;zBKUmT%W#cI` z(Y$q{-M;fp<+$h(eAdUn7_6$^$}kq?`@6jE322nYsf1wz;~Th)H(<95pMQC+uPH^Z zySw{hcfPl`_XAm1Cz2&joIJVy!-o&GkClU2_d8r2OfB2QCP~>!M!@)eTuIucis+1t zeJwA{I9zAOytW*=C6B2xKHPaY=ftT~_4%*Je;)kfOyXHalAy_M@MROx?|m1NijV52b#-;IO50rttk%r4 zGi$?W3Y)iOH>X`vj<8<7b`z&zgzXuZh0!RBI!!ATT#12OtH{kYf9`7qt#fe!sh#3? 
zHp^V6z#1<~&7rk^7VfjH!Y)%^wnyYS(7Lh*lJmNzsZjx#ShLdfv^4$Z=NBwH9H}MG zFT7@VRmEHh8C#k~LKg*)Z&Skr*O%+xoF$B+n&(U3bisnbLLFA zQ~y(1!H8?hEAjC5=NArX+cumQw(Kg-=$-6u5HQc_x!-IMAj57$g4O}$t8JEL*ck%s z`GVr&;#EjD5|eG#96?bbGR}6|b{{UKV&*b>-Fro%yz~~n2CuFX-?rW>d+0@L*CUTj zM@`)h9z5vFF1oXDszgYvkCTju&fB7e@s#YbI_0d-7VrK&RJ)zA$p=G8{!)~isTFhY ztA6{|OSizO1ZW|%@-Q#-Za)yW64*M8;g84?A3TW#65;UqaqvlIj`H8dJ6%KsbGq-E z3GOs*O4eD$DY3_CVvtQzQc|=OfU>A0z7m~n5I(t6H+?jnvWw%8Pq&HSXZN@Z#NBZFLW=0Rt({owFTcCAUb}A$xMk(&h3P2EWu2AC@vKaBifUD>a(dB8c)|LvSF$yIqZsQ3VRcAAdPSBV8 z>C>m%vzm0BOYp?(D>$znuVhv9L zp`X^!IZfG~PO=8~+{YD`?}W=vv9Ryt=l3GP0F4xKr>QupGAeR1V!=&Dt3}B+xTX{>IL(pgne23zfBa03`-LN}BhHpH zpV5~?N2wDZ49o6rQ+_sEB}omJ7|9=fVINN}kQc7F$&r^t5To~TDy!J{?I5eV*)Ymg zKgxB&>(AA%CzF+(Em7*v)f#U-oK5l1UR;<>Xf1FW_eQ$Sn)x-LWdS{@r4jZWn(gMm zoX9|NGuuotwhy0bA~KsRv2VX`7PuP=O!q; zdN`x)I^ICTZaJMbi5tltt&$$d4^c!O(VOdH5v}USj^9r!-q0ZaIH{T}j@Up|Zy?kD zLJsrnE&h_d52UE>$iSB5fp4wmcs84y(;FHjPA>|LlseoO=UMlhKQQj6tFDQaq3!VL zCJl@F%%tt-x@`*!lSLNlv3-CD7(0Q(T8kEgMd2G)%LKo42vD=sH};$1^mzl0Zwz9Y|Jo zB0>6P@0A~&OE6ZpL7VMBqgl#2f&=U{qh{NvUG9sM?j&k>1M!q?3)Wtb zIlhP}WjN@Ih)_xikkgGBL0n9pjK1sXDN-e|6(e<~y(8PwVEQ&mcMxE zQV9wgT}o-15A*J$1Gl!L9!WlNr4P#|A*=bgd@)LE#$atLCBH&o8NQu6cb<$Sm3z&} zK}1nw77p*J=f`WtoAmSyDxaNB=t|1!cxJDHp>6{Z5}hF#!-;$lr}aBwOCYQ>SpotA z6wIY6TPLb0bxK?+3FOK`1sNS>&B&{)q9&&@Od6i$j(jkT@hX#-g%AVT3YPl zZC4LrBYApCe2xu#F}>+{?%(k>Z_P4~YraTuExo;)L93aU`)I9^>U&7yhOIMp4Gne+ zi(ppz1YAOX@n4vqRkWL!P+pWp-c7|P^-ll}bv?YX=4kGA+$GtA?5+l-zE2Bl9t(a0 z1h&!`dDFJ@qFhLPmigL)Q9}AZd?vcVXUem#1pV$BL@s;RIZ&k#(k-0@I^;DPX zul;t_kunr89cqqMm~9P8-hKkbuC!$fe-3SNjK&np0NdS7{@hvQ&S_hJqG1TTjTP~* zd2V*9!{$9UQsD}2M057Ma>g=~m(Jj!Q`tr#*N#@h< z8%#3v+@^cJRqqFG%>foZt5v1;@-Q-z*gw|akdTWI)9)}llr<0`M9=-VH4oid^|V$H zakj3mF4ok9dfkMo({By(QxWOfb`~Ls0QE-cMZj14Qy7&jXdRm{E2@sFsA+vUCDRn5 zv_V5L9H^g)RFyBzwH@e>m@OAbsW%GXl-%1>skfB}1qavbTdNc3WK7ZG{D>1(2-&X# z@VVa(nbPmqU*FbODL44D;X3Cv_F*a=a0>~o8oG{`HZn7dIm!Kn;$J3%{n#91qPNZp zVd;sjaLU5yICUyIIuUPT0B$@l>Rs8y`+Q-wZ 
z@iqoo{uk!$S~2a(I(bf1F%9#$iROjb5xp2A+r6_RMef~Hszw&I8VHAZ*AZz|yy4pY zIAEFNqJ^1wByLa7tpWS@681)6gqpjDToGhgCP;#d^@*D4CK>MwULBE_2XZ6Ox?u>b z<$;KyYNw%Pp|#DL;!Ere%P>(L}xEc?gVTcf|>^$=1VI zEe7NL4Glw$N$1KQ9=uT}n~-C5ASWj$wOV#I)|DAIb3oaCaelH4h2z$)*ZOtAdTx$^ z6aR!-E&5U&Zp#sE!Ffa$H8XDWGv7F|Yg`F-g>iPWeD_+&zz0wKSu3fG_J&zB8nLjj zC?=dqLQt;PK0f54>N*{QTJUW4)eCi*Z*lKZq<(FrW5=i;tM`u{OVrLu%WcoH=rr>l zcZs#z^w@r+L&;hpv!RNzOFR2{sh??JL}tNPdOfzgJzUbd3lJC_YqXD`$#Khvx!HoN z$RuF*?fzxW5;k?jFTjF?+9oXXvSSDIk#0iV%y+GDD||z@+8|AWlJ2^7Lc(*lAmqFx|O# z?=*F;#W84;jMKipe19%!1ps2(Je0~{uF`a>?k*+ol>M311mM|7ZRqJqqGh;4mzPRM z(?8(lZ95pAJbxsx!#sE5&8fY;3SeQ`GZeGT+f6dCq{6;YpKp9cO&ja6W~i}sAfMm7 zefzZVw<~~)K|w)R(gUE_pncJ??#sD0 zpV#}a2ql}RXr^xm&5&N1kDC_#yi0O3uJ$qTBPx6!U`T161)(B!J>k!{uKpbY>Feq; zCP=gexM%f3+OKYIZhjM>75Hlu3f^xqPZH3HR-w0$ zrqGEK>!zEL{{oW}BGinH1mh>>u+*A!ZB5BKi=ozM#@V_3j9Wr+Tf9N7I3_?g)J7Fi z5NG>Dc@a#Q3VgjSqE$O51Oc=^tX6E|+cTWk$U;>Jg?Hh2yX^ef`xubJY#&1xh&nTew^3o+F`dqK zW0OLqHH0{CZkU^+%B57r5ay_A|L8m4p`q_%(L3yW(+sc_9QkB3nq*Xzmxpd73xgr# zwDgKo(ouo_HdY7zT>h2=Nm-={DTP`wBdF{x$NFlw_O!H22i@wu-k-~_q$3#`7pdN) zNoV_C@_m@1Q7DK?{)Cuh7Sw)VV{YAZxxe4^@7ue#A45jNVpo)x$7mA}>sQ`T|1X?w zJf+vg(0^j~x*sNN`N#=f-AEhse`60PwoHe%Oo!XV1}^=EyNq%nJM7xzHuZ_!^+}|Y z2`98Y<0OoFLajNO^2$T~Eg~DJ^|trvdF**Y?qFI)S6YQ&f;rA= zAd;SQUD`;BucuEU+3njb6$gRcY=@h$0CK9zoFbgtcMoQ)dU@N>3}^o&Y?YIfBj`{$STJe$+>0xkHp6X_Lo!gS&cqaerZe0niz_jNCjXsgeSCbj3JF>K zm9JKd1IrWo2}vw5*Mg`)opZh7wFuP9M>M3P>T0eZ${ZB|MVMr*JJM0Gm5=Yq%q?DU z$^>^VZ{H)nl!43StBfG0H-R5)M4ne_W;gqMHE40!ttN6K`Li~uv zNiVil?EriPRftx#An3lARsI%?n9Q1=KbmpJp=pc$P_Q1pa^9trYZGP*G3qMltLM4@ zicNGmP)B*lXCHuf>%;NU-b&@%Zwv2C+^F1J!HL^+5OkyyV2qovLRUKSb8;GGFGVVukAcxTd3LphuCwf} z+(HwhcaWnG`!F9yLT{ngS1lPIZkM*QwjKy?)t)9>li@|r_baXb|5@Oxu>m$pu{?B} zQ1BT2I$b?!y?s|dzI39N(@|=yA($^(`-q!(V~U<$JJ|7}?D>h-7%zyM~##MpM4rW7zkhdPoYcIQa_Sr*y_*{8DK635fsuznQI zwvUuzD?Xb=b;3kqs?A_?s*>|0aucZ>Q15zsdLpD06HyL?IDeD*hqP@Sfa&x)tz5eX z2_>O*1D~odDZY~=_*BEQD|PA5YeSO%iHA}={k!X2$d)3tYD-tJT&z_24jY&-3?qTpDeMr^{ 
zRq+ukpGDX|MZGy)O$b|CXB4P}vvi_zTfNzWK#2$6?`UQFDCQzGV_(Feze=}aQj~!c zZWto)6J+U#SUPaS3FU z<1r(@Z%{pw8+XWK4GqdgVQ$0`-mbq{j|~v2o|>Z|DTBNfZpB4^%0y^Q3IUsRVn!g~ zBu(Z?MNPjmjCP(JZWzKYj^Hso zXLK2HIfkb>e^(z|r1~K0^WVC7YukZt3elnnpW;_&;W}`R)Di{165{^h&k%JtlDKU; z)_(Q%>Qq#8PEBo~YiRQt zo@vDEO{fO1pnOJEN_bVj!t^RB0g(RpzV5;uUc3Jh`V*m*_Zg`GM`=5Kek}S`1dI$# zZgEs@JlPmZW4L80TWk0{ZBE^cTx<>^QoLm;a(?jMLik~Xk?2me>pnk!u+6Gs_jtz@ zJ4&gaCC=qjKpeL1Byv_CxRi2wK8k<)H%1wFbF_YP_mykg3AswvLVWGyc#6ABmDE6D zACzr*c><`33^P?};kJPke34DqEK{9}^PNU0a?(plRrox?0o~8^T5o~S$=L9nr?aH( zhdJeDKUDX(#XyfyvM47CC7B6&&Fl(SnNPaA8veOs+#z|Tt z8kDijrf*9(lnm(u>`lMI(P}w4+3}}pq7ErgVidtq1XgLH_H~(U?er%hsjy(8ML-aG zfyBpaOP|g5f@(k{bIwOL7j5i>z$og?ylr$eYH@!gK7w9mhCB9#SVCsdDAPP~f^iYt za#vQ-ycu-iLZ@M9J#1@b#*RZviL)hj{mZK1?m#tvSO`jFX9qVZa@n5ZbolriN;2mw zfs2R|!tM)m4g_eCDh*q_fmN<2NWJT>vY?XF8E7rxpnFBVSK0Q}Y;pT?gS!|MwYuwN z%s(oUt(M>n6j5r9w^_JQI4pbaX~d0ZM?zg8njRP%AH6w!7YwqWtn19F$pTs_hh@d? ztYTzyy>?iQSOa8djKF0HB+)0_?r`9C6>p4LQLc`*Hm52lf>$9sT;1s3M+Fj`ta4UBWm~f9YEQ!YkA^wnL!9FR z$TpUH!C{dv90|gK-4#_ef#ZG6sRneM_J)I5+VOH_C=-~!=D6mXgDbc; zlAU?O8?-uMddL=R>Qm-jPjo}zu3iYFu$oc8LgeTlA|uE4@h{Y6G@Z6SGv&*QH_xGY zY#}<~15wj65fs#i!urLY64Y>O;;yUY{FajpyWiYN1@zuNM1D)Cc`a)|ZZ482k>~{> z$qs~z09Q&3B&K(HZuK`hsr?Ot(aGV`2}mh<>=~WWhkd#*0^Zapcek*x3b%`Y`47H) zddEcM>LStrMEbvK{#`u$`ghc z9tD6x@P-gvY+1&|LVQWU8e;uz$oM6sFr5Nn%|6hSl47k2(A8D1uI9HvRpQY>@sK29 z-6eT|K!gYibboKcp;pN)&h<4UXv9<#!lU^fDa**u`t2+&Eq%G9U%ZOi~>wdXOUWz=nJG@0Sv&37|l?!(a#60BI2`OS4&4 z`(CF%6RLlvC8d<42ZK+4+$1s~{5L*e@rnTrxsll_`ZS?|`UB&LpGneULM>T~;Uaq3 zAm)=$vbEoW*cps#>iJjKiE_K^+>2dekh%aeYaw_N%Ne95LcvA@OR!0&1Ev#m1i3P{ zF1R}{L7m)U{9ga%wIyuQd_4NoUwer4=PU|CAdbU8!1|EFd=T9(jtP^odcONFI{iQe z-9w6>{co=fh7Zz2@A!|`X3_s}rjq~va2Z^#|M5l!1}%pF|E=`F`TvLMuQl@j-jaAj zXz_nugF9jr{JgPsw?)7>@u2TRx}nHnd27uACtH{C1U)|lqy|Y@gd0{_7Y7l=Wzvnv zbabs*irQkD;s<0PrY`>(sb3qmo@Z65@Pr{T^6?;DINl%}&fl-*HEAGh{`IP9GY8oM zp_L1W%6s*}mDZ426%gjC%rEC>rF2w?hse%KQB91BANn=T=FPA{tffA zKN5a!O$vEZ({<3St7Wp-VwUXP!pFx);BNLri|GKT`^Vo_bU~R6fchh9ees_Q;%_1M 
z2pRd$>ss#^z`ZQ0#0UdYysqMs$_Ux|!`@HIdNU{Cswt>SCG|K0V*5MBe~&H0#mMXQ zO!nusT=WTOBITV^8L)5nW#8p*V%yr78spN5&vLJp8in zPj527W|hF}t~mnOzc2|Pc34D#)K-7LZfoZJ69QTFJ*OD(xJvQ|PvL)DG=@(#$R@(@3KFs zgZ_R!{QR9Ew|R+}MF*!v;q>KQXP<8-6($f$YO8r($rDsDpiPgO20-B^i<7YEsaSl3 z7YD;5JOL(}&W&>dJgXw4;pLZ!hMG#aJl}B5R#?Od z`3LfzrkdeJKmUs$3Vi~1Y2M|M1sUL>{ z%2!Arn~2s3(|3fFt@m(yo+9vU3@M3ZM=($+Ss#xVdqn*VlHu>k#9wyb`S*r>$=4Wv zB1XS0+fs=N*U@&=IM^F7no%$TO1Wh^_U%l>O&~8%rknWl;P_$D$9idfR|y)m+I85P z`1BOQNhv{jY*9NR}U-KxY2 zOh1d7+`LUbPCKK4_K^oTtD7eC38+X?g3G6sdY;fuM6+3IKqOF7+~-a6A#Vy>_r62P zpe7|JBqT(@BAqcMR-c#-?$z^y&BPCc3Ya$bUX57n`@^Hc!Z={}xvG8j)iPnDnup8F z#w^KJ+K=Ld_)(?g40(8F#s_e>HYZ(r1oS?bFMDvgI+6>M>i6@8?ZwxZt*p~+f`m&4 zmE@Ui`f3QA<ChdDTOm8iRZEehkmg5_v|xBulz37hQ5XIm2R>tSayJi z2QH$n5*9sQ@>#@0*f~`yZsJ=+itI72VvKw`SH zXSxB)k^**WvS^Wuk&!@u8l_A;o}mhABRdLce~?SD1##F$r1DC?T0MxBIkY>dYRZTMqpR49 z&X=gFs*;sMD45s+MHmN=koW0=O_M4^zM+7x^D*$3jC)1-*-^iDSQ_;uNVf% zOq|^h`rc2=nP}gnM}RmGu`BjH>nlY_ri7X%dm-~Jk8mbEKhWmbU37Jb37Mcz&{=X@ zXGOc)&|tt9k>2SxD{52?gGY96sHy&kv`Z>KC)MK3bgG{ibq$;aL6#6O_W&c)Vu}pe zm6%@Tjo)P)TxK3>f{1O2NL-4XviAiQaiI%Zh$kV;4V*i0?6@UF3~{GWv8(tRn-s+f;lLEs<=stt&(_KD7aiS>@EC_)}5Drl-2?(eR6-~v4Tpl%jhddkc zT*r!vLn|hYJOq{@X%U}JU6(Q=BZt@(PsC6u;)@`SlJ*^h79;G)4gC>ZnP`S!>|$bL zdG8DAv^?a|0yh%?L;T($Q7&maUlcv@&z^Y`-4Yo}J^2Iaqk!g41H4u=H@_^DL%0y!Fd95h%)7OM8Vwb5>s&3s!4Lq#1}?)3 zrYAyT{)rCvGAkE*jQj@CwhT+XKLVEE5qUtQy-tGv&TcO!Vo8VWU#|lqZ2OFk6Elgh z!2jC=JkGsDU9IP=CPRa!I zH{G*|B18|CI1Akjym8~HNn*hM{pSr`MDn<~lBF@P5NnYTsB+V9)$T?H{tSGeBvl9m zC+bj58brC|z~FbSkefP4RPHJo>4+k{84({z;z37O#2H%1Y7Vhtpw%cVqIj~9;*Zer zbUCjGmvMj#oX**ix;4)YFo|x~N3J+Woq{=!g1!VbHW+D$3xvdM($!8{sn`wRI6*s+ zFG)LHfO5+0>hh$w==Pa|p}-Q-bn^c8mP5~cwvz&e2=v)K_s`vBj`H#(>xdAG&}u?LogCs*~{OS5(85TEuTnn z#1Vu>6CFB4DLWN)YQ&Go9Aqaq!=At+E$tje!@e)U_>cfW8j>6kcuy{9$KHMRv4)uX zAbGe1R{b^E^goSMzYA6SZoo=h1~eOku@aqG3!Ei^)=t9t9yN{nOOI&mtEjFHAbW$% zJh7?Sn}HC27b+@1gqa4(1+dt~Km~!3MVO6J(r85YhwMVVEsKp;2CEMR(fO#B%SDno zfrl;Bf8YKdcD}nd`4Eg&cZg_SxSyY&pAC$exiOgeg|^ zEkztYEBlr7gz*nGjp~e8W1J# 
z2wNGc1BnlA9fG0g45Sti)Qn^UO2GY3yhr?#M=uiyRb~NOWbENW)I<7xHIE3CCFXom zT+o~6N&~o~GLCEQ1+%IwRfs5}Fhpz`f4`o`)Vpuv`TL(>)2&OGH0sac>r$fTuV^_# zfiF4CqH~IR7CqLSD17-R$to@|fZJXzR}4VP4c}+mpn0W_LRxYOeyo#3l}rQavn?X< zXVh5AQjyPCm2@CT_j6UeeQSL6LJ(XJ63`<~GVeS(K0^j4xp1aG=&%*(I|2dTglG?| zNyJUCu^-k86^xy%%VcDP$JU>GmFGA!$Fn5Vkd|+^1s4-3Q>si4;R!N~N-i;AlQy1l z*dM08$k+f{5HDO^!F?gUO>hyZ_PD{9Muz{tfrWi<0M1Rt9EsvSxU{Ze;t^ZVZ>%WE&emb%5w$K$r zn1cL>OQ(YdL+|6vYI57L%cMz5R**A{?x`KAre5xpz$|bQW-0NBr_yJq) z`tu2VQ76%}Uit;X*AzrqE~I0VFNJR*C+#y%jra6~UMo1HujuFMx?L?YJnf5GvB$C( z>{<@|AvWIXT*SY}bemX+46si4sVdMIlA8O|$!3I?Bh;n}HRC<2sEZ>XK*~SXUIZeM zO466%3%~m7jg>z%+#f7pWF!vIy(pubVnx_@HaPEZF#L}9d0DlcwA=pkdY&-*&QLbc zCja5X&3`ryDN(u50HsvsW9y>c zUtH!<=P0vAvio+BH4hKborr0Nw4)eTrc|KNIoNO@ z`T0YWm4_nD;cDlK3k3QRuzk}J0cWj8RW z2Z&Xk8S(;U+mIX4^6O|2Nst+V6~#!_giCk4>I1B}^g^6C!~3csAVtbJmtxqsfPef* z>5g%`_w^l8riC!&boDuiQG}GLDvtF0XHCiAFX4FXqlxqRszPpU_%f(w`$XV3DPka zH9u&m?$-NwYlCw>Xt=kA|Ev=HZQql@?`XJ}$;p@zerl}xR2=H|ke{BY61v3PdE||V zx=hN`U)DD~h7d)I;hx`-md>$oZw{plQF>A6mJvWLf!+=}*e8tK=iG}WelLP^b)#&$ z2y?AKA>dN$@CEj-3rDCjIVi?6CUG{4d3#-G(}QRYZo*xaOMc$pSo$tW8G&yOy>xy^ zJqk&BU^_{5P7b^gq*E<#`Jy=CuJA#+RwxVVm+5Fe`&~h4xA8l&;6*wZ*dXgVe%-(- zS-*R#pY9$@h>vd`HiXE^$EEhAdCV{NOrC@erFR4~(-y6zTm5vSvSd{K=N;3$C|T zv8^Wz12AUZTW5e@0aDGA7BQ`WOhI59r7{uPgYO<7H1OYK7elM2`Y+xt+ERzT*ahHi z${Q-!nrYD~``JO=yPR}!2aqgG10f(cGda@P=PLUZL9PTPq&Bq~$0HGscuzQ>#HF!! z5ZvR!1CQ{YWWGP04Q~8mciY7}*CMeVSDmwGA6Tg#@-`9emOOU(&Fw7&A(u61=y4$l zTLexLv)5doqMS*6%PF~l|9kkAE@Sfe*8&?n4lKe?<;0WINM4&=5qJ>Hcq*h#r!tLX z?GLf@$oL#MjLg0ZN0O4N7pSd_(|p}()bQV@<}EYl*!4e&k#qnoQIC! 
z*zEuyRo;knEWy6kEDNpKBv83aPq+f_38|vVkYH@Wl@RQPCM<<_Vd6r>y#g1jA1<9S zIa}?%uz?U&XbQ@T73=Q*ZtwIR4Z=0BfCiS+e*gf`$ju3s$Znw$7e#;BUjZ zHlnP8w?Ynw_})@t0PZcqN)>*;_bsN1N2!ja1DsGXN%M7%y6l8&wS){%N)(0m;3K^H zJ>~bv#L(IAFMq70;gANtb}G=6>D5<{PL0q`MSZ9BFi3}eA3GSNb;0;YBi1RwVkN?7 zuuKW}yEjd3CrVW{sX}PRLr!jBhloV{1bzH;c^@C|g;H;u&L~O|v-hO)^U3}J!s7v2 zDU{7vju*h#lde{tIZ`mGdn`&;W~-&kMfh^mBn@%qiSfuJ5|ltsI0WO^f=>UtRRgrB z#W}vD0?-D8A z>zL#SG0(!x9~VAJhX~DfvGCRk^@PLJx*JOqB^Oq=0qWS0pAu@xkq5C9+XC^s*gwj| zyz)8R#@q3`05f>(`)B0gfv+e#$Qr?9v6y&AfhSkDI0Gcpz=H2o@aJS(D?4=_evrqh z7sTE`#pjXETS%!&fiIkZlZP_g$ialteV3Q*1DA#SkjcUvkVfg)wpqfd0R1E% zq!C*?DMC?N1z3=?B|vSUPJd=?4$xPAU5SyKc{~R}|GnUO(&t3t=YFgOTYtJmG%6~} zCV-S55CL!kOn!$XDdb=c5d<4kR{(asgXfo8PM{vh47Y7m5sLmiSV3zf9OMjXS^m)N zK$yvFtal1JLsgt~T?wBQOIht&5x|pv#0Kt*Zm~d@4h)v3sJR5n!KlPelz_T+ z4mrokXT9r1>@mwaCB71J$OdV{jsZRpEv=}ipa3-cq3#gI`q)4kyaC=Db2LDWxxiND z3#nkw=MZ9ZCe&;pgpV@fG{G`pq(-{g(50W9ZoDG z8f<;rBg8Q<#6Cn%!`R}dCV|RCQCxB3pobBCRB|f*PY^D3A=*0Ns17(|geE%O7fE@l z55JioYCgI}SUU@m-T_)<7oJQh*WGo*ZzdleE~T=yXy;u%^l$Xc@Lc3OJIFOpPhyNj zfzKnD{qZ_Xg-TSgxq0*>L>Kh(F!HsGWCyg7EUiehhv}e(PL2N!rGAq_@(l&I50@wxJB>0}75V48a<*+Q!{#Fb{hp1_vWnN0HV%@Wuj*9?E?^_Myn3*ne$_Qad2`z=i4-$sV z6HzF32nZ<8w$bMqXR!p`v?Obi@SKE5Bh4C&1k|a{8m8fl0%Cs7ptRJgLJ}v!69NXN^^BKJ!UJ%E#oNGN{(k7bFWI3H1y5J?$LcNG99qlBo zcsh$5&H-MNqhJAfT-n*22yxdBp{S)HTIbfLe)n{PJH8h?80-Xzi47jp&gR++7vFt+ zb%Ox=6nhuY#>6Up**D&e6zB%9O^y4bSF)7EXHr;&3O_T$;FrMb#f(bbywRF0Xi{r) ze)tel#2JnCML%i`E%$e51zB07>ws(#aL8CtKmcs*F8Ex+!oo ztfbfxd0}W>4&fGcq$oK;N`W-S`H)8-jlLHp(%#VzuYiRBEf0v8!1WU$|Gs_$$jlg93ucClWKE0gqp_tdt!Nukv}sdBWX{Zd&7jPrq_QQY zJ*lWjW|Am{_Q;Y{D$7LK<$k`+>iq8e-}|rMJRaw95cT<9pX+)ruh;ASx;9M+Wb9jT zzVWSb(Vy$-9Xt2D@z$-=Livf8O9O}g?6mJ%WTq6Or;3SPf@kMi3Rg`W41Z6PF``db zW91-jGQC86nk+JL(T=kSYfb}5YV~>@xWSP`!G6eQTa3JsT8A))5kadz=qFuyJ7V_J zAfJOMQdV{Np>GxSWg&G;bALeK=*P9n8E#i}MU5lK*0%+}mXH(ek=q&P{A##=+g5ARIgyIpPd z#Vg}ol!vLSNUb1(k(Bs7 z22GjtdH@Vqv3yE8aqBfLERI3qYSTcIBQ{%lXf1c7kl(gSJ{FG{X$3@m>9j99U6U}z 
zgpjV7!GLSOyA0B^bIPE}>mO|s_mB2u_YH}cQrEjtVFn1|ERQNVP^`J+$)u#GFwJ-* zOv>vYz-+yEdcn(N{^BX0P8 z6vhMvtbxcVsh@<95NMEyv3Lns>>=i8JAci2&)yT1FV4&-dC7$S%w97K2akOaGZ(tZeyB!YF8 z{BE!O!Umby7cB}Ngu-}<+(YsKu*giAWqSDEf8rb(4)LQWKX)eX`AI`+PeB4jysS2eQ!4YLeC40tZ(5T~1pec{{n@+Wfjfgnl8J2LqnbW==WNy& z`}~hwTV^0V1k^!4*MYpwaaWl~+##1jg| zfldZZ#?AV-cEM}S8?z;QL|Z2WzxMyE%#~SxSWEu%+rRPaO}G9>;rPZ^yG?U`V_E*^ ziyttb@#Y`><98X|{_j71y&Tj2?=t*VL}?-o0i| zpZ(sDX`4QpKHB8-PX}m}{YQ0e!L?zZuPbo*GUms1clQsg>D;2!`{VcC`|s-a&V2Nf z%BNB4Q9}+se)K$lkiYr#CL^z*J5D|Cob=c)#kZid{c$;uWn|>d{}%k`;y-K3 zH^2DC?=}tJr1+=c_>C{V`5UvhwEw@YWZ(F!`@8S2|Njn5Z~RsBON&3Lk5) z0A*aNvkKvE1iyLtfz~BrcgRmaeXX+!6^06B$$!isniBr|!w>HT4w`z+W5n0)7!OsT zT3e|jM8Pv4vElpQ$O+xDPB6#hLf>N10Ecl%W`C#6=(Z_u9Rtn1m6e@z?m^#FP>Nc= z)#t4-AKbscrJst5^RQLF{P#Z!`@it1O+{P&T&h!j9FgKpV2)3HJNlIDx~(^G2=ICG z+tQcR%o99rY(n0#`dNn)D7wZMU(BKkXVQHK6D$53HkhZ{PS$a*m`w}55HiS|vcs$7 zLo?qWfBf-s?)qa~z<3BCv;`u|(8$Pa-n=0GY2%$cBi5{0F_q0WH5JMQx&Cbc8i81D>emZpM z(23mK#)by(w9meR%&4Sd;m2NXSH5BR(V#)8sP|R2BwMELg+^UcO zt~sQ<8)u}Yr3Jt+wFMufOM?PlM6rU!XZrT-I~=(Kh2NI#+bx$bzg6&j=dokQ7;8^v zpf1h&zGb(#1i6cR#>|;JCvM-dW5=XPlWw)NXaKsf=<)asIDmZAmOk0?6wOh;Sw{M6h>5gWSy zqth`n{PR3nX5PHHtWV#*35O3apvEdXdv<||Ntr{#$i7=6w4Dx6ceW~2S1q7V)jYi9 zR1Wk=NR2DQQgdO2j_P0k=IwVoKrmN=t2>4q!glsH!ue-_RoZKR``UQdm@ip!9d*40 zmt=|ApKY`Ph;1ygAI8MSI#Z@EIq9EGMH_<>ES`36s9E=2qo+){=8=DbV^vdH*)0GU z_=6oCt78rw(){eRFE3rX#EM29oI3TwW`)8T;GwlTb)U73%>e8FSzsGa&or%Z<3hv3 z<7uvDmtC)NKju_E6X8`ASgl#~Or=?&@0%(2d-Og&E09)m*-tTN)~{c$sjY2>IeRXL ze(;(#kGE*_4!!)KvAEBmLGC&!G)mo-l#(}X@yPsP>(+AkE4P8i9E$t$af+^7xzf}8 zuQS`N+wj3Ee+af~_Ge`+8asCEmLGq-#J5g{?P@`}QbzODc=Ojh=WiO^VBb#{uJ5?L zv|K-8&z@wgU`^?&qM1%C8Gir3ix>VQ?M_lzF@tI){Y;Sk(jhaewgmaW&33 ziS#MsbKVT*SxLwRoDdhRqs;F>VP?gt@?JgXR3CL69dj`Ga{S_C8I=Z2v8Ec{-ddI1 zcDwAxG<-riE(fC)sr3BC2(W%cxory1pS(DICjCm{fP{Z;#F_Jg{?L8xA3o9Z=66{5 z&A5s`>*XAu#OUH*%<=8DoalRJSowLX^1;)mPj}zb@9+#?-=m;Eo}9R(>CMK5hGoc>GHGi0Vz-pOEOIq}@Ko)M zn+oX>NU|KdvEBm)+~pHnt7B#Zv>C6p{W@Cw@9)3= 
zkNKhTJ^%Uy5)z#Nybb5g^Iffx)&2J^N-SC8w|4EK0Rgwperz0}J%=T840D{ldUXa~ z&j+_{>#_O#has{1_fI1uR-(yM(+XI%Dusb|jm;$=cC*Y8>;RBFXw*M`DofZyUM+pK z`GN&EZkuer+hqk zruB>F|Ni%1k}*(Y$qPqqnz{b*s+VjKhb2h?@C~`}ay<6VaEG_mMe+9Ib^wnhFjN{o zcgU{df)m;FL6064a76p(?U`WXHG=Brt+(ECzJ4-SLNFl3R}vARLVGj^06JBq+D+A2 zdGSvERa+h_W;S-d3t^KHiM)*Cm@RDuUl^!$i^alzCk2jMYqxNqm7 zMZHGBGl5etTYlDbuBNrWJwTDNaKK<28uJ?7?%J2Z|xk zwL;!s0^B)78GR*0bd|mdJ>#BDnmn00dqbvctqBRV93V1d`MF*^`!uk@!}`}bhtzQ+ zCYp7>;k`XqVAqJ(TYvw<>~qJ@#DuCii~1c zoV|YC4q)AsO(uo7M8E}~D4Fe+1_8~l`oQ$6)AA0H(JPvf)s^vnVtA@6CI`UBwAb9T zp3IPiLN5tUXbFB`hyV5V+inrXm#_#(2I+QTeJ1!lSpMT!P%sPTQhney;+d`UO;o4s zhLJt~=uT}kB+hUSJ`IZ0@f$au1m?We*ch{EQ#T#A%VyQq-hnW5pA8x`=y>IFGwOW2 zwo-rl&0~PR*R5q&!N!OsRctsN>e1s>KmGi36l8Oi#|g@HTffFW7@hnQ>t}6kt-YdD zZ88df5Y(`!u>`Sa$kwzszE~UCbfiyny8ij``^JqM*GE+~ao4W?1!vB*BF`31rq?H* zNQJq3w;F>32~>HbMvY2X7_IR4_rG=jekvSTB78I_z&>1M*idzKjY*SUxNqkhs0tTGOM!sw;Nv;6c%a@9^7zKe<2MY?(XJf2McKb=;F-d zJW?B6|0O80*t$Av1N+Yh55Bvk90djq-2^ELrAlR2hH|NtySuv_V#Hgm*K&_ToodXV zKi{-z1Qyl){wIhnK3M#)${J(GE=0{^?&WoamXQaJ<{0ZwoS4XC@&RmFJNl@=fhmUYNf4U~Fuc z+kjh07P$5xniyVGSU8J9wf9FK?U&8yRQ44`gr$eaVH;b-b^p3sn?4Knf5O_|_De&K%by%Hc6Lfi z%7c!M6N~1eOsJq6F-&{s$micH6vHVkuxCqz0EnlG%C|p8AcIFu0H*g%I_!+jJPdaU zS-+OkKYk*e8g&(&PbJwzYK@qnH60TQ3D~z0iUq8{gQi}8R*>4*6SnKn+Nw|KLU*CO zBu`L^*80tZbhRy=x^i?MKr|W7o_&$^9F-Ed+Q%n1%QhW(u=Aw@-$R(}C$L#;ne4zR zM|8di>;2xG0d9Gbo)MnvL3{f`X;^dDuVKK{$M?8i#9h+A#g|`xQ<+7;96oib9pN|T zjDw@2Gx5*&`WpMd!M2HH`aP5U9^CA__3P6OEKbb9gTpBGr3JlkA#Cf`;)1pxk+jIB z`RS*Rq-=o=OzGYH9M(IUh_NK_I8_C!eo(6fqu5j$QI(v28%a{mSB@q}o%mw@-U=SQ z_a~o3qYOhDXPfS>ce^QU_6TO?Z!EqfRP1dI6lrhJqK}1LtzoH%EyHQL-hco7Fts(Fo(t)|Dv?ucedS%zw3YAGLg6r?^QraMX^T1# zi?@hp)~2H2Rh;#?{wJhq=-G3W&mCN_+{>+{F$8CEQ}LDwvelF>txO>3^|rINPP(7> z%9j2}qujr7;m#&I^1k$qolNS^=hrbobLm6ilsd6x6UVr9%>Np>4EywDw$l@sHR%XL z_UzeEn?-Txuwq3ULGp6?itUt;!&j_W0Ty5k!LuS<*VuYN)jOY|ep!;`5yi*%sy*`& z$4#MkKXewFOR1>}#Se>gVj12f*c8IJ|ac@o8tEyEg^abJ(Ow)>1GsU2qU?>MJ3wO_R-MplQ~I ztlmH!thjk$&%{F3UX zrtb2d<-<$NA$2Lv$Ts5O@oTUvseVTfeE8dM3uvKNQz0ZWsH(yU9 
zhSYGaX5##*sy7erYiHCGcKU#+8?7mV#$*OjB#pGrTEK*;>5pxnvbdvD%&mzEbyu(Y z!0+FN@Em2jV;|L*6N`bsr3_v$Bsh54O6!FSf2Kx|vqMX=5?xro?Q(*!-pk>(ozE;tvRJ0>^~1>hZA#fUxfGzrPT3?22sf6+ZXs7Tem| zI;6Dm(x373GuCxFgM8ZP-kTo%Jp*vO`ujI0G)Q^Ip;!Q%QEY8s{<>CosWOtm#vj{8 zk6s~gt4hyMhmprVcGQd0a5+gT&V~5f5)?FQ>=c%gmJ1BRVM+57l-B*%b*Vqau&T2y zHBV(ru62IY7Oxwp-hu*N^Nh0J&_>m-e!|bxa<^}<#31#{vV8&9$>uJ=pB9fbLt~Tf zZ^h5T8GF#nS)S3X)2JWwE}9LUFkuOT+EBs?J>Y&$?YU)XA=%fq9QJB7H1MQrv;>ir zc1%Zi#E7IirOgRNF7nzL$~7m|Fot;D;j#M*Q`$+wQV6w9R@V5t*K(iVOd}0D;iIZD z(Km_S&ehx45h?AqD1`ym<7 zAZeW~K)Sc_sspoyq(7dedjfvCr`x|m`M5BoMz}boQxwe&%)RK30BlZS(17E&vH(u; zXIR_ry}x!}!(R}YN9Uhl{iZ-Bt^4j7?>U_zn$Y^OOw}d}YC{QeCGAKs)!bLq>L)4( zmFgu>>L8=M#MbISr09g0DW_hcYIM%+1FHxn%GehEv=T7%j@_=etyAiM$}Dl=um@Z* zaw-A%tB5jq*UIqtX+e=(YqhA9RJVLcQ-We}NsHd2A?oTayx|JIyQ_*N5fQ{*Q%lPP z+z(`8L3ZnGawYh$r{_4!#q3XKLa|^~q=Fb6stxH78xxA-YHAjtsI^yL)ywnW(W*xe zPSq;~4UMvX_@8&)X{E%fCP%IsHifDQsG${RKfCGW7o{t_LmvCjn==PPs>2Iu4%cxk zI$93xa|WzY8T0j~7H_i|^1>LRn4B1SGO0E%An(!r>#5kYd?lzBThiU+`TXy*cqPcRmvTbYAod82)<4URI zNw=*yJB+Cta_7I4>utcXhchcoNaMpvP$()?v{(MbTAz+90-sd?qUc@ea;l>)iZOB&cqok!z9wOm8@k$aJpMIy77JFH)>58F-;K;flwke;(*x_>i*9W71 z9s-zfprS!~CQ2EH%O?+0*wYeo6Up>o*+3Iqujfl=N>#7ADrkkVvvUF>l>LbQIn92vlm6&t2!wTx-gAgWogkT4I_&>#d4SY9nM+}48Z5FSm0nU` z?G}0WN>Zt=TgZs5#8?#5_l7@wt_u=E($+pPCo|zlX6@F+s&DZrJ824?$?Lj0V9%gm z&fH!yeAuutF+kG8>R1@B7T->}?UC=ykuClu4Xb^zs#X(^v zNlE=wzp0tKbZL}8?*h>{*oNhDe^D+f@S&ED1EZ_00RN-{kBW*?s5nvhJ5#$fj=XSK zvon)mP{Ejm50BZ#fN#(ErF=)l&28ETo*iNna-B=9^KLK_hOgGuHXsWU)LOH1O2m@x?*E1j?&7^coe}b%jN5VX z&~~t0fA+$KY5`<-?b_wlY&d6*Z1kdxr#C=NFT#kpgav*8vMtj!I87}n_qOxsdf_MM z9DK|-GT(x4li~C*I=Q*CkIx$gfkP++p-I-8_9;O*W$I^_sJ2z6csKXK7LYk`*q zY3n**#3dukvosb^|2e<_-?@{7vQb6FbYbA76h-y$p+jfl347$qwM}JpxFSF3?95c@ zH*jFelP7Bv@s`=M zysn8f@AZD$R(&=0z#koP{S!h&r0%g=@cMp~+l3ka^ajF^xvHeEZC@slKP7St7io83 zNlcRoy^Th`Ti|41yxqYC@^dnY+vZkqc0W~q&k3{D9bG%9*J8MYN* zUnl05ojUwc!~~xN8{H7fs)w;Uvp}_eIQjCCm8qBc%tQ0{9o4v&pC4W4eeYVX6@J$m 
zYxaHsl>4^&(4i4KFN~mL5G6zXlcFb+8x8-b&M#Wl7I8>L$2nwuDW+Dfc?*lx?dY-#vD{m6+?BQlyLIu!2VH?zKBj7#);>pxl`-%i$&mG0nbEB?xH9 z5*lcLW`e&c39!Q$pVK3!oufyopg&=^{8#DPa9ix%ku7`}}o8>5m z2@Ljl5E912r(fAc+`fIgd-i4RdB`)r4Q|$bistQEQ z4~~dBP6@Gcjkde_Pt>XCYV$8S{JkGy{thgQNAwi}&f^6SZRFB@sqxaK^?i=T7w>d5 zxOVNDBz-!I=I(LlJ(*_6r10yteX+|&2toGa9-i-ATlVSMJN}`5Z*x&(REs`IcRdOK zl`U6kbQ!$xDR&t*2b>bgUMw(4xi{9bwH><~9g6*`=|K76Kz6hC^PGsV zutRvw?TRQLAz`lTOlbT1qEN5@2?vdaiyoR$#17i{p1+0;YE?Fi2|?-?Gy8#vu!Y4hIjZ7JR-Ow_2uWytvZ z63Yb(_A|C519jtcQfpmeF6^6{Y8UXbvVxD=G4K;mF`|ae3%)+FxUjJBV$GIC0scBC zEc!9kDlc~NinCPpY!)9 z;eNBKSDJswY(^!D@P;tiqo8k}J_1bR^c%P*@!7*!=|HDS1@FA`P6AyOp!Uxo@rYTH z5EBSLBLFTu2f7g90-F;?Utj~kjYUiiI2pl33~z<%S5b|GL`JSZ*8tuJAK(&i-_X$D z7Lf>Ot6LBmexJiY&I#UO+IWB86ptIzgw!DfgwgJlvK|O9geC&$(+a^0a1m;ujLW+Y z(7@ncdDtnPVTn#4zz>N|f+az;Rsq1IP=(UKFCMTjB;=sz063qzlUA6AB1IanJ7mb| zifp$Hc{DEa(POJV^_*g9!E~^I!aR2ZqTxgk%m?)Ql%#@@R#cc_D`zNZq~C*s^(RpN zl+)i-z^_cSu+w+FoG}L;^Qe;%DYOzqa_iKyKe5~SAeuvmp798h*iDu-GaWw_b+W1;SGhmhUEZ3wz(u#bSDDqB5)Zva3BSS8MvIV20R+XqmAOifH_j~%cGju z55Mxb_q{jzj<{_mrly^oB$c2wzP=U|WyY!b%hKX#1ahdD%nr>P!24fGV@mn0mvPiC zcw^Vo6~=oem;n-?4rf3@|)_OF*IvZ%QK$slU}_@3xevoZSHJS29)=RaaZW|14kz z0L@T5uSmcfE`(V~?5J#~jg6SR{^%5n)dwLq4&QzE-B~!&-`ZB|=-4L=3$mt~!^Hz_ zLAvX+D{BEX8B=hsN(u+p3k?oFz}s3$Q>RcpcktlB+tTJB)3>N9O9rxyfG>&SR3lfh zA|ME0u^K8rUmsOf3Rj2zvP$PNrY%=IO!vI?_B$``?%T6xPbJ)@LEuZQl9HLOEv~K2 zHuBjy!(namh`w9V-=q@kD1&E%#PoY?#|XK|Y3LU~Ppa5x-01c7Fn16j>5Z7VPClO* z`aR$-gP>pgj6O}Ul^VbE?v-liQrUF;aOXN^MMhn@vy%2&QN3*Qci+Xacbuu^gia%( z$swi{oifFiSYnP=z_@z!df|$r#$)9IuQdl^TcDH#%hVM5J0UkIfeN<|vNST&J=aBLm)K$O3@nxV)e={GlEy`a63P2qd2WEU=;2k!D{VPnEz-ELx9?X)n z7&LG)t~2k^@t3aho*S_0@Wl0xlIqu5m97{pA+xP5kwFdBEyrRk29MuY;&A4v8c;VM zHXdd|sGRzDT%SL_U(EemDQt|hp5 zTZa;>b?U39+i5SAwln0ES=rfe6iVSDN0NTvP~PMztLnzPT|a3f1}Z&XPTSz<7%Rnh z`IPQ#Pzi<{+14w^Qvda@fAt8|1i{9VQ9v5d%f*3HH@?V%iZW}Ru$SW3zAQ6X5JDN5tw+HZLx$93A1Epz(&B7q!n}F~Mwb9OLd8hG557c3#$7ee#M?~4 zq%q@J>f4zDGEh`4mGSO%PwF=-Ra$>cB#NdoRt*}Y>$)p;-X37v)VV>jNH7x>B!eD} 
z&jc`KFH0O&H0vdhyhQu}ZvXgLA!%+;)dU)6fd z4#w7GWnBFN0Fz(y^DAMTB3(v&FzciR+R+6JiE?rVQOKL_-Md$D#%ARsndrgP;;i!v zQer3fVw*kj<6fLaiAtM?I8jt7?$y$njMcu6%y-oK^JiD6Lk{IKV6A?y_{kxrMhNMe zfcDIpMZ%Fnhpvd5m#*x$iUe53)hhAW$t0$MQ%Nf>f;xbnHnO22a_!#=w=jeJ&tC7G zO}NklbW(v4FL{*SxbgLW-hEfNe8I#7$p^wl6}$6Lg$YrGk)zoh zzc51C)z)|;^b?t|vAwn{*M9W%^0yoR1K%9$KCSb&-wta`(b35?(NkN8tJo-~(g&8j zYC#qxb;6ijT2M+E_~rab?O**^2KK$^* z{Kq}4u3ebvF(op8+rw-Eato?rG!guhoauBoK>J+FVjj<~(h1cwSRH#W6ql8#EDfCvC)K(g}!HGJcf5Ja>i1*~&_D&SV*+Y7Es}tyy|U zbIxOS%tBE2a=JCQxX;R{t>?OpU%tG=6e6z!AF%!@>7)$NgkZyy40j{z$uyZ(hGW2` z12Ys&9$sEthL~&Cn*A{`TozdHr32B?N+la~wAF%#0G{Nh^~szcrv)m#S=s{#^5^c` zk7s2KBe;I{*=NF^nV6Uus@W-5>)XDMWtL`s8l_kjkn^bZbr&wo$9OV_7~q`Ma*<0Z zI*K$?`=|UOS4Om=JJ3k;izrxj36r`+Kyp&x;u6O3V_4As&OMj8i>j!%CGcvd+m%Pb zg%f>RMa^eW9R-|gF+2aWS-BQewPn=#uk|9pox0U`@7_%o{V>r_`rXPWR8s>ctzAGu zJL;G6fO{B{L4Xn^)~g)b@Z5I~3eSqzKcY>_O%b6qCt#z( z31^2TM_uC~Hm%s;LVwf3o7XGt-8Sji7kk(K`djB|SP<;<7IN*Dwo};UyxFFo`0PIL1iVRsN8j5Gy(ta<>=th{A10-2bT z*CKwo9cwg4aH*&>(||?AbziC3hJ0SXXfQj`#(Bq>IryBWF`4T;%#H#~*`f9-@34=G z%DQu}zyM-7aL!DRE;+F_t)zK1A2F$GvfI+MMO-M|BQelW2J!Os15eJx=qirc?KCev zw;oDLIxqC}DQDsfHOn7|XtsC#Q&PIANX9PIQ~X58CE*r-y=kIKEGbcl#0f!Nd(4zWt z-lm3@R@s7UR;#iL77QN#dyKrgpp+!1Z0l`aS$o7H_rRm~&2?#ZDug$hlO7dpy-LZ! 
z!Mb@-C8(hh-PpL^R_Ju-Kq{Sg(=+T1=<>_2egPOYeB8M51xX)M0}&g1e^heqYGrnH z-0p`AgEo##tSj9+Ig!fEAT8|p-ifGf1X}XKJNuW4>X;!x{>dDV7(Vv?y7hsI4+q&g z5B@dp$Rp)nZMWUA!S=8WTLi+%I5Ji=hl?(3B%JgW?Ji|Fd z6}dmEhB0r|EyZWg4x2y6{2KNwx0sh$a+0Dof!J!%Ktt-5<>yF-iOPDQt@-?m!6m*2 zr=~uRiI;qE`ptZhG6jiDSd@?c%0c8UVCKz+4Gro=HG)i5URiTNUK+5Q9BqWRC(S#3OU2JP@!A5pQJ{G#R~I?SgsHkJZ;p`Au1X zAi~#fAY@~d8l{Z{y{MWaUbkG#m8Tli6hm9l-mTENCxQoXGl%V5MpxzBQ>Yyf+n6)` zpC(bVJu`8MPa;Z!%+}=nJ&v`%_n%`Y!@jWE7?R76^i8_;N_$mZjk6I^K}4bgn9!x^ zRn#5&9Od9;kMt;pP@K8Q8`Ts*|M0M4x4w7E4{eX4l=^)3k>U?e0xn$X?LIfR4}5w5 zu1LF!oKFK!BHv|x@nfOy{cCQ+3WXajzg`Ls09B+|_V`Y%{rFTJ00yZ^jEXI7kM4C3 z+PHm}k9)np?ktCi`$oqtPLGYsqV25B?o~*qypZxHss>q(>b29uUOs)y{pykR6V5xg zmdDKPN0z*@AnE6l$g;|-9&sEO5sAd>qmFhRe-xZYaZ{~b#c%z3)h%<4j7D^>b$GVd zE9UAOO@&7%JmsDim%eU`ADV?%$Ao9DI)W<6!h6UUU$|fr{p|XyE*Y_@x%+NsAYrX6 zW-Id@Zbc;#hw{=Jck5g}+-RspeH(dU?D}yod+g0ubWCmq@8XHCgsi`~|4W5E+LR9? zy~4!GOmR~bcyaG6+5)8lCe-GOS!ayfSN8MKzk#$`|5(V44=%$t zlV|InZqr?H`1@pixAD{wIiIaf1T3NTiPx=hNBL><2R+bx!DDE30p2z`f0j0QO8p^K>3+a zW@hWfUulT2TU_&C!Wu-UO)o^c%5s2YQ)cs=8s%!QMADLUClIPXtXZWhjU{6rD;mh72m z2~+{BvKMjQ+V%iGn+S3!ON*`BNTWRpLLEv>vHdVkj!Kw|34&pr zS=9T~OowO~uQ{hs;84t6_Q;=RWp<{z74nG)W&#HR@EB8mXuhV7PN#eIjt3sMI&TS) zj+uaXW0^FdczKtM(-vH)=UVf%Ptp)bgfi#z*dS;!d(nCM#G3oIcds6|43+^WG#j?o zgRU_pK%_$ zU?IgcTy9%NUuYU*#RV}d@j?-?agXh^?&d^65b06+Rs1ww&6m7#bNDDF_Z}H?i;Ocu z0#bqR$^k(~D0h1e`mkqDi?6@lAEgHNrf3F6SU`6ZuUhYV)okI<#;LBC_&$+^6^WMf zX%rQ@u^|}?+u>;uS?Xz5V|_36mDODBAR_oBE=WG3Rms*+| zD0*BHI(1$87+yEcn{v>xY8Qb-MZ|}U#Mj+};Z&>E_M!)nR3~2j6K1*5icy^&_KNkEhCGUI5Cqz^M3bw-gW1sk!?rkexjx(KGON^k8(zbyI~-xu&0?@ zIqb~rd`3!_!PJ}3apl!N4NMr3YL1#zgr6uJ+t|0dftjI$Q@`MIimnEdc%gG0Anj@q z5vqJ3!WfI@-#CtR8cAnZ#(1s>6u7gp<^$A{Om=NJr*oa;#;%ib3q%YPM30Mfa@_7} zSCM~nK)Gdi`C21BS@4b>vTQHzwlljRVpN%`5nPy3t!c}vNqO%`Ip$PK#I4Qnt5C7hb%X4;yllMI!Re&%!u9n_Z|5#H`L?f>?&!-ql9j3r%aNdz zdxif36brUH_Qbi8JI1q>m6dg0ZgAT$)vl)~^e6569uYC|KI^w~eQ|}Fq zT&J1;e}+=ceatA+evBpY6pec7LL0$?s0JM?j5tKi)rR%c*gEa#6Wfv^#ZE1gK@Zca 
z9{U(N6b_s;C6qVSKnqGm3uqz30bz(k8JF<7U}wvgM%4Wi6?pXAv`CNxw2K!!{Ab+z zB%#NWjFn5BPZnjBrX?I{oUpKJ@`Rbyb0agSlvPbg{B>Omo&HN^sT9k=Kvu2{m=2K9 zLvP0WAAFELmzFutf?DVqUzdR~MDT?bbzu#++s@IYCF`n*f`dk&16jDO{|IC+5VMCP z0#NrVB4^8hAN*qDs$Ydh{6kO=xxl-lt=oUfgw|Q~%Lo_@- zsHDwSrblV+85vnE0nvM-)tQhrgKZW!Y3J15cfJ^J`h|>{%8c$+@PecJW;vdm$vTUy zg?__)Y2;``qg;ZBoqPxFg<<*HYy@f6> z_vZbD0hOFMnT~O;V{>#>skNZB$-KP#{IWDjSPB;>m^vQF{@IFr9%7{JiA7MA=_g^s zO}$C(6~nG_!$NHHWTGtINH$lwGlSf$HTiFZ+h$D#?GJ5UmgD( z$?0nei__EdzUmw2YkV;7O)X_y&WOIn1v>L7i-bTGq82eaQ;IXu@Fo2;!PVyB@}`^& zgxxJ7+8nvB7G$v#0K5$s%QAkP(SjZT9!-a%LJQvLqf5uJb`vKs>BmTbM$MW63imFS{^L?P} z#-Ai?_$Dy?3ts0txU6@RqnN92h&@ED+>b=vudXO zhlpH8w6`N-ymF%U01GNSU%em6(lhdb-Ms13|7)ydKK$Xbr&LFgg%E_Ag3-8Aq>T1E zPmF2O*PO_R_pC6F_pM5g)9X-bg!>MtJG}JU`Wo6629UzFM*luhhIYz0LaFu}%sJ|7 zI7Re_S3<7eUjJo|)`gc_j(+{izu<``^TWV6ABZ~?943V;5kI(-}bW%Pgxu~ zXXZ?WbTQ+*u-QwSJ3#Urvbv>3&ucIF-m8n}sV`k}7;i+{Z-mpN5C~B@#!eKXMkYQU z^$JqcKVsC0_++7ozC~9>fHY##myj^RG$mN>XOmvHq~xdb9;ngU@T0g0k}eJA0Inj) zsAJ3l;X_2<(Y7Oi0?Hr!wKz9g7j(WV9{oH2@;3MX-R9kAIvHVma>p!Cs4!4DGsk87g2*##`#7M-*yhT%cUZl{ThLDR;1#==1T% z3vZ@juwSz~6{(3rwI_wNAq}L!@~(TE_i^Ox%ZKKJW9(c%Z@oSuj1v1^y?VXo#g87G zsRg2a#BW|r3jY{a2gQX-aFbnS9>b7_vkr=e<1E-c{n0o4Q`7|UV^b;zeW;{V$>ez@ zMl9m}fqX=L)fh}_zT+F8ACXy<*62SmpzB15(q@z>?&GV_;KbH#8Yyr+9i#G=PUa$E zw0+g@g3pD@7N0e1su_=|OkXiyhA^j0 znL?epqhQ%nE9*bF-tAtM=@d!DzG@pn*?+v*nz ziH_2qRNp<|DjI?O<^x0M&WbY>a)Y^U0Y!DBR_b807_Q^|k@{%&o;^xBj}JRs^Yjg^_c(_8 zHr%}VdcK%({Z;XvBEx`XB%LhpRH6Kl8xX=0V`YQ`V{62s3;CG9O346r9L=NFk5j3@ zHb7Hj9X*P)3} zwpZkw8UfX%QDQ}s&cq%(xKcoN1d!C6MaVmkLBjRY`iY>lxXhlhaen<9 z1VA^lfyDlCy{i!_mF%0EcUmX@mD$HGd%?Y8iGT6XKK%w!ADH_4zaX`Wjs#&lGp$OE z+pu<_2<+3V*Mnn)dv@<;l-GUbUB>p)wO8L>aAj`~S!zb1F2;_9S5H@Yc%f-w7y@3m zwbu~bXz6Y9%t)XTJL2u-C3Xo>ZQqS-+dCS?j;{Ym`{dQ<@N8_k-D}hmoQ^}?$A?5j zi2lML4MEUFuIV~JPhl09>|DJA_^*Xov=N!%=72e|LBSbG2$4Z^B!%uXu%l^9k@Ti9EqZK$BxHKwuW?ZSVb|L3obB6UVS z6qtqYplAp{m?bJ(wjpHL3`M}Fw!z#Taq~B&ZdJ(E&c!|{&JB9CJZ%wvR9EH>6uB2Q zjZ$L`D4cO-{e9Qkm61RV(sf%uiob5Hyav?O)bJ} 
z+tzn~Jm{}O%rEwPoh>b`dJTu+?Yl!L#UQJPxHajB*pIy?qgcR1u`*kOQo|CZPmjhQ zwr!*K;BBd@T!{GMicBxlK*=y|{aE_TYC@QcPU*^W1@fnSZ&e$2Yh`l@ik`V0QzJud zN*U=_%In!rZNloa9_2l%4DA@JD9y-SY-VN?r3Oj0ta}skE$G_Yc){X%tf3_XD$wFM4iUQYXv{7h8Bw!0}~Myp_4}?qL|Z4Vm##r`@O= zR-p=g-rHdX2x2*V-*p%eNP29E6?>gi%*nXI{a0lT zjz9lcZ~fSY|J_Wd+F^i#U9Bc&^GHWR?8OKE67?2?r(k*NQ&vG+QvQbS+vlEjH1R6o zL*_9wH8jq}dg8J^ruT}~B`cYAsm}&AavOj2NW5X2T3||8X-SFBhI>b}*6op)3JvR8 z^pd?8XbZ}H3BMs6>>(6*)MGHgi>(IVf*rP>Q z;j{fK60JNA+O*`kH68;$V-2?2A*{SEkH%`?hK_|)zN!Pcps^cW>)BX8 zwWXw2u6*6sqkx%>fN1BL;T8UB05!V7$gOGNR1P0m+eg71TeW zTc6CebTag8{7?!`E({UDX#H3Lv~~YoyX9eRJLsEIf%4U_sQ>m0pFK%E^z2TBKHFm4 zhOQ$X%y-LxUcp}Y11dtD-Om9~$GwQ)Hm70_+fx>fb_UE7{(uZIB=g{0vz_-f>fYdy zx)qMgX_})&e@A5tg<`dMZq8RTQvD>hwR=Xp>=pEn&!;l7IDt8BnOgN-yuEccxX;Cd zb9TsldTqvqFN47Ym#&?>lFmk{00p+V0$Mc4xFN)}O3`9v^Y49v`k_7Tzh!G+pzNd9 z*lG>>yI(!hNrOYgb4IZYwcXsSQ#~@NLcCYMABMYsO!VAg{i3UPP&4K0GvlI3n-`in zWHXAhg|J7(dscuj=x)bpMlZR2i3G+1n|tZOwUpY%F1;N)vcq#{{q1;TIpxn(^&&tN zm5lO`StLPo`_HR#M%Ueyl(oK+ab`Fad+$<#(J_wqjXS!USgeA!2pl6?2!2gVqJ2~r z=O>I+VnR@8M}^)Z$s8}9wBw1Wo!fWT@jAs}cKelzj{1F4OK`(7Q&-IB;JltEKQ}Rr ze;{f?94Xe+W++#n<&n9T_))XgVe>@S9$P$cH1b`kmZU-QDCJ^;mLrAJ4Gd_K+=pFY zhuWb*EdT9+17#;PKsDM=^_81w{h|hM1JRj&Qo_1E%;d5+^o1 zP~1&z7_~nHCE==K-@7^8UfIng3;&?RvGk)6<5mPgQY|S>80FA$ZN$s$meM3+i$)l= zG5Kknr@c`nh0HY^KiT!nCEssz59MX!`yLLx49JK83&Y(P_cM?Ef8T$> z8((%>CXSUuiDiT7-r!MU0e_ouN}Y_R1nrsClVnEsbRk&LLGp%jQ9F4l?09wonL}EC za1A5M&;*XSd5V8fFdf==-~C%;J)A$eG?Ga|y%sEY7Oq{JRq_-jw39TV8uVfJ?pkV; znAdl%L%eE3!&FJUv!4cj)VtagqsLWq;`+-czBg|N;lY%ZEMA1TM z-V-QLhF4EHq$yX5#GFvjzF0!Lty!(?;MlPaAL);AuPAe_gBr`=(vk-|8^?fgjI3Tu z1BxGBU6ZGLaAs3Mf!)QBpfO=7hgxxS)Z@kHTq!_gh$V?piX+y>L1^Zn`lcA9J*xTB zkQ9G&&B(uaQI8Pvv@^TZxA;0j-g3Q!S`^w=1cyo*Vb*al11&bFJS~YKN+T!HxtDQE zc?a6d{t@^!rJhg^6ZICwawVilD21v{7rzk2PTx`1D~~y<1$ksQ4chRmLu1W7&PS^Z z4YN8*+x2Nv#N^V4{+yF&LRV{!$Q?+l6_*iAHj}8lV(A`3oF1MdT_*u?L>B5BqGIpj z{J9`<$l=QD*H)f>ej57vi&1up$|vd_<9?XX%|Q>Id&d*AsH<|>3okCY38$Yb=mZgX ziA$mgq?3Zgnt=u^lhQQVF3%PftimO)Y^|SP3!&_2%euX=mf|xet%_JIfU@I_Gfl+& 
zc=#dH^}!cMJP7IMUW1LX^nrrIAMK3qkHBdWe^a-2KV{{3294&DHHUddTjTE_ie%uf zk~_WBUuG}9OdGvmTsD=0bh3gTuu;T=bM7)AD{-t^AM#93-UNwI*f!vf{Y8 zN!r&<-<;#>7S!C<<}&^f^H{-XGP&l*_GUY4Dr518%e4|G%?U_Jw!s$*5MnTcoU&60 zOM6np$Is7Cxrkx&p$0CY2g@Ei;?X}(9PcXC2&A;KIb5O}g*V0m4r9iIq1YgZ*#y-f z*QezemQO3_3p{}xW~S9f>h|1@vbnl<_l>>$Vn#4m0UtyjD}EbcYNUrs$_hJmyRf{3 z?rmzLhq)=eumgpVxsQd+0?O{d#m1D|u)g5ip&Uy(3Y@O;9ii3bvO zmgUW^p97e8xCr<;m3Los(f`rq!X?3?Mknx80iz{Sa9I1V;zf*XU-D``NSFM4rQA{C za|?H(?C_U{>c;Tte^6}QCaz>kNiHtzUi;oM&gU1hgEoM9feOj<{rB5%`3UyBwsU_B z$Kb=XWJyHOps6x6gxsS4-ssmBT6y#}VvgGhLk%5s!Py^TS^ODA@=50KO-wwXMl2;t zjcVVej3T-P5VsnAW;{Vcd7vq559B(7Z4oB<|G{1~S!Rt*y)8mGCeBcajYWv_(1Cxr zIPkbIUCcfjOtrN`SB3g>i6D6_)FJA(c(?|cMMC#a10r51!rFjbO`!ZLG4@DVPUe;N zEyMq*SL7M!{3T-YRUP189?0)rQ1Prwj^*eZU)lR__UPGU?ogx@F7op z9^9J4wDV%9w@{bAQl3OUfCe3nk*((Z15!%s2AB{K! zZ2??Q$z{qIkSHodEkl*cMlxhdliD>nI9LcI&=oA2u6yjUR3&xgaIKessJf);L9*&~!RnHs^GsiT;Eo*jA+dF|_$bq2m?MM`C5}uK zjU9U_DAb|{pYZCswqz)}RpDp0qxcA!fb_W!%OE3e;w;mnhZz)<2wOZitfe`W5?w4$ zPM^lUr_Lk)EPQ!r&G@2g^MlEpE&hz4@~PH|FZNvLJ8DDNq|Ojd-Pn5h3rF!EV-+Z$Bzt7NAmA$7k8D| zBdMJF}?1LZXa+kRcn)khDE-ewDBKF4H#|*vc5F8nv&X;Vb95mQIFy`1`U& z5Wbo_zx{r5`d#rnp?^8+;SU#vmSI`pmQks6m;m7BLs1G>!p*ul$2lY~4gQ$h>NX>$ zUYw$~Ne8(PNu%<0eNhX$0#D#nn6SU{Z^8KKCRn$hZub(pVB*6=$&a;7yye8NGz;;p19|CkjZxp3@aXz z`^$7ykA$I$H9I>=JgY&J9M?WP4U(sgx8i2qi(~T}b+vZo{*(u$DOLcepNvXe%y_L* zWN$99>nngV#R?KgP-YAc3Oq!@*NMJ6TzBfI3&{RN&oAY|=JWSkTNCuXn_?Qm%DHsQvRnTL;*o|?g(ln0)H|sE*k3FB`!rHWhi=_EH-0RFGm-hX1dIzA zF@F3a8Ys{3NToTi&9s}E9G97!z^53MSpE!SHH*Shv>H516f5wIYf4R*K13G)Pqo;0 zak_1#_-AsyDE`_~YeA*FZV9aSo9o}kj~gn6d`Hlx_f2fhaGrt<7^zJQf06{qq_Iuq1Y9I+m9Q09a-{z z`r*i(sRwEvO#c2&$@7izpar^}g$=W|^h4<=)eLLk=~xo>bt z<2ZA3r;70i$jIvjmmF#e0N6Cptj!AhYfN<0@E`tkm1z8^ckhEZP<)B)!a}Tr)okW{ z&oIFb%2U(&hg}q}N1W0T1t0H_3XGwOAoJS7-zP$;R~oNn`06uDgACzkF_8Oz*gNyM zp40a2H^wru%#5*3ma)VrOSY1Ij}U1kEw+e4q-Yz=491crMahn!e*jTRlyv-75Bxe~Z)eh% z6BlR6%-@){Dzvjq<$M-RL&2?ly1N`EzejoB_ZqN3YUtnlo*#*s;H1MY68)$l8;9V6&_26)#v^by68t00c*iVt|E_2eEVuzE4C<6;_*?~h8qwVay 
zZfr{_}-r^(5MTX?c6y!WJiFNeS( zS?)7d`8r6@+1#IE$NO9!fv~3@GbCBo!T-abB(iLl=j(o%wxZ^CICvHzhMdY`;3UfV zfI_Sv;d5SkL0?CXij&R@x;qPNdsOXPI}t9y4AmGRxGO0yOKWDdQru)c>MP041p}Kt z{c&xG1yPmsNC#vjoubCl>3#4b`T3aDLE^Yp7rEbZ1M2j=T9kS%9X^r7mzzi zkOl+zN-k4F-BWe>yBS^yo9-j}AQ#nV`KAoUSVfADdmy6ILQ`g{mJCaB$1qM${OQ0K z44%B)`TYP+2Pw~rp`R@oit)Pa99%5|ZiN4lF}I!o+Cphz&D%4B2at<{?rzlsJfU$I zaYN?@(hqT_5|g7L(F2pdry=x57H!e+lhwXL zWeg&4HRkj10*|Q~bFSBC26G3}E&t%WD`_dGAA7`?7$KU&V2Dw|pQknlv$+xGb49v! zL}8ENJ89;Tk{y@h2y>%!Lb`CnxxXI0S5W6JhzV(>UlaME)QjJ4K;JOg<1! zttnZhborrT7_80rNfsiWaO$K^Ma^d^-6#RDNv#JKRAMquVyM3A(d5{{H76U%`UU`% zsWvQvG?K&S2c@3Kq@#2c2x1lyx{YqZVd-wM;Ka_8`b#9zyqpl4NwC|rZtXF2O}+4a z+KHEXB8#NdMKL>ie!?b2#8fOqR4fgnR|%pgf_BNG<;DtWfHO7uz-=}HPe~8vu}F8Zf;ewl zHT35z^O>NVcBSW6+#+LHWMyT2AqWUH8R$*yRv=)hmJW@HjV+#th0Fl6sHH?sx?_Vn_a zWtka`K}<0XmNd}@;0lPOBN zluYp4jsy0Hbey|r48}-{yyH8AFeN|%Ia2yL=HUK84onB%Zeyl!d&Nr;bA>xR8F7Ud z90uf)O2m)`E1{~Oyu6=c~PaN(^3w2S=49fH{15QLIyi z0d92{)h?KOsybSr>#wkwfn8;CFbN#xvD;P8ch1QffA@D&=;#X~3~$Ad?JWEwvH}$- zX?G8|a1i*JEKdq%6v?tTbFUT_6o9z@e#aC9Nh8CErz}i;s&Y^xL+~bny5z&)*1~&= zFM3Ew$rY_3q*J8DqO20YjgOpd`QTY2AQoYA38V%vB#)V`(yqgHT7age)drC2u(*-w zT3p8FQp0oEQbdzQiJxW5&LaYG!{YNLQ&0*hPp%|2Ws8oQx~#XAtD_x^z0s3z;mi_G z3u_Kx@C_rbl6X*iX3;5mTKdO#h}{eXi4fK+70Vu0W+$^zy1@ir@#B%6^lDoE2q$Is z2OE}0Di%PZ|0v=Ij1g4z`~L9m-SYw?=Tav=%Fkc0^Z4AYc!7z_*|Dpp5V0lS&^bID zyqmore{R$W?88^Q386sqIDA+QwhKv;8FBCYxgp)zV-g)XGIYXIniE^a@g4LRV$Hd2 zhrh&wN^R6xhG}fOEfc%MY5oIH#FEQ?`}^k|->b#PSsLr7Fq!?(UHb9;99^XCM{@tA zYK)Ut#*s#W%{32yNhBXNO{9qxa03zl^(9Rrq{+r3;xs%r7bW+cfWwuRf;7 zGx*pj5>DR8UlCiTfgy_5tZzU5tAfI$&;F>Nzxmg%O8rPY>PP?lWrFUW|9LRtxjN>6 zAlZKb6BOpQ`ql=Lj+V}{qjM+nSY=8ONZ*}`GzvTtHt%OCkYW4S@Uu)XqKR=Fd z`TWhle&yvyKo)fC1aJ)PlO) zYVLQ8f&#*mxw+JEq_FPv3_(Rh=^}$rPTKdkf%5x<4#PbZG?SIxSzDxyor-c9=a)@a0U zNvjklPBds;b)=VKph1x0Z+D%W{;E5^LBq*Uua8ixh^YNNXwY^4F1feu9lfJonp?l? 
zIql`4Y3}x4zN-C-%dAxcxq)MX@&5dY^*PL#V|hhIfk0WnQ09QBVgE80^xrOeeTU8) z#w&DrLMg?_IUV05hRey^6zaefs!$QcI5_O}^II^~sHyz&m?u0Lng7(I$3*PSr2G=0 z|AGb0I_cy5BiQY)A4bLg+k1WdMw{)4T@{AJUA(vnc?41fnMpYTAQ#(v{YD0LT!S&M z&$+xlDPmcGJItsGhfgp1?7s5xh5q(`-RImB-Q^7D@Csi+o!v(FdMzEk?HD9%c6ldl zbm}?-R+(a8-H6NeTlWBUfht&25R03ACu>})1P~qT`Ip?^Ha~d#`JI28>Q(=?N9Jo9 zF6KCs`Dnf+_y(_5b!6KK!MN z?N^B{q;8B^Rj@TEoZ46{%sO`Ls2ZqR_amyCg^~olW|h6YdKEa9IaO~PgEoJCvGvK< z=0}QO8;=RHL>se~3Xd+?D~_>D`XBy~7g+z68)-Wpx^Ox^bC$)-j#Rd(+luQ=!(77; z+z0Efb65`kFZ=UcH1Lq42O4SEsN zvGoku^-o_Vx^0vFZ~%}`A8Xt>_+PZqulI<~+(NU=C!4(- zdgQOudH>;M)^}*fMqaMzIAp@h>gsT>adkg)ZihjT-j(!Q9g04aHU8n3QF+vVoIPvC zFKn>6?k9SgKYV()>3 z6%}oIN1P%097rO>no_lvBT|MP3RDRs=&nrjQwc z((eGgm7#tU4~fi0FfqCTXE}|Q+qrqB!i9}mbW=&+0zi%jVpKZId0mU?jG_QHK3i;x z4GGxFL}^Sx`qa-G%B_rnl%!5C8|_I}=q>B_Z!2d=v-@=oAwGmN+C+U42awEwVdS7p zA{=<=e4~}Li$JbLyiarJ#AW+^;@Sx98h|Anz zjWcVY=rtzT@k2`?+7LQl?|6q)Y~rRJZH#7&rNK{wAe1hl%-{JqOR`AA=SZ_ z^==NBcd;Quv?uajMh6e?-5Y?DrVMZqlP+m|dRULq0r7t5RMIyTzqQ~>HS@SYSy0HD zEL&+Bc!@9?`!j&lGmfRu$`zY;IC+*)2=JhuKa zPc3n=jDq+jw?kl6!wXYAuL!aI(ViLEBl@wU0rh+Rvh{{^L2#@`AOK?(6kexp0G|j1 zM1KQGCM9lDhneN;Pgk_O_FoRVXVs5p399sLO(dq|`T20jFoIjO@sQ64$s&B7zWr^k z!K3*A=8_#54%O83+Chl3W~Q6iRWo&P!TBaLcZ6+BGH462P=*IY76N|@vYXsaLy{bL zQ*?N>=!N(1v<+Y#ax!*5vtu*f2tkd`l24JfHqD6~>mPXt|NhlM5TDR9|&GZGhCcXIupmv19`qAkIUOX~nOt&7dME6d^hWmEH zt{}OM-0QkY48L~%Vq%a=Za)Ne*`sD1NN5GKN1{>}!!b?^9R;17 ziPvN3sObT*V-)W5^Z*n%p<4sBpD#VLe4?xlNQrCBHp0+iiA-7j&Xh4ktj?4p-Ab^6 zoJsTSJOpO*dH?0WS5P=nXv&jGtddFG!uvKXd=H06Z1EPK1~Wc^a%7JB8QQBoRK70g zvnEfM_HO0IhGvYLI*MPR2`1%3s*Hp5Cc*+dEFlX%W?CdC6}M$Nt2|!i^kcfU(t{Rd zSL~(^9-Fy3i*{<}9?Bw-hJ5;XsLgAo8zOC#Zcu7DQX$->wsjhUex6F|n$_@O!|Db> z^Juq!gPyZFn~ECYrto`}pRh@xQn@3nDbZ{4XDO-@+UIG2YwjJ%$?mrs!MSqljmgXr=;xq`xBq8jeq!jCXy`UJPru@w)5IUMrx&wo$sZ z$GF_u9w#^wN*T_bJ$O=iMD$(rH$B?a*n4qrkV#7?m31??(PA0sEU}AYt{2YbdZD#z z=w{lY;KkF!x=s9PNZ0l)3pH8KhKXjgrhu%P7 zB!Cw~92LXrabGrII|K;h6zh&VA_WuE)g6I^ev5s|)J%Fj%uFLv)wuv^RH)ckA>5Oru33C_4P9D*2zdfx zu?vFIHd>3rLYzcIjF#mjDMQF*ox_c%pA*(e%hml<<0X2GR6#L 
z+Z3MgfEv17{tS1p!v6i3$=?*@|FGC}&yexcY+2PL0>dq7OE)@nmg!{7Ik=cU1S8~l z;>xPktNS=~5+h=aWZ4U1{N?sux@9sW3oVy#t5H8!?16hPRL-w%a6&rsQB1891D|iP+ZaPe>q))p2B?Mm4Bb-#t@RuGHY&0RV zbU&?(;!H*v5${Ou}uI4oKdv{~d?V=4$ zI!8DyTe0Hl{M#yhCWQ8qj2F^fI9GVLXb$Js%?;x{bQ~v_5{AF!C8!|QzSB{9oVxQVHsC_OW}wk)^_dL~ZUG_iYX zV~=qxW5alvzkph0`%B3z`!WYYI0n$kX*_H9L zqV$0cL7ooy_lSusAX$KNQ{=2Oo9}%N#0!dr)oEvdl{`loa)8`F)nWDObaDH)xuY{r z^oIz-EMy9bM`g#^Fpn7l_@vcM4D5sS%zir^`Leam#eaz-? zh*It7-%WV*Uv(7aDA3b!bdIfu_gnML6j%|ek5+2nwb@z||D`$Q>XYwMGzirMgixCT zpSvksyl!mEZb@j}tdI+B#g>`jKan%_iQ^dr0liE^^XhZl0Be%Jn6=N3l2#cUo6<(W zQb?`)e);7WvwMNlIEBIpL+zK5T9d|KD=+crnPnxD)?T6rGg2{MW2S#69>bRD?C$XO zOP5bWyfBk%1i~AjJ+$k%dktI4Q)43SfNX9}-&0D>_EgtcMb6^doiB}AhmWq%mCVlB zj)jr{I;ICJP=t!9#|osLmAVLUM37)o`Cx;fqf`ju?8J`-7qzP2>$=F}TAbX*3WhaK& z$3v&w#(caU+QfuLja!0m>)+X-EHibe8+(`wd70DNU+kRxo|1jvg>J4|ut?eQOFI%& zT`Nuf*~y(syNUicN;L;xi@pI4QY#XbA^rd`%H znjJxAMm~GR?=s*e>3Yh-PR`;r$2gMhwvn6+tvhJ0e*Wh~X74<(GML!)2qq0Q}=2wZt^h<&j^TH!m` z!$M5&XL|04u(Dxb&|&KDhNx?&imeroQFNkiFByMY(a5X7tZ+ZgrfyHai+;LoW7Ds{ zW@cgKY5NkR?rR6D!NqOSdBiNys$igH^2nursl9T(h7ma2Q_tPIXQm#bPV?~c3gn~- zrn;T@zTJCgIjIs%wM7&-##1;!l*se=g4T$N1S=7&b^d_C+K$e?F&)gY_P@>eOZ+_ z>JQm?z?4V?I27gdRy1)k8oEq|enY%F#n&2M(J?6*fbmW(eP0om1gJNo&TgG*aoi7+IEe#h91cU6^2dr(v*&VNP<&ez z1Cz}1J|TIo{{8H^GO5Yu;v@pjkBbYFJ@1QNPCu5U*K^?7XHS=7+CmU=H+WX!7AIkh z1k=xzId=xv?8K=!;0!u*>=w#2)}eKsRz_yo+VLke_TsZ&jNx}0XUDdVYw+yuZWDOK ze%r}q_cg>17=^)-gb3ec)ACb;T0N*c-%-alwzW|wz0s$W;V*72NwZSh5- zeX+@0)0v~6{xjIN{%3=*gYI7_Y@&FZb(p+9;U^ZV;3c3| zcK|5mp-heciA^i>1d$_NEkciYScrGU(Pi{V#)*nKl@s=OZ{_}rS#UA7fBc+^b@x3^ zsjZ#%;b%d9vA;${6IC(fBVwYCtz4?a7Q-ODV_D+4xgzfQrsq}npiIdKY@=|qw$$Yd zmSzU&CzV?>RM6{vD58}DxYydEdLk8m!Nw@YF_V*VH&%H}7fTny6&!lp#ddN0Jt#SO zl`|QYY&UAk_bZtGD9-9H-sX($MR3q(F{i{`jiWqU5wllYk9C`3Sw?ZWf81hdtJ3Af z|ELHDM9!|8L!h4zszV?!4l7<`YfF1+7l4T9ylEx)l<$$5?cxhT>=E71#Cvi+bIvV+ zd`RUP@DoWQsC^1uH&xZ-uR5da@sr#&1aC#;2#ae*;8$tLwMV=ecC4v};7LIb5W(l? 
zBVdv6RRj1b5?<(~+_s4bj>Ihz7pced4=+B3*F`9i-=}7ZQZ^)L=}i-jh4Bia0F67s z`qhox*`O{mn&#I6@#uy**zd(#FoNINX8Oo8a=^dyx%@Yy{9JGl@^Yn3{Q4XRcBfnUnfy@A zfh?zRdtsQzckL~2p36Z@xTHRFN4jWHdQO0&uU>%-q^78yH=Q?jnBFb*o@>VJ@E_Rp ztLq&q#7|FK+;RtVbV@W|J)*u7EM|=YJaNL52;#~aQf5A9B_Ycfz)GT``>L?B8<$nA zqo0sJBIwcBuE)M*sZmZ6f9{ZW2G@)#N;1$f0l5u82fUzs~8(+b}Q*!;w+z> zDxH$1$fCAppSU-Ly(2v#j=m>S0Y-pDxsw9q#VKQFMfGTvVVP)vm-JD&YEZxrfMpMQTN$B(v*@Z zFprb!I}GcS`x0ePY6Ofz&P7u#aN0w=CJG)0kPlWTobo%UV8J)I;VlK{a<4N9mO z{fWHoqJQ7WaY2Xv1lv4_B7T4Wc|S+ayUL<-e9)vNW*5J|(uDqw_(%0w9Q)QMTax)X zE%-Bom)q`8{>(i&PryR-=*osRbEh-&pgtcN<#Nt&0rMu8E-ifc+d~t_A-m3lOGhq1 zDpo~9OsQ<$+x#D7-0My2`)Nprrj1pji^8hd1!_a-7*vLSNg7$n9x4I0doRjhY6_DK z8IadC)0>Q5sn!9ZL=l>7f|duKVQaxgEHfI94uoC4H=KzHlAlvc2_h*z#@W{p zRXK@SOPSH6nPH9kH%F+(DXg`jLoTnAoPG#s>B+E6mWB@SH}&`HtHYSQ1(+^6EzvpV zE7?*ciuqX`?id<6qI{*~J1Sot=fQ~iM8aG~4zv?i2gy^q=y36QImqbCYtg-9$qF7f zyLRg3ew&cEd3HeSC!O|O5uhj=0h&AlaXT!{5|=zAWzxsQRR~p+ddar;wSsn2kNX{D zIndY;yDSiH1MUYHYyvKJJ+hTY?w6bEy1!Nwl48;EIy_?ls7Nnh(d|QK zb0_bn$TR$6wCky+*hGl+(R#<~+JW31Eg#dN`u1Lg3{C2M@w*a^+HQym-Ul5Gu#OljfFMtN+f?ri^7)=S1*6)f)c7Xa&Cy%Giwdx6Bp8`kt0Ufn2&zy#i;c8NysN_XR^bT zCC_qtw0n|`dFCNn>x&h~)Z_bt^}R8P9zR(5iDYep;@))Nhzz0;F7hQ&<$?p9woGYD z!}v=3Jv56c>q^Axv;Q;SJ|ZCow4MoIA?z*2l(O}cS5N0WR_SAdrQSsIl4VisWARid zt_%UxxW7DBXXl8D$s0Dbv^>U{vVWL;uwjQ>@s9%&7?_RDtKZAcDg_NkO!Lvu5z@oA zCo1T$KA|Ptcd%~YiKi<$F$QkD2i{#GR(=!^Ippl>jIDWqb78=)z>=8{zd6DQ*G4JZ zSc$qQrQJ!ZsSz_Dx+wvvg_F!Fl>s!#Xgo2|L2HJCom{)*?3#sGvpF#;crYdkK10@0 zGtN&anP>j+;X|AnVqdU__dUrBV_qf4yqm(YlGjypW4An;v70lG>P5D7 zxG1^M4`nk=mNLS7MGjGOTKIDt*N#B#HWY=PXv|Z&ZA(vMFSD(AhvYKk_;+chqgfrM zJv8_10s1BEx4x&GKHx|1E8g2=0iHmeX=BhrA<;xQG%sIjyc;oa)Tl%7r~dIjBWY-< zdgqK)bVpK@)$LM~u%<0r-s5&3uLr;>hHX7Jk``CG|bn-zN=HeYw zZdMfCXxWQ}YL106{tBk$25Sba<^&wscvpPIzC8i$FUvn~T-_=s(N9Bvz;W7@F$J^Bsy(h}#?jS&T%t4T1uvL!CSdfH|tgQ>*=2ltm*G4p%x;z7b=nC5C(_15By zjd##8YgVjvYE(p5(v)}DJsLs8sdRIcrNO((%ml4mwyBUq+`b=lb#KKs@~&f>J3CFr zuFiT(f&=Cgew;oB(%tQhntUfK@N2_i}e4|1SIQP)+6QZgHC 
z4l|DrrVl-yVq?I;J#W;t*+zz7SgS$@oCO?~fgc*1Py8-y0}>7a8P3jglz#d} zec|KK1(8`4;>dHgO>527Wv|p7zud946*&%-k%{Y1j&G>aL|5wPiqFUFHX5sMB3NG9 zP;f)cpJ$3%%g(cYH3m+09ADz<;-U(nZGGsf+P4#u*Xe+vM;JZMP(Qh_&AZ`23cBWf z+Nun?Im6v!ciSJEY*y^|)wO;hujGq2yN_i*g!IDTfuE${oB8!~6h&z1LU z`}(b_HI==7z_e7lx7$Y{*C%1ZWZ2yDHmy9M|_z-}KX#DtM?tIOj{iOLF^CCC!B?BbQV62fU=34K-g3;Gb!qcZUy1 zoxSX|nW<^=qmndiCd4Cn)SO@3DjMH1tK|G`*B&TG^0d)E#W9B91<$<0J8{{EEvB32 z8tYFRXYk@%9ZBDkeVUH-^+g%|@70CQBL5=FFhfkvPvp$jh3YkqnD5 zqnOb==$w)}5d`!D@}i|TJ^e5N`&U( zJHEe&W>I@+=2bWV?DhS(QaVkb1B%hT8tzTMuN~<|`At<)=eDk_B5 zSKU4PSpH~;;%?WaZr3YnYmK@M*Zy-($1Lw-NB@myx_-*9b!%4UR+H-ab>jM9<%c8z z{Y>H@Pv@Xk)~2-}1Mk3G9$4QjAsmb4J8L|EY`k~Zu4A>ewF8p$cV%T|4Ga%GVIvv) zIiB;x<wyUuJ&$kbsmVw>~w}b3)^M=El7nZ-(w$ zb#aNg%kwMqhh#3vtv3m$2L0X@109v>L|+_mwb=txrX;@}|6Zt)wR@@Q#U#wlZdr?O zBCc|N%4yVKC&c02qehwI=&>k60Z@5@fW(rCoiJ+P;j7 z`n_W-re0euJ&bj0@XD77NsQ+9Y7j@`z$T`rG>GUm2fRtHQm)PnP?iBDK=uw6iwTA@ zxhT6wQw@mYH`m-QXYi-=FiNx5y(_Vh5veO%{DzAx=$nmVTx{Dr-3n|vdP5gu=@*Ga zfCx_t@}o+jbq(I0cSWiTpwb}RHyY%tx29%Ruo68AaqNN;=lydSHBunlPWW5t#YuL4 z?Zu)P;|%R=dM+xai>00H$h|6t9rhsecBjNW_oE7RL5G><6e51|RORme)Dgwx_A=xz zJ>Gc@u%!@uUz7NT@?^ztY3dWflY{j+&)NY$%W*n<>a z+`D;Z`Kl(@oYtUgJs&g?Zb>9noAo1o~YQ7z-*}NiZR>7DmtS_!LNY^ z=#fEm^ogl5SF8v&=->uTsXzljbcCQ{X)=ibbz)RCXEmUx)*GiR**tpk#G7=61KY?=Z0lHk zV!h(Gf=|)or#Ol<(!aeJg$dJzO>V_y$?Tk!7NXJ@9;WyU_Av45Mb3FbMyarjlw27h z-_15qL54Yk2z3L@W6Oz|u6RrflY@LZKMveK<-vpaoZ^_59mN2Ikm1i-Cwk1T!Doc( z$XBXNd>!=2oJwO}Mk)AOgKI5U>?q(hiL9Fgo2`eoG-|mKIe`N%!+e76?uI#?N9Ku znU7vmAqTvS(*4@`+Hm8Eo6;tWNPpVARzgzZQK_CdBwLHVuGW2Q4y1NpvLo(~G1$Lq zWbR%wZS00&Lf9h<*zUjsv8Sn$6}LCO52&b^x&=Mu0c22-q_~v*+`t>iaz)jvJ9l&m z3g4H(-rTr(^ZjEQa~`#91<2zrM5^l@e-%HCfh{(j(gK}&!5QpRLyqaC_SNl&p!Fw4 z4PgL8c}crA?~V3WpNkti!SU(0_j+CTN}dbUA5bz+fNwm%s_n^*>fEs`w156dF~&$n z)D}N?ynVA%;QX&a3B~_V$5ptBD*l(vi6OF%y=tHvsl|p@uK5PzOQ47E%V@t&PHs9S z)~PBu)#BkiJ4Zm|B^bRdeGn6sx{tOX%j!nCwYZUf@x>Qf&YbxH`O&}pLipRhpXf2I zx;8Gq;>@xZk0J|389apHU-~}YedPEV|Au@0{h=CktP>tQTgT_nDf?L?T5U&ctn+TW z>=d#V@!@kb;7c^26&DzJs=YMt%cs{28U$gk4p~Ebp9pZ;(MXi 
zir;_548=Kp9Sg;N_rc?STi}D{zB)&LHszTgT$8Q2wf86z|s4k8XdZdObP$F8miE0DJttA2Q6k z;qF8HlRDF)Z&s>S;%xd*;bdI(a%+!A>f|jFMd+3QzB&fbrHiQ4U)G3T6*1r%zp52|jPBH^UKUmJAMQHwOySt(|BybRZ%pTQM11 zg#mBUK%oTaF{lvn!PgT$bB4xq=f*AOyz(bK`l^ zwQkr_TcU$%nAeDkO%*U#W`dg820eaJRn<4*JkW~c4^tmHpH-hyq2zm1i1RGtfCP8* z!mSI!=lX#pHM{$x~LkfE9Z73=9I02ynn47Fl9C=Jl~*X^hV74vkAX8c-l64{TEBIFx(N8u^MJ zIa46H8gDwJ(JLr4<|N+^q0!w6;*DQn3JHhs#F3?c-~9jfH-nya`GxzCS6v8ykB1_( z+;$+XGYrLL**URP`Qigx0FkRS>twdo`4Lb>%<71b5OuHYSaSbNXwX5hFKFQ=>Sub9 zXbwXW85Dl>%gw5LB(WVgRNAT}RX&9)p~X+0{F}V=i-{8^1PV>cjYM56o`S-GPLt1rg;ZSU;< zrrJ1!WuzxCg07LF`{DEF&j*!Z!3t?K%k^Nzxg>EW$?g%IyKmvNiE|QMqE)U1(VCO9 z_d#al;s|l5PDd59HET_)JzM@W*B(w+mH z8%>zqDA~De->R_Mmn}Occjy_tAgQH5e*l#t6HPrl#5Rz7NYLniC{t=Is!$nwp|3e# zdtATXgXq5N=UFaYy3{+T`rjVS-@jqHu4C^Q229m9ZQguvl--KBWub8kXqHYf)F0cj zqvqVDJ7KB^cWGQVE$u;x$_k$$D>O!hQQ#V07(ixcAog*t1oYna#AxvSs9_BYwRTlk z4qE#xufe5OCsIL&^_{^|jCPI$pHW$Tlt9yHdAUWZYwfR*Df<84|Pt4yWO9PpBsTk=AA5YUG& z4PLBW?#5!#*cmkWWA^EpQJH(_f)@R^K~eO1mMLZlu_MULJk!_xVae7#)x%2j@^t5A z-YXe9&vzWr?|QpdhPxG=rtiB_HCkEu@{9QEG-Z^vg+QFJC_X-3cDu_0yUk6_I;<(H z&3x2CHRd5P*lU!9L>N-NMn?aEZ}aiaA352}$LzN+6neCxqy&7Ra?*B}GLzti37m=g zd-@4=WQ8yQk%Yt@)*U-dJv4ORn##8vj3y)NQZ)0k10!hOC;Czi%vQhkAC9DLSlofhge2ip1|N=xxO*u8*l-vI@RD)*bwaKpGkF5+#N0FO zfKVYx_olfXYkIZ_frYpRojyypj0;~@a3uRFWr*=unXe}zmB-oU&SmQQUtN~AHN*fv zvn=~AX8{A2%E%TS9rYSCD;yzxXcFEx-81q9R0vYgf!RRoQ8l^GkT3<5`1-r}S`;iH zWW632ED7tAi{H|V(kV-Bz1@&nhsNF<4t-B$F`lMfWHj>pvYDN$HEH9ZT$_ccX|FO> zunPD?vF-s-8eFC(*DI&)=+C_HDMOU)JYL|9@i%;MIjrd!PVX6D4}Y}4fO zq2y*iiq`IG440^GHhlQ%qAoxFc)UT9IAXcKe`@06Y)Ya>%0H{8`aYxbr?IOx`_A^` zDg)NX(*Y62t3$|tQSK&IZ8^B%izBU`cpde94aI3b|?H-n#NYCKH{0DeR& zHN>Omnro9+RxI~8NQBEc$1p+{b@LUn%4j9|EnAkiOBr#6sqpooD=2GIJ&HH8pQHy9 ze5z_uG01@QI=&3R)w0SFFBzuR#3$*|gINzTQKdPrV%Q0q=a7-c#DwMcUX#X3$6K)> zU6*|V#qHX)t6LHYN?+6k>6sA08r&wHFW+*p7nWf<0tWx0J-MVagu^Cs019-m_^|7};7B6aDW`+U z-JaS<2$7+(^3hnOu83i_y`Dw|Dpyaoe$;Zj$3n$}r;gKt{yja+ z#&?OQ8{!`9Z%;_GzQ~Ra1qY?+2E&4CNM(;pdWV#(D^KjI^5dAr3~&j^-{=MmGknz#i?`{xn8S 
zvhdVHpJSEVc6d3T`5vSnHwV~AL63V<+Sj>taH3u1w6{$8NnLW>QMc{EdAdd1X5Prk{8sU14&1h|dlV8C4 zlgtk`mxxk=@NuV%#Ly|DrW;GFxUz%jeL!SNRl_{TlIsaK({3R0H;Ktj6uM(1+GSou6XG8J{%q)*SCg35^L>-VMfY zOM@08qy?)V=oCw;Z&va#x%P15B8BZsUweV~JDU?-W{i0LYh*cWAdBTC<~xP#k#FZ& zUG4a3J^by9BPZ+INOUcHF1GB4sFW6?U?+D5kQmEI9LUXX2{_ibjehPRnF;`n`b20* zZb9(~&JMB>uMObN+=GmJ!rbp<-BVae-CO_^p7sin(-QVvK!T9TDjr=o+StyY>3T+1lNHS>ZO>8q4sO)o zDsf1@w%!}mRvI}P@7_4CT`O)DDvr+`{=`L->@a`yobVqkuK=OrcrFa8Y42!`%!HiK zJ23cXq41o!zkE{!ZFFgMdP z&#zm@+qYi(9)LysS{W~UDJ7DHUs7-jiP-QQ1}42Xns}9x+b830oLcK$tUhxsEND~J=OORiY}zrCVu%f6|2Bw zbUxsW=$_?*0SF@QL`H|talDk5T)BTyNKWn=g1Jw^Q>dxQ?Q~z|*1k7}Nfs5f38>qe zbuP>~o&1k0FmoRf!-`l}=L+D~d1TD|X2Flpv~>r;*1k(5iDG3dhW!UGjuXa19fmeh zXiqb-ii6eq;;&7$a)F|&cCLFn{`QEJM1;33!Hyy&OzBdTKwsS3{Pl%05eLiWT$C(N!I!2Gi~d2gQoo~5>8Nc zJ%M?X(#~Zj!x7^FD+0P4=avLWKa0vjlXo@APsP+!7w^-PSkq5vz!iAj?wJF*=oi{r9qdOFAT`=@ipq z#^UbJ$?Dc=xR`LJ!JdMvy6k}St7r4#5#`ANv}A{%=+H0DIxq7yt7qmIhrzDa?jaY9glR9O>#; zMR%N^v{P;MdyqUCWXchVqjx-!ejWbMddLi3-x4_+mEQ}R$5zw~0eM|yt`>-_*2qRf znInw^V34GuLavnGCEZ1r>~$*z0zOA;l#D9RJE{y|2-ak-y6WIvED=_0@SQFbMxsDN zn#JG4yL6<*wa0EpB5rTSv!2Ral$dlbm(m262G+qe7-KCu4Ld!pL?#+Q8#0``Ze{*Y z?_U?Xyrb;dxXID5F*s)ianR~?E?ixq?vY&Wg&6)s-DT|U-$et51n4fzE#-oo?}U<0 z2F`S=nR1vUgy+IhqFS~hqs@+(_lq<~z?5aqSMwh}SVXN)7AJ^++}vtVK#*@GQKXYU9*oWKgeReaNg9D?k;oO#yHMc95-N7QO09>t41x2$Cdu zmGUBR?|DCRpHJ9T?9I`P0ulokcC5QU=dHAC#bHF)<{%KL{K@rvu3~rvn*A_%@E84z z!%T(09;5~`#IQ}HEsx*8w`U}h&Ca_Oj$~C;oO4QlOr|HfpO!XEuTb>&EMMW02bA1Y zXDg<&ea(9^q(i3Ok-#kLEJKQAuuWFp=B*r_VKl&tWtSnW_CHWqPCUavdmyhOppgio zC`o8NBLKQWe{G{mxU(tQc$KjKA+QZ8mQ;-4a9!%XVql$%3M=5vJGdSm-rkO84O`+B zQ_$>--XVEWfZ1NzFPm!!4-pK`F#ct>HPXzz9^ZWg!-Sq*`7JV2lev7Zr2C@Uszxr3g30 zNFzVHi?*a3zz?EHL~keyn`cW0L(W?EU2^Gw53u|zXYPv;3}9|OC*d*D`6t9MA^FO! 
z5904_%(`uT#teqi8MS!p} z9$FMpMz;r{2^VQl^0jNaEC4Azi8RNW_x@*vrNW!Qj>8{*sZb%(R~5#uxjwtlB;e*z zfS5Kv{9uvSo`ydvkBOv7rLvZc6 zaBHv+ddPiOc3L+H3%Vh$qi9&Bht3%id-Exa`|DA!#<2PCa?iKF=Kx&L{dF80PHbH= zLg!>Ix6hf()dc{k7!Y#ezI_4idus+8KCiuSqZx)H&8w7<8|OE2V3xp@68k>Y7>{l; zT@gJ-CTxZv;4PnvzlwPhD!BQDUo~&u=fy0?hxypF2k@pc&n)1LE6a|{*!%P?<*vAl zq;r5SGx>4k$Oq{!pFfX`zUMv*YWI`eEmB17uzEVzkKFn?6m2ok*YRC7@xdvC zjREqvrF2WS-0M?pZ?PPiTHQ!SR&=Y-Jv5~;glB8Cb2JCj8;meJWW1ZL>$A#A83KNS zvc}UYf6N@n>0<$ewbo{;3(0a-9j*Z@Sgn8L$;OY&@`jh4x%HWkeEM<{vnE zrXBvNCP<}upWeMwf*s$zd&eL@qoY;ztM^&;nzvKlA940D%@3a5#_VS^A=7ZSoud|g zt+z4jNU03%W)iTLGksM-yq~)W51w1R&GqT1aI`~ABu%(nve$_0MFw`Tq{qQ}{LOFy zP^_;MDtvBte^Jcil4KYC_Qrp}=%)N4q>sy@jN&*iVWvqcb>;HxoKZtqzOa5K*NN{8 zg63DfrQ0l>TNyYbIFATpa1ZjNcvSN`(uJL;585pAgG4{p=ukd^RE}jS;#fqB?3a1! zaQ3TQI|Ar^>`)pkMd7y#mdmslww-Ej<6=h|PfJKDV~A70U^n$LpRGA9Dl#Nv7gjl( zc{;VTY%2Y)xR4&c~B1nP|#kP3jvOmojmHgl!6QrLy7W=lmvAA_`X;9b!s2z!m|ds{gy? z`sJ^u{K?ugfbvjTHK$Ko86XZ9OIQXX^f5seHdKZ$Ft&0<#xS8qeyb_HKp=2!D)+uC zAN)P#`uI29&K>(iud!^rexP`txVT1tX_}luFg$^l!tVitubA9%ET<&G5Yi`U<#B>u z3C{vk3^Fe8O9J4J!r0yF;mu1k%E{@wkJI1J?b-^q*sJu86K5~r)LlL2D*>Q*yt==P(83! z=gwiQ08z8k9)MdUGZp}|Yujda%<6}>3HT_3{6@rEyf|=mN>b7!NbZt<==i!sdlruD zM|$Y|vNRQzk0%`}aS8xcVeU=enRo7}%P@J#rB!|`%?AS$o=1FgLRaCp;R z`s#jRcAV)8Nhz(yo4K)3F)w)~1{QUFwF+L}4Daw`3H=|k_?g}r>$jzCfQ3k~NJ)pdi zj-NXpAV`|f^yZB+0eEJ#&&Pelp2oJ!-$~l`W@Kbs)wFt_JvE01RNG6TGQ?o9n%WKm z0Ul{riVsIVe&5jnFg(}!56cUOGFLL`HNRJp{o5bTkLU^8Z;ZOauESTl_3&>p+1YiE%j69g?=# zyWy8JvEY=QBuqDWrIMIXP|9LSNtw!O)8&4EG|URlwLE*4GE3%EOJOfRd+1OISgPK-d+&eA5fx?!O*6WjZ${#!a?T@<0_4@g~>k7695f`bfI<;?W zTjXRtJD3=^t*b_s)8g=MR7+he<1y6sM9*{(5)kJZw8tT>M-^U6;hf>7Mnu}s@dE?SW_w@TR07f(#!)>8gapBV3h`n9L7?}@VI}V434*_eFQ-TArSDL+o zhZ{J8IES_|`ydg1zi3pBXV|&ZTf_dW`LnR+$czh7M^hqtH)+yD1Z3h5E9Pn6zxy!2 zNsRPXUb(OktX)fYhceAh|I2p#dHJj6LQG-_gGcobxvE9yzQIQE=OVe-d#>#q8Qw#! 
zvzLN#!3zop8TLw!bIj1LISo^&Q+t)EKBh87SY{=r_?y?Scx}pjUYpLB^`e?i*7nNe{-x{I8RlMS zAPLB|7)XUdbZ|*JW{Mc{e1m^s&8%OgC0;RCuE7t%;$b_mUOXHY1qB-}wKG0rQ{2(+ z+OuE3ogE*2ix^-HX?Xt{y7<%cbbFjc_L0AU zEEETb~v_(hZ^VhjW2s?PNm2PySmfCtC1nEb{ehCoJ-W7o2>@eFR zVI?-$tc)ngPzHCEn*UgL8Hwt^Y-t3&Wu5oItCYiz`zo|l>HR_7yc`?}((Z&v30$wG z&%HP{srYJ&Z6v9R_*RV=IdWN8ygz~K!5@J)6u33o6UG|UvFjm@UUk-;7>z<(rf-pa zOqTqbNna){>#bQo-VFX6Ed3`cRmw(Fd z600PLQ$1(Gj&nbv+j^e)BeXAt4(|#rB(Xt;sPtkyU6H_p7|$jo+5iBAZM>kJ~s^y^x%4 zFeQYx_-xw$aj}u`Ub0pSrZq~aFg)_9hLg0JW19;j;msP?t@@-2`g2p!woJ+UX8kHD zT(NT?ZLpKd4~iAv=|S;C*&}w>(uk+iGI5`xmkW}Yn$lCZ>&$vk(WYZbJw8BwG!McsV>4h2Mmm*4iG?j|L~y%DLQZmRS`^K3A&oyR zlv%``Iq_P{<)04>wZp6Cd4rv2vE`XDvdSQ`Lu_^$U|#DVepqpuN_tlD&?YUE>ISgm zYe2m$mF8fYmfey%YTAzZoxEUu7FPS?Q!4d+!rQZX}PEUjF%msvlcIA8)p5lX|z~ycMNfMp#k3 z@v!`>?Lj(oJ(Sk4IYb!&Fw)22Jyd2H_9iK@G>tgZS`Y0>$Leg(s%?;?P6V^WCyuUt zdrI<30A6kJwXIpXaPi_25#M4iYVybtPVyZ(a4$&3N{~IBbb5dH6>mOG#9jNGZgDSz z(H&*O*Hm5tLHbJLmeTosl|^e1L#aO8)s+}4=-ec&RImPDliWjW!vZ3D@qFLSKs-CEA8!>6?fRMp%Se$i;6kkNVqx718X?Ah;B|rCy*H0s!lQ z?F8k*LVmwDwt;na!_`2&K%xi4=2Bc(hphugOt?87Ef2W^7Kmy%SgEq-pkf#! 
znOixXKg`#?vxjP8Ma%WdsFvDTlHgk)x^-HbcUdnVyxFkL5(_`0n`S0tsso}(smcaE zd;3__kt%&hK@tjVg z-F682S|MyGJRX^ED52CT3}{l6sT9EHMd*Cfdr(r#Hh*e8r&{wK_hk>U*iz8_0&Z?P z#U|>G*(4t_Mhp^wTmFY>UE)!M2r2Sfv~)SF^6pUIZS8m;vYJ@!5J%lXpkX=75cDPV z7hga8A+9I+`uW=Tv9M5X&F^e^{W2zCl!U!roZ}eBT-qP;9z*Q`JGBK<=FuC^{SE7% zn1?^TStbRurJmZ~9v_$at+@OB?Z2Kh^+Z<|q_(2hRC8FN*V<{weR-GvL#-}n#I&*F z_2Jl^o4wsI`S)G+00K2M(2ld`(u&sM z*eKF~MvWV{9a>Aw->+s9C@UW864*v|ergCwWOvG2020^DySKINb>{tD$sjj8dYt2OxByfEpvMqkP>R*#BMU45@cQAiA zI<7t|ef|91p!%n4yTBW$mrP{)8Tg^3m&Qe}S&PCF#IO=8NQpLz&GVjDRNNIXLvWG~ zLvCfg5?EVYFMR*d5-~V0!cH?>_>Qbfih8+Uz;R1p&$|0V=c-F&MpRDoU4R3QDA4qC zc{z2*ibpOxc|?;0&Y*c6$TMWKM_!95#cS^FS66rYgQa9ITMk0~0Q#~?+}cRxCBP}7 zK0k1{GDpQ(-h(u%wMx(3mOz>4KLLq&eYxAZakSQ*E?b0RN58U#;_ zoFJ8+VGnJnVFp;YN%rx!y+Yo8lH4Nh>R_{Q{{z;d%f%$UcHE_6#%zaF4gC}16`QO z=O78%*kI?rf;V%;2l>S;8bmtt#z&8=xYoUfW}v5she6(R#7R$}lbi|O#GO1i!cmVC zdM2)Vd_v~UNpbd}(?f-RrLoVyo{^RtNfU=0h+D@GA2b_VuWht_*Q@Bw;z^T|6azm* z?=iDEFl)ewB#YOtmG&j8sok*Ug1z86^5iW(ZLMR?*kpo8?Y!;e1$58t+qLH|TZ`?F?ve=dmUUFAij5!$y*=qOh z-2t+BONad*#=ZnB$MkJKW-t>nGZa!86fMfuVoPX-R?@zQ6!liprbUY-W>Aq9l#&*0 zZ>f|Pm1T(1zNd{O?P$~fKd-mOH|G0&-~a2FV`Qn`_j&H;zOU=N&Si3HgGRkWUg1Qh zxn+yull*C-HVpPddc7YEy~+3;GC(^>*4l&JZ|2jhtjQUXnJ}Zk_N8J@en0R->GPaV z1TSJ36jsNBwL`|CX zAOno1aIy0hU}Tt){Ed-e_TBCXY-|9PFV~h{%(J|FATkBcv0ul4Y(pqG7bnaCU-Hty z@TVZ5cNYh22Y=w9UX(`ps3aFV_TX|IHkSbup;-wlu7jf5_jY2>j9i!ggOijK6RomA z+i@r`S?S>9%$Z!-IzvX)d-@EK$#t2PC*WA{ziC4!JC>Z-H6DKl|HQ; zIz@Q6jXk~L^i?pl#bS3t#jwjvB$EW7mv=ph}XcSMRJMubZ!V6 zn_vv4F2x4g_*=OahqGu-tq3@+B-O)i3~6wp~QF z1W^v!+S;dCe?iA>DF*}*FgK1jJc?q+gl{tp?T(qZjk^lEEZJ%o4Qh+Ad>I;OLQa73 zwS0k=`?3#$$JN7vq75;x#qn8*xgs4U=KUW^1)L|h_O@;4ZClD_L0Ww{M>esjG?dBU zpcjsseFT+Yw>!<~HKl#Pd6 zZ4{3_O7CgQa;nP6!1H&Qt|Gt#mp#gvUA3ln9uWpGAm5XPLKTFbr3s7(L zGQ7loR+xoj;2gv52$-P+Ka&g?<@0O%2S5plvhB8n^9%QmQ;w(efR=FrG6l3siZpBr zVR{SA%*e~C!+$;=U>J+z);sy zd4bkmgF+8r83XDp-fj%G<=2TN+X*JcW7hHZ*(I_zAW#|fI#R#G!_C1+1mRND*W;kT zRcASmg344=_(E|NtfVN38#5vZ+h`V#6!w);X9H~}46u1p-Veo;;5y;TKxTgxw*zlG 
z3Pje8aak+*SFbk5)B?6{rGWB~dK{0IM;P;Gs!vmFYscFaopvln4HAD9o70q^9RpuL zhqpJMwC;yt0wJ!fkY$%{gldTPPqaUUCtqO%j!j+*%bhAq7|CeRa0``~sq^F-DIhw) zabjK>hI4Xm1~W*ni*X-2#q^?T0!*Sb72NcX^~7=%OMmmO;iq9-G~7YSrh>o_ z`CH!_e*Q%@?jY!>k)lJg@HL<$sLkN|7x$aJ|8FPWCP-tq6hNAsvp7Yvrg#^n!9J~@4F-9jTa%tTE) zI(`6?sH#>Qo%Y}4Z%5fu4OBh>-RpT~<5MZtoe#_$F&i8lZ9X3+P0RGx<=k! zTF25xUJ2s|5Cc0AAPtFl1r!#57pN8X=8tMZVu2Er<3xxb*^a1(O1lNc-7tVC2k}_* z-4{mUk2a$KRFqn1`b*q3Jw+?jb=5Ref~aTl$jD3J%BbKfiRgk-qDI`^#E}wOwZp+= z6L6M^!Q2opRx|013`PXi#HGeD%jr0VwNU|rvQC#XTt}$3Mdpd6Ewc(3(&4Z?auDtM z^42g5v)qwsV9VtBqc>G#NCjRY-uJRM>U_jZZOqxinh2JsK;#8a3 zqN-0n46;ZLn+7DTU0H5RUe&0wIbp{Jf~zdiWPg5p4N^pKx<8;f*8t6c{njSZN!nvZ z!BrEA=SN5_icZzYn=;bU*^ee5oJ>2P2ytEkGJ{HXu5tl54L(&_8Pp#*WVzqWBp)VR zKB`pFWmImy3%{A+4x!`VsFmcxNuC&Zdzyft*&~vsR$Z^LI2>%}26HT{TClisa?o35 z&lKLE2nHAw+r$9VPTewyqj>-ngb(=JO6aT9Vjo4|O=o|4zU*1!Q^;Q_ECsBcT_hhR zg?^tdXl3Nuruw1t&wBEF#YRV23>0Zy39@fcb=GPigquuXl|T%3usj`lrcQq?!vjFV zDvimF9$&yR_=h1ZxzFhpFjWAp()|Mu)u=0j8cyi$Nuy5gJ_MAanO1!0Ftz>UAJ(TXHt|R@ zzWtC45)lqov6zcX^J~l)RRu&jV)ag6W5&7);qXS!FUG6`m?x39;qxS1Y!Iq4UMR~I ztkhq#ZClo(2{c%(uX!5rR1p0OrO*Jpd)o08m8k6!F@k&X|n~%>(ND++SXj*Cn!4#-b%RaXb>WPVd@<~*vJ43 zkn}hu{q%66QpOMbL+akUH!<=%GXQ$21mFmYg7Q7T<6anp@+QEFB5ET5;rnKVG_OHr zLxa;){h&pQX8Q$Uo}Fa7Xz@Os5*IVi?A&2+e@T2Y+_=gP4%w*OSx#igEud8lno9&( z(R1<@{Y#tTvFw8F?~7566r!P|dq(2$91wbm3t(ey9sMYuwG8g9QcUx^A!Ks2?UY55 z90!N*+3y;)Tm zzpC>13a61pGJz6eA^t_4lTEry&3uFIzI!8387=B5eswIa_NeI8`n&B5;49X&C^!Zy z!3c6W-w5#JT+xjg{z>KzD?SfJ|Du#FALbcVMj?T^<7?Y6NS3n-)Gun18x9r-ri%Q)a z@s=X6G>5&UIS`ON=$l5x$zQ3BB}nFl`^AG1wt8)VS@x*Q)Qr$&irAI61J)siJ2jj! 
zxp*GQdzq>M&_7<{u>fD6rrMB2g@XdHbf^Sj>(YpH(M`7w!$xeWJ4tK^kTL{Xzt5zA zMGAd^OaD7e?Qq~yR3sYCMB+$aCTTcbB9Je2Qj3U>M03=NHjo?;o2G%sTN5v;wWTE- zH4K$+WXFwS%gf~?79rILSR+Qj4O7}TAeT-l>tt_$Utd-}1w?>kO_W%W5{cbTGZa!~ zeBi<3gPO%3NGgpYW59a=mUIo=6quM$=?Lxs+zfSaZmIz>-QWO>kIeU>8=D)P1?Cy# zK3Dk360Kh!d~Yv|3mN4tq6oF$EZz5N({N7~7)IhDi}8PTY@<{m68jujO;lbc+EC&R z-rQmvRoEF=ZHpZF+iP*=5tWltswBX>h-;cm2Y9>VOY?U`<tsDw%iCG*5 zSa9fIH30vL+!JQ4T7{Q~ua=cZ^e+Xvg5A^Q6x{o)Y>{9Jt<&Srnm+k5} z`JjSv?wz*vxM$`sqRcP5>(Uf1ZZR4g9@rhp_cZeKg7@Dq=WM$$6!|R0GA`y*4<)BWM*q_0nto@(o`fdgx<}2g%pL97Mzmcx zyA_>uYb-g9+Z=RlwIf@w3^vdQ6QRi8{+_oemwIulpFdU;hKZ-!2-oN(N&fk__YkZkpnWzenP=-q(1lOp_- zUr0#;%2~KtsRZx5wXIF7c?@orm>X+>7(gdKHr9RXvip7Eu_`D}O&BtpZWAx=(I-Hq zC3x8QTgMvhdfH^ML?i&g#4Tr-kqP0;p}I)-$q6bsi0$8z2;c(GOdMql(a!_I>t#%j zHR$2gu)n_V=Cqw!mwZGTU8YwJ3ZMenyM3+)5@iPUg=B8EzZNE*d@fxZh%_6?!Lv^8 zMB-!2N-y17MTSHYWK@d7vaQnyyAqBu!d&R-!S(Y~Tan9P1@TU8)U5yeNl_trg$T{P zy@>$6Sjtr`&?nx#*E}Eeg$2c{Xl~pT@y_w(MVaV8ejR=jllNn7U8+E}m**Z=SFrK* z^A`{>^k1vmXX`E><9Tkr|6hBH<=vV0EiXS?>Dm2ixVJhna?Me>RBjz9heD~cGg6lp zNaS}{ya}GvED8*{Au7>(L@(RFG`KC2m%+ZkdkX%4G~W^oGNO@E^9+-2`yxOk*@>v z0B_L#6kJZjD1?T1pdmb{Ndm8zRrTcYxxD&e{57Bn@DXUHAJhRHE)^Va(W7loCeJ-a zwM16tl*wb9jVg{hb>L#ccKYBT;y5tt$Rcbo(S1CwCX+~$WOa-&-^QL<)QzI(vBwjm z+gCCVT{b15aZ4nBZVQ++l9x1Jz|LMq(^nYFkd*aYbB0q1CIOf0#u3LqS<^HLp?MtFvRVAXBFWV6fPFYs3&!9tlkIWB7?oixD1q zkYKip$4Qs$uYpa7rm;Y}f2`_9jVwid^bfH5p)i)1b?BrpQY?7zX$o=nH*}8mv#uPu zw?YKBaKFm}o_wrKn6Y~E=FLsLU--5=-g~hD=Y~YXTnZAzeSF?A0uWjP zDhRbE^uk96zA!NGH%K@ngH6&IJaT1M?508w1}y!DoLa!$rV)B<+wTC=#UV{nYXaOr zp{+sYf}L%5MByMghcXZ($AKhImr*zh#e~kY0U$^KDy`N z0YgCc!r~T!$Z{kCH+EeDmA1fg7^5FpOhr?c;O_4{fr1S{SNnSKS0+-Z)`G&g+XpnM z#oP9po0sM;(^hFNftCY-Yz2qgkT6_3ToIBtkJ%NqBtw)Dqq)0HyZwa?>a@gM@ET<>J`#V^07Zhxcf%MjHmJNFFoYGm7O$+$v_m2{5iTL-n{(C~ivi z;n|{U%D}5%Yt)3puD~MOHyvf@lP9mj`qREwZ5^k2)rE zKw_BL1|*`C#+bDE!3^|JkonaJbmQd}jd%~rEeKtTM0cj=>V}1@)cx_}$CbrK$w&T- z!6|zTkd%(m?Fc?;XF);17soG0Yg_IiuC?XK@L=cKcvMORf1tX+EDFQA&Isz!K`j4g}^w_QiGUeXzq}H&-Sd 
z;so~LPbdHU$F4%33Q84m8Xy>Y5ms^;`UlLD5mxa(awft8vek^*Kva1bx$oq$$&L%eh-Bqg$ z6y$yR2OgeM$+;oqdoAUikwGSphNMryx}E0bd(BHVV!764ILj?Al5?g*txGjfubanhzffEyNLdnf z08Ug;x>#^%3n~B@j>$$mEnFy4UTna5>pW`t`pgRwKY`v=bFBErroHHCMI_S?0!t$A z6lI1#F`~)EHJnHd_E?@uRmAHe?{W51!WOIQE`G)_0ig{X1nO;9J%E8kP#wNYilPCV zwHGt@(oL|jXp8IU1JjlGL#M_;q}ynai~UIF-TMYlc=c*d4LN7Ymw}CeELyeZJq@#x z1~LKMxT+=9`zrAYovb{vO~)2?<8&}Z?kC{Bh8FkwHt%RrfNdJcVB|p!p@|- zDDX9^o?a%B(K=Rq_^N>D?|;e*vCG)~hIUsCD8JLS*vslKU+OeGOsNdZ%T_(BsXES8 z_9?sU{%H58W5!;U}rdq5QsjL{`WAtLN-;tWM%#R?;%$aL68m1MY z$2XSiaBWa^=unx&NdZirR>kYuT(bHXN|L$Y{S%CbhdZPNZ`>p96n z*Q7gftclu~%wW&9?Xer(C%;IyXYc+Xdalv;r=33mF*UoGe^KmLNjr^-(9A)DpA9&x zQ*Xz4WY2tivFyN4sI<@hGPBI(hbuqrb`EV?erl7c?~Nm?ZyeeE+0CF@zo;+x?x`aF z%bP>CNU>G?j6F*Yax9oMu52&!`b`F#lk}%^!$;*HDrS;owLBey9Kr)D@T8DOOVFV3 z+O-v!1t4$z@CNZPY(iv`86%A$FM4C`?8nCr9Jor;EhrI!2G_ZYp<^v)S)LLPOeFE) z9o88gjP{@%=w%Wkw1b>tV9P`#XL%G{%+hfj;0(L*So%>I#-##wPMH8MMjo&4?E$ze z6@$2!fX-=K{2sRXKCt)A$Vm=cpK5}B#NKy=~ z{t@Kj}V*=97r|ILuAT$8L@p+^?q781-n@`+9IMI9>w)*LM>hefUJRWMBKv z!esbKi_EX#U}um0=!n^wXI|VL>`Y|iFUq%x+lzo2I29bWp8zl|tly;Dtn%knQ(JIM zyaOe`^b{isUnB&E7vKQq$KD2tg@vxotgq<+YgF@JPl`t@n3X%vRzq)lDNMF3a$OkG z)`c<5f^#1Gym{(ydILq{-TU`@*nn-o%*pBErO1ao1$p)L0z;prHeM5ReUlR%Er){; z(Q*J-bTcOTBwLn6gAmJr*oH%}?qW$)nQNlFmcem>Ut|1sNy&|-$;-G}4uAP16JJ~? zmr~!`AvQn7#yIND18vl+=?7gC3m#vZyrwUlb7*SoKBhfWS|G-^_~^maCFQl872D4~ zzD+0eT8lOmIn|Wl$i@E-mmaD~4RDD!HT)qB1|ntF6Z;%hn*z8YLJuZ@epBK&4eOy~ zAv=5`7hFmuJ~wqRWaehTKAeFjpEwsW7=cEefKV2@RS;R>B9t<+cpgk>?!tkn&aMcw zU9#fuhjGBuiU(wC0gra#-PF!k-UR6~PGHIm2UYAU7=iO<%X&3+HNlpUhBl<0mIuB? 
znpU7`=5|eWHt$~iJPwKqmk&DjPN3LS|DeOs>M_m4j~W3*!>Z2^d$jm06M5ne#c7JO zjV)J{dgq|otO8A+jtgL^*4=fnG{BVfnNJBwmizbj|da%=Cx@B3qTxk>;+xBkI0Lp56i;1)1ZnA&LG$d!0rV ztOCBf`OfI^{E)G;@yoM#ev+J8WBHD^ANUu`l{6dJ7D^EJ3ZA)Q*zl@Y8y>`$rY(eW zkdp$TBq$~Y=j7V z30R6b zSsrC;XBG3WS@SyeJYZ7rKbU->KrkUjFl^rnTM~zE#sZfKgSMU?Lu31zh3rSxxfGSc zQzC!#uV>HNfo5x>%kr-lJWx8n^;S$5!|eFS{lE7qOf;HZ!XAg~dGB|;Qp}B7-yWWn zaHFQ{qvNq-$1-v#61pV?@e>p1PDyVNHe*AbHTFMi_NR6 ze01h=_(ph_28?bhkBA5l@Zqg9bIsnD&0IdsYUx1s;9`>dnUdQzEEg0ql2A7hQ6AR$ z)i!xp4TRc}TR(6GuBYec%|3fmJZWNSGvqI#=R1OB+zqkQRANhi_2fJ-mVVGTweaw8 zhJ*`{Q!fpz?GxcI5%h$-vjx}Y{W z6pD^#k{}(vWsfUo96LrFp`O`Q;5FJRh@uITNOQm(!;5L@lWF4(=tZLrV4E!2rDog< z!v*hzPfa=8>Z2&b8pZ4gxR(?ciH7_pU*yxDJCo5@q@;n=l87#eG~0d%N3Jc-eE6xwgC%@5))&Q#$2c{Te#ZZmzV{=Beu z7TZn(_@-=wWr-$;DFmlszL|)D-_gKX^p75eUc3~+BO+Dwd4cccJwh^jgAp)1>89d9OV4NH=IlJHbA>+3r{?!6FmG=4dws%RuBE+KG2YHz;6I`^)I zT*U=B0uo~OXFbQ0%AQoOO{_G&T@iatx%KAom{D2n!oZ>xYt%k}_8*%W8NeIqeLyvr z5gBFKWrR5weMBE3c`iY)AWCgKtKoN9czuIxwoA2}+%R}acMunRz?U_IMH&|EQUhDfuc2}$#^x0Bf}(0j zbcg8Mo$H8^y( z0oz>BsjgLPYXNH^RQat}QnT#Z-(Im7{){(#!*hLRW+nyPmxH&)fA+CT`zaTzKYEma zx|w#zR^18>O(ge(YEW>aJCP$F1DuAHs)*J)8g(QM9U*t=My&H;?rG^OX`9H^z4G9% z?PX1WlGS^H43I@UZ>BREBFSox&U6ACV{mDF`NK=22@-@6Mj0f*hwO_%I#F-$TcW2l zHOf?X&h%uA^V5iph_Die|N4RW_1Lm0IGr})I0pPE_3%UzIU>WDfDY27{2ibUitt^G z3F2^(AsXGdEC>Jv6z%dz<4MZY0*}pd&3{|tvb;GALAK34j}NLqu}ZWi_!%GtO^ZZ^ zXxy>q_+7gee&^0g&c%zjWvSC5+bQeksJ)f*=&*JRU)d*V)vNwNC%FZrjIY*9XD_Nb ze*MfShsIREW1*F%%3aIx$0JEE9#{Dc&ruvI3TSj}Rz0R>Hs_?=1KB z|1;n?U+kX3NEgGjTr2rH&8WfnOM!ex_sgANM0!qMy>mg~fy_y>DVm1|ch!o&Dr*I9 zp9dTG)r!va%uI8T7aR(gLgP=MS$(}ez(TTO)WBQ7Ljc-&IAvjje}(Pm8v05Dh7W{B zsJfP8_i0O{o$r$?6PSJSz9bkRw*~*AAQ^WzuPSoR&U#*LCtWp$tcgw1#L>j7 zedo88twILV%uRE7uniMqQ2fY?E4s|^F0Ujk;yWHGCFFc*_IA&V9Fr2 ze8L@r+Vrw!xEQp+Mye{oydmH{)E4SyG-^Hysb@K8uS)J~?4Tq8PYm<|+)Lw;hXq_x z4R+U_d8kdL(_rVnlI%9zx|r51%{=jMQI{}hFYsA8yPhnL??$R!|DK>*w=A8d6&#(MUc$8xeE9srLJe5GM#=>C zK+Bjpy;;#(VtuH|uYHU7bfWnfV9Nae!qu0K)RikH2O%7y&o=3wc 
z2I2;MBWO;H>Z0xWsH7YepXf}LyD3i3Q+>uxdwrCQG^#Be0ck?D^Rz75g^i5BbMGtH zm=tdi5O^lJ@=d;OnutP0K*c#{!ZkeQQ1Au~Jp|vUJGR`AhG*buQKDm(fV9yOqDxjz z4!w-^OS(eT46t)=Vl3w5Gy_`Ykhl;syDAR!qc{rVG;oa{wZ`KKrh>+EmpA6NG&RD> z#FKm!v-Eyg!oMsmEL_p4W4wMzKb{T*vu5gclG+>N_7f@^oMfU2#9pf<@n7r{wqIGQ zBD#wDZpr1A%c`-}N&Lp%yCNf9clm>l5<&y^0LRZ zphrzx``8foAf)D3K%&<%(s0{{B2AJp^r7^r&U%TC0wR z9ApmyJ3qDVRJy?b{Lo9^lI2mb%g``{3N?_gI?$<-KLw_8Kz$N$Ho)5;AAFU@#zvZB zV{$@o60^WKfaD;}U&Ev=*cEzb`iNd9%)Y>vzlp8=>QIE0)>v(zqqg&aSbNu>pIiED zA}T`_9_;9;*~%->9^L<$w`|MYRAhn7C>x)=YLT(@>G|UkL!tf`*}D>-9`j2Z|FwHq z+4-$(j9Tx-ybI#~BR*H1=VyvnmA@WyJdt$YIJ>|0Qh>XduifFMNa<0pI}V$vGq1he-V}Y7*M9qp>0oO*tu( z$kUx!Q3ACZg*;Q5U=B)WbNJN^2p_Wq6OCHcm~2dYX~cwLZ(n?@4cz8TyQxveM$X6QI9D4OH3cXgQQOeGNXeDJAyTG*VjWbao#ntpJi+8-ENow`kKMwh zlwhL#C58{}Ohm6k|9fxC<~rO86mc}9!mef7*R)LWKB{dRV_^b_A%rl&EWpkEi{@g6 zTd8HlgegS~4tIy{_8%xQftYRJcd$HPaNt2oV`+a1)eQ9Z;kg)7NP2FNFN2ax0wXul zt)P%A!>MoSY>5*G_It66gp55n5f?VCLIL>CkDUlKd2tc%5IiL1I;cizW`$ZPrDace zY5Xk|M!~^6puYr+{sPw8B@$xJ_-^(wNEPukM1LBYLL*dshQDZ?I#t$G_!N=Ezq_&<;s4c5Rr)vz2FEVUC)@{?tOXQ7q>8QxXCA-{sc^You|i=a z&#OxI!rQ8^`_OGw%Ed}iOt;t$1_~+nZW+w>a4tHy{zFDvNI6LPg}V=tv>n+=oft0! 
z1nCtT70JM5iH4F^x|c7e%o*OW3j6mz7=GT6`i64wFp`8({R$Qb#MnnV z3iHqh`BGy$VB@)4R+d-CbPJf$fxrobzE4a~*Tx1+KT+gX78*fUxCUy}Jpg)S+D-Vn zLT61Uev;%|9#TMK_!v)k2|7#8B4v5`Qk)-FRf~au;KoJmF-Id7Ee{V|bwKLqxW!o% z2DsXq5itzd3aStP(qp8%MFj+_IdXxaxS|!0!=@=JN5r`?S`Z?+J?M1gQicN(+rT@F z3V`j>fF9c=vusDC>@MCFF!G@R7@3pOaGa=8z}`@DlVu=GL*uYu2(NOngpDXYZrFta z?Ak}4VnP@dstAPz5ZNQ^mynYY^0~3Z6;PF`;rNm*Uii=DhtKbEo|cWoP=!*|fd2Zk z98lIR6XQ1+nHeo%l1C>SjysAmBw!J{7`caKf=dRm$ErMf?&yR2eE!S}BR)y>O_7O< z%G4F4)h23UWK~aGeW&`#E7-i!d!Q_6fr3+QfRE7zUFT*Q@ER>UT_@HR9Fq0AshGm0 z=yuwD&{Oi-9;JYl&ho{Lj_s*JqAh3o8(hlnyX!WJlz119&)gjmwA}yw_9Mez!fvG{ zvf5+%*&bWNaq%ejH4*}`USZ*pzDX%cr6q%3#604u*L;)O9*G8{6N2j!NU$st!r(W> zjcOW-f3uEJY#MejT%p!T@;0sGNpo9vuj6Y6ssC|`>VdG$%voBQ5%=%U*%(ZLQ%){d}97% zGATrv!NXk+cLn4NjrU)}NF{>O=fIyQNh@4$$fgIBA=RWp}PuxRN z>pmI_0S8oag%y7l^3SylPCVn}MrVfsX&7LGmyo*xB7(mug&|V`fyojf>G=f(*Z9fp zRI?*wqsc{`W)&0|5I4GcEtfGQqZMFbt>AZnB8!#B0RPVPv>Jf6>as}1?z$8VNXdo- zC5VzMS&Y--!EOrggxvbx3UIDt9nZffTTpy)kmnp+83k}){$^ZIn|b^8PGYJO+nKUN zv874qwZ+CIl+Bl_Z@zN#pQ~f~^Jbfh+uvOP#zSErFlgTA>m;aBTXH*j#c#B_Ogoo) z?ZJPj#f|~!p-3|LQQ6@ch^4-{aazkb|2M~3p(`?+)VEh>pJ}n`(TK|{&2im3GBUp) zZ{BRP&uV4Yw{`3Fy>si#(>|x(QJeXpu;AgOPh; zS)+a7Dxd8up|GXX!*m>j?Nap4!Ri{%@J5JULy?dbT3B1~cey^Gvk??$f>#~paDc@{ zUfZQ83JOPtU;u{RN+-A_`cW9Lf4Bqer2#&upD{AvMFBo31#?W;U=_@ji4-r}@vkWZ zy*G~n|4S)ztC zizZbxKW`5PHbU-v8Q8&erCJZm%kDU?<{x3|;{tS4w;MVeyU#QI3$FnqSK#3r-4(m1JZ%@6U-b7&G znt%tAUEDzjri6I+;Q}L4=>WXm2fLLRJ41}h;q)E1>+Eds52Nx|NTr%!FQ>?8@_GVr z&4Ba`%D!mpeSmW-L5P}<40@PbUj>XVsv&}H%nVfg%+L8E18A*u$8P`;hjh>kjB@x1 z?o1s5SuS=Deik|7gKk2SR`TCRMGJ18hRgUx*m^6GzX&;~PWKZXA%ILX-(D241JM!= z8T%x3{|)BQozxfOR&OrBp_&L0waqSBlkZOB_UX)B6)V@S5|-TQXP9Uwx+nZVllx>g zeRm9%UT3eFS_9l3VeUyhNW*A)m9eNuZE%l~;)Dq2JG~IK1I_!C3zV)IX6tRe*YNq% zbJ55(GQPU^6G!+8pD>q?eo{0mnXhU)*ElWScQ8*UCeHued&3;}-rlm#A;s}M@=uRn z%^b*GwC~8DrGlc9J9V=jIlGq1ejUF63ot>h`d;!8PRN~bMnGx@#w=SwS|J(>YJJS? 
zNh$=x8#G3Q6o%@>*b9w~ z<1~!GRfx+z4|uy>q6ml#2Awz&?!qvU5MSy|Au6TQ64QsQ?}C9U8Zk%KF5O}iLH7(t z3njM_NQ+7pJu+G_MUd%~(Ycx0#*XUZQT+`)OI%sKZ&O&u63_=}pabJp)GLa>0x)DJ z5FWf{_&jYI_)sd_)5A598$Z9}N$<+PAI5+DT5)bR=Vt1xxY&*eeZ2cyt#ud*W0G%U z`Iv*q3gm?>;UU!rh(%*bI!6&Vhc1)c=ZM;Yx`dK;^a{!h19o4*jGV{z?bMHmcGtD{ zn{BwpawKOgsar;V@}{uY^|cb_Vv(VNZU&}K@%+PI6rUbXJ!R3i(G0rG@uiF7tJ6j{ z2@U3b9&Z>?tllTmG}+ivMf+lO(*0(Q&b)%mQ6trZHASCMoy-mq1Lwo> z;x-p6Vum6!gzm1(_RbYc&Dr0}e#YbSv$6pHKt2?^4T*kR_ZT_%Ec><9R6cJ=J}Bp! zH@~(~p47*(;*8btBfr)4=?zc#@pszhxEXw2Al4*vFkbv%@Kc4a+wL(zD&@jt=b&$R zEHrxI+&St9_tj>l0TCrOaji*BJW677s6?7oCkKX4fnZe@)5e zyB0<$B`aamjyW9J9HZNa{cK3AXV66M!z7LxY8)p~#wOogxxB|XW9Rf;e01ma#ER}l zeuM2s6Ez`CYcIUi-q^mOV8upXKgZuL8Ze9u@&@&t{aP0ubUNi=`mB?;{!r6|+T@`O z_b20JdJA-SXkJ+(I@w(Ilq;kWXT9aJARFDfdtb_)8a~(|HtSrCOflEqp_<8*hUG^y za+Rw)s=r=_f4-5gpL-}syntIn*baeil-a1L)G>Ma4yp0udI*-`2EvVgf;MT zIMx}09=yJLf64Uq>3X|-t@Nj`{xWf)O-yr!{x)Uq4Yj9#uRJuKC%4T#z-_{4plU<9 z+{x|hB+PrRXU1ooI4^FYTG;a+j_j{5icW8Zv-5axPH*If)&i6wv-rAm z7(8UT49cw?`br!1$sNbf? z&VDVx{Z~%T8kr`U-!l80+Ao$?<$dodKl^OId+07rGas|bD+xSCsl)$xMgQT`=m*?K zny-`|Q}^MMwu7O%Ic%a}iF86&bXtQPPTC3Rlggwp)v#$c1q z=ehBYUG_tAS_UW0x*LKb#!sj^>}hqm-?Mgp&Zn~QWa=etoBUTRF}c(?<_g0In0VtsXhnKtMO4b7pL7SL z!7{R46R|08-MEF##>g@Kmj!!2|CqjLtRi9C3R%{Rd}K<*oUD^TE^!J18+;#e5y9Ne zt~?k4dq#GCayM`eR1a=FXATv5_<{TvxprP&xp_`v2hOx)e96x)9dMVFzPUA#OFsZ_ z6DO#kJP1#Ir=^71?C15Wv2|>1yvtT$|8W0|=uXGbnJU$vcFek+(BjG}8$3R9S4drJ zgNrcOS4RU6#NZd)%>B5lMPkz(bYS?N!gbOrTh8ucXV|wN0C^=M%5vTR=c0R4 zk=kza!!ltedg&su|NQ~@`+lkH6-S)r-@p6ce-+ch*yex#C)0mxz4TZ3?)_grJPO!A zBL6>q=+FLdPFVlL*EMh4NsgBPmyf_1Qb!l781VoJB0LjEvOO#pkbXy#EF7+6Kta}? 
zaS53UXitj7IR7u-mCbr7l9Bz805*O6l0zc!B^t?~CexYEPJb|g#*Pq>^#kb3`NNyb;KpPJ|2#6&#Yu}Xk17HtsmuVo0R#H*d`4SqDZoYE0aA$~r=`=MOU#`{1msEFij3ux@A%(6dT%8b@Vy44}0xA}Rr*t@VcZ zL*=R&w>>{@>6<7rIrj($q_@$sQn1S2}os-p8pEAnio8iaqoD|?^Em3yD( zUzoA8d%eQOv8I!~`lkeBJ;P^YE!B4oHu!H}P_cBjaeGAY><>_zm(ZXBoB)k}RaDnx z9M}7>&ztzAfq{2P_YRyvlJ|ip(8tFIOMpcY!Qw)&tnwEyVEQ3l=)kl^viQgO^DA-C zSa2_c%#>vL;7&<;E#G8%IRud(aP57q@54`2K0sR~K#MH6mjlJb>4R}pU4m=Gh|@lA zu7B;C({E8f2MdR|iYYlL6aX6(%5%v}x8888jIVs*>$hKKdoXgy3&j<+v`BN&_e zx}eQYkPw?aFLpcn@N0Y9 zH8a?3_;V|TSOeTS!ag&3a6Kq{aQ6v0^;urN40abWn4|~sCU&iODLx2pnF@5foAx)w z!rVv|uK02p_g0{mBpYtxUVv&0j3@T~1Yyw7Wckm1{|U1jKq6#>4w}SGUOhOAgSFDm zE*q9FEGAY3iwO;}>v?Ej(EzL9DZm9h>d*x?Bnfj6?aDN>a%{249rVP z`sr!n1dU7dKlA0dr(NU`jmKBP70KaRV4+14V+Zufk}cwwtiaxN@vhH zNbmvEuKkgnz|T8l?uMuWeu_y1UQWXg=${UjcTzf+>z6kdSn>2N#C4=g2^vsZyFSsY z$6>5s>MGUf7*6^+;;ffNvW1fM#^i9ZJHe~+s(5#VLg9i%>pSk3-SX`w@Tj#LZABE-NIIoycGT2$`RD&z#cLjg(F?<-CFwcapM%{u zg4nj0??nBy(#8LkUWSdyEp{GiyJ$ZQsv}zvkP3m$nb=dSD*>(|SZlff_WgT4f!!ps zWlL>^6_nVdj8@a$_a|XX7++|j{}}+6@8*aM{(^uyvHT9C2> zv^(N^lKmlF8sgrTMJ^LPHQX9VqA>1vTfRBv(oa!B2s}d)Vx%#t&h9T$bYN}(-UR!% zvI>WjC#ylk!BozqkDG>Ac*2#tG1BkAxI%s!%AN;?`F?N< z+%hI00P@RnDAa_x4;=g=u*K!`2H__G(dtUb`-SY}hKI>Zh31q5bJ=}RfvwAH{W$C| zO%sDsAbJf=@*8AIeuP9^3s=C>5C!?|v<%0OCpp>dFVIfZeTV<$vsEQsqmKW)Gpx@a zCFx@gZ-QkN17Z|O&-wm>(yO5|c?+ua!B2-QZvm=?8b6IX918dE+b219nQsK>0^mY|FoE4(Bt9+;Es!uh zz1N|)RO^h9C$|+m#|O=|?3g+xG`d3Z5t3q{6wa;H*P1)LYTSZ z-N&PHpnjs!xya)8>UX9_XPY{MFgdCGBtRHZbhIEjDV9YZj3HJCY#+>3+NQv@*f$j` zj%u-0xQng|Hkew)19L;owrKJWcijQ)4m3J-Y$g2U70(hFq*<7ik0nFDJ__lNg>Lx0 zdn(|TOLlfbx20@*z!j*48fe1&6pKnYP;6==Y|UOJIHgoVejAs1+#Y8*BAUZ1LW|Jb2EFJXcv)i(Rs3H=ds$2w%hao;zuOYAsi&9q18 zSeIbIS#t0QyJlY%9S}Q zBOGUA-F+XNsI`nVWMZSp*NAiY=AM83jWs{og64S`;F@-qJ>G>Uo$8B7jG^RwLGVvN zEr!5MOVbvGkY#{b_v|qT`G9K(jUb7c?4Qn;5=2U^-P3cv!`Q;&V#@E=Vhjj+*Aer; z9C?p$o-HmW-xlbVyRrC|!7S_HzPWy@w>NhQv);wAC0~>Z-0Z)kTuMF-Jk}##9(LBy%PDu` zX(S6%O+40UBgjr9=!|jx`Eozz90>$rYM~EWPqBt;jf|Rvnebb6r$|N2$ysh`acC|j 
zi;ZBm);^5*5Dl<`Be!;ud@CE;IP!u;OWYrV=77>B34NAiZZLhd#1+g!-v|=Egw@wY5!l)cv{ST-dON=HyFD5~uCx?G@m*P8g zX_@qlVE??)y+$4nRHgJS^VihjXJOZhYQwCq9E=r0&4?j@6BX`52WS$D_l7uPRVar2 zdk&1uUgXO{U(MHUo51>)?B(R+vl=oKc(YxgQ-{4R=I>R^BiMJx?`p^Ng=S-W{pp5g zb{Wbw)HpHNKz(5h8I6)K7G_bLMXxccZ|B|u;NQTh)t^KrWS!BjYJI@uv@A#!H2wrX zaF;}MCLI(c@yr2hChj4PZV3AYiC)eZh3*;}hnrSw>!!dx23`(ND)^=>pLAPwyC^FF z15%c~4B5liZqLQ=8c{BP!bem@{(oxRqt#gFf5hFpL8)GQ!qKdO&v5k9rH|-!DIGE6 zcGxe_D@WVyXGm$o*f}li?M)3y>u8Jzh^avxj?-tBqcE8m!sZ4m3MoSo*sD8;_>2He z5+zjIsj)Ng+*nm5bVYHR`16M1-=H71X+h)*MXOxs(MWS(j1RmC{$?puEWnM2$5ot8 z6WUziTMuvx%W2pS|1RQ1bNT4mq7qFhSq8&RdxY4Xk1eDvi;Ss>j?1c#v3J6v-(qNM z>Iw-74c1?c59+zXIFA?wq``-dugaX4ihgNuT1ichV-71oxtJs;fF*<8V+>i}za4C0 zFvU$$JEAWF-M^8#PCxx%pFVa$bm=$h;7AA&hmE%7H}B!=Z&Y}#io_l*#XA-q)UuO3 z3p%*AG#59wZhJ^%p5OO8^2tlqtGxy@)T~N!$vg_zN_C%{P-uJK+S(2zfb6XLK_u1# z;hZZEB&3_EPr-hS-2i$Vm@=Re`|=Z|g1B2%c-S}1Jx@V_KnV<7{0++MlxKIed|TK? z%b-w0V|3g+@7XgmY_Xh0LcD?~(?sRb*OR@HfIPTUst{d5lfz7aUgc<&_vOoe6hL(X zEsLoO`Fi<6p0wH(#WUsFVNTuX3Vt&`u;q>a(w(1W3 z_A&`0l0(u#1P;*CuC0k>coyzLa9|Ef3zyNHy)YRp$4E`N8`MGE@hn}210t|6UPNVe zg=OgPKf!^@+l)vVgW&zOzSkTLj0@1etMdf0rU}E?wYo@A*%YeD@4$tK+;~9ezf_BDe;BRWw46>Y2L+yuq!GA8*I-E?p`FDFTc!)Qmvn zCXxdT{j?2^qD40C2L2f2CIc=T%dZYZtF*0A$xZfqjeL$+X44P{Bo1+x?0E7_LGqmw zYiguJOaRzJuZ{h9ZZtD}I1Sb_LN>;CmM|0t19Z zny121y>(D%nYEff+iE}$Db^v$yS4`me(LYP?-H8)2A?L~*K*yw#$~PHLv3o;QmYZB za5%M^bq{VAy;#R-*O2b@ZgIy$U5}9`uZzcuxkA^litgJ}`U%C6zRcAwXM{UCQ&sG} z!p|O$y@OE>xg6q=;wOUhR$|NT-o4hli87DlK@fs;4eT_!a1zHG_0cW{*a_7{fvC<^ zO^6B(6CERPqLmZ#+$TqkQdp=7=(rOP96w0i_Rsv+uK9Be02MSWWS`wDLYd?0*v~4AMs$>U0fJ+9FupH1zO6iGGx^dY4 z7AK-lLtrKphXRtzs33N1t)+A*`aFc8RK+8A-pwJK4~{fnPaS%-Pu+4ALS0_U;CVzu zY{!)Wj8A!zIlg)N{{D?eW(*pEH~USVS|~u|K_`eRRQsZ20$z%<25%+`vzY56*e{Hs z_KK=kmPb=pSO>pzhiYAieeKIOXur-YESxYpp*MZ^{{G&qUk5ILu}t3fwBb}dzz_mQ z?yI+7%1%ola0%6$cq)`^g)=V@&+HO|szWYJ!8d}8fbr5d4j5s>;0CH!>4XYOusRS~R^*A)#j|SVN`fNh^K4c@ zQxY|ShLcFI=o3P4xCzKpYeqVE?2Qd?-WXv?Z(=++-qg6FQxeH>(2kVjcCvi_lHp5& 
zZ;Qb?jCwx|Fom##kgp&bUGokwqr!nr939fdo*DHh3=bh$)}sQm(Pn;gVZQ!Gh4Xt3 z3G2w?&sFMx=N^@?h(3qSfk3!}E?9n*3O8sYij~p3kAh*L)P0##H6ayyxZq$GZa^;TAjgvyio& zDEu+k+R(Xa$vln)>vj!P_=oPbv%9a;On5x0-Y*#|&f!3nee;$q);`cRA$cmB_zT*! zf8kbVJya)8Cf+XsT6}%bsjpC~@QjNB0tnPIy(4cboH4Dhf3@xgbZ(IiE5wP>qjRN@ zDia`EfqHtrC`7~o>S{KuL8Y1mQa|0Mi}D`E!6pg~mI`_qhw=Qz2l;&4@RX-nHJG2U0N@4_viqQ^ zkv}VeV8#@AidYuN(9P}FF;FjulT~cR4jgfS_brFEn)vS+@^zp6{rB2q(*GHW^ho|< zYV%ziT-B1?$ri2fINmBD1+0e!SzLx<*3n&s%kivq$E#z%jazdD4|@>oqyVrXszkPe$)l$kZ*ZRJmeqbm0)r zCJU{>f!MR52EXmyOTIzksV5QT`01nozW3Vef)G&X!)yRN2O{QkednQYflFQ$oJY%v zhIv2yP=c>OqhS>ET<>si=yxv9@i(j_ofv8I9s+OKKjCN8wuBiHmMQc@8+jAqs&~hx zt_0N(o1SFqG6{3_NRL}(3b~0S$#-*e1EtT8pQ)X+8=IP#)KL)hqS}D8XaGZ@UIo5DF&h_B#5&>HunOVF)r=U6th1>|`=Ee8mdp=3FD@Ld7p zq&6MV10eTZFBD;tAP^l6$SDzuk9P;mBvznV6@1OpGOBQwV}U11`XpKs`ld4qc6ZY7 z)&$_K?ck|xVkS{{0cA1PB}WVaz!;I6j&7ms@q~=`+~`t&^SuubD`|M8+1QTuZLGj` zr9|+kLD*LF=$@5ErW%2^h{*}dKP0OS~(rBd{Nz$;>zkLPgspL*N~BMR_oQyHgT z${a1cX)@$yFAo<9JT6Mz;Q8gF2zx(_I&K3auwLcP{p82u=&V!Xo&$3Y@>~gqADon; z>(fDPTRLxcd8fX%b~MheZzupj6=85xX@pdAJCC zt8FQ1IQIHFH<6d4fq-$+HGlMtxyLFWB1pF1h4XJ7-kF6G`0mZQm@ z+RjI4-ZUGMmC%nA)&(T|9+!Y>6uRTG3PVbZb~#G(xGY|r!tAf`kCAY21P zuGB(#HmgSUD6tu}H~esjSwSQf(212~9-tZ_eP%^mVvcz=BAzq%f4vOfd@?@>wAE-u zP=-8xVvFJ|a#G0oCjJ)ch3c{7mhZm%uE6WZ1q%X$UWX2Al6M5G7zQW2h8$hg& zGX@f<{igmRGzuR8OGTxDRR&LJQE%qo@W}L!yFDSaY%B#zTX;7G)nSPopc$5>sC6_z z8Iav1QVV7;)>va5-JfZHK>6HvNQc;JAxiC#gPU8<(<(7bWMo7gj0Ojo=Hb2(RDC|j zuwgeEe~IK=CSgN#EQpD@8?GM3)!IpaK~k>)4M|J)2Al)h`62T_4xX);g(L1k86GS9!T)b~ z&y8p^3%<^Iz_&S!%BvWUEZ&g9wyf+8j`wDuL7C5WAm7^`L3 zIg=AEDTCmoiSQUL|8~^6b9^DXQ}E`m*MR^e<(cHi!6D4M+E^ZjSZNluD_?Gpj_$XW zC$t>`+*pH?29WBSRPkXX1`i7(rUa}&^qIqSIKMo&{`&6AI2Cx(9CR?D-7sE+V7^Xt zG3t1p`SI7s&}K@cJ;)|?7PvJ*b-*x^8E&9=>tq3u(41Rv#+$SsU)o5kY-3%gj@L)89 zP{=?2_(5$H6Ar3Xg&)4z3H~bi(^N@8lnz8yhi5rfRqj~pI#btmIbRNEta}$Y2=}R= zhCiObdJyW_n2-AW?U3kP14|jEnCePS8 zy?=14*Nk z2CxXb1F6qnYv-{T511C5!J{an$p@7hGG)`Jy!vWca^#Zb2z6HVVIt_0l4{)yqznsbaX$E-r3V`hoj5M&!82^`6e*HK_Ain&n3O&L3vk2LF>I1p%-Eoi2~os; 
zDR(QZ%-oi?_+hLKSurpS|nMQSEl zm0)6pg=`p!bAZp16U|oUYq&xYLr zu$>{nk=gEDyBZc3Q)olmkU|x{SN6j>{ulu7M$e?}z0rWEC50-eBNO-p+3t2XB!J*U zQMQ--805|4FW@0l-@qM&hH^0r^>CM{LLnTnha0pz5YwnNyZp>)?_%XSTs326XER^@~AM^{&~ zOYg>P>jf%*3H<-z<+Ws9ICriZrM!}*PSTbQycV|4AN(7bQTiY5J!ieVI8q&;71JGx z0>;D)1(%Q8r`nH+3mAP48Y1!yvPUxuArL5J!9tU+y0-wU=W0yaK)nRG$AQB{t%y|! zjgO&-f4b8CZ+5Z$*V}%v0c3j4VzT_fK>3p8er%28Rw^S7bH2QGzpQ zo|dz-vlcH4c@KH6xIih;4#0!8f;}XjX42wb&``;Q3$PhxB05sGj?}C!_u@ghPmjRI zP;;{>QA+15bx!?^%3}4h068JKLm5`O-s3e((B$|oI$Ac0p|Pz|S`I~{pj`)eP}(FQ zIJN|_)ba0vBwQVepU=5j5Lwxz@t8*1Q|5? z4x>rDhpfF2Edc7q0%L-JO+Zj^FU~7=B(RYs+d8!T(=aPCvx4>j`2!WLg!dy8p$eqo zQ?=gqS#w3OMrsn;Mv5SW9e-@g`nL#!>%RN$Zu|fQMJ$8}n(n1gg~2+A ztOhjT728l1f)bp7r5&KhdeOX2LrbB3lIun}D|SYzo>*y0&-tlaj{qaITj*g6l^Cx0{`b5N8WBbjD;|{Zp;#o`xS})lR^VwXw--nxNdHodvZ3e>a`-IDP~#06QTiQt22VYVyYD zChv9cg@7yAov27I5TF(>UAhJf4p}}a6wqoS>U!W7P?!g48T>SWzWHAZsRo=8e`tz%IpVV9cYjJK8A+I#Fx%Uq^NV9{PLcGt zNgl5hI==H_8M#7^WUq`i&D9Ff0giz>j)IhDo86-3f-g9>s~wk-^~x#-Py}Ga_a=Hb zaWcUQP0_Yt)MTTRmR>7#hyYB$M0+|T{GB<=l06<+@xEhB} zc&CbAa(v{AafE+L8Wh^~r9S!X_g_XUl2mpp?v`=DC%>Vxisalh9;kd% zYszL!W`QE3s^0LEUwi-M_FG(b)DmOY(EJPU=TG@h|FG{F^iZFE^YdSCZ~yN*3|cz+ zzU>RwA9+tvK@4>-%bOt>S9 zKtltAZ9{$^Jz|aOoKq2pFlj9Ln+;!3OVfyj;z0jLzw+l7hyQ(2{+5%gE4i;Qp;(Zk zA%Z{_!uac#NRvi97kiw1Ij9W4Ir_k~^P@KtK6T>6de3?dEQkN*$(|~wA4O{x-UN;pr-`(qe+0*T$phMU^`fuogpRvAV7`PDc{_6S8>S09=uPidl#?fQsZuqqNqnGvP(}sWluZ8u6$Vy3LiKhD~a+V~e zl6B>NF{h7{Zk_~-Z99Q=LKvmo^%Y0W5wv6!T;$Whx3oNAsAxOy^K0|CmW{2$7XjQ5 zn1a6EOW8H$`EbpmMAWy`iV1bdzu|*C<3nW?S?!^CS*razT(Qu?BY-(ipg?RK9RtAd zeiQdE4yeH6@Bk3nv;8J-uY~*@1&~CHBnQXwOhrhW9RAI5_^1e%KQb5vC}KLY?KygQ z{-)h6S|2Sg9e+kUue%Ykb(n653GC5Ca4r@;C~5KW^)T~={sDcK_JAc)%>w|xp%9M8 zKow}av{t^tQvAQ}7s|M#EI z`z1$>I}$A@hb}|$ldAan2R?pndJ0W;Ibkx=!vmNZ>c1T^$og_}%`8qvu<^Jk*DT=>qpLVufJk98|+g#6Une zMsZw4fKdF;fD%WK;NRTq4;Ojce-KV^jm^ou2(Jz9MS?{x;VKIH3Z4r3AS*B-M4Tpf zL240SHu%vs7g53OXht7AtgzwHhTH?%Qj4s_e{;0(;3r6O|Fz(wKlt#Z9LK-?@(e$9 zqku_a(w;-#U22ep=6tRVjVuUj>97M)zyK#}9%J+8pV24lDh_Wx&ihg8!Hl18mtk9~ 
z^wmWWV1OB#q?v8nf!2Ape=Q*iKs8u;&u0A1-+$^gCkpBoGC~66P=$(hwbr7=8@dI$ zg}UdXA&e%h!-;zb3T%cJcHDpQ5q{DPQdtr-z#{$@Y!}AK-3%CaH^7bGgLF>0b^jNS zf?rXt3Rkri^q%McIv`C~SNB7PK+4ZyuFqfePapUamlF`{ODnr8IKIE}cpY$AicQpj zfo5<1Zn9%Lup2Sg+(7r!KmF|2+b18x&Zc++?{+*uG>U%{pd+GLQAn%Fmh;A6Op-qP zC>)zS|FuwSza*CVfAwL!3P~FLf?Hj`GGpr;{?ET6!!Gq2yKhx2|Mg#9sh&m4jQ<84 z{mZj$zzp`keDky4U^lY1?45J!Uj6FN{=#3rdH6CR{7CWt+VlAF@2Z%CxJ7?vG!Q@X zY(nOPlli^nFEyDGn1V4z^|$_52;>`$YZuAi6uS`k2ldesEQ?-i615ozyuTB3N#zYG z1G;C*H+>YQK7MlgDtbS7u?E=jk;N1A>g3x7VVZwAH24V+EJp#i5r7}e(ha#2=efm| z0TV6MG9PR!azTGwe`p5$2f@**nvHA`gC3F17_gxfXubuo!WEH~$32^8If9m04X*by2E628qOkHW*F_}*26WvZ z+(<5lNnh|Rfqq{cj=+?FKq|2xOeBI{i0+11Q@Cd~U@le6B+>*jJ6KYV=7ufiLI_8= zJxRZh&3yQt%MTtr*nn=ZEAag!LvN_NYfy|k&Q&|zvkiwUSz#gKGGAmOVPT=jABO#x zDaH+ps)V>Br;ow_=qDeFxI^lnE31?#4}k$aKv#R3?SQ^j0iFRk>JflJ);Dkn2iIj5 zr84uCanP|sZ4szmC9?hI^kXpwZaKEgAI=MOHP9AE3xVH=L)NR^2GVdh_j zPmu(ZSUYNIgI^)`Z#BdQ{uqI9=gu}Foc1UvBumN(3GJiF=eV0w+VNw9ThY{reN`{I z76?t%7D_N+L0H?0XR5|@_&7kd3V8Rv)Q)5h>Ue1ZAGCn59FL(s%4{?HU&!{21f|37 z?@o=LSs17J>BshjIN00I8)bRwTlnGY(V`y=?M!gMByo!>wLz3J`1z(@<#G z2Q-$~I`9|ZwN8G#8^XT?4GK9xAh?N@;u&(*yUj$nRx1F#!%7Jz`zxszcjQsDrDrh_ zF8<9E`y;L>X_U?+jbi{2WcWa)&=}8V=~ytx!Ju6onw0_VoYjeDocxJEOkl8G@@!aJ z72F3pagl4^jn8ij-16@29X$W>Yhr&_rIiMEQ@sXxzHl${EEZ-WN8_$&2Y2F4LUcuj z;#u91LGaRyz%A-Um$?cC*ID{4oC_QAyi~=RSI=ZK1}hWJX@&+TqaE^0G%)_OPiEN9 zsPyU)`%-uWAf6j;{+8!JE1rXz=BOJ2eQdC&)PQml$}E9kM?l@ANxlS0eQzY%DKk~J0J*>Z#HEGiRoknJZe}Fs0CxP0G`t_cQ@GaziaN*{eTYMGf-pqBF0T z*I0S;?Uf2Rg5f=B=zT2K*0Tcc2DJFeIQk)eMiYK%*aJCv;?NfzhD_}c*nFCN7|%S8 z!9y|=FYgm>9(U-NE{%PNE1)U^^iW&@?B8hU)UXr8G6_#oYf(MJ(SWQJwLBrRBl-C@ zi!kbT1rR_pE-mJ9LsDW2^m{M4abP_1;dzhV)v?j%;GEJ!1Ayo_#Uz#h#JBZ?zxft# z;quW=HS(c;aGG%oRmcekg_&=Z{rSOqU}%fExy_j@bV$g+qHbYk;`AeKYYZ&ZX0Sd= zb6-Y*H;601enID?W;+8Ft37*Yax#=CHQ1xkbSl!EbudiUwwuf88j-LJA_BEUMl5O8 zCLu2-&H;2Eb|sAyhBnb2oC_u?SK-uN!oy=R6o}T53>QI)x1#`tiJul+QG(Vg(vG1A zHmEn|EEz!3*{v0Z?F!d12@q^j1{Z160}3UZLhW~N)n4t?JRaYh)U-jz6xqX)RVq8r 
z0rcOO;)n2$Zdqj_Hxb40qIwO!`zw0+<}CtDX#;95+RdmH?<9%*YxeAV(L8_y*zM?X zqLLNo^%6)vD1<>!B045>3eXx2*`uH1785lDq#I{O{p&!e^i1$0HUqMCE7h}DKWN_G zg&4gsDFaQ;L{5VQhs1vkau^UFKjCsfpLzVP2V*79DI#zjUj!q~LE;&XD+w)73|c=$ z2PNMvkarj1kTNkW@Ddqz0^@>EpnZ#|QPoYyBZWzyjTbQtQAQ(=*um#$(62@37LQA= z??y~oJd_CW***|h)LjHcRU{fLo)Y#&1_$tvRllPLEgTql>tQEE?hOesDKhA$(Z6w$ zLg{0ILKCNHRxygOo=Y;&NWKQMC1J3U{mQ(u;_uB~sNf4brvnFki7uz!Db^udAF?bd zEG*PcGDMC{1Mg_@z$w3O^4?!FdHY%fcAAcong?^GXk2pT7-P(*Aj8Ep!;+<-;dFS2 z2*q{pCYxqBZ1rhl8fUkWxV$s9yaQQPH~^0dILuiO8?8sVNwcz0%No4uSc5ys9^H&a z83A$ej3*~>WD%l(6zg=^7kdf!M2~gZ7!vW-x>=b;BzVEppoSiwJ;dcw=}DO0y~DRn z_r*MC#jHWtv!<41sAkAJ;KX^*E$BLGO5Z{G+lnHl$kQ`fZOc#g$GWDe}8t}uiriASn}h}6So(Cv37&} zL6z;>Rlc5kRz>XS@)YCwbyc~>Rs0nLi#&X8*29LjztfgSqWKBSU6OZw(y*!5QR;F@`Z_v6 zyZPKj5Z+B3>J0;eJ*P(N4K6FGXFDWomR|IDePaSH4@h~CLl2$eoA{1QA8a1)Ty_ig zeN#`Mg$uEEYvtC;aXOLOi+eFtAUCxIcA+Yp@+Sr@((8@lnugFs7Fyx{!uFbo>49@Y zYxac(TYHW-j^YRk9Pk*)wsbL3Fun={pmK@MU}*r|3FQefF-6E4gW-j=jov7Ja2d`R z@u&VYvC2A)T%zNwK%&VTH>89^p(8oQKD`#%{pk`glGuvt$zOyFHHe-!Va}q}MUQv$ zg?zJ5dJBNgeM(A7r0R3ScD8nyLIPH|de#|rIL#`{!V0Aml}njbc0aQ%_=>EQ-_3iS zKI)Fj=Az68mKjY~=B{uGzo&_$C=Uf<6ov-f?@4=oEed??+OWw!dCqxHL2${^pF~klF{E*ctsTo4RC*OJ%X{gK_RfqZ^R;`gUb?I-xgIJR(}3&`6uuYZ2ppUw zTF-CQUZ68t6_DqLHngPuWWCW5B{yk$s>0wrm*@t*(Zbz>SbpIT8Ial;Xg*L%bOvkAihgQ;1PRgT^JV#0;iFxi zyj(m+uGx@%~ytNKne`qrfg@sXL3Q*UZR1Mbqq z5a%P5HNVq-sFuwet4$J{$Bp?Ng|1J>n{$Rj21<>BEUzH+l?F>}g$UIXvZY8cz&AWz zKp1Y*U>enq(22b zlUW3aNVHv&7e;V(L;jewh`RyUsx7Q=jG=q!MpX=E-Ve8jpH!bi&??p>g$Wz(NzCer zbsMV=vl=Mz$I{A>wTQ+sEzuKwBLy?*3{crC_(Blb0jHjFC>ZKmzE}ldxHus1^{qp& z0oUyq_?}{hstYID%3WSKb9F>Iaq_(`x@ zg(dE^W#UZ0K(~m3Z)|MroMXsP5D=aKaCe48mFqlMb>V!$SqpzdnLMWXa?u6M$N3dO$ zf~Cv$U8*v$wv(1Hzns=87o ztka!dJx>>BDbsK!Wc#*-E6*69ZEP>p)q%l|C|5gB=#|mYMZr4zDMecLK1kaKr;&5+ zm`k13&>OtGR?%VV_WYYL>d|$ttcZOkFOc8t_M%MRI<&e$L$y~YHg)h2 zex@ut+V;e$)u(rC^^9c8YatjVbb-X3X2mPj*4AdCk$|gq8Zo|Vokdp}w2A)nH{A)} zzpe~gk5JsA~B9}L3t`^>L 
z-YXw#46{n}uz<4C=Z`B51nP}GzUWc&yuuu1(kPbDldhFQ(AVq?TA`PcuIo8D>u2(?^PKNVODj}P~O)ad@tpS=y0k5LR>6LNbtf8%Mw2{tX$b2@UW zA}T%+d6>v}rXTam!wy&oMiq?3S5(#1YSTN^?~AeyU8WmT4|Z9n-xDH%@>>CVfoR$i zm5PdtTtiL!+&(B6QXl5(%>M?PbFZ4Bq@XG!bDwUUbu>2q^7HDFEGjb?|rz1y0n1X7y0F{& z#gk0BvyHk?QQX_H?DwnenY}^@=iY&ef)jU5e&XiV$%&h2)D1*UJnloE;crwswXi8vYX!3=0^A0%ertI3N zB8jW&GJlXoio!Y9axB2wtc{cLmoF@`gPWhxjbuzlT!s9>G1VNW`;+IPK=FRc_n7Rt zn9=0WNQpdW$9XK0 zqEn!0^HWtWLN;+#ZsOUZVe5Y}7R%P?DS0ngZGH0+--!obkKUatTiL_yNcZ>s$L&0w z;h|x3MbtgO;p-3g=?=JPWREzHWIt{C?zjmONLsSoGU3-HvIp{9vsa|2JY1?H@;?jU z$^R3ur}~az97=|#TGOR}G*Fw%b9{R?i^r~V2j`b|>hAq41^^tfZOs^5rfwdFii~mZ z71Hs%tc@R5h~0WB!l(i<#&eaGQ-(D7P{<>>_N(wW+LtT7vbPfoxy3NP1jHf#aUFmP z;o~iL*WCZr*8pm(1SaSghjX0}UyZAeXsS}J2-5;)t=@F*fP2!tR|;!GYT7z87fQdp z3rcL0Co-}@oc1y}vJPR>HXJg>$ZvKAjzKA#Ftkgv--BKn^NNq;ag$rnEwyb88{JFs z32SJjC(^4S7;)Z8PZv{YeHl@kENl>s$V5kd^W+OhV~iBz-YYqix6aW*e7lhhP&?^z z17*69e$*YdK;>^SB5Unjq65cU?dg%(3WJC$84|}$f-KWbHFBZg#&v@pnPoEBlhce) zVmcB?yMXfo(1{Qvf@x!i{TPO|hr?^#zbeLc;o{kMP1w+0=fmmNCHw;Jd33NN-8OS5 zUn~3SH##Ua&*)kn(st>;EHWNng;Pxy6W6z2?xbw@WUY)0b8M*zI&f|?w zLlJG0{HH!F7Cc;dTo-cg8_D)dwuzt{YS*!T&S`~ar{e`^#2F$9^&LlBz&zE7E&!5y zD~FLmn6Sq9F+so}nl3Z{B{}hiD^^_n)a%W>i4EXHDe40ayfeXPQ9Y+8GYv^@RS)^!;*R1 za;o>b44&J|;hSwW=K+#*U~oz~YSm!=YdBC~kXyeuP-H!+x8OUJf(%YjKTjG7OgEIp z7^P4R`$7vO#>;UGV3%G)!;3MpjS#n)$oFk4(IsMAk-yp>aD}qR=TQT9b81!WJq1+q z2&*UuYGdp7_iF4$*(7D4q$+pH{s=^USWepJ<3gYyeP*2jtyI}3f8_mT!oHz>M5nK+ z;L;#zeITF|HBmfHOTMs}gm$QEVlW`T6uDdgusMb^bQZdpb^T~*r^qs{*NKj~0s*=X zTB&mPaUsIW65V0mBJ4ReVPw9-rf_9hs4Ikxwm1jGU8hn?HkD8Y(l?qI4`DC65Qj1G zR+LeweBqW%@JzPHBYJ7QKi`cIYXHKNQnV(>0Gl+Vi7ncG|FZ3zp&nDjK0Wm^0BhSQ zq(%?t>Zw*>*0l=J`aV2oW3G`}$gm`Q97MRJ;98vun$DqcYQ>+*x_IkNaD$!b(a1MDQBHXP zm8LY><-oed4QT_=+;h!tv^Wo>H4N@UB9!UA0faX3bb3;s3fl!3Fy&E>I&cX5NvB5s zm>OkWOGg@Ey^>mQ6eQ|8g%r|6?6?V1XbF_Mw%BaOI3kq?cX3qgT&C-XeCB$+3>|t= z0O%g(sTZscI8^mQ6eK|DTDj38ql%}9q|3!DG*dg{Vq*!<5hFG)wN_on5Jy2NuFDlT zBpIZZi79H;Pp^2WBe--3h*&$Ywn9XcpeIS_^sfDGJ#M&H`NyvAngVYiCYW;;Aa%HSM!Ktw8ROb~fTyCHPRc|6WG2F_sW`0Lg&rXy 
z=5%y)Yr5kxUC=L0F%-$m>uSvE!$&@#?l~?60c-NNma-iTo?$ zaUB&G3_Zun@EtynpqXuV7)W&+9fYqmqc&HZpLXa-V5D9n@^Z%?rTW#^E#{g$4-bag z^|>G3^zB#Ftb;1}ccimv>n+wP$PUNsGX&<6P%edJHWeE=Z5y553GmVk;R zM8~Nkt!2PBk^90zaXc}&#Qdmsc+g-?;KE_ik98Q#vxw}Q@Gh-3zrpL40%$&=osf+z z=O)4mN&rCC4g*6toyA@13UrzWuRz}t=ZaFdA-4n7h1GFslge;)K9dx&e%u|-3uq~P zrQH1|7~ODPWnCLk%+dncH_2jUe3(K+`vtv01`Km_nQmNm==hEs6%8-t&{a`F`F*`5 zT?G+^k67W`%;4jH{G0Q_xraxo)Q9t3ybQQcu%p~ z`s%j=KDW;v(F%KlaNDmpgea7I?^SHlWERwtcjgoeDT{%t%x1GpGqYdtAZw1cNht!1gk)G2t??X04tJg0*XPz?+p|~#sMB3xG zNVY3moG^DJ6A#{)Gk_y*5OoLRULEv6Pme6R*+oo^AAXorHiRwrA_w7&^AWA{3s!Nu zxD5HPk)lrlt9Y1+A4w&z`tT0LM35JhAn;zrFUdR|V`ku7IfIiy%(S`z2xT8CRTQmx zmkDZd?Mp5&8{X{nvSk7=Z_y%PKso)E){5x)KR1*Ch{6p~t{k)IE&)Y|S1(JnI=3;s z5XPJV!8o8LaEhnmGNnm~L_^g`=mpL6P<0rP2Rl7W(52EE_zp z5nlW7F|r5+mKK`i-fx~D+jV_Cd|ucgk6t~-#hKg~LJ4!&sZ&p`gt zY;#?}IfD_4+}BaCo~%QD7eW~X29D__$e`sg5X32!J_mz7y!hS+L2~ks{ebbqsFUA$ z)$v;i&jdG)c8KR+v-TJYieor{5{P41p?6ikpJ9HTz)-p%xcK}EkR1eIU5P?1UIvA6 z3XCVloDo3F;W3a>Ce{eU)YcXM&c-xULuAkd9n*H~1Zu}dUvc!k?x__BvdS{WShf&E zl_cgAmadL*lI89Ro-+%U`;O#v$~afJHecqatbjC59SCw2!1Gg)lEiB!e-Du!9rPG> zA513!+_Uz9@7R5aG|hLl*M4-EYQ~8*TBN#RpOtsYj%3y_naqpR-TOR?(aUj}YDU7* z8`D_yLbIPvZldCi(*C|2KrHRwguVTdio3C7%AKpjrtCJMQEvgaSbUMa;uXOnGyw(B zB*1%bcg*GW1O1Y&B$GH0sT>kW4^1~Xb`>vp3gz1!1)NaIBg5vzb)>Od{cMK z7PW-|HjjdDR#p~!h5oiS^eTbE08LD)WzoL8hbr!lMV@cv6LSWO0b+@nr?P_%-A<-0 z(M13uQ%g2w#}75&xG2UUE{vFAua_;*3sw|#w?Bkn_6`E{2fUF((?8GDlhw5#f464u zvLv^50pt}sC*Oi339eR_+6Zw}DGuN_If!Cq;AMqzblD9w%g|fLQ9Fdgv>mNfYON~0vS0Y=qva_)*~K2mqsAU9`M@svjp-?m;zr~+As?eyeGC!`Fe!2G%T=!j@5 z(Vwy@$oWe531G<()Xm~(HM5g_d2&(PEWWqfkw}D%c}30u;;*7m6W4?@KwS=SnEvzW z+}&9Dn*izYI6gTseD5Hg^5~AYCCK2C{K6uDqm)M6!VX`*cpj}TTFt-*C2)|q0bV#s z!70S;V-lP5Z*I}9`OB*6>XBK#X7Vbn@9*-x~@A^Tdj zghXysM6?QVw%b=4Hd+9W&g@Gmq1t;3_F)1aEGeu!Q151d{-?YA;bXgo%BQFPMTEaL zFy4jWb;F3tdLxVNB!N_R2s=#{K#NoAkju!65X%3~@H%K_-#_kvDv4f6Ju)*5M5QwL z&DwyAHNuJuxBBo}>HoSrw7S?l9$;4ysw-%RwBhJhM&);|muP6JtR-5!X4!xxh(v6Hrw-9! 
zAbBM~li9-hrkw_-f|G7&dxEl{4~X7q%UrpuNWzkuSm`SM8{RsY+ zutpbc=^5GA5rpNNJ6^5Ms)2za(ajRwH!s^jKh}X0jh3#?Ayu>*(958VjZQGymA%nA zGvA8Wl_em?lT(8@#1;}7b0Zn>io*|NQg!>vUgvJlr_*B-29KYV_s$kuoI97FZ?tFn zg80x^LhJ|e;v}v<>6h3$#jq$9N(n;_g zp=1!C)^~tve;V8sUW=P94D#3_`QcKkT-Q2|$|^&v;0vjt zz0Fzd%yp z{xH00k7V{oE04bW=SR5xXC$8=etKBu;}YV-_uu``{G3|!!>|1R^`|P#k2C$-FVNT^ zhytsxPwc~MWB+e5_oRHnRZwVPPCeT(?6ycCDaRL_p)smAKnHu0HTUA01EWFCRnHH| zt&q4yKuZSD!-h1?#Lhl?U=HrDPRc70>xF79eFDf!1r%s?Er*gdEsp0P)f3dq*+zf} z@%>Z_D)?Ha*RLVwd?8%=5Cq(H8J~0s|L+srofCT#c()m0tjHAxiYSpqc$|;ilrwLh z_Yar#?Q{!NyAhmO6uZ~%?P+QkpFW;sDcdGMp z*n|O;Y3Ut*-?Mppe|_|K~;4*>i>G81(A z-#0{`)xGq$cbFB@4U+E!A&{tB`4i@FCIqvBuP~Dq@b2+N=iq9(o3_NPq#m33=cFpA zuiwZaQi)uVUjZ>dGV>d+NNjZ7knzQpwKG1sH&!z3Nr*W|*IBWKyAgCgQ*Ir1M83>> zK|=6!$B^|@-mOW5S7Mmx>_^nHNY$R4x#@4el0?obnLXRf<>ApSYHDf4V_ec*`gp6AX|~i&`E+R z;TAS!@8f?v&glxG<49QH^G4f#8UXfM2)0U*k4e7lVG7~BIY0kxEyn038DQU1enqJo z>Tyc9c(u)Of;giD76y7q+4KA3BLxZGEw((!;#by88OO}q#$lJ|BYeCICF@TB|88s! z_y6KYTuxB+#l&`pI5fm4xvHzHNHnm;-rkzp+IH0IuJq%D zd4Dik<5SCv33;4zj4A;rz2q3KNjQetAP%XRU|K6GL9&feWWS-vxRR8!hu1)%Lqs5{ z0t2O=0teu6>53SAuvYeWkIAtFhY-$JfiqscSE4DMFnvoodX}7fNJp7M6^`_={D@gy z7~tcTSnfWrt7pb1?7#K8ppKl$?le`bkL!w6!civ?piYHB(EZbabYV$!G71i^;X8xq zjk$I^F)ni7joT5mq9>j|;IXgSp9$P8kh4R<7t`g%jDt_?BF2X_2mOhv=xzaVE=Au> zy(g`Zf|`c7ZZ??$No0Dqa>^0Czw7VgsAO2!VF<&~Ezr;(JjD?X#e??`EBPk7cH3fe z+iT`lc)UrKcyXtd%X?u${3ve0O-(@u&0NM2B2Q2(SFd!HffkDPJAt)xTB&wJ4ikn6 zcDF!&-^-K(!6VTdpV}}i3FxX_K!0@46jWPc-wJ^{1azMY`s)@>anABG08LS$E@q)1 z6Md#;Uiuk#0VrHz6iR1JmL9H}AOOo9g_XsX0q~oaj0(2dbB09)`!&)m3FW>%a4Jh7 z1^5$69f5kYxCF@@tKFl|7I|Jp4zxxg3z;=mMgmio1Z*04gZ@NVUL;uwv8K4L<2Ctf zAjl{rVguR0Estg}S^gkD*n~)s8F|?eQo(&5q|-nNRa@A^Yn@l!?Id{*ut+-p);hO# zzVc6UTa8&W@;6?F`n3PUn~Oon&}5M)s?7=T4g_#PDJd5!4g-TW-*V6hbFg^zQg7@H z-nPXPrN1wx4X&n)Qr$HPX)5a)#1mt5+*Iex}oFNIK*C^fHYCfX|S!* z1oSh?fdCAlPD;5~XDmB(4F#Ki41T})4qI}W57n741`Lk9!d`3x!_s7uG?RA!=&p1H zie=r6z-486PIqj-irf-xrbj|8FSG#eS04xUCN`*Q(5(mtNkT`VFH|j(*mHUbxl}fx 
z^E2XJ9U$P^IY%Vo#_9@Wub}8*@(JN+Cn=Urf;|asz!VM_fB`N8uEpw`AePQte_Z=i zGLa8JNUW2~Pdj~}Za#W}^u=}-|u$U%@uT>*I1+Aj+CsB+<(jgm81?p(r-U+7P<~r;>rOF=RPn*4x|LkK&-OIowC} z{f$deYW=ut^}w)uF9v`LLU_lYaX%OR+niCCwQJcnT6-;bGzF2h8UQ{u&wYa?i=YKp zv=~u|Eht$Co7MN6xFzJi9(}2r{gVh=p>qnT#0p~e0}3Moe)3hYge?L6Vp7wT!(-HJ z&qessg<_W_C^4EL92MCX3ia%{A@K(vX^%g;SXqfaaLmU3eXNOt2qH43OMi!%NUf@I zETpO==>YP=UxW%K46-3t#)uAB6Pn62=NJ*9LCmG|yn$q`;Cw`)I-|~~V7@so6!>rx zI2fr7xYPXj1SEJO<69fQ`iDi;K#K}9BjywcoYDbI%wU>V1WTkAgmEHgy`={%Ufbx9A3nB5=17(S`OV5x*^m(bB;bd z3qB2R>K|`VBW_SlzUTDV08rS;5sDXRe8^|^+LKP+eFw`@0382)Uc)o17 zfE2QgOKw*C5gWLz83Cw);ZZwEFiem+1Zg%tW(isPwyoTM{KMLyr%nT)A>#UiI700> zS5(BJu2vxRuy|eRFa&T?24R&ifA)(k^9vx7M4LyDE?ORcQdJSYKCV1W%u*W(8&{bQ za%f=Vq}}Py&3wiA>>mP=<&F6~n8{~wgTz@lwQ#hqCuhquEOG)O6y%o5y0pxw@|Q%a z0EaSQ4~`?DO;CTPOhs%1Q+J@unKfsD ziAI8&D<)v*T^p%JMdYZ9%p?k4_0yF~mQ5bH3iCU_56qs}0r zR#jECI=p!gB?iO}uVgSEzye?>HEc3|>Mc4{Dp31M8Vpc|uQHAgy3G{zw>t4lg^i-B>{f! zb(MPr%rBt%#>dRgkCvY76C>ay63>45M$%$HZI%@=ATMOlX2VqLjP#suW zqQOxyl1jA=UnR!mk~H=Zss!_#3h+{r;RHZ;eIIK+H8R<<@v`15sib9fDs}_4Gv5G8 z|B$8+%7XVn;`UO1DDXm#fKK&!q<;E~C18m{Wuz#XqP(FU!AKDR%Z<^pn1qBf5JLf^ zOJ^qu$QqY+4gkXF?{T-p0XB7s#5hIVtm)b$4^dR%yy>0@sHa+M>s@O_k&GrV*9(|? 
zhN|oIh^U#%-&ylfoSN}PxVgEfOVOOeM684ELlJZ3hX@ruF%(IvP%9U(Rntg|;ZWvC zWj|3r#DI}z6OxNH3eu`}v78+fWl3#IA$1kj8%`t zC7PatSC2S*(X;oRSrSRtkU4o~Gf8&o^GI?$pU$i<6;6+ejZ@P?hszdoPi3@E@_+IQ z>C3~b()Z6C_X9%D6^4i{lY(o?q;+UigESs7SQL#sue~OtcDik_z8!xOqPk z%L`9%P7W7P{)!;p^vqtJ^A6F|*oNmGig|a$OQs`f7&{)7uFX(ZUAg`JN{0C05@e2c zg?gWPDeP};?*&S8vitzBH3IX7?`>3?eP{8xhjX6ETaIemH|?>O)j%T>`jvXu3-mt8 ziQ=1jd1mqOwH0ALXbqixy~$u3M*Y`B>Zy_Nm{7W4jzP85>r*CjJIt zeyyh`Xct59Ce|Y>$T=eI1(L8bWY_whB&8GECWqq7p;ps=3L`KBvE6qKAx3RQ>asUT zEa6@QnhFvf5LvCQ-2QRn2z2+&k!&Oc7dcs$>_LQ7J0~CU0q2_iyW2s)cfI7&tw)%I z&L~tmQ@2+iA!JU$#@QWqb_r@DIy&}Q|9pP&ZS&)&8%#pnvBTlS58jg(7|~j8WQR(# z1DWb`u7T`4QtIHW@41NnykW$X?6O`;RYkU;1GPUWBpWFV<4*c&SdNxM<*ny=2)7nK za$*vW@;Uzy76i=Qiyu8B8W#OSezxTjYmw2LES5|Y7Ks*(o3jMP92i-C)K(5e=P&Gc zR?@{L!)yXmRvRvER2y^b8psbNp8xt?@4|BQT3{kUnEo0;)I*J57O_z;Ov$ERGs~s$K^7z&Q%#t zY!tCFve+97x;I~ud35454oE8kMUa~ao;Q8g4;hk~~uHq+q%LQ zCc30BsYdxY)=CXpBt!->F1XYdpqALTQhm4YLp?Pi*9iWR2eM|)M`&+5JN8v{2i*Z@ zB%3`Kp$fY;YvH!=|0CJfAlO6=UI2zTzeuah78DYACJeD174|rGP@;e{`w5+eL?#*F zsHJ9>PO;{loZoK#j_>W=xy0zQE{5QM9>qLqQW}eAb_C>47*ZZQ)(VYNJAv7848R?C zNa9Q#0>0uud%=afhX+alrSdSM@}OLrbBu4IWo|X{N1qq}kdZ02sBm+xkiJv@)$)dp z8+?-{1c#F3MnTxVr-m*hH5LtCaLieSB1M7T4e>1GoXHh0hfIO+3iaOCy(}TV@wx(} zN)tCd`j9|VvjufD=lKwRVY zCN)e6m(bS$)DsobBuN9wDF}O_ay%2%E{%u#F{1l2iN!4w?ZHyDQ?Y_lK}YTl(Wc;c z2Plpaxl0{akRH|?)`fB<589n5OyK11|A&lORe^A^=JDo{4nh}6#!+r%c!RbpF0&ln zRPOUov8^MX4(w{fx~rWevZEIdQcR469@rx4+Ta0)$N9+I+uY*_jlPbX0(_ z(iI!~hiei_P!|!^M5T3N4>>JFgZ820_hvg|{Zm?6pJ2JT89IaxA};CNMA|7PWldsl z!k{VlBWfhO7m|#&pXYBXB=G|Bmc)8zBrz5Hck}vFQqETA_VROg{KF+tjL;NZDwX}4 zeSd5|m7ic7{&*Ep)}+kH!L_BJB$-;~^9SLDh?xd3ksi+$YkI-@0vrfsIPbqj?@;8#mBfJ;}#fsD8 zQ>@Ol=Q1+D~x)pnbjkUHYs`L9B7I_u_Al9qv z+ducpP3yUbSPjCirn+m&gp3~fW|iuxmktKrOuuR;r08;GnQ*+u%2nh32C1^ELz>V*!sOLV+#(DLO|x!zwU-%m#;GCv^Pra!(JuRq-WND@o9VJYJon1DP6O z(V<7X*3ZxBEpJLjy7t>2pT zT+f;?IcopH>DHQ|E8;RyI;Ml3_Z4l6>#Ur6BX9LLc&a((YG3XR)%$&J6w}C5sl+y; 
zPp42X$iHLNvEtMwuZ~`iu8WF8NvvLGbdv6AzqTtQ!zL%nlftxrch!q4YTb&<XF{q;&ykEE`{(#tW_zXrk)2hMjqXF51P-Xy?na}f;j=ZneeBic-n_x$?mgx zlb!+G{b-Z^Vmz?j7eeK6CH~V|j}t}hAGW=^X7B7~WS{8dw$sCIyWt~FbDedS?HAQL z&wLY?n&aOR*3)3{hdJZg%Z8L3$Ly5gjD0&dlr$`}cs-bt|)42I( zo$YrhrFW@T9c`)18cf|Cx7A&t@#oX(f$2A|nujG8S%<|G{g&<5d9F3<*3HU1)@Ru^ zFVWQfa~I~;L-0C?;%OU&(XBv@k#)&XM>p|?x3aP%6{?@fEA-qy1L4e7!gwIHnj*S| zN@iLWlvaV>O>CTxy3}j9F8c^R~v{+AspqWjGy8T zI0uqp_rCcV4}4fA?fRzCD%}#>f-^2+Qk^|(VK-o5atq6`qcl5quyvMzPRMxS>jrvT z2h_3~+Qru8JG<;`PSw?lJNdC6)Q8t;#{>2?IzGM>t0@551RxbM-zjb({Jeg?fBtCv5yaVfNEUQDb) zq+o2xFD$d6fB^FYs;t*z3Pb0dEnF-eEWfYm7=CD<`BuMHDN$fOe?i_Y>2Yhdz_V39 ztO(m5RKgcm{-oWfb856bkmuDNrDloiqZtZI)s_v7N?j4>7OTnD84R^7NiDeTYf+WT z-?YbfRPAAoNYbW@Wzi+>(+Nd?MCw?o8EU1B7+EeFu8-AM2<6tH*LNEcGfl9|mPz4Czz4t(kwoR9q;ph_`0jB9^ zk#n!aA6qK9zJ=(9#9!!3UJd`7A_`QX9oqmf?9pxFUi8FrlbeYl$76JZu+=RnRh6J5 z8=`EKE)F#7N%o$!$|`s4dcCpe*SxYb^*Qw~g06H_S{B<34Jq+|vHDs@&WXy3swEF6 z+S?;L2c&P_+yUTg)p$VL9o~|m)B>)k#Y%ztrTI~{9SK2V3&jF+(-P~O-|+1ZF2-Lrm`?`NNlfa(mquedx5ssGFk0cbpphl^J2m{Kf3K7zA)d{7&@W&PTAMC zim!;+o_qwOQ3%@ur^oZB72yJV$}6JH=Os3Ig0vipe(=(sg+S$-f72jvthf2@i?X?I zUME?&y%@M`o^tbqm5Og$rDfovHxf6!Z9SbLx;Z_^EtF!-xi4z?t?BG*a;d8vvr@U( z@l@8V$M~~>=WavwgsOv zdlOA>0$Fsx7262cSdaI%q8d7anR}p%D&sMX;ttvrLMxuNwX=?c$@?OPz=G9-{ zr=SH(f@S75odTlHE)KdKn}i~B_`CJB z3cGf!9?gGtOTuP+@b!!IvHWqN=nWmJ8A2tgwm6(kx70Ee2CTdnbmizyhwvv@T7dN3Z|J7fWX!fqthjrvSaG_gWE(RTM5`i znOnT*Q_njaDki5}LbHyh80;ME`6Fwu&LInjvPhZvqc8H7D22Ml{B}2-f4z{xoP+64 zN|3GPY)g9OWVz*vbx{9>yV@$IS>S0~Gd!&lkUJ=hZjkCPy>`?^jvZrfV z9j=QsI@2;7N(vl$m^R1CG!KPLSRT0VbxWe;+_lu4tIIlT+ZPGdv`x&79gKL~F5!OB zs#~u)?{^JG)!A2L)ywU7BR#Vwq&65K(E< zq8t{v^}H|t{8+BjbIUZV_C8nk)t9N?`%RptW=OEBr~Qhc{Q-P^`oH_CkLmE1nEC2U zCx1_wU)-Yp(6X^Kng20qc%C)U>YqMo;cE~yut$BWv7>_&ez$V{oP%rhC-cwpWV$=2+Ah@) z&(G5_?R_S^u}mwixOsl8c2i=Rm21ozd6&h5jEsy+PHCst2(9b5(|D!5W8}?G`NbDF z8`krW-i=OiYqz9uKP>N-=o-03rg*+Y?=-Wy_p{;bb|QiEB4TJwD7PZ`R`TJK_M&b_ z_4Z5L0UeLK8A|0^>Z%qF?%ac?4>bRgy;4y_ z;Hh8{63nQ?x^!0LjRnzrA6Xsnbx&Vc)+jDecEw$1`iTB;*pktT()9f1zg|v!M^_i< 
z4E^MGEDp%_Q=W&e;1*tUv~GIYf9{20+1#?48b+R@{d`*&Kk+Bo42~OCtBW2E#1}?1 z`mru5=^qMEwbBorF10umeHE#lM%(VWA|YG{e_DBCu76vTPJrP+nc`NL!wP#8xx|v~ zc5V@u$@o5CB2Mz0hDZ}@xFWvL*g>G8Wd(1TXl}G*=F5nl31cHm54Y`xH>M=_WK=@}?mydpbn1TXXtkoK(u@6j2U~xu9UaZiPV!$t*ZwabOi^}E z#9i%?p!?Iy-lkknzq+%wsBN}==19J1=9})oQZ03hw8+R!qt8Q1wz{ZXJl{2*fBTwV zQ(#i+b(9rxv5A%*$^%w$D>8T@hmu1|xPReXT~w*Jdi3!Z{`CV{w@hPeWRhxcMkZcO zS@W}thtu|*aVvMwim@uNs(1d$RQBx|L!~+EYHLAmL{gM>8eOn(hc!KS`P;qiqfj;Y-|B zgG2pIk%6ZXf-D);>bB7WqneIMDL--NLP;d%6o~HO6}W2YLU z3WrChU&YEOZCOz^-7M#7S`D9ZZv%t;(tfJ|>4{!3 z=W(O5xeLp*m~OWZxVpJySdEUi1xicAyH7tdy;W=L&-e4-Y?u3Xl_lF z>iYZ%*JVK*l<(qmD?|vV(S7JJYfp8)F!HrmnGz*uwN%vhK0Q0_{Xag-4>cIz7x#wh z?2VXN~AGjbk{Z+~|gIquGhHriqA$ng>^@`3EvJ_F{y zivy?kF54b;{4*-yFSD}Bv-bH9>x$dc-yfb;v*Tih?lho2YU>+aCKz%nlkID zoxRaq3g&V_3k6bltx3AU`s|$#-^!Ld#%Nkh-OK-f#JzW1llR&`-X2f&R7HCpi&hcs zKv0$zVFYtKJV*2T$6=fsVXMxa&N-2Ud;^L3C|;foUCPTbz-%N z%D{AOeq(1*rL?}8Qkoz;!NsdnelTfZqBOxJiSz1ToW|ts+HMTWL9L=OmZV{B6SdT1 z`w0_sS=QS`gEcSLkDY$eZW~FnS(v@KU5S#{^6CJ90Za4!tU{;UkVx0=UeTgF$+4b% zj&?fis8b>NJoW2=N;R}C(>7_z>7D_c4C4T0H7%dh1o+`C=NR#s8IjZJ2i2=ojM_Z3 zE?xX_Q{j6bi$9;oAHUQtUVN}YC=OL0nohI6&D6Au+2kwTcKC;;PHNnxh3s;ZN7pn> zth~!>XLVdxS7~-SY^snha54>|0s|6Cp52L(7GVO6QL!BOY$;8@+#wj;lFrjGZcE~&6G z|9=1Cvh+GNXZFm}NJrg`v?M~pgOD0osY#o({KYu?(>@A*CkxzF`X1& zHZxECI#->#?yBn7F<%P=?BScgwO^Hg@qZYae?EoYo?!9~wvKQRbN&!dTey;4W2ge_ zWc)8NJY&FZ#mnoAYyJ18#%`Botj zBZ}|{unK?fAE{kF+dWwk^eWvw@KV#48gWh&E4$p2_|20Q2NkKy>9ONxNkOjDtW~i? 
zlQ+tJ{Lf8|2XD*0H8NS1w9#mMcatmW%fcQ<0O>fkR7N`~=rg~1iIw+cXV`*tHu0L;K zTwoa5ZkDM=Y%39K@5)yypS;^Rx$luTurDN_)~s+SY^m_LgQLBChA>#Q@h=aoZl{o7 z4OS9kcJKiXb!`|RE0t9i29}Do2V8137ZiT)G}SK`$7!xsJ5Qz9_?&R4NGOxL1)K6N z9_OEfMU3|ODlkbnp+&X!m)Cr>f0x|POt(&A%eBkC-&l~uqF<-IwJsR^>aHArv)3q9 zhWj>^W_aaH^vscf{HMJ)BzA5i#7{80G?&4+{jYCPdP`TS7j}Z{J}*=} zD+e!4$I_Raj=8hUOWX(F4BeR2zArmHSS_ou!9(-n>lUqY*Rt&uK9(V~Cw!*v*mSwI zcw{y(!V*}q1gb;yIZp0viDvw7nIn9((RABa;?3_ zeu^vSmn1C(TDjX2PMx&={x&sXSD?FDlAk*vUZ-r2zGVY_-OSdGG6{iJwUcC}RkDU% ze#5gu8ms3hHDj~oTqdw!B&<*$$rmMv8XG4- zHTXhO6AFv2&RN;%Cy!G4^(SWD{aVp%W!79d`rIT|a#@D?Bu(ME-kFNkPY;?ZKR;=E zD#3j7*NU#r(0{1BELR164AC2IJ!KM5xM$pVn{$;V%dpw{;LMll|xzjgrNZmvQV|QVq%C<_^6s%6L;ZvF|SB*6Hhd`|o?!FYzre7~3u~ z7izS$EtSf+0Y8Zxo&LPxc=ckn8f*gK!Lt^0ul-?PNGApd^eE%DXyj^RJK zI`AbB&zPj>I{Wt9b85_7&mUSivf6uZ%T%&2N9!*~dvB@FLNQLnzkZ!9`BaGb$yZ&S zyZRSy?(e_EV&kVe`uBe?{Pq8@Z$0$qx$%<^gMWBbZHcT$5Yd9z(-EwLkY0rPH{eTv zm`wwHt{$F#fb7jyk2sNb$lsa)tP-_hi9odgN8WhgPJ(}BHaOLffdm#E1b_WT;d@ZW zj!*i+y}!Ca;uDeZhmUYq6bNLEh}Qu~hK>QPD1`;;tQjp5egNtrH)t=t0mXKxxOc^h z8npNJLo0AY{{;A#wxGZmKst~XHls4_yAm@WR7pbD%uXf{=qDP`A9g~r0Xob;*s&Ez zSdx1^z)uRat-)U4pW+DgOV>>F`#U@M&jWwmhq{Jc7@;HT*ZYdeLd^~IOpz77Kb=3o z_5yyJchGgWH_$?;4gzl|LVgJZ?i>*c_<{Y7)BtQ=flFJ)bq#!rUeTJKzuX>*&jNWp zS5#%z5s3h(b&im)czC_y+B~{~BIpVtYDH+OrVe8J=uuMRr!u>%K4UXYd)wm7ZTs&9 z?m(#qC|}{XAL?zfEMSp;2YJY?fWt8R^B>(#g07NKn~7`*r!xG?Jn-v)Up~CtVo?VI z2?y0HSK!tOjb(oM5Uuz~OZw}z$cq7&4xt#bx3{!{g>qd~XoK3*Tip9a1;$wc1eC|E zfR^$Y06;*-j0Y~E6L{;mTP@_;ElZ=BYGdQU``^GD_o{{@W2N{0b@qW0xX%uNBZ^4f zzvEWhH=~yrIvY#^Ck;Iijlok#sA5M%QRqtm!m(t6ovA5`6Lz7+G0n7TBJ+pngZq&Q zOzj1`O3HIC_IgleS4>TSj0e^l?RSTZ*c%VLyt;my(!)W1DGg4uE0`Srt?SjeweZQ1i zD1zX2U|ar9=pL-``S7H?9Z(C7XcIyl8ZAI>ax0x*UF65;zD(@k77!K*;_qmvJUpsLF+ZPk_fze83a<^Foa-YO8T7 zfzBa0RRhGYQSMkHT1fr-HseDO6Y5+(qPe!;n1hu8t^d##O7eP`nKHm>sE=@dTcnHp zZJ29tcn_a_2Rx<`uuyD-_8Qt3qd_R%dm!Rk*{DL(7RB0$Ac}+m)CB}`U{GZBoK23@ zg>jCGL&vsgaSau0>|1fp<>r^V(3Zdjo`v?b=uzd!hVJ4cLLg}B6tOvumQNsp0Nsl$ zkdlSA1>lwk<4_63s;G)9z~qsu^Z#|a7qzO(fFTga3ZEocysso8@0yJy7ih(zc|-#j 
z?U{+lGWYj@JQ!?qkt7hwMw4r7d_}VG_P~g8L;sj#%kbAtXlI#%vK!!achG^C$LcmV zYC(si-+esRdqAHX?of6=cjsbzc<(t?cU7SGY;FU}C$&ctjlqD@r-jLvGmwN^s$K&k z2%vFq)eP`PkLb(8bq-*#ago>tPwUWx@afead);}QTy&RQM7=u4WCfE7SL_^|%Kq*++TZ7!eI4su_@@!&>R*0VOv^};Q~UZ^if=*zu01PtFcivE$%Yh zfl;8{oPbAF|M?jn2gbS;%dj);W9@e7=C_+H( z69toQd<%0Y-qT{xDFS7vFqBb00h%{7*+BviDxWf7Xt?HwqrWk(_eE@Q>wAp?@sh>d z3262-;lAP6wh?$;s9!8%r-OA<5J>Ee+}$}=uW<#p*z~CqTIG9S?ze%JMI4BtvZ2wB zoh={(Ks`>)urBiuH_HZhJ$P?7kUx}#)nj;4jFl>MQG?>C;>ipP+HhzfyzE=(>Rn0@B1(=egc(R7g7y99olO%Q`R z0~DJlE$4J(g5f8$XbioL@>$^}qv?5v1yI-=!Eg@@y!W7cAiBT{mmn=(gOMYoc#01_ z1I7k(vK;8+EX`?uqi~vyIB48Z4uWd;0jQ>V@bm9L`QXS#_W@6Z-D4zu`F{#ytq8F- zw>~V0c(>Mt%;nmR0v*qN-+W17upFA6CNEq8-!m?AY?@HUPc*#r=Q4< z?`I-CgMPqLf;57T=CC~`qAON|6}oqzd8JYCxUuWktCe8+TLK5-HeikJvT|fE76U@B+q}>s z5P9#1vcVx@PRcN)k7fIQ0`@%t{%5EjP9EWA(=!y<^KBx^@E0Nad@G!Y&Q>Nw4Xfud*gX)uvc0_E6N1FCIZ zEQs=K1BF)z)V$s)A6Vsh18l7XcnoFzYAAa~pUdCTpkjR@e?nOFjODtO>?lO@4HJ z+s4ETd7H5Ap0T#39^=2C{)dj=0mdj2Ume>PrkUE)U~JR-q!o>Mw3PIQpMwcj42bOgZI#Z$ZtvH2^gg}rLDf<@y zfd$C8-r?PrAc{ExJ6SY};!v6!L9~M*?$`#xPeYX1phY;0r|xJ7R8vAfbI_=yV`~_x z#<0VL&!J`BQrj`n1Kz;2_4Qq60M42*pRR9ruY-~VOp?^pSa&W6o5{K`2|<@xh~^r) zuvu8!2#^#fl&d~Q?tVWk;EbMnG&TY*WKIhgU2d_C1Sc!M{lIEe$lSy%ES6qvGoNE%j8rpsP1xl+P*5mdC!Ak zodoujwCKqH?^gX*530~E619Z=T;xfhOZqY8-b^&j1hd~DWapT%OJ_Y7kO2xB6tHIo z(k4_E4Y+=RXD~8-5)WKHkKRTFrl3lG&4; zLK4dOMjB#>;H|^Lsb4gKbIJxhbVcewZnCiidJCCQ(8%D7Tz(6L@MN70Y7#ItgK^Wq z(F0KvEx?ISfqJ4K#1?cvt?CnbHsI^MRpI=PxjyKibL^ZB%tkc`Eg*tNo;}n#dO$SY z4}|R{aBpkG?*@q+t&IR1u@+F@*kIG#20H`YCo=lr%n68re!Mu0^jvh-Lc%)KDcX37 zfMo%w57VJWV9bRK6Ljd-^#a+%DAb1qc2tNZZ~|LNXY=6qbLdJDNWDhEM5+x&N!Mfw z%23gyavMz@b%B@5)i zXyNdJshbAIO}lZYjF{L0ytUkd3TeB=&V=5F(eP}Q!Q#kyehFCmMaTGst}wrtUO@9j zyT2x&jE_nRI6`}Q$2K{z2yCdmXBrnD@A!vD=zz-{;(1D*57utC$^rSc@4C5)^Pj!P zI`O*NFJqA;Oq&1;&1F;o#DiKw4tIqIJlu54z&Q>)@!^xBAZUs%^DGQX2y_TjT`M~d zhg$SGL4q3(Jk8_nl81(;K7(~p_s7E~py!)ogI_-p1_qvdPJ#-iNEvqkB(;mwp9rIPaiRx7Qfd*XPuc72-W>_=c9k!F zShDX9S|y38C;~0Zkp_uXPdF_l#yB{DG`Jl+$DpmFnJiMN2Cf1`be4kErlHcGn>09$ 
zPMun?WtDxC(uMw6sI_QG!-I?OpbxEwPE`bI?h-mxV+Dh|9vF?edJVyv$(Y=M!gZW4 zu=V&hpg|s52xMaa{VEHYo`hW^*)m`3e7`xuL#wTG!=pT>1ovs+Kv<;^fBRBOn0SKa z+u~zP57uK-mUP}8G^8!aXeUunm+ zZUF8rA^}MRJ~I8EUWO?*148BEr#|^)phP3b4c8u2=yS-lSde*w?(F~yc&WWUVQ^F| zeRubLW8<@4BV>BxTcBLFUkxn_k&g~aZ{-42aPk7qZbkz!|HdSoWj3^=O~6PfVngXe z?;UV?!b)=Ogvp}PG6)}vIF^~fy0DXP0i!B81vx}GjgdBOo|}#}2Jp-NoGn&--wZg! zss3X<1VdTA_t!$X4N3wg>8MP{RT_QmDU`bC-9u>giibrI6kPQ70Jw(--cKf>10Wl6 zgY%ZK5{8oiYW7+-XiT<`RKv44RNuEdbYg1lVQs>$*$K1Z|2CE(l{_igpSlp8l4t4bs84AK8>udIEturyGM#*V|A%Fnw(*c^ z0GjUBZFRB-d8$MGB+Pjs(H+L3M}Lt`-zn0Dg^QjZMq_S$?OvGMfX6`$P?FbRfyIo? zSnWOBl--!e#N1;+_~f10%P~Z^55U{>=9N`cScoqaRL#?sR8`=4yyY(}dY+NgBcI62VfNpg4&O)=}Sv!@?lV;Y0KTrRxHO!C-LGV^ap^ zI=~?$Zad2GO4O&oeL9)P*`vGyI#(w+84QjolSXuQE}lxLdw=XW z_v?G%EI_Q_K;&M=VF12C36@!jI)jD*gJ#E#)InxQ=YR%?rI*i#vL6df2R2@`;z&j57hHcB$_K|n>U&j)373{+rFnG<>>#1-)U z5?I2m0kbQ-1_lOjrX-Z&3$ff`^jIWR?`cFuifci6mMNplNIMmvokzx8^k78I ziv9}H4e;=rq)2;KN$nlGUzBOG+=5gSF}RrZ3X zn_Dv)a=_%5oxW19VK%-U?$p)N-X_>?d57Ld%j58|Siq&$)vXZk6GrW<02{xwc==7Z zwwp4jqO^Dr`H%{4YD#wnd~0$h1%D_WVqk@^ro(2;ki7Kp-o5V-DatJ+`1WTu@kPYd zl@OZ*1jvex++xEu8<74_7%GxV#ajRdLlsJF7*Oplm%}nwCJF>`Y9JL4$h$L8dN~1~)7iyETCE0-Cs0+13MY;SX!63&CRo4pqj-(x)g(E{(uiAte1?3pfFY3C^mKJjj}2PvHJ018$U4_aEb!78;d z3c(?-eeN`DZSP1vLizp)l0nJA5zxOp6RIuuh<`dmC4_??(ObARsV)z9Sv$^@+?(SHG zu6a<#r$<9L3E~m7bDtT)2Se|)5sdoq8vt1YEz=4QClK^i>4z=#rzK6+5x4rb0(bb*z+&NO3fW(ziry!>445g~PTn(H~kE1Fd69lj}$7DgF ztnYk;97K1*;o<&u!t;N%4TyVKF(k|fgQy%AHOex^=z$e|MKNm}VPzIxwRKeB z+<4E7hApJK80hHeWcOlw2dA>n0S{0+^0145161hnh7Cdx+3Nz>4sDR+#l=NmS|1z? 
zP@HPmgV?Nh-v2E-(s5nE>=f%L88~BMF;KDfIa*3!BPO?!2C6o5mm!!E^l>nle(GL_ zr&j13<L61OP9E{Ilp&Zg%E-)h!i3qTR-=70qpLiJXMZubf!CGj6ceon&sloAC z5fmh%d26*P0}%ot<_$=VRSC2`ThQj-%((`S`b|W%?E`nt5*G%9ThK9@vNFO8cHtfNp*LQ{x_=fAQfo@n#;Z-|^N6EfN!0=s%Ca0gdzNAY;zS&9&~C z>D>nRga_4R8a$jTSNj@lDu73V?GJ%4Qfe2EeBzQ`yX_6gXh#q}?NNbIi$Ikw2_17_ z-V&_l>;RgBM9xmVX*K803`EHYNko`SW$Fd6f58MfFo8Q#TB$C!<;G0U|4(?bPR@$1Q9@`Xt=U>|hTTg4+fM44npH#NtY(uIvjPNz{W| zl~hd-rW;p_xt;P_f^s+s)UV~ESHQZJ2grCbR+Fi8Pw?sy?m=V@rZ%nKu)QybmrI8F zCqIhaoLF>h{zG#j9_$ht;6BF#usjkfSP12UD*@X0VP86&2;HS37*;~(X6$I1nEEW6 zQPD*SpbE260R-!`z!{-?)Q8GIC=a_18?~1%rG+ex;WF&(;W5=cfJGMO455x1p-QmD zhO;B8)rE~Cd`Dc5n0vSo-6>4Q}BBE(73=mQUj!BAtjMhJF)pw)`qg`7+dEKaQ`h=-c# z{CPVcgoSR;MBDh0E3! z_?tFO3wzs{=?uW@{~bVseKPEpBas5=5x6;lS8oce#z5y58VREj2~5*0cyE~!HhMh( ze*nYPPLPz8lt!>50klyOJ}5@DMn*;gdS(c3Tl6YS%n|bOz{BjFbSO@-tMk0Q(}ob8 zd|22PfqR861yqNY^N)9b!nWwWjCn%n-D&2I#S=n83EM;453dpWFW}KRz7%!xh#{kHbmMOQ$Nw;s53 z7E7o!d53&VlypPB1~goH`_iCn!v+KT-se5mgm1gr`S%Szzd;A$4)zOj3JZ-MTTG65 z37`KT|GVlj4>7T4<7fu}&_ZJcvO7A5W9yIXjt~cuNPKB%2F`_kEJ{;do%!g^k3$wM zEwWwdaIwO0H+USFg*k(M9mrQe=y{gIPzCI>5v_I6HxxapbIYL##9M%A4#ot<)Ou8$ zVpjo|4o0U6cqL%k)C=?^fwq#Yurm#Diip)9RtzN>2{4^9D@0F)AN5tppfjAKngIq> zgf3NlDH>dj9@`GRKUd>6Fh(z_8-IVq7GC!inV)pT-@(jF#T*JKm32wrD&y|DZtq7O zwW}r1aQQd29X3v1G|8>Khmbz{Pr`24-q{N+5j0cEA-f1d#~{^57=(y6x_F;S&8Zry zXk|tO?9b;wXgMBnF(?|QOPPf3d)dEuD}>54VuB*@4!m8`_XdyYBWVcD-{VX1{SZ|Q z0d&jnKn=_Z^p6*&=(yBOVKNo~RdQPh-luctVPV%oJBkLiPkW|U(@Z+LzQ}7q2t5AL zJ-EvMK}#!h-iJ{IHzbdNX8s4^44qHMdKjTNIC+bR#bbo!AE$9N^(2so0gGx`1IH)B zXz28ZqTeC3uKzF?jYm7S-FreppE`3M(<)Bhv>_tx7;JJbUJ~1zc@^fJxM=jzNXh zsx4R&V#ZJ}Tlm^wZw=3>c(Q|e+n1>xlL6;HH0r_Gj?Sq4XRnJiM#+7>wjVSV*oYfJ zD-A%GDk|;#u)pMmJ$$e&ZAPm?+&|!uX8>;%-TPtd!Yg@WW~xc3rH^B4+!j`FvvVJF zglZW~C~)H8VY!z~UB(6mtj>d!UUUpaPKf0wP|-bWCLxOsa@h0`i^mlbFmE(Xhh$e0 zdicPc$*pVyyebXbfpENTggcvnEizzjI0HC=j_7dwMJfw?U%Os@==+s@RGsMpa8E-v z7a;@W3&BZYQ|XNCW<~#*5yW1A z!Z9x0^Ixq{YZrB1p868QBY73zkQhV&eilY2HBBG{gu@qrwx$4t(_uMIX0ceNdyy9Q 
zm#?cnX>jG+#KiPVJ&QYzVFFu@@(<&LoNVDM>t3EWutY`Ee2=sf?+&x3!N2LznBQ?S`&<-_$k z7v|==DtbVslmpG(!^5L%9^;|0?emNDfB!`3CUf4OyFNJcla4N<_AazruyJ9^fHrnA z6g%-L+EApU7Y_$pYRI^Z%lnwVd^Qm_?1E1b3dnaNzaINr?=d*AId_@KZtm*q>m%ob zTabI*G7Pu9NUlb=N`roec>fFz8+p)+Shl)=^ciXmYRwWxDu58yq zG@Jxj3_)OAb!EX#)?olf9`U)Td2t@s%#-lR1pVY4hR9IWTZp+s?A01tE(r;7<8}%r7ooP_s^ROVUwf+j(LITNI1*5KAA_Es{#*;1$t*q~cruvM zX3vBc=g`a?6scWVq5iMiXRh8GwJSq#!8$YGzdz;$Se-6_c@|upI~|TV{qcpkEg>qS zYjtC-TzXk$zJ+Y5mFx3;_tCQ-08(4=>`y<+M}MkS|MBIit=N2Sn1@DRsrpKLxR%NO zI~V<5oXV*dO@*$$jGe?s-|*>gCnCvd@@L&kVC<9zRUK4%2+t8(mw~yLF6?H2dGPy(d|~o-0Cx&AbvzDhaoqW@AL*T_ zBudKdcC-psShsmgS6{4&sYF#S>2seOZv8di|FFg^!^nqLlb&~fS7bwzkyKHZ1kp)S zWKP$;FoRiZ|DYwNr3_>M(~A{~*%HQf`i=(rtTw~VsuIsx8Kr(_Xj1)GEGT1U4R^mA z4AILSoK+=#IZRShBz-|7Rj(qgh)G&c-Eyn+H!@F9l09h~bTCmYu$oIJzDjkfbC z(RAgUhm&J#@gZI9$#>bMMf<{naPzr6Z}b=Pitbb8!)N7&KMcr^$AoZ<;&+va|D=<9 z8hDX#oC~PW2(vA=Gm-+aF;~%^`mRv_Ek()^^*xM0hfO3 z9C$ZLI(RT=&$_0`qJcLnHzrl{_NVvds590}i{C#DnTikU6Gl4r82Gzb8MEZ3Y{Z}O zw|Mo%oAPGmd&I*w8bIvBs*v(W#C3k1{SR`vp3@IGmj*~#=RLlmN~mhrDD7=^k=O7@ zJ}jzsnMzJ6HufRQm{0GWtJzOg6Da!XuBc`1+1uvPN}6#fc}PAnq>!<{-rYEEQ)#}Q z>GObD?Hx+0QpR;sg$HAP4Gri|-&7~JZfwseGc@?h_afP#C zYuaR`omXFd!WUn~ospF+TiLyJp=~5Co3!YkntS2J8~fRN4Y$*$g5I@9w;HU>P+KGA zvIQ?k=*QKyL&iuWQv%(UAmU72#Av;HwSiarFAcBKPmsyx`E>;WAt${UJq>*|Qu>XR z9dlxLCjC#$tQ3(VRjiBV{&Rxm#{H?nxH>mbzENpp>aBj!*@mS4>A3zD-lClt_f1BY zVO!$9vO8ZA6C5K%q#IXnP!^48+R3qRY`5;MUdh>2er}SKQs!r%Ri$r`K(iQ3OB#yZ zJIK8CmUvS%hXWEq~ATAFq^r?pef&+<(>ofVfIlUp5oatHHmpQ`Q26*g()Cp#&pAk}^sc8{Ii z@cog+b)=d>`rAHUQNj($<>jc6>vkujv>Zwj{Pn7!##-F)qlA?mRR^he zqM6mZwY4kEeHr$g^|Su&9tu)J6}$a{?)rr-6uutwGAsOYxYwqyoqK^KIH;VHPIa80 zxCs+pWQ&x6KIwuXXUt_~^oBx1nbFkZ!o|*}hUe)gs%MJqC!pc$(4^EYm5@sMHL%!~ z3lqQQU9`jJKf-;P>we$v`}dldcN#A33DoFdUQ93ENo-b-f!t$Yt08tF)9(k%t&e|I zPbQFx5zGR#SrIB41he3rfrz#+$sV>Y04F)UniJ4K$MZh|Fkt82v3Q-ui$xZ$3L_dMI`Mz>`2%>#5^x zUro1*N6QE?Zcm(Hj#d;({`;o=`MbNvPy8@;2;4?_6(*mZb^@LO-XqAU87y`5Lo(tD z&|uk^G>6m|WL!-40Z|3dZ_qH%B0-f%jGIpdCwa^r8vtI4B_)GHn0Zfu1oja&6U_qB 
zLEZ($h$j9RE>yh<+mUD)^m5hC7kE^g9WR+tEwcXe&Rv}|HgMSgu;5nC-1De4e!nd} ze;)L0fGFbYI@$F`3S=MNOi~o5m;>vtoXS4M{c@{14^Q8B%SwsDy2kGEdPz~(&n># z@c7P7-s9j!zk@8PEpZ#g=m85ORp}>Hw#7H!Nm&{T8@Zw2UfspxVOJ8Yudx#YPm5v_D!V6XE2W z_Iwc;3ip_3zr%3jWaZ_frD7(x8ManQ+#Rx;Y&wrJL!KyXVkcCqcy+%Z+jyOIS9$2p zPvre%Ez2i56BQO}dSi!rMi(CLmPpuck-Xqx8uz)+sCKA!P%b&8G|)#YXySgQdYQMu zuw$vnlrHIMm5diBkh*yQpE8l@ni&%xwbeB4yI}qB$%E?UIg^#a;ifF9i3^Y zYusLKYO2Mr4_lfRb7v1MyBP0Cr^jlBdCSS>{Gj1xayxw|At6{L_oTzoG4@hwvKdQ< zVa_fvk7qRpZqo~cLL+92W5MuGI&r2AZ#XgezJ|x9Dkt0IO7&NKt-V?V6KR?rm9qZC z^JCRj(I*Y7ye)UPTXTG0QnZufC8Gz^yXW@Q9KZLy+UW20(+l$JsC(F{;ZpT!NkPLu zG{lb-rV!skru=*YA_CU)>!GF z#%$SXw|S)tYA6RL?TpJfDIz3pJ)^>>;k@}jEvAq80}pF%_i(Oc@1qA9gOlsjgNld5G*u2I<2}_@{SV# z#Rv*xXnFD4S1^Qx9>V~3-q+_NkPeWR+oonr|ACgl_dui2E40n`{Ko+}H3v-*)0>bp zGnD@4*okJQw#$URJ3Af3AN1k9j_T?6f6y5JZSvH<3+IR-{NFlB!QpO}i834I#8ry# zlW!i`z$8oaJ#9^WPaeH@D|+(Opa-nvVsiyA*b5K$FtoGp@eBS*5RCjz3C^B0(>grH zVl!-?Sx)`7?7|3&CQGm7EiAh&I! z{a-&&pzG-fy+XjNgRK^U_cH@%6DH!c08^}^a|PV1SkQrKM9*MDjshI$0O4bvfX1sn z<4-=76m%avoB_b1d10g^C^g)isgUbL!L}HRN3!P^p$VM;@KEXK6M#8C#8!&AJvvOn z;2R8?iNzK&Zeb*hAW(x4FXA&1)eH*7tLmVX@G^S$rhXY=DO$6wOerFnUP?2QWDeAn z))yqNpE3<$w*I`?r_Q(wVy9e)4b`hu3!8n#;#UT$ALrbm+U^nWEFKIK-KzY^`%)X6 zjVR0T2h}1@T4<1ibG82=1?9nSBzsJ+2bxOkSq@r1nZ*vuH+`H`*fAi;wb%XSQtmi? 
zWW-ZK(_32rK6Z)VmHq*n~7O?4$4e*Mk_O{VGS zs+Nio&$9Sw2K)6)@U*(L|JX039GG4grVQpaedF{$1~UJ6QUS-tG;Vir02K?Y_?aSX zyn?NAIUMU}1S?EI6LLkQfi{V5_|OVQrluOCR9Vkyf^8?N7qaIoAeyxZh%52K0L^`e zw!19H`sCPK9$Q zkmzT^wU^FSY5wsnUK=h{TP+gbTrbOOyj88~S3drfrl0Zca=NChc}c@thVK+zH_$enmJ(*dW!#k?i<+y$oW z&h<+7L-{}HaOR?ei_|nci&@u$)6#}wJvF=UsBYP;Kfklg$8>eK4|Cy4Ro9ctbpK;}5<)J2PI?uw<$0;M3`OmNGiu5XF~Rvps!wiWaQgk{q}6N~)o%X7?b| z?+_)FPpfZSe7YqewLWD`_kvC*b(%64=9%tRbez)G(Qq|V-PUQTZzfphc(Ha_)ymro zG5Pc_m&Wqyjy7bN>n*rbj-KgnRj!Hpv_B?%oQF)*L*<|nSa)>Pk1)z^Vrr8)m zZv>yDoI7^e!@1bRR8kd8u&$tF;MlJh1;+@WD&b+28hWD2xdS`QmHO%+cFKX*+3sZn z^bKI(`&ux8OOlLCOKg3RkSGk#>^QEN1?KEOD5!&DiK@v(#V37RBZ`uUFZXVCbkv#n zD(Sg_?lS4TWUFVRW}(U`o+H*xKWxbM%Ngq&!WG9O@UrE@QwqAzI_(>;v7k1&!|E(o z&B-|-ul+=R+0|Tgi6vzur)!)TY}lW_eCR=fQ^LmW^e0CA5|JrFwII`Sl*ZYxWyw=V z|KTZPg~XaCcg)#eD_%IADD{0q>2+zZr`097B?Uf1?;h)Rb#?}OR^CYoeo6W4uBe=4 z3Y^qD4NtE4dwOvfNDJwy+j(OJoaJb3`E`nV@ymDRJWeSk3Yece6hXIRGA z?GbGE{rX|I-%HA&`)3LxrpQQA`1JMqpA+sc+f^a-FD_{Ta*Je&0CWRgpScG&{}B*u9^BkHta~)62=gU_)?)_AdH(A@%{ljTLts3CZ}r_ zWf;3QTc_X&8xJqlp(os>*QTypwM{!)ZK1li`)#eDh%YHxeyN5&NxAHh&5l*fzIVqu zZEtW$@|?`bi`yK#tYn6T*4=D713sH+YtdQh+soOo(ZH?oXE-A~$oii5a=u5+b2@33 z^D^ai?PSgIvp%3s#{py za81pfzgg}?QnH3X^FNR2z0&w>fN0e({VqEetZ!Itz+l`-7;@z;ok7rkbgmbBTl1^U zAozH<*nwH78b+Zxa#3vxR5w5jCaV6^T5BGZhPsqV-V#J<#Y3yTdgL95ojmiS05+XA z#y7%5i*Yu7!m~-yPDW zK%+4`i)E)e6u@m!RRl$uA*V<_<&VnOyUagz{gbWLhre6naXL{m+$19_RmMU)I6yZo zxF{!?Z0+bw^qSkG?^U~Z3yZ~0$eRg>G}Q_HiP{Q1^>pxH7gJ>jdduir6xeidYrG$HT)Phh|zXJq$3G)n~WYYycYkAE?z9EKi@OAy6yqSAqMmOBhNn;H82DxW25nzaWAU!7eRX@pr9uvP$l zl>Btb`2(1HfO!+eTgyIFG-V;mobZXD0NOav4s@X*aTM;R{B-%{b4hwONxY^B{i5RX zJLH@5G%aFVQc5|Sa*4z`uP~=i`h}aU=%`2Z2=djny<@6g&UOL-r$Eenk=fL1_o~hY7JUUfeI{@Q@onQ3`tWM7S{=CxWI5WC+yr(S4?)coU zQCHco>2~iaaQ~hT7PYqgg=cs)bmW#XHXb*45fcQJqqj#_CH8Kc24ea zK^WJq`1FL>FI7HOUK}_qUrw0cQ$owLEK)C_Ra`P8D1ehSMcMDDU-%V(d%>qb}l;Q&V{l!uS|cQmvn^kds}>4%-KknX~lu_kBRAT=kuLw1g~$$?VDqA2MFX_>6P@)~}? 
zoa=&J+H*f~(}2{-uw4&)4PHI3XUqd^cDZiEei*zELd!=Ft5~~HCojJ_PD;0p{ViBE zzF;C=V72*YlPPwd;AN{|u%=;igF?E&t1N+R6;0o$n2_7HdNiHSD`GGMpQg+Z13lX{ z1&+>z)nABr(*|N?Q|!D#9v8Gx*aJ1WunVO2^t64_)xbR2v?R(-KtZ0r9Obp3g!CmF1gW26FU=b0w z9j2PGIf_+LG-U3d3Se8o%r?+@uw@Ceg~?GH3kgXg>pW!F=enHm`EDIcxpcd#yzO_7 z&OUgh%M=N`D{WCfJD!~Pu4t4iVew$IoU}xaY`-})*&zXPMRhbxR5vgoW3}k!*yWC) zJintmjK)sg`_i+uZFTNDBl_OSlmV`bgQKiV;HDuOYcqF5j7u>U6y*E2ToBd$C28Fr zmX5VD1jL6K<}2nEED^OY_ug+#r9QH^gGdxY$kZd-$kc*+)H&Um%)zz1-|X~gzHRTG z^WHr)tqFVUYg}~cTtLEDuHC8h%+z<(LQ31+S=a1PkMzS1f(@LKSB^Obzkuh-I!U7w z#+=pSQ_OE9w*ONq4 zW%vWCCj)5|MEFr^+9+tBp;)H_xE^rvHX4^Q0dQ~5E^zqh#A(<%D5%EU3d z^R^@ABrngO6$SD~7^V zp?$D)>n#)WSfkZsXSR!}!I({wgszXO%_zCxgs-?wbt#P%SL0~XNGu{Lbm?y?WoA8> zzp=$#`{L+CKuPM#d@o_s{J@hcuGRG$c^8tx++)t0`tUwC<=EJ{6c$oal{IoH>iT8U zp@d?u@!Z_%dcmIF_=wN+)`H6M+g@hL0wNa&u|N+q-XNxwQpGpa8WrJ-+9Lm{T8 z7wyw!gibHy7|suzzus-)D|aGCLv&k&?&kPO>j4`J?Lx8HDMD$k5mPHwK=o`ACncmy z_xIGa$Ik9F39uc$M$w-ibi1;!G{|UmE$}n&Sy*0P=}dN@O0V`9n4c?ombRyN-tgXI zjw^*VcR9zous}6!Z@*36L`=2DWJky?V{( z!FU-R!H^qiU4oPq^nFuT7UX}f4>kc~ohuPr%Bm92<+Nwv5@wru?Wc+hg(YuODtCV=C{YA^7PzI$8Kd2cV66GZch4D5X2;Lh++|Ac8rpRQNC2{&=0yQ zYFTbCPr-f^Qx$cQ(J_!xIcq;Qx7s7+w=f;+GC2f=*-1$M1iWLn8uhN&W%Dyj0W z_e&#^&C2WF*ORR~vzA|w9timoFU!1mHvs{m8Zi zN$kBp&S@!M&fJw~ti_k)i{v(kRjK>Q+1p-FP>q%$Om7zZ+UwoFTGwpJe~oRpm#^Z= zLys_~Tb{)v&Aei49_^xBYSJCH;0+DN?J=$ zcbuy5ws`q*>VPxRhYTTty>$^>F9FdtOb7tR(rCXD}$mE92BofHRmL26U@1KG0kNXbZ z+;6fL1EtfyIJfx!J`|N+CgMW!oMiI2mRsSpxKK%t4YcLZCoY!58RW%3t$jSd?X+U| zIK?O|__(*Oi;nZyS*c_V!4PPDyY(P#oy`pY5aG13Br-4XW{@%MXJ5OCGD?_H|GVkPmre@-7&_x7=}>yLH; zWPLn^zutN@?!>+F@YHoeQxbPhs>RmZF<)+u_gt5@viQ?u7Uy+s(}H8eIU$nH!X;^% zrk@VG?)Hv4?(2K*rPZB0S?QNzsip%iV!T4#r|Z;aV6@K$awf1|1N!JWApgVWKZv!G z@FhuWQ5u$D2-Ub0(1=Rm=G;{WqG#>Iu)e;MNo7sD#JV95>XZSNybV&PN*8*-eD|%^ zMzV&UOnKVIUluMD8S?Ac`br+n$H}W*4jJ|ireED-`f_Et&;|1XJ(du%b{Q!ShL)@3 zt2%^|GZS_k?K&`TgPrZG&cO(cA)9>*qw}XZN_jMG0#-M*nTtp1HaqU2ib0w|Eq3Z1 z?)f1ey`C*{Yk*!J=bby5 zw*k~mG|i1Ub-rltbJrpb*X}sYwwJ~t!eiAeIW8GK2Ya7cWFFeb2b?=4FSoo(#lp_x 
z@(T6oM3go0i5Mr{xYYG@MMD>9bA9{gVa$($zwyLA8CE48Kt9@_LkvNerrGj$X~f}R zD#@xuHW5INZfA_uZPU0kBf0^)^n>oV^j}56BIs?@b(r{(PA1lr_Z10EfOac$Q~j@n zO+GZC)ZE&1p$!b7W@7prE%h!vzD_dK*uwP1>j+!R%6kT4VN*0QKUS+e0!@=H+)z%+c&omxrdBs{|Qzh=~ z@1|hUP-K&)cVVrz#%+$t+s#RlciIV+)O z{wV27&`4M6;HEZAw8+v_TWSOpfXEbevNERTs;E?zwpgNocGQ$4cOYHFYv^s5q!N?b z8G#lQ6ZeXGYSsi!d(cO4t8^Ty*P>H{r9?#&_DQ3ELF6@~Gjpi=NdN6cQP!i0 z+G$B*K}4~Q=tgIwO)Ij^7!lBksX_m~@H~eM?SfwG?uv3D%dXD+V{d)dhw`@-NM+|6 zZFrEhZTT_f*RLzn4r@LQkOF!S?$G(=%NEhgvU8DCrC|kNc^U+$JJ~GKW=G{~M8dm#7gV3ATFK7R zb^LX)8Vl!b4u|o$S0!r^Tiywe1;1@A%Y2W^J!L~GOLP))mgE(N#2l_1Q8}V$VOo}x zx3aWhzs3bVukCBX{k;b}{0AGhAXd?uu)y@$xk(PM;`T|}^*?LM>DJBjo6e00o_?n_ zOH!?`eyoq@TN{CYeMtJ%Bb_|+%_h51@1v8*)A_N&B>ZCsdH>Dr&%$=HR zNr$xvVS{S5K>hiq$@YB`xoC{5<-NdRk~J%Z1S)SD!&=K;CRHV|XumG>t%|c7CLF=8 z=}j{RZvw6BZd}A$+cwjl5LIV~^p;dplnfz{dLqs2%NBCv*EQ+8%CYT!*DN7U+A1PW zO6w#m3$kvIC2!^VaZ^GnQsV2&qvg^pEG(qm*#C5){T6Vhtk4h-tXssE2C>q= zNr$ZkT2iv;960PR!;T_o0hE3H&})~JzyhrW>URJyC*G)r)S;qY5HZ;s*q-zy4R;qpDzrgwbcy>@SxBN-4 z><4HeyoUJRJs5Oeh$Mp31xBi`J)nXe{QNB_DilF1q>B2!Ll1{b+1c4q(nuAVkZ915 z7#;z=Jmy^*B%nnofN45dLC4YlMVfFqAG1$w_0+VtrcL6JL@mV6P5iiopwB6PS@1z$lnUUo zLM?8fJ72Z_Y-ficoc^eJWXAAC;esOZgGp-v#JXx7C%fH5T6s(lYV%X zzR1_V!nznLQjb?Wtw!`Sr>ohGGM8DR1+B(Zi4{WX4!Uf%z0|WlkqJV@WheRusNE_LHs!Owt zqy->=QFRXe(ff#01pbibvGBomQaO!?jLEIc7)~@myKv9V32!QSYLQs4)MKs!R0~Cy8$q*AM&5T-{Ay!&Qq5(Jy z!QtT>rzS><(8NqShKcMyC5V3}a{kMOKD4~GSNqq14vY7(Wt#^H;m@pJaEl3~) z|HRIuNWvP@NByvMTA+LIK9vl4_9uzytYqO(Wv32*a8esqROv$u8f#Og*YJOtg8;O9 zaW}z|>HV-&Ms*$w)O2`A{jz@g(?}@%kyfP+lz*tnAH=wfwdZxHTQJriN*?7G{z(M* za!*k|bK9gkCcc3&^?)KCm&%eY38&u5HDuUIdM<~X7TxOxt?5;mg z2HB+k^Qw-uDVU3%nTmfpmmPi1o0h0$_<;qsPSywct}KF7j4UvUZ&e%l-;agvXN$?Z ztT;PxFzaNZt)V#S*~7l&MArYghCwrZv>-dciIVukEpTu8+bGC2fNTZ1J>e(yIBcGz zIu>1_8Uw9!?DD>0vzgVYV3DvjIM*sLY|i#!IC*StPUUboPuTe)2Scd;@e4$6^P{!0xgI?aQ!})MFtxR{RgC?4HtO&JTCycb?rG!E2MPJSY?}rvlGsN6r>}Lq z$!H#i4sbO$njx%m0s>kBC3(b^7W~3po?P4Cgy%)HQgj#{zZ3e;75=<|vufM`lKH@L zkV;J4rVTn$dCxC@MbvCS);bo9sI*#_Cp~zLvSmwS8{C3ECHXs{HHsNA;1ILog)$Ajqgg 
z1q!YlkR)s#|KwQmWes%Eca{U?&+<~OzuwAo@(C+PX9Pa5EmUGkm&6|0NLGu4Zy}B0 z!I_E1hBJUYazTXmL0h;si_%{@iTISP)F^1ific(Kv5iS}5FSQ4edjEF-p}#NU*F@K zs4^lX=;>9V zn#pyZ{acy$Z{EseJe#3!(rSj0PAlqSp3l5#^UbRrV-^VxF1h6R%`d4v`d++%QwIJ`X9SMZ2A{hSfN2oXcC`7n4FJc5tyeT1MK$8=o8B1 zaAs+$icwuML^KJMBgGNwse~1WV2ccds(`-xlZRO@WO|0k4bc-{ykbOL*m3$gm?wqgs{)_bjAl~c41pU7smVU#?~z(EiCTbCwh2ikXCdiZ_xp{cuU|EqHE$c_1>LKy zEHL123@wlTn7*DM(u+|FIq)Bqa*CgqEvv>iD=@jeeW}{l>qtL4`t5|5q5cVLoYCzF zdHwjbeLs@M3Ye+Jo_)_LFvl=~HYhz7QD8=TI5TQJ{7?wHbAT^}%Q*exUN8+IF$+An zh^89(K+}D!EL6NaWGphT&weD0^|{lRuTGUgb?-BuIq+7{P2q^pyssYi2{CghMKx1* zUEW8CBHgBpyiSM$(3K98A3d5#umC03e7(pYQ=jLiS5zjzVhU7hI@slXM3(4FGsoJP z>7fOnS;9eVq3DNQhdmz^S~uO;naUwBs6=_XXU1U2WKHIjnmD|=m0lr9aFN=k3WhJq z^byuJ3A>n&d01u;9f}$ht~*KD0Ko4H313LUC+fIN+Ifk+meq1Y#ruPp>dnEiYS^P?KT91r-R zXX6Uxnj{f>8qh!ve|pqqdV|PW;6Yjw^(zVol&HHbZcjizBDMuzHoo!Z zZ*F#G4TMebOk2@6y}62}fx)_QO1mOR*+%%BA0)8_n!ueMK=2^(1vMHvgjiN$^8G>@ z-tYiD3e77Ky{Hno(@4;}pE?>I#KpxOFr&GOwrwH}8&#}aNiev(_U1Da5OCCzjDg1V z6!wH=iWGoRNMmBqUr$uuuHS%>g>|;NQ#;ssbMt)@Foo&KAl?^_X_pcv_au7+owYP zzl=~=gq~b2< zK#FmQ%SnL_rNGTJ7=$J#AdI%^(l>z`tMzzCDB=70_xO3}B1m42mGd6@PlOZ|>qau& z2b=ZnKs;yYK_K%Z&MbXO*2US|F-7G13VyxBUKV=bc?Gym>T&dJ6@_#pMw;2X5;g`8 zC+*ovD<%X;BW=p8$9)K~;)B4zK!a%p=XR3jj-^1EVgOopNgdvR5XABH+6X2DXhjGO zO*=&}a2imlqUjSt;u=2nG^Cod@F7zN&-*Yd)tHip znCYA!zwAr|A~7VGhs^3WNi&mB1*#D_=LjgH5oZ|eC4@)c)h|w?r?9=fY03<(HH>v0 z2?SESCJr6AR;a|vB2w}kQ8R)ZKH*4aa86piwJB>~k@Ox-h8c*ldZHgP!uSz-;mI7D zcia1zJ^(-5u5#U%_eaOKQ#MH14D+v(p8pFxvw zC;%g=Q6-yL{vlO|NctR_0MVN{{V5$JX{}d=XgwF|f4yb^8)F*@KqXDlVSE=|zhYS# zVq}t^qmHqvqO3bwNKr8+m3AZKV2}5ky=cL*n?ZlN6=rTUg021#9&|%ujRT|}lWH=7 z7Bxd)+P)A4OB|=PL(`C3R<*ON@k0Xz7Zov(=$3?kjO;|_Y%q{APCByn)@>FwD^fT_O(!A>@Wr%KGGoz6~Y1<6hjr0o||+|NZ1!K!|XA*w1yTYPZ|=n zv(`kiYLU+wm1X5k(UYbZ3PJ6i=G#~b zorw(9=%Q~U6de-M0nZ>C~!8i;*BXx{_+& z30O)VGU$k?UAaI#pu!Ak)za+4%|f9;i^lj*VpZtXxlX?-!tP;=UHhPR4lz_{91Bt_ zLC@o+(lli_+?vP>7=lOmDgP3Ivf^SIN%i>9mJ*;xcQKGBU2Q~fv*Q$0-V30;U7diD zfFfUld(xCW3=p0k0v=Y7+F#VKiu9pvChcE^*E5g1LWDZfA$eT7&Rh$+zWF4Kn~whf 
zeDb>R(3F5Paut}eq*Uqm1*>qHq4aHq zs3ZkbcxYT;>~lME7Xl|G^Nc;LA5~Vg`$^zp zrbYGTUCJX!!j~o;(-L6h%>Y0N_`5M_+;jc5J|IR#h>JX6&$ghfZj{$$KiK|;{AV1p z^|P>Aqzg!J2n~`r{pzNnmjp)nFrr*N&M8f7YD3MeQ=XTsVV#9C&G7Dq5l(q` zX?`5k&nY(cJM8f3`z8Z=3bglX!NLqPU~Qg2Git-sHl9{S9e(4ND8T>OL76=)ftTx7f4}?Q)vzN!T)p~s_nb!$1Fq#jtZCtv z<*~KfQy7go<%MVCJ20AUgQ7t@62D4Bfm{+26549Vj@?CY8jP^=Fv2lkj6z$@DP~h< zJ%vM14F7MAILI^YY_d_^j`JT*s3RV88Y^17cyVx8*anOC;@w0tM4dATi{F9y8m__U zVr@rBwGwGc;rKEHaC2H(TUS)W-Lc+a-zmG~T6Wcv6)Pg{-d#o?HjX_zg!YQChYyQY zZe@Fyl$QERnc|!%)xmXzd@2?Z6+O-=Y)%RN18k6*vD*D5l$2Inl7#!BvgKKw`Sa(O zf^;LpX7>XNiz2qe4)b<70=Mr87~bpe?@zE9>bxrPT4o#>8<#W&pP-5P$pcnS;bk%Y zh1RGJg9?1zvah};eHfRcUvQ6K*k&*+N+}Y^);VYgCzOqr#+K3@Vlb$=hFaX2KUdm* z*jKA6mL83~7Cl|hnB%P@+@ohX)+Q{AYdE%v^EVRdkk z_(YV#1onz~#f4P1BNbhe!irIyh&U85(y%Pu+DvNiUZt^d+RcusJ@CoZiTa+<#|gq$ zMc|26Zk!e3!)L3t&CAy3huqVNs^w8C+Y=3{I>JeAYUo`77J1B9%8k zfn~;)cDs7*+FEH0%MGFNOc9HUyyWHO2XU7xs`uXH2*+(E*ACGNRv8Fj3QASe@yHUa z%Y8yzYQ@i;J7)tqjRh%)>-4aqdMJo{HHl9R9&KrE7Caqa6$L6vvXtc1~1(;wAJfi2(yR(#!Z{@<%*{cw$zw-zPp zyx9}2nkerrU|1HfeHdR5nCs5~Nq6;ly1R$8P`U)rkji;9R+A zpCQ;$!alS0Sx)<^Ozf=Yj6l$rT}FS0O&|o)zk=z8u^80xm_O-|w*VQ0Ktnut)np@e z_ZF)M!#y=!P?|D*`RXQTX+^YJ9w(H7Z6#~Nj;xtNu4SVC1n{r|%D?@hZ@7PAXOG07 zg|u=j*cQFF4+8@Y5z%W6Z^rO=#KtU!m2f61*I0ZBfe5$gsxTUgmPG)los#V&d(Fi~ znWo7k!my0BEMXVs{q@&9G_*1>`b=PqtIQ{ZIdkWB!X8+4y?gD%!rqAlRUm;=#PT4J zH%YJ4wjWieVc{(rU;Ln5n`Ed~=P*5G3g?Gq)07vSA&pyR5Oh@E5ZQk$lX$Ea} zytf%<_Tt5hS{j=pUniS)imRy{Tz533OPJ$k{Ceff0!FoA5Fm-HJtg_vc_G!iW8+)x zRZpHgi7SR)kj)b~P)xJ++_Ps-b6Z;|@CRK)9yaj9xwb|aE!!tQ5*GOkQCSQo^GK@g z-@pGdbIgTCT)NV(U0Ess)$YhMCvGoYJRM$8KWpwwvv6P zQ5g^~sm7nZ z5!eH0UQS0Rti}vPV3u(}DDK|gT?P}lAZ)TC?!1A6>Z#^$kRJPd1na6iQD529&QQSH ze)8i!$r?=f$`v~n7mQhY9mtx)kHxv~z-lI6E#2>0WuUZY&!vp1VKwcVo17xK-I zs#<5yuBbM8{C;k-S-fg=DiZx6)Nazm9z=|djdeho#3IcBbJP>9`ZOfH*UvfK*wi!x zhKgWQzn`Xe!2@u3a*8b>a~=ksxlybYf~-zJPg^;*rJTw6Ngb#aU*$ly{*yDeg8*6O zoqBYB$3D=}%7dkU<`g|IR}wC{0DkoVTk+>&1$U>xNKXD)v&*k476Uo1z+BxBFh<}U zfGkzO+&8~TM`9a~>u>E>-M{||g5!*BuPi%Cq;R__5QTo%Zv&G8-c0WiaqT;Pei{+c 
zh`v?REDgO*%hdC1)-8`vbh1X3uRL09r=&l)gYAix8l`}X!m)uaxS3g6CS^?a9{|+7 z3(!nh4A>klGt)dfJ#Zl0$Lo(sV?Q!4SfH?L7n76*lUvSvldyn*U32El$t^0fQu(D0 z0S&GO-tRVMnYI1Ci9m<|qx=8FQUg>&f`i@k4jeoWh6Z;*>dR}cy=RU z4v|XH3(i?Pa~V}9M>UbPNCI>=&sc`B?_B>-sEz?7NY?dY1zv%y>-@qKYU8A3$y3Rn>@JM#C~3l~JKtS{T7 zTJ}WTx^>I+l|PJ())J2DREtP>N3BGqzO5qYmAv*UyP{|idw)eE%>dvQzhpkeJ&fonnXYLx8%dKtMn6t5N{G8?uPI|SEAH5nd@2M`AGtt39b8pyyZ{C&Cv~4@E=1NU{v|`8{Iy7`% zT{^e0kZ+*cgI8_AIdk)bdpmR+Kd$fXe-Q^li>RGq4^oeO*~ik zzI_z5PhVR8x8MFab?TIxhezwjw-Ln;4zat8dy49;5fD(q#$;Gj{ueD`nQI-AHZ8e^ z$rR1Qs-f!f8UNuZz(`Ex%d1y8F}eS8b^jpbJLtRORu|=)8wOf zEg;~R>{qXN#Kr52{X9KGo14#&&*PiF&=BWVNJxmIo8fud?!ug?!;Uy50e+7}yNP)2 ze|D+2^!3GJB+#zgw{NSanuo_fdH7I{4jzC>=2d4IPS|4hzwP;m=G_k1pKSMDDW3D` z?(W_;pVbb;RU6<*JPAtDEo^oht>2!MTBRxVO3}&RUo)}L-`!p7isYU>+&f%0FBDBs zdvX@=PIGbw(%FmipQ?gmafJcUF9l^+YOv=eRc_L+nk>V(gQ+66L0)sxJEz< zF*vD8deO+9+^s2sA=@~j(S@KRH~lO&6Yl~|jf(@Irft}`F+9JEU&X}aAw>gIsB4!y zfX60Z37^TjQZx+aVf+^nT6xFKZ+k2a>(i@>D^e}=ud>VvQ$mHd5_KV#J_&#!m4KI; z`}+Ex=hLeLhJ*gip0~8LEN=`52w*N~(mQTs6qA<$yJ)3i5T;$^*I60skJA_oym%uw zx6G$cpP2vipIbR+Oossj1^U@^ZcOgy9vl5`_UyV9%JNeFp7)~81U|UBM6l5%S3NSK z(l0YJ69?KrK+Vfcxy8mWz2d+Up$Hkvv?tZApD6}<+u@W!r{We8O34?ed=h*;m9=y9 zk#DNDCSvE>pHG2M&}%_&e9P#y~(>c%LRFQfIkdel&_!>_2h2-_6$6@4DDgbm$p6!u*v|9P|8qEIgy9MXa0O5A`?y*3N1Dz-ny61nk zSPeqf{lPG?t^L(8>S?*0X|`jgNDNccEy1NI&-wLZuMH-H4gy7Vv#vY!i15Hta=Nhl zH9gjOHX0K{U7;AH>?zcXVYu>V&pt2$bA?~0cvngIccMH>QN=v5=_sDI;Y6kR`lq2n z9Dqpu{El_JyoXSI;u#o6d=d!V>!5Yg@XsLqwb7ff1jP59gczG{V*DX z4avs-s9OfPxw&m?yMFa*fBQ_FsF#dw$lloAu4BLR`*&A zp~6r9c1F38IW5xG)>hW>i;9YhFXWZ(=;#PxExv$FP{${rP@$@|QAHv2zDw-=CcQY< zY4PRDm$Tgqz2DTC>RJ@LKDH+Q#{7i~HR&O_=?$XBFW!+N}at;31?cTjkQ=dAcN-#inXcuxw z?c0}ERCJg!KML8qyb4-PuT7RI>FVm z?8dQp_@j*Yl@B|vMz$q4*OCQWJ0J)g;Bv)h!kV0!SHW2^6i>H#9n?k#+S{$bjb2Az z2qcIKz=Sf}#tI$FG<6tjVcXG%B_$<=ss{UAVa?WE%Wk~fO~;RkKO2b~%T_$*^G9<= zeDd=T0v=d3#@o85Ft(0iw`lD)mX;sZr(x5=uwDue1ct!u9a zf`fyXHD0-LMLzpJS3`OQwDxqbY~Q{;5s8mW<(IpU$3SE?;Ogd)?YBe_5r?7@n;_E~ 
zp^p?cTBI+%v{ILGhv}~wfpk{!k|j%8$-Z3MBPn?x(R$oYp8mh%kN>vCBVejRs?oV1 z;?nvWAsp4dDy-Lb0K#y3tCwh#P!XXRDaw7n1U4NJ`toXzO)8CV?rq$-k&U}nz7(F5 zz0t>qgI6=7Hb@9`oMV@lm-B+la|9bMum=s3SUFb-7X%>|)?uNWHM$T>%Yj?XV#sEF zopISrOnMnW?UYbRRfa3#5q0+W_eWctEDcZq{uTMG+cxjawj6_)$s@b5!Ab$}gl1)B zaldBxWjQqjOu6q(mQ|0|EpptMVpEh;R5Y@C9euW(mE1qMp>omp@85U3b?bzVw}_Qd z__JLtV~EMzyuI~x09WT7A7)v&aOVp=igOn@)9PXX1*g<$3M&@3<5dC)c92e7IkkEa z@J{!edx=s|2@1b2NvII>{P}e&G7&Ic%@wSziR zf(pqR2Zw3B!m>1AGefYz?EuQ(pc{Pg(xvOM>nq;EKTtjd6wQO0H01q*7|Y$6f!mii zvPCXEah!cW0wCLA(WxK?;y}o!^OFa?yu9uSp1Y6u z?aA%4(S&cn6%;RTYia2K?okORVp{Qh9gxci24HqSD98ep7B``nOJj$?K9a_mEU&ou z__fkiRaFH34Wf|2vt>(h(->C={N2@?H)D#A&D?Mq$JVV|oh}rWBlcd@QA1wnmv0q5IY=WE;JwMR>r6kPKvK`zjJ2|LWy_p zPF!|!ia>IY;5z!(HEVbhJd*utB1WyQU%#$!>eT(8e*Sr_G@t_77O-39gY8Es%*RP1 znh4bV<~XDHFeC5z|(Cv0VUAH85#d80S| z{mmaoy`4J6zgYBCD*|aTPeO%MnNkbP{#I_?ZAIx36$mK&ss<0Nep_W{MYQ*`XU}}H zrMWsFNL&g-gD7D|3dd0eg=WL6hD|3uxe~D zqn#hyM_Wxs<|-~Z1XTr9Bs^C7ev^~7(Z?R`0RWeRg{53fX~P!^c6}4@Ez6$jaM-F0 z*^rk%ELhN-GIJUP_jT8%9EGJUM;4-$T!Z{S5Vk)O27a+Og^rG9i!)h12i$)z`*9Lb z<>CJX&V>Sw23O9hN5rHDH`#kZ_V41KMo$CFHBLp{sw~aQ7)1xo^@sh+dh62YJnMj* z1V7x6fy&Fri!YPfG80~c6p^(Jr1oNJaTS9fkhIY@P%>AX^SMoi?A_5Io1!mHqo#~07q1R ztiPo&1CVFQ=FCYL`^0QOyfK7{0c9&pqC5z#hc|=~0ALk(xVU~p+#@3`{pRBAc|KEM z)dj&z>7y?$WB(JSyE}AUfzRWH!DNX-t1xGFj@6|yT zOV@&}hII26=@p{qAMWe=9MH}@o3{H22|Fk(sjgfuVi^S(P`Rfozy<`d!^BAoA4ZSE zhIQ*&qnhrxx|(C2Ja>*3z-b#W+IYl@&u=}z7`0OaMA z%P`F8)Z}CYe?#>;pw%FrJ5lAm9s8_s7zjise?XyA%&?cR_6dZZMR27-~HCDch9ePJ^+fCHpIzq9DcQL=_f!w zAsL~qhOpb@00>kt%0wvC4suH+%7}D<1hZ@8E}}>KPXFJ^0D@LrTwFRRWjgKot@p#g zKq-oaF{q*!MoY|HxL`rq=aS+1gqL6lMd0z42h|oLKnMVsf9TDdH%2bt6#hXS*UYgh zY6(Vlswk2?!tC~TrD@d1_s}i;`s=T5o}Su|Q2{D-0$`7EV4UaV9gXrqjt5-@K{@ZlmC&v2tK$41S*0o=(J)gxTe2t z|DW^*tutpHcefGvu{m81!LTAgQ&D@-wG^vr+6}qW#^lvVKc=Fh!Z!hywKP@eL#7`; z5zqz3M-YUY+hSgoxZl93e1ocx8wx(Sc7Egf<6;+v6;K$&-86A1_X`ETyQPn0g|VDy z(9i=#0Oxz-K=JSJu0uUF%78vYtCNj21BYyfR99^~a|N`hJuqWDLP9DOcLNBVa(7KY2A0qzAb<{U5)xfbt<7XJ? 
z&98y{iy6~}I$9n1XGj}>00YZGA~sZ-Hf-O16p7UP_wV(p;zCl*JLDrq_v2aM4|lo|7zHcs z29HoJ5~+tTD~DhJ`cLxo5-GsxM1~4fmHSC6Ks(V+f1DqP$X?j-AUIcir)wFi1vsY# zJ)giZcX~|zOf^#!V*A0wi(z(&?H>5~=bw*uI0OM@C+rmPE+tkJ5TVABi|^2QVxw0Q z{3jr9ZV8D@ndmjveTf>b@QIj%9F3S*mBJ`kFNE4uBE!J}db}S8ORqX198R(2qyEd9 zzCXX-|GN$?UbH9x45P>8I}u4Na=v^yMQlmpVMHfj46|vABC-H8M4#;*qadtdCCE{D zUlO>XiU~DcINAUC=bv$^JodHyaN7JAh7Gijd%aTd;y`meyJQUjx9Aryev|Y@K7$3s z?Bw;}IPQfla}CDE(uIS>sk)mi&j6oB|3eO9mwO(ssm?wp^?7C-!me#{8n-1c3v4Xr zo}+R|Z;^rg@n99+H3$3S@7ntEeSITLS+6MQMdyq{*9v6gC~+O}=8P8?4xJ!cn=RzeQhc%NDsGe)vJ-+j-bbg^CCR z;()-f^&OXOfWdDsZxls-bOhm5ATI#IX9GeUtYVPL0bb(9r4B|YIo-Pbm_^9oi&5q(;Gi^#{N2Grn1|;wX?U{Z&b_p(We2cM-?1#x|$x6 ztUF)+F@9k7Y2MUF@&0pP#PR-DMiXL(sLbgxB2Q#&1BbSKvyMzYO%%)8p~&sZh1w3f zzz%}g5-G&Bem#354sax%9GhPsacB_atd++88VBcCcf({83v3;LA%CAoIcG}HdTmib znr5ZQfn(t*Wn2j^g_#U@nEq7m!BC{h8WF5og**Pxet2wLJn@*2R7T+otBiX4htn1QDn`+(yWT0?7k9-=`jh(bO<>a@=*o}3 z2}o8(My3Q-#m5jig;m4mxU2hk@(qm4)4=B$)_$SNwO=U1d!Z z0^rE~uqABD2Z$Y;eZ78Z!F*Z0dj8%0$tG|Ao`7w_cYZL`*3Mrwzb%^HD0B)X5sEaU zltwI%M8U9Rk)DkJbkT1f1#w{OP9W*gajFIDUy6lgaC0|LLduqoe25alxseWY-lkihgo-8-V|1 z32k_-x_$n*fw*2}H;Yv2&iaPR@V{^P!zJE*J&V~X=_y_*Ao$kEF$UL?Dw}Fls8@R$xxeD;b>R#l&HKze> zZa^uKZOIZD*P9$d_p#UD(PHxxDX!)eGJO#}29Dzp01>L3E#{!5a9nZek|Yz8!p{!Z z7PIM;1RmcP()#BQ{2i_yOI%)Z6j|vMtWL|*@9Fd^k3Kn6mu0l#ti1oaB^F`EN|FDG z|Mk0r%nxPOGjoi0-Bq8i(ugY9r{1oo|>ga?I7BzDNl-M;(o zI|`(b-ca!hC_>1~B(Q82iVfac7{_oke?35jM8r+|z^}0nAAb073%_p3Zvx(j<}F;Q zprPS6po-cHnreb!|DqJ$_;$YQOfz6vu|p2W{L=5gga0E92zEaJS0`~IVj=zFN57R` z(Hk}VI0RB>?t4Ltj7H-2!Pi@hxe!LEmXu#CfVMQ|0*lGfbLi)(TTbJy^pJLh^hbsf z#iu5@sCdcJrESCKvGUn3Uvl&FmsceLRP&tUOaToT?ECOQw%w)bE3|MpH5IwXTMt>9Vn&Fey@CWZq)RodXxiSWNpD4`$tMc*M_f zd31UoZd)B?=JFZh(qE4wAax3)0XRvNfK-v}1t2#QRoN6;m_T(Uq@|1^E3A$y7jPx; zZ8Q+BDPZ5Rn4#n@ix%5kT!2>hQ%QfI8Zqm!h)faSS5b(k!r*aG*9ig~%u)#miwq|~ z`VQ<|kAkyzxTvl|*}l|L_2!TF_}_qrMuZhcp&TWdcXvcR5KACA*XH2lj0W_gtP=x< zdX9>DD7+}Ct-3cK`*C8V&nm-`Ya>WqcmoC%%nqXY4dUV%3Yn+_v0r83H>_3w z_z+8_FU0$j#Dc*~+L2pSx=4I#pA=%m0yF7y*mnc5Ke&cC`TXRr_^#MfO?~Nr?lOZC 
zMZ-v#;-$&v#u|5Wf*pp3Dz(+F;W|BpJ3uEYMi15ofxkbJo|(lid;1JOyI~bcH8c?I zup2iYo0wp)MygDp6(~H`sD-5^oPNbB9m@P8!MLFMgaqnLC6IjjPT)v-rsh-_azRN*%I`;|ZqF?$JnRE+rL zSj5|6uV>GnV`uGv^VyxC+~YK3GLqK-V%bO^KC(wMFh0!YsXZ?OJQm!?LnBiL|CI3I;Ng+_bn3MHyJi z)vV0S{pW9DP&K%q5r@Hk3S9IC4Z>pFD{-SdE&~iZp|Fr)l$;k=aomf4W04prC(-dh z)?TCVb)uWO_sx_I0$kFZK?TeSrUA>o+$oa@U%*>}HB5T{ym_+%Jyd}}8=igbgXyKV z?@*%N`T`}DqdGe6Dz5+pwj<(Vkm#q zSJ`S000vY;dES!?J6IpYP7SrRV2~grYXc?cCF+*GY_g_o9l|pET|op41*-9oyCl-h zHTz;NPnM#Gqa%~#@wh9uxwN0gltwAt3}xV<)w$hWI5E4*RX$ifVr@y~Yc{jnCvkb+ z=f3guR0&o+Et~80l zQHVmwBO=o(jZAR8>^M0oD&hI)%h~zO$NjCLWl%7dbPr z0kw<6XNAl+w3W5_ueO&z<0x$yl;2tP`aX}UTKTWfy1RD#SDJ!%>iNKBzCIf-)P$6M zBf{#Qk3<6p49^G#OAx|Ye0aATelR%Kj2BSYp>bkXW2t(o;`brX1lG$hx>6abTclWW z=ZWMR0_Yg=3~I?lwf1j`w;9p{m9TjE#Ms!zty|v&34M0p!c9a|zRD957RSM}=9gcx z7A<9SH#0fCTy|Q`S1FF^HkV4Y63T!2*yfDh039lrNZ6;7eT(*^ims zycJ1JmY$U{28N2ITl(rA7i_lF$?5Hv7!^FK5MZvk+2KZ~%%jzRKYRX~g=yFQFZ*q{ zCA(NKVSV0%kTgS^A(uTYMkn96&6=e#<@0y!{^eZ(4@At{Z>=_h{d`4ku~5aNebA>I z*?L{7g$>A+gE;%jI<}UUmTcP1oT%?AB8TOI=cf|Ko$}7KpzBL{RnJ_Rw_sTSVX3X? 
zb#MaC@ryM0ie)fWk=##Y<})*Id$IP~ycB?KS>RUW>mxoTj;dC&zi_50eDXM9u^=4y zg6K=+S)%EotZw7O06?=YPAX+A`l-=|D&P%PfQj7@w%MCOB-sOt^`~{Cf=bS!%1IiAH_jVfl5p*cTag({| z$n9raGdz&VA@QnNu8CH{&iW^5Mg2K>d8Kh$c?%4|(q38cNVjwyLF~(2I7O5mJ$j_* zs;Rg^g`*wymX$?EOb_P#x;J#FyXq)R>SI5&MX<3=^>O6Enr{TghCtdinC_Om9=h$z zVl^fFm-sMl!|kQ9F6h~_)Lg0u8>R$2E5~1{mW5|z=Pgg^bNSs;U0~8S4;80vWVi-oa*nMC|QRjodFARUuDuC&{&|NFUfJw&t>Ri4;b z01$;Y6SEnV0|ZhAsw^zs;aRh0VPkK<{sj41#M<@7=yLanKYs{e&kp;^^Zu>zz`7*2 z`r^fYWApX- zFBhz~8&f6-qa!}@QrRzWySv=}UW=?O1J`@b-entJ1fP(&W6I1~ZC@Mn^BgS~A(c&s z##46XthsbXz$@Kz)8B9T{L?O$#T*<4SBf_M+Js@?w1jChA*j!o3m}ZVMH0QG3%dYN_Mi7-q)$M!RHFSjBYp1NX_}md z<4{H_B33Brid=QUN8sX)fmL}WeP&aR|BJ-wH@~Dk57fF~VAGm8r}|Dyqoka)1802Kgn~Dpae?xT zEBJbevr>3PMQa!N3!Zz{Bm^s)=Nr$B=DmF|0cI}|s8yf?brZnK_?rc}xe@)gg_&zR$M1F!Ftxx=N* z{B;-zo4@W3pOLO&(cUHMA`-Fr_a!BIHh$(#1>|1-52+|J1LY0;=pFBVn zEhdwF#+DS2gLN~yC8)DBMqLCM1quhr^H~k+QWddKg@h$Tn<7GPC9)1`)v^FXqYLi6 z1Wmz%4bz`0t*1z>gKRGT@H2&?(l2+dWwG$!SjDfNmG}O@COA|ud3b8&v1v{+Q~HfUEfhAJ>mM$&-=!Q2NXsii@$pJlr^Y=peqHYG+2F>`jhatbmT9@4D%lTdp@(Nf=!Lhvfb6TMF6E*WGKR+73!%eIFmA^jEa#u&;2r+~CVQ zZAXMHrYEVM?>vf<7jH}} z9!ji98_Ow@H_Mc>*SH>%e@Qaj%W|{!SLHtUQi%%g9E+!cLDsB&>%RIQpO-~$_MCb1 zB4Hn@)2-vVHcCinqG{?4gnwEpMp2Dj);Kykx@}@E2uO{{Lz4&01+P2Imse;WuT6JF zdUT-W7(3#gB$?I@csD9Th}r`Zp;NSd7!^O@5K4h5sAn{Oyql1aAa4jVLU{3bZ+k;# zX5cTC01XoyrtNVOJwyuMn`{sgsw%c5FkM9hd;-0?)I{t+F6p?M9*n1v2L~zA0R5go zd)l})^p-O*jVe5<3tYG}_0zl6WtuCha*FbnTfbB)v{t<=(AV-+T&KADv5$++z}Nyp zVZZ&e%heo5fV~l-C)}Gi_eC`kT8u6|ASSk>AGS?6J3F&CZl7Q2a|l(c)fz}2)tVsS zzzJWI-(w+{;VNa0ie%W8EB{%gb#w~lNOO!77e0;s2*>1WQ49IsRg&j|Ve%EMPX>ei zCdsj6iBiY_<57re4_V-G2Ap!WTHJNwW>>x)mFuHq^W|iqaJygF zV)RDN3$d+C{PX|oU>v}3CsV3r7d{gO&64W9)We9XP722zU*BjDLpMlBG*qFgd4UqJ zdh%^cc^Y|H25%Gm>_KjoSrs z$hHLPw1GK3g)$B5{kyeAwQ?6D?Q<77i=b9D$-t|7_^3nVAPUFe`P<7JJZN4e!OyP< zaog0u9w0TF2DCeX(UhUmZ2?(}FtoHAiQR;c(?tqdD9Fbri}=In2E)24tePQJvJHH4 z`qi_?{;aOKa^+5|$UMXOhdVE>9UC7AIK=AQ_3M7^@ytU~_ePYf<~R%QjhFg+mF4&w z>vPVHW$tsx-E5Y>FzssM)Z>C6i%-UPx{6HJhAV#WfAQ}a^2;{cFMr}h0GjK!O5&hT 
zJph|SwQ*G^Wuzr*PJt&@g^pWEU(r-L?X_8t2JE9DUu$4cv8}Z+We)1AU zbzw+&*xkKt^&&f)YUA4yAU!>eQSBL#QBj+;)llgMnJ^4=H#AY!RMn&1joNkd8Hg)H z=fFRdMl7F6l{O?huUdp$shmLXM>j7gb*cu@AJ;{ zntA3i&-eN7*UO8v+{@>4UFUV4$9Wvb`LB!3zw>9gB^`B7>}S5f-*TACsj|06+wYy& z+s_mHZHc4iO%SWO^ZbIh%$XggWBHd~tN+w%wl(edb7;3;BC#8Bm)mlJ2(E*c4;m^8 zOBJntthENHwOnWcpkr-GCUOzIt$|H(z+U8E)E;wR^}GEJV;rf@QX3khYt0g+Mwc$f zkzVlIFsSAVnzHRP*kH6MU2ttUTm4PoM9drq{;uk8rg^Vy_BG6G=~>dKP-?)61k5X^ zl|VmYt7t=M2MKlTGUV{G4ISn=mAe6~WXn1OkDR1zOPhhrn&TR+mvBe)q40$+Cq+-^ zKj8&aRm)Yo{NC%noe&M5c@r|%bd{g3o<+crO;#OpOJ}}+eCfv}k^Sa|l$;)M-f#Z# zkaI_hhYxm~bMldW_^sFOg>{+ze?Q-3=bzW{_2tBA&0g02aPwgF(IUO%E>&gSH*9X! zr*3a)`q1gUq9e|wnO3wcy?fviH}B+vF_X%=+fMaZxP{L)f7)Lw=8vF=@f9_XDi_%6 zKd7mi53;rWMA(J6KOg^JpOyV6h;-}Lt%d4kY4IX4vE5aUi0RXJrwpJqxNqR%&2T^; zXRA|wofp*(zS8dg^9fZi(bAT~*<9Yft1_4}2ANsSukk2l7*%B|n%gV3zd<@OKGMuc z4)gK1UFWlb^E|31O@hQtVR|1|EI_UR=K4i2FY=Jf&|`xc{w zt$o^?Zpc=>UW>je6e`7TltV^ZtyPC%=K}YR?Rvh%kl(k$gLTDznHA65*#uUfQ?6XhbB}}?U(s&iRl5h*SC}0%< zE^oOC!hsa|H$udS6hngPIjFkMFmJ+0{~U@po`Ua@a^Fd>kS0MGU1q~;f7Ega4qkY< zMIBi24fyitD-s6Q&PAku4%!_%z$4pD>U@lBN`T3-r2j?=esSeoCH+jKo+9gHw#Ba( zd{hvm*-KT~-nS~R=Fq@M?~41ymnMcy-jnD&KQ8g|tEW4!{yzFIZl7fv*S&b2p?1Kg zxcKb9F2|q$27EhV0v%T6mBIxJ7pg!R*rR}26HHB~!TPwiBubz*`j{bL6a)8PZ7cG64kyLsAN?OLmT_}!S7@4vo8t-GO>=zzJ3FSsV9cp_56 z!FDwhk@gYO4znVjbRS@k+@GG}%hHVTN?%%vFx6~0=in8Xfr=9kxNS!XFQFi=Bxm87 zg{pV{0R-I=Tzn_`Y2^aHoO7AO!sj1%e3$VqGfC0+w&n5sR<}-9oSi%NRg}+w{lNj= z*UlZ^WtrphblRdXH*Ea#>SeZ2GoComB&_6MkFiZAyjTA`@Oyi$zX9~}(*-Rt!ayhX zTHkvhuqc$i%Q?#mx@4jF1K2#8T#UbmC0j8IO{Dh0`r;{Gb(7ntE1k^{qkC~)8>o*H10LJzH;@Hb#rsG!>CGaFRk5AsjqeZ>ElBEZ}z*`yN91BFF5wI z!m!n!1=yeOS3Wb!LN8^OB5X-+h54E_=O}vGcI$S6a#}e2NTKLvC{jniYrj)JJ0@hI z=X~owlYqdZ?)7gd-ZnU|&FrA%-p1F@9X_A^%EAS<6{7ab~mU zJ1^8t+Wgs|P}@e$$NuHQ|NBcatKeE?nhVdDqL+GPU2I%84i=!+GCT~B zF;(R3aeFccf_GZ{`-A=YKc{Vljavln#xyFjhjZI|fO348B0=kgVhP z^!W2v{`oVnGNa#m1hzkUw*q#85(cW>QlgD8f_RK$kd-frs7sXeaLS%73H#?q`pb91 z_GIOJrf|w9sJsC7>wHHdmr#2Mx!aw?G;c_|4sD7hDTz{pBvX#)eIhpa@teOq%s+o> 
z^NFO01p%b#p0Dq3+IWVuUmhcs>#zvU80xE1T6|`lzEO4b1Zy7doD#RY-#KPi!R2=Q7bwnjOsyg2h ziwvk((`Rh{@6Qu5n&NjtG(7FsU$#*che0AYx2NN2_=l%)D+ahksb#?8le_i{3ec<}f{d*Go)t*tw zc<%hvYvoO-GcA@A*zXz6QFY!5)SGPbn*AL{fg8`V!5w_;Y2Q)W`Y#uH&a`Raeg1QH z^1nSxrWNiQ1ApfM#6rgBR0Aj7<$7I2FB}v>X_8=4{~kV<_h58Y`x^bfZr=Ue7REgE z__xRTkN<@12V&OUdG+B|_!yi)@OBmcIxvh62^`zGp6DF-To|WDZ$MqeI>dMm%gJEh z;YIJ2mIvgz`@4VDP~)oe83#q`SA7Egw*3{iK6Tj)q^zBH=ZHiBpa>itqKz%#uBQW(2=Zw z!iTO2bG78d!=c|T{dv&9fnge|OBtQ>dfvN)$qho<^eM_(+tD!3){U}Z=qm@KtnVsS z7wd1YMUlF=QPURb=pG(z#j5BfMnE>OFNKHHYEUDy4qZpBwW=ykn?cqS|g67rtq znt$n)KD>s%|6`Fz$)JTi0Ax;p6C`0I2hyM7>yy|?2r-2?#us&*oA&lvX^JGBgkcxe zK0SB>=ZHvTHR9h;ukMq~u+<`8&U=gHqeqWEvf|HE z@9(ci{%=&V0I4m`9isUjIM5sl3;UVwjxQ`to?E1M^N)QN*nXv67Z!>6=+QrL&}x|Q zbNGu)3SGHM6*StRj}P2fC1Q0hZ@0ANHIF9D`sd+Cs#g;DB|&?1SKDiL(tRhy93z^m9dKK9;^O^u)Y>RNwfA3tbg|wH;|WN(*+n3; z&JTdseTq@;4f~yUzbn~9Dc)3&K3oSc!G&(uJXI|D*p;aNT083)z`z_`u@eD=p9Z=_ zz(%fOr`f)tqTP=D`@^MaFX(2Ukk}ji)-||#w+B<8g z-EX%w4jflFucg<%Z6EOCdh|5YndulIrLG4Xnq@pQ+>o(i#fsD1DtXx;MB)>{+JR~l zIql+b-G2S1&>UWQ8DhzCWt8RAA(E34HCV=?jS!D2$Oe%avQ)hEePktd=+sGzh!*$T zY%8ndupV{v_3u;+hfAz>^4NwS|M~jKcg$9c$P4g8IM9*P8nh5@0~L>cA()j}N7b8i zN*z=S3Q^P?-xHPmQI3_R+mv-6yHiU)QdxfFDP23ri4aKa-sp&Cl%jT_HkDqA!KV5i>p7Q#>AsKc^tXpM#SL8#Y z`=Eok{0tpNl(SZt9&bYZ=U*X|7O`-P_W0i(yKlJUi3xytTD^LWWz<%9RE5Yx$;Bg( zHR%ih zLMMX59fe`K=s}r&;okY=>RGSFzMiJ0G}_bUhfiT>m-;X|Q#+bXf=pP`{i;L$?QW=4~))VBR- z43XDpwhtUi&*MO2ugx?t4~ik)ix&t@wxv z?xQ~%(u95Z(eM2Ii$;AZ@&5kBkN@AM!T;w=I{hOqb#Tq2g^v$_hta?cQgz5|Sr{z= zV@(3-N|@ZrsB3|fp3w0{q61({a);#w$EsNKR&gdk+5Lx2QAcaJ z6-L)DIDVwIHd2eiQRI5S$T=cFPAP}OFZ&-stS-6xnqk8g(eubmclVorLPVzmdP#N!S1m&-Tf(nP8ImTHCggy_F7n) z2%F)OFhx9!v}5RRT7-P3UaLiX$c=m1b-hy1PgDXkq@M!GB(%KuP7_qRbkF@!S?a`X zBPYaq2JvY3WmrJz)_(KNH!AZL@H*;li~IFdg^&roH<6vW$n>NkH^l7@yU7I2zlW+w zfm(q6qg|gZh^Y}yC^*Z4BQ38hoE+R}ri)?SS%a;`b|>PVPY@f9W@>6K;iK5e7QQs% zMbbcLuTQnp=|R#*TZ4-csr!tRjT>az>>BCyZemf_Mc3ywOIjutXP}XXM3n~aU-T+s z^N+@PcRDZc-Xdf~SgogYF;pT8xSy{6b4W#*6nY4NH+JMka{i 
z+g0yP9sHqF=XRj3tyzp~)rh9nvvF}1Hy~cb!RSb|8aO8(1wp|?!tL~A7@ESZrDlxH zN(2QICs~$j#SK+ztFTZ(FED|BS^N6_CnAp%n##R*1OLvgG3!K40&iE_MRN~$F}s&q z>%93VPOP#Lnl>|FmJOTM#B2*|>1xqvBPdDYgh{U@i)NPJC-3^e)}~W)5O!nS4sC_& zCGD2hvbUwr(EiPSvwy)g*57Ax+ZT%6LLME3FVO@%X> zI97O{ZPhfhZ&sb*EKiKmE#SOAJ|mE2?g_~5Qtmx+C-Mja%^HR#q~U@odTaNy0DU;egY11IkvWW8ClMU>`to8V?o}Gg(;D!CKLa!~}-2WHQcanIxnB z6|(CEH5Yz+;Y+a@B4LZ3u;LT;0=@!r?_z2X=8ZToiNGWTD(WxmIjjRwZHx3Rb$xQn z?uN&OWK8M5`V~IEdkx<_Y?I5Rc?T$9z0^APPs-6!gcKp92tTfQd|36kd!IB)?P~eP z&}BAhy$f#gIz4)f29DWii?F^1iN{NZ`suYgdIlJij*`@mTzl8JJa#QvOVO(LqO6H) zs>lzeYLaeXgxDDST>fVK`184bvNQa3w@&X&gAl=N4t_CDTRVDO`Hl<0xaebxV(uQW zr!vUEcHlU!O4fHp&PeOzVr{W_I#7PvAjENq7y{2-9KJfyDv+%qP6lAmx+YZ0H$@qX z&zKJ0bvHxvM~dtQ<*Ki?1XjwdVJZw~E}E%aUk|NZ*TB?Zsiw~Y|Fx(Itz zXd!N)x|YAn0^eugc1olaLZQ5Eh|Cl3fNO3A9lcgwl+Xf4=3e1)V73eh``?MUZH2qR zrOpk+^chAAjt@J%icQKTTGcS zCJbiEb`jo$5^BCM+^Nx13ZpTuI#df4DuK(A7C3iIEr&zete5SF zeBepwlj=|`EXpBG>BOX)nQ$$#aQ5mqX;1&(Zl3{f0w*|TI`x^`=(*D!kju*8MGl3O8@|6TamrP$qTWAi?kD<~G? zB5_04+sg#$jiBk6L_S@SBbg7v@<9nqucAiNp9T~n=ZY8vEmt{zoZ~yGv+t>}NUyR5 zhVEcQxXREO&=4p2bo#9vH5%x9N|*vNb;**Wtp@!q7dM)_5ZuV={utxCC|!H)ylU~} zENq1-ATG|AX&~T*xGehNr-4s8!ZW@5NrMARPR?}36S>nJEGMhOCN@{ZJpsBEWOUFr zT+v0R@uVZ~iy@8bcf^PGZD$RI=9a%GMxRd|s<7D)@C)F!OTSB^wwrBzDi+42r&a1| zVOMe*E5<;Jpg%+f6?U_s*%sG@ql)@IKIV098Ed%)zZX0)2Z}72VFzB!#5qI=e?2%7 z5zJ_GBdL1$3X+RggHn0?wx0v<*YAD+7af#9?F-L6e8rWK`i^}VC3gEnir|P$qP2g2#UOVaypM;E=9;DdBw*RG9 z;WZ6|qZZGwaRI8V+`mW>*o+}2{WiLi_&Ru-66`qW4?N=no@R4sUsp5AtwJVRLVrkw zcth^r)Tx2jj+j?+-W1sPs$3>EYQug}7`6L;RFlj}L!$f7#{0L&q$1-TeWll74Y`_3{!KG%!ZtGQw8ET9Gm#(;{V})eg*n5 zX9`q_^8&I@%(Y!wjwW}`!b!b%O2^3fE*z}c{B+3v3EfiR`x&V~|4s_c-}bxre6qg5 z(=*EI(`OGh2|TlKRFkARLcyR$GM;nwtIi6uZd2G`por2+==4dGCZW=2iLg8sdGWqx zw4?sCtF}+_n(v%Y8Javtg|&N?jZ>AH9W?dX4`w4UyfiVVV4G7{@7;96Chf2E{G>LZ z3&VbY(rtGS6QYN->jvq9DcjiC>?&%iIEGQGpUsw^e||c;x5CT~V+2Sut1gp7Swb<) z?I!4+vDJ!EpD3&)IH=hA5*)ZOX8Y>12Gw1h`l7Xw{gt)NykdO&1m?nyBM@dI>JITX zsM|LBs^?@TGcAsuojWgWPIU-$S`oEcdp!K3wRm`pRAKmDVc*Cb^BQR8OjHP55_PwN 
zcEO!^_ZZ>96U!p*ttE_P&^tRqW^?e+UqOVxaU#6wv2Dwis-+(AN0SesZ!x2U_V@jBVq}}%EiIsAkkcB@QkZ<=2z=7Z~Hz@ zq{3`g)S%$ruaNVpYxD|1gN5L_bi+VC7C--hAjPZ zXh)VidVA?Vp*j*7F{>zC#FI6S{}mQ(wjf!P44Li9mJzm9`)KtvufG0lN?@D-%%Ml$3)orLR^j-DY~MvF({|Cbci~?eI$VY(&^lsfo9Ix* z#{`pp=`Zcq^-@pHsO)I*a%9-4SHn3mhrM0f0gUfiryr#*!%yJ*2&C{nk5nNlP+JA5 z74)g!=~a7p*?P;z&vaIfK(AAOt%{mSp8u^)sb^2n6c$vFJ?yY< zX@z0w=|H^8U=C%F0$|XIlChlx3gRB5?ru0bLm&#JWS zK5Ff~&;9OAIUua!D#vZX)AE1N!&d4_6~hzAMvjr`cD#cR3+!plyiWtq{p)^GGQ4D_ zNT)l@N%O$DEWa=oRq_2(@ff*tSF>POh#yKWsU#`eq7BcRm{t380jUa{+$z9xrAUW* zeuyV2C#{ho5UKiKgMs8J{r&BJrc#_48WpWs@TtGM_H!VS+ApSHY=wgL-kW5b$UlnR zd*x`mt*h@fII46ZUY-ASudXi<9cXvY7duFrEkVT~-I*$7OokNI5-i$whH{?`g(5_} zwdh19OvDDRz~ALjkC#iLX7OTfGLuC_hiF8TmqbYu6FQ&FZ=pDiL<3{}+qbJ*#&Rkc zW$DL%G9td!oCFWinI0y5YvH4EK;OmO&h9vz>~HH;Z6RK0a@8NOn|&%beA2Y3`h&Q* zxCs=5$7Er_7>Nt2pi;3A+ZOxln0!rRJXO@ZwQm;Fshn|FUs1N3tXZ%!3B))E!Ca#ebyhnCa%-}SnXu;&?slPv!Q+GX{Pxnj+AH^vatGdouIN?b>q3~&Y!(E)1PHj}>p3QFX4Kc41Q5_B- zl4)vk&0yigs5I3lgP7G~L=28go z|J`?A)(({j;56;3aUX~*1_6g3{`Os$1q}kjIPFJ1`@M}=%trO4v*Kg${O9piT%&JA zs|;aK{I@vvaF#TqT`W72GJ<2tKqBApN0cbySi!VDW9;cqg?0PFYxeJp28Oa}V%gM;o&P{jvFmU{sgu^Pd}t_&H3=xoTMHR*r= zY!QM)i9tnpw3q2_z4sfZDg_VYC`GhmV)@Edk*P~pU#_#HVPqWB2^AK!%*ftE*S(CB zz6MthG4Cth!^)LQM<-pox?1nmG)3I^3<0Ejyjr;6xBUzS_olQo+GHnU%_L#fk-Hrp{#k0f*Tf&{3&J^^I z4DRLG4Rx=Rj+4Lbo?(dr_p^B0R!o$x#Um^ z#XU12C+MtLx{br2hhbHcry^u4F!5aG)9(+mN0}qj4(1dZ8yibuFR)Niu{#%)=As2a zyr>Rsi6}B~=fJ_9ORTNSr8t9hb~dfC|3ZPLh(lrwTJ#c%tsr1>I|R^{xkS?Boi`t@ zzD(ngo~7;;zkBO<-J24-eq=|lDAmykFUR^Zu9rNHPjQfIPjZ@w_}10GDO@j#!6p!( z`ep@{@?^m&@2=G7-McKn{RxPOx79qRTAUP6L=^Z^R0|eHOQHBB&JtpAgyEXmrS6}4 z&0YooLIC+WrB0d*1cyv8w7CHODxE3{kK%6PdnVG-1m>pr7Ii_-0 zTc^gUlXkt4T=th^)ge~xTCRxB34N@8B74CU6<_jcJ5^uD^=DOHI>Z!CiOe0LyBbeX zs@yt%j~bCi5;ENAr^%WKY56y{34lO#`ZH@tHE6^-X?-P@(`7Ctvz`ldxY%E08P-d)sqz=KXV9JiN9VD{_+kPATedHx{$&+bU$vxun4@! 
zHf-7?;HJwNN4hOic}n{?N`S8Z3%pf3Cj87#c$KxmcvE0A@g@Zy+#m36K3Qod z%yH|JJyur>r72`K)dDSS)VZtLca|QS0EB>zY{liWNX06RcT~o3%yiCJ-b%mX?vud; zzv0|m9@?)9gsi|dw1U#j>_Vtqr8zK|<4A^hp`;e%wJPr}BLKv}NHAdOiCtavAfb!A z8{eLY_&@K^wymqdc$m`S9T8?RS@Dz=)oJs)Z;!QXQ;TwZYK1Mf{@x@T!-$?0F~3~OlQ78gD2fni6lGVs@M5SaPYST1MauapUB#u*vzQbE-+ z62NTZ8LFhE0)n061vr8L3vkrJz9_eq^{6I+=XOzTl46km;t|0LnM2!wXLGX4KE5}o z4z1Lq5hX@_&D-S<(2VW(>_xX-;qg1d<0mi9SsvZ~V4Dx|T-A=Q$q%Z+RWE&2&6gR< zIt|qZ$!yCp^dYI{MDwrU&hdu|-4c+3WV6TBv&GyEuWE90f$+g_f~pech_nJnDNK>F z-6F3$#H5hDaafCS0%cq;6CfBK2B5DDo2gLu)v_wOal)>6_iPQvmyrIMJ2yv8WO{8- z!&gf*rI5YJ{0jd<)R-r5p)4bJR2@Pb5o5^|s9Zl*eURTwB9a-`vxp3Y+eeTPyo z#_BuOkq)s%?TzCAph;fb7uzeMXUIfDY?+Pc%xbb}!js{?yPWU4FO|5tgvfY{mQsrQkeuecNv#n+WqDd3)`z zLTn0j>burvZMe`>TCyDKVzNlK!-k$lvMW*IZNUYux!uqaTUyTQ(_TsuB@<%$ zK&|SduVEr76CN*~FQ0PR%ev5=ytkhY43qbkGI>6=90IR+2s)SfG?77D*a~`g8E#Go z?U_j`zfpXSxWrbwI7?HLdeCR&t34b+ea`3V;&}epz2^y)$iXOq&X9fsjrDSat!K} zuT0JrZX02dxYqMxymZnPNm`uL8^=TdM(X3J1K^$;&{I4l$B!Lrxon#vwW|oQNcpG| z{Y7qiLs0eC6C=-|UA+4=Zx7o~q#HnW^HLfZRlInAa&`XwslK(fS1RpGe^T`LX?yzq zo1;H(+oVD3wia!hG(QyRRiSz5NrS073a3tOTNjpe^2^O3UO##b9{qWP(s<2yjnBXI z_Kip!_5AJt(>uL2g5u}rxSY5s(~P5f{L+1iwc%>28r77<$W~qSwilm`DnAlc9(uvw z!0l%7>La)99^LMtFvQieu<9I}?9}X(qwyEzC;*@792tM#d$G$}MTdi@!o1J6Y#KON z@4)1dNccpwf09a~ROWkNAe-Xcm%W%xufsd%P2K*siWP_xPH0a*F1Eua$!j;(fFihM zf15c?F(CttM*KYsqpvTShWkFmO&IuR}Ul-CJl|gcMZi_vjJy z?G>cAU)<9lVIk>YPW5$w>ln^b zkq-nvP8Z)SlP`h<{^}K|H|YUKfc?FDopBLmVEH&8;z1`OOx_QU_6tUVIH5a(h?d6< zvkQT;H#mXc(d;$6e8A4DqM#$KN-lgRQ$g3DlI*p3lb5^gDgV*iMN7tjQ(hd2MIV#f zzwP`<)hRLhjY(Q<@!tZ2=yaFRv?a4+ABGkcyKw4Hu=7;tcIUQb0YKxau-J|AA>Bs)9WQ(rlKY7`V^5&EEt4p@fUwh`y&>7NhKzIMA1^ezmqysxe| zD7Y!Wiv6u$f3u#$8JY6oGxn$*0;h5L(pjkqT8=HpDVQ|BV09qNf+RAW%TzK zo02ci1$i4csM)%Gzq?=IlsBUc!{}d_%gYNL*M4sdIydZ*tmT03jfeYOb<^dhww;@L z#|AAeV8b+4fOkze%pH(&WkzYA{)&ObM<>NQ;VmkDR=|s$X0t! 
zR;l?ZIz8rPJHJXaiN7A+Ah7?~*lv?YHVGUYeC(Jl6`JAQFK6Nji!8SK!fe%zm9|2MNU{D{R;NVuUtmJAa;EtUBHUPw#EsozI+>fd~1C zR`=kqLEv6O6Os7>OpOSlbfL*_F1i*;Dkb5O8J#c+NOhJUA4b9_9{x}aJGN@#YP?An zwMet;_OmF?{!UFIj?kAcDrEJGWdYhXmHdc%CdCFB+{kyY3okFvOAn&@ozCf%^Jcao z5qxYhIqMAq%?onsWgMQ$ML)G{clqu&h6qM(O-Gt|u&Mfn4Qdp&eP-O+u2Hy01_>1> zcj_%|o%cn@uL#RTHK3CAGW4w8CQ{J_-)j|J+zabrQ7VxIImni~NeUySJ0NV$+$nN# zl8S`cVi<`qZ{b0*$OKw$s4N9joHx5ZCm)<^JmhARC2Uv)9|UR@}C&q!56Odor9=hQ7pR3H>VbeI8pRjK5)GsmTv__eh z;>bc=v<%bUTZUPfkY7*Q8@XN%1!0B0Yg*tJX7s>=71w z=g6Ktds?1cFgCGXMj5ElT8MqRro3gT0IA#i#oX0?Y==PNGQoV+q~eI% zR=P7zUfdX0Jks@PAhSyj!)*9A*}->8;TnGZ!CqXVruvBeqD+<`PH!GE10+dxOeoA6 z%lPfIa>DuRI>)1c2yfyN7;69qY`(y@k<&7u*gfKQQ&?qS5;i?z9{|zChMm=SpW!5} z8T@(F%swCj29D(ooSxmnI4`YS+cNCoa9xsGkhmAOSFM`6tGmyY!}9ocBhCjja(W}D zvS>Nt&fKl~%Q@Ny9EbG@rjLZ=nP*`hMz>NNC=+Y>56d9l&U7 zPVb$e-{yp{fhwx?)4HV;|HN`vcvbz9#I<&s^(^npljOhx#*7=30Tj9P?dxZWC zwRqSFW)z##2oQ3c%)Q{A^G6v>lGf~uRYykfTvP1 zd)YesNm+Pg_D`R+aTYTOk2lX_w|&MWu2m&?2j}3@AYLd-AN>9cbf7-Fs)-~{L98$M zm&5j5J8z{cb2$XPlGQLl+rasc^J(sLC#hUu=`Oe#1bAHBC;`mzmRaf>5$DrEET@1P zJZ9610BH}s!^hUb!y}H&XQ|zsIM1`fr-fM`%y|bU{G;z|B>D!+3<|Djw%PXF@?^DOd zW?dFrc3$4KMN(`H4do$(se|@n8OubNpsLe{?5LuXU)CNcpDtcDM4{W;I!BK)t}AS3|<0%KhHgu=f zwsf{!S`asnmkeLZs>IEpo6oa8Q5}Y~lhsn-L6V|SSWTzZ@~SdFk{EEcVwI_qQ|3sA z`5se24Y^;JKT%LNE?p!$S-7kma1JqZh!t6b(hLrRmOgR|cspiOTo2=6_ng)<_)jYl ziME&NmYC8Ku%BIf_iCAlT=OhxL1DyM_Brr^Z5Z@$;k(Fq@% zN5eF!3C;=ONV9ABYDHE5n;a}hX|bpdk!A=xHohNhn>`WO2t0^4uFn{~<#iR0e+@#U zwUoZin~iF2T@5{EQcFb#KW_Sl`t#5%>I$O)ung0fXLrc! 
z8DV^VwFjvk?FPFz@tt|cklco8_n)~AUjE>lgZ3~!d~aXtc~=n(*M^A?x2*5pexI$= zT++pD_32`Pm6(VLj_G+O7eX0bPc79nM#r zY3QRhKOt^PZqYN>7cjrtpGy#z69AbZ^YA;e1Q&A&x>BEF&OYnTva>qf%tmv5`fF)= zsg336+#A`Zj#`KHiA<>1SiJ7b0>`L~DC14kFH7EHO5mt_^A3-rYn`W` z>b~<1<%=)C7o8rdkYa6O-?qhUN~YJ+xlx|#{NNb-l{Zc4>Q7&M(bh<~XoAqy^yNlc zrE`8blcd<9B#a%iNwTKG@1OrTW|KzfilSsz{fbyC!q(t!{Shwgn{vwxq82|Na&_;U1c&RfDcrtWT+!lHS`!3%!> z^kBMpYe@zsvCl)@UUp(q(Q(k+W+-9j@j4A>4BF~17zjXiuJs%c70(mJjg01{`VT)0 zwbvXpX~-FN_+)I`%xI`gcx*sTag$)AA%SmzP`IVVbI^7i^>Y~s)h`jvlQvWefJ)-_ zwp*pM{!8qw9yPlqe4=)pI_<4oF0Wqts!F>!+ct4p zCV>_Pw|CAiZA&rkOLkV1Wq9J$aaGZ(Cp`j=y$n@J^O7M1z9nwbj*bT<_G})+D6nAz$ZG+g;`NIpDUH;k~`~bEY5%3Z}Fyn6HMsJ6iQh->y8yCN;6s9e2y;>Z0T!;{{KIqav>N#7Cj9$(#&e z^4yMvQ*OmnubD$9MVbjLMR*PzTQdRcE|X^|%nVowP!i%bOaOV(p>!jd^i?VkL35Bo znkiQEb`%TL4|PGq1aM7xw?Zag5)FpV>a+U`&sK67olc5w$+*?XMfWnkG?9hMK`rK4 z16Z3Og4fm*1On4XixqgdgRoueGD!SS$2L0OAhIPROwaQ1A9c zMly+Ml}z#lUyOoS8p!W}Xj=Irb8KAnQOWo?T6&GP)%h%nCC0G3&7U8FRX;fCSyrj@-1zkF-=K#hfFY)dduamldyUylYRV zU&O2#BxQwtK4oc|?fKk*%ESuo87GdDuNS+22=P|?CR9~tO1qPgd9ZvO$*2W~Ml>!U zUquX}r8&cD`PhWoVm6?dILJE_b52w)0BA*PHa-K05K4ex`&7%dk8Mc^(Se-c!Wfn6 zT+g2dJxYv?UM9Y;!|%>TMWlzEYxRR6r#@MJ%#u`p5nZa+ImQLJKKSf#d+kqOw)Ik; zqOccEud993ZKlw~tlU>6=N?=-i^Jp85-(dXSyDza(le>`S+8ALBb&~X0}^=C;iusa z`3ZA{k^-jUoMkzqQ=#ZGkaUV&%*)~L7)bY`{M;G}M;Dll%ebDG&%lclE0l9soO`?I zADB!fG({#tpx5_4Ny2ovm5Q=Z5I2Z?C+*EVDj}~#85be;S=4(uEK7~hrN51Y_^oPU z(xT%haR5@erk_jx6WBIU|x__Sf4`IlHCeLe1~*&WAfI7 z&s=#dICv>l%VqD=d6!=+nR)Z2UrJHc_Qk2c_OQXT$|-b2?d<83iGN~sCa!V^3_b7M zTAS;VQ!4#Fj;&r=`RPH}(M=TkfW{kx$6AZ?XVylpt23Q3*TpjEdt($w>P*Q>Gj1%* zVrE6x9EzXCW_wr;M_nkXt@R&5I2tA+DM5NJ?HdMpQihbl^$a9+3WZ4AD|(Ua&$Ak5 za~;Hq26UT){Xx!i)k@7eWtYw|QpO`kj%W!8muGGwpu0>RH>lj9 zaC$SG`pF^w<@6%E{Qh)zg=;ml%FUKeXUNKCjvf?0Viec5a#Dk{fiIwIp$i#tY`6d- z;w70hpgto%`@;88WvKKIX{Z?-i;d?7g>sk<&aZQe_W?<8uU^LfP6`L;m;4RuF0 zX>yqK&;4>aZ@d&-gAbFpWyBkO&+?uJ{*?WkkKm(<}&&xqlc zMn*})Bbpdp)eLuUu|nFXFIb4>k9S{fr)^Yj-RIx8ku{<4-F>0wYDuy_XOh-{ZbRR)1Qy{#gx9bf 
zg8LHA=Q_xJ<9%KJq%qIf|CLEH$Sd}ANXnM}@Vzk-58sp`bD*k~5v7k8E;ZvYFq0J_ zDJRV>r3Pk+P}ss4{GvQ%ynb`%cWvMo5b5V&0x8oHb$qTc)U&r=N-qEl{)X+!Es!Kn zYQf;gm)~TrpmZ@81sN@Ui|OG**fr2|1f5iRP#YThLrk>onf~nYrpq!LTj(mN!C?k+ z2l+#xpDD+o5OmHPI^L3Mm6ITdW2SR0bmavm8@q*mn2%c<`|VXwTlD`%n3ztaA4#VT83@Z~{XW7V8IFc15*b3@&gVo^r+ zHqX6c$ie<+a7XT(HuNA&%(XQ+iSI0fs~R_HB4#@(@ieroj9X7I&?rdy!N3Ou>&DsT zg*=L^P^-^Bl@Tx<3|J-L`_g(hU)4xzcbP1peA}`sx0yV8YV3+4AbMFt6u|yyY@z@Y zh>}ae7|BG9K$k7y+&@WtzgGPQH_z9rp64-!$*rv5o6MVYu6ePHWq)A!%5fYN=<)ha zpZ11#ny_gdaETyEp@#~M5U&<0aLvKrEG=~H-kFv)6v#yjC zIAN?2!QbPkG&#jC4-usSM_A_{j2&X`>ZR7cr=BI!uVEh1X*juuhmRNgti$75nPl2= z3C7-!IM6IC=y~(5g6wG=m{wx;-FM#&n4zM<`z25j#WSu{5~JQz0*;gX7Z2Cf)0-(i zfRHoHgE!VF%%%fkL_pJ!>EQz6Q&~(aaAm8J?bA}5cdlsn9A@uXj04LE>O_}TAOR}2 z60|kEg(lltefHT688##vm$bM~_Hb5X1U&^?;`lyW9}wAJ?(5ah1EG# zI&{Z<$~2RnajlJJX@=kVx%@kY?l;nON3*b62I??o=GAArt_iZr_K0QIv zVIRd{nT)9AU*QcNEV%rZzQ!~;fJOKZb)!Rh9OZ24r3WjI*DY2M%S^r*ZE#Q`M*zERR?D0%l6Td+pn2<2})f zk!PAE6iRw7!};C@)Gx^X69X<7-XL0>H4^S6yDA&r1k|O6D`G8xgvdlarFPP2r>^hI zUO_IKb9T=dHuXZNAq!@Vo_II0NT8>2VFDM922rigsuFRb`o4h(a>HcK$ZG z<+?r&^-_hTb-e1Ez^5{;h^-{g2wY-LUGb@N6Q>(bT@9ot5^6h}T!^9$F*;O0R03h< zM>fk?#o{443cnbd*1{qpo>?MK8#r*_*4h@d<4C#tk{8Tqzvaj`X|bFcyqzhex#GA- z`og(D7kkEn;s#Eog8NFlUs|OZFc>WpJl-rw>(sq_Bu1QV@;)ty(@CjcuY=(N3zeR9 z;wUA7cF&$NukiJ-Wstrts2BrAHr}4XDJM3!k{01&OWr5cQNmJN#+8Ak!+<*Udz$IU zCRX>>ym~A{$qr<6kxh!b`-7ZYG82!wZNNN=wz@TlveHW5n!l>r+*q%nrcjZkV33Q# zfh(*|pgJ2EA#?;Mys#4PfViU&(PFK11(OG@v*fKde>@hvkAv!sk(ddb)uh49fC>5~ zHpE(P`O+Q6o9;Qp@JD7FFc#WEX~6{)xJS6W!p$P&CUvX?lO=k}j8b9I1OA6u#hyxd z{h&FyQtYBcc^1FD{@#&#@4C+H*onE$h0SGCt)MQIn-p3vyiT6VL>XGetc#S8HUW( z*mIaq#MG4nxK>$1uAs2G37j@X_(i6>`-swt;AF`bAB&(HelcPknU{EpI?m>o=R|GPMoC~ou) zr6fiN?@U(G1#sbbkt|HEv-AWDYjOcWM4>=Swts%rtCAbW)ad5{czgdco5Q8RZg}}& z!kZ0;g$QJ6)SE;CM0~3hin+9b2yK}2m3i5U0f@x zdvE5cs0BQB3`K#A8e<0S@O)FkwtVQC^!B+@I6Qv~pFj1>C`B+OdwYtsCd`ja*yRjn z97+zM^woKKPo>EgOc8&nuoxCUWEKW~W z3>`8TITX^~M;S?M_~+t{AS_|^tkI+XP1$VkHD^63A;%6BS-5 
zdt(gAPX!ehx|?i2^Hl}G!<;m&4-~ID?>(xzJZB0a!i*!q^HCKrw=ea0X&X2{9P?k- zcf3_s?%)3kZ1FI6v{3IG7TJ4&_Jv~1Q2SF$QSp!l?efiS440^O@Ha1q{)WZdbSlwk zE}_Y7wNr7VOowG%p{&vzY^O;%N%LXog^e1ebdGwdNS;5r?LWM4VsBxn-Fl+dRp{Sf z958Y+;jJ}k+B6_iwa=Yd?!*;ZtAja5B_s>qHF({H#Faz$R6C5A!x(Q)*e@m39=`r7 z4y=l4cTH~RJc9KjDhQ$6iMT?hq=8ytk~l#L%gHimOcPOkiv)#y=*dcl3mbX&9&Y7_ z1dLRTb+ zeDA}0r5i0%h{=<_RkdbYyG6RYxkY-rmCJbQ5U^0UZP&H_;lRHdb7q8l0*3_Hho~aC zxtx}k&{%+~w+^sc#~b@&f6QbtGnA&7po!+IKD{As0AysL{LY7?25a}-R$7LrVT8OOa8DBxXPdntVG!74&_FI(9vSgnQ)kKYE@65)&e@N%ElM zcf<8qj`NFWyf11uoe5fAal%{NXXtrO1Hn%zV)%4m(5-dWIV(RXMqbj*l!1!3@+jgZ zQDkL)A5c7eg!3P@Zua1+FG&0!%^9T19{=NgLwtQ*U*Q~y+P{N0RVqU^c}LN|-e%Bb^7Yd*>k&r}i91S9lI^3_keXn;t1V zp`X9>BtSuD$FGT=?5`I@3c>saC20W>C0A%%;o=HrZ(bJI&m~?<1E)^(DW44Iz;wjg?-Zs`EM$lT#X6cs@{G>H4MUC1Cs_ ze8Z*NIfTr9_IEJ{p_W8Kib~S8ao&$2?#JN;3Qew2JSt!od)jKlF+ydVu!$%lrd(u( za9^($tsrqxT`#kZo6U9Ht|X9vs8)&W5}C5VYxUQ$)9gpre;-7Kg!k{<*>fH5zGnrO zv-hu8pQS1;=P^}|?gJKH{`@0m%k>w>Y;*k*+2HSeT9hDUccpZ&MP7HKL>tZ~gsT4P z+3xFqXx5x0Up60GL!>73ZF-py_;7E#Jk#;>9XX9!83fL8h#d=2E;V`_5C6~}kZj*b z_cNkIIT+Z(u`8WDaHFmI*D6-gWtxrTU^{WRu#Lets*qD^3LwFG7)}u-k*4>~da{@K zQ0vq#7)A&!Pq}%zj+D_bG{y5cA-lvqz>A}UsJIk%LpY1+X!QU zy_0YSCX$H~LEl?_RcoRCBK-T`o4>A1ozIs#baZSK!IEDlNeb7w^}99zu^0%F`D+{k_nSj~M0Ou1)z2xVY zSE(Oj*S2*a3W=c&jV{N`6%gG8dZT6`vdDlFqA7I@*#|j@6dQm&LW*|Dm^4g_gwcGJ zcS*!WuHZudE1mTPV^GN_>M!Piv*hbkK=?rIWQ8evnNl)pQI`WIeX9gr?irr7&#}bp zV`n9y)GGB|ZT@?eP77lRO^B8Bc-n?ehjM{~WPh8}q}}kePk(Fsv$r>cw&GPp9JU~s z1V58Q*8^^&@xip&E5BO$A?5w|>&*WG?LJp=i`1pe5CXA4;p5iVNV6M8zZvX2OA>Sy z`G?*cVgE2ITZbBHNmOi7I!w_%z8h?1tIn>Dh*4IDzk4sqp z1+P=a)qKzCHz;F(D$-48JyhUUvEgUN(q?b(A(y#0GnAQ(w8gn`9B<|Zx!dBCGn=)L zW}{)T9fUxQQeeK+=x&a;q-%>7vdMNND8L0!3MO%6LOaV)^RcuP zc8O|9FapR?@UTb+ISMS0z6v3~XX8_W={>kjfZ|IrACdBsV3_<`hbM7bUNz5H=em6V zE5DQ@oGo8Sw9=ge>!0;Io%Ko14V)n75OJ;qosBu%n)y|(k$Z<9m~3*BUvY*-48`SY zW%R?i?CIxM*$eNT;7oNc9&N6!t~c_(tiM$M`S*maZ#=|{JFq`%M$8g8b{z(XM8eFI zY7z&F!=vM`6b^VLR$IahA5j^%aPY~rPWs{t0eg7j^(m>Id877U_ch~T-a&sNi`s)5 zj$DmsLtx=0Ts!+Y 
zHc}yUS=P|jVvmsq6sGvbTjMzaW$XQf3mTaQK zp`P%VcrYgi8dAep+<1M$~u-K?>oU=(6c0xTy6$8IV7jhzqQ7aw9h-5iyZhr<*1)>~K! zAN|lUNP4gI)W+nDX) zWT1R$Crc2cd;PY?DicJi)VYjgmVT9w)WcGDKdSdC`N(DT>Rz9mb#+|tw$82dV(~(T z8+MqHaB}n=%P>);Oh?MX6~r!IauvYQVjOa4lfKZgx#>4yl?zLd0H87zj3pny|J z%Bu`c1m^I3e*68sOIl%mCS(Gd?7q1UX_f~kG4Px)Bk@Uy4d9D7WJp@N;H2+NRld-A z=r=|hT;seI+26`JLQ67|%|aCu%94ZwMR88u_u}5lv}HWE5X^-Zh&=(bTmFDCO+`-a zHO_POP)B^0lo4>Vbp67= zT?r5;CSDS`^bd?%sq0vh4z}0JWbjZq_j?fG`2*ss>K@W^!Ol1C+bY-n2k+Po?P;Y6 zbf$zSVsXe8iQ-ucx6T(tx;cOCn4vmtU*5X+o+dYW7cV&5mlBqR&MpBs zoK7Ka{NXT1JJpIyzl3ygNYZ%olbYKVn(btSk`t|gbtcA7CRAV<{GY^QOa-s|qaRKI zHzonxSfvx(2+lWx^O&Lw7%$NsJmtpgt&d&}zj)46Uj>c0Rb?|6-BO8GTih*Su=Y($?@<|jnvMXh_|aip zngLXS;&|vI1Cha-fy^)7`g(DWtB6fVKxM?Ti40GZ{;O3nXTe85lHM#HnJ?5|&7bLa zfK-T-jNL-D3Wiz^aFz|?W7D{GyRnQl*9aAVbn;rxYlxi~aUZ>P>y~n@Jb+(c$DKDt z)=qk30ry{{6f#nl2XZ$|p57@<>n29^zRNJFY7LlImUIZ$_VNYFybfKa*h>1EPS zTUy)F5|lO>sVt4ndBVs8rQxDrax4{y*0iG-PZ=Xv*nVljfJX)+5M2mzO5>uZ^B)UN zmOum;qW(~I407ZOuK}LwG9Y6(-`h!8!N9!-aCL{kp3O;SuL{6MsUgxyk_Ym^54?S` z0{{-7g*jOgdjL6FedMlvBwbBTc$dwAn+rRlxG4UgF7%hf&aDyrWemJW`XFv%xx*AR z8j+F-GXO~!pZuIIdA&pEJSc_ND;cXuLb^jCao;ZF{t0+oUbI#WPtpQ5s30(#<#*#PT<_)cl&w53M54EKDo@h=B+91 zMS0n=P*4EkT{urD#zRv0Q|uvlIxI9r3pI%)#Px?(PgOrthf$_>{ ze~TX^%Jf}NI{#zRm49tRD*~n7d%#MEpBix7h%*V@F6n_4tvX*9@HIG`P^~m%9snm^ zc8B>Ms(I>6rrH$zcnPg#g=QyKpu~GHu=?G}HIi5@yf0c{FKxRW%g{0ru;iY9tA(pfo)>P09JV=IH9^iPc=I9BF7>Vn&G9FhQ&%)Ed)jHTJDlAa{Q1 zATk|O3<35Dd(b-tAX#))z$U^m@CJMKoN+H5JmWC8S~MWSA0xHu9iF94;RcnvaNitM z#Wco`$sI;bpGeo#^kw-!MFy`!Q2~`4%U&ryF`?5_n;soL-@UT~ubL9zaOpBi8o^6S zanik$xGs?tLV9m128%emQrZ&KI$b{-SNtq>Cv&Mujf}y>$Sh=D6rr>@e8Ii&IG(sI;VK4Z?Mt z6iTM%KoJa^6?Ie$M#4{=OJ4db${>^InSG@808V*9HXt6k1ZPO@Ge8%GX)A>rDf?wN zjZWVi%ODBCkkqT@GV;%`P;Og{zxVwElaCE`%sVdo1PMGQ3o`x&jnnYtFX@J{CH4}5rMWJCCT>NTF z8~uj|E_ODXb!>81ay%BtA&dM`|8z$t>;|QGO|AY)=wf}X2i*WC62m^xDd2C2lzNA) z2No_&3lY--E@lDu#^m(n7V#kkyxlZ?)oi%e(aYJe44viijo8NtUmu|&N5 zsw~Tt^I(Bi;!3uwX$raDk=T{RHrxc3w^9R^9Z;tV!Rcip#T`m%}fyfzi)CUbZ(L%LlJ5S6RDq zYtlLN~r#y^TE%&$MOE3V~+P7 
zqn_vae!us9-Pd)V=XGA4ECV$C?ec1?na|xxF7T3G)qKde9>okxlWO$J^tr&An5{i+ z?z(a=u}JRu!DDKANtTBlTz2yz%2eLJ=3VJjeBY9P@BVwZ%`IX~OOS9id~nz8M^Kx? z1~%sA19KC9@oBSD{626Zt?GRoT6KF!?fDF9?pIjKN=zhriiP?|~LlvEuP%UDE=@Vn-rx+#V_Ot{QSsTm6aBykFHWz9k!d_$i4J#=j{Nu;yN3YOSO|B3 zEPPT2-{WaB|FZ*k>PmLusbQTt=QTb-E`H57K!-Qkd#n5VU*_cEMBmB;B^7VRm8`{( zTZkf*c8}QxY8Xka%v?MRxxz_3s6vcH5>uI07g|jGJ0T)y-GGDN472-q*Y*FS(up?I zzk|=bT~w}$)55)s;N!ArJ6JzCI!Q{eO_L&3cr{G&`&6w)JTf%PUjB<%qA{V3DgF2) zr)jeNbEKftVHTiPiEV?5NUC)N7%I7c{-~e8m`kAsuGVsQkvBG4Hw8`^i%`r49G-lU zLyX?JqkRspw3v$PBf~MEX1VlWEeDb$7l0I@=e$dI-~qPMs=lkJuAx9O zc~^BQtBaNY^8UnCi^AP8!TresG#Kp(3V)zv`?A|tyCps@=l>MDSE-BW|L*2=ro0mu zZ9QGuXJMogC$LI>C>@2jgz`sP00|{kj%|oyZ5*JG8f;v#QPZ@$IFyP!{t`!O5JP_g z{=Zj`HDN?Gt$4eEN`0%ld@7?;ob07=vYaZ5H(J}cy`T?}1(FXslkgB<)mp+sl~FOJ z{SAZ3GgZKcic}D2=0^7eE6)#KiQEK4TW(jfE_j33_Ly(cEIDA<_nS4@gTD{BHU5^k zH1NXR6p#T1;d6>?J+h!VemHB6LVufjpFy$UJ6mo{41-Na8j>KOgGexMUMZ>~u@@{C zr3|2`U=_oIry*}Uw2Y?NB9QuXSSPOW7m&nqV%0qN@&1cul_ePZdiN;v}Ol38K| z8x_lm{Xp7#5%TyhDnIjEvBDo;l{yg0-ZOTTR4mfR?OM@?B8iNAxJYZi?clvqq&#)K zQ_6Z7Y9k@C-e&r6P7(?>fYb0f1qIp3QRN1Fz9n>Eql)S!m-6-Hy%OS&p);aq&z@Dn zJ`Gcf-j3-7==%6m%l6RzJh8>D)+CSU%7)Cz=|X~ctFjq&1tntU;)Vy}$#vz&@`+`NzmE4?wQyvo5p>lgXxp+vMN-^`t40nxC!B|L#F+ppFc}E+57})NUwG zUC2~Kw26BWTZ>l zL}y3PXa^?}0`gO6f$l}8`J%2yJV;Ivxzx{j?EzQOV`rb>{3@tawjnV*e5Yka1Bd_N zQF>%{b8xs$znMyO#OHS4bIxqF!hMw@F6w|Te7?8ePJc<&39F6)lg<>5*4R|gh1g?p zAVX+BP@_MZIk3vd#X-(tI-mHwB>`kpnrju%*Ozv{&7!Q?%P+r~Idf<@&p^hoq!TbN zO{iHRoa%J-SJ!w0Kt3e@?)OZdOrgoOt#wOebphg8=)*eS9NywSnuCFVsKx>;*spbdlI2+Op}|Kc-xA#q@K@XF zZElY+v`1qpYqgP?i8pYc7u~7S1besD(4h`ZIHEti<}JC1?oJ-JMF=7cw3C_H>@(}W z*iu=(gDk*9bS48r3gn}n}?)wf~!#l37?DbHG;^NpO z6WaCi1RgOuh1C%>Gi?J&lSQ(9p56ukM%|eP!3I?;W?vzAZXvHV8xc2YAyCA!3sllq zb~;pACf39#ZGUR-;10B8Y-a~Ipe|q;vtwM?N6?C%w>*bXC~$8`592vI_K@e}2x